diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 678143b7f31e..0c8e47c9823b 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -9,3 +9,4 @@ /java-vertexai/ @googleapis/vertexai-team @googleapis/cloud-sdk-java-team /java-bigquerystorage/ @googleapis/bigquery-team @googleapis/cloud-sdk-java-team /java-bigquery/ @googleapis/bigquery-team @googleapis/cloud-sdk-java-team +/java-spanner/ @googleapis/spanner-team @googleapis/cloud-sdk-java-team diff --git a/.github/workflows/java-spanner-integration-tests-against-emulator.yaml b/.github/workflows/java-spanner-integration-tests-against-emulator.yaml new file mode 100644 index 000000000000..f43cc1c5bf21 --- /dev/null +++ b/.github/workflows/java-spanner-integration-tests-against-emulator.yaml @@ -0,0 +1,45 @@ +on: + push: + branches: + - main + pull_request: +name: java-spanner integration-tests-against-emulator +jobs: + filter: + runs-on: ubuntu-latest + outputs: + library: ${{ steps.filter.outputs.library }} + steps: + - uses: actions/checkout@v4 + - uses: dorny/paths-filter@v3 + id: filter + with: + filters: | + library: + - 'java-spanner/**' + units: + needs: filter + if: ${{ needs.filter.outputs.library == 'true' }} + runs-on: ubuntu-latest + + services: + emulator: + image: gcr.io/cloud-spanner-emulator/emulator:latest + ports: + - 9010:9010 + - 9020:9020 + + steps: + - uses: actions/checkout@v6 + - uses: actions/setup-java@v5 + with: + distribution: temurin + java-version: 11 + - name: Running tests + run: .kokoro/build.sh + env: + JOB_TYPE: test + BUILD_SUBDIR: java-spanner + SPANNER_EMULATOR_HOST: localhost:9010 + GOOGLE_CLOUD_PROJECT: emulator-test-project + SUREFIRE_JVM_OPT: '-Penable-integration-tests -DskipUnitTests=true -Dspanner.testenv.instance="" -Dmaven.main.skip=true' diff --git a/.kokoro/build.sh b/.kokoro/build.sh index 81a5338354e3..c20f1633a357 100755 --- a/.kokoro/build.sh +++ b/.kokoro/build.sh @@ -245,10 +245,12 @@ case ${JOB_TYPE} in if [ -f "${dir}/pom.xml" ] && [ "${dir}" != 
"." ]; then # Filter out directories not participating in the default formatting reactor: # - samples are handwritten by developers + # - benchmarks are handwritten by developers # - proto-*/grpc-* are generated code and should use the compiler format # - *-bom/parents are POM-only and contain no Java source if [[ "${dir}" != *"samples"* ]] && \ [[ "${dir}" != *"java-showcase"* ]] && \ + [[ "$(basename "${dir}")" != *"benchmark"* ]] && \ [[ "$(basename "${dir}")" != "proto-google-"* ]] && \ [[ "$(basename "${dir}")" != "grpc-google-"* ]] && \ [[ "$(basename "${dir}")" != *"-bom" ]] && \ diff --git a/.kokoro/common.sh b/.kokoro/common.sh index 7072ebe9c6a0..24648d119932 100644 --- a/.kokoro/common.sh +++ b/.kokoro/common.sh @@ -29,6 +29,7 @@ excluded_modules=( 'sdk-platform-java/java-showcase' 'sdk-platform-java/java-showcase-3.21.0' 'sdk-platform-java/java-showcase-3.25.8' + 'java-spanner' ) function retry_with_backoff { diff --git a/.kokoro/presubmit/spanner-graalvm-native-presubmit.cfg b/.kokoro/presubmit/spanner-graalvm-native-presubmit.cfg new file mode 100644 index 000000000000..81725ddd5648 --- /dev/null +++ b/.kokoro/presubmit/spanner-graalvm-native-presubmit.cfg @@ -0,0 +1,42 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Configure the docker image for kokoro-trampoline. 
+env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_a:3.57.0" # {x-version-update:google-cloud-shared-dependencies:current} +} + +env_vars: { + key: "JOB_TYPE" + value: "graalvm-single" +} + +# TODO: remove this after we've migrated all tests and scripts +env_vars: { + key: "GCLOUD_PROJECT" + value: "gcloud-devel" +} + +env_vars: { + key: "GOOGLE_CLOUD_PROJECT" + value: "gcloud-devel" +} + +env_vars: { + key: "GOOGLE_APPLICATION_CREDENTIALS" + value: "secret_manager/java-it-service-account" +} + +env_vars: { + key: "SECRET_MANAGER_KEYS" + value: "java-it-service-account" +} + +env_vars: { + key: "IT_SERVICE_ACCOUNT_EMAIL" + value: "it-service-account@gcloud-devel.iam.gserviceaccount.com" +} +env_vars: { + key: "BUILD_SUBDIR" + value: "java-spanner" +} diff --git a/.kokoro/presubmit/spanner-integration.cfg b/.kokoro/presubmit/spanner-integration.cfg new file mode 100644 index 000000000000..e2a471eecf2f --- /dev/null +++ b/.kokoro/presubmit/spanner-integration.cfg @@ -0,0 +1,39 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Configure the docker image for kokoro-trampoline. 
+env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/java11" +} + +env_vars: { + key: "JOB_TYPE" + value: "integration-single" +} + +# TODO: remove this after we've migrated all tests and scripts +env_vars: { + key: "GCLOUD_PROJECT" + value: "gcloud-devel" +} + +env_vars: { + key: "GOOGLE_CLOUD_PROJECT" + value: "gcloud-devel" +} + +env_vars: { + key: "GOOGLE_APPLICATION_CREDENTIALS" + value: "secret_manager/java-it-service-account" +} + +env_vars: { + key: "SECRET_MANAGER_KEYS" + value: "java-it-service-account" +} + + +env_vars: { + key: "BUILD_SUBDIR" + value: "java-spanner" +} diff --git a/gapic-libraries-bom/pom.xml b/gapic-libraries-bom/pom.xml index 188e7f6c6198..306f311ca006 100644 --- a/gapic-libraries-bom/pom.xml +++ b/gapic-libraries-bom/pom.xml @@ -1242,6 +1242,13 @@ pom import + + com.google.cloud + google-cloud-spanner-bom + 6.112.1-SNAPSHOT + pom + import + com.google.cloud google-cloud-spanneradapter-bom diff --git a/generation/check_non_release_please_versions.sh b/generation/check_non_release_please_versions.sh index 8a2628e8c649..07295aeb238c 100755 --- a/generation/check_non_release_please_versions.sh +++ b/generation/check_non_release_please_versions.sh @@ -13,6 +13,7 @@ for pomFile in $(find . -mindepth 2 -name pom.xml | sort ); do [[ "${pomFile}" =~ .*java-logging-logback.* ]] || \ [[ "${pomFile}" =~ .*java-bigquery.* ]] || \ [[ "${pomFile}" =~ .*sdk-platform-java.* ]] || \ + [[ "${pomFile}" =~ .*java-spanner.* ]] || \ [[ "${pomFile}" =~ .*.github*. 
]]; then continue fi diff --git a/generation_config.yaml b/generation_config.yaml index e6845cad5e4b..47c301cebb41 100644 --- a/generation_config.yaml +++ b/generation_config.yaml @@ -622,29 +622,6 @@ libraries: GAPICs: - proto_path: google/cloud/support/v2 - proto_path: google/cloud/support/v2beta -# - api_shortname: common-protos -# name_pretty: Common Protos -# product_documentation: https://github.com/googleapis/api-common-protos -# api_description: Protobuf classes for Google's common protos. -# release_level: stable -# client_documentation: https://cloud.google.com/java/docs/reference/proto-google-common-protos/latest/history -# distribution_name: com.google.api.grpc:proto-google-common-protos -# excluded_dependencies: proto-google-common-protos,grpc-google-common-protos,proto-google-common-protos-parent -# excluded_poms: proto-google-common-protos-bom,proto-google-common-protos -# library_type: OTHER -# GAPICs: -# - proto_path: google/api -# - proto_path: google/apps/card/v1 -# - proto_path: google/cloud -# - proto_path: google/cloud/audit -# - proto_path: google/cloud/location -# - proto_path: google/geo/type -# - proto_path: google/logging/type -# - proto_path: google/longrunning -# - proto_path: google/rpc -# - proto_path: google/rpc/context -# - proto_path: google/shopping/type -# - proto_path: google/type - api_shortname: compute name_pretty: Compute Engine product_documentation: https://cloud.google.com/compute/ @@ -2490,15 +2467,32 @@ libraries: - proto_path: google/shopping/merchant/reviews/v1beta requires_billing: true library_name: shopping-merchant-reviews -# - api_shortname: showcase -# excluded_poms: gapic-showcase-bom -# name_pretty: Showcase -# api_description: Showcase module -# product_documentation: https://cloud.google.com/dummy -# distribution_name: com.google.cloud:gapic-showcase -# library_type: OTHER -# GAPICs: -# - proto_path: schema/google/showcase/v1beta1 +- api_shortname: spanner + name_pretty: Cloud Spanner + 
product_documentation: https://cloud.google.com/spanner/docs/ + client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-spanner/latest/history + api_description: is a fully managed, mission-critical, relational database service + that offers transactional consistency at global scale, schemas, SQL (ANSI 2011 + with extensions), and automatic, synchronous replication for high availability. + Be sure to activate the Cloud Spanner API on the Developer's Console to use Cloud + Spanner from your project. + issue_tracker: https://issuetracker.google.com/issues?q=componentid:190851%2B%20status:open + release_level: stable + language: java + min_java_version: 8 + distribution_name: com.google.cloud:google-cloud-spanner + api_id: spanner.googleapis.com + transport: grpc + requires_billing: true + codeowner_team: '@googleapis/spanner-team' + library_type: GAPIC_COMBO + excluded_poms: google-cloud-spanner-bom,google-cloud-spanner + recommended_package: com.google.cloud.spanner + GAPICs: + - proto_path: google/spanner/admin/database/v1 + - proto_path: google/spanner/admin/instance/v1 + - proto_path: google/spanner/executor/v1 + - proto_path: google/spanner/v1 - api_shortname: spanneradapter name_pretty: Cloud Spanner Adapter API product_documentation: https://cloud.google.com/java/docs/reference/google-cloud-spanneradapter/latest/overview diff --git a/java-spanner/.OwlBot-hermetic.yaml b/java-spanner/.OwlBot-hermetic.yaml new file mode 100644 index 000000000000..dec6b8d39446 --- /dev/null +++ b/java-spanner/.OwlBot-hermetic.yaml @@ -0,0 +1,48 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +deep-remove-regex: +- /java-spanner/grpc-google-.*/src +- /java-spanner/proto-google-.*/src +- /java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1 +- /java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin +deep-preserve-regex: +- /java-spanner/google-.*/src/test/java/com/google/cloud/.*/v.*/it/IT.*Test.java +- /java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CryptoKeyName.java +- /java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CryptoKeyVersionName.java +deep-copy-regex: +- source: /google/spanner/(v.*)/.*-java/proto-google-.*/src + dest: /owl-bot-staging/java-spanner/$1/proto-google-cloud-spanner-$1/src +- source: /google/spanner/(v.*)/.*-java/grpc-google-.*/src + dest: /owl-bot-staging/java-spanner/$1/grpc-google-cloud-spanner-$1/src +- source: /google/spanner/(v.*)/.*-java/gapic-google-.*/src + dest: /owl-bot-staging/java-spanner/$1/google-cloud-spanner/src/ +- source: /google/spanner/admin/database/(v.*)/.*-java/proto-google-.*/src + dest: /owl-bot-staging/java-spanner/$1/proto-google-cloud-spanner-admin-database-$1/src +- source: /google/spanner/admin/database/(v.*)/.*-java/grpc-google-.*/src + dest: /owl-bot-staging/java-spanner/$1/grpc-google-cloud-spanner-admin-database-$1/src +- source: /google/spanner/admin/database/(v.*)/.*-java/gapic-google-.*/src + dest: /owl-bot-staging/java-spanner/$1/google-cloud-spanner/src/ +- source: 
/google/spanner/admin/instance/(v.*)/.*-java/proto-google-.*/src + dest: /owl-bot-staging/java-spanner/$1/proto-google-cloud-spanner-admin-instance-$1/src +- source: /google/spanner/admin/instance/(v.*)/.*-java/grpc-google-.*/src + dest: /owl-bot-staging/java-spanner/$1/grpc-google-cloud-spanner-admin-instance-$1/src +- source: /google/spanner/admin/instance/(v.*)/.*-java/gapic-google-.*/src + dest: /owl-bot-staging/java-spanner/$1/google-cloud-spanner/src/ +- source: /google/spanner/executor/(v.*)/.*-java/proto-google-.*/src + dest: /owl-bot-staging/java-spanner/$1/proto-google-cloud-spanner-executor-$1/src +- source: /google/spanner/executor/(v.*)/.*-java/grpc-google-.*/src + dest: /owl-bot-staging/java-spanner/$1/grpc-google-cloud-spanner-executor-$1/src +- source: /google/spanner/executor/(v.*)/.*-java/gapic-google-.*/src + dest: /owl-bot-staging/java-spanner/$1/google-cloud-spanner-executor/src/ diff --git a/java-spanner/.devcontainer/Dockerfile b/java-spanner/.devcontainer/Dockerfile new file mode 100644 index 000000000000..f736df80665a --- /dev/null +++ b/java-spanner/.devcontainer/Dockerfile @@ -0,0 +1,21 @@ +# Reference - https://github.com/microsoft/vscode-dev-containers/tree/main/containers/java-8/.devcontainer +# [Choice] Debian OS version (use bullseye on local arm64/Apple Silicon): buster, bullseye +# [Choice] Java version (use -bullseye variants on local arm64/Apple Silicon): 11, 17, 11-bullseye, 17-bullseye, 11-buster, 17-buster +ARG VARIANT="bullseye" +FROM mcr.microsoft.com/vscode/devcontainers/java:11-${VARIANT} + +# [Option] Install Maven +ARG INSTALL_MAVEN="true" +ARG MAVEN_VERSION="" +# [Option] Install Gradle +ARG INSTALL_GRADLE="false" +ARG GRADLE_VERSION="" +RUN if [ "${INSTALL_MAVEN}" = "true" ]; then su vscode -c "umask 0002 && . /usr/local/sdkman/bin/sdkman-init.sh && sdk install maven \"${MAVEN_VERSION}\""; fi \ + && if [ "${INSTALL_GRADLE}" = "true" ]; then su vscode -c "umask 0002 && . 
/usr/local/sdkman/bin/sdkman-init.sh && sdk install gradle \"${GRADLE_VERSION}\""; fi + +# [Choice] Node.js version: none, lts/*, 16, 14, 12, 10 +ARG NODE_VERSION="lts/*" +RUN if [ "${NODE_VERSION}" != "none" ]; then su vscode -c "umask 0002 && . /usr/local/share/nvm/nvm.sh && nvm install ${NODE_VERSION} 2>&1"; fi + +# install gcloud sdk +RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] http://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - && apt-get update -y && apt-get install google-cloud-cli -y diff --git a/java-spanner/.devcontainer/devcontainer.json b/java-spanner/.devcontainer/devcontainer.json new file mode 100644 index 000000000000..4e69cd5b453d --- /dev/null +++ b/java-spanner/.devcontainer/devcontainer.json @@ -0,0 +1,43 @@ +{ + "name": "Java 11", + "build": { + "dockerfile": "Dockerfile", + "args": { + // Use the VARIANT arg to pick a Debian OS version: buster, bullseye + // Use bullseye when running on local arm64/Apple Silicon. + "VARIANT": "bullseye", + // Options + "INSTALL_MAVEN": "true", + "INSTALL_GRADLE": "false", + "NODE_VERSION": "lts/*" + } + }, + + // Configure tool-specific properties. + "customizations": { + // Configure properties specific to VS Code. + "vscode": { + // Set *default* container specific settings.json values on container create. + "settings": { + "java.import.gradle.java.home": "/usr/local/sdkman/candidates/java/current", + "java.configuration.runtimes": [{ + "default": true, + "name": "JavaSE-11", + "path": "/usr/local/sdkman/candidates/java/current" + }] + }, + + // Add the IDs of extensions you want installed when the container is created. + "extensions": [ + "vscjava.vscode-java-pack", + "ms-azuretools.vscode-docker" + ] + } + }, + + // Use 'postCreateCommand' to run commands after the container is created.
+ "postCreateCommand": "bash .devcontainer/postCreate.sh", + + // Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root. + "remoteUser": "vscode" +} diff --git a/java-spanner/.devcontainer/postCreate.sh b/java-spanner/.devcontainer/postCreate.sh new file mode 100644 index 000000000000..8dc53f690c38 --- /dev/null +++ b/java-spanner/.devcontainer/postCreate.sh @@ -0,0 +1,6 @@ +echo "Post Create Starting" + +mvn clean install -B -V -ntp \ + -DskipTests=true \ + -Dmaven.javadoc.skip=true \ + -Dclirr.skip=true diff --git a/java-spanner/.readme-partials.yaml b/java-spanner/.readme-partials.yaml new file mode 100644 index 000000000000..392f727e622a --- /dev/null +++ b/java-spanner/.readme-partials.yaml @@ -0,0 +1,217 @@ +custom_content: | + #### Calling Cloud Spanner + Here is a code snippet showing a simple usage example. Add the following imports + at the top of your file: + + ```java + import com.google.cloud.spanner.DatabaseClient; + import com.google.cloud.spanner.DatabaseId; + import com.google.cloud.spanner.ResultSet; + import com.google.cloud.spanner.Spanner; + import com.google.cloud.spanner.SpannerOptions; + import com.google.cloud.spanner.Statement; + + ``` + + Then, to make a query to Spanner, use the following code: + ```java + // Instantiates a client + SpannerOptions options = SpannerOptions.newBuilder().build(); + Spanner spanner = options.getService(); + String instance = "my-instance"; + String database = "my-database"; + try { + // Creates a database client + DatabaseClient dbClient = spanner.getDatabaseClient( + DatabaseId.of(options.getProjectId(), instance, database)); + // Queries the database + try (ResultSet resultSet = dbClient.singleUse().executeQuery(Statement.of("SELECT 1"))) { + // Prints the results + while (resultSet.next()) { + System.out.printf("%d\n", resultSet.getLong(0)); + } + } + } finally { + // Closes the client which will free up the resources used + spanner.close(); + } + ``` + + #### 
Complete source code + + In [DatabaseSelect.java](https://github.com/googleapis/google-cloud-java/tree/master/google-cloud-examples/src/main/java/com/google/cloud/examples/spanner/snippets/DatabaseSelect.java) we put together all the code shown above in a single program. + + ## Session Pool + + The Cloud Spanner client maintains a session pool, as sessions are expensive to create and are + intended to be long-lived. The client automatically takes a session from the pool and uses this + executing queries and transactions. + See [Session Pool and Channel Pool Configuration](session-and-channel-pool-configuration.md) + for in-depth background information about sessions and gRPC channels and how these are handled in + the Cloud Spanner Java client. + + ## Metrics + + Cloud Spanner client supports [client-side metrics](https://cloud.google.com/spanner/docs/view-manage-client-side-metrics) that you can use along with server-side metrics to optimize performance and troubleshoot performance issues if they occur. + + Client-side metrics are measured from the time a request leaves your application to the time your application receives the response. + In contrast, server-side metrics are measured from the time Spanner receives a request until the last byte of data is sent to the client. + + These metrics are enabled by default. You can opt out of using client-side metrics with the following code: + + ``` + SpannerOptions options = SpannerOptions.newBuilder() + .setBuiltInMetricsEnabled(false) + .build(); + ``` + + You can also disable these metrics by setting `SPANNER_DISABLE_BUILTIN_METRICS` to `true`. + + > Note: Client-side metrics needs `monitoring.timeSeries.create` IAM permission to export metrics data. Ask your administrator to grant your service account the [Monitoring Metric Writer](https://cloud.google.com/iam/docs/roles-permissions/monitoring#monitoring.metricWriter) (roles/monitoring.metricWriter) IAM role on the project. 
+ + ## Traces + Cloud Spanner client supports OpenTelemetry Traces, which gives insight into the client internals and aids in debugging/troubleshooting production issues. + + By default, the functionality is disabled. You need to add OpenTelemetry dependencies, enable OpenTelemetry traces and must configure the OpenTelemetry with appropriate exporters at the startup of your application. + + See [Configure client-side tracing](https://cloud.google.com/spanner/docs/set-up-tracing#configure-client-side-tracing) for more details on configuring traces. + + #### OpenTelemetry Dependencies + + If you are using Maven, add this to your pom.xml file + ```xml + + io.opentelemetry + opentelemetry-sdk + {opentelemetry.version} + + + io.opentelemetry + opentelemetry-sdk-trace + {opentelemetry.version} + + + io.opentelemetry + opentelemetry-exporter-otlp + {opentelemetry.version} + + ``` + If you are using Gradle, add this to your dependencies + ```Groovy + compile 'io.opentelemetry:opentelemetry-sdk:{opentelemetry.version}' + compile 'io.opentelemetry:opentelemetry-sdk-trace:{opentelemetry.version}' + compile 'io.opentelemetry:opentelemetry-exporter-otlp:{opentelemetry.version}' + ``` + #### OpenTelemetry Configuration + + > Note: Enabling OpenTelemetry traces will automatically disable OpenCensus traces. + + ```java + // Enable OpenTelemetry traces + SpannerOptions.enableOpenTelemetryTraces(); + + // Create a new tracer provider + SdkTracerProvider sdkTracerProvider = SdkTracerProvider.builder() + // Use Otlp exporter or any other exporter of your choice.
+ .addSpanProcessor(SimpleSpanProcessor.builder(OtlpGrpcSpanExporter + .builder().build()).build()) + .build(); + + + OpenTelemetry openTelemetry = OpenTelemetrySdk.builder() + .setTracerProvider(sdkTracerProvider) + .build(); + + SpannerOptions options = SpannerOptions.newBuilder() + // Inject OpenTelemetry object via Spanner Options or register OpenTelemetry object as Global + .setOpenTelemetry(openTelemetry) + .build(); + + Spanner spanner = options.getService(); + ``` + + #### OpenTelemetry SQL Statement Tracing + The OpenTelemetry traces that are generated by the Java client include any request and transaction + tags that have been set. The traces can also include the SQL statements that are executed and the + name of the thread that executes the statement. Enable this with the `enableExtendedTracing` + option: + + ``` + SpannerOptions options = SpannerOptions.newBuilder() + .setOpenTelemetry(openTelemetry) + .setEnableExtendedTracing(true) + .build(); + ``` + + This option can also be enabled by setting the environment variable + `SPANNER_ENABLE_EXTENDED_TRACING=true`. + + #### OpenTelemetry API Tracing + You can enable tracing of each API call that the Spanner client executes with the `enableApiTracing` + option. These traces also include any retry attempts for an API call: + + ``` + SpannerOptions options = SpannerOptions.newBuilder() + .setOpenTelemetry(openTelemetry) + .setEnableApiTracing(true) + .build(); + ``` + + This option can also be enabled by setting the environment variable + `SPANNER_ENABLE_API_TRACING=true`. + + > Note: The attribute keys that are used for additional information about retry attempts and the number of requests might change in a future release. + + #### End-to-end Tracing + + In addition to client-side tracing, you can opt in for [end-to-end tracing](https://cloud.google.com/spanner/docs/tracing-overview#end-to-end-side-tracing).
End-to-end tracing helps you understand and debug latency issues that are specific to Spanner such as the following: + * Identify whether the latency is due to network latency between your application and Spanner, or if the latency is occurring within Spanner. + * Identify the Google Cloud regions that your application requests are being routed through and if there is a cross-region request. A cross-region request usually means higher latencies between your application and Spanner. + + ``` + SpannerOptions options = SpannerOptions.newBuilder() + .setOpenTelemetry(openTelemetry) + .setEnableEndToEndTracing(true) + .build(); + ``` + + Refer to [Configure end-to-end tracing](https://cloud.google.com/spanner/docs/set-up-tracing#configure-end-to-end-tracing) to configure end-to-end tracing and to understand its attributes. + + > Note: End-to-end traces can only be exported to [Cloud Trace](https://cloud.google.com/trace/docs). + + + ## Instrument with OpenCensus + + > Note: OpenCensus project is deprecated. See [Sunsetting OpenCensus](https://opentelemetry.io/blog/2023/sunsetting-opencensus/). + We recommend migrating to OpenTelemetry, the successor project. + + ## Migrate from OpenCensus to OpenTelemetry + + > Using the [OpenTelemetry OpenCensus Bridge](https://mvnrepository.com/artifact/io.opentelemetry/opentelemetry-opencensus-shim), you can immediately begin exporting your metrics and traces with OpenTelemetry. + + #### Disable OpenCensus metrics + Disable OpenCensus metrics for Spanner by including the following code if you still possess OpenCensus dependencies and exporter. + + ```java + SpannerOptions.disableOpenCensusMetrics(); + ``` + + #### Disable OpenCensus traces + Enabling OpenTelemetry traces for Spanner will automatically disable OpenCensus traces. 
+ + ```java + SpannerOptions.enableOpenTelemetryTraces(); + ``` + + #### Remove OpenCensus Dependencies and Code + Remove any OpenCensus-related code and dependencies from your codebase if all your dependencies are ready to move to OpenTelemetry. + + * Remove the OpenCensus Exporters which were configured [here](#configure-the-opencensus-exporter) + * Remove SpannerRPCViews reference which were configured [here](#enable-rpc-views) + * Remove the OpenCensus dependencies which were added [here](#opencensus-dependencies) + + #### Update your Dashboards and Alerts + + Update your dashboards and alerts to reflect below changes + * **Metrics name** : `cloud.google.com/java` prefix has been removed from OpenTelemetry metrics and instead has been added as Instrumentation Scope. + * **Metrics namespace** : OpenTelemetry exporters use `workload.googleapis.com` namespace as opposed to `custom.googleapis.com` with OpenCensus. \ No newline at end of file diff --git a/java-spanner/.repo-metadata.json b/java-spanner/.repo-metadata.json new file mode 100644 index 000000000000..1ab4bfa9c8c7 --- /dev/null +++ b/java-spanner/.repo-metadata.json @@ -0,0 +1,21 @@ +{ + "api_shortname": "spanner", + "name_pretty": "Cloud Spanner", + "product_documentation": "https://cloud.google.com/spanner/docs/", + "api_description": "is a fully managed, mission-critical, relational database service that offers transactional consistency at global scale, schemas, SQL (ANSI 2011 with extensions), and automatic, synchronous replication for high availability.
Be sure to activate the Cloud Spanner API on the Developer's Console to use Cloud Spanner from your project.", + "client_documentation": "https://cloud.google.com/java/docs/reference/google-cloud-spanner/latest/history", + "release_level": "stable", + "transport": "grpc", + "language": "java", + "repo": "googleapis/google-cloud-java", + "repo_short": "java-spanner", + "distribution_name": "com.google.cloud:google-cloud-spanner", + "api_id": "spanner.googleapis.com", + "library_type": "GAPIC_COMBO", + "requires_billing": true, + "codeowner_team": "@googleapis/spanner-team", + "excluded_poms": "google-cloud-spanner-bom,google-cloud-spanner", + "issue_tracker": "https://issuetracker.google.com/issues?q=componentid:190851%2B%20status:open", + "recommended_package": "com.google.cloud.spanner", + "min_java_version": 8 +} \ No newline at end of file diff --git a/java-spanner/CHANGELOG.md b/java-spanner/CHANGELOG.md new file mode 100644 index 000000000000..3e7586cd187b --- /dev/null +++ b/java-spanner/CHANGELOG.md @@ -0,0 +1,3707 @@ +# Changelog + +## [6.112.0](https://github.com/googleapis/java-spanner/compare/v6.111.1...v6.112.0) (2026-03-17) + + +### Features + +* Ability to update credentials on long running client ([#4371](https://github.com/googleapis/java-spanner/issues/4371)) ([e238990](https://github.com/googleapis/java-spanner/commit/e238990077badb063b1b05b0d71f58859434f7ee)) +* Add SI, adapt, split point related proto ([7aa4d90](https://github.com/googleapis/java-spanner/commit/7aa4d90cd4f001713ee2b0b5113303a748b237e0)) +* **spanner:** Include cache updates and routing hint into BeginTransaction and Commit request/response respectively ([7aa4d90](https://github.com/googleapis/java-spanner/commit/7aa4d90cd4f001713ee2b0b5113303a748b237e0)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.67.0 ([7aa4d90](https://github.com/googleapis/java-spanner/commit/7aa4d90cd4f001713ee2b0b5113303a748b237e0)) +* Fix unclosed literal 
error for consecutive backslashes ([#4387](https://github.com/googleapis/java-spanner/issues/4387)) ([f4884a8](https://github.com/googleapis/java-spanner/commit/f4884a83d15dcff6e246c7db47c8bafc3369a0a3)) + +## [6.111.1](https://github.com/googleapis/java-spanner/compare/v6.111.0...v6.111.1) (2026-03-03) + + +### Bug Fixes + +* Retry CreateSession also when waitForMinSessions is zero ([#4360](https://github.com/googleapis/java-spanner/issues/4360)) ([9263972](https://github.com/googleapis/java-spanner/commit/92639722793a994032761155013e506c9693b464)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.57.0 ([#4358](https://github.com/googleapis/java-spanner/issues/4358)) ([1ce4b8e](https://github.com/googleapis/java-spanner/commit/1ce4b8e24bac44c89f742f0afb395ae4c711abfd)) +* Update googleapis/sdk-platform-java action to v2.67.0 ([#4359](https://github.com/googleapis/java-spanner/issues/4359)) ([23781d9](https://github.com/googleapis/java-spanner/commit/23781d9f05db66d033d4d9125707a9988e1697db)) + +## [6.111.0](https://github.com/googleapis/java-spanner/compare/v6.110.0...v6.111.0) (2026-02-13) + + +### Features + +* Add E2E fallback to the spanner client. 
([#4282](https://github.com/googleapis/java-spanner/issues/4282)) ([d36bd21](https://github.com/googleapis/java-spanner/commit/d36bd21a09cdd2006e53a43b6984d2a68ea24d3e)) + + +### Bug Fixes + +* Rollback transactions that are waiting for tx-id to be returned ([#4342](https://github.com/googleapis/java-spanner/issues/4342)) ([866a8c2](https://github.com/googleapis/java-spanner/commit/866a8c2d23f0d5edee1d98ead7d002b1981d5339)) + +## [6.110.0](https://github.com/googleapis/java-spanner/compare/v6.109.0...v6.110.0) (2026-02-11) + + +### Features + +* Add gRPC A66/A94 metrics ([#4333](https://github.com/googleapis/java-spanner/issues/4333)) ([485c700](https://github.com/googleapis/java-spanner/commit/485c70046e3e67dac899011580f9c350bdb31a6d)) +* ClientContext and secure parameters support ([#4316](https://github.com/googleapis/java-spanner/issues/4316)) ([6356ef2](https://github.com/googleapis/java-spanner/commit/6356ef2ce1ef87898e7bc4a6bc11174f629a9b5b)) +* Next release from main branch is 6.110.0 ([#4338](https://github.com/googleapis/java-spanner/issues/4338)) ([95ac7a7](https://github.com/googleapis/java-spanner/commit/95ac7a71463bfca4bb22f2e4ae61da97b97169ce)) +* **spanner:** Include cache updates into the ResultSet response ([aa53a43](https://github.com/googleapis/java-spanner/commit/aa53a43bdce6f4215fea8695837ad2c538598896)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.66.1 ([aa53a43](https://github.com/googleapis/java-spanner/commit/aa53a43bdce6f4215fea8695837ad2c538598896)) +* Preserve channel configurator for grpc-gcp and add opt-out for gcp OTel metrics ([#4329](https://github.com/googleapis/java-spanner/issues/4329)) ([2565137](https://github.com/googleapis/java-spanner/commit/25651378831fcd98ef48802872fe82a42cfa4942)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.56.1 ([#4331](https://github.com/googleapis/java-spanner/issues/4331)) 
([2fd403f](https://github.com/googleapis/java-spanner/commit/2fd403f3c994b1b038e876be6e58ecadf731d848)) + +## [6.109.0](https://github.com/googleapis/java-spanner/compare/v6.108.0...v6.109.0) (2026-02-02) + + +### Features + +* Adding Send and Ack Mutation Support for Cloud Spanner Queue ([#4298](https://github.com/googleapis/java-spanner/issues/4298)) ([4b637ac](https://github.com/googleapis/java-spanner/commit/4b637ac0e4d6d696f3da8ae7fbac31c877aceba9)) + + +### Documentation + +* Add snippet for ReadLockMode configuration at client and transaction ([#4305](https://github.com/googleapis/java-spanner/issues/4305)) ([0fd4098](https://github.com/googleapis/java-spanner/commit/0fd40983b3bbb2f753e07036cedea9e7b9e26132)) + +## [6.108.0](https://github.com/googleapis/java-spanner/compare/v6.107.0...v6.108.0) (2026-01-28) + + +### Features + +* Add a ClientContext field to Spanner requests ([da6880e](https://github.com/googleapis/java-spanner/commit/da6880e425b7be55b11ba400046692e7af09bccb)) +* Add ChannelFinder server interfaces ([#4293](https://github.com/googleapis/java-spanner/issues/4293)) ([0b7a32e](https://github.com/googleapis/java-spanner/commit/0b7a32e7a24c027387a768a75632022a29562ef6)) +* Exposing total CPU related fields in AutoscalingConfig ([da6880e](https://github.com/googleapis/java-spanner/commit/da6880e425b7be55b11ba400046692e7af09bccb)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.66.0 ([da6880e](https://github.com/googleapis/java-spanner/commit/da6880e425b7be55b11ba400046692e7af09bccb)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.56.0 ([#4313](https://github.com/googleapis/java-spanner/issues/4313)) ([f7d0abc](https://github.com/googleapis/java-spanner/commit/f7d0abc241acb4c58d0ac3c60a7b18f5512275df)) +* Update googleapis/sdk-platform-java action to v2.66.0 ([#4314](https://github.com/googleapis/java-spanner/issues/4314)) 
([d09a900](https://github.com/googleapis/java-spanner/commit/d09a900e26223eb9d646e33d29fc5692b8aba36a)) + +## [6.107.0](https://github.com/googleapis/java-spanner/compare/v6.106.0...v6.107.0) (2026-01-16) + + +### Features + +* Add Dynamic Channel Pooling (DCP) support to Connection API ([#4299](https://github.com/googleapis/java-spanner/issues/4299)) ([bba03a4](https://github.com/googleapis/java-spanner/commit/bba03a44dbfbd59288ecd33e3e53276809ad69b1)) +* Add SsFormat encoding library ([#4292](https://github.com/googleapis/java-spanner/issues/4292)) ([338a9b1](https://github.com/googleapis/java-spanner/commit/338a9b1409cafedcdef674bdff09a72c3f2cd772)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.82.0 ([#4227](https://github.com/googleapis/java-spanner/issues/4227)) ([22bc6cf](https://github.com/googleapis/java-spanner/commit/22bc6cf3431f6e507d384f6e86a36503f1175ee7)) +* Update dependency com.google.cloud:google-cloud-monitoring to v3.83.0 ([#4169](https://github.com/googleapis/java-spanner/issues/4169)) ([61ae915](https://github.com/googleapis/java-spanner/commit/61ae915242a3c8a0aa1385bc1367f67df2c209d6)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.55.1 ([#4302](https://github.com/googleapis/java-spanner/issues/4302)) ([52acc0c](https://github.com/googleapis/java-spanner/commit/52acc0c620fec0aa67ecd81d634eec271fe4e429)) +* Update dependency net.bytebuddy:byte-buddy to v1.18.4 ([#4244](https://github.com/googleapis/java-spanner/issues/4244)) ([c8e4d91](https://github.com/googleapis/java-spanner/commit/c8e4d912155ab6829498822dcf0783fac5fe2747)) +* Update google.cloud.monitoring.version to v3.83.0 ([#4270](https://github.com/googleapis/java-spanner/issues/4270)) ([7ae68c8](https://github.com/googleapis/java-spanner/commit/7ae68c8e889f44f1057310bc45b70c086af9c385)) +* Update googleapis/sdk-platform-java action to v2.65.1 ([#4301](https://github.com/googleapis/java-spanner/issues/4301)) 
([7d98f4e](https://github.com/googleapis/java-spanner/commit/7d98f4e12843826c18cbb8e0998c8687c94fc3d2)) + +## [6.106.0](https://github.com/googleapis/java-spanner/compare/v6.105.0...v6.106.0) (2026-01-07) + + +### Features + +* Support SHOW DEFAULT_TRANSACTION_ISOLATION for PG databases ([#4285](https://github.com/googleapis/java-spanner/issues/4285)) ([aec0515](https://github.com/googleapis/java-spanner/commit/aec051514dd3d122a7231eb6d25d1aaec8d90bda)) + + +### Bug Fixes + +* Adjust the initial polling delay for ddl operations ([#4275](https://github.com/googleapis/java-spanner/issues/4275)) ([8d36967](https://github.com/googleapis/java-spanner/commit/8d36967d010bed8f5a4a0c32f9ec1b5fe7d33e1d)) +* Retry creation of multiplexed session ([#4288](https://github.com/googleapis/java-spanner/issues/4288)) ([735e29e](https://github.com/googleapis/java-spanner/commit/735e29ed394faea9f5e697b5934a1f4895055d56)) + +## [6.105.0](https://github.com/googleapis/java-spanner/compare/v6.104.0...v6.105.0) (2025-12-16) + + +### Features + +* Add support of dynamic channel pooling ([#4265](https://github.com/googleapis/java-spanner/issues/4265)) ([923a14a](https://github.com/googleapis/java-spanner/commit/923a14aad99ff6fc91868f02d657145dd0f31c18)) +* Include RequestID in requests and errors ([#4263](https://github.com/googleapis/java-spanner/issues/4263)) ([afd7d6b](https://github.com/googleapis/java-spanner/commit/afd7d6b008f13d7a4d1a3b7f924122bd41d14b59)) +* Make grpc-gcp default enabled ([#4239](https://github.com/googleapis/java-spanner/issues/4239)) ([bb82f9e](https://github.com/googleapis/java-spanner/commit/bb82f9e55c40cac29b090e54be780c2e42545ee1)) + + +### Bug Fixes + +* Refine connectivity metrics to capture RPCs with no response he… ([#4252](https://github.com/googleapis/java-spanner/issues/4252)) ([7b49412](https://github.com/googleapis/java-spanner/commit/7b4941221969f48d077ff459214c7d1e65ef843c)) +* Retry as PDML did not retry Resource limit exceeded
([#4258](https://github.com/googleapis/java-spanner/issues/4258)) ([c735d42](https://github.com/googleapis/java-spanner/commit/c735d42875092b0d1482fe641b99645f288cdf4f)), closes [#4253](https://github.com/googleapis/java-spanner/issues/4253) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.54.2 ([#4261](https://github.com/googleapis/java-spanner/issues/4261)) ([61dfd62](https://github.com/googleapis/java-spanner/commit/61dfd620637da6ef76b699edbad1095c26b81950)) +* Update googleapis/sdk-platform-java action to v2.64.2 ([#4262](https://github.com/googleapis/java-spanner/issues/4262)) ([f9505a9](https://github.com/googleapis/java-spanner/commit/f9505a97bdd9f6da7dd5ab1b60b47f7ed0a70402)) + +## [6.104.0](https://github.com/googleapis/java-spanner/compare/v6.103.0...v6.104.0) (2025-12-03) + + +### Features + +* Include PostgreSQL error code in exceptions ([#4236](https://github.com/googleapis/java-spanner/issues/4236)) ([5874f8b](https://github.com/googleapis/java-spanner/commit/5874f8b3e65adc3e78832866ebe667cd746e2d7f)) + + +### Bug Fixes + +* Backslash at end of string literal was misinterpreted ([#4246](https://github.com/googleapis/java-spanner/issues/4246)) ([477ca51](https://github.com/googleapis/java-spanner/commit/477ca51baf6cd1a0a5773bd53677f64195100ae2)) +* Fix transaction tag issue with the blind-write ([#4243](https://github.com/googleapis/java-spanner/issues/4243)) ([cf2ba69](https://github.com/googleapis/java-spanner/commit/cf2ba695cdb4038dc8e3ca3e9859231a2203da60)) + +## [6.103.0](https://github.com/googleapis/java-spanner/compare/v6.102.1...v6.103.0) (2025-11-17) + + +### Features + +* Add grpc.xds.resource_type label to xDS client metrics ([#4222](https://github.com/googleapis/java-spanner/issues/4222)) ([97bed3c](https://github.com/googleapis/java-spanner/commit/97bed3cf1a9df542acc4685c2ce4dbfa629b2cd3)) +* Exposing AutoscalingConfig in InstancePartition 
([22edecf](https://github.com/googleapis/java-spanner/commit/22edecf8518844860c3cb47883544efd36cbc311)) + + +### Bug Fixes + +* Add env var to allow disabling directpath bound token ([#4189](https://github.com/googleapis/java-spanner/issues/4189)) ([0ca9541](https://github.com/googleapis/java-spanner/commit/0ca95412c778e3478cb66e4bea124396326c6056)) +* Allow DML THEN RETURN with retryAbortsInternally=false ([#4225](https://github.com/googleapis/java-spanner/issues/4225)) ([f49cc47](https://github.com/googleapis/java-spanner/commit/f49cc47e663836696ef151738510e68324e139dc)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.64.1 ([22edecf](https://github.com/googleapis/java-spanner/commit/22edecf8518844860c3cb47883544efd36cbc311)) +* Remove URL encoding in project name ([#4188](https://github.com/googleapis/java-spanner/issues/4188)) ([abba0c1](https://github.com/googleapis/java-spanner/commit/abba0c1730ea792407bea073ea65da55128cd764)) + + +### Dependencies + +* Update actions/checkout action to v5 ([#4166](https://github.com/googleapis/java-spanner/issues/4166)) ([50a56f7](https://github.com/googleapis/java-spanner/commit/50a56f7d47541dd581f7b425df36a080ecc11a74)) +* Update all tracing and telemetry dependencies ([#4230](https://github.com/googleapis/java-spanner/issues/4230)) ([d60124c](https://github.com/googleapis/java-spanner/commit/d60124cbe317d4c2489ea35de81943cfd2b8f697)) +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.79.0 ([#4172](https://github.com/googleapis/java-spanner/issues/4172)) ([3a329fd](https://github.com/googleapis/java-spanner/commit/3a329fdb2fc68ff9d19717b534dd667f931d51fd)) +* Update dependency com.google.auto.value:auto-value-annotations to v1.11.1 ([#4216](https://github.com/googleapis/java-spanner/issues/4216)) ([84150c7](https://github.com/googleapis/java-spanner/commit/84150c73bbed2a6d58408ae0b8bd59709fc751db)) +* Update dependency com.google.cloud:google-cloud-trace to v2.79.0 
([#4174](https://github.com/googleapis/java-spanner/issues/4174)) ([3e93ca0](https://github.com/googleapis/java-spanner/commit/3e93ca077b94ad06867e3c9fdfe19527855423a2)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.54.1 ([#4193](https://github.com/googleapis/java-spanner/issues/4193)) ([ad235cf](https://github.com/googleapis/java-spanner/commit/ad235cfc9041f52c2f7b76f67eeaa6c03c5840aa)) +* Update dependency commons-cli:commons-cli to v1.11.0 ([#4218](https://github.com/googleapis/java-spanner/issues/4218)) ([33449ba](https://github.com/googleapis/java-spanner/commit/33449baf64a3d5b78fff323737ffeb28c8a9461b)) +* Update dependency commons-io:commons-io to v2.21.0 ([#4198](https://github.com/googleapis/java-spanner/issues/4198)) ([1f31169](https://github.com/googleapis/java-spanner/commit/1f3116947069ac11c948b510e6a9a7a8a6aa6061)) +* Update dependency net.bytebuddy:byte-buddy to v1.18.1 ([#4214](https://github.com/googleapis/java-spanner/issues/4214)) ([0c1d843](https://github.com/googleapis/java-spanner/commit/0c1d843ad42f213d4d9ec2d98a12e21e991ac010)) +* Update dependency net.bytebuddy:byte-buddy-agent to v1.18.1 ([#4215](https://github.com/googleapis/java-spanner/issues/4215)) ([76ce01b](https://github.com/googleapis/java-spanner/commit/76ce01b99e5c1274e9103c27ebc6bbdf482bebcd)) +* Update opentelemetry.version to v1.56.0 ([#4167](https://github.com/googleapis/java-spanner/issues/4167)) ([a24f219](https://github.com/googleapis/java-spanner/commit/a24f21930978583a0b8d7d39130fa0fc3fec7b2d)) + +## [6.102.1](https://github.com/googleapis/java-spanner/compare/v6.102.0...v6.102.1) (2025-10-23) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.63.0 ([c1a8238](https://github.com/googleapis/java-spanner/commit/c1a8238af33a083411f63cf6276eb683ee67ac6a)) +* Do a quick check if the application runs on GCP ([#4163](https://github.com/googleapis/java-spanner/issues/4163)) 
([b9d7daf](https://github.com/googleapis/java-spanner/commit/b9d7daf000c0fb8b67142c6161bb578cadf49b18)) +* Migrate away from GoogleCredentials.fromStream() usages ([#4151](https://github.com/googleapis/java-spanner/issues/4151)) ([94d0474](https://github.com/googleapis/java-spanner/commit/94d0474ace62ea1059e5b69243f0b6eef31ddd06)) + + +### Dependencies + +* Update actions/checkout action to v5 ([#4158](https://github.com/googleapis/java-spanner/issues/4158)) ([b32ebcf](https://github.com/googleapis/java-spanner/commit/b32ebcf96bbf696b1eb84204622463fac59be017)) +* Update actions/checkout action to v5 ([#4161](https://github.com/googleapis/java-spanner/issues/4161)) ([02a17c6](https://github.com/googleapis/java-spanner/commit/02a17c6e6253e026cb3c6360eb925a322143b518)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.53.0 ([#4178](https://github.com/googleapis/java-spanner/issues/4178)) ([24fe194](https://github.com/googleapis/java-spanner/commit/24fe194fa3595b2ab817b9fc4cd57840250fef1f)) +* Update dependency net.bytebuddy:byte-buddy to v1.17.8 ([#4154](https://github.com/googleapis/java-spanner/issues/4154)) ([c911381](https://github.com/googleapis/java-spanner/commit/c911381c2ca9cd46fbeb831c659aaf55f21437f2)) +* Update dependency net.bytebuddy:byte-buddy-agent to v1.17.8 ([#4155](https://github.com/googleapis/java-spanner/issues/4155)) ([3075df7](https://github.com/googleapis/java-spanner/commit/3075df714b1787512174b3f18cbc802359d442dc)) +* Update googleapis/sdk-platform-java action to v2.63.0 ([#4179](https://github.com/googleapis/java-spanner/issues/4179)) ([5f48191](https://github.com/googleapis/java-spanner/commit/5f481913d60372fccf399c5c0e168b7d0c553ba0)) + + +### Documentation + +* Add warning for encoded credential ([#4182](https://github.com/googleapis/java-spanner/issues/4182)) ([92620f9](https://github.com/googleapis/java-spanner/commit/92620f969908a8ba7fcf92d0b350a8c4d05398f8)) + +## 
[6.102.0](https://github.com/googleapis/java-spanner/compare/v6.101.1...v6.102.0) (2025-10-08) + + +### Features + +* Add connection property for gRPC interceptor provider ([#4149](https://github.com/googleapis/java-spanner/issues/4149)) ([deb8dff](https://github.com/googleapis/java-spanner/commit/deb8dff6c01c37a3158e8f4a28ef5e821d10092a)) +* Support statement_timeout in connection url ([#4103](https://github.com/googleapis/java-spanner/issues/4103)) ([542c6aa](https://github.com/googleapis/java-spanner/commit/542c6aa63bfdd526070f14cb76921dd34527c1f9)) + + +### Bug Fixes + +* Automatically set default_sequence_kind for CREATE SEQUENCE ([#4105](https://github.com/googleapis/java-spanner/issues/4105)) ([3beea6a](https://github.com/googleapis/java-spanner/commit/3beea6ac4eb53b70db34e0a2d2e33e56f450c88b)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.62.3 ([7047a3a](https://github.com/googleapis/java-spanner/commit/7047a3ae31aae51e9e23758fe004b93855a0ee4b)) + + +### Dependencies + +* Update actions/checkout action to v5 ([#4069](https://github.com/googleapis/java-spanner/issues/4069)) ([4c88eb9](https://github.com/googleapis/java-spanner/commit/4c88eb91a321aa718f957296012f9e7501c7caec)) +* Update actions/checkout action to v5 ([#4106](https://github.com/googleapis/java-spanner/issues/4106)) ([14ebdb3](https://github.com/googleapis/java-spanner/commit/14ebdb35c33442c4e0f70d63dce3425edb730525)) +* Update actions/setup-java action to v5 ([#4071](https://github.com/googleapis/java-spanner/issues/4071)) ([e23134a](https://github.com/googleapis/java-spanner/commit/e23134a2f864e8abd2890ac3a81ff6b668afbe63)) +* Update all dependencies ([#4099](https://github.com/googleapis/java-spanner/issues/4099)) ([b262edc](https://github.com/googleapis/java-spanner/commit/b262edcfc4713bb64986bc4acd3f02b69d3367f8)) +* Update dependency com.google.api.grpc:grpc-google-cloud-monitoring-v3 to v3.77.0 ([#4117](https://github.com/googleapis/java-spanner/issues/4117)) 
([2451ca2](https://github.com/googleapis/java-spanner/commit/2451ca2abe1dd2de3907b88e8d18beab1a15a634)) +* Update dependency com.google.api.grpc:proto-google-cloud-monitoring-v3 to v3.77.0 ([#4143](https://github.com/googleapis/java-spanner/issues/4143)) ([6c9dc26](https://github.com/googleapis/java-spanner/commit/6c9dc26330cf66f196adc2203323a482e08f0325)) +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.76.0 ([#4144](https://github.com/googleapis/java-spanner/issues/4144)) ([d566a42](https://github.com/googleapis/java-spanner/commit/d566a4295be018070169ba082a018394a2e60b45)) +* Update dependency com.google.cloud:google-cloud-monitoring to v3.77.0 ([#4145](https://github.com/googleapis/java-spanner/issues/4145)) ([8917c05](https://github.com/googleapis/java-spanner/commit/8917c054410e4035d6d4e201e43599d5ddc1fadd)) +* Update dependency com.google.cloud:google-cloud-monitoring to v3.77.0 ([#4146](https://github.com/googleapis/java-spanner/issues/4146)) ([4ebea1a](https://github.com/googleapis/java-spanner/commit/4ebea1adf726069084087ce46900f3174658055c)) +* Update dependency com.google.cloud:google-cloud-trace to v2.76.0 ([#4147](https://github.com/googleapis/java-spanner/issues/4147)) ([4b1d4af](https://github.com/googleapis/java-spanner/commit/4b1d4af19336e493af38a1e58c95786da3892d34)) +* Update dependency com.google.cloud:google-cloud-trace to v2.76.0 ([#4148](https://github.com/googleapis/java-spanner/issues/4148)) ([8f91a89](https://github.com/googleapis/java-spanner/commit/8f91a894771653213b6fcded5795349ad7ea6724)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.52.3 ([#4107](https://github.com/googleapis/java-spanner/issues/4107)) ([8a8a042](https://github.com/googleapis/java-spanner/commit/8a8a042494b092b3dddd0c9606a63197d8a23555)) +* Update dependency org.json:json to v20250517 ([#3881](https://github.com/googleapis/java-spanner/issues/3881)) 
([5658c83](https://github.com/googleapis/java-spanner/commit/5658c8378aa2e8028d4ef7dfaf94b647f33cd812)) +* Update googleapis/sdk-platform-java action to v2.62.3 ([#4108](https://github.com/googleapis/java-spanner/issues/4108)) ([65913ec](https://github.com/googleapis/java-spanner/commit/65913ec0638fec4ea536cf42f8fe25460133f68e)) + +## [6.101.1](https://github.com/googleapis/java-spanner/compare/v6.101.0...v6.101.1) (2025-09-26) + + +### Bug Fixes + +* Potential NullPointerException in LocalConnectionChecker ([#4092](https://github.com/googleapis/java-spanner/issues/4092)) ([3b9f597](https://github.com/googleapis/java-spanner/commit/3b9f597ba60199a16556824568b24908ce938a69)) + +## [6.101.0](https://github.com/googleapis/java-spanner/compare/v6.100.0...v6.101.0) (2025-09-26) + + +### Features + +* Add transaction_timeout connection property ([#4056](https://github.com/googleapis/java-spanner/issues/4056)) ([cdc52d4](https://github.com/googleapis/java-spanner/commit/cdc52d49b39c57e7255f4e09fb33a41f4810397d)) +* TPC support ([#4055](https://github.com/googleapis/java-spanner/issues/4055)) ([7625cce](https://github.com/googleapis/java-spanner/commit/7625cce9ad48b14a1cff9c2ede86a066ea292bef)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.62.2 ([8d6cbf6](https://github.com/googleapis/java-spanner/commit/8d6cbf6bea9cbd823b8f0070516e34b4d8428e87)) +* Potential NullPointerException in Value#hashCode ([#4046](https://github.com/googleapis/java-spanner/issues/4046)) ([74abb34](https://github.com/googleapis/java-spanner/commit/74abb341e2ea42bbf0a2de4ec3e3555335b5fd9f)) +* Recalculate remaining statement timeout after retry ([#4053](https://github.com/googleapis/java-spanner/issues/4053)) ([5e26596](https://github.com/googleapis/java-spanner/commit/5e26596f4f9c924260da0908920854d8ddfc626b)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.52.2 
([#4057](https://github.com/googleapis/java-spanner/issues/4057)) ([d782aff](https://github.com/googleapis/java-spanner/commit/d782aff63ff81e1b760690d4dee3e566028d522e)) + +## [6.100.0](https://github.com/googleapis/java-spanner/compare/v6.99.0...v6.100.0) (2025-09-11) + + +### Features + +* Read_lock_mode support for connections ([#4031](https://github.com/googleapis/java-spanner/issues/4031)) ([261abb4](https://github.com/googleapis/java-spanner/commit/261abb4b9c5ff00fac2d816a31926b23264657c4)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.62.1 ([e9773a7](https://github.com/googleapis/java-spanner/commit/e9773a7aa27a414d56093b4e09e0f197a07b5980)) +* Disable afe_connectivity_error_count metric ([#4041](https://github.com/googleapis/java-spanner/issues/4041)) ([f89c1c0](https://github.com/googleapis/java-spanner/commit/f89c1c0517ba6b895f405b0085b8df41aac952be)) +* Skip session delete in case of multiplexed sessions ([#4029](https://github.com/googleapis/java-spanner/issues/4029)) ([8bcb09d](https://github.com/googleapis/java-spanner/commit/8bcb09d141fe986c92ccacbaa9a45302c5c8e79d)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.52.1 ([#4034](https://github.com/googleapis/java-spanner/issues/4034)) ([13bfa7c](https://github.com/googleapis/java-spanner/commit/13bfa7c68c7ea887e679fb5504dceb85cbb43cb9)) + + +### Documentation + +* A comment for field `ranges` in message `.google.spanner.v1.KeySet` is changed ([e9773a7](https://github.com/googleapis/java-spanner/commit/e9773a7aa27a414d56093b4e09e0f197a07b5980)) + +## [6.99.0](https://github.com/googleapis/java-spanner/compare/v6.98.1...v6.99.0) (2025-08-26) + + +### Features + +* Support read lock mode for R/W transactions ([#4010](https://github.com/googleapis/java-spanner/issues/4010)) ([7d752d6](https://github.com/googleapis/java-spanner/commit/7d752d686e638b6266aab3a5188c01641d2f9adc)) + + +### Bug Fixes + +* **deps:** 
Update the Java code generator (gapic-generator-java) to 2.62.0 ([52c68db](https://github.com/googleapis/java-spanner/commit/52c68db5c75f24a066c2e828ed79917c824f699b)) +* GetCommitResponse() should return error if tx has not committed ([#4021](https://github.com/googleapis/java-spanner/issues/4021)) ([a2c179f](https://github.com/googleapis/java-spanner/commit/a2c179f2e7c19d295bdbf9cf1bbd1c5562dd9e21)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.52.0 ([#4024](https://github.com/googleapis/java-spanner/issues/4024)) ([7e3294f](https://github.com/googleapis/java-spanner/commit/7e3294f6d42bddb4cfff67334118f615c90c3bb7)) + +## [6.98.1](https://github.com/googleapis/java-spanner/compare/v6.98.0...v6.98.1) (2025-08-11) + + +### Bug Fixes + +* Add missing span.end calls for AsyncTransactionManager ([#4012](https://github.com/googleapis/java-spanner/issues/4012)) ([1a4adb4](https://github.com/googleapis/java-spanner/commit/1a4adb4d70c3a3822fa6bda93d689f2dae1835fa)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.61.0 ([8156ef3](https://github.com/googleapis/java-spanner/commit/8156ef31d93932c14f9fdd13c8c5e5b7ce370ba5)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.51.0 ([#4013](https://github.com/googleapis/java-spanner/issues/4013)) ([4e90c29](https://github.com/googleapis/java-spanner/commit/4e90c29ce3447d14411368e45a39c7b0965cb40a)) + +## [6.98.0](https://github.com/googleapis/java-spanner/compare/v6.97.1...v6.98.0) (2025-07-31) + + +### Features + +* Proto changes for an internal api ([675e90b](https://github.com/googleapis/java-spanner/commit/675e90b4582b4fc968118121e6c23ec98ee178e9)) +* **spanner:** A new field `snapshot_timestamp` is added to message `.google.spanner.v1.CommitResponse` ([675e90b](https://github.com/googleapis/java-spanner/commit/675e90b4582b4fc968118121e6c23ec98ee178e9)) +* Support Exemplar 
([#3997](https://github.com/googleapis/java-spanner/issues/3997)) ([fcf0a01](https://github.com/googleapis/java-spanner/commit/fcf0a0182a33f229e865e4593635efaed34d6dac)) +* Use multiplex sessions for RW and Partition Ops ([#3996](https://github.com/googleapis/java-spanner/issues/3996)) ([a882204](https://github.com/googleapis/java-spanner/commit/a882204e07a2084b228c14fb37ac53e4e33d0f59)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.60.2 ([675e90b](https://github.com/googleapis/java-spanner/commit/675e90b4582b4fc968118121e6c23ec98ee178e9)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.50.2 ([#4004](https://github.com/googleapis/java-spanner/issues/4004)) ([986c0e0](https://github.com/googleapis/java-spanner/commit/986c0e07fddecd51cd310a9759ce1d41c1f5c657)) + +## [6.97.1](https://github.com/googleapis/java-spanner/compare/v6.97.0...v6.97.1) (2025-07-15) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.50.1 ([#3992](https://github.com/googleapis/java-spanner/issues/3992)) ([69ffd72](https://github.com/googleapis/java-spanner/commit/69ffd7282220b8b12c6b9b64d8856ff88068ffa2)) +* Update googleapis/sdk-platform-java action to v2.60.1 ([#3926](https://github.com/googleapis/java-spanner/issues/3926)) ([7001b7f](https://github.com/googleapis/java-spanner/commit/7001b7faaff581e26ec81c4db2c99a1e8726d5eb)) + +## [6.97.0](https://github.com/googleapis/java-spanner/compare/v6.96.1...v6.97.0) (2025-07-10) + + +### Features + +* Next release from main branch is 6.97.0 ([#3984](https://github.com/googleapis/java-spanner/issues/3984)) ([5651f61](https://github.com/googleapis/java-spanner/commit/5651f6160e1e655f118aa2e7f0203a47cd6914c0)) + + +### Bug Fixes + +* Drop max message size ([#3987](https://github.com/googleapis/java-spanner/issues/3987)) 
([3eee899](https://github.com/googleapis/java-spanner/commit/3eee89965547dfa49b4282b470f625d43c92f4fd)) +* Return non-empty metadata for DataBoost queries ([#3936](https://github.com/googleapis/java-spanner/issues/3936)) ([79c0684](https://github.com/googleapis/java-spanner/commit/79c06848c0ac4eff8410dd3bd63db8675c202d94)) + +## [6.96.1](https://github.com/googleapis/java-spanner/compare/v6.96.0...v6.96.1) (2025-06-30) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.59.0 ([2836042](https://github.com/googleapis/java-spanner/commit/2836042217fe29bb967fe892bd6b492391ded95c)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.50.0 ([#3925](https://github.com/googleapis/java-spanner/issues/3925)) ([1372bbd](https://github.com/googleapis/java-spanner/commit/1372bbd82b7828629cbc407b78878469bc477977)) + +## [6.96.0](https://github.com/googleapis/java-spanner/compare/v6.95.1...v6.96.0) (2025-06-27) + + +### Features + +* Allow JDBC to configure directpath for connection ([#3929](https://github.com/googleapis/java-spanner/issues/3929)) ([d754f1f](https://github.com/googleapis/java-spanner/commit/d754f1f99294d86ec881583f217fa09f291a3d7a)) +* Support getOrNull and getOrDefault in Struct ([#3914](https://github.com/googleapis/java-spanner/issues/3914)) ([1dc5a3e](https://github.com/googleapis/java-spanner/commit/1dc5a3ec0ca9ea530e8691df5c2734c0a1ece559)) +* Use multiplexed sessions for read-only transactions ([#3917](https://github.com/googleapis/java-spanner/issues/3917)) ([37fdc27](https://github.com/googleapis/java-spanner/commit/37fdc27aab4e71ac141c2a2c979f864e97395a97)) + + +### Bug Fixes + +* Allow zero durations to be set for connections ([#3916](https://github.com/googleapis/java-spanner/issues/3916)) ([43ea4fa](https://github.com/googleapis/java-spanner/commit/43ea4fa68eac00801beb8e58c1eb09e9f32e5ce5)) + + +### Documentation + +* Add snippet for Repeatable Read configuration at 
client and transaction ([#3908](https://github.com/googleapis/java-spanner/issues/3908)) ([ff3d212](https://github.com/googleapis/java-spanner/commit/ff3d212c98276c4084f44619916d0444c9652803)) +* Update SpannerSample.java to align with best practices ([#3625](https://github.com/googleapis/java-spanner/issues/3625)) ([7bfc62d](https://github.com/googleapis/java-spanner/commit/7bfc62d3d9e57242e0dfddea090208f8c65f0f8e)) + +## [6.95.1](https://github.com/googleapis/java-spanner/compare/v6.95.0...v6.95.1) (2025-06-06) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.49.0 ([#3909](https://github.com/googleapis/java-spanner/issues/3909)) ([3de8502](https://github.com/googleapis/java-spanner/commit/3de8502b98ebb90526fc2339e279f9b710816b3b)) +* Update googleapis/sdk-platform-java action to v2.59.0 ([#3910](https://github.com/googleapis/java-spanner/issues/3910)) ([aed8bd6](https://github.com/googleapis/java-spanner/commit/aed8bd6d5a0b1e0dfab345e0de68f285e8b8aedb)) + +## [6.95.0](https://github.com/googleapis/java-spanner/compare/v6.94.0...v6.95.0) (2025-06-05) + + +### Features + +* Enable ALTS hard bound token in DirectPath ([#3904](https://github.com/googleapis/java-spanner/issues/3904)) ([2b0f2ff](https://github.com/googleapis/java-spanner/commit/2b0f2ff214f4b68dd5957bc4280edb713b77a763)) +* Enable grpc and afe metrics ([#3896](https://github.com/googleapis/java-spanner/issues/3896)) ([706f794](https://github.com/googleapis/java-spanner/commit/706f794f044c2cb1112cfdae6f379e5f2bc3f26f)) +* Last statement sample ([#3830](https://github.com/googleapis/java-spanner/issues/3830)) ([2f62816](https://github.com/googleapis/java-spanner/commit/2f62816b0af9aced1b73e25525f60f8e3e923454)) +* **spanner:** Add new change_stream.proto ([f385698](https://github.com/googleapis/java-spanner/commit/f38569865de7465ae9a37b844a9dd983571d3688)) + + +### Bug Fixes + +* Directpath_enabled attribute 
([#3897](https://github.com/googleapis/java-spanner/issues/3897)) ([53bc510](https://github.com/googleapis/java-spanner/commit/53bc510145921d00bc3df04aa4cf407179ed8d8e)) + + +### Dependencies + +* Update dependency io.opentelemetry:opentelemetry-bom to v1.50.0 ([#3887](https://github.com/googleapis/java-spanner/issues/3887)) ([94b879c](https://github.com/googleapis/java-spanner/commit/94b879c8c1848fa0b14dbe8cda8390cfe9e8fce6)) + +## [6.94.0](https://github.com/googleapis/java-spanner/compare/v6.93.0...v6.94.0) (2025-05-21) + + +### Features + +* Add throughput_mode to UpdateDatabaseDdlRequest to be used by Spanner Migration Tool. See https://github.com/GoogleCloudPlatform/spanner-migration-tool ([3070f1d](https://github.com/googleapis/java-spanner/commit/3070f1db97788c2a55c553ab8a4de3419d1ccf5c)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.58.0 ([3070f1d](https://github.com/googleapis/java-spanner/commit/3070f1db97788c2a55c553ab8a4de3419d1ccf5c)) +* Remove trailing semicolons in DDL ([#3879](https://github.com/googleapis/java-spanner/issues/3879)) ([ca3a67d](https://github.com/googleapis/java-spanner/commit/ca3a67db715f398943382df1f8a9979905811ff8)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.48.0 ([#3869](https://github.com/googleapis/java-spanner/issues/3869)) ([afa17f7](https://github.com/googleapis/java-spanner/commit/afa17f73beab80639467916bc73b5c96305093aa)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.48.0 ([#3880](https://github.com/googleapis/java-spanner/issues/3880)) ([f3b00b6](https://github.com/googleapis/java-spanner/commit/f3b00b663aa897fda1bc21222d29726e6be630cb)) +* Update dependency com.google.cloud.opentelemetry:exporter-metrics to v0.34.0 ([#3861](https://github.com/googleapis/java-spanner/issues/3861)) ([676b14f](https://github.com/googleapis/java-spanner/commit/676b14f916dea783b40ddec4061bd7af157b5d98)) +* Update dependency 
commons-io:commons-io to v2.19.0 ([#3863](https://github.com/googleapis/java-spanner/issues/3863)) ([80a6af8](https://github.com/googleapis/java-spanner/commit/80a6af836ca29ec196a2f509831e1d36c557168f)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.50.0 ([#3865](https://github.com/googleapis/java-spanner/issues/3865)) ([ae63050](https://github.com/googleapis/java-spanner/commit/ae6305089b394be0c1eaf8ff7e188711288d87ad)) +* Update googleapis/sdk-platform-java action to v2.58.0 ([#3870](https://github.com/googleapis/java-spanner/issues/3870)) ([d1e45fa](https://github.com/googleapis/java-spanner/commit/d1e45fa88bb005529bcfb2a6ff2df44065be0fd2)) +* Update opentelemetry.version to v1.50.0 ([#3866](https://github.com/googleapis/java-spanner/issues/3866)) ([f7e09b8](https://github.com/googleapis/java-spanner/commit/f7e09b8148c0e51503255694bd3347c637724b34)) + + +### Documentation + +* Add samples for unnamed (positional) parameters ([#3849](https://github.com/googleapis/java-spanner/issues/3849)) ([035cadd](https://github.com/googleapis/java-spanner/commit/035cadd5bb77a8f9f6fb25ac8c8e5a3e186d9a22)) + +## [6.93.0](https://github.com/googleapis/java-spanner/compare/v6.92.0...v6.93.0) (2025-05-09) + + +### Features + +* Enable AFE and gRPC metrics for DP ([#3852](https://github.com/googleapis/java-spanner/issues/3852)) ([203baae](https://github.com/googleapis/java-spanner/commit/203baae3996378435095cb90e3b2c7ee71a643cd)) + + +### Bug Fixes + +* Change server timing duration attribute to float as per w3c ([#3851](https://github.com/googleapis/java-spanner/issues/3851)) ([da8dd8d](https://github.com/googleapis/java-spanner/commit/da8dd8da3171a073d7b450d4413936351a4c1060)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.57.0 ([23b985c](https://github.com/googleapis/java-spanner/commit/23b985c9a04837b0b38f2cfc5d96469e1d664d67)) +* Non-ASCII Unicode characters in code ([#3844](https://github.com/googleapis/java-spanner/issues/3844)) 
([85a0820](https://github.com/googleapis/java-spanner/commit/85a0820505889ae6482a9e4f845cd53430dd6b44)) +* Only close and return sessions once ([#3846](https://github.com/googleapis/java-spanner/issues/3846)) ([32b2373](https://github.com/googleapis/java-spanner/commit/32b2373d62cac3047d9686c56af278c706d7c488)) + +## [6.92.0](https://github.com/googleapis/java-spanner/compare/v6.91.1...v6.92.0) (2025-04-29) + + +### Features + +* [Internal] client-side metrics for afe latency and connectivity error ([#3819](https://github.com/googleapis/java-spanner/issues/3819)) ([a8dba0a](https://github.com/googleapis/java-spanner/commit/a8dba0a83939fdbbc324f0a7aa6c44180462fa3a)) +* Support begin with AbortedException for manager interface ([#3835](https://github.com/googleapis/java-spanner/issues/3835)) ([5783116](https://github.com/googleapis/java-spanner/commit/578311693bed836c8916f4b4ffa0782a468c1af3)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.56.2 ([11bfd90](https://github.com/googleapis/java-spanner/commit/11bfd90daa244dbd31a76bc5a1d2e694e43fa292)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.46.2 ([#3836](https://github.com/googleapis/java-spanner/issues/3836)) ([2ee7f97](https://github.com/googleapis/java-spanner/commit/2ee7f971f3374b01d22e5a7f8f2483cf60c3363d)) + +## [6.91.1](https://github.com/googleapis/java-spanner/compare/v6.91.0...v6.91.1) (2025-04-21) + + +### Bug Fixes + +* SkipHint in the internal parser skipped too much ([#3827](https://github.com/googleapis/java-spanner/issues/3827)) ([fbf7b4c](https://github.com/googleapis/java-spanner/commit/fbf7b4c4324c4d565bfe3950ecf80de02c88f16e)) + +## [6.91.0](https://github.com/googleapis/java-spanner/compare/v6.90.0...v6.91.0) (2025-04-17) + + +### Features + +* [Internal] open telemetry built in metrics for GRPC ([#3709](https://github.com/googleapis/java-spanner/issues/3709)) 
([cd76c73](https://github.com/googleapis/java-spanner/commit/cd76c73d838a9ccde2c8c11fc63144a62d76886c)) +* Add java sample for the pre-splitting feature ([#3713](https://github.com/googleapis/java-spanner/issues/3713)) ([e97b92e](https://github.com/googleapis/java-spanner/commit/e97b92ea4728bc8f013ff73478de4af9eaa1793b)) +* Add TransactionMutationLimitExceededException as cause to SpannerBatchUpdateException ([#3723](https://github.com/googleapis/java-spanner/issues/3723)) ([4cf5261](https://github.com/googleapis/java-spanner/commit/4cf52613c6c8280fdb864f5b8d04f8fb6ea55e16)) +* Built in metrics for afe latency and connectivity error ([#3724](https://github.com/googleapis/java-spanner/issues/3724)) ([e13a2f9](https://github.com/googleapis/java-spanner/commit/e13a2f9c5cadd15ab5a565c7dd1c1eec64c09488)) +* Support unnamed parameters ([#3820](https://github.com/googleapis/java-spanner/issues/3820)) ([1afd815](https://github.com/googleapis/java-spanner/commit/1afd815869785588dfd03ffc12e381e32c4aa0fe)) + + +### Bug Fixes + +* Add default implementations for Interval methods in AbstractStructReader ([#3722](https://github.com/googleapis/java-spanner/issues/3722)) ([97f4544](https://github.com/googleapis/java-spanner/commit/97f45448ecb51bd20699d1f163f78b2a7736b21f)) +* Set transaction isolation level had no effect ([#3718](https://github.com/googleapis/java-spanner/issues/3718)) ([b382999](https://github.com/googleapis/java-spanner/commit/b382999f42d1b643472cf3f605f8c6dc839dec19)) + + +### Performance Improvements + +* Cache the key used for OTEL traces and metrics ([#3814](https://github.com/googleapis/java-spanner/issues/3814)) ([c5a2045](https://github.com/googleapis/java-spanner/commit/c5a20452ad2ed5a8f1ac12cca4072a86f4457b93)) +* Optimize parsing in Connection API ([#3800](https://github.com/googleapis/java-spanner/issues/3800)) ([a2780ed](https://github.com/googleapis/java-spanner/commit/a2780edb3d9d4972c78befd097692f626a6a4bea)) +* Qualify statements without removing 
comments ([#3810](https://github.com/googleapis/java-spanner/issues/3810)) ([d358cb9](https://github.com/googleapis/java-spanner/commit/d358cb96e33bdf6de6528d03c884aa702b40b802)) +* Remove all calls to getSqlWithoutComments ([#3822](https://github.com/googleapis/java-spanner/issues/3822)) ([0e1e14c](https://github.com/googleapis/java-spanner/commit/0e1e14c0e8c1f3726c4d3cfd836c580b3b4122d0)) + +## [6.90.0](https://github.com/googleapis/java-spanner/compare/v6.89.0...v6.90.0) (2025-03-31) + + +### Features + +* Add default_isolation_level connection property ([#3702](https://github.com/googleapis/java-spanner/issues/3702)) ([9472d23](https://github.com/googleapis/java-spanner/commit/9472d23c2b233275e779815f89040323e073a7d1)) +* Adds support for Interval datatype in Java client ([#3416](https://github.com/googleapis/java-spanner/issues/3416)) ([8be8f5e](https://github.com/googleapis/java-spanner/commit/8be8f5e6b08c8cf3e5f062e4b985b3ec9c725064)) +* Integration test for End to End tracing ([#3691](https://github.com/googleapis/java-spanner/issues/3691)) ([bf1a07a](https://github.com/googleapis/java-spanner/commit/bf1a07a153b1eb899757260b8ac2bc12384e45af)) +* Specify isolation level per transaction ([#3704](https://github.com/googleapis/java-spanner/issues/3704)) ([868f30f](https://github.com/googleapis/java-spanner/commit/868f30fde95d07c3fc18feaca64b4d1c3ba6a27d)) +* Support PostgreSQL isolation level statements ([#3706](https://github.com/googleapis/java-spanner/issues/3706)) ([dda2e1d](https://github.com/googleapis/java-spanner/commit/dda2e1dec38febdad54b61f588590c7572017ba9)) + +## [6.89.0](https://github.com/googleapis/java-spanner/compare/v6.88.0...v6.89.0) (2025-03-20) + + +### Features + +* Enable ALTS hard bound token in DirectPath ([#3645](https://github.com/googleapis/java-spanner/issues/3645)) ([42cc961](https://github.com/googleapis/java-spanner/commit/42cc9616fa74c765d5716fd948dc0823df0a07a6)) +* Next release from main branch is 6.89.0 
([#3669](https://github.com/googleapis/java-spanner/issues/3669)) ([7a8a29b](https://github.com/googleapis/java-spanner/commit/7a8a29be40258294cafd13b1df7df5ea349a675d)) +* Support isolation level REPEATABLE_READ for R/W transactions ([#3670](https://github.com/googleapis/java-spanner/issues/3670)) ([e62f5ab](https://github.com/googleapis/java-spanner/commit/e62f5ab46da8696a8ff0d213f924588612bb4025)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.55.1 ([b959f4c](https://github.com/googleapis/java-spanner/commit/b959f4c8ebb3551796a894b659aa42ba16fb1c39)) +* Revert the ALTS bound token enablement ([#3679](https://github.com/googleapis/java-spanner/issues/3679)) ([183c1f0](https://github.com/googleapis/java-spanner/commit/183c1f0e228a927a575596a38a01d63bb8eb6943)) + + +### Performance Improvements + +* Get database dialect using multiplexed session ([#3684](https://github.com/googleapis/java-spanner/issues/3684)) ([f641a40](https://github.com/googleapis/java-spanner/commit/f641a40ed515a6559718c2fe2757c322f037d83b)) +* Skip gRPC trailers for StreamingRead & ExecuteStreamingSql ([#3661](https://github.com/googleapis/java-spanner/issues/3661)) ([bd4b1f5](https://github.com/googleapis/java-spanner/commit/bd4b1f5b9612f6a4dfd748d735c887f8e46ae106)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.45.1 ([#3689](https://github.com/googleapis/java-spanner/issues/3689)) ([67188df](https://github.com/googleapis/java-spanner/commit/67188df2be23eef88de8f4febc3ac7208ebdd937)) + +## [6.88.0](https://github.com/googleapis/java-spanner/compare/v6.87.0...v6.88.0) (2025-02-27) + + +### Features + +* Add a last field in the PartialResultSet ([7c714be](https://github.com/googleapis/java-spanner/commit/7c714be10eb345f2d8f566d752f6de615061c4da)) +* Automatically set default sequence kind in JDBC and PGAdapter ([#3658](https://github.com/googleapis/java-spanner/issues/3658)) 
([e8abf33](https://github.com/googleapis/java-spanner/commit/e8abf338b85e95f185ab2875a804134523f84de3)) +* Default authentication support for external hosts ([#3656](https://github.com/googleapis/java-spanner/issues/3656)) ([ace11d5](https://github.com/googleapis/java-spanner/commit/ace11d5d928fb567b16560263ae95aa9cd916e22)) +* **spanner:** A new enum `IsolationLevel` is added ([3fd33ba](https://github.com/googleapis/java-spanner/commit/3fd33ba9c5fab43ed475ed3cff9d60c008843981)) +* **spanner:** Add instance partitions field in backup proto ([3fd33ba](https://github.com/googleapis/java-spanner/commit/3fd33ba9c5fab43ed475ed3cff9d60c008843981)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.54.0 ([57497ad](https://github.com/googleapis/java-spanner/commit/57497ad00c62f152f493645f382530cf0eedf19e)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.44.0 ([#3665](https://github.com/googleapis/java-spanner/issues/3665)) ([3543548](https://github.com/googleapis/java-spanner/commit/35435488f87ebd59179698e8f74578b41eb219da)) + +## [6.87.0](https://github.com/googleapis/java-spanner/compare/v6.86.0...v6.87.0) (2025-02-20) + + +### Features + +* Add AddSplitPoints API ([a5ebcd3](https://github.com/googleapis/java-spanner/commit/a5ebcd343a67c57d61362cfb0ccb4888f5503681)) +* Add option for multiplexed sessions with partitioned operations ([#3635](https://github.com/googleapis/java-spanner/issues/3635)) ([dc89b4d](https://github.com/googleapis/java-spanner/commit/dc89b4d7663f0e40a9169b21243f2d94f2fc5749)) +* Add option to indicate that a statement is the last in a transaction ([#3647](https://github.com/googleapis/java-spanner/issues/3647)) ([b04ea80](https://github.com/googleapis/java-spanner/commit/b04ea804cfa9551b4d7c49cd83f0ef1120942423)) +* Adding gfe_latencies metric to built-in metrics ([#3490](https://github.com/googleapis/java-spanner/issues/3490)) 
([314dadc](https://github.com/googleapis/java-spanner/commit/314dadc31f4a5aa798d45886db7231c1bd8b7a91)) +* **spanner:** Support multiplexed session for read-write transactions ([#3608](https://github.com/googleapis/java-spanner/issues/3608)) ([bda78ed](https://github.com/googleapis/java-spanner/commit/bda78edaba827acf974c87c335868a6f8caa38f2)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.53.0 ([20a3d0d](https://github.com/googleapis/java-spanner/commit/20a3d0da41509ffca66c77de6771fc8080930613)) +* **spanner:** End spans for read-write methods ([#3629](https://github.com/googleapis/java-spanner/issues/3629)) ([4a1f99c](https://github.com/googleapis/java-spanner/commit/4a1f99c6bb872ffc08e60d3843e4cdfc4efa2690)) +* **spanner:** Release resources in TransactionManager ([#3638](https://github.com/googleapis/java-spanner/issues/3638)) ([e0a3e5b](https://github.com/googleapis/java-spanner/commit/e0a3e5bd169e28e349a2dc92f86a2a9b5510f8f6)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.43.0 ([#3642](https://github.com/googleapis/java-spanner/issues/3642)) ([c12968a](https://github.com/googleapis/java-spanner/commit/c12968a5f6dad95017d9867d96d4f19a26643a07)) + +## [6.86.0](https://github.com/googleapis/java-spanner/compare/v6.85.0...v6.86.0) (2025-01-31) + + +### Features + +* Add sample for asymmetric autoscaling instances ([#3562](https://github.com/googleapis/java-spanner/issues/3562)) ([3584b81](https://github.com/googleapis/java-spanner/commit/3584b81a27bfcdd071fbf7e0d40dfa840ea88151)) +* Support graph and pipe queries in Connection API ([#3586](https://github.com/googleapis/java-spanner/issues/3586)) ([71c3063](https://github.com/googleapis/java-spanner/commit/71c306346d5b3805f55d5698cf8867d5f4ae519e)) + + +### Bug Fixes + +* Always add instance-id for built-in metrics ([#3612](https://github.com/googleapis/java-spanner/issues/3612)) 
([705b627](https://github.com/googleapis/java-spanner/commit/705b627646f1679b7d1c4c1f86a853872cf8bfd5)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.51.1 ([3e27251](https://github.com/googleapis/java-spanner/commit/3e272510970d1951b74c4ec9425f1a890790ddb3)) +* **deps:** Update the Java code generator (gapic-generator-java) to 2.52.0 ([bf69673](https://github.com/googleapis/java-spanner/commit/bf69673886dbe040292214ed6e64997a230441f6)) +* **spanner:** Moved mTLSContext configurator from builder to constructor ([#3605](https://github.com/googleapis/java-spanner/issues/3605)) ([ac7c30b](https://github.com/googleapis/java-spanner/commit/ac7c30bfb14bdafc11675c2a120effde4a71c922)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.42.0 ([#3616](https://github.com/googleapis/java-spanner/issues/3616)) ([2ea59f0](https://github.com/googleapis/java-spanner/commit/2ea59f05225f2dba2effb503e6abddcfdb6fe6ee)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.46.0 ([#3530](https://github.com/googleapis/java-spanner/issues/3530)) ([d505850](https://github.com/googleapis/java-spanner/commit/d5058504b94501cabd75ad5e7030404b63c3f8b4)) + + +### Documentation + +* Clarify how async updates can overtake each other ([#3581](https://github.com/googleapis/java-spanner/issues/3581)) ([1be250f](https://github.com/googleapis/java-spanner/commit/1be250fea686f3a41739c9c8aa474ed956b130e4)) +* Fix typo timzeone -> timezone ([bf69673](https://github.com/googleapis/java-spanner/commit/bf69673886dbe040292214ed6e64997a230441f6)) +* Fixed parameter arguments for AbstractResultSet's Listener's on TransactionMetadata doc ([#3602](https://github.com/googleapis/java-spanner/issues/3602)) ([1f143a4](https://github.com/googleapis/java-spanner/commit/1f143a4b7b899aec8cf58546f7540a41d1c73731)) +* **samples:** Add samples and tests for change streams transaction exclusion 
([#3098](https://github.com/googleapis/java-spanner/issues/3098)) ([1f81600](https://github.com/googleapis/java-spanner/commit/1f816009abdbfb32bb26686d8fdb2a771216004e)) + +## [6.85.0](https://github.com/googleapis/java-spanner/compare/v6.84.0...v6.85.0) (2025-01-10) + + +### Features + +* Add gcp client attributes in OpenTelemetry traces ([#3595](https://github.com/googleapis/java-spanner/issues/3595)) ([7893f24](https://github.com/googleapis/java-spanner/commit/7893f2499f6a43e4e80ec78a9f0da5beedb6967a)) +* Add LockHint feature ([#3588](https://github.com/googleapis/java-spanner/issues/3588)) ([326442b](https://github.com/googleapis/java-spanner/commit/326442bca41700debcbeb67b6bd11fc36bd4f26d)) +* **spanner:** MTLS setup for spanner external host clients ([#3574](https://github.com/googleapis/java-spanner/issues/3574)) ([f8dd152](https://github.com/googleapis/java-spanner/commit/f8dd15272f2a250c5b57c9f2527d03dbd557d717)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-monitoring-v3 to v3.56.0 ([#3563](https://github.com/googleapis/java-spanner/issues/3563)) ([e4d0b0f](https://github.com/googleapis/java-spanner/commit/e4d0b0ffa2308c8d949630b52c67e3b79c4491fb)) +* Update dependency com.google.api.grpc:proto-google-cloud-monitoring-v3 to v3.57.0 ([#3592](https://github.com/googleapis/java-spanner/issues/3592)) ([a7542da](https://github.com/googleapis/java-spanner/commit/a7542daff466226221eeb9a885a2e67a99adb678)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.41.1 ([#3589](https://github.com/googleapis/java-spanner/issues/3589)) ([2cd4238](https://github.com/googleapis/java-spanner/commit/2cd42388370dac004bfd807f6aede3ba45456706)) +* Update dependency com.google.cloud.opentelemetry:exporter-trace to v0.33.0 ([#3455](https://github.com/googleapis/java-spanner/issues/3455)) ([70649dc](https://github.com/googleapis/java-spanner/commit/70649dc2f64aa06404893cc6a36716fc366c83e7)) +* Update dependency 
com.google.re2j:re2j to v1.8 ([#3594](https://github.com/googleapis/java-spanner/issues/3594)) ([0f2013d](https://github.com/googleapis/java-spanner/commit/0f2013d66d3fd14e6be019cda6745ddc32032091)) +* Update googleapis/sdk-platform-java action to v2.51.1 ([#3591](https://github.com/googleapis/java-spanner/issues/3591)) ([3daa1a0](https://github.com/googleapis/java-spanner/commit/3daa1a0c735000845558a1d3612257a7d0524350)) + +## [6.84.0](https://github.com/googleapis/java-spanner/compare/v6.83.0...v6.84.0) (2025-01-06) + + +### Features + +* Add support for ARRAY<STRUCT> to CloudClientExecutor ([#3544](https://github.com/googleapis/java-spanner/issues/3544)) ([6cbaf7e](https://github.com/googleapis/java-spanner/commit/6cbaf7ec6502d04fc0a0c09720e2054bd10bead9)) +* Add transaction runner for connections ([#3559](https://github.com/googleapis/java-spanner/issues/3559)) ([5a1be3d](https://github.com/googleapis/java-spanner/commit/5a1be3dedeafa6858502eadc7918820b9cd90f68)) +* Exposing InstanceType in Instance configuration (to define PROVISIONED or FREE spanner instance) ([8d295c4](https://github.com/googleapis/java-spanner/commit/8d295c4a4030b4e97b1d653cc3baf412864f3042)) +* Improve tracing by adding attributes ([#3576](https://github.com/googleapis/java-spanner/issues/3576)) ([eee333b](https://github.com/googleapis/java-spanner/commit/eee333b51fa69123e011dfbd2a0896fd31ac10dc)) +* **spanner:** Add jdbc support for external hosts ([#3536](https://github.com/googleapis/java-spanner/issues/3536)) ([801346a](https://github.com/googleapis/java-spanner/commit/801346a1b2efe7d0144f7442e1568eb5b02ddcbc)) + + +### Bug Fixes + +* AsyncTransactionManager did not always close the session ([#3580](https://github.com/googleapis/java-spanner/issues/3580)) ([d9813a0](https://github.com/googleapis/java-spanner/commit/d9813a05240b966f444168d3b8c30da9d27a8cc4)) +* Retry specific internal errors ([#3565](https://github.com/googleapis/java-spanner/issues/3565)) 
([b9ce1a6](https://github.com/googleapis/java-spanner/commit/b9ce1a6fcbd11373a5cc82807af15c1cca0dd48e)) +* Update max_in_use_session at 10 mins interval ([#3570](https://github.com/googleapis/java-spanner/issues/3570)) ([cc1753d](https://github.com/googleapis/java-spanner/commit/cc1753da72b3e508f8fea8a6d19e1ed3f34e3602)) + + +### Dependencies + +* Update opentelemetry.version to v1.45.0 ([#3531](https://github.com/googleapis/java-spanner/issues/3531)) ([78c82ed](https://github.com/googleapis/java-spanner/commit/78c82edb4fcc4a5a9a372225ca429038c3b34955)) + +## [6.83.0](https://github.com/googleapis/java-spanner/compare/v6.82.0...v6.83.0) (2024-12-13) + + +### Features + +* Add Metrics host for built in metrics ([#3519](https://github.com/googleapis/java-spanner/issues/3519)) ([4ed455a](https://github.com/googleapis/java-spanner/commit/4ed455a43edf7ff8d138ce4d40a52d3224383b14)) +* Add opt-in for using multiplexed sessions for blind writes ([#3540](https://github.com/googleapis/java-spanner/issues/3540)) ([216f53e](https://github.com/googleapis/java-spanner/commit/216f53e4cbc0150078ece7785da33b342a6ab082)) +* Add UUID in Spanner TypeCode enum ([41f83dc](https://github.com/googleapis/java-spanner/commit/41f83dcf046f955ec289d4e976f40a03922054cb)) +* Introduce java.time variables and methods ([#3495](https://github.com/googleapis/java-spanner/issues/3495)) ([8a7d533](https://github.com/googleapis/java-spanner/commit/8a7d533ded21b9b94992b68c702c08bb84474e1b)) +* **spanner:** Support multiplexed session for Partitioned operations ([#3231](https://github.com/googleapis/java-spanner/issues/3231)) ([4501a3e](https://github.com/googleapis/java-spanner/commit/4501a3ea69a9346e8b95edf6f94ff839b509ec73)) +* Support 'set local' for retry_aborts_internally ([#3532](https://github.com/googleapis/java-spanner/issues/3532)) ([331942f](https://github.com/googleapis/java-spanner/commit/331942f51b11660b9de9c8fe8aacd6f60ac254b5)) + + +### Bug Fixes + +* **deps:** Update the Java code 
generator (gapic-generator-java) to 2.51.0 ([41f83dc](https://github.com/googleapis/java-spanner/commit/41f83dcf046f955ec289d4e976f40a03922054cb)) + + +### Dependencies + +* Update sdk platform java dependencies ([#3549](https://github.com/googleapis/java-spanner/issues/3549)) ([6235f0f](https://github.com/googleapis/java-spanner/commit/6235f0f2c223718c537addc450fa5910d1500271)) + +## [6.82.0](https://github.com/googleapis/java-spanner/compare/v6.81.2...v6.82.0) (2024-12-04) + + +### Features + +* Add option for retrying DML as PDML ([#3480](https://github.com/googleapis/java-spanner/issues/3480)) ([b545557](https://github.com/googleapis/java-spanner/commit/b545557b1a27868aeb5115b3947d42db015cc00e)) +* Add the last statement option to ExecuteSqlRequest and ExecuteBatchDmlRequest ([76ab801](https://github.com/googleapis/java-spanner/commit/76ab8011b0aa03e5bb98e375595358732cde31b7)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.50.0 ([76ab801](https://github.com/googleapis/java-spanner/commit/76ab8011b0aa03e5bb98e375595358732cde31b7)) +* Shutdown built in metrics meter provider ([#3518](https://github.com/googleapis/java-spanner/issues/3518)) ([c935e2e](https://github.com/googleapis/java-spanner/commit/c935e2eff780100273bc35c11458485c9bb05230)) +* **spanner:** GetEdition() is returning null for Instance ([#3496](https://github.com/googleapis/java-spanner/issues/3496)) ([77cb585](https://github.com/googleapis/java-spanner/commit/77cb585d57fd30f953b0ffb80be124e3cb1c6f39)) + + +### Dependencies + +* Update dependency commons-io:commons-io to v2.18.0 ([#3492](https://github.com/googleapis/java-spanner/issues/3492)) ([5c8b3ad](https://github.com/googleapis/java-spanner/commit/5c8b3ade163b4cdb81a53f5dcf777ebba48ef265)) + + +### Documentation + +* Add Multi Region Encryption samples ([#3524](https://github.com/googleapis/java-spanner/issues/3524)) 
([316f971](https://github.com/googleapis/java-spanner/commit/316f97146a1fb9f120b642421ec1196be9abddf0)) + +## [6.81.2](https://github.com/googleapis/java-spanner/compare/v6.81.1...v6.81.2) (2024-11-20) + + +### Bug Fixes + +* Directpath enabled attribute ([#3477](https://github.com/googleapis/java-spanner/issues/3477)) ([ea1ebad](https://github.com/googleapis/java-spanner/commit/ea1ebadd1ef5d2a343e7117828cae71a798c38eb)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-monitoring-v3 to v3.55.0 ([#3482](https://github.com/googleapis/java-spanner/issues/3482)) ([bf350b0](https://github.com/googleapis/java-spanner/commit/bf350b024592312b0a00a04c2ab6d3d2312ea686)) +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.53.0 ([#3454](https://github.com/googleapis/java-spanner/issues/3454)) ([8729b30](https://github.com/googleapis/java-spanner/commit/8729b30a1043a7e77b0277036c70c7c2616d0b47)) +* Update dependency com.google.cloud:google-cloud-trace to v2.53.0 ([#3464](https://github.com/googleapis/java-spanner/issues/3464)) ([a507e4c](https://github.com/googleapis/java-spanner/commit/a507e4c89bb59d154881812f10cab02d68325a08)) +* Update dependency com.google.cloud:google-cloud-trace to v2.54.0 ([#3488](https://github.com/googleapis/java-spanner/issues/3488)) ([1d1fecf](https://github.com/googleapis/java-spanner/commit/1d1fecf04a4e800c9b756324914cb1feed7c9866)) +* Update googleapis/sdk-platform-java action to v2.50.0 ([#3475](https://github.com/googleapis/java-spanner/issues/3475)) ([e992f18](https://github.com/googleapis/java-spanner/commit/e992f18a651ec034b89aa214cb87ec43f33f2f79)) +* Update sdk platform java dependencies ([#3476](https://github.com/googleapis/java-spanner/issues/3476)) ([acb6446](https://github.com/googleapis/java-spanner/commit/acb6446cb952bdbc54ca1b6c53dc466c72cb55b0)) + +## [6.81.1](https://github.com/googleapis/java-spanner/compare/v6.81.0...v6.81.1) (2024-11-11) + + +### Bug Fixes + +* Client 
built in metrics. Skip export if instance id is null ([#3447](https://github.com/googleapis/java-spanner/issues/3447)) ([8b2e5ef](https://github.com/googleapis/java-spanner/commit/8b2e5ef5bb391e5a4d4df3cb45d6a3f722a8cfbe)) +* **spanner:** Avoid blocking thread in AsyncResultSet ([#3446](https://github.com/googleapis/java-spanner/issues/3446)) ([7c82f1c](https://github.com/googleapis/java-spanner/commit/7c82f1c7823d4d529a70c0da231d2593f00b638b)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-monitoring-v3 to v3.54.0 ([#3437](https://github.com/googleapis/java-spanner/issues/3437)) ([7e28326](https://github.com/googleapis/java-spanner/commit/7e283261961d6435488ed668133dc3bdd238d402)) +* Update dependency com.google.cloud:google-cloud-monitoring to v3.54.0 ([#3438](https://github.com/googleapis/java-spanner/issues/3438)) ([fa18894](https://github.com/googleapis/java-spanner/commit/fa188942c506c85f4c628a8b442b0ee2e6cb845f)) +* Update dependency com.google.cloud:google-cloud-trace to v2.53.0 ([#3440](https://github.com/googleapis/java-spanner/issues/3440)) ([314eeb8](https://github.com/googleapis/java-spanner/commit/314eeb823e14c386ea6e65caae8c80e908e05600)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.44.1 ([#3452](https://github.com/googleapis/java-spanner/issues/3452)) ([6518eea](https://github.com/googleapis/java-spanner/commit/6518eea2921006f1aa431e02754118e3d3d3b620)) +* Update opentelemetry.version to v1.44.1 ([#3451](https://github.com/googleapis/java-spanner/issues/3451)) ([d9b0271](https://github.com/googleapis/java-spanner/commit/d9b0271603dd14c51954532054b134419150625a)) + + +### Documentation + +* Update samples' README.md to ensure given ([#3420](https://github.com/googleapis/java-spanner/issues/3420)) ([663a974](https://github.com/googleapis/java-spanner/commit/663a974dc2a52d773deb620b0bc65f0049f63693)) + +## [6.81.0](https://github.com/googleapis/java-spanner/compare/v6.80.1...v6.81.0) (2024-11-01) + 
+ +### Features + +* Client built in metrics ([#3408](https://github.com/googleapis/java-spanner/issues/3408)) ([6a36103](https://github.com/googleapis/java-spanner/commit/6a3610379d1d0eee741d5ef4b30e811ff5a67bc0)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.54.0 ([#3439](https://github.com/googleapis/java-spanner/issues/3439)) ([cdec63f](https://github.com/googleapis/java-spanner/commit/cdec63f84ef9b615adf19e4611b2dc223eec687b)) + +## [6.80.1](https://github.com/googleapis/java-spanner/compare/v6.80.0...v6.80.1) (2024-10-28) + + +### Dependencies + +* Update googleapis/sdk-platform-java action to v2.49.0 ([#3430](https://github.com/googleapis/java-spanner/issues/3430)) ([beb788c](https://github.com/googleapis/java-spanner/commit/beb788c05d099a0c5edeabb7ed63f4a6a7a24c16)) +* Update sdk platform java dependencies ([#3431](https://github.com/googleapis/java-spanner/issues/3431)) ([eef03e9](https://github.com/googleapis/java-spanner/commit/eef03e9e5a5ce9d4fcf9728d6b14630bbb99afce)) + +## [6.80.0](https://github.com/googleapis/java-spanner/compare/v6.79.0...v6.80.0) (2024-10-25) + + +### Features + +* Enabling endToEndTracing support in Connection API ([#3412](https://github.com/googleapis/java-spanner/issues/3412)) ([16cc6ee](https://github.com/googleapis/java-spanner/commit/16cc6eed58cf735026d7757a28f61f29821a14bf)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.38.0 ([#3424](https://github.com/googleapis/java-spanner/issues/3424)) ([b727453](https://github.com/googleapis/java-spanner/commit/b727453b93d1089f76e1b908255610cc2796da43)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.43.0 ([#3399](https://github.com/googleapis/java-spanner/issues/3399)) ([a755c6c](https://github.com/googleapis/java-spanner/commit/a755c6c2f44cc3eb0f5a54cd58244cebc62b7a4f)) +* Update dependency io.opentelemetry:opentelemetry-sdk-testing to v1.43.0 
([#3398](https://github.com/googleapis/java-spanner/issues/3398)) ([693243a](https://github.com/googleapis/java-spanner/commit/693243afae34610441345645f627bf199e8ddb8b)) +* Update googleapis/sdk-platform-java action to v2.48.0 ([#3422](https://github.com/googleapis/java-spanner/issues/3422)) ([d5d1f55](https://github.com/googleapis/java-spanner/commit/d5d1f55d7e8e8f9aa89b7ab9e5f5bd0464bf0e1a)) + + +### Documentation + +* Fix tracing sample to exit when completed, and use custom monitored resource for export ([#3287](https://github.com/googleapis/java-spanner/issues/3287)) ([ddb65b1](https://github.com/googleapis/java-spanner/commit/ddb65b197a6f311c2bb8ec9856ea968f3a31d62a)) + +## [6.79.0](https://github.com/googleapis/java-spanner/compare/v6.78.0...v6.79.0) (2024-10-11) + + +### Features + +* Support DML auto-batching in Connection API ([#3386](https://github.com/googleapis/java-spanner/issues/3386)) ([a1ce267](https://github.com/googleapis/java-spanner/commit/a1ce267cbd4d4c5c638ab7fe0dd5dba24bcfab86)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-monitoring-v3 to v3.53.0 ([#3390](https://github.com/googleapis/java-spanner/issues/3390)) ([a060e92](https://github.com/googleapis/java-spanner/commit/a060e92141d3dad0db1fc5175416e24a191fa326)) +* Update dependency com.google.cloud:google-cloud-monitoring to v3.53.0 ([#3391](https://github.com/googleapis/java-spanner/issues/3391)) ([7f0927d](https://github.com/googleapis/java-spanner/commit/7f0927d495966d7a2ef9023d65545bfe1fecc20b)) +* Update dependency com.google.cloud:google-cloud-monitoring to v3.53.0 ([#3392](https://github.com/googleapis/java-spanner/issues/3392)) ([fd3e92d](https://github.com/googleapis/java-spanner/commit/fd3e92da940419cd1aed14f770186381d59a2b47)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.37.0 ([#3395](https://github.com/googleapis/java-spanner/issues/3395)) 
([8ecb1a9](https://github.com/googleapis/java-spanner/commit/8ecb1a901f94d9d49efb0278516428e379803088)) +* Update dependency com.google.cloud.opentelemetry:exporter-metrics to v0.33.0 ([#3388](https://github.com/googleapis/java-spanner/issues/3388)) ([26aa51d](https://github.com/googleapis/java-spanner/commit/26aa51d561c35295dfb7e2867c3b04b79ce6efc9)) +* Update dependency com.google.cloud.opentelemetry:exporter-trace to v0.33.0 ([#3389](https://github.com/googleapis/java-spanner/issues/3389)) ([6e34c5a](https://github.com/googleapis/java-spanner/commit/6e34c5a1c20c20a2e994d112f042a59c9b93e1e6)) +* Update googleapis/sdk-platform-java action to v2.47.0 ([#3383](https://github.com/googleapis/java-spanner/issues/3383)) ([4f0d693](https://github.com/googleapis/java-spanner/commit/4f0d69316a910c23abcb2a142e59bbaf550ca89c)) + +## [6.78.0](https://github.com/googleapis/java-spanner/compare/v6.77.0...v6.78.0) (2024-10-11) + + +### Features + +* Define ReplicaComputeCapacity and AsymmetricAutoscalingOption ([f46a6b3](https://github.com/googleapis/java-spanner/commit/f46a6b34383fe45d63b2db912389b26067f3a853)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.47.0 ([139a715](https://github.com/googleapis/java-spanner/commit/139a715d3f617b20a00b0cf4f5819e5a61a87c96)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-trace to v2.52.0 ([#3393](https://github.com/googleapis/java-spanner/issues/3393)) ([79453f9](https://github.com/googleapis/java-spanner/commit/79453f9985eda10631cd29ae58c0cedf234c2e18)) + +## [6.77.0](https://github.com/googleapis/java-spanner/compare/v6.76.0...v6.77.0) (2024-10-02) + + +### Features + +* Add INTERVAL API ([c078ac3](https://github.com/googleapis/java-spanner/commit/c078ac34c3d14b13bbd4a507de4f0013975dca4e)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-monitoring-v3 to v3.52.0 ([#3291](https://github.com/googleapis/java-spanner/issues/3291)) 
([9241063](https://github.com/googleapis/java-spanner/commit/92410638b0ba88f8e89e28bd12dd58830f7aaeb3)) +* Update dependency com.google.cloud:google-cloud-monitoring to v3.52.0 ([#3292](https://github.com/googleapis/java-spanner/issues/3292)) ([da27a19](https://github.com/googleapis/java-spanner/commit/da27a1992e40b1b4591f0232f687d8031387e749)) +* Update dependency com.google.cloud:google-cloud-monitoring to v3.52.0 ([#3293](https://github.com/googleapis/java-spanner/issues/3293)) ([c6dbdb2](https://github.com/googleapis/java-spanner/commit/c6dbdb255eb4cd231a2dc7cef94bf3353fa7e837)) +* Update dependency com.google.cloud:google-cloud-trace to v2.51.0 ([#3294](https://github.com/googleapis/java-spanner/issues/3294)) ([a269747](https://github.com/googleapis/java-spanner/commit/a269747889ea0b2380f07e1efef3b288a9c4fd04)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.36.1 ([#3355](https://github.com/googleapis/java-spanner/issues/3355)) ([5191e71](https://github.com/googleapis/java-spanner/commit/5191e71a83a316b41564ce2604980c8f33135f2f)) +* Update dependency com.google.cloud.opentelemetry:exporter-metrics to v0.32.0 ([#3371](https://github.com/googleapis/java-spanner/issues/3371)) ([d5b5ca0](https://github.com/googleapis/java-spanner/commit/d5b5ca0cccc6cf73d759245d2bd72f33c7d39830)) +* Update dependency com.google.cloud.opentelemetry:exporter-trace to v0.32.0 ([#3372](https://github.com/googleapis/java-spanner/issues/3372)) ([aa9a71d](https://github.com/googleapis/java-spanner/commit/aa9a71d38dabd8d1974bb553761e93735ade5c26)) +* Update dependency commons-io:commons-io to v2.17.0 ([#3349](https://github.com/googleapis/java-spanner/issues/3349)) ([7c21164](https://github.com/googleapis/java-spanner/commit/7c21164f2b8e75afab268f2fb8e132a372ac0d67)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.42.1 ([#3323](https://github.com/googleapis/java-spanner/issues/3323)) 
([95dfc02](https://github.com/googleapis/java-spanner/commit/95dfc02ae2d65f99219dcced66cf4e74d1c4975b)) +* Update dependency ubuntu to v24 ([#3356](https://github.com/googleapis/java-spanner/issues/3356)) ([042c294](https://github.com/googleapis/java-spanner/commit/042c294cc5f83eebd2e3600cffb165e5b467d63e)) +* Update googleapis/sdk-platform-java action to v2.46.1 ([#3354](https://github.com/googleapis/java-spanner/issues/3354)) ([378f5cf](https://github.com/googleapis/java-spanner/commit/378f5cfb08d4e5ee80b21007bfc829de61bfbdbe)) +* Update junixsocket.version to v2.10.1 ([#3367](https://github.com/googleapis/java-spanner/issues/3367)) ([5f94915](https://github.com/googleapis/java-spanner/commit/5f94915941c4e4132f8460a04dde0643fa63ab99)) +* Update opentelemetry.version to v1.42.1 ([#3330](https://github.com/googleapis/java-spanner/issues/3330)) ([7b05e43](https://github.com/googleapis/java-spanner/commit/7b05e4301953364617691e8ae225cea823e3a323)) + + +### Documentation + +* Update comment for PROFILE QueryMode ([c078ac3](https://github.com/googleapis/java-spanner/commit/c078ac34c3d14b13bbd4a507de4f0013975dca4e)) + +## [6.76.0](https://github.com/googleapis/java-spanner/compare/v6.75.0...v6.76.0) (2024-09-27) + + +### Features + +* Add opt-in flag and ClientInterceptor to propagate trace context for Spanner end to end tracing ([#3162](https://github.com/googleapis/java-spanner/issues/3162)) ([0b7fdaf](https://github.com/googleapis/java-spanner/commit/0b7fdaf1d25e81ca8dd35a0f8d8caa7b77a7e58c)) +* Add samples for backup schedule feature APIs. 
([#3339](https://github.com/googleapis/java-spanner/issues/3339)) ([8cd5163](https://github.com/googleapis/java-spanner/commit/8cd516351e7859a81f00f17cb5071edbd804ea90)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.46.1 ([1719f44](https://github.com/googleapis/java-spanner/commit/1719f4465841354db3253fd132868394e530a82d)) + +## [6.75.0](https://github.com/googleapis/java-spanner/compare/v6.74.1...v6.75.0) (2024-09-19) + + +### Features + +* Support multiplexed session for blind write with single use transaction ([#3229](https://github.com/googleapis/java-spanner/issues/3229)) ([b3e2b0f](https://github.com/googleapis/java-spanner/commit/b3e2b0f4892951867715cb7f354c089fca4f050f)) + +## [6.74.1](https://github.com/googleapis/java-spanner/compare/v6.74.0...v6.74.1) (2024-09-16) + + +### Bug Fixes + +* Use core pool size 1 for maintainer ([#3314](https://github.com/googleapis/java-spanner/issues/3314)) ([cce008d](https://github.com/googleapis/java-spanner/commit/cce008d212535d32da990242973f7f517ca5d6dc)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.35.0 ([#3329](https://github.com/googleapis/java-spanner/issues/3329)) ([654835f](https://github.com/googleapis/java-spanner/commit/654835f2433b97665c74be9ec80c169ac905a720)) + +## [6.74.0](https://github.com/googleapis/java-spanner/compare/v6.73.0...v6.74.0) (2024-08-27) + + +### Features + +* **spanner:** Add edition field to the instance proto ([6b7e6ca](https://github.com/googleapis/java-spanner/commit/6b7e6ca109ea9679b5e36598d3c343fa40bff724)) + + +### Documentation + +* Change the example timestamps in Spanner Graph java sample code ([#3295](https://github.com/googleapis/java-spanner/issues/3295)) ([b6490b6](https://github.com/googleapis/java-spanner/commit/b6490b6a6ee2b7399431881a5e87b5ef7b577c89)) + +## [6.73.0](https://github.com/googleapis/java-spanner/compare/v6.72.0...v6.73.0) (2024-08-22) + + +### Features + +* Add 
option for cancelling queries when closing client ([#3276](https://github.com/googleapis/java-spanner/issues/3276)) ([95da1ed](https://github.com/googleapis/java-spanner/commit/95da1eddbc979f4ce78c9d1ac15bc4c1faba6dca)) + + +### Bug Fixes + +* Github workflow vulnerable to script injection ([#3232](https://github.com/googleapis/java-spanner/issues/3232)) ([599255c](https://github.com/googleapis/java-spanner/commit/599255c36d1fbe8317705a7eeb2a9e400c3efd15)) +* Make DecodeMode.DIRECT the default ([#3280](https://github.com/googleapis/java-spanner/issues/3280)) ([f31a95a](https://github.com/googleapis/java-spanner/commit/f31a95ab105407305e988e86c8f7b0d8654995e0)) +* Synchronize lazy ResultSet decoding ([#3267](https://github.com/googleapis/java-spanner/issues/3267)) ([4219cf8](https://github.com/googleapis/java-spanner/commit/4219cf86dba5e44d55f13ab118113f119c92b9e9)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.34.0 ([#3277](https://github.com/googleapis/java-spanner/issues/3277)) ([c449a91](https://github.com/googleapis/java-spanner/commit/c449a91628b005481996bce5ab449d62496a4d2d)) +* Update dependency commons-cli:commons-cli to v1.9.0 ([#3275](https://github.com/googleapis/java-spanner/issues/3275)) ([84790f7](https://github.com/googleapis/java-spanner/commit/84790f7d437e88739487b148bf963f0ac9dc3f96)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.41.0 ([#3269](https://github.com/googleapis/java-spanner/issues/3269)) ([a7458e9](https://github.com/googleapis/java-spanner/commit/a7458e970e4ca55ff3e312b2129e890576145db1)) +* Update dependency org.hamcrest:hamcrest to v3 ([#3271](https://github.com/googleapis/java-spanner/issues/3271)) ([fc2e343](https://github.com/googleapis/java-spanner/commit/fc2e343dc06f80617a2cd6f2bea59b0631e70678)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.11.0 ([#3272](https://github.com/googleapis/java-spanner/issues/3272)) 
([1bc0c46](https://github.com/googleapis/java-spanner/commit/1bc0c469b99ebf3778592b04dbf175b00bf5b06e)) +* Update opentelemetry.version to v1.41.0 ([#3270](https://github.com/googleapis/java-spanner/issues/3270)) ([88f6b56](https://github.com/googleapis/java-spanner/commit/88f6b56fb243bb17b814a7ae150c8f38dced119a)) + + +### Documentation + +* Create a few code snippets as examples for using Spanner Graph using Java ([#3234](https://github.com/googleapis/java-spanner/issues/3234)) ([61f0ab7](https://github.com/googleapis/java-spanner/commit/61f0ab7a48bc3e51b830534b1cfa70e40166ec91)) + +## [6.72.0](https://github.com/googleapis/java-spanner/compare/v6.71.0...v6.72.0) (2024-08-07) + + +### Features + +* Add `RESOURCE_EXHAUSTED` to the list of retryable error codes ([e859b29](https://github.com/googleapis/java-spanner/commit/e859b29ccf4e68b1ab62cffdd4cf197011ba9878)) +* Add field order_by in spanner.proto ([e859b29](https://github.com/googleapis/java-spanner/commit/e859b29ccf4e68b1ab62cffdd4cf197011ba9878)) +* Add QueryCancellationAction message in executor protos ([e859b29](https://github.com/googleapis/java-spanner/commit/e859b29ccf4e68b1ab62cffdd4cf197011ba9878)) +* Add SessionPoolOptions, SpannerOptions protos in executor protos ([e859b29](https://github.com/googleapis/java-spanner/commit/e859b29ccf4e68b1ab62cffdd4cf197011ba9878)) +* Add support for multi region encryption config ([e859b29](https://github.com/googleapis/java-spanner/commit/e859b29ccf4e68b1ab62cffdd4cf197011ba9878)) +* Enable hermetic library generation ([#3129](https://github.com/googleapis/java-spanner/issues/3129)) ([94b2a86](https://github.com/googleapis/java-spanner/commit/94b2a8610ac02d2b4212c421f03b4e9561ec9949)) +* **spanner:** Add samples for instance partitions ([#3221](https://github.com/googleapis/java-spanner/issues/3221)) ([bc48bf2](https://github.com/googleapis/java-spanner/commit/bc48bf212e37441221b3b6c8742b07ff601f6c41)) +* **spanner:** Add support for Cloud Spanner Scheduled 
Backups ([e859b29](https://github.com/googleapis/java-spanner/commit/e859b29ccf4e68b1ab62cffdd4cf197011ba9878)) +* **spanner:** Adding `EXPECTED_FULFILLMENT_PERIOD` to indicate the instance creation times (with `FULFILLMENT_PERIOD_NORMAL` or `FULFILLMENT_PERIOD_EXTENDED` ENUM) with the extended instance creation time triggered by On-Demand Capacity Feature ([e859b29](https://github.com/googleapis/java-spanner/commit/e859b29ccf4e68b1ab62cffdd4cf197011ba9878)) +* **spanner:** Set manual affinity in case of gRPC-GCP extension ([#3215](https://github.com/googleapis/java-spanner/issues/3215)) ([86b306a](https://github.com/googleapis/java-spanner/commit/86b306a4189483a5fd2746052bed817443630567)) +* Support Read RPC OrderBy ([#3180](https://github.com/googleapis/java-spanner/issues/3180)) ([735bca5](https://github.com/googleapis/java-spanner/commit/735bca523e4ea53a24929fb2c27d282c41350e91)) + + +### Bug Fixes + +* Make sure commitAsync always finishes ([#3216](https://github.com/googleapis/java-spanner/issues/3216)) ([440c88b](https://github.com/googleapis/java-spanner/commit/440c88bd67e1c9d08445fe26b01bf243f7fd1ca4)) +* SessionPoolOptions.Builder#toBuilder() skipped useMultiplexedSessions ([#3197](https://github.com/googleapis/java-spanner/issues/3197)) ([027f92c](https://github.com/googleapis/java-spanner/commit/027f92cf32fee8217d2075db61fe0be58d43a40d)) + + +### Dependencies + +* Bump sdk-platform-java-config to 3.33.0 ([#3243](https://github.com/googleapis/java-spanner/issues/3243)) ([35907c6](https://github.com/googleapis/java-spanner/commit/35907c63ae981612ba24dd9605db493b5b864217)) +* Update dependencies to latest ([#3250](https://github.com/googleapis/java-spanner/issues/3250)) ([d1d566b](https://github.com/googleapis/java-spanner/commit/d1d566b096915a537e0978715c81bfca00e34ceb)) +* Update dependency com.google.auto.value:auto-value-annotations to v1.11.0 ([#3191](https://github.com/googleapis/java-spanner/issues/3191)) 
([065cd48](https://github.com/googleapis/java-spanner/commit/065cd489964aaee42fffe1e71327906bde907205)) +* Update dependency com.google.cloud:google-cloud-trace to v2.47.0 ([#3067](https://github.com/googleapis/java-spanner/issues/3067)) ([e336ab8](https://github.com/googleapis/java-spanner/commit/e336ab81a1d392d56386f9302bf51bf14e385dad)) + +## [6.71.0](https://github.com/googleapis/java-spanner/compare/v6.70.0...v6.71.0) (2024-07-03) + + +### Features + +* Include thread name in traces ([#3173](https://github.com/googleapis/java-spanner/issues/3173)) ([92b1e07](https://github.com/googleapis/java-spanner/commit/92b1e079e6093bc4a2e7b458c1bbe0f62a0fada9)) +* Support multiplexed sessions for RO transactions ([#3141](https://github.com/googleapis/java-spanner/issues/3141)) ([2b8e9ed](https://github.com/googleapis/java-spanner/commit/2b8e9ededc1ea1a5e8d4f90083f2cf862fcc198a)) + +## [6.70.0](https://github.com/googleapis/java-spanner/compare/v6.69.0...v6.70.0) (2024-06-27) + + +### Features + +* Add field order_by in spanner.proto ([#3064](https://github.com/googleapis/java-spanner/issues/3064)) ([52ee196](https://github.com/googleapis/java-spanner/commit/52ee1967ee3a37fb0482ad8b51c6e77e28b79844)) + + +### Bug Fixes + +* Do not end transaction span when rolling back to savepoint ([#3167](https://github.com/googleapis/java-spanner/issues/3167)) ([8ec0cf2](https://github.com/googleapis/java-spanner/commit/8ec0cf2032dece545c9e4d8a794b80d06550b710)) +* Remove unused DmlBatch span ([#3147](https://github.com/googleapis/java-spanner/issues/3147)) ([f7891c1](https://github.com/googleapis/java-spanner/commit/f7891c1ca42727c775cdbe91bff8d55191a3d799)) + + +### Dependencies + +* Update dependencies ([#3181](https://github.com/googleapis/java-spanner/issues/3181)) ([0c787e6](https://github.com/googleapis/java-spanner/commit/0c787e6fa67d2a259a76bbd2d7f1cfa20a1dbee8)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.32.0 
([#3184](https://github.com/googleapis/java-spanner/issues/3184)) ([9c85a6f](https://github.com/googleapis/java-spanner/commit/9c85a6fabea527253ea40a8970cc9071804d94c4)) +* Update dependency commons-cli:commons-cli to v1.8.0 ([#3073](https://github.com/googleapis/java-spanner/issues/3073)) ([36b5340](https://github.com/googleapis/java-spanner/commit/36b5340ef8bf197fbc8ed882f76caff9a6fe84b6)) + +## [6.69.0](https://github.com/googleapis/java-spanner/compare/v6.68.1...v6.69.0) (2024-06-12) + + +### Features + +* Add option to enable ApiTracer ([#3095](https://github.com/googleapis/java-spanner/issues/3095)) ([a0a4bc5](https://github.com/googleapis/java-spanner/commit/a0a4bc58d4269a8c1e5e76d9a0469f649bb69148)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.31.0 ([#3159](https://github.com/googleapis/java-spanner/issues/3159)) ([1ee19d1](https://github.com/googleapis/java-spanner/commit/1ee19d19c2db30d79c8741cc5739de1c69fb95f9)) + +## [6.68.1](https://github.com/googleapis/java-spanner/compare/v6.68.0...v6.68.1) (2024-05-29) + + +### Bug Fixes + +* Make SessionPoolOptions#setUseMultiplexedSession(boolean) package private ([#3130](https://github.com/googleapis/java-spanner/issues/3130)) ([575c3e0](https://github.com/googleapis/java-spanner/commit/575c3e01541e12294dd37a622f0b1dca52d200ba)) + +## [6.68.0](https://github.com/googleapis/java-spanner/compare/v6.67.0...v6.68.0) (2024-05-27) + + +### Features + +* [java] allow passing libraries_bom_version from env ([#1967](https://github.com/googleapis/java-spanner/issues/1967)) ([#3112](https://github.com/googleapis/java-spanner/issues/3112)) ([7d5a52c](https://github.com/googleapis/java-spanner/commit/7d5a52c19a4b8028b78fc64a10f1ba6127fa6ffe)) +* Allow DML batches in transactions to execute analyzeUpdate ([#3114](https://github.com/googleapis/java-spanner/issues/3114)) ([dee7cda](https://github.com/googleapis/java-spanner/commit/dee7cdabe74058434e4d630846f066dc82fdf512)) +* 
**spanner:** Add support for Proto Columns in Connection API ([#3123](https://github.com/googleapis/java-spanner/issues/3123)) ([7e7c814](https://github.com/googleapis/java-spanner/commit/7e7c814045dc84aaa57e7c716b0221e6cb19bcd1)) + + +### Bug Fixes + +* Allow getMetadata() calls before calling next() ([#3111](https://github.com/googleapis/java-spanner/issues/3111)) ([39902c3](https://github.com/googleapis/java-spanner/commit/39902c384f3f7f9438252cbee287f2428faf1440)) + + +### Dependencies + +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.10.2 ([#3117](https://github.com/googleapis/java-spanner/issues/3117)) ([ddebbbb](https://github.com/googleapis/java-spanner/commit/ddebbbbeef976f61f23cdd66c5f7c1f412e2f9bd)) + +## [6.67.0](https://github.com/googleapis/java-spanner/compare/v6.66.0...v6.67.0) (2024-05-22) + + +### Features + +* Add tracing for batchUpdate, executeUpdate, and connections ([#3097](https://github.com/googleapis/java-spanner/issues/3097)) ([45cdcfc](https://github.com/googleapis/java-spanner/commit/45cdcfcde02aa7976b017a90f81c2ccd28658c8f)) + + +### Performance Improvements + +* Minor optimizations to the standard query path ([#3101](https://github.com/googleapis/java-spanner/issues/3101)) ([ec820a1](https://github.com/googleapis/java-spanner/commit/ec820a16e2b3cb1a12a15231491b75cd73afaa13)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.44.0 ([#3099](https://github.com/googleapis/java-spanner/issues/3099)) ([da44e93](https://github.com/googleapis/java-spanner/commit/da44e932a39ac0124b63914f8ea926998c10ea2e)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.30.1 ([#3116](https://github.com/googleapis/java-spanner/issues/3116)) ([d205a73](https://github.com/googleapis/java-spanner/commit/d205a73714786a609673012b771e7a0722b3e1f2)) + +## [6.66.0](https://github.com/googleapis/java-spanner/compare/v6.65.1...v6.66.0) (2024-05-03) + + +### Features + +* Allow DDL with 
autocommit=false ([#3057](https://github.com/googleapis/java-spanner/issues/3057)) ([22833ac](https://github.com/googleapis/java-spanner/commit/22833acf9f073271ce0ee10f2b496f3a1d39566a)) +* Include stack trace of checked out sessions in exception ([#3092](https://github.com/googleapis/java-spanner/issues/3092)) ([ba6a0f6](https://github.com/googleapis/java-spanner/commit/ba6a0f644b6caa4d2f3aa130c6061341b70957dd)) + + +### Bug Fixes + +* Multiplexed session metrics were not included in refactor move ([#3088](https://github.com/googleapis/java-spanner/issues/3088)) ([f3589c4](https://github.com/googleapis/java-spanner/commit/f3589c430b0e84933a91008bb306c26089788357)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.30.0 ([#3082](https://github.com/googleapis/java-spanner/issues/3082)) ([ddfc98e](https://github.com/googleapis/java-spanner/commit/ddfc98e240fb47ef51075ba4461bf9a98aa25ce0)) + +## [6.65.1](https://github.com/googleapis/java-spanner/compare/v6.65.0...v6.65.1) (2024-04-30) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.43.0 ([#3066](https://github.com/googleapis/java-spanner/issues/3066)) ([97b0a93](https://github.com/googleapis/java-spanner/commit/97b0a93469ea1b0f0c9a3413e2364951c2d667d1)) + + +### Documentation + +* Add a sample for max commit delays ([#2941](https://github.com/googleapis/java-spanner/issues/2941)) ([d3b5097](https://github.com/googleapis/java-spanner/commit/d3b50976f8a6687a6dac2f483ae133c026b81cac)) + +## [6.65.0](https://github.com/googleapis/java-spanner/compare/v6.64.0...v6.65.0) (2024-04-20) + + +### Features + +* Remove grpclb ([#2760](https://github.com/googleapis/java-spanner/issues/2760)) ([1df09d9](https://github.com/googleapis/java-spanner/commit/1df09d9b9189c5527de91189a063ecc15779ac77)) +* Support client-side hints for tags and priority ([#3005](https://github.com/googleapis/java-spanner/issues/3005)) 
([48828df](https://github.com/googleapis/java-spanner/commit/48828df3489465bb53a18be50808fbd435f3e896)), closes [#2978](https://github.com/googleapis/java-spanner/issues/2978) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.39.0 ([#3001](https://github.com/googleapis/java-spanner/issues/3001)) ([6cec1bf](https://github.com/googleapis/java-spanner/commit/6cec1bf1bb44a52c62c2310447c6a068a88209ea)) +* NullPointerException on AbstractReadContext.span ([#3036](https://github.com/googleapis/java-spanner/issues/3036)) ([55732fd](https://github.com/googleapis/java-spanner/commit/55732fd107ac1d3b8c16eee198c904d54d98b2b4)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.29.0 ([#3045](https://github.com/googleapis/java-spanner/issues/3045)) ([67a6534](https://github.com/googleapis/java-spanner/commit/67a65346d5a01d118d5220230e3bed6db7e79a33)) +* Update dependency commons-cli:commons-cli to v1.7.0 ([#3043](https://github.com/googleapis/java-spanner/issues/3043)) ([9fea7a3](https://github.com/googleapis/java-spanner/commit/9fea7a30e90227e735ad3595f4ca58dfb1ca1b93)) + +## [6.64.0](https://github.com/googleapis/java-spanner/compare/v6.63.0...v6.64.0) (2024-04-12) + + +### Features + +* Add endpoint connection URL property ([#2969](https://github.com/googleapis/java-spanner/issues/2969)) ([c9be29c](https://github.com/googleapis/java-spanner/commit/c9be29c717924d7f4c5acd8fe09ee371d0101642)) +* Add PG OID support ([#2736](https://github.com/googleapis/java-spanner/issues/2736)) ([ba2a4af](https://github.com/googleapis/java-spanner/commit/ba2a4afa5c1d64c932e9687d52b15c28d9dd7d91)) +* Add SessionPoolOptions, SpannerOptions protos in executor protos ([#2932](https://github.com/googleapis/java-spanner/issues/2932)) ([1673fd7](https://github.com/googleapis/java-spanner/commit/1673fd70df4ebfaa4b5fa07112d152119427699a)) +* Support max_commit_delay in Connection API 
([#2954](https://github.com/googleapis/java-spanner/issues/2954)) ([a8f1852](https://github.com/googleapis/java-spanner/commit/a8f185261c812e7d6c92cb61ecc1f9c78ba3c4d9)) + + +### Bug Fixes + +* Executor framework changes skipped in clirr checks, and added exception for partition methods in admin class ([#3000](https://github.com/googleapis/java-spanner/issues/3000)) ([c2d8e95](https://github.com/googleapis/java-spanner/commit/c2d8e955abddb0117f1b3b94c2d9650d2cf4fdfd)) + + +### Dependencies + +* Update actions/checkout action to v4 ([#3006](https://github.com/googleapis/java-spanner/issues/3006)) ([368a9f3](https://github.com/googleapis/java-spanner/commit/368a9f33758961d8e3fd387ec94d380e7c6460cc)) +* Update actions/github-script action to v7 ([#3007](https://github.com/googleapis/java-spanner/issues/3007)) ([b0cfea6](https://github.com/googleapis/java-spanner/commit/b0cfea6e73b7293f564357e8d1c8c6bb2e0cf855)) +* Update actions/setup-java action to v4 ([#3008](https://github.com/googleapis/java-spanner/issues/3008)) ([d337080](https://github.com/googleapis/java-spanner/commit/d337080089dbd58cb4bf94f2cb5925f627435d39)) +* Update dependency com.google.cloud:google-cloud-monitoring to v3.42.0 ([#2997](https://github.com/googleapis/java-spanner/issues/2997)) ([0615beb](https://github.com/googleapis/java-spanner/commit/0615beb806ef62dbbfcc6bbffd082adc9c62372c)) +* Update dependency com.google.cloud:google-cloud-trace to v2.41.0 ([#2998](https://github.com/googleapis/java-spanner/issues/2998)) ([f50cd04](https://github.com/googleapis/java-spanner/commit/f50cd04660f480c62ddbd6c8a9e892cd95ec16b0)) +* Update dependency commons-io:commons-io to v2.16.1 ([#3020](https://github.com/googleapis/java-spanner/issues/3020)) ([aafd5b9](https://github.com/googleapis/java-spanner/commit/aafd5b9514c14a0dbfd0bf2616990f3c347ac0c6)) +* Update opentelemetry.version to v1.37.0 ([#3021](https://github.com/googleapis/java-spanner/issues/3021)) 
([8f1ed2a](https://github.com/googleapis/java-spanner/commit/8f1ed2ac20896fb413749bb18652764096f1fb2d)) +* Update stcarolas/setup-maven action to v5 ([#3009](https://github.com/googleapis/java-spanner/issues/3009)) ([541acd2](https://github.com/googleapis/java-spanner/commit/541acd23aaf2c9336615406e30618fb65606e6c5)) + +## [6.63.0](https://github.com/googleapis/java-spanner/compare/v6.62.1...v6.63.0) (2024-03-30) + + +### Features + +* Add support for transaction-level exclusion from change streams ([#2959](https://github.com/googleapis/java-spanner/issues/2959)) ([7ae376a](https://github.com/googleapis/java-spanner/commit/7ae376acea4dce7a0bb4565d6c9bfdbbb75146c6)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.40.0 ([#2987](https://github.com/googleapis/java-spanner/issues/2987)) ([0a1ffcb](https://github.com/googleapis/java-spanner/commit/0a1ffcb371bdee6e478e3aa53b0a4591055134e3)) +* Update dependency com.google.cloud:google-cloud-trace to v2.39.0 ([#2988](https://github.com/googleapis/java-spanner/issues/2988)) ([cf11641](https://github.com/googleapis/java-spanner/commit/cf116412d46c5047167d4dd60ef9c88c3d9c754b)) +* Update dependency commons-io:commons-io to v2.16.0 ([#2986](https://github.com/googleapis/java-spanner/issues/2986)) ([4697261](https://github.com/googleapis/java-spanner/commit/46972619f88018bad1b4e05526a618d38e2e0897)) + +## [6.62.1](https://github.com/googleapis/java-spanner/compare/v6.62.0...v6.62.1) (2024-03-28) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.39.0 ([#2966](https://github.com/googleapis/java-spanner/issues/2966)) ([a5cb1dd](https://github.com/googleapis/java-spanner/commit/a5cb1ddd065100497d9215eff30d57361d7e84de)) +* Update dependency com.google.cloud:google-cloud-trace to v2.38.0 ([#2967](https://github.com/googleapis/java-spanner/issues/2967)) 
([b2dc788](https://github.com/googleapis/java-spanner/commit/b2dc788d5a54244d83a192ecac894ff931f884c4)) + +## [6.62.0](https://github.com/googleapis/java-spanner/compare/v6.61.0...v6.62.0) (2024-03-19) + + +### Features + +* Allow attempt direct path xds via env var ([#2950](https://github.com/googleapis/java-spanner/issues/2950)) ([247a15f](https://github.com/googleapis/java-spanner/commit/247a15f2b8b858143bc906e0619f95a017ffe5c3)) +* Next release from main branch is 6.56.0 ([#2929](https://github.com/googleapis/java-spanner/issues/2929)) ([66374b1](https://github.com/googleapis/java-spanner/commit/66374b1c4ed88e01ff60fb8e1b7409e5dbbcb811)) + + +### Bug Fixes + +* Return type of max commit delay option. ([#2953](https://github.com/googleapis/java-spanner/issues/2953)) ([6e937ab](https://github.com/googleapis/java-spanner/commit/6e937ab16d130e72d633979c1a76bf7b3edbe7b6)) + + +### Performance Improvements + +* Keep comments when searching for params ([#2951](https://github.com/googleapis/java-spanner/issues/2951)) ([b782725](https://github.com/googleapis/java-spanner/commit/b782725b92a2662c42ad35647b23009ad95a99a5)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.38.0 ([#2942](https://github.com/googleapis/java-spanner/issues/2942)) ([ba665bd](https://github.com/googleapis/java-spanner/commit/ba665bd483ba70f09770d92028355ad499003fed)) +* Update dependency com.google.cloud:google-cloud-trace to v2.37.0 ([#2944](https://github.com/googleapis/java-spanner/issues/2944)) ([b5e608e](https://github.com/googleapis/java-spanner/commit/b5e608ef001473ab5575f1619804b351053c57f2)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.28.1 ([#2952](https://github.com/googleapis/java-spanner/issues/2952)) ([1e45237](https://github.com/googleapis/java-spanner/commit/1e45237dd235484a6a279f71ae7e126727382f9c)) +* Update opentelemetry.version to v1.36.0 ([#2945](https://github.com/googleapis/java-spanner/issues/2945)) 
([e70b035](https://github.com/googleapis/java-spanner/commit/e70b0357543d38b6e9265e04444cec494ebd6885)) + + +### Documentation + +* **samples:** Add tag to statement timeout sample ([#2931](https://github.com/googleapis/java-spanner/issues/2931)) ([2392afe](https://github.com/googleapis/java-spanner/commit/2392afed0d25266294e0ce11c6ae32d7307e6830)) + +## [6.61.0](https://github.com/googleapis/java-spanner/compare/v6.60.1...v6.61.0) (2024-03-04) + + +### Features + +* Support float32 type ([#2894](https://github.com/googleapis/java-spanner/issues/2894)) ([19b7976](https://github.com/googleapis/java-spanner/commit/19b79764294e938ad85d02b7c0662db6ec3afeda)) + + +### Bug Fixes + +* Flaky test issue due to AbortedException. ([#2925](https://github.com/googleapis/java-spanner/issues/2925)) ([cd34c1d](https://github.com/googleapis/java-spanner/commit/cd34c1d3ae9a5a36f4d5516dcf7c3667a9cf015a)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.27.0 ([#2935](https://github.com/googleapis/java-spanner/issues/2935)) ([f8f835a](https://github.com/googleapis/java-spanner/commit/f8f835a9da705605c492e232a58276c39d1d7e6c)) +* Update dependency org.json:json to v20240303 ([#2936](https://github.com/googleapis/java-spanner/issues/2936)) ([1d7044e](https://github.com/googleapis/java-spanner/commit/1d7044e97d16f5296b7de020cd24b11cbe2a7df0)) + + +### Documentation + +* Samples and tests for backup Admin APIs and overall spanner Admin APIs. ([#2882](https://github.com/googleapis/java-spanner/issues/2882)) ([de13636](https://github.com/googleapis/java-spanner/commit/de1363645e03f46deed5be41f90ddfed72766751)) +* Update all public documents to use auto-generated admin clients. 
([#2928](https://github.com/googleapis/java-spanner/issues/2928)) ([ccb110a](https://github.com/googleapis/java-spanner/commit/ccb110ad6835557870933c95cfd76580fd317a16)) + +## [6.60.1](https://github.com/googleapis/java-spanner/compare/v6.60.0...v6.60.1) (2024-02-23) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.37.0 ([#2920](https://github.com/googleapis/java-spanner/issues/2920)) ([a3441bb](https://github.com/googleapis/java-spanner/commit/a3441bbad546a1aac1349d6e142a4ac8d32d2a90)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.10.0 ([#2861](https://github.com/googleapis/java-spanner/issues/2861)) ([a652c3b](https://github.com/googleapis/java-spanner/commit/a652c3b6ef6d6ed87d581e73a26a5086acdc5f07)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.10.1 ([#2919](https://github.com/googleapis/java-spanner/issues/2919)) ([8800a28](https://github.com/googleapis/java-spanner/commit/8800a2894a1c17bde1a0da3ffcc868f10f7690d5)) +* Update dependency org.json:json to v20240205 ([#2913](https://github.com/googleapis/java-spanner/issues/2913)) ([277ed81](https://github.com/googleapis/java-spanner/commit/277ed81a0beb95ea57f95a9660a4a6b6adea645b)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.10.2 ([#2868](https://github.com/googleapis/java-spanner/issues/2868)) ([71a65ec](https://github.com/googleapis/java-spanner/commit/71a65ecee5af63996297f8692d569d2a9acfd8ac)) +* Update opentelemetry.version to v1.35.0 ([#2902](https://github.com/googleapis/java-spanner/issues/2902)) ([3286eae](https://github.com/googleapis/java-spanner/commit/3286eaea96a40c6ace8abed22040a637d291b09c)) + +## [6.60.0](https://github.com/googleapis/java-spanner/compare/v6.59.0...v6.60.0) (2024-02-21) + + +### Features + +* Add an API method for reordering firewall policies ([62319f0](https://github.com/googleapis/java-spanner/commit/62319f032163c4ad3e8771dd5f92e7b8a086b5ee)) +* **spanner:** Add field 
for multiplexed session in spanner.proto ([62319f0](https://github.com/googleapis/java-spanner/commit/62319f032163c4ad3e8771dd5f92e7b8a086b5ee)) +* Update TransactionOptions to include new option exclude_txn_from_change_streams ([#2853](https://github.com/googleapis/java-spanner/issues/2853)) ([62319f0](https://github.com/googleapis/java-spanner/commit/62319f032163c4ad3e8771dd5f92e7b8a086b5ee)) + + +### Bug Fixes + +* Add ensureDecoded to proto type ([#2897](https://github.com/googleapis/java-spanner/issues/2897)) ([e99b78c](https://github.com/googleapis/java-spanner/commit/e99b78c5d810195d368112eed2b185d2d99e62a9)) +* **spanner:** Fix write replace used by dataflow template and import export ([#2901](https://github.com/googleapis/java-spanner/issues/2901)) ([64b9042](https://github.com/googleapis/java-spanner/commit/64b90429d4fe53f8509a3923e046406b4bc5876a)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-trace to v2.36.0 ([#2749](https://github.com/googleapis/java-spanner/issues/2749)) ([51a348a](https://github.com/googleapis/java-spanner/commit/51a348a0c2b84106ea763721bed3420a0d07f30a)) + + +### Documentation + +* Update comments ([62319f0](https://github.com/googleapis/java-spanner/commit/62319f032163c4ad3e8771dd5f92e7b8a086b5ee)) +* Update the comment regarding eligible SQL shapes for PartitionQuery ([62319f0](https://github.com/googleapis/java-spanner/commit/62319f032163c4ad3e8771dd5f92e7b8a086b5ee)) + +## [6.59.0](https://github.com/googleapis/java-spanner/compare/v6.58.0...v6.59.0) (2024-02-15) + + +### Features + +* Support public methods to use autogenerated admin clients. 
([#2878](https://github.com/googleapis/java-spanner/issues/2878)) ([53bcb3e](https://github.com/googleapis/java-spanner/commit/53bcb3eca2e814472c3def24e8e03d47652a8e42)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.25.0 ([#2888](https://github.com/googleapis/java-spanner/issues/2888)) ([8e2da51](https://github.com/googleapis/java-spanner/commit/8e2da5126263c7acd134fb7fcfeb590ca190ce8e)) + + +### Documentation + +* README for OpenTelemetry metrics and traces ([#2880](https://github.com/googleapis/java-spanner/issues/2880)) ([c8632f5](https://github.com/googleapis/java-spanner/commit/c8632f5b2f462420a8c2a1f4308a68a18a414472)) +* Samples and tests for database Admin APIs. ([#2775](https://github.com/googleapis/java-spanner/issues/2775)) ([14ae01c](https://github.com/googleapis/java-spanner/commit/14ae01cd82e455a0dc22d7e3bb8c362e541ede12)) + +## [6.58.0](https://github.com/googleapis/java-spanner/compare/v6.57.0...v6.58.0) (2024-02-08) + + +### Features + +* Open telemetry implementation ([#2770](https://github.com/googleapis/java-spanner/issues/2770)) ([244d6a8](https://github.com/googleapis/java-spanner/commit/244d6a836795bf07dacd6b766436dbd6bf5fa912)) +* **spanner:** Support max_commit_delay in Spanner transactions ([#2854](https://github.com/googleapis/java-spanner/issues/2854)) ([e2b7ae6](https://github.com/googleapis/java-spanner/commit/e2b7ae66648ea775c18c71ab353edd6c0f50e7ac)) +* Support Directed Read in Connection API ([#2855](https://github.com/googleapis/java-spanner/issues/2855)) ([ee477c2](https://github.com/googleapis/java-spanner/commit/ee477c2e7c509ce4b7c43da3b68c1433c59e46fb)) + + +### Bug Fixes + +* Cast for Proto type ([#2862](https://github.com/googleapis/java-spanner/issues/2862)) ([0a95dba](https://github.com/googleapis/java-spanner/commit/0a95dba47681c9c4cc4e41ecfb5dadec6357bff6)) +* Ignore UnsupportedOperationException for virtual threads 
([#2866](https://github.com/googleapis/java-spanner/issues/2866)) ([aa9ad7f](https://github.com/googleapis/java-spanner/commit/aa9ad7f5a5e2405e8082a542916c3d1fa7d0fa25)) +* Use default query options with statement cache ([#2860](https://github.com/googleapis/java-spanner/issues/2860)) ([741e4cf](https://github.com/googleapis/java-spanner/commit/741e4cf4eb51c4635078cfe2c52b7462bd4cbbd8)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.24.0 ([#2856](https://github.com/googleapis/java-spanner/issues/2856)) ([968877e](https://github.com/googleapis/java-spanner/commit/968877e4eff7da3ff27180c2a6129b04922d1af4)) + +## [6.57.0](https://github.com/googleapis/java-spanner/compare/v6.56.0...v6.57.0) (2024-01-29) + + +### Features + +* Add FLOAT32 enum to TypeCode ([#2800](https://github.com/googleapis/java-spanner/issues/2800)) ([383fea5](https://github.com/googleapis/java-spanner/commit/383fea5b5dc434621585a1b5cfd128a01780472a)) +* Add support for Proto Columns ([#2779](https://github.com/googleapis/java-spanner/issues/2779)) ([30d37dd](https://github.com/googleapis/java-spanner/commit/30d37dd80c91b2dffdfee732677607ce028fb8d2)) +* **spanner:** Add proto descriptors for proto and enum types in create/update/get database ddl requests ([#2774](https://github.com/googleapis/java-spanner/issues/2774)) ([4a906bf](https://github.com/googleapis/java-spanner/commit/4a906bf2719c30dcd7371f497a8a28c250db77be)) + + +### Bug Fixes + +* Remove google-cloud-spanner-executor from the BOM ([#2844](https://github.com/googleapis/java-spanner/issues/2844)) ([655000a](https://github.com/googleapis/java-spanner/commit/655000a3b0471b279cbcbe8a4a601337e7274ef8)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.22.0 ([#2785](https://github.com/googleapis/java-spanner/issues/2785)) ([f689f74](https://github.com/googleapis/java-spanner/commit/f689f742d8754134523ed0394b9c1b8256adcae2)) +* Update 
dependency com.google.cloud:google-cloud-shared-dependencies to v3.23.0 ([#2801](https://github.com/googleapis/java-spanner/issues/2801)) ([95f064f](https://github.com/googleapis/java-spanner/commit/95f064f9f60a17de375e532ec6dd78dca0743e79)) + + +### Documentation + +* Samples and tests for instance APIs. ([#2768](https://github.com/googleapis/java-spanner/issues/2768)) ([88e24c7](https://github.com/googleapis/java-spanner/commit/88e24c7a7d046056605a2a824450e0153b339c86)) + +## [6.56.0](https://github.com/googleapis/java-spanner/compare/v6.55.0...v6.56.0) (2024-01-05) + + +### Features + +* Add autoscaling config in the instance to support autoscaling in systests ([#2756](https://github.com/googleapis/java-spanner/issues/2756)) ([99ae565](https://github.com/googleapis/java-spanner/commit/99ae565c5e90a2862b4f195fe64656ba8a05373d)) +* Add support for Directed Read options ([#2766](https://github.com/googleapis/java-spanner/issues/2766)) ([26c6c63](https://github.com/googleapis/java-spanner/commit/26c6c634b685bce66ce7caf05057a98e9cc6f5dc)) +* Update OwlBot.yaml file to pull autogenerated executor code ([#2754](https://github.com/googleapis/java-spanner/issues/2754)) ([20562d4](https://github.com/googleapis/java-spanner/commit/20562d4d7e62ab20bb1c4e78547b218a9a506f21)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.21.0 ([#2772](https://github.com/googleapis/java-spanner/issues/2772)) ([173f520](https://github.com/googleapis/java-spanner/commit/173f520f931073c4c6ddf3b3d98d255fb575914f)) + + +### Documentation + +* Samples and tests for auto-generated createDatabase and createInstance APIs. 
([#2764](https://github.com/googleapis/java-spanner/issues/2764)) ([74a586f](https://github.com/googleapis/java-spanner/commit/74a586f8713ef742d65400da8f04a750316faf78)) + +## [6.55.0](https://github.com/googleapis/java-spanner/compare/v6.54.0...v6.55.0) (2023-12-01) + + +### Features + +* Add java sample for managed autoscaler ([#2709](https://github.com/googleapis/java-spanner/issues/2709)) ([9ea4f4f](https://github.com/googleapis/java-spanner/commit/9ea4f4fe2925410b3defb4e53f3f0a328cc2e738)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.30.0 ([#2703](https://github.com/googleapis/java-spanner/issues/2703)) ([961aa78](https://github.com/googleapis/java-spanner/commit/961aa7894be41ff87f1b460aa374ee2ed75a163b)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.20.0 ([#2746](https://github.com/googleapis/java-spanner/issues/2746)) ([12bcabb](https://github.com/googleapis/java-spanner/commit/12bcabbf1ef82b19524400ebe280d9986bf70ea7)) +* Update dependency commons-io:commons-io to v2.15.1 ([#2745](https://github.com/googleapis/java-spanner/issues/2745)) ([b9d9571](https://github.com/googleapis/java-spanner/commit/b9d9571dcc2d1d004cd785d79e45754c0ce63a51)) + +## [6.54.0](https://github.com/googleapis/java-spanner/compare/v6.53.0...v6.54.0) (2023-11-15) + + +### Features + +* Enable session leaks prevention by cleaning up long-running tra… ([#2655](https://github.com/googleapis/java-spanner/issues/2655)) ([faa7e5d](https://github.com/googleapis/java-spanner/commit/faa7e5dff17897b0432bc505b7ed24c33805f418)) + + +### Bug Fixes + +* Copy backup issue when backup is done across different instance IDs ([#2732](https://github.com/googleapis/java-spanner/issues/2732)) ([7f6b158](https://github.com/googleapis/java-spanner/commit/7f6b1582770d2270efc9501136afb17a2677eaeb)) +* Respect SPANNER_EMULATOR_HOST env var when autoConfigEmulator=true 
([#2730](https://github.com/googleapis/java-spanner/issues/2730)) ([9c19934](https://github.com/googleapis/java-spanner/commit/9c19934a6170232f6ac2478ef9bfcdb2914d2562)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-trace to v2.30.0 ([#2725](https://github.com/googleapis/java-spanner/issues/2725)) ([8618042](https://github.com/googleapis/java-spanner/commit/8618042bb716d8a6626bacee59f9e6c6f0d50362)) + +## [6.53.0](https://github.com/googleapis/java-spanner/compare/v6.52.1...v6.53.0) (2023-11-06) + + +### Features + +* Move session lastUseTime parameter from PooledSession to SessionImpl class. Fix updating of the parameter for chained RPCs within one transaction. ([#2704](https://github.com/googleapis/java-spanner/issues/2704)) ([e75a281](https://github.com/googleapis/java-spanner/commit/e75a2818124621a3ab837151a8e1094fa6c3b8f3)) +* Rely on graal-sdk version declaration from property in java-shared-config ([#2696](https://github.com/googleapis/java-spanner/issues/2696)) ([cfab83a](https://github.com/googleapis/java-spanner/commit/cfab83ad3bd1a026e0b3da5a4cc2154b0f8c3ddf)) + + +### Bug Fixes + +* Prevent illegal negative timeout values into thread sleep() method in ITTransactionManagerTest. 
([#2715](https://github.com/googleapis/java-spanner/issues/2715)) ([1c26cf6](https://github.com/googleapis/java-spanner/commit/1c26cf60efa1b98203af9b21a47e37c8fb1e0e97)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.19.0 ([#2719](https://github.com/googleapis/java-spanner/issues/2719)) ([e320753](https://github.com/googleapis/java-spanner/commit/e320753b2bd125f94775db9c71a4b7803fa49c38)) +* Update dependency com.google.cloud:google-cloud-trace to v2.28.0 ([#2670](https://github.com/googleapis/java-spanner/issues/2670)) ([078b7ca](https://github.com/googleapis/java-spanner/commit/078b7ca95548ac984c79d29197032b3f813abbcf)) +* Update dependency com.google.cloud:google-cloud-trace to v2.29.0 ([#2714](https://github.com/googleapis/java-spanner/issues/2714)) ([b400eca](https://github.com/googleapis/java-spanner/commit/b400ecabb9fa6f262befa903163746fac2c7c15e)) +* Update dependency commons-cli:commons-cli to v1.6.0 ([#2710](https://github.com/googleapis/java-spanner/issues/2710)) ([e3e8f6a](https://github.com/googleapis/java-spanner/commit/e3e8f6ac82d827280299038d3962fe66b110e0c4)) +* Update dependency commons-io:commons-io to v2.15.0 ([#2712](https://github.com/googleapis/java-spanner/issues/2712)) ([a5f59aa](https://github.com/googleapis/java-spanner/commit/a5f59aa3e992d0594519983880a29f17301923e7)) +* Update dependency org.graalvm.buildtools:junit-platform-native to v0.9.28 ([#2692](https://github.com/googleapis/java-spanner/issues/2692)) ([d8a2b02](https://github.com/googleapis/java-spanner/commit/d8a2b02d43a68e04bebb2349af61cc8901ccd667)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.28 ([#2705](https://github.com/googleapis/java-spanner/issues/2705)) ([2b17f09](https://github.com/googleapis/java-spanner/commit/2b17f095a294defa5ea022c243fa750486b7d496)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.10.1 ([#2723](https://github.com/googleapis/java-spanner/issues/2723)) 
([9cf6d0e](https://github.com/googleapis/java-spanner/commit/9cf6d0eae5d2a86c89de2d252d0f4a4dab0b54a4)) + +## [6.52.1](https://github.com/googleapis/java-spanner/compare/v6.52.0...v6.52.1) (2023-10-20) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.18.0 ([#2691](https://github.com/googleapis/java-spanner/issues/2691)) ([b425021](https://github.com/googleapis/java-spanner/commit/b4250218a500eb1540920ed0023454d06c54d621)) + +## [6.52.0](https://github.com/googleapis/java-spanner/compare/v6.51.0...v6.52.0) (2023-10-19) + + +### Features + +* Add support for Managed Autoscaler ([#2624](https://github.com/googleapis/java-spanner/issues/2624)) ([e5e6923](https://github.com/googleapis/java-spanner/commit/e5e6923a351670ab237c411bb4a549533dac1b6b)) + +## [6.51.0](https://github.com/googleapis/java-spanner/compare/v6.50.1...v6.51.0) (2023-10-14) + + +### Features + +* **spanner:** Add autoscaling config to the instance proto ([#2674](https://github.com/googleapis/java-spanner/issues/2674)) ([8d38ca3](https://github.com/googleapis/java-spanner/commit/8d38ca393a6c0f9df18c9d02fa9392e11af01246)) + + +### Bug Fixes + +* Always include default client lib header ([#2676](https://github.com/googleapis/java-spanner/issues/2676)) ([74fd174](https://github.com/googleapis/java-spanner/commit/74fd174a84f6f97949b9caaadddf366aafd4a469)) + +## [6.50.1](https://github.com/googleapis/java-spanner/compare/v6.50.0...v6.50.1) (2023-10-11) + + +### Bug Fixes + +* Noop in case there is no change in autocommit value for setAutocommit() method ([#2662](https://github.com/googleapis/java-spanner/issues/2662)) ([9f51b64](https://github.com/googleapis/java-spanner/commit/9f51b6445f064439379af752372a3490a2fd5087)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.17.0 ([#2660](https://github.com/googleapis/java-spanner/issues/2660)) 
([96b9dd6](https://github.com/googleapis/java-spanner/commit/96b9dd6b6a0ee7b1a0a1cc58a8880a10799665e6)) +* Update dependency commons-io:commons-io to v2.14.0 ([#2649](https://github.com/googleapis/java-spanner/issues/2649)) ([fa1b73c](https://github.com/googleapis/java-spanner/commit/fa1b73c1bf4700be5e8865211817e2bc7cc77119)) + +## [6.50.0](https://github.com/googleapis/java-spanner/compare/v6.49.0...v6.50.0) (2023-10-09) + + +### Features + +* Support setting core pool size for async API in system property ([#2632](https://github.com/googleapis/java-spanner/issues/2632)) ([e51c55d](https://github.com/googleapis/java-spanner/commit/e51c55d332bacb9d174a24b0d842b2cba4762db8)), closes [#2631](https://github.com/googleapis/java-spanner/issues/2631) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-trace to v2.24.0 ([#2577](https://github.com/googleapis/java-spanner/issues/2577)) ([311c2ad](https://github.com/googleapis/java-spanner/commit/311c2ad97311490893f3abf4da5fe4d511c445dd)) + +## [6.49.0](https://github.com/googleapis/java-spanner/compare/v6.48.0...v6.49.0) (2023-09-28) + + +### Features + +* Add session pool option for modelling a timeout around session acquisition. 
([#2641](https://github.com/googleapis/java-spanner/issues/2641)) ([428e294](https://github.com/googleapis/java-spanner/commit/428e294b94392e290921b5c0eda0139c57d3a185)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.16.1 ([#2637](https://github.com/googleapis/java-spanner/issues/2637)) ([3f48624](https://github.com/googleapis/java-spanner/commit/3f486245f574f3a6abf4d3b9146b51dc92cf5eea)) + + +### Documentation + +* Improve timeout and retry sample ([#2630](https://github.com/googleapis/java-spanner/issues/2630)) ([f03ce56](https://github.com/googleapis/java-spanner/commit/f03ce56119e2985286ede15352f19c3cb6f39979)) +* Remove reference to returning clauses for Batch DML ([#2644](https://github.com/googleapis/java-spanner/issues/2644)) ([038d8ca](https://github.com/googleapis/java-spanner/commit/038d8cac3fe06ca2dcf0b4e85f5e536b73ce9313)) + +## [6.48.0](https://github.com/googleapis/java-spanner/compare/v6.47.0...v6.48.0) (2023-09-26) + + +### Features + +* Add support for BatchWriteAtLeastOnce ([#2520](https://github.com/googleapis/java-spanner/issues/2520)) ([8ea7bd1](https://github.com/googleapis/java-spanner/commit/8ea7bd18e92a7c5547d8a33bf46c1e322326447b)) + + +### Bug Fixes + +* Retry aborted errors for writeAtLeastOnce ([#2627](https://github.com/googleapis/java-spanner/issues/2627)) ([2addb19](https://github.com/googleapis/java-spanner/commit/2addb1930a7b9ada4a4304a44a36d8ff1397cf9e)) + + +### Dependencies + +* Update actions/checkout action to v4 ([#2608](https://github.com/googleapis/java-spanner/issues/2608)) ([59f3e70](https://github.com/googleapis/java-spanner/commit/59f3e7047a0a9578350b37b46395377d7e014763)) +* Update dependency org.graalvm.buildtools:junit-platform-native to v0.9.27 ([#2574](https://github.com/googleapis/java-spanner/issues/2574)) ([e804a4c](https://github.com/googleapis/java-spanner/commit/e804a4c60f369ca88b804fef182b5afae44bd05e)) +* Update dependency 
org.graalvm.buildtools:native-maven-plugin to v0.9.27 ([#2575](https://github.com/googleapis/java-spanner/issues/2575)) ([6fe132a](https://github.com/googleapis/java-spanner/commit/6fe132a7c1458da4fc28c950009d152643ced038)) + +## [6.47.0](https://github.com/googleapis/java-spanner/compare/v6.46.0...v6.47.0) (2023-09-12) + + +### Features + +* Add devcontainers for enabling github codespaces usage. ([#2605](https://github.com/googleapis/java-spanner/issues/2605)) ([a7d60f1](https://github.com/googleapis/java-spanner/commit/a7d60f13781f87054a1631ca511492c5c8334751)) +* Disable dynamic code loading properties by default ([#2606](https://github.com/googleapis/java-spanner/issues/2606)) ([d855ebb](https://github.com/googleapis/java-spanner/commit/d855ebbd2dec11cdd6cdbe326de81115632598cd)) + + +### Bug Fixes + +* Add reflection configurations for com.google.rpc classes ([#2617](https://github.com/googleapis/java-spanner/issues/2617)) ([c42460a](https://github.com/googleapis/java-spanner/commit/c42460ae7b6bb5874cc18c7aecff34186dcbff2a)) +* Avoid unbalanced session pool creation ([#2442](https://github.com/googleapis/java-spanner/issues/2442)) ([db751ce](https://github.com/googleapis/java-spanner/commit/db751ceebc8b6981d00cd07ce4742196cc1dd50d)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.15.0 ([#2615](https://github.com/googleapis/java-spanner/issues/2615)) ([ac762fb](https://github.com/googleapis/java-spanner/commit/ac762fbf079db79eab5f2ebee971b850ac89eb11)) + +## [6.46.0](https://github.com/googleapis/java-spanner/compare/v6.45.3...v6.46.0) (2023-09-06) + + +### Features + +* Adding support for databoost ([#2505](https://github.com/googleapis/java-spanner/issues/2505)) ([dd3e9a0](https://github.com/googleapis/java-spanner/commit/dd3e9a0fe4846edcab9501b71c3d9e0fa24ed75b)) +* Support PostgreSQL for autoConfigEmulator ([#2601](https://github.com/googleapis/java-spanner/issues/2601)) 
([fbf1df9](https://github.com/googleapis/java-spanner/commit/fbf1df9f3fb12faaead8634b88fd4843cbdedf5b)) + + +### Bug Fixes + +* Fix kokoro windows java8 ci ([#2573](https://github.com/googleapis/java-spanner/issues/2573)) ([465df7b](https://github.com/googleapis/java-spanner/commit/465df7bad12fbea7dbcf6dbabb1b29d088c42665)) + + +### Documentation + +* Add sample for transaction timeouts ([#2599](https://github.com/googleapis/java-spanner/issues/2599)) ([59cec9b](https://github.com/googleapis/java-spanner/commit/59cec9b9cdad169bd8de8ab7b264b04150dda7fb)) + +## [6.45.3](https://github.com/googleapis/java-spanner/compare/v6.45.2...v6.45.3) (2023-08-17) + + +### Bug Fixes + +* Use streaming read/query settings for stream retry ([#2579](https://github.com/googleapis/java-spanner/issues/2579)) ([f78b838](https://github.com/googleapis/java-spanner/commit/f78b838e294f9c29bfc34a5d964933657b70417f)) + +## [6.45.2](https://github.com/googleapis/java-spanner/compare/v6.45.1...v6.45.2) (2023-08-14) + + +### Bug Fixes + +* GetColumnCount would fail for empty partitioned result sets ([#2588](https://github.com/googleapis/java-spanner/issues/2588)) ([9a2f3fc](https://github.com/googleapis/java-spanner/commit/9a2f3fc01748224fc8084fbf2b4a0223426b1603)) + +## [6.45.1](https://github.com/googleapis/java-spanner/compare/v6.45.0...v6.45.1) (2023-08-11) + + +### Bug Fixes + +* Always allow metadata queries ([#2580](https://github.com/googleapis/java-spanner/issues/2580)) ([ebb17fc](https://github.com/googleapis/java-spanner/commit/ebb17fc8aeac5fc75e4f135f33dba970f2480585)) + +## [6.45.0](https://github.com/googleapis/java-spanner/compare/v6.44.0...v6.45.0) (2023-08-04) + + +### Features + +* Enable leader aware routing by default in Connection API. This enables its use in the JDBC driver and PGAdapter. The update contains performance optimisations that will reduce the latency of read/write transactions that originate from a region other than the default leader region. 
([2a85446](https://github.com/googleapis/java-spanner/commit/2a85446b162b006ce84a86285af1767c879b27ed)) +* Enable leader aware routing by default. This update contains performance optimisations that will reduce the latency of read/write transactions that originate from a region other than the default leader region. ([441c1b0](https://github.com/googleapis/java-spanner/commit/441c1b03c3e976c6304a99fefd93b5c4291e5364)) +* Long running transaction clean up background task. Adding configuration options for closing inactive transactions. ([#2419](https://github.com/googleapis/java-spanner/issues/2419)) ([423e1a4](https://github.com/googleapis/java-spanner/commit/423e1a4b483798d9683ff9bd232b53d76e09beb0)) +* Support partitioned queries + data boost in Connection API ([#2540](https://github.com/googleapis/java-spanner/issues/2540)) ([4e31d04](https://github.com/googleapis/java-spanner/commit/4e31d046f5d80abe8876a729ddba045c70f3261d)) + + +### Bug Fixes + +* Apply stream wait timeout ([#2544](https://github.com/googleapis/java-spanner/issues/2544)) ([5a12cd2](https://github.com/googleapis/java-spanner/commit/5a12cd29601253423c5738be5471a036fd0334be)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.14.0 ([#2562](https://github.com/googleapis/java-spanner/issues/2562)) ([dbd5c75](https://github.com/googleapis/java-spanner/commit/dbd5c75be39262003092ff4a925ed470cc45f8be)) +* Update dependency org.openjdk.jmh:jmh-core to v1.37 ([#2565](https://github.com/googleapis/java-spanner/issues/2565)) ([d5c36bf](https://github.com/googleapis/java-spanner/commit/d5c36bfbb67ecb14854944779da6e4dbd93f3559)) +* Update dependency org.openjdk.jmh:jmh-generator-annprocess to v1.37 ([#2566](https://github.com/googleapis/java-spanner/issues/2566)) ([73e92d4](https://github.com/googleapis/java-spanner/commit/73e92d42fe6d334b6efa6485246dc67858adb0a9)) + +## [6.44.0](https://github.com/googleapis/java-spanner/compare/v6.43.2...v6.44.0) 
(2023-07-27) + + +### Features + +* Enable leader aware routing by default. This update contains performance optimisations that will reduce the latency of read/write transactions that originate from a region other than the default leader region. ([55c93ac](https://github.com/googleapis/java-spanner/commit/55c93acfeb8c2a6e5cc2f99ca20d0b72fbe6f8a4)) +* Foreign key on delete cascade ([#2340](https://github.com/googleapis/java-spanner/issues/2340)) ([f659105](https://github.com/googleapis/java-spanner/commit/f6591053db1c38f0e13e35cba2087a68d3ab1b01)) + + +### Bug Fixes + +* Add imports used in sample files. ([#2532](https://github.com/googleapis/java-spanner/issues/2532)) ([9a6d3fc](https://github.com/googleapis/java-spanner/commit/9a6d3fcbaa8d44f2e08407252a69beca1e4525b1)) + + +### Documentation + +* Fixing errors ([#2536](https://github.com/googleapis/java-spanner/issues/2536)) ([8aa407f](https://github.com/googleapis/java-spanner/commit/8aa407f3e1b4c6cf66b679e698992a6a5e3034c0)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.22.0 ([#2525](https://github.com/googleapis/java-spanner/issues/2525)) ([be0db6f](https://github.com/googleapis/java-spanner/commit/be0db6f10509fe3e5f74aa6ca6569552e65cb87a)) +* Update dependency com.google.cloud:google-cloud-monitoring to v3.23.0 ([#2542](https://github.com/googleapis/java-spanner/issues/2542)) ([67351dd](https://github.com/googleapis/java-spanner/commit/67351dd2cb557d461421c4a0321ae6d2d0fd9dcb)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.13.1 ([#2537](https://github.com/googleapis/java-spanner/issues/2537)) ([9396d8d](https://github.com/googleapis/java-spanner/commit/9396d8d8b5450dd545687af6c513b7f6c7a6c283)) +* Update dependency com.google.cloud:google-cloud-trace to v2.21.0 ([#2526](https://github.com/googleapis/java-spanner/issues/2526)) ([2d95234](https://github.com/googleapis/java-spanner/commit/2d952347e0eb7db42387d8abb91d4b11d51cef9c)) +* 
Update dependency com.google.cloud:google-cloud-trace to v2.22.0 ([#2543](https://github.com/googleapis/java-spanner/issues/2543)) ([47c6a43](https://github.com/googleapis/java-spanner/commit/47c6a430405ebf1c2fe392991e3f4554e9ac37aa)) +* Update dependency org.graalvm.sdk:graal-sdk to v22.3.3 ([#2533](https://github.com/googleapis/java-spanner/issues/2533)) ([0806b11](https://github.com/googleapis/java-spanner/commit/0806b116cc6650b353cee26c83929e7bcdcb1c34)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.10.0 ([#2539](https://github.com/googleapis/java-spanner/issues/2539)) ([8801b2b](https://github.com/googleapis/java-spanner/commit/8801b2bf639b7903958668a2274a6e5d457de00a)) + +## [6.43.2](https://github.com/googleapis/java-spanner/compare/v6.43.1...v6.43.2) (2023-07-09) + + +### Bug Fixes + +* Recognize ABORT statements for PostgreSQL ([#2479](https://github.com/googleapis/java-spanner/issues/2479)) ([da47b0a](https://github.com/googleapis/java-spanner/commit/da47b0aef7a2e03fc9b5e25cf036ef8d8d001672)) + + +### Documentation + +* Add background info for session pool ([#2498](https://github.com/googleapis/java-spanner/issues/2498)) ([0bbb1a1](https://github.com/googleapis/java-spanner/commit/0bbb1a1b5ac6b9d4ea061a2f2a4d26c3bd958d7e)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.13.0 ([#2521](https://github.com/googleapis/java-spanner/issues/2521)) ([bdb2461](https://github.com/googleapis/java-spanner/commit/bdb2461dfa90535241c333d1cfee33afc2b33eca)) + +## [6.43.1](https://github.com/googleapis/java-spanner/compare/v6.43.0...v6.43.1) (2023-06-26) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.20.0 ([#2492](https://github.com/googleapis/java-spanner/issues/2492)) ([faa6807](https://github.com/googleapis/java-spanner/commit/faa68073673e789e35b600dab72152591a647dc6)) +* Update dependency com.google.cloud:google-cloud-monitoring to v3.21.0 
([#2510](https://github.com/googleapis/java-spanner/issues/2510)) ([f10400b](https://github.com/googleapis/java-spanner/commit/f10400baf2d320991e75794250b9e1b2fb218718)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.12.0 with temp exclusions. ([#2512](https://github.com/googleapis/java-spanner/issues/2512)) ([ce04645](https://github.com/googleapis/java-spanner/commit/ce0464527ef489d351b9086f6bb8922f295f1897)) +* Update dependency com.google.cloud:google-cloud-trace to v2.19.0 ([#2493](https://github.com/googleapis/java-spanner/issues/2493)) ([1dc7cea](https://github.com/googleapis/java-spanner/commit/1dc7cea723658c43b8c8d2e085c964371fb72223)) +* Update dependency com.google.cloud:google-cloud-trace to v2.20.0 ([#2511](https://github.com/googleapis/java-spanner/issues/2511)) ([2ea52ec](https://github.com/googleapis/java-spanner/commit/2ea52ec1cef2468e6c36b76797a3878f270badaa)) +* Update dependency commons-io:commons-io to v2.13.0 ([#2490](https://github.com/googleapis/java-spanner/issues/2490)) ([b087b0e](https://github.com/googleapis/java-spanner/commit/b087b0e813cacb4f08d12815d9371fe9c004ca9e)) +* Update dependency org.graalvm.buildtools:junit-platform-native to v0.9.23 ([#2500](https://github.com/googleapis/java-spanner/issues/2500)) ([0b794a6](https://github.com/googleapis/java-spanner/commit/0b794a68d57eb990e013fdd05c72eaed868497b0)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.23 ([#2501](https://github.com/googleapis/java-spanner/issues/2501)) ([9db5c78](https://github.com/googleapis/java-spanner/commit/9db5c7850b53fa10d1856d88908d5e8e95467206)) +* Update dependency org.json:json to v20230618 ([#2504](https://github.com/googleapis/java-spanner/issues/2504)) ([8a87fee](https://github.com/googleapis/java-spanner/commit/8a87fee19bb2dd41495a15740893375c8778f71a)) + +## [6.43.0](https://github.com/googleapis/java-spanner/compare/v6.42.3...v6.43.0) (2023-06-07) + + +### Features + +* Delay transaction start 
option ([#2462](https://github.com/googleapis/java-spanner/issues/2462)) ([f1cbd16](https://github.com/googleapis/java-spanner/commit/f1cbd168a7e5f48206cdfc2d782835cf7ccb8b0d)) +* Make administrative request retries optional ([#2476](https://github.com/googleapis/java-spanner/issues/2476)) ([ee6548c](https://github.com/googleapis/java-spanner/commit/ee6548cfa511d6efc99f508290ed0b1ce025a4cc)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.11.0 ([#2486](https://github.com/googleapis/java-spanner/issues/2486)) ([82400d5](https://github.com/googleapis/java-spanner/commit/82400d5576c3ffe08ff6bb94d8b1a307e2f41662)) + +## [6.42.3](https://github.com/googleapis/java-spanner/compare/v6.42.2...v6.42.3) (2023-05-31) + + +### Performance Improvements + +* Only capture the call stack if the call is actually async ([#2471](https://github.com/googleapis/java-spanner/issues/2471)) ([ae9c8ad](https://github.com/googleapis/java-spanner/commit/ae9c8add484bc0f7808571cbcffb7b352d6ed739)) + +## [6.42.2](https://github.com/googleapis/java-spanner/compare/v6.42.1...v6.42.2) (2023-05-30) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.19.0 ([#2466](https://github.com/googleapis/java-spanner/issues/2466)) ([6de2cf6](https://github.com/googleapis/java-spanner/commit/6de2cf6a2d075b4347d69b9af21ac0cf96413884)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.10.1 ([#2465](https://github.com/googleapis/java-spanner/issues/2465)) ([0a89f49](https://github.com/googleapis/java-spanner/commit/0a89f49cd55311f4cb84a501aa302eab88b46575)) +* Update dependency com.google.cloud:google-cloud-trace to v2.18.0 ([#2467](https://github.com/googleapis/java-spanner/issues/2467)) ([45609ed](https://github.com/googleapis/java-spanner/commit/45609ed65e49147077eaaf3eb90ab0c732eef80b)) + +## [6.42.1](https://github.com/googleapis/java-spanner/compare/v6.42.0...v6.42.1) (2023-05-22) + + +### 
Dependencies + +* Update dependency commons-io:commons-io to v2.12.0 ([#2439](https://github.com/googleapis/java-spanner/issues/2439)) ([d08b226](https://github.com/googleapis/java-spanner/commit/d08b226d5da6272b2de5f66ee1657d03268e396d)) + +## [6.42.0](https://github.com/googleapis/java-spanner/compare/v6.41.0...v6.42.0) (2023-05-15) + + +### Features + +* Add support for UpdateDatabase in Cloud Spanner ([#2265](https://github.com/googleapis/java-spanner/issues/2265)) ([2ea06e7](https://github.com/googleapis/java-spanner/commit/2ea06e70a6f22635bcad7b7e4c79d0cf710dc6dc)) +* Add support for UpdateDatabase in Cloud Spanner ([#2429](https://github.com/googleapis/java-spanner/issues/2429)) ([09f20bd](https://github.com/googleapis/java-spanner/commit/09f20bd43913a7a01985fd290964d134612c14eb)) + + +### Bug Fixes + +* Add error details for INTERNAL error ([#2413](https://github.com/googleapis/java-spanner/issues/2413)) ([ed62aa6](https://github.com/googleapis/java-spanner/commit/ed62aa666ae34cf5e552e19b6b5dc2a8c6609e4e)) +* Use javax.annotation.Nonnull in executor framework ([#2414](https://github.com/googleapis/java-spanner/issues/2414)) ([afcc598](https://github.com/googleapis/java-spanner/commit/afcc598e05c75610db8d0adacd4da79b4c124122)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.18.0 ([#2426](https://github.com/googleapis/java-spanner/issues/2426)) ([05a45f8](https://github.com/googleapis/java-spanner/commit/05a45f81c2c71dd236fa36cc987e78a6aa31b594)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.9.0 ([#2427](https://github.com/googleapis/java-spanner/issues/2427)) ([42dbfe3](https://github.com/googleapis/java-spanner/commit/42dbfe3600b1d482d64c6c4f6865f88db399bae3)) +* Update dependency com.google.cloud:google-cloud-trace to v2.17.0 ([#2428](https://github.com/googleapis/java-spanner/issues/2428)) 
([6f7fee8](https://github.com/googleapis/java-spanner/commit/6f7fee81233811f5bc002f212c8972ffc6afbe16)) +* Update dependency org.graalvm.buildtools:junit-platform-native to v0.9.22 ([#2423](https://github.com/googleapis/java-spanner/issues/2423)) ([679bb36](https://github.com/googleapis/java-spanner/commit/679bb366162575c28bab1df9b87d01517ea8d5aa)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.22 ([#2424](https://github.com/googleapis/java-spanner/issues/2424)) ([a72f4ff](https://github.com/googleapis/java-spanner/commit/a72f4ff64cce2e9c746e8f6a9e107cbd72afa67f)) +* Update dependency org.graalvm.sdk:graal-sdk to v22.3.2 ([#2391](https://github.com/googleapis/java-spanner/issues/2391)) ([c082a1f](https://github.com/googleapis/java-spanner/commit/c082a1fccb79cf4c001519eba4a75cef30150541)) + +## [6.41.0](https://github.com/googleapis/java-spanner/compare/v6.40.1...v6.41.0) (2023-04-28) + + +### Features + +* Add TransactionExecutionOptions support to executor. ([#2396](https://github.com/googleapis/java-spanner/issues/2396)) ([8327f21](https://github.com/googleapis/java-spanner/commit/8327f210df86bf681ffed6a78ccc9e8fd899c967)) +* Leader Aware Routing ([#2214](https://github.com/googleapis/java-spanner/issues/2214)) ([9695ace](https://github.com/googleapis/java-spanner/commit/9695acee9195b50e525d87700e86d701b1d9eed2)) +* Make leak detection configurable for connections ([#2405](https://github.com/googleapis/java-spanner/issues/2405)) ([85213c8](https://github.com/googleapis/java-spanner/commit/85213c8764fcb7fb12df49baaac9bd00e095f269)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-spanner-executor-v1 to v1.4.0 ([#2395](https://github.com/googleapis/java-spanner/issues/2395)) ([02dc53c](https://github.com/googleapis/java-spanner/commit/02dc53c097bae3f20d7915fecc9c236c4a5f91f9)) +* Update dependency com.google.cloud:google-cloud-monitoring to v3.17.0 
([#2406](https://github.com/googleapis/java-spanner/issues/2406)) ([d46097f](https://github.com/googleapis/java-spanner/commit/d46097f9f17d9009d211c8c0f16b3e084f8fdbad)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.8.0 ([#2400](https://github.com/googleapis/java-spanner/issues/2400)) ([b815cb8](https://github.com/googleapis/java-spanner/commit/b815cb88ff29fb5b9a5d7998e765548244f287c1)) +* Update dependency com.google.cloud:google-cloud-trace to v2.16.0 ([#2407](https://github.com/googleapis/java-spanner/issues/2407)) ([7993be2](https://github.com/googleapis/java-spanner/commit/7993be25e9f380071cded2fa4c2bf630d760a53e)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.9.3 ([#2401](https://github.com/googleapis/java-spanner/issues/2401)) ([8aa7a1d](https://github.com/googleapis/java-spanner/commit/8aa7a1dbbf484446ae8eed3cb27d16fc65e6de83)) + +## [6.40.1](https://github.com/googleapis/java-spanner/compare/v6.40.0...v6.40.1) (2023-04-17) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.16.0 ([#2383](https://github.com/googleapis/java-spanner/issues/2383)) ([5d5c33a](https://github.com/googleapis/java-spanner/commit/5d5c33ae7c01e10112c72777f202187a50b55ac3)) +* Update dependency com.google.cloud:google-cloud-trace to v2.15.0 ([#2384](https://github.com/googleapis/java-spanner/issues/2384)) ([6b4ce1f](https://github.com/googleapis/java-spanner/commit/6b4ce1fc7ffd837fab6250e36269589d95f5b8c6)) + +## [6.40.0](https://github.com/googleapis/java-spanner/compare/v6.39.0...v6.40.0) (2023-04-14) + + +### Features + +* Savepoints ([#2278](https://github.com/googleapis/java-spanner/issues/2278)) ([b02f584](https://github.com/googleapis/java-spanner/commit/b02f58435b97346cc8e08a96635affe8383981bb)) + + +### Performance Improvements + +* Remove custom transport executor ([#2366](https://github.com/googleapis/java-spanner/issues/2366)) 
([e27dbe5](https://github.com/googleapis/java-spanner/commit/e27dbe5f58229dab208eeeed44d53e741700c814)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.7.0 ([#2377](https://github.com/googleapis/java-spanner/issues/2377)) ([40402af](https://github.com/googleapis/java-spanner/commit/40402af54f94f16619d018e252181db29ae6855e)) +* Update dependency org.graalvm.buildtools:junit-platform-native to v0.9.21 ([#2379](https://github.com/googleapis/java-spanner/issues/2379)) ([ae7262d](https://github.com/googleapis/java-spanner/commit/ae7262d37391c0ec2fee1dcbb24899e4fa16ae17)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.21 ([#2380](https://github.com/googleapis/java-spanner/issues/2380)) ([0cb159e](https://github.com/googleapis/java-spanner/commit/0cb159efc97f02b42f064244e3812a0fd3d82db6)) + +## [6.39.0](https://github.com/googleapis/java-spanner/compare/v6.38.2...v6.39.0) (2023-04-11) + + +### Features + +* Capture stack trace for session checkout is now optional ([#2350](https://github.com/googleapis/java-spanner/issues/2350)) ([6b6427a](https://github.com/googleapis/java-spanner/commit/6b6427a25af25fde944dfc1dd4bf6a6463682caf)) + +## [6.38.2](https://github.com/googleapis/java-spanner/compare/v6.38.1...v6.38.2) (2023-04-01) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.15.0 ([#2356](https://github.com/googleapis/java-spanner/issues/2356)) ([e4c001a](https://github.com/googleapis/java-spanner/commit/e4c001a2a78af756213fb28e01c571721e105262)) +* Update dependency com.google.cloud:google-cloud-trace to v2.14.0 ([#2357](https://github.com/googleapis/java-spanner/issues/2357)) ([dbb8e66](https://github.com/googleapis/java-spanner/commit/dbb8e669d855c08f48c15c9eafec03a85fa08bca)) + +## [6.38.1](https://github.com/googleapis/java-spanner/compare/v6.38.0...v6.38.1) (2023-03-29) + + +### Dependencies + +* Update dependency 
com.google.cloud:google-cloud-shared-dependencies to v3.6.0 ([#2352](https://github.com/googleapis/java-spanner/issues/2352)) ([19175ce](https://github.com/googleapis/java-spanner/commit/19175ce22777ac68f8c825a438c0a2503234aa42)) + +## [6.38.0](https://github.com/googleapis/java-spanner/compare/v6.37.0...v6.38.0) (2023-03-20) + + +### Features + +* Add option to wait on session pool creation ([#2329](https://github.com/googleapis/java-spanner/issues/2329)) ([ff17244](https://github.com/googleapis/java-spanner/commit/ff17244ee918fa17c96488a0f7081728cda7b342)) +* Add PartitionedUpdate support to executor ([#2228](https://github.com/googleapis/java-spanner/issues/2228)) ([2c8ecf6](https://github.com/googleapis/java-spanner/commit/2c8ecf6fee591df95ee4abfa230c3fcf0c34c589)) +* Adding support for databoost enabled in PartitionedRead and PartitionedQuery ([#2316](https://github.com/googleapis/java-spanner/issues/2316)) ([f39e4a3](https://github.com/googleapis/java-spanner/commit/f39e4a383cbe720b9814077317940fa3452e2f96)) + + +### Bug Fixes + +* Correcting the proto field Id for field data_boost_enabled ([#2328](https://github.com/googleapis/java-spanner/issues/2328)) ([6159d7e](https://github.com/googleapis/java-spanner/commit/6159d7ec49b17f6bc40e1b8c93d1e64198c59dcf)) +* Update executeCloudBatchDmlUpdates. 
([#2326](https://github.com/googleapis/java-spanner/issues/2326)) ([27ef53c](https://github.com/googleapis/java-spanner/commit/27ef53c8447bd51a56fdfe6b2b206afe234fad80)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.14.0 ([#2333](https://github.com/googleapis/java-spanner/issues/2333)) ([9c81109](https://github.com/googleapis/java-spanner/commit/9c81109e452d6bae2598cf6cf541a09423a8ed6e)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.5.0 ([#2335](https://github.com/googleapis/java-spanner/issues/2335)) ([5eac2be](https://github.com/googleapis/java-spanner/commit/5eac2beb2ce5eebb61e70428e2ac2e11593fc986)) +* Update dependency com.google.cloud:google-cloud-trace to v2.13.0 ([#2334](https://github.com/googleapis/java-spanner/issues/2334)) ([c461ba0](https://github.com/googleapis/java-spanner/commit/c461ba0b1a145cc3e9bee805ec6ad827376e5168)) + +## [6.37.0](https://github.com/googleapis/java-spanner/compare/v6.36.1...v6.37.0) (2023-03-03) + + +### Features + +* Adding new fields for Serverless analytics ([#2315](https://github.com/googleapis/java-spanner/issues/2315)) ([ce9cd74](https://github.com/googleapis/java-spanner/commit/ce9cd7469e2fed15711a8dffe944934cdaa45ce8)) + + +### Bug Fixes + +* Update test certificate name. 
([#2300](https://github.com/googleapis/java-spanner/issues/2300)) ([18e76d6](https://github.com/googleapis/java-spanner/commit/18e76d6636c530c9cfc0ac872d72e321e75c990e)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-spanner-executor-v1 to v1.3.0 ([#2306](https://github.com/googleapis/java-spanner/issues/2306)) ([8372250](https://github.com/googleapis/java-spanner/commit/8372250e0aaae68b0d610d59c1ee88c4dc0d9e8b)) +* Update dependency com.google.cloud:google-cloud-monitoring to v3.13.0 ([#2311](https://github.com/googleapis/java-spanner/issues/2311)) ([6ba613b](https://github.com/googleapis/java-spanner/commit/6ba613b44598e48699aca320683e65572a730fc7)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.4.0 ([#2312](https://github.com/googleapis/java-spanner/issues/2312)) ([266c49c](https://github.com/googleapis/java-spanner/commit/266c49cc58beaa935a328599a3e75d3b1fb4988d)) +* Update dependency com.google.cloud:google-cloud-trace to v2.12.0 ([#2313](https://github.com/googleapis/java-spanner/issues/2313)) ([e5f76c6](https://github.com/googleapis/java-spanner/commit/e5f76c6598887b616d371b4d0b3551e236e080f8)) +* Update dependency org.json:json to v20230227 ([#2310](https://github.com/googleapis/java-spanner/issues/2310)) ([badcc14](https://github.com/googleapis/java-spanner/commit/badcc14182244929042412f97e5a7e05799eea22)) + +## [6.36.1](https://github.com/googleapis/java-spanner/compare/v6.36.0...v6.36.1) (2023-02-21) + + +### Bug Fixes + +* Prevent illegal negative timeout values into thread sleep() method while retrying exceptions in unit tests. 
([#2268](https://github.com/googleapis/java-spanner/issues/2268)) ([ce66098](https://github.com/googleapis/java-spanner/commit/ce66098c7139ea13d5ea91cf6fbceb5c732b392d)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-spanner-executor-v1 to v1.2.0 ([#2256](https://github.com/googleapis/java-spanner/issues/2256)) ([f0ca86a](https://github.com/googleapis/java-spanner/commit/f0ca86a0858bde84cc38f1ad8fae5f3c4f4f3395)) +* Update dependency com.google.cloud:google-cloud-monitoring to v3.12.0 ([#2284](https://github.com/googleapis/java-spanner/issues/2284)) ([0be701a](https://github.com/googleapis/java-spanner/commit/0be701a8b59277f2cfb990a88e4f1dafcbafdd97)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.3.0 ([#2285](https://github.com/googleapis/java-spanner/issues/2285)) ([bb5d5c6](https://github.com/googleapis/java-spanner/commit/bb5d5c66e78812b943a85e0fd888e7021c11bde1)) +* Update dependency com.google.cloud:google-cloud-trace to v2.11.0 ([#2286](https://github.com/googleapis/java-spanner/issues/2286)) ([3c80932](https://github.com/googleapis/java-spanner/commit/3c80932d577de0ea108e695d0a4e542fbfc01deb)) +* Update dependency org.graalvm.buildtools:junit-platform-native to v0.9.20 ([#2280](https://github.com/googleapis/java-spanner/issues/2280)) ([685d1ea](https://github.com/googleapis/java-spanner/commit/685d1ea1c3bf59cd71093a68c260276c605d835f)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.20 ([#2281](https://github.com/googleapis/java-spanner/issues/2281)) ([f2aabc2](https://github.com/googleapis/java-spanner/commit/f2aabc24770d1b9c505dfc96b39fe81c6a0ad5a5)) + +## [6.36.0](https://github.com/googleapis/java-spanner/compare/v6.35.2...v6.36.0) (2023-02-08) + + +### Features + +* Support UNRECOGNIZED types + decode BYTES columns lazily ([#2219](https://github.com/googleapis/java-spanner/issues/2219)) 
([fc721c4](https://github.com/googleapis/java-spanner/commit/fc721c4d30de6ed9e5bc4fbbe0e1e7b79a5c7490)) + + +### Bug Fixes + +* **java:** Skip fixing poms for special modules ([#1744](https://github.com/googleapis/java-spanner/issues/1744)) ([#2244](https://github.com/googleapis/java-spanner/issues/2244)) ([e7f4b40](https://github.com/googleapis/java-spanner/commit/e7f4b4016f8c4c7e4fac0b822f5af2cffd181134)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.11.0 ([#2262](https://github.com/googleapis/java-spanner/issues/2262)) ([d566613](https://github.com/googleapis/java-spanner/commit/d566613442217bdfc69caea7242464fba2647519)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.2.0 ([#2264](https://github.com/googleapis/java-spanner/issues/2264)) ([b5fdbc0](https://github.com/googleapis/java-spanner/commit/b5fdbc0accdaaf1f63c62c1837d72bb378dc8f43)) +* Update dependency com.google.cloud:google-cloud-trace to v2.10.0 ([#2263](https://github.com/googleapis/java-spanner/issues/2263)) ([96f0c81](https://github.com/googleapis/java-spanner/commit/96f0c8181aeb8ca75647a783d8b163f371ad937e)) + +## [6.35.2](https://github.com/googleapis/java-spanner/compare/v6.35.1...v6.35.2) (2023-01-24) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.10.0 ([#2249](https://github.com/googleapis/java-spanner/issues/2249)) ([d18780e](https://github.com/googleapis/java-spanner/commit/d18780ec0278fc49495939647fe6a2f9e0b4f94e)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.1.2 ([#2246](https://github.com/googleapis/java-spanner/issues/2246)) ([1adaf7c](https://github.com/googleapis/java-spanner/commit/1adaf7cae629ba7b9903d6512adc7b13b6d1208e)) +* Update dependency com.google.cloud:google-cloud-trace to v2.9.0 ([#2250](https://github.com/googleapis/java-spanner/issues/2250)) 
([3cd5ab0](https://github.com/googleapis/java-spanner/commit/3cd5ab05e1fd24090fd58c2320b6875135e49b69)) + +## [6.35.1](https://github.com/googleapis/java-spanner/compare/v6.35.0...v6.35.1) (2023-01-18) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.9.0 ([#2230](https://github.com/googleapis/java-spanner/issues/2230)) ([717f70f](https://github.com/googleapis/java-spanner/commit/717f70f76f915e15a7283b32a83a6f4ac64fc931)) +* Update dependency com.google.cloud:google-cloud-trace to v2.8.0 ([#2231](https://github.com/googleapis/java-spanner/issues/2231)) ([557ea16](https://github.com/googleapis/java-spanner/commit/557ea164ebf948cd78f937c6996fd21e9618d3ae)) +* Update dependency org.graalvm.sdk:graal-sdk to v22.3.1 ([#2238](https://github.com/googleapis/java-spanner/issues/2238)) ([d5f5237](https://github.com/googleapis/java-spanner/commit/d5f52375394ef617f4fcb823937a374930f941e7)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.9.2 ([#2223](https://github.com/googleapis/java-spanner/issues/2223)) ([3278f91](https://github.com/googleapis/java-spanner/commit/3278f9167b1b2688ed090a7dfd5874e88b8945a5)) + +## [6.35.0](https://github.com/googleapis/java-spanner/compare/v6.34.1...v6.35.0) (2023-01-12) + + +### Features + +* Add support for new cloud client test framework in google-cloud-spanner-executor ([#2217](https://github.com/googleapis/java-spanner/issues/2217)) ([d75ebc1](https://github.com/googleapis/java-spanner/commit/d75ebc1387de7ba0e0a32dfcdd564392d43ff555)) +* **spanner:** Add samples for fine grained access control ([#2172](https://github.com/googleapis/java-spanner/issues/2172)) ([77969e3](https://github.com/googleapis/java-spanner/commit/77969e35feee4dee3460fcdc45227e9a9d924d74)) + + +### Bug Fixes + +* Retry on RST_STREAM internal error ([#2111](https://github.com/googleapis/java-spanner/issues/2111)) 
([d5372e6](https://github.com/googleapis/java-spanner/commit/d5372e662624831abc694d81acecf797d32d86e3)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.8.0 ([#2192](https://github.com/googleapis/java-spanner/issues/2192)) ([fe7e755](https://github.com/googleapis/java-spanner/commit/fe7e755a798b584bf79d16d1f419b1ca7f957172)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.1.1 ([#2222](https://github.com/googleapis/java-spanner/issues/2222)) ([7d3bcca](https://github.com/googleapis/java-spanner/commit/7d3bcca4e5846d823106f724fef42d2ef3a1c822)) +* Update dependency com.google.cloud:google-cloud-trace to v2.7.0 ([#2193](https://github.com/googleapis/java-spanner/issues/2193)) ([da2b924](https://github.com/googleapis/java-spanner/commit/da2b924e037dd366d171c481c6db799de7cacc22)) +* Update dependency org.graalvm.buildtools:junit-platform-native to v0.9.19 ([#2180](https://github.com/googleapis/java-spanner/issues/2180)) ([43b54e9](https://github.com/googleapis/java-spanner/commit/43b54e92b4df3ec6474b8ba7fef61b5b613e6ab0)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.19 ([#2181](https://github.com/googleapis/java-spanner/issues/2181)) ([b42eb38](https://github.com/googleapis/java-spanner/commit/b42eb3866e1fd74f9a9ad2a9dc3d100ac0893f38)) + +## [6.34.1](https://github.com/googleapis/java-spanner/compare/v6.34.0...v6.34.1) (2022-12-13) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.1.0 ([#2187](https://github.com/googleapis/java-spanner/issues/2187)) ([4d9df2b](https://github.com/googleapis/java-spanner/commit/4d9df2bac3a2dd6c910ba5fdd466ccd43a226c7f)) + +## [6.34.0](https://github.com/googleapis/java-spanner/compare/v6.33.0...v6.34.0) (2022-12-12) + + +### Features + +* Setting up 6.33.x branch ([#2184](https://github.com/googleapis/java-spanner/issues/2184)) 
([e237a21](https://github.com/googleapis/java-spanner/commit/e237a213cf5cb5edc338ca4e5f8ad5dd0593d2d1)) + + +### Bug Fixes + +* Remove the statement of session number limits ([#1928](https://github.com/googleapis/java-spanner/issues/1928)) ([ddd0625](https://github.com/googleapis/java-spanner/commit/ddd062527674659ca2ea73e079bca4dee62ca67f)), closes [#1927](https://github.com/googleapis/java-spanner/issues/1927) +* Update samples/snippets pom.xml configuration to avoid fat jar ([#2100](https://github.com/googleapis/java-spanner/issues/2100)) ([19058b4](https://github.com/googleapis/java-spanner/commit/19058b4cd324ce33e8dd52447bde2486c87d4754)) +* Use a proper endpoint for DirectPath tests ([#2186](https://github.com/googleapis/java-spanner/issues/2186)) ([4d74a0d](https://github.com/googleapis/java-spanner/commit/4d74a0d8ae48e190c126ab4047b81cca117f4de1)) + + +### Dependencies + +* Update dependency org.graalvm.buildtools:junit-platform-native to v0.9.18 ([#2171](https://github.com/googleapis/java-spanner/issues/2171)) ([f348780](https://github.com/googleapis/java-spanner/commit/f3487805fe5f976596e94047c3796bc623eeae95)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.18 ([#2145](https://github.com/googleapis/java-spanner/issues/2145)) ([dcdd2c3](https://github.com/googleapis/java-spanner/commit/dcdd2c3b684e38892fac0abbdf06081e9c7d83b2)) + +## [6.33.0](https://github.com/googleapis/java-spanner/compare/v6.32.0...v6.33.0) (2022-11-17) + + +### Features + +* Adding samples for Jsonb data type ([#2147](https://github.com/googleapis/java-spanner/issues/2147)) ([1112203](https://github.com/googleapis/java-spanner/commit/1112203bd6bde68fcd04ae68a2a31ec88dd5b1ac)) +* Analyze update returns param types ([#2156](https://github.com/googleapis/java-spanner/issues/2156)) ([7c5e3da](https://github.com/googleapis/java-spanner/commit/7c5e3da4c128cb9220213db8b3e2291e33566715)) +* Support DML with Returning clause in Connection API 
([#1978](https://github.com/googleapis/java-spanner/issues/1978)) ([aac20be](https://github.com/googleapis/java-spanner/commit/aac20bedf9ee7a6a2170f87fa88373b7d364ed9f)) +* Support PostgreSQL END statement ([#2131](https://github.com/googleapis/java-spanner/issues/2131)) ([4c29c17](https://github.com/googleapis/java-spanner/commit/4c29c17fb35e51fdad99e393a8f6bb57c914dc8a)) +* Update transaction.proto to include different lock modes ([#2112](https://github.com/googleapis/java-spanner/issues/2112)) ([d0195b4](https://github.com/googleapis/java-spanner/commit/d0195b45423b73969636bc911980613a46dffa97)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.7.0 ([#2164](https://github.com/googleapis/java-spanner/issues/2164)) ([82385b8](https://github.com/googleapis/java-spanner/commit/82385b8526e0299e8c85e4435e3c740474de854c)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.0.6 ([#2150](https://github.com/googleapis/java-spanner/issues/2150)) ([dba545f](https://github.com/googleapis/java-spanner/commit/dba545ff5ebb069a78b42cbffff032d66dc3d062)) +* Update dependency com.google.cloud:google-cloud-trace to v2.6.0 ([#2165](https://github.com/googleapis/java-spanner/issues/2165)) ([99f2779](https://github.com/googleapis/java-spanner/commit/99f277974fdcebf587d1e25ad643575e15cee7ff)) +* Update dependency org.graalvm.buildtools:junit-platform-native to v0.9.17 ([#2144](https://github.com/googleapis/java-spanner/issues/2144)) ([dd24b89](https://github.com/googleapis/java-spanner/commit/dd24b894fd80ccc962a414bb404d9624336f4612)) +* Update dependency org.openjdk.jmh:jmh-core to v1.36 ([#2160](https://github.com/googleapis/java-spanner/issues/2160)) ([29f9096](https://github.com/googleapis/java-spanner/commit/29f9096d1a10bfb9eacdbc4d6dbc4bc9c7ed05c1)) +* Update dependency org.openjdk.jmh:jmh-generator-annprocess to v1.36 ([#2161](https://github.com/googleapis/java-spanner/issues/2161)) 
([9148aa3](https://github.com/googleapis/java-spanner/commit/9148aa37bfb61af25023d56bfcf6d0e735e51b9a)) + +## [6.32.0](https://github.com/googleapis/java-spanner/compare/v6.31.2...v6.32.0) (2022-10-27) + + +### Features + +* Enable client to server compression ([#2117](https://github.com/googleapis/java-spanner/issues/2117)) ([50f8425](https://github.com/googleapis/java-spanner/commit/50f8425fe9e1db16ed060337d26feccc9a9813e2)) +* Increase default number of channels when gRPC-GCP channel pool is enabled ([#1997](https://github.com/googleapis/java-spanner/issues/1997)) ([44f27fc](https://github.com/googleapis/java-spanner/commit/44f27fc90fa3f9f4914574fb0476e971da4c02ff)) +* Update result_set.proto to return undeclared parameters in ExecuteSql API ([#2101](https://github.com/googleapis/java-spanner/issues/2101)) ([826eb93](https://github.com/googleapis/java-spanner/commit/826eb9305095db064f52a15dc502bc0e0df9a984)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.4.6 ([#2093](https://github.com/googleapis/java-spanner/issues/2093)) ([b08db44](https://github.com/googleapis/java-spanner/commit/b08db443229afdc1d49ef9f5e459cade5e2abe90)) +* Update dependency com.google.cloud:google-cloud-monitoring to v3.5.0 ([#2113](https://github.com/googleapis/java-spanner/issues/2113)) ([99d825b](https://github.com/googleapis/java-spanner/commit/99d825b18397ff9e8633b89effa05e61159d956f)) +* Update dependency com.google.cloud:google-cloud-monitoring to v3.6.0 ([#2125](https://github.com/googleapis/java-spanner/issues/2125)) ([7d86fe4](https://github.com/googleapis/java-spanner/commit/7d86fe40de29311ad65bd382e55f75326d16c4e3)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.0.5 ([#2122](https://github.com/googleapis/java-spanner/issues/2122)) ([308a65c](https://github.com/googleapis/java-spanner/commit/308a65c3e07e33f82b7ce474e0e95099192bb593)) +* Update dependency com.google.cloud:google-cloud-trace to v2.3.7 
([#2094](https://github.com/googleapis/java-spanner/issues/2094)) ([6ec3f3f](https://github.com/googleapis/java-spanner/commit/6ec3f3f585ed5eaecdb09d5fd1eb6c9af3b22555)) +* Update dependency com.google.cloud:google-cloud-trace to v2.4.0 ([#2114](https://github.com/googleapis/java-spanner/issues/2114)) ([84347f1](https://github.com/googleapis/java-spanner/commit/84347f1c6a52f3dfe569649f061cb16e2e466f6a)) +* Update dependency com.google.cloud:google-cloud-trace to v2.5.0 ([#2126](https://github.com/googleapis/java-spanner/issues/2126)) ([5167928](https://github.com/googleapis/java-spanner/commit/516792809cf976aeab10709ca62503b7f03bb333)) +* Update dependency org.graalvm.buildtools:junit-platform-native to v0.9.15 ([#2109](https://github.com/googleapis/java-spanner/issues/2109)) ([bf092ad](https://github.com/googleapis/java-spanner/commit/bf092ad7ac86c500e8a445397e192cb8fb0594ae)) +* Update dependency org.graalvm.buildtools:junit-platform-native to v0.9.16 ([#2119](https://github.com/googleapis/java-spanner/issues/2119)) ([b2d27e8](https://github.com/googleapis/java-spanner/commit/b2d27e8f841cab096d5ccad64a250c7f0b35f670)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.15 ([#2110](https://github.com/googleapis/java-spanner/issues/2110)) ([d28b202](https://github.com/googleapis/java-spanner/commit/d28b202cfc29e8fbbfdf3612b94bab5c2f319419)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.16 ([#2120](https://github.com/googleapis/java-spanner/issues/2120)) ([151cf77](https://github.com/googleapis/java-spanner/commit/151cf778ff76edaee9e849181f72119ffa6cb897)) +* Update dependency org.graalvm.sdk:graal-sdk to v22.2.0.1 ([#2102](https://github.com/googleapis/java-spanner/issues/2102)) ([68c2089](https://github.com/googleapis/java-spanner/commit/68c2089101124b9887af57b2697c35a64eb1a51f)) +* Update dependency org.graalvm.sdk:graal-sdk to v22.3.0 ([#2116](https://github.com/googleapis/java-spanner/issues/2116)) 
([9d6930b](https://github.com/googleapis/java-spanner/commit/9d6930b77ec479e5f517236852244476c23dc5c8)) + +## [6.31.2](https://github.com/googleapis/java-spanner/compare/v6.31.1...v6.31.2) (2022-10-05) + + +### Bug Fixes + +* update protobuf to v3.21.7 ([ac71008](https://github.com/googleapis/java-spanner/commit/ac71008bf8b1244cb3c5cf4317a0d25d4ffc5bbd)) + +## [6.31.1](https://github.com/googleapis/java-spanner/compare/v6.31.0...v6.31.1) (2022-10-03) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.0.4 ([#2090](https://github.com/googleapis/java-spanner/issues/2090)) ([8f46938](https://github.com/googleapis/java-spanner/commit/8f46938b67e44a7b739dc156dc8a0a89bcb33ef0)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.14 ([#2031](https://github.com/googleapis/java-spanner/issues/2031)) ([c5e9ba1](https://github.com/googleapis/java-spanner/commit/c5e9ba1c1a47faf89c47a9146a97cb6711dce242)) + +## [6.31.0](https://github.com/googleapis/java-spanner/compare/v6.30.2...v6.31.0) (2022-09-29) + + +### Features + +* Support customer managed instance configurations ([#1742](https://github.com/googleapis/java-spanner/issues/1742)) ([c1c805c](https://github.com/googleapis/java-spanner/commit/c1c805cf6e9c00f2d6796627d919338be1a0599a)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-trace to v2.3.4 ([#2027](https://github.com/googleapis/java-spanner/issues/2027)) ([14890ed](https://github.com/googleapis/java-spanner/commit/14890ed8e0df99eba7c2521a196132c78054b6ed)) +* Update dependency com.google.cloud:google-cloud-trace to v2.3.5 ([#2083](https://github.com/googleapis/java-spanner/issues/2083)) ([cef4e0a](https://github.com/googleapis/java-spanner/commit/cef4e0ada98ab65020f32836fc0c8ab1ee0c7eed)) +* Update dependency org.graalvm.buildtools:junit-platform-native to v0.9.14 ([#2030](https://github.com/googleapis/java-spanner/issues/2030)) 
([04b59ff](https://github.com/googleapis/java-spanner/commit/04b59ff8a1efaa32082aa4e9567d90b5956810c6)) +* Update dependency org.json:json to v20220924 ([#2035](https://github.com/googleapis/java-spanner/issues/2035)) ([a26a14a](https://github.com/googleapis/java-spanner/commit/a26a14a94ac3ca6cd7eabce6826cce3dde27ea66)) + +## [6.30.2](https://github.com/googleapis/java-spanner/compare/v6.30.1...v6.30.2) (2022-09-21) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.4.5 ([#2022](https://github.com/googleapis/java-spanner/issues/2022)) ([0536962](https://github.com/googleapis/java-spanner/commit/0536962df9af3feed237f758a560c24fafd81d60)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.9.1 ([#2023](https://github.com/googleapis/java-spanner/issues/2023)) ([3fb4235](https://github.com/googleapis/java-spanner/commit/3fb423571c1128b7cafdc6596d5366268d74f0e4)) + +## [6.30.1](https://github.com/googleapis/java-spanner/compare/v6.30.0...v6.30.1) (2022-09-20) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.4.4 ([#2014](https://github.com/googleapis/java-spanner/issues/2014)) ([9cebad4](https://github.com/googleapis/java-spanner/commit/9cebad485afc8b8d94bd4bc1673542a330451fbd)) +* Update dependency com.google.cloud:google-cloud-trace to v2.3.3 ([#2004](https://github.com/googleapis/java-spanner/issues/2004)) ([54f9095](https://github.com/googleapis/java-spanner/commit/54f90957544f0798d9872956dbe40ce822d5167d)) + +## [6.30.0](https://github.com/googleapis/java-spanner/compare/v6.29.1...v6.30.0) (2022-09-16) + + +### Features + +* Add custom instance config operations ([#1999](https://github.com/googleapis/java-spanner/issues/1999)) ([74f9c3b](https://github.com/googleapis/java-spanner/commit/74f9c3bc161748e52fed9af8f9fa26a236dc0140)) +* Add gRPC RLS dependency ([#1875](https://github.com/googleapis/java-spanner/issues/1875)) 
([31cf06e](https://github.com/googleapis/java-spanner/commit/31cf06e1f145dfaba8c2ed70732b4eb06086e0cc)) +* Default transaction isolation ([#1998](https://github.com/googleapis/java-spanner/issues/1998)) ([33aa21c](https://github.com/googleapis/java-spanner/commit/33aa21c09f01cc40d156035d2b63fca03257ef6c)) + + +### Bug Fixes + +* Retries of updates in the Connection API ignored analyze mode ([#2010](https://github.com/googleapis/java-spanner/issues/2010)) ([d54f252](https://github.com/googleapis/java-spanner/commit/d54f2521f1629658bc54f67ba549ea199a77c5a8)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-monitoring to v3.4.3 ([#2003](https://github.com/googleapis/java-spanner/issues/2003)) ([2f04f18](https://github.com/googleapis/java-spanner/commit/2f04f18f131cf656a94d8b1a78d311d2cc46797e)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.0.2 ([#2002](https://github.com/googleapis/java-spanner/issues/2002)) ([342190a](https://github.com/googleapis/java-spanner/commit/342190ab06917d0527316802a6c33da4f20213db)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.0.3 ([#2013](https://github.com/googleapis/java-spanner/issues/2013)) ([16db975](https://github.com/googleapis/java-spanner/commit/16db975fbcbd7ce8aee74b6988bf0d125619675f)) + +## [6.29.1](https://github.com/googleapis/java-spanner/compare/v6.29.0...v6.29.1) (2022-09-02) + + +### Dependencies + +* Update dependency com.google.cloud ([e90575d](https://github.com/googleapis/java-spanner/commit/e90575dcb30782d6c8f15a5765b487faf4b66d58)) + +## [6.29.0](https://github.com/googleapis/java-spanner/compare/v6.28.0...v6.29.0) (2022-08-29) + + +### Features + +* add support for db roles list ([#1916](https://github.com/googleapis/java-spanner/issues/1916)) ([8034c67](https://github.com/googleapis/java-spanner/commit/8034c67af6cfe24e96cc26b1cea51c3405ed98d6)) +* add support for PG JSONB data type 
([#1964](https://github.com/googleapis/java-spanner/issues/1964)) ([d2b426f](https://github.com/googleapis/java-spanner/commit/d2b426fda2cd1463dfa0719dd80f8346cbef51c6)) +* Adds auto-generated CL for googleapis for jsonb ([#1983](https://github.com/googleapis/java-spanner/issues/1983)) ([23e57ff](https://github.com/googleapis/java-spanner/commit/23e57ffc627d0f688fa656887d82f8f1f99f3675)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v3.4.1 ([#1968](https://github.com/googleapis/java-spanner/issues/1968)) ([e93ab4c](https://github.com/googleapis/java-spanner/commit/e93ab4cc4031ee2300f4e73d7d3a8e41de1bc7ae)) +* update dependency com.google.cloud:google-cloud-trace to v2.3.1 ([#1967](https://github.com/googleapis/java-spanner/issues/1967)) ([6479d19](https://github.com/googleapis/java-spanner/commit/6479d19dcca2b3e3df43a2858f5dcaf85685c31f)) + +## [6.28.0](https://github.com/googleapis/java-spanner/compare/v6.27.0...v6.28.0) (2022-08-11) + + +### Features + +* Add ListDatabaseRoles API to support role based access control ([cb13534](https://github.com/googleapis/java-spanner/commit/cb13534d7ca2e1b581cb4551d0f95834fbf7b640)) +* support multiple PostgreSQL transaction options ([#1949](https://github.com/googleapis/java-spanner/issues/1949)) ([8b99f30](https://github.com/googleapis/java-spanner/commit/8b99f30285e4ef68376aa9bfc11617f74e110bf2)) + + +### Bug Fixes + +* target new spanner db admin service config ([#1956](https://github.com/googleapis/java-spanner/issues/1956)) ([cb13534](https://github.com/googleapis/java-spanner/commit/cb13534d7ca2e1b581cb4551d0f95834fbf7b640)) +* Use the key instead of the value to verify the number of channels created in ChannelUsageTest. 
([#1965](https://github.com/googleapis/java-spanner/issues/1965)) ([ea329bb](https://github.com/googleapis/java-spanner/commit/ea329bb57b343c58bab2680b0c9412e51522b90b)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v3.3.6 ([#1962](https://github.com/googleapis/java-spanner/issues/1962)) ([5bb9844](https://github.com/googleapis/java-spanner/commit/5bb98441d65ba462c49810f980770406df8ca127)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v3 ([#1960](https://github.com/googleapis/java-spanner/issues/1960)) ([327b5f0](https://github.com/googleapis/java-spanner/commit/327b5f069f8fe4625be49c258c721a4db5fb0f6e)) +* update dependency org.junit.vintage:junit-vintage-engine to v5.9.0 ([#1959](https://github.com/googleapis/java-spanner/issues/1959)) ([f908626](https://github.com/googleapis/java-spanner/commit/f90862667613280a8c7a2901ba4b5940b0647eb2)) + +## [6.27.0](https://github.com/googleapis/java-spanner/compare/v6.26.0...v6.27.0) (2022-07-19) + + +### Features + +* Adding new fields for Instance Create Time and Update Time ([#1913](https://github.com/googleapis/java-spanner/issues/1913)) ([2c71e02](https://github.com/googleapis/java-spanner/commit/2c71e0233333803f271931f6ef471b7eacfa52d7)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v3.3.1 ([#1933](https://github.com/googleapis/java-spanner/issues/1933)) ([e3d646b](https://github.com/googleapis/java-spanner/commit/e3d646bae4abf2215d44f282d4faf722c638b823)) +* update dependency org.graalvm.buildtools:junit-platform-native to v0.9.13 ([#1944](https://github.com/googleapis/java-spanner/issues/1944)) ([765d11b](https://github.com/googleapis/java-spanner/commit/765d11b2e5ee7b1f12d2d27a139f92efbc1caa07)) +* update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.13 ([#1945](https://github.com/googleapis/java-spanner/issues/1945)) 
([0da75b8](https://github.com/googleapis/java-spanner/commit/0da75b819d6e9d0f7c6850d77656e46b76ddad6d)) +* update dependency org.graalvm.sdk:graal-sdk to v22.2.0 ([#1953](https://github.com/googleapis/java-spanner/issues/1953)) ([c7f1040](https://github.com/googleapis/java-spanner/commit/c7f1040d849901194e5672b270ccee7fbc695d17)) + +## [6.26.0](https://github.com/googleapis/java-spanner/compare/v6.25.7...v6.26.0) (2022-07-13) + + +### Features + +* Adding two new fields for Instance create_time and update_time ([#1908](https://github.com/googleapis/java-spanner/issues/1908)) ([00b3817](https://github.com/googleapis/java-spanner/commit/00b38178e851401e293aa457f7ba5ea593a7b7c5)) +* changes to support data, timestamp and arrays in IT tests ([#1840](https://github.com/googleapis/java-spanner/issues/1840)) ([c667653](https://github.com/googleapis/java-spanner/commit/c667653ec380dccbf205e7b419843da11cf4155a)) +* Error Details Improvement ([c8a2184](https://github.com/googleapis/java-spanner/commit/c8a2184c51cc92ec35c759eff68e614fc78fb2e6)) +* Error Details Improvement ([#1929](https://github.com/googleapis/java-spanner/issues/1929)) ([c8a2184](https://github.com/googleapis/java-spanner/commit/c8a2184c51cc92ec35c759eff68e614fc78fb2e6)) + + +### Bug Fixes + +* enable longpaths support for windows test ([#1485](https://github.com/googleapis/java-spanner/issues/1485)) ([#1946](https://github.com/googleapis/java-spanner/issues/1946)) ([fd0b845](https://github.com/googleapis/java-spanner/commit/fd0b84523535ba583a1b56acbea98835191daa06)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-trace to v2.3.0 ([#1934](https://github.com/googleapis/java-spanner/issues/1934)) ([2813eb2](https://github.com/googleapis/java-spanner/commit/2813eb21c9f168e8dea149e40dac188933c7e2db)) + +## [6.25.7](https://github.com/googleapis/java-spanner/compare/v6.25.6...v6.25.7) (2022-06-30) + + +### Dependencies + +* update dependency 
com.google.cloud:google-cloud-shared-dependencies to v2.13.0 ([#1924](https://github.com/googleapis/java-spanner/issues/1924)) ([dde5ee8](https://github.com/googleapis/java-spanner/commit/dde5ee8c5fcef36b415929aa32931dc811036eb4)) +* update dependency org.graalvm.buildtools:junit-platform-native to v0.9.12 ([#1906](https://github.com/googleapis/java-spanner/issues/1906)) ([1800cd9](https://github.com/googleapis/java-spanner/commit/1800cd917c26934768296253cbbcf7c91c54afef)) + +## [6.25.6](https://github.com/googleapis/java-spanner/compare/v6.25.5...v6.25.6) (2022-06-22) + + +### Bug Fixes + +* PostgreSQL parser should not treat \ as an escape char ([#1921](https://github.com/googleapis/java-spanner/issues/1921)) ([260bbe3](https://github.com/googleapis/java-spanner/commit/260bbe3cb78e0583975d7085ae5a95dbfd3efd73)), closes [#1920](https://github.com/googleapis/java-spanner/issues/1920) + + +### Documentation + +* **sample:** relocate native image sample from old repo ([#1758](https://github.com/googleapis/java-spanner/issues/1758)) ([ef187f4](https://github.com/googleapis/java-spanner/commit/ef187f4fccaf1c5550e9f6795228e6c7361030db)) + + +### Dependencies + +* update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.11 ([#1907](https://github.com/googleapis/java-spanner/issues/1907)) ([01f8a07](https://github.com/googleapis/java-spanner/commit/01f8a07c64358368615d8c729c7c47c4b2c687fd)) +* update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.12 ([#1918](https://github.com/googleapis/java-spanner/issues/1918)) ([be8b50b](https://github.com/googleapis/java-spanner/commit/be8b50b56e51245d941c52445498600025e26ba9)) + +## [6.25.5](https://github.com/googleapis/java-spanner/compare/v6.25.4...v6.25.5) (2022-05-31) + + +### Bug Fixes + +* add configurations for Explain feature ([#1899](https://github.com/googleapis/java-spanner/issues/1899)) ([86895b7](https://github.com/googleapis/java-spanner/commit/86895b756d963a13f138842a6743ea6d24b7c391)) +* 
gracefully ignore RejectedExecutionException during Connection#close() ([#1887](https://github.com/googleapis/java-spanner/issues/1887)) ([091bd1d](https://github.com/googleapis/java-spanner/commit/091bd1d3757751a29c962e2c0b7f4f8720e06a6a)) + +### [6.25.4](https://github.com/googleapis/java-spanner/compare/v6.25.3...v6.25.4) (2022-05-26) + + +### Dependencies + +* update dependency org.graalvm.sdk:graal-sdk to v22.1.0.1 ([#1894](https://github.com/googleapis/java-spanner/issues/1894)) ([cddb745](https://github.com/googleapis/java-spanner/commit/cddb745e0b7212225a430d1823e9670eb968f98a)) + +### [6.25.3](https://github.com/googleapis/java-spanner/compare/v6.25.2...v6.25.3) (2022-05-25) + + +### Bug Fixes + +* add native image configurations for Spanner classes ([#1858](https://github.com/googleapis/java-spanner/issues/1858)) ([92d0292](https://github.com/googleapis/java-spanner/commit/92d02922c23e9445c438b69017634415e05d2d98)) + +### [6.25.2](https://github.com/googleapis/java-spanner/compare/v6.25.1...v6.25.2) (2022-05-25) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v3.3.0 ([#1888](https://github.com/googleapis/java-spanner/issues/1888)) ([1b109e9](https://github.com/googleapis/java-spanner/commit/1b109e9fd66c74b70af808eced162a684287200e)) +* update dependency com.google.cloud:google-cloud-trace to v2.2.0 ([#1889](https://github.com/googleapis/java-spanner/issues/1889)) ([f89f70e](https://github.com/googleapis/java-spanner/commit/f89f70e95e068998ff5f9e211fa1172c4fe37b94)) + +### [6.25.1](https://github.com/googleapis/java-spanner/compare/v6.25.0...v6.25.1) (2022-05-23) + + +### Dependencies + +* update dependency org.openjdk.jmh:jmh-generator-annprocess to v1.35 ([#1790](https://github.com/googleapis/java-spanner/issues/1790)) ([d68095b](https://github.com/googleapis/java-spanner/commit/d68095b274bb8ef778176d4ff88d54b607e3de73)) + +## [6.25.0](https://github.com/googleapis/java-spanner/compare/v6.24.0...v6.25.0) 
(2022-05-20) + + +### Features + +* add build scripts for native image testing in Java 17 ([#1440](https://github.com/googleapis/java-spanner/issues/1440)) ([#1881](https://github.com/googleapis/java-spanner/issues/1881)) ([993e893](https://github.com/googleapis/java-spanner/commit/993e89365d167e07114ebc352dfa835487045ecb)) +* Add support for Explain feature ([#1852](https://github.com/googleapis/java-spanner/issues/1852)) ([01f460e](https://github.com/googleapis/java-spanner/commit/01f460e9fc755c02797c50a50d8dc2df31116268)) +* AuditConfig for IAM v1 ([f7437b2](https://github.com/googleapis/java-spanner/commit/f7437b294a7c05f288142626d71c7aff00616c89)) +* support analyze DDL statement ([#1879](https://github.com/googleapis/java-spanner/issues/1879)) ([1704ac3](https://github.com/googleapis/java-spanner/commit/1704ac3dbcf959294b6d609b4dce2aa1fa80d594)) +* support analyzeUpdate ([#1867](https://github.com/googleapis/java-spanner/issues/1867)) ([2d8cfa4](https://github.com/googleapis/java-spanner/commit/2d8cfa40a22e5b77a39b6ec86552734ec47afbe0)) + + +### Bug Fixes + +* ignore errors during Connection.close() ([#1877](https://github.com/googleapis/java-spanner/issues/1877)) ([6ab8ed2](https://github.com/googleapis/java-spanner/commit/6ab8ed236b1393e67a4edc5d430d9535dffbadb5)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.12.0 ([#1880](https://github.com/googleapis/java-spanner/issues/1880)) ([daccd1b](https://github.com/googleapis/java-spanner/commit/daccd1b394a95f59246b36ef91c5d9459b3be577)) +* update opencensus.version to v0.31.1 ([#1863](https://github.com/googleapis/java-spanner/issues/1863)) ([2d2b526](https://github.com/googleapis/java-spanner/commit/2d2b526777b918f50511ef57433a809a672ab832)) + +## [6.24.0](https://github.com/googleapis/java-spanner/compare/v6.23.3...v6.24.0) (2022-05-05) + + +### Features + +* Copy backup samples ([#1802](https://github.com/googleapis/java-spanner/issues/1802)) 
([787ccad](https://github.com/googleapis/java-spanner/commit/787ccadcba01193d541bfd1b80b055fb5d4c2bb3)) +* support CREATE DATABASE in Connection API ([#1845](https://github.com/googleapis/java-spanner/issues/1845)) ([40110fe](https://github.com/googleapis/java-spanner/commit/40110feb22986c6b5dac6885eae7f0b331aede61)) +* support CredentialsProvider in Connection API ([#1869](https://github.com/googleapis/java-spanner/issues/1869)) ([f1d2d3e](https://github.com/googleapis/java-spanner/commit/f1d2d3ef1dbd30c153616c2efcc362c1330705e1)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v3.2.8 ([#1831](https://github.com/googleapis/java-spanner/issues/1831)) ([088fb50](https://github.com/googleapis/java-spanner/commit/088fb50a673a99e6921503be0f84b8291173240e)) +* update dependency com.google.cloud:google-cloud-monitoring to v3.2.9 ([#1851](https://github.com/googleapis/java-spanner/issues/1851)) ([4d6bb2d](https://github.com/googleapis/java-spanner/commit/4d6bb2dd233fba60d213d36f15aead67dff57dec)) +* update dependency com.google.cloud:google-cloud-trace to v2.1.11 ([#1799](https://github.com/googleapis/java-spanner/issues/1799)) ([049635d](https://github.com/googleapis/java-spanner/commit/049635d4bc3210bd9ce41444f17c8b9d67af969a)) + + +### Documentation + +* add samples for PostgreSQL ([#1781](https://github.com/googleapis/java-spanner/issues/1781)) ([e832298](https://github.com/googleapis/java-spanner/commit/e8322986f158a86cdbb04332a9c49ead79fb2587)) + +### [6.23.3](https://github.com/googleapis/java-spanner/compare/v6.23.2...v6.23.3) (2022-04-21) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.10.0 ([#1830](https://github.com/googleapis/java-spanner/issues/1830)) ([3c55eb3](https://github.com/googleapis/java-spanner/commit/3c55eb336e77ee1ddfb6c055722697f81419578c)) + + +### Documentation + +* add samples for PostgreSQL
([#1700](https://github.com/googleapis/java-spanner/issues/1700)) ([a024483](https://github.com/googleapis/java-spanner/commit/a02448388ba2415d31593a8c81b4430e2264c10c)) + +### [6.23.2](https://github.com/googleapis/java-spanner/compare/v6.23.1...v6.23.2) (2022-04-11) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v3.2.7 ([#1810](https://github.com/googleapis/java-spanner/issues/1810)) ([0acb53d](https://github.com/googleapis/java-spanner/commit/0acb53d430a0e7170fccc0cf936de9123d9b1689)) +* update dependency org.openjdk.jmh:jmh-core to v1.35 ([#1789](https://github.com/googleapis/java-spanner/issues/1789)) ([3511fe6](https://github.com/googleapis/java-spanner/commit/3511fe6cd1b929b916048dc95ba3c966138730a7)) + +### [6.23.1](https://github.com/googleapis/java-spanner/compare/v6.23.0...v6.23.1) (2022-03-29) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v3.2.6 ([#1797](https://github.com/googleapis/java-spanner/issues/1797)) ([48097de](https://github.com/googleapis/java-spanner/commit/48097dec5fd6c748d32cb666f82b8e9bfcfffe46)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.9.0 ([#1791](https://github.com/googleapis/java-spanner/issues/1791)) ([603e91c](https://github.com/googleapis/java-spanner/commit/603e91c7be63caf563d415b6f8b301b5edf7bb5e)) + +## [6.23.0](https://github.com/googleapis/java-spanner/compare/v6.22.0...v6.23.0) (2022-03-28) + + +### Features + +* Copy Backup Support ([#1778](https://github.com/googleapis/java-spanner/issues/1778)) ([dc79366](https://github.com/googleapis/java-spanner/commit/dc79366f05f28d4b1a68240989b5ad06621e4a01)) + +## [6.22.0](https://github.com/googleapis/java-spanner/compare/v6.21.2...v6.22.0) (2022-03-25) + + +### Features + +* Cross Region backup proto changes ([#1754](https://github.com/googleapis/java-spanner/issues/1754)) 
([6d64104](https://github.com/googleapis/java-spanner/commit/6d641044fae595acaafd6020359598c0efd4551f)) +* support PG show transaction isolation level ([#1777](https://github.com/googleapis/java-spanner/issues/1777)) ([111f74c](https://github.com/googleapis/java-spanner/commit/111f74c36776a481452ccb9b631a017cab592189)) + + +### Bug Fixes + +* Correct recording values in opencensus measureMap in HeaderInterceptor ([#1726](https://github.com/googleapis/java-spanner/issues/1726)) ([bdb2b89](https://github.com/googleapis/java-spanner/commit/bdb2b89e17fe0957e393aea3a0b2f310158dc1e8)) +* return errors from BatchCreateSession to dialect detection ([#1760](https://github.com/googleapis/java-spanner/issues/1760)) ([6550a9d](https://github.com/googleapis/java-spanner/commit/6550a9d64b3e5525085f26bf1344e4524f8d0ffb)), closes [#1759](https://github.com/googleapis/java-spanner/issues/1759) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-trace to v2.1.7 ([#1748](https://github.com/googleapis/java-spanner/issues/1748)) ([a794387](https://github.com/googleapis/java-spanner/commit/a7943878ccebb2e48431fb50a0e9f3974e21dcfa)) +* update dependency com.google.cloud:google-cloud-trace to v2.1.8 ([#1757](https://github.com/googleapis/java-spanner/issues/1757)) ([2b54949](https://github.com/googleapis/java-spanner/commit/2b54949ec5082f1aab4b3b5b46bf0bef94f73d9e)) +* update dependency com.google.cloud:google-cloud-trace to v2.1.9 ([#1782](https://github.com/googleapis/java-spanner/issues/1782)) ([d623b7e](https://github.com/googleapis/java-spanner/commit/d623b7e40592fd02e2f08355a002205fbbce14f5)) +* update dependency org.json:json to v20220320 ([#1761](https://github.com/googleapis/java-spanner/issues/1761)) ([6eee5eb](https://github.com/googleapis/java-spanner/commit/6eee5ebf5117d59e001e85546bf046970f367505)) + +### [6.21.2](https://github.com/googleapis/java-spanner/compare/v6.21.1...v6.21.2) (2022-03-10) + + +### Dependencies + +* update dependency 
com.google.cloud:google-cloud-trace to v2.1.6 ([#1743](https://github.com/googleapis/java-spanner/issues/1743)) ([6b0f813](https://github.com/googleapis/java-spanner/commit/6b0f813c29d580391179d27f5fd3ab7d81a9d43c)) + +### [6.21.1](https://github.com/googleapis/java-spanner/compare/v6.21.0...v6.21.1) (2022-03-09) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-trace to v2.1.5 ([#1739](https://github.com/googleapis/java-spanner/issues/1739)) ([b553c03](https://github.com/googleapis/java-spanner/commit/b553c032131a5fe147e48ff031a85b2ee5d982be)) + +## [6.21.0](https://github.com/googleapis/java-spanner/compare/v6.20.0...v6.21.0) (2022-03-08) + + +### Features + +* parse query parameters in PostgreSQL query ([#1732](https://github.com/googleapis/java-spanner/issues/1732)) ([7357ac6](https://github.com/googleapis/java-spanner/commit/7357ac6e3ddfdfee37e70343a970e7e63fb08bf2)) +* Track PG Adapter usage from user-agent headers ([#1711](https://github.com/googleapis/java-spanner/issues/1711)) ([cb640ab](https://github.com/googleapis/java-spanner/commit/cb640abeb8ec9321136b86d5b54e620dba087080)) + + +### Bug Fixes + +* annotating some fields as REQUIRED ([#1695](https://github.com/googleapis/java-spanner/issues/1695)) ([8b90b6c](https://github.com/googleapis/java-spanner/commit/8b90b6cce0fd36a1e3ca1c8e0c0f34661ab9c2a3)) +* catch ExecutionException for op.getName ([#1729](https://github.com/googleapis/java-spanner/issues/1729)) ([8ea3ac0](https://github.com/googleapis/java-spanner/commit/8ea3ac086371beebd22f04c8c5f74beb8058e84f)) +* PostgreSQL supports newline in quoted literals and identifiers ([#1731](https://github.com/googleapis/java-spanner/issues/1731)) ([f403d99](https://github.com/googleapis/java-spanner/commit/f403d99acd21db8d494855d71b5ec410164a5232)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v3.2.4 ([#1719](https://github.com/googleapis/java-spanner/issues/1719)) 
([20336cd](https://github.com/googleapis/java-spanner/commit/20336cd5d3307a48f968587212af38872dec5a50)) +* update dependency com.google.cloud:google-cloud-monitoring to v3.2.5 ([#1727](https://github.com/googleapis/java-spanner/issues/1727)) ([92a9f14](https://github.com/googleapis/java-spanner/commit/92a9f148b8dcbd0ac7ca1ff0029ad7c09f577e40)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.8.0 ([#1722](https://github.com/googleapis/java-spanner/issues/1722)) ([9704974](https://github.com/googleapis/java-spanner/commit/9704974a92f56886269e6cbcb1f74528fbe7e73f)) +* update dependency com.google.cloud:google-cloud-trace to v2.1.4 ([#1728](https://github.com/googleapis/java-spanner/issues/1728)) ([d193a26](https://github.com/googleapis/java-spanner/commit/d193a26ec46df1b229103ec50c0db9b62d98507a)) + +## [6.20.0](https://github.com/googleapis/java-spanner/compare/v6.19.1...v6.20.0) (2022-02-22) + + +### Features + +* allows for getting json columns using getValue ([#1699](https://github.com/googleapis/java-spanner/issues/1699)) ([a51973b](https://github.com/googleapis/java-spanner/commit/a51973b1a87c0a57b114892fe39a24caa1458d1d)) + + +### Bug Fixes + +* **java:** make system property accessible for native image compilation ([#1694](https://github.com/googleapis/java-spanner/issues/1694)) ([e3fb2b2](https://github.com/googleapis/java-spanner/commit/e3fb2b273f939314d9cdbce539f373d6fc77d0ad)) +* use information_schema instead of pg_catalog for dialect detection ([#1708](https://github.com/googleapis/java-spanner/issues/1708)) ([91e157a](https://github.com/googleapis/java-spanner/commit/91e157a6dcd08afd81a4cbddffcb8e02defb8d3a)) + +### [6.19.1](https://github.com/googleapis/java-spanner/compare/v6.19.0...v6.19.1) (2022-02-18) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v3.2.3 ([#1698](https://github.com/googleapis/java-spanner/issues/1698)) 
([cd4f4ca](https://github.com/googleapis/java-spanner/commit/cd4f4ca3fe870227dceae8c6ab66993477b4bdc4)) +* update dependency com.google.cloud:google-cloud-trace to v2.1.3 ([#1684](https://github.com/googleapis/java-spanner/issues/1684)) ([e70e5c4](https://github.com/googleapis/java-spanner/commit/e70e5c4c9c9ce0b8d18f9f1f7d01baf6a97ec264)) + +## [6.19.0](https://github.com/googleapis/java-spanner/compare/v6.18.0...v6.19.0) (2022-02-16) + + +### Features + +* automatically detect database dialect ([#1677](https://github.com/googleapis/java-spanner/issues/1677)) ([9eccfc4](https://github.com/googleapis/java-spanner/commit/9eccfc441237272b01140c1f3d7da51b2b985554)) +* PostgreSQL dialect databases ([#1673](https://github.com/googleapis/java-spanner/issues/1673)) ([5f156f2](https://github.com/googleapis/java-spanner/commit/5f156f2efdb4726679766b385d500a030c24e477)) + + +### Bug Fixes + +* allow getting metadata without calling next() ([#1691](https://github.com/googleapis/java-spanner/issues/1691)) ([4cfe74e](https://github.com/googleapis/java-spanner/commit/4cfe74ef780f57747ea1dfef1a7098f809bcb300)) +* do not delete session in close method for BatchReadOnlyTransactionImpl ([#1688](https://github.com/googleapis/java-spanner/issues/1688)) ([5dc3e19](https://github.com/googleapis/java-spanner/commit/5dc3e191bee603a7feec29b7d4412646d53d73e4)) +* untyped null parameters would cause NPE ([#1680](https://github.com/googleapis/java-spanner/issues/1680)) ([7095f94](https://github.com/googleapis/java-spanner/commit/7095f940638d786745ed6715cf7a221d3e4a41a9)), closes [#1679](https://github.com/googleapis/java-spanner/issues/1679) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v3.2.2 ([#1666](https://github.com/googleapis/java-spanner/issues/1666)) ([8ea2220](https://github.com/googleapis/java-spanner/commit/8ea22205ea1361012b8f237af9150f320b41cc23)) +* update dependency com.google.cloud:google-cloud-trace to v2.1.2 
([#1664](https://github.com/googleapis/java-spanner/issues/1664)) ([4f46635](https://github.com/googleapis/java-spanner/commit/4f46635577f0e754ce271e4aba338b84d34f57dd)) + +## [6.18.0](https://github.com/googleapis/java-spanner/compare/v6.17.4...v6.18.0) (2022-02-03) + + +### Features + +* add database dialect ([#1657](https://github.com/googleapis/java-spanner/issues/1657)) ([269f090](https://github.com/googleapis/java-spanner/commit/269f090805b366fcd7a7163a6602268b4d143aa4)) +* Updating readme with new gfe latency metrics ([#1630](https://github.com/googleapis/java-spanner/issues/1630)) ([d02601a](https://github.com/googleapis/java-spanner/commit/d02601ac73a1b9ab580480c4370ba26260996d8c)) + + +### Dependencies + +* **java:** update actions/github-script action to v5 ([#1339](https://github.com/googleapis/java-spanner/issues/1339)) ([#1659](https://github.com/googleapis/java-spanner/issues/1659)) ([203b346](https://github.com/googleapis/java-spanner/commit/203b346e748b78e56aad2246c3970593a7584825)) +* update actions/github-script action to v5 ([#1658](https://github.com/googleapis/java-spanner/issues/1658)) ([a2f3790](https://github.com/googleapis/java-spanner/commit/a2f3790c35ecc960b50979caa12f6355f397c127)) +* update dependency com.google.cloud:google-cloud-monitoring to v3.2.1 ([#1637](https://github.com/googleapis/java-spanner/issues/1637)) ([73c9434](https://github.com/googleapis/java-spanner/commit/73c94349b56710adc788c3a8440648e7f66f228b)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.7.0 ([#1662](https://github.com/googleapis/java-spanner/issues/1662)) ([ece31c0](https://github.com/googleapis/java-spanner/commit/ece31c0d873ee537b167792dcbe9dc62d783a52d)) +* update opencensus.version to v0.31.0 ([#1661](https://github.com/googleapis/java-spanner/issues/1661)) ([1e86a3a](https://github.com/googleapis/java-spanner/commit/1e86a3a4542e6744cb1d8a8dbca36218c147c9f0)) + +### 
[6.17.4](https://www.github.com/googleapis/java-spanner/compare/v6.17.3...v6.17.4) (2022-01-07) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.6.0 ([#1632](https://www.github.com/googleapis/java-spanner/issues/1632)) ([c7d4d4d](https://www.github.com/googleapis/java-spanner/commit/c7d4d4d833e9027642a870e5f03cf768c02216e3)) +* update dependency com.google.cloud:google-cloud-trace to v2.1.1 ([#1633](https://www.github.com/googleapis/java-spanner/issues/1633)) ([4607c21](https://www.github.com/googleapis/java-spanner/commit/4607c21518a13fd9e48a8876bbfa9f587dbe1823)) + +### [6.17.3](https://www.github.com/googleapis/java-spanner/compare/v6.17.2...v6.17.3) (2021-12-17) + + +### Bug Fixes + +* re-adds test-jar to bom definition ([#1596](https://www.github.com/googleapis/java-spanner/issues/1596)) ([5accdcd](https://www.github.com/googleapis/java-spanner/commit/5accdcdb163a4f434ba1b47ac4f1ecba92be6f67)) + + +### Dependencies + +* bump OpenCensus API to 0.30.0 ([#1598](https://www.github.com/googleapis/java-spanner/issues/1598)) ([b953363](https://www.github.com/googleapis/java-spanner/commit/b953363c531cd2cd7e831d546a30b3bbfab54268)) + +### [6.17.2](https://www.github.com/googleapis/java-spanner/compare/v6.17.1...v6.17.2) (2021-12-15) + + +### Dependencies + +* update opencensus.version to v0.29.0 ([#1589](https://www.github.com/googleapis/java-spanner/issues/1589)) ([7abf7ff](https://www.github.com/googleapis/java-spanner/commit/7abf7ff9b339eaef499313be17c7cabc169246fb)) + +### [6.17.1](https://www.github.com/googleapis/java-spanner/compare/v6.17.0...v6.17.1) (2021-12-08) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-trace to v2.1.0 ([#1574](https://www.github.com/googleapis/java-spanner/issues/1574)) ([eaf2831](https://www.github.com/googleapis/java-spanner/commit/eaf28318f0a8eb5dc3795865de438f1d0e7bd982)) + +## 
[6.17.0](https://www.github.com/googleapis/java-spanner/compare/v6.16.0...v6.17.0) (2021-12-06) + + +### Features + +* NaNs in Mutations are equal and have the same hashcode ([#1554](https://www.github.com/googleapis/java-spanner/issues/1554)) ([91a18fc](https://www.github.com/googleapis/java-spanner/commit/91a18fc09a2034959758d38f1278dc93128c7622)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v3.2.0 ([#1571](https://www.github.com/googleapis/java-spanner/issues/1571)) ([0e0d9f7](https://www.github.com/googleapis/java-spanner/commit/0e0d9f7c45c71dd4e9b5500bb3931e1d399041bc)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.5.1 ([#1570](https://www.github.com/googleapis/java-spanner/issues/1570)) ([563879e](https://www.github.com/googleapis/java-spanner/commit/563879e82e77da0603f1b817190d98cfbee4e81f)) +* update dependency org.json:json to v20211205 ([#1572](https://www.github.com/googleapis/java-spanner/issues/1572)) ([59593bd](https://www.github.com/googleapis/java-spanner/commit/59593bd471e7e890b589c9e5a7291a837a88a0e7)) + +## [6.16.0](https://www.github.com/googleapis/java-spanner/compare/v6.15.2...v6.16.0) (2021-11-15) + + +### Features + +* support RPC priority for JDBC connections and statements ([#1548](https://www.github.com/googleapis/java-spanner/issues/1548)) ([b61a0d4](https://www.github.com/googleapis/java-spanner/commit/b61a0d4db80a689f6f1b2ccf53c9360226890e9d)) + +### [6.15.2](https://www.github.com/googleapis/java-spanner/compare/v6.15.1...v6.15.2) (2021-11-10) + + +### Bug Fixes + +* **java:** java 17 dependency arguments ([#1537](https://www.github.com/googleapis/java-spanner/issues/1537)) ([0e30ebf](https://www.github.com/googleapis/java-spanner/commit/0e30ebffc63de2de940db1eb807175ec19aa752d)) + +### [6.15.1](https://www.github.com/googleapis/java-spanner/compare/v6.15.0...v6.15.1) (2021-10-27) + + +### Dependencies + +* upgrade Mockito to version 4.x 
([#1498](https://www.github.com/googleapis/java-spanner/issues/1498)) ([09bd561](https://www.github.com/googleapis/java-spanner/commit/09bd56157827119586fd3e0a1ee056bb793d08e3)) + +## [6.15.0](https://www.github.com/googleapis/java-spanner/compare/v6.14.0...v6.15.0) (2021-10-27) + + +### Features + +* next release from main branch is 6.15.0 ([#1518](https://www.github.com/googleapis/java-spanner/issues/1518)) ([9e5e27e](https://www.github.com/googleapis/java-spanner/commit/9e5e27eee8ba9906900bb2868183b1ec88f19ecf)) + +## [6.14.0](https://www.github.com/googleapis/java-spanner/compare/v6.13.0...v6.14.0) (2021-10-25) + + +### Features + +* Introduce Native Image testing build script changes ([#1500](https://www.github.com/googleapis/java-spanner/issues/1500)) ([7a034c9](https://www.github.com/googleapis/java-spanner/commit/7a034c9120ffa433f64e67d565c854f1fb3ce9f5)) + + +### Bug Fixes + +* **java:** java 17 dependency arguments ([#1512](https://www.github.com/googleapis/java-spanner/issues/1512)) ([4cebefa](https://www.github.com/googleapis/java-spanner/commit/4cebefa1ce6502d48c2e2e0a3a484f60eeed450f)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v3.1.0 ([#1506](https://www.github.com/googleapis/java-spanner/issues/1506)) ([ea35b27](https://www.github.com/googleapis/java-spanner/commit/ea35b2723fcc8c255ab0e52306e066c689c6a0c6)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.4.0 ([#1501](https://www.github.com/googleapis/java-spanner/issues/1501)) ([d5a37b8](https://www.github.com/googleapis/java-spanner/commit/d5a37b8853fc21a28b6610b2933ed31fcbe206e2)) +* update dependency com.google.cloud:google-cloud-trace to v2.0.6 ([#1504](https://www.github.com/googleapis/java-spanner/issues/1504)) ([667b8b1](https://www.github.com/googleapis/java-spanner/commit/667b8b17cc2f8d217ecda0af89bdc668670f3aab)) + +## [6.13.0](https://www.github.com/googleapis/java-spanner/compare/v6.12.5...v6.13.0) (2021-10-07) + 
+ +### Features + +* expose GFE latency metrics ([#1473](https://www.github.com/googleapis/java-spanner/issues/1473)) ([de82f78](https://www.github.com/googleapis/java-spanner/commit/de82f7809f8585fcbd13e117a2e29e06f1424de4)) + + +### Bug Fixes + +* keep track of any BeginTransaction option for a Read ([#1485](https://www.github.com/googleapis/java-spanner/issues/1485)) ([757d6ec](https://www.github.com/googleapis/java-spanner/commit/757d6ecfcceea58e0db7623778dde6f3e5f4b865)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v3.0.7 ([#1491](https://www.github.com/googleapis/java-spanner/issues/1491)) ([58f0e5a](https://www.github.com/googleapis/java-spanner/commit/58f0e5a6db04d6298ae5d8760f907946ffffbae4)) + +### [6.12.5](https://www.github.com/googleapis/java-spanner/compare/v6.12.4...v6.12.5) (2021-09-27) + + +### Bug Fixes + +* sessions were not always removed from checkedOutSessions ([#1438](https://www.github.com/googleapis/java-spanner/issues/1438)) ([49360b1](https://www.github.com/googleapis/java-spanner/commit/49360b13e5d8904bfdc09cb4db8c24848debfa0b)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v3.0.6 ([#1443](https://www.github.com/googleapis/java-spanner/issues/1443)) ([159c026](https://www.github.com/googleapis/java-spanner/commit/159c026a250e6f9d6d583ef3123403a64f817e40)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.3.0 ([#1439](https://www.github.com/googleapis/java-spanner/issues/1439)) ([6bdeddf](https://www.github.com/googleapis/java-spanner/commit/6bdeddf7612964d4d59061d0a7c2956d66619a4b)) +* update dependency com.google.cloud:google-cloud-trace to v2.0.5 ([#1459](https://www.github.com/googleapis/java-spanner/issues/1459)) ([2ce9a1b](https://www.github.com/googleapis/java-spanner/commit/2ce9a1bd5cf8edb36b1c4fe57f2d9b304dcd6ccc)) + +### [6.12.4](https://www.github.com/googleapis/java-spanner/compare/v6.12.3...v6.12.4) (2021-09-16) + 
+ +### Bug Fixes + +* do not serialize unnecessary fields ([#1426](https://www.github.com/googleapis/java-spanner/issues/1426)) ([29209f8](https://www.github.com/googleapis/java-spanner/commit/29209f83d10fa01b5566da66259da95dd60abca0)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v3.0.5 ([#1431](https://www.github.com/googleapis/java-spanner/issues/1431)) ([32eee0a](https://www.github.com/googleapis/java-spanner/commit/32eee0aa14f0b276673dca7a65e011a509e96453)) + +### [6.12.3](https://www.github.com/googleapis/java-spanner/compare/v6.12.2...v6.12.3) (2021-09-15) + + +### Bug Fixes + +* drop databases after sample tests ([#1401](https://www.github.com/googleapis/java-spanner/issues/1401)) ([c9f5048](https://www.github.com/googleapis/java-spanner/commit/c9f504829f53bfcff6f78bbbbc447cc8f10f5940)) +* fix JSON sample test ([#1417](https://www.github.com/googleapis/java-spanner/issues/1417)) ([dc1f9a9](https://www.github.com/googleapis/java-spanner/commit/dc1f9a92a7562e2585e2762c2749eb3207f67c25)) +* revert test category refactoring ([#1419](https://www.github.com/googleapis/java-spanner/issues/1419)) ([fe2ad14](https://www.github.com/googleapis/java-spanner/commit/fe2ad14eae2002552d61e497f9892c96584efc24)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v3.0.4 ([#1422](https://www.github.com/googleapis/java-spanner/issues/1422)) ([d57d47e](https://www.github.com/googleapis/java-spanner/commit/d57d47eb3086d7352b6f7af1c4cc694de030e3ee)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.2.1 ([#1420](https://www.github.com/googleapis/java-spanner/issues/1420)) ([85b4f31](https://www.github.com/googleapis/java-spanner/commit/85b4f31d065202527ad3220cca9df94d40020e0a)) +* update dependency com.google.cloud:google-cloud-trace to v2.0.4 ([#1425](https://www.github.com/googleapis/java-spanner/issues/1425)) 
([ce8776a](https://www.github.com/googleapis/java-spanner/commit/ce8776a310f0d53ea2aee738e0d56dc56371fa51)) + +### [6.12.2](https://www.github.com/googleapis/java-spanner/compare/v6.12.1...v6.12.2) (2021-09-01) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v3.0.3 ([#1402](https://www.github.com/googleapis/java-spanner/issues/1402)) ([417fc5a](https://www.github.com/googleapis/java-spanner/commit/417fc5a6b19a8be6d8f015a1fb036e89dcaad433)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.2.0 ([#1397](https://www.github.com/googleapis/java-spanner/issues/1397)) ([cc543c7](https://www.github.com/googleapis/java-spanner/commit/cc543c79a7ead75da35dc1bffc9ac7a27ec14443)) +* update dependency com.google.cloud:google-cloud-trace to v2.0.3 ([#1399](https://www.github.com/googleapis/java-spanner/issues/1399)) ([2874720](https://www.github.com/googleapis/java-spanner/commit/2874720a5b938edd861a7259164876b25d8cb0bd)) + +### [6.12.1](https://www.github.com/googleapis/java-spanner/compare/v6.12.0...v6.12.1) (2021-08-25) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v3.0.2 ([#1372](https://www.github.com/googleapis/java-spanner/issues/1372)) ([8d08076](https://www.github.com/googleapis/java-spanner/commit/8d0807638f91ce8b4e4d56e2cb455e04bd70d82b)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.1.0 ([#1369](https://www.github.com/googleapis/java-spanner/issues/1369)) ([c94ad5b](https://www.github.com/googleapis/java-spanner/commit/c94ad5b99a7a7ac10d06ef651d6519568c57bdd1)) +* update dependency com.google.cloud:google-cloud-trace to v2.0.2 ([#1373](https://www.github.com/googleapis/java-spanner/issues/1373)) ([1b7933d](https://www.github.com/googleapis/java-spanner/commit/1b7933d3a440b8c791d1d34fe3cc30c53a2b71e4)) + +## [6.12.0](https://www.github.com/googleapis/java-spanner/compare/v6.11.1...v6.12.0) (2021-08-24) + + +### Features + +* add 
support for JSON data type ([#872](https://www.github.com/googleapis/java-spanner/issues/872)) ([d7ff940](https://www.github.com/googleapis/java-spanner/commit/d7ff9409e974602dc9b18f82d6dbd11d96c956bf)) +* use dummy emulator-project when no project is set ([#1363](https://www.github.com/googleapis/java-spanner/issues/1363)) ([673855e](https://www.github.com/googleapis/java-spanner/commit/673855eea8c244457ad4c8ac5abe3ad3a0a0cdde)), closes [#1345](https://www.github.com/googleapis/java-spanner/issues/1345) + +### [6.11.1](https://www.github.com/googleapis/java-spanner/compare/v6.11.0...v6.11.1) (2021-08-17) + + +### Dependencies + +* update dependency org.openjdk.jmh:jmh-core to v1.33 ([#1338](https://www.github.com/googleapis/java-spanner/issues/1338)) ([fa88b73](https://www.github.com/googleapis/java-spanner/commit/fa88b73e6535d5754e5b10493d76ddb0a33033b1)) +* update dependency org.openjdk.jmh:jmh-generator-annprocess to v1.33 ([#1339](https://www.github.com/googleapis/java-spanner/issues/1339)) ([94cfecc](https://www.github.com/googleapis/java-spanner/commit/94cfeccc336e2e56c9eb296b5c7096f575863147)) + +## [6.11.0](https://www.github.com/googleapis/java-spanner/compare/v6.10.1...v6.11.0) (2021-08-12) + + +### Features + +* release gapic-generator-java v2.0.0 ([#1334](https://www.github.com/googleapis/java-spanner/issues/1334)) ([368fb80](https://www.github.com/googleapis/java-spanner/commit/368fb80e8ae9fd9bee7af81c13bef32b26361877)) + + +### Documentation + +* use 'latest' stats package in samples to prevent build failures ([#1313](https://www.github.com/googleapis/java-spanner/issues/1313)) ([6a8351c](https://www.github.com/googleapis/java-spanner/commit/6a8351c9d2cf0fe805b87a611ff1d94d4dba3f87)), closes [#1273](https://www.github.com/googleapis/java-spanner/issues/1273) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v3 ([#1341](https://www.github.com/googleapis/java-spanner/issues/1341)) 
([de7b540](https://www.github.com/googleapis/java-spanner/commit/de7b54094b6bb2928616e2e04215f4ba5b8bc750)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2 ([#1331](https://www.github.com/googleapis/java-spanner/issues/1331)) ([cd1ad7b](https://www.github.com/googleapis/java-spanner/commit/cd1ad7b4cd1716b60f3f96ee953f76c126742788)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.0.1 ([#1344](https://www.github.com/googleapis/java-spanner/issues/1344)) ([300837f](https://www.github.com/googleapis/java-spanner/commit/300837f0a27dab89285895f753aececb8d641da9)) +* update dependency com.google.cloud:google-cloud-trace to v2 ([#1342](https://www.github.com/googleapis/java-spanner/issues/1342)) ([d24886b](https://www.github.com/googleapis/java-spanner/commit/d24886b058fd87ea744a4f375fb6affd8f9398d9)) + +### [6.10.1](https://www.github.com/googleapis/java-spanner/compare/v6.10.0...v6.10.1) (2021-07-21) + + +### Dependencies + +* update dependency com.google.cloud:grpc-gcp to v1.1.0 ([#1306](https://www.github.com/googleapis/java-spanner/issues/1306)) ([fa0c65d](https://www.github.com/googleapis/java-spanner/commit/fa0c65dc31236e05e6b10508281cf58e82ee87ef)) + +## [6.10.0](https://www.github.com/googleapis/java-spanner/compare/v6.9.1...v6.10.0) (2021-07-19) + + +### Features + +* exposes default leader in database, and leader options / replicas in instance config ([#1283](https://www.github.com/googleapis/java-spanner/issues/1283)) ([d72c2f7](https://www.github.com/googleapis/java-spanner/commit/d72c2f79f8cf0b83da00060587a079ce859c87a2)) + + +### Bug Fixes + +* shorten the test instance name ([#1284](https://www.github.com/googleapis/java-spanner/issues/1284)) ([07c3eae](https://www.github.com/googleapis/java-spanner/commit/07c3eae134df0a0a3814e0e7225e14741a269771)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-trace to v1.4.2 
([#1291](https://www.github.com/googleapis/java-spanner/issues/1291)) ([c4208ed](https://www.github.com/googleapis/java-spanner/commit/c4208ed5992ba5d1525df488a9eff64471fb0030)) + +### [6.9.1](https://www.github.com/googleapis/java-spanner/compare/v6.9.0...v6.9.1) (2021-07-05) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v2.3.4 ([#1278](https://www.github.com/googleapis/java-spanner/issues/1278)) ([c692336](https://www.github.com/googleapis/java-spanner/commit/c6923366bc407b45a6bbf736b4a1d8efad8b67b7)) + +## [6.9.0](https://www.github.com/googleapis/java-spanner/compare/v6.8.0...v6.9.0) (2021-07-05) + + +### Features + +* add support for tagging to Connection API ([#623](https://www.github.com/googleapis/java-spanner/issues/623)) ([5722372](https://www.github.com/googleapis/java-spanner/commit/5722372b7869828e372dec06e80e5b0e7280af61)) +* **spanner:** add leader_options to InstanceConfig and default_leader to Database ([#1271](https://www.github.com/googleapis/java-spanner/issues/1271)) ([f257671](https://www.github.com/googleapis/java-spanner/commit/f25767144344f0df67662f1b3ef662902384599a)) +* support setting an async executor provider ([#1263](https://www.github.com/googleapis/java-spanner/issues/1263)) ([369c8a7](https://www.github.com/googleapis/java-spanner/commit/369c8a771ec48fa1476236f800b0e8eb5982a33c)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v1.4.0 ([#1269](https://www.github.com/googleapis/java-spanner/issues/1269)) ([025e162](https://www.github.com/googleapis/java-spanner/commit/025e162813d6321dabe49e32f00934f9ae334e24)) + +## [6.8.0](https://www.github.com/googleapis/java-spanner/compare/v6.7.0...v6.8.0) (2021-06-29) + + +### Features + +* add gRPC-GCP channel pool as an option ([#1227](https://www.github.com/googleapis/java-spanner/issues/1227)) ([1fa95a9](https://www.github.com/googleapis/java-spanner/commit/1fa95a9993ea8c7a5f943ab39eced4ced4cb87e7)) 
+* spanner JSON type ([#1260](https://www.github.com/googleapis/java-spanner/issues/1260)) ([b2a56c6](https://www.github.com/googleapis/java-spanner/commit/b2a56c68695b6209e20f9f86d83d7c5a0f39c7a8)) + + +### Bug Fixes + +* Add `shopt -s nullglob` to dependencies script ([#1256](https://www.github.com/googleapis/java-spanner/issues/1256)) ([d1712f7](https://www.github.com/googleapis/java-spanner/commit/d1712f7c51752c2359045e5eabac8fc0530a2421)) + +## [6.7.0](https://www.github.com/googleapis/java-spanner/compare/v6.6.1...v6.7.0) (2021-06-21) + + +### Features + +* add support for instance processing units ([#665](https://www.github.com/googleapis/java-spanner/issues/665)) ([9c1c8e9](https://www.github.com/googleapis/java-spanner/commit/9c1c8e90b0e02e26ea3c16def49bb7e07c2b04b1)) +* **spanner:** add processing_units to Instance resource ([#1248](https://www.github.com/googleapis/java-spanner/issues/1248)) ([e3c7e8f](https://www.github.com/googleapis/java-spanner/commit/e3c7e8fbdfb5d41a1c418f176679bf5b19f22f83)) + + +### Bug Fixes + +* Update dependencies.sh to not break on mac ([#1249](https://www.github.com/googleapis/java-spanner/issues/1249)) ([1e1df84](https://www.github.com/googleapis/java-spanner/commit/1e1df84e74011fb2b665e94b428cfa78102de7fe)) + +### [6.6.1](https://www.github.com/googleapis/java-spanner/compare/v6.6.0...v6.6.1) (2021-06-10) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v2.3.3 ([#1241](https://www.github.com/googleapis/java-spanner/issues/1241)) ([9816b3f](https://www.github.com/googleapis/java-spanner/commit/9816b3fe90419486e94a4927f368c8cecfaac424)) + +## [6.6.0](https://www.github.com/googleapis/java-spanner/compare/v6.5.0...v6.6.0) (2021-06-07) + + +### Features + +* adds query optimizer statistics support ([#385](https://www.github.com/googleapis/java-spanner/issues/385)) ([e294532](https://www.github.com/googleapis/java-spanner/commit/e2945324783bc6d5a7a323578e8dbf00969f3163)) +* support encoded 
credentials in connection URL ([#1223](https://www.github.com/googleapis/java-spanner/issues/1223)) ([43d5d7e](https://www.github.com/googleapis/java-spanner/commit/43d5d7e8d7fc1b0304a6fcf940846fe269fd661a)) + + +### Documentation + +* document retry settings in sample ([#1214](https://www.github.com/googleapis/java-spanner/issues/1214)) ([ab4592d](https://www.github.com/googleapis/java-spanner/commit/ab4592d6f5040d0125b2848369c516d01fd38106)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v2.3.0 ([#1225](https://www.github.com/googleapis/java-spanner/issues/1225)) ([2023839](https://www.github.com/googleapis/java-spanner/commit/2023839cce80de0ff6451a4b6274f5da9b18416f)) +* update dependency com.google.cloud:google-cloud-monitoring to v2.3.2 ([#1229](https://www.github.com/googleapis/java-spanner/issues/1229)) ([8a23ad0](https://www.github.com/googleapis/java-spanner/commit/8a23ad047ec7fc4a8a5c8d6292678e579c323eb2)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v1.3.0 ([#1230](https://www.github.com/googleapis/java-spanner/issues/1230)) ([db64451](https://www.github.com/googleapis/java-spanner/commit/db6445133de143391dbd9da6d3393b0d2736971a)) +* update dependency com.google.cloud:google-cloud-trace to v1.4.0 ([#1226](https://www.github.com/googleapis/java-spanner/issues/1226)) ([da4407a](https://www.github.com/googleapis/java-spanner/commit/da4407a60fb2917d1ea8043b57bdff41263af241)) +* update dependency com.google.cloud:google-cloud-trace to v1.4.1 ([#1231](https://www.github.com/googleapis/java-spanner/issues/1231)) ([76af3ac](https://www.github.com/googleapis/java-spanner/commit/76af3ace6d6745673006cc1a529d66a74513c615)) +* update dependency org.openjdk.jmh:jmh-core to v1.32 ([#1221](https://www.github.com/googleapis/java-spanner/issues/1221)) ([b009c9b](https://www.github.com/googleapis/java-spanner/commit/b009c9b09a9200a674b629cc74a479f8b746e727)) +* update dependency 
org.openjdk.jmh:jmh-generator-annprocess to v1.32 ([#1222](https://www.github.com/googleapis/java-spanner/issues/1222)) ([7ef76a9](https://www.github.com/googleapis/java-spanner/commit/7ef76a910defd6f9cd24191de4eb0c523a294fea)) + +## [6.5.0](https://www.github.com/googleapis/java-spanner/compare/v6.4.4...v6.5.0) (2021-05-25) + + +### Features + +* add `gcf-owl-bot[bot]` to `ignoreAuthors` ([#1196](https://www.github.com/googleapis/java-spanner/issues/1196)) ([4f6e18d](https://www.github.com/googleapis/java-spanner/commit/4f6e18d9c8afab0acf1b66e2b32a0907008d4ff5)) +* add bufferAsync methods ([#1145](https://www.github.com/googleapis/java-spanner/issues/1145)) ([7d6816f](https://www.github.com/googleapis/java-spanner/commit/7d6816f1fd14bcd2c7f91d814855b5d921ba970d)) + + +### Bug Fixes + +* stop invoking callback after pausing and cancelling result set ([#1192](https://www.github.com/googleapis/java-spanner/issues/1192)) ([78e6784](https://www.github.com/googleapis/java-spanner/commit/78e678448782d5d16ba43ec7c10ab85b89059d88)), closes [#1191](https://www.github.com/googleapis/java-spanner/issues/1191) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v1.2.0 ([#1194](https://www.github.com/googleapis/java-spanner/issues/1194)) ([9935066](https://www.github.com/googleapis/java-spanner/commit/99350663fb638d913e803b139d89be597be9ce1d)) + +### [6.4.4](https://www.github.com/googleapis/java-spanner/compare/v6.4.3...v6.4.4) (2021-05-17) + + +### Bug Fixes + +* re-adds test verifyStatementsInFile ([#1181](https://www.github.com/googleapis/java-spanner/issues/1181)) ([7a715b4](https://www.github.com/googleapis/java-spanner/commit/7a715b429ba2a9561d24ba66404142bdc9de5a4f)) + +### [6.4.3](https://www.github.com/googleapis/java-spanner/compare/v6.4.2...v6.4.3) (2021-05-16) + + +### Bug Fixes + +* re-adds test utility method for connection ([#1178](https://www.github.com/googleapis/java-spanner/issues/1178)) 
([0e0dcb7](https://www.github.com/googleapis/java-spanner/commit/0e0dcb7cdc412e54c26d5e8f0176ac1917fa4c59)) + +### [6.4.2](https://www.github.com/googleapis/java-spanner/compare/v6.4.1...v6.4.2) (2021-05-14) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v2.2.3 ([#1170](https://www.github.com/googleapis/java-spanner/issues/1170)) ([3bb6885](https://www.github.com/googleapis/java-spanner/commit/3bb688519774d2865701c6ffea5687513a8c7776)) +* update dependency com.google.cloud:google-cloud-trace to v1.3.4 ([#1171](https://www.github.com/googleapis/java-spanner/issues/1171)) ([6faa310](https://www.github.com/googleapis/java-spanner/commit/6faa310a5c7f035c39eeaa65eb73584f535a4aeb)) + +### [6.4.1](https://www.github.com/googleapis/java-spanner/compare/v6.4.0...v6.4.1) (2021-05-13) + + +### Documentation + +* close Spanner instance when it is no longer needed ([#1116](https://www.github.com/googleapis/java-spanner/issues/1116)) ([85bd0cf](https://www.github.com/googleapis/java-spanner/commit/85bd0cf11eab7b2ec47a082a4c2c0c4d9cea01d4)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v2.2.2 ([#1158](https://www.github.com/googleapis/java-spanner/issues/1158)) ([63eed2e](https://www.github.com/googleapis/java-spanner/commit/63eed2e66fb063358e8b123ba5f919663b70bbe4)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v1.1.0 ([#1152](https://www.github.com/googleapis/java-spanner/issues/1152)) ([2e7f18a](https://www.github.com/googleapis/java-spanner/commit/2e7f18a52ef2ed5de6a87169eeefd570844a4c55)) +* update dependency org.openjdk.jmh:jmh-core to v1.30 ([#1137](https://www.github.com/googleapis/java-spanner/issues/1137)) ([699a426](https://www.github.com/googleapis/java-spanner/commit/699a4260e3b1a4cf53fc690910aeeadac293e469)) +* update dependency org.openjdk.jmh:jmh-core to v1.31 ([#1160](https://www.github.com/googleapis/java-spanner/issues/1160)) 
([43a0fb9](https://www.github.com/googleapis/java-spanner/commit/43a0fb97352d928e16ec5138ed2ea494ebaae343)) +* update dependency org.openjdk.jmh:jmh-generator-annprocess to v1.30 ([#1138](https://www.github.com/googleapis/java-spanner/issues/1138)) ([ad6649d](https://www.github.com/googleapis/java-spanner/commit/ad6649df03a1a193dd524a84fe9dc1a72ed14e09)) +* update dependency org.openjdk.jmh:jmh-generator-annprocess to v1.31 ([#1161](https://www.github.com/googleapis/java-spanner/issues/1161)) ([4d17da2](https://www.github.com/googleapis/java-spanner/commit/4d17da25977dde0cc1032192045d9ee26d3fae09)) + +## [6.4.0](https://www.github.com/googleapis/java-spanner/compare/v6.3.3...v6.4.0) (2021-04-29) + + +### Features + +* adds getValue to ResultSet ([#1073](https://www.github.com/googleapis/java-spanner/issues/1073)) ([7792c90](https://www.github.com/googleapis/java-spanner/commit/7792c9085a6e4ce1fb9fe2f8df4279f30539d87e)) + + +### Bug Fixes + +* allow using case-insensitive user-agent key ([#1110](https://www.github.com/googleapis/java-spanner/issues/1110)) ([f4f9e43](https://www.github.com/googleapis/java-spanner/commit/f4f9e43ce102788b81c032df8da223108e484252)) +* check for timeout in connection after last statement finished ([#1086](https://www.github.com/googleapis/java-spanner/issues/1086)) ([aec0b54](https://www.github.com/googleapis/java-spanner/commit/aec0b541672d66fe0c34816b1c1b5a6bdeffccd1)), closes [#1077](https://www.github.com/googleapis/java-spanner/issues/1077) +* check for timeout in connection after last statement finished ([#1086](https://www.github.com/googleapis/java-spanner/issues/1086)) ([51d753c](https://www.github.com/googleapis/java-spanner/commit/51d753c507e7248132eb5d6ea2c4b735542eda49)), closes [#1077](https://www.github.com/googleapis/java-spanner/issues/1077) +* do not keep references to invalidated clients ([#1093](https://www.github.com/googleapis/java-spanner/issues/1093)) 
([b4595a6](https://www.github.com/googleapis/java-spanner/commit/b4595a6b52417c716f8e70563bb5a7ef05067707)), closes [#1089](https://www.github.com/googleapis/java-spanner/issues/1089) +* prevent potential NullPointerException in Struct with Array field that contains null elements ([#1107](https://www.github.com/googleapis/java-spanner/issues/1107)) ([c414abb](https://www.github.com/googleapis/java-spanner/commit/c414abb9ec59f8200ba20e08846e442321de76bd)), closes [#1106](https://www.github.com/googleapis/java-spanner/issues/1106) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v2.2.1 ([#1104](https://www.github.com/googleapis/java-spanner/issues/1104)) ([37ca990](https://www.github.com/googleapis/java-spanner/commit/37ca9905bb150d1791e70103e002261e40261b05)) +* update dependency com.google.cloud:google-cloud-trace to v1.3.3 ([#1103](https://www.github.com/googleapis/java-spanner/issues/1103)) ([b4327c0](https://www.github.com/googleapis/java-spanner/commit/b4327c0666bb97d1d591b5ce65a6ecdc51f5a49d)) + + +### Documentation + +* fix javadoc for Date type ([#1102](https://www.github.com/googleapis/java-spanner/issues/1102)) ([ce095f7](https://www.github.com/googleapis/java-spanner/commit/ce095f7b0c196e03ea248eeb9c5060f4f430d8c4)) +* use default timeout for restore operation ([#1109](https://www.github.com/googleapis/java-spanner/issues/1109)) ([3f3c13e](https://www.github.com/googleapis/java-spanner/commit/3f3c13e7fcbf08b8ab6f0d11d7451b3ae86c9500)), closes [#1019](https://www.github.com/googleapis/java-spanner/issues/1019) + +### [6.3.3](https://www.github.com/googleapis/java-spanner/compare/v6.3.2...v6.3.3) (2021-04-24) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v1 ([#1095](https://www.github.com/googleapis/java-spanner/issues/1095)) ([a21e0bb](https://www.github.com/googleapis/java-spanner/commit/a21e0bbafad086f29d3c719b9e4a7690c1cac129)) + +### 
[6.3.2](https://www.github.com/googleapis/java-spanner/compare/v6.3.1...v6.3.2) (2021-04-20) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-trace to v1.3.2 ([#1081](https://www.github.com/googleapis/java-spanner/issues/1081)) ([e145c95](https://www.github.com/googleapis/java-spanner/commit/e145c9531d70af6c11be9f682fb52708d0dcb569)) + +### [6.3.1](https://www.github.com/googleapis/java-spanner/compare/v6.3.0...v6.3.1) (2021-04-20) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.21.1 ([#1074](https://www.github.com/googleapis/java-spanner/issues/1074)) ([ccd8cd1](https://www.github.com/googleapis/java-spanner/commit/ccd8cd1fb96c9d2046cc9c3ec4f35d8e45ebb5f5)) + +## [6.3.0](https://www.github.com/googleapis/java-spanner/compare/v6.2.1...v6.3.0) (2021-04-19) + + +### Features + +* async work as functional interface ([#1068](https://www.github.com/googleapis/java-spanner/issues/1068)) ([734fb60](https://www.github.com/googleapis/java-spanner/commit/734fb6095819bde94ea482b02a8e77983f2a5449)) +* **spanner:** add `progress` field to `UpdateDatabaseDdlMetadata` ([#1063](https://www.github.com/googleapis/java-spanner/issues/1063)) ([7992342](https://www.github.com/googleapis/java-spanner/commit/7992342bffc273ad8249e7564ae9ef51764bf83c)) +* transaction callable as functional interface ([#1066](https://www.github.com/googleapis/java-spanner/issues/1066)) ([b036a77](https://www.github.com/googleapis/java-spanner/commit/b036a77196886f16d2738e70f676ccc99a52874c)) + + +### Bug Fixes + +* release scripts from issuing overlapping phases ([#1064](https://www.github.com/googleapis/java-spanner/issues/1064)) ([2f6fe5e](https://www.github.com/googleapis/java-spanner/commit/2f6fe5e87cc4c9ae26a6f2867411004a8c2b39fe)) + + +### Dependencies + +* update dependency org.openjdk.jmh:jmh-generator-annprocess to v1.29 ([#1014](https://www.github.com/googleapis/java-spanner/issues/1014)) 
([81ee9b0](https://www.github.com/googleapis/java-spanner/commit/81ee9b02d5846f6569f588d3b17da4faf2f2dae9)) + +### [6.2.1](https://www.github.com/googleapis/java-spanner/compare/v6.2.0...v6.2.1) (2021-04-13) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v2.2.0 ([#1054](https://www.github.com/googleapis/java-spanner/issues/1054)) ([0b59b94](https://www.github.com/googleapis/java-spanner/commit/0b59b946b31c4b5ca95a2c279bdc835f23f1a923)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.21.0 ([#1045](https://www.github.com/googleapis/java-spanner/issues/1045)) ([94dcb46](https://www.github.com/googleapis/java-spanner/commit/94dcb468e807516f07777fc62faff345441ccdf6)) +* update dependency com.google.cloud:google-cloud-trace to v1.3.1 ([#1050](https://www.github.com/googleapis/java-spanner/issues/1050)) ([cbb1038](https://www.github.com/googleapis/java-spanner/commit/cbb103846e33210c914f51f64e1e47f32ff775da)) + +## [6.2.0](https://www.github.com/googleapis/java-spanner/compare/v6.1.0...v6.2.0) (2021-04-07) + + +### Features + +* add support for tagging ([#576](https://www.github.com/googleapis/java-spanner/issues/576)) ([2a9086f](https://www.github.com/googleapis/java-spanner/commit/2a9086fcc7e8caae55f71bf5616b2d0db18681d3)) +* Support query hints for DML statements ([#1030](https://www.github.com/googleapis/java-spanner/issues/1030)) ([6a58433](https://www.github.com/googleapis/java-spanner/commit/6a58433919d9f69e91639a1b52cbbc1151ca6804)) + + +### Bug Fixes + +* local connection checker ignores exceptions ([#1036](https://www.github.com/googleapis/java-spanner/issues/1036)) ([2d61bc4](https://www.github.com/googleapis/java-spanner/commit/2d61bc410b7c680169129725bcc11069c2390505)) + +## [6.1.0](https://www.github.com/googleapis/java-spanner/compare/v6.0.0...v6.1.0) (2021-03-31) + + +### Features + +* support RPC priority ([#676](https://www.github.com/googleapis/java-spanner/issues/676)) 
([0bc9972](https://www.github.com/googleapis/java-spanner/commit/0bc9972b140d6a3de9c5481a4b73ecba3e139656)) + + +### Bug Fixes + +* plain text when testing emulator connection ([#1020](https://www.github.com/googleapis/java-spanner/issues/1020)) ([1e6e23f](https://www.github.com/googleapis/java-spanner/commit/1e6e23f8d64cd16d5e5034c89c65283b3b0cae89)) +* retry cancelled error on first statement in transaction ([#999](https://www.github.com/googleapis/java-spanner/issues/999)) ([a95f6f8](https://www.github.com/googleapis/java-spanner/commit/a95f6f8dc21d27133a0150ea8df963e2bc543e40)), closes [#938](https://www.github.com/googleapis/java-spanner/issues/938) +* transaction retries should not timeout ([#1009](https://www.github.com/googleapis/java-spanner/issues/1009)) ([6d9c3b8](https://www.github.com/googleapis/java-spanner/commit/6d9c3b884357ddc4d314ebdfac5fc6dda2de3b49)), closes [#1008](https://www.github.com/googleapis/java-spanner/issues/1008) +* update link and directory ([#1012](https://www.github.com/googleapis/java-spanner/issues/1012)) ([865bf01](https://www.github.com/googleapis/java-spanner/commit/865bf011093341382a2c70f5530e9f7ef58b2d5a)) + + +### Dependencies + +* update dependency org.openjdk.jmh:jmh-core to v1.29 ([#1013](https://www.github.com/googleapis/java-spanner/issues/1013)) ([a71079f](https://www.github.com/googleapis/java-spanner/commit/a71079f5bb7f209f6afe6f5bc21a58d39e131086)) + + +### Documentation + +* improve error messages ([#1011](https://www.github.com/googleapis/java-spanner/issues/1011)) ([7dacfdc](https://www.github.com/googleapis/java-spanner/commit/7dacfdc7ca1219a0ddf5929d7b46860b46e3c300)) +* new libraries-bom ([#1025](https://www.github.com/googleapis/java-spanner/issues/1025)) ([3485252](https://www.github.com/googleapis/java-spanner/commit/3485252ce3d98a01fca1b6a9e1ca031283440b5e)) + +## [6.0.0](https://www.github.com/googleapis/java-spanner/compare/v5.2.0...v6.0.0) (2021-03-21) + + +### ⚠ BREAKING CHANGES + +* add closeAsync() 
method to Connection (#984) +* drops support of Java 7 (#946) +* customer-managed encryption keys for Spanner (#666) + +### Features + +* add closeAsync() method to Connection ([#984](https://www.github.com/googleapis/java-spanner/issues/984)) ([e7ec96e](https://www.github.com/googleapis/java-spanner/commit/e7ec96ec09a9d273d4f576356d3e4c6cbbb6de9e)) +* customer-managed encryption keys for Spanner ([#666](https://www.github.com/googleapis/java-spanner/issues/666)) ([8338116](https://www.github.com/googleapis/java-spanner/commit/8338116dffe847931cae1212333af04338ea1d45)) +* drops support of Java 7 ([#946](https://www.github.com/googleapis/java-spanner/issues/946)) ([7af1951](https://www.github.com/googleapis/java-spanner/commit/7af19514dfae5f87ba50572d8867568d2c09daab)) + +## [5.2.0](https://www.github.com/googleapis/java-spanner/compare/v5.1.0...v5.2.0) (2021-03-18) + + +### Features + +* add autoConfigEmulator connection option ([#931](https://www.github.com/googleapis/java-spanner/issues/931)) ([32fdd60](https://www.github.com/googleapis/java-spanner/commit/32fdd606f392bc97dab7f37b1c566b3954839f7e)) + + +### Bug Fixes + +* all throwables should be ignored in shutdown hook ([#950](https://www.github.com/googleapis/java-spanner/issues/950)) ([213dddc](https://www.github.com/googleapis/java-spanner/commit/213dddcb4f84e19be2f98115493208e3af819485)), closes [#949](https://www.github.com/googleapis/java-spanner/issues/949) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v2.1.0 ([#953](https://www.github.com/googleapis/java-spanner/issues/953)) ([f991c87](https://www.github.com/googleapis/java-spanner/commit/f991c875d7ec62d19d048576263c5714d4d48a3f)) +* update dependency com.google.cloud:google-cloud-trace to v1.3.0 ([#947](https://www.github.com/googleapis/java-spanner/issues/947)) ([c1d560b](https://www.github.com/googleapis/java-spanner/commit/c1d560ba4e799953aff6ba146f6f1b679a4b75b7)) +* update dependency 
org.openjdk.jmh:jmh-generator-annprocess to v1.28 ([#924](https://www.github.com/googleapis/java-spanner/issues/924)) ([693fe5d](https://www.github.com/googleapis/java-spanner/commit/693fe5d4df3d279edb8f6f7f9879366980fd81d8)) + +## [5.1.0](https://www.github.com/googleapis/java-spanner/compare/v5.0.0...v5.1.0) (2021-03-10) + + +### Features + +* add client lib token for Liquibase ([#925](https://www.github.com/googleapis/java-spanner/issues/925)) ([0d93d92](https://www.github.com/googleapis/java-spanner/commit/0d93d92fcd7c8bb2ffd3198560c4be3e4afc4990)) +* adds samples for PITR ([#837](https://www.github.com/googleapis/java-spanner/issues/837)) ([55fa0cc](https://www.github.com/googleapis/java-spanner/commit/55fa0ccca4faf44da8f9a3553ab4b35574c14830)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v2.0.14 ([#919](https://www.github.com/googleapis/java-spanner/issues/919)) ([178500c](https://www.github.com/googleapis/java-spanner/commit/178500c7e48cbdeb45f657d9c413e9afdacefbab)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.20.1 ([#944](https://www.github.com/googleapis/java-spanner/issues/944)) ([b74b764](https://www.github.com/googleapis/java-spanner/commit/b74b7648343dc789b60fb2636615f288b6e6c854)) +* update dependency org.json:json to v20210307 ([#943](https://www.github.com/googleapis/java-spanner/issues/943)) ([4088981](https://www.github.com/googleapis/java-spanner/commit/4088981314097647e3ed79f2c748545cac6fc34e)) +* update dependency org.openjdk.jmh:jmh-core to v1.28 ([#923](https://www.github.com/googleapis/java-spanner/issues/923)) ([b4d6e5a](https://www.github.com/googleapis/java-spanner/commit/b4d6e5ac762393b70b684159d11a55edf8f2fba7)) + +## [5.0.0](https://www.github.com/googleapis/java-spanner/compare/v4.0.2...v5.0.0) (2021-02-26) + + +### ⚠ BREAKING CHANGES + +* add CommitStats to Connection API (#608) + +### Features + +* add CommitStats to Connection API 
([#608](https://www.github.com/googleapis/java-spanner/issues/608)) ([b2b1191](https://www.github.com/googleapis/java-spanner/commit/b2b1191763cd47ca39849bdf93292ed5ef3e0c8a)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.20.0 ([#917](https://www.github.com/googleapis/java-spanner/issues/917)) ([aca9d45](https://www.github.com/googleapis/java-spanner/commit/aca9d45c4e86c45a75e6b5e0d3794e7ac97bdf1a)) +* update dependency com.google.cloud:google-cloud-trace to v1.2.13 ([#918](https://www.github.com/googleapis/java-spanner/issues/918)) ([8843998](https://www.github.com/googleapis/java-spanner/commit/8843998a1c5ddb9228fa16162e0ea13f859f7f35)) + +### [4.0.2](https://www.github.com/googleapis/java-spanner/compare/v4.0.1...v4.0.2) (2021-02-23) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v2.0.13 ([#901](https://www.github.com/googleapis/java-spanner/issues/901)) ([10749c7](https://www.github.com/googleapis/java-spanner/commit/10749c7a074d33c853b0f11a0e6c6ee5f09e75c9)) +* update dependency com.google.cloud:google-cloud-trace to v1.2.12 ([#896](https://www.github.com/googleapis/java-spanner/issues/896)) ([84ee6e0](https://www.github.com/googleapis/java-spanner/commit/84ee6e0d442a29893e1ac77fa7882ed0407c9a7d)) + +### [4.0.1](https://www.github.com/googleapis/java-spanner/compare/v4.0.0...v4.0.1) (2021-02-22) + + +### Bug Fixes + +* wrong use of getRetryDelayInMillis() / 1000 in documentation and retry loops ([#885](https://www.github.com/googleapis/java-spanner/issues/885)) ([a55d7ce](https://www.github.com/googleapis/java-spanner/commit/a55d7ce64fff434151c1c3af0796d290e9db7470)), closes [#874](https://www.github.com/googleapis/java-spanner/issues/874) + + +### Documentation + +* Add OpenCensus to OpenTelemetry shim to README ([#879](https://www.github.com/googleapis/java-spanner/issues/879)) 
([b58d73d](https://www.github.com/googleapis/java-spanner/commit/b58d73ddb768c0d33d149ed8bc84f5af618514e1)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.19.0 ([#895](https://www.github.com/googleapis/java-spanner/issues/895)) ([e3e2c95](https://www.github.com/googleapis/java-spanner/commit/e3e2c95936f40a7954639a95c84cc9495e318e55)) + +## [4.0.0](https://www.github.com/googleapis/java-spanner/compare/v3.3.2...v4.0.0) (2021-02-17) + + +### ⚠ BREAKING CHANGES + +* Point In Time Recovery (PITR) (#452) +* add support for CommitStats (#544) + +### Features + +* add option for returning Spanner commit stats ([#817](https://www.github.com/googleapis/java-spanner/issues/817)) ([80d3585](https://www.github.com/googleapis/java-spanner/commit/80d3585870b81949ec641291e5a88fe391f78e27)) +* add support for CommitStats ([#544](https://www.github.com/googleapis/java-spanner/issues/544)) ([44aa384](https://www.github.com/googleapis/java-spanner/commit/44aa384429056dd6c6563351c43fe7dcac451008)) +* allow session pool settings in connection url ([#821](https://www.github.com/googleapis/java-spanner/issues/821)) ([e1e9152](https://www.github.com/googleapis/java-spanner/commit/e1e915289755e5f46ba07569d85afda5df5e3f0d)) +* generate sample code in the Java microgenerator ([#859](https://www.github.com/googleapis/java-spanner/issues/859)) ([7cdfb82](https://www.github.com/googleapis/java-spanner/commit/7cdfb82b40487600547d0bad92119508161ca689)) +* Point In Time Recovery (PITR) ([#452](https://www.github.com/googleapis/java-spanner/issues/452)) ([ab14a5e](https://www.github.com/googleapis/java-spanner/commit/ab14a5ec2dc2b7e2141305b5326f436eb6eee76f)) + + +### Bug Fixes + +* allows user-agent header with header provider ([#871](https://www.github.com/googleapis/java-spanner/issues/871)) ([3de7e2a](https://www.github.com/googleapis/java-spanner/commit/3de7e2a91349cac5d79a32d2cda7ca727140f0bf)) +* make compiled statements immutable 
([#843](https://www.github.com/googleapis/java-spanner/issues/843)) ([118d1b3](https://www.github.com/googleapis/java-spanner/commit/118d1b31f5f7771023766fd72a8229db80f1f5a2)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v2.0.12 ([#854](https://www.github.com/googleapis/java-spanner/issues/854)) ([58cebd8](https://www.github.com/googleapis/java-spanner/commit/58cebd85a9d82bd1526b9eae98892181f1a022f1)) +* update dependency com.google.cloud:google-cloud-trace to v1.2.11 ([#825](https://www.github.com/googleapis/java-spanner/issues/825)) ([49c8c5d](https://www.github.com/googleapis/java-spanner/commit/49c8c5d241803565fa9ff96ba55f3eb00ed5b85e)) + + +### Documentation + +* libraries-bom 16.4.0 ([#867](https://www.github.com/googleapis/java-spanner/issues/867)) ([5af3673](https://www.github.com/googleapis/java-spanner/commit/5af36739532037360dfd504a4a0988562550526c)) + +### [3.3.2](https://www.github.com/googleapis/java-spanner/compare/v3.3.1...v3.3.2) (2021-01-18) + + +### Bug Fixes + +* closes pool maintainer on invalidation ([#784](https://www.github.com/googleapis/java-spanner/issues/784)) ([d122ed9](https://www.github.com/googleapis/java-spanner/commit/d122ed9662c9f01efd7d2a9797b1252f0427089c)) +* UNAVAILABLE error on first query could cause transaction to get stuck ([#807](https://www.github.com/googleapis/java-spanner/issues/807)) ([c7dc6e6](https://www.github.com/googleapis/java-spanner/commit/c7dc6e6b11af76cb5db1f160c4466a5d75b524b2)), closes [#799](https://www.github.com/googleapis/java-spanner/issues/799) + + +### Dependencies + +* update opencensus.version to v0.28.3 ([#806](https://www.github.com/googleapis/java-spanner/issues/806)) ([77910a0](https://www.github.com/googleapis/java-spanner/commit/77910a04e0fa42c90064fd533b6c13fe0372fb1e)) + +### [3.3.1](https://www.github.com/googleapis/java-spanner/compare/v3.3.0...v3.3.1) (2021-01-14) + +### ⚠ IMPORTANT: Known issue with this version of the client + +Since 
[v3.0.0](https://github.com/googleapis/java-spanner/releases/tag/v3.0.0), transactions can get stuck if the Spanner backend returns a retryable error when consuming the first record of a read / query in a transaction. + +A [fix](https://github.com/googleapis/java-spanner/pull/807) is submitted and available in version [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) + +**Please use [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) instead of this version.** + +Apologies for the inconvenience. + +### Bug Fixes + +* blanks span for session keepAlive traces ([#797](https://www.github.com/googleapis/java-spanner/issues/797)) ([1a86e4f](https://www.github.com/googleapis/java-spanner/commit/1a86e4fd5b6198c300c13eba4d3d9d91c12c43f7)) +* mark transaction as invalid if no tx is returned before RS is closed ([#791](https://www.github.com/googleapis/java-spanner/issues/791)) ([e02e5a7](https://www.github.com/googleapis/java-spanner/commit/e02e5a7d95c0e92d9f13640dd2afe5b899f4e56d)) +* remove time series before adding it ([#766](https://www.github.com/googleapis/java-spanner/issues/766)) ([90255ea](https://www.github.com/googleapis/java-spanner/commit/90255ea7a1cc70ba4f4ab48551c509f503981540)), closes [#202](https://www.github.com/googleapis/java-spanner/issues/202) +* safeguard against statements errors when requesting tx ([#800](https://www.github.com/googleapis/java-spanner/issues/800)) ([c4776e4](https://www.github.com/googleapis/java-spanner/commit/c4776e42ad4a2795b0bfc6e1a9fb10c40d64a809)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.17.1 ([#794](https://www.github.com/googleapis/java-spanner/issues/794)) ([f0beabb](https://www.github.com/googleapis/java-spanner/commit/f0beabb228a4f555e1bcb1817a14e8074a54ef8c)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.18.0 ([#796](https://www.github.com/googleapis/java-spanner/issues/796)) 
([1a71e50](https://www.github.com/googleapis/java-spanner/commit/1a71e503c68eb10ca140fe93f281a0474ddf21d3)) + +## [3.3.0](https://www.github.com/googleapis/java-spanner/compare/v3.2.1...v3.3.0) (2021-01-07) + +### ⚠ IMPORTANT: Known issue with this version of the client + +Since [v3.0.0](https://github.com/googleapis/java-spanner/releases/tag/v3.0.0), transactions can get stuck if the Spanner backend returns a retryable error when consuming the first record of a read / query in a transaction. + +A [fix](https://github.com/googleapis/java-spanner/pull/807) is submitted and available in version [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) + +**Please use [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) instead of this version.** + +Apologies for the inconvenience. + +### Features + +* attempt DirectPath by default ([#770](https://www.github.com/googleapis/java-spanner/issues/770)) ([dc02244](https://www.github.com/googleapis/java-spanner/commit/dc02244d5ad29715f0c5d4c0ba8070659744c512)) + + +### Bug Fixes + +* Set up DirectPath e2e tests correctly ([#780](https://www.github.com/googleapis/java-spanner/issues/780)) ([9b94c6e](https://www.github.com/googleapis/java-spanner/commit/9b94c6ef54776fdb8868acf04e371599b7500d57)) + +### [3.2.1](https://www.github.com/googleapis/java-spanner/compare/v3.2.0...v3.2.1) (2021-01-06) + +### ⚠ IMPORTANT: Known issue with this version of the client + +Since [v3.0.0](https://github.com/googleapis/java-spanner/releases/tag/v3.0.0), transactions can get stuck if the Spanner backend returns a retryable error when consuming the first record of a read / query in a transaction. 
+ +A [fix](https://github.com/googleapis/java-spanner/pull/807) is submitted and available in version [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) + +**Please use [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) instead of this version.** + +Apologies for the inconvenience. + +### Bug Fixes + +* grpc-alts is used not only in tests ([#761](https://www.github.com/googleapis/java-spanner/issues/761)) ([72d93d5](https://www.github.com/googleapis/java-spanner/commit/72d93d5aa9a301c64c9d572d10211882a359e414)) + + +### Dependencies + +* grpc-alts is only used for tests ([#757](https://www.github.com/googleapis/java-spanner/issues/757)) ([c8ef46f](https://www.github.com/googleapis/java-spanner/commit/c8ef46f2637b58cc71d023764cdc11a7414d855f)) +* update dependency com.google.cloud:google-cloud-monitoring to v2.0.11 ([#754](https://www.github.com/googleapis/java-spanner/issues/754)) ([ee2de33](https://www.github.com/googleapis/java-spanner/commit/ee2de3356038cef429eb4d3fa67656e68994bc46)) + + +### Documentation + +* add sample for timeout for one RPC ([#707](https://www.github.com/googleapis/java-spanner/issues/707)) ([056f54f](https://www.github.com/googleapis/java-spanner/commit/056f54f3cc10d103151fccba569d46796a103591)) +* cleanup inner region tags ([#764](https://www.github.com/googleapis/java-spanner/issues/764)) ([90ad9d6](https://www.github.com/googleapis/java-spanner/commit/90ad9d614bc1950f46d148930e06bde93aeb2098)) +* documents resume on update database ddl ([#767](https://www.github.com/googleapis/java-spanner/issues/767)) ([aeb255d](https://www.github.com/googleapis/java-spanner/commit/aeb255d2e5998ebb6f3eb7f655f63c957d5d92bd)) + +## [3.2.0](https://www.github.com/googleapis/java-spanner/compare/v3.1.3...v3.2.0) (2020-12-17) + +### ⚠ IMPORTANT: Known issue with this version of the client + +Since [v3.0.0](https://github.com/googleapis/java-spanner/releases/tag/v3.0.0), transactions can get stuck if the Spanner 
backend returns a retryable error when consuming the first record of a read / query in a transaction. + +A [fix](https://github.com/googleapis/java-spanner/pull/807) is submitted and available in version [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) + +**Please use [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) instead of this version.** + +Apologies for the inconvenience. + +### Features + +* include client version in user agent header ([#747](https://www.github.com/googleapis/java-spanner/issues/747)) ([fc63bc3](https://www.github.com/googleapis/java-spanner/commit/fc63bc3f1bd9cdd83156cc63548b544188de6592)) +* introduce TransactionOptions and UpdateOptions ([#716](https://www.github.com/googleapis/java-spanner/issues/716)) ([5c96fab](https://www.github.com/googleapis/java-spanner/commit/5c96fab6d1c19518d52d0a7f0d634f0526066f03)) + + +### Bug Fixes + +* reduce the probability of RESOURCE_EXHAUSTED errors during tests ([#734](https://www.github.com/googleapis/java-spanner/issues/734)) ([cd946d7](https://www.github.com/googleapis/java-spanner/commit/cd946d71501a2af7a2b3bb986ef75272c3ed92e1)), closes [#733](https://www.github.com/googleapis/java-spanner/issues/733) + + +### Documentation + +* homogenize region tags ([#752](https://www.github.com/googleapis/java-spanner/issues/752)) ([2b3775a](https://www.github.com/googleapis/java-spanner/commit/2b3775a02466176695d7b88312b17c1aeedfbc16)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.17.0 ([#751](https://www.github.com/googleapis/java-spanner/issues/751)) ([f52776f](https://www.github.com/googleapis/java-spanner/commit/f52776f3af1c9653bfdd38aa1dac1a0d1e727b7f)) +* update dependency com.google.cloud:google-cloud-trace to v1.2.10 ([#759](https://www.github.com/googleapis/java-spanner/issues/759)) ([405c4cc](https://www.github.com/googleapis/java-spanner/commit/405c4cc1af42d4440157438986c8911695ee32d6)) + +### 
[3.1.3](https://www.github.com/googleapis/java-spanner/compare/v3.1.2...v3.1.3) (2020-12-14) + +### ⚠ IMPORTANT: Known issue with this version of the client + +Since [v3.0.0](https://github.com/googleapis/java-spanner/releases/tag/v3.0.0), transactions can get stuck if the Spanner backend returns a retryable error when consuming the first record of a read / query in a transaction. + +A [fix](https://github.com/googleapis/java-spanner/pull/807) is submitted and available in version [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) + +**Please use [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) instead of this version.** + +Apologies for the inconvenience. + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.16.1 ([09968d5](https://www.github.com/googleapis/java-spanner/commit/09968d5092268b6ac2083b6914185f5e73d23648)) + +### [3.1.2](https://www.github.com/googleapis/java-spanner/compare/v3.1.1...v3.1.2) (2020-12-14) + +### ⚠ IMPORTANT: Known issue with this version of the client + +Since [v3.0.0](https://github.com/googleapis/java-spanner/releases/tag/v3.0.0), transactions can get stuck if the Spanner backend returns a retryable error when consuming the first record of a read / query in a transaction. + +A [fix](https://github.com/googleapis/java-spanner/pull/807) is submitted and available in version [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) + +**Please use [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) instead of this version.** + +Apologies for the inconvenience. 
+ +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v2.0.9 ([#710](https://www.github.com/googleapis/java-spanner/issues/710)) ([37a636d](https://www.github.com/googleapis/java-spanner/commit/37a636d989d2783875065b89141e532064f2647b)) + +### [3.1.1](https://www.github.com/googleapis/java-spanner/compare/v3.1.0...v3.1.1) (2020-12-10) + +### ⚠ IMPORTANT: Known issue with this version of the client + +Since [v3.0.0](https://github.com/googleapis/java-spanner/releases/tag/v3.0.0), transactions can get stuck if the Spanner backend returns a retryable error when consuming the first record of a read / query in a transaction. + +A [fix](https://github.com/googleapis/java-spanner/pull/807) is submitted and available in version [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) + +**Please use [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) instead of this version.** + +Apologies for the inconvenience. + +### Dependencies + +* update dependency com.google.cloud:google-cloud-trace to v1.2.8 ([#699](https://www.github.com/googleapis/java-spanner/issues/699)) ([e3289bd](https://www.github.com/googleapis/java-spanner/commit/e3289bdf1f5c723c88f4e719c4a7a15f5d131556)) + +## [3.1.0](https://www.github.com/googleapis/java-spanner/compare/v3.0.5...v3.1.0) (2020-12-10) + +### ⚠ IMPORTANT: Known issue with this version of the client + +Since [v3.0.0](https://github.com/googleapis/java-spanner/releases/tag/v3.0.0), transactions can get stuck if the Spanner backend returns a retryable error when consuming the first record of a read / query in a transaction. + +A [fix](https://github.com/googleapis/java-spanner/pull/807) is submitted and available in version [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) + +**Please use [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) instead of this version.** + +Apologies for the inconvenience. 
+ +### Features + +* allow lenient mode for connection properties ([#671](https://www.github.com/googleapis/java-spanner/issues/671)) ([f6a8ba6](https://www.github.com/googleapis/java-spanner/commit/f6a8ba6baff53ededf890e3f22a8e49402c98775)) +* retry admin request limit exceeded error ([#669](https://www.github.com/googleapis/java-spanner/issues/669)) ([3f9f74a](https://www.github.com/googleapis/java-spanner/commit/3f9f74aed52bce681b4bfd10d1006e5fa05b7cc9)), closes [#655](https://www.github.com/googleapis/java-spanner/issues/655) + + +### Bug Fixes + +* fixes changelog of upgrade 2.0.0 ([#672](https://www.github.com/googleapis/java-spanner/issues/672)) ([c035546](https://www.github.com/googleapis/java-spanner/commit/c0355462d839a1e38a4efec9e4019272a76d822f)) +* transaction retry could fail if tx contained failed statements ([#688](https://www.github.com/googleapis/java-spanner/issues/688)) ([f78c64e](https://www.github.com/googleapis/java-spanner/commit/f78c64e3e2bee6d6ed1f44a0b2e57249cba0e6d0)), closes [#685](https://www.github.com/googleapis/java-spanner/issues/685) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.16.0 ([#680](https://www.github.com/googleapis/java-spanner/issues/680)) ([81cba9a](https://www.github.com/googleapis/java-spanner/commit/81cba9ade891aa65176d4be137f902651499b05c)) +* update dependency com.google.cloud:google-cloud-trace to v1.2.7 ([#646](https://www.github.com/googleapis/java-spanner/issues/646)) ([0e17be0](https://www.github.com/googleapis/java-spanner/commit/0e17be0f81483eba4570faf884388cb43a42d84d)) +* update dependency org.openjdk.jmh:jmh-core to v1.27 ([#691](https://www.github.com/googleapis/java-spanner/issues/691)) ([a2e82e4](https://www.github.com/googleapis/java-spanner/commit/a2e82e424802f1544443ee29588bd1fabe3f38c3)) +* update dependency org.openjdk.jmh:jmh-generator-annprocess to v1.27 ([#692](https://www.github.com/googleapis/java-spanner/issues/692)) 
([bca15c2](https://www.github.com/googleapis/java-spanner/commit/bca15c226a914c8728a6a52083dd1ff074cc97e8)) + +### [3.0.5](https://www.github.com/googleapis/java-spanner/compare/v3.0.4...v3.0.5) (2020-11-19) + +### ⚠ IMPORTANT: Known issue with this version of the client + +Since [v3.0.0](https://github.com/googleapis/java-spanner/releases/tag/v3.0.0), transactions can get stuck if the Spanner backend returns a retryable error when consuming the first record of a read / query in a transaction. + +A [fix](https://github.com/googleapis/java-spanner/pull/807) is submitted and available in version [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) + +**Please use [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) instead of this version.** + +Apologies for the inconvenience. + +### Bug Fixes + +* delete stale sample databases ([#622](https://www.github.com/googleapis/java-spanner/issues/622)) ([7584baa](https://www.github.com/googleapis/java-spanner/commit/7584baa8b7051764f1055ddb1616069e7d591b64)) +* does not generate codeowners ([#631](https://www.github.com/googleapis/java-spanner/issues/631)) ([9e133a9](https://www.github.com/googleapis/java-spanner/commit/9e133a972f648ee804f324bbf55163849cb478b8)) +* query could hang transaction if ResultSet#next() is not called ([#643](https://www.github.com/googleapis/java-spanner/issues/643)) ([48f92e3](https://www.github.com/googleapis/java-spanner/commit/48f92e3d1b26644bde62a8d864cec96c3c71687d)), closes [#641](https://www.github.com/googleapis/java-spanner/issues/641) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v2.0.8 ([#644](https://www.github.com/googleapis/java-spanner/issues/644)) ([447a99b](https://www.github.com/googleapis/java-spanner/commit/447a99b9a6ccdfd3855505fca13e849fb9513943)) + +### [3.0.4](https://www.github.com/googleapis/java-spanner/compare/v3.0.3...v3.0.4) (2020-11-17) + +### ⚠ IMPORTANT: Known issue with this version 
of the client + +Since [v3.0.0](https://github.com/googleapis/java-spanner/releases/tag/v3.0.0), transactions can get stuck if the Spanner backend returns a retryable error when consuming the first record of a read / query in a transaction. + +A [fix](https://github.com/googleapis/java-spanner/pull/807) is submitted and available in version [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) + +**Please use [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) instead of this version.** + +Apologies for the inconvenience. + +### Reverts + +* Revert "fix: skip failing backup tests for now" (#634) ([b22cd7d](https://www.github.com/googleapis/java-spanner/commit/b22cd7dfc377a0445534946af29500cee316e6b1)), closes [#634](https://www.github.com/googleapis/java-spanner/issues/634) + +### [3.0.3](https://www.github.com/googleapis/java-spanner/compare/v3.0.2...v3.0.3) (2020-11-16) + +### ⚠ IMPORTANT: Known issue with this version of the client + +Since [v3.0.0](https://github.com/googleapis/java-spanner/releases/tag/v3.0.0), transactions can get stuck if the Spanner backend returns a retryable error when consuming the first record of a read / query in a transaction. + +A [fix](https://github.com/googleapis/java-spanner/pull/807) is submitted and available in version [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) + +**Please use [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) instead of this version.** + +Apologies for the inconvenience. 
+ +### Dependencies + +* update dependency org.json:json to v20201115 ([#624](https://www.github.com/googleapis/java-spanner/issues/624)) ([60e31d1](https://www.github.com/googleapis/java-spanner/commit/60e31d1947b6930ec030e1f3170dfbde62833b96)) + +### [3.0.2](https://www.github.com/googleapis/java-spanner/compare/v3.0.1...v3.0.2) (2020-11-13) + +### ⚠ IMPORTANT: Known issue with this version of the client + +Since [v3.0.0](https://github.com/googleapis/java-spanner/releases/tag/v3.0.0), transactions can get stuck if the Spanner backend returns a retryable error when consuming the first record of a read / query in a transaction. + +A [fix](https://github.com/googleapis/java-spanner/pull/807) is submitted and available in version [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) + +**Please use [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) instead of this version.** + +Apologies for the inconvenience. + +### Bug Fixes + +* adds api spanner team as samples code owners ([#610](https://www.github.com/googleapis/java-spanner/issues/610)) ([35cc56c](https://www.github.com/googleapis/java-spanner/commit/35cc56c375615b26f522b7342916fd30ce826c2d)) +* make enums in the Connection API public ([#579](https://www.github.com/googleapis/java-spanner/issues/579)) ([19b1629](https://www.github.com/googleapis/java-spanner/commit/19b1629450a8956b810e27e5d6ab8532dec75267)), closes [#253](https://www.github.com/googleapis/java-spanner/issues/253) +* session retry could cause infinite wait ([#616](https://www.github.com/googleapis/java-spanner/issues/616)) ([8a66d84](https://www.github.com/googleapis/java-spanner/commit/8a66d84edbdaeba6b021d962a9b1984a3d2f40df)), closes [#605](https://www.github.com/googleapis/java-spanner/issues/605) +* updates project / instance for samples tests ([#613](https://www.github.com/googleapis/java-spanner/issues/613)) 
([2589e7d](https://www.github.com/googleapis/java-spanner/commit/2589e7d6f400a7b050c21f46a4ab1662baa1cdb7)) + + +### Documentation + +* add descriptions for connection URL properties ([#609](https://www.github.com/googleapis/java-spanner/issues/609)) ([34221d7](https://www.github.com/googleapis/java-spanner/commit/34221d7a889c131fb1f797a0f9434deee60d755b)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v2.0.7 ([#573](https://www.github.com/googleapis/java-spanner/issues/573)) ([5135e50](https://www.github.com/googleapis/java-spanner/commit/5135e50d21417ca9514b47bd1f7eaf3d2d1417ca)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.14.1 ([#567](https://www.github.com/googleapis/java-spanner/issues/567)) ([2e9c133](https://www.github.com/googleapis/java-spanner/commit/2e9c13346423a2e1e2798bec14a1dc8799203235)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.15.0 ([#614](https://www.github.com/googleapis/java-spanner/issues/614)) ([3fa7910](https://www.github.com/googleapis/java-spanner/commit/3fa7910c8e5089cff1c9ed645f160a9e0ddfc351)) +* update dependency com.google.cloud:google-cloud-trace to v1.2.6 ([#574](https://www.github.com/googleapis/java-spanner/issues/574)) ([efabe0f](https://www.github.com/googleapis/java-spanner/commit/efabe0f44a5ec92ac07be3c3e964396b613099d1)) + +### [3.0.1](https://www.github.com/googleapis/java-spanner/compare/v3.0.0...v3.0.1) (2020-10-28) + +### ⚠ IMPORTANT: Known issue with this version of the client + +Since [v3.0.0](https://github.com/googleapis/java-spanner/releases/tag/v3.0.0), transactions can get stuck if the Spanner backend returns a retryable error when consuming the first record of a read / query in a transaction. 
+ +A [fix](https://github.com/googleapis/java-spanner/pull/807) is submitted and available in version [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) + +**Please use [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) instead of this version.** + +Apologies for the inconvenience. + +### Bug Fixes + +* adds assembly descriptor to snippets samples ([#559](https://www.github.com/googleapis/java-spanner/issues/559)) ([d4ae85c](https://www.github.com/googleapis/java-spanner/commit/d4ae85c91c2bda3f46cab8c9f7a4033ddd639c94)) +* always delete all backups from an owned test instance ([#557](https://www.github.com/googleapis/java-spanner/issues/557)) ([ff571b0](https://www.github.com/googleapis/java-spanner/commit/ff571b01b9dffdda44a9bd322e04ff04b5b5c57a)), closes [#542](https://www.github.com/googleapis/java-spanner/issues/542) +* fixes the code of conduct document ([#541](https://www.github.com/googleapis/java-spanner/issues/541)) ([7b9d1db](https://www.github.com/googleapis/java-spanner/commit/7b9d1db28b7037d6b18df88f00b9213f2f6dab80)) +* SessionNotFound was not retried for AsyncTransactionManager ([#552](https://www.github.com/googleapis/java-spanner/issues/552)) ([5969f83](https://www.github.com/googleapis/java-spanner/commit/5969f8313a4df6ece63ee8f14df98cbc8511f026)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.13.0 ([#521](https://www.github.com/googleapis/java-spanner/issues/521)) ([0f4c017](https://www.github.com/googleapis/java-spanner/commit/0f4c017f112478ffc7dd15b0b234a9c48cd55a6e)) + +## [3.0.0](https://www.github.com/googleapis/java-spanner/compare/v2.0.2...v3.0.0) (2020-10-23) + +### ⚠ IMPORTANT: Known issue with this version of the client + +Since [v3.0.0](https://github.com/googleapis/java-spanner/releases/tag/v3.0.0), transactions can get stuck if the Spanner backend returns a retryable error when consuming the first record of a read / query in a 
transaction. + +A [fix](https://github.com/googleapis/java-spanner/pull/807) is submitted and available in version [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) + +**Please use [v3.3.2](https://github.com/googleapis/java-spanner/releases/tag/v3.3.2) instead of this version.** + +Apologies for the inconvenience. + +### ⚠ BREAKING CHANGES + +* initialize should be protected (#536) +* async connection API (#392) + +### Features + +* adds options to the write operations ([#531](https://www.github.com/googleapis/java-spanner/issues/531)) ([659719d](https://www.github.com/googleapis/java-spanner/commit/659719deb5a18a87859bc174f5bde1e1147834d8)) +* async connection API ([#392](https://www.github.com/googleapis/java-spanner/issues/392)) ([3dd0675](https://www.github.com/googleapis/java-spanner/commit/3dd0675d2d7882d40a6af1e12fda3b4617019870)), closes [#378](https://www.github.com/googleapis/java-spanner/issues/378) +* inline begin transaction ([#325](https://www.github.com/googleapis/java-spanner/issues/325)) ([d08d3de](https://www.github.com/googleapis/java-spanner/commit/d08d3debb6457548bb6b04335b7a2d2227369211)), closes [#515](https://www.github.com/googleapis/java-spanner/issues/515) + + +### Bug Fixes + +* AsyncTransactionManager did not propagate statement errors ([#516](https://www.github.com/googleapis/java-spanner/issues/516)) ([4b8b845](https://www.github.com/googleapis/java-spanner/commit/4b8b8452589d63f6768b971a880a19bde80a9671)), closes [#514](https://www.github.com/googleapis/java-spanner/issues/514) +* AsyncTransactionManager should rollback on close ([#505](https://www.github.com/googleapis/java-spanner/issues/505)) ([c580df8](https://www.github.com/googleapis/java-spanner/commit/c580df8e1175bde293890c2a68e8816951c068d3)), closes [#504](https://www.github.com/googleapis/java-spanner/issues/504) +* close executor when closing pool ([#501](https://www.github.com/googleapis/java-spanner/issues/501)) 
([2086746](https://www.github.com/googleapis/java-spanner/commit/208674632b20b37f51b828c1c4cc76c91154952b)) +* fixes javadocs for Key ([#532](https://www.github.com/googleapis/java-spanner/issues/532)) ([768c19d](https://www.github.com/googleapis/java-spanner/commit/768c19dc1b9985f7823ec1e4ca92491936062f3b)) +* fixes sample tests ([ed0665c](https://www.github.com/googleapis/java-spanner/commit/ed0665c71abbce57a28cb79531783145eccab1fb)) +* ignores failing backup operations ([2ad0b7f](https://www.github.com/googleapis/java-spanner/commit/2ad0b7fc6d1369795702484181ee11ecf59a1f8b)) +* increase visibility of #get() ([#486](https://www.github.com/googleapis/java-spanner/issues/486)) ([fa6d964](https://www.github.com/googleapis/java-spanner/commit/fa6d9641b7b2a5bb1d00de6b99b0f8bc157245d6)) +* initialize should be protected ([#536](https://www.github.com/googleapis/java-spanner/issues/536)) ([5c4c8c5](https://www.github.com/googleapis/java-spanner/commit/5c4c8c58674490ba524b678b409b8b19184af02f)) +* remove dependency on commons-lang ([#494](https://www.github.com/googleapis/java-spanner/issues/494)) ([c99294b](https://www.github.com/googleapis/java-spanner/commit/c99294beb43ce1bd67cc3d12e4104641efab6710)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-monitoring to v2 ([#498](https://www.github.com/googleapis/java-spanner/issues/498)) ([3ab7348](https://www.github.com/googleapis/java-spanner/commit/3ab7348781e56384921d8287a5b5c0725dfed221)) +* update dependency com.google.cloud:google-cloud-monitoring to v2.0.5 ([#525](https://www.github.com/googleapis/java-spanner/issues/525)) ([fb874ec](https://www.github.com/googleapis/java-spanner/commit/fb874ec2e1738d569d585d30825a6e9d3de96c66)) +* update dependency com.google.cloud:google-cloud-monitoring to v2.0.6 ([#540](https://www.github.com/googleapis/java-spanner/issues/540)) ([ce3bed6](https://www.github.com/googleapis/java-spanner/commit/ce3bed6f5359224c37502331a9f776e29632d3a5)) +* update 
dependency com.google.cloud:google-cloud-shared-dependencies to v0.10.2 ([#500](https://www.github.com/googleapis/java-spanner/issues/500)) ([eb59929](https://www.github.com/googleapis/java-spanner/commit/eb5992949de326326a6bb02ec75b4a2a65a37b84)) +* update dependency com.google.cloud:google-cloud-trace to v1.2.3 ([#496](https://www.github.com/googleapis/java-spanner/issues/496)) ([0595a80](https://www.github.com/googleapis/java-spanner/commit/0595a80d5a6bb09e62ce1b6d101a3a039896c7af)) +* update dependency com.google.cloud:google-cloud-trace to v1.2.4 ([#526](https://www.github.com/googleapis/java-spanner/issues/526)) ([1020989](https://www.github.com/googleapis/java-spanner/commit/1020989e1ec1ad7f5185579da58d7a839167f05a)) +* update dependency com.google.cloud:google-cloud-trace to v1.2.5 ([#539](https://www.github.com/googleapis/java-spanner/issues/539)) ([eddd6ad](https://www.github.com/googleapis/java-spanner/commit/eddd6ad4e5093ee21290b85f15fa432d071bae59)) +* update dependency org.openjdk.jmh:jmh-core to v1.26 ([#506](https://www.github.com/googleapis/java-spanner/issues/506)) ([0f13c4c](https://www.github.com/googleapis/java-spanner/commit/0f13c4c5db37a736e391c002ed2456d78d04a090)) +* update dependency org.openjdk.jmh:jmh-generator-annprocess to v1.26 ([#507](https://www.github.com/googleapis/java-spanner/issues/507)) ([600f397](https://www.github.com/googleapis/java-spanner/commit/600f397a37f1808eb387fa3c31be0be5bb076c77)) +* update opencensus.version to v0.27.1 ([#497](https://www.github.com/googleapis/java-spanner/issues/497)) ([62fa39a](https://www.github.com/googleapis/java-spanner/commit/62fa39a2fbac6aa667073f16898e6861f0f5ec21)) +* update opencensus.version to v0.28.1 ([#533](https://www.github.com/googleapis/java-spanner/issues/533)) ([777f5fc](https://www.github.com/googleapis/java-spanner/commit/777f5fc486de7a54801c9f3f82adca561388ebfe)) +* update opencensus.version to v0.28.2 ([#538](https://www.github.com/googleapis/java-spanner/issues/538)) 
([e1843ef](https://www.github.com/googleapis/java-spanner/commit/e1843ef38580fecb1f017330f3fa1447028607c7)) + +### [2.0.2](https://www.github.com/googleapis/java-spanner/compare/v2.0.1...v2.0.2) (2020-10-02) + + +### Bug Fixes + +* improve numeric range checks ([#424](https://www.github.com/googleapis/java-spanner/issues/424)) ([9f26785](https://www.github.com/googleapis/java-spanner/commit/9f2678568be77e82c14632b1c7ffcaafb71e7679)) +* ResultSet#close() should not throw exceptions from session creation ([#487](https://www.github.com/googleapis/java-spanner/issues/487)) ([60fb986](https://www.github.com/googleapis/java-spanner/commit/60fb986f8b758a65e20c5315faf85fc0a935d0cc)) +* skip failing backup tests for now ([#463](https://www.github.com/googleapis/java-spanner/issues/463)) ([f037f2d](https://www.github.com/googleapis/java-spanner/commit/f037f2d28096cd173ba338a966fd16babe8c697e)) +* use credentials key in pool ([#430](https://www.github.com/googleapis/java-spanner/issues/430)) ([28103fb](https://www.github.com/googleapis/java-spanner/commit/28103fb2d6e293d20399ecdfd680be67d9d62a1c)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.10.0 ([#453](https://www.github.com/googleapis/java-spanner/issues/453)) ([e05ee0e](https://www.github.com/googleapis/java-spanner/commit/e05ee0eaa16984393b60fc47f94412e560c36ff1)) + +### [2.0.1](https://www.github.com/googleapis/java-spanner/compare/v2.0.0...v2.0.1) (2020-09-18) + + +### Bug Fixes + +* do not close delegate rs in callback runnable ([#425](https://www.github.com/googleapis/java-spanner/issues/425)) ([dce3ee7](https://www.github.com/googleapis/java-spanner/commit/dce3ee79664cc528415db08b3268d719ea720ded)) +* re-adds method used in internal testing ([#438](https://www.github.com/googleapis/java-spanner/issues/438)) ([c36e41b](https://www.github.com/googleapis/java-spanner/commit/c36e41bfaaf8026d2f6601ed12bfaa0d7a4ea802)) + +## 
[2.0.0](https://www.github.com/googleapis/java-spanner/compare/v1.61.0...v2.0.0) (2020-09-16) + + +### ⚠ BREAKING CHANGES + +* Remove Guava ImmutableList from API surface ([#411](https://www.github.com/googleapis/java-spanner/issues/411)) ([b35304e](https://www.github.com/googleapis/java-spanner/commit/b35304ede5c980c3c042b89247058cc5a4ab1488)) + +### Features + +* add lazy initializer ([#423](https://www.github.com/googleapis/java-spanner/issues/423)) ([e8522b9](https://www.github.com/googleapis/java-spanner/commit/e8522b9955c4a19fa7d6297fd463e9d2521dff92)) + + +### Bug Fixes + +* fix aborted handling of batchUpdateAsync ([#421](https://www.github.com/googleapis/java-spanner/issues/421)) ([6154008](https://www.github.com/googleapis/java-spanner/commit/61540085c971d7885e4938b486e051a1ed9cf35f)) +* uses old version of gax-grpc method ([#426](https://www.github.com/googleapis/java-spanner/issues/426)) ([fe6dc79](https://www.github.com/googleapis/java-spanner/commit/fe6dc796db6aa4c28832457ca54e6952a4b51c7e)) + + +### Miscellaneous Chores + +* ensure next release is major ([#428](https://www.github.com/googleapis/java-spanner/issues/428)) ([bdae120](https://www.github.com/googleapis/java-spanner/commit/bdae120fff807df760e7be2b34a559dc995adf7e)) + +## [1.61.0](https://www.github.com/googleapis/java-spanner/compare/v1.60.0...v1.61.0) (2020-09-09) + + +### Features + +* Add experimental DirectPath support ([#396](https://www.github.com/googleapis/java-spanner/issues/396)) ([46264d1](https://www.github.com/googleapis/java-spanner/commit/46264d11529accde7b520638264732937b2feb03)) +* support setting timeout per RPC ([#379](https://www.github.com/googleapis/java-spanner/issues/379)) ([5d115d4](https://www.github.com/googleapis/java-spanner/commit/5d115d49b988b3fc1c59ae41ee53d7c5a83b4d11)), closes [#378](https://www.github.com/googleapis/java-spanner/issues/378) + + +### Bug Fixes + +* iterate over async result set in sync 
([#416](https://www.github.com/googleapis/java-spanner/issues/416)) ([45d8419](https://www.github.com/googleapis/java-spanner/commit/45d8419250c904b2f785d6cc5abacf098e5781de)) +* remove potential infinite loop in administrative requests ([#398](https://www.github.com/googleapis/java-spanner/issues/398)) ([81d2c76](https://www.github.com/googleapis/java-spanner/commit/81d2c7634edd30efd428846fdbc468aee5406ed5)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.9.0 ([#409](https://www.github.com/googleapis/java-spanner/issues/409)) ([ae43165](https://www.github.com/googleapis/java-spanner/commit/ae43165ba736e17b780ce128d97b9757039275c2)) +* update dependency org.openjdk.jmh:jmh-core to v1.25.1 ([#399](https://www.github.com/googleapis/java-spanner/issues/399)) ([52fc363](https://www.github.com/googleapis/java-spanner/commit/52fc3638854116ab87b7e6bdd719134d3108229d)) +* update dependency org.openjdk.jmh:jmh-core to v1.25.2 ([#412](https://www.github.com/googleapis/java-spanner/issues/412)) ([86d18cd](https://www.github.com/googleapis/java-spanner/commit/86d18cdcc2d3aa0771e3f331ebb50591ce811113)) +* update dependency org.openjdk.jmh:jmh-generator-annprocess to v1.25.2 ([#400](https://www.github.com/googleapis/java-spanner/issues/400)) ([8a40a96](https://www.github.com/googleapis/java-spanner/commit/8a40a96123831ce992d18ecff6e699dbb7ffc82c)) + + +### Documentation + +* updates bom and spanner version in readme ([#415](https://www.github.com/googleapis/java-spanner/issues/415)) ([def7fdf](https://www.github.com/googleapis/java-spanner/commit/def7fdf9b11fc0f8e7bacd6be41875b6542f64d5)) + +## [1.60.0](https://www.github.com/googleapis/java-spanner/compare/v1.59.0...v1.60.0) (2020-08-18) + + +### Features + +* adds clirr check on pre-commit hook ([#388](https://www.github.com/googleapis/java-spanner/issues/388)) ([bd5c93f](https://www.github.com/googleapis/java-spanner/commit/bd5c93f045e06372b2235f3d350bade93bff2c24)) +* 
include SQL statement in error message ([#355](https://www.github.com/googleapis/java-spanner/issues/355)) ([cc5ac48](https://www.github.com/googleapis/java-spanner/commit/cc5ac48232b6e4550b98d213c5877d6ec37b293f)) + + +### Bug Fixes + +* enables emulator tests ([#380](https://www.github.com/googleapis/java-spanner/issues/380)) ([f61c6d0](https://www.github.com/googleapis/java-spanner/commit/f61c6d0d332f15826499996a292acc7cbab267a7)) +* remove custom timeout and retry settings ([#365](https://www.github.com/googleapis/java-spanner/issues/365)) ([f6afd21](https://www.github.com/googleapis/java-spanner/commit/f6afd213430d3f06d9a72c64a5c37172840fed0e)) +* remove unused kokoro files ([#367](https://www.github.com/googleapis/java-spanner/issues/367)) ([6125c7d](https://www.github.com/googleapis/java-spanner/commit/6125c7d221c77f4c42497b72107627ee09312813)) +* retry pdml transaction on EOS internal error ([#360](https://www.github.com/googleapis/java-spanner/issues/360)) ([a53d736](https://www.github.com/googleapis/java-spanner/commit/a53d7369bb2a8640ab42e409632b352decbdbf5e)) +* sets the project for the integration tests ([#386](https://www.github.com/googleapis/java-spanner/issues/386)) ([c8fa458](https://www.github.com/googleapis/java-spanner/commit/c8fa458f5369a09c780ee38ecc09bd2562e8f987)) + + +### Dependencies + +* stop auto updates of commons-lang3 ([#362](https://www.github.com/googleapis/java-spanner/issues/362)) ([8f07ed6](https://www.github.com/googleapis/java-spanner/commit/8f07ed6b44f9c70f56b9ee2e4505c40385337ca7)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.8.6 ([#374](https://www.github.com/googleapis/java-spanner/issues/374)) ([6f47b8a](https://www.github.com/googleapis/java-spanner/commit/6f47b8a759643f772230df0c2e153338d44f70ce)) +* update dependency org.openjdk.jmh:jmh-core to v1.24 ([#375](https://www.github.com/googleapis/java-spanner/issues/375)) 
([94f568c](https://www.github.com/googleapis/java-spanner/commit/94f568cf731ba22cac7f0d898d7776a3cc2c178f)) +* update dependency org.openjdk.jmh:jmh-core to v1.25 ([#382](https://www.github.com/googleapis/java-spanner/issues/382)) ([ec7888e](https://www.github.com/googleapis/java-spanner/commit/ec7888e1d62cf800bf6ad166d242e89443ddc7aa)) +* update dependency org.openjdk.jmh:jmh-generator-annprocess to v1.25 ([#376](https://www.github.com/googleapis/java-spanner/issues/376)) ([8ffdc48](https://www.github.com/googleapis/java-spanner/commit/8ffdc481e15901f78eac592bd8d4bef33ac3378a)) + +## [1.59.0](https://www.github.com/googleapis/java-spanner/compare/v1.58.0...v1.59.0) (2020-07-16) + + +### Features + +* add support for NUMERIC data type ([#193](https://www.github.com/googleapis/java-spanner/issues/193)) ([b38a91d](https://www.github.com/googleapis/java-spanner/commit/b38a91d8daac264b9dea327d6b31430d9599bd78)) +* spanner NUMERIC type ([#349](https://www.github.com/googleapis/java-spanner/issues/349)) ([78c3192](https://www.github.com/googleapis/java-spanner/commit/78c3192266c474fc43277a8bf3f15caa968a0100)) + + +### Bug Fixes + +* check if emulator is running if env var is set ([#340](https://www.github.com/googleapis/java-spanner/issues/340)) ([597f501](https://www.github.com/googleapis/java-spanner/commit/597f501803e6d58717a6e3770e6fd3f34454e9a5)) +* fix potential unnecessary transaction retry ([#337](https://www.github.com/googleapis/java-spanner/issues/337)) ([1a4f4fd](https://www.github.com/googleapis/java-spanner/commit/1a4f4fd675a1580c87ad1d53c650a20bd2ff4811)), closes [#327](https://www.github.com/googleapis/java-spanner/issues/327) +* respect PDML timeout when using streaming RPC ([#338](https://www.github.com/googleapis/java-spanner/issues/338)) ([d67f108](https://www.github.com/googleapis/java-spanner/commit/d67f108e86925c1296e695db8e78fa82e11fa4fa)) +* runs sample tests in java 8 and java 11 
([#345](https://www.github.com/googleapis/java-spanner/issues/345)) ([b547e31](https://www.github.com/googleapis/java-spanner/commit/b547e31d095be3cf1646e0e9c07bfc467ecc3c22)) +* set gRPC keep-alive to 120 seconds ([#339](https://www.github.com/googleapis/java-spanner/issues/339)) ([26be103](https://www.github.com/googleapis/java-spanner/commit/26be103da1117c4940550fad1672c66e6edfbdb3)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.8.3 ([#334](https://www.github.com/googleapis/java-spanner/issues/334)) ([45acd89](https://www.github.com/googleapis/java-spanner/commit/45acd8960c961d48e91a7b1546efa64d9e9ae576)) +* update shared config to 0.9.2 ([#328](https://www.github.com/googleapis/java-spanner/issues/328)) ([75df62c](https://www.github.com/googleapis/java-spanner/commit/75df62c0176137fda1d0a9076b83be06f11228ce)) + +## [1.58.0](https://www.github.com/googleapis/java-spanner/compare/v1.57.0...v1.58.0) (2020-07-07) + + +### Features + +* add async api ([#81](https://www.github.com/googleapis/java-spanner/issues/81)) ([462839b](https://www.github.com/googleapis/java-spanner/commit/462839b625e58e235581b8ba10b398e1d222eaaf)) +* support setting compression option ([#192](https://www.github.com/googleapis/java-spanner/issues/192)) ([965e95e](https://www.github.com/googleapis/java-spanner/commit/965e95e70ccd9c62abd6513b0011aab136e48e26)) + + +### Bug Fixes + +* set default values for streaming retry ([#316](https://www.github.com/googleapis/java-spanner/issues/316)) ([543373b](https://www.github.com/googleapis/java-spanner/commit/543373b22336be72b10026fda9f0b55939ab94b4)) + + +### Performance Improvements + +* use streaming RPC for PDML ([#287](https://www.github.com/googleapis/java-spanner/issues/287)) ([df47c13](https://www.github.com/googleapis/java-spanner/commit/df47c13a4c00bdf5e6eafa01bbb64c12a96d7fb8)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.8.2 
([#315](https://www.github.com/googleapis/java-spanner/issues/315)) ([3d6fb9f](https://www.github.com/googleapis/java-spanner/commit/3d6fb9fd7dc6b2b5b2ff9935228701ac795c9167)) + +## [1.57.0](https://www.github.com/googleapis/java-spanner/compare/v1.56.0...v1.57.0) (2020-06-29) + + +### Features + +* **deps:** adopt flatten plugin and google-cloud-shared-dependencies and update ExecutorProvider ([#302](https://www.github.com/googleapis/java-spanner/issues/302)) ([5aef6c3](https://www.github.com/googleapis/java-spanner/commit/5aef6c3f6d3e9564cb8728ad51718feb6b64475a)) + +## [1.56.0](https://www.github.com/googleapis/java-spanner/compare/v1.55.1...v1.56.0) (2020-06-17) + + +### Features + +* add num_sessions_in_pool metric ([#128](https://www.github.com/googleapis/java-spanner/issues/128)) ([3a7a8ad](https://www.github.com/googleapis/java-spanner/commit/3a7a8ad79f1de3371d32a1298406990cb7bbf5be)) + + +### Bug Fixes + +* backend now supports optimizer version for DML ([#252](https://www.github.com/googleapis/java-spanner/issues/252)) ([24b986b](https://www.github.com/googleapis/java-spanner/commit/24b986b03a785f4c5ee978dcdc57f51687701e52)) +* include an explicit version for javax-annotations-api ([#261](https://www.github.com/googleapis/java-spanner/issues/261)) ([e256d22](https://www.github.com/googleapis/java-spanner/commit/e256d22f33d5f091ea90ed81c0b0f8600beae96c)) +* inconsistent json and yaml spanner configs ([#238](https://www.github.com/googleapis/java-spanner/issues/238)) ([627fdc1](https://www.github.com/googleapis/java-spanner/commit/627fdc13d64ab7b51934d4866ff753f7b08dabe4)) +* test allowed a too old staleness ([#214](https://www.github.com/googleapis/java-spanner/issues/214)) ([f4fa6bf](https://www.github.com/googleapis/java-spanner/commit/f4fa6bfca4bb821cbda426c4cb7bf32f091a2913)) +* use millis to prevent rounding errors ([#260](https://www.github.com/googleapis/java-spanner/issues/260)) 
([22ed458](https://www.github.com/googleapis/java-spanner/commit/22ed45816098f5e50104935b66bc55297ea7f7b7)) + + +### Dependencies + +* include test-jar in bom ([#253](https://www.github.com/googleapis/java-spanner/issues/253)) ([4e86a37](https://www.github.com/googleapis/java-spanner/commit/4e86a374aacbcfc34d64809b7d9606f21176f6b9)) +* update dependency org.json:json to v20200518 ([#239](https://www.github.com/googleapis/java-spanner/issues/239)) ([e3d7921](https://www.github.com/googleapis/java-spanner/commit/e3d79214ac4d6e72992acdddb7ddeb2148b1ae15)) + +### [1.55.1](https://www.github.com/googleapis/java-spanner/compare/v1.55.0...v1.55.1) (2020-05-21) + + +### Bug Fixes + +* PDML retry settings were not applied for aborted tx ([#232](https://www.github.com/googleapis/java-spanner/issues/232)) ([308a465](https://www.github.com/googleapis/java-spanner/commit/308a465c768ba6e641c95d8c6efd214637266f50)), closes [#199](https://www.github.com/googleapis/java-spanner/issues/199) +* remove the need for any env var in all tests ([#235](https://www.github.com/googleapis/java-spanner/issues/235)) ([374fb40](https://www.github.com/googleapis/java-spanner/commit/374fb403306612330db58dfa5549205394a08e67)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.4.0 ([#224](https://www.github.com/googleapis/java-spanner/issues/224)) ([2cf04aa](https://www.github.com/googleapis/java-spanner/commit/2cf04aad7edc68baf5c296bda11f66c140abf669)) + +## [1.55.0](https://www.github.com/googleapis/java-spanner/compare/v1.54.0...v1.55.0) (2020-05-19) + + +### Features + +* mark when a Spanner client is closed ([#198](https://www.github.com/googleapis/java-spanner/issues/198)) ([50cb174](https://www.github.com/googleapis/java-spanner/commit/50cb1744e7ede611758d3ff63b3df77a1d3682eb)) + + +### Bug Fixes + +* make it possible to override backups methods ([#195](https://www.github.com/googleapis/java-spanner/issues/195)) 
([2d19c25](https://www.github.com/googleapis/java-spanner/commit/2d19c25ba32847d116194565e67e1b1276fcb9f8)) +* Partitioned DML timeout was not always respected ([#203](https://www.github.com/googleapis/java-spanner/issues/203)) ([13cb37e](https://www.github.com/googleapis/java-spanner/commit/13cb37e55ddfd1ff4ec22b1dcdc20c4832eee444)), closes [#199](https://www.github.com/googleapis/java-spanner/issues/199) +* partitionedDml stub was not closed ([#213](https://www.github.com/googleapis/java-spanner/issues/213)) ([a2d9a33](https://www.github.com/googleapis/java-spanner/commit/a2d9a33fa31f7467fc2bfbef5a29c4b3f5aea7c8)) +* reuse clientId for invalidated databases ([#206](https://www.github.com/googleapis/java-spanner/issues/206)) ([7b4490d](https://www.github.com/googleapis/java-spanner/commit/7b4490dfb61fbc81b5bd6be6c9a663b36b5ce402)) +* use nanos to prevent truncation errors ([#204](https://www.github.com/googleapis/java-spanner/issues/204)) ([a608460](https://www.github.com/googleapis/java-spanner/commit/a60846043dc0ca47e1970d8ab99380b6d725c7a9)), closes [#200](https://www.github.com/googleapis/java-spanner/issues/200) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.3.1 ([#190](https://www.github.com/googleapis/java-spanner/issues/190)) ([ad41a0d](https://www.github.com/googleapis/java-spanner/commit/ad41a0d4b0cc6a2c0ae0611c767652f64cfb2fb7)) + +## [1.54.0](https://www.github.com/googleapis/java-spanner/compare/v1.53.0...v1.54.0) (2020-05-05) + + +### Features + +* **deps:** import shared-dependencies bom and use maven-flatten-plugin ([#172](https://www.github.com/googleapis/java-spanner/issues/172)) ([060a81a](https://www.github.com/googleapis/java-spanner/commit/060a81ac938ef644aefd8c90d026018107742141)) + + +### Bug Fixes + +* create filter in correct order ([#180](https://www.github.com/googleapis/java-spanner/issues/180)) 
([d80428a](https://www.github.com/googleapis/java-spanner/commit/d80428a5b0291516b2298e2309de09b23e4c387d)) +* remove error message checking ([#183](https://www.github.com/googleapis/java-spanner/issues/183)) ([b477322](https://www.github.com/googleapis/java-spanner/commit/b4773223dbeb682c2c8fa9c0a9dea31001dd94d6)), closes [#175](https://www.github.com/googleapis/java-spanner/issues/175) +* set resource type for database parameter of Backup ([#174](https://www.github.com/googleapis/java-spanner/issues/174)) ([bb4d7cf](https://www.github.com/googleapis/java-spanner/commit/bb4d7cf4a363cf4980e22be97d2b5e4267368a7d)) +* stop preparing session on most errors ([#181](https://www.github.com/googleapis/java-spanner/issues/181)) ([d0e3d41](https://www.github.com/googleapis/java-spanner/commit/d0e3d41131a7480baee787654b7b9591efae5069)), closes [#177](https://www.github.com/googleapis/java-spanner/issues/177) + +## [1.53.0](https://www.github.com/googleapis/java-spanner/compare/v1.52.0...v1.53.0) (2020-04-22) + + +### Features + +* optimize maintainer to let sessions be GC'ed instead of deleted ([#135](https://www.github.com/googleapis/java-spanner/issues/135)) ([d65747c](https://www.github.com/googleapis/java-spanner/commit/d65747cbc704508f6f1bcef6eea53aa411d42ee2)) + + +### Bug Fixes + +* assign unique id's per test case ([#129](https://www.github.com/googleapis/java-spanner/issues/129)) ([a553b6d](https://www.github.com/googleapis/java-spanner/commit/a553b6d48c4f5ee2d0583e5b825d73a85f06216e)) +* check for not null input for Id classes ([#159](https://www.github.com/googleapis/java-spanner/issues/159)) ([ecf5826](https://www.github.com/googleapis/java-spanner/commit/ecf582670818f32e85f534ec400d0b8d31cf9ca6)), closes [#145](https://www.github.com/googleapis/java-spanner/issues/145) +* clean up test instance if creation failed ([#162](https://www.github.com/googleapis/java-spanner/issues/162)) 
([ff571e1](https://www.github.com/googleapis/java-spanner/commit/ff571e16a45fbce692d9bb172749ff15fafe7a9c)) +* fix flaky test and remove warnings ([#153](https://www.github.com/googleapis/java-spanner/issues/153)) ([d534e35](https://www.github.com/googleapis/java-spanner/commit/d534e350346b0c9ab8057ede36bc3aac473c0b06)), closes [#146](https://www.github.com/googleapis/java-spanner/issues/146) +* increase test timeout and remove warnings ([#160](https://www.github.com/googleapis/java-spanner/issues/160)) ([63a6bd8](https://www.github.com/googleapis/java-spanner/commit/63a6bd8be08a56d002f58bc2cdb2856ad0dc5fa3)), closes [#158](https://www.github.com/googleapis/java-spanner/issues/158) +* retry non-idempotent long-running RPCs ([#141](https://www.github.com/googleapis/java-spanner/issues/141)) ([4669c02](https://www.github.com/googleapis/java-spanner/commit/4669c02a24e0f7b1d53c9edf5ab7b146b4116960)) +* retry restore if blocked by pending restore ([#119](https://www.github.com/googleapis/java-spanner/issues/119)) ([220653d](https://www.github.com/googleapis/java-spanner/commit/220653d8e25c518d0df447bf777a7fcbf04a01ca)), closes [#118](https://www.github.com/googleapis/java-spanner/issues/118) +* StatementParser did not accept multiple query hints ([#170](https://www.github.com/googleapis/java-spanner/issues/170)) ([ef41a6e](https://www.github.com/googleapis/java-spanner/commit/ef41a6e503f218c00c16914aa9c1433d9b26db13)), closes [#163](https://www.github.com/googleapis/java-spanner/issues/163) +* wait for initialization to finish before test ([#161](https://www.github.com/googleapis/java-spanner/issues/161)) ([fe434ff](https://www.github.com/googleapis/java-spanner/commit/fe434ff7068b4b618e70379c224e1c5ab88f6ba1)), closes [#146](https://www.github.com/googleapis/java-spanner/issues/146) + + +### Performance Improvements + +* increase sessions in the pool in batches ([#134](https://www.github.com/googleapis/java-spanner/issues/134)) 
([9e5a1cd](https://www.github.com/googleapis/java-spanner/commit/9e5a1cdaacf71147b67681861f063c3276705f44)) +* prepare sessions with r/w tx in-process ([#152](https://www.github.com/googleapis/java-spanner/issues/152)) ([2db27ce](https://www.github.com/googleapis/java-spanner/commit/2db27ce048efafaa3c28b097de33518747011465)), closes [#151](https://www.github.com/googleapis/java-spanner/issues/151) + + +### Dependencies + +* update core dependencies ([#109](https://www.github.com/googleapis/java-spanner/issues/109)) ([5753f1f](https://www.github.com/googleapis/java-spanner/commit/5753f1f4fed83df87262404f7a7ba7eedcd366cb)) +* update core dependencies ([#132](https://www.github.com/googleapis/java-spanner/issues/132)) ([77c1558](https://www.github.com/googleapis/java-spanner/commit/77c1558652ee00e529674ac3a2dcf3210ef049fa)) +* update dependency com.google.api:api-common to v1.9.0 ([#127](https://www.github.com/googleapis/java-spanner/issues/127)) ([b2c744f](https://www.github.com/googleapis/java-spanner/commit/b2c744f01a4d5a8981df5ff900f3536c83265a61)) +* update dependency com.google.guava:guava-bom to v29 ([#147](https://www.github.com/googleapis/java-spanner/issues/147)) ([3fe3ae0](https://www.github.com/googleapis/java-spanner/commit/3fe3ae02376af552564c93c766f562d6454b7ac1)) +* update dependency io.grpc:grpc-bom to v1.29.0 ([#164](https://www.github.com/googleapis/java-spanner/issues/164)) ([2d2ce5c](https://www.github.com/googleapis/java-spanner/commit/2d2ce5ce4dc8f410ec671e542e144d47f39ab40b)) +* update dependency org.threeten:threetenbp to v1.4.3 ([#120](https://www.github.com/googleapis/java-spanner/issues/120)) ([49d1abc](https://www.github.com/googleapis/java-spanner/commit/49d1abcb6c9c48762dcf0fe1466ab107bf67146b)) + +## [1.52.0](https://www.github.com/googleapis/java-spanner/compare/v1.51.0...v1.52.0) (2020-03-20) + + +### Features + +* add backup support ([#100](https://www.github.com/googleapis/java-spanner/issues/100)) 
([ed3874a](https://www.github.com/googleapis/java-spanner/commit/ed3874afcf55fe7381354e03dab3a3b97d7eb520)) +* add Backups protos and APIs ([#97](https://www.github.com/googleapis/java-spanner/issues/97)) ([5643c22](https://www.github.com/googleapis/java-spanner/commit/5643c22a4531dac75b9fac5b128eb714a27920a0)) + + +### Bug Fixes + +* add client id to metrics to avoid collisions ([#117](https://www.github.com/googleapis/java-spanner/issues/117)) ([338e136](https://www.github.com/googleapis/java-spanner/commit/338e136508edc6745f9371e8a5d66638021bc8d7)), closes [#106](https://www.github.com/googleapis/java-spanner/issues/106) +* ignore added interface methods for generated code ([#101](https://www.github.com/googleapis/java-spanner/issues/101)) ([402cfa1](https://www.github.com/googleapis/java-spanner/commit/402cfa1e1e2994f7bb1b783cf823021b54fb175e)), closes [#99](https://www.github.com/googleapis/java-spanner/issues/99) +* use grpc 1.27.2 to prevent version conflicts ([#105](https://www.github.com/googleapis/java-spanner/issues/105)) ([37b7c88](https://www.github.com/googleapis/java-spanner/commit/37b7c8859e5f35d85bd14ef72662614fd185c020)) + + +### Dependencies + +* update core dependencies ([#94](https://www.github.com/googleapis/java-spanner/issues/94)) ([f3ca4c9](https://www.github.com/googleapis/java-spanner/commit/f3ca4c99c3d54f64c5eda11e4a4c076140fdbc6a)) +* update opencensus.version to v0.26.0 ([#116](https://www.github.com/googleapis/java-spanner/issues/116)) ([1b8db0b](https://www.github.com/googleapis/java-spanner/commit/1b8db0b407429e02bb1e4c9af839afeed21dac5d)) + +## [1.51.0](https://www.github.com/googleapis/java-spanner/compare/v1.50.0...v1.51.0) (2020-03-13) + + +### Features + +* add backend query options ([#90](https://www.github.com/googleapis/java-spanner/issues/90)) ([e96e172](https://www.github.com/googleapis/java-spanner/commit/e96e17246bee9691171b46857806d03d1f8e19b4)) +* add QueryOptions proto 
([#84](https://www.github.com/googleapis/java-spanner/issues/84)) ([eb8fc37](https://www.github.com/googleapis/java-spanner/commit/eb8fc375bbd766f25966aa565e266ed972bbe818)) + + +### Bug Fixes + +* never use credentials in combination with plain text ([#98](https://www.github.com/googleapis/java-spanner/issues/98)) ([7eb8d49](https://www.github.com/googleapis/java-spanner/commit/7eb8d49cd6c35d7f757cb89009ad16be601b77c3)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-core-bom to v1.93.1 ([#91](https://www.github.com/googleapis/java-spanner/issues/91)) ([29d8db8](https://www.github.com/googleapis/java-spanner/commit/29d8db8cfc9d12824b9264d0fb870049a58a9a03)) +* update dependency io.opencensus:opencensus-api to v0.25.0 ([#95](https://www.github.com/googleapis/java-spanner/issues/95)) ([57f5fd0](https://www.github.com/googleapis/java-spanner/commit/57f5fd0f3bee4b437f48b6a08ab3174f035c8cca)) + +## [1.50.0](https://www.github.com/googleapis/java-spanner/compare/v1.49.2...v1.50.0) (2020-02-28) + + +### Features + +* add metrics to capture acquired and released sessions data ([#67](https://www.github.com/googleapis/java-spanner/issues/67)) ([94d0557](https://www.github.com/googleapis/java-spanner/commit/94d05575c37c7c7c7e9d7d3fbaea46c6d2eb6a4d)) +* add session timeout metric ([#65](https://www.github.com/googleapis/java-spanner/issues/65)) ([8d84b53](https://www.github.com/googleapis/java-spanner/commit/8d84b53efd2d237e193b68bc36345d338b0cdf20)) +* instrument Spanner client with OpenCensus metrics ([#54](https://www.github.com/googleapis/java-spanner/issues/54)) ([d9a00a8](https://www.github.com/googleapis/java-spanner/commit/d9a00a81c454ae793f9687d0e2de2bcc58d96502)) + + +### Bug Fixes + +* multiple calls to end of span ([#75](https://www.github.com/googleapis/java-spanner/issues/75)) ([3f32f51](https://www.github.com/googleapis/java-spanner/commit/3f32f51d70ceacbea02439c0f48ad057b10fb570)) + + +### Dependencies + +* update core dependencies 
([#87](https://www.github.com/googleapis/java-spanner/issues/87)) ([b096651](https://www.github.com/googleapis/java-spanner/commit/b096651ddde940de9929600b31f78f965939139d)) +* update dependency com.google.cloud:google-cloud-core-bom to v1.92.5 ([56742c9](https://www.github.com/googleapis/java-spanner/commit/56742c96ff30f444e18a8bbde94ca173123385be)) +* update dependency com.google.http-client:google-http-client-bom to v1.34.2 ([#88](https://www.github.com/googleapis/java-spanner/issues/88)) ([628093d](https://www.github.com/googleapis/java-spanner/commit/628093d97877b912f6e4e706d22c2c24ba77a808)) +* update dependency com.google.protobuf:protobuf-bom to v3.11.4 ([#77](https://www.github.com/googleapis/java-spanner/issues/77)) ([fb2c683](https://www.github.com/googleapis/java-spanner/commit/fb2c683cf195e7229fe3d61a3332c32298be2625)) +* update dependency io.grpc:grpc-bom to v1.27.1 ([054b7e7](https://www.github.com/googleapis/java-spanner/commit/054b7e7091af6b61c7d2ad203688a65bcb18ed0c)) +* update opencensus.version to v0.25.0 ([#70](https://www.github.com/googleapis/java-spanner/issues/70)) ([26a3eff](https://www.github.com/googleapis/java-spanner/commit/26a3eff44c7d1f36541440aa7d29fc1d3ae8a4d7)) + + +### Documentation + +* **regen:** update sample code to set total timeout, add API client header test ([#66](https://www.github.com/googleapis/java-spanner/issues/66)) ([1178958](https://www.github.com/googleapis/java-spanner/commit/1178958eaec5aa6ea80938ad91dfb0b1a688463d)) + +### [1.49.2](https://www.github.com/googleapis/java-spanner/compare/v1.49.1...v1.49.2) (2020-02-06) + + +### Bug Fixes + +* stop sending RPCs on InstanceNotFound ([#61](https://www.github.com/googleapis/java-spanner/issues/61)) ([7618ac8](https://www.github.com/googleapis/java-spanner/commit/7618ac8bc32f7d3482bd4a0850be2bce71c33fc3)), closes [#60](https://www.github.com/googleapis/java-spanner/issues/60) +* use default retry settings for aborted tx 
([#48](https://www.github.com/googleapis/java-spanner/issues/48)) ([6709552](https://www.github.com/googleapis/java-spanner/commit/6709552653f344537c209eef7f1e9e037a38e849)) +* use resource type to identify type of error ([#57](https://www.github.com/googleapis/java-spanner/issues/57)) ([89c3e77](https://www.github.com/googleapis/java-spanner/commit/89c3e77b99b303576c83b2313fc54d8c0e075e18)) +* use streaming retry settings for ResumableStreamIterator ([#49](https://www.github.com/googleapis/java-spanner/issues/49)) ([63b33e9](https://www.github.com/googleapis/java-spanner/commit/63b33e93e17303fe8f1fae01cfe44427178baf6c)) + + +### Dependencies + +* update core dependencies ([#59](https://www.github.com/googleapis/java-spanner/issues/59)) ([74b6b98](https://www.github.com/googleapis/java-spanner/commit/74b6b983ec275280572a5dcc49ececc94c4a4dce)) + +### [1.49.1](https://www.github.com/googleapis/java-spanner/compare/v1.49.0...v1.49.1) (2020-01-24) + + +### Bug Fixes + +* stop sending RPCs to deleted database ([#34](https://www.github.com/googleapis/java-spanner/issues/34)) ([11e4a90](https://www.github.com/googleapis/java-spanner/commit/11e4a90e73af8a5baf9aa593daa6192520363398)), closes [#16](https://www.github.com/googleapis/java-spanner/issues/16) + + +### Performance Improvements + +* close sessions async ([#24](https://www.github.com/googleapis/java-spanner/issues/24)) ([ab25087](https://www.github.com/googleapis/java-spanner/commit/ab250871cae51b3f496719d579db5bb6e263d5c3)), closes [#19](https://www.github.com/googleapis/java-spanner/issues/19) +* close sessions async revert revert ([#46](https://www.github.com/googleapis/java-spanner/issues/46)) ([c9864e5](https://www.github.com/googleapis/java-spanner/commit/c9864e58b14bb428e443bf958e7596a94199f629)), closes [#24](https://www.github.com/googleapis/java-spanner/issues/24) [#43](https://www.github.com/googleapis/java-spanner/issues/43) [#24](https://www.github.com/googleapis/java-spanner/issues/24) + + +### 
Reverts + +* Revert "perf: close sessions async (#24)" (#43) ([809ed88](https://www.github.com/googleapis/java-spanner/commit/809ed8875d65362ef14d27c5382dfe4c1ad9aa1b)), closes [#24](https://www.github.com/googleapis/java-spanner/issues/24) [#43](https://www.github.com/googleapis/java-spanner/issues/43) + +## [1.49.0](https://www.github.com/googleapis/java-spanner/compare/v1.48.0...v1.49.0) (2020-01-16) + + +### Features + +* add support for CallCredentials ([#26](https://www.github.com/googleapis/java-spanner/issues/26)) ([1112357](https://www.github.com/googleapis/java-spanner/commit/1112357be1c5fb9c4abfba48989fe8217853876a)), closes [#18](https://www.github.com/googleapis/java-spanner/issues/18) + + +### Bug Fixes + +* add keepalives to GRPC channel ([#11](https://www.github.com/googleapis/java-spanner/issues/11)) ([428a4a6](https://www.github.com/googleapis/java-spanner/commit/428a4a6d3c9e1536a80f1fa9f76f36fe1062a104)) + + +### Dependencies + +* mockito scope should be test ([#29](https://www.github.com/googleapis/java-spanner/issues/29)) ([9b0733d](https://www.github.com/googleapis/java-spanner/commit/9b0733d927237d8d16f507a1d0129ddb638df55a)) +* update dependency com.google.truth:truth to v1.0.1 ([#35](https://www.github.com/googleapis/java-spanner/issues/35)) ([fa2b471](https://www.github.com/googleapis/java-spanner/commit/fa2b471884c3b805fd6aa56a38d7c1f98c4cb940)) +* update dependency org.threeten:threetenbp to v1.4.1 ([c22c831](https://www.github.com/googleapis/java-spanner/commit/c22c831473dd0b18b71e1ea4d000cd34555a3a48)) + +## [1.48.0](https://www.github.com/googleapis/java-spanner/compare/1.47.0...v1.48.0) (2020-01-10) + + +### Features + +* add public method to get gRPC status code ([#25](https://www.github.com/googleapis/java-spanner/issues/25)) ([2dbe3cf](https://www.github.com/googleapis/java-spanner/commit/2dbe3cf397357de09d24bb57e367bbe947e682f4)), closes [#14](https://www.github.com/googleapis/java-spanner/issues/14) +* make repo releasable, add 
parent/bom ([#4](https://www.github.com/googleapis/java-spanner/issues/4)) ([f0073ee](https://www.github.com/googleapis/java-spanner/commit/f0073ee8d0aa68161f3071e6a72af376a1db1731)) + + +### Dependencies + +* update dependency org.jacoco:jacoco-maven-plugin to v0.8.5 ([#7023](https://www.github.com/googleapis/java-spanner/issues/7023)) ([d8b6438](https://www.github.com/googleapis/java-spanner/commit/d8b6438aa3b881c1c9baff584a74813664be4df8)) diff --git a/java-spanner/README.md b/java-spanner/README.md new file mode 100644 index 000000000000..db743ab10b68 --- /dev/null +++ b/java-spanner/README.md @@ -0,0 +1,573 @@ +# Google Cloud Spanner Client for Java + +Java idiomatic client for [Cloud Spanner][product-docs]. + +[![Maven][maven-version-image]][maven-version-link] +![Stability][stability-image] + +- [Product Documentation][product-docs] +- [Client Library Documentation][javadocs] + + +## Quickstart + +If you are using Maven with [BOM][libraries-bom], add this to your pom.xml file: + +```xml + + + + com.google.cloud + libraries-bom + 26.76.0 + pom + import + + + + + + + com.google.cloud + google-cloud-spanner + + +``` + +If you are using Maven without the BOM, add this to your dependencies: + + +```xml + + com.google.cloud + google-cloud-spanner + 6.110.0 + + +``` + +If you are using Gradle 5.x or later, add this to your dependencies: + +```Groovy +implementation platform('com.google.cloud:libraries-bom:26.78.0') + +implementation 'com.google.cloud:google-cloud-spanner' +``` +If you are using Gradle without BOM, add this to your dependencies: + +```Groovy +implementation 'com.google.cloud:google-cloud-spanner:6.112.0' +``` + +If you are using SBT, add this to your dependencies: + +```Scala +libraryDependencies += "com.google.cloud" % "google-cloud-spanner" % "6.112.0" +``` + +## Authentication + +See the [Authentication][authentication] section in the base directory's README. 
+ +## Authorization + +The client application making API calls must be granted [authorization scopes][auth-scopes] required for the desired Cloud Spanner APIs, and the authenticated principal must have the [IAM role(s)][predefined-iam-roles] required to access GCP resources using the Cloud Spanner API calls. + +## Getting Started + +### Prerequisites + +You will need a [Google Cloud Platform Console][developer-console] project with the Cloud Spanner [API enabled][enable-api]. +You will need to [enable billing][enable-billing] to use Google Cloud Spanner. +[Follow these instructions][create-project] to get your project set up. You will also need to set up the local development environment by +[installing the Google Cloud Command Line Interface][cloud-cli] and running the following commands in command line: +`gcloud auth login` and `gcloud config set project [YOUR PROJECT ID]`. + +### Installation and setup + +You'll need to obtain the `google-cloud-spanner` library. See the [Quickstart](#quickstart) section +to add `google-cloud-spanner` as a dependency in your code. + +## About Cloud Spanner + + +[Cloud Spanner][product-docs] is a fully managed, mission-critical, relational database service that offers transactional consistency at global scale, schemas, SQL (ANSI 2011 with extensions), and automatic, synchronous replication for high availability. Be sure to activate the Cloud Spanner API on the Developer's Console to use Cloud Spanner from your project. + +See the [Cloud Spanner client library docs][javadocs] to learn how to +use this Cloud Spanner Client Library. + + +#### Calling Cloud Spanner +Here is a code snippet showing a simple usage example. 
Add the following imports +at the top of your file: + +```java +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; + +``` + +Then, to make a query to Spanner, use the following code: +```java +// Instantiates a client +SpannerOptions options = SpannerOptions.newBuilder().build(); +Spanner spanner = options.getService(); +String instance = "my-instance"; +String database = "my-database"; +try { + // Creates a database client + DatabaseClient dbClient = spanner.getDatabaseClient( + DatabaseId.of(options.getProjectId(), instance, database)); + // Queries the database + try (ResultSet resultSet = dbClient.singleUse().executeQuery(Statement.of("SELECT 1"))) { + // Prints the results + while (resultSet.next()) { + System.out.printf("%d\n", resultSet.getLong(0)); + } + } +} finally { + // Closes the client which will free up the resources used + spanner.close(); +} +``` + +#### Complete source code + +In [DatabaseSelect.java](https://github.com/googleapis/google-cloud-java/tree/master/google-cloud-examples/src/main/java/com/google/cloud/examples/spanner/snippets/DatabaseSelect.java) we put together all the code shown above in a single program. + +## Session Pool + +The Cloud Spanner client maintains a session pool, as sessions are expensive to create and are +intended to be long-lived. The client automatically takes a session from the pool and uses this +for executing queries and transactions. +See [Session Pool and Channel Pool Configuration](session-and-channel-pool-configuration.md) +for in-depth background information about sessions and gRPC channels and how these are handled in +the Cloud Spanner Java client. 
+ +## Metrics + +Cloud Spanner client supports [client-side metrics](https://cloud.google.com/spanner/docs/view-manage-client-side-metrics) that you can use along with server-side metrics to optimize performance and troubleshoot performance issues if they occur. + +Client-side metrics are measured from the time a request leaves your application to the time your application receives the response. +In contrast, server-side metrics are measured from the time Spanner receives a request until the last byte of data is sent to the client. + +These metrics are enabled by default. You can opt out of using client-side metrics with the following code: + +``` +SpannerOptions options = SpannerOptions.newBuilder() + .setBuiltInMetricsEnabled(false) + .build(); +``` + +You can also disable these metrics by setting `SPANNER_DISABLE_BUILTIN_METRICS` to `true`. + +> Note: Client-side metrics need the `monitoring.timeSeries.create` IAM permission to export metrics data. Ask your administrator to grant your service account the [Monitoring Metric Writer](https://cloud.google.com/iam/docs/roles-permissions/monitoring#monitoring.metricWriter) (roles/monitoring.metricWriter) IAM role on the project. + +## Traces +Cloud Spanner client supports OpenTelemetry Traces, which gives insight into the client internals and aids in debugging/troubleshooting production issues. + +By default, the functionality is disabled. You need to add OpenTelemetry dependencies, enable OpenTelemetry traces, and configure OpenTelemetry with appropriate exporters at the startup of your application. + +See [Configure client-side tracing](https://cloud.google.com/spanner/docs/set-up-tracing#configure-client-side-tracing) for more details on configuring traces. 
+
+#### OpenTelemetry Dependencies
+
+If you are using Maven, add this to your pom.xml file
+```xml
+<dependency>
+  <groupId>io.opentelemetry</groupId>
+  <artifactId>opentelemetry-sdk</artifactId>
+  <version>{opentelemetry.version}</version>
+</dependency>
+<dependency>
+  <groupId>io.opentelemetry</groupId>
+  <artifactId>opentelemetry-sdk-trace</artifactId>
+  <version>{opentelemetry.version}</version>
+</dependency>
+<dependency>
+  <groupId>io.opentelemetry</groupId>
+  <artifactId>opentelemetry-exporter-otlp</artifactId>
+  <version>{opentelemetry.version}</version>
+</dependency>
+```
+If you are using Gradle, add this to your dependencies
+```Groovy
+compile 'io.opentelemetry:opentelemetry-sdk:{opentelemetry.version}'
+compile 'io.opentelemetry:opentelemetry-sdk-trace:{opentelemetry.version}'
+compile 'io.opentelemetry:opentelemetry-exporter-otlp:{opentelemetry.version}'
+```
+#### OpenTelemetry Configuration
+
+> Note: Enabling OpenTelemetry traces will automatically disable OpenCensus traces.
+
+```java
+// Enable OpenTelemetry traces
+SpannerOptions.enableOpenTelemetryTraces();
+
+// Create a new tracer provider
+SdkTracerProvider sdkTracerProvider = SdkTracerProvider.builder()
+    // Use Otlp exporter or any other exporter of your choice.
+    .addSpanProcessor(SimpleSpanProcessor.builder(OtlpGrpcSpanExporter
+        .builder().build()).build())
+    .build();
+
+
+OpenTelemetry openTelemetry = OpenTelemetrySdk.builder()
+    .setTracerProvider(sdkTracerProvider)
+    .build();
+
+SpannerOptions options = SpannerOptions.newBuilder()
+// Inject OpenTelemetry object via Spanner Options or register OpenTelemetry object as Global
+    .setOpenTelemetry(openTelemetry)
+    .build();
+
+Spanner spanner = options.getService();
+```
+
+#### OpenTelemetry SQL Statement Tracing
+The OpenTelemetry traces that are generated by the Java client include any request and transaction
+tags that have been set. The traces can also include the SQL statements that are executed and the
+name of the thread that executes the statement.
Enable this with the `enableExtendedTracing` +option: + +``` +SpannerOptions options = SpannerOptions.newBuilder() + .setOpenTelemetry(openTelemetry) + .setEnableExtendedTracing(true) + .build(); +``` + +This option can also be enabled by setting the environment variable +`SPANNER_ENABLE_EXTENDED_TRACING=true`. + +#### OpenTelemetry API Tracing +You can enable tracing of each API call that the Spanner client executes with the `enableApiTracing` +option. These traces also include any retry attempts for an API call: + +``` +SpannerOptions options = SpannerOptions.newBuilder() +.setOpenTelemetry(openTelemetry) +.setEnableApiTracing(true) +.build(); +``` + +This option can also be enabled by setting the environment variable +`SPANNER_ENABLE_API_TRACING=true`. + +> Note: The attribute keys that are used for additional information about retry attempts and the number of requests might change in a future release. + +#### End-to-end Tracing + +In addition to client-side tracing, you can opt in for [end-to-end tracing](https://cloud.google.com/spanner/docs/tracing-overview#end-to-end-side-tracing). End-to-end tracing helps you understand and debug latency issues that are specific to Spanner such as the following: +* Identify whether the latency is due to network latency between your application and Spanner, or if the latency is occurring within Spanner. +* Identify the Google Cloud regions that your application requests are being routed through and if there is a cross-region request. A cross-region request usually means higher latencies between your application and Spanner. + +``` +SpannerOptions options = SpannerOptions.newBuilder() +.setOpenTelemetry(openTelemetry) +.setEnableEndToEndTracing(true) +.build(); +``` + +Refer to [Configure end-to-end tracing](https://cloud.google.com/spanner/docs/set-up-tracing#configure-end-to-end-tracing) to configure end-to-end tracing and to understand its attributes. 
+
+> Note: End-to-end traces can only be exported to [Cloud Trace](https://cloud.google.com/trace/docs).
+
+
+## Instrument with OpenCensus
+
+> Note: OpenCensus project is deprecated. See [Sunsetting OpenCensus](https://opentelemetry.io/blog/2023/sunsetting-opencensus/).
+We recommend migrating to OpenTelemetry, the successor project.
+
+## Migrate from OpenCensus to OpenTelemetry
+
+> Using the [OpenTelemetry OpenCensus Bridge](https://mvnrepository.com/artifact/io.opentelemetry/opentelemetry-opencensus-shim), you can immediately begin exporting your metrics and traces with OpenTelemetry.
+
+#### Disable OpenCensus metrics
+Disable OpenCensus metrics for Spanner by including the following code if you still possess OpenCensus dependencies and exporter.
+
+```java
+SpannerOptions.disableOpenCensusMetrics();
+```
+
+#### Disable OpenCensus traces
+Enabling OpenTelemetry traces for Spanner will automatically disable OpenCensus traces.
+
+```java
+SpannerOptions.enableOpenTelemetryTraces();
+```
+
+#### Remove OpenCensus Dependencies and Code
+Remove any OpenCensus-related code and dependencies from your codebase if all your dependencies are ready to move to OpenTelemetry.
+
+* Remove the OpenCensus Exporters which were configured [here](#configure-the-opencensus-exporter)
+* Remove the SpannerRPCViews reference which was configured [here](#enable-rpc-views)
+* Remove the OpenCensus dependencies which were added [here](#opencensus-dependencies)
+
+#### Update your Dashboards and Alerts
+
+Update your dashboards and alerts to reflect the changes below:
+* **Metrics name** : the `cloud.google.com/java` prefix has been removed from OpenTelemetry metrics and instead has been added as the Instrumentation Scope.
+* **Metrics namespace** : OpenTelemetry exporters use the `workload.googleapis.com` namespace as opposed to `custom.googleapis.com` with OpenCensus.
+
+
+
+## Samples
+
+Samples are in the [`samples/`](https://github.com/googleapis/google-cloud-java/tree/main/samples) directory.
+ +| Sample | Source Code | Try it | +| --------------------------- | --------------------------------- | ------ | +| Add And Drop Database Role | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/AddAndDropDatabaseRole.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AddAndDropDatabaseRole.java) | +| Add Json Column Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/AddJsonColumnSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AddJsonColumnSample.java) | +| Add Jsonb Column Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/AddJsonbColumnSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AddJsonbColumnSample.java) | +| Add Numeric Column Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/AddNumericColumnSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AddNumericColumnSample.java) | +| Add Proto Column Sample | [source 
code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/AddProtoColumnSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AddProtoColumnSample.java) | +| Alter Sequence Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/AlterSequenceSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AlterSequenceSample.java) | +| Alter Table With Foreign Key Delete Cascade Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/AlterTableWithForeignKeyDeleteCascadeSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AlterTableWithForeignKeyDeleteCascadeSample.java) | +| Async Dml Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/AsyncDmlExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AsyncDmlExample.java) | +| Async Query Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/AsyncQueryExample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AsyncQueryExample.java) | +| Async Query To List Async Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/AsyncQueryToListAsyncExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AsyncQueryToListAsyncExample.java) | +| Async Read Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/AsyncReadExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AsyncReadExample.java) | +| Async Read Only Transaction Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/AsyncReadOnlyTransactionExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AsyncReadOnlyTransactionExample.java) | +| Async Read Row Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/AsyncReadRowExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AsyncReadRowExample.java) | +| 
Async Read Using Index Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/AsyncReadUsingIndexExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AsyncReadUsingIndexExample.java) | +| Async Runner Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/AsyncRunnerExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AsyncRunnerExample.java) | +| Async Transaction Manager Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/AsyncTransactionManagerExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AsyncTransactionManagerExample.java) | +| Batch Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/BatchSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/BatchSample.java) | +| Batch Write At Least Once Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/BatchWriteAtLeastOnceSample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/BatchWriteAtLeastOnceSample.java) | +| Change Streams Txn Exclusion Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/ChangeStreamsTxnExclusionSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/ChangeStreamsTxnExclusionSample.java) | +| Copy Backup Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/CopyBackupSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CopyBackupSample.java) | +| Copy Backup With Multi Region Encryption Key | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/CopyBackupWithMultiRegionEncryptionKey.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CopyBackupWithMultiRegionEncryptionKey.java) | +| Create Backup With Encryption Key | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateBackupWithEncryptionKey.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateBackupWithEncryptionKey.java) | +| Create Backup With Multi Region Encryption Key | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateBackupWithMultiRegionEncryptionKey.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateBackupWithMultiRegionEncryptionKey.java) | +| Create Database With Default Leader Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithDefaultLeaderSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithDefaultLeaderSample.java) | +| Create Database With Encryption Key | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithEncryptionKey.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithEncryptionKey.java) | +| Create Database With Multi Region Encryption Key | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithMultiRegionEncryptionKey.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithMultiRegionEncryptionKey.java) | +| Create Database With Version Retention Period Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithVersionRetentionPeriodSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithVersionRetentionPeriodSample.java) | +| Create Full Backup Schedule Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateFullBackupScheduleSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateFullBackupScheduleSample.java) | +| Create Incremental Backup Schedule Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateIncrementalBackupScheduleSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateIncrementalBackupScheduleSample.java) | +| Create Instance Config Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateInstanceConfigSample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateInstanceConfigSample.java) | +| Create Instance Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateInstanceExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateInstanceExample.java) | +| Create Instance Partition Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateInstancePartitionSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateInstancePartitionSample.java) | +| Create Instance With Asymmetric Autoscaling Config Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithAsymmetricAutoscalingConfigExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithAsymmetricAutoscalingConfigExample.java) | +| Create Instance With Autoscaling Config Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithAutoscalingConfigExample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithAutoscalingConfigExample.java) | +| Create Instance With Processing Units Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithProcessingUnitsExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithProcessingUnitsExample.java) | +| Create Instance Without Default Backup Schedules Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithoutDefaultBackupSchedulesExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithoutDefaultBackupSchedulesExample.java) | +| Create Sequence Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateSequenceSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateSequenceSample.java) | +| Create Table With Foreign Key Delete Cascade Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateTableWithForeignKeyDeleteCascadeSample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateTableWithForeignKeyDeleteCascadeSample.java) | +| Custom Timeout And Retry Settings Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/CustomTimeoutAndRetrySettingsExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CustomTimeoutAndRetrySettingsExample.java) | +| Database Add Split Points Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/DatabaseAddSplitPointsSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/DatabaseAddSplitPointsSample.java) | +| Delete Backup Schedule Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/DeleteBackupScheduleSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/DeleteBackupScheduleSample.java) | +| Delete Instance Config Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/DeleteInstanceConfigSample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/DeleteInstanceConfigSample.java) | +| Delete Using Dml Returning Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/DeleteUsingDmlReturningSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/DeleteUsingDmlReturningSample.java) | +| Directed Read Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/DirectedReadSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/DirectedReadSample.java) | +| Drop Foreign Key Constraint Delete Cascade Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/DropForeignKeyConstraintDeleteCascadeSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/DropForeignKeyConstraintDeleteCascadeSample.java) | +| Drop Sequence Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/DropSequenceSample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/DropSequenceSample.java) | +| Enable Fine Grained Access | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/EnableFineGrainedAccess.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/EnableFineGrainedAccess.java) | +| Get Backup Schedule Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/GetBackupScheduleSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/GetBackupScheduleSample.java) | +| Get Commit Stats Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/GetCommitStatsSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/GetCommitStatsSample.java) | +| Get Database Ddl Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/GetDatabaseDdlSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/GetDatabaseDdlSample.java) | +| Get Instance Config Sample | 
[source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/GetInstanceConfigSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/GetInstanceConfigSample.java) | +| Insert Using Dml Returning Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/InsertUsingDmlReturningSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/InsertUsingDmlReturningSample.java) | +| Isolation Level And Read Lock Mode Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/IsolationLevelAndReadLockModeSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/IsolationLevelAndReadLockModeSample.java) | +| Last Statement Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/LastStatementSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/LastStatementSample.java) | +| List Backup Schedules Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/ListBackupSchedulesSample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/ListBackupSchedulesSample.java) | +| List Database Roles | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/ListDatabaseRoles.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/ListDatabaseRoles.java) | +| List Databases Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/ListDatabasesSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/ListDatabasesSample.java) | +| List Instance Config Operations Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/ListInstanceConfigOperationsSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/ListInstanceConfigOperationsSample.java) | +| List Instance Configs Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/ListInstanceConfigsSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/ListInstanceConfigsSample.java) 
| +| Pg Alter Sequence Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/PgAlterSequenceSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/PgAlterSequenceSample.java) | +| Pg Async Query To List Async Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/PgAsyncQueryToListAsyncExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/PgAsyncQueryToListAsyncExample.java) | +| Pg Async Runner Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/PgAsyncRunnerExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/PgAsyncRunnerExample.java) | +| Pg Async Transaction Manager Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/PgAsyncTransactionManagerExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/PgAsyncTransactionManagerExample.java) | +| Pg Batch Dml Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/PgBatchDmlSample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/PgBatchDmlSample.java) | +| Pg Case Sensitivity Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/PgCaseSensitivitySample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/PgCaseSensitivitySample.java) | +| Pg Create Sequence Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/PgCreateSequenceSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/PgCreateSequenceSample.java) | +| Pg Delete Using Dml Returning Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/PgDeleteUsingDmlReturningSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/PgDeleteUsingDmlReturningSample.java) | +| Pg Drop Sequence Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/PgDropSequenceSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/PgDropSequenceSample.java) | 
+| Pg Insert Using Dml Returning Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/PgInsertUsingDmlReturningSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/PgInsertUsingDmlReturningSample.java) | +| Pg Interleaved Table Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/PgInterleavedTableSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/PgInterleavedTableSample.java) | +| Pg Last Statement Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/PgLastStatementSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/PgLastStatementSample.java) | +| Pg Partitioned Dml Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/PgPartitionedDmlSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/PgPartitionedDmlSample.java) | +| Pg Query With Numeric Parameter Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/PgQueryWithNumericParameterSample.java) | [![Open in 
Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/PgQueryWithNumericParameterSample.java) | +| Pg Spanner Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/PgSpannerSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/PgSpannerSample.java) | +| Pg Update Using Dml Returning Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/PgUpdateUsingDmlReturningSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/PgUpdateUsingDmlReturningSample.java) | +| Query Information Schema Database Options Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/QueryInformationSchemaDatabaseOptionsSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/QueryInformationSchemaDatabaseOptionsSample.java) | +| Query With Json Parameter Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/QueryWithJsonParameterSample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/QueryWithJsonParameterSample.java) | +| Query With Jsonb Parameter Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/QueryWithJsonbParameterSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/QueryWithJsonbParameterSample.java) | +| Query With Numeric Parameter Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/QueryWithNumericParameterSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/QueryWithNumericParameterSample.java) | +| Query With Proto Parameter Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/QueryWithProtoParameterSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/QueryWithProtoParameterSample.java) | +| Quickstart Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/QuickstartSample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/QuickstartSample.java) | +| Read Data With Database Role | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/ReadDataWithDatabaseRole.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/ReadDataWithDatabaseRole.java) | +| Restore Backup With Encryption Key | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/RestoreBackupWithEncryptionKey.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/RestoreBackupWithEncryptionKey.java) | +| Restore Backup With Multi Region Encryption Key | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/RestoreBackupWithMultiRegionEncryptionKey.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/RestoreBackupWithMultiRegionEncryptionKey.java) | +| Set Max Commit Delay Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/SetMaxCommitDelaySample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/SetMaxCommitDelaySample.java) | +| Singer Proto | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/SingerProto.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/SingerProto.java) | +| Spanner Graph Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/SpannerGraphSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/SpannerGraphSample.java) | +| Spanner Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/SpannerSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/SpannerSample.java) | +| Statement Timeout Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/StatementTimeoutExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/StatementTimeoutExample.java) | +| Tag Sample | [source 
code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/TagSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/TagSample.java) | +| Tracing Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/TracingSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/TracingSample.java) | +| Transaction Timeout Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/TransactionTimeoutExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/TransactionTimeoutExample.java) | +| Unnamed Parameters Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/UnnamedParametersExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UnnamedParametersExample.java) | +| Update Backup Schedule Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateBackupScheduleSample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateBackupScheduleSample.java) | +| Update Database Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateDatabaseSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateDatabaseSample.java) | +| Update Database With Default Leader Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateDatabaseWithDefaultLeaderSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateDatabaseWithDefaultLeaderSample.java) | +| Update Instance Config Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateInstanceConfigSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateInstanceConfigSample.java) | +| Update Instance Default Backup Schedule Type Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateInstanceDefaultBackupScheduleTypeExample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateInstanceDefaultBackupScheduleTypeExample.java) | +| Update Instance Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateInstanceExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateInstanceExample.java) | +| Update Json Data Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateJsonDataSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateJsonDataSample.java) | +| Update Jsonb Data Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateJsonbDataSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateJsonbDataSample.java) | +| Update Numeric Data Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateNumericDataSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateNumericDataSample.java) | +| 
Update Proto Data Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateProtoDataSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateProtoDataSample.java) | +| Update Proto Data Sample Using Dml | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateProtoDataSampleUsingDml.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateProtoDataSampleUsingDml.java) | +| Update Using Dml Returning Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateUsingDmlReturningSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateUsingDmlReturningSample.java) | +| Add And Drop Database Role | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/AddAndDropDatabaseRole.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/AddAndDropDatabaseRole.java) | +| Add Json Column Sample | [source 
code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/AddJsonColumnSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/AddJsonColumnSample.java) | +| Add Jsonb Column Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/AddJsonbColumnSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/AddJsonbColumnSample.java) | +| Add Numeric Column Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/AddNumericColumnSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/AddNumericColumnSample.java) | +| Alter Sequence Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/AlterSequenceSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/AlterSequenceSample.java) | +| Alter Table With Foreign Key Delete Cascade Sample | [source 
code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/AlterTableWithForeignKeyDeleteCascadeSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/AlterTableWithForeignKeyDeleteCascadeSample.java) | +| Copy Backup Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/CopyBackupSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/CopyBackupSample.java) | +| Create Backup With Encryption Key | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateBackupWithEncryptionKey.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateBackupWithEncryptionKey.java) | +| Create Database With Default Leader Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateDatabaseWithDefaultLeaderSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateDatabaseWithDefaultLeaderSample.java) | +| Create Database With Encryption Key | [source 
code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateDatabaseWithEncryptionKey.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateDatabaseWithEncryptionKey.java) | +| Create Database With Version Retention Period Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateDatabaseWithVersionRetentionPeriodSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateDatabaseWithVersionRetentionPeriodSample.java) | +| Create Instance Config Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceConfigSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceConfigSample.java) | +| Create Instance Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceExample.java) | +| Create Instance With Autoscaling Config Example | [source 
code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithAutoscalingConfigExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithAutoscalingConfigExample.java) | +| Create Instance With Processing Units Example | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithProcessingUnitsExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithProcessingUnitsExample.java) | +| Create Sequence Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateSequenceSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateSequenceSample.java) | +| Create Table With Foreign Key Delete Cascade Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateTableWithForeignKeyDeleteCascadeSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateTableWithForeignKeyDeleteCascadeSample.java) | +| Delete 
Instance Config Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/DeleteInstanceConfigSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/DeleteInstanceConfigSample.java) | +| Drop Foreign Key Constraint Delete Cascade Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/DropForeignKeyConstraintDeleteCascadeSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/DropForeignKeyConstraintDeleteCascadeSample.java) | +| Drop Sequence Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/DropSequenceSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/DropSequenceSample.java) | +| Enable Fine Grained Access | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/EnableFineGrainedAccess.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/EnableFineGrainedAccess.java) | +| Get Database Ddl Sample | [source 
code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/GetDatabaseDdlSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/GetDatabaseDdlSample.java) | +| Get Instance Config Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/GetInstanceConfigSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/GetInstanceConfigSample.java) | +| List Database Roles | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/ListDatabaseRoles.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/ListDatabaseRoles.java) | +| List Databases Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/ListDatabasesSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/ListDatabasesSample.java) | +| List Instance Config Operations Sample | [source 
code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/ListInstanceConfigOperationsSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/ListInstanceConfigOperationsSample.java) | +| List Instance Configs Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/ListInstanceConfigsSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/ListInstanceConfigsSample.java) | +| Pg Alter Sequence Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgAlterSequenceSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/PgAlterSequenceSample.java) | +| Pg Case Sensitivity Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgCaseSensitivitySample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/PgCaseSensitivitySample.java) | +| Pg Create Sequence Sample | [source 
code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgCreateSequenceSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/PgCreateSequenceSample.java) | +| Pg Drop Sequence Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgDropSequenceSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/PgDropSequenceSample.java) | +| Pg Interleaved Table Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgInterleavedTableSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/PgInterleavedTableSample.java) | +| Pg Spanner Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgSpannerSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/PgSpannerSample.java) | +| Restore Backup With Encryption Key | [source 
code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/RestoreBackupWithEncryptionKey.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/RestoreBackupWithEncryptionKey.java) | +| Spanner Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/SpannerSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/SpannerSample.java) | +| Update Database Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/UpdateDatabaseSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/UpdateDatabaseSample.java) | +| Update Database With Default Leader Sample | [source code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/UpdateDatabaseWithDefaultLeaderSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/UpdateDatabaseWithDefaultLeaderSample.java) | +| Update Instance Config Sample | [source 
code](https://github.com/googleapis/google-cloud-java/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/UpdateInstanceConfigSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/google-cloud-java&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/UpdateInstanceConfigSample.java) | + + + +## Troubleshooting + +To get help, follow the instructions in the [shared Troubleshooting document][troubleshooting]. + +## Transport + +Cloud Spanner uses gRPC for the transport layer. + +## Supported Java Versions + +Java 8 or above is required for using this client. + +Google's Java client libraries, +[Google Cloud Client Libraries][cloudlibs] +and +[Google Cloud API Libraries][apilibs], +follow the +[Oracle Java SE support roadmap][oracle] +(see the Oracle Java SE Product Releases section). + +### For new development + +In general, new feature development occurs with support for the lowest Java +LTS version covered by Oracle's Premier Support (which typically lasts 5 years +from initial General Availability). If the minimum required JVM for a given +library is changed, it is accompanied by a [semver][semver] major release. + +Java 11 and (in September 2021) Java 17 are the best choices for new +development. + +### Keeping production systems current + +Google tests its client libraries with all current LTS versions covered by +Oracle's Extended Support (which typically lasts 8 years from initial +General Availability). + +#### Legacy support + +Google's client libraries support legacy versions of Java runtimes with long +term stable libraries that don't receive feature updates on a best efforts basis +as it may not be possible to backport all patches. 
+ +Google provides updates on a best efforts basis to apps that continue to use +Java 7, though apps might need to upgrade to current versions of the library +that support their JVM. + +#### Where to find specific information + +The latest versions and the supported Java versions are identified on +the individual GitHub repository `github.com/GoogleAPIs/java-SERVICENAME` +and on [google-cloud-java][g-c-j]. + +## Versioning + + +This library follows [Semantic Versioning](http://semver.org/). + + + +## Contributing + + +Contributions to this library are always welcome and highly encouraged. + +See [CONTRIBUTING][contributing] for more information on how to get started. + +Please note that this project is released with a Contributor Code of Conduct. By participating in +this project you agree to abide by its terms. See [Code of Conduct][code-of-conduct] for more +information. + + +## License + +Apache 2.0 - See [LICENSE][license] for more information. + +Java is a registered trademark of Oracle and/or its affiliates. 
+ +[product-docs]: https://cloud.google.com/spanner/docs/ +[javadocs]: https://cloud.google.com/java/docs/reference/google-cloud-spanner/latest/history +[stability-image]: https://img.shields.io/badge/stability-stable-green +[maven-version-image]: https://img.shields.io/maven-central/v/com.google.cloud/google-cloud-spanner.svg +[maven-version-link]: https://central.sonatype.com/artifact/com.google.cloud/google-cloud-spanner/6.112.0 +[authentication]: https://github.com/googleapis/google-cloud-java#authentication +[auth-scopes]: https://developers.google.com/identity/protocols/oauth2/scopes +[predefined-iam-roles]: https://cloud.google.com/iam/docs/understanding-roles#predefined_roles +[iam-policy]: https://cloud.google.com/iam/docs/overview#cloud-iam-policy +[developer-console]: https://console.developers.google.com/ +[create-project]: https://cloud.google.com/resource-manager/docs/creating-managing-projects +[cloud-cli]: https://cloud.google.com/cli +[troubleshooting]: https://github.com/googleapis/google-cloud-java/blob/main/TROUBLESHOOTING.md +[contributing]: https://github.com/googleapis/google-cloud-java/blob/main/CONTRIBUTING.md +[code-of-conduct]: https://github.com/googleapis/google-cloud-java/blob/main/CODE_OF_CONDUCT.md#contributor-code-of-conduct +[license]: https://github.com/googleapis/google-cloud-java/blob/main/LICENSE +[enable-billing]: https://cloud.google.com/apis/docs/getting-started#enabling_billing +[enable-api]: https://console.cloud.google.com/flows/enableapi?apiid=spanner.googleapis.com +[libraries-bom]: https://github.com/GoogleCloudPlatform/cloud-opensource-java/wiki/The-Google-Cloud-Platform-Libraries-BOM +[shell_img]: https://gstatic.com/cloudssh/images/open-btn.png + +[semver]: https://semver.org/ +[cloudlibs]: https://cloud.google.com/apis/docs/client-libraries-explained +[apilibs]: https://cloud.google.com/apis/docs/client-libraries-explained#google_api_client_libraries +[oracle]: 
https://www.oracle.com/java/technologies/java-se-support-roadmap.html +[g-c-j]: http://github.com/googleapis/google-cloud-java diff --git a/java-spanner/benchmarks/README.md b/java-spanner/benchmarks/README.md new file mode 100644 index 000000000000..4f829e22a037 --- /dev/null +++ b/java-spanner/benchmarks/README.md @@ -0,0 +1,53 @@ +# Latency Tests + +This directory contains a utility to compare latencies of different public methods supported by the Java Client Library. +The tests use simple statements that operate on a single row at a time. + +The goal is that addition of new features should not add any latency. + +## Setup Test Database + +All tests in this directory use a database with a single table. Follow these steps to create a +database that you can use for these tests: + +1. Set up some environment variables. These will be used by all following steps. +```shell +export SPANNER_CLIENT_BENCHMARK_GOOGLE_CLOUD_PROJECT=my-project +export SPANNER_CLIENT_BENCHMARK_SPANNER_INSTANCE=my-instance +export SPANNER_CLIENT_BENCHMARK_SPANNER_DATABASE=my-database +``` + +2. Create the Cloud Spanner database if it does not already exist using Pantheon UI. +3. Create the test table in the Cloud Spanner database. The tests assume the below DDL for the table. + +```shell +CREATE TABLE FOO ( id INT64 NOT NULL, BAZ INT64, BAR INT64, ) PRIMARY KEY(id); +``` +4. Generate some random test data that can be used for the benchmarking. This can be done +by using the script `bulkInsertTestData` in class `BenchmarkingUtilityScripts` to bulk +load 1000000 rows into this table. A large table makes sure that the queries are well +randomised and there is no hot-spotting. + +## Running + +The benchmark application includes Java Client as a dependency. Modify the dependency +version in `pom.xml` file if you wish to benchmark a different version of Java Client. + + +* The below command uses only 1 thread and 1000 operations. So the total load would +be 1000 read operations. 
The test also uses multiplexed sessions. +```shell +mvn clean compile exec:java -Dexec.args="--clients=1 --operations=1000 --multiplexed=true" +``` + +* The below command uses 10 threads, so at any point in time there would be roughly +10 concurrent requests. The total load of the benchmark would be 50000 read operations. +```shell +mvn clean compile exec:java -Dexec.args="--clients=10 --operations=5000 --multiplexed=true" +``` + +* To run the same test without multiplexed sessions avoid passing `multiplexed` flag. This will +make sure that tests uses regular sessions. +```shell +mvn clean compile exec:java -Dexec.args="--clients=10 --operations=5000" +``` \ No newline at end of file diff --git a/java-spanner/benchmarks/pom.xml b/java-spanner/benchmarks/pom.xml new file mode 100644 index 000000000000..edcdd065b608 --- /dev/null +++ b/java-spanner/benchmarks/pom.xml @@ -0,0 +1,149 @@ + + + + + 4.0.0 + com.google.cloud + google-cloud-spanner-benchmark + jar + Google Cloud Spanner Benchmark + + + com.google.cloud + google-cloud-spanner-parent + 6.112.1-SNAPSHOT + + + + 1.8 + 1.8 + 1.8 + UTF-8 + UTF-8 + 2.10.1 + 1.59.0 + 3.85.0 + + + + + io.opentelemetry + opentelemetry-api + + + io.opentelemetry + opentelemetry-context + + + com.google.cloud.opentelemetry + exporter-trace + 0.36.0 + + + com.google.cloud.opentelemetry + exporter-metrics + 0.36.0 + + + com.google.cloud + google-cloud-monitoring + 3.88.0 + + + io.opentelemetry + opentelemetry-sdk + ${opentelemetry.version} + + + io.opentelemetry + opentelemetry-sdk-metrics + ${opentelemetry.version} + + + io.opentelemetry + opentelemetry-sdk-trace + ${opentelemetry.version} + + + io.opentelemetry + opentelemetry-sdk-testing + ${opentelemetry.version} + + + com.google.re2j + re2j + 1.8 + + + com.google.cloud + google-cloud-spanner + 6.110.0 + + + com.google.auto.value + auto-value-annotations + 1.11.1 + + + com.kohlschutter.junixsocket + junixsocket-core + ${junixsocket.version} + pom + + + com.kohlschutter.junixsocket + 
junixsocket-common + ${junixsocket.version} + + + commons-cli + commons-cli + 1.11.0 + + + + junit + junit + 4.13.2 + test + + + + + + org.codehaus.mojo + exec-maven-plugin + 3.6.3 + + com.google.cloud.spanner.benchmark.LatencyBenchmark + false + + + + com.spotify.fmt + fmt-maven-plugin + 2.29 + + + + format + + + + + + + diff --git a/java-spanner/benchmarks/src/main/java/com/google/cloud/spanner/SessionPoolOptionsHelper.java b/java-spanner/benchmarks/src/main/java/com/google/cloud/spanner/SessionPoolOptionsHelper.java new file mode 100644 index 000000000000..8f978a39a313 --- /dev/null +++ b/java-spanner/benchmarks/src/main/java/com/google/cloud/spanner/SessionPoolOptionsHelper.java @@ -0,0 +1,33 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.InternalApi; + +/** + * Simple helper class to get access to a package-private method in the {@link + * com.google.cloud.spanner.SessionPoolOptions}. + */ +@InternalApi +public class SessionPoolOptionsHelper { + + // TODO: Remove when Builder.setUseMultiplexedSession(..) has been made public. 
+ public static SessionPoolOptions.Builder setUseMultiplexedSession( + SessionPoolOptions.Builder sessionPoolOptionsBuilder, boolean useMultiplexedSession) { + return sessionPoolOptionsBuilder.setUseMultiplexedSession(useMultiplexedSession); + } +} diff --git a/java-spanner/benchmarks/src/main/java/com/google/cloud/spanner/benchmark/AbstractRunner.java b/java-spanner/benchmarks/src/main/java/com/google/cloud/spanner/benchmark/AbstractRunner.java new file mode 100644 index 000000000000..b233cedaf227 --- /dev/null +++ b/java-spanner/benchmarks/src/main/java/com/google/cloud/spanner/benchmark/AbstractRunner.java @@ -0,0 +1,135 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.benchmark; + +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +abstract class AbstractRunner implements BenchmarkRunner { + static final int TOTAL_RECORDS = 100000; + static final String TABLE_NAME = "Employees"; + static final String SELECT_QUERY = String.format("SELECT ID FROM %s WHERE ID = @id", TABLE_NAME); + static final String UPDATE_QUERY = + String.format("UPDATE %s SET Name=Google WHERE ID = @id", TABLE_NAME); + static final String ID_COLUMN_NAME = "id"; + static final Map SERVER_URL_MAPPING = new HashMap<>(); + + static { + SERVER_URL_MAPPING.put( + Environment.CLOUD_DEVEL, "https://staging-wrenchworks.sandbox.googleapis.com"); + SERVER_URL_MAPPING.put(Environment.PROD, "https://spanner.googleapis.com"); + } + + Map timerConfigurations = new HashMap<>(); + private final Set completedClients = new HashSet<>(); + private final Set finishedClients = new HashSet<>(); + + protected void initiateTimer(int clientId, String message, Instant endTime) { + TimerConfiguration timerConfiguration = + timerConfigurations.getOrDefault(clientId, new TimerConfiguration()); + timerConfiguration.setMessage(message); + timerConfiguration.setEndTime(endTime); + timerConfigurations.put(clientId, timerConfiguration); + } + + protected void setBenchmarkingCompleted(int clientId) { + this.completedClients.add(clientId); + } + + protected List collectResults( + ExecutorService service, + List>> results, + BenchmarkingConfiguration configuration) + throws Exception { + while 
(!(finishedClients.size() == configuration.getNumOfClients())) + for (int i = 0; i < configuration.getNumOfClients(); i++) { + TimerConfiguration timerConfiguration = + timerConfigurations.getOrDefault(i, new TimerConfiguration()); + long totalSeconds = + ChronoUnit.SECONDS.between(Instant.now(), timerConfiguration.getEndTime()); + if (completedClients.contains(i)) { + if (!finishedClients.contains(i)) { + System.out.printf("Client %s: Completed", i); + finishedClients.add(i); + } + } else { + System.out.printf( + "Client %s: %s %s Minutes %s Seconds\r", + i + 1, timerConfiguration.getMessage(), totalSeconds / 60, totalSeconds % 60); + } + //noinspection BusyWait + Thread.sleep(1000L); + } + service.shutdown(); + if (!service.awaitTermination(60L, TimeUnit.MINUTES)) { + throw new TimeoutException(); + } + List allResults = new ArrayList<>(); + for (Future> result : results) { + allResults.addAll(result.get()); + } + return allResults; + } + + protected void randomWait(int waitMillis) throws InterruptedException { + if (waitMillis <= 0) { + return; + } + int randomMillis = ThreadLocalRandom.current().nextInt(waitMillis * 2); + Thread.sleep(randomMillis); + } + + protected String generateRandomString() { + byte[] bytes = new byte[64]; + ThreadLocalRandom.current().nextBytes(bytes); + return new String(bytes, StandardCharsets.UTF_8); + } + + static class TimerConfiguration { + private Instant endTime = Instant.now(); + private String message = "Waiting for benchmarks to start..."; + + Instant getEndTime() { + return endTime; + } + + void setEndTime(Instant endTime) { + this.endTime = endTime; + } + + String getMessage() { + return message; + } + + void setMessage(String message) { + this.message = message; + } + } +} diff --git a/java-spanner/benchmarks/src/main/java/com/google/cloud/spanner/benchmark/BenchmarkRunner.java b/java-spanner/benchmarks/src/main/java/com/google/cloud/spanner/benchmark/BenchmarkRunner.java new file mode 100644 index 
000000000000..4f8a77c3a1d1 --- /dev/null +++ b/java-spanner/benchmarks/src/main/java/com/google/cloud/spanner/benchmark/BenchmarkRunner.java @@ -0,0 +1,36 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.benchmark; + +import java.time.Duration; +import java.util.List; + +interface BenchmarkRunner { + enum TransactionType { + READ_ONLY_SINGLE_USE_READ, + READ_ONLY_SINGLE_USE_QUERY, + READ_ONLY_MULTI_USE, + READ_WRITE + } + + enum Environment { + PROD, + CLOUD_DEVEL + } + + List execute(BenchmarkingConfiguration configuration); +} diff --git a/java-spanner/benchmarks/src/main/java/com/google/cloud/spanner/benchmark/BenchmarkingConfiguration.java b/java-spanner/benchmarks/src/main/java/com/google/cloud/spanner/benchmark/BenchmarkingConfiguration.java new file mode 100644 index 000000000000..e3003cf58a15 --- /dev/null +++ b/java-spanner/benchmarks/src/main/java/com/google/cloud/spanner/benchmark/BenchmarkingConfiguration.java @@ -0,0 +1,115 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.benchmark; + +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.benchmark.BenchmarkRunner.Environment; +import com.google.cloud.spanner.benchmark.BenchmarkRunner.TransactionType; + +class BenchmarkingConfiguration { + + private DatabaseId databaseId; + private int numOfClients; + private int staleness; + private int warmupTime; + private int executionTime; + private int waitBetweenRequests; + private boolean useMultiplexSession; + private TransactionType transactionType; + private Environment environment; + + int getExecutionTime() { + return executionTime; + } + + BenchmarkingConfiguration setExecutionTime(int executionTime) { + this.executionTime = executionTime; + return this; + } + + DatabaseId getDatabaseId() { + return databaseId; + } + + BenchmarkingConfiguration setDatabaseId(DatabaseId databaseId) { + this.databaseId = databaseId; + return this; + } + + int getNumOfClients() { + return numOfClients; + } + + BenchmarkingConfiguration setNumOfClients(int numOfClients) { + this.numOfClients = numOfClients; + return this; + } + + int getStaleness() { + return staleness; + } + + BenchmarkingConfiguration setStaleness(int staleness) { + this.staleness = staleness; + return this; + } + + int getWarmupTime() { + return warmupTime; + } + + BenchmarkingConfiguration setWarmupTime(int warmupTime) { + this.warmupTime = warmupTime; + return this; + } + + int getWaitBetweenRequests() { + return waitBetweenRequests; + } + + BenchmarkingConfiguration setWaitBetweenRequests(int 
waitBetweenRequests) { + this.waitBetweenRequests = waitBetweenRequests; + return this; + } + + boolean isUseMultiplexSession() { + return useMultiplexSession; + } + + BenchmarkingConfiguration setUseMultiplexSession(boolean useMultiplexSession) { + this.useMultiplexSession = useMultiplexSession; + return this; + } + + TransactionType getTransactionType() { + return transactionType; + } + + BenchmarkingConfiguration setTransactionType(TransactionType transactionType) { + this.transactionType = transactionType; + return this; + } + + Environment getEnvironment() { + return environment; + } + + BenchmarkingConfiguration setEnvironment(Environment environment) { + this.environment = environment; + return this; + } +} diff --git a/java-spanner/benchmarks/src/main/java/com/google/cloud/spanner/benchmark/JavaClientRunner.java b/java-spanner/benchmarks/src/main/java/com/google/cloud/spanner/benchmark/JavaClientRunner.java new file mode 100644 index 000000000000..ebe8f3bbaab3 --- /dev/null +++ b/java-spanner/benchmarks/src/main/java/com/google/cloud/spanner/benchmark/JavaClientRunner.java @@ -0,0 +1,261 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.benchmark; + +import com.google.cloud.opentelemetry.metric.GoogleCloudMetricExporter; +import com.google.cloud.opentelemetry.trace.TraceExporter; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.ReadOnlyTransaction; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SessionPoolOptions; +import com.google.cloud.spanner.SessionPoolOptionsHelper; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TimestampBound; +import com.google.common.base.Stopwatch; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; +import io.opentelemetry.sdk.resources.Resource; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.export.BatchSpanProcessor; +import io.opentelemetry.sdk.trace.export.SpanExporter; +import io.opentelemetry.sdk.trace.samplers.Sampler; +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; + +class JavaClientRunner extends AbstractRunner { + private final DatabaseId databaseId; + private long numNullValues; + private long numNonNullValues; + + 
JavaClientRunner(DatabaseId databaseId) { + this.databaseId = databaseId; + } + + @Override + public List execute(BenchmarkingConfiguration configuration) { + // setup open telemetry metrics and traces + // setup open telemetry metrics and traces + SpanExporter traceExporter = TraceExporter.createWithDefaultConfiguration(); + SdkTracerProvider tracerProvider = + SdkTracerProvider.builder() + .addSpanProcessor(BatchSpanProcessor.builder(traceExporter).build()) + .setResource( + Resource.create( + Attributes.of( + AttributeKey.stringKey("service.name"), + "Java-MultiplexedSession-Benchmark"))) + .setSampler(Sampler.alwaysOn()) + .build(); + MetricExporter cloudMonitoringExporter = + GoogleCloudMetricExporter.createWithDefaultConfiguration(); + SdkMeterProvider sdkMeterProvider = + SdkMeterProvider.builder() + .registerMetricReader(PeriodicMetricReader.create(cloudMonitoringExporter)) + .build(); + OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder() + .setMeterProvider(sdkMeterProvider) + .setTracerProvider(tracerProvider) + .build(); + SessionPoolOptions sessionPoolOptions = + SessionPoolOptionsHelper.setUseMultiplexedSession( + SessionPoolOptions.newBuilder(), configuration.isUseMultiplexSession()) + .build(); + SpannerOptions.enableOpenTelemetryMetrics(); + SpannerOptions.enableOpenTelemetryTraces(); + SpannerOptions options = + SpannerOptions.newBuilder() + .setOpenTelemetry(openTelemetry) + .setProjectId(databaseId.getInstanceId().getProject()) + .setSessionPoolOption(sessionPoolOptions) + .setHost(SERVER_URL_MAPPING.get(configuration.getEnvironment())) + .build(); + try (Spanner spanner = options.getService()) { + DatabaseClient databaseClient = spanner.getDatabaseClient(databaseId); + + List>> results = new ArrayList<>(configuration.getNumOfClients()); + ExecutorService service = Executors.newFixedThreadPool(configuration.getNumOfClients()); + for (int client = 0; client < configuration.getNumOfClients(); client++) { + int clientId = client; + 
results.add(service.submit(() -> runBenchmark(databaseClient, clientId, configuration))); + } + return collectResults(service, results, configuration); + } catch (Throwable t) { + throw SpannerExceptionFactory.asSpannerException(t); + } + } + + private List runBenchmark( + DatabaseClient databaseClient, int clientId, BenchmarkingConfiguration configuration) { + List results = new ArrayList<>(); + // Execute one query to make sure everything has been warmed up. + warmUp(databaseClient, clientId, configuration); + runBenchmark(databaseClient, clientId, configuration, results); + setBenchmarkingCompleted(clientId); + return results; + } + + private void runBenchmark( + DatabaseClient databaseClient, + int clientId, + BenchmarkingConfiguration configuration, + List results) { + Instant endTime = Instant.now().plus(Duration.ofMinutes(configuration.getExecutionTime())); + initiateTimer(clientId, "Remaining execution time", endTime); + while (endTime.isAfter(Instant.now())) { + try { + randomWait(configuration.getWaitBetweenRequests()); + results.add( + executeTransaction( + databaseClient, configuration.getTransactionType(), configuration.getStaleness())); + } catch (InterruptedException interruptedException) { + throw SpannerExceptionFactory.propagateInterrupt(interruptedException); + } + } + } + + private void warmUp( + DatabaseClient databaseClient, int clientId, BenchmarkingConfiguration configuration) { + Instant endTime = Instant.now().plus(Duration.ofMinutes(configuration.getWarmupTime())); + initiateTimer(clientId, "Remaining warmup time", endTime); + while (endTime.isAfter(Instant.now())) { + executeTransaction( + databaseClient, configuration.getTransactionType(), configuration.getStaleness()); + } + } + + private Duration executeTransaction( + DatabaseClient client, TransactionType transactionType, int staleness) { + Stopwatch watch = Stopwatch.createStarted(); + switch (transactionType) { + case READ_ONLY_SINGLE_USE_READ: + 
executeSingleUseReadOnlyTransactionWithRead(client, staleness); + break; + case READ_ONLY_SINGLE_USE_QUERY: + executeSingleUseReadOnlyTransactionWithQuery(client, staleness); + break; + case READ_ONLY_MULTI_USE: + executeMultiUseReadOnlyTransaction(client); + break; + case READ_WRITE: + executeReadWriteTransaction(client); + break; + } + return watch.elapsed(); + } + + private void executeSingleUseReadOnlyTransactionWithRead(DatabaseClient client, int staleness) { + List columns = new ArrayList<>(); + int key = getRandomKey(); + columns.add("ID"); + try (ResultSet resultSet = + client + .singleUse(TimestampBound.ofExactStaleness(staleness, TimeUnit.SECONDS)) + .read(TABLE_NAME, KeySet.singleKey(Key.of(key)), columns)) { + while (resultSet.next()) { + for (int i = 0; i < resultSet.getColumnCount(); i++) { + if (resultSet.isNull(i)) { + numNullValues++; + } else { + numNonNullValues++; + } + } + } + } + } + + private void executeSingleUseReadOnlyTransactionWithQuery(DatabaseClient client, int staleness) { + try (ResultSet resultSet = + client + .singleUse(TimestampBound.ofExactStaleness(staleness, TimeUnit.SECONDS)) + .executeQuery(getRandomisedReadStatement())) { + while (resultSet.next()) { + for (int i = 0; i < resultSet.getColumnCount(); i++) { + if (resultSet.isNull(i)) { + numNullValues++; + } else { + numNonNullValues++; + } + } + } + } + } + + private void executeMultiUseReadOnlyTransaction(DatabaseClient client) { + try (ReadOnlyTransaction transaction = client.readOnlyTransaction()) { + ResultSet resultSet = transaction.executeQuery(getRandomisedReadStatement()); + iterateResultSet(resultSet); + + ResultSet resultSet1 = transaction.executeQuery(getRandomisedReadStatement()); + iterateResultSet(resultSet1); + + ResultSet resultSet2 = transaction.executeQuery(getRandomisedReadStatement()); + iterateResultSet(resultSet2); + + ResultSet resultSet3 = transaction.executeQuery(getRandomisedReadStatement()); + iterateResultSet(resultSet3); + } + } + + private void 
iterateResultSet(ResultSet resultSet) { + while (resultSet.next()) { + for (int i = 0; i < resultSet.getColumnCount(); i++) { + if (resultSet.isNull(i)) { + numNullValues++; + } else { + numNonNullValues++; + } + } + } + } + + private void executeReadWriteTransaction(DatabaseClient client) { + client + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(getRandomisedUpdateStatement())); + } + + static Statement getRandomisedReadStatement() { + return Statement.newBuilder(SELECT_QUERY).bind(ID_COLUMN_NAME).to(getRandomKey()).build(); + } + + static Statement getRandomisedUpdateStatement() { + return Statement.newBuilder(UPDATE_QUERY).bind(ID_COLUMN_NAME).to(getRandomKey()).build(); + } + + static int getRandomKey() { + return ThreadLocalRandom.current().nextInt(TOTAL_RECORDS); + } +} diff --git a/java-spanner/benchmarks/src/main/java/com/google/cloud/spanner/benchmark/LatencyBenchmark.java b/java-spanner/benchmarks/src/main/java/com/google/cloud/spanner/benchmark/LatencyBenchmark.java new file mode 100644 index 000000000000..d3c2d71e9554 --- /dev/null +++ b/java-spanner/benchmarks/src/main/java/com/google/cloud/spanner/benchmark/LatencyBenchmark.java @@ -0,0 +1,188 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.benchmark; + +import com.google.api.core.InternalApi; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.benchmark.BenchmarkRunner.Environment; +import com.google.cloud.spanner.benchmark.BenchmarkRunner.TransactionType; +import com.google.common.annotations.VisibleForTesting; +import java.time.Duration; +import java.time.temporal.ChronoUnit; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.stream.Collectors; +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.CommandLineParser; +import org.apache.commons.cli.DefaultParser; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; + +@InternalApi +@VisibleForTesting +public class LatencyBenchmark { + public static void main(String[] args) throws ParseException { + CommandLine cmd = parseCommandLine(args); + String project = System.getenv("SPANNER_CLIENT_BENCHMARK_GOOGLE_CLOUD_PROJECT"); + String instance = System.getenv("SPANNER_CLIENT_BENCHMARK_SPANNER_INSTANCE"); + String database = System.getenv("SPANNER_CLIENT_BENCHMARK_SPANNER_DATABASE"); + String fullyQualifiedDatabase; + + if (project != null && instance != null && database != null) { + fullyQualifiedDatabase = + String.format("projects/%s/instances/%s/databases/%s", project, instance, database); + } else { + throw new IllegalArgumentException( + "You must either set all the environment variables" + + " SPANNER_CLIENT_BENCHMARK_GOOGLE_CLOUD_PROJECT," + + " SPANNER_CLIENT_BENCHMARK_SPANNER_INSTANCE and" + + " SPANNER_CLIENT_BENCHMARK_SPANNER_DATABASE, or specify a value for the command" + + " line argument --database"); + } + + LatencyBenchmark benchmark = new LatencyBenchmark(DatabaseId.of(fullyQualifiedDatabase)); + benchmark.run(cmd); + } + + private static CommandLine parseCommandLine(String[] args) throws ParseException { + Options options = new Options(); + 
options.addOption("d", "database", true, "The database to use for benchmarking."); + options.addOption( + "c", "clients", true, "The number of clients that will be executing queries in parallel."); + options.addOption( + "wu", + "warmupTime", + true, + "Total warm up time before running actual benchmarking. Defaults to 7 minutes."); + options.addOption( + "et", + "executionTime", + true, + "Total execution time of the benchmarking. Defaults to 30 minutes."); + options.addOption( + "st", "staleness", true, "Total Staleness for Reads and Queries. Defaults to 15 seconds."); + options.addOption( + "w", + "wait", + true, + "The wait time in milliseconds between each query that is executed by each client. Defaults" + + " to 0. Set this to for example 1000 to have each client execute 1 query per" + + " second."); + options.addOption( + "t", + "transactionType", + true, + "The type of transaction to execute. Must be either READ_ONLY or READ_WRITE. Defaults to" + + " READ_ONLY."); + options.addOption( + "e", + "environment", + true, + "Spanner Environment. Must be either PROD or CLOUD_DEVEL. Default to CLOUD_DEVEL"); + options.addOption("m", "multiplexed", true, "Use multiplexed sessions. Defaults to true."); + options.addOption("name", true, "Name of this test run"); + CommandLineParser parser = new DefaultParser(); + return parser.parse(options, args); + } + + private final DatabaseId databaseId; + + LatencyBenchmark(DatabaseId databaseId) { + this.databaseId = databaseId; + } + + public void run(CommandLine commandLine) { + int clients = + commandLine.hasOption('c') ? Integer.parseInt(commandLine.getOptionValue('c')) : 1; + int executionTime = + commandLine.hasOption("et") ? Integer.parseInt(commandLine.getOptionValue("et")) : 30; + int warmUpTime = + commandLine.hasOption("wu") ? Integer.parseInt(commandLine.getOptionValue("wu")) : 7; + int waitMillis = + commandLine.hasOption('w') ? 
Integer.parseInt(commandLine.getOptionValue('w')) : 0; + int staleness = + commandLine.hasOption("st") ? Integer.parseInt(commandLine.getOptionValue("st")) : 15; + TransactionType transactionType = + commandLine.hasOption('t') + ? TransactionType.valueOf(commandLine.getOptionValue('t').toUpperCase(Locale.ENGLISH)) + : TransactionType.READ_ONLY_SINGLE_USE_QUERY; + boolean useMultiplexedSession = + !commandLine.hasOption('m') || Boolean.parseBoolean(commandLine.getOptionValue('m')); + Environment environment = + commandLine.hasOption('e') + ? Environment.valueOf(commandLine.getOptionValue('e').toUpperCase(Locale.ENGLISH)) + : Environment.CLOUD_DEVEL; + + BenchmarkingConfiguration configuration = + new BenchmarkingConfiguration() + .setDatabaseId(databaseId) + .setNumOfClients(clients) + .setExecutionTime(executionTime) + .setWarmupTime(warmUpTime) + .setStaleness(staleness) + .setTransactionType(transactionType) + .setUseMultiplexSession(useMultiplexedSession) + .setWaitBetweenRequests(waitMillis) + .setEnvironment(environment); + + System.out.println(); + System.out.println("Running benchmark with the following options"); + System.out.printf("Database: %s\n", configuration.getDatabaseId()); + System.out.printf("Clients: %d\n", configuration.getNumOfClients()); + System.out.printf("Total Warm up Time: %d mins\n", configuration.getWarmupTime()); + System.out.printf("Total Execution Time: %d mins\n", configuration.getExecutionTime()); + System.out.printf("Staleness: %d secs\n", configuration.getStaleness()); + System.out.printf("Transaction type: %s\n", configuration.getTransactionType()); + System.out.printf("Use Multiplexed Sessions: %s\n", configuration.isUseMultiplexSession()); + System.out.printf("Wait between requests: %dms\n", configuration.getWaitBetweenRequests()); + + List javaClientResults = null; + System.out.println(); + System.out.println("Running benchmark for Java Client Library"); + JavaClientRunner javaClientRunner = new 
JavaClientRunner(configuration.getDatabaseId()); + javaClientResults = javaClientRunner.execute(configuration); + + printResults("Java Client Library", javaClientResults); + } + + private void printResults(String header, List results) { + if (results == null) { + return; + } + List orderedResults = new ArrayList<>(results); + Collections.sort(orderedResults); + System.out.println(); + System.out.println(header); + System.out.printf("Total number of queries: %d\n", orderedResults.size()); + System.out.printf("Avg: %.2fms\n", avg(results)); + System.out.printf("P50: %.2fms\n", percentile(50, orderedResults)); + System.out.printf("P95: %.2fms\n", percentile(95, orderedResults)); + System.out.printf("P99: %.2fms\n", percentile(99, orderedResults)); + } + + private double percentile(int percentile, List orderedResults) { + return orderedResults.get(percentile * orderedResults.size() / 100).get(ChronoUnit.NANOS) + / 1_000_000.0f; + } + + private double avg(List results) { + return results.stream() + .collect(Collectors.averagingDouble(result -> result.get(ChronoUnit.NANOS) / 1_000_000.0f)); + } +} diff --git a/java-spanner/git-hooks/pre-commit b/java-spanner/git-hooks/pre-commit new file mode 100755 index 000000000000..0683a6fcc64b --- /dev/null +++ b/java-spanner/git-hooks/pre-commit @@ -0,0 +1,8 @@ +#!/bin/sh +# +# Copy this file into your .git/hooks folder + +set -e + +# Checks for Formatting errors +mvn com.coveo:fmt-maven-plugin:check diff --git a/java-spanner/google-cloud-spanner-bom/pom.xml b/java-spanner/google-cloud-spanner-bom/pom.xml new file mode 100644 index 000000000000..f3e86d996fc0 --- /dev/null +++ b/java-spanner/google-cloud-spanner-bom/pom.xml @@ -0,0 +1,122 @@ + + + 4.0.0 + com.google.cloud + google-cloud-spanner-bom + 6.112.1-SNAPSHOT + pom + + com.google.cloud + google-cloud-pom-parent + 1.83.0-SNAPSHOT + ../../google-cloud-pom-parent/pom.xml + + + Google Cloud Spanner BOM + https://github.com/googleapis/google-cloud-java + + BOM for Google Cloud 
Spanner + + + + Google LLC + + + + + chingor13 + Jeff Ching + chingor@google.com + Google LLC + + Developer + + + + + + scm:git:https://github.com/googleapis/google-cloud-java.git + scm:git:git@github.com:googleapis/google-cloud-java.git + https://github.com/googleapis/google-cloud-java + + + + + The Apache Software License, Version 2.0 + http://www.apache.org/licenses/LICENSE-2.0.txt + repo + + + + + + + com.google.cloud + google-cloud-spanner + 6.112.1-SNAPSHOT + + + com.google.cloud + google-cloud-spanner + test-jar + 6.112.1-SNAPSHOT + + + com.google.api.grpc + grpc-google-cloud-spanner-v1 + 6.112.1-SNAPSHOT + + + com.google.api.grpc + grpc-google-cloud-spanner-admin-instance-v1 + 6.112.1-SNAPSHOT + + + com.google.api.grpc + grpc-google-cloud-spanner-admin-database-v1 + 6.112.1-SNAPSHOT + + + com.google.api.grpc + proto-google-cloud-spanner-admin-instance-v1 + 6.112.1-SNAPSHOT + + + com.google.api.grpc + proto-google-cloud-spanner-v1 + 6.112.1-SNAPSHOT + + + com.google.api.grpc + proto-google-cloud-spanner-admin-database-v1 + 6.112.1-SNAPSHOT + + + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.15.0 + + 1.8 + 1.8 + UTF-8 + -Xlint:unchecked + -Xlint:deprecation + true + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + + diff --git a/java-spanner/google-cloud-spanner-executor/README.md b/java-spanner/google-cloud-spanner-executor/README.md new file mode 100644 index 000000000000..78739a4949d3 --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/README.md @@ -0,0 +1,4 @@ +This module is for Google-internal use ([details]( +http://go/cloud-spanner-client-testing-design)). +This is not for customers. 
+ diff --git a/java-spanner/google-cloud-spanner-executor/clirr-ignored-differences.xml b/java-spanner/google-cloud-spanner-executor/clirr-ignored-differences.xml new file mode 100644 index 000000000000..11e9890f1d9b --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/clirr-ignored-differences.xml @@ -0,0 +1,15 @@ + + + + + 7004 + com/google/cloud/executor/spanner/CloudExecutorImpl + CloudExecutorImpl(boolean) + CloudExecutorImpl(boolean, double) + + + 7002 + com/google/cloud/spanner/SessionPoolOptionsHelper + com.google.cloud.spanner.SessionPoolOptions$Builder setUseMultiplexedSessionBlindWrite(com.google.cloud.spanner.SessionPoolOptions$Builder, boolean) + + diff --git a/java-spanner/google-cloud-spanner-executor/pom.xml b/java-spanner/google-cloud-spanner-executor/pom.xml new file mode 100644 index 000000000000..1141f3b8d963 --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/pom.xml @@ -0,0 +1,307 @@ + + + 4.0.0 + com.google.cloud + google-cloud-spanner-executor + 6.112.1-SNAPSHOT + jar + Google Cloud Spanner Executor + + + com.google.cloud + google-cloud-spanner-parent + 6.112.1-SNAPSHOT + + + + 1.8 + 1.8 + UTF-8 + 0.36.0 + + + + + io.opentelemetry + opentelemetry-api + + + io.opentelemetry + opentelemetry-context + + + io.opentelemetry + opentelemetry-sdk + + + io.opentelemetry + opentelemetry-sdk-common + + + io.opentelemetry + opentelemetry-sdk-trace + + + com.google.cloud.opentelemetry + shared-resourcemapping + ${google.cloud.opentelemetry.version} + + + com.google.cloud.opentelemetry + exporter-trace + ${google.cloud.opentelemetry.version} + + + io.opentelemetry.semconv + opentelemetry-semconv + + + + + com.google.cloud + grpc-gcp + + + io.opentelemetry.semconv + opentelemetry-semconv + + + com.google.cloud + google-cloud-spanner + + + com.google.cloud + google-cloud-trace + 2.84.0 + + + + com.google.guava + failureaccess + + + + + io.grpc + grpc-api + + + io.grpc + grpc-netty-shaded + + + io.grpc + grpc-protobuf + + + io.grpc + 
grpc-stub + + + com.google.api + api-common + + + com.google.protobuf + protobuf-java + + + com.google.protobuf + protobuf-java-util + + + + com.google.j2objc + j2objc-annotations + + + + + com.google.api.grpc + proto-google-common-protos + + + com.google.cloud + google-cloud-core + + + com.google.auth + google-auth-library-oauth2-http + + + com.google.http-client + google-http-client + + + com.google.api.grpc + proto-google-cloud-spanner-admin-instance-v1 + + + com.google.api.grpc + proto-google-cloud-spanner-v1 + + + com.google.api.grpc + proto-google-cloud-spanner-admin-database-v1 + + + com.google.api.grpc + proto-google-cloud-spanner-executor-v1 + + + com.google.api.grpc + proto-google-cloud-trace-v1 + 2.84.0 + + + + com.google.guava + failureaccess + + + + + com.google.api.grpc + grpc-google-cloud-spanner-executor-v1 + + + com.google.guava + guava + + + com.google.api + gax + + + com.google.api + gax-grpc + + + com.google.code.findbugs + jsr305 + + + com.google.auth + google-auth-library-credentials + + + io.grpc + grpc-services + + + commons-cli + commons-cli + 1.11.0 + + + commons-io + commons-io + 2.21.0 + + + + junit + junit + test + + + + com.google.api + gax + testlib + test + + + com.google.api + gax-grpc + testlib + test + + + + org.apache.maven.surefire + surefire-junit4 + 3.5.5 + test + + + + + + + maven-resources-plugin + + + copy-resources + validate + + copy-resources + + + ${project.build.directory}/spanner-executor + + + resources + true + + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + + copy-dependencies + prepare-package + + copy-dependencies + + + ${project.build.directory}/spanner-executor/lib + false + false + true + + + + + + org.apache.maven.plugins + maven-jar-plugin + + spanner-executor/google-spanner-cloud-executor + + false + + com.google.cloud.executor.spanner.WorkerProxy + true + lib/ + + + + + + org.apache.maven.plugins + maven-failsafe-plugin + 3.5.5 + + + + + + org.apache.maven.plugins + 
maven-dependency-plugin + + com.google.api:gax,org.apache.maven.surefire:surefire-junit4,io.opentelemetry.semconv:opentelemetry-semconv,com.google.cloud.opentelemetry:shared-resourcemapping,com.google.cloud:grpc-gcp + + + + + + diff --git a/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudClientExecutor.java b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudClientExecutor.java new file mode 100644 index 000000000000..a9323fbdc25f --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudClientExecutor.java @@ -0,0 +1,3933 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.executor.spanner; + +import static com.google.cloud.spanner.TransactionRunner.TransactionCallable; + +import com.google.api.gax.core.FixedCredentialsProvider; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.paging.Page; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.DeadlineExceededException; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnavailableException; +import com.google.auth.Credentials; +import com.google.auth.oauth2.ServiceAccountCredentials; +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.NoCredentials; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Backup; +import com.google.cloud.spanner.BatchClient; +import com.google.cloud.spanner.BatchReadOnlyTransaction; +import com.google.cloud.spanner.BatchTransactionId; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Instance; +import com.google.cloud.spanner.InstanceAdminClient; +import com.google.cloud.spanner.InstanceConfig; +import com.google.cloud.spanner.InstanceConfigId; +import com.google.cloud.spanner.InstanceConfigInfo; +import com.google.cloud.spanner.InstanceId; +import com.google.cloud.spanner.InstanceInfo; +import com.google.cloud.spanner.Interval; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeyRange; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Mutation.WriteBuilder; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Options.RpcPriority; +import com.google.cloud.spanner.Partition; +import com.google.cloud.spanner.PartitionOptions; +import 
com.google.cloud.spanner.ReadContext; +import com.google.cloud.spanner.ReadOnlyTransaction; +import com.google.cloud.spanner.ReplicaInfo; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SessionPoolOptionsHelper; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.StructReader; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.TransactionContext; +import com.google.cloud.spanner.TransactionRunner; +import com.google.cloud.spanner.TransactionRunner.TransactionCallable; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.encryption.CustomerManagedEncryption; +import com.google.cloud.spanner.v1.stub.SpannerStubSettings; +import com.google.cloud.trace.v1.TraceServiceClient; +import com.google.cloud.trace.v1.TraceServiceSettings; +import com.google.common.base.Function; +import com.google.common.base.Joiner; +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.google.devtools.cloudtrace.v1.GetTraceRequest; +import com.google.devtools.cloudtrace.v1.Trace; +import com.google.devtools.cloudtrace.v1.TraceSpan; +import com.google.longrunning.Operation; +import com.google.protobuf.ByteString; +import com.google.protobuf.util.Timestamps; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.admin.instance.v1.Instance.State; +import com.google.spanner.executor.v1.AdminAction; +import com.google.spanner.executor.v1.AdminResult; +import com.google.spanner.executor.v1.BatchDmlAction; +import com.google.spanner.executor.v1.BatchPartition; +import 
com.google.spanner.executor.v1.CancelOperationAction; +import com.google.spanner.executor.v1.ChangeStreamRecord; +import com.google.spanner.executor.v1.ChildPartitionsRecord; +import com.google.spanner.executor.v1.CloseBatchTransactionAction; +import com.google.spanner.executor.v1.CloudBackupResponse; +import com.google.spanner.executor.v1.CloudDatabaseResponse; +import com.google.spanner.executor.v1.CloudInstanceConfigResponse; +import com.google.spanner.executor.v1.CloudInstanceResponse; +import com.google.spanner.executor.v1.Concurrency; +import com.google.spanner.executor.v1.CopyCloudBackupAction; +import com.google.spanner.executor.v1.CreateCloudBackupAction; +import com.google.spanner.executor.v1.CreateCloudDatabaseAction; +import com.google.spanner.executor.v1.CreateCloudInstanceAction; +import com.google.spanner.executor.v1.CreateUserInstanceConfigAction; +import com.google.spanner.executor.v1.DataChangeRecord; +import com.google.spanner.executor.v1.DeleteCloudBackupAction; +import com.google.spanner.executor.v1.DeleteCloudInstanceAction; +import com.google.spanner.executor.v1.DeleteUserInstanceConfigAction; +import com.google.spanner.executor.v1.DmlAction; +import com.google.spanner.executor.v1.DropCloudDatabaseAction; +import com.google.spanner.executor.v1.ExecuteChangeStreamQuery; +import com.google.spanner.executor.v1.ExecutePartitionAction; +import com.google.spanner.executor.v1.FinishTransactionAction; +import com.google.spanner.executor.v1.FinishTransactionAction.Mode; +import com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction; +import com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction; +import com.google.spanner.executor.v1.GetCloudBackupAction; +import com.google.spanner.executor.v1.GetCloudDatabaseAction; +import com.google.spanner.executor.v1.GetCloudInstanceAction; +import com.google.spanner.executor.v1.GetCloudInstanceConfigAction; +import com.google.spanner.executor.v1.GetOperationAction; +import 
com.google.spanner.executor.v1.HeartbeatRecord; +import com.google.spanner.executor.v1.ListCloudBackupOperationsAction; +import com.google.spanner.executor.v1.ListCloudBackupsAction; +import com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction; +import com.google.spanner.executor.v1.ListCloudDatabasesAction; +import com.google.spanner.executor.v1.ListCloudInstanceConfigsAction; +import com.google.spanner.executor.v1.ListCloudInstancesAction; +import com.google.spanner.executor.v1.MutationAction; +import com.google.spanner.executor.v1.MutationAction.InsertArgs; +import com.google.spanner.executor.v1.MutationAction.Mod; +import com.google.spanner.executor.v1.MutationAction.UpdateArgs; +import com.google.spanner.executor.v1.OperationResponse; +import com.google.spanner.executor.v1.PartitionedUpdateAction; +import com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions; +import com.google.spanner.executor.v1.QueryAction; +import com.google.spanner.executor.v1.ReadAction; +import com.google.spanner.executor.v1.RestoreCloudDatabaseAction; +import com.google.spanner.executor.v1.SessionPoolOptions; +import com.google.spanner.executor.v1.SpannerAction; +import com.google.spanner.executor.v1.SpannerActionOutcome; +import com.google.spanner.executor.v1.SpannerAsyncActionRequest; +import com.google.spanner.executor.v1.SpannerAsyncActionResponse; +import com.google.spanner.executor.v1.StartBatchTransactionAction; +import com.google.spanner.executor.v1.StartTransactionAction; +import com.google.spanner.executor.v1.TransactionExecutionOptions; +import com.google.spanner.executor.v1.UpdateCloudBackupAction; +import com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction; +import com.google.spanner.executor.v1.UpdateCloudInstanceAction; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.TypeAnnotationCode; +import com.google.spanner.v1.TypeCode; +import io.grpc.Status; +import io.grpc.stub.StreamObserver; +import 
io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.Tracer;
import io.opentelemetry.context.Scope;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.math.BigDecimal;
import java.text.ParseException;
import java.time.Duration;
import java.time.LocalDate;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import org.apache.commons.io.FileUtils;

/**
 * Implementation of the SpannerExecutorProxy gRPC service that proxies action requests through the
 * Java Cloud Client.
 */
public class CloudClientExecutor extends CloudExecutor {

  private static final Logger LOGGER = Logger.getLogger(CloudClientExecutor.class.getName());

  // Prefix for host address.
  // NOTE(review): uses an https scheme for a localhost address — confirm this matches how the
  // port is appended and consumed elsewhere in this class (not visible in this chunk).
  private static final String HOST_PREFIX = "https://localhost:";

  /**
   * Creates the executor.
   *
   * @param enableGrpcFaultInjector whether to enable the gRPC fault injector; stored on a field
   *     that is presumably declared in the {@code CloudExecutor} base class (not visible here).
   */
  public CloudClientExecutor(boolean enableGrpcFaultInjector) {
    this.enableGrpcFaultInjector = enableGrpcFaultInjector;
  }

  // Helper for unexpected results.
  /**
   * Formats an unexpected exception — including its toString, message, and full stack trace — into
   * a single string suitable for returning to the test driver.
   *
   * <p>NOTE(review): the message says "Github Cloud Java Client Executor" while the class javadoc
   * says this proxies through the "Java Cloud Client" — confirm the "Github" wording is intended
   * (e.g. to distinguish this open-source executor from an internal one) and not a typo.
   */
  public static String unexpectedExceptionResponse(Exception e) {
    return "Unexpected error in Github Cloud Java Client Executor: "
        + e
        + " Msg: "
        + e.getMessage()
        + " Stack: "
        + Joiner.on("\n").join(e.getStackTrace());
  }

  /**
   * Implementation of a ReadWriteTransaction, which is a wrapper of the cloud TransactionRunner.
It + * stores all the status and related variables from the start to finish, and control the running + * flow of this transaction. + * + *

The following functions should be called on this struct externally: + * + *

startRWTransaction() initializes a transaction. It creates a callable and runs it with a + * ReadWriteTransaction() runner in a separate thread. That callable will accept a + * transactionContext when created, and we will pass it out to execute actions on it. Then the + * callable will be blocked until we need to exit (e.g. commit) the transaction. + * + *

getContext() returns the current ReadWriteTransaction context. Reads and writes can be + * performed on that object. + * + *

finish() is used to either commit or abandon the transaction. It gets a finishMode from + * finishAction and essentially unblocks the separate callable thread that's waiting inside + * ReadWriteTransaction(). As a result of this call, Spanner will commit the current transaction, + * abandon it without committing, or restart it, in which case the client should get a new + * transaction instance using getContext() and replay all the reads and writes through it. + * + *

Here's a typical workflow for how a read-write transaction works. + * + *

When we call startRWTransaction, a transaction runner will be started in another thread with + * a callable that stores the passed TransactionContext into the ReadWriteTransaction and blocks. + * This TransactionContext is used to run the read/write actions. To execute the finish action, we + * store the FinishMode in the ReadWriteTransaction object, which unblocks the thread in the + * callable and causes the callable to either return (to commit) or throw an exception (to abort). + * If the underlying Spanner transaction aborted, the transaction runner will invoke the callable + * again. + */ + private static class ReadWriteTransaction { + private final DatabaseClient dbClient; + private TransactionRunner runner; + private TransactionContext txnContext; + private com.google.protobuf.Timestamp timestamp; + private Mode finishMode; + private SpannerException error; + private final String transactionSeed; + private final boolean optimistic; + // Set to true when the transaction runner completed, one of these three could happen: runner + // committed, abandoned or threw an error. + private boolean runnerCompleted; + + public ReadWriteTransaction( + DatabaseClient dbClient, String transactionSeed, boolean optimistic) { + this.dbClient = dbClient; + this.transactionSeed = transactionSeed; + this.optimistic = optimistic; + this.runnerCompleted = false; + } + + /** Set context to be used for executing actions. */ + private synchronized void setContext(TransactionContext transaction) { + finishMode = null; + txnContext = transaction; + Preconditions.checkNotNull(txnContext); + LOGGER.log(Level.INFO, "Transaction callable created, setting context %s\n", transactionSeed); + notifyAll(); + } + + /** Wait for finishAction to be executed and return the requested finish mode. */ + private synchronized Mode waitForFinishAction() throws Exception { + while (finishMode == null) { + wait(); + } + return finishMode; + } + + /** Wait for transactionContext to be set. 
*/ + private synchronized void waitForTransactionContext() throws Exception { + while (txnContext == null && error == null) { + wait(); + } + if (error != null) { + throw error; + } + } + + /** Transaction successfully committed with a timestamp. */ + private synchronized void transactionSucceeded(com.google.protobuf.Timestamp timestamp) { + this.timestamp = timestamp; + this.runnerCompleted = true; + notifyAll(); + } + + /** Transaction failed to commit, maybe abandoned or other errors occurred. */ + private synchronized void transactionFailed(SpannerException e) { + // Handle abandon case + if (e.getErrorCode() == ErrorCode.UNKNOWN && e.getMessage().contains(TRANSACTION_ABANDONED)) { + LOGGER.log(Level.INFO, "Transaction abandoned"); + } else { + // Store the error for sending back + error = e; + } + this.runnerCompleted = true; + notifyAll(); + } + + /** Return the commit timestamp. */ + public synchronized com.google.protobuf.Timestamp getTimestamp() { + return timestamp; + } + + /** Return the transactionContext to run actions. Must be called after start action. */ + public synchronized TransactionContext getContext() { + Preconditions.checkState(txnContext != null); + return txnContext; + } + + /** + * Create a new transaction runner and corresponding transaction callable to start a read-write + * transaction. + */ + public void startRWTransaction() throws Exception { + final TransactionCallable callable = + transaction -> { + setContext(transaction); + LOGGER.log( + Level.INFO, + String.format( + "Transaction context set, executing and waiting for finish %s\n", + transactionSeed)); + Mode mode = waitForFinishAction(); + if (mode == Mode.ABANDON) { + throw new Exception(TRANSACTION_ABANDONED); + } + // Try to commit + return null; + }; + io.opentelemetry.context.Context context = io.opentelemetry.context.Context.current(); + Runnable runnable = + context.wrap( + () -> { + try { + runner = + optimistic + ? 
dbClient.readWriteTransaction(Options.optimisticLock()) + : dbClient.readWriteTransaction(); + LOGGER.log( + Level.INFO, String.format("Ready to run callable %s\n", transactionSeed)); + runner.run(callable); + transactionSucceeded(runner.getCommitTimestamp().toProto()); + } catch (SpannerException e) { + LOGGER.log( + Level.WARNING, + String.format( + "Transaction runnable failed with exception %s\n", e.getMessage()), + e); + transactionFailed(e); + } + }); + LOGGER.log( + Level.INFO, + String.format("Callable and Runnable created, ready to execute %s\n", transactionSeed)); + txnThreadPool.execute(runnable); + waitForTransactionContext(); + LOGGER.log( + Level.INFO, + String.format("Transaction successfully created and running %s\n", transactionSeed)); + } + + /** + * Finish current transaction in given finishMode, if failed, throw the exception back to + * caller. Returns true if the transaction completed (i.e., committed), false if it was + * restarted by the transaction runner. + */ + public synchronized boolean finish(Mode finishMode) throws Exception { + switch (finishMode) { + case COMMIT: + case ABANDON: + // Signal that finish action has been called and finishMode has been set. + this.finishMode = finishMode; + Preconditions.checkNotNull(finishMode); + txnContext = null; + LOGGER.log( + Level.INFO, + String.format( + "TxnContext cleared, sending finishMode to finish transaction %s\n", + transactionSeed)); + notifyAll(); + // Wait for the transaction to finish or restart + while (txnContext == null && !runnerCompleted) { + wait(); + } + LOGGER.log( + Level.INFO, + String.format("Transaction finished, getting back to caller %s\n", transactionSeed)); + if (txnContext != null) { + // Transaction restarted + return false; + } else if (error != null) { + // Transaction runner threw an exception: re-throw it to the client. 
+ // Filter expected errors + if (error.getErrorCode() == ErrorCode.UNKNOWN + && error.getMessage().contains("Transaction outcome unknown")) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.DEADLINE_EXCEEDED, "Transaction outcome unknown."); + } else { + throw error; + } + } + // Transaction successfully completed + return true; + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Unsupported finish mode: " + finishMode); + } + } + } + + /** + * All the context in which SpannerActions are executed. It stores the current running transaction + * and table metadata, shared by all the action executor and protected by a lock. There will only + * be exactly one instance of this class per stubby call, created when the executor is + * initialized. + */ + class ExecutionFlowContext { + // Database path from previous action + private String prevDbPath; + // Current read-write transaction + private ReadWriteTransaction rwTxn; + // Current read-only transaction + private ReadOnlyTransaction roTxn; + // Current batch read-only transaction + private BatchReadOnlyTransaction batchTxn; + // Current database client + private DatabaseClient dbClient; + // Metadata info about table columns + private Metadata metadata; + // Number of pending read/query actions. + private int numPendingReads; + // Indicate whether there's a read/query action got aborted and the transaction need to be + // reset. + private boolean readAborted; + // Log the workid and op pair for tracing the thread. + private String transactionSeed; + // Outgoing stream. + StreamObserver responseObserver; + + public ExecutionFlowContext(StreamObserver responseObserver) { + this.responseObserver = responseObserver; + } + + /** Call the underlying stream to send response. */ + public synchronized void onNext(SpannerAsyncActionResponse response) { + responseObserver.onNext(response); + } + + /** Call the underlying stream to send error. 
 */ + public synchronized void onError(Throwable t) { + responseObserver.onError(t); + } + + /** Return current transaction that can be used for performing read/query actions. */ + public synchronized ReadContext getTransactionForRead() throws SpannerException { + if (roTxn != null) { + return roTxn; + } + if (rwTxn != null) { + return rwTxn.getContext(); + } + if (batchTxn != null) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Can't execute regular read in a batch transaction"); + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "No active transaction"); + } + + /** Return current transaction that can be used for performing mutation/update actions. */ + public synchronized TransactionContext getTransactionForWrite() throws SpannerException { + if (rwTxn == null) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Not in a read-write transaction"); + } + return rwTxn.getContext(); + } + + /** Return current batch transaction if it exists. */ + public synchronized BatchReadOnlyTransaction getBatchTxn() throws SpannerException { + if (batchTxn == null) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Not in a batch transaction"); + } + return batchTxn; + } + + /** Set the transactionSeed string retrieved from startTransactionAction. */ + public synchronized void updateTransactionSeed(String transactionSeed) { + if (!transactionSeed.isEmpty()) { + this.transactionSeed = transactionSeed; + } + } + + /** Return current workid and op pair for logging. */ + public synchronized String getTransactionSeed() { + return transactionSeed; + } + + /** Return current database client. */ + public DatabaseClient getDbClient() { + return dbClient; + } + + /** Clear the transaction related variables. 
*/ + public synchronized void clear() { + rwTxn = null; + roTxn = null; + metadata = null; + } + + /** Cleanup all the active transactions if the stubby call is closing. */ + public synchronized void cleanup() { + if (roTxn != null) { + LOGGER.log(Level.INFO, "A read only transaction was active when stubby call closed"); + roTxn.close(); + } + if (rwTxn != null) { + LOGGER.log(Level.INFO, "A read write transaction was active when stubby call closed"); + try { + rwTxn.finish(Mode.ABANDON); + } catch (Exception e) { + LOGGER.log( + Level.WARNING, "Failed to abandon a read-write transaction: " + e.getMessage()); + } + } + } + + /** Return previous databasePath if given dbPath is empty, then update. */ + public synchronized String getDatabasePath(String dbPath) { + if (dbPath == null || dbPath.isEmpty()) { + return prevDbPath; + } + prevDbPath = dbPath; + return dbPath; + } + + /** Set the metadata for future use. */ + public synchronized void setMetadata(Metadata metadata) { + this.metadata = metadata; + } + + /** Start a read-only transaction. */ + public synchronized void startReadOnlyTxn( + DatabaseClient dbClient, TimestampBound timestampBound, Metadata metadata) { + if ((rwTxn != null) || (roTxn != null) || (batchTxn != null)) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Already in a transaction"); + } + this.metadata = metadata; + if (timestampBound.getMode() == TimestampBound.Mode.MIN_READ_TIMESTAMP + || timestampBound.getMode() == TimestampBound.Mode.MAX_STALENESS) { + roTxn = dbClient.singleUseReadOnlyTransaction(timestampBound); + } else { + roTxn = dbClient.readOnlyTransaction(timestampBound); + } + } + + /** Start a read-write transaction. 
*/ + public synchronized void startReadWriteTxn( + DatabaseClient dbClient, Metadata metadata, TransactionExecutionOptions options) + throws Exception { + if ((rwTxn != null) || (roTxn != null) || (batchTxn != null)) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Already in a transaction"); + } + LOGGER.log( + Level.INFO, + String.format( + "There's no active transaction, safe to create rwTxn: %s\n", getTransactionSeed())); + this.metadata = metadata; + rwTxn = new ReadWriteTransaction(dbClient, transactionSeed, options.getOptimistic()); + LOGGER.log( + Level.INFO, + String.format( + "Read-write transaction object created, try to start: %s\n", getTransactionSeed())); + rwTxn.startRWTransaction(); + } + + /** Start a batch transaction. */ + public synchronized Status startBatchTxn( + StartBatchTransactionAction action, BatchClient batchClient, OutcomeSender sender) { + try { + if ((rwTxn != null) || (roTxn != null) || (batchTxn != null)) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Already in a transaction"); + } + + if (action.hasBatchTxnTime()) { + TimestampBound timestampBound = + TimestampBound.ofReadTimestamp(Timestamp.fromProto(action.getBatchTxnTime())); + batchTxn = batchClient.batchReadOnlyTransaction(timestampBound); + } else if (action.hasTid()) { + BatchTransactionId tId = unmarshall(action.getTid()); + batchTxn = batchClient.batchReadOnlyTransaction(tId); + } else { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Either timestamp or tid must be set"); + } + SpannerActionOutcome outcome = + SpannerActionOutcome.newBuilder() + .setStatus(toProto(Status.OK)) + .setBatchTxnId(marshall(batchTxn.getBatchTransactionId())) + .build(); + initReadState(); + return sender.sendOutcome(outcome); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + 
e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Increase the read count when a read/query is issued. */ + public synchronized void startRead() { + ++numPendingReads; + } + + /** + * Decrease the read count when a read/query is finished, if status is aborted and there's no + * pending read/query, reset the transaction for retry. + */ + public synchronized void finishRead(Status status) { + if (status.getCode() == Status.ABORTED.getCode()) { + readAborted = true; + } + --numPendingReads; + if (readAborted && numPendingReads <= 0) { + LOGGER.log(Level.FINE, "Transaction reset due to read/query abort"); + readAborted = false; + } + } + + /** Initialize the read count and aborted status when transaction started. */ + public synchronized void initReadState() { + readAborted = false; + numPendingReads = 0; + } + + /** Store the reference to the database client for future action use. */ + public void setDatabaseClient(DatabaseClient client) { + dbClient = client; + } + + /** Return a list of key column types of the given table. */ + public List getKeyColumnTypes(String tableName) + throws SpannerException { + Preconditions.checkNotNull(metadata); + return metadata.getKeyColumnTypes(tableName); + } + + /** Return column type of the given table and column. */ + public com.google.spanner.v1.Type getColumnType(String tableName, String columnName) + throws SpannerException { + Preconditions.checkNotNull(metadata); + return metadata.getColumnType(tableName, columnName); + } + + /** Buffer a list of mutations in a read-write transaction. */ + public synchronized void bufferMutations(List mutations) throws SpannerException { + getTransactionForWrite().buffer(mutations); + } + + /** Execute a batch of updates in a read-write transaction. 
*/ + public synchronized long[] executeBatchDml(@Nonnull List stmts) + throws SpannerException { + for (int i = 0; i < stmts.size(); i++) { + LOGGER.log( + Level.INFO, String.format("executeBatchDml [%d]: %s", i + 1, stmts.get(i).toString())); + } + return getTransactionForWrite() + .batchUpdate(stmts, Options.tag("batch-update-transaction-tag")); + } + + /** Finish active transaction in given finishMode, then send outcome back to client. */ + public synchronized Status finish(Mode finishMode, OutcomeSender sender) { + if (numPendingReads > 0) { + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Reads pending when trying to finish"))); + } + SpannerActionOutcome.Builder outcomeBuilder = SpannerActionOutcome.newBuilder(); + outcomeBuilder.setStatus(toProto(Status.OK)); + if (roTxn != null || rwTxn != null) { + try { + if (roTxn != null) { + // read-only transaction + Timestamp ts = roTxn.getReadTimestamp(); + outcomeBuilder.setCommitTime(ts.toProto()); + roTxn.close(); + clear(); + } else { + // read-write transaction + if (!rwTxn.finish(finishMode)) { + LOGGER.log(Level.FINE, "Transaction restarted"); + outcomeBuilder.setTransactionRestarted(true); + } else { + LOGGER.log(Level.FINE, "Transaction finish successfully"); + if (rwTxn.getTimestamp() != null) { + outcomeBuilder.setCommitTime(rwTxn.getTimestamp()); + } + clear(); + } + } + } catch (SpannerException e) { + outcomeBuilder.setStatus(toProto(toStatus(e))); + clear(); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + return sender.sendOutcome(outcomeBuilder.build()); + } else if (batchTxn != null) { + return toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Can't 
commit/abort a batch transaction")); + } else { + return toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "No currently active transaction")); + } + } + + /** Close active batch transaction. */ + public synchronized void closeBatchTxn() throws SpannerException { + if (batchTxn == null) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Not in a batch transaction"); + } + batchTxn.close(); + } + } + + private Spanner client; + private Spanner clientWithTimeout; + + private static final String TRANSACTION_ABANDONED = "Fake error to abandon transaction"; + + // Read-write transaction thread pool + private static final Executor txnThreadPool = + Executors.newCachedThreadPool( + new ThreadFactoryBuilder().setNameFormat("txn-pool-%d").build()); + + // Action thread pool + private static final Executor actionThreadPool = + Executors.newCachedThreadPool( + new ThreadFactoryBuilder().setNameFormat("action-pool-%d").build()); + + // Thread pool to verify end to end traces. + private static final ExecutorService endToEndTracesThreadPool = + Executors.newCachedThreadPool( + new ThreadFactoryBuilder().setNameFormat("end-to-end-traces-pool-%d").build()); + + private synchronized Spanner getClientWithTimeout( + long timeoutSeconds, boolean useMultiplexedSession) throws IOException { + if (clientWithTimeout != null) { + return clientWithTimeout; + } + clientWithTimeout = getClient(timeoutSeconds, useMultiplexedSession); + return clientWithTimeout; + } + + private synchronized Spanner getClient(boolean useMultiplexedSession) throws IOException { + if (client != null) { + return client; + } + client = getClient(/* timeoutSeconds= */ 0, useMultiplexedSession); + return client; + } + + // Return the spanner client, create one if not exists. 
+ private synchronized Spanner getClient(long timeoutSeconds, boolean useMultiplexedSession) + throws IOException { + // Create a cloud spanner client + Credentials credentials; + if (WorkerProxy.serviceKeyFile.isEmpty()) { + credentials = NoCredentials.getInstance(); + } else { + credentials = + ServiceAccountCredentials.fromStream( + new ByteArrayInputStream( + FileUtils.readFileToByteArray(new File(WorkerProxy.serviceKeyFile))), + HTTP_TRANSPORT_FACTORY); + } + + TransportChannelProvider channelProvider = + CloudUtil.newChannelProviderHelper(WorkerProxy.spannerPort); + + Duration rpcTimeout = Duration.ofHours(1L); + if (timeoutSeconds > 0) { + rpcTimeout = Duration.ofSeconds(timeoutSeconds); + } + RetrySettings retrySettings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofSeconds(1)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofSeconds(32)) + .setInitialRpcTimeoutDuration(rpcTimeout) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(rpcTimeout) + .setTotalTimeoutDuration(rpcTimeout) + .build(); + + com.google.cloud.spanner.SessionPoolOptions.Builder poolOptionsBuilder = + com.google.cloud.spanner.SessionPoolOptions.newBuilder(); + SessionPoolOptionsHelper.setUseMultiplexedSession(poolOptionsBuilder, useMultiplexedSession); + SessionPoolOptionsHelper.setUseMultiplexedSessionForRW( + poolOptionsBuilder, useMultiplexedSession); + SessionPoolOptionsHelper.setUseMultiplexedSessionForPartitionedOperations( + poolOptionsBuilder, useMultiplexedSession); + LOGGER.log( + Level.INFO, + String.format( + "Using multiplexed sessions for read-write transactions: %s", useMultiplexedSession)); + com.google.cloud.spanner.SessionPoolOptions sessionPoolOptions = poolOptionsBuilder.build(); + // Cloud Spanner Client does not support global retry settings, + // Thus, we need to add retry settings to each individual stub. 
+ SpannerOptions.Builder optionsBuilder = + SpannerOptions.newBuilder() + .setProjectId(PROJECT_ID) + .setHost(HOST_PREFIX + WorkerProxy.spannerPort) + .setCredentials(credentials) + .setChannelProvider(channelProvider) + .setEnableEndToEndTracing(true) + .setOpenTelemetry(WorkerProxy.openTelemetrySdk) + .setSessionPoolOption(sessionPoolOptions); + + SpannerStubSettings.Builder stubSettingsBuilder = + optionsBuilder.getSpannerStubSettingsBuilder(); + + stubSettingsBuilder.executeSqlSettings().setRetrySettings(retrySettings); + stubSettingsBuilder.executeStreamingSqlSettings().setRetrySettings(retrySettings); + stubSettingsBuilder.readSettings().setRetrySettings(retrySettings); + stubSettingsBuilder.streamingReadSettings().setRetrySettings(retrySettings); + stubSettingsBuilder.commitSettings().setRetrySettings(retrySettings); + stubSettingsBuilder.executeBatchDmlSettings().setRetrySettings(retrySettings); + stubSettingsBuilder.partitionQuerySettings().setRetrySettings(retrySettings); + stubSettingsBuilder.partitionReadSettings().setRetrySettings(retrySettings); + stubSettingsBuilder.rollbackSettings().setRetrySettings(retrySettings); + stubSettingsBuilder.batchCreateSessionsSettings().setRetrySettings(retrySettings); + stubSettingsBuilder.beginTransactionSettings().setRetrySettings(retrySettings); + stubSettingsBuilder.createSessionSettings().setRetrySettings(retrySettings); + stubSettingsBuilder.getSessionSettings().setRetrySettings(retrySettings); + stubSettingsBuilder.deleteSessionSettings().setRetrySettings(retrySettings); + + return optionsBuilder.build().getService(); + } + + private TraceServiceClient traceServiceClient; + + // Return the trace service client, create one if not exists. 
 + private synchronized TraceServiceClient getTraceServiceClient() throws IOException { + if (traceServiceClient != null) { + return traceServiceClient; + } + // Create a trace service client + Credentials credentials; + if (WorkerProxy.serviceKeyFile.isEmpty()) { + credentials = NoCredentials.getInstance(); + } else { + credentials = + ServiceAccountCredentials.fromStream( + new ByteArrayInputStream( + FileUtils.readFileToByteArray(new File(WorkerProxy.serviceKeyFile))), + HTTP_TRANSPORT_FACTORY); + } + + TraceServiceSettings traceServiceSettings = + TraceServiceSettings.newBuilder() + .setEndpoint(WorkerProxy.CLOUD_TRACE_ENDPOINT) + .setCredentialsProvider(FixedCredentialsProvider.create(credentials)) + .build(); + + traceServiceClient = TraceServiceClient.create(traceServiceSettings); + return traceServiceClient; + } + + public Future getEndToEndTraceVerificationTask(String traceId) { + return endToEndTracesThreadPool.submit( + () -> { + try { + // Wait for 10 seconds before verifying to ensure traces are exported. + long sleepDuration = TimeUnit.SECONDS.toMillis(10); + LOGGER.log( + Level.INFO, + String.format( + "Sleeping for %d milliseconds before verifying end to end trace", + sleepDuration)); + Thread.sleep(sleepDuration); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); // Handle interruption + LOGGER.log(Level.INFO, String.format("Thread interrupted.")); + return false; // Return false if interrupted + } + return isExportedEndToEndTraceValid(traceId); + }); + } + + private static final String READ_WRITE_TRANSACTION = "CloudSpanner.ReadWriteTransaction"; + private static final String READ_ONLY_TRANSACTION = "CloudSpanner.ReadOnlyTransaction"; + + /* Returns whether an exported trace is valid. 
*/ + public boolean isExportedEndToEndTraceValid(String traceId) { + try { + GetTraceRequest getTraceRequest = + GetTraceRequest.newBuilder() + .setProjectId(WorkerProxy.PROJECT_ID) + .setTraceId(traceId) + .build(); + Trace trace = getTraceServiceClient().getTrace(getTraceRequest); + boolean readWriteOrReadOnlyTxnPresent = false, spannerServerSideSpanPresent = false; + for (TraceSpan span : trace.getSpansList()) { + if (span.getName().contains(READ_ONLY_TRANSACTION) + || span.getName().contains(READ_WRITE_TRANSACTION)) { + readWriteOrReadOnlyTxnPresent = true; + } + if (span.getName().startsWith("Spanner.")) { + spannerServerSideSpanPresent = true; + } + } + if (readWriteOrReadOnlyTxnPresent && !spannerServerSideSpanPresent) { + return false; + } + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Failed to verify end to end trace.", e); + return false; + } + return true; + } + + /** Handle actions. */ + public Status startHandlingRequest( + SpannerAsyncActionRequest req, ExecutionFlowContext executionContext) { + OutcomeSender outcomeSender = new OutcomeSender(req.getActionId(), executionContext); + + if (!req.hasAction()) { + return outcomeSender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Invalid request"))); + } + SpannerAction action = req.getAction(); + + // Update dbPath + String dbPath = executionContext.getDatabasePath(action.getDatabasePath()); + // Update session pool options + boolean useMultiplexedSession; + if (action.hasSpannerOptions() && action.getSpannerOptions().hasSessionPoolOptions()) { + SessionPoolOptions sessionPoolOptions = action.getSpannerOptions().getSessionPoolOptions(); + useMultiplexedSession = sessionPoolOptions.getUseMultiplexed(); + } else { + useMultiplexedSession = false; + } + + io.opentelemetry.context.Context context = io.opentelemetry.context.Context.current(); + actionThreadPool.execute( + context.wrap( + () -> { + Status status = + executeAction( + 
outcomeSender, action, dbPath, useMultiplexedSession, executionContext); + if (!status.isOk()) { + LOGGER.log( + Level.WARNING, + String.format("Failed to execute action with error: %s\n%s", status, action)); + executionContext.onError(status.getCause()); + } + })); + return Status.OK; + } + + /** Execute actions by action case, using OutcomeSender to send status and results back. */ + private Status executeAction( + OutcomeSender outcomeSender, + SpannerAction action, + String dbPath, + boolean useMultiplexedSession, + ExecutionFlowContext executionContext) { + Tracer tracer = WorkerProxy.openTelemetrySdk.getTracer(CloudClientExecutor.class.getName()); + String actionType = action.getActionCase().toString(); + Span span = tracer.spanBuilder(String.format("performaction_%s", actionType)).startSpan(); + Scope scope = span.makeCurrent(); + try { + if (action.hasAdmin()) { + return executeAdminAction(useMultiplexedSession, action.getAdmin(), outcomeSender); + } else if (action.hasStart()) { + if (dbPath == null) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Database path must be set for this action"); + } + DatabaseClient dbClient = + getClient(useMultiplexedSession).getDatabaseClient(DatabaseId.of(dbPath)); + return executeStartTxn(action.getStart(), dbClient, outcomeSender, executionContext); + } else if (action.hasFinish()) { + return executeFinishTxn(action.getFinish(), outcomeSender, executionContext); + } else if (action.hasMutation()) { + return executeMutation( + action.getMutation(), outcomeSender, executionContext, /* isWrite= */ false); + } else if (action.hasRead()) { + return executeRead( + useMultiplexedSession, action.getRead(), outcomeSender, executionContext); + } else if (action.hasQuery()) { + return executeQuery( + useMultiplexedSession, action.getQuery(), outcomeSender, executionContext); + } else if (action.hasDml()) { + return executeCloudDmlUpdate( + useMultiplexedSession, action.getDml(), outcomeSender, 
executionContext); + } else if (action.hasBatchDml()) { + return executeCloudBatchDmlUpdates(action.getBatchDml(), outcomeSender, executionContext); + } else if (action.hasWrite()) { + return executeMutation( + action.getWrite().getMutation(), outcomeSender, executionContext, /* isWrite= */ true); + } else if (action.hasStartBatchTxn()) { + if (dbPath == null) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "database path must be set for this action"); + } + BatchClient batchClient = + getClient(useMultiplexedSession).getBatchClient(DatabaseId.of(dbPath)); + return executeStartBatchTxn( + action.getStartBatchTxn(), batchClient, outcomeSender, executionContext); + } else if (action.hasGenerateDbPartitionsRead()) { + return executeGenerateDbPartitionsRead( + action.getGenerateDbPartitionsRead(), outcomeSender, executionContext); + } else if (action.hasGenerateDbPartitionsQuery()) { + return executeGenerateDbPartitionsQuery( + action.getGenerateDbPartitionsQuery(), outcomeSender, executionContext); + } else if (action.hasExecutePartition()) { + return executeExecutePartition( + useMultiplexedSession, action.getExecutePartition(), outcomeSender, executionContext); + } else if (action.hasPartitionedUpdate()) { + if (dbPath == null) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Database path must be set for this action"); + } + DatabaseClient dbClient = + getClient(useMultiplexedSession).getDatabaseClient(DatabaseId.of(dbPath)); + return executePartitionedUpdate(action.getPartitionedUpdate(), dbClient, outcomeSender); + } else if (action.hasCloseBatchTxn()) { + return executeCloseBatchTxn(action.getCloseBatchTxn(), outcomeSender, executionContext); + } else if (action.hasExecuteChangeStreamQuery()) { + if (dbPath == null) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Database path must be set for this action"); + } + return executeExecuteChangeStreamQuery( 
+ dbPath, useMultiplexedSession, action.getExecuteChangeStreamQuery(), outcomeSender); + } else { + return outcomeSender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.UNIMPLEMENTED, "Not implemented yet: \n" + action))); + } + } catch (Exception e) { + span.recordException(e); + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return outcomeSender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } finally { + scope.close(); + span.end(); + } + } + + /** Execute admin actions by action case, using OutcomeSender to send status and results back. */ + private Status executeAdminAction( + boolean useMultiplexedSession, AdminAction action, OutcomeSender outcomeSender) { + try { + if (action.hasCreateCloudInstance()) { + return executeCreateCloudInstance( + useMultiplexedSession, action.getCreateCloudInstance(), outcomeSender); + } else if (action.hasUpdateCloudInstance()) { + return executeUpdateCloudInstance( + useMultiplexedSession, action.getUpdateCloudInstance(), outcomeSender); + } else if (action.hasDeleteCloudInstance()) { + return executeDeleteCloudInstance( + useMultiplexedSession, action.getDeleteCloudInstance(), outcomeSender); + } else if (action.hasListCloudInstances()) { + return executeListCloudInstances( + useMultiplexedSession, action.getListCloudInstances(), outcomeSender); + } else if (action.hasListInstanceConfigs()) { + return executeListCloudInstanceConfigs( + useMultiplexedSession, action.getListInstanceConfigs(), outcomeSender); + } else if (action.hasGetCloudInstanceConfig()) { + return executeGetCloudInstanceConfig( + useMultiplexedSession, action.getGetCloudInstanceConfig(), outcomeSender); + } else if (action.hasGetCloudInstance()) { + return executeGetCloudInstance( + useMultiplexedSession, action.getGetCloudInstance(), outcomeSender); + } else if 
(action.hasCreateUserInstanceConfig()) { + return executeCreateUserInstanceConfig( + useMultiplexedSession, action.getCreateUserInstanceConfig(), outcomeSender); + } else if (action.hasDeleteUserInstanceConfig()) { + return executeDeleteUserInstanceConfig( + useMultiplexedSession, action.getDeleteUserInstanceConfig(), outcomeSender); + } else if (action.hasCreateCloudDatabase()) { + return executeCreateCloudDatabase( + useMultiplexedSession, action.getCreateCloudDatabase(), outcomeSender); + } else if (action.hasUpdateCloudDatabaseDdl()) { + return executeUpdateCloudDatabaseDdl( + useMultiplexedSession, action.getUpdateCloudDatabaseDdl(), outcomeSender); + } else if (action.hasDropCloudDatabase()) { + return executeDropCloudDatabase( + useMultiplexedSession, action.getDropCloudDatabase(), outcomeSender); + } else if (action.hasCreateCloudBackup()) { + return executeCreateCloudBackup( + useMultiplexedSession, action.getCreateCloudBackup(), outcomeSender); + } else if (action.hasCopyCloudBackup()) { + return executeCopyCloudBackup( + useMultiplexedSession, action.getCopyCloudBackup(), outcomeSender); + } else if (action.hasGetCloudBackup()) { + return executeGetCloudBackup( + useMultiplexedSession, action.getGetCloudBackup(), outcomeSender); + } else if (action.hasUpdateCloudBackup()) { + return executeUpdateCloudBackup( + useMultiplexedSession, action.getUpdateCloudBackup(), outcomeSender); + } else if (action.hasDeleteCloudBackup()) { + return executeDeleteCloudBackup( + useMultiplexedSession, action.getDeleteCloudBackup(), outcomeSender); + } else if (action.hasListCloudBackups()) { + return executeListCloudBackups( + useMultiplexedSession, action.getListCloudBackups(), outcomeSender); + } else if (action.hasListCloudBackupOperations()) { + return executeListCloudBackupOperations( + useMultiplexedSession, action.getListCloudBackupOperations(), outcomeSender); + } else if (action.hasListCloudDatabases()) { + return executeListCloudDatabases( + 
useMultiplexedSession, action.getListCloudDatabases(), outcomeSender); + } else if (action.hasListCloudDatabaseOperations()) { + return executeListCloudDatabaseOperations( + useMultiplexedSession, action.getListCloudDatabaseOperations(), outcomeSender); + } else if (action.hasRestoreCloudDatabase()) { + return executeRestoreCloudDatabase( + useMultiplexedSession, action.getRestoreCloudDatabase(), outcomeSender); + } else if (action.hasGetCloudDatabase()) { + return executeGetCloudDatabase( + useMultiplexedSession, action.getGetCloudDatabase(), outcomeSender); + } else if (action.hasGetOperation()) { + return executeGetOperation(useMultiplexedSession, action.getGetOperation(), outcomeSender); + } else if (action.hasCancelOperation()) { + return executeCancelOperation( + useMultiplexedSession, action.getCancelOperation(), outcomeSender); + } else { + return outcomeSender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.UNIMPLEMENTED, "Not implemented yet: \n" + action))); + } + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return outcomeSender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Execute action that creates a cloud instance. 
*/ + private Status executeCreateCloudInstance( + boolean useMultiplexedSession, CreateCloudInstanceAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Creating instance: \n%s", action)); + InstanceAdminClient instanceAdminClient = + getClient(useMultiplexedSession).getInstanceAdminClient(); + final String instanceId = action.getInstanceId(); + InstanceId instance = InstanceId.of(action.getProjectId(), instanceId); + InstanceInfo.Builder builder = + InstanceInfo.newBuilder(instance) + .setInstanceConfigId( + InstanceConfigId.of(action.getProjectId(), action.getInstanceConfigId())) + .setDisplayName(instanceId) + .putAllLabels(action.getLabelsMap()); + if (action.hasNodeCount()) { + builder.setNodeCount(action.getNodeCount()); + } + if (action.hasProcessingUnits()) { + builder.setProcessingUnits(action.getProcessingUnits()); + } + final InstanceInfo request = builder.build(); + instanceAdminClient.createInstance(request).get(); + } catch (ExecutionException | InterruptedException ex) { + SpannerException e = SpannerExceptionFactory.newSpannerException(ex); + if (e.getErrorCode() == ErrorCode.ALREADY_EXISTS) { + // Another worker or our previous attempt already created the instance. + return sender.finishWithOK(); + } + return sender.finishWithError(toStatus(e)); + } catch (SpannerException se) { + return sender.finishWithError(toStatus(se)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + return sender.finishWithOK(); + } + + /** Execute action that updates a cloud instance. 
*/ + private Status executeUpdateCloudInstance( + boolean useMultiplexedSession, UpdateCloudInstanceAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Updating instance: \n%s", action)); + InstanceAdminClient instanceAdminClient = + getClient(useMultiplexedSession).getInstanceAdminClient(); + final String instanceId = action.getInstanceId(); + final InstanceId instance = InstanceId.of(action.getProjectId(), instanceId); + final InstanceInfo.Builder builder = InstanceInfo.newBuilder(instance); + + ArrayList fieldsToUpdate = new ArrayList<>(); + if (action.hasDisplayName()) { + fieldsToUpdate.add(InstanceInfo.InstanceField.DISPLAY_NAME); + builder.setDisplayName(instanceId); + } + if (action.hasNodeCount()) { + fieldsToUpdate.add(InstanceInfo.InstanceField.NODE_COUNT); + builder.setNodeCount(action.getNodeCount()); + } + if (action.hasProcessingUnits()) { + fieldsToUpdate.add(InstanceInfo.InstanceField.PROCESSING_UNITS); + builder.setProcessingUnits(action.getProcessingUnits()); + } + Map labels = action.getLabelsMap(); + if (!labels.isEmpty()) { + fieldsToUpdate.add(InstanceInfo.InstanceField.LABELS); + builder.putAllLabels(action.getLabelsMap()); + } + final InstanceInfo request = builder.build(); + instanceAdminClient + .updateInstance(request, fieldsToUpdate.toArray(new InstanceInfo.InstanceField[0])) + .get(); + } catch (ExecutionException | InterruptedException ex) { + SpannerException e = SpannerExceptionFactory.newSpannerException(ex); + return sender.finishWithError(toStatus(e)); + } catch (SpannerException se) { + return sender.finishWithError(toStatus(se)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + return sender.finishWithOK(); + } + + /** Execute action that deletes a cloud instance. 
*/ + private Status executeDeleteCloudInstance( + boolean useMultiplexedSession, DeleteCloudInstanceAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Deleting instance: \n%s", action)); + InstanceAdminClient instanceAdminClient = + getClient(useMultiplexedSession).getInstanceAdminClient(); + final String instanceId = action.getInstanceId(); + final InstanceId instance = InstanceId.of(action.getProjectId(), instanceId); + instanceAdminClient.deleteInstance(instance.getInstance()); + } catch (SpannerException se) { + return sender.finishWithError(toStatus(se)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + return sender.finishWithOK(); + } + + /** Execute action that lists cloud instances. */ + private Status executeListCloudInstances( + boolean useMultiplexedSession, ListCloudInstancesAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Listing instances:\n%s", action)); + ArrayList options = new ArrayList<>(); + if (action.hasPageSize()) { + options.add(Options.pageSize(action.getPageSize())); + } + if (action.hasFilter()) { + options.add(Options.filter(action.getFilter())); + } + if (action.hasPageToken()) { + options.add(Options.pageToken(action.getPageToken())); + } + + Page response = + getClient(useMultiplexedSession) + .getInstanceAdminClient() + .listInstances(options.toArray(new Options.ListOption[0])); + List instanceList = new ArrayList<>(); + for (Instance instance : response.iterateAll()) { + instanceList.add(instanceToProto(instance)); + } + SpannerActionOutcome outcome = + SpannerActionOutcome.newBuilder() + .setStatus(toProto(Status.OK)) + .setAdminResult( + AdminResult.newBuilder() + .setInstanceResponse( + CloudInstanceResponse.newBuilder() + 
.addAllListedInstances(instanceList) + .setNextPageToken("") + .build())) + .build(); + return sender.sendOutcome(outcome); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Execute action that lists cloud instance configs. */ + private Status executeListCloudInstanceConfigs( + boolean useMultiplexedSession, ListCloudInstanceConfigsAction action, OutcomeSender sender) { + LOGGER.log(Level.INFO, String.format("Listing instance configs:\n%s", action)); + ArrayList options = new ArrayList<>(); + if (action.hasPageSize()) { + options.add(Options.pageSize(action.getPageSize())); + } + if (action.hasPageToken()) { + options.add(Options.pageToken(action.getPageToken())); + } + try { + Page response = + getClient(useMultiplexedSession) + .getInstanceAdminClient() + .listInstanceConfigs(options.toArray(new Options.ListOption[0])); + List instanceConfigList = + new ArrayList<>(); + for (InstanceConfig instanceConfig : response.iterateAll()) { + instanceConfigList.add(instanceConfigToProto(instanceConfig)); + } + SpannerActionOutcome outcome = + SpannerActionOutcome.newBuilder() + .setStatus(toProto(Status.OK)) + .setAdminResult( + AdminResult.newBuilder() + .setInstanceConfigResponse( + CloudInstanceConfigResponse.newBuilder() + .addAllListedInstanceConfigs(instanceConfigList) + .setNextPageToken("") + .build())) + .build(); + return sender.sendOutcome(outcome); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, 
CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Execute action that gets a cloud instance config. */ + private Status executeGetCloudInstanceConfig( + boolean useMultiplexedSession, GetCloudInstanceConfigAction action, OutcomeSender sender) { + LOGGER.log(Level.INFO, String.format("Getting instance config:\n%s", action)); + try { + InstanceConfig instanceConfig = + getClient(useMultiplexedSession) + .getInstanceAdminClient() + .getInstanceConfig(action.getInstanceConfigId()); + SpannerActionOutcome outcome = + SpannerActionOutcome.newBuilder() + .setStatus(toProto(Status.OK)) + .setAdminResult( + AdminResult.newBuilder() + .setInstanceConfigResponse( + CloudInstanceConfigResponse.newBuilder() + .setInstanceConfig(instanceConfigToProto(instanceConfig)) + .build())) + .build(); + return sender.sendOutcome(outcome); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Execute action that retrieves a cloud instance. 
*/ + private Status executeGetCloudInstance( + boolean useMultiplexedSession, GetCloudInstanceAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Retrieving instance:\n%s", action)); + Instance instance = + getClient(useMultiplexedSession) + .getInstanceAdminClient() + .getInstance(action.getInstanceId()); + SpannerActionOutcome outcome = + SpannerActionOutcome.newBuilder() + .setStatus(toProto(Status.OK)) + .setAdminResult( + AdminResult.newBuilder() + .setInstanceResponse( + CloudInstanceResponse.newBuilder() + .setInstance(instanceToProto(instance)) + .build())) + .build(); + return sender.sendOutcome(outcome); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Execute action that creates a user instance config. 
*/ + private Status executeCreateUserInstanceConfig( + boolean useMultiplexedSession, CreateUserInstanceConfigAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Creating user instance config:\n%s", action)); + final InstanceConfig baseConfig = + getClient(useMultiplexedSession) + .getInstanceAdminClient() + .getInstanceConfig(action.getBaseConfigId()); + InstanceConfigInfo instanceConfigInfo = + InstanceConfig.newBuilder( + InstanceConfigId.of(action.getProjectId(), action.getUserConfigId()), baseConfig) + .setDisplayName(action.getUserConfigId()) + .addReadOnlyReplicas(baseConfig.getOptionalReplicas()) + .build(); + getClient(useMultiplexedSession) + .getInstanceAdminClient() + .createInstanceConfig(instanceConfigInfo) + .get(); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + return sender.finishWithOK(); + } + + /** Execute action that deletes a user instance config. 
*/ + private Status executeDeleteUserInstanceConfig( + boolean useMultiplexedSession, DeleteUserInstanceConfigAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Deleting user instance config:\n%s", action)); + getClient(useMultiplexedSession) + .getInstanceAdminClient() + .deleteInstanceConfig(action.getUserConfigId()); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + return sender.finishWithOK(); + } + + /** Execute action that creates a cloud custom encrypted database. */ + private Status executeCreateCloudCustomEncryptedDatabase( + boolean useMultiplexedSession, CreateCloudDatabaseAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Creating database: \n%s", action)); + Database dbInfo = + getClient(useMultiplexedSession) + .getDatabaseAdminClient() + .newDatabaseBuilder( + DatabaseId.of( + action.getProjectId(), action.getInstanceId(), action.getDatabaseId())) + .setEncryptionConfig( + CustomerManagedEncryption.fromProtoOrNull(action.getEncryptionConfig())) + .build(); + getClient(useMultiplexedSession) + .getDatabaseAdminClient() + .createDatabase(dbInfo, action.getSdlStatementList()); + } catch (SpannerException se) { + return sender.finishWithError(toStatus(se)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + return sender.finishWithOK(); + } + + /** Execute action that creates a cloud database. 
*/ + private Status executeCreateCloudDatabase( + boolean useMultiplexedSession, CreateCloudDatabaseAction action, OutcomeSender sender) { + if (action.hasEncryptionConfig()) { + return executeCreateCloudCustomEncryptedDatabase(useMultiplexedSession, action, sender); + } + try { + LOGGER.log(Level.INFO, String.format("Creating database: \n%s", action)); + final String instanceId = action.getInstanceId(); + final String databaseId = action.getDatabaseId(); + getClient(useMultiplexedSession) + .getDatabaseAdminClient() + .createDatabase(instanceId, databaseId, action.getSdlStatementList()) + .get(); + } catch (ExecutionException | InterruptedException ex) { + SpannerException e = SpannerExceptionFactory.newSpannerException(ex); + if (e.getErrorCode() == ErrorCode.ALREADY_EXISTS) { + // Client does not retry database, but we assume that no other process has created + // the database with a different schema (another instance of a worker may have + // created it with the same schema). + return sender.finishWithOK(); + } + return sender.finishWithError(toStatus(e)); + } catch (SpannerException se) { + return sender.finishWithError(toStatus(se)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + return sender.finishWithOK(); + } + + /** Execute action that updates a cloud database. 
*/ + private Status executeUpdateCloudDatabaseDdl( + boolean useMultiplexedSession, UpdateCloudDatabaseDdlAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Updating database: \n%s", action)); + DatabaseAdminClient dbAdminClient = getClient(useMultiplexedSession).getDatabaseAdminClient(); + final String instanceId = action.getInstanceId(); + final String databaseId = action.getDatabaseId(); + UpdateDatabaseDdlMetadata metadata; + OperationFuture updateOp = + dbAdminClient.updateDatabaseDdl( + instanceId, databaseId, action.getSdlStatementList(), action.getOperationId()); + updateOp.get(); + metadata = updateOp.getMetadata().get(); + int tsCount = metadata.getCommitTimestampsCount(); + // Fetch the last timestamp + sender.setTimestamp(metadata.getCommitTimestamps(tsCount - 1)); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log( + Level.WARNING, + "Unexpected error executing DDL: " + + String.join("; ", action.getSdlStatementList()) + + " " + + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + return sender.finishWithOK(); + } + + /** Execute action that updates a cloud database. 
*/ + private Status executeDropCloudDatabase( + boolean useMultiplexedSession, DropCloudDatabaseAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Dropping database: \n%s", action)); + DatabaseAdminClient dbAdminClient = getClient(useMultiplexedSession).getDatabaseAdminClient(); + final String instanceId = action.getInstanceId(); + final String databaseId = action.getDatabaseId(); + dbAdminClient.dropDatabase(instanceId, databaseId); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + return sender.finishWithOK(); + } + + /** Execute action that creates a cloud database backup. */ + private Status executeCreateCloudBackup( + boolean useMultiplexedSession, CreateCloudBackupAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Creating backup: \n%s", action)); + Backup backupResult = + getClient(useMultiplexedSession) + .getDatabaseAdminClient() + .createBackup( + action.getInstanceId(), + action.getBackupId(), + action.getDatabaseId(), + Timestamp.fromProto(action.getExpireTime())) + .get(); + SpannerActionOutcome outcome = + SpannerActionOutcome.newBuilder() + .setStatus(toProto(Status.OK)) + .setAdminResult( + AdminResult.newBuilder() + .setBackupResponse( + CloudBackupResponse.newBuilder() + .setBackup(backupResult.getProto()) + .build())) + .build(); + return sender.sendOutcome(outcome); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, 
CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Execute action that copies a cloud database backup. */ + private Status executeCopyCloudBackup( + boolean useMultiplexedSession, CopyCloudBackupAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Copying backup: \n%s", action)); + Backup backupResult = + getClient(useMultiplexedSession) + .getDatabaseAdminClient() + .copyBackup( + action.getInstanceId(), + action.getBackupId(), + action.getSourceBackup(), + Timestamp.fromProto(action.getExpireTime())) + .get(); + SpannerActionOutcome outcome = + SpannerActionOutcome.newBuilder() + .setStatus(toProto(Status.OK)) + .setAdminResult( + AdminResult.newBuilder() + .setBackupResponse( + CloudBackupResponse.newBuilder() + .setBackup(backupResult.getProto()) + .build())) + .build(); + return sender.sendOutcome(outcome); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Execute action that gets a cloud database backup. 
*/ + private Status executeGetCloudBackup( + boolean useMultiplexedSession, GetCloudBackupAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Getting backup: \n%s", action)); + Backup backupResult = + getClient(useMultiplexedSession) + .getDatabaseAdminClient() + .getBackup(action.getInstanceId(), action.getBackupId()); + SpannerActionOutcome outcome = + SpannerActionOutcome.newBuilder() + .setStatus(toProto(Status.OK)) + .setAdminResult( + AdminResult.newBuilder() + .setBackupResponse( + CloudBackupResponse.newBuilder() + .setBackup(backupResult.getProto()) + .build())) + .build(); + return sender.sendOutcome(outcome); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Execute action that updates a cloud database backup. 
*/ + private Status executeUpdateCloudBackup( + boolean useMultiplexedSession, UpdateCloudBackupAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Updating backup: \n%s", action)); + Backup backupResult = + getClient(useMultiplexedSession) + .getDatabaseAdminClient() + .updateBackup( + action.getInstanceId(), + action.getBackupId(), + Timestamp.fromProto(action.getExpireTime())); + SpannerActionOutcome outcome = + SpannerActionOutcome.newBuilder() + .setStatus(toProto(Status.OK)) + .setAdminResult( + AdminResult.newBuilder() + .setBackupResponse( + CloudBackupResponse.newBuilder() + .setBackup(backupResult.getProto()) + .build())) + .build(); + return sender.sendOutcome(outcome); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Execute action that deletes a cloud database backup. */ + private Status executeDeleteCloudBackup( + boolean useMultiplexedSession, DeleteCloudBackupAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, "Deleting backup: \n%s", action); + getClient(useMultiplexedSession) + .getDatabaseAdminClient() + .deleteBackup(action.getInstanceId(), action.getBackupId()); + return sender.finishWithOK(); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Execute action that lists cloud database backups. 
*/ + private Status executeListCloudBackups( + boolean useMultiplexedSession, ListCloudBackupsAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Listing backup: \n%s", action)); + Page response = + getClient(useMultiplexedSession) + .getDatabaseAdminClient() + .listBackups( + action.getInstanceId(), + Options.pageSize(action.getPageSize()), + Options.filter(action.getFilter()), + Options.pageToken(action.getPageToken())); + List backupList = new ArrayList<>(); + for (Backup backup : response.iterateAll()) { + backupList.add(backup.getProto()); + } + SpannerActionOutcome outcome = + SpannerActionOutcome.newBuilder() + .setStatus(toProto(Status.OK)) + .setAdminResult( + AdminResult.newBuilder() + .setBackupResponse( + CloudBackupResponse.newBuilder() + .addAllListedBackups(backupList) + .setNextPageToken("") + .build())) + .build(); + return sender.sendOutcome(outcome); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Execute action that lists cloud database backup operations. 
*/ + private Status executeListCloudBackupOperations( + boolean useMultiplexedSession, ListCloudBackupOperationsAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Listing backup operation: \n%s", action)); + + Page response = + getClient(useMultiplexedSession) + .getDatabaseAdminClient() + .listBackupOperations( + action.getInstanceId(), + Options.pageSize(action.getPageSize()), + Options.filter(action.getFilter()), + Options.pageToken(action.getPageToken())); + SpannerActionOutcome outcome = + SpannerActionOutcome.newBuilder() + .setStatus(toProto(Status.OK)) + .setAdminResult( + AdminResult.newBuilder() + .setBackupResponse( + CloudBackupResponse.newBuilder() + .addAllListedBackupOperations(response.iterateAll()) + .setNextPageToken("") + .build())) + .build(); + return sender.sendOutcome(outcome); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Execute action that list cloud databases. 
*/ + private Status executeListCloudDatabases( + boolean useMultiplexedSession, ListCloudDatabasesAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Listing database: \n%s", action)); + Page response = + getClient(useMultiplexedSession) + .getDatabaseAdminClient() + .listDatabases( + action.getInstanceId(), + Options.pageSize(action.getPageSize()), + Options.pageToken(action.getPageToken())); + List databaseList = new ArrayList<>(); + for (Database database : response.iterateAll()) { + databaseList.add(database.getProto()); + } + SpannerActionOutcome outcome = + SpannerActionOutcome.newBuilder() + .setStatus(toProto(Status.OK)) + .setAdminResult( + AdminResult.newBuilder() + .setDatabaseResponse( + CloudDatabaseResponse.newBuilder() + .addAllListedDatabases(databaseList) + .setNextPageToken("") + .build())) + .build(); + return sender.sendOutcome(outcome); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Execute action that lists cloud database operations. 
*/ + private Status executeListCloudDatabaseOperations( + boolean useMultiplexedSession, + ListCloudDatabaseOperationsAction action, + OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Listing database operation: \n%s", action)); + + Page response = + getClient(useMultiplexedSession) + .getDatabaseAdminClient() + .listDatabaseOperations( + action.getInstanceId(), + Options.pageSize(action.getPageSize()), + Options.filter(action.getFilter()), + Options.pageToken(action.getPageToken())); + SpannerActionOutcome outcome = + SpannerActionOutcome.newBuilder() + .setStatus(toProto(Status.OK)) + .setAdminResult( + AdminResult.newBuilder() + .setDatabaseResponse( + CloudDatabaseResponse.newBuilder() + .addAllListedDatabaseOperations(response.iterateAll()) + .setNextPageToken("") + .build())) + .build(); + return sender.sendOutcome(outcome); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Execute action that restores a cloud database. 
*/ + private Status executeRestoreCloudDatabase( + boolean useMultiplexedSession, RestoreCloudDatabaseAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Restoring database: \n%s", action)); + Database db = + getClient(useMultiplexedSession) + .getDatabaseAdminClient() + .restoreDatabase( + action.getBackupInstanceId(), + action.getBackupId(), + action.getDatabaseInstanceId(), + action.getDatabaseId()) + .get(); + SpannerActionOutcome outcome = + SpannerActionOutcome.newBuilder() + .setStatus(toProto(Status.OK)) + .setAdminResult( + AdminResult.newBuilder() + .setDatabaseResponse( + CloudDatabaseResponse.newBuilder().setDatabase(db.getProto()).build())) + .build(); + return sender.sendOutcome(outcome); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Execute action that gets a cloud database. 
*/ + private Status executeGetCloudDatabase( + boolean useMultiplexedSession, GetCloudDatabaseAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Getting database: \n%s", action)); + Database databaseResult = + getClient(useMultiplexedSession) + .getDatabaseAdminClient() + .getDatabase(action.getInstanceId(), action.getDatabaseId()); + SpannerActionOutcome outcome = + SpannerActionOutcome.newBuilder() + .setStatus(toProto(Status.OK)) + .setAdminResult( + AdminResult.newBuilder() + .setDatabaseResponse( + CloudDatabaseResponse.newBuilder() + .setDatabase(databaseResult.getProto()) + .build())) + .build(); + return sender.sendOutcome(outcome); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Execute action that gets an operation. 
*/ + private Status executeGetOperation( + boolean useMultiplexedSession, GetOperationAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Getting operation: \n%s", action)); + final String operationName = action.getOperation(); + Operation operationResult = + getClient(useMultiplexedSession).getDatabaseAdminClient().getOperation(operationName); + SpannerActionOutcome outcome = + SpannerActionOutcome.newBuilder() + .setStatus(toProto(Status.OK)) + .setAdminResult( + AdminResult.newBuilder() + .setOperationResponse( + OperationResponse.newBuilder().setOperation(operationResult).build())) + .build(); + return sender.sendOutcome(outcome); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Execute action that cancels an operation. */ + private Status executeCancelOperation( + boolean useMultiplexedSession, CancelOperationAction action, OutcomeSender sender) { + try { + LOGGER.log(Level.INFO, String.format("Cancelling operation: \n%s", action)); + final String operationName = action.getOperation(); + getClient(useMultiplexedSession).getDatabaseAdminClient().cancelOperation(operationName); + return sender.finishWithOK(); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Execute action that starts a batch transaction. 
*/ + private Status executeStartBatchTxn( + StartBatchTransactionAction action, + BatchClient batchClient, + OutcomeSender sender, + ExecutionFlowContext executionContext) { + LOGGER.log(Level.INFO, "Starting batch transaction"); + return executionContext.startBatchTxn(action, batchClient, sender); + } + + /** Execute action that finishes a batch transaction. */ + private Status executeCloseBatchTxn( + CloseBatchTransactionAction action, + OutcomeSender sender, + ExecutionFlowContext executionContext) { + try { + LOGGER.log(Level.INFO, "Closing batch transaction"); + if (action.getCleanup()) { + executionContext.closeBatchTxn(); + } + return sender.finishWithOK(); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } + } + + /** Execute action that generates database partitions for the given read. */ + private Status executeGenerateDbPartitionsRead( + GenerateDbPartitionsForReadAction action, + OutcomeSender sender, + ExecutionFlowContext executionContext) { + try { + BatchReadOnlyTransaction batchTxn = executionContext.getBatchTxn(); + Metadata metadata = new Metadata(action.getTableList()); + executionContext.setMetadata(metadata); + ReadAction request = action.getRead(); + + List typeList = new ArrayList<>(); + for (int i = 0; i < request.getColumnCount(); ++i) { + typeList.add(executionContext.getColumnType(request.getTable(), request.getColumn(i))); + } + KeySet keySet = keySetProtoToCloudKeySet(request.getKeys(), typeList); + PartitionOptions.Builder partitionOptionsBuilder = PartitionOptions.newBuilder(); + if (action.hasDesiredBytesPerPartition() && action.getDesiredBytesPerPartition() > 0) { + partitionOptionsBuilder.setPartitionSizeBytes(action.getDesiredBytesPerPartition()); + } + if (action.hasMaxPartitionCount()) { + partitionOptionsBuilder.setMaxPartitions(action.getMaxPartitionCount()); + } + List parts; + if (request.hasIndex()) { + parts = + batchTxn.partitionReadUsingIndex( + partitionOptionsBuilder.build(), + 
request.getTable(), + request.getIndex(), + keySet, + new ArrayList<>(request.getColumnList())); + } else { + parts = + batchTxn.partitionRead( + partitionOptionsBuilder.build(), + request.getTable(), + keySet, + new ArrayList<>(request.getColumnList())); + } + List batchPartitions = new ArrayList<>(); + for (Partition part : parts) { + batchPartitions.add( + BatchPartition.newBuilder() + .setPartition(marshall(part)) + .setPartitionToken(part.getPartitionToken()) + .setTable(request.getTable()) + .setIndex(request.getIndex()) + .build()); + } + SpannerActionOutcome outcome = + SpannerActionOutcome.newBuilder() + .setStatus(toProto(Status.OK)) + .addAllDbPartition(batchPartitions) + .build(); + return sender.sendOutcome(outcome); + } catch (SpannerException e) { + LOGGER.log(Level.WARNING, String.format("GenerateDbPartitionsRead failed for %s", action)); + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Execute action that generates database partitions for the given query. 
*/ + private Status executeGenerateDbPartitionsQuery( + GenerateDbPartitionsForQueryAction action, + OutcomeSender sender, + ExecutionFlowContext executionContext) { + try { + BatchReadOnlyTransaction batchTxn = executionContext.getBatchTxn(); + Statement.Builder stmt = Statement.newBuilder(action.getQuery().getSql()); + for (int i = 0; i < action.getQuery().getParamsCount(); ++i) { + stmt.bind(action.getQuery().getParams(i).getName()) + .to( + valueProtoToCloudValue( + action.getQuery().getParams(i).getType(), + action.getQuery().getParams(i).getValue())); + } + PartitionOptions partitionOptions = + PartitionOptions.newBuilder() + .setPartitionSizeBytes(action.getDesiredBytesPerPartition()) + .build(); + List parts = batchTxn.partitionQuery(partitionOptions, stmt.build()); + List batchPartitions = new ArrayList<>(); + for (Partition part : parts) { + batchPartitions.add( + BatchPartition.newBuilder() + .setPartition(marshall(part)) + .setPartitionToken(part.getPartitionToken()) + .build()); + } + + SpannerActionOutcome outcome = + SpannerActionOutcome.newBuilder() + .setStatus(toProto(Status.OK)) + .addAllDbPartition(batchPartitions) + .build(); + return sender.sendOutcome(outcome); + } catch (SpannerException e) { + LOGGER.log(Level.WARNING, String.format("GenerateDbPartitionsQuery failed for %s", action)); + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Execute a read or query for the given partitions. 
*/ + private Status executeExecutePartition( + boolean useMultiplexedSession, + ExecutePartitionAction action, + OutcomeSender sender, + ExecutionFlowContext executionContext) { + try { + BatchReadOnlyTransaction batchTxn = executionContext.getBatchTxn(); + ByteString partitionBinary = action.getPartition().getPartition(); + if (partitionBinary.size() == 0) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Invalid batchPartition " + action); + } + if (action.getPartition().hasTable()) { + sender.initForBatchRead(action.getPartition().getTable(), action.getPartition().getIndex()); + } else { + sender.initForQuery(); + } + Partition partition = unmarshall(partitionBinary); + executionContext.startRead(); + ResultSet result = batchTxn.execute(partition); + return processResults(useMultiplexedSession, result, 0, sender, executionContext); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Execute a partitioned update which runs different partitions in parallel. 
*/ + private Status executePartitionedUpdate( + PartitionedUpdateAction action, DatabaseClient dbClient, OutcomeSender sender) { + try { + ExecutePartitionedUpdateOptions options = action.getOptions(); + Long count = + dbClient.executePartitionedUpdate( + Statement.of(action.getUpdate().getSql()), + Options.tag(options.getTag()), + Options.priority(RpcPriority.fromProto(options.getRpcPriority()))); + SpannerActionOutcome outcome = + SpannerActionOutcome.newBuilder() + .setStatus(toProto(Status.OK)) + .addDmlRowsModified(count) + .build(); + sender.sendOutcome(outcome); + return sender.finishWithOK(); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** Build a child partition record proto out of childPartitionRecord returned by client. 
*/ + private ChildPartitionsRecord buildChildPartitionRecord(Struct childPartitionRecord) + throws Exception { + ChildPartitionsRecord.Builder childPartitionRecordBuilder = ChildPartitionsRecord.newBuilder(); + childPartitionRecordBuilder.setStartTime( + Timestamps.parse(childPartitionRecord.getTimestamp(0).toString())); + childPartitionRecordBuilder.setRecordSequence(childPartitionRecord.getString(1)); + for (Struct childPartition : childPartitionRecord.getStructList(2)) { + ChildPartitionsRecord.ChildPartition.Builder childPartitionBuilder = + ChildPartitionsRecord.ChildPartition.newBuilder(); + childPartitionBuilder.setToken(childPartition.getString(0)); + childPartitionBuilder.addAllParentPartitionTokens(childPartition.getStringList(1)); + childPartitionRecordBuilder.addChildPartitions(childPartitionBuilder.build()); + } + return childPartitionRecordBuilder.build(); + } + + /** Build a data change record proto out of dataChangeRecord returned by client. */ + private DataChangeRecord buildDataChangeRecord(Struct dataChangeRecord) throws Exception { + DataChangeRecord.Builder dataChangeRecordBuilder = DataChangeRecord.newBuilder(); + dataChangeRecordBuilder.setCommitTime( + Timestamps.parse(dataChangeRecord.getTimestamp(0).toString())); + dataChangeRecordBuilder.setRecordSequence(dataChangeRecord.getString(1)); + dataChangeRecordBuilder.setTransactionId(dataChangeRecord.getString(2)); + dataChangeRecordBuilder.setIsLastRecord(dataChangeRecord.getBoolean(3)); + dataChangeRecordBuilder.setTable(dataChangeRecord.getString(4)); + for (Struct columnType : dataChangeRecord.getStructList(5)) { + DataChangeRecord.ColumnType.Builder columnTypeBuilder = + DataChangeRecord.ColumnType.newBuilder(); + columnTypeBuilder.setName(columnType.getString(0)); + columnTypeBuilder.setType(getJsonStringForStructColumn(columnType, 1)); + columnTypeBuilder.setIsPrimaryKey(columnType.getBoolean(2)); + columnTypeBuilder.setOrdinalPosition(columnType.getLong(3)); + 
dataChangeRecordBuilder.addColumnTypes(columnTypeBuilder.build()); + } + for (Struct mod : dataChangeRecord.getStructList(6)) { + DataChangeRecord.Mod.Builder modBuilder = DataChangeRecord.Mod.newBuilder(); + modBuilder.setKeys(getJsonStringForStructColumn(mod, 0)); + modBuilder.setNewValues(getJsonStringForStructColumn(mod, 1)); + modBuilder.setOldValues(getJsonStringForStructColumn(mod, 2)); + dataChangeRecordBuilder.addMods(modBuilder.build()); + } + dataChangeRecordBuilder.setModType(dataChangeRecord.getString(7)); + dataChangeRecordBuilder.setValueCaptureType(dataChangeRecord.getString(8)); + + // Get transaction tag. + dataChangeRecordBuilder.setTransactionTag( + dataChangeRecord.getString(DataChangeRecord.TRANSACTION_TAG_FIELD_NUMBER - 1)); + + // Get is system transaction. + dataChangeRecordBuilder.setIsSystemTransaction( + dataChangeRecord.getBoolean(DataChangeRecord.IS_SYSTEM_TRANSACTION_FIELD_NUMBER - 1)); + return dataChangeRecordBuilder.build(); + } + + /** Returns the json or string value of a struct column with index=columnIndex. */ + private String getJsonStringForStructColumn(Struct struct, int columnIndex) { + Type columnType = struct.getColumnType(columnIndex); + switch (columnType.getCode()) { + case JSON: + return struct.getJson(columnIndex); + case STRING: + return struct.getString(columnIndex); + default: + throw new IllegalArgumentException( + String.format( + "Cannot extract value from column with index = %d and column type = %s for struct:" + + " %s", + columnIndex, columnType, struct)); + } + } + + /** Build a heartbeat record proto out of heartbeatRecord returned by client. 
*/ + private HeartbeatRecord buildHeartbeatRecord(Struct heartbeatRecord) throws Exception { + HeartbeatRecord.Builder heartbeatRecordBuilder = HeartbeatRecord.newBuilder(); + heartbeatRecordBuilder.setHeartbeatTime( + Timestamps.parse(heartbeatRecord.getTimestamp(0).toString())); + return heartbeatRecordBuilder.build(); + } + + /** Execute action that executes a change stream query. */ + private Status executeExecuteChangeStreamQuery( + String dbPath, + boolean useMultiplexedSession, + ExecuteChangeStreamQuery action, + OutcomeSender sender) { + try { + LOGGER.log( + Level.INFO, String.format("Start executing change stream query: \n%s", action)); + + // Retrieve TVF parameters from the action. + String changeStreamName = action.getName(); + // For initial partition query (no partition token) we simulate precision of the timestamp + // in nanoseconds, as that's more closely aligned with the production client code. + + String startTime = + timestampToString( + !action.hasPartitionToken(), Timestamps.toMicros(action.getStartTime())); + String endTime = "null"; + if (action.hasEndTime()) { + endTime = + timestampToString( + !action.hasPartitionToken(), Timestamps.toMicros(action.getEndTime())); + } + String heartbeat = "null"; + if (action.hasHeartbeatMilliseconds()) { + heartbeat = Integer.toString(action.getHeartbeatMilliseconds()); + } + String partitionToken = "null"; + if (action.hasPartitionToken()) { + partitionToken = String.format("\"%s\"", action.getPartitionToken()); + } + + String tvfQuery = + String.format( + "SELECT * FROM READ_%s(%s,%s,%s,%s);", + changeStreamName, startTime, endTime, partitionToken, heartbeat); + + LOGGER.log(Level.INFO, String.format("Start executing change stream TVF: \n%s", tvfQuery)); + sender.initForChangeStreamQuery( + action.getHeartbeatMilliseconds(), action.getName(), action.getPartitionToken()); + Spanner spannerClient; + if (action.hasDeadlineSeconds()) { + spannerClient = getClientWithTimeout(action.getDeadlineSeconds(), 
useMultiplexedSession); + } else { + spannerClient = getClient(useMultiplexedSession); + } + DatabaseClient dbClient = spannerClient.getDatabaseClient(DatabaseId.of(dbPath)); + ResultSet resultSet = dbClient.singleUse().executeQuery(Statement.of(tvfQuery)); + + ChangeStreamRecord.Builder changeStreamRecordBuilder = ChangeStreamRecord.newBuilder(); + while (resultSet.next()) { + Struct record = resultSet.getStructList(0).get(0); + for (Struct dataChangeRecord : record.getStructList("data_change_record")) { + // If the data change record is null, that means the ChangeRecord is either a heartbeat + // or a child partitions record. + if (dataChangeRecord.isNull(0)) { + continue; + } + DataChangeRecord builtDataChangeRecord = buildDataChangeRecord(dataChangeRecord); + changeStreamRecordBuilder.setDataChange(builtDataChangeRecord); + } + for (Struct heartbeatRecord : record.getStructList("heartbeat_record")) { + // If the heartbeat record is null, that means the ChangeRecord is either a data change + // record or a child partitions record. + if (heartbeatRecord.isNull(0)) { + continue; + } + HeartbeatRecord builtHeartbeatRecord = buildHeartbeatRecord(heartbeatRecord); + changeStreamRecordBuilder.setHeartbeat(builtHeartbeatRecord); + } + for (Struct childPartitionRecord : record.getStructList("child_partitions_record")) { + // If the child partitions record is null, that means the ChangeRecord is either a + // data change record or a heartbeat record. + if (childPartitionRecord.isNull(0)) { + continue; + } + ChildPartitionsRecord builtChildPartitionsRecord = + buildChildPartitionRecord(childPartitionRecord); + changeStreamRecordBuilder.setChildPartition(builtChildPartitionsRecord); + } + // For partitioned queries, validate that the time between received change records are + // less than 10x the heartbeat interval. 
+ // Right now, we are not failing the handler since there are other issues besides change + // stream related issues that can cause the heartbeat check to fail (i.e. RPC latency). + if (sender.getIsPartitionedChangeStreamQuery()) { + long lastReceivedTimestamp = sender.getChangeStreamRecordReceivedTimestamp(); + long currentChangeRecordReceivedTimestamp = System.currentTimeMillis(); + long discrepancyMillis = currentChangeRecordReceivedTimestamp - lastReceivedTimestamp; + // Only do the heartbeat check after we have already received one record for the query + // (i.e. lastReceivedTimestamp > 0). + // We should only check the heartbeat interval if heartbeat is greater than 5 seconds, + // to prevent flaky failures. + if (lastReceivedTimestamp > 0 + && discrepancyMillis > sender.getChangeStreamHeartbeatMilliSeconds() * 10 + && sender.getChangeStreamHeartbeatMilliSeconds() > 5000) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INTERNAL, + "Does not pass the heartbeat interval check. The last record was received " + + discrepancyMillis / 1000 + + " seconds ago, which is more than ten times the heartbeat interval, which is " + + sender.getChangeStreamHeartbeatMilliSeconds() / 1000 + + " seconds. 
The change record received is: " + + changeStreamRecordBuilder.build()); + } + sender.updateChangeStreamRecordReceivedTimestamp(currentChangeRecordReceivedTimestamp); + } + Status appendStatus = sender.appendChangeStreamRecord(changeStreamRecordBuilder.build()); + if (!appendStatus.isOk()) { + return appendStatus; + } + } + return sender.finishWithOK(); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + if (e instanceof DeadlineExceededException) { + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.DEADLINE_EXCEEDED, "Deadline exceeded error: " + e))); + } else if (e instanceof UnavailableException) { + return toStatus( + SpannerExceptionFactory.newSpannerException(ErrorCode.UNAVAILABLE, e.getMessage())); + } + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e))); + } + } + + /** + * Execute action that start a read-write or read-only transaction. For read-write transaction, + * see {@link ReadWriteTransaction}. 
+ */ + private Status executeStartTxn( + StartTransactionAction action, + DatabaseClient dbClient, + OutcomeSender sender, + ExecutionFlowContext executionContext) { + try { + executionContext.updateTransactionSeed(action.getTransactionSeed()); + Metadata metadata = new Metadata(action.getTableList()); + if (action.hasConcurrency()) { + LOGGER.log( + Level.INFO, + String.format( + "Starting read-only transaction %s\n", executionContext.getTransactionSeed())); + executionContext.startReadOnlyTxn( + dbClient, timestampBoundsFromConcurrency(action.getConcurrency()), metadata); + } else { + LOGGER.log( + Level.INFO, + "Starting read-write transaction %s\n", + executionContext.getTransactionSeed()); + executionContext.startReadWriteTxn(dbClient, metadata, action.getExecutionOptions()); + } + executionContext.setDatabaseClient(dbClient); + executionContext.initReadState(); + return sender.finishWithOK(); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } catch (Exception e) { + LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage()); + return sender.finishWithError( + toStatus( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); + } + } + + /** + * Execute action that finish a transaction. For read-write transaction, either commit or abandon + * the transaction is allowed. Batch transaction is not supported. + */ + private Status executeFinishTxn( + FinishTransactionAction action, OutcomeSender sender, ExecutionFlowContext executionContext) { + LOGGER.log( + Level.INFO, + String.format( + "Finishing transaction %s\n%s", executionContext.getTransactionSeed(), action)); + return executionContext.finish(action.getMode(), sender); + } + + /** Execute mutation action request and buffer the mutations. 
*/ + private Status executeMutation( + MutationAction action, + OutcomeSender sender, + ExecutionFlowContext executionContext, + boolean isWrite) { + String prevTable = ""; + try { + for (int i = 0; i < action.getModCount(); ++i) { + Mod mod = action.getMod(i); + String table = mod.getTable(); + if (table.isEmpty()) { + table = prevTable; + } + if (table.isEmpty()) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Table name missing: " + action); + } + prevTable = table; + LOGGER.log(Level.FINE, String.format("Executing mutation mod: \n%s", mod)); + + final List mutations = Lists.newArrayList(); + + if (mod.hasInsert()) { + InsertArgs insertArgs = mod.getInsert(); + for (int j = 0; j < insertArgs.getValuesCount(); ++j) { + mutations.add( + buildWrite( + insertArgs.getColumnList(), + cloudValuesFromValueList(insertArgs.getValues(j), insertArgs.getTypeList()), + Mutation.newInsertBuilder(table))); + } + } else if (mod.hasUpdate()) { + UpdateArgs updateArgs = mod.getUpdate(); + for (int j = 0; j < updateArgs.getValuesCount(); ++j) { + mutations.add( + buildWrite( + updateArgs.getColumnList(), + cloudValuesFromValueList(updateArgs.getValues(j), updateArgs.getTypeList()), + Mutation.newUpdateBuilder(table))); + } + } else if (mod.hasInsertOrUpdate()) { + InsertArgs insertArgs = mod.getInsertOrUpdate(); + for (int j = 0; j < insertArgs.getValuesCount(); ++j) { + mutations.add( + buildWrite( + insertArgs.getColumnList(), + cloudValuesFromValueList(insertArgs.getValues(j), insertArgs.getTypeList()), + Mutation.newInsertOrUpdateBuilder(table))); + } + } else if (mod.hasReplace()) { + InsertArgs insertArgs = mod.getReplace(); + for (int j = 0; j < insertArgs.getValuesCount(); ++j) { + mutations.add( + buildWrite( + insertArgs.getColumnList(), + cloudValuesFromValueList(insertArgs.getValues(j), insertArgs.getTypeList()), + Mutation.newReplaceBuilder(table))); + } + } else if (mod.hasDeleteKeys()) { + KeySet keySet = + 
keySetProtoToCloudKeySet( + mod.getDeleteKeys(), executionContext.getKeyColumnTypes(table)); + mutations.add(Mutation.delete(table, keySet)); + } else { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Unsupported mod: " + mod); + } + if (!isWrite) { + executionContext.bufferMutations(mutations); + } else { + executionContext.getDbClient().write(mutations); + } + } + return sender.finishWithOK(); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } + } + + /** Build a Mutation by using the given WriteBuilder to set the columns for the action. */ + private Mutation buildWrite( + List columnList, List valueList, WriteBuilder write) { + Preconditions.checkState(columnList.size() == valueList.size()); + for (int i = 0; i < columnList.size(); i++) { + write.set(columnList.get(i)).to(valueList.get(i)); + } + return write.build(); + } + + /** Execute a read action request, store the results in the OutcomeSender. */ + private Status executeRead( + boolean useMultiplexedSession, + ReadAction action, + OutcomeSender sender, + ExecutionFlowContext executionContext) { + try { + LOGGER.log( + Level.INFO, + String.format("Executing read %s\n%s\n", executionContext.getTransactionSeed(), action)); + List typeList = new ArrayList<>(); + if (action.hasIndex()) { + // For index read, we assume the key columns are listed at the front of the read + // column + // list. 
+ for (int i = 0; i < action.getColumnCount(); ++i) { + String col = action.getColumn(i); + typeList.add(executionContext.getColumnType(action.getTable(), col)); + } + } else { + typeList = executionContext.getKeyColumnTypes(action.getTable()); + } + KeySet keySet = keySetProtoToCloudKeySet(action.getKeys(), typeList); + ReadContext txn = executionContext.getTransactionForRead(); + sender.initForRead(action.getTable(), action.getIndex()); + + executionContext.startRead(); + LOGGER.log( + Level.INFO, + String.format( + "Finish read building, ready to execute %s\n", + executionContext.getTransactionSeed())); + ResultSet result; + if (action.hasIndex()) { + result = + txn.readUsingIndex( + action.getTable(), action.getIndex(), keySet, action.getColumnList()); + } else { + result = txn.read(action.getTable(), keySet, action.getColumnList()); + } + LOGGER.log( + Level.INFO, + String.format("Parsing read result %s\n", executionContext.getTransactionSeed())); + return processResults( + useMultiplexedSession, result, action.getLimit(), sender, executionContext); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } + } + + /** Execute a query action request, store the results in the OutcomeSender. 
*/ + private Status executeQuery( + boolean useMultiplexedSession, + QueryAction action, + OutcomeSender sender, + ExecutionFlowContext executionContext) { + try { + LOGGER.log( + Level.INFO, + String.format("Executing query %s\n%s\n", executionContext.getTransactionSeed(), action)); + ReadContext txn = executionContext.getTransactionForRead(); + sender.initForQuery(); + + Statement.Builder stmt = Statement.newBuilder(action.getSql()); + for (int i = 0; i < action.getParamsCount(); ++i) { + stmt.bind(action.getParams(i).getName()) + .to( + valueProtoToCloudValue( + action.getParams(i).getType(), action.getParams(i).getValue())); + } + + executionContext.startRead(); + LOGGER.log( + Level.INFO, + String.format( + "Finish query building, ready to execute %s\n", + executionContext.getTransactionSeed())); + ResultSet result = txn.executeQuery(stmt.build(), Options.tag("query-tag")); + LOGGER.log( + Level.INFO, + String.format("Parsing query result %s\n", executionContext.getTransactionSeed())); + return processResults(useMultiplexedSession, result, 0, sender, executionContext); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } + } + + /** Execute a dml update action request, store the results in the OutcomeSender. 
*/ + private Status executeCloudDmlUpdate( + boolean useMultiplexedSession, + DmlAction action, + OutcomeSender sender, + ExecutionFlowContext executionContext) { + try { + LOGGER.log( + Level.INFO, + String.format( + "Executing Dml update %s\n%s\n", executionContext.getTransactionSeed(), action)); + QueryAction update = action.getUpdate(); + Statement.Builder stmt = Statement.newBuilder(update.getSql()); + for (int i = 0; i < update.getParamsCount(); ++i) { + stmt.bind(update.getParams(i).getName()) + .to( + valueProtoToCloudValue( + update.getParams(i).getType(), update.getParams(i).getValue())); + } + sender.initForQuery(); + ResultSet result = + executionContext + .getTransactionForWrite() + .executeQuery(stmt.build(), Options.tag("dml-transaction-tag")); + LOGGER.log( + Level.INFO, + String.format("Parsing Dml result %s\n", executionContext.getTransactionSeed())); + return processResults(useMultiplexedSession, result, 0, sender, executionContext, true); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } + } + + /** Execute a BatchDml update action request, store the results in the OutcomeSender. 
*/ + private Status executeCloudBatchDmlUpdates( + BatchDmlAction action, OutcomeSender sender, ExecutionFlowContext executionContext) { + try { + List queries = new ArrayList<>(); + for (int i = 0; i < action.getUpdatesCount(); ++i) { + LOGGER.log( + Level.INFO, + String.format( + "Executing BatchDml update [%d] %s\n%s\n", + i + 1, executionContext.getTransactionSeed(), action)); + QueryAction update = action.getUpdates(i); + Statement.Builder stmt = Statement.newBuilder(update.getSql()); + for (int j = 0; j < update.getParamsCount(); ++j) { + stmt.bind(update.getParams(j).getName()) + .to( + valueProtoToCloudValue( + update.getParams(j).getType(), update.getParams(j).getValue())); + } + queries.add(stmt.build()); + } + long[] rowCounts = executionContext.executeBatchDml(queries); + sender.initForQuery(); + for (long rowCount : rowCounts) { + sender.appendRowsModifiedInDml(rowCount); + } + // The batchDml request failed. By design, `rowCounts` contains rows + // modified for DML queries that succeeded only. Add 0 as the row count + // for the last executed DML in the batch (that failed). + if (rowCounts.length != queries.size()) { + sender.appendRowsModifiedInDml(0L); + } + return sender.finishWithOK(); + } catch (SpannerException e) { + return sender.finishWithError(toStatus(e)); + } + } + + /** Process a ResultSet from a read/query and store the results in the OutcomeSender. */ + private Status processResults( + boolean useMultiplexedSession, + ResultSet results, + int limit, + OutcomeSender sender, + ExecutionFlowContext executionContext) { + return processResults(useMultiplexedSession, results, limit, sender, executionContext, false); + } + + /** Process a ResultSet from a read/query/dml and store the results in the OutcomeSender. 
*/ + private Status processResults( + boolean useMultiplexedSession, + ResultSet results, + int limit, + OutcomeSender sender, + ExecutionFlowContext executionContext, + boolean isDml) { + try { + int rowCount = 0; + LOGGER.log( + Level.INFO, + String.format("Iterating result set: %s\n", executionContext.getTransactionSeed())); + while (results.next()) { + com.google.spanner.executor.v1.ValueList row = + buildRow(results.getCurrentRowAsStruct(), sender); + Status appendStatus = sender.appendRow(row); + if (!appendStatus.isOk()) { + return appendStatus; + } + ++rowCount; + if (limit > 0 && rowCount >= limit) { + LOGGER.log(Level.INFO, "Stopping at row limit: " + limit); + break; + } + } + if (isDml) { + sender.appendRowsModifiedInDml( + Objects.requireNonNull(results.getStats()).getRowCountExact()); + } + + LOGGER.log( + Level.INFO, + String.format( + "Successfully processed result: %s\n", executionContext.getTransactionSeed())); + executionContext.finishRead(Status.OK); + return sender.finishWithOK(); + } catch (SpannerException e) { + LOGGER.log(Level.WARNING, "Encountered exception: ", e); + Status status = toStatus(e); + LOGGER.log( + Level.WARNING, + String.format( + "Encountered exception: %s %s\n", + status.getDescription(), executionContext.getTransactionSeed())); + executionContext.finishRead(status); + if (status.getCode() == Status.ABORTED.getCode()) { + return sender.finishWithTransactionRestarted(); + } else { + if (status.getCode() == Status.UNAUTHENTICATED.getCode()) { + try { + LOGGER.log( + Level.INFO, + String.format( + "Found Unauthenticated error, client credentials:\n%s", + getClient(useMultiplexedSession).getOptions().getCredentials().toString())); + } catch (Exception exception) { + LOGGER.log(Level.WARNING, String.format("Failed to getClient %s", exception)); + } + } + return sender.finishWithError(status); + } + } finally { + LOGGER.log( + Level.INFO, + String.format("Closing result set %s\n", executionContext.getTransactionSeed())); + 
results.close(); + } + } + + /** Convert a result row to a row proto(value list) for sending back to the client. */ + private com.google.spanner.executor.v1.ValueList buildRow( + StructReader result, OutcomeSender sender) throws SpannerException { + sender.setRowType(buildStructType(result)); + return buildStruct(result); + } + + /** Construct a StructType for a given struct. This is used to set the row type. */ + private com.google.spanner.v1.StructType buildStructType(StructReader struct) { + com.google.spanner.v1.StructType.Builder rowTypeBuilder = + com.google.spanner.v1.StructType.newBuilder(); + for (int i = 0; i < struct.getColumnCount(); ++i) { + com.google.cloud.spanner.Type columnType = struct.getColumnType(i); + rowTypeBuilder.addFields( + com.google.spanner.v1.StructType.Field.newBuilder() + .setName(struct.getType().getStructFields().get(i).getName()) + .setType(cloudTypeToTypeProto(columnType)) + .build()); + } + return rowTypeBuilder.build(); + } + + /** Convert a struct to a proto(value list) for constructing result rows and struct values. 
*/ + private com.google.spanner.executor.v1.ValueList buildStruct(StructReader struct) { + com.google.spanner.executor.v1.ValueList.Builder structBuilder = + com.google.spanner.executor.v1.ValueList.newBuilder(); + for (int i = 0; i < struct.getColumnCount(); ++i) { + com.google.cloud.spanner.Type columnType = struct.getColumnType(i); + com.google.spanner.executor.v1.Value.Builder value = + com.google.spanner.executor.v1.Value.newBuilder(); + if (struct.isNull(i)) { + value.setIsNull(true); + } else { + switch (columnType.getCode()) { + case BOOL: + value.setBoolValue(struct.getBoolean(i)); + break; + case FLOAT32: + value.setDoubleValue((double) struct.getFloat(i)); + break; + case FLOAT64: + value.setDoubleValue(struct.getDouble(i)); + break; + case INT64: + value.setIntValue(struct.getLong(i)); + break; + case STRING: + value.setStringValue(struct.getString(i)); + break; + case BYTES: + value.setBytesValue(toByteString(struct.getBytes(i))); + break; + case TIMESTAMP: + value.setTimestampValue(timestampToProto(struct.getTimestamp(i))); + break; + case DATE: + value.setDateDaysValue(daysFromDate(struct.getDate(i))); + break; + case INTERVAL: + value.setStringValue(struct.getInterval(i).toISO8601()); + break; + case UUID: + value.setStringValue(struct.getUuid(i).toString()); + break; + case NUMERIC: + String ascii = struct.getBigDecimal(i).toPlainString(); + value.setStringValue(ascii); + break; + case JSON: + value.setStringValue(struct.getJson(i)); + break; + case ARRAY: + switch (struct.getColumnType(i).getArrayElementType().getCode()) { + case BOOL: + { + com.google.spanner.executor.v1.ValueList.Builder builder = + com.google.spanner.executor.v1.ValueList.newBuilder(); + List values = struct.getBooleanList(i); + for (Boolean booleanValue : values) { + com.google.spanner.executor.v1.Value.Builder valueProto = + com.google.spanner.executor.v1.Value.newBuilder(); + if (booleanValue == null) { + builder.addValue(valueProto.setIsNull(true).build()); + } else { + 
builder.addValue(valueProto.setBoolValue(booleanValue).build()); + } + } + value.setArrayValue(builder.build()); + value.setArrayType( + com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.BOOL).build()); + } + break; + case FLOAT32: + { + com.google.spanner.executor.v1.ValueList.Builder builder = + com.google.spanner.executor.v1.ValueList.newBuilder(); + List values = struct.getFloatList(i); + for (Float floatValue : values) { + com.google.spanner.executor.v1.Value.Builder valueProto = + com.google.spanner.executor.v1.Value.newBuilder(); + if (floatValue == null) { + builder.addValue(valueProto.setIsNull(true).build()); + } else { + builder.addValue(valueProto.setDoubleValue((double) floatValue).build()); + } + } + value.setArrayValue(builder.build()); + value.setArrayType( + com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.FLOAT32).build()); + } + break; + case FLOAT64: + { + com.google.spanner.executor.v1.ValueList.Builder builder = + com.google.spanner.executor.v1.ValueList.newBuilder(); + List values = struct.getDoubleList(i); + for (Double doubleValue : values) { + com.google.spanner.executor.v1.Value.Builder valueProto = + com.google.spanner.executor.v1.Value.newBuilder(); + if (doubleValue == null) { + builder.addValue(valueProto.setIsNull(true).build()); + } else { + builder.addValue(valueProto.setDoubleValue(doubleValue).build()); + } + } + value.setArrayValue(builder.build()); + value.setArrayType( + com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.FLOAT64).build()); + } + break; + case INT64: + { + com.google.spanner.executor.v1.ValueList.Builder builder = + com.google.spanner.executor.v1.ValueList.newBuilder(); + List values = struct.getLongList(i); + for (Long longValue : values) { + com.google.spanner.executor.v1.Value.Builder valueProto = + com.google.spanner.executor.v1.Value.newBuilder(); + if (longValue == null) { + builder.addValue(valueProto.setIsNull(true).build()); + } else { + 
builder.addValue(valueProto.setIntValue(longValue).build()); + } + } + value.setArrayValue(builder.build()); + value.setArrayType( + com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.INT64).build()); + } + break; + case STRING: + { + com.google.spanner.executor.v1.ValueList.Builder builder = + com.google.spanner.executor.v1.ValueList.newBuilder(); + List values = struct.getStringList(i); + for (String stringValue : values) { + com.google.spanner.executor.v1.Value.Builder valueProto = + com.google.spanner.executor.v1.Value.newBuilder(); + if (stringValue == null) { + builder.addValue(valueProto.setIsNull(true).build()); + } else { + builder.addValue(valueProto.setStringValue(stringValue)).build(); + } + } + value.setArrayValue(builder.build()); + value.setArrayType( + com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.STRING).build()); + } + break; + case BYTES: + { + com.google.spanner.executor.v1.ValueList.Builder builder = + com.google.spanner.executor.v1.ValueList.newBuilder(); + List values = struct.getBytesList(i); + for (ByteArray byteArrayValue : values) { + com.google.spanner.executor.v1.Value.Builder valueProto = + com.google.spanner.executor.v1.Value.newBuilder(); + if (byteArrayValue == null) { + builder.addValue(valueProto.setIsNull(true).build()); + } else { + builder.addValue( + valueProto + .setBytesValue(ByteString.copyFrom(byteArrayValue.toByteArray())) + .build()); + } + } + value.setArrayValue(builder.build()); + value.setArrayType( + com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.BYTES).build()); + } + break; + case DATE: + { + com.google.spanner.executor.v1.ValueList.Builder builder = + com.google.spanner.executor.v1.ValueList.newBuilder(); + List values = struct.getDateList(i); + for (Date dateValue : values) { + com.google.spanner.executor.v1.Value.Builder valueProto = + com.google.spanner.executor.v1.Value.newBuilder(); + if (dateValue == null) { + builder.addValue(valueProto.setIsNull(true).build()); + } else { + 
builder.addValue( + valueProto.setDateDaysValue(daysFromDate(dateValue)).build()); + } + } + value.setArrayValue(builder.build()); + value.setArrayType( + com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.DATE).build()); + } + break; + case TIMESTAMP: + { + com.google.spanner.executor.v1.ValueList.Builder builder = + com.google.spanner.executor.v1.ValueList.newBuilder(); + List values = struct.getTimestampList(i); + for (Timestamp timestampValue : values) { + com.google.spanner.executor.v1.Value.Builder valueProto = + com.google.spanner.executor.v1.Value.newBuilder(); + if (timestampValue == null) { + builder.addValue(valueProto.setIsNull(true).build()); + } else { + builder.addValue( + valueProto.setTimestampValue(timestampToProto(timestampValue)).build()); + } + } + value.setArrayValue(builder.build()); + value.setArrayType( + com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()); + } + break; + case INTERVAL: + { + com.google.spanner.executor.v1.ValueList.Builder builder = + com.google.spanner.executor.v1.ValueList.newBuilder(); + List values = struct.getIntervalList(i); + for (Interval interval : values) { + com.google.spanner.executor.v1.Value.Builder valueProto = + com.google.spanner.executor.v1.Value.newBuilder(); + if (interval == null) { + builder.addValue(valueProto.setIsNull(true).build()); + } else { + builder.addValue(valueProto.setStringValue(interval.toISO8601()).build()); + } + } + value.setArrayValue(builder.build()); + value.setArrayType( + com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.INTERVAL).build()); + } + break; + case UUID: + { + com.google.spanner.executor.v1.ValueList.Builder builder = + com.google.spanner.executor.v1.ValueList.newBuilder(); + List values = struct.getUuidList(i); + for (UUID uuidValue : values) { + com.google.spanner.executor.v1.Value.Builder valueProto = + com.google.spanner.executor.v1.Value.newBuilder(); + if (uuidValue == null) { + 
builder.addValue(valueProto.setIsNull(true).build()); + } else { + builder.addValue(valueProto.setStringValue(uuidValue.toString()).build()); + } + } + value.setArrayValue(builder.build()); + value.setArrayType( + com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.UUID).build()); + } + break; + case NUMERIC: + { + com.google.spanner.executor.v1.ValueList.Builder builder = + com.google.spanner.executor.v1.ValueList.newBuilder(); + List values = struct.getBigDecimalList(i); + for (BigDecimal bigDec : values) { + com.google.spanner.executor.v1.Value.Builder valueProto = + com.google.spanner.executor.v1.Value.newBuilder(); + if (bigDec == null) { + builder.addValue(valueProto.setIsNull(true).build()); + } else { + builder.addValue(valueProto.setStringValue(bigDec.toPlainString()).build()); + } + } + value.setArrayValue(builder.build()); + value.setArrayType( + com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.NUMERIC).build()); + } + break; + case JSON: + { + com.google.spanner.executor.v1.ValueList.Builder builder = + com.google.spanner.executor.v1.ValueList.newBuilder(); + List values = struct.getJsonList(i); + for (String stringValue : values) { + com.google.spanner.executor.v1.Value.Builder valueProto = + com.google.spanner.executor.v1.Value.newBuilder(); + if (stringValue == null) { + builder.addValue(valueProto.setIsNull(true).build()); + } else { + builder.addValue(valueProto.setStringValue(stringValue).build()); + } + } + value.setArrayValue(builder.build()); + value.setArrayType( + com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.JSON).build()); + } + break; + case STRUCT: + { + com.google.spanner.executor.v1.ValueList.Builder builder = + com.google.spanner.executor.v1.ValueList.newBuilder(); + List values = struct.getStructList(i); + for (StructReader structValue : values) { + com.google.spanner.executor.v1.Value.Builder valueProto = + com.google.spanner.executor.v1.Value.newBuilder(); + if (structValue == null) { + 
builder.addValue(valueProto.setIsNull(true).build()); + } else { + builder.addValue(valueProto.setStructValue(buildStruct(structValue)).build()); + } + } + value.setArrayValue(builder.build()); + value.setArrayType( + com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.STRUCT).build()); + } + break; + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Unsupported row array type: " + + struct.getColumnType(i) + + " for result type " + + struct.getType().toString()); + } + break; + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Unsupported row type: " + + struct.getColumnType(i) + + " for result type " + + struct.getType().toString()); + } + } + structBuilder.addValue(value.build()); + } + ; + return structBuilder.build(); + } + + /** Convert a ListValue proto to a list of cloud Value. */ + private static List cloudValuesFromValueList( + com.google.spanner.executor.v1.ValueList valueList, List typeList) + throws SpannerException { + LOGGER.log(Level.INFO, String.format("Converting valueList: %s\n", valueList)); + Preconditions.checkState(valueList.getValueCount() == typeList.size()); + List cloudValues = new ArrayList<>(); + for (int i = 0; i < valueList.getValueCount(); ++i) { + com.google.cloud.spanner.Value value = + valueProtoToCloudValue(typeList.get(i), valueList.getValue(i)); + cloudValues.add(value); + } + return cloudValues; + } + + /** Convert a proto KeySet to a cloud KeySet. 
*/ + private static com.google.cloud.spanner.KeySet keySetProtoToCloudKeySet( + com.google.spanner.executor.v1.KeySet keySetProto, List typeList) + throws SpannerException { + if (keySetProto.getAll()) { + return com.google.cloud.spanner.KeySet.all(); + } + com.google.cloud.spanner.KeySet.Builder cloudKeySetBuilder = + com.google.cloud.spanner.KeySet.newBuilder(); + for (int i = 0; i < keySetProto.getPointCount(); ++i) { + cloudKeySetBuilder.addKey(keyProtoToCloudKey(keySetProto.getPoint(i), typeList)); + } + for (int i = 0; i < keySetProto.getRangeCount(); ++i) { + cloudKeySetBuilder.addRange(keyRangeProtoToCloudKeyRange(keySetProto.getRange(i), typeList)); + } + return cloudKeySetBuilder.build(); + } + + /** Convert a keyRange proto to a cloud KeyRange. */ + private static com.google.cloud.spanner.KeyRange keyRangeProtoToCloudKeyRange( + com.google.spanner.executor.v1.KeyRange keyRangeProto, + List typeList) + throws SpannerException { + Key start = keyProtoToCloudKey(keyRangeProto.getStart(), typeList); + Key end = keyProtoToCloudKey(keyRangeProto.getLimit(), typeList); + if (!keyRangeProto.hasType()) { + // default + return KeyRange.closedOpen(start, end); + } + switch (keyRangeProto.getType()) { + case CLOSED_CLOSED: + return KeyRange.closedClosed(start, end); + case CLOSED_OPEN: + return KeyRange.closedOpen(start, end); + case OPEN_CLOSED: + return KeyRange.openClosed(start, end); + case OPEN_OPEN: + return KeyRange.openOpen(start, end); + // Unreachable. + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Unrecognized key range type"); + } + } + + /** Convert a key proto(value list) to a cloud Key. 
*/ + private static com.google.cloud.spanner.Key keyProtoToCloudKey( + com.google.spanner.executor.v1.ValueList keyProto, List typeList) + throws SpannerException { + com.google.cloud.spanner.Key.Builder cloudKey = com.google.cloud.spanner.Key.newBuilder(); + if (typeList.size() < keyProto.getValueCount()) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "There's more key parts in " + keyProto + " than column types in " + typeList); + } + + for (int i = 0; i < keyProto.getValueCount(); ++i) { + com.google.spanner.v1.Type type = typeList.get(i); + com.google.spanner.executor.v1.Value part = keyProto.getValue(i); + if (part.hasIsNull()) { + switch (type.getCode()) { + case BOOL: + case INT64: + case STRING: + case BYTES: + case FLOAT64: + case DATE: + case UUID: + case TIMESTAMP: + case NUMERIC: + case JSON: + cloudKey.appendObject(null); + break; + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Unsupported null key part type: " + type.getCode().name()); + } + } else if (part.hasIntValue()) { + cloudKey.append(part.getIntValue()); + } else if (part.hasBoolValue()) { + cloudKey.append(part.getBoolValue()); + } else if (part.hasDoubleValue()) { + cloudKey.append(part.getDoubleValue()); + } else if (part.hasBytesValue()) { + switch (type.getCode()) { + case STRING: + cloudKey.append(part.getBytesValue().toStringUtf8()); + break; + case BYTES: + cloudKey.append(toByteArray(part.getBytesValue())); + break; + // Unreachable + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Unsupported key part type: " + type.getCode().name()); + } + } else if (part.hasStringValue()) { + if (type.getCode() == TypeCode.NUMERIC) { + String ascii = part.getStringValue(); + cloudKey.append(new BigDecimal(ascii)); + } else if (type.getCode() == TypeCode.UUID) { + cloudKey.append(UUID.fromString(part.getStringValue())); + } else { + 
cloudKey.append(part.getStringValue()); + } + } else if (part.hasTimestampValue()) { + cloudKey.append(Timestamp.parseTimestamp(Timestamps.toString(part.getTimestampValue()))); + } else if (part.hasDateDaysValue()) { + cloudKey.append(dateFromDays(part.getDateDaysValue())); + } else { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Unsupported key part: " + part); + } + } + return cloudKey.build(); + } + + /** Convert a Value proto to a cloud Value. */ + @SuppressWarnings("NullTernary") + private static com.google.cloud.spanner.Value valueProtoToCloudValue( + com.google.spanner.v1.Type type, com.google.spanner.executor.v1.Value value) { + if (value.hasIsCommitTimestamp() && value.getIsCommitTimestamp()) { + return Value.timestamp(com.google.cloud.spanner.Value.COMMIT_TIMESTAMP); + } + switch (type.getCode()) { + case INT64: + return com.google.cloud.spanner.Value.int64(value.hasIsNull() ? null : value.getIntValue()); + case FLOAT32: + return com.google.cloud.spanner.Value.float32( + value.hasIsNull() ? null : (float) value.getDoubleValue()); + case FLOAT64: + return com.google.cloud.spanner.Value.float64( + value.hasIsNull() ? null : value.getDoubleValue()); + case STRING: + return com.google.cloud.spanner.Value.string( + value.hasIsNull() ? null : value.getStringValue()); + case BYTES: + return com.google.cloud.spanner.Value.bytes( + value.hasIsNull() ? null : ByteArray.copyFrom(value.getBytesValue().toByteArray())); + case BOOL: + return com.google.cloud.spanner.Value.bool(value.hasIsNull() ? 
null : value.getBoolValue()); + case TIMESTAMP: + if (value.hasIsNull()) { + return com.google.cloud.spanner.Value.timestamp(null); + } else { + if (!value.hasBytesValue()) { + return com.google.cloud.spanner.Value.timestamp( + Timestamp.parseTimestamp(Timestamps.toString(value.getTimestampValue()))); + } else { + return com.google.cloud.spanner.Value.timestamp( + com.google.cloud.spanner.Value.COMMIT_TIMESTAMP); + } + } + case DATE: + return com.google.cloud.spanner.Value.date( + value.hasIsNull() ? null : dateFromDays(value.getDateDaysValue())); + case INTERVAL: + return com.google.cloud.spanner.Value.interval( + value.hasIsNull() ? null : Interval.parseFromString(value.getStringValue())); + case UUID: + return com.google.cloud.spanner.Value.uuid( + value.hasIsNull() ? null : UUID.fromString(value.getStringValue())); + case NUMERIC: + { + if (value.hasIsNull()) { + return com.google.cloud.spanner.Value.numeric(null); + } + String ascii = value.getStringValue(); + return com.google.cloud.spanner.Value.numeric(new BigDecimal(ascii)); + } + case JSON: + return com.google.cloud.spanner.Value.json( + value.hasIsNull() ? null : value.getStringValue()); + case STRUCT: + return com.google.cloud.spanner.Value.struct( + typeProtoToCloudType(type), + value.hasIsNull() ? 
null : structProtoToCloudStruct(type, value.getStructValue())); + case ARRAY: + switch (type.getArrayElementType().getCode()) { + case INT64: + if (value.hasIsNull()) { + return com.google.cloud.spanner.Value.int64Array((Iterable) null); + } else { + return com.google.cloud.spanner.Value.int64Array( + unmarshallValueList( + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getIsNull) + .collect(Collectors.toList()), + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getIntValue) + .collect(Collectors.toList()))); + } + case FLOAT32: + if (value.hasIsNull()) { + return com.google.cloud.spanner.Value.float32Array((Iterable) null); + } else { + return com.google.cloud.spanner.Value.float32Array( + unmarshallValueList( + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getIsNull) + .collect(Collectors.toList()), + value.getArrayValue().getValueList().stream() + .map(v -> (float) v.getDoubleValue()) + .collect(Collectors.toList()))); + } + case FLOAT64: + if (value.hasIsNull()) { + return com.google.cloud.spanner.Value.float64Array((Iterable) null); + } else { + return com.google.cloud.spanner.Value.float64Array( + unmarshallValueList( + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getIsNull) + .collect(Collectors.toList()), + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getDoubleValue) + .collect(Collectors.toList()))); + } + case STRING: + if (value.hasIsNull()) { + return com.google.cloud.spanner.Value.stringArray(null); + } else { + return com.google.cloud.spanner.Value.stringArray( + unmarshallValueList( + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getIsNull) + .collect(Collectors.toList()), + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getStringValue) + 
.collect(Collectors.toList()))); + } + case BYTES: + if (value.hasIsNull()) { + return com.google.cloud.spanner.Value.bytesArray(null); + } else { + return com.google.cloud.spanner.Value.bytesArray( + unmarshallValueList( + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getIsNull) + .collect(Collectors.toList()), + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getBytesValue) + .collect(Collectors.toList()), + element -> ByteArray.copyFrom(element.toByteArray()))); + } + case BOOL: + if (value.hasIsNull()) { + return com.google.cloud.spanner.Value.boolArray((Iterable) null); + } else { + return com.google.cloud.spanner.Value.boolArray( + unmarshallValueList( + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getIsNull) + .collect(Collectors.toList()), + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getBoolValue) + .collect(Collectors.toList()))); + } + case TIMESTAMP: + if (value.hasIsNull()) { + return com.google.cloud.spanner.Value.timestampArray(null); + } else { + return com.google.cloud.spanner.Value.timestampArray( + unmarshallValueList( + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getIsNull) + .collect(Collectors.toList()), + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getTimestampValue) + .collect(Collectors.toList()), + element -> Timestamp.parseTimestamp(Timestamps.toString(element)))); + } + case DATE: + if (value.hasIsNull()) { + return com.google.cloud.spanner.Value.dateArray(null); + } else { + return com.google.cloud.spanner.Value.dateArray( + unmarshallValueList( + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getIsNull) + .collect(Collectors.toList()), + value.getArrayValue().getValueList().stream() + 
.map(com.google.spanner.executor.v1.Value::getDateDaysValue) + .collect(Collectors.toList()), + CloudClientExecutor::dateFromDays)); + } + case INTERVAL: + if (value.hasIsNull()) { + return com.google.cloud.spanner.Value.intervalArray(null); + } else { + return com.google.cloud.spanner.Value.intervalArray( + unmarshallValueList( + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getIsNull) + .collect(Collectors.toList()), + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getStringValue) + .collect(Collectors.toList()), + Interval::parseFromString)); + } + case UUID: + if (value.hasIsNull()) { + return com.google.cloud.spanner.Value.uuidArray(null); + } else { + return com.google.cloud.spanner.Value.uuidArray( + unmarshallValueList( + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getIsNull) + .collect(Collectors.toList()), + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getStringValue) + .collect(Collectors.toList()), + UUID::fromString)); + } + case NUMERIC: + { + if (value.hasIsNull()) { + return com.google.cloud.spanner.Value.numericArray(null); + } + List nullList = + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getIsNull) + .collect(Collectors.toList()); + List valueList = + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getStringValue) + .collect(Collectors.toList()); + List newValueList = new ArrayList<>(valueList.size()); + + for (int i = 0; i < valueList.size(); ++i) { + if (i < nullList.size() && nullList.get(i)) { + newValueList.add(null); + continue; + } + String ascii = valueList.get(i); + newValueList.add(new BigDecimal(ascii)); + } + return com.google.cloud.spanner.Value.numericArray(newValueList); + } + case STRUCT: + com.google.cloud.spanner.Type elementType = + 
typeProtoToCloudType(type.getArrayElementType()); + if (value.hasIsNull()) { + return com.google.cloud.spanner.Value.structArray(elementType, null); + } else { + return com.google.cloud.spanner.Value.structArray( + elementType, + unmarshallValueList( + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getIsNull) + .collect(Collectors.toList()), + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getStructValue) + .collect(Collectors.toList()), + element -> structProtoToCloudStruct(type.getArrayElementType(), element))); + } + case JSON: + if (value.hasIsNull()) { + return com.google.cloud.spanner.Value.jsonArray(null); + } else { + return com.google.cloud.spanner.Value.jsonArray( + unmarshallValueList( + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getIsNull) + .collect(Collectors.toList()), + value.getArrayValue().getValueList().stream() + .map(com.google.spanner.executor.v1.Value::getStringValue) + .collect(Collectors.toList()))); + } + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Unsupported array element type while converting from value proto: " + + type.getArrayElementType().getCode().name()); + } + } + // Unreachable + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Unsupported type while converting from value proto: " + type); + } + + /** Convert a cloud Timestamp to a Timestamp proto. */ + private com.google.protobuf.Timestamp timestampToProto(Timestamp t) throws SpannerException { + try { + return Timestamps.parse(t.toString()); + } catch (ParseException e) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Timestamp parse error", e); + } + } + + /** Convert a cloud Date to a Date proto. 
*/ + private static int daysFromDate(Date date) { + return (int) LocalDate.of(date.getYear(), date.getMonth(), date.getDayOfMonth()).toEpochDay(); + } + + /** Convert a Date proto to a cloud Date. */ + private static Date dateFromDays(int daysSinceEpoch) { + LocalDate localDate = LocalDate.ofEpochDay(daysSinceEpoch); + return Date.fromYearMonthDay( + localDate.getYear(), localDate.getMonthValue(), localDate.getDayOfMonth()); + } + + @Nullable + private static ByteString toByteString(@Nullable ByteArray byteArray) { + if (byteArray == null) { + return null; + } + return ByteString.copyFrom(byteArray.toByteArray()); + } + + @Nullable + private static ByteArray toByteArray(@Nullable ByteString byteString) { + if (byteString == null) { + return null; + } + return ByteArray.copyFrom(byteString.toByteArray()); + } + + /** Convert a list of nullable value to another type. */ + private static List unmarshallValueList( + List isNullList, List valueList, Function converter) { + List newValueList = new ArrayList<>(valueList.size()); + if (isNullList.isEmpty()) { + for (S value : valueList) { + newValueList.add(converter.apply(value)); + } + } else { + for (int i = 0; i < valueList.size(); ++i) { + newValueList.add(isNullList.get(i) ? null : converter.apply(valueList.get(i))); + } + } + return newValueList; + } + + /** Insert null into valueList according to isNullList. */ + private static List unmarshallValueList(List isNullList, List valueList) { + return unmarshallValueList(isNullList, valueList, element -> element); + } + + /** Convert a Struct proto to a cloud Struct. 
*/ + private static com.google.cloud.spanner.Struct structProtoToCloudStruct( + com.google.spanner.v1.Type type, com.google.spanner.executor.v1.ValueList structValue) { + List fieldValues = structValue.getValueList(); + List fieldTypes = type.getStructType().getFieldsList(); + + if (fieldTypes.size() != fieldValues.size()) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Mismatch between number of expected fields and specified values for struct type"); + } + + com.google.cloud.spanner.Struct.Builder builder = com.google.cloud.spanner.Struct.newBuilder(); + for (int i = 0; i < fieldTypes.size(); ++i) { + builder + .set(fieldTypes.get(i).getName()) + .to(valueProtoToCloudValue(fieldTypes.get(i).getType(), fieldValues.get(i))); + } + return builder.build(); + } + + /** Convert a Type proto to a cloud Type. */ + private static com.google.cloud.spanner.Type typeProtoToCloudType( + com.google.spanner.v1.Type typeProto) { + switch (typeProto.getCode()) { + case BOOL: + return com.google.cloud.spanner.Type.bool(); + case INT64: + return com.google.cloud.spanner.Type.int64(); + case STRING: + return com.google.cloud.spanner.Type.string(); + case BYTES: + return com.google.cloud.spanner.Type.bytes(); + case FLOAT32: + return com.google.cloud.spanner.Type.float32(); + case FLOAT64: + return com.google.cloud.spanner.Type.float64(); + case DATE: + return com.google.cloud.spanner.Type.date(); + case TIMESTAMP: + return com.google.cloud.spanner.Type.timestamp(); + case INTERVAL: + return com.google.cloud.spanner.Type.interval(); + case UUID: + return com.google.cloud.spanner.Type.uuid(); + case NUMERIC: + if (typeProto.getTypeAnnotation().equals(TypeAnnotationCode.PG_NUMERIC)) { + return com.google.cloud.spanner.Type.pgNumeric(); + } else { + return com.google.cloud.spanner.Type.numeric(); + } + case STRUCT: + List fields = typeProto.getStructType().getFieldsList(); + List cloudFields = new ArrayList<>(); + for (StructType.Field field : 
fields) { + com.google.cloud.spanner.Type fieldType = typeProtoToCloudType(field.getType()); + cloudFields.add(com.google.cloud.spanner.Type.StructField.of(field.getName(), fieldType)); + } + return com.google.cloud.spanner.Type.struct(cloudFields); + case ARRAY: + com.google.spanner.v1.Type elementType = typeProto.getArrayElementType(); + if (elementType.getCode() == TypeCode.ARRAY) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Unsupported array-of-array proto type"); + } else { + com.google.cloud.spanner.Type cloudElementType = typeProtoToCloudType(elementType); + return com.google.cloud.spanner.Type.array(cloudElementType); + } + case JSON: + if (typeProto.getTypeAnnotation().equals(TypeAnnotationCode.PG_JSONB)) { + return com.google.cloud.spanner.Type.pgJsonb(); + } else { + return com.google.cloud.spanner.Type.json(); + } + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Unsupported proto type: " + typeProto); + } + } + + /** Convert a cloud Type to a Type proto. 
*/ + private static com.google.spanner.v1.Type cloudTypeToTypeProto(@Nonnull Type cloudTypeProto) { + switch (cloudTypeProto.getCode()) { + case BOOL: + return com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.BOOL).build(); + case INT64: + return com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.INT64).build(); + case FLOAT32: + return com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.FLOAT32).build(); + case FLOAT64: + return com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.FLOAT64).build(); + case STRING: + return com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.STRING).build(); + case BYTES: + return com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.BYTES).build(); + case TIMESTAMP: + return com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.TIMESTAMP).build(); + case DATE: + return com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.DATE).build(); + case INTERVAL: + return com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.INTERVAL).build(); + case UUID: + return com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.UUID).build(); + case NUMERIC: + return com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.NUMERIC).build(); + case PG_NUMERIC: + return com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.NUMERIC) + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC) + .build(); + case STRUCT: + com.google.spanner.v1.StructType.Builder StructDescriptorBuilder = + com.google.spanner.v1.StructType.newBuilder(); + for (com.google.cloud.spanner.Type.StructField cloudField : + cloudTypeProto.getStructFields()) { + StructDescriptorBuilder.addFields( + com.google.spanner.v1.StructType.Field.newBuilder() + .setName(cloudField.getName()) + .setType(cloudTypeToTypeProto(cloudField.getType()))); + } + return com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.STRUCT) + .setStructType(StructDescriptorBuilder.build()) + .build(); + case ARRAY: + if 
(cloudTypeProto.getArrayElementType().getCode() + == com.google.cloud.spanner.Type.Code.ARRAY) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Unsupported array-of-array cloud type"); + } else { + return com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType(cloudTypeToTypeProto(cloudTypeProto.getArrayElementType())) + .build(); + } + case JSON: + return com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.JSON).build(); + case PG_JSONB: + return com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.JSON) + .setTypeAnnotation(TypeAnnotationCode.PG_JSONB) + .build(); + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Unsupported cloud type: " + cloudTypeProto); + } + } + + /** Unmarshall ByteString to serializable object. */ + private T unmarshall(ByteString input) + throws IOException, ClassNotFoundException { + ObjectInputStream objectInputStream = new ObjectInputStream(input.newInput()); + return (T) objectInputStream.readObject(); + } + + /** Marshall a serializable object into ByteString. */ + private ByteString marshall(T object) throws IOException { + ByteString.Output output = ByteString.newOutput(); + ObjectOutputStream objectOutputStream = new ObjectOutputStream(output); + objectOutputStream.writeObject(object); + objectOutputStream.flush(); + objectOutputStream.close(); + return output.toByteString(); + } + + /** Build Timestamp from micros. */ + private Timestamp timestampFromMicros(long micros) { + long seconds = Math.floorDiv(micros, 1000000L); + int nanos = (int) (micros * 1000 - seconds * 1000000000L); + return Timestamp.ofTimeSecondsAndNanos(seconds, nanos); + } + + /** Build TimestampBound from Concurrency. 
*/ + private TimestampBound timestampBoundsFromConcurrency(Concurrency concurrency) { + if (concurrency.hasStalenessSeconds()) { + return TimestampBound.ofExactStaleness( + (long) (concurrency.getStalenessSeconds() * 1000000), TimeUnit.MICROSECONDS); + } else if (concurrency.hasMinReadTimestampMicros()) { + return TimestampBound.ofMinReadTimestamp( + timestampFromMicros(concurrency.getMinReadTimestampMicros())); + } else if (concurrency.hasMaxStalenessSeconds()) { + return TimestampBound.ofMaxStaleness( + (long) (concurrency.getMaxStalenessSeconds() * 1000000), TimeUnit.MICROSECONDS); + } else if (concurrency.hasExactTimestampMicros()) { + return TimestampBound.ofReadTimestamp( + timestampFromMicros(concurrency.getExactTimestampMicros())); + } else if (concurrency.hasStrong()) { + return TimestampBound.strong(); + } else if (concurrency.hasBatch()) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "batch mode should not be in snapshot transaction: " + concurrency); + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Unsupported concurrency mode: " + concurrency); + } + + /** Build instance proto from cloud spanner instance. 
*/ + private com.google.spanner.admin.instance.v1.Instance instanceToProto(Instance instance) { + com.google.spanner.admin.instance.v1.Instance.Builder instanceBuilder = + com.google.spanner.admin.instance.v1.Instance.newBuilder(); + instanceBuilder + .setConfig(instance.getInstanceConfigId().getInstanceConfig()) + .setName(instance.getId().getName()) + .setDisplayName(instance.getDisplayName()) + .setCreateTime(instance.getCreateTime().toProto()) + .setNodeCount(instance.getNodeCount()) + .setProcessingUnits(instance.getProcessingUnits()) + .setUpdateTime(instance.getUpdateTime().toProto()) + .putAllLabels(instance.getLabels()); + com.google.spanner.admin.instance.v1.Instance.State state; + switch (instance.getState()) { + case UNSPECIFIED: + state = State.STATE_UNSPECIFIED; + break; + case CREATING: + state = State.CREATING; + break; + case READY: + state = State.READY; + break; + default: + throw new IllegalArgumentException("Unknown state:" + instance.getState()); + } + instanceBuilder.setState(state); + return instanceBuilder.build(); + } + + /** Build instance proto from cloud spanner instance. 
*/ + private com.google.spanner.admin.instance.v1.InstanceConfig instanceConfigToProto( + InstanceConfig instanceConfig) { + com.google.spanner.admin.instance.v1.InstanceConfig.Builder instanceConfigBuilder = + com.google.spanner.admin.instance.v1.InstanceConfig.newBuilder(); + instanceConfigBuilder + .setDisplayName(instanceConfig.getDisplayName()) + .setEtag(instanceConfig.getEtag()) + .setName(instanceConfig.getId().getName()) + .addAllLeaderOptions(instanceConfig.getLeaderOptions()) + .addAllOptionalReplicas( + instanceConfig.getOptionalReplicas().stream() + .map(ReplicaInfo::getProto) + .collect(Collectors.toList())) + .addAllReplicas( + instanceConfig.getReplicas().stream() + .map(ReplicaInfo::getProto) + .collect(Collectors.toList())) + .putAllLabels(instanceConfig.getLabels()) + .setReconciling(instanceConfig.getReconciling()); + com.google.spanner.admin.instance.v1.InstanceConfig.State state; + switch (instanceConfig.getState()) { + case STATE_UNSPECIFIED: + state = com.google.spanner.admin.instance.v1.InstanceConfig.State.STATE_UNSPECIFIED; + break; + case CREATING: + state = com.google.spanner.admin.instance.v1.InstanceConfig.State.CREATING; + break; + case READY: + state = com.google.spanner.admin.instance.v1.InstanceConfig.State.READY; + break; + default: + throw new IllegalArgumentException("Unknown state:" + instanceConfig.getState()); + } + instanceConfigBuilder.setState(state); + com.google.spanner.admin.instance.v1.InstanceConfig.Type type; + switch (instanceConfig.getConfigType()) { + case TYPE_UNSPECIFIED: + type = com.google.spanner.admin.instance.v1.InstanceConfig.Type.TYPE_UNSPECIFIED; + break; + case GOOGLE_MANAGED: + type = com.google.spanner.admin.instance.v1.InstanceConfig.Type.GOOGLE_MANAGED; + break; + case USER_MANAGED: + type = com.google.spanner.admin.instance.v1.InstanceConfig.Type.USER_MANAGED; + break; + default: + throw new IllegalArgumentException("Unknown type:" + instanceConfig.getConfigType()); + } + 
instanceConfigBuilder.setConfigType(type); + if (instanceConfig.getBaseConfig() != null) { + instanceConfigBuilder.setBaseConfig(instanceConfig.getBaseConfig().getId().getName()); + } + return instanceConfigBuilder.build(); + } +} diff --git a/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudExecutor.java b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudExecutor.java new file mode 100644 index 000000000000..eb6502c461cd --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudExecutor.java @@ -0,0 +1,486 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.executor.spanner; + +import com.google.api.client.http.javanet.NetHttpTransport; +import com.google.auth.http.HttpTransportFactory; +import com.google.cloud.executor.spanner.CloudClientExecutor.ExecutionFlowContext; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.common.base.Preconditions; +import com.google.protobuf.Timestamp; +import com.google.protobuf.util.Timestamps; +import com.google.spanner.executor.v1.ChangeStreamRecord; +import com.google.spanner.executor.v1.ChildPartitionsRecord; +import com.google.spanner.executor.v1.ColumnMetadata; +import com.google.spanner.executor.v1.QueryResult; +import com.google.spanner.executor.v1.ReadResult; +import com.google.spanner.executor.v1.SpannerActionOutcome; +import com.google.spanner.executor.v1.SpannerAsyncActionResponse; +import com.google.spanner.executor.v1.TableMetadata; +import com.google.spanner.v1.StructType; +import io.grpc.Status; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.regex.Pattern; + +/** Superclass of cloud Java Client implementations for cloud requests. */ +public abstract class CloudExecutor { + + private static final Logger LOGGER = Logger.getLogger(CloudExecutor.class.getName()); + + // Pattern for a database name: projects//instances//databases/ + protected static final Pattern DB_NAME = + Pattern.compile( + "projects/([A-Za-z0-9-_]+)/instances/([A-Za-z0-9-_]+)/databases/([A-Za-z0-9-_]+)"); + + // Project id. + protected static final String PROJECT_ID = "spanner-cloud-systest"; + + // Transport factory. + protected static final HttpTransportFactory HTTP_TRANSPORT_FACTORY = NetHttpTransport::new; + + // Indicate whether grpc fault injector should be enabled. 
+ protected boolean enableGrpcFaultInjector; + + /** + * Metadata is used to hold and retrieve metadata of tables and columns involved in a transaction. + */ + public static class Metadata { + + private final Map> tableKeyColumnsInOrder; + private final Map> tableColumnsByName; + + /** Init metadata from list of tableMetadata in startTransaction action. */ + public Metadata(List metadata) { + tableKeyColumnsInOrder = new HashMap<>(); + tableColumnsByName = new HashMap<>(); + for (TableMetadata table : metadata) { + String tableName = table.getName(); + tableKeyColumnsInOrder.put(tableName, table.getKeyColumnList()); + tableColumnsByName.put(tableName, new HashMap<>()); + for (int j = 0; j < table.getColumnCount(); ++j) { + ColumnMetadata column = table.getColumn(j); + tableColumnsByName.get(tableName).put(column.getName(), column); + } + } + } + + public List getKeyColumnTypes(String tableName) + throws SpannerException { + if (!tableKeyColumnsInOrder.containsKey(tableName)) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "There is no metadata for table: " + tableName); + } + List typeList = new ArrayList<>(); + List columns = tableKeyColumnsInOrder.get(tableName); + for (ColumnMetadata column : columns) { + typeList.add(column.getType()); + } + return typeList; + } + + /** Return column type of the given table and column. 
*/ + public com.google.spanner.v1.Type getColumnType(String tableName, String columnName) + throws SpannerException { + if (!tableColumnsByName.containsKey(tableName)) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "There is no metadata for table: " + tableName); + } + Map columnList = tableColumnsByName.get(tableName); + if (!columnList.containsKey(columnName)) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Metadata for table " + tableName + " contains no column named " + columnName); + } + return columnList.get(columnName).getType(); + } + } + + /** + * OutcomeSender is a utility class used for sending action outcomes back to the client. For read + * actions, it buffers rows and sends partial read results in batches. + */ + public class OutcomeSender { + + private final int actionId; + private final ExecutionFlowContext context; + + // All the relevant variables below should be set before first outcome is sent back, and unused + // variables should leave null. + private Timestamp timestamp; + private boolean hasReadResult; + private boolean hasQueryResult; + private boolean hasChangeStreamRecords; + private String table; + private String index; + private Integer requestIndex; + private StructType rowType; + + // PartialOutcome accumulates rows and relevant information. + private SpannerActionOutcome.Builder partialOutcomeBuilder; + private ReadResult.Builder readResultBuilder; + private QueryResult.Builder queryResultBuilder; + + // Current row count in Read/Query result. + private int rowCount; + // Modified row count in DML result. + private final List rowsModified = new ArrayList<>(); + // Current ChangeStreamRecord count in Cloud result. + private int changeStreamRecordCount; + // Change stream records to be returned. + private final List changeStreamRecords = new ArrayList<>(); + // Change stream related variables. 
// Debug accumulators rendered as bracketed lists; "[" opens the list and flush() appends "]".
    private String partitionTokensString = "[";
    private String dataChangeRecordsString = "[";
    private String changeStreamForQuery = "";
    private String partitionTokenForQuery = "";

    // The timestamp in milliseconds of when the last ChangeStreamRecord received.
    private long changeStreamRecordReceivedTimestamp;
    // The heartbeat interval for the change stream query in milliseconds.
    private long changeStreamHeartbeatMilliseconds;
    // Whether the change stream query is a partitioned change stream query.
    private boolean isPartitionedChangeStreamQuery;

    // If row count exceed this value, we should send rows back in batch.
    private static final int MAX_ROWS_PER_BATCH = 100;
    // If change stream record count exceed this value, send change stream records back in batch.
    private static final int MAX_CHANGE_STREAM_RECORDS_PER_BATCH = 2000;

    /**
     * Creates a sender for one action. The timestamp defaults to epoch; read/query/change-stream
     * state is configured later via the initFor* methods.
     */
    public OutcomeSender(int actionId, ExecutionFlowContext context) {
      this.actionId = actionId;
      this.context = context;
      this.index = null;
      this.rowType = null;
      this.requestIndex = null;
      this.timestamp = Timestamp.newBuilder().setSeconds(0).setNanos(0).build();
    }

    /** Set the timestamp for commit. */
    public void setTimestamp(Timestamp timestamp) {
      this.timestamp = timestamp;
    }

    /** Set the rowType for appending row. */
    public void setRowType(StructType rowType) {
      this.rowType = rowType;
    }

    /** Init the sender for read action, then set the table and index if there exists. */
    public void initForRead(String table, String index) {
      this.hasReadResult = true;
      this.table = table;
      // An empty index name means the read is not index-backed; leave this.index null.
      if (!index.isEmpty()) {
        this.index = index;
      }
    }

    /** Init the sender for query action. */
    public void initForQuery() {
      this.hasQueryResult = true;
    }

    /** Init the sender for batch read action, then set the table and index if there exists.
 */
    public void initForBatchRead(String table, String index) {
      initForRead(table, index);
      // Cloud API supports only simple batch reads (not multi reads), so request index
      // is always 0
      this.requestIndex = 0;
    }

    /** Init the sender for change stream query action. */
    public void initForChangeStreamQuery(
        long changeStreamHeartbeatMilliseconds, String changeStreamName, String partitionToken) {
      this.hasChangeStreamRecords = true;
      this.changeStreamRecordReceivedTimestamp = 0;
      this.changeStreamHeartbeatMilliseconds = changeStreamHeartbeatMilliseconds;
      this.changeStreamForQuery = changeStreamName;
      // A non-empty token marks this as a partitioned change stream query.
      if (!partitionToken.isEmpty()) {
        this.isPartitionedChangeStreamQuery = true;
        this.partitionTokenForQuery = partitionToken;
      }
    }

    /** Update change stream record timestamp. */
    public void updateChangeStreamRecordReceivedTimestamp(
        long changeStreamRecordReceivedTimestamp) {
      this.changeStreamRecordReceivedTimestamp = changeStreamRecordReceivedTimestamp;
    }

    /** Add rows modified in DML to result. */
    public void appendRowsModifiedInDml(Long rowsModified) {
      this.rowsModified.add(rowsModified);
    }

    /** Returns when the last change stream record was received, in milliseconds. */
    public long getChangeStreamRecordReceivedTimestamp() {
      return this.changeStreamRecordReceivedTimestamp;
    }

    /** Returns the configured change stream heartbeat interval, in milliseconds. */
    public long getChangeStreamHeartbeatMilliSeconds() {
      return this.changeStreamHeartbeatMilliseconds;
    }

    /** Returns whether this sender serves a partitioned change stream query. */
    public boolean getIsPartitionedChangeStreamQuery() {
      return this.isPartitionedChangeStreamQuery;
    }

    /** Send the last outcome with OK status. */
    public Status finishWithOK() {
      buildOutcome();
      partialOutcomeBuilder.setStatus(toProto(Status.OK));
      return flush();
    }

    /** Send the last outcome with aborted error, this will set the transactionRestarted to true.
*/
    public Status finishWithTransactionRestarted() {
      buildOutcome();
      partialOutcomeBuilder.setTransactionRestarted(true);
      // The outcome status itself is OK; the restart is signaled via transactionRestarted.
      partialOutcomeBuilder.setStatus(toProto(Status.OK));
      return flush();
    }

    /** Send the last outcome with given error status. */
    public Status finishWithError(Status err) {
      buildOutcome();
      partialOutcomeBuilder.setStatus(toProto(err));
      return flush();
    }

    /**
     * Add another row to buffer. If buffer hits its size limit, buffered rows will be sent back.
     */
    public Status appendRow(com.google.spanner.executor.v1.ValueList row) {
      // Rows are only valid for read or query results; both flags are set by initFor*().
      if (!hasReadResult && !hasQueryResult) {
        return toStatus(
            SpannerExceptionFactory.newSpannerException(
                ErrorCode.INVALID_ARGUMENT,
                "Either hasReadResult or hasQueryResult should be true"));
      }
      // setRowType must have been called before the first row arrives.
      if (rowType == null) {
        return toStatus(
            SpannerExceptionFactory.newSpannerException(
                ErrorCode.INVALID_ARGUMENT, "RowType should be set first"));
      }
      buildOutcome();
      if (hasReadResult) {
        readResultBuilder.addRow(row);
        ++rowCount;
      } else if (hasQueryResult) {
        queryResultBuilder.addRow(row);
        ++rowCount;
      }
      // Flush a partial result once the batch is full; resets rowCount.
      if (rowCount >= MAX_ROWS_PER_BATCH) {
        return flush();
      }
      return Status.OK;
    }

    /** Append change stream record to result.
 */
    public Status appendChangeStreamRecord(ChangeStreamRecord record) {
      if (!hasChangeStreamRecords) {
        return toStatus(
            SpannerExceptionFactory.newSpannerException(
                ErrorCode.INVALID_ARGUMENT, "hasChangeStreamRecords should be true"));
      }
      // Accumulate debug strings: data change records as {txnId, recordSequence} pairs,
      // child partitions as their tokens. Both are logged when flush() sends the batch.
      if (record.hasDataChange()) {
        String appendedString =
            String.format(
                "{%s, %s}, ",
                record.getDataChange().getTransactionId(),
                record.getDataChange().getRecordSequence());
        dataChangeRecordsString += appendedString;
      } else if (record.hasChildPartition()) {
        for (ChildPartitionsRecord.ChildPartition childPartition :
            record.getChildPartition().getChildPartitionsList()) {
          partitionTokensString = partitionTokensString.concat(childPartition.getToken() + ", ");
        }
      }
      buildOutcome();
      changeStreamRecords.add(record);
      ++changeStreamRecordCount;
      if (changeStreamRecordCount >= MAX_CHANGE_STREAM_RECORDS_PER_BATCH) {
        return flush();
      }
      return Status.OK;
    }

    /** Build the partialOutcome if not exists using relevant variables. */
    private void buildOutcome() {
      // Idempotent: a partial outcome already under construction is reused until flush().
      if (partialOutcomeBuilder != null) {
        return;
      }
      partialOutcomeBuilder = SpannerActionOutcome.newBuilder();
      partialOutcomeBuilder.setCommitTime(timestamp);
      if (hasReadResult) {
        readResultBuilder = ReadResult.newBuilder();
        readResultBuilder.setTable(table);
        if (index != null) {
          readResultBuilder.setIndex(index);
        }
        if (rowType != null) {
          readResultBuilder.setRowType(rowType);
        }
        if (requestIndex != null) {
          readResultBuilder.setRequestIndex(requestIndex);
        }
      } else if (hasQueryResult) {
        queryResultBuilder = QueryResult.newBuilder();
        if (rowType != null) {
          queryResultBuilder.setRowType(rowType);
        }
      }
    }

    /** Send partialOutcome to stream and clear the internal state.
*/ + private Status flush() { + Preconditions.checkNotNull(partialOutcomeBuilder); + for (Long rowCount : rowsModified) { + partialOutcomeBuilder.addDmlRowsModified(rowCount); + } + if (hasReadResult) { + partialOutcomeBuilder.setReadResult(readResultBuilder.build()); + } else if (hasQueryResult) { + partialOutcomeBuilder.setQueryResult(queryResultBuilder.build()); + } else if (hasChangeStreamRecords) { + partialOutcomeBuilder.addAllChangeStreamRecords(changeStreamRecords); + partitionTokensString += "]\n"; + dataChangeRecordsString += "]\n"; + LOGGER.log( + Level.INFO, + String.format( + "OutcomeSender with action ID %s for change stream %s and partition token %s is" + + " sending data change records with the following transaction id/record" + + " sequence combinations: %s and partition tokens: %s", + this.changeStreamForQuery, + this.partitionTokenForQuery, + actionId, + dataChangeRecordsString, + partitionTokensString)); + partitionTokensString = ""; + dataChangeRecordsString = ""; + } + Status status = sendOutcome(partialOutcomeBuilder.build()); + partialOutcomeBuilder = null; + readResultBuilder = null; + queryResultBuilder = null; + rowCount = 0; + rowsModified.clear(); + changeStreamRecordCount = 0; + changeStreamRecords.clear(); + return status; + } + + /** Send the given SpannerActionOutcome. 
*/ + public Status sendOutcome(SpannerActionOutcome outcome) { + try { + LOGGER.log(Level.INFO, String.format("Sending result %s actionId %s", outcome, actionId)); + SpannerAsyncActionResponse result = + SpannerAsyncActionResponse.newBuilder() + .setActionId(actionId) + .setOutcome(outcome) + .build(); + context.onNext(result); + LOGGER.log(Level.INFO, String.format("Sent result %s actionId %s", outcome, actionId)); + } catch (SpannerException e) { + LOGGER.log(Level.SEVERE, "Failed to send outcome with error: " + e.getMessage(), e); + return toStatus(e); + } catch (Throwable t) { + LOGGER.log(Level.SEVERE, "Failed to send outcome with error: " + t.getMessage(), t); + return Status.fromThrowable( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Unexpected error during rpc send: " + t)); + } + return Status.OK; + } + } + + /** Map Cloud ErrorCode to Status. */ + protected Status toStatus(SpannerException e) { + String errorMessage = e.getMessage(); + com.google.rpc.Status rpcStatus = io.grpc.protobuf.StatusProto.fromThrowable(e); + if (rpcStatus != null) { + if (rpcStatus.getDetailsCount() > 0) { + errorMessage += "/n"; + } + for (int i = 0; i < rpcStatus.getDetailsCount(); i++) { + errorMessage += "\nError detail: " + rpcStatus.getDetails(i).toString(); + } + } + switch (e.getErrorCode()) { + case INVALID_ARGUMENT: + return Status.fromCode(Status.INVALID_ARGUMENT.getCode()).withDescription(errorMessage); + case PERMISSION_DENIED: + return Status.fromCode(Status.PERMISSION_DENIED.getCode()).withDescription(errorMessage); + case ABORTED: + return Status.fromCode(Status.ABORTED.getCode()).withDescription(errorMessage); + case ALREADY_EXISTS: + return Status.fromCode(Status.ALREADY_EXISTS.getCode()).withDescription(errorMessage); + case CANCELLED: + return Status.fromCode(Status.CANCELLED.getCode()).withDescription(errorMessage); + case INTERNAL: + return Status.fromCode(Status.INTERNAL.getCode()) + .withDescription(errorMessage + 
e.getReason() == null ? "" : ": " + e.getReason()); + case FAILED_PRECONDITION: + return Status.fromCode(Status.FAILED_PRECONDITION.getCode()).withDescription(errorMessage); + case NOT_FOUND: + return Status.fromCode(Status.NOT_FOUND.getCode()).withDescription(errorMessage); + case DEADLINE_EXCEEDED: + return Status.fromCode(Status.DEADLINE_EXCEEDED.getCode()).withDescription(errorMessage); + case RESOURCE_EXHAUSTED: + return Status.fromCode(Status.RESOURCE_EXHAUSTED.getCode()).withDescription(errorMessage); + case OUT_OF_RANGE: + return Status.fromCode(Status.OUT_OF_RANGE.getCode()).withDescription(errorMessage); + case UNAUTHENTICATED: + return Status.fromCode(Status.UNAUTHENTICATED.getCode()).withDescription(errorMessage); + case UNIMPLEMENTED: + return Status.fromCode(Status.UNIMPLEMENTED.getCode()).withDescription(errorMessage); + case UNAVAILABLE: + return Status.fromCode(Status.UNAVAILABLE.getCode()).withDescription(errorMessage); + case UNKNOWN: + return Status.fromCode(Status.UNKNOWN.getCode()).withDescription(errorMessage); + default: + return Status.fromCode(Status.UNKNOWN.getCode()) + .withDescription("Unsupported Spanner error code: " + e.getErrorCode()); + } + } + + /** Convert a Status to a Status Proto. */ + protected static com.google.rpc.Status toProto(Status status) { + return com.google.rpc.Status.newBuilder() + .setCode(status.getCode().value()) + .setMessage(status.getDescription() == null ? "" : status.getDescription()) + .build(); + } + + /** + * Converts timestamp microseconds to query-friendly timestamp string. If useNanosPrecision is set + * to true it pads input timestamp with 3 random digits treating it as timestamp nanoseconds. + */ + protected static String timestampToString(boolean useNanosPrecision, long timestampInMicros) { + Timestamp timestamp = + useNanosPrecision + ? 
Timestamps.fromNanos(timestampInMicros * 1000 + System.nanoTime() % 1000) + : Timestamps.fromMicros(timestampInMicros); + return String.format("\"%s\"", Timestamps.toString(timestamp)); + } +} diff --git a/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudExecutorImpl.java b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudExecutorImpl.java new file mode 100644 index 000000000000..f3de36ac7b4a --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudExecutorImpl.java @@ -0,0 +1,174 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/

package com.google.cloud.executor.spanner;

import com.google.cloud.spanner.ErrorCode;
import com.google.cloud.spanner.SpannerExceptionFactory;
import com.google.spanner.executor.v1.SessionPoolOptions;
import com.google.spanner.executor.v1.SpannerAction;
import com.google.spanner.executor.v1.SpannerAsyncActionRequest;
import com.google.spanner.executor.v1.SpannerAsyncActionResponse;
import com.google.spanner.executor.v1.SpannerExecutorProxyGrpc;
import com.google.spanner.executor.v1.SpannerOptions;
import io.grpc.Status;
import io.grpc.stub.StreamObserver;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.Tracer;
import io.opentelemetry.context.Scope;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.logging.Level;
import java.util.logging.Logger;

/** Send proxied action requests through Spanner Cloud API. */
public class CloudExecutorImpl extends SpannerExecutorProxyGrpc.SpannerExecutorProxyImplBase {

  private static final Logger LOGGER = Logger.getLogger(CloudExecutorImpl.class.getName());

  // Executors to proxy.
  private final CloudClientExecutor clientExecutor;

  // Ratio of operations to use multiplexed sessions.
  private final double multiplexedSessionOperationsRatio;

  // Count of checks performed to verify end to end traces using Cloud Trace APIs.
  // Guarded by the synchronized accessors below.
  private int cloudTraceCheckCount = 0;

  // Maximum checks allowed to verify end to end traces using Cloud Trace APIs.
  private static final int MAX_CLOUD_TRACE_CHECK_LIMIT = 20;

  /**
   * Creates the proxy service implementation.
   *
   * @param enableGrpcFaultInjector forwarded to the underlying CloudClientExecutor
   * @param multiplexedSessionOperationsRatio when greater than 0.0, every incoming action is
   *     rewritten to request multiplexed sessions (see onNext below)
   */
  public CloudExecutorImpl(
      boolean enableGrpcFaultInjector, double multiplexedSessionOperationsRatio) {
    clientExecutor = new CloudClientExecutor(enableGrpcFaultInjector);
    this.multiplexedSessionOperationsRatio = multiplexedSessionOperationsRatio;
  }

  // Synchronized so the counter is updated and read consistently across stream callbacks.
  private synchronized void incrementCloudTraceCheckCount() {
    cloudTraceCheckCount++;
  }

  private synchronized int getCloudTraceCheckCount() {
    return cloudTraceCheckCount;
  }

  /** Execute SpannerAsync action requests. */
  @Override
  public StreamObserver executeActionAsync(
      StreamObserver responseObserver) {
    // NOTE(review): the StreamObserver generic parameters appear to have been stripped from this
    // signature (likely <SpannerAsyncActionRequest> / <SpannerAsyncActionResponse>) — confirm
    // against the generated SpannerExecutorProxyGrpc base class.
    // Create a top-level OpenTelemetry span for streaming request.
    Tracer tracer = WorkerProxy.openTelemetrySdk.getTracer(CloudClientExecutor.class.getName());
    Span span = tracer.spanBuilder("java_systest_execute_actions_stream").setNoParent().startSpan();
    Scope scope = span.makeCurrent();

    final String traceId = span.getSpanContext().getTraceId();
    final boolean isSampled = span.getSpanContext().getTraceFlags().isSampled();
    AtomicBoolean requestHasReadOrQueryAction = new AtomicBoolean(false);

    CloudClientExecutor.ExecutionFlowContext executionContext =
        clientExecutor.new ExecutionFlowContext(responseObserver);
    return new StreamObserver() {
      @Override
      public void onNext(SpannerAsyncActionRequest request) {
        LOGGER.log(Level.INFO, String.format("Receiving request: \n%s", request));

        // Use Multiplexed sessions for all supported operations if the
        // multiplexedSessionOperationsRatio from command line is > 0.0
        if (multiplexedSessionOperationsRatio > 0.0) {
          SessionPoolOptions.Builder sessionPoolOptionsBuilder;
          if (request.getAction().getSpannerOptions().hasSessionPoolOptions()) {
            sessionPoolOptionsBuilder =
                request.getAction().getSpannerOptions().getSessionPoolOptions().toBuilder()
                    .setUseMultiplexed(true);
          } else {
            sessionPoolOptionsBuilder =
SessionPoolOptions.newBuilder().setUseMultiplexed(true); + } + + SpannerOptions.Builder optionsBuilder = + request.getAction().getSpannerOptions().toBuilder() + .setSessionPoolOptions(sessionPoolOptionsBuilder); + SpannerAction.Builder actionBuilder = + request.getAction().toBuilder().setSpannerOptions(optionsBuilder); + request = request.toBuilder().setAction(actionBuilder).build(); + LOGGER.log( + Level.INFO, + String.format("Updated request to set multiplexed session flag: \n%s", request)); + } + String actionName = request.getAction().getActionCase().toString(); + if (actionName == "READ" || actionName == "QUERY") { + requestHasReadOrQueryAction.set(true); + } + + Status status = clientExecutor.startHandlingRequest(request, executionContext); + if (!status.isOk()) { + LOGGER.log( + Level.WARNING, + "Failed to handle request, half closed", + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, status.getDescription())); + } + } + + @Override + public void onError(Throwable t) { + LOGGER.log(Level.WARNING, "Client ends the stream with error.", t); + executionContext.cleanup(); + } + + @Override + public void onCompleted() { + // Close the scope and end the span. 
+ scope.close(); + span.end(); + if (isSampled + && getCloudTraceCheckCount() < MAX_CLOUD_TRACE_CHECK_LIMIT + && requestHasReadOrQueryAction.get()) { + Future traceVerificationTask = + clientExecutor.getEndToEndTraceVerificationTask(traceId); + try { + LOGGER.log( + Level.INFO, + String.format("Starting end to end trace verification for trace_id:%s", traceId)); + Boolean isValidTrace = traceVerificationTask.get(); + incrementCloudTraceCheckCount(); + if (!isValidTrace) { + executionContext.onError( + Status.INTERNAL + .withDescription( + String.format( + "failed to verify end to end trace for trace_id: %s", traceId)) + .getCause()); + executionContext.cleanup(); + return; + } + } catch (Exception e) { + LOGGER.log( + Level.WARNING, + String.format( + "Failed to verify end to end trace with exception: %s\n", e.getMessage()), + e); + executionContext.onError(e); + executionContext.cleanup(); + return; + } + } + LOGGER.log(Level.INFO, "Client called Done, half closed"); + executionContext.cleanup(); + responseObserver.onCompleted(); + } + }; + } +} diff --git a/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudUtil.java b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudUtil.java new file mode 100644 index 000000000000..17b98bbdada8 --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudUtil.java @@ -0,0 +1,136 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.executor.spanner;

import static java.nio.charset.StandardCharsets.UTF_8;

import com.google.api.gax.grpc.GrpcTransportChannel;
import com.google.api.gax.rpc.FixedTransportChannelProvider;
import com.google.api.gax.rpc.TransportChannel;
import com.google.api.gax.rpc.TransportChannelProvider;
import com.google.cloud.spanner.spi.v1.TraceContextInterceptor;
import com.google.common.net.HostAndPort;
import io.grpc.ManagedChannelBuilder;
import io.grpc.netty.shaded.io.grpc.netty.GrpcSslContexts;
import io.grpc.netty.shaded.io.grpc.netty.InternalNettyChannelBuilder;
import io.grpc.netty.shaded.io.grpc.netty.NegotiationType;
import io.grpc.netty.shaded.io.grpc.netty.NettyChannelBuilder;
import io.grpc.netty.shaded.io.netty.handler.ssl.SslContext;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.net.URI;
import java.net.URISyntaxException;

/** Channel and certificate helpers for building test gRPC connections. */
public class CloudUtil {

  // If this is set too low, the peer server may return RESOURCE_EXHAUSTED errors if the response
  // error message causes the trailing headers to exceed this limit.
  private static final int GRPC_MAX_HEADER_LIST_SIZE_BYTES = 10 * 1024 * 1024;

  private static final String TEST_HOST_IN_CERT = "test-cert-2";

  /**
   * Builds a TransportChannelProvider for a localhost test endpoint on the given port, using
   * the certificate configured on WorkerProxy. Falls back to plaintext when
   * WorkerProxy.usePlainTextChannel is set.
   */
  public static TransportChannelProvider newChannelProviderHelper(int port) {
    NettyChannelBuilder builder =
        (NettyChannelBuilder)
            getChannelBuilderForTestGFE("localhost", port, WorkerProxy.cert, TEST_HOST_IN_CERT)
                .maxInboundMessageSize(100 * 1024 * 1024 /* 100 MB */);
    if (WorkerProxy.usePlainTextChannel) {
      builder.usePlaintext();
    }
    TransportChannel channel =
        GrpcTransportChannel.newBuilder()
            .setManagedChannel(
                builder.maxInboundMetadataSize(GRPC_MAX_HEADER_LIST_SIZE_BYTES).build())
            .build();
    return FixedTransportChannelProvider.create(channel);
  }

  /**
   * Builds a channel builder for a test GFE endpoint: TLS with a trust store from certPath,
   * authority overridden to hostInCert, and a trace-context interceptor attached.
   * NOTE(review): the raw ManagedChannelBuilder return type here likely lost a generic
   * wildcard (<?>) in transit — confirm against the original source.
   */
  public static ManagedChannelBuilder getChannelBuilderForTestGFE(
      String host, int sslPort, String certPath, String hostInCert) {
    SslContext sslContext;
    try {
      sslContext =
          GrpcSslContexts.forClient()
              .trustManager(CertUtil.copyCert(certPath))
              .ciphers(null)
              .build();
    } catch (IOException ex) {
      throw new RuntimeException(ex);
    }

    HostAndPort hostPort = HostAndPort.fromParts(host, sslPort);
    String target;
    try {
      // Build a dns:///host:port target URI for the channel.
      target = new URI("dns", "", "/" + hostPort, null).toString();
    } catch (URISyntaxException ex) {
      throw new RuntimeException(ex);
    }
    try {
      NettyChannelBuilder channelBuilder = NettyChannelBuilder.forTarget(target);
      // Authority checking is disabled because the test cert's hostname differs from `host`.
      InternalNettyChannelBuilder.disableCheckAuthority(channelBuilder);

      return channelBuilder
          .overrideAuthority(hostInCert)
          .sslContext(sslContext)
          .intercept(new TraceContextInterceptor(WorkerProxy.openTelemetrySdk))
          .negotiationType(NegotiationType.TLS);
    } catch (Throwable t) {
      throw new RuntimeException(t);
    }
  }

  static final class CertUtil {
    private CertUtil() {
      // prevent instantiation
    }

    /** Copies cert resource to file, stripping out PEM comments.
*/ + public static File copyCert(String certFileName) throws IOException { + File certFile = new File(certFileName); + File file = File.createTempFile("CAcert", "pem"); + file.deleteOnExit(); + try (BufferedReader in = + new BufferedReader(new InputStreamReader(new FileInputStream(certFile), UTF_8)); + Writer out = new OutputStreamWriter(new FileOutputStream(file), UTF_8)) { + String line; + do { + while ((line = in.readLine()) != null) { + if ("-----BEGIN CERTIFICATE-----".equals(line)) { + break; + } + } + out.append(line); + out.append("\n"); + while ((line = in.readLine()) != null) { + out.append(line); + out.append("\n"); + if ("-----END CERTIFICATE-----".equals(line)) { + break; + } + } + } while (line != null); + } + return file; + } + } +} diff --git a/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/WorkerProxy.java b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/WorkerProxy.java new file mode 100644 index 000000000000..0da30f82d23c --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/WorkerProxy.java @@ -0,0 +1,234 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.executor.spanner; + +import com.google.api.client.http.javanet.NetHttpTransport; +import com.google.auth.Credentials; +import com.google.auth.http.HttpTransportFactory; +import com.google.auth.oauth2.ServiceAccountCredentials; +import com.google.cloud.opentelemetry.trace.TraceConfiguration; +import com.google.cloud.opentelemetry.trace.TraceExporter; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import io.grpc.Server; +import io.grpc.ServerBuilder; +import io.grpc.protobuf.services.HealthStatusManager; +import io.grpc.protobuf.services.ProtoReflectionService; +import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator; +import io.opentelemetry.context.propagation.ContextPropagators; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.resources.Resource; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.export.BatchSpanProcessor; +import io.opentelemetry.sdk.trace.export.SpanExporter; +import io.opentelemetry.sdk.trace.samplers.Sampler; +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.IOException; +import java.util.logging.Level; +import java.util.logging.Logger; +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.CommandLineParser; +import org.apache.commons.cli.DefaultParser; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; +import org.apache.commons.io.FileUtils; + +/** + * Worker proxy for Java API. This is the main entry of the Java client proxy on cloud Spanner Java + * client. 
+ */ +public class WorkerProxy { + + private static final Logger LOGGER = Logger.getLogger(WorkerProxy.class.getName()); + + private static final String OPTION_SPANNER_PORT = "spanner_port"; + private static final String OPTION_PROXY_PORT = "proxy_port"; + private static final String OPTION_CERTIFICATE = "cert"; + private static final String OPTION_SERVICE_KEY_FILE = "service_key_file"; + private static final String OPTION_USE_PLAIN_TEXT_CHANNEL = "use_plain_text_channel"; + private static final String OPTION_ENABLE_GRPC_FAULT_INJECTOR = "enable_grpc_fault_injector"; + private static final String OPTION_MULTIPLEXED_SESSION_OPERATIONS_RATIO = + "multiplexed_session_operations_ratio"; + + public static int spannerPort = 0; + public static int proxyPort = 0; + public static String cert = ""; + public static String serviceKeyFile = ""; + public static double multiplexedSessionOperationsRatio = 0.0; + public static boolean usePlainTextChannel = false; + public static boolean enableGrpcFaultInjector = false; + public static OpenTelemetrySdk openTelemetrySdk; + + public static CommandLine commandLine; + + public static final String PROJECT_ID = "spanner-cloud-systest"; + public static final String CLOUD_TRACE_ENDPOINT = "staging-cloudtrace.sandbox.googleapis.com:443"; + + private static final int MIN_PORT = 0, MAX_PORT = 65535; + private static final double MIN_RATIO = 0.0, MAX_RATIO = 1.0, TRACE_SAMPLING_RATE = 0.01; + + public static OpenTelemetrySdk setupOpenTelemetrySdk() throws Exception { + // Read credentials from the serviceKeyFile. + HttpTransportFactory HTTP_TRANSPORT_FACTORY = NetHttpTransport::new; + Credentials credentials = + ServiceAccountCredentials.fromStream( + new ByteArrayInputStream(FileUtils.readFileToByteArray(new File(serviceKeyFile))), + HTTP_TRANSPORT_FACTORY); + + // OpenTelemetry configuration. 
+ SpanExporter spanExporter = + TraceExporter.createWithConfiguration( + TraceConfiguration.builder() + .setProjectId(PROJECT_ID) + .setCredentials(credentials) + .setTraceServiceEndpoint(CLOUD_TRACE_ENDPOINT) + .build()); + return OpenTelemetrySdk.builder() + .setTracerProvider( + SdkTracerProvider.builder() + .addSpanProcessor(BatchSpanProcessor.builder(spanExporter).build()) + .setResource(Resource.getDefault()) + .setSampler(Sampler.parentBased(Sampler.traceIdRatioBased(TRACE_SAMPLING_RATE))) + .build()) + .setPropagators(ContextPropagators.create(W3CTraceContextPropagator.getInstance())) + .build(); + } + + public static void main(String[] args) throws Exception { + // Enable OpenTelemetry metrics and traces before injecting Opentelemetry. + SpannerOptions.enableOpenTelemetryMetrics(); + SpannerOptions.enableOpenTelemetryTraces(); + + commandLine = buildOptions(args); + + if (!commandLine.hasOption(OPTION_SPANNER_PORT)) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Spanner proxyPort need to be assigned in order to start worker proxy."); + } + spannerPort = Integer.parseInt(commandLine.getOptionValue(OPTION_SPANNER_PORT)); + if (spannerPort < MIN_PORT || spannerPort > MAX_PORT) { + throw new IllegalArgumentException( + "Spanner proxyPort must be between " + MIN_PORT + " and " + MAX_PORT); + } + + if (!commandLine.hasOption(OPTION_PROXY_PORT)) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Proxy port need to be assigned in order to start worker proxy."); + } + proxyPort = Integer.parseInt(commandLine.getOptionValue(OPTION_PROXY_PORT)); + if (proxyPort < MIN_PORT || proxyPort > MAX_PORT) { + throw new IllegalArgumentException( + "Proxy port must be between " + MIN_PORT + " and " + MAX_PORT); + } + + if (!commandLine.hasOption(OPTION_CERTIFICATE)) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Certificate need to be assigned in order to start 
worker proxy."); + } + cert = commandLine.getOptionValue(OPTION_CERTIFICATE); + if (commandLine.hasOption(OPTION_SERVICE_KEY_FILE)) { + serviceKeyFile = commandLine.getOptionValue(OPTION_SERVICE_KEY_FILE); + } + + usePlainTextChannel = commandLine.hasOption(OPTION_USE_PLAIN_TEXT_CHANNEL); + enableGrpcFaultInjector = commandLine.hasOption(OPTION_ENABLE_GRPC_FAULT_INJECTOR); + + if (commandLine.hasOption(OPTION_MULTIPLEXED_SESSION_OPERATIONS_RATIO)) { + multiplexedSessionOperationsRatio = + Double.parseDouble( + commandLine.getOptionValue(OPTION_MULTIPLEXED_SESSION_OPERATIONS_RATIO)); + LOGGER.log( + Level.INFO, + String.format( + "Multiplexed session ratio from commandline arg: \n%s", + multiplexedSessionOperationsRatio)); + if (multiplexedSessionOperationsRatio < MIN_RATIO + || multiplexedSessionOperationsRatio > MAX_RATIO) { + throw new IllegalArgumentException( + "Spanner multiplexedSessionOperationsRatio must be between " + + MIN_RATIO + + " and " + + MAX_RATIO); + } + } + // Setup the OpenTelemetry for tracing. + openTelemetrySdk = setupOpenTelemetrySdk(); + + Server server; + while (true) { + try { + CloudExecutorImpl cloudExecutorImpl = + new CloudExecutorImpl(enableGrpcFaultInjector, multiplexedSessionOperationsRatio); + HealthStatusManager healthStatusManager = new HealthStatusManager(); + // Set up Cloud server. + server = + ServerBuilder.forPort(proxyPort) + .addService(cloudExecutorImpl) + .addService(ProtoReflectionService.newInstance()) + .addService(healthStatusManager.getHealthService()) + .build(); + server.start(); + LOGGER.log(Level.INFO, String.format("Server started on proxyPort: %d", proxyPort)); + } catch (IOException e) { + LOGGER.log( + Level.WARNING, String.format("Failed to start server on proxyPort %d", proxyPort), e); + continue; // We did not bind in time. Try another proxyPort. 
+ } + break; + } + server.awaitTermination(); + } + + private static CommandLine buildOptions(String[] args) { + Options options = new Options(); + + options.addOption( + null, OPTION_SPANNER_PORT, true, "Port of Spanner Frontend to which to send requests."); + options.addOption(null, OPTION_PROXY_PORT, true, "Proxy port to start worker proxy on."); + options.addOption( + null, OPTION_CERTIFICATE, true, "Certificate used to connect to Spanner GFE."); + options.addOption( + null, OPTION_SERVICE_KEY_FILE, true, "Service key file used to set authentication."); + options.addOption( + null, + OPTION_USE_PLAIN_TEXT_CHANNEL, + false, + "Use a plain text gRPC channel (intended for the Cloud Spanner Emulator)."); + options.addOption( + null, + OPTION_ENABLE_GRPC_FAULT_INJECTOR, + false, + "Enable grpc fault injector in cloud client executor."); + options.addOption( + null, + OPTION_MULTIPLEXED_SESSION_OPERATIONS_RATIO, + true, + "Ratio of operations to use multiplexed sessions."); + + CommandLineParser parser = new DefaultParser(); + try { + return parser.parse(options, args); + } catch (ParseException e) { + throw new IllegalArgumentException(e.getMessage()); + } + } +} diff --git a/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/SessionPoolOptionsHelper.java b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/SessionPoolOptionsHelper.java new file mode 100644 index 000000000000..9dd8ac295636 --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/SessionPoolOptionsHelper.java @@ -0,0 +1,47 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.InternalApi; + +/** + * Simple helper class to get access to a package-private method in the {@link + * com.google.cloud.spanner.SessionPoolOptions}. + */ +@InternalApi +public class SessionPoolOptionsHelper { + + // TODO: Remove when Builder.setUseMultiplexedSession(..) has been made public. + public static SessionPoolOptions.Builder setUseMultiplexedSession( + SessionPoolOptions.Builder sessionPoolOptionsBuilder, boolean useMultiplexedSession) { + return sessionPoolOptionsBuilder.setUseMultiplexedSession(useMultiplexedSession); + } + + // TODO: Remove when multiplexed session for read write is released. + public static SessionPoolOptions.Builder setUseMultiplexedSessionForRW( + SessionPoolOptions.Builder sessionPoolOptionsBuilder, boolean useMultiplexedSessionForRW) { + return sessionPoolOptionsBuilder.setUseMultiplexedSessionForRW(useMultiplexedSessionForRW); + } + + // TODO: Remove when multiplexed session for partitioned operations are released. 
+ public static SessionPoolOptions.Builder setUseMultiplexedSessionForPartitionedOperations( + SessionPoolOptions.Builder sessionPoolOptionsBuilder, + boolean useMultiplexedSessionForPartitionedOps) { + return sessionPoolOptionsBuilder.setUseMultiplexedSessionPartitionedOps( + useMultiplexedSessionForPartitionedOps); + } +} diff --git a/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/SpannerExecutorProxyClient.java b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/SpannerExecutorProxyClient.java new file mode 100644 index 000000000000..fd83e680e9cc --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/SpannerExecutorProxyClient.java @@ -0,0 +1,242 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.executor.v1; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.cloud.spanner.executor.v1.stub.SpannerExecutorProxyStub; +import com.google.cloud.spanner.executor.v1.stub.SpannerExecutorProxyStubSettings; +import com.google.spanner.executor.v1.SpannerAsyncActionRequest; +import com.google.spanner.executor.v1.SpannerAsyncActionResponse; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Service Description: Service that executes SpannerActions asynchronously. + * + *

This class provides the ability to make remote calls to the backing service through method + * calls that map to API methods. Sample code to get started: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (SpannerExecutorProxyClient spannerExecutorProxyClient =
+ *     SpannerExecutorProxyClient.create()) {
+ *   BidiStream bidiStream =
+ *       spannerExecutorProxyClient.executeActionAsyncCallable().call();
+ *   SpannerAsyncActionRequest request =
+ *       SpannerAsyncActionRequest.newBuilder()
+ *           .setActionId(198295492)
+ *           .setAction(SpannerAction.newBuilder().build())
+ *           .build();
+ *   bidiStream.send(request);
+ *   for (SpannerAsyncActionResponse response : bidiStream) {
+ *     // Do something when a response is received.
+ *   }
+ * }
+ * }
+ * + *

Note: close() needs to be called on the SpannerExecutorProxyClient object to clean up + * resources such as threads. In the example above, try-with-resources is used, which automatically + * calls close(). + * + * + * + * + * + * + * + * + * + * + * + * + * + *
Methods
MethodDescriptionMethod Variants

ExecuteActionAsync

ExecuteActionAsync is a streaming call that starts executing a new Spanner action. + *

For each request, the server will reply with one or more responses, but only the last response will contain status in the outcome. + *

Responses can be matched to requests by action_id. It is allowed to have multiple actions in flight--in that case, actions are be executed in parallel.

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • executeActionAsyncCallable() + *

+ *
+ * + *

See the individual methods for example code. + * + *

Many parameters require resource names to be formatted in a particular way. To assist with + * these names, this class includes a format method for each type of name, and additionally a parse + * method to extract the individual identifiers contained within names that are returned. + * + *

This class can be customized by passing in a custom instance of SpannerExecutorProxySettings + * to create(). For example: + * + *

To customize credentials: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * SpannerExecutorProxySettings spannerExecutorProxySettings =
+ *     SpannerExecutorProxySettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * SpannerExecutorProxyClient spannerExecutorProxyClient =
+ *     SpannerExecutorProxyClient.create(spannerExecutorProxySettings);
+ * }
+ * + *

To customize the endpoint: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * SpannerExecutorProxySettings spannerExecutorProxySettings =
+ *     SpannerExecutorProxySettings.newBuilder().setEndpoint(myEndpoint).build();
+ * SpannerExecutorProxyClient spannerExecutorProxyClient =
+ *     SpannerExecutorProxyClient.create(spannerExecutorProxySettings);
+ * }
+ * + *

Please refer to the GitHub repository's samples for more quickstart code snippets. + */ +@Generated("by gapic-generator-java") +public class SpannerExecutorProxyClient implements BackgroundResource { + private final SpannerExecutorProxySettings settings; + private final SpannerExecutorProxyStub stub; + + /** Constructs an instance of SpannerExecutorProxyClient with default settings. */ + public static final SpannerExecutorProxyClient create() throws IOException { + return create(SpannerExecutorProxySettings.newBuilder().build()); + } + + /** + * Constructs an instance of SpannerExecutorProxyClient, using the given settings. The channels + * are created based on the settings passed in, or defaults for any settings that are not set. + */ + public static final SpannerExecutorProxyClient create(SpannerExecutorProxySettings settings) + throws IOException { + return new SpannerExecutorProxyClient(settings); + } + + /** + * Constructs an instance of SpannerExecutorProxyClient, using the given stub for making calls. + * This is for advanced usage - prefer using create(SpannerExecutorProxySettings). + */ + public static final SpannerExecutorProxyClient create(SpannerExecutorProxyStub stub) { + return new SpannerExecutorProxyClient(stub); + } + + /** + * Constructs an instance of SpannerExecutorProxyClient, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. 
+ */ + protected SpannerExecutorProxyClient(SpannerExecutorProxySettings settings) throws IOException { + this.settings = settings; + this.stub = ((SpannerExecutorProxyStubSettings) settings.getStubSettings()).createStub(); + } + + protected SpannerExecutorProxyClient(SpannerExecutorProxyStub stub) { + this.settings = null; + this.stub = stub; + } + + public final SpannerExecutorProxySettings getSettings() { + return settings; + } + + public SpannerExecutorProxyStub getStub() { + return stub; + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * ExecuteActionAsync is a streaming call that starts executing a new Spanner action. + * + *

For each request, the server will reply with one or more responses, but only the last + * response will contain status in the outcome. + * + *

Responses can be matched to requests by action_id. It is allowed to have multiple actions in + * flight--in that case, actions are be executed in parallel. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerExecutorProxyClient spannerExecutorProxyClient =
+   *     SpannerExecutorProxyClient.create()) {
+   *   BidiStream bidiStream =
+   *       spannerExecutorProxyClient.executeActionAsyncCallable().call();
+   *   SpannerAsyncActionRequest request =
+   *       SpannerAsyncActionRequest.newBuilder()
+   *           .setActionId(198295492)
+   *           .setAction(SpannerAction.newBuilder().build())
+   *           .build();
+   *   bidiStream.send(request);
+   *   for (SpannerAsyncActionResponse response : bidiStream) {
+   *     // Do something when a response is received.
+   *   }
+   * }
+   * }
+ */ + public final BidiStreamingCallable + executeActionAsyncCallable() { + return stub.executeActionAsyncCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } +} diff --git a/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/SpannerExecutorProxySettings.java b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/SpannerExecutorProxySettings.java new file mode 100644 index 000000000000..ed4b3c70d653 --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/SpannerExecutorProxySettings.java @@ -0,0 +1,205 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.executor.v1; + +import com.google.api.core.ApiFunction; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.spanner.executor.v1.stub.SpannerExecutorProxyStubSettings; +import com.google.spanner.executor.v1.SpannerAsyncActionRequest; +import com.google.spanner.executor.v1.SpannerAsyncActionResponse; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link SpannerExecutorProxyClient}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (spanner-cloud-executor.googleapis.com) and default port (443) + * are used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of executeActionAsync: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * SpannerExecutorProxySettings.Builder spannerExecutorProxySettingsBuilder =
+ *     SpannerExecutorProxySettings.newBuilder();
+ * spannerExecutorProxySettingsBuilder
+ *     .executeActionAsyncSettings()
+ *     .setRetrySettings(
+ *         spannerExecutorProxySettingsBuilder
+ *             .executeActionAsyncSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * SpannerExecutorProxySettings spannerExecutorProxySettings =
+ *     spannerExecutorProxySettingsBuilder.build();
+ * }
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + */ +@Generated("by gapic-generator-java") +public class SpannerExecutorProxySettings extends ClientSettings { + + /** Returns the object with the settings used for calls to executeActionAsync. */ + public StreamingCallSettings + executeActionAsyncSettings() { + return ((SpannerExecutorProxyStubSettings) getStubSettings()).executeActionAsyncSettings(); + } + + public static final SpannerExecutorProxySettings create(SpannerExecutorProxyStubSettings stub) + throws IOException { + return new SpannerExecutorProxySettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return SpannerExecutorProxyStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return SpannerExecutorProxyStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return SpannerExecutorProxyStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return SpannerExecutorProxyStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default ChannelProvider for this service. 
*/ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return SpannerExecutorProxyStubSettings.defaultGrpcTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return SpannerExecutorProxyStubSettings.defaultTransportChannelProvider(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return SpannerExecutorProxyStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected SpannerExecutorProxySettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for SpannerExecutorProxySettings. */ + public static class Builder + extends ClientSettings.Builder { + + protected Builder() throws IOException { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(SpannerExecutorProxyStubSettings.newBuilder(clientContext)); + } + + protected Builder(SpannerExecutorProxySettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(SpannerExecutorProxyStubSettings.Builder stubSettings) { + super(stubSettings); + } + + private static Builder createDefault() { + return new Builder(SpannerExecutorProxyStubSettings.newBuilder()); + } + + public SpannerExecutorProxyStubSettings.Builder getStubSettingsBuilder() { + return ((SpannerExecutorProxyStubSettings.Builder) getStubSettings()); + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + /** Returns the builder for the settings used for calls to executeActionAsync. */ + public StreamingCallSettings.Builder + executeActionAsyncSettings() { + return getStubSettingsBuilder().executeActionAsyncSettings(); + } + + @Override + public SpannerExecutorProxySettings build() throws IOException { + return new SpannerExecutorProxySettings(this); + } + } +} diff --git a/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/gapic_metadata.json b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/gapic_metadata.json new file mode 100644 index 000000000000..91e72f27e576 --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/gapic_metadata.json @@ -0,0 +1,21 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "java", + "protoPackage": "google.spanner.executor.v1", + "libraryPackage": "com.google.cloud.spanner.executor.v1", + "services": { + "SpannerExecutorProxy": { + "clients": { + "grpc": { + "libraryClient": "SpannerExecutorProxyClient", + "rpcs": { + "ExecuteActionAsync": { + "methods": ["executeActionAsyncCallable"] + } + } + } + } + } + } +} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/package-info.java b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/package-info.java new file mode 100644 index 000000000000..f3f9883a079c --- /dev/null +++ 
b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/package-info.java @@ -0,0 +1,53 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A client to Cloud Spanner Executor test API + * + *

The interfaces provided are listed below, along with usage samples. + * + *

======================= SpannerExecutorProxyClient ======================= + * + *

Service Description: Service that executes SpannerActions asynchronously. + * + *

Sample for SpannerExecutorProxyClient: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (SpannerExecutorProxyClient spannerExecutorProxyClient =
+ *     SpannerExecutorProxyClient.create()) {
+ *   BidiStream bidiStream =
+ *       spannerExecutorProxyClient.executeActionAsyncCallable().call();
+ *   SpannerAsyncActionRequest request =
+ *       SpannerAsyncActionRequest.newBuilder()
+ *           .setActionId(198295492)
+ *           .setAction(SpannerAction.newBuilder().build())
+ *           .build();
+ *   bidiStream.send(request);
+ *   for (SpannerAsyncActionResponse response : bidiStream) {
+ *     // Do something when a response is received.
+ *   }
+ * }
+ * }
+ */ +@Generated("by gapic-generator-java") +package com.google.cloud.spanner.executor.v1; + +import javax.annotation.Generated; diff --git a/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/GrpcSpannerExecutorProxyCallableFactory.java b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/GrpcSpannerExecutorProxyCallableFactory.java new file mode 100644 index 000000000000..2bdb97650365 --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/GrpcSpannerExecutorProxyCallableFactory.java @@ -0,0 +1,113 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.executor.v1.stub; + +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC callable factory implementation for the SpannerExecutorProxy service API. + * + *

This class is for advanced usage. + */ +@Generated("by gapic-generator-java") +public class GrpcSpannerExecutorProxyCallableFactory implements GrpcStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, callSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( + 
GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } +} diff --git a/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/GrpcSpannerExecutorProxyStub.java b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/GrpcSpannerExecutorProxyStub.java new file mode 100644 index 000000000000..56a1d3ff55ad --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/GrpcSpannerExecutorProxyStub.java @@ -0,0 +1,163 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.executor.v1.stub; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.longrunning.stub.GrpcOperationsStub; +import com.google.spanner.executor.v1.SpannerAsyncActionRequest; +import com.google.spanner.executor.v1.SpannerAsyncActionResponse; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC stub implementation for the SpannerExecutorProxy service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public class GrpcSpannerExecutorProxyStub extends SpannerExecutorProxyStub { + private static final MethodDescriptor + executeActionAsyncMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName( + "google.spanner.executor.v1.SpannerExecutorProxy/ExecuteActionAsync") + .setRequestMarshaller( + ProtoUtils.marshaller(SpannerAsyncActionRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(SpannerAsyncActionResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private final BidiStreamingCallable + executeActionAsyncCallable; + + private final BackgroundResource backgroundResources; + private final GrpcOperationsStub operationsStub; + private final GrpcStubCallableFactory callableFactory; + + public static final GrpcSpannerExecutorProxyStub create(SpannerExecutorProxyStubSettings settings) + throws IOException { + return new GrpcSpannerExecutorProxyStub(settings, ClientContext.create(settings)); + } + + public static final GrpcSpannerExecutorProxyStub create(ClientContext clientContext) + throws IOException { + return new GrpcSpannerExecutorProxyStub( + SpannerExecutorProxyStubSettings.newBuilder().build(), clientContext); + } + + public static final GrpcSpannerExecutorProxyStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcSpannerExecutorProxyStub( + SpannerExecutorProxyStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcSpannerExecutorProxyStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. 
+ */ + protected GrpcSpannerExecutorProxyStub( + SpannerExecutorProxyStubSettings settings, ClientContext clientContext) throws IOException { + this(settings, clientContext, new GrpcSpannerExecutorProxyCallableFactory()); + } + + /** + * Constructs an instance of GrpcSpannerExecutorProxyStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected GrpcSpannerExecutorProxyStub( + SpannerExecutorProxyStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.operationsStub = GrpcOperationsStub.create(clientContext, callableFactory); + + GrpcCallSettings + executeActionAsyncTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(executeActionAsyncMethodDescriptor) + .build(); + + this.executeActionAsyncCallable = + callableFactory.createBidiStreamingCallable( + executeActionAsyncTransportSettings, + settings.executeActionAsyncSettings(), + clientContext); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + public GrpcOperationsStub getOperationsStub() { + return operationsStub; + } + + @Override + public BidiStreamingCallable + executeActionAsyncCallable() { + return executeActionAsyncCallable; + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + 
} + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/SpannerExecutorProxyStub.java b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/SpannerExecutorProxyStub.java new file mode 100644 index 000000000000..8c932e57f959 --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/SpannerExecutorProxyStub.java @@ -0,0 +1,41 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.executor.v1.stub; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.spanner.executor.v1.SpannerAsyncActionRequest; +import com.google.spanner.executor.v1.SpannerAsyncActionResponse; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Base stub class for the SpannerExecutorProxy service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public abstract class SpannerExecutorProxyStub implements BackgroundResource { + + public BidiStreamingCallable + executeActionAsyncCallable() { + throw new UnsupportedOperationException("Not implemented: executeActionAsyncCallable()"); + } + + @Override + public abstract void close(); +} diff --git a/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/SpannerExecutorProxyStubSettings.java b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/SpannerExecutorProxyStubSettings.java new file mode 100644 index 000000000000..132282528fa3 --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/SpannerExecutorProxyStubSettings.java @@ -0,0 +1,298 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.executor.v1.stub; + +import com.google.api.core.ApiFunction; +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.LibraryMetadata; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import com.google.spanner.executor.v1.SpannerAsyncActionRequest; +import com.google.spanner.executor.v1.SpannerAsyncActionResponse; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link SpannerExecutorProxyStub}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (spanner-cloud-executor.googleapis.com) and default port (443) + * are used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of executeActionAsync: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * SpannerExecutorProxyStubSettings.Builder spannerExecutorProxySettingsBuilder =
+ *     SpannerExecutorProxyStubSettings.newBuilder();
+ * spannerExecutorProxySettingsBuilder
+ *     .executeActionAsyncSettings()
+ *     .setRetrySettings(
+ *         spannerExecutorProxySettingsBuilder
+ *             .executeActionAsyncSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * SpannerExecutorProxyStubSettings spannerExecutorProxySettings =
+ *     spannerExecutorProxySettingsBuilder.build();
+ * }
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + */ +@Generated("by gapic-generator-java") +@SuppressWarnings("CanonicalDuration") +public class SpannerExecutorProxyStubSettings + extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder().build(); + + private final StreamingCallSettings + executeActionAsyncSettings; + + /** Returns the object with the settings used for calls to executeActionAsync. */ + public StreamingCallSettings + executeActionAsyncSettings() { + return executeActionAsyncSettings; + } + + public SpannerExecutorProxyStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcSpannerExecutorProxyStub.create(this); + } + throw new UnsupportedOperationException( + String.format( + "Transport not supported: %s", getTransportChannelProvider().getTransportName())); + } + + /** Returns the default service name. */ + @Override + public String getServiceName() { + return "spanner-cloud-executor"; + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") + public static String getDefaultEndpoint() { + return "spanner-cloud-executor.googleapis.com:443"; + } + + /** Returns the default mTLS service endpoint. */ + public static String getDefaultMtlsEndpoint() { + return "spanner-cloud-executor.mtls.googleapis.com:443"; + } + + /** Returns the default service scopes. 
*/ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder() + .setScopesToApply(DEFAULT_SERVICE_SCOPES) + .setUseJwtAccessWithScope(true); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder() + .setMaxInboundMessageSize(Integer.MAX_VALUE); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(SpannerExecutorProxyStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. 
*/ + public Builder toBuilder() { + return new Builder(this); + } + + protected SpannerExecutorProxyStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + executeActionAsyncSettings = settingsBuilder.executeActionAsyncSettings().build(); + } + + @Override + protected LibraryMetadata getLibraryMetadata() { + return LibraryMetadata.newBuilder() + .setArtifactName("com.google.cloud:google-cloud-spanner") + .setRepository("googleapis/google-cloud-java") + .setVersion(Version.VERSION) + .build(); + } + + /** Builder for SpannerExecutorProxyStubSettings. */ + public static class Builder + extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + private final StreamingCallSettings.Builder< + SpannerAsyncActionRequest, SpannerAsyncActionResponse> + executeActionAsyncSettings; + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put("no_retry_codes", ImmutableSet.copyOf(Lists.newArrayList())); + RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = RetrySettings.newBuilder().setRpcTimeoutMultiplier(1.0).build(); + definitions.put("no_retry_params", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + executeActionAsyncSettings = StreamingCallSettings.newBuilder(); + + unaryMethodSettingsBuilders = ImmutableList.>of(); + initDefaults(this); + } + + protected Builder(SpannerExecutorProxyStubSettings settings) { + super(settings); + + executeActionAsyncSettings = settings.executeActionAsyncSettings.toBuilder(); + + unaryMethodSettingsBuilders = ImmutableList.>of(); + } + + 
private static Builder createDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + return builder; + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to executeActionAsync. */ + public StreamingCallSettings.Builder + executeActionAsyncSettings() { + return executeActionAsyncSettings; + } + + @Override + public SpannerExecutorProxyStubSettings build() throws IOException { + return new SpannerExecutorProxyStubSettings(this); + } + } +} diff --git a/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/Version.java b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/Version.java new file mode 100644 index 000000000000..e5f367695de1 --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/Version.java @@ -0,0 +1,27 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.executor.v1.stub; + +import com.google.api.core.InternalApi; + +@InternalApi("For internal use only") +final class Version { + // {x-version-update-start:google-cloud-spanner:current} + static final String VERSION = "0.0.0-SNAPSHOT"; + // {x-version-update-end} + +} diff --git a/java-spanner/google-cloud-spanner-executor/src/main/resources/META-INF/native-image/com.google.cloud.spanner.executor.v1/reflect-config.json b/java-spanner/google-cloud-spanner-executor/src/main/resources/META-INF/native-image/com.google.cloud.spanner.executor.v1/reflect-config.json new file mode 100644 index 000000000000..02102d0112e3 --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/src/main/resources/META-INF/native-image/com.google.cloud.spanner.executor.v1/reflect-config.json @@ -0,0 +1,6689 @@ +[ + { + "name": "com.google.api.ClientLibraryDestination", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibraryOrganization", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.FieldBehavior", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.LaunchStage", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + 
"name": "com.google.api.PhpSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$History", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Style", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfigDelta", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfigDelta$Action", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfigDelta$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditLogConfig", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditLogConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditLogConfig$LogType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.Binding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.Binding$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.BindingDelta", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.BindingDelta$Action", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.BindingDelta$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + 
"name": "com.google.iam.v1.GetIamPolicyRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.GetIamPolicyRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.GetPolicyOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.GetPolicyOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.Policy", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.Policy$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.PolicyDelta", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.PolicyDelta$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.SetIamPolicyRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.SetIamPolicyRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.TestIamPermissionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.TestIamPermissionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.TestIamPermissionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.TestIamPermissionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.CancelOperationRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.CancelOperationRequest$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.DeleteOperationRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.DeleteOperationRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.GetOperationRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.GetOperationRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.ListOperationsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.ListOperationsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.ListOperationsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.ListOperationsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.Operation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.Operation$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.OperationInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.OperationInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.WaitOperationRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.WaitOperationRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Any", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Any$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$Edition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$VerificationState", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$EnforceNamingStyle", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$EnumType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$FieldPresence", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$JsonFormat", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$MessageEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$RepeatedFieldEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Utf8Validation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$VisibilityFeature", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$VisibilityFeature$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$FeatureSet$VisibilityFeature$DefaultSymbolVisibility", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Label", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$CType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, 
+ { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$FeatureSupport", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$FeatureSupport$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$JSType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionRetention", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionTargetType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + 
"name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$OptimizeMode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Semantic", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$MethodOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$IdempotencyLevel", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$ServiceDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SymbolVisibility", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Empty", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Empty$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.FieldMask", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.FieldMask$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.ListValue", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.ListValue$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.NullValue", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.Struct", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Struct$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Value", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Value$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.Status", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.Status$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + 
"name": "com.google.spanner.admin.database.v1.AddSplitPointsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.AddSplitPointsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.AddSplitPointsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.AddSplitPointsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.Backup", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.Backup$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.Backup$State", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupInfo", 
+ "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupInstancePartition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupInstancePartition$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupSchedule", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupSchedule$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupScheduleSpec", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupScheduleSpec$Builder", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig$EncryptionType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CopyBackupMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CopyBackupMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CopyBackupRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CopyBackupRequest$Builder", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig$EncryptionType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupRequest$Builder", + "queryAllDeclaredConstructors": 
true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupScheduleRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupScheduleRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateDatabaseMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateDatabaseMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateDatabaseRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateDatabaseRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CrontabSpec", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CrontabSpec$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.Database", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.Database$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.Database$State", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DatabaseDialect", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DatabaseRole", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DatabaseRole$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DdlStatementActionInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DdlStatementActionInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DeleteBackupRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DeleteBackupRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DropDatabaseRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DropDatabaseRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.EncryptionConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.EncryptionConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.EncryptionInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.EncryptionInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.EncryptionInfo$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.FullBackupSpec", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.FullBackupSpec$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.GetBackupRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.GetBackupRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.GetBackupScheduleRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.GetBackupScheduleRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.GetDatabaseDdlRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.GetDatabaseDdlRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true 
+ }, + { + "name": "com.google.spanner.admin.database.v1.GetDatabaseDdlResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.GetDatabaseDdlResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.GetDatabaseRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.GetDatabaseRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.IncrementalBackupSpec", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.IncrementalBackupSpec$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + 
"name": "com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupOperationsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupOperationsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupOperationsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupOperationsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupSchedulesRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupSchedulesRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupSchedulesResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupSchedulesResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabaseRolesRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabaseRolesRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabaseRolesResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabaseRolesResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabasesRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabasesRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabasesResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabasesResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.OperationProgress", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": 
true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.OperationProgress$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig$EncryptionType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.RestoreDatabaseMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.RestoreDatabaseMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.RestoreDatabaseRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.RestoreDatabaseRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.RestoreInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.RestoreInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.RestoreSourceType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.SplitPoints", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.SplitPoints$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.SplitPoints$Key", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.SplitPoints$Key$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateBackupRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateBackupRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateDatabaseMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateDatabaseMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateDatabaseRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateDatabaseRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AsymmetricAutoscalingOption", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AsymmetricAutoscalingOption$AutoscalingConfigOverrides", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AsymmetricAutoscalingOption$AutoscalingConfigOverrides$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AsymmetricAutoscalingOption$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AutoscalingLimits", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AutoscalingLimits$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AutoscalingTargets", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AutoscalingTargets$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstanceMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstanceMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": 
true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstanceRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstanceRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": 
true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.DeleteInstanceRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.DeleteInstanceRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.FreeInstanceMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.FreeInstanceMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.FreeInstanceMetadata$ExpireBehavior", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.FulfillmentPeriod", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.GetInstanceConfigRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.GetInstanceConfigRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.GetInstancePartitionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.GetInstancePartitionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.GetInstanceRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.GetInstanceRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.Instance", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.Instance$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + 
{ + "name": "com.google.spanner.admin.instance.v1.Instance$DefaultBackupScheduleType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.Instance$Edition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.Instance$InstanceType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.Instance$State", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.InstanceConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.InstanceConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.InstanceConfig$FreeInstanceAvailability", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.spanner.admin.instance.v1.InstanceConfig$QuorumType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.InstanceConfig$State", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.InstanceConfig$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.InstancePartition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.InstancePartition$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.InstancePartition$State", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": 
true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancesRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancesRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancesResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancesResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.OperationProgress", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.OperationProgress$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaComputeCapacity", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaComputeCapacity$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaInfo$ReplicaType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaSelection", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaSelection$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstanceMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstanceMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstanceRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstanceRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.AdaptMessageAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.AdaptMessageAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.AddSplitPointsAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.AddSplitPointsAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.AdminAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.AdminAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.AdminResult", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.AdminResult$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.BatchDmlAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.BatchDmlAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.spanner.executor.v1.BatchPartition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.BatchPartition$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CancelOperationAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CancelOperationAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ChangeStreamRecord", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ChangeStreamRecord$Builder", 
+ "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ChildPartitionsRecord", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ChildPartitionsRecord$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ChildPartitionsRecord$ChildPartition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ChildPartitionsRecord$ChildPartition$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CloseBatchTransactionAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CloseBatchTransactionAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CloudBackupResponse", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CloudBackupResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CloudDatabaseResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CloudDatabaseResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CloudInstanceConfigResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CloudInstanceConfigResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CloudInstanceResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CloudInstanceResponse$Builder", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ColumnMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ColumnMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.Concurrency", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.Concurrency$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CopyCloudBackupAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CopyCloudBackupAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CreateCloudBackupAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": 
true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CreateCloudBackupAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CreateCloudDatabaseAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CreateCloudDatabaseAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CreateCloudInstanceAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CreateCloudInstanceAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CreateUserInstanceConfigAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.CreateUserInstanceConfigAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.DataChangeRecord", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.DataChangeRecord$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.DataChangeRecord$ColumnType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.DataChangeRecord$ColumnType$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.DataChangeRecord$Mod", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.DataChangeRecord$Mod$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.DeleteCloudBackupAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + 
{ + "name": "com.google.spanner.executor.v1.DeleteCloudBackupAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.DeleteCloudInstanceAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.DeleteCloudInstanceAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.DeleteUserInstanceConfigAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.DeleteUserInstanceConfigAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.DmlAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.DmlAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.spanner.executor.v1.DropCloudDatabaseAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.DropCloudDatabaseAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ExecuteChangeStreamQuery", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ExecuteChangeStreamQuery$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ExecutePartitionAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ExecutePartitionAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.FinishTransactionAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.spanner.executor.v1.FinishTransactionAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.FinishTransactionAction$Mode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.GetCloudBackupAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.spanner.executor.v1.GetCloudBackupAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.GetCloudDatabaseAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.GetCloudDatabaseAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.GetCloudInstanceAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.GetCloudInstanceAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.GetCloudInstanceConfigAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.GetCloudInstanceConfigAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.spanner.executor.v1.GetOperationAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.GetOperationAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.HeartbeatRecord", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.HeartbeatRecord$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.KeyRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.KeyRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.KeyRange$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.KeySet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": 
true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.KeySet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ListCloudBackupOperationsAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ListCloudBackupOperationsAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ListCloudBackupsAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ListCloudBackupsAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ListCloudDatabasesAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ListCloudDatabasesAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ListCloudInstanceConfigsAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ListCloudInstanceConfigsAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ListCloudInstancesAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ListCloudInstancesAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.MutationAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.MutationAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.MutationAction$InsertArgs", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.MutationAction$InsertArgs$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.MutationAction$Mod", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.MutationAction$Mod$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.MutationAction$UpdateArgs", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.MutationAction$UpdateArgs$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": 
true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.OperationResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.OperationResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.PartitionedUpdateAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.PartitionedUpdateAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.PartitionedUpdateAction$ExecutePartitionedUpdateOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.PartitionedUpdateAction$ExecutePartitionedUpdateOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.QueryAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.QueryAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.QueryAction$Parameter", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.QueryAction$Parameter$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.QueryCancellationAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.QueryCancellationAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.QueryResult", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.QueryResult$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.spanner.executor.v1.ReadAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ReadAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ReadResult", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ReadResult$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.RestoreCloudDatabaseAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.RestoreCloudDatabaseAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.SessionPoolOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.SessionPoolOptions$Builder", + "queryAllDeclaredConstructors": true, 
+ "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.SpannerAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.SpannerAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.SpannerActionOutcome", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.SpannerActionOutcome$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.SpannerAsyncActionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.SpannerAsyncActionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.SpannerAsyncActionResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": 
true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.SpannerAsyncActionResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.SpannerOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.SpannerOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.StartBatchTransactionAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.StartBatchTransactionAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.StartTransactionAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.StartTransactionAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.TableMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.TableMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.TransactionExecutionOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.TransactionExecutionOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.UpdateCloudBackupAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.UpdateCloudBackupAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.UpdateCloudDatabaseAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, 
+ { + "name": "com.google.spanner.executor.v1.UpdateCloudDatabaseAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.UpdateCloudInstanceAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.UpdateCloudInstanceAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.UpdateUserInstanceConfigAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.UpdateUserInstanceConfigAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + 
"name": "com.google.spanner.executor.v1.Value", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.Value$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ValueList", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.ValueList$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.WriteMutationsAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.WriteMutationsAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BatchCreateSessionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BatchCreateSessionsRequest$Builder", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BatchCreateSessionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BatchCreateSessionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BatchWriteRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BatchWriteRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BatchWriteRequest$MutationGroup", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BatchWriteRequest$MutationGroup$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BatchWriteResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BatchWriteResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BeginTransactionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BeginTransactionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.CacheUpdate", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.CacheUpdate$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.CommitRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.CommitRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.CommitResponse", + "queryAllDeclaredConstructors": true, 
+ "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.CommitResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.CommitResponse$CommitStats", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.CommitResponse$CommitStats$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.CreateSessionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.CreateSessionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.DeleteSessionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.DeleteSessionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.DirectedReadOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.DirectedReadOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.DirectedReadOptions$ExcludeReplicas", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.DirectedReadOptions$ExcludeReplicas$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.DirectedReadOptions$IncludeReplicas", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.DirectedReadOptions$IncludeReplicas$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.DirectedReadOptions$ReplicaSelection", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, 
+ { + "name": "com.google.spanner.v1.DirectedReadOptions$ReplicaSelection$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.DirectedReadOptions$ReplicaSelection$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ExecuteBatchDmlRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ExecuteBatchDmlRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ExecuteBatchDmlRequest$Statement", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ExecuteBatchDmlRequest$Statement$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ExecuteBatchDmlResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.spanner.v1.ExecuteBatchDmlResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ExecuteSqlRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ExecuteSqlRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ExecuteSqlRequest$QueryMode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ExecuteSqlRequest$QueryOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ExecuteSqlRequest$QueryOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.GetSessionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.GetSessionRequest$Builder", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Group", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Group$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.KeyRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.KeyRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.KeyRecipe", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.KeyRecipe$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.KeyRecipe$Part", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.spanner.v1.KeyRecipe$Part$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.KeyRecipe$Part$NullOrder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.KeyRecipe$Part$Order", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.KeySet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.KeySet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ListSessionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ListSessionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ListSessionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ListSessionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.MultiplexedSessionPrecommitToken", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.MultiplexedSessionPrecommitToken$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Mutation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Mutation$Ack", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Mutation$Ack$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Mutation$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Mutation$Delete", 
+ "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Mutation$Delete$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Mutation$Send", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Mutation$Send$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Mutation$Write", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Mutation$Write$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PartialResultSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PartialResultSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": 
true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Partition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Partition$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PartitionOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PartitionOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PartitionQueryRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PartitionQueryRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PartitionReadRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PartitionReadRequest$Builder", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PartitionResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PartitionResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PlanNode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PlanNode$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PlanNode$ChildLink", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PlanNode$ChildLink$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PlanNode$Kind", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.spanner.v1.PlanNode$ShortRepresentation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PlanNode$ShortRepresentation$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.QueryAdvisorResult", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.QueryAdvisorResult$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.QueryAdvisorResult$IndexAdvice", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.QueryAdvisorResult$IndexAdvice$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.QueryPlan", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.QueryPlan$Builder", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Range", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Range$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ReadRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ReadRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ReadRequest$LockHint", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ReadRequest$OrderBy", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RecipeList", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.spanner.v1.RecipeList$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RequestOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RequestOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RequestOptions$ClientContext", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RequestOptions$ClientContext$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RequestOptions$Priority", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ResultSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ResultSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ResultSetMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ResultSetMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ResultSetStats", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ResultSetStats$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RollbackRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RollbackRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RoutingHint", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.spanner.v1.RoutingHint$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RoutingHint$SkippedTablet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RoutingHint$SkippedTablet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Session", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Session$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.StructType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.StructType$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.StructType$Field", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.StructType$Field$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Tablet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Tablet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Tablet$Role", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Transaction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Transaction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionOptions$Builder", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionOptions$IsolationLevel", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionOptions$PartitionedDml", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionOptions$PartitionedDml$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionOptions$ReadOnly", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionOptions$ReadOnly$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionOptions$ReadWrite", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionOptions$ReadWrite$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": 
true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionOptions$ReadWrite$ReadLockMode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionSelector", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionSelector$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Type$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TypeAnnotationCode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TypeCode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.type.Expr", + "queryAllDeclaredConstructors": 
true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.type.Expr$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + } +] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner-executor/src/main/resources/META-INF/services/io.grpc.LoadBalancerProvider b/java-spanner/google-cloud-spanner-executor/src/main/resources/META-INF/services/io.grpc.LoadBalancerProvider new file mode 100644 index 000000000000..bbc367f8fc5e --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/src/main/resources/META-INF/services/io.grpc.LoadBalancerProvider @@ -0,0 +1 @@ +io.grpc.internal.PickFirstLoadBalancerProvider diff --git a/java-spanner/google-cloud-spanner-executor/src/test/java/com/google/cloud/spanner/executor/v1/MockSpannerExecutorProxy.java b/java-spanner/google-cloud-spanner-executor/src/test/java/com/google/cloud/spanner/executor/v1/MockSpannerExecutorProxy.java new file mode 100644 index 000000000000..95aaa8fea228 --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/src/test/java/com/google/cloud/spanner/executor/v1/MockSpannerExecutorProxy.java @@ -0,0 +1,59 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.executor.v1; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.protobuf.AbstractMessage; +import io.grpc.ServerServiceDefinition; +import java.util.List; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockSpannerExecutorProxy implements MockGrpcService { + private final MockSpannerExecutorProxyImpl serviceImpl; + + public MockSpannerExecutorProxy() { + serviceImpl = new MockSpannerExecutorProxyImpl(); + } + + @Override + public List getRequests() { + return serviceImpl.getRequests(); + } + + @Override + public void addResponse(AbstractMessage response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addException(exception); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } +} diff --git a/java-spanner/google-cloud-spanner-executor/src/test/java/com/google/cloud/spanner/executor/v1/MockSpannerExecutorProxyImpl.java b/java-spanner/google-cloud-spanner-executor/src/test/java/com/google/cloud/spanner/executor/v1/MockSpannerExecutorProxyImpl.java new file mode 100644 index 000000000000..9e5c02c6ba54 --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/src/test/java/com/google/cloud/spanner/executor/v1/MockSpannerExecutorProxyImpl.java @@ -0,0 +1,100 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.executor.v1; + +import com.google.api.core.BetaApi; +import com.google.protobuf.AbstractMessage; +import com.google.spanner.executor.v1.SpannerAsyncActionRequest; +import com.google.spanner.executor.v1.SpannerAsyncActionResponse; +import com.google.spanner.executor.v1.SpannerExecutorProxyGrpc.SpannerExecutorProxyImplBase; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockSpannerExecutorProxyImpl extends SpannerExecutorProxyImplBase { + private List requests; + private Queue responses; + + public MockSpannerExecutorProxyImpl() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + public List getRequests() { + return requests; + } + + public void addResponse(AbstractMessage response) { + responses.add(response); + } + + public void setResponses(List responses) { + this.responses = new LinkedList(responses); + } + + public void addException(Exception exception) { + responses.add(exception); + } + + public void reset() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + @Override + public StreamObserver executeActionAsync( + final StreamObserver responseObserver) { + StreamObserver requestObserver = + new StreamObserver() { + @Override + public void onNext(SpannerAsyncActionRequest value) { + requests.add(value); + final Object response = responses.remove(); + if 
(response instanceof SpannerAsyncActionResponse) { + responseObserver.onNext(((SpannerAsyncActionResponse) response)); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ExecuteActionAsync, expected %s" + + " or %s", + response == null ? "null" : response.getClass().getName(), + SpannerAsyncActionResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void onError(Throwable t) { + responseObserver.onError(t); + } + + @Override + public void onCompleted() { + responseObserver.onCompleted(); + } + }; + return requestObserver; + } +} diff --git a/java-spanner/google-cloud-spanner-executor/src/test/java/com/google/cloud/spanner/executor/v1/SpannerExecutorProxyClientTest.java b/java-spanner/google-cloud-spanner-executor/src/test/java/com/google/cloud/spanner/executor/v1/SpannerExecutorProxyClientTest.java new file mode 100644 index 000000000000..8b74f5ad9362 --- /dev/null +++ b/java-spanner/google-cloud-spanner-executor/src/test/java/com/google/cloud/spanner/executor/v1/SpannerExecutorProxyClientTest.java @@ -0,0 +1,141 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.executor.v1; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.grpc.testing.MockStreamObserver; +import com.google.api.gax.rpc.ApiStreamObserver; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.StatusCode; +import com.google.spanner.executor.v1.SpannerAction; +import com.google.spanner.executor.v1.SpannerActionOutcome; +import com.google.spanner.executor.v1.SpannerAsyncActionRequest; +import com.google.spanner.executor.v1.SpannerAsyncActionResponse; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import javax.annotation.Generated; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@Generated("by gapic-generator-java") +public class SpannerExecutorProxyClientTest { + private static MockServiceHelper mockServiceHelper; + private static MockSpannerExecutorProxy mockSpannerExecutorProxy; + private LocalChannelProvider channelProvider; + private SpannerExecutorProxyClient client; + + @BeforeClass + public static void startStaticServer() { + mockSpannerExecutorProxy = new MockSpannerExecutorProxy(); + mockServiceHelper = + new MockServiceHelper( + UUID.randomUUID().toString(), Arrays.asList(mockSpannerExecutorProxy)); + mockServiceHelper.start(); + } + + @AfterClass + public static void stopServer() { + mockServiceHelper.stop(); + } + + @Before + public void setUp() throws IOException { + mockServiceHelper.reset(); + channelProvider = mockServiceHelper.createChannelProvider(); 
+ SpannerExecutorProxySettings settings = + SpannerExecutorProxySettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = SpannerExecutorProxyClient.create(settings); + } + + @After + public void tearDown() throws Exception { + client.close(); + } + + @Test + public void executeActionAsyncTest() throws Exception { + SpannerAsyncActionResponse expectedResponse = + SpannerAsyncActionResponse.newBuilder() + .setActionId(198295492) + .setOutcome(SpannerActionOutcome.newBuilder().build()) + .build(); + mockSpannerExecutorProxy.addResponse(expectedResponse); + SpannerAsyncActionRequest request = + SpannerAsyncActionRequest.newBuilder() + .setActionId(198295492) + .setAction(SpannerAction.newBuilder().build()) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + BidiStreamingCallable callable = + client.executeActionAsyncCallable(); + ApiStreamObserver requestObserver = + callable.bidiStreamingCall(responseObserver); + + requestObserver.onNext(request); + requestObserver.onCompleted(); + + List actualResponses = responseObserver.future().get(); + Assert.assertEquals(1, actualResponses.size()); + Assert.assertEquals(expectedResponse, actualResponses.get(0)); + } + + @Test + public void executeActionAsyncExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpannerExecutorProxy.addException(exception); + SpannerAsyncActionRequest request = + SpannerAsyncActionRequest.newBuilder() + .setActionId(198295492) + .setAction(SpannerAction.newBuilder().build()) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + BidiStreamingCallable callable = + client.executeActionAsyncCallable(); + ApiStreamObserver requestObserver = + callable.bidiStreamingCall(responseObserver); + + requestObserver.onNext(request); + + try { + List actualResponses = 
responseObserver.future().get(); + Assert.fail("No exception thrown"); + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } +} diff --git a/java-spanner/google-cloud-spanner/clirr-ignored-differences.xml b/java-spanner/google-cloud-spanner/clirr-ignored-differences.xml new file mode 100644 index 000000000000..a57bf40d1ba8 --- /dev/null +++ b/java-spanner/google-cloud-spanner/clirr-ignored-differences.xml @@ -0,0 +1,1142 @@ + + + + + 7012 + com/google/cloud/spanner/connection/Connection + com.google.cloud.spanner.Dialect getDialect() + + + 7012 + com/google/cloud/spanner/DatabaseClient + com.google.cloud.spanner.Dialect getDialect() + + + 7012 + com/google/cloud/spanner/BatchReadOnlyTransaction + void cleanup() + + + 8001 + com/google/cloud/spanner/connection/StatementParser + + + + 7002 + com/google/cloud/spanner/Session + void prepareReadWriteTransaction() + + + 7002 + com/google/cloud/spanner/SpannerOptions + com.google.cloud.spanner.Dialect getDialect() + + + 7002 + com/google/cloud/spanner/SpannerOptions$Builder + com.google.cloud.spanner.SpannerOptions$Builder setDialect(com.google.cloud.spanner.Dialect) + + + 7002 + com/google/cloud/spanner/connection/ConnectionOptions + com.google.cloud.spanner.Dialect getDialect() + + + 7012 + com/google/cloud/spanner/InstanceAdminClient + com.google.api.gax.longrunning.OperationFuture createInstanceConfig(com.google.cloud.spanner.InstanceConfigInfo, com.google.cloud.spanner.Options$CreateAdminApiOption[]) + + + 7012 + com/google/cloud/spanner/InstanceAdminClient + com.google.api.gax.longrunning.OperationFuture updateInstanceConfig(com.google.cloud.spanner.InstanceConfigInfo, java.lang.Iterable, com.google.cloud.spanner.Options$UpdateAdminApiOption[]) + + + 7012 + 
com/google/cloud/spanner/InstanceAdminClient + void deleteInstanceConfig(java.lang.String, com.google.cloud.spanner.Options$DeleteAdminApiOption[]) + + + 7012 + com/google/cloud/spanner/InstanceAdminClient + com.google.api.gax.paging.Page listInstanceConfigOperations(com.google.cloud.spanner.Options$ListOption[]) + + + 7012 + com/google/cloud/spanner/spi/v1/SpannerRpc + com.google.api.gax.longrunning.OperationFuture createInstanceConfig(java.lang.String, java.lang.String, com.google.spanner.admin.instance.v1.InstanceConfig, java.lang.Boolean) + + + 7012 + com/google/cloud/spanner/spi/v1/SpannerRpc + com.google.api.gax.longrunning.OperationFuture updateInstanceConfig(com.google.spanner.admin.instance.v1.InstanceConfig, java.lang.Boolean, com.google.protobuf.FieldMask) + + + 7012 + com/google/cloud/spanner/spi/v1/SpannerRpc + void deleteInstanceConfig(java.lang.String, java.lang.String, java.lang.Boolean) + + + 7012 + com/google/cloud/spanner/spi/v1/SpannerRpc + com.google.cloud.spanner.spi.v1.SpannerRpc$Paginated listInstanceConfigOperations(int, java.lang.String, java.lang.String) + + + 7013 + com/google/cloud/spanner/BackupInfo$Builder + com.google.cloud.spanner.BackupInfo$Builder setMaxExpireTime(com.google.cloud.Timestamp) + + + 7013 + com/google/cloud/spanner/BackupInfo$Builder + com.google.cloud.spanner.BackupInfo$Builder setReferencingBackup(com.google.protobuf.ProtocolStringList) + + + 7012 + com/google/cloud/spanner/DatabaseAdminClient + com.google.api.gax.longrunning.OperationFuture copyBackup(java.lang.String, java.lang.String, java.lang.String, com.google.cloud.Timestamp) + + + 7012 + com/google/cloud/spanner/DatabaseAdminClient + com.google.api.gax.longrunning.OperationFuture copyBackup(com.google.cloud.spanner.BackupId, com.google.cloud.spanner.Backup) + + + 7012 + com/google/cloud/spanner/spi/v1/SpannerRpc + com.google.api.gax.longrunning.OperationFuture copyBackup(com.google.cloud.spanner.BackupId, com.google.cloud.spanner.Backup) + + + 7012 + 
com/google/cloud/spanner/connection/Connection + com.google.cloud.spanner.DatabaseClient getDatabaseClient() + + + 7012 + com/google/cloud/spanner/DatabaseAdminClient + com.google.api.gax.longrunning.OperationFuture createDatabase(java.lang.String, java.lang.String, com.google.cloud.spanner.Dialect, java.lang.Iterable) + + + + 7012 + com/google/cloud/spanner/TransactionContext + com.google.spanner.v1.ResultSetStats analyzeUpdate(com.google.cloud.spanner.Statement, com.google.cloud.spanner.ReadContext$QueryAnalyzeMode, com.google.cloud.spanner.Options$UpdateOption[]) + + + 7012 + com/google/cloud/spanner/connection/Connection + com.google.spanner.v1.ResultSetStats analyzeUpdate(com.google.cloud.spanner.Statement, com.google.cloud.spanner.ReadContext$QueryAnalyzeMode) + + + 7012 + com/google/cloud/spanner/DatabaseAdminClient + com.google.api.gax.paging.Page listDatabaseRoles(java.lang.String, java.lang.String, com.google.cloud.spanner.Options$ListOption[]) + + + 7012 + com/google/cloud/spanner/spi/v1/SpannerRpc + com.google.cloud.spanner.spi.v1.SpannerRpc$Paginated listDatabaseRoles(java.lang.String, int, java.lang.String) + + + 7004 + com/google/cloud/spanner/Database + com.google.cloud.Policy getIAMPolicy() + + + 7004 + com/google/cloud/spanner/DatabaseAdminClient + com.google.cloud.Policy getDatabaseIAMPolicy(java.lang.String, java.lang.String) + + + 7004 + com/google/cloud/spanner/spi/v1/SpannerRpc + com.google.spanner.v1.Session createSession(java.lang.String, java.util.Map, java.util.Map) + + + 7004 + com/google/cloud/spanner/spi/v1/SpannerRpc + java.util.List batchCreateSessions(java.lang.String, int, java.util.Map, java.util.Map) + + + 7004 + com/google/cloud/spanner/spi/v1/SpannerRpc + com.google.iam.v1.Policy getDatabaseAdminIAMPolicy(java.lang.String) + + + 7004 + com/google/cloud/spanner/spi/v1/GapicSpannerRpc + com.google.spanner.v1.Session createSession(java.lang.String, java.util.Map, java.util.Map) + + + 7004 + 
com/google/cloud/spanner/spi/v1/GapicSpannerRpc + java.util.List batchCreateSessions(java.lang.String, int, java.util.Map, java.util.Map) + + + 7004 + com/google/cloud/spanner/spi/v1/GapicSpannerRpc + com.google.iam.v1.Policy getDatabaseAdminIAMPolicy(java.lang.String) + + + + 7012 + com/google/cloud/spanner/StructReader + java.lang.String getPgJsonb(int) + + + 7012 + com/google/cloud/spanner/StructReader + java.lang.String getPgJsonb(java.lang.String) + + + 7012 + com/google/cloud/spanner/StructReader + java.util.List getPgJsonbList(int) + + + 7012 + com/google/cloud/spanner/StructReader + java.util.List getPgJsonbList(java.lang.String) + + + 7012 + com/google/cloud/spanner/StructReader + com.google.protobuf.ProtocolMessageEnum getProtoEnum(int, java.util.function.Function) + + + 7012 + com/google/cloud/spanner/StructReader + com.google.protobuf.ProtocolMessageEnum getProtoEnum(java.lang.String, java.util.function.Function) + + + 7012 + com/google/cloud/spanner/StructReader + com.google.protobuf.AbstractMessage getProtoMessage(int, com.google.protobuf.AbstractMessage) + + + 7012 + com/google/cloud/spanner/StructReader + com.google.protobuf.AbstractMessage getProtoMessage(java.lang.String, com.google.protobuf.AbstractMessage) + + + 7012 + com/google/cloud/spanner/StructReader + java.util.List getProtoEnumList(int, java.util.function.Function) + + + 7012 + com/google/cloud/spanner/StructReader + java.util.List getProtoEnumList(java.lang.String, java.util.function.Function) + + + 7012 + com/google/cloud/spanner/StructReader + java.util.List getProtoMessageList(int, com.google.protobuf.AbstractMessage) + + + 7012 + com/google/cloud/spanner/StructReader + java.util.List getProtoMessageList(java.lang.String, com.google.protobuf.AbstractMessage) + + + 7012 + com/google/cloud/spanner/BatchClient + java.lang.String getDatabaseRole() + + + 7012 + com/google/cloud/spanner/DatabaseClient + java.lang.String getDatabaseRole() + + + 7013 + 
com/google/cloud/spanner/connection/AbstractStatementParser + boolean checkReturningClauseInternal(java.lang.String) + + + 7012 + com/google/cloud/spanner/ResultSet + com.google.spanner.v1.ResultSetMetadata getMetadata() + + + 7012 + com/google/cloud/spanner/TransactionContext + com.google.cloud.spanner.ResultSet analyzeUpdateStatement(com.google.cloud.spanner.Statement, com.google.cloud.spanner.ReadContext$QueryAnalyzeMode, com.google.cloud.spanner.Options$UpdateOption[]) + + + 7012 + com/google/cloud/spanner/connection/Connection + com.google.cloud.spanner.ResultSet analyzeUpdateStatement(com.google.cloud.spanner.Statement, com.google.cloud.spanner.ReadContext$QueryAnalyzeMode, com.google.cloud.spanner.Options$UpdateOption[]) + + + 7012 + com/google/cloud/spanner/DatabaseAdminClient + com.google.spanner.admin.database.v1.GetDatabaseDdlResponse getDatabaseDdlResponse(java.lang.String, java.lang.String) + + + 7012 + com/google/cloud/spanner/DatabaseAdminClient + com.google.api.gax.longrunning.OperationFuture updateDatabaseDdl(com.google.cloud.spanner.Database, java.lang.Iterable, java.lang.String) + + + 7013 + com/google/cloud/spanner/DatabaseInfo$Builder + com.google.cloud.spanner.DatabaseInfo$Builder setProtoDescriptors(byte[]) + + + 7013 + com/google/cloud/spanner/DatabaseInfo$Builder + com.google.cloud.spanner.DatabaseInfo$Builder setProtoDescriptors(java.io.InputStream) + + + 7013 + com/google/cloud/spanner/DatabaseInfo$Builder + com.google.cloud.spanner.DatabaseInfo$Builder setProtoDescriptors(java.lang.String) + + + 7006 + com/google/cloud/spanner/spi/v1/GapicSpannerRpc + java.util.List getDatabaseDdl(java.lang.String) + java.util.List + com.google.spanner.admin.database.v1.GetDatabaseDdlResponse + + + 7004 + com/google/cloud/spanner/spi/v1/GapicSpannerRpc + com.google.spanner.v1.Transaction beginTransaction(com.google.spanner.v1.BeginTransactionRequest, java.util.Map) + + + 7004 + com/google/cloud/spanner/spi/v1/GapicSpannerRpc + 
com.google.api.core.ApiFuture beginTransactionAsync(com.google.spanner.v1.BeginTransactionRequest, java.util.Map) + + + 7004 + com/google/cloud/spanner/spi/v1/GapicSpannerRpc + com.google.api.core.ApiFuture beginTransactionAsync(com.google.spanner.v1.BeginTransactionRequest, java.util.Map) + + + 7004 + com/google/cloud/spanner/spi/v1/GapicSpannerRpc + com.google.spanner.v1.ResultSet executeQuery(com.google.spanner.v1.ExecuteSqlRequest, java.util.Map) + com.google.spanner.v1.ResultSet executeQuery(com.google.spanner.v1.ExecuteSqlRequest, java.util.Map, boolean) + + + 7004 + com/google/cloud/spanner/spi/v1/GapicSpannerRpc + com.google.cloud.spanner.spi.v1.SpannerRpc$StreamingCall executeQuery(com.google.spanner.v1.ExecuteSqlRequest, com.google.cloud.spanner.spi.v1.SpannerRpc$ResultStreamConsumer, java.util.Map) + com.google.cloud.spanner.spi.v1.SpannerRpc$StreamingCall executeQuery(com.google.spanner.v1.ExecuteSqlRequest, com.google.cloud.spanner.spi.v1.SpannerRpc$ResultStreamConsumer, java.util.Map, boolean) + + + 7004 + com/google/cloud/spanner/spi/v1/GapicSpannerRpc + com.google.api.core.ApiFuture executeQueryAsync(com.google.spanner.v1.ExecuteSqlRequest, java.util.Map) + + + 7004 + com/google/cloud/spanner/spi/v1/GapicSpannerRpc + com.google.cloud.spanner.spi.v1.SpannerRpc$StreamingCall read(com.google.spanner.v1.ReadRequest, com.google.cloud.spanner.spi.v1.SpannerRpc$ResultStreamConsumer, java.util.Map) + + + 7005 + com/google/cloud/spanner/spi/v1/GapicSpannerRpc + com.google.api.gax.longrunning.OperationFuture updateDatabaseDdl(java.lang.String, java.lang.Iterable, java.lang.String) + com.google.api.gax.longrunning.OperationFuture updateDatabaseDdl(com.google.cloud.spanner.Database, java.lang.Iterable, java.lang.String) + + + 7005 + com/google/cloud/spanner/spi/v1/GapicSpannerRpc + com.google.cloud.spanner.spi.v1.SpannerRpc$StreamingCall executeQuery(com.google.spanner.v1.ExecuteSqlRequest, com.google.cloud.spanner.spi.v1.SpannerRpc$ResultStreamConsumer, 
java.util.Map) + com.google.cloud.spanner.spi.v1.SpannerRpc$StreamingCall executeQuery(com.google.spanner.v1.ExecuteSqlRequest, java.util.Map, boolean) + + + 7006 + com/google/cloud/spanner/spi/v1/GapicSpannerRpc + com.google.cloud.spanner.spi.v1.SpannerRpc$StreamingCall executeQuery(com.google.spanner.v1.ExecuteSqlRequest, com.google.cloud.spanner.spi.v1.SpannerRpc$ResultStreamConsumer, java.util.Map) + com.google.spanner.v1.ResultSet + + + 7006 + com/google/cloud/spanner/spi/v1/GapicSpannerRpc + com.google.spanner.v1.ResultSet executeQuery(com.google.spanner.v1.ExecuteSqlRequest, java.util.Map) + com.google.cloud.spanner.spi.v1.SpannerRpc$StreamingCall + + + 7006 + com/google/cloud/spanner/spi/v1/SpannerRpc + java.util.List getDatabaseDdl(java.lang.String) + java.util.List + com.google.spanner.admin.database.v1.GetDatabaseDdlResponse + + + 7004 + com/google/cloud/spanner/spi/v1/SpannerRpc + com.google.spanner.v1.Transaction beginTransaction(com.google.spanner.v1.BeginTransactionRequest, java.util.Map) + + + 7004 + com/google/cloud/spanner/spi/v1/SpannerRpc + com.google.api.core.ApiFuture beginTransactionAsync(com.google.spanner.v1.BeginTransactionRequest, java.util.Map) + + + 7004 + com/google/cloud/spanner/spi/v1/SpannerRpc + com.google.spanner.v1.ResultSet executeQuery(com.google.spanner.v1.ExecuteSqlRequest, java.util.Map) + + + 7004 + com/google/cloud/spanner/spi/v1/SpannerRpc + com.google.api.core.ApiFuture executeQueryAsync(com.google.spanner.v1.ExecuteSqlRequest, java.util.Map) + + + 7004 + com/google/cloud/spanner/spi/v1/SpannerRpc + com.google.cloud.spanner.spi.v1.SpannerRpc$StreamingCall read(com.google.spanner.v1.ReadRequest, com.google.cloud.spanner.spi.v1.SpannerRpc$ResultStreamConsumer, java.util.Map) + + + 7005 + com/google/cloud/spanner/spi/v1/SpannerRpc + com.google.api.gax.longrunning.OperationFuture updateDatabaseDdl(java.lang.String, java.lang.Iterable, java.lang.String) + com.google.api.gax.longrunning.OperationFuture 
updateDatabaseDdl(com.google.cloud.spanner.Database, java.lang.Iterable, java.lang.String) + + + 7005 + com/google/cloud/spanner/spi/v1/SpannerRpc + com.google.cloud.spanner.spi.v1.SpannerRpc$StreamingCall executeQuery(com.google.spanner.v1.ExecuteSqlRequest, com.google.cloud.spanner.spi.v1.SpannerRpc$ResultStreamConsumer, java.util.Map) + com.google.cloud.spanner.spi.v1.SpannerRpc$StreamingCall executeQuery(com.google.spanner.v1.ExecuteSqlRequest, java.util.Map, boolean) + + + 7006 + com/google/cloud/spanner/spi/v1/SpannerRpc + com.google.cloud.spanner.spi.v1.SpannerRpc$StreamingCall executeQuery(com.google.spanner.v1.ExecuteSqlRequest, com.google.cloud.spanner.spi.v1.SpannerRpc$ResultStreamConsumer, java.util.Map) + com.google.spanner.v1.ResultSet + + + 7006 + com/google/cloud/spanner/spi/v1/SpannerRpc + com.google.spanner.v1.ResultSet executeQuery(com.google.spanner.v1.ExecuteSqlRequest, java.util.Map) + com.google.cloud.spanner.spi.v1.SpannerRpc$StreamingCall + + + + + 7012 + com/google/cloud/spanner/connection/Connection + void setSavepointSupport(com.google.cloud.spanner.connection.SavepointSupport) + + + 7012 + com/google/cloud/spanner/connection/Connection + com.google.cloud.spanner.connection.SavepointSupport getSavepointSupport() + + + 7012 + com/google/cloud/spanner/connection/Connection + void savepoint(java.lang.String) + + + 7012 + com/google/cloud/spanner/connection/Connection + void releaseSavepoint(java.lang.String) + + + 7012 + com/google/cloud/spanner/connection/Connection + void rollbackToSavepoint(java.lang.String) + + + + 7012 + com/google/cloud/spanner/connection/Connection + void setDelayTransactionStartUntilFirstWrite(boolean) + + + 7012 + com/google/cloud/spanner/connection/Connection + boolean isDelayTransactionStartUntilFirstWrite() + + + + + 7012 + com/google/cloud/spanner/connection/Connection + int getMaxPartitionedParallelism() + + + 7012 + com/google/cloud/spanner/connection/Connection + int getMaxPartitions() + + + 7012 + 
com/google/cloud/spanner/connection/Connection + boolean isAutoPartitionMode() + + + 7012 + com/google/cloud/spanner/connection/Connection + boolean isDataBoostEnabled() + + + 7012 + com/google/cloud/spanner/connection/Connection + com.google.cloud.spanner.ResultSet partitionQuery(com.google.cloud.spanner.Statement, com.google.cloud.spanner.PartitionOptions, com.google.cloud.spanner.Options$QueryOption[]) + + + 7012 + com/google/cloud/spanner/connection/Connection + com.google.cloud.spanner.ResultSet runPartition(java.lang.String) + + + 7012 + com/google/cloud/spanner/connection/Connection + com.google.cloud.spanner.connection.PartitionedQueryResultSet runPartitionedQuery(com.google.cloud.spanner.Statement, com.google.cloud.spanner.PartitionOptions, com.google.cloud.spanner.Options$QueryOption[]) + + + 7012 + com/google/cloud/spanner/connection/Connection + void setAutoPartitionMode(boolean) + + + 7012 + com/google/cloud/spanner/connection/Connection + void setDataBoostEnabled(boolean) + + + 7012 + com/google/cloud/spanner/connection/Connection + void setMaxPartitionedParallelism(int) + + + 7012 + com/google/cloud/spanner/connection/Connection + void setMaxPartitions(int) + + + + 7012 + com/google/cloud/spanner/connection/Connection + com.google.cloud.spanner.connection.StatementResult execute(com.google.cloud.spanner.Statement, java.util.Set) + + + + + 7012 + com/google/cloud/spanner/StructReader + float getFloat(int) + + + 7012 + com/google/cloud/spanner/StructReader + float getFloat(java.lang.String) + + + 7012 + com/google/cloud/spanner/StructReader + float[] getFloatArray(int) + + + 7012 + com/google/cloud/spanner/StructReader + float[] getFloatArray(java.lang.String) + + + 7012 + com/google/cloud/spanner/StructReader + java.util.List getFloatList(int) + + + 7012 + com/google/cloud/spanner/StructReader + java.util.List getFloatList(java.lang.String) + + + 7013 + com/google/cloud/spanner/Value + float getFloat32() + + + 7013 + com/google/cloud/spanner/Value + 
java.util.List getFloat32Array() + + + + + 7012 + com/google/cloud/spanner/StructReader + java.util.UUID getUuid(int) + + + 7012 + com/google/cloud/spanner/StructReader + java.util.UUID getUuid(java.lang.String) + + + 7012 + com/google/cloud/spanner/StructReader + java.util.List getUuidList(int) + + + 7012 + com/google/cloud/spanner/StructReader + java.util.List getUuidList(java.lang.String) + + + 7013 + com/google/cloud/spanner/Value + java.util.UUID getUuid() + + + 7013 + com/google/cloud/spanner/Value + java.util.List getUuidArray() + + + + + 7012 + com/google/cloud/spanner/StructReader + com.google.cloud.spanner.Interval getInterval(int) + + + 7012 + com/google/cloud/spanner/StructReader + com.google.cloud.spanner.Interval getInterval(java.lang.String) + + + 7012 + com/google/cloud/spanner/StructReader + com.google.cloud.spanner.Interval[] getIntervalArray(int) + + + 7012 + com/google/cloud/spanner/StructReader + com.google.cloud.spanner.Interval[] getIntervalArray(java.lang.String) + + + 7012 + com/google/cloud/spanner/StructReader + java.util.List getIntervalList(int) + + + 7012 + com/google/cloud/spanner/StructReader + java.util.List getIntervalList(java.lang.String) + + + 7013 + com/google/cloud/spanner/Value + com.google.cloud.spanner.Interval getInterval() + + + 7013 + com/google/cloud/spanner/Value + java.util.List getIntervalArray() + + + + + 7012 + com/google/cloud/spanner/spi/v1/SpannerRpc$StreamingCall + com.google.api.gax.rpc.ApiCallContext getCallContext() + + + + 7012 + com/google/cloud/spanner/spi/v1/SpannerRpc + com.google.api.gax.retrying.RetrySettings getReadRetrySettings() + + + 7012 + com/google/cloud/spanner/spi/v1/SpannerRpc + com.google.api.gax.retrying.RetrySettings getExecuteQueryRetrySettings() + + + 7012 + com/google/cloud/spanner/spi/v1/SpannerRpc + java.util.Set getReadRetryableCodes() + + + 7012 + com/google/cloud/spanner/spi/v1/SpannerRpc + java.util.Set getExecuteQueryRetryableCodes() + + + + 7013 + 
com/google/cloud/spanner/Dialect + java.lang.String getDefaultSchema() + + + + 7012 + com/google/cloud/spanner/Spanner + com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient createDatabaseAdminClient() + + + 7012 + com/google/cloud/spanner/Spanner + com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient createInstanceAdminClient() + + + + + 7012 + com/google/cloud/spanner/spi/v1/SpannerRpc + com.google.cloud.spanner.admin.database.v1.stub.DatabaseAdminStubSettings getDatabaseAdminStubSettings() + + + 7012 + com/google/cloud/spanner/spi/v1/SpannerRpc + com.google.cloud.spanner.admin.instance.v1.stub.InstanceAdminStubSettings getInstanceAdminStubSettings() + + + + + 7006 + com/google/cloud/spanner/Options + com.google.cloud.spanner.Options$ReadQueryUpdateTransactionOption maxCommitDelay(java.time.Duration) + com.google.cloud.spanner.Options$TransactionOption + + + + 7005 + com/google/cloud/spanner/PartitionedDmlTransaction + void setSpan(io.opencensus.trace.Span) + void setSpan(com.google.cloud.spanner.ISpan) + + + + + 7012 + com/google/cloud/spanner/connection/Connection + com.google.spanner.v1.DirectedReadOptions getDirectedRead() + + + 7012 + com/google/cloud/spanner/connection/Connection + void setDirectedRead(com.google.spanner.v1.DirectedReadOptions) + + + + + 7012 + com/google/cloud/spanner/connection/Connection + java.time.Duration getMaxCommitDelay() + + + 7012 + com/google/cloud/spanner/connection/Connection + void setMaxCommitDelay(java.time.Duration) + + + + + 7012 + com/google/cloud/spanner/connection/Connection + com.google.cloud.spanner.Spanner getSpanner() + + + + + 7012 + com/google/cloud/spanner/connection/Connection + void setDdlInTransactionMode(com.google.cloud.spanner.connection.DdlInTransactionMode) + + + 7012 + com/google/cloud/spanner/connection/Connection + com.google.cloud.spanner.connection.DdlInTransactionMode getDdlInTransactionMode() + + + + + 7012 + com/google/cloud/spanner/SpannerOptions$SpannerEnvironment + boolean 
isEnableExtendedTracing() + + + + + 7012 + com/google/cloud/spanner/SpannerOptions$SpannerEnvironment + boolean isEnableApiTracing() + + + + + 7012 + com/google/cloud/spanner/SpannerOptions$SpannerEnvironment + boolean isEnableEndToEndTracing() + + + + + 7012 + com/google/cloud/spanner/SpannerOptions$SpannerEnvironment + boolean isEnableBuiltInMetrics() + + + + + 7012 + com/google/cloud/spanner/SpannerOptions$SpannerEnvironment + boolean isEnableGRPCBuiltInMetrics() + + + + + 7012 + com/google/cloud/spanner/SpannerOptions$SpannerEnvironment + boolean isEnableAFEServerTiming() + + + + + 7012 + com/google/cloud/spanner/SpannerOptions$SpannerEnvironment + java.lang.String getMonitoringHost() + + + + + 7012 + com/google/cloud/spanner/connection/Connection + boolean isExcludeTxnFromChangeStreams() + + + 7012 + com/google/cloud/spanner/connection/Connection + void setExcludeTxnFromChangeStreams(boolean) + + + + + 7012 + com/google/cloud/spanner/connection/Connection + byte[] getProtoDescriptors() + + + 7012 + com/google/cloud/spanner/connection/Connection + void setProtoDescriptors(byte[]) + + + + + 7009 + com/google/cloud/spanner/SessionPoolOptions$Builder + com.google.cloud.spanner.SessionPoolOptions$Builder setUseMultiplexedSession(boolean) + + + + + 7012 + com/google/cloud/spanner/connection/Connection + void reset() + + + + + 7012 + com/google/cloud/spanner/connection/Connection + void setKeepTransactionAlive(boolean) + + + 7012 + com/google/cloud/spanner/connection/Connection + boolean isKeepTransactionAlive() + + + + + 7012 + com/google/cloud/spanner/connection/Connection + void setAutoBatchDml(boolean) + + + 7012 + com/google/cloud/spanner/connection/Connection + boolean isAutoBatchDml() + + + 7012 + com/google/cloud/spanner/connection/Connection + void setAutoBatchDmlUpdateCount(long) + + + 7012 + com/google/cloud/spanner/connection/Connection + long getAutoBatchDmlUpdateCount() + + + 7012 + com/google/cloud/spanner/connection/Connection + void 
setAutoBatchDmlUpdateCountVerification(boolean) + + + 7012 + com/google/cloud/spanner/connection/Connection + boolean isAutoBatchDmlUpdateCountVerification() + + + + + 7012 + com/google/cloud/spanner/connection/TransactionRetryListener + void retryDmlAsPartitionedDmlStarting(java.util.UUID, com.google.cloud.spanner.Statement, com.google.cloud.spanner.TransactionMutationLimitExceededException) + + + 7012 + com/google/cloud/spanner/connection/TransactionRetryListener + void retryDmlAsPartitionedDmlFinished(java.util.UUID, com.google.cloud.spanner.Statement, long) + + + 7012 + com/google/cloud/spanner/connection/TransactionRetryListener + void retryDmlAsPartitionedDmlFailed(java.util.UUID, com.google.cloud.spanner.Statement, java.lang.Throwable) + + + + + 7012 + com/google/cloud/spanner/connection/Connection + java.lang.Object runTransaction(com.google.cloud.spanner.connection.Connection$TransactionCallable) + + + + + 7012 + com/google/cloud/spanner/SpannerOptions$SpannerEnvironment + com.google.auth.oauth2.GoogleCredentials getDefaultExperimentalHostCredentials() + + + 7002 + com/google/cloud/spanner/SpannerOptions$SpannerEnvironment + com.google.auth.oauth2.GoogleCredentials getDefaultExternalHostCredentials() + + + 7002 + com/google/cloud/spanner/SpannerOptions + com.google.auth.oauth2.GoogleCredentials getDefaultExternalHostCredentialsFromSysEnv() + + + + + 7012 + com/google/cloud/spanner/connection/Connection + void setDefaultSequenceKind(java.lang.String) + + + 7012 + com/google/cloud/spanner/connection/Connection + java.lang.String getDefaultSequenceKind() + + + + + 7012 + com/google/cloud/spanner/connection/Connection + void setDefaultIsolationLevel(com.google.spanner.v1.TransactionOptions$IsolationLevel) + + + 7012 + com/google/cloud/spanner/connection/Connection + com.google.spanner.v1.TransactionOptions$IsolationLevel getDefaultIsolationLevel() + + + + + 7012 + com/google/cloud/spanner/connection/Connection + void 
beginTransaction(com.google.spanner.v1.TransactionOptions$IsolationLevel) + + + 7012 + com/google/cloud/spanner/connection/Connection + com.google.api.core.ApiFuture beginTransactionAsync(com.google.spanner.v1.TransactionOptions$IsolationLevel) + + + + + 8001 + com/google/cloud/spanner/connection/ConnectionOptions$ConnectionProperty + + + 6001 + com/google/cloud/spanner/connection/ConnectionOptions + VALID_PROPERTIES + + + + + 7002 + com/google/cloud/spanner/connection/AbstractStatementParser + boolean supportsExplain() + + + 7002 + com/google/cloud/spanner/connection/PostgreSQLStatementParser + boolean supportsExplain() + + + 7002 + com/google/cloud/spanner/connection/SpannerStatementParser + boolean supportsExplain() + + + + 7012 + com/google/cloud/spanner/DatabaseClient + com.google.cloud.spanner.Statement$StatementFactory getStatementFactory() + + + + + 7012 + com/google/cloud/spanner/AsyncTransactionManager + com.google.cloud.spanner.AsyncTransactionManager$TransactionContextFuture beginAsync(com.google.cloud.spanner.AbortedException) + + + 7012 + com/google/cloud/spanner/TransactionManager + com.google.cloud.spanner.TransactionContext begin(com.google.cloud.spanner.AbortedException) + + + 7012 + com/google/cloud/spanner/StructReader + java.lang.Object getOrNull(int, java.util.function.BiFunction) + + + 7012 + com/google/cloud/spanner/StructReader + java.lang.Object getOrNull(java.lang.String, java.util.function.BiFunction) + + + 7012 + com/google/cloud/spanner/StructReader + java.lang.Object getOrDefault(int, java.util.function.BiFunction, java.lang.Object) + + + 7012 + com/google/cloud/spanner/StructReader + java.lang.Object getOrDefault(java.lang.String, java.util.function.BiFunction, java.lang.Object) + + + 7012 + com/google/cloud/spanner/SpannerOptions$SpannerEnvironment + boolean isEnableDirectAccess() + + + 7012 + com/google/cloud/spanner/connection/Connection + void setReadLockMode(com.google.spanner.v1.TransactionOptions$ReadWrite$ReadLockMode) + + + 
7012 + com/google/cloud/spanner/connection/Connection + com.google.spanner.v1.TransactionOptions$ReadWrite$ReadLockMode getReadLockMode() + + + 7012 + com/google/cloud/spanner/connection/Connection + void setTransactionTimeout(java.time.Duration) + + + 7012 + com/google/cloud/spanner/connection/Connection + java.time.Duration getTransactionTimeout() + + + 8001 + com/google/cloud/spanner/LatencyTest + + + 7012 + com/google/cloud/spanner/connection/Connection + java.lang.Object getConnectionPropertyValue(com.google.cloud.spanner.connection.ConnectionProperty) + + + + 7002 + com/google/cloud/spanner/CompositeTracer + void recordAFELatency(java.lang.Long) + + + 7002 + com/google/cloud/spanner/CompositeTracer + void recordAFELatency(java.lang.Float) + + + 7002 + com/google/cloud/spanner/CompositeTracer + void recordAfeHeaderMissingCount(java.lang.Long) + + + 7002 + com/google/cloud/spanner/CompositeTracer + void recordGFELatency(java.lang.Long) + + + 7002 + com/google/cloud/spanner/CompositeTracer + void recordGFELatency(java.lang.Float) + + + 7002 + com/google/cloud/spanner/CompositeTracer + void recordGfeHeaderMissingCount(java.lang.Long) + + + + 7002 + com/google/cloud/spanner/SpannerException + void setRequestId(com.google.cloud.spanner.XGoogSpannerRequestId) + + + 7002 + com/google/cloud/spanner/SpannerExceptionFactory + com.google.cloud.spanner.SpannerBatchUpdateException newSpannerBatchUpdateException(com.google.cloud.spanner.ErrorCode, java.lang.String, long[], com.google.cloud.spanner.XGoogSpannerRequestId) + + + 7002 + com/google/cloud/spanner/SpannerExceptionFactory + com.google.cloud.spanner.SpannerException newSpannerException(com.google.cloud.spanner.ErrorCode, java.lang.String, java.lang.Throwable, com.google.cloud.spanner.XGoogSpannerRequestId) + + + 7002 + com/google/cloud/spanner/SpannerExceptionFactory + com.google.cloud.spanner.SpannerException newSpannerException(com.google.cloud.spanner.ErrorCode, java.lang.String, 
com.google.cloud.spanner.XGoogSpannerRequestId) + + + 7002 + com/google/cloud/spanner/SpannerExceptionFactory + com.google.cloud.spanner.SpannerException newSpannerException(java.lang.Throwable, com.google.cloud.spanner.XGoogSpannerRequestId) + + + 7002 + com/google/cloud/spanner/SpannerExceptionFactory + com.google.cloud.spanner.SpannerException newSpannerException(io.grpc.Context, java.lang.Throwable, com.google.cloud.spanner.XGoogSpannerRequestId) + + + 7002 + com/google/cloud/spanner/SpannerExceptionFactory + com.google.cloud.spanner.SpannerException propagateInterrupt(java.lang.InterruptedException, com.google.cloud.spanner.XGoogSpannerRequestId) + + + 6001 + com/google/cloud/spanner/XGoogSpannerRequestId + REQUEST_HEADER_KEY + + + 6001 + com/google/cloud/spanner/XGoogSpannerRequestId + REQUEST_ID + + diff --git a/java-spanner/google-cloud-spanner/pom.xml b/java-spanner/google-cloud-spanner/pom.xml new file mode 100644 index 000000000000..9d237b0b66b4 --- /dev/null +++ b/java-spanner/google-cloud-spanner/pom.xml @@ -0,0 +1,725 @@ + + + 4.0.0 + com.google.cloud + google-cloud-spanner + 6.112.1-SNAPSHOT + jar + Google Cloud Spanner + https://github.com/googleapis/google-cloud-java + Java idiomatic client for Google Cloud Spanner. 
+ + com.google.cloud + google-cloud-spanner-parent + 6.112.1-SNAPSHOT + + + google-cloud-spanner + 0.31.1 + 3.85.0 + com.google.cloud.spanner.GceTestEnvConfig + projects/gcloud-devel/instances/spanner-testing-east1 + gcloud-devel + projects/gcloud-devel/locations/us-east1/keyRings/cmek-test-key-ring/cryptoKeys/cmek-test-key + + + + + + kr.motd.maven + os-maven-plugin + 1.7.1 + + + + + org.jacoco + jacoco-maven-plugin + 0.8.14 + + + + prepare-agent + + + + report + prepare-package + + report + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + sponge_log + + + + + default-test + + com.google.cloud.spanner.TracerTest,com.google.cloud.spanner.IntegrationTest + + + + ${spanner.testenv.config.class} + ${spanner.testenv.instance} + ${spanner.gce.config.project_id} + ${spanner.testenv.kms_key.name} + logging.properties + + + + + + tracer + + test + + + com.google.cloud.spanner.TracerTest + + + + + + org.apache.maven.plugins + maven-failsafe-plugin + + + ${spanner.testenv.config.class} + ${spanner.testenv.instance} + ${spanner.gce.config.project_id} + ${spanner.testenv.kms_key.name} + logging.properties + + 3000 + + + + + default + + com.google.cloud.spanner.SerialIntegrationTest + + + + + parallel-integration-test + + integration-test + + + com.google.cloud.spanner.ParallelIntegrationTest + 12 + true + com.google.cloud.spanner.ParallelIntegrationTest + + + + + + + org.graalvm.buildtools + native-maven-plugin + + + -Dspanner.testenv.config.class=${spanner.testenv.config.class} + -Dspanner.testenv.instance=${spanner.testenv.instance} + -Dspanner.gce.config.project_id=${spanner.gce.config.project_id} + -Dspanner.testenv.kms_key.name=${spanner.testenv.kms_key.name} + -Djava.util.logging.config.file=logging.properties + + + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + + org.codehaus.mojo + clirr-maven-plugin + + + com/google/cloud/spanner/spi/v1/** + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + 0.6.1 + + 
com.google.protobuf:protoc:4.33.2:exe:${os.detected.classifier} + + ${project.basedir}/../proto-google-cloud-spanner-v1/src/main/proto + + + + + test-compile + + test-compile + + + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + io.grpc:grpc-protobuf-lite,org.hamcrest:hamcrest,org.hamcrest:hamcrest-core,com.google.errorprone:error_prone_annotations,org.openjdk.jmh:jmh-generator-annprocess,com.google.api.grpc:grpc-google-cloud-spanner-v1,com.google.api.grpc:grpc-google-cloud-spanner-admin-instance-v1,com.google.api.grpc:grpc-google-cloud-spanner-admin-database-v1,javax.annotation:javax.annotation-api,io.opencensus:opencensus-impl,org.graalvm.sdk:graal-sdk,io.grpc:grpc-googleapis,io.grpc:grpc-rls,com.google.api.grpc:proto-google-cloud-spanner-executor-v1,com.google.api.grpc:grpc-google-cloud-spanner-executor-v1 + + + + + + + + + + com.google.cloud + grpc-gcp + + + io.opentelemetry + opentelemetry-api + + + + + io.grpc + grpc-api + + + io.grpc + grpc-auth + + + io.grpc + grpc-inprocess + + + io.grpc + grpc-core + + + io.grpc + grpc-netty-shaded + + + io.grpc + grpc-protobuf + + + io.grpc + grpc-stub + + + io.grpc + grpc-opentelemetry + + + io.opentelemetry + opentelemetry-api + + + + + com.google.api + api-common + + + com.google.protobuf + protobuf-java + + + com.google.protobuf + protobuf-java-util + + + com.google.api.grpc + proto-google-common-protos + + + com.google.api.grpc + grpc-google-common-protos + + + com.google.api.grpc + proto-google-iam-v1 + + + com.google.cloud + google-cloud-core + + + com.google.cloud + google-cloud-core-grpc + + + io.opencensus + opencensus-api + ${opencensus.version} + + + io.opencensus + opencensus-contrib-grpc-util + ${opencensus.version} + + + io.opencensus + opencensus-impl + test + + + io.opentelemetry + opentelemetry-api + + + io.opentelemetry + opentelemetry-context + + + io.opentelemetry + opentelemetry-sdk + + + io.opentelemetry + opentelemetry-sdk-common + + + io.opentelemetry + 
opentelemetry-sdk-metrics + + + com.google.cloud.opentelemetry + detector-resources-support + + + com.google.cloud + google-cloud-monitoring + ${google.cloud.monitoring.version} + + + + com.google.guava + failureaccess + + + + + com.google.api.grpc + proto-google-cloud-monitoring-v3 + ${google.cloud.monitoring.version} + + + + com.google.guava + failureaccess + + + + + com.google.api.grpc + grpc-google-cloud-monitoring-v3 + ${google.cloud.monitoring.version} + test + + + + com.google.guava + failureaccess + + + + + com.google.auth + google-auth-library-oauth2-http + + + com.google.http-client + google-http-client + + + com.google.api.grpc + proto-google-cloud-spanner-admin-instance-v1 + + + com.google.api.grpc + proto-google-cloud-spanner-v1 + + + com.google.api.grpc + proto-google-cloud-spanner-admin-database-v1 + + + com.google.api.grpc + grpc-google-cloud-spanner-admin-instance-v1 + + + com.google.api.grpc + grpc-google-cloud-spanner-v1 + + + com.google.api.grpc + grpc-google-cloud-spanner-admin-database-v1 + + + com.google.guava + guava + + + com.google.api + gax + + + com.google.api + gax-grpc + + + com.google.api + gax-httpjson + + + org.threeten + threetenbp + + + com.google.code.findbugs + jsr305 + + + com.google.code.gson + gson + + + com.google.auth + google-auth-library-credentials + + + io.grpc + grpc-alts + + + io.grpc + grpc-googleapis + runtime + + + io.grpc + grpc-rls + runtime + + + org.graalvm.sdk + nativeimage + ${graal-sdk-nativeimage.version} + provided + + + + junit + junit + test + + + + com.google.api.grpc + proto-google-cloud-spanner-executor-v1 + provided + + + com.google.api.grpc + grpc-google-cloud-spanner-executor-v1 + provided + + + + com.google.api + gax + testlib + test + + + com.google.api + gax-grpc + testlib + test + + + com.google.api + gax-httpjson + testlib + test + + + com.google.truth + truth + test + + + org.mockito + mockito-core + 4.11.0 + test + + + org.json + json + 20250517 + test + + + com.google.guava + guava-testlib 
+ test + + + org.hamcrest + hamcrest + 3.0 + test + + + org.openjdk.jmh + jmh-core + 1.37 + test + + + org.openjdk.jmh + jmh-generator-annprocess + 1.37 + test + + + io.opentelemetry + opentelemetry-sdk-trace + test + + + io.opentelemetry + opentelemetry-sdk-testing + test + + + com.google.cloud.opentelemetry + exporter-trace + 0.36.0 + test + + + com.google.cloud + google-cloud-trace + 2.87.0 + test + + + com.google.api.grpc + proto-google-cloud-trace-v1 + 2.87.0 + test + + + + + java9 + + [9,) + + + + javax.annotation + javax.annotation-api + + + + + benchmark + + + + + + + org.codehaus.mojo + exec-maven-plugin + + + run-benchmarks + test + + exec + + + test + java + + -classpath + + org.openjdk.jmh.Main + ${benchmark.name} + -rf + JSON + -rff + jmh-results.json + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + + + org.openjdk.jmh + jmh-generator-annprocess + 1.37 + + + + + + + + + validate-benchmark + + + + org.codehaus.mojo + exec-maven-plugin + + com.google.cloud.spanner.benchmarking.BenchmarkValidator + test + + + + + + + slow-tests + + + + org.apache.maven.plugins + maven-surefire-plugin + + + default-test + + com.google.cloud.spanner.SlowTest + + + + + + org.apache.maven.plugins + maven-failsafe-plugin + + + default + + com.google.cloud.spanner.SlowTest + + 7200 + + + + + parallel-integration-test + + integration-test + + + true + + + + + + + + + spanner-directpath-it + + + + org.apache.maven.plugins + maven-failsafe-plugin + + + com.google.cloud.spanner.GceTestEnvConfig + projects/directpath-prod-manual-testing/instances/spanner-testing + directpath-prod-manual-testing + true + ipv4 + + 3000 + + + + + + + + generate-test-sql-scripts + + + + org.codehaus.mojo + exec-maven-plugin + + + generateTestScripts + compile + + java + + + com.google.cloud.spanner.connection.SqlTestScriptsGenerator + + + do_log_statements + true + + + test + false + + + + + + + + + executor-tests + + + com.google.api.grpc + proto-google-cloud-spanner-executor-v1 + + + 
com.google.api.grpc + grpc-google-cloud-spanner-executor-v1 + + + + + diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbortedDueToConcurrentModificationException.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbortedDueToConcurrentModificationException.java new file mode 100644 index 000000000000..7aff83139eaf --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbortedDueToConcurrentModificationException.java @@ -0,0 +1,53 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.cloud.spanner.connection.Connection; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; + +/** + * Exception thrown by a {@link Connection} when a database operation detects that a transaction has + * aborted and an internal retry failed because of a concurrent modification. This type of error has + * its own subclass since it is often necessary to handle this specific kind of aborted exceptions + * differently to other types of errors. + */ +public class AbortedDueToConcurrentModificationException extends AbortedException { + private static final long serialVersionUID = 7600146169922053323L; + private final SpannerException databaseError; + + /** Private constructor. Use {@link SpannerExceptionFactory} to create instances. 
*/ + AbortedDueToConcurrentModificationException( + DoNotConstructDirectly token, @Nullable String message, @Nullable Throwable cause) { + super(token, message, cause); + this.databaseError = null; + } + + /** Private constructor. Use {@link SpannerExceptionFactory} to create instances. */ + AbortedDueToConcurrentModificationException( + DoNotConstructDirectly token, + @Nullable String message, + @Nullable Throwable cause, + @Nonnull SpannerException databaseError) { + super(token, message, cause); + this.databaseError = databaseError; + } + + public SpannerException getDatabaseErrorDuringRetry() { + return databaseError; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbortedException.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbortedException.java new file mode 100644 index 000000000000..28b5f1fa257a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbortedException.java @@ -0,0 +1,71 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.gax.rpc.ApiException; +import com.google.protobuf.ByteString; +import javax.annotation.Nullable; + +/** + * Exception thrown by Cloud Spanner when an operation detects that a transaction has aborted. 
This + * type of error has its own subclass since it is often necessary to handle aborted differently to + * other types of errors, most typically by retrying the transaction. + */ +public class AbortedException extends SpannerException { + + /** + * Abort is not retryable per se: the operation request needs to change (generally to reflect a + * new transaction attempt) before a retry can succeed. + */ + private static final boolean IS_RETRYABLE = false; + + private ByteString transactionID; + + /** Private constructor. Use {@link SpannerExceptionFactory} to create instances. */ + AbortedException( + DoNotConstructDirectly token, @Nullable String message, @Nullable Throwable cause) { + this(token, message, cause, null); + } + + /** Private constructor. Use {@link SpannerExceptionFactory} to create instances. */ + AbortedException( + DoNotConstructDirectly token, + @Nullable String message, + @Nullable Throwable cause, + @Nullable ApiException apiException) { + super(token, ErrorCode.ABORTED, IS_RETRYABLE, message, cause, apiException); + if (cause instanceof AbortedException) { + this.transactionID = ((AbortedException) cause).getTransactionID(); + } + } + + /** + * Returns true if this aborted exception was returned by the emulator, and was caused by another + * transaction already being active on the emulator. 
/**
 * Generic {@link AbstractLazyInitializer} for any heavy-weight object that might throw an exception
 * during initialization. The underlying object is initialized at most once: {@link #initialize()}
 * is invoked by the first call to {@link #get()}, and its result — or the exception it threw — is
 * cached and returned/rethrown by every subsequent call. A failed initialization is never retried.
 *
 * @param <T> the type of object that is initialized lazily
 */
public abstract class AbstractLazyInitializer<T> {
  private final Object lock = new Object();
  // All three fields are volatile so the unsynchronized fast path in get() reads a consistent,
  // fully published state (double-checked locking).
  private volatile boolean initialized;
  private volatile T object;
  private volatile Exception error;

  /**
   * Returns the initialized instance of T.
   *
   * @throws Exception the exception thrown by {@link #initialize()}, rethrown on this and every
   *     subsequent call if the one-and-only initialization attempt failed
   */
  public T get() throws Exception {
    // First check without a lock to improve performance.
    if (!initialized) {
      synchronized (lock) {
        if (!initialized) {
          try {
            object = initialize();
          } catch (Exception e) {
            error = e;
          }
          // Mark initialized even on failure so initialize() is called at most once.
          initialized = true;
        }
      }
    }
    if (error != null) {
      throw error;
    }
    return object;
  }

  /**
   * Initializes the actual object that should be returned. Is called once the first time an
   * instance of T is required.
   */
  protected abstract T initialize() throws Exception;
}
+ */ +abstract class AbstractMultiplexedSessionDatabaseClient implements DatabaseClient { + + @Override + public Timestamp writeAtLeastOnce(Iterable mutations) throws SpannerException { + return writeAtLeastOnceWithOptions(mutations).getCommitTimestamp(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractReadContext.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractReadContext.java new file mode 100644 index 000000000000..619ea42441a1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractReadContext.java @@ -0,0 +1,1150 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SessionClient.optionMap; +import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.core.ExecutorProvider; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AbstractResultSet.CloseableIterator; +import com.google.cloud.spanner.AsyncResultSet.CallbackResponse; +import com.google.cloud.spanner.AsyncResultSet.ReadyCallback; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.Options.ReadOption; +import com.google.cloud.spanner.SessionClient.SessionOption; +import com.google.cloud.spanner.SessionImpl.SessionTransaction; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.cloud.spanner.spi.v1.SpannerRpc.Option; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.ByteString; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ExecuteSqlRequest.QueryMode; +import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; +import com.google.spanner.v1.MultiplexedSessionPrecommitToken; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.RequestOptions; +import 
com.google.spanner.v1.Transaction; +import com.google.spanner.v1.TransactionOptions; +import com.google.spanner.v1.TransactionSelector; +import java.util.Map; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.AtomicLong; +import java.util.logging.Logger; +import javax.annotation.Nullable; +import javax.annotation.concurrent.GuardedBy; + +/** + * Abstract base class for all {@link ReadContext}s + concrete implementations of read-only {@link + * ReadContext}s. + */ +abstract class AbstractReadContext + implements ReadContext, AbstractResultSet.Listener, SessionTransaction { + private static final Logger logger = Logger.getLogger(AbstractReadContext.class.getName()); + + abstract static class Builder, T extends AbstractReadContext> { + private SessionImpl session; + private boolean cancelQueryWhenClientIsClosed; + private SpannerRpc rpc; + private ISpan span; + private TraceWrapper tracer; + private int defaultPrefetchChunks = SpannerOptions.Builder.DEFAULT_PREFETCH_CHUNKS; + private QueryOptions defaultQueryOptions = SpannerOptions.Builder.DEFAULT_QUERY_OPTIONS; + private DecodeMode defaultDecodeMode = SpannerOptions.Builder.DEFAULT_DECODE_MODE; + private DirectedReadOptions defaultDirectedReadOption; + private ExecutorProvider executorProvider; + private Clock clock = Clock.INSTANCE; + + Builder() {} + + @SuppressWarnings("unchecked") + B self() { + return (B) this; + } + + B setSession(SessionImpl session) { + this.session = session; + return self(); + } + + B setCancelQueryWhenClientIsClosed(boolean cancelQueryWhenClientIsClosed) { + this.cancelQueryWhenClientIsClosed = cancelQueryWhenClientIsClosed; + return self(); + } + + B setRpc(SpannerRpc rpc) { + this.rpc = rpc; + return self(); + } + + B setSpan(ISpan span) { + this.span = span; + return self(); + } + + B setTracer(TraceWrapper tracer) { + this.tracer = tracer; + return self(); + } + + B setDefaultPrefetchChunks(int defaultPrefetchChunks) { + this.defaultPrefetchChunks = 
defaultPrefetchChunks; + return self(); + } + + B setDefaultQueryOptions(QueryOptions defaultQueryOptions) { + this.defaultQueryOptions = defaultQueryOptions; + return self(); + } + + B setDefaultDecodeMode(DecodeMode defaultDecodeMode) { + this.defaultDecodeMode = defaultDecodeMode; + return self(); + } + + B setExecutorProvider(ExecutorProvider executorProvider) { + this.executorProvider = executorProvider; + return self(); + } + + B setClock(Clock clock) { + this.clock = Preconditions.checkNotNull(clock); + return self(); + } + + B setDefaultDirectedReadOptions(DirectedReadOptions directedReadOptions) { + this.defaultDirectedReadOption = directedReadOptions; + return self(); + } + + abstract T build(); + } + + /** + * {@link AsyncResultSet} that supports adding listeners that are called when all rows from the + * underlying result stream have been fetched. + */ + interface ListenableAsyncResultSet extends AsyncResultSet { + /** Adds a listener to this {@link AsyncResultSet}. */ + void addListener(Runnable listener); + + void removeListener(Runnable listener); + } + + /** + * A {@code ReadContext} for standalone reads. This can only be used for a single operation, since + * each standalone read may see a different timestamp of Cloud Spanner data. 
+ */ + static class SingleReadContext extends AbstractReadContext { + static class Builder extends AbstractReadContext.Builder { + private TimestampBound bound; + + private Builder() {} + + Builder setTimestampBound(TimestampBound bound) { + this.bound = bound; + return self(); + } + + @Override + SingleReadContext build() { + return new SingleReadContext(this); + } + + SingleUseReadOnlyTransaction buildSingleUseReadOnlyTransaction() { + return new SingleUseReadOnlyTransaction(this); + } + } + + static Builder newBuilder() { + return new Builder(); + } + + final TimestampBound bound; + + @GuardedBy("lock") + private boolean used; + + private Map channelHint; + + private SingleReadContext(Builder builder) { + super(builder); + this.bound = builder.bound; + // single use transaction have a single RPC and hence there is no need + // of a channel hint. GAX will automatically choose a hint when used + // with a multiplexed session to perform a round-robin channel selection. We are + // passing a hint here to prefer random channel selection instead of doing GAX round-robin. + this.channelHint = + getChannelHintOptions( + session.getOptions(), ThreadLocalRandom.current().nextLong(Long.MAX_VALUE)); + } + + @Override + protected boolean isRouteToLeader() { + return false; + } + + @GuardedBy("lock") + @Override + void beforeReadOrQueryLocked() { + super.beforeReadOrQueryLocked(); + checkState(!used, "Cannot use a single-read ReadContext for multiple reads"); + used = true; + } + + @Override + @Nullable + TransactionSelector getTransactionSelector() { + if (bound.getMode() == TimestampBound.Mode.STRONG) { + // Default mode: no need to specify a transaction. 
+ return null; + } + return TransactionSelector.newBuilder() + .setSingleUse(TransactionOptions.newBuilder().setReadOnly(bound.toProto())) + .build(); + } + + @Override + Map getTransactionChannelHint() { + return channelHint; + } + + @Override + boolean prepareRetryOnDifferentGrpcChannel() { + if (session.getIsMultiplexed() && channelHint.get(Option.CHANNEL_HINT) != null) { + long channelHintForTransaction = Option.CHANNEL_HINT.getLong(channelHint) + 1L; + channelHint = optionMap(SessionOption.channelHint(channelHintForTransaction)); + return true; + } + return super.prepareRetryOnDifferentGrpcChannel(); + } + } + + private static void assertTimestampAvailable(boolean available) { + checkState(available, "Method can only be called after read has returned data or finished"); + } + + static class SingleUseReadOnlyTransaction extends SingleReadContext + implements ReadOnlyTransaction { + + @GuardedBy("lock") + private Timestamp timestamp; + + private SingleUseReadOnlyTransaction(SingleReadContext.Builder builder) { + super(builder); + } + + @Override + public Timestamp getReadTimestamp() { + synchronized (lock) { + assertTimestampAvailable(timestamp != null); + return timestamp; + } + } + + @Override + @Nullable + TransactionSelector getTransactionSelector() { + TransactionOptions.Builder options = TransactionOptions.newBuilder(); + bound.applyToBuilder(options.getReadOnlyBuilder()).setReturnReadTimestamp(true); + return TransactionSelector.newBuilder().setSingleUse(options).build(); + } + + @Override + public void onTransactionMetadata(Transaction transaction, boolean shouldIncludeId) { + synchronized (lock) { + if (!transaction.hasReadTimestamp()) { + throw newSpannerException( + ErrorCode.INTERNAL, "Missing expected transaction.read_timestamp metadata field"); + } + try { + timestamp = Timestamp.fromProto(transaction.getReadTimestamp()); + } catch (IllegalArgumentException e) { + throw newSpannerException( + ErrorCode.INTERNAL, "Bad value in 
transaction.read_timestamp metadata field", e); + } + } + } + } + + static class MultiUseReadOnlyTransaction extends AbstractReadContext + implements ReadOnlyTransaction { + static class Builder extends AbstractReadContext.Builder { + private TimestampBound bound; + private Timestamp timestamp; + private ByteString transactionId; + + private Builder() {} + + Builder setTimestampBound(TimestampBound bound) { + this.bound = bound; + return this; + } + + Builder setTimestamp(Timestamp timestamp) { + this.timestamp = timestamp; + return this; + } + + Builder setTransactionId(ByteString transactionId) { + this.transactionId = transactionId; + return this; + } + + @Override + MultiUseReadOnlyTransaction build() { + return new MultiUseReadOnlyTransaction(this); + } + } + + static Builder newBuilder() { + return new Builder(); + } + + private TimestampBound bound; + private final Object txnLock = new Object(); + + @GuardedBy("txnLock") + private Timestamp timestamp; + + @GuardedBy("txnLock") + private ByteString transactionId; + + private final Map channelHint; + + MultiUseReadOnlyTransaction(Builder builder) { + super(builder); + checkArgument( + !(builder.bound != null && builder.transactionId != null) + && !(builder.bound == null && builder.transactionId == null), + "Either TimestampBound or TransactionId must be specified"); + if (builder.bound != null) { + checkArgument( + builder.bound.getMode() != TimestampBound.Mode.MAX_STALENESS + && builder.bound.getMode() != TimestampBound.Mode.MIN_READ_TIMESTAMP, + "Bounded staleness mode %s is not supported for multi-use read-only transactions." 
+ + " Create a single-use read or read-only transaction instead.", + builder.bound.getMode()); + this.bound = builder.bound; + } else { + this.timestamp = builder.timestamp; + this.transactionId = builder.transactionId; + } + this.channelHint = + getChannelHintOptions( + session.getOptions(), ThreadLocalRandom.current().nextLong(Long.MAX_VALUE)); + } + + @Override + public Map getTransactionChannelHint() { + return channelHint; + } + + @Override + protected boolean isRouteToLeader() { + return false; + } + + @Override + void beforeReadOrQuery() { + super.beforeReadOrQuery(); + initTransaction(); + } + + @Override + @Nullable + TransactionSelector getTransactionSelector() { + // No need for synchronization: super.readInternal() is always preceded by a check of + // "transactionId" that provides a happens-before from initialization, and the value is never + // changed afterwards. + @SuppressWarnings("GuardedByChecker") + TransactionSelector selector = TransactionSelector.newBuilder().setId(transactionId).build(); + return selector; + } + + @Override + public Timestamp getReadTimestamp() { + synchronized (txnLock) { + assertTimestampAvailable(timestamp != null); + return timestamp; + } + } + + ByteString getTransactionId() { + synchronized (txnLock) { + return transactionId; + } + } + + @Override + public void close() { + ByteString id = getTransactionId(); + if (id != null && !id.isEmpty()) { + rpc.clearTransactionAffinity(id); + } + super.close(); + } + + /** + * Initializes the transaction with the timestamp specified within MultiUseReadOnlyTransaction. + * This is used only for fallback of PartitionQueryRequest and PartitionReadRequest with + * Multiplexed Session. 
+ */ + void initFallbackTransaction() { + synchronized (txnLock) { + span.addAnnotation("Creating Transaction"); + TransactionOptions.Builder options = TransactionOptions.newBuilder(); + if (timestamp != null) { + options + .getReadOnlyBuilder() + .setReadTimestamp(timestamp.toProto()) + .setReturnReadTimestamp(true); + } else { + bound.applyToBuilder(options.getReadOnlyBuilder()).setReturnReadTimestamp(true); + } + final BeginTransactionRequest request = + BeginTransactionRequest.newBuilder() + .setSession(session.getName()) + .setOptions(options) + .build(); + initTransactionInternal(request); + } + } + + void initTransaction() { + SessionImpl.throwIfTransactionsPending(); + + // Since we only support synchronous calls, just block on "txnLock" while the RPC is in + // flight. Note that we use the strategy of sending an explicit BeginTransaction() RPC, + // rather than using the first read in the transaction to begin it implicitly. The chosen + // strategy is sub-optimal in the case of the first read being fast, as it incurs an extra + // RTT, but optimal if the first read is slow. As the client library is now using streaming + // reads, a possible optimization could be to use the first read in the transaction to begin + // it implicitly. 
+ synchronized (txnLock) { + if (transactionId != null) { + return; + } + span.addAnnotation("Creating Transaction"); + TransactionOptions.Builder options = TransactionOptions.newBuilder(); + bound.applyToBuilder(options.getReadOnlyBuilder()).setReturnReadTimestamp(true); + final BeginTransactionRequest request = + BeginTransactionRequest.newBuilder() + .setSession(session.getName()) + .setOptions(options) + .build(); + initTransactionInternal(request); + } + } + + private void initTransactionInternal(BeginTransactionRequest request) { + try { + Transaction transaction = + rpc.beginTransaction(request, getTransactionChannelHint(), isRouteToLeader()); + if (!transaction.hasReadTimestamp()) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INTERNAL, "Missing expected transaction.read_timestamp metadata field"); + } + if (transaction.getId().isEmpty()) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INTERNAL, "Missing expected transaction.id metadata field"); + } + try { + timestamp = Timestamp.fromProto(transaction.getReadTimestamp()); + } catch (IllegalArgumentException e) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INTERNAL, "Bad value in transaction.read_timestamp metadata field", e); + } + transactionId = transaction.getId(); + span.addAnnotation( + "Transaction Creation Done", + ImmutableMap.of( + "Id", transaction.getId().toStringUtf8(), "Timestamp", timestamp.toString())); + } catch (SpannerException e) { + span.addAnnotation("Transaction Creation Failed", e); + throw e; + } + } + } + + final Object lock = new Object(); + final SessionImpl session; + final boolean cancelQueryWhenClientIsClosed; + final SpannerRpc rpc; + final ExecutorProvider executorProvider; + ISpan span; + TraceWrapper tracer; + private final int defaultPrefetchChunks; + private final QueryOptions defaultQueryOptions; + private final DirectedReadOptions defaultDirectedReadOptions; + private final DecodeMode defaultDecodeMode; + 
private final Clock clock; + + @GuardedBy("lock") + private boolean isValid = true; + + @GuardedBy("lock") + protected boolean isClosed = false; + + // A per-transaction sequence number used to identify this ExecuteSqlRequests. Required for DML, + // ignored for query by the server. + private final AtomicLong seqNo = new AtomicLong(); + + // Allow up to 512MB to be buffered (assuming 1MB chunks). In practice, restart tokens are sent + // much more frequently. + private static final int MAX_BUFFERED_CHUNKS = 512; + + protected static final String NO_TRANSACTION_RETURNED_MSG = + "The statement did not return a transaction even though one was requested"; + + AbstractReadContext(Builder builder) { + this.session = builder.session; + this.cancelQueryWhenClientIsClosed = builder.cancelQueryWhenClientIsClosed; + this.rpc = builder.rpc; + this.defaultPrefetchChunks = builder.defaultPrefetchChunks; + this.defaultQueryOptions = builder.defaultQueryOptions; + this.defaultDirectedReadOptions = builder.defaultDirectedReadOption; + this.defaultDecodeMode = builder.defaultDecodeMode; + this.span = builder.span; + this.executorProvider = builder.executorProvider; + this.clock = builder.clock; + this.tracer = builder.tracer; + } + + @Override + public void setSpan(ISpan span) { + this.span = span; + } + + long getSeqNo() { + return seqNo.incrementAndGet(); + } + + protected boolean isReadOnly() { + return true; + } + + protected boolean isRouteToLeader() { + return false; + } + + @Override + public final ResultSet read( + String table, KeySet keys, Iterable columns, ReadOption... options) { + return readInternal(table, null, keys, columns, options); + } + + @Override + public ListenableAsyncResultSet readAsync( + String table, KeySet keys, Iterable columns, ReadOption... options) { + Options readOptions = Options.fromReadOptions(options); + final int bufferRows = + readOptions.hasBufferRows() + ? 
readOptions.bufferRows() + : AsyncResultSetImpl.DEFAULT_BUFFER_SIZE; + return new AsyncResultSetImpl( + executorProvider, readInternal(table, null, keys, columns, options), bufferRows); + } + + @Override + public final ResultSet readUsingIndex( + String table, String index, KeySet keys, Iterable columns, ReadOption... options) { + return readInternal(table, checkNotNull(index), keys, columns, options); + } + + @Override + public ListenableAsyncResultSet readUsingIndexAsync( + String table, String index, KeySet keys, Iterable columns, ReadOption... options) { + Options readOptions = Options.fromReadOptions(options); + final int bufferRows = + readOptions.hasBufferRows() + ? readOptions.bufferRows() + : AsyncResultSetImpl.DEFAULT_BUFFER_SIZE; + return new AsyncResultSetImpl( + executorProvider, + readInternal(table, checkNotNull(index), keys, columns, options), + bufferRows); + } + + @Nullable + @Override + public final Struct readRow(String table, Key key, Iterable columns) { + try (ResultSet resultSet = read(table, KeySet.singleKey(key), columns)) { + return consumeSingleRow(resultSet); + } + } + + @Override + public final ApiFuture readRowAsync(String table, Key key, Iterable columns) { + try (AsyncResultSet resultSet = readAsync(table, KeySet.singleKey(key), columns)) { + return consumeSingleRowAsync(resultSet); + } + } + + @Nullable + @Override + public final Struct readRowUsingIndex( + String table, String index, Key key, Iterable columns) { + try (ResultSet resultSet = readUsingIndex(table, index, KeySet.singleKey(key), columns)) { + return consumeSingleRow(resultSet); + } + } + + @Override + public final ApiFuture readRowUsingIndexAsync( + String table, String index, Key key, Iterable columns) { + try (AsyncResultSet resultSet = + readUsingIndexAsync(table, index, KeySet.singleKey(key), columns)) { + return consumeSingleRowAsync(resultSet); + } + } + + @Override + public final ResultSet executeQuery(Statement statement, QueryOption... 
options) { + return executeQueryInternal( + statement, com.google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL, options); + } + + @Override + public ListenableAsyncResultSet executeQueryAsync(Statement statement, QueryOption... options) { + Options readOptions = Options.fromQueryOptions(options); + final int bufferRows = + readOptions.hasBufferRows() + ? readOptions.bufferRows() + : AsyncResultSetImpl.DEFAULT_BUFFER_SIZE; + return new AsyncResultSetImpl( + executorProvider, + executeQueryInternal( + statement, com.google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL, options), + bufferRows); + } + + @Override + public final ResultSet analyzeQuery(Statement statement, QueryAnalyzeMode readContextQueryMode) { + switch (readContextQueryMode) { + case PROFILE: + return executeQueryInternal( + statement, com.google.spanner.v1.ExecuteSqlRequest.QueryMode.PROFILE); + case PLAN: + return executeQueryInternal( + statement, com.google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN); + default: + throw new IllegalStateException( + "Unknown value for QueryAnalyzeMode : " + readContextQueryMode); + } + } + + private ResultSet executeQueryInternal( + Statement statement, + com.google.spanner.v1.ExecuteSqlRequest.QueryMode queryMode, + QueryOption... options) { + Options queryOptions = Options.fromQueryOptions(options); + return executeQueryInternalWithOptions( + statement, queryMode, queryOptions, null /*partitionToken*/); + } + + /** + * Determines the {@link QueryOptions} to use for a query. This is determined using the following + * precedence: + * + *
    + * <ol>
  + *   <li>Specific {@link QueryOptions} passed in for this query.
  + *   <li>Any value specified in a valid environment variable when the {@link SpannerOptions}
  + *       instance was created.
  + *   <li>The default {@link SpannerOptions#getDefaultQueryOptions(DatabaseId)} specified for
  + *       the database where the query is executed.
  + * </ol>
+ */ + @VisibleForTesting + QueryOptions buildQueryOptions(QueryOptions requestOptions) { + // Shortcut for the most common return value. + if (requestOptions == null) { + return defaultQueryOptions; + } + return defaultQueryOptions.toBuilder().mergeFrom(requestOptions).build(); + } + + RequestOptions buildRequestOptions(Options options) { + RequestOptions.Builder builder = options.toRequestOptionsProto(false).toBuilder(); + RequestOptions.ClientContext defaultClientContext = + session.getSpanner().getOptions().getClientContext(); + if (defaultClientContext != null) { + RequestOptions.ClientContext.Builder clientContextBuilder = defaultClientContext.toBuilder(); + if (builder.hasClientContext()) { + clientContextBuilder.mergeFrom(builder.getClientContext()); + } + builder.setClientContext(clientContextBuilder.build()); + } + if (getTransactionTag() != null) { + builder.setTransactionTag(getTransactionTag()); + } + return builder.build(); + } + + ExecuteSqlRequest.Builder getExecuteSqlRequestBuilder( + Statement statement, QueryMode queryMode, Options options, boolean withTransactionSelector) { + ExecuteSqlRequest.Builder builder = + ExecuteSqlRequest.newBuilder() + .setSql(statement.getSql()) + .setQueryMode(queryMode) + .setSession(session.getName()); + addParameters(builder, statement.getParameters()); + if (withTransactionSelector) { + TransactionSelector selector = getTransactionSelector(); + if (selector != null) { + builder.setTransaction(selector); + } + } + if (options.hasDataBoostEnabled()) { + builder.setDataBoostEnabled(options.dataBoostEnabled()); + } + if (options.hasDirectedReadOptions()) { + builder.setDirectedReadOptions(options.directedReadOptions()); + } else if (defaultDirectedReadOptions != null) { + builder.setDirectedReadOptions(defaultDirectedReadOptions); + } + if (!isReadOnly()) { + builder.setSeqno(getSeqNo()); + } + if (options.hasLastStatement()) { + builder.setLastStatement(options.isLastStatement()); + } + 
builder.setQueryOptions(buildQueryOptions(statement.getQueryOptions())); + builder.setRequestOptions(buildRequestOptions(options)); + return builder; + } + + static void addParameters(ExecuteSqlRequest.Builder builder, Map stmtParameters) { + if (!stmtParameters.isEmpty()) { + com.google.protobuf.Struct.Builder paramsBuilder = builder.getParamsBuilder(); + for (Map.Entry param : stmtParameters.entrySet()) { + paramsBuilder.putFields(param.getKey(), Value.toProto(param.getValue())); + if (param.getValue() != null && param.getValue().getType() != null) { + builder.putParamTypes(param.getKey(), param.getValue().getType().toProto()); + } + } + } + } + + ExecuteBatchDmlRequest.Builder getExecuteBatchDmlRequestBuilder( + Iterable statements, Options options) { + ExecuteBatchDmlRequest.Builder builder = + ExecuteBatchDmlRequest.newBuilder().setSession(session.getName()); + int idx = 0; + for (Statement stmt : statements) { + builder.addStatementsBuilder(); + builder.getStatementsBuilder(idx).setSql(stmt.getSql()); + Map stmtParameters = stmt.getParameters(); + if (!stmtParameters.isEmpty()) { + com.google.protobuf.Struct.Builder paramsBuilder = + builder.getStatementsBuilder(idx).getParamsBuilder(); + for (Map.Entry param : stmtParameters.entrySet()) { + paramsBuilder.putFields(param.getKey(), Value.toProto(param.getValue())); + if (param.getValue() != null && param.getValue().getType() != null) { + builder + .getStatementsBuilder(idx) + .putParamTypes(param.getKey(), param.getValue().getType().toProto()); + } + } + } + idx++; + } + + TransactionSelector selector = getTransactionSelector(); + if (selector != null) { + builder.setTransaction(selector); + } + if (options.hasLastStatement()) { + builder.setLastStatements(options.isLastStatement()); + } + builder.setSeqno(getSeqNo()); + builder.setRequestOptions(buildRequestOptions(options)); + return builder; + } + + ResultSet executeQueryInternalWithOptions( + final Statement statement, + final 
com.google.spanner.v1.ExecuteSqlRequest.QueryMode queryMode, + final Options options, + final ByteString partitionToken) { + beforeReadOrQuery(); + final int prefetchChunks = + options.hasPrefetchChunks() ? options.prefetchChunks() : defaultPrefetchChunks; + final ExecuteSqlRequest.Builder request = + getExecuteSqlRequestBuilder( + statement, queryMode, options, /* withTransactionSelector= */ false); + ResumableStreamIterator stream = + new ResumableStreamIterator( + MAX_BUFFERED_CHUNKS, + SpannerImpl.QUERY, + span, + tracer, + tracer.createStatementAttributes(statement, options), + session.getErrorHandler(), + rpc.getExecuteQueryRetrySettings(), + rpc.getExecuteQueryRetryableCodes(), + session.getRequestIdCreator()) { + @Override + CloseableIterator startStream( + @Nullable ByteString resumeToken, + AsyncResultSet.StreamMessageListener streamListener, + XGoogSpannerRequestId requestId) { + GrpcStreamIterator stream = + new GrpcStreamIterator( + statement, + request.getLastStatement(), + prefetchChunks, + cancelQueryWhenClientIsClosed); + if (streamListener != null) { + stream.registerListener(streamListener); + } + if (partitionToken != null) { + request.setPartitionToken(partitionToken); + } + TransactionSelector selector = null; + if (resumeToken != null) { + request.setResumeToken(resumeToken); + selector = getTransactionSelector(); + } else if (!request.hasTransaction()) { + selector = getTransactionSelector(); + } + if (selector != null) { + request.setTransaction(selector); + } + SpannerRpc.StreamingCall call = + rpc.executeQuery( + request.build(), + stream.consumer(), + getTransactionChannelHint(), + requestId, + isRouteToLeader()); + session.markUsed(clock.instant()); + stream.setCall(call, request.getTransaction().hasBegin()); + return stream; + } + + @Override + boolean prepareIteratorForRetryOnDifferentGrpcChannel() { + return AbstractReadContext.this.prepareRetryOnDifferentGrpcChannel(); + } + }; + return new GrpcResultSet( + stream, this, 
options.hasDecodeMode() ? options.decodeMode() : defaultDecodeMode); + } + + static Map getChannelHintOptions( + Map channelHintForSession, Long channelHintForTransaction) { + if (channelHintForSession != null) { + return channelHintForSession; + } else if (channelHintForTransaction != null) { + return optionMap(SessionOption.channelHint(channelHintForTransaction)); + } + return null; + } + + /** + * Called before any read or query is started to perform state checks and initializations. + * Subclasses should call {@code super.beforeReadOrQuery()} if overriding. + */ + void beforeReadOrQuery() { + synchronized (lock) { + beforeReadOrQueryLocked(); + } + } + + /** Called as part of {@link #beforeReadOrQuery()} under {@link #lock}. */ + @GuardedBy("lock") + void beforeReadOrQueryLocked() { + // Note that transactions are invalidated under some circumstances on the backend, but we + // implement the check more strictly here to encourage coding to contract rather than the + // implementation. + checkState(isValid, "Context has been invalidated by a new operation on the session"); + checkState(!isClosed, "Context has been closed"); + } + + /** Invalidates the context since another context has been created more recently. */ + @Override + public final void invalidate() { + synchronized (lock) { + isValid = false; + } + } + + @Override + public void close() { + session.onTransactionDone(); + span.end(); + synchronized (lock) { + isClosed = true; + } + } + + /** + * Returns the {@link TransactionSelector} that should be used for a statement that is executed on + * this read context. This could be a reference to an existing transaction ID, or it could be a + * BeginTransaction option that should be included with the statement. + */ + @Nullable + abstract TransactionSelector getTransactionSelector(); + + /** + * Channel hint to be used for a transaction. This enables soft-stickiness per transaction by + * ensuring all RPCs within a transaction land up on the same channel. 
   */
  abstract Map getTransactionChannelHint();

  // NOTE(review): generic type arguments (e.g. Map<SpannerRpc.Option, ?>, Iterable<String>,
  // SettableApiFuture<Struct>) appear to have been stripped from this extract; the raw types
  // are preserved unchanged throughout — confirm against the upstream source.

  // Whether a retry after a failure should be prepared on a different gRPC channel.
  // Default is false (retry on the same channel); subclasses may override.
  boolean prepareRetryOnDifferentGrpcChannel() {
    return false;
  }

  /**
   * Returns the transaction tag for this {@link AbstractReadContext} or null if this
   * {@link AbstractReadContext} does not have a transaction tag.
   */
  @Nullable
  String getTransactionTag() {
    return null;
  }

  /** This method is called when a statement returned a new transaction as part of its results. */
  @Override
  public void onTransactionMetadata(Transaction transaction, boolean shouldIncludeId) {}

  /** Forwards stream errors to the owning session and returns the error unchanged. */
  @Override
  public SpannerException onError(
      SpannerException e, boolean withBeginTransaction, boolean lastStatement) {
    this.session.onError(e);
    return e;
  }

  /** Notifies the owning session that the read has finished. */
  @Override
  public void onDone(boolean withBeginTransaction) {
    this.session.onReadDone();
  }

  /**
   * For transactions other than read-write, the MultiplexedSessionPrecommitToken will not be
   * present in the RPC response. In such cases, this method will be a no-op.
   */
  @Override
  public void onPrecommitToken(MultiplexedSessionPrecommitToken token) {}

  /**
   * Executes a read over {@code table} (optionally via {@code index}) for the given keys and
   * columns. Thin wrapper around {@link #readInternalWithOptions} with no partition token.
   */
  private ResultSet readInternal(
      String table,
      @Nullable String index,
      KeySet keys,
      Iterable columns,
      ReadOption... options) {
    Options readOptions = Options.fromReadOptions(options);
    return readInternalWithOptions(
        table, index, keys, columns, readOptions, null /*partitionToken*/);
  }

  /**
   * Builds a {@link ReadRequest} from the given arguments and options and executes it as a
   * resumable streaming read.
   *
   * <p>The returned {@link GrpcResultSet} wraps a {@link ResumableStreamIterator} whose
   * {@code startStream} callback (re)issues the RPC, attaching a resume token and a transaction
   * selector as needed, so the read can transparently resume after transient stream failures.
   *
   * @param partitionToken token for a partitioned read, or null for a normal read
   */
  ResultSet readInternalWithOptions(
      String table,
      @Nullable String index,
      KeySet keys,
      Iterable columns,
      final Options readOptions,
      ByteString partitionToken) {
    beforeReadOrQuery();
    final ReadRequest.Builder builder =
        ReadRequest.newBuilder()
            .setSession(session.getName())
            .setTable(checkNotNull(table))
            .addAllColumns(columns);
    if (readOptions.hasLimit()) {
      builder.setLimit(readOptions.limit());
    }

    keys.appendToProto(builder.getKeySetBuilder());
    if (index != null) {
      builder.setIndex(index);
    }
    if (partitionToken != null) {
      builder.setPartitionToken(partitionToken);
    }
    if (readOptions.hasDataBoostEnabled()) {
      builder.setDataBoostEnabled(readOptions.dataBoostEnabled());
    }
    if (readOptions.hasOrderBy()) {
      builder.setOrderBy(readOptions.orderBy());
    }
    // Per-request directed-read options take precedence over the client-wide default.
    if (readOptions.hasDirectedReadOptions()) {
      builder.setDirectedReadOptions(readOptions.directedReadOptions());
    } else if (defaultDirectedReadOptions != null) {
      builder.setDirectedReadOptions(defaultDirectedReadOptions);
    }
    if (readOptions.hasLockHint()) {
      if (isReadOnly()) {
        // Lock hints are ignored (with a warning) on read-only transactions.
        logger.warning(
            "Lock hint is only supported for ReadWrite transactions. "
                + "Overriding lock hint to default unspecified.");
      } else {
        builder.setLockHint(readOptions.lockHint());
      }
    }
    final int prefetchChunks =
        readOptions.hasPrefetchChunks() ? readOptions.prefetchChunks() : defaultPrefetchChunks;
    final boolean lastStatement =
        readOptions.hasLastStatement() ? readOptions.isLastStatement() : false;
    ResumableStreamIterator stream =
        new ResumableStreamIterator(
            MAX_BUFFERED_CHUNKS,
            SpannerImpl.READ,
            span,
            tracer,
            tracer.createTableAttributes(table, readOptions),
            session.getErrorHandler(),
            rpc.getReadRetrySettings(),
            rpc.getReadRetryableCodes(),
            session.getRequestIdCreator()) {
          @Override
          CloseableIterator startStream(
              @Nullable ByteString resumeToken,
              AsyncResultSet.StreamMessageListener streamListener,
              XGoogSpannerRequestId requestId) {
            GrpcStreamIterator stream =
                new GrpcStreamIterator(
                    lastStatement, prefetchChunks, cancelQueryWhenClientIsClosed);
            if (streamListener != null) {
              stream.registerListener(streamListener);
            }
            TransactionSelector selector = null;
            if (resumeToken != null) {
              // Resuming after a broken stream: continue from the last received token.
              builder.setResumeToken(resumeToken);
              selector = getTransactionSelector();
            } else if (!builder.hasTransaction()) {
              // First attempt without a transaction on the request yet.
              selector = getTransactionSelector();
            }
            if (selector != null) {
              builder.setTransaction(selector);
            }
            builder.setRequestOptions(buildRequestOptions(readOptions));
            SpannerRpc.StreamingCall call =
                rpc.read(
                    builder.build(),
                    stream.consumer(),
                    getTransactionChannelHint(),
                    requestId,
                    isRouteToLeader());
            session.markUsed(clock.instant());
            // The request inlines BeginTransaction iff the selector carries a 'begin'.
            stream.setCall(call, /* withBeginTransaction= */ builder.getTransaction().hasBegin());
            return stream;
          }

          @Override
          boolean prepareIteratorForRetryOnDifferentGrpcChannel() {
            return AbstractReadContext.this.prepareRetryOnDifferentGrpcChannel();
          }
        };
    return new GrpcResultSet(
        stream, this, readOptions.hasDecodeMode() ? readOptions.decodeMode() : defaultDecodeMode);
  }

  /**
   * Returns the single row of {@code resultSet}, or null when it is empty. Throws INTERNAL if
   * more than one row is returned.
   */
  private Struct consumeSingleRow(ResultSet resultSet) {
    if (!resultSet.next()) {
      return null;
    }
    Struct row = resultSet.getCurrentRowAsStruct();
    if (resultSet.next()) {
      throw newSpannerException(ErrorCode.INTERNAL, "Multiple rows returned for single key");
    }
    return row;
  }

  /** Async variant of {@code consumeSingleRow}: resolves to the single row or null. */
  static ApiFuture consumeSingleRowAsync(AsyncResultSet resultSet) {
    final SettableApiFuture result = SettableApiFuture.create();
    // We can safely use a directExecutor here, as we will only be consuming one row, and we will
    // not be doing any blocking stuff in the handler.
    final SettableApiFuture row = SettableApiFuture.create();
    ApiFutures.addCallback(
        resultSet.setCallback(MoreExecutors.directExecutor(), ConsumeSingleRowCallback.create(row)),
        new ApiFutureCallback() {
          @Override
          public void onFailure(Throwable t) {
            result.setException(t);
          }

          @Override
          public void onSuccess(Void input) {
            try {
              result.set(row.get());
            } catch (Throwable t) {
              result.setException(t);
            }
          }
        },
        MoreExecutors.directExecutor());
    return result;
  }

  /**
   * {@link ReadyCallback} for returning the first row in a result set as a future {@link Struct}.
+ */ + private static class ConsumeSingleRowCallback implements ReadyCallback { + private final SettableApiFuture result; + private Struct row; + + static ConsumeSingleRowCallback create(SettableApiFuture result) { + return new ConsumeSingleRowCallback(result); + } + + private ConsumeSingleRowCallback(SettableApiFuture result) { + this.result = result; + } + + @Override + public CallbackResponse cursorReady(AsyncResultSet resultSet) { + try { + switch (resultSet.tryNext()) { + case DONE: + result.set(row); + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + if (row != null) { + throw newSpannerException( + ErrorCode.INTERNAL, "Multiple rows returned for single key"); + } + row = resultSet.getCurrentRowAsStruct(); + return CallbackResponse.CONTINUE; + default: + throw new IllegalStateException(); + } + } catch (Throwable t) { + result.setException(t); + return CallbackResponse.DONE; + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractResultSet.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractResultSet.java new file mode 100644 index 000000000000..0717cae74f25 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractResultSet.java @@ -0,0 +1,562 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException; + +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ListValue; +import com.google.protobuf.ProtocolMessageEnum; +import com.google.protobuf.Value.KindCase; +import com.google.spanner.v1.MultiplexedSessionPrecommitToken; +import com.google.spanner.v1.Transaction; +import java.io.IOException; +import java.io.Serializable; +import java.math.BigDecimal; +import java.util.AbstractList; +import java.util.Base64; +import java.util.BitSet; +import java.util.Iterator; +import java.util.List; +import java.util.Objects; +import java.util.UUID; +import java.util.function.Function; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; + +/** Implementation of {@link ResultSet}. */ +abstract class AbstractResultSet extends AbstractStructReader implements ResultSet { + + interface Listener { + /** + * Called when transaction metadata is seen. This method may be invoked at most once. If the + * method is invoked, it will precede {@link #onError(SpannerException,boolean)} or {@link + * #onDone(boolean)}. + */ + void onTransactionMetadata(Transaction transaction, boolean shouldIncludeId) + throws SpannerException; + + /** Called when the read finishes with an error. Returns the error that should be thrown. */ + SpannerException onError( + SpannerException e, boolean withBeginTransaction, boolean lastStatement); + + /** Called when the read finishes normally. */ + void onDone(boolean withBeginTransaction); + + /** + * Called when the RPC response contains a MultiplexedSessionPrecommitToken. A precommit token + * will be included if the read-write transaction is executed on a multiplexed session. 
+ */ + void onPrecommitToken(MultiplexedSessionPrecommitToken token); + } + + static final class LazyByteArray implements Serializable { + private static final Base64.Encoder ENCODER = Base64.getEncoder(); + private static final Base64.Decoder DECODER = Base64.getDecoder(); + private final String base64String; + private transient AbstractLazyInitializer byteArray; + + LazyByteArray(@Nonnull String base64String) { + this.base64String = Preconditions.checkNotNull(base64String); + this.byteArray = defaultInitializer(); + } + + LazyByteArray(@Nonnull ByteArray byteArray) { + this.base64String = + ENCODER.encodeToString(Preconditions.checkNotNull(byteArray).toByteArray()); + this.byteArray = + new AbstractLazyInitializer() { + @Override + protected ByteArray initialize() { + return byteArray; + } + }; + } + + private AbstractLazyInitializer defaultInitializer() { + return new AbstractLazyInitializer() { + @Override + protected ByteArray initialize() { + return ByteArray.copyFrom(DECODER.decode(base64String)); + } + }; + } + + private void readObject(java.io.ObjectInputStream in) + throws IOException, ClassNotFoundException { + in.defaultReadObject(); + byteArray = defaultInitializer(); + } + + ByteArray getByteArray() { + try { + return byteArray.get(); + } catch (Throwable t) { + throw SpannerExceptionFactory.asSpannerException(t); + } + } + + String getBase64String() { + return base64String; + } + + @Override + public String toString() { + return getBase64String(); + } + + @Override + public int hashCode() { + return base64String.hashCode(); + } + + @Override + public boolean equals(Object o) { + if (o instanceof LazyByteArray) { + return lazyByteArraysEqual((LazyByteArray) o); + } + return false; + } + + private boolean lazyByteArraysEqual(LazyByteArray other) { + return Objects.equals(getBase64String(), other.getBase64String()); + } + } + + @VisibleForTesting + interface CloseableIterator extends Iterator { + + /** + * Closes the iterator, freeing any underlying 
     * resources.
     *
     * @param message a message to include in the final RPC status
     */
    void close(@Nullable String message);

    boolean isWithBeginTransaction();

    boolean isLastStatement();

    /**
     * @param streamMessageListener A class object which implements StreamMessageListener
     * @return true if streaming is supported by the iterator, otherwise false
     */
    default boolean initiateStreaming(AsyncResultSet.StreamMessageListener streamMessageListener) {
      return false;
    }

    /** Requests the initial prefetch chunks from the gRPC stream. */
    default void requestPrefetchChunks() {}
  }

  /**
   * Decodes a protobuf {@link com.google.protobuf.Value} into a FLOAT64. Accepts either a
   * NUMBER_VALUE, or a STRING_VALUE that is one of "Infinity", "-Infinity" or "NaN" (the wire
   * encoding of non-finite doubles); anything else is an INTERNAL error.
   */
  static double valueProtoToFloat64(com.google.protobuf.Value proto) {
    if (proto.getKindCase() == KindCase.STRING_VALUE) {
      switch (proto.getStringValue()) {
        case "-Infinity":
          return Double.NEGATIVE_INFINITY;
        case "Infinity":
          return Double.POSITIVE_INFINITY;
        case "NaN":
          return Double.NaN;
        default:
          // Fall-through to handling below to produce an error.
      }
    }
    if (proto.getKindCase() != KindCase.NUMBER_VALUE) {
      throw newSpannerException(
          ErrorCode.INTERNAL,
          "Invalid value for column type "
              + Type.float64()
              + " expected NUMBER_VALUE or STRING_VALUE with value one of"
              + " \"Infinity\", \"-Infinity\", or \"NaN\" but was "
              + proto.getKindCase()
              + (proto.getKindCase() == KindCase.STRING_VALUE
                  ? " with value \"" + proto.getStringValue() + "\""
                  : ""));
    }
    return proto.getNumberValue();
  }

  /**
   * Decodes a protobuf {@link com.google.protobuf.Value} into a FLOAT32. Same contract as
   * {@link #valueProtoToFloat64}, narrowed to float.
   */
  static float valueProtoToFloat32(com.google.protobuf.Value proto) {
    if (proto.getKindCase() == KindCase.STRING_VALUE) {
      switch (proto.getStringValue()) {
        case "-Infinity":
          return Float.NEGATIVE_INFINITY;
        case "Infinity":
          return Float.POSITIVE_INFINITY;
        case "NaN":
          return Float.NaN;
        default:
          // Fall-through to handling below to produce an error.
      }
    }
    if (proto.getKindCase() != KindCase.NUMBER_VALUE) {
      throw newSpannerException(
          ErrorCode.INTERNAL,
          "Invalid value for column type "
              + Type.float32()
              + " expected NUMBER_VALUE or STRING_VALUE with value one of"
              + " \"Infinity\", \"-Infinity\", or \"NaN\" but was "
              + proto.getKindCase()
              + (proto.getKindCase() == KindCase.STRING_VALUE
                  ? " with value \"" + proto.getStringValue() + "\""
                  : ""));
    }
    return (float) proto.getNumberValue();
  }

  // Always throws; the declared return type lets callers write `throw throwNotNull(i);` so the
  // compiler sees the statement as terminating.
  static NullPointerException throwNotNull(int columnIndex) {
    throw new NullPointerException(
        "Cannot call array getter for column " + columnIndex + " with null elements");
  }

  /**
   * Memory-optimized base class for {@code ARRAY<INT64>}, {@code ARRAY<FLOAT32>} and {@code
   * ARRAY<FLOAT64>} types. All of these involve conversions from the type yielded by JSON parsing,
   * which are {@code String} and {@code BigDecimal} respectively. Rather than construct new wrapper
   * objects for each array element, we use primitive arrays and a {@code BitSet} to track nulls.
+ */ + abstract static class PrimitiveArray extends AbstractList { + private final A data; + private final BitSet nulls; + private final int size; + + PrimitiveArray(ListValue protoList) { + this.size = protoList.getValuesCount(); + A data = newArray(size); + BitSet nulls = new BitSet(size); + for (int i = 0; i < protoList.getValuesCount(); ++i) { + if (protoList.getValues(i).getKindCase() == KindCase.NULL_VALUE) { + nulls.set(i); + } else { + setProto(data, i, protoList.getValues(i)); + } + } + this.data = data; + this.nulls = nulls; + } + + PrimitiveArray(A data, BitSet nulls, int size) { + this.data = data; + this.nulls = nulls; + this.size = size; + } + + abstract A newArray(int size); + + abstract void setProto(A array, int i, com.google.protobuf.Value protoValue); + + abstract T get(A array, int i); + + @Override + public T get(int index) { + if (index < 0 || index >= size) { + throw new ArrayIndexOutOfBoundsException("index=" + index + " size=" + size); + } + return nulls.get(index) ? 
null : get(data, index); + } + + @Override + public int size() { + return size; + } + + A toPrimitiveArray(int columnIndex) { + if (nulls.length() > 0) { + throw throwNotNull(columnIndex); + } + A r = newArray(size); + System.arraycopy(data, 0, r, 0, size); + return r; + } + } + + static class Int64Array extends PrimitiveArray { + Int64Array(ListValue protoList) { + super(protoList); + } + + Int64Array(long[] data, BitSet nulls) { + super(data, nulls, data.length); + } + + @Override + long[] newArray(int size) { + return new long[size]; + } + + @Override + void setProto(long[] array, int i, com.google.protobuf.Value protoValue) { + array[i] = Long.parseLong(protoValue.getStringValue()); + } + + @Override + Long get(long[] array, int i) { + return array[i]; + } + } + + static class Float32Array extends PrimitiveArray { + Float32Array(ListValue protoList) { + super(protoList); + } + + Float32Array(float[] data, BitSet nulls) { + super(data, nulls, data.length); + } + + @Override + float[] newArray(int size) { + return new float[size]; + } + + @Override + void setProto(float[] array, int i, com.google.protobuf.Value protoValue) { + array[i] = valueProtoToFloat32(protoValue); + } + + @Override + Float get(float[] array, int i) { + return array[i]; + } + } + + static class Float64Array extends PrimitiveArray { + Float64Array(ListValue protoList) { + super(protoList); + } + + Float64Array(double[] data, BitSet nulls) { + super(data, nulls, data.length); + } + + @Override + double[] newArray(int size) { + return new double[size]; + } + + @Override + void setProto(double[] array, int i, com.google.protobuf.Value protoValue) { + array[i] = valueProtoToFloat64(protoValue); + } + + @Override + Double get(double[] array, int i) { + return array[i]; + } + } + + protected abstract GrpcStruct currRow(); + + @Override + public Struct getCurrentRowAsStruct() { + return currRow().immutableCopy(); + } + + @Override + protected boolean getBooleanInternal(int columnIndex) { + return 
currRow().getBooleanInternal(columnIndex); + } + + @Override + protected long getLongInternal(int columnIndex) { + return currRow().getLongInternal(columnIndex); + } + + @Override + protected float getFloatInternal(int columnIndex) { + return currRow().getFloatInternal(columnIndex); + } + + @Override + protected double getDoubleInternal(int columnIndex) { + return currRow().getDoubleInternal(columnIndex); + } + + @Override + protected BigDecimal getBigDecimalInternal(int columnIndex) { + return currRow().getBigDecimalInternal(columnIndex); + } + + @Override + protected String getStringInternal(int columnIndex) { + return currRow().getStringInternal(columnIndex); + } + + @Override + protected T getProtoMessageInternal(int columnIndex, T message) { + return currRow().getProtoMessageInternal(columnIndex, message); + } + + @Override + protected T getProtoEnumInternal( + int columnIndex, Function method) { + return currRow().getProtoEnumInternal(columnIndex, method); + } + + @Override + protected String getJsonInternal(int columnIndex) { + return currRow().getJsonInternal(columnIndex); + } + + @Override + protected String getPgJsonbInternal(int columnIndex) { + return currRow().getPgJsonbInternal(columnIndex); + } + + @Override + protected ByteArray getBytesInternal(int columnIndex) { + return currRow().getBytesInternal(columnIndex); + } + + @Override + protected Timestamp getTimestampInternal(int columnIndex) { + return currRow().getTimestampInternal(columnIndex); + } + + @Override + protected Date getDateInternal(int columnIndex) { + return currRow().getDateInternal(columnIndex); + } + + @Override + protected UUID getUuidInternal(int columnIndex) { + return currRow().getUuidInternal(columnIndex); + } + + @Override + protected Interval getIntervalInternal(int columnIndex) { + return currRow().getIntervalInternal(columnIndex); + } + + @Override + protected Value getValueInternal(int columnIndex) { + return currRow().getValueInternal(columnIndex); + } + + @Override + 
protected boolean[] getBooleanArrayInternal(int columnIndex) { + return currRow().getBooleanArrayInternal(columnIndex); + } + + @Override + protected List getBooleanListInternal(int columnIndex) { + return currRow().getBooleanListInternal(columnIndex); + } + + @Override + protected long[] getLongArrayInternal(int columnIndex) { + return currRow().getLongArrayInternal(columnIndex); + } + + @Override + protected List getLongListInternal(int columnIndex) { + return currRow().getLongListInternal(columnIndex); + } + + @Override + protected float[] getFloatArrayInternal(int columnIndex) { + return currRow().getFloatArrayInternal(columnIndex); + } + + @Override + protected List getFloatListInternal(int columnIndex) { + return currRow().getFloatListInternal(columnIndex); + } + + @Override + protected double[] getDoubleArrayInternal(int columnIndex) { + return currRow().getDoubleArrayInternal(columnIndex); + } + + @Override + protected List getDoubleListInternal(int columnIndex) { + return currRow().getDoubleListInternal(columnIndex); + } + + @Override + protected List getBigDecimalListInternal(int columnIndex) { + return currRow().getBigDecimalListInternal(columnIndex); + } + + @Override + protected List getStringListInternal(int columnIndex) { + return currRow().getStringListInternal(columnIndex); + } + + @Override + protected List getJsonListInternal(int columnIndex) { + return currRow().getJsonListInternal(columnIndex); + } + + @Override + protected List getPgJsonbListInternal(int columnIndex) { + return currRow().getJsonListInternal(columnIndex); + } + + @Override + protected List getBytesListInternal(int columnIndex) { + return currRow().getBytesListInternal(columnIndex); + } + + @Override + protected List getProtoMessageListInternal( + int columnIndex, T message) { + return currRow().getProtoMessageListInternal(columnIndex, message); + } + + @Override + protected List getProtoEnumListInternal( + int columnIndex, Function method) { + return 
currRow().getProtoEnumListInternal(columnIndex, method); + } + + @Override + protected List getTimestampListInternal(int columnIndex) { + return currRow().getTimestampListInternal(columnIndex); + } + + @Override + protected List getDateListInternal(int columnIndex) { + return currRow().getDateListInternal(columnIndex); + } + + @Override + protected List getUuidListInternal(int columnIndex) { + return currRow().getUuidListInternal(columnIndex); + } + + @Override + protected List getIntervalListInternal(int columnIndex) { + return currRow().getIntervalListInternal(columnIndex); + } + + @Override + protected List getStructListInternal(int columnIndex) { + return currRow().getStructListInternal(columnIndex); + } + + @Override + public boolean isNull(int columnIndex) { + return currRow().isNull(columnIndex); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractStructReader.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractStructReader.java new file mode 100644 index 000000000000..60ff4fd330e6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractStructReader.java @@ -0,0 +1,743 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.base.Preconditions.checkState; + +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Type.Code; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ProtocolMessageEnum; +import java.math.BigDecimal; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.function.Function; + +/** + * Base class for assisting {@link StructReader} implementations. + * + *

This class implements the majority of the {@code StructReader} interface, leaving subclasses + * to implement core data access via the {@code getTypeNameInternal()} methods. {@code + * AbstractStructReader} guarantees that these will only be called for non-{@code NULL} columns of a + * type appropriate for the method. + */ +public abstract class AbstractStructReader implements StructReader { + protected abstract boolean getBooleanInternal(int columnIndex); + + protected abstract long getLongInternal(int columnIndex); + + protected float getFloatInternal(int columnIndex) { + throw new UnsupportedOperationException("Not implemented"); + } + + protected abstract double getDoubleInternal(int columnIndex); + + protected abstract BigDecimal getBigDecimalInternal(int columnIndex); + + protected abstract String getStringInternal(int columnIndex); + + protected String getJsonInternal(int columnIndex) { + throw new UnsupportedOperationException("Not implemented"); + } + + protected String getPgJsonbInternal(int columnIndex) { + throw new UnsupportedOperationException("Not implemented"); + } + + protected abstract ByteArray getBytesInternal(int columnIndex); + + protected abstract Timestamp getTimestampInternal(int columnIndex); + + protected abstract Date getDateInternal(int columnIndex); + + protected UUID getUuidInternal(int columnIndex) { + throw new UnsupportedOperationException("Not implemented"); + } + + protected Interval getIntervalInternal(int columnIndex) { + throw new UnsupportedOperationException("Not implemented"); + } + + protected T getProtoMessageInternal(int columnIndex, T message) { + throw new UnsupportedOperationException("Not implemented"); + } + + protected T getProtoEnumInternal( + int columnIndex, Function method) { + throw new UnsupportedOperationException("Not implemented"); + } + + protected List getProtoMessageListInternal( + int columnIndex, T message) { + throw new UnsupportedOperationException("Not implemented"); + } + + protected List 
getProtoEnumListInternal( + int columnIndex, Function method) { + throw new UnsupportedOperationException("Not implemented"); + } + + protected Value getValueInternal(int columnIndex) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + protected abstract boolean[] getBooleanArrayInternal(int columnIndex); + + protected abstract List getBooleanListInternal(int columnIndex); + + protected abstract long[] getLongArrayInternal(int columnIndex); + + protected abstract List getLongListInternal(int columnIndex); + + protected float[] getFloatArrayInternal(int columnIndex) { + throw new UnsupportedOperationException("Not implemented"); + } + + protected List getFloatListInternal(int columnIndex) { + throw new UnsupportedOperationException("Not implemented"); + } + + protected abstract double[] getDoubleArrayInternal(int columnIndex); + + protected abstract List getDoubleListInternal(int columnIndex); + + protected abstract List getBigDecimalListInternal(int columnIndex); + + protected abstract List getStringListInternal(int columnIndex); + + protected List getJsonListInternal(int columnIndex) { + throw new UnsupportedOperationException("Not implemented"); + } + + protected List getPgJsonbListInternal(int columnIndex) { + throw new UnsupportedOperationException("Not implemented"); + } + + protected abstract List getBytesListInternal(int columnIndex); + + protected abstract List getTimestampListInternal(int columnIndex); + + protected abstract List getDateListInternal(int columnIndex); + + protected List getUuidListInternal(int columnIndex) { + throw new UnsupportedOperationException("Not implemented"); + } + + protected List getIntervalListInternal(int columnIndex) { + throw new UnsupportedOperationException("Not implemented"); + } + + protected abstract List getStructListInternal(int columnIndex); + + @Override + public int getColumnCount() { + return getType().getStructFields().size(); + } + + @Override + public Type getColumnType(int 
columnIndex) { + return getType().getStructFields().get(columnIndex).getType(); + } + + @Override + public Type getColumnType(String columnName) { + return getType().getStructFields().get(getColumnIndex(columnName)).getType(); + } + + @Override + public boolean isNull(String columnName) { + return isNull(getColumnIndex(columnName)); + } + + @Override + public boolean getBoolean(int columnIndex) { + checkNonNullOfType(columnIndex, Type.bool(), columnIndex); + return getBooleanInternal(columnIndex); + } + + @Override + public boolean getBoolean(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.bool(), columnName); + return getBooleanInternal(columnIndex); + } + + @Override + public long getLong(int columnIndex) { + checkNonNullOfCodes( + columnIndex, Arrays.asList(Code.ENUM, Code.PG_OID, Code.INT64), columnIndex); + return getLongInternal(columnIndex); + } + + @Override + public long getLong(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfCodes(columnIndex, Arrays.asList(Code.ENUM, Code.PG_OID, Code.INT64), columnName); + return getLongInternal(columnIndex); + } + + @Override + public float getFloat(int columnIndex) { + checkNonNullOfType(columnIndex, Type.float32(), columnIndex); + return getFloatInternal(columnIndex); + } + + @Override + public float getFloat(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.float32(), columnName); + return getFloatInternal(columnIndex); + } + + @Override + public double getDouble(int columnIndex) { + checkNonNullOfType(columnIndex, Type.float64(), columnIndex); + return getDoubleInternal(columnIndex); + } + + @Override + public double getDouble(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.float64(), columnName); + return getDoubleInternal(columnIndex); + } + + @Override + public BigDecimal getBigDecimal(int 
columnIndex) { + checkNonNullOfType(columnIndex, Type.numeric(), columnIndex); + return getBigDecimalInternal(columnIndex); + } + + @Override + public BigDecimal getBigDecimal(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.numeric(), columnName); + return getBigDecimalInternal(columnIndex); + } + + @Override + public String getString(int columnIndex) { + checkNonNullOfTypes( + columnIndex, + Arrays.asList(Type.string(), Type.pgNumeric()), + columnIndex, + "STRING, NUMERIC"); + return getStringInternal(columnIndex); + } + + @Override + public String getString(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfTypes( + columnIndex, Arrays.asList(Type.string(), Type.pgNumeric()), columnName, "STRING, NUMERIC"); + return getStringInternal(columnIndex); + } + + @Override + public String getJson(int columnIndex) { + checkNonNullOfType(columnIndex, Type.json(), columnIndex); + return getJsonInternal(columnIndex); + } + + @Override + public String getJson(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.json(), columnName); + return getJsonInternal(columnIndex); + } + + @Override + public String getPgJsonb(int columnIndex) { + checkNonNullOfType(columnIndex, Type.pgJsonb(), columnIndex); + return getPgJsonbInternal(columnIndex); + } + + @Override + public String getPgJsonb(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.pgJsonb(), columnName); + return getPgJsonbInternal(columnIndex); + } + + @Override + public ByteArray getBytes(int columnIndex) { + checkNonNullOfCodes(columnIndex, Arrays.asList(Code.PROTO, Code.BYTES), columnIndex); + return getBytesInternal(columnIndex); + } + + @Override + public ByteArray getBytes(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfCodes(columnIndex, Arrays.asList(Code.PROTO, Code.BYTES), 
columnName); + return getBytesInternal(columnIndex); + } + + @Override + public Timestamp getTimestamp(int columnIndex) { + checkNonNullOfType(columnIndex, Type.timestamp(), columnIndex); + return getTimestampInternal(columnIndex); + } + + @Override + public Timestamp getTimestamp(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.timestamp(), columnName); + return getTimestampInternal(columnIndex); + } + + @Override + public Date getDate(int columnIndex) { + checkNonNullOfType(columnIndex, Type.date(), columnIndex); + return getDateInternal(columnIndex); + } + + @Override + public Date getDate(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.date(), columnName); + return getDateInternal(columnIndex); + } + + @Override + public UUID getUuid(int columnIndex) { + checkNonNullOfType(columnIndex, Type.uuid(), columnIndex); + return getUuidInternal(columnIndex); + } + + @Override + public UUID getUuid(String columnName) { + final int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.uuid(), columnName); + return getUuidInternal(columnIndex); + } + + @Override + public Interval getInterval(int columnIndex) { + checkNonNullOfType(columnIndex, Type.interval(), columnIndex); + return getIntervalInternal(columnIndex); + } + + @Override + public Interval getInterval(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.interval(), columnName); + return getIntervalInternal(columnIndex); + } + + @Override + public T getProtoEnum( + int columnIndex, Function method) { + checkNonNullOfCodes(columnIndex, Arrays.asList(Code.ENUM, Code.INT64), columnIndex); + return getProtoEnumInternal(columnIndex, method); + } + + @Override + public T getProtoEnum( + String columnName, Function method) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfCodes(columnIndex, 
Arrays.asList(Code.ENUM, Code.INT64), columnName); + return getProtoEnumInternal(columnIndex, method); + } + + @Override + public T getProtoMessage(int columnIndex, T message) { + checkNonNullOfCodes(columnIndex, Arrays.asList(Code.PROTO, Code.BYTES), columnIndex); + return getProtoMessageInternal(columnIndex, message); + } + + @Override + public T getProtoMessage(String columnName, T message) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfCodes(columnIndex, Arrays.asList(Code.PROTO, Code.BYTES), columnName); + return getProtoMessageInternal(columnIndex, message); + } + + @Override + public Value getValue(int columnIndex) { + return getValueInternal(columnIndex); + } + + @Override + public Value getValue(String columnName) { + int columnIndex = getColumnIndex(columnName); + return getValueInternal(columnIndex); + } + + @Override + public boolean[] getBooleanArray(int columnIndex) { + checkNonNullOfType(columnIndex, Type.array(Type.bool()), columnIndex); + return getBooleanArrayInternal(columnIndex); + } + + @Override + public boolean[] getBooleanArray(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.array(Type.bool()), columnName); + return getBooleanArrayInternal(columnIndex); + } + + @Override + public List getBooleanList(int columnIndex) { + checkNonNullOfType(columnIndex, Type.array(Type.bool()), columnIndex); + return getBooleanListInternal(columnIndex); + } + + @Override + public List getBooleanList(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.array(Type.bool()), columnName); + return getBooleanListInternal(columnIndex); + } + + @Override + public long[] getLongArray(int columnIndex) { + checkNonNullOfCodes(columnIndex, Collections.singletonList(Code.ARRAY), columnIndex); + checkArrayElementType( + columnIndex, Arrays.asList(Code.ENUM, Code.PG_OID, Code.INT64), columnIndex); + return getLongArrayInternal(columnIndex); + 
} + + @Override + public long[] getLongArray(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfCodes(columnIndex, Collections.singletonList(Code.ARRAY), columnName); + checkArrayElementType( + columnIndex, Arrays.asList(Code.ENUM, Code.PG_OID, Code.INT64), columnName); + return getLongArrayInternal(columnIndex); + } + + @Override + public List getLongList(int columnIndex) { + checkNonNullOfCodes(columnIndex, Collections.singletonList(Code.ARRAY), columnIndex); + checkArrayElementType( + columnIndex, Arrays.asList(Code.ENUM, Code.PG_OID, Code.INT64), columnIndex); + return getLongListInternal(columnIndex); + } + + @Override + public List getLongList(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfCodes(columnIndex, Collections.singletonList(Code.ARRAY), columnName); + checkArrayElementType( + columnIndex, Arrays.asList(Code.ENUM, Code.PG_OID, Code.INT64), columnName); + return getLongListInternal(columnIndex); + } + + @Override + public float[] getFloatArray(int columnIndex) { + checkNonNullOfType(columnIndex, Type.array(Type.float32()), columnIndex); + return getFloatArrayInternal(columnIndex); + } + + @Override + public float[] getFloatArray(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.array(Type.float32()), columnName); + return getFloatArrayInternal(columnIndex); + } + + @Override + public List getFloatList(int columnIndex) { + checkNonNullOfType(columnIndex, Type.array(Type.float32()), columnIndex); + return getFloatListInternal(columnIndex); + } + + @Override + public List getFloatList(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.array(Type.float32()), columnName); + return getFloatListInternal(columnIndex); + } + + @Override + public double[] getDoubleArray(int columnIndex) { + checkNonNullOfType(columnIndex, Type.array(Type.float64()), columnIndex); + return 
getDoubleArrayInternal(columnIndex); + } + + @Override + public double[] getDoubleArray(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.array(Type.float64()), columnName); + return getDoubleArrayInternal(columnIndex); + } + + @Override + public List getDoubleList(int columnIndex) { + checkNonNullOfType(columnIndex, Type.array(Type.float64()), columnIndex); + return getDoubleListInternal(columnIndex); + } + + @Override + public List getDoubleList(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.array(Type.float64()), columnName); + return getDoubleListInternal(columnIndex); + } + + @Override + public List getBigDecimalList(int columnIndex) { + checkNonNullOfType(columnIndex, Type.array(Type.numeric()), columnIndex); + return getBigDecimalListInternal(columnIndex); + } + + @Override + public List getBigDecimalList(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.array(Type.numeric()), columnName); + return getBigDecimalListInternal(columnIndex); + } + + @Override + public List getStringList(int columnIndex) { + checkNonNullOfTypes( + columnIndex, + Arrays.asList(Type.array(Type.string()), Type.array(Type.pgNumeric())), + columnIndex, + "ARRAY, ARRAY"); + return getStringListInternal(columnIndex); + } + + @Override + public List getStringList(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfTypes( + columnIndex, + Arrays.asList(Type.array(Type.string()), Type.array(Type.pgNumeric())), + columnName, + "ARRAY, ARRAY"); + return getStringListInternal(columnIndex); + } + + @Override + public List getJsonList(int columnIndex) { + checkNonNullOfType(columnIndex, Type.array(Type.json()), columnIndex); + return getJsonListInternal(columnIndex); + } + + @Override + public List getJsonList(String columnName) { + int columnIndex = getColumnIndex(columnName); + 
checkNonNullOfType(columnIndex, Type.array(Type.json()), columnName); + return getJsonListInternal(columnIndex); + } + + @Override + public List getPgJsonbList(int columnIndex) { + checkNonNullOfType(columnIndex, Type.array(Type.pgJsonb()), columnIndex); + return getPgJsonbListInternal(columnIndex); + } + + @Override + public List getPgJsonbList(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.array(Type.pgJsonb()), columnName); + return getPgJsonbListInternal(columnIndex); + } + + @Override + public List getBytesList(int columnIndex) { + checkNonNullOfCodes(columnIndex, Collections.singletonList(Code.ARRAY), columnIndex); + checkArrayElementType(columnIndex, Arrays.asList(Code.PROTO, Code.BYTES), columnIndex); + return getBytesListInternal(columnIndex); + } + + @Override + public List getBytesList(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfCodes(columnIndex, Collections.singletonList(Code.ARRAY), columnName); + checkArrayElementType(columnIndex, Arrays.asList(Code.PROTO, Code.BYTES), columnName); + return getBytesListInternal(columnIndex); + } + + @Override + public List getProtoMessageList(int columnIndex, T message) { + checkNonNullOfCodes(columnIndex, Collections.singletonList(Code.ARRAY), columnIndex); + checkArrayElementType(columnIndex, Arrays.asList(Code.PROTO, Code.BYTES), columnIndex); + return getProtoMessageListInternal(columnIndex, message); + } + + @Override + public List getProtoMessageList(String columnName, T message) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfCodes(columnIndex, Collections.singletonList(Code.ARRAY), columnName); + checkArrayElementType(columnIndex, Arrays.asList(Code.PROTO, Code.BYTES), columnName); + return getProtoMessageListInternal(columnIndex, message); + } + + @Override + public List getProtoEnumList( + int columnIndex, Function method) { + checkNonNullOfCodes(columnIndex, 
Collections.singletonList(Code.ARRAY), columnIndex); + checkArrayElementType(columnIndex, Arrays.asList(Code.ENUM, Code.INT64), columnIndex); + return getProtoEnumListInternal(columnIndex, method); + } + + @Override + public List getProtoEnumList( + String columnName, Function method) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfCodes(columnIndex, Collections.singletonList(Code.ARRAY), columnName); + checkArrayElementType(columnIndex, Arrays.asList(Code.ENUM, Code.INT64), columnName); + return getProtoEnumListInternal(columnIndex, method); + } + + @Override + public List getTimestampList(int columnIndex) { + checkNonNullOfType(columnIndex, Type.array(Type.timestamp()), columnIndex); + return getTimestampListInternal(columnIndex); + } + + @Override + public List getTimestampList(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.array(Type.timestamp()), columnName); + return getTimestampListInternal(columnIndex); + } + + @Override + public List getDateList(int columnIndex) { + checkNonNullOfType(columnIndex, Type.array(Type.date()), columnIndex); + return getDateListInternal(columnIndex); + } + + @Override + public List getDateList(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.array(Type.date()), columnName); + return getDateListInternal(columnIndex); + } + + @Override + public List getUuidList(int columnIndex) { + checkNonNullOfType(columnIndex, Type.array(Type.uuid()), columnIndex); + return getUuidListInternal(columnIndex); + } + + @Override + public List getUuidList(String columnName) { + final int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.array(Type.uuid()), columnName); + return getUuidListInternal(columnIndex); + } + + @Override + public List getIntervalList(int columnIndex) { + checkNonNullOfType(columnIndex, Type.array(Type.interval()), columnIndex); + return 
getIntervalListInternal(columnIndex); + } + + @Override + public List getIntervalList(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullOfType(columnIndex, Type.array(Type.interval()), columnName); + return getIntervalListInternal(columnIndex); + } + + @Override + public List getStructList(int columnIndex) { + checkNonNullArrayOfStruct(columnIndex, columnIndex); + return getStructListInternal(columnIndex); + } + + @Override + public List getStructList(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullArrayOfStruct(columnIndex, columnName); + return getStructListInternal(columnIndex); + } + + @Override + public int getColumnIndex(String columnName) { + // Use the Type instance for field name lookup. Type instances are naturally shared by the + // ResultSet, all Structs corresponding to rows in the read, and all Structs corresponding to + // the values of ARRAY> columns in the read, so this is the best location to + // amortize lookup costs. 
+ return getType().getFieldIndex(columnName); + } + + protected void checkNonNull(int columnIndex, Object columnNameForError) { + if (isNull(columnIndex)) { + throw new NullPointerException("Column " + columnNameForError + " contains NULL value"); + } + } + + private void checkNonNullOfType(int columnIndex, Type expectedType, Object columnNameForError) { + Type actualType = getColumnType(columnIndex); + checkState( + expectedType.equals(actualType), + "Column %s is not of correct type: expected %s but was %s", + columnNameForError, + expectedType, + actualType); + checkNonNull(columnIndex, columnNameForError); + } + + /** Checks if the value at {@code columnIndex} is one of {@code expectedCode} */ + private void checkNonNullOfCodes( + int columnIndex, List expectedCodes, Object columnNameForError) { + Type actualType = getColumnType(columnIndex); + checkState( + expectedCodes.contains(actualType.getCode()), + "Column %s is not of correct type code: expected one of [%s] but was %s", + columnNameForError, + expectedCodes, + actualType); + checkNonNull(columnIndex, columnNameForError); + } + + private void checkArrayElementType( + int columnIndex, List expectedCodes, Object columnNameForError) { + Type arrayElementType = getColumnType(columnIndex).getArrayElementType(); + checkState( + expectedCodes.contains(arrayElementType.getCode()), + "Array element for Column %s is not of correct type code: expected one of [%s] but was %s", + columnNameForError, + expectedCodes, + Type.array(arrayElementType)); + } + + private void checkNonNullOfTypes( + int columnIndex, + List expectedTypes, + Object columnNameForError, + String expectedTypeNames) { + Type actualType = getColumnType(columnIndex); + checkState( + expectedTypes.contains(actualType), + "Column %s is not of correct type: expected one of [%s] but was %s", + columnNameForError, + expectedTypeNames, + actualType); + checkNonNull(columnIndex, columnNameForError); + } + + private void checkNonNullArrayOfStruct(int 
columnIndex, Object columnNameForError) { + Type actualType = getColumnType(columnIndex); + checkState( + actualType.getCode() == Type.Code.ARRAY + && actualType.getArrayElementType().getCode() == Type.Code.STRUCT, + "Column %s is not of correct type: expected ARRAY> but was %s", + columnNameForError, + actualType); + checkNonNull(columnIndex, columnNameForError); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AdminRequestsPerMinuteExceededException.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AdminRequestsPerMinuteExceededException.java new file mode 100644 index 000000000000..72d8b0ab15d8 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AdminRequestsPerMinuteExceededException.java @@ -0,0 +1,46 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.gax.rpc.ApiException; +import javax.annotation.Nullable; + +/** + * Exception thrown by Cloud Spanner the number of administrative requests per minute has been + * exceeded. 
+ */ +public class AdminRequestsPerMinuteExceededException extends SpannerException { + private static final long serialVersionUID = -6395746612598975751L; + + static final String ADMIN_REQUESTS_LIMIT_KEY = "quota_limit"; + static final String ADMIN_REQUESTS_LIMIT_VALUE = "AdminMethodQuotaPerMinutePerProject"; + + /** Private constructor. Use {@link SpannerExceptionFactory} to create instances. */ + AdminRequestsPerMinuteExceededException( + DoNotConstructDirectly token, @Nullable String message, @Nullable Throwable cause) { + this(token, message, cause, null); + } + + /** Private constructor. Use {@link SpannerExceptionFactory} to create instances. */ + AdminRequestsPerMinuteExceededException( + DoNotConstructDirectly token, + @Nullable String message, + @Nullable Throwable cause, + @Nullable ApiException apiException) { + super(token, ErrorCode.RESOURCE_EXHAUSTED, true, message, cause, apiException); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AsyncResultSet.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AsyncResultSet.java new file mode 100644 index 000000000000..2b3225bfc598 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AsyncResultSet.java @@ -0,0 +1,235 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.google.cloud.spanner;
+
+import com.google.api.core.ApiFuture;
+import com.google.common.base.Function;
+import com.google.spanner.v1.PartialResultSet;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executor;
+
+/** Interface for result sets returned by async query methods. */
+public interface AsyncResultSet extends ResultSet {
+
+  /** Response code from {@code tryNext()}. */
+  enum CursorState {
+    /** Cursor has been moved to a new row. */
+    OK,
+    /** Read is complete, all rows have been consumed, and there are no more. */
+    DONE,
+    /** No further information known at this time, thus current row not available. */
+    NOT_READY
+  }
+
+  /**
+   * Non-blocking call that attempts to step the cursor to the next position in the stream. The
+   * cursor may be inspected only if the cursor returns {@code CursorState.OK}.
+   *
+   * <p>A caller will typically call tryNext in a loop inside the ReadyCallback, consuming all
+   * results available. For more information see {@link #setCallback(Executor, ReadyCallback)}.
+   *
+   * <p>Currently this method may only be called if a ReadyCallback has been registered. This is
+   * for safety purposes only, and may be relaxed in future.
+   *
+   * @return current cursor readiness state
+   * @throws SpannerException When an unrecoverable problem downstream occurs. Once this occurs
+   *     you will get no further callbacks. You should return CallbackResponse.DONE back from
+   *     callback.
+   */
+  CursorState tryNext() throws SpannerException;
+
+  enum CallbackResponse {
+    /**
+     * Tell the cursor to continue issuing callbacks when data is available. This is the standard
+     * "I'm ready for more" response. If cursor is not completely drained of all ready results the
+     * callback will be called again immediately.
+     */
+    CONTINUE,
+
+    /**
+     * Tell the cursor to suspend all callbacks until application calls {@link
+     * ForwardingAsyncResultSet#resume()}.
+     */
+    PAUSE,
+
+    /**
+     * Tell the cursor you are done receiving results, even if there are more results sitting in
+     * the buffer. Once you return DONE, you will receive no further callbacks.
+     *
+     * <p>Approximately equivalent to calling {@link ForwardingAsyncResultSet#cancel()}, and then
+     * returning {@code PAUSE}, but more clear, immediate, and idiomatic.
+     *
+     * <p>It is legal to commit a transaction that owns this read before actually returning
+     * {@code DONE}.
+     */
+    DONE,
+  }
+
+  /**
+   * Interface for receiving asynchronous callbacks when new data is ready. See {@link
+   * AsyncResultSet#setCallback(Executor, ReadyCallback)}.
+   */
+  interface ReadyCallback {
+    CallbackResponse cursorReady(AsyncResultSet resultSet);
+  }
+
+  /**
+   * Register a callback with the ResultSet to be made aware when more data is available, changing
+   * the usage pattern from sync to async. Details:
+   *
+   * <ul>
+   *   <li>The callback will be called at least once.
+   *   <li>The callback is run each time more results are available, or when we discover that
+   *       there will be no more results (unless paused, see below). Spurious callbacks are
+   *       possible, see below.
+   *   <li>Spanner guarantees that at most one callback is ever outstanding at a time. Also,
+   *       future callbacks guarantee the "happens before" property with previous callbacks.
+   *   <li>A callback normally consumes all available data in the ResultSet, and then returns
+   *       {@link CallbackResponse#CONTINUE}.
+   *   <li>If a callback returns {@link CallbackResponse#CONTINUE} with data still in the
+   *       ResultSet, the callback is invoked again immediately!
+   *   <li>Once a callback has returned {@link CallbackResponse#PAUSE} on the cursor no more
+   *       callbacks will be run until a corresponding {@link #resume()}.
+   *   <li>Callbacks will stop being called once any of the following occurs:
+   *       <ol>
+   *         <li>Callback returns {@link CallbackResponse#DONE}.
+   *         <li>{@link ForwardingAsyncResultSet#tryNext()} returns {@link CursorState#DONE}.
+   *         <li>{@link ForwardingAsyncResultSet#tryNext()} throws an exception.
+   *       </ol>
+   *   <li>Callback may possibly be invoked after a call to {@link
+   *       ForwardingAsyncResultSet#cancel()} call, but the subsequent call to {@link #tryNext()}
+   *       will yield a SpannerException.
+   *   <li>Spurious callbacks are possible where cursors are not actually ready. Typically the
+   *       callback should return {@link CallbackResponse#CONTINUE} any time it sees {@link
+   *       CursorState#NOT_READY}.
+   * </ul>
+   *
+   * <h3>Flow Control</h3>
+   *
+   * <p>If no flow control is needed (say because result sizes are known in advance to be finite
+   * in size) then async processing is simple. The following is a code example that transfers
+   * work from the cursor to an upstream sink:
+   *
+   * <pre>{@code
+   * @Override
+   * public CallbackResponse cursorReady(ResultSet cursor) {
+   *   try {
+   *     while (true) {
+   *       switch (cursor.tryNext()) {
+   *         case OK:    upstream.emit(cursor.getRow()); break;
+   *         case DONE:  upstream.done(); return CallbackResponse.DONE;
+   *         case NOT_READY:  return CallbackResponse.CONTINUE;
+   *       }
+   *     }
+   *   } catch (SpannerException e) {
+   *     upstream.doneWithError(e);
+   *     return CallbackResponse.DONE;
+   *   }
+   * }
+   * }</pre>
+   *
+   * <p>Flow control may be needed if for example the upstream system may not always be ready to
+   * handle more data. In this case the app developer has two main options:
+   *
+   * <ul>
+   *   <li>Semi-async: make {@code upstream.emit()} a blocking call. This will block the callback
+   *       thread until progress is possible. When coding in this way the threads in the Executor
+   *       provided to setCallback must be blockable without causing harm to progress in your
+   *       system.
+   *   <li>Full-async: call {@code cursor.pause()} and return from the callback with data still
+   *       in the Cursor. Once in this state the cursor waits until resume() is called before
+   *       calling the callback again.
+   * </ul>
+   *
+   * @param exec executor on which to run all callbacks. Typically use a threadpool. If the
+   *     executor is one that runs the work on the submitting thread, you must be very careful
+   *     not to throw RuntimeException up the stack, lest you do damage to calling components.
+   *     For example, it may cause an event dispatcher thread to crash.
+   * @param cb ready callback
+   * @return An {@link ApiFuture} that returns null when the consumption of the {@link
+   *     AsyncResultSet} has finished successfully. No more calls to the {@link ReadyCallback}
+   *     will follow and all resources used by the {@link AsyncResultSet} have been cleaned up.
+   *     The {@link ApiFuture} throws an {@link ExecutionException} if the consumption of the
+   *     {@link AsyncResultSet} finished with an error.
+   */
+  ApiFuture<Void> setCallback(Executor exec, ReadyCallback cb);
+
+  /**
+   * Attempt to cancel this operation and free all resources. Non-blocking. This is a no-op for
+   * child row cursors and does not cancel the parent cursor.
+   */
+  void cancel();
+
+  /**
+   * Resume callbacks from the cursor. If there is more data available, a callback will be
+   * dispatched immediately. This can be called from any thread.
+   */
+  void resume();
+
+  /**
+   * Transforms the row cursor into an immutable list using the given transformer function.
+   * {@code transformer} will be called once per row, thus the returned list will contain one
+   * entry per row. The returned future will throw a {@link SpannerException} if the row cursor
+   * encountered any error or if the transformer threw an exception on any row.
+   *
+   * <p>The transformer will be run on the supplied executor. The implementation may batch
+   * multiple transformer invocations together into a single {@code Runnable} when possible to
+   * increase efficiency. At any point in time, there will be at most one invocation of the
+   * transformer in progress.
+   *
+   * <p>WARNING: This will result in materializing the entire list so this should be used
+   * judiciously after considering the memory requirements of the returned list.
+   *
+   * <p>WARNING: The {@code RowBase} object passed to transformer function is not immutable and
+   * is not guaranteed to remain valid after the transformer function returns. The same {@code
+   * RowBase} object might be passed multiple times to the transformer with different underlying
+   * data each time. So *NEVER* keep a reference to the {@code RowBase} outside of the
+   * transformer. Specifically do not use {@link com.google.common.base.Functions#identity()}
+   * function.
+   *
+   * @param transformer function which will be used to transform the row. It should not return
+   *     null.
+   * @param executor executor on which the transformer will be run. This should ideally not be an
+   *     inline executor such as {@code MoreExecutors.directExecutor()}; using such an executor
+   *     may degrade the performance of the Spanner library.
+   */
+  <T> ApiFuture<List<T>> toListAsync(
+      Function<? super StructReader, ? extends T> transformer, Executor executor);
+
+  /**
+   * Transforms the row cursor into an immutable list using the given transformer function.
+   * {@code transformer} will be called once per row, thus the returned list will contain one
+   * entry per row. This method will block until all the rows have been yielded by the cursor.
+   *
+   * <p>WARNING: This will result in consuming the entire list so this should be used judiciously
+   * after considering the memory requirements of the returned list.
+   *
+   * <p>WARNING: The {@code RowBase} object passed to transformer function is not immutable and
+   * is not guaranteed to remain valid after the transformer function returns. The same {@code
+   * RowBase} object might be passed multiple times to the transformer with different underlying
+   * data each time. So *NEVER* keep a reference to the {@code RowBase} outside of the
+   * transformer. Specifically do not use {@link com.google.common.base.Functions#identity()}
+   * function.
+   *
+   * @param transformer function which will be used to transform the row. It should not return
+   *     null.
+   */
+  <T> List<T> toList(Function<? super StructReader, ? extends T> transformer)
+      throws SpannerException;
+
+  /**
+   * An interface to register the listener for streaming gRPC request. It will be called when a
+   * chunk is received from gRPC streaming call.
+   */
+  interface StreamMessageListener {
+    void onStreamMessage(PartialResultSet partialResultSet, boolean bufferIsFull);
+  }
+}
diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AsyncResultSetImpl.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AsyncResultSetImpl.java
new file mode 100644
index 000000000000..e53e4db94b66
--- /dev/null
+++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AsyncResultSetImpl.java
@@ -0,0 +1,660 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.google.cloud.spanner; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.core.ExecutorProvider; +import com.google.cloud.spanner.AbstractReadContext.ListenableAsyncResultSet; +import com.google.common.base.Function; +import com.google.common.base.Preconditions; +import com.google.common.base.Supplier; +import com.google.common.base.Suppliers; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListeningScheduledExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import java.util.Collection; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.BlockingDeque; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.Future; +import java.util.concurrent.LinkedBlockingDeque; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** Default implementation for {@link AsyncResultSet}. */ +class AsyncResultSetImpl extends ForwardingStructReader + implements ListenableAsyncResultSet, AsyncResultSet.StreamMessageListener { + private static final Logger log = Logger.getLogger(AsyncResultSetImpl.class.getName()); + + /** State of an {@link AsyncResultSetImpl}. */ + private enum State { + INITIALIZED, + STREAMING_INITIALIZED, + /** SYNC indicates that the {@link ResultSet} is used in sync pattern. */ + SYNC, + CONSUMING, + RUNNING, + PAUSED, + CANCELLED(true), + DONE(true); + + /** Does this state mean that the result set should permanently stop producing rows. 
*/ + private final boolean shouldStop; + + State() { + shouldStop = false; + } + + State(boolean shouldStop) { + this.shouldStop = shouldStop; + } + } + + static final int DEFAULT_BUFFER_SIZE = 10; + private static final int MAX_WAIT_FOR_BUFFER_CONSUMPTION = 10; + private static final SpannerException CANCELLED_EXCEPTION = + SpannerExceptionFactory.newSpannerException( + ErrorCode.CANCELLED, "This AsyncResultSet has been cancelled"); + + private final Object monitor = new Object(); + private boolean closed; + + /** + * {@link ExecutorProvider} provides executor services that are used to fetch data from the + * backend and put these into the buffer for further consumption by the callback. + */ + private final ExecutorProvider executorProvider; + + private final ListeningScheduledExecutorService service; + + private final BlockingDeque buffer; + private Struct currentRow; + + /** Supplies the underlying synchronous {@link ResultSet} that will be producing the rows. */ + private final Supplier delegateResultSet; + + /** + * Any exception that occurs while executing the query and iterating over the result set will be + * stored in this variable and propagated to the user through {@link #tryNext()}. + */ + private volatile SpannerException executionException; + + /** + * Executor for callbacks. Regardless of the type of executor that is provided, the {@link + * AsyncResultSetImpl} will ensure that at most 1 callback call will be active at any one time. + */ + private Executor executor; + + private ReadyCallback callback; + + /** + * Listeners that will be called when the {@link AsyncResultSetImpl} has finished fetching all + * rows and any underlying transaction or session can be closed. 
+ */ + private final Collection listeners = new LinkedList<>(); + + private volatile State state = State.INITIALIZED; + + /** This variable indicates that produce rows thread is initiated */ + private volatile boolean produceRowsInitiated; + + /** + * This variable indicates whether all the results from the underlying result set have been read. + */ + private volatile boolean finished; + + private volatile SettableApiFuture result; + + /** + * This variable indicates whether {@link #tryNext()} has returned {@link CursorState#DONE} or a + * {@link SpannerException}. + */ + private volatile boolean cursorReturnedDoneOrException; + + /** + * This variable is used to pause the producer when the {@link AsyncResultSet} is paused. The + * production of rows that are put into the buffer is only paused once the buffer is full. + */ + private volatile CountDownLatch pausedLatch = new CountDownLatch(1); + + /** + * This variable is used to pause the producer when the buffer is full and the consumer needs some + * time to catch up. + */ + private volatile CountDownLatch bufferConsumptionLatch = new CountDownLatch(0); + + /** + * This variable is used to pause the producer when all rows have been put into the buffer, but + * the consumer (the callback) has not yet received and processed all rows. 
+ */ + private volatile CountDownLatch consumingLatch = new CountDownLatch(0); + + AsyncResultSetImpl(ExecutorProvider executorProvider, ResultSet delegate, int bufferSize) { + this(executorProvider, Suppliers.ofInstance(Preconditions.checkNotNull(delegate)), bufferSize); + } + + AsyncResultSetImpl( + ExecutorProvider executorProvider, Supplier delegate, int bufferSize) { + super(delegate); + this.executorProvider = Preconditions.checkNotNull(executorProvider); + this.delegateResultSet = Preconditions.checkNotNull(delegate); + this.service = MoreExecutors.listeningDecorator(executorProvider.getExecutor()); + this.buffer = new LinkedBlockingDeque<>(bufferSize); + } + + /** + * Closes the {@link AsyncResultSet}. {@link #close()} is non-blocking and may be called multiple + * times without side effects. An {@link AsyncResultSet} may be closed before all rows have been + * returned to the callback, and calling {@link #tryNext()} on a closed {@link AsyncResultSet} is + * allowed as long as this is done from within a {@link ReadyCallback}. Calling {@link #resume()} + * on a closed {@link AsyncResultSet} is also allowed. + */ + @Override + public void close() { + synchronized (monitor) { + if (this.closed) { + return; + } + if (state == State.INITIALIZED || state == State.SYNC) { + delegateResultSet.get().close(); + } + this.closed = true; + } + } + + /** + * Adds a listener that will be called when no more rows will be read from the underlying {@link + * ResultSet}, either because all rows have been read, or because {@link + * ReadyCallback#cursorReady(AsyncResultSet)} returned {@link CallbackResponse#DONE}. 
+ */ + @Override + public void addListener(Runnable listener) { + Preconditions.checkState(state == State.INITIALIZED); + listeners.add(listener); + } + + @Override + public void removeListener(Runnable listener) { + Preconditions.checkState(state == State.INITIALIZED); + listeners.remove(listener); + } + + /** + * Tries to advance this {@link AsyncResultSet} to the next row. This method may only be called + * from within a {@link ReadyCallback}. + */ + @Override + public CursorState tryNext() throws SpannerException { + synchronized (monitor) { + if (state == State.CANCELLED) { + cursorReturnedDoneOrException = true; + throw CANCELLED_EXCEPTION; + } + if (buffer.isEmpty() && executionException != null) { + cursorReturnedDoneOrException = true; + throw executionException; + } + Preconditions.checkState( + this.callback != null, "tryNext may only be called after a callback has been set."); + Preconditions.checkState( + this.state == State.CONSUMING, + "tryNext may only be called from a DataReady callback. Current state: " + + this.state.name()); + + if (finished && buffer.isEmpty()) { + cursorReturnedDoneOrException = true; + return CursorState.DONE; + } + } + if (!buffer.isEmpty()) { + // Set the next row from the buffer as the current row of the StructReader. + replaceDelegate(currentRow = buffer.pop()); + synchronized (monitor) { + bufferConsumptionLatch.countDown(); + } + return CursorState.OK; + } + return CursorState.NOT_READY; + } + + private void closeDelegateResultSet() { + try { + delegateResultSet.get().close(); + } catch (Throwable t) { + log.log(Level.FINE, "Ignoring error from closing delegate result set", t); + } + } + + /** + * {@link CallbackRunnable} calls the {@link ReadyCallback} registered for this {@link + * AsyncResultSet}. 
+ */ + private class CallbackRunnable implements Runnable { + @Override + public void run() { + try { + while (true) { + synchronized (monitor) { + if (cursorReturnedDoneOrException) { + break; + } + if (state == State.CANCELLED) { + // The callback should always get at least one chance to catch the CANCELLED + // exception. It is however possible that the callback does not call tryNext(), and + // instead directly returns PAUSE or DONE. In those cases, the callback runner should + // also stop, even though the callback has not seen the CANCELLED state. + cursorReturnedDoneOrException = true; + } + } + CallbackResponse response; + try { + response = callback.cursorReady(AsyncResultSetImpl.this); + } catch (Throwable e) { + synchronized (monitor) { + if (cursorReturnedDoneOrException + && state == State.CANCELLED + && e instanceof SpannerException + && ((SpannerException) e).getErrorCode() == ErrorCode.CANCELLED) { + // The callback did not catch the cancelled exception (which it should have), but + // we'll keep the cancelled state. + return; + } + executionException = SpannerExceptionFactory.asSpannerException(e); + cursorReturnedDoneOrException = true; + } + return; + } + synchronized (monitor) { + if (state == State.CANCELLED) { + if (cursorReturnedDoneOrException) { + return; + } + } else { + switch (response) { + case DONE: + state = State.DONE; + cursorReturnedDoneOrException = true; + return; + case PAUSE: + state = State.PAUSED; + // Make sure no-one else is waiting on the current pause latch and create a new + // one. + pausedLatch.countDown(); + pausedLatch = new CountDownLatch(1); + return; + case CONTINUE: + if (buffer.isEmpty()) { + // Call the callback once more if the entire result set has been processed but + // the callback has not yet received a CursorState.DONE or a CANCELLED error. 
+ if (finished && !cursorReturnedDoneOrException) { + break; + } + state = State.RUNNING; + return; + } + break; + default: + throw new IllegalStateException("Unknown response: " + response); + } + } + } + } + } finally { + synchronized (monitor) { + // Count down all latches that the producer might be waiting on. + consumingLatch.countDown(); + while (bufferConsumptionLatch.getCount() > 0L) { + bufferConsumptionLatch.countDown(); + } + } + } + } + } + + private final CallbackRunnable callbackRunnable = new CallbackRunnable(); + + /** + * {@link ProduceRowsRunnable} reads data from the underlying {@link ResultSet}, places these in + * the buffer and dispatches the {@link CallbackRunnable} when data is ready to be consumed. + */ + private class ProduceRowsRunnable implements Runnable { + @Override + public void run() { + boolean stop = false; + boolean hasNext = false; + try { + hasNext = delegateResultSet.get().next(); + } catch (Throwable e) { + synchronized (monitor) { + executionException = SpannerExceptionFactory.asSpannerException(e); + } + } + try { + while (!stop && hasNext) { + try { + synchronized (monitor) { + stop = state.shouldStop; + } + if (!stop) { + while (buffer.remainingCapacity() == 0 && !stop) { + waitIfPaused(); + // The buffer is full and we should let the callback consume a number of rows before + // we proceed with producing any more rows to prevent us from potentially waiting on + // a full buffer repeatedly. + // Wait until at least half of the buffer is available, or if it's a bigger buffer, + // wait until at least 10 rows can be placed in it. + // TODO: Make this more dynamic / configurable? 
+ startCallbackWithBufferLatchIfNecessary( + Math.min( + Math.min(buffer.size() / 2 + 1, buffer.size()), + MAX_WAIT_FOR_BUFFER_CONSUMPTION)); + bufferConsumptionLatch.await(); + synchronized (monitor) { + stop = state.shouldStop; + } + } + } + if (!stop) { + buffer.put(delegateResultSet.get().getCurrentRowAsStruct()); + startCallbackIfNecessary(); + hasNext = delegateResultSet.get().next(); + } + } catch (Throwable e) { + synchronized (monitor) { + executionException = SpannerExceptionFactory.asSpannerException(e); + stop = true; + } + } + } + // We don't need any more data from the underlying result set, so we close it as soon as + // possible. Any error that might occur during this will be ignored. + closeDelegateResultSet(); + + // Ensure that the callback has been called at least once, even if the result set was + // cancelled. + synchronized (monitor) { + finished = true; + stop = cursorReturnedDoneOrException; + } + // Call the callback if there are still rows in the buffer that need to be processed. + while (!stop) { + try { + waitIfPaused(); + startCallbackIfNecessary(); + // Make sure we wait until the callback runner has actually finished. 
+ consumingLatch.await(); + synchronized (monitor) { + stop = cursorReturnedDoneOrException; + } + } catch (Throwable e) { + result.setException(e); + return; + } + } + } finally { + if (executorProvider.shouldAutoClose()) { + service.shutdown(); + } + for (Runnable listener : listeners) { + listener.run(); + } + synchronized (monitor) { + if (executionException != null) { + result.setException(executionException); + } else if (state == State.CANCELLED) { + result.setException(CANCELLED_EXCEPTION); + } else { + result.set(null); + } + } + } + } + + private void waitIfPaused() throws InterruptedException { + CountDownLatch pause; + synchronized (monitor) { + pause = pausedLatch; + } + pause.await(); + } + + private void startCallbackIfNecessary() { + startCallbackWithBufferLatchIfNecessary(0); + } + + private void startCallbackWithBufferLatchIfNecessary(int bufferLatch) { + synchronized (monitor) { + if ((state == State.RUNNING || state == State.CANCELLED) + && !cursorReturnedDoneOrException) { + consumingLatch = new CountDownLatch(1); + if (bufferLatch > 0) { + bufferConsumptionLatch = new CountDownLatch(bufferLatch); + } + if (state == State.RUNNING) { + state = State.CONSUMING; + } + executor.execute(callbackRunnable); + } + } + } + } + + private class InitiateStreamingRunnable implements Runnable { + + @Override + public void run() { + try { + // This method returns true if the underlying result set is a streaming result set (e.g. a + // GrpcResultSet). + // Those result sets will trigger initiateProduceRows() when the first results are received. + // Non-streaming result sets do not trigger this callback, and for those result sets, we + // need to eagerly start the ProduceRowsRunnable. 
+ if (!initiateStreaming(AsyncResultSetImpl.this)) { + initiateProduceRows(); + } + } catch (Throwable exception) { + executionException = SpannerExceptionFactory.asSpannerException(exception); + initiateProduceRows(); + } + } + } + + /** Sets the callback for this {@link AsyncResultSet}. */ + @Override + public ApiFuture setCallback(Executor exec, ReadyCallback cb) { + synchronized (monitor) { + Preconditions.checkState(!closed, "This AsyncResultSet has been closed"); + Preconditions.checkState( + this.state == State.INITIALIZED, "callback may not be set multiple times"); + + // Start to fetch data and buffer these. + this.result = SettableApiFuture.create(); + this.state = State.STREAMING_INITIALIZED; + this.service.execute(new InitiateStreamingRunnable()); + this.executor = MoreExecutors.newSequentialExecutor(Preconditions.checkNotNull(exec)); + this.callback = Preconditions.checkNotNull(cb); + pausedLatch.countDown(); + return result; + } + } + + private void initiateProduceRows() { + synchronized (monitor) { + if (this.state == State.STREAMING_INITIALIZED) { + this.state = State.RUNNING; + } + produceRowsInitiated = true; + } + this.service.execute(new ProduceRowsRunnable()); + } + + Future getResult() { + return result; + } + + @Override + public void cancel() { + synchronized (monitor) { + Preconditions.checkState( + state != State.INITIALIZED && state != State.SYNC, + "cannot cancel a result set without a callback"); + state = State.CANCELLED; + pausedLatch.countDown(); + } + } + + @Override + public void resume() { + synchronized (monitor) { + Preconditions.checkState( + state != State.INITIALIZED && state != State.SYNC, + "cannot resume a result set without a callback"); + if (state == State.PAUSED) { + state = State.RUNNING; + pausedLatch.countDown(); + } + } + } + + private static class CreateListCallback implements ReadyCallback { + private final SettableApiFuture> future; + private final Function transformer; + private final ImmutableList.Builder 
builder = ImmutableList.builder(); + + private CreateListCallback( + SettableApiFuture> future, Function transformer) { + this.future = future; + this.transformer = transformer; + } + + @Override + public CallbackResponse cursorReady(AsyncResultSet resultSet) { + try { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + future.set(builder.build()); + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + builder.add(transformer.apply(resultSet)); + break; + } + } + } catch (Throwable t) { + future.setException(t); + return CallbackResponse.DONE; + } + } + } + + @Override + public ApiFuture> toListAsync( + Function transformer, Executor executor) { + synchronized (monitor) { + Preconditions.checkState(!closed, "This AsyncResultSet has been closed"); + Preconditions.checkState( + this.state == State.INITIALIZED, "This AsyncResultSet has already been used."); + final SettableApiFuture> res = SettableApiFuture.create(); + CreateListCallback callback = new CreateListCallback<>(res, transformer); + ApiFuture finished = setCallback(executor, callback); + return ApiFutures.transformAsync(finished, ignored -> res, MoreExecutors.directExecutor()); + } + } + + @Override + public List toList(Function transformer) throws SpannerException { + ApiFuture> future = toListAsync(transformer, MoreExecutors.directExecutor()); + try { + return future.get(); + } catch (ExecutionException e) { + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (Throwable e) { + throw SpannerExceptionFactory.asSpannerException(e); + } + } + + @Override + public boolean next() throws SpannerException { + synchronized (monitor) { + Preconditions.checkState( + this.state == State.INITIALIZED || this.state == State.SYNC, + "Cannot call next() on a result set with a callback."); + this.state = State.SYNC; + } + boolean res = delegateResultSet.get().next(); + currentRow = res ? 
delegateResultSet.get().getCurrentRowAsStruct() : null; + return res; + } + + @Override + public ResultSetStats getStats() { + return delegateResultSet.get().getStats(); + } + + @Override + public ResultSetMetadata getMetadata() { + return delegateResultSet.get().getMetadata(); + } + + boolean initiateStreaming(StreamMessageListener streamMessageListener) { + return StreamingUtil.initiateStreaming(delegateResultSet.get(), streamMessageListener); + } + + @Override + protected void checkValidState() { + synchronized (monitor) { + Preconditions.checkState( + state == State.SYNC || state == State.CONSUMING || state == State.CANCELLED, + "only allowed after a next() call or from within a ReadyCallback#cursorReady callback"); + Preconditions.checkState(state != State.SYNC || !closed, "ResultSet is closed"); + } + } + + @Override + public Struct getCurrentRowAsStruct() { + checkValidState(); + return currentRow; + } + + @Override + public void onStreamMessage(PartialResultSet partialResultSet, boolean bufferIsFull) { + synchronized (monitor) { + if (produceRowsInitiated) { + return; + } + // if PartialResultSet contains a resume token or buffer size is full, or + // we have reached the end of the stream, we can start the thread. 
+ boolean startJobThread = + !partialResultSet.getResumeToken().isEmpty() + || bufferIsFull + || partialResultSet == GrpcStreamIterator.END_OF_STREAM; + if (startJobThread || state != State.STREAMING_INITIALIZED) { + initiateProduceRows(); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AsyncRunner.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AsyncRunner.java new file mode 100644 index 000000000000..3df3b9068b24 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AsyncRunner.java @@ -0,0 +1,67 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.ApiFuture; +import com.google.cloud.Timestamp; +import io.grpc.Status.Code; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; + +public interface AsyncRunner { + + /** + * Functional interface for executing a read/write transaction asynchronously that returns a + * result of type R. + */ + @FunctionalInterface + interface AsyncWork { + /** + * Performs a single transaction attempt. All reads/writes should be performed using {@code + * txn}. + * + *
+     * <p>Implementations of this method should not attempt to commit the transaction directly:
+     * returning normally will result in the runner attempting to commit the transaction once the
+     * returned future completes, retrying on abort.
+     *
+     * <p>In most cases, the implementation will not need to catch {@code SpannerException}s from
+     * Spanner operations, instead letting these propagate to the framework. The transaction
+     * runner will take appropriate action based on the type of exception. In particular,
+     * implementations should never catch an exception of type {@link Code#ABORTED}: these
+     * indicate that some reads may have returned inconsistent data and the transaction attempt
+     * must be aborted.
+     *
+     * @param txn the transaction
+     * @return future over the result of the work
+     */
+    ApiFuture<R> doWorkAsync(TransactionContext txn);
+  }
+
+  /** Executes a read/write transaction asynchronously using the given executor. */
+  <R> ApiFuture<R> runAsync(AsyncWork<R> work, Executor executor);
+
+  /**
+   * Returns the timestamp at which the transaction committed. {@link ApiFuture#get()} will throw
+   * an {@link ExecutionException} if the transaction did not commit.
+   */
+  ApiFuture<Timestamp> getCommitTimestamp();
+
+  /**
+   * Returns the {@link CommitResponse} of this transaction. {@link ApiFuture#get()} throws an
+   * {@link ExecutionException} if the transaction did not commit.
+   */
+  ApiFuture<CommitResponse> getCommitResponse();
+}
diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AsyncRunnerImpl.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AsyncRunnerImpl.java
new file mode 100644
index 000000000000..afe2fdb24617
--- /dev/null
+++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AsyncRunnerImpl.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.base.Preconditions.checkState; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.cloud.Timestamp; +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.MoreExecutors; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; + +class AsyncRunnerImpl implements AsyncRunner { + private final TransactionRunnerImpl delegate; + private SettableApiFuture commitResponse; + + AsyncRunnerImpl(TransactionRunnerImpl delegate) { + this.delegate = Preconditions.checkNotNull(delegate); + } + + @Override + public ApiFuture runAsync(final AsyncWork work, Executor executor) { + Preconditions.checkState(commitResponse == null, "runAsync() can only be called once"); + commitResponse = SettableApiFuture.create(); + final SettableApiFuture res = SettableApiFuture.create(); + executor.execute( + () -> { + try { + res.set(runTransaction(work)); + } catch (Throwable t) { + res.setException(t); + } finally { + setCommitResponse(); + } + }); + return res; + } + + private R runTransaction(final AsyncWork work) { + return delegate.run( + transaction -> { + try { + return work.doWorkAsync(transaction).get(); + } catch (ExecutionException e) { + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + }); + } + + private void 
setCommitResponse() { + try { + commitResponse.set(delegate.getCommitResponse()); + } catch (Throwable t) { + commitResponse.setException(t); + } + } + + @Override + public ApiFuture getCommitTimestamp() { + checkState(commitResponse != null, "runAsync() has not yet been called"); + return ApiFutures.transform( + commitResponse, CommitResponse::getCommitTimestamp, MoreExecutors.directExecutor()); + } + + public ApiFuture getCommitResponse() { + checkState(commitResponse != null, "runAsync() has not yet been called"); + return commitResponse; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AsyncTransactionManager.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AsyncTransactionManager.java new file mode 100644 index 000000000000..502ec9d54f8b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AsyncTransactionManager.java @@ -0,0 +1,225 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.core.ApiFuture; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Options.TransactionOption; +import com.google.cloud.spanner.TransactionManager.TransactionState; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.MoreExecutors; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + * An interface for managing the life cycle of a read write transaction including all its retries. + * See {@link TransactionContext} for a description of transaction semantics. + * + *
<p>
At any point in time there can be at most one active transaction in this manager. When that + * transaction is committed, if it fails with an {@code ABORTED} error, calling {@link + * #resetForRetryAsync()} would create a new {@link TransactionContextFuture}. The newly created + * transaction would use the same session thus increasing its lock priority. If the transaction is + * committed successfully, or is rolled back or commit fails with any error other than {@code + * ABORTED}, the manager is considered complete and no further transactions are allowed to be + * created in it. + * + *
<p>
Every {@code AsyncTransactionManager} should either be committed or rolled back. Failure to do + * so can cause resources to be leaked and deadlocks. Easiest way to guarantee this is by calling + * {@link #close()} in a finally block. + * + * @see DatabaseClient#transactionManagerAsync(TransactionOption...) + */ +public interface AsyncTransactionManager extends AutoCloseable { + /** + * {@link ApiFuture} that returns a {@link TransactionContext} and that supports chaining of + * multiple {@link TransactionContextFuture}s to form a transaction. + */ + interface TransactionContextFuture extends ApiFuture { + /** + * Sets the first step to execute as part of this transaction after the transaction has started + * using the specified executor. {@link MoreExecutors#directExecutor()} can be be used for + * lightweight functions, but should be avoided for heavy or blocking operations. See also + * {@link ListenableFuture#addListener(Runnable, Executor)} for further information. + */ + AsyncTransactionStep then( + AsyncTransactionFunction function, Executor executor); + } + + /** + * {@link ApiFuture} that returns the commit {@link Timestamp} of a Cloud Spanner transaction that + * is executed using an {@link AsyncTransactionManager}. This future is returned by the call to + * {@link AsyncTransactionStep#commitAsync()} of the last step in the transaction. + */ + interface CommitTimestampFuture extends ApiFuture { + /** + * Returns the commit timestamp of the transaction. Getting this value should always be done in + * order to ensure that the transaction succeeded. If any of the steps in the transaction fails + * with an uncaught exception, this method will automatically stop the transaction at that point + * and the exception will be returned as the cause of the {@link ExecutionException} that is + * thrown by this method. + * + * @throws AbortedException if the transaction was aborted by Cloud Spanner and needs to be + * retried. 
+ */ + @Override + Timestamp get() throws AbortedException, InterruptedException, ExecutionException; + + /** + * Same as {@link #get()}, but will throw a {@link TimeoutException} if the transaction does not + * finish within the timeout. + */ + @Override + Timestamp get(long timeout, TimeUnit unit) + throws AbortedException, InterruptedException, ExecutionException, TimeoutException; + } + + /** + * {@link AsyncTransactionStep} is returned by {@link + * TransactionContextFuture#then(AsyncTransactionFunction, Executor)} and {@link + * AsyncTransactionStep#then(AsyncTransactionFunction, Executor)} and allows transaction steps + * that should be executed serially to be chained together. Each step can contain one or more + * statements that may execute in parallel. + * + *
<p>
Example usage: + * + *
<pre>{@code
+   * final String column = "FirstName";
+   * final long singerId = 1L;
+   * AsyncTransactionManager manager = client.transactionManagerAsync();
+   * TransactionContextFuture txnFuture = manager.beginAsync();
+   * txnFuture
+   *   .then((transaction, ignored) ->
+   *     transaction.readRowAsync("Singers", Key.of(singerId), Collections.singleton(column)),
+   *     executor)
+   *   .then((transaction, row) ->
+   *     transaction.bufferAsync(
+   *         Mutation.newUpdateBuilder("Singers")
+   *           .set(column).to(row.getString(column).toUpperCase())
+   *           .build()),
+   *     executor)
+   *   .commitAsync();
+   * }</pre>
+ * + * @param + * @param + */ + interface AsyncTransactionStep extends ApiFuture { + /** + * Adds a step to the transaction chain that should be executed using the specified executor. + * This step is guaranteed to be executed only after the previous step executed successfully. + * {@link MoreExecutors#directExecutor()} can be be used for lightweight functions, but should + * be avoided for heavy or blocking operations. See also {@link + * ListenableFuture#addListener(Runnable, Executor)} for further information. + */ + AsyncTransactionStep then( + AsyncTransactionFunction next, Executor executor); + + /** + * Commits the transaction and returns a {@link CommitTimestampFuture} that will return the + * commit timestamp of the transaction, or throw the first uncaught exception in the transaction + * chain as an {@link ExecutionException}. + */ + CommitTimestampFuture commitAsync(); + } + + /** + * Each step in a transaction chain is defined by an {@link AsyncTransactionFunction}. It receives + * a {@link TransactionContext} and the output value of the previous transaction step as its input + * parameters. The method should return an {@link ApiFuture} that will return the result of this + * step. + * + * @param + * @param + */ + interface AsyncTransactionFunction { + /** + * This method is called when this transaction step is executed. The input value is the result + * of the previous step, and this method will only be called if the previous step executed + * successfully. + * + * @param txn the {@link TransactionContext} that can be used to execute statements. + * @param input the result of the previous transaction step. + * @return an {@link ApiFuture} that will return the result of this step, and that will be the + * input of the next transaction step. This method should never return null. + * Instead, if the method does not have a return value, the method should return + * ApiFutures#immediateFuture(null). 
+ */ + ApiFuture apply(TransactionContext txn, I input) throws Exception; + } + + /** + * Creates a new read write transaction. This must be called before doing any other operation and + * can only be called once. To create a new transaction for subsequent retries, see {@link + * #resetForRetryAsync()}. + */ + TransactionContextFuture beginAsync(); + + /** + * Initializes a new read-write transaction that is a retry of a previously aborted transaction. + * This method must be called before performing any operations, and it can only be invoked once + * per transaction lifecycle. + * + *
<p>
This method should only be used when multiplexed sessions are enabled to create a retry for + * a previously aborted transaction. This method can be used instead of {@link + * #resetForRetryAsync()} to create a retry. Using this method or {@link #resetForRetryAsync()} + * will have the same effect. You must pass in the {@link AbortedException} from the previous + * attempt to preserve the transaction's priority. + * + *
<p>
For regular sessions, this behaves the same as {@link #beginAsync()}. + */ + TransactionContextFuture beginAsync(AbortedException exception); + + /** + * Rolls back the currently active transaction. In most cases there should be no need to call this + * explicitly since {@link #close()} would automatically roll back any active transaction. + */ + ApiFuture rollbackAsync(); + + /** + * Creates a new transaction for retry. This should only be called if the previous transaction + * failed with {@code ABORTED}. In all other cases, this will throw an {@link + * IllegalStateException}. Users should backoff before calling this method. Backoff delay is + * specified by {@link SpannerException#getRetryDelayInMillis()} on the {@code SpannerException} + * throw by the previous commit call. + */ + TransactionContextFuture resetForRetryAsync(); + + /** Returns the state of the transaction. */ + TransactionState getState(); + + /** + * Returns the {@link CommitResponse} of this transaction. This method may only be called after + * committing the transaction. + */ + ApiFuture getCommitResponse(); + + /** + * Closes the manager. If there is an active transaction, it will be rolled back. Underlying + * session will be released back to the session pool. + */ + @Override + void close(); + + /** + * Closes the transaction manager. If there is an active transaction, it will be rolled back. The + * underlying session will be released back to the session pool. The returned {@link ApiFuture} is + * done when the transaction (if any) has been rolled back. 
+ */ + ApiFuture closeAsync(); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AsyncTransactionManagerImpl.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AsyncTransactionManagerImpl.java new file mode 100644 index 000000000000..c394ad09fe26 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AsyncTransactionManagerImpl.java @@ -0,0 +1,236 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Options.TransactionOption; +import com.google.cloud.spanner.SessionImpl.SessionTransaction; +import com.google.cloud.spanner.TransactionContextFutureImpl.CommittableAsyncTransactionManager; +import com.google.cloud.spanner.TransactionManager.TransactionState; +import com.google.common.base.MoreObjects; +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.ByteString; + +/** Implementation of {@link AsyncTransactionManager}. 
*/ +final class AsyncTransactionManagerImpl + implements CommittableAsyncTransactionManager, SessionTransaction { + + private final SessionImpl session; + private ISpan span; + private final Options options; + + private TransactionRunnerImpl.TransactionContextImpl txn; + private TransactionState txnState; + private final SettableApiFuture commitResponse = SettableApiFuture.create(); + + AsyncTransactionManagerImpl(SessionImpl session, ISpan span, TransactionOption... options) { + this.session = session; + this.span = span; + this.options = Options.fromTransactionOptions(options); + } + + @Override + public void setSpan(ISpan span) { + this.span = span; + } + + @Override + public void close() { + SpannerApiFutures.get(closeAsync()); + } + + @Override + public ApiFuture closeAsync() { + ApiFuture res = null; + if (txnState == TransactionState.STARTED) { + res = rollbackAsync(); + } + if (txn != null) { + txn.close(); + } + if (session != null) { + session.onTransactionDone(); + } + return MoreObjects.firstNonNull(res, ApiFutures.immediateFuture(null)); + } + + @Override + public TransactionContextFutureImpl beginAsync() { + Preconditions.checkState(txn == null, "begin can only be called once"); + return new TransactionContextFutureImpl(this, internalBeginAsync(true, ByteString.EMPTY)); + } + + @Override + public TransactionContextFutureImpl beginAsync(AbortedException exception) { + Preconditions.checkState(txn == null, "begin can only be called once"); + Preconditions.checkNotNull(exception, "AbortedException from the previous attempt is required"); + ByteString abortedTransactionId = + exception.getTransactionID() != null ? 
exception.getTransactionID() : ByteString.EMPTY; + return new TransactionContextFutureImpl(this, internalBeginAsync(true, abortedTransactionId)); + } + + private ApiFuture internalBeginAsync( + boolean firstAttempt, ByteString abortedTransactionID) { + txnState = TransactionState.STARTED; + + // Determine the latest transactionId when using a multiplexed session. + ByteString multiplexedSessionPreviousTransactionId = ByteString.EMPTY; + if (firstAttempt && session.getIsMultiplexed()) { + multiplexedSessionPreviousTransactionId = abortedTransactionID; + } + if (txn != null && session.getIsMultiplexed() && !firstAttempt) { + // Use the current transactionId if available, otherwise fallback to the previous aborted + // transactionId. + multiplexedSessionPreviousTransactionId = + txn.transactionId != null ? txn.transactionId : txn.getPreviousTransactionId(); + } + + txn = + session.newTransaction( + options, /* previousTransactionId= */ multiplexedSessionPreviousTransactionId); + if (firstAttempt) { + session.setActive(this); + } + final SettableApiFuture res = SettableApiFuture.create(); + final ApiFuture fut; + if (firstAttempt) { + fut = ApiFutures.immediateFuture(null); + } else { + fut = txn.ensureTxnAsync(); + } + ApiFutures.addCallback( + fut, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable t) { + onError(t); + res.setException(SpannerExceptionFactory.asSpannerException(t)); + } + + @Override + public void onSuccess(Void result) { + res.set(txn); + } + }, + MoreExecutors.directExecutor()); + return res; + } + + @Override + public void onError(Throwable t) { + if (t instanceof AbortedException) { + txnState = TransactionState.ABORTED; + } + } + + @Override + public ApiFuture commitAsync() { + Preconditions.checkState( + txnState == TransactionState.STARTED, + "commit can only be invoked if the transaction is in progress. 
Current state: " + txnState); + if (txn.isAborted()) { + txnState = TransactionState.ABORTED; + return ApiFutures.immediateFailedFuture( + SpannerExceptionFactory.newSpannerException( + ErrorCode.ABORTED, "Transaction already aborted")); + } + ApiFuture commitResponseFuture = txn.commitAsync(); + txnState = TransactionState.COMMITTED; + + ApiFutures.addCallback( + commitResponseFuture, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable t) { + if (t instanceof AbortedException) { + txnState = TransactionState.ABORTED; + } else { + txnState = TransactionState.COMMIT_FAILED; + if (span != null) { + span.setStatus(t); + span.end(); + } + commitResponse.setException(t); + } + } + + @Override + public void onSuccess(CommitResponse result) { + if (span != null) { + span.end(); + } + commitResponse.set(result); + } + }, + MoreExecutors.directExecutor()); + return ApiFutures.transform( + commitResponseFuture, CommitResponse::getCommitTimestamp, MoreExecutors.directExecutor()); + } + + @Override + public ApiFuture rollbackAsync() { + Preconditions.checkState( + txnState == TransactionState.STARTED, + "rollback can only be called if the transaction is in progress"); + try { + return ApiFutures.transformAsync( + txn.rollbackAsync(), + ignored -> ApiFutures.immediateFuture(null), + MoreExecutors.directExecutor()); + } finally { + if (span != null) { + span.addAnnotation("Transaction rolled back"); + span.end(); + } + txnState = TransactionState.ROLLED_BACK; + } + } + + @Override + public TransactionContextFuture resetForRetryAsync() { + if (txn == null || !txn.isAborted() && txnState != TransactionState.ABORTED) { + throw new IllegalStateException( + "resetForRetry can only be called if the previous attempt aborted"); + } + return new TransactionContextFutureImpl(this, internalBeginAsync(false, ByteString.EMPTY)); + } + + @Override + public TransactionState getState() { + return txnState; + } + + @Override + public ApiFuture getCommitResponse() { + 
Preconditions.checkState( + txnState == TransactionState.COMMITTED, + "getCommitResponse can only be invoked if the transaction was successfully committed"); + return commitResponse; + } + + @Override + public void invalidate() { + if (txnState == TransactionState.STARTED || txnState == null) { + txnState = TransactionState.ROLLED_BACK; + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Backup.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Backup.java new file mode 100644 index 000000000000..6d694b0052a0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Backup.java @@ -0,0 +1,204 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.base.Preconditions.checkArgument; + +import com.google.api.client.util.Preconditions; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.paging.Page; +import com.google.cloud.Policy; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.encryption.EncryptionInfo; +import com.google.longrunning.Operation; +import com.google.spanner.admin.database.v1.CreateBackupMetadata; +import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; + +/** + * Represents a Cloud Spanner database backup. 
{@code Backup} adds a layer of service related + * functionality over {@code BackupInfo}. + */ +public class Backup extends BackupInfo { + public static class Builder extends BackupInfo.BuilderImpl { + private final DatabaseAdminClient dbClient; + + Builder(DatabaseAdminClient dbClient, BackupId backupId) { + super(backupId); + this.dbClient = Preconditions.checkNotNull(dbClient); + } + + private Builder(Backup backup) { + super(backup); + this.dbClient = backup.dbClient; + } + + @Override + public Backup build() { + return new Backup(this); + } + } + + private static final String FILTER_BACKUP_OPERATIONS_TEMPLATE = "name:backups/%s"; + private final DatabaseAdminClient dbClient; + + Backup(Builder builder) { + super(builder); + this.dbClient = Preconditions.checkNotNull(builder.dbClient); + } + + /** Creates a backup on the server based on the source of this {@link Backup} instance. */ + public OperationFuture create() { + return dbClient.createBackup(this); + } + + /** + * Returns true if a backup with the id of this {@link Backup} exists on Cloud + * Spanner. + */ + public boolean exists() { + try { + dbClient.getBackup(instance(), backup()); + } catch (SpannerException e) { + if (e.getErrorCode() == ErrorCode.NOT_FOUND) { + return false; + } + throw e; + } + return true; + } + + /** + * Returns true if this backup is ready to use. The value returned by this method + * could be out-of-sync with the value returned by {@link #getState()}, as this method will make a + * round-trip to the server and return a value based on the response from the server. + */ + public boolean isReady() { + return reload().getState() == State.READY; + } + + /** + * Fetches the backup's current information and returns a new {@link Backup} instance. It does not + * update this instance. + */ + public Backup reload() throws SpannerException { + return dbClient.getBackup(instance(), backup()); + } + + /** Deletes this backup on Cloud Spanner. 
*/ + public void delete() throws SpannerException { + dbClient.deleteBackup(instance(), backup()); + } + + /** + * Updates the expire time of this backup on Cloud Spanner. If this {@link Backup} does not have + * an expire time, the method will throw an {@link IllegalStateException}. + */ + public void updateExpireTime() { + Preconditions.checkState(getExpireTime() != null, "This backup has no expire time"); + dbClient.updateBackup(instance(), backup(), getExpireTime()); + } + + /** + * Restores this backup to the specified database. The database must not already exist and will be + * created by this call. The database may be created in a different instance than where the backup + * is stored. + */ + public OperationFuture restore(DatabaseId database) { + Preconditions.checkNotNull(database); + return dbClient.restoreDatabase( + instance(), backup(), database.getInstanceId().getInstance(), database.getDatabase()); + } + + /** Returns all long-running backup operations for this {@link Backup}. */ + public Page listBackupOperations() { + return dbClient.listBackupOperations( + instance(), Options.filter(String.format(FILTER_BACKUP_OPERATIONS_TEMPLATE, backup()))); + } + + /** Returns the IAM {@link Policy} for this backup. */ + public Policy getIAMPolicy() { + return dbClient.getBackupIAMPolicy(instance(), backup()); + } + + /** + * Updates the IAM policy for this backup and returns the resulting policy. It is highly + * recommended to first get the current policy and base the updated policy on the returned policy. + * See {@link Policy.Builder#setEtag(String)} for information on the recommended read-modify-write + * cycle. + */ + public Policy setIAMPolicy(Policy policy) { + return dbClient.setBackupIAMPolicy(instance(), backup(), policy); + } + + /** + * Tests for the given permissions on this backup for the caller. + * + * @param permissions the permissions to test for. 
Permissions with wildcards (such as '*', + * 'spanner.*', 'spanner.instances.*') are not allowed. + * @return the subset of the tested permissions that the caller is allowed. + */ + public Iterable testIAMPermissions(Iterable permissions) { + return dbClient.testBackupIAMPermissions(instance(), backup(), permissions); + } + + public Builder toBuilder() { + return new Builder(this); + } + + private String instance() { + return getInstanceId().getInstance(); + } + + private String backup() { + return getId().getBackup(); + } + + private String sourceDatabase() { + return getDatabase().getDatabase(); + } + + static Backup fromProto( + com.google.spanner.admin.database.v1.Backup proto, DatabaseAdminClient client) { + checkArgument(!proto.getName().isEmpty(), "Missing expected 'name' field"); + checkArgument(!proto.getDatabase().isEmpty(), "Missing expected 'database' field"); + return new Backup.Builder(client, BackupId.of(proto.getName())) + .setState(fromProtoState(proto.getState())) + .setSize(proto.getSizeBytes()) + .setExpireTime(Timestamp.fromProto(proto.getExpireTime())) + .setVersionTime(Timestamp.fromProto(proto.getVersionTime())) + .setDatabase(DatabaseId.of(proto.getDatabase())) + .setEncryptionInfo(EncryptionInfo.fromProtoOrNull(proto.getEncryptionInfo())) + .setProto(proto) + .setMaxExpireTime(Timestamp.fromProto(proto.getMaxExpireTime())) + .addAllReferencingBackups(proto.getReferencingBackupsList()) + .build(); + } + + static BackupInfo.State fromProtoState( + com.google.spanner.admin.database.v1.Backup.State protoState) { + switch (protoState) { + case STATE_UNSPECIFIED: + return BackupInfo.State.UNSPECIFIED; + case CREATING: + return BackupInfo.State.CREATING; + case READY: + return BackupInfo.State.READY; + default: + throw new IllegalArgumentException("Unrecognized state " + protoState); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BackupId.java 
b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BackupId.java new file mode 100644 index 000000000000..754ad02be875 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BackupId.java @@ -0,0 +1,108 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.common.base.Preconditions; +import java.util.Map; +import java.util.Objects; + +/** Represents an id of a Cloud Spanner backup resource. */ +public final class BackupId { + private static final PathTemplate NAME_TEMPLATE = + PathTemplate.create("projects/{project}/instances/{instance}/backups/{backup}"); + + private final InstanceId instanceId; + private final String backup; + + BackupId(InstanceId instanceId, String backup) { + this.instanceId = Preconditions.checkNotNull(instanceId); + this.backup = Preconditions.checkNotNull(backup); + } + + /** Returns the instance id for this backup. */ + public InstanceId getInstanceId() { + return instanceId; + } + + /** Returns the backup id. */ + public String getBackup() { + return backup; + } + + /** Returns the name of this backup. 
*/ + public String getName() { + return String.format( + "projects/%s/instances/%s/backups/%s", + instanceId.getProject(), instanceId.getInstance(), backup); + } + + @Override + public int hashCode() { + return Objects.hash(instanceId, backup); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + BackupId that = (BackupId) o; + return that.instanceId.equals(instanceId) && that.backup.equals(backup); + } + + @Override + public String toString() { + return getName(); + } + + /** + * Creates a {@code BackupId} from the name of the backup. + * + * @param name the backup name of the form {@code + * projects/PROJECT_ID/instances/INSTANCE_ID/backups/BACKUP_ID} + * @throws IllegalArgumentException if {@code name} does not conform to the expected pattern + */ + static BackupId of(String name) { + Preconditions.checkNotNull(name); + Map parts = NAME_TEMPLATE.match(name); + Preconditions.checkArgument( + parts != null, "Name should conform to pattern %s: %s", NAME_TEMPLATE, name); + return of(parts.get("project"), parts.get("instance"), parts.get("backup")); + } + + /** + * Creates a {@code BackupId} given project, instance and backup IDs. The backup id must conform + * to the regular expression [a-z][a-z0-9_\-]*[a-z0-9] and be between 2 and 60 characters in + * length. + */ + public static BackupId of(String project, String instance, String backup) { + return new BackupId(new InstanceId(project, instance), backup); + } + + /** + * Creates a {@code BackupId} given the instance identity and backup id. The backup id must + * conform to the regular expression [a-z][a-z0-9_\-]*[a-z0-9] and be between 2 and 60 characters + * in length. 
+ */ + public static BackupId of(InstanceId instanceId, String backup) { + return new BackupId(instanceId, backup); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BackupInfo.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BackupInfo.java new file mode 100644 index 000000000000..1349efeee2ef --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BackupInfo.java @@ -0,0 +1,356 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.client.util.Preconditions; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.encryption.BackupEncryptionConfig; +import com.google.cloud.spanner.encryption.EncryptionInfo; +import com.google.spanner.admin.database.v1.Database; +import java.util.List; +import java.util.Objects; +import javax.annotation.Nullable; + +/** Represents a Cloud Spanner database backup. */ +public class BackupInfo { + public abstract static class Builder { + abstract Builder setState(State state); + + abstract Builder setSize(long size); + + /** + * Returned when retrieving a backup. + * + *

The encryption information for the backup. If the encryption key protecting this resource + * is customer managed, then kms_key_version will be filled. + */ + abstract Builder setEncryptionInfo(EncryptionInfo encryptionInfo); + + abstract Builder setProto(com.google.spanner.admin.database.v1.Backup proto); + + /** + * Optional for creating a new backup. + * + *

The encryption configuration to be used for the backup. The possible configurations are + * {@link com.google.cloud.spanner.encryption.CustomerManagedEncryption}, {@link + * com.google.cloud.spanner.encryption.GoogleDefaultEncryption} and {@link + * com.google.cloud.spanner.encryption.UseDatabaseEncryption}. + * + *

If no encryption config is given the backup will be created with the same encryption as + * set by the database ({@link com.google.cloud.spanner.encryption.UseDatabaseEncryption}). + */ + public abstract Builder setEncryptionConfig(BackupEncryptionConfig encryptionConfig); + + /** + * Required for creating a new backup. + * + *

Sets the expiration time of the backup. The expiration time of the backup, with + * microseconds granularity that must be at least 6 hours and at most 366 days from the time the + * request is received. Once the expireTime has passed, Cloud Spanner will delete the backup and + * free the resources used by the backup. + */ + public abstract Builder setExpireTime(Timestamp expireTime); + + /** + * Optional for creating a new backup. + * + *

Specifies the timestamp to have an externally consistent copy of the database. If no + * version time is specified, it will be automatically set to the backup create time. + * + *

The version time can be as far in the past as specified by the database earliest version + * time (see {@link Database#getEarliestVersionTime()}). + */ + public abstract Builder setVersionTime(Timestamp versionTime); + + /** + * Required for creating a new backup. + * + *

Sets the source database to use for creating the backup. + */ + public abstract Builder setDatabase(DatabaseId database); + + /** Builds the backup from this builder. */ + public abstract Backup build(); + + /** + * Output Only. + * + *

Returns the max allowed expiration time of the backup, with microseconds granularity. + */ + protected Builder setMaxExpireTime(Timestamp maxExpireTime) { + throw new UnsupportedOperationException("Unimplemented"); + } + + /** + * Output Only. + * + *

Returns the names of the destination backups being created by copying this source backup. + */ + protected Builder addAllReferencingBackups(List referencingBackups) { + throw new UnsupportedOperationException("Unimplemented"); + } + } + + abstract static class BuilderImpl extends Builder { + protected final BackupId id; + private State state = State.UNSPECIFIED; + private Timestamp expireTime; + private Timestamp versionTime; + private DatabaseId database; + private long size; + private BackupEncryptionConfig encryptionConfig; + private EncryptionInfo encryptionInfo; + private com.google.spanner.admin.database.v1.Backup proto; + private Timestamp maxExpireTime; + private List referencingBackups; + + BuilderImpl(BackupId id) { + this.id = Preconditions.checkNotNull(id); + } + + BuilderImpl(BackupInfo other) { + this.id = other.id; + this.state = other.state; + this.expireTime = other.expireTime; + this.versionTime = other.versionTime; + this.database = other.database; + this.size = other.size; + this.encryptionConfig = other.encryptionConfig; + this.encryptionInfo = other.encryptionInfo; + this.proto = other.proto; + this.maxExpireTime = other.maxExpireTime; + this.referencingBackups = other.referencingBackups; + } + + @Override + Builder setState(State state) { + this.state = Preconditions.checkNotNull(state); + return this; + } + + @Override + public Builder setExpireTime(Timestamp expireTime) { + this.expireTime = Preconditions.checkNotNull(expireTime); + return this; + } + + @Override + public Builder setVersionTime(Timestamp versionTime) { + this.versionTime = versionTime; + return this; + } + + @Override + public Builder setDatabase(DatabaseId database) { + this.database = Preconditions.checkNotNull(database); + return this; + } + + @Override + public Builder setEncryptionConfig(BackupEncryptionConfig encryptionConfig) { + this.encryptionConfig = encryptionConfig; + return this; + } + + @Override + Builder setSize(long size) { + this.size = size; + return 
this; + } + + @Override + Builder setEncryptionInfo(EncryptionInfo encryptionInfo) { + this.encryptionInfo = encryptionInfo; + return this; + } + + @Override + Builder setProto(@Nullable com.google.spanner.admin.database.v1.Backup proto) { + this.proto = proto; + return this; + } + + @Override + protected Builder setMaxExpireTime(Timestamp maxExpireTime) { + this.maxExpireTime = Preconditions.checkNotNull(maxExpireTime); + return this; + } + + @Override + protected Builder addAllReferencingBackups(List referencingBackups) { + this.referencingBackups = Preconditions.checkNotNull(referencingBackups); + return this; + } + } + + /** State of the backup. */ + public enum State { + // Not specified. + UNSPECIFIED, + // The backup is still being created and is not ready to use. + CREATING, + // The backup is fully created and ready to use. + READY, + } + + private final BackupId id; + private final State state; + private final Timestamp expireTime; + private final Timestamp versionTime; + private final DatabaseId database; + private final long size; + private final BackupEncryptionConfig encryptionConfig; + private final EncryptionInfo encryptionInfo; + private final com.google.spanner.admin.database.v1.Backup proto; + private final Timestamp maxExpireTime; + private final List referencingBackups; + + BackupInfo(BuilderImpl builder) { + this.id = builder.id; + this.state = builder.state; + this.size = builder.size; + this.encryptionConfig = builder.encryptionConfig; + this.encryptionInfo = builder.encryptionInfo; + this.expireTime = builder.expireTime; + this.versionTime = builder.versionTime; + this.database = builder.database; + this.proto = builder.proto; + this.maxExpireTime = builder.maxExpireTime; + this.referencingBackups = builder.referencingBackups; + } + + /** Returns the backup id. */ + public BackupId getId() { + return id; + } + + /** Returns the id of the instance that the backup belongs to. 
*/ + public InstanceId getInstanceId() { + return id.getInstanceId(); + } + + /** Returns the state of the backup. */ + public State getState() { + return state; + } + + /** Returns the size of the backup in bytes. */ + public long getSize() { + return size; + } + + /** + * Returns the {@link BackupEncryptionConfig} to encrypt the backup during its creation. Returns + * null if no customer-managed encryption key should be used. + */ + public BackupEncryptionConfig getEncryptionConfig() { + return encryptionConfig; + } + + /** + * Returns the {@link EncryptionInfo} of the backup if the backup is encrypted, or null + * if this backup is not encrypted. + */ + public EncryptionInfo getEncryptionInfo() { + return encryptionInfo; + } + + /** Returns the expire time of the backup. */ + public Timestamp getExpireTime() { + return expireTime; + } + + /** Returns the version time of the backup. */ + public Timestamp getVersionTime() { + return versionTime; + } + + /** Returns the id of the database that was used to create the backup. */ + public DatabaseId getDatabase() { + return database; + } + + /** Returns the raw proto instance that was used to construct this {@link Backup}. */ + public @Nullable com.google.spanner.admin.database.v1.Backup getProto() { + return proto; + } + + /** Returns the max expire time of this {@link Backup}. */ + public Timestamp getMaxExpireTime() { + return maxExpireTime; + } + + /** + * Returns the names of the destination backups being created by copying this source backup {@link + * Backup}. 
+ */ + public List getReferencingBackups() { + return referencingBackups; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + BackupInfo that = (BackupInfo) o; + return id.equals(that.id) + && state == that.state + && size == that.size + && Objects.equals(encryptionConfig, that.encryptionConfig) + && Objects.equals(encryptionInfo, that.encryptionInfo) + && Objects.equals(expireTime, that.expireTime) + && Objects.equals(versionTime, that.versionTime) + && Objects.equals(database, that.database) + && Objects.equals(maxExpireTime, that.maxExpireTime) + && Objects.equals(referencingBackups, that.referencingBackups); + } + + @Override + public int hashCode() { + return Objects.hash( + id, + state, + size, + encryptionConfig, + encryptionInfo, + expireTime, + versionTime, + database, + maxExpireTime, + referencingBackups); + } + + @Override + public String toString() { + return String.format( + "Backup[%s, %s, %d, %s, %s, %s, %s, %s, %s, %s]", + id.getName(), + state, + size, + encryptionConfig, + encryptionInfo, + expireTime, + versionTime, + database, + maxExpireTime, + referencingBackups); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BatchClient.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BatchClient.java new file mode 100644 index 000000000000..2d12179bc913 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BatchClient.java @@ -0,0 +1,72 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +/** + * Interface for the Batch Client that is used to read data from a Cloud Spanner database. An + * instance of this is tied to a specific database. + * + *

{@code BatchClient} is useful when one wants to read or query a large amount of data from + * Cloud Spanner across multiple processes, even across different machines. It allows to create + * partitions of Cloud Spanner database and then read or query over each partition independently yet + * at the same snapshot. + */ +public interface BatchClient { + + /** + * Returns a {@link BatchReadOnlyTransaction} context in which multiple reads and/or queries can + * be performed. All reads/queries will use the same timestamp, and the timestamp can be inspected + * after this transaction is created successfully. This is a blocking method since it waits to + * finish the rpcs. + * + *

Note that the bounded staleness modes, {@link TimestampBound.Mode#MIN_READ_TIMESTAMP} and + * {@link TimestampBound.Mode#MAX_STALENESS}, are not supported for {@link + * BatchReadOnlyTransaction}. + * + * @param bound the timestamp bound at which to perform the read + * + *

{@code
+   * BatchReadOnlyTransaction txn = batchClient.batchReadOnlyTransaction(TimestampBound.strong());
+   * }
+ * + */ + BatchReadOnlyTransaction batchReadOnlyTransaction(TimestampBound bound); + + /** + * Returns a {@link BatchReadOnlyTransaction} context in which multiple reads and/or queries can + * be performed. This is a non-blocking method. All reads/queries will use the same timestamp, and + * the timestamp can be inspected after this transaction is created successfully. + * + *

This method is useful to recreate a BatchReadOnlyTransaction object from an existing + * batchTransactionId. For example one might send the transaction id to a different process or + * machine and recreate the transaction object there. + * + * @param batchTransactionId to re-initialize the transaction, re-using the timestamp for + * successive read/query. + */ + BatchReadOnlyTransaction batchReadOnlyTransaction(BatchTransactionId batchTransactionId); + + /** + * Returns the {@link DatabaseRole} used by the client connection. The database role that is used + * determines the access permissions that a connection has. This can for example be used to create + * connections that are only permitted to access certain tables. + */ + default String getDatabaseRole() { + throw new UnsupportedOperationException("method should be overwritten"); + } + ; +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BatchClientImpl.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BatchClientImpl.java new file mode 100644 index 000000000000..5cbe01aa7117 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BatchClientImpl.java @@ -0,0 +1,300 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.cloud.spanner.AbstractReadContext.MultiUseReadOnlyTransaction; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.Options.ReadOption; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.Struct; +import com.google.spanner.v1.ExecuteSqlRequest.QueryMode; +import com.google.spanner.v1.PartitionQueryRequest; +import com.google.spanner.v1.PartitionReadRequest; +import com.google.spanner.v1.PartitionResponse; +import com.google.spanner.v1.TransactionSelector; +import java.time.Clock; +import java.time.Duration; +import java.time.Instant; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.ReentrantLock; +import javax.annotation.Nullable; +import javax.annotation.concurrent.GuardedBy; + +/** Default implementation for Batch Client interface. */ +public class BatchClientImpl implements BatchClient { + private final SessionClient sessionClient; + + /** Lock to protect the multiplexed session. */ + private final ReentrantLock multiplexedSessionLock = new ReentrantLock(); + + /** The duration before we try to replace the multiplexed session. The default is 7 days. */ + private final Duration sessionExpirationDuration; + + /** The expiration date/time of the current multiplexed session. 
*/ + @GuardedBy("multiplexedSessionLock") + private final AtomicReference expirationDate; + + @GuardedBy("multiplexedSessionLock") + private final AtomicReference multiplexedSessionReference; + + BatchClientImpl(SessionClient sessionClient) { + this.sessionClient = checkNotNull(sessionClient); + this.sessionExpirationDuration = + Duration.ofMillis( + sessionClient + .getSpanner() + .getOptions() + .getSessionPoolOptions() + .getMultiplexedSessionMaintenanceDuration() + .toMillis()); + // Initialize the expiration date to the start of time to avoid unnecessary null checks. + // This also ensured that a new session is created on first request. + this.expirationDate = new AtomicReference<>(Instant.MIN); + this.multiplexedSessionReference = new AtomicReference<>(); + } + + @Override + @Nullable + public String getDatabaseRole() { + return this.sessionClient.getSpanner().getOptions().getDatabaseRole(); + } + + @Override + public BatchReadOnlyTransaction batchReadOnlyTransaction(TimestampBound bound) { + SessionImpl session = getMultiplexedSession(); + return new BatchReadOnlyTransactionImpl( + MultiUseReadOnlyTransaction.newBuilder() + .setSession(session) + .setCancelQueryWhenClientIsClosed(true) + .setRpc(sessionClient.getSpanner().getRpc()) + .setTimestampBound(bound) + .setDefaultQueryOptions( + sessionClient.getSpanner().getDefaultQueryOptions(sessionClient.getDatabaseId())) + .setExecutorProvider(sessionClient.getSpanner().getAsyncExecutorProvider()) + .setDefaultPrefetchChunks(sessionClient.getSpanner().getDefaultPrefetchChunks()) + .setDefaultDecodeMode(sessionClient.getSpanner().getDefaultDecodeMode()) + .setDefaultDirectedReadOptions( + sessionClient.getSpanner().getOptions().getDirectedReadOptions()) + .setSpan(sessionClient.getSpanner().getTracer().getCurrentSpan()) + .setTracer(sessionClient.getSpanner().getTracer()), + checkNotNull(bound)); + } + + @Override + public BatchReadOnlyTransaction batchReadOnlyTransaction(BatchTransactionId batchTransactionId) { 
+ SessionImpl session = + sessionClient.sessionWithId(checkNotNull(batchTransactionId).getSessionId()); + return new BatchReadOnlyTransactionImpl( + MultiUseReadOnlyTransaction.newBuilder() + .setSession(session) + .setCancelQueryWhenClientIsClosed(true) + .setRpc(sessionClient.getSpanner().getRpc()) + .setTransactionId(batchTransactionId.getTransactionId()) + .setTimestamp(batchTransactionId.getTimestamp()) + .setDefaultQueryOptions( + sessionClient.getSpanner().getDefaultQueryOptions(sessionClient.getDatabaseId())) + .setExecutorProvider(sessionClient.getSpanner().getAsyncExecutorProvider()) + .setDefaultPrefetchChunks(sessionClient.getSpanner().getDefaultPrefetchChunks()) + .setDefaultDecodeMode(sessionClient.getSpanner().getDefaultDecodeMode()) + .setDefaultDirectedReadOptions( + sessionClient.getSpanner().getOptions().getDirectedReadOptions()) + .setSpan(sessionClient.getSpanner().getTracer().getCurrentSpan()) + .setTracer(sessionClient.getSpanner().getTracer()), + batchTransactionId); + } + + private SessionImpl getMultiplexedSession() { + this.multiplexedSessionLock.lock(); + try { + if (Clock.systemUTC().instant().isAfter(this.expirationDate.get()) + || this.multiplexedSessionReference.get() == null) { + this.multiplexedSessionReference.set(this.sessionClient.createMultiplexedSession()); + this.expirationDate.set(Clock.systemUTC().instant().plus(this.sessionExpirationDuration)); + } + return this.multiplexedSessionReference.get(); + } finally { + this.multiplexedSessionLock.unlock(); + } + } + + private static class BatchReadOnlyTransactionImpl extends MultiUseReadOnlyTransaction + implements BatchReadOnlyTransaction { + private final String sessionName; + private final Map options; + + BatchReadOnlyTransactionImpl( + MultiUseReadOnlyTransaction.Builder builder, TimestampBound bound) { + super(builder.setTimestampBound(bound)); + this.sessionName = session.getName(); + this.options = session.getOptions(); + initTransaction(); + } + + 
BatchReadOnlyTransactionImpl( + MultiUseReadOnlyTransaction.Builder builder, BatchTransactionId batchTransactionId) { + super(builder.setTransactionId(batchTransactionId.getTransactionId())); + this.sessionName = session.getName(); + this.options = session.getOptions(); + } + + @Override + public BatchTransactionId getBatchTransactionId() { + return new BatchTransactionId(sessionName, getTransactionId(), getReadTimestamp()); + } + + @Override + public List partitionRead( + PartitionOptions partitionOptions, + String table, + KeySet keys, + Iterable columns, + ReadOption... options) + throws SpannerException { + return partitionReadUsingIndex( + partitionOptions, table, null /*index*/, keys, columns, options); + } + + @Override + public List partitionReadUsingIndex( + PartitionOptions partitionOptions, + String table, + String index, + KeySet keys, + Iterable columns, + ReadOption... option) + throws SpannerException { + Options readOptions = Options.fromReadOptions(option); + Preconditions.checkArgument( + !readOptions.hasLimit(), + "Limit option not supported by partitionRead|partitionReadUsingIndex"); + final PartitionReadRequest.Builder builder = + PartitionReadRequest.newBuilder() + .setSession(sessionName) + .setTable(checkNotNull(table)) + .addAllColumns(columns); + keys.appendToProto(builder.getKeySetBuilder()); + if (index != null) { + builder.setIndex(index); + } + TransactionSelector selector = getTransactionSelector(); + if (selector != null) { + builder.setTransaction(selector); + } + com.google.spanner.v1.PartitionOptions.Builder pbuilder = + com.google.spanner.v1.PartitionOptions.newBuilder(); + if (partitionOptions != null) { + partitionOptions.appendToProto(pbuilder); + } + builder.setPartitionOptions(pbuilder.build()); + + final PartitionReadRequest request = builder.build(); + PartitionResponse response = rpc.partitionRead(request, options); + ImmutableList.Builder partitions = ImmutableList.builder(); + for (com.google.spanner.v1.Partition p : 
response.getPartitionsList()) { + Partition partition = + Partition.createReadPartition( + p.getPartitionToken(), partitionOptions, table, index, keys, columns, readOptions); + partitions.add(partition); + } + return partitions.build(); + } + + @Override + public List partitionQuery( + PartitionOptions partitionOptions, Statement statement, QueryOption... option) + throws SpannerException { + return partitionQuery(partitionOptions, statement, false, option); + } + + private List partitionQuery( + PartitionOptions partitionOptions, + Statement statement, + boolean isFallback, + QueryOption... option) + throws SpannerException { + Options queryOptions = Options.fromQueryOptions(option); + final PartitionQueryRequest.Builder builder = + PartitionQueryRequest.newBuilder().setSession(sessionName).setSql(statement.getSql()); + Map stmtParameters = statement.getParameters(); + if (!stmtParameters.isEmpty()) { + Struct.Builder paramsBuilder = builder.getParamsBuilder(); + for (Map.Entry param : stmtParameters.entrySet()) { + paramsBuilder.putFields(param.getKey(), Value.toProto(param.getValue())); + if (param.getValue() != null && param.getValue().getType() != null) { + builder.putParamTypes(param.getKey(), param.getValue().getType().toProto()); + } + } + } + TransactionSelector selector = getTransactionSelector(); + if (selector != null) { + builder.setTransaction(selector); + } + com.google.spanner.v1.PartitionOptions.Builder pbuilder = + com.google.spanner.v1.PartitionOptions.newBuilder(); + if (partitionOptions != null) { + partitionOptions.appendToProto(pbuilder); + } + builder.setPartitionOptions(pbuilder.build()); + + final PartitionQueryRequest request = builder.build(); + PartitionResponse response = rpc.partitionQuery(request, options); + ImmutableList.Builder partitions = ImmutableList.builder(); + for (com.google.spanner.v1.Partition p : response.getPartitionsList()) { + Partition partition = + Partition.createQueryPartition( + p.getPartitionToken(), 
partitionOptions, statement, queryOptions); + partitions.add(partition); + } + return partitions.build(); + } + + @Override + public ResultSet execute(Partition partition) throws SpannerException { + if (partition.getStatement() != null) { + return executeQueryInternalWithOptions( + partition.getStatement(), + QueryMode.NORMAL, + partition.getQueryOptions(), + partition.getPartitionToken()); + } + return readInternalWithOptions( + partition.getTable(), + partition.getIndex(), + partition.getKeys(), + partition.getColumns(), + partition.getReadOptions(), + partition.getPartitionToken()); + } + + /** + * Closes the session as part of the cleanup. It is the responsibility of the caller to make a + * call to this method once the transaction completes execution across all the channels (which + * is understandably hard to identify). It is okay if the caller does not call the method + * because the backend will anyways clean up the unused session. + */ + @Override + public void cleanup() { + session.close(); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BatchReadOnlyTransaction.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BatchReadOnlyTransaction.java new file mode 100644 index 000000000000..03b08a11730e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BatchReadOnlyTransaction.java @@ -0,0 +1,208 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.Options.ReadOption; +import java.util.List; + +/** + * {@code BatchReadOnlyTransaction} can be configured to read at timestamps in the past and allows + * for exporting arbitrarily large amounts of data from Cloud Spanner databases. This is a read only + * transaction which additionally allows to partition a read or query request. Read/query request + * can then be executed independently over each partition while observing the same snapshot of the + * database. BatchReadOnlyTransaction can also be shared across multiple processes/machines by + * passing around the BatchTransactionId and then recreating the transaction using {@link + * BatchClient#batchReadOnlyTransaction(BatchTransactionId)}. + * + *

Unlike locking read-write transactions, BatchReadOnlyTransaction never abort. They can fail if + * the chosen read timestamp is garbage collected; however any read or query activity within an hour + * on the transaction avoids garbage collection and most applications do not need to worry about + * this in practice. + * + *

To execute a BatchReadOnlyTransaction, specify a {@link TimestampBound}, which tells Cloud + * Spanner how to choose a read timestamp. + */ +public interface BatchReadOnlyTransaction extends ReadOnlyTransaction { + + /** + * Returns a list of {@link Partition} to read zero or more rows from a database. + * + *

These partitions can be executed across multiple processes, even across different machines. + * The partition size and count hints can be configured using {@link PartitionOptions}. + * + * @param partitionOptions configuration for size and count of partitions returned + * @param table the name of the table to read + * @param keys the keys and ranges of rows to read. Regardless of ordering in {@code keys}, rows + * are returned in their natural key order. + * @param columns the columns to read + * @param options the options to configure the read, supported values are {@link + * Options#prefetchChunks()} + * + *

{@code
+   * final BatchReadOnlyTransaction txn =
+   *     batchClient.batchReadOnlyTransaction(TimestampBound.strong());
+   * List<Partition> partitions =
+   *     txn.partitionRead(
+   *         PartitionOptions.getDefaultInstance(),
+   *         "Singers",
+   *         KeySet.all(),
+   *         Arrays.asList("SingerId", "FirstName", "LastName"));
+   * for (final Partition p : partitions) {
+   *   try (ResultSet results = txn.execute(p)) {
+   *     while (results.next()) {
+   *       long singerId = results.getLong(0);
+   *       String firstName = results.getString(1);
+   *       String lastName = results.getString(2);
+   *       System.out.println("[" + singerId + "] " + firstName + " " + lastName);
+   *     }
+   *   }
+   * }
+   * }
+ * + */ + List partitionRead( + PartitionOptions partitionOptions, + String table, + KeySet keys, + Iterable columns, + ReadOption... options) + throws SpannerException; + + /** + * Returns a list of {@link Partition} to read zero or more rows from a database using an index. + * + *

These partitions can be executed across multiple processes, even across different machines. + * The partition size and count can be configured using {@link PartitionOptions}. Though it may + * not necessarily be honored depending on the parameters in the request. + * + * @param partitionOptions configuration for size and count of partitions returned + * @param table the name of the table to read + * @param index the name of the index on {@code table} to use + * @param keys the keys and ranges of index rows to read. Regardless of ordering in {@code keys}, + * rows are returned in the natural key order of the index. + * @param columns the columns to read + * @param options the options to configure the read + * + *

{@code
+   * final BatchReadOnlyTransaction txn =
+   *     batchClient.batchReadOnlyTransaction(TimestampBound.strong());
+   * List<Partition> partitions =
+   *     txn.partitionReadUsingIndex(
+   *         PartitionOptions.getDefaultInstance(),
+   *         "Singers",
+   *         "SingerId",
+   *         KeySet.all(),
+   *         Arrays.asList("SingerId", "FirstName", "LastName"));
+   *
+   * for (Partition p : partitions) {
+   *   try (ResultSet results = txn.execute(p)) {
+   *     while (results.next()) {
+   *       long singerId = results.getLong(0);
+   *       String firstName = results.getString(1);
+   *       String lastName = results.getString(2);
+   *       System.out.println("[" + singerId + "] " + firstName + " " + lastName);
+   *     }
+   *   }
+   * }
+   * }
+ * + */ + List partitionReadUsingIndex( + PartitionOptions partitionOptions, + String table, + String index, + KeySet keys, + Iterable columns, + ReadOption... options) + throws SpannerException; + + /** + * Returns a list of {@link Partition} to execute a query against the database. + * + *

These partitions can be executed across multiple processes, even across different machines. + * The partition size and count can be configured using {@link PartitionOptions}. Though it may + * not necessarily be honored depending on the query and options in the request. + * + * @param partitionOptions configuration for size and count of partitions returned + * @param statement the query statement to execute + * @param options the options to configure the query + * + *

{@code
+   * final BatchReadOnlyTransaction txn =
+   *     batchClient.batchReadOnlyTransaction(TimestampBound.strong());
+   * List<Partition> partitions = txn.partitionQuery(PartitionOptions.getDefaultInstance(),
+   *     Statement.of("SELECT SingerId, FirstName, LastName FROM Singers"));
+   *
+   * for (final Partition p : partitions) {
+   *   try (ResultSet results = txn.execute(p)) {
+   *     while (results.next()) {
+   *       long singerId = results.getLong(0);
+   *       String firstName = results.getString(1);
+   *       String lastName = results.getString(2);
+   *       System.out.println("[" + singerId + "] " + firstName + " " + lastName);
+   *     }
+   *   }
+   * }
+   * }
+ * + */ + List partitionQuery( + PartitionOptions partitionOptions, Statement statement, QueryOption... options) + throws SpannerException; + + /** + * Execute the partition to return {@link ResultSet}. The result returned could be zero or more + * rows. The row metadata may be absent if no rows are returned. + * + * + *
{@code
+   * final BatchReadOnlyTransaction txn =
+   *     batchClient.batchReadOnlyTransaction(TimestampBound.strong());
+   * List<Partition> partitions = txn.partitionQuery(PartitionOptions.getDefaultInstance(),
+   *     Statement.of("SELECT SingerId, FirstName, LastName FROM Singers"));
+   *
+   * for (final Partition p : partitions) {
+   *   try (ResultSet results = txn.execute(p)) {
+   *     while (results.next()) {
+   *       long singerId = results.getLong(0);
+   *       String firstName = results.getString(1);
+   *       String lastName = results.getString(2);
+   *       System.out.println("[" + singerId + "] " + firstName + " " + lastName);
+   *     }
+   *   }
+   * }
+   * }
+ * + * + */ + ResultSet execute(Partition partition) throws SpannerException; + + /** + * Returns a {@link BatchTransactionId} to be re-used across several machines/processes. This + * BatchTransactionId guarantees the subsequent read/query to be executed at the same timestamp. + */ + BatchTransactionId getBatchTransactionId(); + + /** + * Closes the session as part of the cleanup. It is the responsibility of the caller to make a + * call to this method once the transaction completes execution across all the channels (which is + * understandably hard to identify). It is okay if the caller does not call the method because the + * backend will anyways clean up the unused session. + */ + default void cleanup() {} +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BatchTransactionId.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BatchTransactionId.java new file mode 100644 index 000000000000..0de705c12ea9 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BatchTransactionId.java @@ -0,0 +1,83 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.cloud.Timestamp; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.protobuf.ByteString; +import java.io.Serializable; +import java.util.Objects; + +/** + * {@code BatchTransactionId} is unique identifier for {@link BatchReadOnlyTransaction}. It can be + * used to re-initialize a BatchReadOnlyTransaction on different machine or process by calling + * {@link BatchClient#batchReadOnlyTransaction(BatchTransactionId)}. + */ +public class BatchTransactionId implements Serializable { + + private final ByteString transactionId; + private final String sessionId; + private final Timestamp timestamp; + private static final long serialVersionUID = 8067099123096783939L; + + @VisibleForTesting + BatchTransactionId(String sessionId, ByteString transactionId, Timestamp timestamp) { + this.transactionId = Preconditions.checkNotNull(transactionId); + this.sessionId = Preconditions.checkNotNull(sessionId); + this.timestamp = Preconditions.checkNotNull(timestamp); + } + + ByteString getTransactionId() { + return transactionId; + } + + String getSessionId() { + return sessionId; + } + + Timestamp getTimestamp() { + return timestamp; + } + + @Override + public String toString() { + return String.format( + "transactionId: %s, sessionId: %s, timestamp: %s", + transactionId.toStringUtf8(), sessionId, timestamp); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BatchTransactionId that = (BatchTransactionId) o; + return Objects.equals(getTransactionId(), that.getTransactionId()) + && Objects.equals(getTimestamp(), that.getTimestamp()) + && Objects.equals(getSessionId(), that.getSessionId()); + } + + @Override + public int hashCode() { + return Objects.hash(getTransactionId(), getTimestamp(), getSessionId()); + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInMetricsConstant.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInMetricsConstant.java new file mode 100644 index 000000000000..56fd6d28eb8f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInMetricsConstant.java @@ -0,0 +1,284 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.XGoogSpannerRequestId.REQUEST_ID_HEADER_NAME; + +import com.google.api.core.InternalApi; +import com.google.api.gax.tracing.OpenTelemetryMetricsRecorder; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.sdk.metrics.Aggregation; +import io.opentelemetry.sdk.metrics.InstrumentSelector; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.View; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +@InternalApi +public class BuiltInMetricsConstant { + + public static final String METER_NAME = "spanner.googleapis.com/internal/client"; + public static final String 
GAX_METER_NAME = OpenTelemetryMetricsRecorder.GAX_METER_NAME; + static final String SPANNER_METER_NAME = "spanner-java"; + static final String GRPC_METER_NAME = "grpc-java"; + static final String GFE_LATENCIES_NAME = "gfe_latencies"; + static final String AFE_LATENCIES_NAME = "afe_latencies"; + static final String GFE_CONNECTIVITY_ERROR_NAME = "gfe_connectivity_error_count"; + static final String AFE_CONNECTIVITY_ERROR_NAME = "afe_connectivity_error_count"; + static final String OPERATION_LATENCIES_NAME = "operation_latencies"; + static final String ATTEMPT_LATENCIES_NAME = "attempt_latencies"; + static final String OPERATION_LATENCY_NAME = "operation_latency"; + static final String ATTEMPT_LATENCY_NAME = "attempt_latency"; + static final String OPERATION_COUNT_NAME = "operation_count"; + static final String ATTEMPT_COUNT_NAME = "attempt_count"; + + public static final Set SPANNER_METRICS = + ImmutableSet.of( + OPERATION_LATENCIES_NAME, + ATTEMPT_LATENCIES_NAME, + OPERATION_COUNT_NAME, + ATTEMPT_COUNT_NAME, + GFE_LATENCIES_NAME, + AFE_LATENCIES_NAME, + GFE_CONNECTIVITY_ERROR_NAME, + AFE_CONNECTIVITY_ERROR_NAME) + .stream() + .map(m -> METER_NAME + '/' + m) + .collect(Collectors.toSet()); + + // The following attributes are optional and need to be enabled explicitly. 
+ public static final String GRPC_LB_BACKEND_SERVICE_ATTRIBUTE = "grpc.lb.backend_service"; + public static final String GRPC_LB_LOCALITY_ATTRIBUTE = "grpc.lb.locality"; + public static final String GRPC_DISCONNECT_ERROR_ATTRIBUTE = "grpc.disconnect_error"; + + static final Set GRPC_LB_RLS_ATTRIBUTES = + ImmutableSet.of("grpc.lb.rls.data_plane_target", "grpc.lb.pick_result"); + static final Set GRPC_CLIENT_ATTEMPT_STARTED_ATTRIBUTES = + ImmutableSet.of("grpc.method", "grpc.target"); + static final Set GRPC_SUBCHANNEL_DEFAULT_ATTRIBUTES = + ImmutableSet.of("grpc.target", GRPC_LB_BACKEND_SERVICE_ATTRIBUTE, GRPC_LB_LOCALITY_ATTRIBUTE); + static final Set GRPC_SUBCHANNEL_DISCONNECTION_ATTRIBUTES = + ImmutableSet.of( + "grpc.target", + GRPC_LB_BACKEND_SERVICE_ATTRIBUTE, + GRPC_LB_LOCALITY_ATTRIBUTE, + GRPC_DISCONNECT_ERROR_ATTRIBUTE); + static final Set GRPC_XDS_CLIENT_RESOURCE_UPDATE_ATTRIBUTES = + ImmutableSet.of("grpc.xds.resource_type"); + + // Additional gRPC attributes to enable. + static final Map> GRPC_METRIC_ADDITIONAL_ATTRIBUTES = + ImmutableMap.>builder() + .put("grpc.client.attempt.started", GRPC_CLIENT_ATTEMPT_STARTED_ATTRIBUTES) + .put("grpc.subchannel.open_connections", GRPC_SUBCHANNEL_DEFAULT_ATTRIBUTES) + .put("grpc.subchannel.disconnections", GRPC_SUBCHANNEL_DISCONNECTION_ATTRIBUTES) + .put("grpc.subchannel.connection_attempts_succeeded", GRPC_SUBCHANNEL_DEFAULT_ATTRIBUTES) + .put("grpc.subchannel.connection_attempts_failed", GRPC_SUBCHANNEL_DEFAULT_ATTRIBUTES) + .put("grpc.lb.rls.default_target_picks", GRPC_LB_RLS_ATTRIBUTES) + .put("grpc.lb.rls.target_picks", GRPC_LB_RLS_ATTRIBUTES) + .put( + "grpc.xds_client.resource_updates_invalid", + GRPC_XDS_CLIENT_RESOURCE_UPDATE_ATTRIBUTES) + .put("grpc.xds_client.resource_updates_valid", GRPC_XDS_CLIENT_RESOURCE_UPDATE_ATTRIBUTES) + .build(); + + static final Collection GRPC_METRICS_TO_ENABLE = + ImmutableList.of( + "grpc.client.attempt.started", + "grpc.subchannel.open_connections", + 
"grpc.subchannel.disconnections", + "grpc.subchannel.connection_attempts_succeeded", + "grpc.subchannel.connection_attempts_failed", + "grpc.lb.rls.default_target_picks", + "grpc.lb.rls.target_picks", + "grpc.xds_client.server_failure", + "grpc.xds_client.resource_updates_invalid", + "grpc.xds_client.resource_updates_valid"); + + public static final String SPANNER_RESOURCE_TYPE = "spanner_instance_client"; + + public static final AttributeKey PROJECT_ID_KEY = AttributeKey.stringKey("project_id"); + public static final AttributeKey INSTANCE_ID_KEY = AttributeKey.stringKey("instance_id"); + public static final AttributeKey LOCATION_ID_KEY = AttributeKey.stringKey("location"); + public static final AttributeKey INSTANCE_CONFIG_ID_KEY = + AttributeKey.stringKey("instance_config"); + public static final AttributeKey CLIENT_HASH_KEY = AttributeKey.stringKey("client_hash"); + + // These metric labels will be promoted to the spanner monitored resource fields + public static final Set> SPANNER_PROMOTED_RESOURCE_LABELS = + ImmutableSet.of(INSTANCE_ID_KEY); + + public static final AttributeKey DATABASE_KEY = AttributeKey.stringKey("database"); + public static final AttributeKey CLIENT_UID_KEY = AttributeKey.stringKey("client_uid"); + public static final AttributeKey CLIENT_NAME_KEY = AttributeKey.stringKey("client_name"); + public static final AttributeKey METHOD_KEY = AttributeKey.stringKey("method"); + public static final AttributeKey STATUS_KEY = AttributeKey.stringKey("status"); + public static final AttributeKey DIRECT_PATH_ENABLED_KEY = + AttributeKey.stringKey("directpath_enabled"); + public static final AttributeKey DIRECT_PATH_USED_KEY = + AttributeKey.stringKey("directpath_used"); + public static final AttributeKey REQUEST_ID_KEY = + AttributeKey.stringKey(REQUEST_ID_HEADER_NAME); + public static Set ALLOWED_EXEMPLARS_ATTRIBUTES = + new HashSet<>(Arrays.asList(REQUEST_ID_HEADER_NAME)); + + // IP address prefixes allocated for DirectPath backends. 
+ public static final String DP_IPV6_PREFIX = "2001:4860:8040"; + public static final String DP_IPV4_PREFIX = "34.126"; + + public static final Set COMMON_ATTRIBUTES = + ImmutableSet.of( + PROJECT_ID_KEY, + INSTANCE_ID_KEY, + LOCATION_ID_KEY, + INSTANCE_CONFIG_ID_KEY, + CLIENT_UID_KEY, + CLIENT_HASH_KEY, + METHOD_KEY, + STATUS_KEY, + DATABASE_KEY, + CLIENT_NAME_KEY, + DIRECT_PATH_ENABLED_KEY, + DIRECT_PATH_USED_KEY); + + static List BUCKET_BOUNDARIES = + ImmutableList.of( + 0.0, 0.5, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, + 16.0, 17.0, 18.0, 19.0, 20.0, 25.0, 30.0, 40.0, 50.0, 65.0, 80.0, 100.0, 130.0, 160.0, + 200.0, 250.0, 300.0, 400.0, 500.0, 650.0, 800.0, 1000.0, 2000.0, 5000.0, 10000.0, 20000.0, + 50000.0, 100000.0, 200000.0, 400000.0, 800000.0, 1600000.0, 3200000.0); + static Aggregation AGGREGATION_WITH_MILLIS_HISTOGRAM = + Aggregation.explicitBucketHistogram(BUCKET_BOUNDARIES); + + static final Collection GRPC_METRICS_ENABLED_BY_DEFAULT = + ImmutableList.of( + "grpc.client.attempt.sent_total_compressed_message_size", + "grpc.client.attempt.rcvd_total_compressed_message_size", + "grpc.client.attempt.duration", + "grpc.client.call.duration"); + + static Map getAllViews() { + ImmutableMap.Builder views = ImmutableMap.builder(); + defineView( + views, + BuiltInMetricsConstant.GAX_METER_NAME, + BuiltInMetricsConstant.OPERATION_LATENCY_NAME, + BuiltInMetricsConstant.OPERATION_LATENCIES_NAME, + BuiltInMetricsConstant.AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms"); + defineView( + views, + BuiltInMetricsConstant.GAX_METER_NAME, + BuiltInMetricsConstant.ATTEMPT_LATENCY_NAME, + BuiltInMetricsConstant.ATTEMPT_LATENCIES_NAME, + BuiltInMetricsConstant.AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms"); + defineView( + views, + BuiltInMetricsConstant.GAX_METER_NAME, + BuiltInMetricsConstant.OPERATION_COUNT_NAME, + BuiltInMetricsConstant.OPERATION_COUNT_NAME, + Aggregation.sum(), + 
InstrumentType.COUNTER, + "1"); + defineView( + views, + BuiltInMetricsConstant.GAX_METER_NAME, + BuiltInMetricsConstant.ATTEMPT_COUNT_NAME, + BuiltInMetricsConstant.ATTEMPT_COUNT_NAME, + Aggregation.sum(), + InstrumentType.COUNTER, + "1"); + defineSpannerView(views); + defineGRPCView(views); + return views.build(); + } + + private static void defineView( + ImmutableMap.Builder viewMap, + String meterName, + String metricName, + String metricViewName, + Aggregation aggregation, + InstrumentType type, + String unit) { + InstrumentSelector selector = + InstrumentSelector.builder() + .setName(BuiltInMetricsConstant.METER_NAME + '/' + metricName) + .setMeterName(meterName) + .setType(type) + .setUnit(unit) + .build(); + Set attributesFilter = + BuiltInMetricsConstant.COMMON_ATTRIBUTES.stream() + .map(AttributeKey::getKey) + .collect(Collectors.toSet()); + View view = + View.builder() + .setName(BuiltInMetricsConstant.METER_NAME + '/' + metricViewName) + .setAggregation(aggregation) + .setAttributeFilter(attributesFilter) + .build(); + viewMap.put(selector, view); + } + + private static void defineSpannerView(ImmutableMap.Builder viewMap) { + InstrumentSelector selector = + InstrumentSelector.builder() + .setMeterName(BuiltInMetricsConstant.SPANNER_METER_NAME) + .build(); + Set attributesFilter = + BuiltInMetricsConstant.COMMON_ATTRIBUTES.stream() + .map(AttributeKey::getKey) + .collect(Collectors.toSet()); + View view = View.builder().setAttributeFilter(attributesFilter).build(); + viewMap.put(selector, view); + } + + private static void defineGRPCView(ImmutableMap.Builder viewMap) { + for (String metric : BuiltInMetricsConstant.GRPC_METRICS_TO_ENABLE) { + InstrumentSelector selector = + InstrumentSelector.builder() + .setName(metric) + .setMeterName(BuiltInMetricsConstant.GRPC_METER_NAME) + .build(); + Set attributesFilter = + BuiltInMetricsConstant.COMMON_ATTRIBUTES.stream() + .map(AttributeKey::getKey) + .collect(Collectors.toSet()); + attributesFilter.addAll( + 
GRPC_METRIC_ADDITIONAL_ATTRIBUTES.getOrDefault(metric, ImmutableSet.of())); + + View view = + View.builder() + .setName(BuiltInMetricsConstant.METER_NAME + '/' + metric.replace(".", "/")) + .setAttributeFilter(attributesFilter) + .build(); + viewMap.put(selector, view); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInMetricsProvider.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInMetricsProvider.java new file mode 100644 index 000000000000..0a51ebfae26f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInMetricsProvider.java @@ -0,0 +1,278 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.opentelemetry.detection.GCPPlatformDetector.SupportedPlatform.GOOGLE_KUBERNETES_ENGINE; +import static com.google.cloud.spanner.BuiltInMetricsConstant.CLIENT_HASH_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.CLIENT_NAME_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.CLIENT_UID_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.INSTANCE_CONFIG_ID_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.INSTANCE_ID_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.LOCATION_ID_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.PROJECT_ID_KEY; + +import com.google.api.core.ApiFunction; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.auth.Credentials; +import com.google.cloud.opentelemetry.detection.AttributeKeys; +import com.google.cloud.opentelemetry.detection.DetectedPlatform; +import com.google.cloud.opentelemetry.detection.GCPPlatformDetector; +import com.google.common.base.Strings; +import com.google.common.hash.HashFunction; +import com.google.common.hash.Hashing; +import io.grpc.ManagedChannelBuilder; +import io.grpc.opentelemetry.GrpcOpenTelemetry; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.resources.Resource; +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.lang.management.ManagementFactory; +import java.lang.reflect.Method; +import java.net.HttpURLConnection; +import java.net.InetAddress; +import java.net.URL; +import 
java.net.UnknownHostException; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nullable; + +final class BuiltInMetricsProvider { + + static BuiltInMetricsProvider INSTANCE = new BuiltInMetricsProvider(); + + private static final Logger logger = Logger.getLogger(BuiltInMetricsProvider.class.getName()); + + private static String taskId; + + private static String location; + + private static final String default_location = "global"; + + private OpenTelemetry openTelemetry; + + private BuiltInMetricsProvider() {} + + OpenTelemetry getOrCreateOpenTelemetry( + String projectId, + @Nullable Credentials credentials, + @Nullable String monitoringHost, + String universeDomain) { + try { + if (this.openTelemetry == null) { + SdkMeterProviderBuilder sdkMeterProviderBuilder = SdkMeterProvider.builder(); + BuiltInMetricsView.registerBuiltinMetrics( + SpannerCloudMonitoringExporter.create( + projectId, credentials, monitoringHost, universeDomain), + sdkMeterProviderBuilder); + sdkMeterProviderBuilder.setResource(Resource.create(createResourceAttributes(projectId))); + SdkMeterProvider sdkMeterProvider = sdkMeterProviderBuilder.build(); + this.openTelemetry = OpenTelemetrySdk.builder().setMeterProvider(sdkMeterProvider).build(); + Runtime.getRuntime().addShutdownHook(new Thread(sdkMeterProvider::close)); + } + return this.openTelemetry; + } catch (IOException ex) { + logger.log( + Level.WARNING, + "Unable to get OpenTelemetry object for client side metrics, will skip exporting client" + + " side metrics", + ex); + return null; + } + } + + // TODO: Remove when + // https://github.com/GoogleCloudPlatform/opentelemetry-operations-java/issues/421 + // has been fixed. 
+ static boolean quickCheckIsRunningOnGcp() { + int timeout = 5000; + try { + timeout = + Integer.parseInt(System.getProperty("spanner.check_is_running_on_gcp_timeout", "5000")); + } catch (NumberFormatException ignore) { + // ignore + } + try { + URL url = new URL("http://metadata.google.internal/computeMetadata/v1/project/project-id"); + HttpURLConnection connection = (HttpURLConnection) url.openConnection(); + connection.setConnectTimeout(timeout); + connection.setRequestProperty("Metadata-Flavor", "Google"); + if (connection.getResponseCode() == 200 + && ("Google").equals(connection.getHeaderField("Metadata-Flavor"))) { + InputStream input = connection.getInputStream(); + try (BufferedReader reader = + new BufferedReader(new InputStreamReader(input, StandardCharsets.UTF_8))) { + return !Strings.isNullOrEmpty(reader.readLine()); + } + } + } catch (IOException ignore) { + // ignore + } + return false; + } + + void enableGrpcMetrics( + InstantiatingGrpcChannelProvider.Builder channelProviderBuilder, + String projectId, + @Nullable Credentials credentials, + @Nullable String monitoringHost, + String universeDomain) { + GrpcOpenTelemetry grpcOpenTelemetry = + GrpcOpenTelemetry.newBuilder() + .sdk( + this.getOrCreateOpenTelemetry( + projectId, credentials, monitoringHost, universeDomain)) + .enableMetrics(BuiltInMetricsConstant.GRPC_METRICS_TO_ENABLE) + // Disable gRPCs default metrics as they are not needed for Spanner. 
+ .disableMetrics(BuiltInMetricsConstant.GRPC_METRICS_ENABLED_BY_DEFAULT) + .addOptionalLabel(BuiltInMetricsConstant.GRPC_LB_BACKEND_SERVICE_ATTRIBUTE) + .addOptionalLabel(BuiltInMetricsConstant.GRPC_LB_LOCALITY_ATTRIBUTE) + .addOptionalLabel(BuiltInMetricsConstant.GRPC_DISCONNECT_ERROR_ATTRIBUTE) + .build(); + ApiFunction channelConfigurator = + channelProviderBuilder.getChannelConfigurator(); + channelProviderBuilder.setChannelConfigurator( + b -> { + grpcOpenTelemetry.configureChannelBuilder(b); + if (channelConfigurator != null) { + return channelConfigurator.apply(b); + } + return b; + }); + } + + Attributes createResourceAttributes(String projectId) { + AttributesBuilder attributesBuilder = + Attributes.builder() + .put(PROJECT_ID_KEY.getKey(), projectId) + .put(INSTANCE_CONFIG_ID_KEY.getKey(), "unknown") + .put(CLIENT_HASH_KEY.getKey(), generateClientHash(getDefaultTaskValue())) + .put(INSTANCE_ID_KEY.getKey(), "unknown") + .put(LOCATION_ID_KEY.getKey(), detectClientLocation()); + + return attributesBuilder.build(); + } + + Map createClientAttributes() { + Map clientAttributes = new HashMap<>(); + clientAttributes.put( + CLIENT_NAME_KEY.getKey(), "spanner-java/" + GaxProperties.getLibraryVersion(getClass())); + clientAttributes.put(CLIENT_UID_KEY.getKey(), getDefaultTaskValue()); + return clientAttributes; + } + + /** + * Generates a 6-digit zero-padded all lower case hexadecimal representation of hash of the + * accounting group. The hash utilizes the 10 most significant bits of the value returned by + * `Hashing.goodFastHash(64).hashBytes()`, so effectively the returned values are uniformly + * distributed in the range [000000, 0003ff]. + * + *

The primary purpose of this function is to generate a hash value for the `client_hash` + * resource label using `client_uid` metric field. The range of values is chosen to be small + * enough to keep the cardinality of the Resource targets under control. Note: If at later time + * the range needs to be increased, it can be done by increasing the value of `kPrefixLength` to + * up to 24 bits without changing the format of the returned value. + * + * @return Returns a 6-digit zero-padded all lower case hexadecimal representation of hash of the + * accounting group. + */ + static String generateClientHash(String clientUid) { + if (clientUid == null) { + return "000000"; + } + + HashFunction hashFunction = Hashing.goodFastHash(64); + Long hash = hashFunction.hashBytes(clientUid.getBytes()).asLong(); + // Don't change this value without reading above comment + int kPrefixLength = 10; + long shiftedValue = hash >>> (64 - kPrefixLength); + return String.format("%06x", shiftedValue); + } + + static String detectClientLocation() { + if (location == null) { + location = default_location; + if (quickCheckIsRunningOnGcp()) { + GCPPlatformDetector detector = GCPPlatformDetector.DEFAULT_INSTANCE; + DetectedPlatform detectedPlatform = detector.detectPlatform(); + // All platform except GKE uses "cloud_region" for region attribute. + String region = detectedPlatform.getAttributes().get("cloud_region"); + if (detectedPlatform.getSupportedPlatform() == GOOGLE_KUBERNETES_ENGINE) { + region = detectedPlatform.getAttributes().get(AttributeKeys.GKE_CLUSTER_LOCATION); + } + location = region == null ? location : region; + } + } + return location; + } + + /** + * Generates a unique identifier for the Client_uid metric field. The identifier is composed of a + * UUID, the process ID (PID), and the hostname. + * + *

For Java 9 and later, the PID is obtained using the ProcessHandle API. For Java 8, the PID + * is extracted from ManagementFactory.getRuntimeMXBean().getName(). + * + * @return A unique identifier string in the format UUID@PID@hostname + */ + private static String getDefaultTaskValue() { + if (taskId == null) { + String identifier = UUID.randomUUID().toString(); + String pid = getProcessId(); + + try { + String hostname = InetAddress.getLocalHost().getHostName(); + taskId = identifier + "@" + pid + "@" + hostname; + } catch (UnknownHostException e) { + logger.log(Level.INFO, "Unable to get the hostname.", e); + taskId = identifier + "@" + pid + "@localhost"; + } + } + return taskId; + } + + private static String getProcessId() { + try { + // Check if Java 9+ and ProcessHandle class is available + Class processHandleClass = Class.forName("java.lang.ProcessHandle"); + Method currentMethod = processHandleClass.getMethod("current"); + Object processHandleInstance = currentMethod.invoke(null); + Method pidMethod = processHandleClass.getMethod("pid"); + long pid = (long) pidMethod.invoke(processHandleInstance); + return Long.toString(pid); + } catch (Exception e) { + // Fallback to Java 8 method + final String jvmName = ManagementFactory.getRuntimeMXBean().getName(); + if (jvmName != null && jvmName.contains("@")) { + return jvmName.split("@")[0]; + } else { + return "unknown"; + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInMetricsRecorder.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInMetricsRecorder.java new file mode 100644 index 000000000000..67e75a1d3837 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInMetricsRecorder.java @@ -0,0 +1,128 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.tracing.OpenTelemetryMetricsRecorder; +import com.google.common.base.Preconditions; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.LongCounter; +import io.opentelemetry.api.metrics.Meter; +import java.util.Map; + +/** + * Implementation for recording built in metrics. + * + *

This class extends the {@link OpenTelemetryMetricsRecorder} which implements the * + * measurements related to the lifecyle of an RPC. + */ +class BuiltInMetricsRecorder extends OpenTelemetryMetricsRecorder { + + private final DoubleHistogram gfeLatencyRecorder; + private final DoubleHistogram afeLatencyRecorder; + private final LongCounter gfeHeaderMissingCountRecorder; + private final LongCounter afeHeaderMissingCountRecorder; + + /** + * Creates the following instruments for the following metrics: + * + *

    + *
  • GFE Latency: Histogram + *
+ * + * @param openTelemetry OpenTelemetry instance + * @param serviceName Service Name + */ + BuiltInMetricsRecorder(OpenTelemetry openTelemetry, String serviceName) { + super(openTelemetry, serviceName); + Meter meter = + openTelemetry + .meterBuilder(BuiltInMetricsConstant.SPANNER_METER_NAME) + .setInstrumentationVersion(GaxProperties.getLibraryVersion(getClass())) + .build(); + this.gfeLatencyRecorder = + meter + .histogramBuilder(serviceName + '/' + BuiltInMetricsConstant.GFE_LATENCIES_NAME) + .setDescription( + "Latency between Google's network receiving an RPC and reading back the first byte" + + " of the response") + .setUnit("ms") + .setExplicitBucketBoundariesAdvice(BuiltInMetricsConstant.BUCKET_BOUNDARIES) + .build(); + this.afeLatencyRecorder = + meter + .histogramBuilder(serviceName + '/' + BuiltInMetricsConstant.AFE_LATENCIES_NAME) + .setDescription( + "Latency between Spanner API Frontend receiving an RPC and starting to write back" + + " the response.") + .setExplicitBucketBoundariesAdvice(BuiltInMetricsConstant.BUCKET_BOUNDARIES) + .setUnit("ms") + .build(); + this.gfeHeaderMissingCountRecorder = + meter + .counterBuilder(serviceName + '/' + BuiltInMetricsConstant.GFE_CONNECTIVITY_ERROR_NAME) + .setDescription("Number of requests that failed to reach the Google network.") + .setUnit("1") + .build(); + this.afeHeaderMissingCountRecorder = + meter + .counterBuilder(serviceName + '/' + BuiltInMetricsConstant.AFE_CONNECTIVITY_ERROR_NAME) + .setDescription("Number of requests that failed to reach the Spanner API Frontend.") + .setUnit("1") + .build(); + } + + /** + * Record the latency between Google's network receiving an RPC and reading back the first byte of + * the response. Data is stored in a Histogram. 
+ * + * @param gfeLatency Attempt Latency in ms + * @param attributes Map of the attributes to store + */ + void recordServerTimingHeaderMetrics( + Float gfeLatency, + Float afeLatency, + Map attributes, + boolean isDirectPathUsed, + boolean isAfeEnabled) { + io.opentelemetry.api.common.Attributes otelAttributes = toOtelAttributes(attributes); + if (!isDirectPathUsed) { + if (gfeLatency != null) { + gfeLatencyRecorder.record(gfeLatency, otelAttributes); + } else { + gfeHeaderMissingCountRecorder.add(1, otelAttributes); + } + } + if (isAfeEnabled) { + if (afeLatency != null) { + afeLatencyRecorder.record(afeLatency, otelAttributes); + } else { + afeHeaderMissingCountRecorder.add(1, otelAttributes); + } + } + } + + Attributes toOtelAttributes(Map attributes) { + Preconditions.checkNotNull(attributes, "Attributes map cannot be null"); + AttributesBuilder attributesBuilder = Attributes.builder(); + attributes.forEach(attributesBuilder::put); + return attributesBuilder.build(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInMetricsTracer.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInMetricsTracer.java new file mode 100644 index 000000000000..a982a3f11ab5 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInMetricsTracer.java @@ -0,0 +1,174 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.tracing.ApiTracer; +import com.google.api.gax.tracing.MethodName; +import com.google.api.gax.tracing.MetricsTracer; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.CancellationException; +import javax.annotation.Nullable; + +/** + * Implements built-in metrics tracer. + * + *

This class extends the {@link MetricsTracer} which computes generic metrics that can be + * observed in the lifecycle of an RPC operation. + */ +class BuiltInMetricsTracer extends MetricsTracer implements ApiTracer { + + private final BuiltInMetricsRecorder builtInOpenTelemetryMetricsRecorder; + // These are RPC specific attributes and pertain to a specific API Trace + private final Map attributes = new HashMap<>(); + private Float gfeLatency = null; + private Float afeLatency = null; + private final TraceWrapper traceWrapper; + private final ISpan currentSpan; + private boolean isDirectPathUsed; + private boolean isAfeEnabled; + + BuiltInMetricsTracer( + MethodName methodName, + BuiltInMetricsRecorder builtInOpenTelemetryMetricsRecorder, + TraceWrapper traceWrapper, + ISpan currentSpan) { + super(methodName, builtInOpenTelemetryMetricsRecorder); + this.builtInOpenTelemetryMetricsRecorder = builtInOpenTelemetryMetricsRecorder; + this.attributes.put(METHOD_ATTRIBUTE, methodName.toString()); + this.traceWrapper = traceWrapper; + this.currentSpan = currentSpan; + } + + /** + * Adds an annotation that the attempt succeeded. Successful attempt add "OK" value to the status + * attribute key. + */ + @Override + public void attemptSucceeded() { + try (IScope s = this.traceWrapper.withSpan(this.currentSpan)) { + super.attemptSucceeded(); + attributes.put(STATUS_ATTRIBUTE, StatusCode.Code.OK.toString()); + builtInOpenTelemetryMetricsRecorder.recordServerTimingHeaderMetrics( + gfeLatency, afeLatency, attributes, isDirectPathUsed, isAfeEnabled); + } + } + + /** + * Add an annotation that the attempt was cancelled by the user. Cancelled attempt add "CANCELLED" + * to the status attribute key. 
+ */ + @Override + public void attemptCancelled() { + try (IScope s = this.traceWrapper.withSpan(this.currentSpan)) { + super.attemptCancelled(); + attributes.put(STATUS_ATTRIBUTE, StatusCode.Code.CANCELLED.toString()); + builtInOpenTelemetryMetricsRecorder.recordServerTimingHeaderMetrics( + gfeLatency, afeLatency, attributes, isDirectPathUsed, isAfeEnabled); + } + } + + /** + * Adds an annotation that the attempt failed, but another attempt will be made after the delay. + * + * @param error the error that caused the attempt to fail. + * @param delay the amount of time to wait before the next attempt will start. + *

Failed attempt extracts the error from the throwable and adds it to the status attribute + * key. + */ + @Override + public void attemptFailedDuration(Throwable error, java.time.Duration delay) { + try (IScope s = this.traceWrapper.withSpan(this.currentSpan)) { + super.attemptFailedDuration(error, delay); + attributes.put(STATUS_ATTRIBUTE, extractStatus(error)); + builtInOpenTelemetryMetricsRecorder.recordServerTimingHeaderMetrics( + gfeLatency, afeLatency, attributes, isDirectPathUsed, isAfeEnabled); + } + } + + /** + * Adds an annotation that the attempt failed and that no further attempts will be made because + * retry limits have been reached. This extracts the error from the throwable and adds it to the + * status attribute key. + * + * @param error the last error received before retries were exhausted. + */ + @Override + public void attemptFailedRetriesExhausted(Throwable error) { + try (IScope s = this.traceWrapper.withSpan(this.currentSpan)) { + super.attemptFailedRetriesExhausted(error); + attributes.put(STATUS_ATTRIBUTE, extractStatus(error)); + builtInOpenTelemetryMetricsRecorder.recordServerTimingHeaderMetrics( + gfeLatency, afeLatency, attributes, isDirectPathUsed, isAfeEnabled); + } + } + + /** + * Adds an annotation that the attempt failed and that no further attempts will be made because + * the last error was not retryable. This extracts the error from the throwable and adds it to the + * status attribute key. + * + * @param error the error that caused the final attempt to fail. 
+ */ + @Override + public void attemptPermanentFailure(Throwable error) { + try (IScope s = this.traceWrapper.withSpan(this.currentSpan)) { + super.attemptPermanentFailure(error); + attributes.put(STATUS_ATTRIBUTE, extractStatus(error)); + builtInOpenTelemetryMetricsRecorder.recordServerTimingHeaderMetrics( + gfeLatency, afeLatency, attributes, isDirectPathUsed, isAfeEnabled); + } + } + + public void recordServerTimingHeaderMetrics( + Float gfeLatency, Float afeLatency, boolean isDirectPathUsed, boolean isAfeEnabled) { + this.gfeLatency = gfeLatency; + this.isDirectPathUsed = isDirectPathUsed; + this.afeLatency = afeLatency; + this.isAfeEnabled = isAfeEnabled; + } + + @Override + public void addAttributes(Map attributes) { + super.addAttributes(attributes); + this.attributes.putAll(attributes); + } + + @Override + public void addAttributes(String key, String value) { + super.addAttributes(key, value); + this.attributes.put(key, value); + } + + private static String extractStatus(@Nullable Throwable error) { + final String statusString; + + if (error == null) { + return StatusCode.Code.OK.toString(); + } else if (error instanceof CancellationException) { + statusString = StatusCode.Code.CANCELLED.toString(); + } else if (error instanceof ApiException) { + statusString = ((ApiException) error).getStatusCode().getCode().toString(); + } else { + statusString = StatusCode.Code.UNKNOWN.toString(); + } + + return statusString; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInMetricsTracerFactory.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInMetricsTracerFactory.java new file mode 100644 index 000000000000..52e1acc68bdb --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInMetricsTracerFactory.java @@ -0,0 +1,69 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not 
use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.gax.tracing.ApiTracer; +import com.google.api.gax.tracing.ApiTracerFactory; +import com.google.api.gax.tracing.MethodName; +import com.google.api.gax.tracing.MetricsTracer; +import com.google.api.gax.tracing.MetricsTracerFactory; +import com.google.api.gax.tracing.SpanName; +import com.google.common.collect.ImmutableMap; +import java.util.Map; + +/** + * A {@link ApiTracerFactory} to build instances of {@link MetricsTracer}. + * + *

This class extends the {@link MetricsTracerFactory} which wraps the {@link + * BuiltInMetricsRecorder} and pass it to {@link BuiltInMetricsTracer}. It will be * used to record + * metrics in {@link BuiltInMetricsTracer}. + * + *

This class is expected to be initialized once during client initialization. + */ +class BuiltInMetricsTracerFactory extends MetricsTracerFactory { + + protected BuiltInMetricsRecorder builtInMetricsRecorder; + private final Map attributes; + private final TraceWrapper traceWrapper; + + /** + * Pass in a Map of client level attributes which will be added to every single MetricsTracer + * created from the ApiTracerFactory. + */ + public BuiltInMetricsTracerFactory( + BuiltInMetricsRecorder builtInMetricsRecorder, + Map attributes, + TraceWrapper traceWrapper) { + super(builtInMetricsRecorder, attributes); + this.builtInMetricsRecorder = builtInMetricsRecorder; + this.attributes = ImmutableMap.copyOf(attributes); + this.traceWrapper = traceWrapper; + } + + @Override + public ApiTracer newTracer(ApiTracer parent, SpanName spanName, OperationType operationType) { + ISpan currentSpan = this.traceWrapper.getCurrentSpan(); + BuiltInMetricsTracer metricsTracer = + new BuiltInMetricsTracer( + MethodName.of(spanName.getClientName(), spanName.getMethodName()), + builtInMetricsRecorder, + this.traceWrapper, + currentSpan); + metricsTracer.addAttributes(attributes); + return metricsTracer; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInMetricsView.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInMetricsView.java new file mode 100644 index 000000000000..e72eeb9425a4 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInMetricsView.java @@ -0,0 +1,33 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; + +class BuiltInMetricsView { + + private BuiltInMetricsView() {} + + /** Register built-in metrics on the {@link SdkMeterProviderBuilder} with credentials. */ + static void registerBuiltinMetrics( + MetricExporter metricExporter, SdkMeterProviderBuilder builder) { + BuiltInMetricsConstant.getAllViews().forEach(builder::registerView); + builder.registerMetricReader(PeriodicMetricReader.create(metricExporter)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Clock.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Clock.java new file mode 100644 index 000000000000..567ccc1a0439 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Clock.java @@ -0,0 +1,33 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import java.time.Instant; + +/** + * Wrapper around current time so that we can fake it in tests. TODO(user): Replace with Java 8 + * Clock. + */ +class Clock { + static final Clock INSTANCE = new Clock(); + + Clock() {} + + Instant instant() { + return Instant.now(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CommitResponse.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CommitResponse.java new file mode 100644 index 000000000000..85975e10f6c1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CommitResponse.java @@ -0,0 +1,90 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.cloud.Timestamp; +import com.google.common.base.Preconditions; +import java.util.Objects; +import javax.annotation.Nullable; + +/** Represents a response from a commit operation. 
*/ +public class CommitResponse { + + private final com.google.spanner.v1.CommitResponse proto; + + public CommitResponse(Timestamp commitTimestamp) { + this.proto = + com.google.spanner.v1.CommitResponse.newBuilder() + .setCommitTimestamp(commitTimestamp.toProto()) + .build(); + } + + CommitResponse(com.google.spanner.v1.CommitResponse proto) { + this.proto = Preconditions.checkNotNull(proto); + } + + /** Returns a {@link Timestamp} representing the commit time of the transaction. */ + public Timestamp getCommitTimestamp() { + return Timestamp.fromProto(proto.getCommitTimestamp()); + } + + /** + * Returns a {@link Timestamp} representing the timestamp at which all reads in the transaction + * ran at, if the transaction ran at repeatable read isolation in internal test environments, and + * otherwise returns null. + */ + public @Nullable Timestamp getSnapshotTimestamp() { + if (proto.getSnapshotTimestamp() == com.google.protobuf.Timestamp.getDefaultInstance()) { + return null; + } + return Timestamp.fromProto(proto.getSnapshotTimestamp()); + } + + /** + * @return true if the {@link CommitResponse} includes {@link CommitStats} + */ + public boolean hasCommitStats() { + return proto.hasCommitStats(); + } + + /** + * Commit statistics are returned by a read/write transaction if specifically requested by passing + * in {@link Options#commitStats()} to the transaction. 
+ */ + public CommitStats getCommitStats() { + Preconditions.checkState( + proto.hasCommitStats(), "The CommitResponse does not contain any commit statistics."); + return CommitStats.fromProto(proto.getCommitStats()); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + CommitResponse that = (CommitResponse) o; + return Objects.equals(proto, that.proto); + } + + @Override + public int hashCode() { + return Objects.hash(proto); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CommitStats.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CommitStats.java new file mode 100644 index 000000000000..eaf1a78819f2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CommitStats.java @@ -0,0 +1,54 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.common.base.Preconditions; + +/** + * Commit statistics are returned by a read/write transaction if specifically requested by passing + * in {@link Options#commitStats()} to the transaction. 
+ */ +public class CommitStats { + private final long mutationCount; + + private CommitStats(long mutationCount) { + this.mutationCount = mutationCount; + } + + static CommitStats fromProto(com.google.spanner.v1.CommitResponse.CommitStats proto) { + Preconditions.checkNotNull(proto); + return new CommitStats(proto.getMutationCount()); + } + + /** + * The number of mutations that were executed by the transaction. Insert and update operations + * count with the multiplicity of the number of columns they affect. For example, inserting a new + * record may count as five mutations, if values are inserted into five columns. Delete and delete + * range operations count as one mutation regardless of the number of columns affected. Deleting a + * row from a parent table that has the ON DELETE CASCADE annotation is also counted as one + * mutation regardless of the number of interleaved child rows present. The exception to this is + * if there are secondary indexes defined on rows being deleted, then the changes to the secondary + * indexes are counted individually. For example, if a table has 2 secondary indexes, deleting a + * range of rows in the table counts as 1 mutation for the table, plus 2 mutations for each row + * that is deleted because the rows in the secondary index might be scattered over the key-space, + * making it impossible for Cloud Spanner to call a single delete range operation on the secondary + * indexes. Secondary indexes include the foreign keys backing indexes. 
+ */ + public long getMutationCount() { + return mutationCount; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CompositeTracer.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CompositeTracer.java new file mode 100644 index 000000000000..105dbd0a5129 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CompositeTracer.java @@ -0,0 +1,204 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.api.gax.util.TimeConversionUtils.toJavaTimeDuration; + +import com.google.api.core.InternalApi; +import com.google.api.gax.tracing.ApiTracer; +import com.google.api.gax.tracing.BaseApiTracer; +import com.google.api.gax.tracing.MetricsTracer; +import com.google.common.collect.ImmutableList; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +@InternalApi +public class CompositeTracer extends BaseApiTracer { + private final List children; + + public CompositeTracer(List children) { + this.children = ImmutableList.copyOf(children); + } + + @Override + public Scope inScope() { + final List childScopes = new ArrayList<>(children.size()); + + for (ApiTracer child : children) { + childScopes.add(child.inScope()); + } + + return new Scope() { + @Override + public void close() { + for (Scope childScope : childScopes) { + childScope.close(); + } + } + }; + } + + @Override + public void operationSucceeded() { + for (ApiTracer child : children) { + child.operationSucceeded(); + } + } + + @Override + public void operationCancelled() { + for (ApiTracer child : children) { + child.operationCancelled(); + } + } + + @Override + public void operationFailed(Throwable error) { + for (ApiTracer child : children) { + child.operationFailed(error); + } + } + + @Override + public void connectionSelected(String id) { + for (ApiTracer child : children) { + child.connectionSelected(id); + } + } + + @Override + public void attemptStarted(int attemptNumber) { + for (ApiTracer child : children) { + child.attemptStarted(null, attemptNumber); + } + } + + @Override + public void attemptStarted(Object request, int attemptNumber) { + for (ApiTracer child : children) { + child.attemptStarted(request, attemptNumber); + } + } + + @Override + public void attemptSucceeded() { + for (ApiTracer child : children) { + child.attemptSucceeded(); + } + } + + @Override + public void 
attemptCancelled() { + for (ApiTracer child : children) { + child.attemptCancelled(); + } + } + + @Override + public void attemptFailed(Throwable error, org.threeten.bp.Duration delay) { + for (ApiTracer child : children) { + child.attemptFailedDuration(error, toJavaTimeDuration(delay)); + } + } + + @Override + public void attemptFailedDuration(Throwable error, Duration delay) { + for (ApiTracer child : children) { + child.attemptFailedDuration(error, delay); + } + } + + @Override + public void attemptFailedRetriesExhausted(Throwable error) { + for (ApiTracer child : children) { + child.attemptFailedRetriesExhausted(error); + } + } + + @Override + public void attemptPermanentFailure(Throwable error) { + for (ApiTracer child : children) { + child.attemptPermanentFailure(error); + } + } + + @Override + public void lroStartFailed(Throwable error) { + for (ApiTracer child : children) { + child.lroStartFailed(error); + } + } + + @Override + public void lroStartSucceeded() { + for (ApiTracer child : children) { + child.lroStartSucceeded(); + } + } + + @Override + public void responseReceived() { + for (ApiTracer child : children) { + child.responseReceived(); + } + } + + @Override + public void requestSent() { + for (ApiTracer child : children) { + child.requestSent(); + } + } + + @Override + public void batchRequestSent(long elementCount, long requestSize) { + for (ApiTracer child : children) { + child.batchRequestSent(elementCount, requestSize); + } + } + + public void addAttributes(String key, String value) { + for (ApiTracer child : children) { + if (child instanceof MetricsTracer) { + MetricsTracer metricsTracer = (MetricsTracer) child; + metricsTracer.addAttributes(key, value); + } + } + } + + public void addAttributes(Map attributes) { + for (ApiTracer child : children) { + if (child instanceof MetricsTracer) { + MetricsTracer metricsTracer = (MetricsTracer) child; + metricsTracer.addAttributes(attributes); + } + } + } + + public void 
recordServerTimingHeaderMetrics( + Float gfeLatency, Float afeLatency, boolean isDirectPathUsed, boolean isAfeEnabled) { + for (ApiTracer child : children) { + if (child instanceof BuiltInMetricsTracer) { + ((BuiltInMetricsTracer) child) + .recordServerTimingHeaderMetrics( + gfeLatency, afeLatency, isDirectPathUsed, isAfeEnabled); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CompositeTracerFactory.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CompositeTracerFactory.java new file mode 100644 index 000000000000..2e3965de0953 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CompositeTracerFactory.java @@ -0,0 +1,46 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.core.InternalApi; +import com.google.api.gax.tracing.ApiTracer; +import com.google.api.gax.tracing.ApiTracerFactory; +import com.google.api.gax.tracing.BaseApiTracerFactory; +import com.google.api.gax.tracing.SpanName; +import com.google.common.collect.ImmutableList; +import java.util.ArrayList; +import java.util.List; + +@InternalApi +public class CompositeTracerFactory extends BaseApiTracerFactory { + + private final List apiTracerFactories; + + public CompositeTracerFactory(List apiTracerFactories) { + this.apiTracerFactories = ImmutableList.copyOf(apiTracerFactories); + } + + @Override + public ApiTracer newTracer(ApiTracer parent, SpanName spanName, OperationType operationType) { + List children = new ArrayList<>(apiTracerFactories.size()); + + for (ApiTracerFactory factory : apiTracerFactories) { + children.add(factory.newTracer(parent, spanName, operationType)); + } + return new CompositeTracer(children); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Database.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Database.java new file mode 100644 index 000000000000..92c4d55d4d93 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Database.java @@ -0,0 +1,264 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.base.Preconditions.checkArgument; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.paging.Page; +import com.google.cloud.Policy; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.encryption.CustomerManagedEncryption; +import com.google.common.base.Preconditions; +import com.google.longrunning.Operation; +import com.google.spanner.admin.database.v1.CreateBackupMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; + +/** + * Represents a Cloud Spanner database. {@code Database} adds a layer of service related + * functionality over {@code DatabaseInfo}. + */ +public class Database extends DatabaseInfo { + public static class Builder extends DatabaseInfo.BuilderImpl { + private final DatabaseAdminClient dbClient; + + Builder(DatabaseAdminClient dbClient, DatabaseId databaseId) { + super(databaseId); + this.dbClient = Preconditions.checkNotNull(dbClient); + } + + private Builder(Database database) { + super(database); + this.dbClient = database.dbClient; + } + + @Override + public Database build() { + return new Database(this); + } + } + + private static final String FILTER_DB_OPERATIONS_TEMPLATE = "name:databases/%s"; + private final DatabaseAdminClient dbClient; + + public Database(DatabaseId id, State state, DatabaseAdminClient dbClient) { + super(id, state); + this.dbClient = dbClient; + } + + Database(Builder builder) { + super(builder); + this.dbClient = Preconditions.checkNotNull(builder.dbClient); + } + + /** Fetches the database's current information. */ + public Database reload() throws SpannerException { + return dbClient.getDatabase(instance(), database()); + } + + /** + * Enqueues the given DDL statements to be applied, in order but not necessarily all at once, to + * the database schema at some point (or points) in the future. 
The server checks that the + * statements are executable (syntactically valid, name tables that exist, etc.) before enqueueing + * them, but they may still fail upon later execution (e.g., if a statement from another batch of + * statements is applied first and it conflicts in some way, or if there is some data-related + * problem like a `NULL` value in a column to which `NOT NULL` would be added). If a statement + * fails, all subsequent statements in the batch are automatically cancelled. + * + * @param operationId Operation id assigned to this operation. If null, system will autogenerate + * one. This must be unique within a database abd must be a valid identifier + * [a-zA-Z][a-zA-Z0-9_]*. + */ + public OperationFuture updateDdl( + Iterable statements, String operationId) throws SpannerException { + return dbClient.updateDatabaseDdl(instance(), database(), statements, operationId); + } + + /** Drops this database. */ + public void drop() throws SpannerException { + dbClient.dropDatabase(instance(), database()); + } + + /** + * Returns true if a database with the id of this {@link Database} exists on Cloud + * Spanner. + */ + public boolean exists() { + try { + dbClient.getDatabase(instance(), database()); + } catch (SpannerException e) { + if (e.getErrorCode() == ErrorCode.NOT_FOUND) { + return false; + } + throw e; + } + return true; + } + + /** + * Backs up this database to the location specified by the {@link Backup}. The given {@link + * Backup} must have an expire time. The backup must belong to the same instance as this database. 
+ */ + public OperationFuture backup(Backup backup) { + Preconditions.checkArgument( + backup.getExpireTime() != null, "The backup does not have an expire time."); + Preconditions.checkArgument( + backup.getInstanceId().equals(getId().getInstanceId()), + "The instance of the backup must be equal to the instance of this database."); + + return dbClient.createBackup( + dbClient + .newBackupBuilder(backup.getId()) + .setDatabase(getId()) + .setExpireTime(backup.getExpireTime()) + .setVersionTime(backup.getVersionTime()) + .build()); + } + + /** + * Returns the schema of a Cloud Spanner database as a list of formatted DDL statements. This + * method does not show pending schema updates. + */ + public Iterable getDdl() throws SpannerException { + return dbClient.getDatabaseDdl(instance(), database()); + } + + /** Returns the long-running operations for this database. */ + public Page listDatabaseOperations() { + return dbClient.listDatabaseOperations( + instance(), Options.filter(String.format(FILTER_DB_OPERATIONS_TEMPLATE, database()))); + } + + /** + * Returns the IAM {@link Policy} for this database. + * + *

Version specifies the format used to create the policy, valid values are 0, 1, and 3. + * Requests specifying an invalid value will be rejected. Requests for policies with any + * conditional role bindings must specify version 3. Policies with no conditional role bindings + * may specify any valid value or leave the field unset. + * + *

The policy in the response might use the policy version that you specified, or it might use + * a lower policy version. For example, if you specify version 3, but the policy has no + * conditional role bindings, the response uses version 1. + * + *

To learn which resources support conditions in their IAM policies, see the [IAM + * documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + */ + public Policy getIAMPolicy(int version) { + return dbClient.getDatabaseIAMPolicy(instance(), database(), version); + } + + /** + * Updates the IAM policy for this database and returns the resulting policy. It is highly + * recommended to first get the current policy and base the updated policy on the returned policy. + * See {@link Policy.Builder#setEtag(String)} for information on the recommended read-modify-write + * cycle. + */ + public Policy setIAMPolicy(Policy policy) { + return dbClient.setDatabaseIAMPolicy(instance(), database(), policy); + } + + /** + * Tests for the given permissions on this database for the caller. + * + * @param permissions the permissions to test for. Permissions with wildcards (such as '*', + * 'spanner.*', 'spanner.instances.*') are not allowed. + * @return the subset of the tested permissions that the caller is allowed. 
+ */ + public Iterable testIAMPermissions(Iterable permissions) { + return dbClient.testDatabaseIAMPermissions(instance(), database(), permissions); + } + + private String instance() { + return getId().getInstanceId().getInstance(); + } + + private String database() { + return getId().getDatabase(); + } + + static Database fromProto( + com.google.spanner.admin.database.v1.Database proto, DatabaseAdminClient client) { + checkArgument(!proto.getName().isEmpty(), "Missing expected 'name' field"); + DatabaseInfo.Builder builder = + new Builder(client, DatabaseId.of(proto.getName())) + .setState(fromProtoState(proto.getState())) + .setCreateTime(Timestamp.fromProto(proto.getCreateTime())) + .setRestoreInfo(RestoreInfo.fromProtoOrNullIfDefaultInstance(proto.getRestoreInfo())) + .setVersionRetentionPeriod(proto.getVersionRetentionPeriod()) + .setEarliestVersionTime(Timestamp.fromProto(proto.getEarliestVersionTime())) + .setEncryptionConfig( + CustomerManagedEncryption.fromProtoOrNull(proto.getEncryptionConfig())) + .setDefaultLeader(proto.getDefaultLeader()) + .setDialect(Dialect.fromProto(proto.getDatabaseDialect())) + .setReconciling(proto.getReconciling()) + .setProto(proto); + if (proto.getEnableDropProtection()) { + builder.enableDropProtection(); + } else { + builder.disableDropProtection(); + } + return builder.build(); + } + + public com.google.spanner.admin.database.v1.Database toProto() { + com.google.spanner.admin.database.v1.Database.Builder builder = + com.google.spanner.admin.database.v1.Database.newBuilder() + .setName(getId().getName()) + .setState(getState().toProto()) + .setEnableDropProtection(isDropProtectionEnabled()) + .setReconciling(getReconciling()); + if (getCreateTime() != null) { + builder.setCreateTime(getCreateTime().toProto()); + } + if (getRestoreInfo() != null) { + builder.setRestoreInfo(getRestoreInfo().getProto()); + } + if (getVersionRetentionPeriod() != null) { + builder.setVersionRetentionPeriod(getVersionRetentionPeriod()); + } + if 
(getEarliestVersionTime() != null) { + builder.setEarliestVersionTime(getEarliestVersionTime().toProto()); + } + if (getEncryptionConfig() != null) { + builder.setEncryptionConfig(getEncryptionConfig().toProto()); + } + if (getDefaultLeader() != null) { + builder.setDefaultLeader(getDefaultLeader()); + } + if (getDialect() != null) { + builder.setDatabaseDialect(getDialect().toProto()); + } + return builder.build(); + } + + static DatabaseInfo.State fromProtoState( + com.google.spanner.admin.database.v1.Database.State protoState) { + switch (protoState) { + case STATE_UNSPECIFIED: + return DatabaseInfo.State.UNSPECIFIED; + case CREATING: + return DatabaseInfo.State.CREATING; + case READY: + return DatabaseInfo.State.READY; + case READY_OPTIMIZING: + return DatabaseInfo.State.READY_OPTIMIZING; + default: + throw new IllegalArgumentException("Unrecognized state " + protoState); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseAdminClient.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseAdminClient.java new file mode 100644 index 000000000000..f961a13845bd --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseAdminClient.java @@ -0,0 +1,653 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.paging.Page; +import com.google.cloud.Policy; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Options.ListOption; +import com.google.longrunning.Operation; +import com.google.spanner.admin.database.v1.CopyBackupMetadata; +import com.google.spanner.admin.database.v1.CreateBackupMetadata; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; +import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseMetadata; +import java.util.List; +import javax.annotation.Nullable; + +/** Client to do admin operations on a Cloud Spanner Database. */ +public interface DatabaseAdminClient { + /** + * Creates a new database in a Cloud Spanner instance. + * + *

Example to create database. + * + *

{@code
+   * String instanceId = my_instance_id;
+   * String databaseId = my_database_id;
+   * Operation op = dbAdminClient
+   *     .createDatabase(
+   *         instanceId,
+   *         databaseId,
+   *         Arrays.asList(
+   *             "CREATE TABLE Singers (\n"
+   *                 + "  SingerId   INT64 NOT NULL,\n"
+   *                 + "  FirstName  STRING(1024),\n"
+   *                 + "  LastName   STRING(1024),\n"
+   *                 + "  SingerInfo BYTES(MAX)\n"
+   *                 + ") PRIMARY KEY (SingerId)",
+   *             "CREATE TABLE Albums (\n"
+   *                 + "  SingerId     INT64 NOT NULL,\n"
+   *                 + "  AlbumId      INT64 NOT NULL,\n"
+   *                 + "  AlbumTitle   STRING(MAX)\n"
+   *                 + ") PRIMARY KEY (SingerId, AlbumId),\n"
+   *                 + "  INTERLEAVE IN PARENT Singers ON DELETE CASCADE"));
+   * Database db = op.waitFor().getResult();
+   * }
+ * + * @param instanceId the id of the instance in which to create the database. + * @param databaseId the id of the database which will be created. It must conform to the regular + * expression [a-z][a-z0-9_\-]*[a-z0-9] and be between 2 and 30 characters in length + * @param statements DDL statements to run while creating the database, for example {@code CREATE + * TABLE MyTable ( ... )}. This should not include {@code CREATE DATABASE} statement. + */ + OperationFuture createDatabase( + String instanceId, String databaseId, Iterable statements) throws SpannerException; + + /** + * Creates a new database in a Cloud Spanner instance with the given {@link Dialect}. + * + *

Example to create database. + * + *

{@code
+   * String instanceId = "my_instance_id";
+   * String createDatabaseStatement = "CREATE DATABASE \"my-database\"";
+   * Operation op = dbAdminClient
+   *     .createDatabase(
+   *         instanceId,
+   *         createDatabaseStatement,
+   *         Dialect.POSTGRESQL,
+   *         Collections.emptyList());
+   * Database db = op.waitFor().getResult();
+   * }
+ * + * @param instanceId the id of the instance in which to create the database. + * @param createDatabaseStatement the CREATE DATABASE statement for the database. This statement + * must use the dialect for the new database. + * @param dialect the dialect that the new database should use. + * @param statements DDL statements to run while creating the database, for example {@code CREATE + * TABLE MyTable ( ... )}. This should not include {@code CREATE DATABASE} statement. + */ + default OperationFuture createDatabase( + String instanceId, + String createDatabaseStatement, + Dialect dialect, + Iterable statements) + throws SpannerException { + throw new UnsupportedOperationException("Unimplemented"); + } + + /** + * Creates a database in a Cloud Spanner instance. Any configuration options in the {@link + * Database} instance will be included in the {@link CreateDatabaseRequest}. + * + *

Example to create an encrypted database. + * + *

{@code
+   * Database dbInfo =
+   *     dbClient
+   *         .newDatabaseBuilder(DatabaseId.of("my-project", "my-instance", "my-database"))
+   *         .setEncryptionConfig(
+   *             EncryptionConfig.ofKey(
+   *                 "projects/my-project/locations/some-location/keyRings/my-keyring/cryptoKeys/my-key"))
+   *         .build();
+   * Operation op = dbAdminClient
+   *     .createDatabase(
+   *         dbInfo,
+   *         Arrays.asList(
+   *             "CREATE TABLE Singers (\n"
+   *                 + "  SingerId   INT64 NOT NULL,\n"
+   *                 + "  FirstName  STRING(1024),\n"
+   *                 + "  LastName   STRING(1024),\n"
+   *                 + "  SingerInfo BYTES(MAX)\n"
+   *                 + ") PRIMARY KEY (SingerId)",
+   *             "CREATE TABLE Albums (\n"
+   *                 + "  SingerId     INT64 NOT NULL,\n"
+   *                 + "  AlbumId      INT64 NOT NULL,\n"
+   *                 + "  AlbumTitle   STRING(MAX)\n"
+   *                 + ") PRIMARY KEY (SingerId, AlbumId),\n"
+   *                 + "  INTERLEAVE IN PARENT Singers ON DELETE CASCADE"));
+   * Database db = op.waitFor().getResult();
+   * }
+ * + * @see #createDatabase(String, String, Iterable) + */ + OperationFuture createDatabase( + Database database, Iterable statements) throws SpannerException; + + /** Returns a builder for a {@code Database} object with the given id. */ + Database.Builder newDatabaseBuilder(DatabaseId id); + + /** Returns a builder for a {@code Backup} object with the given id. */ + Backup.Builder newBackupBuilder(BackupId id); + + /** Returns a builder for a {@link Restore} object with the given source and destination */ + Restore.Builder newRestoreBuilder(BackupId source, DatabaseId destination); + + /** + * Creates a new backup from a database in a Cloud Spanner instance. + * + *

Example to create a backup. + * + *

{@code
+   * String instanceId     = my_instance_id;
+   * String backupId       = my_backup_id;
+   * String databaseId     = my_database_id;
+   * Timestamp expireTime  = Timestamp.ofTimeMicroseconds(micros);
+   * OperationFuture op = dbAdminClient
+   *     .createBackup(
+   *         instanceId,
+   *         backupId,
+   *         databaseId,
+   *         expireTime);
+   * Backup backup = op.get();
+   * }
+ * + * @param sourceInstanceId the id of the instance where the database to backup is located and + * where the backup will be created. + * @param backupId the id of the backup which will be created. It must conform to the regular + * expression [a-z][a-z0-9_\-]*[a-z0-9] and be between 2 and 60 characters in length. + * @param databaseId the id of the database to backup. + * @param expireTime the time that the backup will automatically expire. + */ + OperationFuture createBackup( + String sourceInstanceId, String backupId, String databaseId, Timestamp expireTime) + throws SpannerException; + + /** + * Creates a new backup from a database in a Cloud Spanner. Any configuration options in the + * {@link Backup} instance will be included in the {@link + * com.google.spanner.admin.database.v1.CreateBackupRequest}. + * + *

Example to create an encrypted backup. + * + *

{@code
+   * BackupId backupId = BackupId.of("project", "instance", "backup-id");
+   * DatabaseId databaseId = DatabaseId.of("project", "instance", "database-id");
+   * Timestamp expireTime = Timestamp.ofTimeMicroseconds(expireTimeMicros);
+   * Timestamp versionTime = Timestamp.ofTimeMicroseconds(versionTimeMicros);
+   * EncryptionConfig encryptionConfig =
+   *         EncryptionConfig.ofKey(
+   *             "projects/my-project/locations/some-location/keyRings/my-keyring/cryptoKeys/my-key"));
+   *
+   * Backup backupToCreate = dbAdminClient
+   *     .newBackupBuilder(backupId)
+   *     .setDatabase(databaseId)
+   *     .setExpireTime(expireTime)
+   *     .setVersionTime(versionTime)
+   *     .setEncryptionConfig(encryptionConfig)
+   *     .build();
+   *
+   * OperationFuture op = dbAdminClient.createBackup(backupToCreate);
+   * Backup createdBackup = op.get();
+   * }
+ * + * @param backup the backup to be created + */ + OperationFuture createBackup(Backup backup) throws SpannerException; + + /** + * Creates a copy of backup from an existing backup in a Cloud Spanner instance. + * + *

Example to copy a backup. + * + *

{@code
+   * String instanceId                  = "my_instance_id";
+   * String sourceBackupId              = "source_backup_id";
+   * String destinationBackupId         = "destination_backup_id";
+   * Timestamp expireTime               = Timestamp.ofTimeMicroseconds(micros);
+   * OperationFuture op = dbAdminClient
+   *     .copyBackup(
+   *         instanceId,
+   *         sourceBackupId,
+   *         destinationBackupId,
+   *         expireTime);
+   * Backup backup = op.get();
+   * }
+ * + * @param instanceId the id of the instance where the source backup is located and where the new + * backup will be created. + * @param sourceBackupId the source backup id. + * @param destinationBackupId the id of the backup which will be created. It must conform to the + * regular expression [a-z][a-z0-9_\-]*[a-z0-9] and be between 2 and 60 characters in length. + * @param expireTime the time that the new backup will automatically expire. + */ + default OperationFuture copyBackup( + String instanceId, String sourceBackupId, String destinationBackupId, Timestamp expireTime) { + throw new UnsupportedOperationException("Unimplemented"); + } + + /** + * Creates a copy of backup from an existing backup in Cloud Spanner in the same instance. Any + * configuration options in the {@link Backup} instance will be included in the {@link + * com.google.spanner.admin.database.v1.CopyBackupRequest}. + * + *

The expire time of the new backup must be set and be at least 6 hours and at most 366 days + * after the creation time of the existing backup that is being copied. + * + *

Example to create a copy of a backup. + * + *

{@code
+   * BackupId sourceBackupId = BackupId.of("source-project", "source-instance", "source-backup-id");
+   * BackupId destinationBackupId = BackupId.of("destination-project", "destination-instance", "new-backup-id");
+   * Timestamp expireTime = Timestamp.ofTimeMicroseconds(expireTimeMicros);
+   * EncryptionConfig encryptionConfig =
+   *         EncryptionConfig.ofKey(
+   *             "projects/my-project/locations/some-location/keyRings/my-keyring/cryptoKeys/my-key"));
+   *
+   * Backup destinationBackup = dbAdminClient
+   *     .newBackupBuilder(destinationBackupId)
+   *     .setExpireTime(expireTime)
+   *     .setEncryptionConfig(encryptionConfig)
+   *     .build();
+   *
+   * OperationFuture op = dbAdminClient.copyBackup(sourceBackupId, destinationBackup);
+   * Backup copiedBackup = op.get();
+   * }
+ * + * @param sourceBackupId the backup to be copied + * @param destinationBackup the new backup to create + */ + default OperationFuture copyBackup( + BackupId sourceBackupId, Backup destinationBackup) { + throw new UnsupportedOperationException("Unimplemented"); + } + + /** + * Restore a database from a backup. The database that is restored will be created and may not + * already exist. + * + *

Example to restore a database. + * + *

{@code
+   * String backupInstanceId   = my_instance_id;
+   * String backupId           = my_backup_id;
+   * String restoreInstanceId  = my_db_instance_id;
+   * String restoreDatabaseId  = my_database_id;
+   * OperationFuture op = dbAdminClient
+   *     .restoreDatabase(
+   *         backupInstanceId,
+   *         backupId,
+   *         restoreInstanceId,
+   *         restoreDatabaseId);
+   * Database database = op.get();
+   * }
+ * + * @param backupInstanceId the id of the instance where the backup is located. + * @param backupId the id of the backup to restore. + * @param restoreInstanceId the id of the instance where the database should be created. This may + * be a different instance than where the backup is stored. + * @param restoreDatabaseId the id of the database to restore to. + */ + OperationFuture restoreDatabase( + String backupInstanceId, String backupId, String restoreInstanceId, String restoreDatabaseId) + throws SpannerException; + + /** + * Restore a database from a backup. The database that is restored will be created and may not + * already exist. + * + *

Example to restore an encrypted database. + * + *

{@code
+   * final Restore restore = dbAdminClient
+   *     .newRestoreBuilder(
+   *         BackupId.of("my-project", "my-instance", "my-backup"),
+   *         DatabaseId.of("my-project", "my-instance", "my-database")
+   *     )
+   *     .setEncryptionConfig(EncryptionConfig.ofKey(
+   *         "projects/my-project/locations/some-location/keyRings/my-keyring/cryptoKeys/my-key"))
+   *     .build();
+   *
+   * final OperationFuture op = dbAdminClient
+   *     .restoreDatabase(restore);
+   *
+   * Database database = op.get();
+   * }
+ * + * @param restore a {@link Restore} instance with the backup source and destination database + */ + OperationFuture restoreDatabase(Restore restore) + throws SpannerException; + + /** Lists long-running database operations on the specified instance. */ + Page listDatabaseOperations(String instanceId, ListOption... options); + + /** Lists database roles on the specified database. */ + Page listDatabaseRoles(String instanceId, String databaseId, ListOption... options); + + /** Lists long-running backup operations on the specified instance. */ + Page listBackupOperations(String instanceId, ListOption... options); + + /** + * Gets the current state of a Cloud Spanner database. + * + *

Example to getDatabase. + * + *

{@code
+   * String instanceId = my_instance_id;
+   * String databaseId = my_database_id;
+   * Database db = dbAdminClient.getDatabase(instanceId, databaseId);
+   * }
+ */ + Database getDatabase(String instanceId, String databaseId) throws SpannerException; + + /** + * Updates a Cloud Spanner database. The returned {@code Operation} can be used to track the + * progress of the update. Throws SpannerException if the Cloud Spanner database does not exist. + * + *

Until completion of the returned operation: + * + *

    + *
  • Cancelling the operation is best effort and may or may not succeed. + *
  • All other attempts to modify the database are rejected. + *
  • Reading the database via the API continues to give the pre-request field values. + *
+ * + * Upon completion of the returned operation: + * + *
    + *
  • The database's new fields are readable via the API. + *
+ * + *

Example of updating a database. + * + *

{@code
+   * String projectId = my_project_id;
+   * String instanceId = my_instance_id;
+   * String databaseId = my_database_id;
+   * Database databaseToUpdate = databaseAdminClient.newDatabaseBuilder(
+   *         DatabaseId.of(projectId, instanceId, databaseId))
+   *      .enableDropProtection().build();
+   * OperationFuture op = databaseAdminClient.updateDatabase(
+   *           databaseToUpdate, DatabaseField.DROP_PROTECTION);
+   * Database updateDatabase = op.get(5, TimeUnit.MINUTES);
+   * }
+ * + * @param database The database to update to. The current field values of the database will be + * updated to the values specified in this parameter. + * @param fieldsToUpdate The fields that should be updated. Only these fields will have their + * values updated to the values specified in {@param database}, even if there are other fields + * specified in {@param database}. + */ + OperationFuture updateDatabase( + Database database, DatabaseInfo.DatabaseField... fieldsToUpdate) throws SpannerException; + + /** + * Gets the current state of a Cloud Spanner database backup. + * + *

Example to get a backup. + * + *

{@code
+   * String instanceId = my_instance_id;
+   * String backupId   = my_backup_id;
+   * Backup backup = dbAdminClient.getBackup(instanceId, backupId);
+   * }
+ */ + Backup getBackup(String instanceId, String backupId) throws SpannerException; + + /** + * Enqueues the given DDL statements to be applied, in order but not necessarily all at once, to + * the database schema at some point (or points) in the future. The server checks that the + * statements are executable (syntactically valid, name tables that exist, etc.) before enqueueing + * them, but they may still fail upon later execution (e.g., if a statement from another batch of + * statements is applied first and it conflicts in some way, or if there is some data-related + * problem like a `NULL` value in a column to which `NOT NULL` would be added). If a statement + * fails, all subsequent statements in the batch are automatically cancelled. + * + *

If an operation already exists with the given operation id, the operation will be resumed + * and the returned future will complete when the original operation finishes. See more + * information in {@link + * com.google.cloud.spanner.spi.v1.GapicSpannerRpc#updateDatabaseDdl(com.google.cloud.spanner.Database, + * Iterable, String)} + * + *

Example to update the database DDL. + * + *

{@code
+   * String instanceId = my_instance_id;
+   * String databaseId = my_database_id;
+   * dbAdminClient.updateDatabaseDdl(instanceId,
+   *     databaseId,
+   *     Arrays.asList("ALTER TABLE Albums ADD COLUMN MarketingBudget INT64"),
+   *     null).waitFor();
+   * }
+ * + * @param operationId Operation id assigned to this operation. If null, system will autogenerate + * one. This must be unique within a database and must be a valid identifier + * [a-zA-Z][a-zA-Z0-9_]*. + */ + OperationFuture updateDatabaseDdl( + String instanceId, + String databaseId, + Iterable statements, + @Nullable String operationId) + throws SpannerException; + + /** + * Updates a database in a Cloud Spanner instance. Any proto descriptors that have been set for + * the {@link com.google.cloud.spanner.Database} instance will be included in the {@link + * com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest}. + * + *

If an operation already exists with the given operation id, the operation will be resumed + * and the returned future will complete when the original operation finishes. See more + * information in {@link + * com.google.cloud.spanner.spi.v1.GapicSpannerRpc#updateDatabaseDdl(com.google.cloud.spanner.Database, + * Iterable, String)} + * + *

Example to update the database DDL with proto descriptors. + * + *

{@code
+   * Database dbInfo =
+   *         dbClient
+   *            .newDatabaseBuilder(DatabaseId.of("my_project_id", "my_instance_id", "my_database_id"))
+   *            .setProtoDescriptors("com/google/cloud/spanner/descriptors.pb")
+   *            .build();
+   * dbAdminClient.updateDatabaseDdl(dbInfo,
+   *     Arrays.asList("ALTER TABLE Albums ADD COLUMN MarketingBudget INT64"),
+   *     null).waitFor();
+   * }
+ * + * @param database Database object to set configuration options such as proto_descriptors. + * @param statements DDL statements to run while updating the database. + * @param operationId Operation id assigned to this operation. If null, system will autogenerate + * one. This must be unique within a database and must be a valid identifier + * [a-zA-Z][a-zA-Z0-9_]*. + */ + OperationFuture updateDatabaseDdl( + Database database, Iterable statements, @Nullable String operationId) + throws SpannerException; + + /** + * Drops a Cloud Spanner database. + * + *

Example to drop a Cloud Spanner database. + * + *

{@code
+   * String instanceId = my_instance_id;
+   * String databaseId = my_database_id;
+   * dbAdminClient.dropDatabase(instanceId, databaseId);
+   * }
+ */ + void dropDatabase(String instanceId, String databaseId) throws SpannerException; + + /** + * Returns the schema of a Cloud Spanner database as a list of formatted DDL statements. This + * method does not show pending schema updates. + * + *

Example to get the schema of a Cloud Spanner database. + * + *

{@code
+   * String instanceId = my_instance_id;
+   * String databaseId = my_database_id;
+   * List statementsInDb = dbAdminClient.getDatabaseDdl(instanceId, databaseId);
+   * }
+ */ + List getDatabaseDdl(String instanceId, String databaseId); + + /** + * Returns the GetDatabaseDdlResponse object of a Cloud Spanner database. + * + *

Example to get GetDatabaseDdlResponse object of a Cloud Spanner database. + * + *

{@code
+   * String instanceId = my_instance_id;
+   * String databaseId = my_database_id;
+   * GetDatabaseDdlResponse response = dbAdminClient.getDatabaseDdlResponse(instanceId, databaseId);
+   * }
+ * + * @param instanceId the id of the instance where the database was created. + * @param databaseId the id of the database. + * @return GetDatabaseDdlResponse object + */ + GetDatabaseDdlResponse getDatabaseDdlResponse(String instanceId, String databaseId); + + /** + * Returns the list of Cloud Spanner databases in the given instance. + * + *

Example to get the list of Cloud Spanner databases in the given instance. + * + *

{@code
+   * String instanceId = my_instance_id;
+   * Page page = dbAdminClient.listDatabases(instanceId, Options.pageSize(1));
+   * List dbs = new ArrayList<>();
+   * while (page != null) {
+   *   Database db = Iterables.getOnlyElement(page.getValues());
+   *   dbs.add(db);
+   *   page = page.getNextPage();
+   * }
+   * }
+ */ + Page listDatabases(String instanceId, ListOption... options); + + /** + * Returns the list of Cloud Spanner backups in the given instance. + * + *

Example to get the list of Cloud Spanner backups in the given instance. + * + *

{@code
+   * String instanceId = my_instance_id;
+   * Page page = dbAdminClient.listBackups(instanceId, Options.pageSize(1));
+   * List backups = new ArrayList<>();
+   * while (page != null) {
+   *   Backup backup = Iterables.getOnlyElement(page.getValues());
+   *   backups.add(backup);
+   *   page = page.getNextPage();
+   * }
+   * }
+ */ + Page listBackups(String instanceId, ListOption... options); + + /** + * Updates the expire time of a backup. + * + * @param instanceId Required. The instance of the backup to update. + * @param backupId Required. The backup id of the backup to update. + * @param expireTime Required. The new expire time of the backup to set to. + * @return the updated Backup object. + */ + Backup updateBackup(String instanceId, String backupId, Timestamp expireTime); + + /** + * Deletes a pending or completed backup. + * + * @param instanceId Required. The instance where the backup exists. + * @param backupId Required. The id of the backup to delete. + */ + void deleteBackup(String instanceId, String backupId); + + /** Cancels the specified long-running operation. */ + void cancelOperation(String name); + + /** Gets the specified long-running operation. */ + Operation getOperation(String name); + + /** + * Returns the IAM policy for the given database. + * + *

Version specifies the format used to create the policy, valid values are 0, 1, and 3. + * Requests specifying an invalid value will be rejected. Requests for policies with any + * conditional role bindings must specify version 3. Policies with no conditional role bindings + * may specify any valid value or leave the field unset. + * + *

The policy in the response might use the policy version that you specified, or it might use + * a lower policy version. For example, if you specify version 3, but the policy has no + * conditional role bindings, the response uses version 1. + * + *

To learn which resources support conditions in their IAM policies, see the + * + * @see IAM + * documentation. + */ + Policy getDatabaseIAMPolicy(String instanceId, String databaseId, int version); + + /** + * Updates the IAM policy for the given database and returns the resulting policy. It is highly + * recommended to first get the current policy and base the updated policy on the returned policy. + * See {@link Policy.Builder#setEtag(String)} for information on the recommended read-modify-write + * cycle. + */ + Policy setDatabaseIAMPolicy(String instanceId, String databaseId, Policy policy); + + /** + * Tests for the given permissions on the specified database for the caller. + * + * @param instanceId the id of the instance where the database to test is located. + * @param databaseId the id of the database to test. + * @param permissions the permissions to test for. Permissions with wildcards (such as '*', + * 'spanner.*', 'spanner.instances.*') are not allowed. + * @return the subset of the tested permissions that the caller is allowed. + */ + Iterable testDatabaseIAMPermissions( + String instanceId, String databaseId, Iterable permissions); + + /** Returns the IAM policy for the given backup. */ + Policy getBackupIAMPolicy(String instanceId, String backupId); + + /** + * Updates the IAM policy for the given backup and returns the resulting policy. It is highly + * recommended to first get the current policy and base the updated policy on the returned policy. + * See {@link Policy.Builder#setEtag(String)} for information on the recommended read-modify-write + * cycle. + */ + Policy setBackupIAMPolicy(String instanceId, String backupId, Policy policy); + + /** + * Tests for the given permissions on the specified backup for the caller. + * + * @param instanceId the id of the instance where the backup to test is located. + * @param backupId the id of the backup to test. + * @param permissions the permissions to test for. 
Permissions with wildcards (such as '*', + * 'spanner.*', 'spanner.instances.*') are not allowed. + * @return the subset of the tested permissions that the caller is allowed. + */ + Iterable testBackupIAMPermissions( + String instanceId, String backupId, Iterable permissions); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseAdminClientImpl.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseAdminClientImpl.java new file mode 100644 index 000000000000..f53ae3bb0d38 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseAdminClientImpl.java @@ -0,0 +1,600 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.gax.grpc.ProtoOperationTransformers; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.longrunning.OperationFutureImpl; +import com.google.api.gax.paging.Page; +import com.google.cloud.Policy; +import com.google.cloud.Policy.DefaultMarshaller; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.DatabaseInfo.DatabaseField; +import com.google.cloud.spanner.Options.ListOption; +import com.google.cloud.spanner.SpannerImpl.PageFetcher; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.cloud.spanner.spi.v1.SpannerRpc.Paginated; +import com.google.common.base.MoreObjects; +import com.google.common.base.Preconditions; +import com.google.iam.v1.GetPolicyOptions; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import com.google.protobuf.FieldMask; +import com.google.spanner.admin.database.v1.*; +import java.util.List; +import java.util.UUID; +import javax.annotation.Nullable; + +/** Default implementation of {@link DatabaseAdminClient}. */ +class DatabaseAdminClientImpl implements DatabaseAdminClient { + private static final class PolicyMarshaller extends DefaultMarshaller { + @Override + protected Policy fromPb(com.google.iam.v1.Policy policyPb) { + return super.fromPb(policyPb); + } + + @Override + protected com.google.iam.v1.Policy toPb(Policy policy) { + return super.toPb(policy); + } + } + + private final String projectId; + private final SpannerRpc rpc; + private final PolicyMarshaller policyMarshaller = new PolicyMarshaller(); + private static final String EXPIRE_TIME_MASK = "expire_time"; + + DatabaseAdminClientImpl(String projectId, SpannerRpc rpc) { + this.projectId = projectId; + this.rpc = rpc; + } + + /** Generates a random operation id for long-running database operations. 
*/ + private static String randomOperationId() { + UUID uuid = UUID.randomUUID(); + return ("r" + uuid.toString()).replace("-", "_"); + } + + @Override + public Database.Builder newDatabaseBuilder(DatabaseId databaseId) { + return new Database.Builder(this, databaseId); + } + + @Override + public Backup.Builder newBackupBuilder(BackupId backupId) { + return new Backup.Builder(this, backupId); + } + + @Override + public Restore.Builder newRestoreBuilder(BackupId source, DatabaseId destination) { + return new Restore.Builder(source, destination); + } + + @Override + public OperationFuture restoreDatabase( + String backupInstanceId, String backupId, String restoreInstanceId, String restoreDatabaseId) + throws SpannerException { + return restoreDatabase( + newRestoreBuilder( + BackupId.of(projectId, backupInstanceId, backupId), + DatabaseId.of(projectId, restoreInstanceId, restoreDatabaseId)) + .build()); + } + + @Override + public OperationFuture restoreDatabase(Restore restore) + throws SpannerException { + final OperationFuture + rawOperationFuture = rpc.restoreDatabase(restore); + + return new OperationFutureImpl<>( + rawOperationFuture.getPollingFuture(), + rawOperationFuture.getInitialFuture(), + snapshot -> + Database.fromProto( + ProtoOperationTransformers.ResponseTransformer.create( + com.google.spanner.admin.database.v1.Database.class) + .apply(snapshot), + DatabaseAdminClientImpl.this), + ProtoOperationTransformers.MetadataTransformer.create(RestoreDatabaseMetadata.class), + e -> { + throw SpannerExceptionFactory.newSpannerException(e); + }); + } + + @Override + public OperationFuture createBackup( + String instanceId, String backupId, String databaseId, Timestamp expireTime) + throws SpannerException { + final Backup backupInfo = + newBackupBuilder(BackupId.of(projectId, instanceId, backupId)) + .setDatabase(DatabaseId.of(projectId, instanceId, databaseId)) + .setExpireTime(expireTime) + .build(); + + return createBackup(backupInfo); + } + + @Override + 
public OperationFuture createBackup(Backup backupInfo) + throws SpannerException { + Preconditions.checkArgument( + backupInfo.getExpireTime() != null, "Cannot create a backup without an expire time"); + Preconditions.checkArgument( + backupInfo.getDatabase() != null, "Cannot create a backup without a source database"); + + final OperationFuture + rawOperationFuture = rpc.createBackup(backupInfo); + + return new OperationFutureImpl<>( + rawOperationFuture.getPollingFuture(), + rawOperationFuture.getInitialFuture(), + snapshot -> { + com.google.spanner.admin.database.v1.Backup proto = + ProtoOperationTransformers.ResponseTransformer.create( + com.google.spanner.admin.database.v1.Backup.class) + .apply(snapshot); + return Backup.fromProto( + com.google.spanner.admin.database.v1.Backup.newBuilder(proto) + .setName(proto.getName()) + .setExpireTime(proto.getExpireTime()) + .setVersionTime(proto.getVersionTime()) + .setState(proto.getState()) + .setEncryptionInfo(proto.getEncryptionInfo()) + .build(), + DatabaseAdminClientImpl.this); + }, + ProtoOperationTransformers.MetadataTransformer.create(CreateBackupMetadata.class), + e -> { + throw SpannerExceptionFactory.newSpannerException(e); + }); + } + + @Override + public OperationFuture copyBackup( + String instanceId, String sourceBackupId, String destinationBackupId, Timestamp expireTime) + throws SpannerException { + final Backup destinationBackup = + newBackupBuilder(BackupId.of(projectId, instanceId, destinationBackupId)) + .setExpireTime(expireTime) + .build(); + + return copyBackup(BackupId.of(projectId, instanceId, sourceBackupId), destinationBackup); + } + + @Override + public OperationFuture copyBackup( + BackupId sourceBackupId, Backup destinationBackup) throws SpannerException { + Preconditions.checkNotNull(sourceBackupId); + Preconditions.checkNotNull(destinationBackup); + + final OperationFuture + rawOperationFuture = rpc.copyBackup(sourceBackupId, destinationBackup); + + return new OperationFutureImpl<>( + 
rawOperationFuture.getPollingFuture(), + rawOperationFuture.getInitialFuture(), + snapshot -> { + com.google.spanner.admin.database.v1.Backup proto = + ProtoOperationTransformers.ResponseTransformer.create( + com.google.spanner.admin.database.v1.Backup.class) + .apply(snapshot); + return Backup.fromProto( + com.google.spanner.admin.database.v1.Backup.newBuilder(proto) + .setName(proto.getName()) + .setExpireTime(proto.getExpireTime()) + .setEncryptionInfo(proto.getEncryptionInfo()) + .build(), + DatabaseAdminClientImpl.this); + }, + ProtoOperationTransformers.MetadataTransformer.create(CopyBackupMetadata.class), + e -> { + throw SpannerExceptionFactory.newSpannerException(e); + }); + } + + @Override + public Backup updateBackup(String instanceId, String backupId, Timestamp expireTime) { + String backupName = getBackupName(instanceId, backupId); + final com.google.spanner.admin.database.v1.Backup backup = + com.google.spanner.admin.database.v1.Backup.newBuilder() + .setName(backupName) + .setExpireTime(expireTime.toProto()) + .build(); + // Only update the expire time of the backup. + final FieldMask updateMask = FieldMask.newBuilder().addPaths(EXPIRE_TIME_MASK).build(); + try { + return Backup.fromProto(rpc.updateBackup(backup, updateMask), DatabaseAdminClientImpl.this); + } catch (Exception e) { + throw SpannerExceptionFactory.newSpannerException(e); + } + } + + @Override + public void deleteBackup(String instanceId, String backupId) { + final String backupName = getBackupName(instanceId, backupId); + try { + rpc.deleteBackup(backupName); + } catch (Exception e) { + throw SpannerExceptionFactory.newSpannerException(e); + } + } + + @Override + public Backup getBackup(String instanceId, String backupId) throws SpannerException { + final String backupName = getBackupName(instanceId, backupId); + return Backup.fromProto(rpc.getBackup(backupName), DatabaseAdminClientImpl.this); + } + + @Override + public final Page listBackupOperations(String instanceId, ListOption... 
options) { + final String instanceName = getInstanceName(instanceId); + final Options listOptions = Options.fromListOptions(options); + final int pageSize = listOptions.hasPageSize() ? listOptions.pageSize() : 0; + final String filter = listOptions.hasFilter() ? listOptions.filter() : null; + final String pageToken = listOptions.hasPageToken() ? listOptions.pageToken() : null; + + PageFetcher pageFetcher = + new PageFetcher() { + @Override + public Paginated getNextPage(String nextPageToken) { + return rpc.listBackupOperations(instanceName, pageSize, filter, pageToken); + } + + @Override + public Operation fromProto(Operation proto) { + return proto; + } + }; + if (listOptions.hasPageToken()) { + pageFetcher.setNextPageToken(listOptions.pageToken()); + } + return pageFetcher.getNextPage(); + } + + @Override + public final Page listDatabaseOperations(String instanceId, ListOption... options) { + final String instanceName = getInstanceName(instanceId); + final Options listOptions = Options.fromListOptions(options); + final int pageSize = listOptions.hasPageSize() ? listOptions.pageSize() : 0; + final String filter = listOptions.hasFilter() ? listOptions.filter() : null; + final String pageToken = listOptions.hasPageToken() ? listOptions.pageToken() : null; + + PageFetcher pageFetcher = + new PageFetcher() { + @Override + public Paginated getNextPage(String nextPageToken) { + return rpc.listDatabaseOperations(instanceName, pageSize, filter, pageToken); + } + + @Override + public Operation fromProto(Operation proto) { + return proto; + } + }; + if (listOptions.hasPageToken()) { + pageFetcher.setNextPageToken(listOptions.pageToken()); + } + return pageFetcher.getNextPage(); + } + + @Override + public final Page listDatabaseRoles( + String instanceId, String databaseId, ListOption... 
options) { + final String databaseName = getDatabaseName(instanceId, databaseId); + final Options listOptions = Options.fromListOptions(options); + final int pageSize = listOptions.hasPageSize() ? listOptions.pageSize() : 0; + + PageFetcher pageFetcher = + new PageFetcher() { + @Override + public Paginated getNextPage( + String nextPageToken) { + try { + return rpc.listDatabaseRoles(databaseName, pageSize, nextPageToken); + } catch (SpannerException e) { + throw SpannerExceptionFactory.newSpannerException( + e.getErrorCode(), + String.format( + "Failed to list the databases roles of %s with pageToken %s: %s", + databaseName, + MoreObjects.firstNonNull(nextPageToken, ""), + e.getMessage()), + e); + } + } + + @Override + public DatabaseRole fromProto(com.google.spanner.admin.database.v1.DatabaseRole proto) { + return DatabaseRole.fromProto(proto); + } + }; + if (listOptions.hasPageToken()) { + pageFetcher.setNextPageToken(listOptions.pageToken()); + } + return pageFetcher.getNextPage(); + } + + @Override + public Page listBackups(String instanceId, ListOption... options) { + final String instanceName = getInstanceName(instanceId); + final Options listOptions = Options.fromListOptions(options); + final String filter = listOptions.hasFilter() ? listOptions.filter() : null; + final int pageSize = listOptions.hasPageSize() ? 
listOptions.pageSize() : 0; + + PageFetcher pageFetcher = + new PageFetcher() { + @Override + public Paginated getNextPage( + String nextPageToken) { + return rpc.listBackups(instanceName, pageSize, filter, nextPageToken); + } + + @Override + public Backup fromProto(com.google.spanner.admin.database.v1.Backup proto) { + return Backup.fromProto(proto, DatabaseAdminClientImpl.this); + } + }; + if (listOptions.hasPageToken()) { + pageFetcher.setNextPageToken(listOptions.pageToken()); + } + return pageFetcher.getNextPage(); + } + + @Override + public OperationFuture createDatabase( + String instanceId, String databaseId, Iterable statements) throws SpannerException { + return createDatabase( + newDatabaseBuilder(DatabaseId.of(projectId, instanceId, databaseId)) + .setDialect(Dialect.GOOGLE_STANDARD_SQL) + .build(), + statements); + } + + @Override + public OperationFuture createDatabase( + Database database, Iterable statements) throws SpannerException { + final Dialect dialect = Preconditions.checkNotNull(database.getDialect()); + final String createStatement = + dialect.createDatabaseStatementFor(database.getId().getDatabase()); + + return createDatabase(createStatement, database, statements); + } + + @Override + public OperationFuture createDatabase( + String instanceId, + String createDatabaseStatement, + Dialect dialect, + Iterable statements) + throws SpannerException { + Database database = + newDatabaseBuilder(DatabaseId.of(projectId, instanceId, "")).setDialect(dialect).build(); + + return createDatabase(createDatabaseStatement, database, statements); + } + + private OperationFuture createDatabase( + String createStatement, Database database, Iterable statements) + throws SpannerException { + OperationFuture + rawOperationFuture = + rpc.createDatabase( + database.getId().getInstanceId().getName(), createStatement, statements, database); + return new OperationFutureImpl<>( + rawOperationFuture.getPollingFuture(), + rawOperationFuture.getInitialFuture(), + 
snapshot -> + Database.fromProto( + ProtoOperationTransformers.ResponseTransformer.create( + com.google.spanner.admin.database.v1.Database.class) + .apply(snapshot), + DatabaseAdminClientImpl.this), + ProtoOperationTransformers.MetadataTransformer.create(CreateDatabaseMetadata.class), + e -> { + throw SpannerExceptionFactory.newSpannerException(e); + }); + } + + @Override + public Database getDatabase(String instanceId, String databaseId) throws SpannerException { + String dbName = getDatabaseName(instanceId, databaseId); + return Database.fromProto(rpc.getDatabase(dbName), DatabaseAdminClientImpl.this); + } + + @Override + public OperationFuture updateDatabase( + Database database, DatabaseField... fieldsToUpdate) throws SpannerException { + FieldMask fieldMask = DatabaseInfo.DatabaseField.toFieldMask(fieldsToUpdate); + OperationFuture + rawOperationFuture = rpc.updateDatabase(database.toProto(), fieldMask); + return new OperationFutureImpl<>( + rawOperationFuture.getPollingFuture(), + rawOperationFuture.getInitialFuture(), + snapshot -> + Database.fromProto( + ProtoOperationTransformers.ResponseTransformer.create( + com.google.spanner.admin.database.v1.Database.class) + .apply(snapshot), + DatabaseAdminClientImpl.this), + ProtoOperationTransformers.MetadataTransformer.create(UpdateDatabaseMetadata.class), + e -> { + throw SpannerExceptionFactory.newSpannerException(e); + }); + } + + @Override + public OperationFuture updateDatabaseDdl( + final String instanceId, + final String databaseId, + final Iterable statements, + @Nullable String operationId) + throws SpannerException { + + return updateDatabaseDdl( + newDatabaseBuilder(DatabaseId.of(projectId, instanceId, databaseId)).build(), + statements, + operationId); + } + + @Override + public OperationFuture updateDatabaseDdl( + Database database, final Iterable statements, @Nullable String operationId) + throws SpannerException { + final String opId = operationId != null ? 
operationId : randomOperationId(); + OperationFuture rawOperationFuture = + rpc.updateDatabaseDdl(database, statements, opId); + return new OperationFutureImpl<>( + rawOperationFuture.getPollingFuture(), + rawOperationFuture.getInitialFuture(), + snapshot -> { + ProtoOperationTransformers.ResponseTransformer.create(Empty.class).apply(snapshot); + return null; + }, + ProtoOperationTransformers.MetadataTransformer.create(UpdateDatabaseDdlMetadata.class), + e -> { + throw SpannerExceptionFactory.newSpannerException(e); + }); + } + + @Override + public void dropDatabase(String instanceId, String databaseId) throws SpannerException { + String dbName = getDatabaseName(instanceId, databaseId); + rpc.dropDatabase(dbName); + } + + @Override + public List getDatabaseDdl(String instanceId, String databaseId) { + return getDatabaseDdlResponse(instanceId, databaseId).getStatementsList(); + } + + @Override + public GetDatabaseDdlResponse getDatabaseDdlResponse(String instanceId, String databaseId) { + String dbName = getDatabaseName(instanceId, databaseId); + return rpc.getDatabaseDdl(dbName); + } + + @Override + public Page listDatabases(String instanceId, ListOption... options) { + final String instanceName = getInstanceName(instanceId); + final Options listOptions = Options.fromListOptions(options); + Preconditions.checkArgument( + !listOptions.hasFilter(), "Filter option is not supported by listDatabases"); + final int pageSize = listOptions.hasPageSize() ? 
listOptions.pageSize() : 0; + PageFetcher pageFetcher = + new PageFetcher() { + @Override + public Paginated getNextPage( + String nextPageToken) { + try { + return rpc.listDatabases(instanceName, pageSize, nextPageToken); + } catch (SpannerException e) { + throw SpannerExceptionFactory.newSpannerException( + e.getErrorCode(), + String.format( + "Failed to list the databases of %s with pageToken %s: %s", + instanceName, + MoreObjects.firstNonNull(nextPageToken, ""), + e.getMessage()), + e); + } + } + + @Override + public Database fromProto(com.google.spanner.admin.database.v1.Database proto) { + return Database.fromProto(proto, DatabaseAdminClientImpl.this); + } + }; + if (listOptions.hasPageToken()) { + pageFetcher.setNextPageToken(listOptions.pageToken()); + } + return pageFetcher.getNextPage(); + } + + @Override + public void cancelOperation(String name) { + Preconditions.checkNotNull(name); + rpc.cancelOperation(name); + } + + @Override + public Operation getOperation(String name) { + Preconditions.checkNotNull(name); + return rpc.getOperation(name); + } + + @Override + public Policy getDatabaseIAMPolicy(String instanceId, String databaseId, int version) { + final String databaseName = DatabaseId.of(projectId, instanceId, databaseId).getName(); + GetPolicyOptions options = null; + if (version > 0) { + options = GetPolicyOptions.newBuilder().setRequestedPolicyVersion(version).build(); + } + return policyMarshaller.fromPb(rpc.getDatabaseAdminIAMPolicy(databaseName, options)); + } + + @Override + public Policy setDatabaseIAMPolicy(String instanceId, String databaseId, Policy policy) { + Preconditions.checkNotNull(policy); + String databaseName = DatabaseId.of(projectId, instanceId, databaseId).getName(); + return policyMarshaller.fromPb( + rpc.setDatabaseAdminIAMPolicy(databaseName, policyMarshaller.toPb(policy))); + } + + @Override + public Iterable testDatabaseIAMPermissions( + String instanceId, String databaseId, Iterable permissions) { + 
Preconditions.checkNotNull(permissions); + String databaseName = DatabaseId.of(projectId, instanceId, databaseId).getName(); + return rpc.testDatabaseAdminIAMPermissions(databaseName, permissions).getPermissionsList(); + } + + @Override + public Policy getBackupIAMPolicy(String instanceId, String backupId) { + final String databaseName = BackupId.of(projectId, instanceId, backupId).getName(); + return policyMarshaller.fromPb(rpc.getDatabaseAdminIAMPolicy(databaseName, null)); + } + + @Override + public Policy setBackupIAMPolicy(String instanceId, String backupId, final Policy policy) { + Preconditions.checkNotNull(policy); + final String databaseName = BackupId.of(projectId, instanceId, backupId).getName(); + return policyMarshaller.fromPb( + rpc.setDatabaseAdminIAMPolicy(databaseName, policyMarshaller.toPb(policy))); + } + + @Override + public Iterable testBackupIAMPermissions( + String instanceId, String backupId, final Iterable permissions) { + Preconditions.checkNotNull(permissions); + final String databaseName = BackupId.of(projectId, instanceId, backupId).getName(); + return rpc.testDatabaseAdminIAMPermissions(databaseName, permissions).getPermissionsList(); + } + + private String getInstanceName(String instanceId) { + return new InstanceId(projectId, instanceId).getName(); + } + + private String getDatabaseName(String instanceId, String databaseId) { + return new DatabaseId(new InstanceId(projectId, instanceId), databaseId).getName(); + } + + private String getBackupName(String instanceId, String backupId) { + InstanceId instance = new InstanceId(projectId, instanceId); + return new BackupId(instance, backupId).getName(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseClient.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseClient.java new file mode 100644 index 000000000000..8b2dcc31786a --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseClient.java @@ -0,0 +1,635 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.gax.rpc.ServerStream; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Options.RpcPriority; +import com.google.cloud.spanner.Options.TransactionOption; +import com.google.cloud.spanner.Options.UpdateOption; +import com.google.cloud.spanner.Statement.StatementFactory; +import com.google.spanner.v1.BatchWriteResponse; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode; + +/** + * Interface for all the APIs that are used to read/write data into a Cloud Spanner database. An + * instance of this is tied to a specific database. + */ +public interface DatabaseClient { + + /** + * Returns the SQL dialect that is used by the database. + * + * @return the SQL dialect that is used by the database. + */ + default Dialect getDialect() { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * Returns the {@link DatabaseRole} used by the client connection. The database role that is used + * determines the access permissions that a connection has. This can for example be used to create + * connections that are only permitted to access certain tables. 
+ * + * @return the {@link DatabaseRole} used by the client connection. + */ + default String getDatabaseRole() { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * Writes the given mutations atomically to the database. + * + *

This method uses retries and replay protection internally, which means that the mutations + * are applied exactly once on success, or not at all if an error is returned, regardless of any + * failures in the underlying network. Note that if the call is cancelled or reaches deadline, it + * is not possible to know whether the mutations were applied without performing a subsequent + * database operation, but the mutations will have been applied at most once. + * + *

Example of blind write. + * + *

{@code
+   * long singerId = my_singer_id;
+   * Mutation mutation = Mutation.newInsertBuilder("Singer")
+   *         .set("SingerId")
+   *         .to(singerId)
+   *         .set("FirstName")
+   *         .to("Billy")
+   *         .set("LastName")
+   *         .to("Joel")
+   *         .build();
+   * dbClient.write(Collections.singletonList(mutation));
+   * }
+ * + * @return the timestamp at which the write was committed + */ + Timestamp write(Iterable mutations) throws SpannerException; + + /** + * Writes the given mutations atomically to the database with the given options. + * + *

This method uses retries and replay protection internally, which means that the mutations + * are applied exactly once on success, or not at all if an error is returned, regardless of any + * failures in the underlying network. Note that if the call is cancelled or reaches deadline, it + * is not possible to know whether the mutations were applied without performing a subsequent + * database operation, but the mutations will have been applied at most once. + * + *

Example of blind write. + * + *

{@code
+   * long singerId = my_singer_id;
+   * Mutation mutation = Mutation.newInsertBuilder("Singer")
+   *         .set("SingerId")
+   *         .to(singerId)
+   *         .set("FirstName")
+   *         .to("Billy")
+   *         .set("LastName")
+   *         .to("Joel")
+   *         .build();
+   * dbClient.writeWithOptions(
+   *         Collections.singletonList(mutation),
+   *         Options.priority(RpcPriority.HIGH));
+   * }
+ * + * Options for a transaction can include: + * + *
    + *
  • {@link Options#priority(com.google.cloud.spanner.Options.RpcPriority)}: The {@link + * RpcPriority} to use for the commit request of the transaction. The priority will not be + * applied to any other requests on the transaction. + *
  • {@link Options#commitStats()}: Request that the server includes commit statistics in the + * {@link CommitResponse}. + *
+ * + * @return a response with the timestamp at which the write was committed + */ + CommitResponse writeWithOptions(Iterable mutations, TransactionOption... options) + throws SpannerException; + + /** + * Writes the given mutations atomically to the database without replay protection. + * + *

Since this method does not feature replay protection, it may attempt to apply {@code + * mutations} more than once; if the mutations are not idempotent, this may lead to a failure + * being reported when the mutation was applied once. For example, an insert may fail with {@link + * ErrorCode#ALREADY_EXISTS} even though the row did not exist before this method was called. For + * this reason, most users of the library will prefer to use {@link #write(Iterable)} instead. + * However, {@code writeAtLeastOnce()} requires only a single RPC, whereas {@code write()} + * requires two RPCs (one of which may be performed in advance), and so this method may be + * appropriate for latency sensitive and/or high throughput blind writing. + * + *

Example of unprotected blind write. + * + *

{@code
+   * long singerId = my_singer_id;
+   * Mutation mutation = Mutation.newInsertBuilder("Singers")
+   *         .set("SingerId")
+   *         .to(singerId)
+   *         .set("FirstName")
+   *         .to("Billy")
+   *         .set("LastName")
+   *         .to("Joel")
+   *         .build();
+   * dbClient.writeAtLeastOnce(Collections.singletonList(mutation));
+   * }
+ * + * @return the timestamp at which the write was committed + */ + Timestamp writeAtLeastOnce(Iterable mutations) throws SpannerException; + + /** + * Writes the given mutations atomically to the database without replay protection. + * + *

Since this method does not feature replay protection, it may attempt to apply {@code + * mutations} more than once; if the mutations are not idempotent, this may lead to a failure + * being reported when the mutation was applied once. For example, an insert may fail with {@link + * ErrorCode#ALREADY_EXISTS} even though the row did not exist before this method was called. For + * this reason, most users of the library will prefer to use {@link #write(Iterable)} instead. + * However, {@code writeAtLeastOnce()} requires only a single RPC, whereas {@code write()} + * requires two RPCs (one of which may be performed in advance), and so this method may be + * appropriate for latency sensitive and/or high throughput blind writing. + * + *

Example of unprotected blind write. + * + *

{@code
+   * long singerId = my_singer_id;
+   * Mutation mutation = Mutation.newInsertBuilder("Singers")
+   *         .set("SingerId")
+   *         .to(singerId)
+   *         .set("FirstName")
+   *         .to("Billy")
+   *         .set("LastName")
+   *         .to("Joel")
+   *         .build();
+   * dbClient.writeAtLeastOnceWithOptions(
+   *         Collections.singletonList(mutation),
+   *         Options.priority(RpcPriority.LOW));
+   * }
+ * + * Options for a transaction can include: + * + *
    + *
  • {@link Options#priority(com.google.cloud.spanner.Options.RpcPriority)}: The {@link + * RpcPriority} to use for the commit request of the transaction. The priority will not be + * applied to any other requests on the transaction. + *
  • {@link Options#commitStats()}: Request that the server includes commit statistics in the + * {@link CommitResponse}. + *
+ * + * @return a response with the timestamp at which the write was committed + */ + CommitResponse writeAtLeastOnceWithOptions( + Iterable mutations, TransactionOption... options) throws SpannerException; + + /** + * Applies batch of mutation groups in a collection of efficient transactions. The mutation groups + * are applied non-atomically in an unspecified order and thus, they must be independent of each + * other. Partial failure is possible, i.e., some mutation groups may have been applied + * successfully, while some may have failed. The results of individual batches are streamed into + * the response as and when the batches are applied. + * + *

One BatchWriteResponse can contain the results for multiple MutationGroups. Inspect the + * indexes field to determine the MutationGroups that the BatchWriteResponse is for. + * + *

The mutation groups may be applied more than once. This can lead to failures if the mutation + * groups are non-idempotent. For example, an insert that is replayed can return an {@link + * ErrorCode#ALREADY_EXISTS} error. For this reason, users of the library may prefer to use {@link + * #write(Iterable)} instead. However, {@code batchWriteAtLeastOnce()} method may be appropriate + * for non-atomically committing multiple mutation groups in a single RPC with low latency. + * + *

Example of BatchWriteAtLeastOnce + * + *

{@code
+   * Iterable mutationGroups =
+   *     ImmutableList.of(
+   *         MutationGroup.of(
+   *             Mutation.newInsertBuilder("FOO1").set("ID").to(1L).set("NAME").to("Bar1").build(),
+   *             Mutation.newInsertBuilder("FOO2").set("ID").to(2L).set("NAME").to("Bar2").build()),
+   *         MutationGroup.of(
+   *             Mutation.newInsertBuilder("FOO3").set("ID").to(3L).set("NAME").to("Bar3").build(),
+   *             Mutation.newInsertBuilder("FOO4").set("ID").to(4L).set("NAME").to("Bar4").build()),
+   *         MutationGroup.of(
+   *             Mutation.newInsertBuilder("FOO4").set("ID").to(4L).set("NAME").to("Bar4").build(),
+   *             Mutation.newInsertBuilder("FOO5").set("ID").to(5L).set("NAME").to("Bar5").build()),
+   *         MutationGroup.of(
+   *             Mutation.newInsertBuilder("FOO6").set("ID").to(6L).set("NAME").to("Bar6").build()));
+   * ServerStream responses =
+   *     dbClient.batchWriteAtLeastOnce(mutationGroups, Options.tag("batch-write-tag"));
+   * for (BatchWriteResponse response : responses) {
+   *   // Do something when a response is received.
+   * }
+   * }
+ * + * Options for a transaction can include: + * + *
    + *
  • {@link Options#priority(com.google.cloud.spanner.Options.RpcPriority)}: The {@link + * RpcPriority} to use for the batch write request. + *
  • {@link Options#tag(String)}: The transaction tag to use for the batch write request. + *
+ */ + ServerStream batchWriteAtLeastOnce( + Iterable mutationGroups, TransactionOption... options) throws SpannerException; + + /** + * Returns a context in which a single read can be performed using {@link TimestampBound#strong()} + * concurrency. This method will return a {@link ReadContext} that will not return the read + * timestamp that was used by Cloud Spanner. If you want to be able to access the read timestamp, + * you should use the method {@link #singleUseReadOnlyTransaction()}. + * + *

Example of single use. + * + *

{@code
+   * long singerId = my_singer_id;
+   * String column = "FirstName";
+   * Struct row =
+   *     dbClient.singleUse().readRow("Singers", Key.of(singerId), Collections.singleton(column));
+   * String firstName = row.getString(column);
+   * }
+ */ + ReadContext singleUse(); + + /** + * Returns a context in which a single read can be performed at the given timestamp bound. This + * method will return a {@link ReadContext} that will not return the read timestamp that was used + * by Cloud Spanner. If you want to be able to access the read timestamp, you should use the + * method {@link #singleUseReadOnlyTransaction()}. + * + *

Example of single use with timestamp bound. + * + *

{@code
+   * long singerId = my_singer_id;
+   * String column = "FirstName";
+   * Struct row = dbClient.singleUse(TimestampBound.ofMaxStaleness(10, TimeUnit.SECONDS))
+   *     .readRow("Singers", Key.of(singerId), Collections.singleton(column));
+   * String firstName = row.getString(column);
+   * }
+ * + * @param bound the timestamp bound at which to perform the read + */ + ReadContext singleUse(TimestampBound bound); + + /** + * Returns a read-only transaction context in which a single read or query can be performed using + * {@link TimestampBound#strong()} concurrency. This method differs from {@link #singleUse()} in + * that the read timestamp used may be inspected after the read has returned data or finished + * successfully. + * + *

Example of single use read only transaction. + * + *

{@code
+   * long singerId = my_singer_id;
+   * String column = "FirstName";
+   * ReadOnlyTransaction txn = dbClient.singleUseReadOnlyTransaction();
+   * Struct row = txn.readRow("Singers", Key.of(singerId), Collections.singleton(column));
+   * row.getString(column);
+   * Timestamp timestamp = txn.getReadTimestamp();
+   * }
+ */ + ReadOnlyTransaction singleUseReadOnlyTransaction(); + + /** + * Returns a read-only transaction context in which a single read or query can be performed at + * given timestamp bound. This method differs from {@link #singleUse(TimestampBound)} in that the + * read timestamp used may be inspected after the read has returned data or finished successfully. + * + *

Example of single use read only transaction with timestamp bound. + * + *

{@code
+   * long singerId = my_singer_id;
+   * String column = "FirstName";
+   * ReadOnlyTransaction txn =
+   *     dbClient.singleUseReadOnlyTransaction(TimestampBound.ofMaxStaleness(10, TimeUnit.SECONDS));
+   * Struct row = txn.readRow("Singers", Key.of(singerId), Collections.singleton(column));
+   * row.getString(column);
+   * Timestamp timestamp = txn.getReadTimestamp();
+   * }
+ * + * @param bound the timestamp bound at which to perform the read + */ + ReadOnlyTransaction singleUseReadOnlyTransaction(TimestampBound bound); + + /** + * Returns a read-only transaction context in which a multiple reads and/or queries can be + * performed using {@link TimestampBound#strong()} concurrency. All reads/queries will use the + * same timestamp, and the timestamp can be inspected after any read/query has returned data or + * finished successfully. + * + *

Example of read only transaction. + * + *

{@code
+   * long singerId = my_singer_id;
+   * long albumId = my_album_id;
+   * String singerColumn = "FirstName";
+   * String albumColumn = "AlbumTitle";
+   * String albumTitle = null;
+   * // ReadOnlyTransaction should be closed to prevent resource leak.
+   * try (ReadOnlyTransaction txn = dbClient.readOnlyTransaction()) {
+   *   Struct singerRow =
+   *       txn.readRow("Singers", Key.of(singerId), Collections.singleton(singerColumn));
+   *   Struct albumRow =
+   *       txn.readRow("Albums", Key.of(singerId, albumId), Collections.singleton(albumColumn));
+   *   singerRow.getString(singerColumn);
+   *   albumTitle = albumRow.getString(albumColumn);
+   * }
+   * }
+ */ + ReadOnlyTransaction readOnlyTransaction(); + + /** + * Returns a read-only transaction context in which a multiple reads and/or queries can be + * performed at the given timestamp bound. All reads/queries will use the same timestamp, and the + * timestamp can be inspected after any read/query has returned data or finished successfully. + * + *

Note that the bounded staleness modes, {@link TimestampBound.Mode#MIN_READ_TIMESTAMP} and + * {@link TimestampBound.Mode#MAX_STALENESS}, are not supported for multi-use read-only + * transactions. + * + *

Example of read only transaction with timestamp bound. + * + *

{@code
+   * long singerId = my_singer_id;
+   * long albumId = my_album_id;
+   * String singerColumn = "FirstName";
+   * String albumColumn = "AlbumTitle";
+   * String albumTitle = null;
+   * // ReadOnlyTransaction should be closed to prevent resource leak.
+   * try (ReadOnlyTransaction txn =
+   *     dbClient.readOnlyTransaction(TimestampBound.ofExactStaleness(10, TimeUnit.SECONDS))) {
+   *   Struct singerRow =
+   *       txn.readRow("Singers", Key.of(singerId), Collections.singleton(singerColumn));
+   *   Struct albumRow =
+   *       txn.readRow("Albums", Key.of(singerId, albumId), Collections.singleton(albumColumn));
+   *   singerRow.getString(singerColumn);
+   *   albumTitle = albumRow.getString(albumColumn);
+   * }
+   * }
+ * + * @param bound the timestamp bound at which to perform the read + */ + ReadOnlyTransaction readOnlyTransaction(TimestampBound bound); + + /** + * Returns a transaction runner for executing a single logical transaction with retries. The + * returned runner can only be used once. + * + *

Example of a read write transaction. + * + *

 
+   * long singerId = my_singer_id;
+   * TransactionRunner runner = dbClient.readWriteTransaction();
+   * runner.run(
+   *     new TransactionCallable<Void>() {
+   *
+   *       {@literal @}Override
+   *       public Void run(TransactionContext transaction) throws Exception {
+   *         String column = "FirstName";
+   *         Struct row =
+   *             transaction.readRow("Singers", Key.of(singerId), Collections.singleton(column));
+   *         String name = row.getString(column);
+   *         transaction.buffer(
+   *             Mutation.newUpdateBuilder("Singers").set(column).to(name.toUpperCase()).build());
+   *         return null;
+   *       }
+   *     });
+   * 
+ * + * Options for a transaction can include: + * + *
    + *
  • {@link Options#priority(com.google.cloud.spanner.Options.RpcPriority)}: The {@link + * RpcPriority} to use for the commit request of the transaction. The priority will not be + * applied to any other requests on the transaction. + *
  • {@link Options#commitStats()}: Request that the server includes commit statistics in the + * {@link CommitResponse}. + *
  • {@link Options#isolationLevel(IsolationLevel)}: The isolation level for the transaction + *
  • {@link Options#readLockMode(ReadLockMode)}: The read lock mode for the transaction + *
+ */ + TransactionRunner readWriteTransaction(TransactionOption... options); + + /** + * Returns a transaction manager which allows manual management of transaction lifecycle. This API + * is meant for advanced users. Most users should use the {@link + * #readWriteTransaction(TransactionOption...)} API instead. + * + *

Example of using {@link TransactionManager}. + * + *

{@code
+   * long singerId = my_singer_id;
+   * try (TransactionManager manager = dbClient.transactionManager()) {
+   *   TransactionContext transaction = manager.begin();
+   *   while (true) {
+   *     String column = "FirstName";
+   *     Struct row = transaction.readRow("Singers", Key.of(singerId), Collections.singleton(column));
+   *     String name = row.getString(column);
+   *     transaction.buffer(
+   *         Mutation.newUpdateBuilder("Singers").set(column).to(name.toUpperCase()).build());
+   *     try {
+   *       manager.commit();
+   *       break;
+   *     } catch (AbortedException e) {
+   *       Thread.sleep(e.getRetryDelayInMillis());
+   *       transaction = manager.resetForRetry();
+   *     }
+   *   }
+   * }
+   * }
+ * + * Options for a transaction can include: + * + *
    + *
  • {@link Options#priority(com.google.cloud.spanner.Options.RpcPriority)}: The {@link + * RpcPriority} to use for the commit request of the transaction. The priority will not be + * applied to any other requests on the transaction. + *
  • {@link Options#commitStats()}: Request that the server includes commit statistics in the + * {@link CommitResponse}. + *
  • {@link Options#isolationLevel(IsolationLevel)}: The isolation level for the transaction + *
  • {@link Options#readLockMode(ReadLockMode)}: The read lock mode for the transaction + *
+ */ + TransactionManager transactionManager(TransactionOption... options); + + /** + * Returns an asynchronous transaction runner for executing a single logical transaction with + * retries. The returned runner can only be used once. + * + *

Example of a read write transaction. + * + *

{@code
+   * Executor executor = Executors.newSingleThreadExecutor();
+   * final long singerId = my_singer_id;
+   * AsyncRunner runner = client.runAsync();
+   * ApiFuture rowCount =
+   *     runner.runAsync(
+   *         () -> {
+   *           String column = "FirstName";
+   *           Struct row =
+   *               txn.readRow("Singers", Key.of(singerId), Collections.singleton(column));
+   *           String name = row.getString(column);
+   *           return txn.executeUpdateAsync(
+   *               Statement.newBuilder("UPDATE Singers SET Name=@name WHERE SingerId=@id")
+   *                   .bind("id")
+   *                   .to(singerId)
+   *                   .bind("name")
+   *                   .to(name.toUpperCase())
+   *                   .build());
+   *         },
+   *         executor);
+   * }
+ * + * Options for a transaction can include: + * + *
    + *
  • {@link Options#priority(com.google.cloud.spanner.Options.RpcPriority)}: The {@link + * RpcPriority} to use for the commit request of the transaction. The priority will not be + * applied to any other requests on the transaction. + *
  • {@link Options#commitStats()}: Request that the server includes commit statistics in the + * {@link CommitResponse}. + *
  • {@link Options#isolationLevel(IsolationLevel)}: The isolation level for the transaction + *
  • {@link Options#readLockMode(ReadLockMode)}: The read lock mode for the transaction + *
+ */ + AsyncRunner runAsync(TransactionOption... options); + + /** + * Returns an asynchronous transaction manager which allows manual management of transaction + * lifecycle. This API is meant for advanced users. Most users should use the {@link + * #runAsync(TransactionOption...)} API instead. + * + *

Example of using {@link AsyncTransactionManager}. + * + *

{@code
+   * long singerId = 1L;
+   * try (AsyncTransactionManager manager = client.transactionManagerAsync()) {
+   *   TransactionContextFuture transactionFuture = manager.beginAsync();
+   *   while (true) {
+   *     String column = "FirstName";
+   *     CommitTimestampFuture commitTimestamp =
+   *         transactionFuture
+   *             .then(
+   *                 (transaction, __) ->
+   *                     transaction.readRowAsync(
+   *                         "Singers", Key.of(singerId), Collections.singleton(column)))
+   *             .then(
+   *                 (transaction, row) -> {
+   *                   String name = row.getString(column);
+   *                   return transaction.bufferAsync(
+   *                       Mutation.newUpdateBuilder("Singers")
+   *                           .set(column)
+   *                           .to(name.toUpperCase())
+   *                           .build());
+   *                 })
+   *             .commitAsync();
+   *     try {
+   *       commitTimestamp.get();
+   *       break;
+   *     } catch (AbortedException e) {
+   *       Thread.sleep(e.getRetryDelayInMillis());
+   *       transactionFuture = manager.resetForRetryAsync();
+   *     }
+   *   }
+   * }
+   * }
+ * + * Options for a transaction can include: + * + *


    + *
  • {@link Options#priority(com.google.cloud.spanner.Options.RpcPriority)}: The {@link + * RpcPriority} to use for the commit request of the transaction. The priority will not be + * applied to any other requests on the transaction. + *
  • {@link Options#commitStats()}: Request that the server includes commit statistics in the + * {@link CommitResponse}. + *
  • {@link Options#isolationLevel(IsolationLevel)}: The isolation level for the transaction + *
  • {@link Options#readLockMode(ReadLockMode)}: The read lock mode for the transaction + *
+ */ + AsyncTransactionManager transactionManagerAsync(TransactionOption... options); + + /** + * Returns the lower bound of rows modified by this DML statement. + * + *

The method will block until the update is complete. Running a DML statement with this method + * does not offer exactly once semantics, and therefore the DML statement should be idempotent. + * The DML statement must be fully-partitionable. Specifically, the statement must be expressible + * as the union of many statements which each access only a single row of the table. This is a + * Partitioned DML transaction in which a single Partitioned DML statement is executed. + * Partitioned DML partitions the key space and runs the DML statement over each partition in + * parallel using separate, internal transactions that commit independently. Partitioned DML + * transactions do not need to be committed. + * + *

Partitioned DML updates are used to execute a single DML statement with a different + * execution strategy that provides different, and often better, scalability properties for large, + * table-wide operations than DML in a {@link #readWriteTransaction(TransactionOption...)} + * transaction. Smaller scoped statements, such as an OLTP workload, should prefer using {@link + * TransactionContext#executeUpdate(Statement,UpdateOption...)} with {@link + * #readWriteTransaction(TransactionOption...)}. + * + *

That said, Partitioned DML is not a drop-in replacement for standard DML used in {@link + * #readWriteTransaction(TransactionOption...)}. + * + *

    + *
  • The DML statement must be fully-partitionable. Specifically, the statement must be + * expressible as the union of many statements which each access only a single row of the + * table. + *
  • The statement is not applied atomically to all rows of the table. Rather, the statement + * is applied atomically to partitions of the table, in independent internal transactions. + * Secondary index rows are updated atomically with the base table rows. + *
  • Partitioned DML does not guarantee exactly-once execution semantics against a partition. + * The statement will be applied at least once to each partition. It is strongly recommended + * that the DML statement should be idempotent to avoid unexpected results. For instance, it + * is potentially dangerous to run a statement such as `UPDATE table SET column = column + + * 1` as it could be run multiple times against some rows. + *
  • The partitions are committed automatically - there is no support for Commit or Rollback. + * If the call returns an error, or if the client issuing the DML statement dies, it is + * possible that some rows had the statement executed on them successfully. It is also + * possible that statement was never executed against other rows. + *
  • If any error is encountered during the execution of the partitioned DML operation (for + * instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored + * due to schema constraints), then the operation is stopped at that point and an error is + * returned. It is possible that at this point, some partitions have been committed (or even + * committed multiple times), and other partitions have not been run at all. + *
+ * + *

Given the above, Partitioned DML is a good fit for large, database-wide operations that are + * idempotent, such as deleting old rows from a very large table. + */ + long executePartitionedUpdate(Statement stmt, UpdateOption... options); + + /** + * Returns a {@link StatementFactory} for the given dialect. + * + *

A {@link StatementFactory} can be used to create statements with unnamed parameters. This is + * primarily intended for framework developers who want to integrate the Spanner client with + * frameworks that use unnamed parameters. Developers who just want to use the Spanner client in + * their application, should use named parameters. + * + *

Examples using {@link StatementFactory} + * + *

{@code
+   * Statement statement = databaseClient
+   *     .getStatementFactory()
+   *     .withUnnamedParameters("SELECT NAME FROM TABLE WHERE ID = ?", 10);
+   * }
+ */ + default StatementFactory getStatementFactory() { + throw new UnsupportedOperationException("method should be overwritten"); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseClientImpl.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseClientImpl.java new file mode 100644 index 000000000000..bae8067e33dd --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseClientImpl.java @@ -0,0 +1,309 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.gax.rpc.ServerStream; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Options.TransactionOption; +import com.google.cloud.spanner.Options.UpdateOption; +import com.google.cloud.spanner.SpannerImpl.ClosedException; +import com.google.cloud.spanner.Statement.StatementFactory; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.spanner.v1.BatchWriteResponse; +import io.opentelemetry.api.common.Attributes; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; +import javax.annotation.Nullable; + +class DatabaseClientImpl implements DatabaseClient { + private static final String READ_WRITE_TRANSACTION = "CloudSpanner.ReadWriteTransaction"; + private static final String READ_ONLY_TRANSACTION = "CloudSpanner.ReadOnlyTransaction"; + private static final String PARTITION_DML_TRANSACTION = "CloudSpanner.PartitionDMLTransaction"; + private final TraceWrapper tracer; + private final Attributes databaseAttributes; + @VisibleForTesting final String clientId; + @VisibleForTesting final MultiplexedSessionDatabaseClient multiplexedSessionDatabaseClient; + @VisibleForTesting final int dbId; + private final AtomicInteger nthRequest; + private final Map clientIdToOrdinalMap; + + DatabaseClientImpl( + String clientId, + MultiplexedSessionDatabaseClient multiplexedSessionDatabaseClient, + TraceWrapper tracer, + Attributes databaseAttributes) { + this.clientId = clientId; + this.multiplexedSessionDatabaseClient = multiplexedSessionDatabaseClient; + this.tracer = tracer; + this.databaseAttributes = databaseAttributes; + + this.clientIdToOrdinalMap = new 
HashMap(); + this.dbId = this.dbIdFromClientId(this.clientId); + this.nthRequest = new AtomicInteger(0); + } + + @VisibleForTesting + synchronized int dbIdFromClientId(String clientId) { + Integer id = this.clientIdToOrdinalMap.get(clientId); + if (id == null) { + id = this.clientIdToOrdinalMap.size() + 1; + this.clientIdToOrdinalMap.put(clientId, id); + } + return id; + } + + @VisibleForTesting + DatabaseClient getMultiplexedSession() { + return this.multiplexedSessionDatabaseClient; + } + + @Override + public Dialect getDialect() { + return this.multiplexedSessionDatabaseClient.getDialect(); + } + + private final AbstractLazyInitializer statementFactorySupplier = + new AbstractLazyInitializer() { + @Override + protected StatementFactory initialize() { + try { + Dialect dialect = getDialectAsync().get(30, TimeUnit.SECONDS); + return new StatementFactory(dialect); + } catch (ExecutionException | TimeoutException e) { + throw SpannerExceptionFactory.asSpannerException(e); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + }; + + @Override + public StatementFactory getStatementFactory() { + try { + return statementFactorySupplier.get(); + } catch (Exception exception) { + throw SpannerExceptionFactory.asSpannerException(exception); + } + } + + @Override + @Nullable + public String getDatabaseRole() { + return multiplexedSessionDatabaseClient.getDatabaseRole(); + } + + @Override + public Timestamp write(final Iterable mutations) throws SpannerException { + return writeWithOptions(mutations).getCommitTimestamp(); + } + + @Override + public CommitResponse writeWithOptions( + final Iterable mutations, final TransactionOption... 
options) + throws SpannerException { + ISpan span = tracer.spanBuilder(READ_WRITE_TRANSACTION, databaseAttributes, options); + try (IScope s = tracer.withSpan(span)) { + return multiplexedSessionDatabaseClient.writeWithOptions(mutations, options); + } catch (RuntimeException e) { + span.setStatus(e); + throw e; + } finally { + span.end(); + } + } + + @Override + public Timestamp writeAtLeastOnce(final Iterable mutations) throws SpannerException { + return writeAtLeastOnceWithOptions(mutations).getCommitTimestamp(); + } + + @Override + public CommitResponse writeAtLeastOnceWithOptions( + final Iterable mutations, final TransactionOption... options) + throws SpannerException { + ISpan span = tracer.spanBuilder(READ_WRITE_TRANSACTION, databaseAttributes, options); + try (IScope s = tracer.withSpan(span)) { + return multiplexedSessionDatabaseClient.writeAtLeastOnceWithOptions(mutations, options); + } catch (RuntimeException e) { + span.setStatus(e); + throw e; + } finally { + span.end(); + } + } + + @Override + public ServerStream batchWriteAtLeastOnce( + final Iterable mutationGroups, final TransactionOption... 
options) + throws SpannerException { + ISpan span = tracer.spanBuilder(READ_WRITE_TRANSACTION, databaseAttributes, options); + try (IScope s = tracer.withSpan(span)) { + return multiplexedSessionDatabaseClient.batchWriteAtLeastOnce(mutationGroups, options); + } catch (RuntimeException e) { + span.setStatus(e); + throw e; + } finally { + span.end(); + } + } + + @Override + public ReadContext singleUse() { + ISpan span = tracer.spanBuilder(READ_ONLY_TRANSACTION, databaseAttributes); + try (IScope s = tracer.withSpan(span)) { + return getMultiplexedSession().singleUse(); + } catch (RuntimeException e) { + span.setStatus(e); + span.end(); + throw e; + } + } + + @Override + public ReadContext singleUse(TimestampBound bound) { + ISpan span = tracer.spanBuilder(READ_ONLY_TRANSACTION, databaseAttributes); + try (IScope s = tracer.withSpan(span)) { + return getMultiplexedSession().singleUse(bound); + } catch (RuntimeException e) { + span.setStatus(e); + span.end(); + throw e; + } + } + + @Override + public ReadOnlyTransaction singleUseReadOnlyTransaction() { + ISpan span = tracer.spanBuilder(READ_ONLY_TRANSACTION, databaseAttributes); + try (IScope s = tracer.withSpan(span)) { + return getMultiplexedSession().singleUseReadOnlyTransaction(); + } catch (RuntimeException e) { + span.setStatus(e); + span.end(); + throw e; + } + } + + @Override + public ReadOnlyTransaction singleUseReadOnlyTransaction(TimestampBound bound) { + ISpan span = tracer.spanBuilder(READ_ONLY_TRANSACTION, databaseAttributes); + try (IScope s = tracer.withSpan(span)) { + return getMultiplexedSession().singleUseReadOnlyTransaction(bound); + } catch (RuntimeException e) { + span.setStatus(e); + span.end(); + throw e; + } + } + + @Override + public ReadOnlyTransaction readOnlyTransaction() { + ISpan span = tracer.spanBuilder(READ_ONLY_TRANSACTION, databaseAttributes); + try (IScope s = tracer.withSpan(span)) { + return getMultiplexedSession().readOnlyTransaction(); + } catch (RuntimeException e) { + 
span.setStatus(e); + span.end(); + throw e; + } + } + + @Override + public ReadOnlyTransaction readOnlyTransaction(TimestampBound bound) { + ISpan span = tracer.spanBuilder(READ_ONLY_TRANSACTION, databaseAttributes); + try (IScope s = tracer.withSpan(span)) { + return getMultiplexedSession().readOnlyTransaction(bound); + } catch (RuntimeException e) { + span.setStatus(e); + span.end(); + throw e; + } + } + + @Override + public TransactionRunner readWriteTransaction(TransactionOption... options) { + ISpan span = tracer.spanBuilder(READ_WRITE_TRANSACTION, databaseAttributes, options); + try (IScope s = tracer.withSpan(span)) { + return multiplexedSessionDatabaseClient.readWriteTransaction(options); + } catch (RuntimeException e) { + span.setStatus(e); + span.end(); + throw e; + } + } + + @Override + public TransactionManager transactionManager(TransactionOption... options) { + ISpan span = tracer.spanBuilder(READ_WRITE_TRANSACTION, databaseAttributes, options); + try (IScope s = tracer.withSpan(span)) { + return multiplexedSessionDatabaseClient.transactionManager(options); + } catch (RuntimeException e) { + span.setStatus(e); + span.end(); + throw e; + } + } + + @Override + public AsyncRunner runAsync(TransactionOption... options) { + ISpan span = tracer.spanBuilder(READ_WRITE_TRANSACTION, databaseAttributes, options); + try (IScope s = tracer.withSpan(span)) { + return multiplexedSessionDatabaseClient.runAsync(options); + } catch (RuntimeException e) { + span.setStatus(e); + span.end(); + throw e; + } + } + + @Override + public AsyncTransactionManager transactionManagerAsync(TransactionOption... 
options) { + ISpan span = tracer.spanBuilder(READ_WRITE_TRANSACTION, databaseAttributes, options); + try (IScope s = tracer.withSpan(span)) { + return multiplexedSessionDatabaseClient.transactionManagerAsync(options); + } catch (RuntimeException e) { + span.setStatus(e); + span.end(); + throw e; + } + } + + @Override + public long executePartitionedUpdate(final Statement stmt, final UpdateOption... options) { + return multiplexedSessionDatabaseClient.executePartitionedUpdate(stmt, options); + } + + private Future getDialectAsync() { + return multiplexedSessionDatabaseClient.getDialectAsync(); + } + + boolean isValid() { + return multiplexedSessionDatabaseClient.isValid(); + } + + ListenableFuture closeAsync(ClosedException closedException) { + // This method is non-blocking. + this.multiplexedSessionDatabaseClient.close(); + return Futures.immediateVoidFuture(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseId.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseId.java new file mode 100644 index 000000000000..3602d0ee080b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseId.java @@ -0,0 +1,101 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.common.base.Preconditions; +import java.util.Map; +import java.util.Objects; + +/** Represents an id of a Cloud Spanner database resource. */ +public final class DatabaseId { + + private static final PathTemplate NAME_TEMPLATE = + PathTemplate.create("projects/{project}/instances/{instance}/databases/{database}"); + + private final InstanceId instanceId; + private final String database; + + DatabaseId(InstanceId instanceId, String database) { + this.instanceId = Preconditions.checkNotNull(instanceId); + this.database = Preconditions.checkNotNull(database); + } + + /** Returns the instance id for this database. */ + public InstanceId getInstanceId() { + return instanceId; + } + + /** Returns the database id. */ + public String getDatabase() { + return database; + } + + /** Returns the name of this database. */ + public String getName() { + return String.format( + "projects/%s/instances/%s/databases/%s", + instanceId.getProject(), instanceId.getInstance(), database); + } + + @Override + public int hashCode() { + return Objects.hash(instanceId, database); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + DatabaseId that = (DatabaseId) o; + return that.instanceId.equals(instanceId) && that.database.equals(database); + } + + @Override + public String toString() { + return getName(); + } + + /** + * Creates a {@code DatabaseId} from the name of the database. 
+ * + * @param name the database name of the form {@code + * projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID} + * @throws IllegalArgumentException if {@code name} does not conform to the expected pattern + */ + public static DatabaseId of(String name) { + Preconditions.checkNotNull(name); + Map parts = NAME_TEMPLATE.match(name); + Preconditions.checkArgument( + parts != null, "Name should conform to pattern %s: %s", NAME_TEMPLATE, name); + return of(parts.get("project"), parts.get("instance"), parts.get("database")); + } + + /** Creates a {@code DatabaseId} given project, instance and database IDs. */ + public static DatabaseId of(String project, String instance, String database) { + return new DatabaseId(new InstanceId(project, instance), database); + } + + /** Creates a {@code DatabaseId} given the instance identity and database id. */ + public static DatabaseId of(InstanceId instanceId, String database) { + return new DatabaseId(instanceId, database); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseInfo.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseInfo.java new file mode 100644 index 000000000000..3f1a0f81eb0a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseInfo.java @@ -0,0 +1,478 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.cloud.FieldSelector; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.encryption.CustomerManagedEncryption; +import com.google.common.base.Preconditions; +import com.google.protobuf.ByteString; +import com.google.protobuf.FieldMask; +import com.google.spanner.admin.database.v1.Database.State; +import java.io.IOException; +import java.io.InputStream; +import java.util.Objects; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; + +/** Represents a Cloud Spanner database. */ +public class DatabaseInfo { + + /** Represent an updatable field in a Cloud Spanner database. */ + public enum DatabaseField implements FieldSelector { + DROP_PROTECTION("enable_drop_protection"); + + private final String selector; + + DatabaseField(String selector) { + this.selector = selector; + } + + @Override + public String getSelector() { + return selector; + } + + static FieldMask toFieldMask(DatabaseInfo.DatabaseField... fields) { + FieldMask.Builder builder = FieldMask.newBuilder(); + for (DatabaseInfo.DatabaseField field : fields) { + builder.addPaths(field.getSelector()); + } + return builder.build(); + } + } + + public abstract static class Builder { + abstract Builder setState(State state); + + abstract Builder setCreateTime(Timestamp createTime); + + abstract Builder setRestoreInfo(RestoreInfo restoreInfo); + + abstract Builder setVersionRetentionPeriod(String versionRetentionPeriod); + + abstract Builder setEarliestVersionTime(Timestamp earliestVersionTime); + + /** + * Optional for creating a new backup. + * + *

The encryption configuration to be used for the database. The only encryption, other than + * Google's default encryption, is a customer managed encryption with a provided key. If no + * encryption is provided, Google's default encryption will be used. + */ + public abstract Builder setEncryptionConfig(CustomerManagedEncryption encryptionConfig); + + /** + * The read-write region which will be used for the database's leader replicas. This can be one + * of the values as specified in + * https://cloud.google.com/spanner/docs/instances#available-configurations-multi-region. + */ + public Builder setDefaultLeader(String defaultLeader) { + throw new UnsupportedOperationException("Unimplemented"); + } + + public Builder setDialect(Dialect dialect) { + throw new UnsupportedOperationException("Unimplemented"); + } + + public Builder enableDropProtection() { + throw new UnsupportedOperationException("Unimplemented"); + } + + public Builder disableDropProtection() { + throw new UnsupportedOperationException("Unimplemented"); + } + + protected Builder setReconciling(boolean reconciling) { + throw new UnsupportedOperationException("Unimplemented"); + } + + /** + * Optional for creating a new database. + * + *

It is used by CREATE/ALTER PROTO BUNDLE statements which are part of DDL statements. + * Contains a protobuf-serialized [google.protobuf.FileDescriptorSet]. To generate a proto + * descriptors file run {@code protoc --include_imports + * --descriptor_set_out=DESCRIPTOR_OUTPUT_LOCATION LOCATION-OF-PROTO-FILES} + * + * @param protoDescriptors The proto descriptors input as byte[] to be used for the database. + * @return {@link Builder} + */ + public abstract Builder setProtoDescriptors(@Nonnull byte[] protoDescriptors); + + /** + * Optional for creating a new database. + * + *

It is used by CREATE/ALTER PROTO BUNDLE statements which are part of DDL statements. + * Contains a protobuf-serialized [google.protobuf.FileDescriptorSet]. To generate a proto + * descriptors file run {@code protoc --include_imports + * --descriptor_set_out=DESCRIPTOR_OUTPUT_LOCATION LOCATION-OF-PROTO-FILES} + * + * @param inputStream The proto descriptors input as InputStream to be used for the database. + * @return {@link Builder} + * @throws IOException if there is a problem reading the underlying stream. + */ + public abstract Builder setProtoDescriptors(@Nonnull InputStream inputStream) + throws IOException; + + abstract Builder setProto(com.google.spanner.admin.database.v1.Database proto); + + /** Builds the database from this builder. */ + public abstract Database build(); + } + + abstract static class BuilderImpl extends Builder { + protected final DatabaseId id; + private State state = State.UNSPECIFIED; + private Timestamp createTime; + private RestoreInfo restoreInfo; + private String versionRetentionPeriod; + private Timestamp earliestVersionTime; + private CustomerManagedEncryption encryptionConfig; + private String defaultLeader; + private Dialect dialect = Dialect.GOOGLE_STANDARD_SQL; + private boolean dropProtectionEnabled; + private boolean reconciling; + private ByteString protoDescriptors; + private com.google.spanner.admin.database.v1.Database proto; + + BuilderImpl(DatabaseId id) { + this.id = Preconditions.checkNotNull(id); + } + + BuilderImpl(DatabaseInfo other) { + this.id = other.id; + this.state = other.state; + this.createTime = other.createTime; + this.restoreInfo = other.restoreInfo; + this.versionRetentionPeriod = other.versionRetentionPeriod; + this.earliestVersionTime = other.earliestVersionTime; + this.encryptionConfig = other.encryptionConfig; + this.defaultLeader = other.defaultLeader; + this.dialect = other.dialect; + this.protoDescriptors = other.protoDescriptors; + this.proto = other.proto; + } + + @Override + Builder 
setState(State state) { + this.state = Preconditions.checkNotNull(state); + return this; + } + + @Override + Builder setCreateTime(Timestamp createTime) { + this.createTime = Preconditions.checkNotNull(createTime); + return this; + } + + @Override + Builder setRestoreInfo(@Nullable RestoreInfo restoreInfo) { + this.restoreInfo = restoreInfo; + return this; + } + + @Override + Builder setVersionRetentionPeriod(String versionRetentionPeriod) { + this.versionRetentionPeriod = versionRetentionPeriod; + return this; + } + + @Override + Builder setEarliestVersionTime(Timestamp earliestVersionTime) { + this.earliestVersionTime = earliestVersionTime; + return this; + } + + @Override + public Builder setEncryptionConfig(@Nullable CustomerManagedEncryption encryptionConfig) { + this.encryptionConfig = encryptionConfig; + return this; + } + + @Override + public Builder setDefaultLeader(String defaultLeader) { + this.defaultLeader = defaultLeader; + return this; + } + + @Override + public Builder setDialect(Dialect dialect) { + this.dialect = dialect; + return this; + } + + @Override + public Builder enableDropProtection() { + this.dropProtectionEnabled = true; + return this; + } + + @Override + public Builder disableDropProtection() { + this.dropProtectionEnabled = false; + return this; + } + + @Override + protected Builder setReconciling(boolean reconciling) { + this.reconciling = reconciling; + return this; + } + + @Override + public Builder setProtoDescriptors(@Nonnull byte[] protoDescriptors) { + Preconditions.checkNotNull(protoDescriptors); + this.protoDescriptors = ByteString.copyFrom(protoDescriptors); + return this; + } + + @Override + public Builder setProtoDescriptors(@Nonnull InputStream inputStream) throws IOException { + Preconditions.checkNotNull(inputStream); + this.protoDescriptors = ByteString.readFrom(inputStream); + return this; + } + + @Override + Builder setProto(@Nullable com.google.spanner.admin.database.v1.Database proto) { + this.proto = proto; + 
return this; + } + } + + /** State of the database. */ + public enum State { + // Not specified. + UNSPECIFIED { + @Override + public com.google.spanner.admin.database.v1.Database.State toProto() { + return com.google.spanner.admin.database.v1.Database.State.STATE_UNSPECIFIED; + } + }, + // The database is still being created and is not ready to use. + CREATING { + @Override + public com.google.spanner.admin.database.v1.Database.State toProto() { + return com.google.spanner.admin.database.v1.Database.State.CREATING; + } + }, + // The database is fully created and ready to use. + READY { + @Override + public com.google.spanner.admin.database.v1.Database.State toProto() { + return com.google.spanner.admin.database.v1.Database.State.READY; + } + }, + // The database has restored and is being optimized for use. + READY_OPTIMIZING { + @Override + public com.google.spanner.admin.database.v1.Database.State toProto() { + return com.google.spanner.admin.database.v1.Database.State.READY_OPTIMIZING; + } + }; + + public abstract com.google.spanner.admin.database.v1.Database.State toProto(); + } + + private final DatabaseId id; + private final State state; + private final Timestamp createTime; + private final RestoreInfo restoreInfo; + private final String versionRetentionPeriod; + private final Timestamp earliestVersionTime; + private final CustomerManagedEncryption encryptionConfig; + private final String defaultLeader; + private final Dialect dialect; + private final boolean dropProtectionEnabled; + private final boolean reconciling; + + private final ByteString protoDescriptors; + private final com.google.spanner.admin.database.v1.Database proto; + + public DatabaseInfo(DatabaseId id, State state) { + this.id = id; + this.state = state; + this.createTime = null; + this.restoreInfo = null; + this.versionRetentionPeriod = null; + this.earliestVersionTime = null; + this.encryptionConfig = null; + this.defaultLeader = null; + this.dialect = null; + this.dropProtectionEnabled = 
false; + this.reconciling = false; + this.protoDescriptors = null; + this.proto = null; + } + + DatabaseInfo(BuilderImpl builder) { + this.id = builder.id; + this.state = builder.state; + this.createTime = builder.createTime; + this.restoreInfo = builder.restoreInfo; + this.versionRetentionPeriod = builder.versionRetentionPeriod; + this.earliestVersionTime = builder.earliestVersionTime; + this.encryptionConfig = builder.encryptionConfig; + this.defaultLeader = builder.defaultLeader; + this.dialect = builder.dialect; + this.dropProtectionEnabled = builder.dropProtectionEnabled; + this.reconciling = builder.reconciling; + this.protoDescriptors = builder.protoDescriptors; + this.proto = builder.proto; + } + + /** Returns the database id. */ + public DatabaseId getId() { + return id; + } + + /** Returns the state of the database. */ + public State getState() { + return state; + } + + /** Returns the creation time of the database. */ + public Timestamp getCreateTime() { + return createTime; + } + + /** + * Returns the version retention period of the database. This is the period for which Cloud + * Spanner retains all versions of data for the database. For instance, if set to 3 days, Cloud + * Spanner will retain data versions that are up to 3 days old. + */ + public String getVersionRetentionPeriod() { + return versionRetentionPeriod; + } + + /** + * Returns the earliest version time of the database. This is the oldest timestamp that can be + * used to read old versions of the data. + */ + public Timestamp getEarliestVersionTime() { + return earliestVersionTime; + } + + /** + * Returns the {@link RestoreInfo} of the database if any is available, or null if no + * {@link RestoreInfo} is available for this database. + */ + public @Nullable RestoreInfo getRestoreInfo() { + return restoreInfo; + } + + /** + * Returns the {@link CustomerManagedEncryption} of the database if the database is encrypted, or + * null if this database is not encrypted. 
+ */ + public @Nullable CustomerManagedEncryption getEncryptionConfig() { + return encryptionConfig; + } + + /** + * The read-write region which contains the database's leader replicas. If this value was not + * explicitly set during a create database or update database ddl operations, it will be {@code + * NULL}. + */ + public @Nullable String getDefaultLeader() { + return defaultLeader; + } + + /** + * The dialect that is used by the database. It can be one of the values as specified in {@link + * Dialect#values()}. + */ + public @Nullable Dialect getDialect() { + return dialect; + } + + public boolean isDropProtectionEnabled() { + return dropProtectionEnabled; + } + + public boolean getReconciling() { + return reconciling; + } + + public ByteString getProtoDescriptors() { + return protoDescriptors; + } + + /** Returns the raw proto instance that was used to construct this {@link Database}. */ + public @Nullable com.google.spanner.admin.database.v1.Database getProto() { + return proto; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + DatabaseInfo that = (DatabaseInfo) o; + return id.equals(that.id) + && state == that.state + && Objects.equals(createTime, that.createTime) + && Objects.equals(restoreInfo, that.restoreInfo) + && Objects.equals(versionRetentionPeriod, that.versionRetentionPeriod) + && Objects.equals(earliestVersionTime, that.earliestVersionTime) + && Objects.equals(encryptionConfig, that.encryptionConfig) + && Objects.equals(defaultLeader, that.defaultLeader) + && Objects.equals(dialect, that.dialect) + && Objects.equals(dropProtectionEnabled, that.dropProtectionEnabled) + && Objects.equals(reconciling, that.reconciling) + && Objects.equals(protoDescriptors, that.protoDescriptors); + } + + @Override + public int hashCode() { + return Objects.hash( + id, + state, + createTime, + restoreInfo, + versionRetentionPeriod, + earliestVersionTime, 
+ encryptionConfig, + defaultLeader, + dialect, + dropProtectionEnabled, + reconciling, + protoDescriptors); + } + + @Override + public String toString() { + return String.format( + "Database[%s, %s, %s, %s, %s, %s, %s, %s, %s %s %s %s]", + id.getName(), + state, + createTime, + restoreInfo, + versionRetentionPeriod, + earliestVersionTime, + encryptionConfig, + defaultLeader, + dialect, + dropProtectionEnabled, + reconciling, + protoDescriptors); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseNotFoundException.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseNotFoundException.java new file mode 100644 index 000000000000..cc4a2e32f0ba --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseNotFoundException.java @@ -0,0 +1,50 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.gax.rpc.ApiException; +import com.google.cloud.spanner.SpannerException.ResourceNotFoundException; +import com.google.rpc.ResourceInfo; +import javax.annotation.Nullable; + +/** + * Exception thrown by Cloud Spanner when an operation detects that the database that is being used + * no longer exists. 
This type of error has its own subclass as it is a condition that should cause + * the client library to stop trying to send RPCs to the backend until the user has taken action. + */ +public class DatabaseNotFoundException extends ResourceNotFoundException { + private static final long serialVersionUID = -6395746612598975751L; + + /** Private constructor. Use {@link SpannerExceptionFactory} to create instances. */ + DatabaseNotFoundException( + DoNotConstructDirectly token, + @Nullable String message, + ResourceInfo resourceInfo, + @Nullable Throwable cause) { + this(token, message, resourceInfo, cause, null); + } + + /** Private constructor. Use {@link SpannerExceptionFactory} to create instances. */ + DatabaseNotFoundException( + DoNotConstructDirectly token, + @Nullable String message, + ResourceInfo resourceInfo, + @Nullable Throwable cause, + @Nullable ApiException apiException) { + super(token, message, resourceInfo, cause, apiException); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseRole.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseRole.java new file mode 100644 index 000000000000..37daf1cced9f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseRole.java @@ -0,0 +1,78 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.base.Preconditions.checkArgument; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import java.util.Objects; + +/** A Cloud Spanner database role. */ +public class DatabaseRole { + + public static class Builder { + + private final String name; + + public Builder(String name) { + this.name = Preconditions.checkNotNull(name); + } + + public DatabaseRole build() { + return new DatabaseRole(this.name); + } + } + + private final String name; + + @VisibleForTesting + DatabaseRole(String name) { + this.name = name; + } + + public String getName() { + return name; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || !getClass().equals(o.getClass())) { + return false; + } + DatabaseRole databaseRole = (DatabaseRole) o; + return Objects.equals(name, databaseRole.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } + + @Override + public String toString() { + return String.format("DatabaseRole[%s]", name); + } + + static DatabaseRole fromProto(com.google.spanner.admin.database.v1.DatabaseRole proto) { + checkArgument(!proto.getName().isEmpty(), "Missing expected 'name' field"); + return new DatabaseRole.Builder(proto.getName()).build(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DecodeMode.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DecodeMode.java new file mode 100644 index 000000000000..c1bea9a3ce1e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DecodeMode.java @@ -0,0 +1,35 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +/** Specifies how and when to decode a value from protobuf to a plain Java object. */ +public enum DecodeMode { + /** + * Decodes all columns of a row directly when a {@link ResultSet} is advanced to the next row with + * {@link ResultSet#next()} + */ + DIRECT, + /** + * Decodes all columns of a row the first time a {@link ResultSet} value is retrieved from the + * row. + */ + LAZY_PER_ROW, + /** + * Decodes a columns of a row the first time the value of that column is retrieved from the row. + */ + LAZY_PER_COL, +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedAsyncRunner.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedAsyncRunner.java new file mode 100644 index 000000000000..3783a84903f1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedAsyncRunner.java @@ -0,0 +1,74 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.cloud.Timestamp; +import com.google.common.util.concurrent.MoreExecutors; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; + +/** + * Represents a {@link AsyncRunner} using a multiplexed session that is not yet ready. The execution + * will be delayed until the multiplexed session has been created and is ready. This class is only + * used during the startup of the client and the multiplexed session has not yet been created. + */ +public class DelayedAsyncRunner implements AsyncRunner { + + private final ApiFuture asyncRunnerFuture; + + public DelayedAsyncRunner(ApiFuture asyncRunnerFuture) { + this.asyncRunnerFuture = asyncRunnerFuture; + } + + ApiFuture getAsyncRunner() { + return ApiFutures.catchingAsync( + asyncRunnerFuture, + Exception.class, + exception -> { + if (exception instanceof InterruptedException) { + throw SpannerExceptionFactory.propagateInterrupt((InterruptedException) exception); + } + if (exception instanceof ExecutionException) { + throw SpannerExceptionFactory.causeAsRunTimeException((ExecutionException) exception); + } + throw exception; + }, + MoreExecutors.directExecutor()); + } + + @Override + public ApiFuture runAsync(AsyncWork work, Executor executor) { + return ApiFutures.transformAsync( + getAsyncRunner(), + asyncRunner -> asyncRunner.runAsync(work, executor), + MoreExecutors.directExecutor()); + } + + @Override + public ApiFuture getCommitTimestamp() { + return ApiFutures.transformAsync( + getAsyncRunner(), AsyncRunner::getCommitTimestamp, MoreExecutors.directExecutor()); + } + + @Override + public ApiFuture getCommitResponse() { + return ApiFutures.transformAsync( + getAsyncRunner(), AsyncRunner::getCommitResponse, MoreExecutors.directExecutor()); + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedAsyncTransactionManager.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedAsyncTransactionManager.java new file mode 100644 index 000000000000..530670960cab --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedAsyncTransactionManager.java @@ -0,0 +1,87 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.ApiFuture; +import com.google.cloud.spanner.TransactionManager.TransactionState; +import java.util.concurrent.ExecutionException; + +/** + * Represents a {@link AsyncTransactionManager} using a multiplexed session that is not yet ready. + * The execution will be delayed until the multiplexed session has been created and is ready. This + * class is only used during the startup of the client and the multiplexed session has not yet been + * created. 
+ */ +public class DelayedAsyncTransactionManager implements AsyncTransactionManager { + + private final ApiFuture asyncTransactionManagerApiFuture; + + DelayedAsyncTransactionManager( + ApiFuture asyncTransactionManagerApiFuture) { + this.asyncTransactionManagerApiFuture = asyncTransactionManagerApiFuture; + } + + AsyncTransactionManager getAsyncTransactionManager() { + try { + return this.asyncTransactionManagerApiFuture.get(); + } catch (ExecutionException executionException) { + throw SpannerExceptionFactory.causeAsRunTimeException(executionException); + } catch (InterruptedException interruptedException) { + throw SpannerExceptionFactory.propagateInterrupt(interruptedException); + } + } + + @Override + public TransactionContextFuture beginAsync() { + return getAsyncTransactionManager().beginAsync(); + } + + @Override + public TransactionContextFuture beginAsync(AbortedException exception) { + return getAsyncTransactionManager().beginAsync(exception); + } + + @Override + public ApiFuture rollbackAsync() { + return getAsyncTransactionManager().rollbackAsync(); + } + + @Override + public TransactionContextFuture resetForRetryAsync() { + return getAsyncTransactionManager().resetForRetryAsync(); + } + + @Override + public TransactionState getState() { + return getAsyncTransactionManager().getState(); + } + + @Override + public ApiFuture getCommitResponse() { + return getAsyncTransactionManager().getCommitResponse(); + } + + @Override + public void close() { + getAsyncTransactionManager().close(); + } + + @Override + public ApiFuture closeAsync() { + return getAsyncTransactionManager().closeAsync(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedMultiplexedSessionTransaction.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedMultiplexedSessionTransaction.java new file mode 100644 index 000000000000..81e29cfda487 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedMultiplexedSessionTransaction.java @@ -0,0 +1,263 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SessionImpl.NO_CHANNEL_HINT; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.rpc.ServerStream; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.DelayedReadContext.DelayedReadOnlyTransaction; +import com.google.cloud.spanner.MultiplexedSessionDatabaseClient.MultiplexedSessionTransaction; +import com.google.cloud.spanner.Options.TransactionOption; +import com.google.cloud.spanner.Options.UpdateOption; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.spanner.v1.BatchWriteResponse; +import java.util.concurrent.ExecutionException; + +/** + * Represents a delayed execution of a transaction on a multiplexed session. The execution is + * delayed because the multiplexed session is not yet ready. This class is only used during client + * creation before the multiplexed session has been created. The use of this class while the + * multiplexed session is still being created ensures that the creation of a {@link DatabaseClient} + * is non-blocking. 
+ */ +class DelayedMultiplexedSessionTransaction extends AbstractMultiplexedSessionDatabaseClient { + + private final MultiplexedSessionDatabaseClient client; + + private final ISpan span; + + private final ApiFuture sessionFuture; + + DelayedMultiplexedSessionTransaction( + MultiplexedSessionDatabaseClient client, + ISpan span, + ApiFuture sessionFuture) { + this.client = client; + this.span = span; + this.sessionFuture = sessionFuture; + } + + @Override + public String getDatabaseRole() { + return this.client.getDatabaseRole(); + } + + @Override + public ReadContext singleUse() { + return new DelayedReadContext<>( + ApiFutures.transform( + this.sessionFuture, + sessionReference -> + new MultiplexedSessionTransaction( + client, span, sessionReference, NO_CHANNEL_HINT, /* singleUse= */ true) + .singleUse(), + MoreExecutors.directExecutor())); + } + + @Override + public ReadContext singleUse(TimestampBound bound) { + return new DelayedReadContext<>( + ApiFutures.transform( + this.sessionFuture, + sessionReference -> + new MultiplexedSessionTransaction( + client, span, sessionReference, NO_CHANNEL_HINT, /* singleUse= */ true) + .singleUse(bound), + MoreExecutors.directExecutor())); + } + + @Override + public ReadOnlyTransaction singleUseReadOnlyTransaction() { + return new DelayedReadOnlyTransaction( + ApiFutures.transform( + this.sessionFuture, + sessionReference -> + new MultiplexedSessionTransaction( + client, span, sessionReference, NO_CHANNEL_HINT, /* singleUse= */ true) + .singleUseReadOnlyTransaction(), + MoreExecutors.directExecutor())); + } + + @Override + public ReadOnlyTransaction singleUseReadOnlyTransaction(TimestampBound bound) { + return new DelayedReadOnlyTransaction( + ApiFutures.transform( + this.sessionFuture, + sessionReference -> + new MultiplexedSessionTransaction( + client, span, sessionReference, NO_CHANNEL_HINT, /* singleUse= */ true) + .singleUseReadOnlyTransaction(bound), + MoreExecutors.directExecutor())); + } + + @Override + public 
ReadOnlyTransaction readOnlyTransaction() { + return new DelayedReadOnlyTransaction( + ApiFutures.transform( + this.sessionFuture, + sessionReference -> + new MultiplexedSessionTransaction( + client, span, sessionReference, NO_CHANNEL_HINT, /* singleUse= */ false) + .readOnlyTransaction(), + MoreExecutors.directExecutor())); + } + + @Override + public ReadOnlyTransaction readOnlyTransaction(TimestampBound bound) { + return new DelayedReadOnlyTransaction( + ApiFutures.transform( + this.sessionFuture, + sessionReference -> + new MultiplexedSessionTransaction( + client, span, sessionReference, NO_CHANNEL_HINT, /* singleUse= */ false) + .readOnlyTransaction(bound), + MoreExecutors.directExecutor())); + } + + /** + * This is a blocking method, as the interface that it implements is also defined as a blocking + * method. + */ + @Override + public CommitResponse writeAtLeastOnceWithOptions( + Iterable mutations, TransactionOption... options) throws SpannerException { + SessionReference sessionReference = getSessionReference(); + try (MultiplexedSessionTransaction transaction = + new MultiplexedSessionTransaction( + client, span, sessionReference, NO_CHANNEL_HINT, /* singleUse= */ true)) { + return transaction.writeAtLeastOnceWithOptions(mutations, options); + } + } + + // This is a blocking method, as the interface that it implements is also defined as a blocking + // method. + @Override + public Timestamp write(Iterable mutations) throws SpannerException { + SessionReference sessionReference = getSessionReference(); + try (MultiplexedSessionTransaction transaction = + new MultiplexedSessionTransaction( + client, span, sessionReference, NO_CHANNEL_HINT, /* singleUse= */ false)) { + return transaction.write(mutations); + } + } + + // This is a blocking method, as the interface that it implements is also defined as a blocking + // method. + @Override + public CommitResponse writeWithOptions(Iterable mutations, TransactionOption... 
options) + throws SpannerException { + SessionReference sessionReference = getSessionReference(); + try (MultiplexedSessionTransaction transaction = + new MultiplexedSessionTransaction( + client, span, sessionReference, NO_CHANNEL_HINT, /* singleUse= */ false)) { + return transaction.writeWithOptions(mutations, options); + } + } + + /** + * This is a blocking method, as the interface that it implements is also defined as a blocking + * method. + */ + @Override + public ServerStream batchWriteAtLeastOnce( + Iterable mutationGroups, TransactionOption... options) + throws SpannerException { + SessionReference sessionReference = getSessionReference(); + try (MultiplexedSessionTransaction transaction = + new MultiplexedSessionTransaction( + client, span, sessionReference, NO_CHANNEL_HINT, /* singleUse= */ true)) { + return transaction.batchWriteAtLeastOnce(mutationGroups, options); + } + } + + @Override + public TransactionRunner readWriteTransaction(TransactionOption... options) { + return new DelayedTransactionRunner( + ApiFutures.transform( + this.sessionFuture, + sessionReference -> + new MultiplexedSessionTransaction( + client, span, sessionReference, NO_CHANNEL_HINT, /* singleUse= */ false) + .readWriteTransaction(options), + MoreExecutors.directExecutor())); + } + + @Override + public TransactionManager transactionManager(TransactionOption... options) { + return new DelayedTransactionManager( + ApiFutures.transform( + this.sessionFuture, + sessionReference -> + new MultiplexedSessionTransaction( + client, span, sessionReference, NO_CHANNEL_HINT, /* singleUse= */ false) + .transactionManager(options), + MoreExecutors.directExecutor())); + } + + @Override + public AsyncRunner runAsync(TransactionOption... 
options) { + return new DelayedAsyncRunner( + ApiFutures.transform( + this.sessionFuture, + sessionReference -> + new MultiplexedSessionTransaction( + client, span, sessionReference, NO_CHANNEL_HINT, /* singleUse= */ false) + .runAsync(options), + MoreExecutors.directExecutor())); + } + + @Override + public AsyncTransactionManager transactionManagerAsync(TransactionOption... options) { + return new DelayedAsyncTransactionManager( + ApiFutures.transform( + this.sessionFuture, + sessionReference -> + new MultiplexedSessionTransaction( + client, span, sessionReference, NO_CHANNEL_HINT, /* singleUse= */ false) + .transactionManagerAsync(options), + MoreExecutors.directExecutor())); + } + + /** + * Gets the session reference that this delayed transaction is waiting for. This method should + * only be called by methods that are allowed to be blocking. + */ + private SessionReference getSessionReference() { + try { + return this.sessionFuture.get(); + } catch (ExecutionException executionException) { + throw SpannerExceptionFactory.causeAsRunTimeException(executionException); + } catch (InterruptedException interruptedException) { + throw SpannerExceptionFactory.propagateInterrupt(interruptedException); + } + } + + /** + * Execute `stmt` within PARTITIONED_DML transaction using multiplexed session. This method is a + * blocking call as the interface expects to return the output of the `stmt`. + */ + @Override + public long executePartitionedUpdate(Statement stmt, UpdateOption... 
options) { + SessionReference sessionReference = getSessionReference(); + return new MultiplexedSessionTransaction( + client, span, sessionReference, NO_CHANNEL_HINT, /* singleUse= */ true) + .executePartitionedUpdate(stmt, options); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedReadContext.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedReadContext.java new file mode 100644 index 000000000000..86d6ae079d31 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedReadContext.java @@ -0,0 +1,157 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.Options.ReadOption; +import com.google.common.base.Suppliers; +import com.google.common.util.concurrent.MoreExecutors; +import java.util.concurrent.ExecutionException; +import javax.annotation.Nullable; + +/** + * Represents a {@link ReadContext} using a multiplexed session that is not yet ready. The execution + * will be delayed until the multiplexed session has been created and is ready. This class is only + * used during the startup of the client and the multiplexed session has not yet been created. 
This + * ensures that the creation of {@link DatabaseClient} is non-blocking. + */ +class DelayedReadContext implements ReadContext { + + private final ApiFuture readContextFuture; + + DelayedReadContext(ApiFuture readContextFuture) { + this.readContextFuture = readContextFuture; + } + + T getReadContext() { + try { + return this.readContextFuture.get(); + } catch (ExecutionException executionException) { + throw SpannerExceptionFactory.causeAsRunTimeException(executionException); + } catch (InterruptedException interruptedException) { + throw SpannerExceptionFactory.propagateInterrupt(interruptedException); + } + } + + @Override + public ResultSet read( + String table, KeySet keys, Iterable columns, ReadOption... options) { + return new ForwardingResultSet( + Suppliers.memoize(() -> getReadContext().read(table, keys, columns, options))); + } + + @Override + public AsyncResultSet readAsync( + String table, KeySet keys, Iterable columns, ReadOption... options) { + return new ForwardingAsyncResultSet( + Suppliers.memoize(() -> getReadContext().readAsync(table, keys, columns, options))); + } + + @Override + public ResultSet readUsingIndex( + String table, String index, KeySet keys, Iterable columns, ReadOption... options) { + return new ForwardingResultSet( + Suppliers.memoize( + () -> getReadContext().readUsingIndex(table, index, keys, columns, options))); + } + + @Override + public AsyncResultSet readUsingIndexAsync( + String table, String index, KeySet keys, Iterable columns, ReadOption... options) { + return new ForwardingAsyncResultSet( + Suppliers.memoize( + () -> getReadContext().readUsingIndexAsync(table, index, keys, columns, options))); + } + + @Nullable + @Override + public Struct readRow(String table, Key key, Iterable columns) { + // This is allowed to be blocking. 
+ return getReadContext().readRow(table, key, columns); + } + + @Override + public ApiFuture readRowAsync(String table, Key key, Iterable columns) { + return ApiFutures.transformAsync( + this.readContextFuture, + readContext -> readContext.readRowAsync(table, key, columns), + MoreExecutors.directExecutor()); + } + + @Nullable + @Override + public Struct readRowUsingIndex(String table, String index, Key key, Iterable columns) { + // This is allowed to be blocking. + return getReadContext().readRowUsingIndex(table, index, key, columns); + } + + @Override + public ApiFuture readRowUsingIndexAsync( + String table, String index, Key key, Iterable columns) { + return ApiFutures.transformAsync( + this.readContextFuture, + readContext -> readContext.readRowUsingIndexAsync(table, index, key, columns), + MoreExecutors.directExecutor()); + } + + @Override + public ResultSet executeQuery(Statement statement, QueryOption... options) { + return new ForwardingResultSet( + Suppliers.memoize(() -> getReadContext().executeQuery(statement, options))); + } + + @Override + public AsyncResultSet executeQueryAsync(Statement statement, QueryOption... options) { + return new ForwardingAsyncResultSet( + Suppliers.memoize(() -> getReadContext().executeQueryAsync(statement, options))); + } + + @Override + public ResultSet analyzeQuery(Statement statement, QueryAnalyzeMode queryMode) { + return new ForwardingResultSet( + Suppliers.memoize(() -> getReadContext().analyzeQuery(statement, queryMode))); + } + + @Override + public void close() { + try { + this.readContextFuture.get().close(); + } catch (Throwable ignore) { + // Ignore any errors during close, as this error has already propagated to the user through + // other means. + } + } + + /** + * Represents a {@link ReadContext} using a multiplexed session that is not yet ready. The + * execution will be delayed until the multiplexed session has been created and is ready. 
+ */ + static class DelayedReadOnlyTransaction extends DelayedReadContext + implements ReadOnlyTransaction { + DelayedReadOnlyTransaction(ApiFuture readContextFuture) { + super(readContextFuture); + } + + @Override + public Timestamp getReadTimestamp() { + return getReadContext().getReadTimestamp(); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedTransactionManager.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedTransactionManager.java new file mode 100644 index 000000000000..96400e9e9bb9 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedTransactionManager.java @@ -0,0 +1,91 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.ApiFuture; +import com.google.cloud.Timestamp; +import java.util.concurrent.ExecutionException; + +/** + * Represents a {@link TransactionManager} using a multiplexed session that is not yet ready. The + * execution will be delayed until the multiplexed session has been created and is ready. This class + * is only used during the startup of the client and the multiplexed session has not yet been + * created. 
+ */ +class DelayedTransactionManager implements TransactionManager { + + private final ApiFuture transactionManagerFuture; + + DelayedTransactionManager(ApiFuture transactionManagerFuture) { + this.transactionManagerFuture = transactionManagerFuture; + } + + TransactionManager getTransactionManager() { + try { + return this.transactionManagerFuture.get(); + } catch (ExecutionException executionException) { + throw SpannerExceptionFactory.causeAsRunTimeException(executionException); + } catch (InterruptedException interruptedException) { + throw SpannerExceptionFactory.propagateInterrupt(interruptedException); + } + } + + @Override + public TransactionContext begin() { + return getTransactionManager().begin(); + } + + @Override + public TransactionContext begin(AbortedException exception) { + return getTransactionManager().begin(exception); + } + + @Override + public void commit() { + getTransactionManager().commit(); + } + + @Override + public void rollback() { + getTransactionManager().rollback(); + } + + @Override + public TransactionContext resetForRetry() { + return getTransactionManager().resetForRetry(); + } + + @Override + public Timestamp getCommitTimestamp() { + return getTransactionManager().getCommitTimestamp(); + } + + @Override + public CommitResponse getCommitResponse() { + return getTransactionManager().getCommitResponse(); + } + + @Override + public TransactionState getState() { + return getTransactionManager().getState(); + } + + @Override + public void close() { + getTransactionManager().close(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedTransactionRunner.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedTransactionRunner.java new file mode 100644 index 000000000000..bf0ed1b880a1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedTransactionRunner.java @@ -0,0 +1,67 @@ +/* + * Copyright 2024 Google LLC + 
* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.ApiFuture; +import com.google.cloud.Timestamp; +import java.util.concurrent.ExecutionException; +import javax.annotation.Nullable; + +/** + * Represents a {@link TransactionRunner} using a multiplexed session that is not yet ready. The + * execution will be delayed until the multiplexed session has been created and is ready. This class + * is only used during the startup of the client and the multiplexed session has not yet been + * created. 
+ */ +class DelayedTransactionRunner implements TransactionRunner { + private final ApiFuture transactionRunnerFuture; + + DelayedTransactionRunner(ApiFuture transactionRunnerFuture) { + this.transactionRunnerFuture = transactionRunnerFuture; + } + + TransactionRunner getTransactionRunner() { + try { + return this.transactionRunnerFuture.get(); + } catch (ExecutionException executionException) { + throw SpannerExceptionFactory.causeAsRunTimeException(executionException); + } catch (InterruptedException interruptedException) { + throw SpannerExceptionFactory.propagateInterrupt(interruptedException); + } + } + + @Nullable + @Override + public T run(TransactionCallable callable) { + return getTransactionRunner().run(callable); + } + + @Override + public Timestamp getCommitTimestamp() { + return getTransactionRunner().getCommitTimestamp(); + } + + @Override + public CommitResponse getCommitResponse() { + return getTransactionRunner().getCommitResponse(); + } + + @Override + public TransactionRunner allowNestedTransaction() { + return getTransactionRunner().allowNestedTransaction(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Dialect.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Dialect.java new file mode 100644 index 000000000000..d92608c9bc46 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Dialect.java @@ -0,0 +1,95 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.common.collect.ImmutableMap; +import com.google.spanner.admin.database.v1.DatabaseDialect; +import java.util.Arrays; +import java.util.Map; +import java.util.stream.Collectors; + +public enum Dialect { + GOOGLE_STANDARD_SQL { + @Override + public String createDatabaseStatementFor(String databaseName) { + return "CREATE DATABASE `" + databaseName + "`"; + } + + @Override + public DatabaseDialect toProto() { + return DatabaseDialect.GOOGLE_STANDARD_SQL; + } + + @Override + public String getDefaultSchema() { + return ""; + } + }, + POSTGRESQL { + @Override + public String createDatabaseStatementFor(String databaseName) { + return "CREATE DATABASE \"" + databaseName + "\""; + } + + @Override + public DatabaseDialect toProto() { + return DatabaseDialect.POSTGRESQL; + } + + @Override + public String getDefaultSchema() { + return "public"; + } + }; + + private static final Map protoToDialect = + ImmutableMap.of( + DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED, Dialect.GOOGLE_STANDARD_SQL, + DatabaseDialect.GOOGLE_STANDARD_SQL, Dialect.GOOGLE_STANDARD_SQL, + DatabaseDialect.POSTGRESQL, Dialect.POSTGRESQL); + + public abstract String createDatabaseStatementFor(String databaseName); + + public abstract DatabaseDialect toProto(); + + public abstract String getDefaultSchema(); + + public static Dialect fromProto(DatabaseDialect databaseDialect) { + final Dialect dialect = protoToDialect.get(databaseDialect); + if (dialect == null) { + throw new IllegalArgumentException( + String.format( + "Invalid dialect: %s. 
Dialect must be one of [%s]", + databaseDialect, + protoToDialect.keySet().stream() + .map(DatabaseDialect::name) + .collect(Collectors.joining(", ")))); + } + return dialect; + } + + public static Dialect fromName(String name) { + try { + return Dialect.valueOf(name); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException( + String.format( + "Invalid dialect: %s. Dialect must be one of %s", + name, Arrays.toString(Dialect.values()))); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DmlBatchUpdateCountVerificationFailedException.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DmlBatchUpdateCountVerificationFailedException.java new file mode 100644 index 000000000000..c2c945981680 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DmlBatchUpdateCountVerificationFailedException.java @@ -0,0 +1,70 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.cloud.spanner.connection.Connection; +import java.util.Arrays; +import java.util.stream.Collectors; + +/** + * Exception thrown by a {@link Connection} when an automatic DML batch detects that one or more of + * the update counts that it returned during the buffering of DML statements does not match with the + * actual update counts that were returned after execution. 
+ */ +public class DmlBatchUpdateCountVerificationFailedException extends AbortedException { + private static final long serialVersionUID = 1L; + + private final long[] expected; + + private final long[] actual; + + /** Private constructor. Use {@link SpannerExceptionFactory} to create instances. */ + DmlBatchUpdateCountVerificationFailedException( + DoNotConstructDirectly token, long[] expected, long[] actual) { + super( + token, + String.format( + "Actual update counts that were returned during execution do not match the previously" + + " returned update counts.\n" + + "Expected: %s\n" + + "Actual: %s\n" + + "Set auto_batch_dml_update_count_verification to false to skip this" + + " verification.", + Arrays.stream(expected).mapToObj(Long::toString).collect(Collectors.joining()), + Arrays.stream(actual).mapToObj(Long::toString).collect(Collectors.joining())), + /* cause= */ null); + this.expected = expected; + this.actual = actual; + } + + /** + * The expected update counts. These were returned to the client application when the DML + * statements were buffered. + */ + public long[] getExpected() { + return Arrays.copyOf(this.expected, this.expected.length); + } + + /** + * The actual update counts. These were returned by Spanner to the client when the DML statements + * were actually executed, and are the update counts that the client application would have + * received if auto-batching had not been enabled. 
+ */ + public long[] getActual() { + return Arrays.copyOf(this.actual, this.actual.length); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ErrorCode.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ErrorCode.java new file mode 100644 index 000000000000..07771a3faca0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ErrorCode.java @@ -0,0 +1,106 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.common.collect.ImmutableMap; +import io.grpc.Status; +import java.util.Map; + +/** + * Enumerates the major types of error that the Cloud Spanner service can produce. These codes are + * accessible via {@link SpannerException#getErrorCode()}. + */ +public enum ErrorCode { + + // TODO(user): Add documentation. 
+ CANCELLED(Status.CANCELLED), + UNKNOWN(Status.UNKNOWN), + INVALID_ARGUMENT(Status.INVALID_ARGUMENT), + DEADLINE_EXCEEDED(Status.DEADLINE_EXCEEDED), + NOT_FOUND(Status.NOT_FOUND), + ALREADY_EXISTS(Status.ALREADY_EXISTS), + PERMISSION_DENIED(Status.PERMISSION_DENIED), + UNAUTHENTICATED(Status.UNAUTHENTICATED), + RESOURCE_EXHAUSTED(Status.RESOURCE_EXHAUSTED), + FAILED_PRECONDITION(Status.FAILED_PRECONDITION), + ABORTED(Status.ABORTED), + OUT_OF_RANGE(Status.OUT_OF_RANGE), + UNIMPLEMENTED(Status.UNIMPLEMENTED), + INTERNAL(Status.INTERNAL), + UNAVAILABLE(Status.UNAVAILABLE), + DATA_LOSS(Status.DATA_LOSS), + ; + + private static final Map errorByRpcCode; + + static { + ImmutableMap.Builder builder = ImmutableMap.builder(); + for (ErrorCode errorCode : ErrorCode.values()) { + builder.put(errorCode.getCode(), errorCode); + } + errorByRpcCode = builder.build(); + } + + private final Status.Code code; + + ErrorCode(Status status) { + this.code = status.getCode(); + } + + int getCode() { + return this.code.value(); + } + + Status getGrpcStatus() { + return this.code.toStatus(); + } + + /** + * @return the corresponding gRPC status code of this {@link ErrorCode}. + */ + public Status.Code getGrpcStatusCode() { + return this.code; + } + + /** + * Returns the error code represents by {@code name}, or {@code defaultValue} if {@code name} does + * not map to a known code. + */ + static ErrorCode valueOf(String name, ErrorCode defaultValue) { + try { + return ErrorCode.valueOf(name); + } catch (IllegalArgumentException e) { + return defaultValue; + } + } + + /** + * Returns the error code corresponding to a gRPC status, or {@code UNKNOWN} if not recognized. + */ + public static ErrorCode fromGrpcStatus(Status status) { + ErrorCode code = errorByRpcCode.get(status.getCode().value()); + return code == null ? UNKNOWN : code; + } + + /** + * Returns the error code corresponding to the given status, or {@code UNKNOWN} if not recognized. 
+ */ + static ErrorCode fromRpcStatus(com.google.rpc.Status status) { + ErrorCode code = errorByRpcCode.get(status.getCode()); + return code == null ? UNKNOWN : code; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ErrorHandler.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ErrorHandler.java new file mode 100644 index 000000000000..cf2465d7ade7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ErrorHandler.java @@ -0,0 +1,50 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.BetaApi; +import javax.annotation.Nonnull; + +/** + * The {@link ErrorHandler} interface can be used to implement custom error and retry handling for + * specific cases. The default implementation does nothing and falls back to the standard error and + * retry handling in Gax and the Spanner client. 
+ */ +@BetaApi +interface ErrorHandler { + @Nonnull + Throwable translateException(@Nonnull Throwable exception); + + int getMaxAttempts(); + + class DefaultErrorHandler implements ErrorHandler { + static final DefaultErrorHandler INSTANCE = new DefaultErrorHandler(); + + private DefaultErrorHandler() {} + + @Nonnull + @Override + public Throwable translateException(@Nonnull Throwable exception) { + return exception; + } + + @Override + public int getMaxAttempts() { + return 0; + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ForwardingAsyncResultSet.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ForwardingAsyncResultSet.java new file mode 100644 index 000000000000..a0197cb7b8dd --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ForwardingAsyncResultSet.java @@ -0,0 +1,72 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.ApiFuture; +import com.google.common.base.Function; +import com.google.common.base.Preconditions; +import com.google.common.base.Supplier; +import java.util.List; +import java.util.concurrent.Executor; + +/** Forwarding implementation of {@link AsyncResultSet} that forwards all calls to a delegate. 
*/ +public class ForwardingAsyncResultSet extends ForwardingResultSet implements AsyncResultSet { + + public ForwardingAsyncResultSet(AsyncResultSet delegate) { + super(Preconditions.checkNotNull(delegate)); + } + + ForwardingAsyncResultSet(Supplier delegateSupplier) { + super(Preconditions.checkNotNull(delegateSupplier)); + } + + @Override + AsyncResultSet getDelegate() { + return (AsyncResultSet) super.getDelegate(); + } + + @Override + public CursorState tryNext() throws SpannerException { + return getDelegate().tryNext(); + } + + @Override + public ApiFuture setCallback(Executor exec, ReadyCallback cb) { + return getDelegate().setCallback(exec, cb); + } + + @Override + public void cancel() { + getDelegate().cancel(); + } + + @Override + public void resume() { + getDelegate().resume(); + } + + @Override + public ApiFuture> toListAsync( + Function transformer, Executor executor) { + return getDelegate().toListAsync(transformer, executor); + } + + @Override + public List toList(Function transformer) throws SpannerException { + return getDelegate().toList(transformer); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ForwardingResultSet.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ForwardingResultSet.java new file mode 100644 index 000000000000..3c4883e65862 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ForwardingResultSet.java @@ -0,0 +1,113 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.InternalApi; +import com.google.common.base.Preconditions; +import com.google.common.base.Supplier; +import com.google.common.base.Suppliers; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; + +/** Forwarding implementation of ResultSet that forwards all calls to a delegate. */ +public class ForwardingResultSet extends ForwardingStructReader + implements ProtobufResultSet, StreamingResultSet { + + private Supplier delegate; + + public ForwardingResultSet(ResultSet delegate) { + super(delegate); + this.delegate = Suppliers.ofInstance(Preconditions.checkNotNull(delegate)); + } + + public ForwardingResultSet(Supplier supplier) { + super(supplier); + this.delegate = supplier; + } + + /** + * Replaces the underlying {@link ResultSet}. It is the responsibility of the caller to ensure + * that the new delegate has the same properties and is in the same state as the original + * delegate. This method can be used if the underlying delegate needs to be replaced after a + * session or transaction needed to be restarted after the {@link ResultSet} had already been + * returned to the user. 
+ */ + void replaceDelegate(ResultSet newDelegate) { + Preconditions.checkNotNull(newDelegate); + super.replaceDelegate(newDelegate); + this.delegate = Suppliers.ofInstance(Preconditions.checkNotNull(newDelegate)); + } + + ResultSet getDelegate() { + return delegate.get(); + } + + @Override + public boolean next() throws SpannerException { + return delegate.get().next(); + } + + @Override + public boolean canGetProtobufValue(int columnIndex) { + ResultSet resultSetDelegate = delegate.get(); + return (resultSetDelegate instanceof ProtobufResultSet) + && ((ProtobufResultSet) resultSetDelegate).canGetProtobufValue(columnIndex); + } + + @Override + public com.google.protobuf.Value getProtobufValue(int columnIndex) { + ResultSet resultSetDelegate = delegate.get(); + Preconditions.checkState( + resultSetDelegate instanceof ProtobufResultSet, + "The result set does not support protobuf values"); + return ((ProtobufResultSet) resultSetDelegate).getProtobufValue(columnIndex); + } + + @Override + public Struct getCurrentRowAsStruct() { + return delegate.get().getCurrentRowAsStruct(); + } + + @Override + public void close() { + ResultSet rs; + try { + rs = delegate.get(); + } catch (Exception e) { + // Ignore any exceptions when getting the underlying result set, as that means that there is + // nothing that needs to be closed. 
+ return; + } + rs.close(); + } + + @Override + public ResultSetStats getStats() { + return delegate.get().getStats(); + } + + @Override + public ResultSetMetadata getMetadata() { + return delegate.get().getMetadata(); + } + + @Override + @InternalApi + public boolean initiateStreaming(AsyncResultSet.StreamMessageListener streamMessageListener) { + return StreamingUtil.initiateStreaming(delegate.get(), streamMessageListener); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ForwardingStructReader.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ForwardingStructReader.java new file mode 100644 index 000000000000..839202bb9fe3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ForwardingStructReader.java @@ -0,0 +1,536 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.common.base.Preconditions; +import com.google.common.base.Supplier; +import com.google.common.base.Suppliers; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ProtocolMessageEnum; +import java.math.BigDecimal; +import java.util.List; +import java.util.UUID; +import java.util.function.Function; + +/** Forwarding implements of StructReader */ +public class ForwardingStructReader implements StructReader { + + private Supplier delegate; + + public ForwardingStructReader(StructReader delegate) { + this.delegate = Suppliers.ofInstance(Preconditions.checkNotNull(delegate)); + } + + public ForwardingStructReader(Supplier delegate) { + this.delegate = Preconditions.checkNotNull(delegate); + } + + /** + * Replaces the underlying {@link StructReader}. It is the responsibility of the caller to ensure + * that the new delegate has the same properties and is in the same state as the original + * delegate. This method can be used if the underlying delegate needs to be replaced after a + * session or transaction needed to be restarted after the {@link StructReader} had already been + * returned to the user. + */ + void replaceDelegate(StructReader newDelegate) { + this.delegate = Suppliers.ofInstance(Preconditions.checkNotNull(newDelegate)); + } + + /** + * Called before each forwarding call to allow sub classes to do additional state checking. Sub + * classes should throw an {@link Exception} if the current state is not valid for reading data + * from this {@link ForwardingStructReader}. The default implementation does nothing. 
+   */
+  protected void checkValidState() {}
+
+  @Override
+  public Type getType() {
+    checkValidState();
+    return delegate.get().getType();
+  }
+
+  @Override
+  public int getColumnCount() {
+    checkValidState();
+    return delegate.get().getColumnCount();
+  }
+
+  @Override
+  public int getColumnIndex(String columnName) {
+    checkValidState();
+    return delegate.get().getColumnIndex(columnName);
+  }
+
+  @Override
+  public Type getColumnType(int columnIndex) {
+    checkValidState();
+    return delegate.get().getColumnType(columnIndex);
+  }
+
+  @Override
+  public Type getColumnType(String columnName) {
+    checkValidState();
+    return delegate.get().getColumnType(columnName);
+  }
+
+  @Override
+  public boolean isNull(int columnIndex) {
+    checkValidState();
+    return delegate.get().isNull(columnIndex);
+  }
+
+  @Override
+  public boolean isNull(String columnName) {
+    checkValidState();
+    return delegate.get().isNull(columnName);
+  }
+
+  @Override
+  public boolean getBoolean(int columnIndex) {
+    checkValidState();
+    return delegate.get().getBoolean(columnIndex);
+  }
+
+  @Override
+  public boolean getBoolean(String columnName) {
+    checkValidState();
+    return delegate.get().getBoolean(columnName);
+  }
+
+  @Override
+  public long getLong(int columnIndex) {
+    checkValidState();
+    return delegate.get().getLong(columnIndex);
+  }
+
+  @Override
+  public long getLong(String columnName) {
+    checkValidState();
+    return delegate.get().getLong(columnName);
+  }
+
+  @Override
+  public float getFloat(int columnIndex) {
+    checkValidState();
+    return delegate.get().getFloat(columnIndex);
+  }
+
+  @Override
+  public float getFloat(String columnName) {
+    checkValidState();
+    return delegate.get().getFloat(columnName);
+  }
+
+  @Override
+  public double getDouble(int columnIndex) {
+    checkValidState();
+    return delegate.get().getDouble(columnIndex);
+  }
+
+  @Override
+  public double getDouble(String columnName) {
+    checkValidState();
+    return delegate.get().getDouble(columnName);
+  }
+
+  @Override
+  public BigDecimal getBigDecimal(int columnIndex) {
+    // Fix: previously this getter skipped the valid-state check that every
+    // sibling getter performs, allowing reads through an invalidated reader.
+    checkValidState();
+    return delegate.get().getBigDecimal(columnIndex);
+  }
+
+  @Override
+  public BigDecimal getBigDecimal(String columnName) {
+    // Fix: previously missing checkValidState(); see getBigDecimal(int).
+    checkValidState();
+    return delegate.get().getBigDecimal(columnName);
+  }
+
+  @Override
+  public String getString(int columnIndex) {
+    checkValidState();
+    return delegate.get().getString(columnIndex);
+  }
+
+  @Override
+  public String getString(String columnName) {
+    checkValidState();
+    return delegate.get().getString(columnName);
+  }
+
+  @Override
+  public String getJson(int columnIndex) {
+    checkValidState();
+    return delegate.get().getJson(columnIndex);
+  }
+
+  @Override
+  public String getJson(String columnName) {
+    checkValidState();
+    return delegate.get().getJson(columnName);
+  }
+
+  @Override
+  public String getPgJsonb(int columnIndex) {
+    checkValidState();
+    return delegate.get().getPgJsonb(columnIndex);
+  }
+
+  @Override
+  public String getPgJsonb(String columnName) {
+    checkValidState();
+    return delegate.get().getPgJsonb(columnName);
+  }
+
+  @Override
+  public ByteArray getBytes(int columnIndex) {
+    checkValidState();
+    return delegate.get().getBytes(columnIndex);
+  }
+
+  @Override
+  public ByteArray getBytes(String columnName) {
+    checkValidState();
+    return delegate.get().getBytes(columnName);
+  }
+
+  @Override
+  public Timestamp getTimestamp(int columnIndex) {
+    checkValidState();
+    return delegate.get().getTimestamp(columnIndex);
+  }
+
+  @Override
+  public Timestamp getTimestamp(String columnName) {
+    checkValidState();
+    return delegate.get().getTimestamp(columnName);
+  }
+
+  @Override
+  public Date getDate(int columnIndex) {
+    checkValidState();
+    return delegate.get().getDate(columnIndex);
+  }
+
+  @Override
+  public Date getDate(String columnName) {
+    checkValidState();
+    return delegate.get().getDate(columnName);
+  }
+
+  @Override
+  public UUID getUuid(int columnIndex) {
+    checkValidState();
+    return delegate.get().getUuid(columnIndex);
+  }
+
+  @Override
+  public UUID getUuid(String columnName) {
+    checkValidState();
+    return delegate.get().getUuid(columnName);
+  }
+
+  @Override
+  public Interval getInterval(int columnIndex) {
+    checkValidState();
+    return delegate.get().getInterval(columnIndex);
+  }
+
+  @Override
+  public Interval getInterval(String columnName) {
+    checkValidState();
+    return delegate.get().getInterval(columnName);
+  }
+
+  @Override
+  public boolean[] getBooleanArray(int columnIndex) {
+    checkValidState();
+    return delegate.get().getBooleanArray(columnIndex);
+  }
+
+  @Override
+  public boolean[] getBooleanArray(String columnName) {
+    checkValidState();
+    return delegate.get().getBooleanArray(columnName);
+  }
+
+  @Override
+  public List<Boolean> getBooleanList(int columnIndex) {
+    checkValidState();
+    return delegate.get().getBooleanList(columnIndex);
+  }
+
+  @Override
+  public List<Boolean> getBooleanList(String columnName) {
+    checkValidState();
+    return delegate.get().getBooleanList(columnName);
+  }
+
+  @Override
+  public long[] getLongArray(int columnIndex) {
+    checkValidState();
+    return delegate.get().getLongArray(columnIndex);
+  }
+
+  @Override
+  public long[] getLongArray(String columnName) {
+    checkValidState();
+    return delegate.get().getLongArray(columnName);
+  }
+
+  @Override
+  public List<Long> getLongList(int columnIndex) {
+    checkValidState();
+    return delegate.get().getLongList(columnIndex);
+  }
+
+  @Override
+  public List<Long> getLongList(String columnName) {
+    checkValidState();
+    return delegate.get().getLongList(columnName);
+  }
+
+  @Override
+  public float[] getFloatArray(int columnIndex) {
+    checkValidState();
+    return delegate.get().getFloatArray(columnIndex);
+  }
+
+  @Override
+  public float[] getFloatArray(String columnName) {
+    checkValidState();
+    return delegate.get().getFloatArray(columnName);
+  }
+
+  @Override
+  public List<Float> getFloatList(int columnIndex) {
+    checkValidState();
+    return delegate.get().getFloatList(columnIndex);
+  }
+
+  @Override
+  public List<Float> getFloatList(String columnName) {
+    checkValidState();
+    return delegate.get().getFloatList(columnName);
+  }
+
+  @Override
+  public double[] getDoubleArray(int columnIndex) {
+    checkValidState();
+    return delegate.get().getDoubleArray(columnIndex);
+  }
+
+  @Override
+  public double[] getDoubleArray(String columnName) {
+    checkValidState();
+    return delegate.get().getDoubleArray(columnName);
+  }
+
+  @Override
+  public List<Double> getDoubleList(int columnIndex) {
+    checkValidState();
+    return delegate.get().getDoubleList(columnIndex);
+  }
+
+  @Override
+  public List<Double> getDoubleList(String columnName) {
+    checkValidState();
+    return delegate.get().getDoubleList(columnName);
+  }
+
+  @Override
+  public List<BigDecimal> getBigDecimalList(int columnIndex) {
+    // Fix: previously missing checkValidState(); see getBigDecimal(int).
+    checkValidState();
+    return delegate.get().getBigDecimalList(columnIndex);
+  }
+
+  @Override
+  public List<BigDecimal> getBigDecimalList(String columnName) {
+    // Fix: previously missing checkValidState(); see getBigDecimal(int).
+    checkValidState();
+    return delegate.get().getBigDecimalList(columnName);
+  }
+
+  @Override
+  public List<String> getStringList(int columnIndex) {
+    checkValidState();
+    return delegate.get().getStringList(columnIndex);
+  }
+
+  @Override
+  public List<String> getStringList(String columnName) {
+    checkValidState();
+    return delegate.get().getStringList(columnName);
+  }
+
+  @Override
+  public List<String> getJsonList(int columnIndex) {
+    checkValidState();
+    return delegate.get().getJsonList(columnIndex);
+  }
+
+  @Override
+  public List<String> getJsonList(String columnName) {
+    checkValidState();
+    return delegate.get().getJsonList(columnName);
+  }
+
+  @Override
+  public List<String> getPgJsonbList(int columnIndex) {
+    checkValidState();
+    return delegate.get().getPgJsonbList(columnIndex);
+  }
+
+  @Override
+  public List<String> getPgJsonbList(String columnName) {
+    checkValidState();
+    return delegate.get().getPgJsonbList(columnName);
+  }
+
+  @Override
+  public List<ByteArray> getBytesList(int columnIndex) {
+    checkValidState();
+    return delegate.get().getBytesList(columnIndex);
+  }
+
+  @Override
+  public List<ByteArray> getBytesList(String columnName) {
+    checkValidState();
+    return delegate.get().getBytesList(columnName);
+  }
+
+  @Override
+  public List<Timestamp> getTimestampList(int columnIndex) {
+    checkValidState();
+    return delegate.get().getTimestampList(columnIndex);
+  }
+
+  @Override
+  public List<Timestamp> getTimestampList(String columnName) {
+    checkValidState();
+    return delegate.get().getTimestampList(columnName);
+  }
+
+  @Override
+  public List<Date> getDateList(int columnIndex) {
+    checkValidState();
+    return delegate.get().getDateList(columnIndex);
+  }
+
+  @Override
+  public List<Date> getDateList(String columnName) {
+    checkValidState();
+    return delegate.get().getDateList(columnName);
+  }
+
+  @Override
+  public List<UUID> getUuidList(int columnIndex) {
+    checkValidState();
+    return delegate.get().getUuidList(columnIndex);
+  }
+
+  @Override
+  public List<UUID> getUuidList(String columnName) {
+    checkValidState();
+    return delegate.get().getUuidList(columnName);
+  }
+
+  @Override
+  public List<Interval> getIntervalList(int columnIndex) {
+    checkValidState();
+    return delegate.get().getIntervalList(columnIndex);
+  }
+
+  @Override
+  public List<Interval> getIntervalList(String columnName) {
+    checkValidState();
+    return delegate.get().getIntervalList(columnName);
+  }
+
+  @Override
+  public <T extends AbstractMessage> List<T> getProtoMessageList(int columnIndex, T message) {
+    checkValidState();
+    return delegate.get().getProtoMessageList(columnIndex, message);
+  }
+
+  @Override
+  public <T extends AbstractMessage> List<T> getProtoMessageList(String columnName, T message) {
+    checkValidState();
+    return delegate.get().getProtoMessageList(columnName, message);
+  }
+
+  @Override
+  public <T extends ProtocolMessageEnum> List<T> getProtoEnumList(
+      int columnIndex, Function<Integer, ProtocolMessageEnum> method) {
+    checkValidState();
+    return delegate.get().getProtoEnumList(columnIndex, method);
+  }
+
+  @Override
+  public <T extends ProtocolMessageEnum> List<T> getProtoEnumList(
+      String columnName, Function<Integer, ProtocolMessageEnum> method) {
+    checkValidState();
+    return delegate.get().getProtoEnumList(columnName, method);
+  }
+
+  @Override
+  public List<Struct> getStructList(int columnIndex) {
+    checkValidState();
+    return delegate.get().getStructList(columnIndex);
+  }
+
+  @Override
+  public List<Struct> getStructList(String columnName) {
+    checkValidState();
+    return delegate.get().getStructList(columnName);
+  }
+
+  @Override
+  public <T extends AbstractMessage> T getProtoMessage(int columnIndex, T message) {
+    checkValidState();
+    return delegate.get().getProtoMessage(columnIndex, message);
+  }
+
+  @Override
+  public <T extends AbstractMessage> T getProtoMessage(String columnName, T message) {
+    checkValidState();
+    return delegate.get().getProtoMessage(columnName, message);
+  }
+
+  @Override
+  public <T extends ProtocolMessageEnum> T getProtoEnum(
+      int columnIndex, Function<Integer, ProtocolMessageEnum> method) {
+    checkValidState();
+    return delegate.get().getProtoEnum(columnIndex, method);
+  }
+
+  @Override
+  public <T extends ProtocolMessageEnum> T getProtoEnum(
+      String columnName, Function<Integer, ProtocolMessageEnum> method) {
+    checkValidState();
+    return delegate.get().getProtoEnum(columnName, method);
+  }
+
+  @Override
+  public Value getValue(int columnIndex) {
+    checkValidState();
+    return delegate.get().getValue(columnIndex);
+  }
+
+  @Override
+  public Value getValue(String columnName) {
+    checkValidState();
+    return delegate.get().getValue(columnName);
+  }
+}
diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcResultSet.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcResultSet.java
new file mode 100644
index 000000000000..80a9dfcf5331
--- /dev/null
+++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcResultSet.java
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner;
+
+import static com.google.cloud.spanner.SpannerExceptionFactory.asSpannerException;
+import static com.google.common.base.Preconditions.checkState;
+
+import com.google.api.core.InternalApi;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.Value;
+import com.google.spanner.v1.PartialResultSet;
+import com.google.spanner.v1.ResultSetMetadata;
+import com.google.spanner.v1.ResultSetStats;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import javax.annotation.Nullable;
+
+/**
+ * {@link StreamingResultSet} that assembles rows from a stream of {@link PartialResultSet}
+ * messages. Each call to {@link #next()} consumes proto values from the underlying
+ * {@link GrpcValueIterator} into the current {@link GrpcStruct} row; decoding of those values is
+ * controlled by the configured {@link DecodeMode}. The {@link Listener} is informed of transaction
+ * metadata, errors, and stream completion.
+ *
+ * <p>Note: generic type arguments ({@code AbstractResultSet<List<Object>>},
+ * {@code CloseableIterator<PartialResultSet>}) restored here; they had been stripped.
+ */
+@VisibleForTesting
+class GrpcResultSet extends AbstractResultSet<List<Object>>
+    implements ProtobufResultSet, StreamingResultSet {
+  private final GrpcValueIterator iterator;
+  private final Listener listener;
+  private final DecodeMode decodeMode;
+  // Set lazily on the first next() call, once the stream has delivered metadata.
+  private ResultSetMetadata metadata;
+  // The row most recently produced by next(); null before the first next() call.
+  private GrpcStruct currRow;
+  // Backing storage for the current row; reused across rows to avoid reallocation.
+  private List<Object> rowData;
+  private SpannerException error;
+  // Populated when the stream completes; null until then.
+  private ResultSetStats statistics;
+  private boolean closed;
+
+  GrpcResultSet(CloseableIterator<PartialResultSet> iterator, Listener listener) {
+    this(iterator, listener, DecodeMode.DIRECT);
+  }
+
+  GrpcResultSet(
+      CloseableIterator<PartialResultSet> iterator, Listener listener, DecodeMode decodeMode) {
+    this.iterator = new GrpcValueIterator(iterator, listener);
+    this.listener = listener;
+    this.decodeMode = decodeMode;
+  }
+
+  /** Returns true if the raw proto value of the given column is still available. */
+  @Override
+  public boolean canGetProtobufValue(int columnIndex) {
+    return !closed && currRow != null && currRow.canGetProtoValue(columnIndex);
+  }
+
+  /**
+   * Returns the raw proto value of the given column of the current row.
+   *
+   * @throws IllegalStateException if the result set is closed or next() has not been called
+   */
+  @Override
+  public Value getProtobufValue(int columnIndex) {
+    checkState(!closed, "ResultSet is closed");
+    checkState(currRow != null, "next() call required");
+    return currRow.getProtoValueInternal(columnIndex);
+  }
+
+  @Override
+  protected GrpcStruct currRow() {
+    checkState(!closed, "ResultSet is closed");
+    checkState(currRow != null, "next() call required");
+    return currRow;
+  }
+
+  /**
+   * Advances to the next row, lazily initializing metadata and the row buffer on the first call.
+   * Closes the result set when the stream is exhausted. Any stream error is reported to the
+   * listener via yieldError, which always throws.
+   */
+  @Override
+  public boolean next() throws SpannerException {
+    if (error != null) {
+      throw asSpannerException(error);
+    }
+    try {
+      if (currRow == null) {
+        metadata = iterator.getMetadata();
+        if (metadata.hasTransaction()) {
+          listener.onTransactionMetadata(
+              metadata.getTransaction(), iterator.isWithBeginTransaction());
+        } else if (iterator.isWithBeginTransaction()) {
+          // The query should have returned a transaction.
+          throw SpannerExceptionFactory.newSpannerException(
+              ErrorCode.FAILED_PRECONDITION, AbstractReadContext.NO_TRANSACTION_RETURNED_MSG);
+        }
+        if (rowData == null) {
+          rowData = new ArrayList<>(metadata.getRowType().getFieldsCount());
+          if (decodeMode != DecodeMode.DIRECT) {
+            // Lazy decoding may touch the row from multiple threads; synchronize the buffer.
+            rowData = Collections.synchronizedList(rowData);
+          }
+        } else {
+          rowData.clear();
+        }
+        currRow = new GrpcStruct(iterator.type(), rowData, decodeMode);
+      }
+      boolean hasNext = currRow.consumeRow(iterator);
+      if (!hasNext) {
+        statistics = iterator.getStats();
+        // Close the ResultSet when there is no more data.
+        close();
+      }
+      return hasNext;
+    } catch (Throwable t) {
+      throw yieldError(
+          asSpannerException(t),
+          iterator.isWithBeginTransaction() && currRow == null,
+          iterator.isLastStatement());
+    }
+  }
+
+  /** Returns the statistics of the query, or null if the stream has not completed yet. */
+  @Override
+  @Nullable
+  public ResultSetStats getStats() {
+    return statistics;
+  }
+
+  @Override
+  public ResultSetMetadata getMetadata() {
+    checkState(metadata != null, "next() call required");
+    return metadata;
+  }
+
+  @Override
+  @InternalApi
+  public boolean initiateStreaming(AsyncResultSet.StreamMessageListener streamMessageListener) {
+    return iterator.initiateStreaming(streamMessageListener);
+  }
+
+  /** Idempotent close; notifies the listener and cancels the underlying stream once. */
+  @Override
+  public void close() {
+    synchronized (this) {
+      if (closed) {
+        return;
+      }
+      closed = true;
+    }
+    listener.onDone(iterator.isWithBeginTransaction());
+    iterator.close("ResultSet closed");
+  }
+
+  @Override
+  public Type getType() {
+    checkState(currRow != null, "next() call required");
+    return currRow.getType();
+  }
+
+  /**
+   * Reports the error to the listener, closes this result set, and throws the (possibly
+   * translated) exception. Declared with a return type only so callers can write
+   * {@code throw yieldError(...)}; this method never returns normally.
+   */
+  private SpannerException yieldError(
+      SpannerException e, boolean beginTransaction, boolean lastStatement) {
+    SpannerException toThrow = listener.onError(e, beginTransaction, lastStatement);
+    close();
+    throw toThrow;
+  }
+}
diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcStreamIterator.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcStreamIterator.java
new file mode 100644
index 000000000000..e0df4c422e3c
--- /dev/null
+++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcStreamIterator.java
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner;
+
+import com.google.api.core.InternalApi;
+import com.google.api.gax.rpc.ApiCallContext;
+import com.google.cloud.spanner.AbstractResultSet.CloseableIterator;
+import com.google.cloud.spanner.spi.v1.SpannerRpc;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.AbstractIterator;
+import com.google.common.util.concurrent.Uninterruptibles;
+import com.google.spanner.v1.PartialResultSet;
+import java.time.Duration;
+import java.util.Optional;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import javax.annotation.Nullable;
+
+/**
+ * Adapts a streaming read/query call into an iterator over partial result sets.
+ *
+ * <p>gRPC callback threads push {@link PartialResultSet} messages into a bounded
+ * {@link BlockingQueue}; the consumer thread pulls them out in {@link #computeNext()}. The
+ * sentinel {@link #END_OF_STREAM} (an empty message instance, compared by identity) marks normal
+ * completion or an error; in the error case {@link #computeNext()} rethrows the stored exception.
+ *
+ * <p>Note: generic type arguments ({@code AbstractIterator<PartialResultSet>},
+ * {@code CloseableIterator<PartialResultSet>}, {@code BlockingQueue<PartialResultSet>}) restored
+ * here; they had been stripped.
+ */
+@VisibleForTesting
+class GrpcStreamIterator extends AbstractIterator<PartialResultSet>
+    implements CloseableIterator<PartialResultSet> {
+  private static final Logger logger = Logger.getLogger(GrpcStreamIterator.class.getName());
+  // Sentinel compared by reference in computeNext(); never produced by the server path.
+  static final PartialResultSet END_OF_STREAM = PartialResultSet.newBuilder().build();
+  private final int prefetchChunks;
+  private AsyncResultSet.StreamMessageListener streamMessageListener;
+
+  private final ConsumerImpl consumer;
+  private final BlockingQueue<PartialResultSet> stream;
+  private final Statement statement;
+
+  private SpannerRpc.StreamingCall call;
+  private volatile boolean withBeginTransaction;
+  private final boolean lastStatement;
+  // Both timeout fields stay unset (null unit) when no stream wait timeout applies.
+  private TimeUnit streamWaitTimeoutUnit;
+  private long streamWaitTimeoutValue;
+  // Written on the gRPC callback thread, read on the consumer thread after END_OF_STREAM.
+  private SpannerException error;
+  private boolean done;
+
+  @VisibleForTesting
+  GrpcStreamIterator(
+      boolean lastStatement, int prefetchChunks, boolean cancelQueryWhenClientIsClosed) {
+    this(null, lastStatement, prefetchChunks, cancelQueryWhenClientIsClosed);
+  }
+
+  @VisibleForTesting
+  GrpcStreamIterator(
+      Statement statement,
+      boolean lastStatement,
+      int prefetchChunks,
+      boolean cancelQueryWhenClientIsClosed) {
+    this.statement = statement;
+    this.lastStatement = lastStatement;
+    this.prefetchChunks = prefetchChunks;
+    this.consumer = new ConsumerImpl(cancelQueryWhenClientIsClosed);
+    // One extra to allow for END_OF_STREAM message.
+    this.stream = new LinkedBlockingQueue<>(prefetchChunks + 1);
+  }
+
+  protected final SpannerRpc.ResultStreamConsumer consumer() {
+    return consumer;
+  }
+
+  void registerListener(AsyncResultSet.StreamMessageListener streamMessageListener) {
+    this.streamMessageListener = Preconditions.checkNotNull(streamMessageListener);
+  }
+
+  /**
+   * Attaches the underlying streaming call and derives the stream wait timeout from its call
+   * context, if any.
+   */
+  public void setCall(SpannerRpc.StreamingCall call, boolean withBeginTransaction) {
+    this.call = call;
+    this.withBeginTransaction = withBeginTransaction;
+    ApiCallContext callContext = call.getCallContext();
+    Duration streamWaitTimeout =
+        callContext == null ? null : callContext.getStreamWaitTimeoutDuration();
+    if (streamWaitTimeout != null) {
+      // Determine the timeout unit to use. This reduces the precision to seconds if the timeout
+      // value is more than 1 second, which is lower than the precision that would normally be
+      // used by the stream watchdog (which uses a precision of 10 seconds by default).
+      if (streamWaitTimeout.getSeconds() > 0L) {
+        streamWaitTimeoutValue = streamWaitTimeout.getSeconds();
+        streamWaitTimeoutUnit = TimeUnit.SECONDS;
+      } else if (streamWaitTimeout.getNano() > 0) {
+        streamWaitTimeoutValue = streamWaitTimeout.getNano();
+        streamWaitTimeoutUnit = TimeUnit.NANOSECONDS;
+      }
+      // Note that if the stream-wait-timeout is zero, we won't set a timeout at all.
+      // That is consistent with ApiCallContext#withStreamWaitTimeout(Duration.ZERO).
+    }
+  }
+
+  @Override
+  public void close(@Nullable String message) {
+    if (call != null) {
+      call.cancel(message);
+    }
+  }
+
+  @Override
+  @InternalApi
+  public void requestPrefetchChunks() {
+    Preconditions.checkState(call != null, "The StreamingCall object is not initialized");
+    call.request(prefetchChunks);
+  }
+
+  @Override
+  public boolean isWithBeginTransaction() {
+    return withBeginTransaction;
+  }
+
+  @Override
+  public boolean isLastStatement() {
+    return lastStatement;
+  }
+
+  /**
+   * Takes the next message from the queue (honoring the stream wait timeout, if set), requests
+   * one more chunk for each message consumed, and ends iteration on END_OF_STREAM — rethrowing
+   * any error the producer recorded.
+   */
+  @Override
+  protected final PartialResultSet computeNext() {
+    PartialResultSet next;
+    try {
+      if (streamWaitTimeoutUnit != null) {
+        next = stream.poll(streamWaitTimeoutValue, streamWaitTimeoutUnit);
+        if (next == null) {
+          throw SpannerExceptionFactory.newSpannerException(
+              ErrorCode.DEADLINE_EXCEEDED, "stream wait timeout");
+        }
+      } else {
+        next = stream.take();
+      }
+    } catch (InterruptedException e) {
+      // Treat interrupt as a request to cancel the read.
+      throw SpannerExceptionFactory.propagateInterrupt(e);
+    }
+    if (next != END_OF_STREAM) {
+      call.request(1);
+      return next;
+    }
+
+    // All done - close() no longer needs to cancel the call.
+    call = null;
+
+    if (error != null) {
+      throw SpannerExceptionFactory.asSpannerException(error);
+    }
+
+    endOfData();
+    return null;
+  }
+
+  private void addToStream(PartialResultSet results) {
+    // We assume that nothing from the user will interrupt gRPC event threads.
+    Uninterruptibles.putUninterruptibly(stream, results);
+    onStreamMessage(results);
+  }
+
+  /** Receives stream events from gRPC and feeds them into the blocking queue. */
+  private class ConsumerImpl implements SpannerRpc.ResultStreamConsumer {
+    private final boolean cancelQueryWhenClientIsClosed;
+
+    ConsumerImpl(boolean cancelQueryWhenClientIsClosed) {
+      this.cancelQueryWhenClientIsClosed = cancelQueryWhenClientIsClosed;
+    }
+
+    @Override
+    public void onPartialResultSet(PartialResultSet results) {
+      addToStream(results);
+      if (results.getLast()) {
+        // Server flagged the final message; finish early so onCompleted() won't double-add.
+        done = true;
+        addToStream(END_OF_STREAM);
+      }
+    }
+
+    @Override
+    public void onCompleted() {
+      if (!done) {
+        addToStream(END_OF_STREAM);
+      }
+    }
+
+    @Override
+    public void onError(SpannerException exception) {
+      if (statement != null) {
+        if (logger.isLoggable(Level.FINEST)) {
+          // Include parameter values if logging level is set to FINEST or higher.
+          exception.setStatement(statement.toString());
+          logger.log(Level.FINEST, "Error executing statement", exception);
+        } else {
+          exception.setStatement(statement.getSql());
+        }
+      }
+      error = exception;
+      addToStream(END_OF_STREAM);
+    }
+
+    @Override
+    public boolean cancelQueryWhenClientIsClosed() {
+      return this.cancelQueryWhenClientIsClosed;
+    }
+  }
+
+  private void onStreamMessage(PartialResultSet partialResultSet) {
+    Optional.ofNullable(streamMessageListener)
+        .ifPresent(sl -> sl.onStreamMessage(partialResultSet, stream.remainingCapacity() <= 1));
+  }
+}
diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcStruct.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcStruct.java
new file mode 100644
index 000000000000..6f0a54039b79
--- /dev/null
+++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcStruct.java
@@ -0,0 +1,911 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.AbstractResultSet.throwNotNull; +import static com.google.cloud.spanner.AbstractResultSet.valueProtoToFloat32; +import static com.google.cloud.spanner.AbstractResultSet.valueProtoToFloat64; +import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException; +import static com.google.common.base.Preconditions.checkArgument; + +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AbstractResultSet.Float32Array; +import com.google.cloud.spanner.AbstractResultSet.Float64Array; +import com.google.cloud.spanner.AbstractResultSet.Int64Array; +import com.google.cloud.spanner.AbstractResultSet.LazyByteArray; +import com.google.cloud.spanner.Type.Code; +import com.google.cloud.spanner.Type.StructField; +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import com.google.common.io.CharSource; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ListValue; +import com.google.protobuf.NullValue; +import com.google.protobuf.ProtocolMessageEnum; +import com.google.protobuf.Value.KindCase; +import java.io.IOException; +import java.io.Serializable; +import java.math.BigDecimal; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Base64; +import java.util.BitSet; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.UUID; +import 
java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; +import java.util.stream.Collectors; + +class GrpcStruct extends Struct implements Serializable { + private static final com.google.protobuf.Value NULL_VALUE = + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build(); + + private final Type type; + private final List rowData; + private final DecodeMode decodeMode; + private final BitSet colDecoded; + private final AtomicBoolean rowDecoded; + + /** + * Builds an immutable version of this struct using {@link Struct#newBuilder()} which is used as a + * serialization proxy. + */ + private Object writeReplace() { + Builder builder = Struct.newBuilder(); + List structFields = getType().getStructFields(); + for (int i = 0; i < structFields.size(); i++) { + Type.StructField field = structFields.get(i); + String fieldName = field.getName(); + ensureDecoded(i); + Object value = rowData.get(i); + Type fieldType = field.getType(); + switch (fieldType.getCode()) { + case BOOL: + builder.set(fieldName).to((Boolean) value); + break; + case INT64: + builder.set(fieldName).to((Long) value); + break; + case FLOAT64: + builder.set(fieldName).to((Double) value); + break; + case FLOAT32: + builder.set(fieldName).to((Float) value); + break; + case NUMERIC: + builder.set(fieldName).to((BigDecimal) value); + break; + case PG_NUMERIC: + builder.set(fieldName).to((String) value); + break; + case STRING: + builder.set(fieldName).to((String) value); + break; + case JSON: + builder.set(fieldName).to(Value.json((String) value)); + break; + case PROTO: + builder + .set(fieldName) + .to( + Value.protoMessage( + value == null ? 
null : ((LazyByteArray) value).getByteArray(), + fieldType.getProtoTypeFqn())); + break; + case ENUM: + builder.set(fieldName).to(Value.protoEnum((Long) value, fieldType.getProtoTypeFqn())); + break; + case PG_JSONB: + builder.set(fieldName).to(Value.pgJsonb((String) value)); + break; + case PG_OID: + builder.set(fieldName).to(Value.pgOid((Long) value)); + break; + case BYTES: + builder + .set(fieldName) + .to( + Value.bytesFromBase64( + value == null ? null : ((LazyByteArray) value).getBase64String())); + break; + case TIMESTAMP: + builder.set(fieldName).to((Timestamp) value); + break; + case DATE: + builder.set(fieldName).to((Date) value); + break; + case UUID: + builder.set(fieldName).to((UUID) value); + break; + case INTERVAL: + builder.set(fieldName).to((Interval) value); + break; + case ARRAY: + final Type elementType = fieldType.getArrayElementType(); + switch (elementType.getCode()) { + case BOOL: + builder.set(fieldName).toBoolArray((Iterable) value); + break; + case INT64: + case ENUM: + builder.set(fieldName).toInt64Array((Iterable) value); + break; + case FLOAT64: + builder.set(fieldName).toFloat64Array((Iterable) value); + break; + case FLOAT32: + builder.set(fieldName).toFloat32Array((Iterable) value); + break; + case NUMERIC: + builder.set(fieldName).toNumericArray((Iterable) value); + break; + case PG_NUMERIC: + builder.set(fieldName).toPgNumericArray((Iterable) value); + break; + case STRING: + builder.set(fieldName).toStringArray((Iterable) value); + break; + case JSON: + builder.set(fieldName).toJsonArray((Iterable) value); + break; + case PG_JSONB: + builder.set(fieldName).toPgJsonbArray((Iterable) value); + break; + case PG_OID: + builder.set(fieldName).toPgOidArray((Iterable) value); + break; + case BYTES: + case PROTO: + builder + .set(fieldName) + .toBytesArrayFromBase64( + value == null + ? null + : ((List) value) + .stream() + .map( + element -> element == null ? 
null : element.getBase64String()) + .collect(Collectors.toList())); + break; + case TIMESTAMP: + builder.set(fieldName).toTimestampArray((Iterable) value); + break; + case DATE: + builder.set(fieldName).toDateArray((Iterable) value); + break; + case UUID: + builder.set(fieldName).toUuidArray((Iterable) value); + break; + case INTERVAL: + builder.set(fieldName).toIntervalArray((Iterable) value); + break; + case STRUCT: + builder.set(fieldName).toStructArray(elementType, (Iterable) value); + break; + default: + throw new AssertionError("Unhandled array type code: " + elementType); + } + break; + case STRUCT: + if (value == null) { + builder.set(fieldName).to(fieldType, null); + } else { + builder.set(fieldName).to((Struct) value); + } + break; + default: + throw new AssertionError("Unhandled type code: " + fieldType.getCode()); + } + } + return builder.build(); + } + + GrpcStruct(Type type, List rowData, DecodeMode decodeMode) { + this( + type, + rowData, + decodeMode, + /* rowDecoded= */ false, + /* colDecoded= */ decodeMode == DecodeMode.LAZY_PER_COL + ? 
new BitSet(type.getStructFields().size()) + : null); + } + + private GrpcStruct( + Type type, + List rowData, + DecodeMode decodeMode, + boolean rowDecoded, + BitSet colDecoded) { + this.type = type; + this.rowData = rowData; + this.decodeMode = decodeMode; + this.rowDecoded = new AtomicBoolean(rowDecoded); + this.colDecoded = colDecoded; + } + + @Override + public String toString() { + return this.rowData.toString(); + } + + boolean consumeRow(Iterator iterator) { + synchronized (rowData) { + rowData.clear(); + if (decodeMode == DecodeMode.LAZY_PER_ROW) { + rowDecoded.set(false); + } else if (decodeMode == DecodeMode.LAZY_PER_COL) { + colDecoded.clear(); + } + if (!iterator.hasNext()) { + return false; + } + for (Type.StructField fieldType : getType().getStructFields()) { + if (!iterator.hasNext()) { + throw newSpannerException( + ErrorCode.INTERNAL, + "Invalid value stream: end of stream reached before row is complete"); + } + com.google.protobuf.Value value = iterator.next(); + if (decodeMode == DecodeMode.DIRECT) { + rowData.add(decodeValue(fieldType.getType(), value)); + } else { + rowData.add(value); + } + } + return true; + } + } + + private static Object decodeValue(Type fieldType, com.google.protobuf.Value proto) { + if (proto.getKindCase() == KindCase.NULL_VALUE) { + return null; + } + switch (fieldType.getCode()) { + case BOOL: + checkType(fieldType, proto, KindCase.BOOL_VALUE); + return proto.getBoolValue(); + case INT64: + case PG_OID: + case ENUM: + checkType(fieldType, proto, KindCase.STRING_VALUE); + return Long.parseLong(proto.getStringValue()); + case FLOAT64: + return valueProtoToFloat64(proto); + case FLOAT32: + return valueProtoToFloat32(proto); + case NUMERIC: + checkType(fieldType, proto, KindCase.STRING_VALUE); + return new BigDecimal(proto.getStringValue()); + case PG_NUMERIC: + case STRING: + case JSON: + case PG_JSONB: + checkType(fieldType, proto, KindCase.STRING_VALUE); + return proto.getStringValue(); + case BYTES: + case PROTO: + 
checkType(fieldType, proto, KindCase.STRING_VALUE); + return new LazyByteArray(proto.getStringValue()); + case TIMESTAMP: + checkType(fieldType, proto, KindCase.STRING_VALUE); + return Timestamp.parseTimestamp(proto.getStringValue()); + case DATE: + checkType(fieldType, proto, KindCase.STRING_VALUE); + return Date.parseDate(proto.getStringValue()); + case UUID: + checkType(fieldType, proto, KindCase.STRING_VALUE); + return UUID.fromString(proto.getStringValue()); + case INTERVAL: + checkType(fieldType, proto, KindCase.STRING_VALUE); + return Interval.parseFromString(proto.getStringValue()); + case ARRAY: + checkType(fieldType, proto, KindCase.LIST_VALUE); + ListValue listValue = proto.getListValue(); + return decodeArrayValue(fieldType.getArrayElementType(), listValue); + case STRUCT: + checkType(fieldType, proto, KindCase.LIST_VALUE); + ListValue structValue = proto.getListValue(); + return decodeStructValue(fieldType, structValue); + case UNRECOGNIZED: + return proto; + default: + throw new AssertionError("Unhandled type code: " + fieldType.getCode()); + } + } + + private static Struct decodeStructValue(Type structType, ListValue structValue) { + List fieldTypes = structType.getStructFields(); + checkArgument( + structValue.getValuesCount() == fieldTypes.size(), + "Size mismatch between type descriptor and actual values."); + List fields = new ArrayList<>(fieldTypes.size()); + List fieldValues = structValue.getValuesList(); + for (int i = 0; i < fieldTypes.size(); ++i) { + fields.add(decodeValue(fieldTypes.get(i).getType(), fieldValues.get(i))); + } + return new GrpcStruct(structType, fields, DecodeMode.DIRECT); + } + + static Object decodeArrayValue(Type elementType, ListValue listValue) { + switch (elementType.getCode()) { + case INT64: + case PG_OID: + case ENUM: + // For int64/float64/float32/enum types, use custom containers. + // These avoid wrapper object creation for non-null arrays. 
+ return new Int64Array(listValue); + case FLOAT64: + return new Float64Array(listValue); + case FLOAT32: + return new Float32Array(listValue); + case BOOL: + case NUMERIC: + case PG_NUMERIC: + case STRING: + case JSON: + case PG_JSONB: + case BYTES: + case TIMESTAMP: + case DATE: + case UUID: + case INTERVAL: + case STRUCT: + case PROTO: + return Lists.transform(listValue.getValuesList(), input -> decodeValue(elementType, input)); + default: + throw new AssertionError("Unhandled type code: " + elementType.getCode()); + } + } + + private static void checkType( + Type fieldType, com.google.protobuf.Value proto, KindCase expected) { + if (proto.getKindCase() != expected) { + throw newSpannerException( + ErrorCode.INTERNAL, + "Invalid value for column type " + + fieldType + + " expected " + + expected + + " but was " + + proto.getKindCase()); + } + } + + Struct immutableCopy() { + synchronized (rowData) { + return new GrpcStruct( + type, + this.decodeMode == DecodeMode.DIRECT + ? new ArrayList<>(rowData) + : Collections.synchronizedList(new ArrayList<>(rowData)), + this.decodeMode, + this.rowDecoded.get(), + this.colDecoded == null ? null : (BitSet) this.colDecoded.clone()); + } + } + + @Override + public Type getType() { + return type; + } + + @Override + public boolean isNull(int columnIndex) { + if (decodeMode == DecodeMode.LAZY_PER_ROW || decodeMode == DecodeMode.LAZY_PER_COL) { + synchronized (rowData) { + if ((decodeMode == DecodeMode.LAZY_PER_ROW && !rowDecoded.get()) + || (decodeMode == DecodeMode.LAZY_PER_COL && !colDecoded.get(columnIndex))) { + return ((com.google.protobuf.Value) rowData.get(columnIndex)).hasNullValue(); + } + return rowData.get(columnIndex) == null; + } + } + return rowData.get(columnIndex) == null; + } + + @Override + protected T getProtoMessageInternal(int columnIndex, T message) { + Preconditions.checkNotNull( + message, + "Proto message may not be null. 
Use MyProtoClass.getDefaultInstance() as a parameter" + + " value."); + ensureDecoded(columnIndex); + try { + return (T) + message.toBuilder() + .mergeFrom( + Base64.getDecoder() + .wrap( + CharSource.wrap( + ((LazyByteArray) rowData.get(columnIndex)).getBase64String()) + .asByteSource(StandardCharsets.UTF_8) + .openStream())) + .build(); + } catch (IOException ioException) { + throw SpannerExceptionFactory.asSpannerException(ioException); + } + } + + @Override + protected T getProtoEnumInternal( + int columnIndex, Function method) { + Preconditions.checkNotNull( + method, "Method may not be null. Use 'MyProtoEnum::forNumber' as a parameter value."); + return (T) method.apply((int) getLongInternal(columnIndex)); + } + + @Override + protected boolean getBooleanInternal(int columnIndex) { + ensureDecoded(columnIndex); + return (Boolean) rowData.get(columnIndex); + } + + @Override + protected long getLongInternal(int columnIndex) { + ensureDecoded(columnIndex); + return (Long) rowData.get(columnIndex); + } + + @Override + protected double getDoubleInternal(int columnIndex) { + ensureDecoded(columnIndex); + return (Double) rowData.get(columnIndex); + } + + @Override + protected float getFloatInternal(int columnIndex) { + ensureDecoded(columnIndex); + return (Float) rowData.get(columnIndex); + } + + @Override + protected BigDecimal getBigDecimalInternal(int columnIndex) { + ensureDecoded(columnIndex); + return (BigDecimal) rowData.get(columnIndex); + } + + @Override + protected String getStringInternal(int columnIndex) { + ensureDecoded(columnIndex); + return (String) rowData.get(columnIndex); + } + + @Override + protected String getJsonInternal(int columnIndex) { + ensureDecoded(columnIndex); + return (String) rowData.get(columnIndex); + } + + @Override + protected String getPgJsonbInternal(int columnIndex) { + ensureDecoded(columnIndex); + return (String) rowData.get(columnIndex); + } + + @Override + protected ByteArray getBytesInternal(int columnIndex) { + 
ensureDecoded(columnIndex); + return getLazyBytesInternal(columnIndex).getByteArray(); + } + + LazyByteArray getLazyBytesInternal(int columnIndex) { + ensureDecoded(columnIndex); + return (LazyByteArray) rowData.get(columnIndex); + } + + @Override + protected Timestamp getTimestampInternal(int columnIndex) { + ensureDecoded(columnIndex); + return (Timestamp) rowData.get(columnIndex); + } + + @Override + protected Date getDateInternal(int columnIndex) { + ensureDecoded(columnIndex); + return (Date) rowData.get(columnIndex); + } + + @Override + protected UUID getUuidInternal(int columnIndex) { + ensureDecoded(columnIndex); + return (UUID) rowData.get(columnIndex); + } + + @Override + protected Interval getIntervalInternal(int columnIndex) { + ensureDecoded(columnIndex); + return (Interval) rowData.get(columnIndex); + } + + private boolean isUnrecognizedType(int columnIndex) { + return type.getStructFields().get(columnIndex).getType().getCode() == Code.UNRECOGNIZED; + } + + boolean canGetProtoValue(int columnIndex) { + synchronized (rowData) { + return isUnrecognizedType(columnIndex) + || (decodeMode == DecodeMode.LAZY_PER_ROW && !rowDecoded.get()) + || (decodeMode == DecodeMode.LAZY_PER_COL && !colDecoded.get(columnIndex)); + } + } + + protected com.google.protobuf.Value getProtoValueInternal(int columnIndex) { + synchronized (rowData) { + checkProtoValueSupported(columnIndex); + return (com.google.protobuf.Value) rowData.get(columnIndex); + } + } + + private void checkProtoValueSupported(int columnIndex) { + // Unrecognized types are returned as protobuf values. 
+ if (isUnrecognizedType(columnIndex)) { + return; + } + Preconditions.checkState( + decodeMode != DecodeMode.DIRECT, + "Getting proto value is not supported when DecodeMode#DIRECT is used."); + Preconditions.checkState( + !(decodeMode == DecodeMode.LAZY_PER_ROW && rowDecoded.get()), + "Getting proto value after the row has been decoded is not supported."); + Preconditions.checkState( + !(decodeMode == DecodeMode.LAZY_PER_COL && colDecoded.get(columnIndex)), + "Getting proto value after the column has been decoded is not supported."); + } + + private void ensureDecoded(int columnIndex) { + if (decodeMode == DecodeMode.LAZY_PER_ROW) { + synchronized (rowData) { + if (!rowDecoded.get()) { + for (int i = 0; i < rowData.size(); i++) { + rowData.set( + i, + decodeValue( + type.getStructFields().get(i).getType(), + (com.google.protobuf.Value) rowData.get(i))); + } + } + rowDecoded.set(true); + } + } else if (decodeMode == DecodeMode.LAZY_PER_COL) { + boolean decoded; + Object value; + synchronized (rowData) { + decoded = colDecoded.get(columnIndex); + value = rowData.get(columnIndex); + } + if (!decoded) { + // Use the column as a lock during decoding to ensure that we decode once (mostly), but also + // that multiple different columns can be decoded in parallel if requested. + synchronized (type.getStructFields().get(columnIndex)) { + // Note: It can be that we decode the value twice if two threads request this at the same + // time, but the synchronization on rowData above and below makes sure that we always get + // and set a consistent value (and only set it once). 
+ if (!colDecoded.get(columnIndex)) { + value = + decodeValue( + type.getStructFields().get(columnIndex).getType(), + (com.google.protobuf.Value) value); + decoded = true; + } + } + if (decoded) { + synchronized (rowData) { + rowData.set(columnIndex, value); + colDecoded.set(columnIndex); + } + } + } + } + } + + @Override + protected Value getValueInternal(int columnIndex) { + ensureDecoded(columnIndex); + final List structFields = getType().getStructFields(); + final StructField structField = structFields.get(columnIndex); + final Type columnType = structField.getType(); + final boolean isNull = rowData.get(columnIndex) == null; + switch (columnType.getCode()) { + case BOOL: + return Value.bool(isNull ? null : getBooleanInternal(columnIndex)); + case INT64: + return Value.int64(isNull ? null : getLongInternal(columnIndex)); + case ENUM: + return Value.protoEnum( + isNull ? null : getLongInternal(columnIndex), columnType.getProtoTypeFqn()); + case NUMERIC: + return Value.numeric(isNull ? null : getBigDecimalInternal(columnIndex)); + case PG_NUMERIC: + return Value.pgNumeric(isNull ? null : getStringInternal(columnIndex)); + case FLOAT64: + return Value.float64(isNull ? null : getDoubleInternal(columnIndex)); + case FLOAT32: + return Value.float32(isNull ? null : getFloatInternal(columnIndex)); + case STRING: + return Value.string(isNull ? null : getStringInternal(columnIndex)); + case JSON: + return Value.json(isNull ? null : getJsonInternal(columnIndex)); + case PG_JSONB: + return Value.pgJsonb(isNull ? null : getPgJsonbInternal(columnIndex)); + case PG_OID: + return Value.pgOid(isNull ? null : getLongInternal(columnIndex)); + case BYTES: + return Value.internalBytes(isNull ? null : getLazyBytesInternal(columnIndex)); + case PROTO: + return Value.protoMessage( + isNull ? null : getBytesInternal(columnIndex), columnType.getProtoTypeFqn()); + case TIMESTAMP: + return Value.timestamp(isNull ? 
null : getTimestampInternal(columnIndex)); + case DATE: + return Value.date(isNull ? null : getDateInternal(columnIndex)); + case UUID: + return Value.uuid(isNull ? null : getUuidInternal(columnIndex)); + case INTERVAL: + return Value.interval(isNull ? null : getIntervalInternal(columnIndex)); + case STRUCT: + return Value.struct(isNull ? null : getStructInternal(columnIndex)); + case UNRECOGNIZED: + return Value.unrecognized( + isNull ? NULL_VALUE : getProtoValueInternal(columnIndex), columnType); + case ARRAY: + final Type elementType = columnType.getArrayElementType(); + switch (elementType.getCode()) { + case BOOL: + return Value.boolArray(isNull ? null : getBooleanListInternal(columnIndex)); + case INT64: + return Value.int64Array(isNull ? null : getLongListInternal(columnIndex)); + case NUMERIC: + return Value.numericArray(isNull ? null : getBigDecimalListInternal(columnIndex)); + case PG_NUMERIC: + return Value.pgNumericArray(isNull ? null : getStringListInternal(columnIndex)); + case FLOAT64: + return Value.float64Array(isNull ? null : getDoubleListInternal(columnIndex)); + case FLOAT32: + return Value.float32Array(isNull ? null : getFloatListInternal(columnIndex)); + case STRING: + return Value.stringArray(isNull ? null : getStringListInternal(columnIndex)); + case JSON: + return Value.jsonArray(isNull ? null : getJsonListInternal(columnIndex)); + case PG_JSONB: + return Value.pgJsonbArray(isNull ? null : getPgJsonbListInternal(columnIndex)); + case PG_OID: + return Value.pgOidArray(isNull ? null : getLongListInternal(columnIndex)); + case BYTES: + return Value.bytesArray(isNull ? null : getBytesListInternal(columnIndex)); + case PROTO: + return Value.protoMessageArray( + isNull ? null : getBytesListInternal(columnIndex), elementType.getProtoTypeFqn()); + case ENUM: + return Value.protoEnumArray( + isNull ? null : getLongListInternal(columnIndex), elementType.getProtoTypeFqn()); + case TIMESTAMP: + return Value.timestampArray(isNull ? 
null : getTimestampListInternal(columnIndex)); + case DATE: + return Value.dateArray(isNull ? null : getDateListInternal(columnIndex)); + case UUID: + return Value.uuidArray(isNull ? null : getUuidListInternal(columnIndex)); + case INTERVAL: + return Value.intervalArray(isNull ? null : getIntervalListInternal(columnIndex)); + case STRUCT: + return Value.structArray( + elementType, isNull ? null : getStructListInternal(columnIndex)); + default: + throw new IllegalArgumentException( + "Invalid array value type " + this.type.getArrayElementType()); + } + default: + throw new IllegalArgumentException("Invalid value type " + this.type); + } + } + + @Override + protected Struct getStructInternal(int columnIndex) { + ensureDecoded(columnIndex); + return (Struct) rowData.get(columnIndex); + } + + @Override + protected boolean[] getBooleanArrayInternal(int columnIndex) { + ensureDecoded(columnIndex); + @SuppressWarnings("unchecked") // We know ARRAY produces a List. + List values = (List) rowData.get(columnIndex); + boolean[] r = new boolean[values.size()]; + for (int i = 0; i < values.size(); ++i) { + if (values.get(i) == null) { + throw throwNotNull(columnIndex); + } + r[i] = values.get(i); + } + return r; + } + + @Override + @SuppressWarnings("unchecked") // We know ARRAY produces a List. 
+ protected List getBooleanListInternal(int columnIndex) { + ensureDecoded(columnIndex); + return Collections.unmodifiableList((List) rowData.get(columnIndex)); + } + + @Override + protected long[] getLongArrayInternal(int columnIndex) { + ensureDecoded(columnIndex); + return getLongListInternal(columnIndex).toPrimitiveArray(columnIndex); + } + + @Override + protected Int64Array getLongListInternal(int columnIndex) { + ensureDecoded(columnIndex); + return (Int64Array) rowData.get(columnIndex); + } + + @Override + protected double[] getDoubleArrayInternal(int columnIndex) { + ensureDecoded(columnIndex); + return getDoubleListInternal(columnIndex).toPrimitiveArray(columnIndex); + } + + @Override + protected Float64Array getDoubleListInternal(int columnIndex) { + ensureDecoded(columnIndex); + return (Float64Array) rowData.get(columnIndex); + } + + @Override + protected float[] getFloatArrayInternal(int columnIndex) { + ensureDecoded(columnIndex); + return getFloatListInternal(columnIndex).toPrimitiveArray(columnIndex); + } + + @Override + protected Float32Array getFloatListInternal(int columnIndex) { + ensureDecoded(columnIndex); + return (Float32Array) rowData.get(columnIndex); + } + + @Override + @SuppressWarnings("unchecked") // We know ARRAY produces a List. + protected List getBigDecimalListInternal(int columnIndex) { + ensureDecoded(columnIndex); + return (List) rowData.get(columnIndex); + } + + @Override + @SuppressWarnings("unchecked") // We know ARRAY produces a List. + protected List getStringListInternal(int columnIndex) { + ensureDecoded(columnIndex); + return Collections.unmodifiableList((List) rowData.get(columnIndex)); + } + + @Override + @SuppressWarnings("unchecked") // We know ARRAY produces a List. + protected List getJsonListInternal(int columnIndex) { + ensureDecoded(columnIndex); + return Collections.unmodifiableList((List) rowData.get(columnIndex)); + } + + @Override + @SuppressWarnings("unchecked") // We know ARRAY produces a List. 
+ protected List getProtoMessageListInternal( + int columnIndex, T message) { + Preconditions.checkNotNull( + message, + "Proto message may not be null. Use MyProtoClass.getDefaultInstance() as a parameter" + + " value."); + ensureDecoded(columnIndex); + + List bytesArray = (List) rowData.get(columnIndex); + + try { + List protoMessagesList = new ArrayList<>(bytesArray.size()); + for (LazyByteArray protoMessageBytes : bytesArray) { + if (protoMessageBytes == null) { + protoMessagesList.add(null); + } else { + protoMessagesList.add( + (T) + message.toBuilder() + .mergeFrom( + Base64.getDecoder() + .wrap( + CharSource.wrap(protoMessageBytes.getBase64String()) + .asByteSource(StandardCharsets.UTF_8) + .openStream())) + .build()); + } + } + return protoMessagesList; + } catch (IOException ioException) { + throw SpannerExceptionFactory.asSpannerException(ioException); + } + } + + @Override + @SuppressWarnings("unchecked") // We know ARRAY produces a List. + protected List getProtoEnumListInternal( + int columnIndex, Function method) { + Preconditions.checkNotNull( + method, "Method may not be null. Use 'MyProtoEnum::forNumber' as a parameter value."); + ensureDecoded(columnIndex); + + List enumIntArray = (List) rowData.get(columnIndex); + List protoEnumList = new ArrayList<>(enumIntArray.size()); + for (Long enumIntValue : enumIntArray) { + if (enumIntValue == null) { + protoEnumList.add(null); + } else { + protoEnumList.add((T) method.apply(enumIntValue.intValue())); + } + } + + return protoEnumList; + } + + @Override + @SuppressWarnings("unchecked") // We know ARRAY produces a List. + protected List getPgJsonbListInternal(int columnIndex) { + ensureDecoded(columnIndex); + return Collections.unmodifiableList((List) rowData.get(columnIndex)); + } + + @Override + @SuppressWarnings("unchecked") // We know ARRAY produces a List. 
+ protected List getBytesListInternal(int columnIndex) { + ensureDecoded(columnIndex); + return Lists.transform( + (List) rowData.get(columnIndex), l -> l == null ? null : l.getByteArray()); + } + + @Override + @SuppressWarnings("unchecked") // We know ARRAY produces a List. + protected List getTimestampListInternal(int columnIndex) { + ensureDecoded(columnIndex); + return Collections.unmodifiableList((List) rowData.get(columnIndex)); + } + + @Override + @SuppressWarnings("unchecked") // We know ARRAY produces a List. + protected List getDateListInternal(int columnIndex) { + ensureDecoded(columnIndex); + return Collections.unmodifiableList((List) rowData.get(columnIndex)); + } + + @Override + @SuppressWarnings("unchecked") // We know ARRAY produces a List. + protected List getUuidListInternal(int columnIndex) { + ensureDecoded(columnIndex); + return Collections.unmodifiableList((List) rowData.get(columnIndex)); + } + + @Override + @SuppressWarnings("unchecked") // We know ARRAY produces a List. + protected List getIntervalListInternal(int columnIndex) { + ensureDecoded(columnIndex); + return Collections.unmodifiableList((List) rowData.get(columnIndex)); + } + + @Override + @SuppressWarnings("unchecked") // We know ARRAY> produces a List. + protected List getStructListInternal(int columnIndex) { + ensureDecoded(columnIndex); + return Collections.unmodifiableList((List) rowData.get(columnIndex)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcValueIterator.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcValueIterator.java new file mode 100644 index 000000000000..09b850c93f3c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcValueIterator.java @@ -0,0 +1,229 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException; +import static com.google.common.base.Preconditions.checkState; + +import com.google.cloud.spanner.AbstractResultSet.CloseableIterator; +import com.google.cloud.spanner.AbstractResultSet.Listener; +import com.google.common.collect.AbstractIterator; +import com.google.protobuf.ListValue; +import com.google.protobuf.Value.KindCase; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.TypeCode; +import java.util.ArrayList; +import java.util.List; +import javax.annotation.Nullable; + +/** Adapts a stream of {@code PartialResultSet} messages into a stream of {@code Value} messages. 
*/ +class GrpcValueIterator extends AbstractIterator { + private enum StreamValue { + METADATA, + RESULT, + } + + private final CloseableIterator stream; + private ResultSetMetadata metadata; + private Type type; + private PartialResultSet current; + private int pos; + private ResultSetStats statistics; + private final Listener listener; + + GrpcValueIterator(CloseableIterator stream, Listener listener) { + this.stream = stream; + this.listener = listener; + } + + @SuppressWarnings("unchecked") + @Override + protected com.google.protobuf.Value computeNext() { + if (!ensureReady(StreamValue.RESULT)) { + endOfData(); + return null; + } + com.google.protobuf.Value value = current.getValues(pos++); + KindCase kind = value.getKindCase(); + + if (!isMergeable(kind)) { + if (pos == current.getValuesCount() && current.getChunkedValue()) { + throw newSpannerException(ErrorCode.INTERNAL, "Unexpected chunked PartialResultSet."); + } else { + return value; + } + } + if (!current.getChunkedValue() || pos != current.getValuesCount()) { + return value; + } + + Object merged = + kind == KindCase.STRING_VALUE + ? value.getStringValue() + : new ArrayList<>(value.getListValue().getValuesList()); + while (current.getChunkedValue() && pos == current.getValuesCount()) { + if (!ensureReady(StreamValue.RESULT)) { + throw newSpannerException( + ErrorCode.INTERNAL, "Stream closed in the middle of chunked value"); + } + com.google.protobuf.Value newValue = current.getValues(pos++); + if (newValue.getKindCase() != kind) { + throw newSpannerException( + ErrorCode.INTERNAL, + "Unexpected type in middle of chunked value. 
Expected: " + + kind + + " but got: " + + newValue.getKindCase()); + } + if (kind == KindCase.STRING_VALUE) { + merged = merged + newValue.getStringValue(); + } else { + concatLists( + (List) merged, newValue.getListValue().getValuesList()); + } + } + if (kind == KindCase.STRING_VALUE) { + return com.google.protobuf.Value.newBuilder().setStringValue((String) merged).build(); + } else { + return com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder().addAllValues((List) merged)) + .build(); + } + } + + ResultSetMetadata getMetadata() throws SpannerException { + if (metadata == null) { + if (!ensureReady(StreamValue.METADATA)) { + throw newSpannerException(ErrorCode.INTERNAL, "Stream closed without sending metadata"); + } + } + return metadata; + } + + /** + * Get the query statistics. Query statistics are delivered with the last PartialResultSet in the + * stream. Any attempt to call this method before the caller has finished consuming the results + * will return null. + */ + @Nullable + ResultSetStats getStats() { + return statistics; + } + + boolean initiateStreaming(AsyncResultSet.StreamMessageListener streamMessageListener) { + return stream.initiateStreaming(streamMessageListener); + } + + Type type() { + checkState(type != null, "metadata has not been received"); + return type; + } + + private boolean ensureReady(StreamValue requiredValue) throws SpannerException { + while (current == null || pos >= current.getValuesCount()) { + if (!stream.hasNext()) { + return false; + } + current = stream.next(); + pos = 0; + if (type == null) { + // This is the first message on the stream. 
+ if (!current.hasMetadata() || !current.getMetadata().hasRowType()) { + throw newSpannerException(ErrorCode.INTERNAL, "Missing type metadata in first message"); + } + metadata = current.getMetadata(); + com.google.spanner.v1.Type typeProto = + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.STRUCT) + .setStructType(metadata.getRowType()) + .build(); + try { + type = Type.fromProto(typeProto); + } catch (IllegalArgumentException e) { + throw newSpannerException( + ErrorCode.INTERNAL, "Invalid type metadata: " + e.getMessage(), e); + } + } + // collect the precommit token from each PartialResultSet + if (current.hasPrecommitToken()) { + listener.onPrecommitToken(current.getPrecommitToken()); + } + if (current.hasStats()) { + statistics = current.getStats(); + } + if (requiredValue == StreamValue.METADATA) { + return true; + } + } + return true; + } + + void close(@Nullable String message) { + stream.close(message); + } + + boolean isWithBeginTransaction() { + return stream.isWithBeginTransaction(); + } + + boolean isLastStatement() { + return stream.isLastStatement(); + } + + /** + * @param a is a mutable list and b will be concatenated into a. 
+ */ + private void concatLists(List a, List b) { + if (a.size() == 0 || b.size() == 0) { + a.addAll(b); + return; + } else { + com.google.protobuf.Value last = a.get(a.size() - 1); + com.google.protobuf.Value first = b.get(0); + KindCase lastKind = last.getKindCase(); + KindCase firstKind = first.getKindCase(); + if (isMergeable(lastKind) && lastKind == firstKind) { + com.google.protobuf.Value merged; + if (lastKind == KindCase.STRING_VALUE) { + String lastStr = last.getStringValue(); + String firstStr = first.getStringValue(); + merged = + com.google.protobuf.Value.newBuilder().setStringValue(lastStr + firstStr).build(); + } else { // List + List mergedList = new ArrayList<>(); + mergedList.addAll(last.getListValue().getValuesList()); + concatLists(mergedList, first.getListValue().getValuesList()); + merged = + com.google.protobuf.Value.newBuilder() + .setListValue(ListValue.newBuilder().addAllValues(mergedList)) + .build(); + } + a.set(a.size() - 1, merged); + a.addAll(b.subList(1, b.size())); + } else { + a.addAll(b); + } + } + } + + private boolean isMergeable(KindCase kind) { + return kind == KindCase.STRING_VALUE || kind == KindCase.LIST_VALUE; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/IScope.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/IScope.java new file mode 100644 index 000000000000..cbefe47b8879 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/IScope.java @@ -0,0 +1,26 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +/** + * This interface represents a scope that wraps both OpenCensus and OpenTelemetry scopes. It extends + * the AutoCloseable interface and overrides the close method that does not throw an exception. + */ +interface IScope extends AutoCloseable { + @Override + void close(); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ISpan.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ISpan.java new file mode 100644 index 000000000000..ce837de0e580 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ISpan.java @@ -0,0 +1,45 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import java.util.Map; + +interface ISpan { + + /** + * Adds an annotation to the OpenCensus and OpenTelemetry span. + * + * @param message the description of the annotation event. 
+ * @param attributes the map of attribute key-value pairs that will be added; these are associated + * with this annotation. + */ + void addAnnotation(String message, Map attributes); + + void addAnnotation(String message); + + void addAnnotation(String message, String key, String value); + + void addAnnotation(String message, String key, long value); + + void addAnnotation(String message, Throwable e); + + void setStatus(Throwable e); + + void setStatus(ErrorCode errorCode); + + void end(); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Instance.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Instance.java new file mode 100644 index 000000000000..4e4bb0927021 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Instance.java @@ -0,0 +1,258 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.paging.Page; +import com.google.cloud.Policy; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Options.ListOption; +import com.google.longrunning.Operation; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import com.google.spanner.admin.instance.v1.AutoscalingConfig; +import com.google.spanner.admin.instance.v1.Instance.Edition; +import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata; +import java.util.Map; + +/** + * Represents a Cloud Spanner Instance. {@code Instance} adds a layer of service related + * functionality over {@code InstanceInfo}. + */ +public class Instance extends InstanceInfo { + + /** Builder of {@code Instance}. */ + public static class Builder extends InstanceInfo.Builder { + private final InstanceAdminClient instanceClient; + private final DatabaseAdminClient dbClient; + private final InstanceInfo.BuilderImpl infoBuilder; + + Builder(Instance instance) { + this.instanceClient = instance.instanceClient; + this.dbClient = instance.dbClient; + this.infoBuilder = new InstanceInfo.BuilderImpl(instance); + } + + Builder(InstanceAdminClient instanceClient, DatabaseAdminClient dbClient, InstanceId id) { + this.instanceClient = instanceClient; + this.dbClient = dbClient; + this.infoBuilder = new InstanceInfo.BuilderImpl(id); + } + + @Override + public Builder setInstanceConfigId(InstanceConfigId instanceConfigId) { + infoBuilder.setInstanceConfigId(instanceConfigId); + return this; + } + + @Override + public Builder setDisplayName(String displayName) { + infoBuilder.setDisplayName(displayName); + return this; + } + + @Override + Builder setUpdateTime(Timestamp updateTime) { + infoBuilder.setUpdateTime(updateTime); + return this; + } + + @Override + Builder setCreateTime(Timestamp createTime) { + infoBuilder.setCreateTime(createTime); + return this; + } + + @Override + 
public Builder setNodeCount(int nodeCount) { + infoBuilder.setNodeCount(nodeCount); + return this; + } + + @Override + public Builder setProcessingUnits(int processingUnits) { + infoBuilder.setProcessingUnits(processingUnits); + return this; + } + + @Override + public Builder setAutoscalingConfig(AutoscalingConfig autoscalingConfig) { + infoBuilder.setAutoscalingConfig(autoscalingConfig); + return this; + } + + @Override + public Builder setState(State state) { + infoBuilder.setState(state); + return this; + } + + @Override + public Builder addLabel(String key, String value) { + infoBuilder.addLabel(key, value); + return this; + } + + @Override + public Builder putAllLabels(Map labels) { + infoBuilder.putAllLabels(labels); + return this; + } + + @Override + public Builder setEdition(Edition edition) { + infoBuilder.setEdition(edition); + return this; + } + + @Override + public Instance build() { + return new Instance(this); + } + } + + private final InstanceAdminClient instanceClient; + private final DatabaseAdminClient dbClient; + + Instance(Builder builder) { + super(builder.infoBuilder); + this.instanceClient = builder.instanceClient; + this.dbClient = builder.dbClient; + } + + public Instance reload() { + return instanceClient.getInstance(instanceId()); + } + + public void delete() { + instanceClient.deleteInstance(instanceId()); + } + + public OperationFuture update( + InstanceInfo.InstanceField... fieldsToUpdate) { + return instanceClient.updateInstance(this, fieldsToUpdate); + } + + public Page listDatabases(ListOption... options) { + return dbClient.listDatabases(instanceId(), options); + } + + public Database getDatabase(String databaseId) { + return dbClient.getDatabase(instanceId(), databaseId); + } + + /** + * Creates a new database in this instance. + * + * @param databaseId the id of the database which will be created. 
It must conform to the regular + * expression [a-z][a-z0-9_\-]*[a-z0-9] and be between 2 and 30 characters in length + * @param statements DDL statements to run while creating the database, for example {@code CREATE + * TABLE MyTable ( ... )}. This should not include {@code CREATE DATABASE} statement. + */ + public OperationFuture createDatabase( + String databaseId, Iterable statements) throws SpannerException { + return dbClient.createDatabase(instanceId(), databaseId, statements); + } + + /** Returns the backups belonging to this instance. */ + public Page listBackups(ListOption... options) { + return dbClient.listBackups(instanceId(), options); + } + + /** Returns the backup with the given id on this instance. */ + public Backup getBackup(String backupId) { + return dbClient.getBackup(instanceId(), backupId); + } + + /** Returns the long-running database operations on this instance. */ + public Page listDatabaseOperations(ListOption... options) { + return dbClient.listDatabaseOperations(instanceId(), options); + } + + /** Returns the long-running backup operations on this instance. */ + public Page listBackupOperations(ListOption... options) { + return dbClient.listBackupOperations(instanceId(), options); + } + + /** Returns the IAM {@link Policy} for this instance. */ + public Policy getIAMPolicy() { + return instanceClient.getInstanceIAMPolicy(instanceId()); + } + + /** + * Updates the IAM policy for this instance and returns the resulting policy. It is highly + * recommended to first get the current policy and base the updated policy on the returned policy. + * See {@link Policy.Builder#setEtag(String)} for information on the recommended read-modify-write + * cycle. + */ + public Policy setIAMPolicy(Policy policy) { + return instanceClient.setInstanceIAMPolicy(instanceId(), policy); + } + + /** + * Tests for the given permissions on this instance for the caller. + * + * @param permissions the permissions to test for. 
Permissions with wildcards (such as '*', + * 'spanner.*', 'spanner.instances.*') are not allowed. + * @return the subset of the tested permissions that the caller is allowed. + */ + public Iterable testIAMPermissions(Iterable permissions) { + return instanceClient.testInstanceIAMPermissions(instanceId(), permissions); + } + + private String instanceId() { + return getId().getInstance(); + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + static Instance fromProto( + com.google.spanner.admin.instance.v1.Instance proto, + InstanceAdminClient instanceClient, + DatabaseAdminClient dbClient) { + InstanceId id = InstanceId.of(proto.getName()); + Builder builder = + new Builder(instanceClient, dbClient, id) + .setInstanceConfigId(InstanceConfigId.of(proto.getConfig())) + .setDisplayName(proto.getDisplayName()) + .setNodeCount(proto.getNodeCount()) + .setCreateTime(Timestamp.fromProto(proto.getCreateTime())) + .setUpdateTime(Timestamp.fromProto(proto.getUpdateTime())) + .setAutoscalingConfig(proto.getAutoscalingConfig()) + .setEdition(proto.getEdition()) + .setProcessingUnits(proto.getProcessingUnits()); + State state; + switch (proto.getState()) { + case STATE_UNSPECIFIED: + state = State.UNSPECIFIED; + break; + case CREATING: + state = State.CREATING; + break; + case READY: + state = State.READY; + break; + default: + throw new IllegalArgumentException("Unknown state:" + proto.getState()); + } + builder.setState(state); + builder.putAllLabels(proto.getLabelsMap()); + return builder.build(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceAdminClient.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceAdminClient.java new file mode 100644 index 000000000000..702b9c405982 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceAdminClient.java @@ -0,0 +1,381 @@ +/* + * Copyright 2017 Google LLC + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.paging.Page; +import com.google.cloud.Policy; +import com.google.cloud.spanner.Options.CreateAdminApiOption; +import com.google.cloud.spanner.Options.DeleteAdminApiOption; +import com.google.cloud.spanner.Options.ListOption; +import com.google.cloud.spanner.Options.UpdateAdminApiOption; +import com.google.longrunning.Operation; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.CreateInstanceMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata; + +/** Client to do admin operations on Cloud Spanner Instance and Instance Configs. */ +public interface InstanceAdminClient { + + /** + * Creates an instance config and begins preparing it to be used. The returned {@code Operation} + * can be used to track the progress of preparing the new instance config. The instance config + * name is assigned by the caller and must start with the string 'custom'. If the named instance + * config already exists, a SpannerException is thrown. + * + *

Immediately after the request returns: + * + *

    + *
  • The instance config is readable via the API, with all requested attributes. + *
  • The instance config's {@code reconciling} field is set to true. Its state is {@code + * CREATING}. + *
+ * + * While the operation is pending: + * + *
    + *
  • Cancelling the operation renders the instance config immediately unreadable via the API. + *
  • Except for deleting the creating resource, all other attempts to modify the instance + * config are rejected. + *
+ * + * Upon completion of the returned operation: + * + *
    + *
  • Instances can be created using the instance configuration. + *
  • The instance config's {@code reconciling} field becomes false. + *
  • Its state becomes {@code READY}. + *
+ * + * + * + *
{@code
+   * String projectId = "my-project";
+   * String baseInstanceConfig = "my-base-config";
+   * String instanceConfigId = "custom-user-config";
+   *
+   * final InstanceConfig baseConfig = instanceAdminClient.getInstanceConfig(baseInstanceConfig);
+   *
+   * List readOnlyReplicas = ImmutableList.of(baseConfig.getOptionalReplicas().get(0));
+   *
+   * InstanceConfigInfo instanceConfigInfo =
+   *     InstanceConfigInfo.newBuilder(InstanceConfigId.of(projectId, instanceConfigId), baseConfig)
+   *         .setDisplayName(instanceConfigId)
+   *         .addReadOnlyReplicas(readOnlyReplicas)
+   *         .build();
+   *
+   * final OperationFuture operation =
+   *     instanceAdminClient.createInstanceConfig(instanceConfigInfo);
+   *
+   * InstanceConfig instanceConfig = operation.get(5, TimeUnit.MINUTES);
+   * }
+ * + * + */ + default OperationFuture createInstanceConfig( + InstanceConfigInfo instanceConfig, CreateAdminApiOption... options) throws SpannerException { + throw new UnsupportedOperationException("Not implemented"); + } + + /** + * Updates a custom instance config. This can not be used to update a Google managed instance + * config. The returned {@code Operation} can be used to track the progress of updating the + * instance. If the named instance config does not exist, a SpannerException is thrown. The + * request must include at least one field to update. + * + *

Only user managed configurations can be updated. + * + *

Immediately after the request returns: + * + *

    + *
  • The instance config's {@code reconciling} field is set to true. + *
+ * + * While the operation is pending: + * + *
    + *
  • Cancelling the operation sets its metadata's cancel_time. + *
  • The operation is guaranteed to succeed at undoing all changes, after which point it + * terminates with a `CANCELLED` status. + *
  • All other attempts to modify the instance config are rejected. + *
  • Reading the instance config via the API continues to give the pre-request values. + *
+ * + * Upon completion of the returned operation: + * + *
    + *
  • Creating instances using the instance configuration uses the new values. + *
  • The instance config's new values are readable via the API. + *
  • The instance config's {@code reconciling} field becomes false. + *
+ * + * + * + *
{@code
+   * String projectId = "my-project";
+   * String instanceConfigId = "custom-user-config";
+   * String displayName = "my-display-name";
+   *
+   * InstanceConfigInfo instanceConfigInfo =
+   *     InstanceConfigInfo.newBuilder(InstanceConfigId.of(projectId, instanceConfigId))
+   *         .setDisplayName(displayName)
+   *         .build();
+   *
+   * // Only update display name.
+   * final OperationFuture operation =
+   *     instanceAdminClient.updateInstanceConfig(
+   *         instanceConfigInfo, ImmutableList.of(InstanceConfigField.DISPLAY_NAME));
+   *
+   * InstanceConfig instanceConfig = operation.get(5, TimeUnit.MINUTES);
+   * }
+ * + * + */ + default OperationFuture updateInstanceConfig( + InstanceConfigInfo instanceConfig, + Iterable fieldsToUpdate, + UpdateAdminApiOption... options) + throws SpannerException { + throw new UnsupportedOperationException("Not implemented"); + } + + /** Gets an instance config. */ + /* + *
{@code
+   * final String configId = my_config_id;
+   * InstanceConfig instanceConfig = instanceAdminClient.getInstanceConfig(configId);
+   * }
+ * + */ + InstanceConfig getInstanceConfig(String configId) throws SpannerException; + + /** + * Deletes a custom instance config. Deletion is only allowed for custom instance configs and when + * no instances are using the configuration. If any instances are using the config, a + * SpannerException is thrown. + * + *

Only user managed configurations can be deleted. + * + * + *

{@code
+   * String projectId = "my-project";
+   * String instanceConfigId = "custom-user-config";
+   *
+   * instanceAdminClient.deleteInstanceConfig(instanceConfigId);
+   * }
+ * + * + */ + default void deleteInstanceConfig(String instanceConfigId, DeleteAdminApiOption... options) + throws SpannerException { + throw new UnsupportedOperationException("Not implemented"); + } + + /** Lists the supported instance configs for current project. */ + /* + *
{@code
+   * List configs =
+   *     Lists.newArrayList(instanceAdminClient.listInstanceConfigs(Options.pageSize(1)).iterateAll());
+   * }
+ * + */ + Page listInstanceConfigs(ListOption... options) throws SpannerException; + + /** Lists long-running instance config operations. */ + default Page listInstanceConfigOperations(ListOption... options) { + throw new UnsupportedOperationException("Not implemented"); + } + + /** + * Creates an instance and begins preparing it to begin serving. The returned {@code Operation} + * can be used to track the progress of preparing the new instance. The instance name is assigned + * by the caller. If the named instance already exists, a SpannerException is thrown. Immediately + * upon completion of this request: + * + *
    + *
  • The instance is readable via the API, with all requested attributes but no allocated + * resources. + *
  • Its state is {@code CREATING}. + *
+ * + * Until completion of the returned operation: + * + *
    + *
  • Cancelling the operation renders the instance immediately unreadable via the API. + *
  • The instance can be deleted. + *
  • All other attempts to modify the instance are rejected. + *
+ * + * Upon completion of the returned operation: + * + *
    + *
  • Billing for all successfully-allocated resources begins (some types may have lower than + * the requested levels). + *
  • Databases can be created in the instance. + *
  • The instance's allocated resource levels are readable via the API. + *
+ * + * + * + *
{@code
+   * final String instanceId = my_instance_id;
+   * final String configId = my_config_id;
+   * final String clientProject = my_client_project;
+   *
+   * OperationFuture op =
+   *     instanceAdminClient.createInstance(InstanceInfo
+   *         .newBuilder(InstanceId.of(clientProject, instanceId))
+   *         .setInstanceConfigId(InstanceConfigId.of(clientProject, configId))
+   *         .setDisplayName(instanceId)
+   *         .setNodeCount(1)
+   *         .build());
+   * Instance instance = op.get();
+   * }
+ * + * + */ + OperationFuture createInstance(InstanceInfo instance) + throws SpannerException; + + /** Gets an instance. */ + /* + *
{@code
+   * final String instanceId = my_instance_id;
+   * Instance ins = instanceAdminClient.getInstance(instanceId);
+   * }
+ * + */ + Instance getInstance(String instanceId) throws SpannerException; + + /** + * Lists the instances. + * + * @param options Options to control the instances returned. It also supports {@link + * Options#filter(String)} option. The fields eligible for filtering are: + *
    + *
  • name + *
  • display_name + *
  • labels.key where key is the name of a label + *
+ * + *
{@code
+   * List instances =
+   *     Lists.newArrayList(
+   *         instanceAdminClient.listInstances(Options.pageSize(1)).iterateAll());
+   * }
+ * + */ + Page listInstances(ListOption... options) throws SpannerException; + + /** Deletes an instance. */ + /* + *
{@code
+   * final String instanceId = my_instance_id;
+   * instanceAdminClient.deleteInstance(instanceId);
+   * }
+ * + */ + void deleteInstance(String instanceId) throws SpannerException; + + /** + * Updates an instance, and begins allocating or releasing resources as requested. The returned + * {@code Operation} can be used to track the progress of updating the instance. If the named + * instance does not exist, throws SpannerException. + * + *

Immediately upon completion of this request: + * + *

    + *
  • For resource types for which a decrease in the instance's allocation has been requested, + * billing is based on the newly-requested level. + *
+ * + * Until completion of the returned operation: + * + *
    + *
  • Cancelling the operation sets its metadata's + * [cancel_time][UpdateInstanceMetadata.cancel_time], and begins restoring resources to + * their pre-request values. The operation is guaranteed to succeed at undoing all resource + * changes, after which point it terminates with a `CANCELLED` status. + *
  • All other attempts to modify the instance are rejected. + *
  • Reading the instance via the API continues to give the pre-request resource levels. + *
+ * + * Upon completion of the returned operation: + * + *
    + *
  • Billing begins for all successfully-allocated resources (some types may have lower than + * the requested levels). + *
  • All newly-reserved resources are available for serving the instance's tables. + *
  • The instance's new resource levels are readable via the API. + *
+ * + * + * + *
{@code
+   * Instance instance = my_instance;
+   * final String clientProject = my_client_project;
+   * final String instanceId = my_instance_id;
+   *
+   * final String newDisplayName = my_display_name;
+   *
+   * InstanceInfo toUpdate =
+   *     InstanceInfo.newBuilder(InstanceId.of(clientProject, instanceId))
+   *         .setDisplayName(newDisplayName)
+   *         .setNodeCount(instance.getNodeCount() + 1)
+   *         .build();
+   * // Only update display name
+   * OperationFuture op =
+   *     instanceAdminClient.updateInstance(toUpdate, InstanceInfo.InstanceField.DISPLAY_NAME);
+   * Instance updatedInstance = op.get();
+   * }
+ * + * + */ + OperationFuture updateInstance( + InstanceInfo instance, InstanceInfo.InstanceField... fieldsToUpdate); + + /** Returns the IAM policy for the given instance. */ + Policy getInstanceIAMPolicy(String instanceId); + + /** + * Updates the IAM policy for the given instance and returns the resulting policy. It is highly + * recommended to first get the current policy and base the updated policy on the returned policy. + * See {@link Policy.Builder#setEtag(String)} for information on the recommended read-modify-write + * cycle. + */ + Policy setInstanceIAMPolicy(String instanceId, Policy policy); + + /** + * Tests for the given permissions on the specified instance for the caller. + * + * @param instanceId the id of the instance to test. + * @param permissions the permissions to test for. Permissions with wildcards (such as '*', + * 'spanner.*', 'spanner.instances.*') are not allowed. + * @return the subset of the tested permissions that the caller is allowed. + */ + Iterable testInstanceIAMPermissions(String instanceId, Iterable permissions); + + /** Returns a builder for {@code Instance} object with the given id. */ + Instance.Builder newInstanceBuilder(InstanceId id); + + /** Cancels the specified long-running operation. */ + void cancelOperation(String name); + + /** Gets the specified long-running operation. */ + Operation getOperation(String name); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceAdminClientImpl.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceAdminClientImpl.java new file mode 100644 index 000000000000..4cceaffa3092 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceAdminClientImpl.java @@ -0,0 +1,316 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.gax.grpc.ProtoOperationTransformers; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.longrunning.OperationFutureImpl; +import com.google.api.gax.paging.Page; +import com.google.api.pathtemplate.PathTemplate; +import com.google.cloud.Policy; +import com.google.cloud.Policy.DefaultMarshaller; +import com.google.cloud.spanner.Options.CreateAdminApiOption; +import com.google.cloud.spanner.Options.DeleteAdminApiOption; +import com.google.cloud.spanner.Options.ListOption; +import com.google.cloud.spanner.Options.UpdateAdminApiOption; +import com.google.cloud.spanner.SpannerImpl.PageFetcher; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.cloud.spanner.spi.v1.SpannerRpc.Paginated; +import com.google.common.base.Preconditions; +import com.google.longrunning.Operation; +import com.google.protobuf.FieldMask; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.CreateInstanceMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata; + +/** Default implementation of {@link InstanceAdminClient} */ +class InstanceAdminClientImpl implements InstanceAdminClient { + private static final class PolicyMarshaller extends DefaultMarshaller { + @Override + protected Policy fromPb(com.google.iam.v1.Policy policyPb) { + return super.fromPb(policyPb); + } + + @Override + protected 
com.google.iam.v1.Policy toPb(Policy policy) { + return super.toPb(policy); + } + } + + private static final PathTemplate PROJECT_NAME_TEMPLATE = + PathTemplate.createWithoutUrlEncoding("projects/{project}"); + private final DatabaseAdminClient dbClient; + private final String projectId; + private final SpannerRpc rpc; + private final PolicyMarshaller policyMarshaller = new PolicyMarshaller(); + + InstanceAdminClientImpl(String projectId, SpannerRpc rpc, DatabaseAdminClient dbClient) { + this.projectId = projectId; + this.rpc = rpc; + this.dbClient = dbClient; + } + + @Override + public OperationFuture createInstanceConfig( + InstanceConfigInfo instanceConfig, CreateAdminApiOption... options) throws SpannerException { + final Options createAdminApiOptions = Options.fromAdminApiOptions(options); + String projectName = PROJECT_NAME_TEMPLATE.instantiate("project", projectId); + OperationFuture< + com.google.spanner.admin.instance.v1.InstanceConfig, CreateInstanceConfigMetadata> + rawOperationFuture = + rpc.createInstanceConfig( + projectName, + instanceConfig.getId().getInstanceConfig(), + instanceConfig.toProto(), + createAdminApiOptions.validateOnly()); + + return new OperationFutureImpl<>( + rawOperationFuture.getPollingFuture(), + rawOperationFuture.getInitialFuture(), + snapshot -> + InstanceConfig.fromProto( + ProtoOperationTransformers.ResponseTransformer.create( + com.google.spanner.admin.instance.v1.InstanceConfig.class) + .apply(snapshot), + InstanceAdminClientImpl.this), + ProtoOperationTransformers.MetadataTransformer.create(CreateInstanceConfigMetadata.class), + e -> { + throw SpannerExceptionFactory.newSpannerException(e); + }); + } + + @Override + public OperationFuture updateInstanceConfig( + InstanceConfigInfo instanceConfig, + Iterable fieldsToUpdate, + UpdateAdminApiOption... 
options) + throws SpannerException { + final Options deleteAdminApiOptions = Options.fromAdminApiOptions(options); + FieldMask fieldMask = InstanceConfigInfo.InstanceConfigField.toFieldMask(fieldsToUpdate); + + OperationFuture< + com.google.spanner.admin.instance.v1.InstanceConfig, UpdateInstanceConfigMetadata> + rawOperationFuture = + rpc.updateInstanceConfig( + instanceConfig.toProto(), deleteAdminApiOptions.validateOnly(), fieldMask); + return new OperationFutureImpl<>( + rawOperationFuture.getPollingFuture(), + rawOperationFuture.getInitialFuture(), + snapshot -> + InstanceConfig.fromProto( + ProtoOperationTransformers.ResponseTransformer.create( + com.google.spanner.admin.instance.v1.InstanceConfig.class) + .apply(snapshot), + InstanceAdminClientImpl.this), + ProtoOperationTransformers.MetadataTransformer.create(UpdateInstanceConfigMetadata.class), + e -> { + throw SpannerExceptionFactory.newSpannerException(e); + }); + } + + @Override + public InstanceConfig getInstanceConfig(String configId) throws SpannerException { + String instanceConfigName = new InstanceConfigId(projectId, configId).getName(); + return InstanceConfig.fromProto( + rpc.getInstanceConfig(instanceConfigName), InstanceAdminClientImpl.this); + } + + @Override + public void deleteInstanceConfig(final String instanceConfigId, DeleteAdminApiOption... options) + throws SpannerException { + final Options deleteAdminApiOptions = Options.fromAdminApiOptions(options); + rpc.deleteInstanceConfig( + new InstanceConfigId(projectId, instanceConfigId).getName(), + deleteAdminApiOptions.etag(), + deleteAdminApiOptions.validateOnly()); + } + + @Override + public Page listInstanceConfigs(ListOption... options) { + final Options listOptions = Options.fromListOptions(options); + Preconditions.checkArgument( + !listOptions.hasFilter(), "Filter option is not supported by listInstanceConfigs"); + final int pageSize = listOptions.hasPageSize() ? 
listOptions.pageSize() : 0; + PageFetcher pageFetcher = + new PageFetcher() { + @Override + public Paginated getNextPage( + String nextPageToken) { + return rpc.listInstanceConfigs(pageSize, nextPageToken); + } + + @Override + public InstanceConfig fromProto( + com.google.spanner.admin.instance.v1.InstanceConfig proto) { + return InstanceConfig.fromProto(proto, InstanceAdminClientImpl.this); + } + }; + if (listOptions.hasPageToken()) { + pageFetcher.setNextPageToken(listOptions.pageToken()); + } + return pageFetcher.getNextPage(); + } + + @Override + public final Page listInstanceConfigOperations(ListOption... options) { + final Options listOptions = Options.fromListOptions(options); + final int pageSize = listOptions.hasPageSize() ? listOptions.pageSize() : 0; + final String filter = listOptions.hasFilter() ? listOptions.filter() : null; + + PageFetcher pageFetcher = + new PageFetcher() { + @Override + public Paginated getNextPage(String nextPageToken) { + return rpc.listInstanceConfigOperations(pageSize, filter, nextPageToken); + } + + @Override + public Operation fromProto(Operation proto) { + return proto; + } + }; + if (listOptions.hasPageToken()) { + pageFetcher.setNextPageToken(listOptions.pageToken()); + } + return pageFetcher.getNextPage(); + } + + @Override + public OperationFuture createInstance(InstanceInfo instance) + throws SpannerException { + String projectName = PROJECT_NAME_TEMPLATE.instantiate("project", projectId); + OperationFuture + rawOperationFuture = + rpc.createInstance(projectName, instance.getId().getInstance(), instance.toProto()); + + return new OperationFutureImpl<>( + rawOperationFuture.getPollingFuture(), + rawOperationFuture.getInitialFuture(), + snapshot -> + Instance.fromProto( + ProtoOperationTransformers.ResponseTransformer.create( + com.google.spanner.admin.instance.v1.Instance.class) + .apply(snapshot), + InstanceAdminClientImpl.this, + dbClient), + 
ProtoOperationTransformers.MetadataTransformer.create(CreateInstanceMetadata.class), + e -> { + throw SpannerExceptionFactory.newSpannerException(e); + }); + } + + @Override + public Instance getInstance(String instanceId) throws SpannerException { + String instanceName = new InstanceId(projectId, instanceId).getName(); + return Instance.fromProto( + rpc.getInstance(instanceName), InstanceAdminClientImpl.this, dbClient); + } + + @Override + public Page listInstances(ListOption... options) throws SpannerException { + final Options listOptions = Options.fromListOptions(options); + final int pageSize = listOptions.hasPageSize() ? listOptions.pageSize() : 0; + final String filter = listOptions.filter(); + PageFetcher pageFetcher = + new PageFetcher() { + @Override + public Paginated getNextPage( + String nextPageToken) { + return rpc.listInstances(pageSize, nextPageToken, filter); + } + + @Override + public Instance fromProto(com.google.spanner.admin.instance.v1.Instance proto) { + return Instance.fromProto(proto, InstanceAdminClientImpl.this, dbClient); + } + }; + if (listOptions.hasPageToken()) { + pageFetcher.setNextPageToken(listOptions.pageToken()); + } + return pageFetcher.getNextPage(); + } + + @Override + public void deleteInstance(final String instanceId) throws SpannerException { + rpc.deleteInstance(new InstanceId(projectId, instanceId).getName()); + } + + @Override + public OperationFuture updateInstance( + InstanceInfo instance, InstanceInfo.InstanceField... fieldsToUpdate) { + FieldMask fieldMask = + fieldsToUpdate.length == 0 + ? 
InstanceInfo.InstanceField.toFieldMask( + InstanceInfo.InstanceField.defaultFieldsToUpdate(instance)) + : InstanceInfo.InstanceField.toFieldMask(fieldsToUpdate); + + OperationFuture + rawOperationFuture = rpc.updateInstance(instance.toProto(), fieldMask); + return new OperationFutureImpl<>( + rawOperationFuture.getPollingFuture(), + rawOperationFuture.getInitialFuture(), + snapshot -> + Instance.fromProto( + ProtoOperationTransformers.ResponseTransformer.create( + com.google.spanner.admin.instance.v1.Instance.class) + .apply(snapshot), + InstanceAdminClientImpl.this, + dbClient), + ProtoOperationTransformers.MetadataTransformer.create(UpdateInstanceMetadata.class), + e -> { + throw SpannerExceptionFactory.newSpannerException(e); + }); + } + + @Override + public Policy getInstanceIAMPolicy(String instanceId) { + String instanceName = InstanceId.of(projectId, instanceId).getName(); + return policyMarshaller.fromPb(rpc.getInstanceAdminIAMPolicy(instanceName)); + } + + @Override + public Policy setInstanceIAMPolicy(String instanceId, Policy policy) { + Preconditions.checkNotNull(policy); + String instanceName = InstanceId.of(projectId, instanceId).getName(); + return policyMarshaller.fromPb( + rpc.setInstanceAdminIAMPolicy(instanceName, policyMarshaller.toPb(policy))); + } + + @Override + public Iterable testInstanceIAMPermissions( + String instanceId, Iterable permissions) { + Preconditions.checkNotNull(permissions); + String instanceName = InstanceId.of(projectId, instanceId).getName(); + return rpc.testInstanceAdminIAMPermissions(instanceName, permissions).getPermissionsList(); + } + + @Override + public Instance.Builder newInstanceBuilder(InstanceId id) { + return new Instance.Builder(this, dbClient, id); + } + + @Override + public void cancelOperation(String name) { + rpc.cancelOperation(Preconditions.checkNotNull(name)); + } + + @Override + public Operation getOperation(String name) { + return rpc.getOperation(Preconditions.checkNotNull(name)); + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceConfig.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceConfig.java new file mode 100644 index 000000000000..9b96dda4df73 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceConfig.java @@ -0,0 +1,163 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import java.util.Collections; +import java.util.List; +import java.util.Map; + +/** + * Represents a Cloud Spanner instance config.{@code InstanceConfig} adds a layer of service related + * functionality over {@code InstanceConfigInfo}. + */ +public class InstanceConfig extends InstanceConfigInfo { + + private final InstanceAdminClient client; + + /** Builder of {@code InstanceConfig}. 
*/ + public static class Builder extends InstanceConfigInfo.BuilderImpl { + private final InstanceAdminClient client; + + Builder(InstanceConfig instanceConfig) { + super(instanceConfig); + this.client = instanceConfig.client; + } + + Builder(InstanceAdminClient client, InstanceConfigId id) { + super(id); + this.client = client; + } + + @Override + public Builder setDisplayName(String displayName) { + super.setDisplayName(displayName); + return this; + } + + @Override + protected Builder setReplicas(List replicas) { + super.setReplicas(replicas); + return this; + } + + @Override + public Builder setLeaderOptions(List leaderOptions) { + super.setLeaderOptions(leaderOptions); + return this; + } + + @Override + protected Builder setOptionalReplicas(List optionalReplicas) { + super.setOptionalReplicas(optionalReplicas); + return this; + } + + @Override + protected Builder setBaseConfig(InstanceConfigInfo baseConfig) { + super.setBaseConfig(baseConfig); + return this; + } + + @Override + protected Builder setConfigType(Type configType) { + super.setConfigType(configType); + return this; + } + + @Override + protected Builder setState(State state) { + super.setState(state); + return this; + } + + @Override + public Builder setEtag(String etag) { + super.setEtag(etag); + return this; + } + + @Override + protected Builder setReconciling(boolean reconciling) { + super.setReconciling(reconciling); + return this; + } + + @Override + public Builder addLabel(String key, String value) { + super.addLabel(key, value); + return this; + } + + @Override + public Builder putAllLabels(Map labels) { + super.putAllLabels(labels); + return this; + } + + @Override + public Builder addReadOnlyReplicas(List readOnlyReplicas) { + super.addReadOnlyReplicas(readOnlyReplicas); + return this; + } + + @Override + public InstanceConfig build() { + return new InstanceConfig(this); + } + } + + public static Builder newBuilder(InstanceConfig instanceConfig) { + return new Builder(instanceConfig); + } + 
+ public static Builder newBuilder(InstanceAdminClient client, InstanceConfigId instanceConfigId) { + return new Builder(client, instanceConfigId); + } + + /** Use {@link #newBuilder} instead */ + @Deprecated + public InstanceConfig(InstanceConfigId id, String displayName, InstanceAdminClient client) { + this(id, displayName, Collections.emptyList(), Collections.emptyList(), client); + } + + /** Use {@link #newBuilder} instead */ + @Deprecated + public InstanceConfig( + InstanceConfigId id, + String displayName, + List replicas, + List leaderOptions, + InstanceAdminClient client) { + super(id, displayName, replicas, leaderOptions); + this.client = client; + } + + InstanceConfig(Builder builder) { + super(builder); + this.client = builder.client; + } + + /** Gets the current state of this instance config. */ + public InstanceConfig reload() { + return client.getInstanceConfig(getId().getInstanceConfig()); + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceConfigId.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceConfigId.java new file mode 100644 index 000000000000..77d8035217e6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceConfigId.java @@ -0,0 +1,94 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.common.base.Preconditions; +import java.util.Map; +import java.util.Objects; + +/** Returns id of a Cloud Spanner instance config. */ +public final class InstanceConfigId { + private static final PathTemplate NAME_TEMPLATE = + PathTemplate.create("projects/{project}/instanceConfigs/{instanceconfig}"); + + private final String project; + private final String instanceConfig; + + InstanceConfigId(String project, String instanceConfig) { + this.project = Preconditions.checkNotNull(project); + this.instanceConfig = Preconditions.checkNotNull(instanceConfig); + } + + /** Returns project of this instane config. */ + public String getProject() { + return project; + } + + /** Returns id of this instance config. */ + public String getInstanceConfig() { + return instanceConfig; + } + + /** Returns the name of the instance config. */ + public String getName() { + return String.format("projects/%s/instanceConfigs/%s", project, instanceConfig); + } + + @Override + public int hashCode() { + return Objects.hash(project, instanceConfig); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + InstanceConfigId that = (InstanceConfigId) o; + return that.project.equals(project) && that.instanceConfig.equals(instanceConfig); + } + + @Override + public String toString() { + return getName(); + } + + /** + * Creates an {@code InstanceConfigId} from the name of the instance config. + * + * @param name the instance config name of the form {@code + * projects/PROJECT_ID/instanceConfigs/INSTANCE_CONFIG_ID} + * @throws IllegalArgumentException if {@code instanceName} does not conform to the expected + * pattern. 
+ */ + static InstanceConfigId of(String name) { + Preconditions.checkNotNull(name); + Map parts = NAME_TEMPLATE.match(name); + Preconditions.checkArgument( + parts != null, "Name should confirm to pattern %s: %s", NAME_TEMPLATE, name); + return new InstanceConfigId(parts.get("project"), parts.get("instanceconfig")); + } + + /** Creates an {@code InstanceConfigId} given project and instance config IDs. */ + public static InstanceConfigId of(String project, String instanceConfig) { + return new InstanceConfigId(project, instanceConfig); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceConfigInfo.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceConfigInfo.java new file mode 100644 index 000000000000..39d32fc8a803 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceConfigInfo.java @@ -0,0 +1,524 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.cloud.FieldSelector; +import com.google.common.collect.ImmutableMap; +import com.google.protobuf.FieldMask; +import com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.InstanceConfig.State; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +/** Represents a Cloud Spanner instance config resource. */ +public class InstanceConfigInfo { + + /** Represent an updatable field in Cloud Spanner InstanceConfig. */ + public enum InstanceConfigField implements FieldSelector { + DISPLAY_NAME("display_name"), + LABELS("labels"); + + private final String selector; + + InstanceConfigField(String selector) { + this.selector = selector; + } + + @Override + public String getSelector() { + return selector; + } + + static FieldMask toFieldMask(Iterable fields) { + FieldMask.Builder builder = FieldMask.newBuilder(); + for (InstanceConfigField field : fields) { + builder.addPaths(field.getSelector()); + } + return builder.build(); + } + } + + /** Type of the Instance config. */ + public enum Type { + TYPE_UNSPECIFIED, + GOOGLE_MANAGED, + USER_MANAGED + } + + /** Type of the Instance config. */ + public enum State { + STATE_UNSPECIFIED, + CREATING, + READY + } + + private final InstanceConfigId id; + private final String displayName; + private final List replicas; + private final List leaderOptions; + private final List optionalReplicas; + private final InstanceConfigInfo baseConfig; + private final Type configType; + private final String etag; + private final boolean reconciling; + private final State state; + private final Map labels; + + /** Returns the id of this instance config. */ + public InstanceConfigId getId() { + return id; + } + + /** Returns the display name of this instance config. 
*/ + public String getDisplayName() { + return displayName; + } + + /** + * The geographic placement of nodes in this instance configuration and their replication + * properties. + */ + public List getReplicas() { + return replicas; + } + + /** + * Allowed values of the default leader schema option for databases in instances that use this + * instance configuration. + */ + public List getLeaderOptions() { + return leaderOptions; + } + + /** + * The available optional replicas to choose from for user managed configurations. Populated for + * Google managed configurations. + */ + public List getOptionalReplicas() { + return optionalReplicas; + } + + /** + * Base configuration, e.g. {@code projects//instanceConfigs/nam3}, based on which + * this configuration is created. Only set for user managed configurations. The base config must + * refer to a configuration of type GOOGLE_MANAGED. + */ + public InstanceConfigInfo getBaseConfig() { + return baseConfig; + } + + /** + * Config type, indicates whether this instance config is a Google or User Managed Configuration. + */ + public Type getConfigType() { + return configType; + } + + /** + * etag, which is used for optimistic concurrency control as a way to help prevent simultaneous + * updates of an instance config from overwriting each other. + */ + public String getEtag() { + return etag; + } + + /** + * If true, the instance config is being created or updated. If false, there are no ongoing + * operations for the instance config. + */ + public boolean getReconciling() { + return reconciling; + } + + /** The current instance config state. */ + public State getState() { + return state; + } + + /** + * Cloud Labels, which can be used to filter collections of resources. They can be used to control + * how resource metrics are aggregated. + */ + public Map getLabels() { + return labels; + } + + /** Builder for {@code InstanceConfigInfo}. 
*/ + public abstract static class Builder { + public abstract Builder setDisplayName(String displayName); + + protected abstract Builder setReplicas(List replicas); + + protected abstract Builder setOptionalReplicas(List optionalReplicas); + + protected abstract Builder setBaseConfig(InstanceConfigInfo baseConfig); + + /** + * Sets the allowed values of the "default_leader" schema option for databases in instances that + * use this instance configuration. + */ + public abstract Builder setLeaderOptions(List leaderOptions); + + protected abstract Builder setConfigType(Type configType); + + protected abstract Builder setState(State state); + + public abstract Builder setEtag(String etag); + + protected abstract Builder setReconciling(boolean reconciling); + + public abstract Builder addLabel(String key, String value); + + public abstract Builder putAllLabels(Map labels); + + /** + * Adds the read only replicas to the set of replicas for a custom instance config. Called with + * one or more of the optional replicas of the base config. 
+ */ + public abstract Builder addReadOnlyReplicas(List readOnlyReplicas); + + public abstract InstanceConfigInfo build(); + } + + static class BuilderImpl extends Builder { + private InstanceConfigId id; + private String displayName = ""; + private List replicas = new ArrayList<>(); + private List leaderOptions = new ArrayList<>(); + private List optionalReplicas = new ArrayList<>(); + private InstanceConfigInfo baseConfig; + private Type configType = Type.TYPE_UNSPECIFIED; + private String etag = ""; + private boolean reconciling = false; + private State state = State.STATE_UNSPECIFIED; + private Map labels = new HashMap<>(); + + BuilderImpl(InstanceConfigId id) { + this.id = id; + } + + BuilderImpl(InstanceConfigId id, InstanceConfigInfo baseConfig) { + this.id = id; + this.baseConfig = baseConfig; + this.replicas = new ArrayList<>(baseConfig.replicas); + } + + BuilderImpl(InstanceConfigInfo instanceConfigInfo) { + this.id = instanceConfigInfo.id; + this.displayName = instanceConfigInfo.displayName; + this.replicas = new ArrayList<>(instanceConfigInfo.replicas); + this.leaderOptions = new ArrayList<>(instanceConfigInfo.leaderOptions); + this.optionalReplicas = new ArrayList<>(instanceConfigInfo.optionalReplicas); + this.baseConfig = instanceConfigInfo.baseConfig; + this.configType = instanceConfigInfo.configType; + this.etag = instanceConfigInfo.etag; + this.reconciling = instanceConfigInfo.reconciling; + this.state = instanceConfigInfo.state; + this.labels = new HashMap<>(instanceConfigInfo.labels); + } + + @Override + public Builder setDisplayName(String displayName) { + this.displayName = displayName; + return this; + } + + @Override + protected Builder setReplicas(List replicas) { + this.replicas = replicas; + return this; + } + + @Override + public Builder setLeaderOptions(List leaderOptions) { + this.leaderOptions = leaderOptions; + return this; + } + + @Override + protected Builder setOptionalReplicas(List optionalReplicas) { + this.optionalReplicas = 
optionalReplicas; + return this; + } + + @Override + protected Builder setBaseConfig(InstanceConfigInfo baseConfig) { + this.baseConfig = baseConfig; + return this; + } + + @Override + protected Builder setConfigType(Type configType) { + this.configType = configType; + return this; + } + + @Override + protected Builder setState(State state) { + this.state = state; + return this; + } + + @Override + public Builder setEtag(String etag) { + this.etag = etag; + return this; + } + + @Override + protected Builder setReconciling(boolean reconciling) { + this.reconciling = reconciling; + return this; + } + + @Override + public Builder addLabel(String key, String value) { + this.labels.put(key, value); + return this; + } + + @Override + public Builder putAllLabels(Map labels) { + this.labels.putAll(labels); + return this; + } + + @Override + public Builder addReadOnlyReplicas(List readOnlyReplicas) { + this.replicas.addAll(readOnlyReplicas); + return this; + } + + @Override + public InstanceConfigInfo build() { + return new InstanceConfigInfo(this); + } + } + + /** Use {@link #newBuilder} instead */ + @Deprecated + public InstanceConfigInfo(InstanceConfigId id, String displayName) { + this((BuilderImpl) newBuilder(id).setDisplayName(displayName)); + } + + public static Builder newBuilder(InstanceConfigId id) { + return new BuilderImpl(id); + } + + public static Builder newBuilder(InstanceConfigId id, InstanceConfigInfo baseConfig) { + return new BuilderImpl(id, baseConfig); + } + + /** Use {@link #newBuilder} instead */ + @Deprecated + public InstanceConfigInfo( + InstanceConfigId id, + String displayName, + List replicas, + List leaderOptions) { + this( + (BuilderImpl) + newBuilder(id) + .setDisplayName(displayName) + .setReplicas(replicas) + .setLeaderOptions(leaderOptions)); + } + + InstanceConfigInfo(BuilderImpl builder) { + this.id = builder.id; + this.displayName = builder.displayName; + this.replicas = new ArrayList<>(builder.replicas); + this.leaderOptions = new 
ArrayList<>(builder.leaderOptions); + this.baseConfig = builder.baseConfig; + this.optionalReplicas = new ArrayList<>(builder.optionalReplicas); + this.configType = builder.configType; + this.etag = builder.etag; + this.reconciling = builder.reconciling; + this.state = builder.state; + this.labels = ImmutableMap.copyOf(builder.labels); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof InstanceConfigInfo)) { + return false; + } + InstanceConfigInfo that = (InstanceConfigInfo) o; + return Objects.equals(id, that.id) + && Objects.equals(displayName, that.displayName) + && Objects.equals(replicas, that.replicas) + && Objects.equals(leaderOptions, that.leaderOptions) + && Objects.equals(optionalReplicas, that.optionalReplicas) + && Objects.equals(baseConfig, that.baseConfig) + && Objects.equals(configType, that.configType) + && Objects.equals(etag, that.etag) + && Objects.equals(reconciling, that.reconciling) + && Objects.equals(state, that.state) + && Objects.equals(labels, that.labels); + } + + @Override + public int hashCode() { + return Objects.hash( + id, + displayName, + replicas, + leaderOptions, + optionalReplicas, + baseConfig, + configType, + etag, + reconciling, + state, + labels); + } + + public Builder toBuilder() { + return new BuilderImpl(this); + } + + @Override + public String toString() { + return String.format( + "Instance Config[%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s]", + id, + displayName, + replicas, + leaderOptions, + optionalReplicas, + baseConfig, + configType, + etag, + reconciling, + state, + labels); + } + + com.google.spanner.admin.instance.v1.InstanceConfig toProto() { + InstanceConfig.Builder builder = + com.google.spanner.admin.instance.v1.InstanceConfig.newBuilder() + .setName(getId().getName()) + .setDisplayName(getDisplayName()) + .addAllReplicas( + getReplicas().stream().map(ReplicaInfo::getProto).collect(Collectors.toList())) + .addAllLeaderOptions(getLeaderOptions()) 
+ .setEtag(getEtag()) + .setReconciling(getReconciling()) + .putAllLabels(getLabels()) + .addAllOptionalReplicas( + getOptionalReplicas().stream() + .map(ReplicaInfo::getProto) + .collect(Collectors.toList())) + .setConfigType(toProtoConfigType(getConfigType())) + .setState(toProtoState(getState())); + + if (getBaseConfig() != null) { + builder.setBaseConfig(getBaseConfig().getId().getName()); + } + + return builder.build(); + } + + private static InstanceConfig.Type toProtoConfigType(Type type) { + switch (type) { + case TYPE_UNSPECIFIED: + return com.google.spanner.admin.instance.v1.InstanceConfig.Type.TYPE_UNSPECIFIED; + case GOOGLE_MANAGED: + return com.google.spanner.admin.instance.v1.InstanceConfig.Type.GOOGLE_MANAGED; + case USER_MANAGED: + return InstanceConfig.Type.USER_MANAGED; + default: + throw new IllegalArgumentException("Unknown config type:" + type); + } + } + + private static InstanceConfig.State toProtoState(State state) { + switch (state) { + case STATE_UNSPECIFIED: + return com.google.spanner.admin.instance.v1.InstanceConfig.State.STATE_UNSPECIFIED; + case CREATING: + return com.google.spanner.admin.instance.v1.InstanceConfig.State.CREATING; + case READY: + return com.google.spanner.admin.instance.v1.InstanceConfig.State.READY; + default: + throw new IllegalArgumentException("Unknown state:" + state); + } + } + + static com.google.cloud.spanner.InstanceConfig fromProto( + com.google.spanner.admin.instance.v1.InstanceConfig proto, InstanceAdminClient client) { + com.google.cloud.spanner.InstanceConfig.Builder builder = + com.google.cloud.spanner.InstanceConfig.newBuilder( + client, InstanceConfigId.of(proto.getName())) + .setReconciling(proto.getReconciling()) + .setReplicas( + proto.getReplicasList().stream() + .map(ReplicaInfo::fromProto) + .collect(Collectors.toList())) + .setDisplayName(proto.getDisplayName()) + .putAllLabels(proto.getLabelsMap()) + .setEtag(proto.getEtag()) + .setLeaderOptions(proto.getLeaderOptionsList()) + 
.setOptionalReplicas( + proto.getOptionalReplicasList().stream() + .map(ReplicaInfo::fromProto) + .collect(Collectors.toList())) + .setState(fromProtoState(proto.getState())) + .setConfigType(fromProtoConfigType(proto.getConfigType())); + + if (!proto.getBaseConfig().isEmpty()) { + builder.setBaseConfig(newBuilder(InstanceConfigId.of(proto.getBaseConfig())).build()); + } + + return builder.build(); + } + + private static State fromProtoState( + com.google.spanner.admin.instance.v1.InstanceConfig.State state) { + switch (state) { + case STATE_UNSPECIFIED: + return State.STATE_UNSPECIFIED; + case CREATING: + return State.CREATING; + case READY: + return State.READY; + default: + throw new IllegalArgumentException("Unknown state:" + state); + } + } + + private static Type fromProtoConfigType( + com.google.spanner.admin.instance.v1.InstanceConfig.Type type) { + switch (type) { + case TYPE_UNSPECIFIED: + return Type.TYPE_UNSPECIFIED; + case GOOGLE_MANAGED: + return Type.GOOGLE_MANAGED; + case USER_MANAGED: + return Type.USER_MANAGED; + default: + throw new IllegalArgumentException("Unknown config type:" + type); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceId.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceId.java new file mode 100644 index 000000000000..75ce66c2b8df --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceId.java @@ -0,0 +1,93 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.common.base.Objects; +import com.google.common.base.Preconditions; +import java.util.Map; + +/** Represents the resource name of a Cloud Spanner Instance. */ +public final class InstanceId { + private static final PathTemplate NAME_TEMPLATE = + PathTemplate.create("projects/{project}/instances/{instance}"); + + private final String project; + private final String instance; + + InstanceId(String project, String instance) { + this.project = Preconditions.checkNotNull(project); + this.instance = Preconditions.checkNotNull(instance); + } + + /** Returns the instance ID. */ + public String getInstance() { + return instance; + } + + /** Returns the ID of the project that owns this instance. */ + public String getProject() { + return project; + } + + /** Returns the name of the instance. */ + public String getName() { + return String.format("projects/%s/instances/%s", project, instance); + } + + @Override + public int hashCode() { + return Objects.hashCode(project, instance); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + InstanceId that = (InstanceId) o; + return that.project.equals(project) && that.instance.equals(instance); + } + + @Override + public String toString() { + return getName(); + } + + /** + * Creates an {@code InstanceId} from the name of the instance. 
+ * + * @param name the instance name of the form {@code projects/PROJECT_ID/instances/INSTANCE_ID} + * @throws IllegalArgumentException if {@code instanceName} does not conform to the expected + * pattern. + */ + static InstanceId of(String name) { + Preconditions.checkNotNull(name); + Map parts = NAME_TEMPLATE.match(name); + Preconditions.checkArgument( + parts != null, "Name should conform to pattern %s: %s", NAME_TEMPLATE, name); + return of(parts.get("project"), parts.get("instance")); + } + + /** Creates an {@code InstanceId} given project and instance IDs. */ + public static InstanceId of(String project, String instance) { + return new InstanceId(project, instance); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceInfo.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceInfo.java new file mode 100644 index 000000000000..e03819529892 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceInfo.java @@ -0,0 +1,400 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.cloud.FieldSelector; +import com.google.cloud.Timestamp; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableMap; +import com.google.protobuf.FieldMask; +import com.google.spanner.admin.instance.v1.AutoscalingConfig; +import com.google.spanner.admin.instance.v1.Instance.Edition; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** Represents a Cloud Spanner Instance. */ +public class InstanceInfo { + + /** Represent an updatable field in Cloud Spanner instance. */ + public enum InstanceField implements FieldSelector { + DISPLAY_NAME("display_name"), + NODE_COUNT("node_count"), + PROCESSING_UNITS("processing_units"), + AUTOSCALING_CONFIG("autoscaling_config"), + EDITION("edition"), + LABELS("labels"); + + static InstanceField[] defaultFieldsToUpdate(InstanceInfo info) { + if (info.getAutoscalingConfig() != null) { + return new InstanceField[] {DISPLAY_NAME, AUTOSCALING_CONFIG, LABELS}; + } else if (info.getNodeCount() > 0) { + return new InstanceField[] {DISPLAY_NAME, AUTOSCALING_CONFIG, NODE_COUNT, LABELS}; + } else { + return new InstanceField[] {DISPLAY_NAME, AUTOSCALING_CONFIG, PROCESSING_UNITS, LABELS}; + } + } + + private final String selector; + + InstanceField(String selector) { + this.selector = selector; + } + + @Override + public String getSelector() { + return selector; + } + + static FieldMask toFieldMask(InstanceField... fields) { + FieldMask.Builder builder = FieldMask.newBuilder(); + for (InstanceField field : fields) { + builder.addPaths(field.getSelector()); + } + return builder.build(); + } + } + + /** State of the Instance. */ + public enum State { + UNSPECIFIED, + CREATING, + READY + } + + /** Builder for {@code InstanceInfo}. 
*/ + public abstract static class Builder { + public abstract Builder setInstanceConfigId(InstanceConfigId configId); + + public abstract Builder setDisplayName(String displayName); + + Builder setUpdateTime(Timestamp updateTime) { + throw new UnsupportedOperationException("Unimplemented"); + } + + Builder setCreateTime(Timestamp createTime) { + throw new UnsupportedOperationException("Unimplemented"); + } + + /** + * Sets the number of nodes for the instance. Exactly one of processing units, node count or + * autoscaling config must be set when creating a new instance. + */ + public abstract Builder setNodeCount(int nodeCount); + + /** + * Sets the number of processing units for the instance. Exactly one of processing units, node + * count, or autoscaling config must be set when creating a new instance. Processing units must + * be between 1 and 999 (inclusive) when creating a new instance with node count = 0. Processing + * units from 1000 and up must always be a multiple of 1000 (that is equal to an integer number + * of nodes). + */ + public Builder setProcessingUnits(int processingUnits) { + throw new UnsupportedOperationException("Unimplemented"); + } + + /** + * Sets the autoscaling config for the instance, which will enable the autoscaling for this + * instance. Exactly one of processing units, node count, or autoscaling config must be set when + * creating a new instance. 
   */
  public Builder setAutoscalingConfig(AutoscalingConfig autoscalingConfig) {
    throw new UnsupportedOperationException("Unimplemented");
  }

  // Only supported by BuilderImpl; the abstract builder rejects the call.
  public Builder setEdition(Edition edition) {
    throw new UnsupportedOperationException("Unimplemented");
  }

  public abstract Builder setState(State state);

  public abstract Builder addLabel(String key, String value);

  // NOTE(review): generic type parameters appear stripped by formatting here and below
  // (presumably Map<String, String>) — confirm against upstream.
  public abstract Builder putAllLabels(Map labels);

  public abstract InstanceInfo build();
  }

  // Concrete builder; holds a mutable mirror of every InstanceInfo field.
  static class BuilderImpl extends Builder {
    private InstanceId id;
    private InstanceConfigId configId;
    private String displayName;
    private int nodeCount;
    private int processingUnits;
    private AutoscalingConfig autoscalingConfig;
    private Edition edition;
    private State state;
    private Map labels;
    private Timestamp updateTime;
    private Timestamp createTime;

    BuilderImpl(InstanceId id) {
      this.id = id;
      this.labels = new HashMap<>();
    }

    // Copy constructor backing InstanceInfo.toBuilder(); the labels map is defensively copied.
    BuilderImpl(InstanceInfo instance) {
      this.id = instance.id;
      this.configId = instance.configId;
      this.displayName = instance.displayName;
      this.nodeCount = instance.nodeCount;
      this.processingUnits = instance.processingUnits;
      this.autoscalingConfig = instance.autoscalingConfig;
      this.state = instance.state;
      this.labels = new HashMap<>(instance.labels);
      this.updateTime = instance.updateTime;
      this.createTime = instance.createTime;
      this.edition = instance.edition;
    }

    @Override
    public BuilderImpl setInstanceConfigId(InstanceConfigId configId) {
      this.configId = configId;
      return this;
    }

    @Override
    public BuilderImpl setDisplayName(String displayName) {
      this.displayName = displayName;
      return this;
    }

    // Package-private: timestamps are set from server responses, not by users.
    @Override
    Builder setUpdateTime(Timestamp updateTime) {
      this.updateTime = updateTime;
      return this;
    }

    @Override
    Builder setCreateTime(Timestamp createTime) {
      this.createTime = createTime;
      return this;
    }

    @Override
    public BuilderImpl setNodeCount(int nodeCount) {
      this.nodeCount = nodeCount;
      return this;
    }

    @Override
    public BuilderImpl setProcessingUnits(int processingUnits) {
      this.processingUnits = processingUnits;
      return this;
    }

    @Override
    public BuilderImpl setAutoscalingConfig(AutoscalingConfig autoscalingConfig) {
      this.autoscalingConfig = autoscalingConfig;
      return this;
    }

    @Override
    public BuilderImpl setEdition(Edition edition) {
      this.edition = edition;
      return this;
    }

    @Override
    public BuilderImpl setState(State state) {
      this.state = state;
      return this;
    }

    @Override
    public BuilderImpl addLabel(String key, String value) {
      labels.put(key, value);
      return this;
    }

    @Override
    public BuilderImpl putAllLabels(Map labels) {
      this.labels.putAll(labels);
      return this;
    }

    @Override
    public InstanceInfo build() {
      return new InstanceInfo(this);
    }
  }

  private final InstanceId id;
  private final InstanceConfigId configId;
  private final String displayName;
  private final int nodeCount;
  private final int processingUnits;
  private final AutoscalingConfig autoscalingConfig;
  private final Edition edition;
  private final State state;
  private final ImmutableMap labels;
  private final Timestamp updateTime;
  private final Timestamp createTime;

  InstanceInfo(BuilderImpl builder) {
    this.id = builder.id;
    this.configId = builder.configId;
    this.displayName = builder.displayName;
    this.nodeCount = builder.nodeCount;
    this.processingUnits = builder.processingUnits;
    this.autoscalingConfig = builder.autoscalingConfig;
    this.edition = builder.edition;
    this.state = builder.state;
    this.labels = ImmutableMap.copyOf(builder.labels);
    this.updateTime = builder.updateTime;
    this.createTime = builder.createTime;
  }

  /** Returns the identifier of the instance. */
  public InstanceId getId() {
    return id;
  }

  /** Returns the identifier of the instance configuration. */
  public InstanceConfigId getInstanceConfigId() {
    return configId;
  }

  /** Returns the display name of the instance. */
  public String getDisplayName() {
    return displayName;
  }

  /** Returns the time at which this instance was last updated. */
  public Timestamp getUpdateTime() {
    return updateTime;
  }

  /** Returns the time at which this instance was created. */
  public Timestamp getCreateTime() {
    return createTime;
  }

  /** Returns the node count of the instance. */
  public int getNodeCount() {
    return nodeCount;
  }

  /** Returns the number of processing units of the instance. */
  public int getProcessingUnits() {
    return processingUnits;
  }

  /** Returns the autoscaling config of the instance. */
  public AutoscalingConfig getAutoscalingConfig() {
    return autoscalingConfig;
  }

  /** Returns the edition of the instance. */
  public Edition getEdition() {
    return edition;
  }

  /** Returns the current state of the instance. */
  public State getState() {
    return state;
  }

  /** Returns the cloud labels attached to this instance. */
  public Map getLabels() {
    return labels;
  }

  public Builder toBuilder() {
    return new BuilderImpl(this);
  }

  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this)
        .add("name", id.getName())
        .add("configName", configId == null ? null : configId.getName())
        .add("displayName", displayName)
        .add("nodeCount", nodeCount)
        .add("processingUnits", processingUnits)
        .add("autoscaling_config", autoscalingConfig)
        .add("edition", edition)
        .add("state", state)
        .add("labels", labels)
        .add("createTime", createTime)
        .add("updateTime", updateTime)
        .toString();
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    InstanceInfo that = (InstanceInfo) o;
    return id.equals(that.id)
        && Objects.equals(configId, that.configId)
        && Objects.equals(displayName, that.displayName)
        && nodeCount == that.nodeCount
        && processingUnits == that.processingUnits
        && Objects.equals(autoscalingConfig, that.autoscalingConfig)
        && edition == that.edition
        && state == that.state
        && Objects.equals(labels, that.labels)
        && Objects.equals(updateTime, that.updateTime)
        && Objects.equals(createTime, that.createTime);
  }

  @Override
  public int hashCode() {
    return Objects.hash(
        id,
        configId,
        displayName,
        nodeCount,
        processingUnits,
        autoscalingConfig,
        edition,
        state,
        labels,
        updateTime,
        createTime);
  }

  // Converts to the admin API proto. State, createTime and updateTime are never written to the
  // proto; optional fields are only set when non-null.
  com.google.spanner.admin.instance.v1.Instance toProto() {
    com.google.spanner.admin.instance.v1.Instance.Builder builder =
        com.google.spanner.admin.instance.v1.Instance.newBuilder()
            .setName(getId().getName())
            .setNodeCount(getNodeCount())
            .setProcessingUnits(getProcessingUnits())
            .putAllLabels(getLabels());
    if (getDisplayName() != null) {
      builder.setDisplayName(getDisplayName());
    }
    if (getInstanceConfigId() != null) {
      builder.setConfig(getInstanceConfigId().getName());
    }
    if (getAutoscalingConfig() != null) {
      builder.setAutoscalingConfig(getAutoscalingConfig());
    }
    if (getEdition() != null) {
      builder.setEdition(getEdition());
    }
    return builder.build();
  }

  public static Builder newBuilder(InstanceId id) {
    return new
BuilderImpl(checkNotNull(id));
  }
}
diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceNotFoundException.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceNotFoundException.java
new file mode 100644
index 000000000000..dc4192e109fc
--- /dev/null
+++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceNotFoundException.java
@@ -0,0 +1,50 @@
/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner;

import com.google.api.gax.rpc.ApiException;
import com.google.cloud.spanner.SpannerException.ResourceNotFoundException;
import com.google.rpc.ResourceInfo;
import javax.annotation.Nullable;

/**
 * Exception thrown by Cloud Spanner when an operation detects that the instance that is being used
 * no longer exists. This type of error has its own subclass as it is a condition that should cause
 * the client library to stop trying to send RPCs to the backend until the user has taken action.
 */
public class InstanceNotFoundException extends ResourceNotFoundException {
  private static final long serialVersionUID = 45297002L;

  /** Package-private constructor. Use {@link SpannerExceptionFactory} to create instances. */
  InstanceNotFoundException(
      DoNotConstructDirectly token,
      @Nullable String message,
      ResourceInfo resourceInfo,
      @Nullable Throwable cause) {
    // Delegate to the ApiException-aware constructor without an ApiException.
    this(token, message, resourceInfo, cause, null);
  }

  /** Package-private constructor. Use {@link SpannerExceptionFactory} to create instances. */
  InstanceNotFoundException(
      DoNotConstructDirectly token,
      @Nullable String message,
      ResourceInfo resourceInfo,
      @Nullable Throwable cause,
      @Nullable ApiException apiException) {
    super(token, message, resourceInfo, cause, apiException);
  }
}
diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Interval.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Interval.java
new file mode 100644
index 000000000000..53f2cd04e0d7
--- /dev/null
+++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Interval.java
@@ -0,0 +1,275 @@
/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner;

import com.google.api.client.util.Preconditions;
import com.google.errorprone.annotations.Immutable;
import java.io.Serializable;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Represents the time duration as a combination of months, days and nanoseconds.
Nanoseconds are + * broken into two components microseconds and nanoFractions, where nanoFractions can range from + * [-999, 999]. Internally, Spanner supports Interval value with the following range of individual + * fields: months: [-120000, 120000] days: [-3660000, 3660000] nanoseconds: [-316224000000000000000, + * 316224000000000000000]. Interval value created outside the specified domain will return error + * when sent to Spanner backend. + */ +@Immutable +public class Interval implements Serializable { + private final int months; + private final int days; + private final BigInteger nanos; + + private static final long MONTHS_PER_YEAR = 12; + private static final long MINUTES_PER_HOUR = 60; + private static final long SECONDS_PER_MINUTE = 60; + private static final long SECONDS_PER_HOUR = MINUTES_PER_HOUR * SECONDS_PER_MINUTE; + private static final long MILLIS_PER_SECOND = 1000; + private static final long MICROS_PER_MILLI = 1000; + private static final long NANOS_PER_MICRO = 1000; + private static final long MICROS_PER_SECOND = MICROS_PER_MILLI * MILLIS_PER_SECOND; + private static final long MICROS_PER_MINUTE = SECONDS_PER_MINUTE * MICROS_PER_SECOND; + private static final long MICROS_PER_HOUR = SECONDS_PER_HOUR * MICROS_PER_SECOND; + private static final BigInteger NANOS_PER_MILLI = + BigInteger.valueOf(MICROS_PER_MILLI * NANOS_PER_MICRO); + private static final BigInteger NANOS_PER_SECOND = + BigInteger.valueOf(MICROS_PER_SECOND * NANOS_PER_MICRO); + private static final BigInteger NANOS_PER_MINUTE = + BigInteger.valueOf(MICROS_PER_MINUTE * NANOS_PER_MICRO); + private static final BigInteger NANOS_PER_HOUR = + BigInteger.valueOf(MICROS_PER_HOUR * NANOS_PER_MICRO); + private static final Interval ZERO = Interval.builder().build(); + + /** Regex to parse ISO8601 interval format- `P[n]Y[n]M[n]DT[n]H[n]M[n([.,][fraction])]S` */ + private static final Pattern INTERVAL_PATTERN = + Pattern.compile( + 
"^P(?!$)(-?\\d+Y)?(-?\\d+M)?(-?\\d+D)?(T(?=-?[.,]?\\d)(-?\\d+H)?(-?\\d+M)?(-?((\\d+([.,]\\d{1,9})?)|([.,]\\d{1,9}))S)?)?$"); + + private Interval(int months, int days, BigInteger nanos) { + this.months = months; + this.days = days; + this.nanos = nanos; + } + + /** Returns the months component of the interval. */ + public int getMonths() { + return months; + } + + /** Returns the days component of the interval. */ + public int getDays() { + return days; + } + + /** Returns the nanoseconds component of the interval. */ + public BigInteger getNanos() { + return nanos; + } + + public static Builder builder() { + return new Builder(); + } + + /** Creates an interval with specified number of months. */ + public static Interval ofMonths(int months) { + return builder().setMonths(months).build(); + } + + /** Creates an interval with specified number of days. */ + public static Interval ofDays(int days) { + return builder().setDays(days).build(); + } + + /** Creates an interval with specified number of seconds. */ + public static Interval ofSeconds(long seconds) { + return builder().setNanos(BigInteger.valueOf(seconds).multiply(NANOS_PER_SECOND)).build(); + } + + /** Creates an interval with specified number of milliseconds. */ + public static Interval ofMillis(long millis) { + return builder().setNanos(BigInteger.valueOf(millis).multiply(NANOS_PER_MILLI)).build(); + } + + /** Creates an interval with specified number of microseconds. */ + public static Interval ofMicros(long micros) { + return builder() + .setNanos(BigInteger.valueOf(micros).multiply(BigInteger.valueOf(NANOS_PER_MICRO))) + .build(); + } + + /** Creates an interval with specified number of nanoseconds. */ + public static Interval ofNanos(BigInteger nanos) { + return builder().setNanos(nanos).build(); + } + + /** Creates an interval with specified number of months, days and nanoseconds. 
*/ + public static Interval fromMonthsDaysNanos(int months, int days, BigInteger nanos) { + return builder().setMonths(months).setDays(days).setNanos(nanos).build(); + } + + private static String getNullOrDefault(Matcher matcher, int groupIdx) { + String value = matcher.group(groupIdx); + return value == null ? "0" : value; + } + + /* Parses ISO8601 duration format string to Interval. */ + public static Interval parseFromString(String interval) { + Matcher matcher = INTERVAL_PATTERN.matcher(interval); + if (!matcher.matches()) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Invalid Interval String: " + interval); + } + + long years = Long.parseLong(getNullOrDefault(matcher, 1).replace("Y", "")); + long months = Long.parseLong(getNullOrDefault(matcher, 2).replace("M", "")); + long days = Long.parseLong(getNullOrDefault(matcher, 3).replace("D", "")); + long hours = Long.parseLong(getNullOrDefault(matcher, 5).replace("H", "")); + long minutes = Long.parseLong(getNullOrDefault(matcher, 6).replace("M", "")); + BigDecimal seconds = + new BigDecimal(getNullOrDefault(matcher, 7).replace("S", "").replace(",", ".")); + + long totalMonths = Math.addExact(Math.multiplyExact(years, MONTHS_PER_YEAR), months); + BigInteger totalNanos = seconds.movePointRight(9).toBigInteger(); + totalNanos = + totalNanos.add(BigInteger.valueOf(minutes * SECONDS_PER_MINUTE).multiply(NANOS_PER_SECOND)); + totalNanos = + totalNanos.add(BigInteger.valueOf(hours * SECONDS_PER_HOUR).multiply(NANOS_PER_SECOND)); + + return Interval.builder() + .setMonths(Math.toIntExact(totalMonths)) + .setDays(Math.toIntExact(days)) + .setNanos(totalNanos) + .build(); + } + + /** Converts Interval to ISO8601 duration format string. 
*/ + public String toISO8601() { + if (this.equals(ZERO)) { + return "P0Y"; + } + + StringBuilder result = new StringBuilder(); + result.append("P"); + + long monthsPart = this.getMonths(); + long yearsPart = monthsPart / MONTHS_PER_YEAR; + monthsPart = monthsPart - yearsPart * MONTHS_PER_YEAR; + + if (yearsPart != 0) { + result.append(String.format("%dY", yearsPart)); + } + + if (monthsPart != 0) { + result.append(String.format("%dM", monthsPart)); + } + + if (this.getDays() != 0) { + result.append(String.format("%dD", this.getDays())); + } + + BigInteger nanos = this.getNanos(); + BigInteger zero = BigInteger.valueOf(0); + if (nanos.compareTo(zero) != 0) { + result.append("T"); + BigInteger hoursPart = nanos.divide(NANOS_PER_HOUR); + nanos = nanos.subtract(hoursPart.multiply(NANOS_PER_HOUR)); + if (hoursPart.compareTo(zero) != 0) { + result.append(String.format("%sH", hoursPart)); + } + + BigInteger minutesPart = nanos.divide(NANOS_PER_MINUTE); + nanos = nanos.subtract(minutesPart.multiply(NANOS_PER_MINUTE)); + if (minutesPart.compareTo(zero) != 0) { + result.append(String.format("%sM", minutesPart)); + } + + if (!nanos.equals(zero)) { + String secondsSign = ""; + if (nanos.signum() == -1) { + secondsSign = "-"; + nanos = nanos.negate(); + } + + BigInteger seconds_part = nanos.divide(NANOS_PER_SECOND); + nanos = nanos.subtract(seconds_part.multiply(NANOS_PER_SECOND)); + result.append(String.format("%s%s", secondsSign, seconds_part)); + + if (!nanos.equals(zero)) { + result.append(String.format(".%09d", nanos).replaceAll("(0{3})+$", "")); + } + result.append("S"); + } + } + + return result.toString(); + } + + @Override + public String toString() { + return toISO8601(); + } + + @Override + public boolean equals(Object rhs) { + if (!(rhs instanceof Interval)) { + return false; + } + + Interval anotherInterval = (Interval) rhs; + return getMonths() == anotherInterval.getMonths() + && getDays() == anotherInterval.getDays() + && 
getNanos().equals(anotherInterval.getNanos()); + } + + @Override + public int hashCode() { + int result = 17; + result = 31 * result + Integer.valueOf(getMonths()).hashCode(); + result = 31 * result + Integer.valueOf(getDays()).hashCode(); + result = 31 * result + getNanos().hashCode(); + return result; + } + + public static class Builder { + private int months = 0; + private int days = 0; + private BigInteger nanos = BigInteger.ZERO; + + Builder setMonths(int months) { + this.months = months; + return this; + } + + Builder setDays(int days) { + this.days = days; + return this; + } + + Builder setNanos(BigInteger nanos) { + this.nanos = Preconditions.checkNotNull(nanos); + return this; + } + + public Interval build() { + return new Interval(months, days, nanos); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/IsChannelShutdownException.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/IsChannelShutdownException.java new file mode 100644 index 000000000000..367d75a13cbf --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/IsChannelShutdownException.java @@ -0,0 +1,50 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.gax.rpc.UnavailableException; +import com.google.common.base.Predicate; +import io.grpc.Status.Code; +import io.grpc.StatusRuntimeException; + +/** + * Predicate that checks whether an exception is a ChannelShutdownException. This exception is + * thrown by gRPC if the underlying gRPC stub has been shut down and uses the UNAVAILABLE error + * code. This means that it would normally be retried by the Spanner client, but this specific + * UNAVAILABLE error should not be retried, as it would otherwise directly return the same error. + */ +class IsChannelShutdownException implements Predicate { + + @Override + public boolean apply(Throwable input) { + Throwable cause = input; + do { + if (isUnavailableError(cause) + && (cause.getMessage().contains("Channel shutdown invoked") + || cause.getMessage().contains("Channel shutdownNow invoked"))) { + return true; + } + } while ((cause = cause.getCause()) != null); + return false; + } + + private boolean isUnavailableError(Throwable cause) { + return (cause instanceof UnavailableException) + || (cause instanceof StatusRuntimeException + && ((StatusRuntimeException) cause).getStatus().getCode() == Code.UNAVAILABLE); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/IsRetryableInternalError.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/IsRetryableInternalError.java new file mode 100644 index 000000000000..e69e1ec9d785 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/IsRetryableInternalError.java @@ -0,0 +1,57 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.gax.rpc.InternalException; +import com.google.common.base.Predicate; +import com.google.common.collect.ImmutableList; +import io.grpc.Status; +import io.grpc.Status.Code; +import io.grpc.StatusRuntimeException; + +public class IsRetryableInternalError implements Predicate { + public static final IsRetryableInternalError INSTANCE = new IsRetryableInternalError(); + + private static final ImmutableList RETRYABLE_ERROR_MESSAGES = + ImmutableList.of( + "HTTP/2 error code: INTERNAL_ERROR", + "Connection closed with unknown cause", + "Received unexpected EOS on DATA frame from server", + "stream terminated by RST_STREAM", + "Authentication backend internal server error. 
Please retry."); + + public boolean isRetryableInternalError(Status status) { + return status.getCode() == Code.INTERNAL + && status.getDescription() != null + && isRetryableErrorMessage(status.getDescription()); + } + + @Override + public boolean apply(Throwable cause) { + return isInternalError(cause) && isRetryableErrorMessage(cause.getMessage()); + } + + private boolean isRetryableErrorMessage(String errorMessage) { + return RETRYABLE_ERROR_MESSAGES.stream().anyMatch(errorMessage::contains); + } + + private boolean isInternalError(Throwable cause) { + return (cause instanceof InternalException) + || (cause instanceof StatusRuntimeException + && ((StatusRuntimeException) cause).getStatus().getCode() == Status.Code.INTERNAL); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/IsSslHandshakeException.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/IsSslHandshakeException.java new file mode 100644 index 000000000000..53ff151a83cd --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/IsSslHandshakeException.java @@ -0,0 +1,28 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
 */

package com.google.cloud.spanner;

import com.google.common.base.Predicate;
import javax.net.ssl.SSLHandshakeException;

/** Predicate that checks whether a throwable is an {@link SSLHandshakeException}. */
public class IsSslHandshakeException implements Predicate {

  @Override
  public boolean apply(Throwable input) {
    // Only matches the throwable itself; the cause chain is intentionally not inspected here.
    return input instanceof SSLHandshakeException;
  }
}
diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Key.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Key.java
new file mode 100644
index 000000000000..83c0db0a3e11
--- /dev/null
+++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Key.java
@@ -0,0 +1,341 @@
/*
 * Copyright 2017 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner;

import static com.google.common.base.Preconditions.checkNotNull;

import com.google.cloud.ByteArray;
import com.google.cloud.Date;
import com.google.cloud.Timestamp;
import com.google.common.base.Joiner;
import com.google.protobuf.ListValue;
import com.google.protobuf.NullValue;
import com.google.protobuf.ProtocolMessageEnum;
import com.google.protobuf.Value;
import java.io.Serializable;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.UUID;
import javax.annotation.Nullable;

/**
 * Represents a row key in a Cloud Spanner table or index.
A key is a tuple of values constrained to
 * the scalar Cloud Spanner types: currently these are {@code BOOLEAN}, {@code INT64}, {@code
 * FLOAT64}, {@code STRING}, {@code BYTES} and {@code TIMESTAMP}. Values may be null where the table
 * definition permits it.
 *
 * <p>{@code Key} is used to define the row, or endpoints of a range of rows, to retrieve in read
 * operations or to delete in a mutation.
 *
 * <p>{@code Key} instances are immutable.
 */
public final class Key implements Serializable {
  // NOTE(review): the argument to useForNull below may have been "<null>" upstream before
  // formatting stripped angle brackets — confirm. Likewise, generic type parameters (e.g.
  // List<Object>) appear stripped throughout this file.
  private static final Joiner joiner = Joiner.on(',').useForNull("");
  private static final com.google.protobuf.Value NULL_PROTO =
      Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build();
  private static final long serialVersionUID = 4433485671785063530L;

  private final List parts;

  private Key(List parts) {
    this.parts = Collections.unmodifiableList(parts);
  }

  /**
   * Construct a key with parts specified by {@code values}. Each object in {@code values} must be
   * either {@code null} or one of the following supported types:
   *
   * <ul>
   *   <li>{@code Boolean} for the {@code BOOL} Cloud Spanner type
   *   <li>{@code Integer}, {@code Long} for the {@code INT64} Cloud Spanner type
   *   <li>{@code Float}, {@code Double} for the {@code FLOAT64} Cloud Spanner type
   *   <li>{@code BigDecimal} for the {@code NUMERIC} Cloud Spanner type
   *   <li>{@code String} for the {@code STRING} Cloud Spanner type
   *   <li>{@code String} for the {@code JSON} Cloud Spanner type
   *   <li>{@link ByteArray} for the {@code BYTES} Cloud Spanner type
   *   <li>{@link Timestamp} for the {@code TIMESTAMP} Cloud Spanner type
   *   <li>{@link Date} for the {@code DATE} Cloud Spanner type
   *   <li>{@link java.util.UUID} for the {@code UUID} Cloud Spanner type
   * </ul>
   *
   * @throws IllegalArgumentException if any member of {@code values} is not a supported type
   */
  public static Key of(Object... values) {
    // A literal Key.of(null) results in a null array being passed. Provide a clearer error.
    checkNotNull(
        values,
        "'values' cannot be null. For a literal key containing a single null value, "
            + "call Key.of((Object) null).");
    Builder b = new Builder(false /* builder never leaves this scope */);
    for (Object value : values) {
      b.appendObject(value);
    }
    return b.build();
  }

  /** Returns a new builder for constructing a key. */
  public static Builder newBuilder() {
    return new Builder(true /* escaped */);
  }

  /** Builder for {@link Key} instances. */
  public static class Builder {
    /**
     * Indicates whether this builder can escape the scope of this class. If so, we must assume that
     * the builder can be modified after {@code build()} is called and so we perform a defensive
     * copy.
     */
    private final boolean canEscape;

    private final ArrayList buffer = new ArrayList<>();

    private Builder(boolean canEscape) {
      this.canEscape = canEscape;
    }

    // Backs Key.toBuilder(): seeds the buffer with the existing key parts.
    private Builder(Key key) {
      canEscape = true;
      buffer.addAll(key.parts);
    }

    /** Appends a {@code BOOL} value to the key. */
    public Builder append(@Nullable Boolean value) {
      buffer.add(value);
      return this;
    }

    /** Appends an {@code INT64} value to the key. */
    public Builder append(long value) {
      buffer.add(value);
      return this;
    }

    /** Appends an {@code INT64} value to the key. */
    public Builder append(@Nullable Long value) {
      buffer.add(value);
      return this;
    }

    /** Appends a {@code FLOAT64} value to the key. */
    public Builder append(double value) {
      buffer.add(value);
      return this;
    }

    /** Appends a {@code FLOAT64} value to the key. */
    public Builder append(@Nullable Double value) {
      buffer.add(value);
      return this;
    }

    /** Appends a {@code NUMERIC} value to the key. */
    public Builder append(@Nullable BigDecimal value) {
      buffer.add(value);
      return this;
    }

    /** Appends a {@code ENUM} value to the key. */
    public Builder append(@Nullable ProtocolMessageEnum value) {
      buffer.add(value);
      return this;
    }

    /** Appends a {@code STRING} value to the key. */
    public Builder append(@Nullable String value) {
      buffer.add(value);
      return this;
    }

    /** Appends a {@code BYTES} value to the key. */
    public Builder append(@Nullable ByteArray value) {
      buffer.add(value);
      return this;
    }

    /** Appends a {@code TIMESTAMP} value to the key */
    public Builder append(@Nullable Timestamp value) {
      buffer.add(value);
      return this;
    }

    /** Appends a {@code DATE} value to the key */
    public Builder append(@Nullable Date value) {
      buffer.add(value);
      return this;
    }

    /** Appends a {@code UUID} value to the key */
    public Builder append(@Nullable UUID value) {
      buffer.add(value);
      return this;
    }

    /**
     * Appends an object following the same conversion rules as {@link Key#of(Object...)}. When
     * using the {@code Builder}, most code should prefer using the strongly typed {@code
     * append(...)} methods, for both performance and the benefit of compile-time checking.
     */
    public Builder appendObject(@Nullable Object value) {
      if (value == null) {
        // The Boolean overload is an arbitrary choice; a null part carries no type.
        append((Boolean) null);
      } else if (value instanceof Boolean) {
        append((Boolean) value);
      } else if (value instanceof Integer) {
        // Resolves to append(long) via unboxing + widening.
        append((Integer) value);
      } else if (value instanceof Long) {
        append((Long) value);
      } else if (value instanceof Float) {
        // Resolves to append(double) via unboxing + widening.
        append((Float) value);
      } else if (value instanceof Double) {
        append((Double) value);
      } else if (value instanceof BigDecimal) {
        append((BigDecimal) value);
      } else if (value instanceof String) {
        append((String) value);
      } else if (value instanceof ByteArray) {
        append((ByteArray) value);
      } else if (value instanceof Timestamp) {
        append((Timestamp) value);
      } else if (value instanceof Date) {
        append((Date) value);
      } else if (value instanceof UUID) {
        append((UUID) value);
      } else if (value instanceof ProtocolMessageEnum) {
        append((ProtocolMessageEnum) value);
      } else {
        throw new IllegalArgumentException(
            "Unsupported type ["
                + value.getClass().getCanonicalName()
                + "] for argument: "
                + value);
      }
      return this;
    }

    public Key build() {
      if (canEscape) {
        // Copy buffer to preserve immutability contract.
        return new Key(new ArrayList<>(buffer));
      } else {
        // Internal use of builder that does not escape; no need for defensive copy.
        return new Key(buffer);
      }
    }
  }

  /** Returns the number of parts in this key, including {@code null} values. */
  public int size() {
    return parts.size();
  }

  /**
   * Returns the parts in this key. Each part is represented by the corresponding Cloud Spanner
   * type's canonical Java type, as listed below. Note that other types supported by {@link
   * #of(Object...)} are converted to one of the canonical types.
   *
   * <ul>
   *   <li>{@code BOOL} is represented by {@code Boolean}
   *   <li>{@code INT64} is represented by {@code Long}
   *   <li>{@code FLOAT64} is represented by {@code Double}
   *   <li>{@code NUMERIC} is represented by {@code BigDecimal}
   *   <li>{@code STRING} is represented by {@code String}
   *   <li>{@code JSON} is represented by {@code String}
   *   <li>{@code BYTES} is represented by {@link ByteArray}
   *   <li>{@code TIMESTAMP} is represented by {@link Timestamp}
   *   <li>{@code DATE} is represented by {@link Date}
   * </ul>
   *
   * @return an unmodifiable list containing the key parts
   */
  public Iterable getParts() {
    return parts;
  }

  /** Returns a builder initialized with the value of this key. */
  public Builder toBuilder() {
    return new Builder(this);
  }

  void toString(StringBuilder b) {
    // TODO(user): Consider limiting the length of string output.
    // Note: the format produced should match that used for keys in error messages yielded by the
    // backend.
    b.append('[');
    joiner.appendTo(b, parts);
    b.append(']');
  }

  @Override
  public String toString() {
    StringBuilder b = new StringBuilder();
    toString(b);
    return b.toString();
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    Key that = (Key) o;
    return parts.equals(that.parts);
  }

  @Override
  public int hashCode() {
    return parts.hashCode();
  }

  // Serializes the key parts into the proto wire format: numbers other than FLOAT64 are sent as
  // decimal strings, BYTES as base64, and enums as their numeric value in string form.
  ListValue toProto() {
    ListValue.Builder builder = ListValue.newBuilder();
    for (Object part : parts) {
      if (part == null) {
        builder.addValues(NULL_PROTO);
      } else if (part instanceof Boolean) {
        builder.addValuesBuilder().setBoolValue((Boolean) part);
      } else if (part instanceof Long) {
        builder.addValuesBuilder().setStringValue(part.toString());
      } else if (part instanceof Double) {
        builder.addValuesBuilder().setNumberValue((Double) part);
      } else if (part instanceof BigDecimal) {
        builder.addValuesBuilder().setStringValue(part.toString());
      } else if (part instanceof String) {
        builder.addValuesBuilder().setStringValue((String) part);
      } else if (part instanceof ByteArray) {
        builder.addValuesBuilder().setStringValue(((ByteArray) part).toBase64());
      } else if (part instanceof Timestamp) {
        builder.addValuesBuilder().setStringValue(part.toString());
      } else if (part instanceof Date) {
        builder.addValuesBuilder().setStringValue(part.toString());
      } else if (part instanceof UUID) {
        builder.addValuesBuilder().setStringValue(part.toString());
      } else if (part instanceof ProtocolMessageEnum) {
        builder
            .addValuesBuilder()
            .setStringValue(Long.toString(((ProtocolMessageEnum) part).getNumber()));
      } else {
        // Unreachable if the Builder's type checks are exhaustive.
        throw new AssertionError("Illegal key part: " + part.getClass());
      }
    }
    return builder.build();
  }
}
diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/KeyRange.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/KeyRange.java
new file mode 100644
index 000000000000..6261c0253dbe
--- /dev/null
+++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/KeyRange.java
@@ -0,0 +1,311 @@
/*
 * Copyright 2017 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner;

import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;

import java.io.Serializable;
import java.util.Objects;

/**
 * Represents a range of rows in a table or index.

A range has a start key and an end key. These keys can be open or closed, indicating if the + * range includes rows with that key. + * + *

For example, consider the following table definition: + * + *

+ *     CREATE TABLE UserEvents (
+ *       UserName STRING(MAX),
+ *       EventDate STRING(10),
+ *     ) PRIMARY KEY(UserName, EventDate);
+ * 
+ * + * The following keys name rows in this table: + * + *
    + *
  • {@code Key.of("Bob", "2014-09-23")} + *
  • {@code Key.of("Alfred", "2015-06-12")} + *
+ * + * Since the {@code UserEvents} table's {@code PRIMARY KEY} clause names two columns, each {@code + * UserEvents} key has two elements; the first is the {@code UserName}, and the second is the {@code + * EventDate}. + * + *

Key ranges with multiple components are interpreted lexicographically by component using the + * table or index key's declared sort order. For example, the following range returns all events for + * user "Bob" that occurred in the year 2015: + * + *

+ *     KeyRange.closedClosed(
+ *         Key.of("Bob", "2015-01-01"),
+ *         Key.of("Bob", "2015-12-31"))
+ * 
+ * + * Start and end keys can omit trailing key components. This affects the inclusion and exclusion of + * rows that exactly match the provided key components: if the key is closed, then rows that exactly + * match the provided components are included; if the key is open, then rows that exactly match are + * not included. + * + *

For example, the following range includes all events for "Bob" that occurred during and after + * the year 2000: + * + *

+ *     KeyRange.closedClosed(
+ *         Key.of("Bob", "2000-01-01"),
+ *         Key.of("Bob"))
+ * 
+ * + * The next example retrieves all events for "Bob": + * + *
+ *     KeyRange.prefix(Key.of("Bob"))
+ * 
+ * + * To retrieve events before the year 2000: + * + *
+ *     KeyRange.closedOpen(
+ *         Key.of("Bob"),
+ *         Key.of("Bob", "2000-01-01"))
+ * 
+ * + * The following range includes all rows in the table: + * + *
+ *     KeyRange.all()
+ * 
+ * + * This range returns all users whose {@code UserName} begins with any character from A to C: + * + *
+ *     KeyRange.closedOpen(Key.of("A"), Key.of("D"))
+ * 
+ * + * This range returns all users whose {@code UserName} begins with B: + * + *
+ *     KeyRange.closedOpen(Key.of("B"), Key.of("C"))
+ * 
+ * + * Key ranges honor column sort order. For example, suppose a table is defined as follows: + * + *
+ *     CREATE TABLE DescendingSortedTable {
+ *       Key INT64,
+ *       ...
+ *     ) PRIMARY KEY(Key DESC);
+ * 
+ * + * The following range retrieves all rows with key values between 1 and 100 inclusive: + * + *
+ *     KeyRange.closedClosed(Key.of(100), Key.of(1))
+ * 
+ * + * Note that 100 is passed as the start, and 1 is passed as the end, because {@code Key} is a + * descending column in the schema. + * + *

{@code KeyRange} instances are immutable. + */ +public final class KeyRange implements Serializable { + private static final long serialVersionUID = 100894273141111331L; + + /** Defines whether a range includes or excludes its endpoint keys. */ + public enum Endpoint { + /** Ranges include the endpoint key. */ + CLOSED, + + /** Ranges exclude the endpoint key. */ + OPEN + } + + private final Key start; + private final Endpoint startType; + private final Key end; + private final Endpoint endType; + + private KeyRange(Key start, Endpoint startType, Key end, Endpoint endType) { + this.start = start; + this.startType = startType; + this.end = end; + this.endType = endType; + } + + /** Returns a key range from {@code start} inclusive to {@code end} exclusive. */ + public static KeyRange closedOpen(Key start, Key end) { + return new KeyRange(checkNotNull(start), Endpoint.CLOSED, checkNotNull(end), Endpoint.OPEN); + } + + /** Returns a key range from {@code start} inclusive to {@code end} inclusive. */ + public static KeyRange closedClosed(Key start, Key end) { + return new KeyRange(checkNotNull(start), Endpoint.CLOSED, checkNotNull(end), Endpoint.CLOSED); + } + + /** Returns a key range from {@code start} exclusive to {@code end} exclusive. */ + public static KeyRange openOpen(Key start, Key end) { + return new KeyRange(checkNotNull(start), Endpoint.OPEN, checkNotNull(end), Endpoint.OPEN); + } + + /** Returns a key range from {@code start} exclusive to {@code end} inclusive. */ + public static KeyRange openClosed(Key start, Key end) { + return new KeyRange(checkNotNull(start), Endpoint.OPEN, checkNotNull(end), Endpoint.CLOSED); + } + + /** + * Returns a key range that covers all keys where the first {@code prefix.size()} components match + * {@code prefix} exactly. + */ + public static KeyRange prefix(Key prefix) { + return closedClosed(prefix, prefix); + } + + /** Returns a new builder for constructing a range. 
*/ + public static Builder newBuilder() { + return new Builder(); + } + + /** Builder for {@link KeyRange} instances. */ + public static class Builder { + private Key start; + private Endpoint startType = Endpoint.CLOSED; + private Key end; + private Endpoint endType = Endpoint.OPEN; + + private Builder() {} + + private Builder(KeyRange r) { + start = r.start; + startType = r.startType; + end = r.end; + endType = r.endType; + } + + /** Sets the start key of the range. This must be set before {@link #build()} is called. */ + public Builder setStart(Key key) { + start = checkNotNull(key); + return this; + } + + /** + * Sets whether the start key is inclusive ({@code CLOSED}) or exclusive ({@code OPEN}). The + * default is {@code CLOSED}. + */ + public Builder setStartType(Endpoint type) { + startType = checkNotNull(type); + return this; + } + + /** Sets the end key of the range. This must be set before {@link #build()} is called. */ + public Builder setEnd(Key key) { + end = checkNotNull(key); + return this; + } + + /** + * Sets whether the end key is inclusive ({@code CLOSED}) or exclusive ({@code OPEN}). The + * default is {@code OPEN}. + */ + public Builder setEndType(Endpoint type) { + endType = checkNotNull(type); + return this; + } + + public KeyRange build() { + checkState(start != null, "Missing required call to start(Key)"); + checkState(end != null, "Missing required call to end(Key)"); + return new KeyRange(start, startType, end, endType); + } + } + + /** Returns the start key of the range. */ + public Key getStart() { + return start; + } + + /** Indicates whether the start key is inclusive ({@code CLOSED}) or exclusive ({@code OPEN}). */ + public Endpoint getStartType() { + return startType; + } + + /** Returns the end key of the range. */ + public Key getEnd() { + return end; + } + + /** Indicates whether the end key is inclusive ({@code CLOSED}) or exclusive ({@code OPEN}). 
*/ + public Endpoint getEndType() { + return endType; + } + + /** Returns a builder initialized with the value of this range. */ + public Builder toBuilder() { + return new Builder(this); + } + + void toString(StringBuilder b) { + b.append(startType == Endpoint.CLOSED ? '[' : '('); + start.toString(b); + b.append(','); + end.toString(b); + b.append(endType == Endpoint.CLOSED ? ']' : ')'); + } + + @Override + public String toString() { + StringBuilder b = new StringBuilder(); + toString(b); + return b.toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + KeyRange that = (KeyRange) o; + return startType == that.startType + && endType == that.endType + && start.equals(that.start) + && end.equals(that.end); + } + + @Override + public int hashCode() { + return Objects.hash(start, startType, end, endType); + } + + com.google.spanner.v1.KeyRange toProto() { + com.google.spanner.v1.KeyRange.Builder builder = com.google.spanner.v1.KeyRange.newBuilder(); + if (startType == Endpoint.CLOSED) { + builder.setStartClosed(start.toProto()); + } else { + builder.setStartOpen(start.toProto()); + } + if (endType == Endpoint.CLOSED) { + builder.setEndClosed(end.toProto()); + } else { + builder.setEndOpen(end.toProto()); + } + return builder.build(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/KeySet.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/KeySet.java new file mode 100644 index 000000000000..76c60aac9bb8 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/KeySet.java @@ -0,0 +1,212 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.common.collect.ImmutableList; +import java.io.Serializable; +import java.util.Objects; + +/** + * Defines a collection of Cloud Spanner keys and/or key ranges. All the keys are expected to be in + * the same table or index. The keys need not be sorted in any particular way. + * + *

If the same key is specified multiple times in the set (for example if two ranges, two keys, + * or a key and a range overlap), the Cloud Spanner backend behaves as if the key were only + * specified once. However, the {@code KeySet} object itself does not perform any de-duplication. + * + *

{@code KeySet} instances are immutable. + */ +public final class KeySet implements Serializable { + private static final long serialVersionUID = -542201151451064347L; + private final boolean all; + private final ImmutableList<Key> keys; + private final ImmutableList<KeyRange> ranges; + + private KeySet(boolean all, ImmutableList<Key> keys, ImmutableList<KeyRange> ranges) { + this.all = all; + this.keys = keys; + this.ranges = ranges; + } + + /** + * Creates a key set containing a single key. {@code key} should contain exactly as many elements + * as there are columns in the primary or index key with which this key set is used. + */ + public static KeySet singleKey(Key key) { + return new KeySet(false, ImmutableList.of(key), ImmutableList.of()); + } + + /** + * Creates a key set containing a single range. See {@link KeyRange} for details of how to specify + * ranges. + */ + public static KeySet range(KeyRange range) { + return new KeySet(false, ImmutableList.of(), ImmutableList.of(range)); + } + + /** + * Creates a key set that covers all keys where the first {@code prefix.size()} components match + * {@code prefix} exactly. + */ + public static KeySet prefixRange(Key prefix) { + return range(KeyRange.prefix(prefix)); + } + + /** Creates a key set that will retrieve all rows of a table or index. */ + public static KeySet all() { + return new KeySet(true, ImmutableList.of(), ImmutableList.of()); + } + + /** Returns a new builder that can be used to construct a key set. */ + public static Builder newBuilder() { + return new Builder(); + } + + /** Builder for {@link KeySet} instances. 
*/ + public static class Builder { + private boolean all; + private ImmutableList.Builder<Key> keys; + private ImmutableList.Builder<KeyRange> ranges; + + private Builder() {} + + private Builder(KeySet set) { + all = set.all; + if (!set.keys.isEmpty()) { + keys = ImmutableList.<Key>builder().addAll(set.keys); + } + if (!set.ranges.isEmpty()) { + ranges = ImmutableList.<KeyRange>builder().addAll(set.ranges); + } + } + + /** + * Adds a key to the key set. {@code key} should contain exactly as many elements as there are + * columns in the primary or index key with which this key set is used. + */ + public Builder addKey(Key key) { + if (keys == null) { + keys = ImmutableList.builder(); + } + keys.add(key); + return this; + } + + /** Adds a range to the key set. See {@link KeyRange} for details of how to specify ranges. */ + public Builder addRange(KeyRange range) { + if (ranges == null) { + ranges = ImmutableList.builder(); + } + ranges.add(range); + return this; + } + + /** Makes the key set retrieve all rows of a table or index. */ + public Builder setAll() { + all = true; + return this; + } + + public KeySet build() { + return new KeySet( + all, + keys != null ? keys.build() : ImmutableList.<Key>of(), + ranges != null ? ranges.build() : ImmutableList.<KeyRange>of()); + } + } + + /** Returns the keys in this set. */ + public Iterable<Key> getKeys() { + // Design note: returns Iterable<> rather than List<> since future internal representations + // may not maintain a List internally, but rather decode the rep and reconstruct Keys on + // the fly. + return keys; + } + + /** Returns the ranges in this set. */ + public Iterable<KeyRange> getRanges() { + return ranges; + } + + /** Indicates whether the set will retrieve all rows in a table or index. */ + public boolean isAll() { + return all; + } + + /** Returns a builder initialized with the contents of this set. 
*/ + public Builder toBuilder() { + return new Builder(this); + } + + void toString(StringBuilder b) { + b.append('{'); + int size = 0; + if (all) { + ++size; + b.append("all"); + } + for (Key key : keys) { + if (size++ > 0) { + b.append(','); + } + key.toString(b); + } + for (KeyRange range : ranges) { + if (size++ > 0) { + b.append(','); + } + range.toString(b); + } + b.append('}'); + } + + @Override + public String toString() { + StringBuilder b = new StringBuilder(); + toString(b); + return b.toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + KeySet that = (KeySet) o; + return all == that.all && keys.equals(that.keys) && ranges.equals(that.ranges); + } + + @Override + public int hashCode() { + return Objects.hash(all, keys, ranges); + } + + void appendToProto(com.google.spanner.v1.KeySet.Builder proto) { + for (Key key : keys) { + proto.addKeys(key.toProto()); + } + for (KeyRange range : ranges) { + proto.addRanges(range.toProto()); + } + if (all) { + proto.setAll(true); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/LazySpannerInitializer.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/LazySpannerInitializer.java new file mode 100644 index 000000000000..009c276751e4 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/LazySpannerInitializer.java @@ -0,0 +1,29 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +/** Default implementation of {@link AbstractLazyInitializer} for a {@link Spanner} instance. */ +public class LazySpannerInitializer extends AbstractLazyInitializer { + /** + * Initializes a default {@link Spanner} instance. Override this method to create an instance with + * custom configuration. + */ + @Override + protected Spanner initialize() throws Exception { + return SpannerOptions.newBuilder().build().getService(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/MetricRegistryConstants.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/MetricRegistryConstants.java new file mode 100644 index 000000000000..e634065b91fe --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/MetricRegistryConstants.java @@ -0,0 +1,109 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.spanner; + +import com.google.common.collect.ImmutableList; +import io.opencensus.metrics.LabelKey; +import io.opencensus.metrics.LabelValue; + +/** A helper class that holds OpenCensus's related constants. */ +class MetricRegistryConstants { + + // The label keys are used to uniquely identify timeseries. + private static final LabelKey CLIENT_ID = + LabelKey.create("client_id", "User defined database client id"); + private static final LabelKey DATABASE = LabelKey.create("database", "Target database"); + private static final LabelKey INSTANCE_ID = + LabelKey.create("instance_id", "Name of the instance"); + private static final LabelKey LIBRARY_VERSION = + LabelKey.create("library_version", "Library version"); + static final LabelKey IS_MULTIPLEXED_KEY = + LabelKey.create("is_multiplexed", "Multiplexed Session"); + + private static final LabelKey SESSION_TYPE = LabelKey.create("Type", "Type of the Sessions"); + + /** The label value is used to represent missing value. */ + private static final LabelValue UNSET_LABEL = LabelValue.create(null); + + static final LabelValue NUM_IN_USE_SESSIONS = LabelValue.create("num_in_use_sessions"); + + /** + * The session pool no longer prepares a fraction of the sessions with a read/write transaction. + * This metric will therefore always be zero and may be removed in the future. + */ + @Deprecated + static final LabelValue NUM_SESSIONS_BEING_PREPARED = + LabelValue.create("num_sessions_being_prepared"); + + static final LabelValue NUM_READ_SESSIONS = LabelValue.create("num_read_sessions"); + + /** + * The session pool no longer prepares a fraction of the sessions with a read/write transaction. + * This metric will therefore always be zero and may be removed in the future. 
+ */ + @Deprecated + static final LabelValue NUM_WRITE_SESSIONS = LabelValue.create("num_write_prepared_sessions"); + + static final ImmutableList SPANNER_LABEL_KEYS = + ImmutableList.of(CLIENT_ID, DATABASE, INSTANCE_ID, LIBRARY_VERSION); + static final ImmutableList SPANNER_LABEL_KEYS_WITH_TYPE = + ImmutableList.of(CLIENT_ID, DATABASE, INSTANCE_ID, LIBRARY_VERSION, SESSION_TYPE); + + static final ImmutableList SPANNER_DEFAULT_LABEL_VALUES = + ImmutableList.of(UNSET_LABEL, UNSET_LABEL, UNSET_LABEL, UNSET_LABEL); + + static final ImmutableList SPANNER_LABEL_KEYS_WITH_MULTIPLEXED_SESSIONS = + ImmutableList.of(CLIENT_ID, DATABASE, INSTANCE_ID, LIBRARY_VERSION, IS_MULTIPLEXED_KEY); + + /** Unit to represent counts. */ + static final String COUNT = "1"; + + static final String INSTRUMENTATION_SCOPE = "cloud.google.com/java"; + + static final String METRIC_PREFIX = "cloud.google.com/java/"; + + // The Metric name and description + static final String MAX_IN_USE_SESSIONS = "spanner/max_in_use_sessions"; + static final String MAX_ALLOWED_SESSIONS = "spanner/max_allowed_sessions"; + static final String GET_SESSION_TIMEOUTS = "spanner/get_session_timeouts"; + static final String NUM_ACQUIRED_SESSIONS = "spanner/num_acquired_sessions"; + static final String NUM_RELEASED_SESSIONS = "spanner/num_released_sessions"; + static final String NUM_SESSIONS_IN_POOL = "spanner/num_sessions_in_pool"; + static final String NUM_SESSIONS_IN_USE = "spanner/num_in_use_sessions"; + static final String NUM_SESSIONS_AVAILABLE = "spanner/num_available_sessions"; + static final String SESSIONS_TYPE = "session_type"; + static final String IS_MULTIPLEXED = "is_multiplexed"; + static final String MAX_IN_USE_SESSIONS_DESCRIPTION = + "The maximum number of sessions in use during the last 10 minute interval."; + static final String MAX_ALLOWED_SESSIONS_DESCRIPTION = + "The maximum number of sessions allowed. 
Configurable by the user."; + static final String SESSIONS_TIMEOUTS_DESCRIPTION = + "The number of get sessions timeouts due to pool exhaustion"; + static final String NUM_ACQUIRED_SESSIONS_DESCRIPTION = + "The number of sessions acquired from the session pool."; + static final String NUM_RELEASED_SESSIONS_DESCRIPTION = + "The number of sessions released by the user and pool maintainer."; + static final String NUM_SESSIONS_IN_POOL_DESCRIPTION = "The number of sessions in the pool."; + + static final String SPANNER_GFE_LATENCY = "spanner/gfe_latency"; + static final String SPANNER_GFE_LATENCY_DESCRIPTION = + "Latency between Google's network receiving an RPC and reading back the first byte of the" + + " response"; + static final String SPANNER_GFE_HEADER_MISSING_COUNT = "spanner/gfe_header_missing_count"; + static final String SPANNER_GFE_HEADER_MISSING_COUNT_DESCRIPTION = + "Number of RPC responses received without the server-timing header, most likely means that" + + " the RPC never reached Google's network"; +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/MissingDefaultSequenceKindException.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/MissingDefaultSequenceKindException.java new file mode 100644 index 000000000000..d29a9489c3e1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/MissingDefaultSequenceKindException.java @@ -0,0 +1,53 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.gax.rpc.ApiException; +import java.util.regex.Pattern; +import javax.annotation.Nullable; + +/** + * Exception thrown by Spanner when a DDL statement failed because no default sequence kind has been + * configured for a database. + */ +public class MissingDefaultSequenceKindException extends SpannerException { + private static final long serialVersionUID = 1L; + + private static final Pattern PATTERN = + Pattern.compile( + ".*Please specify the sequence kind explicitly or set the database option" + + " `default_sequence_kind`\\."); + + /** Private constructor. Use {@link SpannerExceptionFactory} to create instances. */ + MissingDefaultSequenceKindException( + DoNotConstructDirectly token, + ErrorCode errorCode, + String message, + Throwable cause, + @Nullable ApiException apiException) { + super(token, errorCode, /* retryable= */ false, message, cause, apiException); + } + + static boolean isMissingDefaultSequenceKindException(Throwable cause) { + if (cause == null + || cause.getMessage() == null + || !PATTERN.matcher(cause.getMessage()).find()) { + return false; + } + return true; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/MultiplexedSessionDatabaseClient.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/MultiplexedSessionDatabaseClient.java new file mode 100644 index 000000000000..1021e1c469d8 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/MultiplexedSessionDatabaseClient.java @@ -0,0 +1,632 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SessionImpl.NO_CHANNEL_HINT; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.rpc.ServerStream; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Options.TransactionOption; +import com.google.cloud.spanner.Options.UpdateOption; +import com.google.cloud.spanner.SessionClient.SessionConsumer; +import com.google.cloud.spanner.SpannerException.ResourceNotFoundException; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.spanner.v1.BatchWriteResponse; +import java.time.Clock; +import java.time.Duration; +import java.time.Instant; +import java.util.BitSet; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; + +/** + * {@link DatabaseClient} implementation that uses a single multiplexed session to execute + * transactions. 
+ */ +final class MultiplexedSessionDatabaseClient extends AbstractMultiplexedSessionDatabaseClient { + /** + * The maximum number of attempts that the client will try to execute CreateSession for the + * initial multiplexed session. This value is only used for the very first multiplexed session + * that is created, and it is only used if the application has not set a waitForMinSessions value. + * If waitForMinSessions has been set, then the client will retry until the duration in + * waitForMinSessions has been reached. + */ + private static final int MAX_INITIAL_CREATE_SESSION_ATTEMPTS = 10; + + @VisibleForTesting + static final Statement DETERMINE_DIALECT_STATEMENT = + Statement.newBuilder( + "select option_value " + + "from information_schema.database_options " + + "where option_name='database_dialect'") + .build(); + + /** + * Represents a single transaction on a multiplexed session. This can be both a single-use or + * multi-use transaction, and both read/write or read-only transaction. This can be compared to a + * 'checked out session' of a pool, except as multiplexed sessions support multiple parallel + * transactions, we do not need to actually check out and exclusively reserve a single session for + * a transaction. This class therefore only contains context information about the current + * transaction, such as the current span, and a reference to the multiplexed session that is used + * for the transaction. 
+ */ + static class MultiplexedSessionTransaction extends SessionImpl { + private final MultiplexedSessionDatabaseClient client; + + private final boolean singleUse; + + private final int singleUseChannelHint; + + private boolean done; + + MultiplexedSessionTransaction( + MultiplexedSessionDatabaseClient client, + ISpan span, + SessionReference sessionReference, + int singleUseChannelHint, + boolean singleUse) { + super(client.sessionClient.getSpanner(), sessionReference, singleUseChannelHint); + this.client = client; + this.singleUse = singleUse; + this.singleUseChannelHint = singleUseChannelHint; + this.client.numSessionsAcquired.incrementAndGet(); + setCurrentSpan(span); + } + + @Override + void onError(SpannerException spannerException) { + if (this.client.resourceNotFoundException.get() == null + && (spannerException instanceof DatabaseNotFoundException + || spannerException instanceof InstanceNotFoundException + || spannerException instanceof SessionNotFoundException)) { + // This could in theory set this field more than once, but we don't want to bother with + // synchronizing, as it does not really matter exactly which error is set. + this.client.resourceNotFoundException.set((ResourceNotFoundException) spannerException); + } + } + + @Override + void onReadDone() { + // This method is called whenever a ResultSet that was returned by this transaction is closed. + // Close the active transaction if this is a single-use transaction. This ensures that the + // active span is ended. + if (this.singleUse && getActiveTransaction() != null) { + getActiveTransaction().close(); + setActive(null); + if (this.singleUseChannelHint != NO_CHANNEL_HINT) { + this.client.channelUsage.clear(this.singleUseChannelHint); + } + this.client.numCurrentSingleUseTransactions.decrementAndGet(); + } + } + + @Override + public CommitResponse writeAtLeastOnceWithOptions( + Iterable mutations, TransactionOption... 
options) throws SpannerException { + CommitResponse response = super.writeAtLeastOnceWithOptions(mutations, options); + onTransactionDone(); + return response; + } + + @Override + void onTransactionDone() { + boolean markedDone = false; + synchronized (this) { + if (!this.done) { + this.done = true; + markedDone = true; + } + } + if (markedDone) { + client.numSessionsReleased.incrementAndGet(); + } + } + + @Override + public void close() { + // no-op, we don't want to delete the multiplexed session. + } + } + + /** + * Keeps track of which channels have been 'given' to single-use transactions for a given Spanner + * instance. + */ + private static final Map<Spanner, BitSet> CHANNEL_USAGE = new HashMap<>(); + + private static final EnumSet<ErrorCode> RETRYABLE_ERROR_CODES = + EnumSet.of(ErrorCode.DEADLINE_EXCEEDED, ErrorCode.RESOURCE_EXHAUSTED, ErrorCode.UNAVAILABLE); + + private final BitSet channelUsage; + + private final int numChannels; + + /** + * The number of single-use read-only transactions currently running on this multiplexed session. + */ + private final AtomicInteger numCurrentSingleUseTransactions = new AtomicInteger(); + + private boolean isClosed; + + /** The duration before we try to replace the multiplexed session. The default is 7 days. */ + private final Duration sessionExpirationDuration; + + private final SessionClient sessionClient; + + private final TraceWrapper tracer; + + /** The current multiplexed session that is used by this client. */ + private final AtomicReference<ApiFuture<SessionReference>> multiplexedSessionReference; + + /** The expiration date/time of the current multiplexed session. */ + private final AtomicReference<Instant> expirationDate; + + /** + * The maintainer runs every 10 minutes to check whether the multiplexed session should be + * refreshed. + */ + private final MultiplexedSessionMaintainer maintainer; + + /** + * If a {@link DatabaseNotFoundException} or {@link InstanceNotFoundException} is returned by the + * server, then we set this field to mark the client as invalid. 
+ */ + private final AtomicReference resourceNotFoundException = + new AtomicReference<>(); + + private final AtomicLong numSessionsAcquired = new AtomicLong(); + + private final AtomicLong numSessionsReleased = new AtomicLong(); + + MultiplexedSessionDatabaseClient(SessionClient sessionClient) { + this(sessionClient, Clock.systemUTC()); + } + + @VisibleForTesting + MultiplexedSessionDatabaseClient(SessionClient sessionClient, Clock clock) { + this.numChannels = sessionClient.getSpanner().getOptions().getNumChannels(); + synchronized (CHANNEL_USAGE) { + CHANNEL_USAGE.putIfAbsent(sessionClient.getSpanner(), new BitSet(numChannels)); + this.channelUsage = CHANNEL_USAGE.get(sessionClient.getSpanner()); + } + this.sessionExpirationDuration = + Duration.ofMillis( + sessionClient + .getSpanner() + .getOptions() + .getSessionPoolOptions() + .getMultiplexedSessionMaintenanceDuration() + .toMillis()); + // Initialize the expiration date to the current time + 7 days to avoid unnecessary null checks. + // The time difference with the actual creation is small enough that it does not matter. + this.expirationDate = new AtomicReference<>(Instant.now().plus(this.sessionExpirationDuration)); + this.sessionClient = sessionClient; + this.maintainer = new MultiplexedSessionMaintainer(clock); + this.tracer = sessionClient.getSpanner().getTracer(); + final SettableApiFuture initialSessionReferenceFuture = + SettableApiFuture.create(); + this.multiplexedSessionReference = new AtomicReference<>(initialSessionReferenceFuture); + + Duration waitDuration = + sessionClient.getSpanner().getOptions().getSessionPoolOptions().getWaitForMinSessions(); + int initialAttempts = + waitDuration == null || waitDuration.isZero() ? 
MAX_INITIAL_CREATE_SESSION_ATTEMPTS : 1; + asyncCreateMultiplexedSession(initialSessionReferenceFuture, initialAttempts); + maybeWaitForSessionCreation( + sessionClient.getSpanner().getOptions().getSessionPoolOptions(), + initialSessionReferenceFuture); + } + + private void asyncCreateMultiplexedSession( + SettableApiFuture sessionReferenceFuture, int remainingAttempts) { + this.sessionClient.asyncCreateMultiplexedSession( + new SessionConsumer() { + @Override + public void onSessionReady(SessionImpl session) { + sessionReferenceFuture.set(session.getSessionReference()); + // only start the maintainer if we actually managed to create a session in the first + // place. + maintainer.start(); + if (sessionClient + .getSpanner() + .getOptions() + .getSessionPoolOptions() + .isAutoDetectDialect()) { + MAINTAINER_SERVICE.submit(() -> getDialect()); + } + } + + @Override + public void onSessionCreateFailure(Throwable t, int createFailureForSessionCount) { + SpannerException spannerException = SpannerExceptionFactory.asSpannerException(t); + if (MultiplexedSessionDatabaseClient.this.resourceNotFoundException.get() == null + && (spannerException instanceof DatabaseNotFoundException + || spannerException instanceof InstanceNotFoundException + || spannerException instanceof SessionNotFoundException)) { + // This could in theory set this field more than once, but we don't want to bother + // with synchronizing, as it does not really matter exactly which error is set. + MultiplexedSessionDatabaseClient.this.resourceNotFoundException.set( + (ResourceNotFoundException) spannerException); + } + // Set the exception to trigger an error for all waiters. + // Then retry the session creation if the error is (potentially) transient. 
+ sessionReferenceFuture.setException(t); + if (remainingAttempts > 1 + && RETRYABLE_ERROR_CODES.contains(spannerException.getErrorCode())) { + final SettableApiFuture future = SettableApiFuture.create(); + MultiplexedSessionDatabaseClient.this.multiplexedSessionReference.set(future); + asyncCreateMultiplexedSession(future, remainingAttempts - 1); + } + } + }); + } + + private void maybeWaitForSessionCreation( + SessionPoolOptions sessionPoolOptions, + SettableApiFuture initialSessionReferenceFuture) { + Duration waitDuration = sessionPoolOptions.getWaitForMinSessions(); + if (waitDuration != null && !waitDuration.isZero()) { + + SpannerException lastException = null; + SettableApiFuture sessionReferenceFuture = initialSessionReferenceFuture; + Duration remainingTime; + + Instant endTime = Instant.now().plus(waitDuration); + while ((remainingTime = Duration.between(Instant.now(), endTime)).toMillis() > 0) { + // If any exception is thrown, then retry the multiplexed session creation + if (sessionReferenceFuture == null) { + sessionReferenceFuture = SettableApiFuture.create(); + asyncCreateMultiplexedSession(sessionReferenceFuture, 1); + this.multiplexedSessionReference.set(sessionReferenceFuture); + } + try { + sessionReferenceFuture.get(remainingTime.toMillis(), TimeUnit.MILLISECONDS); + lastException = null; + break; + } catch (ExecutionException executionException) { + lastException = SpannerExceptionFactory.asSpannerException(executionException.getCause()); + } catch (InterruptedException interruptedException) { + lastException = SpannerExceptionFactory.propagateInterrupt(interruptedException); + } catch (TimeoutException timeoutException) { + lastException = + SpannerExceptionFactory.newSpannerException( + ErrorCode.DEADLINE_EXCEEDED, + "Timed out after waiting " + + waitDuration.toMillis() + + "ms for multiplexed session creation"); + } + // if any exception is thrown, then set the session reference to null to retry the + // multiplexed session creation only 
if the error code is DEADLINE EXCEEDED, UNAVAILABLE or + // RESOURCE_EXHAUSTED + if (RETRYABLE_ERROR_CODES.contains(lastException.getErrorCode())) { + sessionReferenceFuture = null; + } else { + break; + } + } + // if the wait time elapsed and multiplexed session fetch failed then throw the last exception + // that we have received + if (lastException != null) { + throw lastException; + } + } + } + + boolean isValid() { + return resourceNotFoundException.get() == null; + } + + AtomicLong getNumSessionsAcquired() { + return this.numSessionsAcquired; + } + + AtomicLong getNumSessionsReleased() { + return this.numSessionsReleased; + } + + void close() { + synchronized (this) { + if (!this.isClosed) { + this.isClosed = true; + this.maintainer.stop(); + } + } + } + + @VisibleForTesting + MultiplexedSessionMaintainer getMaintainer() { + return this.maintainer; + } + + @VisibleForTesting + SessionReference getCurrentSessionReference() { + try { + return this.multiplexedSessionReference.get().get(); + } catch (ExecutionException executionException) { + throw SpannerExceptionFactory.asSpannerException(executionException.getCause()); + } catch (InterruptedException interruptedException) { + throw SpannerExceptionFactory.propagateInterrupt(interruptedException); + } + } + + /** + * Returns true if the multiplexed session has been created. This client can be used before the + * session has been created, and will in that case use a delayed transaction that contains a + * future reference to the multiplexed session. The delayed transaction will block at the first + * actual statement that is being executed (e.g. the first query that is sent to Spanner). + */ + private boolean isMultiplexedSessionCreated() { + return multiplexedSessionReference.get().isDone(); + } + + private DatabaseClient createMultiplexedSessionTransaction(boolean singleUse) { + Preconditions.checkState(!isClosed, "This client has been closed"); + return isMultiplexedSessionCreated() + ? 
createDirectMultiplexedSessionTransaction(singleUse) + : createDelayedMultiplexSessionTransaction(); + } + + private MultiplexedSessionTransaction createDirectMultiplexedSessionTransaction( + boolean singleUse) { + try { + return new MultiplexedSessionTransaction( + this, + tracer.getCurrentSpan(), + // Getting the result of the SettableApiFuture that contains the multiplexed session will + // also automatically propagate any error that happened during the creation of the + // session, such as for example a DatabaseNotFound exception. We therefore do not need + // any special handling of such errors. + multiplexedSessionReference.get().get(), + singleUse ? getSingleUseChannelHint() : NO_CHANNEL_HINT, + singleUse); + } catch (ExecutionException executionException) { + throw SpannerExceptionFactory.asSpannerException(executionException.getCause()); + } catch (InterruptedException interruptedException) { + throw SpannerExceptionFactory.propagateInterrupt(interruptedException); + } + } + + private DelayedMultiplexedSessionTransaction createDelayedMultiplexSessionTransaction() { + return new DelayedMultiplexedSessionTransaction( + this, tracer.getCurrentSpan(), multiplexedSessionReference.get()); + } + + private int getSingleUseChannelHint() { + if (this.numCurrentSingleUseTransactions.incrementAndGet() > this.numChannels) { + return NO_CHANNEL_HINT; + } + synchronized (this.channelUsage) { + // Get the first unused channel. + int channel = this.channelUsage.nextClearBit(/* fromIndex= */ 0); + // BitSet returns an index larger than its original size if all the bits are set. + // This then means that all channels have already been assigned to single-use transactions, + // and that we should not use a specific channel, but rather pick a random one. 
+ if (channel == this.numChannels) { + return NO_CHANNEL_HINT; + } + this.channelUsage.set(channel); + return channel; + } + } + + private final AbstractLazyInitializer dialectSupplier = + new AbstractLazyInitializer() { + @Override + protected Dialect initialize() { + try (ResultSet dialectResultSet = singleUse().executeQuery(DETERMINE_DIALECT_STATEMENT)) { + if (dialectResultSet.next()) { + return Dialect.fromName(dialectResultSet.getString(0)); + } + } + // This should not really happen, but it is the safest fallback value. + return Dialect.GOOGLE_STANDARD_SQL; + } + }; + + @Override + public Dialect getDialect() { + try { + return dialectSupplier.get(); + } catch (Exception exception) { + throw SpannerExceptionFactory.asSpannerException(exception); + } + } + + Future getDialectAsync() { + try { + return MAINTAINER_SERVICE.submit(dialectSupplier::get); + } catch (Exception exception) { + throw SpannerExceptionFactory.asSpannerException(exception); + } + } + + @Override + public String getDatabaseRole() { + return this.sessionClient.getSpanner().getOptions().getDatabaseRole(); + } + + @Override + public Timestamp write(Iterable mutations) throws SpannerException { + return createMultiplexedSessionTransaction(/* singleUse= */ false).write(mutations); + } + + @Override + public CommitResponse writeWithOptions( + final Iterable mutations, final TransactionOption... options) + throws SpannerException { + return createMultiplexedSessionTransaction(/* singleUse= */ false) + .writeWithOptions(mutations, options); + } + + @Override + public CommitResponse writeAtLeastOnceWithOptions( + Iterable mutations, TransactionOption... options) throws SpannerException { + return createMultiplexedSessionTransaction(/* singleUse= */ true) + .writeAtLeastOnceWithOptions(mutations, options); + } + + @Override + public ServerStream batchWriteAtLeastOnce( + Iterable mutationGroups, TransactionOption... 
options) + throws SpannerException { + return createMultiplexedSessionTransaction(/* singleUse= */ true) + .batchWriteAtLeastOnce(mutationGroups, options); + } + + @Override + public ReadContext singleUse() { + return createMultiplexedSessionTransaction(/* singleUse= */ true).singleUse(); + } + + @Override + public ReadContext singleUse(TimestampBound bound) { + return createMultiplexedSessionTransaction(/* singleUse= */ true).singleUse(bound); + } + + @Override + public ReadOnlyTransaction singleUseReadOnlyTransaction() { + return createMultiplexedSessionTransaction(/* singleUse= */ true) + .singleUseReadOnlyTransaction(); + } + + @Override + public ReadOnlyTransaction singleUseReadOnlyTransaction(TimestampBound bound) { + return createMultiplexedSessionTransaction(/* singleUse= */ true) + .singleUseReadOnlyTransaction(bound); + } + + @Override + public ReadOnlyTransaction readOnlyTransaction() { + return createMultiplexedSessionTransaction(/* singleUse= */ false).readOnlyTransaction(); + } + + @Override + public ReadOnlyTransaction readOnlyTransaction(TimestampBound bound) { + return createMultiplexedSessionTransaction(/* singleUse= */ false).readOnlyTransaction(bound); + } + + @Override + public TransactionRunner readWriteTransaction(TransactionOption... options) { + return createMultiplexedSessionTransaction(/* singleUse= */ false) + .readWriteTransaction(options); + } + + @Override + public TransactionManager transactionManager(TransactionOption... options) { + return createMultiplexedSessionTransaction(/* singleUse= */ false).transactionManager(options); + } + + @Override + public AsyncRunner runAsync(TransactionOption... options) { + return createMultiplexedSessionTransaction(/* singleUse= */ false).runAsync(options); + } + + @Override + public AsyncTransactionManager transactionManagerAsync(TransactionOption... 
options) { + return createMultiplexedSessionTransaction(/* singleUse= */ false) + .transactionManagerAsync(options); + } + + @Override + public long executePartitionedUpdate(Statement stmt, UpdateOption... options) { + return createMultiplexedSessionTransaction(/* singleUse= */ false) + .executePartitionedUpdate(stmt, options); + } + + /** + * It is enough with one executor to maintain the multiplexed sessions in all the clients, as they + * do not need to be updated often, and the maintenance task is light. The core pool size is set + * to 1 to prevent continuous creating and tearing down threads, and to avoid high CPU usage when + * running on Java 8 due to + * https://bugs.openjdk.org/browse/JDK-8129861. + */ + private static final ScheduledExecutorService MAINTAINER_SERVICE = + Executors.newScheduledThreadPool( + /* corePoolSize= */ 1, + ThreadFactoryUtil.createVirtualOrPlatformDaemonThreadFactory( + "multiplexed-session-maintainer", /* tryVirtual= */ false)); + + final class MultiplexedSessionMaintainer { + private final Clock clock; + + private ScheduledFuture scheduledFuture; + + MultiplexedSessionMaintainer(Clock clock) { + this.clock = clock; + } + + private synchronized void start() { + // Schedule the maintainer to run once every ten minutes (by default). 
+ long loopFrequencyMillis = + MultiplexedSessionDatabaseClient.this + .sessionClient + .getSpanner() + .getOptions() + .getSessionPoolOptions() + .getMultiplexedSessionMaintenanceLoopFrequency() + .toMillis(); + this.scheduledFuture = + MAINTAINER_SERVICE.scheduleAtFixedRate( + this::maintain, loopFrequencyMillis, loopFrequencyMillis, TimeUnit.MILLISECONDS); + } + + private synchronized void stop() { + if (this.scheduledFuture != null) { + this.scheduledFuture.cancel(false); + } + } + + void maintain() { + if (clock.instant().isAfter(expirationDate.get())) { + sessionClient.asyncCreateMultiplexedSession( + new SessionConsumer() { + @Override + public void onSessionReady(SessionImpl session) { + multiplexedSessionReference.set( + ApiFutures.immediateFuture(session.getSessionReference())); + expirationDate.set( + clock + .instant() + .plus(MultiplexedSessionDatabaseClient.this.sessionExpirationDuration)); + } + + @Override + public void onSessionCreateFailure(Throwable t, int createFailureForSessionCount) { + // ignore any errors during re-creation of the multiplexed session. This means that + // we continue to use the session that has passed its expiration date for now, and + // that a new attempt at creating a new session will be done in 10 minutes from now. + } + }); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/MutableCredentials.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/MutableCredentials.java new file mode 100644 index 000000000000..9d09b9fe2686 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/MutableCredentials.java @@ -0,0 +1,119 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.spanner; + +import com.google.auth.CredentialTypeForMetrics; +import com.google.auth.Credentials; +import com.google.auth.RequestMetadataCallback; +import com.google.auth.oauth2.ServiceAccountCredentials; +import java.io.IOException; +import java.net.URI; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.Executor; +import javax.annotation.Nonnull; + +/** + * A mutable {@link Credentials} implementation that delegates authentication behavior to a scoped + * {@link ServiceAccountCredentials} instance. + * + *

This class is intended for scenarios where an application needs to replace the underlying + * service account credentials for a long-running Spanner Client. + * + *

All operations inherited from {@link Credentials} are forwarded to the current delegate, + * including request metadata retrieval and token refresh. Calling {@link + * #updateCredentials(ServiceAccountCredentials)} replaces the delegate with a newly scoped + * credentials instance created from the same scopes that were provided when this object was + * constructed. + */ +public class MutableCredentials extends Credentials { + private volatile ServiceAccountCredentials delegate; + private final Set scopes; + + /** Creates a MutableCredentials instance with default spanner scopes. */ + public MutableCredentials(ServiceAccountCredentials credentials) { + this(credentials, SpannerOptions.SCOPES); + } + + public MutableCredentials( + @Nonnull ServiceAccountCredentials credentials, @Nonnull Set scopes) { + Objects.requireNonNull(credentials, "credentials must not be null"); + Objects.requireNonNull(scopes, "scopes must not be null"); + if (scopes.isEmpty()) { + throw new IllegalArgumentException("Scopes must not be empty"); + } + this.scopes = new java.util.HashSet<>(scopes); + delegate = (ServiceAccountCredentials) credentials.createScoped(this.scopes); + } + + /** + * Replaces the current delegate with a newly scoped credentials instance. + * + *

Note any in-flight RPC may continue to use the old credentials. + * + *

The provided {@link ServiceAccountCredentials} is scoped using the same scopes that were + * supplied when this {@link MutableCredentials} instance was created. + * + * @param credentials the new base service account credentials to scope and use for client + * authorization. + */ + public void updateCredentials(@Nonnull ServiceAccountCredentials credentials) { + Objects.requireNonNull(credentials, "credentials must not be null"); + delegate = (ServiceAccountCredentials) credentials.createScoped(scopes); + } + + @Override + public String getAuthenticationType() { + return delegate.getAuthenticationType(); + } + + @Override + public Map> getRequestMetadata(URI uri) throws IOException { + return delegate.getRequestMetadata(uri); + } + + @Override + public boolean hasRequestMetadata() { + return delegate.hasRequestMetadata(); + } + + @Override + public boolean hasRequestMetadataOnly() { + return delegate.hasRequestMetadataOnly(); + } + + @Override + public void refresh() throws IOException { + delegate.refresh(); + } + + @Override + public void getRequestMetadata(URI uri, Executor executor, RequestMetadataCallback callback) { + delegate.getRequestMetadata(uri, executor, callback); + } + + @Override + public String getUniverseDomain() throws IOException { + return delegate.getUniverseDomain(); + } + + @Override + public CredentialTypeForMetrics getMetricsCredentialType() { + return delegate.getMetricsCredentialType(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Mutation.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Mutation.java new file mode 100644 index 000000000000..0545221804c5 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Mutation.java @@ -0,0 +1,755 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; + +import com.google.common.collect.ImmutableList; +import com.google.protobuf.ListValue; +import com.google.protobuf.Timestamp; +import java.io.Serializable; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ThreadLocalRandom; +import javax.annotation.Nullable; + +/** + * Represents an individual table modification to be applied to Cloud Spanner. + * + *

The types of mutation that can be created are defined by {@link Op}. To construct a mutation, + * use one of the builder methods. For example, to create a mutation that will insert a value of "x" + * into "C1" and a value of "y" into "C2" of table "T", write the following code: + * + *

+ *     Mutation m = Mutation.newInsertBuilder("T")
+ *         .set("C1").to("x")
+ *         .set("C2").to("y")
+ *         .build();
+ * 
+ * + * Mutations are applied to a database by performing a standalone write or buffering them as part of + * a transaction. TODO(user): Add links/code samples once the corresponding APIs are available. + * + *

{@code Mutation} instances are immutable. + */ +public final class Mutation implements Serializable { + private static final long serialVersionUID = 1784900828296918555L; + + /** Enumerates the types of mutation that can be applied. */ + public enum Op { + /** + * Inserts a new row in a table. If the row already exists, the write or transaction fails with + * {@link ErrorCode#ALREADY_EXISTS}. When inserting a row, all NOT NULL columns in the table + * must be given a value. + */ + INSERT, + + /** + * Updates an existing row in a table. If the row does not already exist, the transaction fails + * with error {@link ErrorCode#NOT_FOUND}. + */ + UPDATE, + + /** + * Like {@link #INSERT}, except that if the row already exists, then its column values are + * overwritten with the ones provided. All NOT NUll columns in the table must be give a value + * and this holds true even when the row already exists and will actually be updated. Values for + * all NULL columns not explicitly written are preserved. + */ + INSERT_OR_UPDATE, + + /** + * Like {@link #INSERT}, except that if the row already exists, it is deleted, and the column + * values provided are inserted instead. Unlike {@link #INSERT_OR_UPDATE}, this means any values + * not explicitly written become {@code NULL}. + */ + REPLACE, + + /** Deletes rows from a table. Succeeds whether or not the named rows were present. */ + DELETE, + + /** Send a message to a queue, optionally with specified delivery time. */ + SEND, + + /** Acknowledge a message in a queue. Ack only succeeds if the message still exists. 
*/ + ACK, + } + + private final String table; + private final Op operation; + private final ImmutableList columns; + private final ImmutableList values; + private final KeySet keySet; + // Queue related fields + private final String queue; + private final Key key; + private final Value payload; + private final Instant deliveryTime; + private final boolean ignoreNotFound; + + private Mutation( + String table, + Op operation, + @Nullable ImmutableList columns, + @Nullable ImmutableList values, + @Nullable KeySet keySet) { + this(table, operation, columns, values, keySet, null, null, null, null, false); + } + + private Mutation( + @Nullable String table, + Op operation, + @Nullable ImmutableList columns, + @Nullable ImmutableList values, + @Nullable KeySet keySet, + @Nullable String queue, + @Nullable Key key, + @Nullable Value payload, + @Nullable Instant deliveryTime, + boolean ignoreNotFound) { + this.table = table; + this.operation = operation; + this.columns = columns; + this.values = values; + this.keySet = keySet; + this.queue = queue; + this.key = key; + this.payload = payload; + this.deliveryTime = deliveryTime; + this.ignoreNotFound = ignoreNotFound; + } + + /** + * Returns a builder that can be used to construct an {@link Op#INSERT} mutation against {@code + * table}; see the {@code INSERT} documentation for mutation semantics. + */ + public static WriteBuilder newInsertBuilder(String table) { + return new WriteBuilder(table, Op.INSERT); + } + + /** + * Returns a builder that can be used to construct an {@link Op#UPDATE} mutation against {@code + * table}; see the {@code UPDATE} documentation for mutation semantics. + */ + public static WriteBuilder newUpdateBuilder(String table) { + return new WriteBuilder(table, Op.UPDATE); + } + + /** + * Returns a builder that can be used to construct an {@link Op#INSERT_OR_UPDATE} mutation against + * {@code table}; see the {@code INSERT_OR_UPDATE} documentation for mutation semantics. 
+ */ + public static WriteBuilder newInsertOrUpdateBuilder(String table) { + return new WriteBuilder(table, Op.INSERT_OR_UPDATE); + } + + /** + * Returns a builder that can be used to construct an {@link Op#REPLACE} mutation against {@code + * table}; see the {@code REPLACE} documentation for mutation semantics. + */ + public static WriteBuilder newReplaceBuilder(String table) { + return new WriteBuilder(table, Op.REPLACE); + } + + /** + * Returns a mutation that will delete the row with primary key {@code key}. Exactly equivalent to + * {@code delete(table, KeySet.singleKey(key))}. + */ + public static Mutation delete(String table, Key key) { + return delete(table, KeySet.singleKey(key)); + } + + /** Returns a mutation that will delete all rows with primary keys covered by {@code keySet}. */ + public static Mutation delete(String table, KeySet keySet) { + return new Mutation(table, Op.DELETE, null, null, checkNotNull(keySet)); + } + + /** + * Returns a builder that can be used to construct an {@link Op#SEND} mutation against {@code + * queue}; see the {@code SEND} documentation for mutation semantics. + */ + public static SendBuilder newSendBuilder(String queue) { + return new SendBuilder(queue); + } + + /** + * Returns a builder that can be used to construct an {@link Op#ACK} mutation against {@code + * queue}; see the {@code ACK} documentation for mutation semantics. + */ + public static AckBuilder newAckBuilder(String queue) { + return new AckBuilder(queue); + } + + /** + * Builder for {@link Op#INSERT}, {@link Op#INSERT_OR_UPDATE}, {@link Op#UPDATE}, and {@link + * Op#REPLACE} mutations. 
+ */ + public static class WriteBuilder { + private final String table; + private final Op operation; + private final ImmutableList.Builder columns; + private final ImmutableList.Builder values; + private final ValueBinder binder; + private String currentColumn; + + private WriteBuilder(String table, Op operation) { + this.table = checkNotNull(table); + this.operation = operation; + // Empty writes are sufficiently rare that it is not worth optimizing for that case. + this.columns = ImmutableList.builder(); + this.values = ImmutableList.builder(); + class BinderImpl extends ValueBinder { + @Override + WriteBuilder handle(Value value) { + checkBindingInProgress(true); + columns.add(currentColumn); + values.add(value); + currentColumn = null; + return WriteBuilder.this; + } + } + this.binder = new BinderImpl(); + } + + /** + * Returns a binder to set the value of {@code columnName} that should be applied by the + * mutation. + */ + public ValueBinder set(String columnName) { + checkBindingInProgress(false); + currentColumn = checkNotNull(columnName); + return binder; + } + + /** + * Returns a newly created {@code Mutation} based on the contents of the {@code Builder}. + * + * @throws IllegalStateException if any duplicate columns are present. Duplicate detection is + * case-insensitive. 
+ */ + public Mutation build() { + checkBindingInProgress(false); + ImmutableList columnNames = columns.build(); + checkDuplicateColumns(columnNames); + return new Mutation(table, operation, columnNames, values.build(), null); + } + + private void checkBindingInProgress(boolean expectInProgress) { + if (expectInProgress) { + checkState(currentColumn != null, "No binding currently active"); + } else if (currentColumn != null) { + throw new IllegalStateException("Incomplete binding for column " + currentColumn); + } + } + + private void checkDuplicateColumns(ImmutableList columnNames) { + Set columnNameSet = new HashSet<>(); + for (String columnName : columnNames) { + columnName = columnName.toLowerCase(); + if (columnNameSet.contains(columnName)) { + throw new IllegalStateException("Duplicate column: " + columnName); + } + columnNameSet.add(columnName); + } + } + } + + /** Builder for {@link Op#SEND} mutation. */ + public static class SendBuilder { + private final String queue; + private Key key; + private Value payload; + private Instant deliveryTime; + + private SendBuilder(String queue) { + this.queue = checkNotNull(queue); + } + + public SendBuilder setKey(Key key) { + this.key = checkNotNull(key); + return this; + } + + public SendBuilder setPayload(Value payload) { + this.payload = checkNotNull(payload); + return this; + } + + public SendBuilder setDeliveryTime(Instant deliveryTime) { + this.deliveryTime = deliveryTime; + return this; + } + + public Mutation build() { + checkState(key != null, "Key must be set for Send mutation"); + checkState(payload != null, "Payload must be set for Send mutation"); + return new Mutation( + null, Op.SEND, null, null, null, queue, key, payload, deliveryTime, false); + } + } + + /** Builder for {@link Op#ACK} mutation. 
*/ + public static class AckBuilder { + private final String queue; + private Key key; + private boolean ignoreNotFound = false; + + private AckBuilder(String queue) { + this.queue = checkNotNull(queue); + } + + public AckBuilder setKey(Key key) { + this.key = checkNotNull(key); + return this; + } + + public AckBuilder setIgnoreNotFound(boolean ignoreNotFound) { + this.ignoreNotFound = ignoreNotFound; + return this; + } + + public Mutation build() { + checkState(key != null, "Key must be set for Ack mutation"); + return new Mutation(null, Op.ACK, null, null, null, queue, key, null, null, ignoreNotFound); + } + } + + /** Returns the name of the table that this mutation will affect. */ + public String getTable() { + return table; + } + + /** Returns the type of operation that this mutation will perform. */ + public Op getOperation() { + return operation; + } + + /** + * For all types except {@link Op#DELETE}, returns the columns that this mutation will affect. + * + * @throws IllegalStateException if {@code operation() == Op.DELETE} + */ + public Iterable getColumns() { + checkState(operation != Op.DELETE, "columns() cannot be called for a DELETE mutation"); + return columns; + } + + /** + * For all types except {@link Op#DELETE}, {@link Op#SEND}, and {@link Op#ACK}, returns the values + * that this mutation will write. The number of elements returned is always the same as the number + * returned by {@link #getColumns()}, and the {@code i}th value corresponds to the {@code i}th + * column. + * + * @throws IllegalStateException if {@code operation() == Op.DELETE or operation() == Op.SEND or + * operation() == Op.ACK} + */ + public Iterable getValues() { + checkState( + operation != Op.DELETE && operation != Op.SEND && operation != Op.ACK, + "values() cannot be called for a DELETE/SEND/ACK mutation"); + return values; + } + + /** Returns the name of the queue that this mutation will affect. 
*/ + public String getQueue() { + checkState( + operation == Op.SEND || operation == Op.ACK, + "getQueue() can only be called " + "for SEND or ACK mutations"); + return queue; + } + + /** Returns the key of the message to the queue that this mutation will affect. */ + public Key getKey() { + checkState( + operation == Op.SEND || operation == Op.ACK, + "getKey() can only be called for " + "SEND or ACK mutations"); + return key; + } + + /** Returns the payload of the message to the queue that this mutation will affect. */ + public Value getPayload() { + checkState(operation == Op.SEND, "getPayload() can only be called for a SEND mutation"); + return payload; + } + + /** Returns the delivery timestamp of the message to the queue that this mutation will affect. */ + @Nullable + public Instant getDeliveryTime() { + checkState(operation == Op.SEND, "getDeliverTime() can only be called for a SEND mutation"); + return deliveryTime; + } + + /** + * Returns whether an error will be ignored for an ACK mutation that affects a message that does + * not exist + */ + public boolean getIgnoreNotFound() { + checkState(operation == Op.ACK, "getIgnoreNotFound() can only be called for an ACK mutation"); + return ignoreNotFound; + } + + /** + * For all types except {@link Op#DELETE}, {@link Op#SEND}, and {@link Op#ACK}, constructs a map + * from column name to value. This is mainly intended as a convenience for testing; direct access + * via {@link #getColumns()} and {@link #getValues()} is more efficient. + * + * @throws IllegalStateException if {@code operation() == Op.DELETE or operation() == Op.SEND or + * operation() == Op.ACK}, or if any duplicate columns are present. Detection of duplicates + * does not consider case. 
+ */ + public Map asMap() { + checkState( + operation != Op.DELETE && operation != Op.SEND && operation != Op.ACK, + "asMap() cannot be called for a DELETE/SEND/ACK mutation"); + LinkedHashMap map = new LinkedHashMap<>(); + for (int i = 0; i < columns.size(); ++i) { + Value existing = map.put(columns.get(i), values.get(i)); + } + return Collections.unmodifiableMap(map); + } + + /** + * For {@link Op#DELETE} mutations, returns the key set that defines the rows to be deleted. + * + * @throws IllegalStateException if {@code operation() != Op.DELETE} + */ + public KeySet getKeySet() { + checkState(operation == Op.DELETE, "keySet() can only be called for a DELETE mutation"); + return keySet; + } + + void toString(StringBuilder b) { + String opName; + boolean isWrite; + switch (operation) { + case INSERT: + opName = "insert"; + isWrite = true; + break; + case INSERT_OR_UPDATE: + opName = "insert_or_update"; + isWrite = true; + break; + case UPDATE: + opName = "update"; + isWrite = true; + break; + case REPLACE: + opName = "replace"; + isWrite = true; + break; + case DELETE: + opName = "delete"; + isWrite = false; + break; + case SEND: + // return directly for SEND + b.append("send(").append(queue).append('{'); + b.append("key=").append(key); + b.append(", payload=").append(payload); + if (deliveryTime != null) { + b.append(", deliveryTime=").append(deliveryTime); + } + b.append("})"); + return; + case ACK: + // return directly for ACK + b.append("ack(").append(queue).append('{'); + b.append("key=").append(key); + if (ignoreNotFound) { + b.append(", ignoreNotFound=true"); + } + b.append("})"); + return; + default: + throw new AssertionError("Unhandled Op: " + operation); + } + if (isWrite) { + b.append(opName).append('(').append(table).append('{'); + for (int i = 0; i < columns.size(); ++i) { + if (i > 0) { + b.append(','); + } + b.append(columns.get(i)); + b.append('='); + b.append(values.get(i)); + } + b.append("})"); + } else { + b.append("delete(").append(table); + 
keySet.toString(b); + b.append(')'); + } + } + + @Override + public String toString() { + StringBuilder b = new StringBuilder(); + toString(b); + return b.toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Mutation that = (Mutation) o; + if (operation != that.operation) { + return false; + } + + if (operation == Op.SEND) { + return Objects.equals(queue, that.queue) + && Objects.equals(key, that.key) + && Objects.equals(payload, that.payload) + && Objects.equals(deliveryTime, that.deliveryTime); + } + + if (operation == Op.ACK) { + return Objects.equals(queue, that.queue) + && Objects.equals(key, that.key) + && Objects.equals(ignoreNotFound, that.ignoreNotFound); + } + + return Objects.equals(table, that.table) + && Objects.equals(columns, that.columns) + && areValuesEqual(values, that.values) + && Objects.equals(keySet, that.keySet); + } + + @Override + public int hashCode() { + return Objects.hash( + operation, table, columns, values, keySet, key, payload, deliveryTime, ignoreNotFound); + } + + /** + * We are relaxing equality values here, making sure that Double.NaNs and Float.NaNs are equal to + * each other. This is because our Cloud Spanner Import / Export template in Apache Beam uses the + * mutation equality to check for modifications before committing. We noticed that when NaNs where + * used the template would always indicate a modification was present, when it turned out not to + * be the case. For more information see b/206339664. + * + *

Similar change is being done while calculating `Value.hashCode()`. + */ + private boolean areValuesEqual(List values, List otherValues) { + if (values == null && otherValues == null) { + return true; + } else if (values == null || otherValues == null) { + return false; + } else if (values.size() != otherValues.size()) { + return false; + } else { + for (int i = 0; i < values.size(); i++) { + final Value value = values.get(i); + final Value otherValue = otherValues.get(i); + if (!value.equals(otherValue) && (!isNaN(value) || !isNaN(otherValue))) { + return false; + } + } + return true; + } + } + + private boolean isNaN(Value value) { + return !value.isNull() && (isFloat64NaN(value) || isFloat32NaN(value)); + } + + // Checks if the Float64 value is either a "Double" or a "Float" NaN. + // Refer the comment above `areValuesEqual` for more details. + private boolean isFloat64NaN(Value value) { + return value.getType().equals(Type.float64()) && Double.isNaN(value.getFloat64()); + } + + // Checks if the Float32 value is either a "Double" or a "Float" NaN. + // Refer the comment above `areValuesEqual` for more details. + private boolean isFloat32NaN(Value value) { + return value.getType().equals(Type.float32()) && Float.isNaN(value.getFloat32()); + } + + /** + * Converts the list of mutations to the corresponding protobuf mutations and returns a random + * mutation from the available list based on the following heuristics: + * + *

    + *
  1. Prefer mutations other than INSERT, as INSERT mutations may contain autogenerated + * columns whose information is unavailable on the client. + *
  2. If the list only contains INSERT mutations, select the one with the highest number of + * values. + *
+ */ + static com.google.spanner.v1.Mutation toProtoAndReturnRandomMutation( + Iterable mutations, List out) { + Mutation last = null; + // The mutation currently being built. + com.google.spanner.v1.Mutation.Builder proto = null; + // The "write" (!= DELETE) or "keySet" (==DELETE) for the last mutation encoded, for coalescing. + com.google.spanner.v1.Mutation.Write.Builder write = null; + com.google.spanner.v1.KeySet.Builder keySet = null; + + // Stores all the mutations excluding INSERT mutations. + List allMutationsExcludingInsert = new ArrayList<>(); + // Stores the INSERT mutation with largest number of values. + com.google.spanner.v1.Mutation largestInsertMutation = + com.google.spanner.v1.Mutation.getDefaultInstance(); + + for (Mutation mutation : mutations) { + if (mutation.operation == Op.DELETE) { + if (last != null && last.operation == Op.DELETE && mutation.table.equals(last.table)) { + mutation.keySet.appendToProto(keySet); + } else { + largestInsertMutation = + flushMutation(out, proto, allMutationsExcludingInsert, largestInsertMutation); + proto = com.google.spanner.v1.Mutation.newBuilder(); + com.google.spanner.v1.Mutation.Delete.Builder delete = + proto.getDeleteBuilder().setTable(mutation.table); + keySet = delete.getKeySetBuilder(); + mutation.keySet.appendToProto(keySet); + } + write = null; + } else if (mutation.operation == Op.SEND) { + largestInsertMutation = + flushMutation(out, proto, allMutationsExcludingInsert, largestInsertMutation); + proto = com.google.spanner.v1.Mutation.newBuilder(); + com.google.spanner.v1.Mutation.Send.Builder send = + proto + .getSendBuilder() + .setQueue(mutation.queue) + .setKey(mutation.key.toProto()) + .setPayload(mutation.payload.toProto()); + if (mutation.getDeliveryTime() != null) { + Instant deliveryTime = mutation.getDeliveryTime(); + Timestamp.Builder timeBuilder = + send.getDeliverTimeBuilder() + .setSeconds(deliveryTime.getEpochSecond()) + .setNanos(deliveryTime.getNano()); + 
send.setDeliverTime(timeBuilder); + } + } else if (mutation.operation == Op.ACK) { + largestInsertMutation = + flushMutation(out, proto, allMutationsExcludingInsert, largestInsertMutation); + proto = com.google.spanner.v1.Mutation.newBuilder(); + proto + .getAckBuilder() + .setQueue(mutation.queue) + .setKey(mutation.getKey().toProto()) + .setIgnoreNotFound(mutation.ignoreNotFound); + } else { + ListValue.Builder values = ListValue.newBuilder(); + for (Value value : mutation.getValues()) { + values.addValues(value.toProto()); + } + if (last != null + && mutation.operation == last.operation + && mutation.table.equals(last.table) + && mutation.columns.equals(last.columns)) { + // Same as previous mutation: coalesce values to reduce request size. + write.addValues(values); + } else { + largestInsertMutation = + flushMutation(out, proto, allMutationsExcludingInsert, largestInsertMutation); + proto = com.google.spanner.v1.Mutation.newBuilder(); + switch (mutation.operation) { + case INSERT: + write = proto.getInsertBuilder(); + break; + case UPDATE: + write = proto.getUpdateBuilder(); + break; + case INSERT_OR_UPDATE: + write = proto.getInsertOrUpdateBuilder(); + break; + case REPLACE: + write = proto.getReplaceBuilder(); + break; + default: + throw new AssertionError("Impossible: " + mutation.operation); + } + write.setTable(mutation.table).addAllColumns(mutation.columns).addValues(values); + } + keySet = null; + } + last = mutation; + } + // Flush last item. + largestInsertMutation = + flushMutation(out, proto, allMutationsExcludingInsert, largestInsertMutation); + + // Select a random mutation based on the heuristic. 
+ if (!allMutationsExcludingInsert.isEmpty()) { + return allMutationsExcludingInsert.get( + ThreadLocalRandom.current().nextInt(allMutationsExcludingInsert.size())); + } else { + return largestInsertMutation; + } + } + + private static com.google.spanner.v1.Mutation flushMutation( + List out, + com.google.spanner.v1.Mutation.Builder proto, + List allMutationsExcludingInsert, + com.google.spanner.v1.Mutation largestInsertMutation) { + if (proto != null) { + com.google.spanner.v1.Mutation builtMutation = proto.build(); + out.add(builtMutation); + // Skip tracking the largest insert mutation if there are mutations other than INSERT. + if (allMutationsExcludingInsert.isEmpty() + && checkIfInsertMutationWithLargeValue(builtMutation, largestInsertMutation)) { + largestInsertMutation = builtMutation; + } + maybeAddMutationToListExcludingInserts(builtMutation, allMutationsExcludingInsert); + } + return largestInsertMutation; + } + + // Returns true if the input mutation is of type INSERT and has more values than the current + // largest insert mutation. + private static boolean checkIfInsertMutationWithLargeValue( + com.google.spanner.v1.Mutation mutation, + com.google.spanner.v1.Mutation largestInsertMutation) { + // If largestInsertMutation is a default instance of Mutation, replace it with the current + // INSERT mutation, even if it contains zero values. + if (mutation.hasInsert() && !largestInsertMutation.hasInsert()) { + return true; + } + return mutation.hasInsert() + && mutation.getInsert().getValuesCount() + > largestInsertMutation.getInsert().getValuesCount(); + } + + // Stores all mutations that are not of type INSERT. 
+ private static void maybeAddMutationToListExcludingInserts( + com.google.spanner.v1.Mutation mutation, + List allMutationsExcludingInsert) { + if (!mutation.hasInsert()) { + allMutationsExcludingInsert.add(mutation); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/MutationGroup.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/MutationGroup.java new file mode 100644 index 000000000000..67d95290030a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/MutationGroup.java @@ -0,0 +1,63 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.spanner.v1.BatchWriteRequest; +import java.util.ArrayList; +import java.util.List; + +/** Represents a group of Cloud Spanner mutations to be committed together. */ +public class MutationGroup { + private final ImmutableList mutations; + + private MutationGroup(ImmutableList mutations) { + this.mutations = mutations; + } + + /** Creates a {@code MutationGroup} given a vararg of mutations. */ + public static MutationGroup of(Mutation... 
mutations) { + Preconditions.checkArgument(mutations.length > 0, "Should pass in at least one mutation."); + return new MutationGroup(ImmutableList.copyOf(mutations)); + } + + /** Creates a {@code MutationGroup} given an iterable of mutations. */ + public static MutationGroup of(Iterable mutations) { + return new MutationGroup(ImmutableList.copyOf(mutations)); + } + + /** Returns corresponding mutations for this MutationGroup. */ + public ImmutableList getMutations() { + return mutations; + } + + static BatchWriteRequest.MutationGroup toProto(final MutationGroup mutationGroup) { + List mutationsProto = new ArrayList<>(); + Mutation.toProtoAndReturnRandomMutation(mutationGroup.getMutations(), mutationsProto); + return BatchWriteRequest.MutationGroup.newBuilder().addAllMutations(mutationsProto).build(); + } + + static List toListProto( + final Iterable mutationGroups) { + List mutationGroupsProto = new ArrayList<>(); + for (MutationGroup group : mutationGroups) { + mutationGroupsProto.add(toProto(group)); + } + return mutationGroupsProto; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/NoRowsResultSet.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/NoRowsResultSet.java new file mode 100644 index 000000000000..d78a157ca470 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/NoRowsResultSet.java @@ -0,0 +1,63 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import java.util.List; +import javax.annotation.Nullable; + +class NoRowsResultSet extends AbstractResultSet> { + private final ResultSetStats stats; + private final ResultSetMetadata metadata; + + NoRowsResultSet(ResultSet resultSet) { + this.stats = resultSet.getStats(); + this.metadata = resultSet.getMetadata(); + } + + @Override + protected GrpcStruct currRow() { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "This result set has no rows"); + } + + @Override + public boolean next() throws SpannerException { + return false; + } + + @Override + public void close() {} + + @Nullable + @Override + public ResultSetStats getStats() { + return stats; + } + + @Override + public ResultSetMetadata getMetadata() { + return metadata; + } + + @Override + public Type getType() { + return Type.struct(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenCensusScope.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenCensusScope.java new file mode 100644 index 000000000000..81c1db16571d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenCensusScope.java @@ -0,0 +1,33 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import io.opencensus.common.Scope; + +class OpenCensusScope implements IScope { + + private final Scope openCensusScope; + + OpenCensusScope(Scope openCensusScope) { + this.openCensusScope = openCensusScope; + } + + @Override + public void close() { + openCensusScope.close(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenCensusSpan.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenCensusSpan.java new file mode 100644 index 000000000000..86e43778f313 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenCensusSpan.java @@ -0,0 +1,111 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.common.collect.ImmutableMap; +import io.opencensus.contrib.grpc.util.StatusConverter; +import io.opencensus.trace.AttributeValue; +import io.opencensus.trace.EndSpanOptions; +import io.opencensus.trace.Span; +import io.opencensus.trace.Status; +import java.util.HashMap; +import java.util.Map; + +public class OpenCensusSpan implements ISpan { + + static final EndSpanOptions END_SPAN_OPTIONS = + EndSpanOptions.builder().setSampleToLocalSpanStore(true).build(); + private final Span openCensusSpan; + + public OpenCensusSpan(Span openCensusSpan) { + this.openCensusSpan = openCensusSpan; + } + + Span getOpenCensusSpan() { + return openCensusSpan; + } + + private ImmutableMap getOpenCensusExceptionAnnotations(Throwable e) { + if (e instanceof SpannerException) { + return ImmutableMap.of( + "Status", + AttributeValue.stringAttributeValue(((SpannerException) e).getErrorCode().toString())); + } + return ImmutableMap.of(); + } + + @Override + public void addAnnotation(String message, Map attributes) { + Map ocAttributeValues = new HashMap<>(); + for (Map.Entry entry : attributes.entrySet()) { + String key = entry.getKey(); + Object value = entry.getValue(); + if (value instanceof String) { + ocAttributeValues.put(key, AttributeValue.stringAttributeValue((String) value)); + } else if (value instanceof Long) { + ocAttributeValues.put(key, AttributeValue.longAttributeValue((Long) value)); + } + } + + if (ocAttributeValues.size() > 0) { + openCensusSpan.addAnnotation(message, ocAttributeValues); + } + } + + @Override + public void addAnnotation(String message) { + openCensusSpan.addAnnotation(message); + } + + @Override + public void addAnnotation(String message, String key, String value) { + openCensusSpan.addAnnotation( + message, ImmutableMap.of(key, AttributeValue.stringAttributeValue(value))); + } + + @Override + public void addAnnotation(String message, String key, long value) { + openCensusSpan.addAnnotation( + 
message, ImmutableMap.of(key, AttributeValue.longAttributeValue(value))); + } + + @Override + public void addAnnotation(String message, Throwable e) { + openCensusSpan.addAnnotation(message, this.getOpenCensusExceptionAnnotations(e)); + } + + @Override + public void setStatus(Throwable e) { + if (e instanceof SpannerException) { + openCensusSpan.setStatus( + StatusConverter.fromGrpcStatus(((SpannerException) e).getErrorCode().getGrpcStatus()) + .withDescription(e.getMessage())); + } else { + openCensusSpan.setStatus(Status.INTERNAL.withDescription(e.getMessage())); + } + } + + @Override + public void setStatus(ErrorCode errorCode) { + openCensusSpan.setStatus(StatusConverter.fromGrpcStatus(errorCode.getGrpcStatus())); + } + + @Override + public void end() { + openCensusSpan.end(END_SPAN_OPTIONS); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetryApiTracer.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetryApiTracer.java new file mode 100644 index 000000000000..863c531de30a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetryApiTracer.java @@ -0,0 +1,284 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.api.gax.util.TimeConversionUtils.toJavaTimeDuration; + +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.tracing.ApiTracer; +import com.google.api.gax.tracing.ApiTracerFactory.OperationType; +import com.google.common.base.Preconditions; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.StatusCode; +import java.time.Duration; +import java.util.concurrent.atomic.AtomicLong; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; + +/** + * {@link com.google.api.gax.tracing.ApiTracer} for use with OpenTelemetry. Based on {@link + * com.google.api.gax.tracing.OpencensusTracer}. + */ +class OpenTelemetryApiTracer implements ApiTracer { + /** The attribute keys that are used by this tracer might change in a future release. 
*/ + private final AttributeKey ATTEMPT_COUNT_KEY = AttributeKey.longKey("attempt.count"); + + private final AttributeKey TOTAL_REQUEST_COUNT_KEY = + AttributeKey.longKey("total_request_count"); + private final AttributeKey TOTAL_RESPONSE_COUNT_KEY = + AttributeKey.longKey("total_response_count"); + private final AttributeKey EXCEPTION_MESSAGE_KEY = + AttributeKey.stringKey("exception.message"); + private final AttributeKey ATTEMPT_NUMBER_KEY = AttributeKey.longKey("attempt.number"); + private final AttributeKey ATTEMPT_REQUEST_COUNT_KEY = + AttributeKey.longKey("attempt.request_count"); + private final AttributeKey ATTEMPT_RESPONSE_COUNT_KEY = + AttributeKey.longKey("attempt.response_count"); + private final AttributeKey CONNECTION_ID_KEY = AttributeKey.stringKey("connection"); + private final AttributeKey RETRY_DELAY_KEY = AttributeKey.longKey("delay_ms"); + private static final AttributeKey BATCH_SIZE_KEY = AttributeKey.longKey("batch.size"); + private static final AttributeKey BATCH_COUNT_KEY = AttributeKey.longKey("batch.count"); + + private final Span span; + private final OperationType operationType; + + private volatile String lastConnectionId; + private volatile long currentAttemptId; + private final AtomicLong attemptSentMessages = new AtomicLong(0); + private long attemptReceivedMessages = 0; + private final AtomicLong totalSentMessages = new AtomicLong(0); + private long totalReceivedMessages = 0; + + OpenTelemetryApiTracer(@Nonnull Span span, @Nonnull OperationType operationType) { + this.span = Preconditions.checkNotNull(span); + this.operationType = Preconditions.checkNotNull(operationType); + } + + Span getSpan() { + return this.span; + } + + @Override + public Scope inScope() { + final io.opentelemetry.context.Scope openTelemetryScope = span.makeCurrent(); + return openTelemetryScope::close; + } + + @Override + public void operationSucceeded() { + span.setAllAttributes(baseOperationAttributes()); + span.setStatus(StatusCode.OK); + span.end(); + } + 
+ @Override + public void operationCancelled() { + span.setAllAttributes(baseOperationAttributes()); + span.setStatus(StatusCode.ERROR, "Cancelled by caller"); + span.end(); + } + + @Override + public void operationFailed(Throwable error) { + span.setAllAttributes(baseOperationAttributes()); + span.setStatus(StatusCode.ERROR, error.getMessage()); + span.end(); + } + + @Override + public void lroStartFailed(Throwable error) { + span.addEvent( + "Operation failed to start", Attributes.of(EXCEPTION_MESSAGE_KEY, error.getMessage())); + span.setStatus(StatusCode.ERROR, error.getMessage()); + span.end(); + } + + @Override + public void lroStartSucceeded() { + span.addEvent("Operation started"); + } + + @Override + public void connectionSelected(String id) { + lastConnectionId = id; + } + + @Override + public void attemptStarted(int attemptNumber) { + attemptStarted(null, attemptNumber); + } + + @Override + public void attemptStarted(@Nullable Object request, int attemptNumber) { + currentAttemptId = attemptNumber; + attemptSentMessages.set(0); + attemptReceivedMessages = 0; + + // Attempts start counting a zero, so more than zero indicates a retry. + if (attemptNumber > 0 && operationType != OperationType.LongRunning) { + // Add an event if the RPC retries, as this is otherwise transparent to the user. Retries + // would then show up as higher latency without any logical explanation. + span.addEvent("Starting RPC retry " + attemptNumber); + } else if (operationType == OperationType.LongRunning) { + span.addEvent("Starting poll attempt " + attemptNumber); + } + } + + @Override + public void attemptSucceeded() { + Attributes attributes = baseAttemptAttributes(); + + // Same infrastructure is used for both polling and retries, so need to disambiguate it here. 
+ if (operationType == OperationType.LongRunning) { + span.addEvent("Polling completed", attributes); + } else { + span.addEvent("Attempt succeeded", attributes); + } + } + + @Override + public void attemptCancelled() { + Attributes attributes = baseAttemptAttributes(); + + // Same infrastructure is used for both polling and retries, so need to disambiguate it here. + if (operationType == OperationType.LongRunning) { + span.addEvent("Polling was cancelled", attributes); + } else { + span.addEvent("Attempt cancelled", attributes); + } + lastConnectionId = null; + } + + /** This method is obsolete. Use {@link #attemptFailedDuration(Throwable, Duration)} instead. */ + @Override + @ObsoleteApi("Use attemptFailedDuration(Throwable, Duration) instead") + public void attemptFailed(Throwable error, org.threeten.bp.Duration delay) { + attemptFailedDuration(error, toJavaTimeDuration(delay)); + } + + @Override + public void attemptFailedDuration(Throwable error, Duration delay) { + AttributesBuilder builder = baseAttemptAttributesBuilder(); + if (delay != null) { + builder.put(RETRY_DELAY_KEY, delay.toMillis()); + } + if (error != null) { + builder.put(EXCEPTION_MESSAGE_KEY, error.getMessage()); + } + Attributes attributes = builder.build(); + + // Same infrastructure is used for both polling and retries, so need to disambiguate it here. + if (operationType == OperationType.LongRunning) { + // The poll RPC was successful, but it indicated that the operation is still running. 
+ span.addEvent("Scheduling next poll", attributes); + } else { + span.addEvent("Attempt failed, scheduling next attempt", attributes); + } + lastConnectionId = null; + } + + @Override + public void attemptFailedRetriesExhausted(@Nonnull Throwable error) { + AttributesBuilder builder = baseAttemptAttributesBuilder(); + builder.put(EXCEPTION_MESSAGE_KEY, error.getMessage()); + Attributes attributes = builder.build(); + + // Same infrastructure is used for both polling and retries, so need to disambiguate it here. + if (operationType == OperationType.LongRunning) { + span.addEvent("Polling attempts exhausted", attributes); + } else { + span.addEvent("Attempts exhausted", attributes); + } + lastConnectionId = null; + } + + @Override + public void attemptPermanentFailure(@Nonnull Throwable error) { + AttributesBuilder builder = baseAttemptAttributesBuilder(); + builder.put(EXCEPTION_MESSAGE_KEY, error.getMessage()); + Attributes attributes = builder.build(); + + // Same infrastructure is used for both polling and retries, so need to disambiguate it here. 
+ if (operationType == OperationType.LongRunning) { + span.addEvent("Polling failed", attributes); + } else { + span.addEvent("Attempt failed, error not retryable", attributes); + } + lastConnectionId = null; + } + + @Override + public void responseReceived() { + attemptReceivedMessages++; + totalReceivedMessages++; + } + + @Override + public void requestSent() { + attemptSentMessages.incrementAndGet(); + totalSentMessages.incrementAndGet(); + } + + @Override + public void batchRequestSent(long elementCount, long requestSize) { + span.setAllAttributes( + Attributes.of(BATCH_COUNT_KEY, elementCount, BATCH_SIZE_KEY, requestSize)); + } + + private Attributes baseOperationAttributes() { + AttributesBuilder builder = Attributes.builder(); + builder.put(ATTEMPT_COUNT_KEY, currentAttemptId + 1); + long localTotalSentMessages = totalSentMessages.get(); + if (localTotalSentMessages > 0) { + builder.put(TOTAL_REQUEST_COUNT_KEY, localTotalSentMessages); + } + if (totalReceivedMessages > 0) { + builder.put(TOTAL_RESPONSE_COUNT_KEY, totalReceivedMessages); + } + return builder.build(); + } + + private Attributes baseAttemptAttributes() { + return baseAttemptAttributesBuilder().build(); + } + + private AttributesBuilder baseAttemptAttributesBuilder() { + AttributesBuilder builder = Attributes.builder(); + populateAttemptNumber(builder); + + long localAttemptSentMessages = attemptSentMessages.get(); + if (localAttemptSentMessages > 0) { + builder.put(ATTEMPT_REQUEST_COUNT_KEY, localAttemptSentMessages); + } + if (attemptReceivedMessages > 0) { + builder.put(ATTEMPT_RESPONSE_COUNT_KEY, attemptReceivedMessages); + } + String localLastConnectionId = lastConnectionId; + if (localLastConnectionId != null) { + builder.put(CONNECTION_ID_KEY, localLastConnectionId); + } + + return builder; + } + + private void populateAttemptNumber(AttributesBuilder builder) { + builder.put(ATTEMPT_NUMBER_KEY, currentAttemptId); + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetryApiTracerFactory.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetryApiTracerFactory.java new file mode 100644 index 000000000000..7c66c3239e2d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetryApiTracerFactory.java @@ -0,0 +1,60 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.gax.tracing.ApiTracer; +import com.google.api.gax.tracing.ApiTracerFactory; +import com.google.api.gax.tracing.SpanName; +import com.google.common.base.Preconditions; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.context.Context; +import javax.annotation.Nonnull; + +/** {@link ApiTracerFactory} that can be used with OpenTelemetry tracing. 
*/ +class OpenTelemetryApiTracerFactory implements ApiTracerFactory { + @Nonnull private final Tracer internalTracer; + @Nonnull private final Attributes spanAttributes; + + OpenTelemetryApiTracerFactory( + @Nonnull Tracer internalTracer, @Nonnull Attributes spanAttributes) { + this.internalTracer = Preconditions.checkNotNull(internalTracer); + this.spanAttributes = spanAttributes; + } + + @Override + public ApiTracer newTracer(ApiTracer parent, SpanName spanName, OperationType operationType) { + // Default to the current in context span. This is used for outermost tracers that inherit + // the caller's parent span. + Span parentSpan = Span.current(); + + // If an outer callable started a span, use it as the parent. + if (parent instanceof OpenTelemetryApiTracer) { + parentSpan = ((OpenTelemetryApiTracer) parent).getSpan(); + } + + Span span = + internalTracer + .spanBuilder(spanName.toString()) + .setParent(Context.current().with(parentSpan)) + .setAllAttributes(spanAttributes) + .startSpan(); + + return new OpenTelemetryApiTracer(span, operationType); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetryContextKeys.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetryContextKeys.java new file mode 100644 index 000000000000..e5fbedb7c37b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetryContextKeys.java @@ -0,0 +1,30 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.InternalApi; +import io.opentelemetry.context.ContextKey; + +/** + * Keys for OpenTelemetry context variables that are used by the Spanner client library. Only + * intended for internal use. + */ +@InternalApi +public class OpenTelemetryContextKeys { + @InternalApi + public static final ContextKey THREAD_NAME_KEY = ContextKey.named("thread.name"); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetryScope.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetryScope.java new file mode 100644 index 000000000000..6766bc54e6bf --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetryScope.java @@ -0,0 +1,33 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import io.opentelemetry.context.Scope; + +class OpenTelemetryScope implements IScope { + + private final Scope openTelemetryScope; + + OpenTelemetryScope(Scope openTelemetryScope) { + this.openTelemetryScope = openTelemetryScope; + } + + @Override + public void close() { + openTelemetryScope.close(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetrySpan.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetrySpan.java new file mode 100644 index 000000000000..98e1540e760b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetrySpan.java @@ -0,0 +1,99 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.StatusCode; +import java.util.Map; + +class OpenTelemetrySpan implements ISpan { + + private final io.opentelemetry.api.trace.Span openTelemetrySpan; + + OpenTelemetrySpan(Span openTelemetrySpan) { + this.openTelemetrySpan = openTelemetrySpan; + } + + Span getOpenTelemetrySpan() { + return openTelemetrySpan; + } + + @Override + public void addAnnotation(String message, Map attributes) { + AttributesBuilder otAttributesBuilder = Attributes.builder(); + for (Map.Entry entry : attributes.entrySet()) { + String key = entry.getKey(); + Object value = entry.getValue(); + if (value instanceof String) { + otAttributesBuilder.put(key, (String) value); + } else if (value instanceof Long) { + otAttributesBuilder.put(key, (Long) value); + } + } + openTelemetrySpan.addEvent(message, otAttributesBuilder.build()); + } + + @Override + public void addAnnotation(String message) { + openTelemetrySpan.addEvent(message); + } + + @Override + public void addAnnotation(String message, String key, String value) { + openTelemetrySpan.addEvent(message, Attributes.builder().put(key, value).build()); + } + + @Override + public void addAnnotation(String message, String key, long value) { + openTelemetrySpan.addEvent(message, Attributes.builder().put(key, value).build()); + } + + @Override + public void addAnnotation(String message, Throwable e) { + openTelemetrySpan.addEvent(message, this.createOpenTelemetryExceptionAnnotations(e)); + } + + @Override + public void setStatus(Throwable e) { + if (e instanceof SpannerException) { + openTelemetrySpan.setStatus(StatusCode.ERROR, ((SpannerException) e).getErrorCode().name()); + } else { + openTelemetrySpan.setStatus(StatusCode.ERROR, ErrorCode.INTERNAL.name()); + } + openTelemetrySpan.recordException(e); + } + + @Override + public 
void setStatus(ErrorCode errorCode) { + openTelemetrySpan.setStatus(StatusCode.ERROR, errorCode.name()); + } + + @Override + public void end() { + openTelemetrySpan.end(); + } + + private Attributes createOpenTelemetryExceptionAnnotations(Throwable e) { + AttributesBuilder attributesBuilder = Attributes.builder(); + if (e instanceof SpannerException) { + attributesBuilder.put("Status", ((SpannerException) e).getErrorCode().toString()); + } + return attributesBuilder.build(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Operation.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Operation.java new file mode 100644 index 000000000000..8cfbcedc175e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Operation.java @@ -0,0 +1,223 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.core.ApiClock; +import com.google.api.core.CurrentMillisClock; +import com.google.api.gax.retrying.BasicResultRetryAlgorithm; +import com.google.api.gax.retrying.PollException; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.RetryHelper; +import com.google.cloud.RetryOption; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.common.annotations.VisibleForTesting; +import com.google.longrunning.Operation.ResultCase; +import com.google.protobuf.Any; +import com.google.rpc.Status; +import java.time.Duration; +import java.util.concurrent.ExecutionException; +import javax.annotation.Nullable; + +/** + * Represents a long-running operation. + * + * @param + * @param + */ +// TODO(user): Implement other operations on Operation. +public class Operation { + + private final RetrySettings DEFAULT_OPERATION_WAIT_SETTINGS = + RetrySettings.newBuilder() + .setTotalTimeoutDuration(Duration.ofHours(12L)) + .setInitialRetryDelayDuration(Duration.ofMillis(500L)) + .setRetryDelayMultiplier(1.0) + .setJittered(false) + .setMaxRetryDelayDuration(Duration.ofMinutes(500L)) + .build(); + + interface Parser { + R parseResult(Any response); + + M parseMetadata(Any metadata); + } + + private final M metadata; + private final R result; + private final SpannerException exception; + private final boolean isDone; + private final SpannerRpc rpc; + private final String name; + private final Parser parser; + private final ApiClock clock; + + @VisibleForTesting + Operation( + SpannerRpc rpc, + String name, + @Nullable M metadata, + @Nullable R result, + @Nullable SpannerException exception, + boolean isDone, + Parser parser, + ApiClock clock) { + this.rpc = rpc; + this.name = name; + this.metadata = metadata; + this.result = result; + this.exception = exception; + this.isDone = isDone; + this.parser = parser; + this.clock = clock; + } + + private static Operation failed( + SpannerRpc 
rpc, String name, Status status, M metadata, Parser parser, ApiClock clock) { + SpannerException e = + SpannerExceptionFactory.newSpannerException( + ErrorCode.fromRpcStatus(status), status.getMessage(), (Throwable) (null)); + return new Operation<>(rpc, name, metadata, null, e, true, parser, clock); + } + + private static Operation successful( + SpannerRpc rpc, String name, M metadata, R result, Parser parser, ApiClock clock) { + return new Operation<>(rpc, name, metadata, result, null, true, parser, clock); + } + + private static Operation pending( + SpannerRpc rpc, String name, M metadata, Parser parser, ApiClock clock) { + return new Operation<>(rpc, name, metadata, null, null, false, parser, clock); + } + + static Operation create( + SpannerRpc rpc, com.google.longrunning.Operation proto, Parser parser) { + return Operation.create(rpc, proto, parser, CurrentMillisClock.getDefaultClock()); + } + + static Operation create( + SpannerRpc rpc, com.google.longrunning.Operation proto, Parser parser, ApiClock clock) { + M metadata = proto.hasMetadata() ? parser.parseMetadata(proto.getMetadata()) : null; + String name = proto.getName(); + if (proto.getDone()) { + if (proto.getResultCase() == ResultCase.ERROR) { + return Operation.failed(rpc, name, proto.getError(), metadata, parser, clock); + } else { + return Operation.successful( + rpc, name, metadata, parser.parseResult(proto.getResponse()), parser, clock); + } + } else { + return Operation.pending(rpc, name, metadata, parser, clock); + } + } + + /** Fetches the current status of this operation. */ + public Operation reload() throws SpannerException { + if (isDone) { + return this; + } + com.google.longrunning.Operation proto = rpc.getOperation(name); + return Operation.create(rpc, proto, parser); + } + + /** + * Blocks till the operation is complete or maximum time, if specified, has elapsed. + * + * @return null if operation is not found otherwise the current operation. 
+ */ + public Operation waitFor(RetryOption... waitOptions) throws SpannerException { + if (isDone()) { + return this; + } + RetrySettings waitSettings = + RetryOption.mergeToSettings(DEFAULT_OPERATION_WAIT_SETTINGS, waitOptions); + try { + com.google.longrunning.Operation proto = + RetryHelper.poll( + () -> rpc.getOperation(name), + waitSettings, + new BasicResultRetryAlgorithm() { + @Override + public boolean shouldRetry( + Throwable prevThrowable, com.google.longrunning.Operation prevResponse) { + if (prevResponse != null) { + return !prevResponse.getDone(); + } + if (prevThrowable instanceof SpannerException) { + SpannerException spannerException = (SpannerException) prevThrowable; + return spannerException.getErrorCode() != ErrorCode.NOT_FOUND + && spannerException.isRetryable(); + } + return false; + } + }, + clock); + return Operation.create(rpc, proto, parser); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (ExecutionException e) { + Throwable cause = e.getCause(); + if (cause instanceof SpannerException) { + SpannerException spannerException = (SpannerException) cause; + if (spannerException.getErrorCode() == ErrorCode.NOT_FOUND) { + return null; + } + throw spannerException; + } + if (cause instanceof PollException) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.DEADLINE_EXCEEDED, "Operation did not complete in the given time"); + } + throw SpannerExceptionFactory.newSpannerException(cause); + } + } + + /** + * Returns the metadata returned by the last refresh of this operation. Returns null if no + * metadata was returned or if this operation has not been refreshed. + */ + public M getMetadata() { + return metadata; + } + + /** Returns true if the operation is done. */ + public boolean isDone() { + return isDone; + } + + /** + * Returns result of the operation if the operation is complete and had a result. Returns null if + * the operation is not complete or did not have a result. 
+ * + * @throws SpannerException if the operation failed. + */ + public R getResult() throws SpannerException { + if (exception != null) { + throw exception; + } + return result; + } + + /** Returns the name of the operation. */ + public String getName() { + return name; + } + + /** Returns true if the operation completed successfully. */ + public boolean isSuccessful() { + return isDone && exception == null; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Options.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Options.java new file mode 100644 index 000000000000..116e1aa4fc5d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Options.java @@ -0,0 +1,1212 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.common.base.Preconditions; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.ReadRequest.LockHint; +import com.google.spanner.v1.ReadRequest.OrderBy; +import com.google.spanner.v1.RequestOptions; +import com.google.spanner.v1.RequestOptions.Priority; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode; +import java.io.Serializable; +import java.time.Duration; +import java.util.Objects; + +/** Specifies options for various spanner operations */ +public final class Options implements Serializable { + private static final long serialVersionUID = 8067099123096783941L; + + /** + * Priority for an RPC invocation. The default priority is {@link #HIGH}. This enum can be used to + * set a lower priority for a specific RPC invocation. + */ + public enum RpcPriority { + LOW(Priority.PRIORITY_LOW), + MEDIUM(Priority.PRIORITY_MEDIUM), + HIGH(Priority.PRIORITY_HIGH), + UNSPECIFIED(Priority.PRIORITY_UNSPECIFIED); + + private final Priority proto; + + RpcPriority(Priority proto) { + this.proto = Preconditions.checkNotNull(proto); + } + + public static RpcPriority fromProto(Priority proto) { + for (RpcPriority e : RpcPriority.values()) { + if (e.proto.equals(proto)) return e; + } + return RpcPriority.UNSPECIFIED; + } + } + + /** + * OrderBy for an RPC invocation. The default orderby is {@link #PRIMARY_KEY}. This enum can be + * used to control the order in which rows are returned from a read. 
+ */ + public enum RpcOrderBy { + PRIMARY_KEY(OrderBy.ORDER_BY_PRIMARY_KEY), + NO_ORDER(OrderBy.ORDER_BY_NO_ORDER), + UNSPECIFIED(OrderBy.ORDER_BY_UNSPECIFIED); + + private final OrderBy proto; + + RpcOrderBy(OrderBy proto) { + this.proto = Preconditions.checkNotNull(proto); + } + + public static RpcOrderBy fromProto(OrderBy proto) { + for (RpcOrderBy e : RpcOrderBy.values()) { + if (e.proto.equals(proto)) return e; + } + return RpcOrderBy.UNSPECIFIED; + } + } + + public enum RpcLockHint { + UNSPECIFIED(LockHint.LOCK_HINT_UNSPECIFIED), + SHARED(LockHint.LOCK_HINT_SHARED), + EXCLUSIVE(LockHint.LOCK_HINT_EXCLUSIVE); + + private final LockHint proto; + + RpcLockHint(LockHint proto) { + this.proto = Preconditions.checkNotNull(proto); + } + + public static RpcLockHint fromProto(LockHint proto) { + for (RpcLockHint e : RpcLockHint.values()) { + if (e.proto.equals(proto)) return e; + } + return RpcLockHint.UNSPECIFIED; + } + } + + /** Marker interface to mark options applicable to both Read and Query operations */ + public interface ReadAndQueryOption extends ReadOption, QueryOption {} + + /** Marker interface to mark options applicable to read operation */ + public interface ReadOption {} + + /** Marker interface to mark options applicable to Read, Query, Update and Write operations */ + public interface ReadQueryUpdateTransactionOption + extends ReadOption, QueryOption, UpdateOption, TransactionOption {} + + /** Marker interface to mark options applicable to Update and Write operations */ + public interface UpdateTransactionOption extends UpdateOption, TransactionOption {} + + /** Marker interface for options that can be used with both executeQuery and executeUpdate. */ + public interface QueryUpdateOption extends QueryOption, UpdateOption {} + + /** + * Marker interface to mark options applicable to Create, Update and Delete operations in admin + * API. 
+ */ + public interface CreateUpdateDeleteAdminApiOption + extends CreateAdminApiOption, UpdateAdminApiOption, DeleteAdminApiOption {} + + /** Marker interface to mark options applicable to Create operations in admin API. */ + public interface CreateAdminApiOption extends AdminApiOption {} + + /** Marker interface to mark options applicable to Delete operations in admin API. */ + public interface DeleteAdminApiOption extends AdminApiOption {} + + /** Marker interface to mark options applicable to Update operations in admin API. */ + public interface UpdateAdminApiOption extends AdminApiOption {} + + /** Marker interface to mark options applicable to query operation. */ + public interface QueryOption {} + + /** Marker interface to mark options applicable to write operations */ + public interface TransactionOption {} + + /** Marker interface to mark options applicable to update operation. */ + public interface UpdateOption {} + + /** Marker interface to mark options applicable to list operations in admin API. */ + public interface ListOption {} + + /** Marker interface to mark options applicable to operations in admin API. */ + public interface AdminApiOption {} + + /** Specifying this instructs the transaction to request {@link CommitStats} from the backend. */ + public static TransactionOption commitStats() { + return COMMIT_STATS_OPTION; + } + + /** + * Specifying this instructs the transaction to request Optimistic Lock from the backend. In this + * concurrency mode, operations during the execution phase, i.e., reads and queries, are performed + * without acquiring locks, and transactional consistency is ensured by running a validation + * process in the commit phase (when any needed locks are acquired). The validation process + * succeeds only if there are no conflicting committed transactions (that committed mutations to + * the read data at a commit timestamp after the read timestamp). + * + * @deprecated Use {@link Options#readLockMode(ReadLockMode)} instead. 
+ */ + @Deprecated + public static TransactionOption optimisticLock() { + return Options.readLockMode(ReadLockMode.OPTIMISTIC); + } + + /** + * Returns a {@link TransactionOption} to set the desired {@link ReadLockMode} for a read-write + * transaction. + * + *

This option controls the locking behavior for read operations and queries within a + * read-write transaction. It works in conjunction with the transaction's {@link IsolationLevel}. + * + *

    + *
  • {@link ReadLockMode#PESSIMISTIC}: Read locks are acquired immediately on read. This mode + * only applies to {@code SERIALIZABLE} isolation. This mode prevents concurrent + * modifications by locking data throughout the transaction. This reduces commit-time aborts + * due to conflicts but can increase how long transactions wait for locks and the overall + * contention. + *
  • {@link ReadLockMode#OPTIMISTIC}: Locks for reads within the transaction are not acquired + * on read. Instead the locks are acquired on commit to validate that read/queried data has + * not changed since the transaction started. If a conflict is detected, the transaction + * will fail. This mode only applies to {@code SERIALIZABLE} isolation. This mode defers + * locking until commit, which can reduce contention and improve throughput. However, be + * aware that this increases the risk of transaction aborts if there's significant write + * competition on the same data. + *
  • {@link ReadLockMode#READ_LOCK_MODE_UNSPECIFIED}: This is the default if no mode is set. + * The locking behavior depends on the isolation level: + *
      + *
    • For {@code REPEATABLE_READ} isolation: Locking semantics default to {@code + * OPTIMISTIC}. However, validation checks at commit are only performed for queries + * using {@code SELECT FOR UPDATE}, statements with {@code LOCK_SCANNED_RANGES} hints, + * and DML statements.
      + * Note: It is an error to explicitly set {@code ReadLockMode} when the isolation + * level is {@code REPEATABLE_READ}. + *
    • For all other isolation levels: If the read lock mode is not set, it defaults to + * {@code PESSIMISTIC} locking. + *
    + *
+ */ + public static TransactionOption readLockMode(ReadLockMode readLockMode) { + return new ReadLockModeOption(readLockMode); + } + + /** + * Specifying this instructs the transaction to request {@link IsolationLevel} from the backend. + */ + public static TransactionOption isolationLevel(IsolationLevel isolationLevel) { + return new IsolationLevelOption(isolationLevel); + } + + /** + * Specifying this instructs the transaction to be excluded from being recorded in change streams + * with the DDL option `allow_txn_exclusion=true`. This does not exclude the transaction from + * being recorded in the change streams with the DDL option `allow_txn_exclusion` being false or + * unset. + */ + public static UpdateTransactionOption excludeTxnFromChangeStreams() { + return EXCLUDE_TXN_FROM_CHANGE_STREAMS_OPTION; + } + + public static RequestIdOption requestId(XGoogSpannerRequestId reqId) { + return new RequestIdOption(reqId); + } + + /** + * Specifying this will cause the read to yield at most this many rows. This should be greater + * than 0. + */ + public static ReadOption limit(long limit) { + Preconditions.checkArgument(limit > 0, "Limit should be greater than 0"); + return new LimitOption(limit); + } + + /** Specifies the order_by to use for the RPC. */ + public static ReadOption orderBy(RpcOrderBy orderBy) { + return new OrderByOption(orderBy); + } + + public static ReadOption lockHint(RpcLockHint orderBy) { + return new LockHintOption(orderBy); + } + + /** + * Specifying this will allow the client to prefetch up to {@code prefetchChunks} {@code + * PartialResultSet} chunks for read and query. The data size of each chunk depends on the server + * implementation but a good rule of thumb is that each chunk will be up to 1 MiB. Larger values + * reduce the likelihood of blocking while consuming results at the cost of greater memory + * consumption. {@code prefetchChunks} should be greater than 0. 
To get good performance choose a + * value that is large enough to allow buffering of chunks for an entire row. Apart from the + * buffered chunks, there can be at most one more row buffered in the client. + */ + public static ReadAndQueryOption prefetchChunks(int prefetchChunks) { + Preconditions.checkArgument(prefetchChunks > 0, "prefetchChunks should be greater than 0"); + return new FlowControlOption(prefetchChunks); + } + + public static ReadAndQueryOption bufferRows(int bufferRows) { + Preconditions.checkArgument(bufferRows > 0, "bufferRows should be greater than 0"); + return new BufferRowsOption(bufferRows); + } + + /** Specifies the priority to use for the RPC. */ + public static ReadQueryUpdateTransactionOption priority(RpcPriority priority) { + return new PriorityOption(priority); + } + + /** + * Specifying this will add the given client context to the request. The client context is used to + * pass side-channel or configuration information to the backend, such as a user ID for a + * parameterized secure view. 
+ */ + public static ReadQueryUpdateTransactionOption clientContext( + RequestOptions.ClientContext clientContext) { + return new ClientContextOption(clientContext); + } + + RequestOptions toRequestOptionsProto(boolean isTransactionOption) { + if (!hasPriority() && !hasTag() && !hasClientContext()) { + return RequestOptions.getDefaultInstance(); + } + RequestOptions.Builder builder = RequestOptions.newBuilder(); + if (hasPriority()) { + builder.setPriority(priority()); + } + if (hasTag()) { + if (isTransactionOption) { + builder.setTransactionTag(tag()); + } else { + builder.setRequestTag(tag()); + } + } + if (hasClientContext()) { + builder.setClientContext(clientContext()); + } + return builder.build(); + } + + public static TransactionOption maxCommitDelay(Duration maxCommitDelay) { + Preconditions.checkArgument(!maxCommitDelay.isNegative(), "maxCommitDelay should be positive"); + return new MaxCommitDelayOption(maxCommitDelay); + } + + /** + * Specifying this will cause the reads, queries, updates and writes operations statistics + * collection to be grouped by tag. + */ + public static ReadQueryUpdateTransactionOption tag(String name) { + return new TagOption(name); + } + + /** + * Specifying this will cause the list operations to fetch at most this many records in a page. + */ + public static ListOption pageSize(int pageSize) { + return new PageSizeOption(pageSize); + } + + /** + * If this is for PartitionedRead or PartitionedQuery and this field is set to `true`, the request + * will be executed via Spanner independent compute resources. + */ + public static DataBoostQueryOption dataBoostEnabled(Boolean dataBoostEnabled) { + return new DataBoostQueryOption(dataBoostEnabled); + } + + /** + * If set to true, this option marks the end of the transaction. The transaction should be + * committed or aborted after this statement executes, and attempts to execute any other requests + * against this transaction (including reads and queries) will be rejected. 
Mixing mutations with + * statements that are marked as the last statement is not allowed. + * + *

   * For DML statements, setting this option may cause some error reporting to be deferred until
   * commit time (e.g. validation of unique constraints). Given this, successful execution of a DML
   * statement should not be assumed until the transaction commits.
   */
  public static QueryUpdateOption lastStatement() {
    return new LastStatementUpdateOption();
  }

  /**
   * Specifying this will cause the list operation to start fetching the record from this onwards.
   */
  public static ListOption pageToken(String pageToken) {
    return new PageTokenOption(pageToken);
  }

  /**
   * Specifying this will cause the given filter to be applied to the list operation. List
   * operations that support this options are:
   *
   * <ul>
   *   <li>{@link InstanceAdminClient#listInstances}
   * </ul>
   *
   * <p>If this option is passed to any other list operation, it will throw an
   * IllegalArgumentException.
   *
   * @param filter An expression for filtering the results of the request. Filter rules are case
   *     insensitive. Some examples of using filters are:
   *     <ul>
   *       <li>name:* The entity has a name.
   *       <li>name:Howl The entity's name contains "howl".
   *       <li>name:HOWL Equivalent to above.
   *       <li>NAME:howl Equivalent to above.
   *       <li>labels.env:* The entity has the label env.
   *       <li>labels.env:dev The entity has a label env whose value contains "dev".
   *       <li>name:howl labels.env:dev The entity's name contains "howl" and it has the label env
   *           whose value contains "dev".
   *     </ul>
   */
  public static ListOption filter(String filter) {
    return new FilterOption(filter);
  }

  /**
   * Specifying this will help in optimistic concurrency control as a way to help prevent
   * simultaneous deletes of an instance config from overwriting each other. Operations that support
   * this option are:
   *
   * <ul>
   *   <li>{@link InstanceAdminClient#deleteInstanceConfig}
   * </ul>
   */
  public static DeleteAdminApiOption etag(String etag) {
    return new EtagOption(etag);
  }

  /**
   * Specifying this will not actually execute a request, and provide the same response. Operations
   * that support this option are:
   *
   * <ul>
   *   <li>{@link InstanceAdminClient#createInstanceConfig}
   *   <li>{@link InstanceAdminClient#updateInstanceConfig}
   *   <li>{@link InstanceAdminClient#deleteInstanceConfig}
   * </ul>
   */
  public static CreateUpdateDeleteAdminApiOption validateOnly(Boolean validateOnly) {
    return new ValidateOnlyOption(validateOnly);
  }

  /**
   * Option to request DirectedRead for ReadOnlyTransaction and SingleUseTransaction.
   *
   * <p>

The DirectedReadOptions can be used to indicate which replicas or regions should be used for + * non-transactional reads or queries. Not all requests can be sent to non-leader replicas. In + * particular, some requests such as reads within read-write transactions must be sent to a + * designated leader replica. These requests ignore DirectedReadOptions. + */ + public static ReadAndQueryOption directedRead(DirectedReadOptions directedReadOptions) { + return new DirectedReadOption(directedReadOptions); + } + + public static ReadAndQueryOption decodeMode(DecodeMode decodeMode) { + return new DecodeOption(decodeMode); + } + + /** Option to request {@link CommitStats} for read/write transactions. */ + static final class CommitStatsOption extends InternalOption implements TransactionOption { + @Override + void appendToOptions(Options options) { + options.withCommitStats = true; + } + } + + static final CommitStatsOption COMMIT_STATS_OPTION = new CommitStatsOption(); + + /** Option to request {@link MaxCommitDelayOption} for read/write transactions. */ + static final class MaxCommitDelayOption extends InternalOption implements TransactionOption { + final Duration maxCommitDelay; + + MaxCommitDelayOption(Duration maxCommitDelay) { + this.maxCommitDelay = maxCommitDelay; + } + + @Override + void appendToOptions(Options options) { + options.maxCommitDelay = maxCommitDelay; + } + } + + /** Option to request the transaction to be excluded from change streams. */ + static final class ExcludeTxnFromChangeStreamsOption extends InternalOption + implements UpdateTransactionOption { + @Override + void appendToOptions(Options options) { + options.withExcludeTxnFromChangeStreams = true; + } + } + + static final ExcludeTxnFromChangeStreamsOption EXCLUDE_TXN_FROM_CHANGE_STREAMS_OPTION = + new ExcludeTxnFromChangeStreamsOption(); + + /** Option pertaining to flow control. 
*/ + static final class FlowControlOption extends InternalOption implements ReadAndQueryOption { + final int prefetchChunks; + + FlowControlOption(int prefetchChunks) { + this.prefetchChunks = prefetchChunks; + } + + @Override + void appendToOptions(Options options) { + options.prefetchChunks = prefetchChunks; + } + } + + static final class BufferRowsOption extends InternalOption implements ReadAndQueryOption { + final int bufferRows; + + BufferRowsOption(int bufferRows) { + this.bufferRows = bufferRows; + } + + @Override + void appendToOptions(Options options) { + options.bufferRows = bufferRows; + } + } + + static final class PriorityOption extends InternalOption + implements ReadQueryUpdateTransactionOption { + private final RpcPriority priority; + + PriorityOption(RpcPriority priority) { + this.priority = priority; + } + + @Override + void appendToOptions(Options options) { + options.priority = priority; + } + } + + static final class ClientContextOption extends InternalOption + implements ReadQueryUpdateTransactionOption { + private final RequestOptions.ClientContext clientContext; + + ClientContextOption(RequestOptions.ClientContext clientContext) { + this.clientContext = clientContext; + } + + @Override + void appendToOptions(Options options) { + options.clientContext = clientContext; + } + } + + static final class TagOption extends InternalOption implements ReadQueryUpdateTransactionOption { + private final String tag; + + TagOption(String tag) { + this.tag = tag; + } + + String getTag() { + return tag; + } + + @Override + void appendToOptions(Options options) { + options.tag = tag; + } + } + + static final class EtagOption extends InternalOption implements DeleteAdminApiOption { + private final String etag; + + EtagOption(String etag) { + this.etag = etag; + } + + @Override + void appendToOptions(Options options) { + options.etag = etag; + } + } + + static final class ValidateOnlyOption extends InternalOption + implements CreateUpdateDeleteAdminApiOption { 
+ private final Boolean validateOnly; + + ValidateOnlyOption(Boolean validateOnly) { + this.validateOnly = validateOnly; + } + + @Override + void appendToOptions(Options options) { + options.validateOnly = validateOnly; + } + } + + static final class DirectedReadOption extends InternalOption implements ReadAndQueryOption { + private final DirectedReadOptions directedReadOptions; + + DirectedReadOption(DirectedReadOptions directedReadOptions) { + this.directedReadOptions = + Preconditions.checkNotNull(directedReadOptions, "DirectedReadOptions cannot be null"); + ; + } + + @Override + void appendToOptions(Options options) { + options.directedReadOptions = directedReadOptions; + } + } + + static final class DecodeOption extends InternalOption implements ReadAndQueryOption { + private final DecodeMode decodeMode; + + DecodeOption(DecodeMode decodeMode) { + this.decodeMode = Preconditions.checkNotNull(decodeMode, "DecodeMode cannot be null"); + } + + @Override + void appendToOptions(Options options) { + options.decodeMode = decodeMode; + } + } + + /** Option to set isolation level for read/write transactions. */ + static final class IsolationLevelOption extends InternalOption implements TransactionOption { + private final IsolationLevel isolationLevel; + + public IsolationLevelOption(IsolationLevel isolationLevel) { + this.isolationLevel = isolationLevel; + } + + @Override + void appendToOptions(Options options) { + options.isolationLevel = isolationLevel; + } + } + + /** Option to set read lock mode for read/write transactions. 
*/ + static final class ReadLockModeOption extends InternalOption implements TransactionOption { + private final ReadLockMode readLockMode; + + public ReadLockModeOption(ReadLockMode readLockMode) { + this.readLockMode = readLockMode; + } + + @Override + void appendToOptions(Options options) { + options.readLockMode = readLockMode; + } + } + + private boolean withCommitStats; + + private Duration maxCommitDelay; + + private Long limit; + private Integer prefetchChunks; + private Integer bufferRows; + private Integer pageSize; + private String pageToken; + private String filter; + private RpcPriority priority; + private String tag; + private RequestOptions.ClientContext clientContext; + private String etag; + private Boolean validateOnly; + private Boolean withExcludeTxnFromChangeStreams; + private Boolean dataBoostEnabled; + private DirectedReadOptions directedReadOptions; + private DecodeMode decodeMode; + private RpcOrderBy orderBy; + private RpcLockHint lockHint; + private Boolean lastStatement; + private IsolationLevel isolationLevel; + private XGoogSpannerRequestId reqId; + private ReadLockMode readLockMode; + + // Construction is via factory methods below. 
+ private Options() {} + + boolean withCommitStats() { + return withCommitStats; + } + + boolean hasMaxCommitDelay() { + return maxCommitDelay != null; + } + + Duration maxCommitDelay() { + return maxCommitDelay; + } + + boolean hasLimit() { + return limit != null; + } + + long limit() { + return limit; + } + + boolean hasPrefetchChunks() { + return prefetchChunks != null; + } + + int prefetchChunks() { + return prefetchChunks; + } + + boolean hasBufferRows() { + return bufferRows != null; + } + + int bufferRows() { + return bufferRows; + } + + boolean hasPageSize() { + return pageSize != null; + } + + int pageSize() { + return pageSize; + } + + boolean hasPageToken() { + return pageToken != null; + } + + String pageToken() { + return pageToken; + } + + boolean hasFilter() { + return filter != null; + } + + String filter() { + return filter; + } + + boolean hasReqId() { + return reqId != null; + } + + XGoogSpannerRequestId reqId() { + return reqId; + } + + boolean hasPriority() { + return priority != null; + } + + Priority priority() { + return priority == null ? 
null : priority.proto; + } + + boolean hasClientContext() { + return clientContext != null; + } + + RequestOptions.ClientContext clientContext() { + return clientContext; + } + + boolean hasTag() { + return tag != null; + } + + String tag() { + return tag; + } + + boolean hasEtag() { + return etag != null; + } + + String etag() { + return etag; + } + + boolean hasValidateOnly() { + return validateOnly != null; + } + + Boolean validateOnly() { + return validateOnly; + } + + Boolean withExcludeTxnFromChangeStreams() { + return withExcludeTxnFromChangeStreams; + } + + boolean hasDataBoostEnabled() { + return dataBoostEnabled != null; + } + + Boolean dataBoostEnabled() { + return dataBoostEnabled; + } + + boolean hasDirectedReadOptions() { + return directedReadOptions != null; + } + + DirectedReadOptions directedReadOptions() { + return directedReadOptions; + } + + boolean hasDecodeMode() { + return decodeMode != null; + } + + DecodeMode decodeMode() { + return decodeMode; + } + + boolean hasOrderBy() { + return orderBy != null; + } + + OrderBy orderBy() { + return orderBy == null ? null : orderBy.proto; + } + + boolean hasLastStatement() { + return lastStatement != null; + } + + Boolean isLastStatement() { + return lastStatement; + } + + boolean hasLockHint() { + return lockHint != null; + } + + LockHint lockHint() { + return lockHint == null ? 
null : lockHint.proto; + } + + IsolationLevel isolationLevel() { + return isolationLevel; + } + + ReadLockMode readLockMode() { + return readLockMode; + } + + @Override + public String toString() { + StringBuilder b = new StringBuilder(); + if (withCommitStats) { + b.append("withCommitStats: ").append(withCommitStats).append(' '); + } + if (maxCommitDelay != null) { + b.append("maxCommitDelay: ").append(maxCommitDelay).append(' '); + } + if (limit != null) { + b.append("limit: ").append(limit).append(' '); + } + if (prefetchChunks != null) { + b.append("prefetchChunks: ").append(prefetchChunks).append(' '); + } + if (pageSize != null) { + b.append("pageSize: ").append(pageSize).append(' '); + } + if (pageToken != null) { + b.append("pageToken: ").append(pageToken).append(' '); + } + if (filter != null) { + b.append("filter: ").append(filter).append(' '); + } + if (priority != null) { + b.append("priority: ").append(priority).append(' '); + } + if (clientContext != null) { + b.append("clientContext: ").append(clientContext).append(' '); + } + if (tag != null) { + b.append("tag: ").append(tag).append(' '); + } + if (etag != null) { + b.append("etag: ").append(etag).append(' '); + } + if (validateOnly != null) { + b.append("validateOnly: ").append(validateOnly).append(' '); + } + if (withExcludeTxnFromChangeStreams != null) { + b.append("withExcludeTxnFromChangeStreams: ") + .append(withExcludeTxnFromChangeStreams) + .append(' '); + } + if (dataBoostEnabled != null) { + b.append("dataBoostEnabled: ").append(dataBoostEnabled).append(' '); + } + if (directedReadOptions != null) { + b.append("directedReadOptions: ").append(directedReadOptions).append(' '); + } + if (decodeMode != null) { + b.append("decodeMode: ").append(decodeMode).append(' '); + } + if (orderBy != null) { + b.append("orderBy: ").append(orderBy).append(' '); + } + if (lastStatement != null) { + b.append("lastStatement: ").append(lastStatement).append(' '); + } + if (lockHint != null) { + 
b.append("lockHint: ").append(lockHint).append(' '); + } + if (isolationLevel != null) { + b.append("isolationLevel: ").append(isolationLevel).append(' '); + } + if (reqId != null) { + b.append("requestId: ").append(reqId.toString()); + } + if (readLockMode != null) { + b.append("readLockMode: ").append(readLockMode).append(' '); + } + return b.toString(); + } + + @Override + // Since Options mandates checking hasXX() before XX() is called, the equals & hashCode look more + // complicated than usual. + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Options that = (Options) o; + return Objects.equals(withCommitStats, that.withCommitStats) + && Objects.equals(maxCommitDelay, that.maxCommitDelay) + && (!hasLimit() && !that.hasLimit() + || hasLimit() && that.hasLimit() && Objects.equals(limit(), that.limit())) + && (!hasPrefetchChunks() && !that.hasPrefetchChunks() + || hasPrefetchChunks() + && that.hasPrefetchChunks() + && Objects.equals(prefetchChunks(), that.prefetchChunks())) + && (!hasBufferRows() && !that.hasBufferRows() + || hasBufferRows() + && that.hasBufferRows() + && Objects.equals(bufferRows(), that.bufferRows())) + && (!hasPageSize() && !that.hasPageSize() + || hasPageSize() && that.hasPageSize() && Objects.equals(pageSize(), that.pageSize())) + && Objects.equals(pageToken(), that.pageToken()) + && Objects.equals(filter(), that.filter()) + && Objects.equals(priority(), that.priority()) + && Objects.equals(clientContext(), that.clientContext()) + && Objects.equals(tag(), that.tag()) + && Objects.equals(etag(), that.etag()) + && Objects.equals(validateOnly(), that.validateOnly()) + && Objects.equals(withExcludeTxnFromChangeStreams(), that.withExcludeTxnFromChangeStreams()) + && Objects.equals(dataBoostEnabled(), that.dataBoostEnabled()) + && Objects.equals(directedReadOptions(), that.directedReadOptions()) + && Objects.equals(orderBy(), that.orderBy()) + && 
Objects.equals(isLastStatement(), that.isLastStatement()) + && Objects.equals(lockHint(), that.lockHint()) + && Objects.equals(isolationLevel(), that.isolationLevel()) + && Objects.equals(reqId(), that.reqId()) + && Objects.equals(readLockMode(), that.readLockMode()); + } + + @Override + public int hashCode() { + int result = 31; + if (withCommitStats) { + result = 31 * result + 1231; + } + if (maxCommitDelay != null) { + result = 31 * result + maxCommitDelay.hashCode(); + } + if (limit != null) { + result = 31 * result + limit.hashCode(); + } + if (prefetchChunks != null) { + result = 31 * result + prefetchChunks.hashCode(); + } + if (bufferRows != null) { + result = 31 * result + bufferRows.hashCode(); + } + if (pageSize != null) { + result = 31 * result + pageSize.hashCode(); + } + if (pageToken != null) { + result = 31 * result + pageToken.hashCode(); + } + if (filter != null) { + result = 31 * result + filter.hashCode(); + } + if (priority != null) { + result = 31 * result + priority.hashCode(); + } + if (clientContext != null) { + result = 31 * result + clientContext.hashCode(); + } + if (tag != null) { + result = 31 * result + tag.hashCode(); + } + if (etag != null) { + result = 31 * result + etag.hashCode(); + } + if (validateOnly != null) { + result = 31 * result + validateOnly.hashCode(); + } + if (withExcludeTxnFromChangeStreams != null) { + result = 31 * result + withExcludeTxnFromChangeStreams.hashCode(); + } + if (dataBoostEnabled != null) { + result = 31 * result + dataBoostEnabled.hashCode(); + } + if (directedReadOptions != null) { + result = 31 * result + directedReadOptions.hashCode(); + } + if (decodeMode != null) { + result = 31 * result + decodeMode.hashCode(); + } + if (orderBy != null) { + result = 31 * result + orderBy.hashCode(); + } + if (lastStatement != null) { + result = 31 * result + lastStatement.hashCode(); + } + if (lockHint != null) { + result = 31 * result + lockHint.hashCode(); + } + if (isolationLevel != null) { + result = 31 * 
result + isolationLevel.hashCode(); + } + if (reqId != null) { + result = 31 * result + reqId.hashCode(); + } + if (readLockMode != null) { + result = 31 * result + readLockMode.hashCode(); + } + return result; + } + + static Options fromReadOptions(ReadOption... options) { + Options readOptions = new Options(); + for (ReadOption option : options) { + if (option instanceof InternalOption) { + ((InternalOption) option).appendToOptions(readOptions); + } + } + return readOptions; + } + + static Options fromQueryOptions(QueryOption... options) { + Options readOptions = new Options(); + for (QueryOption option : options) { + if (option instanceof InternalOption) { + ((InternalOption) option).appendToOptions(readOptions); + } + } + return readOptions; + } + + static Options fromUpdateOptions(UpdateOption... options) { + Options updateOptions = new Options(); + for (UpdateOption option : options) { + if (option instanceof InternalOption) { + ((InternalOption) option).appendToOptions(updateOptions); + } + } + return updateOptions; + } + + static Options fromTransactionOptions(TransactionOption... options) { + Options transactionOptions = new Options(); + for (TransactionOption option : options) { + if (option instanceof InternalOption) { + ((InternalOption) option).appendToOptions(transactionOptions); + } + } + return transactionOptions; + } + + static Options fromListOptions(ListOption... options) { + Options listOptions = new Options(); + for (ListOption option : options) { + if (option instanceof InternalOption) { + ((InternalOption) option).appendToOptions(listOptions); + } + } + return listOptions; + } + + static Options fromAdminApiOptions(AdminApiOption... 
options) { + Options adminApiOptions = new Options(); + for (AdminApiOption option : options) { + if (option instanceof InternalOption) { + ((InternalOption) option).appendToOptions(adminApiOptions); + } + } + return adminApiOptions; + } + + private abstract static class InternalOption { + abstract void appendToOptions(Options options); + } + + static class LimitOption extends InternalOption implements ReadOption { + private final long limit; + + LimitOption(long limit) { + this.limit = limit; + } + + @Override + void appendToOptions(Options options) { + options.limit = limit; + } + } + + static class OrderByOption extends InternalOption implements ReadOption { + private final RpcOrderBy orderBy; + + OrderByOption(RpcOrderBy orderBy) { + this.orderBy = orderBy; + } + + @Override + void appendToOptions(Options options) { + options.orderBy = orderBy; + } + } + + static class LockHintOption extends InternalOption implements ReadOption { + private final RpcLockHint lockHint; + + LockHintOption(RpcLockHint lockHint) { + this.lockHint = lockHint; + } + + @Override + void appendToOptions(Options options) { + options.lockHint = lockHint; + } + } + + static final class DataBoostQueryOption extends InternalOption implements ReadAndQueryOption { + + private final Boolean dataBoostEnabled; + + DataBoostQueryOption(Boolean dataBoostEnabled) { + this.dataBoostEnabled = dataBoostEnabled; + } + + @Override + void appendToOptions(Options options) { + options.dataBoostEnabled = dataBoostEnabled; + } + } + + static class PageSizeOption extends InternalOption implements ListOption { + private final int pageSize; + + PageSizeOption(int pageSize) { + this.pageSize = pageSize; + } + + @Override + void appendToOptions(Options options) { + options.pageSize = pageSize; + } + } + + static class PageTokenOption extends InternalOption implements ListOption { + private final String pageToken; + + PageTokenOption(String pageToken) { + this.pageToken = pageToken; + } + + @Override + void 
appendToOptions(Options options) { + options.pageToken = pageToken; + } + } + + static class FilterOption extends InternalOption implements ListOption { + private final String filter; + + FilterOption(String filter) { + this.filter = filter; + } + + @Override + void appendToOptions(Options options) { + options.filter = filter; + } + + @Override + public boolean equals(Object o) { + if (o == this) return true; + if (!(o instanceof FilterOption)) return false; + return Objects.equals(filter, ((FilterOption) o).filter); + } + } + + static final class LastStatementUpdateOption extends InternalOption implements QueryUpdateOption { + + LastStatementUpdateOption() {} + + @Override + void appendToOptions(Options options) { + options.lastStatement = true; + } + + @Override + public int hashCode() { + return LastStatementUpdateOption.class.hashCode(); + } + + @Override + public boolean equals(Object o) { + return o instanceof LastStatementUpdateOption; + } + } + + static final class RequestIdOption extends InternalOption + implements ReadOption, TransactionOption, UpdateOption { + private final XGoogSpannerRequestId reqId; + + RequestIdOption(XGoogSpannerRequestId reqId) { + this.reqId = reqId; + } + + @Override + void appendToOptions(Options options) { + options.reqId = this.reqId; + } + + @Override + public int hashCode() { + return this.reqId.hashCode(); + } + + @Override + public boolean equals(Object o) { + // instanceof for a null object returns false. 
+ if (!(o instanceof RequestIdOption)) { + return false; + } + RequestIdOption other = (RequestIdOption) o; + return Objects.equals(this.reqId, other.reqId); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Partition.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Partition.java new file mode 100644 index 000000000000..64c3236e5011 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Partition.java @@ -0,0 +1,197 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.protobuf.ByteString; +import java.io.Serializable; +import java.util.Objects; + +/** + * Defines the segments of data to be read in a batch read/query context. They can be serialized and + * processed across several different machines or processes. 
 */
public class Partition implements Serializable {
  private static final long serialVersionUID = 8067099123096783937L;

  // Opaque token identifying the partition; the only publicly exposed field.
  private final ByteString partitionToken;
  // Read-partition state; all null when this partition was created for a query.
  private final String table;
  private final KeySet keys;
  private final Iterable columns;
  private final String index;
  private final Options readOptions;
  // Query-partition state; all null when this partition was created for a read.
  private final Statement statement;
  private final Options queryOptions;
  private final PartitionOptions partitionOptions;

  // Constructor for a query partition: the read-specific fields are left null.
  private Partition(
      ByteString partitionToken,
      PartitionOptions partitionOptions,
      Statement statement,
      Options queryOption) {
    this.partitionToken = partitionToken;
    this.partitionOptions = partitionOptions;
    this.statement = statement;
    this.queryOptions = queryOption;
    this.table = null;
    this.keys = null;
    this.columns = null;
    this.index = null;
    this.readOptions = null;
  }

  // Constructor for a read partition: the query-specific fields are left null.
  private Partition(
      ByteString partitionToken,
      PartitionOptions partitionOptions,
      String table,
      String index,
      KeySet keys,
      Iterable columns,
      Options readOptions) {
    this.partitionToken = partitionToken;
    this.partitionOptions = partitionOptions;
    this.table = table;
    this.index = index;
    this.keys = keys;
    this.columns = columns;
    this.readOptions = readOptions;
    this.statement = null;
    this.queryOptions = null;
  }

  /** Creates a partition that describes a segment of a (possibly index-based) read. */
  static Partition createReadPartition(
      ByteString partitionToken,
      PartitionOptions partitionOptions,
      String table,
      String index,
      KeySet keys,
      Iterable columns,
      Options readOptions) {
    return new Partition(
        partitionToken, partitionOptions, table, index, keys, columns, readOptions);
  }

  /** Creates a partition that describes a segment of a query. */
  static Partition createQueryPartition(
      ByteString partitionToken,
      PartitionOptions partitionOptions,
      Statement statement,
      Options queryOption) {
    return new Partition(partitionToken, partitionOptions, statement, queryOption);
  }

  public ByteString getPartitionToken() {
    return partitionToken;
  }

  String getTable() {
    return table;
  }

  KeySet getKeys() {
    return keys;
  }

  Iterable getColumns() {
    return columns;
  }

  String getIndex() {
    return index;
  }

  Options getReadOptions() {
    return readOptions;
  }

  Statement getStatement() {
    return statement;
  }

  Options getQueryOptions() {
    return queryOptions;
  }

  PartitionOptions getPartitionOptions() {
    return partitionOptions;
  }

  @Override
  public String toString() {
    // Only the fields that are set for this kind of partition (read or query) are printed.
    StringBuilder b = new StringBuilder();
    b.append("partitionToken: ")
        .append(partitionToken.toStringUtf8())
        .append(" partitionOptions: ")
        .append(partitionOptions)
        .append(' ');
    if (table != null) {
      b.append("table: ").append(table).append(' ');
    }
    if (index != null) {
      b.append("index: ").append(index).append(' ');
    }
    if (keys != null) {
      b.append("keys: ").append(keys).append(' ');
    }
    if (columns != null) {
      b.append("columns: ").append(columns).append(' ');
    }
    if (readOptions != null) {
      b.append("readOptions: ").append(readOptions).append(' ');
    }
    if (statement != null) {
      b.append("statement: ").append(statement).append(' ');
    }
    if (queryOptions != null) {
      b.append("queryOptions: ").append(queryOptions).append(' ');
    }
    return b.toString();
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }

    Partition that = (Partition) o;
    return Objects.equals(getPartitionToken(), that.getPartitionToken())
        && Objects.equals(getPartitionOptions(), that.getPartitionOptions())
        && Objects.equals(getTable(), that.getTable())
        && Objects.equals(getKeys(), that.getKeys())
        && Objects.equals(getColumns(), that.getColumns())
        && Objects.equals(getIndex(), that.getIndex())
        && Objects.equals(getReadOptions(), that.getReadOptions())
        && Objects.equals(getStatement(), that.getStatement())
        && Objects.equals(getQueryOptions(), that.getQueryOptions());
  }

  @Override
  public int hashCode() {
    return Objects.hash(
        getPartitionToken(),
        getPartitionOptions(),
        getTable(),
        getKeys(),
        getColumns(),
        getIndex(),
        getReadOptions(),
        getStatement(),
        getQueryOptions());
  }
}
diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/PartitionOptions.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/PartitionOptions.java
new file mode 100644
index 000000000000..aa70901e7e62
--- /dev/null
+++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/PartitionOptions.java
@@ -0,0 +1,132 @@
/*
 * Copyright 2017 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner;

import com.google.common.base.Preconditions;
import java.io.Serializable;
import java.util.Objects;

/**
 * Defines the configuration for the number and size of partitions returned from {@link
 * BatchReadOnlyTransaction#partitionRead}, {@link BatchReadOnlyTransaction#partitionReadUsingIndex}
 * and {@link BatchReadOnlyTransaction#partitionQuery}
 *

Note: these options may not be honored based on the other parameters in the request. + */ +public class PartitionOptions implements Serializable { + + private static final long serialVersionUID = 8067099123096783931L; + + private final long partitionSizeBytes; + private final long maxPartitions; + + private PartitionOptions(Builder builder) { + partitionSizeBytes = builder.partitionSizeBytes; + maxPartitions = builder.maxPartitions; + } + + /** Builder for {@code PartitionOptions} instance. */ + public static class Builder { + + private long partitionSizeBytes; + private long maxPartitions; + + private Builder() {} + + /** + * The desired data size for each partition generated. This is only a hint. The actual size of + * each partition may be smaller or larger than this size request. + * + * @param partitionSizeBytes configuration for size of the partitions returned + */ + public Builder setPartitionSizeBytes(long partitionSizeBytes) { + Preconditions.checkArgument( + partitionSizeBytes > 0, "Invalid partitionSizeBytes: " + partitionSizeBytes); + this.partitionSizeBytes = partitionSizeBytes; + return this; + } + + /* + * The desired maximum number of partitions to return. For example, this may be set to the + * number of workers available. This is only a hint and may provide different results based on + * the request. + * + * @param maxPartitions configuration for count of the partitions returned + */ + public Builder setMaxPartitions(long maxPartitions) { + Preconditions.checkArgument(maxPartitions > 0, "Invalid maxPartitions: " + maxPartitions); + this.maxPartitions = maxPartitions; + return this; + } + + public PartitionOptions build() { + return new PartitionOptions(this); + } + } + + public static PartitionOptions getDefaultInstance() { + return newBuilder().build(); + } + + public static Builder newBuilder() { + return new Builder(); + } + + /** Returns the size of the partition in bytes. 
*/ + public long getPartitionSizeBytes() { + return partitionSizeBytes; + } + + /** Returns the maximum number of partitions to be created. */ + public long getMaxPartitions() { + return maxPartitions; + } + + void appendToProto(com.google.spanner.v1.PartitionOptions.Builder proto) { + if (partitionSizeBytes > 0) { + proto.setPartitionSizeBytes(partitionSizeBytes); + } + if (maxPartitions > 0) { + proto.setMaxPartitions(maxPartitions); + } + } + + @Override + public String toString() { + return String.format( + "partitionSizeBytes: %s, maxPartitions: %s", getPartitionSizeBytes(), getMaxPartitions()); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + PartitionOptions that = (PartitionOptions) o; + return Objects.equals(getMaxPartitions(), that.getMaxPartitions()) + && Objects.equals(getPartitionSizeBytes(), that.getPartitionSizeBytes()); + } + + @Override + public int hashCode() { + return Objects.hash(getMaxPartitions(), getPartitionSizeBytes()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/PartitionedDmlTransaction.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/PartitionedDmlTransaction.java new file mode 100644 index 000000000000..394b8bfbd9ef --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/PartitionedDmlTransaction.java @@ -0,0 +1,248 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner;

import static com.google.cloud.spanner.AbstractReadContext.getChannelHintOptions;
import static com.google.common.base.Preconditions.checkState;

import com.google.api.core.InternalApi;
import com.google.api.gax.grpc.GrpcStatusCode;
import com.google.api.gax.rpc.AbortedException;
import com.google.api.gax.rpc.DeadlineExceededException;
import com.google.api.gax.rpc.InternalException;
import com.google.api.gax.rpc.ServerStream;
import com.google.api.gax.rpc.UnavailableException;
import com.google.cloud.spanner.Options.UpdateOption;
import com.google.cloud.spanner.spi.v1.SpannerRpc;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Stopwatch;
import com.google.common.base.Ticker;
import com.google.protobuf.ByteString;
import com.google.spanner.v1.BeginTransactionRequest;
import com.google.spanner.v1.ExecuteSqlRequest;
import com.google.spanner.v1.PartialResultSet;
import com.google.spanner.v1.RequestOptions;
import com.google.spanner.v1.Transaction;
import com.google.spanner.v1.TransactionOptions;
import com.google.spanner.v1.TransactionSelector;
import io.grpc.Status;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.Map;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * Session-bound transaction that executes a single DML statement as a Cloud Spanner Partitioned
 * DML transaction, transparently retrying on retryable stream errors ({@link
 * UnavailableException}, retryable {@link InternalException}) using the last seen resume token,
 * and restarting the whole transaction on {@link AbortedException}.
 */
@InternalApi
public class PartitionedDmlTransaction implements SessionImpl.SessionTransaction {

  private static final Logger LOGGER = Logger.getLogger(PartitionedDmlTransaction.class.getName());

  private final SessionImpl session;
  private final SpannerRpc rpc;
  // Ticker is injected so tests can control the elapsed-time measurement of the Stopwatch.
  private final Ticker ticker;
  // Classifies InternalExceptions (e.g. unexpected EOS on a gRPC stream) as retryable or not.
  private final IsRetryableInternalError isRetryableInternalErrorPredicate;
  // Flipped to false by invalidate() when a new operation takes over the session; volatile so the
  // check in executeStreamingPartitionedUpdate sees the change without additional locking.
  private volatile boolean isValid = true;
  // NOTE(review): generic type arguments were stripped by the patch rendering; restored here as
  // Map<SpannerRpc.Option, ?> to match the SpannerRpc call options API — confirm against upstream.
  private final Map<SpannerRpc.Option, ?> channelHintOptions;

  PartitionedDmlTransaction(SessionImpl session, SpannerRpc rpc, Ticker ticker) {
    this.session = session;
    this.rpc = rpc;
    this.ticker = ticker;
    this.isRetryableInternalErrorPredicate = new IsRetryableInternalError();
    // A random channel hint spreads partitioned DML transactions across gRPC channels.
    this.channelHintOptions =
        getChannelHintOptions(
            session.getOptions(), ThreadLocalRandom.current().nextLong(Long.MAX_VALUE));
  }

  /**
   * Executes the {@link Statement} using a partitioned dml transaction with automatic retry if the
   * transaction was aborted. The update method uses the ExecuteStreamingSql RPC to execute the
   * statement, and will retry the stream if an {@link UnavailableException} is thrown, using the
   * last seen resume token if the server returns any.
   *
   * @param statement the DML statement to execute
   * @param timeout total deadline for the operation, including all retries
   * @param updateOptions options for the update (priority, tag, ...)
   * @return the lower bound of the number of updated rows
   */
  long executeStreamingPartitionedUpdate(
      final Statement statement, final Duration timeout, final UpdateOption... updateOptions) {
    checkState(isValid, "Partitioned DML has been invalidated by a new operation on the session");
    LOGGER.log(Level.FINER, "Starting PartitionedUpdate statement");

    ByteString resumeToken = ByteString.EMPTY;
    // foundStats guards against non-DML statements, which produce no row count stats at all.
    boolean foundStats = false;
    long updateCount = 0L;
    Stopwatch stopwatch = Stopwatch.createStarted(ticker);
    Options options = Options.fromUpdateOptions(updateOptions);

    try {
      ExecuteSqlRequest request = newTransactionRequestFrom(statement, options);
      // The channel ID is set to zero here. It will be filled in later by SpannerRpc when it reads
      // the channel hint from the options that are passed in.
      XGoogSpannerRequestId requestId = this.session.getRequestIdCreator().nextRequestId(0);

      while (true) {
        // Recompute the remaining time budget on every attempt; throws DeadlineExceededException
        // once the total timeout has been consumed by retries.
        final Duration remainingTimeout = tryUpdateTimeout(timeout, stopwatch);

        try {
          ServerStream<PartialResultSet> stream =
              rpc.executeStreamingPartitionedDml(
                  request, channelHintOptions, requestId, remainingTimeout);

          for (PartialResultSet rs : stream) {
            if (rs.getResumeToken() != null && !rs.getResumeToken().isEmpty()) {
              resumeToken = rs.getResumeToken();
            }
            if (rs.hasStats()) {
              foundStats = rs.getStats().hasRowCountLowerBound();
              updateCount += rs.getStats().getRowCountLowerBound();
            }
          }
          // Stream completed normally: the statement has finished.
          break;
        } catch (UnavailableException e) {
          LOGGER.log(
              Level.FINER, "Retrying PartitionedDml transaction after UnavailableException", e);
          // Resume from the last token if we have one; otherwise restart the transaction.
          request = resumeOrRestartRequest(resumeToken, statement, request, options);
          if (resumeToken.isEmpty()) {
            // Create a new xGoogSpannerRequestId if there is no resume token, as that means that
            // the entire transaction will be retried.
            requestId = session.getRequestIdCreator().nextRequestId(session.getChannel());
          }
        } catch (InternalException e) {
          // Only a specific subset of INTERNAL errors (e.g. unexpected EOS) is retryable.
          if (!isRetryableInternalErrorPredicate.apply(e)) {
            throw e;
          }

          LOGGER.log(
              Level.FINER, "Retrying PartitionedDml transaction after InternalException - EOS", e);
          request = resumeOrRestartRequest(resumeToken, statement, request, options);
          if (resumeToken.isEmpty()) {
            // Create a new xGoogSpannerRequestId if there is no resume token, as that means that
            // the entire transaction will be retried.
            requestId = session.getRequestIdCreator().nextRequestId(session.getChannel());
          }
        } catch (AbortedException e) {
          LOGGER.log(Level.FINER, "Retrying PartitionedDml transaction after AbortedException", e);
          // An aborted transaction cannot be resumed: reset all progress and begin a new
          // transaction from scratch.
          resumeToken = ByteString.EMPTY;
          foundStats = false;
          updateCount = 0L;
          request = newTransactionRequestFrom(statement, options);
          // Create a new xGoogSpannerRequestId.
          requestId = session.getRequestIdCreator().nextRequestId(session.getChannel());
        }
      }
      if (!foundStats) {
        throw SpannerExceptionFactory.newSpannerException(
            ErrorCode.INVALID_ARGUMENT,
            "Partitioned DML response missing stats possibly due to non-DML statement as input");
      }
      LOGGER.log(Level.FINER, "Finished PartitionedUpdate statement");
      return updateCount;
    } catch (Exception e) {
      // Normalize any error (gax, gRPC, runtime) to a SpannerException for callers.
      throw SpannerExceptionFactory.asSpannerException(e);
    }
  }

  @Override
  public void invalidate() {
    isValid = false;
  }

  /** No-op method needed to implement SessionTransaction interface. */
  @Override
  public void setSpan(ISpan span) {}

  /** No-op method needed to implement SessionTransaction interface. */
  @Override
  public void close() {}

  /**
   * Returns the time remaining of the total timeout, or throws {@link DeadlineExceededException}
   * if the budget has been exhausted by earlier attempts.
   */
  private Duration tryUpdateTimeout(final Duration timeout, final Stopwatch stopwatch) {
    final Duration remainingTimeout =
        timeout.minus(stopwatch.elapsed(TimeUnit.MILLISECONDS), ChronoUnit.MILLIS);
    if (remainingTimeout.isNegative() || remainingTimeout.isZero()) {
      // The total deadline has been exceeded while retrying.
      throw new DeadlineExceededException(
          null, GrpcStatusCode.of(Status.Code.DEADLINE_EXCEEDED), false);
    }
    return remainingTimeout;
  }

  /**
   * Returns a request that resumes the current stream at {@code resumeToken}, or, when no resume
   * token has been seen yet, a request that starts a brand-new partitioned DML transaction.
   */
  private ExecuteSqlRequest resumeOrRestartRequest(
      final ByteString resumeToken,
      final Statement statement,
      final ExecuteSqlRequest originalRequest,
      final Options options) {
    if (resumeToken.isEmpty()) {
      return newTransactionRequestFrom(statement, options);
    } else {
      return ExecuteSqlRequest.newBuilder(originalRequest).setResumeToken(resumeToken).build();
    }
  }

  /**
   * Begins a new partitioned DML transaction and builds an ExecuteSqlRequest for {@code statement}
   * bound to that transaction, including any priority/tag request options.
   */
  @VisibleForTesting
  ExecuteSqlRequest newTransactionRequestFrom(final Statement statement, final Options options) {
    ByteString transactionId = initTransaction(options);

    final TransactionSelector transactionSelector =
        TransactionSelector.newBuilder().setId(transactionId).build();
    final ExecuteSqlRequest.Builder builder =
        ExecuteSqlRequest.newBuilder()
            .setSql(statement.getSql())
            .setQueryMode(ExecuteSqlRequest.QueryMode.NORMAL)
            .setSession(session.getName())
            .setTransaction(transactionSelector);

    setParameters(builder, statement.getParameters());

    builder.setResumeToken(ByteString.EMPTY);

    if (options.hasPriority() || options.hasTag()) {
      RequestOptions.Builder requestOptionsBuilder = RequestOptions.newBuilder();
      if (options.hasPriority()) {
        requestOptionsBuilder.setPriority(options.priority());
      }
      if (options.hasTag()) {
        requestOptionsBuilder.setRequestTag(options.tag());
      }
      builder.setRequestOptions(requestOptionsBuilder.build());
    }
    return builder.build();
  }

  /**
   * Executes BeginTransaction with PartitionedDml transaction options and returns the transaction
   * id. Throws an INTERNAL SpannerException if the server returns no transaction id.
   */
  private ByteString initTransaction(final Options options) {
    final BeginTransactionRequest request =
        BeginTransactionRequest.newBuilder()
            .setSession(session.getName())
            .setOptions(
                TransactionOptions.newBuilder()
                    .setPartitionedDml(TransactionOptions.PartitionedDml.getDefaultInstance())
                    .setExcludeTxnFromChangeStreams(
                        options.withExcludeTxnFromChangeStreams() == Boolean.TRUE))
            .build();
    Transaction tx = rpc.beginTransaction(request, channelHintOptions, true);
    if (tx.getId().isEmpty()) {
      throw SpannerExceptionFactory.newSpannerException(
          ErrorCode.INTERNAL,
          "Failed to init transaction, missing transaction id\n" + session.getName());
    }
    return tx.getId();
  }

  // Delegates parameter encoding to the shared helper used by all read contexts.
  // NOTE(review): generic type arguments were stripped by the patch rendering; restored here as
  // Map<String, Value> to match Statement.getParameters() — confirm against upstream.
  private void setParameters(
      final ExecuteSqlRequest.Builder requestBuilder,
      final Map<String, Value> statementParameters) {
    AbstractReadContext.addParameters(requestBuilder, statementParameters);
  }
}
diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ProtobufResultSet.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ProtobufResultSet.java
new file mode 100644
index 000000000000..bbd8c41291f8
--- /dev/null
+++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ProtobufResultSet.java
/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner;

import com.google.api.core.InternalApi;
import com.google.protobuf.Value;

/** Interface for {@link ResultSet}s that can return a protobuf value. */
@InternalApi
public interface ProtobufResultSet extends ResultSet {

  /** Returns true if the protobuf value for the given column is still available. */
  boolean canGetProtobufValue(int columnIndex);

  /**
   * Returns the column value as a protobuf value.
   *
   * <p>This is an internal method not intended for external usage.
   *
   * <p>This method may only be called before the column value has been decoded to a plain Java
   * object. This means that the {@link DecodeMode} that is used for the {@link ResultSet} must be
   * one of {@link DecodeMode#LAZY_PER_ROW} and {@link DecodeMode#LAZY_PER_COL}, and that the
   * corresponding {@link ResultSet#getValue(int)}, {@link ResultSet#getBoolean(int)}, ... method
   * may not yet have been called for the column.
   */
  @InternalApi
  Value getProtobufValue(int columnIndex);
}
diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ReadContext.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ReadContext.java
new file mode 100644
index 000000000000..4b5ba8620eed
--- /dev/null
+++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ReadContext.java
/*
 * Copyright 2017 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner;

import com.google.api.core.ApiFuture;
import com.google.cloud.spanner.Options.QueryOption;
import com.google.cloud.spanner.Options.ReadOption;
import javax.annotation.Nullable;

/**
 * A concurrency context in which to run a read or SQL statement. All {@code ReadContext}s are
 * implicitly bound to a {@link Session} and therefore a particular {@link Database}.
 */
public interface ReadContext extends AutoCloseable {
  /**
   * Used to specify the mode in which the query should be analyzed by {@link
   * ReadContext#analyzeQuery(Statement,QueryAnalyzeMode)}.
   */
  enum QueryAnalyzeMode {
    /** Retrieves only the query plan information. No result data is returned. */
    PLAN,
    /** Retrieves both query plan and query execution statistics along with the result data. */
    PROFILE
  }

  /**
   * Reads zero or more rows from a database.
   *
   * <p>Implementations may or may not block in the initial {@code read(...)} call; for those that
   * do not, the remote call will be initiated immediately but blocking on the response is deferred
   * to the first {@link ResultSet#next()} call. Regardless of blocking behavior, any {@link
   * SpannerException} is deferred to the first or subsequent {@link ResultSet#next()} call.
   *
   * <pre>{@code
   * ReadContext readContext = dbClient.singleUse();
   * ResultSet resultSet =
   *     readContext.read(
   *         "Albums",
   *         // KeySet.all() can be used to read all rows in a table. KeySet exposes other
   *         // methods to read only a subset of the table.
   *         KeySet.all(),
   *         Arrays.asList("SingerId", "AlbumId", "AlbumTitle"));
   * }</pre>
   *
   * @param table the name of the table to read
   * @param keys the keys and ranges of rows to read. Regardless of ordering in {@code keys}, rows
   *     are returned in their natural key order.
   * @param columns the columns to read
   * @param options the options to configure the read
   */
  ResultSet read(String table, KeySet keys, Iterable<String> columns, ReadOption... options);

  /**
   * Same as {@link #read(String, KeySet, Iterable, ReadOption...)}, but is guaranteed to be
   * non-blocking and will return the results as an {@link AsyncResultSet}.
   */
  AsyncResultSet readAsync(
      String table, KeySet keys, Iterable<String> columns, ReadOption... options);

  /**
   * Reads zero or more rows from a database using an index.
   *
   * <p>Implementations may or may not block in the initial {@code read(...)} call; for those that
   * do not, the remote call will be initiated immediately but blocking on the response is deferred
   * to the first {@link ResultSet#next()} call. Regardless of blocking behavior, any {@link
   * SpannerException} is deferred to the first or subsequent {@link ResultSet#next()} call.
   *
   * <pre>{@code
   * ReadContext readContext = dbClient.singleUse();
   * Struct row =
   *     readContext.readRowUsingIndex("Albums", "AlbumsByAlbumId", Key.of(1, "Green"),
   *         Arrays.asList("AlbumId", "AlbumTitle"));
   * }</pre>
   *
   * @param table the name of the table to read
   * @param index the name of the index on {@code table} to use
   * @param keys the keys and ranges of index rows to read. Regardless of ordering in {@code keys},
   *     rows are returned in the natural key order of the index.
   * @param columns the columns to read
   * @param options the options to configure the read
   */
  ResultSet readUsingIndex(
      String table, String index, KeySet keys, Iterable<String> columns, ReadOption... options);

  /**
   * Same as {@link #readUsingIndex(String, String, KeySet, Iterable, ReadOption...)}, but is
   * guaranteed to be non-blocking and will return its results as an {@link AsyncResultSet}.
   */
  AsyncResultSet readUsingIndexAsync(
      String table, String index, KeySet keys, Iterable<String> columns, ReadOption... options);

  /**
   * Reads a single row from a database, returning {@code null} if the row does not exist.
   *
   * <pre>{@code
   * ReadContext readContext = dbClient.singleUse();
   * Struct row =
   *     readContext.readRow("Albums", Key.of(2, 1), Arrays.asList("MarketingBudget"));
   * }</pre>
   *
   * @param table the name of the table to read
   * @param key the row to read
   * @param columns the columns to return
   */
  @Nullable
  Struct readRow(String table, Key key, Iterable<String> columns);

  /** Same as {@link #readRow(String, Key, Iterable)}, but is guaranteed to be non-blocking. */
  ApiFuture<Struct> readRowAsync(String table, Key key, Iterable<String> columns);

  /**
   * Reads a single row from a database using an index, returning {@code null} if the row does not
   * exist.
   *
   * <pre>{@code
   * ReadContext readContext = dbClient.singleUse();
   * Struct row =
   *     readContext.readRowUsingIndex("Albums", "AlbumsByAlbumId", Key.of(1, "Green"),
   *         Arrays.asList("AlbumId", "AlbumTitle"));
   * }</pre>
   *
   * @param table the name of the table to read
   * @param index the name of the index on {@code table} to use
   * @param key the index row to read
   * @param columns the columns to return
   */
  @Nullable
  Struct readRowUsingIndex(String table, String index, Key key, Iterable<String> columns);

  /**
   * Same as {@link #readRowUsingIndex(String, String, Key, Iterable)}, but is guaranteed to be
   * non-blocking.
   */
  ApiFuture<Struct> readRowUsingIndexAsync(
      String table, String index, Key key, Iterable<String> columns);

  /**
   * Executes a query against the database. Can also execute a DML statement with returning clause
   * in a read/write transaction.
   *
   * <p>Implementations may or may not block in the initial {@code executeQuery(...)} call; for
   * those that do not, the remote call will be initiated immediately but blocking on the response
   * is deferred to the first {@link ResultSet#next()} call. Regardless of blocking behavior, any
   * {@link SpannerException} is deferred to the first or subsequent {@link ResultSet#next()} call.
   *
   * <pre>{@code
   * // Rows without an explicit value for MarketingBudget will have a MarketingBudget equal to
   * // null.
   * ReadContext readContext = dbClient.singleUse();
   * ResultSet resultSet =
   *     readContext.executeQuery(
   *         Statement.of(
   *             "SELECT SingerId, AlbumId, MarketingBudget, LastUpdateTime FROM Albums"));
   * }</pre>
   *
   * @param statement the query statement to execute
   * @param options the options to configure the query
   */
  ResultSet executeQuery(Statement statement, QueryOption... options);

  /**
   * Same as {@link #executeQuery(Statement, QueryOption...)}, but is guaranteed to be non-blocking
   * and returns its results as an {@link AsyncResultSet}.
   */
  AsyncResultSet executeQueryAsync(Statement statement, QueryOption... options);

  /**
   * Analyzes a query and returns query plan and/or query execution statistics information.
   *
   * <p>The query plan and query statistics information is contained in {@link
   * com.google.spanner.v1.ResultSetStats} that can be accessed by calling {@link
   * ResultSet#getStats()} on the returned {@code ResultSet}.
   *
   * <pre>{@code
   * ReadContext rc = dbClient.singleUse();
   * ResultSet resultSet =
   *     rc.analyzeQuery(
   *         Statement.of("SELECT SingerId, AlbumId, MarketingBudget FROM Albums"),
   *         ReadContext.QueryAnalyzeMode.PROFILE);
   * while (resultSet.next()) {
   *   // Discard the results. We're only processing because getStats() below requires it.
   *   resultSet.getCurrentRowAsStruct();
   * }
   * ResultSetStats stats = resultSet.getStats();
   * }</pre>
   *
   * @param statement the query statement to execute
   * @param queryMode the mode in which to execute the query
   */
  ResultSet analyzeQuery(Statement statement, QueryAnalyzeMode queryMode);

  /** Closes this read context and frees up the underlying resources. */
  @Override
  void close();
}
diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ReadOnlyTransaction.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ReadOnlyTransaction.java
new file mode 100644
index 000000000000..3441f9492095
--- /dev/null
+++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ReadOnlyTransaction.java
/*
 * Copyright 2017 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner;

import com.google.cloud.Timestamp;

/**
 * A transaction type that provides guaranteed consistency across several reads, but does not allow
 * writes. Snapshot read-only transactions can be configured to read at timestamps in the past.
 * Snapshot read-only transactions do not need to be committed.
 *
 *

 * <p>Snapshot read-only transactions provide a simpler method than locking read-write transactions
 * for doing several consistent reads. However, this type of transaction does not support writes.
 *
 * <p>Snapshot read-only transactions do not take locks. Instead, they work by choosing a Cloud
 * Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks,
 * they do not block concurrent read-write transactions.
 *
 * <p>Unlike locking read-write transactions, snapshot read-only transactions never abort. They can
 * fail if the chosen read timestamp is garbage collected; however, the default garbage collection
 * policy is generous enough that most applications do not need to worry about this in practice. See
 * the class documentation of {@link TimestampBound} for more details.
 *
 * <p>To execute a snapshot transaction, specify a {@link TimestampBound}, which tells Cloud Spanner
 * how to choose a read timestamp.
 *
 * @see Session#singleUseReadOnlyTransaction(TimestampBound)
 * @see Session#readOnlyTransaction(TimestampBound)
 */
public interface ReadOnlyTransaction extends ReadContext {
  /**
   * Returns the timestamp chosen to perform reads and queries in this transaction. The value can
   * only be read after some read or query has either returned some data or completed without
   * returning any data.
   */
  Timestamp getReadTimestamp();
}
diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ReplicaInfo.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ReplicaInfo.java
new file mode 100644
index 000000000000..4d3887c0adc2
--- /dev/null
+++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ReplicaInfo.java
/*
 * Copyright 2021 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner;

import com.google.common.annotations.VisibleForTesting;
import java.util.Objects;

/** Represents Cloud Spanner replica information.
*/ +public class ReplicaInfo { + + abstract static class Builder { + abstract Builder setLocation(String location); + + abstract Builder setType(ReplicaType type); + + abstract Builder setDefaultLeaderLocation(boolean defaultLeaderLocation); + + abstract Builder setProto(com.google.spanner.admin.instance.v1.ReplicaInfo proto); + + public abstract ReplicaInfo build(); + } + + public static class BuilderImpl extends Builder { + + private String location; + private ReplicaType type; + private boolean defaultLeaderLocation; + private com.google.spanner.admin.instance.v1.ReplicaInfo proto; + + /** + * The location of the serving resources. This can be one of the values as specified in + * https://cloud.google.com/spanner/docs/instances#available-configurations-regional. + */ + @Override + Builder setLocation(String location) { + this.location = location; + return this; + } + + /** The type of the replica, as per {@link ReplicaType}. */ + @Override + Builder setType(ReplicaType type) { + this.type = type; + return this; + } + + /** + * If true, this location is designated as the default leader location where leader replicas are + * placed. 
+ */ + @Override + Builder setDefaultLeaderLocation(boolean defaultLeaderLocation) { + this.defaultLeaderLocation = defaultLeaderLocation; + return this; + } + + @Override + Builder setProto(com.google.spanner.admin.instance.v1.ReplicaInfo proto) { + this.proto = proto; + return this; + } + + @Override + public ReplicaInfo build() { + return new ReplicaInfo(this); + } + } + + public static ReplicaInfo fromProto(com.google.spanner.admin.instance.v1.ReplicaInfo proto) { + return newBuilder() + .setLocation(proto.getLocation()) + .setType(ReplicaType.fromProto(proto.getType())) + .setDefaultLeaderLocation(proto.getDefaultLeaderLocation()) + .setProto(proto) + .build(); + } + + static Builder newBuilder() { + return new BuilderImpl(); + } + + private final String location; + private final ReplicaType type; + private final boolean defaultLeaderLocation; + private final com.google.spanner.admin.instance.v1.ReplicaInfo proto; + + @VisibleForTesting + ReplicaInfo( + String location, + ReplicaType type, + boolean defaultLeaderLocation, + com.google.spanner.admin.instance.v1.ReplicaInfo proto) { + this.location = location; + this.type = type; + this.defaultLeaderLocation = defaultLeaderLocation; + this.proto = proto; + } + + ReplicaInfo(BuilderImpl builder) { + this.location = builder.location; + this.type = builder.type; + this.defaultLeaderLocation = builder.defaultLeaderLocation; + this.proto = builder.proto; + } + + public String getLocation() { + return location; + } + + public ReplicaType getType() { + return type; + } + + public boolean isDefaultLeaderLocation() { + return defaultLeaderLocation; + } + + public com.google.spanner.admin.instance.v1.ReplicaInfo getProto() { + return proto; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ReplicaInfo)) { + return false; + } + ReplicaInfo that = (ReplicaInfo) o; + return defaultLeaderLocation == that.defaultLeaderLocation + && Objects.equals(location, 
that.location) + && type == that.type; + } + + @Override + public int hashCode() { + return Objects.hash(location, type, defaultLeaderLocation); + } + + @Override + public String toString() { + return "ReplicaInfo{" + + "location='" + + location + + '\'' + + ", type=" + + type + + ", defaultLeaderLocation=" + + defaultLeaderLocation + + '}'; + } + + /** + * Indicates the type of the replica. See the replica types documentation at + * https://cloud.google.com/spanner/docs/replication#replica_types for more details. + */ + public enum ReplicaType { + TYPE_UNSPECIFIED, + READ_WRITE, + READ_ONLY, + WITNESS; + + public static ReplicaType fromProto( + com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType proto) { + switch (proto) { + case TYPE_UNSPECIFIED: + return ReplicaType.TYPE_UNSPECIFIED; + case READ_WRITE: + return ReplicaType.READ_WRITE; + case READ_ONLY: + return ReplicaType.READ_ONLY; + case WITNESS: + return ReplicaType.WITNESS; + default: + throw new IllegalArgumentException("Unrecognized replica type " + proto); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Restore.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Restore.java new file mode 100644 index 000000000000..d6a9c28850bb --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Restore.java @@ -0,0 +1,109 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.cloud.spanner.encryption.RestoreEncryptionConfig; +import com.google.common.annotations.VisibleForTesting; +import java.util.Objects; + +/** Represents a restore operation of a Cloud Spanner backup. */ +public class Restore { + + public static class Builder { + + private final BackupId source; + private final DatabaseId destination; + private RestoreEncryptionConfig encryptionConfig; + + public Builder(BackupId source, DatabaseId destination) { + this.source = source; + this.destination = destination; + } + + /** + * Optional for restoring a backup. + * + *

The encryption configuration to be used for the backup. The possible configurations are + * {@link com.google.cloud.spanner.encryption.CustomerManagedEncryption}, {@link + * com.google.cloud.spanner.encryption.GoogleDefaultEncryption} and {@link + * com.google.cloud.spanner.encryption.UseBackupEncryption}. + * + *

If no encryption config is given the database will be restored with the same encryption as + * set by the backup ({@link com.google.cloud.spanner.encryption.UseBackupEncryption}). + */ + public Builder setEncryptionConfig(RestoreEncryptionConfig encryptionConfig) { + this.encryptionConfig = encryptionConfig; + return this; + } + + public Restore build() { + return new Restore(this); + } + } + + private final BackupId source; + private final DatabaseId destination; + private final RestoreEncryptionConfig encryptionConfig; + + Restore(Builder builder) { + this(builder.source, builder.destination, builder.encryptionConfig); + } + + @VisibleForTesting + Restore(BackupId source, DatabaseId destination, RestoreEncryptionConfig encryptionConfig) { + this.source = source; + this.destination = destination; + this.encryptionConfig = encryptionConfig; + } + + public BackupId getSource() { + return source; + } + + public DatabaseId getDestination() { + return destination; + } + + public RestoreEncryptionConfig getEncryptionConfig() { + return encryptionConfig; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Restore restore = (Restore) o; + return Objects.equals(source, restore.source) + && Objects.equals(destination, restore.destination) + && Objects.equals(encryptionConfig, restore.encryptionConfig); + } + + @Override + public int hashCode() { + return Objects.hash(source, destination, encryptionConfig); + } + + @Override + public String toString() { + return String.format( + "Restore[%s, %s, %s]", source.getName(), destination.getName(), encryptionConfig); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/RestoreInfo.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/RestoreInfo.java new file mode 100644 index 000000000000..f00504e698d9 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/RestoreInfo.java @@ -0,0 +1,134 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.cloud.Timestamp; +import javax.annotation.Nullable; + +/** Represents the restore information of a Cloud Spanner database. */ +public class RestoreInfo { + private static class Builder { + private final BackupId backup; + private RestoreSourceType sourceType; + private Timestamp backupCreateTime; + private DatabaseId sourceDatabase; + private com.google.spanner.admin.database.v1.RestoreInfo proto; + + private Builder(BackupId backup) { + this.backup = backup; + } + + private Builder setSourceType(RestoreSourceType sourceType) { + this.sourceType = sourceType; + return this; + } + + private Builder setBackupCreateTime(Timestamp backupCreateTime) { + this.backupCreateTime = backupCreateTime; + return this; + } + + private Builder setSourceDatabase(DatabaseId sourceDatabase) { + this.sourceDatabase = sourceDatabase; + return this; + } + + private Builder setProto(com.google.spanner.admin.database.v1.RestoreInfo proto) { + this.proto = proto; + return this; + } + + private RestoreInfo build() { + return new RestoreInfo(this); + } + } + + /** Source of the restore information. */ + public enum RestoreSourceType { + // Not specified. + UNSPECIFIED, + // The database was restored from a Backup. 
+ BACKUP + } + + private final BackupId backup; + private final RestoreSourceType sourceType; + private final Timestamp backupCreateTime; + private final DatabaseId sourceDatabase; + private final com.google.spanner.admin.database.v1.RestoreInfo proto; + + private RestoreInfo(Builder builder) { + this.backup = builder.backup; + this.sourceType = builder.sourceType; + this.backupCreateTime = builder.backupCreateTime; + this.sourceDatabase = builder.sourceDatabase; + this.proto = builder.proto; + } + + /** The backup source of the restored database. The backup may no longer exist. */ + public BackupId getBackup() { + return backup; + } + + /** The source type of the restore. */ + public RestoreSourceType getSourceType() { + return sourceType; + } + + /** The create time of the backup for the restore. */ + public Timestamp getBackupCreateTime() { + return backupCreateTime; + } + + /** The source database that was used to create the backup. The database may no longer exist. */ + public DatabaseId getSourceDatabase() { + return sourceDatabase; + } + + /** Returns the raw proto instance that was used to construct this {@link RestoreInfo}. */ + public @Nullable com.google.spanner.admin.database.v1.RestoreInfo getProto() { + return proto; + } + + /** + * Returns a {@link RestoreInfo} instance from the given proto, or null if the given + * proto is the default proto instance (i.e. there is no restore info). + */ + static RestoreInfo fromProtoOrNullIfDefaultInstance( + com.google.spanner.admin.database.v1.RestoreInfo proto) { + return proto.equals(com.google.spanner.admin.database.v1.RestoreInfo.getDefaultInstance()) + ? 
null + : new Builder(BackupId.of(proto.getBackupInfo().getBackup())) + .setSourceType(fromProtoSourceType(proto.getSourceType())) + .setBackupCreateTime(Timestamp.fromProto(proto.getBackupInfo().getCreateTime())) + .setSourceDatabase(DatabaseId.of(proto.getBackupInfo().getSourceDatabase())) + .setProto(proto) + .build(); + } + + static RestoreSourceType fromProtoSourceType( + com.google.spanner.admin.database.v1.RestoreSourceType protoSourceType) { + switch (protoSourceType) { + case BACKUP: + return RestoreSourceType.BACKUP; + case TYPE_UNSPECIFIED: + return RestoreSourceType.UNSPECIFIED; + default: + throw new IllegalArgumentException("Unrecognized source type " + protoSourceType); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ResultSet.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ResultSet.java new file mode 100644 index 000000000000..cd6fa10b9962 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ResultSet.java @@ -0,0 +1,85 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.cloud.spanner.Options.QueryOption; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import javax.annotation.Nullable; + +/** + * Provides access to the data returned by a Cloud Spanner read or query. 
{@code ResultSet} allows a + * single row to be inspected at a time through the methods from the {@link StructReader} interface, + * in the order that the rows were returned by the read or query. The result set can be positioned + * over the next row, if one exists, by calling {@link #next()}; this method returns {@code false} + * when all rows returned have been seen. The result set is initially positioned before the first + * row, so a call to {@code next()} is required before the first row can be inspected. + * + *

{@code ResultSet} implementations may buffer data ahead and/or maintain a persistent streaming + * connection to the remote service until all data has been returned or the resultSet closed. As + * such, it is important that all uses of {@code ResultSet} either fully consume it (that is, call + * {@code next()} until {@code false} is returned or it throws an exception) or explicitly call + * {@link #close()}: failure to do so may result in wasted work or leaked resources. + * + *

{@code ResultSet} implementations are not required to be thread-safe: if methods are called + * from multiple threads, external synchronization must be used. + */ +public interface ResultSet extends AutoCloseable, StructReader { + /** + * Advances the result set to the next row, returning false if no such row exists. This method may + * block. + */ + boolean next() throws SpannerException; + + /** + * Creates an immutable version of the row that the result set is positioned over. This may + * involve copying internal data structures, and so converting all rows to {@code Struct} objects + * is generally more expensive than processing the {@code ResultSet} directly. + */ + Struct getCurrentRowAsStruct(); + + /** + * Explicitly close the result set, releasing any associated resources. This must always be called + * when disposing of a {@code ResultSet} before {@link #next()} has returned {@code false} or + * raised an exception. Calling {@code close()} is also allowed if the result set has been fully + * consumed, so a recommended practice is to unconditionally close the result set once it is done + * with, typically using a try-with-resources construct. + */ + @Override + void close(); + + /** + * Returns the {@link ResultSetStats} for the query only if the query was executed in either the + * {@code PLAN} or the {@code PROFILE} mode via the {@link ReadContext#analyzeQuery(Statement, + * com.google.cloud.spanner.ReadContext.QueryAnalyzeMode)} method or for DML statements in {@link + * ReadContext#executeQuery(Statement, QueryOption...)}. Attempts to call this method on a {@code + * ResultSet} not obtained from {@code analyzeQuery} or {@code executeQuery} will return a {@code + * null} {@code ResultSetStats}. This method must be called after {@link #next()} has + * returned @{code false}. Calling it before that will result in {@code null} {@code + * ResultSetStats} too. 
+ */ + @Nullable + ResultSetStats getStats(); + + /** + * Returns the {@link ResultSetMetadata} for this {@link ResultSet}. This method may only be + * called after calling {@link ResultSet#next()} at least once. + */ + default ResultSetMetadata getMetadata() { + throw new UnsupportedOperationException("Method should be overridden"); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ResultSets.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ResultSets.java new file mode 100644 index 000000000000..92a12286ae27 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ResultSets.java @@ -0,0 +1,584 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.core.ApiFuture; +import com.google.api.gax.core.ExecutorProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.Type.Code; +import com.google.cloud.spanner.Type.StructField; +import com.google.common.base.Preconditions; +import com.google.common.base.Supplier; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ProtocolMessageEnum; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import java.math.BigDecimal; +import java.util.List; +import java.util.UUID; +import java.util.function.Function; + +/** Utility methods for working with {@link com.google.cloud.spanner.ResultSet}. */ +public final class ResultSets { + + private ResultSets() {} + + /** + * Creates a pre-populated {@link com.google.cloud.spanner.ResultSet} + * + * @param type row type of the rows in the returned {@link com.google.cloud.spanner.ResultSet} + * @param rows the rows in the returned {@link com.google.cloud.spanner.ResultSet}. + */ + public static ResultSet forRows(Type type, Iterable rows) { + return new PrePopulatedResultSet(type, rows); + } + + /** Converts the given {@link ResultSet} to an {@link AsyncResultSet}. 
*/ + public static AsyncResultSet toAsyncResultSet(ResultSet delegate) { + return new AsyncResultSetImpl( + InstantiatingExecutorProvider.newBuilder() + .setExecutorThreadCount(1) + .setThreadFactory( + new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat("test-async-resultset-%d") + .build()) + .build(), + delegate, + 100); + } + + /** + * Converts the given {@link ResultSet} to an {@link AsyncResultSet} using the given {@link + * ExecutorProvider}. + */ + public static AsyncResultSet toAsyncResultSet( + ResultSet delegate, ExecutorProvider executorProvider, QueryOption... options) { + Options readOptions = Options.fromQueryOptions(options); + final int bufferRows = + readOptions.hasBufferRows() + ? readOptions.bufferRows() + : AsyncResultSetImpl.DEFAULT_BUFFER_SIZE; + return new AsyncResultSetImpl(executorProvider, delegate, bufferRows); + } + + /** + * Converts the {@link ResultSet} that will be returned by the given {@link ApiFuture} to an + * {@link AsyncResultSet} using the given {@link ExecutorProvider}. + */ + public static AsyncResultSet toAsyncResultSet( + ApiFuture delegate, ExecutorProvider executorProvider, QueryOption... options) { + Options readOptions = Options.fromQueryOptions(options); + final int bufferRows = + readOptions.hasBufferRows() + ? 
readOptions.bufferRows() + : AsyncResultSetImpl.DEFAULT_BUFFER_SIZE; + return new AsyncResultSetImpl( + executorProvider, new FutureResultSetSupplier(delegate), bufferRows); + } + + private static class FutureResultSetSupplier implements Supplier { + final ApiFuture delegate; + + FutureResultSetSupplier(ApiFuture delegate) { + this.delegate = Preconditions.checkNotNull(delegate); + } + + @Override + public ResultSet get() { + return SpannerApiFutures.get(delegate); + } + } + + private static class PrePopulatedResultSet implements ProtobufResultSet { + private final List rows; + private final Type type; + private int index = -1; + private boolean closed = false; + + PrePopulatedResultSet(Type type, Iterable rows) { + Preconditions.checkNotNull(rows); + Preconditions.checkNotNull(type); + Preconditions.checkArgument(type.getCode() == Type.Code.STRUCT); + for (StructField field : type.getStructFields()) { + if (field.getType().getCode() == Code.STRUCT) { + throw new UnsupportedOperationException( + "STRUCT-typed columns are not supported inside ResultSets."); + } + } + this.type = type; + this.rows = rows instanceof List ? 
(List) rows : Lists.newArrayList(rows); + for (Struct row : rows) { + Preconditions.checkArgument(row.getType().equals(type)); + } + } + + @Override + public boolean next() throws SpannerException { + return ++index < rows.size(); + } + + @Override + public boolean canGetProtobufValue(int columnIndex) { + return !closed && index >= 0 && index < rows.size(); + } + + @Override + public com.google.protobuf.Value getProtobufValue(int columnIndex) { + Preconditions.checkState(!closed, "ResultSet is closed"); + Preconditions.checkState(index >= 0, "Must be preceded by a next() call"); + Preconditions.checkElementIndex(index, rows.size(), "All rows have been yielded"); + return getValue(columnIndex).toProto(); + } + + @Override + public Struct getCurrentRowAsStruct() { + Preconditions.checkState(!closed, "ResultSet is closed"); + Preconditions.checkState(index >= 0, "Must be preceded by a next() call"); + Preconditions.checkElementIndex(index, rows.size(), "All rows have been yielded"); + return rows.get(index); + } + + @Override + public void close() { + closed = true; + } + + @Override + public Type getType() { + Preconditions.checkState(index >= 0, "Must be preceded by a next() call"); + return type; + } + + @Override + public ResultSetStats getStats() { + throw new UnsupportedOperationException( + "ResultSetStats are available only for results returned from analyzeQuery() calls"); + } + + @Override + public ResultSetMetadata getMetadata() { + throw new UnsupportedOperationException( + "ResultSetMetadata are available only for results that were returned from Cloud Spanner"); + } + + @Override + public int getColumnCount() { + return getType().getStructFields().size(); + } + + @Override + public int getColumnIndex(String columnName) { + return getType().getFieldIndex(columnName); + } + + @Override + public Type getColumnType(int columnIndex) { + return getType().getStructFields().get(columnIndex).getType(); + } + + @Override + public Type getColumnType(String 
columnName) { + for (Type.StructField field : getType().getStructFields()) { + if (field.getName().equals(columnName)) { + return field.getType(); + } + } + return null; + } + + @Override + public boolean isNull(int columnIndex) { + return getCurrentRowAsStruct().isNull(columnIndex); + } + + @Override + public boolean isNull(String columnName) { + return getCurrentRowAsStruct().isNull(columnName); + } + + @Override + public boolean getBoolean(int columnIndex) { + return getCurrentRowAsStruct().getBoolean(columnIndex); + } + + @Override + public boolean getBoolean(String columnName) { + return getCurrentRowAsStruct().getBoolean(columnName); + } + + @Override + public long getLong(int columnIndex) { + return getCurrentRowAsStruct().getLong(columnIndex); + } + + @Override + public long getLong(String columnName) { + return getCurrentRowAsStruct().getLong(columnName); + } + + @Override + public float getFloat(int columnIndex) { + return getCurrentRowAsStruct().getFloat(columnIndex); + } + + @Override + public float getFloat(String columnName) { + return getCurrentRowAsStruct().getFloat(columnName); + } + + @Override + public double getDouble(int columnIndex) { + return getCurrentRowAsStruct().getDouble(columnIndex); + } + + @Override + public double getDouble(String columnName) { + return getCurrentRowAsStruct().getDouble(columnName); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex) { + return getCurrentRowAsStruct().getBigDecimal(columnIndex); + } + + @Override + public BigDecimal getBigDecimal(String columnName) { + return getCurrentRowAsStruct().getBigDecimal(columnName); + } + + @Override + public String getString(int columnIndex) { + return getCurrentRowAsStruct().getString(columnIndex); + } + + @Override + public String getString(String columnName) { + return getCurrentRowAsStruct().getString(columnName); + } + + @Override + public String getJson(int columnIndex) { + return getCurrentRowAsStruct().getJson(columnIndex); + } + + @Override + 
public String getJson(String columnName) { + return getCurrentRowAsStruct().getJson(columnName); + } + + @Override + public String getPgJsonb(int columnIndex) { + return getCurrentRowAsStruct().getPgJsonb(columnIndex); + } + + @Override + public String getPgJsonb(String columnName) { + return getCurrentRowAsStruct().getPgJsonb(columnName); + } + + @Override + public ByteArray getBytes(int columnIndex) { + return getCurrentRowAsStruct().getBytes(columnIndex); + } + + @Override + public ByteArray getBytes(String columnName) { + return getCurrentRowAsStruct().getBytes(columnName); + } + + @Override + public Timestamp getTimestamp(int columnIndex) { + return getCurrentRowAsStruct().getTimestamp(columnIndex); + } + + @Override + public Timestamp getTimestamp(String columnName) { + return getCurrentRowAsStruct().getTimestamp(columnName); + } + + @Override + public Date getDate(int columnIndex) { + return getCurrentRowAsStruct().getDate(columnIndex); + } + + @Override + public Date getDate(String columnName) { + return getCurrentRowAsStruct().getDate(columnName); + } + + @Override + public UUID getUuid(int columnIndex) { + return getCurrentRowAsStruct().getUuid(columnIndex); + } + + @Override + public UUID getUuid(String columnName) { + return getCurrentRowAsStruct().getUuid(columnName); + } + + @Override + public Interval getInterval(int columnIndex) { + return getCurrentRowAsStruct().getInterval(columnIndex); + } + + @Override + public Interval getInterval(String columnName) { + return getCurrentRowAsStruct().getInterval(columnName); + } + + @Override + public T getProtoMessage(int columnIndex, T message) { + return getCurrentRowAsStruct().getProtoMessage(columnIndex, message); + } + + @Override + public T getProtoMessage(String columnName, T message) { + return getCurrentRowAsStruct().getProtoMessage(columnName, message); + } + + @Override + public T getProtoEnum( + int columnIndex, Function method) { + return getCurrentRowAsStruct().getProtoEnum(columnIndex, method); 
+ } + + @Override + public T getProtoEnum( + String columnName, Function method) { + return getCurrentRowAsStruct().getProtoEnum(columnName, method); + } + + @Override + public Value getValue(int columnIndex) { + return getCurrentRowAsStruct().getValue(columnIndex); + } + + @Override + public Value getValue(String columnName) { + return getCurrentRowAsStruct().getValue(columnName); + } + + @Override + public boolean[] getBooleanArray(int columnIndex) { + return getCurrentRowAsStruct().getBooleanArray(columnIndex); + } + + @Override + public boolean[] getBooleanArray(String columnName) { + return getCurrentRowAsStruct().getBooleanArray(columnName); + } + + @Override + public List getBooleanList(int columnIndex) { + return getCurrentRowAsStruct().getBooleanList(columnIndex); + } + + @Override + public List getBooleanList(String columnName) { + return getCurrentRowAsStruct().getBooleanList(columnName); + } + + @Override + public long[] getLongArray(int columnIndex) { + return getCurrentRowAsStruct().getLongArray(columnIndex); + } + + @Override + public long[] getLongArray(String columnName) { + return getCurrentRowAsStruct().getLongArray(columnName); + } + + @Override + public List getLongList(int columnIndex) { + return getCurrentRowAsStruct().getLongList(columnIndex); + } + + @Override + public List getLongList(String columnName) { + return getCurrentRowAsStruct().getLongList(columnName); + } + + @Override + public float[] getFloatArray(int columnIndex) { + return getCurrentRowAsStruct().getFloatArray(columnIndex); + } + + @Override + public float[] getFloatArray(String columnName) { + return getCurrentRowAsStruct().getFloatArray(columnName); + } + + @Override + public List getFloatList(int columnIndex) { + return getCurrentRowAsStruct().getFloatList(columnIndex); + } + + @Override + public List getFloatList(String columnName) { + return getCurrentRowAsStruct().getFloatList(columnName); + } + + @Override + public double[] getDoubleArray(int columnIndex) { + return 
getCurrentRowAsStruct().getDoubleArray(columnIndex); + } + + @Override + public double[] getDoubleArray(String columnName) { + return getCurrentRowAsStruct().getDoubleArray(columnName); + } + + @Override + public List getDoubleList(int columnIndex) { + return getCurrentRowAsStruct().getDoubleList(columnIndex); + } + + @Override + public List getDoubleList(String columnName) { + return getCurrentRowAsStruct().getDoubleList(columnName); + } + + @Override + public List getBigDecimalList(int columnIndex) { + return getCurrentRowAsStruct().getBigDecimalList(columnIndex); + } + + @Override + public List getBigDecimalList(String columnName) { + return getCurrentRowAsStruct().getBigDecimalList(columnName); + } + + @Override + public List getStringList(int columnIndex) { + return getCurrentRowAsStruct().getStringList(columnIndex); + } + + @Override + public List getStringList(String columnName) { + return getCurrentRowAsStruct().getStringList(columnName); + } + + @Override + public List getJsonList(int columnIndex) { + return getCurrentRowAsStruct().getJsonList(columnIndex); + } + + @Override + public List getJsonList(String columnName) { + return getCurrentRowAsStruct().getJsonList(columnName); + } + + @Override + public List getPgJsonbList(int columnIndex) { + return getCurrentRowAsStruct().getPgJsonbList(columnIndex); + } + + @Override + public List getPgJsonbList(String columnName) { + return getCurrentRowAsStruct().getPgJsonbList(columnName); + } + + @Override + public List getBytesList(int columnIndex) { + return getCurrentRowAsStruct().getBytesList(columnIndex); + } + + @Override + public List getBytesList(String columnName) { + return getCurrentRowAsStruct().getBytesList(columnName); + } + + @Override + public List getTimestampList(int columnIndex) { + return getCurrentRowAsStruct().getTimestampList(columnIndex); + } + + @Override + public List getTimestampList(String columnName) { + return getCurrentRowAsStruct().getTimestampList(columnName); + } + + @Override + 
public List getDateList(int columnIndex) { + return getCurrentRowAsStruct().getDateList(columnIndex); + } + + @Override + public List getDateList(String columnName) { + return getCurrentRowAsStruct().getDateList(columnName); + } + + @Override + public List getUuidList(int columnIndex) { + return getCurrentRowAsStruct().getUuidList(columnIndex); + } + + @Override + public List getUuidList(String columnName) { + return getCurrentRowAsStruct().getUuidList(columnName); + } + + @Override + public List getIntervalList(int columnIndex) { + return getCurrentRowAsStruct().getIntervalList(columnIndex); + } + + @Override + public List getIntervalList(String columnName) { + return getCurrentRowAsStruct().getIntervalList(columnName); + } + + @Override + public List getProtoMessageList(int columnIndex, T message) { + return getCurrentRowAsStruct().getProtoMessageList(columnIndex, message); + } + + @Override + public List getProtoMessageList(String columnName, T message) { + return getCurrentRowAsStruct().getProtoMessageList(columnName, message); + } + + @Override + public List getProtoEnumList( + int columnIndex, Function method) { + return getCurrentRowAsStruct().getProtoEnumList(columnIndex, method); + } + + @Override + public List getProtoEnumList( + String columnName, Function method) { + return getCurrentRowAsStruct().getProtoEnumList(columnName, method); + } + + @Override + public List getStructList(int columnIndex) { + return getCurrentRowAsStruct().getStructList(columnIndex); + } + + @Override + public List getStructList(String columnName) { + return getCurrentRowAsStruct().getStructList(columnName); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ResumableStreamIterator.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ResumableStreamIterator.java new file mode 100644 index 000000000000..aac7f63c8614 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ResumableStreamIterator.java @@ -0,0 +1,354 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException; +import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerExceptionForCancellation; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.client.util.BackOff; +import com.google.api.client.util.ExponentialBackOff; +import com.google.api.core.InternalApi; +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.StatusCode.Code; +import com.google.cloud.spanner.AbstractResultSet.CloseableIterator; +import com.google.cloud.spanner.v1.stub.SpannerStubSettings; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.collect.AbstractIterator; +import com.google.protobuf.ByteString; +import com.google.spanner.v1.PartialResultSet; +import io.grpc.Context; +import io.opentelemetry.api.common.Attributes; +import java.io.IOException; +import java.util.LinkedList; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import 
java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nullable; + +/** + * Wraps an iterator over partial result sets, supporting resuming RPCs on error. This class keeps + * track of the most recent resume token seen, and will buffer partial result set chunks that do not + * have a resume token until one is seen or buffer space is exceeded, which reduces the chance of + * yielding data to the caller that cannot be resumed. + */ +@VisibleForTesting +abstract class ResumableStreamIterator extends AbstractIterator + implements CloseableIterator { + private static final RetrySettings DEFAULT_STREAMING_RETRY_SETTINGS = + SpannerStubSettings.newBuilder().executeStreamingSqlSettings().getRetrySettings(); + private final ErrorHandler errorHandler; + private AsyncResultSet.StreamMessageListener streamMessageListener; + private final RetrySettings streamingRetrySettings; + private final Set retryableCodes; + private static final Logger logger = Logger.getLogger(ResumableStreamIterator.class.getName()); + private BackOff backOff; + private final LinkedList buffer = new LinkedList<>(); + private final int maxBufferSize; + private final ISpan span; + private final TraceWrapper tracer; + private CloseableIterator stream; + private int attempts; + private ByteString resumeToken; + private boolean finished; + private final XGoogSpannerRequestId requestId; + + /** + * Indicates whether it is currently safe to retry RPCs. This will be {@code false} if we have + * reached the maximum buffer size without seeing a restart token; in this case, we will drain the + * buffer and remain in this state until we see a new restart token. 
+ */ + private boolean safeToRetry = true; + + protected ResumableStreamIterator( + int maxBufferSize, + String streamName, + ISpan parent, + TraceWrapper tracer, + ErrorHandler errorHandler, + RetrySettings streamingRetrySettings, + Set retryableCodes, + XGoogSpannerRequestId.RequestIdCreator xGoogRequestIdCreator) { + this( + maxBufferSize, + streamName, + parent, + tracer, + Attributes.empty(), + errorHandler, + streamingRetrySettings, + retryableCodes, + xGoogRequestIdCreator); + } + + protected ResumableStreamIterator( + int maxBufferSize, + String streamName, + ISpan parent, + TraceWrapper tracer, + Attributes attributes, + ErrorHandler errorHandler, + RetrySettings streamingRetrySettings, + Set retryableCodes, + XGoogSpannerRequestId.RequestIdCreator xGoogRequestIdCreator) { + checkArgument(maxBufferSize >= 0); + this.maxBufferSize = maxBufferSize; + this.tracer = tracer; + this.span = tracer.spanBuilderWithExplicitParent(streamName, parent, attributes); + this.errorHandler = errorHandler; + this.streamingRetrySettings = Preconditions.checkNotNull(streamingRetrySettings); + this.retryableCodes = Preconditions.checkNotNull(retryableCodes); + // The channel is automatically updated by the gRPC client when the request is actually sent. + this.requestId = xGoogRequestIdCreator.nextRequestId(0); + } + + private ExponentialBackOff newBackOff() { + if (Objects.equals(streamingRetrySettings, DEFAULT_STREAMING_RETRY_SETTINGS)) { + return new ExponentialBackOff.Builder() + .setMultiplier(streamingRetrySettings.getRetryDelayMultiplier()) + .setInitialIntervalMillis( + Math.max(10, (int) streamingRetrySettings.getInitialRetryDelay().toMillis())) + .setMaxIntervalMillis( + Math.max(1000, (int) streamingRetrySettings.getMaxRetryDelay().toMillis())) + .setMaxElapsedTimeMillis(Integer.MAX_VALUE) // Prevent Backoff.STOP from getting returned. 
+ .build(); + } + return new ExponentialBackOff.Builder() + .setMultiplier(streamingRetrySettings.getRetryDelayMultiplier()) + // All of these values must be > 0. + .setInitialIntervalMillis( + Math.max( + 1, + (int) + Math.min( + streamingRetrySettings.getInitialRetryDelay().toMillis(), + Integer.MAX_VALUE))) + .setMaxIntervalMillis( + Math.max( + 1, + (int) + Math.min( + streamingRetrySettings.getMaxRetryDelay().toMillis(), Integer.MAX_VALUE))) + .setMaxElapsedTimeMillis( + Math.max( + 1, + (int) + Math.min( + streamingRetrySettings.getTotalTimeout().toMillis(), Integer.MAX_VALUE))) + .build(); + } + + private void backoffSleep(Context context, BackOff backoff) throws SpannerException { + backoffSleep(context, nextBackOffMillis(backoff)); + } + + private static long nextBackOffMillis(BackOff backoff) throws SpannerException { + try { + return backoff.nextBackOffMillis(); + } catch (IOException e) { + throw newSpannerException(ErrorCode.INTERNAL, e.getMessage(), e); + } + } + + private void backoffSleep(Context context, long backoffMillis) throws SpannerException { + tracer.getCurrentSpan().addAnnotation("Backing off", "Delay", backoffMillis); + final CountDownLatch latch = new CountDownLatch(1); + final Context.CancellationListener listener = + ignored -> { + // Wakeup on cancellation / DEADLINE_EXCEEDED. + latch.countDown(); + }; + + context.addListener(listener, DirectExecutor.INSTANCE); + try { + if (backoffMillis == BackOff.STOP) { + // Highly unlikely but we handle it just in case. + backoffMillis = streamingRetrySettings.getMaxRetryDelay().toMillis(); + } + if (latch.await(backoffMillis, TimeUnit.MILLISECONDS)) { + // Woken by context cancellation. 
+ throw newSpannerExceptionForCancellation(context, null); + } + } catch (InterruptedException interruptExcept) { + throw newSpannerExceptionForCancellation(context, interruptExcept); + } finally { + context.removeListener(listener); + } + } + + private enum DirectExecutor implements Executor { + INSTANCE; + + @Override + public void execute(Runnable command) { + command.run(); + } + } + + abstract CloseableIterator startStream( + @Nullable ByteString resumeToken, + AsyncResultSet.StreamMessageListener streamMessageListener, + XGoogSpannerRequestId requestId); + + /** + * Prepares the iterator for a retry on a different gRPC channel. Returns true if that is + * possible, and false otherwise. A retry should only be attempted if the method returns true. + */ + boolean prepareIteratorForRetryOnDifferentGrpcChannel() { + return false; + } + + @Override + public void close(@Nullable String message) { + if (stream != null) { + stream.close(message); + span.end(); + stream = null; + } + } + + @Override + public boolean isWithBeginTransaction() { + return stream != null && stream.isWithBeginTransaction(); + } + + @Override + public boolean isLastStatement() { + return stream != null && stream.isLastStatement(); + } + + @Override + @InternalApi + public boolean initiateStreaming(AsyncResultSet.StreamMessageListener streamMessageListener) { + this.streamMessageListener = streamMessageListener; + startGrpcStreaming(); + return true; + } + + @Override + protected PartialResultSet computeNext() { + int numAttemptsOnOtherChannel = 0; + Context context = Context.current(); + while (true) { + // Eagerly start stream before consuming any buffered items. + startGrpcStreaming(); + // Buffer contains items up to a resume token or has reached capacity: flush. 
+ if (!buffer.isEmpty() + && (finished || !safeToRetry || !buffer.getLast().getResumeToken().isEmpty())) { + return buffer.pop(); + } + try { + if (stream.hasNext()) { + PartialResultSet next = stream.next(); + boolean hasResumeToken = !next.getResumeToken().isEmpty(); + if (hasResumeToken) { + resumeToken = next.getResumeToken(); + safeToRetry = true; + } + // If the buffer is empty and this chunk has a resume token or we cannot resume safely + // anyway, we can yield it immediately rather than placing it in the buffer to be + // returned on the next iteration. + if ((hasResumeToken || !safeToRetry) && buffer.isEmpty()) { + return next; + } + buffer.add(next); + if (buffer.size() > maxBufferSize && buffer.getLast().getResumeToken().isEmpty()) { + // We need to flush without a restart token. Errors encountered until we see + // such a token will fail the read. + safeToRetry = false; + } + } else { + finished = true; + if (buffer.isEmpty()) { + endOfData(); + return null; + } + } + } catch (SpannerException spannerException) { + if (safeToRetry && isRetryable(spannerException)) { + span.addAnnotation("Stream broken. Safe to retry", spannerException); + logger.log(Level.FINE, "Retryable exception, will sleep and retry", spannerException); + // Truncate any items in the buffer before the last retry token. + while (!buffer.isEmpty() && buffer.getLast().getResumeToken().isEmpty()) { + buffer.removeLast(); + } + assert buffer.isEmpty() || buffer.getLast().getResumeToken().equals(resumeToken); + stream = null; + try (IScope s = tracer.withSpan(span)) { + long delay = spannerException.getRetryDelayInMillis(); + if (delay != -1) { + backoffSleep(context, delay); + } else { + if (this.backOff == null) { + this.backOff = newBackOff(); + } + backoffSleep(context, this.backOff); + } + } + + continue; + } + // Check if we should retry the request on a different gRPC channel. 
+ if (resumeToken == null && buffer.isEmpty()) { + Throwable translated = errorHandler.translateException(spannerException); + if (translated instanceof RetryOnDifferentGrpcChannelException) { + if (++numAttemptsOnOtherChannel < errorHandler.getMaxAttempts() + && prepareIteratorForRetryOnDifferentGrpcChannel()) { + stream = null; + continue; + } + } + } + span.addAnnotation("Stream broken. Not safe to retry", spannerException); + span.setStatus(spannerException); + throw spannerException; + } catch (RuntimeException e) { + span.addAnnotation("Stream broken. Not safe to retry", e); + span.setStatus(e); + throw e; + } + } + } + + private void startGrpcStreaming() { + if (stream == null) { + span.addAnnotation( + "Starting/Resuming stream", + "ResumeToken", + resumeToken == null ? "null" : resumeToken.toStringUtf8()); + try (IScope scope = tracer.withSpan(span)) { + // When start a new stream set the Span as current to make the gRPC Span a child of + // this Span. + stream = checkNotNull(startStream(resumeToken, streamMessageListener, requestId)); + stream.requestPrefetchChunks(); + } + } + } + + boolean isRetryable(SpannerException spannerException) { + return spannerException.isRetryable() + || retryableCodes.contains( + GrpcStatusCode.of(spannerException.getErrorCode().getGrpcStatusCode()).getCode()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/RetryOnDifferentGrpcChannelErrorHandler.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/RetryOnDifferentGrpcChannelErrorHandler.java new file mode 100644 index 000000000000..46607d33a882 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/RetryOnDifferentGrpcChannelErrorHandler.java @@ -0,0 +1,83 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SessionImpl.NO_CHANNEL_HINT; + +import com.google.api.core.BetaApi; +import com.google.cloud.spanner.spi.v1.SpannerRpc.Option; +import javax.annotation.Nonnull; + +/** + * An experimental error handler that allows DEADLINE_EXCEEDED errors to be retried on a different + * gRPC channel. This handler is only used if the system property + * 'spanner.retry_deadline_exceeded_on_different_channel' has been set to true, and it is only used + * in the following specific cases: + * + *

    + *
  1. A DEADLINE_EXCEEDED error during a read/write transaction. The error is translated to a + * {@link RetryOnDifferentGrpcChannelException}, which is caught by the session pool and + * causes a retry of the entire transaction on a different session and different gRPC channel. + *
  2. A DEADLINE_EXCEEDED error during a single-use read-only transaction using a multiplexed + * session. Note that errors for the same using a regular session are not retried. + *
+ */ +@BetaApi +class RetryOnDifferentGrpcChannelErrorHandler implements ErrorHandler { + private final int maxAttempts; + + private final SessionImpl session; + + static boolean isEnabled() { + return Boolean.parseBoolean( + System.getProperty("spanner.retry_deadline_exceeded_on_different_channel", "false")); + } + + RetryOnDifferentGrpcChannelErrorHandler(int maxAttempts, SessionImpl session) { + this.maxAttempts = maxAttempts; + this.session = session; + } + + @Override + @Nonnull + public Throwable translateException(@Nonnull Throwable exception) { + if (session == null || !(exception instanceof SpannerException)) { + return exception; + } + SpannerException spannerException = (SpannerException) exception; + if (spannerException.getErrorCode() == ErrorCode.DEADLINE_EXCEEDED) { + if (session.getIsMultiplexed() + || (session.getOptions() != null + && session.getOptions().containsKey(Option.CHANNEL_HINT))) { + int channel = NO_CHANNEL_HINT; + if (session.getOptions() != null && session.getOptions().containsKey(Option.CHANNEL_HINT)) { + channel = Option.CHANNEL_HINT.getLong(session.getOptions()).intValue(); + } + return SpannerExceptionFactory.newRetryOnDifferentGrpcChannelException( + "Retrying on a new gRPC channel due to a DEADLINE_EXCEEDED error", + channel, + spannerException); + } + } + return spannerException; + } + + @Override + public int getMaxAttempts() { + return maxAttempts; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/RetryOnDifferentGrpcChannelException.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/RetryOnDifferentGrpcChannelException.java new file mode 100644 index 000000000000..e56265b6c527 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/RetryOnDifferentGrpcChannelException.java @@ -0,0 +1,35 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this 
file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import javax.annotation.Nullable; + +class RetryOnDifferentGrpcChannelException extends SpannerException { + private final int channel; + + RetryOnDifferentGrpcChannelException( + @Nullable String message, int channel, @Nullable Throwable cause) { + // Note: We set retryable=false, as the exception is not retryable in the standard way. + super( + DoNotConstructDirectly.ALLOWED, ErrorCode.INTERNAL, /* retryable= */ false, message, cause); + this.channel = channel; + } + + int getChannel() { + return this.channel; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Session.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Session.java new file mode 100644 index 000000000000..61d13db8d82e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Session.java @@ -0,0 +1,58 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.ApiFuture; +import com.google.api.core.InternalApi; +import com.google.protobuf.Empty; + +/** + * A {@code Session} can be used to perform transactions that read and/or modify data in a Cloud + * Spanner database. + * + *

Sessions can only execute one transaction at a time. To execute multiple concurrent + * read-write/write-only transactions, create multiple sessions. Note that standalone reads and + * queries use a transaction internally, and count toward the one transaction limit. + * + *

It is a good idea to delete idle and/or unneeded sessions. Aside from explicit deletes, Cloud + * Spanner can delete sessions for which no operations are sent for more than an hour, or due to + * internal errors. If a session is deleted, requests to it return {@link ErrorCode#NOT_FOUND}. + * + *

Idle sessions can be kept alive by sending a trivial SQL query periodically, for example, + * {@code SELECT 1}. + * + *

Sessions are long-lived objects intended to be reused for many consecutive operations; a + * typical application will maintain a pool of sessions to use during its lifetime. + * + *

Since only one transaction can be performed at a time within any given session, instances + * require external synchronization; {@code Session} implementations are not required to be + * thread-safe. + */ +@InternalApi +public interface Session extends DatabaseClient, AutoCloseable { + /** Returns the resource name associated with this session. */ + String getName(); + + @Override + void close(); + + /** + * Closes the session asynchronously and returns the {@link ApiFuture} that can be used to monitor + * the operation progress. + */ + ApiFuture asyncClose(); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionClient.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionClient.java new file mode 100644 index 000000000000..1fb49f2ced3f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionClient.java @@ -0,0 +1,453 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.cloud.grpc.GrpcTransportOptions.ExecutorFactory; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.cloud.spanner.spi.v1.SpannerRpc.Option; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Maps; +import io.opentelemetry.api.common.Attributes; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.atomic.AtomicInteger; +import javax.annotation.concurrent.GuardedBy; + +/** Client for creating single sessions and batches of sessions. */ +class SessionClient implements AutoCloseable { + static class SessionId { + private static final PathTemplate NAME_TEMPLATE = + PathTemplate.create( + "projects/{project}/instances/{instance}/databases/{database}/sessions/{session}"); + private final DatabaseId db; + private final String name; + + private SessionId(DatabaseId db, String name) { + this.db = Preconditions.checkNotNull(db); + this.name = Preconditions.checkNotNull(name); + } + + static SessionId of(String name) { + Preconditions.checkNotNull(name); + Map parts = NAME_TEMPLATE.match(name); + Preconditions.checkArgument( + parts != null, "Name should conform to pattern %s: %s", NAME_TEMPLATE, name); + return of( + parts.get("project"), parts.get("instance"), parts.get("database"), parts.get("session")); + } + + /** Creates a {@code SessionId} given project, instance, database and session IDs. 
*/ + static SessionId of(String project, String instance, String database, String session) { + return new SessionId(new DatabaseId(new InstanceId(project, instance), database), session); + } + + DatabaseId getDatabaseId() { + return db; + } + + String getName() { + return name; + } + } + + /** + * Encapsulates state to be passed to the {@link SpannerRpc} layer for a given session. Currently + * used to select the {@link io.grpc.Channel} to be used in issuing the RPCs in a Session. + */ + static class SessionOption { + private final SpannerRpc.Option rpcOption; + private final Object value; + + SessionOption(SpannerRpc.Option option, Object value) { + this.rpcOption = checkNotNull(option); + this.value = value; + } + + static SessionOption channelHint(long hint) { + return new SessionOption(SpannerRpc.Option.CHANNEL_HINT, hint); + } + + SpannerRpc.Option rpcOption() { + return rpcOption; + } + + Object value() { + return value; + } + } + + static Map optionMap(SessionOption... options) { + if (options.length == 0) { + return Collections.emptyMap(); + } + Map tmp = Maps.newEnumMap(SpannerRpc.Option.class); + for (SessionOption option : options) { + Object prev = tmp.put(option.rpcOption(), option.value()); + checkArgument(prev == null, "Duplicate option %s", option.rpcOption()); + } + return ImmutableMap.copyOf(tmp); + } + + static Map createRequestOptions(long channelId) { + return ImmutableMap.of(Option.CHANNEL_HINT, channelId); + } + + private final class BatchCreateSessionsRunnable implements Runnable { + private final long channelHint; + private final int sessionCount; + private final SessionConsumer consumer; + + private BatchCreateSessionsRunnable( + int sessionCount, long channelHint, SessionConsumer consumer) { + Preconditions.checkNotNull(consumer); + Preconditions.checkArgument(sessionCount > 0, "sessionCount must be > 0"); + this.channelHint = channelHint; + this.sessionCount = sessionCount; + this.consumer = consumer; + } + + @Override + public void 
run() { + List sessions; + int remainingSessionsToCreate = sessionCount; + ISpan span = + spanner.getTracer().spanBuilder(SpannerImpl.BATCH_CREATE_SESSIONS, databaseAttributes); + try (IScope s = spanner.getTracer().withSpan(span)) { + spanner + .getTracer() + .getCurrentSpan() + .addAnnotation(String.format("Creating %d sessions", sessionCount)); + while (remainingSessionsToCreate > 0) { + try { + sessions = internalBatchCreateSessions(remainingSessionsToCreate, channelHint); + } catch (Throwable t) { + spanner.getTracer().getCurrentSpan().setStatus(t); + consumer.onSessionCreateFailure(t, remainingSessionsToCreate); + break; + } + for (SessionImpl session : sessions) { + consumer.onSessionReady(session); + } + remainingSessionsToCreate -= sessions.size(); + } + } finally { + span.end(); + } + } + } + + /** + * Callback interface to be used for Sessions. When sessions become available or session creation + * fails, one of the callback methods will be called. + */ + interface SessionConsumer { + /** Called when a session has been created and is ready for use. */ + void onSessionReady(SessionImpl session); + + /** + * Called when an error occurred during session creation. The createFailureForSessionCount + * indicates the number of sessions that could not be created, so that the consumer knows how + * many sessions it should still expect. + */ + void onSessionCreateFailure(Throwable t, int createFailureForSessionCount); + } + + private final SpannerImpl spanner; + private final ExecutorFactory executorFactory; + private final ScheduledExecutorService executor; + private final DatabaseId db; + private final Attributes databaseAttributes; + + // SessionClient is created long before a DatabaseClientImpl is created, + // as batch sessions are firstly created then later attached to each Client. 
+ private static final AtomicInteger NTH_ID = new AtomicInteger(0); + private final int nthId = NTH_ID.incrementAndGet(); + private final AtomicInteger nthRequest = new AtomicInteger(0); + + @GuardedBy("this") + private volatile long sessionChannelCounter; + + SessionClient( + SpannerImpl spanner, + DatabaseId db, + ExecutorFactory executorFactory) { + this.spanner = spanner; + this.db = db; + this.executorFactory = executorFactory; + this.executor = executorFactory.get(); + this.databaseAttributes = spanner.getTracer().createDatabaseAttributes(db); + } + + @Override + public void close() { + executorFactory.release(executor); + } + + SpannerImpl getSpanner() { + return spanner; + } + + DatabaseId getDatabaseId() { + return db; + } + + /** Create a single session. */ + SessionImpl createSession() { + // The sessionChannelCounter could overflow, but that will just flip it to Integer.MIN_VALUE, + // which is also a valid channel hint. + final long channelId; + synchronized (this) { + channelId = sessionChannelCounter; + sessionChannelCounter++; + } + ISpan span = + spanner.getTracer().spanBuilder(SpannerImpl.CREATE_SESSION, this.databaseAttributes); + try (IScope s = spanner.getTracer().withSpan(span)) { + com.google.spanner.v1.Session session = + spanner + .getRpc() + .createSession( + db.getName(), + spanner.getOptions().getDatabaseRole(), + spanner.getOptions().getSessionLabels(), + createRequestOptions(channelId)); + SessionReference sessionReference = + new SessionReference( + session.getName(), + spanner.getOptions().getDatabaseRole(), + session.getCreateTime(), + session.getMultiplexed(), + optionMap(SessionOption.channelHint(channelId))); + return new SessionImpl(spanner, sessionReference); + } catch (RuntimeException e) { + span.setStatus(e); + throw e; + } finally { + span.end(); + } + } + + /** + * Create a multiplexed session and returns it to the given {@link SessionConsumer}. A multiplexed + * session is not affiliated with any GRPC channel. 
The given {@link SessionConsumer} is + * guaranteed to eventually get exactly 1 multiplexed session unless an error occurs. In case of + * an error on the gRPC calls, the consumer will receive one {@link + * SessionConsumer#onSessionCreateFailure(Throwable, int)} calls with the error. + * + * @param consumer The {@link SessionConsumer} to use for callbacks when sessions are available. + */ + void createMultiplexedSession(SessionConsumer consumer) { + try { + SessionImpl sessionImpl = createMultiplexedSession(); + consumer.onSessionReady(sessionImpl); + } catch (Throwable t) { + consumer.onSessionCreateFailure(t, 1); + } + } + + /** + * Creates a multiplexed session and returns it. A multiplexed session is not affiliated with any + * GRPC channel. In case of an error during the gRPC calls, an exception will be thrown. + */ + SessionImpl createMultiplexedSession() { + ISpan span = + spanner + .getTracer() + .spanBuilder(SpannerImpl.CREATE_MULTIPLEXED_SESSION, this.databaseAttributes); + try (IScope s = spanner.getTracer().withSpan(span)) { + com.google.spanner.v1.Session session = + spanner + .getRpc() + .createSession( + db.getName(), + spanner.getOptions().getDatabaseRole(), + spanner.getOptions().getSessionLabels(), + null, + true); + SessionImpl sessionImpl = + new SessionImpl( + spanner, + new SessionReference( + session.getName(), + spanner.getOptions().getDatabaseRole(), + session.getCreateTime(), + session.getMultiplexed(), + null)); + span.addAnnotation( + String.format("Request for %d multiplexed session returned %d session", 1, 1)); + return sessionImpl; + } catch (Throwable t) { + span.setStatus(t); + throw t; + } finally { + span.end(); + } + } + + /** + * Create a multiplexed session asynchronously and returns it to the given {@link + * SessionConsumer}. A multiplexed session is not affiliated with any GRPC channel. The given + * {@link SessionConsumer} is guaranteed to eventually get exactly 1 multiplexed session unless an + * error occurs. 
In case of an error on the gRPC calls, the consumer will receive one {@link + * SessionConsumer#onSessionCreateFailure(Throwable, int)} call with the error. + * + * @param consumer The {@link SessionConsumer} to use for callbacks when sessions are available. + */ + void asyncCreateMultiplexedSession(SessionConsumer consumer) { + try { + executor.submit(new CreateMultiplexedSessionsRunnable(consumer)); + } catch (Throwable t) { + consumer.onSessionCreateFailure(t, 1); + } + } + + private final class CreateMultiplexedSessionsRunnable implements Runnable { + private final SessionConsumer consumer; + + private CreateMultiplexedSessionsRunnable(SessionConsumer consumer) { + Preconditions.checkNotNull(consumer); + this.consumer = consumer; + } + + @Override + public void run() { + createMultiplexedSession(consumer); + } + } + + /** + * Asynchronously creates a batch of sessions and returns these to the given {@link + * SessionConsumer}. This method may split the actual session creation over several gRPC calls in + * order to distribute the sessions evenly over all available channels and to parallelize the + * session creation. The given {@link SessionConsumer} is guaranteed to eventually get exactly the + * number of requested sessions unless an error occurs. In case of an error on one or more of the + * gRPC calls, the consumer will receive one or more {@link + * SessionConsumer#onSessionCreateFailure(Throwable, int)} calls with the error and the number of + * sessions that could not be created. + * + * @param sessionCount The number of sessions to create. + * @param distributeOverChannels Whether to distribute the sessions over all available channels + * (true) or create all for the next channel round robin. + * @param consumer The {@link SessionConsumer} to use for callbacks when sessions are available. 
+ */ + void asyncBatchCreateSessions( + final int sessionCount, boolean distributeOverChannels, SessionConsumer consumer) { + int sessionCountPerChannel; + int remainder; + if (distributeOverChannels) { + sessionCountPerChannel = sessionCount / spanner.getOptions().getNumChannels(); + remainder = sessionCount % spanner.getOptions().getNumChannels(); + } else { + sessionCountPerChannel = sessionCount; + remainder = 0; + } + int numBeingCreated = 0; + synchronized (this) { + for (int channelIndex = 0; + channelIndex < spanner.getOptions().getNumChannels(); + channelIndex++) { + int createCountForChannel = sessionCountPerChannel; + // Add the remainder of the division to the creation count of the first channel to make sure + // we are creating the requested number of sessions. This will cause a slightly less + // efficient distribution of sessions over the channels than spreading the remainder over + // all channels as well, but it will also reduce the number of requests when less than + // numChannels sessions are requested (i.e. with 4 channels and 3 requested sessions, the 3 + // sessions will be requested in one rpc call). + if (channelIndex == 0) { + createCountForChannel = sessionCountPerChannel + remainder; + } + if (createCountForChannel > 0 && numBeingCreated < sessionCount) { + try { + executor.submit( + new BatchCreateSessionsRunnable( + createCountForChannel, sessionChannelCounter++, consumer)); + numBeingCreated += createCountForChannel; + } catch (Throwable t) { + consumer.onSessionCreateFailure(t, sessionCount - numBeingCreated); + } + } else { + break; + } + } + } + } + + /** + * Creates a batch of sessions that will all be affiliated with the same gRPC channel. It is the + * responsibility of the caller to make multiple calls to this method in order to create sessions + * that are distributed over multiple channels. 
+ */ + private List internalBatchCreateSessions( + final int sessionCount, final long channelHint) throws SpannerException { + ISpan parent = spanner.getTracer().getCurrentSpan(); + ISpan span = + spanner + .getTracer() + .spanBuilderWithExplicitParent(SpannerImpl.BATCH_CREATE_SESSIONS_REQUEST, parent); + span.addAnnotation(String.format("Requesting %d sessions", sessionCount)); + try (IScope s = spanner.getTracer().withSpan(span)) { + List sessions = + spanner + .getRpc() + .batchCreateSessions( + db.getName(), + sessionCount, + spanner.getOptions().getDatabaseRole(), + spanner.getOptions().getSessionLabels(), + createRequestOptions(channelHint)); + span.addAnnotation( + String.format( + "Request for %d sessions returned %d sessions", sessionCount, sessions.size())); + span.end(); + List res = new ArrayList<>(sessionCount); + for (com.google.spanner.v1.Session session : sessions) { + SessionImpl sessionImpl = + new SessionImpl( + spanner, + new SessionReference( + session.getName(), + spanner.getOptions().getDatabaseRole(), + session.getCreateTime(), + session.getMultiplexed(), + optionMap(SessionOption.channelHint(channelHint)))); + res.add(sessionImpl); + } + return res; + } catch (RuntimeException e) { + span.setStatus(e); + span.end(); + throw e; + } + } + + /** Returns a {@link SessionImpl} that references the existing session with the given name. 
*/ + SessionImpl sessionWithId(String name) { + final Map options; + synchronized (this) { + options = optionMap(SessionOption.channelHint(sessionChannelCounter++)); + } + return new SessionImpl(spanner, new SessionReference(name, /* databaseRole= */ null, options)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionImpl.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionImpl.java new file mode 100644 index 000000000000..e70ee390df11 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionImpl.java @@ -0,0 +1,609 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SessionClient.optionMap; +import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException; + +import com.google.api.core.ApiFuture; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.rpc.ServerStream; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AbstractReadContext.MultiUseReadOnlyTransaction; +import com.google.cloud.spanner.AbstractReadContext.SingleReadContext; +import com.google.cloud.spanner.AbstractReadContext.SingleUseReadOnlyTransaction; +import com.google.cloud.spanner.ErrorHandler.DefaultErrorHandler; +import com.google.cloud.spanner.Options.TransactionOption; +import com.google.cloud.spanner.Options.UpdateOption; +import com.google.cloud.spanner.SessionClient.SessionOption; +import com.google.cloud.spanner.TransactionRunnerImpl.TransactionContextImpl; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.common.base.Ticker; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.ByteString; +import com.google.protobuf.Duration; +import com.google.protobuf.Empty; +import com.google.spanner.v1.BatchWriteRequest; +import com.google.spanner.v1.BatchWriteResponse; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.RequestOptions; +import com.google.spanner.v1.Transaction; +import com.google.spanner.v1.TransactionOptions; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import javax.annotation.Nullable; + +/** + * Implementation of {@link Session}. Sessions are managed internally by the client library, and + * users need not be aware of the actual session management, pooling and handling. 
+ */ +class SessionImpl implements Session { + private final TraceWrapper tracer; + + /** Keep track of running transactions on this session per thread. */ + static final ThreadLocal hasPendingTransaction = ThreadLocal.withInitial(() -> false); + + static void throwIfTransactionsPending() { + if (hasPendingTransaction.get() == Boolean.TRUE) { + throw newSpannerException(ErrorCode.INTERNAL, "Nested transactions are not supported"); + } + } + + static TransactionOptions createReadWriteTransactionOptions( + Options options, ByteString previousTransactionId) { + TransactionOptions.Builder transactionOptions = TransactionOptions.newBuilder(); + if (options.withExcludeTxnFromChangeStreams() == Boolean.TRUE) { + transactionOptions.setExcludeTxnFromChangeStreams(true); + } + TransactionOptions.ReadWrite.Builder readWrite = TransactionOptions.ReadWrite.newBuilder(); + if (previousTransactionId != null + && previousTransactionId != com.google.protobuf.ByteString.EMPTY) { + readWrite.setMultiplexedSessionPreviousTransactionId(previousTransactionId); + } + if (options.isolationLevel() != null) { + transactionOptions.setIsolationLevel(options.isolationLevel()); + } + if (options.readLockMode() != null) { + readWrite.setReadLockMode(options.readLockMode()); + } + transactionOptions.setReadWrite(readWrite); + return transactionOptions.build(); + } + + /** + * Represents a transaction within a session. "Transaction" here is used in the general sense, + * which covers standalone reads, standalone writes, single-use and multi-use read-only + * transactions, and read-write transactions. The defining characteristic is that a session may + * only have one such transaction active at a time. + */ + interface SessionTransaction { + + /** Invalidates the transaction, generally because a new one has been started on the session. */ + void invalidate(); + + /** Registers the current span on the transaction. */ + void setSpan(ISpan span); + + /** Closes the transaction. 
*/ + void close(); + } + + private static final Map[] CHANNEL_HINT_OPTIONS = + new Map[SpannerOptions.MAX_CHANNELS]; + + static { + for (int i = 0; i < CHANNEL_HINT_OPTIONS.length; i++) { + CHANNEL_HINT_OPTIONS[i] = optionMap(SessionOption.channelHint(i)); + } + } + + static final int NO_CHANNEL_HINT = -1; + + private final SpannerImpl spanner; + private SessionReference sessionReference; + private SessionTransaction activeTransaction; + private ISpan currentSpan; + private final Clock clock; + private final Map options; + private final ErrorHandler errorHandler; + + SessionImpl(SpannerImpl spanner, SessionReference sessionReference) { + this(spanner, sessionReference, NO_CHANNEL_HINT); + } + + SessionImpl(SpannerImpl spanner, SessionReference sessionReference, int channelHint) { + this.spanner = spanner; + this.tracer = spanner.getTracer(); + this.sessionReference = sessionReference; + this.clock = spanner.getOptions().getSessionPoolOptions().getPoolMaintainerClock(); + this.options = createOptions(sessionReference, channelHint); + this.errorHandler = createErrorHandler(spanner.getOptions()); + } + + static Map createOptions( + SessionReference sessionReference, int channelHint) { + if (channelHint == NO_CHANNEL_HINT) { + return sessionReference.getOptions(); + } + return CHANNEL_HINT_OPTIONS[channelHint % CHANNEL_HINT_OPTIONS.length]; + } + + private ErrorHandler createErrorHandler(SpannerOptions options) { + if (RetryOnDifferentGrpcChannelErrorHandler.isEnabled()) { + return new RetryOnDifferentGrpcChannelErrorHandler(options.getNumChannels(), this); + } + return DefaultErrorHandler.INSTANCE; + } + + @Override + public String getName() { + return sessionReference.getName(); + } + + @Override + public String getDatabaseRole() { + return sessionReference.getDatabaseRole(); + } + + /** + * Updates the session reference with the fallback session. 
This should only be used for updating + * session reference with regular session in case of unimplemented error in multiplexed session. + */ + void setFallbackSessionReference(SessionReference sessionReference) { + this.sessionReference = sessionReference; + } + + Map getOptions() { + return options; + } + + ErrorHandler getErrorHandler() { + return this.errorHandler; + } + + SpannerImpl getSpanner() { + return spanner; + } + + void setCurrentSpan(ISpan span) { + currentSpan = span; + } + + ISpan getCurrentSpan() { + return currentSpan; + } + + Instant getLastUseTime() { + return sessionReference.getLastUseTime(); + } + + Instant getCreateTime() { + return sessionReference.getCreateTime(); + } + + boolean getIsMultiplexed() { + return sessionReference.getIsMultiplexed(); + } + + SessionReference getSessionReference() { + return sessionReference; + } + + void markUsed(Instant instant) { + sessionReference.markUsed(instant); + } + + TransactionOptions defaultTransactionOptions() { + return this.spanner.getOptions().getDefaultTransactionOptions(); + } + + public DatabaseId getDatabaseId() { + return sessionReference.getDatabaseId(); + } + + @Override + public long executePartitionedUpdate(Statement stmt, UpdateOption... options) { + setActive(null); + PartitionedDmlTransaction txn = + new PartitionedDmlTransaction(this, spanner.getRpc(), Ticker.systemTicker()); + return txn.executeStreamingPartitionedUpdate( + stmt, spanner.getOptions().getPartitionedDmlTimeoutDuration(), options); + } + + @Override + public Timestamp write(Iterable mutations) throws SpannerException { + return writeWithOptions(mutations).getCommitTimestamp(); + } + + @Override + public CommitResponse writeWithOptions(Iterable mutations, TransactionOption... options) + throws SpannerException { + TransactionRunner runner = readWriteTransaction(options); + final Collection finalMutations = + mutations instanceof java.util.Collection + ? 
(Collection) mutations + : Lists.newArrayList(mutations); + runner.run( + ctx -> { + ctx.buffer(finalMutations); + return null; + }); + return runner.getCommitResponse(); + } + + @Override + public Timestamp writeAtLeastOnce(Iterable mutations) throws SpannerException { + return writeAtLeastOnceWithOptions(mutations).getCommitTimestamp(); + } + + @Override + public CommitResponse writeAtLeastOnceWithOptions( + Iterable mutations, TransactionOption... transactionOptions) + throws SpannerException { + setActive(null); + List mutationsProto = new ArrayList<>(); + Mutation.toProtoAndReturnRandomMutation(mutations, mutationsProto); + Options options = Options.fromTransactionOptions(transactionOptions); + final CommitRequest.Builder requestBuilder = + CommitRequest.newBuilder() + .setSession(getName()) + .setReturnCommitStats(options.withCommitStats()) + .addAllMutations(mutationsProto); + + TransactionOptions.Builder transactionOptionsBuilder = + TransactionOptions.newBuilder() + .setReadWrite(TransactionOptions.ReadWrite.getDefaultInstance()); + if (options.withExcludeTxnFromChangeStreams() == Boolean.TRUE) { + transactionOptionsBuilder.setExcludeTxnFromChangeStreams(true); + } + if (options.isolationLevel() != null) { + transactionOptionsBuilder.setIsolationLevel(options.isolationLevel()); + } + if (options.readLockMode() != null) { + transactionOptionsBuilder.getReadWriteBuilder().setReadLockMode(options.readLockMode()); + } + requestBuilder.setSingleUseTransaction( + defaultTransactionOptions().toBuilder().mergeFrom(transactionOptionsBuilder.build())); + + if (options.hasMaxCommitDelay()) { + requestBuilder.setMaxCommitDelay( + Duration.newBuilder() + .setSeconds(options.maxCommitDelay().getSeconds()) + .setNanos(options.maxCommitDelay().getNano()) + .build()); + } + RequestOptions commitRequestOptions = getRequestOptions(transactionOptions); + + if (commitRequestOptions != null) { + requestBuilder.setRequestOptions(commitRequestOptions); + } + CommitRequest request 
= requestBuilder.build(); + ISpan span = tracer.spanBuilder(SpannerImpl.COMMIT); + + try (IScope s = tracer.withSpan(span)) { + return SpannerRetryHelper.runTxWithRetriesOnAborted( + () -> new CommitResponse(spanner.getRpc().commit(request, getOptions()))); + } catch (RuntimeException e) { + span.setStatus(e); + throw e; + } finally { + span.end(); + } + } + + private RequestOptions getRequestOptions(TransactionOption... transactionOptions) { + Options requestOptions = Options.fromTransactionOptions(transactionOptions); + if (requestOptions.hasPriority() || requestOptions.hasTag()) { + RequestOptions.Builder requestOptionsBuilder = RequestOptions.newBuilder(); + if (requestOptions.hasPriority()) { + requestOptionsBuilder.setPriority(requestOptions.priority()); + } + if (requestOptions.hasTag()) { + requestOptionsBuilder.setTransactionTag(requestOptions.tag()); + } + return requestOptionsBuilder.build(); + } + return null; + } + + @Override + public ServerStream batchWriteAtLeastOnce( + Iterable mutationGroups, TransactionOption... 
transactionOptions) + throws SpannerException { + setActive(null); + List mutationGroupsProto = + MutationGroup.toListProto(mutationGroups); + final BatchWriteRequest.Builder requestBuilder = + BatchWriteRequest.newBuilder() + .setSession(getName()) + .addAllMutationGroups(mutationGroupsProto); + RequestOptions batchWriteRequestOptions = getRequestOptions(transactionOptions); + Options allOptions = Options.fromTransactionOptions(transactionOptions); + if (batchWriteRequestOptions != null) { + requestBuilder.setRequestOptions(batchWriteRequestOptions); + } + if (allOptions.withExcludeTxnFromChangeStreams() == Boolean.TRUE) { + requestBuilder.setExcludeTxnFromChangeStreams(true); + } + ISpan span = tracer.spanBuilder(SpannerImpl.BATCH_WRITE); + try (IScope s = tracer.withSpan(span)) { + return spanner.getRpc().batchWriteAtLeastOnce(requestBuilder.build(), getOptions()); + } catch (Throwable e) { + span.setStatus(e); + throw SpannerExceptionFactory.newSpannerException(e); + } finally { + span.end(); + onTransactionDone(); + } + } + + @Override + public ReadContext singleUse() { + return singleUse(TimestampBound.strong()); + } + + @Override + public ReadContext singleUse(TimestampBound bound) { + return setActive( + SingleReadContext.newBuilder() + .setSession(this) + .setTimestampBound(bound) + .setRpc(spanner.getRpc()) + .setDefaultQueryOptions(spanner.getDefaultQueryOptions(getDatabaseId())) + .setDefaultPrefetchChunks(spanner.getDefaultPrefetchChunks()) + .setDefaultDecodeMode(spanner.getDefaultDecodeMode()) + .setDefaultDirectedReadOptions(spanner.getOptions().getDirectedReadOptions()) + .setSpan(currentSpan) + .setTracer(tracer) + .setExecutorProvider(spanner.getAsyncExecutorProvider()) + .setClock(clock) + .build()); + } + + @Override + public ReadOnlyTransaction singleUseReadOnlyTransaction() { + return singleUseReadOnlyTransaction(TimestampBound.strong()); + } + + @Override + public ReadOnlyTransaction singleUseReadOnlyTransaction(TimestampBound bound) { + 
return setActive( + SingleUseReadOnlyTransaction.newBuilder() + .setSession(this) + .setTimestampBound(bound) + .setRpc(spanner.getRpc()) + .setDefaultQueryOptions(spanner.getDefaultQueryOptions(getDatabaseId())) + .setDefaultPrefetchChunks(spanner.getDefaultPrefetchChunks()) + .setDefaultDecodeMode(spanner.getDefaultDecodeMode()) + .setDefaultDirectedReadOptions(spanner.getOptions().getDirectedReadOptions()) + .setSpan(currentSpan) + .setTracer(tracer) + .setExecutorProvider(spanner.getAsyncExecutorProvider()) + .setClock(clock) + .buildSingleUseReadOnlyTransaction()); + } + + @Override + public ReadOnlyTransaction readOnlyTransaction() { + return readOnlyTransaction(TimestampBound.strong()); + } + + @Override + public ReadOnlyTransaction readOnlyTransaction(TimestampBound bound) { + return setActive( + MultiUseReadOnlyTransaction.newBuilder() + .setSession(this) + .setTimestampBound(bound) + .setRpc(spanner.getRpc()) + .setDefaultQueryOptions(spanner.getDefaultQueryOptions(getDatabaseId())) + .setDefaultPrefetchChunks(spanner.getDefaultPrefetchChunks()) + .setDefaultDecodeMode(spanner.getDefaultDecodeMode()) + .setDefaultDirectedReadOptions(spanner.getOptions().getDirectedReadOptions()) + .setSpan(currentSpan) + .setTracer(tracer) + .setExecutorProvider(spanner.getAsyncExecutorProvider()) + .setClock(clock) + .build()); + } + + @Override + public TransactionRunner readWriteTransaction(TransactionOption... options) { + return setActive(new TransactionRunnerImpl(this, options)); + } + + @Override + public AsyncRunner runAsync(TransactionOption... options) { + return new AsyncRunnerImpl(setActive(new TransactionRunnerImpl(this, options))); + } + + @Override + public TransactionManager transactionManager(TransactionOption... options) { + return new TransactionManagerImpl(this, currentSpan, tracer, options); + } + + @Override + public AsyncTransactionManagerImpl transactionManagerAsync(TransactionOption... 
options) { + return new AsyncTransactionManagerImpl(this, currentSpan, options); + } + + @Override + public ApiFuture asyncClose() { + if (getIsMultiplexed()) { + return com.google.api.core.ApiFutures.immediateFuture(Empty.getDefaultInstance()); + } + return spanner.getRpc().asyncDeleteSession(getName(), getOptions()); + } + + @Override + public void close() { + if (getIsMultiplexed()) { + return; + } + ISpan span = tracer.spanBuilder(SpannerImpl.DELETE_SESSION); + try (IScope s = tracer.withSpan(span)) { + spanner.getRpc().deleteSession(getName(), getOptions()); + } catch (RuntimeException e) { + span.setStatus(e); + throw e; + } finally { + span.end(); + } + } + + ApiFuture beginTransactionAsync( + Options transactionOptions, + boolean routeToLeader, + Map channelHint, + ByteString previousTransactionId, + com.google.spanner.v1.Mutation mutation) { + final SettableApiFuture res = SettableApiFuture.create(); + final ISpan span = tracer.spanBuilder(SpannerImpl.BEGIN_TRANSACTION); + BeginTransactionRequest.Builder requestBuilder = + BeginTransactionRequest.newBuilder() + .setSession(getName()) + .setOptions( + defaultTransactionOptions().toBuilder() + .mergeFrom( + createReadWriteTransactionOptions( + transactionOptions, previousTransactionId))); + if (sessionReference.getIsMultiplexed() && mutation != null) { + requestBuilder.setMutationKey(mutation); + } + RequestOptions.Builder optionsBuilder = + transactionOptions.toRequestOptionsProto(true).toBuilder(); + RequestOptions.ClientContext defaultClientContext = spanner.getOptions().getClientContext(); + if (defaultClientContext != null) { + RequestOptions.ClientContext.Builder builder = defaultClientContext.toBuilder(); + if (optionsBuilder.hasClientContext()) { + builder.mergeFrom(optionsBuilder.getClientContext()); + } + optionsBuilder.setClientContext(builder.build()); + } + if (!sessionReference.getIsMultiplexed()) { + optionsBuilder.clearTransactionTag(); + } + RequestOptions requestOptions = 
optionsBuilder.build(); + if (!requestOptions.equals(RequestOptions.getDefaultInstance())) { + requestBuilder.setRequestOptions(requestOptions); + } + final BeginTransactionRequest request = requestBuilder.build(); + final ApiFuture requestFuture; + try (IScope ignore = tracer.withSpan(span)) { + requestFuture = spanner.getRpc().beginTransactionAsync(request, channelHint, routeToLeader); + } + requestFuture.addListener( + () -> { + try (IScope ignore = tracer.withSpan(span)) { + Transaction txn = requestFuture.get(); + if (txn.getId().isEmpty()) { + throw newSpannerException( + ErrorCode.INTERNAL, "Missing id in transaction\n" + getName()); + } + span.end(); + res.set(txn); + } catch (ExecutionException e) { + span.setStatus(e); + span.end(); + res.setException( + SpannerExceptionFactory.newSpannerException( + e.getCause() == null ? e : e.getCause())); + } catch (InterruptedException e) { + span.setStatus(e); + span.end(); + res.setException(SpannerExceptionFactory.propagateInterrupt(e)); + } catch (Exception e) { + span.setStatus(e); + span.end(); + res.setException(e); + } + }, + MoreExecutors.directExecutor()); + return res; + } + + TransactionContextImpl newTransaction(Options options, ByteString previousTransactionId) { + return TransactionContextImpl.newBuilder() + .setSession(this) + .setOptions(options) + .setTransactionId(null) + .setPreviousTransactionId(previousTransactionId) + .setTrackTransactionStarter(spanner.getOptions().isTrackTransactionStarter()) + .setRpc(spanner.getRpc()) + .setDefaultQueryOptions(spanner.getDefaultQueryOptions(getDatabaseId())) + .setDefaultPrefetchChunks(spanner.getDefaultPrefetchChunks()) + .setDefaultDecodeMode(spanner.getDefaultDecodeMode()) + .setSpan(currentSpan) + .setTracer(tracer) + .setExecutorProvider(spanner.getAsyncExecutorProvider()) + .setClock(clock) + .build(); + } + + SessionTransaction getActiveTransaction() { + return this.activeTransaction; + } + + T setActive(@Nullable T ctx) { + 
throwIfTransactionsPending(); + // multiplexed sessions support running concurrent transactions + if (!getIsMultiplexed()) { + if (activeTransaction != null) { + activeTransaction.invalidate(); + } + } + activeTransaction = ctx; + if (activeTransaction != null) { + activeTransaction.setSpan(currentSpan); + } + return ctx; + } + + void onError(SpannerException spannerException) {} + + void onReadDone() {} + + void onTransactionDone() {} + + TraceWrapper getTracer() { + return tracer; + } + + public XGoogSpannerRequestId.RequestIdCreator getRequestIdCreator() { + return this.spanner.getRpc().getRequestIdCreator(); + } + + int getChannel() { + if (getIsMultiplexed()) { + return 0; + } + Map options = this.getOptions(); + if (options == null) { + return 0; + } + Long channelHint = (Long) options.get(SpannerRpc.Option.CHANNEL_HINT); + if (channelHint == null) { + return 0; + } + return (int) (channelHint % this.spanner.getOptions().getNumChannels()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionNotFoundException.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionNotFoundException.java new file mode 100644 index 000000000000..f4a62b1954a8 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionNotFoundException.java @@ -0,0 +1,50 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.gax.rpc.ApiException; +import com.google.cloud.spanner.SpannerException.ResourceNotFoundException; +import com.google.rpc.ResourceInfo; +import javax.annotation.Nullable; + +/** + * Exception thrown by Cloud Spanner when an operation detects that the session that is being used + * is no longer valid. This type of error has its own subclass as it is a condition that should + * normally be hidden from the user, and the client library should try to fix this internally. + */ +public class SessionNotFoundException extends ResourceNotFoundException { + private static final long serialVersionUID = -6395746612598975751L; + + /** Private constructor. Use {@link SpannerExceptionFactory} to create instances. */ + SessionNotFoundException( + DoNotConstructDirectly token, + @Nullable String message, + ResourceInfo resourceInfo, + @Nullable Throwable cause) { + this(token, message, resourceInfo, cause, null); + } + + /** Private constructor. Use {@link SpannerExceptionFactory} to create instances. */ + SessionNotFoundException( + DoNotConstructDirectly token, + @Nullable String message, + ResourceInfo resourceInfo, + @Nullable Throwable cause, + @Nullable ApiException apiException) { + super(token, message, resourceInfo, cause, apiException); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionPoolOptions.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionPoolOptions.java new file mode 100644 index 000000000000..c0fb65980cbe --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionPoolOptions.java @@ -0,0 +1,1093 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.api.gax.util.TimeConversionUtils.toJavaTimeDuration; +import static com.google.api.gax.util.TimeConversionUtils.toThreetenDuration; + +import com.google.api.core.InternalApi; +import com.google.api.core.ObsoleteApi; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import java.time.Duration; +import java.util.Locale; +import java.util.Objects; + +/** + * Options for the session pool used by {@code DatabaseClient}. + * + * @deprecated The Spanner Java client uses a single multiplexed session. All options related to the + * session pool are no longer functional and will be removed in a future version. + */ +@Deprecated +public class SessionPoolOptions { + @Deprecated + enum Position { + FIRST, + LAST, + RANDOM + } + + // Default number of channels * 100. + private static final int DEFAULT_MAX_SESSIONS = 400; + private static final int DEFAULT_MIN_SESSIONS = 100; + private static final int DEFAULT_INC_STEP = 25; + private static final int EXPERIMENTAL_HOST_REGULAR_SESSIONS = 0; + private static final ActionOnExhaustion DEFAULT_ACTION = ActionOnExhaustion.BLOCK; + private final int minSessions; + private final int maxSessions; + private final int incStep; + + /** + * Use {@link #minSessions} instead to set the minimum number of sessions in the pool to maintain. + * Creating a larger number of sessions during startup is relatively cheap as it is executed with + * the BatchCreateSessions RPC. 
+ */ + @Deprecated private final int maxIdleSessions; + + /** + * The session pool no longer prepares a fraction of the sessions with a read/write transaction. + * This setting therefore does not have any meaning anymore, and may be removed in the future. + */ + @Deprecated private final float writeSessionsFraction; + + private final ActionOnExhaustion actionOnExhaustion; + private final long loopFrequency; + private final Duration multiplexedSessionMaintenanceLoopFrequency; + private final int keepAliveIntervalMinutes; + private final Duration removeInactiveSessionAfter; + private final ActionOnSessionNotFound actionOnSessionNotFound; + private final ActionOnSessionLeak actionOnSessionLeak; + private final boolean trackStackTraceOfSessionCheckout; + private final InactiveTransactionRemovalOptions inactiveTransactionRemovalOptions; + + /** + * Use {@link #acquireSessionTimeout} instead to specify the total duration to wait while + * acquiring session for a transaction. + */ + @Deprecated private final long initialWaitForSessionTimeoutMillis; + + private final boolean autoDetectDialect; + private final Duration waitForMinSessions; + private final Duration acquireSessionTimeout; + private final Position releaseToPosition; + private final long randomizePositionQPSThreshold; + + /** Property for allowing mocking of session maintenance clock. */ + private final Clock poolMaintainerClock; + + private final boolean useMultiplexedSession; + + private final boolean useMultiplexedSessionForRW; + + private final boolean useMultiplexedSessionForPartitionedOps; + + // TODO: Change to use java.time.Duration. + private final Duration multiplexedSessionMaintenanceDuration; + private final boolean skipVerifyingBeginTransactionForMuxRW; + + private SessionPoolOptions(Builder builder) { + // minSessions > maxSessions is only possible if the user has only set a value for maxSessions. 
+ // We allow that to prevent code that only sets a value for maxSessions to break if the + // maxSessions value is less than the default for minSessions. + this.minSessions = + builder.isExperimentalHost + ? EXPERIMENTAL_HOST_REGULAR_SESSIONS + : Math.min(builder.minSessions, builder.maxSessions); + this.maxSessions = + builder.isExperimentalHost ? EXPERIMENTAL_HOST_REGULAR_SESSIONS : builder.maxSessions; + this.incStep = builder.incStep; + this.maxIdleSessions = builder.maxIdleSessions; + this.writeSessionsFraction = builder.writeSessionsFraction; + this.actionOnExhaustion = builder.actionOnExhaustion; + this.actionOnSessionNotFound = builder.actionOnSessionNotFound; + this.actionOnSessionLeak = builder.actionOnSessionLeak; + this.trackStackTraceOfSessionCheckout = builder.trackStackTraceOfSessionCheckout; + this.initialWaitForSessionTimeoutMillis = builder.initialWaitForSessionTimeoutMillis; + this.loopFrequency = builder.loopFrequency; + this.multiplexedSessionMaintenanceLoopFrequency = + builder.multiplexedSessionMaintenanceLoopFrequency; + this.keepAliveIntervalMinutes = builder.keepAliveIntervalMinutes; + this.removeInactiveSessionAfter = builder.removeInactiveSessionAfter; + this.autoDetectDialect = builder.autoDetectDialect; + this.waitForMinSessions = builder.waitForMinSessions; + this.acquireSessionTimeout = builder.acquireSessionTimeout; + this.releaseToPosition = builder.releaseToPosition; + this.randomizePositionQPSThreshold = builder.randomizePositionQPSThreshold; + this.inactiveTransactionRemovalOptions = builder.inactiveTransactionRemovalOptions; + this.poolMaintainerClock = builder.poolMaintainerClock; + // useMultiplexedSession priority => Environment var > private setter > client default + Boolean useMultiplexedSessionFromEnvVariable = getUseMultiplexedSessionFromEnvVariable(); + this.useMultiplexedSession = + builder.isExperimentalHost + || ((useMultiplexedSessionFromEnvVariable != null) + ? 
useMultiplexedSessionFromEnvVariable + : builder.useMultiplexedSession); + // useMultiplexedSessionForRW priority => Environment var > private setter > client default + Boolean useMultiplexedSessionForRWFromEnvVariable = + getUseMultiplexedSessionForRWFromEnvVariable(); + this.useMultiplexedSessionForRW = + builder.isExperimentalHost + || ((useMultiplexedSessionForRWFromEnvVariable != null) + ? useMultiplexedSessionForRWFromEnvVariable + : builder.useMultiplexedSessionForRW); + // useMultiplexedSessionPartitionedOps priority => Environment var > private setter > client + // default + Boolean useMultiplexedSessionFromEnvVariablePartitionedOps = + getUseMultiplexedSessionFromEnvVariablePartitionedOps(); + this.useMultiplexedSessionForPartitionedOps = + builder.isExperimentalHost + || ((useMultiplexedSessionFromEnvVariablePartitionedOps != null) + ? useMultiplexedSessionFromEnvVariablePartitionedOps + : builder.useMultiplexedSessionPartitionedOps); + this.multiplexedSessionMaintenanceDuration = builder.multiplexedSessionMaintenanceDuration; + this.skipVerifyingBeginTransactionForMuxRW = + builder.isExperimentalHost || builder.skipVerifyingBeginTransactionForMuxRW; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof SessionPoolOptions)) { + return false; + } + SessionPoolOptions other = (SessionPoolOptions) o; + return Objects.equals(this.minSessions, other.minSessions) + && Objects.equals(this.maxSessions, other.maxSessions) + && Objects.equals(this.incStep, other.incStep) + && Objects.equals(this.maxIdleSessions, other.maxIdleSessions) + && Objects.equals(this.writeSessionsFraction, other.writeSessionsFraction) + && Objects.equals(this.actionOnExhaustion, other.actionOnExhaustion) + && Objects.equals(this.actionOnSessionNotFound, other.actionOnSessionNotFound) + && Objects.equals(this.actionOnSessionLeak, other.actionOnSessionLeak) + && Objects.equals( + this.trackStackTraceOfSessionCheckout, other.trackStackTraceOfSessionCheckout) + && 
Objects.equals( + this.initialWaitForSessionTimeoutMillis, other.initialWaitForSessionTimeoutMillis) + && Objects.equals(this.loopFrequency, other.loopFrequency) + && Objects.equals( + this.multiplexedSessionMaintenanceLoopFrequency, + other.multiplexedSessionMaintenanceLoopFrequency) + && Objects.equals(this.keepAliveIntervalMinutes, other.keepAliveIntervalMinutes) + && Objects.equals(this.removeInactiveSessionAfter, other.removeInactiveSessionAfter) + && Objects.equals(this.autoDetectDialect, other.autoDetectDialect) + && Objects.equals(this.waitForMinSessions, other.waitForMinSessions) + && Objects.equals(this.acquireSessionTimeout, other.acquireSessionTimeout) + && Objects.equals(this.releaseToPosition, other.releaseToPosition) + && Objects.equals(this.randomizePositionQPSThreshold, other.randomizePositionQPSThreshold) + && Objects.equals( + this.inactiveTransactionRemovalOptions, other.inactiveTransactionRemovalOptions) + && Objects.equals(this.poolMaintainerClock, other.poolMaintainerClock) + && Objects.equals(this.useMultiplexedSession, other.useMultiplexedSession) + && Objects.equals(this.useMultiplexedSessionForRW, other.useMultiplexedSessionForRW) + && Objects.equals( + this.multiplexedSessionMaintenanceDuration, other.multiplexedSessionMaintenanceDuration) + && Objects.equals( + this.skipVerifyingBeginTransactionForMuxRW, + other.skipVerifyingBeginTransactionForMuxRW); + } + + @Override + public int hashCode() { + return Objects.hash( + this.minSessions, + this.maxSessions, + this.incStep, + this.maxIdleSessions, + this.writeSessionsFraction, + this.actionOnExhaustion, + this.actionOnSessionNotFound, + this.actionOnSessionLeak, + this.trackStackTraceOfSessionCheckout, + this.initialWaitForSessionTimeoutMillis, + this.loopFrequency, + this.multiplexedSessionMaintenanceLoopFrequency, + this.keepAliveIntervalMinutes, + this.removeInactiveSessionAfter, + this.autoDetectDialect, + this.waitForMinSessions, + this.acquireSessionTimeout, + 
this.releaseToPosition, + this.randomizePositionQPSThreshold, + this.inactiveTransactionRemovalOptions, + this.poolMaintainerClock, + this.useMultiplexedSession, + this.useMultiplexedSessionForRW, + this.multiplexedSessionMaintenanceDuration, + this.skipVerifyingBeginTransactionForMuxRW); + } + + public Builder toBuilder() { + return new Builder(this); + } + + @Deprecated + public int getMinSessions() { + return minSessions; + } + + @Deprecated + public int getMaxSessions() { + return maxSessions; + } + + int getIncStep() { + return incStep; + } + + /** + * @deprecated Use a higher value for {@link SessionPoolOptions.Builder#setMinSessions(int)} + * instead of setting this option. + */ + @Deprecated + public int getMaxIdleSessions() { + return maxIdleSessions; + } + + /** + * @deprecated This value is no longer used. The session pool does not prepare any sessions for + * read/write transactions. Instead, a transaction will be started by including a + * BeginTransaction option with the first statement of a transaction. This method may be + * removed in a future release. + */ + @Deprecated + public float getWriteSessionsFraction() { + return writeSessionsFraction; + } + + long getLoopFrequency() { + return loopFrequency; + } + + Duration getMultiplexedSessionMaintenanceLoopFrequency() { + return this.multiplexedSessionMaintenanceLoopFrequency; + } + + @Deprecated + public int getKeepAliveIntervalMinutes() { + return keepAliveIntervalMinutes; + } + + /** This method is obsolete. Use {@link #getRemoveInactiveSessionAfterDuration()} instead. 
*/ + @ObsoleteApi("Use getRemoveInactiveSessionAfterDuration() instead") + public org.threeten.bp.Duration getRemoveInactiveSessionAfter() { + return toThreetenDuration(getRemoveInactiveSessionAfterDuration()); + } + + @Deprecated + public Duration getRemoveInactiveSessionAfterDuration() { + return removeInactiveSessionAfter; + } + + @Deprecated + public boolean isFailIfPoolExhausted() { + return actionOnExhaustion == ActionOnExhaustion.FAIL; + } + + @Deprecated + public boolean isBlockIfPoolExhausted() { + return actionOnExhaustion == ActionOnExhaustion.BLOCK; + } + + public boolean isAutoDetectDialect() { + return autoDetectDialect; + } + + InactiveTransactionRemovalOptions getInactiveTransactionRemovalOptions() { + return inactiveTransactionRemovalOptions; + } + + boolean closeInactiveTransactions() { + return inactiveTransactionRemovalOptions.actionOnInactiveTransaction + == ActionOnInactiveTransaction.CLOSE; + } + + boolean warnAndCloseInactiveTransactions() { + return inactiveTransactionRemovalOptions.actionOnInactiveTransaction + == ActionOnInactiveTransaction.WARN_AND_CLOSE; + } + + boolean warnInactiveTransactions() { + return inactiveTransactionRemovalOptions.actionOnInactiveTransaction + == ActionOnInactiveTransaction.WARN; + } + + @VisibleForTesting + long getInitialWaitForSessionTimeoutMillis() { + return initialWaitForSessionTimeoutMillis; + } + + @VisibleForTesting + boolean isFailIfSessionNotFound() { + return actionOnSessionNotFound == ActionOnSessionNotFound.FAIL; + } + + @VisibleForTesting + boolean isFailOnSessionLeak() { + return actionOnSessionLeak == ActionOnSessionLeak.FAIL; + } + + @VisibleForTesting + Clock getPoolMaintainerClock() { + return poolMaintainerClock; + } + + @Deprecated + public boolean isTrackStackTraceOfSessionCheckout() { + return trackStackTraceOfSessionCheckout; + } + + Duration getWaitForMinSessions() { + return waitForMinSessions; + } + + @VisibleForTesting + Duration getAcquireSessionTimeout() { + return 
acquireSessionTimeout; + } + + Position getReleaseToPosition() { + return releaseToPosition; + } + + long getRandomizePositionQPSThreshold() { + return randomizePositionQPSThreshold; + } + + @VisibleForTesting + @InternalApi + public boolean getUseMultiplexedSession() { + return useMultiplexedSession; + } + + @VisibleForTesting + @InternalApi + protected boolean getUseMultiplexedSessionBlindWrite() { + return getUseMultiplexedSession(); + } + + @VisibleForTesting + @InternalApi + public boolean getUseMultiplexedSessionForRW() { + // Multiplexed sessions for R/W are enabled only if both global multiplexed sessions and + // read-write multiplexed session flags are set to true. + return getUseMultiplexedSession() && useMultiplexedSessionForRW; + } + + @VisibleForTesting + @InternalApi + public boolean getUseMultiplexedSessionPartitionedOps() { + return getUseMultiplexedSession() && useMultiplexedSessionForPartitionedOps; + } + + private static Boolean getUseMultiplexedSessionFromEnvVariable() { + return parseBooleanEnvVariable("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS"); + } + + @VisibleForTesting + @InternalApi + protected static Boolean getUseMultiplexedSessionFromEnvVariablePartitionedOps() { + return parseBooleanEnvVariable("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS"); + } + + private static Boolean parseBooleanEnvVariable(String variableName) { + String envVariable = System.getenv(variableName); + if (envVariable != null && envVariable.length() > 0) { + if ("true".equalsIgnoreCase(envVariable) || "false".equalsIgnoreCase(envVariable)) { + return Boolean.parseBoolean(envVariable); + } else { + throw new IllegalArgumentException(variableName + " should be either true or false."); + } + } + return null; + } + + private static Boolean getUseMultiplexedSessionForRWFromEnvVariable() { + // Checks the value of env, GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW + // This returns null until RW is supported. 
+ return parseBooleanEnvVariable("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW"); + } + + Duration getMultiplexedSessionMaintenanceDuration() { + return multiplexedSessionMaintenanceDuration; + } + + @VisibleForTesting + @InternalApi + boolean getSkipVerifyBeginTransactionForMuxRW() { + return skipVerifyingBeginTransactionForMuxRW; + } + + public static Builder newBuilder() { + return new Builder(); + } + + private enum ActionOnExhaustion { + BLOCK, + FAIL, + } + + private enum ActionOnSessionNotFound { + RETRY, + FAIL + } + + private enum ActionOnSessionLeak { + WARN, + FAIL + } + + @VisibleForTesting + enum ActionOnInactiveTransaction { + WARN, + WARN_AND_CLOSE, + CLOSE + } + + /** Configuration options for task to clean up inactive transactions. */ + static class InactiveTransactionRemovalOptions { + + /** Option to set the behaviour when there are inactive transactions. */ + private final ActionOnInactiveTransaction actionOnInactiveTransaction; + + /** + * Frequency for closing inactive transactions. Between two consecutive task executions, it's + * ensured that the duration is greater or equal to this duration. + */ + private final Duration executionFrequency; + + /** + * Long-running transactions will be cleaned up if utilisation is greater than the below value. + */ + private final double usedSessionsRatioThreshold; + + /** + * A transaction is considered to be idle if it has not been used for a duration greater than + * the below value. 
+ */ + private final Duration idleTimeThreshold; + + InactiveTransactionRemovalOptions(final Builder builder) { + this.actionOnInactiveTransaction = builder.actionOnInactiveTransaction; + this.idleTimeThreshold = builder.idleTimeThreshold; + this.executionFrequency = builder.executionFrequency; + this.usedSessionsRatioThreshold = builder.usedSessionsRatioThreshold; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof InactiveTransactionRemovalOptions)) { + return false; + } + InactiveTransactionRemovalOptions other = (InactiveTransactionRemovalOptions) o; + return Objects.equals(this.actionOnInactiveTransaction, other.actionOnInactiveTransaction) + && Objects.equals(this.idleTimeThreshold, other.idleTimeThreshold) + && Objects.equals(this.executionFrequency, other.executionFrequency) + && Objects.equals(this.usedSessionsRatioThreshold, other.usedSessionsRatioThreshold); + } + + @Override + public int hashCode() { + return Objects.hash( + this.actionOnInactiveTransaction, + this.idleTimeThreshold, + this.executionFrequency, + this.usedSessionsRatioThreshold); + } + + Duration getExecutionFrequency() { + return executionFrequency; + } + + double getUsedSessionsRatioThreshold() { + return usedSessionsRatioThreshold; + } + + Duration getIdleTimeThreshold() { + return idleTimeThreshold; + } + + static InactiveTransactionRemovalOptions.Builder newBuilder() { + return new Builder(); + } + + static class Builder { + private ActionOnInactiveTransaction actionOnInactiveTransaction = + ActionOnInactiveTransaction.WARN; + private Duration executionFrequency = Duration.ofMinutes(2); + private double usedSessionsRatioThreshold = 0.95; + private Duration idleTimeThreshold = Duration.ofMinutes(60L); + + public Builder() {} + + InactiveTransactionRemovalOptions build() { + validate(); + return new InactiveTransactionRemovalOptions(this); + } + + private void validate() { + Preconditions.checkArgument( + executionFrequency.toMillis() > 0, + "Execution frequency 
%s should be positive", + executionFrequency.toMillis()); + Preconditions.checkArgument( + idleTimeThreshold.toMillis() > 0, + "Idle Time Threshold duration %s should be positive", + idleTimeThreshold.toMillis()); + } + + @VisibleForTesting + InactiveTransactionRemovalOptions.Builder setActionOnInactiveTransaction( + final ActionOnInactiveTransaction actionOnInactiveTransaction) { + this.actionOnInactiveTransaction = actionOnInactiveTransaction; + return this; + } + + @VisibleForTesting + InactiveTransactionRemovalOptions.Builder setExecutionFrequency( + final Duration executionFrequency) { + this.executionFrequency = executionFrequency; + return this; + } + + @VisibleForTesting + InactiveTransactionRemovalOptions.Builder setUsedSessionsRatioThreshold( + final double usedSessionsRatioThreshold) { + this.usedSessionsRatioThreshold = usedSessionsRatioThreshold; + return this; + } + + @VisibleForTesting + InactiveTransactionRemovalOptions.Builder setIdleTimeThreshold( + final Duration idleTimeThreshold) { + this.idleTimeThreshold = idleTimeThreshold; + return this; + } + } + } + + /** Builder for creating SessionPoolOptions. */ + public static class Builder { + private boolean minSessionsSet = false; + private int minSessions = DEFAULT_MIN_SESSIONS; + private int maxSessions = DEFAULT_MAX_SESSIONS; + private int incStep = DEFAULT_INC_STEP; + + /** Set a higher value for {@link #minSessions} instead of using this field. */ + @Deprecated private int maxIdleSessions; + + /** + * The session pool no longer prepares a fraction of the sessions with a read/write transaction. + * This setting therefore does not have any meaning anymore, and may be removed in the future. 
+ */ + @Deprecated private float writeSessionsFraction = 0.2f; + + private ActionOnExhaustion actionOnExhaustion = DEFAULT_ACTION; + private long initialWaitForSessionTimeoutMillis = 30_000L; + private ActionOnSessionNotFound actionOnSessionNotFound = ActionOnSessionNotFound.RETRY; + private ActionOnSessionLeak actionOnSessionLeak = ActionOnSessionLeak.WARN; + + /** + * Capture the call stack of the thread that checked out a session of the pool. This will + * pre-create a com.google.cloud.spanner.SessionPool.LeakedSessionException already when a + * session is checked out. This can be disabled by users, for example if their monitoring + * systems log the pre-created exception. If disabled, the + * com.google.cloud.spanner.SessionPool.LeakedSessionException will only be created when an + * actual session leak is detected. The stack trace of the exception will in that case not + * contain the call stack of when the session was checked out. + */ + private boolean trackStackTraceOfSessionCheckout = true; + + private InactiveTransactionRemovalOptions inactiveTransactionRemovalOptions = + InactiveTransactionRemovalOptions.newBuilder().build(); + private long loopFrequency = 10 * 1000L; + private Duration multiplexedSessionMaintenanceLoopFrequency = Duration.ofMinutes(10); + private int keepAliveIntervalMinutes = 30; + private Duration removeInactiveSessionAfter = Duration.ofMinutes(55L); + private boolean autoDetectDialect = false; + private Duration waitForMinSessions = Duration.ZERO; + private Duration acquireSessionTimeout = Duration.ofSeconds(60); + private final Position releaseToPosition = getReleaseToPositionFromSystemProperty(); + + /** + * The session pool will randomize the position of a session that is being returned when this + * threshold is exceeded. That is: If the transactions per second exceeds this threshold, then + * the session pool will use a random order for the sessions instead of LIFO. The default is 0, + * which means that the option is disabled. 
+ */ + private long randomizePositionQPSThreshold = 0L; + + // This field controls the default behavior of session management in Java client. + // Set useMultiplexedSession to true to make multiplexed session the default. + private boolean useMultiplexedSession = true; + + // This field controls the default behavior of session management for RW operations in Java + // client. + // Set useMultiplexedSessionForRW to true to make multiplexed session for RW operations the + // default. + private boolean useMultiplexedSessionForRW = true; + + // This field controls the default behavior of session management for Partitioned operations in + // Java client. + // Set useMultiplexedSessionPartitionedOps to true to make multiplexed session for Partitioned + // operations the default. + private boolean useMultiplexedSessionPartitionedOps = true; + + private Duration multiplexedSessionMaintenanceDuration = Duration.ofDays(7); + private Clock poolMaintainerClock = Clock.INSTANCE; + private boolean skipVerifyingBeginTransactionForMuxRW = false; + private boolean isExperimentalHost = false; + + private static Position getReleaseToPositionFromSystemProperty() { + // NOTE: This System property is a beta feature. Support for it can be removed in the future. + String key = "com.google.cloud.spanner.session_pool_release_to_position"; + if (System.getProperties().containsKey(key)) { + try { + return Position.valueOf(System.getProperty(key).toUpperCase(Locale.ENGLISH)); + } catch (Throwable ignore) { + // fallthrough and return the default. 
+ } + } + return Position.FIRST; + } + + public Builder() {} + + private Builder(SessionPoolOptions options) { + this.minSessionsSet = true; + this.minSessions = options.minSessions; + this.maxSessions = options.maxSessions; + this.incStep = options.incStep; + this.maxIdleSessions = options.maxIdleSessions; + this.writeSessionsFraction = options.writeSessionsFraction; + this.actionOnExhaustion = options.actionOnExhaustion; + this.initialWaitForSessionTimeoutMillis = options.initialWaitForSessionTimeoutMillis; + this.actionOnSessionNotFound = options.actionOnSessionNotFound; + this.actionOnSessionLeak = options.actionOnSessionLeak; + this.trackStackTraceOfSessionCheckout = options.trackStackTraceOfSessionCheckout; + this.loopFrequency = options.loopFrequency; + this.multiplexedSessionMaintenanceLoopFrequency = + options.multiplexedSessionMaintenanceLoopFrequency; + this.keepAliveIntervalMinutes = options.keepAliveIntervalMinutes; + this.removeInactiveSessionAfter = options.removeInactiveSessionAfter; + this.autoDetectDialect = options.autoDetectDialect; + this.waitForMinSessions = options.waitForMinSessions; + this.acquireSessionTimeout = options.acquireSessionTimeout; + this.randomizePositionQPSThreshold = options.randomizePositionQPSThreshold; + this.inactiveTransactionRemovalOptions = options.inactiveTransactionRemovalOptions; + this.useMultiplexedSession = options.useMultiplexedSession; + this.useMultiplexedSessionForRW = options.useMultiplexedSessionForRW; + this.useMultiplexedSessionPartitionedOps = options.useMultiplexedSessionForPartitionedOps; + this.multiplexedSessionMaintenanceDuration = options.multiplexedSessionMaintenanceDuration; + this.poolMaintainerClock = options.poolMaintainerClock; + this.skipVerifyingBeginTransactionForMuxRW = options.skipVerifyingBeginTransactionForMuxRW; + } + + /** + * Minimum number of sessions that this pool will always maintain. These will be created eagerly + * in parallel. Defaults to 100. 
+ */ + @Deprecated + public Builder setMinSessions(int minSessions) { + Preconditions.checkArgument(minSessions >= 0, "minSessions must be >= 0"); + this.minSessionsSet = true; + this.minSessions = minSessions; + return this; + } + + /** + * Maximum number of sessions that this pool will have. If current numbers of sessions in the + * pool is less than this and they are all busy, then a new session will be created for any new + * operation. If current number of in use sessions is same as this and a new request comes, pool + * can either block or fail. Defaults to 400. + */ + @Deprecated + public Builder setMaxSessions(int maxSessions) { + Preconditions.checkArgument(maxSessions > 0, "maxSessions must be > 0"); + this.maxSessions = maxSessions; + return this; + } + + /** + * Number of sessions to batch create when the pool needs at least one more session. Defaults to + * 25. + */ + Builder setIncStep(int incStep) { + Preconditions.checkArgument(incStep > 0, "incStep must be > 0"); + this.incStep = incStep; + return this; + } + + /** + * Maximum number of idle sessions that this pool will maintain. Pool will close any sessions + * beyond this but making sure to always have at least as many sessions as specified by {@link + * #setMinSessions}. To determine how many sessions are idle we look at maximum number of + * sessions used concurrently over a window of time. Any sessions beyond that are idle. Defaults + * to 0. + * + * @deprecated set a higher value for {@link #setMinSessions(int)} instead of using this + * configuration option. This option will be removed in a future release. 
+ */ + @Deprecated + public Builder setMaxIdleSessions(int maxIdleSessions) { + this.maxIdleSessions = maxIdleSessions; + return this; + } + + Builder setLoopFrequency(long loopFrequency) { + this.loopFrequency = loopFrequency; + return this; + } + + Builder setMultiplexedSessionMaintenanceLoopFrequency(Duration frequency) { + this.multiplexedSessionMaintenanceLoopFrequency = frequency; + return this; + } + + Builder setInactiveTransactionRemovalOptions( + InactiveTransactionRemovalOptions inactiveTransactionRemovalOptions) { + this.inactiveTransactionRemovalOptions = inactiveTransactionRemovalOptions; + return this; + } + + /** + * This method is obsolete. Use {@link #setRemoveInactiveSessionAfterDuration(Duration)} + * instead. + */ + @ObsoleteApi("Use setRemoveInactiveSessionAfterDuration(Duration) instead") + @Deprecated + public Builder setRemoveInactiveSessionAfter(org.threeten.bp.Duration duration) { + return setRemoveInactiveSessionAfterDuration(toJavaTimeDuration(duration)); + } + + @Deprecated + public Builder setRemoveInactiveSessionAfterDuration(Duration duration) { + this.removeInactiveSessionAfter = duration; + return this; + } + + /** + * How frequently to keep alive idle sessions. This should be less than 60 since an idle session + * is automatically closed after 60 minutes. Sessions will be kept alive by sending a dummy + * query "Select 1". Default value is 30 minutes. + */ + @Deprecated + public Builder setKeepAliveIntervalMinutes(int intervalMinutes) { + this.keepAliveIntervalMinutes = intervalMinutes; + return this; + } + + /** + * If all sessions are in use and {@code maxSessions} has been reached, fail the request by + * throwing a {@link SpannerException} with the error code {@code RESOURCE_EXHAUSTED}. Default + * behavior is to block the request. 
+ */ + @Deprecated + public Builder setFailIfPoolExhausted() { + this.actionOnExhaustion = ActionOnExhaustion.FAIL; + return this; + } + + /** + * If all sessions are in use and there is no more room for creating new sessions, block for a + * session to become available. Default behavior is same. + * + *

By default the requests are blocked for 60s and will fail with a `SpannerException` with + * error code `ResourceExhausted` if this timeout is exceeded. If you wish to block for a + * different period use the option {@link Builder#setAcquireSessionTimeoutDuration(Duration)} + * ()} + */ + @Deprecated + public Builder setBlockIfPoolExhausted() { + this.actionOnExhaustion = ActionOnExhaustion.BLOCK; + return this; + } + + /** + * If there are inactive transactions, log warning messages with the origin of such transactions + * to aid debugging. A transaction is classified as inactive if it executes for more than a + * system defined duration. + * + *

This option won't change the state of the transactions. It only generates warning logs + * that can be used for debugging. + * + * @return this builder for chaining + */ + @Deprecated + public Builder setWarnIfInactiveTransactions() { + this.inactiveTransactionRemovalOptions = + InactiveTransactionRemovalOptions.newBuilder() + .setActionOnInactiveTransaction(ActionOnInactiveTransaction.WARN) + .build(); + return this; + } + + /** + * If there are inactive transactions, release the resources consumed by such transactions. A + * transaction is classified as inactive if it executes for more than a system defined duration. + * The option would also produce necessary warning logs through which it can be debugged as to + * what resources were released due to this option. + * + *

Use the option {@link Builder#setWarnIfInactiveTransactions()} if you only want to log + * warnings about long-running transactions. + * + * @return this builder for chaining + */ + @Deprecated + public Builder setWarnAndCloseIfInactiveTransactions() { + this.inactiveTransactionRemovalOptions = + InactiveTransactionRemovalOptions.newBuilder() + .setActionOnInactiveTransaction(ActionOnInactiveTransaction.WARN_AND_CLOSE) + .build(); + return this; + } + + @InternalApi + public Builder setExperimentalHost() { + this.isExperimentalHost = true; + return this; + } + + /** + * If there are inactive transactions, release the resources consumed by such transactions. A + * transaction is classified as inactive if it executes for more than a system defined duration. + * + *

Use the option {@link Builder#setWarnIfInactiveTransactions()} if you only want to log + * warnings about long-running sessions. + * + *

Use the option {@link Builder#setWarnAndCloseIfInactiveTransactions()} if you want to log + * warnings along with closing the long-running transactions. + * + * @return this builder for chaining + */ + @VisibleForTesting + Builder setCloseIfInactiveTransactions() { + this.inactiveTransactionRemovalOptions = + InactiveTransactionRemovalOptions.newBuilder() + .setActionOnInactiveTransaction(ActionOnInactiveTransaction.CLOSE) + .build(); + return this; + } + + @VisibleForTesting + Builder setPoolMaintainerClock(Clock poolMaintainerClock) { + this.poolMaintainerClock = Preconditions.checkNotNull(poolMaintainerClock); + return this; + } + + /** + * Sets whether the client should use multiplexed session or not. If set to true, the client + * optimises and runs multiple applicable requests concurrently on a single session. A single + * multiplexed session is sufficient to handle all concurrent traffic. + * + *

When set to false, the client uses the regular session cached in the session pool for + * running 1 concurrent transaction per session. We require to provision sufficient sessions by + * making use of {@link SessionPoolOptions#minSessions} and {@link + * SessionPoolOptions#maxSessions} based on the traffic load. Failing to do so will result in + * higher latencies. + */ + Builder setUseMultiplexedSession(boolean useMultiplexedSession) { + this.useMultiplexedSession = useMultiplexedSession; + return this; + } + + /** + * Sets whether the client should use multiplexed session for R/W operations or not. This method + * is intentionally package-private and intended for internal use. + */ + @InternalApi + @VisibleForTesting + Builder setUseMultiplexedSessionForRW(boolean useMultiplexedSessionForRW) { + this.useMultiplexedSessionForRW = useMultiplexedSessionForRW; + return this; + } + + /** + * Sets whether the client should use multiplexed session for Partitioned operations or not. + * This method is intentionally package-private and intended for internal use. + */ + Builder setUseMultiplexedSessionPartitionedOps(boolean useMultiplexedSessionPartitionedOps) { + this.useMultiplexedSessionPartitionedOps = useMultiplexedSessionPartitionedOps; + return this; + } + + @VisibleForTesting + Builder setMultiplexedSessionMaintenanceDuration( + Duration multiplexedSessionMaintenanceDuration) { + this.multiplexedSessionMaintenanceDuration = multiplexedSessionMaintenanceDuration; + return this; + } + + // The additional BeginTransaction RPC for multiplexed session read-write is causing + // unexpected behavior in mock Spanner tests that rely on mocking the BeginTransaction RPC. + // Invoking this method with `true` skips sending the BeginTransaction RPC when the multiplexed + // session is created for the first time during client initialization. + // This is only used for tests. 
+ @VisibleForTesting + Builder setSkipVerifyingBeginTransactionForMuxRW( + boolean skipVerifyingBeginTransactionForMuxRW) { + this.skipVerifyingBeginTransactionForMuxRW = skipVerifyingBeginTransactionForMuxRW; + return this; + } + + /** + * Sets whether the client should automatically execute a background query to detect the dialect + * that is used by the database or not. Set this option to true if you do not know what the + * dialect of the database will be. + * + *

Note that you can always call {@link DatabaseClient#getDialect()} to get the dialect of a + * database regardless of this setting, but by setting this to true, the value will be + * pre-populated and cached in the client. + * + * @param autoDetectDialect Whether the client should automatically execute a background query + * to detect the dialect of the underlying database + * @return this builder for chaining + */ + public Builder setAutoDetectDialect(boolean autoDetectDialect) { + this.autoDetectDialect = autoDetectDialect; + return this; + } + + /** + * The initial number of milliseconds to wait for a session to become available when one is + * requested. The session pool will keep retrying to get a session, and the timeout will be + * doubled for each new attempt. The default is 30 seconds. + */ + @VisibleForTesting + Builder setInitialWaitForSessionTimeoutMillis(long timeout) { + this.initialWaitForSessionTimeoutMillis = timeout; + return this; + } + + /** + * If a session has been invalidated by the server, the SessionPool will by default retry the + * session. Set this option to throw an exception instead of retrying. + */ + @VisibleForTesting + Builder setFailIfSessionNotFound() { + this.actionOnSessionNotFound = ActionOnSessionNotFound.FAIL; + return this; + } + + @VisibleForTesting + Builder setFailOnSessionLeak() { + this.actionOnSessionLeak = ActionOnSessionLeak.FAIL; + return this; + } + + /** + * Sets whether the session pool should capture the call stack trace when a session is checked + * out of the pool. This will internally prepare a + * com.google.cloud.spanner.SessionPool.LeakedSessionException that will only be thrown if the + * session is actually leaked. This makes it easier to debug session leaks, as the stack trace + * of the thread that checked out the session will be available in the exception. + * + *

Some monitoring tools might log these exceptions even though they are not thrown. This + * option can be used to suppress the creation and logging of these exceptions. + */ + @Deprecated + public Builder setTrackStackTraceOfSessionCheckout(boolean trackStackTraceOfSessionCheckout) { + this.trackStackTraceOfSessionCheckout = trackStackTraceOfSessionCheckout; + return this; + } + + /** + * @deprecated This configuration value is no longer in use. The session pool does not prepare + * any sessions for read/write transactions. Instead, a transaction will automatically be + * started by the first statement that is executed by a transaction by including a + * BeginTransaction option with that statement. + *

This method may be removed in a future release. + */ + @Deprecated + public Builder setWriteSessionsFraction(float writeSessionsFraction) { + this.writeSessionsFraction = writeSessionsFraction; + return this; + } + + /** This method is obsolete. Use {@link #setWaitForMinSessionsDuration(Duration)} instead. */ + @ObsoleteApi("Use setWaitForMinSessionsDuration(Duration) instead") + public Builder setWaitForMinSessions(org.threeten.bp.Duration waitForMinSessions) { + return setWaitForMinSessionsDuration(toJavaTimeDuration(waitForMinSessions)); + } + + /** + * If greater than zero, waits for the session pool to have at least {@link + * SessionPoolOptions#minSessions} before returning the database client to the caller. Note that + * this check is only done during the session pool creation. This is usually done asynchronously + * in order to provide the client back to the caller as soon as possible. We don't recommend + * using this option unless you are executing benchmarks and want to guarantee the session pool + * has min sessions in the pool before continuing. + * + *

Defaults to zero (initialization is done asynchronously). + */ + public Builder setWaitForMinSessionsDuration(Duration waitForMinSessions) { + this.waitForMinSessions = waitForMinSessions; + return this; + } + + /** This method is obsolete. Use {@link #setAcquireSessionTimeoutDuration(Duration)} instead. */ + @ObsoleteApi("Use setAcquireSessionTimeoutDuration(Duration) instead") + @Deprecated + public Builder setAcquireSessionTimeout(org.threeten.bp.Duration acquireSessionTimeout) { + return setAcquireSessionTimeoutDuration(toJavaTimeDuration(acquireSessionTimeout)); + } + + /** + * If greater than zero, we wait for said duration when no sessions are available in the + * SessionPool. The default is a 60s timeout. Set the value to null to disable the timeout. + */ + @Deprecated + public Builder setAcquireSessionTimeoutDuration(Duration acquireSessionTimeout) { + try { + if (acquireSessionTimeout != null) { + Preconditions.checkArgument( + acquireSessionTimeout.toMillis() > 0, + "acquireSessionTimeout should be greater than 0 ns"); + } + } catch (ArithmeticException ex) { + throw new IllegalArgumentException( + "acquireSessionTimeout in millis should be lesser than Long.MAX_VALUE"); + } + this.acquireSessionTimeout = acquireSessionTimeout; + return this; + } + + Builder setRandomizePositionQPSThreshold(long randomizePositionQPSThreshold) { + Preconditions.checkArgument( + randomizePositionQPSThreshold >= 0L, "randomizePositionQPSThreshold must be >= 0"); + this.randomizePositionQPSThreshold = randomizePositionQPSThreshold; + return this; + } + + /** Build a SessionPoolOption object */ + public SessionPoolOptions build() { + validate(); + return new SessionPoolOptions(this); + } + + private void validate() { + if (minSessionsSet) { + Preconditions.checkArgument( + maxSessions >= minSessions, + "Min sessions(%s) must be <= max sessions(%s)", + minSessions, + maxSessions); + } + Preconditions.checkArgument( + keepAliveIntervalMinutes < 60, "Keep alive interval should be 
less than" + "60 minutes"); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionReference.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionReference.java new file mode 100644 index 000000000000..1fd6c303ede1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionReference.java @@ -0,0 +1,105 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.cloud.spanner.SessionClient.SessionId; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import java.time.Instant; +import java.util.Map; +import javax.annotation.Nullable; + +/** + * A {@code Session} can be used to perform transactions that read and/or modify data in a Cloud + * Spanner database. Sessions are managed internally by the client library, and users need not be + * aware of the actual session management, pooling and handling. 
+ */ +class SessionReference { + + private final String name; + private final DatabaseId databaseId; + @Nullable private final String databaseRole; + private final Map options; + private volatile Instant lastUseTime; + @Nullable private final Instant createTime; + private final boolean isMultiplexed; + + SessionReference(String name, @Nullable String databaseRole, Map options) { + this.options = options; + this.name = checkNotNull(name); + this.databaseId = SessionId.of(name).getDatabaseId(); + this.databaseRole = databaseRole; + this.lastUseTime = Instant.now(); + this.createTime = null; + this.isMultiplexed = false; + } + + SessionReference( + String name, + @Nullable String databaseRole, + com.google.protobuf.Timestamp createTime, + boolean isMultiplexed, + Map options) { + this.options = options; + this.name = checkNotNull(name); + this.databaseId = SessionId.of(name).getDatabaseId(); + this.databaseRole = databaseRole; + this.lastUseTime = Instant.now(); + this.createTime = convert(createTime); + this.isMultiplexed = isMultiplexed; + } + + public String getName() { + return name; + } + + public String getDatabaseRole() { + return databaseRole; + } + + public DatabaseId getDatabaseId() { + return databaseId; + } + + Map getOptions() { + return options; + } + + Instant getLastUseTime() { + return lastUseTime; + } + + Instant getCreateTime() { + return createTime; + } + + boolean getIsMultiplexed() { + return isMultiplexed; + } + + void markUsed(Instant instant) { + lastUseTime = instant; + } + + private Instant convert(com.google.protobuf.Timestamp timestamp) { + if (timestamp == null) { + return null; + } + return Instant.ofEpochSecond(timestamp.getSeconds(), timestamp.getNanos()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Spanner.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Spanner.java new file mode 100644 index 000000000000..908ca4c8fc07 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Spanner.java @@ -0,0 +1,174 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.gax.core.ExecutorProvider; +import com.google.cloud.Service; + +/** + * An interface for Cloud Spanner. Typically, there would only be one instance of this for the + * lifetime of the application which must be closed by invoking {@link #close()} when it is no + * longer needed. Failure to do so may result in leaking session resources and exhausting session + * quota. + */ +public interface Spanner extends Service, AutoCloseable { + + /** + * Returns a {@code DatabaseAdminClient} to execute admin operations on Cloud Spanner databases. + * + * @return {@code DatabaseAdminClient} + */ + /* + * + *

{@code
+   * SpannerOptions options = SpannerOptions.newBuilder().build();
+   * Spanner spanner = options.getService();
+   * DatabaseAdminClient dbAdminClient = spanner.getDatabaseAdminClient();
+   * }
+ * + */ + DatabaseAdminClient getDatabaseAdminClient(); + + /** + * Returns a {@link com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient} to execute + * admin operations on Cloud Spanner databases. This method always creates a new instance of + * {@link com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient} which is an {@link + * AutoCloseable} resource. For optimising the number of clients, caller may choose to cache the + * clients instead of repeatedly invoking this method and creating new instances. + * + * @return {@link com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient} + */ + /* + * + *
{@code
+   * SpannerOptions options = SpannerOptions.newBuilder().build();
+   * Spanner spanner = options.getService();
+   * DatabaseAdminClient dbAdminClient = spanner.createDatabaseAdminClient();
+   * }
+ * + */ + default com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient + createDatabaseAdminClient() { + throw new UnsupportedOperationException("Not implemented"); + } + + /** + * Returns an {@code InstanceAdminClient} to execute admin operations on Cloud Spanner instances. + * + * @return {@code InstanceAdminClient} + */ + /* + * + *
{@code
+   * SpannerOptions options = SpannerOptions.newBuilder().build();
+   * Spanner spanner = options.getService();
+   * InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient();
+   * }
+ * + */ + InstanceAdminClient getInstanceAdminClient(); + + /** + * Returns a {@link com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient} to execute + * admin operations on Cloud Spanner databases. This method always creates a new instance of + * {@link com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient} which is an {@link + * AutoCloseable} resource. For optimising the number of clients, caller may choose to cache the + * clients instead of repeatedly invoking this method and creating new instances. + * + * @return {@link com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient} + */ + /* + * + *
{@code
+   * SpannerOptions options = SpannerOptions.newBuilder().build();
+   * Spanner spanner = options.getService();
+   * InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient();
+   * }
+ * + */ + default com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient + createInstanceAdminClient() { + throw new UnsupportedOperationException("Not implemented"); + } + + /** + * Returns a {@code DatabaseClient} for the given database. It uses a pool of sessions to talk to + * the database. + * + * + *
{@code
+   * SpannerOptions options = SpannerOptions.newBuilder().build();
+   * Spanner spanner = options.getService();
+   * final String project = "test-project";
+   * final String instance = "test-instance";
+   * final String database = "example-db";
+   * DatabaseId db =
+   *     DatabaseId.of(project, instance, database);
+   * DatabaseClient dbClient = spanner.getDatabaseClient(db);
+   * }
+ * + * + */ + DatabaseClient getDatabaseClient(DatabaseId db); + + /** + * Returns a {@code BatchClient} to do batch operations on Cloud Spanner databases. Batch client + * is useful when one wants to read/query a large amount of data from Cloud Spanner across + * multiple processes, even across different machines. It allows to create partitions of Cloud + * Spanner database and then read/query over each partition independently yet at the same + * snapshot. + * + *

For all other use cases, {@code DatabaseClient} is more appropriate and performant. + * + * + *

{@code
+   * SpannerOptions options = SpannerOptions.newBuilder().build();
+   * Spanner spanner = options.getService();
+   * final String project = "test-project";
+   * final String instance = "test-instance";
+   * final String database = "example-db";
+   * DatabaseId db =
+   *     DatabaseId.of(project, instance, database);
+   * BatchClient batchClient = spanner.getBatchClient(db);
+   * }
+ * + * + */ + BatchClient getBatchClient(DatabaseId db); + + /** + * Closes all the clients associated with this instance and frees up all the resources. This + * method will block until it can clean up all the resources. Specifically, it deletes all the + * underlying sessions (which involves rpcs) and closes all the gRPC channels. Once this method + * called, this object is no longer usable. It is strongly advised to call this method when you + * are done with the {@code Spanner} object, typically when your application shuts down. There is + * a hard limit on number of sessions in Cloud Spanner and not calling this method can lead to + * unused sessions piling up on the backend. + */ + @Override + void close(); + + /** + * @return true if this {@link Spanner} object is closed. + */ + boolean isClosed(); + + /** + * @return the {@link ExecutorProvider} that is used for asynchronous queries and operations. + */ + ExecutorProvider getAsyncExecutorProvider(); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerApiFutures.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerApiFutures.java new file mode 100644 index 000000000000..88e0b84f0fc3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerApiFutures.java @@ -0,0 +1,43 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.core.ApiFuture; +import com.google.common.base.Preconditions; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutionException; + +public class SpannerApiFutures { + public static T get(ApiFuture future) throws SpannerException { + return getOrNull(Preconditions.checkNotNull(future)); + } + + public static T getOrNull(ApiFuture future) throws SpannerException { + try { + return future == null ? null : future.get(); + } catch (ExecutionException e) { + if (e.getCause() instanceof SpannerException) { + throw (SpannerException) e.getCause(); + } + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (CancellationException e) { + throw SpannerExceptionFactory.newSpannerExceptionForCancellation(null, e); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerBatchUpdateException.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerBatchUpdateException.java new file mode 100644 index 000000000000..837a008a8338 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerBatchUpdateException.java @@ -0,0 +1,37 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +public class SpannerBatchUpdateException extends SpannerException { + private final long[] updateCounts; + + /** Private constructor. Use {@link SpannerExceptionFactory} to create instances. */ + SpannerBatchUpdateException( + DoNotConstructDirectly token, + ErrorCode code, + String message, + long[] counts, + Throwable cause) { + super(token, code, false, message, cause, null); + updateCounts = counts; + } + + /** Returns the number of rows affected by each statement that is successfully run. */ + public long[] getUpdateCounts() { + return updateCounts; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerCloudMonitoringExporter.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerCloudMonitoringExporter.java new file mode 100644 index 000000000000..bedf6600075a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerCloudMonitoringExporter.java @@ -0,0 +1,279 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.gax.core.CredentialsProvider; +import com.google.api.gax.core.FixedCredentialsProvider; +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.PermissionDeniedException; +import com.google.auth.Credentials; +import com.google.cloud.NoCredentials; +import com.google.cloud.monitoring.v3.MetricServiceClient; +import com.google.cloud.monitoring.v3.MetricServiceSettings; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Strings; +import com.google.common.collect.Iterables; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.monitoring.v3.CreateTimeSeriesRequest; +import com.google.monitoring.v3.ProjectName; +import com.google.monitoring.v3.TimeSeries; +import com.google.protobuf.Empty; +import io.grpc.ManagedChannelBuilder; +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import io.opentelemetry.sdk.resources.Resource; +import java.io.IOException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.stream.Collectors; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; + +/** + * Spanner Cloud Monitoring OpenTelemetry Exporter. + * + *

The exporter will look for all spanner owned metrics under spanner.googleapis.com + * instrumentation scope and upload it via the Google Cloud Monitoring API. + */ +class SpannerCloudMonitoringExporter implements MetricExporter { + + private static final Logger logger = + Logger.getLogger(SpannerCloudMonitoringExporter.class.getName()); + + // This the quota limit from Cloud Monitoring. More details in + // https://cloud.google.com/monitoring/quotas#custom_metrics_quotas. + private static final int EXPORT_BATCH_SIZE_LIMIT = 200; + private final AtomicBoolean spannerExportFailureLogged = new AtomicBoolean(false); + private final AtomicBoolean lastExportSkippedData = new AtomicBoolean(false); + private final MetricServiceClient client; + private final String spannerProjectId; + + static SpannerCloudMonitoringExporter create( + String projectId, + @Nullable Credentials credentials, + @Nullable String monitoringHost, + String universeDomain) + throws IOException { + MetricServiceSettings.Builder settingsBuilder = MetricServiceSettings.newBuilder(); + CredentialsProvider credentialsProvider; + if (credentials == null || credentials instanceof NoCredentials) { + credentialsProvider = NoCredentialsProvider.create(); + } else { + credentialsProvider = FixedCredentialsProvider.create(credentials); + } + settingsBuilder.setCredentialsProvider(credentialsProvider); + if (monitoringHost != null) { + settingsBuilder.setEndpoint(monitoringHost); + } + if (!Strings.isNullOrEmpty(universeDomain)) { + settingsBuilder.setUniverseDomain(universeDomain); + } + + if (System.getProperty("jmh.monitoring-server-port") != null) { + settingsBuilder.setTransportChannelProvider( + InstantiatingGrpcChannelProvider.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setChannelConfigurator( + managedChannelBuilder -> + ManagedChannelBuilder.forAddress( + "0.0.0.0", + Integer.parseInt(System.getProperty("jmh.monitoring-server-port"))) + .usePlaintext()) + .build()); + } + + Duration 
timeout = Duration.ofMinutes(1); + // TODO: createServiceTimeSeries needs special handling if the request failed. Leaving + // it as not retried for now. + settingsBuilder.createServiceTimeSeriesSettings().setSimpleTimeoutNoRetriesDuration(timeout); + + return new SpannerCloudMonitoringExporter( + projectId, MetricServiceClient.create(settingsBuilder.build())); + } + + @VisibleForTesting + SpannerCloudMonitoringExporter(String projectId, MetricServiceClient client) { + this.client = client; + this.spannerProjectId = projectId; + } + + @Override + public CompletableResultCode export(@Nonnull Collection collection) { + if (client.isShutdown()) { + logger.log(Level.WARNING, "Exporter is shut down"); + return CompletableResultCode.ofFailure(); + } + + return exportSpannerClientMetrics(collection); + } + + @VisibleForTesting + MetricServiceClient getMetricServiceClient() { + return client; + } + + /** Export client built in metrics */ + private CompletableResultCode exportSpannerClientMetrics(Collection collection) { + // Filter spanner metrics. Only include metrics that contain a valid project. + List spannerMetricData = collection.stream().collect(Collectors.toList()); + + // Log warnings for metrics that will be skipped. + boolean mustFilter = false; + if (spannerMetricData.stream() + .map(metricData -> metricData.getResource()) + .anyMatch(this::shouldSkipPointDataDueToProjectId)) { + logger.log( + Level.WARNING, "Some metric data contain a different projectId. 
These will be skipped."); + mustFilter = true; + } + + if (mustFilter) { + spannerMetricData = + spannerMetricData.stream() + .filter(this::shouldSkipMetricData) + .collect(Collectors.toList()); + } + lastExportSkippedData.set(mustFilter); + + // Skips exporting if there's none + if (spannerMetricData.isEmpty()) { + return CompletableResultCode.ofSuccess(); + } + + List spannerTimeSeries; + try { + spannerTimeSeries = + SpannerCloudMonitoringExporterUtils.convertToSpannerTimeSeries( + spannerMetricData, this.spannerProjectId); + } catch (Throwable e) { + logger.log( + Level.WARNING, + "Failed to convert spanner metric data to cloud monitoring timeseries.", + e); + return CompletableResultCode.ofFailure(); + } + + ProjectName projectName = ProjectName.of(spannerProjectId); + + ApiFuture> futureList = exportTimeSeriesInBatch(projectName, spannerTimeSeries); + + CompletableResultCode spannerExportCode = new CompletableResultCode(); + ApiFutures.addCallback( + futureList, + new ApiFutureCallback>() { + @Override + public void onFailure(Throwable throwable) { + if (spannerExportFailureLogged.compareAndSet(false, true)) { + String msg = "createServiceTimeSeries request failed for spanner metrics."; + if (throwable instanceof PermissionDeniedException) { + // TODO: Add the link of public documentation when available in the log message. + msg += + String.format( + " Need monitoring metric writer permission on project=%s. Follow" + + " https://cloud.google.com/spanner/docs/view-manage-client-side-metrics#access-client-side-metrics" + + " to set up permissions", + projectName.getProject()); + } + logger.log(Level.WARNING, msg, throwable); + } + spannerExportCode.fail(); + } + + @Override + public void onSuccess(List empty) { + // When an export succeeded reset the export failure flag to false so if there's a + // transient failure it'll be logged. 
+ spannerExportFailureLogged.set(false); + spannerExportCode.succeed(); + } + }, + MoreExecutors.directExecutor()); + + return spannerExportCode; + } + + private boolean shouldSkipMetricData(MetricData metricData) { + return shouldSkipPointDataDueToProjectId(metricData.getResource()); + } + + private boolean shouldSkipPointDataDueToProjectId(Resource resource) { + return !spannerProjectId.equals(SpannerCloudMonitoringExporterUtils.getProjectId(resource)); + } + + boolean lastExportSkippedData() { + return this.lastExportSkippedData.get(); + } + + private ApiFuture> exportTimeSeriesInBatch( + ProjectName projectName, List timeSeries) { + List> batchResults = new ArrayList<>(); + + for (List batch : Iterables.partition(timeSeries, EXPORT_BATCH_SIZE_LIMIT)) { + CreateTimeSeriesRequest req = + CreateTimeSeriesRequest.newBuilder() + .setName(projectName.toString()) + .addAllTimeSeries(batch) + .build(); + batchResults.add(this.client.createServiceTimeSeriesCallable().futureCall(req)); + } + + return ApiFutures.allAsList(batchResults); + } + + @Override + public CompletableResultCode flush() { + return CompletableResultCode.ofSuccess(); + } + + @Override + public CompletableResultCode shutdown() { + if (client.isShutdown()) { + logger.log(Level.WARNING, "shutdown is called multiple times"); + return CompletableResultCode.ofSuccess(); + } + CompletableResultCode shutdownResult = new CompletableResultCode(); + try { + client.shutdown(); + shutdownResult.succeed(); + } catch (Throwable e) { + logger.log(Level.WARNING, "failed to shutdown the monitoring client", e); + shutdownResult.fail(); + } + return shutdownResult; + } + + /** + * For Google Cloud Monitoring always return CUMULATIVE to keep track of the cumulative value of a + * metric over time. 
+ */ + @Override + public AggregationTemporality getAggregationTemporality(@Nonnull InstrumentType instrumentType) { + return AggregationTemporality.CUMULATIVE; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerCloudMonitoringExporterUtils.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerCloudMonitoringExporterUtils.java new file mode 100644 index 000000000000..e6afa86749fd --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerCloudMonitoringExporterUtils.java @@ -0,0 +1,317 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.api.MetricDescriptor.MetricKind.CUMULATIVE; +import static com.google.api.MetricDescriptor.MetricKind.GAUGE; +import static com.google.api.MetricDescriptor.MetricKind.UNRECOGNIZED; +import static com.google.api.MetricDescriptor.ValueType.DISTRIBUTION; +import static com.google.api.MetricDescriptor.ValueType.DOUBLE; +import static com.google.api.MetricDescriptor.ValueType.INT64; +import static com.google.cloud.spanner.BuiltInMetricsConstant.ALLOWED_EXEMPLARS_ATTRIBUTES; +import static com.google.cloud.spanner.BuiltInMetricsConstant.GAX_METER_NAME; +import static com.google.cloud.spanner.BuiltInMetricsConstant.GRPC_METER_NAME; +import static com.google.cloud.spanner.BuiltInMetricsConstant.PROJECT_ID_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.SPANNER_METER_NAME; +import static com.google.cloud.spanner.BuiltInMetricsConstant.SPANNER_PROMOTED_RESOURCE_LABELS; +import static com.google.cloud.spanner.BuiltInMetricsConstant.SPANNER_RESOURCE_TYPE; + +import com.google.api.Distribution; +import com.google.api.Distribution.BucketOptions; +import com.google.api.Distribution.BucketOptions.Explicit; +import com.google.api.Metric; +import com.google.api.MetricDescriptor.MetricKind; +import com.google.api.MetricDescriptor.ValueType; +import com.google.api.MonitoredResource; +import com.google.monitoring.v3.DroppedLabels; +import com.google.monitoring.v3.Point; +import com.google.monitoring.v3.SpanContext; +import com.google.monitoring.v3.TimeInterval; +import com.google.monitoring.v3.TimeSeries; +import com.google.monitoring.v3.TypedValue; +import com.google.protobuf.Any; +import com.google.protobuf.Timestamp; +import com.google.protobuf.util.Timestamps; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.DoubleExemplarData; +import 
io.opentelemetry.sdk.metrics.data.DoublePointData; +import io.opentelemetry.sdk.metrics.data.ExemplarData; +import io.opentelemetry.sdk.metrics.data.HistogramData; +import io.opentelemetry.sdk.metrics.data.HistogramPointData; +import io.opentelemetry.sdk.metrics.data.LongExemplarData; +import io.opentelemetry.sdk.metrics.data.LongPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.data.MetricDataType; +import io.opentelemetry.sdk.metrics.data.PointData; +import io.opentelemetry.sdk.metrics.data.SumData; +import io.opentelemetry.sdk.resources.Resource; +import java.util.ArrayList; +import java.util.List; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.stream.Collectors; + +class SpannerCloudMonitoringExporterUtils { + + private static final Logger logger = + Logger.getLogger(SpannerCloudMonitoringExporterUtils.class.getName()); + + private SpannerCloudMonitoringExporterUtils() {} + + static String getProjectId(Resource resource) { + return resource.getAttributes().get(PROJECT_ID_KEY); + } + + static List convertToSpannerTimeSeries( + List collection, String projectId) { + List allTimeSeries = new ArrayList<>(); + + for (MetricData metricData : collection) { + // Get metrics data from GAX library, GRPC library and Spanner library + if (!(metricData.getInstrumentationScopeInfo().getName().equals(GAX_METER_NAME) + || metricData.getInstrumentationScopeInfo().getName().equals(SPANNER_METER_NAME) + || metricData.getInstrumentationScopeInfo().getName().equals(GRPC_METER_NAME))) { + // Filter out metric data for instruments that are not part of the spanner metrics list + continue; + } + + // Create MonitoredResource Builder + MonitoredResource.Builder monitoredResourceBuilder = + MonitoredResource.newBuilder().setType(SPANNER_RESOURCE_TYPE); + + Attributes resourceAttributes = metricData.getResource().getAttributes(); + for (AttributeKey key : resourceAttributes.asMap().keySet()) { + 
monitoredResourceBuilder.putLabels( + key.getKey(), String.valueOf(resourceAttributes.get(key))); + } + + metricData.getData().getPoints().stream() + .map( + pointData -> + convertPointToSpannerTimeSeries( + metricData, pointData, monitoredResourceBuilder, projectId)) + .forEach(allTimeSeries::add); + } + return allTimeSeries; + } + + private static TimeSeries convertPointToSpannerTimeSeries( + MetricData metricData, + PointData pointData, + MonitoredResource.Builder monitoredResourceBuilder, + String projectId) { + MetricKind metricKind = convertMetricKind(metricData); + TimeSeries.Builder builder = + TimeSeries.newBuilder() + .setMetricKind(metricKind) + .setValueType(convertValueType(metricData.getType())); + Metric.Builder metricBuilder = Metric.newBuilder().setType(metricData.getName()); + + Attributes attributes = pointData.getAttributes(); + + for (AttributeKey key : attributes.asMap().keySet()) { + if (SPANNER_PROMOTED_RESOURCE_LABELS.contains(key)) { + monitoredResourceBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key))); + } else { + // Replace metric label names by converting "." to "_" since Cloud Monitoring does not + // support labels containing "." + metricBuilder.putLabels( + key.getKey().replace(".", "_"), String.valueOf(attributes.get(key))); + } + } + + // Add common labels like "client_name" and "client_uid" for all the exported metrics. + metricBuilder.putAllLabels(BuiltInMetricsProvider.INSTANCE.createClientAttributes()); + + builder.setResource(monitoredResourceBuilder.build()); + builder.setMetric(metricBuilder.build()); + + TimeInterval timeInterval = + TimeInterval.newBuilder() + .setStartTime( + // For gauge metrics, the start and end time should be the same. + metricKind == MetricKind.GAUGE + ? 
Timestamps.fromNanos(pointData.getEpochNanos()) + : Timestamps.fromNanos(pointData.getStartEpochNanos())) + .setEndTime(Timestamps.fromNanos(pointData.getEpochNanos())) + .build(); + + builder.addPoints(createPoint(metricData.getType(), pointData, timeInterval, projectId)); + + return builder.build(); + } + + private static MetricKind convertMetricKind(MetricData metricData) { + switch (metricData.getType()) { + case HISTOGRAM: + case EXPONENTIAL_HISTOGRAM: + return convertHistogramType(metricData.getHistogramData()); + case LONG_GAUGE: + case DOUBLE_GAUGE: + return GAUGE; + case LONG_SUM: + return convertSumDataType(metricData.getLongSumData()); + case DOUBLE_SUM: + return convertSumDataType(metricData.getDoubleSumData()); + default: + return UNRECOGNIZED; + } + } + + private static MetricKind convertHistogramType(HistogramData histogramData) { + if (histogramData.getAggregationTemporality() == AggregationTemporality.CUMULATIVE) { + return CUMULATIVE; + } + return UNRECOGNIZED; + } + + private static MetricKind convertSumDataType(SumData sum) { + if (!sum.isMonotonic()) { + return GAUGE; + } + if (sum.getAggregationTemporality() == AggregationTemporality.CUMULATIVE) { + return CUMULATIVE; + } + return UNRECOGNIZED; + } + + private static ValueType convertValueType(MetricDataType metricDataType) { + switch (metricDataType) { + case LONG_GAUGE: + case LONG_SUM: + return INT64; + case DOUBLE_GAUGE: + case DOUBLE_SUM: + return DOUBLE; + case HISTOGRAM: + case EXPONENTIAL_HISTOGRAM: + return DISTRIBUTION; + default: + return ValueType.UNRECOGNIZED; + } + } + + private static Point createPoint( + MetricDataType type, PointData pointData, TimeInterval timeInterval, String projectId) { + Point.Builder builder = Point.newBuilder().setInterval(timeInterval); + switch (type) { + case HISTOGRAM: + case EXPONENTIAL_HISTOGRAM: + return builder + .setValue( + TypedValue.newBuilder() + .setDistributionValue( + convertHistogramData((HistogramPointData) pointData, projectId)) + 
.build()) + .build(); + case DOUBLE_GAUGE: + case DOUBLE_SUM: + return builder + .setValue( + TypedValue.newBuilder() + .setDoubleValue(((DoublePointData) pointData).getValue()) + .build()) + .build(); + case LONG_GAUGE: + case LONG_SUM: + return builder + .setValue(TypedValue.newBuilder().setInt64Value(((LongPointData) pointData).getValue())) + .build(); + default: + logger.log(Level.WARNING, "unsupported metric type"); + return builder.build(); + } + } + + private static Distribution convertHistogramData(HistogramPointData pointData, String projectId) { + return Distribution.newBuilder() + .setCount(pointData.getCount()) + .setMean(pointData.getCount() == 0L ? 0.0D : pointData.getSum() / pointData.getCount()) + .setBucketOptions( + BucketOptions.newBuilder() + .setExplicitBuckets(Explicit.newBuilder().addAllBounds(pointData.getBoundaries()))) + .addAllBucketCounts(pointData.getCounts()) + .addAllExemplars( + pointData.getExemplars().stream() + .map(e -> mapExemplar(e, projectId)) + .collect(Collectors.toList())) + .build(); + } + + private static Distribution.Exemplar mapExemplar(ExemplarData exemplar, String projectId) { + double value = 0; + if (exemplar instanceof DoubleExemplarData) { + value = ((DoubleExemplarData) exemplar).getValue(); + } else if (exemplar instanceof LongExemplarData) { + value = ((LongExemplarData) exemplar).getValue(); + } + + Distribution.Exemplar.Builder exemplarBuilder = + Distribution.Exemplar.newBuilder() + .setValue(value) + .setTimestamp(mapTimestamp(exemplar.getEpochNanos())); + if (exemplar.getSpanContext().isValid()) { + exemplarBuilder.addAttachments( + Any.pack( + SpanContext.newBuilder() + .setSpanName( + makeSpanName( + projectId, + exemplar.getSpanContext().getTraceId(), + exemplar.getSpanContext().getSpanId())) + .build())); + } + if (!exemplar.getFilteredAttributes().isEmpty()) { + exemplarBuilder.addAttachments( + Any.pack(mapFilteredAttributes(exemplar.getFilteredAttributes()))); + } + return exemplarBuilder.build(); + 
} + + static final long NANO_PER_SECOND = (long) 1e9; + + private static Timestamp mapTimestamp(long epochNanos) { + return Timestamp.newBuilder() + .setSeconds(epochNanos / NANO_PER_SECOND) + .setNanos((int) (epochNanos % NANO_PER_SECOND)) + .build(); + } + + private static String makeSpanName(String projectId, String traceId, String spanId) { + return String.format("projects/%s/traces/%s/spans/%s", projectId, traceId, spanId); + } + + private static DroppedLabels mapFilteredAttributes(Attributes attributes) { + DroppedLabels.Builder labels = DroppedLabels.newBuilder(); + attributes.forEach( + (k, v) -> { + String key = cleanAttributeKey(k.getKey()); + if (ALLOWED_EXEMPLARS_ATTRIBUTES.contains(key)) { + labels.putLabel(key, v.toString()); + } + }); + return labels.build(); + } + + private static String cleanAttributeKey(String key) { + // . is commonly used in OTel but disallowed in GCM label names, + // https://cloud.google.com/monitoring/api/ref_v3/rest/v3/LabelDescriptor#:~:text=Matches%20the%20following%20regular%20expression%3A + return key.replace('.', '_'); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerException.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerException.java new file mode 100644 index 000000000000..0829cc35d62a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerException.java @@ -0,0 +1,236 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ErrorDetails; +import com.google.cloud.grpc.BaseGrpcServiceException; +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import com.google.protobuf.util.Durations; +import com.google.rpc.ResourceInfo; +import com.google.rpc.RetryInfo; +import io.grpc.Metadata; +import io.grpc.Status; +import io.grpc.protobuf.ProtoUtils; +import java.util.Map; +import javax.annotation.Nullable; + +/** Base exception type for all exceptions produced by the Cloud Spanner service. */ +public class SpannerException extends BaseGrpcServiceException { + /** Base exception type for NOT_FOUND exceptions for known resource types. */ + public abstract static class ResourceNotFoundException extends SpannerException { + private final ResourceInfo resourceInfo; + + ResourceNotFoundException( + DoNotConstructDirectly token, + @Nullable String message, + ResourceInfo resourceInfo, + @Nullable Throwable cause, + @Nullable ApiException apiException) { + super(token, ErrorCode.NOT_FOUND, /* retryable */ false, message, cause, apiException); + this.resourceInfo = resourceInfo; + } + + public String getResourceName() { + return resourceInfo.getResourceName(); + } + } + + private static final long serialVersionUID = 20150916L; + private static final Metadata.Key KEY_RETRY_INFO = + ProtoUtils.keyForProto(RetryInfo.getDefaultInstance()); + private static final String PG_ERR_CODE_KEY = "pg_sqlerrcode"; + + private final ErrorCode code; + private final ApiException apiException; + private final XGoogSpannerRequestId requestId; + private String statement; + + /** Private constructor. Use {@link SpannerExceptionFactory} to create instances. 
*/ + SpannerException( + DoNotConstructDirectly token, + ErrorCode code, + boolean retryable, + @Nullable String message, + @Nullable Throwable cause) { + this(token, code, retryable, message, cause, null); + } + + /** Private constructor. Use {@link SpannerExceptionFactory} to create instances. */ + SpannerException( + DoNotConstructDirectly token, + ErrorCode code, + boolean retryable, + @Nullable String message, + @Nullable Throwable cause, + @Nullable ApiException apiException) { + super(message, cause, code.getCode(), retryable); + if (token != DoNotConstructDirectly.ALLOWED) { + throw new AssertionError("Do not construct directly: use SpannerExceptionFactory"); + } + this.code = Preconditions.checkNotNull(code); + this.apiException = apiException; + this.requestId = extractRequestId(cause); + } + + @Override + public String getMessage() { + if (this.statement == null) { + return super.getMessage(); + } + return String.format("%s - Statement: '%s'", super.getMessage(), this.statement); + } + + @Override + public String toString() { + if (this.requestId == null) { + return super.toString(); + } + return super.toString() + " - RequestId: " + this.requestId; + } + + /** Returns the error code associated with this exception. */ + public ErrorCode getErrorCode() { + return code; + } + + /** + * Returns the PostgreSQL SQLState error code that is encoded in this exception, or null if this + * {@link SpannerException} does not include a PostgreSQL error code. + */ + public String getPostgreSQLErrorCode() { + ErrorDetails details = getErrorDetails(); + if (details == null || details.getErrorInfo() == null) { + return null; + } + return details.getErrorInfo().getMetadataOrDefault(PG_ERR_CODE_KEY, null); + } + + public String getRequestId() { + if (requestId == null) { + return ""; + } + return requestId.toString(); + } + + enum DoNotConstructDirectly { + ALLOWED + } + + /** + * Return the retry delay for operation in milliseconds. 
Return -1 if this does not specify any + * retry delay. + */ + public long getRetryDelayInMillis() { + return extractRetryDelay(this.getCause()); + } + + static long extractRetryDelay(Throwable cause) { + if (cause != null) { + Metadata trailers = Status.trailersFromThrowable(cause); + if (trailers != null && trailers.containsKey(KEY_RETRY_INFO)) { + RetryInfo retryInfo = trailers.get(KEY_RETRY_INFO); + if (retryInfo != null && retryInfo.hasRetryDelay()) { + return Durations.toMillis(retryInfo.getRetryDelay()); + } + } + } + return -1L; + } + + @Nullable + static XGoogSpannerRequestId extractRequestId(Throwable cause) { + if (cause != null) { + Metadata trailers = Status.trailersFromThrowable(cause); + if (trailers != null && trailers.containsKey(XGoogSpannerRequestId.REQUEST_ID_HEADER_KEY)) { + String requestId = trailers.get(XGoogSpannerRequestId.REQUEST_ID_HEADER_KEY); + if (!Strings.isNullOrEmpty(requestId)) { + return XGoogSpannerRequestId.of(requestId); + } + } + } + return null; + } + + /** + * Checks the underlying reason of the exception and if it's {@link ApiException} then return the + * reason otherwise null. + * + * @see Reason + * @return the reason of an error. + */ + public String getReason() { + if (this.apiException != null) { + return this.apiException.getReason(); + } + return null; + } + + /** + * Checks the underlying reason of the exception and if it's {@link ApiException} then return the + * specific domain otherwise null. + * + * @see Domain + * @return the logical grouping to which the "reason" belongs. + */ + public String getDomain() { + if (this.apiException != null) { + return this.apiException.getDomain(); + } + return null; + } + + /** + * Checks the underlying reason of the exception and if it's {@link ApiException} then return a + * map of key-value pairs otherwise null. + * + * @see Metadata + * @return the map of additional structured details about an error. 
+ */ + public Map getMetadata() { + if (this.apiException != null) { + return this.apiException.getMetadata(); + } + return null; + } + + /** + * Checks the underlying reason of the exception and if it's {@link ApiException} then return the + * ErrorDetails otherwise null. + * + * @see Status + * @see Error + * Details + * @return An object containing getters for structured objects from error_details.proto. + */ + public ErrorDetails getErrorDetails() { + if (this.apiException != null) { + return this.apiException.getErrorDetails(); + } + return null; + } + + void setStatement(String statement) { + this.statement = statement; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerExceptionFactory.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerExceptionFactory.java new file mode 100644 index 000000000000..185f98b54332 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerExceptionFactory.java @@ -0,0 +1,422 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.MissingDefaultSequenceKindException.isMissingDefaultSequenceKindException; +import static com.google.cloud.spanner.TransactionMutationLimitExceededException.isTransactionMutationLimitException; + +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ErrorDetails; +import com.google.api.gax.rpc.WatchdogTimeoutException; +import com.google.cloud.spanner.SpannerException.DoNotConstructDirectly; +import com.google.common.base.MoreObjects; +import com.google.common.base.Predicate; +import com.google.rpc.ErrorInfo; +import com.google.rpc.ResourceInfo; +import com.google.rpc.RetryInfo; +import io.grpc.Context; +import io.grpc.Metadata; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.grpc.protobuf.ProtoUtils; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; +import javax.annotation.Nullable; + +/** + * A factory for creating instances of {@link SpannerException} and its subtypes. All creation of + * these exceptions is directed through the factory. This ensures that particular types of errors + * are always expressed as the same concrete exception type. For example, exceptions of type {@link + * ErrorCode#ABORTED} are always represented by {@link AbortedException}. 
+ */ +public final class SpannerExceptionFactory { + + static final String SESSION_RESOURCE_TYPE = "type.googleapis.com/google.spanner.v1.Session"; + static final String DATABASE_RESOURCE_TYPE = + "type.googleapis.com/google.spanner.admin.database.v1.Database"; + static final String INSTANCE_RESOURCE_TYPE = + "type.googleapis.com/google.spanner.admin.instance.v1.Instance"; + private static final Metadata.Key KEY_RESOURCE_INFO = + ProtoUtils.keyForProto(ResourceInfo.getDefaultInstance()); + private static final Metadata.Key KEY_ERROR_INFO = + ProtoUtils.keyForProto(ErrorInfo.getDefaultInstance()); + + public static SpannerException newSpannerException(ErrorCode code, @Nullable String message) { + return newSpannerException(code, message, null); + } + + public static SpannerException newSpannerException( + ErrorCode code, @Nullable String message, @Nullable Throwable cause) { + return newSpannerExceptionPreformatted(code, formatMessage(code, message), cause, null); + } + + public static SpannerException propagateInterrupt(InterruptedException e) { + Thread.currentThread().interrupt(); + return SpannerExceptionFactory.newSpannerException(ErrorCode.CANCELLED, "Interrupted", e); + } + + /** + * Transforms a {@code TimeoutException} to a {@code SpannerException}. + * + *

+   * 
+   * try {
+   *   Spanner spanner = SpannerOptions.getDefaultInstance();
+   *   spanner
+   *       .getDatabaseAdminClient()
+   *       .createDatabase("[INSTANCE_ID]", "[DATABASE_ID]", [STATEMENTS])
+   *       .get();
+   * } catch (TimeoutException e) {
+   *   propagateTimeout(e);
+   * }
+   * 
+   * 
+ */ + public static SpannerException propagateTimeout(TimeoutException e) { + return SpannerExceptionFactory.newSpannerException( + ErrorCode.DEADLINE_EXCEEDED, "Operation did not complete in the given time", e); + } + + /** + * Converts the given {@link Throwable} to a {@link SpannerException}. If t is + * already a (subclass of a) {@link SpannerException}, t is returned unaltered. + * Otherwise, a new {@link SpannerException} is created with t as its cause. + */ + public static SpannerException asSpannerException(Throwable t) { + if (t instanceof SpannerException) { + return (SpannerException) t; + } + return newSpannerException(t); + } + + /** + * Creates a new exception based on {@code cause}. + * + *

Intended for internal library use; user code should use {@link + * #newSpannerException(ErrorCode, String)} instead of this method. + */ + public static SpannerException newSpannerException(Throwable cause) { + return newSpannerException(null, cause); + } + + public static SpannerBatchUpdateException newSpannerBatchUpdateException( + ErrorCode code, String message, long[] updateCounts) { + DoNotConstructDirectly token = DoNotConstructDirectly.ALLOWED; + SpannerException cause = null; + if (isTransactionMutationLimitException(code, message)) { + cause = new TransactionMutationLimitExceededException(token, code, message, null, null); + } + return new SpannerBatchUpdateException(token, code, message, updateCounts, cause); + } + + /** Constructs a specific error that */ + public static DmlBatchUpdateCountVerificationFailedException + newDmlBatchUpdateCountVerificationFailedException(long[] expected, long[] actual) { + return new DmlBatchUpdateCountVerificationFailedException( + DoNotConstructDirectly.ALLOWED, expected, actual); + } + + /** + * Constructs a specific aborted exception that should only be thrown by a connection after an + * internal retry aborted due to concurrent modifications. + */ + public static AbortedDueToConcurrentModificationException + newAbortedDueToConcurrentModificationException(AbortedException cause) { + return new AbortedDueToConcurrentModificationException( + DoNotConstructDirectly.ALLOWED, + "The transaction was aborted and could not be retried due to a concurrent modification", + cause); + } + + /** + * Constructs a specific aborted exception that should only be thrown by a connection after an + * internal retry aborted because a database call caused an exception that did not happen during + * the original attempt. 
+ */ + public static AbortedDueToConcurrentModificationException + newAbortedDueToConcurrentModificationException( + AbortedException cause, SpannerException databaseError) { + return new AbortedDueToConcurrentModificationException( + DoNotConstructDirectly.ALLOWED, + "The transaction was aborted and could not be retried due to a database error during the" + + " retry", + cause, + databaseError); + } + + /** + * Constructs a new {@link AbortedDueToConcurrentModificationException} that can be re-thrown for + * a transaction that had already been aborted, but that the client application tried to use for + * additional statements. + */ + public static AbortedDueToConcurrentModificationException + newAbortedDueToConcurrentModificationException( + AbortedDueToConcurrentModificationException cause) { + return new AbortedDueToConcurrentModificationException( + DoNotConstructDirectly.ALLOWED, + "This transaction has already been aborted and could not be retried due to a concurrent" + + " modification. Rollback this transaction to start a new one.", + cause); + } + + /** + * Creates a new exception based on {@code cause}. If {@code cause} indicates cancellation, {@code + * context} will be inspected to establish the type of cancellation. + * + *

Intended for internal library use; user code should use {@link + * #newSpannerException(ErrorCode, String)} instead of this method. + */ + public static SpannerException newSpannerException(@Nullable Context context, Throwable cause) { + if (cause instanceof SpannerException) { + SpannerException e = (SpannerException) cause; + return newSpannerExceptionPreformatted(e.getErrorCode(), e.getMessage(), e, null); + } else if (cause instanceof CancellationException) { + return newSpannerExceptionForCancellation(context, cause); + } else if (cause instanceof ApiException) { + return fromApiException((ApiException) cause); + } + // Extract gRPC status. This will produce "UNKNOWN" for non-gRPC exceptions. + Status status = Status.fromThrowable(cause); + if (status.getCode() == Status.Code.CANCELLED) { + return newSpannerExceptionForCancellation(context, cause); + } + return newSpannerException(ErrorCode.fromGrpcStatus(status), cause.getMessage(), cause); + } + + public static RuntimeException causeAsRunTimeException(ExecutionException executionException) { + // Propagate the underlying exception as a RuntimeException (SpannerException is also a + // RuntimeException). + if (executionException.getCause() instanceof RuntimeException) { + throw (RuntimeException) executionException.getCause(); + } + throw asSpannerException(executionException.getCause()); + } + + /** + * Creates a new SpannerException that indicates that the RPC or transaction should be retried on + * a different gRPC channel. This is an experimental feature that can be removed in the future. + * The exception should not be surfaced to the client application, and should instead be caught + * and handled in the client library. 
+ */ + static SpannerException newRetryOnDifferentGrpcChannelException( + String message, int channel, Throwable cause) { + return new RetryOnDifferentGrpcChannelException(message, channel, cause); + } + + static SpannerException newSpannerExceptionForCancellation( + @Nullable Context context, @Nullable Throwable cause) { + if (context != null && context.isCancelled()) { + Throwable cancellationCause = context.cancellationCause(); + Throwable throwable = + cause == null && cancellationCause == null + ? null + : MoreObjects.firstNonNull(cause, cancellationCause); + if (cancellationCause instanceof TimeoutException) { + return newSpannerException( + ErrorCode.DEADLINE_EXCEEDED, "Current context exceeded deadline", throwable); + } else { + return newSpannerException(ErrorCode.CANCELLED, "Current context was cancelled", throwable); + } + } + return newSpannerException( + ErrorCode.CANCELLED, cause == null ? "Cancelled" : cause.getMessage(), cause); + } + + private static String formatMessage(ErrorCode code, @Nullable String message) { + if (message == null) { + return code.toString(); + } + // gRPC exceptions already start with the code, which happens to be the same prefix we use. + return message.startsWith(code.toString()) ? 
message : code + ": " + message; + } + + private static ResourceInfo extractResourceInfo(Throwable cause) { + if (cause != null) { + Metadata trailers = Status.trailersFromThrowable(cause); + if (trailers != null) { + return trailers.get(KEY_RESOURCE_INFO); + } + } + return null; + } + + private static ErrorInfo extractErrorInfo(Throwable cause, ApiException apiException) { + if (apiException != null && apiException.getErrorDetails() != null) { + return apiException.getErrorDetails().getErrorInfo(); + } + if (cause != null) { + Metadata trailers = Status.trailersFromThrowable(cause); + if (trailers != null) { + return trailers.get(KEY_ERROR_INFO); + } + } + return null; + } + + static ErrorDetails extractErrorDetails(Throwable cause, ApiException apiException) { + if (apiException != null && apiException.getErrorDetails() != null) { + return apiException.getErrorDetails(); + } + + Throwable prevCause = null; + while (cause != null && cause != prevCause) { + if (cause instanceof ApiException) { + return ((ApiException) cause).getErrorDetails(); + } + if (cause instanceof SpannerException) { + return ((SpannerException) cause).getErrorDetails(); + } + prevCause = cause; + cause = cause.getCause(); + } + return null; + } + + /** + * Creates a {@link StatusRuntimeException} that contains a {@link RetryInfo} with the specified + * retry delay. 
+ */ + static StatusRuntimeException createAbortedExceptionWithRetryDelay( + String message, Throwable cause, long retryDelaySeconds, int retryDelayNanos) { + Metadata.Key key = ProtoUtils.keyForProto(RetryInfo.getDefaultInstance()); + Metadata trailers = new Metadata(); + RetryInfo retryInfo = + RetryInfo.newBuilder() + .setRetryDelay( + com.google.protobuf.Duration.newBuilder() + .setNanos(retryDelayNanos) + .setSeconds(retryDelaySeconds)) + .build(); + trailers.put(key, retryInfo); + return io.grpc.Status.ABORTED + .withDescription(message) + .withCause(cause) + .asRuntimeException(trailers); + } + + static SpannerException newSpannerExceptionPreformatted( + ErrorCode code, + @Nullable String message, + @Nullable Throwable cause, + @Nullable ApiException apiException) { + // This is the one place in the codebase that is allowed to call constructors directly. + DoNotConstructDirectly token = DoNotConstructDirectly.ALLOWED; + switch (code) { + case ABORTED: + return new AbortedException(token, message, cause, apiException); + case RESOURCE_EXHAUSTED: + ErrorInfo info = extractErrorInfo(cause, apiException); + if (info != null + && info.getMetadataMap() + .containsKey(AdminRequestsPerMinuteExceededException.ADMIN_REQUESTS_LIMIT_KEY) + && AdminRequestsPerMinuteExceededException.ADMIN_REQUESTS_LIMIT_VALUE.equals( + info.getMetadataMap() + .get(AdminRequestsPerMinuteExceededException.ADMIN_REQUESTS_LIMIT_KEY))) { + return new AdminRequestsPerMinuteExceededException(token, message, cause, apiException); + } + case NOT_FOUND: + ResourceInfo resourceInfo = extractResourceInfo(cause); + if (resourceInfo != null) { + switch (resourceInfo.getResourceType()) { + case SESSION_RESOURCE_TYPE: + return new SessionNotFoundException( + token, message, resourceInfo, cause, apiException); + case DATABASE_RESOURCE_TYPE: + return new DatabaseNotFoundException( + token, message, resourceInfo, cause, apiException); + case INSTANCE_RESOURCE_TYPE: + return new InstanceNotFoundException( + 
token, message, resourceInfo, cause, apiException); + } + } + case INVALID_ARGUMENT: + if (isTransactionMutationLimitException(cause, apiException)) { + return new TransactionMutationLimitExceededException( + token, code, message, cause, apiException); + } + if (isMissingDefaultSequenceKindException(apiException)) { + return new MissingDefaultSequenceKindException(token, code, message, cause, apiException); + } + // Fall through to the default. + default: + return new SpannerException( + token, code, isRetryable(code, cause), message, cause, apiException); + } + } + + static SpannerException newSpannerExceptionPreformatted( + ErrorCode code, @Nullable String message, @Nullable Throwable cause) { + return newSpannerExceptionPreformatted(code, message, cause, null); + } + + private static SpannerException fromApiException(ApiException exception) { + Status.Code code; + if (exception.getStatusCode() instanceof GrpcStatusCode) { + code = ((GrpcStatusCode) exception.getStatusCode()).getTransportCode(); + } else if (exception instanceof WatchdogTimeoutException) { + code = Status.Code.DEADLINE_EXCEEDED; + } else { + code = Status.Code.UNKNOWN; + } + ErrorCode errorCode = ErrorCode.fromGrpcStatus(Status.fromCode(code)); + + return SpannerExceptionFactory.newSpannerExceptionPreformatted( + errorCode, + formatMessage(errorCode, exception.getMessage()), + exception.getCause(), + exception); + } + + private static boolean isRetryable(ErrorCode code, @Nullable Throwable cause) { + switch (code) { + case INTERNAL: + return hasCauseMatching(cause, Matchers.isRetryableInternalError); + case UNAVAILABLE: + // SSLHandshakeException is (probably) not retryable, as it is an indication that the server + // certificate was not accepted by the client. + // Channel shutdown is also not a retryable exception. 
+ return !(hasCauseMatching(cause, Matchers.isSSLHandshakeException) + || hasCauseMatching(cause, Matchers.IS_CHANNEL_SHUTDOWN_EXCEPTION)); + case RESOURCE_EXHAUSTED: + return SpannerException.extractRetryDelay(cause) > 0; + default: + return false; + } + } + + private static boolean hasCauseMatching( + @Nullable Throwable cause, Predicate matcher) { + while (cause != null) { + if (matcher.apply(cause)) { + return true; + } + cause = cause.getCause(); + } + return false; + } + + private static class Matchers { + + static final Predicate isRetryableInternalError = new IsRetryableInternalError(); + static final Predicate isSSLHandshakeException = new IsSslHandshakeException(); + + static final Predicate IS_CHANNEL_SHUTDOWN_EXCEPTION = + new IsChannelShutdownException(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerFactory.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerFactory.java new file mode 100644 index 000000000000..df0e43127e89 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerFactory.java @@ -0,0 +1,22 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.cloud.ServiceFactory; + +/** Factory to create instance of {@code Spanner}. 
*/ +public interface SpannerFactory extends ServiceFactory {} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerImpl.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerImpl.java new file mode 100644 index 000000000000..c201924dfbe7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerImpl.java @@ -0,0 +1,424 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.gax.core.ExecutorProvider; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.paging.Page; +import com.google.cloud.BaseService; +import com.google.cloud.PageImpl; +import com.google.cloud.PageImpl.NextPageFetcher; +import com.google.cloud.grpc.GrpcTransportOptions; +import com.google.cloud.spanner.SessionClient.SessionId; +import com.google.cloud.spanner.SpannerOptions.CloseableExecutorProvider; +import com.google.cloud.spanner.admin.database.v1.stub.DatabaseAdminStubSettings; +import com.google.cloud.spanner.admin.instance.v1.stub.InstanceAdminStubSettings; +import com.google.cloud.spanner.spi.v1.GapicSpannerRpc; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.cloud.spanner.spi.v1.SpannerRpc.Paginated; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; +import io.opencensus.trace.Tracing; +import io.opentelemetry.api.common.Attributes; +import java.io.IOException; +import java.time.Instant; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.locks.ReentrantLock; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nullable; +import javax.annotation.concurrent.GuardedBy; + +/** Default implementation of the Cloud Spanner interface. 
*/ +class SpannerImpl extends BaseService implements Spanner { + private static final Logger logger = Logger.getLogger(SpannerImpl.class.getName()); + final TraceWrapper tracer = + new TraceWrapper( + Tracing.getTracer(), + getOptions() + .getOpenTelemetry() + .getTracer( + MetricRegistryConstants.INSTRUMENTATION_SCOPE, + GaxProperties.getLibraryVersion(this.getOptions().getClass())), + getOptions().isEnableExtendedTracing()); + + static final String CREATE_MULTIPLEXED_SESSION = "CloudSpannerOperation.CreateMultiplexedSession"; + static final String CREATE_SESSION = "CloudSpannerOperation.CreateSession"; + static final String BATCH_CREATE_SESSIONS = "CloudSpannerOperation.BatchCreateSessions"; + static final String BATCH_CREATE_SESSIONS_REQUEST = + "CloudSpannerOperation.BatchCreateSessionsRequest"; + static final String DELETE_SESSION = "CloudSpannerOperation.DeleteSession"; + static final String BEGIN_TRANSACTION = "CloudSpannerOperation.BeginTransaction"; + static final String COMMIT = "CloudSpannerOperation.Commit"; + static final String QUERY = "CloudSpannerOperation.ExecuteStreamingQuery"; + static final String READ = "CloudSpannerOperation.ExecuteStreamingRead"; + static final String BATCH_WRITE = "CloudSpannerOperation.BatchWrite"; + static final String UPDATE = "CloudSpannerOperation.ExecuteUpdate"; + static final String BATCH_UPDATE = "CloudSpannerOperation.BatchUpdate"; + + private static final Object CLIENT_ID_LOCK = new Object(); + + @GuardedBy("CLIENT_ID_LOCK") + private static final Map CLIENT_IDS = new HashMap<>(); + + private static String nextDatabaseClientId(DatabaseId databaseId) { + synchronized (CLIENT_ID_LOCK) { + Long id = CLIENT_IDS.get(databaseId); + if (id == null) { + id = 1L; + } else { + id++; + } + CLIENT_IDS.put(databaseId, id); + return String.format("client-%d", id); + } + } + + private final SpannerRpc gapicRpc; + + @GuardedBy("this") + private final Map dbClients = new HashMap<>(); + + @GuardedBy("dbBatchClientLock") + private 
final Map dbBatchClients = new HashMap<>(); + + private final ReentrantLock dbBatchClientLock = new ReentrantLock(); + + private final CloseableExecutorProvider asyncExecutorProvider; + + @GuardedBy("this") + private final Map sessionClients = new HashMap<>(); + + private final DatabaseAdminClient dbAdminClient; + private final InstanceAdminClient instanceClient; + + /** + * Exception class used to track the stack trace at the point when a Spanner instance is closed. + * This exception will be thrown if a user tries to use any resources that were returned by this + * Spanner instance after the instance has been closed. This makes it easier to track down the + * code that (accidentally) closed the Spanner instance. + */ + static final class ClosedException extends RuntimeException { + private static final long serialVersionUID = 1451131180314064914L; + + ClosedException() { + super("Spanner client was closed at " + Instant.now()); + } + } + + @GuardedBy("this") + private ClosedException closedException; + + @VisibleForTesting + SpannerImpl(SpannerRpc gapicRpc, SpannerOptions options) { + super(options); + this.gapicRpc = gapicRpc; + this.asyncExecutorProvider = + MoreObjects.firstNonNull( + options.getAsyncExecutorProvider(), + SpannerOptions.createDefaultAsyncExecutorProvider()); + this.dbAdminClient = new DatabaseAdminClientImpl(options.getProjectId(), gapicRpc); + this.instanceClient = + new InstanceAdminClientImpl(options.getProjectId(), gapicRpc, dbAdminClient); + logSpannerOptions(options); + } + + SpannerImpl(SpannerOptions options) { + this(options.getSpannerRpcV1(), options); + } + + private void logSpannerOptions(SpannerOptions options) { + logger.log( + Level.INFO, + "Spanner options: " + + "\nProject ID: " + + options.getProjectId() + + "\nHost: " + + options.getHost() + + "\nNum gRPC channels: " + + options.getNumChannels() + + "\nLeader aware routing enabled: " + + options.isLeaderAwareRoutingEnabled() + + "\nDirect access enabled: " + + 
options.isEnableDirectAccess() + + "\nActive Tracing Framework: " + + SpannerOptions.getActiveTracingFramework() + + "\nAPI tracing enabled: " + + options.isEnableApiTracing() + + "\nExtended tracing enabled: " + + options.isEnableExtendedTracing() + + "\nEnd to end tracing enabled: " + + options.isEndToEndTracingEnabled() + + "\nBuilt-in metrics enabled: " + + options.isEnableBuiltInMetrics()); + if (options.getSessionPoolOptions() != null) { + logger.log( + Level.INFO, + "Session pool options (deprecated, no longer used): " + + "\nSession pool min sessions: " + + options.getSessionPoolOptions().getMinSessions() + + "\nSession pool max sessions: " + + options.getSessionPoolOptions().getMaxSessions() + + "\nMultiplexed sessions enabled: " + + options.getSessionPoolOptions().getUseMultiplexedSession() + + "\nMultiplexed sessions enabled for RW: " + + options.getSessionPoolOptions().getUseMultiplexedSessionForRW() + + "\nMultiplexed sessions enabled for blind write: " + + options.getSessionPoolOptions().getUseMultiplexedSessionBlindWrite() + + "\nMultiplexed sessions enabled for partitioned ops: " + + options.getSessionPoolOptions().getUseMultiplexedSessionPartitionedOps()); + } + } + + /** Returns the {@link SpannerRpc} of this {@link SpannerImpl} instance. */ + SpannerRpc getRpc() { + return gapicRpc; + } + + /** Returns the default setting for prefetchChunks of this {@link SpannerImpl} instance. */ + int getDefaultPrefetchChunks() { + return getOptions().getPrefetchChunks(); + } + + DecodeMode getDefaultDecodeMode() { + return getOptions().getDecodeMode(); + } + + /** Returns the default query options that should be used for the specified database. */ + QueryOptions getDefaultQueryOptions(DatabaseId databaseId) { + return getOptions().getDefaultQueryOptions(databaseId); + } + + TraceWrapper getTracer() { + return this.tracer; + } + + /** + * Returns the {@link ExecutorProvider} to use for async methods that need a background executor. 
+ */ + public ExecutorProvider getAsyncExecutorProvider() { + return asyncExecutorProvider; + } + + SessionImpl sessionWithId(String name) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(name), "name is null or empty"); + SessionId id = SessionId.of(name); + return getSessionClient(id.getDatabaseId()).sessionWithId(name); + } + + void checkClosed() { + synchronized (this) { + if (closedException != null) { + throw new IllegalStateException("Cloud Spanner client has been closed", closedException); + } + } + } + + SessionClient getSessionClient(DatabaseId db) { + synchronized (this) { + checkClosed(); + if (sessionClients.containsKey(db)) { + return sessionClients.get(db); + } else { + SessionClient client = + new SessionClient( + this, + db, + ((GrpcTransportOptions) getOptions().getTransportOptions()).getExecutorFactory()); + sessionClients.put(db, client); + return client; + } + } + } + + @Override + public DatabaseAdminClient getDatabaseAdminClient() { + return dbAdminClient; + } + + @Override + public com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient + createDatabaseAdminClient() { + try { + final DatabaseAdminStubSettings settings = + Preconditions.checkNotNull(gapicRpc.getDatabaseAdminStubSettings()); + return com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.create( + settings.createStub()); + } catch (IOException ex) { + throw SpannerExceptionFactory.newSpannerException(ex); + } + } + + @Override + public InstanceAdminClient getInstanceAdminClient() { + return instanceClient; + } + + @Override + public com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient + createInstanceAdminClient() { + try { + final InstanceAdminStubSettings settings = + Preconditions.checkNotNull(gapicRpc.getInstanceAdminStubSettings()); + return com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.create( + settings.createStub()); + } catch (IOException ex) { + throw SpannerExceptionFactory.newSpannerException(ex); + } + } + + 
@Override + public DatabaseClient getDatabaseClient(DatabaseId db) { + synchronized (this) { + checkClosed(); + String clientId = null; + if (dbClients.containsKey(db) && !dbClients.get(db).isValid()) { + // Close the invalidated client and remove it. + dbClients.get(db).closeAsync(new ClosedException()); + clientId = dbClients.get(db).clientId; + dbClients.remove(db); + } + if (dbClients.containsKey(db)) { + return dbClients.get(db); + } else { + if (clientId == null) { + clientId = nextDatabaseClientId(db); + } + MultiplexedSessionDatabaseClient multiplexedSessionDatabaseClient = + new MultiplexedSessionDatabaseClient(SpannerImpl.this.getSessionClient(db)); + DatabaseClientImpl dbClient = + createDatabaseClient( + clientId, + multiplexedSessionDatabaseClient, + this.tracer.createDatabaseAttributes(db)); + dbClients.put(db, dbClient); + return dbClient; + } + } + } + + @VisibleForTesting + DatabaseClientImpl createDatabaseClient( + String clientId, + MultiplexedSessionDatabaseClient multiplexedSessionClient, + Attributes databaseAttributes) { + return new DatabaseClientImpl(clientId, multiplexedSessionClient, tracer, databaseAttributes); + } + + @Override + public BatchClient getBatchClient(DatabaseId db) { + this.dbBatchClientLock.lock(); + try { + if (this.dbBatchClients.containsKey(db)) { + return this.dbBatchClients.get(db); + } + BatchClientImpl batchClient = new BatchClientImpl(getSessionClient(db)); + this.dbBatchClients.put(db, batchClient); + return batchClient; + } finally { + this.dbBatchClientLock.unlock(); + } + } + + @Override + public void close() { + close(Long.MAX_VALUE, TimeUnit.MILLISECONDS); + } + + void close(long timeout, TimeUnit unit) { + List> closureFutures; + synchronized (this) { + checkClosed(); + closedException = new ClosedException(); + } + try { + closureFutures = new ArrayList<>(); + for (DatabaseClientImpl dbClient : dbClients.values()) { + closureFutures.add(dbClient.closeAsync(closedException)); + } + dbClients.clear(); + 
Futures.successfulAsList(closureFutures).get(timeout, unit); + } catch (InterruptedException | ExecutionException | TimeoutException e) { + throw SpannerExceptionFactory.newSpannerException(e); + } finally { + for (SessionClient sessionClient : sessionClients.values()) { + sessionClient.close(); + } + sessionClients.clear(); + asyncExecutorProvider.close(); + try { + if (timeout == Long.MAX_VALUE || !(gapicRpc instanceof GapicSpannerRpc)) { + gapicRpc.shutdown(); + } else { + ((GapicSpannerRpc) gapicRpc).shutdownNow(); + } + } catch (RuntimeException e) { + logger.log(Level.WARNING, "Failed to close channels", e); + } + } + } + + @Override + public boolean isClosed() { + synchronized (this) { + return closedException != null; + } + } + + void resetRequestIdCounters() { + gapicRpc.getRequestIdCreator().reset(); + } + + long getRequestIdClientId() { + return gapicRpc.getRequestIdCreator().getClientId(); + } + + /** Helper class for gRPC calls that can return paginated results. */ + abstract static class PageFetcher implements NextPageFetcher { + private String nextPageToken; + + @Override + public Page getNextPage() { + Paginated nextPage = getNextPage(nextPageToken); + this.nextPageToken = nextPage.getNextPageToken(); + List results = new ArrayList<>(); + for (T proto : nextPage.getResults()) { + results.add(fromProto(proto)); + } + return new PageImpl<>(this, nextPageToken, results); + } + + void setNextPageToken(String nextPageToken) { + this.nextPageToken = nextPageToken; + } + + abstract Paginated getNextPage(@Nullable String nextPageToken); + + abstract S fromProto(T proto); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerOptions.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerOptions.java new file mode 100644 index 000000000000..167274418724 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerOptions.java @@ -0,0 +1,2611 @@ 
+/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.api.gax.util.TimeConversionUtils.toJavaTimeDuration; +import static com.google.api.gax.util.TimeConversionUtils.toThreetenDuration; +import static com.google.cloud.spanner.spi.v1.GapicSpannerRpc.EXPERIMENTAL_LOCATION_API_ENV_VAR; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.core.InternalApi; +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.core.ExecutorProvider; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.grpc.GrpcInterceptorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.tracing.ApiTracerFactory; +import com.google.api.gax.tracing.BaseApiTracerFactory; +import com.google.api.gax.tracing.OpencensusTracerFactory; +import com.google.auth.oauth2.AccessToken; +import com.google.auth.oauth2.GoogleCredentials; +import com.google.cloud.NoCredentials; +import com.google.cloud.ServiceDefaults; +import com.google.cloud.ServiceOptions; +import com.google.cloud.ServiceRpc; +import 
com.google.cloud.TransportOptions; +import com.google.cloud.grpc.GcpManagedChannelOptions; +import com.google.cloud.grpc.GcpManagedChannelOptions.GcpChannelPoolOptions; +import com.google.cloud.grpc.GrpcTransportOptions; +import com.google.cloud.spanner.Options.DirectedReadOption; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.Options.UpdateOption; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminSettings; +import com.google.cloud.spanner.admin.database.v1.stub.DatabaseAdminStubSettings; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminSettings; +import com.google.cloud.spanner.admin.instance.v1.stub.InstanceAdminStubSettings; +import com.google.cloud.spanner.spi.SpannerRpcFactory; +import com.google.cloud.spanner.spi.v1.ChannelEndpointCacheFactory; +import com.google.cloud.spanner.spi.v1.GapicSpannerRpc; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.cloud.spanner.v1.SpannerSettings; +import com.google.cloud.spanner.v1.stub.SpannerStubSettings; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; +import com.google.spanner.v1.RequestOptions; +import com.google.spanner.v1.SpannerGrpc; +import com.google.spanner.v1.TransactionOptions; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode; +import io.grpc.CallCredentials; +import io.grpc.CompressorRegistry; +import io.grpc.Context; +import io.grpc.ExperimentalApi; +import 
io.grpc.ManagedChannelBuilder; +import io.grpc.MethodDescriptor; +import io.grpc.netty.shaded.io.grpc.netty.GrpcSslContexts; +import io.grpc.netty.shaded.io.grpc.netty.NettyChannelBuilder; +import io.grpc.netty.shaded.io.netty.handler.ssl.SslContext; +import io.opencensus.trace.Tracing; +import io.opentelemetry.api.GlobalOpenTelemetry; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import java.io.File; +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Base64; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import javax.annotation.concurrent.GuardedBy; + +/** Options for the Cloud Spanner service. 
*/ +public class SpannerOptions extends ServiceOptions { + private static final long serialVersionUID = 2789571558532701170L; + private static SpannerEnvironment environment = SpannerEnvironmentImpl.INSTANCE; + private static boolean enableOpenCensusMetrics = true; + private static boolean enableOpenTelemetryMetrics = false; + + private static final String JDBC_API_CLIENT_LIB_TOKEN = "sp-jdbc"; + private static final String HIBERNATE_API_CLIENT_LIB_TOKEN = "sp-hib"; + private static final String LIQUIBASE_API_CLIENT_LIB_TOKEN = "sp-liq"; + private static final String PG_ADAPTER_CLIENT_LIB_TOKEN = "pg-adapter"; + + private static final String API_SHORT_NAME = "Spanner"; + private static final String SPANNER_SERVICE_NAME = "spanner"; + private static final String GOOGLE_DEFAULT_UNIVERSE = "googleapis.com"; + private static final String EXPERIMENTAL_HOST_PROJECT_ID = "default"; + + static final ImmutableSet SCOPES = + ImmutableSet.of( + "https://www.googleapis.com/auth/spanner.admin", + "https://www.googleapis.com/auth/spanner.data"); + static final int MAX_CHANNELS = 256; + @VisibleForTesting static final int DEFAULT_CHANNELS = 4; + // Set the default number of channels to GRPC_GCP_ENABLED_DEFAULT_CHANNELS when gRPC-GCP extension + // is enabled, to make sure there are sufficient channels available to move the sessions to a + // different channel if a network connection in a particular channel fails. + @VisibleForTesting static final int GRPC_GCP_ENABLED_DEFAULT_CHANNELS = 8; + + // Dynamic Channel Pool (DCP) default values and bounds + /** Default max concurrent RPCs per channel before triggering scale up. */ + public static final int DEFAULT_DYNAMIC_POOL_MAX_RPC = 25; + + /** Default min concurrent RPCs per channel for scale down check. */ + public static final int DEFAULT_DYNAMIC_POOL_MIN_RPC = 15; + + /** Default scale down check interval. 
*/ + public static final Duration DEFAULT_DYNAMIC_POOL_SCALE_DOWN_INTERVAL = Duration.ofMinutes(3); + + /** Default initial number of channels for dynamic pool. */ + public static final int DEFAULT_DYNAMIC_POOL_INITIAL_SIZE = 4; + + /** Default max number of channels for dynamic pool. */ + public static final int DEFAULT_DYNAMIC_POOL_MAX_CHANNELS = 10; + + /** Default min number of channels for dynamic pool. */ + public static final int DEFAULT_DYNAMIC_POOL_MIN_CHANNELS = 2; + + /** + * Default affinity key lifetime for dynamic channel pool. This is how long to keep an affinity + * key after its last use. Zero means keeping keys forever. Default is 10 minutes, which is + * sufficient to ensure that requests within a single transaction use the same channel. + */ + public static final Duration DEFAULT_DYNAMIC_POOL_AFFINITY_KEY_LIFETIME = Duration.ofMinutes(10); + + /** + * Default cleanup interval for dynamic channel pool affinity keys. This is how frequently the + * affinity key cleanup process runs. Default is 1 minute (1/10 of default affinity key lifetime). + */ + public static final Duration DEFAULT_DYNAMIC_POOL_CLEANUP_INTERVAL = Duration.ofMinutes(1); + + /** + * Creates a {@link GcpChannelPoolOptions} instance with Spanner-specific defaults for dynamic + * channel pooling. These defaults are optimized for typical Spanner workloads. + * + *

Default values: + * + *

    + *
  • Max size: {@value #DEFAULT_DYNAMIC_POOL_MAX_CHANNELS} + *
  • Min size: {@value #DEFAULT_DYNAMIC_POOL_MIN_CHANNELS} + *
  • Initial size: {@value #DEFAULT_DYNAMIC_POOL_INITIAL_SIZE} + *
  • Max RPC per channel: {@value #DEFAULT_DYNAMIC_POOL_MAX_RPC} + *
  • Min RPC per channel: {@value #DEFAULT_DYNAMIC_POOL_MIN_RPC} + *
  • Scale down interval: 3 minutes + *
  • Affinity key lifetime: 10 minutes + *
  • Cleanup interval: 1 minute + *
+ * + * @return a new {@link GcpChannelPoolOptions} instance with Spanner defaults + */ + public static GcpChannelPoolOptions createDefaultDynamicChannelPoolOptions() { + return GcpChannelPoolOptions.newBuilder() + .setMaxSize(DEFAULT_DYNAMIC_POOL_MAX_CHANNELS) + .setMinSize(DEFAULT_DYNAMIC_POOL_MIN_CHANNELS) + .setInitSize(DEFAULT_DYNAMIC_POOL_INITIAL_SIZE) + .setDynamicScaling( + DEFAULT_DYNAMIC_POOL_MIN_RPC, + DEFAULT_DYNAMIC_POOL_MAX_RPC, + DEFAULT_DYNAMIC_POOL_SCALE_DOWN_INTERVAL) + .setAffinityKeyLifetime(DEFAULT_DYNAMIC_POOL_AFFINITY_KEY_LIFETIME) + .setCleanupInterval(DEFAULT_DYNAMIC_POOL_CLEANUP_INTERVAL) + .build(); + } + + private final TransportChannelProvider channelProvider; + private final ChannelEndpointCacheFactory channelEndpointCacheFactory; + + @SuppressWarnings("rawtypes") + private final ApiFunction channelConfigurator; + + private final GrpcInterceptorProvider interceptorProvider; + private final SessionPoolOptions sessionPoolOptions; + private final int prefetchChunks; + private final DecodeMode decodeMode; + private final int numChannels; + private final String transportChannelExecutorThreadNameFormat; + private final String databaseRole; + private final ImmutableMap sessionLabels; + private final SpannerStubSettings spannerStubSettings; + private final InstanceAdminStubSettings instanceAdminStubSettings; + private final DatabaseAdminStubSettings databaseAdminStubSettings; + private final Duration partitionedDmlTimeout; + private final boolean grpcGcpExtensionEnabled; + private final GcpManagedChannelOptions grpcGcpOptions; + private final boolean dynamicChannelPoolEnabled; + private final GcpChannelPoolOptions gcpChannelPoolOptions; + private final boolean autoThrottleAdministrativeRequests; + private final RetrySettings retryAdministrativeRequestsSettings; + private final boolean trackTransactionStarter; + private final boolean enableGrpcGcpOtelMetrics; + private final BuiltInMetricsProvider builtInMetricsProvider = 
BuiltInMetricsProvider.INSTANCE; + + /** + * These are the default {@link QueryOptions} defined by the user on this {@link SpannerOptions}. + */ + private final Map defaultQueryOptions; + + /** These are the default {@link QueryOptions} defined in environment variables on this system. */ + private final QueryOptions envQueryOptions; + + /** + * These are the merged query options of the {@link QueryOptions} set on this {@link + * SpannerOptions} and the {@link QueryOptions} in the environment variables. Options specified in + * environment variables take precedence above options specified in the {@link SpannerOptions} + * instance. + */ + private final Map mergedQueryOptions; + + private final CallCredentialsProvider callCredentialsProvider; + private final CloseableExecutorProvider asyncExecutorProvider; + private final String compressorName; + private final boolean leaderAwareRoutingEnabled; + private final boolean enableDirectAccess; + private final boolean enableGcpFallback; + private final DirectedReadOptions directedReadOptions; + private final boolean useVirtualThreads; + private final OpenTelemetry openTelemetry; + private final boolean enableApiTracing; + private final boolean enableBuiltInMetrics; + private final boolean enableLocationApi; + private final boolean enableExtendedTracing; + private final boolean enableEndToEndTracing; + private final String monitoringHost; + private final TransactionOptions defaultTransactionOptions; + private final RequestOptions.ClientContext clientContext; + + enum TracingFramework { + OPEN_CENSUS, + OPEN_TELEMETRY + } + + private static final Object lock = new Object(); + + @GuardedBy("lock") + private static TracingFramework activeTracingFramework; + + /** Interface that can be used to provide {@link CallCredentials} to {@link SpannerOptions}. */ + public interface CallCredentialsProvider { + /** Return the {@link CallCredentials} to use for a gRPC call. 
*/ + CallCredentials getCallCredentials(); + } + + /** Context key for the {@link CallContextConfigurator} to use. */ + public static final Context.Key CALL_CONTEXT_CONFIGURATOR_KEY = + Context.key("call-context-configurator"); + + /** + * {@link CallContextConfigurator} can be used to modify the {@link ApiCallContext} for one or + * more specific RPCs. This can be used to set specific timeout value for RPCs or use specific + * {@link CallCredentials} for an RPC. The {@link CallContextConfigurator} must be set as a value + * on the {@link Context} using the {@link SpannerOptions#CALL_CONTEXT_CONFIGURATOR_KEY} key. + * + *

This API is meant for advanced users. Most users should instead use the {@link + * SpannerCallContextTimeoutConfigurator} for setting timeouts per RPC. + * + *

Example usage: + * + *

{@code
+   * CallContextConfigurator configurator =
+   *     new CallContextConfigurator() {
+   *       public  ApiCallContext configure(
+   *           ApiCallContext context, ReqT request, MethodDescriptor method) {
+   *         if (method == SpannerGrpc.getExecuteBatchDmlMethod()) {
+   *           return GrpcCallContext.createDefault()
+   *               .withCallOptions(CallOptions.DEFAULT.withDeadlineAfter(60L, TimeUnit.SECONDS));
+   *         }
+   *         return null;
+   *       }
+   *     };
+   * Context context =
+   *     Context.current().withValue(SpannerOptions.CALL_CONTEXT_CONFIGURATOR_KEY, configurator);
+   * context.run(
+   *     () -> {
+   *       try {
+   *         client
+   *             .readWriteTransaction()
+   *             .run(
+   *                 new TransactionCallable() {
+   *                   public long[] run(TransactionContext transaction) throws Exception {
+   *                     return transaction.batchUpdate(
+   *                         ImmutableList.of(statement1, statement2));
+   *                   }
+   *                 });
+   *       } catch (SpannerException e) {
+   *         if (e.getErrorCode() == ErrorCode.DEADLINE_EXCEEDED) {
+   *           // handle timeout exception.
+   *         }
+   *       }
+   *     }
+   * }
+ */ + public interface CallContextConfigurator { + /** + * Configure a {@link ApiCallContext} for a specific RPC call. + * + * @param context The default context. This can be used to inspect the current values. + * @param request The request that will be sent. + * @param method The method that is being called. + * @return An {@link ApiCallContext} that will be merged with the default {@link + * ApiCallContext}. If null is returned, no changes to the default {@link + * ApiCallContext} will be made. + */ + @Nullable + ApiCallContext configure( + ApiCallContext context, ReqT request, MethodDescriptor method); + } + + private enum SpannerMethod { + COMMIT { + @Override + boolean isMethod(ReqT request, MethodDescriptor method) { + return method == SpannerGrpc.getCommitMethod(); + } + }, + ROLLBACK { + @Override + boolean isMethod(ReqT request, MethodDescriptor method) { + return method == SpannerGrpc.getRollbackMethod(); + } + }, + + EXECUTE_QUERY { + @Override + boolean isMethod(ReqT request, MethodDescriptor method) { + // This also matches with Partitioned DML calls, but that call will override any timeout + // settings anyway. 
+ return method == SpannerGrpc.getExecuteStreamingSqlMethod(); + } + }, + READ { + @Override + boolean isMethod(ReqT request, MethodDescriptor method) { + return method == SpannerGrpc.getStreamingReadMethod(); + } + }, + EXECUTE_UPDATE { + @Override + boolean isMethod(ReqT request, MethodDescriptor method) { + if (method == SpannerGrpc.getExecuteSqlMethod()) { + ExecuteSqlRequest sqlRequest = (ExecuteSqlRequest) request; + return sqlRequest.getSeqno() != 0L; + } + return false; + } + }, + BATCH_UPDATE { + @Override + boolean isMethod(ReqT request, MethodDescriptor method) { + return method == SpannerGrpc.getExecuteBatchDmlMethod(); + } + }, + + PARTITION_QUERY { + @Override + boolean isMethod(ReqT request, MethodDescriptor method) { + return method == SpannerGrpc.getPartitionQueryMethod(); + } + }, + PARTITION_READ { + @Override + boolean isMethod(ReqT request, MethodDescriptor method) { + return method == SpannerGrpc.getPartitionReadMethod(); + } + }; + + abstract boolean isMethod(ReqT request, MethodDescriptor method); + + static SpannerMethod valueOf(ReqT request, MethodDescriptor method) { + for (SpannerMethod m : SpannerMethod.values()) { + if (m.isMethod(request, method)) { + return m; + } + } + return null; + } + } + + /** + * Helper class to configure timeouts for specific Spanner RPCs. The {@link + * SpannerCallContextTimeoutConfigurator} must be set as a value on the {@link Context} using the + * {@link SpannerOptions#CALL_CONTEXT_CONFIGURATOR_KEY} key. + * + *

   * <p>Example usage:
   *
   * <pre>{@code
   * // Create a context with an ExecuteQuery timeout of 10 seconds.
   * Context context =
   *     Context.current()
   *         .withValue(
   *             SpannerOptions.CALL_CONTEXT_CONFIGURATOR_KEY,
   *             SpannerCallContextTimeoutConfigurator.create()
   *                 .withExecuteQueryTimeout(Duration.ofSeconds(10L)));
   * context.run(
   *     () -> {
   *       try (ResultSet rs =
   *           client
   *               .singleUse()
   *               .executeQuery(
   *                   Statement.of(
   *                       "SELECT SingerId, FirstName, LastName FROM Singers ORDER BY LastName"))) {
   *         while (rs.next()) {
   *           System.out.printf("%d %s %s%n", rs.getLong(0), rs.getString(1), rs.getString(2));
   *         }
   *       } catch (SpannerException e) {
   *         if (e.getErrorCode() == ErrorCode.DEADLINE_EXCEEDED) {
   *           // Handle timeout.
   *         }
   *       }
   *     });
   * }</pre>
+ */ + public static class SpannerCallContextTimeoutConfigurator implements CallContextConfigurator { + private Duration commitTimeout; + private Duration rollbackTimeout; + + private Duration executeQueryTimeout; + private Duration executeUpdateTimeout; + private Duration batchUpdateTimeout; + private Duration readTimeout; + + private Duration partitionQueryTimeout; + private Duration partitionReadTimeout; + + public static SpannerCallContextTimeoutConfigurator create() { + return new SpannerCallContextTimeoutConfigurator(); + } + + private SpannerCallContextTimeoutConfigurator() {} + + @Override + public ApiCallContext configure( + ApiCallContext context, ReqT request, MethodDescriptor method) { + SpannerMethod spannerMethod = SpannerMethod.valueOf(request, method); + if (spannerMethod == null) { + return null; + } + switch (SpannerMethod.valueOf(request, method)) { + case BATCH_UPDATE: + return batchUpdateTimeout == null + ? null + : GrpcCallContext.createDefault().withTimeoutDuration(batchUpdateTimeout); + case COMMIT: + return commitTimeout == null + ? null + : GrpcCallContext.createDefault().withTimeoutDuration(commitTimeout); + case EXECUTE_QUERY: + return executeQueryTimeout == null + ? null + : GrpcCallContext.createDefault() + .withTimeoutDuration(executeQueryTimeout) + .withStreamWaitTimeoutDuration(executeQueryTimeout); + case EXECUTE_UPDATE: + return executeUpdateTimeout == null + ? null + : GrpcCallContext.createDefault().withTimeoutDuration(executeUpdateTimeout); + case PARTITION_QUERY: + return partitionQueryTimeout == null + ? null + : GrpcCallContext.createDefault().withTimeoutDuration(partitionQueryTimeout); + case PARTITION_READ: + return partitionReadTimeout == null + ? null + : GrpcCallContext.createDefault().withTimeoutDuration(partitionReadTimeout); + case READ: + return readTimeout == null + ? 
null + : GrpcCallContext.createDefault() + .withTimeoutDuration(readTimeout) + .withStreamWaitTimeoutDuration(readTimeout); + case ROLLBACK: + return rollbackTimeout == null + ? null + : GrpcCallContext.createDefault().withTimeoutDuration(rollbackTimeout); + default: + } + return null; + } + + /** This method is obsolete. Use {@link #getCommitTimeoutDuration()} instead. */ + @ObsoleteApi("Use getCommitTimeoutDuration() instead.") + public org.threeten.bp.Duration getCommitTimeout() { + return toThreetenDuration(getCommitTimeoutDuration()); + } + + public Duration getCommitTimeoutDuration() { + return commitTimeout; + } + + /** This method is obsolete. Use {@link #withCommitTimeoutDuration(Duration)} instead. */ + @ObsoleteApi("Use withCommitTimeoutDuration() instead.") + public SpannerCallContextTimeoutConfigurator withCommitTimeout( + org.threeten.bp.Duration commitTimeout) { + return withCommitTimeoutDuration(toJavaTimeDuration(commitTimeout)); + } + + public SpannerCallContextTimeoutConfigurator withCommitTimeoutDuration(Duration commitTimeout) { + this.commitTimeout = commitTimeout; + return this; + } + + /** This method is obsolete. Use {@link #getRollbackTimeoutDuration()} instead. */ + @ObsoleteApi("Use getRollbackTimeoutDuration() instead.") + public org.threeten.bp.Duration getRollbackTimeout() { + return toThreetenDuration(getRollbackTimeoutDuration()); + } + + public Duration getRollbackTimeoutDuration() { + return rollbackTimeout; + } + + /** This method is obsolete. Use {@link #withRollbackTimeoutDuration(Duration)} instead. 
*/ + @ObsoleteApi("Use withRollbackTimeoutDuration() instead.") + public SpannerCallContextTimeoutConfigurator withRollbackTimeout( + org.threeten.bp.Duration rollbackTimeout) { + return withRollbackTimeoutDuration(toJavaTimeDuration(rollbackTimeout)); + } + + public SpannerCallContextTimeoutConfigurator withRollbackTimeoutDuration( + Duration rollbackTimeout) { + this.rollbackTimeout = rollbackTimeout; + return this; + } + + /** This method is obsolete. Use {@link #getExecuteQueryTimeoutDuration()} instead. */ + @ObsoleteApi("Use getExecuteQueryTimeoutDuration() instead.") + public org.threeten.bp.Duration getExecuteQueryTimeout() { + return toThreetenDuration(getExecuteQueryTimeoutDuration()); + } + + public Duration getExecuteQueryTimeoutDuration() { + return executeQueryTimeout; + } + + /** This method is obsolete. Use {@link #withExecuteQueryTimeoutDuration(Duration)} instead. */ + @ObsoleteApi("Use withExecuteQueryTimeoutDuration() instead") + public SpannerCallContextTimeoutConfigurator withExecuteQueryTimeout( + org.threeten.bp.Duration executeQueryTimeout) { + return withExecuteQueryTimeoutDuration(toJavaTimeDuration(executeQueryTimeout)); + } + + public SpannerCallContextTimeoutConfigurator withExecuteQueryTimeoutDuration( + Duration executeQueryTimeout) { + this.executeQueryTimeout = executeQueryTimeout; + return this; + } + + /** This method is obsolete. Use {@link #getExecuteUpdateTimeoutDuration()} instead. */ + @ObsoleteApi("Use getExecuteUpdateTimeoutDuration() instead") + public org.threeten.bp.Duration getExecuteUpdateTimeout() { + return toThreetenDuration(getExecuteUpdateTimeoutDuration()); + } + + public Duration getExecuteUpdateTimeoutDuration() { + return executeUpdateTimeout; + } + + /** This method is obsolete. Use {@link #withExecuteUpdateTimeoutDuration(Duration)} instead. 
*/ + @ObsoleteApi("Use withExecuteUpdateTimeoutDuration() instead") + public SpannerCallContextTimeoutConfigurator withExecuteUpdateTimeout( + org.threeten.bp.Duration executeUpdateTimeout) { + return withExecuteUpdateTimeoutDuration(toJavaTimeDuration(executeUpdateTimeout)); + } + + public SpannerCallContextTimeoutConfigurator withExecuteUpdateTimeoutDuration( + Duration executeUpdateTimeout) { + this.executeUpdateTimeout = executeUpdateTimeout; + return this; + } + + /** This method is obsolete. Use {@link #getBatchUpdateTimeoutDuration()} instead. */ + @ObsoleteApi("Use getBatchUpdateTimeoutDuration() instead") + public org.threeten.bp.Duration getBatchUpdateTimeout() { + return toThreetenDuration(getBatchUpdateTimeoutDuration()); + } + + public Duration getBatchUpdateTimeoutDuration() { + return batchUpdateTimeout; + } + + /** This method is obsolete. Use {@link #withBatchUpdateTimeoutDuration(Duration)} instead. */ + @ObsoleteApi("Use withBatchUpdateTimeoutDuration() instead") + public SpannerCallContextTimeoutConfigurator withBatchUpdateTimeout( + org.threeten.bp.Duration batchUpdateTimeout) { + return withBatchUpdateTimeoutDuration(toJavaTimeDuration(batchUpdateTimeout)); + } + + public SpannerCallContextTimeoutConfigurator withBatchUpdateTimeoutDuration( + Duration batchUpdateTimeout) { + this.batchUpdateTimeout = batchUpdateTimeout; + return this; + } + + /** This method is obsolete. Use {@link #getReadTimeoutDuration()} instead. */ + @ObsoleteApi("Use getReadTimeoutDuration() instead") + public org.threeten.bp.Duration getReadTimeout() { + return toThreetenDuration(getReadTimeoutDuration()); + } + + public Duration getReadTimeoutDuration() { + return readTimeout; + } + + /** This method is obsolete. Use {@link #withReadTimeoutDuration(Duration)} instead. 
*/ + @ObsoleteApi("Use withReadTimeoutDuration() instead") + public SpannerCallContextTimeoutConfigurator withReadTimeout( + org.threeten.bp.Duration readTimeout) { + return withReadTimeoutDuration(toJavaTimeDuration(readTimeout)); + } + + public SpannerCallContextTimeoutConfigurator withReadTimeoutDuration(Duration readTimeout) { + this.readTimeout = readTimeout; + return this; + } + + /** This method is obsolete. Use {@link #getPartitionQueryTimeoutDuration()} instead. */ + @ObsoleteApi("Use getPartitionQueryTimeoutDuration() instead") + public org.threeten.bp.Duration getPartitionQueryTimeout() { + return toThreetenDuration(getPartitionQueryTimeoutDuration()); + } + + public Duration getPartitionQueryTimeoutDuration() { + return partitionQueryTimeout; + } + + /** + * This method is obsolete. Use {@link #withPartitionQueryTimeoutDuration(Duration)} instead. + */ + @ObsoleteApi("Use withPartitionQueryTimeoutDuration() instead") + public SpannerCallContextTimeoutConfigurator withPartitionQueryTimeout( + org.threeten.bp.Duration partitionQueryTimeout) { + return withPartitionQueryTimeoutDuration(toJavaTimeDuration(partitionQueryTimeout)); + } + + public SpannerCallContextTimeoutConfigurator withPartitionQueryTimeoutDuration( + Duration partitionQueryTimeout) { + this.partitionQueryTimeout = partitionQueryTimeout; + return this; + } + + /** This method is obsolete. Use {@link #getPartitionReadTimeoutDuration()} instead. */ + @ObsoleteApi("Use getPartitionReadTimeoutDuration() instead") + public org.threeten.bp.Duration getPartitionReadTimeout() { + return toThreetenDuration(getPartitionReadTimeoutDuration()); + } + + public Duration getPartitionReadTimeoutDuration() { + return partitionReadTimeout; + } + + /** This method is obsolete. Use {@link #withPartitionReadTimeoutDuration(Duration)} instead. 
*/ + @ObsoleteApi("Use withPartitionReadTimeoutDuration() instead") + public SpannerCallContextTimeoutConfigurator withPartitionReadTimeout( + org.threeten.bp.Duration partitionReadTimeout) { + return withPartitionReadTimeoutDuration(toJavaTimeDuration(partitionReadTimeout)); + } + + public SpannerCallContextTimeoutConfigurator withPartitionReadTimeoutDuration( + Duration partitionReadTimeout) { + this.partitionReadTimeout = partitionReadTimeout; + return this; + } + } + + /** Default implementation of {@code SpannerFactory}. */ + private static class DefaultSpannerFactory implements SpannerFactory { + private static final DefaultSpannerFactory INSTANCE = new DefaultSpannerFactory(); + + @Override + public Spanner create(SpannerOptions serviceOptions) { + return new SpannerImpl(serviceOptions); + } + } + + /** Default implementation of {@code SpannerRpcFactory}. */ + private static class DefaultSpannerRpcFactory implements SpannerRpcFactory { + private static final DefaultSpannerRpcFactory INSTANCE = new DefaultSpannerRpcFactory(); + + @Override + public ServiceRpc create(SpannerOptions options) { + return new GapicSpannerRpc(options); + } + } + + private static final AtomicInteger DEFAULT_POOL_COUNT = new AtomicInteger(); + + /** {@link ExecutorProvider} that is used for {@link AsyncResultSet}. */ + public interface CloseableExecutorProvider extends ExecutorProvider, AutoCloseable { + /** Overridden to suppress the throws declaration of the super interface. */ + @Override + void close(); + } + + /** + * Implementation of {@link CloseableExecutorProvider} that uses a fixed single {@link + * ScheduledExecutorService}. 
+ */ + public static class FixedCloseableExecutorProvider implements CloseableExecutorProvider { + private final ScheduledExecutorService executor; + + private FixedCloseableExecutorProvider(ScheduledExecutorService executor) { + this.executor = Preconditions.checkNotNull(executor); + } + + @Override + public void close() { + executor.shutdown(); + } + + @Override + public ScheduledExecutorService getExecutor() { + return executor; + } + + @Override + public boolean shouldAutoClose() { + return false; + } + + /** Creates a FixedCloseableExecutorProvider. */ + public static FixedCloseableExecutorProvider create(ScheduledExecutorService executor) { + return new FixedCloseableExecutorProvider(executor); + } + } + + /** + * Default {@link ExecutorProvider} for high-level async calls that need an executor. The default + * uses a cached thread pool containing a max of 8 threads. The pool is lazily initialized and + * will not create any threads if the user application does not use any async methods. It will + * also scale down the thread usage if the async load allows for that. + */ + @VisibleForTesting + static CloseableExecutorProvider createDefaultAsyncExecutorProvider() { + return createAsyncExecutorProvider( + getDefaultAsyncExecutorProviderCoreThreadCount(), 60L, TimeUnit.SECONDS); + } + + @VisibleForTesting + static int getDefaultAsyncExecutorProviderCoreThreadCount() { + String propertyName = "com.google.cloud.spanner.async_num_core_threads"; + String propertyValue = System.getProperty(propertyName, "8"); + try { + int corePoolSize = Integer.parseInt(propertyValue); + if (corePoolSize < 0) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + String.format( + "The value for %s must be >=0. 
Invalid value: %s", propertyName, propertyValue)); + } + return corePoolSize; + } catch (NumberFormatException exception) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + String.format( + "The %s system property must be a valid integer. The value %s could not be parsed as" + + " an integer.", + propertyName, propertyValue)); + } + } + + /** + * Creates a {@link CloseableExecutorProvider} that can be used as an {@link ExecutorProvider} for + * the async API. The {@link ExecutorProvider} will lazily create up to poolSize threads. The + * backing threads will automatically be shutdown if they have not been used during the keep-alive + * time. The backing threads are created as daemon threads. + * + * @param poolSize the maximum number of threads to create in the pool + * @param keepAliveTime the time that an unused thread in the pool should be kept alive + * @param unit the time unit used for the keepAliveTime + * @return a {@link CloseableExecutorProvider} that can be used for {@link + * SpannerOptions.Builder#setAsyncExecutorProvider(CloseableExecutorProvider)} + */ + public static CloseableExecutorProvider createAsyncExecutorProvider( + int poolSize, long keepAliveTime, TimeUnit unit) { + String format = + String.format("spanner-async-pool-%d-thread-%%d", DEFAULT_POOL_COUNT.incrementAndGet()); + ThreadFactory threadFactory = + new ThreadFactoryBuilder().setDaemon(true).setNameFormat(format).build(); + ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(poolSize, threadFactory); + executor.setKeepAliveTime(keepAliveTime, unit); + executor.allowCoreThreadTimeOut(true); + return FixedCloseableExecutorProvider.create(executor); + } + + protected SpannerOptions(Builder builder) { + super(SpannerFactory.class, SpannerRpcFactory.class, builder, new SpannerDefaults()); + numChannels = builder.numChannels == null ? 
DEFAULT_CHANNELS : builder.numChannels; + Preconditions.checkArgument( + numChannels >= 1 && numChannels <= MAX_CHANNELS, + "Number of channels must fall in the range [1, %s], found: %s", + MAX_CHANNELS, + numChannels); + + transportChannelExecutorThreadNameFormat = builder.transportChannelExecutorThreadNameFormat; + channelProvider = builder.channelProvider; + channelEndpointCacheFactory = builder.channelEndpointCacheFactory; + if (builder.mTLSContext != null) { + channelConfigurator = + channelBuilder -> { + if (builder.channelConfigurator != null) { + channelBuilder = builder.channelConfigurator.apply(channelBuilder); + } + if (channelBuilder instanceof NettyChannelBuilder) { + ((NettyChannelBuilder) channelBuilder).sslContext(builder.mTLSContext); + } + return channelBuilder; + }; + } else { + channelConfigurator = builder.channelConfigurator; + } + interceptorProvider = builder.interceptorProvider; + sessionPoolOptions = + builder.sessionPoolOptions != null + ? builder.sessionPoolOptions + : SessionPoolOptions.newBuilder().build(); + prefetchChunks = builder.prefetchChunks; + decodeMode = builder.decodeMode; + databaseRole = builder.databaseRole; + sessionLabels = builder.sessionLabels; + try { + String resolvedUniversalDomain = getResolvedUniverseDomain(); + spannerStubSettings = + builder.spannerStubSettingsBuilder.setUniverseDomain(resolvedUniversalDomain).build(); + instanceAdminStubSettings = + builder + .instanceAdminStubSettingsBuilder + .setUniverseDomain(resolvedUniversalDomain) + .build(); + databaseAdminStubSettings = + builder + .databaseAdminStubSettingsBuilder + .setUniverseDomain(resolvedUniversalDomain) + .build(); + } catch (IOException e) { + throw SpannerExceptionFactory.newSpannerException(e); + } + partitionedDmlTimeout = builder.partitionedDmlTimeout; + grpcGcpExtensionEnabled = builder.grpcGcpExtensionEnabled; + grpcGcpOptions = builder.grpcGcpOptions; + + // Dynamic channel pooling is disabled by default. 
+ // It is only enabled when: + // 1. enableDynamicChannelPool() was explicitly called, AND + // 2. grpc-gcp extension is enabled, AND + // 3. numChannels was not explicitly set + if (builder.dynamicChannelPoolEnabled != null && builder.dynamicChannelPoolEnabled) { + // DCP was explicitly enabled, but respect numChannels if set + dynamicChannelPoolEnabled = grpcGcpExtensionEnabled && !builder.numChannelsExplicitlySet; + } else { + // DCP is disabled by default, or was explicitly disabled + dynamicChannelPoolEnabled = false; + } + + // Use user-provided GcpChannelPoolOptions or create Spanner-specific defaults + gcpChannelPoolOptions = + builder.gcpChannelPoolOptions != null + ? builder.gcpChannelPoolOptions + : createDefaultDynamicChannelPoolOptions(); + + autoThrottleAdministrativeRequests = builder.autoThrottleAdministrativeRequests; + retryAdministrativeRequestsSettings = builder.retryAdministrativeRequestsSettings; + trackTransactionStarter = builder.trackTransactionStarter; + enableGrpcGcpOtelMetrics = builder.enableGrpcGcpOtelMetrics; + defaultQueryOptions = builder.defaultQueryOptions; + envQueryOptions = builder.getEnvironmentQueryOptions(); + if (envQueryOptions.equals(QueryOptions.getDefaultInstance())) { + this.mergedQueryOptions = ImmutableMap.copyOf(builder.defaultQueryOptions); + } else { + // Merge all specific database options with the environment options. 
+ Map merged = new HashMap<>(builder.defaultQueryOptions); + for (Entry entry : builder.defaultQueryOptions.entrySet()) { + merged.put(entry.getKey(), entry.getValue().toBuilder().mergeFrom(envQueryOptions).build()); + } + this.mergedQueryOptions = ImmutableMap.copyOf(merged); + } + callCredentialsProvider = builder.callCredentialsProvider; + asyncExecutorProvider = builder.asyncExecutorProvider; + compressorName = builder.compressorName; + leaderAwareRoutingEnabled = builder.leaderAwareRoutingEnabled; + enableDirectAccess = builder.enableDirectAccess; + enableGcpFallback = builder.enableGcpFallback; + directedReadOptions = builder.directedReadOptions; + useVirtualThreads = builder.useVirtualThreads; + openTelemetry = builder.openTelemetry; + enableApiTracing = builder.enableApiTracing; + enableExtendedTracing = builder.enableExtendedTracing; + if (builder.experimentalHost != null) { + enableBuiltInMetrics = false; + } else { + enableBuiltInMetrics = builder.enableBuiltInMetrics; + } + enableLocationApi = builder.enableLocationApi; + enableEndToEndTracing = builder.enableEndToEndTracing; + monitoringHost = builder.monitoringHost; + defaultTransactionOptions = builder.defaultTransactionOptions; + clientContext = builder.clientContext; + } + + private String getResolvedUniverseDomain() { + String universeDomain = getUniverseDomain(); + return Strings.isNullOrEmpty(universeDomain) ? GOOGLE_DEFAULT_UNIVERSE : universeDomain; + } + + /** Returns the default {@link RequestOptions.ClientContext} for this {@link SpannerOptions}. */ + public RequestOptions.ClientContext getClientContext() { + return clientContext; + } + + /** + * The environment to read configuration values from. The default implementation uses environment + * variables. + */ + public interface SpannerEnvironment { + /** + * The optimizer version to use. Must return an empty string to indicate that no value has been + * set. 
+ */ + @Nonnull + default String getOptimizerVersion() { + return ""; + } + + /** + * The optimizer statistics package to use. Must return an empty string to indicate that no + * value has been set. + */ + @Nonnull + default String getOptimizerStatisticsPackage() { + return ""; + } + + default boolean isEnableExtendedTracing() { + return false; + } + + default boolean isEnableApiTracing() { + return false; + } + + default boolean isEnableDirectAccess() { + return false; + } + + default boolean isEnableGcpFallback() { + return false; + } + + default boolean isEnableBuiltInMetrics() { + return true; + } + + default boolean isEnableGRPCBuiltInMetrics() { + return false; + } + + default boolean isEnableGrpcGcpOtelMetrics() { + return true; + } + + default boolean isEnableEndToEndTracing() { + return false; + } + + default boolean isEnableLocationApi() { + return false; + } + + @Deprecated + @ObsoleteApi( + "This will be removed in an upcoming version without a major version bump. You should use" + + " universalDomain to configure the built-in metrics endpoint for a partner universe.") + default String getMonitoringHost() { + return null; + } + + default GoogleCredentials getDefaultExperimentalHostCredentials() { + return null; + } + } + + static final String DEFAULT_SPANNER_EXPERIMENTAL_HOST_CREDENTIALS = + "SPANNER_EXPERIMENTAL_HOST_AUTH_TOKEN"; + + /** + * Default implementation of {@link SpannerEnvironment}. Reads all configuration from environment + * variables. 
+ */ + private static class SpannerEnvironmentImpl implements SpannerEnvironment { + private static final SpannerEnvironmentImpl INSTANCE = new SpannerEnvironmentImpl(); + private static final String SPANNER_OPTIMIZER_VERSION_ENV_VAR = "SPANNER_OPTIMIZER_VERSION"; + private static final String SPANNER_OPTIMIZER_STATISTICS_PACKAGE_ENV_VAR = + "SPANNER_OPTIMIZER_STATISTICS_PACKAGE"; + private static final String SPANNER_ENABLE_EXTENDED_TRACING = "SPANNER_ENABLE_EXTENDED_TRACING"; + private static final String SPANNER_ENABLE_API_TRACING = "SPANNER_ENABLE_API_TRACING"; + private static final String GOOGLE_SPANNER_ENABLE_DIRECT_ACCESS = + "GOOGLE_SPANNER_ENABLE_DIRECT_ACCESS"; + private static final String GOOGLE_SPANNER_ENABLE_GCP_FALLBACK = + "GOOGLE_SPANNER_ENABLE_GCP_FALLBACK"; + private static final String SPANNER_ENABLE_END_TO_END_TRACING = + "SPANNER_ENABLE_END_TO_END_TRACING"; + private static final String SPANNER_DISABLE_BUILTIN_METRICS = "SPANNER_DISABLE_BUILTIN_METRICS"; + private static final String SPANNER_DISABLE_DIRECT_ACCESS_GRPC_BUILTIN_METRICS = + "SPANNER_DISABLE_DIRECT_ACCESS_GRPC_BUILTIN_METRICS"; + private static final String SPANNER_DISABLE_GRPC_GCP_OTEL_METRICS = + "SPANNER_DISABLE_GRPC_GCP_OTEL_METRICS"; + private static final String SPANNER_MONITORING_HOST = "SPANNER_MONITORING_HOST"; + + private SpannerEnvironmentImpl() {} + + @Nonnull + @Override + public String getOptimizerVersion() { + return MoreObjects.firstNonNull(System.getenv(SPANNER_OPTIMIZER_VERSION_ENV_VAR), ""); + } + + @Nonnull + @Override + public String getOptimizerStatisticsPackage() { + return MoreObjects.firstNonNull( + System.getenv(SPANNER_OPTIMIZER_STATISTICS_PACKAGE_ENV_VAR), ""); + } + + @Override + public boolean isEnableExtendedTracing() { + return Boolean.parseBoolean(System.getenv(SPANNER_ENABLE_EXTENDED_TRACING)); + } + + @Override + public boolean isEnableApiTracing() { + return Boolean.parseBoolean(System.getenv(SPANNER_ENABLE_API_TRACING)); + } + + @Override + 
public boolean isEnableDirectAccess() { + return Boolean.parseBoolean(System.getenv(GOOGLE_SPANNER_ENABLE_DIRECT_ACCESS)); + } + + @Override + public boolean isEnableGcpFallback() { + return Boolean.parseBoolean(System.getenv(GOOGLE_SPANNER_ENABLE_GCP_FALLBACK)); + } + + @Override + public boolean isEnableBuiltInMetrics() { + return !Boolean.parseBoolean(System.getenv(SPANNER_DISABLE_BUILTIN_METRICS)); + } + + @Override + public boolean isEnableGRPCBuiltInMetrics() { + // Enable gRPC built-in metrics as default unless explicitly + // disabled via env. + return !Boolean.parseBoolean( + System.getenv(SPANNER_DISABLE_DIRECT_ACCESS_GRPC_BUILTIN_METRICS)); + } + + @Override + public boolean isEnableGrpcGcpOtelMetrics() { + return !Boolean.parseBoolean(System.getenv(SPANNER_DISABLE_GRPC_GCP_OTEL_METRICS)); + } + + @Override + public boolean isEnableEndToEndTracing() { + return Boolean.parseBoolean(System.getenv(SPANNER_ENABLE_END_TO_END_TRACING)); + } + + @Override + public boolean isEnableLocationApi() { + return Boolean.parseBoolean(System.getenv(EXPERIMENTAL_LOCATION_API_ENV_VAR)); + } + + @Override + public String getMonitoringHost() { + return System.getenv(SPANNER_MONITORING_HOST); + } + + @Override + public GoogleCredentials getDefaultExperimentalHostCredentials() { + return getOAuthTokenFromFile(System.getenv(DEFAULT_SPANNER_EXPERIMENTAL_HOST_CREDENTIALS)); + } + } + + /** Builder for {@link SpannerOptions} instances. 
*/ + public static class Builder + extends ServiceOptions.Builder { + static final int DEFAULT_PREFETCH_CHUNKS = 4; + static final QueryOptions DEFAULT_QUERY_OPTIONS = QueryOptions.getDefaultInstance(); + static final DecodeMode DEFAULT_DECODE_MODE = DecodeMode.DIRECT; + static final RetrySettings DEFAULT_ADMIN_REQUESTS_LIMIT_EXCEEDED_RETRY_SETTINGS = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofSeconds(5L)) + .setRetryDelayMultiplier(2.0) + .setMaxRetryDelayDuration(Duration.ofSeconds(60L)) + .setMaxAttempts(10) + .build(); + private final ImmutableSet allowedClientLibTokens = + ImmutableSet.of( + ServiceOptions.getGoogApiClientLibName(), + createCustomClientLibToken(JDBC_API_CLIENT_LIB_TOKEN), + createCustomClientLibToken(HIBERNATE_API_CLIENT_LIB_TOKEN), + createCustomClientLibToken(LIQUIBASE_API_CLIENT_LIB_TOKEN), + createCustomClientLibToken(PG_ADAPTER_CLIENT_LIB_TOKEN)); + private TransportChannelProvider channelProvider; + private ChannelEndpointCacheFactory channelEndpointCacheFactory; + + @SuppressWarnings("rawtypes") + private ApiFunction channelConfigurator; + + private GrpcInterceptorProvider interceptorProvider; + + private Integer numChannels; + private boolean numChannelsExplicitlySet = false; + + private String transportChannelExecutorThreadNameFormat = "Cloud-Spanner-TransportChannel-%d"; + + private int prefetchChunks = DEFAULT_PREFETCH_CHUNKS; + private DecodeMode decodeMode = DEFAULT_DECODE_MODE; + private SessionPoolOptions sessionPoolOptions; + private String databaseRole; + private ImmutableMap sessionLabels; + private SpannerStubSettings.Builder spannerStubSettingsBuilder = + SpannerStubSettings.newBuilder(); + private InstanceAdminStubSettings.Builder instanceAdminStubSettingsBuilder = + InstanceAdminStubSettings.newBuilder(); + private DatabaseAdminStubSettings.Builder databaseAdminStubSettingsBuilder = + DatabaseAdminStubSettings.newBuilder(); + private Duration partitionedDmlTimeout = Duration.ofHours(2L); + 
private boolean grpcGcpExtensionEnabled = true; + private GcpManagedChannelOptions grpcGcpOptions; + private Boolean dynamicChannelPoolEnabled; + private GcpChannelPoolOptions gcpChannelPoolOptions; + private RetrySettings retryAdministrativeRequestsSettings = + DEFAULT_ADMIN_REQUESTS_LIMIT_EXCEEDED_RETRY_SETTINGS; + private boolean autoThrottleAdministrativeRequests = false; + private boolean trackTransactionStarter = false; + private Map defaultQueryOptions = new HashMap<>(); + private boolean enableGrpcGcpOtelMetrics = + SpannerOptions.environment.isEnableGrpcGcpOtelMetrics(); + private CallCredentialsProvider callCredentialsProvider; + private CloseableExecutorProvider asyncExecutorProvider; + private String compressorName; + private String emulatorHost = System.getenv("SPANNER_EMULATOR_HOST"); + private boolean leaderAwareRoutingEnabled = true; + private boolean enableDirectAccess = SpannerOptions.environment.isEnableDirectAccess(); + private boolean enableGcpFallback = SpannerOptions.environment.isEnableGcpFallback(); + private DirectedReadOptions directedReadOptions; + private boolean useVirtualThreads = false; + private OpenTelemetry openTelemetry; + private boolean enableApiTracing = SpannerOptions.environment.isEnableApiTracing(); + private boolean enableExtendedTracing = SpannerOptions.environment.isEnableExtendedTracing(); + private boolean enableEndToEndTracing = SpannerOptions.environment.isEnableEndToEndTracing(); + private boolean enableBuiltInMetrics = SpannerOptions.environment.isEnableBuiltInMetrics(); + private boolean enableLocationApi = SpannerOptions.environment.isEnableLocationApi(); + private String monitoringHost = SpannerOptions.environment.getMonitoringHost(); + private SslContext mTLSContext = null; + private String experimentalHost = null; + private boolean usePlainText = false; + private TransactionOptions defaultTransactionOptions = TransactionOptions.getDefaultInstance(); + private RequestOptions.ClientContext clientContext; + + 
private static String createCustomClientLibToken(String token) { + return token + " " + ServiceOptions.getGoogApiClientLibName(); + } + + protected Builder() { + // Manually set retry and polling settings that work. + RetrySettings baseRetrySettings = + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofSeconds(60L)) + .setMaxRpcTimeoutDuration(Duration.ofSeconds(600L)) + .setMaxRetryDelayDuration(Duration.ofSeconds(45L)) + .setRetryDelayMultiplier(1.5) + .setRpcTimeoutMultiplier(1.5) + .setTotalTimeoutDuration(Duration.ofHours(48L)) + .build(); + + // The polling setting with a short initial delay as we expect + // it to return soon. + OperationTimedPollAlgorithm shortInitialPollingDelayAlgorithm = + OperationTimedPollAlgorithm.create( + baseRetrySettings.toBuilder() + .setInitialRetryDelayDuration(Duration.ofSeconds(1L)) + .build()); + databaseAdminStubSettingsBuilder + .createDatabaseOperationSettings() + .setPollingAlgorithm(shortInitialPollingDelayAlgorithm); + + // The polling setting with a long initial delay as we expect + // the operation to take a bit long time to return. + OperationTimedPollAlgorithm longInitialPollingDelayAlgorithm = + OperationTimedPollAlgorithm.create( + baseRetrySettings.toBuilder() + .setInitialRetryDelayDuration(Duration.ofSeconds(20L)) + .build()); + databaseAdminStubSettingsBuilder + .createBackupOperationSettings() + .setPollingAlgorithm(longInitialPollingDelayAlgorithm); + databaseAdminStubSettingsBuilder + .restoreDatabaseOperationSettings() + .setPollingAlgorithm(longInitialPollingDelayAlgorithm); + + // updateDatabaseDdl requires a separate setting because + // it has no existing overrides on RPC timeouts for LRO polling. 
+ databaseAdminStubSettingsBuilder + .updateDatabaseDdlOperationSettings() + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofMillis(45000L)) + .setInitialRpcTimeoutDuration(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ZERO) + .setTotalTimeoutDuration(Duration.ofHours(48L)) + .build())); + } + + Builder(SpannerOptions options) { + super(options); + if (options.getHost() != null + && this.emulatorHost != null + && !options.getHost().equals(this.emulatorHost)) { + this.emulatorHost = null; + } + this.numChannels = options.numChannels; + this.transportChannelExecutorThreadNameFormat = + options.transportChannelExecutorThreadNameFormat; + this.sessionPoolOptions = options.sessionPoolOptions; + this.prefetchChunks = options.prefetchChunks; + this.decodeMode = options.decodeMode; + this.databaseRole = options.databaseRole; + this.sessionLabels = options.sessionLabels; + this.spannerStubSettingsBuilder = options.spannerStubSettings.toBuilder(); + this.instanceAdminStubSettingsBuilder = options.instanceAdminStubSettings.toBuilder(); + this.databaseAdminStubSettingsBuilder = options.databaseAdminStubSettings.toBuilder(); + this.partitionedDmlTimeout = options.partitionedDmlTimeout; + this.grpcGcpExtensionEnabled = options.grpcGcpExtensionEnabled; + this.grpcGcpOptions = options.grpcGcpOptions; + this.dynamicChannelPoolEnabled = options.dynamicChannelPoolEnabled; + this.gcpChannelPoolOptions = options.gcpChannelPoolOptions; + this.autoThrottleAdministrativeRequests = options.autoThrottleAdministrativeRequests; + this.retryAdministrativeRequestsSettings = options.retryAdministrativeRequestsSettings; + this.trackTransactionStarter = options.trackTransactionStarter; + this.enableGrpcGcpOtelMetrics = options.enableGrpcGcpOtelMetrics; + this.defaultQueryOptions = 
options.defaultQueryOptions; + this.callCredentialsProvider = options.callCredentialsProvider; + this.asyncExecutorProvider = options.asyncExecutorProvider; + this.compressorName = options.compressorName; + this.channelProvider = options.channelProvider; + this.channelEndpointCacheFactory = options.channelEndpointCacheFactory; + this.channelConfigurator = options.channelConfigurator; + this.interceptorProvider = options.interceptorProvider; + this.enableDirectAccess = options.enableDirectAccess; + this.enableGcpFallback = options.enableGcpFallback; + this.directedReadOptions = options.directedReadOptions; + this.useVirtualThreads = options.useVirtualThreads; + this.enableApiTracing = options.enableApiTracing; + this.enableExtendedTracing = options.enableExtendedTracing; + this.enableBuiltInMetrics = options.enableBuiltInMetrics; + this.enableLocationApi = options.enableLocationApi; + this.enableEndToEndTracing = options.enableEndToEndTracing; + this.monitoringHost = options.monitoringHost; + this.defaultTransactionOptions = options.defaultTransactionOptions; + this.clientContext = options.clientContext; + } + + @Override + public Builder setTransportOptions(TransportOptions transportOptions) { + if (!(transportOptions instanceof GrpcTransportOptions)) { + throw new IllegalArgumentException( + "Only grpc transport is allowed for " + API_SHORT_NAME + "."); + } + return super.setTransportOptions(transportOptions); + } + + @Override + protected Set getAllowedClientLibTokens() { + return allowedClientLibTokens; + } + + @InternalApi + @Override + public SpannerOptions.Builder setClientLibToken(String clientLibToken) { + return super.setClientLibToken( + clientLibToken + " " + ServiceOptions.getGoogApiClientLibName()); + } + + /** + * Sets the {@code ChannelProvider}. {@link GapicSpannerRpc} would create a default one if none + * is provided. + * + *

Setting a custom {@link TransportChannelProvider} also overrides any other settings that + * affect the default channel provider. These must be set manually on the custom {@link + * TransportChannelProvider} instead of on {@link SpannerOptions}. The settings of {@link + * SpannerOptions} that have no effect if you set a custom {@link TransportChannelProvider} are: + * + *

    + *
  1. {@link #setChannelConfigurator(ApiFunction)} + *
  2. {@link #setHost(String)} + *
  3. {@link #setNumChannels(int)} + *
  4. {@link #setInterceptorProvider(GrpcInterceptorProvider)} + *
  5. {@link #setHeaderProvider(com.google.api.gax.rpc.HeaderProvider)} + *
+ */ + public Builder setChannelProvider(TransportChannelProvider channelProvider) { + this.channelProvider = channelProvider; + return this; + } + + @InternalApi + public Builder setChannelEndpointCacheFactory( + ChannelEndpointCacheFactory channelEndpointCacheFactory) { + this.channelEndpointCacheFactory = channelEndpointCacheFactory; + return this; + } + + /** + * Sets an {@link ApiFunction} that will be used to configure the transport channel. This will + * only be used if no custom {@link TransportChannelProvider} has been set. + */ + public Builder setChannelConfigurator( + @SuppressWarnings("rawtypes") + ApiFunction channelConfigurator) { + this.channelConfigurator = channelConfigurator; + return this; + } + + /** + * Sets the {@code GrpcInterceptorProvider}. {@link GapicSpannerRpc} would create a default one + * if none is provided. + */ + public Builder setInterceptorProvider(GrpcInterceptorProvider interceptorProvider) { + this.interceptorProvider = interceptorProvider; + return this; + } + + /** + * Sets the number of gRPC channels to use. By default 4 channels are created per {@link + * SpannerOptions}. + */ + public Builder setNumChannels(int numChannels) { + this.numChannels = numChannels; + this.numChannelsExplicitlySet = true; + return this; + } + + /** Sets the name format for transport channel threads that should be used by this instance. */ + Builder setTransportChannelExecutorThreadNameFormat( + String transportChannelExecutorThreadNameFormat) { + this.transportChannelExecutorThreadNameFormat = transportChannelExecutorThreadNameFormat; + return this; + } + + /** + * Sets the options for managing the session pool. If not specified then the default {@code + * SessionPoolOptions} is used. + */ + public Builder setSessionPoolOption(SessionPoolOptions sessionPoolOptions) { + this.sessionPoolOptions = sessionPoolOptions; + return this; + } + + /** + * Sets the database role that should be used for connections that are created by this instance. 
+ * The database role that is used determines the access permissions that a connection has. This + * can for example be used to create connections that are only permitted to access certain + * tables. + */ + public Builder setDatabaseRole(String databaseRole) { + this.databaseRole = databaseRole; + return this; + } + + /** + * Sets the labels to add to all Sessions created in this client. + * + * @param sessionLabels Map from label key to label value. Label key and value cannot be null. + * For more information on valid syntax see + * api docs . + */ + public Builder setSessionLabels(Map sessionLabels) { + Preconditions.checkNotNull(sessionLabels, "Session labels map cannot be null"); + for (String value : sessionLabels.values()) { + Preconditions.checkNotNull(value, "Null values are not allowed in the labels map."); + } + this.sessionLabels = ImmutableMap.copyOf(sessionLabels); + return this; + } + + /** + * {@link SpannerOptions.Builder} does not support global retry settings, as it creates three + * different gRPC clients: {@link Spanner}, {@link DatabaseAdminClient} and {@link + * InstanceAdminClient}. Instead of calling this method, you should set specific {@link + * RetrySettings} for each of the underlying gRPC clients by calling respectively {@link + * #getSpannerStubSettingsBuilder()}, {@link #getDatabaseAdminStubSettingsBuilder()} or {@link + * #getInstanceAdminStubSettingsBuilder()}. + */ + @Override + public Builder setRetrySettings(RetrySettings retrySettings) { + throw new UnsupportedOperationException( + "SpannerOptions does not support setting global retry settings. Call" + + " spannerStubSettingsBuilder().Settings().setRetrySettings(RetrySettings)" + + " instead."); + } + + /** + * Returns the {@link SpannerStubSettings.Builder} that will be used to build the {@link + * SpannerRpc}. Use this to set custom {@link RetrySettings} for individual gRPC methods. + * + *

The library will automatically use the defaults defined in {@link SpannerStubSettings} if + * no custom settings are set. The defaults are the same as the defaults that are used by {@link + * SpannerSettings}, and are generated from the file spanner_gapic.yaml. + * Retries are configured for idempotent methods but not for non-idempotent methods. + * + *

You can set the same {@link RetrySettings} for all unary methods by calling this: + * + *


+     * builder
+     *     .getSpannerStubSettingsBuilder()
+     *     .applyToAllUnaryMethods(
+     *         new ApiFunction<UnaryCallSettings.Builder<?, ?>, Void>() {
+     *           public Void apply(Builder<?, ?> input) {
+     *             input.setRetrySettings(retrySettings);
+     *             return null;
+     *           }
+     *         });
+     * 
+ */ + public SpannerStubSettings.Builder getSpannerStubSettingsBuilder() { + return spannerStubSettingsBuilder; + } + + /** + * Returns the {@link InstanceAdminStubSettings.Builder} that will be used to build the {@link + * SpannerRpc}. Use this to set custom {@link RetrySettings} for individual gRPC methods. + * + *

The library will automatically use the defaults defined in {@link + * InstanceAdminStubSettings} if no custom settings are set. The defaults are the same as the + * defaults that are used by {@link InstanceAdminSettings}, and are generated from the file spanner_admin_instance_gapic.yaml. + * Retries are configured for idempotent methods but not for non-idempotent methods. + * + *

You can set the same {@link RetrySettings} for all unary methods by calling this: + * + *


+     * builder
+     *     .getInstanceAdminStubSettingsBuilder()
+     *     .applyToAllUnaryMethods(
+     *         new ApiFunction<UnaryCallSettings.Builder<?, ?>, Void>() {
+     *           public Void apply(Builder<?, ?> input) {
+     *             input.setRetrySettings(retrySettings);
+     *             return null;
+     *           }
+     *         });
+     * 
+ */ + public InstanceAdminStubSettings.Builder getInstanceAdminStubSettingsBuilder() { + return instanceAdminStubSettingsBuilder; + } + + /** + * Returns the {@link DatabaseAdminStubSettings.Builder} that will be used to build the {@link + * SpannerRpc}. Use this to set custom {@link RetrySettings} for individual gRPC methods. + * + *

The library will automatically use the defaults defined in {@link + * DatabaseAdminStubSettings} if no custom settings are set. The defaults are the same as the + * defaults that are used by {@link DatabaseAdminSettings}, and are generated from the file spanner_admin_database_gapic.yaml. + * Retries are configured for idempotent methods but not for non-idempotent methods. + * + *

You can set the same {@link RetrySettings} for all unary methods by calling this: + * + *


+     * builder
+     *     .getDatabaseAdminStubSettingsBuilder()
+     *     .applyToAllUnaryMethods(
+     *         new ApiFunction<UnaryCallSettings.Builder<?, ?>, Void>() {
+     *           public Void apply(Builder<?, ?> input) {
+     *             input.setRetrySettings(retrySettings);
+     *             return null;
+     *           }
+     *         });
+     * 
+ */ + public DatabaseAdminStubSettings.Builder getDatabaseAdminStubSettingsBuilder() { + return databaseAdminStubSettingsBuilder; + } + + /** This method is obsolete. Use {@link #setPartitionedDmlTimeoutDuration(Duration)} instead. */ + @ObsoleteApi("Use setPartitionedDmlTimeoutDuration(Duration) instead") + public Builder setPartitionedDmlTimeout(org.threeten.bp.Duration timeout) { + return setPartitionedDmlTimeoutDuration(toJavaTimeDuration(timeout)); + } + + /** + * Sets a timeout specifically for Partitioned DML statements executed through {@link + * DatabaseClient#executePartitionedUpdate(Statement, UpdateOption...)}. The default is 2 hours. + */ + public Builder setPartitionedDmlTimeoutDuration(Duration timeout) { + this.partitionedDmlTimeout = timeout; + return this; + } + + /** + * Instructs the client library to automatically throttle the number of administrative requests + * if the rate of administrative requests generated by this {@link Spanner} instance will exceed + * the administrative limits of Cloud Spanner. The default behavior is to not throttle any + * requests. If the limit is exceeded, Cloud Spanner will return a RESOURCE_EXHAUSTED error. + * More information on the administrative limits can be found here: + * https://cloud.google.com/spanner/quotas#administrative_limits. Setting this option is not a + * guarantee that the rate will never be exceeded, as this option will only throttle requests + * coming from this client. Additional requests from other clients could still cause the limit + * to be exceeded. + */ + public Builder setAutoThrottleAdministrativeRequests() { + this.autoThrottleAdministrativeRequests = true; + return this; + } + + /** + * Disables automatic retries of administrative requests that fail if the https://cloud.google.com/spanner/quotas#administrative_limits + * have been exceeded. You should disable these retries if you intend to handle these errors in + * your application. 
+ */ + public Builder disableAdministrativeRequestRetries() { + this.retryAdministrativeRequestsSettings = + this.retryAdministrativeRequestsSettings.toBuilder().setMaxAttempts(1).build(); + return this; + } + + /** + * Sets the retry settings for retrying administrative requests when the quota of administrative + * requests per minute has been exceeded. + */ + Builder setRetryAdministrativeRequestsSettings( + RetrySettings retryAdministrativeRequestsSettings) { + this.retryAdministrativeRequestsSettings = + Preconditions.checkNotNull(retryAdministrativeRequestsSettings); + return this; + } + + /** + * Instructs the client library to track the first request of each read/write transaction. This + * statement will include a BeginTransaction option and will return a transaction id as part of + * its result. All other statements in the same transaction must wait for this first statement + * to finish before they can proceed. By setting this option the client library will throw a + * {@link SpannerException} with {@link ErrorCode#DEADLINE_EXCEEDED} for any subsequent + * statement that has waited for at least 60 seconds for the first statement to return a + * transaction id, including the stacktrace of the initial statement that should have returned a + * transaction id. + */ + public Builder setTrackTransactionStarter() { + this.trackTransactionStarter = true; + return this; + } + + /** + * Sets the default {@link QueryOptions} that will be used for all queries on the specified + * database. Query options can also be specified on a per-query basis and as environment + * variables. The precedence of these settings are: + * + *
    + *
  1. Query options for a specific query + *
  2. Environment variables + *
  3. These default query options + *
+ * + * Each {@link QueryOption} value that is used for a query is determined individually based on + * the above precedence. If for example a value for {@link QueryOptions#getOptimizerVersion()} + * is specified in an environment variable and a value for {@link + * QueryOptions#getOptimizerStatisticsPackage()} is specified for a specific query, both values + * will be used for the specific query. Environment variables are only read during the + * initialization of a {@link SpannerOptions} instance. Changing an environment variable after + * initializing a {@link SpannerOptions} instance will not have any effect on that instance. + */ + public Builder setDefaultQueryOptions(DatabaseId database, QueryOptions defaultQueryOptions) { + this.defaultQueryOptions.put(database, defaultQueryOptions); + return this; + } + + /** Gets the {@link QueryOptions} specified in the {@link SpannerEnvironment}. */ + QueryOptions getEnvironmentQueryOptions() { + return QueryOptions.newBuilder() + .setOptimizerVersion(environment.getOptimizerVersion()) + .setOptimizerStatisticsPackage(environment.getOptimizerStatisticsPackage()) + .build(); + } + + /** + * Sets a {@link CallCredentialsProvider} that can deliver {@link CallCredentials} to use on a + * per-gRPC basis. + */ + public Builder setCallCredentialsProvider(CallCredentialsProvider callCredentialsProvider) { + this.callCredentialsProvider = callCredentialsProvider; + return this; + } + + /** + * Sets the compression to use for all gRPC calls. The compressor must be a valid name known in + * the {@link CompressorRegistry}. This will enable compression both from the client to the + * server and from the server to the client. + * + *

Supported values are: + * + *

    + *
  • gzip: Enable gzip compression + *
  • identity: Disable compression + *
  • null: Use default compression + *
+ */ + @ExperimentalApi("https://github.com/grpc/grpc-java/issues/1704") + public Builder setCompressorName(@Nullable String compressorName) { + Preconditions.checkArgument( + compressorName == null + || CompressorRegistry.getDefaultInstance().lookupCompressor(compressorName) != null, + String.format("%s is not a known compressor", compressorName)); + this.compressorName = compressorName; + return this; + } + + /** + * Sets the {@link ExecutorProvider} to use for high-level async calls that need an executor, + * such as fetching results for an {@link AsyncResultSet}. + * + *

Async methods will use a sensible default if no custom {@link ExecutorProvider} has been + * set. The default {@link ExecutorProvider} uses a cached thread pool containing a maximum of 8 + * threads. The pool is lazily initialized and will not create any threads if the user + * application does not use any async methods. It will also scale down the thread usage if the + * async load allows for that. + * + *

Call {@link SpannerOptions#createAsyncExecutorProvider(int, long, TimeUnit)} to create a + * provider with a custom pool size or call {@link + * FixedCloseableExecutorProvider#create(ScheduledExecutorService)} to create a {@link + * CloseableExecutorProvider} from a standard Java {@link ScheduledExecutorService}. + */ + public Builder setAsyncExecutorProvider(CloseableExecutorProvider provider) { + this.asyncExecutorProvider = provider; + return this; + } + + /** + * Sets the {@link DirectedReadOption} that specify which replicas or regions should be used for + * non-transactional reads or queries. + * + *

DirectedReadOptions set at the request level will take precedence over the options set + * using this method. + * + *

An example below of how {@link DirectedReadOptions} can be constructed by including a + * replica. + * + *


+     * DirectedReadOptions.newBuilder()
+     *           .setIncludeReplicas(
+     *               IncludeReplicas.newBuilder()
+     *                   .addReplicaSelections(
+     *                       ReplicaSelection.newBuilder().setLocation("us-east1").build()))
+     *           .build();
+     *           }
+     * 
+ */ + public Builder setDirectedReadOptions(DirectedReadOptions directedReadOptions) { + this.directedReadOptions = + Preconditions.checkNotNull(directedReadOptions, "DirectedReadOptions cannot be null"); + return this; + } + + /** + * Specifying this will allow the client to prefetch up to {@code prefetchChunks} {@code + * PartialResultSet} chunks for each read and query. The data size of each chunk depends on the + * server implementation but a good rule of thumb is that each chunk will be up to 1 MiB. Larger + * values reduce the likelihood of blocking while consuming results at the cost of greater + * memory consumption. {@code prefetchChunks} should be greater than 0. To get good performance + * choose a value that is large enough to allow buffering of chunks for an entire row. Apart + * from the buffered chunks, there can be at most one more row buffered in the client. This can + * be overridden on a per read/query basis by {@link Options#prefetchChunks()}. If unspecified, + * we will use a default value (currently 4). + */ + public Builder setPrefetchChunks(int prefetchChunks) { + this.prefetchChunks = prefetchChunks; + return this; + } + + /** + * Specifies how values that are returned from a query should be decoded and converted from + * protobuf values into plain Java objects. + */ + public Builder setDecodeMode(DecodeMode decodeMode) { + this.decodeMode = decodeMode; + return this; + } + + @Override + public Builder setHost(String host) { + super.setHost(host); + // Setting a host should override any SPANNER_EMULATOR_HOST setting. 
+ setEmulatorHost(null); + return this; + } + + @ExperimentalApi("https://github.com/googleapis/java-spanner/pull/3676") + public Builder setExperimentalHost(String host) { + if (this.usePlainText) { + Preconditions.checkArgument( + !host.startsWith("https:"), + "Please remove the 'https:' protocol prefix from the host string when using plain text" + + " communication"); + if (!host.startsWith("http")) { + host = "http://" + host; + } + } + super.setHost(host); + super.setProjectId(EXPERIMENTAL_HOST_PROJECT_ID); + setSessionPoolOption(SessionPoolOptions.newBuilder().setExperimentalHost().build()); + this.experimentalHost = host; + return this; + } + + /** Enables gRPC-GCP extension with the default settings. This option is enabled by default. */ + public Builder enableGrpcGcpExtension() { + return this.enableGrpcGcpExtension(null); + } + + /** + * Enables gRPC-GCP extension and uses provided options for configuration. The metric registry + * and default Spanner metric labels will be added automatically. + */ + public Builder enableGrpcGcpExtension(GcpManagedChannelOptions options) { + this.grpcGcpExtensionEnabled = true; + this.grpcGcpOptions = options; + return this; + } + + /** Disables gRPC-GCP extension and uses GAX channel pool instead. */ + public Builder disableGrpcGcpExtension() { + this.grpcGcpExtensionEnabled = false; + return this; + } + + /** + * Enables dynamic channel pooling. When enabled, the client will automatically scale the number + * of channels based on load. This requires the gRPC-GCP extension to be enabled. + * + *

Dynamic channel pooling is disabled by default. Use this method to explicitly enable it. + * Note that calling {@link #setNumChannels(int)} will disable dynamic channel pooling even if + * this method was called. + */ + public Builder enableDynamicChannelPool() { + this.dynamicChannelPoolEnabled = true; + return this; + } + + /** + * Disables dynamic channel pooling. When disabled, the client will use a static number of + * channels as configured by {@link #setNumChannels(int)}. + * + *

Dynamic channel pooling is disabled by default, so this method is typically not needed + * unless you want to explicitly disable it after enabling it. + */ + public Builder disableDynamicChannelPool() { + this.dynamicChannelPoolEnabled = false; + return this; + } + + /** + * Sets whether to enable or disable grpc-gcp OpenTelemetry metrics injection. When disabled, + * Spanner will not automatically inject an OpenTelemetry {@link + * io.opentelemetry.api.metrics.Meter} into grpc-gcp. If a Meter or MetricRegistry is explicitly + * provided via {@link GcpManagedChannelOptions}, those settings will still be honored. + */ + public Builder setGrpcGcpOtelMetricsEnabled(boolean enableGrpcGcpOtelMetrics) { + this.enableGrpcGcpOtelMetrics = enableGrpcGcpOtelMetrics; + return this; + } + + /** + * Sets the channel pool options for dynamic channel pooling. Use this to configure the dynamic + * channel pool behavior when {@link #enableDynamicChannelPool()} is enabled. + * + *

If not set, Spanner-specific defaults will be used (see {@link + * #createDefaultDynamicChannelPoolOptions()}). + * + *

Example usage: + * + *

{@code
+     * SpannerOptions options = SpannerOptions.newBuilder()
+     *     .setProjectId("my-project")
+     *     .enableDynamicChannelPool()
+     *     .setGcpChannelPoolOptions(
+     *         GcpChannelPoolOptions.newBuilder()
+     *             .setMaxSize(15)
+     *             .setMinSize(3)
+     *             .setInitSize(5)
+     *             .setDynamicScaling(10, 30, Duration.ofMinutes(5))
+     *             .build())
+     *     .build();
+     * }
+ * + * @param gcpChannelPoolOptions the channel pool options to use + * @return this builder for chaining + */ + public Builder setGcpChannelPoolOptions(GcpChannelPoolOptions gcpChannelPoolOptions) { + this.gcpChannelPoolOptions = Preconditions.checkNotNull(gcpChannelPoolOptions); + return this; + } + + /** + * Sets the host of an emulator to use. By default the value is read from an environment + * variable. If the environment variable is not set, this will be null. + */ + public Builder setEmulatorHost(String emulatorHost) { + this.emulatorHost = emulatorHost; + return this; + } + + /** + * Configures mTLS authentication using the provided client certificate and key files. mTLS is + * only supported for experimental spanner hosts. + * + * @param clientCertificate Path to the client certificate file. + * @param clientCertificateKey Path to the client private key file. + * @throws SpannerException If an error occurs while configuring the mTLS context + */ + @ExperimentalApi("https://github.com/googleapis/java-spanner/pull/3574") + public Builder useClientCert(String clientCertificate, String clientCertificateKey) { + try { + this.mTLSContext = + GrpcSslContexts.forClient() + .keyManager(new File(clientCertificate), new File(clientCertificateKey)) + .build(); + } catch (Exception e) { + throw SpannerExceptionFactory.asSpannerException(e); + } + return this; + } + + /** + * {@code usePlainText} will configure the transport to use plaintext (no TLS) and will set + * credentials to {@link com.google.cloud.NoCredentials} to avoid sending authentication over an + * unsecured channel. + */ + @ExperimentalApi("https://github.com/googleapis/java-spanner/pull/4264") + public Builder usePlainText() { + this.usePlainText = true; + this.setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setCredentials(NoCredentials.getInstance()); + if (this.experimentalHost != null) { + // Re-apply host settings to ensure http:// is prepended. 
+ setExperimentalHost(this.experimentalHost); + } + return this; + } + + /** + * Sets OpenTelemetry object to be used for Spanner Metrics and Traces. GlobalOpenTelemetry will + * be used as fallback if this option is not set. + */ + public Builder setOpenTelemetry(OpenTelemetry openTelemetry) { + this.openTelemetry = openTelemetry; + return this; + } + + /** + * Enable leader aware routing. Leader aware routing would route all requests in RW/PDML + * transactions to the leader region. + */ + public Builder enableLeaderAwareRouting() { + this.leaderAwareRoutingEnabled = true; + return this; + } + + /** + * Disable leader aware routing. Disabling leader aware routing would route all requests in + * RW/PDML transactions to any region. + */ + public Builder disableLeaderAwareRouting() { + this.leaderAwareRoutingEnabled = false; + return this; + } + + @BetaApi + public Builder setEnableDirectAccess(boolean enableDirectAccess) { + this.enableDirectAccess = enableDirectAccess; + return this; + } + + @ObsoleteApi("Use setEnableDirectAccess(false) instead") + @Deprecated + public Builder disableDirectPath() { + this.enableDirectAccess = false; + return this; + } + + /** + * Enables/disables the use of virtual threads for the gRPC executor. Setting this option only + * has any effect on Java 21 and higher. In all other cases, the option will be ignored. + */ + @BetaApi + protected Builder setUseVirtualThreads(boolean useVirtualThreads) { + this.useVirtualThreads = useVirtualThreads; + return this; + } + + /** + * Creates and sets an {@link com.google.api.gax.tracing.ApiTracer} for the RPCs that are + * executed by this client. Enabling this creates traces for each individual RPC execution, + * including events/annotations when an RPC is retried or fails. The traces are only exported if + * an OpenTelemetry or OpenCensus trace exporter has been configured for the client. 
+ */ + public Builder setEnableApiTracing(boolean enableApiTracing) { + this.enableApiTracing = enableApiTracing; + return this; + } + + /** + * Sets whether to enable or disable built in metrics for Data client operations. Built in + * metrics are enabled by default. + */ + public Builder setBuiltInMetricsEnabled(boolean enableBuiltInMetrics) { + this.enableBuiltInMetrics = enableBuiltInMetrics; + return this; + } + + /** Sets the monitoring host to be used for Built-in client side metrics */ + @Deprecated + @ObsoleteApi( + "This will be removed in an upcoming version without a major version bump. You should use" + + " universalDomain to configure the built-in metrics endpoint for a partner universe.") + public Builder setMonitoringHost(String monitoringHost) { + this.monitoringHost = monitoringHost; + return this; + } + + /** + * Sets whether to enable extended OpenTelemetry tracing. Enabling this option will add the + * following additional attributes to the traces that are generated by the client: + * + *
    + *
  • db.statement: Contains the SQL statement that is being executed. + *
  • thread.name: The name of the thread that executes the statement. + *
+ */ + public Builder setEnableExtendedTracing(boolean enableExtendedTracing) { + this.enableExtendedTracing = enableExtendedTracing; + return this; + } + + /** + * Sets whether to enable end to end tracing. Enabling this option will create the trace spans + * at the Spanner layer. By default, end to end tracing is disabled. Enabling end to end tracing + * requires OpenTelemetry to be set up. Simply enabling this option won't generate traces at + * Spanner layer. + */ + public Builder setEnableEndToEndTracing(boolean enableEndToEndTracing) { + this.enableEndToEndTracing = enableEndToEndTracing; + return this; + } + + /** + * Provides the default read-write transaction options for all databases. These defaults are + * overridden by any explicit {@link com.google.cloud.spanner.Options.TransactionOption} + * provided through {@link DatabaseClient}. + * + *

Example Usage: + * + *

{@code
+     * DefaultReadWriteTransactionOptions options = DefaultReadWriteTransactionOptions.newBuilder()
+     * .setIsolationLevel(IsolationLevel.SERIALIZABLE)
+     * .setReadLockMode(ReadLockMode.OPTIMISTIC)
+     * .build();
+     * }
+ */ + public static class DefaultReadWriteTransactionOptions { + private final TransactionOptions defaultTransactionOptions; + + private DefaultReadWriteTransactionOptions(TransactionOptions defaultTransactionOptions) { + this.defaultTransactionOptions = defaultTransactionOptions; + } + + public static DefaultReadWriteTransactionOptionsBuilder newBuilder() { + return new DefaultReadWriteTransactionOptionsBuilder(); + } + + public static class DefaultReadWriteTransactionOptionsBuilder { + private final TransactionOptions.Builder transactionOptionsBuilder = + TransactionOptions.newBuilder(); + + public DefaultReadWriteTransactionOptionsBuilder setIsolationLevel( + IsolationLevel isolationLevel) { + transactionOptionsBuilder.setIsolationLevel(isolationLevel); + return this; + } + + public DefaultReadWriteTransactionOptionsBuilder setReadLockMode( + ReadLockMode readLockMode) { + transactionOptionsBuilder.getReadWriteBuilder().setReadLockMode(readLockMode); + return this; + } + + public DefaultReadWriteTransactionOptions build() { + return new DefaultReadWriteTransactionOptions(transactionOptionsBuilder.build()); + } + } + } + + /** Sets the {@link DefaultReadWriteTransactionOptions} for read-write transactions. */ + public Builder setDefaultTransactionOptions( + DefaultReadWriteTransactionOptions defaultReadWriteTransactionOptions) { + Preconditions.checkNotNull( + defaultReadWriteTransactionOptions, "DefaultReadWriteTransactionOptions cannot be null"); + this.defaultTransactionOptions = defaultReadWriteTransactionOptions.defaultTransactionOptions; + return this; + } + + /** Sets the default {@link RequestOptions.ClientContext} for all requests. */ + public Builder setDefaultClientContext(RequestOptions.ClientContext clientContext) { + this.clientContext = clientContext; + return this; + } + + @SuppressWarnings("rawtypes") + @Override + public SpannerOptions build() { + // Set the host of emulator has been set. 
+ if (emulatorHost != null && experimentalHost == null) { + if (!emulatorHost.startsWith("http")) { + emulatorHost = "http://" + emulatorHost; + } + this.setHost(emulatorHost); + // Channels are secure by default (via SSL/TLS). For the example we disable TLS to avoid + // needing certificates. + this.setChannelConfigurator(ManagedChannelBuilder::usePlaintext); + // As we are using plain text, we should never send any credentials. + this.setCredentials(NoCredentials.getInstance()); + } else if (experimentalHost != null && credentials == null) { + credentials = environment.getDefaultExperimentalHostCredentials(); + } + if (this.numChannels == null) { + this.numChannels = + this.grpcGcpExtensionEnabled ? GRPC_GCP_ENABLED_DEFAULT_CHANNELS : DEFAULT_CHANNELS; + } + + synchronized (lock) { + if (activeTracingFramework == null) { + activeTracingFramework = TracingFramework.OPEN_CENSUS; + } + } + return new SpannerOptions(this); + } + } + + /** Returns default instance of {@code SpannerOptions}. */ + public static SpannerOptions getDefaultInstance() { + return newBuilder().build(); + } + + public static Builder newBuilder() { + return new Builder(); + } + + /** + * Sets the environment to use to read configuration. The default will read configuration from + * environment variables. + */ + public static void useEnvironment(SpannerEnvironment environment) { + SpannerOptions.environment = environment; + } + + /** + * Sets the environment to use to read configuration to the default environment. This will read + * configuration from environment variables. 
+ */ + public static void useDefaultEnvironment() { + SpannerOptions.environment = SpannerEnvironmentImpl.INSTANCE; + } + + @InternalApi + public static GoogleCredentials getDefaultExperimentalCredentialsFromSysEnv() { + return getOAuthTokenFromFile(System.getenv(DEFAULT_SPANNER_EXPERIMENTAL_HOST_CREDENTIALS)); + } + + private static @Nullable GoogleCredentials getOAuthTokenFromFile(@Nullable String file) { + if (!Strings.isNullOrEmpty(file)) { + String token; + try { + token = Base64.getEncoder().encodeToString(Files.readAllBytes(Paths.get(file))); + } catch (IOException e) { + throw SpannerExceptionFactory.newSpannerException(e); + } + return GoogleCredentials.create(new AccessToken(token, null)); + } + return null; + } + + /** + * Enables OpenTelemetry traces. Enabling OpenTelemetry traces will disable OpenCensus traces. By + * default, OpenCensus traces are enabled. + */ + public static void enableOpenTelemetryTraces() { + synchronized (lock) { + if (activeTracingFramework != null + && activeTracingFramework != TracingFramework.OPEN_TELEMETRY) { + throw new IllegalStateException( + "ActiveTracingFramework is set to OpenCensus and cannot be reset after SpannerOptions" + + " object is created."); + } + activeTracingFramework = TracingFramework.OPEN_TELEMETRY; + } + } + + /** Enables OpenCensus traces. Enabling OpenCensus traces will disable OpenTelemetry traces. */ + @ObsoleteApi( + "The OpenCensus project is deprecated. Use enableOpenTelemetryTraces to switch to" + + " OpenTelemetry traces") + public static void enableOpenCensusTraces() { + synchronized (lock) { + if (activeTracingFramework != null + && activeTracingFramework != TracingFramework.OPEN_CENSUS) { + throw new IllegalStateException( + "ActiveTracingFramework is set to OpenTelemetry and cannot be reset after" + + " SpannerOptions object is created."); + } + activeTracingFramework = TracingFramework.OPEN_CENSUS; + } + } + + /** + * Always resets the activeTracingFramework. 
This variable is used for internal testing, and is + * not a valid production scenario + */ + @ObsoleteApi( + "The OpenCensus project is deprecated. Use enableOpenTelemetryTraces to switch to" + + " OpenTelemetry traces") + @VisibleForTesting + static void resetActiveTracingFramework() { + activeTracingFramework = null; + } + + public static TracingFramework getActiveTracingFramework() { + synchronized (lock) { + if (activeTracingFramework == null) { + return TracingFramework.OPEN_CENSUS; + } + return activeTracingFramework; + } + } + + /** Disables OpenCensus metrics. Disable OpenCensus metrics before creating Spanner client. */ + public static void disableOpenCensusMetrics() { + SpannerOptions.enableOpenCensusMetrics = false; + } + + @VisibleForTesting + static void enableOpenCensusMetrics() { + SpannerOptions.enableOpenCensusMetrics = true; + } + + public static boolean isEnabledOpenCensusMetrics() { + return SpannerOptions.enableOpenCensusMetrics; + } + + /** Enables OpenTelemetry metrics. Enable OpenTelemetry metrics before creating Spanner client. */ + public static void enableOpenTelemetryMetrics() { + SpannerOptions.enableOpenTelemetryMetrics = true; + } + + public static boolean isEnabledOpenTelemetryMetrics() { + return SpannerOptions.enableOpenTelemetryMetrics; + } + + @Override + protected String getDefaultProject() { + String projectId = getDefaultProjectId(); + // The project id does not matter if we are using the emulator. 
+ if (projectId == null && System.getenv("SPANNER_EMULATOR_HOST") != null) { + return "emulator-project"; + } + return projectId; + } + + public TransportChannelProvider getChannelProvider() { + return channelProvider; + } + + @InternalApi + public ChannelEndpointCacheFactory getChannelEndpointCacheFactory() { + return channelEndpointCacheFactory; + } + + @SuppressWarnings("rawtypes") + public ApiFunction getChannelConfigurator() { + return channelConfigurator; + } + + public GrpcInterceptorProvider getInterceptorProvider() { + return interceptorProvider; + } + + public int getNumChannels() { + return numChannels; + } + + public String getTransportChannelExecutorThreadNameFormat() { + return transportChannelExecutorThreadNameFormat; + } + + public SessionPoolOptions getSessionPoolOptions() { + return sessionPoolOptions; + } + + public String getDatabaseRole() { + return databaseRole; + } + + public Map getSessionLabels() { + return sessionLabels; + } + + public SpannerStubSettings getSpannerStubSettings() { + return spannerStubSettings; + } + + public InstanceAdminStubSettings getInstanceAdminStubSettings() { + return instanceAdminStubSettings; + } + + public DatabaseAdminStubSettings getDatabaseAdminStubSettings() { + return databaseAdminStubSettings; + } + + public org.threeten.bp.Duration getPartitionedDmlTimeout() { + return toThreetenDuration(getPartitionedDmlTimeoutDuration()); + } + + public Duration getPartitionedDmlTimeoutDuration() { + return partitionedDmlTimeout; + } + + public boolean isGrpcGcpExtensionEnabled() { + return grpcGcpExtensionEnabled; + } + + public boolean isGrpcGcpOtelMetricsEnabled() { + return enableGrpcGcpOtelMetrics; + } + + public GcpManagedChannelOptions getGrpcGcpOptions() { + return grpcGcpOptions; + } + + /** + * Returns whether dynamic channel pooling is enabled. Dynamic channel pooling is disabled by + * default. Use {@link Builder#enableDynamicChannelPool()} to explicitly enable it. 
Note that + * calling {@link Builder#setNumChannels(int)} will disable dynamic channel pooling even if it was + * explicitly enabled. + */ + public boolean isDynamicChannelPoolEnabled() { + return dynamicChannelPoolEnabled; + } + + /** + * Returns the channel pool options for dynamic channel pooling. If no options were explicitly + * set, returns the Spanner-specific defaults. + * + * @see #createDefaultDynamicChannelPoolOptions() + */ + public GcpChannelPoolOptions getGcpChannelPoolOptions() { + return gcpChannelPoolOptions; + } + + public boolean isAutoThrottleAdministrativeRequests() { + return autoThrottleAdministrativeRequests; + } + + public RetrySettings getRetryAdministrativeRequestsSettings() { + return retryAdministrativeRequestsSettings; + } + + public boolean isTrackTransactionStarter() { + return trackTransactionStarter; + } + + public CallCredentialsProvider getCallCredentialsProvider() { + return callCredentialsProvider; + } + + private boolean usesNoCredentials() { + // When JMH is enabled, we need to enable built-in metrics + if (System.getProperty("jmh.enabled") != null + && System.getProperty("jmh.enabled").equals("true")) { + return false; + } + return Objects.equals(getCredentials(), NoCredentials.getInstance()); + } + + public String getCompressorName() { + return compressorName; + } + + public boolean isLeaderAwareRoutingEnabled() { + return leaderAwareRoutingEnabled; + } + + public DirectedReadOptions getDirectedReadOptions() { + return directedReadOptions; + } + + @BetaApi + public Boolean isEnableDirectAccess() { + return enableDirectAccess; + } + + public Boolean isEnableGcpFallback() { + return enableGcpFallback; + } + + @ObsoleteApi("Use isEnableDirectAccess() instead") + @Deprecated + public boolean isAttemptDirectPath() { + return enableDirectAccess; + } + + /** + * Returns an instance of OpenTelemetry. If OpenTelemetry object is not set via SpannerOptions + * then GlobalOpenTelemetry will be used as fallback. 
+ */ + public OpenTelemetry getOpenTelemetry() { + if (this.openTelemetry != null) { + return this.openTelemetry; + } else { + return GlobalOpenTelemetry.get(); + } + } + + @Override + public ApiTracerFactory getApiTracerFactory() { + return createApiTracerFactory(false, false); + } + + public void enablegRPCMetrics(InstantiatingGrpcChannelProvider.Builder channelProviderBuilder) { + if (SpannerOptions.environment.isEnableGRPCBuiltInMetrics()) { + this.builtInMetricsProvider.enableGrpcMetrics( + channelProviderBuilder, + this.getProjectId(), + getCredentials(), + this.monitoringHost, + getUniverseDomain()); + } + } + + public ApiTracerFactory getApiTracerFactory(boolean isAdminClient, boolean isEmulatorEnabled) { + return createApiTracerFactory(isAdminClient, isEmulatorEnabled); + } + + private ApiTracerFactory createApiTracerFactory( + boolean isAdminClient, boolean isEmulatorEnabled) { + List apiTracerFactories = new ArrayList<>(); + // Prefer any direct ApiTracerFactory that might have been set on the builder. + apiTracerFactories.add( + MoreObjects.firstNonNull(super.getApiTracerFactory(), getDefaultApiTracerFactory())); + + // Add Metrics Tracer factory if built in metrics are enabled and if the client is data client + // and if emulator is not enabled. 
+ if (isEnableBuiltInMetrics() && !isAdminClient && !isEmulatorEnabled && !usesNoCredentials()) { + ApiTracerFactory metricsTracerFactory = createMetricsApiTracerFactory(); + if (metricsTracerFactory != null) { + apiTracerFactories.add(metricsTracerFactory); + } + } + + return new CompositeTracerFactory(apiTracerFactories); + } + + private ApiTracerFactory getDefaultApiTracerFactory() { + if (isEnableApiTracing()) { + if (activeTracingFramework == TracingFramework.OPEN_TELEMETRY) { + return new OpenTelemetryApiTracerFactory( + getOpenTelemetry() + .getTracer( + MetricRegistryConstants.INSTRUMENTATION_SCOPE, + GaxProperties.getLibraryVersion(getClass())), + Attributes.empty()); + } else if (activeTracingFramework == TracingFramework.OPEN_CENSUS) { + return new OpencensusTracerFactory(); + } + } + return BaseApiTracerFactory.getInstance(); + } + + private ApiTracerFactory createMetricsApiTracerFactory() { + OpenTelemetry openTelemetry = + this.builtInMetricsProvider.getOrCreateOpenTelemetry( + this.getProjectId(), getCredentials(), this.monitoringHost, getUniverseDomain()); + + return openTelemetry != null + ? new BuiltInMetricsTracerFactory( + new BuiltInMetricsRecorder(openTelemetry, BuiltInMetricsConstant.METER_NAME), + new HashMap<>(), + new TraceWrapper( + Tracing.getTracer(), + // Using the OpenTelemetry object set in Spanner Options, will be NoOp if not set + this.getOpenTelemetry() + .getTracer( + MetricRegistryConstants.INSTRUMENTATION_SCOPE, + GaxProperties.getLibraryVersion(getClass())), + true)) + : null; + } + + /** + * Returns true if an {@link com.google.api.gax.tracing.ApiTracer} should be created and set on + * the Spanner client. Enabling this only has effect if an OpenTelemetry or OpenCensus trace + * exporter has been configured. + */ + public boolean isEnableApiTracing() { + return enableApiTracing; + } + + /** + * Returns true if an {@link com.google.api.gax.tracing.MetricsTracer} should be created and set + * on the Spanner client. 
+ */ + public boolean isEnableBuiltInMetrics() { + return enableBuiltInMetrics; + } + + @InternalApi + public boolean isEnableLocationApi() { + return enableLocationApi; + } + + /** Returns the override metrics Host. */ + String getMonitoringHost() { + return monitoringHost; + } + + public TransactionOptions getDefaultTransactionOptions() { + return defaultTransactionOptions; + } + + @BetaApi + public boolean isUseVirtualThreads() { + return useVirtualThreads; + } + + /** + * Returns whether extended OpenTelemetry tracing is enabled. Enabling this option will add the + * following additional attributes to the traces that are generated by the client: + * + *
    + *
  • db.statement: Contains the SQL statement that is being executed. + *
  • thread.name: The name of the thread that executes the statement. + *
+ */ + public boolean isEnableExtendedTracing() { + return enableExtendedTracing; + } + + /** + * Returns whether end to end tracing is enabled. If this option is enabled then trace spans will + * be created at the Spanner layer. + */ + public boolean isEndToEndTracingEnabled() { + return enableEndToEndTracing; + } + + /** Returns the default query options to use for the specific database. */ + public QueryOptions getDefaultQueryOptions(DatabaseId databaseId) { + // Use the specific query options for the database if any have been specified. These have + // already been merged with the query options specified in the environment variables. + QueryOptions options = this.mergedQueryOptions.get(databaseId); + if (options == null) { + // Use the generic environment query options. These are initialized as a default instance of + // query options and appended with any options specified in the environment variables. + options = this.envQueryOptions; + } + return options; + } + + public CloseableExecutorProvider getAsyncExecutorProvider() { + return asyncExecutorProvider; + } + + public int getPrefetchChunks() { + return prefetchChunks; + } + + public DecodeMode getDecodeMode() { + return decodeMode; + } + + public static GrpcTransportOptions getDefaultGrpcTransportOptions() { + return GrpcTransportOptions.newBuilder().build(); + } + + @Override + protected String getDefaultHost() { + String universeDomain = getUniverseDomain(); + if (Strings.isNullOrEmpty(universeDomain)) { + universeDomain = GOOGLE_DEFAULT_UNIVERSE; + } + return String.format("https://%s.%s", SPANNER_SERVICE_NAME, universeDomain); + } + + private static class SpannerDefaults implements ServiceDefaults { + + @Override + public SpannerFactory getDefaultServiceFactory() { + return DefaultSpannerFactory.INSTANCE; + } + + @Override + public SpannerRpcFactory getDefaultRpcFactory() { + return DefaultSpannerRpcFactory.INSTANCE; + } + + @Override + public TransportOptions getDefaultTransportOptions() { + return 
getDefaultGrpcTransportOptions(); + } + } + + @Override + public Set getScopes() { + return SCOPES; + } + + protected SpannerRpc getSpannerRpcV1() { + return (SpannerRpc) getRpc(); + } + + /** + * @return true if the cached Spanner service instance is null or + * closed. This will cause the method {@link #getService()} to create a new {@link SpannerRpc} + * instance when one is requested. + */ + @Override + protected boolean shouldRefreshService(Spanner cachedService) { + return cachedService == null || cachedService.isClosed(); + } + + /** + * @return true if the cached {@link ServiceRpc} instance is null or + * closed. This will cause the method {@link #getRpc()} to create a new {@link Spanner} + * instance when one is requested. + */ + @Override + protected boolean shouldRefreshRpc(ServiceRpc cachedRpc) { + return cachedRpc == null || ((SpannerRpc) cachedRpc).isClosed(); + } + + @SuppressWarnings("unchecked") + @Override + public Builder toBuilder() { + return new Builder(this); + } + + public String getEndpoint() { + URL url; + try { + url = new URL(getHost()); + } catch (MalformedURLException e) { + throw new IllegalArgumentException("Invalid host: " + getHost(), e); + } + return String.format( + "%s:%s", url.getHost(), url.getPort() < 0 ? url.getDefaultPort() : url.getPort()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerRetryHelper.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerRetryHelper.java new file mode 100644 index 000000000000..0dabcbd0094e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerRetryHelper.java @@ -0,0 +1,125 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.ApiClock; +import com.google.api.core.NanoClock; +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.retrying.TimedAttemptSettings; +import com.google.cloud.RetryHelper; +import com.google.cloud.RetryHelper.RetryHelperException; +import com.google.cloud.spanner.ErrorHandler.DefaultErrorHandler; +import com.google.cloud.spanner.v1.stub.SpannerStub; +import com.google.cloud.spanner.v1.stub.SpannerStubSettings; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Throwables; +import com.google.spanner.v1.RollbackRequest; +import io.grpc.Context; +import java.time.Duration; +import java.util.concurrent.Callable; +import java.util.concurrent.CancellationException; + +/** + * Util class for retrying aborted transactions. This class is a wrapper around {@link RetryHelper} + * that uses specific settings to only retry on aborted transactions, without a timeout and without + * a cap on the number of retries. + */ +class SpannerRetryHelper { + + /** + * Use the same {@link RetrySettings} for retrying an aborted transaction as for retrying a {@link + * RollbackRequest}. The {@link RollbackRequest} automatically uses the default retry settings + * defined for the {@link SpannerStub}. By referencing these settings, the retry settings for + * retrying aborted transactions will also automatically be updated if the default retry settings + * are updated. + * + *

A read/write transaction should not time out while retrying. The total timeout of the retry + * settings is therefore set to 24 hours and there is no max attempts value. + * + *

These default {@link RetrySettings} are only used if no retry information is returned by the + * {@link AbortedException}. + */ + @VisibleForTesting + static final RetrySettings txRetrySettings = + SpannerStubSettings.newBuilder().rollbackSettings().getRetrySettings().toBuilder() + .setTotalTimeoutDuration(Duration.ofHours(24L)) + .setMaxAttempts(0) + .build(); + + /** Executes the {@link Callable} and retries if it fails with an {@link AbortedException}. */ + static T runTxWithRetriesOnAborted(Callable callable) { + return runTxWithRetriesOnAborted(callable, DefaultErrorHandler.INSTANCE); + } + + static T runTxWithRetriesOnAborted(Callable callable, ErrorHandler errorHandler) { + return runTxWithRetriesOnAborted( + callable, errorHandler, txRetrySettings, NanoClock.getDefaultClock()); + } + + /** + * Executes the {@link Callable} and retries if it fails with an {@link AbortedException} using + * the specific {@link RetrySettings}. + */ + @VisibleForTesting + static T runTxWithRetriesOnAborted( + Callable callable, RetrySettings retrySettings, ApiClock clock) { + return runTxWithRetriesOnAborted(callable, DefaultErrorHandler.INSTANCE, retrySettings, clock); + } + + @VisibleForTesting + static T runTxWithRetriesOnAborted( + Callable callable, + ErrorHandler errorHandler, + RetrySettings retrySettings, + ApiClock clock) { + try { + return RetryHelper.runWithRetries(callable, retrySettings, new TxRetryAlgorithm<>(), clock); + } catch (RetryHelperException e) { + if (e.getCause() != null) { + Throwables.throwIfUnchecked(errorHandler.translateException(e.getCause())); + } + throw e; + } + } + + private static class TxRetryAlgorithm implements ResultRetryAlgorithm { + @Override + public TimedAttemptSettings createNextAttempt( + Throwable prevThrowable, T prevResponse, TimedAttemptSettings prevSettings) { + if (prevThrowable != null) { + long retryDelay = SpannerException.extractRetryDelay(prevThrowable); + if (retryDelay > -1L) { + return prevSettings.toBuilder() + 
.setRandomizedRetryDelayDuration(Duration.ofMillis(retryDelay)) + .build(); + } + } + return null; + } + + @Override + public boolean shouldRetry(Throwable prevThrowable, T prevResponse) + throws CancellationException { + if (Context.current().isCancelled()) { + throw SpannerExceptionFactory.newSpannerExceptionForCancellation(Context.current(), null); + } + return prevThrowable instanceof AbortedException + || prevThrowable instanceof com.google.api.gax.rpc.AbortedException; + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerRpcMetrics.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerRpcMetrics.java new file mode 100644 index 000000000000..794c211971d7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerRpcMetrics.java @@ -0,0 +1,74 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.spanner; + +import com.google.api.core.InternalApi; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.LongCounter; +import io.opentelemetry.api.metrics.LongHistogram; +import io.opentelemetry.api.metrics.Meter; +import java.util.Arrays; +import java.util.List; + +@InternalApi +public class SpannerRpcMetrics { + private final LongHistogram gfeLatencies; + private final LongCounter gfeHeaderMissingCount; + + public SpannerRpcMetrics(OpenTelemetry openTelemetry) { + if (!SpannerOptions.isEnabledOpenTelemetryMetrics()) { + gfeLatencies = null; + gfeHeaderMissingCount = null; + return; + } + + Meter meter = openTelemetry.getMeter(MetricRegistryConstants.INSTRUMENTATION_SCOPE); + List RPC_MILLIS_BUCKET_BOUNDARIES = + Arrays.asList( + 1L, 2L, 3L, 4L, 5L, 6L, 8L, 10L, 13L, 16L, 20L, 25L, 30L, 40L, 50L, 65L, 80L, 100L, + 130L, 160L, 200L, 250L, 300L, 400L, 500L, 650L, 800L, 1000L, 2000L, 5000L, 10000L, + 20000L, 50000L, 100000L); + gfeLatencies = + meter + .histogramBuilder(MetricRegistryConstants.SPANNER_GFE_LATENCY) + .ofLongs() + .setDescription(MetricRegistryConstants.SPANNER_GFE_LATENCY_DESCRIPTION) + .setUnit("ms") + .setExplicitBucketBoundariesAdvice(RPC_MILLIS_BUCKET_BOUNDARIES) + .build(); + gfeHeaderMissingCount = + meter + .counterBuilder(MetricRegistryConstants.SPANNER_GFE_HEADER_MISSING_COUNT) + .setDescription(MetricRegistryConstants.SPANNER_GFE_HEADER_MISSING_COUNT_DESCRIPTION) + .setUnit(MetricRegistryConstants.COUNT) + .build(); + } + + @InternalApi + public void recordGfeLatency(long value, Attributes attributes) { + if (gfeLatencies != null) { + gfeLatencies.record(value, attributes); + } + } + + @InternalApi + public void recordGfeHeaderMissingCount(long value, Attributes attributes) { + if (gfeHeaderMissingCount != null) { + gfeHeaderMissingCount.add(value, attributes); + } + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerTypeConverter.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerTypeConverter.java new file mode 100644 index 000000000000..02c0cc213d62 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerTypeConverter.java @@ -0,0 +1,111 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.cloud.Date; +import com.google.protobuf.ListValue; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.OffsetDateTime; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.time.temporal.TemporalAccessor; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +final class SpannerTypeConverter { + + private static final ZoneId UTC_ZONE = ZoneId.of("UTC"); + private static final DateTimeFormatter ISO_8601_DATE_FORMATTER = + DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSX"); + + static Value createUntypedArrayValue(Stream stream) { + List values = + stream + .map( + val -> + com.google.protobuf.Value.newBuilder() + .setStringValue(String.valueOf(val)) + .build()) + 
.collect(Collectors.toList()); + return Value.untyped( + com.google.protobuf.Value.newBuilder() + .setListValue(ListValue.newBuilder().addAllValues(values).build()) + .build()); + } + + static String convertToISO8601(T dateTime) { + return ISO_8601_DATE_FORMATTER.format(dateTime); + } + + static Value createUntypedStringValue(T value) { + return Value.untyped( + com.google.protobuf.Value.newBuilder().setStringValue(String.valueOf(value)).build()); + } + + static Iterable convertToTypedIterable( + Function func, T val, Iterator iterator) { + List values = new ArrayList<>(); + SpannerTypeConverter.processIterable(val, iterator, func, values::add); + return values; + } + + static Iterable convertToTypedIterable(T val, Iterator iterator) { + return convertToTypedIterable(v -> v, val, iterator); + } + + @SuppressWarnings("unchecked") + static void processIterable( + T val, Iterator iterator, Function func, Consumer consumer) { + consumer.accept(func.apply(val)); + iterator.forEachRemaining(values -> consumer.accept(func.apply((T) values))); + } + + static Date convertLocalDateToSpannerDate(LocalDate date) { + return Date.fromYearMonthDay(date.getYear(), date.getMonthValue(), date.getDayOfMonth()); + } + + static Value createUntypedIterableValue( + T value, Iterator iterator, Function func) { + ListValue.Builder listValueBuilder = ListValue.newBuilder(); + SpannerTypeConverter.processIterable( + value, + iterator, + (val) -> com.google.protobuf.Value.newBuilder().setStringValue(func.apply(val)).build(), + listValueBuilder::addValues); + return Value.untyped( + com.google.protobuf.Value.newBuilder().setListValue(listValueBuilder.build()).build()); + } + + static ZonedDateTime atUTC(LocalDateTime localDateTime) { + return atUTC(localDateTime.atZone(ZoneId.systemDefault())); + } + + static ZonedDateTime atUTC(OffsetDateTime localDateTime) { + return localDateTime.atZoneSameInstant(UTC_ZONE); + } + + static ZonedDateTime atUTC(ZonedDateTime localDateTime) { + return 
localDateTime.withZoneSameInstant(UTC_ZONE); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Statement.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Statement.java new file mode 100644 index 000000000000..1776139d81d1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Statement.java @@ -0,0 +1,355 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; + +import com.google.cloud.spanner.ReadContext.QueryAnalyzeMode; +import com.google.cloud.spanner.connection.AbstractStatementParser; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParametersInfo; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; +import java.io.Serializable; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * A SQL statement and optional bound parameters that can be executed in a {@link ReadContext}. + * + *

The SQL query string can contain parameter placeholders. A parameter placeholder consists of + * {@literal @} followed by the parameter name. Parameter names consist of any combination of + * letters, numbers, and underscores. + * + *

Parameters can appear anywhere that a literal value is expected. The same parameter name can + * be used more than once, for example: {@code WHERE id > @msg_id AND id < @msg_id + 100} + * + *

It is an error to execute an SQL query with placeholders for unbound parameters. + * + *

Statements are constructed using a builder. Parameter values are specified by calling {@link + * Builder#bind(String)}. For example, code to build a query using the clause above and bind a value + * to {@code id} might look like the following: + * + *

{@code
+ * Statement statement = Statement
+ *     .newBuilder("SELECT name WHERE id > @msg_id AND id < @msg_id + 100")
+ *     .bind("msg_id").to(500)
+ *     .build();
+ * }
+ * + *

{@code Statement} instances are immutable. + */ +public final class Statement implements Serializable { + private static final long serialVersionUID = -1967958247625065259L; + + private final Map parameters; + private final String sql; + private final QueryOptions queryOptions; + + private Statement(String sql, Map parameters, QueryOptions queryOptions) { + this.sql = sql; + this.parameters = parameters; + this.queryOptions = queryOptions; + } + + /** Builder for {@code Statement}. */ + public static final class Builder { + final Map parameters; + private final StringBuilder sqlBuffer; + private String currentBinding; + private final ValueBinder binder = new Binder(); + private QueryOptions queryOptions; + + private Builder(String sql) { + parameters = new HashMap<>(); + sqlBuffer = new StringBuilder(sql); + } + + private Builder(Statement statement) { + sqlBuffer = new StringBuilder(statement.sql); + parameters = new HashMap<>(statement.parameters); + queryOptions = + statement.queryOptions == null ? null : statement.queryOptions.toBuilder().build(); + } + + /** Replaces the current SQL of this builder with the given string. */ + public Builder replace(String sql) { + sqlBuffer.replace(0, sqlBuffer.length(), sql); + return this; + } + + /** Appends {@code sqlFragment} to the statement. */ + public Builder append(String sqlFragment) { + sqlBuffer.append(checkNotNull(sqlFragment)); + return this; + } + + /** Sets the {@link QueryOptions} to use when executing this {@link Statement}. */ + public Builder withQueryOptions(QueryOptions queryOptions) { + this.queryOptions = queryOptions; + return this; + } + + /** Returns a binder to bind the value of the query parameter {@code parameter}. */ + public ValueBinder bind(String parameter) { + checkState( + currentBinding == null, + "Cannot bind new parameter. Previous binding of parameter '%s' is incomplete.", + currentBinding); + currentBinding = parameter; + return binder; + } + + /** Builds the {@code Statement}. 
*/ + public Statement build() { + checkState( + currentBinding == null, "Binding for parameter '%s' is incomplete.", currentBinding); + return new Statement( + sqlBuffer.toString(), + Collections.unmodifiableMap(new HashMap<>(parameters)), + queryOptions); + } + + private class Binder extends ValueBinder { + @Override + Builder handle(Value value) { + Preconditions.checkArgument( + value == null || !value.isCommitTimestamp(), + "Mutation.COMMIT_TIMESTAMP cannot be bound as a query parameter"); + checkState(currentBinding != null, "No binding in progress"); + parameters.put(currentBinding, value); + currentBinding = null; + return Builder.this; + } + } + } + + /** Creates a {@code Statement} with the given SQL text {@code sql}. */ + public static Statement of(String sql) { + return new Statement(sql, ImmutableMap.of(), /* queryOptions= */ null); + } + + /** Creates a {@link Statement} with the given SQL text and parameters. */ + public static Statement of(String sql, ImmutableMap parameters) { + return new Statement(sql, parameters, /* queryOptions= */ null); + } + + /** Creates a new statement builder with the SQL text {@code sql}. */ + public static Builder newBuilder(String sql) { + return new Builder(sql); + } + + /** Returns {@code true} if a binding exists for {@code parameter}. */ + public boolean hasBinding(String parameter) { + return parameters.containsKey(parameter); + } + + /** + * Executes the query in {@code context}. {@code statement.executeQuery(context)} is exactly + * equivalent to {@code context.executeQuery(statement)}. + * + * @see ReadContext#executeQuery(Statement, Options.QueryOption...) + */ + public ResultSet executeQuery(ReadContext context, Options.QueryOption... options) { + return context.executeQuery(this, options); + } + + /** + * Analyzes the query in {@code context}. {@code statement.analyzeQuery(context, queryMode)} is + * exactly equivalent to {@code context.analyzeQuery(statement, queryMode)}. 
+ * + * @see ReadContext#analyzeQuery(Statement, com.google.cloud.spanner.ReadContext.QueryAnalyzeMode) + */ + public ResultSet analyzeQuery(ReadContext context, QueryAnalyzeMode queryMode) { + return context.analyzeQuery(this, queryMode); + } + + /** Returns the current SQL statement text. */ + public String getSql() { + return sql; + } + + /** Returns a copy of this statement with the SQL string replaced by the given SQL string. */ + public Statement withReplacedSql(String sql) { + return new Statement(sql, this.parameters, this.queryOptions); + } + + /** Returns the {@link QueryOptions} that will be used with this {@link Statement}. */ + public QueryOptions getQueryOptions() { + return queryOptions; + } + + /** Returns the parameters bound to this {@code Statement}. */ + public Map getParameters() { + return parameters; + } + + public Builder toBuilder() { + return new Builder(this); + } + + @Override + public String toString() { + return toString(new StringBuilder()).toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Statement that = (Statement) o; + return Objects.equals(sql, that.sql) + && Objects.equals(parameters, that.parameters) + && Objects.equals(queryOptions, that.queryOptions); + } + + @Override + public int hashCode() { + return Objects.hash(sql, parameters, queryOptions); + } + + StringBuilder toString(StringBuilder b) { + b.append(sql); + if (!parameters.isEmpty()) { + b.append(" {"); + int n = 0; + for (Map.Entry parameter : parameters.entrySet()) { + if (n++ > 0) { + b.append(", "); + } + b.append(parameter.getKey()).append(": "); + if (parameter.getValue() == null) { + b.append("NULL"); + } else { + parameter.getValue().toString(b); + } + } + b.append("}"); + } + if (queryOptions != null) { + b.append(",queryOptions=").append(queryOptions.toString()); + } + return b; + } + + /** + * Factory for creating {@link Statement}s 
with unnamed parameters. + * + *

This class is primarily intended for framework developers who want to integrate the Spanner + * client with a framework that uses unnamed parameters. Developers who want to use the Spanner + * client in their application, should use named parameters. + * + *

+ * + *

Usage Example

+ * + * Simple SQL query + * + *
{@code
+   * Statement statement = databaseClient.getStatementFactory()
+   *     .withUnnamedParameters("SELECT * FROM TABLE WHERE ID = ?", 10L)
+   * }
+ * + * SQL query with multiple parameters + * + *
{@code
+   * long id = 10L;
+   * String name = "google";
+   * List phoneNumbers = Arrays.asList("1234567890", "0987654321");
+   * Statement statement = databaseClient.getStatementFactory()
+   *      .withUnnamedParameters("INSERT INTO TABLE (ID, name, phonenumbers) VALUES(?, ?, ?)", id, name, phoneNumbers)
+   * }
+ * + * How to use arrays with the IN operator + * + *
{@code
+   * long[] ids = {10L, 12L, 1483L};
+   * Statement statement = databaseClient.getStatementFactory()
+   *     .withUnnamedParameters("SELECT * FROM TABLE WHERE ID = UNNEST(?)", ids)
+   * }
+ * + * @see DatabaseClient#getStatementFactory() + * @see StatementFactory#withUnnamedParameters(String, Object...) + */ + public static final class StatementFactory { + private final Dialect dialect; + + StatementFactory(Dialect dialect) { + this.dialect = dialect; + } + + public Statement of(String sql) { + return Statement.of(sql); + } + + /** + * This function accepts a SQL statement with unnamed parameters (?) and accepts a list of + * objects that should be used as the values for those parameters. Primitive types are + * supported. + * + *

For parameters of type DATE, the following types are supported + * + *

    + *
  • {@link java.time.LocalDate} + *
  • {@link com.google.cloud.Date} + *
+ * + *

For parameters of type TIMESTAMP, the following types are supported. Note that Spanner + * stores all timestamps in UTC. Instances of ZonedDateTime and OffsetDateTime that use other + * timezones than UTC, will be converted to the corresponding UTC values before being sent to + * Spanner. Instances of LocalDateTime will be converted to a ZonedDateTime using the system + * default timezone, and then converted to UTC before being sent to Spanner. + * + *

    + *
  • {@link java.time.LocalDateTime} + *
  • {@link java.time.OffsetDateTime} + *
  • {@link java.time.ZonedDateTime} + *
+ * + *

+ * + * @param sql SQL statement with unnamed parameters denoted as ? + * @param values positional list of values for the unnamed parameters in the SQL string + * @return Statement a statement that can be executed on Spanner + * @see DatabaseClient#getStatementFactory + */ + public Statement withUnnamedParameters(String sql, Object... values) { + Map parameters = getUnnamedParametersMap(values); + AbstractStatementParser statementParser = AbstractStatementParser.getInstance(this.dialect); + ParametersInfo parametersInfo = + statementParser.convertPositionalParametersToNamedParameters('?', sql); + return new Statement(parametersInfo.sqlWithNamedParameters, parameters, null); + } + + private Map getUnnamedParametersMap(Object[] values) { + Map parameters = new HashMap<>(); + int index = 1; + for (Object value : values) { + parameters.put("p" + (index++), Value.toValue(value)); + } + return parameters; + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/StreamingResultSet.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/StreamingResultSet.java new file mode 100644 index 000000000000..47b10d852c64 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/StreamingResultSet.java @@ -0,0 +1,31 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.core.InternalApi; + +/** Streaming implementation of ResultSet that supports streaming of chunks */ +interface StreamingResultSet extends ResultSet { + + /** + * Returns the {@link boolean} for this {@link ResultSet}. This method will be used by + * AsyncResultSet internally to initiate gRPC streaming. This method should not be called by the + * users. + */ + @InternalApi + boolean initiateStreaming(AsyncResultSet.StreamMessageListener streamMessageListener); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/StreamingUtil.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/StreamingUtil.java new file mode 100644 index 000000000000..54496d39f965 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/StreamingUtil.java @@ -0,0 +1,30 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +final class StreamingUtil { + + private StreamingUtil() {} + + static boolean initiateStreaming( + ResultSet resultSet, AsyncResultSet.StreamMessageListener streamMessageListener) { + if (resultSet instanceof StreamingResultSet) { + return ((StreamingResultSet) resultSet).initiateStreaming(streamMessageListener); + } + return false; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Struct.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Struct.java new file mode 100644 index 000000000000..38a47e99dffe --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Struct.java @@ -0,0 +1,492 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; + +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Type.Code; +import com.google.cloud.spanner.Type.StructField; +import com.google.common.collect.ImmutableList; +import com.google.common.primitives.Booleans; +import com.google.common.primitives.Doubles; +import com.google.common.primitives.Floats; +import com.google.common.primitives.Longs; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ProtocolMessageEnum; +import java.io.Serializable; +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.UUID; +import java.util.function.Function; +import javax.annotation.concurrent.Immutable; + +/** + * Represents a non-{@code NULL} value of {@link Type.Code#STRUCT}. Such values are a tuple of named + * and typed columns, where individual columns may be null. Individual rows from a read or query + * operation can be considered as structs; {@link ResultSet#getCurrentRowAsStruct()} allows an + * immutable struct to be created from the row that the result set is currently positioned over. + * + *

{@code Struct} instances are immutable. + * + *

This class does not support representing typed {@code NULL} {@code Struct} values. + * + *

However, struct values inside SQL queries are always typed and can be externally + * supplied to a query only in the form of struct/array-of-struct query parameter values for which + * typed {@code NULL} struct values can be specified in the following ways: + * + *

1. As a standalone {@code NULL} struct value or as a nested struct field value, constructed + * using {@link ValueBinder#to(Type, Struct)} or {@link Value#struct(Type, Struct)}. + * + *

2. As as a null {@code Struct} reference representing a {@code NULL} struct typed element + * value inside an array/list of '{@code Struct}' references, that is used to construct an + * array-of-struct value using {@link Value#structArray(Type, Iterable)} or {@link + * ValueBinder#toStructArray(Type, Iterable)}. In this case, the type of the {@code NULL} struct + * value is assumed to be the same as the explicitly specified struct element type of the + * array/list. + */ +@Immutable +public abstract class Struct extends AbstractStructReader implements Serializable { + // Only implementations within the package are allowed. + Struct() {} + + /** Returns a builder for creating a non-{@code NULL} {@code Struct} instance. */ + public static Builder newBuilder() { + return new Builder(); + } + + /** Builder for constructing non-{@code NULL} {@code Struct} instances. */ + public static final class Builder { + private final List types = new ArrayList<>(); + private final List values = new ArrayList<>(); + private final ValueBinder binder; + private String currentField; + + private Builder() { + this.binder = + new ValueBinder() { + @Override + Builder handle(Value value) { + checkBindingInProgress(true); + addInternal(currentField, value); + currentField = null; + return Builder.this; + } + }; + } + + /** + * Returns a binder to set the value of a new field in the struct named {@code fieldName}. + * + * @param fieldName name of the field to set. Can be empty or the same as an existing field name + * in the {@code STRUCT} + */ + public ValueBinder set(String fieldName) { + checkBindingInProgress(false); + currentField = checkNotNull(fieldName); + return binder; + } + + /** Adds a new unnamed field {@code fieldName} with the given value. 
*/ + public Builder add(Value value) { + checkBindingInProgress(false); + addInternal("", value); + return this; + } + + public Struct build() { + checkBindingInProgress(false); + return new ValueListStruct(types, values); + } + + private void addInternal(String fieldName, Value value) { + types.add(Type.StructField.of(fieldName, value.getType())); + values.add(value); + } + + private void checkBindingInProgress(boolean expectInProgress) { + if (expectInProgress) { + checkState(currentField != null, "No binding currently active"); + } else if (currentField != null) { + throw new IllegalStateException("Incomplete binding for column " + currentField); + } + } + } + + /** + * TODO(user) : Consider moving these methods to the StructReader interface once STRUCT-typed + * columns are supported in {@link ResultSet}. + */ + + /* Public methods for accessing struct-typed fields */ + public Struct getStruct(int columnIndex) { + checkNonNullStruct(columnIndex, columnIndex); + return getStructInternal(columnIndex); + } + + public Struct getStruct(String columnName) { + int columnIndex = getColumnIndex(columnName); + checkNonNullStruct(columnIndex, columnName); + return getStructInternal(columnIndex); + } + + /* Sub-classes must implement this method */ + protected abstract Struct getStructInternal(int columnIndex); + + private void checkNonNullStruct(int columnIndex, Object columnNameForError) { + Type actualType = getColumnType(columnIndex); + checkState( + actualType.getCode() == Code.STRUCT, + "Column %s is not of correct type: expected STRUCT<...> but was %s", + columnNameForError, + actualType); + checkNonNull(columnIndex, columnNameForError); + } + + /** Default implementation for value structs produced by {@link Builder}. 
*/ + private static class ValueListStruct extends Struct { + private final Type type; + private final List values; + + private ValueListStruct(Iterable types, Iterable values) { + this.type = Type.struct(types); + this.values = ImmutableList.copyOf(values); + } + + @Override + protected boolean getBooleanInternal(int columnIndex) { + return values.get(columnIndex).getBool(); + } + + @Override + protected long getLongInternal(int columnIndex) { + return values.get(columnIndex).getInt64(); + } + + @Override + protected float getFloatInternal(int columnIndex) { + return values.get(columnIndex).getFloat32(); + } + + @Override + protected double getDoubleInternal(int columnIndex) { + return values.get(columnIndex).getFloat64(); + } + + @Override + protected BigDecimal getBigDecimalInternal(int columnIndex) { + return values.get(columnIndex).getNumeric(); + } + + @Override + protected String getStringInternal(int columnIndex) { + return values.get(columnIndex).getString(); + } + + @Override + protected String getJsonInternal(int columnIndex) { + return values.get(columnIndex).getJson(); + } + + @Override + protected String getPgJsonbInternal(int columnIndex) { + return values.get(columnIndex).getPgJsonb(); + } + + @Override + protected ByteArray getBytesInternal(int columnIndex) { + return values.get(columnIndex).getBytes(); + } + + @Override + protected Timestamp getTimestampInternal(int columnIndex) { + return values.get(columnIndex).getTimestamp(); + } + + @Override + protected Date getDateInternal(int columnIndex) { + return values.get(columnIndex).getDate(); + } + + @Override + protected UUID getUuidInternal(int columnIndex) { + return values.get(columnIndex).getUuid(); + } + + @Override + protected Interval getIntervalInternal(int columnIndex) { + return values.get(columnIndex).getInterval(); + } + + @Override + protected T getProtoMessageInternal(int columnIndex, T message) { + return values.get(columnIndex).getProtoMessage(message); + } + + @Override + protected 
T getProtoEnumInternal( + int columnIndex, Function method) { + return values.get(columnIndex).getProtoEnum(method); + } + + @Override + protected Value getValueInternal(int columnIndex) { + return values.get(columnIndex); + } + + @Override + protected Struct getStructInternal(int columnIndex) { + return values.get(columnIndex).getStruct(); + } + + @Override + protected boolean[] getBooleanArrayInternal(int columnIndex) { + return Booleans.toArray(getBooleanListInternal(columnIndex)); + } + + @Override + protected List getBooleanListInternal(int columnIndex) { + return values.get(columnIndex).getBoolArray(); + } + + @Override + protected long[] getLongArrayInternal(int columnIndex) { + return Longs.toArray(getLongListInternal(columnIndex)); + } + + @Override + protected List getLongListInternal(int columnIndex) { + return values.get(columnIndex).getInt64Array(); + } + + @Override + protected float[] getFloatArrayInternal(int columnIndex) { + return Floats.toArray(getFloatListInternal(columnIndex)); + } + + @Override + protected List getFloatListInternal(int columnIndex) { + return values.get(columnIndex).getFloat32Array(); + } + + @Override + protected double[] getDoubleArrayInternal(int columnIndex) { + return Doubles.toArray(getDoubleListInternal(columnIndex)); + } + + @Override + protected List getDoubleListInternal(int columnIndex) { + return values.get(columnIndex).getFloat64Array(); + } + + @Override + protected List getBigDecimalListInternal(int columnIndex) { + return values.get(columnIndex).getNumericArray(); + } + + @Override + protected List getStringListInternal(int columnIndex) { + return values.get(columnIndex).getStringArray(); + } + + @Override + protected List getJsonListInternal(int columnIndex) { + return values.get(columnIndex).getJsonArray(); + } + + @Override + protected List getPgJsonbListInternal(int columnIndex) { + return values.get(columnIndex).getPgJsonbArray(); + } + + @Override + protected List getBytesListInternal(int columnIndex) { + 
return values.get(columnIndex).getBytesArray(); + } + + @Override + protected List getTimestampListInternal(int columnIndex) { + return values.get(columnIndex).getTimestampArray(); + } + + @Override + protected List getProtoMessageListInternal( + int columnIndex, T message) { + return values.get(columnIndex).getProtoMessageArray(message); + } + + @Override + protected List getProtoEnumListInternal( + int columnIndex, Function method) { + return values.get(columnIndex).getProtoEnumArray(method); + } + + @Override + protected List getDateListInternal(int columnIndex) { + return values.get(columnIndex).getDateArray(); + } + + @Override + protected List getUuidListInternal(int columnIndex) { + return values.get(columnIndex).getUuidArray(); + } + + @Override + protected List getIntervalListInternal(int columnIndex) { + return values.get(columnIndex).getIntervalArray(); + } + + @Override + protected List getStructListInternal(int columnIndex) { + return values.get(columnIndex).getStructArray(); + } + + @Override + public Type getType() { + return type; + } + + @Override + public boolean isNull(int columnIndex) { + return values.get(columnIndex).isNull(); + } + + @Override + public String toString() { + // TODO(user): Consider pulling a generic toString() up to Struct. 
+ return values.toString(); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Struct)) { + return false; + } + + Struct that = (Struct) o; + + if (!getType().equals(that.getType())) { + return false; + } + for (int i = 0; i < getColumnCount(); ++i) { + if (!Objects.equals(getAsObject(i), that.getAsObject(i))) { + return false; + } + } + return true; + } + + @Override + public int hashCode() { + int result = getType().hashCode(); + for (int i = 0; i < getColumnCount(); ++i) { + result = 31 * result + Objects.hashCode(getAsObject(i)); + } + return result; + } + + private Object getAsObject(int columnIndex) { + Type type = getColumnType(columnIndex); + if (isNull(columnIndex)) { + return null; + } + switch (type.getCode()) { + case BOOL: + return getBooleanInternal(columnIndex); + case INT64: + case PG_OID: + case ENUM: + return getLongInternal(columnIndex); + case FLOAT32: + return getFloatInternal(columnIndex); + case FLOAT64: + return getDoubleInternal(columnIndex); + case NUMERIC: + return getBigDecimalInternal(columnIndex); + case PG_NUMERIC: + return getStringInternal(columnIndex); + case STRING: + return getStringInternal(columnIndex); + case JSON: + return getJsonInternal(columnIndex); + case PG_JSONB: + return getPgJsonbInternal(columnIndex); + case BYTES: + case PROTO: + return getBytesInternal(columnIndex); + case TIMESTAMP: + return getTimestampInternal(columnIndex); + case DATE: + return getDateInternal(columnIndex); + case UUID: + return getUuidInternal(columnIndex); + case INTERVAL: + return getIntervalInternal(columnIndex); + case STRUCT: + return getStructInternal(columnIndex); + case ARRAY: + switch (type.getArrayElementType().getCode()) { + case BOOL: + return getBooleanListInternal(columnIndex); + case INT64: + case PG_OID: + case ENUM: + return getLongListInternal(columnIndex); + case FLOAT32: + return getFloatListInternal(columnIndex); + case FLOAT64: + return 
getDoubleListInternal(columnIndex); + case NUMERIC: + return getBigDecimalListInternal(columnIndex); + case PG_NUMERIC: + return getStringListInternal(columnIndex); + case STRING: + return getStringListInternal(columnIndex); + case JSON: + return getJsonListInternal(columnIndex); + case PG_JSONB: + return getPgJsonbListInternal(columnIndex); + case BYTES: + case PROTO: + return getBytesListInternal(columnIndex); + case TIMESTAMP: + return getTimestampListInternal(columnIndex); + case DATE: + return getDateListInternal(columnIndex); + case UUID: + return getUuidListInternal(columnIndex); + case INTERVAL: + return getIntervalListInternal(columnIndex); + case STRUCT: + return getStructListInternal(columnIndex); + default: + throw new AssertionError("Invalid type " + type); + } + default: + throw new AssertionError("Invalid type " + type); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/StructReader.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/StructReader.java new file mode 100644 index 000000000000..ab645588bf1b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/StructReader.java @@ -0,0 +1,739 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ProtocolMessageEnum; +import java.math.BigDecimal; +import java.util.List; +import java.util.UUID; +import java.util.function.BiFunction; +import java.util.function.Function; + +/** + * A base interface for reading the fields of a {@code STRUCT}. The Cloud Spanner yields {@code + * StructReader} instances as one of the subclasses {@link ResultSet} or {@link Struct}, most + * commonly as the result of a read or query operation. At any point in time, a {@code StructReader} + * provides access to a single tuple of data comprising multiple typed columns. Each column may have + * a {@code NULL} or non-{@code NULL} value; in both cases, columns always have a type. + * + *

Column values are accessed using the {@code getTypeName()} methods; a set of methods exists + * for each Java type that a column may be read as, and depending on the type of the column, only a + * subset of those methods will be appropriate. For example, {@link #getString(int)} and {@link + * #getString(String)} exist for reading columns of type {@link Type#string()}; attempting to call + * those methods for columns of other types will result in an {@code IllegalStateException}. The + * {@code getTypeName()} methods should only be called for non-{@code NULL} values, otherwise a + * {@code NullPointerException} is raised; {@link #isNull(int)}/{@link #isNull(String)} can be used + * to test for {@code NULL}-ness if necessary. + * + *

All methods for accessing a column have overloads that accept an {@code int} column index and + * a {@code String} column name. Column indices are zero-based. The column name overloads will fail + * with {@code IllegalArgumentException} if the column name does not appear exactly once in this + * instance's {@link #getType()}. The {@code int} overloads are typically more efficient than their + * {@code String} counterparts. + * + *

{@code StructReader} itself does not define whether the implementing type is mutable or + * immutable. For example, {@link ResultSet} is a mutable implementation of {@code StructReader}, + * where the {@code StructReader} methods provide access to the row that the result set is currently + * positioned over and {@link ResultSet#next()} changes that view to the next row, whereas {@link + * Struct} is an immutable implementation of {@code StructReader}. + */ +public interface StructReader { + /** + * @return the type of the underlying data. This will always be a {@code STRUCT} type, with fields + * corresponding to the data's columns. For the result of a read or query, this will always + * match the columns passed to the {@code read()} call or named in the query text, in order. + */ + Type getType(); + + /** + * @return the number of columns in the underlying data. This includes any columns with {@code + * NULL} values. + */ + int getColumnCount(); + + /** + * @param columnName name of the column + * @return the index of the column named {@code columnName}. + * @throws IllegalArgumentException if there is not exactly one element of {@code + * type().structFields()} with {@link Type.StructField#getName()} equal to {@code columnName} + */ + int getColumnIndex(String columnName); + + /** + * @param columnIndex index of the column + * @return the type of a column. + */ + Type getColumnType(int columnIndex); + + /** + * @param columnName name of the column + * @return the type of a column. + */ + Type getColumnType(String columnName); + + /** + * @param columnIndex index of the column + * @return {@code true} if a column contains a {@code NULL} value. + */ + boolean isNull(int columnIndex); + + /** + * @param columnName name of the column + * @return {@code true} if a column contains a {@code NULL} value. 
+ */ + boolean isNull(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@link Type#bool()}. + */ + boolean getBoolean(int columnIndex); + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@link Type#bool()}. + */ + boolean getBoolean(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@link Type#int64()}. + */ + long getLong(int columnIndex); + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@link Type#int64()}. + */ + long getLong(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@link Type#float32()}. + */ + default float getFloat(int columnIndex) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@link Type#float32()}. + */ + default float getFloat(String columnName) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@link Type#float64()}. + */ + double getDouble(int columnIndex); + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@link Type#float64()}. + */ + double getDouble(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@link Type#numeric()}. + */ + BigDecimal getBigDecimal(int columnIndex); + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@link Type#numeric()}. 
+ */ + BigDecimal getBigDecimal(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@link Type#string()}. + */ + String getString(int columnIndex); + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@link Type#string()}. + */ + String getString(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a column with type T or null if the column contains a null value + *

Example + *

{@code
+   * Struct row = ...
+   * String name = row.getOrNull(1, StructReader::getString)
+   * }
+ */ + default T getOrNull(int columnIndex, BiFunction function) { + return isNull(columnIndex) ? null : function.apply(this, columnIndex); + } + + /** + * @param columnName index of the column + * @return the value of a column with type T or null if the column contains a null value + *

Example + *

{@code
+   * Struct row = ...
+   * String name = row.getOrNull("name", StructReader::getString)
+   * }
+ */ + default T getOrNull(String columnName, BiFunction function) { + return isNull(columnName) ? null : function.apply(this, columnName); + } + + /** + * @param columnIndex index of the column + * @return the value of a column with type T, or the given default if the column value is null + *

Example + *

{@code
+   * Struct row = ...
+   * String name = row.getOrDefault(1, StructReader::getString, "")
+   * }
+ */ + default T getOrDefault( + int columnIndex, BiFunction function, T defaultValue) { + return isNull(columnIndex) ? defaultValue : function.apply(this, columnIndex); + } + + /** + * @param columnName name of the column + * @return the value of a column with type T, or the given default if the column value is null + *

Example + *

{@code
+   * Struct row = ...
+   * String name = row.getOrDefault("name", StructReader::getString, "")
+   * }
+ */ + default T getOrDefault( + String columnName, BiFunction function, T defaultValue) { + return isNull(columnName) ? defaultValue : function.apply(this, columnName); + } + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@link Type#json()}. + */ + default String getJson(int columnIndex) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@link Type#json()}. + */ + default String getJson(String columnName) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@link Type#pgJsonb()}. + */ + default String getPgJsonb(int columnIndex) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@link Type#pgJsonb()}. + */ + default String getPgJsonb(String columnName) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * To get the proto message of generic type {@code T} from Struct. + * + * @param columnIndex Index of the column. + * @param message Proto message object. Message can't be null as it's internally used to find the + * type of proto. Use @code{MyProtoClass.getDefaultInstance()}. @see getDefaultInstance() + * @return The value of a non-{@code NULL} column with type {@link Type#proto(String)} ()}. + */ + default T getProtoMessage(int columnIndex, T message) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * To get the proto message of type {@code T} from Struct. + * + * @param columnName Name of the column. + * @param message Proto message object. 
Message can't be null as it's internally used to find the + * type of proto. Use @code{MyProtoClass.getDefaultInstance()}. @see getDefaultInstance() + * @return The value of a non-{@code NULL} column with type {@link Type#proto(String)} ()}. + */ + default T getProtoMessage(String columnName, T message) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * To get the proto enum of type {@code T} from Struct. + * + * @param columnIndex Index of the column. + * @param method A function that takes enum integer constant as argument and returns the enum. Use + * method @code{forNumber} from generated enum class (eg: MyProtoEnum::forNumber). @see forNumber + * @return The value of a non-{@code NULL} column with type {@link Type#protoEnum(String)} ()}. + */ + default T getProtoEnum( + int columnIndex, Function method) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * To get the proto enum of type {@code T} from Struct. + * + * @param columnName Name of the column. + * @param method A function that takes enum integer constant as argument and returns the enum. Use + * method @code{forNumber} from generated enum class (eg: MyProtoEnum::forNumber). @see forNumber + * @return The value of a non-{@code NULL} column with type {@link Type#protoEnum(String)} ()}. + */ + default T getProtoEnum( + String columnName, Function method) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@link Type#bytes()}. + */ + ByteArray getBytes(int columnIndex); + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@link Type#bytes()}. + */ + ByteArray getBytes(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@link Type#timestamp()}. 
+ */ + Timestamp getTimestamp(int columnIndex); + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@link Type#timestamp()}. + */ + Timestamp getTimestamp(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@link Type#date()}. + */ + Date getDate(int columnIndex); + + UUID getUuid(int columnIndex); + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@link Type#date()}. + */ + Date getDate(String columnName); + + UUID getUuid(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@link Type#interval()}. + */ + Interval getInterval(int columnIndex); + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@link Type#interval()}. + */ + Interval getInterval(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a nullable column as a {@link Value}. + */ + default Value getValue(int columnIndex) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * @param columnName name of the column + * @return the value of a nullable column as a {@link Value}. + */ + default Value getValue(String columnName) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.bool())}. + * @throws NullPointerException if any element of the array value is {@code NULL}. If the array + * may contain {@code NULL} values, use {@link #getBooleanList(int)} instead. 
+ */ + boolean[] getBooleanArray(int columnIndex); + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.bool())}. + * @throws NullPointerException if any element of the array value is {@code NULL}. If the array + * may contain {@code NULL} values, use {@link #getBooleanList(String)} instead. + */ + boolean[] getBooleanArray(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.bool())}. The + * list returned by this method is lazily constructed. Create a copy of it if you intend to + * access each element in the list multiple times. + */ + List getBooleanList(int columnIndex); + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.bool())}. The + * list returned by this method is lazily constructed. Create a copy of it if you intend to + * access each element in the list multiple times. + */ + List getBooleanList(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.int64())}. + * @throws NullPointerException if any element of the array value is {@code NULL}. If the array + * may contain {@code NULL} values, use {@link #getLongList(int)} instead. + */ + long[] getLongArray(int columnIndex); + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.int64())}. + * @throws NullPointerException if any element of the array value is {@code NULL}. If the array + * may contain {@code NULL} values, use {@link #getLongList(String)} instead. + */ + long[] getLongArray(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.int64())}. 
The + * list returned by this method is lazily constructed. Create a copy of it if you intend to + * access each element in the list multiple times. + */ + List getLongList(int columnIndex); + + /** + * @param columnName + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.int64())}. The + * list returned by this method is lazily constructed. Create a copy of it if you intend to + * access each element in the list multiple times. + */ + List getLongList(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.float32())}. + * @throws NullPointerException if any element of the array value is {@code NULL}. If the array + * may contain {@code NULL} values, use {@link #getFloatList(int)} instead. + */ + default float[] getFloatArray(int columnIndex) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.float32())}. + * @throws NullPointerException if any element of the array value is {@code NULL}. If the array + * may contain {@code NULL} values, use {@link #getFloatList(String)} instead. + */ + default float[] getFloatArray(String columnName) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.float32())} The + * list returned by this method is lazily constructed. Create a copy of it if you intend to + * access each element in the list multiple times. 
+ */ + default List getFloatList(int columnIndex) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.float32())} The + * list returned by this method is lazily constructed. Create a copy of it if you intend to + * access each element in the list multiple times. + */ + List getFloatList(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.float64())}. + * @throws NullPointerException if any element of the array value is {@code NULL}. If the array + * may contain {@code NULL} values, use {@link #getDoubleList(int)} instead. + */ + double[] getDoubleArray(int columnIndex); + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.float64())}. + * @throws NullPointerException if any element of the array value is {@code NULL}. If the array + * may contain {@code NULL} values, use {@link #getDoubleList(String)} instead. + */ + double[] getDoubleArray(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.float64())} The + * list returned by this method is lazily constructed. Create a copy of it if you intend to + * access each element in the list multiple times. + */ + List getDoubleList(int columnIndex); + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.float64())} The + * list returned by this method is lazily constructed. Create a copy of it if you intend to + * access each element in the list multiple times. 
+ */ + List getDoubleList(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.numeric())} The + * list returned by this method is lazily constructed. Create a copy of it if you intend to + * access each element in the list multiple times. + */ + List getBigDecimalList(int columnIndex); + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.numeric())} The + * list returned by this method is lazily constructed. Create a copy of it if you intend to + * access each element in the list multiple times. + */ + List getBigDecimalList(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.string())}. The + * list returned by this method is lazily constructed. Create a copy of it if you intend to + * access each element in the list multiple times. + */ + List getStringList(int columnIndex); + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.string())}. The + * list returned by this method is lazily constructed. Create a copy of it if you intend to + * access each element in the list multiple times. + */ + List getStringList(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.json())}. The + * list returned by this method is lazily constructed. Create a copy of it if you intend to + * access each element in the list multiple times. + */ + default List getJsonList(int columnIndex) { + throw new UnsupportedOperationException("method should be overwritten"); + } + ; + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.json())}. 
The + * list returned by this method is lazily constructed. Create a copy of it if you intend to + * access each element in the list multiple times. + */ + default List getJsonList(String columnName) { + throw new UnsupportedOperationException("method should be overwritten"); + } + ; + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.pgJsonb())} The + * list returned by this method is lazily constructed. Create a copy of it if you intend to + * access each element in the list multiple times. + */ + default List getPgJsonbList(int columnIndex) { + throw new UnsupportedOperationException("method should be overwritten"); + } + ; + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.pgJsonb())} The + * list returned by this method is lazily constructed. Create a copy of it if you intend to + * access each element in the list multiple times. + */ + default List getPgJsonbList(String columnName) { + throw new UnsupportedOperationException("method should be overwritten"); + } + ; + + /** + * To get the proto message of generic type {@code T} from Struct. + * + * @param columnIndex Index of the column. + * @param message Proto message object. Message can't be null as it's internally used to find the + * type of proto. Use @code{MyProtoClass.getDefaultInstance()}. @see getDefaultInstance() + * @return The value of a non-{@code NULL} column with type {@code + * Type.array(Type.proto(String))}. + */ + default List getProtoMessageList(int columnIndex, T message) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * To get the proto message of type {@code T} from Struct. + * + * @param columnName Name of the column. + * @param message Proto message object. Message can't be null as it's internally used to find the + * type of proto. Use @code{MyProtoClass.getDefaultInstance()}. 
@see getDefaultInstance() + * @return The value of a non-{@code NULL} column with type {@code + * Type.array(Type.proto(String))}. + */ + default List getProtoMessageList(String columnName, T message) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * To get the proto enum of type {@code T} from Struct. + * + * @param columnIndex Index of the column. + * @param method A function that takes enum integer constant as argument and returns the enum. Use + * method @code{forNumber} from generated enum class (eg: MyProtoEnum::forNumber). @see forNumber + * @return The value of a non-{@code NULL} column with type {@code + * Type.array(Type.protoEnum(String))}. + */ + default List getProtoEnumList( + int columnIndex, Function method) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * To get the proto enum list of type {@code T} from Struct. + * + * @param columnName Name of the column. + * @param method A function that takes enum integer constant as argument and returns the enum. Use + * method @code{forNumber} from generated enum class (eg: MyProtoEnum::forNumber). @see forNumber + * @return The value of a non-{@code NULL} column with type {@code + * Type.array(Type.protoEnum(String))}. + */ + default List getProtoEnumList( + String columnName, Function method) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.bytes())}. The + * list returned by this method is lazily constructed. Create a copy of it if you intend to + * access each element in the list multiple times. + */ + List getBytesList(int columnIndex); + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.bytes())}. The + * list returned by this method is lazily constructed. 
Create a copy of it if you intend to + * access each element in the list multiple times. + */ + List getBytesList(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.timestamp())} + * The list returned by this method is lazily constructed. Create a copy of it if you intend + * to access each element in the list multiple times. + */ + List getTimestampList(int columnIndex); + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.timestamp())} + * The list returned by this method is lazily constructed. Create a copy of it if you intend + * to access each element in the list multiple times. + */ + List getTimestampList(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.date())}. The + * list returned by this method is lazily constructed. Create a copy of it if you intend to + * access each element in the list multiple times. + */ + List getDateList(int columnIndex); + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.date())}. The + * list returned by this method is lazily constructed. Create a copy of it if you intend to + * access each element in the list multiple times. + */ + List getDateList(String columnName); + + List getUuidList(int columnIndex); + + List getUuidList(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.interval())}. + * The list returned by this method is lazily constructed. Create a copy of it if you intend + * to access each element in the list multiple times. 
+ */ + List getIntervalList(int columnIndex); + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.interval())}. + * The list returned by this method is lazily constructed. Create a copy of it if you intend + * to access each element in the list multiple times. + */ + List getIntervalList(String columnName); + + /** + * @param columnIndex index of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.struct(...))} + * The list returned by this method is lazily constructed. Create a copy of it if you intend + * to access each element in the list multiple times. + */ + List getStructList(int columnIndex); + + /** + * @param columnName name of the column + * @return the value of a non-{@code NULL} column with type {@code Type.array(Type.struct(...))} + * The list returned by this method is lazily constructed. Create a copy of it if you intend + * to access each element in the list multiple times. + */ + List getStructList(String columnName); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ThreadFactoryUtil.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ThreadFactoryUtil.java new file mode 100644 index 000000000000..72d58e85be30 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ThreadFactoryUtil.java @@ -0,0 +1,109 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.InternalApi; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadFactory; +import javax.annotation.Nullable; + +/** Utility class for creating a thread factory for daemon or virtual threads. */ +@InternalApi +public class ThreadFactoryUtil { + + /** + * Tries to create a thread factory for virtual threads, and otherwise falls back to creating a + * platform thread factory that creates daemon threads. Virtual threads are supported from JDK21. + * + * @param baseNameFormat the base name format for the threads, '-%d' will be appended to the + * actual thread name format + * @param tryVirtualThreads whether to try to use virtual threads if available or not + * @return a {@link ThreadFactory} that produces virtual threads (Java 21 or higher) or platform + * daemon threads + */ + @InternalApi + public static ThreadFactory createVirtualOrPlatformDaemonThreadFactory( + String baseNameFormat, boolean tryVirtualThreads) { + ThreadFactory virtualThreadFactory = + tryVirtualThreads ? tryCreateVirtualThreadFactory(baseNameFormat) : null; + if (virtualThreadFactory != null) { + return virtualThreadFactory; + } + + return new ThreadFactoryBuilder().setDaemon(true).setNameFormat(baseNameFormat + "-%d").build(); + } + + /** + * Tries to create a {@link ThreadFactory} that creates virtual threads. Returns null if virtual + * threads are not supported on this JVM. 
+ */ + @InternalApi + @Nullable + public static ThreadFactory tryCreateVirtualThreadFactory(String baseNameFormat) { + try { + Class threadBuilderClass = Class.forName("java.lang.Thread$Builder"); + Method ofVirtualMethod = Thread.class.getDeclaredMethod("ofVirtual"); + Object virtualBuilder = ofVirtualMethod.invoke(null); + Method nameMethod = threadBuilderClass.getDeclaredMethod("name", String.class, long.class); + virtualBuilder = nameMethod.invoke(virtualBuilder, baseNameFormat + "-", 0); + Method factoryMethod = threadBuilderClass.getDeclaredMethod("factory"); + return (ThreadFactory) factoryMethod.invoke(virtualBuilder); + } catch (ClassNotFoundException | NoSuchMethodException ignore) { + return null; + } catch (InvocationTargetException | IllegalAccessException e) { + // Java 20 supports virtual threads as an experimental feature. It will throw an + // UnsupportedOperationException if experimental features have not been enabled. + if (e.getCause() instanceof UnsupportedOperationException) { + return null; + } + throw new RuntimeException(e); + } + } + + /** + * Tries to create an {@link ExecutorService} that creates a new virtual thread for each task that + * it runs. Creating a new virtual thread is the recommended way to create executors using virtual + * threads, instead of creating a pool of virtual threads. Returns null if virtual threads are not + * supported on this JVM. 
+ */ + @InternalApi + @Nullable + public static ExecutorService tryCreateVirtualThreadPerTaskExecutor(String baseNameFormat) { + ThreadFactory factory = tryCreateVirtualThreadFactory(baseNameFormat); + if (factory != null) { + try { + Method newThreadPerTaskExecutorMethod = + Executors.class.getDeclaredMethod("newThreadPerTaskExecutor", ThreadFactory.class); + return (ExecutorService) newThreadPerTaskExecutorMethod.invoke(null, factory); + } catch (NoSuchMethodException ignore) { + return null; + } catch (InvocationTargetException | IllegalAccessException e) { + // Java 20 supports virtual threads as an experimental feature. It will throw an + // UnsupportedOperationException if experimental features have not been enabled. + if (e.getCause() instanceof UnsupportedOperationException) { + return null; + } + throw new RuntimeException(e); + } + } + return null; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TimestampBound.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TimestampBound.java new file mode 100644 index 000000000000..6502077cd65e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TimestampBound.java @@ -0,0 +1,362 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; + +import com.google.cloud.Timestamp; +import com.google.protobuf.Duration; +import com.google.protobuf.util.Durations; +import com.google.spanner.v1.TransactionOptions; +import java.io.Serializable; +import java.util.Objects; +import java.util.concurrent.TimeUnit; + +/** + * Defines how Cloud Spanner will choose a timestamp for a read-only transaction or a single + * read/query. + * + *

The types of timestamp bound are: + * + *

    + *
  • Strong (the default). + *
  • Bounded staleness. + *
  • Exact staleness. + *
+ * + *

If the Cloud Spanner database to be read is geographically distributed, stale read-only + * transactions can execute more quickly than strong or read-write transactions, because they are + * able to execute far from the leader replica. + * + *

Each type of timestamp bound is discussed in detail below. + * + *

Strong reads

+ * + *

Strong reads are guaranteed to see the effects of all transactions that have committed before + * the start of the read. Furthermore, all rows yielded by a single read are consistent with each + * other - if any part of the read observes a transaction, all parts of the read see the + * transaction. + * + *

Strong reads are not repeatable: two consecutive strong read-only transactions might return + * inconsistent results if there are concurrent writes. If consistency across reads is required, the + * reads should be executed within a transaction or at an exact read timestamp. + * + *

Use {@link #strong()} to create a bound of this type. + * + *

Exact Staleness

+ * + *

These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are + * guaranteed to see a consistent prefix of the global transaction history: they observe + * modifications done by all transactions with a commit timestamp less than or equal to the read + * timestamp, and observe none of the modifications done by transactions with a larger commit + * timestamp. They will block until all conflicting transactions that may be assigned commit + * timestamps less than or equal to the read timestamp have finished. + * + *

The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a + * staleness relative to the current time. + * + *

These modes do not require a "negotiation phase" to pick a timestamp. As a result, they + * execute slightly faster than the equivalent bounded stale concurrency modes. On the other hand, + * boundedly stale reads usually return fresher results. + * + *

Use {@link #ofReadTimestamp(Timestamp)} and {@link #ofExactStaleness(long, TimeUnit)} to + * create a bound of this type. + * + *

Bounded Staleness

+ * + *

Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a + * user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness + * bound that allows execution of the reads at the closest available replica without blocking. + * + *

All rows yielded are consistent with each other -- if any part of the read observes a + * transaction, all parts of the read see the transaction. Bounded stale reads are not repeatable: + * two stale reads, even if they use the same staleness bound, can execute at different timestamps + * and thus return inconsistent results. + * + *

Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all + * replicas needed to serve the read. In the second phase, reads are executed at the negotiated + * timestamp. + * + *

As a result of the two phase execution, bounded staleness reads are usually a little slower + * than comparable exact staleness reads. However, they are typically able to return fresher + * results, and are more likely to execute at the closest replica. + * + *

Because the timestamp negotiation requires up-front knowledge of which rows will be read, it + * can only be used with single-use reads and single-use read-only transactions. + * + *

Use {@link #ofMinReadTimestamp(Timestamp)} and {@link #ofMaxStaleness(long, TimeUnit)} to + * create a bound of this type. + * + *

Old Read Timestamps and Garbage Collection

+ * + *

Cloud Spanner continuously garbage collects deleted and overwritten data in the background to + * reclaim storage space. This process is known as "version GC". By default, version GC reclaims + * versions after they are four hours old. Because of this, Cloud Spanner cannot perform reads at + * read timestamps more than four hours in the past. This restriction also applies to in-progress + * reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries + * with too-old read timestamps fail with the error {@link ErrorCode#FAILED_PRECONDITION}. + * + * @see Session#singleUse(TimestampBound) + * @see Session#singleUseReadOnlyTransaction(TimestampBound) + * @see Session#readOnlyTransaction(TimestampBound) + */ +public final class TimestampBound implements Serializable { + private static final TimestampBound STRONG_BOUND = new TimestampBound(Mode.STRONG, null, null); + private static final TransactionOptions.ReadOnly STRONG_PROTO = + TransactionOptions.ReadOnly.newBuilder().setStrong(true).build(); + private static final long serialVersionUID = 9194565742651275731L; + + private final Mode mode; + private final Timestamp timestamp; + private final Duration staleness; + + private TimestampBound(Mode mode, Timestamp timestamp, Duration staleness) { + this.mode = mode; + this.timestamp = timestamp; + this.staleness = staleness; + } + + /** + * Returns a timestamp bound that will perform reads and queries at a timestamp where all + * previously committed transactions are visible. + */ + public static TimestampBound strong() { + return STRONG_BOUND; + } + + /** + * Returns a timestamp bound that will perform reads and queries at the given timestamp. Unlike + * other modes, reads at a specific timestamp are repeatable; the same read at the same timestamp + * always returns the same data. If the timestamp is in the future, the read will block until the + * specified timestamp, modulo the read's deadline. + * + *

This mode is useful for large scale consistent reads such as mapreduces, or for coordinating + * many reads against a consistent snapshot of the data. + */ + public static TimestampBound ofReadTimestamp(Timestamp timestamp) { + return new TimestampBound(Mode.READ_TIMESTAMP, checkNotNull(timestamp), null); + } + + /** + * Returns a timestamp bound that will perform reads and queries at a timestamp chosen to be at + * least {@code timestamp}. This is useful for requesting fresher data than some previous read, or + * data that is fresh enough to observe the effects of some previously committed transaction whose + * timestamp is known. + * + *

Note that this option can only be used in single-use reads and single-use read-only + * transactions. + */ + public static TimestampBound ofMinReadTimestamp(Timestamp timestamp) { + return new TimestampBound(Mode.MIN_READ_TIMESTAMP, checkNotNull(timestamp), null); + } + + /** + * Returns a timestamp bound that will perform reads and queries at an exact staleness. The + * timestamp is chosen soon after the read is started. + * + *

Guarantees that all writes that have committed more than the specified number of seconds ago + * are visible. Because Cloud Spanner chooses the exact timestamp, this mode works even if the + * client's local clock is substantially skewed from Cloud Spanner commit timestamps. + * + *

Useful for reading at nearby replicas without the distributed timestamp negotiation overhead + * of {@link #ofMaxStaleness(long, TimeUnit)}. + */ + public static TimestampBound ofExactStaleness(long num, TimeUnit units) { + checkStaleness(num); + return new TimestampBound(Mode.EXACT_STALENESS, null, createDuration(num, units)); + } + + /** + * Returns a timestamp bound that will perform reads and queries at a timestamp chosen to be at + * most {@code num units} stale. This guarantees that all writes that have committed more than the + * specified number of seconds ago are visible. Because Cloud Spanner chooses the exact timestamp, + * this mode works even if the client's local clock is substantially skewed from Cloud Spanner + * commit timestamps. + * + *

Useful for reading the freshest data available at a nearby replica, while bounding the + * possible staleness if the local replica has fallen behind. + * + *

Note that this option can only be used in single-use reads and single-use read-only + * transactions. + */ + public static TimestampBound ofMaxStaleness(long num, TimeUnit units) { + checkStaleness(num); + return new TimestampBound(Mode.MAX_STALENESS, null, createDuration(num, units)); + } + + /** + * The type of timestamp bound. See the class documentation of {@link TimestampBound} for a + * detailed discussion of the various modes. + */ + public enum Mode { + STRONG, + READ_TIMESTAMP, + MIN_READ_TIMESTAMP, + EXACT_STALENESS, + MAX_STALENESS, + } + + public Mode getMode() { + return mode; + } + + /** + * Returns the timestamp at which reads will be performed. + * + * @throws IllegalStateException if {@code mode() != Mode.EXACT_TIMESTAMP} + * @see #ofReadTimestamp(Timestamp) + */ + public Timestamp getReadTimestamp() { + checkMode(Mode.READ_TIMESTAMP); + return timestamp; + } + + /** + * Returns the minimum timestamp at which reads will be performed. + * + * @throws IllegalStateException if {@code mode() != Mode.MIN_READ_TIMESTAMP} + * @see #ofMinReadTimestamp(Timestamp) + */ + public Timestamp getMinReadTimestamp() { + checkMode(Mode.MIN_READ_TIMESTAMP); + return timestamp; + } + + /** + * Returns the exact staleness, in the units requested, at which reads will be performed. + * + * @throws IllegalStateException if {@code mode() != Mode.EXACT_STALENESS} + * @see #ofExactStaleness(long, java.util.concurrent.TimeUnit) + */ + public long getExactStaleness(TimeUnit units) { + checkMode(Mode.EXACT_STALENESS); + return durationToUnits(staleness, units); + } + + /** + * Returns the maximum staleness, in the units requested, at which reads will be performed. 
+ * + * @throws IllegalStateException if {@code mode() != Mode.MAX_STALENESS} + * @see #ofMaxStaleness(long, java.util.concurrent.TimeUnit) + */ + public long getMaxStaleness(TimeUnit units) { + checkMode(Mode.MAX_STALENESS); + return durationToUnits(staleness, units); + } + + StringBuilder toString(StringBuilder b) { + // TODO(user): Convert all internal toString(StringBuilder) methods to return StringBuilder. + switch (mode) { + case STRONG: + return b.append("strong"); + case READ_TIMESTAMP: + return b.append("exact_timestamp: ").append(timestamp.toString()); + case MIN_READ_TIMESTAMP: + return b.append("min_read_timestamp: ").append(timestamp.toString()); + case EXACT_STALENESS: + return b.append("exact_staleness: ").append(Durations.toString(staleness)); + case MAX_STALENESS: + return b.append("max_staleness: ").append(Durations.toString(staleness)); + default: + throw new AssertionError("Unexpected mode: " + mode); + } + } + + @Override + public String toString() { + return toString(new StringBuilder()).toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + TimestampBound that = (TimestampBound) o; + return mode == that.mode + && Objects.equals(staleness, that.staleness) + && Objects.equals(timestamp, that.timestamp); + } + + @Override + public int hashCode() { + return Objects.hash(mode, timestamp, staleness); + } + + TransactionOptions.ReadOnly toProto() { + // TODO(user): Use full proto as the internal representation if we eliminate Timestamp. 
+ if (mode == Mode.STRONG) { + return STRONG_PROTO; + } + TransactionOptions.ReadOnly.Builder builder = TransactionOptions.ReadOnly.newBuilder(); + applyToBuilder(builder); + return builder.build(); + } + + TransactionOptions.ReadOnly.Builder applyToBuilder(TransactionOptions.ReadOnly.Builder builder) { + switch (mode) { + case STRONG: + return builder.setStrong(true); + case READ_TIMESTAMP: + return builder.setReadTimestamp(timestamp.toProto()); + case MIN_READ_TIMESTAMP: + return builder.setMinReadTimestamp(timestamp.toProto()); + case EXACT_STALENESS: + return builder.setExactStaleness(staleness); + case MAX_STALENESS: + return builder.setMaxStaleness(staleness); + default: + throw new AssertionError("Unexpected mode: " + mode); + } + } + + private static void checkStaleness(double num) { + checkArgument(num >= 0, "Staleness cannot be negative"); + } + + private void checkMode(Mode requiredMode) { + checkState(mode == requiredMode, "Invalid call for mode %s", mode); + } + + private static Duration createDuration(long num, TimeUnit units) { + switch (units) { + case NANOSECONDS: + return Durations.fromNanos(num); + case MICROSECONDS: + return Durations.fromMicros(num); + default: + return Durations.fromMillis(units.toMillis(num)); + } + } + + private static long durationToUnits(Duration duration, TimeUnit units) { + // TODO(user): Handle overflow. 
+ switch (units) { + case NANOSECONDS: + return Durations.toNanos(duration); + case MICROSECONDS: + return Durations.toMicros(duration); + default: + return units.convert(Durations.toMillis(duration), TimeUnit.MILLISECONDS); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TraceWrapper.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TraceWrapper.java new file mode 100644 index 000000000000..e94c64926993 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TraceWrapper.java @@ -0,0 +1,243 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.gax.core.GaxProperties; +import com.google.cloud.spanner.Options.TagOption; +import com.google.cloud.spanner.Options.TransactionOption; +import com.google.cloud.spanner.SpannerOptions.TracingFramework; +import com.google.common.base.MoreObjects; +import io.opencensus.trace.BlankSpan; +import io.opencensus.trace.Span; +import io.opencensus.trace.Tracer; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; +import io.opentelemetry.context.Context; +import java.util.Arrays; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; + +class TraceWrapper { + private static final AttributeKey TRANSACTION_TAG_KEY = + AttributeKey.stringKey("transaction.tag"); + private static final AttributeKey STATEMENT_TAG_KEY = + AttributeKey.stringKey("statement.tag"); + private static final AttributeKey INSTANCE_NAME_KEY = + AttributeKey.stringKey("instance.name"); + private static final AttributeKey DB_NAME_KEY = AttributeKey.stringKey("db.name"); + private static final AttributeKey DB_STATEMENT_KEY = + AttributeKey.stringKey("db.statement"); + private static final AttributeKey> DB_STATEMENT_ARRAY_KEY = + AttributeKey.stringArrayKey("db.statement"); + private static final AttributeKey DB_TABLE_NAME_KEY = AttributeKey.stringKey("db.table"); + private static final AttributeKey CLOUD_REGION_KEY = + AttributeKey.stringKey("cloud.region"); + private static final AttributeKey GCP_CLIENT_SERVICE_KEY = + AttributeKey.stringKey("gcp.client.service"); + private static final AttributeKey GCP_CLIENT_VERSION_KEY = + AttributeKey.stringKey("gcp.client.version"); + private static final AttributeKey GCP_CLIENT_REPO_KEY = + AttributeKey.stringKey("gcp.client.repo"); + private static final AttributeKey GCP_RESOURCE_NAME_KEY = + 
AttributeKey.stringKey("gcp.resource.name"); + private static final String GCP_RESOURCE_NAME_PREFIX = "//spanner.googleapis.com/"; + private static final AttributeKey THREAD_NAME_KEY = AttributeKey.stringKey("thread.name"); + + private final Tracer openCensusTracer; + private final io.opentelemetry.api.trace.Tracer openTelemetryTracer; + private final boolean enableExtendedTracing; + private final Attributes commonAttributes; + + TraceWrapper( + Tracer openCensusTracer, + io.opentelemetry.api.trace.Tracer openTelemetryTracer, + boolean enableExtendedTracing) { + this.openTelemetryTracer = openTelemetryTracer; + this.openCensusTracer = openCensusTracer; + this.enableExtendedTracing = enableExtendedTracing; + this.commonAttributes = createCommonAttributes(); + } + + ISpan spanBuilder(String spanName) { + return spanBuilder(spanName, Attributes.empty()); + } + + ISpan spanBuilder(String spanName, Attributes attributes, TransactionOption... options) { + return spanBuilder(spanName, createTransactionAttributes(attributes, options)); + } + + ISpan spanBuilder(String spanName, Attributes attributes) { + if (SpannerOptions.getActiveTracingFramework().equals(TracingFramework.OPEN_TELEMETRY)) { + return new OpenTelemetrySpan( + openTelemetryTracer + .spanBuilder(spanName) + .setAllAttributes(attributes) + .setAllAttributes(commonAttributes) + .startSpan()); + } else { + return new OpenCensusSpan(openCensusTracer.spanBuilder(spanName).startSpan()); + } + } + + ISpan spanBuilderWithExplicitParent(String spanName, ISpan parentSpan) { + return spanBuilderWithExplicitParent(spanName, parentSpan, Attributes.empty()); + } + + ISpan spanBuilderWithExplicitParent(String spanName, ISpan parentSpan, Attributes attributes) { + if (SpannerOptions.getActiveTracingFramework().equals(TracingFramework.OPEN_TELEMETRY)) { + OpenTelemetrySpan otParentSpan = (OpenTelemetrySpan) parentSpan; + + io.opentelemetry.api.trace.SpanBuilder otSpan = + 
openTelemetryTracer.spanBuilder(spanName).setAllAttributes(attributes); + if (otParentSpan != null && otParentSpan.getOpenTelemetrySpan() != null) { + otSpan = otSpan.setParent(Context.current().with(otParentSpan.getOpenTelemetrySpan())); + } + return new OpenTelemetrySpan(otSpan.startSpan()); + } else { + OpenCensusSpan parentOcSpan = (OpenCensusSpan) parentSpan; + Span ocSpan = + openCensusTracer + .spanBuilderWithExplicitParent( + spanName, parentOcSpan != null ? parentOcSpan.getOpenCensusSpan() : null) + .startSpan(); + + return new OpenCensusSpan(ocSpan); + } + } + + ISpan getCurrentSpan() { + if (SpannerOptions.getActiveTracingFramework().equals(TracingFramework.OPEN_TELEMETRY)) { + return new OpenTelemetrySpan( + io.opentelemetry.api.trace.Span.fromContext(io.opentelemetry.context.Context.current())); + } else { + return new OpenCensusSpan(openCensusTracer.getCurrentSpan()); + } + } + + ISpan getBlankSpan() { + if (SpannerOptions.getActiveTracingFramework().equals(TracingFramework.OPEN_TELEMETRY)) { + return new OpenTelemetrySpan(io.opentelemetry.api.trace.Span.getInvalid()); + } else { + return new OpenCensusSpan(BlankSpan.INSTANCE); + } + } + + IScope withSpan(ISpan span) { + if (SpannerOptions.getActiveTracingFramework().equals(TracingFramework.OPEN_TELEMETRY)) { + OpenTelemetrySpan openTelemetrySpan; + if (!(span instanceof OpenTelemetrySpan)) { + openTelemetrySpan = new OpenTelemetrySpan(null); + } else { + openTelemetrySpan = (OpenTelemetrySpan) span; + } + return new OpenTelemetryScope(openTelemetrySpan.getOpenTelemetrySpan().makeCurrent()); + } else { + OpenCensusSpan openCensusSpan; + if (!(span instanceof OpenCensusSpan)) { + openCensusSpan = new OpenCensusSpan(null); + } else { + openCensusSpan = (OpenCensusSpan) span; + } + return new OpenCensusScope(openCensusTracer.withSpan(openCensusSpan.getOpenCensusSpan())); + } + } + + Attributes createTransactionAttributes( + Attributes commonAttributes, TransactionOption... 
options) { + AttributesBuilder builder = commonAttributes.toBuilder(); + if (options != null && options.length > 0) { + Optional tagOption = + Arrays.stream(options) + .filter(option -> option instanceof TagOption) + .map(option -> (TagOption) option) + .findAny(); + if (tagOption.isPresent()) { + builder.put(TRANSACTION_TAG_KEY, tagOption.get().getTag()); + } + } + return builder.build(); + } + + Attributes createStatementAttributes(Statement statement, Options options) { + if (this.enableExtendedTracing || (options != null && options.hasTag())) { + AttributesBuilder builder = Attributes.builder(); + if (this.enableExtendedTracing) { + builder.put(DB_STATEMENT_KEY, statement.getSql()); + builder.put(THREAD_NAME_KEY, getTraceThreadName()); + } + if (options != null && options.hasTag()) { + builder.put(STATEMENT_TAG_KEY, options.tag()); + } + return builder.build(); + } + return Attributes.empty(); + } + + Attributes createStatementBatchAttributes(Iterable statements, Options options) { + if (this.enableExtendedTracing || (options != null && options.hasTag())) { + AttributesBuilder builder = Attributes.builder(); + if (this.enableExtendedTracing) { + builder.put( + DB_STATEMENT_ARRAY_KEY, + StreamSupport.stream(statements.spliterator(), false) + .map(Statement::getSql) + .collect(Collectors.toList())); + builder.put(THREAD_NAME_KEY, getTraceThreadName()); + } + if (options != null && options.hasTag()) { + builder.put(STATEMENT_TAG_KEY, options.tag()); + } + return builder.build(); + } + return Attributes.empty(); + } + + Attributes createTableAttributes(String tableName, Options options) { + AttributesBuilder builder = Attributes.builder(); + builder.put(DB_TABLE_NAME_KEY, tableName); + if (options != null && options.hasTag()) { + builder.put(STATEMENT_TAG_KEY, options.tag()); + } + return builder.build(); + } + + Attributes createDatabaseAttributes(DatabaseId db) { + AttributesBuilder builder = Attributes.builder(); + builder.put(DB_NAME_KEY, db.getDatabase()); + 
builder.put(INSTANCE_NAME_KEY, db.getInstanceId().getInstance()); + builder.put(GCP_RESOURCE_NAME_KEY, GCP_RESOURCE_NAME_PREFIX + db.getName()); + return builder.build(); + } + + private Attributes createCommonAttributes() { + AttributesBuilder builder = Attributes.builder(); + builder.put(GCP_CLIENT_SERVICE_KEY, "spanner"); + builder.put(GCP_CLIENT_REPO_KEY, "googleapis/java-spanner"); + builder.put(GCP_CLIENT_VERSION_KEY, GaxProperties.getLibraryVersion(TraceWrapper.class)); + builder.put(CLOUD_REGION_KEY, BuiltInMetricsProvider.detectClientLocation()); + return builder.build(); + } + + private static String getTraceThreadName() { + return MoreObjects.firstNonNull( + Context.current().get(OpenTelemetryContextKeys.THREAD_NAME_KEY), + Thread.currentThread().getName()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionContext.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionContext.java new file mode 100644 index 000000000000..c80185d197c3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionContext.java @@ -0,0 +1,191 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.core.ApiFuture; +import com.google.cloud.spanner.Options.TransactionOption; +import com.google.cloud.spanner.Options.UpdateOption; +import com.google.spanner.v1.ResultSetStats; + +/** + * Context for a single attempt of a locking read-write transaction. This type of transaction is the + * only way to write data into Cloud Spanner; {@link Session#write(Iterable)} and {@link + * Session#writeAtLeastOnce(Iterable)} use transactions internally. These transactions rely on + * pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may + * abort, requiring the application to retry. However, the interface exposed by {@link + * TransactionRunner} eliminates the need for applications to write retry loops explicitly. + * + *

Locking transactions may be used to atomically read-modify-write data anywhere in a database. + * This type of transaction is externally consistent. + * + *

Clients should attempt to minimize the amount of time a transaction is active. Faster + * transactions commit with higher probability and cause less contention. Cloud Spanner attempts to + * keep read locks active as long as the transaction continues to do reads, and the transaction has + * not been terminated by returning from a {@link TransactionRunner.TransactionCallable}. Long + * periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and + * abort it. + * + *

Reads performed within a transaction acquire locks on the data being read. Writes can only be + * done at commit time, after all reads have been completed. + * + *

Conceptually, a read-write transaction consists of zero or more reads or SQL queries followed + * by a commit. + * + *

Semantics

+ * + *

Cloud Spanner can commit the transaction if all read locks it acquired are still valid at + * commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the + * transaction for any reason. If a commit attempt returns {@code ABORTED}, Cloud Spanner guarantees + * that the transaction has not modified any user data in Cloud Spanner. + * + *

Unless the transaction commits, Cloud Spanner makes no guarantees about how long the + * transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of + * mutual exclusion other than between Cloud Spanner transactions themselves. + * + *

Retrying Aborted Transactions

+ * + *

When a transaction aborts, the application can choose to retry the whole transaction again. To + * maximize the chances of successfully committing the retry, the client should execute the retry in + * the same session as the original attempt. The original session's lock priority increases with + * each consecutive abort, meaning that each attempt has a slightly better chance of success than + * the previous. + * + *

Under some circumstances (e.g., many transactions attempting to modify the same row(s)), a + * transaction can abort many times in a short period before successfully committing. Thus, it is + * not a good idea to cap the number of retries a transaction can attempt; instead, it is better to + * limit the total amount of wall time spent retrying. + * + *

Application code does not need to retry explicitly; {@link TransactionRunner} will + * automatically retry a transaction if an attempt results in an abort. + * + *

Idle Transactions

+ * + *

A transaction is considered idle if it has no outstanding reads or SQL queries and has not + * started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud + * Spanner so that they don't hold on to locks indefinitely. In that case, the commit will fail with + * error {@code ABORTED}. + * + *

If this behavior is undesirable, periodically executing a simple SQL query in the transaction + * (e.g., {@code SELECT 1}) prevents the transaction from becoming idle. + * + * @see DatabaseClient#readWriteTransaction(TransactionOption...) + * @see TransactionRunner + */ +public interface TransactionContext extends ReadContext { + /** + * Buffers a single mutation to be applied if the transaction commits successfully. The effects of + * this mutation will not be visible to subsequent operations in the transaction. All buffered + * mutations will be applied atomically. + */ + void buffer(Mutation mutation); + + /** Same as {@link #buffer(Mutation)}, but is guaranteed to be non-blocking. */ + default ApiFuture bufferAsync(Mutation mutation) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * Buffers mutations to be applied if the transaction commits successfully. The effects of the + * mutations will not be visible to subsequent operations in the transaction. All buffered + * mutations will be applied atomically. + */ + void buffer(Iterable mutations); + + /** Same as {@link #buffer(Iterable)}, but is guaranteed to be non-blocking. */ + default ApiFuture bufferAsync(Iterable mutations) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * Executes the DML statement (which can be a simple DML statement or DML statement with a + * returning clause) and returns the number of rows modified. For non-DML statements, it will + * result in an {@code IllegalArgumentException}. The effects of the DML statement will be visible + * to subsequent operations in the transaction. + */ + long executeUpdate(Statement statement, UpdateOption... options); + + /** + * Same as {@link #executeUpdate(Statement,UpdateOption...)}, but is guaranteed to be + * non-blocking. 
If multiple asynchronous update statements are submitted to the same read/write + * transaction, the statements are guaranteed to be sent to Cloud Spanner in the order that they + * were submitted in the client. This does however not guarantee that Spanner will receive the + * requests in the same order as they were sent, as requests that are sent partly in parallel can + * overtake each other. It is therefore recommended to block until an update statement has + * returned a result before sending the next update statement. + */ + ApiFuture executeUpdateAsync(Statement statement, UpdateOption... options); + + /** + * Analyzes a DML statement and returns query plan and/or execution statistics information. + * + *

{@link com.google.cloud.spanner.ReadContext.QueryAnalyzeMode#PLAN} only returns the plan for + * the statement. {@link com.google.cloud.spanner.ReadContext.QueryAnalyzeMode#PROFILE} executes + * the DML statement, returns the modified row count and execution statistics, and the effects of + * the DML statement will be visible to subsequent operations in the transaction. + * + * @deprecated Use {@link #analyzeUpdateStatement(Statement, QueryAnalyzeMode, UpdateOption...)} + * instead to get both statement plan and parameter metadata + */ + @Deprecated + default ResultSetStats analyzeUpdate( + Statement statement, QueryAnalyzeMode analyzeMode, UpdateOption... options) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * Analyzes a DML statement and returns query plan and statement parameter metadata and optionally + * execution statistics information. + * + *

{@link com.google.cloud.spanner.ReadContext.QueryAnalyzeMode#PLAN} only returns the plan and + * parameter metadata for the statement. {@link + * com.google.cloud.spanner.ReadContext.QueryAnalyzeMode#PROFILE} executes the DML statement, + * returns the modified row count and execution statistics, and the effects of the DML statement + * will be visible to subsequent operations in the transaction. + */ + default ResultSet analyzeUpdateStatement( + Statement statement, QueryAnalyzeMode analyzeMode, UpdateOption... options) { + throw new UnsupportedOperationException("method should be overwritten"); + } + + /** + * Executes a list of DML statements in a single request. The statements will be executed in order + * and the semantics is the same as if each statement is executed by {@code executeUpdate} in a + * loop. This method returns an array of long integers, each representing the number of rows + * modified by each statement. + * + *

If an individual statement fails, execution stops and a {@code SpannerBatchUpdateException} + * is returned, which includes the error and the number of rows affected by the statements that + * are run prior to the error. + * + *

For example, if statements contains 3 statements, and the 2nd one is not a valid DML. This + * method throws a {@code SpannerBatchUpdateException} that contains the error message from the + * 2nd statement, and an array of length 1 that contains the number of rows modified by the 1st + * statement. The 3rd statement will not run. + */ + long[] batchUpdate(Iterable statements, UpdateOption... options); + + /** + * Same as {@link #batchUpdate(Iterable, UpdateOption...)}, but is guaranteed to be non-blocking. + * If multiple asynchronous update statements are submitted to the same read/write transaction, + * the statements are guaranteed to be sent to Cloud Spanner in the order that they were submitted + * in the client. This does however not guarantee that Spanner will receive the requests in the + * same order as they were sent, as requests that are sent partly in parallel can overtake each + * other. It is therefore recommended to block until an update statement has returned a result + * before sending the next update statement. + */ + ApiFuture batchUpdateAsync(Iterable statements, UpdateOption... options); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionContextFutureImpl.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionContextFutureImpl.java new file mode 100644 index 000000000000..1e796ecffb35 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionContextFutureImpl.java @@ -0,0 +1,261 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.core.ForwardingApiFuture; +import com.google.api.core.InternalApi; +import com.google.api.core.SettableApiFuture; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AsyncTransactionManager.AsyncTransactionFunction; +import com.google.cloud.spanner.AsyncTransactionManager.AsyncTransactionStep; +import com.google.cloud.spanner.AsyncTransactionManager.CommitTimestampFuture; +import com.google.cloud.spanner.AsyncTransactionManager.TransactionContextFuture; +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.MoreExecutors; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +class TransactionContextFutureImpl extends ForwardingApiFuture + implements TransactionContextFuture { + + @InternalApi + interface CommittableAsyncTransactionManager extends AsyncTransactionManager { + void onError(Throwable t); + + ApiFuture commitAsync(); + } + + /** + * {@link ApiFuture} that returns a commit timestamp. Any {@link AbortedException} that is thrown + * by either the commit call or any other rpc during the transaction will be thrown by the {@link + * #get()} method of this future as an {@link AbortedException} and not as an {@link + * ExecutionException} with an {@link AbortedException} as its cause. 
+ */ + static class CommitTimestampFutureImpl extends ForwardingApiFuture + implements CommitTimestampFuture { + CommitTimestampFutureImpl(ApiFuture delegate) { + super(Preconditions.checkNotNull(delegate)); + } + + @Override + public Timestamp get() throws AbortedException, ExecutionException, InterruptedException { + try { + return super.get(); + } catch (ExecutionException e) { + if (e.getCause() != null && e.getCause() instanceof AbortedException) { + throw (AbortedException) e.getCause(); + } + throw e; + } + } + + @Override + public Timestamp get(long timeout, TimeUnit unit) + throws AbortedException, ExecutionException, InterruptedException, TimeoutException { + try { + return super.get(timeout, unit); + } catch (ExecutionException e) { + if (e.getCause() != null && e.getCause() instanceof AbortedException) { + throw (AbortedException) e.getCause(); + } + throw e; + } + } + } + + class AsyncTransactionStatementImpl extends ForwardingApiFuture + implements AsyncTransactionStep { + final ApiFuture txnFuture; + final SettableApiFuture statementResult; + + AsyncTransactionStatementImpl( + final ApiFuture txnFuture, + ApiFuture input, + final AsyncTransactionFunction function, + Executor executor) { + this(SettableApiFuture.create(), txnFuture, input, function, executor); + } + + AsyncTransactionStatementImpl( + SettableApiFuture delegate, + final ApiFuture txnFuture, + ApiFuture input, + final AsyncTransactionFunction function, + final Executor executor) { + super(delegate); + this.statementResult = delegate; + this.txnFuture = txnFuture; + ApiFutures.addCallback( + input, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable t) { + mgr.onError(t); + statementResult.setException(t); + txnResult.setException(t); + } + + @Override + public void onSuccess(I result) { + try { + ApiFutures.addCallback( + runAsyncTransactionFunction(function, txnFuture.get(), result, executor), + new ApiFutureCallback() { + @Override + public void 
onFailure(Throwable t) { + mgr.onError(t); + statementResult.setException(t); + txnResult.setException(t); + } + + @Override + public void onSuccess(O result) { + statementResult.set(result); + } + }, + MoreExecutors.directExecutor()); + } catch (Throwable t) { + mgr.onError(t); + statementResult.setException(t); + txnResult.setException(t); + } + } + }, + MoreExecutors.directExecutor()); + } + + @Override + public AsyncTransactionStatementImpl then( + AsyncTransactionFunction next, Executor executor) { + return new AsyncTransactionStatementImpl<>(txnFuture, statementResult, next, executor); + } + + @Override + public CommitTimestampFuture commitAsync() { + ApiFutures.addCallback( + statementResult, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable t) { + mgr.onError(t); + txnResult.setException(t); + } + + @Override + public void onSuccess(O result) { + ApiFutures.addCallback( + mgr.commitAsync(), + new ApiFutureCallback() { + @Override + public void onFailure(Throwable t) { + mgr.onError(t); + txnResult.setException(t); + } + + @Override + public void onSuccess(Timestamp result) { + txnResult.set(result); + } + }, + MoreExecutors.directExecutor()); + } + }, + MoreExecutors.directExecutor()); + return new CommitTimestampFutureImpl(txnResult); + } + } + + static ApiFuture runAsyncTransactionFunction( + final AsyncTransactionFunction function, + final TransactionContext txn, + final I input, + Executor executor) + throws Exception { + // Shortcut for common path. + if (executor == MoreExecutors.directExecutor()) { + return Preconditions.checkNotNull( + function.apply(txn, input), + "AsyncTransactionFunction returned . Did you mean to return" + + " ApiFutures.immediateFuture(null)?"); + } else { + final SettableApiFuture res = SettableApiFuture.create(); + executor.execute( + () -> { + try { + ApiFuture functionResult = + Preconditions.checkNotNull( + function.apply(txn, input), + "AsyncTransactionFunction returned . 
Did you mean to return" + + " ApiFutures.immediateFuture(null)?"); + ApiFutures.addCallback( + functionResult, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable t) { + res.setException(t); + } + + @Override + public void onSuccess(O result) { + res.set(result); + } + }, + MoreExecutors.directExecutor()); + } catch (Throwable t) { + res.setException(t); + } + }); + return res; + } + } + + final CommittableAsyncTransactionManager mgr; + final SettableApiFuture txnResult = SettableApiFuture.create(); + + TransactionContextFutureImpl( + CommittableAsyncTransactionManager mgr, ApiFuture txnFuture) { + super(txnFuture); + this.mgr = mgr; + } + + @Override + public AsyncTransactionStatementImpl then( + AsyncTransactionFunction function, Executor executor) { + final SettableApiFuture input = SettableApiFuture.create(); + ApiFutures.addCallback( + this, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable t) { + mgr.onError(t); + input.setException(t); + } + + @Override + public void onSuccess(TransactionContext result) { + input.set(null); + } + }, + MoreExecutors.directExecutor()); + return new AsyncTransactionStatementImpl<>(this, input, function, executor); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionManager.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionManager.java new file mode 100644 index 000000000000..350adb2a2c29 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionManager.java @@ -0,0 +1,118 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Options.TransactionOption; + +/** + * An interface for managing the life cycle of a read write transaction including all its retries. + * See {@link TransactionContext} for a description of transaction semantics. + * + *

At any point in time there can be at most one active transaction in this manager. When that + * transaction is committed, if it fails with an {@code ABORTED} error, calling {@link + * #resetForRetry()} would create a new {@link TransactionContext}. The newly created transaction + * would use the same session thus increasing its lock priority. If the transaction is committed + * successfully, or is rolled back or commit fails with any error other than {@code ABORTED}, the + * manager is considered complete and no further transactions are allowed to be created in it. + * + *

Every {@code TransactionManager} should either be committed or rolled back. Failure to do so + * can cause resources to be leaked and deadlocks. Easiest way to guarantee this is by calling + * {@link #close()} in a finally block. + * + * @see DatabaseClient#transactionManager(TransactionOption...) + */ +public interface TransactionManager extends AutoCloseable { + + /** State of the transaction manager. */ + enum TransactionState { + // Transaction has been started either by calling {@link #begin()} or via + // {@link resetForRetry()} but has not been committed or rolled back yet. + STARTED, + // Transaction was successfully committed. This is a terminal state. + COMMITTED, + // Transaction failed during commit with an error other than ABORTED. Transaction cannot be + // retried in this state. This is a terminal state. + COMMIT_FAILED, + // Transaction failed during commit with ABORTED and can be retried. + ABORTED, + // Transaction was rolled back. This is a terminal state. + ROLLED_BACK + } + + /** + * Creates a new read write transaction. This must be called before doing any other operation and + * can only be called once. To create a new transaction for subsequent retries, see {@link + * #resetForRetry()}. + */ + TransactionContext begin(); + + /** + * Initializes a new read-write transaction that is a retry of a previously aborted transaction. + * This method must be called before performing any operations, and it can only be invoked once + * per transaction lifecycle. + * + *

This method should only be used when multiplexed sessions are enabled to create a retry for + * a previously aborted transaction. This method can be used instead of {@link #resetForRetry()} + * to create a retry. Using this method or {@link #resetForRetry()} will have the same effect. You + * must pass in the {@link AbortedException} from the previous attempt to preserve the + * transaction's priority. + * + *

For regular sessions, this behaves the same as {@link #begin()}. + */ + TransactionContext begin(AbortedException exception); + + /** + * Commits the currently active transaction. If the transaction was already aborted, then this + * would throw an {@link AbortedException}. + */ + void commit(); + + /** + * Rolls back the currently active transaction. In most cases there should be no need to call this + * explicitly since {@link #close()} would automatically roll back any active transaction. + */ + void rollback(); + + /** + * Creates a new transaction for retry. This should only be called if the previous transaction + * failed with {@code ABORTED}. In all other cases, this will throw an {@link + * IllegalStateException}. Users should backoff before calling this method. Backoff delay is + * specified by {@link SpannerException#getRetryDelayInMillis()} on the {@code SpannerException} + * throw by the previous commit call. + */ + TransactionContext resetForRetry(); + + /** + * Returns the commit timestamp if the transaction committed successfully otherwise it will throw + * {@code IllegalStateException}. + */ + Timestamp getCommitTimestamp(); + + /** Returns the {@link CommitResponse} of this transaction. */ + CommitResponse getCommitResponse(); + + /** Returns the state of the transaction. */ + TransactionState getState(); + + /** + * Closes the manager. If there is an active transaction, it will be rolled back. Underlying + * session will be released back to the session pool. 
+ */ + @Override + void close(); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionManagerImpl.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionManagerImpl.java new file mode 100644 index 000000000000..469376c52edf --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionManagerImpl.java @@ -0,0 +1,187 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Options.TransactionOption; +import com.google.cloud.spanner.SessionImpl.SessionTransaction; +import com.google.common.base.Preconditions; +import com.google.protobuf.ByteString; + +/** Implementation of {@link TransactionManager}. */ +final class TransactionManagerImpl implements TransactionManager, SessionTransaction { + private final TraceWrapper tracer; + + private final SessionImpl session; + private ISpan span; + private final Options options; + + private TransactionRunnerImpl.TransactionContextImpl txn; + private TransactionState txnState; + + TransactionManagerImpl( + SessionImpl session, ISpan span, TraceWrapper tracer, TransactionOption... 
options) { + this.session = session; + this.span = span; + this.tracer = tracer; + this.options = Options.fromTransactionOptions(options); + } + + ISpan getSpan() { + return span; + } + + @Override + public void setSpan(ISpan span) { + this.span = span; + } + + @Override + public TransactionContext begin() { + Preconditions.checkState(txn == null, "begin can only be called once"); + return begin(ByteString.EMPTY); + } + + @Override + public TransactionContext begin(AbortedException exception) { + Preconditions.checkState(txn == null, "begin can only be called once"); + Preconditions.checkNotNull(exception, "AbortedException from the previous attempt is required"); + ByteString previousAbortedTransactionID = + exception.getTransactionID() != null ? exception.getTransactionID() : ByteString.EMPTY; + return begin(previousAbortedTransactionID); + } + + TransactionContext begin(ByteString previousTransactionId) { + try (IScope s = tracer.withSpan(span)) { + txn = session.newTransaction(options, previousTransactionId); + session.setActive(this); + txnState = TransactionState.STARTED; + return txn; + } + } + + @Override + public void commit() { + Preconditions.checkState( + txnState == TransactionState.STARTED, + "commit can only be invoked if" + " the transaction is in progress"); + if (txn.isAborted()) { + txnState = TransactionState.ABORTED; + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.ABORTED, "Transaction already aborted"); + } + try { + txn.commit(); + txnState = TransactionState.COMMITTED; + } catch (AbortedException e1) { + txnState = TransactionState.ABORTED; + throw e1; + } catch (SpannerException e2) { + txnState = TransactionState.COMMIT_FAILED; + throw e2; + } finally { + // At this point, if the TransactionState is not ABORTED, then the transaction has reached an + // end state. + // We can safely call close() to release resources. 
+ if (getState() != TransactionState.ABORTED) { + close(); + } + } + } + + @Override + public void rollback() { + Preconditions.checkState( + txnState == TransactionState.STARTED, + "rollback can only be called if the transaction is in progress"); + try { + txn.rollback(); + } finally { + txnState = TransactionState.ROLLED_BACK; + // At this point, the TransactionState is ROLLED_BACK which is an end state. + // We can safely call close() to release resources. + close(); + } + } + + @Override + public TransactionContext resetForRetry() { + if (txn == null || !txn.isAborted() && txnState != TransactionState.ABORTED) { + throw new IllegalStateException( + "resetForRetry can only be called if the previous attempt aborted"); + } + try (IScope s = tracer.withSpan(span)) { + boolean useInlinedBegin = txn.transactionId != null; + + // Determine the latest transactionId when using a multiplexed session. + ByteString multiplexedSessionPreviousTransactionId = ByteString.EMPTY; + if (session.getIsMultiplexed()) { + // Use the current transactionId if available, otherwise fallback to the previous aborted + // transactionId. + multiplexedSessionPreviousTransactionId = + txn.transactionId != null ? 
txn.transactionId : txn.getPreviousTransactionId(); + } + txn = + session.newTransaction( + options, /* previousTransactionId= */ multiplexedSessionPreviousTransactionId); + if (!useInlinedBegin) { + txn.ensureTxn(); + } + txnState = TransactionState.STARTED; + return txn; + } + } + + @Override + public Timestamp getCommitTimestamp() { + Preconditions.checkState( + txnState == TransactionState.COMMITTED, + "getCommitTimestamp can only be invoked if the transaction committed successfully"); + return txn.getCommitResponse().getCommitTimestamp(); + } + + @Override + public CommitResponse getCommitResponse() { + Preconditions.checkState( + txnState == TransactionState.COMMITTED, + "getCommitResponse can only be invoked if the transaction committed successfully"); + return txn.getCommitResponse(); + } + + @Override + public void close() { + try { + if (txnState == TransactionState.STARTED && !txn.isAborted()) { + txn.rollback(); + txnState = TransactionState.ROLLED_BACK; + } + } finally { + span.end(); + session.onTransactionDone(); + } + } + + @Override + public TransactionState getState() { + return txnState; + } + + @Override + public void invalidate() { + close(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionMutationLimitExceededException.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionMutationLimitExceededException.java new file mode 100644 index 000000000000..de215c5caee6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionMutationLimitExceededException.java @@ -0,0 +1,81 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SpannerExceptionFactory.extractErrorDetails; + +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ErrorDetails; +import javax.annotation.Nullable; + +/** Exception thrown by Spanner when the transaction mutation limit has been exceeded. */ +public class TransactionMutationLimitExceededException extends SpannerException { + private static final long serialVersionUID = 1L; + + private static final String ERROR_MESSAGE = "The transaction contains too many mutations."; + + private static final String TRANSACTION_RESOURCE_LIMIT_EXCEEDED_MESSAGE = + "Transaction resource limits exceeded"; + + /** Private constructor. Use {@link SpannerExceptionFactory} to create instances. 
*/ + TransactionMutationLimitExceededException( + DoNotConstructDirectly token, + ErrorCode errorCode, + String message, + Throwable cause, + @Nullable ApiException apiException) { + super(token, errorCode, /* retryable= */ false, message, cause, apiException); + } + + static boolean isTransactionMutationLimitException(ErrorCode code, String message) { + return code == ErrorCode.INVALID_ARGUMENT + && message != null + && (message.contains(ERROR_MESSAGE) + || message.contains(TRANSACTION_RESOURCE_LIMIT_EXCEEDED_MESSAGE)); + } + + static boolean isTransactionMutationLimitException(Throwable cause, ApiException apiException) { + if (cause == null + || cause.getMessage() == null + || !(cause.getMessage().contains(ERROR_MESSAGE) + || cause.getMessage().contains(TRANSACTION_RESOURCE_LIMIT_EXCEEDED_MESSAGE))) { + return false; + } + // Spanner includes a hint that points to the Spanner limits documentation page when the error + // was that the transaction mutation limit was exceeded. We use that here to identify the error, + // as there is no other specific metadata in the error that identifies it (other than the error + // message). + ErrorDetails errorDetails = extractErrorDetails(cause, apiException); + if (errorDetails != null && errorDetails.getHelp() != null) { + return errorDetails.getHelp().getLinksCount() == 1 + && errorDetails + .getHelp() + .getLinks(0) + .getDescription() + .equals("Cloud Spanner limits documentation.") + && errorDetails + .getHelp() + .getLinks(0) + .getUrl() + .equals("https://cloud.google.com/spanner/docs/limits"); + } else if (cause.getMessage().contains(TRANSACTION_RESOURCE_LIMIT_EXCEEDED_MESSAGE)) { + // This more generic error does not contain any additional details. 
+ return true; + } + return false; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionRunner.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionRunner.java new file mode 100644 index 000000000000..09bc11f152b2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionRunner.java @@ -0,0 +1,99 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Options.TransactionOption; +import javax.annotation.Nullable; + +/** + * An interface for executing a body of work in the context of a read-write transaction, with + * retries for transaction aborts. See {@link TransactionContext} for a description of transaction + * semantics. {@code TransactionRunner} instances are obtained by calling {@link + * DatabaseClient#readWriteTransaction(TransactionOption...)}. + * + *

A {@code TransactionRunner} instance can only be used for a single invocation of {@link + * #run(TransactionCallable)}. + */ +public interface TransactionRunner { + /** A unit of work to be performed in the context of a transaction. */ + @FunctionalInterface + interface TransactionCallable { + /** + * Invoked by the library framework to perform a single attempt of a transaction. This method + * may be called more than once if previous transaction attempts aborted. Each invocation can + * perform zero or more reads or queries and can buffer mutations to be applied to the database + * when the transaction commits. + * + *

The framework will attempt to commit the transaction when this method returns normally. If + * commit is successful, the return value from this method will be returned from {@link + * TransactionRunner#run(TransactionCallable)}; on failure, the transaction may be retried by + * the framework by calling the method again. + * + *

If this method raises an exception, one of two things can happen. If some operation in the + * transaction has previously detected that the transaction has been aborted, the framework may + * retry the transaction. Otherwise, the framework will roll back the transaction, releasing any + * locks held, and yield the exception to the caller as a {@link SpannerException} of type + * {@link ErrorCode#UNKNOWN} with the exception as the cause. + */ + @Nullable + T run(TransactionContext transaction) throws Exception; + } + + /** + * Executes a read-write transaction, with retries as necessary. The work to perform in each + * transaction attempt is defined by {@code callable}, which may return an object as the result of + * the work. {@code callable} will be retried if a transaction attempt aborts; implementations + * must be prepared to be called more than once. Any writes buffered by {@code callable} will only + * be applied if the transaction commits successfully. Similarly, the value produced by {@code + * callable} will only be returned by this method if the transaction commits successfully. + * + *

{@code callable} is allowed to raise an unchecked exception. Typically this prevents further + * attempts to execute {@code callable}, and the exception will propagate from this method call. + * However, if a read or query in {@code callable} detected that the transaction aborted, {@code + * callable} will be retried even if it raised an exception. + */ + @Nullable + T run(TransactionCallable callable); + + /** + * Returns the timestamp at which the transaction committed. This method may only be called once + * {@link #run(TransactionCallable)} has returned normally. + */ + Timestamp getCommitTimestamp(); + + /** Returns the {@link CommitResponse} of this transaction. */ + CommitResponse getCommitResponse(); + + /** + * Allows overriding the default behaviour of blocking nested transactions. + * + *

Note that the client library does not maintain any information regarding the nesting + * structure. If an outer transaction fails and an inner transaction succeeds, upon retry of the + * outer transaction, the inner transaction will be re-executed. + * + *

Use with care when certain that the inner transaction is idempotent. Avoid doing this when + * accessing the same db. There might be legitimate uses where access need to be made across DBs + * for instance. + * + *

E.g. of nesting that is discouraged, see {@code nestedReadWriteTxnThrows} {@code + * nestedReadOnlyTxnThrows}, {@code nestedBatchTxnThrows}, {@code nestedSingleUseReadTxnThrows} + * + * @return this object + */ + TransactionRunner allowNestedTransaction(); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionRunnerImpl.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionRunnerImpl.java new file mode 100644 index 000000000000..3458b04e7a9f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionRunnerImpl.java @@ -0,0 +1,1381 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerBatchUpdateException; +import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException; +import static com.google.cloud.spanner.SpannerImpl.BATCH_UPDATE; +import static com.google.cloud.spanner.SpannerImpl.UPDATE; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.Options.ReadOption; +import com.google.cloud.spanner.Options.TransactionOption; +import com.google.cloud.spanner.Options.UpdateOption; +import com.google.cloud.spanner.SessionImpl.SessionTransaction; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.cloud.spanner.spi.v1.SpannerRpc.Option; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.ByteString; +import com.google.protobuf.Empty; +import com.google.rpc.Code; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteBatchDmlResponse; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ExecuteSqlRequest.QueryMode; +import com.google.spanner.v1.MultiplexedSessionPrecommitToken; +import com.google.spanner.v1.RequestOptions; +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.Transaction; +import com.google.spanner.v1.TransactionOptions; +import 
com.google.spanner.v1.TransactionSelector; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nullable; +import javax.annotation.concurrent.GuardedBy; + +/** Default implementation of {@link TransactionRunner}. */ +class TransactionRunnerImpl implements SessionTransaction, TransactionRunner { + private static final Logger txnLogger = Logger.getLogger(TransactionRunner.class.getName()); + + /** + * (Part of) the error message that is returned by Cloud Spanner if a transaction is cancelled + * because it was invalidated by a later transaction in the same session. + */ + private static final String TRANSACTION_CANCELLED_MESSAGE = "invalidated by a later transaction"; + + private static final String TRANSACTION_ALREADY_COMMITTED_MESSAGE = + "Transaction has already committed"; + + private static final String DML_INVALID_EXCLUDE_CHANGE_STREAMS_OPTION_MESSAGE = + "Options.excludeTxnFromChangeStreams() cannot be specified for individual DML requests. " + + "This option should be set at the transaction level."; + + @VisibleForTesting + static class TransactionContextImpl extends AbstractReadContext implements TransactionContext { + + static class Builder extends AbstractReadContext.Builder { + + private Clock clock = new Clock(); + private ByteString transactionId; + // This field is set only when the transaction is created during a retry and uses a + // multiplexed session. 
+ private ByteString previousTransactionId; + private Options options; + private boolean trackTransactionStarter; + + private Builder() {} + + Builder setClock(Clock clock) { + this.clock = Preconditions.checkNotNull(clock); + return self(); + } + + Builder setTransactionId(ByteString transactionId) { + this.transactionId = transactionId; + return self(); + } + + Builder setOptions(Options options) { + this.options = Preconditions.checkNotNull(options); + return self(); + } + + Builder setTrackTransactionStarter(boolean trackTransactionStarter) { + this.trackTransactionStarter = trackTransactionStarter; + return self(); + } + + Builder setPreviousTransactionId(ByteString previousTransactionId) { + this.previousTransactionId = previousTransactionId; + return self(); + } + + @Override + TransactionContextImpl build() { + Preconditions.checkState(this.options != null, "Options must be set"); + return new TransactionContextImpl(this); + } + } + + static Builder newBuilder() { + return new Builder(); + } + + /** + * {@link AsyncResultSet} implementation that keeps track of the async operations that are still + * running for this {@link TransactionContext} and that should finish before the {@link + * TransactionContext} can commit and release its session back into the pool. 
+ */ + private class TransactionContextAsyncResultSetImpl extends ForwardingAsyncResultSet + implements ListenableAsyncResultSet { + + private TransactionContextAsyncResultSetImpl(ListenableAsyncResultSet delegate) { + super(delegate); + } + + @Override + public ApiFuture setCallback(Executor exec, ReadyCallback cb) { + Runnable listener = TransactionContextImpl.this::decreaseAsyncOperations; + try { + increaseAsyncOperations(); + addListener(listener); + return super.setCallback(exec, cb); + } catch (Throwable t) { + removeListener(listener); + decreaseAsyncOperations(); + throw t; + } + } + + @Override + public void addListener(Runnable listener) { + ((ListenableAsyncResultSet) getDelegate()).addListener(listener); + } + + @Override + public void removeListener(Runnable listener) { + ((ListenableAsyncResultSet) getDelegate()).removeListener(listener); + } + } + + private final Object committingLock = new Object(); + + @GuardedBy("committingLock") + private volatile boolean committing; + + private final Object precommitTokenLock = new Object(); + + @GuardedBy("precommitTokenLock") + private MultiplexedSessionPrecommitToken latestPrecommitToken; + + @GuardedBy("lock") + private volatile SettableApiFuture finishedAsyncOperations = SettableApiFuture.create(); + + @GuardedBy("lock") + private volatile int runningAsyncOperations; + + private final Queue mutations = new ConcurrentLinkedQueue<>(); + + @GuardedBy("lock") + private boolean aborted; + + private final Options options; + + /** Default to -1 to indicate not available. */ + @GuardedBy("lock") + private long retryDelayInMillis = -1L; + + /** + * transactionIdFuture will return the transaction id returned by the first statement in the + * transaction if the BeginTransaction option is included with the first statement of the + * transaction. 
+ */ + @VisibleForTesting volatile SettableApiFuture transactionIdFuture = null; + + @VisibleForTesting long waitForTransactionTimeoutMillis = 60_000L; + private final boolean trackTransactionStarter; + private Exception transactionStarter; + + volatile ByteString transactionId; + + final ByteString previousTransactionId; + + private CommitResponse commitResponse; + private final Clock clock; + + private final Map channelHint; + + private TransactionContextImpl(Builder builder) { + super(builder); + this.transactionId = builder.transactionId; + this.trackTransactionStarter = builder.trackTransactionStarter; + this.options = builder.options; + this.finishedAsyncOperations.set(null); + this.clock = builder.clock; + this.channelHint = + getChannelHintOptions( + session.getOptions(), ThreadLocalRandom.current().nextLong(Long.MAX_VALUE)); + this.previousTransactionId = builder.previousTransactionId; + } + + @Override + protected boolean isReadOnly() { + return false; + } + + @Override + protected boolean isRouteToLeader() { + return true; + } + + private void increaseAsyncOperations() { + synchronized (lock) { + if (runningAsyncOperations == 0) { + finishedAsyncOperations = SettableApiFuture.create(); + } + runningAsyncOperations++; + } + } + + private void decreaseAsyncOperations() { + synchronized (lock) { + runningAsyncOperations--; + if (runningAsyncOperations == 0) { + finishedAsyncOperations.set(null); + } + } + } + + ByteString getPreviousTransactionId() { + return this.previousTransactionId; + } + + @Override + public void close() { + // Only mark the context as closed, but do not end the tracer span, as that is done by the + // commit and rollback methods. + synchronized (lock) { + isClosed = true; + } + } + + void ensureTxn() { + try { + ensureTxnAsync().get(); + } catch (ExecutionException e) { + throw SpannerExceptionFactory.asSpannerException(e.getCause() == null ? 
e : e.getCause()); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + + ApiFuture ensureTxnAsync() { + final SettableApiFuture res = SettableApiFuture.create(); + if (transactionId == null || isAborted()) { + createTxnAsync(res, null); + } else { + span.addAnnotation("Transaction Initialized", "Id", transactionId.toStringUtf8()); + txnLogger.log( + Level.FINER, + "Using prepared transaction {0}", + txnLogger.isLoggable(Level.FINER) ? transactionId.asReadOnlyByteBuffer() : null); + res.set(null); + } + return res; + } + + private void createTxnAsync( + final SettableApiFuture res, com.google.spanner.v1.Mutation mutation) { + span.addAnnotation("Creating Transaction"); + final ApiFuture fut = + session.beginTransactionAsync( + options, + isRouteToLeader(), + getTransactionChannelHint(), + getPreviousTransactionId(), + mutation); + fut.addListener( + () -> { + try { + Transaction txn = fut.get(); + transactionId = txn.getId(); + span.addAnnotation("Transaction Creation Done", "Id", transactionId.toStringUtf8()); + txnLogger.log( + Level.FINER, + "Started transaction {0}", + txnLogger.isLoggable(Level.FINER) ? transactionId.asReadOnlyByteBuffer() : null); + if (txn.hasPrecommitToken()) { + onPrecommitToken(txn.getPrecommitToken()); + } + res.set(null); + } catch (ExecutionException e) { + SpannerException spannerException = SpannerExceptionFactory.asSpannerException(e); + if (spannerException.getErrorCode() == ErrorCode.ABORTED + && session.getIsMultiplexed() + && mutation != null) { + // Begin transaction can return ABORTED errors. This can only happen if it included + // a mutation key, which again means that this is a mutation-only transaction on a + // multiplexed session. + span.addAnnotation( + "Transaction Creation Failed with ABORT. Retrying", + e.getCause() == null ? e : e.getCause()); + createTxnAsync(res, mutation); + return; + } + span.addAnnotation( + "Transaction Creation Failed", e.getCause() == null ? 
e : e.getCause()); + res.setException(e.getCause() == null ? e : e.getCause()); + } catch (InterruptedException e) { + res.setException(SpannerExceptionFactory.propagateInterrupt(e)); + } + }, + MoreExecutors.directExecutor()); + } + + void commit() { + try { + // Normally, Gax will take care of any timeouts, but we add a timeout for getting the value + // from the future here as well to make sure the call always finishes, even if the future + // never resolves. + commitResponse = + commitAsync() + .get( + rpc.getCommitRetrySettings().getTotalTimeout().getSeconds() + 5, + TimeUnit.SECONDS); + } catch (InterruptedException | TimeoutException e) { + if (commitFuture != null) { + commitFuture.cancel(true); + } + if (e instanceof InterruptedException) { + throw SpannerExceptionFactory.propagateInterrupt((InterruptedException) e); + } else { + throw SpannerExceptionFactory.propagateTimeout((TimeoutException) e); + } + } catch (ExecutionException e) { + throw SpannerExceptionFactory.asSpannerException(e.getCause() == null ? 
e : e.getCause()); + } + } + + volatile ApiFuture commitFuture; + + ApiFuture commitAsync() { + close(); + + List mutationsProto = new ArrayList<>(); + com.google.spanner.v1.Mutation randomMutation = null; + synchronized (committingLock) { + if (committing) { + throw new IllegalStateException(TRANSACTION_ALREADY_COMMITTED_MESSAGE); + } + committing = true; + if (!mutations.isEmpty()) { + randomMutation = Mutation.toProtoAndReturnRandomMutation(mutations, mutationsProto); + } + } + final SettableApiFuture res = SettableApiFuture.create(); + final SettableApiFuture finishOps; + CommitRequest.Builder builder = + CommitRequest.newBuilder() + .setSession(session.getName()) + .setReturnCommitStats(options.withCommitStats()); + if (options.hasPriority() || getTransactionTag() != null) { + RequestOptions.Builder requestOptionsBuilder = RequestOptions.newBuilder(); + if (options.hasPriority()) { + requestOptionsBuilder.setPriority(options.priority()); + } + if (getTransactionTag() != null) { + requestOptionsBuilder.setTransactionTag(getTransactionTag()); + } + builder.setRequestOptions(requestOptionsBuilder.build()); + } + if (options.hasMaxCommitDelay()) { + builder.setMaxCommitDelay( + com.google.protobuf.Duration.newBuilder() + .setSeconds(options.maxCommitDelay().getSeconds()) + .setNanos(options.maxCommitDelay().getNano()) + .build()); + } + synchronized (lock) { + if (transactionIdFuture == null && transactionId == null && runningAsyncOperations == 0) { + finishOps = SettableApiFuture.create(); + createTxnAsync(finishOps, randomMutation); + } else { + finishOps = finishedAsyncOperations; + } + } + builder.addAllMutations(mutationsProto); + finishOps.addListener( + new CommitRunnable( + res, finishOps, builder, /* retryAttemptDueToCommitProtocolExtension= */ false), + MoreExecutors.directExecutor()); + return res; + } + + private final class CommitRunnable implements Runnable { + + private final SettableApiFuture res; + private final ApiFuture prev; + private final 
CommitRequest.Builder requestBuilder; + private final boolean retryAttemptDueToCommitProtocolExtension; + + CommitRunnable( + SettableApiFuture res, + ApiFuture prev, + CommitRequest.Builder requestBuilder, + boolean retryAttemptDueToCommitProtocolExtension) { + this.res = res; + this.prev = prev; + this.requestBuilder = requestBuilder; + this.retryAttemptDueToCommitProtocolExtension = retryAttemptDueToCommitProtocolExtension; + } + + @Override + public void run() { + try { + prev.get(); + if (transactionId == null && transactionIdFuture == null) { + requestBuilder.setSingleUseTransaction( + TransactionOptions.newBuilder() + .setReadWrite(TransactionOptions.ReadWrite.getDefaultInstance()) + .setExcludeTxnFromChangeStreams( + options.withExcludeTxnFromChangeStreams() == Boolean.TRUE)); + } else { + requestBuilder.setTransactionId( + transactionId == null + ? transactionIdFuture.get( + waitForTransactionTimeoutMillis, TimeUnit.MILLISECONDS) + : transactionId); + } + RequestOptions requestOptions = options.toRequestOptionsProto(true); + if (!requestOptions.equals(RequestOptions.getDefaultInstance())) { + requestBuilder.setRequestOptions(requestOptions); + } + if (session.getIsMultiplexed() && getLatestPrecommitToken() != null) { + // Set the precommit token in the CommitRequest for multiplexed sessions. + requestBuilder.setPrecommitToken(getLatestPrecommitToken()); + } + if (retryAttemptDueToCommitProtocolExtension) { + // When a retry occurs due to the commit protocol extension, clear all mutations because + // they were already buffered in SpanFE during the previous attempt. 
+ requestBuilder.clearMutations(); + span.addAnnotation( + "Retrying commit operation with a new precommit token obtained from the previous" + + " CommitResponse"); + } + final CommitRequest commitRequest = requestBuilder.build(); + span.addAnnotation("Starting Commit"); + final ApiFuture commitFuture; + final ISpan opSpan = tracer.spanBuilderWithExplicitParent(SpannerImpl.COMMIT, span); + try (IScope ignore = tracer.withSpan(opSpan)) { + commitFuture = rpc.commitAsync(commitRequest, getTransactionChannelHint()); + } + session.markUsed(clock.instant()); + commitFuture.addListener( + () -> { + try (IScope ignore = tracer.withSpan(opSpan)) { + if (!commitFuture.isDone()) { + // This should not be possible, considering that we are in a listener for the + // future, but we add a result here as well as a safety precaution. + res.setException( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INTERNAL, "commitFuture is not done")); + return; + } + com.google.spanner.v1.CommitResponse proto = commitFuture.get(); + + // If the CommitResponse includes a precommit token, the client will retry the + // commit RPC once with the new token and clear any existing mutations. + // This case is applicable only when the read-write transaction uses multiplexed + // session. + if (proto.hasPrecommitToken() && !retryAttemptDueToCommitProtocolExtension) { + // track the latest pre commit token + onPrecommitToken(proto.getPrecommitToken()); + span.addAnnotation( + "Commit operation will be retried with new precommit token as the" + + " CommitResponse includes a MultiplexedSessionRetry field"); + opSpan.end(); + + // Retry the commit RPC with the latest precommit token from CommitResponse. + new CommitRunnable( + res, + prev, + requestBuilder, + /* retryAttemptDueToCommitProtocolExtension= */ true) + .run(); + + // Exit to prevent further processing in this attempt. 
+ return; + } + if (!proto.hasCommitTimestamp()) { + throw newSpannerException( + ErrorCode.INTERNAL, "Missing commitTimestamp:\n" + session.getName()); + } + span.addAnnotation("Commit Done"); + opSpan.end(); + res.set(new CommitResponse(proto)); + } catch (Throwable throwable) { + SpannerException resultException; + try { + if (throwable instanceof ExecutionException) { + resultException = + SpannerExceptionFactory.asSpannerException( + throwable.getCause() == null ? throwable : throwable.getCause()); + } else if (throwable instanceof InterruptedException) { + resultException = + SpannerExceptionFactory.propagateInterrupt( + (InterruptedException) throwable); + } else { + resultException = SpannerExceptionFactory.asSpannerException(throwable); + } + span.addAnnotation("Commit Failed", resultException); + opSpan.setStatus(resultException); + opSpan.end(); + res.setException( + onError( + resultException, + /* withBeginTransaction= */ false, + /* lastStatement= */ true)); + } catch (Throwable unexpectedError) { + // This is a safety precaution to make sure that a result is always returned. + res.setException(unexpectedError); + } + } + }, + MoreExecutors.directExecutor()); + } catch (InterruptedException e) { + res.setException(SpannerExceptionFactory.propagateInterrupt(e)); + } catch (TimeoutException e) { + res.setException(SpannerExceptionFactory.propagateTimeout(e)); + } catch (Throwable e) { + res.setException( + SpannerExceptionFactory.newSpannerException(e.getCause() == null ? 
e : e.getCause())); + } + } + } + + CommitResponse getCommitResponse() { + checkState(commitResponse != null, "run() has not yet returned normally"); + return commitResponse; + } + + boolean isAborted() { + synchronized (lock) { + return aborted; + } + } + + void rollback() { + try { + rollbackAsync().get(); + } catch (ExecutionException e) { + txnLogger.log(Level.FINE, "Exception during rollback", e); + span.addAnnotation("Rollback Failed", e); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + + ApiFuture rollbackAsync() { + close(); + + // It could be that there is no transaction if the transaction has been marked + // withInlineBegin, and there has not been any query/update statement that has been executed. + // In that case, we do not need to do anything, as there is no transaction. + // + // We do not take the transactionLock before trying to rollback to prevent a rollback call + // from blocking if an async query or update statement that is trying to begin the transaction + // is still in flight. That transaction will then automatically be terminated by the server. + if (transactionId != null) { + span.addAnnotation("Starting Rollback"); + ApiFuture apiFuture = + rpc.rollbackAsync( + RollbackRequest.newBuilder() + .setSession(session.getName()) + .setTransactionId(transactionId) + .build(), + getTransactionChannelHint()); + session.markUsed(clock.instant()); + return apiFuture; + } else if (transactionIdFuture != null) { + ApiFuture transactionIdOrEmptyFuture = + ApiFutures.catching( + transactionIdFuture, + Throwable.class, + input -> ByteString.empty(), + MoreExecutors.directExecutor()); + return ApiFutures.transformAsync( + transactionIdOrEmptyFuture, + transactionId -> + transactionId.isEmpty() + ? 
ApiFutures.immediateFuture(Empty.getDefaultInstance()) + : rpc.rollbackAsync( + RollbackRequest.newBuilder() + .setSession(session.getName()) + .setTransactionId(transactionId) + .build(), + getTransactionChannelHint()), + MoreExecutors.directExecutor()); + } else { + return ApiFutures.immediateFuture(Empty.getDefaultInstance()); + } + } + + @Nullable + @Override + TransactionSelector getTransactionSelector() { + // Check if there is already a transactionId available. That is the case if this transaction + // has already been prepared by the session pool, or if this transaction has been marked + // withInlineBegin and an earlier statement has already started a transaction. + if (transactionId == null) { + try { + ApiFuture tx = null; + synchronized (lock) { + // The first statement of a transaction that gets here will be the one that includes + // BeginTransaction with the statement. The others will be waiting on the + // transactionIdFuture until an actual transactionId is available. + if (transactionIdFuture == null) { + transactionIdFuture = SettableApiFuture.create(); + if (trackTransactionStarter) { + transactionStarter = new Exception("Requesting new transaction"); + } + } else { + tx = transactionIdFuture; + } + } + if (tx == null) { + return TransactionSelector.newBuilder() + .setBegin( + this.session.defaultTransactionOptions().toBuilder() + .mergeFrom( + SessionImpl.createReadWriteTransactionOptions( + options, getPreviousTransactionId()))) + .build(); + } else { + // Wait for the transaction to come available. The tx.get() call will fail with an + // Aborted error if the call that included the BeginTransaction option fails. The + // Aborted error will cause the entire transaction to be retried, and the retry will use + // a separate BeginTransaction RPC. + // If tx.get() returns successfully, this.transactionId will also have been set to a + // valid value as the latter is always set when a transaction id is returned by a + // statement. 
+ return TransactionSelector.newBuilder() + .setId(tx.get(waitForTransactionTimeoutMillis, TimeUnit.MILLISECONDS)) + .build(); + } + } catch (ExecutionException e) { + if (e.getCause() instanceof AbortedException) { + synchronized (lock) { + aborted = true; + } + } + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (TimeoutException e) { + // Throw an ABORTED exception to force a retry of the transaction if no transaction + // has been returned by the first statement. + SpannerException se = + SpannerExceptionFactory.newSpannerException( + ErrorCode.ABORTED, + "Timeout while waiting for a transaction to be returned by another statement." + + (trackTransactionStarter + ? " See the suppressed exception for the stacktrace of the caller that" + + " should return a transaction" + : ""), + e); + if (transactionStarter != null) { + se.addSuppressed(transactionStarter); + } + throw se; + } catch (InterruptedException e) { + throw SpannerExceptionFactory.newSpannerExceptionForCancellation(null, e); + } + } + // There is already a transactionId available. Include that id as the transaction to use. + return TransactionSelector.newBuilder().setId(transactionId).build(); + } + + @Override + Map getTransactionChannelHint() { + return channelHint; + } + + @Override + public void onTransactionMetadata(Transaction transaction, boolean shouldIncludeId) { + Preconditions.checkNotNull(transaction); + if (transaction.getId() != ByteString.EMPTY) { + // A transaction has been returned by a statement that was executed. Set the id of the + // transaction on this instance and release the lock to allow other statements to proceed. + if ((transactionIdFuture == null || !this.transactionIdFuture.isDone()) + && this.transactionId == null) { + this.transactionId = transaction.getId(); + this.transactionIdFuture.set(transaction.getId()); + } + } else if (shouldIncludeId) { + // The statement should have returned a transaction. 
+ throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, AbstractReadContext.NO_TRANSACTION_RETURNED_MSG); + } + } + + /** + * In read-write transactions, the precommit token with the highest sequence number from this + * transaction attempt will be tracked and included in the + * [Commit][google.spanner.v1.Spanner.Commit] request for the transaction. + */ + @Override + public void onPrecommitToken(MultiplexedSessionPrecommitToken token) { + if (token == null) { + return; + } + synchronized (precommitTokenLock) { + if (this.latestPrecommitToken == null + || token.getSeqNum() > this.latestPrecommitToken.getSeqNum()) { + this.latestPrecommitToken = token; + txnLogger.log(Level.FINE, "Updating precommit token to " + this.latestPrecommitToken); + } + } + } + + @Nullable + String getTransactionTag() { + if (this.options.hasTag()) { + return this.options.tag(); + } + return null; + } + + @Nullable + MultiplexedSessionPrecommitToken getLatestPrecommitToken() { + synchronized (precommitTokenLock) { + return this.latestPrecommitToken; + } + } + + @Override + public SpannerException onError( + SpannerException e, boolean withBeginTransaction, boolean lastStatement) { + e = super.onError(e, withBeginTransaction, lastStatement); + + // If the statement that caused an error was the statement that included a BeginTransaction + // option, we simulate an aborted transaction to force a retry of the entire transaction. This + // will cause the retry to execute an explicit BeginTransaction RPC and then the actual + // statements of the transaction. This is needed as the first statement of the transaction + // must be included with the transaction to ensure that any locks that are taken by the + // statement are included in the transaction, even if the statement again causes an error + // during the retry. 
+ if (withBeginTransaction) { + if (lastStatement) { + this.transactionIdFuture.setException(e); + } else { + // Simulate an aborted transaction to force a retry with a new transaction. + this.transactionIdFuture.setException( + SpannerExceptionFactory.newSpannerException( + ErrorCode.ABORTED, + "Aborted due to failed initial statement", + SpannerExceptionFactory.createAbortedExceptionWithRetryDelay( + "Aborted due to failed initial statement", e, 0, 1))); + } + } + SpannerException exceptionToThrow; + if (withBeginTransaction + && e.getErrorCode() == ErrorCode.CANCELLED + && e.getMessage().contains(TRANSACTION_CANCELLED_MESSAGE)) { + // If the first statement of a transaction fails because it was invalidated by a later + // transaction, then the transaction should be retried with an explicit BeginTransaction + // RPC. It could be that this occurred because of a previous transaction that timed out or + // was cancelled by the client, but that was sent to Cloud Spanner and that was still active + // on the backend. + exceptionToThrow = + SpannerExceptionFactory.newSpannerException( + ErrorCode.ABORTED, + e.getMessage(), + SpannerExceptionFactory.createAbortedExceptionWithRetryDelay( + "Aborted due to failed initial statement", e, 0, 1)); + } else { + exceptionToThrow = e; + } + + if (exceptionToThrow.getErrorCode() == ErrorCode.ABORTED) { + long delay = -1L; + if (exceptionToThrow instanceof AbortedException) { + delay = exceptionToThrow.getRetryDelayInMillis(); + ((AbortedException) exceptionToThrow) + .setTransactionID( + this.transactionId != null + ? 
this.transactionId + : this.getPreviousTransactionId()); + } + if (delay == -1L) { + txnLogger.log( + Level.FINE, "Retry duration is missing from the exception.", exceptionToThrow); + } + + synchronized (lock) { + retryDelayInMillis = delay; + aborted = true; + } + } + return exceptionToThrow; + } + + @Override + public void onDone(boolean withBeginTransaction) { + if (withBeginTransaction + && transactionIdFuture != null + && !this.transactionIdFuture.isDone()) { + // Context was done (closed) before a transaction id was returned. + this.transactionIdFuture.setException( + SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "ResultSet was closed before a transaction id was returned")); + } + } + + @Override + public void buffer(Mutation mutation) { + synchronized (committingLock) { + if (committing) { + throw new IllegalStateException(TRANSACTION_ALREADY_COMMITTED_MESSAGE); + } + mutations.add(checkNotNull(mutation)); + } + } + + @Override + public ApiFuture bufferAsync(Mutation mutation) { + // Normally, we would call the async method from the sync method, but this is also safe as + // both are non-blocking anyways, and this prevents the creation of an ApiFuture that is not + // really used when the sync method is called. + buffer(mutation); + return ApiFutures.immediateFuture(null); + } + + @Override + public void buffer(Iterable mutations) { + synchronized (committingLock) { + if (committing) { + throw new IllegalStateException(TRANSACTION_ALREADY_COMMITTED_MESSAGE); + } + for (Mutation mutation : mutations) { + this.mutations.add(checkNotNull(mutation)); + } + } + } + + @Override + public ApiFuture bufferAsync(Iterable mutations) { + // Normally, we would call the async method from the sync method, but this is also safe as + // both are non-blocking anyways, and this prevents the creation of an ApiFuture that is not + // really used when the sync method is called. 
+ buffer(mutations); + return ApiFutures.immediateFuture(null); + } + + @Override + public ResultSetStats analyzeUpdate( + Statement statement, QueryAnalyzeMode analyzeMode, UpdateOption... options) { + return internalAnalyzeStatement(statement, analyzeMode, options).getStats(); + } + + @Override + public com.google.cloud.spanner.ResultSet analyzeUpdateStatement( + Statement statement, QueryAnalyzeMode analyzeMode, UpdateOption... options) { + return new NoRowsResultSet(internalAnalyzeStatement(statement, analyzeMode, options)); + } + + private ResultSet internalAnalyzeStatement( + Statement statement, QueryAnalyzeMode analyzeMode, UpdateOption... updateOptions) { + Preconditions.checkNotNull(analyzeMode); + QueryMode queryMode; + switch (analyzeMode) { + case PLAN: + queryMode = QueryMode.PLAN; + break; + case PROFILE: + queryMode = QueryMode.PROFILE; + break; + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Unknown analyze mode: " + analyzeMode); + } + final Options options = Options.fromUpdateOptions(updateOptions); + return internalExecuteUpdate(statement, queryMode, options); + } + + @Override + public long executeUpdate(Statement statement, UpdateOption... updateOptions) { + final Options options = Options.fromUpdateOptions(updateOptions); + ISpan span = + tracer.spanBuilderWithExplicitParent( + UPDATE, this.span, this.tracer.createStatementAttributes(statement, options)); + try (IScope ignore = tracer.withSpan(span)) { + ResultSet resultSet = internalExecuteUpdate(statement, QueryMode.NORMAL, options); + // For standard DML, using the exact row count. 
+ return resultSet.getStats().getRowCountExact(); + } finally { + span.end(); + } + } + + private ResultSet internalExecuteUpdate( + Statement statement, QueryMode queryMode, Options options) { + beforeReadOrQuery(); + if (options.withExcludeTxnFromChangeStreams() != null) { + throw newSpannerException( + ErrorCode.INVALID_ARGUMENT, DML_INVALID_EXCLUDE_CHANGE_STREAMS_OPTION_MESSAGE); + } + final ExecuteSqlRequest.Builder builder = + getExecuteSqlRequestBuilder( + statement, queryMode, options, /* withTransactionSelector= */ true); + try { + com.google.spanner.v1.ResultSet resultSet = + rpc.executeQuery(builder.build(), getTransactionChannelHint(), isRouteToLeader()); + session.markUsed(clock.instant()); + if (resultSet.getMetadata().hasTransaction()) { + onTransactionMetadata( + resultSet.getMetadata().getTransaction(), builder.getTransaction().hasBegin()); + } + if (!resultSet.hasStats()) { + throw new IllegalArgumentException( + "DML response missing stats possibly due to non-DML statement as input"); + } + if (resultSet.hasPrecommitToken()) { + onPrecommitToken(resultSet.getPrecommitToken()); + } + return resultSet; + } catch (Throwable t) { + throw onError( + SpannerExceptionFactory.asSpannerException(t), + builder.getTransaction().hasBegin(), + builder.getLastStatement()); + } + } + + @Override + public ApiFuture executeUpdateAsync(Statement statement, UpdateOption... 
updateOptions) { + final Options options = Options.fromUpdateOptions(updateOptions); + ISpan span = + tracer.spanBuilderWithExplicitParent( + UPDATE, this.span, this.tracer.createStatementAttributes(statement, options)); + try (IScope ignore = tracer.withSpan(span)) { + beforeReadOrQuery(); + if (options.withExcludeTxnFromChangeStreams() != null) { + throw newSpannerException( + ErrorCode.INVALID_ARGUMENT, DML_INVALID_EXCLUDE_CHANGE_STREAMS_OPTION_MESSAGE); + } + final ExecuteSqlRequest.Builder builder = + getExecuteSqlRequestBuilder( + statement, QueryMode.NORMAL, options, /* withTransactionSelector= */ true); + final ApiFuture resultSet; + try { + // Register the update as an async operation that must finish before the transaction may + // commit. + increaseAsyncOperations(); + resultSet = + rpc.executeQueryAsync( + builder.build(), getTransactionChannelHint(), isRouteToLeader()); + session.markUsed(clock.instant()); + } catch (Throwable t) { + decreaseAsyncOperations(); + throw t; + } + ApiFuture updateCount = + ApiFutures.transform( + resultSet, + input -> { + if (!input.hasStats()) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "DML response missing stats possibly due to non-DML statement as input"); + } + if (builder.getTransaction().hasBegin() + && !(input.getMetadata().hasTransaction() + && input.getMetadata().getTransaction().getId() != ByteString.EMPTY)) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, NO_TRANSACTION_RETURNED_MSG); + } + // For standard DML, using the exact row count. 
+ return input.getStats().getRowCountExact(); + }, + MoreExecutors.directExecutor()); + updateCount = + ApiFutures.catching( + updateCount, + Throwable.class, + input -> { + SpannerException e = SpannerExceptionFactory.asSpannerException(input); + SpannerException exceptionToThrow = + onError(e, builder.getTransaction().hasBegin(), builder.getLastStatement()); + span.setStatus(exceptionToThrow); + throw exceptionToThrow; + }, + MoreExecutors.directExecutor()); + updateCount.addListener( + () -> { + try { + if (resultSet.get().getMetadata().hasTransaction()) { + onTransactionMetadata( + resultSet.get().getMetadata().getTransaction(), + builder.getTransaction().hasBegin()); + } + if (resultSet.get().hasPrecommitToken()) { + onPrecommitToken(resultSet.get().getPrecommitToken()); + } + } catch (Throwable e) { + // Ignore this error here as it is handled by the future that is returned by the + // executeUpdateAsync method. + } + span.end(); + decreaseAsyncOperations(); + }, + MoreExecutors.directExecutor()); + return updateCount; + } + } + + private SpannerException createAbortedExceptionForBatchDml(ExecuteBatchDmlResponse response) { + // Manually construct an AbortedException with a 10ms retry delay for BatchDML responses that + // return an Aborted status (and not an AbortedException). + return newSpannerException( + ErrorCode.fromRpcStatus(response.getStatus()), + response.getStatus().getMessage(), + SpannerExceptionFactory.createAbortedExceptionWithRetryDelay( + response.getStatus().getMessage(), + /* cause= */ null, + /* retryDelaySeconds= */ 0, + /* retryDelayNanos= */ (int) TimeUnit.MILLISECONDS.toNanos(10L))); + } + + @Override + public long[] batchUpdate(Iterable statements, UpdateOption... 
updateOptions) { + final Options options = Options.fromUpdateOptions(updateOptions); + ISpan span = + tracer.spanBuilderWithExplicitParent( + BATCH_UPDATE, + this.span, + this.tracer.createStatementBatchAttributes(statements, options)); + try (IScope ignore = tracer.withSpan(span)) { + beforeReadOrQuery(); + if (options.withExcludeTxnFromChangeStreams() != null) { + throw newSpannerException( + ErrorCode.INVALID_ARGUMENT, DML_INVALID_EXCLUDE_CHANGE_STREAMS_OPTION_MESSAGE); + } + final ExecuteBatchDmlRequest.Builder builder = + getExecuteBatchDmlRequestBuilder(statements, options); + try { + com.google.spanner.v1.ExecuteBatchDmlResponse response = + rpc.executeBatchDml(builder.build(), getTransactionChannelHint()); + session.markUsed(clock.instant()); + long[] results = new long[response.getResultSetsCount()]; + for (int i = 0; i < response.getResultSetsCount(); ++i) { + results[i] = response.getResultSets(i).getStats().getRowCountExact(); + if (response.getResultSets(i).getMetadata().hasTransaction()) { + onTransactionMetadata( + response.getResultSets(i).getMetadata().getTransaction(), + builder.getTransaction().hasBegin()); + } + } + + if (response.hasPrecommitToken()) { + onPrecommitToken(response.getPrecommitToken()); + } + + // If one of the DML statements was aborted, we should throw an aborted exception. + // In all other cases, we should throw a BatchUpdateException. 
+ if (response.getStatus().getCode() == Code.ABORTED_VALUE) { + throw createAbortedExceptionForBatchDml(response); + } else if (response.getStatus().getCode() != Code.OK_VALUE) { + throw newSpannerBatchUpdateException( + ErrorCode.fromRpcStatus(response.getStatus()), + response.getStatus().getMessage(), + results); + } + return results; + } catch (Throwable e) { + throw onError( + SpannerExceptionFactory.asSpannerException(e), + builder.getTransaction().hasBegin(), + builder.getLastStatements()); + } + } catch (Throwable throwable) { + span.setStatus(throwable); + throw throwable; + } finally { + span.end(); + } + } + + @Override + public ApiFuture batchUpdateAsync( + Iterable statements, UpdateOption... updateOptions) { + final Options options = Options.fromUpdateOptions(updateOptions); + ISpan span = + tracer.spanBuilderWithExplicitParent( + BATCH_UPDATE, + this.span, + this.tracer.createStatementBatchAttributes(statements, options)); + try (IScope ignore = tracer.withSpan(span)) { + beforeReadOrQuery(); + if (options.withExcludeTxnFromChangeStreams() != null) { + throw newSpannerException( + ErrorCode.INVALID_ARGUMENT, DML_INVALID_EXCLUDE_CHANGE_STREAMS_OPTION_MESSAGE); + } + final ExecuteBatchDmlRequest.Builder builder = + getExecuteBatchDmlRequestBuilder(statements, options); + ApiFuture response; + try { + // Register the update as an async operation that must finish before the transaction may + // commit. 
+ increaseAsyncOperations(); + response = rpc.executeBatchDmlAsync(builder.build(), getTransactionChannelHint()); + session.markUsed(clock.instant()); + } catch (Throwable t) { + decreaseAsyncOperations(); + throw t; + } + ApiFuture updateCounts = + ApiFutures.transform( + response, + batchDmlResponse -> { + long[] results = new long[batchDmlResponse.getResultSetsCount()]; + for (int i = 0; i < batchDmlResponse.getResultSetsCount(); ++i) { + results[i] = batchDmlResponse.getResultSets(i).getStats().getRowCountExact(); + if (batchDmlResponse.getResultSets(i).getMetadata().hasTransaction()) { + onTransactionMetadata( + batchDmlResponse.getResultSets(i).getMetadata().getTransaction(), + builder.getTransaction().hasBegin()); + } + } + if (batchDmlResponse.hasPrecommitToken()) { + onPrecommitToken(batchDmlResponse.getPrecommitToken()); + } + // If one of the DML statements was aborted, we should throw an aborted exception. + // In all other cases, we should throw a BatchUpdateException. + if (batchDmlResponse.getStatus().getCode() == Code.ABORTED_VALUE) { + throw createAbortedExceptionForBatchDml(batchDmlResponse); + } else if (batchDmlResponse.getStatus().getCode() != Code.OK_VALUE) { + throw newSpannerBatchUpdateException( + ErrorCode.fromRpcStatus(batchDmlResponse.getStatus()), + batchDmlResponse.getStatus().getMessage(), + results); + } + return results; + }, + MoreExecutors.directExecutor()); + updateCounts = + ApiFutures.catching( + updateCounts, + Throwable.class, + input -> { + SpannerException e = SpannerExceptionFactory.asSpannerException(input); + SpannerException exceptionToThrow = + onError(e, builder.getTransaction().hasBegin(), builder.getLastStatements()); + span.setStatus(exceptionToThrow); + throw exceptionToThrow; + }, + MoreExecutors.directExecutor()); + updateCounts.addListener( + () -> { + span.end(); + decreaseAsyncOperations(); + }, + MoreExecutors.directExecutor()); + return updateCounts; + } + } + + private ListenableAsyncResultSet 
wrap(ListenableAsyncResultSet delegate) { + return new TransactionContextAsyncResultSetImpl(delegate); + } + + @Override + public ListenableAsyncResultSet readAsync( + String table, KeySet keys, Iterable columns, ReadOption... options) { + return wrap(super.readAsync(table, keys, columns, options)); + } + + @Override + public ListenableAsyncResultSet readUsingIndexAsync( + String table, String index, KeySet keys, Iterable columns, ReadOption... options) { + return wrap(super.readUsingIndexAsync(table, index, keys, columns, options)); + } + + @Override + public ListenableAsyncResultSet executeQueryAsync( + final Statement statement, final QueryOption... options) { + return wrap(super.executeQueryAsync(statement, options)); + } + } + + private boolean blockNestedTxn = true; + private final SessionImpl session; + private final Options options; + private ISpan span; + private final TraceWrapper tracer; + private TransactionContextImpl txn; + private volatile boolean isValid = true; + + @Override + public TransactionRunner allowNestedTransaction() { + blockNestedTxn = false; + return this; + } + + TransactionRunnerImpl(SessionImpl session, TransactionOption... options) { + this.session = session; + this.options = Options.fromTransactionOptions(options); + this.txn = session.newTransaction(this.options, /* previousTransactionId= */ ByteString.EMPTY); + this.tracer = session.getTracer(); + } + + @Override + public void setSpan(ISpan span) { + this.span = span; + } + + @Nullable + @Override + public T run(TransactionCallable callable) { + try (IScope s = tracer.withSpan(span)) { + if (blockNestedTxn) { + SessionImpl.hasPendingTransaction.set(Boolean.TRUE); + } + return runInternal(callable); + } catch (RuntimeException e) { + span.setStatus(e); + throw e; + } finally { + // Remove threadLocal rather than set to FALSE to avoid a possible memory leak. + // We also do this unconditionally in case a user has modified the flag when the transaction + // was running. 
+ SessionImpl.hasPendingTransaction.remove(); + span.end(); + session.onTransactionDone(); + } + } + + private T runInternal(final TransactionCallable txCallable) { + final AtomicInteger attempt = new AtomicInteger(); + Callable retryCallable = + () -> { + boolean useInlinedBegin = true; + if (attempt.get() > 0) { + // Do not inline the BeginTransaction during a retry if the initial attempt did not + // actually start a transaction. + useInlinedBegin = txn.transactionId != null; + + // Determine the latest transactionId when using a multiplexed session. + ByteString multiplexedSessionPreviousTransactionId = ByteString.EMPTY; + if (session.getIsMultiplexed()) { + // Use the current transactionId if available, otherwise fallback to the previous + // transactionId. + multiplexedSessionPreviousTransactionId = + txn.transactionId != null ? txn.transactionId : txn.getPreviousTransactionId(); + } + + txn = + session.newTransaction( + options, /* previousTransactionId= */ multiplexedSessionPreviousTransactionId); + } + checkState( + isValid, "TransactionRunner has been invalidated by a new operation on the session"); + attempt.incrementAndGet(); + span.addAnnotation("Starting Transaction Attempt", "Attempt", attempt.longValue()); + // Only ensure that there is a transaction if we should not inline the beginTransaction + // with the first statement. + if (!useInlinedBegin) { + txn.ensureTxn(); + } + + T result; + boolean shouldRollback = true; + try { + result = txCallable.run(txn); + shouldRollback = false; + } catch (Exception e) { + txnLogger.log(Level.FINE, "User-provided TransactionCallable raised exception", e); + if (txn.isAborted() || (e instanceof AbortedException)) { + span.addAnnotation( + "Transaction Attempt Aborted in user operation. 
Retrying", + "Attempt", + attempt.longValue()); + shouldRollback = false; + if (e instanceof AbortedException) { + throw e; + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.ABORTED, e.getMessage(), e); + } + SpannerException toThrow; + if (e instanceof SpannerException) { + toThrow = (SpannerException) e; + } else { + toThrow = newSpannerException(ErrorCode.UNKNOWN, e.getMessage(), e); + } + span.addAnnotation( + "Transaction Attempt Failed in user operation", + ImmutableMap.of( + "Attempt", attempt.longValue(), "Status", toThrow.getErrorCode().toString())); + throw toThrow; + } finally { + if (shouldRollback) { + txn.rollback(); + } + } + + try { + txn.commit(); + span.addAnnotation("Transaction Attempt Succeeded", "Attempt", attempt.longValue()); + return result; + } catch (AbortedException e) { + txnLogger.log(Level.FINE, "Commit aborted", e); + span.addAnnotation( + "Transaction Attempt Aborted in Commit. Retrying", "Attempt", attempt.longValue()); + throw e; + } catch (SpannerException e) { + span.addAnnotation( + "Transaction Attempt Failed in Commit", + ImmutableMap.of( + "Attempt", attempt.longValue(), "Status", e.getErrorCode().toString())); + throw e; + } + }; + return SpannerRetryHelper.runTxWithRetriesOnAborted(retryCallable, session.getErrorHandler()); + } + + @Override + public Timestamp getCommitTimestamp() { + checkState(txn != null, "run() has not yet returned normally"); + return txn.getCommitResponse().getCommitTimestamp(); + } + + public CommitResponse getCommitResponse() { + checkState(txn != null, "run() has not yet returned normally"); + return txn.getCommitResponse(); + } + + @Override + public void invalidate() { + isValid = false; + } + + @Override + public void close() {} +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Type.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Type.java new file mode 100644 index 000000000000..71120a0f420f --- /dev/null 
+++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Type.java @@ -0,0 +1,677 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.spanner.v1.TypeAnnotationCode.TYPE_ANNOTATION_CODE_UNSPECIFIED; + +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.spanner.v1.TypeAnnotationCode; +import com.google.spanner.v1.TypeCode; +import java.io.Serializable; +import java.util.AbstractMap.SimpleEntry; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.TreeMap; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import javax.annotation.concurrent.Immutable; + +/** + * Describes a type in the Cloud Spanner type system. Types can either be primitive (for example, + * {@code INT64} and {@code STRING}) or composite (for example, {@code ARRAY} or {@code + * STRUCT}). + * + *

{@code Type} instances are immutable. + */ +@Immutable +public final class Type implements Serializable { + private static final Type TYPE_BOOL = new Type(Code.BOOL, null, null); + private static final Type TYPE_INT64 = new Type(Code.INT64, null, null); + private static final Type TYPE_FLOAT32 = new Type(Code.FLOAT32, null, null); + private static final Type TYPE_FLOAT64 = new Type(Code.FLOAT64, null, null); + private static final Type TYPE_NUMERIC = new Type(Code.NUMERIC, null, null); + private static final Type TYPE_PG_NUMERIC = new Type(Code.PG_NUMERIC, null, null); + private static final Type TYPE_STRING = new Type(Code.STRING, null, null); + private static final Type TYPE_JSON = new Type(Code.JSON, null, null); + private static final Type TYPE_PG_JSONB = new Type(Code.PG_JSONB, null, null); + private static final Type TYPE_PG_OID = new Type(Code.PG_OID, null, null); + private static final Type TYPE_BYTES = new Type(Code.BYTES, null, null); + private static final Type TYPE_TIMESTAMP = new Type(Code.TIMESTAMP, null, null); + private static final Type TYPE_DATE = new Type(Code.DATE, null, null); + private static final Type TYPE_UUID = new Type(Code.UUID, null, null); + private static final Type TYPE_INTERVAL = new Type(Code.INTERVAL, null, null); + private static final Type TYPE_ARRAY_BOOL = new Type(Code.ARRAY, TYPE_BOOL, null); + private static final Type TYPE_ARRAY_INT64 = new Type(Code.ARRAY, TYPE_INT64, null); + private static final Type TYPE_ARRAY_FLOAT32 = new Type(Code.ARRAY, TYPE_FLOAT32, null); + private static final Type TYPE_ARRAY_FLOAT64 = new Type(Code.ARRAY, TYPE_FLOAT64, null); + private static final Type TYPE_ARRAY_NUMERIC = new Type(Code.ARRAY, TYPE_NUMERIC, null); + private static final Type TYPE_ARRAY_PG_NUMERIC = new Type(Code.ARRAY, TYPE_PG_NUMERIC, null); + private static final Type TYPE_ARRAY_STRING = new Type(Code.ARRAY, TYPE_STRING, null); + private static final Type TYPE_ARRAY_JSON = new Type(Code.ARRAY, TYPE_JSON, null); + private 
static final Type TYPE_ARRAY_PG_JSONB = new Type(Code.ARRAY, TYPE_PG_JSONB, null); + private static final Type TYPE_ARRAY_PG_OID = new Type(Code.ARRAY, TYPE_PG_OID, null); + private static final Type TYPE_ARRAY_BYTES = new Type(Code.ARRAY, TYPE_BYTES, null); + private static final Type TYPE_ARRAY_TIMESTAMP = new Type(Code.ARRAY, TYPE_TIMESTAMP, null); + private static final Type TYPE_ARRAY_DATE = new Type(Code.ARRAY, TYPE_DATE, null); + private static final Type TYPE_ARRAY_UUID = new Type(Code.ARRAY, TYPE_UUID, null); + private static final Type TYPE_ARRAY_INTERVAL = new Type(Code.ARRAY, TYPE_INTERVAL, null); + + private static final int AMBIGUOUS_FIELD = -1; + private static final long serialVersionUID = -3076152125004114582L; + + static Type unrecognized(com.google.spanner.v1.Type proto) { + return new Type(proto); + } + + /** Returns the descriptor for the {@code BOOL type}. */ + public static Type bool() { + return TYPE_BOOL; + } + + /** + * Returns the descriptor for the {@code INT64} type: an integral type with the same value domain + * as a Java {@code long}. + */ + public static Type int64() { + return TYPE_INT64; + } + + /** + * Returns the descriptor for the {@code FLOAT32} type: a floating point type with the same value + * domain as a Java {@code float}. + */ + public static Type float32() { + return TYPE_FLOAT32; + } + + /** + * Returns the descriptor for the {@code FLOAT64} type: a floating point type with the same value + * domain as a Java {@code double}. + */ + public static Type float64() { + return TYPE_FLOAT64; + } + + /** Returns the descriptor for the {@code NUMERIC} type. */ + public static Type numeric() { + return TYPE_NUMERIC; + } + + /** + * Returns the descriptor for the {@code NUMERIC} type with the {@code PG_NUMERIC} type + * annotation. + */ + public static Type pgNumeric() { + return Type.TYPE_PG_NUMERIC; + } + + /** + * Returns the descriptor for the {@code STRING} type: a variable-length Unicode character string. 
+ */ + public static Type string() { + return TYPE_STRING; + } + + /** Returns the descriptor for the {@code JSON} type. */ + public static Type json() { + return TYPE_JSON; + } + + /** Returns the descriptor for the {@code JSONB} type. */ + public static Type pgJsonb() { + return TYPE_PG_JSONB; + } + + /** Returns the descriptor for the {@code PG_OID} type. */ + public static Type pgOid() { + return TYPE_PG_OID; + } + + /** + * To get the descriptor for the {@code PROTO} type. + * + * @param protoTypeFqn Proto fully qualified name (ex: "examples.spanner.music.SingerInfo"). + */ + public static Type proto(String protoTypeFqn) { + return new Type(Code.PROTO, protoTypeFqn); + } + + /** + * To get the descriptor for the {@code ENUM} type. + * + * @param protoTypeFqn Proto ENUM fully qualified name (ex: "examples.spanner.music.Genre") + */ + public static Type protoEnum(String protoTypeFqn) { + return new Type(Code.ENUM, protoTypeFqn); + } + + /** Returns the descriptor for the {@code BYTES} type: a variable-length byte string. */ + public static Type bytes() { + return TYPE_BYTES; + } + + /** + * Returns the descriptor for the {@code TIMESTAMP} type: a nano precision timestamp in the range + * [0000-01-01 00:00:00, 9999-12-31 23:59:59.999999999 UTC]. + */ + public static Type timestamp() { + return TYPE_TIMESTAMP; + } + + /** + * Returns the descriptor for the {@code DATE} type: a timezone independent date in the range + * [0001-01-01, 9999-12-31). + */ + public static Type date() { + return TYPE_DATE; + } + + /** Returns the descriptor for the {@code UUID} type. */ + public static Type uuid() { + return TYPE_UUID; + } + + /** + * Returns the descriptor for the {@code INTERVAL} type: an interval which represents a time + * duration as a tuple of 3 values (months, days, nanoseconds). [Interval(months:-120000, days: + * -3660000, nanoseconds: -316224000000000000000), Interval(months:120000, days: 3660000, + * nanoseconds: 316224000000000000000)]. 
+ */ + public static Type interval() { + return TYPE_INTERVAL; + } + + /** Returns a descriptor for an array of {@code elementType}. */ + public static Type array(Type elementType) { + Preconditions.checkNotNull(elementType); + switch (elementType.getCode()) { + case BOOL: + return TYPE_ARRAY_BOOL; + case INT64: + return TYPE_ARRAY_INT64; + case FLOAT32: + return TYPE_ARRAY_FLOAT32; + case FLOAT64: + return TYPE_ARRAY_FLOAT64; + case NUMERIC: + return TYPE_ARRAY_NUMERIC; + case PG_NUMERIC: + return TYPE_ARRAY_PG_NUMERIC; + case STRING: + return TYPE_ARRAY_STRING; + case JSON: + return TYPE_ARRAY_JSON; + case PG_JSONB: + return TYPE_ARRAY_PG_JSONB; + case PG_OID: + return TYPE_ARRAY_PG_OID; + case BYTES: + return TYPE_ARRAY_BYTES; + case TIMESTAMP: + return TYPE_ARRAY_TIMESTAMP; + case DATE: + return TYPE_ARRAY_DATE; + case UUID: + return TYPE_ARRAY_UUID; + case INTERVAL: + return TYPE_ARRAY_INTERVAL; + default: + return new Type(Code.ARRAY, elementType, null); + } + } + + /** + * Returns a descriptor for a {@code STRUCT} type: an ordered collection of named and typed + * fields. + */ + public static Type struct(Iterable fields) { + return new Type(Code.STRUCT, null, ImmutableList.copyOf(fields)); + } + + /** + * Returns a descriptor for a {@code STRUCT} type: an ordered collection of named and typed + * fields. + */ + public static Type struct(StructField... fields) { + return new Type(Code.STRUCT, null, ImmutableList.copyOf(fields)); + } + + private final com.google.spanner.v1.Type proto; + private final Code code; + private final Type arrayElementType; + private final ImmutableList structFields; + private String protoTypeFqn; + + /** + * Map of field name to field index. Ambiguous names are indexed to {@link #AMBIGUOUS_FIELD}. The + * map is lazily initialized with a benign race. 
+ */ + private Map fieldsByName; + + private Type( + @Nonnull Code code, + @Nullable Type arrayElementType, + @Nullable ImmutableList structFields) { + this(null, Preconditions.checkNotNull(code), arrayElementType, structFields); + } + + private Type(@Nonnull com.google.spanner.v1.Type proto) { + this( + Preconditions.checkNotNull(proto), + Code.UNRECOGNIZED, + proto.hasArrayElementType() ? new Type(proto.getArrayElementType()) : null, + null); + } + + private Type( + com.google.spanner.v1.Type proto, + Code code, + Type arrayElementType, + ImmutableList structFields) { + this.proto = proto; + this.code = code; + this.arrayElementType = arrayElementType; + this.structFields = structFields; + } + + private Type(Code code, @Nonnull String protoTypeFqn) { + this(code, null, null); + this.protoTypeFqn = protoTypeFqn; + } + + /** Enumerates the categories of types. */ + public enum Code { + UNRECOGNIZED(TypeCode.UNRECOGNIZED, "unknown"), + BOOL(TypeCode.BOOL, "boolean"), + INT64(TypeCode.INT64, "bigint"), + NUMERIC(TypeCode.NUMERIC, "unknown"), + PG_NUMERIC(TypeCode.NUMERIC, "numeric", TypeAnnotationCode.PG_NUMERIC), + FLOAT64(TypeCode.FLOAT64, "double precision"), + FLOAT32(TypeCode.FLOAT32, "real"), + STRING(TypeCode.STRING, "character varying"), + JSON(TypeCode.JSON, "unknown"), + PG_JSONB(TypeCode.JSON, "jsonb", TypeAnnotationCode.PG_JSONB), + PG_OID(TypeCode.INT64, "oid", TypeAnnotationCode.PG_OID), + PROTO(TypeCode.PROTO, "proto"), + ENUM(TypeCode.ENUM, "enum"), + BYTES(TypeCode.BYTES, "bytea"), + TIMESTAMP(TypeCode.TIMESTAMP, "timestamp with time zone"), + DATE(TypeCode.DATE, "date"), + UUID(TypeCode.UUID, "uuid"), + INTERVAL(TypeCode.INTERVAL, "interval"), + ARRAY(TypeCode.ARRAY, "array"), + STRUCT(TypeCode.STRUCT, "struct"); + + private static final Map, Code> protoToCode; + + static { + ImmutableMap.Builder, Code> builder = + ImmutableMap.builder(); + for (Code code : Code.values()) { + builder.put(new SimpleEntry<>(code.getTypeCode(), 
code.getTypeAnnotationCode()), code); + } + protoToCode = builder.build(); + } + + private final String postgreSQLName; + private final TypeCode typeCode; + private final TypeAnnotationCode typeAnnotationCode; + + Code(TypeCode typeCode, String postgreSQLName) { + this(typeCode, postgreSQLName, TYPE_ANNOTATION_CODE_UNSPECIFIED); + } + + Code(TypeCode typeCode, String postgreSQLName, TypeAnnotationCode typeAnnotationCode) { + this.typeCode = typeCode; + this.postgreSQLName = postgreSQLName; + this.typeAnnotationCode = typeAnnotationCode; + } + + TypeCode getTypeCode() { + return typeCode; + } + + TypeAnnotationCode getTypeAnnotationCode() { + return typeAnnotationCode; + } + + static Code fromProto(TypeCode typeCode, TypeAnnotationCode typeAnnotationCode) { + Code code = protoToCode.get(new SimpleEntry<>(typeCode, typeAnnotationCode)); + return code == null ? Code.UNRECOGNIZED : code; + } + + @Override + public String toString() { + if (typeAnnotationCode == TYPE_ANNOTATION_CODE_UNSPECIFIED) { + return typeCode.toString(); + } else { + return typeCode.toString() + "<" + typeAnnotationCode.toString() + ">"; + } + } + + private String getGoogleSQLName() { + return name(); + } + + private String getPostgreSQLName() { + return postgreSQLName; + } + } + + /** Describes an individual field in a {@code STRUCT type}. 
+ */ + public static final class StructField implements Serializable { + private static final long serialVersionUID = 8640511292704408210L; + + private final String name; + private final Type type; + + public static StructField of(String name, Type type) { + return new StructField(name, type); + } + + private StructField(String name, Type type) { + this.name = Preconditions.checkNotNull(name); + this.type = Preconditions.checkNotNull(type); + } + + public String getName() { + return name; + } + + public Type getType() { + return type; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + StructField that = (StructField) o; + return name.equals(that.name) && type.equals(that.type); + } + + @Override + public int hashCode() { + return Objects.hash(name, type); + } + } + + /** Returns the type code corresponding to this type. */ + public Code getCode() { + return code; + } + + /** + * Returns the type descriptor for elements of this {@code ARRAY} type. + * + * @throws IllegalStateException if {@code code() != Code.ARRAY} + */ + public Type getArrayElementType() { + Preconditions.checkState(arrayElementType != null, "Illegal call for non-ARRAY type"); + return arrayElementType; + } + + /** + * Returns the fields of this {@code STRUCT} type. + * + * @return an immutable list of the fields + * @throws IllegalStateException if {@code code() != Code.STRUCT} + */ + public List getStructFields() { + Preconditions.checkState(code == Code.STRUCT, "Illegal call for non-STRUCT type"); + return structFields; + } + + /** + * Returns the fully qualified proto type name for elements of this {@code PROTO} or {@code ENUM} type. 
+ * + * @throws IllegalStateException if {@code code() != Code.PROTO or code() != Code.ENUM} + */ + public String getProtoTypeFqn() { + Preconditions.checkState( + (code == Code.PROTO || code == Code.ENUM), "Illegal call for non-Proto type"); + return protoTypeFqn; + } + + /** + * Returns the index of the field named {@code fieldName} in this {@code STRUCT} type. + * + * @throws IllegalArgumentException if there is not exactly one element of {@link + * #getStructFields()} with {@link StructField#getName()} equal to {@code fieldName} + * @throws IllegalStateException if {@code code() != Code.STRUCT} + */ + public int getFieldIndex(String fieldName) { + Preconditions.checkState(code == Code.STRUCT, "Illegal call for non-STRUCT type"); + + if (fieldsByName == null) { + Map tmp = new TreeMap<>(); + for (int i = 0; i < getStructFields().size(); ++i) { + Type.StructField field = getStructFields().get(i); + if (tmp.put(field.getName(), i) != null) { + // Column name appears more than once: mark as ambiguous. + tmp.put(field.getName(), AMBIGUOUS_FIELD); + } + } + // Benign race: Java's final field semantics mean that if we see a non-null "fieldsByName", + // we are guaranteed to see it in a fully initialized state. It is thus important that we + // use an ImmutableMap here, which necessarily uses final fields or equivalent reasoning. + // Since all computations of "fieldsByName" produce the same value, there is no risk of + // inconsistency. + fieldsByName = ImmutableMap.copyOf(tmp); + } + + Integer index = fieldsByName.get(fieldName); + if (index == null) { + throw new IllegalArgumentException("Field not found: " + fieldName); + } + if (index == AMBIGUOUS_FIELD) { + throw new IllegalArgumentException("Ambiguous field name: " + fieldName); + } + return index; + } + + void toString(StringBuilder b) { + if (code == Code.ARRAY || (proto != null && proto.hasArrayElementType())) { + if (code == Code.ARRAY) { + b.append("ARRAY<"); + } else { + // This is very unlikely to happen. 
It would mean that we have introduced a type that + // is not an ARRAY, but does have an array element type. + b.append("UNRECOGNIZED<"); + } + arrayElementType.toString(b); + b.append('>'); + } else if (code == Code.STRUCT) { + b.append("STRUCT<"); + for (int i = 0; i < structFields.size(); ++i) { + if (i > 0) { + b.append(", "); + } + StructField f = structFields.get(i); + b.append(f.getName()).append(' '); + f.getType().toString(b); + } + b.append('>'); + } else if (proto != null) { + b.append(proto.getCode().name()); + if (proto.getTypeAnnotation() != TYPE_ANNOTATION_CODE_UNSPECIFIED) { + b.append("<").append(proto.getTypeAnnotation().name()).append(">"); + } + } else { + b.append(code.toString()); + } + } + + @Override + public String toString() { + StringBuilder b = new StringBuilder(); + toString(b); + return b.toString(); + } + + /** Returns the type name as used by the database in the given dialect. */ + public String getSpannerTypeName(Dialect dialect) { + switch (dialect) { + case POSTGRESQL: + return getTypeNamePostgreSQL(); + case GOOGLE_STANDARD_SQL: + default: + return getTypeNameGoogleSQL(); + } + } + + private String getTypeNameGoogleSQL() { + if (code == Code.ARRAY) { + return code.getGoogleSQLName() + "<" + arrayElementType.getTypeNameGoogleSQL() + ">"; + } + return code.getGoogleSQLName(); + } + + private String getTypeNamePostgreSQL() { + if (code == Code.ARRAY) { + return arrayElementType.getTypeNamePostgreSQL() + "[]"; + } + return code.getPostgreSQLName(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Type that = (Type) o; + if (proto != null) { + return Objects.equals(proto, that.proto); + } + return code == that.code + && Objects.equals(arrayElementType, that.arrayElementType) + && Objects.equals(structFields, that.structFields) + && Objects.equals(protoTypeFqn, that.protoTypeFqn); + } + + @Override + public int hashCode() { 
+ if (proto != null) { + return proto.hashCode(); + } + return Objects.hash(code, arrayElementType, structFields); + } + + com.google.spanner.v1.Type toProto() { + if (proto != null) { + return proto; + } + com.google.spanner.v1.Type.Builder proto = com.google.spanner.v1.Type.newBuilder(); + proto.setCode(code.getTypeCode()); + proto.setTypeAnnotation(code.getTypeAnnotationCode()); + if (code == Code.ARRAY) { + proto.setArrayElementType(arrayElementType.toProto()); + } else if (code == Code.STRUCT) { + com.google.spanner.v1.StructType.Builder fields = proto.getStructTypeBuilder(); + for (StructField field : structFields) { + fields.addFieldsBuilder().setName(field.getName()).setType(field.getType().toProto()); + } + } else if (code == Code.PROTO || code == Code.ENUM) { + proto.setProtoTypeFqn(protoTypeFqn); + } + + return proto.build(); + } + + static Type fromProto(com.google.spanner.v1.Type proto) { + Code type = Code.fromProto(proto.getCode(), proto.getTypeAnnotation()); + switch (type) { + case BOOL: + return bool(); + case INT64: + return int64(); + case FLOAT32: + return float32(); + case FLOAT64: + return float64(); + case NUMERIC: + return numeric(); + case PG_NUMERIC: + return pgNumeric(); + case STRING: + return string(); + case JSON: + return json(); + case PG_JSONB: + return pgJsonb(); + case PG_OID: + return pgOid(); + case BYTES: + return bytes(); + case TIMESTAMP: + return timestamp(); + case DATE: + return date(); + case UUID: + return uuid(); + case INTERVAL: + return interval(); + case PROTO: + return proto(proto.getProtoTypeFqn()); + case ENUM: + return protoEnum(proto.getProtoTypeFqn()); + case ARRAY: + checkArgument( + proto.hasArrayElementType(), + "Missing expected 'array_element_type' field in 'Type' message: %s", + proto); + Type elementType; + try { + elementType = fromProto(proto.getArrayElementType()); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException( + "Could not parse 'array_element_type' attribute in 
'Type' message: " + proto, e); + } + return array(elementType); + case STRUCT: + checkArgument( + proto.hasStructType(), + "Missing expected 'struct_type' field in 'Type' message: %s", + proto); + List fields = new ArrayList<>(proto.getStructType().getFieldsCount()); + for (com.google.spanner.v1.StructType.Field field : proto.getStructType().getFieldsList()) { + checkArgument(field.hasType(), "Missing expected 'type' attribute in 'Field': %s", proto); + // Names may be empty; for example, the name of the column returned by "SELECT 1". + String name = Strings.nullToEmpty(field.getName()); + fields.add(StructField.of(name, fromProto(field.getType()))); + } + return struct(fields); + case UNRECOGNIZED: + default: + return unrecognized(proto); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Value.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Value.java new file mode 100644 index 000000000000..b1ffc5ea3abc --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Value.java @@ -0,0 +1,3421 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SpannerTypeConverter.atUTC; +import static com.google.cloud.spanner.SpannerTypeConverter.convertLocalDateToSpannerDate; +import static com.google.cloud.spanner.SpannerTypeConverter.convertToISO8601; +import static com.google.cloud.spanner.SpannerTypeConverter.convertToTypedIterable; +import static com.google.cloud.spanner.SpannerTypeConverter.createUntypedArrayValue; +import static com.google.cloud.spanner.SpannerTypeConverter.createUntypedIterableValue; +import static com.google.cloud.spanner.SpannerTypeConverter.createUntypedStringValue; + +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AbstractResultSet.LazyByteArray; +import com.google.cloud.spanner.Type.Code; +import com.google.common.base.Joiner; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterables; +import com.google.common.collect.Lists; +import com.google.common.io.CharSource; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Descriptors.Descriptor; +import com.google.protobuf.Descriptors.EnumDescriptor; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.ListValue; +import com.google.protobuf.NullValue; +import com.google.protobuf.ProtocolMessageEnum; +import com.google.protobuf.Value.KindCase; +import java.io.IOException; +import java.io.Serializable; +import java.math.BigDecimal; +import java.nio.charset.StandardCharsets; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.OffsetDateTime; +import java.time.ZonedDateTime; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Base64; +import java.util.BitSet; +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Objects; 
+import java.util.UUID; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import javax.annotation.concurrent.Immutable; + +/** + * Represents a value to be consumed by the Cloud Spanner API. A value can be {@code NULL} or + * non-{@code NULL}; regardless, values always have an associated type. + * + *

The {@code Value} API is optimized for construction, since this is the majority use-case when + * using this class with the Cloud Spanner libraries. The factory method signatures and internal + * representations are design to minimize memory usage and object creation while still maintaining + * the immutability contract of this class. In particular, arrays of primitive types can be + * constructed without requiring boxing into collections of wrapper types. The getters in this class + * are intended primarily for test purposes, and so do not share the same performance + * characteristics; in particular, getters for array types may be expensive. + * + *

{@code Value} instances are immutable. + */ +@Immutable +public abstract class Value implements Serializable { + + /** + * Placeholder value to be passed to a mutation to make Cloud Spanner store the commit timestamp + * in that column. The commit timestamp is the timestamp corresponding to when Cloud Spanner + * commits the transaction containing the mutation. + * + *

Note that this particular timestamp instance has no semantic meaning. In particular the + * value of seconds and nanoseconds in this timestamp are meaningless. This placeholder can only + * be used for columns that have set the option "(allow_commit_timestamp=true)" in the schema. + * + *

When reading the value stored in such a column, the value returned is an actual timestamp + * corresponding to the commit time of the transaction, which has no relation to this placeholder. + * + * @see + * Transaction Semantics + */ + public static final Timestamp COMMIT_TIMESTAMP = Timestamp.ofTimeMicroseconds(0L); + + static final com.google.protobuf.Value NULL_PROTO = + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build(); + + /** Constant to specify a PG Numeric NaN value. */ + public static final String NAN = "NaN"; + + private static final int MAX_DEBUG_STRING_LENGTH = 36; + private static final String ELLIPSIS = "..."; + private static final String NULL_STRING = "NULL"; + private static final char LIST_SEPARATOR = ','; + private static final char LIST_OPEN = '['; + private static final char LIST_CLOSE = ']'; + private static final long serialVersionUID = -5289864325087675338L; + + /** + * Returns a {@link Value} that wraps the given proto value. This can be used to construct a value + * without a specific type, and let the backend infer the type based on the statement where it is + * used. + * + * @param value the non-null proto value (a {@link NullValue} is allowed) + */ + public static Value untyped(com.google.protobuf.Value value) { + return new ProtoBackedValueImpl(Preconditions.checkNotNull(value), null); + } + + /** Returns a generic Value backed by a protobuf value. This is used for unrecognized types. */ + static Value unrecognized(com.google.protobuf.Value value, Type type) { + Preconditions.checkArgument( + type.getCode() == Code.UNRECOGNIZED + || type.getCode() == Code.ARRAY + && type.getArrayElementType().getCode() == Code.UNRECOGNIZED); + return new ProtoBackedValueImpl(Preconditions.checkNotNull(value), type); + } + + /** + * Returns a {@code BOOL} value. + * + * @param v the value, which may be null + */ + public static Value bool(@Nullable Boolean v) { + return new BoolImpl(v == null, v == null ? 
false : v); + } + + /** Returns a {@code BOOL} value. */ + public static Value bool(boolean v) { + return new BoolImpl(false, v); + } + + /** + * Returns an {@code INT64} value. + * + * @param v the value, which may be null + */ + public static Value int64(@Nullable Long v) { + return new Int64Impl(v == null, v == null ? 0 : v); + } + + /** Returns an {@code INT64} value. */ + public static Value int64(long v) { + return new Int64Impl(false, v); + } + + /** + * Returns a {@code FLOAT32} value. + * + * @param v the value, which may be null + */ + public static Value float32(@Nullable Float v) { + return new Float32Impl(v == null, v == null ? 0 : v); + } + + /** Returns a {@code FLOAT32} value. */ + public static Value float32(float v) { + return new Float32Impl(false, v); + } + + /** + * Returns a {@code FLOAT64} value. + * + * @param v the value, which may be null + */ + public static Value float64(@Nullable Double v) { + return new Float64Impl(v == null, v == null ? 0 : v); + } + + /** Returns a {@code FLOAT64} value. */ + public static Value float64(double v) { + return new Float64Impl(false, v); + } + + /** + * Returns a {@code NUMERIC} value. The valid value range for the whole component of the {@link + * BigDecimal} is from -9,999,999,999,999,999,999,999,999 to +9,999,999,999,999,999,999,999,999 + * (both inclusive), i.e. the max length of the whole component is 29 digits. The max length of + * the fractional part is 9 digits. Trailing zeros in the fractional part are not considered and + * will be lost, as Cloud Spanner does not preserve the precision of a numeric value. + * + *

If you set a numeric value of a record to for example 0.10, Cloud Spanner will return this + * value as 0.1 in subsequent queries. Use {@link BigDecimal#stripTrailingZeros()} to compare + * inserted values with retrieved values if your application might insert numeric values with + * trailing zeros. + * + * @param v the value, which may be null + */ + public static Value numeric(@Nullable BigDecimal v) { + if (v != null) { + // Cloud Spanner does not preserve the precision, so 0.1 is considered equal to 0.10. + BigDecimal test = v.stripTrailingZeros(); + if (test.scale() > 9) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.OUT_OF_RANGE, + String.format( + "Max scale for a numeric is 9. The requested numeric has scale %d", test.scale())); + } + if (test.precision() - test.scale() > 29) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.OUT_OF_RANGE, + String.format( + "Max precision for the whole component of a numeric is 29. The requested numeric" + + " has a whole component with precision %d", + test.precision() - test.scale())); + } + } + return new NumericImpl(v == null, v); + } + + /** + * Returns a {@code PG NUMERIC} value. This value has flexible precision and scale which is + * specified in the Database DDL. This value also supports {@code NaNs}, which can be specified + * with {@code Value.pgNumeric(Value.NAN)} or simply as {@code Value.pgNumeric("NaN")}. + * + *

Note that this flavour of numeric is different than Spanner numerics ({@link + * Value#numeric(BigDecimal)}). It should be used only for handling numerics in the PostgreSQL + * dialect. + * + * @param v the value, which may be null + */ + public static Value pgNumeric(@Nullable String v) { + return new PgNumericImpl(v == null, v); + } + + /** + * Returns a {@code STRING} value. + * + * @param v the value, which may be null + */ + public static Value string(@Nullable String v) { + return new StringImpl(v == null, v); + } + + /** + * Returns a {@code JSON} value. + * + * @param v the value, which may be null + */ + public static Value json(@Nullable String v) { + return new JsonImpl(v == null, v); + } + + /** + * Returns a {@code INTERVAL} value. + * + * @param interval the value, which may be null + */ + public static Value interval(@Nullable Interval interval) { + return new IntervalImpl(interval == null, interval); + } + + /** + * Returns a {@code PG JSONB} value. + * + * @param v the value, which may be null + */ + public static Value pgJsonb(@Nullable String v) { + return new PgJsonbImpl(v == null, v); + } + + /** + * Returns an {@code PG_OID} value. + * + * @param v the value, which may be null + */ + public static Value pgOid(@Nullable Long v) { + return new PgOidImpl(v == null, v == null ? 0 : v); + } + + /** Returns an {@code PG_OID} value. */ + public static Value pgOid(long v) { + return new PgOidImpl(false, v); + } + + /** + * Return a {@code PROTO} value for not null proto messages. + * + * @param v Not null Proto message. + */ + public static Value protoMessage(AbstractMessage v) { + Preconditions.checkNotNull( + v, "Use protoMessage((ByteArray) null, MyProtoClass.getDescriptor()) for null values."); + return protoMessage( + ByteArray.copyFrom(v.toByteArray()), v.getDescriptorForType().getFullName()); + } + + /** + * Return a {@code PROTO} value + * + * @param v Serialized Proto Array, which may be null. 
+ * @param protoTypeFqn Fully qualified name of proto representing the proto definition. Use static + * method from proto class {@code MyProtoClass.getDescriptor().getFullName()} + */ + public static Value protoMessage(@Nullable ByteArray v, String protoTypeFqn) { + return new ProtoMessageImpl(v == null, v, protoTypeFqn); + } + + /** + * Return a {@code PROTO} value + * + * @param v Serialized Proto Array, which may be null. + * @param descriptor Proto Type Descriptor, use static method from proto class {@code + * MyProtoClass.getDescriptor()}. + */ + public static Value protoMessage(@Nullable ByteArray v, Descriptor descriptor) { + Preconditions.checkNotNull(descriptor, "descriptor can't be null."); + return protoMessage(v, descriptor.getFullName()); + } + + /** + * Return a {@code ENUM} value for not null proto messages. + * + * @param v Proto Enum, which may be null. + */ + public static Value protoEnum(ProtocolMessageEnum v) { + Preconditions.checkNotNull( + v, "Use protoEnum((Long) null, MyProtoEnum.getDescriptor()) for null values."); + return protoEnum(v.getNumber(), v.getDescriptorForType().getFullName()); + } + + /** + * Return a {@code ENUM} value. + * + * @param v Enum non-primitive Integer constant. + * @param protoTypeFqn Fully qualified name of proto representing the enum definition. Use static + * method from proto class {@code MyProtoEnum.getDescriptor().getFullName()} + */ + public static Value protoEnum(@Nullable Long v, String protoTypeFqn) { + return new ProtoEnumImpl(v == null, v, protoTypeFqn); + } + + /** + * Return a {@code ENUM} value. + * + * @param v Enum non-primitive Integer constant. + * @param enumDescriptor Enum Type Descriptor. Use static method from proto class {@code + * MyProtoEnum.getDescriptor()}. 
+ */ + public static Value protoEnum(@Nullable Long v, EnumDescriptor enumDescriptor) { + Preconditions.checkNotNull(enumDescriptor, "descriptor can't be null."); + return protoEnum(v, enumDescriptor.getFullName()); + } + + /** + * Return a {@code ENUM} value. + * + * @param v Enum integer primitive constant. + * @param protoTypeFqn Fully qualified name of proto representing the enum definition. Use static + * method from proto class {@code MyProtoEnum.getDescriptor().getFullName()} + */ + public static Value protoEnum(long v, String protoTypeFqn) { + return new ProtoEnumImpl(false, v, protoTypeFqn); + } + + /** + * Returns a {@code BYTES} value. + * + * @param v the value, which may be null + */ + public static Value bytes(@Nullable ByteArray v) { + return new LazyBytesImpl(v == null, v); + } + + /** + * Returns a {@code BYTES} value. + * + * @param base64String the value in Base64 encoding, which may be null. This value must be a valid + * base64 string. + */ + public static Value bytesFromBase64(@Nullable String base64String) { + return new LazyBytesImpl( + base64String == null, base64String == null ? null : new LazyByteArray(base64String)); + } + + static Value internalBytes(@Nullable LazyByteArray bytes) { + return new LazyBytesImpl(bytes == null, bytes); + } + + /** Returns a {@code TIMESTAMP} value. */ + public static Value timestamp(@Nullable Timestamp v) { + return new TimestampImpl(v == null, v == Value.COMMIT_TIMESTAMP, v); + } + + /** + * Returns a {@code DATE} value. The range [1678-01-01, 2262-01-01) is the legal interval for + * cloud spanner dates. A write to a date column is rejected if the value is outside of that + * interval. + */ + public static Value date(@Nullable Date v) { + return new DateImpl(v == null, v); + } + + public static Value uuid(@Nullable UUID v) { + return new UuidImpl(v == null, v); + } + + /** Returns a non-{@code NULL} {@code STRUCT} value. 
*/ + public static Value struct(Struct v) { + Preconditions.checkNotNull(v, "Illegal call to create a NULL struct value."); + return new StructImpl(v); + } + + /** + * Returns a {@code STRUCT} value of {@code Type} type. + * + * @param type the type of the {@code STRUCT} value + * @param v the struct {@code STRUCT} value. This may be {@code null} to produce a value for which + * {@code isNull()} is {@code true}. If non-{@code null}, {@link Struct#getType()} must match + * type. + */ + public static Value struct(Type type, @Nullable Struct v) { + if (v == null) { + Preconditions.checkArgument( + type.getCode() == Code.STRUCT, + "Illegal call to create a NULL struct with a non-struct type."); + return new StructImpl(type); + } else { + Preconditions.checkArgument( + type.equals(v.getType()), "Mismatch between struct value and type."); + return new StructImpl(v); + } + } + + /** + * Returns an {@code ARRAY} value. + * + * @param v the source of element values, which may be null to produce a value for which {@code + * isNull()} is {@code true} + */ + public static Value boolArray(@Nullable boolean[] v) { + return boolArray(v, 0, v == null ? 0 : v.length); + } + + /** + * Returns an {@code ARRAY} value that takes its elements from a region of an array. + * + * @param v the source of element values, which may be null to produce a value for which {@code + * isNull()} is {@code true} + * @param pos the start position of {@code v} to copy values from. Ignored if {@code v} is {@code + * null}. + * @param length the number of values to copy from {@code v}. Ignored if {@code v} is {@code + * null}. + */ + public static Value boolArray(@Nullable boolean[] v, int pos, int length) { + return boolArrayFactory.create(v, pos, length); + } + + /** + * Returns an {@code ARRAY} value. + * + * @param v the source of element values. This may be {@code null} to produce a value for which + * {@code isNull()} is {@code true}. Individual elements may also be {@code null}. 
+ */ + public static Value boolArray(@Nullable Iterable v) { + // TODO(user): Consider memory optimizing boolArray() to use BitSet instead of boolean[]. + return boolArrayFactory.create(v); + } + + /** + * Returns an {@code ARRAY} value. + * + * @param v the source of element values, which may be null to produce a value for which {@code + * isNull()} is {@code true} + */ + public static Value int64Array(@Nullable long[] v) { + return int64Array(v, 0, v == null ? 0 : v.length); + } + + /** + * Returns an {@code ARRAY} value that takes its elements from a region of an array. + * + * @param v the source of element values, which may be null to produce a value for which {@code + * isNull()} is {@code true} + * @param pos the start position of {@code v} to copy values from. Ignored if {@code v} is {@code + * null}. + * @param length the number of values to copy from {@code v}. Ignored if {@code v} is {@code + * null}. + */ + public static Value int64Array(@Nullable long[] v, int pos, int length) { + return int64ArrayFactory.create(v, pos, length); + } + + /** + * Returns an {@code ARRAY} value. + * + * @param v the source of element values. This may be {@code null} to produce a value for which + * {@code isNull()} is {@code true}. Individual elements may also be {@code null}. + */ + public static Value int64Array(@Nullable Iterable v) { + return int64ArrayFactory.create(v); + } + + /** + * Returns an {@code ARRAY} value. + * + * @param v the source of element values, which may be null to produce a value for which {@code + * isNull()} is {@code true} + */ + public static Value float32Array(@Nullable float[] v) { + return float32Array(v, 0, v == null ? 0 : v.length); + } + + /** + * Returns an {@code ARRAY} value that takes its elements from a region of an array. + * + * @param v the source of element values, which may be null to produce a value for which {@code + * isNull()} is {@code true} + * @param pos the start position of {@code v} to copy values from. 
Ignored if {@code v} is {@code + * null}. + * @param length the number of values to copy from {@code v}. Ignored if {@code v} is {@code + * null}. + */ + public static Value float32Array(@Nullable float[] v, int pos, int length) { + return float32ArrayFactory.create(v, pos, length); + } + + /** + * Returns an {@code ARRAY} value. + * + * @param v the source of element values. This may be {@code null} to produce a value for which + * {@code isNull()} is {@code true}. Individual elements may also be {@code null}. + */ + public static Value float32Array(@Nullable Iterable v) { + return float32ArrayFactory.create(v); + } + + /** + * Returns an {@code ARRAY} value. + * + * @param v the source of element values, which may be null to produce a value for which {@code + * isNull()} is {@code true} + */ + public static Value float64Array(@Nullable double[] v) { + return float64Array(v, 0, v == null ? 0 : v.length); + } + + /** + * Returns an {@code ARRAY} value that takes its elements from a region of an array. + * + * @param v the source of element values, which may be null to produce a value for which {@code + * isNull()} is {@code true} + * @param pos the start position of {@code v} to copy values from. Ignored if {@code v} is {@code + * null}. + * @param length the number of values to copy from {@code v}. Ignored if {@code v} is {@code + * null}. + */ + public static Value float64Array(@Nullable double[] v, int pos, int length) { + return float64ArrayFactory.create(v, pos, length); + } + + /** + * Returns an {@code ARRAY} value. + * + * @param v the source of element values. This may be {@code null} to produce a value for which + * {@code isNull()} is {@code true}. Individual elements may also be {@code null}. + */ + public static Value float64Array(@Nullable Iterable v) { + return float64ArrayFactory.create(v); + } + + /** + * Returns an {@code ARRAY} value. + * + * @param v the source of element values. 
This may be {@code null} to produce a value for which + * {@code isNull()} is {@code true}. Individual elements may also be {@code null}. + */ + public static Value numericArray(@Nullable Iterable v) { + return new NumericArrayImpl(v == null, v == null ? null : immutableCopyOf(v)); + } + + /** + * Returns an {@code ARRAY} value. + * + * @param v the source of element values. This may be {@code null} to produce a value for which + * {@code isNull()} is {@code true}. Individual elements may also be {@code null}. Individual + * elements may be {@code "NaN"} or {@link Value#NAN}. + */ + public static Value pgNumericArray(@Nullable Iterable v) { + return new PgNumericArrayImpl(v == null, v == null ? null : immutableCopyOf(v)); + } + + /** + * Returns an {@code ARRAY} value. + * + * @param v the source of element values. This may be {@code null} to produce a value for which + * {@code isNull()} is {@code true}. Individual elements may also be {@code null}. + */ + public static Value stringArray(@Nullable Iterable v) { + return new StringArrayImpl(v == null, v == null ? null : immutableCopyOf(v)); + } + + /** + * Returns an {@code ARRAY} value. + * + * @param v the source of element values. This may be {@code null} to produce a value for which + * {@code isNull()} is {@code true}. Individual elements may also be {@code null}. + */ + public static Value jsonArray(@Nullable Iterable v) { + return new JsonArrayImpl(v == null, v == null ? null : immutableCopyOf(v)); + } + + /** + * Returns an {@code ARRAY} value. + * + * @param v the source of element values. This may be {@code null} to produce a value for which + * {@code isNull()} is {@code true}. Individual elements may also be {@code null}. + */ + public static Value pgJsonbArray(@Nullable Iterable v) { + return new PgJsonbArrayImpl(v == null, v == null ? null : immutableCopyOf(v)); + } + + /** + * Returns an {@code ARRAY} value. 
+ * + * @param v the source of element values, which may be null to produce a value for which {@code + * isNull()} is {@code true} + */ + public static Value pgOidArray(@Nullable long[] v) { + return pgOidArray(v, 0, v == null ? 0 : v.length); + } + + /** + * Returns an {@code ARRAY} value that takes its elements from a region of an array. + * + * @param v the source of element values, which may be null to produce a value for which {@code + * isNull()} is {@code true} + * @param pos the start position of {@code v} to copy values from. Ignored if {@code v} is {@code + * null}. + * @param length the number of values to copy from {@code v}. Ignored if {@code v} is {@code + * null}. + */ + public static Value pgOidArray(@Nullable long[] v, int pos, int length) { + return pgOidArrayFactory.create(v, pos, length); + } + + /** + * Returns an {@code ARRAY} value. + * + * @param v the source of element values. This may be {@code null} to produce a value for which + * {@code isNull()} is {@code true}. Individual elements may also be {@code null}. + */ + public static Value pgOidArray(@Nullable Iterable v) { + return pgOidArrayFactory.create(v); + } + + /** + * Returns an {@code ARRAY} value. + * + * @param v the source of element values. This may be {@code null} to produce a value for which + * {@code isNull()} is {@code true}. Individual elements may also be {@code null}. + * @param descriptor Proto Type Descriptor, use static method from proto class {@code + * MyProtoClass.getDescriptor()}. 
+ */ + public static Value protoMessageArray( + @Nullable Iterable v, Descriptor descriptor) { + if (v == null) { + return new ProtoMessageArrayImpl(true, null, descriptor.getFullName()); + } + + List serializedArray = new ArrayList<>(); + v.forEach( + (message) -> { + if (message != null) { + serializedArray.add(ByteArray.copyFrom(message.toByteArray())); + } else { + serializedArray.add(null); + } + }); + + return new ProtoMessageArrayImpl(false, serializedArray, descriptor.getFullName()); + } + + /** + * Returns an {@code ARRAY} value. + * + * @param v the source of element values. This may be {@code null} to produce a value for which + * {@code isNull()} is {@code true}. Individual elements may also be {@code null}. + * @param protoTypeFqn Fully qualified name of proto representing the proto definition. Use static + * method from proto class {@code MyProtoClass.getDescriptor().getFullName()} + */ + public static Value protoMessageArray(@Nullable Iterable v, String protoTypeFqn) { + return new ProtoMessageArrayImpl( + v == null, v != null ? immutableCopyOf(v) : null, protoTypeFqn); + } + + /** + * Returns an {@code ARRAY} value. + * + * @param v the source of element values. This may be {@code null} to produce a value for which + * {@code isNull()} is {@code true}. Individual elements may also be {@code null}. + * @param descriptor Proto Type Descriptor, use static method from proto class {@code + * MyProtoClass.getDescriptor()}. + */ + public static Value protoEnumArray( + @Nullable Iterable v, EnumDescriptor descriptor) { + if (v == null) { + return new ProtoEnumArrayImpl(true, null, descriptor.getFullName()); + } + + List enumConstValues = new ArrayList<>(); + v.forEach( + (protoEnum) -> { + if (protoEnum != null) { + enumConstValues.add((long) protoEnum.getNumber()); + } else { + enumConstValues.add(null); + } + }); + + return new ProtoEnumArrayImpl(false, enumConstValues, descriptor.getFullName()); + } + + /** + * Returns an {@code ARRAY} value. 
+ * + * @param v the source of element values. This may be {@code null} to produce a value for which + * {@code isNull()} is {@code true}. Individual elements may also be {@code null}. + * @param protoTypeFqn Fully qualified name of proto representing the enum definition. Use static + * method from proto class {@code MyProtoEnum.getDescriptor().getFullName()} + */ + public static Value protoEnumArray(@Nullable Iterable v, String protoTypeFqn) { + return new ProtoEnumArrayImpl(v == null, v != null ? immutableCopyOf(v) : null, protoTypeFqn); + } + + /** + * Returns an {@code ARRAY} value. + * + * @param v the source of element values. This may be {@code null} to produce a value for which + * {@code isNull()} is {@code true}. Individual elements may also be {@code null}. + */ + public static Value bytesArray(@Nullable Iterable v) { + return new LazyBytesArrayImpl(v == null, v == null ? null : byteArraysToLazyByteArrayList(v)); + } + + private static List byteArraysToLazyByteArrayList(Iterable byteArrays) { + List list = new ArrayList<>(); + for (ByteArray byteArray : byteArrays) { + list.add(byteArray == null ? null : new LazyByteArray(byteArray)); + } + return Collections.unmodifiableList(list); + } + + /** + * Returns an {@code ARRAY} value. + * + * @param base64Strings the source of element values. This may be {@code null} to produce a value + * for which {@code isNull()} is {@code true}. Individual elements may also be {@code null}. + * Non-null values must be a valid Base64 string. + */ + public static Value bytesArrayFromBase64(@Nullable Iterable base64Strings) { + return new LazyBytesArrayImpl( + base64Strings == null, + base64Strings == null ? null : base64StringsToLazyByteArrayList(base64Strings)); + } + + private static List base64StringsToLazyByteArrayList( + Iterable base64Strings) { + List list = new ArrayList<>(); + for (String base64 : base64Strings) { + list.add(base64 == null ? 
null : new LazyByteArray(base64)); + } + return Collections.unmodifiableList(list); + } + + /** + * Returns an {@code ARRAY} value. + * + * @param v the source of element values. This may be {@code null} to produce a value for which + * {@code isNull()} is {@code true}. Individual elements may also be {@code null}. + */ + public static Value timestampArray(@Nullable Iterable v) { + return new TimestampArrayImpl(v == null, v == null ? null : immutableCopyOf(v)); + } + + /** + * Returns an {@code ARRAY} value. The range [1678-01-01, 2262-01-01) is the legal interval + * for cloud spanner dates. A write to a date column is rejected if the value is outside of that + * interval. + * + * @param v the source of element values. This may be {@code null} to produce a value for which + * {@code isNull()} is {@code true}. Individual elements may also be {@code null}. + */ + public static Value dateArray(@Nullable Iterable v) { + return new DateArrayImpl(v == null, v == null ? null : immutableCopyOf(v)); + } + + /** + * Returns an {@code ARRAY} value. + * + * @param v the source of element values. This may be {@code null} to produce a value for which + * {@code isNull()} is {@code true}. Individual elements may also be {@code null}. + */ + public static Value uuidArray(@Nullable Iterable v) { + return new UuidArrayImpl(v == null, v == null ? null : immutableCopyOf(v)); + } + + /** + * Returns an {@code ARRAY} value. + * + * @param v the source of element values. This may be {@code null} to produce a value for which + * {@code isNull()} is {@code true}. Individual elements may also be {@code null}. + */ + public static Value intervalArray(@Nullable Iterable v) { + return new IntervalArrayImpl(v == null, v == null ? null : immutableCopyOf(v)); + } + + /** + * Returns an {@code ARRAY>} value. + * + * @param elementType + * @param v the source of element values. This may be {@code null} to produce a value for which + * {@code isNull()} is {@code true}. 
Individual elements may also be {@code null}. + */ + public static Value structArray(Type elementType, @Nullable Iterable v) { + if (v == null) { + Preconditions.checkArgument( + elementType.getCode() == Code.STRUCT, + "Illegal call to create a NULL array-of-struct with a non-struct element type."); + return new StructArrayImpl(elementType, null); + } + List values = immutableCopyOf(v); + for (Struct value : values) { + if (value != null) { + Preconditions.checkArgument( + value.getType().equals(elementType), + "Members of v must have type %s (found %s)", + elementType, + value.getType()); + } + } + return new StructArrayImpl(elementType, values); + } + + private Value() {} + + static Value toValue(Object value) { + if (value == null) { + return Value.untyped(NULL_PROTO); + } + if (value instanceof Value) { + return (Value) value; + } + if (value instanceof Boolean) { + return Value.bool((Boolean) value); + } + if (value instanceof Long || value instanceof Integer) { + return createUntypedStringValue(String.valueOf(value)); + } + if (value instanceof Float) { + return Value.float32((Float) value); + } + if (value instanceof Double) { + return Value.float64((Double) value); + } + if (value instanceof BigDecimal) { + return Value.numeric((BigDecimal) value); + } + if (value instanceof ByteArray) { + return Value.bytes((ByteArray) value); + } + if (value instanceof byte[]) { + return Value.bytes(ByteArray.copyFrom((byte[]) value)); + } + if (value instanceof Date) { + return Value.date((Date) value); + } + if (value instanceof UUID) { + return Value.uuid((UUID) value); + } + if (value instanceof LocalDate) { + return Value.date(convertLocalDateToSpannerDate((LocalDate) value)); + } + if (value instanceof LocalDateTime) { + return createUntypedStringValue(convertToISO8601(atUTC((LocalDateTime) value))); + } + if (value instanceof OffsetDateTime) { + return createUntypedStringValue(convertToISO8601(atUTC((OffsetDateTime) value))); + } + if (value instanceof 
ZonedDateTime) { + return createUntypedStringValue(convertToISO8601(atUTC((ZonedDateTime) value))); + } + if (value instanceof ProtocolMessageEnum) { + return Value.protoEnum((ProtocolMessageEnum) value); + } + if (value instanceof AbstractMessage) { + return Value.protoMessage((AbstractMessage) value); + } + if (value instanceof Interval) { + return Value.interval((Interval) value); + } + if (value instanceof Struct) { + return Value.struct((Struct) value); + } + if (value instanceof Timestamp) { + return Value.timestamp((Timestamp) value); + } + if (value instanceof Iterable) { + Iterator iterator = ((Iterable) value).iterator(); + if (!iterator.hasNext()) { + return createUntypedArrayValue(Stream.empty()); + } + Object object = iterator.next(); + if (object instanceof Boolean) { + return Value.boolArray(convertToTypedIterable((Boolean) object, iterator)); + } + if (object instanceof Integer) { + return createUntypedIterableValue((Integer) object, iterator, String::valueOf); + } + if (object instanceof Long) { + return createUntypedIterableValue((Long) object, iterator, String::valueOf); + } + if (object instanceof Float) { + return Value.float32Array(convertToTypedIterable((Float) object, iterator)); + } + if (object instanceof Double) { + return Value.float64Array(convertToTypedIterable((Double) object, iterator)); + } + if (object instanceof BigDecimal) { + return Value.numericArray(convertToTypedIterable((BigDecimal) object, iterator)); + } + if (object instanceof ByteArray) { + return Value.bytesArray(convertToTypedIterable((ByteArray) object, iterator)); + } + if (object instanceof byte[]) { + return Value.bytesArray( + SpannerTypeConverter.convertToTypedIterable( + ByteArray::copyFrom, (byte[]) object, iterator)); + } + if (object instanceof Interval) { + return Value.intervalArray(convertToTypedIterable((Interval) object, iterator)); + } + if (object instanceof Timestamp) { + return Value.timestampArray(convertToTypedIterable((Timestamp) object, 
iterator)); + } + if (object instanceof Date) { + return Value.dateArray(convertToTypedIterable((Date) object, iterator)); + } + if (object instanceof UUID) { + return Value.uuidArray(convertToTypedIterable((UUID) object, iterator)); + } + if (object instanceof LocalDate) { + return Value.dateArray( + SpannerTypeConverter.convertToTypedIterable( + SpannerTypeConverter::convertLocalDateToSpannerDate, (LocalDate) object, iterator)); + } + if (object instanceof LocalDateTime) { + return createUntypedIterableValue( + (LocalDateTime) object, iterator, val -> convertToISO8601(atUTC(val))); + } + if (object instanceof OffsetDateTime) { + return createUntypedIterableValue( + (OffsetDateTime) object, iterator, val -> convertToISO8601(atUTC(val))); + } + if (object instanceof ZonedDateTime) { + return createUntypedIterableValue( + (ZonedDateTime) object, iterator, val -> convertToISO8601(atUTC(val))); + } + } + + // array and primitive array + if (value instanceof Boolean[]) { + return Value.boolArray(Arrays.asList((Boolean[]) value)); + } + if (value instanceof boolean[]) { + return Value.boolArray((boolean[]) value); + } + if (value instanceof Float[]) { + return Value.float32Array(Arrays.asList((Float[]) value)); + } + if (value instanceof float[]) { + return Value.float32Array((float[]) value); + } + if (value instanceof Double[]) { + return Value.float64Array(Arrays.asList((Double[]) value)); + } + if (value instanceof double[]) { + return Value.float64Array((double[]) value); + } + if (value instanceof Long[]) { + return createUntypedArrayValue(Arrays.stream((Long[]) value)); + } + if (value instanceof long[]) { + return createUntypedArrayValue(Arrays.stream((long[]) value).boxed()); + } + if (value instanceof Integer[]) { + return createUntypedArrayValue(Arrays.stream((Integer[]) value)); + } + if (value instanceof int[]) { + return createUntypedArrayValue(Arrays.stream((int[]) value).boxed()); + } + + return createUntypedStringValue(value); + } + + /** Returns the 
type of this value. This will return a type even if {@code isNull()} is true. */ + public abstract Type getType(); + + /** Returns {@code true} if this instance represents a {@code NULL} value. */ + public abstract boolean isNull(); + + /** + * Returns the value of a {@code BOOL}-typed instance. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract boolean getBool(); + + /** + * Returns the value of a {@code INT64}-typed instance. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract long getInt64(); + + /** + * Returns the value of a {@code FLOAT32}-typed instance. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract float getFloat32(); + + /** + * Returns the value of a {@code FLOAT64}-typed instance. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract double getFloat64(); + + /** + * Returns the value of a {@code NUMERIC}-typed instance. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract BigDecimal getNumeric(); + + /** + * Returns the value of a {@code STRING}-typed instance. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract String getString(); + + /** + * Returns the value of a {@code JSON}-typed instance. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public String getJson() { + throw new UnsupportedOperationException("Not implemented"); + } + + /** + * Returns the value of a {@code JSONB}-typed instance. 
+ * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public String getPgJsonb() { + throw new UnsupportedOperationException("Not implemented"); + } + + /** + * Returns the value of a {@code PROTO}-typed instance. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public T getProtoMessage(T m) { + throw new UnsupportedOperationException("Not implemented"); + } + + /** + * Returns the value of a {@code ENUM}-typed instance. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public T getProtoEnum( + Function method) { + throw new UnsupportedOperationException("Not implemented"); + } + + /** + * Returns the value of a {@code BYTES}-typed instance. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract ByteArray getBytes(); + + /** + * Returns the value of a {@code TIMESTAMP}-typed instance. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type or + * {@link #isCommitTimestamp()}. + */ + public abstract Timestamp getTimestamp(); + + /** Returns true if this is a commit timestamp value. */ + public abstract boolean isCommitTimestamp(); + + /** + * Returns the value of a {@code DATE}-typed instance. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract Date getDate(); + + /** + * Returns the value of a {@code UUID}-typed instance. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract UUID getUuid(); + + /** + * Returns the value of a {@code INTERVAL}-typed instance. 
+ * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract Interval getInterval(); + + /** + * Returns the value of a {@code STRUCT}-typed instance. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract Struct getStruct(); + + /** + * Returns the value of an {@code ARRAY}-typed instance. While the returned list itself will + * never be {@code null}, elements of that list may be null. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract List getBoolArray(); + + /** + * Returns the value of an {@code ARRAY}-typed instance. While the returned list itself + * will never be {@code null}, elements of that list may be null. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract List getInt64Array(); + + /** + * Returns the value of an {@code ARRAY}-typed instance. While the returned list itself + * will never be {@code null}, elements of that list may be null. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract List getFloat32Array(); + + /** + * Returns the value of an {@code ARRAY}-typed instance. While the returned list itself + * will never be {@code null}, elements of that list may be null. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract List getFloat64Array(); + + /** + * Returns the value of an {@code ARRAY}-typed instance. While the returned list itself + * will never be {@code null}, elements of that list may be null. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract List getNumericArray(); + + /** + * Returns the value of an {@code ARRAY}-typed instance. 
While the returned list itself + * will never be {@code null}, elements of that list may be null. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract List getStringArray(); + + /** + * Returns the value of an {@code ARRAY}-typed instance. While the returned list itself will + * never be {@code null}, elements of that list may be null. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public List getJsonArray() { + throw new UnsupportedOperationException("Not implemented"); + } + + /** + * Returns the value of an {@code ARRAY}-typed instance. While the returned list itself + * will never be {@code null}, elements of that list may be null. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public List getPgJsonbArray() { + throw new UnsupportedOperationException("Not implemented"); + } + + /** + * Returns the value of an {@code ARRAY}-typed instance. While the returned list itself + * will never be {@code null}, elements of that list may be null. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public List getProtoMessageArray(T m) { + throw new UnsupportedOperationException("Not implemented"); + } + + /** + * Returns the value of an {@code ARRAY}-typed instance. While the returned list itself will + * never be {@code null}, elements of that list may be null. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public List getProtoEnumArray( + Function method) { + throw new UnsupportedOperationException("Not implemented"); + } + + /** + * Returns the value of an {@code ARRAY}-typed instance. While the returned list itself + * will never be {@code null}, elements of that list may be null. 
+ * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract List getBytesArray(); + + /** + * Returns the value of an {@code ARRAY}-typed instance. While the returned list itself + * will never be {@code null}, elements of that list may be null. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract List getTimestampArray(); + + /** + * Returns the value of an {@code ARRAY}-typed instance. While the returned list itself will + * never be {@code null}, elements of that list may be null. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract List getDateArray(); + + /** + * Returns the value of an {@code ARRAY}-typed instance. While the returned list itself will + * never be {@code null}, elements of that list may be null. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract List getUuidArray(); + + /** + * Returns the value of an {@code ARRAY}-typed instance. While the returned list itself + * will never be {@code null}, elements of that list may be null. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract List getIntervalArray(); + + /** + * Returns the value of an {@code ARRAY>}-typed instance. While the returned list + * itself will never be {@code null}, elements of that list may be null. + * + * @throws IllegalStateException if {@code isNull()} or the value is not of the expected type + */ + public abstract List getStructArray(); + + @Override + public String toString() { + StringBuilder b = new StringBuilder(); + toString(b); + return b.toString(); + } + + /** + * Returns this value as a raw string representation. 
This is guaranteed to work for all values, + * regardless of the underlying data type, and is guaranteed not to be truncated. + * + *

Returns the string "NULL" for null values. + */ + @Nonnull + public String getAsString() { + return toString(); + } + + /** + * Returns this value as a list of raw string representations. This is guaranteed to work for all + * values, regardless of the underlying data type, and the strings are guaranteed not to be + * truncated. The method returns a singleton list for non-array values and a list containing as + * many elements as there are in the array for array values. This method can be used instead of + * the {@link #getAsString()} method if you need to quote the individual elements in an array. + * + *

Returns the string "NULL" for null values. + */ + @Nonnull + public ImmutableList getAsStringList() { + return ImmutableList.of(toString()); + } + + // END OF PUBLIC API. + + static com.google.protobuf.Value toProto(Value value) { + return value == null ? NULL_PROTO : value.toProto(); + } + + /** + * Appends a string representation of this value to the given builder. The string representation + * can be truncated. + */ + abstract void toString(StringBuilder b); + + abstract com.google.protobuf.Value toProto(); + + private static List immutableCopyOf(Iterable v) { + ArrayList copy = new ArrayList<>(); + Iterables.addAll(copy, v); + return Collections.unmodifiableList(copy); + } + + private abstract static class PrimitiveArrayValueFactory { + Value create(A v, int pos, int length) { + if (v == null) { + return newValue(true, null, null); + } + A copy = newArray(length); + System.arraycopy(v, pos, copy, 0, length); + return newValue(false, null, copy); + } + + Value create(@Nullable Iterable v) { + if (v == null) { + return newValue(true, null, null); + } + Collection values; + if (v instanceof Collection) { + values = (Collection) v; + } else { + // The wrapper objects already exist (or would be created in our iterator below), and we + // need to do the same amount of array buffering to come up with a backing array of the + // correct size; it is as CPU-efficient to simply copy into an ArrayList (except in the rare + // case where the underlying ArrayList buffer is exactly the size of the data - in that + // case, we incur an additional buffer copy over managing the array ourselves). Note that + // this simpler strategy does use more memory for booleans though as Boolean[] uses more + // memory than a boolean[] of the same size. 
+ values = Lists.newArrayList(v); + } + BitSet nulls = null; + A arr = newArray(values.size()); + int i = 0; + for (T element : values) { + if (element == null) { + if (nulls == null) { + nulls = new BitSet(); + } + nulls.set(i); + } else { + set(arr, i, element); + } + ++i; + } + return newValue(false, nulls, arr); + } + + abstract A newArray(int size); + + abstract void set(A arr, int i, T value); + + abstract Value newValue(boolean isNull, BitSet nulls, A values); + } + + private static final PrimitiveArrayValueFactory int64ArrayFactory = + new PrimitiveArrayValueFactory() { + @Override + long[] newArray(int size) { + return new long[size]; + } + + @Override + void set(long[] arr, int i, Long value) { + arr[i] = value; + } + + @Override + Value newValue(boolean isNull, BitSet nulls, long[] values) { + return new Int64ArrayImpl(isNull, nulls, values); + } + }; + + private static final PrimitiveArrayValueFactory pgOidArrayFactory = + new PrimitiveArrayValueFactory() { + @Override + long[] newArray(int size) { + return new long[size]; + } + + @Override + void set(long[] arr, int i, Long value) { + arr[i] = value; + } + + @Override + Value newValue(boolean isNull, BitSet nulls, long[] values) { + return new PgOidArrayImpl(isNull, nulls, values); + } + }; + + private static final PrimitiveArrayValueFactory float32ArrayFactory = + new PrimitiveArrayValueFactory() { + @Override + float[] newArray(int size) { + return new float[size]; + } + + @Override + void set(float[] arr, int i, Float value) { + arr[i] = value; + } + + @Override + Value newValue(boolean isNull, BitSet nulls, float[] values) { + return new Float32ArrayImpl(isNull, nulls, values); + } + }; + private static final PrimitiveArrayValueFactory float64ArrayFactory = + new PrimitiveArrayValueFactory() { + @Override + double[] newArray(int size) { + return new double[size]; + } + + @Override + void set(double[] arr, int i, Double value) { + arr[i] = value; + } + + @Override + Value newValue(boolean isNull, 
BitSet nulls, double[] values) { + return new Float64ArrayImpl(isNull, nulls, values); + } + }; + private static final PrimitiveArrayValueFactory boolArrayFactory = + new PrimitiveArrayValueFactory() { + @Override + boolean[] newArray(int size) { + return new boolean[size]; + } + + @Override + void set(boolean[] arr, int i, Boolean value) { + arr[i] = value; + } + + @Override + Value newValue(boolean isNull, BitSet nulls, boolean[] values) { + return new BoolArrayImpl(isNull, nulls, values); + } + }; + + /** Template class for {@code Value} implementations. */ + private abstract static class AbstractValue extends Value { + private final boolean isNull; + private final Type type; + + private AbstractValue(boolean isNull, Type type) { + this.isNull = isNull; + this.type = type; + } + + @Override + public Type getType() { + return type; + } + + @Override + public final boolean isNull() { + return isNull; + } + + @Override + public boolean isCommitTimestamp() { + return false; + } + + @Override + public boolean getBool() { + throw defaultGetter(Type.bool()); + } + + @Override + public long getInt64() { + throw defaultGetter(Type.int64()); + } + + @Override + public float getFloat32() { + throw defaultGetter(Type.float32()); + } + + @Override + public double getFloat64() { + throw defaultGetter(Type.float64()); + } + + @Override + public BigDecimal getNumeric() { + throw defaultGetter(Type.numeric()); + } + + @Override + public String getString() { + throw defaultGetter(Type.string()); + } + + @Override + public String getJson() { + throw defaultGetter(Type.json()); + } + + @Override + public String getPgJsonb() { + throw defaultGetter(Type.pgJsonb()); + } + + @Override + public ByteArray getBytes() { + throw defaultGetter(Type.bytes()); + } + + @Override + public Timestamp getTimestamp() { + throw defaultGetter(Type.timestamp()); + } + + @Override + public Date getDate() { + throw defaultGetter(Type.date()); + } + + @Override + public UUID getUuid() { + throw 
defaultGetter(Type.uuid()); + } + + @Override + public Interval getInterval() { + throw defaultGetter(Type.interval()); + } + + @Override + public Struct getStruct() { + if (getType().getCode() != Type.Code.STRUCT) { + throw new IllegalStateException( + "Illegal call to getter of incorrect type. Expected: STRUCT<...> actual: " + getType()); + } + throw new AssertionError("Should have been overridden"); + } + + @Override + public List getBoolArray() { + throw defaultGetter(Type.array(Type.bool())); + } + + @Override + public List getInt64Array() { + throw defaultGetter(Type.array(Type.int64())); + } + + @Override + public List getFloat32Array() { + throw defaultGetter(Type.array(Type.float32())); + } + + @Override + public List getFloat64Array() { + throw defaultGetter(Type.array(Type.float64())); + } + + @Override + public List getNumericArray() { + throw defaultGetter(Type.array(Type.numeric())); + } + + @Override + public List getStringArray() { + throw defaultGetter(Type.array(Type.string())); + } + + @Override + public List getJsonArray() { + throw defaultGetter(Type.array(Type.json())); + } + + @Override + public List getPgJsonbArray() { + throw defaultGetter(Type.array(Type.pgJsonb())); + } + + @Override + public List getBytesArray() { + throw defaultGetter(Type.array(Type.bytes())); + } + + @Override + public List getTimestampArray() { + throw defaultGetter(Type.array(Type.timestamp())); + } + + @Override + public List getDateArray() { + throw defaultGetter(Type.array(Type.date())); + } + + @Override + public List getUuidArray() { + throw defaultGetter(Type.array(Type.uuid())); + } + + @Override + public List getIntervalArray() { + throw defaultGetter(Type.array(Type.interval())); + } + + @Override + public List getStructArray() { + if (getType().getCode() != Type.Code.ARRAY + || getType().getArrayElementType().getCode() != Type.Code.STRUCT) { + throw new IllegalStateException( + "Illegal call to getter of incorrect type. 
Expected: ARRAY> actual: " + + getType()); + } + throw new AssertionError("Should have been overridden"); + } + + @Override + final void toString(StringBuilder b) { + // TODO(user): Truncate long arrays. + if (isNull()) { + b.append(NULL_STRING); + } else { + valueToString(b); + } + } + + /** + * Appends a representation of {@code this} to {@code b}. {@code this} is guaranteed to + * represent a non-null value. This value could be truncated if the underlying value is long. + */ + abstract void valueToString(StringBuilder b); + + @Override + final com.google.protobuf.Value toProto() { + return isNull() ? NULL_PROTO : valueToProto(); + } + + /** + * Returns a proto representation of this value. {@code this} is guaranteed to represent a + * non-null value. + */ + abstract com.google.protobuf.Value valueToProto(); + + @Override + public final boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + AbstractValue that = (AbstractValue) o; + if (!Objects.equals(getType(), that.getType()) || isNull != that.isNull) { + return false; + } + + return isNull || valueEquals(that); + } + + /** + * Returns true if {@code v} has the same value as {@code this}. {@code v} is guaranteed to have + * the same Java class as {@code this}, and both {@code this} and {@code v} are guaranteed to + * represent a non-null values. + */ + abstract boolean valueEquals(Value v); + + @Override + public final int hashCode() { + Type typeToHash = getType(); + int valueHash = isNull ? 0 : valueHash(); + + /** + * We are relaxing equality values here, making sure that Double.NaNs and Float.NaNs are equal + * to each other. This is because our Cloud Spanner Import / Export template in Apache Beam + * uses the mutation equality to check for modifications before committing. We noticed that + * when NaNs where used the template would always indicate a modification was present, when it + * turned out not to be the case. + * + *

With FLOAT32 being introduced, we want to ensure the backward compatibility of the NaN + * equality checks that existed for FLOAT64. We're promoting the type to FLOAT64 while + * calculating the type hash when the value is a NaN. We're doing a similar type promotion + * while calculating valueHash of Float32 type. Note that this is not applicable for composite + * types containing FLOAT32. + */ + if (type != null + && type.getCode() == Type.Code.FLOAT32 + && !isNull + && Float.isNaN(getFloat32())) { + typeToHash = Type.float64(); + } + + int result = Objects.hash(typeToHash, isNull); + if (!isNull) { + result = 31 * result + valueHash; + } + return result; + } + + /** + * Returns a hash code for the underlying value. {@code this} is guaranteed to represent a + * non-null value. + */ + abstract int valueHash(); + + private AssertionError defaultGetter(Type expectedType) { + checkType(expectedType); + throw new AssertionError("Should have been overridden"); + } + + final void checkType(Type expected) { + if (!getType().equals(expected)) { + throw new IllegalStateException( + "Illegal call to getter of incorrect type. Expected: " + + expected + + " actual: " + + getType()); + } + } + + final void checkNotNull() { + Preconditions.checkState(!isNull(), "Illegal call to getter of null value."); + } + } + + /** + * This {@link Value} implementation is backed by a generic protobuf Value instance. It is used + * for untyped Values that are created by users, and for values with an unrecognized types that + * coming from the backend. 
+ */ + private static class ProtoBackedValueImpl extends AbstractValue { + private final com.google.protobuf.Value value; + + private ProtoBackedValueImpl(com.google.protobuf.Value value, @Nullable Type type) { + super(value.hasNullValue(), type); + this.value = value; + } + + @Override + public boolean getBool() { + checkNotNull(); + Preconditions.checkState(value.hasBoolValue(), "This value does not contain a bool value"); + return value.getBoolValue(); + } + + @Override + public String getString() { + checkNotNull(); + Preconditions.checkState( + value.hasStringValue(), "This value does not contain a string value"); + return value.getStringValue(); + } + + @Override + public double getFloat64() { + checkNotNull(); + Preconditions.checkState( + value.hasNumberValue(), "This value does not contain a number value"); + return value.getNumberValue(); + } + + @Nonnull + @Override + public String getAsString() { + switch (value.getKindCase()) { + case NULL_VALUE: + return NULL_STRING; + case NUMBER_VALUE: + return Double.toString(value.getNumberValue()); + case STRING_VALUE: + return value.getStringValue(); + case BOOL_VALUE: + return Boolean.toString(value.getBoolValue()); + case LIST_VALUE: + return value.getListValue().getValuesList().stream() + .map(element -> Value.untyped(element).getAsString()) + .collect(Collectors.joining(",", "[", "]")); + case STRUCT_VALUE: + throw new IllegalArgumentException( + "Struct value with unrecognized type is not supported"); + case KIND_NOT_SET: + default: + throw new IllegalArgumentException("Kind of value is not set or unknown"); + } + } + + @Nonnull + @Override + public ImmutableList getAsStringList() { + if (value.getKindCase() == KindCase.LIST_VALUE) { + ImmutableList.Builder builder = ImmutableList.builder(); + value.getListValue().getValuesList().stream() + .map(v -> Value.untyped(v).getAsString()) + .forEach(builder::add); + return builder.build(); + } + return ImmutableList.of(getAsString()); + } + + @Override + void 
valueToString(StringBuilder b) { + b.append(value); + } + + @Override + com.google.protobuf.Value valueToProto() { + return value; + } + + @Override + boolean valueEquals(Value v) { + return ((ProtoBackedValueImpl) v).value.equals(value); + } + + @Override + int valueHash() { + return value.hashCode(); + } + } + + private static class BoolImpl extends AbstractValue { + private final boolean value; + + private BoolImpl(boolean isNull, boolean value) { + super(isNull, Type.bool()); + this.value = value; + } + + @Override + public boolean getBool() { + checkNotNull(); + return value; + } + + @Override + com.google.protobuf.Value valueToProto() { + return com.google.protobuf.Value.newBuilder().setBoolValue(value).build(); + } + + @Override + void valueToString(StringBuilder b) { + b.append(value); + } + + @Override + boolean valueEquals(Value v) { + return ((BoolImpl) v).value == value; + } + + @Override + int valueHash() { + return Boolean.valueOf(value).hashCode(); + } + } + + private static class Int64Impl extends AbstractValue { + private final long value; + + private Int64Impl(boolean isNull, long value) { + super(isNull, Type.int64()); + this.value = value; + } + + @Override + public long getInt64() { + checkNotNull(); + return value; + } + + @Override + public T getProtoEnum( + Function method) { + Preconditions.checkNotNull( + method, "Method may not be null. 
Use 'MyProtoEnum::forNumber' as a parameter value."); + checkNotNull(); + return (T) method.apply((int) value); + } + + @Override + com.google.protobuf.Value valueToProto() { + return com.google.protobuf.Value.newBuilder().setStringValue(Long.toString(value)).build(); + } + + @Override + void valueToString(StringBuilder b) { + b.append(value); + } + + @Override + boolean valueEquals(Value v) { + return ((Int64Impl) v).value == value; + } + + @Override + int valueHash() { + return Long.valueOf(value).hashCode(); + } + } + + private static class Float32Impl extends AbstractValue { + private final float value; + + private Float32Impl(boolean isNull, float value) { + super(isNull, Type.float32()); + this.value = value; + } + + @Override + public float getFloat32() { + checkNotNull(); + return value; + } + + @Override + com.google.protobuf.Value valueToProto() { + return com.google.protobuf.Value.newBuilder().setNumberValue(value).build(); + } + + @Override + void valueToString(StringBuilder b) { + b.append(value); + } + + @Override + boolean valueEquals(Value v) { + // NaN == NaN always returns false, so we need a custom check. + if (Float.isNaN(this.value)) { + return Float.isNaN(((Float32Impl) v).value); + } + return ((Float32Impl) v).value == value; + } + + @Override + int valueHash() { + // For backward compatibility of NaN equality checks with Float64 NaNs. + // Refer the comment in `Value.hashCode()` for more details. 
+ if (!isNull() && Float.isNaN(value)) { + return Double.valueOf(Double.NaN).hashCode(); + } + return Float.valueOf(value).hashCode(); + } + } + + private static class Float64Impl extends AbstractValue { + private final double value; + + private Float64Impl(boolean isNull, double value) { + super(isNull, Type.float64()); + this.value = value; + } + + @Override + public double getFloat64() { + checkNotNull(); + return value; + } + + @Override + com.google.protobuf.Value valueToProto() { + return com.google.protobuf.Value.newBuilder().setNumberValue(value).build(); + } + + @Override + void valueToString(StringBuilder b) { + b.append(value); + } + + @Override + boolean valueEquals(Value v) { + // NaN == NaN always returns false, so we need a custom check. + if (Double.isNaN(this.value)) { + return Double.isNaN(((Float64Impl) v).value); + } + return ((Float64Impl) v).value == value; + } + + @Override + int valueHash() { + return Double.valueOf(value).hashCode(); + } + } + + abstract static class AbstractObjectValue extends AbstractValue { + final T value; + + private AbstractObjectValue(boolean isNull, Type type, T value) { + super(isNull, type); + this.value = value; + } + + @Override + com.google.protobuf.Value valueToProto() { + return com.google.protobuf.Value.newBuilder().setStringValue(value.toString()).build(); + } + + @SuppressWarnings("unchecked") + @Override + boolean valueEquals(Value v) { + return ((AbstractObjectValue) v).value.equals(value); + } + + @Override + int valueHash() { + return value.hashCode(); + } + } + + private static class DateImpl extends AbstractObjectValue { + + private DateImpl(boolean isNull, Date value) { + super(isNull, Type.date(), value); + } + + @Override + public Date getDate() { + checkNotNull(); + return value; + } + + @Override + void valueToString(StringBuilder b) { + b.append(value); + } + } + + private static class UuidImpl extends AbstractObjectValue { + + private UuidImpl(boolean isNull, UUID value) { + super(isNull, 
Type.uuid(), value); + } + + @Override + public UUID getUuid() { + checkNotNull(); + return value; + } + + @Override + void valueToString(StringBuilder b) { + b.append(value); + } + } + + private static class IntervalImpl extends AbstractObjectValue { + + private IntervalImpl(boolean isNull, Interval value) { + super(isNull, Type.interval(), value); + } + + @Override + public Interval getInterval() { + checkNotNull(); + return value; + } + + @Override + void valueToString(StringBuilder b) { + b.append(value.toISO8601()); + } + + @Override + com.google.protobuf.Value valueToProto() { + return com.google.protobuf.Value.newBuilder().setStringValue(value.toISO8601()).build(); + } + + @Nonnull + @Override + public String getAsString() { + return isNull() ? NULL_STRING : value.toISO8601(); + } + } + + private static class StringImpl extends AbstractObjectValue { + + private StringImpl(boolean isNull, @Nullable String value) { + super(isNull, Type.string(), value); + } + + @Override + public String getString() { + checkNotNull(); + return value; + } + + @Nonnull + @Override + public String getAsString() { + return isNull() ? NULL_STRING : value; + } + + @Override + void valueToString(StringBuilder b) { + if (value.length() > MAX_DEBUG_STRING_LENGTH) { + b.append(value, 0, MAX_DEBUG_STRING_LENGTH - ELLIPSIS.length()).append(ELLIPSIS); + } else { + b.append(value); + } + } + } + + private static class JsonImpl extends AbstractObjectValue { + + private JsonImpl(boolean isNull, @Nullable String value) { + super(isNull, Type.json(), value); + } + + @Override + public String getJson() { + checkNotNull(); + return value; + } + + @Nonnull + @Override + public String getAsString() { + return isNull() ? 
NULL_STRING : value; + } + + @Override + public String getString() { + return getJson(); + } + + @Override + void valueToString(StringBuilder b) { + if (value.length() > MAX_DEBUG_STRING_LENGTH) { + b.append(value, 0, MAX_DEBUG_STRING_LENGTH - ELLIPSIS.length()).append(ELLIPSIS); + } else { + b.append(value); + } + } + } + + private static class PgJsonbImpl extends AbstractObjectValue { + + private PgJsonbImpl(boolean isNull, @Nullable String value) { + super(isNull, Type.pgJsonb(), value); + } + + @Override + public String getPgJsonb() { + checkNotNull(); + return value; + } + + @Nonnull + @Override + public String getAsString() { + return isNull() ? NULL_STRING : value; + } + + @Override + public String getString() { + return getPgJsonb(); + } + + @Override + void valueToString(StringBuilder b) { + if (value.length() > MAX_DEBUG_STRING_LENGTH) { + b.append(value, 0, MAX_DEBUG_STRING_LENGTH - ELLIPSIS.length()).append(ELLIPSIS); + } else { + b.append(value); + } + } + } + + private static class PgOidImpl extends AbstractValue { + private final long value; + + private PgOidImpl(boolean isNull, long value) { + super(isNull, Type.pgOid()); + this.value = value; + } + + @Override + public long getInt64() { + checkNotNull(); + return value; + } + + @Override + com.google.protobuf.Value valueToProto() { + return com.google.protobuf.Value.newBuilder().setStringValue(Long.toString(value)).build(); + } + + @Override + void valueToString(StringBuilder b) { + b.append(value); + } + + @Override + boolean valueEquals(Value v) { + return ((PgOidImpl) v).value == value; + } + + @Override + int valueHash() { + return Long.valueOf(value).hashCode(); + } + } + + private static class LazyBytesImpl extends AbstractObjectValue { + + private LazyBytesImpl(boolean isNull, LazyByteArray value) { + super(isNull, Type.bytes(), value); + } + + private LazyBytesImpl(boolean isNull, ByteArray value) { + super(isNull, Type.bytes(), value == null ? 
null : new LazyByteArray(value)); + } + + @Override + public ByteArray getBytes() { + checkNotNull(); + return value.getByteArray(); + } + + @Override + public T getProtoMessage(T m) { + Preconditions.checkNotNull( + m, + "Proto message may not be null. Use MyProtoClass.getDefaultInstance() as a parameter" + + " value."); + checkNotNull(); + try { + return (T) + m.toBuilder() + .mergeFrom( + Base64.getDecoder() + .wrap( + CharSource.wrap(value.getBase64String()) + .asByteSource(StandardCharsets.UTF_8) + .openStream())) + .build(); + } catch (IOException ioException) { + throw SpannerExceptionFactory.asSpannerException(ioException); + } + } + + @Override + com.google.protobuf.Value valueToProto() { + return com.google.protobuf.Value.newBuilder().setStringValue(value.getBase64String()).build(); + } + + @Nonnull + @Override + public String getAsString() { + return value == null ? NULL_STRING : value.getBase64String(); + } + + @Override + void valueToString(StringBuilder b) { + b.append(value == null ? null : value.toString()); + } + } + + private static class ProtoMessageImpl extends AbstractObjectValue { + + private ProtoMessageImpl(boolean isNull, ByteArray serializedProtoArray, String protoTypeFqn) { + super(isNull, Type.proto(protoTypeFqn), serializedProtoArray); + } + + @Override + public ByteArray getBytes() { + checkNotNull(); + return value; + } + + @Override + public T getProtoMessage(T m) { + Preconditions.checkNotNull( + m, + "Proto message may not be null. 
Use MyProtoClass.getDefaultInstance() as a parameter" + + " value."); + checkNotNull(); + try { + return (T) m.toBuilder().mergeFrom(value.toByteArray()).build(); + } catch (InvalidProtocolBufferException e) { + throw SpannerExceptionFactory.asSpannerException(e); + } + } + + @Override + com.google.protobuf.Value valueToProto() { + String base64EncodedString = value.toBase64(); + return com.google.protobuf.Value.newBuilder().setStringValue(base64EncodedString).build(); + } + + @Nonnull + @Override + public String getAsString() { + return value == null ? NULL_STRING : value.toBase64(); + } + + @Override + void valueToString(StringBuilder b) { + b.append(value.toBase64()); + } + } + + private static class ProtoEnumImpl extends AbstractObjectValue { + + private ProtoEnumImpl(boolean isNull, Long enumValue, String protoTypeFqn) { + super(isNull, Type.protoEnum(protoTypeFqn), enumValue); + } + + @Override + public long getInt64() { + checkNotNull(); + return value; + } + + @Override + public T getProtoEnum( + Function method) { + Preconditions.checkNotNull( + method, "Method may not be null. 
Use 'MyProtoEnum::forNumber' as a parameter value."); + checkNotNull(); + return (T) method.apply(value.intValue()); + } + + @Override + void valueToString(StringBuilder b) { + b.append(value.toString()); + } + + @Override + com.google.protobuf.Value valueToProto() { + return com.google.protobuf.Value.newBuilder().setStringValue(Long.toString(value)).build(); + } + } + + private static class TimestampImpl extends AbstractObjectValue { + + private static final String COMMIT_TIMESTAMP_STRING = "spanner.commit_timestamp()"; + private final boolean isCommitTimestamp; + + private TimestampImpl(boolean isNull, boolean isCommitTimestamp, Timestamp value) { + super(isNull, Type.timestamp(), value); + this.isCommitTimestamp = isCommitTimestamp; + } + + @Override + public Timestamp getTimestamp() { + checkNotNull(); + Preconditions.checkState(!isCommitTimestamp, "Commit timestamp value"); + return value; + } + + @Override + public boolean isCommitTimestamp() { + return isCommitTimestamp; + } + + @Override + com.google.protobuf.Value valueToProto() { + if (isCommitTimestamp) { + return com.google.protobuf.Value.newBuilder() + .setStringValue(COMMIT_TIMESTAMP_STRING) + .build(); + } + return super.valueToProto(); + } + + @Override + void valueToString(StringBuilder b) { + if (isCommitTimestamp()) { + b.append(COMMIT_TIMESTAMP_STRING); + } else { + b.append(value); + } + } + + @Override + boolean valueEquals(Value v) { + if (isCommitTimestamp) { + return v.isCommitTimestamp(); + } + if (v.isCommitTimestamp()) { + return isCommitTimestamp; + } + return ((TimestampImpl) v).value.equals(value); + } + + @Override + int valueHash() { + if (isCommitTimestamp) { + return Objects.hashCode(isCommitTimestamp); + } + return value.hashCode(); + } + } + + private static class NumericImpl extends AbstractObjectValue { + + private NumericImpl(boolean isNull, BigDecimal value) { + super(isNull, Type.numeric(), value); + } + + @Override + public BigDecimal getNumeric() { + checkNotNull(); + 
return value; + } + + @Override + void valueToString(StringBuilder b) { + b.append(value); + } + } + + private static class PgNumericImpl extends AbstractObjectValue { + private BigDecimal valueAsBigDecimal; + private NumberFormatException bigDecimalConversionError; + private Double valueAsDouble; + private NumberFormatException doubleConversionError; + + private PgNumericImpl(boolean isNull, String value) { + super(isNull, Type.pgNumeric(), value); + } + + @Override + public String getString() { + checkNotNull(); + return value; + } + + @Override + public BigDecimal getNumeric() { + checkNotNull(); + if (bigDecimalConversionError != null) { + throw bigDecimalConversionError; + } + if (valueAsBigDecimal == null) { + try { + valueAsBigDecimal = new BigDecimal(value); + } catch (NumberFormatException e) { + bigDecimalConversionError = e; + throw e; + } + } + return valueAsBigDecimal; + } + + @Override + public double getFloat64() { + checkNotNull(); + if (doubleConversionError != null) { + throw doubleConversionError; + } + if (valueAsDouble == null) { + try { + valueAsDouble = Double.parseDouble(value); + } catch (NumberFormatException e) { + doubleConversionError = e; + throw e; + } + } + return valueAsDouble; + } + + @Override + void valueToString(StringBuilder b) { + b.append(value); + } + } + + private abstract static class PrimitiveArrayImpl extends AbstractValue { + private final BitSet nulls; + + private PrimitiveArrayImpl(boolean isNull, Type elementType, BitSet nulls) { + super(isNull, Type.array(elementType)); + this.nulls = nulls; + } + + boolean isElementNull(int i) { + return nulls != null && nulls.get(i); + } + + List getArray() { + checkNotNull(); + List r = new ArrayList<>(size()); + for (int i = 0; i < size(); ++i) { + r.add(isElementNull(i) ? 
null : getValue(i)); + } + return r; + } + + abstract int size(); + + abstract T getValue(int i); + + abstract com.google.protobuf.Value getValueAsProto(int i); + + @Nonnull + @Override + public ImmutableList getAsStringList() { + ImmutableList.Builder builder = ImmutableList.builder(); + for (int i = 0; i < size(); i++) { + builder.add(isElementNull(i) ? NULL_STRING : String.valueOf(getValue(i))); + } + return builder.build(); + } + + @Override + void valueToString(StringBuilder b) { + b.append(LIST_OPEN); + for (int i = 0; i < size(); ++i) { + if (i > 0) { + b.append(LIST_SEPARATOR); + } + if (nulls != null && nulls.get(i)) { + b.append(NULL_STRING); + } else { + b.append(getValue(i)); + } + } + b.append(LIST_CLOSE); + } + + @Override + int valueHash() { + return 31 * Objects.hashCode(nulls) + arrayHash(); + } + + abstract int arrayHash(); + + @Override + com.google.protobuf.Value valueToProto() { + ListValue.Builder list = ListValue.newBuilder(); + for (int i = 0; i < size(); ++i) { + if (isElementNull(i)) { + list.addValues(NULL_PROTO); + } else { + list.addValues(getValueAsProto(i)); + } + } + return com.google.protobuf.Value.newBuilder().setListValue(list).build(); + } + } + + private static class BoolArrayImpl extends PrimitiveArrayImpl { + private final boolean[] values; + + private BoolArrayImpl(boolean isNull, BitSet nulls, boolean[] values) { + super(isNull, Type.bool(), nulls); + this.values = values; + } + + @Override + public List getBoolArray() { + return getArray(); + } + + @Override + boolean valueEquals(Value v) { + BoolArrayImpl that = (BoolArrayImpl) v; + return Arrays.equals(values, that.values); + } + + @Override + int size() { + return values.length; + } + + @Override + Boolean getValue(int i) { + return values[i]; + } + + @Override + com.google.protobuf.Value getValueAsProto(int i) { + return com.google.protobuf.Value.newBuilder().setBoolValue(values[i]).build(); + } + + @Override + int arrayHash() { + return Arrays.hashCode(values); + } + } 
+ + private static class Int64ArrayImpl extends PrimitiveArrayImpl { + private final long[] values; + + private Int64ArrayImpl(boolean isNull, BitSet nulls, long[] values) { + super(isNull, Type.int64(), nulls); + this.values = values; + } + + @Override + public List getInt64Array() { + return getArray(); + } + + @Override + public List getProtoEnumArray( + Function method) { + Preconditions.checkNotNull( + method, "Method may not be null. Use 'MyProtoEnum::forNumber' as a parameter value."); + checkNotNull(); + + List protoEnumList = new ArrayList<>(); + for (Long enumIntValue : values) { + if (enumIntValue == null) { + protoEnumList.add(null); + } else { + protoEnumList.add((T) method.apply(enumIntValue.intValue())); + } + } + return protoEnumList; + } + + @Override + boolean valueEquals(Value v) { + Int64ArrayImpl that = (Int64ArrayImpl) v; + return Arrays.equals(values, that.values); + } + + @Override + int size() { + return values.length; + } + + @Override + Long getValue(int i) { + return values[i]; + } + + @Override + com.google.protobuf.Value getValueAsProto(int i) { + return com.google.protobuf.Value.newBuilder() + .setStringValue(Long.toString(values[i])) + .build(); + } + + @Override + int arrayHash() { + return Arrays.hashCode(values); + } + } + + private static class Float32ArrayImpl extends PrimitiveArrayImpl { + private final float[] values; + + private Float32ArrayImpl(boolean isNull, BitSet nulls, float[] values) { + super(isNull, Type.float32(), nulls); + this.values = values; + } + + @Override + public List getFloat32Array() { + return getArray(); + } + + @Override + boolean valueEquals(Value v) { + Float32ArrayImpl that = (Float32ArrayImpl) v; + return Arrays.equals(values, that.values); + } + + @Override + int size() { + return values.length; + } + + @Override + Float getValue(int i) { + return values[i]; + } + + @Override + com.google.protobuf.Value getValueAsProto(int i) { + return 
com.google.protobuf.Value.newBuilder().setNumberValue(values[i]).build(); + } + + @Override + int arrayHash() { + return Arrays.hashCode(values); + } + } + + private static class Float64ArrayImpl extends PrimitiveArrayImpl { + private final double[] values; + + private Float64ArrayImpl(boolean isNull, BitSet nulls, double[] values) { + super(isNull, Type.float64(), nulls); + this.values = values; + } + + @Override + public List getFloat64Array() { + return getArray(); + } + + @Override + boolean valueEquals(Value v) { + Float64ArrayImpl that = (Float64ArrayImpl) v; + return Arrays.equals(values, that.values); + } + + @Override + int size() { + return values.length; + } + + @Override + Double getValue(int i) { + return values[i]; + } + + @Override + com.google.protobuf.Value getValueAsProto(int i) { + return com.google.protobuf.Value.newBuilder().setNumberValue(values[i]).build(); + } + + @Override + int arrayHash() { + return Arrays.hashCode(values); + } + } + + abstract static class AbstractArrayValue extends AbstractObjectValue> { + + private AbstractArrayValue(boolean isNull, Type elementType, @Nullable List values) { + super(isNull, Type.array(elementType), values); + } + + @Override + com.google.protobuf.Value valueToProto() { + ListValue.Builder list = ListValue.newBuilder(); + for (T element : value) { + if (element == null) { + list.addValues(NULL_PROTO); + } else { + list.addValuesBuilder().setStringValue(elementToString(element)); + } + } + return com.google.protobuf.Value.newBuilder().setListValue(list).build(); + } + + @Nonnull + @Override + public ImmutableList getAsStringList() { + ImmutableList.Builder builder = ImmutableList.builder(); + for (T element : value) { + builder.add(element == null ? 
NULL_STRING : elementToString(element)); + } + return builder.build(); + } + + String elementToString(T element) { + return element.toString(); + } + + abstract void appendElement(StringBuilder b, T element); + + @Override + void valueToString(StringBuilder b) { + b.append(LIST_OPEN); + for (int i = 0; i < value.size(); ++i) { + if (i > 0) { + b.append(LIST_SEPARATOR); + } + T v = value.get(i); + if (v == null) { + b.append(NULL_STRING); + } else { + appendElement(b, v); + } + } + b.append(LIST_CLOSE); + } + } + + private static class StringArrayImpl extends AbstractArrayValue { + + private StringArrayImpl(boolean isNull, @Nullable List values) { + super(isNull, Type.string(), values); + } + + @Override + public List getStringArray() { + checkNotNull(); + return value; + } + + @Override + void appendElement(StringBuilder b, String element) { + b.append(element); + } + } + + private static class JsonArrayImpl extends AbstractArrayValue { + + private JsonArrayImpl(boolean isNull, @Nullable List values) { + super(isNull, Type.json(), values); + } + + @Override + public List getJsonArray() { + checkNotNull(); + return value; + } + + @Override + public List getStringArray() { + return getJsonArray(); + } + + @Override + void appendElement(StringBuilder b, String element) { + b.append(element); + } + } + + private static class PgJsonbArrayImpl extends AbstractArrayValue { + + private PgJsonbArrayImpl(boolean isNull, @Nullable List values) { + super(isNull, Type.pgJsonb(), values); + } + + @Override + public List getPgJsonbArray() { + checkNotNull(); + return value; + } + + @Override + public List getStringArray() { + return this.getPgJsonbArray(); + } + + @Override + void appendElement(StringBuilder b, String element) { + b.append(element); + } + } + + private static class PgOidArrayImpl extends PrimitiveArrayImpl { + private final long[] values; + + private PgOidArrayImpl(boolean isNull, BitSet nulls, long[] values) { + super(isNull, Type.pgOid(), nulls); + this.values 
= values; + } + + @Override + public List getInt64Array() { + return getArray(); + } + + @Override + boolean valueEquals(Value v) { + PgOidArrayImpl that = (PgOidArrayImpl) v; + return Arrays.equals(values, that.values); + } + + @Override + int size() { + return values.length; + } + + @Override + Long getValue(int i) { + return values[i]; + } + + @Override + com.google.protobuf.Value getValueAsProto(int i) { + return com.google.protobuf.Value.newBuilder() + .setStringValue(Long.toString(values[i])) + .build(); + } + + @Override + int arrayHash() { + return Arrays.hashCode(values); + } + } + + private static class LazyBytesArrayImpl extends AbstractArrayValue { + private transient AbstractLazyInitializer> bytesArray = defaultInitializer(); + + private LazyBytesArrayImpl(boolean isNull, @Nullable List values) { + super(isNull, Type.bytes(), values); + } + + private AbstractLazyInitializer> defaultInitializer() { + return new AbstractLazyInitializer>() { + @Override + protected List initialize() { + return value.stream() + .map(element -> element == null ? null : element.getByteArray()) + .collect(Collectors.toList()); + } + }; + } + + private void readObject(java.io.ObjectInputStream in) + throws IOException, ClassNotFoundException { + in.defaultReadObject(); + bytesArray = defaultInitializer(); + } + + @Override + public List getBytesArray() { + checkNotNull(); + try { + return bytesArray.get(); + } catch (Exception e) { + throw SpannerExceptionFactory.asSpannerException(e); + } + } + + @Override + public List getProtoMessageArray(T m) { + Preconditions.checkNotNull( + m, + "Proto message may not be null. 
Use MyProtoClass.getDefaultInstance() as a parameter" + + " value."); + checkNotNull(); + try { + List protoMessagesList = new ArrayList<>(value.size()); + for (LazyByteArray protoMessageBytes : value) { + if (protoMessageBytes == null) { + protoMessagesList.add(null); + } else { + protoMessagesList.add( + (T) + m.toBuilder() + .mergeFrom( + Base64.getDecoder() + .wrap( + CharSource.wrap(protoMessageBytes.getBase64String()) + .asByteSource(StandardCharsets.UTF_8) + .openStream())) + .build()); + } + } + return protoMessagesList; + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + String elementToString(LazyByteArray element) { + return element.getBase64String(); + } + + @Override + void appendElement(StringBuilder b, LazyByteArray element) { + b.append(elementToString(element)); + } + } + + private static class TimestampArrayImpl extends AbstractArrayValue { + + private TimestampArrayImpl(boolean isNull, @Nullable List values) { + super(isNull, Type.timestamp(), values); + } + + @Override + public List getTimestampArray() { + checkNotNull(); + return value; + } + + @Override + void appendElement(StringBuilder b, Timestamp element) { + b.append(element); + } + } + + private static class ProtoMessageArrayImpl extends AbstractArrayValue { + + private ProtoMessageArrayImpl( + boolean isNull, @Nullable List values, String protoTypeFqn) { + super(isNull, Type.proto(protoTypeFqn), values); + } + + @Override + public List getBytesArray() { + return value; + } + + @Override + public List getProtoMessageArray(T m) { + Preconditions.checkNotNull( + m, + "Proto message may not be null. 
Use MyProtoClass.getDefaultInstance() as a parameter" + + " value."); + checkNotNull(); + try { + List protoMessagesList = new ArrayList<>(value.size()); + for (ByteArray protoMessageBytes : value) { + if (protoMessageBytes == null) { + protoMessagesList.add(null); + } else { + protoMessagesList.add( + (T) m.toBuilder().mergeFrom(protoMessageBytes.toByteArray()).build()); + } + } + return protoMessagesList; + } catch (InvalidProtocolBufferException e) { + throw SpannerExceptionFactory.asSpannerException(e); + } + } + + @Override + String elementToString(ByteArray element) { + return element.toBase64(); + } + + @Override + void appendElement(StringBuilder b, ByteArray element) { + b.append(element.toBase64()); + } + } + + private static class ProtoEnumArrayImpl extends AbstractArrayValue { + + private ProtoEnumArrayImpl(boolean isNull, @Nullable List values, String protoTypeFqn) { + super(isNull, Type.protoEnum(protoTypeFqn), values); + } + + @Override + public List getInt64Array() { + return value; + } + + @Override + public List getProtoEnumArray( + Function method) { + Preconditions.checkNotNull( + method, "Method may not be null. 
Use 'MyProtoEnum::forNumber' as a parameter value."); + checkNotNull(); + + List protoEnumList = new ArrayList<>(); + for (Long enumIntValue : value) { + if (enumIntValue == null) { + protoEnumList.add(null); + } else { + protoEnumList.add((T) method.apply(enumIntValue.intValue())); + } + } + return protoEnumList; + } + + @Override + String elementToString(Long element) { + return Long.toString(element); + } + + @Override + void appendElement(StringBuilder b, Long element) { + b.append(element); + } + } + + private static class DateArrayImpl extends AbstractArrayValue { + + private DateArrayImpl(boolean isNull, @Nullable List values) { + super(isNull, Type.date(), values); + } + + @Override + public List getDateArray() { + checkNotNull(); + return value; + } + + @Override + void appendElement(StringBuilder b, Date element) { + b.append(element); + } + } + + private static class UuidArrayImpl extends AbstractArrayValue { + + private UuidArrayImpl(boolean isNull, @Nullable List values) { + super(isNull, Type.uuid(), values); + } + + @Override + public List getUuidArray() { + checkNotNull(); + return value; + } + + @Override + void appendElement(StringBuilder b, UUID element) { + b.append(element); + } + } + + private static class IntervalArrayImpl extends AbstractArrayValue { + + private IntervalArrayImpl(boolean isNull, @Nullable List values) { + super(isNull, Type.interval(), values); + } + + @Override + public List getIntervalArray() { + checkNotNull(); + return value; + } + + @Override + void appendElement(StringBuilder b, Interval element) { + b.append(element.toISO8601()); + } + + @Override + String elementToString(Interval element) { + return element.toISO8601(); + } + } + + private static class NumericArrayImpl extends AbstractArrayValue { + + private NumericArrayImpl(boolean isNull, @Nullable List values) { + super(isNull, Type.numeric(), values); + } + + @Override + public List getNumericArray() { + checkNotNull(); + return value; + } + + @Override + void 
appendElement(StringBuilder b, BigDecimal element) { + b.append(element); + } + } + + private static class PgNumericArrayImpl extends AbstractArrayValue { + + private List valuesAsBigDecimal; + private NumberFormatException bigDecimalConversionError; + private List valuesAsDouble; + private NumberFormatException doubleConversionError; + + private PgNumericArrayImpl(boolean isNull, @Nullable List values) { + super(isNull, Type.pgNumeric(), values); + } + + @Override + public List getStringArray() { + checkNotNull(); + return value; + } + + @Override + public List getNumericArray() { + checkNotNull(); + if (bigDecimalConversionError != null) { + throw bigDecimalConversionError; + } + if (valuesAsBigDecimal == null) { + try { + valuesAsBigDecimal = + value.stream() + .map(v -> v == null ? null : new BigDecimal(v)) + .collect(Collectors.toList()); + } catch (NumberFormatException e) { + bigDecimalConversionError = e; + throw e; + } + } + return valuesAsBigDecimal; + } + + @Override + public List getFloat64Array() { + checkNotNull(); + if (doubleConversionError != null) { + throw doubleConversionError; + } + if (valuesAsDouble == null) { + try { + valuesAsDouble = + value.stream() + .map(v -> v == null ? null : Double.valueOf(v)) + .collect(Collectors.toList()); + } catch (NumberFormatException e) { + doubleConversionError = e; + throw e; + } + } + return valuesAsDouble; + } + + @Override + void appendElement(StringBuilder b, String element) { + b.append(element); + } + } + + private static class StructImpl extends AbstractObjectValue { + + // Constructor for non-NULL struct values. + private StructImpl(Struct value) { + super(false, value.getType(), value); + } + + // Constructor for NULL struct values. 
+ private StructImpl(Type structType) { + super(true, structType, null); + } + + @Override + public Struct getStruct() { + checkNotNull(); + return value; + } + + @Override + void valueToString(StringBuilder b) { + b.append(value); + } + + @Override + int valueHash() { + return value.hashCode(); + } + + @Override + boolean valueEquals(Value v) { + return ((StructImpl) v).value.equals(value); + } + + private Value getValue(int fieldIndex) { + Type fieldType = value.getColumnType(fieldIndex); + switch (fieldType.getCode()) { + case BOOL: + return Value.bool(value.getBoolean(fieldIndex)); + case INT64: + return Value.int64(value.getLong(fieldIndex)); + case STRING: + return Value.string(value.getString(fieldIndex)); + case JSON: + return Value.json(value.getJson(fieldIndex)); + case PG_JSONB: + return Value.pgJsonb(value.getPgJsonb(fieldIndex)); + case BYTES: + return Value.bytes(value.getBytes(fieldIndex)); + case FLOAT32: + return Value.float32(value.getFloat(fieldIndex)); + case FLOAT64: + return Value.float64(value.getDouble(fieldIndex)); + case NUMERIC: + return Value.numeric(value.getBigDecimal(fieldIndex)); + case PG_NUMERIC: + return Value.pgNumeric(value.getString(fieldIndex)); + case PG_OID: + return Value.pgOid(value.getLong(fieldIndex)); + case DATE: + return Value.date(value.getDate(fieldIndex)); + case UUID: + return Value.uuid(value.getUuid(fieldIndex)); + case TIMESTAMP: + return Value.timestamp(value.getTimestamp(fieldIndex)); + case INTERVAL: + return Value.interval(value.getInterval(fieldIndex)); + case PROTO: + return Value.protoMessage(value.getBytes(fieldIndex), fieldType.getProtoTypeFqn()); + case ENUM: + return Value.protoEnum(value.getLong(fieldIndex), fieldType.getProtoTypeFqn()); + case STRUCT: + return Value.struct(value.getStruct(fieldIndex)); + case ARRAY: + { + Type elementType = fieldType.getArrayElementType(); + switch (elementType.getCode()) { + case BOOL: + return Value.boolArray(value.getBooleanList(fieldIndex)); + case INT64: + 
case ENUM: + return Value.int64Array(value.getLongList(fieldIndex)); + case STRING: + return Value.stringArray(value.getStringList(fieldIndex)); + case JSON: + return Value.jsonArray(value.getJsonList(fieldIndex)); + case PG_JSONB: + return Value.pgJsonbArray(value.getPgJsonbList(fieldIndex)); + case PG_OID: + return Value.pgOidArray(value.getLongList(fieldIndex)); + case BYTES: + case PROTO: + return Value.bytesArray(value.getBytesList(fieldIndex)); + case FLOAT32: + return Value.float32Array(value.getFloatList(fieldIndex)); + case FLOAT64: + return Value.float64Array(value.getDoubleList(fieldIndex)); + case NUMERIC: + return Value.numericArray(value.getBigDecimalList(fieldIndex)); + case PG_NUMERIC: + return Value.pgNumericArray(value.getStringList(fieldIndex)); + case DATE: + return Value.dateArray(value.getDateList(fieldIndex)); + case UUID: + return Value.uuidArray(value.getUuidList(fieldIndex)); + case TIMESTAMP: + return Value.timestampArray(value.getTimestampList(fieldIndex)); + case INTERVAL: + return Value.intervalArray(value.getIntervalList(fieldIndex)); + case STRUCT: + return Value.structArray(elementType, value.getStructList(fieldIndex)); + case ARRAY: + throw new UnsupportedOperationException( + "ARRAY field types are not " + + "supported inside STRUCT-typed values."); + default: + throw new IllegalArgumentException( + "Unrecognized array element type : " + fieldType); + } + } + default: + throw new IllegalArgumentException("Unrecognized field type : " + fieldType); + } + } + + @Override + com.google.protobuf.Value valueToProto() { + checkNotNull(); + ListValue.Builder struct = ListValue.newBuilder(); + for (int fieldIndex = 0; fieldIndex < value.getColumnCount(); ++fieldIndex) { + if (value.isNull(fieldIndex)) { + struct.addValues(NULL_PROTO); + } else { + struct.addValues(getValue(fieldIndex).toProto()); + } + } + return com.google.protobuf.Value.newBuilder().setListValue(struct).build(); + } + } + + private static class StructArrayImpl extends 
AbstractArrayValue { + private static final Joiner joiner = Joiner.on(LIST_SEPARATOR).useForNull(NULL_STRING); + + private StructArrayImpl(Type elementType, @Nullable List values) { + super(values == null, elementType, values); + } + + @Override + public List getStructArray() { + checkNotNull(); + return value; + } + + @Override + com.google.protobuf.Value valueToProto() { + ListValue.Builder list = ListValue.newBuilder(); + for (Struct element : value) { + if (element == null) { + list.addValues(NULL_PROTO); + } else { + list.addValues(Value.struct(element).toProto()); + } + } + return com.google.protobuf.Value.newBuilder().setListValue(list).build(); + } + + @Override + void appendElement(StringBuilder b, Struct element) { + b.append(element); + } + + @Override + void valueToString(StringBuilder b) { + b.append(LIST_OPEN); + joiner.appendTo(b, value); + b.append(LIST_CLOSE); + } + + @Override + boolean valueEquals(Value v) { + return ((StructArrayImpl) v).value.equals(value); + } + + @Override + int valueHash() { + return value.hashCode(); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ValueBinder.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ValueBinder.java new file mode 100644 index 000000000000..e0b420e07ab7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ValueBinder.java @@ -0,0 +1,351 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Descriptors.Descriptor; +import com.google.protobuf.Descriptors.EnumDescriptor; +import com.google.protobuf.ProtocolMessageEnum; +import java.math.BigDecimal; +import java.util.UUID; +import javax.annotation.Nullable; + +/** + * An interface for binding a {@link Value} in some context. Users of the Cloud Spanner client + * library never create a {@code ValueBinder} directly; instead this interface is returned from + * other parts of the library involved in {@code Value} construction. For example, {@link + * Mutation.WriteBuilder#set(String)} returns a binder to bind a column value, and {@code + * Statement#bind(String)} returns a binder to bind a parameter to a value. + * + *

{@code ValueBinder} subclasses typically carry state and are therefore not thread-safe, + * although the core implementation itself is thread-safe. + * + * @param The context which is used to bind the {@link Value}. + */ +public abstract class ValueBinder { + /** + * Intentionally package-protected constructor; only the Cloud Spanner library can create + * instances. + */ + ValueBinder() {} + + /** + * Subclasses should implement this method to handle value binding. + * + *

This method is intentionally package-protected rather than protected; the internal API is + * subject to change. + * + * @param value the newly bound value + * @return the object to return from the bind call ({@code to(...)} + */ + abstract R handle(Value value); + + /** Binds a {@link Value} */ + public R to(Value value) { + return handle(value); + } + + /** Binds to {@code Value.bool(value)} */ + public R to(boolean value) { + return handle(Value.bool(value)); + } + + /** Binds to {@code Value.bool(value)} */ + public R to(@Nullable Boolean value) { + return handle(Value.bool(value)); + } + + /** Binds to {@code Value.int64(value)} */ + public R to(long value) { + return handle(Value.int64(value)); + } + + /** Binds to {@code Value.int64(value)} */ + public R to(@Nullable Long value) { + return handle(Value.int64(value)); + } + + /** Binds to {@code Value.float32(value)} */ + public R to(float value) { + return handle(Value.float32(value)); + } + + /** Binds to {@code Value.float32(value)} */ + public R to(@Nullable Float value) { + return handle(Value.float32(value)); + } + + /** Binds to {@code Value.float64(value)} */ + public R to(double value) { + return handle(Value.float64(value)); + } + + /** Binds to {@code Value.float64(value)} */ + public R to(@Nullable Double value) { + return handle(Value.float64(value)); + } + + /** Binds to {@code Value.numeric(value)} */ + public R to(BigDecimal value) { + return handle(Value.numeric(value)); + } + + /** Binds to {@code Value.string(value)} */ + public R to(@Nullable String value) { + return handle(Value.string(value)); + } + + /** Binds to {@code Value.protoMessage(value)} */ + public R to(AbstractMessage m) { + return handle(Value.protoMessage(m)); + } + + /** Binds to {@code Value.protoMessage(value, protoType)} */ + public R to(@Nullable ByteArray v, String protoTypFqn) { + return handle(Value.protoMessage(v, protoTypFqn)); + } + + /** Binds to {@code Value.protoMessage(value, descriptor)} */ + public R 
to(@Nullable ByteArray v, Descriptor descriptor) { + return handle(Value.protoMessage(v, descriptor)); + } + + /** Binds to {@code Value.protoEnum(value)} */ + public R to(ProtocolMessageEnum value) { + return handle(Value.protoEnum(value)); + } + + /** Binds to {@code Value.protoEnum(value, protoType)} */ + public R to(@Nullable Long v, String protoTypFqn) { + return handle(Value.protoEnum(v, protoTypFqn)); + } + + /** Binds to {@code Value.protoEnum(value, enumDescriptor)} */ + public R to(@Nullable Long v, EnumDescriptor enumDescriptor) { + return handle(Value.protoEnum(v, enumDescriptor)); + } + + /** Binds to {@code Value.protoEnum(value, protoType)} */ + public R to(long v, String protoTypFqn) { + return handle(Value.protoEnum(v, protoTypFqn)); + } + + /** + * Binds to {@code Value.bytes(value)}. Use {@link #to(Value)} in combination with {@link + * Value#bytesFromBase64(String)} if you already have the value that you want to bind in base64 + * format. This prevents unnecessary decoding and encoding of base64 strings. 
+ */ + public R to(@Nullable ByteArray value) { + return handle(Value.bytes(value)); + } + + /** Binds to {@code Value.timestamp(value)} */ + public R to(@Nullable Timestamp value) { + return handle(Value.timestamp(value)); + } + + /** Binds to {@code Value.date(value)} */ + public R to(@Nullable Date value) { + return handle(Value.date(value)); + } + + /** Binds to {@code Value.uuid(value)} */ + public R to(@Nullable UUID value) { + return handle(Value.uuid(value)); + } + + /** Binds to {@code Value.interval(value)} */ + public R to(@Nullable Interval value) { + return handle(Value.interval(value)); + } + + /** Binds a non-{@code NULL} struct value to {@code Value.struct(value)} */ + public R to(Struct value) { + return handle(Value.struct(value)); + } + + /** + * Binds a nullable {@code Struct} reference with given {@code Type} to {@code + * Value.struct(type,value} + */ + public R to(Type type, @Nullable Struct value) { + return handle(Value.struct(type, value)); + } + + /** Binds to {@code Value.boolArray(values)} */ + public R toBoolArray(@Nullable boolean[] values) { + return handle(Value.boolArray(values)); + } + + /** Binds to {@code Value.boolArray(values, int, pos)} */ + public R toBoolArray(@Nullable boolean[] values, int pos, int length) { + return handle(Value.boolArray(values, pos, length)); + } + + /** Binds to {@code Value.boolArray(values)} */ + public R toBoolArray(@Nullable Iterable values) { + return handle(Value.boolArray(values)); + } + + /** Binds to {@code Value.int64Array(values)} */ + public R toInt64Array(@Nullable long[] values) { + return handle(Value.int64Array(values)); + } + + /** Binds to {@code Value.int64Array(values, pos, length)} */ + public R toInt64Array(@Nullable long[] values, int pos, int length) { + return handle(Value.int64Array(values, pos, length)); + } + + /** Binds to {@code Value.int64Array(values)} */ + public R toInt64Array(@Nullable Iterable values) { + return handle(Value.int64Array(values)); + } + + /** Binds to 
{@code Value.float32Array(values)} */ + public R toFloat32Array(@Nullable float[] values) { + return handle(Value.float32Array(values)); + } + + /** Binds to {@code Value.float32Array(values, pos, length)} */ + public R toFloat32Array(@Nullable float[] values, int pos, int length) { + return handle(Value.float32Array(values, pos, length)); + } + + /** Binds to {@code Value.float32Array(values)} */ + public R toFloat32Array(@Nullable Iterable values) { + return handle(Value.float32Array(values)); + } + + /** Binds to {@code Value.float64Array(values)} */ + public R toFloat64Array(@Nullable double[] values) { + return handle(Value.float64Array(values)); + } + + /** Binds to {@code Value.float64Array(values, pos, length)} */ + public R toFloat64Array(@Nullable double[] values, int pos, int length) { + return handle(Value.float64Array(values, pos, length)); + } + + /** Binds to {@code Value.float64Array(values)} */ + public R toFloat64Array(@Nullable Iterable values) { + return handle(Value.float64Array(values)); + } + + /** Binds to {@code Value.numericArray(values)} */ + public R toNumericArray(@Nullable Iterable values) { + return handle(Value.numericArray(values)); + } + + /** Binds to {@code Value.pgNumericArray(values)} */ + public R toPgNumericArray(@Nullable Iterable values) { + return handle(Value.pgNumericArray(values)); + } + + /** Binds to {@code Value.stringArray(values)} */ + public R toStringArray(@Nullable Iterable values) { + return handle(Value.stringArray(values)); + } + + /** Binds to {@code Value.jsonArray(values)} */ + public R toJsonArray(@Nullable Iterable values) { + return handle(Value.jsonArray(values)); + } + + /** Binds to {@code Value.jsonbArray(values)} */ + public R toPgJsonbArray(@Nullable Iterable values) { + return handle(Value.pgJsonbArray(values)); + } + + /** Binds to {@code Value.pgOidArray(values)} */ + public R toPgOidArray(@Nullable long[] values) { + return handle(Value.pgOidArray(values)); + } + + /** Binds to {@code 
Value.pgOidArray(values, pos, length)} */ + public R toPgOidArray(@Nullable long[] values, int pos, int length) { + return handle(Value.pgOidArray(values, pos, length)); + } + + /** Binds to {@code Value.pgOidArray(values)} */ + public R toPgOidArray(@Nullable Iterable values) { + return handle(Value.pgOidArray(values)); + } + + /** Binds to {@code Value.bytesArray(values)} */ + public R toBytesArray(@Nullable Iterable values) { + return handle(Value.bytesArray(values)); + } + + /** + * Binds to {@code Value.bytesArray(values)}. The given strings must be valid base64 encoded + * strings. Use this method instead of {@link #toBytesArray(Iterable)} if you already have the + * values in base64 format to prevent unnecessary decoding and encoding to/from base64. + */ + public R toBytesArrayFromBase64(@Nullable Iterable valuesAsBase64Strings) { + return handle(Value.bytesArrayFromBase64(valuesAsBase64Strings)); + } + + /** Binds to {@code Value.timestampArray(values)} */ + public R toTimestampArray(@Nullable Iterable values) { + return handle(Value.timestampArray(values)); + } + + /** Binds to {@code Value.protoMessageArray(values, descriptor)} */ + public R toProtoMessageArray(@Nullable Iterable values, Descriptor descriptor) { + return handle(Value.protoMessageArray(values, descriptor)); + } + + /** Binds to {@code Value.protoMessageArray(values, protoTypeFq)} */ + public R toProtoMessageArray(@Nullable Iterable values, String protoTypeFq) { + return handle(Value.protoMessageArray(values, protoTypeFq)); + } + + /** Binds to {@code Value.protoEnumArray(values, descriptor)} */ + public R toProtoEnumArray( + @Nullable Iterable values, EnumDescriptor descriptor) { + return handle(Value.protoEnumArray(values, descriptor)); + } + + /** Binds to {@code Value.protoEnumArray(values, protoTypeFq)} */ + public R toProtoEnumArray(@Nullable Iterable values, String protoTypeFq) { + return handle(Value.protoEnumArray(values, protoTypeFq)); + } + + /** Binds to {@code 
Value.dateArray(values)} */ + public R toDateArray(@Nullable Iterable values) { + return handle(Value.dateArray(values)); + } + + /** Binds to {@code Value.uuidArray(values)} */ + public R toUuidArray(@Nullable Iterable values) { + return handle(Value.uuidArray(values)); + } + + /** Binds to {@code Value.intervalArray(values)} */ + public R toIntervalArray(@Nullable Iterable values) { + return handle(Value.intervalArray(values)); + } + + /** Binds to {@code Value.structArray(fieldTypes, values)} */ + public R toStructArray(Type elementType, @Nullable Iterable values) { + return handle(Value.structArray(elementType, values)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/XGoogSpannerRequestId.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/XGoogSpannerRequestId.java new file mode 100644 index 000000000000..d858fdb9273b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/XGoogSpannerRequestId.java @@ -0,0 +1,235 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.core.InternalApi; +import com.google.common.annotations.VisibleForTesting; +import io.grpc.CallOptions; +import io.grpc.Metadata; +import java.math.BigInteger; +import java.security.SecureRandom; +import java.util.Objects; +import java.util.regex.MatchResult; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +@InternalApi +public class XGoogSpannerRequestId { + // 1. Generate the random process ID singleton. + @VisibleForTesting + static final String RAND_PROCESS_ID = XGoogSpannerRequestId.generateRandProcessId(); + + public static String REQUEST_ID_HEADER_NAME = "x-goog-spanner-request-id"; + public static final Metadata.Key REQUEST_ID_HEADER_KEY = + Metadata.Key.of(REQUEST_ID_HEADER_NAME, Metadata.ASCII_STRING_MARSHALLER); + public static final CallOptions.Key REQUEST_ID_CALL_OPTIONS_KEY = + CallOptions.Key.create("XGoogSpannerRequestId"); + + @VisibleForTesting + static final long VERSION = 1; // The version of the specification being implemented. 
+ + private final long nthClientId; + private final long nthRequest; + private long nthChannelId; + private long attempt; + + XGoogSpannerRequestId(long nthClientId, long nthChannelId, long nthRequest, long attempt) { + this.nthClientId = nthClientId; + this.nthChannelId = nthChannelId; + this.nthRequest = nthRequest; + this.attempt = attempt; + } + + public static XGoogSpannerRequestId of( + long nthClientId, long nthChannelId, long nthRequest, long attempt) { + return new XGoogSpannerRequestId(nthClientId, nthChannelId, nthRequest, attempt); + } + + @VisibleForTesting + long getNthClientId() { + return nthClientId; + } + + @VisibleForTesting + long getNthChannelId() { + return nthChannelId; + } + + boolean hasChannelId() { + return nthChannelId > 0; + } + + @VisibleForTesting + long getAttempt() { + return this.attempt; + } + + @VisibleForTesting + long getNthRequest() { + return this.nthRequest; + } + + @VisibleForTesting + static final Pattern REGEX = + Pattern.compile("^(\\d)\\.([0-9a-z]{16})\\.(\\d+)\\.(\\d+)\\.(\\d+)\\.(\\d+)$"); + + public static XGoogSpannerRequestId of(String s) { + Matcher m = XGoogSpannerRequestId.REGEX.matcher(s); + if (!m.matches()) { + throw new IllegalStateException( + s + " does not match " + XGoogSpannerRequestId.REGEX.pattern()); + } + + MatchResult mr = m.toMatchResult(); + + return new XGoogSpannerRequestId( + Long.parseLong(mr.group(3)), + Long.parseLong(mr.group(4)), + Long.parseLong(mr.group(5)), + Long.parseLong(mr.group(6))); + } + + private static String generateRandProcessId() { + // Expecting to use 64-bits of randomness to avoid clashes. + BigInteger bigInt = new BigInteger(64, new SecureRandom()); + return String.format("%016x", bigInt); + } + + /** Returns the string representation of this RequestId as it should be sent to Spanner. 
*/ + public String getHeaderValue() { + return String.format( + "%d.%s.%d.%d.%d.%d", + XGoogSpannerRequestId.VERSION, + XGoogSpannerRequestId.RAND_PROCESS_ID, + this.nthClientId, + this.nthChannelId, + this.nthRequest, + this.attempt); + } + + @Override + public String toString() { + return String.format( + "%d.%s.%d.%s.%d.%d", + XGoogSpannerRequestId.VERSION, + XGoogSpannerRequestId.RAND_PROCESS_ID, + this.nthClientId, + this.nthChannelId < 0 ? "x" : String.valueOf(this.nthChannelId), + this.nthRequest, + this.attempt); + } + + public String debugToString() { + return String.format( + "%d.%s.nth_client=%d.nth_chan=%d.nth_req=%d.attempt=%d", + XGoogSpannerRequestId.VERSION, + XGoogSpannerRequestId.RAND_PROCESS_ID, + this.nthClientId, + this.nthChannelId, + this.nthRequest, + this.attempt); + } + + @VisibleForTesting + boolean isGreaterThan(XGoogSpannerRequestId other) { + if (this.nthClientId != other.nthClientId) { + return this.nthClientId > other.nthClientId; + } + if (this.nthChannelId != other.nthChannelId) { + return this.nthChannelId > other.nthChannelId; + } + if (this.nthRequest != other.nthRequest) { + return this.nthRequest > other.nthRequest; + } + return this.attempt > other.attempt; + } + + @Override + public boolean equals(Object other) { + // instanceof for a null object returns false. 
+ if (!(other instanceof XGoogSpannerRequestId)) { + return false; + } + + XGoogSpannerRequestId otherReqId = (XGoogSpannerRequestId) (other); + + return Objects.equals(this.nthClientId, otherReqId.nthClientId) + && Objects.equals(this.nthChannelId, otherReqId.nthChannelId) + && Objects.equals(this.nthRequest, otherReqId.nthRequest) + && Objects.equals(this.attempt, otherReqId.attempt); + } + + public void incrementAttempt() { + this.attempt++; + } + + @Override + public int hashCode() { + return Objects.hash(this.nthClientId, this.nthChannelId, this.nthRequest, this.attempt); + } + + @InternalApi + public interface RequestIdCreator { + long getClientId(); + + XGoogSpannerRequestId nextRequestId(long channelId); + + void reset(); + } + + // TODO: Move this class into test code. + static final class NoopRequestIdCreator implements RequestIdCreator { + static final NoopRequestIdCreator INSTANCE = new NoopRequestIdCreator(); + + private NoopRequestIdCreator() {} + + @Override + public long getClientId() { + return 1L; + } + + @Override + public XGoogSpannerRequestId nextRequestId(long channelId) { + return XGoogSpannerRequestId.of(1, channelId, 1, 0); + } + + @Override + public void reset() {} + } + + public void setChannelId(long channelId) { + this.nthChannelId = channelId; + } + + @VisibleForTesting + XGoogSpannerRequestId withNthRequest(long replacementNthRequest) { + return XGoogSpannerRequestId.of( + this.nthClientId, this.nthChannelId, replacementNthRequest, this.attempt); + } + + @VisibleForTesting + XGoogSpannerRequestId withChannelId(long replacementChannelId) { + return XGoogSpannerRequestId.of( + this.nthClientId, replacementChannelId, this.nthRequest, this.attempt); + } + + @VisibleForTesting + XGoogSpannerRequestId withNthClientId(long replacementClientId) { + return XGoogSpannerRequestId.of( + replacementClientId, this.nthChannelId, this.nthRequest, this.attempt); + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClient.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClient.java new file mode 100644 index 000000000000..a75e9d546eb6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClient.java @@ -0,0 +1,5789 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.admin.database.v1; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.httpjson.longrunning.OperationsClient; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.paging.AbstractFixedSizeCollection; +import com.google.api.gax.paging.AbstractPage; +import com.google.api.gax.paging.AbstractPagedListResponse; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PageContext; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.api.resourcenames.ResourceName; +import com.google.cloud.spanner.admin.database.v1.stub.DatabaseAdminStub; +import com.google.cloud.spanner.admin.database.v1.stub.DatabaseAdminStubSettings; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import com.google.protobuf.FieldMask; +import com.google.protobuf.Timestamp; +import com.google.spanner.admin.database.v1.AddSplitPointsRequest; +import com.google.spanner.admin.database.v1.AddSplitPointsResponse; +import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupName; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.BackupScheduleName; +import com.google.spanner.admin.database.v1.CopyBackupMetadata; +import com.google.spanner.admin.database.v1.CopyBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupMetadata; +import com.google.spanner.admin.database.v1.CreateBackupRequest; +import 
com.google.spanner.admin.database.v1.CreateBackupScheduleRequest; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.DatabaseRole; +import com.google.spanner.admin.database.v1.DeleteBackupRequest; +import com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest; +import com.google.spanner.admin.database.v1.DropDatabaseRequest; +import com.google.spanner.admin.database.v1.GetBackupRequest; +import com.google.spanner.admin.database.v1.GetBackupScheduleRequest; +import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; +import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; +import com.google.spanner.admin.database.v1.GetDatabaseRequest; +import com.google.spanner.admin.database.v1.InstanceName; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse; +import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; +import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; +import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; +import com.google.spanner.admin.database.v1.ListBackupSchedulesResponse; +import com.google.spanner.admin.database.v1.ListBackupsRequest; +import com.google.spanner.admin.database.v1.ListBackupsResponse; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse; +import com.google.spanner.admin.database.v1.ListDatabaseRolesRequest; +import com.google.spanner.admin.database.v1.ListDatabaseRolesResponse; +import com.google.spanner.admin.database.v1.ListDatabasesRequest; +import com.google.spanner.admin.database.v1.ListDatabasesResponse; 
+import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; +import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; +import com.google.spanner.admin.database.v1.SplitPoints; +import com.google.spanner.admin.database.v1.UpdateBackupRequest; +import com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; +import com.google.spanner.admin.database.v1.UpdateDatabaseMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseRequest; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Service Description: Cloud Spanner Database Admin API + * + *

The Cloud Spanner Database Admin API can be used to: * create, drop, and list databases + * * update the schema of pre-existing databases * create, delete, copy and list backups for + * a database * restore a database from an existing backup + * + *

This class provides the ability to make remote calls to the backing service through method + * calls that map to API methods. Sample code to get started: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+ *   DatabaseName name = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]");
+ *   Database response = databaseAdminClient.getDatabase(name);
+ * }
+ * }
+ * + *

Note: close() needs to be called on the DatabaseAdminClient object to clean up resources such + * as threads. In the example above, try-with-resources is used, which automatically calls close(). + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
Methods
MethodDescriptionMethod Variants

ListDatabases

Lists Cloud Spanner databases.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • listDatabases(ListDatabasesRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • listDatabases(InstanceName parent) + *

  • listDatabases(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • listDatabasesPagedCallable() + *

  • listDatabasesCallable() + *

+ *

CreateDatabase

Creates a new Cloud Spanner database and starts to prepare it for serving. The returned [long-running operation][google.longrunning.Operation] will have a name of the format `<database_name>/operations/<operation_id>` and can be used to track preparation of the database. The [metadata][google.longrunning.Operation.metadata] field type is [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The [response][google.longrunning.Operation.response] field type is [Database][google.spanner.admin.database.v1.Database], if successful.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • createDatabaseAsync(CreateDatabaseRequest request) + *

+ *

Methods that return long-running operations have "Async" method variants that return `OperationFuture`, which is used to track polling of the service.

+ *
    + *
  • createDatabaseAsync(InstanceName parent, String createStatement) + *

  • createDatabaseAsync(String parent, String createStatement) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • createDatabaseOperationCallable() + *

  • createDatabaseCallable() + *

+ *

GetDatabase

Gets the state of a Cloud Spanner database.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getDatabase(GetDatabaseRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getDatabase(DatabaseName name) + *

  • getDatabase(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getDatabaseCallable() + *

+ *

UpdateDatabase

Updates a Cloud Spanner database. The returned [long-running operation][google.longrunning.Operation] can be used to track the progress of updating the database. If the named database does not exist, returns `NOT_FOUND`. + *

While the operation is pending: + *

* The database's [reconciling][google.spanner.admin.database.v1.Database.reconciling] field is set to true. * Cancelling the operation is best-effort. If the cancellation succeeds, the operation metadata's [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time] is set, the updates are reverted, and the operation terminates with a `CANCELLED` status. * New UpdateDatabase requests will return a `FAILED_PRECONDITION` error until the pending operation is done (returns successfully or with error). * Reading the database via the API continues to give the pre-request values. + *

Upon completion of the returned operation: + *

* The new values are in effect and readable via the API. * The database's [reconciling][google.spanner.admin.database.v1.Database.reconciling] field becomes false. + *

The returned [long-running operation][google.longrunning.Operation] will have a name of the format `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>` and can be used to track the database modification. The [metadata][google.longrunning.Operation.metadata] field type is [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata]. The [response][google.longrunning.Operation.response] field type is [Database][google.spanner.admin.database.v1.Database], if successful.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • updateDatabaseAsync(UpdateDatabaseRequest request) + *

+ *

Methods that return long-running operations have "Async" method variants that return `OperationFuture`, which is used to track polling of the service.

+ *
    + *
  • updateDatabaseAsync(Database database, FieldMask updateMask) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • updateDatabaseOperationCallable() + *

  • updateDatabaseCallable() + *

+ *

UpdateDatabaseDdl

Updates the schema of a Cloud Spanner database by creating/altering/dropping tables, columns, indexes, etc. The returned [long-running operation][google.longrunning.Operation] will have a name of the format `<database_name>/operations/<operation_id>` and can be used to track execution of the schema change(s). The [metadata][google.longrunning.Operation.metadata] field type is [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • updateDatabaseDdlAsync(UpdateDatabaseDdlRequest request) + *

+ *

Methods that return long-running operations have "Async" method variants that return `OperationFuture`, which is used to track polling of the service.

+ *
    + *
  • updateDatabaseDdlAsync(DatabaseName database, List<String> statements) + *

  • updateDatabaseDdlAsync(String database, List<String> statements) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • updateDatabaseDdlOperationCallable() + *

  • updateDatabaseDdlCallable() + *

+ *

DropDatabase

Drops (aka deletes) a Cloud Spanner database. Completed backups for the database will be retained according to their `expire_time`. Note: Cloud Spanner might continue to accept requests for a few seconds after the database has been deleted.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • dropDatabase(DropDatabaseRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • dropDatabase(DatabaseName database) + *

  • dropDatabase(String database) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • dropDatabaseCallable() + *

+ *

GetDatabaseDdl

Returns the schema of a Cloud Spanner database as a list of formatted DDL statements. This method does not show pending schema updates, those may be queried using the [Operations][google.longrunning.Operations] API.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getDatabaseDdl(GetDatabaseDdlRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getDatabaseDdl(DatabaseName database) + *

  • getDatabaseDdl(String database) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getDatabaseDdlCallable() + *

+ *

SetIamPolicy

Sets the access control policy on a database or backup resource. Replaces any existing policy. + *

Authorization requires `spanner.databases.setIamPolicy` permission on [resource][google.iam.v1.SetIamPolicyRequest.resource]. For backups, authorization requires `spanner.backups.setIamPolicy` permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • setIamPolicy(SetIamPolicyRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • setIamPolicy(ResourceName resource, Policy policy) + *

  • setIamPolicy(String resource, Policy policy) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • setIamPolicyCallable() + *

+ *

GetIamPolicy

Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists but does not have a policy set. + *

Authorization requires `spanner.databases.getIamPolicy` permission on [resource][google.iam.v1.GetIamPolicyRequest.resource]. For backups, authorization requires `spanner.backups.getIamPolicy` permission on [resource][google.iam.v1.GetIamPolicyRequest.resource].

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getIamPolicy(GetIamPolicyRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getIamPolicy(ResourceName resource) + *

  • getIamPolicy(String resource) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getIamPolicyCallable() + *

+ *

TestIamPermissions

Returns permissions that the caller has on the specified database or backup resource. + *

Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. Calling this method on a backup that does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` permission on the containing instance.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • testIamPermissions(TestIamPermissionsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • testIamPermissions(ResourceName resource, List<String> permissions) + *

  • testIamPermissions(String resource, List<String> permissions) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • testIamPermissionsCallable() + *

+ *

CreateBackup

Starts creating a new Cloud Spanner Backup. The returned backup [long-running operation][google.longrunning.Operation] will have a name of the format `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>` and can be used to track creation of the backup. The [metadata][google.longrunning.Operation.metadata] field type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. The [response][google.longrunning.Operation.response] field type is [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the creation and delete the backup. There can be only one pending backup creation per database. Backup creation of different databases can run concurrently.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • createBackupAsync(CreateBackupRequest request) + *

+ *

Methods that return long-running operations have "Async" method variants that return `OperationFuture`, which is used to track polling of the service.

+ *
    + *
  • createBackupAsync(InstanceName parent, Backup backup, String backupId) + *

  • createBackupAsync(String parent, Backup backup, String backupId) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • createBackupOperationCallable() + *

  • createBackupCallable() + *

+ *

CopyBackup

Starts copying a Cloud Spanner Backup. The returned backup [long-running operation][google.longrunning.Operation] will have a name of the format `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>` and can be used to track copying of the backup. The operation is associated with the destination backup. The [metadata][google.longrunning.Operation.metadata] field type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. The [response][google.longrunning.Operation.response] field type is [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the copying and delete the destination backup. Concurrent CopyBackup requests can run on the same source backup.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • copyBackupAsync(CopyBackupRequest request) + *

+ *

Methods that return long-running operations have "Async" method variants that return `OperationFuture`, which is used to track polling of the service.

+ *
    + *
  • copyBackupAsync(InstanceName parent, String backupId, BackupName sourceBackup, Timestamp expireTime) + *

  • copyBackupAsync(InstanceName parent, String backupId, String sourceBackup, Timestamp expireTime) + *

  • copyBackupAsync(String parent, String backupId, BackupName sourceBackup, Timestamp expireTime) + *

  • copyBackupAsync(String parent, String backupId, String sourceBackup, Timestamp expireTime) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • copyBackupOperationCallable() + *

  • copyBackupCallable() + *

+ *

GetBackup

Gets metadata on a pending or completed [Backup][google.spanner.admin.database.v1.Backup].

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getBackup(GetBackupRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getBackup(BackupName name) + *

  • getBackup(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getBackupCallable() + *

+ *

UpdateBackup

Updates a pending or completed [Backup][google.spanner.admin.database.v1.Backup].

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • updateBackup(UpdateBackupRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • updateBackup(Backup backup, FieldMask updateMask) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • updateBackupCallable() + *

+ *

DeleteBackup

Deletes a pending or completed [Backup][google.spanner.admin.database.v1.Backup].

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • deleteBackup(DeleteBackupRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • deleteBackup(BackupName name) + *

  • deleteBackup(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • deleteBackupCallable() + *

+ *

ListBackups

Lists completed and pending backups. Backups returned are ordered by `create_time` in descending order, starting from the most recent `create_time`.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • listBackups(ListBackupsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • listBackups(InstanceName parent) + *

  • listBackups(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • listBackupsPagedCallable() + *

  • listBackupsCallable() + *

+ *

RestoreDatabase

Create a new database by restoring from a completed backup. The new database must be in the same project and in an instance with the same instance configuration as the instance containing the backup. The returned database [long-running operation][google.longrunning.Operation] has a name of the format `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`, and can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. The [response][google.longrunning.Operation.response] type is [Database][google.spanner.admin.database.v1.Database], if successful. Cancelling the returned operation will stop the restore and delete the database. There can be only one database being restored into an instance at a time. Once the restore operation completes, a new restore operation can be initiated, without waiting for the optimize operation associated with the first restore to complete.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • restoreDatabaseAsync(RestoreDatabaseRequest request) + *

+ *

Methods that return long-running operations have "Async" method variants that return `OperationFuture`, which is used to track polling of the service.

+ *
    + *
  • restoreDatabaseAsync(InstanceName parent, String databaseId, BackupName backup) + *

  • restoreDatabaseAsync(InstanceName parent, String databaseId, String backup) + *

  • restoreDatabaseAsync(String parent, String databaseId, BackupName backup) + *

  • restoreDatabaseAsync(String parent, String databaseId, String backup) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • restoreDatabaseOperationCallable() + *

  • restoreDatabaseCallable() + *

+ *

ListDatabaseOperations

Lists database [longrunning-operations][google.longrunning.Operation]. A database operation has a name of the form `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`. The long-running operation [metadata][google.longrunning.Operation.metadata] field type `metadata.type_url` describes the type of the metadata. Operations returned include those that have completed/failed/canceled within the last 7 days, and pending operations.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • listDatabaseOperations(ListDatabaseOperationsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • listDatabaseOperations(InstanceName parent) + *

  • listDatabaseOperations(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • listDatabaseOperationsPagedCallable() + *

  • listDatabaseOperationsCallable() + *

+ *

ListBackupOperations

Lists the backup [long-running operations][google.longrunning.Operation] in the given instance. A backup operation has a name of the form `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>`. The long-running operation [metadata][google.longrunning.Operation.metadata] field type `metadata.type_url` describes the type of the metadata. Operations returned include those that have completed/failed/canceled within the last 7 days, and pending operations. Operations returned are ordered by `operation.metadata.value.progress.start_time` in descending order starting from the most recently started operation.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • listBackupOperations(ListBackupOperationsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • listBackupOperations(InstanceName parent) + *

  • listBackupOperations(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • listBackupOperationsPagedCallable() + *

  • listBackupOperationsCallable() + *

+ *

ListDatabaseRoles

Lists Cloud Spanner database roles.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • listDatabaseRoles(ListDatabaseRolesRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • listDatabaseRoles(DatabaseName parent) + *

  • listDatabaseRoles(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • listDatabaseRolesPagedCallable() + *

  • listDatabaseRolesCallable() + *

+ *

AddSplitPoints

Adds split points to specified tables, indexes of a database.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • addSplitPoints(AddSplitPointsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • addSplitPoints(DatabaseName database, List<SplitPoints> splitPoints) + *

  • addSplitPoints(String database, List<SplitPoints> splitPoints) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • addSplitPointsCallable() + *

+ *

CreateBackupSchedule

Creates a new backup schedule.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • createBackupSchedule(CreateBackupScheduleRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • createBackupSchedule(DatabaseName parent, BackupSchedule backupSchedule, String backupScheduleId) + *

  • createBackupSchedule(String parent, BackupSchedule backupSchedule, String backupScheduleId) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • createBackupScheduleCallable() + *

+ *

GetBackupSchedule

Gets backup schedule for the input schedule name.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getBackupSchedule(GetBackupScheduleRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getBackupSchedule(BackupScheduleName name) + *

  • getBackupSchedule(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getBackupScheduleCallable() + *

+ *

UpdateBackupSchedule

Updates a backup schedule.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • updateBackupSchedule(UpdateBackupScheduleRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • updateBackupSchedule(BackupSchedule backupSchedule, FieldMask updateMask) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • updateBackupScheduleCallable() + *

+ *

DeleteBackupSchedule

Deletes a backup schedule.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • deleteBackupSchedule(DeleteBackupScheduleRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • deleteBackupSchedule(BackupScheduleName name) + *

  • deleteBackupSchedule(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • deleteBackupScheduleCallable() + *

+ *

ListBackupSchedules

Lists all the backup schedules for the database.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • listBackupSchedules(ListBackupSchedulesRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • listBackupSchedules(DatabaseName parent) + *

  • listBackupSchedules(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • listBackupSchedulesPagedCallable() + *

  • listBackupSchedulesCallable() + *

+ *

InternalUpdateGraphOperation

This is an internal API called by Spanner Graph jobs. You should never need to call this API directly.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • internalUpdateGraphOperation(InternalUpdateGraphOperationRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • internalUpdateGraphOperation(DatabaseName database, String operationId) + *

  • internalUpdateGraphOperation(String database, String operationId) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • internalUpdateGraphOperationCallable() + *

+ *
+ * + *

See the individual methods for example code. + * + *

Many parameters require resource names to be formatted in a particular way. To assist with + * these names, this class includes a format method for each type of name, and additionally a parse + * method to extract the individual identifiers contained within names that are returned. + * + *

This class can be customized by passing in a custom instance of DatabaseAdminSettings to + * create(). For example: + * + *

To customize credentials: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * DatabaseAdminSettings databaseAdminSettings =
+ *     DatabaseAdminSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create(databaseAdminSettings);
+ * }
+ * + *

To customize the endpoint: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * DatabaseAdminSettings databaseAdminSettings =
+ *     DatabaseAdminSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create(databaseAdminSettings);
+ * }
+ * + *

To use REST (HTTP1.1/JSON) transport (instead of gRPC) for sending and receiving requests over + * the wire: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * DatabaseAdminSettings databaseAdminSettings =
+ *     DatabaseAdminSettings.newHttpJsonBuilder().build();
+ * DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create(databaseAdminSettings);
+ * }
+ * + *

Please refer to the GitHub repository's samples for more quickstart code snippets. + */ +@Generated("by gapic-generator-java") +public class DatabaseAdminClient implements BackgroundResource { + private final DatabaseAdminSettings settings; + private final DatabaseAdminStub stub; + private final OperationsClient httpJsonOperationsClient; + private final com.google.longrunning.OperationsClient operationsClient; + + /** Constructs an instance of DatabaseAdminClient with default settings. */ + public static final DatabaseAdminClient create() throws IOException { + return create(DatabaseAdminSettings.newBuilder().build()); + } + + /** + * Constructs an instance of DatabaseAdminClient, using the given settings. The channels are + * created based on the settings passed in, or defaults for any settings that are not set. + */ + public static final DatabaseAdminClient create(DatabaseAdminSettings settings) + throws IOException { + return new DatabaseAdminClient(settings); + } + + /** + * Constructs an instance of DatabaseAdminClient, using the given stub for making calls. This is + * for advanced usage - prefer using create(DatabaseAdminSettings). + */ + public static final DatabaseAdminClient create(DatabaseAdminStub stub) { + return new DatabaseAdminClient(stub); + } + + /** + * Constructs an instance of DatabaseAdminClient, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. 
+ */ + protected DatabaseAdminClient(DatabaseAdminSettings settings) throws IOException { + this.settings = settings; + this.stub = ((DatabaseAdminStubSettings) settings.getStubSettings()).createStub(); + this.operationsClient = + com.google.longrunning.OperationsClient.create(this.stub.getOperationsStub()); + this.httpJsonOperationsClient = OperationsClient.create(this.stub.getHttpJsonOperationsStub()); + } + + protected DatabaseAdminClient(DatabaseAdminStub stub) { + this.settings = null; + this.stub = stub; + this.operationsClient = + com.google.longrunning.OperationsClient.create(this.stub.getOperationsStub()); + this.httpJsonOperationsClient = OperationsClient.create(this.stub.getHttpJsonOperationsStub()); + } + + public final DatabaseAdminSettings getSettings() { + return settings; + } + + public DatabaseAdminStub getStub() { + return stub; + } + + /** + * Returns the OperationsClient that can be used to query the status of a long-running operation + * returned by another API method call. + */ + public final com.google.longrunning.OperationsClient getOperationsClient() { + return operationsClient; + } + + /** + * Returns the OperationsClient that can be used to query the status of a long-running operation + * returned by another API method call. + */ + @BetaApi + public final OperationsClient getHttpJsonOperationsClient() { + return httpJsonOperationsClient; + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists Cloud Spanner databases. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]");
+   *   for (Database element : databaseAdminClient.listDatabases(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The instance whose databases should be listed. Values are of the form + * `projects/<project>/instances/<instance>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListDatabasesPagedResponse listDatabases(InstanceName parent) { + ListDatabasesRequest request = + ListDatabasesRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listDatabases(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists Cloud Spanner databases. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String parent = InstanceName.of("[PROJECT]", "[INSTANCE]").toString();
+   *   for (Database element : databaseAdminClient.listDatabases(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The instance whose databases should be listed. Values are of the form + * `projects/<project>/instances/<instance>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListDatabasesPagedResponse listDatabases(String parent) { + ListDatabasesRequest request = ListDatabasesRequest.newBuilder().setParent(parent).build(); + return listDatabases(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists Cloud Spanner databases. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   ListDatabasesRequest request =
+   *       ListDatabasesRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   for (Database element : databaseAdminClient.listDatabases(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListDatabasesPagedResponse listDatabases(ListDatabasesRequest request) { + return listDatabasesPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists Cloud Spanner databases. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   ListDatabasesRequest request =
+   *       ListDatabasesRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   ApiFuture future =
+   *       databaseAdminClient.listDatabasesPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (Database element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listDatabasesPagedCallable() { + return stub.listDatabasesPagedCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists Cloud Spanner databases. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   ListDatabasesRequest request =
+   *       ListDatabasesRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   while (true) {
+   *     ListDatabasesResponse response = databaseAdminClient.listDatabasesCallable().call(request);
+   *     for (Database element : response.getDatabasesList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable listDatabasesCallable() { + return stub.listDatabasesCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new Cloud Spanner database and starts to prepare it for serving. The returned + * [long-running operation][google.longrunning.Operation] will have a name of the format + * `<database_name>/operations/<operation_id>` and can be used to track preparation of + * the database. The [metadata][google.longrunning.Operation.metadata] field type is + * [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Database][google.spanner.admin.database.v1.Database], if successful. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]");
+   *   String createStatement = "createStatement744686547";
+   *   Database response = databaseAdminClient.createDatabaseAsync(parent, createStatement).get();
+   * }
+   * }
+ * + * @param parent Required. The name of the instance that will serve the new database. Values are + * of the form `projects/<project>/instances/<instance>`. + * @param createStatement Required. A `CREATE DATABASE` statement, which specifies the ID of the + * new database. The database ID must conform to the regular expression + * `[a-z][a-z0-9_\\-]*[a-z0-9]` and be between 2 and 30 characters in length. If the + * database ID is a reserved word or if it contains a hyphen, the database ID must be enclosed + * in backticks (`` ` ``). + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture createDatabaseAsync( + InstanceName parent, String createStatement) { + CreateDatabaseRequest request = + CreateDatabaseRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setCreateStatement(createStatement) + .build(); + return createDatabaseAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new Cloud Spanner database and starts to prepare it for serving. The returned + * [long-running operation][google.longrunning.Operation] will have a name of the format + * `<database_name>/operations/<operation_id>` and can be used to track preparation of + * the database. The [metadata][google.longrunning.Operation.metadata] field type is + * [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Database][google.spanner.admin.database.v1.Database], if successful. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String parent = InstanceName.of("[PROJECT]", "[INSTANCE]").toString();
+   *   String createStatement = "createStatement744686547";
+   *   Database response = databaseAdminClient.createDatabaseAsync(parent, createStatement).get();
+   * }
+   * }
+ * + * @param parent Required. The name of the instance that will serve the new database. Values are + * of the form `projects/<project>/instances/<instance>`. + * @param createStatement Required. A `CREATE DATABASE` statement, which specifies the ID of the + * new database. The database ID must conform to the regular expression + * `[a-z][a-z0-9_\\-]*[a-z0-9]` and be between 2 and 30 characters in length. If the + * database ID is a reserved word or if it contains a hyphen, the database ID must be enclosed + * in backticks (`` ` ``). + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture createDatabaseAsync( + String parent, String createStatement) { + CreateDatabaseRequest request = + CreateDatabaseRequest.newBuilder() + .setParent(parent) + .setCreateStatement(createStatement) + .build(); + return createDatabaseAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new Cloud Spanner database and starts to prepare it for serving. The returned + * [long-running operation][google.longrunning.Operation] will have a name of the format + * `<database_name>/operations/<operation_id>` and can be used to track preparation of + * the database. The [metadata][google.longrunning.Operation.metadata] field type is + * [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Database][google.spanner.admin.database.v1.Database], if successful. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   CreateDatabaseRequest request =
+   *       CreateDatabaseRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setCreateStatement("createStatement744686547")
+   *           .addAllExtraStatements(new ArrayList())
+   *           .setEncryptionConfig(EncryptionConfig.newBuilder().build())
+   *           .setDatabaseDialect(DatabaseDialect.forNumber(0))
+   *           .setProtoDescriptors(ByteString.EMPTY)
+   *           .build();
+   *   Database response = databaseAdminClient.createDatabaseAsync(request).get();
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture createDatabaseAsync( + CreateDatabaseRequest request) { + return createDatabaseOperationCallable().futureCall(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new Cloud Spanner database and starts to prepare it for serving. The returned + * [long-running operation][google.longrunning.Operation] will have a name of the format + * `<database_name>/operations/<operation_id>` and can be used to track preparation of + * the database. The [metadata][google.longrunning.Operation.metadata] field type is + * [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Database][google.spanner.admin.database.v1.Database], if successful. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   CreateDatabaseRequest request =
+   *       CreateDatabaseRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setCreateStatement("createStatement744686547")
+   *           .addAllExtraStatements(new ArrayList())
+   *           .setEncryptionConfig(EncryptionConfig.newBuilder().build())
+   *           .setDatabaseDialect(DatabaseDialect.forNumber(0))
+   *           .setProtoDescriptors(ByteString.EMPTY)
+   *           .build();
+   *   OperationFuture future =
+   *       databaseAdminClient.createDatabaseOperationCallable().futureCall(request);
+   *   // Do something.
+   *   Database response = future.get();
+   * }
+   * }
+ */ + public final OperationCallable + createDatabaseOperationCallable() { + return stub.createDatabaseOperationCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new Cloud Spanner database and starts to prepare it for serving. The returned + * [long-running operation][google.longrunning.Operation] will have a name of the format + * `<database_name>/operations/<operation_id>` and can be used to track preparation of + * the database. The [metadata][google.longrunning.Operation.metadata] field type is + * [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Database][google.spanner.admin.database.v1.Database], if successful. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   CreateDatabaseRequest request =
+   *       CreateDatabaseRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setCreateStatement("createStatement744686547")
+   *           .addAllExtraStatements(new ArrayList())
+   *           .setEncryptionConfig(EncryptionConfig.newBuilder().build())
+   *           .setDatabaseDialect(DatabaseDialect.forNumber(0))
+   *           .setProtoDescriptors(ByteString.EMPTY)
+   *           .build();
+   *   ApiFuture future =
+   *       databaseAdminClient.createDatabaseCallable().futureCall(request);
+   *   // Do something.
+   *   Operation response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable createDatabaseCallable() { + return stub.createDatabaseCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the state of a Cloud Spanner database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   DatabaseName name = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]");
+   *   Database response = databaseAdminClient.getDatabase(name);
+   * }
+   * }
+ * + * @param name Required. The name of the requested database. Values are of the form + * `projects/<project>/instances/<instance>/databases/<database>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Database getDatabase(DatabaseName name) { + GetDatabaseRequest request = + GetDatabaseRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + return getDatabase(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the state of a Cloud Spanner database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String name = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString();
+   *   Database response = databaseAdminClient.getDatabase(name);
+   * }
+   * }
+ * + * @param name Required. The name of the requested database. Values are of the form + * `projects/<project>/instances/<instance>/databases/<database>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Database getDatabase(String name) { + GetDatabaseRequest request = GetDatabaseRequest.newBuilder().setName(name).build(); + return getDatabase(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the state of a Cloud Spanner database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   GetDatabaseRequest request =
+   *       GetDatabaseRequest.newBuilder()
+   *           .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .build();
+   *   Database response = databaseAdminClient.getDatabase(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Database getDatabase(GetDatabaseRequest request) { + return getDatabaseCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the state of a Cloud Spanner database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   GetDatabaseRequest request =
+   *       GetDatabaseRequest.newBuilder()
+   *           .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .build();
+   *   ApiFuture future = databaseAdminClient.getDatabaseCallable().futureCall(request);
+   *   // Do something.
+   *   Database response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable getDatabaseCallable() { + return stub.getDatabaseCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates a Cloud Spanner database. The returned [long-running + * operation][google.longrunning.Operation] can be used to track the progress of updating the + * database. If the named database does not exist, returns `NOT_FOUND`. + * + *

While the operation is pending: + * + *

* The database's [reconciling][google.spanner.admin.database.v1.Database.reconciling] + * field is set to true. * Cancelling the operation is best-effort. If the cancellation + * succeeds, the operation metadata's + * [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time] is set, the + * updates are reverted, and the operation terminates with a `CANCELLED` status. * New + * UpdateDatabase requests will return a `FAILED_PRECONDITION` error until the pending operation + * is done (returns successfully or with error). * Reading the database via the API continues + * to give the pre-request values. + * + *

Upon completion of the returned operation: + * + *

* The new values are in effect and readable via the API. * The database's + * [reconciling][google.spanner.admin.database.v1.Database.reconciling] field becomes false. + * + *

The returned [long-running operation][google.longrunning.Operation] will have a name of the + * format + * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>` + * and can be used to track the database modification. The + * [metadata][google.longrunning.Operation.metadata] field type is + * [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Database][google.spanner.admin.database.v1.Database], if successful. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   Database database = Database.newBuilder().build();
+   *   FieldMask updateMask = FieldMask.newBuilder().build();
+   *   Database response = databaseAdminClient.updateDatabaseAsync(database, updateMask).get();
+   * }
+   * }
+ * + * @param database Required. The database to update. The `name` field of the database is of the + * form `projects/<project>/instances/<instance>/databases/<database>`. + * @param updateMask Required. The list of fields to update. Currently, only + * `enable_drop_protection` field can be updated. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture updateDatabaseAsync( + Database database, FieldMask updateMask) { + UpdateDatabaseRequest request = + UpdateDatabaseRequest.newBuilder().setDatabase(database).setUpdateMask(updateMask).build(); + return updateDatabaseAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates a Cloud Spanner database. The returned [long-running + * operation][google.longrunning.Operation] can be used to track the progress of updating the + * database. If the named database does not exist, returns `NOT_FOUND`. + * + *

While the operation is pending: + * + *

* The database's [reconciling][google.spanner.admin.database.v1.Database.reconciling] + * field is set to true. * Cancelling the operation is best-effort. If the cancellation + * succeeds, the operation metadata's + * [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time] is set, the + * updates are reverted, and the operation terminates with a `CANCELLED` status. * New + * UpdateDatabase requests will return a `FAILED_PRECONDITION` error until the pending operation + * is done (returns successfully or with error). * Reading the database via the API continues + * to give the pre-request values. + * + *

Upon completion of the returned operation: + * + *

* The new values are in effect and readable via the API. * The database's + * [reconciling][google.spanner.admin.database.v1.Database.reconciling] field becomes false. + * + *

The returned [long-running operation][google.longrunning.Operation] will have a name of the + * format + * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>` + * and can be used to track the database modification. The + * [metadata][google.longrunning.Operation.metadata] field type is + * [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Database][google.spanner.admin.database.v1.Database], if successful. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   UpdateDatabaseRequest request =
+   *       UpdateDatabaseRequest.newBuilder()
+   *           .setDatabase(Database.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   Database response = databaseAdminClient.updateDatabaseAsync(request).get();
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture updateDatabaseAsync( + UpdateDatabaseRequest request) { + return updateDatabaseOperationCallable().futureCall(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates a Cloud Spanner database. The returned [long-running + * operation][google.longrunning.Operation] can be used to track the progress of updating the + * database. If the named database does not exist, returns `NOT_FOUND`. + * + *

While the operation is pending: + * + *

* The database's [reconciling][google.spanner.admin.database.v1.Database.reconciling] + * field is set to true. * Cancelling the operation is best-effort. If the cancellation + * succeeds, the operation metadata's + * [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time] is set, the + * updates are reverted, and the operation terminates with a `CANCELLED` status. * New + * UpdateDatabase requests will return a `FAILED_PRECONDITION` error until the pending operation + * is done (returns successfully or with error). * Reading the database via the API continues + * to give the pre-request values. + * + *

Upon completion of the returned operation: + * + *

* The new values are in effect and readable via the API. * The database's + * [reconciling][google.spanner.admin.database.v1.Database.reconciling] field becomes false. + * + *

The returned [long-running operation][google.longrunning.Operation] will have a name of the + * format + * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>` + * and can be used to track the database modification. The + * [metadata][google.longrunning.Operation.metadata] field type is + * [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Database][google.spanner.admin.database.v1.Database], if successful. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   UpdateDatabaseRequest request =
+   *       UpdateDatabaseRequest.newBuilder()
+   *           .setDatabase(Database.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   OperationFuture future =
+   *       databaseAdminClient.updateDatabaseOperationCallable().futureCall(request);
+   *   // Do something.
+   *   Database response = future.get();
+   * }
+   * }
+ */ + public final OperationCallable + updateDatabaseOperationCallable() { + return stub.updateDatabaseOperationCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates a Cloud Spanner database. The returned [long-running + * operation][google.longrunning.Operation] can be used to track the progress of updating the + * database. If the named database does not exist, returns `NOT_FOUND`. + * + *

While the operation is pending: + * + *

* The database's [reconciling][google.spanner.admin.database.v1.Database.reconciling] + * field is set to true. * Cancelling the operation is best-effort. If the cancellation + * succeeds, the operation metadata's + * [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time] is set, the + * updates are reverted, and the operation terminates with a `CANCELLED` status. * New + * UpdateDatabase requests will return a `FAILED_PRECONDITION` error until the pending operation + * is done (returns successfully or with error). * Reading the database via the API continues + * to give the pre-request values. + * + *

Upon completion of the returned operation: + * + *

* The new values are in effect and readable via the API. * The database's + * [reconciling][google.spanner.admin.database.v1.Database.reconciling] field becomes false. + * + *

The returned [long-running operation][google.longrunning.Operation] will have a name of the + * format + * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>` + * and can be used to track the database modification. The + * [metadata][google.longrunning.Operation.metadata] field type is + * [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Database][google.spanner.admin.database.v1.Database], if successful. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   UpdateDatabaseRequest request =
+   *       UpdateDatabaseRequest.newBuilder()
+   *           .setDatabase(Database.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   ApiFuture future =
+   *       databaseAdminClient.updateDatabaseCallable().futureCall(request);
+   *   // Do something.
+   *   Operation response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable updateDatabaseCallable() { + return stub.updateDatabaseCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates the schema of a Cloud Spanner database by creating/altering/dropping tables, columns, + * indexes, etc. The returned [long-running operation][google.longrunning.Operation] will have a + * name of the format `<database_name>/operations/<operation_id>` and can be used to + * track execution of the schema change(s). The [metadata][google.longrunning.Operation.metadata] + * field type is + * [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The + * operation has no response. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]");
+   *   List statements = new ArrayList<>();
+   *   databaseAdminClient.updateDatabaseDdlAsync(database, statements).get();
+   * }
+   * }
+ * + * @param database Required. The database to update. + * @param statements Required. DDL statements to be applied to the database. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture updateDatabaseDdlAsync( + DatabaseName database, List statements) { + UpdateDatabaseDdlRequest request = + UpdateDatabaseDdlRequest.newBuilder() + .setDatabase(database == null ? null : database.toString()) + .addAllStatements(statements) + .build(); + return updateDatabaseDdlAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates the schema of a Cloud Spanner database by creating/altering/dropping tables, columns, + * indexes, etc. The returned [long-running operation][google.longrunning.Operation] will have a + * name of the format `<database_name>/operations/<operation_id>` and can be used to + * track execution of the schema change(s). The [metadata][google.longrunning.Operation.metadata] + * field type is + * [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The + * operation has no response. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString();
+   *   List statements = new ArrayList<>();
+   *   databaseAdminClient.updateDatabaseDdlAsync(database, statements).get();
+   * }
+   * }
+ * + * @param database Required. The database to update. + * @param statements Required. DDL statements to be applied to the database. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture updateDatabaseDdlAsync( + String database, List statements) { + UpdateDatabaseDdlRequest request = + UpdateDatabaseDdlRequest.newBuilder() + .setDatabase(database) + .addAllStatements(statements) + .build(); + return updateDatabaseDdlAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates the schema of a Cloud Spanner database by creating/altering/dropping tables, columns, + * indexes, etc. The returned [long-running operation][google.longrunning.Operation] will have a + * name of the format `<database_name>/operations/<operation_id>` and can be used to + * track execution of the schema change(s). The [metadata][google.longrunning.Operation.metadata] + * field type is + * [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The + * operation has no response. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   UpdateDatabaseDdlRequest request =
+   *       UpdateDatabaseDdlRequest.newBuilder()
+   *           .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .addAllStatements(new ArrayList())
+   *           .setOperationId("operationId129704162")
+   *           .setProtoDescriptors(ByteString.EMPTY)
+   *           .setThroughputMode(true)
+   *           .build();
+   *   databaseAdminClient.updateDatabaseDdlAsync(request).get();
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture updateDatabaseDdlAsync( + UpdateDatabaseDdlRequest request) { + return updateDatabaseDdlOperationCallable().futureCall(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates the schema of a Cloud Spanner database by creating/altering/dropping tables, columns, + * indexes, etc. The returned [long-running operation][google.longrunning.Operation] will have a + * name of the format `<database_name>/operations/<operation_id>` and can be used to + * track execution of the schema change(s). The [metadata][google.longrunning.Operation.metadata] + * field type is + * [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The + * operation has no response. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   UpdateDatabaseDdlRequest request =
+   *       UpdateDatabaseDdlRequest.newBuilder()
+   *           .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .addAllStatements(new ArrayList())
+   *           .setOperationId("operationId129704162")
+   *           .setProtoDescriptors(ByteString.EMPTY)
+   *           .setThroughputMode(true)
+   *           .build();
+   *   OperationFuture future =
+   *       databaseAdminClient.updateDatabaseDdlOperationCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }
+ */ + public final OperationCallable + updateDatabaseDdlOperationCallable() { + return stub.updateDatabaseDdlOperationCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates the schema of a Cloud Spanner database by creating/altering/dropping tables, columns, + * indexes, etc. The returned [long-running operation][google.longrunning.Operation] will have a + * name of the format `<database_name>/operations/<operation_id>` and can be used to + * track execution of the schema change(s). The [metadata][google.longrunning.Operation.metadata] + * field type is + * [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The + * operation has no response. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   UpdateDatabaseDdlRequest request =
+   *       UpdateDatabaseDdlRequest.newBuilder()
+   *           .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .addAllStatements(new ArrayList<String>())
+   *           .setOperationId("operationId129704162")
+   *           .setProtoDescriptors(ByteString.EMPTY)
+   *           .setThroughputMode(true)
+   *           .build();
+   *   ApiFuture<Operation> future =
+   *       databaseAdminClient.updateDatabaseDdlCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }
+ */ + public final UnaryCallable<UpdateDatabaseDdlRequest, Operation> updateDatabaseDdlCallable() { + return stub.updateDatabaseDdlCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Drops (aka deletes) a Cloud Spanner database. Completed backups for the database will be + * retained according to their `expire_time`. Note: Cloud Spanner might continue to accept + * requests for a few seconds after the database has been deleted. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]");
+   *   databaseAdminClient.dropDatabase(database);
+   * }
+   * }
+ * + * @param database Required. The database to be dropped. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void dropDatabase(DatabaseName database) { + DropDatabaseRequest request = + DropDatabaseRequest.newBuilder() + .setDatabase(database == null ? null : database.toString()) + .build(); + dropDatabase(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Drops (aka deletes) a Cloud Spanner database. Completed backups for the database will be + * retained according to their `expire_time`. Note: Cloud Spanner might continue to accept + * requests for a few seconds after the database has been deleted. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString();
+   *   databaseAdminClient.dropDatabase(database);
+   * }
+   * }
+ * + * @param database Required. The database to be dropped. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void dropDatabase(String database) { + DropDatabaseRequest request = DropDatabaseRequest.newBuilder().setDatabase(database).build(); + dropDatabase(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Drops (aka deletes) a Cloud Spanner database. Completed backups for the database will be + * retained according to their `expire_time`. Note: Cloud Spanner might continue to accept + * requests for a few seconds after the database has been deleted. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   DropDatabaseRequest request =
+   *       DropDatabaseRequest.newBuilder()
+   *           .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .build();
+   *   databaseAdminClient.dropDatabase(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void dropDatabase(DropDatabaseRequest request) { + dropDatabaseCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Drops (aka deletes) a Cloud Spanner database. Completed backups for the database will be + * retained according to their `expire_time`. Note: Cloud Spanner might continue to accept + * requests for a few seconds after the database has been deleted. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   DropDatabaseRequest request =
+   *       DropDatabaseRequest.newBuilder()
+   *           .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .build();
+   *   ApiFuture<Empty> future = databaseAdminClient.dropDatabaseCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }
+ */ + public final UnaryCallable<DropDatabaseRequest, Empty> dropDatabaseCallable() { + return stub.dropDatabaseCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns the schema of a Cloud Spanner database as a list of formatted DDL statements. This + * method does not show pending schema updates, those may be queried using the + * [Operations][google.longrunning.Operations] API. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]");
+   *   GetDatabaseDdlResponse response = databaseAdminClient.getDatabaseDdl(database);
+   * }
+   * }
+ * + * @param database Required. The database whose schema we wish to get. Values are of the form + * `projects/<project>/instances/<instance>/databases/<database>` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final GetDatabaseDdlResponse getDatabaseDdl(DatabaseName database) { + GetDatabaseDdlRequest request = + GetDatabaseDdlRequest.newBuilder() + .setDatabase(database == null ? null : database.toString()) + .build(); + return getDatabaseDdl(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns the schema of a Cloud Spanner database as a list of formatted DDL statements. This + * method does not show pending schema updates, those may be queried using the + * [Operations][google.longrunning.Operations] API. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString();
+   *   GetDatabaseDdlResponse response = databaseAdminClient.getDatabaseDdl(database);
+   * }
+   * }
+ * + * @param database Required. The database whose schema we wish to get. Values are of the form + * `projects/<project>/instances/<instance>/databases/<database>` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final GetDatabaseDdlResponse getDatabaseDdl(String database) { + GetDatabaseDdlRequest request = + GetDatabaseDdlRequest.newBuilder().setDatabase(database).build(); + return getDatabaseDdl(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns the schema of a Cloud Spanner database as a list of formatted DDL statements. This + * method does not show pending schema updates, those may be queried using the + * [Operations][google.longrunning.Operations] API. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   GetDatabaseDdlRequest request =
+   *       GetDatabaseDdlRequest.newBuilder()
+   *           .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .build();
+   *   GetDatabaseDdlResponse response = databaseAdminClient.getDatabaseDdl(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final GetDatabaseDdlResponse getDatabaseDdl(GetDatabaseDdlRequest request) { + return getDatabaseDdlCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns the schema of a Cloud Spanner database as a list of formatted DDL statements. This + * method does not show pending schema updates, those may be queried using the + * [Operations][google.longrunning.Operations] API. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   GetDatabaseDdlRequest request =
+   *       GetDatabaseDdlRequest.newBuilder()
+   *           .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .build();
+   *   ApiFuture<GetDatabaseDdlResponse> future =
+   *       databaseAdminClient.getDatabaseDdlCallable().futureCall(request);
+   *   // Do something.
+   *   GetDatabaseDdlResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable<GetDatabaseDdlRequest, GetDatabaseDdlResponse> + getDatabaseDdlCallable() { + return stub.getDatabaseDdlCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Sets the access control policy on a database or backup resource. Replaces any existing policy. + * + *

Authorization requires `spanner.databases.setIamPolicy` permission on + * [resource][google.iam.v1.SetIamPolicyRequest.resource]. For backups, authorization requires + * `spanner.backups.setIamPolicy` permission on + * [resource][google.iam.v1.SetIamPolicyRequest.resource]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   ResourceName resource = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]");
+   *   Policy policy = Policy.newBuilder().build();
+   *   Policy response = databaseAdminClient.setIamPolicy(resource, policy);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy is being specified. See the + * operation documentation for the appropriate value for this field. + * @param policy REQUIRED: The complete policy to be applied to the `resource`. The size of the + * policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud + * Platform services (such as Projects) might reject them. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy setIamPolicy(ResourceName resource, Policy policy) { + SetIamPolicyRequest request = + SetIamPolicyRequest.newBuilder() + .setResource(resource == null ? null : resource.toString()) + .setPolicy(policy) + .build(); + return setIamPolicy(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Sets the access control policy on a database or backup resource. Replaces any existing policy. + * + *

Authorization requires `spanner.databases.setIamPolicy` permission on + * [resource][google.iam.v1.SetIamPolicyRequest.resource]. For backups, authorization requires + * `spanner.backups.setIamPolicy` permission on + * [resource][google.iam.v1.SetIamPolicyRequest.resource]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String resource = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString();
+   *   Policy policy = Policy.newBuilder().build();
+   *   Policy response = databaseAdminClient.setIamPolicy(resource, policy);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy is being specified. See the + * operation documentation for the appropriate value for this field. + * @param policy REQUIRED: The complete policy to be applied to the `resource`. The size of the + * policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud + * Platform services (such as Projects) might reject them. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy setIamPolicy(String resource, Policy policy) { + SetIamPolicyRequest request = + SetIamPolicyRequest.newBuilder().setResource(resource).setPolicy(policy).build(); + return setIamPolicy(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Sets the access control policy on a database or backup resource. Replaces any existing policy. + * + *

Authorization requires `spanner.databases.setIamPolicy` permission on + * [resource][google.iam.v1.SetIamPolicyRequest.resource]. For backups, authorization requires + * `spanner.backups.setIamPolicy` permission on + * [resource][google.iam.v1.SetIamPolicyRequest.resource]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   SetIamPolicyRequest request =
+   *       SetIamPolicyRequest.newBuilder()
+   *           .setResource(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString())
+   *           .setPolicy(Policy.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   Policy response = databaseAdminClient.setIamPolicy(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy setIamPolicy(SetIamPolicyRequest request) { + return setIamPolicyCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Sets the access control policy on a database or backup resource. Replaces any existing policy. + * + *

Authorization requires `spanner.databases.setIamPolicy` permission on + * [resource][google.iam.v1.SetIamPolicyRequest.resource]. For backups, authorization requires + * `spanner.backups.setIamPolicy` permission on + * [resource][google.iam.v1.SetIamPolicyRequest.resource]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   SetIamPolicyRequest request =
+   *       SetIamPolicyRequest.newBuilder()
+   *           .setResource(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString())
+   *           .setPolicy(Policy.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   ApiFuture<Policy> future = databaseAdminClient.setIamPolicyCallable().futureCall(request);
+   *   // Do something.
+   *   Policy response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable<SetIamPolicyRequest, Policy> setIamPolicyCallable() { + return stub.setIamPolicyCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the access control policy for a database or backup resource. Returns an empty policy if a + * database or backup exists but does not have a policy set. + * + *

Authorization requires `spanner.databases.getIamPolicy` permission on + * [resource][google.iam.v1.GetIamPolicyRequest.resource]. For backups, authorization requires + * `spanner.backups.getIamPolicy` permission on + * [resource][google.iam.v1.GetIamPolicyRequest.resource]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   ResourceName resource = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]");
+   *   Policy response = databaseAdminClient.getIamPolicy(resource);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy is being requested. See the + * operation documentation for the appropriate value for this field. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy getIamPolicy(ResourceName resource) { + GetIamPolicyRequest request = + GetIamPolicyRequest.newBuilder() + .setResource(resource == null ? null : resource.toString()) + .build(); + return getIamPolicy(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the access control policy for a database or backup resource. Returns an empty policy if a + * database or backup exists but does not have a policy set. + * + *

Authorization requires `spanner.databases.getIamPolicy` permission on + * [resource][google.iam.v1.GetIamPolicyRequest.resource]. For backups, authorization requires + * `spanner.backups.getIamPolicy` permission on + * [resource][google.iam.v1.GetIamPolicyRequest.resource]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String resource = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString();
+   *   Policy response = databaseAdminClient.getIamPolicy(resource);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy is being requested. See the + * operation documentation for the appropriate value for this field. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy getIamPolicy(String resource) { + GetIamPolicyRequest request = GetIamPolicyRequest.newBuilder().setResource(resource).build(); + return getIamPolicy(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the access control policy for a database or backup resource. Returns an empty policy if a + * database or backup exists but does not have a policy set. + * + *

Authorization requires `spanner.databases.getIamPolicy` permission on + * [resource][google.iam.v1.GetIamPolicyRequest.resource]. For backups, authorization requires + * `spanner.backups.getIamPolicy` permission on + * [resource][google.iam.v1.GetIamPolicyRequest.resource]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   GetIamPolicyRequest request =
+   *       GetIamPolicyRequest.newBuilder()
+   *           .setResource(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString())
+   *           .setOptions(GetPolicyOptions.newBuilder().build())
+   *           .build();
+   *   Policy response = databaseAdminClient.getIamPolicy(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy getIamPolicy(GetIamPolicyRequest request) { + return getIamPolicyCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the access control policy for a database or backup resource. Returns an empty policy if a + * database or backup exists but does not have a policy set. + * + *

Authorization requires `spanner.databases.getIamPolicy` permission on + * [resource][google.iam.v1.GetIamPolicyRequest.resource]. For backups, authorization requires + * `spanner.backups.getIamPolicy` permission on + * [resource][google.iam.v1.GetIamPolicyRequest.resource]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   GetIamPolicyRequest request =
+   *       GetIamPolicyRequest.newBuilder()
+   *           .setResource(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString())
+   *           .setOptions(GetPolicyOptions.newBuilder().build())
+   *           .build();
+   *   ApiFuture<Policy> future = databaseAdminClient.getIamPolicyCallable().futureCall(request);
+   *   // Do something.
+   *   Policy response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable<GetIamPolicyRequest, Policy> getIamPolicyCallable() { + return stub.getIamPolicyCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns permissions that the caller has on the specified database or backup resource. + * + *

Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND + * error if the user has `spanner.databases.list` permission on the containing Cloud Spanner + * instance. Otherwise returns an empty set of permissions. Calling this method on a backup that + * does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` + * permission on the containing instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   ResourceName resource = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]");
+   *   List<String> permissions = new ArrayList<>();
+   *   TestIamPermissionsResponse response =
+   *       databaseAdminClient.testIamPermissions(resource, permissions);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy detail is being requested. See the + * operation documentation for the appropriate value for this field. + * @param permissions The set of permissions to check for the `resource`. Permissions with + * wildcards (such as '*' or 'storage.*') are not allowed. For more information see + * [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final TestIamPermissionsResponse testIamPermissions( + ResourceName resource, List<String> permissions) { + TestIamPermissionsRequest request = + TestIamPermissionsRequest.newBuilder() + .setResource(resource == null ? null : resource.toString()) + .addAllPermissions(permissions) + .build(); + return testIamPermissions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns permissions that the caller has on the specified database or backup resource. + * + *

Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND + * error if the user has `spanner.databases.list` permission on the containing Cloud Spanner + * instance. Otherwise returns an empty set of permissions. Calling this method on a backup that + * does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` + * permission on the containing instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String resource = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString();
+   *   List<String> permissions = new ArrayList<>();
+   *   TestIamPermissionsResponse response =
+   *       databaseAdminClient.testIamPermissions(resource, permissions);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy detail is being requested. See the + * operation documentation for the appropriate value for this field. + * @param permissions The set of permissions to check for the `resource`. Permissions with + * wildcards (such as '*' or 'storage.*') are not allowed. For more information see + * [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final TestIamPermissionsResponse testIamPermissions( + String resource, List<String> permissions) { + TestIamPermissionsRequest request = + TestIamPermissionsRequest.newBuilder() + .setResource(resource) + .addAllPermissions(permissions) + .build(); + return testIamPermissions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns permissions that the caller has on the specified database or backup resource. + * + *

Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND + * error if the user has `spanner.databases.list` permission on the containing Cloud Spanner + * instance. Otherwise returns an empty set of permissions. Calling this method on a backup that + * does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` + * permission on the containing instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   TestIamPermissionsRequest request =
+   *       TestIamPermissionsRequest.newBuilder()
+   *           .setResource(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString())
+   *           .addAllPermissions(new ArrayList<String>())
+   *           .build();
+   *   TestIamPermissionsResponse response = databaseAdminClient.testIamPermissions(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final TestIamPermissionsResponse testIamPermissions(TestIamPermissionsRequest request) { + return testIamPermissionsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns permissions that the caller has on the specified database or backup resource. + * + *

Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND + * error if the user has `spanner.databases.list` permission on the containing Cloud Spanner + * instance. Otherwise returns an empty set of permissions. Calling this method on a backup that + * does not exist will result in a NOT_FOUND error if the user has `spanner.backups.list` + * permission on the containing instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   TestIamPermissionsRequest request =
+   *       TestIamPermissionsRequest.newBuilder()
+   *           .setResource(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString())
+   *           .addAllPermissions(new ArrayList())
+   *           .build();
+   *   ApiFuture future =
+   *       databaseAdminClient.testIamPermissionsCallable().futureCall(request);
+   *   // Do something.
+   *   TestIamPermissionsResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + testIamPermissionsCallable() { + return stub.testIamPermissionsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Starts creating a new Cloud Spanner Backup. The returned backup [long-running + * operation][google.longrunning.Operation] will have a name of the format + * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>` + * and can be used to track creation of the backup. The + * [metadata][google.longrunning.Operation.metadata] field type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned + * operation will stop the creation and delete the backup. There can be only one pending backup + * creation per database. Backup creation of different databases can run concurrently. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]");
+   *   Backup backup = Backup.newBuilder().build();
+   *   String backupId = "backupId2121930365";
+   *   Backup response = databaseAdminClient.createBackupAsync(parent, backup, backupId).get();
+   * }
+   * }
+ * + * @param parent Required. The name of the instance in which the backup will be created. This must + * be the same instance that contains the database the backup will be created from. The backup + * will be stored in the location(s) specified in the instance configuration of this instance. + * Values are of the form `projects/<project>/instances/<instance>`. + * @param backup Required. The backup to create. + * @param backupId Required. The id of the backup to be created. The `backup_id` appended to + * `parent` forms the full backup name of the form + * `projects/<project>/instances/<instance>/backups/<backup_id>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture createBackupAsync( + InstanceName parent, Backup backup, String backupId) { + CreateBackupRequest request = + CreateBackupRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setBackup(backup) + .setBackupId(backupId) + .build(); + return createBackupAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Starts creating a new Cloud Spanner Backup. The returned backup [long-running + * operation][google.longrunning.Operation] will have a name of the format + * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>` + * and can be used to track creation of the backup. The + * [metadata][google.longrunning.Operation.metadata] field type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned + * operation will stop the creation and delete the backup. There can be only one pending backup + * creation per database. Backup creation of different databases can run concurrently. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String parent = InstanceName.of("[PROJECT]", "[INSTANCE]").toString();
+   *   Backup backup = Backup.newBuilder().build();
+   *   String backupId = "backupId2121930365";
+   *   Backup response = databaseAdminClient.createBackupAsync(parent, backup, backupId).get();
+   * }
+   * }
+ * + * @param parent Required. The name of the instance in which the backup will be created. This must + * be the same instance that contains the database the backup will be created from. The backup + * will be stored in the location(s) specified in the instance configuration of this instance. + * Values are of the form `projects/<project>/instances/<instance>`. + * @param backup Required. The backup to create. + * @param backupId Required. The id of the backup to be created. The `backup_id` appended to + * `parent` forms the full backup name of the form + * `projects/<project>/instances/<instance>/backups/<backup_id>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture createBackupAsync( + String parent, Backup backup, String backupId) { + CreateBackupRequest request = + CreateBackupRequest.newBuilder() + .setParent(parent) + .setBackup(backup) + .setBackupId(backupId) + .build(); + return createBackupAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Starts creating a new Cloud Spanner Backup. The returned backup [long-running + * operation][google.longrunning.Operation] will have a name of the format + * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>` + * and can be used to track creation of the backup. The + * [metadata][google.longrunning.Operation.metadata] field type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned + * operation will stop the creation and delete the backup. There can be only one pending backup + * creation per database. Backup creation of different databases can run concurrently. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   CreateBackupRequest request =
+   *       CreateBackupRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setBackupId("backupId2121930365")
+   *           .setBackup(Backup.newBuilder().build())
+   *           .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build())
+   *           .build();
+   *   Backup response = databaseAdminClient.createBackupAsync(request).get();
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture createBackupAsync( + CreateBackupRequest request) { + return createBackupOperationCallable().futureCall(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Starts creating a new Cloud Spanner Backup. The returned backup [long-running + * operation][google.longrunning.Operation] will have a name of the format + * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>` + * and can be used to track creation of the backup. The + * [metadata][google.longrunning.Operation.metadata] field type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned + * operation will stop the creation and delete the backup. There can be only one pending backup + * creation per database. Backup creation of different databases can run concurrently. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   CreateBackupRequest request =
+   *       CreateBackupRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setBackupId("backupId2121930365")
+   *           .setBackup(Backup.newBuilder().build())
+   *           .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build())
+   *           .build();
+   *   OperationFuture future =
+   *       databaseAdminClient.createBackupOperationCallable().futureCall(request);
+   *   // Do something.
+   *   Backup response = future.get();
+   * }
+   * }
+ */ + public final OperationCallable + createBackupOperationCallable() { + return stub.createBackupOperationCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Starts creating a new Cloud Spanner Backup. The returned backup [long-running + * operation][google.longrunning.Operation] will have a name of the format + * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>` + * and can be used to track creation of the backup. The + * [metadata][google.longrunning.Operation.metadata] field type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned + * operation will stop the creation and delete the backup. There can be only one pending backup + * creation per database. Backup creation of different databases can run concurrently. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   CreateBackupRequest request =
+   *       CreateBackupRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setBackupId("backupId2121930365")
+   *           .setBackup(Backup.newBuilder().build())
+   *           .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build())
+   *           .build();
+   *   ApiFuture future = databaseAdminClient.createBackupCallable().futureCall(request);
+   *   // Do something.
+   *   Operation response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable createBackupCallable() { + return stub.createBackupCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Starts copying a Cloud Spanner Backup. The returned backup [long-running + * operation][google.longrunning.Operation] will have a name of the format + * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>` + * and can be used to track copying of the backup. The operation is associated with the + * destination backup. The [metadata][google.longrunning.Operation.metadata] field type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned + * operation will stop the copying and delete the destination backup. Concurrent CopyBackup + * requests can run on the same source backup. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]");
+   *   String backupId = "backupId2121930365";
+   *   BackupName sourceBackup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]");
+   *   Timestamp expireTime = Timestamp.newBuilder().build();
+   *   Backup response =
+   *       databaseAdminClient.copyBackupAsync(parent, backupId, sourceBackup, expireTime).get();
+   * }
+   * }
+ * + * @param parent Required. The name of the destination instance that will contain the backup copy. + * Values are of the form: `projects/<project>/instances/<instance>`. + * @param backupId Required. The id of the backup copy. The `backup_id` appended to `parent` forms + * the full backup_uri of the form + * `projects/<project>/instances/<instance>/backups/<backup>`. + * @param sourceBackup Required. The source backup to be copied. The source backup needs to be in + * READY state for it to be copied. Once CopyBackup is in progress, the source backup cannot + * be deleted or cleaned up on expiration until CopyBackup is finished. Values are of the + * form: `projects/<project>/instances/<instance>/backups/<backup>`. + * @param expireTime Required. The expiration time of the backup in microsecond granularity. The + * expiration time must be at least 6 hours and at most 366 days from the `create_time` of the + * source backup. Once the `expire_time` has passed, the backup is eligible to be + * automatically deleted by Cloud Spanner to free the resources used by the backup. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture copyBackupAsync( + InstanceName parent, String backupId, BackupName sourceBackup, Timestamp expireTime) { + CopyBackupRequest request = + CopyBackupRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setBackupId(backupId) + .setSourceBackup(sourceBackup == null ? null : sourceBackup.toString()) + .setExpireTime(expireTime) + .build(); + return copyBackupAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Starts copying a Cloud Spanner Backup. The returned backup [long-running + * operation][google.longrunning.Operation] will have a name of the format + * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>` + * and can be used to track copying of the backup. 
The operation is associated with the + * destination backup. The [metadata][google.longrunning.Operation.metadata] field type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned + * operation will stop the copying and delete the destination backup. Concurrent CopyBackup + * requests can run on the same source backup. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]");
+   *   String backupId = "backupId2121930365";
+   *   String sourceBackup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString();
+   *   Timestamp expireTime = Timestamp.newBuilder().build();
+   *   Backup response =
+   *       databaseAdminClient.copyBackupAsync(parent, backupId, sourceBackup, expireTime).get();
+   * }
+   * }
+ * + * @param parent Required. The name of the destination instance that will contain the backup copy. + * Values are of the form: `projects/<project>/instances/<instance>`. + * @param backupId Required. The id of the backup copy. The `backup_id` appended to `parent` forms + * the full backup_uri of the form + * `projects/<project>/instances/<instance>/backups/<backup>`. + * @param sourceBackup Required. The source backup to be copied. The source backup needs to be in + * READY state for it to be copied. Once CopyBackup is in progress, the source backup cannot + * be deleted or cleaned up on expiration until CopyBackup is finished. Values are of the + * form: `projects/<project>/instances/<instance>/backups/<backup>`. + * @param expireTime Required. The expiration time of the backup in microsecond granularity. The + * expiration time must be at least 6 hours and at most 366 days from the `create_time` of the + * source backup. Once the `expire_time` has passed, the backup is eligible to be + * automatically deleted by Cloud Spanner to free the resources used by the backup. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture copyBackupAsync( + InstanceName parent, String backupId, String sourceBackup, Timestamp expireTime) { + CopyBackupRequest request = + CopyBackupRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setBackupId(backupId) + .setSourceBackup(sourceBackup) + .setExpireTime(expireTime) + .build(); + return copyBackupAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Starts copying a Cloud Spanner Backup. The returned backup [long-running + * operation][google.longrunning.Operation] will have a name of the format + * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>` + * and can be used to track copying of the backup. The operation is associated with the + * destination backup. 
The [metadata][google.longrunning.Operation.metadata] field type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned + * operation will stop the copying and delete the destination backup. Concurrent CopyBackup + * requests can run on the same source backup. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String parent = InstanceName.of("[PROJECT]", "[INSTANCE]").toString();
+   *   String backupId = "backupId2121930365";
+   *   BackupName sourceBackup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]");
+   *   Timestamp expireTime = Timestamp.newBuilder().build();
+   *   Backup response =
+   *       databaseAdminClient.copyBackupAsync(parent, backupId, sourceBackup, expireTime).get();
+   * }
+   * }
+ * + * @param parent Required. The name of the destination instance that will contain the backup copy. + * Values are of the form: `projects/<project>/instances/<instance>`. + * @param backupId Required. The id of the backup copy. The `backup_id` appended to `parent` forms + * the full backup_uri of the form + * `projects/<project>/instances/<instance>/backups/<backup>`. + * @param sourceBackup Required. The source backup to be copied. The source backup needs to be in + * READY state for it to be copied. Once CopyBackup is in progress, the source backup cannot + * be deleted or cleaned up on expiration until CopyBackup is finished. Values are of the + * form: `projects/<project>/instances/<instance>/backups/<backup>`. + * @param expireTime Required. The expiration time of the backup in microsecond granularity. The + * expiration time must be at least 6 hours and at most 366 days from the `create_time` of the + * source backup. Once the `expire_time` has passed, the backup is eligible to be + * automatically deleted by Cloud Spanner to free the resources used by the backup. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture copyBackupAsync( + String parent, String backupId, BackupName sourceBackup, Timestamp expireTime) { + CopyBackupRequest request = + CopyBackupRequest.newBuilder() + .setParent(parent) + .setBackupId(backupId) + .setSourceBackup(sourceBackup == null ? null : sourceBackup.toString()) + .setExpireTime(expireTime) + .build(); + return copyBackupAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Starts copying a Cloud Spanner Backup. The returned backup [long-running + * operation][google.longrunning.Operation] will have a name of the format + * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>` + * and can be used to track copying of the backup. The operation is associated with the + * destination backup. 
The [metadata][google.longrunning.Operation.metadata] field type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned + * operation will stop the copying and delete the destination backup. Concurrent CopyBackup + * requests can run on the same source backup. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String parent = InstanceName.of("[PROJECT]", "[INSTANCE]").toString();
+   *   String backupId = "backupId2121930365";
+   *   String sourceBackup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString();
+   *   Timestamp expireTime = Timestamp.newBuilder().build();
+   *   Backup response =
+   *       databaseAdminClient.copyBackupAsync(parent, backupId, sourceBackup, expireTime).get();
+   * }
+   * }
+ * + * @param parent Required. The name of the destination instance that will contain the backup copy. + * Values are of the form: `projects/<project>/instances/<instance>`. + * @param backupId Required. The id of the backup copy. The `backup_id` appended to `parent` forms + * the full backup_uri of the form + * `projects/<project>/instances/<instance>/backups/<backup>`. + * @param sourceBackup Required. The source backup to be copied. The source backup needs to be in + * READY state for it to be copied. Once CopyBackup is in progress, the source backup cannot + * be deleted or cleaned up on expiration until CopyBackup is finished. Values are of the + * form: `projects/<project>/instances/<instance>/backups/<backup>`. + * @param expireTime Required. The expiration time of the backup in microsecond granularity. The + * expiration time must be at least 6 hours and at most 366 days from the `create_time` of the + * source backup. Once the `expire_time` has passed, the backup is eligible to be + * automatically deleted by Cloud Spanner to free the resources used by the backup. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture copyBackupAsync( + String parent, String backupId, String sourceBackup, Timestamp expireTime) { + CopyBackupRequest request = + CopyBackupRequest.newBuilder() + .setParent(parent) + .setBackupId(backupId) + .setSourceBackup(sourceBackup) + .setExpireTime(expireTime) + .build(); + return copyBackupAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Starts copying a Cloud Spanner Backup. The returned backup [long-running + * operation][google.longrunning.Operation] will have a name of the format + * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>` + * and can be used to track copying of the backup. The operation is associated with the + * destination backup. 
The [metadata][google.longrunning.Operation.metadata] field type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned + * operation will stop the copying and delete the destination backup. Concurrent CopyBackup + * requests can run on the same source backup. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   CopyBackupRequest request =
+   *       CopyBackupRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setBackupId("backupId2121930365")
+   *           .setSourceBackup(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString())
+   *           .setExpireTime(Timestamp.newBuilder().build())
+   *           .setEncryptionConfig(CopyBackupEncryptionConfig.newBuilder().build())
+   *           .build();
+   *   Backup response = databaseAdminClient.copyBackupAsync(request).get();
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture copyBackupAsync( + CopyBackupRequest request) { + return copyBackupOperationCallable().futureCall(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Starts copying a Cloud Spanner Backup. The returned backup [long-running + * operation][google.longrunning.Operation] will have a name of the format + * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>` + * and can be used to track copying of the backup. The operation is associated with the + * destination backup. The [metadata][google.longrunning.Operation.metadata] field type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned + * operation will stop the copying and delete the destination backup. Concurrent CopyBackup + * requests can run on the same source backup. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   CopyBackupRequest request =
+   *       CopyBackupRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setBackupId("backupId2121930365")
+   *           .setSourceBackup(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString())
+   *           .setExpireTime(Timestamp.newBuilder().build())
+   *           .setEncryptionConfig(CopyBackupEncryptionConfig.newBuilder().build())
+   *           .build();
+   *   OperationFuture future =
+   *       databaseAdminClient.copyBackupOperationCallable().futureCall(request);
+   *   // Do something.
+   *   Backup response = future.get();
+   * }
+   * }
+ */ + public final OperationCallable + copyBackupOperationCallable() { + return stub.copyBackupOperationCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Starts copying a Cloud Spanner Backup. The returned backup [long-running + * operation][google.longrunning.Operation] will have a name of the format + * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>` + * and can be used to track copying of the backup. The operation is associated with the + * destination backup. The [metadata][google.longrunning.Operation.metadata] field type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned + * operation will stop the copying and delete the destination backup. Concurrent CopyBackup + * requests can run on the same source backup. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   CopyBackupRequest request =
+   *       CopyBackupRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setBackupId("backupId2121930365")
+   *           .setSourceBackup(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString())
+   *           .setExpireTime(Timestamp.newBuilder().build())
+   *           .setEncryptionConfig(CopyBackupEncryptionConfig.newBuilder().build())
+   *           .build();
+   *   ApiFuture future = databaseAdminClient.copyBackupCallable().futureCall(request);
+   *   // Do something.
+   *   Operation response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable copyBackupCallable() { + return stub.copyBackupCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets metadata on a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   BackupName name = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]");
+   *   Backup response = databaseAdminClient.getBackup(name);
+   * }
+   * }
+ * + * @param name Required. Name of the backup. Values are of the form + * `projects/<project>/instances/<instance>/backups/<backup>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Backup getBackup(BackupName name) { + GetBackupRequest request = + GetBackupRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + return getBackup(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets metadata on a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String name = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString();
+   *   Backup response = databaseAdminClient.getBackup(name);
+   * }
+   * }
+ * + * @param name Required. Name of the backup. Values are of the form + * `projects/<project>/instances/<instance>/backups/<backup>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Backup getBackup(String name) { + GetBackupRequest request = GetBackupRequest.newBuilder().setName(name).build(); + return getBackup(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets metadata on a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   GetBackupRequest request =
+   *       GetBackupRequest.newBuilder()
+   *           .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString())
+   *           .build();
+   *   Backup response = databaseAdminClient.getBackup(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Backup getBackup(GetBackupRequest request) { + return getBackupCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets metadata on a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   GetBackupRequest request =
+   *       GetBackupRequest.newBuilder()
+   *           .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString())
+   *           .build();
+   *   ApiFuture future = databaseAdminClient.getBackupCallable().futureCall(request);
+   *   // Do something.
+   *   Backup response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable getBackupCallable() { + return stub.getBackupCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   Backup backup = Backup.newBuilder().build();
+   *   FieldMask updateMask = FieldMask.newBuilder().build();
+   *   Backup response = databaseAdminClient.updateBackup(backup, updateMask);
+   * }
+   * }
+ * + * @param backup Required. The backup to update. `backup.name`, and the fields to be updated as + * specified by `update_mask` are required. Other fields are ignored. Update is only supported + * for the following fields: * `backup.expire_time`. + * @param updateMask Required. A mask specifying which fields (e.g. `expire_time`) in the Backup + * resource should be updated. This mask is relative to the Backup resource, not to the + * request message. The field mask must always be specified; this prevents any future fields + * from being erased accidentally by clients that do not know about them. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Backup updateBackup(Backup backup, FieldMask updateMask) { + UpdateBackupRequest request = + UpdateBackupRequest.newBuilder().setBackup(backup).setUpdateMask(updateMask).build(); + return updateBackup(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   UpdateBackupRequest request =
+   *       UpdateBackupRequest.newBuilder()
+   *           .setBackup(Backup.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   Backup response = databaseAdminClient.updateBackup(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Backup updateBackup(UpdateBackupRequest request) { + return updateBackupCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   UpdateBackupRequest request =
+   *       UpdateBackupRequest.newBuilder()
+   *           .setBackup(Backup.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   ApiFuture future = databaseAdminClient.updateBackupCallable().futureCall(request);
+   *   // Do something.
+   *   Backup response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable updateBackupCallable() { + return stub.updateBackupCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   BackupName name = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]");
+   *   databaseAdminClient.deleteBackup(name);
+   * }
+   * }
+ * + * @param name Required. Name of the backup to delete. Values are of the form + * `projects/<project>/instances/<instance>/backups/<backup>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteBackup(BackupName name) { + DeleteBackupRequest request = + DeleteBackupRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + deleteBackup(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String name = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString();
+   *   databaseAdminClient.deleteBackup(name);
+   * }
+   * }
+ * + * @param name Required. Name of the backup to delete. Values are of the form + * `projects/<project>/instances/<instance>/backups/<backup>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteBackup(String name) { + DeleteBackupRequest request = DeleteBackupRequest.newBuilder().setName(name).build(); + deleteBackup(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   DeleteBackupRequest request =
+   *       DeleteBackupRequest.newBuilder()
+   *           .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString())
+   *           .build();
+   *   databaseAdminClient.deleteBackup(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteBackup(DeleteBackupRequest request) { + deleteBackupCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   DeleteBackupRequest request =
+   *       DeleteBackupRequest.newBuilder()
+   *           .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString())
+   *           .build();
+   *   ApiFuture future = databaseAdminClient.deleteBackupCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }
+ */ + public final UnaryCallable deleteBackupCallable() { + return stub.deleteBackupCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists completed and pending backups. Backups returned are ordered by `create_time` in + * descending order, starting from the most recent `create_time`. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]");
+   *   for (Backup element : databaseAdminClient.listBackups(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The instance to list backups from. Values are of the form + * `projects/<project>/instances/<instance>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListBackupsPagedResponse listBackups(InstanceName parent) { + ListBackupsRequest request = + ListBackupsRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listBackups(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists completed and pending backups. Backups returned are ordered by `create_time` in + * descending order, starting from the most recent `create_time`. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String parent = InstanceName.of("[PROJECT]", "[INSTANCE]").toString();
+   *   for (Backup element : databaseAdminClient.listBackups(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The instance to list backups from. Values are of the form + * `projects/<project>/instances/<instance>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListBackupsPagedResponse listBackups(String parent) { + ListBackupsRequest request = ListBackupsRequest.newBuilder().setParent(parent).build(); + return listBackups(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists completed and pending backups. Backups returned are ordered by `create_time` in + * descending order, starting from the most recent `create_time`. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   ListBackupsRequest request =
+   *       ListBackupsRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setFilter("filter-1274492040")
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   for (Backup element : databaseAdminClient.listBackups(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListBackupsPagedResponse listBackups(ListBackupsRequest request) { + return listBackupsPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists completed and pending backups. Backups returned are ordered by `create_time` in + * descending order, starting from the most recent `create_time`. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   ListBackupsRequest request =
+   *       ListBackupsRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setFilter("filter-1274492040")
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   ApiFuture future = databaseAdminClient.listBackupsPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (Backup element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listBackupsPagedCallable() { + return stub.listBackupsPagedCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists completed and pending backups. Backups returned are ordered by `create_time` in + * descending order, starting from the most recent `create_time`. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   ListBackupsRequest request =
+   *       ListBackupsRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setFilter("filter-1274492040")
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   while (true) {
+   *     ListBackupsResponse response = databaseAdminClient.listBackupsCallable().call(request);
+   *     for (Backup element : response.getBackupsList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable listBackupsCallable() { + return stub.listBackupsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Create a new database by restoring from a completed backup. The new database must be in the + * same project and in an instance with the same instance configuration as the instance containing + * the backup. The returned database [long-running operation][google.longrunning.Operation] has a + * name of the format + * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`, + * and can be used to track the progress of the operation, and to cancel it. The + * [metadata][google.longrunning.Operation.metadata] field type is + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. The + * [response][google.longrunning.Operation.response] type is + * [Database][google.spanner.admin.database.v1.Database], if successful. Cancelling the returned + * operation will stop the restore and delete the database. There can be only one database being + * restored into an instance at a time. Once the restore operation completes, a new restore + * operation can be initiated, without waiting for the optimize operation associated with the + * first restore to complete. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]");
+   *   String databaseId = "databaseId1688905718";
+   *   BackupName backup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]");
+   *   Database response =
+   *       databaseAdminClient.restoreDatabaseAsync(parent, databaseId, backup).get();
+   * }
+   * }
+ * + * @param parent Required. The name of the instance in which to create the restored database. This + * instance must be in the same project and have the same instance configuration as the + * instance containing the source backup. Values are of the form + * `projects/<project>/instances/<instance>`. + * @param databaseId Required. The id of the database to create and restore to. This database must + * not already exist. The `database_id` appended to `parent` forms the full database name of + * the form + * `projects/<project>/instances/<instance>/databases/<database_id>`. + * @param backup Name of the backup from which to restore. Values are of the form + * `projects/<project>/instances/<instance>/backups/<backup>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture restoreDatabaseAsync( + InstanceName parent, String databaseId, BackupName backup) { + RestoreDatabaseRequest request = + RestoreDatabaseRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setDatabaseId(databaseId) + .setBackup(backup == null ? null : backup.toString()) + .build(); + return restoreDatabaseAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Create a new database by restoring from a completed backup. The new database must be in the + * same project and in an instance with the same instance configuration as the instance containing + * the backup. The returned database [long-running operation][google.longrunning.Operation] has a + * name of the format + * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`, + * and can be used to track the progress of the operation, and to cancel it. The + * [metadata][google.longrunning.Operation.metadata] field type is + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. 
The + * [response][google.longrunning.Operation.response] type is + * [Database][google.spanner.admin.database.v1.Database], if successful. Cancelling the returned + * operation will stop the restore and delete the database. There can be only one database being + * restored into an instance at a time. Once the restore operation completes, a new restore + * operation can be initiated, without waiting for the optimize operation associated with the + * first restore to complete. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]");
+   *   String databaseId = "databaseId1688905718";
+   *   String backup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString();
+   *   Database response =
+   *       databaseAdminClient.restoreDatabaseAsync(parent, databaseId, backup).get();
+   * }
+   * }
+ * + * @param parent Required. The name of the instance in which to create the restored database. This + * instance must be in the same project and have the same instance configuration as the + * instance containing the source backup. Values are of the form + * `projects/<project>/instances/<instance>`. + * @param databaseId Required. The id of the database to create and restore to. This database must + * not already exist. The `database_id` appended to `parent` forms the full database name of + * the form + * `projects/<project>/instances/<instance>/databases/<database_id>`. + * @param backup Name of the backup from which to restore. Values are of the form + * `projects/<project>/instances/<instance>/backups/<backup>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture restoreDatabaseAsync( + InstanceName parent, String databaseId, String backup) { + RestoreDatabaseRequest request = + RestoreDatabaseRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setDatabaseId(databaseId) + .setBackup(backup) + .build(); + return restoreDatabaseAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Create a new database by restoring from a completed backup. The new database must be in the + * same project and in an instance with the same instance configuration as the instance containing + * the backup. The returned database [long-running operation][google.longrunning.Operation] has a + * name of the format + * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`, + * and can be used to track the progress of the operation, and to cancel it. The + * [metadata][google.longrunning.Operation.metadata] field type is + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. 
The + * [response][google.longrunning.Operation.response] type is + * [Database][google.spanner.admin.database.v1.Database], if successful. Cancelling the returned + * operation will stop the restore and delete the database. There can be only one database being + * restored into an instance at a time. Once the restore operation completes, a new restore + * operation can be initiated, without waiting for the optimize operation associated with the + * first restore to complete. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String parent = InstanceName.of("[PROJECT]", "[INSTANCE]").toString();
+   *   String databaseId = "databaseId1688905718";
+   *   BackupName backup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]");
+   *   Database response =
+   *       databaseAdminClient.restoreDatabaseAsync(parent, databaseId, backup).get();
+   * }
+   * }
+ * + * @param parent Required. The name of the instance in which to create the restored database. This + * instance must be in the same project and have the same instance configuration as the + * instance containing the source backup. Values are of the form + * `projects/<project>/instances/<instance>`. + * @param databaseId Required. The id of the database to create and restore to. This database must + * not already exist. The `database_id` appended to `parent` forms the full database name of + * the form + * `projects/<project>/instances/<instance>/databases/<database_id>`. + * @param backup Name of the backup from which to restore. Values are of the form + * `projects/<project>/instances/<instance>/backups/<backup>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture restoreDatabaseAsync( + String parent, String databaseId, BackupName backup) { + RestoreDatabaseRequest request = + RestoreDatabaseRequest.newBuilder() + .setParent(parent) + .setDatabaseId(databaseId) + .setBackup(backup == null ? null : backup.toString()) + .build(); + return restoreDatabaseAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Create a new database by restoring from a completed backup. The new database must be in the + * same project and in an instance with the same instance configuration as the instance containing + * the backup. The returned database [long-running operation][google.longrunning.Operation] has a + * name of the format + * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`, + * and can be used to track the progress of the operation, and to cancel it. The + * [metadata][google.longrunning.Operation.metadata] field type is + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. The + * [response][google.longrunning.Operation.response] type is + * [Database][google.spanner.admin.database.v1.Database], if successful. 
Cancelling the returned + * operation will stop the restore and delete the database. There can be only one database being + * restored into an instance at a time. Once the restore operation completes, a new restore + * operation can be initiated, without waiting for the optimize operation associated with the + * first restore to complete. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String parent = InstanceName.of("[PROJECT]", "[INSTANCE]").toString();
+   *   String databaseId = "databaseId1688905718";
+   *   String backup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString();
+   *   Database response =
+   *       databaseAdminClient.restoreDatabaseAsync(parent, databaseId, backup).get();
+   * }
+   * }
+ * + * @param parent Required. The name of the instance in which to create the restored database. This + * instance must be in the same project and have the same instance configuration as the + * instance containing the source backup. Values are of the form + * `projects/<project>/instances/<instance>`. + * @param databaseId Required. The id of the database to create and restore to. This database must + * not already exist. The `database_id` appended to `parent` forms the full database name of + * the form + * `projects/<project>/instances/<instance>/databases/<database_id>`. + * @param backup Name of the backup from which to restore. Values are of the form + * `projects/<project>/instances/<instance>/backups/<backup>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture restoreDatabaseAsync( + String parent, String databaseId, String backup) { + RestoreDatabaseRequest request = + RestoreDatabaseRequest.newBuilder() + .setParent(parent) + .setDatabaseId(databaseId) + .setBackup(backup) + .build(); + return restoreDatabaseAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Create a new database by restoring from a completed backup. The new database must be in the + * same project and in an instance with the same instance configuration as the instance containing + * the backup. The returned database [long-running operation][google.longrunning.Operation] has a + * name of the format + * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`, + * and can be used to track the progress of the operation, and to cancel it. The + * [metadata][google.longrunning.Operation.metadata] field type is + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. The + * [response][google.longrunning.Operation.response] type is + * [Database][google.spanner.admin.database.v1.Database], if successful. 
Cancelling the returned + * operation will stop the restore and delete the database. There can be only one database being + * restored into an instance at a time. Once the restore operation completes, a new restore + * operation can be initiated, without waiting for the optimize operation associated with the + * first restore to complete. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   RestoreDatabaseRequest request =
+   *       RestoreDatabaseRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setDatabaseId("databaseId1688905718")
+   *           .setEncryptionConfig(RestoreDatabaseEncryptionConfig.newBuilder().build())
+   *           .build();
+   *   Database response = databaseAdminClient.restoreDatabaseAsync(request).get();
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture restoreDatabaseAsync( + RestoreDatabaseRequest request) { + return restoreDatabaseOperationCallable().futureCall(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Create a new database by restoring from a completed backup. The new database must be in the + * same project and in an instance with the same instance configuration as the instance containing + * the backup. The returned database [long-running operation][google.longrunning.Operation] has a + * name of the format + * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`, + * and can be used to track the progress of the operation, and to cancel it. The + * [metadata][google.longrunning.Operation.metadata] field type is + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. The + * [response][google.longrunning.Operation.response] type is + * [Database][google.spanner.admin.database.v1.Database], if successful. Cancelling the returned + * operation will stop the restore and delete the database. There can be only one database being + * restored into an instance at a time. Once the restore operation completes, a new restore + * operation can be initiated, without waiting for the optimize operation associated with the + * first restore to complete. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   RestoreDatabaseRequest request =
+   *       RestoreDatabaseRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setDatabaseId("databaseId1688905718")
+   *           .setEncryptionConfig(RestoreDatabaseEncryptionConfig.newBuilder().build())
+   *           .build();
+   *   OperationFuture future =
+   *       databaseAdminClient.restoreDatabaseOperationCallable().futureCall(request);
+   *   // Do something.
+   *   Database response = future.get();
+   * }
+   * }
+ */ + public final OperationCallable + restoreDatabaseOperationCallable() { + return stub.restoreDatabaseOperationCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Create a new database by restoring from a completed backup. The new database must be in the + * same project and in an instance with the same instance configuration as the instance containing + * the backup. The returned database [long-running operation][google.longrunning.Operation] has a + * name of the format + * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`, + * and can be used to track the progress of the operation, and to cancel it. The + * [metadata][google.longrunning.Operation.metadata] field type is + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. The + * [response][google.longrunning.Operation.response] type is + * [Database][google.spanner.admin.database.v1.Database], if successful. Cancelling the returned + * operation will stop the restore and delete the database. There can be only one database being + * restored into an instance at a time. Once the restore operation completes, a new restore + * operation can be initiated, without waiting for the optimize operation associated with the + * first restore to complete. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   RestoreDatabaseRequest request =
+   *       RestoreDatabaseRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setDatabaseId("databaseId1688905718")
+   *           .setEncryptionConfig(RestoreDatabaseEncryptionConfig.newBuilder().build())
+   *           .build();
+   *   ApiFuture future =
+   *       databaseAdminClient.restoreDatabaseCallable().futureCall(request);
+   *   // Do something.
+   *   Operation response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable restoreDatabaseCallable() { + return stub.restoreDatabaseCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists database [longrunning-operations][google.longrunning.Operation]. A database operation has + * a name of the form + * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`. + * The long-running operation [metadata][google.longrunning.Operation.metadata] field type + * `metadata.type_url` describes the type of the metadata. Operations returned include those that + * have completed/failed/canceled within the last 7 days, and pending operations. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]");
+   *   for (Operation element : databaseAdminClient.listDatabaseOperations(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The instance of the database operations. Values are of the form + * `projects/<project>/instances/<instance>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListDatabaseOperationsPagedResponse listDatabaseOperations(InstanceName parent) { + ListDatabaseOperationsRequest request = + ListDatabaseOperationsRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listDatabaseOperations(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists database [longrunning-operations][google.longrunning.Operation]. A database operation has + * a name of the form + * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`. + * The long-running operation [metadata][google.longrunning.Operation.metadata] field type + * `metadata.type_url` describes the type of the metadata. Operations returned include those that + * have completed/failed/canceled within the last 7 days, and pending operations. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String parent = InstanceName.of("[PROJECT]", "[INSTANCE]").toString();
+   *   for (Operation element : databaseAdminClient.listDatabaseOperations(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The instance of the database operations. Values are of the form + * `projects/<project>/instances/<instance>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListDatabaseOperationsPagedResponse listDatabaseOperations(String parent) { + ListDatabaseOperationsRequest request = + ListDatabaseOperationsRequest.newBuilder().setParent(parent).build(); + return listDatabaseOperations(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists database [longrunning-operations][google.longrunning.Operation]. A database operation has + * a name of the form + * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`. + * The long-running operation [metadata][google.longrunning.Operation.metadata] field type + * `metadata.type_url` describes the type of the metadata. Operations returned include those that + * have completed/failed/canceled within the last 7 days, and pending operations. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   ListDatabaseOperationsRequest request =
+   *       ListDatabaseOperationsRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setFilter("filter-1274492040")
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   for (Operation element : databaseAdminClient.listDatabaseOperations(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListDatabaseOperationsPagedResponse listDatabaseOperations( + ListDatabaseOperationsRequest request) { + return listDatabaseOperationsPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists database [longrunning-operations][google.longrunning.Operation]. A database operation has + * a name of the form + * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`. + * The long-running operation [metadata][google.longrunning.Operation.metadata] field type + * `metadata.type_url` describes the type of the metadata. Operations returned include those that + * have completed/failed/canceled within the last 7 days, and pending operations. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   ListDatabaseOperationsRequest request =
+   *       ListDatabaseOperationsRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setFilter("filter-1274492040")
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   ApiFuture future =
+   *       databaseAdminClient.listDatabaseOperationsPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (Operation element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listDatabaseOperationsPagedCallable() { + return stub.listDatabaseOperationsPagedCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists database [longrunning-operations][google.longrunning.Operation]. A database operation has + * a name of the form + * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`. + * The long-running operation [metadata][google.longrunning.Operation.metadata] field type + * `metadata.type_url` describes the type of the metadata. Operations returned include those that + * have completed/failed/canceled within the last 7 days, and pending operations. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   ListDatabaseOperationsRequest request =
+   *       ListDatabaseOperationsRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setFilter("filter-1274492040")
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   while (true) {
+   *     ListDatabaseOperationsResponse response =
+   *         databaseAdminClient.listDatabaseOperationsCallable().call(request);
+   *     for (Operation element : response.getOperationsList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listDatabaseOperationsCallable() { + return stub.listDatabaseOperationsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists the backup [long-running operations][google.longrunning.Operation] in the given instance. + * A backup operation has a name of the form + * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>`. + * The long-running operation [metadata][google.longrunning.Operation.metadata] field type + * `metadata.type_url` describes the type of the metadata. Operations returned include those that + * have completed/failed/canceled within the last 7 days, and pending operations. Operations + * returned are ordered by `operation.metadata.value.progress.start_time` in descending order + * starting from the most recently started operation. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]");
+   *   for (Operation element : databaseAdminClient.listBackupOperations(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The instance of the backup operations. Values are of the form + * `projects/<project>/instances/<instance>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListBackupOperationsPagedResponse listBackupOperations(InstanceName parent) { + ListBackupOperationsRequest request = + ListBackupOperationsRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listBackupOperations(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists the backup [long-running operations][google.longrunning.Operation] in the given instance. + * A backup operation has a name of the form + * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>`. + * The long-running operation [metadata][google.longrunning.Operation.metadata] field type + * `metadata.type_url` describes the type of the metadata. Operations returned include those that + * have completed/failed/canceled within the last 7 days, and pending operations. Operations + * returned are ordered by `operation.metadata.value.progress.start_time` in descending order + * starting from the most recently started operation. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String parent = InstanceName.of("[PROJECT]", "[INSTANCE]").toString();
+   *   for (Operation element : databaseAdminClient.listBackupOperations(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The instance of the backup operations. Values are of the form + * `projects/<project>/instances/<instance>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListBackupOperationsPagedResponse listBackupOperations(String parent) { + ListBackupOperationsRequest request = + ListBackupOperationsRequest.newBuilder().setParent(parent).build(); + return listBackupOperations(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists the backup [long-running operations][google.longrunning.Operation] in the given instance. + * A backup operation has a name of the form + * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>`. + * The long-running operation [metadata][google.longrunning.Operation.metadata] field type + * `metadata.type_url` describes the type of the metadata. Operations returned include those that + * have completed/failed/canceled within the last 7 days, and pending operations. Operations + * returned are ordered by `operation.metadata.value.progress.start_time` in descending order + * starting from the most recently started operation. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   ListBackupOperationsRequest request =
+   *       ListBackupOperationsRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setFilter("filter-1274492040")
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   for (Operation element : databaseAdminClient.listBackupOperations(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListBackupOperationsPagedResponse listBackupOperations( + ListBackupOperationsRequest request) { + return listBackupOperationsPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists the backup [long-running operations][google.longrunning.Operation] in the given instance. + * A backup operation has a name of the form + * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>`. + * The long-running operation [metadata][google.longrunning.Operation.metadata] field type + * `metadata.type_url` describes the type of the metadata. Operations returned include those that + * have completed/failed/canceled within the last 7 days, and pending operations. Operations + * returned are ordered by `operation.metadata.value.progress.start_time` in descending order + * starting from the most recently started operation. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   ListBackupOperationsRequest request =
+   *       ListBackupOperationsRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setFilter("filter-1274492040")
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   ApiFuture future =
+   *       databaseAdminClient.listBackupOperationsPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (Operation element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listBackupOperationsPagedCallable() { + return stub.listBackupOperationsPagedCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists the backup [long-running operations][google.longrunning.Operation] in the given instance. + * A backup operation has a name of the form + * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>`. + * The long-running operation [metadata][google.longrunning.Operation.metadata] field type + * `metadata.type_url` describes the type of the metadata. Operations returned include those that + * have completed/failed/canceled within the last 7 days, and pending operations. Operations + * returned are ordered by `operation.metadata.value.progress.start_time` in descending order + * starting from the most recently started operation. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   ListBackupOperationsRequest request =
+   *       ListBackupOperationsRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setFilter("filter-1274492040")
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   while (true) {
+   *     ListBackupOperationsResponse response =
+   *         databaseAdminClient.listBackupOperationsCallable().call(request);
+   *     for (Operation element : response.getOperationsList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listBackupOperationsCallable() { + return stub.listBackupOperationsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists Cloud Spanner database roles. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]");
+   *   for (DatabaseRole element : databaseAdminClient.listDatabaseRoles(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The database whose roles should be listed. Values are of the form + * `projects/<project>/instances/<instance>/databases/<database>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListDatabaseRolesPagedResponse listDatabaseRoles(DatabaseName parent) { + ListDatabaseRolesRequest request = + ListDatabaseRolesRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listDatabaseRoles(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists Cloud Spanner database roles. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString();
+   *   for (DatabaseRole element : databaseAdminClient.listDatabaseRoles(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The database whose roles should be listed. Values are of the form + * `projects/<project>/instances/<instance>/databases/<database>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListDatabaseRolesPagedResponse listDatabaseRoles(String parent) { + ListDatabaseRolesRequest request = + ListDatabaseRolesRequest.newBuilder().setParent(parent).build(); + return listDatabaseRoles(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists Cloud Spanner database roles. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   ListDatabaseRolesRequest request =
+   *       ListDatabaseRolesRequest.newBuilder()
+   *           .setParent(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   for (DatabaseRole element : databaseAdminClient.listDatabaseRoles(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListDatabaseRolesPagedResponse listDatabaseRoles(ListDatabaseRolesRequest request) { + return listDatabaseRolesPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists Cloud Spanner database roles. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   ListDatabaseRolesRequest request =
+   *       ListDatabaseRolesRequest.newBuilder()
+   *           .setParent(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   ApiFuture future =
+   *       databaseAdminClient.listDatabaseRolesPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (DatabaseRole element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listDatabaseRolesPagedCallable() { + return stub.listDatabaseRolesPagedCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists Cloud Spanner database roles. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   ListDatabaseRolesRequest request =
+   *       ListDatabaseRolesRequest.newBuilder()
+   *           .setParent(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   while (true) {
+   *     ListDatabaseRolesResponse response =
+   *         databaseAdminClient.listDatabaseRolesCallable().call(request);
+   *     for (DatabaseRole element : response.getDatabaseRolesList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listDatabaseRolesCallable() { + return stub.listDatabaseRolesCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Adds split points to specified tables, indexes of a database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]");
+   *   List splitPoints = new ArrayList<>();
+   *   AddSplitPointsResponse response = databaseAdminClient.addSplitPoints(database, splitPoints);
+   * }
+   * }
+ * + * @param database Required. The database on whose tables/indexes split points are to be added. + * Values are of the form + * `projects/<project>/instances/<instance>/databases/<database>`. + * @param splitPoints Required. The split points to add. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final AddSplitPointsResponse addSplitPoints( + DatabaseName database, List splitPoints) { + AddSplitPointsRequest request = + AddSplitPointsRequest.newBuilder() + .setDatabase(database == null ? null : database.toString()) + .addAllSplitPoints(splitPoints) + .build(); + return addSplitPoints(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Adds split points to specified tables, indexes of a database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString();
+   *   List splitPoints = new ArrayList<>();
+   *   AddSplitPointsResponse response = databaseAdminClient.addSplitPoints(database, splitPoints);
+   * }
+   * }
+ * + * @param database Required. The database on whose tables/indexes split points are to be added. + * Values are of the form + * `projects/<project>/instances/<instance>/databases/<database>`. + * @param splitPoints Required. The split points to add. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final AddSplitPointsResponse addSplitPoints( + String database, List splitPoints) { + AddSplitPointsRequest request = + AddSplitPointsRequest.newBuilder() + .setDatabase(database) + .addAllSplitPoints(splitPoints) + .build(); + return addSplitPoints(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Adds split points to specified tables, indexes of a database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   AddSplitPointsRequest request =
+   *       AddSplitPointsRequest.newBuilder()
+   *           .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .addAllSplitPoints(new ArrayList())
+   *           .setInitiator("initiator-248987089")
+   *           .build();
+   *   AddSplitPointsResponse response = databaseAdminClient.addSplitPoints(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final AddSplitPointsResponse addSplitPoints(AddSplitPointsRequest request) { + return addSplitPointsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Adds split points to specified tables, indexes of a database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   AddSplitPointsRequest request =
+   *       AddSplitPointsRequest.newBuilder()
+   *           .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .addAllSplitPoints(new ArrayList())
+   *           .setInitiator("initiator-248987089")
+   *           .build();
+   *   ApiFuture future =
+   *       databaseAdminClient.addSplitPointsCallable().futureCall(request);
+   *   // Do something.
+   *   AddSplitPointsResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + addSplitPointsCallable() { + return stub.addSplitPointsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new backup schedule. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]");
+   *   BackupSchedule backupSchedule = BackupSchedule.newBuilder().build();
+   *   String backupScheduleId = "backupScheduleId1704974708";
+   *   BackupSchedule response =
+   *       databaseAdminClient.createBackupSchedule(parent, backupSchedule, backupScheduleId);
+   * }
+   * }
+ * + * @param parent Required. The name of the database that this backup schedule applies to. + * @param backupSchedule Required. The backup schedule to create. + * @param backupScheduleId Required. The Id to use for the backup schedule. The + * `backup_schedule_id` appended to `parent` forms the full backup schedule name of the form + * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BackupSchedule createBackupSchedule( + DatabaseName parent, BackupSchedule backupSchedule, String backupScheduleId) { + CreateBackupScheduleRequest request = + CreateBackupScheduleRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setBackupSchedule(backupSchedule) + .setBackupScheduleId(backupScheduleId) + .build(); + return createBackupSchedule(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new backup schedule. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString();
+   *   BackupSchedule backupSchedule = BackupSchedule.newBuilder().build();
+   *   String backupScheduleId = "backupScheduleId1704974708";
+   *   BackupSchedule response =
+   *       databaseAdminClient.createBackupSchedule(parent, backupSchedule, backupScheduleId);
+   * }
+   * }
+ * + * @param parent Required. The name of the database that this backup schedule applies to. + * @param backupSchedule Required. The backup schedule to create. + * @param backupScheduleId Required. The Id to use for the backup schedule. The + * `backup_schedule_id` appended to `parent` forms the full backup schedule name of the form + * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BackupSchedule createBackupSchedule( + String parent, BackupSchedule backupSchedule, String backupScheduleId) { + CreateBackupScheduleRequest request = + CreateBackupScheduleRequest.newBuilder() + .setParent(parent) + .setBackupSchedule(backupSchedule) + .setBackupScheduleId(backupScheduleId) + .build(); + return createBackupSchedule(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new backup schedule. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   CreateBackupScheduleRequest request =
+   *       CreateBackupScheduleRequest.newBuilder()
+   *           .setParent(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .setBackupScheduleId("backupScheduleId1704974708")
+   *           .setBackupSchedule(BackupSchedule.newBuilder().build())
+   *           .build();
+   *   BackupSchedule response = databaseAdminClient.createBackupSchedule(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BackupSchedule createBackupSchedule(CreateBackupScheduleRequest request) { + return createBackupScheduleCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new backup schedule. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   CreateBackupScheduleRequest request =
+   *       CreateBackupScheduleRequest.newBuilder()
+   *           .setParent(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .setBackupScheduleId("backupScheduleId1704974708")
+   *           .setBackupSchedule(BackupSchedule.newBuilder().build())
+   *           .build();
+   *   ApiFuture future =
+   *       databaseAdminClient.createBackupScheduleCallable().futureCall(request);
+   *   // Do something.
+   *   BackupSchedule response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + createBackupScheduleCallable() { + return stub.createBackupScheduleCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets backup schedule for the input schedule name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   BackupScheduleName name =
+   *       BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]");
+   *   BackupSchedule response = databaseAdminClient.getBackupSchedule(name);
+   * }
+   * }
+ * + * @param name Required. The name of the schedule to retrieve. Values are of the form + * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BackupSchedule getBackupSchedule(BackupScheduleName name) { + GetBackupScheduleRequest request = + GetBackupScheduleRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + return getBackupSchedule(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets backup schedule for the input schedule name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String name =
+   *       BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]").toString();
+   *   BackupSchedule response = databaseAdminClient.getBackupSchedule(name);
+   * }
+   * }
+ * + * @param name Required. The name of the schedule to retrieve. Values are of the form + * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BackupSchedule getBackupSchedule(String name) { + GetBackupScheduleRequest request = GetBackupScheduleRequest.newBuilder().setName(name).build(); + return getBackupSchedule(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets backup schedule for the input schedule name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   GetBackupScheduleRequest request =
+   *       GetBackupScheduleRequest.newBuilder()
+   *           .setName(
+   *               BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]")
+   *                   .toString())
+   *           .build();
+   *   BackupSchedule response = databaseAdminClient.getBackupSchedule(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BackupSchedule getBackupSchedule(GetBackupScheduleRequest request) { + return getBackupScheduleCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets backup schedule for the input schedule name. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   GetBackupScheduleRequest request =
+   *       GetBackupScheduleRequest.newBuilder()
+   *           .setName(
+   *               BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]")
+   *                   .toString())
+   *           .build();
+   *   ApiFuture future =
+   *       databaseAdminClient.getBackupScheduleCallable().futureCall(request);
+   *   // Do something.
+   *   BackupSchedule response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable getBackupScheduleCallable() { + return stub.getBackupScheduleCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates a backup schedule. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   BackupSchedule backupSchedule = BackupSchedule.newBuilder().build();
+   *   FieldMask updateMask = FieldMask.newBuilder().build();
+   *   BackupSchedule response =
+   *       databaseAdminClient.updateBackupSchedule(backupSchedule, updateMask);
+   * }
+   * }
+ * + * @param backupSchedule Required. The backup schedule to update. `backup_schedule.name`, and the + * fields to be updated as specified by `update_mask` are required. Other fields are ignored. + * @param updateMask Required. A mask specifying which fields in the BackupSchedule resource + * should be updated. This mask is relative to the BackupSchedule resource, not to the request + * message. The field mask must always be specified; this prevents any future fields from + * being erased accidentally. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BackupSchedule updateBackupSchedule( + BackupSchedule backupSchedule, FieldMask updateMask) { + UpdateBackupScheduleRequest request = + UpdateBackupScheduleRequest.newBuilder() + .setBackupSchedule(backupSchedule) + .setUpdateMask(updateMask) + .build(); + return updateBackupSchedule(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates a backup schedule. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   UpdateBackupScheduleRequest request =
+   *       UpdateBackupScheduleRequest.newBuilder()
+   *           .setBackupSchedule(BackupSchedule.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   BackupSchedule response = databaseAdminClient.updateBackupSchedule(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BackupSchedule updateBackupSchedule(UpdateBackupScheduleRequest request) { + return updateBackupScheduleCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates a backup schedule. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   UpdateBackupScheduleRequest request =
+   *       UpdateBackupScheduleRequest.newBuilder()
+   *           .setBackupSchedule(BackupSchedule.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   ApiFuture future =
+   *       databaseAdminClient.updateBackupScheduleCallable().futureCall(request);
+   *   // Do something.
+   *   BackupSchedule response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + updateBackupScheduleCallable() { + return stub.updateBackupScheduleCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes a backup schedule. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   BackupScheduleName name =
+   *       BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]");
+   *   databaseAdminClient.deleteBackupSchedule(name);
+   * }
+   * }
+ * + * @param name Required. The name of the schedule to delete. Values are of the form + * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteBackupSchedule(BackupScheduleName name) { + DeleteBackupScheduleRequest request = + DeleteBackupScheduleRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + deleteBackupSchedule(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes a backup schedule. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String name =
+   *       BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]").toString();
+   *   databaseAdminClient.deleteBackupSchedule(name);
+   * }
+   * }
+ * + * @param name Required. The name of the schedule to delete. Values are of the form + * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteBackupSchedule(String name) { + DeleteBackupScheduleRequest request = + DeleteBackupScheduleRequest.newBuilder().setName(name).build(); + deleteBackupSchedule(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes a backup schedule. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   DeleteBackupScheduleRequest request =
+   *       DeleteBackupScheduleRequest.newBuilder()
+   *           .setName(
+   *               BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]")
+   *                   .toString())
+   *           .build();
+   *   databaseAdminClient.deleteBackupSchedule(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteBackupSchedule(DeleteBackupScheduleRequest request) { + deleteBackupScheduleCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes a backup schedule. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   DeleteBackupScheduleRequest request =
+   *       DeleteBackupScheduleRequest.newBuilder()
+   *           .setName(
+   *               BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]")
+   *                   .toString())
+   *           .build();
+   *   ApiFuture future =
+   *       databaseAdminClient.deleteBackupScheduleCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }
+ */ + public final UnaryCallable deleteBackupScheduleCallable() { + return stub.deleteBackupScheduleCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all the backup schedules for the database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]");
+   *   for (BackupSchedule element : databaseAdminClient.listBackupSchedules(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. Database is the parent resource whose backup schedules should be + * listed. Values are of the form + * projects/<project>/instances/<instance>/databases/<database> + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListBackupSchedulesPagedResponse listBackupSchedules(DatabaseName parent) { + ListBackupSchedulesRequest request = + ListBackupSchedulesRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listBackupSchedules(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all the backup schedules for the database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString();
+   *   for (BackupSchedule element : databaseAdminClient.listBackupSchedules(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. Database is the parent resource whose backup schedules should be + * listed. Values are of the form + * projects/<project>/instances/<instance>/databases/<database> + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListBackupSchedulesPagedResponse listBackupSchedules(String parent) { + ListBackupSchedulesRequest request = + ListBackupSchedulesRequest.newBuilder().setParent(parent).build(); + return listBackupSchedules(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all the backup schedules for the database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   ListBackupSchedulesRequest request =
+   *       ListBackupSchedulesRequest.newBuilder()
+   *           .setParent(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   for (BackupSchedule element : databaseAdminClient.listBackupSchedules(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListBackupSchedulesPagedResponse listBackupSchedules( + ListBackupSchedulesRequest request) { + return listBackupSchedulesPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all the backup schedules for the database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   ListBackupSchedulesRequest request =
+   *       ListBackupSchedulesRequest.newBuilder()
+   *           .setParent(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   ApiFuture future =
+   *       databaseAdminClient.listBackupSchedulesPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (BackupSchedule element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listBackupSchedulesPagedCallable() { + return stub.listBackupSchedulesPagedCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all the backup schedules for the database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   ListBackupSchedulesRequest request =
+   *       ListBackupSchedulesRequest.newBuilder()
+   *           .setParent(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   while (true) {
+   *     ListBackupSchedulesResponse response =
+   *         databaseAdminClient.listBackupSchedulesCallable().call(request);
+   *     for (BackupSchedule element : response.getBackupSchedulesList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listBackupSchedulesCallable() { + return stub.listBackupSchedulesCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * This is an internal API called by Spanner Graph jobs. You should never need to call this API + * directly. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]");
+   *   String operationId = "operationId129704162";
+   *   InternalUpdateGraphOperationResponse response =
+   *       databaseAdminClient.internalUpdateGraphOperation(database, operationId);
+   * }
+   * }
+ * + * @param database Internal field, do not use directly. + * @param operationId Internal field, do not use directly. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final InternalUpdateGraphOperationResponse internalUpdateGraphOperation( + DatabaseName database, String operationId) { + InternalUpdateGraphOperationRequest request = + InternalUpdateGraphOperationRequest.newBuilder() + .setDatabase(database == null ? null : database.toString()) + .setOperationId(operationId) + .build(); + return internalUpdateGraphOperation(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * This is an internal API called by Spanner Graph jobs. You should never need to call this API + * directly. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString();
+   *   String operationId = "operationId129704162";
+   *   InternalUpdateGraphOperationResponse response =
+   *       databaseAdminClient.internalUpdateGraphOperation(database, operationId);
+   * }
+   * }
+ * + * @param database Internal field, do not use directly. + * @param operationId Internal field, do not use directly. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final InternalUpdateGraphOperationResponse internalUpdateGraphOperation( + String database, String operationId) { + InternalUpdateGraphOperationRequest request = + InternalUpdateGraphOperationRequest.newBuilder() + .setDatabase(database) + .setOperationId(operationId) + .build(); + return internalUpdateGraphOperation(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * This is an internal API called by Spanner Graph jobs. You should never need to call this API + * directly. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   InternalUpdateGraphOperationRequest request =
+   *       InternalUpdateGraphOperationRequest.newBuilder()
+   *           .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .setOperationId("operationId129704162")
+   *           .setVmIdentityToken("vmIdentityToken-417652124")
+   *           .setProgress(-1001078227)
+   *           .setStatus(Status.newBuilder().build())
+   *           .build();
+   *   InternalUpdateGraphOperationResponse response =
+   *       databaseAdminClient.internalUpdateGraphOperation(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final InternalUpdateGraphOperationResponse internalUpdateGraphOperation( + InternalUpdateGraphOperationRequest request) { + return internalUpdateGraphOperationCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * This is an internal API called by Spanner Graph jobs. You should never need to call this API + * directly. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   InternalUpdateGraphOperationRequest request =
+   *       InternalUpdateGraphOperationRequest.newBuilder()
+   *           .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .setOperationId("operationId129704162")
+   *           .setVmIdentityToken("vmIdentityToken-417652124")
+   *           .setProgress(-1001078227)
+   *           .setStatus(Status.newBuilder().build())
+   *           .build();
+   *   ApiFuture future =
+   *       databaseAdminClient.internalUpdateGraphOperationCallable().futureCall(request);
+   *   // Do something.
+   *   InternalUpdateGraphOperationResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable< + InternalUpdateGraphOperationRequest, InternalUpdateGraphOperationResponse> + internalUpdateGraphOperationCallable() { + return stub.internalUpdateGraphOperationCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } + + public static class ListDatabasesPagedResponse + extends AbstractPagedListResponse< + ListDatabasesRequest, + ListDatabasesResponse, + Database, + ListDatabasesPage, + ListDatabasesFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext context, + ApiFuture futureResponse) { + ApiFuture futurePage = + ListDatabasesPage.createEmptyPage().createPageAsync(context, futureResponse); + return ApiFutures.transform( + futurePage, + input -> new ListDatabasesPagedResponse(input), + MoreExecutors.directExecutor()); + } + + private ListDatabasesPagedResponse(ListDatabasesPage page) { + super(page, ListDatabasesFixedSizeCollection.createEmptyCollection()); + } + } + + public static class ListDatabasesPage + extends AbstractPage< + ListDatabasesRequest, ListDatabasesResponse, Database, ListDatabasesPage> { + + private ListDatabasesPage( + PageContext context, + ListDatabasesResponse response) { + super(context, response); + } + + private static ListDatabasesPage createEmptyPage() { + return new ListDatabasesPage(null, null); + } + + @Override + protected ListDatabasesPage createPage( + PageContext context, + ListDatabasesResponse response) { + return new ListDatabasesPage(context, response); + } + + @Override + public ApiFuture 
createPageAsync( + PageContext context, + ApiFuture futureResponse) { + return super.createPageAsync(context, futureResponse); + } + } + + public static class ListDatabasesFixedSizeCollection + extends AbstractFixedSizeCollection< + ListDatabasesRequest, + ListDatabasesResponse, + Database, + ListDatabasesPage, + ListDatabasesFixedSizeCollection> { + + private ListDatabasesFixedSizeCollection(List pages, int collectionSize) { + super(pages, collectionSize); + } + + private static ListDatabasesFixedSizeCollection createEmptyCollection() { + return new ListDatabasesFixedSizeCollection(null, 0); + } + + @Override + protected ListDatabasesFixedSizeCollection createCollection( + List pages, int collectionSize) { + return new ListDatabasesFixedSizeCollection(pages, collectionSize); + } + } + + public static class ListBackupsPagedResponse + extends AbstractPagedListResponse< + ListBackupsRequest, + ListBackupsResponse, + Backup, + ListBackupsPage, + ListBackupsFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext context, + ApiFuture futureResponse) { + ApiFuture futurePage = + ListBackupsPage.createEmptyPage().createPageAsync(context, futureResponse); + return ApiFutures.transform( + futurePage, input -> new ListBackupsPagedResponse(input), MoreExecutors.directExecutor()); + } + + private ListBackupsPagedResponse(ListBackupsPage page) { + super(page, ListBackupsFixedSizeCollection.createEmptyCollection()); + } + } + + public static class ListBackupsPage + extends AbstractPage { + + private ListBackupsPage( + PageContext context, + ListBackupsResponse response) { + super(context, response); + } + + private static ListBackupsPage createEmptyPage() { + return new ListBackupsPage(null, null); + } + + @Override + protected ListBackupsPage createPage( + PageContext context, + ListBackupsResponse response) { + return new ListBackupsPage(context, response); + } + + @Override + public ApiFuture createPageAsync( + PageContext context, + ApiFuture 
futureResponse) { + return super.createPageAsync(context, futureResponse); + } + } + + public static class ListBackupsFixedSizeCollection + extends AbstractFixedSizeCollection< + ListBackupsRequest, + ListBackupsResponse, + Backup, + ListBackupsPage, + ListBackupsFixedSizeCollection> { + + private ListBackupsFixedSizeCollection(List pages, int collectionSize) { + super(pages, collectionSize); + } + + private static ListBackupsFixedSizeCollection createEmptyCollection() { + return new ListBackupsFixedSizeCollection(null, 0); + } + + @Override + protected ListBackupsFixedSizeCollection createCollection( + List pages, int collectionSize) { + return new ListBackupsFixedSizeCollection(pages, collectionSize); + } + } + + public static class ListDatabaseOperationsPagedResponse + extends AbstractPagedListResponse< + ListDatabaseOperationsRequest, + ListDatabaseOperationsResponse, + Operation, + ListDatabaseOperationsPage, + ListDatabaseOperationsFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext + context, + ApiFuture futureResponse) { + ApiFuture futurePage = + ListDatabaseOperationsPage.createEmptyPage().createPageAsync(context, futureResponse); + return ApiFutures.transform( + futurePage, + input -> new ListDatabaseOperationsPagedResponse(input), + MoreExecutors.directExecutor()); + } + + private ListDatabaseOperationsPagedResponse(ListDatabaseOperationsPage page) { + super(page, ListDatabaseOperationsFixedSizeCollection.createEmptyCollection()); + } + } + + public static class ListDatabaseOperationsPage + extends AbstractPage< + ListDatabaseOperationsRequest, + ListDatabaseOperationsResponse, + Operation, + ListDatabaseOperationsPage> { + + private ListDatabaseOperationsPage( + PageContext + context, + ListDatabaseOperationsResponse response) { + super(context, response); + } + + private static ListDatabaseOperationsPage createEmptyPage() { + return new ListDatabaseOperationsPage(null, null); + } + + @Override + protected 
ListDatabaseOperationsPage createPage( + PageContext + context, + ListDatabaseOperationsResponse response) { + return new ListDatabaseOperationsPage(context, response); + } + + @Override + public ApiFuture createPageAsync( + PageContext + context, + ApiFuture futureResponse) { + return super.createPageAsync(context, futureResponse); + } + } + + public static class ListDatabaseOperationsFixedSizeCollection + extends AbstractFixedSizeCollection< + ListDatabaseOperationsRequest, + ListDatabaseOperationsResponse, + Operation, + ListDatabaseOperationsPage, + ListDatabaseOperationsFixedSizeCollection> { + + private ListDatabaseOperationsFixedSizeCollection( + List pages, int collectionSize) { + super(pages, collectionSize); + } + + private static ListDatabaseOperationsFixedSizeCollection createEmptyCollection() { + return new ListDatabaseOperationsFixedSizeCollection(null, 0); + } + + @Override + protected ListDatabaseOperationsFixedSizeCollection createCollection( + List pages, int collectionSize) { + return new ListDatabaseOperationsFixedSizeCollection(pages, collectionSize); + } + } + + public static class ListBackupOperationsPagedResponse + extends AbstractPagedListResponse< + ListBackupOperationsRequest, + ListBackupOperationsResponse, + Operation, + ListBackupOperationsPage, + ListBackupOperationsFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext context, + ApiFuture futureResponse) { + ApiFuture futurePage = + ListBackupOperationsPage.createEmptyPage().createPageAsync(context, futureResponse); + return ApiFutures.transform( + futurePage, + input -> new ListBackupOperationsPagedResponse(input), + MoreExecutors.directExecutor()); + } + + private ListBackupOperationsPagedResponse(ListBackupOperationsPage page) { + super(page, ListBackupOperationsFixedSizeCollection.createEmptyCollection()); + } + } + + public static class ListBackupOperationsPage + extends AbstractPage< + ListBackupOperationsRequest, + ListBackupOperationsResponse, + 
Operation, + ListBackupOperationsPage> { + + private ListBackupOperationsPage( + PageContext context, + ListBackupOperationsResponse response) { + super(context, response); + } + + private static ListBackupOperationsPage createEmptyPage() { + return new ListBackupOperationsPage(null, null); + } + + @Override + protected ListBackupOperationsPage createPage( + PageContext context, + ListBackupOperationsResponse response) { + return new ListBackupOperationsPage(context, response); + } + + @Override + public ApiFuture createPageAsync( + PageContext context, + ApiFuture futureResponse) { + return super.createPageAsync(context, futureResponse); + } + } + + public static class ListBackupOperationsFixedSizeCollection + extends AbstractFixedSizeCollection< + ListBackupOperationsRequest, + ListBackupOperationsResponse, + Operation, + ListBackupOperationsPage, + ListBackupOperationsFixedSizeCollection> { + + private ListBackupOperationsFixedSizeCollection( + List pages, int collectionSize) { + super(pages, collectionSize); + } + + private static ListBackupOperationsFixedSizeCollection createEmptyCollection() { + return new ListBackupOperationsFixedSizeCollection(null, 0); + } + + @Override + protected ListBackupOperationsFixedSizeCollection createCollection( + List pages, int collectionSize) { + return new ListBackupOperationsFixedSizeCollection(pages, collectionSize); + } + } + + public static class ListDatabaseRolesPagedResponse + extends AbstractPagedListResponse< + ListDatabaseRolesRequest, + ListDatabaseRolesResponse, + DatabaseRole, + ListDatabaseRolesPage, + ListDatabaseRolesFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext context, + ApiFuture futureResponse) { + ApiFuture futurePage = + ListDatabaseRolesPage.createEmptyPage().createPageAsync(context, futureResponse); + return ApiFutures.transform( + futurePage, + input -> new ListDatabaseRolesPagedResponse(input), + MoreExecutors.directExecutor()); + } + + private 
ListDatabaseRolesPagedResponse(ListDatabaseRolesPage page) { + super(page, ListDatabaseRolesFixedSizeCollection.createEmptyCollection()); + } + } + + public static class ListDatabaseRolesPage + extends AbstractPage< + ListDatabaseRolesRequest, + ListDatabaseRolesResponse, + DatabaseRole, + ListDatabaseRolesPage> { + + private ListDatabaseRolesPage( + PageContext context, + ListDatabaseRolesResponse response) { + super(context, response); + } + + private static ListDatabaseRolesPage createEmptyPage() { + return new ListDatabaseRolesPage(null, null); + } + + @Override + protected ListDatabaseRolesPage createPage( + PageContext context, + ListDatabaseRolesResponse response) { + return new ListDatabaseRolesPage(context, response); + } + + @Override + public ApiFuture createPageAsync( + PageContext context, + ApiFuture futureResponse) { + return super.createPageAsync(context, futureResponse); + } + } + + public static class ListDatabaseRolesFixedSizeCollection + extends AbstractFixedSizeCollection< + ListDatabaseRolesRequest, + ListDatabaseRolesResponse, + DatabaseRole, + ListDatabaseRolesPage, + ListDatabaseRolesFixedSizeCollection> { + + private ListDatabaseRolesFixedSizeCollection( + List pages, int collectionSize) { + super(pages, collectionSize); + } + + private static ListDatabaseRolesFixedSizeCollection createEmptyCollection() { + return new ListDatabaseRolesFixedSizeCollection(null, 0); + } + + @Override + protected ListDatabaseRolesFixedSizeCollection createCollection( + List pages, int collectionSize) { + return new ListDatabaseRolesFixedSizeCollection(pages, collectionSize); + } + } + + public static class ListBackupSchedulesPagedResponse + extends AbstractPagedListResponse< + ListBackupSchedulesRequest, + ListBackupSchedulesResponse, + BackupSchedule, + ListBackupSchedulesPage, + ListBackupSchedulesFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext + context, + ApiFuture futureResponse) { + ApiFuture futurePage = + 
ListBackupSchedulesPage.createEmptyPage().createPageAsync(context, futureResponse); + return ApiFutures.transform( + futurePage, + input -> new ListBackupSchedulesPagedResponse(input), + MoreExecutors.directExecutor()); + } + + private ListBackupSchedulesPagedResponse(ListBackupSchedulesPage page) { + super(page, ListBackupSchedulesFixedSizeCollection.createEmptyCollection()); + } + } + + public static class ListBackupSchedulesPage + extends AbstractPage< + ListBackupSchedulesRequest, + ListBackupSchedulesResponse, + BackupSchedule, + ListBackupSchedulesPage> { + + private ListBackupSchedulesPage( + PageContext + context, + ListBackupSchedulesResponse response) { + super(context, response); + } + + private static ListBackupSchedulesPage createEmptyPage() { + return new ListBackupSchedulesPage(null, null); + } + + @Override + protected ListBackupSchedulesPage createPage( + PageContext + context, + ListBackupSchedulesResponse response) { + return new ListBackupSchedulesPage(context, response); + } + + @Override + public ApiFuture createPageAsync( + PageContext + context, + ApiFuture futureResponse) { + return super.createPageAsync(context, futureResponse); + } + } + + public static class ListBackupSchedulesFixedSizeCollection + extends AbstractFixedSizeCollection< + ListBackupSchedulesRequest, + ListBackupSchedulesResponse, + BackupSchedule, + ListBackupSchedulesPage, + ListBackupSchedulesFixedSizeCollection> { + + private ListBackupSchedulesFixedSizeCollection( + List pages, int collectionSize) { + super(pages, collectionSize); + } + + private static ListBackupSchedulesFixedSizeCollection createEmptyCollection() { + return new ListBackupSchedulesFixedSizeCollection(null, 0); + } + + @Override + protected ListBackupSchedulesFixedSizeCollection createCollection( + List pages, int collectionSize) { + return new ListBackupSchedulesFixedSizeCollection(pages, collectionSize); + } + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminSettings.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminSettings.java new file mode 100644 index 000000000000..7457cd253c5a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminSettings.java @@ -0,0 +1,676 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.admin.database.v1; + +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupOperationsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupSchedulesPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseOperationsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseRolesPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabasesPagedResponse; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.httpjson.InstantiatingHttpJsonChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.spanner.admin.database.v1.stub.DatabaseAdminStubSettings; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import com.google.spanner.admin.database.v1.AddSplitPointsRequest; +import com.google.spanner.admin.database.v1.AddSplitPointsResponse; +import 
com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.CopyBackupMetadata; +import com.google.spanner.admin.database.v1.CopyBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupMetadata; +import com.google.spanner.admin.database.v1.CreateBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupScheduleRequest; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.DeleteBackupRequest; +import com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest; +import com.google.spanner.admin.database.v1.DropDatabaseRequest; +import com.google.spanner.admin.database.v1.GetBackupRequest; +import com.google.spanner.admin.database.v1.GetBackupScheduleRequest; +import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; +import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; +import com.google.spanner.admin.database.v1.GetDatabaseRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse; +import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; +import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; +import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; +import com.google.spanner.admin.database.v1.ListBackupSchedulesResponse; +import com.google.spanner.admin.database.v1.ListBackupsRequest; +import com.google.spanner.admin.database.v1.ListBackupsResponse; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse; +import com.google.spanner.admin.database.v1.ListDatabaseRolesRequest; +import 
com.google.spanner.admin.database.v1.ListDatabaseRolesResponse; +import com.google.spanner.admin.database.v1.ListDatabasesRequest; +import com.google.spanner.admin.database.v1.ListDatabasesResponse; +import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; +import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; +import com.google.spanner.admin.database.v1.UpdateBackupRequest; +import com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; +import com.google.spanner.admin.database.v1.UpdateDatabaseMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseRequest; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link DatabaseAdminClient}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (spanner.googleapis.com) and default port (443) are used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of getDatabase: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * DatabaseAdminSettings.Builder databaseAdminSettingsBuilder = DatabaseAdminSettings.newBuilder();
+ * databaseAdminSettingsBuilder
+ *     .getDatabaseSettings()
+ *     .setRetrySettings(
+ *         databaseAdminSettingsBuilder
+ *             .getDatabaseSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * DatabaseAdminSettings databaseAdminSettings = databaseAdminSettingsBuilder.build();
+ * }
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + * + *

To configure the RetrySettings of a Long Running Operation method, create an + * OperationTimedPollAlgorithm object and update the RPC's polling algorithm. For example, to + * configure the RetrySettings for createDatabase: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * DatabaseAdminSettings.Builder databaseAdminSettingsBuilder = DatabaseAdminSettings.newBuilder();
+ * TimedRetryAlgorithm timedRetryAlgorithm =
+ *     OperationalTimedPollAlgorithm.create(
+ *         RetrySettings.newBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofMillis(500))
+ *             .setRetryDelayMultiplier(1.5)
+ *             .setMaxRetryDelayDuration(Duration.ofMillis(5000))
+ *             .setTotalTimeoutDuration(Duration.ofHours(24))
+ *             .build());
+ * databaseAdminSettingsBuilder
+ *     .createClusterOperationSettings()
+ *     .setPollingAlgorithm(timedRetryAlgorithm)
+ *     .build();
+ * }
+ */ +@Generated("by gapic-generator-java") +public class DatabaseAdminSettings extends ClientSettings { + + /** Returns the object with the settings used for calls to listDatabases. */ + public PagedCallSettings + listDatabasesSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).listDatabasesSettings(); + } + + /** Returns the object with the settings used for calls to createDatabase. */ + public UnaryCallSettings createDatabaseSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).createDatabaseSettings(); + } + + /** Returns the object with the settings used for calls to createDatabase. */ + public OperationCallSettings + createDatabaseOperationSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).createDatabaseOperationSettings(); + } + + /** Returns the object with the settings used for calls to getDatabase. */ + public UnaryCallSettings getDatabaseSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).getDatabaseSettings(); + } + + /** Returns the object with the settings used for calls to updateDatabase. */ + public UnaryCallSettings updateDatabaseSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).updateDatabaseSettings(); + } + + /** Returns the object with the settings used for calls to updateDatabase. */ + public OperationCallSettings + updateDatabaseOperationSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).updateDatabaseOperationSettings(); + } + + /** Returns the object with the settings used for calls to updateDatabaseDdl. */ + public UnaryCallSettings updateDatabaseDdlSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).updateDatabaseDdlSettings(); + } + + /** Returns the object with the settings used for calls to updateDatabaseDdl. 
*/ + public OperationCallSettings + updateDatabaseDdlOperationSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).updateDatabaseDdlOperationSettings(); + } + + /** Returns the object with the settings used for calls to dropDatabase. */ + public UnaryCallSettings dropDatabaseSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).dropDatabaseSettings(); + } + + /** Returns the object with the settings used for calls to getDatabaseDdl. */ + public UnaryCallSettings getDatabaseDdlSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).getDatabaseDdlSettings(); + } + + /** Returns the object with the settings used for calls to setIamPolicy. */ + public UnaryCallSettings setIamPolicySettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).setIamPolicySettings(); + } + + /** Returns the object with the settings used for calls to getIamPolicy. */ + public UnaryCallSettings getIamPolicySettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).getIamPolicySettings(); + } + + /** Returns the object with the settings used for calls to testIamPermissions. */ + public UnaryCallSettings + testIamPermissionsSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).testIamPermissionsSettings(); + } + + /** Returns the object with the settings used for calls to createBackup. */ + public UnaryCallSettings createBackupSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).createBackupSettings(); + } + + /** Returns the object with the settings used for calls to createBackup. */ + public OperationCallSettings + createBackupOperationSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).createBackupOperationSettings(); + } + + /** Returns the object with the settings used for calls to copyBackup. 
*/ + public UnaryCallSettings copyBackupSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).copyBackupSettings(); + } + + /** Returns the object with the settings used for calls to copyBackup. */ + public OperationCallSettings + copyBackupOperationSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).copyBackupOperationSettings(); + } + + /** Returns the object with the settings used for calls to getBackup. */ + public UnaryCallSettings getBackupSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).getBackupSettings(); + } + + /** Returns the object with the settings used for calls to updateBackup. */ + public UnaryCallSettings updateBackupSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).updateBackupSettings(); + } + + /** Returns the object with the settings used for calls to deleteBackup. */ + public UnaryCallSettings deleteBackupSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).deleteBackupSettings(); + } + + /** Returns the object with the settings used for calls to listBackups. */ + public PagedCallSettings + listBackupsSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).listBackupsSettings(); + } + + /** Returns the object with the settings used for calls to restoreDatabase. */ + public UnaryCallSettings restoreDatabaseSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).restoreDatabaseSettings(); + } + + /** Returns the object with the settings used for calls to restoreDatabase. */ + public OperationCallSettings + restoreDatabaseOperationSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).restoreDatabaseOperationSettings(); + } + + /** Returns the object with the settings used for calls to listDatabaseOperations. 
*/ + public PagedCallSettings< + ListDatabaseOperationsRequest, + ListDatabaseOperationsResponse, + ListDatabaseOperationsPagedResponse> + listDatabaseOperationsSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).listDatabaseOperationsSettings(); + } + + /** Returns the object with the settings used for calls to listBackupOperations. */ + public PagedCallSettings< + ListBackupOperationsRequest, + ListBackupOperationsResponse, + ListBackupOperationsPagedResponse> + listBackupOperationsSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).listBackupOperationsSettings(); + } + + /** Returns the object with the settings used for calls to listDatabaseRoles. */ + public PagedCallSettings< + ListDatabaseRolesRequest, ListDatabaseRolesResponse, ListDatabaseRolesPagedResponse> + listDatabaseRolesSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).listDatabaseRolesSettings(); + } + + /** Returns the object with the settings used for calls to addSplitPoints. */ + public UnaryCallSettings addSplitPointsSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).addSplitPointsSettings(); + } + + /** Returns the object with the settings used for calls to createBackupSchedule. */ + public UnaryCallSettings + createBackupScheduleSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).createBackupScheduleSettings(); + } + + /** Returns the object with the settings used for calls to getBackupSchedule. */ + public UnaryCallSettings getBackupScheduleSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).getBackupScheduleSettings(); + } + + /** Returns the object with the settings used for calls to updateBackupSchedule. */ + public UnaryCallSettings + updateBackupScheduleSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).updateBackupScheduleSettings(); + } + + /** Returns the object with the settings used for calls to deleteBackupSchedule. 
*/ + public UnaryCallSettings deleteBackupScheduleSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).deleteBackupScheduleSettings(); + } + + /** Returns the object with the settings used for calls to listBackupSchedules. */ + public PagedCallSettings< + ListBackupSchedulesRequest, ListBackupSchedulesResponse, ListBackupSchedulesPagedResponse> + listBackupSchedulesSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).listBackupSchedulesSettings(); + } + + /** Returns the object with the settings used for calls to internalUpdateGraph. */ + public UnaryCallSettings< + InternalUpdateGraphOperationRequest, InternalUpdateGraphOperationResponse> + internalUpdateGraphOperationSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).internalUpdateGraphOperationSettings(); + } + + public static final DatabaseAdminSettings create(DatabaseAdminStubSettings stub) + throws IOException { + return new DatabaseAdminSettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return DatabaseAdminStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return DatabaseAdminStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return DatabaseAdminStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return DatabaseAdminStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default gRPC ChannelProvider for this service. 
*/ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return DatabaseAdminStubSettings.defaultGrpcTransportProviderBuilder(); + } + + /** Returns a builder for the default REST ChannelProvider for this service. */ + @BetaApi + public static InstantiatingHttpJsonChannelProvider.Builder + defaultHttpJsonTransportProviderBuilder() { + return DatabaseAdminStubSettings.defaultHttpJsonTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return DatabaseAdminStubSettings.defaultTransportChannelProvider(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return DatabaseAdminStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new gRPC builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new REST builder for this class. */ + public static Builder newHttpJsonBuilder() { + return Builder.createHttpJsonDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected DatabaseAdminSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for DatabaseAdminSettings. 
*/ + public static class Builder extends ClientSettings.Builder { + + protected Builder() throws IOException { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(DatabaseAdminStubSettings.newBuilder(clientContext)); + } + + protected Builder(DatabaseAdminSettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(DatabaseAdminStubSettings.Builder stubSettings) { + super(stubSettings); + } + + private static Builder createDefault() { + return new Builder(DatabaseAdminStubSettings.newBuilder()); + } + + private static Builder createHttpJsonDefault() { + return new Builder(DatabaseAdminStubSettings.newHttpJsonBuilder()); + } + + public DatabaseAdminStubSettings.Builder getStubSettingsBuilder() { + return ((DatabaseAdminStubSettings.Builder) getStubSettings()); + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + /** Returns the builder for the settings used for calls to listDatabases. */ + public PagedCallSettings.Builder< + ListDatabasesRequest, ListDatabasesResponse, ListDatabasesPagedResponse> + listDatabasesSettings() { + return getStubSettingsBuilder().listDatabasesSettings(); + } + + /** Returns the builder for the settings used for calls to createDatabase. */ + public UnaryCallSettings.Builder createDatabaseSettings() { + return getStubSettingsBuilder().createDatabaseSettings(); + } + + /** Returns the builder for the settings used for calls to createDatabase. */ + public OperationCallSettings.Builder + createDatabaseOperationSettings() { + return getStubSettingsBuilder().createDatabaseOperationSettings(); + } + + /** Returns the builder for the settings used for calls to getDatabase. */ + public UnaryCallSettings.Builder getDatabaseSettings() { + return getStubSettingsBuilder().getDatabaseSettings(); + } + + /** Returns the builder for the settings used for calls to updateDatabase. */ + public UnaryCallSettings.Builder updateDatabaseSettings() { + return getStubSettingsBuilder().updateDatabaseSettings(); + } + + /** Returns the builder for the settings used for calls to updateDatabase. */ + public OperationCallSettings.Builder + updateDatabaseOperationSettings() { + return getStubSettingsBuilder().updateDatabaseOperationSettings(); + } + + /** Returns the builder for the settings used for calls to updateDatabaseDdl. */ + public UnaryCallSettings.Builder + updateDatabaseDdlSettings() { + return getStubSettingsBuilder().updateDatabaseDdlSettings(); + } + + /** Returns the builder for the settings used for calls to updateDatabaseDdl. 
*/ + public OperationCallSettings.Builder + updateDatabaseDdlOperationSettings() { + return getStubSettingsBuilder().updateDatabaseDdlOperationSettings(); + } + + /** Returns the builder for the settings used for calls to dropDatabase. */ + public UnaryCallSettings.Builder dropDatabaseSettings() { + return getStubSettingsBuilder().dropDatabaseSettings(); + } + + /** Returns the builder for the settings used for calls to getDatabaseDdl. */ + public UnaryCallSettings.Builder + getDatabaseDdlSettings() { + return getStubSettingsBuilder().getDatabaseDdlSettings(); + } + + /** Returns the builder for the settings used for calls to setIamPolicy. */ + public UnaryCallSettings.Builder setIamPolicySettings() { + return getStubSettingsBuilder().setIamPolicySettings(); + } + + /** Returns the builder for the settings used for calls to getIamPolicy. */ + public UnaryCallSettings.Builder getIamPolicySettings() { + return getStubSettingsBuilder().getIamPolicySettings(); + } + + /** Returns the builder for the settings used for calls to testIamPermissions. */ + public UnaryCallSettings.Builder + testIamPermissionsSettings() { + return getStubSettingsBuilder().testIamPermissionsSettings(); + } + + /** Returns the builder for the settings used for calls to createBackup. */ + public UnaryCallSettings.Builder createBackupSettings() { + return getStubSettingsBuilder().createBackupSettings(); + } + + /** Returns the builder for the settings used for calls to createBackup. */ + public OperationCallSettings.Builder + createBackupOperationSettings() { + return getStubSettingsBuilder().createBackupOperationSettings(); + } + + /** Returns the builder for the settings used for calls to copyBackup. */ + public UnaryCallSettings.Builder copyBackupSettings() { + return getStubSettingsBuilder().copyBackupSettings(); + } + + /** Returns the builder for the settings used for calls to copyBackup. 
*/ + public OperationCallSettings.Builder + copyBackupOperationSettings() { + return getStubSettingsBuilder().copyBackupOperationSettings(); + } + + /** Returns the builder for the settings used for calls to getBackup. */ + public UnaryCallSettings.Builder getBackupSettings() { + return getStubSettingsBuilder().getBackupSettings(); + } + + /** Returns the builder for the settings used for calls to updateBackup. */ + public UnaryCallSettings.Builder updateBackupSettings() { + return getStubSettingsBuilder().updateBackupSettings(); + } + + /** Returns the builder for the settings used for calls to deleteBackup. */ + public UnaryCallSettings.Builder deleteBackupSettings() { + return getStubSettingsBuilder().deleteBackupSettings(); + } + + /** Returns the builder for the settings used for calls to listBackups. */ + public PagedCallSettings.Builder< + ListBackupsRequest, ListBackupsResponse, ListBackupsPagedResponse> + listBackupsSettings() { + return getStubSettingsBuilder().listBackupsSettings(); + } + + /** Returns the builder for the settings used for calls to restoreDatabase. */ + public UnaryCallSettings.Builder restoreDatabaseSettings() { + return getStubSettingsBuilder().restoreDatabaseSettings(); + } + + /** Returns the builder for the settings used for calls to restoreDatabase. */ + public OperationCallSettings.Builder + restoreDatabaseOperationSettings() { + return getStubSettingsBuilder().restoreDatabaseOperationSettings(); + } + + /** Returns the builder for the settings used for calls to listDatabaseOperations. */ + public PagedCallSettings.Builder< + ListDatabaseOperationsRequest, + ListDatabaseOperationsResponse, + ListDatabaseOperationsPagedResponse> + listDatabaseOperationsSettings() { + return getStubSettingsBuilder().listDatabaseOperationsSettings(); + } + + /** Returns the builder for the settings used for calls to listBackupOperations. 
*/ + public PagedCallSettings.Builder< + ListBackupOperationsRequest, + ListBackupOperationsResponse, + ListBackupOperationsPagedResponse> + listBackupOperationsSettings() { + return getStubSettingsBuilder().listBackupOperationsSettings(); + } + + /** Returns the builder for the settings used for calls to listDatabaseRoles. */ + public PagedCallSettings.Builder< + ListDatabaseRolesRequest, ListDatabaseRolesResponse, ListDatabaseRolesPagedResponse> + listDatabaseRolesSettings() { + return getStubSettingsBuilder().listDatabaseRolesSettings(); + } + + /** Returns the builder for the settings used for calls to addSplitPoints. */ + public UnaryCallSettings.Builder + addSplitPointsSettings() { + return getStubSettingsBuilder().addSplitPointsSettings(); + } + + /** Returns the builder for the settings used for calls to createBackupSchedule. */ + public UnaryCallSettings.Builder + createBackupScheduleSettings() { + return getStubSettingsBuilder().createBackupScheduleSettings(); + } + + /** Returns the builder for the settings used for calls to getBackupSchedule. */ + public UnaryCallSettings.Builder + getBackupScheduleSettings() { + return getStubSettingsBuilder().getBackupScheduleSettings(); + } + + /** Returns the builder for the settings used for calls to updateBackupSchedule. */ + public UnaryCallSettings.Builder + updateBackupScheduleSettings() { + return getStubSettingsBuilder().updateBackupScheduleSettings(); + } + + /** Returns the builder for the settings used for calls to deleteBackupSchedule. */ + public UnaryCallSettings.Builder + deleteBackupScheduleSettings() { + return getStubSettingsBuilder().deleteBackupScheduleSettings(); + } + + /** Returns the builder for the settings used for calls to listBackupSchedules. 
*/ + public PagedCallSettings.Builder< + ListBackupSchedulesRequest, + ListBackupSchedulesResponse, + ListBackupSchedulesPagedResponse> + listBackupSchedulesSettings() { + return getStubSettingsBuilder().listBackupSchedulesSettings(); + } + + /** Returns the builder for the settings used for calls to internalUpdateGraph. */ + public UnaryCallSettings.Builder< + InternalUpdateGraphOperationRequest, InternalUpdateGraphOperationResponse> + internalUpdateGraphOperationSettings() { + return getStubSettingsBuilder().internalUpdateGraphOperationSettings(); + } + + @Override + public DatabaseAdminSettings build() throws IOException { + return new DatabaseAdminSettings(this); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/gapic_metadata.json b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/gapic_metadata.json new file mode 100644 index 000000000000..f6bcf8dda65b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/gapic_metadata.json @@ -0,0 +1,99 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "java", + "protoPackage": "google.spanner.admin.database.v1", + "libraryPackage": "com.google.cloud.spanner.admin.database.v1", + "services": { + "DatabaseAdmin": { + "clients": { + "grpc": { + "libraryClient": "DatabaseAdminClient", + "rpcs": { + "AddSplitPoints": { + "methods": ["addSplitPoints", "addSplitPoints", "addSplitPoints", "addSplitPointsCallable"] + }, + "CopyBackup": { + "methods": ["copyBackupAsync", "copyBackupAsync", "copyBackupAsync", "copyBackupAsync", "copyBackupAsync", "copyBackupOperationCallable", "copyBackupCallable"] + }, + "CreateBackup": { + "methods": ["createBackupAsync", "createBackupAsync", "createBackupAsync", "createBackupOperationCallable", "createBackupCallable"] + }, + "CreateBackupSchedule": { + 
"methods": ["createBackupSchedule", "createBackupSchedule", "createBackupSchedule", "createBackupScheduleCallable"] + }, + "CreateDatabase": { + "methods": ["createDatabaseAsync", "createDatabaseAsync", "createDatabaseAsync", "createDatabaseOperationCallable", "createDatabaseCallable"] + }, + "DeleteBackup": { + "methods": ["deleteBackup", "deleteBackup", "deleteBackup", "deleteBackupCallable"] + }, + "DeleteBackupSchedule": { + "methods": ["deleteBackupSchedule", "deleteBackupSchedule", "deleteBackupSchedule", "deleteBackupScheduleCallable"] + }, + "DropDatabase": { + "methods": ["dropDatabase", "dropDatabase", "dropDatabase", "dropDatabaseCallable"] + }, + "GetBackup": { + "methods": ["getBackup", "getBackup", "getBackup", "getBackupCallable"] + }, + "GetBackupSchedule": { + "methods": ["getBackupSchedule", "getBackupSchedule", "getBackupSchedule", "getBackupScheduleCallable"] + }, + "GetDatabase": { + "methods": ["getDatabase", "getDatabase", "getDatabase", "getDatabaseCallable"] + }, + "GetDatabaseDdl": { + "methods": ["getDatabaseDdl", "getDatabaseDdl", "getDatabaseDdl", "getDatabaseDdlCallable"] + }, + "GetIamPolicy": { + "methods": ["getIamPolicy", "getIamPolicy", "getIamPolicy", "getIamPolicyCallable"] + }, + "InternalUpdateGraphOperation": { + "methods": ["internalUpdateGraphOperation", "internalUpdateGraphOperation", "internalUpdateGraphOperation", "internalUpdateGraphOperationCallable"] + }, + "ListBackupOperations": { + "methods": ["listBackupOperations", "listBackupOperations", "listBackupOperations", "listBackupOperationsPagedCallable", "listBackupOperationsCallable"] + }, + "ListBackupSchedules": { + "methods": ["listBackupSchedules", "listBackupSchedules", "listBackupSchedules", "listBackupSchedulesPagedCallable", "listBackupSchedulesCallable"] + }, + "ListBackups": { + "methods": ["listBackups", "listBackups", "listBackups", "listBackupsPagedCallable", "listBackupsCallable"] + }, + "ListDatabaseOperations": { + "methods": ["listDatabaseOperations", 
"listDatabaseOperations", "listDatabaseOperations", "listDatabaseOperationsPagedCallable", "listDatabaseOperationsCallable"] + }, + "ListDatabaseRoles": { + "methods": ["listDatabaseRoles", "listDatabaseRoles", "listDatabaseRoles", "listDatabaseRolesPagedCallable", "listDatabaseRolesCallable"] + }, + "ListDatabases": { + "methods": ["listDatabases", "listDatabases", "listDatabases", "listDatabasesPagedCallable", "listDatabasesCallable"] + }, + "RestoreDatabase": { + "methods": ["restoreDatabaseAsync", "restoreDatabaseAsync", "restoreDatabaseAsync", "restoreDatabaseAsync", "restoreDatabaseAsync", "restoreDatabaseOperationCallable", "restoreDatabaseCallable"] + }, + "SetIamPolicy": { + "methods": ["setIamPolicy", "setIamPolicy", "setIamPolicy", "setIamPolicyCallable"] + }, + "TestIamPermissions": { + "methods": ["testIamPermissions", "testIamPermissions", "testIamPermissions", "testIamPermissionsCallable"] + }, + "UpdateBackup": { + "methods": ["updateBackup", "updateBackup", "updateBackupCallable"] + }, + "UpdateBackupSchedule": { + "methods": ["updateBackupSchedule", "updateBackupSchedule", "updateBackupScheduleCallable"] + }, + "UpdateDatabase": { + "methods": ["updateDatabaseAsync", "updateDatabaseAsync", "updateDatabaseOperationCallable", "updateDatabaseCallable"] + }, + "UpdateDatabaseDdl": { + "methods": ["updateDatabaseDdlAsync", "updateDatabaseDdlAsync", "updateDatabaseDdlAsync", "updateDatabaseDdlOperationCallable", "updateDatabaseDdlCallable"] + } + } + } + } + } + } +} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/package-info.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/package-info.java new file mode 100644 index 000000000000..ea0fc2fa4308 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/package-info.java @@ -0,0 +1,47 @@ +/* + * Copyright 2026 Google LLC 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A client to Cloud Spanner API + * + *

The interfaces provided are listed below, along with usage samples. + * + *

======================= DatabaseAdminClient ======================= + * + *

Service Description: Cloud Spanner Database Admin API + * + *

The Cloud Spanner Database Admin API can be used to: * create, drop, and list databases + * * update the schema of pre-existing databases * create, delete, copy and list backups for + * a database * restore a database from an existing backup + * + *

Sample for DatabaseAdminClient: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+ *   DatabaseName name = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]");
+ *   Database response = databaseAdminClient.getDatabase(name);
+ * }
+ * }
+ */ +@Generated("by gapic-generator-java") +package com.google.cloud.spanner.admin.database.v1; + +import javax.annotation.Generated; diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStub.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStub.java new file mode 100644 index 000000000000..ffb3b37e8bda --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStub.java @@ -0,0 +1,276 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.admin.database.v1.stub; + +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupOperationsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupSchedulesPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseOperationsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseRolesPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabasesPagedResponse; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import com.google.protobuf.Empty; +import com.google.spanner.admin.database.v1.AddSplitPointsRequest; +import com.google.spanner.admin.database.v1.AddSplitPointsResponse; +import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.CopyBackupMetadata; +import com.google.spanner.admin.database.v1.CopyBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupMetadata; +import com.google.spanner.admin.database.v1.CreateBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupScheduleRequest; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import 
com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.DeleteBackupRequest; +import com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest; +import com.google.spanner.admin.database.v1.DropDatabaseRequest; +import com.google.spanner.admin.database.v1.GetBackupRequest; +import com.google.spanner.admin.database.v1.GetBackupScheduleRequest; +import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; +import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; +import com.google.spanner.admin.database.v1.GetDatabaseRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse; +import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; +import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; +import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; +import com.google.spanner.admin.database.v1.ListBackupSchedulesResponse; +import com.google.spanner.admin.database.v1.ListBackupsRequest; +import com.google.spanner.admin.database.v1.ListBackupsResponse; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse; +import com.google.spanner.admin.database.v1.ListDatabaseRolesRequest; +import com.google.spanner.admin.database.v1.ListDatabaseRolesResponse; +import com.google.spanner.admin.database.v1.ListDatabasesRequest; +import com.google.spanner.admin.database.v1.ListDatabasesResponse; +import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; +import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; +import com.google.spanner.admin.database.v1.UpdateBackupRequest; +import com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import 
com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; +import com.google.spanner.admin.database.v1.UpdateDatabaseMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseRequest; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Base stub class for the DatabaseAdmin service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public abstract class DatabaseAdminStub implements BackgroundResource { + + public OperationsStub getOperationsStub() { + return null; + } + + public com.google.api.gax.httpjson.longrunning.stub.OperationsStub getHttpJsonOperationsStub() { + return null; + } + + public UnaryCallable + listDatabasesPagedCallable() { + throw new UnsupportedOperationException("Not implemented: listDatabasesPagedCallable()"); + } + + public UnaryCallable listDatabasesCallable() { + throw new UnsupportedOperationException("Not implemented: listDatabasesCallable()"); + } + + public OperationCallable + createDatabaseOperationCallable() { + throw new UnsupportedOperationException("Not implemented: createDatabaseOperationCallable()"); + } + + public UnaryCallable createDatabaseCallable() { + throw new UnsupportedOperationException("Not implemented: createDatabaseCallable()"); + } + + public UnaryCallable getDatabaseCallable() { + throw new UnsupportedOperationException("Not implemented: getDatabaseCallable()"); + } + + public OperationCallable + updateDatabaseOperationCallable() { + throw new UnsupportedOperationException("Not implemented: updateDatabaseOperationCallable()"); + } + + public UnaryCallable updateDatabaseCallable() { + throw new UnsupportedOperationException("Not implemented: updateDatabaseCallable()"); + } + + public OperationCallable + updateDatabaseDdlOperationCallable() { + throw new UnsupportedOperationException( + "Not implemented: updateDatabaseDdlOperationCallable()"); + } + + public UnaryCallable updateDatabaseDdlCallable() { + throw new UnsupportedOperationException("Not implemented: updateDatabaseDdlCallable()"); + } + + public UnaryCallable dropDatabaseCallable() { + throw new UnsupportedOperationException("Not implemented: dropDatabaseCallable()"); + } + + public UnaryCallable getDatabaseDdlCallable() { + throw new 
UnsupportedOperationException("Not implemented: getDatabaseDdlCallable()"); + } + + public UnaryCallable setIamPolicyCallable() { + throw new UnsupportedOperationException("Not implemented: setIamPolicyCallable()"); + } + + public UnaryCallable getIamPolicyCallable() { + throw new UnsupportedOperationException("Not implemented: getIamPolicyCallable()"); + } + + public UnaryCallable + testIamPermissionsCallable() { + throw new UnsupportedOperationException("Not implemented: testIamPermissionsCallable()"); + } + + public OperationCallable + createBackupOperationCallable() { + throw new UnsupportedOperationException("Not implemented: createBackupOperationCallable()"); + } + + public UnaryCallable createBackupCallable() { + throw new UnsupportedOperationException("Not implemented: createBackupCallable()"); + } + + public OperationCallable + copyBackupOperationCallable() { + throw new UnsupportedOperationException("Not implemented: copyBackupOperationCallable()"); + } + + public UnaryCallable copyBackupCallable() { + throw new UnsupportedOperationException("Not implemented: copyBackupCallable()"); + } + + public UnaryCallable getBackupCallable() { + throw new UnsupportedOperationException("Not implemented: getBackupCallable()"); + } + + public UnaryCallable updateBackupCallable() { + throw new UnsupportedOperationException("Not implemented: updateBackupCallable()"); + } + + public UnaryCallable deleteBackupCallable() { + throw new UnsupportedOperationException("Not implemented: deleteBackupCallable()"); + } + + public UnaryCallable listBackupsPagedCallable() { + throw new UnsupportedOperationException("Not implemented: listBackupsPagedCallable()"); + } + + public UnaryCallable listBackupsCallable() { + throw new UnsupportedOperationException("Not implemented: listBackupsCallable()"); + } + + public OperationCallable + restoreDatabaseOperationCallable() { + throw new UnsupportedOperationException("Not implemented: restoreDatabaseOperationCallable()"); + } + + public 
UnaryCallable restoreDatabaseCallable() { + throw new UnsupportedOperationException("Not implemented: restoreDatabaseCallable()"); + } + + public UnaryCallable + listDatabaseOperationsPagedCallable() { + throw new UnsupportedOperationException( + "Not implemented: listDatabaseOperationsPagedCallable()"); + } + + public UnaryCallable + listDatabaseOperationsCallable() { + throw new UnsupportedOperationException("Not implemented: listDatabaseOperationsCallable()"); + } + + public UnaryCallable + listBackupOperationsPagedCallable() { + throw new UnsupportedOperationException("Not implemented: listBackupOperationsPagedCallable()"); + } + + public UnaryCallable + listBackupOperationsCallable() { + throw new UnsupportedOperationException("Not implemented: listBackupOperationsCallable()"); + } + + public UnaryCallable + listDatabaseRolesPagedCallable() { + throw new UnsupportedOperationException("Not implemented: listDatabaseRolesPagedCallable()"); + } + + public UnaryCallable + listDatabaseRolesCallable() { + throw new UnsupportedOperationException("Not implemented: listDatabaseRolesCallable()"); + } + + public UnaryCallable addSplitPointsCallable() { + throw new UnsupportedOperationException("Not implemented: addSplitPointsCallable()"); + } + + public UnaryCallable createBackupScheduleCallable() { + throw new UnsupportedOperationException("Not implemented: createBackupScheduleCallable()"); + } + + public UnaryCallable getBackupScheduleCallable() { + throw new UnsupportedOperationException("Not implemented: getBackupScheduleCallable()"); + } + + public UnaryCallable updateBackupScheduleCallable() { + throw new UnsupportedOperationException("Not implemented: updateBackupScheduleCallable()"); + } + + public UnaryCallable deleteBackupScheduleCallable() { + throw new UnsupportedOperationException("Not implemented: deleteBackupScheduleCallable()"); + } + + public UnaryCallable + listBackupSchedulesPagedCallable() { + throw new UnsupportedOperationException("Not implemented: 
listBackupSchedulesPagedCallable()"); + } + + public UnaryCallable + listBackupSchedulesCallable() { + throw new UnsupportedOperationException("Not implemented: listBackupSchedulesCallable()"); + } + + public UnaryCallable + internalUpdateGraphOperationCallable() { + throw new UnsupportedOperationException( + "Not implemented: internalUpdateGraphOperationCallable()"); + } + + @Override + public abstract void close(); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStubSettings.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStubSettings.java new file mode 100644 index 000000000000..b3e281670ef1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStubSettings.java @@ -0,0 +1,1777 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.admin.database.v1.stub; + +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupOperationsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupSchedulesPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseOperationsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseRolesPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabasesPagedResponse; + +import com.google.api.core.ApiFunction; +import com.google.api.core.ApiFuture; +import com.google.api.core.BetaApi; +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.grpc.ProtoOperationTransformers; +import com.google.api.gax.httpjson.GaxHttpJsonProperties; +import com.google.api.gax.httpjson.HttpJsonTransportChannel; +import com.google.api.gax.httpjson.InstantiatingHttpJsonChannelProvider; +import com.google.api.gax.longrunning.OperationSnapshot; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.LibraryMetadata; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.PageContext; +import 
com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.PagedListDescriptor; +import com.google.api.gax.rpc.PagedListResponseFactory; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import com.google.spanner.admin.database.v1.AddSplitPointsRequest; +import com.google.spanner.admin.database.v1.AddSplitPointsResponse; +import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.CopyBackupMetadata; +import com.google.spanner.admin.database.v1.CopyBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupMetadata; +import com.google.spanner.admin.database.v1.CreateBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupScheduleRequest; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.DatabaseRole; +import com.google.spanner.admin.database.v1.DeleteBackupRequest; +import com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest; +import com.google.spanner.admin.database.v1.DropDatabaseRequest; +import com.google.spanner.admin.database.v1.GetBackupRequest; 
+import com.google.spanner.admin.database.v1.GetBackupScheduleRequest; +import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; +import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; +import com.google.spanner.admin.database.v1.GetDatabaseRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse; +import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; +import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; +import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; +import com.google.spanner.admin.database.v1.ListBackupSchedulesResponse; +import com.google.spanner.admin.database.v1.ListBackupsRequest; +import com.google.spanner.admin.database.v1.ListBackupsResponse; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse; +import com.google.spanner.admin.database.v1.ListDatabaseRolesRequest; +import com.google.spanner.admin.database.v1.ListDatabaseRolesResponse; +import com.google.spanner.admin.database.v1.ListDatabasesRequest; +import com.google.spanner.admin.database.v1.ListDatabasesResponse; +import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; +import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; +import com.google.spanner.admin.database.v1.UpdateBackupRequest; +import com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; +import com.google.spanner.admin.database.v1.UpdateDatabaseMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseRequest; +import java.io.IOException; +import java.time.Duration; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED 
DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link DatabaseAdminStub}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (spanner.googleapis.com) and default port (443) are used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of getDatabase: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * DatabaseAdminStubSettings.Builder databaseAdminSettingsBuilder =
+ *     DatabaseAdminStubSettings.newBuilder();
+ * databaseAdminSettingsBuilder
+ *     .getDatabaseSettings()
+ *     .setRetrySettings(
+ *         databaseAdminSettingsBuilder
+ *             .getDatabaseSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * DatabaseAdminStubSettings databaseAdminSettings = databaseAdminSettingsBuilder.build();
+ * }
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + * + *

To configure the RetrySettings of a Long Running Operation method, create an + * OperationTimedPollAlgorithm object and update the RPC's polling algorithm. For example, to + * configure the RetrySettings for createDatabase: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * DatabaseAdminStubSettings.Builder databaseAdminSettingsBuilder =
+ *     DatabaseAdminStubSettings.newBuilder();
+ * TimedRetryAlgorithm timedRetryAlgorithm =
+ *     OperationalTimedPollAlgorithm.create(
+ *         RetrySettings.newBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofMillis(500))
+ *             .setRetryDelayMultiplier(1.5)
+ *             .setMaxRetryDelayDuration(Duration.ofMillis(5000))
+ *             .setTotalTimeoutDuration(Duration.ofHours(24))
+ *             .build());
+ * databaseAdminSettingsBuilder
+ *     .createClusterOperationSettings()
+ *     .setPollingAlgorithm(timedRetryAlgorithm)
+ *     .build();
+ * }
+ */ +@Generated("by gapic-generator-java") +@SuppressWarnings("CanonicalDuration") +public class DatabaseAdminStubSettings extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder() + .add("https://www.googleapis.com/auth/cloud-platform") + .add("https://www.googleapis.com/auth/spanner.admin") + .build(); + + private final PagedCallSettings< + ListDatabasesRequest, ListDatabasesResponse, ListDatabasesPagedResponse> + listDatabasesSettings; + private final UnaryCallSettings createDatabaseSettings; + private final OperationCallSettings + createDatabaseOperationSettings; + private final UnaryCallSettings getDatabaseSettings; + private final UnaryCallSettings updateDatabaseSettings; + private final OperationCallSettings + updateDatabaseOperationSettings; + private final UnaryCallSettings updateDatabaseDdlSettings; + private final OperationCallSettings + updateDatabaseDdlOperationSettings; + private final UnaryCallSettings dropDatabaseSettings; + private final UnaryCallSettings + getDatabaseDdlSettings; + private final UnaryCallSettings setIamPolicySettings; + private final UnaryCallSettings getIamPolicySettings; + private final UnaryCallSettings + testIamPermissionsSettings; + private final UnaryCallSettings createBackupSettings; + private final OperationCallSettings + createBackupOperationSettings; + private final UnaryCallSettings copyBackupSettings; + private final OperationCallSettings + copyBackupOperationSettings; + private final UnaryCallSettings getBackupSettings; + private final UnaryCallSettings updateBackupSettings; + private final UnaryCallSettings deleteBackupSettings; + private final PagedCallSettings + listBackupsSettings; + private final UnaryCallSettings restoreDatabaseSettings; + private final OperationCallSettings + restoreDatabaseOperationSettings; + private final PagedCallSettings< + ListDatabaseOperationsRequest, + ListDatabaseOperationsResponse, + 
ListDatabaseOperationsPagedResponse> + listDatabaseOperationsSettings; + private final PagedCallSettings< + ListBackupOperationsRequest, + ListBackupOperationsResponse, + ListBackupOperationsPagedResponse> + listBackupOperationsSettings; + private final PagedCallSettings< + ListDatabaseRolesRequest, ListDatabaseRolesResponse, ListDatabaseRolesPagedResponse> + listDatabaseRolesSettings; + private final UnaryCallSettings + addSplitPointsSettings; + private final UnaryCallSettings + createBackupScheduleSettings; + private final UnaryCallSettings + getBackupScheduleSettings; + private final UnaryCallSettings + updateBackupScheduleSettings; + private final UnaryCallSettings deleteBackupScheduleSettings; + private final PagedCallSettings< + ListBackupSchedulesRequest, ListBackupSchedulesResponse, ListBackupSchedulesPagedResponse> + listBackupSchedulesSettings; + private final UnaryCallSettings< + InternalUpdateGraphOperationRequest, InternalUpdateGraphOperationResponse> + internalUpdateGraphOperationSettings; + + private static final PagedListDescriptor + LIST_DATABASES_PAGE_STR_DESC = + new PagedListDescriptor() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListDatabasesRequest injectToken(ListDatabasesRequest payload, String token) { + return ListDatabasesRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListDatabasesRequest injectPageSize(ListDatabasesRequest payload, int pageSize) { + return ListDatabasesRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListDatabasesRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListDatabasesResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListDatabasesResponse payload) { + return payload.getDatabasesList(); + } + }; + + private static final PagedListDescriptor + LIST_BACKUPS_PAGE_STR_DESC 
= + new PagedListDescriptor() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListBackupsRequest injectToken(ListBackupsRequest payload, String token) { + return ListBackupsRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListBackupsRequest injectPageSize(ListBackupsRequest payload, int pageSize) { + return ListBackupsRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListBackupsRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListBackupsResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListBackupsResponse payload) { + return payload.getBackupsList(); + } + }; + + private static final PagedListDescriptor< + ListDatabaseOperationsRequest, ListDatabaseOperationsResponse, Operation> + LIST_DATABASE_OPERATIONS_PAGE_STR_DESC = + new PagedListDescriptor< + ListDatabaseOperationsRequest, ListDatabaseOperationsResponse, Operation>() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListDatabaseOperationsRequest injectToken( + ListDatabaseOperationsRequest payload, String token) { + return ListDatabaseOperationsRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListDatabaseOperationsRequest injectPageSize( + ListDatabaseOperationsRequest payload, int pageSize) { + return ListDatabaseOperationsRequest.newBuilder(payload) + .setPageSize(pageSize) + .build(); + } + + @Override + public Integer extractPageSize(ListDatabaseOperationsRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListDatabaseOperationsResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListDatabaseOperationsResponse payload) { + return payload.getOperationsList(); + } + }; + + private static 
final PagedListDescriptor< + ListBackupOperationsRequest, ListBackupOperationsResponse, Operation> + LIST_BACKUP_OPERATIONS_PAGE_STR_DESC = + new PagedListDescriptor< + ListBackupOperationsRequest, ListBackupOperationsResponse, Operation>() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListBackupOperationsRequest injectToken( + ListBackupOperationsRequest payload, String token) { + return ListBackupOperationsRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListBackupOperationsRequest injectPageSize( + ListBackupOperationsRequest payload, int pageSize) { + return ListBackupOperationsRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListBackupOperationsRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListBackupOperationsResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListBackupOperationsResponse payload) { + return payload.getOperationsList(); + } + }; + + private static final PagedListDescriptor< + ListDatabaseRolesRequest, ListDatabaseRolesResponse, DatabaseRole> + LIST_DATABASE_ROLES_PAGE_STR_DESC = + new PagedListDescriptor< + ListDatabaseRolesRequest, ListDatabaseRolesResponse, DatabaseRole>() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListDatabaseRolesRequest injectToken( + ListDatabaseRolesRequest payload, String token) { + return ListDatabaseRolesRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListDatabaseRolesRequest injectPageSize( + ListDatabaseRolesRequest payload, int pageSize) { + return ListDatabaseRolesRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListDatabaseRolesRequest payload) { + return payload.getPageSize(); + } + + @Override + public String 
extractNextToken(ListDatabaseRolesResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListDatabaseRolesResponse payload) { + return payload.getDatabaseRolesList(); + } + }; + + private static final PagedListDescriptor< + ListBackupSchedulesRequest, ListBackupSchedulesResponse, BackupSchedule> + LIST_BACKUP_SCHEDULES_PAGE_STR_DESC = + new PagedListDescriptor< + ListBackupSchedulesRequest, ListBackupSchedulesResponse, BackupSchedule>() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListBackupSchedulesRequest injectToken( + ListBackupSchedulesRequest payload, String token) { + return ListBackupSchedulesRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListBackupSchedulesRequest injectPageSize( + ListBackupSchedulesRequest payload, int pageSize) { + return ListBackupSchedulesRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListBackupSchedulesRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListBackupSchedulesResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListBackupSchedulesResponse payload) { + return payload.getBackupSchedulesList(); + } + }; + + private static final PagedListResponseFactory< + ListDatabasesRequest, ListDatabasesResponse, ListDatabasesPagedResponse> + LIST_DATABASES_PAGE_STR_FACT = + new PagedListResponseFactory< + ListDatabasesRequest, ListDatabasesResponse, ListDatabasesPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListDatabasesRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext pageContext = + PageContext.create(callable, LIST_DATABASES_PAGE_STR_DESC, request, context); + return ListDatabasesPagedResponse.createAsync(pageContext, futureResponse); + } + }; + 
+ private static final PagedListResponseFactory< + ListBackupsRequest, ListBackupsResponse, ListBackupsPagedResponse> + LIST_BACKUPS_PAGE_STR_FACT = + new PagedListResponseFactory< + ListBackupsRequest, ListBackupsResponse, ListBackupsPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListBackupsRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext pageContext = + PageContext.create(callable, LIST_BACKUPS_PAGE_STR_DESC, request, context); + return ListBackupsPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + private static final PagedListResponseFactory< + ListDatabaseOperationsRequest, + ListDatabaseOperationsResponse, + ListDatabaseOperationsPagedResponse> + LIST_DATABASE_OPERATIONS_PAGE_STR_FACT = + new PagedListResponseFactory< + ListDatabaseOperationsRequest, + ListDatabaseOperationsResponse, + ListDatabaseOperationsPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable + callable, + ListDatabaseOperationsRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext + pageContext = + PageContext.create( + callable, LIST_DATABASE_OPERATIONS_PAGE_STR_DESC, request, context); + return ListDatabaseOperationsPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + private static final PagedListResponseFactory< + ListBackupOperationsRequest, + ListBackupOperationsResponse, + ListBackupOperationsPagedResponse> + LIST_BACKUP_OPERATIONS_PAGE_STR_FACT = + new PagedListResponseFactory< + ListBackupOperationsRequest, + ListBackupOperationsResponse, + ListBackupOperationsPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListBackupOperationsRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext + pageContext = + PageContext.create( + callable, LIST_BACKUP_OPERATIONS_PAGE_STR_DESC, request, context); + return 
ListBackupOperationsPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + private static final PagedListResponseFactory< + ListDatabaseRolesRequest, ListDatabaseRolesResponse, ListDatabaseRolesPagedResponse> + LIST_DATABASE_ROLES_PAGE_STR_FACT = + new PagedListResponseFactory< + ListDatabaseRolesRequest, + ListDatabaseRolesResponse, + ListDatabaseRolesPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListDatabaseRolesRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext + pageContext = + PageContext.create( + callable, LIST_DATABASE_ROLES_PAGE_STR_DESC, request, context); + return ListDatabaseRolesPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + private static final PagedListResponseFactory< + ListBackupSchedulesRequest, ListBackupSchedulesResponse, ListBackupSchedulesPagedResponse> + LIST_BACKUP_SCHEDULES_PAGE_STR_FACT = + new PagedListResponseFactory< + ListBackupSchedulesRequest, + ListBackupSchedulesResponse, + ListBackupSchedulesPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListBackupSchedulesRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext + pageContext = + PageContext.create( + callable, LIST_BACKUP_SCHEDULES_PAGE_STR_DESC, request, context); + return ListBackupSchedulesPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + /** Returns the object with the settings used for calls to listDatabases. */ + public PagedCallSettings + listDatabasesSettings() { + return listDatabasesSettings; + } + + /** Returns the object with the settings used for calls to createDatabase. */ + public UnaryCallSettings createDatabaseSettings() { + return createDatabaseSettings; + } + + /** Returns the object with the settings used for calls to createDatabase. 
*/ + public OperationCallSettings + createDatabaseOperationSettings() { + return createDatabaseOperationSettings; + } + + /** Returns the object with the settings used for calls to getDatabase. */ + public UnaryCallSettings getDatabaseSettings() { + return getDatabaseSettings; + } + + /** Returns the object with the settings used for calls to updateDatabase. */ + public UnaryCallSettings updateDatabaseSettings() { + return updateDatabaseSettings; + } + + /** Returns the object with the settings used for calls to updateDatabase. */ + public OperationCallSettings + updateDatabaseOperationSettings() { + return updateDatabaseOperationSettings; + } + + /** Returns the object with the settings used for calls to updateDatabaseDdl. */ + public UnaryCallSettings updateDatabaseDdlSettings() { + return updateDatabaseDdlSettings; + } + + /** Returns the object with the settings used for calls to updateDatabaseDdl. */ + public OperationCallSettings + updateDatabaseDdlOperationSettings() { + return updateDatabaseDdlOperationSettings; + } + + /** Returns the object with the settings used for calls to dropDatabase. */ + public UnaryCallSettings dropDatabaseSettings() { + return dropDatabaseSettings; + } + + /** Returns the object with the settings used for calls to getDatabaseDdl. */ + public UnaryCallSettings getDatabaseDdlSettings() { + return getDatabaseDdlSettings; + } + + /** Returns the object with the settings used for calls to setIamPolicy. */ + public UnaryCallSettings setIamPolicySettings() { + return setIamPolicySettings; + } + + /** Returns the object with the settings used for calls to getIamPolicy. */ + public UnaryCallSettings getIamPolicySettings() { + return getIamPolicySettings; + } + + /** Returns the object with the settings used for calls to testIamPermissions. */ + public UnaryCallSettings + testIamPermissionsSettings() { + return testIamPermissionsSettings; + } + + /** Returns the object with the settings used for calls to createBackup. 
*/ + public UnaryCallSettings createBackupSettings() { + return createBackupSettings; + } + + /** Returns the object with the settings used for calls to createBackup. */ + public OperationCallSettings + createBackupOperationSettings() { + return createBackupOperationSettings; + } + + /** Returns the object with the settings used for calls to copyBackup. */ + public UnaryCallSettings copyBackupSettings() { + return copyBackupSettings; + } + + /** Returns the object with the settings used for calls to copyBackup. */ + public OperationCallSettings + copyBackupOperationSettings() { + return copyBackupOperationSettings; + } + + /** Returns the object with the settings used for calls to getBackup. */ + public UnaryCallSettings getBackupSettings() { + return getBackupSettings; + } + + /** Returns the object with the settings used for calls to updateBackup. */ + public UnaryCallSettings updateBackupSettings() { + return updateBackupSettings; + } + + /** Returns the object with the settings used for calls to deleteBackup. */ + public UnaryCallSettings deleteBackupSettings() { + return deleteBackupSettings; + } + + /** Returns the object with the settings used for calls to listBackups. */ + public PagedCallSettings + listBackupsSettings() { + return listBackupsSettings; + } + + /** Returns the object with the settings used for calls to restoreDatabase. */ + public UnaryCallSettings restoreDatabaseSettings() { + return restoreDatabaseSettings; + } + + /** Returns the object with the settings used for calls to restoreDatabase. */ + public OperationCallSettings + restoreDatabaseOperationSettings() { + return restoreDatabaseOperationSettings; + } + + /** Returns the object with the settings used for calls to listDatabaseOperations. 
*/ + public PagedCallSettings< + ListDatabaseOperationsRequest, + ListDatabaseOperationsResponse, + ListDatabaseOperationsPagedResponse> + listDatabaseOperationsSettings() { + return listDatabaseOperationsSettings; + } + + /** Returns the object with the settings used for calls to listBackupOperations. */ + public PagedCallSettings< + ListBackupOperationsRequest, + ListBackupOperationsResponse, + ListBackupOperationsPagedResponse> + listBackupOperationsSettings() { + return listBackupOperationsSettings; + } + + /** Returns the object with the settings used for calls to listDatabaseRoles. */ + public PagedCallSettings< + ListDatabaseRolesRequest, ListDatabaseRolesResponse, ListDatabaseRolesPagedResponse> + listDatabaseRolesSettings() { + return listDatabaseRolesSettings; + } + + /** Returns the object with the settings used for calls to addSplitPoints. */ + public UnaryCallSettings addSplitPointsSettings() { + return addSplitPointsSettings; + } + + /** Returns the object with the settings used for calls to createBackupSchedule. */ + public UnaryCallSettings + createBackupScheduleSettings() { + return createBackupScheduleSettings; + } + + /** Returns the object with the settings used for calls to getBackupSchedule. */ + public UnaryCallSettings getBackupScheduleSettings() { + return getBackupScheduleSettings; + } + + /** Returns the object with the settings used for calls to updateBackupSchedule. */ + public UnaryCallSettings + updateBackupScheduleSettings() { + return updateBackupScheduleSettings; + } + + /** Returns the object with the settings used for calls to deleteBackupSchedule. */ + public UnaryCallSettings deleteBackupScheduleSettings() { + return deleteBackupScheduleSettings; + } + + /** Returns the object with the settings used for calls to listBackupSchedules. 
*/ + public PagedCallSettings< + ListBackupSchedulesRequest, ListBackupSchedulesResponse, ListBackupSchedulesPagedResponse> + listBackupSchedulesSettings() { + return listBackupSchedulesSettings; + } + + /** Returns the object with the settings used for calls to internalUpdateGraph. */ + public UnaryCallSettings< + InternalUpdateGraphOperationRequest, InternalUpdateGraphOperationResponse> + internalUpdateGraphOperationSettings() { + return internalUpdateGraphOperationSettings; + } + + public DatabaseAdminStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcDatabaseAdminStub.create(this); + } + if (getTransportChannelProvider() + .getTransportName() + .equals(HttpJsonTransportChannel.getHttpJsonTransportName())) { + return HttpJsonDatabaseAdminStub.create(this); + } + throw new UnsupportedOperationException( + String.format( + "Transport not supported: %s", getTransportChannelProvider().getTransportName())); + } + + /** Returns the default service name. */ + @Override + public String getServiceName() { + return "spanner"; + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") + public static String getDefaultEndpoint() { + return "spanner.googleapis.com:443"; + } + + /** Returns the default mTLS service endpoint. */ + public static String getDefaultMtlsEndpoint() { + return "spanner.mtls.googleapis.com:443"; + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. 
*/ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder() + .setScopesToApply(DEFAULT_SERVICE_SCOPES) + .setUseJwtAccessWithScope(true); + } + + /** Returns a builder for the default gRPC ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder() + .setMaxInboundMessageSize(Integer.MAX_VALUE); + } + + /** Returns a builder for the default REST ChannelProvider for this service. */ + @BetaApi + public static InstantiatingHttpJsonChannelProvider.Builder + defaultHttpJsonTransportProviderBuilder() { + return InstantiatingHttpJsonChannelProvider.newBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + public static ApiClientHeaderProvider.Builder defaultGrpcApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(DatabaseAdminStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + public static ApiClientHeaderProvider.Builder defaultHttpJsonApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(DatabaseAdminStubSettings.class)) + .setTransportToken( + GaxHttpJsonProperties.getHttpJsonTokenName(), + GaxHttpJsonProperties.getHttpJsonVersion()); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return DatabaseAdminStubSettings.defaultGrpcApiClientHeaderProviderBuilder(); + } + + /** Returns a new gRPC builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new REST builder for this class. 
*/ + public static Builder newHttpJsonBuilder() { + return Builder.createHttpJsonDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected DatabaseAdminStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + listDatabasesSettings = settingsBuilder.listDatabasesSettings().build(); + createDatabaseSettings = settingsBuilder.createDatabaseSettings().build(); + createDatabaseOperationSettings = settingsBuilder.createDatabaseOperationSettings().build(); + getDatabaseSettings = settingsBuilder.getDatabaseSettings().build(); + updateDatabaseSettings = settingsBuilder.updateDatabaseSettings().build(); + updateDatabaseOperationSettings = settingsBuilder.updateDatabaseOperationSettings().build(); + updateDatabaseDdlSettings = settingsBuilder.updateDatabaseDdlSettings().build(); + updateDatabaseDdlOperationSettings = + settingsBuilder.updateDatabaseDdlOperationSettings().build(); + dropDatabaseSettings = settingsBuilder.dropDatabaseSettings().build(); + getDatabaseDdlSettings = settingsBuilder.getDatabaseDdlSettings().build(); + setIamPolicySettings = settingsBuilder.setIamPolicySettings().build(); + getIamPolicySettings = settingsBuilder.getIamPolicySettings().build(); + testIamPermissionsSettings = settingsBuilder.testIamPermissionsSettings().build(); + createBackupSettings = settingsBuilder.createBackupSettings().build(); + createBackupOperationSettings = settingsBuilder.createBackupOperationSettings().build(); + copyBackupSettings = settingsBuilder.copyBackupSettings().build(); + copyBackupOperationSettings = settingsBuilder.copyBackupOperationSettings().build(); + getBackupSettings = settingsBuilder.getBackupSettings().build(); + updateBackupSettings = 
settingsBuilder.updateBackupSettings().build(); + deleteBackupSettings = settingsBuilder.deleteBackupSettings().build(); + listBackupsSettings = settingsBuilder.listBackupsSettings().build(); + restoreDatabaseSettings = settingsBuilder.restoreDatabaseSettings().build(); + restoreDatabaseOperationSettings = settingsBuilder.restoreDatabaseOperationSettings().build(); + listDatabaseOperationsSettings = settingsBuilder.listDatabaseOperationsSettings().build(); + listBackupOperationsSettings = settingsBuilder.listBackupOperationsSettings().build(); + listDatabaseRolesSettings = settingsBuilder.listDatabaseRolesSettings().build(); + addSplitPointsSettings = settingsBuilder.addSplitPointsSettings().build(); + createBackupScheduleSettings = settingsBuilder.createBackupScheduleSettings().build(); + getBackupScheduleSettings = settingsBuilder.getBackupScheduleSettings().build(); + updateBackupScheduleSettings = settingsBuilder.updateBackupScheduleSettings().build(); + deleteBackupScheduleSettings = settingsBuilder.deleteBackupScheduleSettings().build(); + listBackupSchedulesSettings = settingsBuilder.listBackupSchedulesSettings().build(); + internalUpdateGraphOperationSettings = + settingsBuilder.internalUpdateGraphOperationSettings().build(); + } + + @Override + protected LibraryMetadata getLibraryMetadata() { + return LibraryMetadata.newBuilder() + .setArtifactName("com.google.cloud:google-cloud-spanner") + .setRepository("googleapis/google-cloud-java") + .setVersion(Version.VERSION) + .build(); + } + + /** Builder for DatabaseAdminStubSettings. 
*/ + public static class Builder extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + private final PagedCallSettings.Builder< + ListDatabasesRequest, ListDatabasesResponse, ListDatabasesPagedResponse> + listDatabasesSettings; + private final UnaryCallSettings.Builder + createDatabaseSettings; + private final OperationCallSettings.Builder< + CreateDatabaseRequest, Database, CreateDatabaseMetadata> + createDatabaseOperationSettings; + private final UnaryCallSettings.Builder getDatabaseSettings; + private final UnaryCallSettings.Builder + updateDatabaseSettings; + private final OperationCallSettings.Builder< + UpdateDatabaseRequest, Database, UpdateDatabaseMetadata> + updateDatabaseOperationSettings; + private final UnaryCallSettings.Builder + updateDatabaseDdlSettings; + private final OperationCallSettings.Builder< + UpdateDatabaseDdlRequest, Empty, UpdateDatabaseDdlMetadata> + updateDatabaseDdlOperationSettings; + private final UnaryCallSettings.Builder dropDatabaseSettings; + private final UnaryCallSettings.Builder + getDatabaseDdlSettings; + private final UnaryCallSettings.Builder setIamPolicySettings; + private final UnaryCallSettings.Builder getIamPolicySettings; + private final UnaryCallSettings.Builder + testIamPermissionsSettings; + private final UnaryCallSettings.Builder createBackupSettings; + private final OperationCallSettings.Builder + createBackupOperationSettings; + private final UnaryCallSettings.Builder copyBackupSettings; + private final OperationCallSettings.Builder + copyBackupOperationSettings; + private final UnaryCallSettings.Builder getBackupSettings; + private final UnaryCallSettings.Builder updateBackupSettings; + private final UnaryCallSettings.Builder deleteBackupSettings; + private final PagedCallSettings.Builder< + ListBackupsRequest, ListBackupsResponse, ListBackupsPagedResponse> + listBackupsSettings; + private final UnaryCallSettings.Builder + restoreDatabaseSettings; + private final 
OperationCallSettings.Builder< + RestoreDatabaseRequest, Database, RestoreDatabaseMetadata> + restoreDatabaseOperationSettings; + private final PagedCallSettings.Builder< + ListDatabaseOperationsRequest, + ListDatabaseOperationsResponse, + ListDatabaseOperationsPagedResponse> + listDatabaseOperationsSettings; + private final PagedCallSettings.Builder< + ListBackupOperationsRequest, + ListBackupOperationsResponse, + ListBackupOperationsPagedResponse> + listBackupOperationsSettings; + private final PagedCallSettings.Builder< + ListDatabaseRolesRequest, ListDatabaseRolesResponse, ListDatabaseRolesPagedResponse> + listDatabaseRolesSettings; + private final UnaryCallSettings.Builder + addSplitPointsSettings; + private final UnaryCallSettings.Builder + createBackupScheduleSettings; + private final UnaryCallSettings.Builder + getBackupScheduleSettings; + private final UnaryCallSettings.Builder + updateBackupScheduleSettings; + private final UnaryCallSettings.Builder + deleteBackupScheduleSettings; + private final PagedCallSettings.Builder< + ListBackupSchedulesRequest, + ListBackupSchedulesResponse, + ListBackupSchedulesPagedResponse> + listBackupSchedulesSettings; + private final UnaryCallSettings.Builder< + InternalUpdateGraphOperationRequest, InternalUpdateGraphOperationResponse> + internalUpdateGraphOperationSettings; + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put( + "retry_policy_0_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.UNAVAILABLE, StatusCode.Code.DEADLINE_EXCEEDED))); + definitions.put( + "no_retry_2_codes", ImmutableSet.copyOf(Lists.newArrayList())); + definitions.put( + "no_retry_3_codes", ImmutableSet.copyOf(Lists.newArrayList())); + definitions.put( + "retry_policy_1_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.UNAVAILABLE, StatusCode.Code.DEADLINE_EXCEEDED))); + 
definitions.put("no_retry_codes", ImmutableSet.copyOf(Lists.newArrayList())); + RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1000L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(32000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setTotalTimeoutDuration(Duration.ofMillis(3600000L)) + .build(); + definitions.put("retry_policy_0_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setTotalTimeoutDuration(Duration.ofMillis(3600000L)) + .build(); + definitions.put("no_retry_2_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(30000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(30000L)) + .setTotalTimeoutDuration(Duration.ofMillis(30000L)) + .build(); + definitions.put("no_retry_3_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1000L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(32000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(30000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(30000L)) + .setTotalTimeoutDuration(Duration.ofMillis(30000L)) + .build(); + definitions.put("retry_policy_1_params", settings); + settings = RetrySettings.newBuilder().setRpcTimeoutMultiplier(1.0).build(); + definitions.put("no_retry_params", settings); + RETRY_PARAM_DEFINITIONS = 
definitions.build(); + } + + protected Builder() { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + listDatabasesSettings = PagedCallSettings.newBuilder(LIST_DATABASES_PAGE_STR_FACT); + createDatabaseSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + createDatabaseOperationSettings = OperationCallSettings.newBuilder(); + getDatabaseSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + updateDatabaseSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + updateDatabaseOperationSettings = OperationCallSettings.newBuilder(); + updateDatabaseDdlSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + updateDatabaseDdlOperationSettings = OperationCallSettings.newBuilder(); + dropDatabaseSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + getDatabaseDdlSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + setIamPolicySettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + getIamPolicySettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + testIamPermissionsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + createBackupSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + createBackupOperationSettings = OperationCallSettings.newBuilder(); + copyBackupSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + copyBackupOperationSettings = OperationCallSettings.newBuilder(); + getBackupSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + updateBackupSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + deleteBackupSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + listBackupsSettings = PagedCallSettings.newBuilder(LIST_BACKUPS_PAGE_STR_FACT); + restoreDatabaseSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + restoreDatabaseOperationSettings = OperationCallSettings.newBuilder(); + listDatabaseOperationsSettings = + PagedCallSettings.newBuilder(LIST_DATABASE_OPERATIONS_PAGE_STR_FACT); + 
listBackupOperationsSettings = + PagedCallSettings.newBuilder(LIST_BACKUP_OPERATIONS_PAGE_STR_FACT); + listDatabaseRolesSettings = PagedCallSettings.newBuilder(LIST_DATABASE_ROLES_PAGE_STR_FACT); + addSplitPointsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + createBackupScheduleSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + getBackupScheduleSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + updateBackupScheduleSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + deleteBackupScheduleSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + listBackupSchedulesSettings = + PagedCallSettings.newBuilder(LIST_BACKUP_SCHEDULES_PAGE_STR_FACT); + internalUpdateGraphOperationSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + listDatabasesSettings, + createDatabaseSettings, + getDatabaseSettings, + updateDatabaseSettings, + updateDatabaseDdlSettings, + dropDatabaseSettings, + getDatabaseDdlSettings, + setIamPolicySettings, + getIamPolicySettings, + testIamPermissionsSettings, + createBackupSettings, + copyBackupSettings, + getBackupSettings, + updateBackupSettings, + deleteBackupSettings, + listBackupsSettings, + restoreDatabaseSettings, + listDatabaseOperationsSettings, + listBackupOperationsSettings, + listDatabaseRolesSettings, + addSplitPointsSettings, + createBackupScheduleSettings, + getBackupScheduleSettings, + updateBackupScheduleSettings, + deleteBackupScheduleSettings, + listBackupSchedulesSettings, + internalUpdateGraphOperationSettings); + initDefaults(this); + } + + protected Builder(DatabaseAdminStubSettings settings) { + super(settings); + + listDatabasesSettings = settings.listDatabasesSettings.toBuilder(); + createDatabaseSettings = settings.createDatabaseSettings.toBuilder(); + createDatabaseOperationSettings = settings.createDatabaseOperationSettings.toBuilder(); + getDatabaseSettings = settings.getDatabaseSettings.toBuilder(); + 
updateDatabaseSettings = settings.updateDatabaseSettings.toBuilder(); + updateDatabaseOperationSettings = settings.updateDatabaseOperationSettings.toBuilder(); + updateDatabaseDdlSettings = settings.updateDatabaseDdlSettings.toBuilder(); + updateDatabaseDdlOperationSettings = settings.updateDatabaseDdlOperationSettings.toBuilder(); + dropDatabaseSettings = settings.dropDatabaseSettings.toBuilder(); + getDatabaseDdlSettings = settings.getDatabaseDdlSettings.toBuilder(); + setIamPolicySettings = settings.setIamPolicySettings.toBuilder(); + getIamPolicySettings = settings.getIamPolicySettings.toBuilder(); + testIamPermissionsSettings = settings.testIamPermissionsSettings.toBuilder(); + createBackupSettings = settings.createBackupSettings.toBuilder(); + createBackupOperationSettings = settings.createBackupOperationSettings.toBuilder(); + copyBackupSettings = settings.copyBackupSettings.toBuilder(); + copyBackupOperationSettings = settings.copyBackupOperationSettings.toBuilder(); + getBackupSettings = settings.getBackupSettings.toBuilder(); + updateBackupSettings = settings.updateBackupSettings.toBuilder(); + deleteBackupSettings = settings.deleteBackupSettings.toBuilder(); + listBackupsSettings = settings.listBackupsSettings.toBuilder(); + restoreDatabaseSettings = settings.restoreDatabaseSettings.toBuilder(); + restoreDatabaseOperationSettings = settings.restoreDatabaseOperationSettings.toBuilder(); + listDatabaseOperationsSettings = settings.listDatabaseOperationsSettings.toBuilder(); + listBackupOperationsSettings = settings.listBackupOperationsSettings.toBuilder(); + listDatabaseRolesSettings = settings.listDatabaseRolesSettings.toBuilder(); + addSplitPointsSettings = settings.addSplitPointsSettings.toBuilder(); + createBackupScheduleSettings = settings.createBackupScheduleSettings.toBuilder(); + getBackupScheduleSettings = settings.getBackupScheduleSettings.toBuilder(); + updateBackupScheduleSettings = settings.updateBackupScheduleSettings.toBuilder(); + 
deleteBackupScheduleSettings = settings.deleteBackupScheduleSettings.toBuilder(); + listBackupSchedulesSettings = settings.listBackupSchedulesSettings.toBuilder(); + internalUpdateGraphOperationSettings = + settings.internalUpdateGraphOperationSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + listDatabasesSettings, + createDatabaseSettings, + getDatabaseSettings, + updateDatabaseSettings, + updateDatabaseDdlSettings, + dropDatabaseSettings, + getDatabaseDdlSettings, + setIamPolicySettings, + getIamPolicySettings, + testIamPermissionsSettings, + createBackupSettings, + copyBackupSettings, + getBackupSettings, + updateBackupSettings, + deleteBackupSettings, + listBackupsSettings, + restoreDatabaseSettings, + listDatabaseOperationsSettings, + listBackupOperationsSettings, + listDatabaseRolesSettings, + addSplitPointsSettings, + createBackupScheduleSettings, + getBackupScheduleSettings, + updateBackupScheduleSettings, + deleteBackupScheduleSettings, + listBackupSchedulesSettings, + internalUpdateGraphOperationSettings); + } + + private static Builder createDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder createHttpJsonDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultHttpJsonTransportProviderBuilder().build()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultHttpJsonApiClientHeaderProviderBuilder().build()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + 
builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + builder + .listDatabasesSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .createDatabaseSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_2_params")); + + builder + .getDatabaseSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .updateDatabaseSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .updateDatabaseDdlSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .dropDatabaseSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .getDatabaseDdlSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .setIamPolicySettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_3_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_3_params")); + + builder + .getIamPolicySettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_1_params")); + + builder + .testIamPermissionsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_3_codes")) + 
.setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_3_params")); + + builder + .createBackupSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_2_params")); + + builder + .copyBackupSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_2_params")); + + builder + .getBackupSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .updateBackupSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .deleteBackupSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .listBackupsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .restoreDatabaseSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_2_params")); + + builder + .listDatabaseOperationsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .listBackupOperationsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .listDatabaseRolesSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + 
.addSplitPointsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .createBackupScheduleSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .getBackupScheduleSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .updateBackupScheduleSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .deleteBackupScheduleSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .listBackupSchedulesSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .internalUpdateGraphOperationSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + + builder + .createDatabaseOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_2_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(Database.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create(CreateDatabaseMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + 
.setInitialRetryDelayDuration(Duration.ofMillis(1000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofMillis(45000L)) + .setInitialRpcTimeoutDuration(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ZERO) + .setTotalTimeoutDuration(Duration.ofMillis(86400000L)) + .build())); + + builder + .updateDatabaseOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(Database.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create(UpdateDatabaseMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofMillis(45000L)) + .setInitialRpcTimeoutDuration(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ZERO) + .setTotalTimeoutDuration(Duration.ofMillis(300000L)) + .build())); + + builder + .updateDatabaseDdlOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(Empty.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create( + UpdateDatabaseDdlMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1000L)) + .setRetryDelayMultiplier(1.5) + 
.setMaxRetryDelayDuration(Duration.ofMillis(45000L)) + .setInitialRpcTimeoutDuration(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ZERO) + .setTotalTimeoutDuration(Duration.ofMillis(86400000L)) + .build())); + + builder + .createBackupOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_2_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(Backup.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create(CreateBackupMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(20000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofMillis(45000L)) + .setInitialRpcTimeoutDuration(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ZERO) + .setTotalTimeoutDuration(Duration.ofMillis(172800000L)) + .build())); + + builder + .copyBackupOperationSettings() + .setInitialCallSettings( + UnaryCallSettings.newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_2_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(Backup.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create(CopyBackupMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofMillis(45000L)) + .setInitialRpcTimeoutDuration(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + 
.setMaxRpcTimeoutDuration(Duration.ZERO) + .setTotalTimeoutDuration(Duration.ofMillis(300000L)) + .build())); + + builder + .restoreDatabaseOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_2_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(Database.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create(RestoreDatabaseMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(20000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofMillis(45000L)) + .setInitialRpcTimeoutDuration(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ZERO) + .setTotalTimeoutDuration(Duration.ofMillis(86400000L)) + .build())); + + return builder; + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to listDatabases. */ + public PagedCallSettings.Builder< + ListDatabasesRequest, ListDatabasesResponse, ListDatabasesPagedResponse> + listDatabasesSettings() { + return listDatabasesSettings; + } + + /** Returns the builder for the settings used for calls to createDatabase. */ + public UnaryCallSettings.Builder createDatabaseSettings() { + return createDatabaseSettings; + } + + /** Returns the builder for the settings used for calls to createDatabase. */ + public OperationCallSettings.Builder + createDatabaseOperationSettings() { + return createDatabaseOperationSettings; + } + + /** Returns the builder for the settings used for calls to getDatabase. */ + public UnaryCallSettings.Builder getDatabaseSettings() { + return getDatabaseSettings; + } + + /** Returns the builder for the settings used for calls to updateDatabase. */ + public UnaryCallSettings.Builder updateDatabaseSettings() { + return updateDatabaseSettings; + } + + /** Returns the builder for the settings used for calls to updateDatabase. */ + public OperationCallSettings.Builder + updateDatabaseOperationSettings() { + return updateDatabaseOperationSettings; + } + + /** Returns the builder for the settings used for calls to updateDatabaseDdl. */ + public UnaryCallSettings.Builder + updateDatabaseDdlSettings() { + return updateDatabaseDdlSettings; + } + + /** Returns the builder for the settings used for calls to updateDatabaseDdl. 
*/ + public OperationCallSettings.Builder + updateDatabaseDdlOperationSettings() { + return updateDatabaseDdlOperationSettings; + } + + /** Returns the builder for the settings used for calls to dropDatabase. */ + public UnaryCallSettings.Builder dropDatabaseSettings() { + return dropDatabaseSettings; + } + + /** Returns the builder for the settings used for calls to getDatabaseDdl. */ + public UnaryCallSettings.Builder + getDatabaseDdlSettings() { + return getDatabaseDdlSettings; + } + + /** Returns the builder for the settings used for calls to setIamPolicy. */ + public UnaryCallSettings.Builder setIamPolicySettings() { + return setIamPolicySettings; + } + + /** Returns the builder for the settings used for calls to getIamPolicy. */ + public UnaryCallSettings.Builder getIamPolicySettings() { + return getIamPolicySettings; + } + + /** Returns the builder for the settings used for calls to testIamPermissions. */ + public UnaryCallSettings.Builder + testIamPermissionsSettings() { + return testIamPermissionsSettings; + } + + /** Returns the builder for the settings used for calls to createBackup. */ + public UnaryCallSettings.Builder createBackupSettings() { + return createBackupSettings; + } + + /** Returns the builder for the settings used for calls to createBackup. */ + public OperationCallSettings.Builder + createBackupOperationSettings() { + return createBackupOperationSettings; + } + + /** Returns the builder for the settings used for calls to copyBackup. */ + public UnaryCallSettings.Builder copyBackupSettings() { + return copyBackupSettings; + } + + /** Returns the builder for the settings used for calls to copyBackup. */ + public OperationCallSettings.Builder + copyBackupOperationSettings() { + return copyBackupOperationSettings; + } + + /** Returns the builder for the settings used for calls to getBackup. 
*/ + public UnaryCallSettings.Builder getBackupSettings() { + return getBackupSettings; + } + + /** Returns the builder for the settings used for calls to updateBackup. */ + public UnaryCallSettings.Builder updateBackupSettings() { + return updateBackupSettings; + } + + /** Returns the builder for the settings used for calls to deleteBackup. */ + public UnaryCallSettings.Builder deleteBackupSettings() { + return deleteBackupSettings; + } + + /** Returns the builder for the settings used for calls to listBackups. */ + public PagedCallSettings.Builder< + ListBackupsRequest, ListBackupsResponse, ListBackupsPagedResponse> + listBackupsSettings() { + return listBackupsSettings; + } + + /** Returns the builder for the settings used for calls to restoreDatabase. */ + public UnaryCallSettings.Builder restoreDatabaseSettings() { + return restoreDatabaseSettings; + } + + /** Returns the builder for the settings used for calls to restoreDatabase. */ + public OperationCallSettings.Builder + restoreDatabaseOperationSettings() { + return restoreDatabaseOperationSettings; + } + + /** Returns the builder for the settings used for calls to listDatabaseOperations. */ + public PagedCallSettings.Builder< + ListDatabaseOperationsRequest, + ListDatabaseOperationsResponse, + ListDatabaseOperationsPagedResponse> + listDatabaseOperationsSettings() { + return listDatabaseOperationsSettings; + } + + /** Returns the builder for the settings used for calls to listBackupOperations. */ + public PagedCallSettings.Builder< + ListBackupOperationsRequest, + ListBackupOperationsResponse, + ListBackupOperationsPagedResponse> + listBackupOperationsSettings() { + return listBackupOperationsSettings; + } + + /** Returns the builder for the settings used for calls to listDatabaseRoles. 
*/ + public PagedCallSettings.Builder< + ListDatabaseRolesRequest, ListDatabaseRolesResponse, ListDatabaseRolesPagedResponse> + listDatabaseRolesSettings() { + return listDatabaseRolesSettings; + } + + /** Returns the builder for the settings used for calls to addSplitPoints. */ + public UnaryCallSettings.Builder + addSplitPointsSettings() { + return addSplitPointsSettings; + } + + /** Returns the builder for the settings used for calls to createBackupSchedule. */ + public UnaryCallSettings.Builder + createBackupScheduleSettings() { + return createBackupScheduleSettings; + } + + /** Returns the builder for the settings used for calls to getBackupSchedule. */ + public UnaryCallSettings.Builder + getBackupScheduleSettings() { + return getBackupScheduleSettings; + } + + /** Returns the builder for the settings used for calls to updateBackupSchedule. */ + public UnaryCallSettings.Builder + updateBackupScheduleSettings() { + return updateBackupScheduleSettings; + } + + /** Returns the builder for the settings used for calls to deleteBackupSchedule. */ + public UnaryCallSettings.Builder + deleteBackupScheduleSettings() { + return deleteBackupScheduleSettings; + } + + /** Returns the builder for the settings used for calls to listBackupSchedules. */ + public PagedCallSettings.Builder< + ListBackupSchedulesRequest, + ListBackupSchedulesResponse, + ListBackupSchedulesPagedResponse> + listBackupSchedulesSettings() { + return listBackupSchedulesSettings; + } + + /** Returns the builder for the settings used for calls to internalUpdateGraph. 
*/ + public UnaryCallSettings.Builder< + InternalUpdateGraphOperationRequest, InternalUpdateGraphOperationResponse> + internalUpdateGraphOperationSettings() { + return internalUpdateGraphOperationSettings; + } + + @Override + public DatabaseAdminStubSettings build() throws IOException { + return new DatabaseAdminStubSettings(this); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/GrpcDatabaseAdminCallableFactory.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/GrpcDatabaseAdminCallableFactory.java new file mode 100644 index 000000000000..15fa1361452c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/GrpcDatabaseAdminCallableFactory.java @@ -0,0 +1,113 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.admin.database.v1.stub; + +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC callable factory implementation for the DatabaseAdmin service API. + * + *

This class is for advanced usage. + */ +@Generated("by gapic-generator-java") +public class GrpcDatabaseAdminCallableFactory implements GrpcStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, callSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( + 
GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/GrpcDatabaseAdminStub.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/GrpcDatabaseAdminStub.java new file mode 100644 index 000000000000..fcebc037b4b4 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/GrpcDatabaseAdminStub.java @@ -0,0 +1,1228 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.admin.database.v1.stub; + +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupOperationsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupSchedulesPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseOperationsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseRolesPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabasesPagedResponse; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.RequestParamsBuilder; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.GrpcOperationsStub; +import com.google.protobuf.Empty; +import com.google.spanner.admin.database.v1.AddSplitPointsRequest; +import com.google.spanner.admin.database.v1.AddSplitPointsResponse; +import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.CopyBackupMetadata; +import com.google.spanner.admin.database.v1.CopyBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupMetadata; +import 
com.google.spanner.admin.database.v1.CreateBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupScheduleRequest; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.DeleteBackupRequest; +import com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest; +import com.google.spanner.admin.database.v1.DropDatabaseRequest; +import com.google.spanner.admin.database.v1.GetBackupRequest; +import com.google.spanner.admin.database.v1.GetBackupScheduleRequest; +import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; +import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; +import com.google.spanner.admin.database.v1.GetDatabaseRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse; +import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; +import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; +import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; +import com.google.spanner.admin.database.v1.ListBackupSchedulesResponse; +import com.google.spanner.admin.database.v1.ListBackupsRequest; +import com.google.spanner.admin.database.v1.ListBackupsResponse; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse; +import com.google.spanner.admin.database.v1.ListDatabaseRolesRequest; +import com.google.spanner.admin.database.v1.ListDatabaseRolesResponse; +import com.google.spanner.admin.database.v1.ListDatabasesRequest; +import com.google.spanner.admin.database.v1.ListDatabasesResponse; +import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; +import 
com.google.spanner.admin.database.v1.RestoreDatabaseRequest; +import com.google.spanner.admin.database.v1.UpdateBackupRequest; +import com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; +import com.google.spanner.admin.database.v1.UpdateDatabaseMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseRequest; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC stub implementation for the DatabaseAdmin service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public class GrpcDatabaseAdminStub extends DatabaseAdminStub { + private static final MethodDescriptor + listDatabasesMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases") + .setRequestMarshaller( + ProtoUtils.marshaller(ListDatabasesRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListDatabasesResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + createDatabaseMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateDatabaseRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor getDatabaseMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase") + .setRequestMarshaller(ProtoUtils.marshaller(GetDatabaseRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Database.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + updateDatabaseMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabase") + .setRequestMarshaller( + ProtoUtils.marshaller(UpdateDatabaseRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + 
.setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + updateDatabaseDdlMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl") + .setRequestMarshaller( + ProtoUtils.marshaller(UpdateDatabaseDdlRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor dropDatabaseMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase") + .setRequestMarshaller(ProtoUtils.marshaller(DropDatabaseRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + getDatabaseDdlMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl") + .setRequestMarshaller( + ProtoUtils.marshaller(GetDatabaseDdlRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(GetDatabaseDdlResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor setIamPolicyMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy") + .setRequestMarshaller(ProtoUtils.marshaller(SetIamPolicyRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Policy.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor getIamPolicyMethodDescriptor = + MethodDescriptor.newBuilder() + 
.setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy") + .setRequestMarshaller(ProtoUtils.marshaller(GetIamPolicyRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Policy.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + testIamPermissionsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions") + .setRequestMarshaller( + ProtoUtils.marshaller(TestIamPermissionsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(TestIamPermissionsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + createBackupMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup") + .setRequestMarshaller(ProtoUtils.marshaller(CreateBackupRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor copyBackupMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/CopyBackup") + .setRequestMarshaller(ProtoUtils.marshaller(CopyBackupRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor getBackupMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/GetBackup") + 
.setRequestMarshaller(ProtoUtils.marshaller(GetBackupRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Backup.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor updateBackupMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup") + .setRequestMarshaller(ProtoUtils.marshaller(UpdateBackupRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Backup.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor deleteBackupMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup") + .setRequestMarshaller(ProtoUtils.marshaller(DeleteBackupRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + listBackupsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/ListBackups") + .setRequestMarshaller(ProtoUtils.marshaller(ListBackupsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListBackupsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + restoreDatabaseMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase") + .setRequestMarshaller( + ProtoUtils.marshaller(RestoreDatabaseRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + 
.setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor< + ListDatabaseOperationsRequest, ListDatabaseOperationsResponse> + listDatabaseOperationsMethodDescriptor = + MethodDescriptor + .newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations") + .setRequestMarshaller( + ProtoUtils.marshaller(ListDatabaseOperationsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListDatabaseOperationsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + listBackupOperationsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations") + .setRequestMarshaller( + ProtoUtils.marshaller(ListBackupOperationsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListBackupOperationsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + listDatabaseRolesMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseRoles") + .setRequestMarshaller( + ProtoUtils.marshaller(ListDatabaseRolesRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListDatabaseRolesResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + addSplitPointsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/AddSplitPoints") + .setRequestMarshaller( + ProtoUtils.marshaller(AddSplitPointsRequest.getDefaultInstance())) + .setResponseMarshaller( + 
ProtoUtils.marshaller(AddSplitPointsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + createBackupScheduleMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/CreateBackupSchedule") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateBackupScheduleRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(BackupSchedule.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + getBackupScheduleMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/GetBackupSchedule") + .setRequestMarshaller( + ProtoUtils.marshaller(GetBackupScheduleRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(BackupSchedule.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + updateBackupScheduleMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackupSchedule") + .setRequestMarshaller( + ProtoUtils.marshaller(UpdateBackupScheduleRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(BackupSchedule.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + deleteBackupScheduleMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackupSchedule") + .setRequestMarshaller( + ProtoUtils.marshaller(DeleteBackupScheduleRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + 
.setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + listBackupSchedulesMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/ListBackupSchedules") + .setRequestMarshaller( + ProtoUtils.marshaller(ListBackupSchedulesRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListBackupSchedulesResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor< + InternalUpdateGraphOperationRequest, InternalUpdateGraphOperationResponse> + internalUpdateGraphOperationMethodDescriptor = + MethodDescriptor + . + newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/InternalUpdateGraphOperation") + .setRequestMarshaller( + ProtoUtils.marshaller(InternalUpdateGraphOperationRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(InternalUpdateGraphOperationResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private final UnaryCallable listDatabasesCallable; + private final UnaryCallable + listDatabasesPagedCallable; + private final UnaryCallable createDatabaseCallable; + private final OperationCallable + createDatabaseOperationCallable; + private final UnaryCallable getDatabaseCallable; + private final UnaryCallable updateDatabaseCallable; + private final OperationCallable + updateDatabaseOperationCallable; + private final UnaryCallable updateDatabaseDdlCallable; + private final OperationCallable + updateDatabaseDdlOperationCallable; + private final UnaryCallable dropDatabaseCallable; + private final UnaryCallable getDatabaseDdlCallable; + private final UnaryCallable setIamPolicyCallable; + private final UnaryCallable getIamPolicyCallable; + private final UnaryCallable + testIamPermissionsCallable; + private final 
UnaryCallable createBackupCallable; + private final OperationCallable + createBackupOperationCallable; + private final UnaryCallable copyBackupCallable; + private final OperationCallable + copyBackupOperationCallable; + private final UnaryCallable getBackupCallable; + private final UnaryCallable updateBackupCallable; + private final UnaryCallable deleteBackupCallable; + private final UnaryCallable listBackupsCallable; + private final UnaryCallable + listBackupsPagedCallable; + private final UnaryCallable restoreDatabaseCallable; + private final OperationCallable + restoreDatabaseOperationCallable; + private final UnaryCallable + listDatabaseOperationsCallable; + private final UnaryCallable + listDatabaseOperationsPagedCallable; + private final UnaryCallable + listBackupOperationsCallable; + private final UnaryCallable + listBackupOperationsPagedCallable; + private final UnaryCallable + listDatabaseRolesCallable; + private final UnaryCallable + listDatabaseRolesPagedCallable; + private final UnaryCallable addSplitPointsCallable; + private final UnaryCallable + createBackupScheduleCallable; + private final UnaryCallable getBackupScheduleCallable; + private final UnaryCallable + updateBackupScheduleCallable; + private final UnaryCallable deleteBackupScheduleCallable; + private final UnaryCallable + listBackupSchedulesCallable; + private final UnaryCallable + listBackupSchedulesPagedCallable; + private final UnaryCallable< + InternalUpdateGraphOperationRequest, InternalUpdateGraphOperationResponse> + internalUpdateGraphOperationCallable; + + private final BackgroundResource backgroundResources; + private final GrpcOperationsStub operationsStub; + private final GrpcStubCallableFactory callableFactory; + + public static final GrpcDatabaseAdminStub create(DatabaseAdminStubSettings settings) + throws IOException { + return new GrpcDatabaseAdminStub(settings, ClientContext.create(settings)); + } + + public static final GrpcDatabaseAdminStub create(ClientContext 
clientContext) throws IOException { + return new GrpcDatabaseAdminStub(DatabaseAdminStubSettings.newBuilder().build(), clientContext); + } + + public static final GrpcDatabaseAdminStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcDatabaseAdminStub( + DatabaseAdminStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcDatabaseAdminStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected GrpcDatabaseAdminStub(DatabaseAdminStubSettings settings, ClientContext clientContext) + throws IOException { + this(settings, clientContext, new GrpcDatabaseAdminCallableFactory()); + } + + /** + * Constructs an instance of GrpcDatabaseAdminStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. 
+ */ + protected GrpcDatabaseAdminStub( + DatabaseAdminStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.operationsStub = GrpcOperationsStub.create(clientContext, callableFactory); + + GrpcCallSettings listDatabasesTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listDatabasesMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + GrpcCallSettings createDatabaseTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createDatabaseMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + GrpcCallSettings getDatabaseTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getDatabaseMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + GrpcCallSettings updateDatabaseTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(updateDatabaseMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("database.name", String.valueOf(request.getDatabase().getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings updateDatabaseDdlTransportSettings = + GrpcCallSettings.newBuilder() + 
.setMethodDescriptor(updateDatabaseDdlMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("database", String.valueOf(request.getDatabase())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getDatabase()) + .build(); + GrpcCallSettings dropDatabaseTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(dropDatabaseMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("database", String.valueOf(request.getDatabase())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getDatabase()) + .build(); + GrpcCallSettings + getDatabaseDdlTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getDatabaseDdlMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("database", String.valueOf(request.getDatabase())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getDatabase()) + .build(); + GrpcCallSettings setIamPolicyTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(setIamPolicyMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("resource", String.valueOf(request.getResource())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getResource()) + .build(); + GrpcCallSettings getIamPolicyTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getIamPolicyMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("resource", String.valueOf(request.getResource())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getResource()) + .build(); + GrpcCallSettings + 
testIamPermissionsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(testIamPermissionsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("resource", String.valueOf(request.getResource())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getResource()) + .build(); + GrpcCallSettings createBackupTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createBackupMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + GrpcCallSettings copyBackupTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(copyBackupMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + GrpcCallSettings getBackupTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getBackupMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + GrpcCallSettings updateBackupTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(updateBackupMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("backup.name", String.valueOf(request.getBackup().getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings 
deleteBackupTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(deleteBackupMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + GrpcCallSettings listBackupsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listBackupsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + GrpcCallSettings restoreDatabaseTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(restoreDatabaseMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + GrpcCallSettings + listDatabaseOperationsTransportSettings = + GrpcCallSettings + .newBuilder() + .setMethodDescriptor(listDatabaseOperationsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + GrpcCallSettings + listBackupOperationsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listBackupOperationsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + 
.setResourceNameExtractor(request -> request.getParent()) + .build(); + GrpcCallSettings + listDatabaseRolesTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listDatabaseRolesMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + GrpcCallSettings + addSplitPointsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(addSplitPointsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("database", String.valueOf(request.getDatabase())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getDatabase()) + .build(); + GrpcCallSettings + createBackupScheduleTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createBackupScheduleMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + GrpcCallSettings getBackupScheduleTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getBackupScheduleMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + GrpcCallSettings + updateBackupScheduleTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(updateBackupScheduleMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( 
+ "backup_schedule.name", + String.valueOf(request.getBackupSchedule().getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings deleteBackupScheduleTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(deleteBackupScheduleMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + GrpcCallSettings + listBackupSchedulesTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listBackupSchedulesMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + GrpcCallSettings + internalUpdateGraphOperationTransportSettings = + GrpcCallSettings + . 
+ newBuilder() + .setMethodDescriptor(internalUpdateGraphOperationMethodDescriptor) + .setResourceNameExtractor(request -> request.getDatabase()) + .build(); + + this.listDatabasesCallable = + callableFactory.createUnaryCallable( + listDatabasesTransportSettings, settings.listDatabasesSettings(), clientContext); + this.listDatabasesPagedCallable = + callableFactory.createPagedCallable( + listDatabasesTransportSettings, settings.listDatabasesSettings(), clientContext); + this.createDatabaseCallable = + callableFactory.createUnaryCallable( + createDatabaseTransportSettings, settings.createDatabaseSettings(), clientContext); + this.createDatabaseOperationCallable = + callableFactory.createOperationCallable( + createDatabaseTransportSettings, + settings.createDatabaseOperationSettings(), + clientContext, + operationsStub); + this.getDatabaseCallable = + callableFactory.createUnaryCallable( + getDatabaseTransportSettings, settings.getDatabaseSettings(), clientContext); + this.updateDatabaseCallable = + callableFactory.createUnaryCallable( + updateDatabaseTransportSettings, settings.updateDatabaseSettings(), clientContext); + this.updateDatabaseOperationCallable = + callableFactory.createOperationCallable( + updateDatabaseTransportSettings, + settings.updateDatabaseOperationSettings(), + clientContext, + operationsStub); + this.updateDatabaseDdlCallable = + callableFactory.createUnaryCallable( + updateDatabaseDdlTransportSettings, + settings.updateDatabaseDdlSettings(), + clientContext); + this.updateDatabaseDdlOperationCallable = + callableFactory.createOperationCallable( + updateDatabaseDdlTransportSettings, + settings.updateDatabaseDdlOperationSettings(), + clientContext, + operationsStub); + this.dropDatabaseCallable = + callableFactory.createUnaryCallable( + dropDatabaseTransportSettings, settings.dropDatabaseSettings(), clientContext); + this.getDatabaseDdlCallable = + callableFactory.createUnaryCallable( + getDatabaseDdlTransportSettings, 
settings.getDatabaseDdlSettings(), clientContext); + this.setIamPolicyCallable = + callableFactory.createUnaryCallable( + setIamPolicyTransportSettings, settings.setIamPolicySettings(), clientContext); + this.getIamPolicyCallable = + callableFactory.createUnaryCallable( + getIamPolicyTransportSettings, settings.getIamPolicySettings(), clientContext); + this.testIamPermissionsCallable = + callableFactory.createUnaryCallable( + testIamPermissionsTransportSettings, + settings.testIamPermissionsSettings(), + clientContext); + this.createBackupCallable = + callableFactory.createUnaryCallable( + createBackupTransportSettings, settings.createBackupSettings(), clientContext); + this.createBackupOperationCallable = + callableFactory.createOperationCallable( + createBackupTransportSettings, + settings.createBackupOperationSettings(), + clientContext, + operationsStub); + this.copyBackupCallable = + callableFactory.createUnaryCallable( + copyBackupTransportSettings, settings.copyBackupSettings(), clientContext); + this.copyBackupOperationCallable = + callableFactory.createOperationCallable( + copyBackupTransportSettings, + settings.copyBackupOperationSettings(), + clientContext, + operationsStub); + this.getBackupCallable = + callableFactory.createUnaryCallable( + getBackupTransportSettings, settings.getBackupSettings(), clientContext); + this.updateBackupCallable = + callableFactory.createUnaryCallable( + updateBackupTransportSettings, settings.updateBackupSettings(), clientContext); + this.deleteBackupCallable = + callableFactory.createUnaryCallable( + deleteBackupTransportSettings, settings.deleteBackupSettings(), clientContext); + this.listBackupsCallable = + callableFactory.createUnaryCallable( + listBackupsTransportSettings, settings.listBackupsSettings(), clientContext); + this.listBackupsPagedCallable = + callableFactory.createPagedCallable( + listBackupsTransportSettings, settings.listBackupsSettings(), clientContext); + this.restoreDatabaseCallable = + 
callableFactory.createUnaryCallable( + restoreDatabaseTransportSettings, settings.restoreDatabaseSettings(), clientContext); + this.restoreDatabaseOperationCallable = + callableFactory.createOperationCallable( + restoreDatabaseTransportSettings, + settings.restoreDatabaseOperationSettings(), + clientContext, + operationsStub); + this.listDatabaseOperationsCallable = + callableFactory.createUnaryCallable( + listDatabaseOperationsTransportSettings, + settings.listDatabaseOperationsSettings(), + clientContext); + this.listDatabaseOperationsPagedCallable = + callableFactory.createPagedCallable( + listDatabaseOperationsTransportSettings, + settings.listDatabaseOperationsSettings(), + clientContext); + this.listBackupOperationsCallable = + callableFactory.createUnaryCallable( + listBackupOperationsTransportSettings, + settings.listBackupOperationsSettings(), + clientContext); + this.listBackupOperationsPagedCallable = + callableFactory.createPagedCallable( + listBackupOperationsTransportSettings, + settings.listBackupOperationsSettings(), + clientContext); + this.listDatabaseRolesCallable = + callableFactory.createUnaryCallable( + listDatabaseRolesTransportSettings, + settings.listDatabaseRolesSettings(), + clientContext); + this.listDatabaseRolesPagedCallable = + callableFactory.createPagedCallable( + listDatabaseRolesTransportSettings, + settings.listDatabaseRolesSettings(), + clientContext); + this.addSplitPointsCallable = + callableFactory.createUnaryCallable( + addSplitPointsTransportSettings, settings.addSplitPointsSettings(), clientContext); + this.createBackupScheduleCallable = + callableFactory.createUnaryCallable( + createBackupScheduleTransportSettings, + settings.createBackupScheduleSettings(), + clientContext); + this.getBackupScheduleCallable = + callableFactory.createUnaryCallable( + getBackupScheduleTransportSettings, + settings.getBackupScheduleSettings(), + clientContext); + this.updateBackupScheduleCallable = + callableFactory.createUnaryCallable( + 
updateBackupScheduleTransportSettings, + settings.updateBackupScheduleSettings(), + clientContext); + this.deleteBackupScheduleCallable = + callableFactory.createUnaryCallable( + deleteBackupScheduleTransportSettings, + settings.deleteBackupScheduleSettings(), + clientContext); + this.listBackupSchedulesCallable = + callableFactory.createUnaryCallable( + listBackupSchedulesTransportSettings, + settings.listBackupSchedulesSettings(), + clientContext); + this.listBackupSchedulesPagedCallable = + callableFactory.createPagedCallable( + listBackupSchedulesTransportSettings, + settings.listBackupSchedulesSettings(), + clientContext); + this.internalUpdateGraphOperationCallable = + callableFactory.createUnaryCallable( + internalUpdateGraphOperationTransportSettings, + settings.internalUpdateGraphOperationSettings(), + clientContext); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + public GrpcOperationsStub getOperationsStub() { + return operationsStub; + } + + @Override + public UnaryCallable listDatabasesCallable() { + return listDatabasesCallable; + } + + @Override + public UnaryCallable + listDatabasesPagedCallable() { + return listDatabasesPagedCallable; + } + + @Override + public UnaryCallable createDatabaseCallable() { + return createDatabaseCallable; + } + + @Override + public OperationCallable + createDatabaseOperationCallable() { + return createDatabaseOperationCallable; + } + + @Override + public UnaryCallable getDatabaseCallable() { + return getDatabaseCallable; + } + + @Override + public UnaryCallable updateDatabaseCallable() { + return updateDatabaseCallable; + } + + @Override + public OperationCallable + updateDatabaseOperationCallable() { + return updateDatabaseOperationCallable; + } + + @Override + public UnaryCallable updateDatabaseDdlCallable() { + return updateDatabaseDdlCallable; + } + + @Override + public OperationCallable + updateDatabaseDdlOperationCallable() { + return 
updateDatabaseDdlOperationCallable; + } + + @Override + public UnaryCallable dropDatabaseCallable() { + return dropDatabaseCallable; + } + + @Override + public UnaryCallable getDatabaseDdlCallable() { + return getDatabaseDdlCallable; + } + + @Override + public UnaryCallable setIamPolicyCallable() { + return setIamPolicyCallable; + } + + @Override + public UnaryCallable getIamPolicyCallable() { + return getIamPolicyCallable; + } + + @Override + public UnaryCallable + testIamPermissionsCallable() { + return testIamPermissionsCallable; + } + + @Override + public UnaryCallable createBackupCallable() { + return createBackupCallable; + } + + @Override + public OperationCallable + createBackupOperationCallable() { + return createBackupOperationCallable; + } + + @Override + public UnaryCallable copyBackupCallable() { + return copyBackupCallable; + } + + @Override + public OperationCallable + copyBackupOperationCallable() { + return copyBackupOperationCallable; + } + + @Override + public UnaryCallable getBackupCallable() { + return getBackupCallable; + } + + @Override + public UnaryCallable updateBackupCallable() { + return updateBackupCallable; + } + + @Override + public UnaryCallable deleteBackupCallable() { + return deleteBackupCallable; + } + + @Override + public UnaryCallable listBackupsCallable() { + return listBackupsCallable; + } + + @Override + public UnaryCallable listBackupsPagedCallable() { + return listBackupsPagedCallable; + } + + @Override + public UnaryCallable restoreDatabaseCallable() { + return restoreDatabaseCallable; + } + + @Override + public OperationCallable + restoreDatabaseOperationCallable() { + return restoreDatabaseOperationCallable; + } + + @Override + public UnaryCallable + listDatabaseOperationsCallable() { + return listDatabaseOperationsCallable; + } + + @Override + public UnaryCallable + listDatabaseOperationsPagedCallable() { + return listDatabaseOperationsPagedCallable; + } + + @Override + public UnaryCallable + 
listBackupOperationsCallable() { + return listBackupOperationsCallable; + } + + @Override + public UnaryCallable + listBackupOperationsPagedCallable() { + return listBackupOperationsPagedCallable; + } + + @Override + public UnaryCallable + listDatabaseRolesCallable() { + return listDatabaseRolesCallable; + } + + @Override + public UnaryCallable + listDatabaseRolesPagedCallable() { + return listDatabaseRolesPagedCallable; + } + + @Override + public UnaryCallable addSplitPointsCallable() { + return addSplitPointsCallable; + } + + @Override + public UnaryCallable createBackupScheduleCallable() { + return createBackupScheduleCallable; + } + + @Override + public UnaryCallable getBackupScheduleCallable() { + return getBackupScheduleCallable; + } + + @Override + public UnaryCallable updateBackupScheduleCallable() { + return updateBackupScheduleCallable; + } + + @Override + public UnaryCallable deleteBackupScheduleCallable() { + return deleteBackupScheduleCallable; + } + + @Override + public UnaryCallable + listBackupSchedulesCallable() { + return listBackupSchedulesCallable; + } + + @Override + public UnaryCallable + listBackupSchedulesPagedCallable() { + return listBackupSchedulesPagedCallable; + } + + @Override + public UnaryCallable + internalUpdateGraphOperationCallable() { + return internalUpdateGraphOperationCallable; + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit 
unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/HttpJsonDatabaseAdminCallableFactory.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/HttpJsonDatabaseAdminCallableFactory.java new file mode 100644 index 000000000000..0d93f799046c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/HttpJsonDatabaseAdminCallableFactory.java @@ -0,0 +1,101 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.admin.database.v1.stub; + +import com.google.api.gax.httpjson.HttpJsonCallSettings; +import com.google.api.gax.httpjson.HttpJsonCallableFactory; +import com.google.api.gax.httpjson.HttpJsonOperationSnapshotCallable; +import com.google.api.gax.httpjson.HttpJsonStubCallableFactory; +import com.google.api.gax.httpjson.longrunning.stub.OperationsStub; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * REST callable factory implementation for the DatabaseAdmin service API. + * + *

This class is for advanced usage. + */ +@Generated("by gapic-generator-java") +public class HttpJsonDatabaseAdminCallableFactory + implements HttpJsonStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + HttpJsonCallSettings httpJsonCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return HttpJsonCallableFactory.createUnaryCallable( + httpJsonCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + HttpJsonCallSettings httpJsonCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return HttpJsonCallableFactory.createPagedCallable( + httpJsonCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + HttpJsonCallSettings httpJsonCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return HttpJsonCallableFactory.createBatchingCallable( + httpJsonCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + HttpJsonCallSettings httpJsonCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + UnaryCallable innerCallable = + HttpJsonCallableFactory.createBaseUnaryCallable( + httpJsonCallSettings, callSettings.getInitialCallSettings(), clientContext); + HttpJsonOperationSnapshotCallable initialCallable = + new HttpJsonOperationSnapshotCallable( + innerCallable, + httpJsonCallSettings.getMethodDescriptor().getOperationSnapshotFactory()); + return HttpJsonCallableFactory.createOperationCallable( + callSettings, clientContext, operationsStub.longRunningClient(), initialCallable); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + HttpJsonCallSettings httpJsonCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return 
HttpJsonCallableFactory.createServerStreamingCallable( + httpJsonCallSettings, callSettings, clientContext); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/HttpJsonDatabaseAdminStub.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/HttpJsonDatabaseAdminStub.java new file mode 100644 index 000000000000..ca101a1412ed --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/HttpJsonDatabaseAdminStub.java @@ -0,0 +1,2067 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.admin.database.v1.stub; + +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupOperationsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupSchedulesPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseOperationsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseRolesPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabasesPagedResponse; + +import com.google.api.HttpRule; +import com.google.api.core.InternalApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.httpjson.ApiMethodDescriptor; +import com.google.api.gax.httpjson.HttpJsonCallSettings; +import com.google.api.gax.httpjson.HttpJsonOperationSnapshot; +import com.google.api.gax.httpjson.HttpJsonStubCallableFactory; +import com.google.api.gax.httpjson.ProtoMessageRequestFormatter; +import com.google.api.gax.httpjson.ProtoMessageResponseParser; +import com.google.api.gax.httpjson.ProtoRestSerializer; +import com.google.api.gax.httpjson.longrunning.stub.HttpJsonOperationsStub; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.RequestParamsBuilder; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.common.collect.ImmutableMap; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import 
com.google.protobuf.TypeRegistry; +import com.google.spanner.admin.database.v1.AddSplitPointsRequest; +import com.google.spanner.admin.database.v1.AddSplitPointsResponse; +import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.CopyBackupMetadata; +import com.google.spanner.admin.database.v1.CopyBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupMetadata; +import com.google.spanner.admin.database.v1.CreateBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupScheduleRequest; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.DeleteBackupRequest; +import com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest; +import com.google.spanner.admin.database.v1.DropDatabaseRequest; +import com.google.spanner.admin.database.v1.GetBackupRequest; +import com.google.spanner.admin.database.v1.GetBackupScheduleRequest; +import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; +import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; +import com.google.spanner.admin.database.v1.GetDatabaseRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse; +import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; +import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; +import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; +import com.google.spanner.admin.database.v1.ListBackupSchedulesResponse; +import com.google.spanner.admin.database.v1.ListBackupsRequest; +import com.google.spanner.admin.database.v1.ListBackupsResponse; +import 
com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse; +import com.google.spanner.admin.database.v1.ListDatabaseRolesRequest; +import com.google.spanner.admin.database.v1.ListDatabaseRolesResponse; +import com.google.spanner.admin.database.v1.ListDatabasesRequest; +import com.google.spanner.admin.database.v1.ListDatabasesResponse; +import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; +import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; +import com.google.spanner.admin.database.v1.UpdateBackupRequest; +import com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; +import com.google.spanner.admin.database.v1.UpdateDatabaseMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseRequest; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * REST stub implementation for the DatabaseAdmin service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public class HttpJsonDatabaseAdminStub extends DatabaseAdminStub { + private static final TypeRegistry typeRegistry = + TypeRegistry.newBuilder() + .add(CreateDatabaseMetadata.getDescriptor()) + .add(Empty.getDescriptor()) + .add(CreateBackupMetadata.getDescriptor()) + .add(RestoreDatabaseMetadata.getDescriptor()) + .add(Database.getDescriptor()) + .add(Backup.getDescriptor()) + .add(CopyBackupMetadata.getDescriptor()) + .add(UpdateDatabaseMetadata.getDescriptor()) + .add(UpdateDatabaseDdlMetadata.getDescriptor()) + .build(); + + private static final ApiMethodDescriptor + listDatabasesMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*/instances/*}/databases", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "pageSize", request.getPageSize()); + serializer.putQueryParam(fields, "pageToken", request.getPageToken()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ListDatabasesResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + createDatabaseMethodDescriptor = + 
ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*/instances/*}/databases", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearParent().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Operation.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .setOperationSnapshotFactory( + (CreateDatabaseRequest request, Operation response) -> + HttpJsonOperationSnapshot.create(response)) + .build(); + + private static final ApiMethodDescriptor + getDatabaseMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/instances/*/databases/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + 
serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Database.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + updateDatabaseMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabase") + .setHttpMethod("PATCH") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{database.name=projects/*/instances/*/databases/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam( + fields, "database.name", request.getDatabase().getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "updateMask", request.getUpdateMask()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("database", request.getDatabase(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Operation.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .setOperationSnapshotFactory( + (UpdateDatabaseRequest request, Operation response) -> + HttpJsonOperationSnapshot.create(response)) + .build(); + + private static final ApiMethodDescriptor + updateDatabaseDdlMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl") + .setHttpMethod("PATCH") + 
.setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{database=projects/*/instances/*/databases/*}/ddl", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "database", request.getDatabase()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearDatabase().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Operation.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .setOperationSnapshotFactory( + (UpdateDatabaseDdlRequest request, Operation response) -> + HttpJsonOperationSnapshot.create(response)) + .build(); + + private static final ApiMethodDescriptor + dropDatabaseMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase") + .setHttpMethod("DELETE") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{database=projects/*/instances/*/databases/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "database", request.getDatabase()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + 
.build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Empty.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + getDatabaseDdlMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{database=projects/*/instances/*/databases/*}/ddl", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "database", request.getDatabase()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(GetDatabaseDdlResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + setIamPolicyMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{resource=projects/*/instances/*/databases/*}:setIamPolicy", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "resource", request.getResource()); + return fields; + }) + .setAdditionalPaths( + 
"/v1/{resource=projects/*/instances/*/backups/*}:setIamPolicy", + "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:setIamPolicy") + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearResource().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Policy.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + getIamPolicyMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{resource=projects/*/instances/*/databases/*}:getIamPolicy", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "resource", request.getResource()); + return fields; + }) + .setAdditionalPaths( + "/v1/{resource=projects/*/instances/*/backups/*}:getIamPolicy", + "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:getIamPolicy") + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearResource().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + 
.setDefaultInstance(Policy.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + testIamPermissionsMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{resource=projects/*/instances/*/databases/*}:testIamPermissions", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "resource", request.getResource()); + return fields; + }) + .setAdditionalPaths( + "/v1/{resource=projects/*/instances/*/backups/*}:testIamPermissions", + "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:testIamPermissions", + "/v1/{resource=projects/*/instances/*/databases/*/databaseRoles/*}:testIamPermissions") + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearResource().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(TestIamPermissionsResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + createBackupMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + 
"/v1/{parent=projects/*/instances/*}/backups", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "backupId", request.getBackupId()); + serializer.putQueryParam( + fields, "encryptionConfig", request.getEncryptionConfig()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("backup", request.getBackup(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Operation.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .setOperationSnapshotFactory( + (CreateBackupRequest request, Operation response) -> + HttpJsonOperationSnapshot.create(response)) + .build(); + + private static final ApiMethodDescriptor + copyBackupMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/CopyBackup") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*/instances/*}/backups:copy", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + 
ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearParent().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Operation.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .setOperationSnapshotFactory( + (CopyBackupRequest request, Operation response) -> + HttpJsonOperationSnapshot.create(response)) + .build(); + + private static final ApiMethodDescriptor getBackupMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/GetBackup") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/instances/*/backups/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Backup.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + updateBackupMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup") + .setHttpMethod("PATCH") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{backup.name=projects/*/instances/*/backups/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); 
+ serializer.putPathParam( + fields, "backup.name", request.getBackup().getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "updateMask", request.getUpdateMask()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("backup", request.getBackup(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Backup.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + deleteBackupMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup") + .setHttpMethod("DELETE") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/instances/*/backups/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Empty.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + listBackupsMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/ListBackups") + 
.setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*/instances/*}/backups", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "filter", request.getFilter()); + serializer.putQueryParam(fields, "pageSize", request.getPageSize()); + serializer.putQueryParam(fields, "pageToken", request.getPageToken()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ListBackupsResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + restoreDatabaseMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*/instances/*}/databases:restore", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + 
ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearParent().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Operation.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .setOperationSnapshotFactory( + (RestoreDatabaseRequest request, Operation response) -> + HttpJsonOperationSnapshot.create(response)) + .build(); + + private static final ApiMethodDescriptor< + ListDatabaseOperationsRequest, ListDatabaseOperationsResponse> + listDatabaseOperationsMethodDescriptor = + ApiMethodDescriptor + .newBuilder() + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*/instances/*}/databaseOperations", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "filter", request.getFilter()); + serializer.putQueryParam(fields, "pageSize", request.getPageSize()); + serializer.putQueryParam(fields, "pageToken", request.getPageToken()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ListDatabaseOperationsResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor< + ListBackupOperationsRequest, ListBackupOperationsResponse> + listBackupOperationsMethodDescriptor = + 
ApiMethodDescriptor + .newBuilder() + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*/instances/*}/backupOperations", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "filter", request.getFilter()); + serializer.putQueryParam(fields, "pageSize", request.getPageSize()); + serializer.putQueryParam(fields, "pageToken", request.getPageToken()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ListBackupOperationsResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + listDatabaseRolesMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseRoles") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*/instances/*/databases/*}/databaseRoles", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + 
ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "pageSize", request.getPageSize()); + serializer.putQueryParam(fields, "pageToken", request.getPageToken()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ListDatabaseRolesResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + addSplitPointsMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/AddSplitPoints") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{database=projects/*/instances/*/databases/*}:addSplitPoints", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "database", request.getDatabase()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearDatabase().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(AddSplitPointsResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + createBackupScheduleMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/CreateBackupSchedule") + .setHttpMethod("POST") + 
.setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*/instances/*/databases/*}/backupSchedules", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam( + fields, "backupScheduleId", request.getBackupScheduleId()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("backupSchedule", request.getBackupSchedule(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(BackupSchedule.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + getBackupScheduleMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/GetBackupSchedule") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/instances/*/databases/*/backupSchedules/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + 
ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(BackupSchedule.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + updateBackupScheduleMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackupSchedule") + .setHttpMethod("PATCH") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{backupSchedule.name=projects/*/instances/*/databases/*/backupSchedules/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam( + fields, + "backupSchedule.name", + request.getBackupSchedule().getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "updateMask", request.getUpdateMask()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("backupSchedule", request.getBackupSchedule(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(BackupSchedule.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + deleteBackupScheduleMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackupSchedule") + .setHttpMethod("DELETE") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/instances/*/databases/*/backupSchedules/*}", + request -> { + Map fields = new HashMap<>(); + 
ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Empty.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + listBackupSchedulesMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/ListBackupSchedules") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*/instances/*/databases/*}/backupSchedules", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "pageSize", request.getPageSize()); + serializer.putQueryParam(fields, "pageToken", request.getPageToken()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ListBackupSchedulesResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private final UnaryCallable listDatabasesCallable; + private final UnaryCallable + 
listDatabasesPagedCallable; + private final UnaryCallable createDatabaseCallable; + private final OperationCallable + createDatabaseOperationCallable; + private final UnaryCallable getDatabaseCallable; + private final UnaryCallable updateDatabaseCallable; + private final OperationCallable + updateDatabaseOperationCallable; + private final UnaryCallable updateDatabaseDdlCallable; + private final OperationCallable + updateDatabaseDdlOperationCallable; + private final UnaryCallable dropDatabaseCallable; + private final UnaryCallable getDatabaseDdlCallable; + private final UnaryCallable setIamPolicyCallable; + private final UnaryCallable getIamPolicyCallable; + private final UnaryCallable + testIamPermissionsCallable; + private final UnaryCallable createBackupCallable; + private final OperationCallable + createBackupOperationCallable; + private final UnaryCallable copyBackupCallable; + private final OperationCallable + copyBackupOperationCallable; + private final UnaryCallable getBackupCallable; + private final UnaryCallable updateBackupCallable; + private final UnaryCallable deleteBackupCallable; + private final UnaryCallable listBackupsCallable; + private final UnaryCallable + listBackupsPagedCallable; + private final UnaryCallable restoreDatabaseCallable; + private final OperationCallable + restoreDatabaseOperationCallable; + private final UnaryCallable + listDatabaseOperationsCallable; + private final UnaryCallable + listDatabaseOperationsPagedCallable; + private final UnaryCallable + listBackupOperationsCallable; + private final UnaryCallable + listBackupOperationsPagedCallable; + private final UnaryCallable + listDatabaseRolesCallable; + private final UnaryCallable + listDatabaseRolesPagedCallable; + private final UnaryCallable addSplitPointsCallable; + private final UnaryCallable + createBackupScheduleCallable; + private final UnaryCallable getBackupScheduleCallable; + private final UnaryCallable + updateBackupScheduleCallable; + private final UnaryCallable 
deleteBackupScheduleCallable; + private final UnaryCallable + listBackupSchedulesCallable; + private final UnaryCallable + listBackupSchedulesPagedCallable; + + private final BackgroundResource backgroundResources; + private final HttpJsonOperationsStub httpJsonOperationsStub; + private final HttpJsonStubCallableFactory callableFactory; + + public static final HttpJsonDatabaseAdminStub create(DatabaseAdminStubSettings settings) + throws IOException { + return new HttpJsonDatabaseAdminStub(settings, ClientContext.create(settings)); + } + + public static final HttpJsonDatabaseAdminStub create(ClientContext clientContext) + throws IOException { + return new HttpJsonDatabaseAdminStub( + DatabaseAdminStubSettings.newHttpJsonBuilder().build(), clientContext); + } + + public static final HttpJsonDatabaseAdminStub create( + ClientContext clientContext, HttpJsonStubCallableFactory callableFactory) throws IOException { + return new HttpJsonDatabaseAdminStub( + DatabaseAdminStubSettings.newHttpJsonBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of HttpJsonDatabaseAdminStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected HttpJsonDatabaseAdminStub( + DatabaseAdminStubSettings settings, ClientContext clientContext) throws IOException { + this(settings, clientContext, new HttpJsonDatabaseAdminCallableFactory()); + } + + /** + * Constructs an instance of HttpJsonDatabaseAdminStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. 
+ */ + protected HttpJsonDatabaseAdminStub( + DatabaseAdminStubSettings settings, + ClientContext clientContext, + HttpJsonStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.httpJsonOperationsStub = + HttpJsonOperationsStub.create( + clientContext, + callableFactory, + typeRegistry, + ImmutableMap.builder() + .put( + "google.longrunning.Operations.CancelOperation", + HttpRule.newBuilder() + .setPost( + "/v1/{name=projects/*/instances/*/databases/*/operations/*}:cancel") + .addAdditionalBindings( + HttpRule.newBuilder() + .setPost("/v1/{name=projects/*/instances/*/operations/*}:cancel") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setPost( + "/v1/{name=projects/*/instances/*/backups/*/operations/*}:cancel") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setPost( + "/v1/{name=projects/*/instanceConfigs/*/operations/*}:cancel") + .build()) + .build()) + .put( + "google.longrunning.Operations.DeleteOperation", + HttpRule.newBuilder() + .setDelete("/v1/{name=projects/*/instances/*/databases/*/operations/*}") + .addAdditionalBindings( + HttpRule.newBuilder() + .setDelete("/v1/{name=projects/*/instances/*/operations/*}") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setDelete( + "/v1/{name=projects/*/instances/*/backups/*/operations/*}") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setDelete("/v1/{name=projects/*/instanceConfigs/*/operations/*}") + .build()) + .build()) + .put( + "google.longrunning.Operations.GetOperation", + HttpRule.newBuilder() + .setGet("/v1/{name=projects/*/instances/*/databases/*/operations/*}") + .addAdditionalBindings( + HttpRule.newBuilder() + .setGet("/v1/{name=projects/*/instances/*/operations/*}") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setGet("/v1/{name=projects/*/instances/*/backups/*/operations/*}") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + 
.setGet("/v1/{name=projects/*/instanceConfigs/*/operations/*}") + .build()) + .build()) + .put( + "google.longrunning.Operations.ListOperations", + HttpRule.newBuilder() + .setGet("/v1/{name=projects/*/instances/*/databases/*/operations}") + .addAdditionalBindings( + HttpRule.newBuilder() + .setGet("/v1/{name=projects/*/instances/*/operations}") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setGet("/v1/{name=projects/*/instances/*/backups/*/operations}") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setGet("/v1/{name=projects/*/instanceConfigs/*/operations}") + .build()) + .build()) + .build()); + + HttpJsonCallSettings + listDatabasesTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(listDatabasesMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + HttpJsonCallSettings createDatabaseTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(createDatabaseMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + HttpJsonCallSettings getDatabaseTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(getDatabaseMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + HttpJsonCallSettings 
updateDatabaseTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(updateDatabaseMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("database.name", String.valueOf(request.getDatabase().getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings updateDatabaseDdlTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(updateDatabaseDdlMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("database", String.valueOf(request.getDatabase())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getDatabase()) + .build(); + HttpJsonCallSettings dropDatabaseTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(dropDatabaseMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("database", String.valueOf(request.getDatabase())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getDatabase()) + .build(); + HttpJsonCallSettings + getDatabaseDdlTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(getDatabaseDdlMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("database", String.valueOf(request.getDatabase())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getDatabase()) + .build(); + HttpJsonCallSettings setIamPolicyTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(setIamPolicyMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = 
RequestParamsBuilder.create(); + builder.add("resource", String.valueOf(request.getResource())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getResource()) + .build(); + HttpJsonCallSettings getIamPolicyTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(getIamPolicyMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("resource", String.valueOf(request.getResource())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getResource()) + .build(); + HttpJsonCallSettings + testIamPermissionsTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(testIamPermissionsMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("resource", String.valueOf(request.getResource())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getResource()) + .build(); + HttpJsonCallSettings createBackupTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(createBackupMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + HttpJsonCallSettings copyBackupTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(copyBackupMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); 
+ HttpJsonCallSettings getBackupTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(getBackupMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + HttpJsonCallSettings updateBackupTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(updateBackupMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("backup.name", String.valueOf(request.getBackup().getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings deleteBackupTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(deleteBackupMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + HttpJsonCallSettings listBackupsTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(listBackupsMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + HttpJsonCallSettings restoreDatabaseTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(restoreDatabaseMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + 
builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + HttpJsonCallSettings + listDatabaseOperationsTransportSettings = + HttpJsonCallSettings + .newBuilder() + .setMethodDescriptor(listDatabaseOperationsMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + HttpJsonCallSettings + listBackupOperationsTransportSettings = + HttpJsonCallSettings + .newBuilder() + .setMethodDescriptor(listBackupOperationsMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + HttpJsonCallSettings + listDatabaseRolesTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(listDatabaseRolesMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + HttpJsonCallSettings + addSplitPointsTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(addSplitPointsMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("database", String.valueOf(request.getDatabase())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getDatabase()) + 
.build(); + HttpJsonCallSettings + createBackupScheduleTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(createBackupScheduleMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + HttpJsonCallSettings + getBackupScheduleTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(getBackupScheduleMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + HttpJsonCallSettings + updateBackupScheduleTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(updateBackupScheduleMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "backup_schedule.name", + String.valueOf(request.getBackupSchedule().getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings deleteBackupScheduleTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(deleteBackupScheduleMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + HttpJsonCallSettings + listBackupSchedulesTransportSettings = + HttpJsonCallSettings + .newBuilder() + .setMethodDescriptor(listBackupSchedulesMethodDescriptor) + 
.setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + + this.listDatabasesCallable = + callableFactory.createUnaryCallable( + listDatabasesTransportSettings, settings.listDatabasesSettings(), clientContext); + this.listDatabasesPagedCallable = + callableFactory.createPagedCallable( + listDatabasesTransportSettings, settings.listDatabasesSettings(), clientContext); + this.createDatabaseCallable = + callableFactory.createUnaryCallable( + createDatabaseTransportSettings, settings.createDatabaseSettings(), clientContext); + this.createDatabaseOperationCallable = + callableFactory.createOperationCallable( + createDatabaseTransportSettings, + settings.createDatabaseOperationSettings(), + clientContext, + httpJsonOperationsStub); + this.getDatabaseCallable = + callableFactory.createUnaryCallable( + getDatabaseTransportSettings, settings.getDatabaseSettings(), clientContext); + this.updateDatabaseCallable = + callableFactory.createUnaryCallable( + updateDatabaseTransportSettings, settings.updateDatabaseSettings(), clientContext); + this.updateDatabaseOperationCallable = + callableFactory.createOperationCallable( + updateDatabaseTransportSettings, + settings.updateDatabaseOperationSettings(), + clientContext, + httpJsonOperationsStub); + this.updateDatabaseDdlCallable = + callableFactory.createUnaryCallable( + updateDatabaseDdlTransportSettings, + settings.updateDatabaseDdlSettings(), + clientContext); + this.updateDatabaseDdlOperationCallable = + callableFactory.createOperationCallable( + updateDatabaseDdlTransportSettings, + settings.updateDatabaseDdlOperationSettings(), + clientContext, + httpJsonOperationsStub); + this.dropDatabaseCallable = + callableFactory.createUnaryCallable( + dropDatabaseTransportSettings, 
settings.dropDatabaseSettings(), clientContext); + this.getDatabaseDdlCallable = + callableFactory.createUnaryCallable( + getDatabaseDdlTransportSettings, settings.getDatabaseDdlSettings(), clientContext); + this.setIamPolicyCallable = + callableFactory.createUnaryCallable( + setIamPolicyTransportSettings, settings.setIamPolicySettings(), clientContext); + this.getIamPolicyCallable = + callableFactory.createUnaryCallable( + getIamPolicyTransportSettings, settings.getIamPolicySettings(), clientContext); + this.testIamPermissionsCallable = + callableFactory.createUnaryCallable( + testIamPermissionsTransportSettings, + settings.testIamPermissionsSettings(), + clientContext); + this.createBackupCallable = + callableFactory.createUnaryCallable( + createBackupTransportSettings, settings.createBackupSettings(), clientContext); + this.createBackupOperationCallable = + callableFactory.createOperationCallable( + createBackupTransportSettings, + settings.createBackupOperationSettings(), + clientContext, + httpJsonOperationsStub); + this.copyBackupCallable = + callableFactory.createUnaryCallable( + copyBackupTransportSettings, settings.copyBackupSettings(), clientContext); + this.copyBackupOperationCallable = + callableFactory.createOperationCallable( + copyBackupTransportSettings, + settings.copyBackupOperationSettings(), + clientContext, + httpJsonOperationsStub); + this.getBackupCallable = + callableFactory.createUnaryCallable( + getBackupTransportSettings, settings.getBackupSettings(), clientContext); + this.updateBackupCallable = + callableFactory.createUnaryCallable( + updateBackupTransportSettings, settings.updateBackupSettings(), clientContext); + this.deleteBackupCallable = + callableFactory.createUnaryCallable( + deleteBackupTransportSettings, settings.deleteBackupSettings(), clientContext); + this.listBackupsCallable = + callableFactory.createUnaryCallable( + listBackupsTransportSettings, settings.listBackupsSettings(), clientContext); + 
this.listBackupsPagedCallable = + callableFactory.createPagedCallable( + listBackupsTransportSettings, settings.listBackupsSettings(), clientContext); + this.restoreDatabaseCallable = + callableFactory.createUnaryCallable( + restoreDatabaseTransportSettings, settings.restoreDatabaseSettings(), clientContext); + this.restoreDatabaseOperationCallable = + callableFactory.createOperationCallable( + restoreDatabaseTransportSettings, + settings.restoreDatabaseOperationSettings(), + clientContext, + httpJsonOperationsStub); + this.listDatabaseOperationsCallable = + callableFactory.createUnaryCallable( + listDatabaseOperationsTransportSettings, + settings.listDatabaseOperationsSettings(), + clientContext); + this.listDatabaseOperationsPagedCallable = + callableFactory.createPagedCallable( + listDatabaseOperationsTransportSettings, + settings.listDatabaseOperationsSettings(), + clientContext); + this.listBackupOperationsCallable = + callableFactory.createUnaryCallable( + listBackupOperationsTransportSettings, + settings.listBackupOperationsSettings(), + clientContext); + this.listBackupOperationsPagedCallable = + callableFactory.createPagedCallable( + listBackupOperationsTransportSettings, + settings.listBackupOperationsSettings(), + clientContext); + this.listDatabaseRolesCallable = + callableFactory.createUnaryCallable( + listDatabaseRolesTransportSettings, + settings.listDatabaseRolesSettings(), + clientContext); + this.listDatabaseRolesPagedCallable = + callableFactory.createPagedCallable( + listDatabaseRolesTransportSettings, + settings.listDatabaseRolesSettings(), + clientContext); + this.addSplitPointsCallable = + callableFactory.createUnaryCallable( + addSplitPointsTransportSettings, settings.addSplitPointsSettings(), clientContext); + this.createBackupScheduleCallable = + callableFactory.createUnaryCallable( + createBackupScheduleTransportSettings, + settings.createBackupScheduleSettings(), + clientContext); + this.getBackupScheduleCallable = + 
callableFactory.createUnaryCallable( + getBackupScheduleTransportSettings, + settings.getBackupScheduleSettings(), + clientContext); + this.updateBackupScheduleCallable = + callableFactory.createUnaryCallable( + updateBackupScheduleTransportSettings, + settings.updateBackupScheduleSettings(), + clientContext); + this.deleteBackupScheduleCallable = + callableFactory.createUnaryCallable( + deleteBackupScheduleTransportSettings, + settings.deleteBackupScheduleSettings(), + clientContext); + this.listBackupSchedulesCallable = + callableFactory.createUnaryCallable( + listBackupSchedulesTransportSettings, + settings.listBackupSchedulesSettings(), + clientContext); + this.listBackupSchedulesPagedCallable = + callableFactory.createPagedCallable( + listBackupSchedulesTransportSettings, + settings.listBackupSchedulesSettings(), + clientContext); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + @InternalApi + public static List getMethodDescriptors() { + List methodDescriptors = new ArrayList<>(); + methodDescriptors.add(listDatabasesMethodDescriptor); + methodDescriptors.add(createDatabaseMethodDescriptor); + methodDescriptors.add(getDatabaseMethodDescriptor); + methodDescriptors.add(updateDatabaseMethodDescriptor); + methodDescriptors.add(updateDatabaseDdlMethodDescriptor); + methodDescriptors.add(dropDatabaseMethodDescriptor); + methodDescriptors.add(getDatabaseDdlMethodDescriptor); + methodDescriptors.add(setIamPolicyMethodDescriptor); + methodDescriptors.add(getIamPolicyMethodDescriptor); + methodDescriptors.add(testIamPermissionsMethodDescriptor); + methodDescriptors.add(createBackupMethodDescriptor); + methodDescriptors.add(copyBackupMethodDescriptor); + methodDescriptors.add(getBackupMethodDescriptor); + methodDescriptors.add(updateBackupMethodDescriptor); + methodDescriptors.add(deleteBackupMethodDescriptor); + methodDescriptors.add(listBackupsMethodDescriptor); + 
methodDescriptors.add(restoreDatabaseMethodDescriptor); + methodDescriptors.add(listDatabaseOperationsMethodDescriptor); + methodDescriptors.add(listBackupOperationsMethodDescriptor); + methodDescriptors.add(listDatabaseRolesMethodDescriptor); + methodDescriptors.add(addSplitPointsMethodDescriptor); + methodDescriptors.add(createBackupScheduleMethodDescriptor); + methodDescriptors.add(getBackupScheduleMethodDescriptor); + methodDescriptors.add(updateBackupScheduleMethodDescriptor); + methodDescriptors.add(deleteBackupScheduleMethodDescriptor); + methodDescriptors.add(listBackupSchedulesMethodDescriptor); + return methodDescriptors; + } + + public HttpJsonOperationsStub getHttpJsonOperationsStub() { + return httpJsonOperationsStub; + } + + @Override + public UnaryCallable listDatabasesCallable() { + return listDatabasesCallable; + } + + @Override + public UnaryCallable + listDatabasesPagedCallable() { + return listDatabasesPagedCallable; + } + + @Override + public UnaryCallable createDatabaseCallable() { + return createDatabaseCallable; + } + + @Override + public OperationCallable + createDatabaseOperationCallable() { + return createDatabaseOperationCallable; + } + + @Override + public UnaryCallable getDatabaseCallable() { + return getDatabaseCallable; + } + + @Override + public UnaryCallable updateDatabaseCallable() { + return updateDatabaseCallable; + } + + @Override + public OperationCallable + updateDatabaseOperationCallable() { + return updateDatabaseOperationCallable; + } + + @Override + public UnaryCallable updateDatabaseDdlCallable() { + return updateDatabaseDdlCallable; + } + + @Override + public OperationCallable + updateDatabaseDdlOperationCallable() { + return updateDatabaseDdlOperationCallable; + } + + @Override + public UnaryCallable dropDatabaseCallable() { + return dropDatabaseCallable; + } + + @Override + public UnaryCallable getDatabaseDdlCallable() { + return getDatabaseDdlCallable; + } + + @Override + public UnaryCallable setIamPolicyCallable() { 
+ return setIamPolicyCallable; + } + + @Override + public UnaryCallable getIamPolicyCallable() { + return getIamPolicyCallable; + } + + @Override + public UnaryCallable + testIamPermissionsCallable() { + return testIamPermissionsCallable; + } + + @Override + public UnaryCallable createBackupCallable() { + return createBackupCallable; + } + + @Override + public OperationCallable + createBackupOperationCallable() { + return createBackupOperationCallable; + } + + @Override + public UnaryCallable copyBackupCallable() { + return copyBackupCallable; + } + + @Override + public OperationCallable + copyBackupOperationCallable() { + return copyBackupOperationCallable; + } + + @Override + public UnaryCallable getBackupCallable() { + return getBackupCallable; + } + + @Override + public UnaryCallable updateBackupCallable() { + return updateBackupCallable; + } + + @Override + public UnaryCallable deleteBackupCallable() { + return deleteBackupCallable; + } + + @Override + public UnaryCallable listBackupsCallable() { + return listBackupsCallable; + } + + @Override + public UnaryCallable listBackupsPagedCallable() { + return listBackupsPagedCallable; + } + + @Override + public UnaryCallable restoreDatabaseCallable() { + return restoreDatabaseCallable; + } + + @Override + public OperationCallable + restoreDatabaseOperationCallable() { + return restoreDatabaseOperationCallable; + } + + @Override + public UnaryCallable + listDatabaseOperationsCallable() { + return listDatabaseOperationsCallable; + } + + @Override + public UnaryCallable + listDatabaseOperationsPagedCallable() { + return listDatabaseOperationsPagedCallable; + } + + @Override + public UnaryCallable + listBackupOperationsCallable() { + return listBackupOperationsCallable; + } + + @Override + public UnaryCallable + listBackupOperationsPagedCallable() { + return listBackupOperationsPagedCallable; + } + + @Override + public UnaryCallable + listDatabaseRolesCallable() { + return listDatabaseRolesCallable; + } + + @Override + 
public UnaryCallable + listDatabaseRolesPagedCallable() { + return listDatabaseRolesPagedCallable; + } + + @Override + public UnaryCallable addSplitPointsCallable() { + return addSplitPointsCallable; + } + + @Override + public UnaryCallable createBackupScheduleCallable() { + return createBackupScheduleCallable; + } + + @Override + public UnaryCallable getBackupScheduleCallable() { + return getBackupScheduleCallable; + } + + @Override + public UnaryCallable updateBackupScheduleCallable() { + return updateBackupScheduleCallable; + } + + @Override + public UnaryCallable deleteBackupScheduleCallable() { + return deleteBackupScheduleCallable; + } + + @Override + public UnaryCallable + listBackupSchedulesCallable() { + return listBackupSchedulesCallable; + } + + @Override + public UnaryCallable + listBackupSchedulesPagedCallable() { + return listBackupSchedulesPagedCallable; + } + + @Override + public UnaryCallable + internalUpdateGraphOperationCallable() { + throw new UnsupportedOperationException( + "Not implemented: internalUpdateGraphOperationCallable(). 
REST transport is not implemented" + + " for this method yet."); + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/Version.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/Version.java new file mode 100644 index 000000000000..a0c19dea4ccc --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/Version.java @@ -0,0 +1,27 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.admin.database.v1.stub; + +import com.google.api.core.InternalApi; + +@InternalApi("For internal use only") +final class Version { + // {x-version-update-start:google-cloud-spanner:current} + static final String VERSION = "0.0.0-SNAPSHOT"; + // {x-version-update-end} + +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminClient.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminClient.java new file mode 100644 index 000000000000..05150d865e77 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminClient.java @@ -0,0 +1,5359 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.admin.instance.v1; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.httpjson.longrunning.OperationsClient; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.paging.AbstractFixedSizeCollection; +import com.google.api.gax.paging.AbstractPage; +import com.google.api.gax.paging.AbstractPagedListResponse; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PageContext; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.api.resourcenames.ResourceName; +import com.google.cloud.spanner.admin.instance.v1.stub.InstanceAdminStub; +import com.google.cloud.spanner.admin.instance.v1.stub.InstanceAdminStubSettings; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import com.google.protobuf.FieldMask; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.CreateInstanceMetadata; +import com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata; +import com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.CreateInstanceRequest; +import com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.DeleteInstanceRequest; +import 
com.google.spanner.admin.instance.v1.GetInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.GetInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.GetInstanceRequest; +import com.google.spanner.admin.instance.v1.Instance; +import com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.InstanceConfigName; +import com.google.spanner.admin.instance.v1.InstanceName; +import com.google.spanner.admin.instance.v1.InstancePartition; +import com.google.spanner.admin.instance.v1.InstancePartitionName; +import com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest; +import com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse; +import com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest; +import com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse; +import com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest; +import com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse; +import com.google.spanner.admin.instance.v1.ListInstancesRequest; +import com.google.spanner.admin.instance.v1.ListInstancesResponse; +import com.google.spanner.admin.instance.v1.MoveInstanceMetadata; +import com.google.spanner.admin.instance.v1.MoveInstanceRequest; +import com.google.spanner.admin.instance.v1.MoveInstanceResponse; +import com.google.spanner.admin.instance.v1.ProjectName; +import com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest; +import 
com.google.spanner.admin.instance.v1.UpdateInstanceRequest; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Service Description: Cloud Spanner Instance Admin API + * + *

The Cloud Spanner Instance Admin API can be used to create, delete, modify and list instances. + * Instances are dedicated Cloud Spanner serving and storage resources to be used by Cloud Spanner + * databases. + * + *

Each instance has a "configuration", which dictates where the serving resources for the Cloud + * Spanner instance are located (e.g., US-central, Europe). Configurations are created by Google + * based on resource availability. + * + *

Cloud Spanner billing is based on the instances that exist and their sizes. After an instance + * exists, there are no additional per-database or per-operation charges for use of the instance + * (though there may be additional network bandwidth charges). Instances offer isolation: problems + * with databases in one instance will not affect other instances. However, within an instance + * databases can affect each other. For example, if one database in an instance receives a lot of + * requests and consumes most of the instance resources, fewer resources are available for other + * databases in that instance, and their performance may suffer. + * + *

This class provides the ability to make remote calls to the backing service through method + * calls that map to API methods. Sample code to get started: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+ *   InstanceConfigName name = InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]");
+ *   InstanceConfig response = instanceAdminClient.getInstanceConfig(name);
+ * }
+ * }
+ * + *

Note: close() needs to be called on the InstanceAdminClient object to clean up resources such + * as threads. In the example above, try-with-resources is used, which automatically calls close(). + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
Methods
MethodDescriptionMethod Variants

ListInstanceConfigs

Lists the supported instance configurations for a given project. + *

Returns both Google-managed configurations and user-managed configurations.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • listInstanceConfigs(ListInstanceConfigsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • listInstanceConfigs(ProjectName parent) + *

  • listInstanceConfigs(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • listInstanceConfigsPagedCallable() + *

  • listInstanceConfigsCallable() + *

+ *

GetInstanceConfig

Gets information about a particular instance configuration.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getInstanceConfig(GetInstanceConfigRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getInstanceConfig(InstanceConfigName name) + *

  • getInstanceConfig(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getInstanceConfigCallable() + *

+ *

CreateInstanceConfig

Creates an instance configuration and begins preparing it to be used. The returned long-running operation can be used to track the progress of preparing the new instance configuration. The instance configuration name is assigned by the caller. If the named instance configuration already exists, `CreateInstanceConfig` returns `ALREADY_EXISTS`. + *

Immediately after the request returns: + *

* The instance configuration is readable via the API, with all requested attributes. The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to true. Its state is `CREATING`. + *

While the operation is pending: + *

* Cancelling the operation renders the instance configuration immediately unreadable via the API. * Except for deleting the creating resource, all other attempts to modify the instance configuration are rejected. + *

Upon completion of the returned operation: + *

* Instances can be created using the instance configuration. * The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. Its state becomes `READY`. + *

The returned long-running operation will have a name of the format `<instance_config_name>/operations/<operation_id>` and can be used to track creation of the instance configuration. The metadata field type is [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. The response field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. + *

Authorization requires `spanner.instanceConfigs.create` permission on the resource [parent][google.spanner.admin.instance.v1.CreateInstanceConfigRequest.parent].

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • createInstanceConfigAsync(CreateInstanceConfigRequest request) + *

+ *

Methods that return long-running operations have "Async" method variants that return `OperationFuture`, which is used to track polling of the service.

+ *
    + *
  • createInstanceConfigAsync(ProjectName parent, InstanceConfig instanceConfig, String instanceConfigId) + *

  • createInstanceConfigAsync(String parent, InstanceConfig instanceConfig, String instanceConfigId) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • createInstanceConfigOperationCallable() + *

  • createInstanceConfigCallable() + *

+ *

UpdateInstanceConfig

Updates an instance configuration. The returned long-running operation can be used to track the progress of updating the instance. If the named instance configuration does not exist, returns `NOT_FOUND`. + *

Only user-managed configurations can be updated. + *

Immediately after the request returns: + *

* The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to true. + *

While the operation is pending: + *

* Cancelling the operation sets its metadata's [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time]. The operation is guaranteed to succeed at undoing all changes, after which point it terminates with a `CANCELLED` status. * All other attempts to modify the instance configuration are rejected. * Reading the instance configuration via the API continues to give the pre-request values. + *

Upon completion of the returned operation: + *

* Creating instances using the instance configuration uses the new values. * The new values of the instance configuration are readable via the API. * The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. + *

The returned long-running operation will have a name of the format `<instance_config_name>/operations/<operation_id>` and can be used to track the instance configuration modification. The metadata field type is [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. The response field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. + *

Authorization requires `spanner.instanceConfigs.update` permission on the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • updateInstanceConfigAsync(UpdateInstanceConfigRequest request) + *

+ *

Methods that return long-running operations have "Async" method variants that return `OperationFuture`, which is used to track polling of the service.

+ *
    + *
  • updateInstanceConfigAsync(InstanceConfig instanceConfig, FieldMask updateMask) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • updateInstanceConfigOperationCallable() + *

  • updateInstanceConfigCallable() + *

+ *

DeleteInstanceConfig

Deletes the instance configuration. Deletion is only allowed when no instances are using the configuration. If any instances are using the configuration, returns `FAILED_PRECONDITION`. + *

Only user-managed configurations can be deleted. + *

Authorization requires `spanner.instanceConfigs.delete` permission on the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • deleteInstanceConfig(DeleteInstanceConfigRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • deleteInstanceConfig(InstanceConfigName name) + *

  • deleteInstanceConfig(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • deleteInstanceConfigCallable() + *

+ *

ListInstanceConfigOperations

Lists the user-managed instance configuration long-running operations in the given project. An instance configuration operation has a name of the form `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`. The long-running operation metadata field type `metadata.type_url` describes the type of the metadata. Operations returned include those that have completed/failed/canceled within the last 7 days, and pending operations. Operations returned are ordered by `operation.metadata.value.start_time` in descending order starting from the most recently started operation.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • listInstanceConfigOperations(ListInstanceConfigOperationsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • listInstanceConfigOperations(ProjectName parent) + *

  • listInstanceConfigOperations(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • listInstanceConfigOperationsPagedCallable() + *

  • listInstanceConfigOperationsCallable() + *

+ *

ListInstances

Lists all instances in the given project.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • listInstances(ListInstancesRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • listInstances(ProjectName parent) + *

  • listInstances(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • listInstancesPagedCallable() + *

  • listInstancesCallable() + *

+ *

ListInstancePartitions

Lists all instance partitions for the given instance.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • listInstancePartitions(ListInstancePartitionsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • listInstancePartitions(InstanceName parent) + *

  • listInstancePartitions(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • listInstancePartitionsPagedCallable() + *

  • listInstancePartitionsCallable() + *

+ *

GetInstance

Gets information about a particular instance.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getInstance(GetInstanceRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getInstance(InstanceName name) + *

  • getInstance(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getInstanceCallable() + *

+ *

CreateInstance

Creates an instance and begins preparing it to begin serving. The returned long-running operation can be used to track the progress of preparing the new instance. The instance name is assigned by the caller. If the named instance already exists, `CreateInstance` returns `ALREADY_EXISTS`. + *

Immediately upon completion of this request: + *

* The instance is readable via the API, with all requested attributes but no allocated resources. Its state is `CREATING`. + *

Until completion of the returned operation: + *

* Cancelling the operation renders the instance immediately unreadable via the API. * The instance can be deleted. * All other attempts to modify the instance are rejected. + *

Upon completion of the returned operation: + *

* Billing for all successfully-allocated resources begins (some types may have lower than the requested levels). * Databases can be created in the instance. * The instance's allocated resource levels are readable via the API. * The instance's state becomes `READY`. + *

The returned long-running operation will have a name of the format `<instance_name>/operations/<operation_id>` and can be used to track creation of the instance. The metadata field type is [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. The response field type is [Instance][google.spanner.admin.instance.v1.Instance], if successful.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • createInstanceAsync(CreateInstanceRequest request) + *

+ *

Methods that return long-running operations have "Async" method variants that return `OperationFuture`, which is used to track polling of the service.

+ *
    + *
  • createInstanceAsync(ProjectName parent, String instanceId, Instance instance) + *

  • createInstanceAsync(String parent, String instanceId, Instance instance) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • createInstanceOperationCallable() + *

  • createInstanceCallable() + *

+ *

UpdateInstance

Updates an instance, and begins allocating or releasing resources as requested. The returned long-running operation can be used to track the progress of updating the instance. If the named instance does not exist, returns `NOT_FOUND`. + *

Immediately upon completion of this request: + *

* For resource types for which a decrease in the instance's allocation has been requested, billing is based on the newly-requested level. + *

Until completion of the returned operation: + *

* Cancelling the operation sets its metadata's [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins restoring resources to their pre-request values. The operation is guaranteed to succeed at undoing all resource changes, after which point it terminates with a `CANCELLED` status. * All other attempts to modify the instance are rejected. * Reading the instance via the API continues to give the pre-request resource levels. + *

Upon completion of the returned operation: + *

* Billing begins for all successfully-allocated resources (some types may have lower than the requested levels). * All newly-reserved resources are available for serving the instance's tables. * The instance's new resource levels are readable via the API. + *

The returned long-running operation will have a name of the format `<instance_name>/operations/<operation_id>` and can be used to track the instance modification. The metadata field type is [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. The response field type is [Instance][google.spanner.admin.instance.v1.Instance], if successful. + *

Authorization requires `spanner.instances.update` permission on the resource [name][google.spanner.admin.instance.v1.Instance.name].

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • updateInstanceAsync(UpdateInstanceRequest request) + *

+ *

Methods that return long-running operations have "Async" method variants that return `OperationFuture`, which is used to track polling of the service.

+ *
    + *
  • updateInstanceAsync(Instance instance, FieldMask fieldMask) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • updateInstanceOperationCallable() + *

  • updateInstanceCallable() + *

+ *

DeleteInstance

Deletes an instance. + *

Immediately upon completion of the request: + *

* Billing ceases for all of the instance's reserved resources. + *

Soon afterward: + *

* The instance and *all of its databases* immediately and irrevocably disappear from the API. All data in the databases is permanently deleted.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • deleteInstance(DeleteInstanceRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • deleteInstance(InstanceName name) + *

  • deleteInstance(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • deleteInstanceCallable() + *

+ *

SetIamPolicy

Sets the access control policy on an instance resource. Replaces any existing policy. + *

Authorization requires `spanner.instances.setIamPolicy` on [resource][google.iam.v1.SetIamPolicyRequest.resource].

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • setIamPolicy(SetIamPolicyRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • setIamPolicy(ResourceName resource, Policy policy) + *

  • setIamPolicy(String resource, Policy policy) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • setIamPolicyCallable() + *

+ *

GetIamPolicy

Gets the access control policy for an instance resource. Returns an empty policy if an instance exists but does not have a policy set. + *

Authorization requires `spanner.instances.getIamPolicy` on [resource][google.iam.v1.GetIamPolicyRequest.resource].

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getIamPolicy(GetIamPolicyRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getIamPolicy(ResourceName resource) + *

  • getIamPolicy(String resource) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getIamPolicyCallable() + *

+ *

TestIamPermissions

Returns permissions that the caller has on the specified instance resource. + *

Attempting this RPC on a non-existent Cloud Spanner instance resource will result in a NOT_FOUND error if the user has `spanner.instances.list` permission on the containing Google Cloud Project. Otherwise returns an empty set of permissions.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • testIamPermissions(TestIamPermissionsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • testIamPermissions(ResourceName resource, List<String> permissions) + *

  • testIamPermissions(String resource, List<String> permissions) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • testIamPermissionsCallable() + *

+ *

GetInstancePartition

Gets information about a particular instance partition.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getInstancePartition(GetInstancePartitionRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getInstancePartition(InstancePartitionName name) + *

  • getInstancePartition(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getInstancePartitionCallable() + *

+ *

CreateInstancePartition

Creates an instance partition and begins preparing it to be used. The returned long-running operation can be used to track the progress of preparing the new instance partition. The instance partition name is assigned by the caller. If the named instance partition already exists, `CreateInstancePartition` returns `ALREADY_EXISTS`. + *

Immediately upon completion of this request: + *

* The instance partition is readable via the API, with all requested attributes but no allocated resources. Its state is `CREATING`. + *

Until completion of the returned operation: + *

* Cancelling the operation renders the instance partition immediately unreadable via the API. * The instance partition can be deleted. * All other attempts to modify the instance partition are rejected. + *

Upon completion of the returned operation: + *

* Billing for all successfully-allocated resources begins (some types may have lower than the requested levels). * Databases can start using this instance partition. * The instance partition's allocated resource levels are readable via the API. * The instance partition's state becomes `READY`. + *

The returned long-running operation will have a name of the format `<instance_partition_name>/operations/<operation_id>` and can be used to track creation of the instance partition. The metadata field type is [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]. The response field type is [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if successful.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • createInstancePartitionAsync(CreateInstancePartitionRequest request) + *

+ *

Methods that return long-running operations have "Async" method variants that return `OperationFuture`, which is used to track polling of the service.

+ *
    + *
  • createInstancePartitionAsync(InstanceName parent, InstancePartition instancePartition, String instancePartitionId) + *

  • createInstancePartitionAsync(String parent, InstancePartition instancePartition, String instancePartitionId) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • createInstancePartitionOperationCallable() + *

  • createInstancePartitionCallable() + *

+ *

DeleteInstancePartition

Deletes an existing instance partition. Requires that the instance partition is not used by any database or backup and is not the default instance partition of an instance. + *

Authorization requires `spanner.instancePartitions.delete` permission on the resource [name][google.spanner.admin.instance.v1.InstancePartition.name].

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • deleteInstancePartition(DeleteInstancePartitionRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • deleteInstancePartition(InstancePartitionName name) + *

  • deleteInstancePartition(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • deleteInstancePartitionCallable() + *

+ *

UpdateInstancePartition

Updates an instance partition, and begins allocating or releasing resources as requested. The returned long-running operation can be used to track the progress of updating the instance partition. If the named instance partition does not exist, returns `NOT_FOUND`. + *

Immediately upon completion of this request: + *

* For resource types for which a decrease in the instance partition's allocation has been requested, billing is based on the newly-requested level. + *

Until completion of the returned operation: + *

* Cancelling the operation sets its metadata's [cancel_time][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.cancel_time], and begins restoring resources to their pre-request values. The operation is guaranteed to succeed at undoing all resource changes, after which point it terminates with a `CANCELLED` status. * All other attempts to modify the instance partition are rejected. * Reading the instance partition via the API continues to give the pre-request resource levels. + *

Upon completion of the returned operation: + *

* Billing begins for all successfully-allocated resources (some types may have lower than the requested levels). * All newly-reserved resources are available for serving the instance partition's tables. * The instance partition's new resource levels are readable via the API. + *

The returned long-running operation will have a name of the format `<instance_partition_name>/operations/<operation_id>` and can be used to track the instance partition modification. The metadata field type is [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata]. The response field type is [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if successful. + *

Authorization requires `spanner.instancePartitions.update` permission on the resource [name][google.spanner.admin.instance.v1.InstancePartition.name].

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • updateInstancePartitionAsync(UpdateInstancePartitionRequest request) + *

+ *

Methods that return long-running operations have "Async" method variants that return `OperationFuture`, which is used to track polling of the service.

+ *
    + *
  • updateInstancePartitionAsync(InstancePartition instancePartition, FieldMask fieldMask) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • updateInstancePartitionOperationCallable() + *

  • updateInstancePartitionCallable() + *

+ *

ListInstancePartitionOperations

Lists instance partition long-running operations in the given instance. An instance partition operation has a name of the form `projects/<project>/instances/<instance>/instancePartitions/<instance_partition>/operations/<operation>`. The long-running operation metadata field type `metadata.type_url` describes the type of the metadata. Operations returned include those that have completed/failed/canceled within the last 7 days, and pending operations. Operations returned are ordered by `operation.metadata.value.start_time` in descending order starting from the most recently started operation. + *

Authorization requires `spanner.instancePartitionOperations.list` permission on the resource [parent][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.parent].

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • listInstancePartitionOperations(ListInstancePartitionOperationsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • listInstancePartitionOperations(InstanceName parent) + *

  • listInstancePartitionOperations(String parent) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • listInstancePartitionOperationsPagedCallable() + *

  • listInstancePartitionOperationsCallable() + *

+ *

MoveInstance

Moves an instance to the target instance configuration. You can use the returned long-running operation to track the progress of moving the instance. + *

`MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of the following criteria: + *

* Is undergoing a move to a different instance configuration * Has backups * Has an ongoing update * Contains any CMEK-enabled databases * Is a free trial instance + *

While the operation is pending: + *

* All other attempts to modify the instance, including changes to its compute capacity, are rejected. * The following database and backup admin operations are rejected: + *

* `DatabaseAdmin.CreateDatabase` * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is specified in the request.) * `DatabaseAdmin.RestoreDatabase` * `DatabaseAdmin.CreateBackup` * `DatabaseAdmin.CopyBackup` + *

* Both the source and target instance configurations are subject to hourly compute and storage charges. * The instance might experience higher read-write latencies and a higher transaction abort rate. However, moving an instance doesn't cause any downtime. + *

The returned long-running operation has a name of the format `<instance_name>/operations/<operation_id>` and can be used to track the move instance operation. The metadata field type is [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata]. The response field type is [Instance][google.spanner.admin.instance.v1.Instance], if successful. Cancelling the operation sets its metadata's [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time]. Cancellation is not immediate because it involves moving any data previously moved to the target instance configuration back to the original instance configuration. You can use this operation to track the progress of the cancellation. Upon successful completion of the cancellation, the operation terminates with `CANCELLED` status. + *

If not cancelled, upon completion of the returned operation: + *

* The instance successfully moves to the target instance configuration. * You are billed for compute and storage in target instance configuration. + *

Authorization requires the `spanner.instances.update` permission on the resource [instance][google.spanner.admin.instance.v1.Instance]. + *

For more details, see [Move an instance](https://cloud.google.com/spanner/docs/move-instance).

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • moveInstanceAsync(MoveInstanceRequest request) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • moveInstanceOperationCallable() + *

  • moveInstanceCallable() + *

+ *
+ * + *

See the individual methods for example code. + * + *

Many parameters require resource names to be formatted in a particular way. To assist with + * these names, this class includes a format method for each type of name, and additionally a parse + * method to extract the individual identifiers contained within names that are returned. + * + *

This class can be customized by passing in a custom instance of InstanceAdminSettings to + * create(). For example: + * + *

To customize credentials: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * InstanceAdminSettings instanceAdminSettings =
+ *     InstanceAdminSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * InstanceAdminClient instanceAdminClient = InstanceAdminClient.create(instanceAdminSettings);
+ * }
+ * + *

To customize the endpoint: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * InstanceAdminSettings instanceAdminSettings =
+ *     InstanceAdminSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * InstanceAdminClient instanceAdminClient = InstanceAdminClient.create(instanceAdminSettings);
+ * }
+ * + *

To use REST (HTTP1.1/JSON) transport (instead of gRPC) for sending and receiving requests over + * the wire: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * InstanceAdminSettings instanceAdminSettings =
+ *     InstanceAdminSettings.newHttpJsonBuilder().build();
+ * InstanceAdminClient instanceAdminClient = InstanceAdminClient.create(instanceAdminSettings);
+ * }
+ * + *

Please refer to the GitHub repository's samples for more quickstart code snippets. + */ +@Generated("by gapic-generator-java") +public class InstanceAdminClient implements BackgroundResource { + private final InstanceAdminSettings settings; + private final InstanceAdminStub stub; + private final OperationsClient httpJsonOperationsClient; + private final com.google.longrunning.OperationsClient operationsClient; + + /** Constructs an instance of InstanceAdminClient with default settings. */ + public static final InstanceAdminClient create() throws IOException { + return create(InstanceAdminSettings.newBuilder().build()); + } + + /** + * Constructs an instance of InstanceAdminClient, using the given settings. The channels are + * created based on the settings passed in, or defaults for any settings that are not set. + */ + public static final InstanceAdminClient create(InstanceAdminSettings settings) + throws IOException { + return new InstanceAdminClient(settings); + } + + /** + * Constructs an instance of InstanceAdminClient, using the given stub for making calls. This is + * for advanced usage - prefer using create(InstanceAdminSettings). + */ + public static final InstanceAdminClient create(InstanceAdminStub stub) { + return new InstanceAdminClient(stub); + } + + /** + * Constructs an instance of InstanceAdminClient, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. 
+ */ + protected InstanceAdminClient(InstanceAdminSettings settings) throws IOException { + this.settings = settings; + this.stub = ((InstanceAdminStubSettings) settings.getStubSettings()).createStub(); + this.operationsClient = + com.google.longrunning.OperationsClient.create(this.stub.getOperationsStub()); + this.httpJsonOperationsClient = OperationsClient.create(this.stub.getHttpJsonOperationsStub()); + } + + protected InstanceAdminClient(InstanceAdminStub stub) { + this.settings = null; + this.stub = stub; + this.operationsClient = + com.google.longrunning.OperationsClient.create(this.stub.getOperationsStub()); + this.httpJsonOperationsClient = OperationsClient.create(this.stub.getHttpJsonOperationsStub()); + } + + public final InstanceAdminSettings getSettings() { + return settings; + } + + public InstanceAdminStub getStub() { + return stub; + } + + /** + * Returns the OperationsClient that can be used to query the status of a long-running operation + * returned by another API method call. + */ + public final com.google.longrunning.OperationsClient getOperationsClient() { + return operationsClient; + } + + /** + * Returns the OperationsClient that can be used to query the status of a long-running operation + * returned by another API method call. + */ + @BetaApi + public final OperationsClient getHttpJsonOperationsClient() { + return httpJsonOperationsClient; + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists the supported instance configurations for a given project. + * + *

Returns both Google-managed configurations and user-managed configurations. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ProjectName parent = ProjectName.of("[PROJECT]");
+   *   for (InstanceConfig element : instanceAdminClient.listInstanceConfigs(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The name of the project for which a list of supported instance + * configurations is requested. Values are of the form `projects/<project>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListInstanceConfigsPagedResponse listInstanceConfigs(ProjectName parent) { + ListInstanceConfigsRequest request = + ListInstanceConfigsRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listInstanceConfigs(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists the supported instance configurations for a given project. + * + *

Returns both Google-managed configurations and user-managed configurations. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   String parent = ProjectName.of("[PROJECT]").toString();
+   *   for (InstanceConfig element : instanceAdminClient.listInstanceConfigs(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The name of the project for which a list of supported instance + * configurations is requested. Values are of the form `projects/<project>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListInstanceConfigsPagedResponse listInstanceConfigs(String parent) { + ListInstanceConfigsRequest request = + ListInstanceConfigsRequest.newBuilder().setParent(parent).build(); + return listInstanceConfigs(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists the supported instance configurations for a given project. + * + *

Returns both Google-managed configurations and user-managed configurations. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ListInstanceConfigsRequest request =
+   *       ListInstanceConfigsRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   for (InstanceConfig element : instanceAdminClient.listInstanceConfigs(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListInstanceConfigsPagedResponse listInstanceConfigs( + ListInstanceConfigsRequest request) { + return listInstanceConfigsPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists the supported instance configurations for a given project. + * + *

Returns both Google-managed configurations and user-managed configurations. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ListInstanceConfigsRequest request =
+   *       ListInstanceConfigsRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   ApiFuture future =
+   *       instanceAdminClient.listInstanceConfigsPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (InstanceConfig element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listInstanceConfigsPagedCallable() { + return stub.listInstanceConfigsPagedCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists the supported instance configurations for a given project. + * + *

Returns both Google-managed configurations and user-managed configurations. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ListInstanceConfigsRequest request =
+   *       ListInstanceConfigsRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   while (true) {
+   *     ListInstanceConfigsResponse response =
+   *         instanceAdminClient.listInstanceConfigsCallable().call(request);
+   *     for (InstanceConfig element : response.getInstanceConfigsList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listInstanceConfigsCallable() { + return stub.listInstanceConfigsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets information about a particular instance configuration. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   InstanceConfigName name = InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]");
+   *   InstanceConfig response = instanceAdminClient.getInstanceConfig(name);
+   * }
+   * }
+ * + * @param name Required. The name of the requested instance configuration. Values are of the form + * `projects/<project>/instanceConfigs/<config>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final InstanceConfig getInstanceConfig(InstanceConfigName name) { + GetInstanceConfigRequest request = + GetInstanceConfigRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + return getInstanceConfig(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets information about a particular instance configuration. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   String name = InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString();
+   *   InstanceConfig response = instanceAdminClient.getInstanceConfig(name);
+   * }
+   * }
+ * + * @param name Required. The name of the requested instance configuration. Values are of the form + * `projects/<project>/instanceConfigs/<config>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final InstanceConfig getInstanceConfig(String name) { + GetInstanceConfigRequest request = GetInstanceConfigRequest.newBuilder().setName(name).build(); + return getInstanceConfig(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets information about a particular instance configuration. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   GetInstanceConfigRequest request =
+   *       GetInstanceConfigRequest.newBuilder()
+   *           .setName(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString())
+   *           .build();
+   *   InstanceConfig response = instanceAdminClient.getInstanceConfig(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final InstanceConfig getInstanceConfig(GetInstanceConfigRequest request) { + return getInstanceConfigCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets information about a particular instance configuration. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   GetInstanceConfigRequest request =
+   *       GetInstanceConfigRequest.newBuilder()
+   *           .setName(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString())
+   *           .build();
+   *   ApiFuture future =
+   *       instanceAdminClient.getInstanceConfigCallable().futureCall(request);
+   *   // Do something.
+   *   InstanceConfig response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable getInstanceConfigCallable() { + return stub.getInstanceConfigCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates an instance configuration and begins preparing it to be used. The returned long-running + * operation can be used to track the progress of preparing the new instance configuration. The + * instance configuration name is assigned by the caller. If the named instance configuration + * already exists, `CreateInstanceConfig` returns `ALREADY_EXISTS`. + * + *

Immediately after the request returns: + * + *

* The instance configuration is readable via the API, with all requested attributes. The + * instance configuration's + * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to + * true. Its state is `CREATING`. + * + *

While the operation is pending: + * + *

* Cancelling the operation renders the instance configuration immediately unreadable via + * the API. * Except for deleting the creating resource, all other attempts to modify the + * instance configuration are rejected. + * + *

Upon completion of the returned operation: + * + *

* Instances can be created using the instance configuration. * The instance + * configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + * field becomes false. Its state becomes `READY`. + * + *

The returned long-running operation will have a name of the format + * `<instance_config_name>/operations/<operation_id>` and can be used to track + * creation of the instance configuration. The metadata field type is + * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. + * The response field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], + * if successful. + * + *

Authorization requires `spanner.instanceConfigs.create` permission on the resource + * [parent][google.spanner.admin.instance.v1.CreateInstanceConfigRequest.parent]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ProjectName parent = ProjectName.of("[PROJECT]");
+   *   InstanceConfig instanceConfig = InstanceConfig.newBuilder().build();
+   *   String instanceConfigId = "instanceConfigId1750947762";
+   *   InstanceConfig response =
+   *       instanceAdminClient
+   *           .createInstanceConfigAsync(parent, instanceConfig, instanceConfigId)
+   *           .get();
+   * }
+   * }
+ * + * @param parent Required. The name of the project in which to create the instance configuration. + * Values are of the form `projects/<project>`. + * @param instanceConfig Required. The `InstanceConfig` proto of the configuration to create. + * `instance_config.name` must be `<parent>/instanceConfigs/<instance_config_id>`. + * `instance_config.base_config` must be a Google-managed configuration name, e.g. + * <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3. + * @param instanceConfigId Required. The ID of the instance configuration to create. Valid + * identifiers are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64 + * characters in length. The `custom-` prefix is required to avoid name conflicts with + * Google-managed configurations. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture + createInstanceConfigAsync( + ProjectName parent, InstanceConfig instanceConfig, String instanceConfigId) { + CreateInstanceConfigRequest request = + CreateInstanceConfigRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setInstanceConfig(instanceConfig) + .setInstanceConfigId(instanceConfigId) + .build(); + return createInstanceConfigAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates an instance configuration and begins preparing it to be used. The returned long-running + * operation can be used to track the progress of preparing the new instance configuration. The + * instance configuration name is assigned by the caller. If the named instance configuration + * already exists, `CreateInstanceConfig` returns `ALREADY_EXISTS`. + * + *

Immediately after the request returns: + * + *

* The instance configuration is readable via the API, with all requested attributes. The + * instance configuration's + * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to + * true. Its state is `CREATING`. + * + *

While the operation is pending: + * + *

* Cancelling the operation renders the instance configuration immediately unreadable via + * the API. * Except for deleting the creating resource, all other attempts to modify the + * instance configuration are rejected. + * + *

Upon completion of the returned operation: + * + *

* Instances can be created using the instance configuration. * The instance + * configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + * field becomes false. Its state becomes `READY`. + * + *

The returned long-running operation will have a name of the format + * `<instance_config_name>/operations/<operation_id>` and can be used to track + * creation of the instance configuration. The metadata field type is + * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. + * The response field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], + * if successful. + * + *

Authorization requires `spanner.instanceConfigs.create` permission on the resource + * [parent][google.spanner.admin.instance.v1.CreateInstanceConfigRequest.parent]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   String parent = ProjectName.of("[PROJECT]").toString();
+   *   InstanceConfig instanceConfig = InstanceConfig.newBuilder().build();
+   *   String instanceConfigId = "instanceConfigId1750947762";
+   *   InstanceConfig response =
+   *       instanceAdminClient
+   *           .createInstanceConfigAsync(parent, instanceConfig, instanceConfigId)
+   *           .get();
+   * }
+   * }
+ * + * @param parent Required. The name of the project in which to create the instance configuration. + * Values are of the form `projects/<project>`. + * @param instanceConfig Required. The `InstanceConfig` proto of the configuration to create. + * `instance_config.name` must be `<parent>/instanceConfigs/<instance_config_id>`. + * `instance_config.base_config` must be a Google-managed configuration name, e.g. + * <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3. + * @param instanceConfigId Required. The ID of the instance configuration to create. Valid + * identifiers are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64 + * characters in length. The `custom-` prefix is required to avoid name conflicts with + * Google-managed configurations. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture + createInstanceConfigAsync( + String parent, InstanceConfig instanceConfig, String instanceConfigId) { + CreateInstanceConfigRequest request = + CreateInstanceConfigRequest.newBuilder() + .setParent(parent) + .setInstanceConfig(instanceConfig) + .setInstanceConfigId(instanceConfigId) + .build(); + return createInstanceConfigAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates an instance configuration and begins preparing it to be used. The returned long-running + * operation can be used to track the progress of preparing the new instance configuration. The + * instance configuration name is assigned by the caller. If the named instance configuration + * already exists, `CreateInstanceConfig` returns `ALREADY_EXISTS`. + * + *

Immediately after the request returns: + * + *

* The instance configuration is readable via the API, with all requested attributes. The + * instance configuration's + * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to + * true. Its state is `CREATING`. + * + *

While the operation is pending: + * + *

* Cancelling the operation renders the instance configuration immediately unreadable via + * the API. * Except for deleting the creating resource, all other attempts to modify the + * instance configuration are rejected. + * + *

Upon completion of the returned operation: + * + *

* Instances can be created using the instance configuration. * The instance + * configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + * field becomes false. Its state becomes `READY`. + * + *

The returned long-running operation will have a name of the format + * `<instance_config_name>/operations/<operation_id>` and can be used to track + * creation of the instance configuration. The metadata field type is + * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. + * The response field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], + * if successful. + * + *

Authorization requires `spanner.instanceConfigs.create` permission on the resource + * [parent][google.spanner.admin.instance.v1.CreateInstanceConfigRequest.parent]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   CreateInstanceConfigRequest request =
+   *       CreateInstanceConfigRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setInstanceConfigId("instanceConfigId1750947762")
+   *           .setInstanceConfig(InstanceConfig.newBuilder().build())
+   *           .setValidateOnly(true)
+   *           .build();
+   *   InstanceConfig response = instanceAdminClient.createInstanceConfigAsync(request).get();
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture + createInstanceConfigAsync(CreateInstanceConfigRequest request) { + return createInstanceConfigOperationCallable().futureCall(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates an instance configuration and begins preparing it to be used. The returned long-running + * operation can be used to track the progress of preparing the new instance configuration. The + * instance configuration name is assigned by the caller. If the named instance configuration + * already exists, `CreateInstanceConfig` returns `ALREADY_EXISTS`. + * + *

Immediately after the request returns: + * + *

* The instance configuration is readable via the API, with all requested attributes. The + * instance configuration's + * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to + * true. Its state is `CREATING`. + * + *

While the operation is pending: + * + *

* Cancelling the operation renders the instance configuration immediately unreadable via + * the API. * Except for deleting the creating resource, all other attempts to modify the + * instance configuration are rejected. + * + *

Upon completion of the returned operation: + * + *

* Instances can be created using the instance configuration. * The instance + * configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + * field becomes false. Its state becomes `READY`. + * + *

The returned long-running operation will have a name of the format + * `<instance_config_name>/operations/<operation_id>` and can be used to track + * creation of the instance configuration. The metadata field type is + * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. + * The response field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], + * if successful. + * + *

Authorization requires `spanner.instanceConfigs.create` permission on the resource + * [parent][google.spanner.admin.instance.v1.CreateInstanceConfigRequest.parent]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   CreateInstanceConfigRequest request =
+   *       CreateInstanceConfigRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setInstanceConfigId("instanceConfigId1750947762")
+   *           .setInstanceConfig(InstanceConfig.newBuilder().build())
+   *           .setValidateOnly(true)
+   *           .build();
+   *   OperationFuture future =
+   *       instanceAdminClient.createInstanceConfigOperationCallable().futureCall(request);
+   *   // Do something.
+   *   InstanceConfig response = future.get();
+   * }
+   * }
+ */ + public final OperationCallable< + CreateInstanceConfigRequest, InstanceConfig, CreateInstanceConfigMetadata> + createInstanceConfigOperationCallable() { + return stub.createInstanceConfigOperationCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates an instance configuration and begins preparing it to be used. The returned long-running + * operation can be used to track the progress of preparing the new instance configuration. The + * instance configuration name is assigned by the caller. If the named instance configuration + * already exists, `CreateInstanceConfig` returns `ALREADY_EXISTS`. + * + *

Immediately after the request returns: + * + *

* The instance configuration is readable via the API, with all requested attributes. The + * instance configuration's + * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to + * true. Its state is `CREATING`. + * + *

While the operation is pending: + * + *

* Cancelling the operation renders the instance configuration immediately unreadable via + * the API. * Except for deleting the creating resource, all other attempts to modify the + * instance configuration are rejected. + * + *

Upon completion of the returned operation: + * + *

* Instances can be created using the instance configuration. * The instance + * configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + * field becomes false. Its state becomes `READY`. + * + *

The returned long-running operation will have a name of the format + * `<instance_config_name>/operations/<operation_id>` and can be used to track + * creation of the instance configuration. The metadata field type is + * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. + * The response field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], + * if successful. + * + *

Authorization requires `spanner.instanceConfigs.create` permission on the resource + * [parent][google.spanner.admin.instance.v1.CreateInstanceConfigRequest.parent]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   CreateInstanceConfigRequest request =
+   *       CreateInstanceConfigRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setInstanceConfigId("instanceConfigId1750947762")
+   *           .setInstanceConfig(InstanceConfig.newBuilder().build())
+   *           .setValidateOnly(true)
+   *           .build();
+   *   ApiFuture future =
+   *       instanceAdminClient.createInstanceConfigCallable().futureCall(request);
+   *   // Do something.
+   *   Operation response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + createInstanceConfigCallable() { + return stub.createInstanceConfigCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an instance configuration. The returned long-running operation can be used to track the + * progress of updating the instance. If the named instance configuration does not exist, returns + * `NOT_FOUND`. + * + *

Only user-managed configurations can be updated. + * + *

Immediately after the request returns: + * + *

* The instance configuration's + * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to + * true. + * + *

While the operation is pending: + * + *

* Cancelling the operation sets its metadata's + * [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time]. The + * operation is guaranteed to succeed at undoing all changes, after which point it terminates with + * a `CANCELLED` status. * All other attempts to modify the instance configuration are + * rejected. * Reading the instance configuration via the API continues to give the + * pre-request values. + * + *

Upon completion of the returned operation: + * + *

* Creating instances using the instance configuration uses the new values. * The new + * values of the instance configuration are readable via the API. * The instance + * configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + * field becomes false. + * + *

The returned long-running operation will have a name of the format + * `<instance_config_name>/operations/<operation_id>` and can be used to track the + * instance configuration modification. The metadata field type is + * [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. + * The response field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], + * if successful. + * + *

Authorization requires `spanner.instanceConfigs.update` permission on the resource + * [name][google.spanner.admin.instance.v1.InstanceConfig.name]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   InstanceConfig instanceConfig = InstanceConfig.newBuilder().build();
+   *   FieldMask updateMask = FieldMask.newBuilder().build();
+   *   InstanceConfig response =
+   *       instanceAdminClient.updateInstanceConfigAsync(instanceConfig, updateMask).get();
+   * }
+   * }
+ * + * @param instanceConfig Required. The user instance configuration to update, which must always + * include the instance configuration name. Otherwise, only fields mentioned in + * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask] + * need be included. To prevent conflicts of concurrent updates, + * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can be used. + * @param updateMask Required. A mask specifying which fields in + * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] should be updated. The + * field mask must always be specified; this prevents any future fields in + * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] from being erased + * accidentally by clients that do not know about them. Only display_name and labels can be + * updated. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture + updateInstanceConfigAsync(InstanceConfig instanceConfig, FieldMask updateMask) { + UpdateInstanceConfigRequest request = + UpdateInstanceConfigRequest.newBuilder() + .setInstanceConfig(instanceConfig) + .setUpdateMask(updateMask) + .build(); + return updateInstanceConfigAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an instance configuration. The returned long-running operation can be used to track the + * progress of updating the instance. If the named instance configuration does not exist, returns + * `NOT_FOUND`. + * + *

Only user-managed configurations can be updated. + * + *

Immediately after the request returns: + * + *

* The instance configuration's + * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to + * true. + * + *

While the operation is pending: + * + *

* Cancelling the operation sets its metadata's + * [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time]. The + * operation is guaranteed to succeed at undoing all changes, after which point it terminates with + * a `CANCELLED` status. * All other attempts to modify the instance configuration are + * rejected. * Reading the instance configuration via the API continues to give the + * pre-request values. + * + *

Upon completion of the returned operation: + * + *

* Creating instances using the instance configuration uses the new values. * The new + * values of the instance configuration are readable via the API. * The instance + * configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + * field becomes false. + * + *

The returned long-running operation will have a name of the format + * `<instance_config_name>/operations/<operation_id>` and can be used to track the + * instance configuration modification. The metadata field type is + * [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. + * The response field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], + * if successful. + * + *

Authorization requires `spanner.instanceConfigs.update` permission on the resource + * [name][google.spanner.admin.instance.v1.InstanceConfig.name]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   UpdateInstanceConfigRequest request =
+   *       UpdateInstanceConfigRequest.newBuilder()
+   *           .setInstanceConfig(InstanceConfig.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .setValidateOnly(true)
+   *           .build();
+   *   InstanceConfig response = instanceAdminClient.updateInstanceConfigAsync(request).get();
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture + updateInstanceConfigAsync(UpdateInstanceConfigRequest request) { + return updateInstanceConfigOperationCallable().futureCall(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an instance configuration. The returned long-running operation can be used to track the + * progress of updating the instance. If the named instance configuration does not exist, returns + * `NOT_FOUND`. + * + *

Only user-managed configurations can be updated. + * + *

Immediately after the request returns: + * + *

* The instance configuration's + * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to + * true. + * + *

While the operation is pending: + * + *

* Cancelling the operation sets its metadata's + * [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time]. The + * operation is guaranteed to succeed at undoing all changes, after which point it terminates with + * a `CANCELLED` status. * All other attempts to modify the instance configuration are + * rejected. * Reading the instance configuration via the API continues to give the + * pre-request values. + * + *

Upon completion of the returned operation: + * + *

* Creating instances using the instance configuration uses the new values. * The new + * values of the instance configuration are readable via the API. * The instance + * configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + * field becomes false. + * + *

The returned long-running operation will have a name of the format + * `<instance_config_name>/operations/<operation_id>` and can be used to track the + * instance configuration modification. The metadata field type is + * [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. + * The response field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], + * if successful. + * + *

Authorization requires `spanner.instanceConfigs.update` permission on the resource + * [name][google.spanner.admin.instance.v1.InstanceConfig.name]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   UpdateInstanceConfigRequest request =
+   *       UpdateInstanceConfigRequest.newBuilder()
+   *           .setInstanceConfig(InstanceConfig.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .setValidateOnly(true)
+   *           .build();
+   *   OperationFuture future =
+   *       instanceAdminClient.updateInstanceConfigOperationCallable().futureCall(request);
+   *   // Do something.
+   *   InstanceConfig response = future.get();
+   * }
+   * }
+ */ + public final OperationCallable< + UpdateInstanceConfigRequest, InstanceConfig, UpdateInstanceConfigMetadata> + updateInstanceConfigOperationCallable() { + return stub.updateInstanceConfigOperationCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an instance configuration. The returned long-running operation can be used to track the + * progress of updating the instance. If the named instance configuration does not exist, returns + * `NOT_FOUND`. + * + *

Only user-managed configurations can be updated. + * + *

Immediately after the request returns: + * + *

* The instance configuration's + * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to + * true. + * + *

While the operation is pending: + * + *

* Cancelling the operation sets its metadata's + * [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time]. The + * operation is guaranteed to succeed at undoing all changes, after which point it terminates with + * a `CANCELLED` status. * All other attempts to modify the instance configuration are + * rejected. * Reading the instance configuration via the API continues to give the + * pre-request values. + * + *

Upon completion of the returned operation: + * + *

* Creating instances using the instance configuration uses the new values. * The new + * values of the instance configuration are readable via the API. * The instance + * configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + * field becomes false. + * + *

The returned long-running operation will have a name of the format + * `<instance_config_name>/operations/<operation_id>` and can be used to track the + * instance configuration modification. The metadata field type is + * [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. + * The response field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], + * if successful. + * + *

Authorization requires `spanner.instanceConfigs.update` permission on the resource + * [name][google.spanner.admin.instance.v1.InstanceConfig.name]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   UpdateInstanceConfigRequest request =
+   *       UpdateInstanceConfigRequest.newBuilder()
+   *           .setInstanceConfig(InstanceConfig.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .setValidateOnly(true)
+   *           .build();
+   *   ApiFuture future =
+   *       instanceAdminClient.updateInstanceConfigCallable().futureCall(request);
+   *   // Do something.
+   *   Operation response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + updateInstanceConfigCallable() { + return stub.updateInstanceConfigCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes the instance configuration. Deletion is only allowed when no instances are using the + * configuration. If any instances are using the configuration, returns `FAILED_PRECONDITION`. + * + *

Only user-managed configurations can be deleted. + * + *

Authorization requires `spanner.instanceConfigs.delete` permission on the resource + * [name][google.spanner.admin.instance.v1.InstanceConfig.name]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   InstanceConfigName name = InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]");
+   *   instanceAdminClient.deleteInstanceConfig(name);
+   * }
+   * }
+ * + * @param name Required. The name of the instance configuration to be deleted. Values are of the + * form `projects/<project>/instanceConfigs/<instance_config>` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteInstanceConfig(InstanceConfigName name) { + DeleteInstanceConfigRequest request = + DeleteInstanceConfigRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + deleteInstanceConfig(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes the instance configuration. Deletion is only allowed when no instances are using the + * configuration. If any instances are using the configuration, returns `FAILED_PRECONDITION`. + * + *

Only user-managed configurations can be deleted. + * + *

Authorization requires `spanner.instanceConfigs.delete` permission on the resource + * [name][google.spanner.admin.instance.v1.InstanceConfig.name]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   String name = InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString();
+   *   instanceAdminClient.deleteInstanceConfig(name);
+   * }
+   * }
+ * + * @param name Required. The name of the instance configuration to be deleted. Values are of the + * form `projects/<project>/instanceConfigs/<instance_config>` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteInstanceConfig(String name) { + DeleteInstanceConfigRequest request = + DeleteInstanceConfigRequest.newBuilder().setName(name).build(); + deleteInstanceConfig(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes the instance configuration. Deletion is only allowed when no instances are using the + * configuration. If any instances are using the configuration, returns `FAILED_PRECONDITION`. + * + *

Only user-managed configurations can be deleted. + * + *

Authorization requires `spanner.instanceConfigs.delete` permission on the resource + * [name][google.spanner.admin.instance.v1.InstanceConfig.name]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   DeleteInstanceConfigRequest request =
+   *       DeleteInstanceConfigRequest.newBuilder()
+   *           .setName(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString())
+   *           .setEtag("etag3123477")
+   *           .setValidateOnly(true)
+   *           .build();
+   *   instanceAdminClient.deleteInstanceConfig(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteInstanceConfig(DeleteInstanceConfigRequest request) { + deleteInstanceConfigCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes the instance configuration. Deletion is only allowed when no instances are using the + * configuration. If any instances are using the configuration, returns `FAILED_PRECONDITION`. + * + *

Only user-managed configurations can be deleted. + * + *

Authorization requires `spanner.instanceConfigs.delete` permission on the resource + * [name][google.spanner.admin.instance.v1.InstanceConfig.name]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   DeleteInstanceConfigRequest request =
+   *       DeleteInstanceConfigRequest.newBuilder()
+   *           .setName(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString())
+   *           .setEtag("etag3123477")
+   *           .setValidateOnly(true)
+   *           .build();
+   *   ApiFuture future =
+   *       instanceAdminClient.deleteInstanceConfigCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }
+ */ + public final UnaryCallable deleteInstanceConfigCallable() { + return stub.deleteInstanceConfigCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists the user-managed instance configuration long-running operations in the given project. An + * instance configuration operation has a name of the form + * `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`. + * The long-running operation metadata field type `metadata.type_url` describes the type of the + * metadata. Operations returned include those that have completed/failed/canceled within the last + * 7 days, and pending operations. Operations returned are ordered by + * `operation.metadata.value.start_time` in descending order starting from the most recently + * started operation. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ProjectName parent = ProjectName.of("[PROJECT]");
+   *   for (Operation element :
+   *       instanceAdminClient.listInstanceConfigOperations(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The project of the instance configuration operations. Values are of the + * form `projects/<project>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListInstanceConfigOperationsPagedResponse listInstanceConfigOperations( + ProjectName parent) { + ListInstanceConfigOperationsRequest request = + ListInstanceConfigOperationsRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listInstanceConfigOperations(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists the user-managed instance configuration long-running operations in the given project. An + * instance configuration operation has a name of the form + * `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`. + * The long-running operation metadata field type `metadata.type_url` describes the type of the + * metadata. Operations returned include those that have completed/failed/canceled within the last + * 7 days, and pending operations. Operations returned are ordered by + * `operation.metadata.value.start_time` in descending order starting from the most recently + * started operation. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   String parent = ProjectName.of("[PROJECT]").toString();
+   *   for (Operation element :
+   *       instanceAdminClient.listInstanceConfigOperations(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The project of the instance configuration operations. Values are of the + * form `projects/<project>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListInstanceConfigOperationsPagedResponse listInstanceConfigOperations( + String parent) { + ListInstanceConfigOperationsRequest request = + ListInstanceConfigOperationsRequest.newBuilder().setParent(parent).build(); + return listInstanceConfigOperations(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists the user-managed instance configuration long-running operations in the given project. An + * instance configuration operation has a name of the form + * `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`. + * The long-running operation metadata field type `metadata.type_url` describes the type of the + * metadata. Operations returned include those that have completed/failed/canceled within the last + * 7 days, and pending operations. Operations returned are ordered by + * `operation.metadata.value.start_time` in descending order starting from the most recently + * started operation. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ListInstanceConfigOperationsRequest request =
+   *       ListInstanceConfigOperationsRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setFilter("filter-1274492040")
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   for (Operation element :
+   *       instanceAdminClient.listInstanceConfigOperations(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListInstanceConfigOperationsPagedResponse listInstanceConfigOperations( + ListInstanceConfigOperationsRequest request) { + return listInstanceConfigOperationsPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists the user-managed instance configuration long-running operations in the given project. An + * instance configuration operation has a name of the form + * `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`. + * The long-running operation metadata field type `metadata.type_url` describes the type of the + * metadata. Operations returned include those that have completed/failed/canceled within the last + * 7 days, and pending operations. Operations returned are ordered by + * `operation.metadata.value.start_time` in descending order starting from the most recently + * started operation. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ListInstanceConfigOperationsRequest request =
+   *       ListInstanceConfigOperationsRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setFilter("filter-1274492040")
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   ApiFuture future =
+   *       instanceAdminClient.listInstanceConfigOperationsPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (Operation element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable< + ListInstanceConfigOperationsRequest, ListInstanceConfigOperationsPagedResponse> + listInstanceConfigOperationsPagedCallable() { + return stub.listInstanceConfigOperationsPagedCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists the user-managed instance configuration long-running operations in the given project. An + * instance configuration operation has a name of the form + * `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`. + * The long-running operation metadata field type `metadata.type_url` describes the type of the + * metadata. Operations returned include those that have completed/failed/canceled within the last + * 7 days, and pending operations. Operations returned are ordered by + * `operation.metadata.value.start_time` in descending order starting from the most recently + * started operation. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ListInstanceConfigOperationsRequest request =
+   *       ListInstanceConfigOperationsRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setFilter("filter-1274492040")
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .build();
+   *   while (true) {
+   *     ListInstanceConfigOperationsResponse response =
+   *         instanceAdminClient.listInstanceConfigOperationsCallable().call(request);
+   *     for (Operation element : response.getOperationsList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable< + ListInstanceConfigOperationsRequest, ListInstanceConfigOperationsResponse> + listInstanceConfigOperationsCallable() { + return stub.listInstanceConfigOperationsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all instances in the given project. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ProjectName parent = ProjectName.of("[PROJECT]");
+   *   for (Instance element : instanceAdminClient.listInstances(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The name of the project for which a list of instances is requested. + * Values are of the form `projects/<project>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListInstancesPagedResponse listInstances(ProjectName parent) { + ListInstancesRequest request = + ListInstancesRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listInstances(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all instances in the given project. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   String parent = ProjectName.of("[PROJECT]").toString();
+   *   for (Instance element : instanceAdminClient.listInstances(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The name of the project for which a list of instances is requested. + * Values are of the form `projects/<project>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListInstancesPagedResponse listInstances(String parent) { + ListInstancesRequest request = ListInstancesRequest.newBuilder().setParent(parent).build(); + return listInstances(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all instances in the given project. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ListInstancesRequest request =
+   *       ListInstancesRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setFilter("filter-1274492040")
+   *           .setInstanceDeadline(Timestamp.newBuilder().build())
+   *           .build();
+   *   for (Instance element : instanceAdminClient.listInstances(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListInstancesPagedResponse listInstances(ListInstancesRequest request) { + return listInstancesPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all instances in the given project. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ListInstancesRequest request =
+   *       ListInstancesRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setFilter("filter-1274492040")
+   *           .setInstanceDeadline(Timestamp.newBuilder().build())
+   *           .build();
+   *   ApiFuture future =
+   *       instanceAdminClient.listInstancesPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (Instance element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listInstancesPagedCallable() { + return stub.listInstancesPagedCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all instances in the given project. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ListInstancesRequest request =
+   *       ListInstancesRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setFilter("filter-1274492040")
+   *           .setInstanceDeadline(Timestamp.newBuilder().build())
+   *           .build();
+   *   while (true) {
+   *     ListInstancesResponse response = instanceAdminClient.listInstancesCallable().call(request);
+   *     for (Instance element : response.getInstancesList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable listInstancesCallable() { + return stub.listInstancesCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all instance partitions for the given instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]");
+   *   for (InstancePartition element :
+   *       instanceAdminClient.listInstancePartitions(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The instance whose instance partitions should be listed. Values are of + * the form `projects/<project>/instances/<instance>`. Use `{instance} = '-'` to + * list instance partitions for all Instances in a project, e.g., + * `projects/myproject/instances/-`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListInstancePartitionsPagedResponse listInstancePartitions(InstanceName parent) { + ListInstancePartitionsRequest request = + ListInstancePartitionsRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listInstancePartitions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all instance partitions for the given instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   String parent = InstanceName.of("[PROJECT]", "[INSTANCE]").toString();
+   *   for (InstancePartition element :
+   *       instanceAdminClient.listInstancePartitions(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The instance whose instance partitions should be listed. Values are of + * the form `projects/<project>/instances/<instance>`. Use `{instance} = '-'` to + * list instance partitions for all Instances in a project, e.g., + * `projects/myproject/instances/-`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListInstancePartitionsPagedResponse listInstancePartitions(String parent) { + ListInstancePartitionsRequest request = + ListInstancePartitionsRequest.newBuilder().setParent(parent).build(); + return listInstancePartitions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all instance partitions for the given instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ListInstancePartitionsRequest request =
+   *       ListInstancePartitionsRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setInstancePartitionDeadline(Timestamp.newBuilder().build())
+   *           .build();
+   *   for (InstancePartition element :
+   *       instanceAdminClient.listInstancePartitions(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListInstancePartitionsPagedResponse listInstancePartitions( + ListInstancePartitionsRequest request) { + return listInstancePartitionsPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all instance partitions for the given instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ListInstancePartitionsRequest request =
+   *       ListInstancePartitionsRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setInstancePartitionDeadline(Timestamp.newBuilder().build())
+   *           .build();
+   *   ApiFuture future =
+   *       instanceAdminClient.listInstancePartitionsPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (InstancePartition element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listInstancePartitionsPagedCallable() { + return stub.listInstancePartitionsPagedCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all instance partitions for the given instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ListInstancePartitionsRequest request =
+   *       ListInstancePartitionsRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setInstancePartitionDeadline(Timestamp.newBuilder().build())
+   *           .build();
+   *   while (true) {
+   *     ListInstancePartitionsResponse response =
+   *         instanceAdminClient.listInstancePartitionsCallable().call(request);
+   *     for (InstancePartition element : response.getInstancePartitionsList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listInstancePartitionsCallable() { + return stub.listInstancePartitionsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets information about a particular instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   InstanceName name = InstanceName.of("[PROJECT]", "[INSTANCE]");
+   *   Instance response = instanceAdminClient.getInstance(name);
+   * }
+   * }
+ * + * @param name Required. The name of the requested instance. Values are of the form + * `projects/<project>/instances/<instance>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Instance getInstance(InstanceName name) { + GetInstanceRequest request = + GetInstanceRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + return getInstance(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets information about a particular instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   String name = InstanceName.of("[PROJECT]", "[INSTANCE]").toString();
+   *   Instance response = instanceAdminClient.getInstance(name);
+   * }
+   * }
+ * + * @param name Required. The name of the requested instance. Values are of the form + * `projects/<project>/instances/<instance>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Instance getInstance(String name) { + GetInstanceRequest request = GetInstanceRequest.newBuilder().setName(name).build(); + return getInstance(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets information about a particular instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   GetInstanceRequest request =
+   *       GetInstanceRequest.newBuilder()
+   *           .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setFieldMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   Instance response = instanceAdminClient.getInstance(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Instance getInstance(GetInstanceRequest request) { + return getInstanceCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets information about a particular instance. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   GetInstanceRequest request =
+   *       GetInstanceRequest.newBuilder()
+   *           .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setFieldMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   ApiFuture future = instanceAdminClient.getInstanceCallable().futureCall(request);
+   *   // Do something.
+   *   Instance response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable getInstanceCallable() { + return stub.getInstanceCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates an instance and begins preparing it to begin serving. The returned long-running + * operation can be used to track the progress of preparing the new instance. The instance name is + * assigned by the caller. If the named instance already exists, `CreateInstance` returns + * `ALREADY_EXISTS`. + * + *

Immediately upon completion of this request: + * + *

* The instance is readable via the API, with all requested attributes but no allocated + * resources. Its state is `CREATING`. + * + *

Until completion of the returned operation: + * + *

* Cancelling the operation renders the instance immediately unreadable via the API. + * * The instance can be deleted. * All other attempts to modify the instance are + * rejected. + * + *

Upon completion of the returned operation: + * + *

* Billing for all successfully-allocated resources begins (some types may have lower + * than the requested levels). * Databases can be created in the instance. * The + * instance's allocated resource levels are readable via the API. * The instance's state + * becomes `READY`. + * + *

The returned long-running operation will have a name of the format + * `<instance_name>/operations/<operation_id>` and can be used to track creation of + * the instance. The metadata field type is + * [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. The response + * field type is [Instance][google.spanner.admin.instance.v1.Instance], if successful. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ProjectName parent = ProjectName.of("[PROJECT]");
+   *   String instanceId = "instanceId902024336";
+   *   Instance instance = Instance.newBuilder().build();
+   *   Instance response =
+   *       instanceAdminClient.createInstanceAsync(parent, instanceId, instance).get();
+   * }
+   * }
+ * + * @param parent Required. The name of the project in which to create the instance. Values are of + * the form `projects/<project>`. + * @param instanceId Required. The ID of the instance to create. Valid identifiers are of the form + * `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64 characters in length. + * @param instance Required. The instance to create. The name may be omitted, but if specified + * must be `<parent>/instances/<instance_id>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture createInstanceAsync( + ProjectName parent, String instanceId, Instance instance) { + CreateInstanceRequest request = + CreateInstanceRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setInstanceId(instanceId) + .setInstance(instance) + .build(); + return createInstanceAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates an instance and begins preparing it to begin serving. The returned long-running + * operation can be used to track the progress of preparing the new instance. The instance name is + * assigned by the caller. If the named instance already exists, `CreateInstance` returns + * `ALREADY_EXISTS`. + * + *

Immediately upon completion of this request: + * + *

* The instance is readable via the API, with all requested attributes but no allocated + * resources. Its state is `CREATING`. + * + *

Until completion of the returned operation: + * + *

* Cancelling the operation renders the instance immediately unreadable via the API. + * * The instance can be deleted. * All other attempts to modify the instance are + * rejected. + * + *

Upon completion of the returned operation: + * + *

* Billing for all successfully-allocated resources begins (some types may have lower + * than the requested levels). * Databases can be created in the instance. * The + * instance's allocated resource levels are readable via the API. * The instance's state + * becomes `READY`. + * + *

The returned long-running operation will have a name of the format + * `<instance_name>/operations/<operation_id>` and can be used to track creation of + * the instance. The metadata field type is + * [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. The response + * field type is [Instance][google.spanner.admin.instance.v1.Instance], if successful. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   String parent = ProjectName.of("[PROJECT]").toString();
+   *   String instanceId = "instanceId902024336";
+   *   Instance instance = Instance.newBuilder().build();
+   *   Instance response =
+   *       instanceAdminClient.createInstanceAsync(parent, instanceId, instance).get();
+   * }
+   * }
+ * + * @param parent Required. The name of the project in which to create the instance. Values are of + * the form `projects/<project>`. + * @param instanceId Required. The ID of the instance to create. Valid identifiers are of the form + * `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64 characters in length. + * @param instance Required. The instance to create. The name may be omitted, but if specified + * must be `<parent>/instances/<instance_id>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture createInstanceAsync( + String parent, String instanceId, Instance instance) { + CreateInstanceRequest request = + CreateInstanceRequest.newBuilder() + .setParent(parent) + .setInstanceId(instanceId) + .setInstance(instance) + .build(); + return createInstanceAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates an instance and begins preparing it to begin serving. The returned long-running + * operation can be used to track the progress of preparing the new instance. The instance name is + * assigned by the caller. If the named instance already exists, `CreateInstance` returns + * `ALREADY_EXISTS`. + * + *

Immediately upon completion of this request: + * + *

* The instance is readable via the API, with all requested attributes but no allocated + * resources. Its state is `CREATING`. + * + *

Until completion of the returned operation: + * + *

* Cancelling the operation renders the instance immediately unreadable via the API. + * * The instance can be deleted. * All other attempts to modify the instance are + * rejected. + * + *

Upon completion of the returned operation: + * + *

* Billing for all successfully-allocated resources begins (some types may have lower + * than the requested levels). * Databases can be created in the instance. * The + * instance's allocated resource levels are readable via the API. * The instance's state + * becomes `READY`. + * + *

The returned long-running operation will have a name of the format + * `<instance_name>/operations/<operation_id>` and can be used to track creation of + * the instance. The metadata field type is + * [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. The response + * field type is [Instance][google.spanner.admin.instance.v1.Instance], if successful. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   CreateInstanceRequest request =
+   *       CreateInstanceRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setInstanceId("instanceId902024336")
+   *           .setInstance(Instance.newBuilder().build())
+   *           .build();
+   *   Instance response = instanceAdminClient.createInstanceAsync(request).get();
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture createInstanceAsync( + CreateInstanceRequest request) { + return createInstanceOperationCallable().futureCall(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates an instance and begins preparing it to begin serving. The returned long-running + * operation can be used to track the progress of preparing the new instance. The instance name is + * assigned by the caller. If the named instance already exists, `CreateInstance` returns + * `ALREADY_EXISTS`. + * + *

Immediately upon completion of this request: + * + *

* The instance is readable via the API, with all requested attributes but no allocated + * resources. Its state is `CREATING`. + * + *

Until completion of the returned operation: + * + *

* Cancelling the operation renders the instance immediately unreadable via the API. + * * The instance can be deleted. * All other attempts to modify the instance are + * rejected. + * + *

Upon completion of the returned operation: + * + *

* Billing for all successfully-allocated resources begins (some types may have lower + * than the requested levels). * Databases can be created in the instance. * The + * instance's allocated resource levels are readable via the API. * The instance's state + * becomes `READY`. + * + *

The returned long-running operation will have a name of the format + * `<instance_name>/operations/<operation_id>` and can be used to track creation of + * the instance. The metadata field type is + * [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. The response + * field type is [Instance][google.spanner.admin.instance.v1.Instance], if successful. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   CreateInstanceRequest request =
+   *       CreateInstanceRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setInstanceId("instanceId902024336")
+   *           .setInstance(Instance.newBuilder().build())
+   *           .build();
+   *   OperationFuture future =
+   *       instanceAdminClient.createInstanceOperationCallable().futureCall(request);
+   *   // Do something.
+   *   Instance response = future.get();
+   * }
+   * }
+ */ + public final OperationCallable + createInstanceOperationCallable() { + return stub.createInstanceOperationCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates an instance and begins preparing it to begin serving. The returned long-running + * operation can be used to track the progress of preparing the new instance. The instance name is + * assigned by the caller. If the named instance already exists, `CreateInstance` returns + * `ALREADY_EXISTS`. + * + *

Immediately upon completion of this request: + * + *

* The instance is readable via the API, with all requested attributes but no allocated + * resources. Its state is `CREATING`. + * + *

Until completion of the returned operation: + * + *

* Cancelling the operation renders the instance immediately unreadable via the API. + * * The instance can be deleted. * All other attempts to modify the instance are + * rejected. + * + *

Upon completion of the returned operation: + * + *

* Billing for all successfully-allocated resources begins (some types may have lower + * than the requested levels). * Databases can be created in the instance. * The + * instance's allocated resource levels are readable via the API. * The instance's state + * becomes `READY`. + * + *

The returned long-running operation will have a name of the format + * `<instance_name>/operations/<operation_id>` and can be used to track creation of + * the instance. The metadata field type is + * [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. The response + * field type is [Instance][google.spanner.admin.instance.v1.Instance], if successful. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   CreateInstanceRequest request =
+   *       CreateInstanceRequest.newBuilder()
+   *           .setParent(ProjectName.of("[PROJECT]").toString())
+   *           .setInstanceId("instanceId902024336")
+   *           .setInstance(Instance.newBuilder().build())
+   *           .build();
+   *   ApiFuture future =
+   *       instanceAdminClient.createInstanceCallable().futureCall(request);
+   *   // Do something.
+   *   Operation response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable createInstanceCallable() { + return stub.createInstanceCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an instance, and begins allocating or releasing resources as requested. The returned + * long-running operation can be used to track the progress of updating the instance. If the named + * instance does not exist, returns `NOT_FOUND`. + * + *

Immediately upon completion of this request: + * + *

* For resource types for which a decrease in the instance's allocation has been + * requested, billing is based on the newly-requested level. + * + *

Until completion of the returned operation: + * + *

* Cancelling the operation sets its metadata's + * [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins + * restoring resources to their pre-request values. The operation is guaranteed to succeed at + * undoing all resource changes, after which point it terminates with a `CANCELLED` status. * + * All other attempts to modify the instance are rejected. * Reading the instance via the API + * continues to give the pre-request resource levels. + * + *

Upon completion of the returned operation: + * + *

* Billing begins for all successfully-allocated resources (some types may have lower + * than the requested levels). * All newly-reserved resources are available for serving the + * instance's tables. * The instance's new resource levels are readable via the API. + * + *

The returned long-running operation will have a name of the format + * `<instance_name>/operations/<operation_id>` and can be used to track the instance + * modification. The metadata field type is + * [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. The response + * field type is [Instance][google.spanner.admin.instance.v1.Instance], if successful. + * + *

Authorization requires `spanner.instances.update` permission on the resource + * [name][google.spanner.admin.instance.v1.Instance.name]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   Instance instance = Instance.newBuilder().build();
+   *   FieldMask fieldMask = FieldMask.newBuilder().build();
+   *   Instance response = instanceAdminClient.updateInstanceAsync(instance, fieldMask).get();
+   * }
+   * }
+ * + * @param instance Required. The instance to update, which must always include the instance name. + * Otherwise, only fields mentioned in + * [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask] need be + * included. + * @param fieldMask Required. A mask specifying which fields in + * [Instance][google.spanner.admin.instance.v1.Instance] should be updated. The field mask + * must always be specified; this prevents any future fields in + * [Instance][google.spanner.admin.instance.v1.Instance] from being erased accidentally by + * clients that do not know about them. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture updateInstanceAsync( + Instance instance, FieldMask fieldMask) { + UpdateInstanceRequest request = + UpdateInstanceRequest.newBuilder().setInstance(instance).setFieldMask(fieldMask).build(); + return updateInstanceAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an instance, and begins allocating or releasing resources as requested. The returned + * long-running operation can be used to track the progress of updating the instance. If the named + * instance does not exist, returns `NOT_FOUND`. + * + *

Immediately upon completion of this request: + * + *

* For resource types for which a decrease in the instance's allocation has been + * requested, billing is based on the newly-requested level. + * + *

Until completion of the returned operation: + * + *

* Cancelling the operation sets its metadata's + * [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins + * restoring resources to their pre-request values. The operation is guaranteed to succeed at + * undoing all resource changes, after which point it terminates with a `CANCELLED` status. * + * All other attempts to modify the instance are rejected. * Reading the instance via the API + * continues to give the pre-request resource levels. + * + *

Upon completion of the returned operation: + * + *

* Billing begins for all successfully-allocated resources (some types may have lower + * than the requested levels). * All newly-reserved resources are available for serving the + * instance's tables. * The instance's new resource levels are readable via the API. + * + *

The returned long-running operation will have a name of the format + * `<instance_name>/operations/<operation_id>` and can be used to track the instance + * modification. The metadata field type is + * [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. The response + * field type is [Instance][google.spanner.admin.instance.v1.Instance], if successful. + * + *

Authorization requires `spanner.instances.update` permission on the resource + * [name][google.spanner.admin.instance.v1.Instance.name]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   UpdateInstanceRequest request =
+   *       UpdateInstanceRequest.newBuilder()
+   *           .setInstance(Instance.newBuilder().build())
+   *           .setFieldMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   Instance response = instanceAdminClient.updateInstanceAsync(request).get();
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture updateInstanceAsync( + UpdateInstanceRequest request) { + return updateInstanceOperationCallable().futureCall(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an instance, and begins allocating or releasing resources as requested. The returned + * long-running operation can be used to track the progress of updating the instance. If the named + * instance does not exist, returns `NOT_FOUND`. + * + *

Immediately upon completion of this request: + * + *

* For resource types for which a decrease in the instance's allocation has been + * requested, billing is based on the newly-requested level. + * + *

Until completion of the returned operation: + * + *

* Cancelling the operation sets its metadata's + * [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins + * restoring resources to their pre-request values. The operation is guaranteed to succeed at + * undoing all resource changes, after which point it terminates with a `CANCELLED` status. * + * All other attempts to modify the instance are rejected. * Reading the instance via the API + * continues to give the pre-request resource levels. + * + *

Upon completion of the returned operation: + * + *

* Billing begins for all successfully-allocated resources (some types may have lower + * than the requested levels). * All newly-reserved resources are available for serving the + * instance's tables. * The instance's new resource levels are readable via the API. + * + *

The returned long-running operation will have a name of the format + * `<instance_name>/operations/<operation_id>` and can be used to track the instance + * modification. The metadata field type is + * [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. The response + * field type is [Instance][google.spanner.admin.instance.v1.Instance], if successful. + * + *

Authorization requires `spanner.instances.update` permission on the resource + * [name][google.spanner.admin.instance.v1.Instance.name]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   UpdateInstanceRequest request =
+   *       UpdateInstanceRequest.newBuilder()
+   *           .setInstance(Instance.newBuilder().build())
+   *           .setFieldMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   OperationFuture future =
+   *       instanceAdminClient.updateInstanceOperationCallable().futureCall(request);
+   *   // Do something.
+   *   Instance response = future.get();
+   * }
+   * }
+ */ + public final OperationCallable + updateInstanceOperationCallable() { + return stub.updateInstanceOperationCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an instance, and begins allocating or releasing resources as requested. The returned + * long-running operation can be used to track the progress of updating the instance. If the named + * instance does not exist, returns `NOT_FOUND`. + * + *

Immediately upon completion of this request: + * + *

* For resource types for which a decrease in the instance's allocation has been + * requested, billing is based on the newly-requested level. + * + *

Until completion of the returned operation: + * + *

* Cancelling the operation sets its metadata's + * [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins + * restoring resources to their pre-request values. The operation is guaranteed to succeed at + * undoing all resource changes, after which point it terminates with a `CANCELLED` status. * + * All other attempts to modify the instance are rejected. * Reading the instance via the API + * continues to give the pre-request resource levels. + * + *

Upon completion of the returned operation: + * + *

* Billing begins for all successfully-allocated resources (some types may have lower + * than the requested levels). * All newly-reserved resources are available for serving the + * instance's tables. * The instance's new resource levels are readable via the API. + * + *

The returned long-running operation will have a name of the format + * `<instance_name>/operations/<operation_id>` and can be used to track the instance + * modification. The metadata field type is + * [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. The response + * field type is [Instance][google.spanner.admin.instance.v1.Instance], if successful. + * + *

Authorization requires `spanner.instances.update` permission on the resource + * [name][google.spanner.admin.instance.v1.Instance.name]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   UpdateInstanceRequest request =
+   *       UpdateInstanceRequest.newBuilder()
+   *           .setInstance(Instance.newBuilder().build())
+   *           .setFieldMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   ApiFuture future =
+   *       instanceAdminClient.updateInstanceCallable().futureCall(request);
+   *   // Do something.
+   *   Operation response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable updateInstanceCallable() { + return stub.updateInstanceCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an instance. + * + *

Immediately upon completion of the request: + * + *

* Billing ceases for all of the instance's reserved resources. + * + *

Soon afterward: + * + *

* The instance and *all of its databases* immediately and irrevocably disappear + * from the API. All data in the databases is permanently deleted. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   InstanceName name = InstanceName.of("[PROJECT]", "[INSTANCE]");
+   *   instanceAdminClient.deleteInstance(name);
+   * }
+   * }
+ * + * @param name Required. The name of the instance to be deleted. Values are of the form + * `projects/<project>/instances/<instance>` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteInstance(InstanceName name) { + DeleteInstanceRequest request = + DeleteInstanceRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + deleteInstance(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an instance. + * + *

Immediately upon completion of the request: + * + *

* Billing ceases for all of the instance's reserved resources. + * + *

Soon afterward: + * + *

* The instance and *all of its databases* immediately and irrevocably disappear + * from the API. All data in the databases is permanently deleted. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   String name = InstanceName.of("[PROJECT]", "[INSTANCE]").toString();
+   *   instanceAdminClient.deleteInstance(name);
+   * }
+   * }
+ * + * @param name Required. The name of the instance to be deleted. Values are of the form + * `projects/<project>/instances/<instance>` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteInstance(String name) { + DeleteInstanceRequest request = DeleteInstanceRequest.newBuilder().setName(name).build(); + deleteInstance(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an instance. + * + *

Immediately upon completion of the request: + * + *

* Billing ceases for all of the instance's reserved resources. + * + *

Soon afterward: + * + *

* The instance and *all of its databases* immediately and irrevocably disappear + * from the API. All data in the databases is permanently deleted. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   DeleteInstanceRequest request =
+   *       DeleteInstanceRequest.newBuilder()
+   *           .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .build();
+   *   instanceAdminClient.deleteInstance(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteInstance(DeleteInstanceRequest request) { + deleteInstanceCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an instance. + * + *

Immediately upon completion of the request: + * + *

* Billing ceases for all of the instance's reserved resources. + * + *

Soon afterward: + * + *

* The instance and *all of its databases* immediately and irrevocably disappear + * from the API. All data in the databases is permanently deleted. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   DeleteInstanceRequest request =
+   *       DeleteInstanceRequest.newBuilder()
+   *           .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .build();
+   *   ApiFuture future = instanceAdminClient.deleteInstanceCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }
+ */ + public final UnaryCallable deleteInstanceCallable() { + return stub.deleteInstanceCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Sets the access control policy on an instance resource. Replaces any existing policy. + * + *

Authorization requires `spanner.instances.setIamPolicy` on + * [resource][google.iam.v1.SetIamPolicyRequest.resource]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ResourceName resource = InstanceName.of("[PROJECT]", "[INSTANCE]");
+   *   Policy policy = Policy.newBuilder().build();
+   *   Policy response = instanceAdminClient.setIamPolicy(resource, policy);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy is being specified. See the + * operation documentation for the appropriate value for this field. + * @param policy REQUIRED: The complete policy to be applied to the `resource`. The size of the + * policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud + * Platform services (such as Projects) might reject them. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy setIamPolicy(ResourceName resource, Policy policy) { + SetIamPolicyRequest request = + SetIamPolicyRequest.newBuilder() + .setResource(resource == null ? null : resource.toString()) + .setPolicy(policy) + .build(); + return setIamPolicy(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Sets the access control policy on an instance resource. Replaces any existing policy. + * + *

Authorization requires `spanner.instances.setIamPolicy` on + * [resource][google.iam.v1.SetIamPolicyRequest.resource]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   String resource = ProjectName.of("[PROJECT]").toString();
+   *   Policy policy = Policy.newBuilder().build();
+   *   Policy response = instanceAdminClient.setIamPolicy(resource, policy);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy is being specified. See the + * operation documentation for the appropriate value for this field. + * @param policy REQUIRED: The complete policy to be applied to the `resource`. The size of the + * policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud + * Platform services (such as Projects) might reject them. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy setIamPolicy(String resource, Policy policy) { + SetIamPolicyRequest request = + SetIamPolicyRequest.newBuilder().setResource(resource).setPolicy(policy).build(); + return setIamPolicy(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Sets the access control policy on an instance resource. Replaces any existing policy. + * + *

Authorization requires `spanner.instances.setIamPolicy` on + * [resource][google.iam.v1.SetIamPolicyRequest.resource]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   SetIamPolicyRequest request =
+   *       SetIamPolicyRequest.newBuilder()
+   *           .setResource(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setPolicy(Policy.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   Policy response = instanceAdminClient.setIamPolicy(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy setIamPolicy(SetIamPolicyRequest request) { + return setIamPolicyCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Sets the access control policy on an instance resource. Replaces any existing policy. + * + *

Authorization requires `spanner.instances.setIamPolicy` on + * [resource][google.iam.v1.SetIamPolicyRequest.resource]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   SetIamPolicyRequest request =
+   *       SetIamPolicyRequest.newBuilder()
+   *           .setResource(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setPolicy(Policy.newBuilder().build())
+   *           .setUpdateMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   ApiFuture future = instanceAdminClient.setIamPolicyCallable().futureCall(request);
+   *   // Do something.
+   *   Policy response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable setIamPolicyCallable() { + return stub.setIamPolicyCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the access control policy for an instance resource. Returns an empty policy if an instance + * exists but does not have a policy set. + * + *

Authorization requires `spanner.instances.getIamPolicy` on + * [resource][google.iam.v1.GetIamPolicyRequest.resource]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ResourceName resource = InstanceName.of("[PROJECT]", "[INSTANCE]");
+   *   Policy response = instanceAdminClient.getIamPolicy(resource);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy is being requested. See the + * operation documentation for the appropriate value for this field. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy getIamPolicy(ResourceName resource) { + GetIamPolicyRequest request = + GetIamPolicyRequest.newBuilder() + .setResource(resource == null ? null : resource.toString()) + .build(); + return getIamPolicy(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the access control policy for an instance resource. Returns an empty policy if an instance + * exists but does not have a policy set. + * + *

Authorization requires `spanner.instances.getIamPolicy` on + * [resource][google.iam.v1.GetIamPolicyRequest.resource]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   String resource = ProjectName.of("[PROJECT]").toString();
+   *   Policy response = instanceAdminClient.getIamPolicy(resource);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy is being requested. See the + * operation documentation for the appropriate value for this field. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy getIamPolicy(String resource) { + GetIamPolicyRequest request = GetIamPolicyRequest.newBuilder().setResource(resource).build(); + return getIamPolicy(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the access control policy for an instance resource. Returns an empty policy if an instance + * exists but does not have a policy set. + * + *

Authorization requires `spanner.instances.getIamPolicy` on + * [resource][google.iam.v1.GetIamPolicyRequest.resource]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   GetIamPolicyRequest request =
+   *       GetIamPolicyRequest.newBuilder()
+   *           .setResource(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setOptions(GetPolicyOptions.newBuilder().build())
+   *           .build();
+   *   Policy response = instanceAdminClient.getIamPolicy(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Policy getIamPolicy(GetIamPolicyRequest request) { + return getIamPolicyCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets the access control policy for an instance resource. Returns an empty policy if an instance + * exists but does not have a policy set. + * + *

Authorization requires `spanner.instances.getIamPolicy` on + * [resource][google.iam.v1.GetIamPolicyRequest.resource]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   GetIamPolicyRequest request =
+   *       GetIamPolicyRequest.newBuilder()
+   *           .setResource(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setOptions(GetPolicyOptions.newBuilder().build())
+   *           .build();
+   *   ApiFuture future = instanceAdminClient.getIamPolicyCallable().futureCall(request);
+   *   // Do something.
+   *   Policy response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable getIamPolicyCallable() { + return stub.getIamPolicyCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns permissions that the caller has on the specified instance resource. + * + *

Attempting this RPC on a non-existent Cloud Spanner instance resource will result in a + * NOT_FOUND error if the user has `spanner.instances.list` permission on the containing Google + * Cloud Project. Otherwise returns an empty set of permissions. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ResourceName resource = InstanceName.of("[PROJECT]", "[INSTANCE]");
+   *   List permissions = new ArrayList<>();
+   *   TestIamPermissionsResponse response =
+   *       instanceAdminClient.testIamPermissions(resource, permissions);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy detail is being requested. See the + * operation documentation for the appropriate value for this field. + * @param permissions The set of permissions to check for the `resource`. Permissions with + * wildcards (such as '*' or 'storage.*') are not allowed. For more information see + * [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final TestIamPermissionsResponse testIamPermissions( + ResourceName resource, List permissions) { + TestIamPermissionsRequest request = + TestIamPermissionsRequest.newBuilder() + .setResource(resource == null ? null : resource.toString()) + .addAllPermissions(permissions) + .build(); + return testIamPermissions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns permissions that the caller has on the specified instance resource. + * + *

Attempting this RPC on a non-existent Cloud Spanner instance resource will result in a + * NOT_FOUND error if the user has `spanner.instances.list` permission on the containing Google + * Cloud Project. Otherwise returns an empty set of permissions. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   String resource = ProjectName.of("[PROJECT]").toString();
+   *   List permissions = new ArrayList<>();
+   *   TestIamPermissionsResponse response =
+   *       instanceAdminClient.testIamPermissions(resource, permissions);
+   * }
+   * }
+ * + * @param resource REQUIRED: The resource for which the policy detail is being requested. See the + * operation documentation for the appropriate value for this field. + * @param permissions The set of permissions to check for the `resource`. Permissions with + * wildcards (such as '*' or 'storage.*') are not allowed. For more information see + * [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final TestIamPermissionsResponse testIamPermissions( + String resource, List permissions) { + TestIamPermissionsRequest request = + TestIamPermissionsRequest.newBuilder() + .setResource(resource) + .addAllPermissions(permissions) + .build(); + return testIamPermissions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns permissions that the caller has on the specified instance resource. + * + *

Attempting this RPC on a non-existent Cloud Spanner instance resource will result in a + * NOT_FOUND error if the user has `spanner.instances.list` permission on the containing Google + * Cloud Project. Otherwise returns an empty set of permissions. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   TestIamPermissionsRequest request =
+   *       TestIamPermissionsRequest.newBuilder()
+   *           .setResource(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .addAllPermissions(new ArrayList())
+   *           .build();
+   *   TestIamPermissionsResponse response = instanceAdminClient.testIamPermissions(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final TestIamPermissionsResponse testIamPermissions(TestIamPermissionsRequest request) { + return testIamPermissionsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Returns permissions that the caller has on the specified instance resource. + * + *

Attempting this RPC on a non-existent Cloud Spanner instance resource will result in a + * NOT_FOUND error if the user has `spanner.instances.list` permission on the containing Google + * Cloud Project. Otherwise returns an empty set of permissions. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   TestIamPermissionsRequest request =
+   *       TestIamPermissionsRequest.newBuilder()
+   *           .setResource(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .addAllPermissions(new ArrayList())
+   *           .build();
+   *   ApiFuture future =
+   *       instanceAdminClient.testIamPermissionsCallable().futureCall(request);
+   *   // Do something.
+   *   TestIamPermissionsResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + testIamPermissionsCallable() { + return stub.testIamPermissionsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets information about a particular instance partition. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   InstancePartitionName name =
+   *       InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]");
+   *   InstancePartition response = instanceAdminClient.getInstancePartition(name);
+   * }
+   * }
+ * + * @param name Required. The name of the requested instance partition. Values are of the form + * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final InstancePartition getInstancePartition(InstancePartitionName name) { + GetInstancePartitionRequest request = + GetInstancePartitionRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + return getInstancePartition(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets information about a particular instance partition. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   String name =
+   *       InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]").toString();
+   *   InstancePartition response = instanceAdminClient.getInstancePartition(name);
+   * }
+   * }
+ * + * @param name Required. The name of the requested instance partition. Values are of the form + * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final InstancePartition getInstancePartition(String name) { + GetInstancePartitionRequest request = + GetInstancePartitionRequest.newBuilder().setName(name).build(); + return getInstancePartition(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets information about a particular instance partition. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   GetInstancePartitionRequest request =
+   *       GetInstancePartitionRequest.newBuilder()
+   *           .setName(
+   *               InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]")
+   *                   .toString())
+   *           .build();
+   *   InstancePartition response = instanceAdminClient.getInstancePartition(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final InstancePartition getInstancePartition(GetInstancePartitionRequest request) { + return getInstancePartitionCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets information about a particular instance partition. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   GetInstancePartitionRequest request =
+   *       GetInstancePartitionRequest.newBuilder()
+   *           .setName(
+   *               InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]")
+   *                   .toString())
+   *           .build();
+   *   ApiFuture future =
+   *       instanceAdminClient.getInstancePartitionCallable().futureCall(request);
+   *   // Do something.
+   *   InstancePartition response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + getInstancePartitionCallable() { + return stub.getInstancePartitionCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates an instance partition and begins preparing it to be used. The returned long-running + * operation can be used to track the progress of preparing the new instance partition. The + * instance partition name is assigned by the caller. If the named instance partition already + * exists, `CreateInstancePartition` returns `ALREADY_EXISTS`. + * + *

Immediately upon completion of this request: + * + *

* The instance partition is readable via the API, with all requested attributes but no + * allocated resources. Its state is `CREATING`. + * + *

Until completion of the returned operation: + * + *

* Cancelling the operation renders the instance partition immediately unreadable via the + * API. * The instance partition can be deleted. * All other attempts to modify the + * instance partition are rejected. + * + *

Upon completion of the returned operation: + * + *

* Billing for all successfully-allocated resources begins (some types may have lower + * than the requested levels). * Databases can start using this instance partition. * The + * instance partition's allocated resource levels are readable via the API. * The instance + * partition's state becomes `READY`. + * + *

The returned long-running operation will have a name of the format + * `<instance_partition_name>/operations/<operation_id>` and can be used to track + * creation of the instance partition. The metadata field type is + * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]. + * The response field type is + * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if successful. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]");
+   *   InstancePartition instancePartition = InstancePartition.newBuilder().build();
+   *   String instancePartitionId = "instancePartitionId1364450768";
+   *   InstancePartition response =
+   *       instanceAdminClient
+   *           .createInstancePartitionAsync(parent, instancePartition, instancePartitionId)
+   *           .get();
+   * }
+   * }
+ * + * @param parent Required. The name of the instance in which to create the instance partition. + * Values are of the form `projects/<project>/instances/<instance>`. + * @param instancePartition Required. The instance partition to create. The + * instance_partition.name may be omitted, but if specified must be + * `<parent>/instancePartitions/<instance_partition_id>`. + * @param instancePartitionId Required. The ID of the instance partition to create. Valid + * identifiers are of the form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64 + * characters in length. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture + createInstancePartitionAsync( + InstanceName parent, InstancePartition instancePartition, String instancePartitionId) { + CreateInstancePartitionRequest request = + CreateInstancePartitionRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setInstancePartition(instancePartition) + .setInstancePartitionId(instancePartitionId) + .build(); + return createInstancePartitionAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates an instance partition and begins preparing it to be used. The returned long-running + * operation can be used to track the progress of preparing the new instance partition. The + * instance partition name is assigned by the caller. If the named instance partition already + * exists, `CreateInstancePartition` returns `ALREADY_EXISTS`. + * + *

Immediately upon completion of this request: + * + *

* The instance partition is readable via the API, with all requested attributes but no + * allocated resources. Its state is `CREATING`. + * + *

Until completion of the returned operation: + * + *

* Cancelling the operation renders the instance partition immediately unreadable via the + * API. * The instance partition can be deleted. * All other attempts to modify the + * instance partition are rejected. + * + *

Upon completion of the returned operation: + * + *

* Billing for all successfully-allocated resources begins (some types may have lower + * than the requested levels). * Databases can start using this instance partition. * The + * instance partition's allocated resource levels are readable via the API. * The instance + * partition's state becomes `READY`. + * + *

The returned long-running operation will have a name of the format + * `<instance_partition_name>/operations/<operation_id>` and can be used to track + * creation of the instance partition. The metadata field type is + * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]. + * The response field type is + * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if successful. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   String parent = InstanceName.of("[PROJECT]", "[INSTANCE]").toString();
+   *   InstancePartition instancePartition = InstancePartition.newBuilder().build();
+   *   String instancePartitionId = "instancePartitionId1364450768";
+   *   InstancePartition response =
+   *       instanceAdminClient
+   *           .createInstancePartitionAsync(parent, instancePartition, instancePartitionId)
+   *           .get();
+   * }
+   * }
+ * + * @param parent Required. The name of the instance in which to create the instance partition. + * Values are of the form `projects/<project>/instances/<instance>`. + * @param instancePartition Required. The instance partition to create. The + * instance_partition.name may be omitted, but if specified must be + * `<parent>/instancePartitions/<instance_partition_id>`. + * @param instancePartitionId Required. The ID of the instance partition to create. Valid + * identifiers are of the form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64 + * characters in length. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture + createInstancePartitionAsync( + String parent, InstancePartition instancePartition, String instancePartitionId) { + CreateInstancePartitionRequest request = + CreateInstancePartitionRequest.newBuilder() + .setParent(parent) + .setInstancePartition(instancePartition) + .setInstancePartitionId(instancePartitionId) + .build(); + return createInstancePartitionAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates an instance partition and begins preparing it to be used. The returned long-running + * operation can be used to track the progress of preparing the new instance partition. The + * instance partition name is assigned by the caller. If the named instance partition already + * exists, `CreateInstancePartition` returns `ALREADY_EXISTS`. + * + *

Immediately upon completion of this request: + * + *

* The instance partition is readable via the API, with all requested attributes but no + * allocated resources. Its state is `CREATING`. + * + *

Until completion of the returned operation: + * + *

* Cancelling the operation renders the instance partition immediately unreadable via the + * API. * The instance partition can be deleted. * All other attempts to modify the + * instance partition are rejected. + * + *

Upon completion of the returned operation: + * + *

* Billing for all successfully-allocated resources begins (some types may have lower + * than the requested levels). * Databases can start using this instance partition. * The + * instance partition's allocated resource levels are readable via the API. * The instance + * partition's state becomes `READY`. + * + *

The returned long-running operation will have a name of the format + * `<instance_partition_name>/operations/<operation_id>` and can be used to track + * creation of the instance partition. The metadata field type is + * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]. + * The response field type is + * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if successful. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   CreateInstancePartitionRequest request =
+   *       CreateInstancePartitionRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setInstancePartitionId("instancePartitionId1364450768")
+   *           .setInstancePartition(InstancePartition.newBuilder().build())
+   *           .build();
+   *   InstancePartition response = instanceAdminClient.createInstancePartitionAsync(request).get();
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture + createInstancePartitionAsync(CreateInstancePartitionRequest request) { + return createInstancePartitionOperationCallable().futureCall(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates an instance partition and begins preparing it to be used. The returned long-running + * operation can be used to track the progress of preparing the new instance partition. The + * instance partition name is assigned by the caller. If the named instance partition already + * exists, `CreateInstancePartition` returns `ALREADY_EXISTS`. + * + *

Immediately upon completion of this request: + * + *

* The instance partition is readable via the API, with all requested attributes but no + * allocated resources. Its state is `CREATING`. + * + *

Until completion of the returned operation: + * + *

* Cancelling the operation renders the instance partition immediately unreadable via the + * API. * The instance partition can be deleted. * All other attempts to modify the + * instance partition are rejected. + * + *

Upon completion of the returned operation: + * + *

* Billing for all successfully-allocated resources begins (some types may have lower + * than the requested levels). * Databases can start using this instance partition. * The + * instance partition's allocated resource levels are readable via the API. * The instance + * partition's state becomes `READY`. + * + *

The returned long-running operation will have a name of the format + * `<instance_partition_name>/operations/<operation_id>` and can be used to track + * creation of the instance partition. The metadata field type is + * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]. + * The response field type is + * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if successful. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   CreateInstancePartitionRequest request =
+   *       CreateInstancePartitionRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setInstancePartitionId("instancePartitionId1364450768")
+   *           .setInstancePartition(InstancePartition.newBuilder().build())
+   *           .build();
+   *   OperationFuture future =
+   *       instanceAdminClient.createInstancePartitionOperationCallable().futureCall(request);
+   *   // Do something.
+   *   InstancePartition response = future.get();
+   * }
+   * }
+ */ + public final OperationCallable< + CreateInstancePartitionRequest, InstancePartition, CreateInstancePartitionMetadata> + createInstancePartitionOperationCallable() { + return stub.createInstancePartitionOperationCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates an instance partition and begins preparing it to be used. The returned long-running + * operation can be used to track the progress of preparing the new instance partition. The + * instance partition name is assigned by the caller. If the named instance partition already + * exists, `CreateInstancePartition` returns `ALREADY_EXISTS`. + * + *

Immediately upon completion of this request: + * + *

* The instance partition is readable via the API, with all requested attributes but no + * allocated resources. Its state is `CREATING`. + * + *

Until completion of the returned operation: + * + *

* Cancelling the operation renders the instance partition immediately unreadable via the + * API. * The instance partition can be deleted. * All other attempts to modify the + * instance partition are rejected. + * + *

Upon completion of the returned operation: + * + *

* Billing for all successfully-allocated resources begins (some types may have lower + * than the requested levels). * Databases can start using this instance partition. * The + * instance partition's allocated resource levels are readable via the API. * The instance + * partition's state becomes `READY`. + * + *

The returned long-running operation will have a name of the format + * `<instance_partition_name>/operations/<operation_id>` and can be used to track + * creation of the instance partition. The metadata field type is + * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]. + * The response field type is + * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if successful. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   CreateInstancePartitionRequest request =
+   *       CreateInstancePartitionRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setInstancePartitionId("instancePartitionId1364450768")
+   *           .setInstancePartition(InstancePartition.newBuilder().build())
+   *           .build();
+   *   ApiFuture future =
+   *       instanceAdminClient.createInstancePartitionCallable().futureCall(request);
+   *   // Do something.
+   *   Operation response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + createInstancePartitionCallable() { + return stub.createInstancePartitionCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an existing instance partition. Requires that the instance partition is not used by any + * database or backup and is not the default instance partition of an instance. + * + *

Authorization requires `spanner.instancePartitions.delete` permission on the resource + * [name][google.spanner.admin.instance.v1.InstancePartition.name]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   InstancePartitionName name =
+   *       InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]");
+   *   instanceAdminClient.deleteInstancePartition(name);
+   * }
+   * }
+ * + * @param name Required. The name of the instance partition to be deleted. Values are of the form + * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteInstancePartition(InstancePartitionName name) { + DeleteInstancePartitionRequest request = + DeleteInstancePartitionRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + deleteInstancePartition(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an existing instance partition. Requires that the instance partition is not used by any + * database or backup and is not the default instance partition of an instance. + * + *

Authorization requires `spanner.instancePartitions.delete` permission on the resource + * [name][google.spanner.admin.instance.v1.InstancePartition.name]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   String name =
+   *       InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]").toString();
+   *   instanceAdminClient.deleteInstancePartition(name);
+   * }
+   * }
+ * + * @param name Required. The name of the instance partition to be deleted. Values are of the form + * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}` + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteInstancePartition(String name) { + DeleteInstancePartitionRequest request = + DeleteInstancePartitionRequest.newBuilder().setName(name).build(); + deleteInstancePartition(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an existing instance partition. Requires that the instance partition is not used by any + * database or backup and is not the default instance partition of an instance. + * + *

Authorization requires `spanner.instancePartitions.delete` permission on the resource + * [name][google.spanner.admin.instance.v1.InstancePartition.name]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   DeleteInstancePartitionRequest request =
+   *       DeleteInstancePartitionRequest.newBuilder()
+   *           .setName(
+   *               InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]")
+   *                   .toString())
+   *           .setEtag("etag3123477")
+   *           .build();
+   *   instanceAdminClient.deleteInstancePartition(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteInstancePartition(DeleteInstancePartitionRequest request) { + deleteInstancePartitionCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes an existing instance partition. Requires that the instance partition is not used by any + * database or backup and is not the default instance partition of an instance. + * + *

Authorization requires `spanner.instancePartitions.delete` permission on the resource + * [name][google.spanner.admin.instance.v1.InstancePartition.name]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   DeleteInstancePartitionRequest request =
+   *       DeleteInstancePartitionRequest.newBuilder()
+   *           .setName(
+   *               InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]")
+   *                   .toString())
+   *           .setEtag("etag3123477")
+   *           .build();
+   *   ApiFuture future =
+   *       instanceAdminClient.deleteInstancePartitionCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + deleteInstancePartitionCallable() { + return stub.deleteInstancePartitionCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an instance partition, and begins allocating or releasing resources as requested. The + * returned long-running operation can be used to track the progress of updating the instance + * partition. If the named instance partition does not exist, returns `NOT_FOUND`. + * + *

Immediately upon completion of this request: + * + *

* For resource types for which a decrease in the instance partition's allocation has + * been requested, billing is based on the newly-requested level. + * + *

Until completion of the returned operation: + * + *

* Cancelling the operation sets its metadata's + * [cancel_time][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.cancel_time], + * and begins restoring resources to their pre-request values. The operation is guaranteed to + * succeed at undoing all resource changes, after which point it terminates with a `CANCELLED` + * status. * All other attempts to modify the instance partition are rejected. * Reading + * the instance partition via the API continues to give the pre-request resource levels. + * + *

Upon completion of the returned operation: + * + *

* Billing begins for all successfully-allocated resources (some types may have lower + * than the requested levels). * All newly-reserved resources are available for serving the + * instance partition's tables. * The instance partition's new resource levels are readable + * via the API. + * + *

The returned long-running operation will have a name of the format + * `<instance_partition_name>/operations/<operation_id>` and can be used to track the + * instance partition modification. The metadata field type is + * [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata]. + * The response field type is + * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if successful. + * + *

Authorization requires `spanner.instancePartitions.update` permission on the resource + * [name][google.spanner.admin.instance.v1.InstancePartition.name]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   InstancePartition instancePartition = InstancePartition.newBuilder().build();
+   *   FieldMask fieldMask = FieldMask.newBuilder().build();
+   *   InstancePartition response =
+   *       instanceAdminClient.updateInstancePartitionAsync(instancePartition, fieldMask).get();
+   * }
+   * }
+ * + * @param instancePartition Required. The instance partition to update, which must always include + * the instance partition name. Otherwise, only fields mentioned in + * [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask] + * need be included. + * @param fieldMask Required. A mask specifying which fields in + * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition] should be updated. + * The field mask must always be specified; this prevents any future fields in + * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition] from being erased + * accidentally by clients that do not know about them. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture + updateInstancePartitionAsync(InstancePartition instancePartition, FieldMask fieldMask) { + UpdateInstancePartitionRequest request = + UpdateInstancePartitionRequest.newBuilder() + .setInstancePartition(instancePartition) + .setFieldMask(fieldMask) + .build(); + return updateInstancePartitionAsync(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an instance partition, and begins allocating or releasing resources as requested. The + * returned long-running operation can be used to track the progress of updating the instance + * partition. If the named instance partition does not exist, returns `NOT_FOUND`. + * + *

Immediately upon completion of this request: + * + *

* For resource types for which a decrease in the instance partition's allocation has + * been requested, billing is based on the newly-requested level. + * + *

Until completion of the returned operation: + * + *

* Cancelling the operation sets its metadata's + * [cancel_time][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.cancel_time], + * and begins restoring resources to their pre-request values. The operation is guaranteed to + * succeed at undoing all resource changes, after which point it terminates with a `CANCELLED` + * status. * All other attempts to modify the instance partition are rejected. * Reading + * the instance partition via the API continues to give the pre-request resource levels. + * + *

Upon completion of the returned operation: + * + *

* Billing begins for all successfully-allocated resources (some types may have lower + * than the requested levels). * All newly-reserved resources are available for serving the + * instance partition's tables. * The instance partition's new resource levels are readable + * via the API. + * + *

The returned long-running operation will have a name of the format + * `<instance_partition_name>/operations/<operation_id>` and can be used to track the + * instance partition modification. The metadata field type is + * [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata]. + * The response field type is + * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if successful. + * + *

Authorization requires `spanner.instancePartitions.update` permission on the resource + * [name][google.spanner.admin.instance.v1.InstancePartition.name]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   UpdateInstancePartitionRequest request =
+   *       UpdateInstancePartitionRequest.newBuilder()
+   *           .setInstancePartition(InstancePartition.newBuilder().build())
+   *           .setFieldMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   InstancePartition response = instanceAdminClient.updateInstancePartitionAsync(request).get();
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture + updateInstancePartitionAsync(UpdateInstancePartitionRequest request) { + return updateInstancePartitionOperationCallable().futureCall(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an instance partition, and begins allocating or releasing resources as requested. The + * returned long-running operation can be used to track the progress of updating the instance + * partition. If the named instance partition does not exist, returns `NOT_FOUND`. + * + *

Immediately upon completion of this request: + * + *

* For resource types for which a decrease in the instance partition's allocation has + * been requested, billing is based on the newly-requested level. + * + *

Until completion of the returned operation: + * + *

* Cancelling the operation sets its metadata's + * [cancel_time][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.cancel_time], + * and begins restoring resources to their pre-request values. The operation is guaranteed to + * succeed at undoing all resource changes, after which point it terminates with a `CANCELLED` + * status. * All other attempts to modify the instance partition are rejected. * Reading + * the instance partition via the API continues to give the pre-request resource levels. + * + *

Upon completion of the returned operation: + * + *

* Billing begins for all successfully-allocated resources (some types may have lower + * than the requested levels). * All newly-reserved resources are available for serving the + * instance partition's tables. * The instance partition's new resource levels are readable + * via the API. + * + *

The returned long-running operation will have a name of the format + * `<instance_partition_name>/operations/<operation_id>` and can be used to track the + * instance partition modification. The metadata field type is + * [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata]. + * The response field type is + * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if successful. + * + *

Authorization requires `spanner.instancePartitions.update` permission on the resource + * [name][google.spanner.admin.instance.v1.InstancePartition.name]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   UpdateInstancePartitionRequest request =
+   *       UpdateInstancePartitionRequest.newBuilder()
+   *           .setInstancePartition(InstancePartition.newBuilder().build())
+   *           .setFieldMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   OperationFuture future =
+   *       instanceAdminClient.updateInstancePartitionOperationCallable().futureCall(request);
+   *   // Do something.
+   *   InstancePartition response = future.get();
+   * }
+   * }
+ */ + public final OperationCallable< + UpdateInstancePartitionRequest, InstancePartition, UpdateInstancePartitionMetadata> + updateInstancePartitionOperationCallable() { + return stub.updateInstancePartitionOperationCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates an instance partition, and begins allocating or releasing resources as requested. The + * returned long-running operation can be used to track the progress of updating the instance + * partition. If the named instance partition does not exist, returns `NOT_FOUND`. + * + *

Immediately upon completion of this request: + * + *

* For resource types for which a decrease in the instance partition's allocation has + * been requested, billing is based on the newly-requested level. + * + *

Until completion of the returned operation: + * + *

* Cancelling the operation sets its metadata's + * [cancel_time][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.cancel_time], + * and begins restoring resources to their pre-request values. The operation is guaranteed to + * succeed at undoing all resource changes, after which point it terminates with a `CANCELLED` + * status. * All other attempts to modify the instance partition are rejected. * Reading + * the instance partition via the API continues to give the pre-request resource levels. + * + *

Upon completion of the returned operation: + * + *

* Billing begins for all successfully-allocated resources (some types may have lower + * than the requested levels). * All newly-reserved resources are available for serving the + * instance partition's tables. * The instance partition's new resource levels are readable + * via the API. + * + *

The returned long-running operation will have a name of the format + * `<instance_partition_name>/operations/<operation_id>` and can be used to track the + * instance partition modification. The metadata field type is + * [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata]. + * The response field type is + * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if successful. + * + *

Authorization requires `spanner.instancePartitions.update` permission on the resource + * [name][google.spanner.admin.instance.v1.InstancePartition.name]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   UpdateInstancePartitionRequest request =
+   *       UpdateInstancePartitionRequest.newBuilder()
+   *           .setInstancePartition(InstancePartition.newBuilder().build())
+   *           .setFieldMask(FieldMask.newBuilder().build())
+   *           .build();
+   *   ApiFuture future =
+   *       instanceAdminClient.updateInstancePartitionCallable().futureCall(request);
+   *   // Do something.
+   *   Operation response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + updateInstancePartitionCallable() { + return stub.updateInstancePartitionCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists instance partition long-running operations in the given instance. An instance partition + * operation has a name of the form + * `projects/<project>/instances/<instance>/instancePartitions/<instance_partition>/operations/<operation>`. + * The long-running operation metadata field type `metadata.type_url` describes the type of the + * metadata. Operations returned include those that have completed/failed/canceled within the last + * 7 days, and pending operations. Operations returned are ordered by + * `operation.metadata.value.start_time` in descending order starting from the most recently + * started operation. + * + *

Authorization requires `spanner.instancePartitionOperations.list` permission on the resource + * [parent][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.parent]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]");
+   *   for (Operation element :
+   *       instanceAdminClient.listInstancePartitionOperations(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The parent instance of the instance partition operations. Values are of + * the form `projects/<project>/instances/<instance>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListInstancePartitionOperationsPagedResponse listInstancePartitionOperations( + InstanceName parent) { + ListInstancePartitionOperationsRequest request = + ListInstancePartitionOperationsRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listInstancePartitionOperations(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists instance partition long-running operations in the given instance. An instance partition + * operation has a name of the form + * `projects/<project>/instances/<instance>/instancePartitions/<instance_partition>/operations/<operation>`. + * The long-running operation metadata field type `metadata.type_url` describes the type of the + * metadata. Operations returned include those that have completed/failed/canceled within the last + * 7 days, and pending operations. Operations returned are ordered by + * `operation.metadata.value.start_time` in descending order starting from the most recently + * started operation. + * + *

Authorization requires `spanner.instancePartitionOperations.list` permission on the resource + * [parent][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.parent]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   String parent = InstanceName.of("[PROJECT]", "[INSTANCE]").toString();
+   *   for (Operation element :
+   *       instanceAdminClient.listInstancePartitionOperations(parent).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param parent Required. The parent instance of the instance partition operations. Values are of + * the form `projects/<project>/instances/<instance>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListInstancePartitionOperationsPagedResponse listInstancePartitionOperations( + String parent) { + ListInstancePartitionOperationsRequest request = + ListInstancePartitionOperationsRequest.newBuilder().setParent(parent).build(); + return listInstancePartitionOperations(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists instance partition long-running operations in the given instance. An instance partition + * operation has a name of the form + * `projects/<project>/instances/<instance>/instancePartitions/<instance_partition>/operations/<operation>`. + * The long-running operation metadata field type `metadata.type_url` describes the type of the + * metadata. Operations returned include those that have completed/failed/canceled within the last + * 7 days, and pending operations. Operations returned are ordered by + * `operation.metadata.value.start_time` in descending order starting from the most recently + * started operation. + * + *

Authorization requires `spanner.instancePartitionOperations.list` permission on the resource + * [parent][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.parent]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ListInstancePartitionOperationsRequest request =
+   *       ListInstancePartitionOperationsRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setFilter("filter-1274492040")
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setInstancePartitionDeadline(Timestamp.newBuilder().build())
+   *           .build();
+   *   for (Operation element :
+   *       instanceAdminClient.listInstancePartitionOperations(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListInstancePartitionOperationsPagedResponse listInstancePartitionOperations( + ListInstancePartitionOperationsRequest request) { + return listInstancePartitionOperationsPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists instance partition long-running operations in the given instance. An instance partition + * operation has a name of the form + * `projects/<project>/instances/<instance>/instancePartitions/<instance_partition>/operations/<operation>`. + * The long-running operation metadata field type `metadata.type_url` describes the type of the + * metadata. Operations returned include those that have completed/failed/canceled within the last + * 7 days, and pending operations. Operations returned are ordered by + * `operation.metadata.value.start_time` in descending order starting from the most recently + * started operation. + * + *

Authorization requires `spanner.instancePartitionOperations.list` permission on the resource + * [parent][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.parent]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ListInstancePartitionOperationsRequest request =
+   *       ListInstancePartitionOperationsRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setFilter("filter-1274492040")
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setInstancePartitionDeadline(Timestamp.newBuilder().build())
+   *           .build();
+   *   ApiFuture future =
+   *       instanceAdminClient.listInstancePartitionOperationsPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (Operation element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable< + ListInstancePartitionOperationsRequest, ListInstancePartitionOperationsPagedResponse> + listInstancePartitionOperationsPagedCallable() { + return stub.listInstancePartitionOperationsPagedCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists instance partition long-running operations in the given instance. An instance partition + * operation has a name of the form + * `projects/<project>/instances/<instance>/instancePartitions/<instance_partition>/operations/<operation>`. + * The long-running operation metadata field type `metadata.type_url` describes the type of the + * metadata. Operations returned include those that have completed/failed/canceled within the last + * 7 days, and pending operations. Operations returned are ordered by + * `operation.metadata.value.start_time` in descending order starting from the most recently + * started operation. + * + *

Authorization requires `spanner.instancePartitionOperations.list` permission on the resource + * [parent][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.parent]. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   ListInstancePartitionOperationsRequest request =
+   *       ListInstancePartitionOperationsRequest.newBuilder()
+   *           .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setFilter("filter-1274492040")
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setInstancePartitionDeadline(Timestamp.newBuilder().build())
+   *           .build();
+   *   while (true) {
+   *     ListInstancePartitionOperationsResponse response =
+   *         instanceAdminClient.listInstancePartitionOperationsCallable().call(request);
+   *     for (Operation element : response.getOperationsList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable< + ListInstancePartitionOperationsRequest, ListInstancePartitionOperationsResponse> + listInstancePartitionOperationsCallable() { + return stub.listInstancePartitionOperationsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Moves an instance to the target instance configuration. You can use the returned long-running + * operation to track the progress of moving the instance. + * + *

`MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of the following + * criteria: + * + *

* Is undergoing a move to a different instance configuration * Has backups * Has + * an ongoing update * Contains any CMEK-enabled databases * Is a free trial instance + * + *

While the operation is pending: + * + *

* All other attempts to modify the instance, including changes to its compute capacity, + * are rejected. * The following database and backup admin operations are rejected: + * + *

* `DatabaseAdmin.CreateDatabase` * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if + * default_leader is specified in the request.) * `DatabaseAdmin.RestoreDatabase` * + * `DatabaseAdmin.CreateBackup` * `DatabaseAdmin.CopyBackup` + * + *

* Both the source and target instance configurations are subject to hourly compute and + * storage charges. * The instance might experience higher read-write latencies and a higher + * transaction abort rate. However, moving an instance doesn't cause any downtime. + * + *

The returned long-running operation has a name of the format + * `<instance_name>/operations/<operation_id>` and can be used to track the move + * instance operation. The metadata field type is + * [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata]. The response + * field type is [Instance][google.spanner.admin.instance.v1.Instance], if successful. Cancelling + * the operation sets its metadata's + * [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time]. Cancellation + * is not immediate because it involves moving any data previously moved to the target instance + * configuration back to the original instance configuration. You can use this operation to track + * the progress of the cancellation. Upon successful completion of the cancellation, the operation + * terminates with `CANCELLED` status. + * + *

If not cancelled, upon completion of the returned operation: + * + *

* The instance successfully moves to the target instance configuration. * You are + * billed for compute and storage in target instance configuration. + * + *

Authorization requires the `spanner.instances.update` permission on the resource + * [instance][google.spanner.admin.instance.v1.Instance]. + * + *

For more details, see [Move an + * instance](https://cloud.google.com/spanner/docs/move-instance). + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   MoveInstanceRequest request =
+   *       MoveInstanceRequest.newBuilder()
+   *           .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setTargetConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString())
+   *           .build();
+   *   MoveInstanceResponse response = instanceAdminClient.moveInstanceAsync(request).get();
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture moveInstanceAsync( + MoveInstanceRequest request) { + return moveInstanceOperationCallable().futureCall(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Moves an instance to the target instance configuration. You can use the returned long-running + * operation to track the progress of moving the instance. + * + *

`MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of the following + * criteria: + * + *

* Is undergoing a move to a different instance configuration * Has backups * Has + * an ongoing update * Contains any CMEK-enabled databases * Is a free trial instance + * + *

While the operation is pending: + * + *

* All other attempts to modify the instance, including changes to its compute capacity, + * are rejected. * The following database and backup admin operations are rejected: + * + *

* `DatabaseAdmin.CreateDatabase` * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if + * default_leader is specified in the request.) * `DatabaseAdmin.RestoreDatabase` * + * `DatabaseAdmin.CreateBackup` * `DatabaseAdmin.CopyBackup` + * + *

* Both the source and target instance configurations are subject to hourly compute and + * storage charges. * The instance might experience higher read-write latencies and a higher + * transaction abort rate. However, moving an instance doesn't cause any downtime. + * + *

The returned long-running operation has a name of the format + * `<instance_name>/operations/<operation_id>` and can be used to track the move + * instance operation. The metadata field type is + * [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata]. The response + * field type is [Instance][google.spanner.admin.instance.v1.Instance], if successful. Cancelling + * the operation sets its metadata's + * [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time]. Cancellation + * is not immediate because it involves moving any data previously moved to the target instance + * configuration back to the original instance configuration. You can use this operation to track + * the progress of the cancellation. Upon successful completion of the cancellation, the operation + * terminates with `CANCELLED` status. + * + *

If not cancelled, upon completion of the returned operation: + * + *

* The instance successfully moves to the target instance configuration. * You are + * billed for compute and storage in target instance configuration. + * + *

Authorization requires the `spanner.instances.update` permission on the resource + * [instance][google.spanner.admin.instance.v1.Instance]. + * + *

For more details, see [Move an + * instance](https://cloud.google.com/spanner/docs/move-instance). + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   MoveInstanceRequest request =
+   *       MoveInstanceRequest.newBuilder()
+   *           .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setTargetConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString())
+   *           .build();
+   *   OperationFuture future =
+   *       instanceAdminClient.moveInstanceOperationCallable().futureCall(request);
+   *   // Do something.
+   *   MoveInstanceResponse response = future.get();
+   * }
+   * }
+ */ + public final OperationCallable + moveInstanceOperationCallable() { + return stub.moveInstanceOperationCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Moves an instance to the target instance configuration. You can use the returned long-running + * operation to track the progress of moving the instance. + * + *

`MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of the following + * criteria: + * + *

* Is undergoing a move to a different instance configuration * Has backups * Has + * an ongoing update * Contains any CMEK-enabled databases * Is a free trial instance + * + *

While the operation is pending: + * + *

* All other attempts to modify the instance, including changes to its compute capacity, + * are rejected. * The following database and backup admin operations are rejected: + * + *

* `DatabaseAdmin.CreateDatabase` * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if + * default_leader is specified in the request.) * `DatabaseAdmin.RestoreDatabase` * + * `DatabaseAdmin.CreateBackup` * `DatabaseAdmin.CopyBackup` + * + *

* Both the source and target instance configurations are subject to hourly compute and + * storage charges. * The instance might experience higher read-write latencies and a higher + * transaction abort rate. However, moving an instance doesn't cause any downtime. + * + *

The returned long-running operation has a name of the format + * `<instance_name>/operations/<operation_id>` and can be used to track the move + * instance operation. The metadata field type is + * [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata]. The response + * field type is [Instance][google.spanner.admin.instance.v1.Instance], if successful. Cancelling + * the operation sets its metadata's + * [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time]. Cancellation + * is not immediate because it involves moving any data previously moved to the target instance + * configuration back to the original instance configuration. You can use this operation to track + * the progress of the cancellation. Upon successful completion of the cancellation, the operation + * terminates with `CANCELLED` status. + * + *

If not cancelled, upon completion of the returned operation: + * + *

* The instance successfully moves to the target instance configuration. * You are + * billed for compute and storage in target instance configuration. + * + *

Authorization requires the `spanner.instances.update` permission on the resource + * [instance][google.spanner.admin.instance.v1.Instance]. + * + *

For more details, see [Move an + * instance](https://cloud.google.com/spanner/docs/move-instance). + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+   *   MoveInstanceRequest request =
+   *       MoveInstanceRequest.newBuilder()
+   *           .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
+   *           .setTargetConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString())
+   *           .build();
+   *   ApiFuture future = instanceAdminClient.moveInstanceCallable().futureCall(request);
+   *   // Do something.
+   *   Operation response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable moveInstanceCallable() { + return stub.moveInstanceCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } + + public static class ListInstanceConfigsPagedResponse + extends AbstractPagedListResponse< + ListInstanceConfigsRequest, + ListInstanceConfigsResponse, + InstanceConfig, + ListInstanceConfigsPage, + ListInstanceConfigsFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext + context, + ApiFuture futureResponse) { + ApiFuture futurePage = + ListInstanceConfigsPage.createEmptyPage().createPageAsync(context, futureResponse); + return ApiFutures.transform( + futurePage, + input -> new ListInstanceConfigsPagedResponse(input), + MoreExecutors.directExecutor()); + } + + private ListInstanceConfigsPagedResponse(ListInstanceConfigsPage page) { + super(page, ListInstanceConfigsFixedSizeCollection.createEmptyCollection()); + } + } + + public static class ListInstanceConfigsPage + extends AbstractPage< + ListInstanceConfigsRequest, + ListInstanceConfigsResponse, + InstanceConfig, + ListInstanceConfigsPage> { + + private ListInstanceConfigsPage( + PageContext + context, + ListInstanceConfigsResponse response) { + super(context, response); + } + + private static ListInstanceConfigsPage createEmptyPage() { + return new ListInstanceConfigsPage(null, null); + } + + @Override + protected ListInstanceConfigsPage createPage( + PageContext + context, + ListInstanceConfigsResponse response) { + return new ListInstanceConfigsPage(context, response); + } + 
+ @Override + public ApiFuture createPageAsync( + PageContext + context, + ApiFuture futureResponse) { + return super.createPageAsync(context, futureResponse); + } + } + + public static class ListInstanceConfigsFixedSizeCollection + extends AbstractFixedSizeCollection< + ListInstanceConfigsRequest, + ListInstanceConfigsResponse, + InstanceConfig, + ListInstanceConfigsPage, + ListInstanceConfigsFixedSizeCollection> { + + private ListInstanceConfigsFixedSizeCollection( + List pages, int collectionSize) { + super(pages, collectionSize); + } + + private static ListInstanceConfigsFixedSizeCollection createEmptyCollection() { + return new ListInstanceConfigsFixedSizeCollection(null, 0); + } + + @Override + protected ListInstanceConfigsFixedSizeCollection createCollection( + List pages, int collectionSize) { + return new ListInstanceConfigsFixedSizeCollection(pages, collectionSize); + } + } + + public static class ListInstanceConfigOperationsPagedResponse + extends AbstractPagedListResponse< + ListInstanceConfigOperationsRequest, + ListInstanceConfigOperationsResponse, + Operation, + ListInstanceConfigOperationsPage, + ListInstanceConfigOperationsFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext< + ListInstanceConfigOperationsRequest, + ListInstanceConfigOperationsResponse, + Operation> + context, + ApiFuture futureResponse) { + ApiFuture futurePage = + ListInstanceConfigOperationsPage.createEmptyPage() + .createPageAsync(context, futureResponse); + return ApiFutures.transform( + futurePage, + input -> new ListInstanceConfigOperationsPagedResponse(input), + MoreExecutors.directExecutor()); + } + + private ListInstanceConfigOperationsPagedResponse(ListInstanceConfigOperationsPage page) { + super(page, ListInstanceConfigOperationsFixedSizeCollection.createEmptyCollection()); + } + } + + public static class ListInstanceConfigOperationsPage + extends AbstractPage< + ListInstanceConfigOperationsRequest, + ListInstanceConfigOperationsResponse, + 
Operation, + ListInstanceConfigOperationsPage> { + + private ListInstanceConfigOperationsPage( + PageContext< + ListInstanceConfigOperationsRequest, + ListInstanceConfigOperationsResponse, + Operation> + context, + ListInstanceConfigOperationsResponse response) { + super(context, response); + } + + private static ListInstanceConfigOperationsPage createEmptyPage() { + return new ListInstanceConfigOperationsPage(null, null); + } + + @Override + protected ListInstanceConfigOperationsPage createPage( + PageContext< + ListInstanceConfigOperationsRequest, + ListInstanceConfigOperationsResponse, + Operation> + context, + ListInstanceConfigOperationsResponse response) { + return new ListInstanceConfigOperationsPage(context, response); + } + + @Override + public ApiFuture createPageAsync( + PageContext< + ListInstanceConfigOperationsRequest, + ListInstanceConfigOperationsResponse, + Operation> + context, + ApiFuture futureResponse) { + return super.createPageAsync(context, futureResponse); + } + } + + public static class ListInstanceConfigOperationsFixedSizeCollection + extends AbstractFixedSizeCollection< + ListInstanceConfigOperationsRequest, + ListInstanceConfigOperationsResponse, + Operation, + ListInstanceConfigOperationsPage, + ListInstanceConfigOperationsFixedSizeCollection> { + + private ListInstanceConfigOperationsFixedSizeCollection( + List pages, int collectionSize) { + super(pages, collectionSize); + } + + private static ListInstanceConfigOperationsFixedSizeCollection createEmptyCollection() { + return new ListInstanceConfigOperationsFixedSizeCollection(null, 0); + } + + @Override + protected ListInstanceConfigOperationsFixedSizeCollection createCollection( + List pages, int collectionSize) { + return new ListInstanceConfigOperationsFixedSizeCollection(pages, collectionSize); + } + } + + public static class ListInstancesPagedResponse + extends AbstractPagedListResponse< + ListInstancesRequest, + ListInstancesResponse, + Instance, + ListInstancesPage, + 
ListInstancesFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext context, + ApiFuture futureResponse) { + ApiFuture futurePage = + ListInstancesPage.createEmptyPage().createPageAsync(context, futureResponse); + return ApiFutures.transform( + futurePage, + input -> new ListInstancesPagedResponse(input), + MoreExecutors.directExecutor()); + } + + private ListInstancesPagedResponse(ListInstancesPage page) { + super(page, ListInstancesFixedSizeCollection.createEmptyCollection()); + } + } + + public static class ListInstancesPage + extends AbstractPage< + ListInstancesRequest, ListInstancesResponse, Instance, ListInstancesPage> { + + private ListInstancesPage( + PageContext context, + ListInstancesResponse response) { + super(context, response); + } + + private static ListInstancesPage createEmptyPage() { + return new ListInstancesPage(null, null); + } + + @Override + protected ListInstancesPage createPage( + PageContext context, + ListInstancesResponse response) { + return new ListInstancesPage(context, response); + } + + @Override + public ApiFuture createPageAsync( + PageContext context, + ApiFuture futureResponse) { + return super.createPageAsync(context, futureResponse); + } + } + + public static class ListInstancesFixedSizeCollection + extends AbstractFixedSizeCollection< + ListInstancesRequest, + ListInstancesResponse, + Instance, + ListInstancesPage, + ListInstancesFixedSizeCollection> { + + private ListInstancesFixedSizeCollection(List pages, int collectionSize) { + super(pages, collectionSize); + } + + private static ListInstancesFixedSizeCollection createEmptyCollection() { + return new ListInstancesFixedSizeCollection(null, 0); + } + + @Override + protected ListInstancesFixedSizeCollection createCollection( + List pages, int collectionSize) { + return new ListInstancesFixedSizeCollection(pages, collectionSize); + } + } + + public static class ListInstancePartitionsPagedResponse + extends AbstractPagedListResponse< + 
ListInstancePartitionsRequest, + ListInstancePartitionsResponse, + InstancePartition, + ListInstancePartitionsPage, + ListInstancePartitionsFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext< + ListInstancePartitionsRequest, ListInstancePartitionsResponse, InstancePartition> + context, + ApiFuture futureResponse) { + ApiFuture futurePage = + ListInstancePartitionsPage.createEmptyPage().createPageAsync(context, futureResponse); + return ApiFutures.transform( + futurePage, + input -> new ListInstancePartitionsPagedResponse(input), + MoreExecutors.directExecutor()); + } + + private ListInstancePartitionsPagedResponse(ListInstancePartitionsPage page) { + super(page, ListInstancePartitionsFixedSizeCollection.createEmptyCollection()); + } + } + + public static class ListInstancePartitionsPage + extends AbstractPage< + ListInstancePartitionsRequest, + ListInstancePartitionsResponse, + InstancePartition, + ListInstancePartitionsPage> { + + private ListInstancePartitionsPage( + PageContext< + ListInstancePartitionsRequest, ListInstancePartitionsResponse, InstancePartition> + context, + ListInstancePartitionsResponse response) { + super(context, response); + } + + private static ListInstancePartitionsPage createEmptyPage() { + return new ListInstancePartitionsPage(null, null); + } + + @Override + protected ListInstancePartitionsPage createPage( + PageContext< + ListInstancePartitionsRequest, ListInstancePartitionsResponse, InstancePartition> + context, + ListInstancePartitionsResponse response) { + return new ListInstancePartitionsPage(context, response); + } + + @Override + public ApiFuture createPageAsync( + PageContext< + ListInstancePartitionsRequest, ListInstancePartitionsResponse, InstancePartition> + context, + ApiFuture futureResponse) { + return super.createPageAsync(context, futureResponse); + } + } + + public static class ListInstancePartitionsFixedSizeCollection + extends AbstractFixedSizeCollection< + ListInstancePartitionsRequest, + 
ListInstancePartitionsResponse, + InstancePartition, + ListInstancePartitionsPage, + ListInstancePartitionsFixedSizeCollection> { + + private ListInstancePartitionsFixedSizeCollection( + List pages, int collectionSize) { + super(pages, collectionSize); + } + + private static ListInstancePartitionsFixedSizeCollection createEmptyCollection() { + return new ListInstancePartitionsFixedSizeCollection(null, 0); + } + + @Override + protected ListInstancePartitionsFixedSizeCollection createCollection( + List pages, int collectionSize) { + return new ListInstancePartitionsFixedSizeCollection(pages, collectionSize); + } + } + + public static class ListInstancePartitionOperationsPagedResponse + extends AbstractPagedListResponse< + ListInstancePartitionOperationsRequest, + ListInstancePartitionOperationsResponse, + Operation, + ListInstancePartitionOperationsPage, + ListInstancePartitionOperationsFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext< + ListInstancePartitionOperationsRequest, + ListInstancePartitionOperationsResponse, + Operation> + context, + ApiFuture futureResponse) { + ApiFuture futurePage = + ListInstancePartitionOperationsPage.createEmptyPage() + .createPageAsync(context, futureResponse); + return ApiFutures.transform( + futurePage, + input -> new ListInstancePartitionOperationsPagedResponse(input), + MoreExecutors.directExecutor()); + } + + private ListInstancePartitionOperationsPagedResponse(ListInstancePartitionOperationsPage page) { + super(page, ListInstancePartitionOperationsFixedSizeCollection.createEmptyCollection()); + } + } + + public static class ListInstancePartitionOperationsPage + extends AbstractPage< + ListInstancePartitionOperationsRequest, + ListInstancePartitionOperationsResponse, + Operation, + ListInstancePartitionOperationsPage> { + + private ListInstancePartitionOperationsPage( + PageContext< + ListInstancePartitionOperationsRequest, + ListInstancePartitionOperationsResponse, + Operation> + context, + 
ListInstancePartitionOperationsResponse response) { + super(context, response); + } + + private static ListInstancePartitionOperationsPage createEmptyPage() { + return new ListInstancePartitionOperationsPage(null, null); + } + + @Override + protected ListInstancePartitionOperationsPage createPage( + PageContext< + ListInstancePartitionOperationsRequest, + ListInstancePartitionOperationsResponse, + Operation> + context, + ListInstancePartitionOperationsResponse response) { + return new ListInstancePartitionOperationsPage(context, response); + } + + @Override + public ApiFuture createPageAsync( + PageContext< + ListInstancePartitionOperationsRequest, + ListInstancePartitionOperationsResponse, + Operation> + context, + ApiFuture futureResponse) { + return super.createPageAsync(context, futureResponse); + } + } + + public static class ListInstancePartitionOperationsFixedSizeCollection + extends AbstractFixedSizeCollection< + ListInstancePartitionOperationsRequest, + ListInstancePartitionOperationsResponse, + Operation, + ListInstancePartitionOperationsPage, + ListInstancePartitionOperationsFixedSizeCollection> { + + private ListInstancePartitionOperationsFixedSizeCollection( + List pages, int collectionSize) { + super(pages, collectionSize); + } + + private static ListInstancePartitionOperationsFixedSizeCollection createEmptyCollection() { + return new ListInstancePartitionOperationsFixedSizeCollection(null, 0); + } + + @Override + protected ListInstancePartitionOperationsFixedSizeCollection createCollection( + List pages, int collectionSize) { + return new ListInstancePartitionOperationsFixedSizeCollection(pages, collectionSize); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminSettings.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminSettings.java new file mode 100644 index 000000000000..3b4af74269f9 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminSettings.java @@ -0,0 +1,631 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.admin.instance.v1; + +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstanceConfigOperationsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstanceConfigsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstancePartitionOperationsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstancePartitionsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstancesPagedResponse; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.httpjson.InstantiatingHttpJsonChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.PagedCallSettings; +import 
com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.spanner.admin.instance.v1.stub.InstanceAdminStubSettings; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.CreateInstanceMetadata; +import com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata; +import com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.CreateInstanceRequest; +import com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.DeleteInstanceRequest; +import com.google.spanner.admin.instance.v1.GetInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.GetInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.GetInstanceRequest; +import com.google.spanner.admin.instance.v1.Instance; +import com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.InstancePartition; +import com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest; +import com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse; +import com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest; +import 
com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse; +import com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest; +import com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse; +import com.google.spanner.admin.instance.v1.ListInstancesRequest; +import com.google.spanner.admin.instance.v1.ListInstancesResponse; +import com.google.spanner.admin.instance.v1.MoveInstanceMetadata; +import com.google.spanner.admin.instance.v1.MoveInstanceRequest; +import com.google.spanner.admin.instance.v1.MoveInstanceResponse; +import com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.UpdateInstanceRequest; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link InstanceAdminClient}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (spanner.googleapis.com) and default port (443) are used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of getInstanceConfig: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * InstanceAdminSettings.Builder instanceAdminSettingsBuilder = InstanceAdminSettings.newBuilder();
+ * instanceAdminSettingsBuilder
+ *     .getInstanceConfigSettings()
+ *     .setRetrySettings(
+ *         instanceAdminSettingsBuilder
+ *             .getInstanceConfigSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * InstanceAdminSettings instanceAdminSettings = instanceAdminSettingsBuilder.build();
+ * }
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + * + *

To configure the RetrySettings of a Long Running Operation method, create an + * OperationTimedPollAlgorithm object and update the RPC's polling algorithm. For example, to + * configure the RetrySettings for createInstanceConfig: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * InstanceAdminSettings.Builder instanceAdminSettingsBuilder = InstanceAdminSettings.newBuilder();
+ * TimedRetryAlgorithm timedRetryAlgorithm =
+ *     OperationalTimedPollAlgorithm.create(
+ *         RetrySettings.newBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofMillis(500))
+ *             .setRetryDelayMultiplier(1.5)
+ *             .setMaxRetryDelayDuration(Duration.ofMillis(5000))
+ *             .setTotalTimeoutDuration(Duration.ofHours(24))
+ *             .build());
+ * instanceAdminSettingsBuilder
+ *     .createClusterOperationSettings()
+ *     .setPollingAlgorithm(timedRetryAlgorithm)
+ *     .build();
+ * }
+ */ +@Generated("by gapic-generator-java") +public class InstanceAdminSettings extends ClientSettings { + + /** Returns the object with the settings used for calls to listInstanceConfigs. */ + public PagedCallSettings< + ListInstanceConfigsRequest, ListInstanceConfigsResponse, ListInstanceConfigsPagedResponse> + listInstanceConfigsSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).listInstanceConfigsSettings(); + } + + /** Returns the object with the settings used for calls to getInstanceConfig. */ + public UnaryCallSettings getInstanceConfigSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).getInstanceConfigSettings(); + } + + /** Returns the object with the settings used for calls to createInstanceConfig. */ + public UnaryCallSettings createInstanceConfigSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).createInstanceConfigSettings(); + } + + /** Returns the object with the settings used for calls to createInstanceConfig. */ + public OperationCallSettings< + CreateInstanceConfigRequest, InstanceConfig, CreateInstanceConfigMetadata> + createInstanceConfigOperationSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).createInstanceConfigOperationSettings(); + } + + /** Returns the object with the settings used for calls to updateInstanceConfig. */ + public UnaryCallSettings updateInstanceConfigSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).updateInstanceConfigSettings(); + } + + /** Returns the object with the settings used for calls to updateInstanceConfig. */ + public OperationCallSettings< + UpdateInstanceConfigRequest, InstanceConfig, UpdateInstanceConfigMetadata> + updateInstanceConfigOperationSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).updateInstanceConfigOperationSettings(); + } + + /** Returns the object with the settings used for calls to deleteInstanceConfig. 
*/ + public UnaryCallSettings deleteInstanceConfigSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).deleteInstanceConfigSettings(); + } + + /** Returns the object with the settings used for calls to listInstanceConfigOperations. */ + public PagedCallSettings< + ListInstanceConfigOperationsRequest, + ListInstanceConfigOperationsResponse, + ListInstanceConfigOperationsPagedResponse> + listInstanceConfigOperationsSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).listInstanceConfigOperationsSettings(); + } + + /** Returns the object with the settings used for calls to listInstances. */ + public PagedCallSettings + listInstancesSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).listInstancesSettings(); + } + + /** Returns the object with the settings used for calls to listInstancePartitions. */ + public PagedCallSettings< + ListInstancePartitionsRequest, + ListInstancePartitionsResponse, + ListInstancePartitionsPagedResponse> + listInstancePartitionsSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).listInstancePartitionsSettings(); + } + + /** Returns the object with the settings used for calls to getInstance. */ + public UnaryCallSettings getInstanceSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).getInstanceSettings(); + } + + /** Returns the object with the settings used for calls to createInstance. */ + public UnaryCallSettings createInstanceSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).createInstanceSettings(); + } + + /** Returns the object with the settings used for calls to createInstance. */ + public OperationCallSettings + createInstanceOperationSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).createInstanceOperationSettings(); + } + + /** Returns the object with the settings used for calls to updateInstance. 
*/ + public UnaryCallSettings updateInstanceSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).updateInstanceSettings(); + } + + /** Returns the object with the settings used for calls to updateInstance. */ + public OperationCallSettings + updateInstanceOperationSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).updateInstanceOperationSettings(); + } + + /** Returns the object with the settings used for calls to deleteInstance. */ + public UnaryCallSettings deleteInstanceSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).deleteInstanceSettings(); + } + + /** Returns the object with the settings used for calls to setIamPolicy. */ + public UnaryCallSettings setIamPolicySettings() { + return ((InstanceAdminStubSettings) getStubSettings()).setIamPolicySettings(); + } + + /** Returns the object with the settings used for calls to getIamPolicy. */ + public UnaryCallSettings getIamPolicySettings() { + return ((InstanceAdminStubSettings) getStubSettings()).getIamPolicySettings(); + } + + /** Returns the object with the settings used for calls to testIamPermissions. */ + public UnaryCallSettings + testIamPermissionsSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).testIamPermissionsSettings(); + } + + /** Returns the object with the settings used for calls to getInstancePartition. */ + public UnaryCallSettings + getInstancePartitionSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).getInstancePartitionSettings(); + } + + /** Returns the object with the settings used for calls to createInstancePartition. */ + public UnaryCallSettings + createInstancePartitionSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).createInstancePartitionSettings(); + } + + /** Returns the object with the settings used for calls to createInstancePartition. 
*/ + public OperationCallSettings< + CreateInstancePartitionRequest, InstancePartition, CreateInstancePartitionMetadata> + createInstancePartitionOperationSettings() { + return ((InstanceAdminStubSettings) getStubSettings()) + .createInstancePartitionOperationSettings(); + } + + /** Returns the object with the settings used for calls to deleteInstancePartition. */ + public UnaryCallSettings + deleteInstancePartitionSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).deleteInstancePartitionSettings(); + } + + /** Returns the object with the settings used for calls to updateInstancePartition. */ + public UnaryCallSettings + updateInstancePartitionSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).updateInstancePartitionSettings(); + } + + /** Returns the object with the settings used for calls to updateInstancePartition. */ + public OperationCallSettings< + UpdateInstancePartitionRequest, InstancePartition, UpdateInstancePartitionMetadata> + updateInstancePartitionOperationSettings() { + return ((InstanceAdminStubSettings) getStubSettings()) + .updateInstancePartitionOperationSettings(); + } + + /** Returns the object with the settings used for calls to listInstancePartitionOperations. */ + public PagedCallSettings< + ListInstancePartitionOperationsRequest, + ListInstancePartitionOperationsResponse, + ListInstancePartitionOperationsPagedResponse> + listInstancePartitionOperationsSettings() { + return ((InstanceAdminStubSettings) getStubSettings()) + .listInstancePartitionOperationsSettings(); + } + + /** Returns the object with the settings used for calls to moveInstance. */ + public UnaryCallSettings moveInstanceSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).moveInstanceSettings(); + } + + /** Returns the object with the settings used for calls to moveInstance. 
*/ + public OperationCallSettings + moveInstanceOperationSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).moveInstanceOperationSettings(); + } + + public static final InstanceAdminSettings create(InstanceAdminStubSettings stub) + throws IOException { + return new InstanceAdminSettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstanceAdminStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return InstanceAdminStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return InstanceAdminStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return InstanceAdminStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default gRPC ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstanceAdminStubSettings.defaultGrpcTransportProviderBuilder(); + } + + /** Returns a builder for the default REST ChannelProvider for this service. 
*/ + @BetaApi + public static InstantiatingHttpJsonChannelProvider.Builder + defaultHttpJsonTransportProviderBuilder() { + return InstanceAdminStubSettings.defaultHttpJsonTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return InstanceAdminStubSettings.defaultTransportChannelProvider(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return InstanceAdminStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new gRPC builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new REST builder for this class. */ + public static Builder newHttpJsonBuilder() { + return Builder.createHttpJsonDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected InstanceAdminSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for InstanceAdminSettings. 
*/ + public static class Builder extends ClientSettings.Builder { + + protected Builder() throws IOException { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(InstanceAdminStubSettings.newBuilder(clientContext)); + } + + protected Builder(InstanceAdminSettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(InstanceAdminStubSettings.Builder stubSettings) { + super(stubSettings); + } + + private static Builder createDefault() { + return new Builder(InstanceAdminStubSettings.newBuilder()); + } + + private static Builder createHttpJsonDefault() { + return new Builder(InstanceAdminStubSettings.newHttpJsonBuilder()); + } + + public InstanceAdminStubSettings.Builder getStubSettingsBuilder() { + return ((InstanceAdminStubSettings.Builder) getStubSettings()); + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + /** Returns the builder for the settings used for calls to listInstanceConfigs. */ + public PagedCallSettings.Builder< + ListInstanceConfigsRequest, + ListInstanceConfigsResponse, + ListInstanceConfigsPagedResponse> + listInstanceConfigsSettings() { + return getStubSettingsBuilder().listInstanceConfigsSettings(); + } + + /** Returns the builder for the settings used for calls to getInstanceConfig. */ + public UnaryCallSettings.Builder + getInstanceConfigSettings() { + return getStubSettingsBuilder().getInstanceConfigSettings(); + } + + /** Returns the builder for the settings used for calls to createInstanceConfig. */ + public UnaryCallSettings.Builder + createInstanceConfigSettings() { + return getStubSettingsBuilder().createInstanceConfigSettings(); + } + + /** Returns the builder for the settings used for calls to createInstanceConfig. */ + public OperationCallSettings.Builder< + CreateInstanceConfigRequest, InstanceConfig, CreateInstanceConfigMetadata> + createInstanceConfigOperationSettings() { + return getStubSettingsBuilder().createInstanceConfigOperationSettings(); + } + + /** Returns the builder for the settings used for calls to updateInstanceConfig. */ + public UnaryCallSettings.Builder + updateInstanceConfigSettings() { + return getStubSettingsBuilder().updateInstanceConfigSettings(); + } + + /** Returns the builder for the settings used for calls to updateInstanceConfig. 
*/ + public OperationCallSettings.Builder< + UpdateInstanceConfigRequest, InstanceConfig, UpdateInstanceConfigMetadata> + updateInstanceConfigOperationSettings() { + return getStubSettingsBuilder().updateInstanceConfigOperationSettings(); + } + + /** Returns the builder for the settings used for calls to deleteInstanceConfig. */ + public UnaryCallSettings.Builder + deleteInstanceConfigSettings() { + return getStubSettingsBuilder().deleteInstanceConfigSettings(); + } + + /** Returns the builder for the settings used for calls to listInstanceConfigOperations. */ + public PagedCallSettings.Builder< + ListInstanceConfigOperationsRequest, + ListInstanceConfigOperationsResponse, + ListInstanceConfigOperationsPagedResponse> + listInstanceConfigOperationsSettings() { + return getStubSettingsBuilder().listInstanceConfigOperationsSettings(); + } + + /** Returns the builder for the settings used for calls to listInstances. */ + public PagedCallSettings.Builder< + ListInstancesRequest, ListInstancesResponse, ListInstancesPagedResponse> + listInstancesSettings() { + return getStubSettingsBuilder().listInstancesSettings(); + } + + /** Returns the builder for the settings used for calls to listInstancePartitions. */ + public PagedCallSettings.Builder< + ListInstancePartitionsRequest, + ListInstancePartitionsResponse, + ListInstancePartitionsPagedResponse> + listInstancePartitionsSettings() { + return getStubSettingsBuilder().listInstancePartitionsSettings(); + } + + /** Returns the builder for the settings used for calls to getInstance. */ + public UnaryCallSettings.Builder getInstanceSettings() { + return getStubSettingsBuilder().getInstanceSettings(); + } + + /** Returns the builder for the settings used for calls to createInstance. */ + public UnaryCallSettings.Builder createInstanceSettings() { + return getStubSettingsBuilder().createInstanceSettings(); + } + + /** Returns the builder for the settings used for calls to createInstance. 
*/ + public OperationCallSettings.Builder + createInstanceOperationSettings() { + return getStubSettingsBuilder().createInstanceOperationSettings(); + } + + /** Returns the builder for the settings used for calls to updateInstance. */ + public UnaryCallSettings.Builder updateInstanceSettings() { + return getStubSettingsBuilder().updateInstanceSettings(); + } + + /** Returns the builder for the settings used for calls to updateInstance. */ + public OperationCallSettings.Builder + updateInstanceOperationSettings() { + return getStubSettingsBuilder().updateInstanceOperationSettings(); + } + + /** Returns the builder for the settings used for calls to deleteInstance. */ + public UnaryCallSettings.Builder deleteInstanceSettings() { + return getStubSettingsBuilder().deleteInstanceSettings(); + } + + /** Returns the builder for the settings used for calls to setIamPolicy. */ + public UnaryCallSettings.Builder setIamPolicySettings() { + return getStubSettingsBuilder().setIamPolicySettings(); + } + + /** Returns the builder for the settings used for calls to getIamPolicy. */ + public UnaryCallSettings.Builder getIamPolicySettings() { + return getStubSettingsBuilder().getIamPolicySettings(); + } + + /** Returns the builder for the settings used for calls to testIamPermissions. */ + public UnaryCallSettings.Builder + testIamPermissionsSettings() { + return getStubSettingsBuilder().testIamPermissionsSettings(); + } + + /** Returns the builder for the settings used for calls to getInstancePartition. */ + public UnaryCallSettings.Builder + getInstancePartitionSettings() { + return getStubSettingsBuilder().getInstancePartitionSettings(); + } + + /** Returns the builder for the settings used for calls to createInstancePartition. */ + public UnaryCallSettings.Builder + createInstancePartitionSettings() { + return getStubSettingsBuilder().createInstancePartitionSettings(); + } + + /** Returns the builder for the settings used for calls to createInstancePartition. 
*/ + public OperationCallSettings.Builder< + CreateInstancePartitionRequest, InstancePartition, CreateInstancePartitionMetadata> + createInstancePartitionOperationSettings() { + return getStubSettingsBuilder().createInstancePartitionOperationSettings(); + } + + /** Returns the builder for the settings used for calls to deleteInstancePartition. */ + public UnaryCallSettings.Builder + deleteInstancePartitionSettings() { + return getStubSettingsBuilder().deleteInstancePartitionSettings(); + } + + /** Returns the builder for the settings used for calls to updateInstancePartition. */ + public UnaryCallSettings.Builder + updateInstancePartitionSettings() { + return getStubSettingsBuilder().updateInstancePartitionSettings(); + } + + /** Returns the builder for the settings used for calls to updateInstancePartition. */ + public OperationCallSettings.Builder< + UpdateInstancePartitionRequest, InstancePartition, UpdateInstancePartitionMetadata> + updateInstancePartitionOperationSettings() { + return getStubSettingsBuilder().updateInstancePartitionOperationSettings(); + } + + /** Returns the builder for the settings used for calls to listInstancePartitionOperations. */ + public PagedCallSettings.Builder< + ListInstancePartitionOperationsRequest, + ListInstancePartitionOperationsResponse, + ListInstancePartitionOperationsPagedResponse> + listInstancePartitionOperationsSettings() { + return getStubSettingsBuilder().listInstancePartitionOperationsSettings(); + } + + /** Returns the builder for the settings used for calls to moveInstance. */ + public UnaryCallSettings.Builder moveInstanceSettings() { + return getStubSettingsBuilder().moveInstanceSettings(); + } + + /** Returns the builder for the settings used for calls to moveInstance. 
*/ + public OperationCallSettings.Builder< + MoveInstanceRequest, MoveInstanceResponse, MoveInstanceMetadata> + moveInstanceOperationSettings() { + return getStubSettingsBuilder().moveInstanceOperationSettings(); + } + + @Override + public InstanceAdminSettings build() throws IOException { + return new InstanceAdminSettings(this); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/gapic_metadata.json b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/gapic_metadata.json new file mode 100644 index 000000000000..1500bd3742e7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/gapic_metadata.json @@ -0,0 +1,81 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "java", + "protoPackage": "google.spanner.admin.instance.v1", + "libraryPackage": "com.google.cloud.spanner.admin.instance.v1", + "services": { + "InstanceAdmin": { + "clients": { + "grpc": { + "libraryClient": "InstanceAdminClient", + "rpcs": { + "CreateInstance": { + "methods": ["createInstanceAsync", "createInstanceAsync", "createInstanceAsync", "createInstanceOperationCallable", "createInstanceCallable"] + }, + "CreateInstanceConfig": { + "methods": ["createInstanceConfigAsync", "createInstanceConfigAsync", "createInstanceConfigAsync", "createInstanceConfigOperationCallable", "createInstanceConfigCallable"] + }, + "CreateInstancePartition": { + "methods": ["createInstancePartitionAsync", "createInstancePartitionAsync", "createInstancePartitionAsync", "createInstancePartitionOperationCallable", "createInstancePartitionCallable"] + }, + "DeleteInstance": { + "methods": ["deleteInstance", "deleteInstance", "deleteInstance", "deleteInstanceCallable"] + }, + "DeleteInstanceConfig": { + "methods": ["deleteInstanceConfig", "deleteInstanceConfig", "deleteInstanceConfig", 
"deleteInstanceConfigCallable"] + }, + "DeleteInstancePartition": { + "methods": ["deleteInstancePartition", "deleteInstancePartition", "deleteInstancePartition", "deleteInstancePartitionCallable"] + }, + "GetIamPolicy": { + "methods": ["getIamPolicy", "getIamPolicy", "getIamPolicy", "getIamPolicyCallable"] + }, + "GetInstance": { + "methods": ["getInstance", "getInstance", "getInstance", "getInstanceCallable"] + }, + "GetInstanceConfig": { + "methods": ["getInstanceConfig", "getInstanceConfig", "getInstanceConfig", "getInstanceConfigCallable"] + }, + "GetInstancePartition": { + "methods": ["getInstancePartition", "getInstancePartition", "getInstancePartition", "getInstancePartitionCallable"] + }, + "ListInstanceConfigOperations": { + "methods": ["listInstanceConfigOperations", "listInstanceConfigOperations", "listInstanceConfigOperations", "listInstanceConfigOperationsPagedCallable", "listInstanceConfigOperationsCallable"] + }, + "ListInstanceConfigs": { + "methods": ["listInstanceConfigs", "listInstanceConfigs", "listInstanceConfigs", "listInstanceConfigsPagedCallable", "listInstanceConfigsCallable"] + }, + "ListInstancePartitionOperations": { + "methods": ["listInstancePartitionOperations", "listInstancePartitionOperations", "listInstancePartitionOperations", "listInstancePartitionOperationsPagedCallable", "listInstancePartitionOperationsCallable"] + }, + "ListInstancePartitions": { + "methods": ["listInstancePartitions", "listInstancePartitions", "listInstancePartitions", "listInstancePartitionsPagedCallable", "listInstancePartitionsCallable"] + }, + "ListInstances": { + "methods": ["listInstances", "listInstances", "listInstances", "listInstancesPagedCallable", "listInstancesCallable"] + }, + "MoveInstance": { + "methods": ["moveInstanceAsync", "moveInstanceOperationCallable", "moveInstanceCallable"] + }, + "SetIamPolicy": { + "methods": ["setIamPolicy", "setIamPolicy", "setIamPolicy", "setIamPolicyCallable"] + }, + "TestIamPermissions": { + "methods": 
["testIamPermissions", "testIamPermissions", "testIamPermissions", "testIamPermissionsCallable"] + }, + "UpdateInstance": { + "methods": ["updateInstanceAsync", "updateInstanceAsync", "updateInstanceOperationCallable", "updateInstanceCallable"] + }, + "UpdateInstanceConfig": { + "methods": ["updateInstanceConfigAsync", "updateInstanceConfigAsync", "updateInstanceConfigOperationCallable", "updateInstanceConfigCallable"] + }, + "UpdateInstancePartition": { + "methods": ["updateInstancePartitionAsync", "updateInstancePartitionAsync", "updateInstancePartitionOperationCallable", "updateInstancePartitionCallable"] + } + } + } + } + } + } +} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/package-info.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/package-info.java new file mode 100644 index 000000000000..207d8ecb31a8 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/package-info.java @@ -0,0 +1,59 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A client to Cloud Spanner API + * + *

The interfaces provided are listed below, along with usage samples. + * + *

======================= InstanceAdminClient ======================= + * + *

Service Description: Cloud Spanner Instance Admin API + * + *

The Cloud Spanner Instance Admin API can be used to create, delete, modify and list instances. + * Instances are dedicated Cloud Spanner serving and storage resources to be used by Cloud Spanner + * databases. + * + *

Each instance has a "configuration", which dictates where the serving resources for the Cloud + * Spanner instance are located (e.g., US-central, Europe). Configurations are created by Google + * based on resource availability. + * + *

Cloud Spanner billing is based on the instances that exist and their sizes. After an instance + * exists, there are no additional per-database or per-operation charges for use of the instance + * (though there may be additional network bandwidth charges). Instances offer isolation: problems + * with databases in one instance will not affect other instances. However, within an instance + * databases can affect each other. For example, if one database in an instance receives a lot of + * requests and consumes most of the instance resources, fewer resources are available for other + * databases in that instance, and their performance may suffer. + * + *

Sample for InstanceAdminClient: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
+ *   InstanceConfigName name = InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]");
+ *   InstanceConfig response = instanceAdminClient.getInstanceConfig(name);
+ * }
+ * }
+ */ +@Generated("by gapic-generator-java") +package com.google.cloud.spanner.admin.instance.v1; + +import javax.annotation.Generated; diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/GrpcInstanceAdminCallableFactory.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/GrpcInstanceAdminCallableFactory.java new file mode 100644 index 000000000000..983c0fda6e01 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/GrpcInstanceAdminCallableFactory.java @@ -0,0 +1,113 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.admin.instance.v1.stub; + +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC callable factory implementation for the InstanceAdmin service API. + * + *

This class is for advanced usage. + */ +@Generated("by gapic-generator-java") +public class GrpcInstanceAdminCallableFactory implements GrpcStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, callSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( + 
GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/GrpcInstanceAdminStub.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/GrpcInstanceAdminStub.java new file mode 100644 index 000000000000..25e7711e8da4 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/GrpcInstanceAdminStub.java @@ -0,0 +1,1084 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.admin.instance.v1.stub; + +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstanceConfigOperationsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstanceConfigsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstancePartitionOperationsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstancePartitionsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstancesPagedResponse; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.RequestParamsBuilder; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.GrpcOperationsStub; +import com.google.protobuf.Empty; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.CreateInstanceMetadata; +import com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata; +import com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.CreateInstanceRequest; +import com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest; +import 
com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.DeleteInstanceRequest; +import com.google.spanner.admin.instance.v1.GetInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.GetInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.GetInstanceRequest; +import com.google.spanner.admin.instance.v1.Instance; +import com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.InstancePartition; +import com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest; +import com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse; +import com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest; +import com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse; +import com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest; +import com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse; +import com.google.spanner.admin.instance.v1.ListInstancesRequest; +import com.google.spanner.admin.instance.v1.ListInstancesResponse; +import com.google.spanner.admin.instance.v1.MoveInstanceMetadata; +import com.google.spanner.admin.instance.v1.MoveInstanceRequest; +import com.google.spanner.admin.instance.v1.MoveInstanceResponse; +import com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.UpdateInstanceRequest; +import io.grpc.MethodDescriptor; +import 
io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC stub implementation for the InstanceAdmin service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public class GrpcInstanceAdminStub extends InstanceAdminStub { + private static final MethodDescriptor + listInstanceConfigsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigs") + .setRequestMarshaller( + ProtoUtils.marshaller(ListInstanceConfigsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListInstanceConfigsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + getInstanceConfigMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.instance.v1.InstanceAdmin/GetInstanceConfig") + .setRequestMarshaller( + ProtoUtils.marshaller(GetInstanceConfigRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(InstanceConfig.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + createInstanceConfigMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/CreateInstanceConfig") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateInstanceConfigRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + updateInstanceConfigMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstanceConfig") + .setRequestMarshaller( + ProtoUtils.marshaller(UpdateInstanceConfigRequest.getDefaultInstance())) + 
.setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + deleteInstanceConfigMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstanceConfig") + .setRequestMarshaller( + ProtoUtils.marshaller(DeleteInstanceConfigRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor< + ListInstanceConfigOperationsRequest, ListInstanceConfigOperationsResponse> + listInstanceConfigOperationsMethodDescriptor = + MethodDescriptor + . + newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigOperations") + .setRequestMarshaller( + ProtoUtils.marshaller(ListInstanceConfigOperationsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListInstanceConfigOperationsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + listInstancesMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.instance.v1.InstanceAdmin/ListInstances") + .setRequestMarshaller( + ProtoUtils.marshaller(ListInstancesRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListInstancesResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor< + ListInstancePartitionsRequest, ListInstancePartitionsResponse> + listInstancePartitionsMethodDescriptor = + MethodDescriptor + .newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + 
"google.spanner.admin.instance.v1.InstanceAdmin/ListInstancePartitions") + .setRequestMarshaller( + ProtoUtils.marshaller(ListInstancePartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListInstancePartitionsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor getInstanceMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.instance.v1.InstanceAdmin/GetInstance") + .setRequestMarshaller(ProtoUtils.marshaller(GetInstanceRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Instance.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + createInstanceMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.instance.v1.InstanceAdmin/CreateInstance") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateInstanceRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + updateInstanceMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstance") + .setRequestMarshaller( + ProtoUtils.marshaller(UpdateInstanceRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + deleteInstanceMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstance") + .setRequestMarshaller( + 
ProtoUtils.marshaller(DeleteInstanceRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor setIamPolicyMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.instance.v1.InstanceAdmin/SetIamPolicy") + .setRequestMarshaller(ProtoUtils.marshaller(SetIamPolicyRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Policy.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor getIamPolicyMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.instance.v1.InstanceAdmin/GetIamPolicy") + .setRequestMarshaller(ProtoUtils.marshaller(GetIamPolicyRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Policy.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + testIamPermissionsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/TestIamPermissions") + .setRequestMarshaller( + ProtoUtils.marshaller(TestIamPermissionsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(TestIamPermissionsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + getInstancePartitionMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/GetInstancePartition") + .setRequestMarshaller( + ProtoUtils.marshaller(GetInstancePartitionRequest.getDefaultInstance())) + 
.setResponseMarshaller(ProtoUtils.marshaller(InstancePartition.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + createInstancePartitionMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/CreateInstancePartition") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateInstancePartitionRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + deleteInstancePartitionMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstancePartition") + .setRequestMarshaller( + ProtoUtils.marshaller(DeleteInstancePartitionRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + updateInstancePartitionMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstancePartition") + .setRequestMarshaller( + ProtoUtils.marshaller(UpdateInstancePartitionRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor< + ListInstancePartitionOperationsRequest, ListInstancePartitionOperationsResponse> + listInstancePartitionOperationsMethodDescriptor = + MethodDescriptor + . 
+ newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/ListInstancePartitionOperations") + .setRequestMarshaller( + ProtoUtils.marshaller( + ListInstancePartitionOperationsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller( + ListInstancePartitionOperationsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + moveInstanceMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.instance.v1.InstanceAdmin/MoveInstance") + .setRequestMarshaller(ProtoUtils.marshaller(MoveInstanceRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private final UnaryCallable + listInstanceConfigsCallable; + private final UnaryCallable + listInstanceConfigsPagedCallable; + private final UnaryCallable getInstanceConfigCallable; + private final UnaryCallable createInstanceConfigCallable; + private final OperationCallable< + CreateInstanceConfigRequest, InstanceConfig, CreateInstanceConfigMetadata> + createInstanceConfigOperationCallable; + private final UnaryCallable updateInstanceConfigCallable; + private final OperationCallable< + UpdateInstanceConfigRequest, InstanceConfig, UpdateInstanceConfigMetadata> + updateInstanceConfigOperationCallable; + private final UnaryCallable deleteInstanceConfigCallable; + private final UnaryCallable< + ListInstanceConfigOperationsRequest, ListInstanceConfigOperationsResponse> + listInstanceConfigOperationsCallable; + private final UnaryCallable< + ListInstanceConfigOperationsRequest, ListInstanceConfigOperationsPagedResponse> + listInstanceConfigOperationsPagedCallable; + private final UnaryCallable listInstancesCallable; + private final UnaryCallable + listInstancesPagedCallable; + private 
final UnaryCallable + listInstancePartitionsCallable; + private final UnaryCallable + listInstancePartitionsPagedCallable; + private final UnaryCallable getInstanceCallable; + private final UnaryCallable createInstanceCallable; + private final OperationCallable + createInstanceOperationCallable; + private final UnaryCallable updateInstanceCallable; + private final OperationCallable + updateInstanceOperationCallable; + private final UnaryCallable deleteInstanceCallable; + private final UnaryCallable setIamPolicyCallable; + private final UnaryCallable getIamPolicyCallable; + private final UnaryCallable + testIamPermissionsCallable; + private final UnaryCallable + getInstancePartitionCallable; + private final UnaryCallable + createInstancePartitionCallable; + private final OperationCallable< + CreateInstancePartitionRequest, InstancePartition, CreateInstancePartitionMetadata> + createInstancePartitionOperationCallable; + private final UnaryCallable + deleteInstancePartitionCallable; + private final UnaryCallable + updateInstancePartitionCallable; + private final OperationCallable< + UpdateInstancePartitionRequest, InstancePartition, UpdateInstancePartitionMetadata> + updateInstancePartitionOperationCallable; + private final UnaryCallable< + ListInstancePartitionOperationsRequest, ListInstancePartitionOperationsResponse> + listInstancePartitionOperationsCallable; + private final UnaryCallable< + ListInstancePartitionOperationsRequest, ListInstancePartitionOperationsPagedResponse> + listInstancePartitionOperationsPagedCallable; + private final UnaryCallable moveInstanceCallable; + private final OperationCallable + moveInstanceOperationCallable; + + private final BackgroundResource backgroundResources; + private final GrpcOperationsStub operationsStub; + private final GrpcStubCallableFactory callableFactory; + + public static final GrpcInstanceAdminStub create(InstanceAdminStubSettings settings) + throws IOException { + return new GrpcInstanceAdminStub(settings, 
ClientContext.create(settings)); + } + + public static final GrpcInstanceAdminStub create(ClientContext clientContext) throws IOException { + return new GrpcInstanceAdminStub(InstanceAdminStubSettings.newBuilder().build(), clientContext); + } + + public static final GrpcInstanceAdminStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcInstanceAdminStub( + InstanceAdminStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcInstanceAdminStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected GrpcInstanceAdminStub(InstanceAdminStubSettings settings, ClientContext clientContext) + throws IOException { + this(settings, clientContext, new GrpcInstanceAdminCallableFactory()); + } + + /** + * Constructs an instance of GrpcInstanceAdminStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. 
+ */ + protected GrpcInstanceAdminStub( + InstanceAdminStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.operationsStub = GrpcOperationsStub.create(clientContext, callableFactory); + + GrpcCallSettings + listInstanceConfigsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listInstanceConfigsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + GrpcCallSettings getInstanceConfigTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getInstanceConfigMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + GrpcCallSettings createInstanceConfigTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createInstanceConfigMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + GrpcCallSettings updateInstanceConfigTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(updateInstanceConfigMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "instance_config.name", + String.valueOf(request.getInstanceConfig().getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings deleteInstanceConfigTransportSettings 
= + GrpcCallSettings.newBuilder() + .setMethodDescriptor(deleteInstanceConfigMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + GrpcCallSettings + listInstanceConfigOperationsTransportSettings = + GrpcCallSettings + . + newBuilder() + .setMethodDescriptor(listInstanceConfigOperationsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + GrpcCallSettings listInstancesTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listInstancesMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + GrpcCallSettings + listInstancePartitionsTransportSettings = + GrpcCallSettings + .newBuilder() + .setMethodDescriptor(listInstancePartitionsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + GrpcCallSettings getInstanceTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getInstanceMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> 
request.getName()) + .build(); + GrpcCallSettings createInstanceTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createInstanceMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + GrpcCallSettings updateInstanceTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(updateInstanceMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("instance.name", String.valueOf(request.getInstance().getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings deleteInstanceTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(deleteInstanceMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + GrpcCallSettings setIamPolicyTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(setIamPolicyMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("resource", String.valueOf(request.getResource())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getResource()) + .build(); + GrpcCallSettings getIamPolicyTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getIamPolicyMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("resource", String.valueOf(request.getResource())); + return builder.build(); + }) + .setResourceNameExtractor(request -> 
request.getResource()) + .build(); + GrpcCallSettings + testIamPermissionsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(testIamPermissionsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("resource", String.valueOf(request.getResource())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getResource()) + .build(); + GrpcCallSettings + getInstancePartitionTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getInstancePartitionMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + GrpcCallSettings + createInstancePartitionTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createInstancePartitionMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + GrpcCallSettings + deleteInstancePartitionTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(deleteInstancePartitionMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + GrpcCallSettings + updateInstancePartitionTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(updateInstancePartitionMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + 
"instance_partition.name", + String.valueOf(request.getInstancePartition().getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings< + ListInstancePartitionOperationsRequest, ListInstancePartitionOperationsResponse> + listInstancePartitionOperationsTransportSettings = + GrpcCallSettings + . + newBuilder() + .setMethodDescriptor(listInstancePartitionOperationsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + GrpcCallSettings moveInstanceTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(moveInstanceMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + + this.listInstanceConfigsCallable = + callableFactory.createUnaryCallable( + listInstanceConfigsTransportSettings, + settings.listInstanceConfigsSettings(), + clientContext); + this.listInstanceConfigsPagedCallable = + callableFactory.createPagedCallable( + listInstanceConfigsTransportSettings, + settings.listInstanceConfigsSettings(), + clientContext); + this.getInstanceConfigCallable = + callableFactory.createUnaryCallable( + getInstanceConfigTransportSettings, + settings.getInstanceConfigSettings(), + clientContext); + this.createInstanceConfigCallable = + callableFactory.createUnaryCallable( + createInstanceConfigTransportSettings, + settings.createInstanceConfigSettings(), + clientContext); + this.createInstanceConfigOperationCallable = + callableFactory.createOperationCallable( + createInstanceConfigTransportSettings, + settings.createInstanceConfigOperationSettings(), + clientContext, + operationsStub); + 
this.updateInstanceConfigCallable = + callableFactory.createUnaryCallable( + updateInstanceConfigTransportSettings, + settings.updateInstanceConfigSettings(), + clientContext); + this.updateInstanceConfigOperationCallable = + callableFactory.createOperationCallable( + updateInstanceConfigTransportSettings, + settings.updateInstanceConfigOperationSettings(), + clientContext, + operationsStub); + this.deleteInstanceConfigCallable = + callableFactory.createUnaryCallable( + deleteInstanceConfigTransportSettings, + settings.deleteInstanceConfigSettings(), + clientContext); + this.listInstanceConfigOperationsCallable = + callableFactory.createUnaryCallable( + listInstanceConfigOperationsTransportSettings, + settings.listInstanceConfigOperationsSettings(), + clientContext); + this.listInstanceConfigOperationsPagedCallable = + callableFactory.createPagedCallable( + listInstanceConfigOperationsTransportSettings, + settings.listInstanceConfigOperationsSettings(), + clientContext); + this.listInstancesCallable = + callableFactory.createUnaryCallable( + listInstancesTransportSettings, settings.listInstancesSettings(), clientContext); + this.listInstancesPagedCallable = + callableFactory.createPagedCallable( + listInstancesTransportSettings, settings.listInstancesSettings(), clientContext); + this.listInstancePartitionsCallable = + callableFactory.createUnaryCallable( + listInstancePartitionsTransportSettings, + settings.listInstancePartitionsSettings(), + clientContext); + this.listInstancePartitionsPagedCallable = + callableFactory.createPagedCallable( + listInstancePartitionsTransportSettings, + settings.listInstancePartitionsSettings(), + clientContext); + this.getInstanceCallable = + callableFactory.createUnaryCallable( + getInstanceTransportSettings, settings.getInstanceSettings(), clientContext); + this.createInstanceCallable = + callableFactory.createUnaryCallable( + createInstanceTransportSettings, settings.createInstanceSettings(), clientContext); + 
this.createInstanceOperationCallable = + callableFactory.createOperationCallable( + createInstanceTransportSettings, + settings.createInstanceOperationSettings(), + clientContext, + operationsStub); + this.updateInstanceCallable = + callableFactory.createUnaryCallable( + updateInstanceTransportSettings, settings.updateInstanceSettings(), clientContext); + this.updateInstanceOperationCallable = + callableFactory.createOperationCallable( + updateInstanceTransportSettings, + settings.updateInstanceOperationSettings(), + clientContext, + operationsStub); + this.deleteInstanceCallable = + callableFactory.createUnaryCallable( + deleteInstanceTransportSettings, settings.deleteInstanceSettings(), clientContext); + this.setIamPolicyCallable = + callableFactory.createUnaryCallable( + setIamPolicyTransportSettings, settings.setIamPolicySettings(), clientContext); + this.getIamPolicyCallable = + callableFactory.createUnaryCallable( + getIamPolicyTransportSettings, settings.getIamPolicySettings(), clientContext); + this.testIamPermissionsCallable = + callableFactory.createUnaryCallable( + testIamPermissionsTransportSettings, + settings.testIamPermissionsSettings(), + clientContext); + this.getInstancePartitionCallable = + callableFactory.createUnaryCallable( + getInstancePartitionTransportSettings, + settings.getInstancePartitionSettings(), + clientContext); + this.createInstancePartitionCallable = + callableFactory.createUnaryCallable( + createInstancePartitionTransportSettings, + settings.createInstancePartitionSettings(), + clientContext); + this.createInstancePartitionOperationCallable = + callableFactory.createOperationCallable( + createInstancePartitionTransportSettings, + settings.createInstancePartitionOperationSettings(), + clientContext, + operationsStub); + this.deleteInstancePartitionCallable = + callableFactory.createUnaryCallable( + deleteInstancePartitionTransportSettings, + settings.deleteInstancePartitionSettings(), + clientContext); + 
this.updateInstancePartitionCallable = + callableFactory.createUnaryCallable( + updateInstancePartitionTransportSettings, + settings.updateInstancePartitionSettings(), + clientContext); + this.updateInstancePartitionOperationCallable = + callableFactory.createOperationCallable( + updateInstancePartitionTransportSettings, + settings.updateInstancePartitionOperationSettings(), + clientContext, + operationsStub); + this.listInstancePartitionOperationsCallable = + callableFactory.createUnaryCallable( + listInstancePartitionOperationsTransportSettings, + settings.listInstancePartitionOperationsSettings(), + clientContext); + this.listInstancePartitionOperationsPagedCallable = + callableFactory.createPagedCallable( + listInstancePartitionOperationsTransportSettings, + settings.listInstancePartitionOperationsSettings(), + clientContext); + this.moveInstanceCallable = + callableFactory.createUnaryCallable( + moveInstanceTransportSettings, settings.moveInstanceSettings(), clientContext); + this.moveInstanceOperationCallable = + callableFactory.createOperationCallable( + moveInstanceTransportSettings, + settings.moveInstanceOperationSettings(), + clientContext, + operationsStub); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + public GrpcOperationsStub getOperationsStub() { + return operationsStub; + } + + @Override + public UnaryCallable + listInstanceConfigsCallable() { + return listInstanceConfigsCallable; + } + + @Override + public UnaryCallable + listInstanceConfigsPagedCallable() { + return listInstanceConfigsPagedCallable; + } + + @Override + public UnaryCallable getInstanceConfigCallable() { + return getInstanceConfigCallable; + } + + @Override + public UnaryCallable createInstanceConfigCallable() { + return createInstanceConfigCallable; + } + + @Override + public OperationCallable< + CreateInstanceConfigRequest, InstanceConfig, CreateInstanceConfigMetadata> + createInstanceConfigOperationCallable() 
{ + return createInstanceConfigOperationCallable; + } + + @Override + public UnaryCallable updateInstanceConfigCallable() { + return updateInstanceConfigCallable; + } + + @Override + public OperationCallable< + UpdateInstanceConfigRequest, InstanceConfig, UpdateInstanceConfigMetadata> + updateInstanceConfigOperationCallable() { + return updateInstanceConfigOperationCallable; + } + + @Override + public UnaryCallable deleteInstanceConfigCallable() { + return deleteInstanceConfigCallable; + } + + @Override + public UnaryCallable + listInstanceConfigOperationsCallable() { + return listInstanceConfigOperationsCallable; + } + + @Override + public UnaryCallable< + ListInstanceConfigOperationsRequest, ListInstanceConfigOperationsPagedResponse> + listInstanceConfigOperationsPagedCallable() { + return listInstanceConfigOperationsPagedCallable; + } + + @Override + public UnaryCallable listInstancesCallable() { + return listInstancesCallable; + } + + @Override + public UnaryCallable + listInstancesPagedCallable() { + return listInstancesPagedCallable; + } + + @Override + public UnaryCallable + listInstancePartitionsCallable() { + return listInstancePartitionsCallable; + } + + @Override + public UnaryCallable + listInstancePartitionsPagedCallable() { + return listInstancePartitionsPagedCallable; + } + + @Override + public UnaryCallable getInstanceCallable() { + return getInstanceCallable; + } + + @Override + public UnaryCallable createInstanceCallable() { + return createInstanceCallable; + } + + @Override + public OperationCallable + createInstanceOperationCallable() { + return createInstanceOperationCallable; + } + + @Override + public UnaryCallable updateInstanceCallable() { + return updateInstanceCallable; + } + + @Override + public OperationCallable + updateInstanceOperationCallable() { + return updateInstanceOperationCallable; + } + + @Override + public UnaryCallable deleteInstanceCallable() { + return deleteInstanceCallable; + } + + @Override + public UnaryCallable 
setIamPolicyCallable() { + return setIamPolicyCallable; + } + + @Override + public UnaryCallable getIamPolicyCallable() { + return getIamPolicyCallable; + } + + @Override + public UnaryCallable + testIamPermissionsCallable() { + return testIamPermissionsCallable; + } + + @Override + public UnaryCallable + getInstancePartitionCallable() { + return getInstancePartitionCallable; + } + + @Override + public UnaryCallable + createInstancePartitionCallable() { + return createInstancePartitionCallable; + } + + @Override + public OperationCallable< + CreateInstancePartitionRequest, InstancePartition, CreateInstancePartitionMetadata> + createInstancePartitionOperationCallable() { + return createInstancePartitionOperationCallable; + } + + @Override + public UnaryCallable deleteInstancePartitionCallable() { + return deleteInstancePartitionCallable; + } + + @Override + public UnaryCallable + updateInstancePartitionCallable() { + return updateInstancePartitionCallable; + } + + @Override + public OperationCallable< + UpdateInstancePartitionRequest, InstancePartition, UpdateInstancePartitionMetadata> + updateInstancePartitionOperationCallable() { + return updateInstancePartitionOperationCallable; + } + + @Override + public UnaryCallable< + ListInstancePartitionOperationsRequest, ListInstancePartitionOperationsResponse> + listInstancePartitionOperationsCallable() { + return listInstancePartitionOperationsCallable; + } + + @Override + public UnaryCallable< + ListInstancePartitionOperationsRequest, ListInstancePartitionOperationsPagedResponse> + listInstancePartitionOperationsPagedCallable() { + return listInstancePartitionOperationsPagedCallable; + } + + @Override + public UnaryCallable moveInstanceCallable() { + return moveInstanceCallable; + } + + @Override + public OperationCallable + moveInstanceOperationCallable() { + return moveInstanceOperationCallable; + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } catch (RuntimeException e) { + 
throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/HttpJsonInstanceAdminCallableFactory.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/HttpJsonInstanceAdminCallableFactory.java new file mode 100644 index 000000000000..cad7d83e97ef --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/HttpJsonInstanceAdminCallableFactory.java @@ -0,0 +1,101 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.admin.instance.v1.stub; + +import com.google.api.gax.httpjson.HttpJsonCallSettings; +import com.google.api.gax.httpjson.HttpJsonCallableFactory; +import com.google.api.gax.httpjson.HttpJsonOperationSnapshotCallable; +import com.google.api.gax.httpjson.HttpJsonStubCallableFactory; +import com.google.api.gax.httpjson.longrunning.stub.OperationsStub; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * REST callable factory implementation for the InstanceAdmin service API. + * + *

This class is for advanced usage. + */ +@Generated("by gapic-generator-java") +public class HttpJsonInstanceAdminCallableFactory + implements HttpJsonStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + HttpJsonCallSettings httpJsonCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return HttpJsonCallableFactory.createUnaryCallable( + httpJsonCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + HttpJsonCallSettings httpJsonCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return HttpJsonCallableFactory.createPagedCallable( + httpJsonCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + HttpJsonCallSettings httpJsonCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return HttpJsonCallableFactory.createBatchingCallable( + httpJsonCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + HttpJsonCallSettings httpJsonCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + UnaryCallable innerCallable = + HttpJsonCallableFactory.createBaseUnaryCallable( + httpJsonCallSettings, callSettings.getInitialCallSettings(), clientContext); + HttpJsonOperationSnapshotCallable initialCallable = + new HttpJsonOperationSnapshotCallable( + innerCallable, + httpJsonCallSettings.getMethodDescriptor().getOperationSnapshotFactory()); + return HttpJsonCallableFactory.createOperationCallable( + callSettings, clientContext, operationsStub.longRunningClient(), initialCallable); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + HttpJsonCallSettings httpJsonCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return 
HttpJsonCallableFactory.createServerStreamingCallable( + httpJsonCallSettings, callSettings, clientContext); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/HttpJsonInstanceAdminStub.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/HttpJsonInstanceAdminStub.java new file mode 100644 index 000000000000..993e01537d3b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/HttpJsonInstanceAdminStub.java @@ -0,0 +1,1852 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.admin.instance.v1.stub; + +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstanceConfigOperationsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstanceConfigsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstancePartitionOperationsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstancePartitionsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstancesPagedResponse; + +import com.google.api.HttpRule; +import com.google.api.core.InternalApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.httpjson.ApiMethodDescriptor; +import com.google.api.gax.httpjson.HttpJsonCallSettings; +import com.google.api.gax.httpjson.HttpJsonOperationSnapshot; +import com.google.api.gax.httpjson.HttpJsonStubCallableFactory; +import com.google.api.gax.httpjson.ProtoMessageRequestFormatter; +import com.google.api.gax.httpjson.ProtoMessageResponseParser; +import com.google.api.gax.httpjson.ProtoRestSerializer; +import com.google.api.gax.httpjson.longrunning.stub.HttpJsonOperationsStub; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.RequestParamsBuilder; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.common.collect.ImmutableMap; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import com.google.protobuf.TypeRegistry; +import 
com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.CreateInstanceMetadata; +import com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata; +import com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.CreateInstanceRequest; +import com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.DeleteInstanceRequest; +import com.google.spanner.admin.instance.v1.GetInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.GetInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.GetInstanceRequest; +import com.google.spanner.admin.instance.v1.Instance; +import com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.InstancePartition; +import com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest; +import com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse; +import com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest; +import com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse; +import com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest; +import com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse; +import com.google.spanner.admin.instance.v1.ListInstancesRequest; +import com.google.spanner.admin.instance.v1.ListInstancesResponse; +import com.google.spanner.admin.instance.v1.MoveInstanceMetadata; +import com.google.spanner.admin.instance.v1.MoveInstanceRequest; +import 
com.google.spanner.admin.instance.v1.MoveInstanceResponse; +import com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.UpdateInstanceRequest; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * REST stub implementation for the InstanceAdmin service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public class HttpJsonInstanceAdminStub extends InstanceAdminStub { + private static final TypeRegistry typeRegistry = + TypeRegistry.newBuilder() + .add(MoveInstanceResponse.getDescriptor()) + .add(InstanceConfig.getDescriptor()) + .add(CreateInstancePartitionMetadata.getDescriptor()) + .add(MoveInstanceMetadata.getDescriptor()) + .add(UpdateInstancePartitionMetadata.getDescriptor()) + .add(Instance.getDescriptor()) + .add(InstancePartition.getDescriptor()) + .add(CreateInstanceConfigMetadata.getDescriptor()) + .add(UpdateInstanceMetadata.getDescriptor()) + .add(CreateInstanceMetadata.getDescriptor()) + .add(UpdateInstanceConfigMetadata.getDescriptor()) + .build(); + + private static final ApiMethodDescriptor + listInstanceConfigsMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigs") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*}/instanceConfigs", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "pageSize", request.getPageSize()); + serializer.putQueryParam(fields, "pageToken", request.getPageToken()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ListInstanceConfigsResponse.getDefaultInstance()) + 
.setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + getInstanceConfigMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.instance.v1.InstanceAdmin/GetInstanceConfig") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/instanceConfigs/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(InstanceConfig.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + createInstanceConfigMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/CreateInstanceConfig") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*}/instanceConfigs", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + 
.setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearParent().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Operation.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .setOperationSnapshotFactory( + (CreateInstanceConfigRequest request, Operation response) -> + HttpJsonOperationSnapshot.create(response)) + .build(); + + private static final ApiMethodDescriptor + updateInstanceConfigMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstanceConfig") + .setHttpMethod("PATCH") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{instanceConfig.name=projects/*/instanceConfigs/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam( + fields, + "instanceConfig.name", + request.getInstanceConfig().getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Operation.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .setOperationSnapshotFactory( + (UpdateInstanceConfigRequest request, Operation response) -> + HttpJsonOperationSnapshot.create(response)) + .build(); + + private static final ApiMethodDescriptor + deleteInstanceConfigMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + 
"google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstanceConfig") + .setHttpMethod("DELETE") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/instanceConfigs/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "etag", request.getEtag()); + serializer.putQueryParam( + fields, "validateOnly", request.getValidateOnly()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Empty.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor< + ListInstanceConfigOperationsRequest, ListInstanceConfigOperationsResponse> + listInstanceConfigOperationsMethodDescriptor = + ApiMethodDescriptor + . 
+ newBuilder() + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigOperations") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*}/instanceConfigOperations", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "filter", request.getFilter()); + serializer.putQueryParam(fields, "pageSize", request.getPageSize()); + serializer.putQueryParam(fields, "pageToken", request.getPageToken()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ListInstanceConfigOperationsResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + listInstancesMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.instance.v1.InstanceAdmin/ListInstances") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*}/instances", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + 
serializer.putQueryParam(fields, "filter", request.getFilter()); + serializer.putQueryParam( + fields, "instanceDeadline", request.getInstanceDeadline()); + serializer.putQueryParam(fields, "pageSize", request.getPageSize()); + serializer.putQueryParam(fields, "pageToken", request.getPageToken()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ListInstancesResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor< + ListInstancePartitionsRequest, ListInstancePartitionsResponse> + listInstancePartitionsMethodDescriptor = + ApiMethodDescriptor + .newBuilder() + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/ListInstancePartitions") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*/instances/*}/instancePartitions", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam( + fields, + "instancePartitionDeadline", + request.getInstancePartitionDeadline()); + serializer.putQueryParam(fields, "pageSize", request.getPageSize()); + serializer.putQueryParam(fields, "pageToken", request.getPageToken()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + 
.setDefaultInstance(ListInstancePartitionsResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + getInstanceMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.instance.v1.InstanceAdmin/GetInstance") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/instances/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "fieldMask", request.getFieldMask()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Instance.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + createInstanceMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.instance.v1.InstanceAdmin/CreateInstance") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*}/instances", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + 
ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearParent().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Operation.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .setOperationSnapshotFactory( + (CreateInstanceRequest request, Operation response) -> + HttpJsonOperationSnapshot.create(response)) + .build(); + + private static final ApiMethodDescriptor + updateInstanceMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstance") + .setHttpMethod("PATCH") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{instance.name=projects/*/instances/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam( + fields, "instance.name", request.getInstance().getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Operation.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .setOperationSnapshotFactory( + (UpdateInstanceRequest request, Operation response) -> + HttpJsonOperationSnapshot.create(response)) + .build(); + + private static final ApiMethodDescriptor + deleteInstanceMethodDescriptor = + 
ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstance") + .setHttpMethod("DELETE") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/instances/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Empty.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + setIamPolicyMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.instance.v1.InstanceAdmin/SetIamPolicy") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{resource=projects/*/instances/*}:setIamPolicy", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "resource", request.getResource()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearResource().build(), true)) + .build()) + 
.setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Policy.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + getIamPolicyMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.instance.v1.InstanceAdmin/GetIamPolicy") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{resource=projects/*/instances/*}:getIamPolicy", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "resource", request.getResource()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearResource().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Policy.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + testIamPermissionsMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/TestIamPermissions") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{resource=projects/*/instances/*}:testIamPermissions", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "resource", request.getResource()); + return fields; + }) + 
.setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearResource().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(TestIamPermissionsResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + getInstancePartitionMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/GetInstancePartition") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/instances/*/instancePartitions/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(InstancePartition.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + createInstancePartitionMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/CreateInstancePartition") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + 
.setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*/instances/*}/instancePartitions", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearParent().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Operation.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .setOperationSnapshotFactory( + (CreateInstancePartitionRequest request, Operation response) -> + HttpJsonOperationSnapshot.create(response)) + .build(); + + private static final ApiMethodDescriptor + deleteInstancePartitionMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstancePartition") + .setHttpMethod("DELETE") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/instances/*/instancePartitions/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "etag", request.getEtag()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + 
.setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Empty.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + updateInstancePartitionMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstancePartition") + .setHttpMethod("PATCH") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{instancePartition.name=projects/*/instances/*/instancePartitions/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam( + fields, + "instancePartition.name", + request.getInstancePartition().getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Operation.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .setOperationSnapshotFactory( + (UpdateInstancePartitionRequest request, Operation response) -> + HttpJsonOperationSnapshot.create(response)) + .build(); + + private static final ApiMethodDescriptor< + ListInstancePartitionOperationsRequest, ListInstancePartitionOperationsResponse> + listInstancePartitionOperationsMethodDescriptor = + ApiMethodDescriptor + . 
+ newBuilder() + .setFullMethodName( + "google.spanner.admin.instance.v1.InstanceAdmin/ListInstancePartitionOperations") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*/instances/*}/instancePartitionOperations", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "filter", request.getFilter()); + serializer.putQueryParam( + fields, + "instancePartitionDeadline", + request.getInstancePartitionDeadline()); + serializer.putQueryParam(fields, "pageSize", request.getPageSize()); + serializer.putQueryParam(fields, "pageToken", request.getPageToken()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance( + ListInstancePartitionOperationsResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + moveInstanceMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.instance.v1.InstanceAdmin/MoveInstance") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/instances/*}:move", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + 
request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearName().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Operation.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .setOperationSnapshotFactory( + (MoveInstanceRequest request, Operation response) -> + HttpJsonOperationSnapshot.create(response)) + .build(); + + private final UnaryCallable + listInstanceConfigsCallable; + private final UnaryCallable + listInstanceConfigsPagedCallable; + private final UnaryCallable getInstanceConfigCallable; + private final UnaryCallable createInstanceConfigCallable; + private final OperationCallable< + CreateInstanceConfigRequest, InstanceConfig, CreateInstanceConfigMetadata> + createInstanceConfigOperationCallable; + private final UnaryCallable updateInstanceConfigCallable; + private final OperationCallable< + UpdateInstanceConfigRequest, InstanceConfig, UpdateInstanceConfigMetadata> + updateInstanceConfigOperationCallable; + private final UnaryCallable deleteInstanceConfigCallable; + private final UnaryCallable< + ListInstanceConfigOperationsRequest, ListInstanceConfigOperationsResponse> + listInstanceConfigOperationsCallable; + private final UnaryCallable< + ListInstanceConfigOperationsRequest, ListInstanceConfigOperationsPagedResponse> + listInstanceConfigOperationsPagedCallable; + private final UnaryCallable listInstancesCallable; + private final UnaryCallable + listInstancesPagedCallable; + private final UnaryCallable + listInstancePartitionsCallable; + private final UnaryCallable + listInstancePartitionsPagedCallable; + private final UnaryCallable getInstanceCallable; + private final UnaryCallable createInstanceCallable; + 
private final OperationCallable + createInstanceOperationCallable; + private final UnaryCallable updateInstanceCallable; + private final OperationCallable + updateInstanceOperationCallable; + private final UnaryCallable deleteInstanceCallable; + private final UnaryCallable setIamPolicyCallable; + private final UnaryCallable getIamPolicyCallable; + private final UnaryCallable + testIamPermissionsCallable; + private final UnaryCallable + getInstancePartitionCallable; + private final UnaryCallable + createInstancePartitionCallable; + private final OperationCallable< + CreateInstancePartitionRequest, InstancePartition, CreateInstancePartitionMetadata> + createInstancePartitionOperationCallable; + private final UnaryCallable + deleteInstancePartitionCallable; + private final UnaryCallable + updateInstancePartitionCallable; + private final OperationCallable< + UpdateInstancePartitionRequest, InstancePartition, UpdateInstancePartitionMetadata> + updateInstancePartitionOperationCallable; + private final UnaryCallable< + ListInstancePartitionOperationsRequest, ListInstancePartitionOperationsResponse> + listInstancePartitionOperationsCallable; + private final UnaryCallable< + ListInstancePartitionOperationsRequest, ListInstancePartitionOperationsPagedResponse> + listInstancePartitionOperationsPagedCallable; + private final UnaryCallable moveInstanceCallable; + private final OperationCallable + moveInstanceOperationCallable; + + private final BackgroundResource backgroundResources; + private final HttpJsonOperationsStub httpJsonOperationsStub; + private final HttpJsonStubCallableFactory callableFactory; + + public static final HttpJsonInstanceAdminStub create(InstanceAdminStubSettings settings) + throws IOException { + return new HttpJsonInstanceAdminStub(settings, ClientContext.create(settings)); + } + + public static final HttpJsonInstanceAdminStub create(ClientContext clientContext) + throws IOException { + return new HttpJsonInstanceAdminStub( + 
InstanceAdminStubSettings.newHttpJsonBuilder().build(), clientContext); + } + + public static final HttpJsonInstanceAdminStub create( + ClientContext clientContext, HttpJsonStubCallableFactory callableFactory) throws IOException { + return new HttpJsonInstanceAdminStub( + InstanceAdminStubSettings.newHttpJsonBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of HttpJsonInstanceAdminStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. + */ + protected HttpJsonInstanceAdminStub( + InstanceAdminStubSettings settings, ClientContext clientContext) throws IOException { + this(settings, clientContext, new HttpJsonInstanceAdminCallableFactory()); + } + + /** + * Constructs an instance of HttpJsonInstanceAdminStub, using the given settings. This is + * protected so that it is easy to make a subclass, but otherwise, the static factory methods + * should be preferred. 
+ */ + protected HttpJsonInstanceAdminStub( + InstanceAdminStubSettings settings, + ClientContext clientContext, + HttpJsonStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.httpJsonOperationsStub = + HttpJsonOperationsStub.create( + clientContext, + callableFactory, + typeRegistry, + ImmutableMap.builder() + .put( + "google.longrunning.Operations.CancelOperation", + HttpRule.newBuilder() + .setPost( + "/v1/{name=projects/*/instances/*/databases/*/operations/*}:cancel") + .addAdditionalBindings( + HttpRule.newBuilder() + .setPost("/v1/{name=projects/*/instances/*/operations/*}:cancel") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setPost( + "/v1/{name=projects/*/instances/*/backups/*/operations/*}:cancel") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setPost( + "/v1/{name=projects/*/instances/*/instancePartitions/*/operations/*}:cancel") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setPost( + "/v1/{name=projects/*/instanceConfigs/*/operations/*}:cancel") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setPost( + "/v1/{name=projects/*/instanceConfigs/*/ssdCaches/*/operations/*}:cancel") + .build()) + .build()) + .put( + "google.longrunning.Operations.DeleteOperation", + HttpRule.newBuilder() + .setDelete("/v1/{name=projects/*/instances/*/databases/*/operations/*}") + .addAdditionalBindings( + HttpRule.newBuilder() + .setDelete("/v1/{name=projects/*/instances/*/operations/*}") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setDelete( + "/v1/{name=projects/*/instances/*/backups/*/operations/*}") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setDelete( + "/v1/{name=projects/*/instances/*/instancePartitions/*/operations/*}") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setDelete("/v1/{name=projects/*/instanceConfigs/*/operations/*}") + .build()) + .addAdditionalBindings( + 
HttpRule.newBuilder() + .setDelete( + "/v1/{name=projects/*/instanceConfigs/*/ssdCaches/*/operations/*}") + .build()) + .build()) + .put( + "google.longrunning.Operations.GetOperation", + HttpRule.newBuilder() + .setGet("/v1/{name=projects/*/instances/*/databases/*/operations/*}") + .addAdditionalBindings( + HttpRule.newBuilder() + .setGet("/v1/{name=projects/*/instances/*/operations/*}") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setGet("/v1/{name=projects/*/instances/*/backups/*/operations/*}") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setGet( + "/v1/{name=projects/*/instances/*/instancePartitions/*/operations/*}") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setGet("/v1/{name=projects/*/instanceConfigs/*/operations/*}") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setGet( + "/v1/{name=projects/*/instanceConfigs/*/ssdCaches/*/operations/*}") + .build()) + .build()) + .put( + "google.longrunning.Operations.ListOperations", + HttpRule.newBuilder() + .setGet("/v1/{name=projects/*/instances/*/databases/*/operations}") + .addAdditionalBindings( + HttpRule.newBuilder() + .setGet("/v1/{name=projects/*/instances/*/operations}") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setGet("/v1/{name=projects/*/instances/*/backups/*/operations}") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setGet( + "/v1/{name=projects/*/instances/*/instancePartitions/*/operations}") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setGet("/v1/{name=projects/*/instanceConfigs/*/operations}") + .build()) + .addAdditionalBindings( + HttpRule.newBuilder() + .setGet( + "/v1/{name=projects/*/instanceConfigs/*/ssdCaches/*/operations}") + .build()) + .build()) + .build()); + + HttpJsonCallSettings + listInstanceConfigsTransportSettings = + HttpJsonCallSettings + .newBuilder() + .setMethodDescriptor(listInstanceConfigsMethodDescriptor) + 
.setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + HttpJsonCallSettings + getInstanceConfigTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(getInstanceConfigMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + HttpJsonCallSettings + createInstanceConfigTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(createInstanceConfigMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + HttpJsonCallSettings + updateInstanceConfigTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(updateInstanceConfigMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "instance_config.name", + String.valueOf(request.getInstanceConfig().getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings deleteInstanceConfigTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(deleteInstanceConfigMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", 
String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + HttpJsonCallSettings + listInstanceConfigOperationsTransportSettings = + HttpJsonCallSettings + . + newBuilder() + .setMethodDescriptor(listInstanceConfigOperationsMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + HttpJsonCallSettings + listInstancesTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(listInstancesMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + HttpJsonCallSettings + listInstancePartitionsTransportSettings = + HttpJsonCallSettings + .newBuilder() + .setMethodDescriptor(listInstancePartitionsMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + HttpJsonCallSettings getInstanceTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(getInstanceMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + HttpJsonCallSettings 
createInstanceTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(createInstanceMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + HttpJsonCallSettings updateInstanceTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(updateInstanceMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("instance.name", String.valueOf(request.getInstance().getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings deleteInstanceTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(deleteInstanceMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + HttpJsonCallSettings setIamPolicyTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(setIamPolicyMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("resource", String.valueOf(request.getResource())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getResource()) + .build(); + HttpJsonCallSettings getIamPolicyTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(getIamPolicyMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = 
RequestParamsBuilder.create(); + builder.add("resource", String.valueOf(request.getResource())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getResource()) + .build(); + HttpJsonCallSettings + testIamPermissionsTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(testIamPermissionsMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("resource", String.valueOf(request.getResource())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getResource()) + .build(); + HttpJsonCallSettings + getInstancePartitionTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(getInstancePartitionMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + HttpJsonCallSettings + createInstancePartitionTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(createInstancePartitionMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + HttpJsonCallSettings + deleteInstancePartitionTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(deleteInstancePartitionMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + 
.setResourceNameExtractor(request -> request.getName()) + .build(); + HttpJsonCallSettings + updateInstancePartitionTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(updateInstancePartitionMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "instance_partition.name", + String.valueOf(request.getInstancePartition().getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings< + ListInstancePartitionOperationsRequest, ListInstancePartitionOperationsResponse> + listInstancePartitionOperationsTransportSettings = + HttpJsonCallSettings + . + newBuilder() + .setMethodDescriptor(listInstancePartitionOperationsMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getParent()) + .build(); + HttpJsonCallSettings moveInstanceTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(moveInstanceMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + + this.listInstanceConfigsCallable = + callableFactory.createUnaryCallable( + listInstanceConfigsTransportSettings, + settings.listInstanceConfigsSettings(), + clientContext); + this.listInstanceConfigsPagedCallable = + callableFactory.createPagedCallable( + listInstanceConfigsTransportSettings, + settings.listInstanceConfigsSettings(), + clientContext); + this.getInstanceConfigCallable = + callableFactory.createUnaryCallable( + 
getInstanceConfigTransportSettings, + settings.getInstanceConfigSettings(), + clientContext); + this.createInstanceConfigCallable = + callableFactory.createUnaryCallable( + createInstanceConfigTransportSettings, + settings.createInstanceConfigSettings(), + clientContext); + this.createInstanceConfigOperationCallable = + callableFactory.createOperationCallable( + createInstanceConfigTransportSettings, + settings.createInstanceConfigOperationSettings(), + clientContext, + httpJsonOperationsStub); + this.updateInstanceConfigCallable = + callableFactory.createUnaryCallable( + updateInstanceConfigTransportSettings, + settings.updateInstanceConfigSettings(), + clientContext); + this.updateInstanceConfigOperationCallable = + callableFactory.createOperationCallable( + updateInstanceConfigTransportSettings, + settings.updateInstanceConfigOperationSettings(), + clientContext, + httpJsonOperationsStub); + this.deleteInstanceConfigCallable = + callableFactory.createUnaryCallable( + deleteInstanceConfigTransportSettings, + settings.deleteInstanceConfigSettings(), + clientContext); + this.listInstanceConfigOperationsCallable = + callableFactory.createUnaryCallable( + listInstanceConfigOperationsTransportSettings, + settings.listInstanceConfigOperationsSettings(), + clientContext); + this.listInstanceConfigOperationsPagedCallable = + callableFactory.createPagedCallable( + listInstanceConfigOperationsTransportSettings, + settings.listInstanceConfigOperationsSettings(), + clientContext); + this.listInstancesCallable = + callableFactory.createUnaryCallable( + listInstancesTransportSettings, settings.listInstancesSettings(), clientContext); + this.listInstancesPagedCallable = + callableFactory.createPagedCallable( + listInstancesTransportSettings, settings.listInstancesSettings(), clientContext); + this.listInstancePartitionsCallable = + callableFactory.createUnaryCallable( + listInstancePartitionsTransportSettings, + settings.listInstancePartitionsSettings(), + clientContext); + 
this.listInstancePartitionsPagedCallable = + callableFactory.createPagedCallable( + listInstancePartitionsTransportSettings, + settings.listInstancePartitionsSettings(), + clientContext); + this.getInstanceCallable = + callableFactory.createUnaryCallable( + getInstanceTransportSettings, settings.getInstanceSettings(), clientContext); + this.createInstanceCallable = + callableFactory.createUnaryCallable( + createInstanceTransportSettings, settings.createInstanceSettings(), clientContext); + this.createInstanceOperationCallable = + callableFactory.createOperationCallable( + createInstanceTransportSettings, + settings.createInstanceOperationSettings(), + clientContext, + httpJsonOperationsStub); + this.updateInstanceCallable = + callableFactory.createUnaryCallable( + updateInstanceTransportSettings, settings.updateInstanceSettings(), clientContext); + this.updateInstanceOperationCallable = + callableFactory.createOperationCallable( + updateInstanceTransportSettings, + settings.updateInstanceOperationSettings(), + clientContext, + httpJsonOperationsStub); + this.deleteInstanceCallable = + callableFactory.createUnaryCallable( + deleteInstanceTransportSettings, settings.deleteInstanceSettings(), clientContext); + this.setIamPolicyCallable = + callableFactory.createUnaryCallable( + setIamPolicyTransportSettings, settings.setIamPolicySettings(), clientContext); + this.getIamPolicyCallable = + callableFactory.createUnaryCallable( + getIamPolicyTransportSettings, settings.getIamPolicySettings(), clientContext); + this.testIamPermissionsCallable = + callableFactory.createUnaryCallable( + testIamPermissionsTransportSettings, + settings.testIamPermissionsSettings(), + clientContext); + this.getInstancePartitionCallable = + callableFactory.createUnaryCallable( + getInstancePartitionTransportSettings, + settings.getInstancePartitionSettings(), + clientContext); + this.createInstancePartitionCallable = + callableFactory.createUnaryCallable( + 
createInstancePartitionTransportSettings, + settings.createInstancePartitionSettings(), + clientContext); + this.createInstancePartitionOperationCallable = + callableFactory.createOperationCallable( + createInstancePartitionTransportSettings, + settings.createInstancePartitionOperationSettings(), + clientContext, + httpJsonOperationsStub); + this.deleteInstancePartitionCallable = + callableFactory.createUnaryCallable( + deleteInstancePartitionTransportSettings, + settings.deleteInstancePartitionSettings(), + clientContext); + this.updateInstancePartitionCallable = + callableFactory.createUnaryCallable( + updateInstancePartitionTransportSettings, + settings.updateInstancePartitionSettings(), + clientContext); + this.updateInstancePartitionOperationCallable = + callableFactory.createOperationCallable( + updateInstancePartitionTransportSettings, + settings.updateInstancePartitionOperationSettings(), + clientContext, + httpJsonOperationsStub); + this.listInstancePartitionOperationsCallable = + callableFactory.createUnaryCallable( + listInstancePartitionOperationsTransportSettings, + settings.listInstancePartitionOperationsSettings(), + clientContext); + this.listInstancePartitionOperationsPagedCallable = + callableFactory.createPagedCallable( + listInstancePartitionOperationsTransportSettings, + settings.listInstancePartitionOperationsSettings(), + clientContext); + this.moveInstanceCallable = + callableFactory.createUnaryCallable( + moveInstanceTransportSettings, settings.moveInstanceSettings(), clientContext); + this.moveInstanceOperationCallable = + callableFactory.createOperationCallable( + moveInstanceTransportSettings, + settings.moveInstanceOperationSettings(), + clientContext, + httpJsonOperationsStub); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + @InternalApi + public static List getMethodDescriptors() { + List methodDescriptors = new ArrayList<>(); + 
methodDescriptors.add(listInstanceConfigsMethodDescriptor); + methodDescriptors.add(getInstanceConfigMethodDescriptor); + methodDescriptors.add(createInstanceConfigMethodDescriptor); + methodDescriptors.add(updateInstanceConfigMethodDescriptor); + methodDescriptors.add(deleteInstanceConfigMethodDescriptor); + methodDescriptors.add(listInstanceConfigOperationsMethodDescriptor); + methodDescriptors.add(listInstancesMethodDescriptor); + methodDescriptors.add(listInstancePartitionsMethodDescriptor); + methodDescriptors.add(getInstanceMethodDescriptor); + methodDescriptors.add(createInstanceMethodDescriptor); + methodDescriptors.add(updateInstanceMethodDescriptor); + methodDescriptors.add(deleteInstanceMethodDescriptor); + methodDescriptors.add(setIamPolicyMethodDescriptor); + methodDescriptors.add(getIamPolicyMethodDescriptor); + methodDescriptors.add(testIamPermissionsMethodDescriptor); + methodDescriptors.add(getInstancePartitionMethodDescriptor); + methodDescriptors.add(createInstancePartitionMethodDescriptor); + methodDescriptors.add(deleteInstancePartitionMethodDescriptor); + methodDescriptors.add(updateInstancePartitionMethodDescriptor); + methodDescriptors.add(listInstancePartitionOperationsMethodDescriptor); + methodDescriptors.add(moveInstanceMethodDescriptor); + return methodDescriptors; + } + + public HttpJsonOperationsStub getHttpJsonOperationsStub() { + return httpJsonOperationsStub; + } + + @Override + public UnaryCallable + listInstanceConfigsCallable() { + return listInstanceConfigsCallable; + } + + @Override + public UnaryCallable + listInstanceConfigsPagedCallable() { + return listInstanceConfigsPagedCallable; + } + + @Override + public UnaryCallable getInstanceConfigCallable() { + return getInstanceConfigCallable; + } + + @Override + public UnaryCallable createInstanceConfigCallable() { + return createInstanceConfigCallable; + } + + @Override + public OperationCallable< + CreateInstanceConfigRequest, InstanceConfig, CreateInstanceConfigMetadata> + 
createInstanceConfigOperationCallable() { + return createInstanceConfigOperationCallable; + } + + @Override + public UnaryCallable updateInstanceConfigCallable() { + return updateInstanceConfigCallable; + } + + @Override + public OperationCallable< + UpdateInstanceConfigRequest, InstanceConfig, UpdateInstanceConfigMetadata> + updateInstanceConfigOperationCallable() { + return updateInstanceConfigOperationCallable; + } + + @Override + public UnaryCallable deleteInstanceConfigCallable() { + return deleteInstanceConfigCallable; + } + + @Override + public UnaryCallable + listInstanceConfigOperationsCallable() { + return listInstanceConfigOperationsCallable; + } + + @Override + public UnaryCallable< + ListInstanceConfigOperationsRequest, ListInstanceConfigOperationsPagedResponse> + listInstanceConfigOperationsPagedCallable() { + return listInstanceConfigOperationsPagedCallable; + } + + @Override + public UnaryCallable listInstancesCallable() { + return listInstancesCallable; + } + + @Override + public UnaryCallable + listInstancesPagedCallable() { + return listInstancesPagedCallable; + } + + @Override + public UnaryCallable + listInstancePartitionsCallable() { + return listInstancePartitionsCallable; + } + + @Override + public UnaryCallable + listInstancePartitionsPagedCallable() { + return listInstancePartitionsPagedCallable; + } + + @Override + public UnaryCallable getInstanceCallable() { + return getInstanceCallable; + } + + @Override + public UnaryCallable createInstanceCallable() { + return createInstanceCallable; + } + + @Override + public OperationCallable + createInstanceOperationCallable() { + return createInstanceOperationCallable; + } + + @Override + public UnaryCallable updateInstanceCallable() { + return updateInstanceCallable; + } + + @Override + public OperationCallable + updateInstanceOperationCallable() { + return updateInstanceOperationCallable; + } + + @Override + public UnaryCallable deleteInstanceCallable() { + return deleteInstanceCallable; + } + + 
@Override + public UnaryCallable setIamPolicyCallable() { + return setIamPolicyCallable; + } + + @Override + public UnaryCallable getIamPolicyCallable() { + return getIamPolicyCallable; + } + + @Override + public UnaryCallable + testIamPermissionsCallable() { + return testIamPermissionsCallable; + } + + @Override + public UnaryCallable + getInstancePartitionCallable() { + return getInstancePartitionCallable; + } + + @Override + public UnaryCallable + createInstancePartitionCallable() { + return createInstancePartitionCallable; + } + + @Override + public OperationCallable< + CreateInstancePartitionRequest, InstancePartition, CreateInstancePartitionMetadata> + createInstancePartitionOperationCallable() { + return createInstancePartitionOperationCallable; + } + + @Override + public UnaryCallable deleteInstancePartitionCallable() { + return deleteInstancePartitionCallable; + } + + @Override + public UnaryCallable + updateInstancePartitionCallable() { + return updateInstancePartitionCallable; + } + + @Override + public OperationCallable< + UpdateInstancePartitionRequest, InstancePartition, UpdateInstancePartitionMetadata> + updateInstancePartitionOperationCallable() { + return updateInstancePartitionOperationCallable; + } + + @Override + public UnaryCallable< + ListInstancePartitionOperationsRequest, ListInstancePartitionOperationsResponse> + listInstancePartitionOperationsCallable() { + return listInstancePartitionOperationsCallable; + } + + @Override + public UnaryCallable< + ListInstancePartitionOperationsRequest, ListInstancePartitionOperationsPagedResponse> + listInstancePartitionOperationsPagedCallable() { + return listInstancePartitionOperationsPagedCallable; + } + + @Override + public UnaryCallable moveInstanceCallable() { + return moveInstanceCallable; + } + + @Override + public OperationCallable + moveInstanceOperationCallable() { + return moveInstanceOperationCallable; + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } 
catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/InstanceAdminStub.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/InstanceAdminStub.java new file mode 100644 index 000000000000..2c8e3f1c4d96 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/InstanceAdminStub.java @@ -0,0 +1,259 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.admin.instance.v1.stub; + +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstanceConfigOperationsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstanceConfigsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstancePartitionOperationsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstancePartitionsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstancesPagedResponse; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import com.google.protobuf.Empty; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.CreateInstanceMetadata; +import com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata; +import com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.CreateInstanceRequest; +import com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.DeleteInstanceRequest; +import com.google.spanner.admin.instance.v1.GetInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.GetInstancePartitionRequest; +import 
com.google.spanner.admin.instance.v1.GetInstanceRequest; +import com.google.spanner.admin.instance.v1.Instance; +import com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.InstancePartition; +import com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest; +import com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse; +import com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest; +import com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse; +import com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest; +import com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse; +import com.google.spanner.admin.instance.v1.ListInstancesRequest; +import com.google.spanner.admin.instance.v1.ListInstancesResponse; +import com.google.spanner.admin.instance.v1.MoveInstanceMetadata; +import com.google.spanner.admin.instance.v1.MoveInstanceRequest; +import com.google.spanner.admin.instance.v1.MoveInstanceResponse; +import com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.UpdateInstanceRequest; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Base stub class for the InstanceAdmin service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public abstract class InstanceAdminStub implements BackgroundResource { + + public OperationsStub getOperationsStub() { + return null; + } + + public com.google.api.gax.httpjson.longrunning.stub.OperationsStub getHttpJsonOperationsStub() { + return null; + } + + public UnaryCallable + listInstanceConfigsPagedCallable() { + throw new UnsupportedOperationException("Not implemented: listInstanceConfigsPagedCallable()"); + } + + public UnaryCallable + listInstanceConfigsCallable() { + throw new UnsupportedOperationException("Not implemented: listInstanceConfigsCallable()"); + } + + public UnaryCallable getInstanceConfigCallable() { + throw new UnsupportedOperationException("Not implemented: getInstanceConfigCallable()"); + } + + public OperationCallable< + CreateInstanceConfigRequest, InstanceConfig, CreateInstanceConfigMetadata> + createInstanceConfigOperationCallable() { + throw new UnsupportedOperationException( + "Not implemented: createInstanceConfigOperationCallable()"); + } + + public UnaryCallable createInstanceConfigCallable() { + throw new UnsupportedOperationException("Not implemented: createInstanceConfigCallable()"); + } + + public OperationCallable< + UpdateInstanceConfigRequest, InstanceConfig, UpdateInstanceConfigMetadata> + updateInstanceConfigOperationCallable() { + throw new UnsupportedOperationException( + "Not implemented: updateInstanceConfigOperationCallable()"); + } + + public UnaryCallable updateInstanceConfigCallable() { + throw new UnsupportedOperationException("Not implemented: updateInstanceConfigCallable()"); + } + + public UnaryCallable deleteInstanceConfigCallable() { + throw new UnsupportedOperationException("Not implemented: deleteInstanceConfigCallable()"); + } + + public UnaryCallable< + ListInstanceConfigOperationsRequest, ListInstanceConfigOperationsPagedResponse> + listInstanceConfigOperationsPagedCallable() { + 
throw new UnsupportedOperationException( + "Not implemented: listInstanceConfigOperationsPagedCallable()"); + } + + public UnaryCallable + listInstanceConfigOperationsCallable() { + throw new UnsupportedOperationException( + "Not implemented: listInstanceConfigOperationsCallable()"); + } + + public UnaryCallable + listInstancesPagedCallable() { + throw new UnsupportedOperationException("Not implemented: listInstancesPagedCallable()"); + } + + public UnaryCallable listInstancesCallable() { + throw new UnsupportedOperationException("Not implemented: listInstancesCallable()"); + } + + public UnaryCallable + listInstancePartitionsPagedCallable() { + throw new UnsupportedOperationException( + "Not implemented: listInstancePartitionsPagedCallable()"); + } + + public UnaryCallable + listInstancePartitionsCallable() { + throw new UnsupportedOperationException("Not implemented: listInstancePartitionsCallable()"); + } + + public UnaryCallable getInstanceCallable() { + throw new UnsupportedOperationException("Not implemented: getInstanceCallable()"); + } + + public OperationCallable + createInstanceOperationCallable() { + throw new UnsupportedOperationException("Not implemented: createInstanceOperationCallable()"); + } + + public UnaryCallable createInstanceCallable() { + throw new UnsupportedOperationException("Not implemented: createInstanceCallable()"); + } + + public OperationCallable + updateInstanceOperationCallable() { + throw new UnsupportedOperationException("Not implemented: updateInstanceOperationCallable()"); + } + + public UnaryCallable updateInstanceCallable() { + throw new UnsupportedOperationException("Not implemented: updateInstanceCallable()"); + } + + public UnaryCallable deleteInstanceCallable() { + throw new UnsupportedOperationException("Not implemented: deleteInstanceCallable()"); + } + + public UnaryCallable setIamPolicyCallable() { + throw new UnsupportedOperationException("Not implemented: setIamPolicyCallable()"); + } + + public UnaryCallable 
getIamPolicyCallable() { + throw new UnsupportedOperationException("Not implemented: getIamPolicyCallable()"); + } + + public UnaryCallable + testIamPermissionsCallable() { + throw new UnsupportedOperationException("Not implemented: testIamPermissionsCallable()"); + } + + public UnaryCallable + getInstancePartitionCallable() { + throw new UnsupportedOperationException("Not implemented: getInstancePartitionCallable()"); + } + + public OperationCallable< + CreateInstancePartitionRequest, InstancePartition, CreateInstancePartitionMetadata> + createInstancePartitionOperationCallable() { + throw new UnsupportedOperationException( + "Not implemented: createInstancePartitionOperationCallable()"); + } + + public UnaryCallable + createInstancePartitionCallable() { + throw new UnsupportedOperationException("Not implemented: createInstancePartitionCallable()"); + } + + public UnaryCallable deleteInstancePartitionCallable() { + throw new UnsupportedOperationException("Not implemented: deleteInstancePartitionCallable()"); + } + + public OperationCallable< + UpdateInstancePartitionRequest, InstancePartition, UpdateInstancePartitionMetadata> + updateInstancePartitionOperationCallable() { + throw new UnsupportedOperationException( + "Not implemented: updateInstancePartitionOperationCallable()"); + } + + public UnaryCallable + updateInstancePartitionCallable() { + throw new UnsupportedOperationException("Not implemented: updateInstancePartitionCallable()"); + } + + public UnaryCallable< + ListInstancePartitionOperationsRequest, ListInstancePartitionOperationsPagedResponse> + listInstancePartitionOperationsPagedCallable() { + throw new UnsupportedOperationException( + "Not implemented: listInstancePartitionOperationsPagedCallable()"); + } + + public UnaryCallable< + ListInstancePartitionOperationsRequest, ListInstancePartitionOperationsResponse> + listInstancePartitionOperationsCallable() { + throw new UnsupportedOperationException( + "Not implemented: 
listInstancePartitionOperationsCallable()"); + } + + public OperationCallable + moveInstanceOperationCallable() { + throw new UnsupportedOperationException("Not implemented: moveInstanceOperationCallable()"); + } + + public UnaryCallable moveInstanceCallable() { + throw new UnsupportedOperationException("Not implemented: moveInstanceCallable()"); + } + + @Override + public abstract void close(); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/InstanceAdminStubSettings.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/InstanceAdminStubSettings.java new file mode 100644 index 000000000000..7c32a3f90baa --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/InstanceAdminStubSettings.java @@ -0,0 +1,1698 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.admin.instance.v1.stub; + +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstanceConfigOperationsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstanceConfigsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstancePartitionOperationsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstancePartitionsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstancesPagedResponse; + +import com.google.api.core.ApiFunction; +import com.google.api.core.ApiFuture; +import com.google.api.core.BetaApi; +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.grpc.ProtoOperationTransformers; +import com.google.api.gax.httpjson.GaxHttpJsonProperties; +import com.google.api.gax.httpjson.HttpJsonTransportChannel; +import com.google.api.gax.httpjson.InstantiatingHttpJsonChannelProvider; +import com.google.api.gax.longrunning.OperationSnapshot; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.LibraryMetadata; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.PageContext; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.PagedListDescriptor; +import 
com.google.api.gax.rpc.PagedListResponseFactory; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.CreateInstanceMetadata; +import com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata; +import com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.CreateInstanceRequest; +import com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.DeleteInstanceRequest; +import com.google.spanner.admin.instance.v1.GetInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.GetInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.GetInstanceRequest; +import com.google.spanner.admin.instance.v1.Instance; +import com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.InstancePartition; +import com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest; +import 
com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse; +import com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest; +import com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse; +import com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest; +import com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse; +import com.google.spanner.admin.instance.v1.ListInstancesRequest; +import com.google.spanner.admin.instance.v1.ListInstancesResponse; +import com.google.spanner.admin.instance.v1.MoveInstanceMetadata; +import com.google.spanner.admin.instance.v1.MoveInstanceRequest; +import com.google.spanner.admin.instance.v1.MoveInstanceResponse; +import com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.UpdateInstanceRequest; +import java.io.IOException; +import java.time.Duration; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link InstanceAdminStub}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (spanner.googleapis.com) and default port (443) are used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of getInstanceConfig: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * InstanceAdminStubSettings.Builder instanceAdminSettingsBuilder =
+ *     InstanceAdminStubSettings.newBuilder();
+ * instanceAdminSettingsBuilder
+ *     .getInstanceConfigSettings()
+ *     .setRetrySettings(
+ *         instanceAdminSettingsBuilder
+ *             .getInstanceConfigSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * InstanceAdminStubSettings instanceAdminSettings = instanceAdminSettingsBuilder.build();
+ * }
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + * + *

To configure the RetrySettings of a Long Running Operation method, create an + * OperationTimedPollAlgorithm object and update the RPC's polling algorithm. For example, to + * configure the RetrySettings for createInstanceConfig: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * InstanceAdminStubSettings.Builder instanceAdminSettingsBuilder =
+ *     InstanceAdminStubSettings.newBuilder();
+ * TimedRetryAlgorithm timedRetryAlgorithm =
+ *     OperationalTimedPollAlgorithm.create(
+ *         RetrySettings.newBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofMillis(500))
+ *             .setRetryDelayMultiplier(1.5)
+ *             .setMaxRetryDelayDuration(Duration.ofMillis(5000))
+ *             .setTotalTimeoutDuration(Duration.ofHours(24))
+ *             .build());
+ * instanceAdminSettingsBuilder
+ *     .createClusterOperationSettings()
+ *     .setPollingAlgorithm(timedRetryAlgorithm)
+ *     .build();
+ * }
+ */ +@Generated("by gapic-generator-java") +@SuppressWarnings("CanonicalDuration") +public class InstanceAdminStubSettings extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder() + .add("https://www.googleapis.com/auth/cloud-platform") + .add("https://www.googleapis.com/auth/spanner.admin") + .build(); + + private final PagedCallSettings< + ListInstanceConfigsRequest, ListInstanceConfigsResponse, ListInstanceConfigsPagedResponse> + listInstanceConfigsSettings; + private final UnaryCallSettings + getInstanceConfigSettings; + private final UnaryCallSettings + createInstanceConfigSettings; + private final OperationCallSettings< + CreateInstanceConfigRequest, InstanceConfig, CreateInstanceConfigMetadata> + createInstanceConfigOperationSettings; + private final UnaryCallSettings + updateInstanceConfigSettings; + private final OperationCallSettings< + UpdateInstanceConfigRequest, InstanceConfig, UpdateInstanceConfigMetadata> + updateInstanceConfigOperationSettings; + private final UnaryCallSettings deleteInstanceConfigSettings; + private final PagedCallSettings< + ListInstanceConfigOperationsRequest, + ListInstanceConfigOperationsResponse, + ListInstanceConfigOperationsPagedResponse> + listInstanceConfigOperationsSettings; + private final PagedCallSettings< + ListInstancesRequest, ListInstancesResponse, ListInstancesPagedResponse> + listInstancesSettings; + private final PagedCallSettings< + ListInstancePartitionsRequest, + ListInstancePartitionsResponse, + ListInstancePartitionsPagedResponse> + listInstancePartitionsSettings; + private final UnaryCallSettings getInstanceSettings; + private final UnaryCallSettings createInstanceSettings; + private final OperationCallSettings + createInstanceOperationSettings; + private final UnaryCallSettings updateInstanceSettings; + private final OperationCallSettings + updateInstanceOperationSettings; + private final UnaryCallSettings 
deleteInstanceSettings; + private final UnaryCallSettings setIamPolicySettings; + private final UnaryCallSettings getIamPolicySettings; + private final UnaryCallSettings + testIamPermissionsSettings; + private final UnaryCallSettings + getInstancePartitionSettings; + private final UnaryCallSettings + createInstancePartitionSettings; + private final OperationCallSettings< + CreateInstancePartitionRequest, InstancePartition, CreateInstancePartitionMetadata> + createInstancePartitionOperationSettings; + private final UnaryCallSettings + deleteInstancePartitionSettings; + private final UnaryCallSettings + updateInstancePartitionSettings; + private final OperationCallSettings< + UpdateInstancePartitionRequest, InstancePartition, UpdateInstancePartitionMetadata> + updateInstancePartitionOperationSettings; + private final PagedCallSettings< + ListInstancePartitionOperationsRequest, + ListInstancePartitionOperationsResponse, + ListInstancePartitionOperationsPagedResponse> + listInstancePartitionOperationsSettings; + private final UnaryCallSettings moveInstanceSettings; + private final OperationCallSettings< + MoveInstanceRequest, MoveInstanceResponse, MoveInstanceMetadata> + moveInstanceOperationSettings; + + private static final PagedListDescriptor< + ListInstanceConfigsRequest, ListInstanceConfigsResponse, InstanceConfig> + LIST_INSTANCE_CONFIGS_PAGE_STR_DESC = + new PagedListDescriptor< + ListInstanceConfigsRequest, ListInstanceConfigsResponse, InstanceConfig>() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListInstanceConfigsRequest injectToken( + ListInstanceConfigsRequest payload, String token) { + return ListInstanceConfigsRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListInstanceConfigsRequest injectPageSize( + ListInstanceConfigsRequest payload, int pageSize) { + return ListInstanceConfigsRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer 
extractPageSize(ListInstanceConfigsRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListInstanceConfigsResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListInstanceConfigsResponse payload) { + return payload.getInstanceConfigsList(); + } + }; + + private static final PagedListDescriptor< + ListInstanceConfigOperationsRequest, ListInstanceConfigOperationsResponse, Operation> + LIST_INSTANCE_CONFIG_OPERATIONS_PAGE_STR_DESC = + new PagedListDescriptor< + ListInstanceConfigOperationsRequest, + ListInstanceConfigOperationsResponse, + Operation>() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListInstanceConfigOperationsRequest injectToken( + ListInstanceConfigOperationsRequest payload, String token) { + return ListInstanceConfigOperationsRequest.newBuilder(payload) + .setPageToken(token) + .build(); + } + + @Override + public ListInstanceConfigOperationsRequest injectPageSize( + ListInstanceConfigOperationsRequest payload, int pageSize) { + return ListInstanceConfigOperationsRequest.newBuilder(payload) + .setPageSize(pageSize) + .build(); + } + + @Override + public Integer extractPageSize(ListInstanceConfigOperationsRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListInstanceConfigOperationsResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources( + ListInstanceConfigOperationsResponse payload) { + return payload.getOperationsList(); + } + }; + + private static final PagedListDescriptor + LIST_INSTANCES_PAGE_STR_DESC = + new PagedListDescriptor() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListInstancesRequest injectToken(ListInstancesRequest payload, String token) { + return ListInstancesRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public 
ListInstancesRequest injectPageSize(ListInstancesRequest payload, int pageSize) { + return ListInstancesRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListInstancesRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListInstancesResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListInstancesResponse payload) { + return payload.getInstancesList(); + } + }; + + private static final PagedListDescriptor< + ListInstancePartitionsRequest, ListInstancePartitionsResponse, InstancePartition> + LIST_INSTANCE_PARTITIONS_PAGE_STR_DESC = + new PagedListDescriptor< + ListInstancePartitionsRequest, ListInstancePartitionsResponse, InstancePartition>() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListInstancePartitionsRequest injectToken( + ListInstancePartitionsRequest payload, String token) { + return ListInstancePartitionsRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListInstancePartitionsRequest injectPageSize( + ListInstancePartitionsRequest payload, int pageSize) { + return ListInstancePartitionsRequest.newBuilder(payload) + .setPageSize(pageSize) + .build(); + } + + @Override + public Integer extractPageSize(ListInstancePartitionsRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListInstancePartitionsResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources( + ListInstancePartitionsResponse payload) { + return payload.getInstancePartitionsList(); + } + }; + + private static final PagedListDescriptor< + ListInstancePartitionOperationsRequest, + ListInstancePartitionOperationsResponse, + Operation> + LIST_INSTANCE_PARTITION_OPERATIONS_PAGE_STR_DESC = + new PagedListDescriptor< + ListInstancePartitionOperationsRequest, + 
ListInstancePartitionOperationsResponse, + Operation>() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListInstancePartitionOperationsRequest injectToken( + ListInstancePartitionOperationsRequest payload, String token) { + return ListInstancePartitionOperationsRequest.newBuilder(payload) + .setPageToken(token) + .build(); + } + + @Override + public ListInstancePartitionOperationsRequest injectPageSize( + ListInstancePartitionOperationsRequest payload, int pageSize) { + return ListInstancePartitionOperationsRequest.newBuilder(payload) + .setPageSize(pageSize) + .build(); + } + + @Override + public Integer extractPageSize(ListInstancePartitionOperationsRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListInstancePartitionOperationsResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources( + ListInstancePartitionOperationsResponse payload) { + return payload.getOperationsList(); + } + }; + + private static final PagedListResponseFactory< + ListInstanceConfigsRequest, ListInstanceConfigsResponse, ListInstanceConfigsPagedResponse> + LIST_INSTANCE_CONFIGS_PAGE_STR_FACT = + new PagedListResponseFactory< + ListInstanceConfigsRequest, + ListInstanceConfigsResponse, + ListInstanceConfigsPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListInstanceConfigsRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext + pageContext = + PageContext.create( + callable, LIST_INSTANCE_CONFIGS_PAGE_STR_DESC, request, context); + return ListInstanceConfigsPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + private static final PagedListResponseFactory< + ListInstanceConfigOperationsRequest, + ListInstanceConfigOperationsResponse, + ListInstanceConfigOperationsPagedResponse> + LIST_INSTANCE_CONFIG_OPERATIONS_PAGE_STR_FACT = + new 
PagedListResponseFactory< + ListInstanceConfigOperationsRequest, + ListInstanceConfigOperationsResponse, + ListInstanceConfigOperationsPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable< + ListInstanceConfigOperationsRequest, ListInstanceConfigOperationsResponse> + callable, + ListInstanceConfigOperationsRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext< + ListInstanceConfigOperationsRequest, + ListInstanceConfigOperationsResponse, + Operation> + pageContext = + PageContext.create( + callable, + LIST_INSTANCE_CONFIG_OPERATIONS_PAGE_STR_DESC, + request, + context); + return ListInstanceConfigOperationsPagedResponse.createAsync( + pageContext, futureResponse); + } + }; + + private static final PagedListResponseFactory< + ListInstancesRequest, ListInstancesResponse, ListInstancesPagedResponse> + LIST_INSTANCES_PAGE_STR_FACT = + new PagedListResponseFactory< + ListInstancesRequest, ListInstancesResponse, ListInstancesPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListInstancesRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext pageContext = + PageContext.create(callable, LIST_INSTANCES_PAGE_STR_DESC, request, context); + return ListInstancesPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + private static final PagedListResponseFactory< + ListInstancePartitionsRequest, + ListInstancePartitionsResponse, + ListInstancePartitionsPagedResponse> + LIST_INSTANCE_PARTITIONS_PAGE_STR_FACT = + new PagedListResponseFactory< + ListInstancePartitionsRequest, + ListInstancePartitionsResponse, + ListInstancePartitionsPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable + callable, + ListInstancePartitionsRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext< + ListInstancePartitionsRequest, + ListInstancePartitionsResponse, + 
InstancePartition> + pageContext = + PageContext.create( + callable, LIST_INSTANCE_PARTITIONS_PAGE_STR_DESC, request, context); + return ListInstancePartitionsPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + private static final PagedListResponseFactory< + ListInstancePartitionOperationsRequest, + ListInstancePartitionOperationsResponse, + ListInstancePartitionOperationsPagedResponse> + LIST_INSTANCE_PARTITION_OPERATIONS_PAGE_STR_FACT = + new PagedListResponseFactory< + ListInstancePartitionOperationsRequest, + ListInstancePartitionOperationsResponse, + ListInstancePartitionOperationsPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable< + ListInstancePartitionOperationsRequest, + ListInstancePartitionOperationsResponse> + callable, + ListInstancePartitionOperationsRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext< + ListInstancePartitionOperationsRequest, + ListInstancePartitionOperationsResponse, + Operation> + pageContext = + PageContext.create( + callable, + LIST_INSTANCE_PARTITION_OPERATIONS_PAGE_STR_DESC, + request, + context); + return ListInstancePartitionOperationsPagedResponse.createAsync( + pageContext, futureResponse); + } + }; + + /** Returns the object with the settings used for calls to listInstanceConfigs. */ + public PagedCallSettings< + ListInstanceConfigsRequest, ListInstanceConfigsResponse, ListInstanceConfigsPagedResponse> + listInstanceConfigsSettings() { + return listInstanceConfigsSettings; + } + + /** Returns the object with the settings used for calls to getInstanceConfig. */ + public UnaryCallSettings getInstanceConfigSettings() { + return getInstanceConfigSettings; + } + + /** Returns the object with the settings used for calls to createInstanceConfig. */ + public UnaryCallSettings createInstanceConfigSettings() { + return createInstanceConfigSettings; + } + + /** Returns the object with the settings used for calls to createInstanceConfig. 
*/ + public OperationCallSettings< + CreateInstanceConfigRequest, InstanceConfig, CreateInstanceConfigMetadata> + createInstanceConfigOperationSettings() { + return createInstanceConfigOperationSettings; + } + + /** Returns the object with the settings used for calls to updateInstanceConfig. */ + public UnaryCallSettings updateInstanceConfigSettings() { + return updateInstanceConfigSettings; + } + + /** Returns the object with the settings used for calls to updateInstanceConfig. */ + public OperationCallSettings< + UpdateInstanceConfigRequest, InstanceConfig, UpdateInstanceConfigMetadata> + updateInstanceConfigOperationSettings() { + return updateInstanceConfigOperationSettings; + } + + /** Returns the object with the settings used for calls to deleteInstanceConfig. */ + public UnaryCallSettings deleteInstanceConfigSettings() { + return deleteInstanceConfigSettings; + } + + /** Returns the object with the settings used for calls to listInstanceConfigOperations. */ + public PagedCallSettings< + ListInstanceConfigOperationsRequest, + ListInstanceConfigOperationsResponse, + ListInstanceConfigOperationsPagedResponse> + listInstanceConfigOperationsSettings() { + return listInstanceConfigOperationsSettings; + } + + /** Returns the object with the settings used for calls to listInstances. */ + public PagedCallSettings + listInstancesSettings() { + return listInstancesSettings; + } + + /** Returns the object with the settings used for calls to listInstancePartitions. */ + public PagedCallSettings< + ListInstancePartitionsRequest, + ListInstancePartitionsResponse, + ListInstancePartitionsPagedResponse> + listInstancePartitionsSettings() { + return listInstancePartitionsSettings; + } + + /** Returns the object with the settings used for calls to getInstance. */ + public UnaryCallSettings getInstanceSettings() { + return getInstanceSettings; + } + + /** Returns the object with the settings used for calls to createInstance. 
*/ + public UnaryCallSettings createInstanceSettings() { + return createInstanceSettings; + } + + /** Returns the object with the settings used for calls to createInstance. */ + public OperationCallSettings + createInstanceOperationSettings() { + return createInstanceOperationSettings; + } + + /** Returns the object with the settings used for calls to updateInstance. */ + public UnaryCallSettings updateInstanceSettings() { + return updateInstanceSettings; + } + + /** Returns the object with the settings used for calls to updateInstance. */ + public OperationCallSettings + updateInstanceOperationSettings() { + return updateInstanceOperationSettings; + } + + /** Returns the object with the settings used for calls to deleteInstance. */ + public UnaryCallSettings deleteInstanceSettings() { + return deleteInstanceSettings; + } + + /** Returns the object with the settings used for calls to setIamPolicy. */ + public UnaryCallSettings setIamPolicySettings() { + return setIamPolicySettings; + } + + /** Returns the object with the settings used for calls to getIamPolicy. */ + public UnaryCallSettings getIamPolicySettings() { + return getIamPolicySettings; + } + + /** Returns the object with the settings used for calls to testIamPermissions. */ + public UnaryCallSettings + testIamPermissionsSettings() { + return testIamPermissionsSettings; + } + + /** Returns the object with the settings used for calls to getInstancePartition. */ + public UnaryCallSettings + getInstancePartitionSettings() { + return getInstancePartitionSettings; + } + + /** Returns the object with the settings used for calls to createInstancePartition. */ + public UnaryCallSettings + createInstancePartitionSettings() { + return createInstancePartitionSettings; + } + + /** Returns the object with the settings used for calls to createInstancePartition. 
*/ + public OperationCallSettings< + CreateInstancePartitionRequest, InstancePartition, CreateInstancePartitionMetadata> + createInstancePartitionOperationSettings() { + return createInstancePartitionOperationSettings; + } + + /** Returns the object with the settings used for calls to deleteInstancePartition. */ + public UnaryCallSettings + deleteInstancePartitionSettings() { + return deleteInstancePartitionSettings; + } + + /** Returns the object with the settings used for calls to updateInstancePartition. */ + public UnaryCallSettings + updateInstancePartitionSettings() { + return updateInstancePartitionSettings; + } + + /** Returns the object with the settings used for calls to updateInstancePartition. */ + public OperationCallSettings< + UpdateInstancePartitionRequest, InstancePartition, UpdateInstancePartitionMetadata> + updateInstancePartitionOperationSettings() { + return updateInstancePartitionOperationSettings; + } + + /** Returns the object with the settings used for calls to listInstancePartitionOperations. */ + public PagedCallSettings< + ListInstancePartitionOperationsRequest, + ListInstancePartitionOperationsResponse, + ListInstancePartitionOperationsPagedResponse> + listInstancePartitionOperationsSettings() { + return listInstancePartitionOperationsSettings; + } + + /** Returns the object with the settings used for calls to moveInstance. */ + public UnaryCallSettings moveInstanceSettings() { + return moveInstanceSettings; + } + + /** Returns the object with the settings used for calls to moveInstance. 
*/ + public OperationCallSettings + moveInstanceOperationSettings() { + return moveInstanceOperationSettings; + } + + public InstanceAdminStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcInstanceAdminStub.create(this); + } + if (getTransportChannelProvider() + .getTransportName() + .equals(HttpJsonTransportChannel.getHttpJsonTransportName())) { + return HttpJsonInstanceAdminStub.create(this); + } + throw new UnsupportedOperationException( + String.format( + "Transport not supported: %s", getTransportChannelProvider().getTransportName())); + } + + /** Returns the default service name. */ + @Override + public String getServiceName() { + return "spanner"; + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") + public static String getDefaultEndpoint() { + return "spanner.googleapis.com:443"; + } + + /** Returns the default mTLS service endpoint. */ + public static String getDefaultMtlsEndpoint() { + return "spanner.mtls.googleapis.com:443"; + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder() + .setScopesToApply(DEFAULT_SERVICE_SCOPES) + .setUseJwtAccessWithScope(true); + } + + /** Returns a builder for the default gRPC ChannelProvider for this service. 
*/ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder() + .setMaxInboundMessageSize(Integer.MAX_VALUE); + } + + /** Returns a builder for the default REST ChannelProvider for this service. */ + @BetaApi + public static InstantiatingHttpJsonChannelProvider.Builder + defaultHttpJsonTransportProviderBuilder() { + return InstantiatingHttpJsonChannelProvider.newBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + public static ApiClientHeaderProvider.Builder defaultGrpcApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(InstanceAdminStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + public static ApiClientHeaderProvider.Builder defaultHttpJsonApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(InstanceAdminStubSettings.class)) + .setTransportToken( + GaxHttpJsonProperties.getHttpJsonTokenName(), + GaxHttpJsonProperties.getHttpJsonVersion()); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return InstanceAdminStubSettings.defaultGrpcApiClientHeaderProviderBuilder(); + } + + /** Returns a new gRPC builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new REST builder for this class. */ + public static Builder newHttpJsonBuilder() { + return Builder.createHttpJsonDefault(); + } + + /** Returns a new builder for this class. 
*/ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected InstanceAdminStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + listInstanceConfigsSettings = settingsBuilder.listInstanceConfigsSettings().build(); + getInstanceConfigSettings = settingsBuilder.getInstanceConfigSettings().build(); + createInstanceConfigSettings = settingsBuilder.createInstanceConfigSettings().build(); + createInstanceConfigOperationSettings = + settingsBuilder.createInstanceConfigOperationSettings().build(); + updateInstanceConfigSettings = settingsBuilder.updateInstanceConfigSettings().build(); + updateInstanceConfigOperationSettings = + settingsBuilder.updateInstanceConfigOperationSettings().build(); + deleteInstanceConfigSettings = settingsBuilder.deleteInstanceConfigSettings().build(); + listInstanceConfigOperationsSettings = + settingsBuilder.listInstanceConfigOperationsSettings().build(); + listInstancesSettings = settingsBuilder.listInstancesSettings().build(); + listInstancePartitionsSettings = settingsBuilder.listInstancePartitionsSettings().build(); + getInstanceSettings = settingsBuilder.getInstanceSettings().build(); + createInstanceSettings = settingsBuilder.createInstanceSettings().build(); + createInstanceOperationSettings = settingsBuilder.createInstanceOperationSettings().build(); + updateInstanceSettings = settingsBuilder.updateInstanceSettings().build(); + updateInstanceOperationSettings = settingsBuilder.updateInstanceOperationSettings().build(); + deleteInstanceSettings = settingsBuilder.deleteInstanceSettings().build(); + setIamPolicySettings = settingsBuilder.setIamPolicySettings().build(); + getIamPolicySettings = settingsBuilder.getIamPolicySettings().build(); + testIamPermissionsSettings = 
settingsBuilder.testIamPermissionsSettings().build(); + getInstancePartitionSettings = settingsBuilder.getInstancePartitionSettings().build(); + createInstancePartitionSettings = settingsBuilder.createInstancePartitionSettings().build(); + createInstancePartitionOperationSettings = + settingsBuilder.createInstancePartitionOperationSettings().build(); + deleteInstancePartitionSettings = settingsBuilder.deleteInstancePartitionSettings().build(); + updateInstancePartitionSettings = settingsBuilder.updateInstancePartitionSettings().build(); + updateInstancePartitionOperationSettings = + settingsBuilder.updateInstancePartitionOperationSettings().build(); + listInstancePartitionOperationsSettings = + settingsBuilder.listInstancePartitionOperationsSettings().build(); + moveInstanceSettings = settingsBuilder.moveInstanceSettings().build(); + moveInstanceOperationSettings = settingsBuilder.moveInstanceOperationSettings().build(); + } + + @Override + protected LibraryMetadata getLibraryMetadata() { + return LibraryMetadata.newBuilder() + .setArtifactName("com.google.cloud:google-cloud-spanner") + .setRepository("googleapis/google-cloud-java") + .setVersion(Version.VERSION) + .build(); + } + + /** Builder for InstanceAdminStubSettings. 
*/ + public static class Builder extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + private final PagedCallSettings.Builder< + ListInstanceConfigsRequest, + ListInstanceConfigsResponse, + ListInstanceConfigsPagedResponse> + listInstanceConfigsSettings; + private final UnaryCallSettings.Builder + getInstanceConfigSettings; + private final UnaryCallSettings.Builder + createInstanceConfigSettings; + private final OperationCallSettings.Builder< + CreateInstanceConfigRequest, InstanceConfig, CreateInstanceConfigMetadata> + createInstanceConfigOperationSettings; + private final UnaryCallSettings.Builder + updateInstanceConfigSettings; + private final OperationCallSettings.Builder< + UpdateInstanceConfigRequest, InstanceConfig, UpdateInstanceConfigMetadata> + updateInstanceConfigOperationSettings; + private final UnaryCallSettings.Builder + deleteInstanceConfigSettings; + private final PagedCallSettings.Builder< + ListInstanceConfigOperationsRequest, + ListInstanceConfigOperationsResponse, + ListInstanceConfigOperationsPagedResponse> + listInstanceConfigOperationsSettings; + private final PagedCallSettings.Builder< + ListInstancesRequest, ListInstancesResponse, ListInstancesPagedResponse> + listInstancesSettings; + private final PagedCallSettings.Builder< + ListInstancePartitionsRequest, + ListInstancePartitionsResponse, + ListInstancePartitionsPagedResponse> + listInstancePartitionsSettings; + private final UnaryCallSettings.Builder getInstanceSettings; + private final UnaryCallSettings.Builder + createInstanceSettings; + private final OperationCallSettings.Builder< + CreateInstanceRequest, Instance, CreateInstanceMetadata> + createInstanceOperationSettings; + private final UnaryCallSettings.Builder + updateInstanceSettings; + private final OperationCallSettings.Builder< + UpdateInstanceRequest, Instance, UpdateInstanceMetadata> + updateInstanceOperationSettings; + private final UnaryCallSettings.Builder deleteInstanceSettings; + 
private final UnaryCallSettings.Builder setIamPolicySettings; + private final UnaryCallSettings.Builder getIamPolicySettings; + private final UnaryCallSettings.Builder + testIamPermissionsSettings; + private final UnaryCallSettings.Builder + getInstancePartitionSettings; + private final UnaryCallSettings.Builder + createInstancePartitionSettings; + private final OperationCallSettings.Builder< + CreateInstancePartitionRequest, InstancePartition, CreateInstancePartitionMetadata> + createInstancePartitionOperationSettings; + private final UnaryCallSettings.Builder + deleteInstancePartitionSettings; + private final UnaryCallSettings.Builder + updateInstancePartitionSettings; + private final OperationCallSettings.Builder< + UpdateInstancePartitionRequest, InstancePartition, UpdateInstancePartitionMetadata> + updateInstancePartitionOperationSettings; + private final PagedCallSettings.Builder< + ListInstancePartitionOperationsRequest, + ListInstancePartitionOperationsResponse, + ListInstancePartitionOperationsPagedResponse> + listInstancePartitionOperationsSettings; + private final UnaryCallSettings.Builder moveInstanceSettings; + private final OperationCallSettings.Builder< + MoveInstanceRequest, MoveInstanceResponse, MoveInstanceMetadata> + moveInstanceOperationSettings; + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put( + "retry_policy_0_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.UNAVAILABLE, StatusCode.Code.DEADLINE_EXCEEDED))); + definitions.put("no_retry_codes", ImmutableSet.copyOf(Lists.newArrayList())); + definitions.put( + "no_retry_2_codes", ImmutableSet.copyOf(Lists.newArrayList())); + definitions.put( + "no_retry_3_codes", ImmutableSet.copyOf(Lists.newArrayList())); + definitions.put( + "retry_policy_1_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.UNAVAILABLE, StatusCode.Code.DEADLINE_EXCEEDED))); 
+ RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1000L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(32000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setTotalTimeoutDuration(Duration.ofMillis(3600000L)) + .build(); + definitions.put("retry_policy_0_params", settings); + settings = RetrySettings.newBuilder().setRpcTimeoutMultiplier(1.0).build(); + definitions.put("no_retry_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setTotalTimeoutDuration(Duration.ofMillis(3600000L)) + .build(); + definitions.put("no_retry_2_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(30000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(30000L)) + .setTotalTimeoutDuration(Duration.ofMillis(30000L)) + .build(); + definitions.put("no_retry_3_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1000L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(32000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(30000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(30000L)) + .setTotalTimeoutDuration(Duration.ofMillis(30000L)) + .build(); + definitions.put("retry_policy_1_params", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { + this(((ClientContext) null)); + } + + 
protected Builder(ClientContext clientContext) { + super(clientContext); + + listInstanceConfigsSettings = + PagedCallSettings.newBuilder(LIST_INSTANCE_CONFIGS_PAGE_STR_FACT); + getInstanceConfigSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + createInstanceConfigSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + createInstanceConfigOperationSettings = OperationCallSettings.newBuilder(); + updateInstanceConfigSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + updateInstanceConfigOperationSettings = OperationCallSettings.newBuilder(); + deleteInstanceConfigSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + listInstanceConfigOperationsSettings = + PagedCallSettings.newBuilder(LIST_INSTANCE_CONFIG_OPERATIONS_PAGE_STR_FACT); + listInstancesSettings = PagedCallSettings.newBuilder(LIST_INSTANCES_PAGE_STR_FACT); + listInstancePartitionsSettings = + PagedCallSettings.newBuilder(LIST_INSTANCE_PARTITIONS_PAGE_STR_FACT); + getInstanceSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + createInstanceSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + createInstanceOperationSettings = OperationCallSettings.newBuilder(); + updateInstanceSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + updateInstanceOperationSettings = OperationCallSettings.newBuilder(); + deleteInstanceSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + setIamPolicySettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + getIamPolicySettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + testIamPermissionsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + getInstancePartitionSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + createInstancePartitionSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + createInstancePartitionOperationSettings = OperationCallSettings.newBuilder(); + deleteInstancePartitionSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + 
updateInstancePartitionSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + updateInstancePartitionOperationSettings = OperationCallSettings.newBuilder(); + listInstancePartitionOperationsSettings = + PagedCallSettings.newBuilder(LIST_INSTANCE_PARTITION_OPERATIONS_PAGE_STR_FACT); + moveInstanceSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + moveInstanceOperationSettings = OperationCallSettings.newBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + listInstanceConfigsSettings, + getInstanceConfigSettings, + createInstanceConfigSettings, + updateInstanceConfigSettings, + deleteInstanceConfigSettings, + listInstanceConfigOperationsSettings, + listInstancesSettings, + listInstancePartitionsSettings, + getInstanceSettings, + createInstanceSettings, + updateInstanceSettings, + deleteInstanceSettings, + setIamPolicySettings, + getIamPolicySettings, + testIamPermissionsSettings, + getInstancePartitionSettings, + createInstancePartitionSettings, + deleteInstancePartitionSettings, + updateInstancePartitionSettings, + listInstancePartitionOperationsSettings, + moveInstanceSettings); + initDefaults(this); + } + + protected Builder(InstanceAdminStubSettings settings) { + super(settings); + + listInstanceConfigsSettings = settings.listInstanceConfigsSettings.toBuilder(); + getInstanceConfigSettings = settings.getInstanceConfigSettings.toBuilder(); + createInstanceConfigSettings = settings.createInstanceConfigSettings.toBuilder(); + createInstanceConfigOperationSettings = + settings.createInstanceConfigOperationSettings.toBuilder(); + updateInstanceConfigSettings = settings.updateInstanceConfigSettings.toBuilder(); + updateInstanceConfigOperationSettings = + settings.updateInstanceConfigOperationSettings.toBuilder(); + deleteInstanceConfigSettings = settings.deleteInstanceConfigSettings.toBuilder(); + listInstanceConfigOperationsSettings = + settings.listInstanceConfigOperationsSettings.toBuilder(); + listInstancesSettings = 
settings.listInstancesSettings.toBuilder(); + listInstancePartitionsSettings = settings.listInstancePartitionsSettings.toBuilder(); + getInstanceSettings = settings.getInstanceSettings.toBuilder(); + createInstanceSettings = settings.createInstanceSettings.toBuilder(); + createInstanceOperationSettings = settings.createInstanceOperationSettings.toBuilder(); + updateInstanceSettings = settings.updateInstanceSettings.toBuilder(); + updateInstanceOperationSettings = settings.updateInstanceOperationSettings.toBuilder(); + deleteInstanceSettings = settings.deleteInstanceSettings.toBuilder(); + setIamPolicySettings = settings.setIamPolicySettings.toBuilder(); + getIamPolicySettings = settings.getIamPolicySettings.toBuilder(); + testIamPermissionsSettings = settings.testIamPermissionsSettings.toBuilder(); + getInstancePartitionSettings = settings.getInstancePartitionSettings.toBuilder(); + createInstancePartitionSettings = settings.createInstancePartitionSettings.toBuilder(); + createInstancePartitionOperationSettings = + settings.createInstancePartitionOperationSettings.toBuilder(); + deleteInstancePartitionSettings = settings.deleteInstancePartitionSettings.toBuilder(); + updateInstancePartitionSettings = settings.updateInstancePartitionSettings.toBuilder(); + updateInstancePartitionOperationSettings = + settings.updateInstancePartitionOperationSettings.toBuilder(); + listInstancePartitionOperationsSettings = + settings.listInstancePartitionOperationsSettings.toBuilder(); + moveInstanceSettings = settings.moveInstanceSettings.toBuilder(); + moveInstanceOperationSettings = settings.moveInstanceOperationSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + listInstanceConfigsSettings, + getInstanceConfigSettings, + createInstanceConfigSettings, + updateInstanceConfigSettings, + deleteInstanceConfigSettings, + listInstanceConfigOperationsSettings, + listInstancesSettings, + listInstancePartitionsSettings, + getInstanceSettings, + 
createInstanceSettings, + updateInstanceSettings, + deleteInstanceSettings, + setIamPolicySettings, + getIamPolicySettings, + testIamPermissionsSettings, + getInstancePartitionSettings, + createInstancePartitionSettings, + deleteInstancePartitionSettings, + updateInstancePartitionSettings, + listInstancePartitionOperationsSettings, + moveInstanceSettings); + } + + private static Builder createDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder createHttpJsonDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultHttpJsonTransportProviderBuilder().build()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultHttpJsonApiClientHeaderProviderBuilder().build()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + builder + .listInstanceConfigsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .getInstanceConfigSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .createInstanceConfigSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + + 
builder + .updateInstanceConfigSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + + builder + .deleteInstanceConfigSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + + builder + .listInstanceConfigOperationsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + + builder + .listInstancesSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .listInstancePartitionsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + + builder + .getInstanceSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .createInstanceSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_2_params")); + + builder + .updateInstanceSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_2_params")); + + builder + .deleteInstanceSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .setIamPolicySettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_3_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_3_params")); + + builder + .getIamPolicySettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_1_codes")) + 
.setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_1_params")); + + builder + .testIamPermissionsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_3_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_3_params")); + + builder + .getInstancePartitionSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + + builder + .createInstancePartitionSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + + builder + .deleteInstancePartitionSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + + builder + .updateInstancePartitionSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + + builder + .listInstancePartitionOperationsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + + builder + .moveInstanceSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + + builder + .createInstanceConfigOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(InstanceConfig.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create( + CreateInstanceConfigMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + 
.setInitialRetryDelayDuration(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofMillis(45000L)) + .setInitialRpcTimeoutDuration(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ZERO) + .setTotalTimeoutDuration(Duration.ofMillis(300000L)) + .build())); + + builder + .updateInstanceConfigOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(InstanceConfig.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create( + UpdateInstanceConfigMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofMillis(45000L)) + .setInitialRpcTimeoutDuration(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ZERO) + .setTotalTimeoutDuration(Duration.ofMillis(300000L)) + .build())); + + builder + .createInstanceOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_2_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(Instance.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create(CreateInstanceMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(20000L)) + .setRetryDelayMultiplier(1.5) + 
.setMaxRetryDelayDuration(Duration.ofMillis(45000L)) + .setInitialRpcTimeoutDuration(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ZERO) + .setTotalTimeoutDuration(Duration.ofMillis(86400000L)) + .build())); + + builder + .updateInstanceOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_2_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(Instance.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create(UpdateInstanceMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(20000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofMillis(45000L)) + .setInitialRpcTimeoutDuration(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ZERO) + .setTotalTimeoutDuration(Duration.ofMillis(86400000L)) + .build())); + + builder + .createInstancePartitionOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(InstancePartition.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create( + CreateInstancePartitionMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofMillis(45000L)) + .setInitialRpcTimeoutDuration(Duration.ZERO) + 
.setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ZERO) + .setTotalTimeoutDuration(Duration.ofMillis(300000L)) + .build())); + + builder + .updateInstancePartitionOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(InstancePartition.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create( + UpdateInstancePartitionMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofMillis(45000L)) + .setInitialRpcTimeoutDuration(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ZERO) + .setTotalTimeoutDuration(Duration.ofMillis(300000L)) + .build())); + + builder + .moveInstanceOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(MoveInstanceResponse.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create(MoveInstanceMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelayDuration(Duration.ofMillis(45000L)) + .setInitialRpcTimeoutDuration(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ZERO) + 
.setTotalTimeoutDuration(Duration.ofMillis(300000L)) + .build())); + + return builder; + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to listInstanceConfigs. */ + public PagedCallSettings.Builder< + ListInstanceConfigsRequest, + ListInstanceConfigsResponse, + ListInstanceConfigsPagedResponse> + listInstanceConfigsSettings() { + return listInstanceConfigsSettings; + } + + /** Returns the builder for the settings used for calls to getInstanceConfig. */ + public UnaryCallSettings.Builder + getInstanceConfigSettings() { + return getInstanceConfigSettings; + } + + /** Returns the builder for the settings used for calls to createInstanceConfig. */ + public UnaryCallSettings.Builder + createInstanceConfigSettings() { + return createInstanceConfigSettings; + } + + /** Returns the builder for the settings used for calls to createInstanceConfig. */ + public OperationCallSettings.Builder< + CreateInstanceConfigRequest, InstanceConfig, CreateInstanceConfigMetadata> + createInstanceConfigOperationSettings() { + return createInstanceConfigOperationSettings; + } + + /** Returns the builder for the settings used for calls to updateInstanceConfig. */ + public UnaryCallSettings.Builder + updateInstanceConfigSettings() { + return updateInstanceConfigSettings; + } + + /** Returns the builder for the settings used for calls to updateInstanceConfig. */ + public OperationCallSettings.Builder< + UpdateInstanceConfigRequest, InstanceConfig, UpdateInstanceConfigMetadata> + updateInstanceConfigOperationSettings() { + return updateInstanceConfigOperationSettings; + } + + /** Returns the builder for the settings used for calls to deleteInstanceConfig. 
*/ + public UnaryCallSettings.Builder + deleteInstanceConfigSettings() { + return deleteInstanceConfigSettings; + } + + /** Returns the builder for the settings used for calls to listInstanceConfigOperations. */ + public PagedCallSettings.Builder< + ListInstanceConfigOperationsRequest, + ListInstanceConfigOperationsResponse, + ListInstanceConfigOperationsPagedResponse> + listInstanceConfigOperationsSettings() { + return listInstanceConfigOperationsSettings; + } + + /** Returns the builder for the settings used for calls to listInstances. */ + public PagedCallSettings.Builder< + ListInstancesRequest, ListInstancesResponse, ListInstancesPagedResponse> + listInstancesSettings() { + return listInstancesSettings; + } + + /** Returns the builder for the settings used for calls to listInstancePartitions. */ + public PagedCallSettings.Builder< + ListInstancePartitionsRequest, + ListInstancePartitionsResponse, + ListInstancePartitionsPagedResponse> + listInstancePartitionsSettings() { + return listInstancePartitionsSettings; + } + + /** Returns the builder for the settings used for calls to getInstance. */ + public UnaryCallSettings.Builder getInstanceSettings() { + return getInstanceSettings; + } + + /** Returns the builder for the settings used for calls to createInstance. */ + public UnaryCallSettings.Builder createInstanceSettings() { + return createInstanceSettings; + } + + /** Returns the builder for the settings used for calls to createInstance. */ + public OperationCallSettings.Builder + createInstanceOperationSettings() { + return createInstanceOperationSettings; + } + + /** Returns the builder for the settings used for calls to updateInstance. */ + public UnaryCallSettings.Builder updateInstanceSettings() { + return updateInstanceSettings; + } + + /** Returns the builder for the settings used for calls to updateInstance. 
*/ + public OperationCallSettings.Builder + updateInstanceOperationSettings() { + return updateInstanceOperationSettings; + } + + /** Returns the builder for the settings used for calls to deleteInstance. */ + public UnaryCallSettings.Builder deleteInstanceSettings() { + return deleteInstanceSettings; + } + + /** Returns the builder for the settings used for calls to setIamPolicy. */ + public UnaryCallSettings.Builder setIamPolicySettings() { + return setIamPolicySettings; + } + + /** Returns the builder for the settings used for calls to getIamPolicy. */ + public UnaryCallSettings.Builder getIamPolicySettings() { + return getIamPolicySettings; + } + + /** Returns the builder for the settings used for calls to testIamPermissions. */ + public UnaryCallSettings.Builder + testIamPermissionsSettings() { + return testIamPermissionsSettings; + } + + /** Returns the builder for the settings used for calls to getInstancePartition. */ + public UnaryCallSettings.Builder + getInstancePartitionSettings() { + return getInstancePartitionSettings; + } + + /** Returns the builder for the settings used for calls to createInstancePartition. */ + public UnaryCallSettings.Builder + createInstancePartitionSettings() { + return createInstancePartitionSettings; + } + + /** Returns the builder for the settings used for calls to createInstancePartition. */ + public OperationCallSettings.Builder< + CreateInstancePartitionRequest, InstancePartition, CreateInstancePartitionMetadata> + createInstancePartitionOperationSettings() { + return createInstancePartitionOperationSettings; + } + + /** Returns the builder for the settings used for calls to deleteInstancePartition. */ + public UnaryCallSettings.Builder + deleteInstancePartitionSettings() { + return deleteInstancePartitionSettings; + } + + /** Returns the builder for the settings used for calls to updateInstancePartition. 
*/ + public UnaryCallSettings.Builder + updateInstancePartitionSettings() { + return updateInstancePartitionSettings; + } + + /** Returns the builder for the settings used for calls to updateInstancePartition. */ + public OperationCallSettings.Builder< + UpdateInstancePartitionRequest, InstancePartition, UpdateInstancePartitionMetadata> + updateInstancePartitionOperationSettings() { + return updateInstancePartitionOperationSettings; + } + + /** Returns the builder for the settings used for calls to listInstancePartitionOperations. */ + public PagedCallSettings.Builder< + ListInstancePartitionOperationsRequest, + ListInstancePartitionOperationsResponse, + ListInstancePartitionOperationsPagedResponse> + listInstancePartitionOperationsSettings() { + return listInstancePartitionOperationsSettings; + } + + /** Returns the builder for the settings used for calls to moveInstance. */ + public UnaryCallSettings.Builder moveInstanceSettings() { + return moveInstanceSettings; + } + + /** Returns the builder for the settings used for calls to moveInstance. */ + public OperationCallSettings.Builder< + MoveInstanceRequest, MoveInstanceResponse, MoveInstanceMetadata> + moveInstanceOperationSettings() { + return moveInstanceOperationSettings; + } + + @Override + public InstanceAdminStubSettings build() throws IOException { + return new InstanceAdminStubSettings(this); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/Version.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/Version.java new file mode 100644 index 000000000000..3ef859f106e7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/Version.java @@ -0,0 +1,27 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.admin.instance.v1.stub; + +import com.google.api.core.InternalApi; + +@InternalApi("For internal use only") +final class Version { + // {x-version-update-start:google-cloud-spanner:current} + static final String VERSION = "0.0.0-SNAPSHOT"; + // {x-version-update-end} + +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AbstractBaseUnitOfWork.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AbstractBaseUnitOfWork.java new file mode 100644 index 000000000000..1d71e062cbb4 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AbstractBaseUnitOfWork.java @@ -0,0 +1,460 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.cloud.spanner.BatchReadOnlyTransaction; +import com.google.cloud.spanner.BatchTransactionId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.OpenTelemetryContextKeys; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.Options.RpcPriority; +import com.google.cloud.spanner.Partition; +import com.google.cloud.spanner.PartitionOptions; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.ResultSets; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.Type.StructField; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.StatementExecutor.StatementTimeout; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.MoreExecutors; +import io.grpc.Context; +import io.grpc.Deadline; +import io.grpc.MethodDescriptor; +import io.grpc.Status; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.context.Scope; +import java.time.Duration; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutionException; +import 
java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import javax.annotation.concurrent.GuardedBy; + +/** Base for all {@link Connection}-based transactions and batches. */ +abstract class AbstractBaseUnitOfWork implements UnitOfWork { + static final String DB_STATEMENT = "db.statement"; + static final AttributeKey DB_STATEMENT_KEY = AttributeKey.stringKey(DB_STATEMENT); + static final AttributeKey> DB_STATEMENT_ARRAY_KEY = + AttributeKey.stringArrayKey(DB_STATEMENT); + + private final StatementExecutor statementExecutor; + private final StatementTimeout statementTimeout; + protected final String transactionTag; + protected final List transactionRetryListeners; + protected final boolean excludeTxnFromChangeStreams; + protected final RpcPriority rpcPriority; + protected final com.google.spanner.v1.RequestOptions.ClientContext clientContext; + protected final Span span; + + /** Class for keeping track of the stacktrace of the caller of an async statement. */ + static final class SpannerAsyncExecutionException extends RuntimeException { + final Statement statement; + + SpannerAsyncExecutionException(Statement statement) { + this.statement = statement; + } + + public String getMessage() { + // We only include the SQL of the statement and not the parameter values to prevent + // potentially sensitive data to escape into an error message. + return String.format("Execution failed for statement: %s", statement.getSql()); + } + } + + /** + * The {@link Future} that monitors the result of the statement currently being executed for this + * unit of work. 
+ */ + @GuardedBy("this") + private volatile Future currentlyRunningStatementFuture = null; + + enum InterceptorsUsage { + INVOKE_INTERCEPTORS, + IGNORE_INTERCEPTORS + } + + abstract static class Builder, T extends AbstractBaseUnitOfWork> { + private StatementExecutor statementExecutor; + private StatementTimeout statementTimeout = new StatementTimeout(); + private String transactionTag; + private List transactionRetryListeners; + + private boolean excludeTxnFromChangeStreams; + private RpcPriority rpcPriority; + private com.google.spanner.v1.RequestOptions.ClientContext clientContext; + private Span span; + + Builder() {} + + @SuppressWarnings("unchecked") + B self() { + return (B) this; + } + + B withStatementExecutor(StatementExecutor executor) { + Preconditions.checkNotNull(executor); + this.statementExecutor = executor; + return self(); + } + + B setStatementTimeout(StatementTimeout timeout) { + Preconditions.checkNotNull(timeout); + this.statementTimeout = timeout; + return self(); + } + + B setTransactionRetryListeners(List listeners) { + Preconditions.checkNotNull(listeners); + this.transactionRetryListeners = listeners; + return self(); + } + + boolean hasTransactionRetryListeners() { + return this.transactionRetryListeners != null; + } + + B setTransactionTag(@Nullable String tag) { + this.transactionTag = tag; + return self(); + } + + B setExcludeTxnFromChangeStreams(boolean excludeTxnFromChangeStreams) { + this.excludeTxnFromChangeStreams = excludeTxnFromChangeStreams; + return self(); + } + + B setRpcPriority(@Nullable RpcPriority rpcPriority) { + this.rpcPriority = rpcPriority; + return self(); + } + + B setClientContext(@Nullable com.google.spanner.v1.RequestOptions.ClientContext clientContext) { + this.clientContext = clientContext; + return self(); + } + + B setSpan(@Nullable Span span) { + this.span = span; + return self(); + } + + abstract T build(); + } + + AbstractBaseUnitOfWork(Builder builder) { + 
Preconditions.checkState(builder.statementExecutor != null, "No statement executor specified"); + this.statementExecutor = builder.statementExecutor; + this.statementTimeout = builder.statementTimeout; + this.transactionTag = builder.transactionTag; + this.transactionRetryListeners = builder.transactionRetryListeners; + this.excludeTxnFromChangeStreams = builder.excludeTxnFromChangeStreams; + this.rpcPriority = builder.rpcPriority; + this.clientContext = builder.clientContext; + this.span = Preconditions.checkNotNull(builder.span); + } + + @Override + public Span getSpan() { + return this.span; + } + + ApiFuture asyncEndUnitOfWorkSpan() { + return this.statementExecutor.submit(this::endUnitOfWorkSpan); + } + + private Void endUnitOfWorkSpan() { + if (this.span != null) { + this.span.end(); + } + return null; + } + + /** + * Returns a descriptive name for the type of transaction / unit of work. This is used in error + * messages. + */ + abstract String getUnitOfWorkName(); + + @Override + public void savepoint(@Nonnull String name, @Nonnull Dialect dialect) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Savepoint is not supported for " + getUnitOfWorkName()); + } + + @Override + public void releaseSavepoint(@Nonnull String name) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "Release savepoint is not supported for " + getUnitOfWorkName()); + } + + @Override + public void rollbackToSavepoint( + @Nonnull String name, @Nonnull SavepointSupport savepointSupport) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "Rollback to savepoint is not supported for " + getUnitOfWorkName()); + } + + @Override + public ApiFuture partitionQueryAsync( + CallType callType, + ParsedStatement query, + PartitionOptions partitionOptions, + QueryOption... 
options) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "Partition query is not supported for " + getUnitOfWorkName()); + } + + ResultSet partitionQuery( + BatchReadOnlyTransaction transaction, + PartitionOptions partitionOptions, + ParsedStatement query, + QueryOption... options) { + final String partitionColumnName = "PARTITION"; + BatchTransactionId transactionId = transaction.getBatchTransactionId(); + List partitions = + transaction.partitionQuery(partitionOptions, query.getStatement(), options); + return ResultSets.forRows( + com.google.cloud.spanner.Type.struct( + StructField.of(partitionColumnName, com.google.cloud.spanner.Type.string())), + partitions.stream() + .map( + partition -> + Struct.newBuilder() + .set(partitionColumnName) + .to(PartitionId.encodeToString(transactionId, partition)) + .build()) + .collect(Collectors.toList())); + } + + StatementExecutor getStatementExecutor() { + return statementExecutor; + } + + StatementTimeout getStatementTimeout() { + return statementTimeout; + } + + @Override + public void cancel() { + synchronized (this) { + if (currentlyRunningStatementFuture != null + && !currentlyRunningStatementFuture.isDone() + && !currentlyRunningStatementFuture.isCancelled()) { + currentlyRunningStatementFuture.cancel(true); + } + } + } + + ApiFuture executeStatementAsync( + CallType callType, + ParsedStatement statement, + Callable callable, + @Nullable MethodDescriptor applyStatementTimeoutToMethod) { + return executeStatementAsync( + callType, + statement, + callable, + InterceptorsUsage.INVOKE_INTERCEPTORS, + applyStatementTimeoutToMethod == null + ? 
Collections.emptySet() + : ImmutableList.of(applyStatementTimeoutToMethod)); + } + + ApiFuture executeStatementAsync( + CallType callType, + ParsedStatement statement, + Callable callable, + Collection> applyStatementTimeoutToMethods) { + return executeStatementAsync( + callType, + statement, + callable, + InterceptorsUsage.INVOKE_INTERCEPTORS, + applyStatementTimeoutToMethods); + } + + ResponseT getWithStatementTimeout( + OperationFuture operation, ParsedStatement statement) { + ResponseT res; + try { + if (statementTimeout.hasTimeout()) { + TimeUnit unit = statementTimeout.getAppropriateTimeUnit(); + res = operation.get(statementTimeout.getTimeoutValue(unit), unit); + } else { + res = operation.get(); + } + } catch (TimeoutException e) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.DEADLINE_EXCEEDED, + "Statement execution timeout occurred for " + statement.getSql(), + e); + } catch (ExecutionException e) { + Throwable cause = e.getCause(); + Set causes = new HashSet<>(); + while (cause != null && !causes.contains(cause)) { + if (cause instanceof SpannerException) { + throw (SpannerException) cause; + } + causes.add(cause); + cause = cause.getCause(); + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.fromGrpcStatus(Status.fromThrowable(e)), + "Statement execution failed for " + statement.getSql(), + e); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.CANCELLED, "Statement execution was interrupted", e); + } catch (CancellationException e) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.CANCELLED, "Statement execution was cancelled", e); + } + return res; + } + + ApiFuture executeStatementAsync( + CallType callType, + ParsedStatement statement, + Callable callable, + InterceptorsUsage interceptorUsage, + final Collection> applyStatementTimeoutToMethods) { + Preconditions.checkNotNull(statement); + Preconditions.checkNotNull(callable); + + if 
(interceptorUsage == InterceptorsUsage.INVOKE_INTERCEPTORS) { + statementExecutor.invokeInterceptors( + statement, StatementExecutionStep.EXECUTE_STATEMENT, this); + } + Context context = Context.current(); + Deadline transactionDeadline = getTransactionDeadline(); + Deadline statementDeadline = + statementTimeout.hasTimeout() + ? Deadline.after( + statementTimeout.getTimeoutValue(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS) + : null; + Deadline effectiveDeadline = min(transactionDeadline, statementDeadline); + if (effectiveDeadline != null && !applyStatementTimeoutToMethods.isEmpty()) { + context = + context.withValue( + SpannerOptions.CALL_CONTEXT_CONFIGURATOR_KEY, + new SpannerOptions.CallContextConfigurator() { + @Override + public ApiCallContext configure( + ApiCallContext context, ReqT request, MethodDescriptor method) { + if (applyStatementTimeoutToMethods.contains(method)) { + // Calculate the remaining timeout. This method could be called multiple times + // if the transaction is retried. + long remainingTimeout = effectiveDeadline.timeRemaining(TimeUnit.NANOSECONDS); + if (remainingTimeout <= 0) { + remainingTimeout = 1; + } + return GrpcCallContext.createDefault() + .withTimeoutDuration(Duration.ofNanos(remainingTimeout)); + } + return null; + } + }); + } + // Register the name of the thread that called this method as the thread name that should be + // traced. + try (Scope ignore = + io.opentelemetry.context.Context.current() + .with(OpenTelemetryContextKeys.THREAD_NAME_KEY, Thread.currentThread().getName()) + .makeCurrent()) { + ApiFuture f = statementExecutor.submit(context.wrap(callable)); + final SpannerAsyncExecutionException caller = + callType == CallType.ASYNC + ? 
new SpannerAsyncExecutionException(statement.getStatement()) + : null; + final ApiFuture future = + ApiFutures.catching( + f, + Throwable.class, + input -> { + if (caller != null) { + input.addSuppressed(caller); + } + throw SpannerExceptionFactory.asSpannerException(input); + }, + MoreExecutors.directExecutor()); + synchronized (this) { + this.currentlyRunningStatementFuture = future; + } + future.addListener( + new Runnable() { + @Override + public void run() { + synchronized (this) { + if (currentlyRunningStatementFuture == future) { + currentlyRunningStatementFuture = null; + } + } + if (isSingleUse()) { + endUnitOfWorkSpan(); + } + } + }, + MoreExecutors.directExecutor()); + return future; + } + } + + @Nullable + static Deadline min(@Nullable Deadline a, @Nullable Deadline b) { + if (a == null && b == null) { + return null; + } + if (a == null) { + return b; + } + if (b == null) { + return a; + } + return a.minimum(b); + } + + @Nullable + Deadline getTransactionDeadline() { + return null; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AbstractMultiUseTransaction.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AbstractMultiUseTransaction.java new file mode 100644 index 000000000000..ca78c3e5aeaf --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AbstractMultiUseTransaction.java @@ -0,0 +1,216 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.api.core.ApiFuture; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.ReadContext; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.spanner.v1.SpannerGrpc; +import io.opentelemetry.context.Scope; +import java.util.LinkedList; +import java.util.Objects; +import javax.annotation.Nonnull; + +/** + * Base class for {@link Connection}-based transactions that can be used for multiple read and + * read/write statements. + */ +abstract class AbstractMultiUseTransaction extends AbstractBaseUnitOfWork { + + /** In-memory savepoint implementation that is used by the Connection API. */ + static class Savepoint { + private final String name; + private final boolean autoSavepoint; + + static Savepoint of(String name) { + return new Savepoint(name, false); + } + + Savepoint(String name, boolean autoSavepoint) { + this.name = name; + this.autoSavepoint = autoSavepoint; + } + + /** Returns the index of the first statement that was executed after this savepoint. */ + int getStatementPosition() { + return -1; + } + + /** Returns the index of the first mutation that was executed after this savepoint. 
*/ + int getMutationPosition() { + return -1; + } + + boolean isAutoSavepoint() { + return this.autoSavepoint; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof Savepoint)) { + return false; + } + Savepoint other = (Savepoint) o; + return Objects.equals(other.name, this.name) + && Objects.equals(other.autoSavepoint, this.autoSavepoint); + } + + @Override + public int hashCode() { + return Objects.hash(this.name, this.autoSavepoint); + } + + @Override + public String toString() { + return name; + } + } + + private final LinkedList savepoints = new LinkedList<>(); + + AbstractMultiUseTransaction(Builder builder) { + super(builder); + } + + @Override + public boolean isSingleUse() { + return false; + } + + @Override + public Type getType() { + return Type.TRANSACTION; + } + + @Override + public boolean isActive() { + return getState().isActive(); + } + + abstract void checkAborted(); + + /** + * Check that the current transaction actually has a valid underlying transaction and creates it + * if necessary. If the transaction does not have a valid underlying transaction and/or is not in + * a state that allows the creation of a transaction, the method will throw a {@link + * SpannerException}. + */ + abstract void checkOrCreateValidTransaction(ParsedStatement statement, CallType callType); + + /** Returns the {@link ReadContext} that can be used for queries on this transaction. */ + abstract ReadContext getReadContext(); + + @Override + public ApiFuture executeQueryAsync( + final CallType callType, + final ParsedStatement statement, + final AnalyzeMode analyzeMode, + final QueryOption... 
options) { + Preconditions.checkArgument(statement.isQuery(), "Statement is not a query"); + try (Scope ignore = span.makeCurrent()) { + checkOrCreateValidTransaction(statement, callType); + return executeStatementAsync( + callType, + statement, + () -> { + checkAborted(); + return DirectExecuteResultSet.ofResultSet( + internalExecuteQuery(statement, analyzeMode, options)); + }, + SpannerGrpc.getExecuteStreamingSqlMethod()); + } + } + + ResultSet internalExecuteQuery( + final ParsedStatement statement, AnalyzeMode analyzeMode, QueryOption... options) { + if (analyzeMode == AnalyzeMode.NONE) { + return getReadContext().executeQuery(statement.getStatement(), options); + } + return getReadContext() + .analyzeQuery(statement.getStatement(), analyzeMode.getQueryAnalyzeMode()); + } + + @Override + public ApiFuture runBatchAsync(CallType callType) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Run batch is not supported for transactions"); + } + + @Override + public void abortBatch() { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Run batch is not supported for transactions"); + } + + abstract Savepoint savepoint(String name); + + abstract void rollbackToSavepoint(Savepoint savepoint); + + @VisibleForTesting + ImmutableList getSavepoints() { + return ImmutableList.copyOf(savepoints); + } + + @Override + public void savepoint(@Nonnull String name, @Nonnull Dialect dialect) { + if (dialect != Dialect.POSTGRESQL) { + // Check that there is no savepoint with this name. Note that PostgreSQL allows multiple + // savepoints in a transaction with the same name, so we don't execute this check for PG. 
+ if (savepoints.stream().anyMatch(savepoint -> savepoint.name.equals(name))) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Savepoint with name " + name + " already exists"); + } + } + savepoints.add(savepoint(name)); + } + + @Override + public void releaseSavepoint(@Nonnull String name) { + // Remove the given savepoint and all later savepoints from the transaction. + savepoints.subList(getSavepointIndex(name), savepoints.size()).clear(); + } + + @Override + public void rollbackToSavepoint( + @Nonnull String name, @Nonnull SavepointSupport savepointSupport) { + int index = getSavepointIndex(name); + rollbackToSavepoint(savepoints.get(index)); + if (index < (savepoints.size() - 1)) { + // Remove all savepoints that come after this savepoint from the transaction. + // Rolling back to a savepoint does not remove that savepoint, only the ones that come after. + savepoints.subList(index + 1, savepoints.size()).clear(); + } + } + + private int getSavepointIndex(String name) { + int index = savepoints.lastIndexOf(savepoint(name)); + if (index == -1) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Savepoint with name " + name + " does not exist"); + } + return index; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AbstractStatementParser.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AbstractStatementParser.java new file mode 100644 index 000000000000..fea032e2f525 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AbstractStatementParser.java @@ -0,0 +1,1076 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.SimpleParser.isValidIdentifierChar; +import static com.google.cloud.spanner.connection.StatementHintParser.convertHintsToOptions; + +import com.google.api.core.InternalApi; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Options.ReadQueryUpdateTransactionOption; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.AbstractBaseUnitOfWork.InterceptorsUsage; +import com.google.cloud.spanner.connection.SimpleParser.Result; +import com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType; +import com.google.cloud.spanner.connection.UnitOfWork.CallType; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.base.Splitter; +import com.google.common.base.Suppliers; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheStats; +import com.google.common.cache.Weigher; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; +import java.nio.CharBuffer; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import 
java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.function.Supplier; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nullable; + +/** + * Internal class for the Spanner Connection API. + * + *

Parses {@link ClientSideStatement}s and normal SQL statements. The parser is able to recognize + * the type of statement, allowing the connection API to know which method on Spanner should be + * called. The parser does not validate the validity of statements, except for {@link + * ClientSideStatement}s. This means that an invalid DML statement could be accepted by the {@link + * AbstractStatementParser} and sent to Spanner, and Spanner will then reject it with some error + * message. + */ +@InternalApi +public abstract class AbstractStatementParser { + private static final Object lock = new Object(); + private static final Map INSTANCES = new HashMap<>(); + private static final ImmutableMap> + KNOWN_PARSER_CLASSES = + ImmutableMap.of( + Dialect.GOOGLE_STANDARD_SQL, + SpannerStatementParser.class, + Dialect.POSTGRESQL, + PostgreSQLStatementParser.class); + + @VisibleForTesting + static void resetParsers() { + synchronized (lock) { + INSTANCES.clear(); + } + } + + /** Get an instance of {@link AbstractStatementParser} for the specified dialect. 
*/ + public static AbstractStatementParser getInstance(Dialect dialect) { + synchronized (lock) { + if (!INSTANCES.containsKey(dialect)) { + try { + Class clazz = KNOWN_PARSER_CLASSES.get(dialect); + if (clazz == null) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INTERNAL, "There is no known statement parser for dialect " + dialect); + } + INSTANCES.put(dialect, clazz.getDeclaredConstructor().newInstance()); + } catch (Exception exception) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INTERNAL, + "Could not instantiate statement parser for dialect " + dialect.name(), + exception); + } + } + return INSTANCES.get(dialect); + } + } + + static final Set ddlStatements = + ImmutableSet.of("CREATE", "DROP", "ALTER", "ANALYZE", "GRANT", "REVOKE", "RENAME"); + static final Set selectStatements = + ImmutableSet.of("SELECT", "WITH", "SHOW", "FROM", "GRAPH", "CALL"); + static final Set SELECT_STATEMENTS_ALLOWING_PRECEDING_BRACKETS = + ImmutableSet.of("SELECT", "FROM"); + static final Set dmlStatements = ImmutableSet.of("INSERT", "UPDATE", "DELETE"); + + /* + * The following fixed pre-parsed statements are used internally by the Connection API. These do + * not need to be parsed using a specific dialect, as they are equal for all dialects, and + * pre-parsing them avoids the need to repeatedly parse statements that are used internally. + */ + + /** Begins a transaction. */ + static final ParsedStatement BEGIN_STATEMENT; + + /** + * Create a COMMIT statement to use with the {@link Connection#commit()} method to allow it to be + * cancelled, time out or retried. + * + *

{@link ReadWriteTransaction} uses the generic methods {@link + * ReadWriteTransaction#executeStatementAsync(CallType, ParsedStatement, Callable, + * InterceptorsUsage, Collection)} and {@link ReadWriteTransaction#runWithRetry(Callable)} to + * allow statements to be cancelled, to timeout and to be retried. These methods require a {@link + * ParsedStatement} as input. When the {@link Connection#commit()} method is called directly, we + * do not have a {@link ParsedStatement}, and the method uses this statement instead in order to + * use the same logic as the other statements. + */ + static final ParsedStatement COMMIT_STATEMENT; + + /** The {@link Statement} and {@link Callable} for rollbacks */ + static final ParsedStatement ROLLBACK_STATEMENT; + + /** + * Create a RUN BATCH statement to use with the {@link Connection#executeBatchUpdate(Iterable)} + * method to allow it to be cancelled, time out or retried. + * + *

{@link ReadWriteTransaction} uses the generic methods {@link + * ReadWriteTransaction#executeStatementAsync(CallType, ParsedStatement, Callable, Collection)} + * and {@link ReadWriteTransaction#runWithRetry(Callable)} to allow statements to be cancelled, to + * timeout and to be retried. These methods require a {@link ParsedStatement} as input. When the + * {@link Connection#executeBatchUpdate(Iterable)} method is called, we do not have one {@link + * ParsedStatement}, and the method uses this statement instead in order to use the same logic as + * the other statements. + */ + static final ParsedStatement RUN_BATCH_STATEMENT; + + static { + try { + BEGIN_STATEMENT = getInstance(Dialect.GOOGLE_STANDARD_SQL).parse(Statement.of("BEGIN")); + COMMIT_STATEMENT = getInstance(Dialect.GOOGLE_STANDARD_SQL).parse(Statement.of("COMMIT")); + ROLLBACK_STATEMENT = getInstance(Dialect.GOOGLE_STANDARD_SQL).parse(Statement.of("ROLLBACK")); + RUN_BATCH_STATEMENT = + getInstance(Dialect.GOOGLE_STANDARD_SQL).parse(Statement.of("RUN BATCH")); + + } catch (Throwable ex) { + Logger logger = Logger.getLogger(AbstractStatementParser.class.getName()); + logger.log(Level.SEVERE, "Static initialization failure.", ex); + throw ex; + } + } + + /** The type of statement that has been recognized by the parser. 
*/ + @InternalApi + public enum StatementType { + CLIENT_SIDE, + DDL, + QUERY, + UPDATE, + UNKNOWN + } + + /** A statement that has been parsed */ + @InternalApi + public static class ParsedStatement { + private final StatementType type; + private final ClientSideStatementImpl clientSideStatement; + private final Statement statement; + private final Supplier sqlWithoutComments; + private final Supplier returningClause; + private final ReadQueryUpdateTransactionOption[] optionsFromHints; + + private static ParsedStatement clientSideStatement( + ClientSideStatementImpl clientSideStatement, + Statement statement, + Supplier sqlWithoutComments) { + return new ParsedStatement(clientSideStatement, statement, sqlWithoutComments); + } + + private static ParsedStatement ddl(Statement statement, Supplier sqlWithoutComments) { + return new ParsedStatement(StatementType.DDL, statement, sqlWithoutComments); + } + + private static ParsedStatement query( + Statement statement, + Supplier sqlWithoutComments, + QueryOptions defaultQueryOptions, + ReadQueryUpdateTransactionOption[] optionsFromHints) { + return new ParsedStatement( + StatementType.QUERY, + null, + statement, + sqlWithoutComments, + defaultQueryOptions, + Suppliers.ofInstance(false), + optionsFromHints); + } + + private static ParsedStatement update( + Statement statement, + Supplier sqlWithoutComments, + Supplier returningClause, + ReadQueryUpdateTransactionOption[] optionsFromHints) { + return new ParsedStatement( + StatementType.UPDATE, statement, sqlWithoutComments, returningClause, optionsFromHints); + } + + private static ParsedStatement unknown( + Statement statement, Supplier sqlWithoutComments) { + return new ParsedStatement(StatementType.UNKNOWN, statement, sqlWithoutComments); + } + + private ParsedStatement( + ClientSideStatementImpl clientSideStatement, + Statement statement, + Supplier sqlWithoutComments) { + Preconditions.checkNotNull(clientSideStatement); + Preconditions.checkNotNull(statement); + 
this.type = StatementType.CLIENT_SIDE; + this.clientSideStatement = clientSideStatement; + this.statement = statement; + this.sqlWithoutComments = sqlWithoutComments; + this.returningClause = Suppliers.ofInstance(false); + this.optionsFromHints = EMPTY_OPTIONS; + } + + private ParsedStatement( + StatementType type, + Statement statement, + Supplier sqlWithoutComments, + Supplier returningClause, + ReadQueryUpdateTransactionOption[] optionsFromHints) { + this(type, null, statement, sqlWithoutComments, null, returningClause, optionsFromHints); + } + + private ParsedStatement( + StatementType type, Statement statement, Supplier sqlWithoutComments) { + this( + type, + null, + statement, + sqlWithoutComments, + null, + Suppliers.ofInstance(false), + EMPTY_OPTIONS); + } + + private ParsedStatement( + StatementType type, + ClientSideStatementImpl clientSideStatement, + Statement statement, + Supplier sqlWithoutComments, + QueryOptions defaultQueryOptions, + Supplier returningClause, + ReadQueryUpdateTransactionOption[] optionsFromHints) { + Preconditions.checkNotNull(type); + this.type = type; + this.clientSideStatement = clientSideStatement; + this.statement = statement == null ? 
null : mergeQueryOptions(statement, defaultQueryOptions); + this.sqlWithoutComments = Preconditions.checkNotNull(sqlWithoutComments); + this.returningClause = returningClause; + this.optionsFromHints = optionsFromHints; + } + + private ParsedStatement copy(Statement statement, QueryOptions defaultQueryOptions) { + return new ParsedStatement( + this.type, + this.clientSideStatement, + statement.withReplacedSql(this.statement.getSql()), + this.sqlWithoutComments, + defaultQueryOptions, + this.returningClause, + this.optionsFromHints); + } + + private ParsedStatement forCache() { + return new ParsedStatement( + this.type, + this.clientSideStatement, + Statement.of(this.statement.getSql()), + this.sqlWithoutComments, + null, + this.returningClause, + this.optionsFromHints); + } + + @Override + public int hashCode() { + return Objects.hash( + this.type, this.clientSideStatement, this.statement, this.sqlWithoutComments); + } + + @Override + public boolean equals(Object other) { + if (!(other instanceof ParsedStatement)) { + return false; + } + ParsedStatement o = (ParsedStatement) other; + return Objects.equals(this.type, o.type) + && Objects.equals(this.clientSideStatement, o.clientSideStatement) + && Objects.equals(this.statement, o.statement) + && Objects.equals(this.sqlWithoutComments, o.sqlWithoutComments); + } + + /** + * @return the type of statement that was recognized by the parser. + */ + @InternalApi + public StatementType getType() { + return type; + } + + /** + * @return whether the statement has a returning clause or not. + */ + @InternalApi + public boolean hasReturningClause() { + return this.returningClause.get(); + } + + @InternalApi + public ReadQueryUpdateTransactionOption[] getOptionsFromHints() { + return this.optionsFromHints; + } + + /** + * @return true if the statement is a query that will return a {@link + * com.google.cloud.spanner.ResultSet}. 
+ */ + @InternalApi + public boolean isQuery() { + switch (type) { + case CLIENT_SIDE: + return getClientSideStatement().isQuery(); + case QUERY: + return true; + case UPDATE: + case DDL: + case UNKNOWN: + default: + } + return false; + } + + /** + * @return true if the statement is a DML statement or a client side statement that will return + * an update count. + */ + @InternalApi + public boolean isUpdate() { + switch (type) { + case CLIENT_SIDE: + return getClientSideStatement().isUpdate(); + case UPDATE: + return true; + case QUERY: + case DDL: + case UNKNOWN: + default: + } + return false; + } + + /** + * @return true if the statement is a DDL statement. + */ + @InternalApi + public boolean isDdl() { + switch (type) { + case DDL: + return true; + case CLIENT_SIDE: + case UPDATE: + case QUERY: + case UNKNOWN: + default: + } + return false; + } + + /** + * @return the {@link ClientSideStatementType} of this statement. This method may only be called + * on statements of type {@link StatementType#CLIENT_SIDE}. + */ + @InternalApi + public ClientSideStatementType getClientSideStatementType() { + Preconditions.checkState(type == StatementType.CLIENT_SIDE); + return clientSideStatement.getStatementType(); + } + + Statement getStatement() { + return statement; + } + + /** + * Merges the {@link QueryOptions} of the {@link Statement} with the current {@link + * QueryOptions} of this connection. The {@link QueryOptions} that are already present on the + * statement take precedence above the connection {@link QueryOptions}. 
+ */ + Statement mergeQueryOptions(Statement statement, QueryOptions defaultQueryOptions) { + if (defaultQueryOptions == null + || defaultQueryOptions.equals(QueryOptions.getDefaultInstance())) { + return statement; + } + if (statement.getQueryOptions() == null) { + return statement.toBuilder().withQueryOptions(defaultQueryOptions).build(); + } + return statement.toBuilder() + .withQueryOptions( + defaultQueryOptions.toBuilder().mergeFrom(statement.getQueryOptions()).build()) + .build(); + } + + /** + * @return the original SQL statement + */ + @InternalApi + public String getSql() { + return statement.getSql(); + } + + /** + * @return the SQL statement with all comments removed from the SQL string. + * @deprecated use {@link #getSql()} instead + */ + @Deprecated + @InternalApi + public String getSqlWithoutComments() { + return sqlWithoutComments.get(); + } + + ClientSideStatement getClientSideStatement() { + Preconditions.checkState( + clientSideStatement != null, + "This ParsedStatement does not contain a ClientSideStatement"); + return clientSideStatement; + } + } + + private final Set statements; + + /** The default maximum size of the statement cache in Mb. */ + public static final int DEFAULT_MAX_STATEMENT_CACHE_SIZE_MB = 5; + + private static int getMaxStatementCacheSize() { + String stringValue = System.getProperty("spanner.statement_cache_size_mb"); + if (stringValue == null) { + return DEFAULT_MAX_STATEMENT_CACHE_SIZE_MB; + } + int value = 0; + try { + value = Integer.parseInt(stringValue); + } catch (NumberFormatException ignore) { + } + return Math.max(value, 0); + } + + private static boolean isRecordStatementCacheStats() { + return "true" + .equalsIgnoreCase(System.getProperty("spanner.record_statement_cache_stats", "false")); + } + + /** + * Cache for parsed statements. This prevents statements that are executed multiple times by the + * application to be parsed over and over again. The default maximum size is 5Mb. 
+ */ + private final Cache statementCache; + + AbstractStatementParser(Set statements) { + this.statements = Collections.unmodifiableSet(statements); + int maxCacheSize = getMaxStatementCacheSize(); + if (maxCacheSize > 0) { + CacheBuilder cacheBuilder = + CacheBuilder.newBuilder() + // Set the max size to (approx) 5MB (by default). + .maximumWeight(maxCacheSize * 1024L * 1024L) + // We do length*2 because Java uses 2 bytes for each char. + .weigher( + (Weigher) + (key, value) -> 2 * key.length() + 2 * value.statement.getSql().length()) + .concurrencyLevel(Runtime.getRuntime().availableProcessors()); + if (isRecordStatementCacheStats()) { + cacheBuilder.recordStats(); + } + this.statementCache = cacheBuilder.build(); + } else { + this.statementCache = null; + } + } + + @VisibleForTesting + CacheStats getStatementCacheStats() { + return statementCache == null ? null : statementCache.stats(); + } + + @VisibleForTesting + Set getClientSideStatements() { + return statements; + } + + /** + * Parses the given statement and categorizes it as one of the possible {@link StatementType}s. + * The validity of the statement is not checked, unless it is a client-side statement. + * + * @param statement The statement to parse. + * @return the parsed and categorized statement. 
+ */ + @InternalApi + public ParsedStatement parse(Statement statement) { + return parse(statement, null); + } + + ParsedStatement parse(Statement statement, QueryOptions defaultQueryOptions) { + if (statementCache == null) { + return internalParse(statement, defaultQueryOptions); + } + + ParsedStatement parsedStatement = statementCache.getIfPresent(statement.getSql()); + if (parsedStatement == null) { + parsedStatement = internalParse(statement, defaultQueryOptions); + statementCache.put(statement.getSql(), parsedStatement.forCache()); + return parsedStatement; + } + return parsedStatement.copy(statement, defaultQueryOptions); + } + + ParsedStatement internalParse(Statement statement, QueryOptions defaultQueryOptions) { + String sql = statement.getSql(); + StatementHintParser statementHintParser = new StatementHintParser(getDialect(), sql); + ReadQueryUpdateTransactionOption[] optionsFromHints = EMPTY_OPTIONS; + if (statementHintParser.hasStatementHints() + && !statementHintParser.getClientSideStatementHints().isEmpty()) { + statement = + statement.toBuilder().replace(statementHintParser.getSqlWithoutClientSideHints()).build(); + optionsFromHints = convertHintsToOptions(statementHintParser.getClientSideStatementHints()); + } + // Create a supplier that will actually remove all comments and hints from the SQL string to be + // backwards compatible with anything that really needs the SQL string without comments. + Supplier sqlWithoutCommentsSupplier = + Suppliers.memoize(() -> removeCommentsAndTrim(sql)); + + // Get rid of any spaces/comments at the start of the string. + SimpleParser simpleParser = new SimpleParser(getDialect(), sql); + simpleParser.skipWhitespaces(); + // Create a wrapper around the SQL string from the point after the first whitespace. 
+ CharBuffer charBuffer = CharBuffer.wrap(sql, simpleParser.getPos(), sql.length()); + ClientSideStatementImpl client = parseClientSideStatement(charBuffer); + + if (client != null) { + return ParsedStatement.clientSideStatement(client, statement, sqlWithoutCommentsSupplier); + } else { + // Find the first keyword in the SQL statement. + Result keywordResult = simpleParser.eatNextKeyword(); + if (keywordResult.isValid()) { + // Determine the statement type based on the first keyword. + String keyword = keywordResult.getValue().toUpperCase(); + if (keywordResult.isInParenthesis()) { + // If the first keyword is inside one or more parentheses, then only a subset of all + // keywords are allowed. + if (SELECT_STATEMENTS_ALLOWING_PRECEDING_BRACKETS.contains(keyword)) { + return ParsedStatement.query( + statement, sqlWithoutCommentsSupplier, defaultQueryOptions, optionsFromHints); + } + } else { + if (selectStatements.contains(keyword)) { + return ParsedStatement.query( + statement, sqlWithoutCommentsSupplier, defaultQueryOptions, optionsFromHints); + } else if (dmlStatements.contains(keyword)) { + return ParsedStatement.update( + statement, + sqlWithoutCommentsSupplier, + // TODO: Make the returning clause check work without removing comments + Suppliers.memoize(() -> checkReturningClause(sqlWithoutCommentsSupplier.get())), + optionsFromHints); + } else if (ddlStatements.contains(keyword)) { + return ParsedStatement.ddl(statement, sqlWithoutCommentsSupplier); + } + } + } + } + // Fallthrough: Return an unknown statement. + return ParsedStatement.unknown(statement, sqlWithoutCommentsSupplier); + } + + /** + * Parses the given statement as a client-side statement. Client-side statements are statements + * that are never sent to Cloud Spanner, but that are interpreted by the Connection API and then + * translated into some action, such as for example starting a transaction or getting the last + * commit timestamp. 
+ * + * @param sql The statement to try to parse as a client-side statement (without any comments). + * @return a valid {@link ClientSideStatement} or null if the statement is not a client-side + * statement. + */ + @VisibleForTesting + ClientSideStatementImpl parseClientSideStatement(CharSequence sql) { + for (ClientSideStatementImpl css : statements) { + if (css.matches(sql)) { + return css; + } + } + return null; + } + + /** + * Checks whether the given statement is (probably) a DDL statement. The method does not check the + * validity of the statement, only if it is a DDL statement based on the first word in the + * statement. + * + * @param sql The statement to check (without any comments). + * @return true if the statement is a DDL statement (i.e. starts with 'CREATE', + * 'ALTER' or 'DROP'). + * @deprecated Use {@link #parse(Statement)} instead + */ + @InternalApi + @Deprecated + public boolean isDdlStatement(String sql) { + return statementStartsWith(sql, ddlStatements); + } + + /** + * Checks whether the given statement is (probably) a SELECT query. The method does not check the + * validity of the statement, only if it is a SELECT statement based on the first word in the + * statement. + * + * @param sql The statement to check (without any comments). + * @return true if the statement is a SELECT statement (i.e. starts with 'SELECT'). + * @deprecated Use {@link #parse(Statement)} instead + */ + @InternalApi + @Deprecated + public boolean isQuery(String sql) { + // Skip any query hints at the beginning of the query. + // We only do this if we actually know that it starts with a hint to prevent unnecessary + // re-assigning the exact same sql string. 
+ if (sql.startsWith("@")) { + sql = removeStatementHint(sql); + } + if (sql.startsWith("(")) { + sql = removeOpeningBrackets(sql); + return statementStartsWith(sql, SELECT_STATEMENTS_ALLOWING_PRECEDING_BRACKETS); + } + return statementStartsWith(sql, selectStatements); + } + + /** + * Checks whether the given statement is (probably) an update statement. The method does not check + * the validity of the statement, only if it is an update statement based on the first word in the + * statement. + * + * @param sql The statement to check (without any comments). + * @return true if the statement is a DML update statement (i.e. starts with + * 'INSERT', 'UPDATE' or 'DELETE'). + * @deprecated Use {@link #parse(Statement)} instead + */ + @InternalApi + @Deprecated + public boolean isUpdateStatement(String sql) { + // Skip any query hints at the beginning of the query. + if (sql.startsWith("@")) { + sql = removeStatementHint(sql); + } + return statementStartsWith(sql, dmlStatements); + } + + private boolean statementStartsWith(String sql, Iterable checkStatements) { + Preconditions.checkNotNull(sql); + Iterator tokens = Splitter.onPattern("\\s+").split(sql).iterator(); + if (!tokens.hasNext()) { + return false; + } + String token = tokens.next(); + for (String check : checkStatements) { + if (token.equalsIgnoreCase(check)) { + return true; + } + } + return false; + } + + static final char SINGLE_QUOTE = '\''; + static final char DOUBLE_QUOTE = '"'; + static final char BACKTICK_QUOTE = '`'; + static final char HYPHEN = '-'; + static final char DASH = '#'; + static final char SLASH = '/'; + static final char ASTERISK = '*'; + static final char DOLLAR = '$'; + static final char SPACE = ' '; + static final char CLOSE_PARENTHESIS = ')'; + static final char COMMA = ','; + static final char UNDERSCORE = '_'; + static final char BACKSLASH = '\\'; + + /** + * Removes comments from and trims the given sql statement using the dialect of this parser. 
+ * + * @param sql The sql statement to remove comments from and to trim. + * @return the sql statement without the comments and leading and trailing spaces. + */ + @InternalApi + abstract String removeCommentsAndTrimInternal(String sql); + + /** + * Removes comments from and trims the given sql statement using the dialect of this parser. + * + * @param sql The sql statement to remove comments from and to trim. + * @return the sql statement without the comments and leading and trailing spaces. + */ + @InternalApi + public String removeCommentsAndTrim(String sql) { + return removeCommentsAndTrimInternal(sql); + } + + /** Removes any statement hints at the beginning of the statement. */ + abstract String removeStatementHint(String sql); + + private String removeOpeningBrackets(String sql) { + int index = 0; + while (index < sql.length()) { + if (sql.charAt(index) == '(' || Character.isWhitespace(sql.charAt(index))) { + index++; + } else { + return sql.substring(index); + } + } + return sql; + } + + @VisibleForTesting + static final ReadQueryUpdateTransactionOption[] EMPTY_OPTIONS = + new ReadQueryUpdateTransactionOption[0]; + + /** Parameter information with positional parameters translated to named parameters. */ + @InternalApi + public static class ParametersInfo { + public final int numberOfParameters; + public final String sqlWithNamedParameters; + + ParametersInfo(int numberOfParameters, String sqlWithNamedParameters) { + this.numberOfParameters = numberOfParameters; + this.sqlWithNamedParameters = sqlWithNamedParameters; + } + } + + /** + * Converts all positional parameters (?) in the given sql string into named parameters. The + * parameters are named @p1, @p2, etc. for GoogleSQL, and $1, $2, etc. for PostgreSQL. This method + * is used when converting a JDBC statement that uses positional parameters to a Cloud Spanner + * {@link Statement} instance that requires named parameters. 
+ * + * @param sql The sql string that should be converted to use named parameters + * @return A {@link ParametersInfo} object containing a string with named parameters instead of + * positional parameters and the number of parameters. + * @throws SpannerException If the input sql string contains an unclosed string/byte literal. + */ + @InternalApi + public ParametersInfo convertPositionalParametersToNamedParameters(char paramChar, String sql) { + Preconditions.checkNotNull(sql); + final String namedParamPrefix = getQueryParameterPrefix(); + StringBuilder named = new StringBuilder(sql.length() + countOccurrencesOf(paramChar, sql)); + int index = 0; + int paramIndex = 1; + while (index < sql.length()) { + char c = sql.charAt(index); + if (c == paramChar) { + named.append(namedParamPrefix).append(paramIndex); + paramIndex++; + index++; + } else { + index = skip(sql, index, named); + } + } + return new ParametersInfo(paramIndex - 1, named.toString()); + } + + /** Convenience method that is used to estimate the number of parameters in a SQL statement. */ + static int countOccurrencesOf(char c, String string) { + int res = 0; + for (int i = 0; i < string.length(); i++) { + if (string.charAt(i) == c) { + res++; + } + } + return res; + } + + /** + * Checks if the given SQL string contains a Returning clause. This method is used only in case of + * a DML statement. + * + * @param sql The sql string without comments that has to be evaluated. + * @return A boolean indicating whether the sql string has a Returning clause or not. + */ + @InternalApi + protected abstract boolean checkReturningClauseInternal(String sql); + + /** + * Checks if the given SQL string contains a Returning clause. This method is used only in case of + * a DML statement. + * + * @param sql The sql string without comments that has to be evaluated. + * @return A boolean indicating whether the sql string has a Returning clause or not. 
+ */ + @InternalApi + public boolean checkReturningClause(String sql) { + return checkReturningClauseInternal(sql); + } + + abstract Dialect getDialect(); + + /** + * Returns true if this dialect supports nested comments. + * + *

    + *
  • This method should return false for dialects that consider this to be a valid comment: + * /* A comment /* still a comment */. + *
  • This method should return true for dialects that require all comment start sequences to + * be balanced with a comment end sequence: + * /* A comment /* still a comment */ Also still a comment */. + *
+ */ + abstract boolean supportsNestedComments(); + + /** + * Returns true for dialects that support dollar-quoted string literals. + * + *

Example: $tag$This is a string$tag$. + */ + abstract boolean supportsDollarQuotedStrings(); + + /** + * Returns true for dialects that support backticks as a quoting character, either for string + * literals or identifiers. + */ + abstract boolean supportsBacktickQuote(); + + /** + * Returns true for dialects that support triple-quoted string literals and identifiers. + * + *

Example: ```This is a triple-quoted string``` + */ + abstract boolean supportsTripleQuotedStrings(); + + /** + * Returns true if the dialect supports escaping a quote character within a literal with the same + * quote as the literal is using. That is: 'foo''bar' means "foo'bar". + */ + abstract boolean supportsEscapeQuoteWithQuote(); + + /** Returns true if the dialect supports starting an escape sequence with a backslash. */ + abstract boolean supportsBackslashEscape(); + + /** + * Returns true if the dialect supports single-line comments that start with a dash. + * + *

Example: # This is a comment + */ + abstract boolean supportsHashSingleLineComments(); + + /** + * Returns true for dialects that allow line-feeds in quoted strings. Note that the return value + * of this is not used for triple-quoted strings. Triple-quoted strings are assumed to always + * support line-feeds. + */ + abstract boolean supportsLineFeedInQuotedString(); + + /** Returns the query parameter prefix that should be used for this dialect. */ + abstract String getQueryParameterPrefix(); + + /** Reads a dollar-quoted string literal from position index in the given sql string. */ + String parseDollarQuotedString(String sql, int index) { + // Look ahead to the next dollar sign (if any). Everything in between is the quote tag. + StringBuilder tag = new StringBuilder(); + while (index < sql.length()) { + char c = sql.charAt(index); + if (c == DOLLAR) { + return tag.toString(); + } + if (!isValidIdentifierChar(c)) { + break; + } + tag.append(c); + index++; + } + return null; + } + + /** + * Skips the next character, literal, identifier, or comment in the given sql string from the + * given index. The skipped characters are added to result if it is not null. 
+ */ + int skip(String sql, int currentIndex, @Nullable StringBuilder result) { + if (currentIndex >= sql.length()) { + return currentIndex; + } + char currentChar = sql.charAt(currentIndex); + + if (currentChar == SINGLE_QUOTE + || currentChar == DOUBLE_QUOTE + || (supportsBacktickQuote() && currentChar == BACKTICK_QUOTE)) { + appendIfNotNull(result, currentChar); + return skipQuoted(sql, currentIndex, currentChar, result); + } else if (supportsDollarQuotedStrings() && currentChar == DOLLAR) { + String dollarTag = parseDollarQuotedString(sql, currentIndex + 1); + if (dollarTag != null) { + appendIfNotNull(result, currentChar, dollarTag, currentChar); + return skipQuoted( + sql, currentIndex + dollarTag.length() + 1, currentChar, dollarTag, result); + } + } else if (currentChar == HYPHEN + && sql.length() > (currentIndex + 1) + && sql.charAt(currentIndex + 1) == HYPHEN) { + return skipSingleLineComment(sql, /* prefixLength= */ 2, currentIndex, result); + } else if (currentChar == DASH && supportsHashSingleLineComments()) { + return skipSingleLineComment(sql, /* prefixLength= */ 1, currentIndex, result); + } else if (currentChar == SLASH + && sql.length() > (currentIndex + 1) + && sql.charAt(currentIndex + 1) == ASTERISK) { + return skipMultiLineComment(sql, currentIndex, result); + } + + appendIfNotNull(result, currentChar); + return currentIndex + 1; + } + + /** Skips a single-line comment from startIndex and adds it to result if result is not null. 
*/ + int skipSingleLineComment( + String sql, int prefixLength, int startIndex, @Nullable StringBuilder result) { + return skipSingleLineComment(getDialect(), sql, prefixLength, startIndex, result); + } + + static int skipSingleLineComment( + Dialect dialect, + String sql, + int prefixLength, + int startIndex, + @Nullable StringBuilder result) { + SimpleParser simpleParser = new SimpleParser(dialect, sql, startIndex, false); + if (simpleParser.skipSingleLineComment(prefixLength)) { + appendIfNotNull(result, sql.substring(startIndex, simpleParser.getPos())); + } + return simpleParser.getPos(); + } + + /** Skips a multi-line comment from startIndex and adds it to result if result is not null. */ + int skipMultiLineComment(String sql, int startIndex, @Nullable StringBuilder result) { + SimpleParser simpleParser = new SimpleParser(getDialect(), sql, startIndex, false); + if (simpleParser.skipMultiLineComment()) { + appendIfNotNull(result, sql.substring(startIndex, simpleParser.getPos())); + } + return simpleParser.getPos(); + } + + /** Skips a quoted string from startIndex. */ + private int skipQuoted( + String sql, int startIndex, char startQuote, @Nullable StringBuilder result) { + return skipQuoted(sql, startIndex, startQuote, null, result); + } + + /** + * Skips a quoted string from startIndex. The quote character is assumed to be $ if dollarTag is + * not null. + */ + int skipQuoted( + String sql, + int startIndex, + char startQuote, + @Nullable String dollarTag, + @Nullable StringBuilder result) { + boolean isTripleQuoted = + supportsTripleQuotedStrings() + && sql.length() > startIndex + 2 + && sql.charAt(startIndex + 1) == startQuote + && sql.charAt(startIndex + 2) == startQuote; + int currentIndex = startIndex + (isTripleQuoted ? 
3 : 1); + if (isTripleQuoted) { + appendIfNotNull(result, startQuote); + appendIfNotNull(result, startQuote); + } + int length = sql.length(); + while (currentIndex < length) { + char currentChar = sql.charAt(currentIndex); + if (currentChar == startQuote) { + if (supportsDollarQuotedStrings() && currentChar == DOLLAR) { + // Check if this is the end of the current dollar quoted string. + String tag = parseDollarQuotedString(sql, currentIndex + 1); + if (tag != null && tag.equals(dollarTag)) { + appendIfNotNull(result, currentChar, dollarTag, currentChar); + return currentIndex + tag.length() + 2; + } + } else if (supportsEscapeQuoteWithQuote() + && length > currentIndex + 1 + && sql.charAt(currentIndex + 1) == startQuote) { + // This is an escaped quote (e.g. 'foo''bar') + appendIfNotNull(result, currentChar); + appendIfNotNull(result, currentChar); + currentIndex += 2; + continue; + } else if (isTripleQuoted) { + // Check if this is the end of the triple-quoted string. + if (length > currentIndex + 2 + && sql.charAt(currentIndex + 1) == startQuote + && sql.charAt(currentIndex + 2) == startQuote) { + appendIfNotNull(result, currentChar); + appendIfNotNull(result, currentChar); + appendIfNotNull(result, currentChar); + return currentIndex + 3; + } + } else { + appendIfNotNull(result, currentChar); + return currentIndex + 1; + } + } else if (supportsBackslashEscape() + && currentChar == BACKSLASH + && length > currentIndex + 1 + && (sql.charAt(currentIndex + 1) == startQuote + || sql.charAt(currentIndex + 1) == BACKSLASH)) { + // This is an escaped quote (e.g. 'foo\'bar') or an escaped backslash (e.g. 'test\\'). + // Note that in raw strings, the \ officially does not start an escape sequence, but the + // result is still the same, as in a raw string 'both characters are preserved'. 
+ appendIfNotNull(result, currentChar); + appendIfNotNull(result, sql.charAt(currentIndex + 1)); + currentIndex += 2; + continue; + } else if (currentChar == '\n' && !isTripleQuoted && !supportsLineFeedInQuotedString()) { + break; + } + currentIndex++; + appendIfNotNull(result, currentChar); + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "SQL statement contains an unclosed literal: " + sql); + } + + /** Appends the given character to result if result is not null. */ + private void appendIfNotNull(@Nullable StringBuilder result, char currentChar) { + if (result != null) { + result.append(currentChar); + } + } + + /** Appends the given suffix to result if result is not null. */ + private static void appendIfNotNull(@Nullable StringBuilder result, String suffix) { + if (result != null) { + result.append(suffix); + } + } + + /** Appends the given prefix, tag, and suffix to result if result is not null. */ + private static void appendIfNotNull( + @Nullable StringBuilder result, char prefix, String tag, char suffix) { + if (result != null) { + result.append(prefix).append(tag).append(suffix); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AnalyzeMode.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AnalyzeMode.java new file mode 100644 index 000000000000..f67d2267771e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AnalyzeMode.java @@ -0,0 +1,52 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.ReadContext.QueryAnalyzeMode; + +/** + * {@link AnalyzeMode} indicates whether a query should be executed as a normal query (NONE), + * whether only a query plan should be returned, or whether the query should be profiled while + * executed. + */ +enum AnalyzeMode { + NONE(null), + PLAN(QueryAnalyzeMode.PLAN), + PROFILE(QueryAnalyzeMode.PROFILE); + + private final QueryAnalyzeMode mode; + + AnalyzeMode(QueryAnalyzeMode mode) { + this.mode = mode; + } + + QueryAnalyzeMode getQueryAnalyzeMode() { + return mode; + } + + /** Translates from the Spanner client library QueryAnalyzeMode to {@link AnalyzeMode}. */ + static AnalyzeMode of(QueryAnalyzeMode mode) { + switch (mode) { + case PLAN: + return AnalyzeMode.PLAN; + case PROFILE: + return AnalyzeMode.PROFILE; + default: + throw new IllegalArgumentException(mode + " is unknown"); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AsyncStatementResult.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AsyncStatementResult.java new file mode 100644 index 000000000000..fef96ab456ca --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AsyncStatementResult.java @@ -0,0 +1,47 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.api.core.ApiFuture; +import com.google.api.core.InternalApi; +import com.google.cloud.spanner.AsyncResultSet; +import com.google.cloud.spanner.connection.StatementResult.ResultType; + +@InternalApi +public interface AsyncStatementResult extends StatementResult { + /** + * Returns the {@link AsyncResultSet} held by this result. May only be called if the type of this + * result is {@link ResultType#RESULT_SET}. + * + * @return the {@link AsyncResultSet} held by this result. + */ + AsyncResultSet getResultSetAsync(); + + /** + * Returns the update count held by this result. May only be called if the type of this result is + * {@link ResultType#UPDATE_COUNT}. + * + * @return the update count held by this result. + */ + ApiFuture getUpdateCountAsync(); + + /** + * Returns a future that tracks the progress of a statement that returns no result. This could be + * a DDL statement or a client side statement that does not return a result. 
+ */ + ApiFuture getNoResultAsync(); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AsyncStatementResultImpl.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AsyncStatementResultImpl.java new file mode 100644 index 000000000000..930a611327cb --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AsyncStatementResultImpl.java @@ -0,0 +1,129 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.SpannerApiFutures.get; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.core.ExecutorProvider; +import com.google.cloud.spanner.AsyncResultSet; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.ResultSets; +import com.google.common.base.Preconditions; + +class AsyncStatementResultImpl implements AsyncStatementResult { + + static AsyncStatementResult of(AsyncResultSet resultSet) { + return new AsyncStatementResultImpl(Preconditions.checkNotNull(resultSet), null); + } + + static AsyncStatementResult of(ApiFuture updateCount) { + return new AsyncStatementResultImpl(Preconditions.checkNotNull(updateCount)); + } + + static AsyncStatementResult of( + StatementResult clientSideStatementResult, ExecutorProvider executorProvider) { + Preconditions.checkNotNull(clientSideStatementResult.getClientSideStatementType()); + Preconditions.checkNotNull(executorProvider); + if (clientSideStatementResult.getResultType() == ResultType.RESULT_SET) { + return new AsyncStatementResultImpl( + ResultSets.toAsyncResultSet(clientSideStatementResult.getResultSet(), executorProvider), + clientSideStatementResult.getClientSideStatementType()); + } else { + return new AsyncStatementResultImpl( + clientSideStatementResult.getClientSideStatementType(), ApiFutures.immediateFuture(null)); + } + } + + static AsyncStatementResult noResult(ApiFuture result) { + return new AsyncStatementResultImpl(null, Preconditions.checkNotNull(result)); + } + + private final ResultType type; + private final ClientSideStatementType clientSideStatementType; + private final AsyncResultSet resultSet; + private final ApiFuture updateCount; + private final ApiFuture noResult; + + private AsyncStatementResultImpl( + AsyncResultSet resultSet, ClientSideStatementType clientSideStatementType) { + this.type = ResultType.RESULT_SET; + 
this.clientSideStatementType = clientSideStatementType; + this.resultSet = resultSet; + this.updateCount = null; + this.noResult = null; + } + + private AsyncStatementResultImpl(ApiFuture updateCount) { + this.type = ResultType.UPDATE_COUNT; + this.clientSideStatementType = null; + this.resultSet = null; + this.updateCount = updateCount; + this.noResult = null; + } + + private AsyncStatementResultImpl( + ClientSideStatementType clientSideStatementType, ApiFuture result) { + this.type = ResultType.NO_RESULT; + this.clientSideStatementType = clientSideStatementType; + this.resultSet = null; + this.updateCount = null; + this.noResult = result; + } + + @Override + public ResultType getResultType() { + return type; + } + + @Override + public ClientSideStatementType getClientSideStatementType() { + return clientSideStatementType; + } + + @Override + public ResultSet getResultSet() { + return getResultSetAsync(); + } + + @Override + public Long getUpdateCount() { + return get(getUpdateCountAsync()); + } + + @Override + public AsyncResultSet getResultSetAsync() { + ConnectionPreconditions.checkState( + resultSet != null, "This result does not contain a ResultSet"); + return resultSet; + } + + @Override + public ApiFuture getUpdateCountAsync() { + ConnectionPreconditions.checkState( + updateCount != null, "This result does not contain an update count"); + return updateCount; + } + + @Override + public ApiFuture getNoResultAsync() { + ConnectionPreconditions.checkState( + type == ResultType.NO_RESULT, "This result does not contain a 'no-result' result"); + return noResult; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AutocommitDmlMode.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AutocommitDmlMode.java new file mode 100644 index 000000000000..4d6becfe1bdf --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AutocommitDmlMode.java @@ -0,0 +1,52 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +/** Enum used to define the behavior of DML statements in autocommit mode */ +public enum AutocommitDmlMode { + /** TRANSACTIONAL: DML statements use a standard atomic transaction. */ + TRANSACTIONAL, + /** PARTITIONED_NON_ATOMIC: DML statements use a Partitioned DML transaction. */ + PARTITIONED_NON_ATOMIC, + /** + * TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC: DML statements are first executed with a + * standard atomic transaction. If that fails due to the mutation limit being exceeded, the + * statement will automatically be retried using a Partitioned DML transaction. These statements + * are not guaranteed to be atomic. The corresponding {@link TransactionRetryListener} methods + * will be invoked when a DML statement falls back to Partitioned DML. + */ + TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC; + + private final String statementString; + + AutocommitDmlMode() { + this.statementString = name(); + } + + /** + * Use this method to get the correct format for use in a SQL statement. Autocommit dml mode must + * be wrapped between single quotes in SQL statements: + * SET AUTOCOMMIT_DML_MODE='TRANSACTIONAL' This method returns the value + * without the single quotes. 
+ * + * @return a string representation of this {@link AutocommitDmlMode} that can be used in a SQL + * statement. + */ + public String getStatementString() { + return statementString; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ChecksumResultSet.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ChecksumResultSet.java new file mode 100644 index 000000000000..c2af543cc9b1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ChecksumResultSet.java @@ -0,0 +1,357 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.ProtobufResultSet; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.Code; +import com.google.cloud.spanner.Type.StructField; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.ReadWriteTransaction.RetriableStatement; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.hash.HashCode; +import com.google.protobuf.Value; +import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import java.nio.charset.CharsetEncoder; +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.util.Arrays; +import java.util.concurrent.Callable; +import java.util.concurrent.atomic.AtomicLong; + +/** + * {@link ResultSet} implementation that keeps a running checksum that can be used to determine + * whether a transaction retry is possible or not. The checksum is based on all the rows that have + * actually been consumed by the user. If the user has not yet consumed any part of the result set + * (i.e. never called next()), the checksum will be null and retry will always be + * allowed. + * + *

If all the rows in the result set have been consumed, the checksum will be based on the values + * of all those rows, and a retry will only be possible if the query returns the exact same results + * during the retry as during the original transaction. + * + *

If some of the rows in the result set have been consumed, the checksum will be based on the + * values of the rows that have been consumed. A retry will succeed if the query returns the same + * results for the already consumed rows. + * + *

The checksum of a {@link ResultSet} is the SHA256 checksum of the current row together with + * the previous checksum value of the result set. The calculation of the checksum is executed in a + * separate {@link Thread} to allow the checksum calculation to lag behind the actual consumption of + * rows, and catch up again if the client slows down the consumption of rows, for example while + * waiting for more data from Cloud Spanner. If the checksum calculation queue contains more than + * {@link ChecksumExecutor#MAX_IN_CHECKSUM_QUEUE} items that have not yet been calculated, calls to + * {@link ResultSet#next()} will slow down in order to allow the calculation to catch up. + */ +@VisibleForTesting +class ChecksumResultSet extends ReplaceableForwardingResultSet implements RetriableStatement { + private final ReadWriteTransaction transaction; + private final AtomicLong numberOfNextCalls = new AtomicLong(); + private final ParsedStatement statement; + private final AnalyzeMode analyzeMode; + private final QueryOption[] options; + private final ChecksumCalculator checksumCalculator = new ChecksumCalculator(); + + ChecksumResultSet( + ReadWriteTransaction transaction, + ProtobufResultSet delegate, + ParsedStatement statement, + AnalyzeMode analyzeMode, + QueryOption... options) { + super(delegate); + Preconditions.checkNotNull(transaction); + Preconditions.checkNotNull(delegate); + Preconditions.checkNotNull(statement); + Preconditions.checkNotNull(statement.getStatement()); + Preconditions.checkNotNull(statement.getStatement().getSql()); + this.transaction = transaction; + this.statement = statement; + this.analyzeMode = analyzeMode; + this.options = options; + } + + @Override + public Value getProtobufValue(int columnIndex) { + // We can safely cast to ProtobufResultSet here, as the constructor of this class only accepts + // instances of ProtobufResultSet. 
+ return ((ProtobufResultSet) getDelegate()).getProtobufValue(columnIndex); + } + + /** Simple {@link Callable} for calling {@link ResultSet#next()} */ + private final class NextCallable implements Callable { + @Override + public Boolean call() { + transaction + .getStatementExecutor() + .invokeInterceptors( + statement, StatementExecutionStep.CALL_NEXT_ON_RESULT_SET, transaction); + boolean res = ChecksumResultSet.super.next(); + // Only update the checksum if there was another row to be consumed. + if (res) { + checksumCalculator.calculateNextChecksum(ChecksumResultSet.this); + } + numberOfNextCalls.incrementAndGet(); + return res; + } + } + + private final NextCallable nextCallable = new NextCallable(); + + @Override + public boolean next() { + // Call next() with retry. + return transaction.runWithRetry(nextCallable); + } + + @VisibleForTesting + byte[] getChecksum() { + // Getting the checksum from the checksumCalculator will create a clone of the current digest + // and return the checksum from the clone, so it is safe to return this value. + return checksumCalculator.getChecksum(); + } + + /** + * Execute the same query as in the original transaction and consume the {@link ResultSet} to the + * same point as the original {@link ResultSet}. The {@link HashCode} of the new {@link ResultSet} + * is compared with the {@link HashCode} of the original {@link ResultSet} at the point where the + * consumption of the {@link ResultSet} stopped. + */ + @Override + public void retry(AbortedException aborted) throws AbortedException { + // Execute the same query and consume the result set to the same point as the original. 
+ ChecksumCalculator newChecksumCalculator = new ChecksumCalculator(); + ProtobufResultSet resultSet = null; + long counter = 0L; + try { + transaction + .getStatementExecutor() + .invokeInterceptors(statement, StatementExecutionStep.RETRY_STATEMENT, transaction); + resultSet = + DirectExecuteResultSet.ofResultSet( + transaction.internalExecuteQuery(statement, analyzeMode, options)); + boolean next = true; + while (counter < numberOfNextCalls.get() && next) { + transaction + .getStatementExecutor() + .invokeInterceptors( + statement, StatementExecutionStep.RETRY_NEXT_ON_RESULT_SET, transaction); + next = resultSet.next(); + if (next) { + newChecksumCalculator.calculateNextChecksum(resultSet); + } + counter++; + } + } catch (Throwable e) { + if (resultSet != null) { + resultSet.close(); + } + // If it was a SpannerException other than an AbortedException, the retry should fail + // because of different results from the database. + if (e instanceof SpannerException && !(e instanceof AbortedException)) { + throw SpannerExceptionFactory.newAbortedDueToConcurrentModificationException( + aborted, (SpannerException) e); + } + // For other types of exceptions we should just re-throw the exception. + throw e; + } + // Check that we have the same number of rows and the same checksum. + byte[] newChecksum = newChecksumCalculator.getChecksum(); + byte[] currentChecksum = checksumCalculator.getChecksum(); + if (counter == numberOfNextCalls.get() && Arrays.equals(newChecksum, currentChecksum)) { + // Checksum is ok, we only need to replace the delegate result set if it's still open. + if (isClosed()) { + resultSet.close(); + } else { + replaceDelegate(resultSet); + } + } else { + // The results are not equal, there is an actual concurrent modification, so we cannot + // continue the transaction. 
+ throw SpannerExceptionFactory.newAbortedDueToConcurrentModificationException(aborted); + } + } + + /** + * Calculates a running checksum for all the data that has been seen sofar in this result set. The + * calculation is performed on the protobuf values that were returned by Cloud Spanner, which + * means that no decoding of the results is needed (or allowed!) before calculating the checksum. + * This is more efficient, both in terms of CPU usage and memory consumption, especially if the + * consumer of the result set does not read all values, or is only reading the underlying protobuf + * values. + */ + private static final class ChecksumCalculator { + // Use a buffer of max 1Mb to hash string data. This means that strings of up to 1Mb in size + // will be hashed in one go, while strings larger than 1Mb will be chunked into pieces of at + // most 1Mb and then fed into the digest. The digest internally creates a copy of the string + // that is being hashed, so chunking large strings prevents them from being loaded into memory + // twice. + private static final int MAX_BUFFER_SIZE = 1 << 20; + + private boolean firstRow = true; + private final MessageDigest digest; + private ByteBuffer buffer; + private ByteBuffer float64Buffer; + + ChecksumCalculator() { + try { + // This is safe, as all Java implementations are required to have MD5 implemented. + // See https://docs.oracle.com/javase/8/docs/api/java/security/MessageDigest.html + // MD5 requires less CPU power than SHA-256, and still offers a low enough collision + // probability for the use case at hand here. + digest = MessageDigest.getInstance("MD5"); + } catch (Throwable t) { + throw SpannerExceptionFactory.asSpannerException(t); + } + } + + private byte[] getChecksum() { + try { + // This is safe, as the MD5 MessageDigest is known to be cloneable. 
+ MessageDigest clone = (MessageDigest) digest.clone(); + return clone.digest(); + } catch (CloneNotSupportedException e) { + throw SpannerExceptionFactory.asSpannerException(e); + } + } + + private void calculateNextChecksum(ProtobufResultSet resultSet) { + if (firstRow) { + for (StructField field : resultSet.getType().getStructFields()) { + digest.update(field.getType().toString().getBytes(StandardCharsets.UTF_8)); + } + } + for (int col = 0; col < resultSet.getColumnCount(); col++) { + Type type = resultSet.getColumnType(col); + if (resultSet.canGetProtobufValue(col)) { + Value value = resultSet.getProtobufValue(col); + digest.update((byte) value.getKindCase().getNumber()); + pushValue(type, value); + } else { + // This will normally not happen, unless the user explicitly sets the decoding mode to + // DIRECT for a query in a read/write transaction. The default decoding mode in the + // Connection API is set to LAZY_PER_COL. + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "Failed to get the underlying protobuf value for the column " + + resultSet.getMetadata().getRowType().getFields(col).getName() + + ". Executing queries with DecodeMode#DIRECT is not supported in read/write" + + " transactions."); + } + } + firstRow = false; + } + + private void pushValue(Type type, Value value) { + // Protobuf Value has a very limited set of possible types of values. All Cloud Spanner types + // are mapped to one of the protobuf values listed here, meaning that no changes are needed to + // this calculation when a new type is added to Cloud Spanner. + switch (value.getKindCase()) { + case NULL_VALUE: + // nothing needed, writing the KindCase is enough. + break; + case BOOL_VALUE: + digest.update(value.getBoolValue() ? 
(byte) 1 : 0); + break; + case STRING_VALUE: + putString(value.getStringValue()); + break; + case NUMBER_VALUE: + if (float64Buffer == null) { + // Create an 8-byte buffer that can be re-used for all float64 values in this result + // set. + float64Buffer = ByteBuffer.allocate(Double.BYTES); + } else { + float64Buffer.clear(); + } + float64Buffer.putDouble(value.getNumberValue()); + float64Buffer.flip(); + digest.update(float64Buffer); + break; + case LIST_VALUE: + if (type.getCode() == Code.ARRAY) { + for (Value item : value.getListValue().getValuesList()) { + digest.update((byte) item.getKindCase().getNumber()); + pushValue(type.getArrayElementType(), item); + } + } else { + // This should not be possible. + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "List values that are not an ARRAY are not supported"); + } + break; + case STRUCT_VALUE: + if (type.getCode() == Code.STRUCT) { + for (int col = 0; col < type.getStructFields().size(); col++) { + String name = type.getStructFields().get(col).getName(); + putString(name); + Value item = value.getStructValue().getFieldsMap().get(name); + digest.update((byte) item.getKindCase().getNumber()); + pushValue(type.getStructFields().get(col).getType(), item); + } + } else { + // This should not be possible. + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "Struct values without a struct type are not supported"); + } + break; + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.UNIMPLEMENTED, "Unsupported protobuf value: " + value.getKindCase()); + } + } + + /** Hashes a string value in blocks of max MAX_BUFFER_SIZE. */ + private void putString(String stringValue) { + int length = stringValue.length(); + if (buffer == null || (buffer.capacity() < MAX_BUFFER_SIZE && buffer.capacity() < length)) { + // Create a ByteBuffer with a maximum buffer size. + // This buffer is re-used for all string values in the result set. 
+ buffer = ByteBuffer.allocate(Math.min(MAX_BUFFER_SIZE, length)); + } else { + buffer.clear(); + } + + // Wrap the string in a CharBuffer. This allows us to read from the string in sections without + // creating a new copy of (a part of) the string. E.g. using something like substring(..) + // would create a copy of that part of the string, using CharBuffer.wrap(..) does not. + CharBuffer source = CharBuffer.wrap(stringValue); + CharsetEncoder encoder = StandardCharsets.UTF_8.newEncoder(); + // source.hasRemaining() returns false when all the characters in the string have been + // processed. + while (source.hasRemaining()) { + // Encode the string into bytes and write them into the byte buffer. + // At most MAX_BUFFER_SIZE bytes will be written. + encoder.encode(source, buffer, false); + // Flip the buffer so we can read from the start. + buffer.flip(); + // Put the bytes from the buffer into the digest. + digest.update(buffer); + // Flip the buffer again, so we can repeat and write to the start of the buffer again. + buffer.flip(); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatement.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatement.java new file mode 100644 index 000000000000..f507e6458325 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatement.java @@ -0,0 +1,72 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType; +import java.util.List; + +/** + * A {@link ClientSideStatement} is a statement that is not sent to Google Cloud Spanner, but that + * is executed locally to for example set a certain state of a {@link Connection} or get a property + * of a {@link Connection}. + */ +interface ClientSideStatement { + + /** + * @return a list of example statements for this {@link ClientSideStatement}. If these statements + * are parsed, they will all result this in this {@link ClientSideStatement}. + */ + List getExampleStatements(); + + /** + * @return a list of statements that need to be executed on a new connection before the example + * statements may be executed on a connection. For GET READ_TIMESTAMP this would for example + * be a couple of statements that generate a read-only transaction. + */ + List getExamplePrerequisiteStatements(); + + /** + * @return true if this {@link ClientSideStatement} will return a {@link ResultSet}. + */ + boolean isQuery(); + + /** + * @return true if this {@link ClientSideStatement} will return an update count. + */ + boolean isUpdate(); + + /** + * @return the statement type + */ + ClientSideStatementType getStatementType(); + + /** + * Execute this {@link ClientSideStatement} on the given {@link ConnectionStatementExecutor}. The + * executor calls the appropriate method(s) on the {@link Connection}. The statement argument is + * used to parse any additional properties that might be needed for the execution. + * + * @param executor The {@link ConnectionStatementExecutor} that will be used to call a method on + * the {@link Connection}. 
+ * @param statement The original sql statement that has been parsed to this {@link + * ClientSideStatement}. This statement is used to get any additional arguments that are + * needed for the execution of the {@link ClientSideStatement}. + * @return the result of the execution of the statement. + */ + StatementResult execute(ConnectionStatementExecutor executor, ParsedStatement statement); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementBeginExecutor.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementBeginExecutor.java new file mode 100644 index 000000000000..7f854c0ccab0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementBeginExecutor.java @@ -0,0 +1,71 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.IsolationLevelConverter; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import java.lang.reflect.Method; +import java.util.regex.Matcher; + +/** Executor for BEGIN TRANSACTION [ISOLATION LEVEL SERIALIZABLE|REPEATABLE READ] statements. */ +class ClientSideStatementBeginExecutor implements ClientSideStatementExecutor { + private final ClientSideStatementImpl statement; + private final Method method; + private final IsolationLevelConverter converter; + + ClientSideStatementBeginExecutor(ClientSideStatementImpl statement) throws CompileException { + try { + this.statement = statement; + this.converter = new IsolationLevelConverter(); + this.method = + ConnectionStatementExecutor.class.getDeclaredMethod( + statement.getMethodName(), converter.getParameterClass()); + } catch (Exception e) { + throw new CompileException(e, statement); + } + } + + @Override + public StatementResult execute(ConnectionStatementExecutor connection, ParsedStatement statement) + throws Exception { + return (StatementResult) method.invoke(connection, getParameterValue(statement.getSql())); + } + + IsolationLevel getParameterValue(String sql) { + Matcher matcher = statement.getPattern().matcher(sql); + // Match the 'isolation level (serializable|repeatable read)' part. + // Group 1 is the isolation level. + if (matcher.find() && matcher.groupCount() >= 1) { + String value = matcher.group(1); + if (value != null) { + // Convert the text to an isolation level enum. + // This returns null if the string is not a valid isolation level value. 
+ IsolationLevel res = converter.convert(value.trim()); + if (res != null) { + return res; + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, String.format("Unknown isolation level: %s", value)); + } + } + return null; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementExecutor.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementExecutor.java new file mode 100644 index 000000000000..dae773f606ac --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementExecutor.java @@ -0,0 +1,43 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; + +/** + * A {@link ClientSideStatementExecutor} is used to compile {@link ClientSideStatement}s from the + * json source file, and to execute these against a {@link Connection} (through a {@link + * ConnectionStatementExecutor}). + */ +interface ClientSideStatementExecutor { + + /** + * Executes the {@link ClientSideStatementImpl} that has been compiled and registered with this + * executor on the specified connection. 
+ * + * @param connectionExecutor The {@link ConnectionStatementExecutor} to use to execute the + * statement on a {@link Connection}. + * @param statement The statement that is executed. This can be used to parse any additional + * arguments that might be needed for the execution of the {@link ClientSideStatementImpl}. + * @return the result of the execution. + * @throws Exception If an error occurs while executing the statement, for example if an invalid + * argument has been specified in the sql statement, or if the statement is invalid for the + * current state of the {@link Connection}. + */ + StatementResult execute(ConnectionStatementExecutor connectionExecutor, ParsedStatement statement) + throws Exception; +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementExplainExecutor.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementExplainExecutor.java new file mode 100644 index 000000000000..43b84f48123d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementExplainExecutor.java @@ -0,0 +1,71 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.ExplainCommandConverter; +import com.google.common.collect.ImmutableSet; +import java.lang.reflect.Method; +import java.util.Set; +import java.util.regex.Matcher; + +/** Specific executor for the EXPLAIN statement for PostgreSQL. */ +class ClientSideStatementExplainExecutor implements ClientSideStatementExecutor { + private final ClientSideStatementImpl statement; + private final Method method; + private final ExplainCommandConverter converter; + public static final Set EXPLAIN_OPTIONS = + ImmutableSet.of( + "verbose", "costs", "settings", "buffers", "wal", "timing", "summary", "format"); + + ClientSideStatementExplainExecutor(ClientSideStatementImpl statement) throws CompileException { + try { + this.statement = statement; + this.converter = new ExplainCommandConverter(); + this.method = + ConnectionStatementExecutor.class.getDeclaredMethod( + statement.getMethodName(), converter.getParameterClass()); + } catch (Exception e) { + throw new CompileException(e, statement); + } + } + + @Override + public StatementResult execute(ConnectionStatementExecutor connection, ParsedStatement statement) + throws Exception { + return (StatementResult) method.invoke(connection, getParameterValue(statement.getSql())); + } + + String getParameterValue(String sql) { + Matcher matcher = statement.getPattern().matcher(sql); + if (matcher.find() && matcher.groupCount() >= 1) { + String value = matcher.group(0); + if (value != null) { + String res = converter.convert(value.trim()); + if (res != null) { + return res; + } + throw SpannerExceptionFactory.newSpannerException( + 
ErrorCode.INVALID_ARGUMENT, String.format("Invalid argument for EXPLAIN: %s", value)); + } + } + return null; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementImpl.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementImpl.java new file mode 100644 index 000000000000..c136cfcf5255 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementImpl.java @@ -0,0 +1,233 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType; +import com.google.cloud.spanner.connection.StatementResult.ResultType; +import com.google.common.base.Preconditions; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.util.Collections; +import java.util.List; +import java.util.regex.Pattern; + +/** + * Implementation of the {@link ClientSideStatement} interface. The instances of this class are + * imported from the file 'ClientSideStatements.json' in the resources folder. 
+ */ +class ClientSideStatementImpl implements ClientSideStatement { + + /** + * Statements that set a value, such as SET AUTOCOMMIT ON|OFF, must specify a {@link + * ClientSideSetStatementImpl} that defines how the value is set. + */ + static class ClientSideSetStatementImpl { + /** The property name that is to be set, e.g. AUTOCOMMIT. */ + private String propertyName; + + /** The separator between the property and the value (i.e. '=' or '\s+'). */ + private String separator; + + /** Regex specifying the range of allowed values for the property. */ + private String allowedValues; + + /** The class name of the {@link ClientSideStatementValueConverter} to use. */ + private String converterName; + + String getPropertyName() { + return propertyName; + } + + String getSeparator() { + return separator; + } + + String getAllowedValues() { + return allowedValues; + } + + String getConverterName() { + return converterName; + } + } + + static class CompileException extends Exception { + private static final long serialVersionUID = 1L; + private final ClientSideStatementImpl statement; + + CompileException(Throwable cause, ClientSideStatementImpl statement) { + super(cause); + this.statement = statement; + } + + @Override + public String getMessage() { + return "Could not compile statement " + this.statement.name; + } + } + + static class ExecuteException extends RuntimeException { + private static final long serialVersionUID = 1L; + private final ClientSideStatementImpl statement; + private final String sql; + + private ExecuteException(Throwable cause, ClientSideStatementImpl statement, String sql) { + super(cause); + this.statement = statement; + this.sql = sql; + } + + @Override + public String getMessage() { + return "Could not execute statement " + this.statement.name + " (" + sql + ")"; + } + } + + /** The name of this statement. Used in error and info messages. 
*/ + private String name; + + /** + * The class name of the {@link ClientSideStatementExecutor} that should be used for this + * statement. + */ + private String executorName; + + /** The result type of this statement. */ + private ResultType resultType; + + private ClientSideStatementType statementType; + + /** The regular expression that should be used to recognize this class of statements. */ + private String regex; + + /** + * The method name of the {@link ConnectionStatementExecutor} that should be called when this + * statement is executed, for example 'statementSetAutocommit'. + */ + private String method; + + /** A list of example statements that is used for testing. */ + private List exampleStatements; + + /** + * A list of statements that need to be executed before the example statements may be executed. + */ + private List examplePrerequisiteStatements; + + /** + * If this statement sets a value, the statement definition should also contain a {@link + * ClientSideSetStatementImpl} definition that defines how the value that is to be set should be + * parsed. + */ + private ClientSideSetStatementImpl setStatement; + + /** The compiled regex pattern for recognizing this statement. */ + private transient Pattern pattern; + + /** A reference to the executor that should be used. */ + private transient ClientSideStatementExecutor executor; + + /** + * Compiles this {@link ClientSideStatementImpl}. Throws a {@link CompileException} if the + * compilation fails. This should never happen, and if it does, it is a sign of a invalid + * statement definition in the ClientSideStatements.json file. + */ + ClientSideStatementImpl compile() throws CompileException { + try { + this.pattern = Pattern.compile(regex); + @SuppressWarnings("unchecked") + Constructor constructor = + (Constructor) + Class.forName(getClass().getPackage().getName() + "." 
+ executorName) + .getDeclaredConstructor(ClientSideStatementImpl.class); + this.executor = constructor.newInstance(this); + return this; + } catch (Exception e) { + throw new CompileException(e, this); + } + } + + @Override + public StatementResult execute( + ConnectionStatementExecutor connection, ParsedStatement statement) { + Preconditions.checkState(executor != null, "This statement has not been compiled"); + try { + return executor.execute(connection, statement); + } catch (SpannerException e) { + throw e; + } catch (InvocationTargetException e) { + if (e.getCause() instanceof SpannerException) { + throw (SpannerException) e.getCause(); + } + throw new ExecuteException(e.getCause(), this, statement.getStatement().getSql()); + } catch (Exception e) { + throw new ExecuteException(e, this, statement.getStatement().getSql()); + } + } + + @Override + public boolean isQuery() { + return resultType == ResultType.RESULT_SET; + } + + @Override + public boolean isUpdate() { + return resultType == ResultType.UPDATE_COUNT; + } + + @Override + public ClientSideStatementType getStatementType() { + return statementType; + } + + boolean matches(CharSequence statement) { + Preconditions.checkState(pattern != null, "This statement has not been compiled"); + return pattern.matcher(statement).matches(); + } + + @Override + public String toString() { + return name; + } + + Pattern getPattern() { + return pattern; + } + + String getMethodName() { + return method; + } + + @Override + public List<String> getExampleStatements() { + return Collections.unmodifiableList(exampleStatements); + } + + @Override + public List<String> getExamplePrerequisiteStatements() { + if (examplePrerequisiteStatements == null) { + return Collections.emptyList(); + } + return Collections.unmodifiableList(examplePrerequisiteStatements); + } + + ClientSideSetStatementImpl getSetStatement() { + return setStatement; + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementNoParamExecutor.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementNoParamExecutor.java new file mode 100644 index 000000000000..01da308c1662 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementNoParamExecutor.java @@ -0,0 +1,50 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import java.lang.reflect.Method; + +/** + * Executor to use for statements that do not set a value and do not have any parameters, such as + * SHOW AUTOCOMMIT. The executor just calls a method with no parameters. + */ +class ClientSideStatementNoParamExecutor implements ClientSideStatementExecutor { + private final Method method; + + /** + * Creates and compiles the given {@link ClientSideStatementImpl}. + * + * @param statement The statement to compile. + * @throws CompileException If the statement could not be compiled. This should never happen, as + * it would indicate that an invalid statement has been defined in the source file. 
+ */ + ClientSideStatementNoParamExecutor(ClientSideStatementImpl statement) throws CompileException { + try { + this.method = ConnectionStatementExecutor.class.getDeclaredMethod(statement.getMethodName()); + } catch (NoSuchMethodException | SecurityException e) { + throw new CompileException(e, statement); + } + } + + @Override + public StatementResult execute(ConnectionStatementExecutor connection, ParsedStatement statement) + throws Exception { + return (StatementResult) method.invoke(connection); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementPartitionExecutor.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementPartitionExecutor.java new file mode 100644 index 000000000000..c96ee1553413 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementPartitionExecutor.java @@ -0,0 +1,63 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import java.lang.reflect.Method; +import java.util.regex.Matcher; + +/** Executor for PARTITION <sql> statements. */ +class ClientSideStatementPartitionExecutor implements ClientSideStatementExecutor { + private final ClientSideStatementImpl statement; + private final Method method; + + ClientSideStatementPartitionExecutor(ClientSideStatementImpl statement) throws CompileException { + try { + this.statement = statement; + this.method = + ConnectionStatementExecutor.class.getDeclaredMethod( + statement.getMethodName(), Statement.class); + } catch (Exception e) { + throw new CompileException(e, statement); + } + } + + @Override + public StatementResult execute( + ConnectionStatementExecutor connection, ParsedStatement parsedStatement) throws Exception { + String sql = getParameterValue(parsedStatement); + return (StatementResult) + method.invoke(connection, parsedStatement.getStatement().toBuilder().replace(sql).build()); + } + + String getParameterValue(ParsedStatement parsedStatement) { + Matcher matcher = statement.getPattern().matcher(parsedStatement.getSql()); + if (matcher.find() && matcher.groupCount() >= 2) { + String space = matcher.group(1); + String value = matcher.group(2); + return (space + value).trim(); + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + String.format( + "Invalid argument for PARTITION: %s", parsedStatement.getStatement().getSql())); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementPgBeginExecutor.java 
b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementPgBeginExecutor.java new file mode 100644 index 000000000000..fae41de18c16 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementPgBeginExecutor.java @@ -0,0 +1,66 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.PgTransactionModeConverter; +import java.lang.reflect.Method; +import java.util.regex.Matcher; + +/** Specific executor for the BEGIN statement for PostgreSQL. 
*/ +class ClientSideStatementPgBeginExecutor implements ClientSideStatementExecutor { + private final ClientSideStatementImpl statement; + private final Method method; + private final PgTransactionModeConverter converter; + + ClientSideStatementPgBeginExecutor(ClientSideStatementImpl statement) throws CompileException { + try { + this.statement = statement; + this.converter = new PgTransactionModeConverter(); + this.method = + ConnectionStatementExecutor.class.getDeclaredMethod( + statement.getMethodName(), converter.getParameterClass()); + } catch (Exception e) { + throw new CompileException(e, statement); + } + } + + @Override + public StatementResult execute(ConnectionStatementExecutor connection, ParsedStatement statement) + throws Exception { + return (StatementResult) method.invoke(connection, getParameterValue(statement.getSql())); + } + + PgTransactionMode getParameterValue(String sql) { + Matcher matcher = statement.getPattern().matcher(sql); + if (matcher.find() && matcher.groupCount() >= 1) { + String value = matcher.group(1); + if (value != null) { + PgTransactionMode res = converter.convert(value.trim()); + if (res != null) { + return res; + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, String.format("Unknown transaction mode: %s", value)); + } + } + return null; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementRunPartitionExecutor.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementRunPartitionExecutor.java new file mode 100644 index 000000000000..7e3c30d9f70c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementRunPartitionExecutor.java @@ -0,0 +1,81 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with 
the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import com.google.common.base.Strings; +import java.lang.reflect.Method; +import java.util.regex.Matcher; + +/** Executor for RUN PARTITION <partition_id> statements. */ +class ClientSideStatementRunPartitionExecutor implements ClientSideStatementExecutor { + private final ClientSideStatementImpl statement; + private final Method method; + + ClientSideStatementRunPartitionExecutor(ClientSideStatementImpl statement) + throws CompileException { + try { + this.statement = statement; + this.method = + ConnectionStatementExecutor.class.getDeclaredMethod( + statement.getMethodName(), String.class); + } catch (Exception e) { + throw new CompileException(e, statement); + } + } + + @Override + public StatementResult execute( + ConnectionStatementExecutor connection, ParsedStatement parsedStatement) throws Exception { + String partitionId = getParameterValue(parsedStatement); + if (partitionId == null) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "No valid partition id found in statement: " + parsedStatement.getStatement().getSql()); + } + return (StatementResult) method.invoke(connection, partitionId); + } + + String 
getParameterValue(ParsedStatement parsedStatement) { + // The statement has the form `RUN PARTITION ['partition-id']`. + // The regex that is defined for this statement is (simplified) `run\s+partition(?:\s*'(.*)')?` + // This regex has one capturing group, which captures the partition-id inside the single quotes. + // That capturing group is however inside a non-capturing optional group. + // That means that: + // 1. If the matcher matches and returns one or more groups, we know that we have a partition-id + // in the SQL statement itself, as that is the only thing that can be in a capturing group. + // 2. If the matcher matches and returns zero groups, we know that the statement is valid, but + // that it does not contain a partition-id in the SQL statement. The partition-id must then + // be included in the statement as a query parameter. + Matcher matcher = statement.getPattern().matcher(parsedStatement.getSql()); + if (matcher.find() && matcher.groupCount() >= 1) { + String value = matcher.group(1); + if (!Strings.isNullOrEmpty(value)) { + return value; + } + } + if (parsedStatement.getStatement().getParameters().size() == 1) { + Value value = parsedStatement.getStatement().getParameters().values().iterator().next(); + return value.getAsString(); + } + return null; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementRunPartitionedQueryExecutor.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementRunPartitionedQueryExecutor.java new file mode 100644 index 000000000000..c95f2203fc8b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementRunPartitionedQueryExecutor.java @@ -0,0 +1,67 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import java.lang.reflect.Method; +import java.util.regex.Matcher; + +/** Executor for RUN PARTITIONED QUERY <sql> statements. */ +class ClientSideStatementRunPartitionedQueryExecutor implements ClientSideStatementExecutor { + private final ClientSideStatementImpl statement; + private final Method method; + + ClientSideStatementRunPartitionedQueryExecutor(ClientSideStatementImpl statement) + throws CompileException { + try { + this.statement = statement; + this.method = + ConnectionStatementExecutor.class.getDeclaredMethod( + statement.getMethodName(), Statement.class); + } catch (Exception e) { + throw new CompileException(e, statement); + } + } + + @Override + public StatementResult execute( + ConnectionStatementExecutor connection, ParsedStatement parsedStatement) throws Exception { + String sql = getParameterValue(parsedStatement); + return (StatementResult) + method.invoke(connection, parsedStatement.getStatement().toBuilder().replace(sql).build()); + } + + String getParameterValue(ParsedStatement parsedStatement) { + Matcher matcher = statement.getPattern().matcher(parsedStatement.getSql()); + if (matcher.find() && matcher.groupCount() >= 2) { + // Include the spacing group in 
case the query is enclosed in parentheses like this: + // `run partitioned query(select * from foo)` + String space = matcher.group(1); + String value = matcher.group(2); + return (space + value).trim(); + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + String.format( + "Invalid argument for RUN PARTITIONED QUERY: %s", + parsedStatement.getStatement().getSql())); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementSetExecutor.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementSetExecutor.java new file mode 100644 index 000000000000..5bb0a4c8d3b9 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementSetExecutor.java @@ -0,0 +1,154 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.Tuple; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import com.google.common.base.Preconditions; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.util.concurrent.UncheckedExecutionException; +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; +import java.nio.CharBuffer; +import java.util.concurrent.ExecutionException; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Executor for {@link ClientSideStatement}s that set a value for a property, such as SET + * AUTOCOMMIT=TRUE. + */ +class ClientSideStatementSetExecutor<T> implements ClientSideStatementExecutor { + private final Cache<String, Tuple<T, Boolean>> cache; + private final ClientSideStatementImpl statement; + private final Method method; + private final boolean supportsLocal; + private final ClientSideStatementValueConverter<T> converter; + private final Pattern allowedValuesPattern; + + /** + * Creates and compiles the given {@link ClientSideStatementImpl}. + * + * @param statement The statement to compile. + * @throws CompileException If the statement could not be compiled. This should never happen, as + * it would indicate that an invalid statement has been defined in the source file. + */ + @SuppressWarnings("unchecked") + ClientSideStatementSetExecutor(ClientSideStatementImpl statement) throws CompileException { + Preconditions.checkNotNull(statement.getSetStatement()); + this.cache = + CacheBuilder.newBuilder() + .maximumSize(25) + // Set the concurrency level to 1, as we don't expect many concurrent updates. 
+ .concurrencyLevel(1) + .build(); + try { + this.statement = statement; + this.allowedValuesPattern = + Pattern.compile( + String.format( + "(?is)\\A\\s*set\\s+((?:local|session)\\s+)?%s\\s*%s\\s*%s\\s*\\z", + statement.getSetStatement().getPropertyName(), + statement.getSetStatement().getSeparator(), + statement.getSetStatement().getAllowedValues())); + Class<ClientSideStatementValueConverter<T>> converterClass = + (Class<ClientSideStatementValueConverter<T>>) + Class.forName( + getClass().getPackage().getName() + + "." + + statement.getSetStatement().getConverterName()); + Constructor<ClientSideStatementValueConverter<T>> constructor = + converterClass.getConstructor(String.class); + this.converter = constructor.newInstance(statement.getSetStatement().getAllowedValues()); + Method method; + boolean supportsLocal; + try { + method = + ConnectionStatementExecutor.class.getDeclaredMethod( + statement.getMethodName(), converter.getParameterClass()); + supportsLocal = false; + } catch (NoSuchMethodException ignore) { + method = + ConnectionStatementExecutor.class.getDeclaredMethod( + statement.getMethodName(), converter.getParameterClass(), Boolean.class); + supportsLocal = true; + } + this.method = method; + this.supportsLocal = supportsLocal; + } catch (Exception e) { + throw new CompileException(e, statement); + } + } + + @Override + public StatementResult execute(ConnectionStatementExecutor connection, ParsedStatement statement) + throws Exception { + Tuple<T, Boolean> value; + try { + value = + this.cache.get( + statement.getSql(), + () -> getParameterValue(connection.getDialect(), statement.getSql())); + } catch (ExecutionException | UncheckedExecutionException executionException) { + throw SpannerExceptionFactory.asSpannerException(executionException.getCause()); + } + if (this.supportsLocal) { + return (StatementResult) method.invoke(connection, value.x(), value.y()); + } + return (StatementResult) method.invoke(connection, value.x()); + } + + Tuple<T, Boolean> getParameterValue(Dialect dialect, String sql) { + // Get rid of any spaces/comments at the start of the string. 
+ SimpleParser simpleParser = new SimpleParser(dialect, sql); + simpleParser.skipWhitespaces(); + // Create a wrapper around the SQL string from the point after the first whitespace. + CharBuffer sqlAfterWhitespaces = CharBuffer.wrap(sql, simpleParser.getPos(), sql.length()); + Matcher matcher = allowedValuesPattern.matcher(sqlAfterWhitespaces); + if (matcher.find() && matcher.groupCount() >= 2) { + boolean local = matcher.group(1) != null && "local".equalsIgnoreCase(matcher.group(1).trim()); + String value = matcher.group(2); + T res = converter.convert(value); + if (res != null) { + return Tuple.of(res, local); + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + String.format( + "Unknown value for %s: %s", + this.statement.getSetStatement().getPropertyName(), value)); + } else { + Matcher invalidMatcher = this.statement.getPattern().matcher(sqlAfterWhitespaces); + int valueGroup = this.supportsLocal ? 2 : 1; + if (invalidMatcher.find() && invalidMatcher.groupCount() == valueGroup) { + String invalidValue = invalidMatcher.group(valueGroup); + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + String.format( + "Unknown value for %s: %s", + this.statement.getSetStatement().getPropertyName(), invalidValue)); + } + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, String.format("Unknown statement: %s", sql)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementValueConverter.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementValueConverter.java new file mode 100644 index 000000000000..1cd04433abcc --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementValueConverter.java @@ -0,0 +1,35 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 
2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +/** + * Interface for converters that are used by {@link ClientSideStatement} that sets a value that needs + * to be converted from a string to a specific type. Implementing classes must have a public + * constructor that takes a String parameter. The String parameter will contain a regular expression + * for the allowed values for the property. + */ +interface ClientSideStatementValueConverter<T> { + + /** The type to convert to. */ + Class<T> getParameterClass(); + + /** + * The actual convert method. Should return <code>null</code> for values that could not be + * converted. + */ + T convert(String value); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementValueConverters.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementValueConverters.java new file mode 100644 index 000000000000..3d796af4f007 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementValueConverters.java @@ -0,0 +1,898 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.ReadOnlyStalenessUtil.parseTimeUnit; +import static com.google.cloud.spanner.connection.ReadOnlyStalenessUtil.toChronoUnit; + +import com.google.api.gax.core.CredentialsProvider; +import com.google.api.gax.grpc.GrpcInterceptorProvider; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Options.RpcPriority; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.TimestampBound.Mode; +import com.google.cloud.spanner.connection.PgTransactionMode.AccessMode; +import com.google.cloud.spanner.connection.PgTransactionMode.IsolationLevel; +import com.google.common.base.Function; +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.TransactionOptions; +import com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.time.Duration; +import java.time.temporal.ChronoUnit; +import java.util.Base64; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** Contains all {@link ClientSideStatementValueConverter} 
implementations. */ +class ClientSideStatementValueConverters { + /** Map for mapping case-insensitive strings to enums. */ + private static final class CaseInsensitiveEnumMap<E extends Enum<E>> { + private final Map<String, E> map = new HashMap<>(); + + /** Create a map using the name of the enum elements as keys. */ + private CaseInsensitiveEnumMap(Class<E> elementType) { + this(elementType, Enum::name); + } + + /** Create a map using the specific function to get the key per enum value. */ + private CaseInsensitiveEnumMap(Class<E> elementType, Function<E, String> keyFunction) { + Preconditions.checkNotNull(elementType); + Preconditions.checkNotNull(keyFunction); + EnumSet<E> set = EnumSet.allOf(elementType); + for (E e : set) { + if (map.put(keyFunction.apply(e).toUpperCase(), e) != null) { + throw new IllegalArgumentException( + "Enum contains multiple elements with the same case-insensitive key"); + } + } + } + + private E get(String value) { + Preconditions.checkNotNull(value); + return map.get(value.toUpperCase()); + } + } + + /** Converter from string to {@link Boolean} */ + static class BooleanConverter implements ClientSideStatementValueConverter<Boolean> { + static final BooleanConverter INSTANCE = new BooleanConverter(); + + private BooleanConverter() {} + + /** Constructor that is needed for reflection. 
*/ + public BooleanConverter(String allowedValues) {} + + @Override + public Class<Boolean> getParameterClass() { + return Boolean.class; + } + + @Override + public Boolean convert(String value) { + if ("true".equalsIgnoreCase(value)) { + return Boolean.TRUE; + } + if ("false".equalsIgnoreCase(value)) { + return Boolean.FALSE; + } + return null; + } + } + + /** Converter from string to {@link Boolean} */ + static class PgBooleanConverter implements ClientSideStatementValueConverter<Boolean> { + + public PgBooleanConverter(String allowedValues) {} + + @Override + public Class<Boolean> getParameterClass() { + return Boolean.class; + } + + @Override + public Boolean convert(String value) { + if (value == null) { + return null; + } + if (value.length() > 1 + && ((value.startsWith("'") && value.endsWith("'")) + || (value.startsWith("\"") && value.endsWith("\"")))) { + value = value.substring(1, value.length() - 1); + } + if ("true".equalsIgnoreCase(value) + || "tru".equalsIgnoreCase(value) + || "tr".equalsIgnoreCase(value) + || "t".equalsIgnoreCase(value) + || "on".equalsIgnoreCase(value) + || "1".equalsIgnoreCase(value) + || "yes".equalsIgnoreCase(value) + || "ye".equalsIgnoreCase(value) + || "y".equalsIgnoreCase(value)) { + return Boolean.TRUE; + } + if ("false".equalsIgnoreCase(value) + || "fals".equalsIgnoreCase(value) + || "fal".equalsIgnoreCase(value) + || "fa".equalsIgnoreCase(value) + || "f".equalsIgnoreCase(value) + || "off".equalsIgnoreCase(value) + || "of".equalsIgnoreCase(value) + || "0".equalsIgnoreCase(value) + || "no".equalsIgnoreCase(value) + || "n".equalsIgnoreCase(value)) { + return Boolean.FALSE; + } + return null; + } + } + + /** Converter from string to a non-negative integer. */ + static class NonNegativeIntegerConverter implements ClientSideStatementValueConverter<Integer> { + static final NonNegativeIntegerConverter INSTANCE = new NonNegativeIntegerConverter(); + + private NonNegativeIntegerConverter() {} + + /** Constructor needed for reflection. 
*/ + public NonNegativeIntegerConverter(String allowedValues) {} + + @Override + public Class<Integer> getParameterClass() { + return Integer.class; + } + + @Override + public Integer convert(String value) { + try { + int res = Integer.parseInt(value); + if (res < 0) { + // The convention for these converters is to return null if the value is invalid. + return null; + } + return res; + } catch (Exception ignore) { + return null; + } + } + } + + /** Converter from string to a long. */ + static class LongConverter implements ClientSideStatementValueConverter<Long> { + static final LongConverter INSTANCE = new LongConverter(); + + private LongConverter() {} + + /** Constructor needed for reflection. */ + public LongConverter(String allowedValues) {} + + @Override + public Class<Long> getParameterClass() { + return Long.class; + } + + @Override + public Long convert(String value) { + try { + long res = Long.parseLong(value); + if (res < 0) { + // The convention for these converters is to return null if the value is invalid. + return null; + } + return res; + } catch (Exception ignore) { + return null; + } + } + } + + /** Converter from string to {@link Duration}. */ + static class DurationConverter implements ClientSideStatementValueConverter<Duration> { + static final DurationConverter INSTANCE = + new DurationConverter("('(\\d{1,19})(s|ms|us|ns)'|\\d{1,19}|NULL)"); + + private final String resetValue; + + private final Pattern allowedValues; + + public DurationConverter(String allowedValues) { + this("NULL", allowedValues); + } + + DurationConverter(String resetValue, String allowedValues) { + this.resetValue = Preconditions.checkNotNull(resetValue); + // Remove the parentheses from the beginning and end. 
+ this.allowedValues = + Pattern.compile( + "(?is)\\A" + allowedValues.substring(1, allowedValues.length() - 1) + "\\z"); + } + + @Override + public Class getParameterClass() { + return Duration.class; + } + + @Override + public Duration convert(String value) { + Matcher matcher = allowedValues.matcher(value); + if (matcher.find()) { + if (value.trim().equalsIgnoreCase(resetValue)) { + return Duration.ZERO; + } else { + try { + Duration duration; + if (matcher.group(1) != null && matcher.group(2) != null) { + ChronoUnit unit = toChronoUnit(parseTimeUnit(matcher.group(2))); + duration = Duration.of(Long.parseLong(matcher.group(1)), unit); + } else { + duration = Duration.ofMillis(Long.parseLong(value.trim())); + } + // Converters should return null for invalid values. + if (duration.isNegative()) { + return null; + } + return duration; + } catch (NumberFormatException exception) { + // Converters should return null for invalid values. + return null; + } + } + } + return null; + } + } + + /** Converter from string to {@link Duration}. */ + static class PgDurationConverter extends DurationConverter { + public PgDurationConverter(String allowedValues) { + super("DEFAULT", allowedValues); + } + } + + /** Converter from string to possible values for read only staleness ({@link TimestampBound}). */ + static class ReadOnlyStalenessConverter + implements ClientSideStatementValueConverter { + // Some backslashes need to be specified as hexcode. 
+ // See https://github.com/google/google-java-format/issues/1253 + static final ReadOnlyStalenessConverter INSTANCE = + new ReadOnlyStalenessConverter( + "'((STRONG)|(MIN_READ_TIMESTAMP)[\\t" + + " ]+((\\d{4})-(\\d{2})-(\\d{2})([Tt](\\d{2}):(\\d{2}):(\\d{2})(\\.\\d{1,9})?)([Zz]|([+-])(\\d{2}):(\\d{2})))|(READ_TIMESTAMP)[\u005Ct" + + " ]+((\\d{4})-(\\d{2})-(\\d{2})([Tt](\\d{2}):(\\d{2}):( " + + " \\d{2})(\\.\\d{1,9})?)([Zz]|([+-])(\\d{2}):(\\d{2})))|(MAX_STALENESS)[\u005Ct" + + " ]+((\\d{1,19})(s|ms|us|ns))|(EXACT_STALENESS)[\\t" + + " ]+((\\d{1,19})(s|ms|us|ns)))'"); + + private final Pattern allowedValues; + private final CaseInsensitiveEnumMap values = new CaseInsensitiveEnumMap<>(Mode.class); + + public ReadOnlyStalenessConverter(String allowedValues) { + // Remove the single quotes at the beginning and end. + this.allowedValues = + Pattern.compile( + "(?is)\\A" + allowedValues.substring(1, allowedValues.length() - 1) + "\\z"); + } + + @Override + public Class getParameterClass() { + return TimestampBound.class; + } + + @Override + public TimestampBound convert(String value) { + Matcher matcher = allowedValues.matcher(value); + if (matcher.find() && matcher.groupCount() >= 1) { + Mode mode = null; + int groupIndex = 0; + for (int group = 1; group <= matcher.groupCount(); group++) { + if (matcher.group(group) != null) { + mode = values.get(matcher.group(group)); + if (mode != null) { + groupIndex = group; + break; + } + } + } + switch (mode) { + case STRONG: + return TimestampBound.strong(); + case READ_TIMESTAMP: + return TimestampBound.ofReadTimestamp( + ReadOnlyStalenessUtil.parseRfc3339(matcher.group(groupIndex + 1))); + case MIN_READ_TIMESTAMP: + return TimestampBound.ofMinReadTimestamp( + ReadOnlyStalenessUtil.parseRfc3339(matcher.group(groupIndex + 1))); + case EXACT_STALENESS: + try { + return TimestampBound.ofExactStaleness( + Long.parseLong(matcher.group(groupIndex + 2)), + parseTimeUnit(matcher.group(groupIndex + 3))); + } catch 
(IllegalArgumentException e) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, e.getMessage()); + } + case MAX_STALENESS: + try { + return TimestampBound.ofMaxStaleness( + Long.parseLong(matcher.group(groupIndex + 2)), + parseTimeUnit(matcher.group(groupIndex + 3))); + } catch (IllegalArgumentException e) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, e.getMessage()); + } + default: + // fall through to allow the calling method to handle this + } + } + return null; + } + } + + /** + * Converter from string to possible values for {@link com.google.spanner.v1.DirectedReadOptions}. + */ + static class DirectedReadOptionsConverter + implements ClientSideStatementValueConverter { + private final Pattern allowedValues; + + public DirectedReadOptionsConverter(String allowedValues) { + // Remove the single quotes at the beginning and end. + this.allowedValues = + Pattern.compile( + "(?is)\\A" + allowedValues.substring(1, allowedValues.length() - 1) + "\\z"); + } + + @Override + public Class getParameterClass() { + return DirectedReadOptions.class; + } + + @Override + public DirectedReadOptions convert(String value) { + Matcher matcher = allowedValues.matcher(value); + if (matcher.find()) { + try { + return DirectedReadOptionsUtil.parse(value); + } catch (SpannerException spannerException) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + String.format( + "Failed to parse '%s' as a valid value for DIRECTED_READ.\n" + + "The value should be a JSON string like this: '%s'.\n" + + "You can generate a valid JSON string from a DirectedReadOptions instance" + + " by calling %s.%s", + value, + "{\"includeReplicas\":{\"replicaSelections\":[{\"location\":\"eu-west1\",\"type\":\"READ_ONLY\"}]}}", + DirectedReadOptionsUtil.class.getName(), + "toString(DirectedReadOptions directedReadOptions)"), + spannerException); + } + } + return null; + } + } + + /** + * Converter for 
converting strings to {@link + * com.google.spanner.v1.TransactionOptions.IsolationLevel} values. + */ + static class IsolationLevelConverter + implements ClientSideStatementValueConverter { + static final IsolationLevelConverter INSTANCE = new IsolationLevelConverter(); + + private final CaseInsensitiveEnumMap values = + new CaseInsensitiveEnumMap<>(TransactionOptions.IsolationLevel.class); + + IsolationLevelConverter() {} + + /** Constructor needed for reflection. */ + public IsolationLevelConverter(String allowedValues) {} + + @Override + public Class getParameterClass() { + return TransactionOptions.IsolationLevel.class; + } + + @Override + public TransactionOptions.IsolationLevel convert(String value) { + if (value != null) { + // This ensures that 'repeatable read' is translated to 'repeatable_read'. The text between + // 'repeatable' and 'read' can be any number of valid whitespace characters. + value = value.trim().replaceFirst("\\s+", "_"); + } + return values.get(value); + } + } + + /** + * Converter for converting strings to {@link + * com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode} values. + */ + static class ReadLockModeConverter implements ClientSideStatementValueConverter { + static final ReadLockModeConverter INSTANCE = new ReadLockModeConverter(); + + private final CaseInsensitiveEnumMap values = + new CaseInsensitiveEnumMap<>(ReadLockMode.class); + + ReadLockModeConverter() {} + + /** Constructor needed for reflection. */ + public ReadLockModeConverter(String allowedValues) {} + + @Override + public Class getParameterClass() { + return ReadLockMode.class; + } + + @Override + public ReadLockMode convert(String value) { + if (value != null && value.equalsIgnoreCase("unspecified")) { + // Allow 'unspecified' to be used in addition to 'read_lock_mode_unspecified'. 
+ value = ReadLockMode.READ_LOCK_MODE_UNSPECIFIED.name(); + } + return values.get(value); + } + } + + /** Converter for converting strings to {@link AutocommitDmlMode} values. */ + static class AutocommitDmlModeConverter + implements ClientSideStatementValueConverter { + static final AutocommitDmlModeConverter INSTANCE = new AutocommitDmlModeConverter(); + + private final CaseInsensitiveEnumMap values = + new CaseInsensitiveEnumMap<>(AutocommitDmlMode.class); + + private AutocommitDmlModeConverter() {} + + /** Constructor needed for reflection. */ + public AutocommitDmlModeConverter(String allowedValues) {} + + @Override + public Class getParameterClass() { + return AutocommitDmlMode.class; + } + + @Override + public AutocommitDmlMode convert(String value) { + return values.get(value); + } + } + + static class ConnectionStateTypeConverter + implements ClientSideStatementValueConverter { + static final ConnectionStateTypeConverter INSTANCE = new ConnectionStateTypeConverter(); + + private final CaseInsensitiveEnumMap values = + new CaseInsensitiveEnumMap<>(ConnectionState.Type.class); + + private ConnectionStateTypeConverter() {} + + /** Constructor that is needed for reflection. */ + public ConnectionStateTypeConverter(String allowedValues) {} + + @Override + public Class getParameterClass() { + return ConnectionState.Type.class; + } + + @Override + public ConnectionState.Type convert(String value) { + return values.get(value); + } + } + + static class StringValueConverter implements ClientSideStatementValueConverter { + static final StringValueConverter INSTANCE = new StringValueConverter(); + + private StringValueConverter() {} + + /** Constructor needed for reflection. */ + public StringValueConverter(String allowedValues) {} + + @Override + public Class getParameterClass() { + return String.class; + } + + @Override + public String convert(String value) { + return value; + } + } + + /** Converter for converting string values to {@link TransactionMode} values. 
*/ + static class TransactionModeConverter + implements ClientSideStatementValueConverter { + private final CaseInsensitiveEnumMap values = + new CaseInsensitiveEnumMap<>(TransactionMode.class, TransactionMode::getStatementString); + + public TransactionModeConverter(String allowedValues) {} + + @Override + public Class getParameterClass() { + return TransactionMode.class; + } + + @Override + public TransactionMode convert(String value) { + // Transaction mode may contain multiple spaces. + String valueWithSingleSpaces = value.replaceAll("\\s+", " "); + return values.get(valueWithSingleSpaces); + } + } + + static class PgTransactionIsolationConverter + implements ClientSideStatementValueConverter { + private final CaseInsensitiveEnumMap values = + new CaseInsensitiveEnumMap<>(IsolationLevel.class, IsolationLevel::getShortStatementString); + + public PgTransactionIsolationConverter(String allowedValues) {} + + @Override + public Class getParameterClass() { + return IsolationLevel.class; + } + + @Override + public IsolationLevel convert(String value) { + // Isolation level may contain multiple spaces. + String valueWithSingleSpaces = value.replaceAll("\\s+", " "); + if (valueWithSingleSpaces.length() > 1 + && ((valueWithSingleSpaces.startsWith("'") && valueWithSingleSpaces.endsWith("'")) + || (valueWithSingleSpaces.startsWith("\"") + && valueWithSingleSpaces.endsWith("\"")))) { + valueWithSingleSpaces = + valueWithSingleSpaces.substring(1, valueWithSingleSpaces.length() - 1); + } + return values.get(valueWithSingleSpaces); + } + } + + /** + * Converter for converting string values to {@link PgTransactionMode} values. Includes no-op + * handling of setting the isolation level of the transaction to default or serializable. 
+ */ + static class PgTransactionModeConverter + implements ClientSideStatementValueConverter { + PgTransactionModeConverter() {} + + public PgTransactionModeConverter(String allowedValues) {} + + @Override + public Class getParameterClass() { + return PgTransactionMode.class; + } + + @Override + public PgTransactionMode convert(String value) { + PgTransactionMode mode = new PgTransactionMode(); + // Transaction mode may contain multiple spaces. + String valueWithoutDeferrable = value.replaceAll("(?i)(not\\s+deferrable)", " "); + String valueWithSingleSpaces = + valueWithoutDeferrable.replaceAll("\\s+", " ").toLowerCase(Locale.ENGLISH).trim(); + int currentIndex = 0; + while (currentIndex < valueWithSingleSpaces.length()) { + // This will use the last access mode and isolation level that is encountered in the string. + // This is consistent with the behavior of PostgreSQL, which also allows multiple modes to + // be specified in one string, and will use the last one that is encountered. 
+ if (valueWithSingleSpaces.substring(currentIndex).startsWith("read only")) { + currentIndex += "read only".length(); + mode.setAccessMode(AccessMode.READ_ONLY_TRANSACTION); + } else if (valueWithSingleSpaces.substring(currentIndex).startsWith("read write")) { + currentIndex += "read write".length(); + mode.setAccessMode(AccessMode.READ_WRITE_TRANSACTION); + } else if (valueWithSingleSpaces + .substring(currentIndex) + .startsWith("isolation level repeatable read")) { + currentIndex += "isolation level repeatable read".length(); + mode.setIsolationLevel(IsolationLevel.ISOLATION_LEVEL_REPEATABLE_READ); + } else if (valueWithSingleSpaces + .substring(currentIndex) + .startsWith("isolation level serializable")) { + currentIndex += "isolation level serializable".length(); + mode.setIsolationLevel(IsolationLevel.ISOLATION_LEVEL_SERIALIZABLE); + } else if (valueWithSingleSpaces + .substring(currentIndex) + .startsWith("isolation level default")) { + currentIndex += "isolation level default".length(); + mode.setIsolationLevel(IsolationLevel.ISOLATION_LEVEL_DEFAULT); + } else { + return null; + } + // Skip space and/or comma that may separate multiple transaction modes. + if (currentIndex < valueWithSingleSpaces.length() + && valueWithSingleSpaces.charAt(currentIndex) == ' ') { + currentIndex++; + } + if (currentIndex < valueWithSingleSpaces.length() + && valueWithSingleSpaces.charAt(currentIndex) == ',') { + currentIndex++; + } + if (currentIndex < valueWithSingleSpaces.length() + && valueWithSingleSpaces.charAt(currentIndex) == ' ') { + currentIndex++; + } + } + return mode; + } + } + + /** Converter for converting strings to {@link RpcPriority} values. 
*/ + static class RpcPriorityConverter implements ClientSideStatementValueConverter { + static final RpcPriorityConverter INSTANCE = new RpcPriorityConverter("(HIGH|MEDIUM|LOW|NULL)"); + + private final CaseInsensitiveEnumMap values = + new CaseInsensitiveEnumMap<>(RpcPriority.class); + private final Pattern allowedValues; + + public RpcPriorityConverter(String allowedValues) { + // Remove the parentheses from the beginning and end. + this.allowedValues = + Pattern.compile( + "(?is)\\A" + allowedValues.substring(1, allowedValues.length() - 1) + "\\z"); + } + + @Override + public Class getParameterClass() { + return RpcPriority.class; + } + + @Override + public RpcPriority convert(String value) { + Matcher matcher = allowedValues.matcher(value); + if (matcher.find()) { + if (matcher.group(0).equalsIgnoreCase("null")) { + return RpcPriority.UNSPECIFIED; + } + } + return values.get(value); + } + } + + /** Converter for converting strings to {@link SavepointSupport} values. */ + static class SavepointSupportConverter + implements ClientSideStatementValueConverter { + static final SavepointSupportConverter INSTANCE = new SavepointSupportConverter(); + + private final CaseInsensitiveEnumMap values = + new CaseInsensitiveEnumMap<>(SavepointSupport.class); + + private SavepointSupportConverter() {} + + /** Constructor needed for reflection. */ + public SavepointSupportConverter(String allowedValues) {} + + @Override + public Class getParameterClass() { + return SavepointSupport.class; + } + + @Override + public SavepointSupport convert(String value) { + return values.get(value); + } + } + + /** Converter for converting strings to {@link DdlInTransactionMode} values. 
*/ + static class DdlInTransactionModeConverter + implements ClientSideStatementValueConverter { + static final DdlInTransactionModeConverter INSTANCE = new DdlInTransactionModeConverter(); + + private final CaseInsensitiveEnumMap values = + new CaseInsensitiveEnumMap<>(DdlInTransactionMode.class); + + private DdlInTransactionModeConverter() {} + + /** Constructor needed for reflection. */ + public DdlInTransactionModeConverter(String allowedValues) {} + + @Override + public Class getParameterClass() { + return DdlInTransactionMode.class; + } + + @Override + public DdlInTransactionMode convert(String value) { + return values.get(value); + } + } + + static class ExplainCommandConverter implements ClientSideStatementValueConverter { + @Override + public Class getParameterClass() { + return String.class; + } + + @Override + public String convert(String value) { + /* The first word in the string should be "explain" + * So, if the size of the string <= 7 (number of letters in the word "explain"), its an invalid statement + * If the size is greater than 7, we'll consider everything after explain as the query. 
+ */ + if (value.length() <= 7) { + return null; + } + return value.substring(7).trim(); + } + } + + /** Converter for converting Base64 encoded string to byte[] */ + static class ProtoDescriptorsConverter implements ClientSideStatementValueConverter { + + public ProtoDescriptorsConverter(String allowedValues) {} + + @Override + public Class getParameterClass() { + return byte[].class; + } + + @Override + public byte[] convert(String value) { + if (value == null || value.length() == 0 || value.equalsIgnoreCase("null")) { + return null; + } + try { + return Base64.getDecoder().decode(value); + } catch (IllegalArgumentException e) { + return null; + } + } + } + + /** Converter for converting String that take in file path as input to String */ + static class ProtoDescriptorsFileConverter implements ClientSideStatementValueConverter { + + public ProtoDescriptorsFileConverter(String allowedValues) {} + + @Override + public Class getParameterClass() { + return String.class; + } + + @Override + public String convert(String filePath) { + if (Strings.isNullOrEmpty(filePath)) { + return null; + } + return filePath; + } + } + + static class CredentialsProviderConverter + implements ClientSideStatementValueConverter { + static final CredentialsProviderConverter INSTANCE = new CredentialsProviderConverter(); + + private CredentialsProviderConverter() {} + + @Override + public Class getParameterClass() { + return CredentialsProvider.class; + } + + @Override + public CredentialsProvider convert(String credentialsProviderName) { + if (!Strings.isNullOrEmpty(credentialsProviderName)) { + try { + Class clazz = + (Class) Class.forName(credentialsProviderName); + Constructor constructor = clazz.getDeclaredConstructor(); + return constructor.newInstance(); + } catch (ClassNotFoundException classNotFoundException) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Unknown or invalid CredentialsProvider class name: " + credentialsProviderName, + 
classNotFoundException); + } catch (NoSuchMethodException noSuchMethodException) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Credentials provider " + + credentialsProviderName + + " does not have a public no-arg constructor.", + noSuchMethodException); + } catch (InvocationTargetException + | InstantiationException + | IllegalAccessException exception) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Failed to create an instance of " + + credentialsProviderName + + ": " + + exception.getMessage(), + exception); + } + } + return null; + } + } + + static class GrpcInterceptorProviderConverter + implements ClientSideStatementValueConverter { + static final GrpcInterceptorProviderConverter INSTANCE = new GrpcInterceptorProviderConverter(); + + private GrpcInterceptorProviderConverter() {} + + @Override + public Class getParameterClass() { + return GrpcInterceptorProvider.class; + } + + @Override + public GrpcInterceptorProvider convert(String interceptorProviderName) { + if (!Strings.isNullOrEmpty(interceptorProviderName)) { + try { + Class clazz = + (Class) Class.forName(interceptorProviderName); + Constructor constructor = + clazz.getDeclaredConstructor(); + return constructor.newInstance(); + } catch (ClassNotFoundException classNotFoundException) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Unknown or invalid GrpcInterceptorProvider class name: " + interceptorProviderName, + classNotFoundException); + } catch (NoSuchMethodException noSuchMethodException) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "GrpcInterceptorProvider " + + interceptorProviderName + + " does not have a public no-arg constructor.", + noSuchMethodException); + } catch (InvocationTargetException + | InstantiationException + | IllegalAccessException exception) { + throw SpannerExceptionFactory.newSpannerException( + 
ErrorCode.INVALID_ARGUMENT, + "Failed to create an instance of " + + interceptorProviderName + + ": " + + exception.getMessage(), + exception); + } + } + return null; + } + } + + /** Converter for converting strings to {@link Dialect} values. */ + static class DialectConverter implements ClientSideStatementValueConverter { + static final DialectConverter INSTANCE = new DialectConverter(); + + private final CaseInsensitiveEnumMap values = + new CaseInsensitiveEnumMap<>(Dialect.class); + + private DialectConverter() {} + + @Override + public Class getParameterClass() { + return Dialect.class; + } + + @Override + public Dialect convert(String value) { + return values.get(value); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatements.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatements.java new file mode 100644 index 000000000000..8d4181cfe55b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatements.java @@ -0,0 +1,80 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import com.google.gson.Gson; +import java.io.InputStreamReader; +import java.util.Set; + +/** This class reads and parses the {@link ClientSideStatement}s from the json file. */ +class ClientSideStatements { + private static final String GSQL_STATEMENTS_DEFINITION_FILE = "ClientSideStatements.json"; + private static final String PG_STATEMENTS_DEFINITION_FILE = "PG_ClientSideStatements.json"; + private static final ClientSideStatements GSQL_STATEMENTS = importGsqlStatements(); + private static final ClientSideStatements PG_STATEMENTS = importPgStatements(); + + static ClientSideStatements getInstance(Dialect dialect) { + switch (dialect) { + case GOOGLE_STANDARD_SQL: + return GSQL_STATEMENTS; + case POSTGRESQL: + return PG_STATEMENTS; + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Unknown or unsupported dialect: " + dialect); + } + } + + /** + * Reads statement definitions from ClientSideStatements.json and parses these as Java objects. + */ + private static ClientSideStatements importGsqlStatements() { + Gson gson = new Gson(); + return gson.fromJson( + new InputStreamReader( + ClientSideStatements.class.getResourceAsStream(GSQL_STATEMENTS_DEFINITION_FILE)), + ClientSideStatements.class); + } + + /** + * Reads statement definitions from PG_ClientSideStatements.json and parses these as Java objects. 
+ */ + private static ClientSideStatements importPgStatements() { + Gson gson = new Gson(); + return gson.fromJson( + new InputStreamReader( + ClientSideStatements.class.getResourceAsStream(PG_STATEMENTS_DEFINITION_FILE)), + ClientSideStatements.class); + } + + // This field is set automatically by the importStatements / pgImportStatements methods. + private Set statements; + + private ClientSideStatements() {} + + /** Compiles and returns all statements from the resource file. */ + Set getCompiledStatements() throws CompileException { + for (ClientSideStatementImpl statement : statements) { + statement.compile(); + } + return statements; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/Connection.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/Connection.java new file mode 100644 index 000000000000..60d739a3c850 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/Connection.java @@ -0,0 +1,1606 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.api.core.ApiFuture; +import com.google.api.core.InternalApi; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AbortedDueToConcurrentModificationException; +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.AsyncResultSet; +import com.google.cloud.spanner.CommitResponse; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.Options.RpcPriority; +import com.google.cloud.spanner.Options.UpdateOption; +import com.google.cloud.spanner.PartitionOptions; +import com.google.cloud.spanner.ReadContext.QueryAnalyzeMode; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerBatchUpdateException; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.connection.StatementResult.ResultType; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode; +import java.time.Duration; +import java.util.Iterator; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Nonnull; + +/** + * Internal connection API for Google Cloud Spanner. This interface may introduce breaking changes + * without prior notice. + * + *

A connection to a Cloud Spanner database. Connections are not designed to be thread-safe. The + * only exception is the {@link Connection#cancel()} method that may be called by any other thread + * to stop the execution of the current statement on the connection. + * + *

All -Async methods on {@link Connection} are guaranteed to be executed in the order that they + * are issued on the {@link Connection}. Mixing synchronous and asynchronous method calls is also + * supported, and these are also guaranteed to be executed in the order that they are issued. + * + *

Connections accept a number of additional SQL statements for setting or changing the state of + * a {@link Connection}. These statements can only be executed using the {@link + * Connection#execute(Statement)} method: + * + *

    + *
  • SHOW AUTOCOMMIT: Returns the current value of AUTOCOMMIT of this + * connection as a {@link ResultSet} + *
  • SET AUTOCOMMIT=TRUE|FALSE: Sets the value of AUTOCOMMIT for this + * connection + *
  • SHOW READONLY: Returns the current value of READONLY of this + * connection as a {@link ResultSet} + *
  • SET READONLY=TRUE|FALSE: Sets the value of READONLY for this + * connection + *
  • SHOW RETRY_ABORTS_INTERNALLY: Returns the current value of + * RETRY_ABORTS_INTERNALLY of this connection as a {@link ResultSet} + *
  • SET RETRY_ABORTS_INTERNALLY=TRUE|FALSE: Sets the value of + * RETRY_ABORTS_INTERNALLY for this connection + *
  • SHOW AUTOCOMMIT_DML_MODE: Returns the current value of + * AUTOCOMMIT_DML_MODE of this connection as a {@link ResultSet} + *
  • SET AUTOCOMMIT_DML_MODE='TRANSACTIONAL' | 'PARTITIONED_NON_ATOMIC': Sets the + * value of AUTOCOMMIT_DML_MODE for this connection + *
  • SHOW STATEMENT_TIMEOUT: Returns the current value of STATEMENT_TIMEOUT + * of this connection as a {@link ResultSet} + *
  • SET STATEMENT_TIMEOUT='<int64>s|ms|us|ns' | NULL: Sets the value of + * STATEMENT_TIMEOUT for this connection. The supported {@link TimeUnit}s are: + *
      + *
    • s - Seconds + *
    • ms - Milliseconds + *
    • us - Microseconds + *
    • ns - Nanoseconds + *
    + * Setting the STATEMENT_TIMEOUT to NULL will clear the value for the STATEMENT_TIMEOUT on the + * connection. + *
  • SHOW READ_TIMESTAMP: Returns the last READ_TIMESTAMP of this + * connection as a {@link ResultSet} + *
  • SHOW COMMIT_TIMESTAMP: Returns the last COMMIT_TIMESTAMP of this + * connection as a {@link ResultSet} + *
  • SHOW READ_ONLY_STALENESS: Returns the current value of + * READ_ONLY_STALENESS of this connection as a {@link ResultSet} + *
  • + * SET READ_ONLY_STALENESS='STRONG' | 'MIN_READ_TIMESTAMP <timestamp>' | 'READ_TIMESTAMP <timestamp>' | 'MAX_STALENESS <int64>s|ms|mus|ns' | 'EXACT_STALENESS (<int64>s|ms|mus|ns)' + * : Sets the value of READ_ONLY_STALENESS for this connection. + *
  • SHOW OPTIMIZER_VERSION: Returns the current value of + * OPTIMIZER_VERSION of this connection as a {@link ResultSet} + *
  • + * SET OPTIMIZER_VERSION='<version>' | 'LATEST' + * : Sets the value of OPTIMIZER_VERSION for this connection. + *
  • SHOW OPTIMIZER_STATISTICS_PACKAGE: Returns the current value of + * OPTIMIZER_STATISTICS_PACKAGE of this connection as a {@link ResultSet} + *
  • + * SET OPTIMIZER_STATISTICS_PACKAGE='<package>' | '' + * : Sets the value of OPTIMIZER_STATISTICS_PACKAGE for this connection. + *
  • BEGIN [TRANSACTION]: Begins a new transaction. This statement is optional when + * the connection is not in autocommit mode, as a new transaction will automatically be + * started when a query or update statement is issued. In autocommit mode, this statement will + * temporarily put the connection in transactional mode, and return the connection to + * autocommit mode when COMMIT [TRANSACTION] or ROLLBACK [TRANSACTION] + * is executed + *
  • COMMIT [TRANSACTION]: Commits the current transaction + *
  • ROLLBACK [TRANSACTION]: Rollbacks the current transaction + *
  • SET TRANSACTION READ ONLY|READ WRITE: Sets the type for the current + * transaction. May only be executed before a transaction is actually running (i.e. before any + * statements have been executed in the transaction) + *
  • START BATCH DDL: Starts a batch of DDL statements. May only be executed when + * no transaction has been started and the connection is in read/write mode. The connection + * will only accept DDL statements while a DDL batch is active. + *
  • START BATCH DML: Starts a batch of DML statements. May only be executed when + * the connection is in read/write mode. The connection will only accept DML statements while + * a DML batch is active. + *
  • RUN BATCH: Ends the current batch, sends the batched DML or DDL statements to + * Spanner and blocks until all statements have been executed or an error occurs. May only be + * executed when a (possibly empty) batch is active. The statement will return the update + * counts of the batched statements as {@link ResultSet} with an ARRAY<INT64> column. In + * case of a DDL batch, this array will always be empty. + *
  • ABORT BATCH: Ends the current batch and removes any DML or DDL statements from + * the buffer without sending any statements to Spanner. May only be executed when a (possibly + * empty) batch is active. + *
+ * + * Note that Cloud Spanner could abort read/write transactions in the background, and that + * any database call during a read/write transaction could fail with an {@link + * AbortedException}. This also includes calls to {@link ResultSet#next()}. + * + *

If {@link Connection#isRetryAbortsInternally()} is true, then the connection will + * silently handle any {@link AbortedException}s by internally re-acquiring all transactional locks + * and verifying (via the use of cryptographic checksums) that no underlying data has changed. If a + * change to the underlying data is detected, then an {@link + * AbortedDueToConcurrentModificationException} error will be thrown. If your application already + * uses retry loops to handle these Aborted errors, then it will be most efficient to set {@link + * Connection#isRetryAbortsInternally()} to false. + * + *

Use {@link ConnectionOptions} to create a {@link Connection}. + */ +@InternalApi +public interface Connection extends AutoCloseable { + + /** Closes this connection. This is a no-op if the {@link Connection} has already been closed. */ + @Override + void close(); + + /** + * Closes this connection without blocking. This is a no-op if the {@link Connection} has already + * been closed. The {@link Connection} is no longer usable directly after calling this method. The + * returned {@link ApiFuture} is done when the running statement(s) (if any) on the connection + * have finished. + */ + ApiFuture closeAsync(); + + /** + * @return true if this connection has been closed. + */ + boolean isClosed(); + + /** + * Resets the state of this connection to the default state that it had when it was first created. + * Calling this method after a transaction has started (that is; after a statement has been + * executed in the transaction), does not change the active transaction. If for example a + * transaction has been started with a transaction tag, the transaction tag for the active + * transaction is not reset. + * + *

You can use this method to reset the state of the connection before returning a connection + * to a connection pool, and/or before using a connection that was retrieved from a connection + * pool. + */ + void reset(); + + /** Returns the current value of the given connection property. */ + T getConnectionPropertyValue( + com.google.cloud.spanner.connection.ConnectionProperty property); + + /** + * Sets autocommit on/off for this {@link Connection}. Connections in autocommit mode will apply + * any changes to the database directly without waiting for an explicit commit. DDL- and DML + * statements as well as {@link Mutation}s are sent directly to Spanner, and committed + * automatically unless the statement caused an error. The statement is retried in case of an + * {@link AbortedException}. All other errors will cause the underlying transaction to be rolled + * back. + * + *

A {@link Connection} that is in autocommit and read/write mode will allow all types of + * statements: Queries, DML, DDL, and Mutations (writes). If the connection is in read-only mode, + * only queries will be allowed. + * + *

{@link Connection}s in autocommit mode may also accept partitioned DML statements. See + * {@link Connection#setAutocommitDmlMode(AutocommitDmlMode)} for more information. + * + * @param autocommit true/false to turn autocommit on/off + */ + void setAutocommit(boolean autocommit); + + /** + * @return true if this connection is in autocommit mode + */ + boolean isAutocommit(); + + /** + * Sets this connection to read-only or read-write. This method may only be called when no + * transaction is active. A connection that is in read-only mode, will never allow any kind of + * changes to the database to be submitted. + * + * @param readOnly true/false to turn read-only mode on/off + */ + void setReadOnly(boolean readOnly); + + /** + * @return true if this connection is in read-only mode + */ + boolean isReadOnly(); + + /** Sets the default isolation level for read/write transactions for this connection. */ + void setDefaultIsolationLevel(IsolationLevel isolationLevel); + + /** Returns the default isolation level for read/write transactions for this connection. */ + IsolationLevel getDefaultIsolationLevel(); + + /** Sets the read lock mode for read/write transactions for this connection. */ + void setReadLockMode(ReadLockMode readLockMode); + + /** Returns the read lock mode for read/write transactions for this connection. */ + ReadLockMode getReadLockMode(); + + /** Sets the timeout for read/write transactions. */ + void setTransactionTimeout(Duration timeout); + + /** Returns the timeout for read/write transactions. */ + Duration getTransactionTimeout(); + + /** + * Sets the duration the connection should wait before automatically aborting the execution of a + * statement. The default is no timeout. Statement timeouts are applied all types of statements, + * both in autocommit and transactional mode. They also apply to {@link Connection#commit()} and + * {@link Connection#rollback()} statements. + * + *

A DML statement in autocommit mode may or may not have actually been applied to the + * database, depending on when the timeout occurred. + * + *

A DML statement in a transaction that times out may still have been applied to the + * transaction. If you still decide to commit the transaction after such a timeout, the DML + * statement may or may not have been part of the transaction, depending on whether the timeout + * occurred before or after the statement was (successfully) sent to Spanner. You should therefore + * either always rollback a transaction that had a DML statement that timed out, or you should + * accept that the timed out statement still might have been applied to the database. + * + *

DDL statements and DML statements in {@link AutocommitDmlMode#PARTITIONED_NON_ATOMIC} mode + * cannot be rolled back. If such a statement times out, it may or may not have been applied to + * the database. The same applies to commit and rollback statements. + * + *

Statements that time out will throw a {@link SpannerException} with error code {@link + * ErrorCode#DEADLINE_EXCEEDED}. + * + * @param timeout The number of {@link TimeUnit}s before a statement is automatically aborted by + * the connection. Zero or negative values are not allowed. The maximum allowed value is + * 315,576,000,000 seconds. Use {@link Connection#clearStatementTimeout()} to remove a timeout + * value that has been set. + * @param unit The {@link TimeUnit} to specify the timeout value in. Must be one of {@link + * TimeUnit#NANOSECONDS}, {@link TimeUnit#MICROSECONDS}, {@link TimeUnit#MILLISECONDS}, {@link + * TimeUnit#SECONDS}. + */ + void setStatementTimeout(long timeout, TimeUnit unit); + + /** + * Clears the statement timeout value for this connection. This is a no-op if there is currently + * no statement timeout set on this connection. + */ + void clearStatementTimeout(); + + /** + * @param unit The {@link TimeUnit} to get the timeout value in. Must be one of {@link + * TimeUnit#NANOSECONDS}, {@link TimeUnit#MICROSECONDS}, {@link TimeUnit#MILLISECONDS}, {@link + * TimeUnit#SECONDS} + * @return the current statement timeout value or 0 if no timeout value has been set. + */ + long getStatementTimeout(TimeUnit unit); + + /** + * @return true if this {@link Connection} has a statement timeout value. + */ + boolean hasStatementTimeout(); + + /** + * Cancels the currently running statement on this {@link Connection} (if any). If canceling the + * statement execution succeeds, the statement will be terminated and a {@link SpannerException} + * with code {@link ErrorCode#CANCELLED} will be thrown. The result of the statement will be the + * same as when a statement times out (see {@link Connection#setStatementTimeout(long, TimeUnit)} + * for more information). + * + *

Canceling a DDL statement in autocommit mode or a RUN BATCH statement of a DDL batch will + * cause the connection to try to cancel the execution of the DDL statement(s). This is not + * guaranteed to cancel the execution of the statement(s) on Cloud Spanner. See + * https://cloud.google.com/spanner/docs/reference/rpc/google.longrunning#google.longrunning.Operations.CancelOperation + * for more information. + * + *

Canceling a DML statement that is running in {@link + * AutocommitDmlMode#PARTITIONED_NON_ATOMIC} mode will not cancel a statement on Cloud Spanner + * that is already being executed, and its effects will still be applied to the database. + */ + void cancel(); + + /** + * Begins a new transaction for this connection. The transaction will use the default isolation + * level of this connection. + * + *

    + *
  • Calling this method on a connection that has no transaction and that is + * not in autocommit mode, will register a new transaction that has not yet + * started on this connection + *
  • Calling this method on a connection that has no transaction and that is + * in autocommit mode, will register a new transaction that has not yet started on this + * connection, and temporarily turn off autocommit mode until the next commit/rollback + *
  • Calling this method on a connection that already has a transaction that has not yet + * started, will cause a {@link SpannerException} + *
  • Calling this method on a connection that already has a transaction that has started, will + * cause a {@link SpannerException} (no nested transactions) + *
+ */ + void beginTransaction(); + + /** + * Same as {@link #beginTransaction()}, but this transaction will use the given isolation level, + * instead of the default isolation level of this connection. + */ + void beginTransaction(IsolationLevel isolationLevel); + + /** + * Begins a new transaction for this connection. This method is guaranteed to be non-blocking. The + * returned {@link ApiFuture} will be done when the transaction has been initialized. The + * transaction will use the default isolation level of this connection. + * + *
    + *
  • Calling this method on a connection that has no transaction and that is + * not in autocommit mode, will register a new transaction that has not yet + * started on this connection + *
  • Calling this method on a connection that has no transaction and that is + * in autocommit mode, will register a new transaction that has not yet started on this + * connection, and temporarily turn off autocommit mode until the next commit/rollback + *
  • Calling this method on a connection that already has a transaction that has not yet + * started, will cause a {@link SpannerException} + *
  • Calling this method on a connection that already has a transaction that has started, will + * cause a {@link SpannerException} (no nested transactions) + *
+ */ + ApiFuture beginTransactionAsync(); + + /** + * Same as {@link #beginTransactionAsync()}, but this transaction will use the given isolation + * level, instead of the default isolation level of this connection. + */ + ApiFuture beginTransactionAsync(IsolationLevel isolationLevel); + + /** + * Sets the transaction mode to use for current transaction. This method may only be called when + * in a transaction, and before the transaction is actually started, i.e. before any statements + * have been executed in the transaction. + * + * @param transactionMode The transaction mode to use for the current transaction. + *
    + *
  • {@link TransactionMode#READ_ONLY_TRANSACTION} will create a read-only transaction and + * prevent any changes to written to the database through this transaction. The read + * timestamp to be used will be determined based on the current readOnlyStaleness + * setting of this connection. It is recommended to use {@link + * TransactionMode#READ_ONLY_TRANSACTION} instead of {@link + * TransactionMode#READ_WRITE_TRANSACTION} when possible, as read-only transactions do + * not acquire locks on Cloud Spanner, and read-only transactions never abort. + *
  • {@link TransactionMode#READ_WRITE_TRANSACTION} this value is only allowed when the + * connection is not in read-only mode and will create a read-write transaction. If + * {@link Connection#isRetryAbortsInternally()} is true, each read/write + * transaction will keep track of a running SHA256 checksum for each {@link ResultSet} + * that is returned in order to be able to retry the transaction in case the transaction + * is aborted by Spanner. + *
+ */ + void setTransactionMode(TransactionMode transactionMode); + + /** + * @return the transaction mode of the current transaction. This method may only be called when + * the connection is in a transaction. + */ + TransactionMode getTransactionMode(); + + /** + * Sets the transaction tag to use for the current transaction. This method may only be called + * when in a transaction and before any statements have been executed in the transaction. + * + *

The tag will be set as the transaction tag of all statements during the transaction, and as + * the transaction tag of the commit. + * + *

The transaction tag will automatically be cleared after the transaction has ended. + * + * @param tag The tag to use. + */ + default void setTransactionTag(String tag) { + throw new UnsupportedOperationException(); + } + + /** + * @return The transaction tag of the current transaction. + */ + default String getTransactionTag() { + throw new UnsupportedOperationException(); + } + + /** + * Sets the statement tag to use for the next statement that is executed. The tag is automatically + * cleared after the statement is executed. Statement tags can be used both with autocommit=true + * and autocommit=false, and can be used for partitioned DML. + * + *

Statement tags are not allowed before COMMIT and ROLLBACK statements. + * + *

Statement tags are allowed before START BATCH DML statements and will be included in the + * {@link ExecuteBatchDmlRequest} that is sent to Spanner. Statement tags are not allowed inside a + * batch. + * + * @param tag The statement tag to use with the next statement that will be executed on this + * connection. + */ + default void setStatementTag(String tag) { + throw new UnsupportedOperationException(); + } + + /** + * @return The statement tag that will be used with the next statement that is executed on this + * connection. + */ + default String getStatementTag() { + throw new UnsupportedOperationException(); + } + + /** + * Sets the client context to use for the statements that are executed. The client context + * persists until it is changed or cleared. + * + * @param clientContext The client context to use with the statements that will be executed on + * this connection. + */ + default void setClientContext(com.google.spanner.v1.RequestOptions.ClientContext clientContext) { + throw new UnsupportedOperationException(); + } + + /** + * @return The client context that will be used with the statements that are executed on this + * connection. + */ + default com.google.spanner.v1.RequestOptions.ClientContext getClientContext() { + throw new UnsupportedOperationException(); + } + + /** + * Sets whether the next transaction should be excluded from all change streams with the DDL + * option `allow_txn_exclusion=true` + */ + default void setExcludeTxnFromChangeStreams(boolean excludeTxnFromChangeStreams) { + throw new UnsupportedOperationException(); + } + + /** + * Returns true if the next transaction should be excluded from all change streams with the DDL + * option `allow_txn_exclusion=true` + */ + default boolean isExcludeTxnFromChangeStreams() { + throw new UnsupportedOperationException(); + } + + /** + * Sets the proto descriptors to use for the next DDL statement (single or batch) that will be + * executed. 
The proto descriptor is automatically cleared after the statement is executed. + * + * @param protoDescriptors The proto descriptors to use with the next DDL statement (single or + * batch) that will be executed on this connection. + */ + default void setProtoDescriptors(@Nonnull byte[] protoDescriptors) { + throw new UnsupportedOperationException(); + } + + /** + * @return The proto descriptor that will be used with the next DDL statement (single or batch) + * that is executed on this connection. + */ + default byte[] getProtoDescriptors() { + throw new UnsupportedOperationException(); + } + + /** + * @return true if this connection will automatically retry read/write transactions + * that abort. This method may only be called when the connection is in read/write + * transactional mode and no transaction has been started yet. + */ + boolean isRetryAbortsInternally(); + + /** + * Sets whether this connection will internally retry read/write transactions that abort. The + * default is true. When internal retry is enabled, the {@link Connection} will keep + * track of a running SHA256 checksum of all {@link ResultSet}s that have been returned from Cloud + * Spanner. If the checksum that is calculated during an internal retry differs from the original + * checksum, the transaction will abort with an {@link + * AbortedDueToConcurrentModificationException}. + * + *

Note that retries of a read/write transaction that calls a non-deterministic function on + * Cloud Spanner, such as CURRENT_TIMESTAMP(), will never be successful, as the data returned + * during the retry will always be different from the original transaction. + * + *

It is also highly recommended that all queries in a read/write transaction have an ORDER BY + * clause that guarantees that the data is returned in the same order as in the original + * transaction if the transaction is internally retried. The most efficient way to achieve this is + * to always include the primary key columns at the end of the ORDER BY clause. + * + *

This method may only be called when the connection is in read/write transactional mode and + * no transaction has been started yet. + * + * @param retryAbortsInternally Set to true to internally retry transactions that are + * aborted by Spanner. When set to false, any database call on a transaction that + * has been aborted by Cloud Spanner will throw an {@link AbortedException} instead of being + * retried. Set this to false if your application already uses retry loops to handle {@link + * AbortedException}s. + */ + void setRetryAbortsInternally(boolean retryAbortsInternally); + + /** + * Add a {@link TransactionRetryListener} to this {@link Connection} for testing and logging + * purposes. The method {@link TransactionRetryListener#retryStarting(Timestamp, long, int)} will + * be called before an automatic retry is started for a read/write transaction on this connection. + * The method {@link TransactionRetryListener#retryFinished(Timestamp, long, int, + * TransactionRetryListener.RetryResult)} will be called after the retry has finished. + * + * @param listener The listener to add to this connection. + */ + void addTransactionRetryListener(TransactionRetryListener listener); + + /** + * Removes one existing {@link TransactionRetryListener} from this {@link Connection}, if it is + * present (optional operation). + * + * @param listener The listener to remove from the connection. + * @return true if a listener was removed from the connection. + */ + boolean removeTransactionRetryListener(TransactionRetryListener listener); + + /** + * @return an unmodifiable iterator of the {@link TransactionRetryListener}s registered for this + * connection. + */ + Iterator getTransactionRetryListeners(); + + /** + * Sets the mode for executing DML statements in autocommit mode for this connection. This setting + * is only used when the connection is in autocommit mode, and may only be set while the + * transaction is in autocommit mode and not in a temporary transaction. 
The autocommit + * transaction mode is reset to its default value of {@link AutocommitDmlMode#TRANSACTIONAL} when + * autocommit mode is changed on the connection. + * + * @param mode The DML autocommit mode to use + *

    + *
  • {@link AutocommitDmlMode#TRANSACTIONAL} DML statements are executed as single + * read-write transaction. After successful execution, the DML statement is guaranteed + * to have been applied exactly once to the database + *
  • {@link AutocommitDmlMode#PARTITIONED_NON_ATOMIC} DML statements are executed as + * partitioned DML transactions. If an error occurs during the execution of the DML + * statement, it is possible that the statement has been applied to some but not all of + * the rows specified in the statement. + *
+ */ + void setAutocommitDmlMode(AutocommitDmlMode mode); + + /** + * @return the current {@link AutocommitDmlMode} setting for this connection. This method may only + * be called on a connection that is in autocommit mode and not while in a temporary + * transaction. + */ + AutocommitDmlMode getAutocommitDmlMode(); + + /** + * Sets the staleness to use for the current read-only transaction. This method may only be called + * when the transaction mode of the current transaction is {@link + * TransactionMode#READ_ONLY_TRANSACTION} and there is no transaction that has started, or when + * the connection is in read-only and autocommit mode. + * + * @param staleness The staleness to use for the current but not yet started read-only transaction + */ + void setReadOnlyStaleness(TimestampBound staleness); + + /** + * @return the read-only staleness setting for the current read-only transaction. This method may + * only be called when the current transaction is a read-only transaction, or when the + * connection is in read-only and autocommit mode. + */ + TimestampBound getReadOnlyStaleness(); + + /** + * Sets the {@link DirectedReadOptions} to use for both single-use and multi-use read-only + * transactions on this connection. + */ + default void setDirectedRead(DirectedReadOptions directedReadOptions) { + throw new UnsupportedOperationException("Unimplemented"); + } + + /** + * Returns the {@link DirectedReadOptions} that are used for both single-use and multi-use + * read-only transactions on this connection. + */ + default DirectedReadOptions getDirectedRead() { + throw new UnsupportedOperationException("Unimplemented"); + } + + /** + * Sets the query optimizer version to use for this connection. + * + * @param optimizerVersion The query optimizer version to use. Must be a valid optimizer version + * number, the string LATEST or an empty string. 
The empty string will instruct + * the connection to use the optimizer version that is defined in the environment variable + * SPANNER_OPTIMIZER_VERSION. If no value is specified in the environment + * variable, the default query optimizer of Cloud Spanner is used. + */ + void setOptimizerVersion(String optimizerVersion); + + /** + * Gets the current query optimizer version of this connection. + * + * @return The query optimizer version that is currently used by this connection. + */ + String getOptimizerVersion(); + + /** + * Sets the query optimizer statistics package + * + * @param optimizerStatisticsPackage The query optimizer statistics package to use. Must be a + * string composed of letters, numbers, dashes and underscores or an empty string. The empty + * string will instruct the connection to use the optimizer statistics package that is defined in + * the environment variable SPANNER_OPTIMIZER_STATISTICS_PACKAGE. If no value is + * specified in the environment variable, the client level query optimizer is used. If none is + * set, the default query optimizer of Cloud Spanner is used. + */ + default void setOptimizerStatisticsPackage(String optimizerStatisticsPackage) { + throw new UnsupportedOperationException("Unimplemented"); + } + + /** + * Gets the current query optimizer statistics package of this connection. + * + * @return The query optimizer statistics package that is currently used by this connection. + */ + default String getOptimizerStatisticsPackage() { + throw new UnsupportedOperationException("Unimplemented"); + } + + /** + * Sets whether this connection should request commit statistics from Cloud Spanner for read/write + * transactions and DML statements in autocommit mode. 
+ */ + void setReturnCommitStats(boolean returnCommitStats); + + /** + * @return true if this connection requests commit statistics from Cloud Spanner + */ + boolean isReturnCommitStats(); + + /** Sets the max_commit_delay that will be applied to commit requests from this connection. */ + default void setMaxCommitDelay(Duration maxCommitDelay) { + throw new UnsupportedOperationException("Unimplemented"); + } + + /** Returns the max_commit_delay that will be applied to commit requests from this connection. */ + default Duration getMaxCommitDelay() { + throw new UnsupportedOperationException("Unimplemented"); + } + + /** + * Sets the priority to use for RPCs executed by this connection. + * + * @param rpcPriority The RPC priority to use. + *
    + *
  • {@link RpcPriority#HIGH} This specifies that the RPC's invocation will be of high + * priority. + *
  • {@link RpcPriority#MEDIUM} This specifies that the RPC's invocation will be of medium + * priority. + *
  • {@link RpcPriority#LOW} This specifies that the RPC's invocation will be of low + * priority. + *
+ */ + default void setRPCPriority(RpcPriority rpcPriority) { + throw new UnsupportedOperationException("Unimplemented"); + } + + /** + * Gets the current RPC priority of this connection. + * + * @return The RPC priority that is currently used by this connection. + */ + default RpcPriority getRPCPriority() { + throw new UnsupportedOperationException("Unimplemented"); + } + + /** + * Sets whether this connection should delay the actual start of a read/write transaction until + * the first write operation is observed on that transaction. All read operations that are + * executed before the first write operation in the transaction will be executed as if the + * connection was in auto-commit mode. This can reduce locking, especially for transactions that + * execute a large number of reads before any writes, at the expense of a lower transaction + * isolation. + * + *

NOTE: This will make read/write transactions non-serializable. + */ + default void setDelayTransactionStartUntilFirstWrite( + boolean delayTransactionStartUntilFirstWrite) { + throw new UnsupportedOperationException("Unimplemented"); + } + + /** + * @return true if this connection delays the actual start of a read/write transaction until the + * first write operation on that transaction. + */ + default boolean isDelayTransactionStartUntilFirstWrite() { + throw new UnsupportedOperationException("Unimplemented"); + } + + /** + * Sets whether this connection should keep read/write transactions alive by executing a SELECT 1 + * once every 10 seconds during inactive read/write transactions. + * + *

NOTE: This will keep read/write transactions alive and hold on to locks until it is + * explicitly committed or rolled back. + */ + default void setKeepTransactionAlive(boolean keepTransactionAlive) { + throw new UnsupportedOperationException("Unimplemented"); + } + + /** + * @return true if this connection keeps read/write transactions alive by executing a SELECT 1 + * once every 10 seconds during inactive read/write transactions. + */ + default boolean isKeepTransactionAlive() { + throw new UnsupportedOperationException("Unimplemented"); + } + + /** + * Commits the current transaction of this connection. All mutations that have been buffered + * during the current transaction will be written to the database. + * + *

If the connection is in autocommit mode, and there is a temporary transaction active on this + * connection, calling this method will cause the connection to go back to autocommit mode after + * calling this method. + * + *

This method will throw a {@link SpannerException} with code {@link + * ErrorCode#DEADLINE_EXCEEDED} if a statement timeout has been set on this connection, and the + * commit operation takes longer than this timeout. + * + *

    + *
  • Calling this method on a connection in autocommit mode and with no temporary transaction, + * will cause an exception + *
  • Calling this method while a DDL batch is active will cause an exception + *
  • Calling this method on a connection with a transaction that has not yet started, will end + * that transaction and any properties that might have been set on that transaction, and + * return the connection to its previous state. This means that if a transaction is created + * and set to read-only, and then committed before any statements have been executed, the + * read-only transaction is ended and any subsequent statements will be executed in a new + * transaction. If the connection is in read-write mode, the default for new transactions + * will be {@link TransactionMode#READ_WRITE_TRANSACTION}. Committing an empty transaction + * also does not generate a read timestamp or a commit timestamp, and calling one of the + * methods {@link Connection#getReadTimestamp()} or {@link Connection#getCommitTimestamp()} + * will cause an exception. + *
  • Calling this method on a connection with a {@link TransactionMode#READ_ONLY_TRANSACTION} + * transaction will end that transaction. If the connection is in read-write mode, any + * subsequent transaction will by default be a {@link + * TransactionMode#READ_WRITE_TRANSACTION} transaction, unless any following transaction is + * explicitly set to {@link TransactionMode#READ_ONLY_TRANSACTION} + *
  • Calling this method on a connection with a {@link TransactionMode#READ_WRITE_TRANSACTION} + * transaction will send all buffered mutations to the database, commit any DML statements + * that have been executed during this transaction and end the transaction. + *
+ */ + void commit(); + + /** + * Commits the current transaction of this connection. All mutations that have been buffered + * during the current transaction will be written to the database. + * + *

This method is guaranteed to be non-blocking. The returned {@link ApiFuture} will be done + * when the transaction has committed or the commit has failed. + * + *

Calling this method will always end the current transaction and start a new transaction when + * the next statement is executed, regardless whether this commit call succeeded or failed. If the + * next statement(s) rely on the results of the transaction that is being committed, it is + * recommended to check the status of this commit by inspecting the value of the returned {@link + * ApiFuture} before executing the next statement, to ensure that the commit actually succeeded. + * + *

If the connection is in autocommit mode, and there is a temporary transaction active on this + * connection, calling this method will cause the connection to go back to autocommit mode after + * calling this method. + * + *

This method will throw a {@link SpannerException} with code {@link + * ErrorCode#DEADLINE_EXCEEDED} if a statement timeout has been set on this connection, and the + * commit operation takes longer than this timeout. + * + *

    + *
  • Calling this method on a connection in autocommit mode and with no temporary transaction, + * will cause an exception + *
  • Calling this method while a DDL batch is active will cause an exception + *
  • Calling this method on a connection with a transaction that has not yet started, will end + * that transaction and any properties that might have been set on that transaction, and + * return the connection to its previous state. This means that if a transaction is created + * and set to read-only, and then committed before any statements have been executed, the + * read-only transaction is ended and any subsequent statements will be executed in a new + * transaction. If the connection is in read-write mode, the default for new transactions + * will be {@link TransactionMode#READ_WRITE_TRANSACTION}. Committing an empty transaction + * also does not generate a read timestamp or a commit timestamp, and calling one of the + * methods {@link Connection#getReadTimestamp()} or {@link Connection#getCommitTimestamp()} + * will cause an exception. + *
  • Calling this method on a connection with a {@link TransactionMode#READ_ONLY_TRANSACTION} + * transaction will end that transaction. If the connection is in read-write mode, any + * subsequent transaction will by default be a {@link + * TransactionMode#READ_WRITE_TRANSACTION} transaction, unless any following transaction is + * explicitly set to {@link TransactionMode#READ_ONLY_TRANSACTION} + *
  • Calling this method on a connection with a {@link TransactionMode#READ_WRITE_TRANSACTION} + * transaction will send all buffered mutations to the database, commit any DML statements + * that have been executed during this transaction and end the transaction. + *
+ */ + ApiFuture commitAsync(); + + /** + * Rollbacks the current transaction of this connection. All mutations or DDL statements that have + * been buffered during the current transaction will be removed from the buffer. + * + *

If the connection is in autocommit mode, and there is a temporary transaction active on this + * connection, calling this method will cause the connection to go back to autocommit mode after + * calling this method. + * + *

    + *
  • Calling this method on a connection in autocommit mode and with no temporary transaction + * will cause an exception + *
  • Calling this method while a DDL batch is active will cause an exception + *
  • Calling this method on a connection with a transaction that has not yet started, will end + * that transaction and any properties that might have been set on that transaction, and + * return the connection to its previous state. This means that if a transaction is created + * and set to read-only, and then rolled back before any statements have been executed, the + * read-only transaction is ended and any subsequent statements will be executed in a new + * transaction. If the connection is in read-write mode, the default for new transactions + * will be {@link TransactionMode#READ_WRITE_TRANSACTION}. + *
  • Calling this method on a connection with a {@link TransactionMode#READ_ONLY_TRANSACTION} + * transaction will end that transaction. If the connection is in read-write mode, any + * subsequent transaction will by default be a {@link + * TransactionMode#READ_WRITE_TRANSACTION} transaction, unless any following transaction is + * explicitly set to {@link TransactionMode#READ_ONLY_TRANSACTION} + *
  • Calling this method on a connection with a {@link TransactionMode#READ_WRITE_TRANSACTION} + * transaction will clear all buffered mutations, rollback any DML statements that have been + * executed during this transaction and end the transaction. + *
+ */ + void rollback(); + + /** + * Rollbacks the current transaction of this connection. All mutations or DDL statements that have + * been buffered during the current transaction will be removed from the buffer. + * + *

This method is guaranteed to be non-blocking. The returned {@link ApiFuture} will be done + * when the transaction has been rolled back. + * + *

If the connection is in autocommit mode, and there is a temporary transaction active on this + * connection, calling this method will cause the connection to go back to autocommit mode after + * calling this method. + * + *

    + *
  • Calling this method on a connection in autocommit mode and with no temporary transaction + * will cause an exception + *
  • Calling this method while a DDL batch is active will cause an exception + *
  • Calling this method on a connection with a transaction that has not yet started, will end + * that transaction and any properties that might have been set on that transaction, and + * return the connection to its previous state. This means that if a transaction is created + * and set to read-only, and then rolled back before any statements have been executed, the + * read-only transaction is ended and any subsequent statements will be executed in a new + * transaction. If the connection is in read-write mode, the default for new transactions + * will be {@link TransactionMode#READ_WRITE_TRANSACTION}. + *
  • Calling this method on a connection with a {@link TransactionMode#READ_ONLY_TRANSACTION} + * transaction will end that transaction. If the connection is in read-write mode, any + * subsequent transaction will by default be a {@link + * TransactionMode#READ_WRITE_TRANSACTION} transaction, unless any following transaction is + * explicitly set to {@link TransactionMode#READ_ONLY_TRANSACTION} + *
  • Calling this method on a connection with a {@link TransactionMode#READ_WRITE_TRANSACTION} + * transaction will clear all buffered mutations, rollback any DML statements that have been + * executed during this transaction and end the transaction. + *
+ */ + ApiFuture rollbackAsync(); + + /** Functional interface for the {@link #runTransaction(TransactionCallable)} method. */ + interface TransactionCallable { + /** This method is invoked with a fresh transaction on the connection. */ + T run(Connection transaction); + } + + /** + * Runs the given callable in a transaction. The transaction type is determined by the current + * state of the connection. That is; if the connection is in read/write mode, the transaction type + * will be a read/write transaction. If the connection is in read-only mode, it will be a + * read-only transaction. The transaction will automatically be retried if it is aborted by + * Spanner. + */ + T runTransaction(TransactionCallable callable); + + /** Returns the current savepoint support for this connection. */ + SavepointSupport getSavepointSupport(); + + /** Sets how savepoints should be supported on this connection. */ + void setSavepointSupport(SavepointSupport savepointSupport); + + /** Returns the current {@link DdlInTransactionMode} for this connection. */ + DdlInTransactionMode getDdlInTransactionMode(); + + /** Sets how the connection should behave if a DDL statement is executed during a transaction. */ + void setDdlInTransactionMode(DdlInTransactionMode ddlInTransactionMode); + + /** + * Returns the default sequence kind that will be set for this database if a DDL statement is + * executed that uses auto_increment or serial. + */ + String getDefaultSequenceKind(); + + /** + * Sets the default sequence kind that will be set for this database if a DDL statement is + * executed that uses auto_increment or serial. + */ + void setDefaultSequenceKind(String defaultSequenceKind); + + /** + * Creates a savepoint with the given name. + * + *

The uniqueness constraints on a savepoint name depends on the database dialect that is used: + * + *

    + *
  • {@link Dialect#GOOGLE_STANDARD_SQL} requires that savepoint names are unique within a + * transaction. The name of a savepoint that has been released or destroyed because the + * transaction has rolled back to a savepoint that was defined before that savepoint can be + * re-used within the transaction. + *
  • {@link Dialect#POSTGRESQL} follows the rules for savepoint names in PostgreSQL. This + * means that multiple savepoints in one transaction can have the same name, but only the + * last savepoint with a given name is visible. See PostgreSQL savepoint + * documentation for more information. + *
+ * + * @param name the name of the savepoint to create + * @throws SpannerException if a savepoint with the same name already exists and the dialect that + * is used is {@link Dialect#GOOGLE_STANDARD_SQL} + * @throws SpannerException if there is no transaction on this connection + * @throws SpannerException if internal retries have been disabled for this connection + */ + void savepoint(String name); + + /** + * Releases the savepoint with the given name. The savepoint and all later savepoints will be + * removed from the current transaction and can no longer be used. + * + * @param name the name of the savepoint to release + * @throws SpannerException if no savepoint with the given name exists + */ + void releaseSavepoint(String name); + + /** + * Rolls back to the given savepoint. Rolling back to a savepoint undoes all changes and releases + * all internal locks that have been taken by the transaction after the savepoint. Rolling back to + * a savepoint does not remove the savepoint from the transaction, and it is possible to roll back + * to the same savepoint multiple times. All savepoints that have been defined after the given + * savepoint are removed from the transaction. + * + * @param name the name of the savepoint to roll back to. + * @throws SpannerException if no savepoint with the given name exists. + * @throws AbortedDueToConcurrentModificationException if rolling back to the savepoint failed + * because another transaction has modified the data that has been read or modified by this + * transaction + */ + void rollbackToSavepoint(String name); + + /** + * @return true if this connection has a transaction (that has not necessarily + * started). This method will only return false when the {@link Connection} is in autocommit + * mode and no explicit transaction has been started by calling {@link + * Connection#beginTransaction()}. If the {@link Connection} is not in autocommit mode, there + * will always be a transaction. 
+ */ + boolean isInTransaction(); + + /** + * @return true if this connection has a transaction that has started. A transaction + * is automatically started by the first statement that is executed in the transaction. + */ + boolean isTransactionStarted(); + + /** + * Returns the read timestamp of the current/last {@link TransactionMode#READ_ONLY_TRANSACTION} + * transaction, or the read timestamp of the last query in autocommit mode. + * + *
    + *
  • When in autocommit mode: The method will return the read timestamp of the last statement + * if the last statement was a query. + *
  • When in a {@link TransactionMode#READ_ONLY_TRANSACTION} transaction that has started (a + * query has been executed), or that has just committed: The read timestamp of the + * transaction. If the read-only transaction was committed without ever executing a query, + * calling this method after the commit will also throw a {@link SpannerException} + *
  • In all other cases the method will throw a {@link SpannerException}. + *
+ * + * @return the read timestamp of the current/last read-only transaction. + */ + Timestamp getReadTimestamp(); + + /** + * @return the commit timestamp of the last {@link TransactionMode#READ_WRITE_TRANSACTION} + * transaction. This method throws a {@link SpannerException} if there is no last {@link + * TransactionMode#READ_WRITE_TRANSACTION} transaction. That is, if the last transaction was a + * {@link TransactionMode#READ_ONLY_TRANSACTION}), or if the last {@link + * TransactionMode#READ_WRITE_TRANSACTION} transaction rolled back. It also throws a {@link + * SpannerException} if the last {@link TransactionMode#READ_WRITE_TRANSACTION} transaction + * was empty when committed. + */ + Timestamp getCommitTimestamp(); + + /** + * @return the {@link CommitResponse} of the last {@link TransactionMode#READ_WRITE_TRANSACTION} + * transaction. This method throws a {@link SpannerException} if there is no last {@link + * TransactionMode#READ_WRITE_TRANSACTION} transaction. That is, if the last transaction was a + * {@link TransactionMode#READ_ONLY_TRANSACTION}), or if the last {@link + * TransactionMode#READ_WRITE_TRANSACTION} transaction rolled back. It also throws a {@link + * SpannerException} if the last {@link TransactionMode#READ_WRITE_TRANSACTION} transaction + * was empty when committed. + */ + CommitResponse getCommitResponse(); + + /** + * Starts a new DDL batch on this connection. A DDL batch allows several DDL statements to be + * grouped into a batch that can be executed as a group. DDL statements that are issued during the + * batch are buffered locally and will return immediately with an OK. It is not guaranteed that a + * DDL statement that has been issued during a batch will eventually succeed when running the + * batch. Aborting a DDL batch will clear the DDL buffer and will have made no changes to the + * database. Running a DDL batch will send all buffered DDL statements to Spanner, and Spanner + * will try to execute these. 
The result will be OK if all the statements executed successfully. + * If a statement cannot be executed, Spanner will stop execution at that point and return an + * error message for the statement that could not be executed. Preceding statements of the batch + * may have been executed. + * + *

This method may only be called when the connection is in read/write mode, autocommit mode is + * enabled or no read/write transaction has been started, and there is not already another batch + * active. The connection will only accept DDL statements while a DDL batch is active. + */ + void startBatchDdl(); + + /** + * Starts a new DML batch on this connection. A DML batch allows several DML statements to be + * grouped into a batch that can be executed as a group. DML statements that are issued during the + * batch are buffered locally and will return immediately with an OK. It is not guaranteed that a + * DML statement that has been issued during a batch will eventually succeed when running the + * batch. Aborting a DML batch will clear the DML buffer and will have made no changes to the + * database. Running a DML batch will send all buffered DML statements to Spanner, and Spanner + * will try to execute these. The result will be OK if all the statements executed successfully. + * If a statement cannot be executed, Spanner will stop execution at that point and return {@link + * SpannerBatchUpdateException} for the statement that could not be executed. Preceding statements + * of the batch will have been executed, and the update counts of those statements can be + * retrieved through {@link SpannerBatchUpdateException#getUpdateCounts()}. + * + *

This method may only be called when the connection is in read/write mode, autocommit mode is + * enabled or no read/write transaction has been started, and there is not already another batch + * active. The connection will only accept DML statements while a DML batch is active. + */ + void startBatchDml(); + + /** + * Sends all buffered DML or DDL statements of the current batch to the database, waits for these + * to be executed and ends the current batch. The method will throw an exception for the first + * statement that cannot be executed, or return successfully if all statements could be executed. + * If an exception is thrown for a statement in the batch, the preceding statements in the same + * batch may still have been applied to the database. + * + *

This method may only be called when a (possibly empty) batch is active. + * + * @return the update counts in case of a DML batch. Returns an array containing 1 for each + * successful statement and 0 for each failed statement or statement that was not executed in + * case of a DDL batch. + */ + long[] runBatch(); + + /** + * Sends all buffered DML or DDL statements of the current batch to the database, waits for these + * to be executed and ends the current batch. The method will throw an exception for the first + * statement that cannot be executed, or return successfully if all statements could be executed. + * If an exception is thrown for a statement in the batch, the preceding statements in the same + * batch may still have been applied to the database. + * + *

This method is guaranteed to be non-blocking. The returned {@link ApiFuture} will be done + * when the batch has been successfully applied, or when one or more of the statements in the + * batch has failed and the further execution of the batch has been halted. + * + *

This method may only be called when a (possibly empty) batch is active. + * + * @return an {@link ApiFuture} containing the update counts in case of a DML batch. The {@link + * ApiFuture} contains an array containing 1 for each successful statement and 0 for each + * failed statement or statement that was not executed in case of a DDL batch. + */ + ApiFuture runBatchAsync(); + + /** + * Clears all buffered statements in the current batch and ends the batch. + * + *

This method may only be called when a (possibly empty) batch is active. + */ + void abortBatch(); + + /** + * @return true if a DDL batch is active on this connection. + */ + boolean isDdlBatchActive(); + + /** + * @return true if a DML batch is active on this connection. + */ + boolean isDmlBatchActive(); + + /** + * Executes the given statement if allowed in the current {@link TransactionMode} and connection + * state. The returned value depends on the type of statement: + * + *

    + *
  • Queries and DML statements with returning clause will return a {@link ResultSet}. + *
  • Simple DML statements will return an update count + *
  • DDL statements will return a {@link ResultType#NO_RESULT} + *
  • Connection and transaction statements (SET AUTOCOMMIT=TRUE|FALSE, SHOW AUTOCOMMIT, SET + * TRANSACTION READ ONLY, etc) will return either a {@link ResultSet} or {@link + * ResultType#NO_RESULT}, depending on the type of statement (SHOW or SET) + *
+ * + * @param statement The statement to execute + * @return the result of the statement + */ + StatementResult execute(Statement statement); + + /** + * Executes the given statement if allowed in the current {@link TransactionMode} and connection + * state, and if the result that would be returned is in the set of allowed result types. The + * statement will not be sent to Cloud Spanner if the result type would not be allowed. This + * method can be used by drivers that must limit the type of statements that are allowed for a + * given method, e.g. for the {@link java.sql.Statement#executeQuery(String)} and {@link + * java.sql.Statement#executeUpdate(String)} methods. + * + *

The returned value depends on the type of statement: + * + *

    + *
  • Queries and DML statements with returning clause will return a {@link ResultSet}. + *
  • Simple DML statements will return an update count + *
  • DDL statements will return a {@link ResultType#NO_RESULT} + *
  • Connection and transaction statements (SET AUTOCOMMIT=TRUE|FALSE, SHOW AUTOCOMMIT, SET + * TRANSACTION READ ONLY, etc) will return either a {@link ResultSet} or {@link + * ResultType#NO_RESULT}, depending on the type of statement (SHOW or SET) + *
+ * + * @param statement The statement to execute + * @param allowedResultTypes The result types that this method may return. The statement will not + * be sent to Cloud Spanner if the statement would return a result that is not one of the + * types in this set. + * @return the result of the statement + */ + default StatementResult execute(Statement statement, Set allowedResultTypes) { + throw new UnsupportedOperationException("Not implemented"); + } + + /** + * Executes the given statement if allowed in the current {@link TransactionMode} and connection + * state asynchronously. The returned value depends on the type of statement: + * + *
    + *
  • Queries and DML statements with returning clause will return an {@link AsyncResultSet}. + *
  • Simple DML statements will return an {@link ApiFuture} with an update count that is done + * when the DML statement has been applied successfully, or that throws an {@link + * ExecutionException} if the DML statement failed. + *
  • DDL statements will return an {@link ApiFuture} containing a {@link Void} that is done + * when the DDL statement has been applied successfully, or that throws an {@link + * ExecutionException} if the DDL statement failed. + *
  • Connection and transaction statements (SET AUTOCOMMIT=TRUE|FALSE, SHOW AUTOCOMMIT, SET + * TRANSACTION READ ONLY, etc) will return either a {@link ResultSet} or {@link + * ResultType#NO_RESULT}, depending on the type of statement (SHOW or SET) + *
+ * + * This method is guaranteed to be non-blocking. + * + * @param statement The statement to execute + * @return the result of the statement + */ + AsyncStatementResult executeAsync(Statement statement); + + /** + * Executes the given statement (a query or a DML statement with returning clause) and returns the + * result as a {@link ResultSet}. This method blocks and waits for a response from Spanner. If the + * statement does not contain a valid query or a DML statement with returning clause, the method + * will throw a {@link SpannerException}. + * + * @param query The query statement or DML statement with returning clause to execute + * @param options the options to configure the query + * @return a {@link ResultSet} with the results of the statement + */ + ResultSet executeQuery(Statement query, QueryOption... options); + + /** + * Executes the given statement (a query or a DML statement with returning clause) asynchronously + * and returns the result as an {@link AsyncResultSet}. This method is guaranteed to be + * non-blocking. If the statement does not contain a valid query or a DML statement with returning + * clause, the method will throw a {@link SpannerException}. + * + *

See {@link AsyncResultSet#setCallback(java.util.concurrent.Executor, + * com.google.cloud.spanner.AsyncResultSet.ReadyCallback)} for more information on how to consume + * the results of the statement asynchronously. + * + *

It is also possible to consume the returned {@link AsyncResultSet} in the same way as a + * normal {@link ResultSet}, i.e. in a while-loop calling {@link AsyncResultSet#next()}. + * + * @param query The query statement or DML statement with returning clause to execute + * @param options the options to configure the query + * @return an {@link AsyncResultSet} with the results of the statement + */ + AsyncResultSet executeQueryAsync(Statement query, QueryOption... options); + + /** + * Analyzes a query or a DML statement and returns query plan and/or query execution statistics + * information. + * + *

The query plan and query statistics information is contained in {@link + * com.google.spanner.v1.ResultSetStats} that can be accessed by calling {@link + * ResultSet#getStats()} on the returned {@code ResultSet}. + * + *

+   * 
+   * {@code
+   * ResultSet resultSet =
+   *     connection.analyzeQuery(
+   *         Statement.of("SELECT SingerId, AlbumId, MarketingBudget FROM Albums"),
+   *         ReadContext.QueryAnalyzeMode.PROFILE);
+   * while (resultSet.next()) {
+   *   // Discard the results. We're only processing because getStats() below requires it.
+   * }
+   * ResultSetStats stats = resultSet.getStats();
+   * }
+   * 
+   * 
+ * + * @param query the query statement to execute + * @param queryMode the mode in which to execute the query + */ + ResultSet analyzeQuery(Statement query, QueryAnalyzeMode queryMode); + + /** + * Enables or disables automatic batching of DML statements. When enabled, DML statements that are + * executed on this connection will be buffered in memory instead of actually being executed. The + * buffered DML statements are flushed to Spanner when a statement that cannot be part of a DML + * batch is executed on the connection. This can be a query, a DDL statement with a THEN RETURN + * clause, or a Commit call. The update count that is returned for DML statements that are + * buffered is determined by the value that has been set with {@link + * #setAutoBatchDmlUpdateCount(long)}. The default is 1. The connection verifies that the update + * counts that were returned while buffering DML statements match the actual update counts that + * are returned by Spanner when the batch is executed. This verification can be disabled by + * calling {@link #setAutoBatchDmlUpdateCountVerification(boolean)}. + */ + void setAutoBatchDml(boolean autoBatchDml); + + /** Returns whether automatic DML batching is enabled on this connection. */ + boolean isAutoBatchDml(); + + /** + * Sets the update count that is returned for DML statements that are buffered during an automatic + * DML batch. This value is only used if {@link #isAutoBatchDml()} is enabled. + */ + void setAutoBatchDmlUpdateCount(long updateCount); + + /** + * Returns the update count that is returned for DML statements that are buffered during an + * automatic DML batch. + */ + long getAutoBatchDmlUpdateCount(); + + /** + * Sets whether the update count that is returned by Spanner after executing an automatic DML + * batch should be verified against the update counts that were returned during the buffering of + * those statements. 
+ */ + void setAutoBatchDmlUpdateCountVerification(boolean verification); + + /** Indicates whether the update counts of automatic DML batches should be verified. */ + boolean isAutoBatchDmlUpdateCountVerification(); + + /** + * Enable data boost for partitioned queries. See also {@link #partitionQuery(Statement, + * PartitionOptions, QueryOption...)} + */ + void setDataBoostEnabled(boolean dataBoostEnabled); + + /** + * Returns whether data boost is enabled for partitioned queries. See also {@link + * #partitionQuery(Statement, PartitionOptions, QueryOption...)} + */ + boolean isDataBoostEnabled(); + + /** + * Sets whether this connection should always use partitioned queries when a query is executed on + * this connection. Setting this flag to true and then executing a query that cannot + * be partitioned, or executing a query in a read/write transaction, will cause an error. Use this + * flag in combination with {@link #setDataBoostEnabled(boolean)} to force all queries on this + * connection to use data boost. + */ + void setAutoPartitionMode(boolean autoPartitionMode); + + /** Returns whether this connection will execute all queries as partitioned queries. */ + boolean isAutoPartitionMode(); + + /** + * Sets the maximum number of partitions that should be included as a hint to Cloud Spanner when + * partitioning a query on this connection. Note that this is only a hint and Cloud Spanner might + * choose to ignore the hint. + */ + void setMaxPartitions(int maxPartitions); + + /** + * Gets the maximum number of partitions that should be included as a hint to Cloud Spanner when + * partitioning a query on this connection. Note that this is only a hint and Cloud Spanner might + * choose to ignore the hint. + */ + int getMaxPartitions(); + + /** + * Partitions the given query, so it can be executed in parallel. This method returns a {@link + * ResultSet} with a string-representation of the partitions that were created. 
These strings can + * be used to execute a partition either on this connection or an any other connection (on this + * host or an any other host) by calling the method {@link #runPartition(String)}. This method + * will automatically enable data boost for the query if {@link #isDataBoostEnabled()} returns + * true. + */ + ResultSet partitionQuery( + Statement query, PartitionOptions partitionOptions, QueryOption... options); + + /** + * Executes the given partition of a query. The encodedPartitionId should be a string that was + * returned by {@link #partitionQuery(Statement, PartitionOptions, QueryOption...)}. + */ + ResultSet runPartition(String encodedPartitionId); + + /** + * Sets the maximum degree of parallelism that is used when executing a partitioned query using + * {@link #runPartitionedQuery(Statement, PartitionOptions, QueryOption...)}. The method will use + * up to maxThreads to execute and retrieve the results from Cloud Spanner. Set this + * value to 0> to use the number of available processors as returned by {@link + * Runtime#availableProcessors()}. + */ + void setMaxPartitionedParallelism(int maxThreads); + + /** + * Returns the maximum degree of parallelism that is used for {@link + * #runPartitionedQuery(Statement, PartitionOptions, QueryOption...)} + */ + int getMaxPartitionedParallelism(); + + /** + * Executes the given query as a partitioned query. The query will first be partitioned using the + * {@link #partitionQuery(Statement, PartitionOptions, QueryOption...)} method. Each of the + * partitions will then be executed in the background, and the results will be merged into a + * single result set. + * + *

This method will use maxPartitionedParallelism threads to execute the + * partitioned query. Set this variable to a higher/lower value to increase/decrease the degree of + * parallelism used for execution. + */ + PartitionedQueryResultSet runPartitionedQuery( + Statement query, PartitionOptions partitionOptions, QueryOption... options); + + /** + * Executes the given statement as a simple DML statement. If the statement does not contain a + * valid DML statement, the method will throw a {@link SpannerException}. + * + * @param update The update statement to execute + * @return the number of records that were inserted/updated/deleted by this statement + */ + long executeUpdate(Statement update); + + /** + * Analyzes a DML statement and returns query plan and/or execution statistics information. + * + *

{@link com.google.cloud.spanner.ReadContext.QueryAnalyzeMode#PLAN} only returns the plan for + * the statement. {@link com.google.cloud.spanner.ReadContext.QueryAnalyzeMode#PROFILE} executes + * the DML statement, returns the modified row count and execution statistics, and the effects of + * the DML statement will be visible to subsequent operations in the transaction. + * + * @deprecated Use {@link #analyzeUpdateStatement(Statement, QueryAnalyzeMode, UpdateOption...)} + * instead + */ + @Deprecated + default ResultSetStats analyzeUpdate(Statement update, QueryAnalyzeMode analyzeMode) { + throw new UnsupportedOperationException("Not implemented"); + } + + /** + * Analyzes a DML statement and returns execution plan, undeclared parameters and optionally + * execution statistics information. + * + *

{@link com.google.cloud.spanner.ReadContext.QueryAnalyzeMode#PLAN} only returns the plan and + * undeclared parameters for the statement. {@link + * com.google.cloud.spanner.ReadContext.QueryAnalyzeMode#PROFILE} also executes the DML statement, + * returns the modified row count and execution statistics, and the effects of the DML statement + * will be visible to subsequent operations in the transaction. + */ + default ResultSet analyzeUpdateStatement( + Statement statement, QueryAnalyzeMode analyzeMode, UpdateOption... options) { + throw new UnsupportedOperationException("Not implemented"); + } + + /** + * Executes the given statement asynchronously as a simple DML statement. If the statement does + * not contain a simple DML statement, the method will throw a {@link SpannerException}. A DML + * statement with returning clause will throw a {@link SpannerException}. + * + *

This method is guaranteed to be non-blocking. + * + * @param update The update statement to execute + * @return an {@link ApiFuture} containing the number of records that were + * inserted/updated/deleted by this statement + */ + ApiFuture executeUpdateAsync(Statement update); + + /** + * Executes a list of DML statements (can be simple DML statements or DML statements with + * returning clause) in a single request. The statements will be executed in order and the + * semantics is the same as if each statement is executed by {@link + * Connection#executeUpdate(Statement)} in a loop. This method returns an array of long integers, + * each representing the number of rows modified by each statement. + * + *

If an individual statement fails, execution stops and a {@code SpannerBatchUpdateException} + * is returned, which includes the error and the number of rows affected by the statements that + * are run prior to the error. + * + *

For example, if statements contains 3 statements, and the 2nd one is not a valid DML. This + * method throws a {@code SpannerBatchUpdateException} that contains the error message from the + * 2nd statement, and an array of length 1 that contains the number of rows modified by the 1st + * statement. The 3rd statement will not run. Executes the given statements as DML statements in + * one batch. If one of the statements does not contain a valid DML statement, the method will + * throw a {@link SpannerException}. + * + * @param updates The update statements that will be executed as one batch. + * @return an array containing the update counts per statement. + */ + long[] executeBatchUpdate(Iterable updates); + + /** + * Executes a list of DML statements (can be simple DML statements or DML statements with + * returning clause) in a single request. The statements will be executed in order and the + * semantics is the same as if each statement is executed by {@link + * Connection#executeUpdate(Statement)} in a loop. This method returns an {@link ApiFuture} that + * contains an array of long integers, each representing the number of rows modified by each + * statement. + * + *

This method is guaranteed to be non-blocking. + * + *

If an individual statement fails, execution stops and a {@code SpannerBatchUpdateException} + * is returned, which includes the error and the number of rows affected by the statements that + * are run prior to the error. + * + *

For example, if statements contains 3 statements, and the 2nd one is not a valid DML statement, + * this method throws a {@code SpannerBatchUpdateException} that contains the error message from the + * 2nd statement, and an array of length 1 that contains the number of rows modified by the 1st + * statement. The 3rd statement will not run. Executes the given statements as DML statements in + * one batch. If one of the statements does not contain a valid DML statement, the method will + * throw a {@link SpannerException}. + * + * @param updates The update statements that will be executed as one batch. + * @return an {@link ApiFuture} containing an array with the update counts per statement. + */ + ApiFuture executeBatchUpdateAsync(Iterable updates); + + /** + * Writes the specified mutation directly to the database and commits the change. The value is + * readable after the successful completion of this method. Writing multiple mutations to a + * database by calling this method multiple times is inefficient, as each call will need a + * round trip to the database. Instead, you should consider writing the mutations together by + * calling {@link Connection#write(Iterable)}. + * + *

Calling this method is only allowed in autocommit mode. See {@link + * Connection#bufferedWrite(Iterable)} for writing mutations in transactions. + * + * @param mutation The {@link Mutation} to write to the database + * @throws SpannerException if the {@link Connection} is not in autocommit mode + */ + void write(Mutation mutation); + + /** + * Writes the specified mutation directly to the database and commits the change. The value is + * readable after the successful completion of the returned {@link ApiFuture}. Writing multiple + * mutations to a database by calling this method multiple times is inefficient, as each call + * will need a round trip to the database. Instead, you should consider writing the mutations + * together by calling {@link Connection#writeAsync(Iterable)}. + * + *

This method is guaranteed to be non-blocking. + * + *

Calling this method is only allowed in autocommit mode. See {@link + * Connection#bufferedWrite(Iterable)} for writing mutations in transactions. + * + * @param mutation The {@link Mutation} to write to the database + * @throws SpannerException if the {@link Connection} is not in autocommit mode + */ + ApiFuture writeAsync(Mutation mutation); + + /** + * Writes the specified mutations directly to the database and commits the changes. The values are + * readable after the successful completion of this method. + * + *

Calling this method is only allowed in autocommit mode. See {@link + * Connection#bufferedWrite(Iterable)} for writing mutations in transactions. + * + * @param mutations The {@link Mutation}s to write to the database + * @throws SpannerException if the {@link Connection} is not in autocommit mode + */ + void write(Iterable mutations); + + /** + * Writes the specified mutations directly to the database and commits the changes. The values are + * readable after the successful completion of the returned {@link ApiFuture}. + * + *

This method is guaranteed to be non-blocking. + * + *

Calling this method is only allowed in autocommit mode. See {@link + * Connection#bufferedWrite(Iterable)} for writing mutations in transactions. + * + * @param mutations The {@link Mutation}s to write to the database + * @throws SpannerException if the {@link Connection} is not in autocommit mode + */ + ApiFuture writeAsync(Iterable mutations); + + /** + * Buffers the given mutation locally on the current transaction of this {@link Connection}. The + * mutation will be written to the database at the next call to {@link Connection#commit()}. The + * value will not be readable on this {@link Connection} before the transaction is committed. + * + *

Calling this method is only allowed when not in autocommit mode. See {@link + * Connection#write(Mutation)} for writing mutations in autocommit mode. + * + * @param mutation the {@link Mutation} to buffer for writing to the database on the next commit + * @throws SpannerException if the {@link Connection} is in autocommit mode + */ + void bufferedWrite(Mutation mutation); + + /** + * Buffers the given mutations locally on the current transaction of this {@link Connection}. The + * mutations will be written to the database at the next call to {@link Connection#commit()}. The + * values will not be readable on this {@link Connection} before the transaction is committed. + * + *

Calling this method is only allowed when not in autocommit mode. See {@link + * Connection#write(Iterable)} for writing mutations in autocommit mode. + * + * @param mutations the {@link Mutation}s to buffer for writing to the database on the next commit + * @throws SpannerException if the {@link Connection} is in autocommit mode + */ + void bufferedWrite(Iterable mutations); + + /** The {@link Dialect} that is used by this {@link Connection}. */ + default Dialect getDialect() { + throw new UnsupportedOperationException("Not implemented"); + } + + /** The {@link DatabaseClient} that is used by this {@link Connection}. */ + @InternalApi + default DatabaseClient getDatabaseClient() { + throw new UnsupportedOperationException("Not implemented"); + } + + /** The {@link Spanner} instance that is used by this {@link Connection}. */ + @InternalApi + default Spanner getSpanner() { + throw new UnsupportedOperationException("Not implemented"); + } + + /** + * This query option is used internally to indicate that a query is executed by the library itself + * to fetch metadata. These queries are specifically allowed to be executed even when a DDL batch + * is active. + * + *

NOT INTENDED FOR EXTERNAL USE! + */ + @InternalApi + final class InternalMetadataQuery implements QueryOption { + @InternalApi public static final InternalMetadataQuery INSTANCE = new InternalMetadataQuery(); + + private InternalMetadataQuery() {} + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionImpl.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionImpl.java new file mode 100644 index 000000000000..cadd63757398 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionImpl.java @@ -0,0 +1,2578 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static com.google.cloud.spanner.connection.ConnectionOptions.isEnableTransactionalConnectionStateForPostgreSQL; +import static com.google.cloud.spanner.connection.ConnectionPreconditions.checkValidIdentifier; +import static com.google.cloud.spanner.connection.ConnectionProperties.AUTOCOMMIT; +import static com.google.cloud.spanner.connection.ConnectionProperties.AUTOCOMMIT_DML_MODE; +import static com.google.cloud.spanner.connection.ConnectionProperties.AUTO_BATCH_DML; +import static com.google.cloud.spanner.connection.ConnectionProperties.AUTO_BATCH_DML_UPDATE_COUNT; +import static com.google.cloud.spanner.connection.ConnectionProperties.AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION; +import static com.google.cloud.spanner.connection.ConnectionProperties.AUTO_PARTITION_MODE; +import static com.google.cloud.spanner.connection.ConnectionProperties.BATCH_DML_UPDATE_COUNT; +import static com.google.cloud.spanner.connection.ConnectionProperties.DATA_BOOST_ENABLED; +import static com.google.cloud.spanner.connection.ConnectionProperties.DDL_IN_TRANSACTION_MODE; +import static com.google.cloud.spanner.connection.ConnectionProperties.DEFAULT_ISOLATION_LEVEL; +import static com.google.cloud.spanner.connection.ConnectionProperties.DEFAULT_SEQUENCE_KIND; +import static com.google.cloud.spanner.connection.ConnectionProperties.DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE; +import static com.google.cloud.spanner.connection.ConnectionProperties.DIRECTED_READ; +import static com.google.cloud.spanner.connection.ConnectionProperties.KEEP_TRANSACTION_ALIVE; +import static com.google.cloud.spanner.connection.ConnectionProperties.MAX_COMMIT_DELAY; +import static com.google.cloud.spanner.connection.ConnectionProperties.MAX_PARTITIONED_PARALLELISM; +import static com.google.cloud.spanner.connection.ConnectionProperties.MAX_PARTITIONS; +import static 
com.google.cloud.spanner.connection.ConnectionProperties.OPTIMIZER_STATISTICS_PACKAGE; +import static com.google.cloud.spanner.connection.ConnectionProperties.OPTIMIZER_VERSION; +import static com.google.cloud.spanner.connection.ConnectionProperties.READONLY; +import static com.google.cloud.spanner.connection.ConnectionProperties.READ_LOCK_MODE; +import static com.google.cloud.spanner.connection.ConnectionProperties.READ_ONLY_STALENESS; +import static com.google.cloud.spanner.connection.ConnectionProperties.RETRY_ABORTS_INTERNALLY; +import static com.google.cloud.spanner.connection.ConnectionProperties.RETURN_COMMIT_STATS; +import static com.google.cloud.spanner.connection.ConnectionProperties.RPC_PRIORITY; +import static com.google.cloud.spanner.connection.ConnectionProperties.SAVEPOINT_SUPPORT; +import static com.google.cloud.spanner.connection.ConnectionProperties.STATEMENT_TIMEOUT; +import static com.google.cloud.spanner.connection.ConnectionProperties.TRACING_PREFIX; +import static com.google.cloud.spanner.connection.ConnectionProperties.TRANSACTION_TIMEOUT; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.core.GaxProperties; +import com.google.cloud.ByteArray; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AsyncResultSet; +import com.google.cloud.spanner.BatchClient; +import com.google.cloud.spanner.BatchReadOnlyTransaction; +import com.google.cloud.spanner.CommitResponse; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.Options.ReadQueryUpdateTransactionOption; +import com.google.cloud.spanner.Options.RpcPriority; +import com.google.cloud.spanner.Options.UpdateOption; +import 
com.google.cloud.spanner.PartitionOptions; +import com.google.cloud.spanner.ReadContext.QueryAnalyzeMode; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.ResultSets; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.TimestampBound.Mode; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; +import com.google.cloud.spanner.connection.ConnectionProperty.Context; +import com.google.cloud.spanner.connection.ConnectionState.Type; +import com.google.cloud.spanner.connection.StatementExecutor.StatementExecutorType; +import com.google.cloud.spanner.connection.StatementExecutor.StatementTimeout; +import com.google.cloud.spanner.connection.StatementResult.ResultType; +import com.google.cloud.spanner.connection.UnitOfWork.CallType; +import com.google.cloud.spanner.connection.UnitOfWork.EndTransactionCallback; +import com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.base.Suppliers; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; +import com.google.spanner.v1.RequestOptions; +import com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode; +import io.grpc.Deadline; +import io.grpc.Deadline.Ticker; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import 
io.opentelemetry.api.common.AttributesBuilder; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.Tracer; +import java.io.File; +import java.io.InputStream; +import java.nio.file.Files; +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; +import java.util.Stack; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; + +/** Implementation for {@link Connection}, the generic Spanner connection API (not JDBC). */ +class ConnectionImpl implements Connection { + private static final String INSTRUMENTATION_SCOPE = "cloud.google.com/java"; + private static final String SINGLE_USE_TRANSACTION = "SingleUseTransaction"; + private static final String READ_ONLY_TRANSACTION = "ReadOnlyTransaction"; + private static final String READ_WRITE_TRANSACTION = "ReadWriteTransaction"; + private static final String DDL_BATCH = "DdlBatch"; + private static final String DDL_STATEMENT = "DdlStatement"; + + private static final String CLOSED_ERROR_MSG = "This connection is closed"; + private static final String ONLY_ALLOWED_IN_AUTOCOMMIT = + "This method may only be called while in autocommit mode"; + private static final String NOT_ALLOWED_IN_AUTOCOMMIT = + "This method may not be called while in autocommit mode"; + private static final ParsedStatement COMMIT_STATEMENT = + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + .parse(Statement.of("COMMIT")); + private static final ParsedStatement ROLLBACK_STATEMENT = + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + 
.parse(Statement.of("ROLLBACK")); + private static final ParsedStatement START_BATCH_DDL_STATEMENT = + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + .parse(Statement.of("START BATCH DDL")); + private static final ParsedStatement START_BATCH_DML_STATEMENT = + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + .parse(Statement.of("START BATCH DML")); + + // These SAVEPOINT statements are used as sentinels to recognize the start/rollback/release of a + // savepoint. + private static final ParsedStatement SAVEPOINT_STATEMENT = + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + .parse(Statement.of("SAVEPOINT s1")); + private static final ParsedStatement ROLLBACK_TO_STATEMENT = + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + .parse(Statement.of("ROLLBACK TO s1")); + private static final ParsedStatement RELEASE_STATEMENT = + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + .parse(Statement.of("RELEASE s1")); + + /** + * Exception that is used to register the stacktrace of the code that opened a {@link Connection}. + * This exception is logged if the application closes without first closing the connection. + */ + static class LeakedConnectionException extends RuntimeException { + private static final long serialVersionUID = 7119433786832158700L; + + private LeakedConnectionException() { + super("Connection was opened at " + Instant.now()); + } + } + + private volatile LeakedConnectionException leakedException; + private final SpannerPool spannerPool; + private AbstractStatementParser statementParser; + + /** + * The {@link ConnectionStatementExecutor} is responsible for translating parsed {@link + * ClientSideStatement}s into actual method calls on this {@link ConnectionImpl}. I.e. the {@link + * ClientSideStatement} 'SET AUTOCOMMIT ON' will be translated into the method call {@link + * ConnectionImpl#setAutocommit(boolean)} with value true. 
+ */ + private final ConnectionStatementExecutor connectionStatementExecutor = + new ConnectionStatementExecutorImpl(this); + + /** + * Statements are executed using a separate thread in order to be able to cancel these. Statements + * are automatically cancelled if the configured {@link ConnectionImpl#statementTimeout} is + * exceeded. In autocommit mode, the connection will try to rollback the effects of an update + * statement, but this is not guaranteed to actually succeed. + */ + private final StatementExecutor statementExecutor; + + /** + * The {@link ConnectionOptions} that were used to create this {@link ConnectionImpl}. This is + * retained as it is used for getting a {@link Spanner} object and removing this connection from + * the {@link SpannerPool}. + */ + private final ConnectionOptions options; + + enum Caller { + APPLICATION, + TRANSACTION_RUNNER, + } + + /** The supported batch modes. */ + enum BatchMode { + NONE, + DDL, + DML + } + + /** The combination of all transaction modes and batch modes. 
*/ + enum UnitOfWorkType { + READ_ONLY_TRANSACTION { + @Override + TransactionMode getTransactionMode() { + return TransactionMode.READ_ONLY_TRANSACTION; + } + }, + READ_WRITE_TRANSACTION { + @Override + TransactionMode getTransactionMode() { + return TransactionMode.READ_WRITE_TRANSACTION; + } + }, + DML_BATCH { + @Override + TransactionMode getTransactionMode() { + return TransactionMode.READ_WRITE_TRANSACTION; + } + }, + DDL_BATCH { + @Override + TransactionMode getTransactionMode() { + return null; + } + }; + + abstract TransactionMode getTransactionMode(); + + static UnitOfWorkType of(TransactionMode transactionMode) { + switch (transactionMode) { + case READ_ONLY_TRANSACTION: + return UnitOfWorkType.READ_ONLY_TRANSACTION; + case READ_WRITE_TRANSACTION: + return UnitOfWorkType.READ_WRITE_TRANSACTION; + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Unknown transaction mode: " + transactionMode); + } + } + } + + private final Ticker ticker; + private StatementExecutor.StatementTimeout statementTimeout = + new StatementExecutor.StatementTimeout(); + private boolean closed = false; + + private final Spanner spanner; + private final Tracer tracer; + private final Attributes openTelemetryAttributes; + private final DdlClient ddlClient; + private final DatabaseClient dbClient; + private final BatchClient batchClient; + private final ConnectionState connectionState; + + private UnitOfWork currentUnitOfWork = null; + + /** + * This field is only used in autocommit mode to indicate that the user has explicitly started a + * transaction. + */ + private boolean inTransaction = false; + + /** + * This field is used to indicate that a transaction begin has been indicated. This is done by + * calling beginTransaction or by setting a transaction property while not in autocommit mode. + */ + private boolean transactionBeginMarked = false; + + /** This field is set to true when a transaction runner is active for this connection. 
*/ + private boolean transactionRunnerActive = false; + + private BatchMode batchMode; + private UnitOfWorkType unitOfWorkType; + private final Stack transactionStack = new Stack<>(); + private final List transactionRetryListeners = new ArrayList<>(); + + // The following properties are not 'normal' connection properties, but transient properties that + // are automatically reset after executing a transaction or statement. + private IsolationLevel transactionIsolationLevel; + private String transactionTag; + private String statementTag; + private RequestOptions.ClientContext clientContext; + private boolean excludeTxnFromChangeStreams; + private byte[] protoDescriptors; + private String protoDescriptorsFilePath; + + /** Create a connection and register it in the SpannerPool. */ + ConnectionImpl(ConnectionOptions options) { + Preconditions.checkNotNull(options); + this.leakedException = + options.isTrackConnectionLeaks() ? new LeakedConnectionException() : null; + StatementExecutorType statementExecutorType; + if (options.getStatementExecutorType() != null) { + statementExecutorType = options.getStatementExecutorType(); + } else { + statementExecutorType = + options.isUseVirtualThreads() + ? 
StatementExecutorType.VIRTUAL_THREAD + : StatementExecutorType.PLATFORM_THREAD; + } + this.ticker = options.getTicker(); + this.statementExecutor = + new StatementExecutor(statementExecutorType, options.getStatementExecutionInterceptors()); + this.spannerPool = SpannerPool.INSTANCE; + this.options = options; + this.spanner = spannerPool.getSpanner(options, this); + this.tracer = + spanner + .getOptions() + .getOpenTelemetry() + .getTracer( + INSTRUMENTATION_SCOPE, + GaxProperties.getLibraryVersion(spanner.getOptions().getClass())); + this.openTelemetryAttributes = createOpenTelemetryAttributes(options.getDatabaseId()); + if (options.isAutoConfigEmulator()) { + EmulatorUtil.maybeCreateInstanceAndDatabase( + spanner, options.getDatabaseId(), options.getDialect()); + } + this.dbClient = spanner.getDatabaseClient(options.getDatabaseId()); + this.batchClient = spanner.getBatchClient(options.getDatabaseId()); + this.ddlClient = createDdlClient(); + this.connectionState = + new ConnectionState( + options.getInitialConnectionPropertyValues(), + Suppliers.memoize( + () -> + isEnableTransactionalConnectionStateForPostgreSQL() + && getDialect() == Dialect.POSTGRESQL + ? Type.TRANSACTIONAL + : Type.NON_TRANSACTIONAL)); + setInitialStatementTimeout(options.getInitialConnectionPropertyValue(STATEMENT_TIMEOUT)); + // (Re)set the state of the connection to the default. + setDefaultTransactionOptions(getDefaultIsolationLevel()); + } + + /** Constructor only for test purposes. */ + @VisibleForTesting + ConnectionImpl( + ConnectionOptions options, + SpannerPool spannerPool, + DdlClient ddlClient, + DatabaseClient dbClient, + BatchClient batchClient) { + this.leakedException = + options.isTrackConnectionLeaks() ? new LeakedConnectionException() : null; + this.statementExecutor = + new StatementExecutor( + options.isUseVirtualThreads() + ? 
StatementExecutorType.VIRTUAL_THREAD + : StatementExecutorType.PLATFORM_THREAD, + Collections.emptyList()); + this.ticker = options.getTicker(); + this.spannerPool = Preconditions.checkNotNull(spannerPool); + this.options = Preconditions.checkNotNull(options); + this.spanner = spannerPool.getSpanner(options, this); + this.tracer = OpenTelemetry.noop().getTracer(INSTRUMENTATION_SCOPE); + this.openTelemetryAttributes = Attributes.empty(); + this.ddlClient = Preconditions.checkNotNull(ddlClient); + this.dbClient = Preconditions.checkNotNull(dbClient); + this.batchClient = Preconditions.checkNotNull(batchClient); + this.connectionState = + new ConnectionState( + options.getInitialConnectionPropertyValues(), + Suppliers.ofInstance(Type.NON_TRANSACTIONAL)); + setInitialStatementTimeout(options.getInitialConnectionPropertyValue(STATEMENT_TIMEOUT)); + setReadOnly(options.isReadOnly()); + setAutocommit(options.isAutocommit()); + setReturnCommitStats(options.isReturnCommitStats()); + setDefaultTransactionOptions(getDefaultIsolationLevel()); + } + + @Override + public Spanner getSpanner() { + return this.spanner; + } + + private void setInitialStatementTimeout(Duration duration) { + if (duration == null || duration.isZero()) { + return; + } + com.google.protobuf.Duration protoDuration = + com.google.protobuf.Duration.newBuilder() + .setSeconds(duration.getSeconds()) + .setNanos(duration.getNano()) + .build(); + TimeUnit unit = + ReadOnlyStalenessUtil.getAppropriateTimeUnit( + new ReadOnlyStalenessUtil.DurationGetter(protoDuration)); + setStatementTimeout(ReadOnlyStalenessUtil.durationToUnits(protoDuration, unit), unit); + } + + private DdlClient createDdlClient() { + return DdlClient.newBuilder() + .setDatabaseAdminClient(spanner.getDatabaseAdminClient()) + .setDialectSupplier(this::getDialect) + .setProjectId(options.getProjectId()) + .setInstanceId(options.getInstanceId()) + .setDatabaseName(options.getDatabaseName()) + .build(); + } + + private AbstractStatementParser 
getStatementParser() { + if (this.statementParser == null) { + this.statementParser = AbstractStatementParser.getInstance(dbClient.getDialect()); + } + return this.statementParser; + } + + Attributes getOpenTelemetryAttributes() { + return this.openTelemetryAttributes; + } + + @VisibleForTesting + static Attributes createOpenTelemetryAttributes(DatabaseId databaseId) { + AttributesBuilder attributesBuilder = Attributes.builder(); + attributesBuilder.put("connection_id", UUID.randomUUID().toString()); + attributesBuilder.put("database", databaseId.getDatabase()); + attributesBuilder.put("instance_id", databaseId.getInstanceId().getInstance()); + attributesBuilder.put("project_id", databaseId.getInstanceId().getProject()); + return attributesBuilder.build(); + } + + @VisibleForTesting + ConnectionState.Type getConnectionStateType() { + return this.connectionState.getType(); + } + + @Override + public void close() { + try { + closeAsync().get(10L, TimeUnit.SECONDS); + } catch (SpannerException | InterruptedException | ExecutionException | TimeoutException e) { + // ignore and continue to close the connection. + } finally { + statementExecutor.shutdownNow(); + } + } + + public ApiFuture closeAsync() { + synchronized (this) { + if (!isClosed()) { + List> futures = new ArrayList<>(); + if (isBatchActive()) { + abortBatch(); + } + if (isTransactionStarted()) { + try { + futures.add(rollbackAsync()); + } catch (Exception exception) { + // ignore and continue to close the connection. + } + } + // Try to wait for the current statement to finish (if any) before we actually close the + // connection. + this.closed = true; + // Add a no-op statement to the executor. Once this has been executed, we know that all + // preceding statements have also been executed, as the executor is single-threaded and + // executes all statements in order of submitting. 
The Executor#submit method can throw a + // RejectedExecutionException if the executor is no longer in state where it accepts new + // tasks. + try { + futures.add(statementExecutor.submit(() -> null)); + } catch (RejectedExecutionException ignored) { + // ignore and continue to close the connection. + } + statementExecutor.shutdown(); + leakedException = null; + spannerPool.removeConnection(options, this); + return ApiFutures.transform( + ApiFutures.allAsList(futures), ignored -> null, MoreExecutors.directExecutor()); + } + } + return ApiFutures.immediateFuture(null); + } + + private Context getCurrentContext() { + return Context.USER; + } + + /** + * Resets the state of this connection to the default state in the {@link ConnectionOptions} of + * this connection. + */ + public void reset() { + reset(getCurrentContext(), isInTransaction()); + } + + private void reset(Context context, boolean inTransaction) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + + // TODO: Replace all of these with a resetAll in ConnectionState. 
+ this.connectionState.resetValue(RETRY_ABORTS_INTERNALLY, context, inTransaction); + this.connectionState.resetValue(AUTOCOMMIT, context, inTransaction); + this.connectionState.resetValue(READONLY, context, inTransaction); + this.connectionState.resetValue(DEFAULT_ISOLATION_LEVEL, context, inTransaction); + this.connectionState.resetValue(READ_LOCK_MODE, context, inTransaction); + this.connectionState.resetValue(TRANSACTION_TIMEOUT, context, inTransaction); + this.connectionState.resetValue(READ_ONLY_STALENESS, context, inTransaction); + this.connectionState.resetValue(OPTIMIZER_VERSION, context, inTransaction); + this.connectionState.resetValue(OPTIMIZER_STATISTICS_PACKAGE, context, inTransaction); + this.connectionState.resetValue(RPC_PRIORITY, context, inTransaction); + this.connectionState.resetValue(DDL_IN_TRANSACTION_MODE, context, inTransaction); + this.connectionState.resetValue(RETURN_COMMIT_STATS, context, inTransaction); + this.connectionState.resetValue( + DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE, context, inTransaction); + this.connectionState.resetValue(KEEP_TRANSACTION_ALIVE, context, inTransaction); + this.connectionState.resetValue(AUTO_PARTITION_MODE, context, inTransaction); + this.connectionState.resetValue(DATA_BOOST_ENABLED, context, inTransaction); + this.connectionState.resetValue(MAX_PARTITIONS, context, inTransaction); + this.connectionState.resetValue(MAX_PARTITIONED_PARALLELISM, context, inTransaction); + this.connectionState.resetValue(MAX_COMMIT_DELAY, context, inTransaction); + + this.connectionState.resetValue(AUTOCOMMIT_DML_MODE, context, inTransaction); + this.statementTag = null; + this.statementTimeout = new StatementExecutor.StatementTimeout(); + this.connectionState.resetValue(DIRECTED_READ, context, inTransaction); + this.connectionState.resetValue(SAVEPOINT_SUPPORT, context, inTransaction); + this.protoDescriptors = null; + this.protoDescriptorsFilePath = null; + this.clientContext = null; + + if (!isTransactionStarted()) { 
+ setDefaultTransactionOptions(getDefaultIsolationLevel()); + } + } + + /** Get the current unit-of-work type of this connection. */ + UnitOfWorkType getUnitOfWorkType() { + return unitOfWorkType; + } + + /** + * @return true if this connection is in a batch. + */ + boolean isInBatch() { + return batchMode != BatchMode.NONE; + } + + /** Get the call stack from when the {@link Connection} was opened. */ + LeakedConnectionException getLeakedException() { + return leakedException; + } + + @Override + public Dialect getDialect() { + return dbClient.getDialect(); + } + + @Override + public DatabaseClient getDatabaseClient() { + return dbClient; + } + + @Override + public boolean isClosed() { + return closed; + } + + @Override + public T getConnectionPropertyValue( + com.google.cloud.spanner.connection.ConnectionProperty property) { + return this.connectionState.getValue(property).getValue(); + } + + private void setConnectionPropertyValue(ConnectionProperty property, T value) { + setConnectionPropertyValue(property, value, /* local= */ false); + } + + private void setConnectionPropertyValue( + ConnectionProperty property, T value, boolean local) { + if (local) { + setLocalConnectionPropertyValue(property, value); + } else { + this.connectionState.setValue(property, value, getCurrentContext(), isInTransaction()); + } + } + + /** + * Sets a connection property value only for the duration of the current transaction. The effects + * of this will be undone once the transaction ends, regardless whether the transaction is + * committed or rolled back. 'Local' properties are supported for both {@link + * com.google.cloud.spanner.connection.ConnectionState.Type#TRANSACTIONAL} and {@link + * com.google.cloud.spanner.connection.ConnectionState.Type#NON_TRANSACTIONAL} connection states. + * + *

NOTE: This feature is not yet exposed in the public API. + */ + private void setLocalConnectionPropertyValue(ConnectionProperty property, T value) { + ConnectionPreconditions.checkState( + isInTransaction(), "SET LOCAL statements are only supported in transactions"); + this.connectionState.setLocalValue(property, value); + } + + @Override + public void setAutocommit(boolean autocommit) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + if (isAutocommit() == autocommit) { + return; + } + ConnectionPreconditions.checkState(!isBatchActive(), "Cannot set autocommit while in a batch"); + ConnectionPreconditions.checkState( + !isTransactionStarted(), "Cannot set autocommit while a transaction is active"); + ConnectionPreconditions.checkState( + !(isAutocommit() && isInTransaction()), + "Cannot set autocommit while in a temporary transaction"); + ConnectionPreconditions.checkState( + !transactionBeginMarked, "Cannot set autocommit when a transaction has begun"); + setConnectionPropertyValue(AUTOCOMMIT, autocommit); + if (autocommit) { + // Commit the current transaction state if we went from autocommit=false to autocommit=true. + // Otherwise, we get the strange situation that autocommit=true cannot be committed, as we no + // longer have a transaction. Note that all the above state checks essentially mean that + // autocommit can only be set before a transaction has actually started, and not in the + // middle of a transaction. + this.connectionState.commit(); + } + clearLastTransactionAndSetDefaultTransactionOptions(getDefaultIsolationLevel()); + // Reset the readOnlyStaleness value if it is no longer compatible with the new autocommit + // value. 
+ if (!autocommit) { + TimestampBound readOnlyStaleness = getReadOnlyStaleness(); + if (readOnlyStaleness.getMode() == Mode.MAX_STALENESS + || readOnlyStaleness.getMode() == Mode.MIN_READ_TIMESTAMP) { + setConnectionPropertyValue(READ_ONLY_STALENESS, TimestampBound.strong()); + } + } + } + + @Override + public boolean isAutocommit() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return internalIsAutocommit(); + } + + private boolean internalIsAutocommit() { + return getConnectionPropertyValue(AUTOCOMMIT); + } + + @Override + public void setReadOnly(boolean readOnly) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState(!isBatchActive(), "Cannot set read-only while in a batch"); + ConnectionPreconditions.checkState( + !isTransactionStarted(), "Cannot set read-only while a transaction is active"); + ConnectionPreconditions.checkState( + !(isAutocommit() && isInTransaction()), + "Cannot set read-only while in a temporary transaction"); + ConnectionPreconditions.checkState( + !transactionBeginMarked, "Cannot set read-only when a transaction has begun"); + setConnectionPropertyValue(READONLY, readOnly); + clearLastTransactionAndSetDefaultTransactionOptions(getDefaultIsolationLevel()); + } + + @Override + public boolean isReadOnly() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return getConnectionPropertyValue(READONLY); + } + + @Override + public void setDefaultIsolationLevel(IsolationLevel isolationLevel) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isBatchActive(), "Cannot default isolation level while in a batch"); + ConnectionPreconditions.checkState( + !isTransactionStarted(), + "Cannot set default isolation level while a transaction is active"); + setConnectionPropertyValue(DEFAULT_ISOLATION_LEVEL, isolationLevel); + clearLastTransactionAndSetDefaultTransactionOptions(isolationLevel); + } + + 
@Override + public IsolationLevel getDefaultIsolationLevel() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return getConnectionPropertyValue(DEFAULT_ISOLATION_LEVEL); + } + + private void clearLastTransactionAndSetDefaultTransactionOptions(IsolationLevel isolationLevel) { + setDefaultTransactionOptions(isolationLevel); + this.currentUnitOfWork = null; + } + + @Override + public void setReadLockMode(ReadLockMode readLockMode) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + setConnectionPropertyValue(READ_LOCK_MODE, readLockMode); + } + + @Override + public ReadLockMode getReadLockMode() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return getConnectionPropertyValue(READ_LOCK_MODE); + } + + @Override + public void setTransactionTimeout(Duration timeout) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + setConnectionPropertyValue(TRANSACTION_TIMEOUT, timeout); + } + + @Override + public Duration getTransactionTimeout() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return getConnectionPropertyValue(TRANSACTION_TIMEOUT); + } + + @Nullable + Deadline getTransactionDeadline() { + Duration timeout = getTransactionTimeout(); + return timeout == null + ? 
null + : Deadline.after(timeout.toNanos(), TimeUnit.NANOSECONDS, this.ticker); + } + + @Override + public void setAutocommitDmlMode(AutocommitDmlMode mode) { + Preconditions.checkNotNull(mode); + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isBatchActive(), "Cannot set autocommit DML mode while in a batch"); + ConnectionPreconditions.checkState( + !isInTransaction() && isAutocommit(), + "Cannot set autocommit DML mode while not in autocommit mode or while a transaction is" + + " active"); + ConnectionPreconditions.checkState( + !isReadOnly(), "Cannot set autocommit DML mode for a read-only connection"); + setConnectionPropertyValue(AUTOCOMMIT_DML_MODE, mode); + } + + @Override + public AutocommitDmlMode getAutocommitDmlMode() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isBatchActive(), "Cannot get autocommit DML mode while in a batch"); + return getConnectionPropertyValue(AUTOCOMMIT_DML_MODE); + } + + @Override + public void setReadOnlyStaleness(TimestampBound staleness) { + Preconditions.checkNotNull(staleness); + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState(!isBatchActive(), "Cannot set read-only staleness while in a batch"); + ConnectionPreconditions.checkState( + !isTransactionStarted(), + "Cannot set read-only staleness when a transaction has been started"); + if (staleness.getMode() == Mode.MAX_STALENESS + || staleness.getMode() == Mode.MIN_READ_TIMESTAMP) { + // These values are only allowed in autocommit mode. 
+ ConnectionPreconditions.checkState( + isAutocommit() && !inTransaction, + "MAX_STALENESS and MIN_READ_TIMESTAMP are only allowed in autocommit mode"); + } + setConnectionPropertyValue(READ_ONLY_STALENESS, staleness); + } + + @Override + public TimestampBound getReadOnlyStaleness() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState(!isBatchActive(), "Cannot get read-only staleness while in a batch"); + return getConnectionPropertyValue(READ_ONLY_STALENESS); + } + + @Override + public void setDirectedRead(DirectedReadOptions directedReadOptions) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isTransactionStarted(), + "Cannot set directed read options when a transaction has been started"); + setConnectionPropertyValue(DIRECTED_READ, directedReadOptions); + } + + @Override + public DirectedReadOptions getDirectedRead() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return getConnectionPropertyValue(DIRECTED_READ); + } + + @Override + public void setOptimizerVersion(String optimizerVersion) { + Preconditions.checkNotNull(optimizerVersion); + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + setConnectionPropertyValue(OPTIMIZER_VERSION, optimizerVersion); + } + + @Override + public String getOptimizerVersion() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return getConnectionPropertyValue(OPTIMIZER_VERSION); + } + + @Override + public void setOptimizerStatisticsPackage(String optimizerStatisticsPackage) { + Preconditions.checkNotNull(optimizerStatisticsPackage); + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + setConnectionPropertyValue(OPTIMIZER_STATISTICS_PACKAGE, optimizerStatisticsPackage); + } + + @Override + public String getOptimizerStatisticsPackage() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return 
getConnectionPropertyValue(OPTIMIZER_STATISTICS_PACKAGE); + } + + private QueryOptions buildQueryOptions() { + return QueryOptions.newBuilder() + .setOptimizerVersion(getConnectionPropertyValue(OPTIMIZER_VERSION)) + .setOptimizerStatisticsPackage(getConnectionPropertyValue(OPTIMIZER_STATISTICS_PACKAGE)) + .build(); + } + + @Override + public void setRPCPriority(RpcPriority rpcPriority) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + setConnectionPropertyValue(RPC_PRIORITY, rpcPriority); + } + + @Override + public RpcPriority getRPCPriority() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return getConnectionPropertyValue(RPC_PRIORITY); + } + + @Override + public DdlInTransactionMode getDdlInTransactionMode() { + return getConnectionPropertyValue(DDL_IN_TRANSACTION_MODE); + } + + @Override + public void setDdlInTransactionMode(DdlInTransactionMode ddlInTransactionMode) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isBatchActive(), "Cannot set DdlInTransactionMode while in a batch"); + ConnectionPreconditions.checkState( + !isTransactionStarted(), "Cannot set DdlInTransactionMode while a transaction is active"); + setConnectionPropertyValue(DDL_IN_TRANSACTION_MODE, ddlInTransactionMode); + } + + @Override + public String getDefaultSequenceKind() { + return getConnectionPropertyValue(DEFAULT_SEQUENCE_KIND); + } + + @Override + public void setDefaultSequenceKind(String defaultSequenceKind) { + setConnectionPropertyValue(DEFAULT_SEQUENCE_KIND, defaultSequenceKind); + } + + @Override + public void setStatementTimeout(long timeout, TimeUnit unit) { + Preconditions.checkArgument(timeout > 0L, "Zero or negative timeout values are not allowed"); + Preconditions.checkArgument( + StatementTimeout.isValidTimeoutUnit(unit), + "Time unit must be one of NANOSECONDS, MICROSECONDS, MILLISECONDS or SECONDS"); + ConnectionPreconditions.checkState(!isClosed(), 
CLOSED_ERROR_MSG); + this.statementTimeout.setTimeoutValue(timeout, unit); + } + + @Override + public void clearStatementTimeout() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + this.statementTimeout.clearTimeoutValue(); + } + + @Override + public long getStatementTimeout(TimeUnit unit) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + Preconditions.checkArgument( + StatementTimeout.isValidTimeoutUnit(unit), + "Time unit must be one of NANOSECONDS, MICROSECONDS, MILLISECONDS or SECONDS"); + return this.statementTimeout.getTimeoutValue(unit); + } + + @Override + public boolean hasStatementTimeout() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return this.statementTimeout.hasTimeout(); + } + + @Override + public void cancel() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + if (this.currentUnitOfWork != null) { + currentUnitOfWork.cancel(); + } + } + + @Override + public TransactionMode getTransactionMode() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState(!isDdlBatchActive(), "This connection is in a DDL batch"); + ConnectionPreconditions.checkState(isInTransaction(), "This connection has no transaction"); + return unitOfWorkType.getTransactionMode(); + } + + @Override + public void setTransactionMode(TransactionMode transactionMode) { + Preconditions.checkNotNull(transactionMode); + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isBatchActive(), "Cannot set transaction mode while in a batch"); + ConnectionPreconditions.checkState(isInTransaction(), "This connection has no transaction"); + ConnectionPreconditions.checkState( + !isTransactionStarted(), + "The transaction mode cannot be set after the transaction has started"); + ConnectionPreconditions.checkState( + !isReadOnly() || transactionMode == TransactionMode.READ_ONLY_TRANSACTION, + "The 
transaction mode can only be READ_ONLY when the connection is in read_only mode"); + + this.transactionBeginMarked = true; + this.unitOfWorkType = UnitOfWorkType.of(transactionMode); + } + + IsolationLevel getTransactionIsolationLevel() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState(!isDdlBatchActive(), "This connection is in a DDL batch"); + ConnectionPreconditions.checkState(isInTransaction(), "This connection has no transaction"); + return this.transactionIsolationLevel; + } + + void setTransactionIsolationLevel(IsolationLevel isolationLevel) { + Preconditions.checkNotNull(isolationLevel); + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isBatchActive(), "Cannot set transaction isolation level while in a batch"); + ConnectionPreconditions.checkState(isInTransaction(), "This connection has no transaction"); + ConnectionPreconditions.checkState( + !isTransactionStarted(), + "The transaction isolation level cannot be set after the transaction has started"); + + this.transactionBeginMarked = true; + this.transactionIsolationLevel = isolationLevel; + } + + @Override + public String getTransactionTag() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState(!isDdlBatchActive(), "This connection is in a DDL batch"); + return transactionTag; + } + + @Override + public void setClientContext(RequestOptions.ClientContext clientContext) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + this.clientContext = clientContext; + } + + @Override + public RequestOptions.ClientContext getClientContext() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return clientContext; + } + + @Override + public void setTransactionTag(String tag) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isBatchActive(), 
"Cannot set transaction tag while in a batch"); + ConnectionPreconditions.checkState(isInTransaction(), "This connection has no transaction"); + ConnectionPreconditions.checkState( + !isTransactionStarted(), + "The transaction tag cannot be set after the transaction has started"); + ConnectionPreconditions.checkState( + getTransactionMode() == TransactionMode.READ_WRITE_TRANSACTION, + "Transaction tag can only be set for a read/write transaction"); + + this.transactionBeginMarked = true; + this.transactionTag = tag; + } + + @Override + public String getStatementTag() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isBatchActive(), "Statement tags are not allowed inside a batch"); + return statementTag; + } + + @Override + public void setStatementTag(String tag) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isBatchActive(), "Statement tags are not allowed inside a batch"); + + this.statementTag = tag; + } + + @Override + public boolean isExcludeTxnFromChangeStreams() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState(!isDdlBatchActive(), "This connection is in a DDL batch"); + return excludeTxnFromChangeStreams; + } + + @Override + public void setExcludeTxnFromChangeStreams(boolean excludeTxnFromChangeStreams) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isBatchActive(), "Cannot set exclude_txn_from_change_streams while in a batch"); + ConnectionPreconditions.checkState( + !isTransactionStarted(), + "exclude_txn_from_change_streams cannot be set after the transaction has started"); + this.excludeTxnFromChangeStreams = excludeTxnFromChangeStreams; + } + + @Override + public byte[] getProtoDescriptors() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + if (this.protoDescriptors == null && 
this.protoDescriptorsFilePath != null) { + // Read from file if filepath is valid + try { + File protoDescriptorsFile = new File(this.protoDescriptorsFilePath); + if (!protoDescriptorsFile.isFile()) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + String.format( + "File %s is not a valid proto descriptors file", this.protoDescriptorsFilePath)); + } + InputStream pdStream = Files.newInputStream(protoDescriptorsFile.toPath()); + this.protoDescriptors = ByteArray.copyFrom(pdStream).toByteArray(); + } catch (Exception exception) { + throw SpannerExceptionFactory.newSpannerException(exception); + } + } + return this.protoDescriptors; + } + + @Override + public void setProtoDescriptors(@Nonnull byte[] protoDescriptors) { + Preconditions.checkNotNull(protoDescriptors); + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isBatchActive(), "Proto descriptors cannot be set when a batch is active"); + this.protoDescriptors = protoDescriptors; + this.protoDescriptorsFilePath = null; + } + + void setProtoDescriptorsFilePath(@Nonnull String protoDescriptorsFilePath) { + Preconditions.checkNotNull(protoDescriptorsFilePath); + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isBatchActive(), "Proto descriptors file path cannot be set when a batch is active"); + this.protoDescriptorsFilePath = protoDescriptorsFilePath; + this.protoDescriptors = null; + } + + String getProtoDescriptorsFilePath() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return this.protoDescriptorsFilePath; + } + + /** + * Throws an {@link SpannerException} with code {@link ErrorCode#FAILED_PRECONDITION} if the + * current state of this connection does not allow changing the setting for retryAbortsInternally. 
+ */ + private void checkSetRetryAbortsInternallyAvailable() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isTransactionStarted(), + "RetryAbortsInternally cannot be set after the transaction has started"); + } + + @Override + public boolean isRetryAbortsInternally() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return getConnectionPropertyValue(RETRY_ABORTS_INTERNALLY); + } + + @Override + public void setRetryAbortsInternally(boolean retryAbortsInternally) { + setRetryAbortsInternally(retryAbortsInternally, /* local= */ false); + } + + void setRetryAbortsInternally(boolean retryAbortsInternally, boolean local) { + checkSetRetryAbortsInternallyAvailable(); + setConnectionPropertyValue(RETRY_ABORTS_INTERNALLY, retryAbortsInternally, local); + } + + @Override + public void addTransactionRetryListener(TransactionRetryListener listener) { + Preconditions.checkNotNull(listener); + transactionRetryListeners.add(listener); + } + + @Override + public boolean removeTransactionRetryListener(TransactionRetryListener listener) { + Preconditions.checkNotNull(listener); + return transactionRetryListeners.remove(listener); + } + + @Override + public Iterator getTransactionRetryListeners() { + return Collections.unmodifiableList(transactionRetryListeners).iterator(); + } + + @Override + public boolean isInTransaction() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return internalIsInTransaction(); + } + + /** Returns true if this connection currently is in a transaction (and not a batch). 
*/ + private boolean internalIsInTransaction() { + return !isDdlBatchActive() && (!internalIsAutocommit() || inTransaction); + } + + @Override + public boolean isTransactionStarted() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return internalIsTransactionStarted(); + } + + private boolean internalIsTransactionStarted() { + if (internalIsAutocommit() && !inTransaction) { + return false; + } + return internalIsInTransaction() + && this.currentUnitOfWork != null + && this.currentUnitOfWork.getState() == UnitOfWorkState.STARTED; + } + + private boolean hasTransactionalChanges() { + return internalIsTransactionStarted() || this.connectionState.hasTransactionalChanges(); + } + + @Override + public Timestamp getReadTimestamp() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + this.currentUnitOfWork != null, "There is no transaction on this connection"); + return this.currentUnitOfWork.getReadTimestamp(); + } + + Timestamp getReadTimestampOrNull() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return this.currentUnitOfWork == null ? null : this.currentUnitOfWork.getReadTimestampOrNull(); + } + + @Override + public Timestamp getCommitTimestamp() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + this.currentUnitOfWork != null, "There is no transaction on this connection"); + return this.currentUnitOfWork.getCommitTimestamp(); + } + + Timestamp getCommitTimestampOrNull() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return this.currentUnitOfWork == null + ? 
null + : this.currentUnitOfWork.getCommitTimestampOrNull(); + } + + @Override + public CommitResponse getCommitResponse() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + this.currentUnitOfWork != null, "There is no transaction on this connection"); + return this.currentUnitOfWork.getCommitResponse(); + } + + CommitResponse getCommitResponseOrNull() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return this.currentUnitOfWork == null ? null : this.currentUnitOfWork.getCommitResponseOrNull(); + } + + @Override + public void setReturnCommitStats(boolean returnCommitStats) { + setReturnCommitStats(returnCommitStats, /* local= */ false); + } + + @VisibleForTesting + void setReturnCommitStats(boolean returnCommitStats, boolean local) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + setConnectionPropertyValue(RETURN_COMMIT_STATS, returnCommitStats, local); + } + + @Override + public boolean isReturnCommitStats() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return getConnectionPropertyValue(RETURN_COMMIT_STATS); + } + + @Override + public void setMaxCommitDelay(Duration maxCommitDelay) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + setConnectionPropertyValue(MAX_COMMIT_DELAY, maxCommitDelay); + } + + @Override + public Duration getMaxCommitDelay() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return getConnectionPropertyValue(MAX_COMMIT_DELAY); + } + + @Override + public void setDelayTransactionStartUntilFirstWrite( + boolean delayTransactionStartUntilFirstWrite) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isTransactionStarted(), + "Cannot set DelayTransactionStartUntilFirstWrite while a transaction is active"); + setConnectionPropertyValue( + DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE, 
delayTransactionStartUntilFirstWrite); + } + + @Override + public boolean isDelayTransactionStartUntilFirstWrite() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return getConnectionPropertyValue(DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE); + } + + @Override + public void setKeepTransactionAlive(boolean keepTransactionAlive) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isTransactionStarted(), "Cannot set KeepTransactionAlive while a transaction is active"); + setConnectionPropertyValue(KEEP_TRANSACTION_ALIVE, keepTransactionAlive); + } + + @Override + public boolean isKeepTransactionAlive() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return getConnectionPropertyValue(KEEP_TRANSACTION_ALIVE); + } + + /** Resets this connection to its default transaction options. */ + private void setDefaultTransactionOptions(IsolationLevel isolationLevel) { + if (transactionStack.isEmpty()) { + unitOfWorkType = + isReadOnly() + ? 
UnitOfWorkType.READ_ONLY_TRANSACTION + : UnitOfWorkType.READ_WRITE_TRANSACTION; + batchMode = BatchMode.NONE; + transactionIsolationLevel = isolationLevel; + transactionTag = null; + excludeTxnFromChangeStreams = false; + } else { + popUnitOfWorkFromTransactionStack(); + } + } + + @Override + public void beginTransaction() { + get(beginTransactionAsync(getConnectionPropertyValue(DEFAULT_ISOLATION_LEVEL))); + } + + @Override + public void beginTransaction(IsolationLevel isolationLevel) { + get(beginTransactionAsync(isolationLevel)); + } + + @Override + public ApiFuture beginTransactionAsync() { + return beginTransactionAsync(getConnectionPropertyValue(DEFAULT_ISOLATION_LEVEL)); + } + + @Override + public ApiFuture beginTransactionAsync(IsolationLevel isolationLevel) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isBatchActive(), "This connection has an active batch and cannot begin a transaction"); + ConnectionPreconditions.checkState( + !isTransactionStarted(), + "Beginning a new transaction is not allowed when a transaction is already running"); + ConnectionPreconditions.checkState(!transactionBeginMarked, "A transaction has already begun"); + + transactionBeginMarked = true; + clearLastTransactionAndSetDefaultTransactionOptions(isolationLevel); + if (isAutocommit()) { + inTransaction = true; + } + return ApiFutures.immediateFuture(null); + } + + /** Internal interface for ending a transaction (commit/rollback). 
*/ + private interface EndTransactionMethod { + ApiFuture endAsync(CallType callType, UnitOfWork t); + } + + private final class Commit implements EndTransactionMethod { + @Override + public ApiFuture endAsync(CallType callType, UnitOfWork t) { + return t.commitAsync( + callType, + new EndTransactionCallback() { + @Override + public void onSuccess() { + ConnectionImpl.this.connectionState.commit(); + } + + @Override + public void onFailure() { + ConnectionImpl.this.connectionState.rollback(); + } + }); + } + } + + private final Commit commit = new Commit(); + + @Override + public void commit() { + get(commitAsync(CallType.SYNC, Caller.APPLICATION)); + } + + @Override + public ApiFuture commitAsync() { + return commitAsync(CallType.ASYNC, Caller.APPLICATION); + } + + ApiFuture commitAsync(CallType callType, Caller caller) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !transactionRunnerActive || caller == Caller.TRANSACTION_RUNNER, + "Cannot call commit when a transaction runner is active"); + maybeAutoCommitOrFlushCurrentUnitOfWork(COMMIT_STATEMENT.getType(), COMMIT_STATEMENT); + return endCurrentTransactionAsync(callType, commit, COMMIT_STATEMENT); + } + + private final class Rollback implements EndTransactionMethod { + @Override + public ApiFuture endAsync(CallType callType, UnitOfWork t) { + return t.rollbackAsync( + callType, + new EndTransactionCallback() { + @Override + public void onSuccess() { + ConnectionImpl.this.connectionState.rollback(); + } + + @Override + public void onFailure() { + ConnectionImpl.this.connectionState.rollback(); + } + }); + } + } + + private final Rollback rollback = new Rollback(); + + @Override + public void rollback() { + get(rollbackAsync(CallType.SYNC, Caller.APPLICATION)); + } + + @Override + public ApiFuture rollbackAsync() { + return rollbackAsync(CallType.ASYNC, Caller.APPLICATION); + } + + ApiFuture rollbackAsync(CallType callType, Caller caller) { + 
ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !transactionRunnerActive || caller == Caller.TRANSACTION_RUNNER, + "Cannot call rollback when a transaction runner is active"); + maybeAutoCommitOrFlushCurrentUnitOfWork(ROLLBACK_STATEMENT.getType(), ROLLBACK_STATEMENT); + return endCurrentTransactionAsync(callType, rollback, ROLLBACK_STATEMENT); + } + + private ApiFuture endCurrentTransactionAsync( + CallType callType, + EndTransactionMethod endTransactionMethod, + ParsedStatement parsedStatement) { + ConnectionPreconditions.checkState(!isBatchActive(), "This connection has an active batch"); + ConnectionPreconditions.checkState(isInTransaction(), "This connection has no transaction"); + ConnectionPreconditions.checkState( + statementTag == null, "Statement tags are not supported for COMMIT or ROLLBACK"); + ApiFuture res; + try { + if (hasTransactionalChanges()) { + res = + endTransactionMethod.endAsync( + callType, getCurrentUnitOfWorkOrStartNewUnitOfWork(parsedStatement)); + } else { + this.currentUnitOfWork = null; + res = ApiFutures.immediateFuture(null); + } + } finally { + transactionBeginMarked = false; + if (isAutocommit()) { + inTransaction = false; + } + setDefaultTransactionOptions(getDefaultIsolationLevel()); + } + return res; + } + + @Override + public T runTransaction(TransactionCallable callable) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState(!isBatchActive(), "Cannot run transaction while in a batch"); + ConnectionPreconditions.checkState( + !isTransactionStarted(), "Cannot run transaction when a transaction is already active"); + ConnectionPreconditions.checkState( + !transactionRunnerActive, "A transaction runner is already active for this connection"); + this.transactionRunnerActive = true; + try { + return new TransactionRunnerImpl(this).run(callable); + } finally { + this.transactionRunnerActive = false; + } + } + + void 
resetForRetry(UnitOfWork retryUnitOfWork) { + retryUnitOfWork.resetForRetry(); + this.currentUnitOfWork = retryUnitOfWork; + } + + @Override + public SavepointSupport getSavepointSupport() { + return getConnectionPropertyValue(SAVEPOINT_SUPPORT); + } + + @Override + public void setSavepointSupport(SavepointSupport savepointSupport) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isBatchActive(), "Cannot set SavepointSupport while in a batch"); + ConnectionPreconditions.checkState( + !isTransactionStarted(), "Cannot set SavepointSupport while a transaction is active"); + setConnectionPropertyValue(SAVEPOINT_SUPPORT, savepointSupport); + } + + @Override + public void savepoint(String name) { + ConnectionPreconditions.checkState(isInTransaction(), "This connection has no transaction"); + SavepointSupport savepointSupport = getSavepointSupport(); + ConnectionPreconditions.checkState( + savepointSupport.isSavepointCreationAllowed(), + "This connection does not allow the creation of savepoints. 
Current value of" + + " SavepointSupport: " + + savepointSupport); + getCurrentUnitOfWorkOrStartNewUnitOfWork(SAVEPOINT_STATEMENT) + .savepoint(checkValidIdentifier(name), getDialect()); + } + + @Override + public void releaseSavepoint(String name) { + ConnectionPreconditions.checkState( + isTransactionStarted(), "This connection has no active transaction"); + getCurrentUnitOfWorkOrStartNewUnitOfWork(RELEASE_STATEMENT) + .releaseSavepoint(checkValidIdentifier(name)); + } + + @Override + public void rollbackToSavepoint(String name) { + ConnectionPreconditions.checkState( + isTransactionStarted(), "This connection has no active transaction"); + getCurrentUnitOfWorkOrStartNewUnitOfWork(ROLLBACK_TO_STATEMENT) + .rollbackToSavepoint(checkValidIdentifier(name), getSavepointSupport()); + } + + @Override + public StatementResult execute(Statement statement) { + return internalExecute(Preconditions.checkNotNull(statement), null); + } + + @Override + public StatementResult execute(Statement statement, Set allowedResultTypes) { + return internalExecute( + Preconditions.checkNotNull(statement), Preconditions.checkNotNull(allowedResultTypes)); + } + + private StatementResult internalExecute( + Statement statement, @Nullable Set allowedResultTypes) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ParsedStatement parsedStatement = getStatementParser().parse(statement, buildQueryOptions()); + checkResultTypeAllowed(parsedStatement, allowedResultTypes); + switch (parsedStatement.getType()) { + case CLIENT_SIDE: + return parsedStatement + .getClientSideStatement() + .execute(connectionStatementExecutor, parsedStatement); + case QUERY: + return StatementResultImpl.of( + internalExecuteQuery(CallType.SYNC, parsedStatement, AnalyzeMode.NONE)); + case UPDATE: + if (parsedStatement.hasReturningClause()) { + return StatementResultImpl.of( + internalExecuteQuery(CallType.SYNC, parsedStatement, AnalyzeMode.NONE)); + } + return StatementResultImpl.of( + 
get(internalExecuteUpdateAsync(CallType.SYNC, parsedStatement))); + case DDL: + get(executeDdlAsync(CallType.SYNC, parsedStatement)); + return StatementResultImpl.noResult(); + case UNKNOWN: + default: + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Unknown statement: " + parsedStatement.getSql()); + } + + @VisibleForTesting + static void checkResultTypeAllowed( + ParsedStatement parsedStatement, @Nullable Set allowedResultTypes) { + if (allowedResultTypes == null) { + return; + } + ResultType resultType = getResultType(parsedStatement); + if (!allowedResultTypes.contains(resultType)) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "This statement returns a result of type " + + resultType + + ". Only statements that return a result of one of the following types are allowed: " + + allowedResultTypes.stream() + .map(ResultType::toString) + .collect(Collectors.joining(", "))); + } + } + + private static ResultType getResultType(ParsedStatement parsedStatement) { + switch (parsedStatement.getType()) { + case CLIENT_SIDE: + if (parsedStatement.getClientSideStatement().isQuery()) { + return ResultType.RESULT_SET; + } else if (parsedStatement.getClientSideStatement().isUpdate()) { + return ResultType.UPDATE_COUNT; + } else { + return ResultType.NO_RESULT; + } + case QUERY: + return ResultType.RESULT_SET; + case UPDATE: + if (parsedStatement.hasReturningClause()) { + return ResultType.RESULT_SET; + } else { + return ResultType.UPDATE_COUNT; + } + case DDL: + return ResultType.NO_RESULT; + case UNKNOWN: + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Unknown statement: " + parsedStatement.getSql()); + } + } + + @Override + public AsyncStatementResult executeAsync(Statement statement) { + Preconditions.checkNotNull(statement); + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ParsedStatement parsedStatement = 
getStatementParser().parse(statement, buildQueryOptions()); + switch (parsedStatement.getType()) { + case CLIENT_SIDE: + return AsyncStatementResultImpl.of( + parsedStatement + .getClientSideStatement() + .execute(connectionStatementExecutor, parsedStatement), + spanner.getAsyncExecutorProvider()); + case QUERY: + return AsyncStatementResultImpl.of( + internalExecuteQueryAsync(CallType.ASYNC, parsedStatement, AnalyzeMode.NONE)); + case UPDATE: + if (parsedStatement.hasReturningClause()) { + return AsyncStatementResultImpl.of( + internalExecuteQueryAsync(CallType.ASYNC, parsedStatement, AnalyzeMode.NONE)); + } + return AsyncStatementResultImpl.of( + internalExecuteUpdateAsync(CallType.ASYNC, parsedStatement)); + case DDL: + return AsyncStatementResultImpl.noResult(executeDdlAsync(CallType.ASYNC, parsedStatement)); + case UNKNOWN: + default: + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Unknown statement: " + parsedStatement.getSql()); + } + + @Override + public ResultSet executeQuery(Statement query, QueryOption... options) { + return parseAndExecuteQuery(CallType.SYNC, query, AnalyzeMode.NONE, options); + } + + @Override + public AsyncResultSet executeQueryAsync(Statement query, QueryOption... 
options) { + return parseAndExecuteQueryAsync(query, options); + } + + @Override + public ResultSet analyzeQuery(Statement query, QueryAnalyzeMode queryMode) { + Preconditions.checkNotNull(queryMode); + return parseAndExecuteQuery(CallType.SYNC, query, AnalyzeMode.of(queryMode)); + } + + @Override + public void setAutoBatchDml(boolean autoBatchDml) { + setConnectionPropertyValue(AUTO_BATCH_DML, autoBatchDml); + } + + @Override + public boolean isAutoBatchDml() { + return getConnectionPropertyValue(AUTO_BATCH_DML); + } + + @Override + public void setAutoBatchDmlUpdateCount(long updateCount) { + setConnectionPropertyValue(AUTO_BATCH_DML_UPDATE_COUNT, updateCount); + } + + @Override + public long getAutoBatchDmlUpdateCount() { + return getConnectionPropertyValue(AUTO_BATCH_DML_UPDATE_COUNT); + } + + long getDmlBatchUpdateCount() { + return getConnectionPropertyValue(BATCH_DML_UPDATE_COUNT); + } + + @Override + public void setAutoBatchDmlUpdateCountVerification(boolean verification) { + setConnectionPropertyValue(AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION, verification); + } + + @Override + public boolean isAutoBatchDmlUpdateCountVerification() { + return getConnectionPropertyValue(AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION); + } + + void setBatchDmlUpdateCount(long updateCount, boolean local) { + setConnectionPropertyValue(BATCH_DML_UPDATE_COUNT, updateCount, local); + } + + @Override + public void setDataBoostEnabled(boolean dataBoostEnabled) { + setConnectionPropertyValue(DATA_BOOST_ENABLED, dataBoostEnabled); + } + + @Override + public boolean isDataBoostEnabled() { + return getConnectionPropertyValue(DATA_BOOST_ENABLED); + } + + @Override + public void setAutoPartitionMode(boolean autoPartitionMode) { + setConnectionPropertyValue(AUTO_PARTITION_MODE, autoPartitionMode); + } + + /** + * autoPartitionMode will force this connection to execute all queries as partitioned queries. 
If + * a query cannot be executed as a partitioned query, for example if it is not partitionable, then + * the query will fail. This mode is intended for integrations with frameworks that should always + * use partitioned queries, and that do not support executing custom SQL statements. This setting + * can be used in combination with the dataBoostEnabled flag to force all queries to use data + * boost. + */ + @Override + public boolean isAutoPartitionMode() { + return getConnectionPropertyValue(AUTO_PARTITION_MODE); + } + + @Override + public void setMaxPartitions(int maxPartitions) { + setConnectionPropertyValue(MAX_PARTITIONS, maxPartitions); + } + + @Override + public int getMaxPartitions() { + return getConnectionPropertyValue(MAX_PARTITIONS); + } + + @Override + public ResultSet partitionQuery( + Statement query, PartitionOptions partitionOptions, QueryOption... options) { + ParsedStatement parsedStatement = getStatementParser().parse(query, buildQueryOptions()); + if (parsedStatement.getType() != StatementType.QUERY) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Only queries can be partitioned. 
Invalid statement: " + query.getSql()); + } + + QueryOption[] combinedOptions = concat(parsedStatement.getOptionsFromHints(), options); + UnitOfWork transaction = getCurrentUnitOfWorkOrStartNewUnitOfWork(parsedStatement); + return get( + transaction.partitionQueryAsync( + CallType.SYNC, + parsedStatement, + getEffectivePartitionOptions(partitionOptions), + mergeDataBoost( + mergeQueryRequestOptions( + parsedStatement, mergeQueryStatementTag(combinedOptions))))); + } + + private PartitionOptions getEffectivePartitionOptions( + PartitionOptions callSpecificPartitionOptions) { + if (getMaxPartitions() == 0) { + if (callSpecificPartitionOptions == null) { + return PartitionOptions.newBuilder().build(); + } else { + return callSpecificPartitionOptions; + } + } + if (callSpecificPartitionOptions != null + && callSpecificPartitionOptions.getMaxPartitions() > 0L) { + return callSpecificPartitionOptions; + } + if (callSpecificPartitionOptions != null + && callSpecificPartitionOptions.getPartitionSizeBytes() > 0L) { + return PartitionOptions.newBuilder() + .setMaxPartitions(getMaxPartitions()) + .setPartitionSizeBytes(callSpecificPartitionOptions.getPartitionSizeBytes()) + .build(); + } + return PartitionOptions.newBuilder().setMaxPartitions(getMaxPartitions()).build(); + } + + @Override + public ResultSet runPartition(String encodedPartitionId) { + PartitionId id = PartitionId.decodeFromString(encodedPartitionId); + try (BatchReadOnlyTransaction transaction = + batchClient.batchReadOnlyTransaction(id.getTransactionId())) { + return transaction.execute(id.getPartition()); + } + } + + @Override + public void setMaxPartitionedParallelism(int maxThreads) { + Preconditions.checkArgument(maxThreads >= 0, "maxThreads must be >=0"); + setConnectionPropertyValue(MAX_PARTITIONED_PARALLELISM, maxThreads); + } + + @Override + public int getMaxPartitionedParallelism() { + return getConnectionPropertyValue(MAX_PARTITIONED_PARALLELISM); + } + + @Override + public PartitionedQueryResultSet 
runPartitionedQuery( + Statement query, PartitionOptions partitionOptions, QueryOption... options) { + List partitionIds = new ArrayList<>(); + try (ResultSet partitions = partitionQuery(query, partitionOptions, options)) { + while (partitions.next()) { + partitionIds.add(partitions.getString(0)); + } + } + // parallelism=0 means 'dynamically choose based on the number of available processors and the + // number of partitions'. + return new MergedResultSet(this, partitionIds, getMaxPartitionedParallelism()); + } + + /** + * Parses the given statement as a query and executes it. Throws a {@link SpannerException} if the + * statement is not a query. + */ + private ResultSet parseAndExecuteQuery( + CallType callType, Statement query, AnalyzeMode analyzeMode, QueryOption... options) { + Preconditions.checkNotNull(query); + Preconditions.checkNotNull(analyzeMode); + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ParsedStatement parsedStatement = getStatementParser().parse(query, buildQueryOptions()); + if (parsedStatement.isQuery() || parsedStatement.isUpdate()) { + switch (parsedStatement.getType()) { + case CLIENT_SIDE: + return parsedStatement + .getClientSideStatement() + .execute(connectionStatementExecutor, parsedStatement) + .getResultSet(); + case QUERY: + return internalExecuteQuery(callType, parsedStatement, analyzeMode, options); + case UPDATE: + if (parsedStatement.hasReturningClause()) { + // Cannot execute DML statement with returning clause in read-only mode or in + // READ_ONLY_TRANSACTION transaction mode. 
+ if (this.isReadOnly() + || (this.isInTransaction() + && this.getTransactionMode() == TransactionMode.READ_ONLY_TRANSACTION)) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "DML statement with returning clause cannot be executed in read-only mode: " + + parsedStatement.getSql()); + } + return internalExecuteQuery(callType, parsedStatement, analyzeMode, options); + } + case DDL: + case UNKNOWN: + default: + } + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Statement is not a query or DML with returning clause: " + parsedStatement.getSql()); + } + + private AsyncResultSet parseAndExecuteQueryAsync(Statement query, QueryOption... options) { + Preconditions.checkNotNull(query); + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ParsedStatement parsedStatement = getStatementParser().parse(query, buildQueryOptions()); + if (parsedStatement.isQuery() || parsedStatement.isUpdate()) { + switch (parsedStatement.getType()) { + case CLIENT_SIDE: + return ResultSets.toAsyncResultSet( + parsedStatement + .getClientSideStatement() + .execute(connectionStatementExecutor, parsedStatement) + .getResultSet(), + spanner.getAsyncExecutorProvider(), + options); + case QUERY: + return internalExecuteQueryAsync( + CallType.ASYNC, parsedStatement, AnalyzeMode.NONE, options); + case UPDATE: + if (parsedStatement.hasReturningClause()) { + // Cannot execute DML statement with returning clause in read-only mode or in + // READ_ONLY_TRANSACTION transaction mode. 
+ if (this.isReadOnly() + || (this.isInTransaction() + && this.getTransactionMode() == TransactionMode.READ_ONLY_TRANSACTION)) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "DML statement with returning clause cannot be executed in read-only mode: " + + parsedStatement.getSql()); + } + return internalExecuteQueryAsync( + CallType.ASYNC, parsedStatement, AnalyzeMode.NONE, options); + } + case DDL: + case UNKNOWN: + default: + } + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Statement is not a query or DML with returning clause: " + parsedStatement.getSql()); + } + + private boolean isInternalMetadataQuery(QueryOption... options) { + if (options == null) { + return false; + } + for (QueryOption option : options) { + if (option instanceof InternalMetadataQuery) { + return true; + } + } + return false; + } + + @Override + public long executeUpdate(Statement update) { + Preconditions.checkNotNull(update); + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ParsedStatement parsedStatement = getStatementParser().parse(update); + if (parsedStatement.isUpdate()) { + switch (parsedStatement.getType()) { + case UPDATE: + if (parsedStatement.hasReturningClause()) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "DML statement with returning clause cannot be executed using executeUpdate: " + + parsedStatement.getSql() + + ". 
Please use executeQuery instead."); + } + return get(internalExecuteUpdateAsync(CallType.SYNC, parsedStatement)); + case CLIENT_SIDE: + case QUERY: + case DDL: + case UNKNOWN: + default: + } + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Statement is not an update statement: " + parsedStatement.getSql()); + } + + @Override + public ApiFuture executeUpdateAsync(Statement update) { + Preconditions.checkNotNull(update); + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ParsedStatement parsedStatement = getStatementParser().parse(update); + if (parsedStatement.isUpdate()) { + switch (parsedStatement.getType()) { + case UPDATE: + if (parsedStatement.hasReturningClause()) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "DML statement with returning clause cannot be executed using executeUpdateAsync: " + + parsedStatement.getSql() + + ". Please use executeQueryAsync instead."); + } + return internalExecuteUpdateAsync(CallType.ASYNC, parsedStatement); + case CLIENT_SIDE: + case QUERY: + case DDL: + case UNKNOWN: + default: + } + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Statement is not an update statement: " + parsedStatement.getSql()); + } + + @Override + public ResultSetStats analyzeUpdate(Statement update, QueryAnalyzeMode analyzeMode) { + Preconditions.checkNotNull(update); + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ParsedStatement parsedStatement = getStatementParser().parse(update); + if (parsedStatement.isUpdate()) { + switch (parsedStatement.getType()) { + case UPDATE: + return get(internalAnalyzeUpdateAsync( + CallType.SYNC, parsedStatement, AnalyzeMode.of(analyzeMode))) + .getStats(); + case CLIENT_SIDE: + case QUERY: + case DDL: + case UNKNOWN: + default: + } + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Statement is not an update statement: 
" + parsedStatement.getSql()); + } + + @Override + public ResultSet analyzeUpdateStatement( + Statement statement, QueryAnalyzeMode analyzeMode, UpdateOption... options) { + Preconditions.checkNotNull(statement); + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ParsedStatement parsedStatement = getStatementParser().parse(statement); + switch (parsedStatement.getType()) { + case UPDATE: + return get( + internalAnalyzeUpdateAsync( + CallType.SYNC, parsedStatement, AnalyzeMode.of(analyzeMode), options)); + case QUERY: + case CLIENT_SIDE: + case DDL: + case UNKNOWN: + default: + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Statement is not an update statement: " + parsedStatement.getSql()); + } + + @Override + public long[] executeBatchUpdate(Iterable updates) { + return get(internalExecuteBatchUpdateAsync(CallType.SYNC, parseUpdateStatements(updates))); + } + + @Override + public ApiFuture executeBatchUpdateAsync(Iterable updates) { + return internalExecuteBatchUpdateAsync(CallType.ASYNC, parseUpdateStatements(updates)); + } + + private List parseUpdateStatements(Iterable updates) { + Preconditions.checkNotNull(updates); + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + // Check that there are only DML statements in the input. 
+ List parsedStatements = new LinkedList<>(); + for (Statement update : updates) { + ParsedStatement parsedStatement = getStatementParser().parse(update); + switch (parsedStatement.getType()) { + case UPDATE: + parsedStatements.add(parsedStatement); + break; + case CLIENT_SIDE: + case QUERY: + case DDL: + case UNKNOWN: + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "The batch update list contains a statement that is not an update statement: " + + parsedStatement.getSql()); + } + } + return parsedStatements; + } + + private UpdateOption[] concat( + ReadQueryUpdateTransactionOption[] statementOptions, UpdateOption[] argumentOptions) { + if (statementOptions == null || statementOptions.length == 0) { + return argumentOptions; + } + if (argumentOptions == null || argumentOptions.length == 0) { + return statementOptions; + } + UpdateOption[] result = + Arrays.copyOf(statementOptions, statementOptions.length + argumentOptions.length); + System.arraycopy(argumentOptions, 0, result, statementOptions.length, argumentOptions.length); + return result; + } + + private QueryOption[] concat( + ReadQueryUpdateTransactionOption[] statementOptions, QueryOption[] argumentOptions) { + if (statementOptions == null || statementOptions.length == 0) { + return argumentOptions; + } + if (argumentOptions == null || argumentOptions.length == 0) { + return statementOptions; + } + QueryOption[] result = + Arrays.copyOf(statementOptions, statementOptions.length + argumentOptions.length); + System.arraycopy(argumentOptions, 0, result, statementOptions.length, argumentOptions.length); + return result; + } + + private QueryOption[] mergeDataBoost(QueryOption... options) { + if (isDataBoostEnabled()) { + options = appendQueryOption(options, Options.dataBoostEnabled(true)); + } + return options; + } + + private QueryOption[] mergeQueryStatementTag(QueryOption... 
options) { + if (this.statementTag != null) { + options = appendQueryOption(options, Options.tag(statementTag)); + this.statementTag = null; + } + return options; + } + + private QueryOption[] mergeQueryRequestOptions( + ParsedStatement parsedStatement, QueryOption... options) { + if (getConnectionPropertyValue(RPC_PRIORITY) != null) { + options = + appendQueryOption(options, Options.priority(getConnectionPropertyValue(RPC_PRIORITY))); + } + if (clientContext != null) { + options = appendQueryOption(options, Options.clientContext(clientContext)); + } + if (currentUnitOfWork != null + && currentUnitOfWork.supportsDirectedReads(parsedStatement) + && getConnectionPropertyValue(DIRECTED_READ) != null) { + options = + appendQueryOption( + options, Options.directedRead(getConnectionPropertyValue(DIRECTED_READ))); + } + return options; + } + + private QueryOption[] appendQueryOption(QueryOption[] options, QueryOption append) { + if (options == null || options.length == 0) { + options = new QueryOption[] {append}; + } else { + options = Arrays.copyOf(options, options.length + 1); + options[options.length - 1] = append; + } + return options; + } + + private UpdateOption[] mergeUpdateStatementTag(UpdateOption... options) { + if (this.statementTag != null) { + // Shortcut for the most common scenario. + if (options == null || options.length == 0) { + options = new UpdateOption[] {Options.tag(statementTag)}; + } else { + options = Arrays.copyOf(options, options.length + 1); + options[options.length - 1] = Options.tag(statementTag); + } + this.statementTag = null; + } + return options; + } + + private UpdateOption[] mergeUpdateRequestOptions(UpdateOption... options) { + if (getConnectionPropertyValue(RPC_PRIORITY) != null) { + // Shortcut for the most common scenario. 
+ if (options == null || options.length == 0) { + options = new UpdateOption[] {Options.priority(getConnectionPropertyValue(RPC_PRIORITY))}; + } else { + options = Arrays.copyOf(options, options.length + 1); + options[options.length - 1] = Options.priority(getConnectionPropertyValue(RPC_PRIORITY)); + } + } + if (clientContext != null) { + if (options == null || options.length == 0) { + options = new UpdateOption[] {Options.clientContext(clientContext)}; + } else { + options = Arrays.copyOf(options, options.length + 1); + options[options.length - 1] = Options.clientContext(clientContext); + } + } + return options; + } + + private ResultSet internalExecuteQuery( + final CallType callType, + final ParsedStatement statement, + final AnalyzeMode analyzeMode, + final QueryOption... options) { + Preconditions.checkArgument( + statement.getType() == StatementType.QUERY + || (statement.getType() == StatementType.UPDATE + && (analyzeMode != AnalyzeMode.NONE || statement.hasReturningClause())), + "Statement must either be a query or a DML mode with analyzeMode!=NONE or returning" + + " clause"); + boolean isInternalMetadataQuery = isInternalMetadataQuery(options); + QueryOption[] combinedOptions = concat(statement.getOptionsFromHints(), options); + UnitOfWork transaction = + getCurrentUnitOfWorkOrStartNewUnitOfWork(statement, isInternalMetadataQuery); + if (isAutoPartitionMode() + && statement.getType() == StatementType.QUERY + && !isInternalMetadataQuery) { + return runPartitionedQuery( + statement.getStatement(), PartitionOptions.getDefaultInstance(), combinedOptions); + } + return get( + transaction.executeQueryAsync( + callType, + statement, + analyzeMode, + mergeQueryRequestOptions(statement, mergeQueryStatementTag(combinedOptions)))); + } + + private AsyncResultSet internalExecuteQueryAsync( + final CallType callType, + final ParsedStatement statement, + final AnalyzeMode analyzeMode, + final QueryOption... 
options) { + Preconditions.checkArgument( + (statement.getType() == StatementType.QUERY) + || (statement.getType() == StatementType.UPDATE && statement.hasReturningClause()), + "Statement must be a query or DML with returning clause."); + ConnectionPreconditions.checkState( + !(isAutoPartitionMode() && statement.getType() == StatementType.QUERY), + "Partitioned queries cannot be executed asynchronously"); + boolean isInternalMetadataQuery = isInternalMetadataQuery(options); + QueryOption[] combinedOptions = concat(statement.getOptionsFromHints(), options); + UnitOfWork transaction = + getCurrentUnitOfWorkOrStartNewUnitOfWork(statement, isInternalMetadataQuery); + return ResultSets.toAsyncResultSet( + transaction.executeQueryAsync( + callType, + statement, + analyzeMode, + mergeQueryRequestOptions(statement, mergeQueryStatementTag(combinedOptions))), + spanner.getAsyncExecutorProvider(), + combinedOptions); + } + + private ApiFuture internalExecuteUpdateAsync( + final CallType callType, final ParsedStatement update, UpdateOption... options) { + Preconditions.checkArgument( + update.getType() == StatementType.UPDATE, "Statement must be an update"); + UpdateOption[] combinedOptions = concat(update.getOptionsFromHints(), options); + UnitOfWork transaction = + maybeStartAutoDmlBatch(getCurrentUnitOfWorkOrStartNewUnitOfWork(update)); + return transaction.executeUpdateAsync( + callType, update, mergeUpdateRequestOptions(mergeUpdateStatementTag(combinedOptions))); + } + + private ApiFuture internalAnalyzeUpdateAsync( + final CallType callType, + final ParsedStatement update, + AnalyzeMode analyzeMode, + UpdateOption... 
options) { + Preconditions.checkArgument( + update.getType() == StatementType.UPDATE, "Statement must be an update"); + UpdateOption[] combinedOptions = concat(update.getOptionsFromHints(), options); + UnitOfWork transaction = getCurrentUnitOfWorkOrStartNewUnitOfWork(update); + return transaction.analyzeUpdateAsync( + callType, + update, + analyzeMode, + mergeUpdateRequestOptions(mergeUpdateStatementTag(combinedOptions))); + } + + private ApiFuture internalExecuteBatchUpdateAsync( + CallType callType, List updates, UpdateOption... options) { + UpdateOption[] combinedOptions = + updates.isEmpty() ? options : concat(updates.get(0).getOptionsFromHints(), options); + UnitOfWork transaction = + maybeStartAutoDmlBatch(getCurrentUnitOfWorkOrStartNewUnitOfWork(updates.get(0))); + return transaction.executeBatchUpdateAsync( + callType, updates, mergeUpdateRequestOptions(mergeUpdateStatementTag(combinedOptions))); + } + + private UnitOfWork maybeStartAutoDmlBatch(UnitOfWork transaction) { + if (isInTransaction() && isAutoBatchDml() && !(transaction instanceof DmlBatch)) { + // Automatically start a DML batch. 
+ return startBatchDml(/* autoBatch= */ true); + } + return transaction; + } + + UnitOfWork getCurrentUnitOfWorkOrStartNewUnitOfWork() { + return getCurrentUnitOfWorkOrStartNewUnitOfWork( + StatementType.UNKNOWN, /* parsedStatement= */ null, /* internalMetadataQuery= */ false); + } + + private UnitOfWork getCurrentUnitOfWorkOrStartNewUnitOfWork( + @Nonnull ParsedStatement parsedStatement) { + return getCurrentUnitOfWorkOrStartNewUnitOfWork( + parsedStatement.getType(), parsedStatement, /* internalMetadataQuery= */ false); + } + + @VisibleForTesting + UnitOfWork getCurrentUnitOfWorkOrStartNewUnitOfWork( + @Nonnull ParsedStatement parsedStatement, boolean isInternalMetadataQuery) { + return getCurrentUnitOfWorkOrStartNewUnitOfWork( + parsedStatement.getType(), parsedStatement, isInternalMetadataQuery); + } + + private UnitOfWork getOrStartDdlUnitOfWork(ParsedStatement parsedStatement) { + return getCurrentUnitOfWorkOrStartNewUnitOfWork(StatementType.DDL, parsedStatement, false); + } + + /** + * Returns the current {@link UnitOfWork} of this connection, or creates a new one based on the + * current transaction settings of the connection and returns that. + */ + @VisibleForTesting + UnitOfWork getCurrentUnitOfWorkOrStartNewUnitOfWork( + StatementType statementType, + @Nullable ParsedStatement parsedStatement, + boolean isInternalMetadataQuery) { + if (isInternalMetadataQuery) { + // Just return a temporary single-use transaction. 
+ return createNewUnitOfWork( + /* isInternalMetadataQuery= */ true, + /* forceSingleUse= */ true, + /* autoBatchDml= */ false); + } + maybeAutoCommitOrFlushCurrentUnitOfWork(statementType, parsedStatement); + if (this.currentUnitOfWork == null || !this.currentUnitOfWork.isActive()) { + this.currentUnitOfWork = + createNewUnitOfWork( + /* isInternalMetadataQuery= */ false, + /* forceSingleUse= */ statementType == StatementType.DDL + && getDdlInTransactionMode() != DdlInTransactionMode.FAIL + && !this.transactionBeginMarked, + /* autoBatchDml= */ false, + statementType); + } + return this.currentUnitOfWork; + } + + private Span createSpanForUnitOfWork(String name) { + return tracer + .spanBuilder( + // We can memoize this, as it is a STARTUP property. + Suppliers.memoize(() -> this.connectionState.getValue(TRACING_PREFIX).getValue()).get() + + "." + + name) + .setAllAttributes(getOpenTelemetryAttributes()) + .startSpan(); + } + + void maybeAutoCommitOrFlushCurrentUnitOfWork( + StatementType statementType, @Nullable ParsedStatement parsedStatement) { + if (this.currentUnitOfWork instanceof ReadWriteTransaction + && this.currentUnitOfWork.isActive() + && statementType == StatementType.DDL + && getDdlInTransactionMode() == DdlInTransactionMode.AUTO_COMMIT_TRANSACTION) { + commit(); + } else { + maybeFlushAutoDmlBatch(parsedStatement); + } + } + + private void maybeFlushAutoDmlBatch(@Nullable ParsedStatement parsedStatement) { + if (parsedStatement == null) { + return; + } + if (this.currentUnitOfWork instanceof DmlBatch && this.currentUnitOfWork.isActive()) { + DmlBatch batch = (DmlBatch) this.currentUnitOfWork; + if (batch.isAutoBatch()) { + if (parsedStatement == ROLLBACK_STATEMENT + || (parsedStatement == ROLLBACK_TO_STATEMENT + && getSavepointSupport() == SavepointSupport.ENABLED)) { + // Just abort the batch if the transaction is about to be rolled back. 
+ abortBatch(); + } else if (!parsedStatement.isUpdate() || parsedStatement.hasReturningClause()) { + // The statement that is about to be executed cannot be executed in a DML batch. + // Flush the current batch and then executed the statement. + runBatch(); + } + } + } + } + + @VisibleForTesting + UnitOfWork createNewUnitOfWork( + boolean isInternalMetadataQuery, boolean forceSingleUse, boolean autoBatchDml) { + return createNewUnitOfWork(isInternalMetadataQuery, forceSingleUse, autoBatchDml, null); + } + + @VisibleForTesting + UnitOfWork createNewUnitOfWork( + boolean isInternalMetadataQuery, + boolean forceSingleUse, + boolean autoBatchDml, + StatementType statementType) { + if (isInternalMetadataQuery + || (isAutocommit() && !isInTransaction() && !isInBatch()) + || forceSingleUse) { + SingleUseTransaction singleUseTransaction = + SingleUseTransaction.newBuilder() + .setInternalMetadataQuery(isInternalMetadataQuery) + .setDdlClient(ddlClient) + .setDatabaseClient(dbClient) + .setBatchClient(batchClient) + .setConnectionState(connectionState) + .setTransactionRetryListeners(transactionRetryListeners) + .setExcludeTxnFromChangeStreams(excludeTxnFromChangeStreams) + .setStatementTimeout(statementTimeout) + .withStatementExecutor(statementExecutor) + .setSpan( + createSpanForUnitOfWork( + statementType == StatementType.DDL ? DDL_STATEMENT : SINGLE_USE_TRANSACTION)) + .setProtoDescriptors(getProtoDescriptors()) + .setClientContext(clientContext) + .build(); + if (!isInternalMetadataQuery && !forceSingleUse) { + // Reset the transaction options after starting a single-use transaction. 
+ setDefaultTransactionOptions(getDefaultIsolationLevel()); + } + return singleUseTransaction; + } else { + switch (getUnitOfWorkType()) { + case READ_ONLY_TRANSACTION: + return ReadOnlyTransaction.newBuilder() + .setDatabaseClient(dbClient) + .setBatchClient(batchClient) + .setReadOnlyStaleness(getConnectionPropertyValue(READ_ONLY_STALENESS)) + .setStatementTimeout(statementTimeout) + .withStatementExecutor(statementExecutor) + .setTransactionTag(transactionTag) + .setRpcPriority(getConnectionPropertyValue(RPC_PRIORITY)) + .setSpan(createSpanForUnitOfWork(READ_ONLY_TRANSACTION)) + .setClientContext(clientContext) + .build(); + case READ_WRITE_TRANSACTION: + return ReadWriteTransaction.newBuilder() + .setUsesEmulator(options.usesEmulator()) + .setUseAutoSavepointsForEmulator(options.useAutoSavepointsForEmulator()) + .setDatabaseClient(dbClient) + .setIsolationLevel(transactionIsolationLevel) + .setReadLockMode(getConnectionPropertyValue(READ_LOCK_MODE)) + .setDeadline(getTransactionDeadline()) + .setDelayTransactionStartUntilFirstWrite( + getConnectionPropertyValue(DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE)) + .setKeepTransactionAlive(getConnectionPropertyValue(KEEP_TRANSACTION_ALIVE)) + .setRetryAbortsInternally(getConnectionPropertyValue(RETRY_ABORTS_INTERNALLY)) + .setSavepointSupport(getConnectionPropertyValue(SAVEPOINT_SUPPORT)) + .setReturnCommitStats(getConnectionPropertyValue(RETURN_COMMIT_STATS)) + .setMaxCommitDelay(getConnectionPropertyValue(MAX_COMMIT_DELAY)) + .setTransactionRetryListeners(transactionRetryListeners) + .setStatementTimeout(statementTimeout) + .withStatementExecutor(statementExecutor) + .setTransactionTag(transactionTag) + .setExcludeTxnFromChangeStreams(excludeTxnFromChangeStreams) + .setRpcPriority(getConnectionPropertyValue(RPC_PRIORITY)) + .setSpan(createSpanForUnitOfWork(READ_WRITE_TRANSACTION)) + .setClientContext(clientContext) + .build(); + case DML_BATCH: + // A DML batch can run inside the current transaction. 
It should therefore only + // temporarily replace the current transaction. + pushCurrentUnitOfWorkToTransactionStack(); + return DmlBatch.newBuilder() + .setAutoBatch(autoBatchDml) + .setAutoBatchUpdateCountSupplier(this::getAutoBatchDmlUpdateCount) + .setAutoBatchUpdateCountVerificationSupplier( + this::isAutoBatchDmlUpdateCountVerification) + .setDmlBatchUpdateCountSupplier(this::getDmlBatchUpdateCount) + .setTransaction(currentUnitOfWork) + .setStatementTimeout(statementTimeout) + .withStatementExecutor(statementExecutor) + .setStatementTag(statementTag) + .setExcludeTxnFromChangeStreams(excludeTxnFromChangeStreams) + .setRpcPriority(getConnectionPropertyValue(RPC_PRIORITY)) + // Use the transaction Span for the DML batch. + .setSpan(transactionStack.peek().getSpan()) + .setClientContext(clientContext) + .build(); + case DDL_BATCH: + return DdlBatch.newBuilder() + .setDdlClient(ddlClient) + .setDatabaseClient(dbClient) + .setStatementTimeout(statementTimeout) + .withStatementExecutor(statementExecutor) + .setSpan(createSpanForUnitOfWork(DDL_BATCH)) + .setProtoDescriptors(getProtoDescriptors()) + .setConnectionState(connectionState) + .setClientContext(clientContext) + .build(); + default: + } + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "This connection does not have an active transaction and the state of this connection does" + + " not allow any new transactions to be started"); + } + + /** Pushes the current unit of work to the stack of nested transactions. */ + private void pushCurrentUnitOfWorkToTransactionStack() { + Preconditions.checkState(currentUnitOfWork != null, "There is no current transaction"); + transactionStack.push(currentUnitOfWork); + } + + /** Set the {@link UnitOfWork} of this connection back to the previous {@link UnitOfWork}. 
*/ + private void popUnitOfWorkFromTransactionStack() { + Preconditions.checkState( + !transactionStack.isEmpty(), "There is no unit of work in the transaction stack"); + this.currentUnitOfWork = transactionStack.pop(); + } + + private ApiFuture executeDdlAsync(CallType callType, ParsedStatement ddl) { + ApiFuture result = getOrStartDdlUnitOfWork(ddl).executeDdlAsync(callType, ddl); + // reset proto descriptors after executing a DDL statement + this.protoDescriptors = null; + this.protoDescriptorsFilePath = null; + return result; + } + + @Override + public void write(Mutation mutation) { + get(writeAsync(Collections.singleton(Preconditions.checkNotNull(mutation)))); + } + + @Override + public ApiFuture writeAsync(Mutation mutation) { + return writeAsync(Collections.singleton(Preconditions.checkNotNull(mutation))); + } + + @Override + public void write(Iterable mutations) { + get(writeAsync(Preconditions.checkNotNull(mutations))); + } + + @Override + public ApiFuture writeAsync(Iterable mutations) { + Preconditions.checkNotNull(mutations); + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState(isAutocommit(), ONLY_ALLOWED_IN_AUTOCOMMIT); + return getCurrentUnitOfWorkOrStartNewUnitOfWork().writeAsync(CallType.ASYNC, mutations); + } + + @Override + public void bufferedWrite(Mutation mutation) { + bufferedWrite(Preconditions.checkNotNull(Collections.singleton(mutation))); + } + + @Override + public void bufferedWrite(Iterable mutations) { + Preconditions.checkNotNull(mutations); + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState(!isAutocommit(), NOT_ALLOWED_IN_AUTOCOMMIT); + get(getCurrentUnitOfWorkOrStartNewUnitOfWork().writeAsync(CallType.SYNC, mutations)); + } + + @Override + public void startBatchDdl() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isBatchActive(), "Cannot start a DDL batch when a 
batch is already active"); + ConnectionPreconditions.checkState( + !isReadOnly(), "Cannot start a DDL batch when the connection is in read-only mode"); + ConnectionPreconditions.checkState( + !isTransactionStarted() + || getDdlInTransactionMode() == DdlInTransactionMode.AUTO_COMMIT_TRANSACTION, + "Cannot start a DDL batch while a transaction is active"); + ConnectionPreconditions.checkState( + !(isAutocommit() && isInTransaction()), + "Cannot start a DDL batch while in a temporary transaction"); + ConnectionPreconditions.checkState( + !transactionBeginMarked, "Cannot start a DDL batch when a transaction has begun"); + ConnectionPreconditions.checkState( + isAutocommit() || getDdlInTransactionMode() != DdlInTransactionMode.FAIL, + "Cannot start a DDL batch when autocommit=false and ddlInTransactionMode=FAIL"); + + maybeAutoCommitOrFlushCurrentUnitOfWork(StatementType.DDL, START_BATCH_DDL_STATEMENT); + this.batchMode = BatchMode.DDL; + this.unitOfWorkType = UnitOfWorkType.DDL_BATCH; + this.currentUnitOfWork = + createNewUnitOfWork( + /* isInternalMetadataQuery= */ false, + /* forceSingleUse= */ false, + /* autoBatchDml= */ false); + } + + @Override + public void startBatchDml() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isBatchActive(), "Cannot start a DML batch when a batch is already active"); + ConnectionPreconditions.checkState( + !isReadOnly(), "Cannot start a DML batch when the connection is in read-only mode"); + ConnectionPreconditions.checkState( + !(isInTransaction() && getTransactionMode() == TransactionMode.READ_ONLY_TRANSACTION), + "Cannot start a DML batch when a read-only transaction is in progress"); + startBatchDml(/* autoBatch= */ false); + } + + private UnitOfWork startBatchDml(boolean autoBatch) { + // Make sure that there is a current unit of work that the batch can use. + getCurrentUnitOfWorkOrStartNewUnitOfWork(START_BATCH_DML_STATEMENT); + // Then create the DML batch. 
+ this.batchMode = BatchMode.DML; + this.unitOfWorkType = UnitOfWorkType.DML_BATCH; + return this.currentUnitOfWork = + createNewUnitOfWork( + /* isInternalMetadataQuery= */ false, /* forceSingleUse= */ false, autoBatch); + } + + @Override + public long[] runBatch() { + return get(runBatchAsync()); + } + + @Override + public ApiFuture runBatchAsync() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState(isBatchActive(), "This connection has no active batch"); + try { + if (this.currentUnitOfWork != null) { + return this.currentUnitOfWork.runBatchAsync(CallType.ASYNC); + } + return ApiFutures.immediateFuture(new long[0]); + } finally { + if (isDdlBatchActive()) { + // reset proto descriptors after executing a DDL batch + this.protoDescriptors = null; + this.protoDescriptorsFilePath = null; + } + this.batchMode = BatchMode.NONE; + setDefaultTransactionOptions(getDefaultIsolationLevel()); + } + } + + @Override + public void abortBatch() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState(isBatchActive(), "This connection has no active batch"); + try { + if (this.currentUnitOfWork != null) { + this.currentUnitOfWork.abortBatch(); + } + } finally { + this.batchMode = BatchMode.NONE; + setDefaultTransactionOptions(getDefaultIsolationLevel()); + } + } + + private boolean isBatchActive() { + return isDdlBatchActive() || isDmlBatchActive(); + } + + @Override + public boolean isDdlBatchActive() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return this.batchMode == BatchMode.DDL; + } + + @Override + public boolean isDmlBatchActive() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return this.batchMode == BatchMode.DML; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionOptions.java 
b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionOptions.java new file mode 100644 index 000000000000..44faecee7046 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionOptions.java @@ -0,0 +1,1271 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.ConnectionProperties.AUTOCOMMIT; +import static com.google.cloud.spanner.connection.ConnectionProperties.AUTO_CONFIG_EMULATOR; +import static com.google.cloud.spanner.connection.ConnectionProperties.AUTO_PARTITION_MODE; +import static com.google.cloud.spanner.connection.ConnectionProperties.CHANNEL_PROVIDER; +import static com.google.cloud.spanner.connection.ConnectionProperties.CLIENT_CERTIFICATE; +import static com.google.cloud.spanner.connection.ConnectionProperties.CLIENT_KEY; +import static com.google.cloud.spanner.connection.ConnectionProperties.CREDENTIALS_PROVIDER; +import static com.google.cloud.spanner.connection.ConnectionProperties.CREDENTIALS_URL; +import static com.google.cloud.spanner.connection.ConnectionProperties.DATABASE_ROLE; +import static com.google.cloud.spanner.connection.ConnectionProperties.DATA_BOOST_ENABLED; +import static com.google.cloud.spanner.connection.ConnectionProperties.DCP_INITIAL_CHANNELS; +import static 
com.google.cloud.spanner.connection.ConnectionProperties.DCP_MAX_CHANNELS; +import static com.google.cloud.spanner.connection.ConnectionProperties.DCP_MIN_CHANNELS; +import static com.google.cloud.spanner.connection.ConnectionProperties.DIALECT; +import static com.google.cloud.spanner.connection.ConnectionProperties.ENABLE_API_TRACING; +import static com.google.cloud.spanner.connection.ConnectionProperties.ENABLE_DIRECT_ACCESS; +import static com.google.cloud.spanner.connection.ConnectionProperties.ENABLE_DYNAMIC_CHANNEL_POOL; +import static com.google.cloud.spanner.connection.ConnectionProperties.ENABLE_END_TO_END_TRACING; +import static com.google.cloud.spanner.connection.ConnectionProperties.ENABLE_EXTENDED_TRACING; +import static com.google.cloud.spanner.connection.ConnectionProperties.ENCODED_CREDENTIALS; +import static com.google.cloud.spanner.connection.ConnectionProperties.ENDPOINT; +import static com.google.cloud.spanner.connection.ConnectionProperties.GRPC_INTERCEPTOR_PROVIDER; +import static com.google.cloud.spanner.connection.ConnectionProperties.IS_EXPERIMENTAL_HOST; +import static com.google.cloud.spanner.connection.ConnectionProperties.LENIENT; +import static com.google.cloud.spanner.connection.ConnectionProperties.MAX_COMMIT_DELAY; +import static com.google.cloud.spanner.connection.ConnectionProperties.MAX_PARTITIONED_PARALLELISM; +import static com.google.cloud.spanner.connection.ConnectionProperties.MAX_PARTITIONS; +import static com.google.cloud.spanner.connection.ConnectionProperties.MAX_SESSIONS; +import static com.google.cloud.spanner.connection.ConnectionProperties.MIN_SESSIONS; +import static com.google.cloud.spanner.connection.ConnectionProperties.NUM_CHANNELS; +import static com.google.cloud.spanner.connection.ConnectionProperties.OAUTH_TOKEN; +import static com.google.cloud.spanner.connection.ConnectionProperties.READONLY; +import static com.google.cloud.spanner.connection.ConnectionProperties.RETRY_ABORTS_INTERNALLY; +import static 
com.google.cloud.spanner.connection.ConnectionProperties.RETURN_COMMIT_STATS; +import static com.google.cloud.spanner.connection.ConnectionProperties.ROUTE_TO_LEADER; +import static com.google.cloud.spanner.connection.ConnectionProperties.TRACING_PREFIX; +import static com.google.cloud.spanner.connection.ConnectionProperties.TRACK_CONNECTION_LEAKS; +import static com.google.cloud.spanner.connection.ConnectionProperties.TRACK_SESSION_LEAKS; +import static com.google.cloud.spanner.connection.ConnectionProperties.UNIVERSE_DOMAIN; +import static com.google.cloud.spanner.connection.ConnectionProperties.USER_AGENT; +import static com.google.cloud.spanner.connection.ConnectionProperties.USE_AUTO_SAVEPOINTS_FOR_EMULATOR; +import static com.google.cloud.spanner.connection.ConnectionProperties.USE_PLAIN_TEXT; +import static com.google.cloud.spanner.connection.ConnectionProperties.USE_VIRTUAL_GRPC_TRANSPORT_THREADS; +import static com.google.cloud.spanner.connection.ConnectionProperties.USE_VIRTUAL_THREADS; +import static com.google.cloud.spanner.connection.ConnectionPropertyValue.cast; + +import com.google.api.core.InternalApi; +import com.google.api.gax.core.CredentialsProvider; +import com.google.api.gax.grpc.GrpcInterceptorProvider; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.auth.Credentials; +import com.google.auth.oauth2.AccessToken; +import com.google.auth.oauth2.GoogleCredentials; +import com.google.auth.oauth2.ServiceAccountCredentials; +import com.google.cloud.NoCredentials; +import com.google.cloud.ServiceOptions; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Options.RpcPriority; +import com.google.cloud.spanner.SessionPoolOptions; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import 
com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.GrpcInterceptorProviderConverter; +import com.google.cloud.spanner.connection.StatementExecutor.StatementExecutorType; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import com.google.common.base.Suppliers; +import com.google.common.collect.ImmutableMap; +import io.grpc.Deadline; +import io.grpc.Deadline.Ticker; +import io.opentelemetry.api.OpenTelemetry; +import java.io.IOException; +import java.net.URL; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Stream; +import javax.annotation.Nullable; + +/** + * Internal connection API for Google Cloud Spanner. This class may introduce breaking changes + * without prior notice. + * + *

Options for creating a {@link Connection} to a Google Cloud Spanner database. + * + *

Usage: + * + *

+ * 
+ * {@code
+ * ConnectionOptions options = ConnectionOptions.newBuilder()
+ *       .setUri("cloudspanner:/projects/my_project_id/instances/my_instance_id/databases/my_database_name?autocommit=false")
+ *       .setCredentialsUrl("/home/cloudspanner-keys/my-key.json")
+ *       .build();
+ * try(Connection connection = options.getConnection()) {
+ *   try(ResultSet rs = connection.executeQuery(Statement.of("SELECT SingerId, AlbumId, MarketingBudget FROM Albums"))) {
+ *     while(rs.next()) {
+ *       // do something
+ *     }
+ *   }
+ * }
+ * }
+ * 
+ * 
+ */ +@InternalApi +public class ConnectionOptions { + + /** + * Set this system property to true to enable transactional connection state by default for + * PostgreSQL-dialect databases. The default is currently false. + */ + public static String ENABLE_TRANSACTIONAL_CONNECTION_STATE_FOR_POSTGRESQL_PROPERTY = + "spanner.enable_transactional_connection_state_for_postgresql"; + + private static final LocalConnectionChecker LOCAL_CONNECTION_CHECKER = + new LocalConnectionChecker(); + static final boolean DEFAULT_USE_PLAIN_TEXT = false; + static final boolean DEFAULT_IS_EXPERIMENTAL_HOST = false; + static final boolean DEFAULT_AUTOCOMMIT = true; + static final boolean DEFAULT_READONLY = false; + static final boolean DEFAULT_RETRY_ABORTS_INTERNALLY = true; + static final boolean DEFAULT_USE_VIRTUAL_THREADS = false; + static final boolean DEFAULT_USE_VIRTUAL_GRPC_TRANSPORT_THREADS = false; + static final String DEFAULT_CREDENTIALS = null; + static final String DEFAULT_CLIENT_CERTIFICATE = null; + static final String DEFAULT_CLIENT_KEY = null; + static final String DEFAULT_OAUTH_TOKEN = null; + static final Integer DEFAULT_MIN_SESSIONS = null; + static final Integer DEFAULT_MAX_SESSIONS = null; + static final Integer DEFAULT_NUM_CHANNELS = null; + static final Boolean DEFAULT_ENABLE_DYNAMIC_CHANNEL_POOL = null; + static final Integer DEFAULT_DCP_MIN_CHANNELS = null; + static final Integer DEFAULT_DCP_MAX_CHANNELS = null; + static final Integer DEFAULT_DCP_INITIAL_CHANNELS = null; + static final String DEFAULT_ENDPOINT = null; + static final String DEFAULT_CHANNEL_PROVIDER = null; + static final String DEFAULT_DATABASE_ROLE = null; + static final String DEFAULT_USER_AGENT = null; + static final String DEFAULT_OPTIMIZER_VERSION = ""; + static final String DEFAULT_OPTIMIZER_STATISTICS_PACKAGE = ""; + static final RpcPriority DEFAULT_RPC_PRIORITY = null; + static final DdlInTransactionMode DEFAULT_DDL_IN_TRANSACTION_MODE = + DdlInTransactionMode.ALLOW_IN_EMPTY_TRANSACTION; + 
static final String DEFAULT_DEFAULT_SEQUENCE_KIND = null; + static final boolean DEFAULT_RETURN_COMMIT_STATS = false; + static final boolean DEFAULT_LENIENT = false; + static final boolean DEFAULT_ROUTE_TO_LEADER = true; + static final boolean DEFAULT_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE = false; + static final boolean DEFAULT_KEEP_TRANSACTION_ALIVE = false; + static final boolean DEFAULT_TRACK_SESSION_LEAKS = true; + static final boolean DEFAULT_TRACK_CONNECTION_LEAKS = true; + static final boolean DEFAULT_DATA_BOOST_ENABLED = false; + static final boolean DEFAULT_AUTO_PARTITION_MODE = false; + static final int DEFAULT_MAX_PARTITIONS = 0; + static final int DEFAULT_MAX_PARTITIONED_PARALLELISM = 1; + static final Boolean DEFAULT_ENABLE_EXTENDED_TRACING = null; + static final Boolean DEFAULT_ENABLE_API_TRACING = null; + static final boolean DEFAULT_ENABLE_END_TO_END_TRACING = false; + static final boolean DEFAULT_AUTO_BATCH_DML = false; + static final long DEFAULT_AUTO_BATCH_DML_UPDATE_COUNT = 1L; + static final long DEFAULT_BATCH_DML_UPDATE_COUNT = -1L; + static final boolean DEFAULT_AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION = true; + private static final String EXPERIMENTAL_HOST_PROJECT_ID = "default"; + private static final String DEFAULT_EXPERIMENTAL_HOST_INSTANCE_ID = "default"; + + private static final String PLAIN_TEXT_PROTOCOL = "http:"; + private static final String HOST_PROTOCOL = "https:"; + private static final String DEFAULT_HOST = "https://spanner.googleapis.com"; + private static final String SPANNER_EMULATOR_HOST_ENV_VAR = "SPANNER_EMULATOR_HOST"; + private static final String DEFAULT_EMULATOR_HOST = "http://localhost:9010"; + + /** Use plain text is only for local testing purposes. 
*/ + static final String USE_PLAIN_TEXT_PROPERTY_NAME = "usePlainText"; + + /** Connect to a Experimental Host * */ + static final String IS_EXPERIMENTAL_HOST_PROPERTY_NAME = "isExperimentalHost"; + + /** Client certificate path to establish mTLS */ + static final String CLIENT_CERTIFICATE_PROPERTY_NAME = "clientCertificate"; + + /** Client key path to establish mTLS */ + static final String CLIENT_KEY_PROPERTY_NAME = "clientKey"; + + /** Name of the 'autocommit' connection property. */ + public static final String AUTOCOMMIT_PROPERTY_NAME = "autocommit"; + + /** Name of the 'readonly' connection property. */ + public static final String READONLY_PROPERTY_NAME = "readonly"; + + /** Name of the 'routeToLeader' connection property. */ + public static final String ROUTE_TO_LEADER_PROPERTY_NAME = "routeToLeader"; + + /** Name of the 'retry aborts internally' connection property. */ + public static final String RETRY_ABORTS_INTERNALLY_PROPERTY_NAME = "retryAbortsInternally"; + + /** Name of the property to enable/disable virtual threads for the statement executor. */ + public static final String USE_VIRTUAL_THREADS_PROPERTY_NAME = "useVirtualThreads"; + + /** Name of the property to enable/disable virtual threads for gRPC transport. */ + public static final String USE_VIRTUAL_GRPC_TRANSPORT_THREADS_PROPERTY_NAME = + "useVirtualGrpcTransportThreads"; + + /** Name of the 'credentials' connection property. */ + public static final String CREDENTIALS_PROPERTY_NAME = "credentials"; + + /** Name of the 'encodedCredentials' connection property. */ + public static final String ENCODED_CREDENTIALS_PROPERTY_NAME = "encodedCredentials"; + + public static final String ENABLE_ENCODED_CREDENTIALS_SYSTEM_PROPERTY = + "ENABLE_ENCODED_CREDENTIALS"; + + /** Name of the 'credentialsProvider' connection property. 
*/ + public static final String CREDENTIALS_PROVIDER_PROPERTY_NAME = "credentialsProvider"; + + public static final String ENABLE_CREDENTIALS_PROVIDER_SYSTEM_PROPERTY = + "ENABLE_CREDENTIALS_PROVIDER"; + + /** + * OAuth token to use for authentication. Cannot be used in combination with a credentials file. + */ + public static final String OAUTH_TOKEN_PROPERTY_NAME = "oauthToken"; + + /** Name of the 'minSessions' connection property. */ + public static final String MIN_SESSIONS_PROPERTY_NAME = "minSessions"; + + /** Name of the 'maxSessions' connection property. */ + public static final String MAX_SESSIONS_PROPERTY_NAME = "maxSessions"; + + /** Name of the 'numChannels' connection property. */ + public static final String NUM_CHANNELS_PROPERTY_NAME = "numChannels"; + + /** Name of the 'enableDynamicChannelPool' connection property. */ + public static final String ENABLE_DYNAMIC_CHANNEL_POOL_PROPERTY_NAME = "enableDynamicChannelPool"; + + /** Name of the 'dcpMinChannels' connection property. */ + public static final String DCP_MIN_CHANNELS_PROPERTY_NAME = "dcpMinChannels"; + + /** Name of the 'dcpMaxChannels' connection property. */ + public static final String DCP_MAX_CHANNELS_PROPERTY_NAME = "dcpMaxChannels"; + + /** Name of the 'dcpInitialChannels' connection property. */ + public static final String DCP_INITIAL_CHANNELS_PROPERTY_NAME = "dcpInitialChannels"; + + /** Name of the 'endpoint' connection property. */ + public static final String ENDPOINT_PROPERTY_NAME = "endpoint"; + + /** Name of the 'channelProvider' connection property. */ + public static final String CHANNEL_PROVIDER_PROPERTY_NAME = "channelProvider"; + + public static final String ENABLE_CHANNEL_PROVIDER_SYSTEM_PROPERTY = "ENABLE_CHANNEL_PROVIDER"; + + public static final String ENABLE_GRPC_INTERCEPTOR_PROVIDER_SYSTEM_PROPERTY = + "ENABLE_GRPC_INTERCEPTOR_PROVIDER"; + + /** Custom user agent string is only for other Google libraries. 
*/ + static final String USER_AGENT_PROPERTY_NAME = "userAgent"; + + /** Query optimizer version to use for a connection. */ + static final String OPTIMIZER_VERSION_PROPERTY_NAME = "optimizerVersion"; + + /** Query optimizer statistics package to use for a connection. */ + static final String OPTIMIZER_STATISTICS_PACKAGE_PROPERTY_NAME = "optimizerStatisticsPackage"; + + /** Name of the 'lenientMode' connection property. */ + public static final String LENIENT_PROPERTY_NAME = "lenient"; + + /** Name of the 'rpcPriority' connection property. */ + public static final String RPC_PRIORITY_NAME = "rpcPriority"; + + public static final String DDL_IN_TRANSACTION_MODE_PROPERTY_NAME = "ddlInTransactionMode"; + public static final String DEFAULT_SEQUENCE_KIND_PROPERTY_NAME = "defaultSequenceKind"; + + /** Dialect to use for a connection. */ + static final String DIALECT_PROPERTY_NAME = "dialect"; + + /** Name of the 'databaseRole' connection property. */ + public static final String DATABASE_ROLE_PROPERTY_NAME = "databaseRole"; + + /** Name of the 'delay transaction start until first write' property. */ + public static final String DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE_NAME = + "delayTransactionStartUntilFirstWrite"; + + /** Name of the 'keep transaction alive' property. */ + public static final String KEEP_TRANSACTION_ALIVE_PROPERTY_NAME = "keepTransactionAlive"; + + /** Name of the 'trackStackTraceOfSessionCheckout' connection property. */ + public static final String TRACK_SESSION_LEAKS_PROPERTY_NAME = "trackSessionLeaks"; + + /** Name of the 'trackStackTraceOfConnectionCreation' connection property. 
*/ + public static final String TRACK_CONNECTION_LEAKS_PROPERTY_NAME = "trackConnectionLeaks"; + + public static final String DATA_BOOST_ENABLED_PROPERTY_NAME = "dataBoostEnabled"; + public static final String AUTO_PARTITION_MODE_PROPERTY_NAME = "autoPartitionMode"; + public static final String MAX_PARTITIONS_PROPERTY_NAME = "maxPartitions"; + public static final String MAX_PARTITIONED_PARALLELISM_PROPERTY_NAME = + "maxPartitionedParallelism"; + + public static final String ENABLE_EXTENDED_TRACING_PROPERTY_NAME = "enableExtendedTracing"; + public static final String ENABLE_API_TRACING_PROPERTY_NAME = "enableApiTracing"; + public static final String ENABLE_END_TO_END_TRACING_PROPERTY_NAME = "enableEndToEndTracing"; + + public static final String AUTO_BATCH_DML_PROPERTY_NAME = "auto_batch_dml"; + public static final String AUTO_BATCH_DML_UPDATE_COUNT_PROPERTY_NAME = + "auto_batch_dml_update_count"; + public static final String AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION_PROPERTY_NAME = + "auto_batch_dml_update_count_verification"; + public static final String BATCH_DML_UPDATE_COUNT_PROPERTY_NAME = "batch_dml_update_count"; + + private static final String GUARDED_CONNECTION_PROPERTY_ERROR_MESSAGE = + "%s can only be used if the system property %s has been set to true. 
" + + "Start the application with the JVM command line option -D%s=true"; + + private static String generateGuardedConnectionPropertyError( + String systemPropertyName, String connectionPropertyName) { + return String.format( + GUARDED_CONNECTION_PROPERTY_ERROR_MESSAGE, + connectionPropertyName, + systemPropertyName, + systemPropertyName); + } + + static boolean isEnableTransactionalConnectionStateForPostgreSQL() { + return Boolean.parseBoolean( + System.getProperty(ENABLE_TRANSACTIONAL_CONNECTION_STATE_FOR_POSTGRESQL_PROPERTY, "false")); + } + + /** + * Gets the default project-id for the current environment as defined by {@link + * ServiceOptions#getDefaultProjectId()}, and if none could be found, the project-id of the given + * credentials if it contains any. + * + * @param credentials The credentials to use to get the default project-id if none could be found + * in the environment. + * @return the default project-id. + */ + public static String getDefaultProjectId(Credentials credentials) { + String projectId = SpannerOptions.getDefaultProjectId(); + if (projectId == null + && credentials != null + && credentials instanceof ServiceAccountCredentials) { + projectId = ((ServiceAccountCredentials) credentials).getProjectId(); + } + return projectId; + } + + /** + * Closes all {@link Spanner} instances that have been opened by connections + * during the lifetime of this JVM. Call this method at the end of your application to free up + * resources. You must close all {@link Connection}s that have been opened by your application + * before calling this method. Failing to do so, will cause this method to throw a {@link + * SpannerException}. + * + *

This method is also automatically called by a shutdown hook (see {@link + * Runtime#addShutdownHook(Thread)}) when the JVM is shutdown gracefully. + */ + public static void closeSpanner() { + SpannerPool.INSTANCE.checkAndCloseSpanners(); + } + + /** + * {@link SpannerOptionsConfigurator} can be used to add additional configuration for a {@link + * Spanner} instance. Intended for tests. + */ + @VisibleForTesting + interface SpannerOptionsConfigurator { + void configure(SpannerOptions.Builder options); + } + + /** + * {@link ExternalChannelProvider} can be used for to specify an external channel provider. This + * is needed if you require different certificates than those provided by the standard grpc + * channel provider. + */ + public interface ExternalChannelProvider { + TransportChannelProvider getChannelProvider(String host, int port); + } + + /** Builder for {@link ConnectionOptions} instances. */ + public static class Builder { + private final Map> connectionPropertyValues = + new HashMap<>(); + private String uri; + private Credentials credentials; + private StatementExecutorType statementExecutorType; + private SessionPoolOptions sessionPoolOptions; + private List statementExecutionInterceptors = + Collections.emptyList(); + private SpannerOptionsConfigurator configurator; + private OpenTelemetry openTelemetry; + private Ticker ticker = Deadline.getSystemTicker(); + + private Builder() {} + + /** Spanner {@link ConnectionOptions} URI format. 
*/ + public static final String SPANNER_URI_FORMAT = + "(?:(?:spanner|cloudspanner):)(?//[\\w.-]+(?:\\.[\\w\\.-]+)*[\\w\\-\\._~:/?#\\[\\]@!\\$&'\\(\\)\\*\\+,;=.]+)?/projects/(?(([a-z]|[-.:]|[0-9])+|(DEFAULT_PROJECT_ID)))(/instances/(?([a-z]|[-]|[0-9])+)(/databases/(?([a-z]|[-]|[_]|[0-9])+))?)?(?:[?|;].*)?"; + + public static final String EXTERNAL_HOST_FORMAT = + "(?:(?:spanner|cloudspanner):)(?//[\\w.-]+(?::\\d+)?)(/instances/(?[a-z0-9-]+))?(/databases/(?[a-z0-9_-]+))(?:[?;].*)?"; + private static final String SPANNER_URI_REGEX = "(?is)^" + SPANNER_URI_FORMAT + "$"; + + @VisibleForTesting + static final Pattern SPANNER_URI_PATTERN = Pattern.compile(SPANNER_URI_REGEX); + + @VisibleForTesting + static final Pattern EXTERNAL_HOST_PATTERN = Pattern.compile(EXTERNAL_HOST_FORMAT); + + private static final String HOST_GROUP = "HOSTGROUP"; + private static final String PROJECT_GROUP = "PROJECTGROUP"; + private static final String INSTANCE_GROUP = "INSTANCEGROUP"; + private static final String DATABASE_GROUP = "DATABASEGROUP"; + private static final String DEFAULT_PROJECT_ID_PLACEHOLDER = "DEFAULT_PROJECT_ID"; + + private boolean isValidUri(String uri) { + return SPANNER_URI_PATTERN.matcher(uri).matches(); + } + + private boolean isValidExperimentalHostUri(String uri) { + return EXTERNAL_HOST_PATTERN.matcher(uri).matches(); + } + + /** + * Sets the URI of the Cloud Spanner database to connect to. A connection URI must be specified + * in this format: + * + *

+     * cloudspanner:[//host[:port]]/projects/project-id[/instances/instance-id[/databases/database-name]][\?property-name=property-value[;property-name=property-value]*]?
+     * 
+ * + * The property-value strings should be url-encoded. + * + *

The project-id part of the URI may be filled with the placeholder DEFAULT_PROJECT_ID. This + * placeholder will be replaced by the default project id of the environment that is requesting + * a connection. + * + *

The supported properties are: + * + *

    + *
  • credentials (String): URL for the credentials file to use for the connection. This + * property is only used if no credentials have been specified using the {@link + * ConnectionOptions.Builder#setCredentialsUrl(String)} method. If you do not specify any + * credentials at all, the default credentials of the environment as returned by {@link + * GoogleCredentials#getApplicationDefault()} will be used. + *
  • encodedCredentials (String): A Base64 encoded string containing the Google credentials + * to use. You should only set either this property or the `credentials` (file location) + * property, but not both at the same time. + *
  • credentialsProvider (String): Class name of the {@link + * com.google.api.gax.core.CredentialsProvider} that should be used to get credentials for + * a connection that is created by this {@link ConnectionOptions}. The credentials will be + * retrieved from the {@link com.google.api.gax.core.CredentialsProvider} when a new + * connection is created. A connection will use the credentials that were obtained at + * creation during its lifetime. + *
  • autocommit (boolean): Sets the initial autocommit mode for the connection. Default is + * true. + *
  • readonly (boolean): Sets the initial readonly mode for the connection. Default is + * false. + *
  • minSessions (int): Sets the minimum number of sessions in the backing session pool. + *
  • maxSessions (int): Sets the maximum number of sessions in the backing session pool. + *
  • numChannels (int): Sets the number of gRPC channels to use for the connection. + *
  • retryAbortsInternally (boolean): Sets the initial retryAbortsInternally mode for the + * connection. Default is true. + *
  • optimizerVersion (string): Sets the query optimizer version to use for the connection. + *
  • autoConfigEmulator (boolean): Automatically configures the connection to connect to the + * Cloud Spanner emulator. If no host and port is specified in the connection string, the + * connection will automatically use the default emulator host/port combination + * (localhost:9010). Plain text communication will be enabled and authentication will be + * disabled. The instance and database in the connection string will automatically be + * created on the emulator if any of them do not yet exist. Any existing instance or + * database on the emulator will remain untouched. No other configuration is needed in + * order to connect to the emulator than setting this property. + *
  • routeToLeader (boolean): Sets the routeToLeader flag to route requests to leader (true) + * or any region (false) in read/write transactions and Partitioned DML. Default is true. + *
+ * + * @param uri The URI of the Spanner database to connect to. + * @return this builder + */ + public Builder setUri(String uri) { + if (!isValidExperimentalHostUri(uri)) { + Preconditions.checkArgument( + isValidUri(uri), + "The specified URI is not a valid Cloud Spanner connection URI. Please specify a URI in" + + " the format" + + " \"cloudspanner:[//host[:port]]/projects/project-id[/instances/instance-id[/databases/database-name]][\\?property-name=property-value[;property-name=property-value]*]?\""); + } + ConnectionPropertyValue value = + cast(ConnectionProperties.parseValues(uri).get(LENIENT.getKey())); + checkValidProperties(value != null && value.getValue(), uri); + this.uri = uri; + return this; + } + + Builder setConnectionPropertyValue( + com.google.cloud.spanner.connection.ConnectionProperty property, T value) { + this.connectionPropertyValues.put( + property.getKey(), new ConnectionPropertyValue<>(property, value, value)); + return this; + } + + /** Sets the {@link SessionPoolOptions} to use for the connection. */ + public Builder setSessionPoolOptions(SessionPoolOptions sessionPoolOptions) { + Preconditions.checkNotNull(sessionPoolOptions); + this.sessionPoolOptions = sessionPoolOptions; + return this; + } + + /** + * Sets the URL of the credentials file to use for this connection. The URL may be a reference + * to a file on the local file system, or to a file on Google Cloud Storage. References to + * Google Cloud Storage files are only allowed when the application is running on Google Cloud + * and the environment has access to the specified storage location. It also requires that the + * Google Cloud Storage client library is present on the class path. The Google Cloud Storage + * library is not automatically added as a dependency by the JDBC driver. + * + *

If you do not specify a credentialsUrl (either by using this setter, or by specifying on + * the connection URI), the credentials returned by {@link + * GoogleCredentials#getApplicationDefault()} will be used for the connection. + * + * @param credentialsUrl A valid file or Google Cloud Storage URL for the credentials file to be + * used. + * @return this builder + */ + public Builder setCredentialsUrl(String credentialsUrl) { + setConnectionPropertyValue(CREDENTIALS_URL, credentialsUrl); + return this; + } + + /** + * Sets the OAuth token to use with this connection. The token must be a valid token with access + * to the resources (project/instance/database) that the connection will be accessing. This + * authentication method cannot be used in combination with a credentials file. If both an OAuth + * token and a credentials file is specified, the {@link #build()} method will throw an + * exception. + * + * @param oauthToken A valid OAuth token for the Google Cloud project that is used by this + * connection. + * @return this builder + */ + public Builder setOAuthToken(String oauthToken) { + setConnectionPropertyValue(OAUTH_TOKEN, oauthToken); + return this; + } + + @VisibleForTesting + Builder setStatementExecutionInterceptors(List interceptors) { + this.statementExecutionInterceptors = interceptors; + return this; + } + + @VisibleForTesting + Builder setConfigurator(SpannerOptionsConfigurator configurator) { + this.configurator = Preconditions.checkNotNull(configurator); + return this; + } + + @VisibleForTesting + Builder setCredentials(Credentials credentials) { + this.credentials = credentials; + return this; + } + + @VisibleForTesting + Builder setTicker(Ticker ticker) { + this.ticker = Preconditions.checkNotNull(ticker); + return this; + } + + /** + * Sets the executor type to use for connections. See {@link StatementExecutorType} for more + * information on what the different options mean. 
+ */ + public Builder setStatementExecutorType(StatementExecutorType statementExecutorType) { + this.statementExecutorType = statementExecutorType; + return this; + } + + public Builder setOpenTelemetry(OpenTelemetry openTelemetry) { + this.openTelemetry = openTelemetry; + return this; + } + + public Builder setTracingPrefix(String tracingPrefix) { + setConnectionPropertyValue(TRACING_PREFIX, tracingPrefix); + return this; + } + + /** + * @return the {@link ConnectionOptions} + */ + public ConnectionOptions build() { + Preconditions.checkState(this.uri != null, "Connection URI is required"); + return new ConnectionOptions(this); + } + } + + /** + * Create a {@link Builder} for {@link ConnectionOptions}. Use this method to create {@link + * ConnectionOptions} that can be used to obtain a {@link Connection}. + * + * @return a new {@link Builder} + */ + public static Builder newBuilder() { + return new Builder(); + } + + private final ConnectionState initialConnectionState; + private final String uri; + private final String warnings; + private final Credentials fixedCredentials; + + private final String host; + private final String projectId; + private final String instanceId; + private final String databaseName; + private final Credentials credentials; + private final StatementExecutorType statementExecutorType; + private final SessionPoolOptions sessionPoolOptions; + + private final OpenTelemetry openTelemetry; + private final List statementExecutionInterceptors; + private final SpannerOptionsConfigurator configurator; + private final Ticker ticker; + + private ConnectionOptions(Builder builder) { + Matcher matcher; + boolean isExperimentalHostPattern = false; + if (builder.isValidExperimentalHostUri(builder.uri)) { + matcher = Builder.EXTERNAL_HOST_PATTERN.matcher(builder.uri); + isExperimentalHostPattern = true; + } else { + matcher = Builder.SPANNER_URI_PATTERN.matcher(builder.uri); + } + Preconditions.checkArgument( + matcher.find(), String.format("Invalid 
connection URI specified: %s", builder.uri)); + + ImmutableMap> connectionPropertyValues = + ImmutableMap.>builder() + .putAll(ConnectionProperties.parseValues(builder.uri)) + .putAll(builder.connectionPropertyValues) + .buildKeepingLast(); + this.uri = builder.uri; + ConnectionPropertyValue value = cast(connectionPropertyValues.get(LENIENT.getKey())); + this.warnings = checkValidProperties(value != null && value.getValue(), uri); + this.fixedCredentials = builder.credentials; + this.statementExecutorType = builder.statementExecutorType; + + this.openTelemetry = builder.openTelemetry; + this.statementExecutionInterceptors = + Collections.unmodifiableList(builder.statementExecutionInterceptors); + this.configurator = builder.configurator; + this.ticker = builder.ticker; + + // Create the initial connection state from the parsed properties in the connection URL. + this.initialConnectionState = new ConnectionState(connectionPropertyValues); + + checkGuardedProperty( + getInitialConnectionPropertyValue(ENCODED_CREDENTIALS), + ENABLE_ENCODED_CREDENTIALS_SYSTEM_PROPERTY, + ENCODED_CREDENTIALS_PROPERTY_NAME); + checkGuardedProperty( + getInitialConnectionPropertyValue(CREDENTIALS_PROVIDER) == null + ? null + : getInitialConnectionPropertyValue(CREDENTIALS_PROVIDER).getClass().getName(), + ENABLE_CREDENTIALS_PROVIDER_SYSTEM_PROPERTY, + CREDENTIALS_PROVIDER_PROPERTY_NAME); + checkGuardedProperty( + getInitialConnectionPropertyValue(CHANNEL_PROVIDER), + ENABLE_CHANNEL_PROVIDER_SYSTEM_PROPERTY, + CHANNEL_PROVIDER_PROPERTY_NAME); + checkGuardedProperty( + getInitialConnectionPropertyValue(GRPC_INTERCEPTOR_PROVIDER), + ENABLE_GRPC_INTERCEPTOR_PROVIDER_SYSTEM_PROPERTY, + GRPC_INTERCEPTOR_PROVIDER.getName()); + // Check that at most one of credentials location, encoded credentials, credentials provider and + // OAuth token has been specified in the connection URI. 
+ Preconditions.checkArgument( + Stream.of( + getInitialConnectionPropertyValue(CREDENTIALS_URL), + getInitialConnectionPropertyValue(ENCODED_CREDENTIALS), + getInitialConnectionPropertyValue(CREDENTIALS_PROVIDER), + getInitialConnectionPropertyValue(OAUTH_TOKEN)) + .filter(Objects::nonNull) + .count() + <= 1, + "Specify only one of credentialsUrl, encodedCredentials, credentialsProvider and OAuth" + + " token"); + + boolean usePlainText = + getInitialConnectionPropertyValue(AUTO_CONFIG_EMULATOR) + || getInitialConnectionPropertyValue(USE_PLAIN_TEXT); + this.host = + determineHost( + matcher, + getInitialConnectionPropertyValue(ENDPOINT), + getInitialConnectionPropertyValue(AUTO_CONFIG_EMULATOR), + usePlainText, + System.getenv()); + GoogleCredentials defaultExperimentalHostCredentials = + SpannerOptions.getDefaultExperimentalCredentialsFromSysEnv(); + // Using credentials on a plain text connection is not allowed, so if the user has not specified + // any credentials and is using a plain text connection, we should not try to get the + // credentials from the environment, but default to NoCredentials. 
+ if (this.fixedCredentials == null + && getInitialConnectionPropertyValue(CREDENTIALS_URL) == null + && getInitialConnectionPropertyValue(ENCODED_CREDENTIALS) == null + && getInitialConnectionPropertyValue(CREDENTIALS_PROVIDER) == null + && getInitialConnectionPropertyValue(OAUTH_TOKEN) == null + && usePlainText) { + this.credentials = NoCredentials.getInstance(); + } else if (getInitialConnectionPropertyValue(OAUTH_TOKEN) != null) { + this.credentials = + new GoogleCredentials( + new AccessToken(getInitialConnectionPropertyValue(OAUTH_TOKEN), null)); + } else if ((isExperimentalHostPattern || isExperimentalHost()) + && defaultExperimentalHostCredentials != null) { + this.credentials = defaultExperimentalHostCredentials; + } else if (getInitialConnectionPropertyValue(CREDENTIALS_PROVIDER) != null) { + try { + this.credentials = getInitialConnectionPropertyValue(CREDENTIALS_PROVIDER).getCredentials(); + } catch (IOException exception) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Failed to get credentials from CredentialsProvider: " + exception.getMessage(), + exception); + } + } else if (this.fixedCredentials != null) { + this.credentials = fixedCredentials; + } else if (getInitialConnectionPropertyValue(ENCODED_CREDENTIALS) != null) { + this.credentials = + getCredentialsService() + .decodeCredentials(getInitialConnectionPropertyValue(ENCODED_CREDENTIALS)); + } else { + this.credentials = + getCredentialsService() + .createCredentials(getInitialConnectionPropertyValue(CREDENTIALS_URL)); + } + + if (getInitialConnectionPropertyValue(MIN_SESSIONS) != null + || getInitialConnectionPropertyValue(MAX_SESSIONS) != null + || !getInitialConnectionPropertyValue(TRACK_SESSION_LEAKS)) { + SessionPoolOptions.Builder sessionPoolOptionsBuilder = + builder.sessionPoolOptions == null + ? 
SessionPoolOptions.newBuilder() + : builder.sessionPoolOptions.toBuilder(); + sessionPoolOptionsBuilder.setTrackStackTraceOfSessionCheckout( + getInitialConnectionPropertyValue(TRACK_SESSION_LEAKS)); + sessionPoolOptionsBuilder.setAutoDetectDialect(true); + if (getInitialConnectionPropertyValue(MIN_SESSIONS) != null) { + sessionPoolOptionsBuilder.setMinSessions(getInitialConnectionPropertyValue(MIN_SESSIONS)); + } + if (getInitialConnectionPropertyValue(MAX_SESSIONS) != null) { + sessionPoolOptionsBuilder.setMaxSessions(getInitialConnectionPropertyValue(MAX_SESSIONS)); + } + this.sessionPoolOptions = sessionPoolOptionsBuilder.build(); + } else if (builder.sessionPoolOptions != null) { + this.sessionPoolOptions = builder.sessionPoolOptions; + } else if (isExperimentalHostPattern || isExperimentalHost()) { + this.sessionPoolOptions = + SessionPoolOptions.newBuilder().setExperimentalHost().setAutoDetectDialect(true).build(); + } else { + this.sessionPoolOptions = SessionPoolOptions.newBuilder().setAutoDetectDialect(true).build(); + } + + String projectId = EXPERIMENTAL_HOST_PROJECT_ID; + String instanceId = matcher.group(Builder.INSTANCE_GROUP); + if (!isExperimentalHost() && !isExperimentalHostPattern) { + projectId = matcher.group(Builder.PROJECT_GROUP); + } else if (instanceId == null && isExperimentalHost()) { + instanceId = DEFAULT_EXPERIMENTAL_HOST_INSTANCE_ID; + } + if (Builder.DEFAULT_PROJECT_ID_PLACEHOLDER.equalsIgnoreCase(projectId)) { + projectId = getDefaultProjectId(this.credentials); + } + this.projectId = projectId; + this.instanceId = instanceId; + this.databaseName = matcher.group(Builder.DATABASE_GROUP); + } + + @VisibleForTesting + static String determineHost( + Matcher matcher, + String endpoint, + boolean autoConfigEmulator, + boolean usePlainText, + Map environment) { + String host = null; + if (Objects.equals(endpoint, DEFAULT_ENDPOINT) && matcher.group(Builder.HOST_GROUP) == null) { + if (autoConfigEmulator) { + if 
(Strings.isNullOrEmpty(environment.get(SPANNER_EMULATOR_HOST_ENV_VAR))) { + return DEFAULT_EMULATOR_HOST; + } else { + return PLAIN_TEXT_PROTOCOL + "//" + environment.get(SPANNER_EMULATOR_HOST_ENV_VAR); + } + } + } else if (!Objects.equals(endpoint, DEFAULT_ENDPOINT)) { + // Add '//' at the start of the endpoint to conform to the standard URL specification. + host = "//" + endpoint; + } else { + // The leading '//' is already included in the regex for the connection URL, so we don't need + // to add the leading '//' to the host name here. + host = matcher.group(Builder.HOST_GROUP); + if (Builder.EXTERNAL_HOST_FORMAT.equals(matcher.pattern().pattern()) + && !host.matches(".*:\\d+$")) { + host = String.format("%s:15000", host); + } + } + if (host == null) { + return null; + } + if (usePlainText) { + return PLAIN_TEXT_PROTOCOL + host; + } + return HOST_PROTOCOL + host; + } + + /** + * @return an instance of OpenTelemetry. If OpenTelemetry object is not set then null + * will be returned. + */ + OpenTelemetry getOpenTelemetry() { + return this.openTelemetry; + } + + SpannerOptionsConfigurator getConfigurator() { + return configurator; + } + + Ticker getTicker() { + return ticker; + } + + @VisibleForTesting + CredentialsService getCredentialsService() { + return CredentialsService.INSTANCE; + } + + private static void checkGuardedProperty( + String value, String systemPropertyName, String connectionPropertyName) { + if (!Strings.isNullOrEmpty(value) + && !Boolean.parseBoolean(System.getProperty(systemPropertyName))) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + generateGuardedConnectionPropertyError(systemPropertyName, connectionPropertyName)); + } + } + + @VisibleForTesting + static String parseUriProperty(String uri, String property) { + Pattern pattern = Pattern.compile(String.format("(?is)(?:;|\\?)%s=(.*?)(?:;|$)", property)); + Matcher matcher = pattern.matcher(uri); + if (matcher.find() && matcher.groupCount() == 1) { + 
return matcher.group(1); + } + return null; + } + + /** Check that only valid properties have been specified. */ + @VisibleForTesting + static String checkValidProperties(boolean lenient, String uri) { + StringBuilder invalidProperties = new StringBuilder(); + List properties = parseProperties(uri); + for (String property : properties) { + if (!ConnectionProperties.CONNECTION_PROPERTIES.containsKey( + property.toLowerCase(Locale.ENGLISH))) { + if (invalidProperties.length() > 0) { + invalidProperties.append(", "); + } + invalidProperties.append(property); + } + } + if (lenient) { + return String.format("Invalid properties found in connection URI: %s", invalidProperties); + } else { + Preconditions.checkArgument( + invalidProperties.length() == 0, + String.format( + "Invalid properties found in connection URI. Add lenient=true to the connection" + + " string to ignore unknown properties. Invalid properties: %s", + invalidProperties)); + return null; + } + } + + @VisibleForTesting + static List parseProperties(String uri) { + Pattern pattern = Pattern.compile("(?is)(?:\\?|;)(?.*?)=(?:.*?)"); + Matcher matcher = pattern.matcher(uri); + List res = new ArrayList<>(); + while (matcher.find() && matcher.group("PROPERTY") != null) { + res.add(matcher.group("PROPERTY")); + } + return res; + } + + static long tryParseLong(String value, long defaultValue) { + try { + return Long.parseLong(value); + } catch (NumberFormatException ignore) { + return defaultValue; + } + } + + /** + * Create a new {@link Connection} from this {@link ConnectionOptions}. Calling this method + * multiple times for the same {@link ConnectionOptions} will return multiple instances of {@link + * Connection}s to the same database. 
+ * + * @return a new {@link Connection} to the database referenced by this {@link ConnectionOptions} + */ + public Connection getConnection() { + LOCAL_CONNECTION_CHECKER.checkLocalConnection(this); + return new ConnectionImpl(this); + } + + /** The URI of this {@link ConnectionOptions} */ + public String getUri() { + return uri; + } + + /** The connection properties that have been pre-set for this {@link ConnectionOptions}. */ + Map> getInitialConnectionPropertyValues() { + return this.initialConnectionState.getAllValues(); + } + + T getInitialConnectionPropertyValue( + com.google.cloud.spanner.connection.ConnectionProperty property) { + return this.initialConnectionState.getValue(property).getValue(); + } + + /** The credentials URL of this {@link ConnectionOptions} */ + public String getCredentialsUrl() { + return getInitialConnectionPropertyValue(CREDENTIALS_URL); + } + + String getOAuthToken() { + return getInitialConnectionPropertyValue(OAUTH_TOKEN); + } + + Credentials getFixedCredentials() { + return this.fixedCredentials; + } + + CredentialsProvider getCredentialsProvider() { + return getInitialConnectionPropertyValue(CREDENTIALS_PROVIDER); + } + + /** + * Returns the executor type that is used by connections that are created from this {@link + * ConnectionOptions} instance. + */ + public StatementExecutorType getStatementExecutorType() { + return this.statementExecutorType; + } + + /** The {@link SessionPoolOptions} of this {@link ConnectionOptions}. */ + public SessionPoolOptions getSessionPoolOptions() { + return sessionPoolOptions; + } + + /** + * The minimum number of sessions in the backing session pool of this connection. The session pool + * is shared between all connections in the same JVM that connect to the same Cloud Spanner + * database using the same connection settings. 
+ */ + public Integer getMinSessions() { + return getInitialConnectionPropertyValue(MIN_SESSIONS); + } + + /** + * The maximum number of sessions in the backing session pool of this connection. The session pool + * is shared between all connections in the same JVM that connect to the same Cloud Spanner + * database using the same connection settings. + */ + public Integer getMaxSessions() { + return getInitialConnectionPropertyValue(MAX_SESSIONS); + } + + /** The number of channels to use for the connection. */ + public Integer getNumChannels() { + return getInitialConnectionPropertyValue(NUM_CHANNELS); + } + + /** Whether dynamic channel pooling is enabled for this connection. */ + public Boolean isEnableDynamicChannelPool() { + return getInitialConnectionPropertyValue(ENABLE_DYNAMIC_CHANNEL_POOL); + } + + /** The minimum number of channels in the dynamic channel pool. */ + public Integer getDcpMinChannels() { + return getInitialConnectionPropertyValue(DCP_MIN_CHANNELS); + } + + /** The maximum number of channels in the dynamic channel pool. */ + public Integer getDcpMaxChannels() { + return getInitialConnectionPropertyValue(DCP_MAX_CHANNELS); + } + + /** The initial number of channels in the dynamic channel pool. */ + public Integer getDcpInitialChannels() { + return getInitialConnectionPropertyValue(DCP_INITIAL_CHANNELS); + } + + /** Calls the getChannelProvider() method from the supplied class. 
*/ + public TransportChannelProvider getChannelProvider() { + String channelProvider = getInitialConnectionPropertyValue(CHANNEL_PROVIDER); + if (channelProvider == null) { + return null; + } + try { + URL url = new URL(MoreObjects.firstNonNull(host, DEFAULT_HOST)); + ExternalChannelProvider provider = + ExternalChannelProvider.class.cast(Class.forName(channelProvider).newInstance()); + return provider.getChannelProvider(url.getHost(), url.getPort()); + } catch (Exception e) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + String.format( + "%s : Failed to create channel with external provider: %s", + e.toString(), channelProvider)); + } + } + + String getGrpcInterceptorProviderName() { + return getInitialConnectionPropertyValue(GRPC_INTERCEPTOR_PROVIDER); + } + + /** Returns the gRPC interceptor provider that has been configured. */ + public GrpcInterceptorProvider getGrpcInterceptorProvider() { + String interceptorProvider = getInitialConnectionPropertyValue(GRPC_INTERCEPTOR_PROVIDER); + if (interceptorProvider == null) { + return null; + } + return GrpcInterceptorProviderConverter.INSTANCE.convert(interceptorProvider); + } + + /** + * The database role that is used for this connection. Assigning a role to a connection can be + * used to for example restrict the access of a connection to a specific set of tables. 
+ */ + public String getDatabaseRole() { + return getInitialConnectionPropertyValue(DATABASE_ROLE); + } + + /** The host and port number that this {@link ConnectionOptions} will connect to */ + public String getHost() { + return host; + } + + /** The Google Project ID that this {@link ConnectionOptions} will connect to */ + public String getProjectId() { + return projectId; + } + + /** The Spanner Instance ID that this {@link ConnectionOptions} will connect to */ + public String getInstanceId() { + return instanceId; + } + + /** The Spanner database name that this {@link ConnectionOptions} will connect to */ + public String getDatabaseName() { + return databaseName; + } + + /** The Spanner {@link DatabaseId} that this {@link ConnectionOptions} will connect to */ + public DatabaseId getDatabaseId() { + Preconditions.checkState(projectId != null, "Project ID is not specified"); + Preconditions.checkState(instanceId != null, "Instance ID is not specified"); + Preconditions.checkState(databaseName != null, "Database name is not specified"); + return DatabaseId.of(projectId, instanceId, databaseName); + } + + /** + * The {@link Credentials} of this {@link ConnectionOptions}. This is either the credentials + * specified in the credentialsUrl or the default Google application credentials + */ + public Credentials getCredentials() { + return credentials; + } + + /** The initial autocommit value for connections created by this {@link ConnectionOptions} */ + public boolean isAutocommit() { + return getInitialConnectionPropertyValue(AUTOCOMMIT); + } + + /** The initial readonly value for connections created by this {@link ConnectionOptions} */ + public boolean isReadOnly() { + return getInitialConnectionPropertyValue(READONLY); + } + + /** + * Whether read/write transactions and partitioned DML are preferred to be routed to the leader + * region. 
+ */ + public boolean isRouteToLeader() { + return getInitialConnectionPropertyValue(ROUTE_TO_LEADER); + } + + /** Whether end-to-end tracing is enabled. */ + public boolean isEndToEndTracingEnabled() { + return getInitialConnectionPropertyValue(ENABLE_END_TO_END_TRACING); + } + + /** + * The initial retryAbortsInternally value for connections created by this {@link + * ConnectionOptions} + */ + public boolean isRetryAbortsInternally() { + return getInitialConnectionPropertyValue(RETRY_ABORTS_INTERNALLY); + } + + /** Whether connections should use virtual threads for connection executors. */ + public boolean isUseVirtualThreads() { + return getInitialConnectionPropertyValue(USE_VIRTUAL_THREADS); + } + + /** Whether virtual threads should be used for gRPC transport. */ + public boolean isUseVirtualGrpcTransportThreads() { + return getInitialConnectionPropertyValue(USE_VIRTUAL_GRPC_TRANSPORT_THREADS); + } + + /** Any warnings that were generated while creating the {@link ConnectionOptions} instance. */ + @Nullable + public String getWarnings() { + return warnings; + } + + /** Use http instead of https. Only valid for (local) test servers. */ + boolean isUsePlainText() { + return getInitialConnectionPropertyValue(AUTO_CONFIG_EMULATOR) + || getInitialConnectionPropertyValue(USE_PLAIN_TEXT); + } + + boolean isExperimentalHost() { + return getInitialConnectionPropertyValue(IS_EXPERIMENTAL_HOST); + } + + Boolean isEnableDirectAccess() { + return getInitialConnectionPropertyValue(ENABLE_DIRECT_ACCESS); + } + + String getUniverseDomain() { + return getInitialConnectionPropertyValue(UNIVERSE_DOMAIN); + } + + String getClientCertificate() { + return getInitialConnectionPropertyValue(CLIENT_CERTIFICATE); + } + + String getClientCertificateKey() { + return getInitialConnectionPropertyValue(CLIENT_KEY); + } + + /** + * The (custom) user agent string to use for this connection. If null, then the + * default JDBC user agent string will be used. 
+ */ + String getUserAgent() { + return getInitialConnectionPropertyValue(USER_AGENT); + } + + /** Whether connections created by this {@link ConnectionOptions} return commit stats. */ + public boolean isReturnCommitStats() { + return getInitialConnectionPropertyValue(RETURN_COMMIT_STATS); + } + + /** The max_commit_delay that should be applied to commit operations on this connection. */ + public Duration getMaxCommitDelay() { + return getInitialConnectionPropertyValue(MAX_COMMIT_DELAY); + } + + boolean usesEmulator() { + return Suppliers.memoize( + () -> + isAutoConfigEmulator() + || !Strings.isNullOrEmpty(System.getenv("SPANNER_EMULATOR_HOST"))) + .get(); + } + + /** + * Whether connections created by this {@link ConnectionOptions} will automatically try to connect + * to the emulator using the default host/port of the emulator, and automatically create the + * instance and database that is specified in the connection string if these do not exist on the + * emulator instance. + */ + public boolean isAutoConfigEmulator() { + return getInitialConnectionPropertyValue(AUTO_CONFIG_EMULATOR); + } + + /** + * Returns true if a connection should generate auto-savepoints for retrying transactions on the + * emulator. This allows some more concurrent transactions on the emulator. + * + *

This is no longer needed since version 1.5.23 of the emulator. + */ + boolean useAutoSavepointsForEmulator() { + return getInitialConnectionPropertyValue(USE_AUTO_SAVEPOINTS_FOR_EMULATOR); + } + + public Dialect getDialect() { + return getInitialConnectionPropertyValue(DIALECT); + } + + boolean isTrackConnectionLeaks() { + return getInitialConnectionPropertyValue(TRACK_CONNECTION_LEAKS); + } + + boolean isDataBoostEnabled() { + return getInitialConnectionPropertyValue(DATA_BOOST_ENABLED); + } + + boolean isAutoPartitionMode() { + return getInitialConnectionPropertyValue(AUTO_PARTITION_MODE); + } + + int getMaxPartitions() { + return getInitialConnectionPropertyValue(MAX_PARTITIONS); + } + + int getMaxPartitionedParallelism() { + return getInitialConnectionPropertyValue(MAX_PARTITIONED_PARALLELISM); + } + + Boolean isEnableExtendedTracing() { + return getInitialConnectionPropertyValue(ENABLE_EXTENDED_TRACING); + } + + Boolean isEnableApiTracing() { + return getInitialConnectionPropertyValue(ENABLE_API_TRACING); + } + + /** Interceptors that should be executed after each statement */ + List getStatementExecutionInterceptors() { + return statementExecutionInterceptors; + } + + @Override + public String toString() { + return getUri(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionPreconditions.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionPreconditions.java new file mode 100644 index 000000000000..8cda355858e1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionPreconditions.java @@ -0,0 +1,66 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.common.base.Strings; +import javax.annotation.Nullable; + +/** + * Static convenience methods that help a method or constructor in the Connection API to check + * whether it was invoked correctly. + */ +class ConnectionPreconditions { + /** + * Ensures the truth of an expression involving the state of the calling instance, but not + * involving any parameters to the calling method. + * + * @param expression a boolean expression + * @param errorMessage the exception message to use if the check fails; will be converted to a + * string using {@link String#valueOf(Object)}. + * @throws SpannerException with {@link ErrorCode#FAILED_PRECONDITION} if {@code expression} is + * false. + */ + static void checkState(boolean expression, @Nullable Object errorMessage) { + if (!expression) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, String.valueOf(errorMessage)); + } + } + + static void checkArgument(boolean expression, String message) { + if (!expression) { + throw SpannerExceptionFactory.newSpannerException(ErrorCode.INVALID_ARGUMENT, message); + } + } + + /** Verifies that the given identifier is a valid identifier for the given dialect. 
*/ + static String checkValidIdentifier(String identifier) { + checkArgument(!Strings.isNullOrEmpty(identifier), "Identifier may not be null or empty"); + checkArgument( + Character.isJavaIdentifierStart(identifier.charAt(0)), "Invalid identifier: " + identifier); + for (int i = 1; i < identifier.length(); i++) { + checkArgument( + Character.isJavaIdentifierPart(identifier.charAt(i)), + "Invalid identifier: " + identifier); + } + checkArgument(identifier.length() <= 128, "Max identifier length is 128 characters"); + return identifier; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionProperties.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionProperties.java new file mode 100644 index 000000000000..5fa678afef5e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionProperties.java @@ -0,0 +1,884 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.ConnectionOptions.AUTOCOMMIT_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.AUTO_BATCH_DML_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.AUTO_BATCH_DML_UPDATE_COUNT_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.AUTO_PARTITION_MODE_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.BATCH_DML_UPDATE_COUNT_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.CHANNEL_PROVIDER_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.CLIENT_CERTIFICATE_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.CLIENT_KEY_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.CREDENTIALS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.CREDENTIALS_PROVIDER_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.DATABASE_ROLE_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.DATA_BOOST_ENABLED_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.DCP_INITIAL_CHANNELS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.DCP_MAX_CHANNELS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.DCP_MIN_CHANNELS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.DDL_IN_TRANSACTION_MODE_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_AUTOCOMMIT; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_AUTO_BATCH_DML; 
+import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_AUTO_BATCH_DML_UPDATE_COUNT; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_AUTO_PARTITION_MODE; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_BATCH_DML_UPDATE_COUNT; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_CHANNEL_PROVIDER; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_CLIENT_CERTIFICATE; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_CLIENT_KEY; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_CREDENTIALS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_DATABASE_ROLE; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_DATA_BOOST_ENABLED; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_DCP_INITIAL_CHANNELS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_DCP_MAX_CHANNELS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_DCP_MIN_CHANNELS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_DDL_IN_TRANSACTION_MODE; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_DEFAULT_SEQUENCE_KIND; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_ENABLE_API_TRACING; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_ENABLE_DYNAMIC_CHANNEL_POOL; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_ENABLE_END_TO_END_TRACING; +import static 
com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_ENABLE_EXTENDED_TRACING; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_ENDPOINT; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_IS_EXPERIMENTAL_HOST; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_KEEP_TRANSACTION_ALIVE; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_LENIENT; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_MAX_PARTITIONED_PARALLELISM; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_MAX_PARTITIONS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_MAX_SESSIONS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_MIN_SESSIONS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_NUM_CHANNELS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_OAUTH_TOKEN; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_OPTIMIZER_STATISTICS_PACKAGE; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_OPTIMIZER_VERSION; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_READONLY; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_RETRY_ABORTS_INTERNALLY; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_RETURN_COMMIT_STATS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_ROUTE_TO_LEADER; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_RPC_PRIORITY; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_SEQUENCE_KIND_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_TRACK_CONNECTION_LEAKS; +import static 
com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_TRACK_SESSION_LEAKS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_USER_AGENT; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_USE_PLAIN_TEXT; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_USE_VIRTUAL_GRPC_TRANSPORT_THREADS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_USE_VIRTUAL_THREADS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.DIALECT_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.ENABLE_API_TRACING_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.ENABLE_DYNAMIC_CHANNEL_POOL_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.ENABLE_END_TO_END_TRACING_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.ENABLE_EXTENDED_TRACING_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.ENABLE_GRPC_INTERCEPTOR_PROVIDER_SYSTEM_PROPERTY; +import static com.google.cloud.spanner.connection.ConnectionOptions.ENCODED_CREDENTIALS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.ENDPOINT_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.IS_EXPERIMENTAL_HOST_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.KEEP_TRANSACTION_ALIVE_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.LENIENT_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.MAX_PARTITIONED_PARALLELISM_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.MAX_PARTITIONS_PROPERTY_NAME; +import static 
com.google.cloud.spanner.connection.ConnectionOptions.MAX_SESSIONS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.MIN_SESSIONS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.NUM_CHANNELS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.OAUTH_TOKEN_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.OPTIMIZER_STATISTICS_PACKAGE_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.OPTIMIZER_VERSION_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.READONLY_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.RETRY_ABORTS_INTERNALLY_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.ROUTE_TO_LEADER_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.RPC_PRIORITY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.TRACK_CONNECTION_LEAKS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.TRACK_SESSION_LEAKS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.USER_AGENT_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.USE_PLAIN_TEXT_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.USE_VIRTUAL_GRPC_TRANSPORT_THREADS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.USE_VIRTUAL_THREADS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionProperty.castProperty; + +import com.google.api.gax.core.CredentialsProvider; +import com.google.api.gax.grpc.GrpcInterceptorProvider; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.DmlBatchUpdateCountVerificationFailedException; +import com.google.cloud.spanner.Options.RpcPriority; +import 
com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.AutocommitDmlModeConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.BooleanConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.ConnectionStateTypeConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.CredentialsProviderConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.DdlInTransactionModeConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.DialectConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.DurationConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.IsolationLevelConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.LongConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.NonNegativeIntegerConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.ReadLockModeConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.ReadOnlyStalenessConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.RpcPriorityConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.SavepointSupportConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.StringValueConverter; +import com.google.cloud.spanner.connection.ConnectionProperty.Context; +import com.google.cloud.spanner.connection.DirectedReadOptionsUtil.DirectedReadOptionsConverter; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import 
com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode; +import java.time.Duration; +import java.util.Arrays; +import java.util.stream.Collectors; + +/** Utility class that defines all known connection properties. */ +public class ConnectionProperties { + private static final ImmutableMap.Builder> + CONNECTION_PROPERTIES_BUILDER = ImmutableMap.builder(); + + private static final Boolean[] BOOLEANS = new Boolean[] {Boolean.TRUE, Boolean.FALSE}; + + static final ConnectionProperty CONNECTION_STATE_TYPE = + create( + "connection_state_type", + "The type of connection state to use for this connection. Can only be set at start up. " + + "If no value is set, then the database dialect default will be used, " + + "which is NON_TRANSACTIONAL for GoogleSQL and TRANSACTIONAL for PostgreSQL.", + null, + ConnectionState.Type.values(), + ConnectionStateTypeConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty TRACING_PREFIX = + create( + "tracing_prefix", + "The prefix that will be prepended to all OpenTelemetry traces that are " + + "generated by a Connection.", + "CloudSpanner", + StringValueConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty LENIENT = + create( + LENIENT_PROPERTY_NAME, + "Silently ignore unknown properties in the connection string/properties (true/false)", + DEFAULT_LENIENT, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty ENDPOINT = + create( + ENDPOINT_PROPERTY_NAME, + "The endpoint that the JDBC driver should connect to. The default is the default Spanner" + + " production endpoint when autoConfigEmulator=false, and the default Spanner" + + " emulator endpoint (localhost:9010) when autoConfigEmulator=true. 
This property" + + " takes precedence over any host name at the start of the connection URL.", + DEFAULT_ENDPOINT, + StringValueConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty AUTO_CONFIG_EMULATOR = + create( + "autoConfigEmulator", + "Automatically configure the connection to try to connect to the Cloud Spanner emulator" + + " (true/false). The instance and database in the connection string will" + + " automatically be created if these do not yet exist on the emulator. Add" + + " dialect=postgresql to the connection string to make sure that the database that" + + " is created uses the PostgreSQL dialect.", + false, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty ENABLE_DIRECT_ACCESS = + create( + "enableDirectAccess", + "Configure the connection to try to connect to Spanner using " + + "DirectPath (true/false). The client will try to connect to Spanner " + + "using a direct Google network connection. DirectPath will work only " + + "if the client is trying to establish a connection from a Google Cloud VM. " + + "Otherwise it will automatically fallback to the standard network path. " + + "NOTE: The default for this property is currently false, " + + "but this could be changed in the future.", + null, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty UNIVERSE_DOMAIN = + create( + "universeDomain", + "Configure the connection to try to connect to Spanner using " + + "a different partner Google Universe than GDU (googleapis.com).", + "googleapis.com", + StringValueConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty USE_AUTO_SAVEPOINTS_FOR_EMULATOR = + create( + "useAutoSavepointsForEmulator", + "Automatically creates savepoints for each statement in a read/write transaction when" + + " using the Emulator. 
This is no longer needed when using Emulator version 1.5.23" + + " or higher.", + false, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty USE_PLAIN_TEXT = + create( + USE_PLAIN_TEXT_PROPERTY_NAME, + "Use a plain text communication channel (i.e. non-TLS) for communicating with the server" + + " (true/false). Set this value to true for communication with the Cloud Spanner" + + " emulator.", + DEFAULT_USE_PLAIN_TEXT, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty IS_EXPERIMENTAL_HOST = + create( + IS_EXPERIMENTAL_HOST_PROPERTY_NAME, + "Set this value to true for communication with a Experimental Host.", + DEFAULT_IS_EXPERIMENTAL_HOST, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty CLIENT_CERTIFICATE = + create( + CLIENT_CERTIFICATE_PROPERTY_NAME, + "Specifies the file path to the client certificate required for establishing an mTLS" + + " connection.", + DEFAULT_CLIENT_CERTIFICATE, + StringValueConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty CLIENT_KEY = + create( + CLIENT_KEY_PROPERTY_NAME, + "Specifies the file path to the client private key required for establishing an mTLS" + + " connection.", + DEFAULT_CLIENT_KEY, + StringValueConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty CREDENTIALS_URL = + create( + CREDENTIALS_PROPERTY_NAME, + "The location of the credentials file to use for this connection. If neither this" + + " property or encoded credentials are set, the connection will use the default" + + " Google Cloud credentials for the runtime environment. WARNING: Using this" + + " property without proper validation can expose the application to security risks." + + " It is intended for use with credentials from a trusted source only, as it could" + + " otherwise allow end-users to supply arbitrary credentials. 
For more information," + + " seehttps://cloud.google.com/docs/authentication/client-libraries#external-credentials", + DEFAULT_CREDENTIALS, + StringValueConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty ENCODED_CREDENTIALS = + create( + ENCODED_CREDENTIALS_PROPERTY_NAME, + "Base64-encoded credentials to use for this connection. If neither this property or a" + + " credentials location are set, the connection will use the default Google Cloud" + + " credentials for the runtime environment. WARNING: Enabling this property without" + + " proper validation can expose the application to security risks. It is intended" + + " for use with credentials from a trusted source only, as it could otherwise allow" + + " end-users to supply arbitrary credentials. For more information, see" + + "https://cloud.google.com/docs/authentication/client-libraries#external-credentials", + null, + StringValueConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty OAUTH_TOKEN = + create( + OAUTH_TOKEN_PROPERTY_NAME, + "A valid pre-existing OAuth token to use for authentication for this connection. Setting" + + " this property will take precedence over any value set for a credentials file.", + DEFAULT_OAUTH_TOKEN, + StringValueConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty CREDENTIALS_PROVIDER = + create( + CREDENTIALS_PROVIDER_PROPERTY_NAME, + "The class name of the com.google.api.gax.core.CredentialsProvider implementation that" + + " should be used to obtain credentials for connections.", + null, + CredentialsProviderConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty GRPC_INTERCEPTOR_PROVIDER = + create( + "grpc_interceptor_provider", + "The class name of a " + + GrpcInterceptorProvider.class.getName() + + " implementation that should be used to provide interceptors for the underlying" + + " Spanner client. 
This is a guarded property that can only be set if the Java" + + " System Property " + + ENABLE_GRPC_INTERCEPTOR_PROVIDER_SYSTEM_PROPERTY + + " has been set to true. This property should only be set to true on systems where" + + " an untrusted user cannot modify the connection URL, as using this property will" + + " dynamically invoke the constructor of the class specified. This means that any" + + " user that can modify the connection URL, can also dynamically invoke code on the" + + " host where the application is running.", + null, + StringValueConverter.INSTANCE, + Context.STARTUP); + + static final ConnectionProperty USER_AGENT = + create( + USER_AGENT_PROPERTY_NAME, + "The custom user-agent property name to use when communicating with Cloud Spanner. This" + + " property is intended for internal library usage, and should not be set by" + + " applications.", + DEFAULT_USER_AGENT, + StringValueConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty DIALECT = + create( + DIALECT_PROPERTY_NAME, + "Sets the dialect to use for new databases that are created by this connection.", + Dialect.GOOGLE_STANDARD_SQL, + Dialect.values(), + DialectConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty TRACK_SESSION_LEAKS = + create( + TRACK_SESSION_LEAKS_PROPERTY_NAME, + "Capture the call stack of the thread that checked out a session of the session pool." + + " This will pre-create a LeakedSessionException already when a session is checked" + + " out. This can be disabled, for example if a monitoring system logs the" + + " pre-created exception. If disabled, the LeakedSessionException will only be" + + " created when an actual session leak is detected. 
The stack trace of the exception" + + " will in that case not contain the call stack of when the session was checked" + + " out.", + DEFAULT_TRACK_SESSION_LEAKS, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty TRACK_CONNECTION_LEAKS = + create( + TRACK_CONNECTION_LEAKS_PROPERTY_NAME, + "Capture the call stack of the thread that created a connection. This will pre-create a" + + " LeakedConnectionException already when a connection is created. This can be" + + " disabled, for example if a monitoring system logs the pre-created exception. If" + + " disabled, the LeakedConnectionException will only be created when an actual" + + " connection leak is detected. The stack trace of the exception will in that case" + + " not contain the call stack of when the connection was created.", + DEFAULT_TRACK_CONNECTION_LEAKS, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty ROUTE_TO_LEADER = + create( + ROUTE_TO_LEADER_PROPERTY_NAME, + "Should read/write transactions and partitioned DML be routed to leader region" + + " (true/false)", + DEFAULT_ROUTE_TO_LEADER, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty USE_VIRTUAL_THREADS = + create( + USE_VIRTUAL_THREADS_PROPERTY_NAME, + "Use a virtual thread instead of a platform thread for each connection (true/false). This" + + " option only has any effect if the application is running on Java 21 or higher. In" + + " all other cases, the option is ignored.", + DEFAULT_USE_VIRTUAL_THREADS, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty USE_VIRTUAL_GRPC_TRANSPORT_THREADS = + create( + USE_VIRTUAL_GRPC_TRANSPORT_THREADS_PROPERTY_NAME, + "Use a virtual thread instead of a platform thread for the gRPC executor (true/false)." + + " This option only has any effect if the application is running on Java 21 or" + + " higher. 
In all other cases, the option is ignored.", + DEFAULT_USE_VIRTUAL_GRPC_TRANSPORT_THREADS, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty ENABLE_EXTENDED_TRACING = + create( + ENABLE_EXTENDED_TRACING_PROPERTY_NAME, + "Include the SQL string in the OpenTelemetry traces that are generated " + + "by this connection. The SQL string is added as the standard OpenTelemetry " + + "attribute 'db.statement'.", + DEFAULT_ENABLE_EXTENDED_TRACING, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty ENABLE_API_TRACING = + create( + ENABLE_API_TRACING_PROPERTY_NAME, + "Add OpenTelemetry traces for each individual RPC call. Enable this " + + "to get a detailed view of each RPC that is being executed by your application, " + + "or if you want to debug potential latency problems caused by RPCs that are " + + "being retried.", + DEFAULT_ENABLE_API_TRACING, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty ENABLE_END_TO_END_TRACING = + create( + ENABLE_END_TO_END_TRACING_PROPERTY_NAME, + "Enable end-to-end tracing (true/false) to generate traces for both the time that is" + + " spent in the client, as well as time that is spent in the Spanner server. Server" + + " side traces can only go to Google Cloud Trace, so to see end to end traces, the" + + " application should configure an exporter that exports the traces to Google Cloud" + + " Trace.", + DEFAULT_ENABLE_END_TO_END_TRACING, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty MIN_SESSIONS = + create( + MIN_SESSIONS_PROPERTY_NAME, + "The minimum number of sessions in the backing session pool. The default is 100.", + DEFAULT_MIN_SESSIONS, + NonNegativeIntegerConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty MAX_SESSIONS = + create( + MAX_SESSIONS_PROPERTY_NAME, + "The maximum number of sessions in the backing session pool. 
The default is 400.", + DEFAULT_MAX_SESSIONS, + NonNegativeIntegerConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty NUM_CHANNELS = + create( + NUM_CHANNELS_PROPERTY_NAME, + "The number of gRPC channels to use to communicate with Cloud Spanner. The default is 4.", + DEFAULT_NUM_CHANNELS, + NonNegativeIntegerConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty ENABLE_DYNAMIC_CHANNEL_POOL = + create( + ENABLE_DYNAMIC_CHANNEL_POOL_PROPERTY_NAME, + "Enable dynamic channel pooling for automatic gRPC channel scaling. When enabled, the " + + "client will automatically scale the number of channels based on load. Setting " + + "numChannels will disable dynamic channel pooling even if this is set to true. " + + "The default is currently false (disabled), but this may change to true in a " + + "future version. Set this property explicitly to ensure consistent behavior.", + DEFAULT_ENABLE_DYNAMIC_CHANNEL_POOL, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty DCP_MIN_CHANNELS = + create( + DCP_MIN_CHANNELS_PROPERTY_NAME, + "The minimum number of channels in the dynamic channel pool. Only used when " + + "enableDynamicChannelPool is true. The default is " + + "SpannerOptions.DEFAULT_DYNAMIC_POOL_MIN_CHANNELS (2).", + DEFAULT_DCP_MIN_CHANNELS, + NonNegativeIntegerConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty DCP_MAX_CHANNELS = + create( + DCP_MAX_CHANNELS_PROPERTY_NAME, + "The maximum number of channels in the dynamic channel pool. Only used when " + + "enableDynamicChannelPool is true. The default is " + + "SpannerOptions.DEFAULT_DYNAMIC_POOL_MAX_CHANNELS (10).", + DEFAULT_DCP_MAX_CHANNELS, + NonNegativeIntegerConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty DCP_INITIAL_CHANNELS = + create( + DCP_INITIAL_CHANNELS_PROPERTY_NAME, + "The initial number of channels in the dynamic channel pool. 
Only used when " + + "enableDynamicChannelPool is true. The default is " + + "SpannerOptions.DEFAULT_DYNAMIC_POOL_INITIAL_SIZE (4).", + DEFAULT_DCP_INITIAL_CHANNELS, + NonNegativeIntegerConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty CHANNEL_PROVIDER = + create( + CHANNEL_PROVIDER_PROPERTY_NAME, + "The name of the channel provider class. The name must reference an implementation of" + + " ExternalChannelProvider. If this property is not set, the connection will use the" + + " default grpc channel provider.", + DEFAULT_CHANNEL_PROVIDER, + StringValueConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty DATABASE_ROLE = + create( + DATABASE_ROLE_PROPERTY_NAME, + "Sets the database role to use for this connection. The default is privileges assigned to" + + " IAM role", + DEFAULT_DATABASE_ROLE, + StringValueConverter.INSTANCE, + Context.STARTUP); + + static final ConnectionProperty AUTOCOMMIT = + create( + AUTOCOMMIT_PROPERTY_NAME, + "Should the connection start in autocommit (true/false)", + DEFAULT_AUTOCOMMIT, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.USER); + static final ConnectionProperty READONLY = + create( + READONLY_PROPERTY_NAME, + "Should the connection start in read-only mode (true/false)", + DEFAULT_READONLY, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.USER); + static final ConnectionProperty DEFAULT_ISOLATION_LEVEL = + create( + "default_isolation_level", + "The transaction isolation level that is used by default for read/write transactions. 
The" + + " default is isolation_level_unspecified, which means that the connection will use" + + " the default isolation level of the database that it is connected to.", + IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED, + new IsolationLevel[] { + IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED, + IsolationLevel.SERIALIZABLE, + IsolationLevel.REPEATABLE_READ + }, + IsolationLevelConverter.INSTANCE, + Context.USER); + static final ConnectionProperty READ_LOCK_MODE = + create( + "read_lock_mode", + "This option controls the locking behavior for read operations and queries within a" + + " read/write transaction. It works in conjunction with the transaction's isolation" + + " level.\n\n" + + "PESSIMISTIC: Read locks are acquired immediately on read. This mode only applies" + + " to SERIALIZABLE isolation. This mode prevents concurrent modifications by locking" + + " data throughout the transaction. This reduces commit-time aborts due to" + + " conflicts, but can increase how long transactions wait for locks and the overall" + + " contention.\n\n" + + "OPTIMISTIC: Locks for reads within the transaction are not acquired on read." + + " Instead, the locks are acquired on commit to validate that read/queried data has" + + " not changed since the transaction started. If a conflict is detected, the" + + " transaction will fail. This mode only applies to SERIALIZABLE isolation. This" + + " mode defers locking until commit, which can reduce contention and improve" + + " throughput. However, be aware that this increases the risk of transaction aborts" + + " if there's significant write competition on the same data.\n\n" + + "READ_LOCK_MODE_UNSPECIFIED: This is the default if no mode is set. The locking" + + " behavior depends on the isolation level:\n\n" + + "REPEATABLE_READ: Locking semantics default to OPTIMISTIC. 
However, validation" + + " checks at commit are only performed for queries using SELECT FOR UPDATE," + + " statements with {@code LOCK_SCANNED_RANGES} hints, and DML statements.\n\n" + + "For all other isolation levels: If the read lock mode is not set, it defaults to" + + " PESSIMISTIC locking.", + ReadLockMode.READ_LOCK_MODE_UNSPECIFIED, + Arrays.stream(ReadLockMode.values()) + .filter(mode -> !mode.equals(ReadLockMode.UNRECOGNIZED)) + .collect(Collectors.toList()) + .toArray(new ReadLockMode[0]), + ReadLockModeConverter.INSTANCE, + Context.USER); + static final ConnectionProperty STATEMENT_TIMEOUT = + create( + "statement_timeout", + "Adds a timeout to all statements executed on this connection. " + + "This property is only used when a statement timeout is specified.", + null, + null, + DurationConverter.INSTANCE, + Context.USER); + static final ConnectionProperty TRANSACTION_TIMEOUT = + create( + "transaction_timeout", + "Timeout for read/write transactions.", + null, + null, + DurationConverter.INSTANCE, + Context.USER); + static final ConnectionProperty AUTOCOMMIT_DML_MODE = + create( + "autocommit_dml_mode", + "Determines the transaction type that is used to execute " + + "DML statements when the connection is in auto-commit mode.", + AutocommitDmlMode.TRANSACTIONAL, + // Add 'null' as a valid value. + Arrays.copyOf(AutocommitDmlMode.values(), AutocommitDmlMode.values().length + 1), + AutocommitDmlModeConverter.INSTANCE, + Context.USER); + static final ConnectionProperty RETRY_ABORTS_INTERNALLY = + create( + // TODO: Add support for synonyms for connection properties. + // retryAbortsInternally / retry_aborts_internally is currently not consistent. + // The connection URL property is retryAbortsInternally. The SET statement assumes + // that the property name is retry_aborts_internally. We should support both to be + // backwards compatible, but the standard should be snake_case. 
+ RETRY_ABORTS_INTERNALLY_PROPERTY_NAME, + "Should the connection automatically retry Aborted errors (true/false)", + DEFAULT_RETRY_ABORTS_INTERNALLY, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.USER); + static final ConnectionProperty RETURN_COMMIT_STATS = + create( + "returnCommitStats", + "Request that Spanner returns commit statistics for read/write transactions (true/false)", + DEFAULT_RETURN_COMMIT_STATS, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.USER); + static final ConnectionProperty DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE = + create( + DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE_NAME, + "Enabling this option will delay the actual start of a read/write transaction until the" + + " first write operation is seen in that transaction. All reads that happen before" + + " the first write in a transaction will instead be executed as if the connection" + + " was in auto-commit mode. Enabling this option will make read/write transactions" + + " lose their SERIALIZABLE isolation level. Read operations that are executed after" + + " the first write operation in a read/write transaction will be executed using the" + + " read/write transaction. Enabling this mode can reduce locking and improve" + + " performance for applications that can handle the lower transaction isolation" + + " semantics.", + DEFAULT_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.USER); + static final ConnectionProperty KEEP_TRANSACTION_ALIVE = + create( + KEEP_TRANSACTION_ALIVE_PROPERTY_NAME, + "Enabling this option will trigger the connection to keep read/write transactions alive" + + " by executing a SELECT 1 query once every 10 seconds if no other statements are" + + " being executed. This option should be used with caution, as it can keep" + + " transactions alive and hold on to locks longer than intended. 
This option should" + + " typically be used for CLI-type application that might wait for user input for a" + + " longer period of time.", + DEFAULT_KEEP_TRANSACTION_ALIVE, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.USER); + + static final ConnectionProperty READ_ONLY_STALENESS = + create( + "read_only_staleness", + "The read-only staleness to use for read-only transactions and single-use queries.", + TimestampBound.strong(), + ReadOnlyStalenessConverter.INSTANCE, + Context.USER); + static final ConnectionProperty AUTO_PARTITION_MODE = + create( + AUTO_PARTITION_MODE_PROPERTY_NAME, + "Execute all queries on this connection as partitioned queries. " + + "Executing a query that cannot be partitioned will fail. " + + "Executing a query in a read/write transaction will also fail.", + DEFAULT_AUTO_PARTITION_MODE, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.USER); + static final ConnectionProperty DATA_BOOST_ENABLED = + create( + DATA_BOOST_ENABLED_PROPERTY_NAME, + "Enable data boost for all partitioned queries that are executed by this connection. This" + + " setting is only used for partitioned queries and is ignored by all other" + + " statements.", + DEFAULT_DATA_BOOST_ENABLED, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.USER); + static final ConnectionProperty MAX_PARTITIONS = + create( + MAX_PARTITIONS_PROPERTY_NAME, + "The max partitions hint value to use for partitioned queries. " + + "Use 0 if you do not want to specify a hint.", + DEFAULT_MAX_PARTITIONS, + NonNegativeIntegerConverter.INSTANCE, + Context.USER); + static final ConnectionProperty MAX_PARTITIONED_PARALLELISM = + create( + MAX_PARTITIONED_PARALLELISM_PROPERTY_NAME, + "The max partitions hint value to use for partitioned queries. 
" + + "Use 0 if you do not want to specify a hint.", + DEFAULT_MAX_PARTITIONED_PARALLELISM, + NonNegativeIntegerConverter.INSTANCE, + Context.USER); + + static final ConnectionProperty DIRECTED_READ = + create( + "directed_read", + "The directed read options to apply to read-only transactions.", + null, + DirectedReadOptionsConverter.INSTANCE, + Context.USER); + static final ConnectionProperty OPTIMIZER_VERSION = + create( + OPTIMIZER_VERSION_PROPERTY_NAME, + "Sets the default query optimizer version to use for this connection.", + DEFAULT_OPTIMIZER_VERSION, + StringValueConverter.INSTANCE, + Context.USER); + static final ConnectionProperty OPTIMIZER_STATISTICS_PACKAGE = + create( + OPTIMIZER_STATISTICS_PACKAGE_PROPERTY_NAME, + "Sets the query optimizer statistics package to use for this connection.", + DEFAULT_OPTIMIZER_STATISTICS_PACKAGE, + StringValueConverter.INSTANCE, + Context.USER); + static final ConnectionProperty RPC_PRIORITY = + create( + RPC_PRIORITY_NAME, + "Sets the priority for all RPC invocations from this connection (HIGH/MEDIUM/LOW). The" + + " default is HIGH.", + DEFAULT_RPC_PRIORITY, + // Add 'null' as a valid value. 
+ Arrays.copyOf(RpcPriority.values(), RpcPriority.values().length + 1), + RpcPriorityConverter.INSTANCE, + Context.USER); + static final ConnectionProperty SAVEPOINT_SUPPORT = + create( + "savepoint_support", + "Determines the behavior of the connection when savepoints are used.", + SavepointSupport.FAIL_AFTER_ROLLBACK, + SavepointSupport.values(), + SavepointSupportConverter.INSTANCE, + Context.USER); + static final ConnectionProperty DDL_IN_TRANSACTION_MODE = + create( + DDL_IN_TRANSACTION_MODE_PROPERTY_NAME, + "Determines how the connection should handle DDL statements in a read/write transaction.", + DEFAULT_DDL_IN_TRANSACTION_MODE, + DdlInTransactionMode.values(), + DdlInTransactionModeConverter.INSTANCE, + Context.USER); + static final ConnectionProperty DEFAULT_SEQUENCE_KIND = + create( + DEFAULT_SEQUENCE_KIND_PROPERTY_NAME, + "The default sequence kind that should be used for the database. " + + "This property is only used when a DDL statement that requires a default " + + "sequence kind is executed on this connection.", + DEFAULT_DEFAULT_SEQUENCE_KIND, + StringValueConverter.INSTANCE, + Context.USER); + static final ConnectionProperty MAX_COMMIT_DELAY = + create( + "maxCommitDelay", + "The max delay that Spanner may apply to commit requests to improve throughput.", + null, + DurationConverter.INSTANCE, + Context.USER); + static final ConnectionProperty AUTO_BATCH_DML = + create( + AUTO_BATCH_DML_PROPERTY_NAME, + "Automatically buffer DML statements that are executed on this connection and execute" + + " them as one batch when a non-DML statement is executed, or when the current" + + " transaction is committed. The update count that is returned for DML statements" + + " that are buffered is by default 1. This default can be changed by setting the" + + " connection variable " + + AUTO_BATCH_DML_UPDATE_COUNT_PROPERTY_NAME + + " to value other than 1. This setting is only in read/write transactions. 
DML" + + " statements in auto-commit mode are executed directly.", + DEFAULT_AUTO_BATCH_DML, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.USER); + static final ConnectionProperty AUTO_BATCH_DML_UPDATE_COUNT = + create( + AUTO_BATCH_DML_UPDATE_COUNT_PROPERTY_NAME, + "DML statements that are executed when " + + AUTO_BATCH_DML_PROPERTY_NAME + + " is set to true, are not directly sent to Spanner, but are buffered in the client" + + " until the batch is flushed. This property determines the update count that is" + + " returned for these DML statements. The default is " + + DEFAULT_AUTO_BATCH_DML_UPDATE_COUNT + + ", as " + + "that is the update count that is expected by most ORMs (e.g. Hibernate).", + DEFAULT_AUTO_BATCH_DML_UPDATE_COUNT, + LongConverter.INSTANCE, + Context.USER); + static final ConnectionProperty AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION = + create( + AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION_PROPERTY_NAME, + "The update count that is returned for DML statements that are buffered during " + + "an automatic DML batch is by default " + + DEFAULT_AUTO_BATCH_DML_UPDATE_COUNT + + ". " + + "This value can be changed by setting the connection variable " + + AUTO_BATCH_DML_UPDATE_COUNT_PROPERTY_NAME + + ". The update counts that are returned by Spanner when the DML statements are" + + " actually executed are verified against the update counts that were returned when" + + " they were buffered. If these do not match, a " + + DmlBatchUpdateCountVerificationFailedException.class.getName() + + " will be thrown. You can disable this verification by setting " + + AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION_PROPERTY_NAME + + " to false.", + DEFAULT_AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION, + BOOLEANS, + BooleanConverter.INSTANCE, + Context.USER); + static final ConnectionProperty BATCH_DML_UPDATE_COUNT = + create( + BATCH_DML_UPDATE_COUNT_PROPERTY_NAME, + "The update count that is returned for DML statements that are executed in an " + + "explicit DML batch. 
The default is " + + DEFAULT_BATCH_DML_UPDATE_COUNT, + DEFAULT_BATCH_DML_UPDATE_COUNT, + LongConverter.INSTANCE, + Context.USER); + public static final ConnectionProperty UNKNOWN_LENGTH = + create( + "unknownLength", + "Spanner does not return the length of the selected columns in query results. When" + + " returning meta-data about these columns through functions like" + + " ResultSetMetaData.getColumnDisplaySize and ResultSetMetaData.getPrecision, we" + + " must provide a value. Various client tools and applications have different ideas" + + " about what they would like to see. This property specifies the length to return" + + " for types of unknown length.", + /* defaultValue= */ 50, + NonNegativeIntegerConverter.INSTANCE, + Context.USER); + + static final ImmutableMap> CONNECTION_PROPERTIES = + CONNECTION_PROPERTIES_BUILDER.build(); + + /** The list of all supported connection properties. */ + public static ImmutableList> VALID_CONNECTION_PROPERTIES = + ImmutableList.copyOf(ConnectionProperties.CONNECTION_PROPERTIES.values()); + + /** Utility method for creating a new core {@link ConnectionProperty}. */ + private static ConnectionProperty create( + String name, + String description, + T defaultValue, + ClientSideStatementValueConverter converter, + Context context) { + return create(name, description, defaultValue, null, converter, context); + } + + /** Utility method for creating a new core {@link ConnectionProperty}. */ + private static ConnectionProperty create( + String name, + String description, + T defaultValue, + T[] validValues, + ClientSideStatementValueConverter converter, + Context context) { + ConnectionProperty property = + ConnectionProperty.create(name, description, defaultValue, validValues, converter, context); + CONNECTION_PROPERTIES_BUILDER.put(property.getKey(), property); + return property; + } + + /** Parse the connection properties that can be found in the given connection URL. 
*/ + static ImmutableMap> parseValues(String url) { + ImmutableMap.Builder> builder = ImmutableMap.builder(); + for (ConnectionProperty property : CONNECTION_PROPERTIES.values()) { + ConnectionPropertyValue value = parseValue(castProperty(property), url); + if (value != null) { + builder.put(property.getKey(), value); + } + } + return builder.build(); + } + + /** + * Parse and convert the value of the specific connection property from a connection URL (e.g. + * readonly=true). + */ + private static ConnectionPropertyValue parseValue( + ConnectionProperty property, String url) { + String stringValue = ConnectionOptions.parseUriProperty(url, property.getKey()); + return property.convert(stringValue); + } + + /** This class should not be instantiated. */ + private ConnectionProperties() {} +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionProperty.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionProperty.java new file mode 100644 index 000000000000..7c06774cf2fb --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionProperty.java @@ -0,0 +1,213 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.common.base.Strings; +import java.util.Locale; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; + +/** + * {@link ConnectionProperty} is a variable for a connection. The total set of connection properties + * is the state of a connection, and determines the behavior of that connection. For example, a + * connection with a {@link ConnectionProperty} READONLY=true and AUTOCOMMIT=false will use + * read-only transactions by default, while a connection with READONLY=false and AUTOCOMMIT=false + * will use read/write transactions. + * + *

Connection properties are stored in a {@link ConnectionState} instance. {@link + * ConnectionState} can be transactional. That is; changes to a connection property during a + * transaction will be undone if the transaction is rolled back. Transactional connection state is + * the default for PostgreSQL-dialect databases. For GoogleSQL-dialect databases, transactional + * connection state is an opt-in. + */ +public class ConnectionProperty { + + /** + * Context indicates when a {@link ConnectionProperty} may be set. Each higher-ordinal value + * includes the preceding values, meaning that a {@link ConnectionProperty} with {@link + * Context#USER} can be set both at connection startup and during the connection's lifetime. + */ + public enum Context { + /** The property can only be set at startup of the connection. */ + STARTUP, + /** + * The property can be set at startup or by a user during the lifetime of a connection. The + * value is persisted until it is changed again by the user. + */ + USER, + } + + /** Utility method for doing an unchecked cast to a typed {@link ConnectionProperty}. */ + static ConnectionProperty castProperty(ConnectionProperty property) { + //noinspection unchecked + return (ConnectionProperty) property; + } + + /** + * Utility method for creating a key for a {@link ConnectionProperty}. The key of a property is + * always lower-case and consists of '[extension.]name'. + */ + @Nonnull + static String createKey(String extension, @Nonnull String name) { + ConnectionPreconditions.checkArgument( + !Strings.isNullOrEmpty(name), "property name must be a non-empty string"); + return extension == null + ? name.toLowerCase(Locale.ENGLISH) + : extension.toLowerCase(Locale.ENGLISH) + "." + name.toLowerCase(Locale.ENGLISH); + } + + /** Utility method for creating a typed {@link ConnectionProperty}. 
*/ + @Nonnull + static ConnectionProperty create( + @Nonnull String name, + String description, + T defaultValue, + ClientSideStatementValueConverter converter, + Context context) { + return create(name, description, defaultValue, null, converter, context); + } + + /** Utility method for creating a typed {@link ConnectionProperty}. */ + @Nonnull + static ConnectionProperty create( + @Nonnull String name, + String description, + T defaultValue, + T[] validValues, + ClientSideStatementValueConverter converter, + Context context) { + return new ConnectionProperty<>( + null, name, description, defaultValue, validValues, converter, context); + } + + /** + * The 'extension' of this property. This is (currently) only used for PostgreSQL-dialect + * databases. + */ + private final String extension; + + @Nonnull private final String name; + + @Nonnull private final String key; + + @Nonnull private final String description; + + private final T defaultValue; + + private final T[] validValues; + + private final ClientSideStatementValueConverter converter; + + private final Context context; + + ConnectionProperty( + String extension, + @Nonnull String name, + @Nonnull String description, + T defaultValue, + T[] validValues, + ClientSideStatementValueConverter converter, + Context context) { + ConnectionPreconditions.checkArgument( + !Strings.isNullOrEmpty(name), "property name must be a non-empty string"); + ConnectionPreconditions.checkArgument( + !Strings.isNullOrEmpty(description), "property description must be a non-empty string"); + this.extension = extension == null ? 
null : extension.toLowerCase(Locale.ENGLISH); + this.name = name.toLowerCase(Locale.ENGLISH); + this.description = description; + this.defaultValue = defaultValue; + this.validValues = validValues; + this.converter = converter; + this.context = context; + this.key = createKey(this.extension, this.name); + } + + @Override + public String toString() { + return this.key; + } + + @Override + public int hashCode() { + return this.key.hashCode(); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ConnectionProperty)) { + return false; + } + ConnectionProperty other = (ConnectionProperty) o; + return this.key.equals(other.key); + } + + ConnectionPropertyValue createInitialValue(@Nullable ConnectionPropertyValue initialValue) { + return initialValue == null + ? new ConnectionPropertyValue<>(this, this.defaultValue, this.defaultValue) + : initialValue.copy(); + } + + @Nullable + ConnectionPropertyValue convert(@Nullable String stringValue) { + if (stringValue == null) { + return null; + } + T convertedValue = this.converter.convert(stringValue); + if (convertedValue == null) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Invalid value for property " + this + ": " + stringValue); + } + return new ConnectionPropertyValue<>(this, convertedValue, convertedValue); + } + + @Nonnull + public String getKey() { + return this.key; + } + + public boolean hasExtension() { + return this.extension != null; + } + + public String getExtension() { + return this.extension; + } + + @Nonnull + public String getName() { + return this.name; + } + + @Nonnull + public String getDescription() { + return this.description; + } + + public T getDefaultValue() { + return this.defaultValue; + } + + public T[] getValidValues() { + return this.validValues; + } + + public Context getContext() { + return this.context; + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionPropertyValue.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionPropertyValue.java new file mode 100644 index 000000000000..088a28d9d8a3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionPropertyValue.java @@ -0,0 +1,80 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.connection.ConnectionProperty.Context; +import java.util.Objects; + +class ConnectionPropertyValue { + static ConnectionPropertyValue cast(ConnectionPropertyValue value) { + //noinspection unchecked + return (ConnectionPropertyValue) value; + } + + private final ConnectionProperty property; + private final T resetValue; + + private T value; + + ConnectionPropertyValue(ConnectionProperty property, T resetValue, T value) { + this.property = property; + this.resetValue = resetValue; + this.value = value; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ConnectionPropertyValue)) { + return false; + } + ConnectionPropertyValue other = cast((ConnectionPropertyValue) o); + return Objects.equals(this.property, other.property) + && Objects.equals(this.resetValue, other.resetValue) + && Objects.equals(this.value, other.value); + } + + @Override + public int hashCode() { + return Objects.hash(this.property, this.resetValue, this.value); + } + + ConnectionProperty getProperty() { + return property; + } + + T getResetValue() { + return resetValue; + } + + T getValue() { + return value; + } + + void setValue(T value, Context context) { + ConnectionPreconditions.checkState( + property.getContext().ordinal() >= context.ordinal(), + "Property has context " + + property.getContext() + + " and cannot be set in context " + + context); + this.value = value; + } + + ConnectionPropertyValue copy() { + return new ConnectionPropertyValue<>(this.property, this.resetValue, this.value); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionSpannerOptions.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionSpannerOptions.java new file mode 100644 index 000000000000..f9d310b69e3e --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionSpannerOptions.java @@ -0,0 +1,48 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.SpannerOptions; + +/** + * This class is used for building {@link SpannerOptions} for {@link Connection} instances. It gives + * access to (experimental) properties that are not public in the standard {@link SpannerOptions} + * implementation. 
+ */ +class ConnectionSpannerOptions extends SpannerOptions { + public static Builder newBuilder() { + return new Builder(); + } + + static class Builder extends SpannerOptions.Builder { + Builder() {} + + @Override + protected SpannerOptions.Builder setUseVirtualThreads(boolean useVirtualThreads) { + return super.setUseVirtualThreads(useVirtualThreads); + } + + @Override + public ConnectionSpannerOptions build() { + return new ConnectionSpannerOptions(this); + } + } + + ConnectionSpannerOptions(Builder builder) { + super(builder); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionState.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionState.java new file mode 100644 index 000000000000..ad90fc574b59 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionState.java @@ -0,0 +1,303 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.ConnectionProperties.CONNECTION_PROPERTIES; +import static com.google.cloud.spanner.connection.ConnectionProperties.CONNECTION_STATE_TYPE; +import static com.google.cloud.spanner.connection.ConnectionProperty.castProperty; +import static com.google.cloud.spanner.connection.ConnectionPropertyValue.cast; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.connection.ConnectionProperty.Context; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Suppliers; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.function.Supplier; +import javax.annotation.Nullable; + +class ConnectionState { + /** The type of connection state that is used. */ + enum Type { + /** + * Transactional connection state will roll back changes to connection properties that have been + * done during a transaction if the transaction is rolled back. + */ + TRANSACTIONAL, + /** + * Non-transactional connection state directly applies connection property changes during + * transactions to the main set of properties. Note that non-transactional connection state does + * support local properties. These are property changes that are only visible during the current + * transaction, and that are lost after committing or rolling back the current transaction. + */ + NON_TRANSACTIONAL, + } + + private final Object lock = new Object(); + + private final Supplier type; + + /** properties contain the current connection properties of a connection. */ + private final Map> properties; + + /** + * transactionProperties are the modified connection properties during a transaction. 
This is only + * used for {@link ConnectionState} that is marked as {@link Type#TRANSACTIONAL}. + */ + private Map> transactionProperties; + + /** localProperties are the modified local properties during a transaction. */ + private Map> localProperties; + + /** Constructs a non-transactional {@link ConnectionState} with the given initial values. */ + ConnectionState(Map> initialValues) { + this(initialValues, Suppliers.ofInstance(Type.NON_TRANSACTIONAL)); + } + + /** + * Constructs a {@link ConnectionState} with the given initial values. The type will be + * transactional or non-transactional based on the value that is returned by the given supplier. + * The type is determined lazily to allow connections to determine the default based on the + * dialect, and the dialect is not known directly when a connection is created. + */ + ConnectionState( + Map> initialValues, + Supplier defaultConnectionStateTypeSupplier) { + this.properties = new HashMap<>(CONNECTION_PROPERTIES.size()); + for (Entry> entry : CONNECTION_PROPERTIES.entrySet()) { + this.properties.put( + entry.getKey(), + entry.getValue().createInitialValue(cast(initialValues.get(entry.getKey())))); + } + // Add any additional non-core values from the options. 
+ for (Entry> entry : initialValues.entrySet()) { + if (!this.properties.containsKey(entry.getKey())) { + setValue( + castProperty(entry.getValue().getProperty()), + cast(entry.getValue()).getValue(), + Context.STARTUP, + /* inTransaction= */ false); + } + } + Type configuredType = getValue(CONNECTION_STATE_TYPE).getValue(); + if (configuredType == null) { + this.type = defaultConnectionStateTypeSupplier; + } else { + this.type = Suppliers.ofInstance(configuredType); + } + } + + @VisibleForTesting + Type getType() { + return this.type.get(); + } + + boolean hasTransactionalChanges() { + synchronized (lock) { + return this.transactionProperties != null || this.localProperties != null; + } + } + + /** + * Returns an unmodifiable map with all the property values of this {@link ConnectionState}. The + * map cannot be modified, but any changes to the current (committed) state will be reflected in + * the map that is returned by this method. + */ + Map> getAllValues() { + synchronized (lock) { + return Collections.unmodifiableMap(this.properties); + } + } + + /** Returns the current value of the specified setting. */ + ConnectionPropertyValue getValue(ConnectionProperty property) { + synchronized (lock) { + return internalGetValue(property, true); + } + } + + /** Returns the current value of the specified setting or null if undefined. 
*/ + @Nullable + ConnectionPropertyValue tryGetValue(ConnectionProperty property) { + synchronized (lock) { + return internalGetValue(property, false); + } + } + + private ConnectionPropertyValue internalGetValue( + ConnectionProperty property, boolean throwForUnknownParam) { + if (localProperties != null && localProperties.containsKey(property.getKey())) { + return cast(localProperties.get(property.getKey())); + } + if (transactionProperties != null && transactionProperties.containsKey(property.getKey())) { + return cast(transactionProperties.get(property.getKey())); + } + if (properties.containsKey(property.getKey())) { + return cast(properties.get(property.getKey())); + } + if (throwForUnknownParam) { + throw unknownParamError(property); + } + return null; + } + + /** + * Sets the value of the specified property. The new value will be persisted if the current + * transaction is committed or directly if the connection state is non-transactional. The value + * will be lost if the transaction is rolled back and the connection state is transactional. + */ + void setValue( + ConnectionProperty property, T value, Context context, boolean inTransaction) { + ConnectionPreconditions.checkState( + property.getContext().ordinal() >= context.ordinal(), + "Property has context " + + property.getContext() + + " and cannot be set in context " + + context); + synchronized (lock) { + if (!inTransaction + || getType() == Type.NON_TRANSACTIONAL + || context.ordinal() < Context.USER.ordinal()) { + internalSetValue(property, value, properties, context); + return; + } + + if (transactionProperties == null) { + transactionProperties = new HashMap<>(); + } + internalSetValue(property, value, transactionProperties, context); + // Remove the setting from the local settings if it's there, as the new transaction setting is + // the one that should be used. 
+ if (localProperties != null) { + localProperties.remove(property.getKey()); + } + } + } + + /** + * Sets the value of the specified setting for the current transaction. This value is lost when + * the transaction is committed or rolled back. This can be used to temporarily set a value only + * during a transaction, for example if a user wants to disable internal transaction retries only + * for a single transaction. + */ + void setLocalValue(ConnectionProperty property, T value) { + ConnectionPreconditions.checkState( + property.getContext().ordinal() >= Context.USER.ordinal(), + "setLocalValue is only supported for properties with context USER or higher."); + synchronized (lock) { + if (localProperties == null) { + localProperties = new HashMap<>(); + } + // Note that setting a local setting does not remove it from the transaction settings. This + // means that a commit will persist the setting in transactionSettings. + internalSetValue(property, value, localProperties, Context.USER); + } + } + + /** + * Resets the value of the specified property. The new value will be persisted if the current + * transaction is committed or directly if the connection state is non-transactional. The value + * will be lost if the transaction is rolled back and the connection state is transactional. + */ + void resetValue(ConnectionProperty property, Context context, boolean inTransaction) { + synchronized (lock) { + ConnectionPropertyValue currentValue = getValue(property); + if (currentValue == null) { + setValue(property, null, context, inTransaction); + } else { + setValue(property, currentValue.getResetValue(), context, inTransaction); + } + } + } + + /** Persists the new value for a property to the given map of properties. 
*/ + private void internalSetValue( + ConnectionProperty property, + T value, + Map> currentProperties, + Context context) { + checkValidValue(property, value); + ConnectionPropertyValue newValue = cast(currentProperties.get(property.getKey())); + if (newValue == null) { + ConnectionPropertyValue existingValue = cast(properties.get(property.getKey())); + if (existingValue == null) { + if (!property.hasExtension()) { + throw unknownParamError(property); + } + newValue = new ConnectionPropertyValue(property, null, null); + } else { + newValue = existingValue.copy(); + } + } + newValue.setValue(value, context); + currentProperties.put(property.getKey(), newValue); + } + + static void checkValidValue(ConnectionProperty property, T value) { + if (property.getValidValues() == null || property.getValidValues().length == 0) { + return; + } + if (Arrays.stream(property.getValidValues()) + .noneMatch(validValue -> Objects.equals(validValue, value))) { + throw invalidParamValueError(property, value); + } + } + + /** Creates an exception for an invalid value for a connection property. */ + static SpannerException invalidParamValueError(ConnectionProperty property, T value) { + return SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + String.format("invalid value \"%s\" for configuration property \"%s\"", value, property)); + } + + /** Creates an exception for an unknown connection property. */ + static SpannerException unknownParamError(ConnectionProperty property) { + return SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + String.format("unrecognized configuration property \"%s\"", property)); + } + + /** + * Commits the current transaction and persists any changes to the settings (except local + * changes). 
+ */ + void commit() { + synchronized (lock) { + if (transactionProperties != null) { + for (ConnectionPropertyValue value : transactionProperties.values()) { + properties.put(value.getProperty().getKey(), value); + } + } + this.localProperties = null; + this.transactionProperties = null; + } + } + + /** Rolls back the current transaction and abandons any pending changes to the settings. */ + void rollback() { + synchronized (lock) { + this.localProperties = null; + this.transactionProperties = null; + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionStatementExecutor.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionStatementExecutor.java new file mode 100644 index 000000000000..6e1852298c0b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionStatementExecutor.java @@ -0,0 +1,206 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.Options.RpcPriority; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.connection.PgTransactionMode.IsolationLevel; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.TransactionOptions; +import com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode; +import java.time.Duration; + +/** + * The Cloud Spanner JDBC driver supports a number of client side statements that are interpreted by + * the driver and that can modify the current state of a connection, or report the current state of + * a connection. Each of the methods in this interface correspond with one such client side + * statement. + * + *

The methods in this interface are called by the different {@link ClientSideStatement}s. These + * method calls are then forwarded into the appropriate method of a {@link Connection} instance. + * + *

The client side statements are defined in the ClientSideStatements.json file. + */ +interface ConnectionStatementExecutor { + Dialect getDialect(); + + StatementResult statementSetAutocommit(Boolean autocommit); + + StatementResult statementShowAutocommit(); + + StatementResult statementSetReadOnly(Boolean readOnly); + + StatementResult statementShowReadOnly(); + + StatementResult statementSetRetryAbortsInternally(Boolean retryAbortsInternally, Boolean local); + + StatementResult statementShowRetryAbortsInternally(); + + StatementResult statementSetAutocommitDmlMode(AutocommitDmlMode mode); + + StatementResult statementShowAutocommitDmlMode(); + + StatementResult statementSetStatementTimeout(Duration duration); + + StatementResult statementShowStatementTimeout(); + + StatementResult statementSetTransactionTimeout(Duration duration); + + StatementResult statementShowTransactionTimeout(); + + StatementResult statementShowReadTimestamp(); + + StatementResult statementShowCommitTimestamp(); + + StatementResult statementShowCommitResponse(); + + StatementResult statementSetReadOnlyStaleness(TimestampBound staleness); + + StatementResult statementShowReadOnlyStaleness(); + + StatementResult statementSetDirectedRead(DirectedReadOptions directedReadOptions); + + StatementResult statementShowDirectedRead(); + + StatementResult statementSetOptimizerVersion(String optimizerVersion); + + StatementResult statementShowOptimizerVersion(); + + StatementResult statementSetOptimizerStatisticsPackage(String optimizerStatisticsPackage); + + StatementResult statementShowOptimizerStatisticsPackage(); + + StatementResult statementSetReturnCommitStats(Boolean returnCommitStats); + + StatementResult statementShowReturnCommitStats(); + + StatementResult statementSetMaxCommitDelay(Duration maxCommitDelay); + + StatementResult statementShowMaxCommitDelay(); + + StatementResult statementSetDelayTransactionStartUntilFirstWrite( + Boolean delayTransactionStartUntilFirstWrite); + + 
StatementResult statementShowDelayTransactionStartUntilFirstWrite(); + + StatementResult statementSetKeepTransactionAlive(Boolean keepTransactionAlive); + + StatementResult statementShowKeepTransactionAlive(); + + StatementResult statementSetStatementTag(String tag); + + StatementResult statementShowStatementTag(); + + StatementResult statementSetTransactionTag(String tag); + + StatementResult statementShowTransactionTag(); + + StatementResult statementSetExcludeTxnFromChangeStreams(Boolean excludeTxnFromChangeStreams); + + StatementResult statementShowExcludeTxnFromChangeStreams(); + + StatementResult statementBeginTransaction(TransactionOptions.IsolationLevel isolationLevel); + + StatementResult statementBeginPgTransaction(PgTransactionMode transactionMode); + + StatementResult statementCommit(); + + StatementResult statementRollback(); + + StatementResult statementSetTransactionMode(TransactionMode mode); + + StatementResult statementSetPgTransactionMode(PgTransactionMode transactionMode); + + StatementResult statementSetPgSessionCharacteristicsTransactionMode( + PgTransactionMode transactionMode); + + StatementResult statementSetPgDefaultTransactionIsolation(IsolationLevel isolationLevel); + + StatementResult statementStartBatchDdl(); + + StatementResult statementStartBatchDml(); + + StatementResult statementRunBatch(); + + StatementResult statementAbortBatch(); + + StatementResult statementResetAll(); + + StatementResult statementSetRPCPriority(RpcPriority priority); + + StatementResult statementShowRPCPriority(); + + StatementResult statementSetSavepointSupport(SavepointSupport savepointSupport); + + StatementResult statementShowSavepointSupport(); + + StatementResult statementShowTransactionIsolationLevel(); + + StatementResult statementShowDefaultTransactionIsolation(); + + StatementResult statementSetProtoDescriptors(byte[] protoDescriptors); + + StatementResult statementSetProtoDescriptorsFilePath(String filePath); + + StatementResult 
statementShowProtoDescriptors(); + + StatementResult statementShowProtoDescriptorsFilePath(); + + StatementResult statementExplain(String sql); + + StatementResult statementShowDataBoostEnabled(); + + StatementResult statementSetDataBoostEnabled(Boolean dataBoostEnabled); + + StatementResult statementShowAutoPartitionMode(); + + StatementResult statementSetAutoPartitionMode(Boolean autoPartitionMode); + + StatementResult statementShowMaxPartitions(); + + StatementResult statementSetMaxPartitions(Integer maxPartitions); + + StatementResult statementShowMaxPartitionedParallelism(); + + StatementResult statementSetMaxPartitionedParallelism(Integer maxPartitionedParallelism); + + StatementResult statementPartition(Statement statement); + + StatementResult statementRunPartition(String partitionId); + + StatementResult statementRunPartitionedQuery(Statement statement); + + StatementResult statementSetBatchDmlUpdateCount(Long updateCount, Boolean local); + + StatementResult statementSetAutoBatchDml(Boolean autoBatchDml); + + StatementResult statementShowAutoBatchDml(); + + StatementResult statementSetAutoBatchDmlUpdateCount(Long updateCount); + + StatementResult statementShowAutoBatchDmlUpdateCount(); + + StatementResult statementSetAutoBatchDmlUpdateCountVerification(Boolean verification); + + StatementResult statementShowAutoBatchDmlUpdateCountVerification(); + + StatementResult statementSetReadLockMode(ReadLockMode readLockMode); + + StatementResult statementShowReadLockMode(); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionStatementExecutorImpl.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionStatementExecutorImpl.java new file mode 100644 index 000000000000..2340fc4b1aa1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionStatementExecutorImpl.java @@ -0,0 +1,1013 @@ +/* + * Copyright 2019 
Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.DialectNamespaceMapper.getNamespace; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.ABORT_BATCH; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.BEGIN; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.COMMIT; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.RESET_ALL; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.ROLLBACK; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.RUN_BATCH; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_AUTOCOMMIT; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_AUTOCOMMIT_DML_MODE; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_AUTO_BATCH_DML; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_AUTO_BATCH_DML_UPDATE_COUNT; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION; +import static 
com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_AUTO_PARTITION_MODE; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_BATCH_DML_UPDATE_COUNT; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_DATA_BOOST_ENABLED; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_DEFAULT_TRANSACTION_ISOLATION; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_DIRECTED_READ; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_EXCLUDE_TXN_FROM_CHANGE_STREAMS; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_KEEP_TRANSACTION_ALIVE; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_MAX_COMMIT_DELAY; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_MAX_PARTITIONED_PARALLELISM; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_MAX_PARTITIONS; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_OPTIMIZER_STATISTICS_PACKAGE; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_OPTIMIZER_VERSION; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_PROTO_DESCRIPTORS; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_PROTO_DESCRIPTORS_FILE_PATH; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_READONLY; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_READ_LOCK_MODE; 
+import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_READ_ONLY_STALENESS; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_RETRY_ABORTS_INTERNALLY; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_RETURN_COMMIT_STATS; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_RPC_PRIORITY; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_SAVEPOINT_SUPPORT; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_STATEMENT_TAG; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_STATEMENT_TIMEOUT; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_TRANSACTION_MODE; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_TRANSACTION_TAG; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_TRANSACTION_TIMEOUT; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_AUTOCOMMIT; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_AUTOCOMMIT_DML_MODE; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_AUTO_BATCH_DML; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_AUTO_BATCH_DML_UPDATE_COUNT; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_AUTO_PARTITION_MODE; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_COMMIT_RESPONSE; +import static 
com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_COMMIT_TIMESTAMP; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_DATA_BOOST_ENABLED; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_DEFAULT_TRANSACTION_ISOLATION; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_DIRECTED_READ; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_EXCLUDE_TXN_FROM_CHANGE_STREAMS; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_KEEP_TRANSACTION_ALIVE; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_MAX_COMMIT_DELAY; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_MAX_PARTITIONED_PARALLELISM; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_MAX_PARTITIONS; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_OPTIMIZER_STATISTICS_PACKAGE; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_OPTIMIZER_VERSION; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_PROTO_DESCRIPTORS; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_PROTO_DESCRIPTORS_FILE_PATH; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_READONLY; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_READ_LOCK_MODE; +import static 
com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_READ_ONLY_STALENESS; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_READ_TIMESTAMP; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_RETRY_ABORTS_INTERNALLY; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_RETURN_COMMIT_STATS; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_RPC_PRIORITY; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_SAVEPOINT_SUPPORT; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_STATEMENT_TAG; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_STATEMENT_TIMEOUT; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_TRANSACTION_ISOLATION_LEVEL; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_TRANSACTION_TAG; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_TRANSACTION_TIMEOUT; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.START_BATCH_DDL; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.START_BATCH_DML; +import static com.google.cloud.spanner.connection.StatementResultImpl.noResult; +import static com.google.cloud.spanner.connection.StatementResultImpl.resultSet; + +import com.google.cloud.spanner.CommitResponse; +import com.google.cloud.spanner.CommitStats; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Options.RpcPriority; +import com.google.cloud.spanner.PartitionOptions; +import com.google.cloud.spanner.ReadContext.QueryAnalyzeMode; +import 
com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.ResultSets; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.StructField; +import com.google.cloud.spanner.connection.PgTransactionMode.IsolationLevel; +import com.google.cloud.spanner.connection.ReadOnlyStalenessUtil.DurationValueGetter; +import com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType; +import com.google.common.base.MoreObjects; +import com.google.common.base.Preconditions; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.PlanNode; +import com.google.spanner.v1.QueryPlan; +import com.google.spanner.v1.RequestOptions; +import com.google.spanner.v1.TransactionOptions; +import com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.concurrent.TimeUnit; +import javax.annotation.Nullable; + +/** + * The methods in this class are called by the different {@link ClientSideStatement}s. These method + * calls are then forwarded into a {@link Connection}. + */ +class ConnectionStatementExecutorImpl implements ConnectionStatementExecutor { + + static final class StatementTimeoutGetter implements DurationValueGetter { + private final Connection connection; + + public StatementTimeoutGetter(Connection connection) { + this.connection = connection; + } + + @Override + public long getDuration(TimeUnit unit) { + return connection.getStatementTimeout(unit); + } + + @Override + public boolean hasDuration() { + return connection.hasStatementTimeout(); + } + } + + /** The connection to execute the statements on. 
*/ + private final ConnectionImpl connection; + + ConnectionStatementExecutorImpl(ConnectionImpl connection) { + this.connection = connection; + } + + ConnectionImpl getConnection() { + return connection; + } + + @Override + public Dialect getDialect() { + return getConnection().getDialect(); + } + + @Override + public StatementResult statementSetAutocommit(Boolean autocommit) { + Preconditions.checkNotNull(autocommit); + getConnection().setAutocommit(autocommit); + return noResult(SET_AUTOCOMMIT); + } + + @Override + public StatementResult statementShowAutocommit() { + return resultSet("AUTOCOMMIT", getConnection().isAutocommit(), SHOW_AUTOCOMMIT); + } + + @Override + public StatementResult statementSetReadOnly(Boolean readOnly) { + Preconditions.checkNotNull(readOnly); + getConnection().setReadOnly(readOnly); + return noResult(SET_READONLY); + } + + @Override + public StatementResult statementShowReadOnly() { + return StatementResultImpl.resultSet( + String.format("%sREADONLY", getNamespace(connection.getDialect())), + getConnection().isReadOnly(), + SHOW_READONLY); + } + + @Override + public StatementResult statementSetRetryAbortsInternally( + Boolean retryAbortsInternally, Boolean local) { + Preconditions.checkNotNull(retryAbortsInternally); + getConnection().setRetryAbortsInternally(retryAbortsInternally, local); + return noResult(SET_RETRY_ABORTS_INTERNALLY); + } + + @Override + public StatementResult statementShowRetryAbortsInternally() { + return StatementResultImpl.resultSet( + String.format("%sRETRY_ABORTS_INTERNALLY", getNamespace(connection.getDialect())), + getConnection().isRetryAbortsInternally(), + SHOW_RETRY_ABORTS_INTERNALLY); + } + + @Override + public StatementResult statementSetAutocommitDmlMode(AutocommitDmlMode mode) { + getConnection().setAutocommitDmlMode(mode); + return noResult(SET_AUTOCOMMIT_DML_MODE); + } + + @Override + public StatementResult statementShowAutocommitDmlMode() { + return resultSet( + 
String.format("%sAUTOCOMMIT_DML_MODE", getNamespace(connection.getDialect())), + getConnection().getAutocommitDmlMode(), + SHOW_AUTOCOMMIT_DML_MODE); + } + + @Override + public StatementResult statementSetStatementTimeout(Duration duration) { + if (duration == null || duration.isZero()) { + getConnection().clearStatementTimeout(); + } else { + com.google.protobuf.Duration protoDuration = + com.google.protobuf.Duration.newBuilder() + .setSeconds(duration.getSeconds()) + .setNanos(duration.getNano()) + .build(); + TimeUnit unit = + ReadOnlyStalenessUtil.getAppropriateTimeUnit( + new ReadOnlyStalenessUtil.DurationGetter(protoDuration)); + getConnection() + .setStatementTimeout(ReadOnlyStalenessUtil.durationToUnits(protoDuration, unit), unit); + } + return noResult(SET_STATEMENT_TIMEOUT); + } + + @Override + public StatementResult statementShowStatementTimeout() { + return resultSet( + "STATEMENT_TIMEOUT", + getConnection().hasStatementTimeout() + ? ReadOnlyStalenessUtil.durationToString(new StatementTimeoutGetter(getConnection())) + : connection.getDialect() == Dialect.POSTGRESQL ? 
"0" : null, + SHOW_STATEMENT_TIMEOUT); + } + + @Override + public StatementResult statementSetTransactionTimeout(Duration duration) { + if (duration == null || duration.isZero()) { + getConnection().setTransactionTimeout(null); + } else { + getConnection().setTransactionTimeout(duration); + } + return noResult(SET_TRANSACTION_TIMEOUT); + } + + @Override + public StatementResult statementShowTransactionTimeout() { + return resultSet( + String.format("%sTRANSACTION_TIMEOUT", getNamespace(connection.getDialect())), + String.valueOf(getConnection().getTransactionTimeout()), + SHOW_TRANSACTION_TIMEOUT); + } + + @Override + public StatementResult statementShowReadTimestamp() { + return resultSet( + String.format("%sREAD_TIMESTAMP", getNamespace(connection.getDialect())), + getConnection().getReadTimestampOrNull(), + SHOW_READ_TIMESTAMP); + } + + @Override + public StatementResult statementShowCommitTimestamp() { + return resultSet( + String.format("%sCOMMIT_TIMESTAMP", getNamespace(connection.getDialect())), + getConnection().getCommitTimestampOrNull(), + SHOW_COMMIT_TIMESTAMP); + } + + @Override + public StatementResult statementShowCommitResponse() { + CommitResponse response = getConnection().getCommitResponseOrNull(); + CommitStats stats = null; + if (response != null && response.hasCommitStats()) { + stats = response.getCommitStats(); + } + ResultSet resultSet = + ResultSets.forRows( + Type.struct( + StructField.of( + String.format("%sCOMMIT_TIMESTAMP", getNamespace(connection.getDialect())), + Type.timestamp()), + StructField.of( + String.format("%sMUTATION_COUNT", getNamespace(connection.getDialect())), + Type.int64())), + Collections.singletonList( + Struct.newBuilder() + .set(String.format("%sCOMMIT_TIMESTAMP", getNamespace(connection.getDialect()))) + .to(response == null ? null : response.getCommitTimestamp()) + .set(String.format("%sMUTATION_COUNT", getNamespace(connection.getDialect()))) + .to(stats == null ? 
null : stats.getMutationCount()) + .build())); + return StatementResultImpl.of(resultSet, SHOW_COMMIT_RESPONSE); + } + + @Override + public StatementResult statementSetReadOnlyStaleness(TimestampBound staleness) { + getConnection().setReadOnlyStaleness(staleness); + return noResult(SET_READ_ONLY_STALENESS); + } + + @Override + public StatementResult statementShowReadOnlyStaleness() { + TimestampBound staleness = getConnection().getReadOnlyStaleness(); + return resultSet( + String.format("%sREAD_ONLY_STALENESS", getNamespace(connection.getDialect())), + ReadOnlyStalenessUtil.timestampBoundToString(staleness), + SHOW_READ_ONLY_STALENESS); + } + + @Override + public StatementResult statementSetDirectedRead(DirectedReadOptions directedReadOptions) { + getConnection().setDirectedRead(directedReadOptions); + return noResult(SET_DIRECTED_READ); + } + + @Override + public StatementResult statementShowDirectedRead() { + DirectedReadOptions directedReadOptions = getConnection().getDirectedRead(); + return resultSet( + String.format("%sDIRECTED_READ", getNamespace(connection.getDialect())), + DirectedReadOptionsUtil.toString(directedReadOptions), + SHOW_DIRECTED_READ); + } + + @Override + public StatementResult statementSetOptimizerVersion(String optimizerVersion) { + getConnection().setOptimizerVersion(optimizerVersion); + return noResult(SET_OPTIMIZER_VERSION); + } + + @Override + public StatementResult statementShowOptimizerVersion() { + return resultSet( + String.format("%sOPTIMIZER_VERSION", getNamespace(connection.getDialect())), + getConnection().getOptimizerVersion(), + SHOW_OPTIMIZER_VERSION); + } + + @Override + public StatementResult statementSetOptimizerStatisticsPackage(String optimizerStatisticsPackage) { + getConnection().setOptimizerStatisticsPackage(optimizerStatisticsPackage); + return noResult(SET_OPTIMIZER_STATISTICS_PACKAGE); + } + + @Override + public StatementResult statementShowOptimizerStatisticsPackage() { + return resultSet( + 
String.format("%sOPTIMIZER_STATISTICS_PACKAGE", getNamespace(connection.getDialect())), + getConnection().getOptimizerStatisticsPackage(), + SHOW_OPTIMIZER_STATISTICS_PACKAGE); + } + + @Override + public StatementResult statementSetReturnCommitStats(Boolean returnCommitStats) { + getConnection().setReturnCommitStats(returnCommitStats); + return noResult(SET_RETURN_COMMIT_STATS); + } + + @Override + public StatementResult statementShowReturnCommitStats() { + return resultSet( + String.format("%sRETURN_COMMIT_STATS", getNamespace(connection.getDialect())), + getConnection().isReturnCommitStats(), + SHOW_RETURN_COMMIT_STATS); + } + + @Override + public StatementResult statementSetMaxCommitDelay(Duration duration) { + getConnection().setMaxCommitDelay(duration); + return noResult(SET_MAX_COMMIT_DELAY); + } + + @Override + public StatementResult statementShowMaxCommitDelay() { + return resultSet( + "MAX_COMMIT_DELAY", + getConnection().getMaxCommitDelay() == null + ? null + : getConnection().getMaxCommitDelay().toMillis() + "ms", + SHOW_MAX_COMMIT_DELAY); + } + + @Override + public StatementResult statementSetDelayTransactionStartUntilFirstWrite( + Boolean delayTransactionStartUntilFirstWrite) { + getConnection().setDelayTransactionStartUntilFirstWrite(delayTransactionStartUntilFirstWrite); + return noResult(SET_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE); + } + + @Override + public StatementResult statementShowDelayTransactionStartUntilFirstWrite() { + return resultSet( + String.format( + "%sDELAY_TRANSACTION_START_UNTIL_FIRST_WRITE", getNamespace(connection.getDialect())), + getConnection().isDelayTransactionStartUntilFirstWrite(), + SHOW_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE); + } + + @Override + public StatementResult statementSetKeepTransactionAlive(Boolean keepTransactionAlive) { + getConnection().setKeepTransactionAlive(keepTransactionAlive); + return noResult(SET_KEEP_TRANSACTION_ALIVE); + } + + @Override + public StatementResult 
statementShowKeepTransactionAlive() { + return resultSet( + String.format("%sKEEP_TRANSACTION_ALIVE", getNamespace(connection.getDialect())), + getConnection().isKeepTransactionAlive(), + SHOW_KEEP_TRANSACTION_ALIVE); + } + + @Override + public StatementResult statementSetStatementTag(String tag) { + getConnection().setStatementTag("".equals(tag) ? null : tag); + return noResult(SET_STATEMENT_TAG); + } + + @Override + public StatementResult statementShowStatementTag() { + return resultSet( + String.format("%sSTATEMENT_TAG", getNamespace(connection.getDialect())), + MoreObjects.firstNonNull(getConnection().getStatementTag(), ""), + SHOW_STATEMENT_TAG); + } + + @Override + public StatementResult statementSetTransactionTag(String tag) { + getConnection().setTransactionTag("".equals(tag) ? null : tag); + return noResult(SET_TRANSACTION_TAG); + } + + @Override + public StatementResult statementShowTransactionTag() { + return resultSet( + String.format("%sTRANSACTION_TAG", getNamespace(connection.getDialect())), + MoreObjects.firstNonNull(getConnection().getTransactionTag(), ""), + SHOW_TRANSACTION_TAG); + } + + @Override + public StatementResult statementSetExcludeTxnFromChangeStreams( + Boolean excludeTxnFromChangeStreams) { + getConnection().setExcludeTxnFromChangeStreams(excludeTxnFromChangeStreams); + return noResult(SET_EXCLUDE_TXN_FROM_CHANGE_STREAMS); + } + + @Override + public StatementResult statementShowExcludeTxnFromChangeStreams() { + return resultSet( + String.format("%sEXCLUDE_TXN_FROM_CHANGE_STREAMS", getNamespace(connection.getDialect())), + getConnection().isExcludeTxnFromChangeStreams(), + SHOW_EXCLUDE_TXN_FROM_CHANGE_STREAMS); + } + + @Override + public StatementResult statementBeginTransaction( + TransactionOptions.IsolationLevel isolationLevel) { + if (isolationLevel != null) { + getConnection().beginTransaction(isolationLevel); + } else { + getConnection().beginTransaction(); + } + return noResult(BEGIN); + } + + @Override + public StatementResult 
statementBeginPgTransaction(@Nullable PgTransactionMode transactionMode) { + if (transactionMode == null + || transactionMode.getIsolationLevel() == null + || transactionMode.getIsolationLevel() == IsolationLevel.ISOLATION_LEVEL_DEFAULT) { + getConnection().beginTransaction(); + } else { + getConnection() + .beginTransaction(transactionMode.getIsolationLevel().getSpannerIsolationLevel()); + } + if (transactionMode != null) { + statementSetPgTransactionMode(transactionMode); + } + return noResult(BEGIN); + } + + @Override + public StatementResult statementCommit() { + getConnection().commit(); + return noResult(COMMIT); + } + + @Override + public StatementResult statementRollback() { + getConnection().rollback(); + return noResult(ROLLBACK); + } + + @Override + public StatementResult statementSetTransactionMode(TransactionMode mode) { + getConnection().setTransactionMode(mode); + return noResult(SET_TRANSACTION_MODE); + } + + @Override + public StatementResult statementSetPgTransactionMode(PgTransactionMode transactionMode) { + if (transactionMode.getIsolationLevel() != null) { + getConnection() + .setTransactionIsolationLevel( + transactionMode.getIsolationLevel().getSpannerIsolationLevel()); + } + if (transactionMode.getAccessMode() != null) { + switch (transactionMode.getAccessMode()) { + case READ_ONLY_TRANSACTION: + getConnection().setTransactionMode(TransactionMode.READ_ONLY_TRANSACTION); + break; + case READ_WRITE_TRANSACTION: + getConnection().setTransactionMode(TransactionMode.READ_WRITE_TRANSACTION); + break; + default: + // no-op + } + } + return noResult(SET_TRANSACTION_MODE); + } + + @Override + public StatementResult statementSetPgSessionCharacteristicsTransactionMode( + PgTransactionMode transactionMode) { + if (transactionMode.getIsolationLevel() != null) { + getConnection() + .setDefaultIsolationLevel(transactionMode.getIsolationLevel().getSpannerIsolationLevel()); + } + if (transactionMode.getAccessMode() != null) { + switch 
(transactionMode.getAccessMode()) { + case READ_ONLY_TRANSACTION: + getConnection().setReadOnly(true); + break; + case READ_WRITE_TRANSACTION: + getConnection().setReadOnly(false); + break; + default: + // no-op + } + } + return noResult(SET_TRANSACTION_MODE); + } + + @Override + public StatementResult statementSetPgDefaultTransactionIsolation(IsolationLevel isolationLevel) { + getConnection() + .setDefaultIsolationLevel( + isolationLevel == null + ? TransactionOptions.IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED + : isolationLevel.getSpannerIsolationLevel()); + return noResult(SET_DEFAULT_TRANSACTION_ISOLATION); + } + + @Override + public StatementResult statementStartBatchDdl() { + getConnection().startBatchDdl(); + return noResult(START_BATCH_DDL); + } + + @Override + public StatementResult statementStartBatchDml() { + getConnection().startBatchDml(); + return noResult(START_BATCH_DML); + } + + @Override + public StatementResult statementRunBatch() { + long[] updateCounts = getConnection().runBatch(); + return resultSet("UPDATE_COUNTS", updateCounts, RUN_BATCH); + } + + @Override + public StatementResult statementAbortBatch() { + getConnection().abortBatch(); + return noResult(ABORT_BATCH); + } + + @Override + public StatementResult statementResetAll() { + getConnection().reset(); + return noResult(RESET_ALL); + } + + @Override + public StatementResult statementSetRPCPriority(RpcPriority priority) { + getConnection().setRPCPriority(priority); + return noResult(SET_RPC_PRIORITY); + } + + @Override + public StatementResult statementShowRPCPriority() { + return resultSet( + String.format("%sRPC_PRIORITY", getNamespace(connection.getDialect())), + getConnection().getRPCPriority() == null + ? 
RequestOptions.Priority.PRIORITY_UNSPECIFIED + : getConnection().getRPCPriority(), + SHOW_RPC_PRIORITY); + } + + @Override + public StatementResult statementSetSavepointSupport(SavepointSupport savepointSupport) { + getConnection().setSavepointSupport(savepointSupport); + return noResult(SET_SAVEPOINT_SUPPORT); + } + + @Override + public StatementResult statementShowSavepointSupport() { + return resultSet( + String.format("%sSAVEPOINT_SUPPORT", getNamespace(connection.getDialect())), + getConnection().getSavepointSupport(), + SHOW_SAVEPOINT_SUPPORT); + } + + @Override + public StatementResult statementSetReadLockMode(ReadLockMode readLockMode) { + getConnection().setReadLockMode(readLockMode); + return noResult(SET_READ_LOCK_MODE); + } + + @Override + public StatementResult statementShowReadLockMode() { + return resultSet( + String.format("%sREAD_LOCK_MODE", getNamespace(connection.getDialect())), + getConnection().getReadLockMode(), + SHOW_READ_LOCK_MODE); + } + + @Override + public StatementResult statementShowTransactionIsolationLevel() { + TransactionOptions.IsolationLevel isolationLevel = + getConnection().isInTransaction() + ? 
getConnection().getTransactionIsolationLevel() + : getConnection().getDefaultIsolationLevel(); + return resultSet("transaction_isolation", isolationLevel, SHOW_TRANSACTION_ISOLATION_LEVEL); + } + + @Override + public StatementResult statementShowDefaultTransactionIsolation() { + return resultSet( + "default_transaction_isolation", + getConnection().getDefaultIsolationLevel(), + SHOW_DEFAULT_TRANSACTION_ISOLATION); + } + + @Override + public StatementResult statementShowDataBoostEnabled() { + return resultSet( + String.format("%sDATA_BOOST_ENABLED", getNamespace(connection.getDialect())), + getConnection().isDataBoostEnabled(), + SHOW_DATA_BOOST_ENABLED); + } + + @Override + public StatementResult statementSetDataBoostEnabled(Boolean dataBoostEnabled) { + getConnection().setDataBoostEnabled(Preconditions.checkNotNull(dataBoostEnabled)); + return noResult(SET_DATA_BOOST_ENABLED); + } + + @Override + public StatementResult statementShowAutoPartitionMode() { + return resultSet( + String.format("%sAUTO_PARTITION_MODE", getNamespace(connection.getDialect())), + getConnection().isAutoPartitionMode(), + SHOW_AUTO_PARTITION_MODE); + } + + @Override + public StatementResult statementSetAutoPartitionMode(Boolean autoPartitionMode) { + getConnection().setAutoPartitionMode(Preconditions.checkNotNull(autoPartitionMode)); + return noResult(SET_AUTO_PARTITION_MODE); + } + + @Override + public StatementResult statementShowMaxPartitions() { + return resultSet( + String.format("%sMAX_PARTITIONS", getNamespace(connection.getDialect())), + Long.valueOf(getConnection().getMaxPartitions()), + SHOW_MAX_PARTITIONS); + } + + @Override + public StatementResult statementSetMaxPartitions(Integer maxPartitions) { + getConnection().setMaxPartitions(Preconditions.checkNotNull(maxPartitions)); + return noResult(SET_MAX_PARTITIONS); + } + + @Override + public StatementResult statementShowMaxPartitionedParallelism() { + return resultSet( + String.format("%sMAX_PARTITIONED_PARALLELISM", 
getNamespace(connection.getDialect())), + Long.valueOf(getConnection().getMaxPartitionedParallelism()), + SHOW_MAX_PARTITIONED_PARALLELISM); + } + + @Override + public StatementResult statementSetMaxPartitionedParallelism(Integer maxPartitionedParallelism) { + getConnection() + .setMaxPartitionedParallelism(Preconditions.checkNotNull(maxPartitionedParallelism)); + return noResult(SET_MAX_PARTITIONED_PARALLELISM); + } + + @Override + public StatementResult statementPartition(Statement statement) { + return StatementResultImpl.of( + getConnection().partitionQuery(statement, PartitionOptions.getDefaultInstance()), + ClientSideStatementType.PARTITION); + } + + @Override + public StatementResult statementRunPartition(String partitionId) { + return StatementResultImpl.of( + getConnection().runPartition(partitionId), ClientSideStatementType.RUN_PARTITION); + } + + @Override + public StatementResult statementRunPartitionedQuery(Statement statement) { + return StatementResultImpl.of( + getConnection().runPartitionedQuery(statement, PartitionOptions.getDefaultInstance()), + ClientSideStatementType.RUN_PARTITIONED_QUERY); + } + + @Override + public StatementResult statementSetBatchDmlUpdateCount(Long updateCount, Boolean local) { + getConnection().setBatchDmlUpdateCount(updateCount, local); + return noResult(SET_BATCH_DML_UPDATE_COUNT); + } + + @Override + public StatementResult statementSetProtoDescriptors(byte[] protoDescriptors) { + Preconditions.checkNotNull(protoDescriptors); + getConnection().setProtoDescriptors(protoDescriptors); + return noResult(SET_PROTO_DESCRIPTORS); + } + + @Override + public StatementResult statementSetProtoDescriptorsFilePath(String filePath) { + Preconditions.checkNotNull(filePath); + getConnection().setProtoDescriptorsFilePath(filePath); + return noResult(SET_PROTO_DESCRIPTORS_FILE_PATH); + } + + @Override + public StatementResult statementShowProtoDescriptors() { + return resultSet( + String.format("%sPROTO_DESCRIPTORS", 
getNamespace(connection.getDialect())), + getConnection().getProtoDescriptors(), + SHOW_PROTO_DESCRIPTORS); + } + + @Override + public StatementResult statementShowProtoDescriptorsFilePath() { + return resultSet( + String.format("%sPROTO_DESCRIPTORS_FILE_PATH", getNamespace(connection.getDialect())), + getConnection().getProtoDescriptorsFilePath(), + SHOW_PROTO_DESCRIPTORS_FILE_PATH); + } + + @Override + public StatementResult statementSetAutoBatchDml(Boolean autoBatchDml) { + getConnection().setAutoBatchDml(autoBatchDml); + return noResult(SET_AUTO_BATCH_DML); + } + + @Override + public StatementResult statementShowAutoBatchDml() { + return resultSet( + String.format("%sAUTO_BATCH_DML", getNamespace(connection.getDialect())), + getConnection().isAutoBatchDml(), + SHOW_AUTO_BATCH_DML); + } + + @Override + public StatementResult statementSetAutoBatchDmlUpdateCount(Long updateCount) { + getConnection().setAutoBatchDmlUpdateCount(updateCount); + return noResult(SET_AUTO_BATCH_DML_UPDATE_COUNT); + } + + @Override + public StatementResult statementShowAutoBatchDmlUpdateCount() { + return resultSet( + String.format("%sAUTO_BATCH_DML_UPDATE_COUNT", getNamespace(connection.getDialect())), + getConnection().getAutoBatchDmlUpdateCount(), + SHOW_AUTO_BATCH_DML_UPDATE_COUNT); + } + + @Override + public StatementResult statementSetAutoBatchDmlUpdateCountVerification(Boolean verification) { + getConnection().setAutoBatchDmlUpdateCountVerification(verification); + return noResult(SET_AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION); + } + + @Override + public StatementResult statementShowAutoBatchDmlUpdateCountVerification() { + return resultSet( + String.format( + "%sAUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION", getNamespace(connection.getDialect())), + getConnection().isAutoBatchDmlUpdateCountVerification(), + SHOW_AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION); + } + + private String processQueryPlan(PlanNode planNode) { + StringBuilder planNodeDescription = new StringBuilder(" : { "); + 
com.google.protobuf.Struct metadata = planNode.getMetadata(); + + for (String key : metadata.getFieldsMap().keySet()) { + planNodeDescription + .append(key) + .append(" : ") + .append(metadata.getFieldsMap().get(key).getStringValue()) + .append(" , "); + } + String substring = planNodeDescription.substring(0, planNodeDescription.length() - 3); + planNodeDescription.setLength(0); + planNodeDescription.append(substring).append(" }"); + + return planNodeDescription.toString(); + } + + private String processExecutionStats(PlanNode planNode) { + StringBuilder executionStats = new StringBuilder(""); + for (String key : planNode.getExecutionStats().getFieldsMap().keySet()) { + executionStats.append(key).append(" : { "); + com.google.protobuf.Struct value = + planNode.getExecutionStats().getFieldsMap().get(key).getStructValue(); + for (String newKey : value.getFieldsMap().keySet()) { + String newValue = value.getFieldsMap().get(newKey).getStringValue(); + executionStats.append(newKey).append(" : ").append(newValue).append(" , "); + } + String substring = executionStats.substring(0, executionStats.length() - 3); + executionStats.setLength(0); + executionStats.append(substring).append(" } , "); + } + String substring = executionStats.substring(0, executionStats.length() - 3); + executionStats.setLength(0); + executionStats.append(substring); + return executionStats.toString(); + } + + private StatementResult getStatementResultFromQueryPlan(QueryPlan queryPlan, boolean isAnalyze) { + ArrayList list = new ArrayList<>(); + for (PlanNode planNode : queryPlan.getPlanNodesList()) { + String planNodeDescription = planNode.getDisplayName(); + String executionStats = ""; + + if (!planNode.getMetadata().toString().equalsIgnoreCase("")) { + planNodeDescription += processQueryPlan(planNode); + } + + if (!planNode.getShortRepresentation().toString().equalsIgnoreCase("")) { + planNodeDescription += " : " + planNode.getShortRepresentation().getDescription(); + } + + if (isAnalyze && 
!planNode.getExecutionStats().toString().equals("")) { + executionStats = processExecutionStats(planNode); + } + Struct.Builder builder = Struct.newBuilder().set("QUERY PLAN").to(planNodeDescription); + + if (isAnalyze) { + builder.set("EXECUTION STATS").to(executionStats); + } + list.add(builder.build()); + } + + ResultSet resultSet; + if (isAnalyze) { + resultSet = + ResultSets.forRows( + Type.struct( + StructField.of("QUERY PLAN", Type.string()), + StructField.of("EXECUTION STATS", Type.string())), + list); + } else { + resultSet = + ResultSets.forRows(Type.struct(StructField.of("QUERY PLAN", Type.string())), list); + } + return StatementResultImpl.of(resultSet); + } + + private StatementResult executeStatement(String sql, QueryAnalyzeMode queryAnalyzeMode) { + Statement statement = Statement.newBuilder(sql).build(); + try (ResultSet resultSet = getConnection().analyzeQuery(statement, queryAnalyzeMode)) { + while (resultSet.next()) { + // ResultSet.next() should return false in order to access the ResultSet.Stats + } + + if (resultSet.getStats() != null && resultSet.getStats().getQueryPlan() != null) { + return getStatementResultFromQueryPlan( + resultSet.getStats().getQueryPlan(), queryAnalyzeMode.equals(QueryAnalyzeMode.PROFILE)); + } + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, String.format("Couldn't fetch stats for %s", sql)); + } + + // This method removes parenthesis from the sql string assuming it is ending with the closing + // parenthesis + private String removeParenthesisAndTrim(String sql) { + sql = sql.trim(); + if (sql.charAt(0) == '(') { + sql = sql.substring(1, sql.length() - 1); + } + return sql.trim(); + } + + /* + * This method executes the given SQL string in either PLAN or PROFILE mode and returns + * the query plan and execution stats (if necessary). + * + * The only additional option that is supported is ANALYZE. 
The method will throw a SpannerException + * if it is invoked with a statement that includes any other options. + * + * If the SQL string has ANALYZE option, it will be executed in PROFILE mode and will return a resultset + * with two String columns namely QUERY PLAN and EXECUTION STATS. + * + * If the sql string doesn't have any option, it will be executed in PLAN mode and will return a resultset + * with one string column namely QUERY PLAN. + */ + @Override + public StatementResult statementExplain(String sql) { + if (sql == null) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, String.format("Invalid String with Explain")); + } + + if (sql.charAt(0) == '(') { + int index = sql.indexOf(')'); + if (index == -1) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + String.format("Missing closing parenthesis in the query: %s", sql)); + } + String options[] = sql.substring(1, index).split("\\s*,\\s*"); + boolean isAnalyze = false, startAfterIndex = false; + for (String option : options) { + String optionExpression[] = option.trim().split("\\s+"); + if (optionExpression.length >= 3) { + isAnalyze = false; + break; + } else if (ClientSideStatementExplainExecutor.EXPLAIN_OPTIONS.contains( + optionExpression[0].toLowerCase())) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.UNIMPLEMENTED, + String.format("%s is not implemented yet", optionExpression[0])); + } else if (optionExpression[0].equalsIgnoreCase("analyse") + || optionExpression[0].equalsIgnoreCase("analyze")) { + isAnalyze = true; + } else { + isAnalyze = false; + break; + } + + if (optionExpression.length == 2) { + if (optionExpression[1].equalsIgnoreCase("false") + || optionExpression[1].equalsIgnoreCase("0") + || optionExpression[1].equalsIgnoreCase("off")) { + isAnalyze = false; + startAfterIndex = true; + } else if (!(optionExpression[1].equalsIgnoreCase("true") + || optionExpression[1].equalsIgnoreCase("1") + || 
optionExpression[1].equalsIgnoreCase("on"))) { + isAnalyze = false; + break; + } + } + } + if (isAnalyze) { + String newSql = removeParenthesisAndTrim(sql.substring(index + 1)); + return executeStatement(newSql, QueryAnalyzeMode.PROFILE); + } else if (startAfterIndex) { + String newSql = removeParenthesisAndTrim(sql.substring(index + 1)); + return executeStatement(newSql, QueryAnalyzeMode.PLAN); + } else { + return executeStatement(removeParenthesisAndTrim(sql), QueryAnalyzeMode.PLAN); + } + } else { + String[] arr = sql.split("\\s+", 2); + if (arr.length >= 2) { + String option = arr[0].toLowerCase(); + String statementToBeExplained = arr[1]; + + if (ClientSideStatementExplainExecutor.EXPLAIN_OPTIONS.contains(option)) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.UNIMPLEMENTED, String.format("%s is not implemented yet", option)); + } else if (option.equals("analyze") || option.equals("analyse")) { + return executeStatement( + removeParenthesisAndTrim(statementToBeExplained), QueryAnalyzeMode.PROFILE); + } + } + return executeStatement(sql, QueryAnalyzeMode.PLAN); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/CredentialsService.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/CredentialsService.java new file mode 100644 index 000000000000..3110e361270f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/CredentialsService.java @@ -0,0 +1,125 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.auth.oauth2.GoogleCredentials; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.io.BaseEncoding; +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; + +/** Service class for getting credentials from key files. */ +class CredentialsService { + static final String GCS_NOT_SUPPORTED_MSG = + "Credentials that is stored on Google Cloud Storage is no longer supported. Download the" + + " credentials to a local file and reference the local file in the connection URL."; + static final CredentialsService INSTANCE = new CredentialsService(); + + CredentialsService() {} + + /** + * Create credentials from the given URL pointing to a credentials json file. This may be a local + * file or a file on Google Cloud Storage. Credentials on Google Cloud Storage can only be used if + * the application is running in an environment where application default credentials have been + * set. + * + * @param credentialsUrl The URL of the credentials file to read. If null, then this + * method will return the application default credentials of the environment. + * @return the {@link GoogleCredentials} object pointed to by the URL. 
+ * @throws SpannerException If the URL does not point to a valid credentials file, or if the file + * cannot be accessed. + */ + GoogleCredentials createCredentials(String credentialsUrl) { + try { + if (credentialsUrl == null) { + return internalGetApplicationDefault(); + } else { + return getCredentialsFromUrl(credentialsUrl); + } + } catch (IOException e) { + String msg = "Invalid credentials path specified: "; + if (credentialsUrl == null) { + msg = + msg + + "There are no credentials set in the connection string, and the default" + + " application credentials are not set or are pointing to an invalid or" + + " non-existing file.\n" + + "Please check the GOOGLE_APPLICATION_CREDENTIALS environment variable and/or the" + + " credentials that have been set using the Google Cloud SDK gcloud auth" + + " application-default login command"; + } else { + msg = msg + credentialsUrl; + } + throw SpannerExceptionFactory.newSpannerException(ErrorCode.INVALID_ARGUMENT, msg, e); + } + } + + GoogleCredentials decodeCredentials(String encodedCredentials) { + byte[] decodedBytes; + try { + decodedBytes = BaseEncoding.base64Url().decode(encodedCredentials); + } catch (IllegalArgumentException e) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "The encoded credentials could not be decoded as a base64 string. 
" + + "Please ensure that the provided string is a valid base64 string.", + e); + } + try { + return GoogleCredentials.fromStream(new ByteArrayInputStream(decodedBytes)); + } catch (IllegalArgumentException | IOException e) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "The encoded credentials do not contain a valid Google Cloud credentials JSON string.", + e); + } + } + + @VisibleForTesting + GoogleCredentials internalGetApplicationDefault() throws IOException { + return GoogleCredentials.getApplicationDefault(); + } + + private GoogleCredentials getCredentialsFromUrl(String credentialsUrl) throws IOException { + Preconditions.checkNotNull(credentialsUrl); + Preconditions.checkArgument( + credentialsUrl.length() > 0, "credentialsUrl may not be an empty string"); + if (credentialsUrl.startsWith("gs://")) { + throw new IOException(GCS_NOT_SUPPORTED_MSG); + } else { + return getCredentialsFromLocalFile(credentialsUrl); + } + } + + private GoogleCredentials getCredentialsFromLocalFile(String filePath) throws IOException { + File credentialsFile = new File(filePath); + if (!credentialsFile.isFile()) { + throw new IOException( + String.format("Error reading credential file %s: File does not exist", filePath)); + } + try (InputStream credentialsStream = new FileInputStream(credentialsFile)) { + return GoogleCredentials.fromStream(credentialsStream); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DdlBatch.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DdlBatch.java new file mode 100644 index 000000000000..4a8d643b79ca --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DdlBatch.java @@ -0,0 +1,338 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.AbstractStatementParser.RUN_BATCH_STATEMENT; +import static com.google.cloud.spanner.connection.ConnectionProperties.DEFAULT_SEQUENCE_KIND; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.CommitResponse; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.Options.UpdateOption; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.spanner.admin.database.v1.DatabaseAdminGrpc; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.StatusCode; +import io.opentelemetry.context.Scope; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import 
java.util.concurrent.Callable; +import java.util.concurrent.atomic.AtomicReference; +import javax.annotation.Nonnull; + +/** + * {@link UnitOfWork} that is used when a DDL batch is started. These batches only accept DDL + * statements. All DDL statements are buffered locally and sent to Spanner when runBatch() is + * called. Running a {@link DdlBatch} is not an atomic operation. If the execution fails, then some + * (possibly empty) prefix of the statements in the batch have been successfully applied to the + * database, and the others have not. Note that the statements that succeed may not all happen at + * the same time, but they will always happen in order. + */ +class DdlBatch extends AbstractBaseUnitOfWork { + private final DdlClient ddlClient; + private final DatabaseClient dbClient; + private final List statements = new ArrayList<>(); + private UnitOfWorkState state = UnitOfWorkState.STARTED; + private final byte[] protoDescriptors; + private final ConnectionState connectionState; + + static class Builder extends AbstractBaseUnitOfWork.Builder { + private DdlClient ddlClient; + private DatabaseClient dbClient; + private byte[] protoDescriptors; + private ConnectionState connectionState; + + private Builder() {} + + Builder setDdlClient(DdlClient client) { + Preconditions.checkNotNull(client); + this.ddlClient = client; + return this; + } + + Builder setDatabaseClient(DatabaseClient client) { + Preconditions.checkNotNull(client); + this.dbClient = client; + return this; + } + + Builder setProtoDescriptors(byte[] protoDescriptors) { + this.protoDescriptors = protoDescriptors; + return this; + } + + Builder setConnectionState(ConnectionState connectionState) { + this.connectionState = connectionState; + return this; + } + + @Override + DdlBatch build() { + Preconditions.checkState(ddlClient != null, "No DdlClient specified"); + Preconditions.checkState(dbClient != null, "No DbClient specified"); + return new DdlBatch(this); + } + } + + static Builder 
newBuilder() { + return new Builder(); + } + + private DdlBatch(Builder builder) { + super(builder); + this.ddlClient = builder.ddlClient; + this.dbClient = builder.dbClient; + this.protoDescriptors = builder.protoDescriptors; + this.connectionState = Preconditions.checkNotNull(builder.connectionState); + } + + @Override + public boolean isSingleUse() { + return false; + } + + @Override + public Type getType() { + return Type.BATCH; + } + + @Override + public UnitOfWorkState getState() { + return this.state; + } + + @Override + public boolean isActive() { + return getState().isActive(); + } + + @Override + public boolean isReadOnly() { + return false; + } + + @Override + public ApiFuture executeQueryAsync( + CallType callType, + final ParsedStatement statement, + AnalyzeMode analyzeMode, + QueryOption... options) { + // Queries are by default not allowed on DDL batches. + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Executing queries is not allowed for DDL batches."); + } + + @Override + public Timestamp getReadTimestamp() { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "There is no read timestamp available for DDL batches."); + } + + @Override + public Timestamp getReadTimestampOrNull() { + return null; + } + + @Override + public Timestamp getCommitTimestamp() { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "There is no commit timestamp available for DDL batches."); + } + + @Override + public Timestamp getCommitTimestampOrNull() { + return null; + } + + @Override + public CommitResponse getCommitResponse() { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "There is no commit response available for DDL batches."); + } + + @Override + public CommitResponse getCommitResponseOrNull() { + return null; + } + + @Override + public ApiFuture executeDdlAsync(CallType callType, ParsedStatement ddl) { + 
ConnectionPreconditions.checkState( + state == UnitOfWorkState.STARTED, + "The batch is no longer active and cannot be used for further statements"); + Preconditions.checkArgument( + ddl.getType() == StatementType.DDL, + "Only DDL statements are allowed. \"" + ddl.getSql() + "\" is not a DDL-statement."); + Preconditions.checkArgument( + !DdlClient.isCreateDatabaseStatement(dbClient.getDialect(), ddl.getSql()), + "CREATE DATABASE is not supported in DDL batches."); + statements.add(ddl.getSql()); + return ApiFutures.immediateFuture(null); + } + + @Override + public ApiFuture executeUpdateAsync( + CallType callType, ParsedStatement update, UpdateOption... options) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Executing updates is not allowed for DDL batches."); + } + + @Override + public ApiFuture analyzeUpdateAsync( + CallType callType, ParsedStatement update, AnalyzeMode analyzeMode, UpdateOption... options) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Analyzing updates is not allowed for DDL batches."); + } + + @Override + public ApiFuture executeBatchUpdateAsync( + CallType callType, Iterable updates, UpdateOption... 
options) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Executing batch updates is not allowed for DDL batches."); + } + + @Override + public ApiFuture writeAsync(CallType callType, Iterable mutations) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Writing mutations is not allowed for DDL batches."); + } + + @Override + public ApiFuture runBatchAsync(CallType callType) { + ConnectionPreconditions.checkState( + state == UnitOfWorkState.STARTED, "The batch is no longer active and cannot be ran"); + try (Scope ignore = span.makeCurrent()) { + if (statements.isEmpty()) { + this.state = UnitOfWorkState.RAN; + return ApiFutures.transform( + asyncEndUnitOfWorkSpan(), unused -> new long[0], MoreExecutors.directExecutor()); + } + // Set the DDL statements on the span. + + span.setAllAttributes(Attributes.of(DB_STATEMENT_ARRAY_KEY, statements)); + // create a statement that can be passed in to the execute method + Callable callable = + () -> { + try { + AtomicReference> operationReference = + new AtomicReference<>(); + try { + ddlClient.runWithRetryForMissingDefaultSequenceKind( + restartIndex -> { + OperationFuture operation = + ddlClient.executeDdl( + statements.subList(restartIndex, statements.size()), + protoDescriptors); + operationReference.set(operation); + // Wait until the operation has finished. 
+ getWithStatementTimeout(operation, RUN_BATCH_STATEMENT); + }, + connectionState.getValue(DEFAULT_SEQUENCE_KIND).getValue(), + dbClient.getDialect(), + operationReference); + long[] updateCounts = new long[statements.size()]; + Arrays.fill(updateCounts, 1L); + state = UnitOfWorkState.RAN; + return updateCounts; + } catch (SpannerException e) { + long[] updateCounts = extractUpdateCounts(operationReference.get()); + throw SpannerExceptionFactory.newSpannerBatchUpdateException( + e.getErrorCode(), e.getMessage(), updateCounts); + } + } catch (Throwable t) { + span.setStatus(StatusCode.ERROR); + span.recordException(t); + state = UnitOfWorkState.RUN_FAILED; + throw t; + } + }; + this.state = UnitOfWorkState.RUNNING; + ApiFuture result = + executeStatementAsync( + callType, + RUN_BATCH_STATEMENT, + callable, + DatabaseAdminGrpc.getUpdateDatabaseDdlMethod()); + asyncEndUnitOfWorkSpan(); + + return result; + } + } + + long[] extractUpdateCounts(OperationFuture operation) { + try { + return extractUpdateCounts(operation.getMetadata().get()); + } catch (Throwable t) { + return new long[0]; + } + } + + @VisibleForTesting + long[] extractUpdateCounts(UpdateDatabaseDdlMetadata metadata) { + long[] updateCounts = new long[metadata.getStatementsCount()]; + for (int i = 0; i < updateCounts.length; i++) { + if (metadata.getCommitTimestampsCount() > i && metadata.getCommitTimestamps(i) != null) { + updateCounts[i] = 1L; + } else { + updateCounts[i] = 0L; + } + } + return updateCounts; + } + + @Override + public void abortBatch() { + ConnectionPreconditions.checkState( + state == UnitOfWorkState.STARTED, "The batch is no longer active and cannot be aborted."); + asyncEndUnitOfWorkSpan(); + this.state = UnitOfWorkState.ABORTED; + } + + @Override + public ApiFuture commitAsync( + @Nonnull CallType callType, @Nonnull EndTransactionCallback callback) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Commit is not allowed for DDL batches."); + } + 
+ @Override + public ApiFuture rollbackAsync( + @Nonnull CallType callType, @Nonnull EndTransactionCallback callback) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Rollback is not allowed for DDL batches."); + } + + @Override + String getUnitOfWorkName() { + return "DDL batch"; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DdlClient.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DdlClient.java new file mode 100644 index 000000000000..d8dcb3c6ae36 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DdlClient.java @@ -0,0 +1,205 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.MissingDefaultSequenceKindException; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +/** + * Convenience class for executing Data Definition Language statements on transactions that support + * DDL statements, i.e. DdlBatchTransaction and SingleUseTransaction. 
+ */ +class DdlClient { + private final DatabaseAdminClient dbAdminClient; + private final Supplier dialectSupplier; + private final String projectId; + private final String instanceId; + private final String databaseName; + + static class Builder { + private DatabaseAdminClient dbAdminClient; + private Supplier dialectSupplier; + private String projectId; + private String instanceId; + private String databaseName; + + private Builder() {} + + Builder setDatabaseAdminClient(DatabaseAdminClient client) { + Preconditions.checkNotNull(client); + this.dbAdminClient = client; + return this; + } + + Builder setDialectSupplier(Supplier dialectSupplier) { + this.dialectSupplier = Preconditions.checkNotNull(dialectSupplier); + return this; + } + + Builder setProjectId(String projectId) { + Preconditions.checkArgument( + !Strings.isNullOrEmpty(projectId), "Empty projectId is not allowed"); + this.projectId = projectId; + return this; + } + + Builder setInstanceId(String instanceId) { + Preconditions.checkArgument( + !Strings.isNullOrEmpty(instanceId), "Empty instanceId is not allowed"); + this.instanceId = instanceId; + return this; + } + + Builder setDatabaseName(String name) { + Preconditions.checkArgument( + !Strings.isNullOrEmpty(name), "Empty database name is not allowed"); + this.databaseName = name; + return this; + } + + DdlClient build() { + Preconditions.checkState(dbAdminClient != null, "No DatabaseAdminClient specified"); + Preconditions.checkState(dialectSupplier != null, "No dialect supplier specified"); + Preconditions.checkState(!Strings.isNullOrEmpty(projectId), "No ProjectId specified"); + Preconditions.checkState(!Strings.isNullOrEmpty(instanceId), "No InstanceId specified"); + Preconditions.checkArgument( + !Strings.isNullOrEmpty(databaseName), "No database name specified"); + return new DdlClient(this); + } + } + + static Builder newBuilder() { + return new Builder(); + } + + private DdlClient(Builder builder) { + this.dbAdminClient = 
builder.dbAdminClient; + this.dialectSupplier = builder.dialectSupplier; + this.projectId = builder.projectId; + this.instanceId = builder.instanceId; + this.databaseName = builder.databaseName; + } + + OperationFuture executeCreateDatabase( + String createStatement, Dialect dialect) { + Preconditions.checkArgument(isCreateDatabaseStatement(dialect, createStatement)); + return dbAdminClient.createDatabase( + instanceId, createStatement, dialect, Collections.emptyList()); + } + + /** Execute a single DDL statement. */ + OperationFuture executeDdl(String ddl, byte[] protoDescriptors) { + return executeDdl(Collections.singletonList(ddl), protoDescriptors); + } + + /** Execute a list of DDL statements as one operation. */ + OperationFuture executeDdl( + List statements, byte[] protoDescriptors) { + if (statements.stream() + .anyMatch(sql -> isCreateDatabaseStatement(this.dialectSupplier.get(), sql))) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "CREATE DATABASE is not supported in a DDL batch"); + } + Database.Builder dbBuilder = + dbAdminClient.newDatabaseBuilder(DatabaseId.of(projectId, instanceId, databaseName)); + if (protoDescriptors != null) { + dbBuilder.setProtoDescriptors(protoDescriptors); + } + Database db = dbBuilder.build(); + return dbAdminClient.updateDatabaseDdl( + db, + statements.stream().map(DdlClient::stripTrailingSemicolon).collect(Collectors.toList()), + null); + } + + static String stripTrailingSemicolon(String input) { + if (!input.contains(";")) { + return input; + } + String trimmed = input.trim(); + if (trimmed.endsWith(";")) { + return trimmed.substring(0, trimmed.length() - 1); + } + return input; + } + + /** Returns true if the statement is a `CREATE DATABASE ...` statement. 
*/ + static boolean isCreateDatabaseStatement(Dialect dialect, String statement) { + SimpleParser parser = new SimpleParser(dialect, statement); + return parser.eatKeyword("create", "database"); + } + + void runWithRetryForMissingDefaultSequenceKind( + Consumer runnable, + String defaultSequenceKind, + Dialect dialect, + AtomicReference> operationReference) { + try { + runnable.accept(0); + } catch (Throwable t) { + SpannerException spannerException = SpannerExceptionFactory.asSpannerException(t); + if (!Strings.isNullOrEmpty(defaultSequenceKind) + && spannerException instanceof MissingDefaultSequenceKindException) { + setDefaultSequenceKind(defaultSequenceKind, dialect); + int restartIndex = 0; + if (operationReference.get() != null) { + try { + UpdateDatabaseDdlMetadata metadata = operationReference.get().getMetadata().get(); + restartIndex = metadata.getCommitTimestampsCount(); + } catch (Throwable ignore) { + } + } + runnable.accept(restartIndex); + return; + } + throw t; + } + } + + private void setDefaultSequenceKind(String defaultSequenceKind, Dialect dialect) { + String ddl = + dialect == Dialect.POSTGRESQL + ? 
"alter database \"%s\" set spanner.default_sequence_kind = '%s'" + : "alter database `%s` set options (default_sequence_kind='%s')"; + ddl = String.format(ddl, databaseName, defaultSequenceKind); + try { + executeDdl(ddl, null).get(); + } catch (ExecutionException executionException) { + throw SpannerExceptionFactory.asSpannerException(executionException.getCause()); + } catch (InterruptedException interruptedException) { + throw SpannerExceptionFactory.propagateInterrupt(interruptedException); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DdlInTransactionMode.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DdlInTransactionMode.java new file mode 100644 index 000000000000..16645e929cb1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DdlInTransactionMode.java @@ -0,0 +1,35 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +/** Enum used for setting the behavior of DDL in read/write transactions. */ +public enum DdlInTransactionMode { + /** All DDL statements in a read/write transaction fail. */ + FAIL, + /** + * DDL statements in an empty transaction are allowed. 
That is; if the connection is in + * AutoCommit=false mode and no other statement has been executed, then executing a DDL statement + * or a DDL batch is allowed. + */ + ALLOW_IN_EMPTY_TRANSACTION, + /** + * DDL statements automatically cause the current transaction to be committed and the DDL + * statement is subsequently executed without a transaction. This is equal to how MySQL and Oracle + * behave. + */ + AUTO_COMMIT_TRANSACTION; +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DialectNamespaceMapper.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DialectNamespaceMapper.java new file mode 100644 index 000000000000..bb668506075c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DialectNamespaceMapper.java @@ -0,0 +1,31 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.Dialect; + +class DialectNamespaceMapper { + static String getNamespace(Dialect dialect) { + switch (dialect) { + case POSTGRESQL: + return "SPANNER."; + case GOOGLE_STANDARD_SQL: + default: + return ""; + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DirectExecuteResultSet.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DirectExecuteResultSet.java new file mode 100644 index 000000000000..f0c289a6d80b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DirectExecuteResultSet.java @@ -0,0 +1,609 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Interval; +import com.google.cloud.spanner.ProtobufResultSet; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Value; +import com.google.common.base.Preconditions; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ProtocolMessageEnum; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import java.math.BigDecimal; +import java.util.List; +import java.util.UUID; +import java.util.function.Function; + +/** + * {@link ResultSet} implementation used by the Spanner connection API to ensure that the query for + * a {@link ResultSet} is executed directly when it is created. This is done by calling {@link + * ResultSet#next()} directly after creation. This ensures that a statement timeout can be applied + * to the actual query execution. It also ensures that any invalid query will throw an exception at + * execution instead of the first next() call by a client. + */ +class DirectExecuteResultSet implements ProtobufResultSet { + private static final String MISSING_NEXT_CALL = "Must be preceded by a next() call"; + private final ResultSet delegate; + private boolean nextCalledByClient = false; + private final boolean initialNextResult; + private boolean nextHasReturnedFalse = false; + + /** + * Creates a new {@link DirectExecuteResultSet} from the given delegate {@link ResultSet}. This + * automatically executes the query of the given delegate {@link ResultSet} by calling next() on + * the delegate. The delegate must not have been used (i.e. next() must not have been called on + * it). 
+ * + * @param delegate The underlying {@link ResultSet} for this {@link DirectExecuteResultSet}. + * @return a {@link DirectExecuteResultSet} that has already executed the query associated with + * the delegate {@link ResultSet}. + */ + static DirectExecuteResultSet ofResultSet(ResultSet delegate) { + return new DirectExecuteResultSet(delegate); + } + + DirectExecuteResultSet(ResultSet delegate) { + Preconditions.checkNotNull(delegate); + this.delegate = delegate; + initialNextResult = delegate.next(); + } + + @Override + public boolean next() throws SpannerException { + if (nextCalledByClient) { + boolean res = delegate.next(); + nextHasReturnedFalse = !res; + return res; + } + nextCalledByClient = true; + nextHasReturnedFalse = !initialNextResult; + return initialNextResult; + } + + @Override + public boolean canGetProtobufValue(int columnIndex) { + return nextCalledByClient + && delegate instanceof ProtobufResultSet + && ((ProtobufResultSet) delegate).canGetProtobufValue(columnIndex); + } + + @Override + public com.google.protobuf.Value getProtobufValue(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + Preconditions.checkState( + delegate instanceof ProtobufResultSet, "The result set does not support protobuf values"); + return ((ProtobufResultSet) delegate).getProtobufValue(columnIndex); + } + + @Override + public Struct getCurrentRowAsStruct() { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getCurrentRowAsStruct(); + } + + @Override + public void close() { + delegate.close(); + } + + @Override + public ResultSetStats getStats() { + if (nextHasReturnedFalse) { + return delegate.getStats(); + } + return null; + } + + @Override + public ResultSetMetadata getMetadata() { + return delegate.getMetadata(); + } + + @Override + public Type getType() { + return delegate.getType(); + } + + @Override + public int getColumnCount() { + return delegate.getColumnCount(); + } + + @Override + public 
int getColumnIndex(String columnName) { + return delegate.getColumnIndex(columnName); + } + + @Override + public Type getColumnType(int columnIndex) { + return delegate.getColumnType(columnIndex); + } + + @Override + public Type getColumnType(String columnName) { + return delegate.getColumnType(columnName); + } + + @Override + public boolean isNull(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.isNull(columnIndex); + } + + @Override + public boolean isNull(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.isNull(columnName); + } + + @Override + public boolean getBoolean(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getBoolean(columnIndex); + } + + @Override + public boolean getBoolean(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getBoolean(columnName); + } + + @Override + public long getLong(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getLong(columnIndex); + } + + @Override + public long getLong(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getLong(columnName); + } + + @Override + public float getFloat(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getFloat(columnIndex); + } + + @Override + public double getDouble(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getDouble(columnIndex); + } + + @Override + public BigDecimal getBigDecimal(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getBigDecimal(columnName); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + 
return delegate.getBigDecimal(columnIndex); + } + + @Override + public float getFloat(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getFloat(columnName); + } + + @Override + public double getDouble(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getDouble(columnName); + } + + @Override + public String getString(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getString(columnIndex); + } + + @Override + public String getString(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getString(columnName); + } + + @Override + public String getJson(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getJson(columnIndex); + } + + @Override + public String getJson(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getJson(columnName); + } + + @Override + public String getPgJsonb(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getPgJsonb(columnIndex); + } + + @Override + public String getPgJsonb(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getPgJsonb(columnName); + } + + @Override + public ByteArray getBytes(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getBytes(columnIndex); + } + + @Override + public ByteArray getBytes(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getBytes(columnName); + } + + @Override + public Timestamp getTimestamp(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getTimestamp(columnIndex); + } + + @Override + public Timestamp 
getTimestamp(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getTimestamp(columnName); + } + + @Override + public Date getDate(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getDate(columnIndex); + } + + @Override + public Date getDate(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getDate(columnName); + } + + @Override + public UUID getUuid(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getUuid(columnIndex); + } + + @Override + public UUID getUuid(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getUuid(columnName); + } + + @Override + public Interval getInterval(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getInterval(columnIndex); + } + + @Override + public Interval getInterval(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getInterval(columnName); + } + + @Override + public Value getValue(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getValue(columnIndex); + } + + @Override + public Value getValue(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getValue(columnName); + } + + @Override + public boolean[] getBooleanArray(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getBooleanArray(columnIndex); + } + + @Override + public boolean[] getBooleanArray(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getBooleanArray(columnName); + } + + @Override + public List getBooleanList(int columnIndex) { + Preconditions.checkState(nextCalledByClient, 
MISSING_NEXT_CALL); + return delegate.getBooleanList(columnIndex); + } + + @Override + public List getBooleanList(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getBooleanList(columnName); + } + + @Override + public long[] getLongArray(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getLongArray(columnIndex); + } + + @Override + public long[] getLongArray(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getLongArray(columnName); + } + + @Override + public List getLongList(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getLongList(columnIndex); + } + + @Override + public List getLongList(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getLongList(columnName); + } + + @Override + public float[] getFloatArray(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getFloatArray(columnIndex); + } + + @Override + public float[] getFloatArray(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getFloatArray(columnName); + } + + @Override + public List getFloatList(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getFloatList(columnIndex); + } + + @Override + public List getFloatList(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getFloatList(columnName); + } + + @Override + public double[] getDoubleArray(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getDoubleArray(columnIndex); + } + + @Override + public double[] getDoubleArray(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + 
return delegate.getDoubleArray(columnName); + } + + @Override + public List getDoubleList(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getDoubleList(columnIndex); + } + + @Override + public List getDoubleList(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getDoubleList(columnName); + } + + @Override + public List getBigDecimalList(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getBigDecimalList(columnIndex); + } + + @Override + public List getBigDecimalList(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getBigDecimalList(columnName); + } + + @Override + public List getStringList(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getStringList(columnIndex); + } + + @Override + public List getStringList(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getStringList(columnName); + } + + @Override + public List getJsonList(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getJsonList(columnIndex); + } + + @Override + public List getJsonList(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getJsonList(columnName); + } + + @Override + public List getPgJsonbList(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getPgJsonbList(columnIndex); + } + + @Override + public List getPgJsonbList(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getPgJsonbList(columnName); + } + + @Override + public List getBytesList(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return 
delegate.getBytesList(columnIndex); + } + + @Override + public List getBytesList(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getBytesList(columnName); + } + + @Override + public List getTimestampList(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getTimestampList(columnIndex); + } + + @Override + public List getTimestampList(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getTimestampList(columnName); + } + + @Override + public List getDateList(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getDateList(columnIndex); + } + + @Override + public List getDateList(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getDateList(columnName); + } + + @Override + public List getUuidList(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getUuidList(columnIndex); + } + + @Override + public List getUuidList(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getUuidList(columnName); + } + + @Override + public List getIntervalList(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getIntervalList(columnIndex); + } + + @Override + public List getIntervalList(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getIntervalList(columnName); + } + + @Override + public List getProtoMessageList(int columnIndex, T message) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getProtoMessageList(columnIndex, message); + } + + @Override + public List getProtoMessageList(String columnName, T message) { + Preconditions.checkState(nextCalledByClient, 
MISSING_NEXT_CALL); + return delegate.getProtoMessageList(columnName, message); + } + + @Override + public List getProtoEnumList( + int columnIndex, Function method) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getProtoEnumList(columnIndex, method); + } + + @Override + public List getProtoEnumList( + String columnName, Function method) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getProtoEnumList(columnName, method); + } + + @Override + public List getStructList(int columnIndex) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getStructList(columnIndex); + } + + @Override + public List getStructList(String columnName) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getStructList(columnName); + } + + @Override + public T getProtoEnum( + int columnIndex, Function method) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getProtoEnum(columnIndex, method); + } + + @Override + public T getProtoEnum( + String columnName, Function method) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getProtoEnum(columnName, method); + } + + @Override + public T getProtoMessage(int columnIndex, T message) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getProtoMessage(columnIndex, message); + } + + @Override + public T getProtoMessage(String columnName, T message) { + Preconditions.checkState(nextCalledByClient, MISSING_NEXT_CALL); + return delegate.getProtoMessage(columnName, message); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof DirectExecuteResultSet)) { + return false; + } + return ((DirectExecuteResultSet) o).delegate.equals(delegate); + } + + @Override + public int hashCode() { + return delegate.hashCode(); + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DirectedReadOptionsUtil.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DirectedReadOptionsUtil.java new file mode 100644 index 000000000000..8b346a08f3d3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DirectedReadOptionsUtil.java @@ -0,0 +1,74 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.common.base.Strings; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.util.JsonFormat; +import com.google.spanner.v1.DirectedReadOptions; + +public class DirectedReadOptionsUtil { + static class DirectedReadOptionsConverter + implements ClientSideStatementValueConverter { + static DirectedReadOptionsConverter INSTANCE = new DirectedReadOptionsConverter(); + + @Override + public Class getParameterClass() { + return DirectedReadOptions.class; + } + + @Override + public DirectedReadOptions convert(String value) { + try { + return parse(value); + } catch (Throwable ignore) { + // ClientSideStatementValueConverters should return null if the value cannot be converted. 
+ return null; + } + } + } + + /** + * Generates a valid JSON string for the given {@link DirectedReadOptions} that can be used with + * the JDBC driver. + */ + public static String toString(DirectedReadOptions directedReadOptions) { + if (directedReadOptions == null + || DirectedReadOptions.getDefaultInstance().equals(directedReadOptions)) { + return ""; + } + try { + return JsonFormat.printer().omittingInsignificantWhitespace().print(directedReadOptions); + } catch (InvalidProtocolBufferException invalidProtocolBufferException) { + throw SpannerExceptionFactory.asSpannerException(invalidProtocolBufferException); + } + } + + static DirectedReadOptions parse(String json) { + if (Strings.isNullOrEmpty(json)) { + return DirectedReadOptions.getDefaultInstance(); + } + DirectedReadOptions.Builder builder = DirectedReadOptions.newBuilder(); + try { + JsonFormat.parser().merge(json, builder); + return builder.build(); + } catch (InvalidProtocolBufferException invalidProtocolBufferException) { + throw SpannerExceptionFactory.asSpannerException(invalidProtocolBufferException); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DmlBatch.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DmlBatch.java new file mode 100644 index 000000000000..1f70825910d6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DmlBatch.java @@ -0,0 +1,361 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.CommitResponse; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.Options.UpdateOption; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; +import com.google.common.base.Preconditions; +import com.google.common.base.Suppliers; +import com.google.common.collect.Iterables; +import com.google.common.util.concurrent.MoreExecutors; +import io.opentelemetry.context.Scope; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.function.Supplier; +import javax.annotation.Nonnull; + +/** + * {@link UnitOfWork} that is used when a DML batch is started. These batches only accept DML + * statements. All DML statements are buffered locally and sent to Spanner when runBatch() is + * called. 
+ */ +class DmlBatch extends AbstractBaseUnitOfWork { + private final boolean autoBatch; + private final Supplier autoBatchUpdateCountSupplier; + private final Supplier verifyUpdateCountsSupplier; + private final Supplier dmlbatchUpdateCountSupplier; + private final UnitOfWork transaction; + private final String statementTag; + private final List statements = new ArrayList<>(); + private long[] updateCounts = new long[0]; + private UnitOfWorkState state = UnitOfWorkState.STARTED; + + static class Builder extends AbstractBaseUnitOfWork.Builder { + private boolean autoBatch; + private Supplier autoBatchUpdateCountSupplier = Suppliers.ofInstance(1L); + private Supplier verifyUpdateCountsSupplier = Suppliers.ofInstance(Boolean.FALSE); + private Supplier dmlbatchUpdateCountSupplier = Suppliers.ofInstance(-1L); + private UnitOfWork transaction; + private String statementTag; + + private Builder() {} + + Builder setAutoBatch(boolean autoBatch) { + this.autoBatch = autoBatch; + return this; + } + + Builder setAutoBatchUpdateCountSupplier(Supplier updateCountSupplier) { + this.autoBatchUpdateCountSupplier = Preconditions.checkNotNull(updateCountSupplier); + return this; + } + + Builder setAutoBatchUpdateCountVerificationSupplier(Supplier verificationSupplier) { + this.verifyUpdateCountsSupplier = verificationSupplier; + return this; + } + + Builder setDmlBatchUpdateCountSupplier(Supplier dmlbatchUpdateCountSupplier) { + Preconditions.checkNotNull(dmlbatchUpdateCountSupplier); + this.dmlbatchUpdateCountSupplier = dmlbatchUpdateCountSupplier; + return this; + } + + Builder setTransaction(UnitOfWork transaction) { + Preconditions.checkNotNull(transaction); + this.transaction = transaction; + return this; + } + + Builder setStatementTag(String tag) { + this.statementTag = tag; + return this; + } + + @Override + DmlBatch build() { + Preconditions.checkState(transaction != null, "No transaction specified"); + return new DmlBatch(this); + } + } + + static Builder newBuilder() { + 
return new Builder(); + } + + private DmlBatch(Builder builder) { + super(builder); + this.autoBatch = builder.autoBatch; + this.autoBatchUpdateCountSupplier = builder.autoBatchUpdateCountSupplier; + this.verifyUpdateCountsSupplier = builder.verifyUpdateCountsSupplier; + this.dmlbatchUpdateCountSupplier = builder.dmlbatchUpdateCountSupplier; + this.transaction = Preconditions.checkNotNull(builder.transaction); + this.statementTag = builder.statementTag; + } + + boolean isAutoBatch() { + return this.autoBatch; + } + + @Override + public boolean isSingleUse() { + return false; + } + + @Override + public Type getType() { + return Type.BATCH; + } + + @Override + public UnitOfWorkState getState() { + return this.state; + } + + @Override + public boolean isActive() { + return getState().isActive(); + } + + @Override + public boolean isReadOnly() { + return false; + } + + @Override + public ApiFuture executeQueryAsync( + CallType callType, + ParsedStatement statement, + AnalyzeMode analyzeMode, + QueryOption... 
options) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Executing queries is not allowed for DML batches."); + } + + @Override + public Timestamp getReadTimestamp() { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "There is no read timestamp available for DML batches."); + } + + @Override + public Timestamp getReadTimestampOrNull() { + return null; + } + + @Override + public Timestamp getCommitTimestamp() { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "There is no commit timestamp available for DML batches."); + } + + @Override + public Timestamp getCommitTimestampOrNull() { + return null; + } + + @Override + public CommitResponse getCommitResponse() { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "There is no commit response available for DML batches."); + } + + @Override + public CommitResponse getCommitResponseOrNull() { + return null; + } + + @Override + public ApiFuture executeDdlAsync(CallType callType, ParsedStatement ddl) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Executing DDL statements is not allowed for DML batches."); + } + + long getUpdateCount() { + // Auto-batching returns update count 1 by default, as this is what ORMs normally expect. + // Standard batches return -1 by default, to indicate that the update count is unknown. + return isAutoBatch() ? autoBatchUpdateCountSupplier.get() : dmlbatchUpdateCountSupplier.get(); + } + + @Override + public ApiFuture executeUpdateAsync( + CallType callType, ParsedStatement update, UpdateOption... options) { + ConnectionPreconditions.checkState( + state == UnitOfWorkState.STARTED, + "The batch is no longer active and cannot be used for further statements"); + Preconditions.checkArgument( + update.getType() == StatementType.UPDATE, + "Only DML statements are allowed. 
\"" + update.getSql() + "\" is not a DML-statement."); + long updateCount = getUpdateCount(); + this.statements.add(update); + this.updateCounts = Arrays.copyOf(this.updateCounts, this.updateCounts.length + 1); + this.updateCounts[this.updateCounts.length - 1] = updateCount; + return ApiFutures.immediateFuture(updateCount); + } + + @Override + public ApiFuture analyzeUpdateAsync( + CallType callType, ParsedStatement update, AnalyzeMode analyzeMode, UpdateOption... options) { + if (transaction.isSingleUse()) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Analyzing updates is not allowed for DML batches."); + } + return transaction.analyzeUpdateAsync(callType, update, analyzeMode, options); + } + + @Override + public ApiFuture executeBatchUpdateAsync( + CallType callType, Iterable updates, UpdateOption... options) { + ConnectionPreconditions.checkState( + state == UnitOfWorkState.STARTED, + "The batch is no longer active and cannot be used for further statements"); + for (ParsedStatement update : updates) { + Preconditions.checkArgument( + update.getType() == StatementType.UPDATE, + "Only DML statements are allowed. 
\"" + update.getSql() + "\" is not a DML-statement."); + } + long[] updateCountArray = new long[Iterables.size(updates)]; + Arrays.fill(updateCountArray, getUpdateCount()); + Iterables.addAll(this.statements, updates); + this.updateCounts = + Arrays.copyOf(this.updateCounts, this.updateCounts.length + updateCountArray.length); + System.arraycopy( + updateCountArray, + 0, + this.updateCounts, + this.updateCounts.length - updateCountArray.length, + updateCountArray.length); + + return ApiFutures.immediateFuture(updateCountArray); + } + + @Override + public ApiFuture writeAsync(CallType callType, Iterable mutations) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Writing mutations is not allowed for DML batches."); + } + + @Override + public ApiFuture runBatchAsync(CallType callType) { + ConnectionPreconditions.checkState( + state == UnitOfWorkState.STARTED, "The batch is no longer active and cannot be ran"); + try (Scope ignore = span.makeCurrent()) { + if (statements.isEmpty()) { + this.state = UnitOfWorkState.RAN; + return ApiFutures.immediateFuture(new long[0]); + } + this.state = UnitOfWorkState.RUNNING; + // Use a SettableApiFuture to return the result, instead of directly returning the future that + // is returned by the executeBatchUpdateAsync method. This is needed because the state of the + // batch is set after the update has finished, and this happens in a listener. A listener is + // executed AFTER a Future is done, which means that a user could read the state of the Batch + // before it has been changed. 
+ final SettableApiFuture res = SettableApiFuture.create(); + int numOptions = 0; + if (statementTag != null) { + numOptions++; + } + if (this.rpcPriority != null) { + numOptions++; + } + UpdateOption[] options = new UpdateOption[numOptions]; + int index = 0; + if (statementTag != null) { + options[index++] = Options.tag(statementTag); + } + if (this.rpcPriority != null) { + options[index++] = Options.priority(this.rpcPriority); + } + ApiFuture updateCounts = + transaction.executeBatchUpdateAsync(callType, statements, options); + ApiFutures.addCallback( + updateCounts, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable t) { + state = UnitOfWorkState.RUN_FAILED; + res.setException(t); + } + + @Override + public void onSuccess(long[] result) { + state = UnitOfWorkState.RAN; + if (!verifyUpdateCounts(result)) { + res.setException( + SpannerExceptionFactory.newDmlBatchUpdateCountVerificationFailedException( + DmlBatch.this.updateCounts, result)); + } else { + res.set(result); + } + } + }, + MoreExecutors.directExecutor()); + asyncEndUnitOfWorkSpan(); + return res; + } + } + + private boolean verifyUpdateCounts(long[] actualUpdateCounts) { + if (!this.autoBatch || !this.verifyUpdateCountsSupplier.get()) { + // We only need to do an actual verification if the batch was an auto-batch and verification + // is enabled. 
+ return true; + } + return Arrays.equals(this.updateCounts, actualUpdateCounts); + } + + @Override + public void abortBatch() { + ConnectionPreconditions.checkState( + state == UnitOfWorkState.STARTED, "The batch is no longer active and cannot be aborted."); + asyncEndUnitOfWorkSpan(); + this.state = UnitOfWorkState.ABORTED; + } + + @Override + public ApiFuture commitAsync( + @Nonnull CallType callType, @Nonnull EndTransactionCallback callback) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Commit is not allowed for DML batches."); + } + + @Override + public ApiFuture rollbackAsync( + @Nonnull CallType callType, @Nonnull EndTransactionCallback callback) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Rollback is not allowed for DML batches."); + } + + @Override + String getUnitOfWorkName() { + return "DML batch"; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/EmulatorUtil.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/EmulatorUtil.java new file mode 100644 index 000000000000..d850c3d38390 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/EmulatorUtil.java @@ -0,0 +1,89 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.InstanceConfigId; +import com.google.cloud.spanner.InstanceInfo; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import java.util.concurrent.ExecutionException; + +/** + * Util class for automatically generating a test instance and test database on a Cloud Spanner + * emulator instance. This makes it easier to automatically start a working emulator and test an + * application when working with JDBC. + */ +class EmulatorUtil { + + /** + * Creates the instance and the database that are specified in the connection string on the + * emulator that the given {@link Spanner} instance connects to if these do not already exist. 
+ * + * @param spanner a {@link Spanner} instance that connects to an emulator instance + * @param databaseId the id of the instance and the database to create + * @param dialect the {@link Dialect} to use for the database to create + */ + static void maybeCreateInstanceAndDatabase( + Spanner spanner, DatabaseId databaseId, Dialect dialect) { + Preconditions.checkArgument( + NoCredentials.getInstance().equals(spanner.getOptions().getCredentials())); + try { + spanner + .getInstanceAdminClient() + .createInstance( + InstanceInfo.newBuilder(databaseId.getInstanceId()) + .setDisplayName("Automatically Generated Test Instance") + .setNodeCount(1) + .setInstanceConfigId( + InstanceConfigId.of( + databaseId.getInstanceId().getProject(), "emulator-config")) + .build()) + .get(); + } catch (ExecutionException executionException) { + SpannerException spannerException = (SpannerException) executionException.getCause(); + if (spannerException.getErrorCode() != ErrorCode.ALREADY_EXISTS) { + throw spannerException; + } + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + try { + spanner + .getDatabaseAdminClient() + .createDatabase( + databaseId.getInstanceId().getInstance(), + dialect.createDatabaseStatementFor(databaseId.getDatabase()), + dialect, + ImmutableList.of()) + .get(); + } catch (ExecutionException executionException) { + SpannerException spannerException = (SpannerException) executionException.getCause(); + if (spannerException.getErrorCode() != ErrorCode.ALREADY_EXISTS) { + throw spannerException; + } + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/FailedBatchUpdate.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/FailedBatchUpdate.java new file mode 100644 index 000000000000..17e8296a8191 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/FailedBatchUpdate.java @@ -0,0 +1,86 @@
/*
 * Copyright 2019 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner.connection;

import static com.google.cloud.spanner.connection.AbstractStatementParser.RUN_BATCH_STATEMENT;

import com.google.cloud.spanner.AbortedException;
import com.google.cloud.spanner.SpannerBatchUpdateException;
import com.google.cloud.spanner.SpannerException;
import com.google.cloud.spanner.SpannerExceptionFactory;
import com.google.cloud.spanner.Statement;
import com.google.cloud.spanner.connection.ReadWriteTransaction.RetriableStatement;
import com.google.common.base.Preconditions;
import java.util.Arrays;
import java.util.Objects;

/**
 * A batch update that failed with a {@link SpannerException} on a {@link ReadWriteTransaction}. The
 * batch update can be retried if the transaction is aborted, and should throw the same exception
 * during retry as during the original transaction.
 */
final class FailedBatchUpdate implements RetriableStatement {
  // Transaction on which the batch update originally failed.
  private final ReadWriteTransaction transaction;
  // The exception that the original execution threw; a retry must reproduce it.
  private final SpannerException exception;
  // The statements of the original batch, replayed verbatim during retry.
  private final Iterable<Statement> statements;

  FailedBatchUpdate(
      ReadWriteTransaction transaction,
      SpannerException exception,
      Iterable<Statement> statements) {
    Preconditions.checkNotNull(transaction);
    Preconditions.checkNotNull(exception);
    Preconditions.checkNotNull(statements);
    this.transaction = transaction;
    this.exception = exception;
    this.statements = statements;
  }

  /**
   * Re-executes the batch update. The retry is considered successful if the batch fails again with
   * the same error (and, for {@link SpannerBatchUpdateException}, the same update counts) as in the
   * original transaction. Any deviation means the underlying data changed concurrently, and an
   * AbortedDueToConcurrentModification exception is thrown.
   */
  @Override
  public void retry(AbortedException aborted) throws AbortedException {
    transaction
        .getStatementExecutor()
        .invokeInterceptors(
            RUN_BATCH_STATEMENT, StatementExecutionStep.RETRY_STATEMENT, transaction);
    try {
      transaction.getTransactionContext().batchUpdate(statements);
    } catch (AbortedException e) {
      // Propagate abort to force a new retry.
      throw e;
    } catch (SpannerBatchUpdateException e) {
      // Check that we got the same exception as in the original transaction.
      if (exception instanceof SpannerBatchUpdateException
          && e.getErrorCode() == exception.getErrorCode()
          && Objects.equals(e.getMessage(), exception.getMessage())) {
        // Check that the returned update counts are equal.
        if (Arrays.equals(
            e.getUpdateCounts(), ((SpannerBatchUpdateException) exception).getUpdateCounts())) {
          return;
        }
      }
      throw SpannerExceptionFactory.newAbortedDueToConcurrentModificationException(aborted, e);
    } catch (SpannerException e) {
      // Check that we got the same exception as in the original transaction.
      if (e.getErrorCode() == exception.getErrorCode()
          && Objects.equals(e.getMessage(), exception.getMessage())) {
        return;
      }
      throw SpannerExceptionFactory.newAbortedDueToConcurrentModificationException(aborted, e);
    }
    // The batch succeeded during retry although it failed originally: the data must have been
    // modified concurrently.
    throw SpannerExceptionFactory.newAbortedDueToConcurrentModificationException(aborted);
  }
}
diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/FailedQuery.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/FailedQuery.java new file mode 100644 index 000000000000..d64c4b9401f7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/FailedQuery.java @@ -0,0 +1,85 @@
/*
 * Copyright 2019 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner.connection;

import com.google.cloud.spanner.AbortedException;
import com.google.cloud.spanner.Options.QueryOption;
import com.google.cloud.spanner.ResultSet;
import com.google.cloud.spanner.SpannerException;
import com.google.cloud.spanner.SpannerExceptionFactory;
import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement;
import com.google.cloud.spanner.connection.ReadWriteTransaction.RetriableStatement;
import com.google.common.base.Preconditions;
import java.util.Objects;

/**
 * A query that failed with a {@link SpannerException} on a {@link ReadWriteTransaction}.
The query + * can be retried if the transaction is aborted, and should throw the same exception during retry as + * during the original transaction. + */ +final class FailedQuery implements RetriableStatement { + private final ReadWriteTransaction transaction; + private final SpannerException exception; + private final ParsedStatement statement; + private final AnalyzeMode analyzeMode; + private final QueryOption[] options; + + FailedQuery( + ReadWriteTransaction transaction, + SpannerException exception, + ParsedStatement statement, + AnalyzeMode analyzeMode, + QueryOption... options) { + Preconditions.checkNotNull(transaction); + Preconditions.checkNotNull(exception); + Preconditions.checkNotNull(statement); + this.transaction = transaction; + this.exception = exception; + this.statement = statement; + this.analyzeMode = analyzeMode; + this.options = options; + } + + @Override + public void retry(AbortedException aborted) throws AbortedException { + transaction + .getStatementExecutor() + .invokeInterceptors(statement, StatementExecutionStep.RETRY_STATEMENT, transaction); + try { + transaction + .getStatementExecutor() + .invokeInterceptors(statement, StatementExecutionStep.RETRY_STATEMENT, transaction); + try (ResultSet rs = + DirectExecuteResultSet.ofResultSet( + transaction.internalExecuteQuery(statement, analyzeMode, options))) { + // Do nothing with the results, we are only interested in whether the statement throws the + // same exception as in the original transaction. + } + } catch (AbortedException e) { + // Propagate abort to force a new retry. 
+ throw e; + } catch (SpannerException e) { + // Check that we got the same exception as in the original transaction + if (e.getErrorCode() == exception.getErrorCode() + && Objects.equals(e.getMessage(), exception.getMessage())) { + return; + } + throw SpannerExceptionFactory.newAbortedDueToConcurrentModificationException(aborted, e); + } + throw SpannerExceptionFactory.newAbortedDueToConcurrentModificationException(aborted); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/FailedUpdate.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/FailedUpdate.java new file mode 100644 index 000000000000..0fdeec5cf71f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/FailedUpdate.java @@ -0,0 +1,70 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.ReadWriteTransaction.RetriableStatement; +import com.google.common.base.Preconditions; +import java.util.Objects; + +/** + * An update that failed with a {@link SpannerException} on a {@link ReadWriteTransaction}. The + * update can be retried if the transaction is aborted, and should throw the same exception during + * retry as during the original transaction. + */ +final class FailedUpdate implements RetriableStatement { + private final ReadWriteTransaction transaction; + private final SpannerException exception; + private final ParsedStatement statement; + + FailedUpdate( + ReadWriteTransaction transaction, SpannerException exception, ParsedStatement statement) { + Preconditions.checkNotNull(transaction); + Preconditions.checkNotNull(exception); + Preconditions.checkNotNull(statement); + this.transaction = transaction; + this.exception = exception; + this.statement = statement; + } + + @Override + public void retry(AbortedException aborted) throws AbortedException { + transaction + .getStatementExecutor() + .invokeInterceptors(statement, StatementExecutionStep.RETRY_STATEMENT, transaction); + try { + transaction + .getStatementExecutor() + .invokeInterceptors(statement, StatementExecutionStep.RETRY_STATEMENT, transaction); + transaction.getTransactionContext().executeUpdate(statement.getStatement()); + } catch (AbortedException e) { + // Propagate abort to force a new retry. + throw e; + } catch (SpannerException e) { + // Check that we got the same exception as in the original transaction. 
+ if (e.getErrorCode() == exception.getErrorCode() + && Objects.equals(e.getMessage(), exception.getMessage())) { + return; + } + throw SpannerExceptionFactory.newAbortedDueToConcurrentModificationException(aborted, e); + } + throw SpannerExceptionFactory.newAbortedDueToConcurrentModificationException(aborted); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/LocalConnectionChecker.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/LocalConnectionChecker.java new file mode 100644 index 000000000000..62aafab42394 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/LocalConnectionChecker.java @@ -0,0 +1,108 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.UnavailableException; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.admin.instance.v1.stub.GrpcInstanceAdminStub; +import com.google.cloud.spanner.admin.instance.v1.stub.InstanceAdminStubSettings; +import com.google.common.base.Strings; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest; +import java.time.Duration; + +/** + * Util class for quickly checking whether a local emulator or test server can be found. A common + * configuration error is to add 'localhost' to the connection string or to forget to unset the + * SPANNER_EMULATOR_HOST environment variable. This can cause cryptic error messages. This util + * checks for common configurations and errors and returns a more understandable error message for + * known misconfigurations. + */ +class LocalConnectionChecker { + + /** + * Executes a quick check to see if this connection can actually connect to a local emulator host + * or other (mock) test server, if the options point to localhost instead of Cloud Spanner. + */ + void checkLocalConnection(ConnectionOptions options) { + final String emulatorHost = System.getenv("SPANNER_EMULATOR_HOST"); + String host = options.getHost() == null ? emulatorHost : options.getHost(); + if (Strings.isNullOrEmpty(host)) { + return; + } + + if (host.startsWith("https://")) { + host = host.substring(8); + } + if (host.startsWith("http://")) { + host = host.substring(7); + } + // Only do the check if the host has been set to localhost. + if (host.startsWith("localhost") && options.isUsePlainText()) { + // Do a quick check to see if anything is actually running on the host. 
+ try { + InstanceAdminStubSettings.Builder testEmulatorSettings = + InstanceAdminStubSettings.newBuilder() + .setCredentialsProvider(NoCredentialsProvider.create()) + .setTransportChannelProvider( + InstantiatingGrpcChannelProvider.newBuilder() + .setEndpoint(host) + .setChannelConfigurator( + input -> { + input.usePlaintext(); + return input; + }) + .build()); + testEmulatorSettings + .listInstanceConfigsSettings() + .setSimpleTimeoutNoRetriesDuration(Duration.ofSeconds(10L)); + try (GrpcInstanceAdminStub stub = + GrpcInstanceAdminStub.create(testEmulatorSettings.build())) { + stub.listInstanceConfigsCallable() + .call( + ListInstanceConfigsRequest.newBuilder() + .setParent(String.format("projects/%s", options.getProjectId())) + .build()); + } + } catch (UnavailableException e) { + String msg; + if (options.getHost() != null) { + msg = + String.format( + "The connection string '%s' contains host '%s', but no running" + + " emulator or other server could be found at that address.\n" + + "Please check the connection string and/or that the emulator is running.", + options.getUri(), host); + } else { + msg = + String.format( + "The environment variable SPANNER_EMULATOR_HOST has been set to '%s', but no" + + " running emulator or other server could be found at that address.\n" + + "Please check the environment variable and/or that the emulator is" + + " running.", + emulatorHost); + } + throw SpannerExceptionFactory.newSpannerException(ErrorCode.UNAVAILABLE, msg); + } catch (Throwable t) { + // Ignore, any other exceptions should also be thrown when connecting to the remote + // server and should not be treated here. 
+ } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/MergedResultSet.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/MergedResultSet.java new file mode 100644 index 000000000000..1cbbf0818c5f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/MergedResultSet.java @@ -0,0 +1,463 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.common.base.Preconditions.checkState; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.ForwardingStructReader; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.Code; +import com.google.common.base.Preconditions; +import com.google.common.base.Supplier; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.LinkedBlockingDeque; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import javax.annotation.Nonnull; + +/** + * {@link MergedResultSet} is a {@link ResultSet} implementation that combines the results from + * multiple queries. Each query uses its own {@link RowProducer} that feeds rows into the {@link + * MergedResultSet}. The order of the records in the {@link MergedResultSet} is not guaranteed. 
+ */ +class MergedResultSet extends ForwardingStructReader implements PartitionedQueryResultSet { + static class PartitionExecutor implements Runnable { + private final Connection connection; + private final String partitionId; + private final LinkedBlockingDeque queue; + private final CountDownLatch metadataAvailableLatch; + private final AtomicBoolean shouldStop = new AtomicBoolean(); + + PartitionExecutor( + Connection connection, + String partitionId, + LinkedBlockingDeque queue, + CountDownLatch metadataAvailableLatch) { + this.connection = Preconditions.checkNotNull(connection); + this.partitionId = Preconditions.checkNotNull(partitionId); + this.queue = queue; + this.metadataAvailableLatch = Preconditions.checkNotNull(metadataAvailableLatch); + } + + @Override + public void run() { + try (ResultSet resultSet = connection.runPartition(partitionId)) { + boolean first = true; + while (resultSet.next()) { + Struct row = resultSet.getCurrentRowAsStruct(); + if (first) { + queue.put( + PartitionExecutorResult.dataAndMetadata( + row, resultSet.getType(), resultSet.getMetadata())); + metadataAvailableLatch.countDown(); + first = false; + } else { + queue.put(PartitionExecutorResult.data(row)); + } + if (shouldStop.get()) { + break; + } + } + if (first + && resultSet.getType().getCode() == Code.STRUCT + && !resultSet.getType().getStructFields().isEmpty()) { + // Special case: The result set did not return any rows, but did return metadata. + // Push the metadata to the merged result set. + queue.put( + PartitionExecutorResult.typeAndMetadata( + resultSet.getType(), resultSet.getMetadata())); + metadataAvailableLatch.countDown(); + } + } catch (Throwable exception) { + putWithoutInterruptPropagation(PartitionExecutorResult.exception(exception)); + metadataAvailableLatch.countDown(); + } finally { + // Emit a special 'finished' result to ensure that the row producer is not blocked on a + // queue that never receives any more results. 
This ensures that we can safely block on + // queue.take(), as we know that we will always receive at least one result from each + // worker. + putWithoutInterruptPropagation(PartitionExecutorResult.finished()); + } + } + + private void putWithoutInterruptPropagation(PartitionExecutorResult result) { + try { + queue.put(result); + } catch (InterruptedException interruptedException) { + Thread.currentThread().interrupt(); + } + } + } + + static class PartitionExecutorResult { + private final Struct data; + private final Throwable exception; + private final Type type; + private final ResultSetMetadata metadata; + + static PartitionExecutorResult data(@Nonnull Struct data) { + return new PartitionExecutorResult(Preconditions.checkNotNull(data), null, null, null); + } + + static PartitionExecutorResult typeAndMetadata( + @Nonnull Type type, @Nonnull ResultSetMetadata metadata) { + return new PartitionExecutorResult( + null, Preconditions.checkNotNull(type), Preconditions.checkNotNull(metadata), null); + } + + static PartitionExecutorResult dataAndMetadata( + @Nonnull Struct data, @Nonnull Type type, @Nonnull ResultSetMetadata metadata) { + return new PartitionExecutorResult( + Preconditions.checkNotNull(data), + Preconditions.checkNotNull(type), + Preconditions.checkNotNull(metadata), + null); + } + + static PartitionExecutorResult exception(@Nonnull Throwable exception) { + return new PartitionExecutorResult(null, null, null, Preconditions.checkNotNull(exception)); + } + + static PartitionExecutorResult finished() { + return new PartitionExecutorResult(null, null, null, null); + } + + private PartitionExecutorResult( + Struct data, Type type, ResultSetMetadata metadata, Throwable exception) { + this.data = data; + this.type = type; + this.metadata = metadata; + this.exception = exception; + } + + boolean hasData() { + return this.data != null; + } + + boolean isFinished() { + return this.data == null + && this.type == null + && this.metadata == null + && 
this.exception == null; + } + } + + interface RowProducer extends Supplier { + boolean nextRow() throws Throwable; + + void close(); + + Type getType(); + + ResultSetMetadata getMetadata(); + + int getNumPartitions(); + + int getParallelism(); + } + + static class EmptyRowProducer implements RowProducer { + @Override + public Struct get() { + return Struct.newBuilder().build(); + } + + @Override + public boolean nextRow() { + return false; + } + + @Override + public Type getType() { + return Type.struct(); + } + + @Override + public ResultSetMetadata getMetadata() { + return ResultSetMetadata.getDefaultInstance(); + } + + @Override + public int getNumPartitions() { + return 0; + } + + @Override + public int getParallelism() { + return 0; + } + + @Override + public void close() {} + } + + private static class RowProducerImpl implements RowProducer { + /** The maximum number of rows that we will cache per thread that is fetching rows. */ + private static final int QUEUE_SIZE_PER_WORKER = 32; + + private final ExecutorService executor; + private final int parallelism; + private final List partitionExecutors; + private final AtomicInteger finishedCounter; + private final LinkedBlockingDeque queue; + private ResultSetMetadata metadata; + private final CountDownLatch metadataAvailableLatch = new CountDownLatch(1); + private Type type; + private Struct currentRow; + private Throwable exception; + + RowProducerImpl(Connection connection, List partitions, int maxParallelism) { + Preconditions.checkArgument(maxParallelism >= 0, "maxParallelism must be >= 0"); + Preconditions.checkArgument( + !Preconditions.checkNotNull(partitions).isEmpty(), "partitions must not be empty"); + if (maxParallelism == 0) { + // Dynamically determine parallelism. 
+ this.parallelism = Math.min(partitions.size(), Runtime.getRuntime().availableProcessors()); + } else { + this.parallelism = Math.min(partitions.size(), maxParallelism); + } + this.executor = + Executors.newFixedThreadPool( + this.parallelism, + runnable -> { + Thread thread = new Thread(runnable); + thread.setName("partitioned-query-row-producer"); + thread.setDaemon(true); + return thread; + }); + this.queue = new LinkedBlockingDeque<>(QUEUE_SIZE_PER_WORKER * this.parallelism); + this.partitionExecutors = new ArrayList<>(partitions.size()); + this.finishedCounter = new AtomicInteger(partitions.size()); + for (String partition : partitions) { + PartitionExecutor partitionExecutor = + new PartitionExecutor(connection, partition, this.queue, this.metadataAvailableLatch); + this.partitionExecutors.add(partitionExecutor); + this.executor.submit(partitionExecutor); + } + // Pre-emptively shutdown the executor. This does not terminate any running tasks, but it + // stops the executor from accepting any new tasks and guarantees that the executor will + // always be shutdown, regardless whether the user calls ResultSet#close(). + this.executor.shutdown(); + } + + @Override + public void close() { + this.partitionExecutors.forEach(partitionExecutor -> partitionExecutor.shouldStop.set(true)); + // shutdownNow will interrupt any running tasks and then shut down directly. + // This will also cancel any queries that might be running. + this.executor.shutdownNow(); + } + + @Override + public boolean nextRow() throws Throwable { + if (this.exception != null) { + throw this.exception; + } + while (true) { + PartitionExecutorResult next; + if ((next = queue.peek()) != null && !next.isFinished()) { + // There's a valid result available. Return this quickly. + if (setNextRow(queue.remove())) { + return true; + } + } + // Block until the next row is available. 
+ next = queue.take(); + if (next.isFinished()) { + finishedCounter.decrementAndGet(); + if (finishedCounter.get() == 0) { + return false; + } + } else { + if (setNextRow(next)) { + return true; + } + } + } + } + + boolean setNextRow(PartitionExecutorResult next) throws Throwable { + if (next.exception != null) { + this.exception = next.exception; + throw next.exception; + } + currentRow = next.data; + if (this.metadata == null && next.metadata != null) { + this.metadata = next.metadata; + } + if (this.type == null && next.type != null) { + this.type = next.type; + } + return next.hasData(); + } + + @Override + public Struct get() { + checkState(currentRow != null, "next() call required"); + return currentRow; + } + + private PartitionExecutorResult getFirstResultWithMetadata() { + try { + metadataAvailableLatch.await(); + } catch (InterruptedException interruptedException) { + throw SpannerExceptionFactory.propagateInterrupt(interruptedException); + } + PartitionExecutorResult result = + queue.stream() + .filter(rs -> rs.metadata != null || rs.exception != null) + .findFirst() + .orElse(null); + if (result == null) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Thread-unsafe access to ResultSet"); + } + if (result.exception != null) { + throw SpannerExceptionFactory.asSpannerException(result.exception); + } + return result; + } + + public ResultSetMetadata getMetadata() { + if (metadata == null) { + return getFirstResultWithMetadata().metadata; + } + return metadata; + } + + @Override + public int getNumPartitions() { + return partitionExecutors.size(); + } + + @Override + public int getParallelism() { + return parallelism; + } + + public Type getType() { + if (type == null) { + return getFirstResultWithMetadata().type; + } + return type; + } + } + + private final RowProducer rowProducer; + + private boolean closed; + + MergedResultSet(Connection connection, List partitions, int maxParallelism) { + this( + 
Preconditions.checkNotNull(partitions).isEmpty() + ? new EmptyRowProducer() + : new RowProducerImpl(connection, partitions, maxParallelism)); + } + + private MergedResultSet(RowProducer rowProducer) { + super(rowProducer); + this.rowProducer = rowProducer; + } + + @Override + protected void checkValidState() { + Preconditions.checkState(!closed, "This result set has been closed"); + } + + @Override + public boolean next() throws SpannerException { + checkValidState(); + try { + return rowProducer.nextRow(); + } catch (InterruptedException interruptedException) { + throw SpannerExceptionFactory.propagateInterrupt(interruptedException); + } catch (Throwable throwable) { + throw SpannerExceptionFactory.asSpannerException(throwable); + } + } + + @Override + public Struct getCurrentRowAsStruct() { + checkValidState(); + return rowProducer.get(); + } + + @Override + public void close() { + this.closed = true; + rowProducer.close(); + } + + @Override + public ResultSetStats getStats() { + throw new UnsupportedOperationException( + "ResultSetStats are available only for results returned from analyzeQuery() calls"); + } + + @Override + public ResultSetMetadata getMetadata() { + checkValidState(); + return rowProducer.getMetadata(); + } + + @Override + public Type getType() { + checkValidState(); + return rowProducer.getType(); + } + + @Override + public int getColumnCount() { + return getType().getStructFields().size(); + } + + @Override + public int getColumnIndex(String columnName) { + return getType().getFieldIndex(columnName); + } + + @Override + public Type getColumnType(int columnIndex) { + return getType().getStructFields().get(columnIndex).getType(); + } + + @Override + public Type getColumnType(String columnName) { + return getType().getStructFields().get(getColumnIndex(columnName)).getType(); + } + + @Override + public int getNumPartitions() { + return rowProducer.getNumPartitions(); + } + + @Override + public int getParallelism() { + return 
rowProducer.getParallelism(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/PartitionId.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/PartitionId.java new file mode 100644 index 000000000000..2adc264dc6d2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/PartitionId.java @@ -0,0 +1,128 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.BatchTransactionId; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Partition; +import com.google.cloud.spanner.SpannerExceptionFactory; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InvalidClassException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.ObjectStreamClass; +import java.io.Serializable; +import java.util.Base64; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.zip.GZIPInputStream; +import java.util.zip.GZIPOutputStream; + +/** + * Contains a reference to a {@link BatchTransactionId} and a {@link Partition}. The combination of + * these two are needed to execute a partition of a partitioned query on a {@link Connection}. 
A + * {@link PartitionId} can safely be given to a different connection and/or host to be executed + * there. + */ +public final class PartitionId implements Serializable { + private static final long serialVersionUID = 239487275L; + + private final BatchTransactionId transactionId; + private final Partition partition; + + /** + * Deserializes a string representation of a {@link PartitionId}. The string must have been + * created with the {@link #encodeToString(BatchTransactionId, Partition)} method. + */ + public static PartitionId decodeFromString(String id) { + AtomicBoolean classNameVerified = new AtomicBoolean(false); + try (ObjectInputStream objectInputStream = + new ObjectInputStream( + new GZIPInputStream(new ByteArrayInputStream(Base64.getUrlDecoder().decode(id)))) { + @Override + protected Class resolveClass(ObjectStreamClass desc) + throws IOException, ClassNotFoundException { + if (!classNameVerified.get()) { + if (desc.getName().equals(PartitionId.class.getName())) { + classNameVerified.set(true); + } else { + throw new InvalidClassException( + "The id does not contain a valid PartitionId instance", desc.getName()); + } + } + return super.resolveClass(desc); + } + }) { + return (PartitionId) objectInputStream.readObject(); + } catch (InvalidClassException invalidClassException) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, invalidClassException.getMessage(), invalidClassException); + } catch (Exception exception) { + throw SpannerExceptionFactory.asSpannerException(exception); + } + } + + /** + * @return A string-encoded version of this {@link PartitionId}. This encoded version can be sent + * to any other {@link Connection} to be executed there, including connections on different + * hosts than the current host. 
+ */ + public static String encodeToString(BatchTransactionId transactionId, Partition partition) { + PartitionId id = new PartitionId(transactionId, partition); + ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); + try (ObjectOutputStream objectOutputStream = + new ObjectOutputStream(new GZIPOutputStream(byteArrayOutputStream))) { + objectOutputStream.writeObject(id); + } catch (Exception exception) { + throw SpannerExceptionFactory.asSpannerException(exception); + } + return Base64.getUrlEncoder().encodeToString(byteArrayOutputStream.toByteArray()); + } + + private PartitionId(BatchTransactionId transactionId, Partition partition) { + this.transactionId = transactionId; + this.partition = partition; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof PartitionId)) { + return false; + } + PartitionId other = (PartitionId) o; + return Objects.equals(this.transactionId, other.transactionId) + && Objects.equals(this.partition, other.partition); + } + + @Override + public int hashCode() { + return Objects.hash(this.transactionId, this.partition); + } + + public BatchTransactionId getTransactionId() { + return transactionId; + } + + public Partition getPartition() { + return partition; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/PartitionedQueryResultSet.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/PartitionedQueryResultSet.java new file mode 100644 index 000000000000..efcecee271c9 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/PartitionedQueryResultSet.java @@ -0,0 +1,29 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.ResultSet; + +/** Result set that is returned for partitioned queries. */ +public interface PartitionedQueryResultSet extends ResultSet { + + /** Returns the number of partitions that this result set contains. */ + int getNumPartitions(); + + /** Returns the degree of parallelism that this result set uses. */ + int getParallelism(); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/PgTransactionMode.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/PgTransactionMode.java new file mode 100644 index 000000000000..db6af7e08da3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/PgTransactionMode.java @@ -0,0 +1,147 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.spanner.v1.TransactionOptions; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import java.util.Objects; + +/** + * Enum for the possible PostgreSQL transaction modes. We need a separate class for PG transaction + * modes to handle setting the isolation level. + */ +class PgTransactionMode { + enum AccessMode { + READ_ONLY_TRANSACTION("READ ONLY"), + READ_WRITE_TRANSACTION("READ WRITE"); + + private final String statementString; + + AccessMode(String statement) { + this.statementString = statement; + } + + @Override + public String toString() { + return statementString; + } + } + + enum IsolationLevel { + ISOLATION_LEVEL_DEFAULT( + "ISOLATION LEVEL DEFAULT", + "DEFAULT", + TransactionOptions.IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED), + ISOLATION_LEVEL_SERIALIZABLE( + "ISOLATION LEVEL SERIALIZABLE", + "SERIALIZABLE", + TransactionOptions.IsolationLevel.SERIALIZABLE), + ISOLATION_LEVEL_REPEATABLE_READ( + "ISOLATION LEVEL REPEATABLE READ", + "REPEATABLE READ", + TransactionOptions.IsolationLevel.REPEATABLE_READ); + + private final String statementString; + private final String shortStatementString; + private final TransactionOptions.IsolationLevel spannerIsolationLevel; + + IsolationLevel( + String statement, + String shortStatementString, + TransactionOptions.IsolationLevel spannerIsolationLevel) { + this.statementString = statement; + this.shortStatementString = shortStatementString; + this.spannerIsolationLevel = spannerIsolationLevel; + } + + /** + * Use this method to get the correct format for use in a SQL statement. The SQL statement for + * setting the mode to read-only should for example be without the underscore: + * SET TRANSACTION READ ONLY + * + * @return a string representation of this {@link TransactionMode} that can be used in a SQL + * statement to set the transaction mode. 
+ */ + public String getStatementString() { + return statementString; + } + + public String getShortStatementString() { + return shortStatementString; + } + + public TransactionOptions.IsolationLevel getSpannerIsolationLevel() { + return spannerIsolationLevel; + } + + @Override + public String toString() { + return statementString; + } + } + + private AccessMode accessMode; + private IsolationLevel isolationLevel; + + PgTransactionMode() {} + + AccessMode getAccessMode() { + return this.accessMode; + } + + void setAccessMode(AccessMode accessMode) { + this.accessMode = accessMode; + } + + IsolationLevel getIsolationLevel() { + return this.isolationLevel; + } + + void setIsolationLevel(IsolationLevel isolationLevel) { + this.isolationLevel = isolationLevel; + } + + @Override + public int hashCode() { + return Objects.hash(this.accessMode, this.isolationLevel); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof PgTransactionMode)) { + return false; + } + PgTransactionMode other = (PgTransactionMode) o; + return Objects.equals(this.accessMode, other.accessMode) + && Objects.equals(this.isolationLevel, other.isolationLevel); + } + + @Override + public String toString() { + StringBuilder result = new StringBuilder(); + if (accessMode != null) { + result.append(accessMode.statementString); + } + if (isolationLevel != null) { + if (accessMode != null) { + result.append(' '); + } + result.append(isolationLevel.statementString); + } + return result.toString(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/PostgreSQLStatementParser.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/PostgreSQLStatementParser.java new file mode 100644 index 000000000000..60b64b0cd4f7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/PostgreSQLStatementParser.java @@ -0,0 +1,292 @@ +/* + * Copyright 2020 Google LLC + * + 
* Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.SimpleParser.isValidIdentifierFirstChar; + +import com.google.api.core.InternalApi; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import com.google.common.base.Preconditions; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; +import java.util.regex.Pattern; + +@InternalApi +public class PostgreSQLStatementParser extends AbstractStatementParser { + private static final Pattern RETURNING_PATTERN = Pattern.compile("returning[ '(\"*]"); + private static final Pattern AS_RETURNING_PATTERN = Pattern.compile("[ ')\"]as returning[ '(\"]"); + private static final String RETURNING_STRING = "returning"; + + PostgreSQLStatementParser() throws CompileException { + super( + Collections.unmodifiableSet( + ClientSideStatements.getInstance(Dialect.POSTGRESQL).getCompiledStatements())); + } + + @Override + Dialect getDialect() { + return Dialect.POSTGRESQL; + } + + @Override + boolean supportsNestedComments() { + return true; + } + + @Override + boolean supportsDollarQuotedStrings() { + return true; + } + + @Override + boolean supportsBacktickQuote() { + return false; + } + + @Override + boolean 
supportsTripleQuotedStrings() { + return false; + } + + @Override + boolean supportsEscapeQuoteWithQuote() { + return true; + } + + @Override + boolean supportsBackslashEscape() { + return false; + } + + @Override + boolean supportsHashSingleLineComments() { + return false; + } + + @Override + boolean supportsLineFeedInQuotedString() { + return true; + } + + @Override + String getQueryParameterPrefix() { + return "$"; + } + + /** + * Removes comments from and trims the given sql statement. PostgreSQL supports two types of + * comments: + * + *

    + *
  • Single line comments starting with '--' + *
  • Multi line comments between '/*' and '*/'. Nested comments are supported and all + * comments, including the nested comments, must be terminated. + *
+ * + * Reference: https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-COMMENTS + * + * @param sql The sql statement to remove comments from and to trim. + * @return the sql statement without the comments and leading and trailing spaces. + */ + @InternalApi + @Override + String removeCommentsAndTrimInternal(String sql) { + Preconditions.checkNotNull(sql); + boolean isInSingleLineComment = false; + int multiLineCommentLevel = 0; + boolean whitespaceBeforeOrAfterMultiLineComment = false; + int multiLineCommentStartIdx = -1; + StringBuilder res = new StringBuilder(sql.length()); + int index = 0; + int length = sql.length(); + while (index < length) { + char c = sql.charAt(index); + if (isInSingleLineComment) { + if (c == '\n') { + isInSingleLineComment = false; + // Include the line feed in the result. + res.append(c); + } + } else if (multiLineCommentLevel > 0) { + if (length > index + 1 && c == ASTERISK && sql.charAt(index + 1) == SLASH) { + multiLineCommentLevel--; + if (multiLineCommentLevel == 0) { + if (!whitespaceBeforeOrAfterMultiLineComment && (length > index + 2)) { + whitespaceBeforeOrAfterMultiLineComment = + Character.isWhitespace(sql.charAt(index + 2)); + } + // If the multiline comment does not have any whitespace before or after it, and it is + // neither at the start nor at the end of SQL string, append an extra space. + if (!whitespaceBeforeOrAfterMultiLineComment + && (multiLineCommentStartIdx != 0) + && (index != length - 2)) { + res.append(' '); + } + } + index++; + } else if (length > index + 1 && c == SLASH && sql.charAt(index + 1) == ASTERISK) { + multiLineCommentLevel++; + index++; + } + } else { + // Check for -- which indicates the start of a single-line comment. + if (length > index + 1 && c == HYPHEN && sql.charAt(index + 1) == HYPHEN) { + // This is a single line comment. 
+ isInSingleLineComment = true; + index += 2; + continue; + } else if (length > index + 1 && c == SLASH && sql.charAt(index + 1) == ASTERISK) { + multiLineCommentLevel++; + if (index >= 1) { + whitespaceBeforeOrAfterMultiLineComment = Character.isWhitespace(sql.charAt(index - 1)); + } + multiLineCommentStartIdx = index; + index += 2; + continue; + } else { + index = skip(sql, index, res); + continue; + } + } + index++; + } + if (multiLineCommentLevel > 0) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "SQL statement contains an unterminated block comment: " + sql); + } + if (res.length() > 0 && res.charAt(res.length() - 1) == ';') { + res.deleteCharAt(res.length() - 1); + } + return res.toString().trim(); + } + + /** PostgreSQL does not support statement hints. */ + @Override + String removeStatementHint(String sql) { + return sql; + } + + /** + * Note: This is an internal API and breaking changes can be made without prior notice. + * + *

Returns the PostgreSQL-style query parameters ($1, $2, ...) in the given SQL string. The + * SQL-string is allowed to contain comments. Occurrences of query-parameter like strings inside + * quoted identifiers or string literals are ignored. + * + *

The following example will return a set containing ("$1", "$2"). + * select col1, col2, "col$4" + * from some_table + * where col1=$1 and col2=$2 + * and not col3=$1 and col4='$3' + * + * + * @param sql the SQL-string to check for parameters. + * @return A set containing all the parameters in the SQL-string. + */ + @InternalApi + public Set getQueryParameters(String sql) { + Preconditions.checkNotNull(sql); + int maxCount = countOccurrencesOf('$', sql); + Set parameters = new HashSet<>(maxCount); + int currentIndex = 0; + while (currentIndex < sql.length() - 1) { + char c = sql.charAt(currentIndex); + if (c == '$' && Character.isDigit(sql.charAt(currentIndex + 1))) { + // Look ahead for the first non-digit. That is the end of the query parameter. + int endIndex = currentIndex + 2; + while (endIndex < sql.length() && Character.isDigit(sql.charAt(endIndex))) { + endIndex++; + } + parameters.add(sql.substring(currentIndex, endIndex)); + currentIndex = endIndex; + } else { + currentIndex = skip(sql, currentIndex, null); + } + } + return parameters; + } + + private boolean checkCharPrecedingReturning(char ch) { + return (ch == SPACE) + || (ch == SINGLE_QUOTE) + || (ch == CLOSE_PARENTHESIS) + || (ch == DOUBLE_QUOTE) + || (ch == DOLLAR); + } + + private boolean checkCharPrecedingSubstrWithReturning(char ch) { + return (ch == SPACE) + || (ch == SINGLE_QUOTE) + || (ch == CLOSE_PARENTHESIS) + || (ch == DOUBLE_QUOTE) + || (ch == COMMA); + } + + private boolean isReturning(String sql, int index) { + // RETURNING is a reserved keyword in PG, but requires a + // leading AS to be used as column label, to avoid ambiguity. + // We thus check for cases which do not have a leading AS. 
+ // (https://www.postgresql.org/docs/current/sql-keywords-appendix.html) + if (index >= 1) { + if (((index + 10 <= sql.length()) + && RETURNING_PATTERN.matcher(sql.substring(index, index + 10)).matches() + && !((index >= 4) + && AS_RETURNING_PATTERN.matcher(sql.substring(index - 4, index + 10)).matches()))) { + if (checkCharPrecedingReturning(sql.charAt(index - 1))) { + return true; + } + // Check for cases where returning clause is part of a substring which starts with an + // invalid first character of an identifier. + // For example, + // insert into t select 2returning *; + int ind = index - 1; + while ((ind >= 0) && !checkCharPrecedingSubstrWithReturning(sql.charAt(ind))) { + ind--; + } + return !isValidIdentifierFirstChar(sql.charAt(ind + 1)); + } + } + return false; + } + + @InternalApi + @Override + protected boolean checkReturningClauseInternal(String rawSql) { + Preconditions.checkNotNull(rawSql); + String sql = rawSql.toLowerCase(); + // Do a pre-check to check if the SQL string definitely does not have a returning clause. + // If this check fails, do a more involved check to check for a returning clause. 
+ if (!sql.contains(RETURNING_STRING)) { + return false; + } + sql = sql.replaceAll("\\s+", " "); + int index = 0; + while (index < sql.length()) { + if (isReturning(sql, index)) { + return true; + } else { + index = skip(sql, index, null); + } + } + return false; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadOnlyStalenessUtil.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadOnlyStalenessUtil.java new file mode 100644 index 000000000000..10c8178efb32 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadOnlyStalenessUtil.java @@ -0,0 +1,289 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.api.client.util.DateTime; +import com.google.api.client.util.DateTime.SecondsAndNanos; +import com.google.api.core.InternalApi; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.TimestampBound.Mode; +import com.google.protobuf.Duration; +import com.google.protobuf.util.Durations; +import java.time.temporal.ChronoUnit; +import java.util.concurrent.TimeUnit; + +/** + * Util class for parsing and converting ReadOnlyStaleness values to/from strings. This util is used + * to parse client side statements and values for read only staleness for read-only transactions on + * Cloud Spanner. + */ +@InternalApi +public class ReadOnlyStalenessUtil { + /** + * Parses an RFC3339 date/time value with nanosecond precision and returns this as a {@link + * Timestamp}. + */ + public static Timestamp parseRfc3339(String str) throws SpannerException { + try { + SecondsAndNanos secondsAndNanos = DateTime.parseRfc3339ToSecondsAndNanos(str); + return Timestamp.ofTimeSecondsAndNanos( + secondsAndNanos.getSeconds(), secondsAndNanos.getNanos()); + } catch (NumberFormatException e) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, String.format("Invalid timestamp: %s", str), e); + } + } + + /** The abbreviations for time units that may be used for client side statements. 
*/ + enum TimeUnitAbbreviation { + NANOSECONDS("ns", TimeUnit.NANOSECONDS), + MICROSECONDS("us", TimeUnit.MICROSECONDS), + MILLISECONDS("ms", TimeUnit.MILLISECONDS), + SECONDS("s", TimeUnit.SECONDS); + + private final String abbreviation; + private final TimeUnit unit; + + TimeUnitAbbreviation(String abbreviation, TimeUnit unit) { + this.abbreviation = abbreviation; + this.unit = unit; + } + + String getAbbreviation() { + return abbreviation; + } + + TimeUnit getUnit() { + return unit; + } + } + + /** Get the abbreviation for the given {@link TimeUnit}. */ + static String getTimeUnitAbbreviation(TimeUnit unit) { + for (TimeUnitAbbreviation abb : TimeUnitAbbreviation.values()) { + if (abb.unit == unit) return abb.abbreviation; + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Invalid option for time unit: " + unit); + } + + /** Get the {@link TimeUnit} corresponding with the given abbreviation. */ + static TimeUnit parseTimeUnit(String unit) { + for (TimeUnitAbbreviation abb : TimeUnitAbbreviation.values()) { + if (abb.abbreviation.equalsIgnoreCase(unit)) return abb.unit; + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Invalid option for time unit: " + unit); + } + + /** + * Convert from {@link TimeUnit} to {@link ChronoUnit}. This code is copied from {@link + * TimeUnit#toChronoUnit()}, which is available in Java 9 and higher. 
+ */ + static ChronoUnit toChronoUnit(TimeUnit timeUnit) { + switch (timeUnit) { + case NANOSECONDS: + return ChronoUnit.NANOS; + case MICROSECONDS: + return ChronoUnit.MICROS; + case MILLISECONDS: + return ChronoUnit.MILLIS; + case SECONDS: + return ChronoUnit.SECONDS; + case MINUTES: + return ChronoUnit.MINUTES; + case HOURS: + return ChronoUnit.HOURS; + case DAYS: + return ChronoUnit.DAYS; + default: + throw new IllegalArgumentException(); + } + } + + /** + * Internal interface that is used to generalize getting a time duration from Cloud Spanner + * read-only staleness settings. + */ + interface DurationValueGetter { + long getDuration(TimeUnit unit); + + boolean hasDuration(); + } + + static final class GetExactStaleness implements DurationValueGetter { + private final TimestampBound staleness; + + public GetExactStaleness(TimestampBound staleness) { + this.staleness = staleness; + } + + @Override + public long getDuration(TimeUnit unit) { + return staleness.getExactStaleness(unit); + } + + @Override + public boolean hasDuration() { + return staleness.getMode() == Mode.EXACT_STALENESS; + } + } + + static final class MaxStalenessGetter implements DurationValueGetter { + private final TimestampBound staleness; + + public MaxStalenessGetter(TimestampBound staleness) { + this.staleness = staleness; + } + + @Override + public long getDuration(TimeUnit unit) { + return staleness.getMaxStaleness(unit); + } + + @Override + public boolean hasDuration() { + return staleness.getMode() == Mode.MAX_STALENESS; + } + } + + static final class DurationGetter implements DurationValueGetter { + private final Duration duration; + + public DurationGetter(Duration duration) { + this.duration = duration; + } + + @Override + public long getDuration(TimeUnit unit) { + return durationToUnits(duration, unit); + } + + @Override + public boolean hasDuration() { + return duration.getNanos() > 0 || duration.getSeconds() > 0L; + } + } + + /** + * Converts a {@link TimestampBound} to a human 
readable string representation. + * + * @param staleness The staleness to convert + * @return a human readable representation of the staleness. + */ + static String timestampBoundToString(TimestampBound staleness) { + switch (staleness.getMode()) { + case STRONG: + return "STRONG"; + case READ_TIMESTAMP: + return "READ_TIMESTAMP " + staleness.getReadTimestamp().toString(); + case MIN_READ_TIMESTAMP: + return "MIN_READ_TIMESTAMP " + staleness.getMinReadTimestamp().toString(); + case EXACT_STALENESS: + return "EXACT_STALENESS " + durationToString(new GetExactStaleness(staleness)); + case MAX_STALENESS: + return "MAX_STALENESS " + durationToString(new MaxStalenessGetter(staleness)); + default: + throw new IllegalStateException("Unknown mode: " + staleness.getMode()); + } + } + + /** The {@link TimeUnit}s that are supported for timeout and staleness durations. */ + static final TimeUnit[] SUPPORTED_UNITS = + new TimeUnit[] { + TimeUnit.SECONDS, TimeUnit.MILLISECONDS, TimeUnit.MICROSECONDS, TimeUnit.NANOSECONDS + }; + + /** + * Converts a duration value to a human readable string. The method will search for the most + * appropriate {@link TimeUnit} to use to represent the value. + * + * @param function The function that should be called to get the duration in a specific {@link + * TimeUnit}. + * @return a human readable value of the duration. + */ + static String durationToString(DurationValueGetter function) { + TimeUnit unit = getAppropriateTimeUnit(function); + return function.getDuration(unit) + getTimeUnitAbbreviation(unit); + } + + /** + * Calculates the most appropriate {@link TimeUnit} to use to represent the duration that is + * returned by the given function. The most appropriate {@link TimeUnit} is the unit with the + * least precision that still retains all information of the given input. + * + * @param durationGetter The function that will return the duration in different {@link + * TimeUnit}s. 
+ * @return the most appropriate {@link TimeUnit} to represent the duration. + */ + static TimeUnit getAppropriateTimeUnit(DurationValueGetter durationGetter) { + int index = 0; + if (durationGetter.hasDuration()) { + for (TimeUnit unit : SUPPORTED_UNITS) { + long duration = durationGetter.getDuration(unit); + if (index + 1 < SUPPORTED_UNITS.length) { + if (duration > 0L + && duration * 1000 == durationGetter.getDuration(SUPPORTED_UNITS[index + 1])) { + return unit; + } + } else { + // last unit, we have to use this one + return unit; + } + index++; + } + throw new IllegalStateException("Unsupported duration"); + } + return TimeUnit.NANOSECONDS; + } + + /** Converts a value into a duration using the specified {@link TimeUnit}. */ + static Duration createDuration(long num, TimeUnit units) { + switch (units) { + case NANOSECONDS: + return Durations.fromNanos(num); + case MICROSECONDS: + return Durations.fromMicros(num); + case MILLISECONDS: + return Durations.fromMillis(num); + case SECONDS: + return Durations.fromSeconds(num); + default: + return Durations.fromMillis(units.toMillis(num)); + } + } + + /** Converts a duration to a number using the specified {@link TimeUnit}. 
*/ + static long durationToUnits(Duration duration, TimeUnit units) { + switch (units) { + case NANOSECONDS: + return Durations.toNanos(duration); + case MICROSECONDS: + return Durations.toMicros(duration); + case MILLISECONDS: + return Durations.toMillis(duration); + case SECONDS: + return Durations.toSeconds(duration); + default: + throw new IllegalArgumentException(); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadOnlyTransaction.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadOnlyTransaction.java new file mode 100644 index 000000000000..357503cb17fa --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadOnlyTransaction.java @@ -0,0 +1,296 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.api.core.ApiFuture; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.BatchClient; +import com.google.cloud.spanner.BatchReadOnlyTransaction; +import com.google.cloud.spanner.CommitResponse; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.Options.UpdateOption; +import com.google.cloud.spanner.PartitionOptions; +import com.google.cloud.spanner.ReadContext; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.spanner.v1.SpannerGrpc; +import io.opentelemetry.context.Scope; +import java.util.concurrent.Callable; +import javax.annotation.Nonnull; + +/** + * Transaction that is used when a {@link Connection} is in read-only mode or when the transaction + * mode is set to read-only. This transaction can only be used to execute queries. 
+ */ +class ReadOnlyTransaction extends AbstractMultiUseTransaction { + private final DatabaseClient dbClient; + private final BatchClient batchClient; + private final TimestampBound readOnlyStaleness; + private com.google.cloud.spanner.ReadOnlyTransaction transaction; + private BatchReadOnlyTransaction batchReadOnlyTransaction; + private UnitOfWorkState state = UnitOfWorkState.STARTED; + + static class Builder extends AbstractBaseUnitOfWork.Builder { + private DatabaseClient dbClient; + private BatchClient batchClient; + private TimestampBound readOnlyStaleness; + + private Builder() {} + + Builder setDatabaseClient(DatabaseClient client) { + Preconditions.checkNotNull(client); + this.dbClient = client; + return this; + } + + Builder setBatchClient(BatchClient batchClient) { + this.batchClient = Preconditions.checkNotNull(batchClient); + return this; + } + + Builder setReadOnlyStaleness(TimestampBound staleness) { + Preconditions.checkNotNull(staleness); + this.readOnlyStaleness = staleness; + return this; + } + + @Override + ReadOnlyTransaction build() { + Preconditions.checkState(dbClient != null, "No DatabaseClient client specified"); + Preconditions.checkState(batchClient != null, "No BatchClient client specified"); + Preconditions.checkState(readOnlyStaleness != null, "No ReadOnlyStaleness specified"); + return new ReadOnlyTransaction(this); + } + } + + static Builder newBuilder() { + return new Builder(); + } + + @VisibleForTesting + ReadOnlyTransaction(Builder builder) { + super(builder); + this.dbClient = builder.dbClient; + this.batchClient = builder.batchClient; + this.readOnlyStaleness = builder.readOnlyStaleness; + } + + @Override + public UnitOfWorkState getState() { + return this.state; + } + + @Override + public boolean isReadOnly() { + return true; + } + + @Override + public boolean supportsDirectedReads(ParsedStatement ignore) { + return true; + } + + @Override + void checkAborted() { + // No-op for read-only transactions as they cannot abort. 
+ } + + @Override + void checkOrCreateValidTransaction(ParsedStatement statement, CallType callType) { + if (transaction == null) { + transaction = dbClient.readOnlyTransaction(readOnlyStaleness); + } + } + + @Override + ReadContext getReadContext() { + ConnectionPreconditions.checkState(transaction != null, "Missing read-only transaction"); + return transaction; + } + + @Override + public Timestamp getReadTimestamp() { + ConnectionPreconditions.checkState( + transaction != null, "There is no read timestamp available for this transaction."); + ConnectionPreconditions.checkState( + state != UnitOfWorkState.ROLLED_BACK, "This transaction was rolled back"); + return transaction.getReadTimestamp(); + } + + @Override + public Timestamp getReadTimestampOrNull() { + if (transaction != null && state != UnitOfWorkState.ROLLED_BACK) { + try { + return transaction.getReadTimestamp(); + } catch (SpannerException e) { + // ignore + } + } + return null; + } + + @Override + public Timestamp getCommitTimestamp() { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "There is no commit timestamp available for this transaction."); + } + + @Override + public Timestamp getCommitTimestampOrNull() { + return null; + } + + @Override + public CommitResponse getCommitResponse() { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "There is no commit response available for read-only transactions."); + } + + @Override + public CommitResponse getCommitResponseOrNull() { + return null; + } + + @Override + public ApiFuture partitionQueryAsync( + CallType callType, + ParsedStatement query, + PartitionOptions partitionOptions, + QueryOption... options) { + // Batch-read-only transactions are safe to use for both normal queries and partitioned queries. + // We therefore just use the batch transaction as the 'normal' transaction if the first + // statement in the transaction is to partition a query. 
+ // Using a batch-read-only transaction for every read-only transaction is not efficient, as + // these transactions use a session that is created synchronously only for this transaction. + try (Scope ignore = span.makeCurrent()) { + if (transaction == null) { + batchReadOnlyTransaction = batchClient.batchReadOnlyTransaction(readOnlyStaleness); + transaction = batchReadOnlyTransaction; + } else if (batchReadOnlyTransaction == null) { + batchReadOnlyTransaction = + batchClient.batchReadOnlyTransaction( + TimestampBound.ofReadTimestamp(transaction.getReadTimestamp())); + } + Callable callable = + () -> partitionQuery(batchReadOnlyTransaction, partitionOptions, query, options); + return executeStatementAsync( + callType, + query, + callable, + ImmutableList.of(SpannerGrpc.getExecuteSqlMethod(), SpannerGrpc.getCommitMethod())); + } + } + + @Override + public ApiFuture executeDdlAsync(CallType callType, ParsedStatement ddl) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "DDL statements are not allowed for read-only transactions"); + } + + @Override + public ApiFuture executeUpdateAsync( + CallType callType, ParsedStatement update, UpdateOption... options) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "Update statements are not allowed for read-only transactions"); + } + + @Override + public ApiFuture analyzeUpdateAsync( + CallType callType, ParsedStatement update, AnalyzeMode analyzeMode, UpdateOption... options) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "Analyzing updates is not allowed for read-only transactions"); + } + + @Override + public ApiFuture executeBatchUpdateAsync( + CallType callType, Iterable updates, UpdateOption... 
options) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Batch updates are not allowed for read-only transactions."); + } + + @Override + public ApiFuture writeAsync(CallType callType, Iterable mutations) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Mutations are not allowed for read-only transactions"); + } + + @Override + public ApiFuture commitAsync( + @Nonnull CallType callType, @Nonnull EndTransactionCallback callback) { + try (Scope ignore = span.makeCurrent()) { + ApiFuture result = closeTransactions(); + callback.onSuccess(); + this.state = UnitOfWorkState.COMMITTED; + return result; + } catch (Throwable throwable) { + callback.onFailure(); + throw throwable; + } + } + + @Override + public ApiFuture rollbackAsync( + @Nonnull CallType callType, @Nonnull EndTransactionCallback callback) { + try (Scope ignore = span.makeCurrent()) { + ApiFuture result = closeTransactions(); + callback.onSuccess(); + this.state = UnitOfWorkState.ROLLED_BACK; + return result; + } catch (Throwable throwable) { + callback.onFailure(); + throw throwable; + } + } + + private ApiFuture closeTransactions() { + if (this.transaction != null) { + this.transaction.close(); + } + if (this.batchReadOnlyTransaction != null) { + this.batchReadOnlyTransaction.close(); + } + return asyncEndUnitOfWorkSpan(); + } + + @Override + String getUnitOfWorkName() { + return "read-only transaction"; + } + + Savepoint savepoint(String name) { + // Read-only transactions do not keep track of the executed statements as they also do not take + // any locks. There is therefore no savepoint positions that must be rolled back to. 
+ return Savepoint.of(name); + } + + void rollbackToSavepoint(Savepoint savepoint) { + // no-op for read-only transactions + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadWriteTransaction.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadWriteTransaction.java new file mode 100644 index 000000000000..ccb592e3f843 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadWriteTransaction.java @@ -0,0 +1,1420 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static com.google.cloud.spanner.connection.AbstractStatementParser.BEGIN_STATEMENT; +import static com.google.cloud.spanner.connection.AbstractStatementParser.COMMIT_STATEMENT; +import static com.google.cloud.spanner.connection.AbstractStatementParser.ROLLBACK_STATEMENT; +import static com.google.cloud.spanner.connection.AbstractStatementParser.RUN_BATCH_STATEMENT; +import static com.google.cloud.spanner.connection.ConnectionOptions.tryParseLong; +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.cloud.Timestamp; +import com.google.cloud.Tuple; +import com.google.cloud.spanner.AbortedDueToConcurrentModificationException; +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.CommitResponse; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.Options.TransactionOption; +import com.google.cloud.spanner.Options.UpdateOption; +import com.google.cloud.spanner.ProtobufResultSet; +import com.google.cloud.spanner.ReadContext; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.ThreadFactoryUtil; +import com.google.cloud.spanner.TransactionContext; +import com.google.cloud.spanner.TransactionManager; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import 
com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; +import com.google.cloud.spanner.connection.TransactionRetryListener.RetryResult; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterables; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.spanner.v1.SpannerGrpc; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode; +import io.grpc.Deadline; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.context.Scope; +import java.time.Duration; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.Callable; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReentrantLock; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; + +/** + * Transaction that is used when a {@link Connection} is normal read/write mode (i.e. not autocommit + * and not read-only). These transactions can be automatically retried if an {@link + * AbortedException} is thrown. The transaction will keep track of a running checksum of all {@link + * ResultSet}s that have been returned, and the update counts returned by any DML statement executed + * during the transaction. 
As long as these checksums and update counts are equal for both the + * original transaction and the retried transaction, the retry can safely be assumed to have the + * exact same results as the original transaction. + */ +class ReadWriteTransaction extends AbstractMultiUseTransaction { + private static final AttributeKey TRANSACTION_RETRIED = + AttributeKey.booleanKey("transaction.retried"); + private static final Logger logger = Logger.getLogger(ReadWriteTransaction.class.getName()); + private static final ThreadFactory KEEP_ALIVE_THREAD_FACTORY = + ThreadFactoryUtil.createVirtualOrPlatformDaemonThreadFactory( + "read-write-transaction-keep-alive", true); + private static final ScheduledExecutorService KEEP_ALIVE_SERVICE = + Executors.newSingleThreadScheduledExecutor(KEEP_ALIVE_THREAD_FACTORY); + private static final ParsedStatement SELECT1_STATEMENT = + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + .parse(Statement.of("SELECT 1")); + private static final long DEFAULT_KEEP_ALIVE_INTERVAL_MILLIS = 8000L; + + private static final AtomicLong ID_GENERATOR = new AtomicLong(); + private static final String MAX_INTERNAL_RETRIES_EXCEEDED = + "Internal transaction retry maximum exceeded"; + private static final int DEFAULT_MAX_INTERNAL_RETRIES = 50; + + /** + * A reference to the currently active transaction on the emulator that was started by the same + * thread. This reference is only used when running on the emulator, and enables the Connection + * API to manually abort the current transaction on the emulator, so other transactions can try to + * make progress. + */ + private static final ThreadLocal CURRENT_ACTIVE_TRANSACTION = + new ThreadLocal<>(); + + /** + * The name of the automatic savepoint that is generated by the Connection API if automatically + * aborting the current active transaction on the emulator is enabled. 
+ */ + private static final String AUTO_SAVEPOINT_NAME = "_auto_savepoint"; + + private final boolean usesEmulator; + + /** + * Indicates whether an automatic savepoint should be generated after each statement, so the + * transaction can be manually aborted and retried by the Connection API when connected to the + * emulator. This feature is only intended for use with the Spanner emulator. When connected to + * real Spanner, the decision whether to abort a transaction or not should be delegated to + * Spanner. + */ + private final boolean useAutoSavepointsForEmulator; + + /** + * The savepoint that was automatically generated after executing the last statement. This is used + * to abort transactions on the emulator, if one thread tries to execute concurrent transactions + * on the emulator, and would otherwise be deadlocked. + */ + private Savepoint autoSavepoint; + + private final int maxInternalRetries; + private final ReentrantLock abortedLock = new ReentrantLock(); + private final long transactionId; + private final DatabaseClient dbClient; + private final TransactionOption[] transactionOptions; + private TransactionManager txManager; + private final boolean retryAbortsInternally; + private final boolean delayTransactionStartUntilFirstWrite; + private final boolean keepTransactionAlive; + private final long keepAliveIntervalMillis; + private final ReentrantLock keepAliveLock; + private final SavepointSupport savepointSupport; + @Nonnull private final IsolationLevel isolationLevel; + private final ReadLockMode readLockMode; + private final Deadline deadline; + private int transactionRetryAttempts; + private int successfulRetries; + private volatile ApiFuture txContextFuture; + private boolean canUseSingleUseRead; + private volatile SettableApiFuture commitResponseFuture; + private volatile UnitOfWorkState state = UnitOfWorkState.STARTED; + private volatile AbortedException abortedException; + private AbortedException rolledBackToSavepointException; + private 
boolean timedOutOrCancelled = false; + private final List statements = new ArrayList<>(); + private final List mutations = new ArrayList<>(); + private Timestamp transactionStarted; + private ScheduledFuture keepAliveFuture; + + private static final class RollbackToSavepointException extends Exception { + private final Savepoint savepoint; + + RollbackToSavepointException(Savepoint savepoint) { + this.savepoint = Preconditions.checkNotNull(savepoint); + } + + Savepoint getSavepoint() { + return this.savepoint; + } + } + + private final class StatementResultCallback implements ApiFutureCallback { + @Override + public void onFailure(Throwable t) { + if (t instanceof SpannerException) { + handlePossibleInvalidatingException((SpannerException) t); + } + maybeScheduleKeepAlivePing(); + } + + @Override + public void onSuccess(V result) { + maybeScheduleKeepAlivePing(); + } + } + + static class Builder extends AbstractMultiUseTransaction.Builder { + private boolean usesEmulator; + private boolean useAutoSavepointsForEmulator; + private DatabaseClient dbClient; + private Boolean retryAbortsInternally; + private boolean delayTransactionStartUntilFirstWrite; + private boolean keepTransactionAlive; + private boolean returnCommitStats; + private Duration maxCommitDelay; + private SavepointSupport savepointSupport; + private IsolationLevel isolationLevel; + private ReadLockMode readLockMode = ReadLockMode.READ_LOCK_MODE_UNSPECIFIED; + private Deadline deadline; + + private Builder() {} + + Builder setUsesEmulator(boolean usesEmulator) { + this.usesEmulator = usesEmulator; + return this; + } + + Builder setUseAutoSavepointsForEmulator(boolean useAutoSavepoints) { + this.useAutoSavepointsForEmulator = useAutoSavepoints; + return this; + } + + Builder setDatabaseClient(DatabaseClient client) { + Preconditions.checkNotNull(client); + this.dbClient = client; + return this; + } + + Builder setDelayTransactionStartUntilFirstWrite(boolean delayTransactionStartUntilFirstWrite) { + 
this.delayTransactionStartUntilFirstWrite = delayTransactionStartUntilFirstWrite; + return this; + } + + Builder setKeepTransactionAlive(boolean keepTransactionAlive) { + this.keepTransactionAlive = keepTransactionAlive; + return this; + } + + Builder setRetryAbortsInternally(boolean retryAbortsInternally) { + this.retryAbortsInternally = retryAbortsInternally; + return this; + } + + Builder setReturnCommitStats(boolean returnCommitStats) { + this.returnCommitStats = returnCommitStats; + return this; + } + + Builder setMaxCommitDelay(Duration maxCommitDelay) { + this.maxCommitDelay = maxCommitDelay; + return this; + } + + Builder setSavepointSupport(SavepointSupport savepointSupport) { + this.savepointSupport = savepointSupport; + return this; + } + + Builder setIsolationLevel(IsolationLevel isolationLevel) { + this.isolationLevel = Preconditions.checkNotNull(isolationLevel); + return this; + } + + Builder setReadLockMode(ReadLockMode readLockMode) { + this.readLockMode = Preconditions.checkNotNull(readLockMode); + return this; + } + + Builder setDeadline(Deadline deadline) { + this.deadline = deadline; + return this; + } + + @Override + ReadWriteTransaction build() { + Preconditions.checkState(dbClient != null, "No DatabaseClient client specified"); + Preconditions.checkState( + retryAbortsInternally != null, "RetryAbortsInternally is not specified"); + Preconditions.checkState( + hasTransactionRetryListeners(), "TransactionRetryListeners are not specified"); + Preconditions.checkState(savepointSupport != null, "SavepointSupport is not specified"); + Preconditions.checkState(isolationLevel != null, "IsolationLevel is not specified"); + return new ReadWriteTransaction(this); + } + } + + static Builder newBuilder() { + return new Builder(); + } + + private ReadWriteTransaction(Builder builder) { + super(builder); + this.transactionId = ID_GENERATOR.incrementAndGet(); + this.usesEmulator = builder.usesEmulator; + this.useAutoSavepointsForEmulator = 
builder.useAutoSavepointsForEmulator; + // Use a higher max for internal retries if auto-savepoints have been enabled for the emulator. + // This can cause a larger number of transactions to be aborted and retried, and retrying on the + // emulator is fast, so increasing the limit is reasonable. + this.maxInternalRetries = + builder.usesEmulator && builder.retryAbortsInternally + ? DEFAULT_MAX_INTERNAL_RETRIES * 50 + : DEFAULT_MAX_INTERNAL_RETRIES; + this.dbClient = builder.dbClient; + this.delayTransactionStartUntilFirstWrite = builder.delayTransactionStartUntilFirstWrite; + this.keepTransactionAlive = builder.keepTransactionAlive; + this.keepAliveIntervalMillis = + this.keepTransactionAlive + ? tryParseLong( + System.getProperty( + "spanner.connection.keep_alive_interval_millis", + String.valueOf(DEFAULT_KEEP_ALIVE_INTERVAL_MILLIS)), + DEFAULT_KEEP_ALIVE_INTERVAL_MILLIS) + : 0L; + this.keepAliveLock = this.keepTransactionAlive ? new ReentrantLock() : null; + this.retryAbortsInternally = builder.retryAbortsInternally; + this.savepointSupport = builder.savepointSupport; + this.isolationLevel = Preconditions.checkNotNull(builder.isolationLevel); + this.readLockMode = Preconditions.checkNotNull(builder.readLockMode); + this.deadline = builder.deadline; + this.transactionOptions = extractOptions(builder); + } + + private TransactionOption[] extractOptions(Builder builder) { + int numOptions = 0; + if (builder.returnCommitStats) { + numOptions++; + } + if (builder.maxCommitDelay != null) { + numOptions++; + } + if (this.transactionTag != null) { + numOptions++; + } + if (this.excludeTxnFromChangeStreams) { + numOptions++; + } + if (this.rpcPriority != null) { + numOptions++; + } + if (this.isolationLevel != IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED) { + numOptions++; + } + if (this.readLockMode != ReadLockMode.READ_LOCK_MODE_UNSPECIFIED) { + numOptions++; + } + if (this.clientContext != null) { + numOptions++; + } + TransactionOption[] options = new 
TransactionOption[numOptions]; + int index = 0; + if (builder.returnCommitStats) { + options[index++] = Options.commitStats(); + } + if (builder.maxCommitDelay != null) { + options[index++] = Options.maxCommitDelay(builder.maxCommitDelay); + } + if (this.transactionTag != null) { + options[index++] = Options.tag(this.transactionTag); + } + if (this.excludeTxnFromChangeStreams) { + options[index++] = Options.excludeTxnFromChangeStreams(); + } + if (this.rpcPriority != null) { + options[index++] = Options.priority(this.rpcPriority); + } + if (this.isolationLevel != IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED) { + options[index++] = Options.isolationLevel(this.isolationLevel); + } + if (this.readLockMode != ReadLockMode.READ_LOCK_MODE_UNSPECIFIED) { + options[index++] = Options.readLockMode(this.readLockMode); + } + if (this.clientContext != null) { + options[index++] = Options.clientContext(this.clientContext); + } + return options; + } + + @Override + public String toString() { + return new StringBuilder() + .append("ReadWriteTransaction - ID: ") + .append(transactionId) + .append("; Delay tx start: ") + .append(delayTransactionStartUntilFirstWrite) + .append("; Tag: ") + .append(Strings.nullToEmpty(transactionTag)) + .append("; Status: ") + .append(internalGetStateName()) + .append("; Started: ") + .append(internalGetTimeStarted()) + .append("; Retry attempts: ") + .append(transactionRetryAttempts) + .append("; Successful retries: ") + .append(successfulRetries) + .toString(); + } + + private String internalGetStateName() { + return transactionStarted == null ? "Not yet started" : getState().toString(); + } + + private String internalGetTimeStarted() { + return transactionStarted == null ? 
"Not yet started" : transactionStarted.toString(); + } + + @Override + public UnitOfWorkState getState() { + return this.state; + } + + @Override + public boolean isReadOnly() { + return false; + } + + @Override + void checkOrCreateValidTransaction(ParsedStatement statement, CallType callType) { + checkValidStateAndMarkStarted(); + if (txContextFuture == null + && (!delayTransactionStartUntilFirstWrite + || (statement != null && statement.isUpdate()) + || (statement == COMMIT_STATEMENT && !mutations.isEmpty()))) { + txManager = dbClient.transactionManager(this.transactionOptions); + canUseSingleUseRead = false; + txContextFuture = + executeStatementAsync( + callType, BEGIN_STATEMENT, txManager::begin, SpannerGrpc.getBeginTransactionMethod()); + } else if (txContextFuture == null && delayTransactionStartUntilFirstWrite) { + canUseSingleUseRead = true; + } + maybeUpdateActiveTransaction(); + } + + private void checkValidStateAndMarkStarted() { + ConnectionPreconditions.checkState( + this.state == UnitOfWorkState.STARTED || this.state == UnitOfWorkState.ABORTED, + "This transaction has status " + + this.state.name() + + ", only " + + UnitOfWorkState.STARTED + + "or " + + UnitOfWorkState.ABORTED + + " is allowed."); + ConnectionPreconditions.checkState( + this.retryAbortsInternally || this.rolledBackToSavepointException == null, + "Cannot resume execution after rolling back to a savepoint if internal retries have been" + + " disabled. 
Call Connection#setRetryAbortsInternally(true) or execute `SET" + + " RETRY_ABORTS_INTERNALLY=TRUE` to enable resuming execution after rolling back to a" + + " savepoint."); + checkTimedOut(); + if (transactionStarted == null) { + transactionStarted = Timestamp.now(); + } + } + + private boolean shouldPing() { + return isActive() + && keepAliveLock != null + && keepTransactionAlive + && !timedOutOrCancelled + && rolledBackToSavepointException == null; + } + + private void maybeScheduleKeepAlivePing() { + if (shouldPing()) { + keepAliveLock.lock(); + try { + if (keepAliveFuture == null || keepAliveFuture.isDone()) { + keepAliveFuture = + KEEP_ALIVE_SERVICE.schedule( + new KeepAliveRunnable(), + keepAliveIntervalMillis > 0 + ? keepAliveIntervalMillis + : DEFAULT_KEEP_ALIVE_INTERVAL_MILLIS, + TimeUnit.MILLISECONDS); + } + } finally { + keepAliveLock.unlock(); + } + } + } + + private void cancelScheduledKeepAlivePing() { + if (keepAliveLock != null) { + keepAliveLock.lock(); + try { + if (keepAliveFuture != null) { + keepAliveFuture.cancel(false); + } + } finally { + keepAliveLock.unlock(); + } + } + } + + private class KeepAliveRunnable implements Runnable { + @Override + public void run() { + if (shouldPing()) { + // Do a shoot-and-forget ping and schedule a new ping over 8 seconds after this ping has + // finished. + ApiFuture future = + executeQueryAsync( + CallType.SYNC, + SELECT1_STATEMENT, + AnalyzeMode.NONE, + Options.tag( + System.getProperty( + "spanner.connection.keep_alive_query_tag", + "connection.transaction-keep-alive"))); + future.addListener( + ReadWriteTransaction.this::maybeScheduleKeepAlivePing, MoreExecutors.directExecutor()); + } + } + } + + private void checkTimedOut() { + ConnectionPreconditions.checkState( + !timedOutOrCancelled, + "The last statement of this transaction timed out or was cancelled. " + + "The transaction is no longer usable. 
" + + "Rollback the transaction and start a new one."); + } + + @Override + public boolean isActive() { + // Consider ABORTED an active state, as it is something that is automatically set if the + // transaction is aborted by the backend. That means that we should not automatically create a + // new transaction for the following statement after a transaction has aborted, and instead we + // should wait until the application has rolled back the current transaction. + // + // Otherwise the following list of statements could show unexpected behavior: + + // connection.executeUpdateAsync("UPDATE FOO SET BAR=1 ..."); + // connection.executeUpdateAsync("UPDATE BAR SET FOO=2 ..."); + // connection.commitAsync(); + // + // If the first update statement fails with an aborted exception, the second update statement + // should not be executed in a new transaction, but should also abort. + return getState().isActive() || state == UnitOfWorkState.ABORTED; + } + + void checkAborted() { + if (this.state == UnitOfWorkState.ABORTED && this.abortedException != null) { + if (this.abortedException instanceof AbortedDueToConcurrentModificationException) { + throw SpannerExceptionFactory.newAbortedDueToConcurrentModificationException( + (AbortedDueToConcurrentModificationException) this.abortedException); + } else { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.ABORTED, + "This transaction has already been aborted. 
Rollback this transaction to start a new" + + " one.", + this.abortedException); + } + } + } + + void checkRolledBackToSavepoint() { + if (this.rolledBackToSavepointException != null) { + if (savepointSupport == SavepointSupport.FAIL_AFTER_ROLLBACK + && !((RollbackToSavepointException) this.rolledBackToSavepointException.getCause()) + .getSavepoint() + .isAutoSavepoint()) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "Using a read/write transaction after rolling back to a savepoint is not supported " + + "with SavepointSupport=" + + savepointSupport); + } else { + AbortedException exception = this.rolledBackToSavepointException; + this.rolledBackToSavepointException = null; + throw exception; + } + } + } + + @Override + ReadContext getReadContext() { + if (txContextFuture == null && canUseSingleUseRead) { + return dbClient.singleUse(); + } + ConnectionPreconditions.checkState(txContextFuture != null, "Missing transaction context"); + return get(txContextFuture); + } + + TransactionContext getTransactionContext() { + ConnectionPreconditions.checkState(txContextFuture != null, "Missing transaction context"); + return (TransactionContext) getReadContext(); + } + + @Override + public Timestamp getReadTimestamp() { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "There is no read timestamp available for read/write transactions."); + } + + @Override + public Timestamp getReadTimestampOrNull() { + return null; + } + + private boolean hasCommitResponse() { + return commitResponseFuture != null; + } + + @Override + public Timestamp getCommitTimestamp() { + ConnectionPreconditions.checkState( + hasCommitResponse(), "This transaction has not been committed."); + return get(commitResponseFuture).getCommitTimestamp(); + } + + @Override + public Timestamp getCommitTimestampOrNull() { + return hasCommitResponse() ? 
get(commitResponseFuture).getCommitTimestamp() : null; + } + + @Override + public CommitResponse getCommitResponse() { + ConnectionPreconditions.checkState( + hasCommitResponse(), "This transaction has not been committed."); + return get(commitResponseFuture); + } + + @Override + public CommitResponse getCommitResponseOrNull() { + return hasCommitResponse() ? get(commitResponseFuture) : null; + } + + @Override + public ApiFuture executeDdlAsync(CallType callType, ParsedStatement ddl) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "DDL-statements are not allowed inside a read/write transaction."); + } + + private void handlePossibleInvalidatingException(SpannerException e) { + if (e.getErrorCode() == ErrorCode.DEADLINE_EXCEEDED + || e.getErrorCode() == ErrorCode.CANCELLED) { + this.timedOutOrCancelled = true; + } + } + + @Override + public ApiFuture executeQueryAsync( + final CallType callType, + final ParsedStatement statement, + final AnalyzeMode analyzeMode, + final QueryOption... 
options) { + Preconditions.checkArgument( + (statement.getType() == StatementType.QUERY) + || (statement.getType() == StatementType.UPDATE && statement.hasReturningClause()), + "Statement must be a query or DML with returning clause"); + try (Scope ignore = span.makeCurrent()) { + checkOrCreateValidTransaction(statement, callType); + + ApiFuture res; + if (retryAbortsInternally && txContextFuture != null) { + res = + executeStatementAsync( + callType, + statement, + () -> { + checkTimedOut(); + return runWithRetry( + () -> { + try { + getStatementExecutor() + .invokeInterceptors( + statement, + StatementExecutionStep.EXECUTE_STATEMENT, + ReadWriteTransaction.this); + DirectExecuteResultSet delegate = + DirectExecuteResultSet.ofResultSet( + internalExecuteQuery(statement, analyzeMode, options)); + return createAndAddRetryResultSet( + delegate, statement, analyzeMode, options); + } catch (AbortedException e) { + throw e; + } catch (SpannerException e) { + createAndAddFailedQuery(e, statement, analyzeMode, options); + throw e; + } + }); + }, + // ignore interceptors here as they are invoked in the Callable. + InterceptorsUsage.IGNORE_INTERCEPTORS, + ImmutableList.of(SpannerGrpc.getExecuteStreamingSqlMethod())); + } else { + // Handle both SELECT queries and DML with THEN RETURN without delegating to the base class, + // which rejects non-SELECT statements. + res = + executeStatementAsync( + callType, + statement, + () -> { + checkTimedOut(); + checkAborted(); + return DirectExecuteResultSet.ofResultSet( + internalExecuteQuery(statement, analyzeMode, options)); + }, + SpannerGrpc.getExecuteStreamingSqlMethod()); + } + ApiFutures.addCallback(res, new StatementResultCallback<>(), MoreExecutors.directExecutor()); + return res; + } + } + + @Override + public ApiFuture analyzeUpdateAsync( + CallType callType, ParsedStatement update, AnalyzeMode analyzeMode, UpdateOption... 
options) { + try (Scope ignore = span.makeCurrent()) { + return ApiFutures.transform( + internalExecuteUpdateAsync(callType, update, analyzeMode, options), + Tuple::y, + MoreExecutors.directExecutor()); + } + } + + @Override + public ApiFuture executeUpdateAsync( + CallType callType, final ParsedStatement update, final UpdateOption... options) { + try (Scope ignore = span.makeCurrent()) { + return ApiFutures.transform( + internalExecuteUpdateAsync(callType, update, AnalyzeMode.NONE, options), + Tuple::x, + MoreExecutors.directExecutor()); + } + } + + /** + * Executes the given update statement using the specified query planning mode and with the given + * options and returns the result as a {@link Tuple}. The tuple contains either a {@link + * ResultSet} with the query plan and execution statistics, or a {@link Long} that contains the + * update count that was returned for the update statement. Only one of the elements in the tuple + * will be set, and the reason that we are using a {@link Tuple} here is because Java does not + * have a standard implementation for an 'Either' class (i.e. a Tuple where only one element is + * set). An alternative would be to always return a {@link ResultSet} with the update count + * encoded in the execution stats of the result set, but this would mean that we would create + * additional {@link ResultSet} instances every time an update statement is executed in normal + * mode. + */ + private ApiFuture> internalExecuteUpdateAsync( + CallType callType, ParsedStatement update, AnalyzeMode analyzeMode, UpdateOption... 
options) { + Preconditions.checkNotNull(update); + Preconditions.checkArgument(update.isUpdate(), "The statement is not an update statement"); + checkOrCreateValidTransaction(update, callType); + ApiFuture> res; + if (retryAbortsInternally && txContextFuture != null) { + res = + executeStatementAsync( + callType, + update, + () -> { + checkTimedOut(); + return runWithRetry( + () -> { + try { + getStatementExecutor() + .invokeInterceptors( + update, + StatementExecutionStep.EXECUTE_STATEMENT, + ReadWriteTransaction.this); + + Tuple result; + long updateCount; + if (analyzeMode == AnalyzeMode.NONE) { + updateCount = + get(txContextFuture).executeUpdate(update.getStatement(), options); + result = Tuple.of(updateCount, null); + } else { + ResultSet resultSet = + get(txContextFuture) + .analyzeUpdateStatement( + update.getStatement(), + analyzeMode.getQueryAnalyzeMode(), + options); + updateCount = + Objects.requireNonNull(resultSet.getStats()).getRowCountExact(); + result = Tuple.of(null, resultSet); + } + createAndAddRetriableUpdate(update, analyzeMode, updateCount, options); + return result; + } catch (AbortedException e) { + throw e; + } catch (SpannerException e) { + createAndAddFailedUpdate(e, update); + throw e; + } + }); + }, + // ignore interceptors here as they are invoked in the Callable. 
+ InterceptorsUsage.IGNORE_INTERCEPTORS, + ImmutableList.of(SpannerGrpc.getExecuteSqlMethod())); + } else { + res = + executeStatementAsync( + callType, + update, + () -> { + checkTimedOut(); + checkAborted(); + if (analyzeMode == AnalyzeMode.NONE) { + return Tuple.of( + get(txContextFuture).executeUpdate(update.getStatement(), options), null); + } + ResultSet resultSet = + get(txContextFuture) + .analyzeUpdateStatement( + update.getStatement(), analyzeMode.getQueryAnalyzeMode(), options); + return Tuple.of(null, resultSet); + }, + SpannerGrpc.getExecuteSqlMethod()); + } + ApiFutures.addCallback(res, new StatementResultCallback<>(), MoreExecutors.directExecutor()); + return res; + } + + @Override + public ApiFuture executeBatchUpdateAsync( + CallType callType, Iterable updates, final UpdateOption... options) { + Preconditions.checkNotNull(updates); + try (Scope ignore = span.makeCurrent()) { + final List updateStatements = new LinkedList<>(); + for (ParsedStatement update : updates) { + Preconditions.checkArgument( + update.isUpdate(), "Statement is not an update statement: " + update.getSql()); + updateStatements.add(update.getStatement()); + } + checkOrCreateValidTransaction(Iterables.getFirst(updates, null), callType); + + ApiFuture res; + if (retryAbortsInternally) { + res = + executeStatementAsync( + callType, + RUN_BATCH_STATEMENT, + () -> { + checkTimedOut(); + return runWithRetry( + () -> { + try { + getStatementExecutor() + .invokeInterceptors( + RUN_BATCH_STATEMENT, + StatementExecutionStep.EXECUTE_STATEMENT, + ReadWriteTransaction.this); + long[] updateCounts = + get(txContextFuture).batchUpdate(updateStatements, options); + createAndAddRetriableBatchUpdate(updateStatements, updateCounts, options); + return updateCounts; + } catch (AbortedException e) { + throw e; + } catch (SpannerException e) { + createAndAddFailedBatchUpdate(e, updateStatements); + throw e; + } + }); + }, + // ignore interceptors here as they are invoked in the Callable. 
+ InterceptorsUsage.IGNORE_INTERCEPTORS, + ImmutableList.of(SpannerGrpc.getExecuteBatchDmlMethod())); + } else { + res = + executeStatementAsync( + callType, + RUN_BATCH_STATEMENT, + () -> { + checkTimedOut(); + checkAborted(); + return get(txContextFuture).batchUpdate(updateStatements); + }, + SpannerGrpc.getExecuteBatchDmlMethod()); + } + ApiFutures.addCallback(res, new StatementResultCallback<>(), MoreExecutors.directExecutor()); + return res; + } + } + + @Override + public ApiFuture writeAsync(CallType callType, Iterable mutations) { + try (Scope ignore = span.makeCurrent()) { + Preconditions.checkNotNull(mutations); + // We actually don't need an underlying transaction yet, as mutations are buffered until + // commit. + // But we do need to verify that this transaction is valid, and to mark the start of the + // transaction. + checkValidStateAndMarkStarted(); + for (Mutation mutation : mutations) { + this.mutations.add(checkNotNull(mutation)); + } + return ApiFutures.immediateFuture(null); + } + } + + private final Callable commitCallable = + new Callable() { + @Override + public Void call() { + checkAborted(); + get(txContextFuture).buffer(mutations); + txManager.commit(); + commitResponseFuture.set(txManager.getCommitResponse()); + state = UnitOfWorkState.COMMITTED; + return null; + } + }; + + @Override + public ApiFuture commitAsync( + @Nonnull CallType callType, @Nonnull EndTransactionCallback callback) { + try (Scope ignore = span.makeCurrent()) { + checkOrCreateValidTransaction(COMMIT_STATEMENT, callType); + cancelScheduledKeepAlivePing(); + state = UnitOfWorkState.COMMITTING; + commitResponseFuture = SettableApiFuture.create(); + ApiFuture res; + // Check if this transaction actually needs to commit anything. + if (txContextFuture == null) { + // No actual transaction was started by this read/write transaction, which also means that + // we don't have to commit anything. 
+ commitResponseFuture.set( + new CommitResponse( + Timestamp.fromProto(com.google.protobuf.Timestamp.getDefaultInstance()))); + callback.onSuccess(); + state = UnitOfWorkState.COMMITTED; + res = SettableApiFuture.create(); + ((SettableApiFuture) res).set(null); + } else if (retryAbortsInternally) { + res = + executeStatementAsync( + callType, + COMMIT_STATEMENT, + () -> { + checkTimedOut(); + try { + Void result = + runWithRetry( + () -> { + getStatementExecutor() + .invokeInterceptors( + COMMIT_STATEMENT, + StatementExecutionStep.EXECUTE_STATEMENT, + ReadWriteTransaction.this); + return commitCallable.call(); + }); + callback.onSuccess(); + return result; + } catch (Throwable t) { + commitResponseFuture.setException(t); + callback.onFailure(); + state = UnitOfWorkState.COMMIT_FAILED; + try { + txManager.close(); + } catch (Throwable t2) { + // Ignore. + } + throw t; + } + }, + InterceptorsUsage.IGNORE_INTERCEPTORS, + ImmutableList.of(SpannerGrpc.getCommitMethod())); + } else { + res = + executeStatementAsync( + callType, + COMMIT_STATEMENT, + () -> { + checkTimedOut(); + try { + Void result = commitCallable.call(); + callback.onSuccess(); + return result; + } catch (Throwable t) { + commitResponseFuture.setException(t); + callback.onFailure(); + state = UnitOfWorkState.COMMIT_FAILED; + try { + txManager.close(); + } catch (Throwable t2) { + // Ignore. + } + throw t; + } + }, + SpannerGrpc.getCommitMethod()); + } + asyncEndUnitOfWorkSpan(); + return res; + } + } + + /** + * Executes a database call that could throw an {@link AbortedException}. If an {@link + * AbortedException} is thrown, the transaction will automatically be retried and the checksums of + * all {@link ResultSet}s and update counts of DML statements will be checked against the original + * values of the original transaction. 
If the checksums and/or update counts do not match, the + * method will throw an {@link AbortedException} that cannot be retried, as the underlying data + * have actually changed. + * + *

If {@link ReadWriteTransaction#retryAbortsInternally} has been set to false, + * this method will throw an exception instead of retrying the transaction if the transaction was + * aborted. + * + * @param callable The actual database calls. + * @return the results of the database calls. + * @throws SpannerException if the database calls threw an exception, an {@link + * AbortedDueToConcurrentModificationException} if a retry of the transaction yielded + * different results than the original transaction, or an {@link AbortedException} if the + * maximum number of retries has been exceeded. + */ + T runWithRetry(Callable callable) throws SpannerException { + while (true) { + abortedLock.lock(); + try { + checkAborted(); + try { + checkRolledBackToSavepoint(); + T result = callable.call(); + if (this.useAutoSavepointsForEmulator) { + this.autoSavepoint = createAutoSavepoint(); + } + return result; + } catch (final AbortedException aborted) { + handleAborted(aborted); + } catch (SpannerException e) { + throw e; + } catch (Exception e) { + throw SpannerExceptionFactory.asSpannerException(e); + } + } finally { + abortedLock.unlock(); + } + } + } + + private void maybeUpdateActiveTransaction() { + if (this.useAutoSavepointsForEmulator) { + if (CURRENT_ACTIVE_TRANSACTION.get() != null && CURRENT_ACTIVE_TRANSACTION.get() != this) { + ReadWriteTransaction activeTransaction = CURRENT_ACTIVE_TRANSACTION.get(); + if (activeTransaction.isActive() && activeTransaction.autoSavepoint != null) { + activeTransaction.rollbackToSavepoint(activeTransaction.autoSavepoint); + activeTransaction.autoSavepoint = null; + } + CURRENT_ACTIVE_TRANSACTION.remove(); + } + CURRENT_ACTIVE_TRANSACTION.set(this); + } + } + + /** + * Registers a {@link ResultSet} on this transaction that must be checked during a retry, and + * returns a retryable {@link ResultSet}. 
+ */ + private ResultSet createAndAddRetryResultSet( + ProtobufResultSet resultSet, + ParsedStatement statement, + AnalyzeMode analyzeMode, + QueryOption... options) { + if (retryAbortsInternally) { + ChecksumResultSet checksumResultSet = + createChecksumResultSet(resultSet, statement, analyzeMode, options); + addRetryStatement(checksumResultSet); + return checksumResultSet; + } + return resultSet; + } + + /** Registers the statement as a query that should return an error during a retry. */ + private void createAndAddFailedQuery( + SpannerException e, + ParsedStatement statement, + AnalyzeMode analyzeMode, + QueryOption... options) { + if (retryAbortsInternally) { + addRetryStatement(new FailedQuery(this, e, statement, analyzeMode, options)); + } + } + + private void createAndAddRetriableUpdate( + ParsedStatement update, AnalyzeMode analyzeMode, long updateCount, UpdateOption... options) { + if (retryAbortsInternally) { + addRetryStatement(new RetriableUpdate(this, update, analyzeMode, updateCount, options)); + } + } + + private void createAndAddRetriableBatchUpdate( + Iterable updates, long[] updateCounts, UpdateOption... options) { + if (retryAbortsInternally) { + addRetryStatement(new RetriableBatchUpdate(this, updates, updateCounts, options)); + } + } + + /** Registers the statement as an update that should return an error during a retry. */ + private void createAndAddFailedUpdate(SpannerException e, ParsedStatement update) { + if (retryAbortsInternally) { + addRetryStatement(new FailedUpdate(this, e, update)); + } + } + + /** Registers the statements as a batch of updates that should return an error during a retry. */ + private void createAndAddFailedBatchUpdate(SpannerException e, Iterable updates) { + if (retryAbortsInternally) { + addRetryStatement(new FailedBatchUpdate(this, e, updates)); + } + } + + /** + * Adds a statement to the list of statements that should be retried if this transaction aborts. 
+ */ + private void addRetryStatement(RetriableStatement statement) { + Preconditions.checkState( + retryAbortsInternally, "retryAbortsInternally is not enabled for this transaction"); + statements.add(statement); + } + + /** + * Handles an aborted exception by checking whether the transaction may be retried internally, and + * if so, does the retry. If retry is not allowed, or if the retry fails, the method will throw an + * {@link AbortedException}. + */ + private void handleAborted(AbortedException aborted) { + if (transactionRetryAttempts >= maxInternalRetries) { + // If the same statement in transaction keeps aborting, then we need to abort here. + span.addEvent("Internal retry attempts exceeded"); + throwAbortWithRetryAttemptsExceeded(); + } else if (retryAbortsInternally) { + logger.fine(toString() + ": Starting internal transaction retry"); + while (true) { + // First back off and then restart the transaction. + long delay = aborted.getRetryDelayInMillis(); + span.addEvent( + "Transaction aborted. Backing off for " + delay + " milliseconds and retrying."); + span.setAttribute(TRANSACTION_RETRIED, true); + try { + if (delay > 0L) { + //noinspection BusyWait + Thread.sleep(delay); + } else if (aborted.isEmulatorOnlySupportsOneTransactionException()) { + //noinspection BusyWait + Thread.sleep(ThreadLocalRandom.current().nextInt(1, 5)); + } + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.CANCELLED, "The statement was cancelled"); + } + try { + if (aborted.getCause() instanceof RollbackToSavepointException) { + if (txManager != null) { + txManager.close(); + } + txManager = dbClient.transactionManager(transactionOptions); + txContextFuture = ApiFutures.immediateFuture(txManager.begin()); + } else { + txContextFuture = ApiFutures.immediateFuture(txManager.resetForRetry()); + } + // Inform listeners about the transaction retry that is about to start. 
+ invokeTransactionRetryListenersOnStart(); + // Then retry all transaction statements. + transactionRetryAttempts++; + for (RetriableStatement statement : statements) { + statement.retry(aborted); + } + successfulRetries++; + invokeTransactionRetryListenersOnFinish(RetryResult.RETRY_SUCCESSFUL); + logger.fine( + toString() + + ": Internal transaction retry succeeded. Starting retry of original" + + " statement."); + // Retry succeeded, return and continue the original transaction. + break; + } catch (AbortedDueToConcurrentModificationException e) { + // Retry failed because of a concurrent modification, we have to abort. + invokeTransactionRetryListenersOnFinish( + RetryResult.RETRY_ABORTED_DUE_TO_CONCURRENT_MODIFICATION); + logger.fine( + toString() + ": Internal transaction retry aborted due to a concurrent modification"); + // Do a shoot and forget rollback. + try { + txManager.rollback(); + } catch (Throwable t) { + // ignore + } + this.state = UnitOfWorkState.ABORTED; + this.abortedException = e; + throw e; + } catch (AbortedException abortedExceptionDuringRetry) { + // Retry aborted, do another retry of the transaction. + if (transactionRetryAttempts >= maxInternalRetries) { + throwAbortWithRetryAttemptsExceeded(); + } + invokeTransactionRetryListenersOnFinish(RetryResult.RETRY_ABORTED_AND_RESTARTING); + logger.fine(toString() + ": Internal transaction retry aborted, trying again"); + // Use the new aborted exception to determine both the backoff delay and how to handle + // the retry. + aborted = abortedExceptionDuringRetry; + } catch (SpannerException e) { + // unexpected exception + logger.log( + Level.FINE, + toString() + ": Internal transaction retry failed due to an unexpected exception", + e); + // Do a shoot and forget rollback. + try { + txManager.rollback(); + } catch (Throwable t) { + // ignore + } + // Set transaction state to aborted as the retry failed. 
+ this.state = UnitOfWorkState.ABORTED; + this.abortedException = aborted; + // Re-throw underlying exception. + throw e; + } + } + } else { + try { + txManager.close(); + } catch (Throwable t) { + // ignore + } + // Internal retry is not enabled. + this.state = UnitOfWorkState.ABORTED; + this.abortedException = aborted; + throw aborted; + } + } + + private void throwAbortWithRetryAttemptsExceeded() throws SpannerException { + invokeTransactionRetryListenersOnFinish(RetryResult.RETRY_ABORTED_AND_MAX_ATTEMPTS_EXCEEDED); + logger.fine( + toString() + + ": Internal transaction retry aborted and max number of retry attempts has been" + + " exceeded"); + // Try to rollback the transaction and ignore any exceptions. + // Normally it should not be necessary to do this, but in order to be sure we never leak + // any sessions it is better to do so. + try { + txManager.rollback(); + } catch (Throwable t) { + // ignore + } + this.state = UnitOfWorkState.ABORTED; + this.abortedException = + (AbortedException) + SpannerExceptionFactory.newSpannerException( + ErrorCode.ABORTED, MAX_INTERNAL_RETRIES_EXCEEDED); + throw this.abortedException; + } + + private void invokeTransactionRetryListenersOnStart() { + for (TransactionRetryListener listener : transactionRetryListeners) { + listener.retryStarting(transactionStarted, transactionId, transactionRetryAttempts); + } + } + + private void invokeTransactionRetryListenersOnFinish(RetryResult result) { + for (TransactionRetryListener listener : transactionRetryListeners) { + listener.retryFinished(transactionStarted, transactionId, transactionRetryAttempts, result); + } + } + + private final Callable rollbackCallable = + new Callable() { + @Override + public Void call() { + try { + if (state != UnitOfWorkState.ABORTED && rolledBackToSavepointException == null) { + // Make sure the transaction has actually started before we try to rollback. 
+ get(txContextFuture); + txManager.rollback(); + } + return null; + } finally { + txManager.close(); + } + } + }; + + @Override + public ApiFuture rollbackAsync( + @Nonnull CallType callType, @Nonnull EndTransactionCallback callback) { + try (Scope ignore = span.makeCurrent()) { + callback.onSuccess(); + return rollbackAsync(callType, true); + } catch (Throwable throwable) { + callback.onFailure(); + throw throwable; + } + } + + private ApiFuture rollbackAsync(CallType callType, boolean updateStatusAndEndSpan) { + ConnectionPreconditions.checkState( + state == UnitOfWorkState.STARTED || state == UnitOfWorkState.ABORTED, + "This transaction has status " + state.name()); + cancelScheduledKeepAlivePing(); + if (updateStatusAndEndSpan) { + state = UnitOfWorkState.ROLLED_BACK; + } + if (txContextFuture != null && state != UnitOfWorkState.ABORTED) { + ApiFuture result = + executeStatementAsync( + callType, ROLLBACK_STATEMENT, rollbackCallable, SpannerGrpc.getRollbackMethod()); + if (updateStatusAndEndSpan) { + // Note: We end the transaction span after executing the rollback to include the rollback in + // the transaction span. Even though both methods are executed asynchronously, they are both + // executed using the same single-threaded executor, meaning that the span will only be + // ended after the rollback has finished. 
+ asyncEndUnitOfWorkSpan(); + } + return result; + } else if (updateStatusAndEndSpan) { + return asyncEndUnitOfWorkSpan(); + } else { + return ApiFutures.immediateFuture(null); + } + } + + @Override + public void resetForRetry() { + txContextFuture = ApiFutures.immediateFuture(txManager.resetForRetry()); + } + + @Override + String getUnitOfWorkName() { + return "read/write transaction"; + } + + @Nullable + @Override + Deadline getTransactionDeadline() { + return this.deadline; + } + + static class ReadWriteSavepoint extends Savepoint { + private final int statementPosition; + private final int mutationPosition; + + ReadWriteSavepoint(String name, int statementPosition, int mutationPosition) { + this(name, statementPosition, mutationPosition, false); + } + + ReadWriteSavepoint( + String name, int statementPosition, int mutationPosition, boolean autoSavepoint) { + super(name, autoSavepoint); + this.statementPosition = statementPosition; + this.mutationPosition = mutationPosition; + } + + @Override + int getStatementPosition() { + return this.statementPosition; + } + + @Override + int getMutationPosition() { + return this.mutationPosition; + } + } + + @Override + Savepoint savepoint(String name) { + return new ReadWriteSavepoint(name, statements.size(), mutations.size()); + } + + private Savepoint createAutoSavepoint() { + return new ReadWriteSavepoint(AUTO_SAVEPOINT_NAME, statements.size(), mutations.size(), true); + } + + @Override + void rollbackToSavepoint(Savepoint savepoint) { + try (Scope ignore = span.makeCurrent()) { + get(rollbackAsync(CallType.SYNC, false)); + // Mark the state of the transaction as rolled back to a savepoint. This will ensure that the + // transaction will retry the next time a statement is actually executed. 
+ this.rolledBackToSavepointException = + (AbortedException) + SpannerExceptionFactory.newSpannerException( + ErrorCode.ABORTED, + "Transaction has been rolled back to a savepoint", + new RollbackToSavepointException(savepoint)); + // Clear all statements and mutations after the savepoint. + this.statements.subList(savepoint.getStatementPosition(), this.statements.size()).clear(); + this.mutations.subList(savepoint.getMutationPosition(), this.mutations.size()).clear(); + } + } + + /** + * A retriable statement is a query or DML statement during a read/write transaction that can be + * retried if the original transaction aborted. + */ + interface RetriableStatement { + /** + * Retry this statement in a new transaction. Throws an {@link + * AbortedDueToConcurrentModificationException} if the retry could not successfully be executed + * because of an actual concurrent modification of the underlying data. This {@link + * AbortedDueToConcurrentModificationException} cannot be retried. + */ + void retry(AbortedException aborted) throws AbortedException; + } + + /** Creates a {@link ChecksumResultSet} for this {@link ReadWriteTransaction}. */ + @VisibleForTesting + ChecksumResultSet createChecksumResultSet( + ProtobufResultSet delegate, + ParsedStatement statement, + AnalyzeMode analyzeMode, + QueryOption... 
options) { + return new ChecksumResultSet(this, delegate, statement, analyzeMode, options); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReplaceableForwardingResultSet.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReplaceableForwardingResultSet.java new file mode 100644 index 000000000000..8a73318c8801 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReplaceableForwardingResultSet.java @@ -0,0 +1,605 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Interval; +import com.google.cloud.spanner.ProtobufResultSet; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Value; +import com.google.common.base.Preconditions; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ProtocolMessageEnum; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import java.math.BigDecimal; +import java.util.List; +import java.util.UUID; +import java.util.function.Function; + +/** + * Forwarding implementation of {@link ResultSet} that forwards all calls to a delegate that can be + * replaced. This is used by the JDBC Driver when a read/write transaction is successfully retried. + * Any {@link ResultSet} that is open during a transaction retry, must be replaced by a result set + * that is fetched using the new transaction. This is achieved by wrapping the returned result sets + * in a {@link ReplaceableForwardingResultSet} that replaces its delegate after a transaction retry. + */ +class ReplaceableForwardingResultSet implements ProtobufResultSet { + private ResultSet delegate; + private boolean closed; + + ReplaceableForwardingResultSet(ResultSet delegate) { + this.delegate = Preconditions.checkNotNull(delegate); + } + + /** Replace the underlying delegate {@link ResultSet} with a new one. 
*/ + void replaceDelegate(ResultSet delegate) { + Preconditions.checkNotNull(delegate); + checkClosed(); + if (this.delegate != null) { + this.delegate.close(); + } + this.delegate = delegate; + } + + protected ResultSet getDelegate() { + return this.delegate; + } + + private void checkClosed() { + if (closed) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "This ResultSet is closed"); + } + } + + boolean isClosed() { + return closed; + } + + @Override + public boolean next() throws SpannerException { + checkClosed(); + return delegate.next(); + } + + @Override + public boolean canGetProtobufValue(int columnIndex) { + return !closed + && delegate instanceof ProtobufResultSet + && ((ProtobufResultSet) delegate).canGetProtobufValue(columnIndex); + } + + @Override + public com.google.protobuf.Value getProtobufValue(int columnIndex) { + checkClosed(); + Preconditions.checkState( + delegate instanceof ProtobufResultSet, "The result set does not support protobuf values"); + return ((ProtobufResultSet) getDelegate()).getProtobufValue(columnIndex); + } + + @Override + public Struct getCurrentRowAsStruct() { + checkClosed(); + return delegate.getCurrentRowAsStruct(); + } + + @Override + public void close() { + if (delegate != null) { + delegate.close(); + delegate = null; + } + closed = true; + } + + @Override + public ResultSetStats getStats() { + checkClosed(); + return delegate.getStats(); + } + + @Override + public ResultSetMetadata getMetadata() { + return delegate.getMetadata(); + } + + @Override + public Type getType() { + checkClosed(); + return delegate.getType(); + } + + @Override + public int getColumnCount() { + checkClosed(); + return delegate.getColumnCount(); + } + + @Override + public int getColumnIndex(String columnName) { + checkClosed(); + return delegate.getColumnIndex(columnName); + } + + @Override + public Type getColumnType(int columnIndex) { + checkClosed(); + return delegate.getColumnType(columnIndex); + } + + 
@Override + public Type getColumnType(String columnName) { + checkClosed(); + return delegate.getColumnType(columnName); + } + + @Override + public boolean isNull(int columnIndex) { + checkClosed(); + return delegate.isNull(columnIndex); + } + + @Override + public boolean isNull(String columnName) { + checkClosed(); + return delegate.isNull(columnName); + } + + @Override + public boolean getBoolean(int columnIndex) { + checkClosed(); + return delegate.getBoolean(columnIndex); + } + + @Override + public boolean getBoolean(String columnName) { + checkClosed(); + return delegate.getBoolean(columnName); + } + + @Override + public long getLong(int columnIndex) { + checkClosed(); + return delegate.getLong(columnIndex); + } + + @Override + public long getLong(String columnName) { + checkClosed(); + return delegate.getLong(columnName); + } + + @Override + public float getFloat(int columnIndex) { + checkClosed(); + return delegate.getFloat(columnIndex); + } + + @Override + public float getFloat(String columnName) { + checkClosed(); + return delegate.getFloat(columnName); + } + + @Override + public double getDouble(int columnIndex) { + checkClosed(); + return delegate.getDouble(columnIndex); + } + + @Override + public double getDouble(String columnName) { + checkClosed(); + return delegate.getDouble(columnName); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex) { + checkClosed(); + return delegate.getBigDecimal(columnIndex); + } + + @Override + public BigDecimal getBigDecimal(String columnName) { + checkClosed(); + return delegate.getBigDecimal(columnName); + } + + @Override + public String getString(int columnIndex) { + checkClosed(); + return delegate.getString(columnIndex); + } + + @Override + public String getString(String columnName) { + checkClosed(); + return delegate.getString(columnName); + } + + @Override + public String getJson(int columnIndex) { + checkClosed(); + return delegate.getJson(columnIndex); + } + + @Override + public String 
getJson(String columnName) { + checkClosed(); + return delegate.getJson(columnName); + } + + @Override + public String getPgJsonb(int columnIndex) { + checkClosed(); + return delegate.getPgJsonb(columnIndex); + } + + @Override + public String getPgJsonb(String columnName) { + checkClosed(); + return delegate.getPgJsonb(columnName); + } + + @Override + public ByteArray getBytes(int columnIndex) { + checkClosed(); + return delegate.getBytes(columnIndex); + } + + @Override + public ByteArray getBytes(String columnName) { + checkClosed(); + return delegate.getBytes(columnName); + } + + @Override + public Timestamp getTimestamp(int columnIndex) { + checkClosed(); + return delegate.getTimestamp(columnIndex); + } + + @Override + public Timestamp getTimestamp(String columnName) { + checkClosed(); + return delegate.getTimestamp(columnName); + } + + @Override + public Date getDate(int columnIndex) { + checkClosed(); + return delegate.getDate(columnIndex); + } + + @Override + public UUID getUuid(int columnIndex) { + checkClosed(); + return delegate.getUuid(columnIndex); + } + + @Override + public Date getDate(String columnName) { + checkClosed(); + return delegate.getDate(columnName); + } + + @Override + public UUID getUuid(String columnName) { + checkClosed(); + return delegate.getUuid(columnName); + } + + @Override + public Interval getInterval(int columnIndex) { + checkClosed(); + return delegate.getInterval(columnIndex); + } + + @Override + public Interval getInterval(String columnName) { + checkClosed(); + return delegate.getInterval(columnName); + } + + @Override + public Value getValue(int columnIndex) { + checkClosed(); + return delegate.getValue(columnIndex); + } + + @Override + public Value getValue(String columnName) { + checkClosed(); + return delegate.getValue(columnName); + } + + @Override + public boolean[] getBooleanArray(int columnIndex) { + checkClosed(); + return delegate.getBooleanArray(columnIndex); + } + + @Override + public boolean[] 
getBooleanArray(String columnName) { + checkClosed(); + return delegate.getBooleanArray(columnName); + } + + @Override + public List getBooleanList(int columnIndex) { + checkClosed(); + return delegate.getBooleanList(columnIndex); + } + + @Override + public List getBooleanList(String columnName) { + checkClosed(); + return delegate.getBooleanList(columnName); + } + + @Override + public long[] getLongArray(int columnIndex) { + checkClosed(); + return delegate.getLongArray(columnIndex); + } + + @Override + public long[] getLongArray(String columnName) { + checkClosed(); + return delegate.getLongArray(columnName); + } + + @Override + public List getLongList(int columnIndex) { + checkClosed(); + return delegate.getLongList(columnIndex); + } + + @Override + public List getLongList(String columnName) { + checkClosed(); + return delegate.getLongList(columnName); + } + + @Override + public float[] getFloatArray(int columnIndex) { + checkClosed(); + return delegate.getFloatArray(columnIndex); + } + + @Override + public float[] getFloatArray(String columnName) { + checkClosed(); + return delegate.getFloatArray(columnName); + } + + @Override + public List getFloatList(int columnIndex) { + checkClosed(); + return delegate.getFloatList(columnIndex); + } + + @Override + public List getFloatList(String columnName) { + checkClosed(); + return delegate.getFloatList(columnName); + } + + @Override + public double[] getDoubleArray(int columnIndex) { + checkClosed(); + return delegate.getDoubleArray(columnIndex); + } + + @Override + public double[] getDoubleArray(String columnName) { + checkClosed(); + return delegate.getDoubleArray(columnName); + } + + @Override + public List getDoubleList(int columnIndex) { + checkClosed(); + return delegate.getDoubleList(columnIndex); + } + + @Override + public List getDoubleList(String columnName) { + checkClosed(); + return delegate.getDoubleList(columnName); + } + + @Override + public List getBigDecimalList(int columnIndex) { + checkClosed(); + 
return delegate.getBigDecimalList(columnIndex); + } + + @Override + public List getBigDecimalList(String columnName) { + checkClosed(); + return delegate.getBigDecimalList(columnName); + } + + @Override + public List getStringList(int columnIndex) { + checkClosed(); + return delegate.getStringList(columnIndex); + } + + @Override + public List getStringList(String columnName) { + checkClosed(); + return delegate.getStringList(columnName); + } + + @Override + public List getJsonList(int columnIndex) { + checkClosed(); + return delegate.getJsonList(columnIndex); + } + + @Override + public List getJsonList(String columnName) { + checkClosed(); + return delegate.getJsonList(columnName); + } + + @Override + public List getPgJsonbList(int columnIndex) { + checkClosed(); + return delegate.getPgJsonbList(columnIndex); + } + + @Override + public List getPgJsonbList(String columnName) { + checkClosed(); + return delegate.getPgJsonbList(columnName); + } + + @Override + public List getBytesList(int columnIndex) { + checkClosed(); + return delegate.getBytesList(columnIndex); + } + + @Override + public List getBytesList(String columnName) { + checkClosed(); + return delegate.getBytesList(columnName); + } + + @Override + public List getTimestampList(int columnIndex) { + checkClosed(); + return delegate.getTimestampList(columnIndex); + } + + @Override + public List getTimestampList(String columnName) { + checkClosed(); + return delegate.getTimestampList(columnName); + } + + @Override + public List getDateList(int columnIndex) { + checkClosed(); + return delegate.getDateList(columnIndex); + } + + @Override + public List getDateList(String columnName) { + checkClosed(); + return delegate.getDateList(columnName); + } + + @Override + public List getUuidList(int columnIndex) { + checkClosed(); + return delegate.getUuidList(columnIndex); + } + + @Override + public List getUuidList(String columnName) { + checkClosed(); + return delegate.getUuidList(columnName); + } + + @Override + public 
List getIntervalList(int columnIndex) { + checkClosed(); + return delegate.getIntervalList(columnIndex); + } + + @Override + public List getIntervalList(String columnName) { + checkClosed(); + return delegate.getIntervalList(columnName); + } + + @Override + public List getProtoMessageList(int columnIndex, T message) { + checkClosed(); + return delegate.getProtoMessageList(columnIndex, message); + } + + @Override + public List getProtoMessageList(String columnName, T message) { + checkClosed(); + return delegate.getProtoMessageList(columnName, message); + } + + @Override + public List getProtoEnumList( + int columnIndex, Function method) { + checkClosed(); + return delegate.getProtoEnumList(columnIndex, method); + } + + @Override + public List getProtoEnumList( + String columnName, Function method) { + checkClosed(); + return delegate.getProtoEnumList(columnName, method); + } + + @Override + public List getStructList(int columnIndex) { + checkClosed(); + return delegate.getStructList(columnIndex); + } + + @Override + public List getStructList(String columnName) { + checkClosed(); + return delegate.getStructList(columnName); + } + + @Override + public T getProtoMessage(int columnIndex, T message) { + checkClosed(); + return delegate.getProtoMessage(columnIndex, message); + } + + @Override + public T getProtoMessage(String columnName, T message) { + checkClosed(); + return delegate.getProtoMessage(columnName, message); + } + + @Override + public T getProtoEnum( + int columnIndex, Function method) { + checkClosed(); + return delegate.getProtoEnum(columnIndex, method); + } + + @Override + public T getProtoEnum( + String columnName, Function method) { + checkClosed(); + return delegate.getProtoEnum(columnName, method); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/RetriableBatchUpdate.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/RetriableBatchUpdate.java new file mode 
100644 index 000000000000..364346fd1d44 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/RetriableBatchUpdate.java @@ -0,0 +1,75 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.AbstractStatementParser.RUN_BATCH_STATEMENT; + +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.Options.UpdateOption; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.ReadWriteTransaction.RetriableStatement; +import com.google.common.base.Preconditions; +import java.util.Arrays; + +/** + * Retriable batch of DML statements. The check whether the statements had the same effect during + * retry is done by comparing the number of records affected. + */ +final class RetriableBatchUpdate implements RetriableStatement { + private final ReadWriteTransaction transaction; + private final Iterable statements; + private final long[] updateCounts; + private final UpdateOption[] options; + + RetriableBatchUpdate( + ReadWriteTransaction transaction, + Iterable statements, + long[] updateCounts, + UpdateOption... 
options) { + Preconditions.checkNotNull(transaction); + Preconditions.checkNotNull(statements); + this.transaction = transaction; + this.statements = statements; + this.updateCounts = updateCounts; + this.options = options; + } + + @Override + public void retry(AbortedException aborted) throws AbortedException { + long[] newCount; + try { + transaction + .getStatementExecutor() + .invokeInterceptors( + RUN_BATCH_STATEMENT, StatementExecutionStep.RETRY_STATEMENT, transaction); + newCount = transaction.getTransactionContext().batchUpdate(statements, options); + } catch (AbortedException e) { + // Just re-throw the AbortedException and let the retry logic determine whether another try + // should be executed or not. + throw e; + } catch (SpannerException e) { + // Unexpected database error that is different from the original transaction. + throw SpannerExceptionFactory.newAbortedDueToConcurrentModificationException(aborted, e); + } + if (newCount == null || !Arrays.equals(updateCounts, newCount)) { + // The update counts do not match, we cannot retry the transaction. + throw SpannerExceptionFactory.newAbortedDueToConcurrentModificationException(aborted); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/RetriableUpdate.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/RetriableUpdate.java new file mode 100644 index 000000000000..c5488f1ce989 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/RetriableUpdate.java @@ -0,0 +1,81 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.Options.UpdateOption; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.ReadWriteTransaction.RetriableStatement; +import com.google.common.base.Preconditions; + +/** + * Retriable DML statement. The check whether the statement had the same effect during retry is done + * by comparing the number of records affected. + */ +final class RetriableUpdate implements RetriableStatement { + private final ReadWriteTransaction transaction; + private final ParsedStatement statement; + private final AnalyzeMode analyzeMode; + private final long updateCount; + private final UpdateOption[] options; + + RetriableUpdate( + ReadWriteTransaction transaction, + ParsedStatement statement, + AnalyzeMode analyzeMode, + long updateCount, + UpdateOption... 
options) { + this.transaction = Preconditions.checkNotNull(transaction); + this.statement = Preconditions.checkNotNull(statement); + this.analyzeMode = Preconditions.checkNotNull(analyzeMode); + this.updateCount = updateCount; + this.options = options; + } + + @Override + public void retry(AbortedException aborted) throws AbortedException { + long newCount = -1; + try { + transaction + .getStatementExecutor() + .invokeInterceptors(statement, StatementExecutionStep.RETRY_STATEMENT, transaction); + if (analyzeMode == AnalyzeMode.NONE) { + newCount = + transaction.getTransactionContext().executeUpdate(statement.getStatement(), options); + } else { + newCount = + transaction + .getTransactionContext() + .analyzeUpdate(statement.getStatement(), analyzeMode.getQueryAnalyzeMode()) + .getRowCountExact(); + } + } catch (AbortedException e) { + // Just re-throw the AbortedException and let the retry logic determine whether another try + // should be executed or not. + throw e; + } catch (SpannerException e) { + // Unexpected database error that is different from the original transaction. + throw SpannerExceptionFactory.newAbortedDueToConcurrentModificationException(aborted, e); + } + if (newCount != updateCount) { + // The update counts do not match, we cannot retry the transaction. + throw SpannerExceptionFactory.newAbortedDueToConcurrentModificationException(aborted); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SavepointSupport.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SavepointSupport.java new file mode 100644 index 000000000000..37882e488303 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SavepointSupport.java @@ -0,0 +1,49 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +/** Option value used for determining the behavior of savepoints. */ +public enum SavepointSupport { + /** + * Savepoints are enabled and can be used on the connection. Rolling back to a savepoint will + * trigger a retry of the transaction up to the point where the savepoint was set. + */ + ENABLED, + /** + * Savepoints are enabled and can be used on the connection. Rolling back to a savepoint will not + * trigger a retry. Further attempts to use a read/write transaction after a rollback to savepoint + * will fail. This mode can be used for frameworks that require savepoint support, for example if + * the framework automatically creates a savepoint for a specific feature, but that do not need to + * support rolling back to a savepoint. This value should also be used for transactions that + * return non-deterministic data, for example auto-generated primary key values, as retries would + * always fail. This option will not affect rolling back to a savepoint in a read-only + * transaction, as those transactions do not require a retry after rolling back to a savepoint. + */ + FAIL_AFTER_ROLLBACK, + /** Savepoints are disabled. Any attempt to create a savepoint will fail. */ + DISABLED { + @Override + public boolean isSavepointCreationAllowed() { + return false; + } + }; + + /** Returns true if this mode allows the creation of savepoints. 
*/ + public boolean isSavepointCreationAllowed() { + return true; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SimpleParser.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SimpleParser.java new file mode 100644 index 000000000000..bfcb48f99a56 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SimpleParser.java @@ -0,0 +1,410 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.AbstractStatementParser.ASTERISK; +import static com.google.cloud.spanner.connection.AbstractStatementParser.DASH; +import static com.google.cloud.spanner.connection.AbstractStatementParser.HYPHEN; +import static com.google.cloud.spanner.connection.AbstractStatementParser.SLASH; + +import com.google.cloud.spanner.Dialect; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import java.util.Objects; + +/** A very simple token-based parser for extracting relevant information from SQL strings. */ +class SimpleParser { + /** + * An immutable result from a parse action indicating whether the parse action was successful, and + * if so, what the value was. 
+ */ + static class Result { + static final Result NOT_FOUND = new Result(null, false); + + static Result found(String value) { + return new Result(Preconditions.checkNotNull(value), false); + } + + static Result found(String value, boolean inParenthesis) { + return new Result(Preconditions.checkNotNull(value), inParenthesis); + } + + private final String value; + + private final boolean inParenthesis; + + private Result(String value, boolean inParenthesis) { + this.value = value; + this.inParenthesis = inParenthesis; + } + + @Override + public int hashCode() { + return Objects.hashCode(this.value); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof Result)) { + return false; + } + return Objects.equals(this.value, ((Result) o).value) + && Objects.equals(this.inParenthesis, ((Result) o).inParenthesis); + } + + @Override + public String toString() { + if (isValid()) { + return this.value; + } + return "NOT FOUND"; + } + + boolean isValid() { + return this.value != null; + } + + String getValue() { + return this.value; + } + + boolean isInParenthesis() { + return this.inParenthesis; + } + } + + // TODO: Replace this with a direct reference to the dialect, and move the isXYZSupported methods + // from the AbstractStatementParser class to the Dialect class. + private final AbstractStatementParser statementParser; + + private final String sql; + + // TODO: Use this length field instead of repeatedly calling sql.length() + private final int length; + + private final boolean treatHintCommentsAsTokens; + + private int pos; + + /** Constructs a simple parser for the given SQL string and dialect. */ + SimpleParser(Dialect dialect, String sql) { + this(dialect, sql, 0, /* treatHintCommentsAsTokens= */ false); + } + + /** + * Constructs a simple parser for the given SQL string and dialect. + * treatHintCommentsAsTokens indicates whether comments that start with '/*@' should be + * treated as tokens or not. 
This option may only be enabled if the dialect is PostgreSQL. + */ + SimpleParser(Dialect dialect, String sql, int pos, boolean treatHintCommentsAsTokens) { + Preconditions.checkArgument( + !(treatHintCommentsAsTokens && dialect != Dialect.POSTGRESQL), + "treatHintCommentsAsTokens can only be enabled for PostgreSQL"); + this.sql = sql; + this.length = sql.length(); + this.pos = pos; + this.statementParser = AbstractStatementParser.getInstance(dialect); + this.treatHintCommentsAsTokens = treatHintCommentsAsTokens; + } + + Dialect getDialect() { + return this.statementParser.getDialect(); + } + + String getSql() { + return this.sql; + } + + int getPos() { + return this.pos; + } + + void skipHint() { + // We don't need to do anything special for PostgreSQL, as hints in PostgreSQL are inside + // comments and comments are automatically skipped by all methods. + if (getDialect() == Dialect.GOOGLE_STANDARD_SQL && eatTokens('@', '{')) { + while (pos < length && !eatToken('}')) { + pos = statementParser.skip(sql, pos, /* result= */ null); + } + } + } + + Result eatNextKeyword() { + skipHint(); + boolean inParenthesis = false; + while (pos < length && eatToken('(')) { + inParenthesis = true; + } + return eatKeyword(inParenthesis); + } + + /** Returns true if this parser has more tokens. Advances the position to the first next token. */ + boolean hasMoreTokens() { + skipWhitespaces(); + return pos < sql.length(); + } + + /** Eats and returns the keyword at the current position. */ + Result eatKeyword() { + return eatKeyword(false); + } + + /** + * Eats and returns the keyword at the current position and returns a result that indicates that + * the keyword is inside one or more parentheses. 
+ */ + Result eatKeyword(boolean inParenthesis) { + if (!hasMoreTokens()) { + return Result.NOT_FOUND; + } + if (!Character.isLetter(sql.charAt(pos))) { + return Result.NOT_FOUND; + } + int startPos = pos; + while (pos < length && Character.isLetter(sql.charAt(pos))) { + pos++; + } + return Result.found(sql.substring(startPos, pos), inParenthesis); + } + + /** + * Eats and returns the identifier at the current position. This implementation does not support + * quoted identifiers. + */ + Result eatIdentifier() { + // TODO: Implement support for quoted identifiers. + // TODO: Implement support for identifiers with multiple parts (e.g. my_schema.my_table). + if (!hasMoreTokens()) { + return Result.NOT_FOUND; + } + if (!isValidIdentifierFirstChar(sql.charAt(pos))) { + return Result.NOT_FOUND; + } + int startPos = pos; + while (pos < sql.length() && isValidIdentifierChar(sql.charAt(pos))) { + pos++; + } + return Result.found(sql.substring(startPos, pos)); + } + + /** + * Eats a single-quoted string. This implementation currently does not support escape sequences. + */ + Result eatSingleQuotedString() { + if (!eatToken('\'')) { + return Result.NOT_FOUND; + } + int startPos = pos; + while (pos < sql.length() && sql.charAt(pos) != '\'') { + if (sql.charAt(pos) == '\n') { + return Result.NOT_FOUND; + } + pos++; + } + if (pos == sql.length()) { + return Result.NOT_FOUND; + } + return Result.found(sql.substring(startPos, pos++)); + } + + boolean peekTokens(char... tokens) { + return internalEatTokens(/* updatePos= */ false, tokens); + } + + /** + * Returns true if the next tokens in the SQL string are equal to the given tokens, and advances + * the position of the parser to after the tokens. The position is not changed if the next tokens + * are not equal to the list of tokens. + */ + boolean eatTokens(char... 
tokens) { + return internalEatTokens(/* updatePos= */ true, tokens); + } + + /** + * Returns true if the next tokens in the SQL string are equal to the given tokens, and advances + * the position of the parser to after the tokens if updatePos is true. The position is not + * changed if the next tokens are not equal to the list of tokens, or if updatePos is false. + */ + private boolean internalEatTokens(boolean updatePos, char... tokens) { + int currentPos = pos; + for (char token : tokens) { + if (!eatToken(token)) { + pos = currentPos; + return false; + } + } + if (!updatePos) { + pos = currentPos; + } + return true; + } + + /** + * Returns true if the next token is equal to the given character, but does not advance the + * position of the parser. + */ + boolean peekToken(char token) { + int currentPos = pos; + boolean res = eatToken(token); + pos = currentPos; + return res; + } + + /** + * Returns true and advances the position of the parser if the next token is equal to the given + * character. + */ + boolean eatToken(char token) { + skipWhitespaces(); + if (pos < sql.length() && sql.charAt(pos) == token) { + pos++; + return true; + } + return false; + } + + boolean eatKeyword(String... keywords) { + return eat(true, true, keywords); + } + + boolean eat(boolean skipWhitespaceBefore, boolean requireWhitespaceAfter, String... 
keywords) { + boolean result = true; + for (String keyword : keywords) { + result &= internalEat(keyword, skipWhitespaceBefore, requireWhitespaceAfter, true); + } + return result; + } + + private boolean internalEat( + String keyword, + boolean skipWhitespaceBefore, + boolean requireWhitespaceAfter, + boolean updatePos) { + int originalPos = pos; + if (skipWhitespaceBefore) { + skipWhitespaces(); + } + if (pos + keyword.length() > sql.length()) { + if (!updatePos) { + pos = originalPos; + } + return false; + } + if (sql.substring(pos, pos + keyword.length()).equalsIgnoreCase(keyword) + && (!requireWhitespaceAfter || isValidEndOfKeyword(pos + keyword.length()))) { + if (updatePos) { + pos = pos + keyword.length(); + } else { + pos = originalPos; + } + return true; + } + if (!updatePos) { + pos = originalPos; + } + return false; + } + + private boolean isValidEndOfKeyword(int index) { + if (sql.length() == index) { + return true; + } + return !isValidIdentifierChar(sql.charAt(index)); + } + + /** + * Returns true if the given character is valid as the first character of an identifier. That + * means that it can be used as the first character of an unquoted identifier. + */ + static boolean isValidIdentifierFirstChar(char c) { + return Character.isLetter(c) || c == '_'; + } + + /** + * Returns true if the given character is a valid identifier character. That means that it can be + * used in an unquoted identifiers. + */ + static boolean isValidIdentifierChar(char c) { + return isValidIdentifierFirstChar(c) || Character.isDigit(c) || c == '$'; + } + + /** + * Skips all whitespaces, including comments, from the current position and advances the parser to + * the next actual token. 
+ */ + @VisibleForTesting + void skipWhitespaces() { + while (pos < sql.length()) { + if (sql.charAt(pos) == HYPHEN && sql.length() > (pos + 1) && sql.charAt(pos + 1) == HYPHEN) { + skipSingleLineComment(/* prefixLength= */ 2); + } else if (statementParser.supportsHashSingleLineComments() && sql.charAt(pos) == DASH) { + skipSingleLineComment(/* prefixLength= */ 1); + } else if (sql.charAt(pos) == SLASH + && sql.length() > (pos + 1) + && sql.charAt(pos + 1) == ASTERISK) { + if (treatHintCommentsAsTokens && sql.length() > (pos + 2) && sql.charAt(pos + 2) == '@') { + break; + } + skipMultiLineComment(); + } else if (Character.isWhitespace(sql.charAt(pos))) { + pos++; + } else { + break; + } + } + } + + /** + * Skips through a single-line comment from the current position. The single-line comment is + * started by a prefix with the given length (e.g. either '#' or '--'). + */ + @VisibleForTesting + boolean skipSingleLineComment(int prefixLength) { + int endIndex = sql.indexOf('\n', pos + prefixLength); + if (endIndex == -1) { + pos = sql.length(); + return true; + } + pos = endIndex + 1; + return true; + } + + /** Skips through a multi-line comment from the current position. 
*/ + @VisibleForTesting + boolean skipMultiLineComment() { + int level = 1; + pos += 2; + while (pos < sql.length()) { + if (statementParser.supportsNestedComments() + && sql.charAt(pos) == SLASH + && sql.length() > (pos + 1) + && sql.charAt(pos + 1) == ASTERISK) { + level++; + } + if (sql.charAt(pos) == ASTERISK && sql.length() > (pos + 1) && sql.charAt(pos + 1) == SLASH) { + level--; + if (level == 0) { + pos += 2; + return true; + } + } + pos++; + } + pos = sql.length(); + return false; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SingleUseTransaction.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SingleUseTransaction.java new file mode 100644 index 000000000000..cfb13cef966b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SingleUseTransaction.java @@ -0,0 +1,844 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.AbstractStatementParser.COMMIT_STATEMENT; +import static com.google.cloud.spanner.connection.AbstractStatementParser.RUN_BATCH_STATEMENT; +import static com.google.cloud.spanner.connection.ConnectionProperties.AUTOCOMMIT_DML_MODE; +import static com.google.cloud.spanner.connection.ConnectionProperties.DEFAULT_ISOLATION_LEVEL; +import static com.google.cloud.spanner.connection.ConnectionProperties.DEFAULT_SEQUENCE_KIND; +import static com.google.cloud.spanner.connection.ConnectionProperties.MAX_COMMIT_DELAY; +import static com.google.cloud.spanner.connection.ConnectionProperties.READONLY; +import static com.google.cloud.spanner.connection.ConnectionProperties.READ_LOCK_MODE; +import static com.google.cloud.spanner.connection.ConnectionProperties.READ_ONLY_STALENESS; +import static com.google.cloud.spanner.connection.ConnectionProperties.RETURN_COMMIT_STATS; +import static com.google.cloud.spanner.connection.DdlClient.isCreateDatabaseStatement; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.Timestamp; +import com.google.cloud.Tuple; +import com.google.cloud.spanner.BatchClient; +import com.google.cloud.spanner.BatchReadOnlyTransaction; +import com.google.cloud.spanner.CommitResponse; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.Options.QueryUpdateOption; +import com.google.cloud.spanner.Options.UpdateOption; +import com.google.cloud.spanner.PartitionOptions; +import com.google.cloud.spanner.ReadOnlyTransaction; +import 
com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerApiFutures; +import com.google.cloud.spanner.SpannerBatchUpdateException; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.TransactionMutationLimitExceededException; +import com.google.cloud.spanner.TransactionRunner; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterables; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.spanner.admin.database.v1.DatabaseAdminGrpc; +import com.google.spanner.v1.SpannerGrpc; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode; +import io.opentelemetry.context.Scope; +import java.util.Arrays; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.concurrent.atomic.AtomicReference; +import javax.annotation.Nonnull; + +/** + * Transaction that is used when a {@link Connection} is in autocommit mode. Each method on this + * transaction actually starts a new transaction on Spanner. The type of transaction that is started + * depends on the type of statement that is being executed. A {@link SingleUseTransaction} will + * always try to choose the most efficient type of one-time transaction that is available for the + * statement. + * + *

A {@link SingleUseTransaction} can be used to execute any type of statement on Cloud Spanner: + * + *

    + *
  • Client side statements, e.g. SHOW VARIABLE AUTOCOMMIT + *
  • Queries, e.g. SELECT * FROM FOO + *
  • DML statements, e.g. UPDATE FOO SET BAR=1 + *
  • DDL statements, e.g. CREATE TABLE FOO (...) + *
+ */ +class SingleUseTransaction extends AbstractBaseUnitOfWork { + private final DdlClient ddlClient; + private final DatabaseClient dbClient; + private final BatchClient batchClient; + private final ConnectionState connectionState; + private final boolean internalMetadataQuery; + private final byte[] protoDescriptors; + private volatile SettableApiFuture readTimestamp = null; + private volatile TransactionRunner writeTransaction; + private boolean used = false; + private volatile UnitOfWorkState state = UnitOfWorkState.STARTED; + + static class Builder extends AbstractBaseUnitOfWork.Builder { + private DdlClient ddlClient; + private DatabaseClient dbClient; + private BatchClient batchClient; + private ConnectionState connectionState; + private boolean internalMetadataQuery; + private byte[] protoDescriptors; + + private Builder() {} + + Builder setDdlClient(DdlClient ddlClient) { + Preconditions.checkNotNull(ddlClient); + this.ddlClient = ddlClient; + return this; + } + + Builder setDatabaseClient(DatabaseClient client) { + Preconditions.checkNotNull(client); + this.dbClient = client; + return this; + } + + Builder setBatchClient(BatchClient batchClient) { + this.batchClient = Preconditions.checkNotNull(batchClient); + return this; + } + + Builder setConnectionState(ConnectionState connectionState) { + this.connectionState = connectionState; + return this; + } + + Builder setInternalMetadataQuery(boolean internalMetadataQuery) { + this.internalMetadataQuery = internalMetadataQuery; + return this; + } + + Builder setProtoDescriptors(byte[] protoDescriptors) { + this.protoDescriptors = protoDescriptors; + return this; + } + + @Override + SingleUseTransaction build() { + Preconditions.checkState(ddlClient != null, "No DDL client specified"); + Preconditions.checkState(dbClient != null, "No DatabaseClient client specified"); + Preconditions.checkState(batchClient != null, "No BatchClient client specified"); + return new SingleUseTransaction(this); + } + } + + static 
Builder newBuilder() { + return new Builder(); + } + + private SingleUseTransaction(Builder builder) { + super(builder); + this.ddlClient = builder.ddlClient; + this.dbClient = builder.dbClient; + this.batchClient = builder.batchClient; + this.internalMetadataQuery = builder.internalMetadataQuery; + this.protoDescriptors = builder.protoDescriptors; + this.connectionState = builder.connectionState; + } + + @Override + public boolean isSingleUse() { + return true; + } + + @Override + public Type getType() { + return Type.TRANSACTION; + } + + @Override + public UnitOfWorkState getState() { + return state; + } + + @Override + public boolean isActive() { + // Single-use transactions are never active as they can be used only once. + return false; + } + + @Override + public boolean isReadOnly() { + return connectionState.getValue(READONLY).getValue(); + } + + AutocommitDmlMode getAutocommitDmlMode() { + return connectionState.getValue(AUTOCOMMIT_DML_MODE).getValue(); + } + + @Override + public boolean supportsDirectedReads(ParsedStatement parsedStatement) { + return parsedStatement.isQuery(); + } + + private boolean isRetryDmlAsPartitionedDml() { + return getAutocommitDmlMode() + == AutocommitDmlMode.TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC; + } + + private void checkAndMarkUsed() { + Preconditions.checkState(!used, "This single-use transaction has already been used"); + used = true; + } + + @Override + public ApiFuture executeQueryAsync( + final CallType callType, + final ParsedStatement statement, + final AnalyzeMode analyzeMode, + final QueryOption... 
options) { + Preconditions.checkNotNull(statement); + Preconditions.checkArgument( + statement.isQuery() + || (statement.isUpdate() + && (analyzeMode != AnalyzeMode.NONE || statement.hasReturningClause())), + "The statement must be a query, or the statement must be DML and AnalyzeMode must be PLAN" + + " or PROFILE"); + try (Scope ignore = span.makeCurrent()) { + checkAndMarkUsed(); + + if (statement.isUpdate()) { + if (analyzeMode != AnalyzeMode.NONE) { + return analyzeTransactionalUpdateAsync(callType, statement, analyzeMode); + } + // DML with returning clause. + return executeDmlReturningAsync(callType, statement, options); + } + + // Do not use a read-only staleness for internal metadata queries. + final ReadOnlyTransaction currentTransaction = + internalMetadataQuery + ? dbClient.singleUseReadOnlyTransaction() + : dbClient.singleUseReadOnlyTransaction( + connectionState.getValue(READ_ONLY_STALENESS).getValue()); + Callable callable = + () -> { + try { + ResultSet rs; + if (analyzeMode == AnalyzeMode.NONE) { + rs = currentTransaction.executeQuery(statement.getStatement(), options); + } else { + rs = + currentTransaction.analyzeQuery( + statement.getStatement(), analyzeMode.getQueryAnalyzeMode()); + } + // Return a DirectExecuteResultSet, which will directly do a next() call in order to + // ensure that the query is actually sent to Spanner. + ResultSet directRs = DirectExecuteResultSet.ofResultSet(rs); + state = UnitOfWorkState.COMMITTED; + readTimestamp.set(currentTransaction.getReadTimestamp()); + return directRs; + } catch (Throwable t) { + state = UnitOfWorkState.COMMIT_FAILED; + readTimestamp.set(null); + currentTransaction.close(); + throw t; + } + }; + readTimestamp = SettableApiFuture.create(); + return executeStatementAsync( + callType, statement, callable, SpannerGrpc.getExecuteStreamingSqlMethod()); + } + } + + private ApiFuture executeDmlReturningAsync( + CallType callType, final ParsedStatement update, QueryOption... 
options) { + Callable callable = + () -> { + try { + writeTransaction = createWriteTransaction(); + ResultSet resultSet = + writeTransaction.run( + transaction -> + DirectExecuteResultSet.ofResultSet( + transaction.executeQuery( + update.getStatement(), appendLastStatement(options)))); + state = UnitOfWorkState.COMMITTED; + return resultSet; + } catch (Throwable t) { + state = UnitOfWorkState.COMMIT_FAILED; + throw t; + } + }; + return executeStatementAsync( + callType, + update, + callable, + ImmutableList.of(SpannerGrpc.getExecuteSqlMethod(), SpannerGrpc.getCommitMethod())); + } + + @Override + public ApiFuture partitionQueryAsync( + CallType callType, + ParsedStatement query, + PartitionOptions partitionOptions, + QueryOption... options) { + try (Scope ignore = span.makeCurrent()) { + Callable callable = + () -> { + try (BatchReadOnlyTransaction transaction = + batchClient.batchReadOnlyTransaction( + connectionState.getValue(READ_ONLY_STALENESS).getValue())) { + ResultSet resultSet = partitionQuery(transaction, partitionOptions, query, options); + readTimestamp.set(transaction.getReadTimestamp()); + state = UnitOfWorkState.COMMITTED; + return resultSet; + } catch (Throwable throwable) { + state = UnitOfWorkState.COMMIT_FAILED; + readTimestamp.set(null); + throw throwable; + } + }; + readTimestamp = SettableApiFuture.create(); + return executeStatementAsync( + callType, + query, + callable, + ImmutableList.of(SpannerGrpc.getExecuteSqlMethod(), SpannerGrpc.getCommitMethod())); + } + } + + @Override + public Timestamp getReadTimestamp() { + ConnectionPreconditions.checkState( + SpannerApiFutures.getOrNull(readTimestamp) != null, + "There is no read timestamp available for this transaction."); + return SpannerApiFutures.get(readTimestamp); + } + + @Override + public Timestamp getReadTimestampOrNull() { + return SpannerApiFutures.getOrNull(readTimestamp); + } + + private boolean hasCommitResponse() { + return state == UnitOfWorkState.COMMITTED && writeTransaction != 
null; + } + + @Override + public Timestamp getCommitTimestamp() { + ConnectionPreconditions.checkState( + hasCommitResponse(), "There is no commit timestamp available for this transaction."); + return getCommitResponse().getCommitTimestamp(); + } + + @Override + public Timestamp getCommitTimestampOrNull() { + CommitResponse response = getCommitResponseOrNull(); + return response == null ? null : response.getCommitTimestamp(); + } + + @Override + public CommitResponse getCommitResponse() { + ConnectionPreconditions.checkState( + hasCommitResponse(), "There is no commit response available for this transaction."); + return writeTransaction.getCommitResponse(); + } + + @Override + public CommitResponse getCommitResponseOrNull() { + if (hasCommitResponse()) { + try { + return writeTransaction.getCommitResponse(); + } catch (SpannerException e) { + // ignore + } + } + return null; + } + + @Override + public ApiFuture executeDdlAsync(CallType callType, final ParsedStatement ddl) { + Preconditions.checkNotNull(ddl); + Preconditions.checkArgument( + ddl.getType() == StatementType.DDL, "Statement is not a ddl statement"); + ConnectionPreconditions.checkState( + !isReadOnly(), "DDL statements are not allowed in read-only mode"); + try (Scope ignore = span.makeCurrent()) { + checkAndMarkUsed(); + span.setAttribute(DB_STATEMENT_KEY, ddl.getStatement().getSql()); + + Callable callable = + () -> { + try { + if (isCreateDatabaseStatement(dbClient.getDialect(), ddl.getSql())) { + executeCreateDatabase(ddl); + } else { + ddlClient.runWithRetryForMissingDefaultSequenceKind( + restartIndex -> { + OperationFuture operation = + ddlClient.executeDdl(ddl.getSql(), protoDescriptors); + getWithStatementTimeout(operation, ddl); + }, + connectionState.getValue(DEFAULT_SEQUENCE_KIND).getValue(), + dbClient.getDialect(), + new AtomicReference<>()); + } + state = UnitOfWorkState.COMMITTED; + return null; + } catch (Throwable t) { + state = UnitOfWorkState.COMMIT_FAILED; + throw t; + } + }; + 
return executeStatementAsync( + callType, ddl, callable, DatabaseAdminGrpc.getUpdateDatabaseDdlMethod()); + } + } + + private void executeCreateDatabase(ParsedStatement ddl) { + OperationFuture operation = + ddlClient.executeCreateDatabase(ddl.getSql(), dbClient.getDialect()); + getWithStatementTimeout(operation, ddl); + } + + @Override + public ApiFuture executeUpdateAsync( + CallType callType, ParsedStatement update, UpdateOption... options) { + Preconditions.checkNotNull(update); + Preconditions.checkArgument(update.isUpdate(), "Statement is not an update statement"); + ConnectionPreconditions.checkState( + !isReadOnly(), "Update statements are not allowed in read-only mode"); + try (Scope ignore = span.makeCurrent()) { + checkAndMarkUsed(); + + ApiFuture res; + switch (getAutocommitDmlMode()) { + case TRANSACTIONAL: + case TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC: + res = + ApiFutures.transform( + executeTransactionalUpdateAsync(callType, update, AnalyzeMode.NONE, options), + Tuple::x, + MoreExecutors.directExecutor()); + break; + case PARTITIONED_NON_ATOMIC: + res = executePartitionedUpdateAsync(callType, update, options); + break; + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Unknown dml mode: " + getAutocommitDmlMode()); + } + return res; + } + } + + @Override + public ApiFuture analyzeUpdateAsync( + CallType callType, ParsedStatement update, AnalyzeMode analyzeMode, UpdateOption... 
options) { + Preconditions.checkNotNull(update); + Preconditions.checkArgument(update.isUpdate(), "Statement is not an update statement"); + ConnectionPreconditions.checkState( + !isReadOnly(), "Update statements are not allowed in read-only mode"); + ConnectionPreconditions.checkState( + getAutocommitDmlMode() != AutocommitDmlMode.PARTITIONED_NON_ATOMIC, + "Analyzing update statements is not supported for Partitioned DML"); + try (Scope ignore = span.makeCurrent()) { + checkAndMarkUsed(); + + return ApiFutures.transform( + executeTransactionalUpdateAsync(callType, update, analyzeMode, options), + Tuple::y, + MoreExecutors.directExecutor()); + } + } + + @Override + public ApiFuture executeBatchUpdateAsync( + CallType callType, Iterable updates, UpdateOption... options) { + Preconditions.checkNotNull(updates); + for (ParsedStatement update : updates) { + Preconditions.checkArgument( + update.isUpdate(), "Statement is not an update statement: " + update.getSql()); + } + ConnectionPreconditions.checkState( + !isReadOnly(), "Batch update statements are not allowed in read-only mode"); + try (Scope ignore = span.makeCurrent()) { + checkAndMarkUsed(); + + switch (getAutocommitDmlMode()) { + case TRANSACTIONAL: + return executeTransactionalBatchUpdateAsync(callType, updates, options); + case PARTITIONED_NON_ATOMIC: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "Batch updates are not allowed in " + getAutocommitDmlMode()); + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Unknown dml mode: " + getAutocommitDmlMode()); + } + } + } + + private TransactionRunner createWriteTransaction() { + int numOptions = 0; + if (this.rpcPriority != null) { + numOptions++; + } + if (connectionState.getValue(RETURN_COMMIT_STATS).getValue()) { + numOptions++; + } + if (excludeTxnFromChangeStreams) { + numOptions++; + } + if (connectionState.getValue(MAX_COMMIT_DELAY).getValue() != null) { + 
numOptions++; + } + if (connectionState.getValue(DEFAULT_ISOLATION_LEVEL).getValue() + != IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED) { + numOptions++; + } + if (connectionState.getValue(READ_LOCK_MODE).getValue() + != ReadLockMode.READ_LOCK_MODE_UNSPECIFIED) { + numOptions++; + } + if (this.clientContext != null) { + numOptions++; + } + if (numOptions == 0) { + return dbClient.readWriteTransaction(); + } + Options.TransactionOption[] options = new Options.TransactionOption[numOptions]; + int index = 0; + if (this.rpcPriority != null) { + options[index++] = Options.priority(this.rpcPriority); + } + if (connectionState.getValue(RETURN_COMMIT_STATS).getValue()) { + options[index++] = Options.commitStats(); + } + if (excludeTxnFromChangeStreams) { + options[index++] = Options.excludeTxnFromChangeStreams(); + } + if (connectionState.getValue(MAX_COMMIT_DELAY).getValue() != null) { + options[index++] = + Options.maxCommitDelay(connectionState.getValue(MAX_COMMIT_DELAY).getValue()); + } + if (connectionState.getValue(DEFAULT_ISOLATION_LEVEL).getValue() + != IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED) { + options[index++] = + Options.isolationLevel(connectionState.getValue(DEFAULT_ISOLATION_LEVEL).getValue()); + } + if (connectionState.getValue(READ_LOCK_MODE).getValue() + != ReadLockMode.READ_LOCK_MODE_UNSPECIFIED) { + options[index++] = Options.readLockMode(connectionState.getValue(READ_LOCK_MODE).getValue()); + } + if (this.clientContext != null) { + options[index++] = Options.clientContext(this.clientContext); + } + return dbClient.readWriteTransaction(options); + } + + private ApiFuture> executeTransactionalUpdateAsync( + CallType callType, + final ParsedStatement update, + AnalyzeMode analyzeMode, + final UpdateOption... 
options) { + Callable> callable = + () -> { + try { + writeTransaction = createWriteTransaction(); + Tuple res = + writeTransaction.run( + transaction -> { + if (analyzeMode == AnalyzeMode.NONE) { + return Tuple.of( + transaction.executeUpdate( + update.getStatement(), appendLastStatement(options)), + null); + } + ResultSet resultSet = + transaction.analyzeUpdateStatement( + update.getStatement(), + analyzeMode.getQueryAnalyzeMode(), + appendLastStatement(options)); + return Tuple.of(null, resultSet); + }); + state = UnitOfWorkState.COMMITTED; + return res; + } catch (Throwable t) { + state = UnitOfWorkState.COMMIT_FAILED; + throw t; + } + }; + ApiFuture> transactionalResult = + executeStatementAsync( + callType, + update, + callable, + ImmutableList.of(SpannerGrpc.getExecuteSqlMethod(), SpannerGrpc.getCommitMethod())); + // Retry as Partitioned DML if the statement fails due to exceeding the mutation limit if that + // option has been enabled. + if (isRetryDmlAsPartitionedDml()) { + return addRetryUpdateAsPartitionedDmlCallback(transactionalResult, callType, update, options); + } + return transactionalResult; + } + + private static final QueryUpdateOption[] LAST_STATEMENT_OPTIONS = + new QueryUpdateOption[] {Options.lastStatement()}; + + private static UpdateOption[] appendLastStatement(UpdateOption[] options) { + if (options.length == 0) { + return LAST_STATEMENT_OPTIONS; + } + UpdateOption[] result = new UpdateOption[options.length + 1]; + System.arraycopy(options, 0, result, 0, options.length); + result[result.length - 1] = LAST_STATEMENT_OPTIONS[0]; + return result; + } + + private static QueryOption[] appendLastStatement(QueryOption[] options) { + if (options.length == 0) { + return LAST_STATEMENT_OPTIONS; + } + QueryOption[] result = new QueryOption[options.length + 1]; + System.arraycopy(options, 0, result, 0, options.length); + result[result.length - 1] = LAST_STATEMENT_OPTIONS[0]; + return result; + } + + /** + * Adds a callback to the given future that 
retries the update statement using Partitioned DML if + * the original statement fails with a {@link TransactionMutationLimitExceededException}. + */ + private ApiFuture> addRetryUpdateAsPartitionedDmlCallback( + ApiFuture> transactionalResult, + CallType callType, + final ParsedStatement update, + final UpdateOption... options) { + // Catch TransactionMutationLimitExceededException and retry as Partitioned DML. All other + // exceptions are just propagated. + return ApiFutures.catchingAsync( + transactionalResult, + TransactionMutationLimitExceededException.class, + mutationLimitExceededException -> { + UUID executionId = UUID.randomUUID(); + // Invoke the retryDmlAsPartitionedDmlStarting method for the TransactionRetryListeners + // that have been registered for the connection. + for (TransactionRetryListener listener : this.transactionRetryListeners) { + listener.retryDmlAsPartitionedDmlStarting( + executionId, update.getStatement(), mutationLimitExceededException); + } + // Try to execute the DML statement as Partitioned DML. + ApiFuture> partitionedResult = + ApiFutures.transform( + executePartitionedUpdateAsync(callType, update, options), + lowerBoundUpdateCount -> Tuple.of(lowerBoundUpdateCount, null), + MoreExecutors.directExecutor()); + + // Add a callback to the future that invokes the TransactionRetryListeners after the + // Partitioned DML statement finished. This will invoke either the Finished or Failed + // method on the listeners. 
+ ApiFutures.addCallback( + partitionedResult, + new ApiFutureCallback>() { + @Override + public void onFailure(Throwable throwable) { + for (TransactionRetryListener listener : + SingleUseTransaction.this.transactionRetryListeners) { + listener.retryDmlAsPartitionedDmlFailed( + executionId, update.getStatement(), throwable); + } + } + + @Override + public void onSuccess(Tuple result) { + for (TransactionRetryListener listener : + SingleUseTransaction.this.transactionRetryListeners) { + listener.retryDmlAsPartitionedDmlFinished( + executionId, update.getStatement(), result.x()); + } + } + }, + MoreExecutors.directExecutor()); + + // Catch any exception from the Partitioned DML execution and throw the original + // TransactionMutationLimitExceededException instead. + // The exception that is returned for the Partitioned DML statement is added to the + // exception as a suppressed exception. + return ApiFutures.catching( + partitionedResult, + Throwable.class, + input -> { + mutationLimitExceededException.addSuppressed(input); + throw mutationLimitExceededException; + }, + MoreExecutors.directExecutor()); + }, + MoreExecutors.directExecutor()); + } + + private ApiFuture analyzeTransactionalUpdateAsync( + CallType callType, final ParsedStatement update, AnalyzeMode analyzeMode) { + Callable callable = + () -> { + try { + writeTransaction = createWriteTransaction(); + ResultSet resultSet = + writeTransaction.run( + transaction -> + DirectExecuteResultSet.ofResultSet( + transaction.analyzeQuery( + update.getStatement(), analyzeMode.getQueryAnalyzeMode()))); + state = UnitOfWorkState.COMMITTED; + return resultSet; + } catch (Throwable t) { + state = UnitOfWorkState.COMMIT_FAILED; + throw t; + } + }; + return executeStatementAsync( + callType, + update, + callable, + ImmutableList.of(SpannerGrpc.getExecuteSqlMethod(), SpannerGrpc.getCommitMethod())); + } + + private ApiFuture executePartitionedUpdateAsync( + CallType callType, final ParsedStatement update, final 
UpdateOption... options) { + final UpdateOption[] effectiveOptions; + if (excludeTxnFromChangeStreams) { + if (options.length == 0) { + effectiveOptions = new UpdateOption[] {Options.excludeTxnFromChangeStreams()}; + } else { + effectiveOptions = Arrays.copyOf(options, options.length + 1); + effectiveOptions[effectiveOptions.length - 1] = Options.excludeTxnFromChangeStreams(); + } + } else { + effectiveOptions = options; + } + Callable callable = + () -> { + try { + Long res = dbClient.executePartitionedUpdate(update.getStatement(), effectiveOptions); + state = UnitOfWorkState.COMMITTED; + return res; + } catch (Throwable t) { + state = UnitOfWorkState.COMMIT_FAILED; + throw t; + } + }; + return executeStatementAsync( + callType, update, callable, SpannerGrpc.getExecuteStreamingSqlMethod()); + } + + private ApiFuture executeTransactionalBatchUpdateAsync( + final CallType callType, + final Iterable updates, + final UpdateOption... options) { + Callable callable = + () -> { + writeTransaction = createWriteTransaction(); + return writeTransaction.run( + transaction -> { + try { + long[] res = + transaction.batchUpdate( + Iterables.transform(updates, ParsedStatement::getStatement), + appendLastStatement(options)); + state = UnitOfWorkState.COMMITTED; + return res; + } catch (Throwable t) { + if (t instanceof SpannerBatchUpdateException) { + // Batch update exceptions does not cause a rollback. 
+ state = UnitOfWorkState.COMMITTED; + } else { + state = UnitOfWorkState.COMMIT_FAILED; + } + throw t; + } + }); + }; + return executeStatementAsync( + callType, RUN_BATCH_STATEMENT, callable, SpannerGrpc.getExecuteBatchDmlMethod()); + } + + @Override + public ApiFuture writeAsync(CallType callType, final Iterable mutations) { + Preconditions.checkNotNull(mutations); + ConnectionPreconditions.checkState( + !isReadOnly(), "Update statements are not allowed in read-only mode"); + try (Scope ignore = span.makeCurrent()) { + checkAndMarkUsed(); + + Callable callable = + () -> { + try { + writeTransaction = createWriteTransaction(); + Void res = + writeTransaction.run( + transaction -> { + transaction.buffer(mutations); + return null; + }); + state = UnitOfWorkState.COMMITTED; + return res; + } catch (Throwable t) { + state = UnitOfWorkState.COMMIT_FAILED; + throw t; + } + }; + return executeStatementAsync( + callType, COMMIT_STATEMENT, callable, SpannerGrpc.getCommitMethod()); + } + } + + @Override + public ApiFuture commitAsync( + @Nonnull CallType callType, @Nonnull EndTransactionCallback callback) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Commit is not supported for single-use transactions"); + } + + @Override + public ApiFuture rollbackAsync( + @Nonnull CallType callType, @Nonnull EndTransactionCallback callback) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Rollback is not supported for single-use transactions"); + } + + @Override + String getUnitOfWorkName() { + return "single-use transaction"; + } + + @Override + public ApiFuture runBatchAsync(CallType callType) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Run batch is not supported for single-use transactions"); + } + + @Override + public void abortBatch() { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Run batch is not supported for 
single-use transactions"); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SpannerPool.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SpannerPool.java new file mode 100644 index 000000000000..e4912b8e4f26 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SpannerPool.java @@ -0,0 +1,654 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.NoCredentials; +import com.google.cloud.grpc.GcpManagedChannelOptions.GcpChannelPoolOptions; +import com.google.cloud.spanner.DecodeMode; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SessionPoolOptions; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; +import com.google.common.base.Preconditions; +import com.google.common.base.Ticker; +import io.grpc.ManagedChannelBuilder; +import io.opentelemetry.api.OpenTelemetry; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.stream.Stream; +import javax.annotation.concurrent.GuardedBy; + +/** + * Pool for keeping track of {@link Spanner} instances needed for connections. + * + *

When a connection is opened for a Google Cloud Spanner database, a {@link Spanner} object can + * be opened in the background. The {@link SpannerPool} keeps track of which {@link Spanner} objects + * have been opened by connections during the lifetime of the JVM, which connections are still + * opened and closed, and which {@link Spanner} objects could be closed. + * + *

Call the method {@link SpannerPool#closeSpannerPool()} at the end of your application to + * gracefully shut down all instances in the pool. + */ +public class SpannerPool { + // TODO: create separate Client Lib Token for the Connection API. + private static final String CONNECTION_API_CLIENT_LIB_TOKEN = "sp-jdbc"; + private static final Logger logger = Logger.getLogger(SpannerPool.class.getName()); + + private static final Function DEFAULT_CLOSE_FUNCTION = + spanner -> { + spanner.close(); + return null; + }; + + /** + * Closes the default {@link SpannerPool} and all {@link Spanner} instances that have been opened + * by connections and that are still open. Call this method at the end of your application to + * gracefully close all {@link Spanner} instances in the pool. Failing to call this method will + * keep your application running for 60 seconds after you close the last {@link + * java.sql.Connection} to Cloud Spanner, as this is the default timeout before the {@link + * SpannerPool} closes the unused {@link Spanner} instances. + */ + public static void closeSpannerPool() { + INSTANCE.checkAndCloseSpanners(); + } + + /** + * The minimum number of milliseconds a {@link Spanner} should not have been used for a connection + * before it is closed. 
+ */ + private static final long DEFAULT_CLOSE_SPANNER_AFTER_MILLISECONDS_UNUSED = 60000L; + + static final SpannerPool INSTANCE = + new SpannerPool(DEFAULT_CLOSE_SPANNER_AFTER_MILLISECONDS_UNUSED, Ticker.systemTicker()); + + @VisibleForTesting + enum CheckAndCloseSpannersMode { + WARN, + ERROR + } + + private final class CloseSpannerRunnable implements Runnable { + @Override + public void run() { + try { + checkAndCloseSpanners(CheckAndCloseSpannersMode.WARN); + } catch (Throwable e) { + // ignore + } + } + } + + private final class CloseUnusedSpannersRunnable implements Runnable { + @Override + public void run() { + try { + closeUnusedSpanners(SpannerPool.this.closeSpannerAfterMillisecondsUnused); + } catch (Throwable e) { + logger.log(Level.FINE, "Scheduled call to closeUnusedSpanners failed", e); + } + } + } + + static class CredentialsKey { + static final Object DEFAULT_CREDENTIALS_KEY = new Object(); + final Object key; + + static CredentialsKey create(ConnectionOptions options) throws IOException { + return new CredentialsKey( + Stream.of( + options.getOAuthToken(), + options.getCredentialsProvider() == null ? 
null : options.getCredentials(), + options.getFixedCredentials(), + options.getCredentialsUrl(), + DEFAULT_CREDENTIALS_KEY) + .filter(Objects::nonNull) + .findFirst() + .get()); + } + + private CredentialsKey(Object key) { + this.key = Preconditions.checkNotNull(key); + } + + public int hashCode() { + return key.hashCode(); + } + + public boolean equals(Object o) { + return (o instanceof CredentialsKey && Objects.equals(((CredentialsKey) o).key, this.key)); + } + } + + static class SpannerPoolKey { + private final String host; + private final String projectId; + private final CredentialsKey credentialsKey; + private final SessionPoolOptions sessionPoolOptions; + private final Integer numChannels; + private final Boolean enableDynamicChannelPool; + private final Integer dcpMinChannels; + private final Integer dcpMaxChannels; + private final Integer dcpInitialChannels; + private final boolean usePlainText; + private final String userAgent; + private final String databaseRole; + private final boolean routeToLeader; + private final boolean useVirtualGrpcTransportThreads; + private final OpenTelemetry openTelemetry; + private final Boolean enableExtendedTracing; + private final Boolean enableApiTracing; + private final boolean enableEndToEndTracing; + private final String clientCertificate; + private final String clientCertificateKey; + private final boolean isExperimentalHost; + private final Boolean enableDirectAccess; + private final String universeDomain; + private final String grpcInterceptorProvider; + + @VisibleForTesting + static SpannerPoolKey of(ConnectionOptions options) { + try { + return new SpannerPoolKey(options); + } catch (IOException ioException) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Failed to get credentials: " + ioException.getMessage(), + ioException); + } + } + + private SpannerPoolKey(ConnectionOptions options) throws IOException { + this.host = options.getHost(); + this.projectId = 
options.getProjectId(); + this.credentialsKey = CredentialsKey.create(options); + this.databaseRole = options.getDatabaseRole(); + this.sessionPoolOptions = + options.getSessionPoolOptions() == null + ? SessionPoolOptions.newBuilder().build() + : options.getSessionPoolOptions(); + this.numChannels = options.getNumChannels(); + this.enableDynamicChannelPool = options.isEnableDynamicChannelPool(); + this.dcpMinChannels = options.getDcpMinChannels(); + this.dcpMaxChannels = options.getDcpMaxChannels(); + this.dcpInitialChannels = options.getDcpInitialChannels(); + this.usePlainText = options.isUsePlainText(); + this.userAgent = options.getUserAgent(); + this.routeToLeader = options.isRouteToLeader(); + this.useVirtualGrpcTransportThreads = options.isUseVirtualGrpcTransportThreads(); + this.openTelemetry = options.getOpenTelemetry(); + this.enableExtendedTracing = options.isEnableExtendedTracing(); + this.enableApiTracing = options.isEnableApiTracing(); + this.enableEndToEndTracing = options.isEndToEndTracingEnabled(); + this.clientCertificate = options.getClientCertificate(); + this.clientCertificateKey = options.getClientCertificateKey(); + this.isExperimentalHost = options.isExperimentalHost(); + this.enableDirectAccess = options.isEnableDirectAccess(); + this.universeDomain = options.getUniverseDomain(); + this.grpcInterceptorProvider = options.getGrpcInterceptorProviderName(); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof SpannerPoolKey)) { + return false; + } + SpannerPoolKey other = (SpannerPoolKey) o; + return Objects.equals(this.host, other.host) + && Objects.equals(this.projectId, other.projectId) + && Objects.equals(this.credentialsKey, other.credentialsKey) + && Objects.equals(this.sessionPoolOptions, other.sessionPoolOptions) + && Objects.equals(this.numChannels, other.numChannels) + && Objects.equals(this.enableDynamicChannelPool, other.enableDynamicChannelPool) + && Objects.equals(this.dcpMinChannels, other.dcpMinChannels) + 
&& Objects.equals(this.dcpMaxChannels, other.dcpMaxChannels) + && Objects.equals(this.dcpInitialChannels, other.dcpInitialChannels) + && Objects.equals(this.databaseRole, other.databaseRole) + && Objects.equals(this.usePlainText, other.usePlainText) + && Objects.equals(this.userAgent, other.userAgent) + && Objects.equals(this.routeToLeader, other.routeToLeader) + && Objects.equals( + this.useVirtualGrpcTransportThreads, other.useVirtualGrpcTransportThreads) + && Objects.equals(this.openTelemetry, other.openTelemetry) + && Objects.equals(this.enableExtendedTracing, other.enableExtendedTracing) + && Objects.equals(this.enableApiTracing, other.enableApiTracing) + && Objects.equals(this.enableEndToEndTracing, other.enableEndToEndTracing) + && Objects.equals(this.clientCertificate, other.clientCertificate) + && Objects.equals(this.clientCertificateKey, other.clientCertificateKey) + && Objects.equals(this.isExperimentalHost, other.isExperimentalHost) + && Objects.equals(this.enableDirectAccess, other.enableDirectAccess) + && Objects.equals(this.universeDomain, other.universeDomain) + && Objects.equals(this.grpcInterceptorProvider, other.grpcInterceptorProvider); + } + + @Override + public int hashCode() { + return Objects.hash( + this.host, + this.projectId, + this.credentialsKey, + this.sessionPoolOptions, + this.numChannels, + this.enableDynamicChannelPool, + this.dcpMinChannels, + this.dcpMaxChannels, + this.dcpInitialChannels, + this.usePlainText, + this.databaseRole, + this.userAgent, + this.routeToLeader, + this.useVirtualGrpcTransportThreads, + this.openTelemetry, + this.enableExtendedTracing, + this.enableApiTracing, + this.enableEndToEndTracing, + this.clientCertificate, + this.clientCertificateKey, + this.isExperimentalHost, + this.enableDirectAccess, + this.universeDomain, + this.grpcInterceptorProvider); + } + } + + /** + * The management threads of a {@link SpannerPool} are lazily initialized to prevent unnecessary + * threads to be created when the 
connection API is not used. + */ + private boolean initialized = false; + + /** + * Thread that will be run as a shutdown hook on closing the application. This thread will close + * any Spanner instances opened by the Connection API that are still open. + */ + private Thread shutdownThread = null; + + /** + * Keep unused {@link Spanner} instances open and in the pool for this duration after all its + * {@link Connection}s have been closed. This prevents unnecessary opening and closing of {@link + * Spanner} instances. + */ + private final long closeSpannerAfterMillisecondsUnused; + + /** + * This scheduled task will close all {@link Spanner} objects that have not been used for an open + * connection for at least {@link SpannerPool#DEFAULT_CLOSE_SPANNER_AFTER_MILLISECONDS_UNUSED} + * milliseconds. + */ + private ScheduledExecutorService closerService; + + @GuardedBy("this") + private final Map spanners = new HashMap<>(); + + @GuardedBy("this") + private final Map> connections = new HashMap<>(); + + /** + * Keep track of the moment that the last connection for a specific {@link SpannerPoolKey} was + * closed, so that we can use this to determine whether a {@link Spanner} instance should be + * closed and removed from the pool. As {@link Spanner} instances are expensive to create and + * close, we do not want to do that unnecessarily. By adding a delay between the moment the last + * {@link Connection} for a {@link Spanner} was closed and the moment we close the {@link Spanner} + * instance, we prevent applications that open one or more connections for a process and close all + * these connections at the end of the process from getting a severe performance penalty from + * opening and closing {@link Spanner} instances all the time. + * + *

{@link Spanner} instances are closed and removed from the pool when the last connection was + * closed more than {@link #closeSpannerAfterMillisecondsUnused} milliseconds ago. + */ + @GuardedBy("this") + private final Map lastConnectionClosedAt = new HashMap<>(); + + private final Ticker ticker; + + @VisibleForTesting + SpannerPool(Ticker ticker) { + this(0L, ticker); + } + + @VisibleForTesting + SpannerPool(long closeSpannerAfterMillisecondsUnused, Ticker ticker) { + this.closeSpannerAfterMillisecondsUnused = closeSpannerAfterMillisecondsUnused; + this.ticker = ticker; + } + + /** + * Gets a Spanner object for a connection with the properties specified in the {@link + * ConnectionOptions} object. The {@link SpannerPool} will manage a pool of opened Spanner objects + * for the different connections, and reuse Spanner objects whenever possible. Spanner objects + * will also be closed down when the application is closing. + * + * @param options The specification of the Spanner database to connect to. + * @param connection The {@link ConnectionImpl} that will be created. This {@link ConnectionImpl} + * will be tracked by the pool to know when a {@link Spanner} object can be closed. + * @return an opened {@link Spanner} object that can be used by a connection to communicate with + * the Spanner database. 
+ */ + Spanner getSpanner(ConnectionOptions options, ConnectionImpl connection) { + Preconditions.checkNotNull(options); + Preconditions.checkNotNull(connection); + SpannerPoolKey key = SpannerPoolKey.of(options); + Spanner spanner; + synchronized (this) { + if (!initialized) { + initialize(); + } + if (spanners.get(key) != null) { + spanner = spanners.get(key); + } else { + spanner = createSpanner(key, options); + spanners.put(key, spanner); + } + List registeredConnectionsForSpanner = + connections.computeIfAbsent(key, k -> new ArrayList<>()); + registeredConnectionsForSpanner.add(connection); + lastConnectionClosedAt.remove(key); + return spanner; + } + } + + private void initialize() { + shutdownThread = new Thread(new CloseSpannerRunnable(), "SpannerPool shutdown hook"); + Runtime.getRuntime().addShutdownHook(shutdownThread); + if (this.closeSpannerAfterMillisecondsUnused > 0) { + this.closerService = + Executors.newSingleThreadScheduledExecutor( + runnable -> { + Thread thread = new Thread(runnable, "close-unused-spanners-worker"); + thread.setDaemon(true); + return thread; + }); + this.closerService.scheduleAtFixedRate( + new CloseUnusedSpannersRunnable(), + this.closeSpannerAfterMillisecondsUnused, + this.closeSpannerAfterMillisecondsUnused, + TimeUnit.MILLISECONDS); + } + initialized = true; + } + + @VisibleForTesting + Spanner createSpanner(SpannerPoolKey key, ConnectionOptions options) { + ConnectionSpannerOptions.Builder builder = ConnectionSpannerOptions.newBuilder(); + builder + .setUseVirtualThreads(key.useVirtualGrpcTransportThreads) + .setClientLibToken(MoreObjects.firstNonNull(key.userAgent, CONNECTION_API_CLIENT_LIB_TOKEN)) + .setHost(key.host) + .setProjectId(key.projectId) + // Use lazy decoding, so we can use the protobuf values for calculating the checksum that is + // needed for read/write transactions. 
+ .setDecodeMode(DecodeMode.LAZY_PER_COL) + .setDatabaseRole(options.getDatabaseRole()) + .setCredentials(options.getCredentials()); + builder.setSessionPoolOption(key.sessionPoolOptions); + if (key.openTelemetry != null) { + builder.setOpenTelemetry(key.openTelemetry); + } + if (key.enableExtendedTracing != null) { + builder.setEnableExtendedTracing(key.enableExtendedTracing); + } + if (key.enableApiTracing != null) { + builder.setEnableApiTracing(key.enableApiTracing); + } + if (key.numChannels != null) { + builder.setNumChannels(key.numChannels); + } + // Configure Dynamic Channel Pooling (DCP) based on explicit user setting. + // Note: Setting numChannels disables DCP even if enableDynamicChannelPool is true. + if (key.enableDynamicChannelPool != null && key.numChannels == null) { + if (Boolean.TRUE.equals(key.enableDynamicChannelPool)) { + builder.enableDynamicChannelPool(); + // Build custom GcpChannelPoolOptions if any DCP-specific options are set. + if (key.dcpMinChannels != null + || key.dcpMaxChannels != null + || key.dcpInitialChannels != null) { + // Build GcpChannelPoolOptions from scratch with custom values or Spanner defaults. + // Note: GcpChannelPoolOptions does not have a toBuilder() method, so we must + // construct from scratch using SpannerOptions defaults for unspecified values. + int minChannels = + key.dcpMinChannels != null + ? key.dcpMinChannels + : SpannerOptions.DEFAULT_DYNAMIC_POOL_MIN_CHANNELS; + int maxChannels = + key.dcpMaxChannels != null + ? key.dcpMaxChannels + : SpannerOptions.DEFAULT_DYNAMIC_POOL_MAX_CHANNELS; + int initChannels = + key.dcpInitialChannels != null + ? 
key.dcpInitialChannels + : SpannerOptions.DEFAULT_DYNAMIC_POOL_INITIAL_SIZE; + GcpChannelPoolOptions poolOptions = + GcpChannelPoolOptions.newBuilder() + .setMinSize(minChannels) + .setMaxSize(maxChannels) + .setInitSize(initChannels) + .setDynamicScaling( + SpannerOptions.DEFAULT_DYNAMIC_POOL_MIN_RPC, + SpannerOptions.DEFAULT_DYNAMIC_POOL_MAX_RPC, + SpannerOptions.DEFAULT_DYNAMIC_POOL_SCALE_DOWN_INTERVAL) + .setAffinityKeyLifetime(SpannerOptions.DEFAULT_DYNAMIC_POOL_AFFINITY_KEY_LIFETIME) + .setCleanupInterval(SpannerOptions.DEFAULT_DYNAMIC_POOL_CLEANUP_INTERVAL) + .build(); + builder.setGcpChannelPoolOptions(poolOptions); + } + } else { + // Explicitly disable DCP when enableDynamicChannelPool=false. + // This ensures consistent behavior even if the default changes in the future. + builder.disableDynamicChannelPool(); + } + } + if (options.getChannelProvider() != null) { + builder.setChannelProvider(options.getChannelProvider()); + } + if (!options.isRouteToLeader()) { + builder.disableLeaderAwareRouting(); + } + if (options.isEndToEndTracingEnabled()) { + builder.setEnableEndToEndTracing(true); + } + if (key.usePlainText) { + // Credentials may not be sent over a plain text channel. + builder.setCredentials(NoCredentials.getInstance()); + // Set a custom channel configurator to allow http instead of https. 
+ builder.setChannelConfigurator(ManagedChannelBuilder::usePlaintext); + } + if (key.clientCertificate != null && key.clientCertificateKey != null) { + builder.useClientCert(key.clientCertificate, key.clientCertificateKey); + } + if (key.isExperimentalHost) { + builder.setExperimentalHost(key.host); + } + if (key.enableDirectAccess != null) { + builder.setEnableDirectAccess(key.enableDirectAccess); + } + if (key.universeDomain != null) { + builder.setUniverseDomain(key.universeDomain); + } + if (key.grpcInterceptorProvider != null) { + builder.setInterceptorProvider(options.getGrpcInterceptorProvider()); + } + if (options.getConfigurator() != null) { + options.getConfigurator().configure(builder); + } + return builder.build().getService(); + } + + /** + * Remove the given {@link ConnectionImpl} from the list of connections that should be monitored + * by this pool. + * + * @param options The {@link ConnectionOptions} that were used to create the connection. + * @param connection The {@link ConnectionImpl} to remove from this pool.. + */ + void removeConnection(ConnectionOptions options, ConnectionImpl connection) { + Preconditions.checkNotNull(options); + Preconditions.checkNotNull(connection); + SpannerPoolKey key = SpannerPoolKey.of(options); + synchronized (this) { + if (spanners.containsKey(key) && connections.containsKey(key)) { + List registeredConnections = connections.get(key); + // Remove the connection from the pool. + if (registeredConnections == null || !registeredConnections.remove(connection)) { + logger.log( + Level.WARNING, + "There are no connections registered for ConnectionOptions " + options.toString()); + } else { + // Check if this was the last connection for this spanner key. + if (registeredConnections.isEmpty()) { + // Register the moment the last connection for this Spanner key was removed, so we know + // which Spanner objects we could close. 
+ lastConnectionClosedAt.put( + key, TimeUnit.MILLISECONDS.convert(ticker.read(), TimeUnit.NANOSECONDS)); + } + } + } else { + logger.log( + Level.WARNING, + "There is no Spanner registered for ConnectionOptions " + options.toString()); + } + } + } + + /** + * Checks that there are no {@link Connection}s that have been created by this {@link SpannerPool} + * that are still open, and then closes all {@link Spanner} instances in the pool. If there is at + * least one unclosed {@link Connection} left in the pool, the method will throw a {@link + * SpannerException} and no {@link Spanner} instances will be closed. + */ + void checkAndCloseSpanners() { + checkAndCloseSpanners(CheckAndCloseSpannersMode.ERROR); + } + + @VisibleForTesting + void checkAndCloseSpanners(CheckAndCloseSpannersMode mode) { + checkAndCloseSpanners(mode, DEFAULT_CLOSE_FUNCTION); + } + + @VisibleForTesting + void checkAndCloseSpanners( + CheckAndCloseSpannersMode mode, Function closeSpannerFunction) { + List keysStillInUse = new ArrayList<>(); + synchronized (this) { + for (Entry entry : spanners.entrySet()) { + if (!lastConnectionClosedAt.containsKey(entry.getKey())) { + keysStillInUse.add(entry.getKey()); + } + } + try { + if (keysStillInUse.isEmpty() || mode == CheckAndCloseSpannersMode.WARN) { + if (!keysStillInUse.isEmpty()) { + logLeakedConnections(keysStillInUse); + logger.log( + Level.WARNING, + "There is/are " + + keysStillInUse.size() + + " connection(s) still open." + + " Close all connections before stopping the application"); + } + // Force close all Spanner instances by passing in a value that will always be less than + // the + // difference between the current time and the close time of a connection. + closeUnusedSpanners(Long.MIN_VALUE, closeSpannerFunction); + } else { + logLeakedConnections(keysStillInUse); + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "There is/are " + + keysStillInUse.size() + + " connection(s) still open. 
Close all connections before calling" + + " closeSpanner()"); + } + } finally { + if (closerService != null) { + closerService.shutdown(); + } + initialized = false; + } + } + } + + private void logLeakedConnections(List keysStillInUse) { + synchronized (this) { + for (SpannerPoolKey key : keysStillInUse) { + for (ConnectionImpl con : connections.get(key)) { + if (!con.isClosed() && con.getLeakedException() != null) { + logger.log(Level.WARNING, "Leaked connection", con.getLeakedException()); + } + } + } + } + } + + /** + * Closes Spanner objects that are no longer in use by connections, and where the last connection + * that used it was closed more than closeSpannerAfterMillisecondsUnused seconds ago. + * The delay ensures that Spanner objects are not closed unless there's a good reason for it. + * + * @param closeSpannerAfterMillisecondsUnused The number of milliseconds a {@link Spanner} object + * should not have been used for a {@link Connection} before it is closed by this method. + */ + @VisibleForTesting + void closeUnusedSpanners(long closeSpannerAfterMillisecondsUnused) { + closeUnusedSpanners(closeSpannerAfterMillisecondsUnused, DEFAULT_CLOSE_FUNCTION); + } + + void closeUnusedSpanners( + long closeSpannerAfterMillisecondsUnused, Function closeSpannerFunction) { + List keysToBeRemoved = new ArrayList<>(); + synchronized (this) { + for (Entry entry : lastConnectionClosedAt.entrySet()) { + Long closedAt = entry.getValue(); + // Check whether the last connection was closed more than + // closeSpannerAfterMillisecondsUnused milliseconds ago. + if (closedAt != null + && ((TimeUnit.MILLISECONDS.convert(ticker.read(), TimeUnit.NANOSECONDS) - closedAt)) + > closeSpannerAfterMillisecondsUnused) { + Spanner spanner = spanners.get(entry.getKey()); + if (spanner != null) { + try { + closeSpannerFunction.apply(spanner); + } catch (Throwable t) { + // Ignore any errors and continue with the next one in the pool. 
+ } finally { + // Even if the close operation failed, we should remove the spanner object as it is no + // longer valid. + spanners.remove(entry.getKey()); + keysToBeRemoved.add(entry.getKey()); + } + } + } + } + for (SpannerPoolKey key : keysToBeRemoved) { + lastConnectionClosedAt.remove(key); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SpannerStatementParser.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SpannerStatementParser.java new file mode 100644 index 000000000000..3e70170389cc --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SpannerStatementParser.java @@ -0,0 +1,320 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.api.core.InternalApi; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import com.google.common.base.Preconditions; +import com.google.common.collect.Sets; +import java.util.Collections; +import java.util.Set; +import java.util.regex.Pattern; + +@InternalApi +public class SpannerStatementParser extends AbstractStatementParser { + + private static final Pattern THEN_RETURN_PATTERN = + Pattern.compile("[ `')\"]then return[ *`'(\"]"); + private static final String THEN_STRING = "then"; + private static final String RETURN_STRING = "return"; + + public SpannerStatementParser() throws CompileException { + super( + Collections.unmodifiableSet( + ClientSideStatements.getInstance(Dialect.GOOGLE_STANDARD_SQL).getCompiledStatements())); + } + + @Override + Dialect getDialect() { + return Dialect.GOOGLE_STANDARD_SQL; + } + + @Override + boolean supportsNestedComments() { + return false; + } + + @Override + boolean supportsDollarQuotedStrings() { + return false; + } + + @Override + boolean supportsBacktickQuote() { + return true; + } + + @Override + boolean supportsTripleQuotedStrings() { + return true; + } + + @Override + boolean supportsEscapeQuoteWithQuote() { + return false; + } + + @Override + boolean supportsBackslashEscape() { + return true; + } + + @Override + boolean supportsHashSingleLineComments() { + return true; + } + + @Override + boolean supportsLineFeedInQuotedString() { + return false; + } + + @Override + String getQueryParameterPrefix() { + return "@p"; + } + + /** + * Removes comments from and trims the given sql statement. Spanner supports three types of + * comments: + * + *

    + *
  • Single line comments starting with '--' + *
  • Single line comments starting with '#' + *
  • Multi line comments between '/*' and '*/' + *
+ * + * Reference: https://cloud.google.com/spanner/docs/lexical#comments + * + * @param sql The sql statement to remove comments from and to trim. + * @return the sql statement without the comments and leading and trailing spaces. + */ + @InternalApi + @Override + String removeCommentsAndTrimInternal(String sql) { + Preconditions.checkNotNull(sql); + boolean isInQuoted = false; + boolean isInSingleLineComment = false; + boolean isInMultiLineComment = false; + char startQuote = 0; + boolean lastCharWasEscapeChar = false; + boolean isTripleQuoted = false; + boolean whitespaceBeforeOrAfterMultiLineComment = false; + int multiLineCommentStartIdx = -1; + StringBuilder res = new StringBuilder(sql.length()); + int index = 0; + while (index < sql.length()) { + char c = sql.charAt(index); + if (isInQuoted) { + if ((c == '\n' || c == '\r') && !isTripleQuoted) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "SQL statement contains an unclosed literal: " + sql); + } else if (c == startQuote) { + if (lastCharWasEscapeChar) { + // TODO: Is this correct inside of a triple-quoted string? + lastCharWasEscapeChar = false; + } else if (isTripleQuoted) { + if (sql.length() > index + 2 + && sql.charAt(index + 1) == startQuote + && sql.charAt(index + 2) == startQuote) { + isInQuoted = false; + startQuote = 0; + isTripleQuoted = false; + res.append(c).append(c); + index += 2; + } + } else { + isInQuoted = false; + startQuote = 0; + } + } else if (c == '\\') { + lastCharWasEscapeChar = !lastCharWasEscapeChar; + } else { + lastCharWasEscapeChar = false; + } + res.append(c); + } else { + // We are not in a quoted string. + if (isInSingleLineComment) { + if (c == '\n') { + isInSingleLineComment = false; + // Include the line feed in the result. 
+ res.append(c); + } + } else if (isInMultiLineComment) { + if (sql.length() > index + 1 && c == ASTERISK && sql.charAt(index + 1) == SLASH) { + isInMultiLineComment = false; + if (!whitespaceBeforeOrAfterMultiLineComment && (sql.length() > index + 2)) { + whitespaceBeforeOrAfterMultiLineComment = + Character.isWhitespace(sql.charAt(index + 2)); + } + // If the multiline comment does not have any whitespace before or after it, and it is + // neither at the start nor at the end of SQL string, append an extra space. + if (!whitespaceBeforeOrAfterMultiLineComment + && (multiLineCommentStartIdx != 0) + && (index != sql.length() - 2)) { + res.append(' '); + } + index++; + } + } else { + if (c == DASH + || (sql.length() > index + 1 && c == HYPHEN && sql.charAt(index + 1) == HYPHEN)) { + // This is a single line comment. + isInSingleLineComment = true; + } else if (sql.length() > index + 1 && c == SLASH && sql.charAt(index + 1) == ASTERISK) { + isInMultiLineComment = true; + if (index >= 1) { + whitespaceBeforeOrAfterMultiLineComment = + Character.isWhitespace(sql.charAt(index - 1)); + } + multiLineCommentStartIdx = index; + index++; + } else { + if (c == SINGLE_QUOTE || c == DOUBLE_QUOTE || c == BACKTICK_QUOTE) { + isInQuoted = true; + startQuote = c; + // Check whether it is a triple-quote. + if (sql.length() > index + 2 + && sql.charAt(index + 1) == startQuote + && sql.charAt(index + 2) == startQuote) { + isTripleQuoted = true; + res.append(c).append(c); + index += 2; + } + } + res.append(c); + } + } + } + index++; + } + if (isInQuoted) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "SQL statement contains an unclosed literal: " + sql); + } + if (res.length() > 0 && res.charAt(res.length() - 1) == ';') { + res.deleteCharAt(res.length() - 1); + } + return res.toString().trim(); + } + + /** Removes any statement hints at the beginning of the statement. 
*/ + @Override + String removeStatementHint(String sql) { + // Valid statement hints at the beginning of a query statement can only contain a fixed set of + // possible values. Although it is possible to add a @{FORCE_INDEX=...} as a statement hint, the + // only allowed value is _BASE_TABLE. This means that we can safely assume that the statement + // hint will not contain any special characters, for example a closing curly brace or one of the + // keywords SELECT, UPDATE, DELETE, WITH, and that we can keep the check simple by just + // searching for the first occurrence of a keyword that should be preceded by a closing curly + // brace at the end of the statement hint. + int startStatementHintIndex = sql.indexOf('{'); + // Statement hints are allowed for both queries and DML statements. + int startQueryIndex = -1; + String upperCaseSql = sql.toUpperCase(); + Set selectAndDmlStatements = + Sets.union(selectStatements, dmlStatements).immutableCopy(); + for (String keyword : selectAndDmlStatements) { + startQueryIndex = upperCaseSql.indexOf(keyword); + if (startQueryIndex > -1) { + break; + } + } + if (startQueryIndex > -1) { + int endStatementHintIndex = sql.substring(0, startQueryIndex).lastIndexOf('}'); + if (startStatementHintIndex == -1 || startStatementHintIndex > endStatementHintIndex) { + // Looks like an invalid statement hint. Just ignore at this point and let the caller handle + // the invalid query. + return sql; + } + return removeCommentsAndTrim(sql.substring(endStatementHintIndex + 1)); + } + // Seems invalid, just return the original statement. 
+ return sql; + } + + private boolean isReturning(String sql, int index) { + return (index >= 1) + && (index + 12 <= sql.length()) + && THEN_RETURN_PATTERN.matcher(sql.substring(index - 1, index + 12)).matches(); + } + + @InternalApi + @Override + protected boolean checkReturningClauseInternal(String rawSql) { + Preconditions.checkNotNull(rawSql); + String sql = rawSql.toLowerCase(); + // Do a pre-check to check if the SQL string definitely does not have a returning clause. + // If this check fails, do a more involved check to check for a returning clause. + if (!(sql.contains(THEN_STRING) && sql.contains(RETURN_STRING))) { + return false; + } + sql = sql.replaceAll("\\s+", " "); + final char SINGLE_QUOTE = '\''; + final char DOUBLE_QUOTE = '"'; + final char BACKTICK_QUOTE = '`'; + boolean isInQuoted = false; + char startQuote = 0; + boolean lastCharWasEscapeChar = false; + boolean isTripleQuoted = false; + for (int index = 0; index < sql.length(); index++) { + char c = sql.charAt(index); + if (isInQuoted) { + if (c == startQuote) { + if (lastCharWasEscapeChar) { + lastCharWasEscapeChar = false; + } else if (isTripleQuoted) { + if (sql.length() > index + 2 + && sql.charAt(index + 1) == startQuote + && sql.charAt(index + 2) == startQuote) { + isInQuoted = false; + startQuote = 0; + isTripleQuoted = false; + } + } else { + isInQuoted = false; + startQuote = 0; + } + } else if (c == '\\') { + lastCharWasEscapeChar = !lastCharWasEscapeChar; + } else { + lastCharWasEscapeChar = false; + } + } else { + if (isReturning(sql, index)) { + return true; + } else { + if (c == SINGLE_QUOTE || c == DOUBLE_QUOTE || c == BACKTICK_QUOTE) { + isInQuoted = true; + startQuote = c; + // check whether it is a triple-quote + if (sql.length() > index + 2 + && sql.charAt(index + 1) == startQuote + && sql.charAt(index + 2) == startQuote) { + isTripleQuoted = true; + } + } + } + } + } + return false; + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementExecutionInterceptor.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementExecutionInterceptor.java new file mode 100644 index 000000000000..a2eaa99a5cb5 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementExecutionInterceptor.java @@ -0,0 +1,40 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; + +/** Interface for interceptors that are invoked before a statement is executed. */ +interface StatementExecutionInterceptor { + void intercept(ParsedStatement statement, StatementExecutionStep step, UnitOfWork transaction); +} + +/** + * Enum passed in to a {@link StatementExecutionInterceptor} to determine what/why a statement is + * being executed. + */ +enum StatementExecutionStep { + /** The initial execution of a statement (DML/Query). */ + EXECUTE_STATEMENT, + /** A call to {@link ResultSet#next()}. */ + CALL_NEXT_ON_RESULT_SET, + /** Execution of the statement during an internal transaction retry. */ + RETRY_STATEMENT, + /** A call to {@link ResultSet#next()} during internal transaction retry. 
*/ + RETRY_NEXT_ON_RESULT_SET +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementExecutor.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementExecutor.java new file mode 100644 index 000000000000..b022158b917b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementExecutor.java @@ -0,0 +1,235 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_USE_VIRTUAL_THREADS; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ListenableFutureToApiFuture; +import com.google.cloud.spanner.ThreadFactoryUtil; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.ReadOnlyStalenessUtil.DurationValueGetter; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.Duration; +import io.opentelemetry.context.Context; +import java.time.temporal.ChronoUnit; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +/** + * {@link StatementExecutor} is responsible for executing statements on a {@link Connection}. + * Statements are executed using a separate executor to allow timeouts and cancellation of + * statements. + */ +class StatementExecutor { + + /** Simple holder class for statement timeout that allows us to pass the value by reference. */ + static class StatementTimeout { + /** + * Only {@link TimeUnit#NANOSECONDS}, {@link TimeUnit#MICROSECONDS}, {@link + * TimeUnit#MILLISECONDS} and {@link TimeUnit#SECONDS} may be used to specify a statement + * timeout. + */ + static boolean isValidTimeoutUnit(TimeUnit unit) { + return unit == TimeUnit.NANOSECONDS + || unit == TimeUnit.MICROSECONDS + || unit == TimeUnit.MILLISECONDS + || unit == TimeUnit.SECONDS; + } + + /** The statement timeout. 
*/ + private volatile Duration duration = null; + + /** + * Does this {@link StatementTimeout} have an actual timeout (i.e. it will eventually timeout). + */ + boolean hasTimeout() { + return duration != null; + } + + void clearTimeoutValue() { + this.duration = null; + } + + void setTimeoutValue(long timeout, TimeUnit unit) { + Preconditions.checkArgument(timeout > 0L); + Preconditions.checkArgument(isValidTimeoutUnit(unit)); + this.duration = ReadOnlyStalenessUtil.createDuration(timeout, unit); + } + + long getTimeoutValue(TimeUnit unit) { + Preconditions.checkArgument(isValidTimeoutUnit(unit)); + return duration == null ? 0L : ReadOnlyStalenessUtil.durationToUnits(duration, unit); + } + + /** + * Returns the {@link TimeUnit} with the least precision that could be used to represent this + * {@link StatementTimeout} without loss of precision. + */ + TimeUnit getAppropriateTimeUnit() { + ConnectionPreconditions.checkState( + duration != null, "This StatementTimeout has no timeout value"); + return ReadOnlyStalenessUtil.getAppropriateTimeUnit( + new DurationValueGetter() { + @Override + public long getDuration(TimeUnit unit) { + return StatementTimeout.this.getTimeoutValue(unit); + } + + @Override + public boolean hasDuration() { + return StatementTimeout.this.hasTimeout(); + } + }); + } + + java.time.Duration asDuration() { + if (!hasTimeout()) { + return java.time.Duration.ZERO; + } + TimeUnit unit = getAppropriateTimeUnit(); + switch (unit) { + case DAYS: + return java.time.Duration.ofDays(getTimeoutValue(unit)); + case HOURS: + return java.time.Duration.ofHours(getTimeoutValue(unit)); + case MICROSECONDS: + return java.time.Duration.of(getTimeoutValue(unit), ChronoUnit.MICROS); + case MILLISECONDS: + return java.time.Duration.ofMillis(getTimeoutValue(unit)); + case MINUTES: + return java.time.Duration.ofMinutes(getTimeoutValue(unit)); + case NANOSECONDS: + return java.time.Duration.ofNanos(getTimeoutValue(unit)); + case SECONDS: + return 
java.time.Duration.ofSeconds(getTimeoutValue(unit)); + default: + throw new IllegalStateException("invalid time unit: " + unit); + } + } + } + + /** + * Use a {@link ThreadFactory} that produces daemon or virtual threads and sets a recognizable + * name on the threads. + */ + private static final ThreadFactory DEFAULT_VIRTUAL_THREAD_FACTORY = + ThreadFactoryUtil.createVirtualOrPlatformDaemonThreadFactory("connection-executor", true); + + /** + * Use a {@link ThreadFactory} that produces daemon threads and sets a recognizable name on the + * threads. + */ + private static final ThreadFactory DEFAULT_DAEMON_THREAD_FACTORY = + ThreadFactoryUtil.createVirtualOrPlatformDaemonThreadFactory("connection-executor", false); + + /** Creates an {@link ExecutorService} for a {@link StatementExecutor}. */ + private static ListeningExecutorService createExecutorService(StatementExecutorType type) { + if (type == StatementExecutorType.DIRECT_EXECUTOR) { + return MoreExecutors.newDirectExecutorService(); + } + return MoreExecutors.listeningDecorator( + Context.taskWrapping( + new ThreadPoolExecutor( + 1, + 1, + 0L, + TimeUnit.MILLISECONDS, + new LinkedBlockingQueue<>(), + type == StatementExecutorType.VIRTUAL_THREAD + ? DEFAULT_VIRTUAL_THREAD_FACTORY + : DEFAULT_DAEMON_THREAD_FACTORY))); + } + + private final ListeningExecutorService executor; + + /** + * Interceptors that should be invoked before or after a statement is executed can be registered + * for a connection. These are added to this list. The interceptors are intended for test usage. + */ + private final List interceptors; + + /** The executor type that is used for statements that are executed on a connection. */ + public enum StatementExecutorType { + /** + * Use a platform thread per connection. This allows async execution of statements, but costs + * more resources than the other options. + */ + PLATFORM_THREAD, + /** + * Use a virtual thread per connection. This allows async execution of statements. 
Virtual + * threads are only supported on Java 21 and higher. + */ + VIRTUAL_THREAD, + /** + * Use the calling thread for execution. This does not support async execution of statements. + * This option is used by drivers that do not support async execution, such as JDBC and + * PGAdapter. + */ + DIRECT_EXECUTOR, + } + + @VisibleForTesting + StatementExecutor() { + this( + DEFAULT_USE_VIRTUAL_THREADS + ? StatementExecutorType.VIRTUAL_THREAD + : StatementExecutorType.PLATFORM_THREAD, + Collections.emptyList()); + } + + StatementExecutor(StatementExecutorType type, List interceptors) { + this.executor = createExecutorService(type); + this.interceptors = Collections.unmodifiableList(interceptors); + } + + void shutdown() { + executor.shutdown(); + } + + /** + * Shutdown this executor now and do not wait for any statement that is being executed to finish. + */ + void shutdownNow() { + executor.shutdownNow(); + } + + /** Execute a statement on this {@link StatementExecutor}. */ + ApiFuture submit(Callable callable) { + return new ListenableFutureToApiFuture<>(executor.submit(callable)); + } + + /** + * Invoke the interceptors that have been registered for this {@link StatementExecutor} for the + * given step. 
+ */ + void invokeInterceptors( + ParsedStatement statement, StatementExecutionStep step, UnitOfWork transaction) { + for (StatementExecutionInterceptor interceptor : interceptors) { + interceptor.intercept(statement, step, transaction); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementHintParser.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementHintParser.java new file mode 100644 index 000000000000..727582bb8c4d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementHintParser.java @@ -0,0 +1,211 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.Tuple; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Options.ReadQueryUpdateTransactionOption; +import com.google.cloud.spanner.Options.RpcPriority; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.connection.SimpleParser.Result; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.spanner.v1.RequestOptions.Priority; +import java.util.Locale; +import java.util.Map; +import java.util.Map.Entry; + +/** A simple parser for extracting statement hints from SQL strings. */ +class StatementHintParser { + private static final char[] GOOGLE_SQL_START_HINT_TOKENS = new char[] {'@', '{'}; + private static final char[] POSTGRESQL_START_HINT_TOKENS = new char[] {'/', '*', '@'}; + private static final char[] GOOGLE_SQL_END_HINT_TOKENS = new char[] {'}'}; + private static final char[] POSTGRESQL_END_HINT_TOKENS = new char[] {'*', '/'}; + private static final String STATEMENT_TAG_HINT_NAME = "STATEMENT_TAG"; + private static final String RPC_PRIORITY_HINT_NAME = "RPC_PRIORITY"; + private static final ImmutableSet CLIENT_SIDE_STATEMENT_HINT_NAMES = + ImmutableSet.of(STATEMENT_TAG_HINT_NAME, RPC_PRIORITY_HINT_NAME); + + static final Map NO_HINTS = ImmutableMap.of(); + + private final boolean hasStatementHints; + + private final Map hints; + + private final String sqlWithoutClientSideHints; + + StatementHintParser(Dialect dialect, String sql) { + this(CLIENT_SIDE_STATEMENT_HINT_NAMES, dialect, sql); + } + + StatementHintParser( + ImmutableSet clientSideStatementHintNames, Dialect dialect, String sql) { + SimpleParser parser = + new SimpleParser( + dialect, + sql, + /* pos= */ 0, + /* treatHintCommentsAsTokens= */ dialect == Dialect.POSTGRESQL); 
+ this.hasStatementHints = parser.peekTokens(getStartHintTokens(dialect)); + if (this.hasStatementHints) { + Tuple> hints = extract(parser, clientSideStatementHintNames); + this.sqlWithoutClientSideHints = hints.x(); + this.hints = hints.y(); + } else { + this.sqlWithoutClientSideHints = sql; + this.hints = NO_HINTS; + } + } + + private static char[] getStartHintTokens(Dialect dialect) { + switch (dialect) { + case POSTGRESQL: + return POSTGRESQL_START_HINT_TOKENS; + case GOOGLE_STANDARD_SQL: + default: + return GOOGLE_SQL_START_HINT_TOKENS; + } + } + + private static char[] getEndHintTokens(Dialect dialect) { + switch (dialect) { + case POSTGRESQL: + return POSTGRESQL_END_HINT_TOKENS; + case GOOGLE_STANDARD_SQL: + default: + return GOOGLE_SQL_END_HINT_TOKENS; + } + } + + /** + * Extracts any query/update options from client-side hints in the given statement. Currently, + * this method supports following client-side hints: + * + *
    + *
  • STATEMENT_TAG + *
  • RPC_PRIORITY + *
+ */ + static ReadQueryUpdateTransactionOption[] convertHintsToOptions(Map hints) { + ReadQueryUpdateTransactionOption[] result = new ReadQueryUpdateTransactionOption[hints.size()]; + int index = 0; + for (Entry hint : hints.entrySet()) { + result[index++] = convertHintToOption(hint.getKey(), hint.getValue()); + } + return result; + } + + private static ReadQueryUpdateTransactionOption convertHintToOption(String hint, String value) { + Preconditions.checkNotNull(value); + switch (Preconditions.checkNotNull(hint).toUpperCase(Locale.ENGLISH)) { + case STATEMENT_TAG_HINT_NAME: + return Options.tag(value); + case RPC_PRIORITY_HINT_NAME: + try { + Priority priority = Priority.valueOf(value); + return Options.priority(RpcPriority.fromProto(priority)); + } catch (IllegalArgumentException illegalArgumentException) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Invalid RPC priority value: " + value, + illegalArgumentException); + } + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Invalid hint name: " + hint); + } + } + + boolean hasStatementHints() { + return this.hasStatementHints; + } + + String getSqlWithoutClientSideHints() { + return this.sqlWithoutClientSideHints; + } + + Map getClientSideStatementHints() { + return this.hints; + } + + private static Tuple> extract( + SimpleParser parser, ImmutableSet clientSideStatementHintNames) { + String updatedSql = parser.getSql(); + int posBeforeHintToken = parser.getPos(); + int removedHintsLength = 0; + boolean allClientSideHints = true; + // This method is only called if the parser has hints, so it is safe to ignore this result. 
+ parser.eatTokens(getStartHintTokens(parser.getDialect())); + ImmutableMap.Builder builder = ImmutableMap.builder(); + while (parser.hasMoreTokens()) { + int posBeforeHint = parser.getPos(); + boolean foundClientSideHint = false; + Result hintName = parser.eatIdentifier(); + if (!hintName.isValid()) { + return Tuple.of(parser.getSql(), NO_HINTS); + } + if (!parser.eatToken('=')) { + return Tuple.of(parser.getSql(), NO_HINTS); + } + Result hintValue = eatHintLiteral(parser); + if (!hintValue.isValid()) { + return Tuple.of(parser.getSql(), NO_HINTS); + } + if (clientSideStatementHintNames.contains(hintName.getValue().toUpperCase(Locale.ENGLISH))) { + builder.put(hintName.getValue(), hintValue.getValue()); + foundClientSideHint = true; + } else { + allClientSideHints = false; + } + boolean endOfHints = parser.peekTokens(getEndHintTokens(parser.getDialect())); + if (!endOfHints && !parser.eatToken(',')) { + return Tuple.of(parser.getSql(), NO_HINTS); + } + if (foundClientSideHint) { + // Remove the client-side hint from the SQL string that is sent to Spanner. + updatedSql = + updatedSql.substring(0, posBeforeHint - removedHintsLength) + + parser.getSql().substring(parser.getPos()); + removedHintsLength += parser.getPos() - posBeforeHint; + } + if (endOfHints) { + break; + } + } + if (!parser.eatTokens(getEndHintTokens(parser.getDialect()))) { + return Tuple.of(parser.getSql(), NO_HINTS); + } + if (allClientSideHints) { + // Only client-side hints found. Remove the entire hint block. + updatedSql = + parser.getSql().substring(0, posBeforeHintToken) + + parser.getSql().substring(parser.getPos()); + } + return Tuple.of(updatedSql, builder.build()); + } + + /** Eats a hint literal. This is a literal that could be a quoted string, or an identifier. 
*/ + private static Result eatHintLiteral(SimpleParser parser) { + if (parser.peekToken('\'')) { + return parser.eatSingleQuotedString(); + } + return parser.eatIdentifier(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementResult.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementResult.java new file mode 100644 index 000000000000..55f09f46d936 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementResult.java @@ -0,0 +1,159 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.api.core.InternalApi; +import com.google.cloud.spanner.ResultSet; + +/** + * A result of the execution of a statement. Statements that are executed by the {@link + * Connection#execute(com.google.cloud.spanner.Statement)} method could have different types of + * return values. These are wrapped in a {@link StatementResult}. + */ +@InternalApi +public interface StatementResult { + + /** + * Enum indicating the type of result that was returned by {@link + * Connection#execute(com.google.cloud.spanner.Statement)} + */ + enum ResultType { + /** + * A result set either returned by a query on Cloud Spanner or a local result set generated by a + * client side statement. 
+ */ + RESULT_SET, + /** An update count returned by Cloud Spanner. */ + UPDATE_COUNT, + /** + * DDL statements and client side statements that set the state of a connection return no + * result. + */ + NO_RESULT + } + + /** The type of client side statement that was executed. */ + enum ClientSideStatementType { + SHOW_AUTOCOMMIT, + SET_AUTOCOMMIT, + SHOW_READONLY, + SET_READONLY, + SHOW_RETRY_ABORTS_INTERNALLY, + SET_RETRY_ABORTS_INTERNALLY, + SHOW_AUTOCOMMIT_DML_MODE, + SET_AUTOCOMMIT_DML_MODE, + SHOW_STATEMENT_TIMEOUT, + SET_STATEMENT_TIMEOUT, + SHOW_TRANSACTION_TIMEOUT, + SET_TRANSACTION_TIMEOUT, + SHOW_READ_TIMESTAMP, + SHOW_COMMIT_TIMESTAMP, + SHOW_COMMIT_RESPONSE, + SHOW_READ_ONLY_STALENESS, + SET_READ_ONLY_STALENESS, + SHOW_DIRECTED_READ, + SET_DIRECTED_READ, + SHOW_OPTIMIZER_VERSION, + SET_OPTIMIZER_VERSION, + SHOW_OPTIMIZER_STATISTICS_PACKAGE, + SET_OPTIMIZER_STATISTICS_PACKAGE, + SHOW_RETURN_COMMIT_STATS, + SET_RETURN_COMMIT_STATS, + SHOW_MAX_COMMIT_DELAY, + SET_MAX_COMMIT_DELAY, + SHOW_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE, + SET_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE, + SHOW_KEEP_TRANSACTION_ALIVE, + SET_KEEP_TRANSACTION_ALIVE, + SHOW_STATEMENT_TAG, + SET_STATEMENT_TAG, + SHOW_TRANSACTION_TAG, + SET_TRANSACTION_TAG, + SHOW_EXCLUDE_TXN_FROM_CHANGE_STREAMS, + SET_EXCLUDE_TXN_FROM_CHANGE_STREAMS, + BEGIN, + COMMIT, + ROLLBACK, + SET_TRANSACTION_MODE, + SET_DEFAULT_TRANSACTION_ISOLATION, + START_BATCH_DDL, + START_BATCH_DML, + RUN_BATCH, + ABORT_BATCH, + RESET_ALL, + SET_RPC_PRIORITY, + SHOW_RPC_PRIORITY, + SHOW_TRANSACTION_ISOLATION_LEVEL, + SHOW_DEFAULT_TRANSACTION_ISOLATION, + SHOW_SAVEPOINT_SUPPORT, + SET_SAVEPOINT_SUPPORT, + SHOW_DATA_BOOST_ENABLED, + SET_DATA_BOOST_ENABLED, + SHOW_AUTO_PARTITION_MODE, + SET_AUTO_PARTITION_MODE, + SHOW_MAX_PARTITIONS, + SET_MAX_PARTITIONS, + SHOW_MAX_PARTITIONED_PARALLELISM, + SET_MAX_PARTITIONED_PARALLELISM, + EXPLAIN, + PARTITION, + RUN_PARTITION, + RUN_PARTITIONED_QUERY, + SET_PROTO_DESCRIPTORS, + 
SET_PROTO_DESCRIPTORS_FILE_PATH, + SHOW_PROTO_DESCRIPTORS, + SHOW_PROTO_DESCRIPTORS_FILE_PATH, + SET_AUTO_BATCH_DML, + SHOW_AUTO_BATCH_DML, + SET_AUTO_BATCH_DML_UPDATE_COUNT, + SHOW_AUTO_BATCH_DML_UPDATE_COUNT, + SET_BATCH_DML_UPDATE_COUNT, + SET_AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION, + SHOW_AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION, + SHOW_READ_LOCK_MODE, + SET_READ_LOCK_MODE, + } + + /** + * Returns the {@link ResultType} of this result. + * + * @return the result type. + */ + ResultType getResultType(); + + /** + * @return the {@link ClientSideStatementType} that was executed, or null if no such statement was + * executed. + */ + ClientSideStatementType getClientSideStatementType(); + + /** + * Returns the {@link ResultSet} held by this result. May only be called if the type of this + * result is {@link ResultType#RESULT_SET}. + * + * @return the {@link ResultSet} held by this result. + */ + ResultSet getResultSet(); + + /** + * Returns the update count held by this result. May only be called if the type of this result is + * {@link ResultType#UPDATE_COUNT}. + * + * @return the update count held by this result. + */ + Long getUpdateCount(); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementResultImpl.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementResultImpl.java new file mode 100644 index 000000000000..ee5032463cdf --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementResultImpl.java @@ -0,0 +1,228 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.SpannerApiFutures.get; + +import com.google.cloud.ByteArray; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.ResultSets; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.StructField; +import java.util.Collections; + +/** Implementation of {@link StatementResult} */ +class StatementResultImpl implements StatementResult { + + /** + * Returns the {@link AsyncStatementResult} as a {@link StatementResult} with the guarantee that + * the underlying result is available. + */ + static StatementResult of(AsyncStatementResult delegate) { + switch (delegate.getResultType()) { + case NO_RESULT: + get(delegate.getNoResultAsync()); + break; + case RESULT_SET: + delegate.getResultSet(); + break; + case UPDATE_COUNT: + delegate.getUpdateCount(); + break; + default: + throw new IllegalStateException("Unknown result type: " + delegate.getResultType()); + } + return delegate; + } + + /** {@link StatementResult} containing a {@link ResultSet} returned by Cloud Spanner. */ + static StatementResult of(ResultSet resultSet) { + return new StatementResultImpl(resultSet, null); + } + + /** + * {@link StatementResult} containing a {@link ResultSet} created by a {@link + * ClientSideStatement}. 
+ */ + static StatementResult of(ResultSet resultSet, ClientSideStatementType clientSideStatementType) { + return new StatementResultImpl(resultSet, clientSideStatementType); + } + + /** {@link StatementResult} containing an update count returned by Cloud Spanner. */ + static StatementResult of(Long updateCount) { + return new StatementResultImpl(updateCount); + } + + /** + * Convenience method for creating a {@link StatementResult} containing a {@link ResultSet} with + * one BOOL column and one row that is created by a {@link ClientSideStatement}. + */ + static StatementResult resultSet( + String name, Boolean value, ClientSideStatementType clientSideStatementType) { + return of( + ResultSets.forRows( + Type.struct(StructField.of(name, Type.bool())), + Collections.singletonList(Struct.newBuilder().set(name).to(value).build())), + clientSideStatementType); + } + + /** + * Convenience method for creating a {@link StatementResult} containing a {@link ResultSet} with + * one INT64 column and one row that is created by a {@link ClientSideStatement}. + */ + static StatementResult resultSet( + String name, Long value, ClientSideStatementType clientSideStatementType) { + return of( + ResultSets.forRows( + Type.struct(StructField.of(name, Type.int64())), + Collections.singletonList(Struct.newBuilder().set(name).to(value).build())), + clientSideStatementType); + } + + /** + * Convenience method for creating a {@link StatementResult} containing a {@link ResultSet} with + * one ARRAY column and one row that is created by a {@link ClientSideStatement}. 
+ */ + static StatementResult resultSet( + String name, long[] values, ClientSideStatementType clientSideStatementType) { + return of( + ResultSets.forRows( + Type.struct(StructField.of(name, Type.array(Type.int64()))), + Collections.singletonList(Struct.newBuilder().set(name).toInt64Array(values).build())), + clientSideStatementType); + } + + /** + * Convenience method for creating a {@link StatementResult} containing a {@link ResultSet} with + * one STRING column and one row that is created by a {@link ClientSideStatement}. + */ + static StatementResult resultSet( + String name, String value, ClientSideStatementType clientSideStatementType) { + return of( + ResultSets.forRows( + Type.struct(StructField.of(name, Type.string())), + Collections.singletonList(Struct.newBuilder().set(name).to(value).build())), + clientSideStatementType); + } + + /** + * Convenience method for creating a {@link StatementResult} containing a {@link ResultSet} with + * one STRING column containing an {@link Enum} value and one row that is created by a {@link + * ClientSideStatement}. + */ + static StatementResult resultSet( + String name, Enum value, ClientSideStatementType clientSideStatementType) { + return of( + ResultSets.forRows( + Type.struct(StructField.of(name, Type.string())), + Collections.singletonList(Struct.newBuilder().set(name).to(value.toString()).build())), + clientSideStatementType); + } + + /** + * Convenience method for creating a {@link StatementResult} containing a {@link ResultSet} with + * one TIMESTAMP column and one row that is created by a {@link ClientSideStatement}. 
+ */ + static StatementResult resultSet( + String name, Timestamp value, ClientSideStatementType clientSideStatementType) { + return of( + ResultSets.forRows( + Type.struct(StructField.of(name, Type.timestamp())), + Collections.singletonList(Struct.newBuilder().set(name).to(value).build())), + clientSideStatementType); + } + + /** + * Convenience method for creating a {@link StatementResult} containing a {@link ResultSet} with + * one BYTES column and one row that is created by a {@link ClientSideStatement}. + */ + static StatementResult resultSet( + String name, byte[] values, ClientSideStatementType clientSideStatementType) { + return of( + ResultSets.forRows( + Type.struct(StructField.of(name, Type.bytes())), + Collections.singletonList( + Struct.newBuilder() + .set(name) + .to(values != null ? ByteArray.copyFrom(values) : null) + .build())), + clientSideStatementType); + } + + /** {@link StatementResult} containing no results. */ + static StatementResult noResult() { + return new StatementResultImpl((ClientSideStatementType) null); + } + + /** {@link StatementResult} containing no results created by a {@link ClientSideStatement}. 
*/ + static StatementResult noResult(ClientSideStatementType clientSideStatementType) { + return new StatementResultImpl(clientSideStatementType); + } + + private final ResultType type; + private final ClientSideStatementType clientSideStatementType; + private final ResultSet resultSet; + private final Long updateCount; + + private StatementResultImpl( + ResultSet resultSet, ClientSideStatementType clientSideStatementType) { + this.type = ResultType.RESULT_SET; + this.clientSideStatementType = clientSideStatementType; + this.resultSet = resultSet; + this.updateCount = null; + } + + private StatementResultImpl(Long updateCount) { + this.type = ResultType.UPDATE_COUNT; + this.clientSideStatementType = null; + this.resultSet = null; + this.updateCount = updateCount; + } + + private StatementResultImpl(ClientSideStatementType clientSideStatementType) { + this.type = ResultType.NO_RESULT; + this.clientSideStatementType = clientSideStatementType; + this.resultSet = null; + this.updateCount = null; + } + + @Override + public ResultType getResultType() { + return type; + } + + @Override + public ClientSideStatementType getClientSideStatementType() { + return clientSideStatementType; + } + + @Override + public ResultSet getResultSet() { + ConnectionPreconditions.checkState( + resultSet != null, "This result does not contain a ResultSet"); + return resultSet; + } + + @Override + public Long getUpdateCount() { + ConnectionPreconditions.checkState( + updateCount != null, "This result does not contain an update count"); + return updateCount; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/TransactionMode.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/TransactionMode.java new file mode 100644 index 000000000000..0a066fede82a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/TransactionMode.java @@ -0,0 +1,46 @@ +/* + * 
Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +/** Enum used to define the transaction type of a {@link Connection} */ +public enum TransactionMode { + READ_ONLY_TRANSACTION("READ ONLY"), + READ_WRITE_TRANSACTION("READ WRITE"); + + private final String statementString; + + TransactionMode(String statement) { + this.statementString = statement; + } + + /** + * Use this method to get the correct format for use in a SQL statement. The SQL statement for + * setting the mode to read-only should for example be without the underscore: + * SET TRANSACTION READ ONLY + * + * @return a string representation of this {@link TransactionMode} that can be used in a SQL + * statement to set the transaction mode. 
+ */ + public String getStatementString() { + return statementString; + } + + @Override + public String toString() { + return statementString; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/TransactionRetryListener.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/TransactionRetryListener.java new file mode 100644 index 000000000000..ba2613ffd86f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/TransactionRetryListener.java @@ -0,0 +1,131 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.api.core.InternalApi; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AbortedDueToConcurrentModificationException; +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TransactionMutationLimitExceededException; +import java.util.UUID; + +/** + * Cloud Spanner can abort any read/write transaction because of potential deadlocks or other + * internal reasons. When a transaction is aborted, the entire transaction should be retried. 
A + * {@link Connection} can automatically retry a transaction internally and check whether the results + * that are returned during a retry attempt are equal to the results during the original + * transaction. This is done by keeping track of a SHA-256 checksum of all the results that are + * returned by Spanner during both transactions. + * + *

This listener class for internal transaction retries allow client applications to do + * additional testing or logging of transaction retries. Transaction retry listeners of a {@link + * Connection} can be added using {@link + * Connection#addTransactionRetryListener(TransactionRetryListener)}. + */ +@InternalApi +public interface TransactionRetryListener { + /** The result of a retry. */ + @InternalApi + enum RetryResult { + /** The retry executed successfully and the transaction will continue. */ + RETRY_SUCCESSFUL, + /** The retry was aborted by Spanner and another retry attempt will be started. */ + RETRY_ABORTED_AND_RESTARTING, + /** + * The retry was aborted by the {@link Connection} because of a concurrent modification. The + * transaction cannot continue and will throw an {@link + * AbortedDueToConcurrentModificationException}. + */ + RETRY_ABORTED_DUE_TO_CONCURRENT_MODIFICATION, + /** + * The retry was aborted by Spanner and the maximum number of retry attempts allowed has been + * exceeded. The transaction cannot continue and will throw an {@link AbortedException}. + */ + RETRY_ABORTED_AND_MAX_ATTEMPTS_EXCEEDED, + /** + * An unexpected error occurred during transaction retry, the transaction cannot continue and + * will throw an exception. + */ + RETRY_ERROR + } + + /** + * This method is called when a retry is about to start. + * + * @param transactionStarted The start date/time of the transaction that is retrying. + * @param transactionId An internally assigned ID of the transaction (unique during the lifetime + * of the JVM) that can be used to identify the transaction for logging purposes. + * @param retryAttempt The number of retry attempts the current transaction has executed, + * including the current retry attempt. + */ + void retryStarting(Timestamp transactionStarted, long transactionId, int retryAttempt); + + /** + * This method is called when a retry has finished. 
+ * + * @param transactionStarted The start date/time of the transaction that is retrying. + * @param transactionId An internally assigned ID of the transaction (unique during the lifetime + * of the JVM) that can be used to identify the transaction for logging purposes. + * @param retryAttempt The number of retry attempts the current transaction has executed, + * including the current retry attempt. + * @param result The result of the retry indicating whether the retry was successful or not. + */ + void retryFinished( + Timestamp transactionStarted, + long transactionId, + int retryAttempt, + TransactionRetryListener.RetryResult result); + + /** + * This method is called when an atomic DML statement is retried as a Partitioned DML statement. + * + * @param executionId a generated, unique ID for this execution. The same ID is passed in to the + * methods {@link #retryDmlAsPartitionedDmlFinished(UUID, Statement, long)} and {@link + * #retryDmlAsPartitionedDmlFailed(UUID, Statement, Throwable)} when the execution finishes or + * fails. + * @param statement the statement that is being retried as Partitioned DML + * @param exception the mutation-limit-exceeded exception that was returned by Spanner during the + * initial execution. + */ + default void retryDmlAsPartitionedDmlStarting( + UUID executionId, Statement statement, TransactionMutationLimitExceededException exception) {} + + /** + * This method is called when an atomic DML statement has been successfully retried as a + * Partitioned DML statement. 
+ * + * @param executionId the unique ID of this statement execution + * @param statement the statement that was successfully retried as Partitioned DML + * @param lowerBoundUpdateCount the lower-bound update count returned by Spanner after executing + * the statement as Partitioned DML + */ + default void retryDmlAsPartitionedDmlFinished( + UUID executionId, Statement statement, long lowerBoundUpdateCount) {} + + /** + * This method is called when an atomic DML statement failed to be retried as a Partitioned DML + * statement. + * + * @param executionId the unique ID of this statement execution + * @param statement the statement that failed to be retried as Partitioned DML + * @param exception the exception that was returned when the statement was executed as Partitioned + * DML + */ + default void retryDmlAsPartitionedDmlFailed( + UUID executionId, Statement statement, Throwable exception) {} +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/TransactionRetryListenerImpl.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/TransactionRetryListenerImpl.java new file mode 100644 index 000000000000..42497564b95d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/TransactionRetryListenerImpl.java @@ -0,0 +1,45 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TransactionMutationLimitExceededException; +import java.util.UUID; + +/** Default (no-op) implementation for {@link TransactionRetryListener}. */ +public abstract class TransactionRetryListenerImpl implements TransactionRetryListener { + + @Override + public void retryStarting(Timestamp transactionStarted, long transactionId, int retryAttempt) {} + + @Override + public void retryFinished( + Timestamp transactionStarted, long transactionId, int retryAttempt, RetryResult result) {} + + @Override + public void retryDmlAsPartitionedDmlStarting( + UUID executionId, Statement statement, TransactionMutationLimitExceededException exception) {} + + @Override + public void retryDmlAsPartitionedDmlFinished( + UUID executionId, Statement statement, long lowerBoundUpdateCount) {} + + @Override + public void retryDmlAsPartitionedDmlFailed( + UUID executionId, Statement statement, Throwable exception) {} +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/TransactionRunnerImpl.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/TransactionRunnerImpl.java new file mode 100644 index 000000000000..504b084dba32 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/TransactionRunnerImpl.java @@ -0,0 +1,62 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.SpannerApiFutures.get; + +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.connection.Connection.TransactionCallable; +import com.google.cloud.spanner.connection.ConnectionImpl.Caller; +import com.google.cloud.spanner.connection.UnitOfWork.CallType; + +class TransactionRunnerImpl { + private final ConnectionImpl connection; + + TransactionRunnerImpl(ConnectionImpl connection) { + this.connection = connection; + } + + T run(TransactionCallable callable) { + connection.beginTransaction(); + // Disable internal retries during this transaction. 
+ connection.setRetryAbortsInternally(/* retryAbortsInternally= */ false, /* local= */ true); + UnitOfWork transaction = connection.getCurrentUnitOfWorkOrStartNewUnitOfWork(); + while (true) { + try { + T result = callable.run(connection); + get(connection.commitAsync(CallType.SYNC, Caller.TRANSACTION_RUNNER)); + return result; + } catch (AbortedException abortedException) { + try { + //noinspection BusyWait + Thread.sleep(abortedException.getRetryDelayInMillis()); + connection.resetForRetry(transaction); + } catch (InterruptedException interruptedException) { + connection.rollbackAsync(CallType.SYNC, Caller.TRANSACTION_RUNNER); + throw SpannerExceptionFactory.propagateInterrupt(interruptedException); + } catch (Throwable t) { + connection.rollbackAsync(CallType.SYNC, Caller.TRANSACTION_RUNNER); + throw t; + } + } catch (Throwable t) { + connection.rollbackAsync(CallType.SYNC, Caller.TRANSACTION_RUNNER); + throw t; + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/UnitOfWork.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/UnitOfWork.java new file mode 100644 index 000000000000..82b1bf8a15ca --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/UnitOfWork.java @@ -0,0 +1,310 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.api.core.ApiFuture; +import com.google.api.core.InternalApi; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.CommitResponse; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.Options.UpdateOption; +import com.google.cloud.spanner.PartitionOptions; +import com.google.cloud.spanner.ReadContext; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.TransactionContext; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.spanner.v1.ResultSetStats; +import io.opentelemetry.api.trace.Span; +import java.util.concurrent.ExecutionException; +import javax.annotation.Nonnull; + +/** Internal interface for transactions and batches on {@link Connection}s. */ +@InternalApi +interface UnitOfWork { + + enum CallType { + SYNC, + ASYNC + } + + /** A unit of work can be either a transaction or a DDL/DML batch. */ + enum Type { + TRANSACTION, + BATCH + } + + enum UnitOfWorkState { + STARTED, + COMMITTING, + COMMITTED, + COMMIT_FAILED, + ROLLED_BACK, + RUNNING, + RAN, + RUN_FAILED, + ABORTED; + + public boolean isActive() { + return this == STARTED; + } + } + + /** + * Callback for end-of-transaction methods. This is used to commit or rollback connection state + * after an async commit/rollback of a database transaction. + */ + interface EndTransactionCallback { + /** + * This method will be called if the end-of-transaction method (commit or rollback) finished + * successfully, but before the {@link ApiFuture} that is returned by the method is done. + */ + void onSuccess(); + + /** + * This method will be called if the end-of-transaction method (commit or rollback) failed, but + * before the {@link ApiFuture} that is returned by the method is done. 
+ */ + void onFailure(); + } + + /** Cancel the currently running statement (if any and the statement may be cancelled). */ + void cancel(); + + /** + * @return the type of unit of work. + */ + Type getType(); + + /** + * @return the current state of this unit of work. + */ + UnitOfWorkState getState(); + + /** + * @return true if this unit of work is still active. + */ + boolean isActive(); + + /** + * @return the {@link Span} that is used by this {@link UnitOfWork}. + */ + Span getSpan(); + + /** Returns true if this transaction can only be used for a single statement. */ + boolean isSingleUse(); + + /** + * Commits the changes in this unit of work to the database. For read-only transactions, this only + * closes the {@link ReadContext}. This method will throw a {@link SpannerException} if called for + * a {@link Type#BATCH}. + * + * @param callType Indicates whether the top-level call is a sync or async call. + * @param callback Callback that should be called when the commit succeeded or failed. + * @return An {@link ApiFuture} that is done when the commit has finished. + */ + ApiFuture commitAsync(@Nonnull CallType callType, @Nonnull EndTransactionCallback callback); + + /** + * Rollbacks any changes in this unit of work. For read-only transactions, this only closes the + * {@link ReadContext}. This method will throw a {@link SpannerException} if called for a {@link + * Type#BATCH}. + * + * @param callType Indicates whether the top-level call is a sync or async call. + * @param callback Callback that should be called when the rollback succeeded or failed. + * @return An {@link ApiFuture} that is done when the rollback has finished. 
+ */ + ApiFuture rollbackAsync( + @Nonnull CallType callType, @Nonnull EndTransactionCallback callback); + + default void resetForRetry() { + throw new UnsupportedOperationException(); + } + + /** + * @see Connection#savepoint(String) + */ + void savepoint(@Nonnull String name, @Nonnull Dialect dialect); + + /** + * @see Connection#releaseSavepoint(String) + */ + void releaseSavepoint(@Nonnull String name); + + /** + * @see Connection#rollbackToSavepoint(String) + */ + void rollbackToSavepoint(@Nonnull String name, @Nonnull SavepointSupport savepointSupport); + + /** + * Sends the currently buffered statements in this unit of work to the database and ends the + * batch. This method will throw a {@link SpannerException} if called for a {@link + * Type#TRANSACTION}. + * + * @param callType Indicates whether the top-level call is a sync or async call. + * @return an {@link ApiFuture} containing the update counts in case of a DML batch. Returns an + * array containing 1 for each successful statement and 0 for each failed statement or + * statement that was not executed in case of a DDL batch. + */ + ApiFuture runBatchAsync(CallType callType); + + /** + * Clears the currently buffered statements in this unit of work and ends the batch. This method + * will throw a {@link SpannerException} if called for a {@link Type#TRANSACTION}. This method is + * always non-blocking. + */ + void abortBatch(); + + /** + * @return true if this unit of work is read-only. + */ + boolean isReadOnly(); + + /** + * @return true if this unit of work supports {@link + * com.google.spanner.v1.DirectedReadOptions} + */ + default boolean supportsDirectedReads(ParsedStatement parsedStatement) { + return false; + } + + /** + * Executes a query with the given options. If {@link AnalyzeMode} is set to {@link + * AnalyzeMode#PLAN} or {@link AnalyzeMode#PROFILE}, the returned {@link ResultSet} will include + * {@link ResultSetStats}. 
+ * + * @param callType Indicates whether the top-level call is a sync or async call. + * @param statement The statement to execute. + * @param analyzeMode Indicates whether to include {@link ResultSetStats} in the returned {@link + * ResultSet} or not. Cannot be used in combination with {@link QueryOption}s. + * @param options the options to configure the query. May only be set if analyzeMode is set to + * {@link AnalyzeMode#NONE}. + * @return an {@link ApiFuture} containing a {@link ResultSet} with the results of the query. + * @throws SpannerException if the query is not allowed on this {@link UnitOfWork}. The {@link + * ApiFuture} will return a {@link SpannerException} wrapped in an {@link ExecutionException} + * if a database error occurs. + */ + ApiFuture executeQueryAsync( + CallType callType, + ParsedStatement statement, + AnalyzeMode analyzeMode, + QueryOption... options); + + ApiFuture partitionQueryAsync( + CallType callType, + ParsedStatement query, + PartitionOptions partitionOptions, + QueryOption... options); + + /** + * @return the read timestamp of this transaction. Will throw a {@link SpannerException} if there + * is no read timestamp. + */ + Timestamp getReadTimestamp(); + + /** + * @return the read timestamp of this transaction or null if there is no read timestamp. + */ + Timestamp getReadTimestampOrNull(); + + /** + * @return the commit timestamp of this transaction. Will throw a {@link SpannerException} if + * there is no commit timestamp. + */ + Timestamp getCommitTimestamp(); + + /** + * @return the commit timestamp of this transaction or null if there is no commit timestamp. 
+ */ + Timestamp getCommitTimestampOrNull(); + + /** + * @return the {@link CommitResponse} of this transaction + * @throws SpannerException if there is no {@link CommitResponse} + */ + CommitResponse getCommitResponse(); + + /** + * @return the {@link CommitResponse} of this transaction or null if there is no {@link + * CommitResponse} + */ + CommitResponse getCommitResponseOrNull(); + + /** + * Executes the specified DDL statements in this unit of work. For DDL batches, this will mean + * that the statements are buffered locally and will be sent to Spanner when {@link + * UnitOfWork#commit()} is called. For {@link SingleUseTransaction}s, this will execute the DDL + * statement directly on Spanner. + * + * @param ddl The DDL statement to execute. + * @return an {@link ApiFuture} that is done when the DDL operation has finished. + */ + ApiFuture executeDdlAsync(CallType callType, ParsedStatement ddl); + + /** + * Execute a DML statement on Spanner. + * + * @param callType Indicates whether the top-level call is a sync or async call. + * @param update The DML statement to execute. + * @param options Update options to apply for the statement. + * @return an {@link ApiFuture} containing the number of records that were + * inserted/updated/deleted by this statement. + */ + ApiFuture executeUpdateAsync( + CallType callType, ParsedStatement update, UpdateOption... options); + + /** + * Execute and/or analyze a DML statement on Spanner. + * + * @param callType Indicates whether the top-level call is a sync or async call. + * @param update The DML statement to analyze/execute. + * @param analyzeMode Specifies the query/analyze mode to use for the DML statement. + * @param options Update options to apply for the statement. + * @return an {@link ApiFuture} containing the {@link ResultSet} that were returned by this + * statement. The {@link ResultSet} will not contain any rows. 
+ */ + ApiFuture analyzeUpdateAsync( + CallType callType, ParsedStatement update, AnalyzeMode analyzeMode, UpdateOption... options); + + /** + * Execute a batch of DML statements on Spanner. + * + * @param callType Indicates whether the top-level call is a sync or async call. + * @param updates The DML statements to execute. + * @param options Update options to apply for the statement. + * @return an {@link ApiFuture} containing an array with the number of records that were + * inserted/updated/deleted per statement. + * @see TransactionContext#batchUpdate(Iterable) + */ + ApiFuture executeBatchUpdateAsync( + CallType callType, Iterable updates, UpdateOption... options); + + /** + * Writes a batch of {@link Mutation}s to Spanner. For {@link ReadWriteTransaction}s, this means + * buffering the {@link Mutation}s locally and writing the {@link Mutation}s to Spanner upon + * {@link UnitOfWork#commit()}. For {@link SingleUseTransaction}s, the {@link Mutation}s will be + * sent directly to Spanner. + * + * @param callType Indicates whether the top-level call is a sync or async call. + * @param mutations The mutations to write. + * @return an {@link ApiFuture} that is done when the {@link Mutation}s have been successfully + * buffered or written to Cloud Spanner. + */ + ApiFuture writeAsync(CallType callType, Iterable mutations); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/package-info.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/package-info.java new file mode 100644 index 000000000000..66ee7a8d72c1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/package-info.java @@ -0,0 +1,22 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Internal API for Google Cloud Spanner. This API may introduce breaking changes without prior + * notice. + */ +@com.google.api.core.InternalApi +package com.google.cloud.spanner.connection; diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/BackupEncryptionConfig.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/BackupEncryptionConfig.java new file mode 100644 index 000000000000..a6e9fc135628 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/BackupEncryptionConfig.java @@ -0,0 +1,23 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.encryption; + +import com.google.api.core.InternalApi; + +/** Marker interface for encryption configurations that can be applied on backups. 
*/ +@InternalApi +public interface BackupEncryptionConfig {} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/CustomerManagedEncryption.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/CustomerManagedEncryption.java new file mode 100644 index 000000000000..b24c47672947 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/CustomerManagedEncryption.java @@ -0,0 +1,70 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.encryption; + +import com.google.spanner.admin.database.v1.EncryptionConfig; +import java.util.Objects; + +/** The data is encrypted with a key provided by the customer. */ +public class CustomerManagedEncryption implements BackupEncryptionConfig, RestoreEncryptionConfig { + + private final String kmsKeyName; + + CustomerManagedEncryption(String kmsKeyName) { + this.kmsKeyName = kmsKeyName; + } + + public String getKmsKeyName() { + return kmsKeyName; + } + + /** + * Returns a {@link CustomerManagedEncryption} instance from the given proto, or null + * if the given proto is the default proto instance (i.e. there is no encryption config). + */ + public static CustomerManagedEncryption fromProtoOrNull(EncryptionConfig proto) { + return proto.equals(EncryptionConfig.getDefaultInstance()) + ? 
null + : new CustomerManagedEncryption(proto.getKmsKeyName()); + } + + public EncryptionConfig toProto() { + return EncryptionConfig.newBuilder().setKmsKeyName(this.getKmsKeyName()).build(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + CustomerManagedEncryption that = (CustomerManagedEncryption) o; + return Objects.equals(kmsKeyName, that.kmsKeyName); + } + + @Override + public int hashCode() { + return Objects.hash(kmsKeyName); + } + + @Override + public String toString() { + return "CustomerManagedEncryption{" + "kmsKeyName='" + kmsKeyName + '\'' + '}'; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/EncryptionConfigProtoMapper.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/EncryptionConfigProtoMapper.java new file mode 100644 index 000000000000..62d51bf76edd --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/EncryptionConfigProtoMapper.java @@ -0,0 +1,100 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.encryption; + +import com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig; +import com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig; +import com.google.spanner.admin.database.v1.EncryptionConfig; +import com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig; + +/** Maps encryption config domain classes to their protobuf counterpart. */ +public class EncryptionConfigProtoMapper { + + /** Returns an encryption config to be used for a database. */ + public static EncryptionConfig encryptionConfig(CustomerManagedEncryption config) { + return EncryptionConfig.newBuilder().setKmsKeyName(config.getKmsKeyName()).build(); + } + + /** Returns an encryption config to be used for a backup. */ + public static CreateBackupEncryptionConfig createBackupEncryptionConfig( + BackupEncryptionConfig config) { + if (config instanceof CustomerManagedEncryption) { + return CreateBackupEncryptionConfig.newBuilder() + .setEncryptionType( + CreateBackupEncryptionConfig.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION) + .setKmsKeyName(((CustomerManagedEncryption) config).getKmsKeyName()) + .build(); + } else if (config instanceof GoogleDefaultEncryption) { + return CreateBackupEncryptionConfig.newBuilder() + .setEncryptionType(CreateBackupEncryptionConfig.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION) + .build(); + } else if (config instanceof UseDatabaseEncryption) { + return CreateBackupEncryptionConfig.newBuilder() + .setEncryptionType(CreateBackupEncryptionConfig.EncryptionType.USE_DATABASE_ENCRYPTION) + .build(); + } else { + throw new IllegalArgumentException("Unknown backup encryption configuration " + config); + } + } + + /** Returns an encryption config to be used for a copy backup. 
*/ + public static CopyBackupEncryptionConfig copyBackupEncryptionConfig( + BackupEncryptionConfig config) { + if (config instanceof CustomerManagedEncryption) { + return CopyBackupEncryptionConfig.newBuilder() + .setEncryptionType(CopyBackupEncryptionConfig.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION) + .setKmsKeyName(((CustomerManagedEncryption) config).getKmsKeyName()) + .build(); + } else if (config instanceof GoogleDefaultEncryption) { + return CopyBackupEncryptionConfig.newBuilder() + .setEncryptionType(CopyBackupEncryptionConfig.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION) + .build(); + } else if (config instanceof UseBackupEncryption) { + return CopyBackupEncryptionConfig.newBuilder() + .setEncryptionType( + CopyBackupEncryptionConfig.EncryptionType.USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION) + .build(); + } else { + throw new IllegalArgumentException("Unknown backup encryption configuration " + config); + } + } + + /** Returns an encryption config to be used for a database restore. */ + public static RestoreDatabaseEncryptionConfig restoreDatabaseEncryptionConfig( + RestoreEncryptionConfig config) { + if (config instanceof CustomerManagedEncryption) { + return RestoreDatabaseEncryptionConfig.newBuilder() + .setEncryptionType( + RestoreDatabaseEncryptionConfig.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION) + .setKmsKeyName(((CustomerManagedEncryption) config).getKmsKeyName()) + .build(); + } else if (config instanceof GoogleDefaultEncryption) { + return RestoreDatabaseEncryptionConfig.newBuilder() + .setEncryptionType( + RestoreDatabaseEncryptionConfig.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION) + .build(); + } else if (config instanceof UseBackupEncryption) { + return RestoreDatabaseEncryptionConfig.newBuilder() + .setEncryptionType( + RestoreDatabaseEncryptionConfig.EncryptionType + .USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION) + .build(); + } else { + throw new IllegalArgumentException("Unknown restore encryption configuration " + config); + } + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/EncryptionConfigs.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/EncryptionConfigs.java new file mode 100644 index 000000000000..6f77da2c872f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/EncryptionConfigs.java @@ -0,0 +1,45 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.encryption; + +import com.google.api.client.util.Preconditions; + +/** Encryption configuration factory. */ +public class EncryptionConfigs { + + /** Returns a customer managed encryption configuration for the given key. */ + public static CustomerManagedEncryption customerManagedEncryption(String kmsKeyName) { + Preconditions.checkArgument( + kmsKeyName != null, "Customer managed encryption key name must not be null"); + return new CustomerManagedEncryption(kmsKeyName); + } + + /** Returns google default encryption configuration. */ + public static GoogleDefaultEncryption googleDefaultEncryption() { + return GoogleDefaultEncryption.INSTANCE; + } + + /** Returns use database encryption configuration. */ + public static UseDatabaseEncryption useDatabaseEncryption() { + return UseDatabaseEncryption.INSTANCE; + } + + /** Returns use backup encryption configuration. 
*/ + public static UseBackupEncryption useBackupEncryption() { + return UseBackupEncryption.INSTANCE; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/EncryptionInfo.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/EncryptionInfo.java new file mode 100644 index 000000000000..f811cfc10107 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/EncryptionInfo.java @@ -0,0 +1,92 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.encryption; + +import com.google.common.annotations.VisibleForTesting; +import com.google.rpc.Status; +import java.util.Objects; + +/** Represents the encryption information for a Cloud Spanner backup. 
*/ +public class EncryptionInfo { + + private final String kmsKeyVersion; + private final com.google.spanner.admin.database.v1.EncryptionInfo.Type encryptionType; + private final Status encryptionStatus; + + public EncryptionInfo(com.google.spanner.admin.database.v1.EncryptionInfo proto) { + this(proto.getKmsKeyVersion(), proto.getEncryptionType(), proto.getEncryptionStatus()); + } + + @VisibleForTesting + public EncryptionInfo( + String kmsKeyVersion, + com.google.spanner.admin.database.v1.EncryptionInfo.Type encryptionType, + Status encryptionStatus) { + this.kmsKeyVersion = kmsKeyVersion; + this.encryptionType = encryptionType; + this.encryptionStatus = encryptionStatus; + } + + /** + * Returns a {@link EncryptionInfo} instance from the given proto, or null if the + * given proto is the default proto instance (i.e. there is no encryption info). + */ + public static EncryptionInfo fromProtoOrNull( + com.google.spanner.admin.database.v1.EncryptionInfo proto) { + return proto.equals(com.google.spanner.admin.database.v1.EncryptionInfo.getDefaultInstance()) + ? 
null + : new EncryptionInfo(proto); + } + + public String getKmsKeyVersion() { + return kmsKeyVersion; + } + + public com.google.spanner.admin.database.v1.EncryptionInfo.Type getEncryptionType() { + return encryptionType; + } + + public Status getEncryptionStatus() { + return encryptionStatus; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + EncryptionInfo that = (EncryptionInfo) o; + return Objects.equals(kmsKeyVersion, that.kmsKeyVersion) + && encryptionType == that.encryptionType + && Objects.equals(encryptionStatus, that.encryptionStatus); + } + + @Override + public int hashCode() { + return Objects.hash(kmsKeyVersion, encryptionType, encryptionStatus); + } + + @Override + public String toString() { + return String.format( + "EncryptionInfo[kmsKeyVersion=%s,encryptionType=%s,encryptionStatus=%s]", + kmsKeyVersion, encryptionType, encryptionStatus); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/GoogleDefaultEncryption.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/GoogleDefaultEncryption.java new file mode 100644 index 000000000000..fa03da8bd517 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/GoogleDefaultEncryption.java @@ -0,0 +1,30 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.encryption; + +/** The data is encrypted with a key that is fully managed by Google. */ +public class GoogleDefaultEncryption implements BackupEncryptionConfig, RestoreEncryptionConfig { + + static final GoogleDefaultEncryption INSTANCE = new GoogleDefaultEncryption(); + + private GoogleDefaultEncryption() {} + + @Override + public String toString() { + return "GoogleDefaultEncryption{}"; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/RestoreEncryptionConfig.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/RestoreEncryptionConfig.java new file mode 100644 index 000000000000..b23fbe69d04c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/RestoreEncryptionConfig.java @@ -0,0 +1,23 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.encryption; + +import com.google.api.core.InternalApi; + +/** Marker interface for encryption configurations that can be applied on restores. 
*/ +@InternalApi +public interface RestoreEncryptionConfig {} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/UseBackupEncryption.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/UseBackupEncryption.java new file mode 100644 index 000000000000..b3604597ab6b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/UseBackupEncryption.java @@ -0,0 +1,30 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.encryption; + +/** The data is encrypted with the same configuration as specified by the backup being restored. 
*/ +public class UseBackupEncryption implements RestoreEncryptionConfig { + + static final UseBackupEncryption INSTANCE = new UseBackupEncryption(); + + private UseBackupEncryption() {} + + @Override + public String toString() { + return "UseBackupEncryption{}"; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/UseDatabaseEncryption.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/UseDatabaseEncryption.java new file mode 100644 index 000000000000..1fc7233496d4 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/encryption/UseDatabaseEncryption.java @@ -0,0 +1,33 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.encryption; + +/** + * The data is encrypted with the same configuration as specified by the source database for a + * backup. 
+ */ +public class UseDatabaseEncryption implements BackupEncryptionConfig { + + static final UseDatabaseEncryption INSTANCE = new UseDatabaseEncryption(); + + private UseDatabaseEncryption() {} + + @Override + public String toString() { + return "UseDatabaseEncryption{}"; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/nativeimage/SpannerFeature.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/nativeimage/SpannerFeature.java new file mode 100644 index 000000000000..60b41620fd73 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/nativeimage/SpannerFeature.java @@ -0,0 +1,143 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.nativeimage; + +import com.google.api.gax.nativeimage.NativeImageUtils; +import org.graalvm.nativeimage.hosted.Feature; + +/** Registers Spanner library classes for reflection. 
*/ +final class SpannerFeature implements Feature { + + private static final String SPANNER_CLASS = "com.google.spanner.v1.SpannerGrpc"; + private static final String SPANNER_TEST_CLASS = "com.google.cloud.spanner.GceTestEnvConfig"; + private static final String MOCK_CLASS = "com.google.cloud.spanner.MockDatabaseAdminServiceImpl"; + private static final String CLIENT_SIDE_IMPL_CLASS = + "com.google.cloud.spanner.connection.ClientSideStatementImpl"; + private static final String CLIENT_SIDE_VALUE_CONVERTER = + "com.google.cloud.spanner.connection.ClientSideStatementValueConverters"; + private static final String CONNECTION_IMPL = + "com.google.cloud.spanner.connection.ConnectionImpl"; + private static final String CLIENT_SIDE_STATEMENTS = + "com.google.cloud.spanner.connection.ClientSideStatements"; + private static final String CONNECTION_STATEMENT_EXECUTOR = + "com.google.cloud.spanner.connection.ConnectionStatementExecutor"; + private static final String CLIENT_SIDE_STATEMENT_NO_PARAM_EXECUTOR = + "com.google.cloud.spanner.connection.ClientSideStatementNoParamExecutor"; + private static final String CLIENT_SIDE_STATEMENT_SET_EXECUTOR = + "com.google.cloud.spanner.connection.ClientSideStatementSetExecutor"; + private static final String CLIENT_SIDE_STATEMENT_BEGIN_EXECUTOR = + "com.google.cloud.spanner.connection.ClientSideStatementBeginExecutor"; + private static final String CLIENT_SIDE_STATEMENT_PG_EXECUTOR = + "com.google.cloud.spanner.connection.ClientSideStatementPgBeginExecutor"; + private static final String CLIENT_SIDE_STATEMENT_EXPLAIN_EXECUTOR = + "com.google.cloud.spanner.connection.ClientSideStatementExplainExecutor"; + private static final String CLIENT_SIDE_STATEMENT_PARTITION_EXECUTOR = + "com.google.cloud.spanner.connection.ClientSideStatementPartitionExecutor"; + private static final String CLIENT_SIDE_STATEMENT_RUN_PARTITION_EXECUTOR = + "com.google.cloud.spanner.connection.ClientSideStatementRunPartitionExecutor"; + private static final String 
CLIENT_SIDE_STATEMENT_RUN_PARTITIONED_QUERY_EXECUTOR = + "com.google.cloud.spanner.connection.ClientSideStatementRunPartitionedQueryExecutor"; + private static final String ABSTRACT_STATEMENT_PARSER = + "com.google.cloud.spanner.connection.AbstractStatementParser"; + private static final String STATEMENT_PARSER = + "com.google.cloud.spanner.connection.SpannerStatementParser"; + private static final String POSTGRESQL_STATEMENT_PARSER = + "com.google.cloud.spanner.connection.PostgreSQLStatementParser"; + private static final String STATEMENT_RESULT = + "com.google.cloud.spanner.connection.StatementResult$ResultType"; + + @Override + public void beforeAnalysis(BeforeAnalysisAccess access) { + registerSpannerTestClasses(access); + if (access.findClassByName(CLIENT_SIDE_IMPL_CLASS) != null) { + NativeImageUtils.registerClassHierarchyForReflection(access, CLIENT_SIDE_IMPL_CLASS); + } + if (access.findClassByName(CLIENT_SIDE_STATEMENT_NO_PARAM_EXECUTOR) != null) { + NativeImageUtils.registerClassForReflection(access, CLIENT_SIDE_STATEMENT_NO_PARAM_EXECUTOR); + } + if (access.findClassByName(CLIENT_SIDE_STATEMENT_BEGIN_EXECUTOR) != null) { + NativeImageUtils.registerClassForReflection(access, CLIENT_SIDE_STATEMENT_BEGIN_EXECUTOR); + } + if (access.findClassByName(CLIENT_SIDE_STATEMENT_PG_EXECUTOR) != null) { + NativeImageUtils.registerClassForReflection(access, CLIENT_SIDE_STATEMENT_PG_EXECUTOR); + } + if (access.findClassByName(CLIENT_SIDE_STATEMENT_EXPLAIN_EXECUTOR) != null) { + NativeImageUtils.registerClassForReflection(access, CLIENT_SIDE_STATEMENT_EXPLAIN_EXECUTOR); + } + if (access.findClassByName(CLIENT_SIDE_STATEMENT_SET_EXECUTOR) != null) { + NativeImageUtils.registerClassForReflection(access, CLIENT_SIDE_STATEMENT_SET_EXECUTOR); + } + if (access.findClassByName(CLIENT_SIDE_STATEMENT_PARTITION_EXECUTOR) != null) { + NativeImageUtils.registerClassForReflection(access, CLIENT_SIDE_STATEMENT_PARTITION_EXECUTOR); + } + if 
(access.findClassByName(CLIENT_SIDE_STATEMENT_RUN_PARTITION_EXECUTOR) != null) { + NativeImageUtils.registerClassForReflection( + access, CLIENT_SIDE_STATEMENT_RUN_PARTITION_EXECUTOR); + } + if (access.findClassByName(CLIENT_SIDE_STATEMENT_RUN_PARTITIONED_QUERY_EXECUTOR) != null) { + NativeImageUtils.registerClassForReflection( + access, CLIENT_SIDE_STATEMENT_RUN_PARTITIONED_QUERY_EXECUTOR); + } + if (access.findClassByName(CLIENT_SIDE_VALUE_CONVERTER) != null) { + NativeImageUtils.registerClassHierarchyForReflection(access, CLIENT_SIDE_VALUE_CONVERTER); + } + if (access.findClassByName(CLIENT_SIDE_STATEMENTS) != null) { + NativeImageUtils.registerClassForReflection(access, CLIENT_SIDE_STATEMENTS); + } + if (access.findClassByName(CONNECTION_STATEMENT_EXECUTOR) != null) { + NativeImageUtils.registerClassForReflection(access, CONNECTION_STATEMENT_EXECUTOR); + } + if (access.findClassByName(CONNECTION_IMPL) != null) { + NativeImageUtils.registerClassForReflection(access, CONNECTION_IMPL); + } + if (access.findClassByName(ABSTRACT_STATEMENT_PARSER) != null) { + NativeImageUtils.registerClassHierarchyForReflection(access, ABSTRACT_STATEMENT_PARSER); + NativeImageUtils.registerClassForReflection(access, "com.google.cloud.spanner.Dialect"); + } + if (access.findClassByName(STATEMENT_PARSER) != null) { + NativeImageUtils.registerConstructorsForReflection(access, STATEMENT_PARSER); + } + if (access.findClassByName(POSTGRESQL_STATEMENT_PARSER) != null) { + NativeImageUtils.registerConstructorsForReflection(access, POSTGRESQL_STATEMENT_PARSER); + } + if (access.findClassByName(STATEMENT_RESULT) != null) { + NativeImageUtils.registerClassForReflection(access, STATEMENT_RESULT); + } + + Class spannerClass = access.findClassByName(SPANNER_CLASS); + if (spannerClass != null) { + NativeImageUtils.registerClassHierarchyForReflection( + access, "com.google.spanner.admin.database.v1.Database"); + NativeImageUtils.registerClassHierarchyForReflection( + access, 
"com.google.spanner.admin.instance.v1.Instance"); + NativeImageUtils.registerClassForReflection( + access, "com.google.spanner.admin.database.v1.RestoreInfo"); + } + } + + private void registerSpannerTestClasses(BeforeAnalysisAccess access) { + Class spannerTestClass = access.findClassByName(SPANNER_TEST_CLASS); + if (spannerTestClass != null) { + NativeImageUtils.registerConstructorsForReflection(access, SPANNER_TEST_CLASS); + } + Class mockClass = access.findClassByName(MOCK_CLASS); + if (mockClass != null) { + NativeImageUtils.registerClassForReflection( + access, "com.google.cloud.spanner.MockDatabaseAdminServiceImpl$MockBackup"); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/package-info.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/package-info.java new file mode 100644 index 000000000000..1158fb6cd4f2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/package-info.java @@ -0,0 +1,22 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A client for Cloud Spanner - A no-compromise relational database service. 
+ * + * @see Cloud Spanner + */ +package com.google.cloud.spanner; diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/SpannerRpcFactory.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/SpannerRpcFactory.java new file mode 100644 index 000000000000..275109747cf3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/SpannerRpcFactory.java @@ -0,0 +1,23 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi; + +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spi.ServiceRpcFactory; + +/** Factory to create instances of {@code SpannerRpc}. */ +public interface SpannerRpcFactory extends ServiceRpcFactory {} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/ChannelEndpoint.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/ChannelEndpoint.java new file mode 100644 index 000000000000..cd6b386dc8a7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/ChannelEndpoint.java @@ -0,0 +1,66 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import com.google.api.core.InternalApi; +import io.grpc.ManagedChannel; + +/** + * Represents a Spanner server endpoint for location-aware routing. + * + *

Each instance wraps a gRPC {@link ManagedChannel} connected to a specific Spanner server. The + * {@link ChannelEndpointCache} creates and caches these instances. + * + *

Implementations must be thread-safe as instances may be shared across multiple concurrent + * operations. + * + * @see ChannelEndpointCache + */ +@InternalApi +public interface ChannelEndpoint { + + /** + * Returns the network address of this server. + * + * @return the server address in "host:port" format + */ + String getAddress(); + + /** + * Returns whether this server is ready to accept RPCs. + * + *

A server is considered unhealthy if: + * + *

    + *
  • The underlying channel is shutdown or terminated + *
  • The channel is in a transient failure state + *
+ * + * @return true if the server is healthy and ready to accept RPCs + */ + boolean isHealthy(); + + /** + * Returns the gRPC channel for making RPCs to this server. + * + *

The returned channel is managed by the {@link ChannelEndpointCache} and should not be shut + * down directly by callers. + * + * @return the managed channel for this server + */ + ManagedChannel getChannel(); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/ChannelEndpointCache.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/ChannelEndpointCache.java new file mode 100644 index 000000000000..879ed546f2c2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/ChannelEndpointCache.java @@ -0,0 +1,79 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import com.google.api.core.InternalApi; + +/** + * Cache for server connections used in location-aware routing. + * + *

Implementations are expected to cache {@link ChannelEndpoint} instances such that repeated + * calls with the same address return the same instance. This allows routing components to + * efficiently manage server references. + * + *

Implementations must be thread-safe. Multiple threads may concurrently call {@link + * #get(String)} with different addresses. + */ +@InternalApi +public interface ChannelEndpointCache { + + /** + * Returns the default channel endpoint. + * + *

The default channel is the original endpoint configured in {@link + * com.google.cloud.spanner.SpannerOptions}. It is used as a fallback when the location cache does + * not have routing information for a request. + * + * @return the default channel, never null + */ + ChannelEndpoint defaultChannel(); + + /** + * Returns a cached channel for the given address, creating it if needed. + * + *

If a channel for this address already exists in the cache, the cached instance is returned. + * Otherwise, a new server connection is created and cached. + * + * @param address the server address in "host:port" format + * @return a channel instance for the address, never null + * @throws com.google.cloud.spanner.SpannerException if the channel cannot be created + */ + ChannelEndpoint get(String address); + + /** + * Evicts a server connection from the cache and gracefully shuts down its channel. + * + *

This method should be called when a server becomes unhealthy or is no longer needed. The + * channel shutdown is graceful: existing RPCs are allowed to complete, but new RPCs will not be + * accepted on this channel. + * + *

If the address is not in the cache, this method does nothing. + * + * @param address the server address to evict + */ + void evict(String address); + + /** + * Shuts down all cached server connections. + * + *

This method should be called when the Spanner client is closed to release all resources. + * Each channel is shut down gracefully, allowing in-flight RPCs to complete. + * + *

After calling this method, the cache should not be used to create new connections. + */ + void shutdown(); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/ChannelEndpointCacheFactory.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/ChannelEndpointCacheFactory.java new file mode 100644 index 000000000000..0f122e4b7655 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/ChannelEndpointCacheFactory.java @@ -0,0 +1,27 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import com.google.api.core.InternalApi; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import java.io.IOException; + +/** Factory for creating {@link ChannelEndpointCache} instances. 
*/ +@InternalApi +public interface ChannelEndpointCacheFactory { + ChannelEndpointCache create(InstantiatingGrpcChannelProvider baseProvider) throws IOException; +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/ChannelFinder.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/ChannelFinder.java new file mode 100644 index 000000000000..6c4554ea7764 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/ChannelFinder.java @@ -0,0 +1,158 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import com.google.api.core.InternalApi; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CacheUpdate; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.RoutingHint; +import com.google.spanner.v1.TransactionOptions; +import com.google.spanner.v1.TransactionSelector; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Finds a server for a request using location-aware routing metadata. + * + *

This component is per-database and maintains both recipe and range caches. + */ +@InternalApi +public final class ChannelFinder { + private final Object updateLock = new Object(); + private final AtomicLong databaseId = new AtomicLong(); + private final KeyRecipeCache recipeCache = new KeyRecipeCache(); + private final KeyRangeCache rangeCache; + + public ChannelFinder(ChannelEndpointCache endpointCache) { + this.rangeCache = new KeyRangeCache(Objects.requireNonNull(endpointCache)); + } + + void useDeterministicRandom() { + rangeCache.useDeterministicRandom(); + } + + public void update(CacheUpdate update) { + synchronized (updateLock) { + long currentId = databaseId.get(); + if (currentId != update.getDatabaseId()) { + if (currentId != 0) { + recipeCache.clear(); + rangeCache.clear(); + } + databaseId.set(update.getDatabaseId()); + } + if (update.hasKeyRecipes()) { + recipeCache.addRecipes(update.getKeyRecipes()); + } + rangeCache.addRanges(update); + } + } + + public ChannelEndpoint findServer(ReadRequest.Builder reqBuilder) { + return findServer(reqBuilder, preferLeader(reqBuilder.getTransaction())); + } + + public ChannelEndpoint findServer(ReadRequest.Builder reqBuilder, boolean preferLeader) { + recipeCache.computeKeys(reqBuilder); + return fillRoutingHint( + preferLeader, + KeyRangeCache.RangeMode.COVERING_SPLIT, + reqBuilder.getDirectedReadOptions(), + reqBuilder.getRoutingHintBuilder()); + } + + public ChannelEndpoint findServer(ExecuteSqlRequest.Builder reqBuilder) { + return findServer(reqBuilder, preferLeader(reqBuilder.getTransaction())); + } + + public ChannelEndpoint findServer(ExecuteSqlRequest.Builder reqBuilder, boolean preferLeader) { + recipeCache.computeKeys(reqBuilder); + return fillRoutingHint( + preferLeader, + KeyRangeCache.RangeMode.PICK_RANDOM, + reqBuilder.getDirectedReadOptions(), + reqBuilder.getRoutingHintBuilder()); + } + + public ChannelEndpoint findServer(BeginTransactionRequest.Builder reqBuilder) { + if 
(!reqBuilder.hasMutationKey()) { + return null; + } + TargetRange target = recipeCache.mutationToTargetRange(reqBuilder.getMutationKey()); + if (target == null) { + return null; + } + RoutingHint.Builder hintBuilder = RoutingHint.newBuilder(); + hintBuilder.setKey(target.start); + if (!target.limit.isEmpty()) { + hintBuilder.setLimitKey(target.limit); + } + return fillRoutingHint( + preferLeader(reqBuilder.getOptions()), + KeyRangeCache.RangeMode.COVERING_SPLIT, + DirectedReadOptions.getDefaultInstance(), + hintBuilder); + } + + private ChannelEndpoint fillRoutingHint( + TransactionSelector transactionSelector, + DirectedReadOptions directedReadOptions, + KeyRangeCache.RangeMode rangeMode, + RoutingHint.Builder hintBuilder) { + return fillRoutingHint( + preferLeader(transactionSelector), rangeMode, directedReadOptions, hintBuilder); + } + + private ChannelEndpoint fillRoutingHint( + boolean preferLeader, + KeyRangeCache.RangeMode rangeMode, + DirectedReadOptions directedReadOptions, + RoutingHint.Builder hintBuilder) { + long id = databaseId.get(); + if (id == 0) { + return null; + } + hintBuilder.setDatabaseId(id); + return rangeCache.fillRoutingHint(preferLeader, rangeMode, directedReadOptions, hintBuilder); + } + + private static boolean preferLeader(TransactionSelector selector) { + switch (selector.getSelectorCase()) { + case BEGIN: + return !selector.getBegin().hasReadOnly() || selector.getBegin().getReadOnly().getStrong(); + case SINGLE_USE: + if (!selector.getSingleUse().hasReadOnly()) { + return true; + } + return selector.getSingleUse().getReadOnly().getStrong(); + case ID: + case SELECTOR_NOT_SET: + default: + return true; + } + } + + private static boolean preferLeader(TransactionOptions options) { + if (options == null || !options.hasReadOnly()) { + return true; + } + return options.getReadOnly().getStrong(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/EncodingInterceptor.java 
b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/EncodingInterceptor.java
new file mode 100644
index 000000000000..a30135533a81
--- /dev/null
+++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/EncodingInterceptor.java
@@ -0,0 +1,50 @@
/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.google.cloud.spanner.spi.v1;

import io.grpc.CallOptions;
import io.grpc.Channel;
import io.grpc.ClientCall;
import io.grpc.ClientInterceptor;
import io.grpc.ForwardingClientCall;
import io.grpc.Metadata;
import io.grpc.Metadata.Key;
import io.grpc.MethodDescriptor;

/**
 * A {@link ClientInterceptor} that attaches an {@code x-response-encoding} header carrying the
 * configured encoding value to every outgoing call.
 *
 * <p>NOTE(review): presumably the server uses this header to choose a response encoding; the
 * header's server-side semantics are not visible here — confirm against the Spanner backend
 * contract.
 */
class EncodingInterceptor implements ClientInterceptor {
  private static final String RESPONSE_ENCODING_KEY_NAME = "x-response-encoding";
  private static final Key<String> RESPONSE_ENCODING_KEY =
      Metadata.Key.of(RESPONSE_ENCODING_KEY_NAME, Metadata.ASCII_STRING_MARSHALLER);

  // The encoding value placed verbatim into the header for every call.
  private final String encoding;

  EncodingInterceptor(String encoding) {
    this.encoding = encoding;
  }

  /**
   * Wraps the call so that {@link ClientCall#start} injects the encoding header before the call
   * starts. Parameterized with {@code <ReqT, RespT>} instead of raw types: erasure-identical to
   * the raw form, but preserves compile-time type safety and avoids unchecked warnings.
   */
  @Override
  public <ReqT, RespT> ClientCall<ReqT, RespT> interceptCall(
      MethodDescriptor<ReqT, RespT> method, CallOptions callOptions, Channel next) {
    return new ForwardingClientCall.SimpleForwardingClientCall<ReqT, RespT>(
        next.newCall(method, callOptions)) {
      @Override
      public void start(Listener<RespT> responseListener, Metadata headers) {
        headers.put(RESPONSE_ENCODING_KEY, encoding);
        super.start(responseListener, headers);
      }
    };
  }
}
diff --git
a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/GapicSpannerRpc.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/GapicSpannerRpc.java new file mode 100644 index 000000000000..ee90b72ff4ba --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/GapicSpannerRpc.java @@ -0,0 +1,2519 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import static com.google.cloud.spanner.SpannerExceptionFactory.asSpannerException; +import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException; +import static com.google.cloud.spanner.ThreadFactoryUtil.tryCreateVirtualThreadPerTaskExecutor; +import static com.google.cloud.spanner.XGoogSpannerRequestId.REQUEST_ID_CALL_OPTIONS_KEY; + +import com.google.api.core.ApiFunction; +import com.google.api.core.ApiFuture; +import com.google.api.core.InternalApi; +import com.google.api.core.NanoClock; +import com.google.api.gax.core.CredentialsProvider; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; 
+import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.retrying.TimedAttemptSettings; +import com.google.api.gax.rpc.AlreadyExistsException; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.FixedHeaderProvider; +import com.google.api.gax.rpc.HeaderProvider; +import com.google.api.gax.rpc.InstantiatingWatchdogProvider; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.ResponseObserver; +import com.google.api.gax.rpc.ServerStream; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StatusCode.Code; +import com.google.api.gax.rpc.StreamController; +import com.google.api.gax.rpc.TransportChannel; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.api.gax.rpc.UnavailableException; +import com.google.api.gax.rpc.WatchdogProvider; +import com.google.api.pathtemplate.PathTemplate; +import com.google.auth.Credentials; +import com.google.cloud.RetryHelper; +import com.google.cloud.RetryHelper.RetryHelperException; +import com.google.cloud.grpc.GcpManagedChannel; +import com.google.cloud.grpc.GcpManagedChannelBuilder; +import com.google.cloud.grpc.GcpManagedChannelOptions; +import com.google.cloud.grpc.GcpManagedChannelOptions.GcpMetricsOptions; +import com.google.cloud.grpc.GrpcTransportOptions; +import com.google.cloud.grpc.fallback.GcpFallbackChannel; +import com.google.cloud.grpc.fallback.GcpFallbackChannelOptions; +import com.google.cloud.grpc.fallback.GcpFallbackOpenTelemetry; +import com.google.cloud.spanner.AdminRequestsPerMinuteExceededException; +import com.google.cloud.spanner.BackupId; 
+import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Restore; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.SpannerOptions.CallContextConfigurator; +import com.google.cloud.spanner.SpannerOptions.CallCredentialsProvider; +import com.google.cloud.spanner.XGoogSpannerRequestId; +import com.google.cloud.spanner.XGoogSpannerRequestId.RequestIdCreator; +import com.google.cloud.spanner.admin.database.v1.stub.DatabaseAdminStub; +import com.google.cloud.spanner.admin.database.v1.stub.DatabaseAdminStubSettings; +import com.google.cloud.spanner.admin.database.v1.stub.GrpcDatabaseAdminCallableFactory; +import com.google.cloud.spanner.admin.database.v1.stub.GrpcDatabaseAdminStub; +import com.google.cloud.spanner.admin.instance.v1.stub.GrpcInstanceAdminStub; +import com.google.cloud.spanner.admin.instance.v1.stub.InstanceAdminStub; +import com.google.cloud.spanner.admin.instance.v1.stub.InstanceAdminStubSettings; +import com.google.cloud.spanner.encryption.EncryptionConfigProtoMapper; +import com.google.cloud.spanner.v1.stub.SpannerStub; +import com.google.cloud.spanner.v1.stub.SpannerStubSettings; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.io.Resources; +import com.google.common.util.concurrent.RateLimiter; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.GetPolicyOptions; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; 
+import com.google.longrunning.CancelOperationRequest; +import com.google.longrunning.GetOperationRequest; +import com.google.longrunning.Operation; +import com.google.longrunning.OperationsGrpc; +import com.google.protobuf.ByteString; +import com.google.protobuf.Empty; +import com.google.protobuf.FieldMask; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.Message; +import com.google.protobuf.Timestamp; +import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.CopyBackupMetadata; +import com.google.spanner.admin.database.v1.CopyBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupMetadata; +import com.google.spanner.admin.database.v1.CreateBackupRequest; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.DatabaseAdminGrpc; +import com.google.spanner.admin.database.v1.DatabaseRole; +import com.google.spanner.admin.database.v1.DeleteBackupRequest; +import com.google.spanner.admin.database.v1.DropDatabaseRequest; +import com.google.spanner.admin.database.v1.GetBackupRequest; +import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; +import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; +import com.google.spanner.admin.database.v1.GetDatabaseRequest; +import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; +import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; +import com.google.spanner.admin.database.v1.ListBackupsRequest; +import com.google.spanner.admin.database.v1.ListBackupsResponse; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse; +import com.google.spanner.admin.database.v1.ListDatabaseRolesRequest; +import 
com.google.spanner.admin.database.v1.ListDatabaseRolesResponse; +import com.google.spanner.admin.database.v1.ListDatabasesRequest; +import com.google.spanner.admin.database.v1.ListDatabasesResponse; +import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; +import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; +import com.google.spanner.admin.database.v1.UpdateBackupRequest; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; +import com.google.spanner.admin.database.v1.UpdateDatabaseMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseRequest; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.CreateInstanceMetadata; +import com.google.spanner.admin.instance.v1.CreateInstanceRequest; +import com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.DeleteInstanceRequest; +import com.google.spanner.admin.instance.v1.GetInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.GetInstanceRequest; +import com.google.spanner.admin.instance.v1.Instance; +import com.google.spanner.admin.instance.v1.InstanceAdminGrpc; +import com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest; +import com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse; +import com.google.spanner.admin.instance.v1.ListInstancesRequest; +import com.google.spanner.admin.instance.v1.ListInstancesResponse; +import com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata; +import 
com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstanceRequest; +import com.google.spanner.v1.BatchCreateSessionsRequest; +import com.google.spanner.v1.BatchWriteRequest; +import com.google.spanner.v1.BatchWriteResponse; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.CommitResponse; +import com.google.spanner.v1.CreateSessionRequest; +import com.google.spanner.v1.DeleteSessionRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteBatchDmlResponse; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.PartitionQueryRequest; +import com.google.spanner.v1.PartitionReadRequest; +import com.google.spanner.v1.PartitionResponse; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.Session; +import com.google.spanner.v1.SpannerGrpc; +import com.google.spanner.v1.Transaction; +import io.grpc.CallCredentials; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.Context; +import io.grpc.ForwardingChannelBuilder2; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.MethodDescriptor; +import io.grpc.auth.MoreCallCredentials; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.net.URLDecoder; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import 
java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import javax.annotation.Nullable; + +/** Implementation of Cloud Spanner remote calls using Gapic libraries. */ +@InternalApi +public class GapicSpannerRpc implements SpannerRpc { + private static final PathTemplate PROJECT_NAME_TEMPLATE = + PathTemplate.create("projects/{project}"); + public static final String EXPERIMENTAL_LOCATION_API_ENV_VAR = + "GOOGLE_SPANNER_EXPERIMENTAL_LOCATION_API"; + private static final PathTemplate OPERATION_NAME_TEMPLATE = + PathTemplate.create("{database=projects/*/instances/*/databases/*}/operations/{operation}"); + private static final int MAX_MESSAGE_SIZE = 256 * 1024 * 1024; + private static final int MAX_METADATA_SIZE = 32 * 1024; // bytes + private static final String PROPERTY_TIMEOUT_SECONDS = + "com.google.cloud.spanner.watchdogTimeoutSeconds"; + private static final String PROPERTY_PERIOD_SECONDS = + "com.google.cloud.spanner.watchdogPeriodSeconds"; + private static final int DEFAULT_TIMEOUT_SECONDS = 30 * 60; + private static final int DEFAULT_PERIOD_SECONDS = 10; + private static final int GRPC_KEEPALIVE_SECONDS = 2 * 60; + private static final String USER_AGENT_KEY = "user-agent"; + private static final String CLIENT_LIBRARY_LANGUAGE = "spanner-java"; + public static final String DEFAULT_USER_AGENT = + CLIENT_LIBRARY_LANGUAGE + "/" + 
GaxProperties.getLibraryVersion(GapicSpannerRpc.class); + public static boolean DIRECTPATH_CHANNEL_CREATED = false; + private static final String API_FILE = "grpc-gcp-apiconfig.json"; + + private final RequestIdCreator requestIdCreator = new RequestIdCreatorImpl(); + private boolean rpcIsClosed; + private final SpannerStub spannerStub; + private final RetrySettings executeQueryRetrySettings; + private final Set executeQueryRetryableCodes; + private final RetrySettings readRetrySettings; + private final Set readRetryableCodes; + private final RetrySettings commitRetrySettings; + private final SpannerStub partitionedDmlStub; + private final RetrySettings partitionedDmlRetrySettings; + private final InstanceAdminStubSettings instanceAdminStubSettings; + private final InstanceAdminStub instanceAdminStub; + private final DatabaseAdminStubSettings databaseAdminStubSettings; + private final DatabaseAdminStub databaseAdminStub; + private final String projectId; + private final String projectName; + private final SpannerMetadataProvider metadataProvider; + private final CallCredentialsProvider callCredentialsProvider; + private final String compressorName; + private final Duration waitTimeout = + systemProperty(PROPERTY_TIMEOUT_SECONDS, DEFAULT_TIMEOUT_SECONDS); + private final Duration idleTimeout = + systemProperty(PROPERTY_TIMEOUT_SECONDS, DEFAULT_TIMEOUT_SECONDS); + private final Duration checkInterval = + systemProperty(PROPERTY_PERIOD_SECONDS, DEFAULT_PERIOD_SECONDS); + + private final ScheduledExecutorService spannerWatchdog; + + private final ConcurrentLinkedDeque responseObservers = + new ConcurrentLinkedDeque<>(); + + private final boolean throttleAdministrativeRequests; + private final RetrySettings retryAdministrativeRequestsSettings; + private static final double ADMINISTRATIVE_REQUESTS_RATE_LIMIT = 1.0D; + private static final ConcurrentMap ADMINISTRATIVE_REQUESTS_RATE_LIMITERS = + new ConcurrentHashMap<>(); + private final boolean leaderAwareRoutingEnabled; + 
private final boolean endToEndTracingEnabled; + private final int numChannels; + private final boolean isGrpcGcpExtensionEnabled; + private final boolean isDynamicChannelPoolEnabled; + @Nullable private final KeyAwareChannel keyAwareChannel; + + private final GrpcCallContext baseGrpcCallContext; + + public static GapicSpannerRpc create(SpannerOptions options) { + return new GapicSpannerRpc(options); + } + + public GapicSpannerRpc(final SpannerOptions options) { + this(options, true); + } + + GapicSpannerRpc(final SpannerOptions options, boolean initializeStubs) { + this.projectId = options.getProjectId(); + String projectNameStr = PROJECT_NAME_TEMPLATE.instantiate("project", this.projectId); + try { + // Fix use cases where projectName contains special characters. + // This would happen when projects are under an organization. + projectNameStr = URLDecoder.decode(projectNameStr, StandardCharsets.UTF_8.toString()); + } catch (UnsupportedEncodingException e) { // Ignored. + } + this.projectName = projectNameStr; + this.throttleAdministrativeRequests = options.isAutoThrottleAdministrativeRequests(); + if (throttleAdministrativeRequests) { + ADMINISTRATIVE_REQUESTS_RATE_LIMITERS.putIfAbsent( + projectNameStr, RateLimiter.create(ADMINISTRATIVE_REQUESTS_RATE_LIMIT)); + } + this.retryAdministrativeRequestsSettings = options.getRetryAdministrativeRequestsSettings(); + + // create a metadataProvider which combines both internal headers and + // per-method-call extra headers for channelProvider to inject the headers + // for rpc calls + ApiClientHeaderProvider.Builder internalHeaderProviderBuilder = + ApiClientHeaderProvider.newBuilder(); + ApiClientHeaderProvider internalHeaderProvider = + internalHeaderProviderBuilder + .setClientLibToken( + options.getClientLibToken(), GaxProperties.getLibraryVersion(options.getClass())) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()) + .build(); + + final HeaderProvider 
mergedHeaderProvider = + options.getMergedHeaderProvider(internalHeaderProvider); + final HeaderProvider headerProviderWithUserAgent = + headerProviderWithUserAgentFrom(mergedHeaderProvider); + + this.metadataProvider = + SpannerMetadataProvider.create( + headerProviderWithUserAgent.getHeaders(), + internalHeaderProviderBuilder.getResourceHeaderKey()); + this.callCredentialsProvider = options.getCallCredentialsProvider(); + this.compressorName = options.getCompressorName(); + this.leaderAwareRoutingEnabled = options.isLeaderAwareRoutingEnabled(); + this.endToEndTracingEnabled = options.isEndToEndTracingEnabled(); + this.numChannels = options.getNumChannels(); + this.isGrpcGcpExtensionEnabled = options.isGrpcGcpExtensionEnabled(); + this.isDynamicChannelPoolEnabled = options.isDynamicChannelPoolEnabled(); + this.baseGrpcCallContext = createBaseCallContext(); + + boolean isEnableDirectAccess = options.isEnableDirectAccess(); + + if (initializeStubs) { + CredentialsProvider credentialsProvider = + GrpcTransportOptions.setUpCredentialsProvider(options); + + InstantiatingGrpcChannelProvider.Builder defaultChannelProviderBuilder = + createChannelProviderBuilder(options, headerProviderWithUserAgent, isEnableDirectAccess); + + if (options.getChannelProvider() == null + && isEnableDirectAccess + && options.isEnableGcpFallback()) { + setupGcpFallback( + defaultChannelProviderBuilder, + options, + headerProviderWithUserAgent, + credentialsProvider); + } + + boolean enableLocationApi = options.isEnableLocationApi(); + // First check if SpannerOptions provides a TransportChannelProvider. Create one + // with information gathered from SpannerOptions if none is provided + TransportChannelProvider baseChannelProvider = + MoreObjects.firstNonNull( + options.getChannelProvider(), defaultChannelProviderBuilder.build()); + TransportChannelProvider channelProvider = + enableLocationApi && baseChannelProvider instanceof InstantiatingGrpcChannelProvider + ? 
new KeyAwareTransportChannelProvider( + (InstantiatingGrpcChannelProvider) baseChannelProvider, + options.getChannelEndpointCacheFactory()) + : baseChannelProvider; + + spannerWatchdog = + Executors.newSingleThreadScheduledExecutor( + new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat("Cloud-Spanner-WatchdogProvider-%d") + .build()); + WatchdogProvider watchdogProvider = + InstantiatingWatchdogProvider.create() + .withExecutor(spannerWatchdog) + .withCheckIntervalDuration(checkInterval) + .withClock(NanoClock.getDefaultClock()); + + final String emulatorHost = System.getenv("SPANNER_EMULATOR_HOST"); + + try { + // TODO: make our retry settings to inject and increment + // XGoogSpannerRequestId whenever a retry occurs. + SpannerStubSettings spannerStubSettings = + options.getSpannerStubSettings().toBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(credentialsProvider) + .setStreamWatchdogProvider(watchdogProvider) + .setTracerFactory( + options.getApiTracerFactory( + /* isAdminClient= */ false, isEmulatorEnabled(options, emulatorHost))) + .build(); + ClientContext clientContext = ClientContext.create(spannerStubSettings); + this.keyAwareChannel = extractKeyAwareChannel(clientContext.getTransportChannel()); + this.spannerStub = + GrpcSpannerStubWithStubSettingsAndClientContext.create( + spannerStubSettings, clientContext); + DIRECTPATH_CHANNEL_CREATED = + ((GrpcTransportChannel) clientContext.getTransportChannel()).isDirectPath() + && isEnableDirectAccess; + this.readRetrySettings = + options.getSpannerStubSettings().streamingReadSettings().getRetrySettings(); + this.readRetryableCodes = + options.getSpannerStubSettings().streamingReadSettings().getRetryableCodes(); + this.executeQueryRetrySettings = + options.getSpannerStubSettings().executeStreamingSqlSettings().getRetrySettings(); + this.executeQueryRetryableCodes = + options.getSpannerStubSettings().executeStreamingSqlSettings().getRetryableCodes(); + 
this.commitRetrySettings = + options.getSpannerStubSettings().commitSettings().getRetrySettings(); + partitionedDmlRetrySettings = + options.getSpannerStubSettings().executeSqlSettings().getRetrySettings().toBuilder() + .setInitialRpcTimeout(options.getPartitionedDmlTimeout()) + .setMaxRpcTimeout(options.getPartitionedDmlTimeout()) + .setTotalTimeout(options.getPartitionedDmlTimeout()) + .setRpcTimeoutMultiplier(1.0) + .build(); + SpannerStubSettings.Builder pdmlSettings = options.getSpannerStubSettings().toBuilder(); + pdmlSettings + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(credentialsProvider) + .setStreamWatchdogProvider(watchdogProvider) + .setTracerFactory( + options.getApiTracerFactory( + /* isAdminClient= */ false, isEmulatorEnabled(options, emulatorHost))) + .executeSqlSettings() + .setRetrySettings(partitionedDmlRetrySettings); + pdmlSettings.executeStreamingSqlSettings().setRetrySettings(partitionedDmlRetrySettings); + // The stream watchdog will by default only check for a timeout every 10 seconds, so if the + // timeout is less than 10 seconds, it would be ignored for the first 10 seconds unless we + // also change the StreamWatchdogCheckInterval. 
+ if (options + .getPartitionedDmlTimeout() + .dividedBy(10L) + .compareTo(pdmlSettings.getStreamWatchdogCheckInterval()) + < 0) { + pdmlSettings.setStreamWatchdogCheckInterval( + options.getPartitionedDmlTimeout().dividedBy(10)); + pdmlSettings.setStreamWatchdogProvider( + pdmlSettings + .getStreamWatchdogProvider() + .withCheckInterval(pdmlSettings.getStreamWatchdogCheckInterval())); + } + this.partitionedDmlStub = + GrpcSpannerStubWithStubSettingsAndClientContext.create( + pdmlSettings.build(), clientContext); + this.instanceAdminStubSettings = + options.getInstanceAdminStubSettings().toBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(credentialsProvider) + .setStreamWatchdogProvider(watchdogProvider) + .setTracerFactory( + options.getApiTracerFactory( + /* isAdminClient= */ true, isEmulatorEnabled(options, emulatorHost))) + .build(); + this.instanceAdminStub = GrpcInstanceAdminStub.create(instanceAdminStubSettings); + + this.databaseAdminStubSettings = + options.getDatabaseAdminStubSettings().toBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(credentialsProvider) + .setStreamWatchdogProvider(watchdogProvider) + .setTracerFactory( + options.getApiTracerFactory( + /* isAdminClient= */ true, isEmulatorEnabled(options, emulatorHost))) + .build(); + + // Automatically retry RESOURCE_EXHAUSTED for GetOperation if auto-throttling of + // administrative requests has been set. The GetOperation RPC is called repeatedly by gax + // while polling long-running operations for their progress and can also cause these errors. + // The default behavior is not to retry these errors, and this option should normally only + // be enabled for (integration) testing. 
+ if (options.isAutoThrottleAdministrativeRequests()) { + GrpcStubCallableFactory factory = + new GrpcDatabaseAdminCallableFactory() { + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + // Make GetOperation retry on RESOURCE_EXHAUSTED to prevent polling operations + // from failing with an Administrative requests limit exceeded error. + if (grpcCallSettings + .getMethodDescriptor() + .getFullMethodName() + .equals("google.longrunning.Operations/GetOperation")) { + Set codes = + ImmutableSet.builderWithExpectedSize( + callSettings.getRetryableCodes().size() + 1) + .addAll(callSettings.getRetryableCodes()) + .add(StatusCode.Code.RESOURCE_EXHAUSTED) + .build(); + callSettings = callSettings.toBuilder().setRetryableCodes(codes).build(); + } + return super.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + }; + this.databaseAdminStub = + new GrpcDatabaseAdminStubWithCustomCallableFactory( + databaseAdminStubSettings, + ClientContext.create(databaseAdminStubSettings), + factory); + } else { + this.databaseAdminStub = GrpcDatabaseAdminStub.create(databaseAdminStubSettings); + } + + // Check whether the SPANNER_EMULATOR_HOST env var has been set, and if so, if the emulator + // is actually running. 
+ checkEmulatorConnection(options, channelProvider, credentialsProvider, emulatorHost); + } catch (Exception e) { + throw asSpannerException(e); + } + } else { + this.keyAwareChannel = null; + this.databaseAdminStub = null; + this.instanceAdminStub = null; + this.spannerStub = null; + this.readRetrySettings = null; + this.readRetryableCodes = null; + this.executeQueryRetrySettings = null; + this.executeQueryRetryableCodes = null; + this.commitRetrySettings = + SpannerStubSettings.newBuilder().commitSettings().getRetrySettings(); + this.partitionedDmlStub = null; + this.databaseAdminStubSettings = null; + this.instanceAdminStubSettings = null; + this.spannerWatchdog = null; + this.partitionedDmlRetrySettings = null; + } + } + + @VisibleForTesting + GcpFallbackChannelOptions createFallbackChannelOptions( + GcpFallbackOpenTelemetry fallbackTelemetry, int minFailedCalls) { + return GcpFallbackChannelOptions.newBuilder() + .setPrimaryChannelName("directpath") + .setFallbackChannelName("cloudpath") + .setMinFailedCalls(minFailedCalls) + .setGcpFallbackOpenTelemetry(fallbackTelemetry) + .build(); + } + + private static KeyAwareChannel extractKeyAwareChannel(TransportChannel transportChannel) { + if (transportChannel instanceof GrpcTransportChannel) { + Channel channel = ((GrpcTransportChannel) transportChannel).getChannel(); + if (channel instanceof KeyAwareChannel) { + return (KeyAwareChannel) channel; + } + } + return null; + } + + @Override + public void clearTransactionAffinity(ByteString transactionId) { + if (keyAwareChannel != null) { + keyAwareChannel.clearTransactionAffinity(transactionId); + } + } + + private static String parseGrpcGcpApiConfig() { + try { + return Resources.toString( + GapicSpannerRpc.class.getResource(API_FILE), Charset.forName("UTF8")); + } catch (IOException e) { + throw newSpannerException(e); + } + } + + private void setupGcpFallback( + InstantiatingGrpcChannelProvider.Builder defaultChannelProviderBuilder, + final SpannerOptions options, 
+ final HeaderProvider headerProviderWithUserAgent, + final CredentialsProvider credentialsProvider) { + InstantiatingGrpcChannelProvider.Builder cloudPathProviderBuilder = + createChannelProviderBuilder( + options, headerProviderWithUserAgent, /* isEnableDirectAccess= */ false); + + final ApiFunction existingCloudPathConfigurator = + cloudPathProviderBuilder.getChannelConfigurator(); + final AtomicReference cloudPathBuilderRef = new AtomicReference<>(); + cloudPathProviderBuilder.setChannelConfigurator( + builder -> { + ManagedChannelBuilder effectiveBuilder = builder; + if (existingCloudPathConfigurator != null) { + effectiveBuilder = existingCloudPathConfigurator.apply(effectiveBuilder); + } + cloudPathBuilderRef.set(effectiveBuilder); + return effectiveBuilder; + }); + + // Build the cloudPathProvider to extract the builder which will be provided to + // FallbackChannelBuilder. + try (TransportChannel ignored = cloudPathProviderBuilder.build().getTransportChannel()) { + } catch (Exception e) { + throw asSpannerException(e); + } + + ManagedChannelBuilder cloudPathBuilder = cloudPathBuilderRef.get(); + if (cloudPathBuilder == null) { + throw new IllegalStateException("CloudPath builder was not captured."); + } + + try { + Credentials credentials = credentialsProvider.getCredentials(); + if (credentials != null) { + cloudPathBuilder.intercept( + new ClientInterceptor() { + @Override + public ClientCall interceptCall( + MethodDescriptor method, CallOptions callOptions, Channel next) { + return next.newCall( + method, callOptions.withCallCredentials(MoreCallCredentials.from(credentials))); + } + }); + } + } catch (Exception e) { + throw asSpannerException(e); + } + + final ApiFunction existingConfigurator = + defaultChannelProviderBuilder.getChannelConfigurator(); + defaultChannelProviderBuilder.setChannelConfigurator( + directPathBuilder -> { + ManagedChannelBuilder builder = directPathBuilder; + if (existingConfigurator != null) { + builder = 
existingConfigurator.apply(builder); + } + + String jsonApiConfig = parseGrpcGcpApiConfig(); + GcpManagedChannelOptions gcpOptions = grpcGcpOptionsWithMetricsAndDcp(options); + if (gcpOptions == null) { + gcpOptions = GcpManagedChannelOptions.newBuilder().build(); + } + + GcpManagedChannelBuilder primaryGcpBuilder = + GcpManagedChannelBuilder.forDelegateBuilder(builder) + .withApiConfigJsonString(jsonApiConfig) + .withOptions(gcpOptions); + + GcpManagedChannelBuilder fallbackGcpBuilder = + GcpManagedChannelBuilder.forDelegateBuilder(cloudPathBuilder) + .withApiConfigJsonString(jsonApiConfig) + .withOptions(gcpOptions); + + GcpFallbackOpenTelemetry fallbackTelemetry = + GcpFallbackOpenTelemetry.newBuilder() + .withSdk(options.getOpenTelemetry()) + .disableAllMetrics() + .enableMetrics(Arrays.asList("fallback_count", "call_status")) + .build(); + + return new FallbackChannelBuilder( + primaryGcpBuilder, + fallbackGcpBuilder, + createFallbackChannelOptions(fallbackTelemetry, 1)); + }); + } + + private InstantiatingGrpcChannelProvider.Builder createChannelProviderBuilder( + final SpannerOptions options, + final HeaderProvider headerProviderWithUserAgent, + boolean isEnableDirectAccess) { + InstantiatingGrpcChannelProvider.Builder defaultChannelProviderBuilder = + InstantiatingGrpcChannelProvider.newBuilder() + .setChannelConfigurator(options.getChannelConfigurator()) + .setEndpoint(options.getEndpoint()) + .setMaxInboundMessageSize(MAX_MESSAGE_SIZE) + .setMaxInboundMetadataSize(MAX_METADATA_SIZE) + .setPoolSize(options.getNumChannels()) + + // Set a keepalive time of 120 seconds to help long running + // commit GRPC calls succeed + .setKeepAliveTimeDuration(Duration.ofSeconds(GRPC_KEEPALIVE_SECONDS)) + + // Then check if SpannerOptions provides an InterceptorProvider. 
Create a default + // SpannerInterceptorProvider if none is provided + .setInterceptorProvider( + SpannerInterceptorProvider.create( + MoreObjects.firstNonNull( + options.getInterceptorProvider(), + SpannerInterceptorProvider.createDefault(options.getOpenTelemetry()))) + // This sets the trace context headers. + .withTraceContext(endToEndTracingEnabled, options.getOpenTelemetry()) + // This sets the response compressor (Server -> Client). + .withEncoding(compressorName)) + .setHeaderProvider(headerProviderWithUserAgent) + .setAllowNonDefaultServiceAccount(true); + if (isEnableDirectAccess) { + defaultChannelProviderBuilder.setAttemptDirectPath(true); + if (isEnableDirectPathBoundToken()) { + // This will let the credentials try to fetch a hard-bound access token if the runtime + // environment supports it. + defaultChannelProviderBuilder.setAllowHardBoundTokenTypes( + Collections.singletonList(InstantiatingGrpcChannelProvider.HardBoundTokenTypes.ALTS)); + } + defaultChannelProviderBuilder.setAttemptDirectPathXds(); + } + + options.enablegRPCMetrics(defaultChannelProviderBuilder); + + if (options.isUseVirtualThreads()) { + ExecutorService executor = + tryCreateVirtualThreadPerTaskExecutor("spanner-virtual-grpc-executor"); + if (executor != null) { + defaultChannelProviderBuilder.setExecutor(executor); + } + } + // If it is enabled in options uses the channel pool provided by the gRPC-GCP extension. + maybeEnableGrpcGcpExtension(defaultChannelProviderBuilder, options); + return defaultChannelProviderBuilder; + } + + // Enhance gRPC-GCP options with metrics and dynamic channel pool configuration. 
+ private static GcpManagedChannelOptions grpcGcpOptionsWithMetricsAndDcp(SpannerOptions options) { + GcpManagedChannelOptions grpcGcpOptions = + MoreObjects.firstNonNull(options.getGrpcGcpOptions(), new GcpManagedChannelOptions()); + GcpManagedChannelOptions.Builder optionsBuilder = + GcpManagedChannelOptions.newBuilder(grpcGcpOptions); + + // Configure metrics options with OpenTelemetry meter + GcpMetricsOptions metricsOptions = + MoreObjects.firstNonNull( + grpcGcpOptions.getMetricsOptions(), GcpMetricsOptions.newBuilder().build()); + GcpMetricsOptions.Builder metricsOptionsBuilder = GcpMetricsOptions.newBuilder(metricsOptions); + // TODO: Add default labels with values: client_id, database, instance_id. + if (metricsOptions.getNamePrefix().equals("")) { + metricsOptionsBuilder.withNamePrefix("cloud.google.com/java/spanner/gcp-channel-pool/"); + } + // Pass OpenTelemetry meter to grpc-gcp for channel pool metrics + if (metricsOptions.getOpenTelemetryMeter() == null && options.isGrpcGcpOtelMetricsEnabled()) { + metricsOptionsBuilder.withOpenTelemetryMeter( + options.getOpenTelemetry().getMeter("com.google.cloud.spanner")); + } + optionsBuilder.withMetricsOptions(metricsOptionsBuilder.build()); + + // Configure dynamic channel pool options if enabled. + // Uses the GcpChannelPoolOptions from SpannerOptions, which contains Spanner-specific defaults + // or user-provided configuration. 
+ if (options.isDynamicChannelPoolEnabled()) { + optionsBuilder.withChannelPoolOptions(options.getGcpChannelPoolOptions()); + } + + return optionsBuilder.build(); + } + + @SuppressWarnings("rawtypes") + private static void maybeEnableGrpcGcpExtension( + InstantiatingGrpcChannelProvider.Builder defaultChannelProviderBuilder, + final SpannerOptions options) { + if (!options.isGrpcGcpExtensionEnabled()) { + return; + } + + final String jsonApiConfig = parseGrpcGcpApiConfig(); + final GcpManagedChannelOptions grpcGcpOptions = grpcGcpOptionsWithMetricsAndDcp(options); + + // When dynamic channel pool is enabled, use the DCP initial size as the pool size. + // When disabled, use the explicitly configured numChannels. + final int poolSize = options.isDynamicChannelPoolEnabled() ? 0 : options.getNumChannels(); + + ApiFunction baseConfigurator = + defaultChannelProviderBuilder.getChannelConfigurator(); + ApiFunction apiFunction = + channelBuilder -> { + if (baseConfigurator != null) { + channelBuilder = baseConfigurator.apply(channelBuilder); + } + return GcpManagedChannelBuilder.forDelegateBuilder(channelBuilder) + .withApiConfigJsonString(jsonApiConfig) + .withOptions(grpcGcpOptions) + .setPoolSize(poolSize); + }; + + // Disable the GAX channel pooling functionality by setting the GAX channel pool size to 1. + // Enable gRPC-GCP channel pool via the channel configurator. 
+ defaultChannelProviderBuilder.setPoolSize(1).setChannelConfigurator(apiFunction); + } + + private static HeaderProvider headerProviderWithUserAgentFrom(HeaderProvider headerProvider) { + final Optional> existingUserAgentEntry = + headerProvider.getHeaders().entrySet().stream() + .filter(entry -> entry.getKey().equalsIgnoreCase(USER_AGENT_KEY)) + .findFirst(); + final String existingUserAgentValue = existingUserAgentEntry.map(Entry::getValue).orElse(null); + final String userAgent = + Stream.of(existingUserAgentValue, DEFAULT_USER_AGENT) + .filter(Objects::nonNull) + .collect(Collectors.joining(" ")); + + final Map headersWithUserAgent = new HashMap<>(headerProvider.getHeaders()); + existingUserAgentEntry.ifPresent(entry -> headersWithUserAgent.remove(entry.getKey())); + headersWithUserAgent.put(USER_AGENT_KEY, userAgent); + + return FixedHeaderProvider.create(headersWithUserAgent); + } + + private static void checkEmulatorConnection( + SpannerOptions options, + TransportChannelProvider channelProvider, + CredentialsProvider credentialsProvider, + String emulatorHost) + throws IOException { + // Only do the check if the emulator environment variable has been set to localhost. + if (isEmulatorEnabled(options, emulatorHost)) { + // Do a quick check to see if the emulator is actually running. 
+ try { + InstanceAdminStubSettings.Builder testEmulatorSettings = + options.getInstanceAdminStubSettings().toBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(credentialsProvider); + testEmulatorSettings + .listInstanceConfigsSettings() + .setSimpleTimeoutNoRetriesDuration(Duration.ofSeconds(10L)); + try (GrpcInstanceAdminStub stub = + GrpcInstanceAdminStub.create(testEmulatorSettings.build())) { + stub.listInstanceConfigsCallable() + .call( + ListInstanceConfigsRequest.newBuilder() + .setParent(String.format("projects/%s", options.getProjectId())) + .build()); + } + } catch (UnavailableException e) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.UNAVAILABLE, + String.format( + "The environment variable SPANNER_EMULATOR_HOST has been set to %s, but no running" + + " emulator could be found at that address.\n" + + "Did you forget to start the emulator, or to unset the environment" + + " variable?", + emulatorHost)); + } + } + } + + private static boolean isEmulatorEnabled(SpannerOptions options, String emulatorHost) { + // Only do the check if the emulator environment variable has been set to localhost. + return options.getChannelProvider() == null + && emulatorHost != null + && options.getHost() != null + && options.getHost().startsWith("http://localhost") + && options.getHost().endsWith(emulatorHost); + } + + public static boolean isEnableAFEServerTiming() { + // Enable AFE metrics as default unless explicitly + // disabled via env. 
+ return !Boolean.parseBoolean(System.getenv("SPANNER_DISABLE_AFE_SERVER_TIMING")); + } + + public static boolean isEnableDirectPathXdsEnv() { + return Boolean.parseBoolean(System.getenv("GOOGLE_SPANNER_ENABLE_DIRECT_ACCESS")); + } + + public static boolean isEnableDirectPathBoundToken() { + return !Boolean.parseBoolean(System.getenv("GOOGLE_SPANNER_DISABLE_DIRECT_ACCESS_BOUND_TOKEN")); + } + + private static final RetrySettings ADMIN_REQUESTS_LIMIT_EXCEEDED_RETRY_SETTINGS = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofSeconds(5L)) + .setRetryDelayMultiplier(2.0) + .setMaxRetryDelayDuration(Duration.ofSeconds(60L)) + .setMaxAttempts(10) + .build(); + + @VisibleForTesting + static final class AdminRequestsLimitExceededRetryAlgorithm + implements ResultRetryAlgorithm { + @Override + public TimedAttemptSettings createNextAttempt( + Throwable prevThrowable, T prevResponse, TimedAttemptSettings prevSettings) { + // Use default retry settings. + return null; + } + + @Override + public boolean shouldRetry(Throwable prevThrowable, T prevResponse) + throws CancellationException { + return prevThrowable instanceof AdminRequestsPerMinuteExceededException; + } + } + + private T runWithRetryOnAdministrativeRequestsExceeded(Callable callable) { + try { + return RetryHelper.runWithRetries( + callable, + retryAdministrativeRequestsSettings, + new AdminRequestsLimitExceededRetryAlgorithm<>(), + NanoClock.getDefaultClock()); + } catch (RetryHelperException e) { + throw asSpannerException(e.getCause()); + } + } + + private static final class OperationFutureRetryAlgorithm + implements ResultRetryAlgorithm> { + + private static final ImmutableList RETRYABLE_CODES = + ImmutableList.of(StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE); + + @Override + public TimedAttemptSettings createNextAttempt( + Throwable prevThrowable, + OperationFuture prevResponse, + TimedAttemptSettings prevSettings) { + // Use default retry settings. 
+ return null; + } + + @Override + public boolean shouldRetry( + Throwable prevThrowable, OperationFuture prevResponse) + throws CancellationException { + if (prevThrowable instanceof ApiException) { + ApiException e = (ApiException) prevThrowable; + return RETRYABLE_CODES.contains(e.getStatusCode().getCode()); + } + if (prevResponse != null) { + try { + prevResponse.getInitialFuture().get(); + } catch (ExecutionException ee) { + Throwable cause = ee.getCause(); + if (cause instanceof ApiException) { + ApiException e = (ApiException) cause; + return RETRYABLE_CODES.contains(e.getStatusCode().getCode()); + } + } catch (InterruptedException e) { + return false; + } + } + return false; + } + } + + private final class OperationFutureCallable + implements Callable> { + + final OperationCallable operationCallable; + final RequestT initialRequest; + final MethodDescriptor method; + final String instanceName; + final OperationsLister lister; + final Function getStartTimeFunction; + Timestamp initialCallTime; + boolean isRetry = false; + + OperationFutureCallable( + OperationCallable operationCallable, + RequestT initialRequest, + MethodDescriptor method, + String instanceName, + OperationsLister lister, + Function getStartTimeFunction) { + this.operationCallable = operationCallable; + this.initialRequest = initialRequest; + this.method = method; + this.instanceName = instanceName; + this.lister = lister; + this.getStartTimeFunction = getStartTimeFunction; + } + + @Override + public OperationFuture call() { + acquireAdministrativeRequestsRateLimiter(); + + return runWithRetryOnAdministrativeRequestsExceeded( + () -> { + String operationName = null; + if (isRetry) { + // Query the backend to see if the operation was actually created, and that the + // problem was caused by a network problem or other transient problem client side. 
+ Operation operation = + mostRecentOperation(lister, getStartTimeFunction, initialCallTime); + if (operation != null) { + // Operation found, resume tracking that operation. + operationName = operation.getName(); + } + } else { + initialCallTime = + Timestamp.newBuilder() + .setSeconds( + TimeUnit.SECONDS.convert( + System.currentTimeMillis(), TimeUnit.MILLISECONDS)) + .build(); + } + isRetry = true; + + if (operationName == null) { + GrpcCallContext context = newAdminCallContext(instanceName, initialRequest, method); + return operationCallable.futureCall(initialRequest, context); + } else { + return operationCallable.resumeFutureCall(operationName); + } + }); + } + } + + private interface OperationsLister { + + Paginated listOperations(String nextPageToken); + } + + private Operation mostRecentOperation( + OperationsLister lister, + Function getStartTimeFunction, + Timestamp initialCallTime) { + Operation res = null; + Timestamp currMaxStartTime = null; + String nextPageToken = null; + Paginated operations; + do { + operations = lister.listOperations(nextPageToken); + nextPageToken = operations.getNextPageToken(); + for (Operation op : operations.getResults()) { + Timestamp startTime = getStartTimeFunction.apply(op); + if (res == null + || (TimestampComparator.INSTANCE.compare(startTime, currMaxStartTime) > 0 + && TimestampComparator.INSTANCE.compare(startTime, initialCallTime) >= 0)) { + currMaxStartTime = startTime; + res = op; + } + // If the operation does not report any start time, then the operation that is not yet done + // is the one that is the most recent. 
+ if (startTime == null && currMaxStartTime == null && !op.getDone()) { + res = op; + break; + } + } + } while (nextPageToken != null); + return res; + } + + private static final class TimestampComparator implements Comparator { + + private static final TimestampComparator INSTANCE = new TimestampComparator(); + + @Override + public int compare(Timestamp t1, Timestamp t2) { + if (t1 == null && t2 == null) { + return 0; + } + if (t1 != null && t2 == null) { + return 1; + } + if (t1 == null && t2 != null) { + return -1; + } + if (t1.getSeconds() > t2.getSeconds() + || (t1.getSeconds() == t2.getSeconds() && t1.getNanos() > t2.getNanos())) { + return 1; + } + if (t1.getSeconds() < t2.getSeconds() + || (t1.getSeconds() == t2.getSeconds() && t1.getNanos() < t2.getNanos())) { + return -1; + } + return 0; + } + } + + private void acquireAdministrativeRequestsRateLimiter() { + if (throttleAdministrativeRequests) { + RateLimiter limiter = ADMINISTRATIVE_REQUESTS_RATE_LIMITERS.get(this.projectName); + if (limiter != null) { + limiter.acquire(); + } + } + } + + @Override + public Paginated listInstanceConfigs(int pageSize, @Nullable String pageToken) + throws SpannerException { + ListInstanceConfigsRequest.Builder requestBuilder = + ListInstanceConfigsRequest.newBuilder().setParent(projectName).setPageSize(pageSize); + if (pageToken != null) { + requestBuilder.setPageToken(pageToken); + } + ListInstanceConfigsRequest request = requestBuilder.build(); + + GrpcCallContext context = + newAdminCallContext(projectName, request, InstanceAdminGrpc.getListInstanceConfigsMethod()); + ListInstanceConfigsResponse response = + get(instanceAdminStub.listInstanceConfigsCallable().futureCall(request, context)); + return new Paginated<>(response.getInstanceConfigsList(), response.getNextPageToken()); + } + + @Override + public OperationFuture createInstanceConfig( + String parent, + String instanceConfigId, + InstanceConfig instanceConfig, + @Nullable Boolean validateOnly) + throws 
SpannerException { + CreateInstanceConfigRequest.Builder builder = + CreateInstanceConfigRequest.newBuilder() + .setParent(parent) + .setInstanceConfigId(instanceConfigId) + .setInstanceConfig(instanceConfig); + if (validateOnly != null) { + builder.setValidateOnly(validateOnly); + } + CreateInstanceConfigRequest request = builder.build(); + GrpcCallContext context = + newAdminCallContext(parent, request, InstanceAdminGrpc.getCreateInstanceConfigMethod()); + return instanceAdminStub.createInstanceConfigOperationCallable().futureCall(request, context); + } + + @Override + public OperationFuture updateInstanceConfig( + InstanceConfig instanceConfig, @Nullable Boolean validateOnly, FieldMask fieldMask) + throws SpannerException { + UpdateInstanceConfigRequest.Builder builder = + UpdateInstanceConfigRequest.newBuilder() + .setInstanceConfig(instanceConfig) + .setUpdateMask(fieldMask); + if (validateOnly != null) { + builder.setValidateOnly(validateOnly); + } + UpdateInstanceConfigRequest request = builder.build(); + GrpcCallContext context = + newAdminCallContext( + instanceConfig.getName(), request, InstanceAdminGrpc.getUpdateInstanceConfigMethod()); + return instanceAdminStub.updateInstanceConfigOperationCallable().futureCall(request, context); + } + + @Override + public InstanceConfig getInstanceConfig(String instanceConfigName) throws SpannerException { + GetInstanceConfigRequest request = + GetInstanceConfigRequest.newBuilder().setName(instanceConfigName).build(); + + GrpcCallContext context = + newAdminCallContext(projectName, request, InstanceAdminGrpc.getGetInstanceConfigMethod()); + return get(instanceAdminStub.getInstanceConfigCallable().futureCall(request, context)); + } + + @Override + public void deleteInstanceConfig( + String instanceConfigName, @Nullable String etag, @Nullable Boolean validateOnly) + throws SpannerException { + DeleteInstanceConfigRequest.Builder requestBuilder = + DeleteInstanceConfigRequest.newBuilder().setName(instanceConfigName); + + 
if (etag != null) { + requestBuilder.setEtag(etag); + } + if (validateOnly != null) { + requestBuilder.setValidateOnly(validateOnly); + } + DeleteInstanceConfigRequest request = requestBuilder.build(); + GrpcCallContext context = + newAdminCallContext( + instanceConfigName, request, InstanceAdminGrpc.getDeleteInstanceConfigMethod()); + get(instanceAdminStub.deleteInstanceConfigCallable().futureCall(request, context)); + } + + @Override + public Paginated listInstanceConfigOperations( + int pageSize, @Nullable String filter, @Nullable String pageToken) { + acquireAdministrativeRequestsRateLimiter(); + ListInstanceConfigOperationsRequest.Builder requestBuilder = + ListInstanceConfigOperationsRequest.newBuilder() + .setParent(projectName) + .setPageSize(pageSize); + if (filter != null) { + requestBuilder.setFilter(filter); + } + if (pageToken != null) { + requestBuilder.setPageToken(pageToken); + } + final ListInstanceConfigOperationsRequest request = requestBuilder.build(); + + final GrpcCallContext context = + newAdminCallContext( + projectName, request, InstanceAdminGrpc.getListInstanceConfigOperationsMethod()); + ListInstanceConfigOperationsResponse response = + runWithRetryOnAdministrativeRequestsExceeded( + () -> + get( + instanceAdminStub + .listInstanceConfigOperationsCallable() + .futureCall(request, context))); + return new Paginated<>(response.getOperationsList(), response.getNextPageToken()); + } + + @Override + public Paginated listInstances( + int pageSize, @Nullable String pageToken, @Nullable String filter) throws SpannerException { + ListInstancesRequest.Builder requestBuilder = + ListInstancesRequest.newBuilder().setParent(projectName).setPageSize(pageSize); + if (pageToken != null) { + requestBuilder.setPageToken(pageToken); + } + if (filter != null) { + requestBuilder.setFilter(filter); + } + ListInstancesRequest request = requestBuilder.build(); + + GrpcCallContext context = + newAdminCallContext(projectName, request, 
InstanceAdminGrpc.getListInstancesMethod()); + ListInstancesResponse response = + get(instanceAdminStub.listInstancesCallable().futureCall(request, context)); + return new Paginated<>(response.getInstancesList(), response.getNextPageToken()); + } + + @Override + public OperationFuture createInstance( + String parent, String instanceId, Instance instance) throws SpannerException { + CreateInstanceRequest request = + CreateInstanceRequest.newBuilder() + .setParent(parent) + .setInstanceId(instanceId) + .setInstance(instance) + .build(); + GrpcCallContext context = + newAdminCallContext(parent, request, InstanceAdminGrpc.getCreateInstanceMethod()); + return instanceAdminStub.createInstanceOperationCallable().futureCall(request, context); + } + + @Override + public OperationFuture updateInstance( + Instance instance, FieldMask fieldMask) throws SpannerException { + UpdateInstanceRequest request = + UpdateInstanceRequest.newBuilder().setInstance(instance).setFieldMask(fieldMask).build(); + GrpcCallContext context = + newAdminCallContext( + instance.getName(), request, InstanceAdminGrpc.getUpdateInstanceMethod()); + return instanceAdminStub.updateInstanceOperationCallable().futureCall(request, context); + } + + @Override + public Instance getInstance(String instanceName) throws SpannerException { + GetInstanceRequest request = GetInstanceRequest.newBuilder().setName(instanceName).build(); + + GrpcCallContext context = + newAdminCallContext(instanceName, request, InstanceAdminGrpc.getGetInstanceMethod()); + return get(instanceAdminStub.getInstanceCallable().futureCall(request, context)); + } + + @Override + public void deleteInstance(String instanceName) throws SpannerException { + DeleteInstanceRequest request = + DeleteInstanceRequest.newBuilder().setName(instanceName).build(); + + GrpcCallContext context = + newAdminCallContext(instanceName, request, InstanceAdminGrpc.getDeleteInstanceMethod()); + get(instanceAdminStub.deleteInstanceCallable().futureCall(request, 
context)); + } + + @Override + public Paginated listBackupOperations( + String instanceName, int pageSize, @Nullable String filter, @Nullable String pageToken) { + acquireAdministrativeRequestsRateLimiter(); + ListBackupOperationsRequest.Builder requestBuilder = + ListBackupOperationsRequest.newBuilder().setParent(instanceName).setPageSize(pageSize); + if (filter != null) { + requestBuilder.setFilter(filter); + } + if (pageToken != null) { + requestBuilder.setPageToken(pageToken); + } + final ListBackupOperationsRequest request = requestBuilder.build(); + + final GrpcCallContext context = + newAdminCallContext( + instanceName, request, DatabaseAdminGrpc.getListBackupOperationsMethod()); + ListBackupOperationsResponse response = + runWithRetryOnAdministrativeRequestsExceeded( + () -> + get(databaseAdminStub.listBackupOperationsCallable().futureCall(request, context))); + return new Paginated<>(response.getOperationsList(), response.getNextPageToken()); + } + + @Override + public Paginated listDatabaseOperations( + String instanceName, int pageSize, @Nullable String filter, @Nullable String pageToken) { + acquireAdministrativeRequestsRateLimiter(); + ListDatabaseOperationsRequest.Builder requestBuilder = + ListDatabaseOperationsRequest.newBuilder().setParent(instanceName).setPageSize(pageSize); + + if (filter != null) { + requestBuilder.setFilter(filter); + } + if (pageToken != null) { + requestBuilder.setPageToken(pageToken); + } + final ListDatabaseOperationsRequest request = requestBuilder.build(); + + final GrpcCallContext context = + newAdminCallContext( + instanceName, request, DatabaseAdminGrpc.getListDatabaseOperationsMethod()); + ListDatabaseOperationsResponse response = + runWithRetryOnAdministrativeRequestsExceeded( + () -> + get( + databaseAdminStub + .listDatabaseOperationsCallable() + .futureCall(request, context))); + + return new Paginated<>(response.getOperationsList(), response.getNextPageToken()); + } + + @Override + public Paginated 
listDatabaseRoles( + String databaseName, int pageSize, @Nullable String pageToken) { + acquireAdministrativeRequestsRateLimiter(); + ListDatabaseRolesRequest.Builder requestBuilder = + ListDatabaseRolesRequest.newBuilder().setParent(databaseName).setPageSize(pageSize); + + if (pageToken != null) { + requestBuilder.setPageToken(pageToken); + } + final ListDatabaseRolesRequest request = requestBuilder.build(); + + final GrpcCallContext context = + newAdminCallContext(databaseName, request, DatabaseAdminGrpc.getListDatabaseRolesMethod()); + ListDatabaseRolesResponse response = + runWithRetryOnAdministrativeRequestsExceeded( + () -> get(databaseAdminStub.listDatabaseRolesCallable().futureCall(request, context))); + + return new Paginated<>(response.getDatabaseRolesList(), response.getNextPageToken()); + } + + @Override + public Paginated listBackups( + String instanceName, int pageSize, @Nullable String filter, @Nullable String pageToken) + throws SpannerException { + acquireAdministrativeRequestsRateLimiter(); + ListBackupsRequest.Builder requestBuilder = + ListBackupsRequest.newBuilder().setParent(instanceName).setPageSize(pageSize); + if (filter != null) { + requestBuilder.setFilter(filter); + } + if (pageToken != null) { + requestBuilder.setPageToken(pageToken); + } + final ListBackupsRequest request = requestBuilder.build(); + + final GrpcCallContext context = + newAdminCallContext(instanceName, request, DatabaseAdminGrpc.getListBackupsMethod()); + ListBackupsResponse response = + runWithRetryOnAdministrativeRequestsExceeded( + () -> get(databaseAdminStub.listBackupsCallable().futureCall(request, context))); + + return new Paginated<>(response.getBackupsList(), response.getNextPageToken()); + } + + @Override + public Paginated listDatabases( + String instanceName, int pageSize, @Nullable String pageToken) throws SpannerException { + acquireAdministrativeRequestsRateLimiter(); + ListDatabasesRequest.Builder requestBuilder = + 
ListDatabasesRequest.newBuilder().setParent(instanceName).setPageSize(pageSize); + if (pageToken != null) { + requestBuilder.setPageToken(pageToken); + } + final ListDatabasesRequest request = requestBuilder.build(); + + final GrpcCallContext context = + newAdminCallContext(instanceName, request, DatabaseAdminGrpc.getListDatabasesMethod()); + ListDatabasesResponse response = + runWithRetryOnAdministrativeRequestsExceeded( + () -> get(databaseAdminStub.listDatabasesCallable().futureCall(request, context))); + + return new Paginated<>(response.getDatabasesList(), response.getNextPageToken()); + } + + @Override + public OperationFuture createDatabase( + final String instanceName, + String createDatabaseStatement, + Iterable additionalStatements, + com.google.cloud.spanner.Database databaseInfo) + throws SpannerException { + final String databaseId = databaseInfo.getId().getDatabase(); + CreateDatabaseRequest.Builder requestBuilder = + CreateDatabaseRequest.newBuilder() + .setParent(instanceName) + .setCreateStatement(createDatabaseStatement) + .addAllExtraStatements(additionalStatements); + if (databaseInfo.getEncryptionConfig() != null) { + requestBuilder.setEncryptionConfig( + EncryptionConfigProtoMapper.encryptionConfig(databaseInfo.getEncryptionConfig())); + } + if (databaseInfo.getDialect() != null) { + requestBuilder.setDatabaseDialect(databaseInfo.getDialect().toProto()); + } + if (databaseInfo.getProtoDescriptors() != null) { + requestBuilder.setProtoDescriptors(databaseInfo.getProtoDescriptors()); + } + final CreateDatabaseRequest request = requestBuilder.build(); + OperationFutureCallable callable = + new OperationFutureCallable<>( + databaseAdminStub.createDatabaseOperationCallable(), + request, + DatabaseAdminGrpc.getCreateDatabaseMethod(), + instanceName, + nextPageToken -> + listDatabaseOperations( + instanceName, + 0, + String.format( + "(metadata.@type:type.googleapis.com/%s) AND (name:%s/operations/)", + 
CreateDatabaseMetadata.getDescriptor().getFullName(), + String.format("%s/databases/%s", instanceName, databaseId)), + nextPageToken), + input -> { + if (input.getDone() && input.hasResponse()) { + try { + Timestamp createTime = input.getResponse().unpack(Database.class).getCreateTime(); + if (Timestamp.getDefaultInstance().equals(createTime)) { + // Create time was not returned by the server (proto objects never return + // null, instead they return the default instance). Return null from this + // method to indicate that there is no known create time. + return null; + } + } catch (InvalidProtocolBufferException e) { + return null; + } + } + return null; + }); + return RetryHelper.runWithRetries( + callable, + databaseAdminStubSettings + .createDatabaseOperationSettings() + .getInitialCallSettings() + .getRetrySettings(), + new OperationFutureRetryAlgorithm<>(), + NanoClock.getDefaultClock()); + } + + /** + * If the update database ddl operation returns an ALREADY_EXISTS error, meaning the operation id + * used is already in flight, this method will simply resume the original operation. The returned + * future will be completed when the original operation finishes. + * + *

This mechanism is necessary, because the update database ddl can be retried. If a retryable + * failure occurs, the backend has already started processing the update database ddl operation + * with the given id and the library issues a retry, an ALREADY_EXISTS error will be returned. If + * we were to bubble this error up, it would be confusing for the caller, who used originally + * called the method with a new operation id. + */ + @Override + public OperationFuture updateDatabaseDdl( + com.google.cloud.spanner.Database databaseInfo, + final Iterable updateDatabaseStatements, + @Nullable final String updateId) + throws SpannerException { + acquireAdministrativeRequestsRateLimiter(); + Preconditions.checkNotNull(databaseInfo.getId()); + UpdateDatabaseDdlRequest.Builder requestBuilder = + UpdateDatabaseDdlRequest.newBuilder() + .setDatabase(databaseInfo.getId().getName()) + .addAllStatements(updateDatabaseStatements) + .setOperationId(MoreObjects.firstNonNull(updateId, "")); + if (databaseInfo.getProtoDescriptors() != null) { + requestBuilder.setProtoDescriptors(databaseInfo.getProtoDescriptors()); + } + final UpdateDatabaseDdlRequest request = requestBuilder.build(); + final GrpcCallContext context = + newAdminCallContext( + databaseInfo.getId().getName(), + request, + DatabaseAdminGrpc.getUpdateDatabaseDdlMethod()); + final OperationCallable callable = + databaseAdminStub.updateDatabaseDdlOperationCallable(); + + return runWithRetryOnAdministrativeRequestsExceeded( + () -> { + OperationFuture operationFuture = + callable.futureCall(request, context); + try { + operationFuture.getInitialFuture().get(); + } catch (InterruptedException e) { + throw newSpannerException(e); + } catch (ExecutionException e) { + Throwable t = e.getCause(); + SpannerException se = asSpannerException(t); + if (se instanceof AdminRequestsPerMinuteExceededException) { + // Propagate this to trigger a retry. 
+ throw se; + } + if (t instanceof AlreadyExistsException) { + String operationName = + OPERATION_NAME_TEMPLATE.instantiate( + "database", databaseInfo.getId().getName(), "operation", updateId); + return callable.resumeFutureCall(operationName, context); + } + } + return operationFuture; + }); + } + + @Override + public void dropDatabase(String databaseName) throws SpannerException { + acquireAdministrativeRequestsRateLimiter(); + final DropDatabaseRequest request = + DropDatabaseRequest.newBuilder().setDatabase(databaseName).build(); + + final GrpcCallContext context = + newAdminCallContext(databaseName, request, DatabaseAdminGrpc.getDropDatabaseMethod()); + runWithRetryOnAdministrativeRequestsExceeded( + () -> { + get(databaseAdminStub.dropDatabaseCallable().futureCall(request, context)); + return null; + }); + } + + @Override + public Database getDatabase(String databaseName) throws SpannerException { + acquireAdministrativeRequestsRateLimiter(); + final GetDatabaseRequest request = + GetDatabaseRequest.newBuilder().setName(databaseName).build(); + + final GrpcCallContext context = + newAdminCallContext(databaseName, request, DatabaseAdminGrpc.getGetDatabaseMethod()); + return runWithRetryOnAdministrativeRequestsExceeded( + () -> get(databaseAdminStub.getDatabaseCallable().futureCall(request, context))); + } + + @Override + public OperationFuture updateDatabase( + Database database, FieldMask updateMask) throws SpannerException { + UpdateDatabaseRequest request = + UpdateDatabaseRequest.newBuilder().setDatabase(database).setUpdateMask(updateMask).build(); + GrpcCallContext context = + newAdminCallContext( + database.getName(), request, DatabaseAdminGrpc.getUpdateDatabaseMethod()); + return databaseAdminStub.updateDatabaseOperationCallable().futureCall(request, context); + } + + @Override + public GetDatabaseDdlResponse getDatabaseDdl(String databaseName) throws SpannerException { + acquireAdministrativeRequestsRateLimiter(); + final GetDatabaseDdlRequest request 
= + GetDatabaseDdlRequest.newBuilder().setDatabase(databaseName).build(); + + final GrpcCallContext context = + newAdminCallContext(databaseName, request, DatabaseAdminGrpc.getGetDatabaseDdlMethod()); + return runWithRetryOnAdministrativeRequestsExceeded( + () -> get(databaseAdminStub.getDatabaseDdlCallable().futureCall(request, context))); + } + + @Override + public OperationFuture createBackup( + final com.google.cloud.spanner.Backup backupInfo) throws SpannerException { + final String instanceName = backupInfo.getInstanceId().getName(); + final String databaseName = backupInfo.getDatabase().getName(); + final String backupId = backupInfo.getId().getBackup(); + final Backup.Builder backupBuilder = + com.google.spanner.admin.database.v1.Backup.newBuilder() + .setDatabase(databaseName) + .setExpireTime(backupInfo.getExpireTime().toProto()); + if (backupInfo.getVersionTime() != null) { + backupBuilder.setVersionTime(backupInfo.getVersionTime().toProto()); + } + final Backup backup = backupBuilder.build(); + + final CreateBackupRequest.Builder requestBuilder = + CreateBackupRequest.newBuilder() + .setParent(instanceName) + .setBackupId(backupId) + .setBackup(backup); + if (backupInfo.getEncryptionConfig() != null) { + requestBuilder.setEncryptionConfig( + EncryptionConfigProtoMapper.createBackupEncryptionConfig( + backupInfo.getEncryptionConfig())); + } + final CreateBackupRequest request = requestBuilder.build(); + final OperationFutureCallable callable = + new OperationFutureCallable<>( + databaseAdminStub.createBackupOperationCallable(), + request, + DatabaseAdminGrpc.getCreateBackupMethod(), + instanceName, + nextPageToken -> + listBackupOperations( + instanceName, + 0, + String.format( + "(metadata.@type:type.googleapis.com/%s) AND (metadata.name:%s)", + CreateBackupMetadata.getDescriptor().getFullName(), + String.format("%s/backups/%s", instanceName, backupId)), + nextPageToken), + input -> { + try { + return input + .getMetadata() + 
.unpack(CreateBackupMetadata.class) + .getProgress() + .getStartTime(); + } catch (InvalidProtocolBufferException e) { + return null; + } + }); + return RetryHelper.runWithRetries( + callable, + databaseAdminStubSettings + .createBackupOperationSettings() + .getInitialCallSettings() + .getRetrySettings(), + new OperationFutureRetryAlgorithm<>(), + NanoClock.getDefaultClock()); + } + + @Override + public OperationFuture copyBackup( + BackupId sourceBackupId, final com.google.cloud.spanner.Backup destinationBackup) + throws SpannerException { + Preconditions.checkNotNull(sourceBackupId); + Preconditions.checkNotNull(destinationBackup); + final String instanceName = destinationBackup.getInstanceId().getName(); + final String backupId = destinationBackup.getId().getBackup(); + + final CopyBackupRequest.Builder requestBuilder = + CopyBackupRequest.newBuilder() + .setParent(instanceName) + .setBackupId(backupId) + .setSourceBackup(sourceBackupId.getName()) + .setExpireTime(destinationBackup.getExpireTime().toProto()); + + if (destinationBackup.getEncryptionConfig() != null) { + requestBuilder.setEncryptionConfig( + EncryptionConfigProtoMapper.copyBackupEncryptionConfig( + destinationBackup.getEncryptionConfig())); + } + final CopyBackupRequest request = requestBuilder.build(); + final OperationFutureCallable callable = + new OperationFutureCallable<>( + databaseAdminStub.copyBackupOperationCallable(), + request, + // calling copy backup method of dbClientImpl + DatabaseAdminGrpc.getCopyBackupMethod(), + instanceName, + nextPageToken -> + listBackupOperations( + instanceName, + 0, + String.format( + "(metadata.@type:type.googleapis.com/%s) AND (metadata.name:%s)", + CopyBackupMetadata.getDescriptor().getFullName(), + String.format("%s/backups/%s", instanceName, backupId)), + nextPageToken), + input -> { + try { + return input + .getMetadata() + .unpack(CopyBackupMetadata.class) + .getProgress() + .getStartTime(); + } catch (InvalidProtocolBufferException e) { + return 
null; + } + }); + return RetryHelper.runWithRetries( + callable, + databaseAdminStubSettings + .copyBackupOperationSettings() + .getInitialCallSettings() + .getRetrySettings(), + new OperationFutureRetryAlgorithm<>(), + NanoClock.getDefaultClock()); + } + + @Override + public OperationFuture restoreDatabase(final Restore restore) { + final String databaseInstanceName = restore.getDestination().getInstanceId().getName(); + final String databaseId = restore.getDestination().getDatabase(); + final RestoreDatabaseRequest.Builder requestBuilder = + RestoreDatabaseRequest.newBuilder() + .setParent(databaseInstanceName) + .setDatabaseId(databaseId) + .setBackup(restore.getSource().getName()); + if (restore.getEncryptionConfig() != null) { + requestBuilder.setEncryptionConfig( + EncryptionConfigProtoMapper.restoreDatabaseEncryptionConfig( + restore.getEncryptionConfig())); + } + + final OperationFutureCallable + callable = + new OperationFutureCallable<>( + databaseAdminStub.restoreDatabaseOperationCallable(), + requestBuilder.build(), + DatabaseAdminGrpc.getRestoreDatabaseMethod(), + databaseInstanceName, + nextPageToken -> + listDatabaseOperations( + databaseInstanceName, + 0, + String.format( + "(metadata.@type:type.googleapis.com/%s) AND (metadata.name:%s)", + RestoreDatabaseMetadata.getDescriptor().getFullName(), + String.format("%s/databases/%s", databaseInstanceName, databaseId)), + nextPageToken), + input -> { + try { + return input + .getMetadata() + .unpack(RestoreDatabaseMetadata.class) + .getProgress() + .getStartTime(); + } catch (InvalidProtocolBufferException e) { + return null; + } + }); + return RetryHelper.runWithRetries( + callable, + databaseAdminStubSettings + .restoreDatabaseOperationSettings() + .getInitialCallSettings() + .getRetrySettings(), + new OperationFutureRetryAlgorithm<>(), + NanoClock.getDefaultClock()); + } + + @Override + public Backup updateBackup(Backup backup, FieldMask updateMask) { + acquireAdministrativeRequestsRateLimiter(); + 
final UpdateBackupRequest request = + UpdateBackupRequest.newBuilder().setBackup(backup).setUpdateMask(updateMask).build(); + final GrpcCallContext context = + newAdminCallContext(backup.getName(), request, DatabaseAdminGrpc.getUpdateBackupMethod()); + return runWithRetryOnAdministrativeRequestsExceeded( + () -> databaseAdminStub.updateBackupCallable().call(request, context)); + } + + @Override + public void deleteBackup(String backupName) { + acquireAdministrativeRequestsRateLimiter(); + final DeleteBackupRequest request = + DeleteBackupRequest.newBuilder().setName(backupName).build(); + final GrpcCallContext context = + newAdminCallContext(backupName, request, DatabaseAdminGrpc.getDeleteBackupMethod()); + runWithRetryOnAdministrativeRequestsExceeded( + () -> { + databaseAdminStub.deleteBackupCallable().call(request, context); + return null; + }); + } + + @Override + public Backup getBackup(String backupName) throws SpannerException { + acquireAdministrativeRequestsRateLimiter(); + final GetBackupRequest request = GetBackupRequest.newBuilder().setName(backupName).build(); + final GrpcCallContext context = + newAdminCallContext(backupName, request, DatabaseAdminGrpc.getGetBackupMethod()); + return runWithRetryOnAdministrativeRequestsExceeded( + () -> get(databaseAdminStub.getBackupCallable().futureCall(request, context))); + } + + @Override + public Operation getOperation(String name) throws SpannerException { + acquireAdministrativeRequestsRateLimiter(); + final GetOperationRequest request = GetOperationRequest.newBuilder().setName(name).build(); + final GrpcCallContext context = + newAdminCallContext(name, request, OperationsGrpc.getGetOperationMethod()); + return runWithRetryOnAdministrativeRequestsExceeded( + () -> + get( + databaseAdminStub + .getOperationsStub() + .getOperationCallable() + .futureCall(request, context))); + } + + @Override + public void cancelOperation(String name) throws SpannerException { + acquireAdministrativeRequestsRateLimiter(); + 
final CancelOperationRequest request = + CancelOperationRequest.newBuilder().setName(name).build(); + final GrpcCallContext context = + newAdminCallContext(name, request, OperationsGrpc.getCancelOperationMethod()); + runWithRetryOnAdministrativeRequestsExceeded( + () -> { + get( + databaseAdminStub + .getOperationsStub() + .cancelOperationCallable() + .futureCall(request, context)); + return null; + }); + } + + @Override + public List batchCreateSessions( + String databaseName, + int sessionCount, + @Nullable String databaseRole, + @Nullable Map labels, + @Nullable Map options) + throws SpannerException { + BatchCreateSessionsRequest.Builder requestBuilder = + BatchCreateSessionsRequest.newBuilder() + .setDatabase(databaseName) + .setSessionCount(sessionCount); + Session.Builder sessionBuilder = Session.newBuilder(); + if (labels != null && !labels.isEmpty()) { + sessionBuilder.putAllLabels(labels); + } + if (databaseRole != null && !databaseRole.isEmpty()) { + sessionBuilder.setCreatorRole(databaseRole); + } + requestBuilder.setSessionTemplate(sessionBuilder); + BatchCreateSessionsRequest request = requestBuilder.build(); + GrpcCallContext context = + newCallContext( + options, databaseName, request, SpannerGrpc.getBatchCreateSessionsMethod(), true); + return get(spannerStub.batchCreateSessionsCallable().futureCall(request, context)) + .getSessionList(); + } + + @Override + public Session createSession( + String databaseName, + @Nullable String databaseRole, + @Nullable Map labels, + @Nullable Map options) + throws SpannerException { + // By default, sessions are not multiplexed + return createSession(databaseName, databaseRole, labels, options, false); + } + + @Override + public Session createSession( + String databaseName, + @Nullable String databaseRole, + @Nullable Map labels, + @Nullable Map options, + boolean isMultiplexed) + throws SpannerException { + CreateSessionRequest.Builder requestBuilder = + 
CreateSessionRequest.newBuilder().setDatabase(databaseName); + Session.Builder sessionBuilder = Session.newBuilder(); + if (labels != null && !labels.isEmpty()) { + sessionBuilder.putAllLabels(labels); + } + if (databaseRole != null && !databaseRole.isEmpty()) { + sessionBuilder.setCreatorRole(databaseRole); + } + sessionBuilder.setMultiplexed(isMultiplexed); + requestBuilder.setSession(sessionBuilder); + CreateSessionRequest request = requestBuilder.build(); + GrpcCallContext context = + newCallContext(options, databaseName, request, SpannerGrpc.getCreateSessionMethod(), true); + return get(spannerStub.createSessionCallable().futureCall(request, context)); + } + + @Override + public void deleteSession(String sessionName, @Nullable Map options) + throws SpannerException { + get(asyncDeleteSession(sessionName, options)); + } + + @Override + public ApiFuture asyncDeleteSession(String sessionName, @Nullable Map options) { + DeleteSessionRequest request = DeleteSessionRequest.newBuilder().setName(sessionName).build(); + GrpcCallContext context = + newCallContext(options, sessionName, request, SpannerGrpc.getDeleteSessionMethod()); + return spannerStub.deleteSessionCallable().futureCall(request, context); + } + + @Override + public RetrySettings getReadRetrySettings() { + return readRetrySettings; + } + + @Override + public Set getReadRetryableCodes() { + return readRetryableCodes; + } + + @Override + public StreamingCall read( + ReadRequest request, + ResultStreamConsumer consumer, + @Nullable Map options, + XGoogSpannerRequestId requestId, + boolean routeToLeader) { + GrpcCallContext context = + newCallContext( + options, + requestId, + request.getSession(), + request, + SpannerGrpc.getReadMethod(), + routeToLeader); + SpannerResponseObserver responseObserver = new SpannerResponseObserver(consumer); + spannerStub.streamingReadCallable().call(request, responseObserver, context); + return new GrpcStreamingCall(context, responseObserver.getController()); + } + + 
@Override + public RetrySettings getExecuteQueryRetrySettings() { + return executeQueryRetrySettings; + } + + @Override + public Set getExecuteQueryRetryableCodes() { + return executeQueryRetryableCodes; + } + + @Override + public ResultSet executeQuery( + ExecuteSqlRequest request, @Nullable Map options, boolean routeToLeader) { + return get(executeQueryAsync(request, options, routeToLeader)); + } + + @Override + public ApiFuture executeQueryAsync( + ExecuteSqlRequest request, @Nullable Map options, boolean routeToLeader) { + GrpcCallContext context = + newCallContext( + options, + request.getSession(), + request, + SpannerGrpc.getExecuteSqlMethod(), + routeToLeader); + return spannerStub.executeSqlCallable().futureCall(request, context); + } + + @Override + public ResultSet executePartitionedDml( + ExecuteSqlRequest request, @Nullable Map options) { + GrpcCallContext context = + newCallContext( + options, request.getSession(), request, SpannerGrpc.getExecuteSqlMethod(), true); + return get(partitionedDmlStub.executeSqlCallable().futureCall(request, context)); + } + + @Override + public RetrySettings getPartitionedDmlRetrySettings() { + return partitionedDmlRetrySettings; + } + + @Override + public ServerStream executeStreamingPartitionedDml( + ExecuteSqlRequest request, + Map options, + XGoogSpannerRequestId requestId, + Duration timeout) { + GrpcCallContext context = + newCallContext( + options, + requestId, + request.getSession(), + request, + SpannerGrpc.getExecuteStreamingSqlMethod(), + true); + // Override any timeout settings that might have been set on the call context. 
+ context = context.withTimeoutDuration(timeout).withStreamWaitTimeoutDuration(timeout); + return partitionedDmlStub.executeStreamingSqlCallable().call(request, context); + } + + @Override + public ServerStream batchWriteAtLeastOnce( + BatchWriteRequest request, @Nullable Map options) { + GrpcCallContext context = + newCallContext(options, request.getSession(), request, SpannerGrpc.getBatchWriteMethod()); + return spannerStub.batchWriteCallable().call(request, context); + } + + @Override + public StreamingCall executeQuery( + ExecuteSqlRequest request, + ResultStreamConsumer consumer, + @Nullable Map options, + XGoogSpannerRequestId requestId, + boolean routeToLeader) { + GrpcCallContext context = + newCallContext( + options, + requestId, + request.getSession(), + request, + SpannerGrpc.getExecuteStreamingSqlMethod(), + routeToLeader); + + SpannerResponseObserver responseObserver = new SpannerResponseObserver(consumer); + spannerStub.executeStreamingSqlCallable().call(request, responseObserver, context); + return new GrpcStreamingCall(context, responseObserver.getController()); + } + + @Override + public ExecuteBatchDmlResponse executeBatchDml( + ExecuteBatchDmlRequest request, @Nullable Map options) { + return get(executeBatchDmlAsync(request, options)); + } + + @Override + public ApiFuture executeBatchDmlAsync( + ExecuteBatchDmlRequest request, @Nullable Map options) { + GrpcCallContext context = + newCallContext( + options, request.getSession(), request, SpannerGrpc.getExecuteBatchDmlMethod(), true); + return spannerStub.executeBatchDmlCallable().futureCall(request, context); + } + + @Override + public ApiFuture beginTransactionAsync( + BeginTransactionRequest request, @Nullable Map options, boolean routeToLeader) { + GrpcCallContext context = + newCallContext( + options, + request.getSession(), + request, + SpannerGrpc.getBeginTransactionMethod(), + routeToLeader); + return spannerStub.beginTransactionCallable().futureCall(request, context); + } + + @Override + 
public Transaction beginTransaction( + BeginTransactionRequest request, @Nullable Map options, boolean routeToLeader) + throws SpannerException { + return get(beginTransactionAsync(request, options, routeToLeader)); + } + + @Override + public ApiFuture commitAsync( + CommitRequest request, @Nullable Map options) { + GrpcCallContext context = + newCallContext(options, request.getSession(), request, SpannerGrpc.getCommitMethod(), true); + return spannerStub.commitCallable().futureCall(request, context); + } + + @Override + public CommitResponse commit(CommitRequest commitRequest, @Nullable Map options) + throws SpannerException { + return get(commitAsync(commitRequest, options)); + } + + @Override + public RetrySettings getCommitRetrySettings() { + return commitRetrySettings; + } + + @Override + public ApiFuture rollbackAsync(RollbackRequest request, @Nullable Map options) { + GrpcCallContext context = + newCallContext( + options, request.getSession(), request, SpannerGrpc.getRollbackMethod(), true); + return spannerStub.rollbackCallable().futureCall(request, context); + } + + @Override + public void rollback(RollbackRequest request, @Nullable Map options) + throws SpannerException { + get(rollbackAsync(request, options)); + } + + @Override + public PartitionResponse partitionQuery( + PartitionQueryRequest request, @Nullable Map options) throws SpannerException { + GrpcCallContext context = + newCallContext( + options, request.getSession(), request, SpannerGrpc.getPartitionQueryMethod(), true); + return get(spannerStub.partitionQueryCallable().futureCall(request, context)); + } + + @Override + public PartitionResponse partitionRead( + PartitionReadRequest request, @Nullable Map options) throws SpannerException { + GrpcCallContext context = + newCallContext( + options, request.getSession(), request, SpannerGrpc.getPartitionReadMethod(), true); + return get(spannerStub.partitionReadCallable().futureCall(request, context)); + } + + @Override + public Policy 
getDatabaseAdminIAMPolicy(String resource, @Nullable GetPolicyOptions options) { + acquireAdministrativeRequestsRateLimiter(); + GetIamPolicyRequest.Builder builder = GetIamPolicyRequest.newBuilder().setResource(resource); + if (options != null) { + builder.setOptions(options); + } + final GetIamPolicyRequest request = builder.build(); + final GrpcCallContext context = + newCallContext(null, resource, request, DatabaseAdminGrpc.getGetIamPolicyMethod()); + return runWithRetryOnAdministrativeRequestsExceeded( + () -> get(databaseAdminStub.getIamPolicyCallable().futureCall(request, context))); + } + + @Override + public Policy setDatabaseAdminIAMPolicy(String resource, Policy policy) { + acquireAdministrativeRequestsRateLimiter(); + final SetIamPolicyRequest request = + SetIamPolicyRequest.newBuilder().setResource(resource).setPolicy(policy).build(); + final GrpcCallContext context = + newCallContext(null, resource, request, DatabaseAdminGrpc.getSetIamPolicyMethod()); + return runWithRetryOnAdministrativeRequestsExceeded( + () -> get(databaseAdminStub.setIamPolicyCallable().futureCall(request, context))); + } + + @Override + public TestIamPermissionsResponse testDatabaseAdminIAMPermissions( + String resource, Iterable permissions) { + acquireAdministrativeRequestsRateLimiter(); + final TestIamPermissionsRequest request = + TestIamPermissionsRequest.newBuilder() + .setResource(resource) + .addAllPermissions(permissions) + .build(); + final GrpcCallContext context = + newCallContext(null, resource, request, DatabaseAdminGrpc.getTestIamPermissionsMethod()); + return runWithRetryOnAdministrativeRequestsExceeded( + () -> get(databaseAdminStub.testIamPermissionsCallable().futureCall(request, context))); + } + + @Override + public Policy getInstanceAdminIAMPolicy(String resource) { + acquireAdministrativeRequestsRateLimiter(); + final GetIamPolicyRequest request = + GetIamPolicyRequest.newBuilder().setResource(resource).build(); + final GrpcCallContext context = + 
newCallContext(null, resource, request, InstanceAdminGrpc.getGetIamPolicyMethod()); + return runWithRetryOnAdministrativeRequestsExceeded( + () -> get(instanceAdminStub.getIamPolicyCallable().futureCall(request, context))); + } + + @Override + public Policy setInstanceAdminIAMPolicy(String resource, Policy policy) { + acquireAdministrativeRequestsRateLimiter(); + final SetIamPolicyRequest request = + SetIamPolicyRequest.newBuilder().setResource(resource).setPolicy(policy).build(); + final GrpcCallContext context = + newCallContext(null, resource, request, InstanceAdminGrpc.getSetIamPolicyMethod()); + return runWithRetryOnAdministrativeRequestsExceeded( + () -> get(instanceAdminStub.setIamPolicyCallable().futureCall(request, context))); + } + + @Override + public TestIamPermissionsResponse testInstanceAdminIAMPermissions( + String resource, Iterable permissions) { + acquireAdministrativeRequestsRateLimiter(); + final TestIamPermissionsRequest request = + TestIamPermissionsRequest.newBuilder() + .setResource(resource) + .addAllPermissions(permissions) + .build(); + final GrpcCallContext context = + newCallContext(null, resource, request, InstanceAdminGrpc.getTestIamPermissionsMethod()); + return runWithRetryOnAdministrativeRequestsExceeded( + () -> get(instanceAdminStub.testIamPermissionsCallable().futureCall(request, context))); + } + + /** Gets the result of an async RPC call, handling any exceptions encountered. */ + private static T get(final Future future) throws SpannerException { + final Context context = Context.current(); + try { + return future.get(); + } catch (InterruptedException e) { + // We are the sole consumer of the future, so cancel it. 
+ future.cancel(true); + throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (ExecutionException e) { + throw asSpannerException(e.getCause()); + } catch (CancellationException e) { + throw newSpannerException(context, e); + } catch (Exception exception) { + throw asSpannerException(exception); + } + } + + private GrpcCallContext createBaseCallContext() { + GrpcCallContext context = GrpcCallContext.createDefault(); + if (compressorName != null) { + // This sets the compressor for Client -> Server. + context = context.withCallOptions(context.getCallOptions().withCompression(compressorName)); + } + if (endToEndTracingEnabled) { + context = context.withExtraHeaders(metadataProvider.newEndToEndTracingHeader()); + } + if (isEnableAFEServerTiming()) { + context = context.withExtraHeaders(metadataProvider.newAfeServerTimingHeader()); + } + return context + .withStreamWaitTimeoutDuration(waitTimeout) + .withStreamIdleTimeoutDuration(idleTimeout); + } + + // Before removing this method, please verify with a code owner that it is not used + // in any internal testing infrastructure. 
+ @VisibleForTesting + @Deprecated + GrpcCallContext newCallContext(@Nullable Map options, String resource) { + return newCallContext(options, resource, null, null); + } + + private GrpcCallContext newAdminCallContext( + String resource, ReqT request, MethodDescriptor method) { + return newCallContext(null, resource, request, method, false); + } + + @VisibleForTesting + GrpcCallContext newCallContext( + @Nullable Map options, + String resource, + ReqT request, + MethodDescriptor method) { + return newCallContext(options, resource, request, method, false); + } + + @VisibleForTesting + GrpcCallContext newCallContext( + @Nullable Map options, + String resource, + ReqT request, + MethodDescriptor method, + boolean routeToLeader) { + return newCallContext(options, /* requestId= */ null, resource, request, method, routeToLeader); + } + + @VisibleForTesting + GrpcCallContext newCallContext( + @Nullable Map options, + @Nullable XGoogSpannerRequestId requestId, + String resource, + ReqT request, + MethodDescriptor method, + boolean routeToLeader) { + GrpcCallContext context = this.baseGrpcCallContext; + Long affinity = options == null ? null : Option.CHANNEL_HINT.getLong(options); + if (affinity != null) { + if (this.isGrpcGcpExtensionEnabled) { + // Set channel affinity in gRPC-GCP. + String affinityKey; + if (this.isDynamicChannelPoolEnabled) { + // When dynamic channel pooling is enabled, we use the raw affinity value as the key. + // This allows grpc-gcp to use round-robin for new keys, enabling new channels + // (created during scale-up) to receive requests. The affinity key lifetime setting + // ensures the affinity map doesn't grow unbounded. + affinityKey = String.valueOf(affinity); + } else { + // When DCP is disabled, compute bounded channel hint to prevent + // gRPC-GCP affinity map from getting unbounded. 
+ int boundedChannelHint = affinity.intValue() % this.numChannels; + affinityKey = String.valueOf(boundedChannelHint); + } + context = + context.withCallOptions( + context.getCallOptions().withOption(GcpManagedChannel.AFFINITY_KEY, affinityKey)); + } else { + // Set channel affinity in GAX. + context = context.withChannelAffinity(affinity.intValue()); + } + } + // When grpc-gcp extension with dynamic channel pooling is enabled, the actual channel ID + // will be set by RequestIdInterceptor after grpc-gcp selects the channel. + // Set to 0 (unknown) here as a placeholder. + int requestIdChannel = + (this.isGrpcGcpExtensionEnabled && this.isDynamicChannelPoolEnabled) + ? 0 + : convertToRequestIdChannelNumber(affinity); + if (requestId == null) { + requestId = requestIdCreator.nextRequestId(requestIdChannel); + } else { + requestId.setChannelId(requestIdChannel); + } + context = + context.withCallOptions( + context.getCallOptions().withOption(REQUEST_ID_CALL_OPTIONS_KEY, requestId)); + context = context.withExtraHeaders(metadataProvider.newExtraHeaders(resource, projectName)); + if (routeToLeader && leaderAwareRoutingEnabled) { + context = context.withExtraHeaders(metadataProvider.newRouteToLeaderHeader()); + } + if (callCredentialsProvider != null) { + CallCredentials callCredentials = callCredentialsProvider.getCallCredentials(); + if (callCredentials != null) { + context = + context.withCallOptions(context.getCallOptions().withCallCredentials(callCredentials)); + } + } + CallContextConfigurator configurator = SpannerOptions.CALL_CONTEXT_CONFIGURATOR_KEY.get(); + ApiCallContext apiCallContextFromContext = null; + if (configurator != null) { + apiCallContextFromContext = configurator.configure(context, request, method); + } + return (GrpcCallContext) context.merge(apiCallContextFromContext); + } + + @Override + public RequestIdCreator getRequestIdCreator() { + return this.requestIdCreator; + } + + private int convertToRequestIdChannelNumber(@Nullable Long affinity) { 
+ if (affinity == null) { + return 0; + } + int requestIdChannel = affinity.intValue(); + requestIdChannel = requestIdChannel == Integer.MAX_VALUE ? 0 : Math.abs(requestIdChannel); + // Start counting at 1, to distinguish between '0 == Unknown and >0 == known'. + return requestIdChannel % this.numChannels + 1; + } + + void registerResponseObserver(SpannerResponseObserver responseObserver) { + responseObservers.add(responseObserver); + } + + void unregisterResponseObserver(SpannerResponseObserver responseObserver) { + responseObservers.remove(responseObserver); + } + + void closeResponseObservers() { + responseObservers.forEach(SpannerResponseObserver::close); + responseObservers.clear(); + } + + @InternalApi + @VisibleForTesting + public int getNumActiveResponseObservers() { + return responseObservers.size(); + } + + @Override + public void shutdown() { + this.rpcIsClosed = true; + closeResponseObservers(); + if (this.spannerStub != null) { + this.spannerStub.close(); + this.partitionedDmlStub.close(); + this.instanceAdminStub.close(); + this.databaseAdminStub.close(); + this.spannerWatchdog.shutdown(); + + try { + this.spannerStub.awaitTermination(10L, TimeUnit.SECONDS); + this.partitionedDmlStub.awaitTermination(10L, TimeUnit.SECONDS); + this.instanceAdminStub.awaitTermination(10L, TimeUnit.SECONDS); + this.databaseAdminStub.awaitTermination(10L, TimeUnit.SECONDS); + this.spannerWatchdog.awaitTermination(10L, TimeUnit.SECONDS); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + } + + public void shutdownNow() { + this.rpcIsClosed = true; + closeResponseObservers(); + this.spannerStub.close(); + this.partitionedDmlStub.close(); + this.instanceAdminStub.close(); + this.databaseAdminStub.close(); + this.spannerWatchdog.shutdown(); + + this.spannerStub.shutdownNow(); + this.partitionedDmlStub.shutdownNow(); + this.instanceAdminStub.shutdownNow(); + this.databaseAdminStub.shutdownNow(); + 
this.spannerWatchdog.shutdownNow(); + } + + @Override + public boolean isClosed() { + return rpcIsClosed; + } + + @Override + public DatabaseAdminStubSettings getDatabaseAdminStubSettings() { + return databaseAdminStubSettings; + } + + @Override + public InstanceAdminStubSettings getInstanceAdminStubSettings() { + return instanceAdminStubSettings; + } + + private static final class GrpcStreamingCall implements StreamingCall { + private final ApiCallContext callContext; + private final StreamController controller; + + GrpcStreamingCall(ApiCallContext callContext, StreamController controller) { + this.callContext = callContext; + this.controller = controller; + } + + @Override + public ApiCallContext getCallContext() { + return callContext; + } + + @Override + public void request(int numMessages) { + controller.request(numMessages); + } + + @Override + public void cancel(@Nullable String message) { + controller.cancel(); + } + } + + /** + * A {@code ResponseObserver} that exposes the {@code StreamController} and delegates callbacks to + * the {@link ResultStreamConsumer}. 
+ */ + private class SpannerResponseObserver implements ResponseObserver { + + private StreamController controller; + private final ResultStreamConsumer consumer; + + public SpannerResponseObserver(ResultStreamConsumer consumer) { + this.consumer = consumer; + } + + void close() { + if (this.controller != null) { + this.controller.cancel(); + } + } + + @Override + public void onStart(StreamController controller) { + // Disable the auto flow control to allow client library + // set the number of messages it prefers to request + controller.disableAutoInboundFlowControl(); + this.controller = controller; + if (this.consumer.cancelQueryWhenClientIsClosed()) { + registerResponseObserver(this); + } + } + + @Override + public void onResponse(PartialResultSet response) { + consumer.onPartialResultSet(response); + } + + @Override + public void onError(Throwable t) { + // Unregister the response observer when the query has completed with an error. + if (this.consumer.cancelQueryWhenClientIsClosed()) { + unregisterResponseObserver(this); + } + consumer.onError(asSpannerException(t)); + } + + @Override + public void onComplete() { + // Unregister the response observer when the query has completed normally. + if (this.consumer.cancelQueryWhenClientIsClosed()) { + unregisterResponseObserver(this); + } + consumer.onCompleted(); + } + + StreamController getController() { + return Preconditions.checkNotNull(this.controller); + } + } + + private static Duration systemProperty(String name, int defaultValue) { + String stringValue = System.getProperty(name, ""); + return Duration.ofSeconds(stringValue.isEmpty() ? 
defaultValue : Integer.parseInt(stringValue)); + } + + // Wrapper class to build the GcpFallbackChannel using GAX's configuration + private static class FallbackChannelBuilder + extends ForwardingChannelBuilder2 { + private final GcpFallbackChannelOptions options; + + private final GcpManagedChannelBuilder primaryGcpBuilder; + private final GcpManagedChannelBuilder fallbackGcpBuilder; + + private FallbackChannelBuilder( + GcpManagedChannelBuilder primary, + GcpManagedChannelBuilder fallback, + GcpFallbackChannelOptions options) { + this.primaryGcpBuilder = primary; + this.fallbackGcpBuilder = fallback; + this.options = options; + } + + /** + * Delegates all configuration calls (e.g., interceptors, userAgent) to the primary builder. + * This ensures the primary channel receives all of GAX's standard configuration. + */ + @Override + protected ManagedChannelBuilder delegate() { + return primaryGcpBuilder; + } + + /** + * Overrides the build method to return our custom GcpFallbackChannel instead of a standard gRPC + * channel. + */ + @Override + public ManagedChannel build() { + return new GcpFallbackChannel(options, primaryGcpBuilder, fallbackGcpBuilder); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/GrpcChannelEndpointCache.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/GrpcChannelEndpointCache.java new file mode 100644 index 000000000000..11f0d6309310 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/GrpcChannelEndpointCache.java @@ -0,0 +1,203 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import com.google.api.core.InternalApi; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.common.annotations.VisibleForTesting; +import io.grpc.ConnectivityState; +import io.grpc.ManagedChannel; +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * gRPC implementation of {@link ChannelEndpointCache}. + * + *

This cache creates and caches gRPC channels per address. It uses {@link + * InstantiatingGrpcChannelProvider#withEndpoint(String)} to create new channels with the same + * configuration but different endpoints, avoiding race conditions. + */ +@InternalApi +class GrpcChannelEndpointCache implements ChannelEndpointCache { + + /** Timeout for graceful channel shutdown. */ + private static final long SHUTDOWN_TIMEOUT_SECONDS = 5; + + private final InstantiatingGrpcChannelProvider baseProvider; + private final Map servers = new ConcurrentHashMap<>(); + private final GrpcChannelEndpoint defaultEndpoint; + private final AtomicBoolean isShutdown = new AtomicBoolean(false); + + /** + * Creates a new cache with the given channel provider. + * + * @param channelProvider the base provider used to create channels. New channels for different + * endpoints are created using {@link InstantiatingGrpcChannelProvider#withEndpoint(String)}. + * @throws IOException if the default channel cannot be created + */ + public GrpcChannelEndpointCache(InstantiatingGrpcChannelProvider channelProvider) + throws IOException { + this.baseProvider = channelProvider; + String defaultEndpoint = channelProvider.getEndpoint(); + this.defaultEndpoint = new GrpcChannelEndpoint(defaultEndpoint, channelProvider); + this.servers.put(defaultEndpoint, this.defaultEndpoint); + } + + @Override + public ChannelEndpoint defaultChannel() { + return defaultEndpoint; + } + + @Override + public ChannelEndpoint get(String address) { + if (isShutdown.get()) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "ChannelEndpointCache has been shut down"); + } + + return servers.computeIfAbsent( + address, + addr -> { + try { + // Create a new provider with the same config but different endpoint. + // This is thread-safe as withEndpoint() returns a new provider instance. 
+ TransportChannelProvider newProvider = baseProvider.withEndpoint(addr); + return new GrpcChannelEndpoint(addr, newProvider); + } catch (IOException e) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INTERNAL, "Failed to create channel for address: " + addr, e); + } + }); + } + + @Override + public void evict(String address) { + if (defaultEndpoint.getAddress().equals(address)) { + return; + } + GrpcChannelEndpoint server = servers.remove(address); + if (server != null) { + shutdownChannel(server, false); + } + } + + @Override + public void shutdown() { + if (!isShutdown.compareAndSet(false, true)) { + return; + } + for (GrpcChannelEndpoint server : servers.values()) { + shutdownChannel(server, true); + } + servers.clear(); + } + + /** + * Shuts down a server's channel. + * + *

First attempts a graceful shutdown. When awaitTermination is true, waits for in-flight RPCs + * to complete and forces shutdown on timeout. + */ + private void shutdownChannel(GrpcChannelEndpoint server, boolean awaitTermination) { + ManagedChannel channel = server.getChannel(); + if (channel.isShutdown()) { + return; + } + + channel.shutdown(); + if (!awaitTermination) { + return; + } + try { + if (!channel.awaitTermination(SHUTDOWN_TIMEOUT_SECONDS, TimeUnit.SECONDS)) { + channel.shutdownNow(); + } + } catch (InterruptedException e) { + channel.shutdownNow(); + Thread.currentThread().interrupt(); + } + } + + /** gRPC implementation of {@link ChannelEndpoint}. */ + static class GrpcChannelEndpoint implements ChannelEndpoint { + private final String address; + private final ManagedChannel channel; + + /** + * Creates a server from a channel provider. + * + * @param address the server address + * @param provider the channel provider (must be a gRPC provider) + * @throws IOException if the channel cannot be created + */ + GrpcChannelEndpoint(String address, TransportChannelProvider provider) throws IOException { + this.address = address; + TransportChannelProvider readyProvider = provider; + if (provider.needsHeaders()) { + readyProvider = provider.withHeaders(java.util.Collections.emptyMap()); + } + GrpcTransportChannel transportChannel = + (GrpcTransportChannel) readyProvider.getTransportChannel(); + this.channel = (ManagedChannel) transportChannel.getChannel(); + } + + /** + * Creates a server with an existing channel. Primarily for testing. 
+ * + * @param address the server address + * @param channel the managed channel + */ + @VisibleForTesting + GrpcChannelEndpoint(String address, ManagedChannel channel) { + this.address = address; + this.channel = channel; + } + + @Override + public String getAddress() { + return address; + } + + @Override + public boolean isHealthy() { + if (channel.isShutdown() || channel.isTerminated()) { + return false; + } + // Check connectivity state without triggering a connection attempt. + // Some channel implementations don't support getState(), in which case + // we assume the channel is healthy if it's not shutdown/terminated. + try { + ConnectivityState state = channel.getState(false); + return state != ConnectivityState.SHUTDOWN && state != ConnectivityState.TRANSIENT_FAILURE; + } catch (UnsupportedOperationException ignore) { + return true; + } + } + + @Override + public ManagedChannel getChannel() { + return channel; + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/GrpcDatabaseAdminStubWithCustomCallableFactory.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/GrpcDatabaseAdminStubWithCustomCallableFactory.java new file mode 100644 index 000000000000..8e0771efd119 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/GrpcDatabaseAdminStubWithCustomCallableFactory.java @@ -0,0 +1,40 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.StatusCode; +import com.google.cloud.spanner.admin.database.v1.stub.DatabaseAdminStubSettings; +import com.google.cloud.spanner.admin.database.v1.stub.GrpcDatabaseAdminStub; +import java.io.IOException; + +/** + * Wrapper around {@link GrpcDatabaseAdminStub} to make the constructor available inside this + * package. This makes it possible to create a {@link GrpcDatabaseAdminStub} with a custom {@link + * GrpcStubCallableFactory} and custom settings. This is used by integration tests to automatically + * retry {@link StatusCode.Code#RESOURCE_EXHAUSTED} errors for certain administrative requests. + */ +class GrpcDatabaseAdminStubWithCustomCallableFactory extends GrpcDatabaseAdminStub { + GrpcDatabaseAdminStubWithCustomCallableFactory( + DatabaseAdminStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + super(settings, clientContext, callableFactory); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/GrpcSpannerStubWithStubSettingsAndClientContext.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/GrpcSpannerStubWithStubSettingsAndClientContext.java new file mode 100644 index 000000000000..6c575e1074c0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/GrpcSpannerStubWithStubSettingsAndClientContext.java @@ -0,0 +1,40 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import com.google.api.gax.rpc.ClientContext; +import com.google.cloud.spanner.v1.stub.GrpcSpannerStub; +import com.google.cloud.spanner.v1.stub.SpannerStubSettings; +import java.io.IOException; + +/** + * Wrapper around {@link GrpcSpannerStub} to make the constructor available inside this package. + * This makes it possible to create a {@link GrpcSpannerStub} with a {@link SpannerStubSettings} and + * a {@link ClientContext}. + */ +class GrpcSpannerStubWithStubSettingsAndClientContext extends GrpcSpannerStub { + + static final GrpcSpannerStubWithStubSettingsAndClientContext create( + SpannerStubSettings settings, ClientContext clientContext) throws IOException { + return new GrpcSpannerStubWithStubSettingsAndClientContext(settings, clientContext); + } + + protected GrpcSpannerStubWithStubSettingsAndClientContext( + SpannerStubSettings settings, ClientContext clientContext) throws IOException { + super(settings, clientContext); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/HeaderInterceptor.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/HeaderInterceptor.java new file mode 100644 index 000000000000..861e839a0366 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/HeaderInterceptor.java @@ -0,0 +1,310 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance 
with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.spanner.spi.v1; + +import static com.google.api.gax.grpc.GrpcCallContext.TRACER_KEY; +import static com.google.cloud.spanner.spi.v1.SpannerRpcViews.DATABASE_ID; +import static com.google.cloud.spanner.spi.v1.SpannerRpcViews.INSTANCE_ID; +import static com.google.cloud.spanner.spi.v1.SpannerRpcViews.METHOD; +import static com.google.cloud.spanner.spi.v1.SpannerRpcViews.PROJECT_ID; +import static com.google.cloud.spanner.spi.v1.SpannerRpcViews.SPANNER_GFE_HEADER_MISSING_COUNT; +import static com.google.cloud.spanner.spi.v1.SpannerRpcViews.SPANNER_GFE_LATENCY; + +import com.google.api.gax.tracing.ApiTracer; +import com.google.cloud.spanner.*; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.spanner.admin.database.v1.DatabaseName; +import io.grpc.*; +import io.grpc.ForwardingClientCall.SimpleForwardingClientCall; +import io.grpc.ForwardingClientCallListener.SimpleForwardingClientCallListener; +import io.grpc.alts.AltsContextUtil; +import io.opencensus.stats.MeasureMap; +import io.opencensus.stats.Stats; +import io.opencensus.stats.StatsRecorder; +import io.opencensus.tags.TagContext; +import io.opencensus.tags.TagValue; +import io.opencensus.tags.Tagger; +import io.opencensus.tags.Tags; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; +import io.opentelemetry.api.trace.Span; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import 
java.util.logging.Level; +import java.util.logging.Logger; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Intercepts all gRPC calls to extract server-timing header. Captures GFE Latency and GFE Header + * Missing count metrics. + */ +class HeaderInterceptor implements ClientInterceptor { + private static final DatabaseName UNDEFINED_DATABASE_NAME = + DatabaseName.of("undefined-project", "undefined-instance", "undefined-database"); + private static final Metadata.Key SERVER_TIMING_HEADER_KEY = + Metadata.Key.of("server-timing", Metadata.ASCII_STRING_MARSHALLER); + private static final String GFE_TIMING_HEADER = "gfet4t7"; + private static final String AFE_TIMING_HEADER = "afe"; + private static final Metadata.Key GOOGLE_CLOUD_RESOURCE_PREFIX_KEY = + Metadata.Key.of("google-cloud-resource-prefix", Metadata.ASCII_STRING_MARSHALLER); + private static final Pattern SERVER_TIMING_PATTERN = + Pattern.compile("(?[a-zA-Z0-9_-]+);\\s*dur=(?\\d+(\\.\\d+)?)"); + private static final Pattern GOOGLE_CLOUD_RESOURCE_PREFIX_PATTERN = + Pattern.compile( + ".*projects/(?\\p{ASCII}[^/]*)(/instances/(?\\p{ASCII}[^/]*))?(/databases/(?\\p{ASCII}[^/]*))?"); + private final Cache databaseNameCache = + CacheBuilder.newBuilder().maximumSize(100).build(); + private final Cache tagsCache = + CacheBuilder.newBuilder().maximumSize(1000).build(); + private final Cache attributesCache = + CacheBuilder.newBuilder().maximumSize(1000).build(); + private final Cache> builtInAttributesCache = + CacheBuilder.newBuilder().maximumSize(1000).build(); + private final Cache> keyCache = + CacheBuilder.newBuilder().maximumSize(1000).build(); + + // Get the global singleton Tagger object. 
+ private static final Tagger TAGGER = Tags.getTagger(); + private static final StatsRecorder STATS_RECORDER = Stats.getStatsRecorder(); + + private static final Logger LOGGER = Logger.getLogger(HeaderInterceptor.class.getName()); + private static final Level LEVEL = Level.INFO; + private final SpannerRpcMetrics spannerRpcMetrics; + private Float gfeLatency; + private Float afeLatency; + + HeaderInterceptor(SpannerRpcMetrics spannerRpcMetrics) { + this.spannerRpcMetrics = spannerRpcMetrics; + } + + @Override + public ClientCall interceptCall( + MethodDescriptor method, CallOptions callOptions, Channel next) { + ApiTracer tracer = callOptions.getOption(TRACER_KEY); + CompositeTracer compositeTracer = + tracer instanceof CompositeTracer ? (CompositeTracer) tracer : null; + return new SimpleForwardingClientCall(next.newCall(method, callOptions)) { + @Override + public void start(Listener responseListener, Metadata headers) { + try { + Span span = Span.current(); + DatabaseName databaseName = extractDatabaseName(headers); + String key = extractKey(databaseName, method.getFullMethodName()); + String requestId = extractRequestId(headers); + TagContext tagContext = getTagContext(key, method.getFullMethodName(), databaseName); + Attributes attributes = + getMetricAttributes(key, method.getFullMethodName(), databaseName); + + super.start( + new SimpleForwardingClientCallListener(responseListener) { + @Override + public void onHeaders(Metadata metadata) { + String serverTiming = metadata.get(SERVER_TIMING_HEADER_KEY); + try { + // Get gfe and afe Latency value + Map serverTimingMetrics = parseServerTimingHeader(serverTiming); + gfeLatency = serverTimingMetrics.get(GFE_TIMING_HEADER); + afeLatency = serverTimingMetrics.get(AFE_TIMING_HEADER); + } catch (NumberFormatException e) { + LOGGER.log(LEVEL, "Invalid server-timing object in header: {}", serverTiming); + } + + super.onHeaders(metadata); + } + + @Override + public void onClose(Status status, Metadata trailers) { + // 
Record Built-in Metrics + boolean isDirectPathUsed = AltsContextUtil.check(getAttributes()); + boolean isAfeEnabled = GapicSpannerRpc.isEnableAFEServerTiming(); + recordSpan(span, requestId); + recordCustomMetrics(tagContext, attributes, isDirectPathUsed); + Map builtInMetricsAttributes = new HashMap<>(); + try { + builtInMetricsAttributes = getBuiltInMetricAttributes(key, databaseName); + } catch (ExecutionException e) { + LOGGER.log( + LEVEL, "Unable to get built-in metric attributes {}", e.getMessage()); + } + recordBuiltInMetrics( + compositeTracer, + builtInMetricsAttributes, + requestId, + isDirectPathUsed, + isAfeEnabled); + super.onClose(status, trailers); + } + }, + headers); + } catch (ExecutionException executionException) { + // This should never happen, + throw SpannerExceptionFactory.asSpannerException(executionException.getCause()); + } + } + }; + } + + private void recordCustomMetrics( + TagContext tagContext, Attributes attributes, Boolean isDirectPathUsed) { + // Record OpenCensus and Custom OpenTelemetry Metrics + MeasureMap measureMap = STATS_RECORDER.newMeasureMap(); + + if (!isDirectPathUsed) { + if (gfeLatency != null) { + long gfeVal = gfeLatency.longValue(); + measureMap.put(SPANNER_GFE_LATENCY, gfeVal); + measureMap.put(SPANNER_GFE_HEADER_MISSING_COUNT, 0L); + spannerRpcMetrics.recordGfeLatency(gfeVal, attributes); + spannerRpcMetrics.recordGfeHeaderMissingCount(0L, attributes); + } else { + measureMap.put(SPANNER_GFE_HEADER_MISSING_COUNT, 1L); + spannerRpcMetrics.recordGfeHeaderMissingCount(1L, attributes); + } + } + measureMap.record(tagContext); + } + + private void recordSpan(Span span, String requestId) { + if (span != null) { + if (gfeLatency != null) { + span.setAttribute("gfe_latency", gfeLatency.toString()); + } + if (afeLatency != null) { + span.setAttribute("afe_latency", afeLatency.toString()); + } + span.setAttribute(XGoogSpannerRequestId.REQUEST_ID_HEADER_NAME, requestId); + } + } + + private void recordBuiltInMetrics( + 
CompositeTracer compositeTracer, + Map builtInMetricsAttributes, + String requestId, + Boolean isDirectPathUsed, + Boolean isAfeEnabled) { + if (compositeTracer != null) { + builtInMetricsAttributes.put(BuiltInMetricsConstant.REQUEST_ID_KEY.getKey(), requestId); + builtInMetricsAttributes.put( + BuiltInMetricsConstant.DIRECT_PATH_USED_KEY.getKey(), Boolean.toString(isDirectPathUsed)); + compositeTracer.addAttributes(builtInMetricsAttributes); + compositeTracer.recordServerTimingHeaderMetrics( + gfeLatency, afeLatency, isDirectPathUsed, isAfeEnabled); + } + } + + private Map parseServerTimingHeader(String serverTiming) { + Map serverTimingMetrics = new HashMap<>(); + if (serverTiming != null) { + Matcher matcher = SERVER_TIMING_PATTERN.matcher(serverTiming); + while (matcher.find()) { + String metricName = matcher.group("metricName"); + String durationStr = matcher.group("duration"); + + if (metricName != null && durationStr != null) { + serverTimingMetrics.put(metricName, Float.valueOf(durationStr)); + } + } + } + return serverTimingMetrics; + } + + private String extractKey(DatabaseName databaseName, String methodName) + throws ExecutionException { + Cache keys = + keyCache.get(databaseName, () -> CacheBuilder.newBuilder().maximumSize(1000).build()); + return keys.get(methodName, () -> databaseName + methodName); + } + + private DatabaseName extractDatabaseName(Metadata headers) throws ExecutionException { + String googleResourcePrefix = headers.get(GOOGLE_CLOUD_RESOURCE_PREFIX_KEY); + if (googleResourcePrefix != null) { + return databaseNameCache.get( + googleResourcePrefix, + () -> { + String projectId = "undefined-project"; + String instanceId = "undefined-database"; + String databaseId = "undefined-database"; + Matcher matcher = GOOGLE_CLOUD_RESOURCE_PREFIX_PATTERN.matcher(googleResourcePrefix); + if (matcher.find()) { + projectId = matcher.group("project"); + if (matcher.group("instance") != null) { + instanceId = matcher.group("instance"); + } + if 
(matcher.group("database") != null) { + databaseId = matcher.group("database"); + } + } else { + LOGGER.log( + LEVEL, "Error parsing google cloud resource header: " + googleResourcePrefix); + } + return DatabaseName.of(projectId, instanceId, databaseId); + }); + } + return UNDEFINED_DATABASE_NAME; + } + + private String extractRequestId(Metadata headers) throws ExecutionException { + return headers.get(XGoogSpannerRequestId.REQUEST_ID_HEADER_KEY); + } + + private TagContext getTagContext(String key, String method, DatabaseName databaseName) + throws ExecutionException { + return tagsCache.get( + key, + () -> + TAGGER + .currentBuilder() + .putLocal(PROJECT_ID, TagValue.create(databaseName.getProject())) + .putLocal(INSTANCE_ID, TagValue.create(databaseName.getInstance())) + .putLocal(DATABASE_ID, TagValue.create(databaseName.getDatabase())) + .putLocal(METHOD, TagValue.create(method)) + .build()); + } + + private Attributes getMetricAttributes(String key, String method, DatabaseName databaseName) + throws ExecutionException { + return attributesCache.get( + key, + () -> { + AttributesBuilder attributesBuilder = Attributes.builder(); + attributesBuilder.put("database", databaseName.getDatabase()); + attributesBuilder.put("instance_id", databaseName.getInstance()); + attributesBuilder.put("project_id", databaseName.getProject()); + attributesBuilder.put("method", method); + + return attributesBuilder.build(); + }); + } + + private Map getBuiltInMetricAttributes(String key, DatabaseName databaseName) + throws ExecutionException { + return builtInAttributesCache.get( + key, + () -> { + Map attributes = new HashMap<>(); + attributes.put(BuiltInMetricsConstant.DATABASE_KEY.getKey(), databaseName.getDatabase()); + attributes.put( + BuiltInMetricsConstant.INSTANCE_ID_KEY.getKey(), databaseName.getInstance()); + attributes.put( + BuiltInMetricsConstant.DIRECT_PATH_ENABLED_KEY.getKey(), + String.valueOf(GapicSpannerRpc.DIRECTPATH_CHANNEL_CREATED)); + return attributes; + }); 
+ } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/KeyAwareChannel.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/KeyAwareChannel.java new file mode 100644 index 000000000000..382ccead71a7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/KeyAwareChannel.java @@ -0,0 +1,633 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.spi.v1; + +import com.google.api.core.InternalApi; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.protobuf.ByteString; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.Transaction; +import com.google.spanner.v1.TransactionSelector; +import io.grpc.CallOptions; +import io.grpc.ClientCall; +import io.grpc.ForwardingClientCall; +import io.grpc.ForwardingClientCallListener.SimpleForwardingClientCallListener; +import io.grpc.ManagedChannel; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import java.io.IOException; +import java.lang.ref.SoftReference; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import javax.annotation.Nullable; + +/** + * ManagedChannel that routes eligible requests using location-aware routing hints. + * + *

Routing hints are applied to streaming read/query and unary ExecuteSql. Commit/Rollback use + * transaction affinity when available. BeginTransaction is routed only when a mutation key is + * provided. + */ +@InternalApi +final class KeyAwareChannel extends ManagedChannel { + private static final long MAX_TRACKED_READ_ONLY_TRANSACTIONS = 100_000L; + private static final String STREAMING_READ_METHOD = "google.spanner.v1.Spanner/StreamingRead"; + private static final String STREAMING_SQL_METHOD = + "google.spanner.v1.Spanner/ExecuteStreamingSql"; + private static final String UNARY_SQL_METHOD = "google.spanner.v1.Spanner/ExecuteSql"; + private static final String BEGIN_TRANSACTION_METHOD = + "google.spanner.v1.Spanner/BeginTransaction"; + private static final String COMMIT_METHOD = "google.spanner.v1.Spanner/Commit"; + private static final String ROLLBACK_METHOD = "google.spanner.v1.Spanner/Rollback"; + + private final ManagedChannel defaultChannel; + private final ChannelEndpointCache endpointCache; + private final String authority; + private final String defaultEndpointAddress; + private final Map> channelFinders = + new ConcurrentHashMap<>(); + private final Map transactionAffinities = new ConcurrentHashMap<>(); + // Maps read-only transaction IDs to their preferLeader value. + // Strong reads → true (prefer leader), Stale reads → false (any replica). + // Bounded to prevent unbounded growth if application code does not close read-only transactions. 
+ private final Cache readOnlyTxPreferLeader = + CacheBuilder.newBuilder().maximumSize(MAX_TRACKED_READ_ONLY_TRANSACTIONS).build(); + + private KeyAwareChannel( + InstantiatingGrpcChannelProvider channelProvider, + @Nullable ChannelEndpointCacheFactory endpointCacheFactory) + throws IOException { + if (endpointCacheFactory == null) { + this.endpointCache = new GrpcChannelEndpointCache(channelProvider); + } else { + this.endpointCache = endpointCacheFactory.create(channelProvider); + } + this.defaultChannel = endpointCache.defaultChannel().getChannel(); + this.defaultEndpointAddress = endpointCache.defaultChannel().getAddress(); + this.authority = this.defaultChannel.authority(); + } + + static KeyAwareChannel create( + InstantiatingGrpcChannelProvider channelProvider, + @Nullable ChannelEndpointCacheFactory endpointCacheFactory) + throws IOException { + return new KeyAwareChannel(channelProvider, endpointCacheFactory); + } + + private String extractDatabaseIdFromSession(String session) { + if (session == null || session.isEmpty()) { + return null; + } + int sessionsIndex = session.indexOf("/sessions/"); + if (sessionsIndex == -1) { + return null; + } + return session.substring(0, sessionsIndex); + } + + private ChannelFinder getOrCreateChannelFinder(String databaseId) { + SoftReference ref = channelFinders.get(databaseId); + ChannelFinder finder = (ref != null) ? ref.get() : null; + if (finder == null) { + synchronized (channelFinders) { + ref = channelFinders.get(databaseId); + finder = (ref != null) ? 
ref.get() : null; + if (finder == null) { + finder = new ChannelFinder(endpointCache); + channelFinders.put(databaseId, new SoftReference<>(finder)); + } + } + } + return finder; + } + + @Override + public ManagedChannel shutdown() { + endpointCache.shutdown(); + return this; + } + + @Override + public ManagedChannel shutdownNow() { + endpointCache.shutdown(); + return this; + } + + @Override + public boolean isTerminated() { + return defaultChannel.isTerminated(); + } + + @Override + public boolean isShutdown() { + return defaultChannel.isShutdown(); + } + + @Override + public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { + return defaultChannel.awaitTermination(timeout, unit); + } + + @Override + public String authority() { + return authority; + } + + @Override + public ClientCall newCall( + MethodDescriptor methodDescriptor, CallOptions callOptions) { + if (isKeyAware(methodDescriptor)) { + return new KeyAwareClientCall<>(this, methodDescriptor, callOptions); + } + return defaultChannel.newCall(methodDescriptor, callOptions); + } + + private static boolean isKeyAware(MethodDescriptor methodDescriptor) { + String method = methodDescriptor.getFullMethodName(); + return STREAMING_READ_METHOD.equals(method) + || STREAMING_SQL_METHOD.equals(method) + || UNARY_SQL_METHOD.equals(method) + || BEGIN_TRANSACTION_METHOD.equals(method) + || COMMIT_METHOD.equals(method) + || ROLLBACK_METHOD.equals(method); + } + + @Nullable + private ChannelEndpoint affinityEndpoint(ByteString transactionId) { + if (transactionId == null || transactionId.isEmpty()) { + return null; + } + String address = transactionAffinities.get(transactionId); + if (address == null) { + return null; + } + return endpointCache.get(address); + } + + private void clearAffinity(ByteString transactionId) { + if (transactionId == null || transactionId.isEmpty()) { + return; + } + transactionAffinities.remove(transactionId); + readOnlyTxPreferLeader.invalidate(transactionId); 
+ } + + void clearTransactionAffinity(ByteString transactionId) { + clearAffinity(transactionId); + } + + private boolean isReadOnlyTransaction(ByteString transactionId) { + return transactionId != null + && !transactionId.isEmpty() + && readOnlyTxPreferLeader.getIfPresent(transactionId) != null; + } + + @Nullable + private Boolean readOnlyPreferLeader(ByteString transactionId) { + if (transactionId == null || transactionId.isEmpty()) { + return null; + } + return readOnlyTxPreferLeader.getIfPresent(transactionId); + } + + private void trackReadOnlyTransaction(ByteString transactionId, boolean preferLeader) { + if (transactionId == null || transactionId.isEmpty()) { + return; + } + readOnlyTxPreferLeader.put(transactionId, preferLeader); + } + + private void recordAffinity( + ByteString transactionId, @Nullable ChannelEndpoint endpoint, boolean allowDefault) { + if (transactionId == null || transactionId.isEmpty() || endpoint == null) { + return; + } + String address = endpoint.getAddress(); + if (!allowDefault && defaultEndpointAddress.equals(address)) { + return; + } + transactionAffinities.put(transactionId, address); + } + + private static ByteString transactionIdFromSelector(TransactionSelector selector) { + if (selector.getSelectorCase() == TransactionSelector.SelectorCase.ID) { + return selector.getId(); + } + return ByteString.EMPTY; + } + + @Nullable + private static ByteString transactionIdFromMetadata(PartialResultSet result) { + if (result.hasMetadata()) { + return transactionIdFromTransaction(result.getMetadata().getTransaction()); + } + return null; + } + + @Nullable + private static ByteString transactionIdFromMetadata(ResultSet result) { + if (result.hasMetadata()) { + return transactionIdFromTransaction(result.getMetadata().getTransaction()); + } + return null; + } + + @Nullable + private static ByteString transactionIdFromTransaction(Transaction transaction) { + if (transaction != null && !transaction.getId().isEmpty()) { + return 
transaction.getId(); + } + return null; + } + + static final class KeyAwareClientCall + extends ForwardingClientCall { + private final KeyAwareChannel parentChannel; + private final MethodDescriptor methodDescriptor; + private final CallOptions callOptions; + private Listener responseListener; + private Metadata headers; + @Nullable private ClientCall delegate; + private ChannelFinder channelFinder; + @Nullable private ChannelEndpoint selectedEndpoint; + @Nullable private ByteString transactionIdToClear; + private boolean allowDefaultAffinity; + private long pendingRequests; + private boolean pendingHalfClose; + @Nullable private Boolean pendingMessageCompression; + @Nullable private io.grpc.Status cancelledStatus; + @Nullable private Metadata cancelledTrailers; + private boolean isReadOnlyBegin; + private boolean readOnlyIsStrong; + private final Object lock = new Object(); + + KeyAwareClientCall( + KeyAwareChannel parentChannel, + MethodDescriptor methodDescriptor, + CallOptions callOptions) { + this.parentChannel = parentChannel; + this.methodDescriptor = methodDescriptor; + this.callOptions = callOptions; + } + + @Override + protected ClientCall delegate() { + synchronized (lock) { + if (delegate == null) { + throw new IllegalStateException( + "Delegate call not initialized before use. sendMessage was likely not called."); + } + return delegate; + } + } + + @Override + public void start(Listener responseListener, Metadata headers) { + Listener listenerToClose = null; + io.grpc.Status statusToClose = null; + Metadata trailersToClose = null; + synchronized (lock) { + this.responseListener = new KeyAwareClientCallListener<>(responseListener, this); + this.headers = headers; + if (this.cancelledStatus != null) { + listenerToClose = this.responseListener; + statusToClose = this.cancelledStatus; + trailersToClose = + this.cancelledTrailers == null ? 
new Metadata() : this.cancelledTrailers; + } + } + if (listenerToClose != null) { + listenerToClose.onClose(statusToClose, trailersToClose); + } + } + + @Override + @SuppressWarnings("unchecked") + public void sendMessage(RequestT message) { + synchronized (lock) { + if (this.cancelledStatus != null) { + return; + } + if (responseListener == null || headers == null) { + throw new IllegalStateException("start must be called before sendMessage"); + } + ChannelEndpoint endpoint = null; + ChannelFinder finder = null; + + if (message instanceof ReadRequest) { + ReadRequest.Builder reqBuilder = ((ReadRequest) message).toBuilder(); + maybeTrackReadOnlyBegin(reqBuilder.getTransaction()); + RoutingDecision routing = routeFromRequest(reqBuilder); + finder = routing.finder; + endpoint = routing.endpoint; + message = (RequestT) reqBuilder.build(); + } else if (message instanceof ExecuteSqlRequest) { + ExecuteSqlRequest.Builder reqBuilder = ((ExecuteSqlRequest) message).toBuilder(); + maybeTrackReadOnlyBegin(reqBuilder.getTransaction()); + RoutingDecision routing = routeFromRequest(reqBuilder); + finder = routing.finder; + endpoint = routing.endpoint; + message = (RequestT) reqBuilder.build(); + } else if (message instanceof BeginTransactionRequest) { + BeginTransactionRequest.Builder reqBuilder = + ((BeginTransactionRequest) message).toBuilder(); + String databaseId = parentChannel.extractDatabaseIdFromSession(reqBuilder.getSession()); + if (databaseId != null && reqBuilder.hasMutationKey()) { + finder = parentChannel.getOrCreateChannelFinder(databaseId); + endpoint = finder.findServer(reqBuilder); + } + if (reqBuilder.hasOptions() && reqBuilder.getOptions().hasReadOnly()) { + isReadOnlyBegin = true; + readOnlyIsStrong = reqBuilder.getOptions().getReadOnly().getStrong(); + } else { + allowDefaultAffinity = true; + } + message = (RequestT) reqBuilder.build(); + } else if (message instanceof CommitRequest) { + CommitRequest request = (CommitRequest) message; + if 
(!request.getTransactionId().isEmpty()) { + endpoint = parentChannel.affinityEndpoint(request.getTransactionId()); + transactionIdToClear = request.getTransactionId(); + } + } else if (message instanceof RollbackRequest) { + RollbackRequest request = (RollbackRequest) message; + if (!request.getTransactionId().isEmpty()) { + endpoint = parentChannel.affinityEndpoint(request.getTransactionId()); + transactionIdToClear = request.getTransactionId(); + } + } else { + throw new IllegalStateException( + "Only read, query, begin transaction, commit, and rollback requests are supported for" + + " key-aware calls."); + } + + if (endpoint == null) { + endpoint = parentChannel.endpointCache.defaultChannel(); + } + selectedEndpoint = endpoint; + this.channelFinder = finder; + + delegate = endpoint.getChannel().newCall(methodDescriptor, callOptions); + if (pendingMessageCompression != null) { + delegate.setMessageCompression(pendingMessageCompression); + pendingMessageCompression = null; + } + delegate.start(responseListener, headers); + drainPendingRequests(); + delegate.sendMessage(message); + if (pendingHalfClose) { + delegate.halfClose(); + } + } + } + + @Override + public void halfClose() { + ClientCall currentDelegate; + synchronized (lock) { + if (this.cancelledStatus != null) { + return; + } + if (delegate == null) { + pendingHalfClose = true; + return; + } + currentDelegate = delegate; + } + currentDelegate.halfClose(); + } + + @Override + public void cancel(@Nullable String message, @Nullable Throwable cause) { + ClientCall currentDelegate; + Listener listenerToClose = null; + io.grpc.Status statusToClose = null; + Metadata trailersToClose = null; + synchronized (lock) { + currentDelegate = delegate; + if (currentDelegate == null) { + cancelledStatus = io.grpc.Status.CANCELLED.withDescription(message).withCause(cause); + Metadata trailers = + cause == null ? new Metadata() : io.grpc.Status.trailersFromThrowable(cause); + cancelledTrailers = trailers == null ? 
new Metadata() : trailers; + if (responseListener != null) { + listenerToClose = responseListener; + statusToClose = cancelledStatus; + trailersToClose = cancelledTrailers; + } + } + } + if (currentDelegate != null) { + currentDelegate.cancel(message, cause); + } else if (listenerToClose != null) { + listenerToClose.onClose(statusToClose, trailersToClose); + } + } + + @Override + public void request(int numMessages) { + ClientCall currentDelegate; + synchronized (lock) { + if (cancelledStatus != null) { + return; + } + if (delegate != null) { + currentDelegate = delegate; + } else { + if (numMessages <= 0) { + return; + } + long updated = pendingRequests + numMessages; + if (updated < 0L) { + updated = Long.MAX_VALUE; + } + pendingRequests = updated; + return; + } + } + currentDelegate.request(numMessages); + } + + @Override + public boolean isReady() { + ClientCall currentDelegate; + synchronized (lock) { + currentDelegate = delegate; + } + if (currentDelegate == null) { + return false; + } + return currentDelegate.isReady(); + } + + @Override + public void setMessageCompression(boolean enabled) { + ClientCall currentDelegate; + synchronized (lock) { + if (cancelledStatus != null) { + return; + } + if (delegate != null) { + currentDelegate = delegate; + } else { + pendingMessageCompression = enabled; + return; + } + } + currentDelegate.setMessageCompression(enabled); + } + + private void drainPendingRequests() { + ClientCall currentDelegate = delegate; + if (currentDelegate == null) { + return; + } + long requests = pendingRequests; + pendingRequests = 0L; + while (requests > 0) { + int batch = requests > Integer.MAX_VALUE ? 
Integer.MAX_VALUE : (int) requests; + currentDelegate.request(batch); + requests -= batch; + } + } + + void maybeRecordAffinity(ByteString transactionId) { + parentChannel.recordAffinity(transactionId, selectedEndpoint, allowDefaultAffinity); + } + + void maybeClearAffinity() { + parentChannel.clearAffinity(transactionIdToClear); + } + + private void maybeTrackReadOnlyBegin(TransactionSelector selector) { + if (selector.getSelectorCase() == TransactionSelector.SelectorCase.BEGIN + && selector.getBegin().hasReadOnly()) { + isReadOnlyBegin = true; + readOnlyIsStrong = selector.getBegin().getReadOnly().getStrong(); + } + } + + private RoutingDecision routeFromRequest(ReadRequest.Builder reqBuilder) { + String databaseId = parentChannel.extractDatabaseIdFromSession(reqBuilder.getSession()); + ByteString transactionId = transactionIdFromSelector(reqBuilder.getTransaction()); + // Skip affinity for read-only transactions so each read routes independently. + boolean isReadOnly = parentChannel.isReadOnlyTransaction(transactionId); + ChannelEndpoint endpoint = isReadOnly ? null : parentChannel.affinityEndpoint(transactionId); + ChannelFinder finder = null; + if (databaseId != null) { + finder = parentChannel.getOrCreateChannelFinder(databaseId); + } + if (databaseId != null && endpoint == null) { + Boolean preferLeaderOverride = parentChannel.readOnlyPreferLeader(transactionId); + ChannelEndpoint routed = + preferLeaderOverride != null + ? finder.findServer(reqBuilder, preferLeaderOverride) + : finder.findServer(reqBuilder); + endpoint = routed; + } + return new RoutingDecision(finder, endpoint); + } + + private RoutingDecision routeFromRequest(ExecuteSqlRequest.Builder reqBuilder) { + String databaseId = parentChannel.extractDatabaseIdFromSession(reqBuilder.getSession()); + ByteString transactionId = transactionIdFromSelector(reqBuilder.getTransaction()); + // Skip affinity for read-only transactions so each query routes independently. 
+ boolean isReadOnly = parentChannel.isReadOnlyTransaction(transactionId); + ChannelEndpoint endpoint = isReadOnly ? null : parentChannel.affinityEndpoint(transactionId); + ChannelFinder finder = null; + if (databaseId != null) { + finder = parentChannel.getOrCreateChannelFinder(databaseId); + } + if (databaseId != null && endpoint == null) { + Boolean preferLeaderOverride = parentChannel.readOnlyPreferLeader(transactionId); + ChannelEndpoint routed = + preferLeaderOverride != null + ? finder.findServer(reqBuilder, preferLeaderOverride) + : finder.findServer(reqBuilder); + endpoint = routed; + } + return new RoutingDecision(finder, endpoint); + } + } + + private static final class RoutingDecision { + @Nullable private final ChannelFinder finder; + @Nullable private final ChannelEndpoint endpoint; + + private RoutingDecision(@Nullable ChannelFinder finder, @Nullable ChannelEndpoint endpoint) { + this.finder = finder; + this.endpoint = endpoint; + } + } + + static final class KeyAwareClientCallListener + extends SimpleForwardingClientCallListener { + private final KeyAwareClientCall call; + + KeyAwareClientCallListener( + ClientCall.Listener responseListener, KeyAwareClientCall call) { + super(responseListener); + this.call = call; + } + + @Override + public void onMessage(ResponseT message) { + ByteString transactionId = null; + if (message instanceof PartialResultSet) { + PartialResultSet response = (PartialResultSet) message; + if (response.hasCacheUpdate() && call.channelFinder != null) { + call.channelFinder.update(response.getCacheUpdate()); + } + transactionId = transactionIdFromMetadata(response); + } else if (message instanceof ResultSet) { + ResultSet response = (ResultSet) message; + if (response.hasCacheUpdate() && call.channelFinder != null) { + call.channelFinder.update(response.getCacheUpdate()); + } + transactionId = transactionIdFromMetadata(response); + } else if (message instanceof Transaction) { + Transaction response = (Transaction) message; + 
transactionId = transactionIdFromTransaction(response); + } + if (transactionId != null) { + if (call.isReadOnlyBegin) { + // Track the read-only transaction so subsequent reads skip affinity + // and route independently based on key-based routing. + call.parentChannel.trackReadOnlyTransaction(transactionId, call.readOnlyIsStrong); + } else if (!call.parentChannel.isReadOnlyTransaction(transactionId)) { + call.maybeRecordAffinity(transactionId); + } + } + super.onMessage(message); + } + + @Override + public void onClose(io.grpc.Status status, Metadata trailers) { + call.maybeClearAffinity(); + super.onClose(status, trailers); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/KeyAwareTransportChannelProvider.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/KeyAwareTransportChannelProvider.java new file mode 100644 index 000000000000..438717c3c98f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/KeyAwareTransportChannelProvider.java @@ -0,0 +1,129 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.spi.v1; + +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.auth.Credentials; +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.Executor; +import java.util.concurrent.ScheduledExecutorService; +import javax.annotation.Nullable; + +final class KeyAwareTransportChannelProvider implements TransportChannelProvider { + private final InstantiatingGrpcChannelProvider baseProvider; + @Nullable private final ChannelEndpointCacheFactory endpointCacheFactory; + + KeyAwareTransportChannelProvider( + InstantiatingGrpcChannelProvider.Builder builder, + @Nullable ChannelEndpointCacheFactory endpointCacheFactory) { + this.baseProvider = builder.build(); + this.endpointCacheFactory = endpointCacheFactory; + } + + KeyAwareTransportChannelProvider( + InstantiatingGrpcChannelProvider baseProvider, + @Nullable ChannelEndpointCacheFactory endpointCacheFactory) { + this.baseProvider = baseProvider; + this.endpointCacheFactory = endpointCacheFactory; + } + + @Override + public GrpcTransportChannel getTransportChannel() throws IOException { + return GrpcTransportChannel.newBuilder() + .setManagedChannel(KeyAwareChannel.create(baseProvider, endpointCacheFactory)) + .build(); + } + + @Override + public String getTransportName() { + return baseProvider.getTransportName(); + } + + @Override + public boolean needsEndpoint() { + return baseProvider.needsEndpoint(); + } + + @Override + public boolean needsCredentials() { + return baseProvider.needsCredentials(); + } + + @Override + public boolean needsExecutor() { + return baseProvider.needsExecutor(); + } + + @Override + public boolean needsHeaders() { + return baseProvider.needsHeaders(); + } + + @Override + public boolean shouldAutoClose() { + return baseProvider.shouldAutoClose(); + } + + @Override + public 
TransportChannelProvider withEndpoint(String endpoint) { + return new KeyAwareTransportChannelProvider( + (InstantiatingGrpcChannelProvider) baseProvider.withEndpoint(endpoint), + endpointCacheFactory); + } + + @Override + public TransportChannelProvider withCredentials(Credentials credentials) { + return new KeyAwareTransportChannelProvider( + (InstantiatingGrpcChannelProvider) baseProvider.withCredentials(credentials), + endpointCacheFactory); + } + + @Override + public TransportChannelProvider withHeaders(Map headers) { + return new KeyAwareTransportChannelProvider( + (InstantiatingGrpcChannelProvider) baseProvider.withHeaders(headers), endpointCacheFactory); + } + + @Override + public TransportChannelProvider withPoolSize(int poolSize) { + return new KeyAwareTransportChannelProvider( + (InstantiatingGrpcChannelProvider) baseProvider.withPoolSize(poolSize), + endpointCacheFactory); + } + + @Override + public TransportChannelProvider withExecutor(ScheduledExecutorService executor) { + return new KeyAwareTransportChannelProvider( + (InstantiatingGrpcChannelProvider) baseProvider.withExecutor(executor), + endpointCacheFactory); + } + + @Override + public TransportChannelProvider withExecutor(Executor executor) { + return new KeyAwareTransportChannelProvider( + (InstantiatingGrpcChannelProvider) baseProvider.withExecutor(executor), + endpointCacheFactory); + } + + @Override + public boolean acceptsPoolSize() { + return baseProvider.acceptsPoolSize(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/KeyRangeCache.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/KeyRangeCache.java new file mode 100644 index 000000000000..bdbd495aa58c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/KeyRangeCache.java @@ -0,0 +1,688 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you 
may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import com.google.api.core.InternalApi; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.hash.Hashing; +import com.google.protobuf.ByteString; +import com.google.spanner.v1.CacheUpdate; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.Group; +import com.google.spanner.v1.Range; +import com.google.spanner.v1.RoutingHint; +import com.google.spanner.v1.Tablet; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; +import java.util.Objects; +import java.util.TreeMap; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.IntStream; + +/** Cache for routing information used by location-aware routing. */ +@InternalApi +public final class KeyRangeCache { + + private static final int MAX_LOCAL_REPLICA_DISTANCE = 5; + private static final int DEFAULT_MIN_ENTRIES_FOR_RANDOM_PICK = 1000; + + /** Determines how to handle ranges that span multiple splits. */ + public enum RangeMode { + /** Consider it a cache miss if the whole range is not in a single split. */ + COVERING_SPLIT, + /** If the range spans multiple splits, pick a random split when possible. 
*/ + PICK_RANDOM + } + + private final ChannelEndpointCache endpointCache; + private final NavigableMap ranges = + new TreeMap<>(ByteString.unsignedLexicographicalComparator()); + private final Map groups = new HashMap<>(); + private final Object lock = new Object(); + private final AtomicLong accessCounter = new AtomicLong(); + + private volatile boolean deterministicRandom = false; + private volatile int minCacheEntriesForRandomPick = DEFAULT_MIN_ENTRIES_FOR_RANDOM_PICK; + + public KeyRangeCache(ChannelEndpointCache endpointCache) { + this.endpointCache = Objects.requireNonNull(endpointCache); + } + + @VisibleForTesting + void useDeterministicRandom() { + deterministicRandom = true; + } + + @VisibleForTesting + void setMinCacheEntriesForRandomPick(int value) { + minCacheEntriesForRandomPick = value; + } + + /** Applies cache updates. Tablets are processed inside group updates. */ + public void addRanges(CacheUpdate cacheUpdate) { + List newGroups = new ArrayList<>(); + synchronized (lock) { + for (Group groupIn : cacheUpdate.getGroupList()) { + newGroups.add(findOrInsertGroup(groupIn)); + } + for (Range rangeIn : cacheUpdate.getRangeList()) { + replaceRangeIfNewer(rangeIn); + } + for (CachedGroup group : newGroups) { + unref(group); + } + } + } + + /** + * Fills routing hint and returns the server to use, or null if no routing decision can be made. 
+ */ + public ChannelEndpoint fillRoutingHint( + boolean preferLeader, + RangeMode rangeMode, + DirectedReadOptions directedReadOptions, + RoutingHint.Builder hintBuilder) { + ByteString key = hintBuilder.getKey(); + if (key.isEmpty()) { + return null; + } + + CachedRange targetRange; + synchronized (lock) { + targetRange = findRangeLocked(key, hintBuilder.getLimitKey(), rangeMode); + } + + if (targetRange == null || targetRange.group == null) { + return null; + } + + hintBuilder.setGroupUid(targetRange.group.groupUid); + hintBuilder.setSplitId(targetRange.splitId); + hintBuilder.setKey(targetRange.startKey); + hintBuilder.setLimitKey(targetRange.limitKey); + + return targetRange.group.fillRoutingHint(preferLeader, directedReadOptions, hintBuilder); + } + + public void clear() { + synchronized (lock) { + for (CachedRange range : ranges.values()) { + unref(range.group); + } + ranges.clear(); + groups.clear(); + } + } + + public int size() { + synchronized (lock) { + return ranges.size(); + } + } + + public void shrinkTo(int newSize) { + synchronized (lock) { + if (newSize <= 0) { + clear(); + return; + } + if (newSize >= ranges.size()) { + return; + } + + int numToShrink = ranges.size() - newSize; + int numToSample = Math.min(numToShrink * 2, ranges.size()); + List allRanges = new ArrayList<>(ranges.values()); + int[] sampleIndexes = sampleWithoutReplacement(allRanges.size(), numToSample); + Arrays.sort(sampleIndexes); + + List sampled = new ArrayList<>(numToSample); + for (int index : sampleIndexes) { + sampled.add(allRanges.get(index)); + } + sampled.sort(Comparator.comparingLong(range -> range.lastAccess)); + + for (int i = 0; i < numToShrink; i++) { + CachedRange range = sampled.get(i); + ranges.remove(range.limitKey); + unref(range.group); + } + } + } + + public String debugString() { + StringBuilder sb = new StringBuilder(); + synchronized (lock) { + for (Map.Entry entry : ranges.entrySet()) { + CachedRange cachedRange = entry.getValue(); + sb.append("Range[") 
+ .append(cachedRange.startKey.toStringUtf8()) + .append("-") + .append(entry.getKey().toStringUtf8()) + .append("]: ") + .append(cachedRange.debugString()) + .append("\n"); + } + for (CachedGroup g : groups.values()) { + sb.append(g.debugString()).append("\n"); + } + } + return sb.toString(); + } + + private long accessTimeNow() { + return accessCounter.incrementAndGet(); + } + + private CachedRange findRangeLocked(ByteString key, ByteString limit, RangeMode mode) { + Map.Entry entry = ranges.higherEntry(key); + if (entry == null) { + return null; + } + + CachedRange firstRange = entry.getValue(); + boolean startInRange = compare(key, firstRange.startKey) >= 0; + if (limit.isEmpty()) { + if (startInRange) { + firstRange.lastAccess = accessTimeNow(); + return firstRange; + } + return null; + } + + boolean limitInRange = compare(limit, entry.getKey()) <= 0; + if (startInRange && limitInRange) { + firstRange.lastAccess = accessTimeNow(); + return firstRange; + } + if (mode == RangeMode.COVERING_SPLIT) { + return null; + } + + int total = 0; + boolean foundGap = !startInRange; + boolean hitEnd = false; + Map.Entry sampled = entry; + ByteString lastLimit = firstRange.startKey; + + Map.Entry current = entry; + while (current != null) { + CachedRange range = current.getValue(); + if (!lastLimit.equals(range.startKey)) { + foundGap = true; + if (compare(range.startKey, limit) >= 0) { + break; + } + } + total++; + if (uniformRandom(total, key, limit, range.startKey) == 0) { + sampled = current; + } + lastLimit = range.limitKey; + if (compare(lastLimit, limit) >= 0 || total >= minCacheEntriesForRandomPick) { + break; + } + Map.Entry next = ranges.higherEntry(current.getKey()); + if (next == null) { + hitEnd = true; + break; + } + current = next; + } + + if (hitEnd) { + foundGap = true; + } + + if (!foundGap || total >= minCacheEntriesForRandomPick) { + CachedRange selected = sampled.getValue(); + selected.lastAccess = accessTimeNow(); + return selected; + } + return null; + 
} + + private int uniformRandom(int n, ByteString seed1, ByteString seed2, ByteString seed3) { + if (deterministicRandom) { + ByteString combined = seed1.concat(seed2).concat(seed3); + int hash = Hashing.crc32c().hashBytes(combined.toByteArray()).asInt(); + long unsigned = Integer.toUnsignedLong(hash); + return (int) (unsigned % n); + } + return ThreadLocalRandom.current().nextInt(n); + } + + private int[] sampleWithoutReplacement(int populationSize, int sampleSize) { + int[] indexes = IntStream.range(0, populationSize).toArray(); + for (int i = 0; i < sampleSize; i++) { + int j = i + ThreadLocalRandom.current().nextInt(populationSize - i); + int tmp = indexes[i]; + indexes[i] = indexes[j]; + indexes[j] = tmp; + } + return Arrays.copyOf(indexes, sampleSize); + } + + private void replaceRangeIfNewer(Range rangeIn) { + ByteString startKey = rangeIn.getStartKey(); + ByteString limitKey = rangeIn.getLimitKey(); + + Map.Entry startEntry = ranges.higherEntry(startKey); + if (startEntry == null || compare(startEntry.getValue().startKey, limitKey) >= 0) { + CachedRange newRange = + new CachedRange( + startKey, + limitKey, + findAndRefGroup(rangeIn.getGroupUid()), + rangeIn.getSplitId(), + rangeIn.getGeneration(), + accessTimeNow()); + ranges.put(limitKey, newRange); + return; + } + + List overlapping = new ArrayList<>(); + for (Map.Entry entry = startEntry; + entry != null && compare(entry.getValue().startKey, limitKey) < 0; + entry = ranges.higherEntry(entry.getKey())) { + CachedRange existing = entry.getValue(); + int genCompare = compare(rangeIn.getGeneration(), existing.generation); + if (genCompare < 0 + || (genCompare == 0 + && startKey.equals(existing.startKey) + && limitKey.equals(existing.limitKey))) { + return; + } + overlapping.add(existing); + } + + for (CachedRange range : overlapping) { + ranges.remove(range.limitKey); + } + + CachedRange first = overlapping.get(0); + if (compare(first.startKey, startKey) < 0) { + CachedRange head = + new CachedRange( + 
first.startKey, + startKey, + refGroup(first.group), + first.splitId, + first.generation, + first.lastAccess); + ranges.put(head.limitKey, head); + } + + CachedRange newRange = + new CachedRange( + startKey, + limitKey, + findAndRefGroup(rangeIn.getGroupUid()), + rangeIn.getSplitId(), + rangeIn.getGeneration(), + accessTimeNow()); + ranges.put(limitKey, newRange); + + CachedRange last = overlapping.get(overlapping.size() - 1); + if (compare(last.limitKey, limitKey) > 0) { + CachedRange tail = + new CachedRange( + limitKey, + last.limitKey, + refGroup(last.group), + last.splitId, + last.generation, + last.lastAccess); + ranges.put(tail.limitKey, tail); + } + + for (CachedRange range : overlapping) { + unref(range.group); + } + } + + private CachedGroup findAndRefGroup(long groupUid) { + CachedGroup group = groups.get(groupUid); + if (group != null) { + group.refs++; + } + return group; + } + + private CachedGroup findOrInsertGroup(Group groupIn) { + CachedGroup group = groups.get(groupIn.getGroupUid()); + if (group == null) { + group = new CachedGroup(groupIn.getGroupUid()); + groups.put(groupIn.getGroupUid(), group); + } else { + group.refs++; + } + group.update(groupIn); + return group; + } + + private CachedGroup refGroup(CachedGroup group) { + if (group != null) { + group.refs++; + } + return group; + } + + private void unref(CachedGroup group) { + if (group == null) { + return; + } + if (--group.refs == 0) { + groups.remove(group.groupUid); + } + } + + private int compare(ByteString left, ByteString right) { + return ByteString.unsignedLexicographicalComparator().compare(left, right); + } + + /** Represents a single tablet within a group. 
*/ + private class CachedTablet { + long tabletUid = 0; + ByteString incarnation = ByteString.EMPTY; + String serverAddress = ""; + int distance = 0; + boolean skip = false; + Tablet.Role role = Tablet.Role.ROLE_UNSPECIFIED; + String location = ""; + + ChannelEndpoint endpoint = null; + + void update(Tablet tabletIn) { + if (tabletUid > 0 && compare(incarnation, tabletIn.getIncarnation()) > 0) { + return; + } + + tabletUid = tabletIn.getTabletUid(); + incarnation = tabletIn.getIncarnation(); + distance = tabletIn.getDistance(); + skip = tabletIn.getSkip(); + role = tabletIn.getRole(); + location = tabletIn.getLocation(); + + if (!serverAddress.equals(tabletIn.getServerAddress())) { + serverAddress = tabletIn.getServerAddress(); + endpoint = null; + } + } + + boolean matches(DirectedReadOptions directedReadOptions) { + switch (directedReadOptions.getReplicasCase()) { + case INCLUDE_REPLICAS: + for (DirectedReadOptions.ReplicaSelection rs : + directedReadOptions.getIncludeReplicas().getReplicaSelectionsList()) { + if (matches(rs)) { + return true; + } + } + return false; + case EXCLUDE_REPLICAS: + for (DirectedReadOptions.ReplicaSelection rs : + directedReadOptions.getExcludeReplicas().getReplicaSelectionsList()) { + if (matches(rs)) { + return false; + } + } + return true; + case REPLICAS_NOT_SET: + default: + return distance <= MAX_LOCAL_REPLICA_DISTANCE; + } + } + + private boolean matches(DirectedReadOptions.ReplicaSelection selection) { + if (!selection.getLocation().isEmpty() && !selection.getLocation().equals(location)) { + return false; + } + switch (selection.getType()) { + case READ_WRITE: + return role == Tablet.Role.READ_WRITE || role == Tablet.Role.ROLE_UNSPECIFIED; + case READ_ONLY: + return role == Tablet.Role.READ_ONLY; + default: + return true; + } + } + + boolean shouldSkip(RoutingHint.Builder hintBuilder) { + if (skip || serverAddress.isEmpty() || (endpoint != null && !endpoint.isHealthy())) { + RoutingHint.SkippedTablet.Builder skipped = 
hintBuilder.addSkippedTabletUidBuilder(); + skipped.setTabletUid(tabletUid); + skipped.setIncarnation(incarnation); + return true; + } + return false; + } + + ChannelEndpoint pick(RoutingHint.Builder hintBuilder) { + hintBuilder.setTabletUid(tabletUid); + if (endpoint == null && !serverAddress.isEmpty()) { + endpoint = endpointCache.get(serverAddress); + } + return endpoint; + } + + String debugString() { + return tabletUid + + ":" + + serverAddress + + "@" + + incarnation + + "(location=" + + location + + ",role=" + + role + + ",distance=" + + distance + + (skip ? ",skip" : "") + + ")"; + } + } + + /** Represents a paxos group with its tablets. */ + private class CachedGroup { + final long groupUid; + ByteString generation = ByteString.EMPTY; + List tablets = new ArrayList<>(); + int leaderIndex = -1; + int refs = 1; + + CachedGroup(long groupUid) { + this.groupUid = groupUid; + } + + synchronized void update(Group groupIn) { + if (compare(groupIn.getGeneration(), generation) > 0) { + generation = groupIn.getGeneration(); + if (groupIn.getLeaderIndex() >= 0 && groupIn.getLeaderIndex() < groupIn.getTabletsCount()) { + leaderIndex = groupIn.getLeaderIndex(); + } else { + leaderIndex = -1; + } + } + + if (tablets.size() == groupIn.getTabletsCount()) { + boolean mismatch = false; + for (int t = 0; t < groupIn.getTabletsCount(); t++) { + if (tablets.get(t).tabletUid != groupIn.getTablets(t).getTabletUid()) { + mismatch = true; + break; + } + } + if (!mismatch) { + for (int t = 0; t < groupIn.getTabletsCount(); t++) { + tablets.get(t).update(groupIn.getTablets(t)); + } + return; + } + } + + Map tabletsByUid = new HashMap<>(tablets.size()); + for (CachedTablet tablet : tablets) { + tabletsByUid.put(tablet.tabletUid, tablet); + } + List newTablets = new ArrayList<>(groupIn.getTabletsCount()); + for (int t = 0; t < groupIn.getTabletsCount(); t++) { + Tablet tabletIn = groupIn.getTablets(t); + CachedTablet tablet = tabletsByUid.get(tabletIn.getTabletUid()); + if (tablet == 
null) { + tablet = new CachedTablet(); + } + tablet.update(tabletIn); + newTablets.add(tablet); + } + tablets = newTablets; + } + + ChannelEndpoint fillRoutingHint( + boolean preferLeader, + DirectedReadOptions directedReadOptions, + RoutingHint.Builder hintBuilder) { + boolean hasDirectedReadOptions = + directedReadOptions.getReplicasCase() + != DirectedReadOptions.ReplicasCase.REPLICAS_NOT_SET; + + // Fast path: pick a tablet while holding the lock. If the endpoint is already + // cached on the tablet, return it immediately without releasing the lock. + // If the endpoint needs to be created (blocking network dial), release the + // lock first so other threads are not blocked during channel creation. + CachedTablet selected; + synchronized (this) { + selected = + selectTabletLocked( + preferLeader, hasDirectedReadOptions, hintBuilder, directedReadOptions); + if (selected == null) { + return null; + } + if (selected.endpoint != null || selected.serverAddress.isEmpty()) { + return selected.pick(hintBuilder); + } + // Slow path: endpoint not yet created. Capture the address and release the + // lock before calling endpointCache.get(), which may block on network dial. + hintBuilder.setTabletUid(selected.tabletUid); + } + + String serverAddress = selected.serverAddress; + ChannelEndpoint endpoint = endpointCache.get(serverAddress); + + synchronized (this) { + // Only update if the tablet's address hasn't changed since we released the lock. + if (selected.endpoint == null && selected.serverAddress.equals(serverAddress)) { + selected.endpoint = endpoint; + } + // Re-set tabletUid with the latest value in case update() ran concurrently. 
+ hintBuilder.setTabletUid(selected.tabletUid); + return selected.endpoint; + } + } + + private CachedTablet selectTabletLocked( + boolean preferLeader, + boolean hasDirectedReadOptions, + RoutingHint.Builder hintBuilder, + DirectedReadOptions directedReadOptions) { + if (preferLeader + && !hasDirectedReadOptions + && hasLeader() + && leader().distance <= MAX_LOCAL_REPLICA_DISTANCE + && !leader().shouldSkip(hintBuilder)) { + return leader(); + } + for (CachedTablet tablet : tablets) { + if (!tablet.matches(directedReadOptions)) { + continue; + } + if (tablet.shouldSkip(hintBuilder)) { + continue; + } + return tablet; + } + return null; + } + + boolean hasLeader() { + return leaderIndex >= 0 && leaderIndex < tablets.size(); + } + + CachedTablet leader() { + return tablets.get(leaderIndex); + } + + String debugString() { + StringBuilder sb = new StringBuilder(); + sb.append(groupUid).append(":["); + for (int i = 0; i < tablets.size(); i++) { + sb.append(tablets.get(i).debugString()); + if (hasLeader() && i == leaderIndex) { + sb.append(" (leader)"); + } + if (i < tablets.size() - 1) { + sb.append(", "); + } + } + sb.append("]@").append(generation.toStringUtf8()); + sb.append("#").append(refs); + return sb.toString(); + } + } + + /** Represents a cached range with its group and split information. */ + private static class CachedRange { + final ByteString startKey; + final ByteString limitKey; + final CachedGroup group; + final long splitId; + final ByteString generation; + long lastAccess; + + CachedRange( + ByteString startKey, + ByteString limitKey, + CachedGroup group, + long splitId, + ByteString generation, + long lastAccess) { + this.startKey = startKey; + this.limitKey = limitKey; + this.group = group; + this.splitId = splitId; + this.generation = generation; + this.lastAccess = lastAccess; + } + + String debugString() { + return (group != null ? group.groupUid : "null_group") + + "," + + splitId + + "@" + + (generation.isEmpty() ? 
"" : generation.toStringUtf8()) + + ",last_access=" + + lastAccess; + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/KeyRecipe.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/KeyRecipe.java new file mode 100644 index 000000000000..1e15b69bf979 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/KeyRecipe.java @@ -0,0 +1,865 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.spi.v1; + +import com.google.api.core.InternalApi; +import com.google.protobuf.ByteString; +import com.google.protobuf.ListValue; +import com.google.protobuf.Struct; +import com.google.protobuf.Value; +import com.google.spanner.v1.KeyRange; +import com.google.spanner.v1.KeySet; +import com.google.spanner.v1.Mutation; +import java.time.LocalDate; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeParseException; +import java.time.format.ResolverStyle; +import java.util.ArrayList; +import java.util.Base64; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.ThreadLocalRandom; +import java.util.function.BiFunction; +import java.util.stream.Collectors; + +@InternalApi +public final class KeyRecipe { + + // kInfinity is "\xff" - the largest single byte, used as a sentinel for ranges + private static final ByteString K_INFINITY = ByteString.copyFrom(new byte[] {(byte) 0xFF}); + + private enum Kind { + TAG, + VALUE, + INVALID + } + + private enum KeyType { + FULL_KEY, + PREFIX, + PREFIX_SUCCESSOR, + INDEX_KEY + } + + private enum EncodeState { + OK, + FAILED, + END_OF_KEYS + } + + private static final class Part { + private final Kind kind; + private final int tag; // if kind == TAG + private final com.google.spanner.v1.Type type; // if kind == VALUE + private final com.google.spanner.v1.KeyRecipe.Part.Order order; // if kind == VALUE + private final com.google.spanner.v1.KeyRecipe.Part.NullOrder nullOrder; // if kind == VALUE + private final String identifier; // if kind == VALUE + private final List structIdentifiers; // if kind == VALUE + private final Value constantValue; // if kind == VALUE and value is set + private final boolean random; // if kind == VALUE and random: true + + private Value constantValue() { + return constantValue; + } + + private boolean hasConstantValue() { + return 
constantValue != null; + } + + private Part( + Kind kind, + int tag, + com.google.spanner.v1.Type type, + com.google.spanner.v1.KeyRecipe.Part.Order order, + com.google.spanner.v1.KeyRecipe.Part.NullOrder nullOrder, + String identifier, + List structIdentifiers, + Value constantValue, + boolean random) { + this.kind = kind; + this.tag = tag; + this.type = type; + this.order = order; + this.nullOrder = nullOrder; + this.identifier = identifier; + this.structIdentifiers = structIdentifiers; + this.constantValue = constantValue; + this.random = random; + } + + private ResolvedValue resolveValue(BiFunction valueFinder, int index) { + if (hasConstantValue()) { + return ResolvedValue.ofValue(constantValue()); + } + Value value = valueFinder.apply(index, identifier == null ? "" : identifier); + if (value == null) { + return ResolvedValue.missing(); + } + if (structIdentifiers.isEmpty()) { + return ResolvedValue.ofValue(value); + } + Value current = value; + // structIdentifiers is a path of list indices into nested STRUCT values. + // STRUCT values are represented as ListValue in field order. 
+ for (int structIndex : structIdentifiers) { + if (current.getKindCase() != Value.KindCase.LIST_VALUE + || structIndex < 0 + || structIndex >= current.getListValue().getValuesCount()) { + return ResolvedValue.failed(); + } + current = current.getListValue().getValues(structIndex); + } + return ResolvedValue.ofValue(current); + } + + private boolean shouldConsumeValueIndex() { + return !hasConstantValue() && !random; + } + + static Part fromProto(com.google.spanner.v1.KeyRecipe.Part partProto) { + if (partProto.getTag() != 0) { + if (partProto.getTag() < 0) { + return new Part(Kind.INVALID, 0, null, null, null, null, null, null, false); + } + return new Part(Kind.TAG, partProto.getTag(), null, null, null, null, null, null, false); + } + if (!partProto.hasType()) { + return new Part(Kind.INVALID, 0, null, null, null, null, null, null, false); + } + if (partProto.getOrder() != com.google.spanner.v1.KeyRecipe.Part.Order.ASCENDING + && partProto.getOrder() != com.google.spanner.v1.KeyRecipe.Part.Order.DESCENDING) { + return new Part(Kind.INVALID, 0, null, null, null, null, null, null, false); + } + if (partProto.getNullOrder() != com.google.spanner.v1.KeyRecipe.Part.NullOrder.NULLS_FIRST + && partProto.getNullOrder() != com.google.spanner.v1.KeyRecipe.Part.NullOrder.NULLS_LAST + && partProto.getNullOrder() != com.google.spanner.v1.KeyRecipe.Part.NullOrder.NOT_NULL) { + return new Part(Kind.INVALID, 0, null, null, null, null, null, null, false); + } + if (partProto.hasRandom() + && partProto.getType().getCode() != com.google.spanner.v1.TypeCode.INT64) { + return new Part(Kind.INVALID, 0, null, null, null, null, null, null, false); + } + + String identifier = partProto.hasIdentifier() ? partProto.getIdentifier() : null; + List structIdentifiers = new ArrayList<>(partProto.getStructIdentifiersList()); + + Value constantValue = partProto.hasValue() ? 
partProto.getValue() : null; + + return new Part( + Kind.VALUE, + 0, + partProto.getType(), + partProto.getOrder(), + partProto.getNullOrder(), + identifier, + structIdentifiers, + constantValue, + partProto.hasRandom()); + } + } + + private static void encodeRandomValuePart(Part part, UnsynchronizedByteArrayOutputStream out) { + long value = ThreadLocalRandom.current().nextLong(0, Long.MAX_VALUE); + boolean ascending = part.order == com.google.spanner.v1.KeyRecipe.Part.Order.ASCENDING; + if (ascending) { + SsFormat.appendInt64Increasing(out, value); + } else { + SsFormat.appendInt64Decreasing(out, value); + } + } + + private static final class ResolvedValue { + private final Value value; + private final boolean found; + private final boolean failed; + + private ResolvedValue(Value value, boolean found, boolean failed) { + this.value = value; + this.found = found; + this.failed = failed; + } + + private static ResolvedValue ofValue(Value value) { + return new ResolvedValue(value, true, false); + } + + private static ResolvedValue missing() { + return new ResolvedValue(null, false, false); + } + + private static ResolvedValue failed() { + return new ResolvedValue(null, false, true); + } + } + + private final List parts; + private final boolean isIndex; + + private KeyRecipe(List parts, boolean isIndex) { + this.parts = parts; + this.isIndex = isIndex; + } + + public static KeyRecipe create(com.google.spanner.v1.KeyRecipe in) { + if (in.getPartCount() == 0) { + throw new IllegalArgumentException("KeyRecipe must have at least one part."); + } + boolean isIndex = in.hasIndexName(); + List partsList = + in.getPartList().stream().map(Part::fromProto).collect(Collectors.toList()); + if (partsList.get(0).kind != Kind.TAG) { + throw new IllegalArgumentException("KeyRecipe must start with a tag."); + } + return new KeyRecipe(partsList, isIndex); + } + + private static void encodeNull(Part part, UnsynchronizedByteArrayOutputStream out) { + switch (part.nullOrder) { + case 
NULLS_FIRST: + SsFormat.appendNullOrderedFirst(out); + break; + case NULLS_LAST: + SsFormat.appendNullOrderedLast(out); + break; + case NOT_NULL: + throw new IllegalArgumentException("Key part cannot be NULL"); + default: + throw new IllegalArgumentException("Unknown null order: " + part.nullOrder); + } + } + + private static void encodeNotNull(Part part, UnsynchronizedByteArrayOutputStream out) { + switch (part.nullOrder) { + case NULLS_FIRST: + SsFormat.appendNotNullMarkerNullOrderedFirst(out); + break; + case NULLS_LAST: + SsFormat.appendNotNullMarkerNullOrderedLast(out); + break; + case NOT_NULL: + // No marker needed for NOT_NULL + break; + default: + throw new IllegalArgumentException("Unknown null order: " + part.nullOrder); + } + } + + private static void encodeSingleValuePart( + Part part, Value value, UnsynchronizedByteArrayOutputStream out) { + if (value.getKindCase() == Value.KindCase.NULL_VALUE) { + encodeNull(part, out); + return; + } + + // Validate type compatibility BEFORE encoding anything + validateValueType(part, value); + + // Now safe to encode the NOT_NULL marker + encodeNotNull(part, out); + + boolean isAscending = (part.order == com.google.spanner.v1.KeyRecipe.Part.Order.ASCENDING); + + switch (part.type.getCode()) { + case BOOL: + if (isAscending) { + SsFormat.appendBoolIncreasing(out, value.getBoolValue()); + } else { + SsFormat.appendBoolDecreasing(out, value.getBoolValue()); + } + break; + case INT64: + long intVal = Long.parseLong(value.getStringValue()); + if (isAscending) { + SsFormat.appendInt64Increasing(out, intVal); + } else { + SsFormat.appendInt64Decreasing(out, intVal); + } + break; + case FLOAT64: + double dblVal; + if (value.getKindCase() == Value.KindCase.STRING_VALUE) { + // Handle special float values like Infinity, -Infinity, NaN + String strVal = value.getStringValue(); + if ("Infinity".equals(strVal)) { + dblVal = Double.POSITIVE_INFINITY; + } else if ("-Infinity".equals(strVal)) { + dblVal = Double.NEGATIVE_INFINITY; 
+ } else if ("NaN".equals(strVal)) { + dblVal = Double.NaN; + } else { + throw new IllegalArgumentException("Invalid FLOAT64 string: " + strVal); + } + } else { + dblVal = value.getNumberValue(); + } + if (isAscending) { + SsFormat.appendDoubleIncreasing(out, dblVal); + } else { + SsFormat.appendDoubleDecreasing(out, dblVal); + } + break; + case STRING: + if (isAscending) { + SsFormat.appendStringIncreasing(out, value.getStringValue()); + } else { + SsFormat.appendStringDecreasing(out, value.getStringValue()); + } + break; + case BYTES: + byte[] bytesDecoded = Base64.getDecoder().decode(value.getStringValue()); + if (isAscending) { + SsFormat.appendBytesIncreasing(out, bytesDecoded); + } else { + SsFormat.appendBytesDecreasing(out, bytesDecoded); + } + break; + case TIMESTAMP: + String tsStr = value.getStringValue(); + long[] parsed = parseTimestamp(tsStr); + byte[] encoded = SsFormat.encodeTimestamp(parsed[0], (int) parsed[1]); + if (isAscending) { + SsFormat.appendBytesIncreasing(out, encoded); + } else { + SsFormat.appendBytesDecreasing(out, encoded); + } + break; + case DATE: + String dateStr = value.getStringValue(); + int daysSinceEpoch = parseDate(dateStr); + if (isAscending) { + SsFormat.appendInt64Increasing(out, daysSinceEpoch); + } else { + SsFormat.appendInt64Decreasing(out, daysSinceEpoch); + } + break; + case UUID: + String uuidStr = value.getStringValue(); + long[] parsedUuid = parseUuid(uuidStr); + byte[] encodedUuid = SsFormat.encodeUuid(parsedUuid[0], parsedUuid[1]); + if (isAscending) { + SsFormat.appendBytesIncreasing(out, encodedUuid); + } else { + SsFormat.appendBytesDecreasing(out, encodedUuid); + } + break; + case ENUM: + // ENUM values are sent as string representation of the enum number + long enumVal = Long.parseLong(value.getStringValue()); + if (isAscending) { + SsFormat.appendInt64Increasing(out, enumVal); + } else { + SsFormat.appendInt64Decreasing(out, enumVal); + } + break; + case NUMERIC: + case TYPE_CODE_UNSPECIFIED: + case ARRAY: 
+ case STRUCT: + case PROTO: + case UNRECOGNIZED: + default: + throw new IllegalArgumentException( + "Unsupported type code for ssformat encoding: " + part.type.getCode()); + } + } + + private static void validateValueType(Part part, Value value) { + switch (part.type.getCode()) { + case BOOL: + if (value.getKindCase() != Value.KindCase.BOOL_VALUE) { + throw new IllegalArgumentException("Type mismatch for BOOL."); + } + break; + case INT64: + if (value.getKindCase() != Value.KindCase.STRING_VALUE) { + throw new IllegalArgumentException("Type mismatch for INT64, expecting decimal string."); + } + // Also validate it's a valid integer + try { + Long.parseLong(value.getStringValue()); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("Invalid INT64 string: " + value.getStringValue(), e); + } + break; + case FLOAT64: + if (value.getKindCase() != Value.KindCase.NUMBER_VALUE + && value.getKindCase() != Value.KindCase.STRING_VALUE) { + throw new IllegalArgumentException("Type mismatch for FLOAT64."); + } + if (value.getKindCase() == Value.KindCase.STRING_VALUE) { + String strVal = value.getStringValue(); + if (!"Infinity".equals(strVal) && !"-Infinity".equals(strVal) && !"NaN".equals(strVal)) { + throw new IllegalArgumentException("Invalid FLOAT64 string: " + strVal); + } + } + break; + case STRING: + if (value.getKindCase() != Value.KindCase.STRING_VALUE) { + throw new IllegalArgumentException("Type mismatch for STRING."); + } + break; + case BYTES: + if (value.getKindCase() != Value.KindCase.STRING_VALUE) { + throw new IllegalArgumentException("Type mismatch for BYTES, expecting base64 string."); + } + // Validate base64 + try { + Base64.getDecoder().decode(value.getStringValue()); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Invalid base64 for BYTES type.", e); + } + break; + case TIMESTAMP: + if (value.getKindCase() != Value.KindCase.STRING_VALUE) { + throw new IllegalArgumentException("Type mismatch for 
TIMESTAMP."); + } + // Validate timestamp format: must end with Z (UTC) and be RFC3339 + validateTimestamp(value.getStringValue()); + break; + case DATE: + if (value.getKindCase() != Value.KindCase.STRING_VALUE) { + throw new IllegalArgumentException("Type mismatch for DATE."); + } + // Validate date format: YYYY-MM-DD, exactly 10 chars + validateDate(value.getStringValue()); + break; + case UUID: + if (value.getKindCase() != Value.KindCase.STRING_VALUE) { + throw new IllegalArgumentException("Type mismatch for UUID."); + } + // Validate UUID format + validateUuid(value.getStringValue()); + break; + case ENUM: + if (value.getKindCase() != Value.KindCase.STRING_VALUE) { + throw new IllegalArgumentException("Type mismatch for ENUM, expecting string."); + } + // Validate it's a valid integer string + try { + Long.parseLong(value.getStringValue()); + } catch (NumberFormatException e) { + throw new IllegalArgumentException( + "Invalid ENUM string (expecting number): " + value.getStringValue(), e); + } + break; + case NUMERIC: + case TYPE_CODE_UNSPECIFIED: + case ARRAY: + case STRUCT: + case PROTO: + case UNRECOGNIZED: + default: + throw new IllegalArgumentException( + "Unsupported type code for ssformat encoding: " + part.type.getCode()); + } + } + + private static void validateTimestamp(String ts) { + parseTimestamp(ts); + } + + private static long[] parseTimestamp(String ts) { + if (!ts.endsWith("Z")) { + throw new IllegalArgumentException("Invalid TIMESTAMP string: " + ts); + } + String withoutZ = ts.substring(0, ts.length() - 1); + int tIndex = withoutZ.indexOf('T'); + if (tIndex <= 0 || tIndex == withoutZ.length() - 1) { + throw new IllegalArgumentException("Invalid TIMESTAMP string: " + ts); + } + + String datePart = withoutZ.substring(0, tIndex); + String timePart = withoutZ.substring(tIndex + 1); + LocalDate date; + try { + date = LocalDate.parse(datePart, DATE_FORMATTER); + } catch (DateTimeParseException e) { + throw new IllegalArgumentException("Invalid 
TIMESTAMP string: " + ts, e); + } + + int nanos = 0; + String timeMain = timePart; + int dotIndex = timePart.indexOf('.'); + if (dotIndex >= 0) { + timeMain = timePart.substring(0, dotIndex); + String fracStr = timePart.substring(dotIndex + 1); + if (fracStr.isEmpty()) { + throw new IllegalArgumentException("Invalid TIMESTAMP string: " + ts); + } + for (int i = 0; i < fracStr.length(); i++) { + char c = fracStr.charAt(i); + if (c < '0' || c > '9') { + throw new IllegalArgumentException("Invalid TIMESTAMP string: " + ts); + } + } + while (fracStr.length() < 9) { + fracStr = fracStr + "0"; + } + if (fracStr.length() > 9) { + fracStr = fracStr.substring(0, 9); + } + nanos = Integer.parseInt(fracStr); + } + + String[] timeParts = timeMain.split(":"); + if (timeParts.length != 3) { + throw new IllegalArgumentException("Invalid TIMESTAMP string: " + ts); + } + int hour; + int minute; + int second; + try { + hour = Integer.parseInt(timeParts[0]); + minute = Integer.parseInt(timeParts[1]); + second = Integer.parseInt(timeParts[2]); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("Invalid TIMESTAMP string: " + ts, e); + } + if (hour < 0 || hour > 23 || minute < 0 || minute > 59 || second < 0 || second > 59) { + throw new IllegalArgumentException("Invalid TIMESTAMP string: " + ts); + } + + long seconds = date.toEpochDay() * 86400L + hour * 3600L + minute * 60L + second; + return new long[] {seconds, nanos}; + } + + private static final DateTimeFormatter DATE_FORMATTER = + DateTimeFormatter.ofPattern("uuuu-MM-dd").withResolverStyle(ResolverStyle.STRICT); + + private static void validateDate(String dateStr) { + parseDate(dateStr); + } + + private static int parseDate(String dateStr) { + try { + LocalDate date = LocalDate.parse(dateStr, DATE_FORMATTER); + return (int) date.toEpochDay(); + } catch (DateTimeParseException e) { + throw new IllegalArgumentException("Invalid DATE string: " + dateStr, e); + } + } + + private static void validateUuid(String 
uuid) { + parseUuid(uuid); + // parseUuid throws if invalid + } + + private static long[] parseUuid(String uuid) { + String originalUuid = uuid; + + // Handle optional braces + if (uuid.startsWith("{")) { + if (!uuid.endsWith("}")) { + throw new IllegalArgumentException("Invalid UUID string: " + originalUuid); + } + uuid = uuid.substring(1, uuid.length() - 1); + } + + // Minimum 36 characters required (standard UUID format: 8-4-4-4-12) + if (uuid.length() < 36) { + throw new IllegalArgumentException("Invalid UUID string: " + originalUuid); + } + + // Check for leading hyphen + if (uuid.startsWith("-")) { + throw new IllegalArgumentException("Invalid UUID string: " + originalUuid); + } + + // Parse 32 hex digits (ignoring hyphens in between) + long high = 0; + long low = 0; + int hexCount = 0; + + for (int i = 0; i < uuid.length(); i++) { + char c = uuid.charAt(i); + if (c == '-') { + continue; // Skip hyphens + } + int digit = hexDigit(c); + if (digit < 0) { + throw new IllegalArgumentException("Invalid UUID string: " + originalUuid); + } + if (hexCount < 16) { + high = (high << 4) | digit; + } else { + low = (low << 4) | digit; + } + hexCount++; + } + + if (hexCount != 32) { + throw new IllegalArgumentException("Invalid UUID string: " + originalUuid); + } + + // After parsing, verify there are no trailing characters + // (uuid must be exactly consumed) + if (uuid.length() > 36) { + throw new IllegalArgumentException("Invalid UUID string: " + originalUuid); + } + + return new long[] {high, low}; + } + + private static int hexDigit(char c) { + if (c >= '0' && c <= '9') return c - '0'; + if (c >= 'a' && c <= 'f') return 10 + (c - 'a'); + if (c >= 'A' && c <= 'F') return 10 + (c - 'A'); + return -1; + } + + private TargetRange encodeKeyInternal( + BiFunction valueFinder, KeyType keyType) { + UnsynchronizedByteArrayOutputStream ssKey = new UnsynchronizedByteArrayOutputStream(); + int valueIdx = 0; + EncodeState state = EncodeState.OK; + int p = 0; + for (; p < 
parts.size(); ++p) { + final Part part = parts.get(p); + if (part.kind == Kind.TAG) { + SsFormat.appendCompositeTag(ssKey, part.tag); + } else if (part.kind == Kind.VALUE) { + if (part.random) { + encodeRandomValuePart(part, ssKey); + continue; + } + + int currentIndex = valueIdx; + if (part.shouldConsumeValueIndex()) { + valueIdx++; + } + ResolvedValue resolved = part.resolveValue(valueFinder, currentIndex); + if (resolved.failed) { + state = EncodeState.FAILED; + break; + } + if (!resolved.found) { + state = part.shouldConsumeValueIndex() ? EncodeState.END_OF_KEYS : EncodeState.FAILED; + break; + } + try { + encodeSingleValuePart(part, resolved.value, ssKey); + } catch (IllegalArgumentException e) { + state = EncodeState.FAILED; + break; + } + } else { + state = EncodeState.FAILED; + break; + } + } + + ByteString start = ByteString.copyFrom(ssKey.toByteArray()); + ByteString limit = ByteString.EMPTY; + boolean approximate = false; + + if (p == parts.size() || (keyType != KeyType.FULL_KEY && state == EncodeState.END_OF_KEYS)) { + if (keyType == KeyType.PREFIX_SUCCESSOR) { + start = SsFormat.makePrefixSuccessor(start); + } else if (keyType == KeyType.INDEX_KEY) { + limit = SsFormat.makePrefixSuccessor(start); + } + } else { + approximate = true; + limit = SsFormat.makePrefixSuccessor(start); + } + return new TargetRange(start, limit, approximate); + } + + public TargetRange keyToTargetRange(ListValue in) { + return encodeKeyInternal( + (index, identifier) -> { + if (index < 0 || index >= in.getValuesCount()) { + return null; + } + return in.getValues(index); + }, + isIndex ? 
KeyType.INDEX_KEY : KeyType.FULL_KEY); + } + + public TargetRange keyRangeToTargetRange(KeyRange in) { + TargetRange start; + switch (in.getStartKeyTypeCase()) { + case START_CLOSED: + start = + encodeKeyInternal( + (index, id) -> { + if (index < 0 || index >= in.getStartClosed().getValuesCount()) { + return null; + } + return in.getStartClosed().getValues(index); + }, + KeyType.PREFIX); + break; + case START_OPEN: + start = + encodeKeyInternal( + (index, id) -> { + if (index < 0 || index >= in.getStartOpen().getValuesCount()) { + return null; + } + return in.getStartOpen().getValues(index); + }, + KeyType.PREFIX_SUCCESSOR); + break; + default: + start = encodeKeyInternal((index, id) -> null, KeyType.PREFIX); + start.approximate = true; + break; + } + + TargetRange limit; + switch (in.getEndKeyTypeCase()) { + case END_CLOSED: + limit = + encodeKeyInternal( + (index, id) -> { + if (index < 0 || index >= in.getEndClosed().getValuesCount()) { + return null; + } + return in.getEndClosed().getValues(index); + }, + KeyType.PREFIX_SUCCESSOR); + break; + case END_OPEN: + limit = + encodeKeyInternal( + (index, id) -> { + if (index < 0 || index >= in.getEndOpen().getValuesCount()) { + return null; + } + return in.getEndOpen().getValues(index); + }, + KeyType.PREFIX); + break; + default: + limit = encodeKeyInternal((index, id) -> null, KeyType.PREFIX_SUCCESSOR); + limit.approximate = true; + break; + } + ByteString limitKey = limit.approximate ? 
limit.limit : limit.start; + return new TargetRange(start.start, limitKey, start.approximate || limit.approximate); + } + + public TargetRange keySetToTargetRange(KeySet in) { + if (in.getAll()) { + return keyRangeToTargetRange( + KeyRange.newBuilder() + .setStartClosed(ListValue.getDefaultInstance()) + .setEndClosed(ListValue.getDefaultInstance()) + .build()); + } + if (in.getRangesCount() == 0) { + if (in.getKeysCount() == 0) { + return new TargetRange(ByteString.EMPTY, K_INFINITY, true); + } else if (in.getKeysCount() == 1) { + return keyToTargetRange(in.getKeys(0)); + } + } + + TargetRange target = new TargetRange(K_INFINITY, ByteString.EMPTY, false); + for (ListValue key : in.getKeysList()) { + target.mergeFrom(keyToTargetRange(key)); + } + for (KeyRange range : in.getRangesList()) { + target.mergeFrom(keyRangeToTargetRange(range)); + } + return target; + } + + public TargetRange queryParamsToTargetRange(Struct in) { + // toLowerCase(Locale.ROOT) is safe for query parameter names, even for non-ASCII + // characters such as the Turkish upper-case İ (U+0130). Query parameter names cannot + // be quoted in Spanner SQL (the @paramName syntax imposes an unquoted identifier + // grammar), so both the identifier sent by the server in the KeyRecipe and the + // parameter name bound by the user must follow the same syntax rules. Applying the + // same Locale.ROOT case-folding to both sides guarantees a consistent match. + // If the server were to normalize identifiers differently, the only consequence is + // a routing miss and graceful fallback to the default endpoint — not a query failure. + // + // Sort field names before inserting into the map so that when two param names + // collide after case-folding (e.g. "Id" vs "ID") the winner is deterministic, + // matching the Go implementation. 
+ List fieldNames = new ArrayList<>(in.getFieldsMap().keySet()); + Collections.sort(fieldNames); + final Map lowercaseFields = new HashMap<>(fieldNames.size()); + for (String fieldName : fieldNames) { + lowercaseFields.put(fieldName.toLowerCase(Locale.ROOT), in.getFieldsMap().get(fieldName)); + } + return encodeKeyInternal( + (index, identifier) -> lowercaseFields.get(identifier.toLowerCase(Locale.ROOT)), + KeyType.FULL_KEY); + } + + public TargetRange mutationToTargetRange(Mutation in) { + TargetRange target = new TargetRange(K_INFINITY, ByteString.EMPTY, false); + + switch (in.getOperationCase()) { + case INSERT: + case UPDATE: + case INSERT_OR_UPDATE: + case REPLACE: + final Mutation.Write write = getWrite(in); + for (ListValue values : write.getValuesList()) { + target.mergeFrom( + encodeKeyInternal( + (index, id) -> { + int colIndex = write.getColumnsList().indexOf(id); + if (colIndex == -1 || colIndex >= values.getValuesCount()) { + return null; + } + return values.getValues(colIndex); + }, + KeyType.FULL_KEY)); + } + break; + case DELETE: + target.mergeFrom(keySetToTargetRange(in.getDelete().getKeySet())); + break; + case SEND: + target.mergeFrom(keyToTargetRange(in.getSend().getKey())); + break; + case ACK: + target.mergeFrom(keyToTargetRange(in.getAck().getKey())); + break; + default: + break; + } + + if (target.start.equals(K_INFINITY)) { + target = new TargetRange(ByteString.EMPTY, K_INFINITY, true); + } + return target; + } + + private Mutation.Write getWrite(Mutation in) { + switch (in.getOperationCase()) { + case INSERT: + return in.getInsert(); + case UPDATE: + return in.getUpdate(); + case INSERT_OR_UPDATE: + return in.getInsertOrUpdate(); + case REPLACE: + return in.getReplace(); + default: + throw new IllegalArgumentException("Mutation is not a write operation"); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/KeyRecipeCache.java 
b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/KeyRecipeCache.java new file mode 100644 index 000000000000..eff3aeacb67f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/KeyRecipeCache.java @@ -0,0 +1,374 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import com.google.api.core.InternalApi; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.collect.ImmutableList; +import com.google.common.hash.Hasher; +import com.google.common.hash.Hashing; +import com.google.protobuf.ByteString; +import com.google.protobuf.Value; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.Mutation; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.RecipeList; +import com.google.spanner.v1.RoutingHint; +import com.google.spanner.v1.Type; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicLong; +import java.util.logging.Logger; + +@InternalApi +public final class KeyRecipeCache { + // Best-effort routing cache; compute calls are intentionally unsynchronized and may race with + // 
updates. Requests still succeed without routing hints when data is stale. + private static final Logger logger = Logger.getLogger(KeyRecipeCache.class.getName()); + private static final long DEFAULT_SCHEMA_RECIPE_CACHE_SIZE = 1000; + private static final long DEFAULT_PREPARED_QUERY_CACHE_SIZE = 1000; + private static final long DEFAULT_PREPARED_READ_CACHE_SIZE = 1000; + + @VisibleForTesting + static long fingerprint(ReadRequest req) { + Hasher hasher = Hashing.goodFastHash(64).newHasher(); + hasher.putString(req.getTable(), StandardCharsets.UTF_8); + hasher.putString(req.getIndex(), StandardCharsets.UTF_8); + hasher.putInt(req.getColumnsCount()); + for (String column : req.getColumnsList()) { + hasher.putString(column, StandardCharsets.UTF_8); + } + return hasher.hash().asLong(); + } + + @VisibleForTesting + static long fingerprint(ExecuteSqlRequest req) { + Hasher hasher = Hashing.goodFastHash(64).newHasher(); + hasher.putString(req.getSql(), StandardCharsets.UTF_8); + + List paramNames = new ArrayList<>(req.getParams().getFieldsMap().keySet()); + paramNames.sort(Comparator.naturalOrder()); + for (String name : paramNames) { + hasher.putString(name, StandardCharsets.UTF_8); + if (req.getParamTypesMap().containsKey(name)) { + hasher.putBytes(req.getParamTypesMap().get(name).toByteArray()); + } else { + Value value = req.getParams().getFieldsMap().get(name); + hasher.putInt(value.getKindCase().getNumber()); + } + } + + hasher.putBytes(req.getQueryOptions().toByteArray()); + return hasher.hash().asLong(); + } + + private final AtomicLong nextOperationUid = new AtomicLong(1); + private volatile ByteString schemaGeneration = ByteString.EMPTY; + + private final Cache schemaRecipes = + CacheBuilder.newBuilder().maximumSize(DEFAULT_SCHEMA_RECIPE_CACHE_SIZE).build(); + private final Cache queryRecipes = + CacheBuilder.newBuilder().maximumSize(DEFAULT_PREPARED_QUERY_CACHE_SIZE).build(); + private final Cache preparedReads = + 
CacheBuilder.newBuilder().maximumSize(DEFAULT_PREPARED_READ_CACHE_SIZE).build(); + private final Cache preparedQueries = + CacheBuilder.newBuilder().maximumSize(DEFAULT_PREPARED_QUERY_CACHE_SIZE).build(); + + public KeyRecipeCache() {} + + private static V getIfPresent(Cache cache, K key) { + return cache.getIfPresent(key); + } + + @VisibleForTesting + static int getPreparedReadCacheSize(KeyRecipeCache cache) { + return (int) cache.preparedReads.size(); + } + + @VisibleForTesting + static int getPreparedQueryCacheSize(KeyRecipeCache cache) { + return (int) cache.preparedQueries.size(); + } + + /** + * Applies recipes from a server CacheUpdate. + * + *

This is expected to be called only when responses include new recipes, not on every request. + * It is synchronized to atomically update schema generation and cache contents. + */ + public synchronized void addRecipes(RecipeList recipeList) { + int cmp = + ByteString.unsignedLexicographicalComparator() + .compare(recipeList.getSchemaGeneration(), schemaGeneration); + if (cmp < 0) { + return; + } + if (cmp > 0) { + schemaGeneration = recipeList.getSchemaGeneration(); + schemaRecipes.invalidateAll(); + queryRecipes.invalidateAll(); + } + + int failedCount = 0; + IllegalArgumentException failureExample = null; + for (com.google.spanner.v1.KeyRecipe recipeProto : recipeList.getRecipeList()) { + try { + KeyRecipe recipe = KeyRecipe.create(recipeProto); + if (recipeProto.hasTableName()) { + schemaRecipes.put(recipeProto.getTableName(), recipe); + } else if (recipeProto.hasIndexName()) { + schemaRecipes.put(recipeProto.getIndexName(), recipe); + } else if (recipeProto.hasOperationUid()) { + queryRecipes.put(recipeProto.getOperationUid(), recipe); + } + } catch (IllegalArgumentException e) { + failedCount++; + if (failureExample == null) { + failureExample = e; + } + } + } + if (failedCount > 0) { + logger.warning( + "Failed to add " + failedCount + " recipes, example: " + failureExample.getMessage()); + } + } + + public void computeKeys(ReadRequest.Builder reqBuilder) { + long reqFp = fingerprint(reqBuilder.buildPartial()); + + RoutingHint.Builder hintBuilder = reqBuilder.getRoutingHintBuilder(); + if (!schemaGeneration.isEmpty()) { + hintBuilder.setSchemaGeneration(schemaGeneration); + } + + PreparedRead preparedRead = getIfPresent(preparedReads, reqFp); + if (preparedRead == null) { + preparedRead = PreparedRead.fromRequest(reqBuilder.buildPartial()); + preparedRead.operationUid = nextOperationUid.getAndIncrement(); + preparedReads.put(reqFp, preparedRead); + } else if (!preparedRead.matches(reqBuilder.buildPartial())) { + logger.fine("Fingerprint collision for 
ReadRequest: " + reqFp); + return; + } + + hintBuilder.setOperationUid(preparedRead.operationUid); + String recipeKey = reqBuilder.getTable(); + if (!reqBuilder.getIndex().isEmpty()) { + recipeKey = reqBuilder.getIndex(); + } + + KeyRecipe recipe = getIfPresent(schemaRecipes, recipeKey); + if (recipe == null) { + logger.fine("Schema recipe not found for: " + recipeKey); + return; + } + + try { + TargetRange target = recipe.keySetToTargetRange(reqBuilder.getKeySet()); + hintBuilder.setKey(target.start); + if (!target.limit.isEmpty()) { + hintBuilder.setLimitKey(target.limit); + } + } catch (IllegalArgumentException e) { + logger.fine("Failed key encoding: " + e.getMessage()); + } + } + + public void computeKeys(ExecuteSqlRequest.Builder reqBuilder) { + long reqFp = fingerprint(reqBuilder.buildPartial()); + + RoutingHint.Builder hintBuilder = reqBuilder.getRoutingHintBuilder(); + if (!schemaGeneration.isEmpty()) { + hintBuilder.setSchemaGeneration(schemaGeneration); + } + + PreparedQuery preparedQuery = getIfPresent(preparedQueries, reqFp); + if (preparedQuery == null) { + preparedQuery = PreparedQuery.fromRequest(reqBuilder.buildPartial()); + preparedQuery.operationUid = nextOperationUid.getAndIncrement(); + preparedQueries.put(reqFp, preparedQuery); + } else if (!preparedQuery.matches(reqBuilder.buildPartial())) { + logger.fine("Fingerprint collision for ExecuteSqlRequest: " + reqFp); + return; + } + + hintBuilder.setOperationUid(preparedQuery.operationUid); + KeyRecipe recipe = getIfPresent(queryRecipes, preparedQuery.operationUid); + if (recipe == null) { + return; + } + + try { + TargetRange target = recipe.queryParamsToTargetRange(reqBuilder.getParams()); + hintBuilder.setKey(target.start); + if (!target.limit.isEmpty()) { + hintBuilder.setLimitKey(target.limit); + } + } catch (IllegalArgumentException e) { + logger.fine("Failed query param encoding: " + e.getMessage()); + } + } + + public TargetRange mutationToTargetRange(Mutation mutation) { + if (mutation == 
null) { + return null; + } + String tableName = tableNameFromMutation(mutation); + if (tableName == null || tableName.isEmpty()) { + return null; + } + + KeyRecipe recipe = getIfPresent(schemaRecipes, tableName); + if (recipe == null) { + logger.fine("Schema recipe not found for mutation table: " + tableName); + return null; + } + + try { + return recipe.mutationToTargetRange(mutation); + } catch (IllegalArgumentException e) { + logger.fine("Failed mutation key encoding: " + e.getMessage()); + return null; + } + } + + private static String tableNameFromMutation(Mutation mutation) { + switch (mutation.getOperationCase()) { + case INSERT: + return mutation.getInsert().getTable(); + case UPDATE: + return mutation.getUpdate().getTable(); + case INSERT_OR_UPDATE: + return mutation.getInsertOrUpdate().getTable(); + case REPLACE: + return mutation.getReplace().getTable(); + case DELETE: + return mutation.getDelete().getTable(); + default: + return null; + } + } + + public synchronized void clear() { + schemaGeneration = ByteString.EMPTY; + preparedReads.invalidateAll(); + preparedQueries.invalidateAll(); + schemaRecipes.invalidateAll(); + queryRecipes.invalidateAll(); + } + + private static class PreparedRead { + final String table; + final ImmutableList columns; + long operationUid; // Not final, assigned after construction + + private PreparedRead(String table, List columns) { + this.table = table; + this.columns = ImmutableList.copyOf(columns); + } + + static PreparedRead fromRequest(ReadRequest req) { + return new PreparedRead(req.getTable(), req.getColumnsList()); + } + + boolean matches(ReadRequest req) { + if (!Objects.equals(table, req.getTable())) { + return false; + } + return columns.equals(req.getColumnsList()); + } + } + + private static final class PreparedQuery { + private final String sql; + private final ImmutableList params; + private final ExecuteSqlRequest.QueryOptions queryOptions; + private long operationUid; + + private PreparedQuery( + String sql, 
List params, ExecuteSqlRequest.QueryOptions queryOptions) { + this.sql = sql; + this.params = ImmutableList.copyOf(params); + this.queryOptions = queryOptions; + } + + private static PreparedQuery fromRequest(ExecuteSqlRequest req) { + List params = new ArrayList<>(); + for (Map.Entry entry : req.getParams().getFieldsMap().entrySet()) { + String name = entry.getKey(); + if (req.getParamTypesMap().containsKey(name)) { + params.add(Param.ofType(name, req.getParamTypesMap().get(name))); + } else { + params.add(Param.ofKind(name, entry.getValue().getKindCase())); + } + } + params.sort(Comparator.comparing(param -> param.name)); + return new PreparedQuery(req.getSql(), params, req.getQueryOptions()); + } + + private boolean matches(ExecuteSqlRequest req) { + if (!sql.equals(req.getSql())) { + return false; + } + if (params.size() != req.getParams().getFieldsCount()) { + return false; + } + for (Param param : params) { + Value value = req.getParams().getFieldsMap().get(param.name); + if (value == null) { + return false; + } + if (param.type != null) { + Type type = req.getParamTypesMap().get(param.name); + if (type == null || !type.equals(param.type)) { + return false; + } + } else if (param.kindCase != value.getKindCase()) { + return false; + } + } + return Objects.equals(queryOptions, req.getQueryOptions()); + } + } + + private static final class Param { + private final String name; + private final Type type; + private final Value.KindCase kindCase; + + private Param(String name, Type type, Value.KindCase kindCase) { + this.name = name; + this.type = type; + this.kindCase = kindCase; + } + + private static Param ofType(String name, Type type) { + return new Param(name, type, null); + } + + private static Param ofKind(String name, Value.KindCase kindCase) { + return new Param(name, null, kindCase); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/LoggingInterceptor.java 
b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/LoggingInterceptor.java new file mode 100644 index 000000000000..44571b1a6523 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/LoggingInterceptor.java @@ -0,0 +1,108 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.spanner.spi.v1; + +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.ForwardingClientCall; +import io.grpc.ForwardingClientCallListener; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import io.grpc.Status; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nullable; + +/** Adds logging to rpc calls */ +class LoggingInterceptor implements ClientInterceptor { + + private final Logger logger; + private final Level level; + + LoggingInterceptor(Logger logger, Level level) { + this.logger = logger; + this.level = level; + } + + private class CallLogger { + + private final MethodDescriptor method; + + CallLogger(MethodDescriptor method) { + this.method = method; + } + + void log(String message) { + logger.log( + level, + "{0}[{1}]: {2}", + new Object[] { + method.getFullMethodName(), Integer.toHexString(System.identityHashCode(this)), message + }); + } + + void logfmt(String message, Object... 
params) { + log(String.format(message, params)); + } + } + + @Override + public ClientCall interceptCall( + MethodDescriptor method, CallOptions callOptions, Channel next) { + if (!logger.isLoggable(level)) { + return next.newCall(method, callOptions); + } + + final CallLogger callLogger = new CallLogger(method); + callLogger.log("Start"); + return new ForwardingClientCall.SimpleForwardingClientCall( + next.newCall(method, callOptions)) { + @Override + public void start(Listener responseListener, Metadata headers) { + super.start( + new ForwardingClientCallListener.SimpleForwardingClientCallListener( + responseListener) { + @Override + public void onMessage(RespT message) { + callLogger.logfmt("Received:\n%s", message); + super.onMessage(message); + } + + @Override + public void onClose(Status status, Metadata trailers) { + callLogger.logfmt("Closed with status %s and trailers %s", status, trailers); + super.onClose(status, trailers); + } + }, + headers); + } + + @Override + public void sendMessage(ReqT message) { + callLogger.logfmt("Send:\n%s", message); + super.sendMessage(message); + } + + @Override + public void cancel(@Nullable String message, @Nullable Throwable cause) { + callLogger.logfmt("Cancelled with message %s", message); + super.cancel(message, cause); + } + }; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/RequestIdCreatorImpl.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/RequestIdCreatorImpl.java new file mode 100644 index 000000000000..5904fa581fd9 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/RequestIdCreatorImpl.java @@ -0,0 +1,43 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import com.google.cloud.spanner.XGoogSpannerRequestId; +import com.google.cloud.spanner.XGoogSpannerRequestId.RequestIdCreator; +import java.util.concurrent.atomic.AtomicLong; + +class RequestIdCreatorImpl implements RequestIdCreator { + private static final AtomicLong NEXT_CLIENT_ID = new AtomicLong(); + + private final long clientId = NEXT_CLIENT_ID.incrementAndGet(); + private final AtomicLong requestId = new AtomicLong(); + + @Override + public long getClientId() { + return this.clientId; + } + + @Override + public XGoogSpannerRequestId nextRequestId(long channelId) { + return XGoogSpannerRequestId.of(clientId, channelId, requestId.incrementAndGet(), 0); + } + + @Override + public void reset() { + requestId.set(0); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/RequestIdInterceptor.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/RequestIdInterceptor.java new file mode 100644 index 000000000000..ea7204301e33 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/RequestIdInterceptor.java @@ -0,0 +1,67 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import static com.google.cloud.spanner.XGoogSpannerRequestId.REQUEST_ID_CALL_OPTIONS_KEY; +import static com.google.cloud.spanner.XGoogSpannerRequestId.REQUEST_ID_HEADER_KEY; + +import com.google.cloud.grpc.GcpManagedChannel; +import com.google.cloud.spanner.XGoogSpannerRequestId; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.ForwardingClientCall; +import io.grpc.Metadata; +import io.grpc.Metadata.Key; +import io.grpc.MethodDescriptor; +import java.util.concurrent.atomic.AtomicLong; + +class RequestIdInterceptor implements ClientInterceptor { + static final CallOptions.Key ATTEMPT_KEY = CallOptions.Key.create("Attempt"); + private static final String RESPONSE_ENCODING_KEY_NAME = "x-response-encoding"; + private static final Key RESPONSE_ENCODING_KEY = + Key.of(RESPONSE_ENCODING_KEY_NAME, Metadata.ASCII_STRING_MARSHALLER); + + RequestIdInterceptor() {} + + @Override + public ClientCall interceptCall( + MethodDescriptor method, CallOptions callOptions, Channel next) { + return new ForwardingClientCall.SimpleForwardingClientCall( + next.newCall(method, callOptions)) { + @Override + public void start(Listener responseListener, Metadata headers) { + XGoogSpannerRequestId requestId = callOptions.getOption(REQUEST_ID_CALL_OPTIONS_KEY); + if (requestId != null) { + // If grpc-gcp has set the actual channel ID, use it to update the request ID. 
+ // This provides the real channel ID used after channel selection, especially + // important when dynamic channel pooling is enabled. + Integer gcpChannelId = callOptions.getOption(GcpManagedChannel.CHANNEL_ID_KEY); + if (gcpChannelId != null) { + // Channel IDs from grpc-gcp are 0-based, add 1 to match request ID convention + // where 0 means unknown and >0 means a known channel. + requestId.setChannelId(gcpChannelId + 1); + } + requestId.incrementAttempt(); + headers.put(REQUEST_ID_HEADER_KEY, requestId.getHeaderValue()); + } + super.start(responseListener, headers); + } + }; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerErrorInterceptor.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerErrorInterceptor.java new file mode 100644 index 000000000000..9c3b2af2b069 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerErrorInterceptor.java @@ -0,0 +1,134 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.spi.v1; + +import com.google.cloud.spanner.IsRetryableInternalError; +import com.google.cloud.spanner.XGoogSpannerRequestId; +import com.google.common.base.Strings; +import com.google.rpc.BadRequest; +import com.google.rpc.Help; +import com.google.rpc.LocalizedMessage; +import com.google.rpc.QuotaFailure; +import com.google.rpc.RequestInfo; +import com.google.rpc.ResourceInfo; +import com.google.rpc.RetryInfo; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.ForwardingClientCall.SimpleForwardingClientCall; +import io.grpc.ForwardingClientCallListener.SimpleForwardingClientCallListener; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import io.grpc.Status; +import io.grpc.Status.Code; +import io.grpc.protobuf.ProtoUtils; +import java.util.Objects; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * Adds error details to statuses returned by the Cloud Spanner API. TODO(user): Add integration + * tests to check for error details. 
+ */ +public final class SpannerErrorInterceptor implements ClientInterceptor { + private static final Logger logger = Logger.getLogger(SpannerErrorInterceptor.class.getName()); + + private static final Metadata.Key LOCALIZED_MESSAGE_KEY = + ProtoUtils.keyForProto(LocalizedMessage.getDefaultInstance()); + private static final Metadata.Key BAD_REQUEST_KEY = + ProtoUtils.keyForProto(BadRequest.getDefaultInstance()); + private static final Metadata.Key HELP_KEY = + ProtoUtils.keyForProto(Help.getDefaultInstance()); + private static final Metadata.Key QUOTA_FAILURE_KEY = + ProtoUtils.keyForProto(QuotaFailure.getDefaultInstance()); + private static final Metadata.Key REQUEST_INFO_KEY = + ProtoUtils.keyForProto(RequestInfo.getDefaultInstance()); + private static final Metadata.Key RESOURCE_INFO_KEY = + ProtoUtils.keyForProto(ResourceInfo.getDefaultInstance()); + private static final Metadata.Key RETRY_INFO_KEY = + ProtoUtils.keyForProto(RetryInfo.getDefaultInstance()); + + @Override + public ClientCall interceptCall( + MethodDescriptor method, CallOptions callOptions, Channel next) { + return new SimpleForwardingClientCall(next.newCall(method, callOptions)) { + @Override + public void start(Listener responseListener, Metadata headers) { + super.start( + new SimpleForwardingClientCallListener(responseListener) { + @Override + public void onClose(Status status, Metadata trailers) { + // Return quickly if there is no error. 
+ if (status.isOk()) { + super.onClose(status, trailers); + return; + } + try { + if (headers.containsKey(XGoogSpannerRequestId.REQUEST_ID_HEADER_KEY)) { + String requestId = headers.get(XGoogSpannerRequestId.REQUEST_ID_HEADER_KEY); + if (!Strings.isNullOrEmpty(requestId)) { + if (!trailers.containsKey(XGoogSpannerRequestId.REQUEST_ID_HEADER_KEY)) { + trailers.put( + XGoogSpannerRequestId.REQUEST_ID_HEADER_KEY, + Objects.requireNonNull( + headers.get(XGoogSpannerRequestId.REQUEST_ID_HEADER_KEY))); + } + } + } + // Translate INTERNAL errors that should be retried to a retryable error code. + if (IsRetryableInternalError.INSTANCE.isRetryableInternalError(status)) { + status = + Status.fromCode(Code.UNAVAILABLE).withDescription(status.getDescription()); + } + if (trailers.containsKey(LOCALIZED_MESSAGE_KEY)) { + status = + Status.fromCodeValue(status.getCode().value()) + .withDescription(trailers.get(LOCALIZED_MESSAGE_KEY).getMessage()); + } + if (trailers.containsKey(BAD_REQUEST_KEY)) { + status = status.augmentDescription(trailers.get(BAD_REQUEST_KEY).toString()); + } + if (trailers.containsKey(HELP_KEY)) { + status = status.augmentDescription(trailers.get(HELP_KEY).toString()); + } + if (trailers.containsKey(QUOTA_FAILURE_KEY)) { + status = status.augmentDescription(trailers.get(QUOTA_FAILURE_KEY).toString()); + } + if (trailers.containsKey(REQUEST_INFO_KEY)) { + status = status.augmentDescription(trailers.get(REQUEST_INFO_KEY).toString()); + } + if (trailers.containsKey(RESOURCE_INFO_KEY)) { + status = status.augmentDescription(trailers.get(RESOURCE_INFO_KEY).toString()); + } + if (trailers.containsKey(RETRY_INFO_KEY)) { + status = status.augmentDescription(trailers.get(RETRY_INFO_KEY).toString()); + } + } catch (IllegalArgumentException e) { + // Messages could be invalid if, say, some invalid UTF8 is echoed back in some + // error text. 
+ logger.log(Level.WARNING, "Invalid protocol message in metadata", e); + } finally { + super.onClose(status, trailers); + } + } + }, + headers); + } + }; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerInterceptorProvider.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerInterceptorProvider.java new file mode 100644 index 000000000000..e8d6c3ebddbc --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerInterceptorProvider.java @@ -0,0 +1,103 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.spanner.spi.v1; + +import com.google.api.core.InternalApi; +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.grpc.GrpcInterceptorProvider; +import com.google.cloud.spanner.SpannerRpcMetrics; +import com.google.common.base.Supplier; +import com.google.common.base.Suppliers; +import com.google.common.collect.ImmutableList; +import io.grpc.ClientInterceptor; +import io.opentelemetry.api.GlobalOpenTelemetry; +import io.opentelemetry.api.OpenTelemetry; +import java.util.ArrayList; +import java.util.List; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * For internal use only. 
An interceptor provider that provides a list of grpc interceptors for + * {@code GapicSpannerRpc} to handle logging and error augmentation by intercepting grpc calls. + */ +@InternalApi("Exposed for testing") +public class SpannerInterceptorProvider implements GrpcInterceptorProvider { + private final List clientInterceptors; + + private SpannerInterceptorProvider(List clientInterceptors) { + this.clientInterceptors = clientInterceptors; + } + + @ObsoleteApi("This method always uses Global OpenTelemetry") + public static SpannerInterceptorProvider createDefault() { + return createDefault(GlobalOpenTelemetry.get()); + } + + public static SpannerInterceptorProvider createDefault(OpenTelemetry openTelemetry) { + return createDefault( + openTelemetry, + Suppliers.memoize( + () -> { + return false; + })); + } + + @ObsoleteApi("DirectPathEnabledSupplier is not used") + public static SpannerInterceptorProvider createDefault( + OpenTelemetry openTelemetry, Supplier directPathEnabledSupplier) { + List defaultInterceptorList = new ArrayList<>(); + defaultInterceptorList.add(new SpannerErrorInterceptor()); + defaultInterceptorList.add( + new LoggingInterceptor(Logger.getLogger(GapicSpannerRpc.class.getName()), Level.FINER)); + defaultInterceptorList.add(new HeaderInterceptor(new SpannerRpcMetrics(openTelemetry))); + defaultInterceptorList.add(new RequestIdInterceptor()); + return new SpannerInterceptorProvider(ImmutableList.copyOf(defaultInterceptorList)); + } + + static SpannerInterceptorProvider create(GrpcInterceptorProvider provider) { + return new SpannerInterceptorProvider(ImmutableList.copyOf(provider.getInterceptors())); + } + + public SpannerInterceptorProvider with(ClientInterceptor clientInterceptor) { + List interceptors = + ImmutableList.builder() + .addAll(this.clientInterceptors) + .add(clientInterceptor) + .build(); + return new SpannerInterceptorProvider(interceptors); + } + + SpannerInterceptorProvider withEncoding(String encoding) { + if (encoding != null) 
{ + return with(new EncodingInterceptor(encoding)); + } + return this; + } + + SpannerInterceptorProvider withTraceContext( + boolean endToEndTracingEnabled, OpenTelemetry openTelemetry) { + if (endToEndTracingEnabled) { + return with(new TraceContextInterceptor(openTelemetry)); + } + return this; + } + + @Override + public List getInterceptors() { + return clientInterceptors; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerMetadataProvider.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerMetadataProvider.java new file mode 100644 index 000000000000..e9c748472754 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerMetadataProvider.java @@ -0,0 +1,132 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.spanner.spi.v1; + +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.common.base.MoreObjects; +import com.google.common.base.Strings; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.collect.ImmutableMap; +import io.grpc.Metadata; +import io.grpc.Metadata.Key; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** For internal use only. */ +class SpannerMetadataProvider { + private final Cache>> extraHeadersCache = + CacheBuilder.newBuilder().maximumSize(100).build(); + private final Map, String> headers; + private final String resourceHeaderKey; + private static final String ROUTE_TO_LEADER_HEADER_KEY = "x-goog-spanner-route-to-leader"; + private static final String END_TO_END_TRACING_HEADER_KEY = "x-goog-spanner-end-to-end-tracing"; + private static final String AFE_SERVER_TIMING_HEADER_KEY = + "x-goog-spanner-enable-afe-server-timing"; + private static final Pattern[] RESOURCE_TOKEN_PATTERNS = { + Pattern.compile("^(?projects/[^/]*/instances/[^/]*/databases/[^/]*)(.*)?"), + Pattern.compile("^(?projects/[^/]*/instances/[^/]*)(.*)?") + }; + + private static final Map> ROUTE_TO_LEADER_HEADER_MAP = + ImmutableMap.of(ROUTE_TO_LEADER_HEADER_KEY, Collections.singletonList("true")); + private static final Map> END_TO_END_TRACING_HEADER_MAP = + ImmutableMap.of(END_TO_END_TRACING_HEADER_KEY, Collections.singletonList("true")); + private static final Map> AFE_SERVER_TIMING_HEADER_MAP = + ImmutableMap.of(AFE_SERVER_TIMING_HEADER_KEY, Collections.singletonList("true")); + + private SpannerMetadataProvider(Map headers, String resourceHeaderKey) { + this.resourceHeaderKey = resourceHeaderKey; + this.headers = constructHeadersAsMetadata(headers); + } + + static SpannerMetadataProvider create(Map headers, String 
resourceHeaderKey) { + return new SpannerMetadataProvider(headers, resourceHeaderKey); + } + + Metadata newMetadata(String resourceTokenTemplate, String defaultResourceToken) { + Metadata metadata = new Metadata(); + for (Map.Entry, String> header : headers.entrySet()) { + metadata.put(header.getKey(), header.getValue()); + } + + metadata.put( + Key.of(resourceHeaderKey, Metadata.ASCII_STRING_MARSHALLER), + getResourceHeaderValue(resourceTokenTemplate, defaultResourceToken)); + + return metadata; + } + + Map> newExtraHeaders( + String resourceTokenTemplate, String defaultResourceToken) { + try { + return extraHeadersCache.get( + MoreObjects.firstNonNull(resourceTokenTemplate, ""), + () -> + ImmutableMap.>builder() + .put( + resourceHeaderKey, + Collections.singletonList( + getResourceHeaderValue(resourceTokenTemplate, defaultResourceToken))) + .build()); + } catch (ExecutionException executionException) { + // This should never happen. + throw SpannerExceptionFactory.asSpannerException(executionException.getCause()); + } + } + + Map> newRouteToLeaderHeader() { + return ROUTE_TO_LEADER_HEADER_MAP; + } + + Map> newEndToEndTracingHeader() { + return END_TO_END_TRACING_HEADER_MAP; + } + + Map> newAfeServerTimingHeader() { + return AFE_SERVER_TIMING_HEADER_MAP; + } + + private Map, String> constructHeadersAsMetadata( + Map headers) { + ImmutableMap.Builder, String> headersAsMetadataBuilder = + ImmutableMap.builder(); + for (Map.Entry entry : headers.entrySet()) { + headersAsMetadataBuilder.put( + Metadata.Key.of(entry.getKey(), Metadata.ASCII_STRING_MARSHALLER), entry.getValue()); + } + return headersAsMetadataBuilder.build(); + } + + private String getResourceHeaderValue(String resourceTokenTemplate, String defaultResourceToken) { + String resourceToken = defaultResourceToken; + if (!Strings.isNullOrEmpty(resourceTokenTemplate)) { + for (Pattern pattern : RESOURCE_TOKEN_PATTERNS) { + Matcher m = pattern.matcher(resourceTokenTemplate); + if (m.matches()) { + 
resourceToken = m.group("headerValue"); + break; + } + } + } + + return resourceToken; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerRpc.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerRpc.java new file mode 100644 index 000000000000..7fd50f41c2d3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerRpc.java @@ -0,0 +1,565 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.spi.v1; + +import com.google.api.core.ApiFuture; +import com.google.api.core.InternalApi; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ServerStream; +import com.google.api.gax.rpc.StatusCode.Code; +import com.google.cloud.ServiceRpc; +import com.google.cloud.spanner.BackupId; +import com.google.cloud.spanner.Restore; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.XGoogSpannerRequestId; +import com.google.cloud.spanner.XGoogSpannerRequestId.RequestIdCreator; +import com.google.cloud.spanner.admin.database.v1.stub.DatabaseAdminStub; +import com.google.cloud.spanner.admin.database.v1.stub.DatabaseAdminStubSettings; +import com.google.cloud.spanner.admin.instance.v1.stub.InstanceAdminStub; +import com.google.cloud.spanner.admin.instance.v1.stub.InstanceAdminStubSettings; +import com.google.cloud.spanner.v1.stub.SpannerStubSettings; +import com.google.common.collect.ImmutableList; +import com.google.iam.v1.GetPolicyOptions; +import com.google.iam.v1.Policy; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.ByteString; +import com.google.protobuf.Empty; +import com.google.protobuf.FieldMask; +import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.CopyBackupMetadata; +import com.google.spanner.admin.database.v1.CreateBackupMetadata; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.DatabaseRole; +import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; +import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import 
com.google.spanner.admin.database.v1.UpdateDatabaseMetadata; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.CreateInstanceMetadata; +import com.google.spanner.admin.instance.v1.Instance; +import com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata; +import com.google.spanner.v1.*; +import java.time.Duration; +import java.util.List; +import java.util.Map; +import java.util.Set; +import javax.annotation.Nullable; + +/** + * Abstracts remote calls to the Cloud Spanner service. Typically end-consumer code will never use + * this interface; it's main purpose is to abstract the implementation of the public Cloud Spanner + * API from the underlying transport mechanism. + * + *

Each {@code SpannerRPC} instance is bound to a particular project and set of authorization + * credentials. + * + *

The interface is currently defined in terms of the generated HTTP client model classes. This + * is purely for expedience; a future version of this interface is likely to be independent of + * transport to allow switching between gRPC and HTTP. + */ +@InternalApi +public interface SpannerRpc extends ServiceRpc { + /** Options passed in {@link SpannerRpc} methods to control how an RPC is issued. */ + enum Option { + CHANNEL_HINT("Channel Hint"); + + private final String value; + + Option(String value) { + this.value = value; + } + + @SuppressWarnings("unchecked") + T get(@Nullable Map options) { + if (options == null) { + return null; + } + return (T) options.get(this); + } + + @InternalApi + public Long getLong(@Nullable Map options) { + return get(options); + } + + @Override + public String toString() { + return value; + } + } + + /** + * Represents results from paginated RPCs, i.e., those where up to a maximum number of items is + * returned from each call and a followup call must be made to fetch more. + * + * @param the type of result + */ + final class Paginated { + private final Iterable results; + private final String nextPageToken; + + /** + * Creates a new page of results. + * + * @param results the result, or null for no results. + * @param nextPageToken the token for the next page of results, or null if no more pages exist + */ + public Paginated(@Nullable Iterable results, @Nullable String nextPageToken) { + // The generated HTTP client has null members when no results are present, rather than an + // empty list. Implicitly convert to an empty list to minimize the risk of NPEs. + this.results = (results == null) ? ImmutableList.of() : results; + this.nextPageToken = + (nextPageToken == null || nextPageToken.isEmpty()) ? null : nextPageToken; + } + + /** + * Returns the current page of results. Always returns non-null; if a null "results" was passed + * to the constructor, a default empty {@code Iterable} will be returned. 
+ */ + public Iterable getResults() { + return results; + } + + /** + * Returns the token to use in the request for the next page, or null if this is the last page. + */ + @Nullable + public String getNextPageToken() { + return nextPageToken; + } + } + + /** Consumer for the results produced by a streaming read or query call. */ + interface ResultStreamConsumer { + void onPartialResultSet(PartialResultSet results); + + void onCompleted(); + + void onError(SpannerException e); + + /** + * Returns true if the stream should be cancelled when the Spanner client is closed. This + * returns true for {@link com.google.cloud.spanner.BatchReadOnlyTransaction}, as these use a + * non-pooled session. Pooled sessions are deleted when the Spanner client is closed, and this + * automatically also cancels any query that uses the session, which means that we don't need to + * explicitly cancel those queries when the Spanner client is closed. + */ + boolean cancelQueryWhenClientIsClosed(); + } + + /** Handle for cancellation of a streaming read or query call. */ + interface StreamingCall { + + /** Returns the {@link ApiCallContext} that is used for this streaming call. */ + ApiCallContext getCallContext(); + + /** + * Requests more messages from the stream. We disable the auto flow control mechanism in grpc, + * so we need to request messages ourself. This gives us more control over how much buffer we + * maintain in the client. Grpc will request 1 initial message automatically so we don't need to + * call this at the beginning. After that it should be called whenever there is a flow control + * window available based on the flow control setting configured by the client. Currently we do + * not have any flow control so this is called automatically when a message is received. + */ + void request(int numMessages); + + /** + * Cancels the call. 
+ * + * @param message a message to use in the final status of any underlying RPC + */ + void cancel(@Nullable String message); + } + + default RequestIdCreator getRequestIdCreator() { + throw new UnsupportedOperationException("Not implemented"); + } + + /** Clears any client-side affinity associated with the given transaction id. */ + default void clearTransactionAffinity(ByteString transactionId) {} + + // Instance admin APIs. + Paginated listInstanceConfigs(int pageSize, @Nullable String pageToken) + throws SpannerException; + + default OperationFuture createInstanceConfig( + String parent, + String instanceConfigId, + InstanceConfig instanceConfig, + @Nullable Boolean validateOnly) + throws SpannerException { + throw new UnsupportedOperationException("Not implemented"); + } + + default OperationFuture updateInstanceConfig( + InstanceConfig instanceConfig, @Nullable Boolean validateOnly, FieldMask fieldMask) + throws SpannerException { + throw new UnsupportedOperationException("Not implemented"); + } + + InstanceConfig getInstanceConfig(String instanceConfigName) throws SpannerException; + + default void deleteInstanceConfig( + String instanceConfigName, @Nullable String etag, @Nullable Boolean validateOnly) + throws SpannerException { + throw new UnsupportedOperationException("Not implemented"); + } + + /** List all long-running instance config operations on the given project. 
*/ + default Paginated listInstanceConfigOperations( + int pageSize, @Nullable String filter, @Nullable String pageToken) { + throw new UnsupportedOperationException("Not implemented"); + } + + Paginated listInstances( + int pageSize, @Nullable String pageToken, @Nullable String filter) throws SpannerException; + + OperationFuture createInstance( + String parent, String instanceId, Instance instance) throws SpannerException; + + OperationFuture updateInstance( + Instance instance, FieldMask fieldMask) throws SpannerException; + + Instance getInstance(String instanceName) throws SpannerException; + + void deleteInstance(String instanceName) throws SpannerException; + + // Database admin APIs. + Paginated listDatabases(String instanceName, int pageSize, @Nullable String pageToken) + throws SpannerException; + + OperationFuture createDatabase( + String instanceName, + String createDatabaseStatement, + Iterable additionalStatements, + com.google.cloud.spanner.Database database) + throws SpannerException; + + OperationFuture updateDatabaseDdl( + com.google.cloud.spanner.Database database, + Iterable updateDatabaseStatements, + @Nullable String updateId) + throws SpannerException; + + void dropDatabase(String databaseName) throws SpannerException; + + Database getDatabase(String databaseName) throws SpannerException; + + /** + * Updates the specified fields of a Cloud Spanner database. + * + * @param database The database proto whose field values will be used as the new values in the + * stored database. + * @param fieldMask The fields to update. Currently, only the "enable_drop_protection" field of + * the database supports updates. + * @return an `OperationFuture` that can be used to track the status of the update. 
+ * @throws SpannerException + */ + OperationFuture updateDatabase( + Database database, FieldMask fieldMask) throws SpannerException; + + GetDatabaseDdlResponse getDatabaseDdl(String databaseName) throws SpannerException; + + /** Lists the backups in the specified instance. */ + Paginated listBackups( + String instanceName, int pageSize, @Nullable String filter, @Nullable String pageToken) + throws SpannerException; + + /** + * Creates a new backup from the source database specified in the {@link + * com.google.cloud.spanner.Backup} instance. + * + * @param backupInfo the backup to create. The instance, database and expireTime fields of the + * backup must be filled. + * @return the operation that monitors the backup creation. + */ + OperationFuture createBackup( + com.google.cloud.spanner.Backup backupInfo) throws SpannerException; + + /** + * Creates a copy backup from the source backup specified. + * + * @param destinationBackup the backup to create. The instance, database, and expireTime fields of + * the backup must be filled. It may also optionally have an encryption config set. If no + * encryption config has been set, the new backup will use the same encryption config as the + * source backup. + * @return the operation that monitors the backup creation. + */ + default OperationFuture copyBackup( + BackupId sourceBackupId, com.google.cloud.spanner.Backup destinationBackup) { + throw new UnsupportedOperationException("Unimplemented"); + } + + /** + * Restore a backup into the given database. + * + * @param restore a {@link Restore} instance with the backup source and destination database + */ + OperationFuture restoreDatabase(Restore restore); + + /** Gets the backup with the specified name. */ + Backup getBackup(String backupName) throws SpannerException; + + /** Updates the specified backup. The only supported field for updates is expireTime. 
*/ + Backup updateBackup(Backup backup, FieldMask updateMask); + + /** List all long-running backup operations on the given instance. */ + Paginated listBackupOperations( + String instanceName, int pageSize, @Nullable String filter, @Nullable String pageToken); + + /** + * Deletes a pending or completed backup. + * + * @param backupName Required. The fully qualified name of the backup to delete. + */ + void deleteBackup(String backupName); + + Paginated listDatabaseOperations( + String instanceName, int pageSize, @Nullable String filter, @Nullable String pageToken); + + Paginated listDatabaseRoles( + String databaseName, int pageSize, @Nullable String pageToken); + + /** Retrieves a long running operation. */ + Operation getOperation(String name) throws SpannerException; + + /** Cancels the specified long-running operation. */ + void cancelOperation(String name) throws SpannerException; + + List batchCreateSessions( + String databaseName, + int sessionCount, + @Nullable String databaseRole, + @Nullable Map labels, + @Nullable Map options) + throws SpannerException; + + Session createSession( + String databaseName, + @Nullable String databaseRole, + @Nullable Map labels, + @Nullable Map options) + throws SpannerException; + + default Session createSession( + String databaseName, + @Nullable String databaseRole, + @Nullable Map labels, + @Nullable Map options, + boolean isMultiplexed) + throws SpannerException { + throw new UnsupportedOperationException("Unimplemented"); + } + + void deleteSession(String sessionName, @Nullable Map options) throws SpannerException; + + ApiFuture asyncDeleteSession(String sessionName, @Nullable Map options) + throws SpannerException; + + /** Returns the retry settings for streaming read operations. */ + default RetrySettings getReadRetrySettings() { + return SpannerStubSettings.newBuilder().streamingReadSettings().getRetrySettings(); + } + + /** Returns the retryable codes for streaming read operations. 
*/ + default Set getReadRetryableCodes() { + return SpannerStubSettings.newBuilder().streamingReadSettings().getRetryableCodes(); + } + + /** + * Performs a streaming read. + * + * @param routeToLeader Set to true to route the request to the leader region, and false to route + * the request to any region. When leader aware routing is enabled, RW/PDML requests are + * preferred to be routed to the leader region, and RO requests (except for + * PartitionRead/PartitionQuery) are preferred to be routed to any region for optimal latency. + */ + StreamingCall read( + ReadRequest request, + ResultStreamConsumer consumer, + @Nullable Map options, + XGoogSpannerRequestId requestId, + boolean routeToLeader); + + /** Returns the retry settings for streaming query operations. */ + default RetrySettings getExecuteQueryRetrySettings() { + return SpannerStubSettings.newBuilder().executeStreamingSqlSettings().getRetrySettings(); + } + + /** Returns the retryable codes for streaming query operations. */ + default Set getExecuteQueryRetryableCodes() { + return SpannerStubSettings.newBuilder().executeStreamingSqlSettings().getRetryableCodes(); + } + + /** + * Executes a query. + * + * @param routeToLeader Set to true to route the request to the leader region, and false to route + * the request to any region. When leader aware routing is enabled, RW/PDML requests are + * preferred to be routed to the leader region, and RO requests (except for + * PartitionRead/PartitionQuery) are preferred to be routed to any region for optimal latency. + */ + ResultSet executeQuery( + ExecuteSqlRequest request, @Nullable Map options, boolean routeToLeader); + + /** + * Executes a query asynchronously. + * + * @param routeToLeader Set to true to route the request to the leader region, and false to route + * the request to any region. 
When leader aware routing is enabled, RW/PDML requests are + * preferred to be routed to the leader region, and RO requests (except for + * PartitionRead/PartitionQuery) are preferred to be routed to any region for optimal latency. + */ + ApiFuture executeQueryAsync( + ExecuteSqlRequest request, @Nullable Map options, boolean routeToLeader); + + ResultSet executePartitionedDml(ExecuteSqlRequest request, @Nullable Map options); + + RetrySettings getPartitionedDmlRetrySettings(); + + ServerStream executeStreamingPartitionedDml( + ExecuteSqlRequest request, + @Nullable Map options, + XGoogSpannerRequestId requestId, + Duration timeout); + + ServerStream batchWriteAtLeastOnce( + BatchWriteRequest request, @Nullable Map options); + + /** + * Executes a query with streaming result. + * + * @param routeToLeader Set to true to route the request to the leader region, and false to route + * the request to any region. When leader aware routing is enabled, RW/PDML requests are + * preferred to be routed to the leader region, and RO requests (except for + * PartitionRead/PartitionQuery) are preferred to be routed to any region for optimal latency. + */ + StreamingCall executeQuery( + ExecuteSqlRequest request, + ResultStreamConsumer consumer, + @Nullable Map options, + XGoogSpannerRequestId requestId, + boolean routeToLeader); + + ExecuteBatchDmlResponse executeBatchDml(ExecuteBatchDmlRequest build, Map options); + + ApiFuture executeBatchDmlAsync( + ExecuteBatchDmlRequest build, Map options); + + /** + * Begins a transaction. + * + * @param routeToLeader Set to true to route the request to the leader region, and false to route + * the request to any region. When leader aware routing is enabled, RW/PDML requests are + * preferred to be routed to the leader region, and RO requests (except for + * PartitionRead/PartitionQuery) are preferred to be routed to any region for optimal latency. 
+ */ + Transaction beginTransaction( + BeginTransactionRequest request, @Nullable Map options, boolean routeToLeader) + throws SpannerException; + + /** + * Begins a transaction asynchronously. + * + * @param routeToLeader Set to true to route the request to the leader region, and false to route + * the request to any region. When leader aware routing is enabled, RW/PDML requests are + * preferred to be routed to the leader region, and RO requests (except for + * PartitionRead/PartitionQuery) are preferred to be routed to any region for optimal latency. + */ + ApiFuture beginTransactionAsync( + BeginTransactionRequest request, @Nullable Map options, boolean routeToLeader); + + CommitResponse commit(CommitRequest commitRequest, @Nullable Map options) + throws SpannerException; + + ApiFuture commitAsync( + CommitRequest commitRequest, @Nullable Map options); + + default RetrySettings getCommitRetrySettings() { + return SpannerStubSettings.newBuilder().commitSettings().getRetrySettings(); + } + + void rollback(RollbackRequest request, @Nullable Map options) throws SpannerException; + + ApiFuture rollbackAsync(RollbackRequest request, @Nullable Map options); + + PartitionResponse partitionQuery(PartitionQueryRequest request, @Nullable Map options) + throws SpannerException; + + PartitionResponse partitionRead(PartitionReadRequest request, @Nullable Map options) + throws SpannerException; + + /** Gets the IAM policy for the given resource using the {@link DatabaseAdminStub}. */ + Policy getDatabaseAdminIAMPolicy(String resource, @Nullable GetPolicyOptions options); + + /** + * Updates the IAM policy for the given resource using the {@link DatabaseAdminStub}. It is highly + * recommended to first get the current policy and base the updated policy on the returned policy. + * See {@link Policy.Builder#setEtag(com.google.protobuf.ByteString)} for information on the + * recommended read-modify-write cycle. 
+ */ + Policy setDatabaseAdminIAMPolicy(String resource, Policy policy); + + /** Tests the IAM permissions for the given resource using the {@link DatabaseAdminStub}. */ + TestIamPermissionsResponse testDatabaseAdminIAMPermissions( + String resource, Iterable permissions); + + /** Gets the IAM policy for the given resource using the {@link InstanceAdminStub}. */ + Policy getInstanceAdminIAMPolicy(String resource); + + /** + * Updates the IAM policy for the given resource using the {@link InstanceAdminStub}. It is highly + * recommended to first get the current policy and base the updated policy on the returned policy. + * See {@link Policy.Builder#setEtag(com.google.protobuf.ByteString)} for information on the + * recommended read-modify-write cycle. + */ + Policy setInstanceAdminIAMPolicy(String resource, Policy policy); + + /** Tests the IAM permissions for the given resource using the {@link InstanceAdminStub}. */ + TestIamPermissionsResponse testInstanceAdminIAMPermissions( + String resource, Iterable permissions); + + void shutdown(); + + boolean isClosed(); + + /** + * Getter method to obtain the auto-generated instance admin client stub settings. + * + * @return InstanceAdminStubSettings + */ + @InternalApi + default InstanceAdminStubSettings getInstanceAdminStubSettings() { + throw new UnsupportedOperationException("Not implemented"); + } + + /** + * Getter method to obtain the auto-generated database admin client stub settings. 
+ * + * @return DatabaseAdminStubSettings + */ + @InternalApi + default DatabaseAdminStubSettings getDatabaseAdminStubSettings() { + throw new UnsupportedOperationException("Not implemented"); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerRpcViews.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerRpcViews.java new file mode 100644 index 000000000000..21f639e130a6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerRpcViews.java @@ -0,0 +1,150 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package com.google.cloud.spanner.spi.v1;

import com.google.api.core.ObsoleteApi;
import com.google.cloud.spanner.SpannerOptions;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import io.opencensus.stats.Aggregation;
import io.opencensus.stats.Aggregation.Distribution;
import io.opencensus.stats.Aggregation.Sum;
import io.opencensus.stats.BucketBoundaries;
import io.opencensus.stats.Measure.MeasureLong;
import io.opencensus.stats.Stats;
import io.opencensus.stats.View;
import io.opencensus.stats.ViewManager;
import io.opencensus.tags.TagKey;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

/**
 * OpenCensus measures and views for GFE (Google Front End) metrics recorded by the Spanner
 * client: {@code gfe_latency} and {@code gfe_header_missing_count}.
 *
 * <p>All registration methods are no-ops unless OpenCensus metrics are enabled via {@link
 * SpannerOptions#isEnabledOpenCensusMetrics()}.
 */
@VisibleForTesting
public class SpannerRpcViews {

  /** Unit to represent milliseconds. */
  private static final String MILLISECOND = "ms";

  /** Unit to represent counts. */
  private static final String COUNT = "1";

  /** TagKeys used to break down the recorded measures. */
  public static final TagKey METHOD = TagKey.create("method");

  public static final TagKey PROJECT_ID = TagKey.create("project_id");
  public static final TagKey INSTANCE_ID = TagKey.create("instance_id");
  public static final TagKey DATABASE_ID = TagKey.create("database");

  /** GFE t4t7 latency extracted from server-timing header. */
  public static final MeasureLong SPANNER_GFE_LATENCY =
      MeasureLong.create(
          "cloud.google.com/java/spanner/gfe_latency",
          "Latency between Google's network receiving an RPC and reading back the first byte of the"
              + " response",
          MILLISECOND);

  /** Number of responses without the server-timing header. */
  public static final MeasureLong SPANNER_GFE_HEADER_MISSING_COUNT =
      MeasureLong.create(
          "cloud.google.com/java/spanner/gfe_header_missing_count",
          "Number of RPC responses received without the server-timing header, most likely means"
              + " that the RPC never reached Google's network",
          COUNT);

  // Bucket boundaries (in ms) for the latency distribution; restored the element type that was
  // previously a raw List.
  static final List<Double> RPC_MILLIS_BUCKET_BOUNDARIES =
      Collections.unmodifiableList(
          Arrays.asList(
              0.0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 13.0,
              16.0, 20.0, 25.0, 30.0, 40.0, 50.0, 65.0, 80.0, 100.0, 130.0, 160.0, 200.0, 250.0,
              300.0, 400.0, 500.0, 650.0, 800.0, 1000.0, 2000.0, 5000.0, 10000.0, 20000.0, 50000.0,
              100000.0));
  static final Aggregation AGGREGATION_WITH_MILLIS_HISTOGRAM =
      Distribution.create(BucketBoundaries.create(RPC_MILLIS_BUCKET_BOUNDARIES));
  static final View SPANNER_GFE_LATENCY_VIEW =
      View.create(
          View.Name.create("cloud.google.com/java/spanner/gfe_latency"),
          "Latency between Google's network receiving an RPC and reading back the first byte of the"
              + " response",
          SPANNER_GFE_LATENCY,
          AGGREGATION_WITH_MILLIS_HISTOGRAM,
          ImmutableList.of(METHOD, PROJECT_ID, INSTANCE_ID, DATABASE_ID));

  private static final Aggregation SUM = Sum.create();
  static final View SPANNER_GFE_HEADER_MISSING_COUNT_VIEW =
      View.create(
          View.Name.create("cloud.google.com/java/spanner/gfe_header_missing_count"),
          "Number of RPC responses received without the server-timing header, most likely means"
              + " that the RPC never reached Google's network",
          SPANNER_GFE_HEADER_MISSING_COUNT,
          SUM,
          ImmutableList.of(METHOD, PROJECT_ID, INSTANCE_ID, DATABASE_ID));

  // Mutable so tests can substitute a fake ViewManager.
  public static ViewManager viewManager = Stats.getViewManager();

  /**
   * Register views for GFE metrics, including gfe_latency and gfe_header_missing_count. gfe_latency
   * measures the latency between Google's network receiving an RPC and reading back the first byte
   * of the response. gfe_header_missing_count is a counter of the number of RPC responses without a
   * server-timing header.
   *
   * @deprecated The OpenCensus project is deprecated. Use OpenTelemetry to get gfe_latency and
   *     gfe_header_missing_count metrics.
   */
  @VisibleForTesting
  @ObsoleteApi(
      "The OpenCensus project is deprecated. Use OpenTelemetry to get gfe_latency and"
          + " gfe_header_missing_count metrics.")
  public static void registerGfeLatencyAndHeaderMissingCountViews() {
    if (SpannerOptions.isEnabledOpenCensusMetrics()) {
      viewManager.registerView(SPANNER_GFE_LATENCY_VIEW);
      viewManager.registerView(SPANNER_GFE_HEADER_MISSING_COUNT_VIEW);
    }
  }

  /**
   * Register GFE Latency view. gfe_latency measures the latency between Google's network receiving
   * an RPC and reading back the first byte of the response.
   *
   * @deprecated The OpenCensus project is deprecated. Use OpenTelemetry to get gfe_latency and
   *     gfe_header_missing_count metrics.
   */
  @VisibleForTesting
  @ObsoleteApi(
      "The OpenCensus project is deprecated. Use OpenTelemetry to get gfe_latency and"
          + " gfe_header_missing_count metrics.")
  public static void registerGfeLatencyView() {
    if (SpannerOptions.isEnabledOpenCensusMetrics()) {
      viewManager.registerView(SPANNER_GFE_LATENCY_VIEW);
    }
  }

  /**
   * Register GFE Header Missing Count view. gfe_header_missing_count is a counter of the number of
   * RPC responses without a server-timing header.
   *
   * @deprecated The OpenCensus project is deprecated. Use OpenTelemetry to get gfe_latency and
   *     gfe_header_missing_count metrics.
   */
  @VisibleForTesting
  @ObsoleteApi(
      "The OpenCensus project is deprecated. Use OpenTelemetry to get gfe_latency and"
          + " gfe_header_missing_count metrics.")
  public static void registerGfeHeaderMissingCountView() {
    if (SpannerOptions.isEnabledOpenCensusMetrics()) {
      viewManager.registerView(SPANNER_GFE_HEADER_MISSING_COUNT_VIEW);
    }
  }
}

/**
 * Sortable String Format encoding utilities for Spanner keys.
 *
 * <p>This class provides methods to encode various data types into a byte format that preserves
 * lexicographic ordering: comparing two encoded keys byte-wise yields the same order as comparing
 * the original values. The encoding supports both increasing and decreasing sort orders.
 */
@InternalApi
public final class SsFormat {

  /**
   * Makes the given key a prefix successor. This means that the returned key is the smallest
   * possible key that is larger than the input key, and that does not have the input key as a
   * prefix.
   *
   * <p>This is done by setting (not flipping) the least significant bit of the last byte of the
   * key. Encoded payload bytes reserve their LSB as 0 (values are shifted left by 1 before being
   * written), so setting that bit produces a strictly larger key for any validly encoded input.
   *
   * @param key The key to make a prefix successor. A null or empty key yields
   *     {@link ByteString#EMPTY}.
   * @return The prefix successor key.
   */
  public static ByteString makePrefixSuccessor(ByteString key) {
    if (key == null || key.isEmpty()) {
      return ByteString.EMPTY;
    }
    byte[] bytes = key.toByteArray();
    bytes[bytes.length - 1] = (byte) (bytes[bytes.length - 1] | 1);
    return ByteString.copyFrom(bytes);
  }

  /** Static utility class; no instances. */
  private SsFormat() {}

  // Set on every header byte to mark it as a key-encoding header.
  private static final int IS_KEY = 0x80;

  // HeaderType enum values
  // Unsigned integers (variable length 1-9 bytes)
  private static final int TYPE_UINT_1 = 0;
  private static final int TYPE_DECREASING_UINT_1 = 40;

  // Signed integers (variable length 1-8 bytes)
  private static final int TYPE_NEG_INT_1 = 16;
  private static final int TYPE_POS_INT_1 = 17;
  private static final int TYPE_DECREASING_NEG_INT_1 = 48;
  private static final int TYPE_DECREASING_POS_INT_1 = 49;

  // Strings
  private static final int TYPE_STRING = 25;
  private static final int TYPE_DECREASING_STRING = 57;

  // Nullable markers
  private static final int TYPE_NULL_ORDERED_FIRST = 27;
  private static final int TYPE_NULLABLE_NOT_NULL_NULL_ORDERED_FIRST = 28;
  private static final int TYPE_NULLABLE_NOT_NULL_NULL_ORDERED_LAST = 59;
  private static final int TYPE_NULL_ORDERED_LAST = 60;

  // Doubles (variable length 1-8 bytes, encoded as transformed int64)
  private static final int TYPE_NEG_DOUBLE_1 = 73;
  private static final int TYPE_POS_DOUBLE_1 = 74;
  private static final int TYPE_DECREASING_NEG_DOUBLE_1 = 89;
  private static final int TYPE_DECREASING_POS_DOUBLE_1 = 90;

  // EscapeChar enum values used by the byte-sequence encoding below.
  private static final byte ASCENDING_ZERO_ESCAPE = (byte) 0xf0;
  private static final byte ASCENDING_FF_ESCAPE = (byte) 0x10;
  private static final byte SEP = (byte) 0x78; // 'x'

  // For AppendCompositeTag
  private static final int K_OBJECT_EXISTENCE_TAG = 0x7e;
  private static final int K_MAX_FIELD_TAG = 0xffff;

  // Offset to make negative timestamp seconds sort correctly (1L << 63 == Long.MIN_VALUE, i.e.
  // biases seconds so the encoded unsigned big-endian bytes sort in chronological order).
  private static final long TIMESTAMP_SECONDS_OFFSET = 1L << 63;

  /**
   * Appends a composite field tag. Valid tags are in the range (0, 0xffff] and must not be the
   * reserved object-existence tag 0x7e. The tag is shifted left by 1 so its LSB is 0, keeping the
   * prefix-successor bit available (see {@link #makePrefixSuccessor}).
   *
   * @param out the output stream to append to
   * @param tag the field tag to encode
   * @throws IllegalArgumentException if the tag is out of range or reserved
   */
  public static void appendCompositeTag(UnsynchronizedByteArrayOutputStream out, int tag) {
    if (tag == K_OBJECT_EXISTENCE_TAG || tag <= 0 || tag > K_MAX_FIELD_TAG) {
      throw new IllegalArgumentException("Invalid tag value: " + tag);
    }

    if (tag < 16) {
      // Short tag: single byte holding (tag << 1); LSB 0 is the prefix-successor bit.
      out.write((byte) (tag << 1));
    } else {
      // Long tag
      int shiftedTag = tag << 1; // LSB is 0 for prefix successor
      if (shiftedTag < (1 << (5 + 8))) { // Original tag < 4096
        // Header: num_extra_bytes=1 (001xxxxx); low 5 header bits carry the payload MSBs.
        // (1 << 5) is 00100000
        out.write((byte) ((1 << 5) | (shiftedTag >> 8)));
        out.write((byte) (shiftedTag & 0xFF));
      } else { // Original tag >= 4096 and <= K_MAX_FIELD_TAG (65535)
        // Header: num_extra_bytes=2 (010xxxxx)
        // (2 << 5) is 01000000
        out.write((byte) ((2 << 5) | (shiftedTag >> 16)));
        out.write((byte) ((shiftedTag >> 8) & 0xFF));
        out.write((byte) (shiftedTag & 0xFF));
      }
    }
  }

  /** Appends a NULL marker that sorts before all non-null values. */
  public static void appendNullOrderedFirst(UnsynchronizedByteArrayOutputStream out) {
    out.write((byte) (IS_KEY | TYPE_NULL_ORDERED_FIRST));
    out.write((byte) 0);
  }

  /** Appends a NULL marker that sorts after all non-null values. */
  public static void appendNullOrderedLast(UnsynchronizedByteArrayOutputStream out) {
    out.write((byte) (IS_KEY | TYPE_NULL_ORDERED_LAST));
    out.write((byte) 0);
  }

  /** Appends a not-null marker for a nullable field whose NULLs sort first. */
  public static void appendNotNullMarkerNullOrderedFirst(UnsynchronizedByteArrayOutputStream out) {
    out.write((byte) (IS_KEY | TYPE_NULLABLE_NOT_NULL_NULL_ORDERED_FIRST));
  }

  /** Appends a not-null marker for a nullable field whose NULLs sort last. */
  public static void appendNotNullMarkerNullOrderedLast(UnsynchronizedByteArrayOutputStream out) {
    out.write((byte) (IS_KEY | TYPE_NULLABLE_NOT_NULL_NULL_ORDERED_LAST));
  }

  /**
   * Appends a boolean value in ascending (increasing) sort order.
   *
   * <p>Boolean values are encoded using unsigned integer encoding where false=0 and true=1. This
   * preserves the natural ordering where false sorts before true.
   *
   * @param out the output stream to append to
   * @param value the boolean value to encode
   */
  public static void appendBoolIncreasing(UnsynchronizedByteArrayOutputStream out, boolean value) {
    // BOOL uses unsigned int encoding: false=0, true=1
    // For values 0 and 1, payload is always 1 byte
    int encoded = value ? 1 : 0;
    out.write((byte) (IS_KEY | TYPE_UINT_1)); // Header for 1-byte unsigned int
    out.write(
        (byte) (encoded << 1)); // Payload: value shifted left by 1 (LSB is prefix-successor bit)
  }

  /**
   * Appends a boolean value in descending (decreasing) sort order.
   *
   * <p>Boolean values are encoded using unsigned integer encoding where false=0 and true=1, then
   * bit-inverted so the byte order is reversed: true sorts before false.
   *
   * @param out the output stream to append to
   * @param value the boolean value to encode
   */
  public static void appendBoolDecreasing(UnsynchronizedByteArrayOutputStream out, boolean value) {
    // BOOL uses decreasing unsigned int encoding: false=0, true=1, then inverted
    // For values 0 and 1, payload is always 1 byte
    int encoded = value ? 1 : 0;
    out.write(
        (byte) (IS_KEY | TYPE_DECREASING_UINT_1)); // Header for 1-byte decreasing unsigned int
    out.write((byte) ((~encoded & 0x7F) << 1)); // Inverted payload
  }

  /**
   * Core variable-length signed-int64 encoder shared by the int64 and double entry points.
   *
   * <p>Writes a header byte identifying sign, direction and payload length, followed by 1-8
   * payload bytes. The last payload byte carries only 7 value bits, shifted left by 1 so its LSB
   * stays 0 for the prefix-successor bit.
   *
   * @param out the output stream to append to
   * @param val the (possibly double-transformed) value to encode
   * @param decreasing true to encode for descending sort order (value is bit-inverted first)
   * @param isDouble true to use the double header-type families instead of the int families
   */
  private static void appendInt64Internal(
      UnsynchronizedByteArrayOutputStream out, long val, boolean decreasing, boolean isDouble) {
    if (decreasing) {
      val = ~val;
    }

    byte[] buf = new byte[8]; // Max 8 bytes for payload
    int len = 0;
    long tempVal = val;

    if (tempVal >= 0) {
      buf[7 - len] = (byte) ((tempVal & 0x7F) << 1);
      tempVal >>= 7;
      len++;
      while (tempVal > 0) {
        buf[7 - len] = (byte) (tempVal & 0xFF);
        tempVal >>= 8;
        len++;
      }
    } else { // tempVal < 0
      // For negative numbers, extend sign bit after shifting
      buf[7 - len] = (byte) ((tempVal & 0x7F) << 1);
      // Simulate sign extension for right shift of negative number
      // (x >> 7) | 0xFE00000000000000ULL; (if x has 64 bits)
      // In Java, right shift `>>` on negative longs performs sign extension.
      tempVal >>= 7;
      len++;
      while (tempVal != -1L) { // Loop until all remaining bits are 1s (sign extension)
        buf[7 - len] = (byte) (tempVal & 0xFF);
        tempVal >>= 8;
        len++;
        if (len > 8) {
          // Defensive assertion: unreachable for any valid 64-bit signed integer
          throw new AssertionError("Signed int encoding overflow");
        }
      }
    }

    int type;
    // NOTE: at this point `val` is the bit-inverted value when `decreasing` is set, so for
    // decreasing encodings the POS/NEG type families are intentionally swapped relative to the
    // original value's sign: an originally positive value takes the DECREASING_NEG family (and
    // vice versa), which gives it the smaller header byte and thus makes it sort first.
    if (val >= 0) {
      if (!decreasing) {
        type = isDouble ? (TYPE_POS_DOUBLE_1 + len - 1) : (TYPE_POS_INT_1 + len - 1);
      } else {
        type =
            isDouble
                ? (TYPE_DECREASING_POS_DOUBLE_1 + len - 1)
                : (TYPE_DECREASING_POS_INT_1 + len - 1);
      }
    } else {
      if (!decreasing) {
        type = isDouble ? (TYPE_NEG_DOUBLE_1 - len + 1) : (TYPE_NEG_INT_1 - len + 1);
      } else {
        type =
            isDouble
                ? (TYPE_DECREASING_NEG_DOUBLE_1 - len + 1)
                : (TYPE_DECREASING_NEG_INT_1 - len + 1);
      }
    }
    out.write((byte) (IS_KEY | type));
    out.write(buf, 8 - len, len);
  }

  /** Appends a signed 64-bit integer in ascending sort order. */
  public static void appendInt64Increasing(UnsynchronizedByteArrayOutputStream out, long value) {
    appendInt64Internal(out, value, false, false);
  }

  /** Appends a signed 64-bit integer in descending sort order. */
  public static void appendInt64Decreasing(UnsynchronizedByteArrayOutputStream out, long value) {
    appendInt64Internal(out, value, true, false);
  }

  /**
   * Appends a double in ascending sort order. The raw IEEE-754 bits are transformed so that the
   * resulting int64 sorts in the same order as the double values, then encoded like an int64.
   */
  public static void appendDoubleIncreasing(UnsynchronizedByteArrayOutputStream out, double value) {
    long enc = Double.doubleToRawLongBits(value);
    if (enc < 0) {
      // Transform negative doubles to maintain lexicographic sort order
      enc = Long.MIN_VALUE - enc;
    }
    appendInt64Internal(out, enc, false, true);
  }

  /** Appends a double in descending sort order; see {@link #appendDoubleIncreasing}. */
  public static void appendDoubleDecreasing(UnsynchronizedByteArrayOutputStream out, double value) {
    long enc = Double.doubleToRawLongBits(value);
    if (enc < 0) {
      enc = Long.MIN_VALUE - enc;
    }
    appendInt64Internal(out, enc, true, true);
  }

  /**
   * Appends an arbitrary byte sequence with an order-preserving, self-delimiting encoding: 0x00
   * and 0xFF bytes are escaped, and the sequence is closed with a terminator byte plus the
   * separator marker. For descending order each byte is bit-inverted first.
   */
  private static void appendByteSequence(
      UnsynchronizedByteArrayOutputStream out, byte[] bytes, boolean decreasing) {
    out.write((byte) (IS_KEY | (decreasing ? TYPE_DECREASING_STRING : TYPE_STRING)));

    for (byte b : bytes) {
      byte currentByte = decreasing ? (byte) ~b : b;
      int unsignedByte = currentByte & 0xFF;
      if (unsignedByte == 0x00) {
        // Escape sequence for 0x00: write 0x00 followed by 0xF0
        out.write((byte) 0x00);
        out.write(ASCENDING_ZERO_ESCAPE);
      } else if (unsignedByte == 0xFF) {
        // Escape sequence for 0xFF: write 0xFF followed by 0x10
        out.write((byte) 0xFF);
        out.write(ASCENDING_FF_ESCAPE);
      } else {
        out.write((byte) unsignedByte);
      }
    }
    // Terminator (0x00 ascending / 0xFF descending) followed by the separator marker
    out.write((byte) (decreasing ? 0xFF : 0x00));
    out.write(SEP);
  }

  /** Appends a string (UTF-8 encoded) in ascending sort order. */
  public static void appendStringIncreasing(UnsynchronizedByteArrayOutputStream out, String value) {
    appendByteSequence(out, value.getBytes(StandardCharsets.UTF_8), false);
  }

  /** Appends a string (UTF-8 encoded) in descending sort order. */
  public static void appendStringDecreasing(UnsynchronizedByteArrayOutputStream out, String value) {
    appendByteSequence(out, value.getBytes(StandardCharsets.UTF_8), true);
  }

  /** Appends raw bytes in ascending sort order. */
  public static void appendBytesIncreasing(UnsynchronizedByteArrayOutputStream out, byte[] value) {
    appendByteSequence(out, value, false);
  }

  /** Appends raw bytes in descending sort order. */
  public static void appendBytesDecreasing(UnsynchronizedByteArrayOutputStream out, byte[] value) {
    appendByteSequence(out, value, true);
  }

  /**
   * Encodes a timestamp as 12 bytes: 8 bytes for seconds since epoch (with offset to handle
   * negative), 4 bytes for nanoseconds, both big-endian.
   *
   * @param seconds seconds since the Unix epoch (may be negative)
   * @param nanos sub-second nanoseconds
   * @return the 12-byte big-endian encoding
   */
  public static byte[] encodeTimestamp(long seconds, int nanos) {
    long offsetSeconds = seconds + TIMESTAMP_SECONDS_OFFSET;
    byte[] buf = new byte[12];
    ByteBuffer.wrap(buf).order(ByteOrder.BIG_ENDIAN).putLong(offsetSeconds).putInt(nanos);
    return buf;
  }

  /**
   * Encodes a UUID (128-bit) as 16 bytes in big-endian order.
   *
   * @param high the most-significant 64 bits
   * @param low the least-significant 64 bits
   * @return the 16-byte big-endian encoding
   */
  public static byte[] encodeUuid(long high, long low) {
    byte[] buf = new byte[16];
    ByteBuffer.wrap(buf).order(ByteOrder.BIG_ENDIAN).putLong(high).putLong(low);
    return buf;
  }
}
*/ + public static byte[] encodeUuid(long high, long low) { + byte[] buf = new byte[16]; + ByteBuffer.wrap(buf).order(ByteOrder.BIG_ENDIAN).putLong(high).putLong(low); + return buf; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/TargetRange.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/TargetRange.java new file mode 100644 index 000000000000..bfcd2e30a8b7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/TargetRange.java @@ -0,0 +1,56 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import com.google.api.core.InternalApi; +import com.google.protobuf.ByteString; + +/** Represents a key range with start and limit boundaries for routing. */ +@InternalApi +public class TargetRange { + public ByteString start; + public ByteString limit; + public boolean approximate; + + public TargetRange(ByteString start, ByteString limit, boolean approximate) { + this.start = start; + this.limit = limit; + this.approximate = approximate; + } + + public boolean isPoint() { + return limit.isEmpty(); + } + + /** + * Merges another TargetRange into this one. The resulting range will be the union of the two + * ranges, taking the minimum start key and maximum limit key. 
+ */ + public void mergeFrom(TargetRange other) { + if (ByteString.unsignedLexicographicalComparator().compare(other.start, this.start) < 0) { + this.start = other.start; + } + if (other.isPoint() + && ByteString.unsignedLexicographicalComparator().compare(other.start, this.limit) >= 0) { + this.limit = SsFormat.makePrefixSuccessor(other.start); + } else if (ByteString.unsignedLexicographicalComparator().compare(other.limit, this.limit) + > 0) { + this.limit = other.limit; + } + this.approximate |= other.approximate; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/TraceContextInterceptor.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/TraceContextInterceptor.java new file mode 100644 index 000000000000..4280e3103550 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/TraceContextInterceptor.java @@ -0,0 +1,73 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.spanner.spi.v1; + +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.ForwardingClientCall.SimpleForwardingClientCall; +import io.grpc.ForwardingClientCallListener.SimpleForwardingClientCallListener; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.context.Context; +import io.opentelemetry.context.propagation.TextMapPropagator; +import io.opentelemetry.context.propagation.TextMapSetter; + +/** + * Intercepts all gRPC calls and injects trace context related headers to propagate trace context to + * Spanner. This class takes reference from OpenTelemetry's JAVA instrumentation library for gRPC. + * https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/9ecf7965aa455d41ea8cc0761b6c6b6eeb106324/instrumentation/grpc-1.6/library/src/main/java/io/opentelemetry/instrumentation/grpc/v1_6/TracingClientInterceptor.java#L27 + */ +public class TraceContextInterceptor implements ClientInterceptor { + + private final TextMapPropagator textMapPropagator; + + public TraceContextInterceptor(OpenTelemetry openTelemetry) { + this.textMapPropagator = openTelemetry.getPropagators().getTextMapPropagator(); + } + + enum MetadataSetter implements TextMapSetter { + INSTANCE; + + @SuppressWarnings("null") + @Override + public void set(Metadata carrier, String key, String value) { + carrier.put(Metadata.Key.of(key, Metadata.ASCII_STRING_MARSHALLER), value); + } + } + + private static final class NoopSimpleForwardingClientCallListener + extends SimpleForwardingClientCallListener { + public NoopSimpleForwardingClientCallListener(ClientCall.Listener responseListener) { + super(responseListener); + } + } + + @Override + public ClientCall interceptCall( + MethodDescriptor method, CallOptions callOptions, Channel next) { + return new SimpleForwardingClientCall(next.newCall(method, 
callOptions)) { + @Override + public void start(Listener responseListener, Metadata headers) { + Context parentContext = Context.current(); + textMapPropagator.inject(parentContext, headers, MetadataSetter.INSTANCE); + super.start(new NoopSimpleForwardingClientCallListener(responseListener), headers); + } + }; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/UnsynchronizedByteArrayOutputStream.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/UnsynchronizedByteArrayOutputStream.java new file mode 100644 index 000000000000..864215c98741 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/UnsynchronizedByteArrayOutputStream.java @@ -0,0 +1,105 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import com.google.api.core.InternalApi; +import java.util.Arrays; + +/** + * A simple, unsynchronized byte array output stream optimized for key encoding. + * + *

Unlike {@link java.io.ByteArrayOutputStream}, this class is not thread-safe and does not incur + * synchronization overhead. This provides better performance for single-threaded key encoding + * operations where synchronization is not required. + */ +@InternalApi +public final class UnsynchronizedByteArrayOutputStream { + + private byte[] buf; + private int count; + + /** Creates a new output stream with a default initial capacity of 32 bytes. */ + public UnsynchronizedByteArrayOutputStream() { + this(32); + } + + /** + * Creates a new output stream with the specified initial capacity. + * + * @param initialCapacity the initial buffer size + * @throws IllegalArgumentException if initialCapacity is negative + */ + public UnsynchronizedByteArrayOutputStream(int initialCapacity) { + if (initialCapacity < 0) { + throw new IllegalArgumentException("Negative initial capacity: " + initialCapacity); + } + this.buf = new byte[initialCapacity]; + } + + private void ensureCapacity(int minCapacity) { + if (minCapacity > buf.length) { + int newCapacity = Math.max(buf.length << 1, minCapacity); + buf = Arrays.copyOf(buf, newCapacity); + } + } + + /** + * Writes the specified byte to this output stream. + * + * @param b the byte to write (only the low 8 bits are used) + */ + public void write(int b) { + ensureCapacity(count + 1); + buf[count++] = (byte) b; + } + + /** + * Writes a portion of a byte array to this output stream. + * + * @param b the source byte array + * @param off the start offset in the array + * @param len the number of bytes to write + */ + public void write(byte[] b, int off, int len) { + ensureCapacity(count + len); + System.arraycopy(b, off, buf, count, len); + count += len; + } + + /** + * Returns a copy of the buffer contents as a new byte array. + * + * @return a new byte array containing the written bytes + */ + public byte[] toByteArray() { + return Arrays.copyOf(buf, count); + } + + /** Resets the buffer so that it can be reused. 
The underlying buffer is retained. */ + public void reset() { + count = 0; + } + + /** + * Returns the current number of bytes written to this stream. + * + * @return the number of valid bytes in the buffer + */ + public int size() { + return count; + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/testing/EmulatorSpannerHelper.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/testing/EmulatorSpannerHelper.java new file mode 100644 index 000000000000..e7a4cb85225d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/testing/EmulatorSpannerHelper.java @@ -0,0 +1,35 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.testing; + +import com.google.common.base.Strings; + +/** Utility class for checking emulator state for tests */ +public class EmulatorSpannerHelper { + + public static final String SPANNER_EMULATOR_HOST = "SPANNER_EMULATOR_HOST"; + + /** + * Checks whether the emulator is being used. This is done by checking if the + * SPANNER_EMULATOR_HOST environment variable is set. + * + * @return true if the emulator is being used. Returns false otherwise. 
/** Utility class for checking emulator state for tests. */
public class EmulatorSpannerHelper {

  /** Name of the environment variable that points at a running Spanner emulator. */
  public static final String SPANNER_EMULATOR_HOST = "SPANNER_EMULATOR_HOST";

  /**
   * Checks whether the emulator is being used. This is done by checking if the
   * SPANNER_EMULATOR_HOST environment variable is set to a non-empty value.
   *
   * @return true if the emulator is being used. Returns false otherwise.
   */
  public static boolean isUsingEmulator() {
    String emulatorHost = System.getenv(SPANNER_EMULATOR_HOST);
    return emulatorHost != null && !emulatorHost.isEmpty();
  }
}
/** Utility class for configuring tests to run against an experimental Spanner host. */
public class ExperimentalHostHelper {
  // System property names used to configure the experimental host connection.
  private static final String EXPERIMENTAL_HOST = "spanner.experimental_host";
  private static final String USE_PLAIN_TEXT = "spanner.use_plain_text";
  private static final String USE_MTLS = "spanner.mtls";
  private static final String CLIENT_CERT_PATH = "spanner.client_cert_path";
  private static final String CLIENT_CERT_KEY_PATH = "spanner.client_cert_key_path";

  /**
   * Checks whether an experimental Spanner host is being used. This is done by checking if the
   * {@code spanner.experimental_host} system property is set to a non-empty value.
   *
   * @return true if an experimental host is configured. Returns false otherwise.
   */
  public static boolean isExperimentalHost() {
    return !Strings.isNullOrEmpty(System.getProperty(EXPERIMENTAL_HOST));
  }

  /**
   * Appends experimental-host properties to the given connection URI. Always appends
   * {@code isExperimentalHost=true}; when mTLS is configured, also appends the client certificate
   * and key paths taken from the corresponding system properties.
   */
  public static void appendExperimentalHost(StringBuilder uri) {
    uri.append(";isExperimentalHost=true");
    if (isMtlsSetup()) {
      String clientCertificate = System.getProperty(CLIENT_CERT_PATH, "");
      String clientKey = System.getProperty(CLIENT_CERT_KEY_PATH, "");
      uri.append(";clientCertificate=").append(clientCertificate);
      uri.append(";clientKey=").append(clientKey);
    }
  }

  /** Returns true when the {@code spanner.mtls} system property is set to {@code true}. */
  public static boolean isMtlsSetup() {
    return Boolean.getBoolean(USE_MTLS);
  }

  /**
   * Configures the given {@link SpannerOptions.Builder} for the experimental host: sets the host,
   * disables built-in metrics, and optionally enables plain-text transport and/or mTLS client
   * certificates, all driven by the {@code spanner.*} system properties above.
   */
  public static void setExperimentalHostSpannerOptions(SpannerOptions.Builder builder) {
    String experimentalHost = System.getProperty(EXPERIMENTAL_HOST, "");
    boolean usePlainText = Boolean.getBoolean(USE_PLAIN_TEXT);
    builder.setExperimentalHost(experimentalHost);
    builder.setBuiltInMetricsEnabled(false);
    if (usePlainText) {
      builder.usePlainText();
    }
    if (isMtlsSetup()) {
      String clientCertificate = System.getProperty(CLIENT_CERT_PATH, "");
      String clientKey = System.getProperty(CLIENT_CERT_KEY_PATH, "");
      builder.useClientCert(clientCertificate, clientKey);
    }
  }
}
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.testing; + +import com.google.api.client.util.BackOff; +import com.google.api.client.util.ExponentialBackOff; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.BatchClient; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.InstanceId; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.Iterables; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Random; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * Utility that provides access to a Cloud Spanner instance to use for tests, and allows uniquely + * named test databases to be created within that instance. 
+ */ +public class RemoteSpannerHelper { + private static final Logger logger = Logger.getLogger(RemoteSpannerHelper.class.getName()); + + private final SpannerOptions options; + private final Spanner client; + private final InstanceId instanceId; + private static final AtomicInteger dbSeq = new AtomicInteger(); + private static final int dbPrefix = new Random().nextInt(Integer.MAX_VALUE); + private static final AtomicInteger dbRoleSeq = new AtomicInteger(); + ; + private static int dbRolePrefix = new Random().nextInt(Integer.MAX_VALUE); + private static final AtomicInteger backupSeq = new AtomicInteger(); + private static final int backupPrefix = new Random().nextInt(Integer.MAX_VALUE); + private final List databaseIds = new ArrayList<>(); + + protected RemoteSpannerHelper(SpannerOptions options, InstanceId instanceId, Spanner client) { + this.options = options; + this.instanceId = instanceId; + this.client = client; + } + + public SpannerOptions getOptions() { + return options; + } + + /** + * Checks whether the emulator is being used. + * + * @deprecated use {@link EmulatorSpannerHelper#isUsingEmulator()} instead. + * @return true if the emulator is being used. Returns false otherwise. + */ + @Deprecated + public boolean isEmulator() { + return EmulatorSpannerHelper.isUsingEmulator(); + } + + public Spanner getClient() { + return client; + } + + public DatabaseClient getDatabaseClient(Database db) { + return getClient().getDatabaseClient(db.getId()); + } + + public BatchClient getBatchClient(Database db) { + return getClient().getBatchClient(db.getId()); + } + + public InstanceId getInstanceId() { + return instanceId; + } + + /** + * Creates a test database defined by {@code statements}. A {@code CREATE DATABASE ...} statement + * should not be included; an appropriate name will be chosen and the statement generated + * accordingly. + */ + public Database createTestDatabase(String... 
statements) throws SpannerException { + return createTestDatabase(Dialect.GOOGLE_STANDARD_SQL, Arrays.asList(statements)); + } + + /** + * Returns a database id which is guaranteed to be unique within the context of this environment. + */ + public String getUniqueDatabaseId() { + return String.format("testdb_%d_%04d", dbPrefix, dbSeq.incrementAndGet()); + } + + /** + * Returns a database role name which is guaranteed to be unique within the context of this + * environment. + */ + public String getUniqueDatabaseRole() { + return String.format("testdbrole_%d_%04d", dbRolePrefix, dbRoleSeq.incrementAndGet()); + } + + /** + * Returns a backup id which is guaranteed to be unique within the context of this environment. + */ + public String getUniqueBackupId() { + return String.format("testbck_%06d_%04d", backupPrefix, backupSeq.incrementAndGet()); + } + + /** + * Creates a test database defined by {@code statements} in the test instance. A {@code CREATE + * DATABASE ...} statement should not be included; an appropriate name will be chosen and the + * statement generated accordingly. + */ + public Database createTestDatabase(Dialect dialect, Iterable statements) + throws SpannerException { + String dbId = getUniqueDatabaseId(); + DatabaseId databaseId = DatabaseId.of(instanceId.getProject(), instanceId.getInstance(), dbId); + Database databaseToCreate = + client.getDatabaseAdminClient().newDatabaseBuilder(databaseId).setDialect(dialect).build(); + try { + Iterable ddlStatements = + dialect == Dialect.POSTGRESQL ? 
Collections.emptyList() : statements; + Database db = null; + final int maxAttempts = 20; + BackOff backOff = + new ExponentialBackOff.Builder() + .setInitialIntervalMillis(10_000) + .setMaxIntervalMillis(60_000) + .setMaxElapsedTimeMillis(120_000) + .build(); + for (int attempts = 0; attempts < maxAttempts; attempts++) { + try { + OperationFuture op = + client.getDatabaseAdminClient().createDatabase(databaseToCreate, ddlStatements); + db = op.get(); + break; + } catch (ExecutionException executionException) { + SpannerException spannerException = + SpannerExceptionFactory.asSpannerException(executionException.getCause()); + if (spannerException.getErrorCode() != ErrorCode.RESOURCE_EXHAUSTED) { + throw executionException; + } + } catch (SpannerException spannerException) { + if (spannerException.getErrorCode() != ErrorCode.RESOURCE_EXHAUSTED) { + throw spannerException; + } + } + long sleep = backOff.nextBackOffMillis(); + if (sleep > 0L) { + Thread.sleep(sleep); + } + } + if (db == null) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.RESOURCE_EXHAUSTED, + String.format("Failed to create test database after %d attempts", maxAttempts)); + } + if (dialect == Dialect.POSTGRESQL && Iterables.size(statements) > 0) { + client + .getDatabaseAdminClient() + .updateDatabaseDdl(instanceId.getInstance(), dbId, statements, null) + .get(); + } + logger.log(Level.FINE, "Created test database {0}", db.getId()); + return db; + } catch (Exception e) { + throw SpannerExceptionFactory.newSpannerException(e); + } finally { + databaseIds.add(databaseId); + } + } + + public Database createTestDatabase(Iterable statements) throws SpannerException { + return createTestDatabase(Dialect.GOOGLE_STANDARD_SQL, statements); + } + + /** Deletes all the databases created via {@code createTestDatabase}. Shuts down the client. */ + public void cleanUp() { + // Drop all the databases we created explicitly. 
+ int numDropped = 0; + for (DatabaseId databaseId : databaseIds) { + try { + logger.log(Level.INFO, "Dropping test database {0}", databaseId); + client + .getDatabaseAdminClient() + .dropDatabase(databaseId.getInstanceId().getInstance(), databaseId.getDatabase()); + ++numDropped; + } catch (Throwable e) { + logger.log(Level.SEVERE, "Failed to drop test database " + databaseId, e); + } + } + logger.log(Level.INFO, "Dropped {0} test database(s)", numDropped); + } + + /** + * Creates a {@code RemoteSpannerHelper} bound to the given instance ID. All databases created + * using this will be created in the given instance. + */ + public static RemoteSpannerHelper create(InstanceId instanceId) { + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId(instanceId.getProject()) + .setAutoThrottleAdministrativeRequests() + .setTrackTransactionStarter() + .build(); + Spanner client = options.getService(); + return new RemoteSpannerHelper(options, instanceId, client); + } + + /** + * Creates a {@code RemoteSpannerHelper} for the given option and bound to the given instance ID. + * All databases created using this will be created in the given instance. + */ + public static RemoteSpannerHelper create(SpannerOptions options, InstanceId instanceId) { + Spanner client = options.getService(); + return new RemoteSpannerHelper(options, instanceId, client); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/testing/TimestampHelper.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/testing/TimestampHelper.java new file mode 100644 index 000000000000..021adf4dd7f2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/testing/TimestampHelper.java @@ -0,0 +1,41 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.testing; + +import com.google.cloud.Timestamp; +import java.util.concurrent.TimeUnit; + +public class TimestampHelper { + + public static Timestamp daysAgo(int days) { + return Timestamp.ofTimeMicroseconds( + TimeUnit.MICROSECONDS.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS) + - TimeUnit.MICROSECONDS.convert(days, TimeUnit.DAYS)); + } + + public static Timestamp afterDays(int days) { + return Timestamp.ofTimeMicroseconds( + TimeUnit.MICROSECONDS.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS) + + TimeUnit.MICROSECONDS.convert(days, TimeUnit.DAYS)); + } + + public static Timestamp afterMinutes(int minutes) { + return Timestamp.ofTimeMicroseconds( + TimeUnit.MICROSECONDS.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS) + + TimeUnit.MICROSECONDS.convert(minutes, TimeUnit.MINUTES)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/SpannerClient.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/SpannerClient.java new file mode 100644 index 000000000000..47dc7da50af9 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/SpannerClient.java @@ -0,0 +1,2470 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.v1; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.paging.AbstractFixedSizeCollection; +import com.google.api.gax.paging.AbstractPage; +import com.google.api.gax.paging.AbstractPagedListResponse; +import com.google.api.gax.rpc.PageContext; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.spanner.v1.stub.SpannerStub; +import com.google.cloud.spanner.v1.stub.SpannerStubSettings; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.ByteString; +import com.google.protobuf.Empty; +import com.google.spanner.v1.BatchCreateSessionsRequest; +import com.google.spanner.v1.BatchCreateSessionsResponse; +import com.google.spanner.v1.BatchWriteRequest; +import com.google.spanner.v1.BatchWriteResponse; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.CommitResponse; +import com.google.spanner.v1.CreateSessionRequest; +import com.google.spanner.v1.DatabaseName; +import com.google.spanner.v1.DeleteSessionRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteBatchDmlResponse; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.GetSessionRequest; +import com.google.spanner.v1.ListSessionsRequest; +import com.google.spanner.v1.ListSessionsResponse; +import 
com.google.spanner.v1.Mutation; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.PartitionQueryRequest; +import com.google.spanner.v1.PartitionReadRequest; +import com.google.spanner.v1.PartitionResponse; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.Session; +import com.google.spanner.v1.SessionName; +import com.google.spanner.v1.Transaction; +import com.google.spanner.v1.TransactionOptions; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Service Description: Cloud Spanner API + * + *

The Cloud Spanner API can be used to manage sessions and execute transactions on data stored + * in Cloud Spanner databases. + * + *

This class provides the ability to make remote calls to the backing service through method + * calls that map to API methods. Sample code to get started: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (SpannerClient spannerClient = SpannerClient.create()) {
+ *   DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]");
+ *   Session response = spannerClient.createSession(database);
+ * }
+ * }
+ * + *

Note: close() needs to be called on the SpannerClient object to clean up resources such as + * threads. In the example above, try-with-resources is used, which automatically calls close(). + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
Methods
MethodDescriptionMethod Variants

CreateSession

Creates a new session. A session can be used to perform transactions that read and/or modify data in a Cloud Spanner database. Sessions are meant to be reused for many consecutive transactions. + *

Sessions can only execute one transaction at a time. To execute multiple concurrent read-write/write-only transactions, create multiple sessions. Note that standalone reads and queries use a transaction internally, and count toward the one transaction limit. + *

Active sessions use additional server resources, so it's a good idea to delete idle and unneeded sessions. Aside from explicit deletes, Cloud Spanner can delete sessions when no operations are sent for more than an hour. If a session is deleted, requests to it return `NOT_FOUND`. + *

Idle sessions can be kept alive by sending a trivial SQL query periodically, for example, `"SELECT 1"`.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • createSession(CreateSessionRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • createSession(DatabaseName database) + *

  • createSession(String database) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • createSessionCallable() + *

+ *

BatchCreateSessions

Creates multiple new sessions. + *

This API can be used to initialize a session cache on the clients. See https://goo.gl/TgSFN2 for best practices on session cache management.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • batchCreateSessions(BatchCreateSessionsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • batchCreateSessions(DatabaseName database, int sessionCount) + *

  • batchCreateSessions(String database, int sessionCount) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • batchCreateSessionsCallable() + *

+ *

GetSession

Gets a session. Returns `NOT_FOUND` if the session doesn't exist. This is mainly useful for determining whether a session is still alive.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • getSession(GetSessionRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • getSession(SessionName name) + *

  • getSession(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • getSessionCallable() + *

+ *

ListSessions

Lists all sessions in a given database.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • listSessions(ListSessionsRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • listSessions(DatabaseName database) + *

  • listSessions(String database) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • listSessionsPagedCallable() + *

  • listSessionsCallable() + *

+ *

DeleteSession

Ends a session, releasing server resources associated with it. This asynchronously triggers the cancellation of any operations that are running with this session.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • deleteSession(DeleteSessionRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • deleteSession(SessionName name) + *

  • deleteSession(String name) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • deleteSessionCallable() + *

+ *

ExecuteSql

Executes an SQL statement, returning all results in a single reply. This method can't be used to return a result set larger than 10 MiB; if the query yields more data than that, the query fails with a `FAILED_PRECONDITION` error. + *

Operations inside read-write transactions might return `ABORTED`. If this occurs, the application should restart the transaction from the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. + *

Larger result sets can be fetched in streaming fashion by calling [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] instead. + *

The query string can be SQL or [Graph Query Language (GQL)](https://cloud.google.com/spanner/docs/reference/standard-sql/graph-intro).

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • executeSql(ExecuteSqlRequest request) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • executeSqlCallable() + *

+ *

ExecuteStreamingSql

Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the result set as a stream. Unlike [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on the size of the returned result set. However, no individual row in the result set can exceed 100 MiB, and no column value can exceed 10 MiB. + *

The query string can be SQL or [Graph Query Language (GQL)](https://cloud.google.com/spanner/docs/reference/standard-sql/graph-intro).

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • executeStreamingSqlCallable() + *

+ *

ExecuteBatchDml

Executes a batch of SQL DML statements. This method allows many statements to be run with lower latency than submitting them sequentially with [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. + *

Statements are executed in sequential order. A request can succeed even if a statement fails. The [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] field in the response provides information about the statement that failed. Clients must inspect this field to determine whether an error occurred. + *

Execution stops after the first failed statement; the remaining statements are not executed.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • executeBatchDml(ExecuteBatchDmlRequest request) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • executeBatchDmlCallable() + *

+ *

Read

Reads rows from the database using key lookups and scans, as a simple key/value style alternative to [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method can't be used to return a result set larger than 10 MiB; if the read matches more data than that, the read fails with a `FAILED_PRECONDITION` error. + *

Reads inside read-write transactions might return `ABORTED`. If this occurs, the application should restart the transaction from the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. + *

Larger result sets can be yielded in streaming fashion by calling [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • read(ReadRequest request) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • readCallable() + *

+ *

StreamingRead

Like [Read][google.spanner.v1.Spanner.Read], except returns the result set as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no limit on the size of the returned result set. However, no individual row in the result set can exceed 100 MiB, and no column value can exceed 10 MiB.

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • streamingReadCallable() + *

+ *

BeginTransaction

Begins a new transaction. This step can often be skipped: [Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a side-effect.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • beginTransaction(BeginTransactionRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • beginTransaction(SessionName session, TransactionOptions options) + *

  • beginTransaction(String session, TransactionOptions options) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • beginTransactionCallable() + *

+ *

Commit

Commits a transaction. The request includes the mutations to be applied to rows in the database. + *

`Commit` might return an `ABORTED` error. This can occur at any time; commonly, the cause is conflicts with concurrent transactions. However, it can also happen for a variety of other reasons. If `Commit` returns `ABORTED`, the caller should retry the transaction from the beginning, reusing the same session. + *

On very rare occasions, `Commit` might return `UNKNOWN`. This can happen, for example, if the client job experiences a 1+ hour networking failure. At that point, Cloud Spanner has lost track of the transaction outcome and we recommend that you perform another read from the database to see the state of things as they are now.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • commit(CommitRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • commit(SessionName session, ByteString transactionId, List<Mutation> mutations) + *

  • commit(SessionName session, TransactionOptions singleUseTransaction, List<Mutation> mutations) + *

  • commit(String session, ByteString transactionId, List<Mutation> mutations) + *

  • commit(String session, TransactionOptions singleUseTransaction, List<Mutation> mutations) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • commitCallable() + *

+ *

Rollback

Rolls back a transaction, releasing any locks it holds. It's a good idea to call this for any transaction that includes one or more [Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately decides not to commit. + *

`Rollback` returns `OK` if it successfully aborts the transaction, the transaction was already aborted, or the transaction isn't found. `Rollback` never returns `ABORTED`.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • rollback(RollbackRequest request) + *

+ *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ *
    + *
  • rollback(SessionName session, ByteString transactionId) + *

  • rollback(String session, ByteString transactionId) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • rollbackCallable() + *

+ *

PartitionQuery

Creates a set of partition tokens that can be used to execute a query operation in parallel. Each of the returned partition tokens can be used by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to specify a subset of the query result to read. The same session and read-only transaction must be used by the `PartitionQueryRequest` used to create the partition tokens and the `ExecuteSqlRequests` that use the partition tokens. + *

Partition tokens become invalid when the session used to create them is deleted, is idle for too long, begins a new transaction, or becomes too old. When any of these happen, it isn't possible to resume the query, and the whole operation must be restarted from the beginning.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • partitionQuery(PartitionQueryRequest request) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • partitionQueryCallable() + *

+ *

PartitionRead

Creates a set of partition tokens that can be used to execute a read operation in parallel. Each of the returned partition tokens can be used by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a subset of the read result to read. The same session and read-only transaction must be used by the `PartitionReadRequest` used to create the partition tokens and the `ReadRequests` that use the partition tokens. There are no ordering guarantees on rows returned among the returned partition tokens, or even within each individual `StreamingRead` call issued with a `partition_token`. + *

Partition tokens become invalid when the session used to create them is deleted, is idle for too long, begins a new transaction, or becomes too old. When any of these happen, it isn't possible to resume the read, and the whole operation must be restarted from the beginning.

+ *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ *
    + *
  • partitionRead(PartitionReadRequest request) + *

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • partitionReadCallable() + *

+ *

BatchWrite

Batches the supplied mutation groups in a collection of efficient transactions. All mutations in a group are committed atomically. However, mutations across groups can be committed non-atomically in an unspecified order and thus, they must be independent of each other. Partial failure is possible, that is, some groups might have been committed successfully, while some might have failed. The results of individual batches are streamed into the response as the batches are applied. + *

`BatchWrite` requests are not replay protected, meaning that each mutation group can be applied more than once. Replays of non-idempotent mutations can have undesirable effects. For example, replays of an insert mutation can produce an already exists error or if you use generated or commit timestamp-based keys, it can result in additional rows being added to the mutation's table. We recommend structuring your mutation groups to be idempotent to avoid this issue.

+ *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ *
    + *
  • batchWriteCallable() + *

+ *
+ * + *

See the individual methods for example code. + * + *

Many parameters require resource names to be formatted in a particular way. To assist with + * these names, this class includes a format method for each type of name, and additionally a parse + * method to extract the individual identifiers contained within names that are returned. + * + *

This class can be customized by passing in a custom instance of SpannerSettings to create(). + * For example: + * + *

To customize credentials: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * SpannerSettings spannerSettings =
+ *     SpannerSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * SpannerClient spannerClient = SpannerClient.create(spannerSettings);
+ * }
+ * + *

To customize the endpoint: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * SpannerSettings spannerSettings = SpannerSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * SpannerClient spannerClient = SpannerClient.create(spannerSettings);
+ * }
+ * + *

To use REST (HTTP1.1/JSON) transport (instead of gRPC) for sending and receiving requests over + * the wire: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * SpannerSettings spannerSettings = SpannerSettings.newHttpJsonBuilder().build();
+ * SpannerClient spannerClient = SpannerClient.create(spannerSettings);
+ * }
+ * + *

Please refer to the GitHub repository's samples for more quickstart code snippets. + */ +@Generated("by gapic-generator-java") +public class SpannerClient implements BackgroundResource { + private final SpannerSettings settings; + private final SpannerStub stub; + + /** Constructs an instance of SpannerClient with default settings. */ + public static final SpannerClient create() throws IOException { + return create(SpannerSettings.newBuilder().build()); + } + + /** + * Constructs an instance of SpannerClient, using the given settings. The channels are created + * based on the settings passed in, or defaults for any settings that are not set. + */ + public static final SpannerClient create(SpannerSettings settings) throws IOException { + return new SpannerClient(settings); + } + + /** + * Constructs an instance of SpannerClient, using the given stub for making calls. This is for + * advanced usage - prefer using create(SpannerSettings). + */ + public static final SpannerClient create(SpannerStub stub) { + return new SpannerClient(stub); + } + + /** + * Constructs an instance of SpannerClient, using the given settings. This is protected so that it + * is easy to make a subclass, but otherwise, the static factory methods should be preferred. + */ + protected SpannerClient(SpannerSettings settings) throws IOException { + this.settings = settings; + this.stub = ((SpannerStubSettings) settings.getStubSettings()).createStub(); + } + + protected SpannerClient(SpannerStub stub) { + this.settings = null; + this.stub = stub; + } + + public final SpannerSettings getSettings() { + return settings; + } + + public SpannerStub getStub() { + return stub; + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new session. A session can be used to perform transactions that read and/or modify + * data in a Cloud Spanner database. Sessions are meant to be reused for many consecutive + * transactions. + * + *

Sessions can only execute one transaction at a time. To execute multiple concurrent + * read-write/write-only transactions, create multiple sessions. Note that standalone reads and + * queries use a transaction internally, and count toward the one transaction limit. + * + *

Active sessions use additional server resources, so it's a good idea to delete idle and + * unneeded sessions. Aside from explicit deletes, Cloud Spanner can delete sessions when no + * operations are sent for more than an hour. If a session is deleted, requests to it return + * `NOT_FOUND`. + * + *

Idle sessions can be kept alive by sending a trivial SQL query periodically, for example, + * `"SELECT 1"`. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]");
+   *   Session response = spannerClient.createSession(database);
+   * }
+   * }
+ * + * @param database Required. The database in which the new session is created. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Session createSession(DatabaseName database) { + CreateSessionRequest request = + CreateSessionRequest.newBuilder() + .setDatabase(database == null ? null : database.toString()) + .build(); + return createSession(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new session. A session can be used to perform transactions that read and/or modify + * data in a Cloud Spanner database. Sessions are meant to be reused for many consecutive + * transactions. + * + *

Sessions can only execute one transaction at a time. To execute multiple concurrent + * read-write/write-only transactions, create multiple sessions. Note that standalone reads and + * queries use a transaction internally, and count toward the one transaction limit. + * + *

Active sessions use additional server resources, so it's a good idea to delete idle and + * unneeded sessions. Aside from explicit deletes, Cloud Spanner can delete sessions when no + * operations are sent for more than an hour. If a session is deleted, requests to it return + * `NOT_FOUND`. + * + *

Idle sessions can be kept alive by sending a trivial SQL query periodically, for example, + * `"SELECT 1"`. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   String database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString();
+   *   Session response = spannerClient.createSession(database);
+   * }
+   * }
+ * + * @param database Required. The database in which the new session is created. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Session createSession(String database) { + CreateSessionRequest request = CreateSessionRequest.newBuilder().setDatabase(database).build(); + return createSession(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new session. A session can be used to perform transactions that read and/or modify + * data in a Cloud Spanner database. Sessions are meant to be reused for many consecutive + * transactions. + * + *

Sessions can only execute one transaction at a time. To execute multiple concurrent + * read-write/write-only transactions, create multiple sessions. Note that standalone reads and + * queries use a transaction internally, and count toward the one transaction limit. + * + *

Active sessions use additional server resources, so it's a good idea to delete idle and + * unneeded sessions. Aside from explicit deletes, Cloud Spanner can delete sessions when no + * operations are sent for more than an hour. If a session is deleted, requests to it return + * `NOT_FOUND`. + * + *

Idle sessions can be kept alive by sending a trivial SQL query periodically, for example, + * `"SELECT 1"`. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   CreateSessionRequest request =
+   *       CreateSessionRequest.newBuilder()
+   *           .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .setSession(Session.newBuilder().build())
+   *           .build();
+   *   Session response = spannerClient.createSession(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Session createSession(CreateSessionRequest request) { + return createSessionCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new session. A session can be used to perform transactions that read and/or modify + * data in a Cloud Spanner database. Sessions are meant to be reused for many consecutive + * transactions. + * + *

Sessions can only execute one transaction at a time. To execute multiple concurrent + * read-write/write-only transactions, create multiple sessions. Note that standalone reads and + * queries use a transaction internally, and count toward the one transaction limit. + * + *

Active sessions use additional server resources, so it's a good idea to delete idle and + * unneeded sessions. Aside from explicit deletes, Cloud Spanner can delete sessions when no + * operations are sent for more than an hour. If a session is deleted, requests to it return + * `NOT_FOUND`. + * + *

Idle sessions can be kept alive by sending a trivial SQL query periodically, for example, + * `"SELECT 1"`. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   CreateSessionRequest request =
+   *       CreateSessionRequest.newBuilder()
+   *           .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .setSession(Session.newBuilder().build())
+   *           .build();
+   *   ApiFuture future = spannerClient.createSessionCallable().futureCall(request);
+   *   // Do something.
+   *   Session response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable createSessionCallable() { + return stub.createSessionCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates multiple new sessions. + * + *

This API can be used to initialize a session cache on the clients. See https://goo.gl/TgSFN2 + * for best practices on session cache management. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]");
+   *   int sessionCount = 185691686;
+   *   BatchCreateSessionsResponse response =
+   *       spannerClient.batchCreateSessions(database, sessionCount);
+   * }
+   * }
+ * + * @param database Required. The database in which the new sessions are created. + * @param sessionCount Required. The number of sessions to be created in this batch call. At least + * one session is created. The API can return fewer than the requested number of sessions. If + * a specific number of sessions are desired, the client can make additional calls to + * `BatchCreateSessions` (adjusting + * [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count] as necessary). + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BatchCreateSessionsResponse batchCreateSessions( + DatabaseName database, int sessionCount) { + BatchCreateSessionsRequest request = + BatchCreateSessionsRequest.newBuilder() + .setDatabase(database == null ? null : database.toString()) + .setSessionCount(sessionCount) + .build(); + return batchCreateSessions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates multiple new sessions. + * + *

This API can be used to initialize a session cache on the clients. See https://goo.gl/TgSFN2 + * for best practices on session cache management. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   String database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString();
+   *   int sessionCount = 185691686;
+   *   BatchCreateSessionsResponse response =
+   *       spannerClient.batchCreateSessions(database, sessionCount);
+   * }
+   * }
+ * + * @param database Required. The database in which the new sessions are created. + * @param sessionCount Required. The number of sessions to be created in this batch call. At least + * one session is created. The API can return fewer than the requested number of sessions. If + * a specific number of sessions are desired, the client can make additional calls to + * `BatchCreateSessions` (adjusting + * [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count] as necessary). + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BatchCreateSessionsResponse batchCreateSessions(String database, int sessionCount) { + BatchCreateSessionsRequest request = + BatchCreateSessionsRequest.newBuilder() + .setDatabase(database) + .setSessionCount(sessionCount) + .build(); + return batchCreateSessions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates multiple new sessions. + * + *

This API can be used to initialize a session cache on the clients. See https://goo.gl/TgSFN2 + * for best practices on session cache management. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   BatchCreateSessionsRequest request =
+   *       BatchCreateSessionsRequest.newBuilder()
+   *           .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .setSessionTemplate(Session.newBuilder().build())
+   *           .setSessionCount(185691686)
+   *           .build();
+   *   BatchCreateSessionsResponse response = spannerClient.batchCreateSessions(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BatchCreateSessionsResponse batchCreateSessions(BatchCreateSessionsRequest request) { + return batchCreateSessionsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates multiple new sessions. + * + *

This API can be used to initialize a session cache on the clients. See https://goo.gl/TgSFN2 + * for best practices on session cache management. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   BatchCreateSessionsRequest request =
+   *       BatchCreateSessionsRequest.newBuilder()
+   *           .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .setSessionTemplate(Session.newBuilder().build())
+   *           .setSessionCount(185691686)
+   *           .build();
+   *   ApiFuture future =
+   *       spannerClient.batchCreateSessionsCallable().futureCall(request);
+   *   // Do something.
+   *   BatchCreateSessionsResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + batchCreateSessionsCallable() { + return stub.batchCreateSessionsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets a session. Returns `NOT_FOUND` if the session doesn't exist. This is mainly useful for + * determining whether a session is still alive. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   SessionName name = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]");
+   *   Session response = spannerClient.getSession(name);
+   * }
+   * }
+ * + * @param name Required. The name of the session to retrieve. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Session getSession(SessionName name) { + GetSessionRequest request = + GetSessionRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + return getSession(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets a session. Returns `NOT_FOUND` if the session doesn't exist. This is mainly useful for + * determining whether a session is still alive. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   String name = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString();
+   *   Session response = spannerClient.getSession(name);
+   * }
+   * }
+ * + * @param name Required. The name of the session to retrieve. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Session getSession(String name) { + GetSessionRequest request = GetSessionRequest.newBuilder().setName(name).build(); + return getSession(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets a session. Returns `NOT_FOUND` if the session doesn't exist. This is mainly useful for + * determining whether a session is still alive. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   GetSessionRequest request =
+   *       GetSessionRequest.newBuilder()
+   *           .setName(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .build();
+   *   Session response = spannerClient.getSession(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Session getSession(GetSessionRequest request) { + return getSessionCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets a session. Returns `NOT_FOUND` if the session doesn't exist. This is mainly useful for + * determining whether a session is still alive. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   GetSessionRequest request =
+   *       GetSessionRequest.newBuilder()
+   *           .setName(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .build();
+   *   ApiFuture future = spannerClient.getSessionCallable().futureCall(request);
+   *   // Do something.
+   *   Session response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable getSessionCallable() { + return stub.getSessionCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all sessions in a given database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]");
+   *   for (Session element : spannerClient.listSessions(database).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param database Required. The database in which to list sessions. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListSessionsPagedResponse listSessions(DatabaseName database) { + ListSessionsRequest request = + ListSessionsRequest.newBuilder() + .setDatabase(database == null ? null : database.toString()) + .build(); + return listSessions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all sessions in a given database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   String database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString();
+   *   for (Session element : spannerClient.listSessions(database).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param database Required. The database in which to list sessions. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListSessionsPagedResponse listSessions(String database) { + ListSessionsRequest request = ListSessionsRequest.newBuilder().setDatabase(database).build(); + return listSessions(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all sessions in a given database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   ListSessionsRequest request =
+   *       ListSessionsRequest.newBuilder()
+   *           .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setFilter("filter-1274492040")
+   *           .build();
+   *   for (Session element : spannerClient.listSessions(request).iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListSessionsPagedResponse listSessions(ListSessionsRequest request) { + return listSessionsPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all sessions in a given database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   ListSessionsRequest request =
+   *       ListSessionsRequest.newBuilder()
+   *           .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setFilter("filter-1274492040")
+   *           .build();
+   *   ApiFuture future = spannerClient.listSessionsPagedCallable().futureCall(request);
+   *   // Do something.
+   *   for (Session element : future.get().iterateAll()) {
+   *     // doThingsWith(element);
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable + listSessionsPagedCallable() { + return stub.listSessionsPagedCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all sessions in a given database. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   ListSessionsRequest request =
+   *       ListSessionsRequest.newBuilder()
+   *           .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .setPageSize(883849137)
+   *           .setPageToken("pageToken873572522")
+   *           .setFilter("filter-1274492040")
+   *           .build();
+   *   while (true) {
+   *     ListSessionsResponse response = spannerClient.listSessionsCallable().call(request);
+   *     for (Session element : response.getSessionsList()) {
+   *       // doThingsWith(element);
+   *     }
+   *     String nextPageToken = response.getNextPageToken();
+   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
+   *       request = request.toBuilder().setPageToken(nextPageToken).build();
+   *     } else {
+   *       break;
+   *     }
+   *   }
+   * }
+   * }
+ */ + public final UnaryCallable listSessionsCallable() { + return stub.listSessionsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Ends a session, releasing server resources associated with it. This asynchronously triggers the + * cancellation of any operations that are running with this session. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   SessionName name = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]");
+   *   spannerClient.deleteSession(name);
+   * }
+   * }
+ * + * @param name Required. The name of the session to delete. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteSession(SessionName name) { + DeleteSessionRequest request = + DeleteSessionRequest.newBuilder().setName(name == null ? null : name.toString()).build(); + deleteSession(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Ends a session, releasing server resources associated with it. This asynchronously triggers the + * cancellation of any operations that are running with this session. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   String name = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString();
+   *   spannerClient.deleteSession(name);
+   * }
+   * }
+ * + * @param name Required. The name of the session to delete. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteSession(String name) { + DeleteSessionRequest request = DeleteSessionRequest.newBuilder().setName(name).build(); + deleteSession(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Ends a session, releasing server resources associated with it. This asynchronously triggers the + * cancellation of any operations that are running with this session. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   DeleteSessionRequest request =
+   *       DeleteSessionRequest.newBuilder()
+   *           .setName(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .build();
+   *   spannerClient.deleteSession(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteSession(DeleteSessionRequest request) { + deleteSessionCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Ends a session, releasing server resources associated with it. This asynchronously triggers the + * cancellation of any operations that are running with this session. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   DeleteSessionRequest request =
+   *       DeleteSessionRequest.newBuilder()
+   *           .setName(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .build();
+   *   ApiFuture future = spannerClient.deleteSessionCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }
+ */ + public final UnaryCallable deleteSessionCallable() { + return stub.deleteSessionCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Executes an SQL statement, returning all results in a single reply. This method can't be used + * to return a result set larger than 10 MiB; if the query yields more data than that, the query + * fails with a `FAILED_PRECONDITION` error. + * + *

Operations inside read-write transactions might return `ABORTED`. If this occurs, the + * application should restart the transaction from the beginning. See + * [Transaction][google.spanner.v1.Transaction] for more details. + * + *

Larger result sets can be fetched in streaming fashion by calling + * [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] instead. + * + *

The query string can be SQL or [Graph Query Language + * (GQL)](https://cloud.google.com/spanner/docs/reference/standard-sql/graph-intro). + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   ExecuteSqlRequest request =
+   *       ExecuteSqlRequest.newBuilder()
+   *           .setSession(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .setTransaction(TransactionSelector.newBuilder().build())
+   *           .setSql("sql114126")
+   *           .setParams(Struct.newBuilder().build())
+   *           .putAllParamTypes(new HashMap())
+   *           .setResumeToken(ByteString.EMPTY)
+   *           .setPartitionToken(ByteString.EMPTY)
+   *           .setSeqno(109325920)
+   *           .setQueryOptions(ExecuteSqlRequest.QueryOptions.newBuilder().build())
+   *           .setRequestOptions(RequestOptions.newBuilder().build())
+   *           .setDirectedReadOptions(DirectedReadOptions.newBuilder().build())
+   *           .setDataBoostEnabled(true)
+   *           .setLastStatement(true)
+   *           .setRoutingHint(RoutingHint.newBuilder().build())
+   *           .build();
+   *   ResultSet response = spannerClient.executeSql(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ResultSet executeSql(ExecuteSqlRequest request) { + return executeSqlCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Executes an SQL statement, returning all results in a single reply. This method can't be used + * to return a result set larger than 10 MiB; if the query yields more data than that, the query + * fails with a `FAILED_PRECONDITION` error. + * + *

Operations inside read-write transactions might return `ABORTED`. If this occurs, the + * application should restart the transaction from the beginning. See + * [Transaction][google.spanner.v1.Transaction] for more details. + * + *

Larger result sets can be fetched in streaming fashion by calling + * [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] instead. + * + *

The query string can be SQL or [Graph Query Language + * (GQL)](https://cloud.google.com/spanner/docs/reference/standard-sql/graph-intro). + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   ExecuteSqlRequest request =
+   *       ExecuteSqlRequest.newBuilder()
+   *           .setSession(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .setTransaction(TransactionSelector.newBuilder().build())
+   *           .setSql("sql114126")
+   *           .setParams(Struct.newBuilder().build())
+   *           .putAllParamTypes(new HashMap())
+   *           .setResumeToken(ByteString.EMPTY)
+   *           .setPartitionToken(ByteString.EMPTY)
+   *           .setSeqno(109325920)
+   *           .setQueryOptions(ExecuteSqlRequest.QueryOptions.newBuilder().build())
+   *           .setRequestOptions(RequestOptions.newBuilder().build())
+   *           .setDirectedReadOptions(DirectedReadOptions.newBuilder().build())
+   *           .setDataBoostEnabled(true)
+   *           .setLastStatement(true)
+   *           .setRoutingHint(RoutingHint.newBuilder().build())
+   *           .build();
+   *   ApiFuture future = spannerClient.executeSqlCallable().futureCall(request);
+   *   // Do something.
+   *   ResultSet response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable executeSqlCallable() { + return stub.executeSqlCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the result set as a + * stream. Unlike [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on the + * size of the returned result set. However, no individual row in the result set can exceed 100 + * MiB, and no column value can exceed 10 MiB. + * + *

The query string can be SQL or [Graph Query Language + * (GQL)](https://cloud.google.com/spanner/docs/reference/standard-sql/graph-intro). + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   ExecuteSqlRequest request =
+   *       ExecuteSqlRequest.newBuilder()
+   *           .setSession(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .setTransaction(TransactionSelector.newBuilder().build())
+   *           .setSql("sql114126")
+   *           .setParams(Struct.newBuilder().build())
+   *           .putAllParamTypes(new HashMap())
+   *           .setResumeToken(ByteString.EMPTY)
+   *           .setPartitionToken(ByteString.EMPTY)
+   *           .setSeqno(109325920)
+   *           .setQueryOptions(ExecuteSqlRequest.QueryOptions.newBuilder().build())
+   *           .setRequestOptions(RequestOptions.newBuilder().build())
+   *           .setDirectedReadOptions(DirectedReadOptions.newBuilder().build())
+   *           .setDataBoostEnabled(true)
+   *           .setLastStatement(true)
+   *           .setRoutingHint(RoutingHint.newBuilder().build())
+   *           .build();
+   *   ServerStream stream =
+   *       spannerClient.executeStreamingSqlCallable().call(request);
+   *   for (PartialResultSet response : stream) {
+   *     // Do something when a response is received.
+   *   }
+   * }
+   * }
+ */ + public final ServerStreamingCallable + executeStreamingSqlCallable() { + return stub.executeStreamingSqlCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Executes a batch of SQL DML statements. This method allows many statements to be run with lower + * latency than submitting them sequentially with + * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. + * + *

Statements are executed in sequential order. A request can succeed even if a statement + * fails. The [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] + * field in the response provides information about the statement that failed. Clients must + * inspect this field to determine whether an error occurred. + * + *

Execution stops after the first failed statement; the remaining statements are not executed. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   ExecuteBatchDmlRequest request =
+   *       ExecuteBatchDmlRequest.newBuilder()
+   *           .setSession(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .setTransaction(TransactionSelector.newBuilder().build())
+   *           .addAllStatements(new ArrayList())
+   *           .setSeqno(109325920)
+   *           .setRequestOptions(RequestOptions.newBuilder().build())
+   *           .setLastStatements(true)
+   *           .build();
+   *   ExecuteBatchDmlResponse response = spannerClient.executeBatchDml(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ExecuteBatchDmlResponse executeBatchDml(ExecuteBatchDmlRequest request) { + return executeBatchDmlCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Executes a batch of SQL DML statements. This method allows many statements to be run with lower + * latency than submitting them sequentially with + * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. + * + *

Statements are executed in sequential order. A request can succeed even if a statement + * fails. The [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] + * field in the response provides information about the statement that failed. Clients must + * inspect this field to determine whether an error occurred. + * + *

Execution stops after the first failed statement; the remaining statements are not executed. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   ExecuteBatchDmlRequest request =
+   *       ExecuteBatchDmlRequest.newBuilder()
+   *           .setSession(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .setTransaction(TransactionSelector.newBuilder().build())
+   *           .addAllStatements(new ArrayList())
+   *           .setSeqno(109325920)
+   *           .setRequestOptions(RequestOptions.newBuilder().build())
+   *           .setLastStatements(true)
+   *           .build();
+   *   ApiFuture future =
+   *       spannerClient.executeBatchDmlCallable().futureCall(request);
+   *   // Do something.
+   *   ExecuteBatchDmlResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable + executeBatchDmlCallable() { + return stub.executeBatchDmlCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Reads rows from the database using key lookups and scans, as a simple key/value style + * alternative to [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method can't be used to + * return a result set larger than 10 MiB; if the read matches more data than that, the read fails + * with a `FAILED_PRECONDITION` error. + * + *

Reads inside read-write transactions might return `ABORTED`. If this occurs, the application + * should restart the transaction from the beginning. See + * [Transaction][google.spanner.v1.Transaction] for more details. + * + *

Larger result sets can be yielded in streaming fashion by calling + * [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   ReadRequest request =
+   *       ReadRequest.newBuilder()
+   *           .setSession(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .setTransaction(TransactionSelector.newBuilder().build())
+   *           .setTable("table110115790")
+   *           .setIndex("index100346066")
+   *           .addAllColumns(new ArrayList())
+   *           .setKeySet(KeySet.newBuilder().build())
+   *           .setLimit(102976443)
+   *           .setResumeToken(ByteString.EMPTY)
+   *           .setPartitionToken(ByteString.EMPTY)
+   *           .setRequestOptions(RequestOptions.newBuilder().build())
+   *           .setDirectedReadOptions(DirectedReadOptions.newBuilder().build())
+   *           .setDataBoostEnabled(true)
+   *           .setRoutingHint(RoutingHint.newBuilder().build())
+   *           .build();
+   *   ResultSet response = spannerClient.read(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ResultSet read(ReadRequest request) { + return readCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Reads rows from the database using key lookups and scans, as a simple key/value style + * alternative to [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method can't be used to + * return a result set larger than 10 MiB; if the read matches more data than that, the read fails + * with a `FAILED_PRECONDITION` error. + * + *

Reads inside read-write transactions might return `ABORTED`. If this occurs, the application + * should restart the transaction from the beginning. See + * [Transaction][google.spanner.v1.Transaction] for more details. + * + *

Larger result sets can be yielded in streaming fashion by calling + * [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   ReadRequest request =
+   *       ReadRequest.newBuilder()
+   *           .setSession(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .setTransaction(TransactionSelector.newBuilder().build())
+   *           .setTable("table110115790")
+   *           .setIndex("index100346066")
+   *           .addAllColumns(new ArrayList())
+   *           .setKeySet(KeySet.newBuilder().build())
+   *           .setLimit(102976443)
+   *           .setResumeToken(ByteString.EMPTY)
+   *           .setPartitionToken(ByteString.EMPTY)
+   *           .setRequestOptions(RequestOptions.newBuilder().build())
+   *           .setDirectedReadOptions(DirectedReadOptions.newBuilder().build())
+   *           .setDataBoostEnabled(true)
+   *           .setRoutingHint(RoutingHint.newBuilder().build())
+   *           .build();
+   *   ApiFuture future = spannerClient.readCallable().futureCall(request);
+   *   // Do something.
+   *   ResultSet response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable readCallable() { + return stub.readCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Like [Read][google.spanner.v1.Spanner.Read], except returns the result set as a stream. Unlike + * [Read][google.spanner.v1.Spanner.Read], there is no limit on the size of the returned result + * set. However, no individual row in the result set can exceed 100 MiB, and no column value can + * exceed 10 MiB. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   ReadRequest request =
+   *       ReadRequest.newBuilder()
+   *           .setSession(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .setTransaction(TransactionSelector.newBuilder().build())
+   *           .setTable("table110115790")
+   *           .setIndex("index100346066")
+   *           .addAllColumns(new ArrayList())
+   *           .setKeySet(KeySet.newBuilder().build())
+   *           .setLimit(102976443)
+   *           .setResumeToken(ByteString.EMPTY)
+   *           .setPartitionToken(ByteString.EMPTY)
+   *           .setRequestOptions(RequestOptions.newBuilder().build())
+   *           .setDirectedReadOptions(DirectedReadOptions.newBuilder().build())
+   *           .setDataBoostEnabled(true)
+   *           .setRoutingHint(RoutingHint.newBuilder().build())
+   *           .build();
+   *   ServerStream stream = spannerClient.streamingReadCallable().call(request);
+   *   for (PartialResultSet response : stream) {
+   *     // Do something when a response is received.
+   *   }
+   * }
+   * }
+ */ + public final ServerStreamingCallable streamingReadCallable() { + return stub.streamingReadCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Begins a new transaction. This step can often be skipped: + * [Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + * [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a side-effect. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   SessionName session = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]");
+   *   TransactionOptions options = TransactionOptions.newBuilder().build();
+   *   Transaction response = spannerClient.beginTransaction(session, options);
+   * }
+   * }
+ * + * @param session Required. The session in which the transaction runs. + * @param options Required. Options for the new transaction. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Transaction beginTransaction(SessionName session, TransactionOptions options) { + BeginTransactionRequest request = + BeginTransactionRequest.newBuilder() + .setSession(session == null ? null : session.toString()) + .setOptions(options) + .build(); + return beginTransaction(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Begins a new transaction. This step can often be skipped: + * [Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + * [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a side-effect. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   String session =
+   *       SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString();
+   *   TransactionOptions options = TransactionOptions.newBuilder().build();
+   *   Transaction response = spannerClient.beginTransaction(session, options);
+   * }
+   * }
+ * + * @param session Required. The session in which the transaction runs. + * @param options Required. Options for the new transaction. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Transaction beginTransaction(String session, TransactionOptions options) { + BeginTransactionRequest request = + BeginTransactionRequest.newBuilder().setSession(session).setOptions(options).build(); + return beginTransaction(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Begins a new transaction. This step can often be skipped: + * [Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + * [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a side-effect. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   BeginTransactionRequest request =
+   *       BeginTransactionRequest.newBuilder()
+   *           .setSession(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .setOptions(TransactionOptions.newBuilder().build())
+   *           .setRequestOptions(RequestOptions.newBuilder().build())
+   *           .setMutationKey(Mutation.newBuilder().build())
+   *           .setRoutingHint(RoutingHint.newBuilder().build())
+   *           .build();
+   *   Transaction response = spannerClient.beginTransaction(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final Transaction beginTransaction(BeginTransactionRequest request) { + return beginTransactionCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Begins a new transaction. This step can often be skipped: + * [Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + * [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a side-effect. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   BeginTransactionRequest request =
+   *       BeginTransactionRequest.newBuilder()
+   *           .setSession(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .setOptions(TransactionOptions.newBuilder().build())
+   *           .setRequestOptions(RequestOptions.newBuilder().build())
+   *           .setMutationKey(Mutation.newBuilder().build())
+   *           .setRoutingHint(RoutingHint.newBuilder().build())
+   *           .build();
+   *   ApiFuture future = spannerClient.beginTransactionCallable().futureCall(request);
+   *   // Do something.
+   *   Transaction response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable beginTransactionCallable() { + return stub.beginTransactionCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Commits a transaction. The request includes the mutations to be applied to rows in the + * database. + * + *

`Commit` might return an `ABORTED` error. This can occur at any time; commonly, the cause is + * conflicts with concurrent transactions. However, it can also happen for a variety of other + * reasons. If `Commit` returns `ABORTED`, the caller should retry the transaction from the + * beginning, reusing the same session. + * + *

On very rare occasions, `Commit` might return `UNKNOWN`. This can happen, for example, if + * the client job experiences a 1+ hour networking failure. At that point, Cloud Spanner has lost + * track of the transaction outcome and we recommend that you perform another read from the + * database to see the state of things as they are now. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   SessionName session = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]");
+   *   ByteString transactionId = ByteString.EMPTY;
+   *   List mutations = new ArrayList<>();
+   *   CommitResponse response = spannerClient.commit(session, transactionId, mutations);
+   * }
+   * }
+ * + * @param session Required. The session in which the transaction to be committed is running. + * @param transactionId Commit a previously-started transaction. + * @param mutations The mutations to be executed when this transaction commits. All mutations are + * applied atomically, in the order they appear in this list. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final CommitResponse commit( + SessionName session, ByteString transactionId, List mutations) { + CommitRequest request = + CommitRequest.newBuilder() + .setSession(session == null ? null : session.toString()) + .setTransactionId(transactionId) + .addAllMutations(mutations) + .build(); + return commit(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Commits a transaction. The request includes the mutations to be applied to rows in the + * database. + * + *

`Commit` might return an `ABORTED` error. This can occur at any time; commonly, the cause is + * conflicts with concurrent transactions. However, it can also happen for a variety of other + * reasons. If `Commit` returns `ABORTED`, the caller should retry the transaction from the + * beginning, reusing the same session. + * + *

On very rare occasions, `Commit` might return `UNKNOWN`. This can happen, for example, if + * the client job experiences a 1+ hour networking failure. At that point, Cloud Spanner has lost + * track of the transaction outcome and we recommend that you perform another read from the + * database to see the state of things as they are now. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   SessionName session = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]");
+   *   TransactionOptions singleUseTransaction = TransactionOptions.newBuilder().build();
+   *   List mutations = new ArrayList<>();
+   *   CommitResponse response = spannerClient.commit(session, singleUseTransaction, mutations);
+   * }
+   * }
+ * + * @param session Required. The session in which the transaction to be committed is running. + * @param singleUseTransaction Execute mutations in a temporary transaction. Note that unlike + * commit of a previously-started transaction, commit with a temporary transaction is + * non-idempotent. That is, if the `CommitRequest` is sent to Cloud Spanner more than once + * (for instance, due to retries in the application, or in the transport library), it's + * possible that the mutations are executed more than once. If this is undesirable, use + * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and + * [Commit][google.spanner.v1.Spanner.Commit] instead. + * @param mutations The mutations to be executed when this transaction commits. All mutations are + * applied atomically, in the order they appear in this list. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final CommitResponse commit( + SessionName session, TransactionOptions singleUseTransaction, List mutations) { + CommitRequest request = + CommitRequest.newBuilder() + .setSession(session == null ? null : session.toString()) + .setSingleUseTransaction(singleUseTransaction) + .addAllMutations(mutations) + .build(); + return commit(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Commits a transaction. The request includes the mutations to be applied to rows in the + * database. + * + *

`Commit` might return an `ABORTED` error. This can occur at any time; commonly, the cause is + * conflicts with concurrent transactions. However, it can also happen for a variety of other + * reasons. If `Commit` returns `ABORTED`, the caller should retry the transaction from the + * beginning, reusing the same session. + * + *

On very rare occasions, `Commit` might return `UNKNOWN`. This can happen, for example, if + * the client job experiences a 1+ hour networking failure. At that point, Cloud Spanner has lost + * track of the transaction outcome and we recommend that you perform another read from the + * database to see the state of things as they are now. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   String session =
+   *       SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString();
+   *   ByteString transactionId = ByteString.EMPTY;
+   *   List mutations = new ArrayList<>();
+   *   CommitResponse response = spannerClient.commit(session, transactionId, mutations);
+   * }
+   * }
+ * + * @param session Required. The session in which the transaction to be committed is running. + * @param transactionId Commit a previously-started transaction. + * @param mutations The mutations to be executed when this transaction commits. All mutations are + * applied atomically, in the order they appear in this list. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final CommitResponse commit( + String session, ByteString transactionId, List mutations) { + CommitRequest request = + CommitRequest.newBuilder() + .setSession(session) + .setTransactionId(transactionId) + .addAllMutations(mutations) + .build(); + return commit(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Commits a transaction. The request includes the mutations to be applied to rows in the + * database. + * + *

`Commit` might return an `ABORTED` error. This can occur at any time; commonly, the cause is + * conflicts with concurrent transactions. However, it can also happen for a variety of other + * reasons. If `Commit` returns `ABORTED`, the caller should retry the transaction from the + * beginning, reusing the same session. + * + *

On very rare occasions, `Commit` might return `UNKNOWN`. This can happen, for example, if + * the client job experiences a 1+ hour networking failure. At that point, Cloud Spanner has lost + * track of the transaction outcome and we recommend that you perform another read from the + * database to see the state of things as they are now. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   String session =
+   *       SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString();
+   *   TransactionOptions singleUseTransaction = TransactionOptions.newBuilder().build();
+   *   List mutations = new ArrayList<>();
+   *   CommitResponse response = spannerClient.commit(session, singleUseTransaction, mutations);
+   * }
+   * }
+ * + * @param session Required. The session in which the transaction to be committed is running. + * @param singleUseTransaction Execute mutations in a temporary transaction. Note that unlike + * commit of a previously-started transaction, commit with a temporary transaction is + * non-idempotent. That is, if the `CommitRequest` is sent to Cloud Spanner more than once + * (for instance, due to retries in the application, or in the transport library), it's + * possible that the mutations are executed more than once. If this is undesirable, use + * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and + * [Commit][google.spanner.v1.Spanner.Commit] instead. + * @param mutations The mutations to be executed when this transaction commits. All mutations are + * applied atomically, in the order they appear in this list. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final CommitResponse commit( + String session, TransactionOptions singleUseTransaction, List mutations) { + CommitRequest request = + CommitRequest.newBuilder() + .setSession(session) + .setSingleUseTransaction(singleUseTransaction) + .addAllMutations(mutations) + .build(); + return commit(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Commits a transaction. The request includes the mutations to be applied to rows in the + * database. + * + *

`Commit` might return an `ABORTED` error. This can occur at any time; commonly, the cause is + * conflicts with concurrent transactions. However, it can also happen for a variety of other + * reasons. If `Commit` returns `ABORTED`, the caller should retry the transaction from the + * beginning, reusing the same session. + * + *

On very rare occasions, `Commit` might return `UNKNOWN`. This can happen, for example, if + * the client job experiences a 1+ hour networking failure. At that point, Cloud Spanner has lost + * track of the transaction outcome and we recommend that you perform another read from the + * database to see the state of things as they are now. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   CommitRequest request =
+   *       CommitRequest.newBuilder()
+   *           .setSession(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .addAllMutations(new ArrayList())
+   *           .setReturnCommitStats(true)
+   *           .setMaxCommitDelay(Duration.newBuilder().build())
+   *           .setRequestOptions(RequestOptions.newBuilder().build())
+   *           .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build())
+   *           .setRoutingHint(RoutingHint.newBuilder().build())
+   *           .build();
+   *   CommitResponse response = spannerClient.commit(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final CommitResponse commit(CommitRequest request) { + return commitCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Commits a transaction. The request includes the mutations to be applied to rows in the + * database. + * + *

`Commit` might return an `ABORTED` error. This can occur at any time; commonly, the cause is + * conflicts with concurrent transactions. However, it can also happen for a variety of other + * reasons. If `Commit` returns `ABORTED`, the caller should retry the transaction from the + * beginning, reusing the same session. + * + *

On very rare occasions, `Commit` might return `UNKNOWN`. This can happen, for example, if + * the client job experiences a 1+ hour networking failure. At that point, Cloud Spanner has lost + * track of the transaction outcome and we recommend that you perform another read from the + * database to see the state of things as they are now. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   CommitRequest request =
+   *       CommitRequest.newBuilder()
+   *           .setSession(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .addAllMutations(new ArrayList())
+   *           .setReturnCommitStats(true)
+   *           .setMaxCommitDelay(Duration.newBuilder().build())
+   *           .setRequestOptions(RequestOptions.newBuilder().build())
+   *           .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build())
+   *           .setRoutingHint(RoutingHint.newBuilder().build())
+   *           .build();
+   *   ApiFuture future = spannerClient.commitCallable().futureCall(request);
+   *   // Do something.
+   *   CommitResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable commitCallable() { + return stub.commitCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Rolls back a transaction, releasing any locks it holds. It's a good idea to call this for any + * transaction that includes one or more [Read][google.spanner.v1.Spanner.Read] or + * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately decides not to + * commit. + * + *

`Rollback` returns `OK` if it successfully aborts the transaction, the transaction was + * already aborted, or the transaction isn't found. `Rollback` never returns `ABORTED`. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   SessionName session = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]");
+   *   ByteString transactionId = ByteString.EMPTY;
+   *   spannerClient.rollback(session, transactionId);
+   * }
+   * }
+ * + * @param session Required. The session in which the transaction to roll back is running. + * @param transactionId Required. The transaction to roll back. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void rollback(SessionName session, ByteString transactionId) { + RollbackRequest request = + RollbackRequest.newBuilder() + .setSession(session == null ? null : session.toString()) + .setTransactionId(transactionId) + .build(); + rollback(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Rolls back a transaction, releasing any locks it holds. It's a good idea to call this for any + * transaction that includes one or more [Read][google.spanner.v1.Spanner.Read] or + * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately decides not to + * commit. + * + *

`Rollback` returns `OK` if it successfully aborts the transaction, the transaction was + * already aborted, or the transaction isn't found. `Rollback` never returns `ABORTED`. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   String session =
+   *       SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString();
+   *   ByteString transactionId = ByteString.EMPTY;
+   *   spannerClient.rollback(session, transactionId);
+   * }
+   * }
+ * + * @param session Required. The session in which the transaction to roll back is running. + * @param transactionId Required. The transaction to roll back. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void rollback(String session, ByteString transactionId) { + RollbackRequest request = + RollbackRequest.newBuilder().setSession(session).setTransactionId(transactionId).build(); + rollback(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Rolls back a transaction, releasing any locks it holds. It's a good idea to call this for any + * transaction that includes one or more [Read][google.spanner.v1.Spanner.Read] or + * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately decides not to + * commit. + * + *

`Rollback` returns `OK` if it successfully aborts the transaction, the transaction was + * already aborted, or the transaction isn't found. `Rollback` never returns `ABORTED`. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   RollbackRequest request =
+   *       RollbackRequest.newBuilder()
+   *           .setSession(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .setTransactionId(ByteString.EMPTY)
+   *           .build();
+   *   spannerClient.rollback(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void rollback(RollbackRequest request) { + rollbackCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Rolls back a transaction, releasing any locks it holds. It's a good idea to call this for any + * transaction that includes one or more [Read][google.spanner.v1.Spanner.Read] or + * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately decides not to + * commit. + * + *

`Rollback` returns `OK` if it successfully aborts the transaction, the transaction was + * already aborted, or the transaction isn't found. `Rollback` never returns `ABORTED`. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   RollbackRequest request =
+   *       RollbackRequest.newBuilder()
+   *           .setSession(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .setTransactionId(ByteString.EMPTY)
+   *           .build();
+   *   ApiFuture future = spannerClient.rollbackCallable().futureCall(request);
+   *   // Do something.
+   *   future.get();
+   * }
+   * }
+ */ + public final UnaryCallable rollbackCallable() { + return stub.rollbackCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a set of partition tokens that can be used to execute a query operation in parallel. + * Each of the returned partition tokens can be used by + * [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to specify a subset of the + * query result to read. The same session and read-only transaction must be used by the + * `PartitionQueryRequest` used to create the partition tokens and the `ExecuteSqlRequests` that + * use the partition tokens. + * + *

Partition tokens become invalid when the session used to create them is deleted, is idle for + * too long, begins a new transaction, or becomes too old. When any of these happen, it isn't + * possible to resume the query, and the whole operation must be restarted from the beginning. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   PartitionQueryRequest request =
+   *       PartitionQueryRequest.newBuilder()
+   *           .setSession(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .setTransaction(TransactionSelector.newBuilder().build())
+   *           .setSql("sql114126")
+   *           .setParams(Struct.newBuilder().build())
+   *           .putAllParamTypes(new HashMap())
+   *           .setPartitionOptions(PartitionOptions.newBuilder().build())
+   *           .build();
+   *   PartitionResponse response = spannerClient.partitionQuery(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final PartitionResponse partitionQuery(PartitionQueryRequest request) { + return partitionQueryCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a set of partition tokens that can be used to execute a query operation in parallel. + * Each of the returned partition tokens can be used by + * [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to specify a subset of the + * query result to read. The same session and read-only transaction must be used by the + * `PartitionQueryRequest` used to create the partition tokens and the `ExecuteSqlRequests` that + * use the partition tokens. + * + *

Partition tokens become invalid when the session used to create them is deleted, is idle for + * too long, begins a new transaction, or becomes too old. When any of these happen, it isn't + * possible to resume the query, and the whole operation must be restarted from the beginning. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   PartitionQueryRequest request =
+   *       PartitionQueryRequest.newBuilder()
+   *           .setSession(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .setTransaction(TransactionSelector.newBuilder().build())
+   *           .setSql("sql114126")
+   *           .setParams(Struct.newBuilder().build())
+   *           .putAllParamTypes(new HashMap())
+   *           .setPartitionOptions(PartitionOptions.newBuilder().build())
+   *           .build();
+   *   ApiFuture future =
+   *       spannerClient.partitionQueryCallable().futureCall(request);
+   *   // Do something.
+   *   PartitionResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable partitionQueryCallable() { + return stub.partitionQueryCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a set of partition tokens that can be used to execute a read operation in parallel. + * Each of the returned partition tokens can be used by + * [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a subset of the read result + * to read. The same session and read-only transaction must be used by the `PartitionReadRequest` + * used to create the partition tokens and the `ReadRequests` that use the partition tokens. There + * are no ordering guarantees on rows returned among the returned partition tokens, or even within + * each individual `StreamingRead` call issued with a `partition_token`. + * + *

Partition tokens become invalid when the session used to create them is deleted, is idle for + * too long, begins a new transaction, or becomes too old. When any of these happen, it isn't + * possible to resume the read, and the whole operation must be restarted from the beginning. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   PartitionReadRequest request =
+   *       PartitionReadRequest.newBuilder()
+   *           .setSession(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .setTransaction(TransactionSelector.newBuilder().build())
+   *           .setTable("table110115790")
+   *           .setIndex("index100346066")
+   *           .addAllColumns(new ArrayList())
+   *           .setKeySet(KeySet.newBuilder().build())
+   *           .setPartitionOptions(PartitionOptions.newBuilder().build())
+   *           .build();
+   *   PartitionResponse response = spannerClient.partitionRead(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final PartitionResponse partitionRead(PartitionReadRequest request) { + return partitionReadCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a set of partition tokens that can be used to execute a read operation in parallel. + * Each of the returned partition tokens can be used by + * [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a subset of the read result + * to read. The same session and read-only transaction must be used by the `PartitionReadRequest` + * used to create the partition tokens and the `ReadRequests` that use the partition tokens. There + * are no ordering guarantees on rows returned among the returned partition tokens, or even within + * each individual `StreamingRead` call issued with a `partition_token`. + * + *

Partition tokens become invalid when the session used to create them is deleted, is idle for + * too long, begins a new transaction, or becomes too old. When any of these happen, it isn't + * possible to resume the read, and the whole operation must be restarted from the beginning. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   PartitionReadRequest request =
+   *       PartitionReadRequest.newBuilder()
+   *           .setSession(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .setTransaction(TransactionSelector.newBuilder().build())
+   *           .setTable("table110115790")
+   *           .setIndex("index100346066")
+   *           .addAllColumns(new ArrayList())
+   *           .setKeySet(KeySet.newBuilder().build())
+   *           .setPartitionOptions(PartitionOptions.newBuilder().build())
+   *           .build();
+   *   ApiFuture future =
+   *       spannerClient.partitionReadCallable().futureCall(request);
+   *   // Do something.
+   *   PartitionResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable partitionReadCallable() { + return stub.partitionReadCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Batches the supplied mutation groups in a collection of efficient transactions. All mutations + * in a group are committed atomically. However, mutations across groups can be committed + * non-atomically in an unspecified order and thus, they must be independent of each other. + * Partial failure is possible, that is, some groups might have been committed successfully, while + * some might have failed. The results of individual batches are streamed into the response as the + * batches are applied. + * + *

`BatchWrite` requests are not replay protected, meaning that each mutation group can be + * applied more than once. Replays of non-idempotent mutations can have undesirable effects. For + * example, replays of an insert mutation can produce an already exists error or if you use + * generated or commit timestamp-based keys, it can result in additional rows being added to the + * mutation's table. We recommend structuring your mutation groups to be idempotent to avoid this + * issue. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (SpannerClient spannerClient = SpannerClient.create()) {
+   *   BatchWriteRequest request =
+   *       BatchWriteRequest.newBuilder()
+   *           .setSession(
+   *               SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString())
+   *           .setRequestOptions(RequestOptions.newBuilder().build())
+   *           .addAllMutationGroups(new ArrayList())
+   *           .setExcludeTxnFromChangeStreams(true)
+   *           .build();
+   *   ServerStream stream = spannerClient.batchWriteCallable().call(request);
+   *   for (BatchWriteResponse response : stream) {
+   *     // Do something when a response is received.
+   *   }
+   * }
+   * }
+ */ + public final ServerStreamingCallable batchWriteCallable() { + return stub.batchWriteCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } + + public static class ListSessionsPagedResponse + extends AbstractPagedListResponse< + ListSessionsRequest, + ListSessionsResponse, + Session, + ListSessionsPage, + ListSessionsFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext context, + ApiFuture futureResponse) { + ApiFuture futurePage = + ListSessionsPage.createEmptyPage().createPageAsync(context, futureResponse); + return ApiFutures.transform( + futurePage, + input -> new ListSessionsPagedResponse(input), + MoreExecutors.directExecutor()); + } + + private ListSessionsPagedResponse(ListSessionsPage page) { + super(page, ListSessionsFixedSizeCollection.createEmptyCollection()); + } + } + + public static class ListSessionsPage + extends AbstractPage { + + private ListSessionsPage( + PageContext context, + ListSessionsResponse response) { + super(context, response); + } + + private static ListSessionsPage createEmptyPage() { + return new ListSessionsPage(null, null); + } + + @Override + protected ListSessionsPage createPage( + PageContext context, + ListSessionsResponse response) { + return new ListSessionsPage(context, response); + } + + @Override + public ApiFuture createPageAsync( + PageContext context, + ApiFuture futureResponse) { + return super.createPageAsync(context, futureResponse); + } + } + + public static class ListSessionsFixedSizeCollection + extends 
AbstractFixedSizeCollection< + ListSessionsRequest, + ListSessionsResponse, + Session, + ListSessionsPage, + ListSessionsFixedSizeCollection> { + + private ListSessionsFixedSizeCollection(List pages, int collectionSize) { + super(pages, collectionSize); + } + + private static ListSessionsFixedSizeCollection createEmptyCollection() { + return new ListSessionsFixedSizeCollection(null, 0); + } + + @Override + protected ListSessionsFixedSizeCollection createCollection( + List pages, int collectionSize) { + return new ListSessionsFixedSizeCollection(pages, collectionSize); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/SpannerSettings.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/SpannerSettings.java new file mode 100644 index 000000000000..49b7de0cd9f4 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/SpannerSettings.java @@ -0,0 +1,406 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.v1; + +import static com.google.cloud.spanner.v1.SpannerClient.ListSessionsPagedResponse; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.httpjson.InstantiatingHttpJsonChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.spanner.v1.stub.SpannerStubSettings; +import com.google.protobuf.Empty; +import com.google.spanner.v1.BatchCreateSessionsRequest; +import com.google.spanner.v1.BatchCreateSessionsResponse; +import com.google.spanner.v1.BatchWriteRequest; +import com.google.spanner.v1.BatchWriteResponse; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.CommitResponse; +import com.google.spanner.v1.CreateSessionRequest; +import com.google.spanner.v1.DeleteSessionRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteBatchDmlResponse; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.GetSessionRequest; +import com.google.spanner.v1.ListSessionsRequest; +import com.google.spanner.v1.ListSessionsResponse; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.PartitionQueryRequest; +import com.google.spanner.v1.PartitionReadRequest; +import com.google.spanner.v1.PartitionResponse; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.ResultSet; 
+import com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.Session; +import com.google.spanner.v1.Transaction; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link SpannerClient}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (spanner.googleapis.com) and default port (443) are used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of createSession: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * SpannerSettings.Builder spannerSettingsBuilder = SpannerSettings.newBuilder();
+ * spannerSettingsBuilder
+ *     .createSessionSettings()
+ *     .setRetrySettings(
+ *         spannerSettingsBuilder
+ *             .createSessionSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * SpannerSettings spannerSettings = spannerSettingsBuilder.build();
+ * }
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + */ +@Generated("by gapic-generator-java") +public class SpannerSettings extends ClientSettings { + + /** Returns the object with the settings used for calls to createSession. */ + public UnaryCallSettings createSessionSettings() { + return ((SpannerStubSettings) getStubSettings()).createSessionSettings(); + } + + /** Returns the object with the settings used for calls to batchCreateSessions. */ + public UnaryCallSettings + batchCreateSessionsSettings() { + return ((SpannerStubSettings) getStubSettings()).batchCreateSessionsSettings(); + } + + /** Returns the object with the settings used for calls to getSession. */ + public UnaryCallSettings getSessionSettings() { + return ((SpannerStubSettings) getStubSettings()).getSessionSettings(); + } + + /** Returns the object with the settings used for calls to listSessions. */ + public PagedCallSettings + listSessionsSettings() { + return ((SpannerStubSettings) getStubSettings()).listSessionsSettings(); + } + + /** Returns the object with the settings used for calls to deleteSession. */ + public UnaryCallSettings deleteSessionSettings() { + return ((SpannerStubSettings) getStubSettings()).deleteSessionSettings(); + } + + /** Returns the object with the settings used for calls to executeSql. */ + public UnaryCallSettings executeSqlSettings() { + return ((SpannerStubSettings) getStubSettings()).executeSqlSettings(); + } + + /** Returns the object with the settings used for calls to executeStreamingSql. */ + public ServerStreamingCallSettings + executeStreamingSqlSettings() { + return ((SpannerStubSettings) getStubSettings()).executeStreamingSqlSettings(); + } + + /** Returns the object with the settings used for calls to executeBatchDml. 
*/ + public UnaryCallSettings + executeBatchDmlSettings() { + return ((SpannerStubSettings) getStubSettings()).executeBatchDmlSettings(); + } + + /** Returns the object with the settings used for calls to read. */ + public UnaryCallSettings readSettings() { + return ((SpannerStubSettings) getStubSettings()).readSettings(); + } + + /** Returns the object with the settings used for calls to streamingRead. */ + public ServerStreamingCallSettings streamingReadSettings() { + return ((SpannerStubSettings) getStubSettings()).streamingReadSettings(); + } + + /** Returns the object with the settings used for calls to beginTransaction. */ + public UnaryCallSettings beginTransactionSettings() { + return ((SpannerStubSettings) getStubSettings()).beginTransactionSettings(); + } + + /** Returns the object with the settings used for calls to commit. */ + public UnaryCallSettings commitSettings() { + return ((SpannerStubSettings) getStubSettings()).commitSettings(); + } + + /** Returns the object with the settings used for calls to rollback. */ + public UnaryCallSettings rollbackSettings() { + return ((SpannerStubSettings) getStubSettings()).rollbackSettings(); + } + + /** Returns the object with the settings used for calls to partitionQuery. */ + public UnaryCallSettings partitionQuerySettings() { + return ((SpannerStubSettings) getStubSettings()).partitionQuerySettings(); + } + + /** Returns the object with the settings used for calls to partitionRead. */ + public UnaryCallSettings partitionReadSettings() { + return ((SpannerStubSettings) getStubSettings()).partitionReadSettings(); + } + + /** Returns the object with the settings used for calls to batchWrite. 
*/ + public ServerStreamingCallSettings batchWriteSettings() { + return ((SpannerStubSettings) getStubSettings()).batchWriteSettings(); + } + + public static final SpannerSettings create(SpannerStubSettings stub) throws IOException { + return new SpannerSettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return SpannerStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return SpannerStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return SpannerStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return SpannerStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default gRPC ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return SpannerStubSettings.defaultGrpcTransportProviderBuilder(); + } + + /** Returns a builder for the default REST ChannelProvider for this service. */ + @BetaApi + public static InstantiatingHttpJsonChannelProvider.Builder + defaultHttpJsonTransportProviderBuilder() { + return SpannerStubSettings.defaultHttpJsonTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return SpannerStubSettings.defaultTransportChannelProvider(); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return SpannerStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new gRPC builder for this class. 
*/ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new REST builder for this class. */ + public static Builder newHttpJsonBuilder() { + return Builder.createHttpJsonDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected SpannerSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for SpannerSettings. */ + public static class Builder extends ClientSettings.Builder { + + protected Builder() throws IOException { + this(((ClientContext) null)); + } + + protected Builder(ClientContext clientContext) { + super(SpannerStubSettings.newBuilder(clientContext)); + } + + protected Builder(SpannerSettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(SpannerStubSettings.Builder stubSettings) { + super(stubSettings); + } + + private static Builder createDefault() { + return new Builder(SpannerStubSettings.newBuilder()); + } + + private static Builder createHttpJsonDefault() { + return new Builder(SpannerStubSettings.newHttpJsonBuilder()); + } + + public SpannerStubSettings.Builder getStubSettingsBuilder() { + return ((SpannerStubSettings.Builder) getStubSettings()); + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + /** Returns the builder for the settings used for calls to createSession. */ + public UnaryCallSettings.Builder createSessionSettings() { + return getStubSettingsBuilder().createSessionSettings(); + } + + /** Returns the builder for the settings used for calls to batchCreateSessions. */ + public UnaryCallSettings.Builder + batchCreateSessionsSettings() { + return getStubSettingsBuilder().batchCreateSessionsSettings(); + } + + /** Returns the builder for the settings used for calls to getSession. */ + public UnaryCallSettings.Builder getSessionSettings() { + return getStubSettingsBuilder().getSessionSettings(); + } + + /** Returns the builder for the settings used for calls to listSessions. */ + public PagedCallSettings.Builder< + ListSessionsRequest, ListSessionsResponse, ListSessionsPagedResponse> + listSessionsSettings() { + return getStubSettingsBuilder().listSessionsSettings(); + } + + /** Returns the builder for the settings used for calls to deleteSession. */ + public UnaryCallSettings.Builder deleteSessionSettings() { + return getStubSettingsBuilder().deleteSessionSettings(); + } + + /** Returns the builder for the settings used for calls to executeSql. */ + public UnaryCallSettings.Builder executeSqlSettings() { + return getStubSettingsBuilder().executeSqlSettings(); + } + + /** Returns the builder for the settings used for calls to executeStreamingSql. */ + public ServerStreamingCallSettings.Builder + executeStreamingSqlSettings() { + return getStubSettingsBuilder().executeStreamingSqlSettings(); + } + + /** Returns the builder for the settings used for calls to executeBatchDml. 
*/ + public UnaryCallSettings.Builder + executeBatchDmlSettings() { + return getStubSettingsBuilder().executeBatchDmlSettings(); + } + + /** Returns the builder for the settings used for calls to read. */ + public UnaryCallSettings.Builder readSettings() { + return getStubSettingsBuilder().readSettings(); + } + + /** Returns the builder for the settings used for calls to streamingRead. */ + public ServerStreamingCallSettings.Builder + streamingReadSettings() { + return getStubSettingsBuilder().streamingReadSettings(); + } + + /** Returns the builder for the settings used for calls to beginTransaction. */ + public UnaryCallSettings.Builder + beginTransactionSettings() { + return getStubSettingsBuilder().beginTransactionSettings(); + } + + /** Returns the builder for the settings used for calls to commit. */ + public UnaryCallSettings.Builder commitSettings() { + return getStubSettingsBuilder().commitSettings(); + } + + /** Returns the builder for the settings used for calls to rollback. */ + public UnaryCallSettings.Builder rollbackSettings() { + return getStubSettingsBuilder().rollbackSettings(); + } + + /** Returns the builder for the settings used for calls to partitionQuery. */ + public UnaryCallSettings.Builder + partitionQuerySettings() { + return getStubSettingsBuilder().partitionQuerySettings(); + } + + /** Returns the builder for the settings used for calls to partitionRead. */ + public UnaryCallSettings.Builder + partitionReadSettings() { + return getStubSettingsBuilder().partitionReadSettings(); + } + + /** Returns the builder for the settings used for calls to batchWrite. 
*/ + public ServerStreamingCallSettings.Builder + batchWriteSettings() { + return getStubSettingsBuilder().batchWriteSettings(); + } + + @Override + public SpannerSettings build() throws IOException { + return new SpannerSettings(this); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/gapic_metadata.json b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/gapic_metadata.json new file mode 100644 index 000000000000..a44723b07309 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/gapic_metadata.json @@ -0,0 +1,66 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "java", + "protoPackage": "google.spanner.v1", + "libraryPackage": "com.google.cloud.spanner.v1", + "services": { + "Spanner": { + "clients": { + "grpc": { + "libraryClient": "SpannerClient", + "rpcs": { + "BatchCreateSessions": { + "methods": ["batchCreateSessions", "batchCreateSessions", "batchCreateSessions", "batchCreateSessionsCallable"] + }, + "BatchWrite": { + "methods": ["batchWriteCallable"] + }, + "BeginTransaction": { + "methods": ["beginTransaction", "beginTransaction", "beginTransaction", "beginTransactionCallable"] + }, + "Commit": { + "methods": ["commit", "commit", "commit", "commit", "commit", "commitCallable"] + }, + "CreateSession": { + "methods": ["createSession", "createSession", "createSession", "createSessionCallable"] + }, + "DeleteSession": { + "methods": ["deleteSession", "deleteSession", "deleteSession", "deleteSessionCallable"] + }, + "ExecuteBatchDml": { + "methods": ["executeBatchDml", "executeBatchDmlCallable"] + }, + "ExecuteSql": { + "methods": ["executeSql", "executeSqlCallable"] + }, + "ExecuteStreamingSql": { + "methods": ["executeStreamingSqlCallable"] + }, + "GetSession": { + "methods": ["getSession", "getSession", "getSession", "getSessionCallable"] + }, + 
"ListSessions": { + "methods": ["listSessions", "listSessions", "listSessions", "listSessionsPagedCallable", "listSessionsCallable"] + }, + "PartitionQuery": { + "methods": ["partitionQuery", "partitionQueryCallable"] + }, + "PartitionRead": { + "methods": ["partitionRead", "partitionReadCallable"] + }, + "Read": { + "methods": ["read", "readCallable"] + }, + "Rollback": { + "methods": ["rollback", "rollback", "rollback", "rollbackCallable"] + }, + "StreamingRead": { + "methods": ["streamingReadCallable"] + } + } + } + } + } + } +} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/package-info.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/package-info.java new file mode 100644 index 000000000000..5379035e913a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/package-info.java @@ -0,0 +1,46 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A client to Cloud Spanner API + * + *

The interfaces provided are listed below, along with usage samples. + * + *

======================= SpannerClient ======================= + * + *

Service Description: Cloud Spanner API + * + *

The Cloud Spanner API can be used to manage sessions and execute transactions on data stored + * in Cloud Spanner databases. + * + *

Sample for SpannerClient: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (SpannerClient spannerClient = SpannerClient.create()) {
+ *   DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]");
+ *   Session response = spannerClient.createSession(database);
+ * }
+ * }
+ */ +@Generated("by gapic-generator-java") +package com.google.cloud.spanner.v1; + +import javax.annotation.Generated; diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/GrpcSpannerCallableFactory.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/GrpcSpannerCallableFactory.java new file mode 100644 index 000000000000..afb24becec43 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/GrpcSpannerCallableFactory.java @@ -0,0 +1,113 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.v1.stub; + +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC callable factory implementation for the Spanner service API. + * + *

This class is for advanced usage. + */ +@Generated("by gapic-generator-java") +public class GrpcSpannerCallableFactory implements GrpcStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, callSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( + GrpcCallSettings 
grpcCallSettings, + StreamingCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, callSettings, clientContext); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/GrpcSpannerStub.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/GrpcSpannerStub.java new file mode 100644 index 000000000000..ab3ac0932d63 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/GrpcSpannerStub.java @@ -0,0 +1,658 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.v1.stub; + +import static com.google.cloud.spanner.v1.SpannerClient.ListSessionsPagedResponse; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.RequestParamsBuilder; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.stub.GrpcOperationsStub; +import com.google.protobuf.Empty; +import com.google.spanner.v1.BatchCreateSessionsRequest; +import com.google.spanner.v1.BatchCreateSessionsResponse; +import com.google.spanner.v1.BatchWriteRequest; +import com.google.spanner.v1.BatchWriteResponse; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.CommitResponse; +import com.google.spanner.v1.CreateSessionRequest; +import com.google.spanner.v1.DeleteSessionRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteBatchDmlResponse; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.GetSessionRequest; +import com.google.spanner.v1.ListSessionsRequest; +import com.google.spanner.v1.ListSessionsResponse; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.PartitionQueryRequest; +import com.google.spanner.v1.PartitionReadRequest; +import com.google.spanner.v1.PartitionResponse; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.Session; +import com.google.spanner.v1.Transaction; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import 
javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * gRPC stub implementation for the Spanner service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public class GrpcSpannerStub extends SpannerStub { + private static final MethodDescriptor + createSessionMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.v1.Spanner/CreateSession") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateSessionRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Session.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + batchCreateSessionsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.v1.Spanner/BatchCreateSessions") + .setRequestMarshaller( + ProtoUtils.marshaller(BatchCreateSessionsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(BatchCreateSessionsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor getSessionMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.v1.Spanner/GetSession") + .setRequestMarshaller(ProtoUtils.marshaller(GetSessionRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Session.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + listSessionsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.v1.Spanner/ListSessions") + .setRequestMarshaller(ProtoUtils.marshaller(ListSessionsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListSessionsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor 
deleteSessionMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.v1.Spanner/DeleteSession") + .setRequestMarshaller(ProtoUtils.marshaller(DeleteSessionRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor executeSqlMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.v1.Spanner/ExecuteSql") + .setRequestMarshaller(ProtoUtils.marshaller(ExecuteSqlRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(ResultSet.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + executeStreamingSqlMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName("google.spanner.v1.Spanner/ExecuteStreamingSql") + .setRequestMarshaller(ProtoUtils.marshaller(ExecuteSqlRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(PartialResultSet.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + executeBatchDmlMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.v1.Spanner/ExecuteBatchDml") + .setRequestMarshaller( + ProtoUtils.marshaller(ExecuteBatchDmlRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ExecuteBatchDmlResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor readMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.v1.Spanner/Read") + 
.setRequestMarshaller(ProtoUtils.marshaller(ReadRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(ResultSet.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + streamingReadMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName("google.spanner.v1.Spanner/StreamingRead") + .setRequestMarshaller(ProtoUtils.marshaller(ReadRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(PartialResultSet.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + beginTransactionMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.v1.Spanner/BeginTransaction") + .setRequestMarshaller( + ProtoUtils.marshaller(BeginTransactionRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Transaction.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor commitMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.v1.Spanner/Commit") + .setRequestMarshaller(ProtoUtils.marshaller(CommitRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(CommitResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor rollbackMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.v1.Spanner/Rollback") + .setRequestMarshaller(ProtoUtils.marshaller(RollbackRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + partitionQueryMethodDescriptor 
= + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.v1.Spanner/PartitionQuery") + .setRequestMarshaller( + ProtoUtils.marshaller(PartitionQueryRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(PartitionResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + partitionReadMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.v1.Spanner/PartitionRead") + .setRequestMarshaller( + ProtoUtils.marshaller(PartitionReadRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(PartitionResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private static final MethodDescriptor + batchWriteMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName("google.spanner.v1.Spanner/BatchWrite") + .setRequestMarshaller(ProtoUtils.marshaller(BatchWriteRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(BatchWriteResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) + .build(); + + private final UnaryCallable createSessionCallable; + private final UnaryCallable + batchCreateSessionsCallable; + private final UnaryCallable getSessionCallable; + private final UnaryCallable listSessionsCallable; + private final UnaryCallable + listSessionsPagedCallable; + private final UnaryCallable deleteSessionCallable; + private final UnaryCallable executeSqlCallable; + private final ServerStreamingCallable + executeStreamingSqlCallable; + private final UnaryCallable + executeBatchDmlCallable; + private final UnaryCallable readCallable; + private final ServerStreamingCallable streamingReadCallable; + private final UnaryCallable beginTransactionCallable; + private final UnaryCallable commitCallable; + private final 
UnaryCallable rollbackCallable; + private final UnaryCallable partitionQueryCallable; + private final UnaryCallable partitionReadCallable; + private final ServerStreamingCallable batchWriteCallable; + + private final BackgroundResource backgroundResources; + private final GrpcOperationsStub operationsStub; + private final GrpcStubCallableFactory callableFactory; + + public static final GrpcSpannerStub create(SpannerStubSettings settings) throws IOException { + return new GrpcSpannerStub(settings, ClientContext.create(settings)); + } + + public static final GrpcSpannerStub create(ClientContext clientContext) throws IOException { + return new GrpcSpannerStub(SpannerStubSettings.newBuilder().build(), clientContext); + } + + public static final GrpcSpannerStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcSpannerStub( + SpannerStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcSpannerStub, using the given settings. This is protected so that + * it is easy to make a subclass, but otherwise, the static factory methods should be preferred. + */ + protected GrpcSpannerStub(SpannerStubSettings settings, ClientContext clientContext) + throws IOException { + this(settings, clientContext, new GrpcSpannerCallableFactory()); + } + + /** + * Constructs an instance of GrpcSpannerStub, using the given settings. This is protected so that + * it is easy to make a subclass, but otherwise, the static factory methods should be preferred. 
+ */ + protected GrpcSpannerStub( + SpannerStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + this.operationsStub = GrpcOperationsStub.create(clientContext, callableFactory); + + GrpcCallSettings createSessionTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createSessionMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("database", String.valueOf(request.getDatabase())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getDatabase()) + .build(); + GrpcCallSettings + batchCreateSessionsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(batchCreateSessionsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("database", String.valueOf(request.getDatabase())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getDatabase()) + .build(); + GrpcCallSettings getSessionTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getSessionMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + GrpcCallSettings listSessionsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listSessionsMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("database", String.valueOf(request.getDatabase())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getDatabase()) + .build(); + GrpcCallSettings deleteSessionTransportSettings = + 
GrpcCallSettings.newBuilder() + .setMethodDescriptor(deleteSessionMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + GrpcCallSettings executeSqlTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(executeSqlMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getSession()) + .build(); + GrpcCallSettings executeStreamingSqlTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(executeStreamingSqlMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getSession()) + .build(); + GrpcCallSettings + executeBatchDmlTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(executeBatchDmlMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getSession()) + .build(); + GrpcCallSettings readTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(readMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getSession()) + .build(); + GrpcCallSettings 
streamingReadTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(streamingReadMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getSession()) + .build(); + GrpcCallSettings beginTransactionTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(beginTransactionMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getSession()) + .build(); + GrpcCallSettings commitTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(commitMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getSession()) + .build(); + GrpcCallSettings rollbackTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(rollbackMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getSession()) + .build(); + GrpcCallSettings partitionQueryTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(partitionQueryMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getSession()) + 
.build(); + GrpcCallSettings partitionReadTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(partitionReadMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getSession()) + .build(); + GrpcCallSettings batchWriteTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(batchWriteMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getSession()) + .build(); + + this.createSessionCallable = + callableFactory.createUnaryCallable( + createSessionTransportSettings, settings.createSessionSettings(), clientContext); + this.batchCreateSessionsCallable = + callableFactory.createUnaryCallable( + batchCreateSessionsTransportSettings, + settings.batchCreateSessionsSettings(), + clientContext); + this.getSessionCallable = + callableFactory.createUnaryCallable( + getSessionTransportSettings, settings.getSessionSettings(), clientContext); + this.listSessionsCallable = + callableFactory.createUnaryCallable( + listSessionsTransportSettings, settings.listSessionsSettings(), clientContext); + this.listSessionsPagedCallable = + callableFactory.createPagedCallable( + listSessionsTransportSettings, settings.listSessionsSettings(), clientContext); + this.deleteSessionCallable = + callableFactory.createUnaryCallable( + deleteSessionTransportSettings, settings.deleteSessionSettings(), clientContext); + this.executeSqlCallable = + callableFactory.createUnaryCallable( + executeSqlTransportSettings, settings.executeSqlSettings(), clientContext); + this.executeStreamingSqlCallable = + callableFactory.createServerStreamingCallable( + 
executeStreamingSqlTransportSettings, + settings.executeStreamingSqlSettings(), + clientContext); + this.executeBatchDmlCallable = + callableFactory.createUnaryCallable( + executeBatchDmlTransportSettings, settings.executeBatchDmlSettings(), clientContext); + this.readCallable = + callableFactory.createUnaryCallable( + readTransportSettings, settings.readSettings(), clientContext); + this.streamingReadCallable = + callableFactory.createServerStreamingCallable( + streamingReadTransportSettings, settings.streamingReadSettings(), clientContext); + this.beginTransactionCallable = + callableFactory.createUnaryCallable( + beginTransactionTransportSettings, settings.beginTransactionSettings(), clientContext); + this.commitCallable = + callableFactory.createUnaryCallable( + commitTransportSettings, settings.commitSettings(), clientContext); + this.rollbackCallable = + callableFactory.createUnaryCallable( + rollbackTransportSettings, settings.rollbackSettings(), clientContext); + this.partitionQueryCallable = + callableFactory.createUnaryCallable( + partitionQueryTransportSettings, settings.partitionQuerySettings(), clientContext); + this.partitionReadCallable = + callableFactory.createUnaryCallable( + partitionReadTransportSettings, settings.partitionReadSettings(), clientContext); + this.batchWriteCallable = + callableFactory.createServerStreamingCallable( + batchWriteTransportSettings, settings.batchWriteSettings(), clientContext); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + public GrpcOperationsStub getOperationsStub() { + return operationsStub; + } + + @Override + public UnaryCallable createSessionCallable() { + return createSessionCallable; + } + + @Override + public UnaryCallable + batchCreateSessionsCallable() { + return batchCreateSessionsCallable; + } + + @Override + public UnaryCallable getSessionCallable() { + return getSessionCallable; + } + + @Override + public UnaryCallable 
listSessionsCallable() { + return listSessionsCallable; + } + + @Override + public UnaryCallable listSessionsPagedCallable() { + return listSessionsPagedCallable; + } + + @Override + public UnaryCallable deleteSessionCallable() { + return deleteSessionCallable; + } + + @Override + public UnaryCallable executeSqlCallable() { + return executeSqlCallable; + } + + @Override + public ServerStreamingCallable + executeStreamingSqlCallable() { + return executeStreamingSqlCallable; + } + + @Override + public UnaryCallable executeBatchDmlCallable() { + return executeBatchDmlCallable; + } + + @Override + public UnaryCallable readCallable() { + return readCallable; + } + + @Override + public ServerStreamingCallable streamingReadCallable() { + return streamingReadCallable; + } + + @Override + public UnaryCallable beginTransactionCallable() { + return beginTransactionCallable; + } + + @Override + public UnaryCallable commitCallable() { + return commitCallable; + } + + @Override + public UnaryCallable rollbackCallable() { + return rollbackCallable; + } + + @Override + public UnaryCallable partitionQueryCallable() { + return partitionQueryCallable; + } + + @Override + public UnaryCallable partitionReadCallable() { + return partitionReadCallable; + } + + @Override + public ServerStreamingCallable batchWriteCallable() { + return batchWriteCallable; + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long 
duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/HttpJsonSpannerCallableFactory.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/HttpJsonSpannerCallableFactory.java new file mode 100644 index 000000000000..df6025750396 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/HttpJsonSpannerCallableFactory.java @@ -0,0 +1,101 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.v1.stub; + +import com.google.api.gax.httpjson.HttpJsonCallSettings; +import com.google.api.gax.httpjson.HttpJsonCallableFactory; +import com.google.api.gax.httpjson.HttpJsonOperationSnapshotCallable; +import com.google.api.gax.httpjson.HttpJsonStubCallableFactory; +import com.google.api.gax.httpjson.longrunning.stub.OperationsStub; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.Operation; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * REST callable factory implementation for the Spanner service API. + * + *

This class is for advanced usage. + */ +@Generated("by gapic-generator-java") +public class HttpJsonSpannerCallableFactory + implements HttpJsonStubCallableFactory { + + @Override + public UnaryCallable createUnaryCallable( + HttpJsonCallSettings httpJsonCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return HttpJsonCallableFactory.createUnaryCallable( + httpJsonCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + HttpJsonCallSettings httpJsonCallSettings, + PagedCallSettings callSettings, + ClientContext clientContext) { + return HttpJsonCallableFactory.createPagedCallable( + httpJsonCallSettings, callSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + HttpJsonCallSettings httpJsonCallSettings, + BatchingCallSettings callSettings, + ClientContext clientContext) { + return HttpJsonCallableFactory.createBatchingCallable( + httpJsonCallSettings, callSettings, clientContext); + } + + @Override + public + OperationCallable createOperationCallable( + HttpJsonCallSettings httpJsonCallSettings, + OperationCallSettings callSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + UnaryCallable innerCallable = + HttpJsonCallableFactory.createBaseUnaryCallable( + httpJsonCallSettings, callSettings.getInitialCallSettings(), clientContext); + HttpJsonOperationSnapshotCallable initialCallable = + new HttpJsonOperationSnapshotCallable( + innerCallable, + httpJsonCallSettings.getMethodDescriptor().getOperationSnapshotFactory()); + return HttpJsonCallableFactory.createOperationCallable( + callSettings, clientContext, operationsStub.longRunningClient(), initialCallable); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + HttpJsonCallSettings httpJsonCallSettings, + ServerStreamingCallSettings callSettings, + ClientContext clientContext) { + return 
HttpJsonCallableFactory.createServerStreamingCallable( + httpJsonCallSettings, callSettings, clientContext); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/HttpJsonSpannerStub.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/HttpJsonSpannerStub.java new file mode 100644 index 000000000000..e6ac3d5e840c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/HttpJsonSpannerStub.java @@ -0,0 +1,1119 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.v1.stub; + +import static com.google.cloud.spanner.v1.SpannerClient.ListSessionsPagedResponse; + +import com.google.api.core.InternalApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.httpjson.ApiMethodDescriptor; +import com.google.api.gax.httpjson.HttpJsonCallSettings; +import com.google.api.gax.httpjson.HttpJsonStubCallableFactory; +import com.google.api.gax.httpjson.ProtoMessageRequestFormatter; +import com.google.api.gax.httpjson.ProtoMessageResponseParser; +import com.google.api.gax.httpjson.ProtoRestSerializer; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.RequestParamsBuilder; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.protobuf.Empty; +import com.google.protobuf.TypeRegistry; +import com.google.spanner.v1.BatchCreateSessionsRequest; +import com.google.spanner.v1.BatchCreateSessionsResponse; +import com.google.spanner.v1.BatchWriteRequest; +import com.google.spanner.v1.BatchWriteResponse; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.CommitResponse; +import com.google.spanner.v1.CreateSessionRequest; +import com.google.spanner.v1.DeleteSessionRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteBatchDmlResponse; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.GetSessionRequest; +import com.google.spanner.v1.ListSessionsRequest; +import com.google.spanner.v1.ListSessionsResponse; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.PartitionQueryRequest; +import com.google.spanner.v1.PartitionReadRequest; +import com.google.spanner.v1.PartitionResponse; +import com.google.spanner.v1.ReadRequest; +import 
com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.Session; +import com.google.spanner.v1.Transaction; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * REST stub implementation for the Spanner service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public class HttpJsonSpannerStub extends SpannerStub { + private static final TypeRegistry typeRegistry = TypeRegistry.newBuilder().build(); + + private static final ApiMethodDescriptor + createSessionMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.v1.Spanner/CreateSession") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{database=projects/*/instances/*/databases/*}/sessions", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "database", request.getDatabase()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearDatabase().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Session.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + batchCreateSessionsMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.v1.Spanner/BatchCreateSessions") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{database=projects/*/instances/*/databases/*}/sessions:batchCreate", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, 
"database", request.getDatabase()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearDatabase().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(BatchCreateSessionsResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor getSessionMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.v1.Spanner/GetSession") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/instances/*/databases/*/sessions/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Session.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + listSessionsMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.v1.Spanner/ListSessions") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + 
ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{database=projects/*/instances/*/databases/*}/sessions", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "database", request.getDatabase()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "filter", request.getFilter()); + serializer.putQueryParam(fields, "pageSize", request.getPageSize()); + serializer.putQueryParam(fields, "pageToken", request.getPageToken()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ListSessionsResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + deleteSessionMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.v1.Spanner/DeleteSession") + .setHttpMethod("DELETE") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/instances/*/databases/*/sessions/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + 
.setDefaultInstance(Empty.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + executeSqlMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.v1.Spanner/ExecuteSql") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeSql", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "session", request.getSession()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearSession().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ResultSet.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + executeStreamingSqlMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.v1.Spanner/ExecuteStreamingSql") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.SERVER_STREAMING) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeStreamingSql", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "session", request.getSession()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new 
HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearSession().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(PartialResultSet.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + executeBatchDmlMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.v1.Spanner/ExecuteBatchDml") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeBatchDml", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "session", request.getSession()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearSession().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ExecuteBatchDmlResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor readMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.v1.Spanner/Read") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + 
ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:read", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = ProtoRestSerializer.create(); + serializer.putPathParam(fields, "session", request.getSession()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearSession().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ResultSet.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + streamingReadMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.v1.Spanner/StreamingRead") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.SERVER_STREAMING) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:streamingRead", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "session", request.getSession()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearSession().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + 
.setDefaultInstance(PartialResultSet.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + beginTransactionMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.v1.Spanner/BeginTransaction") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:beginTransaction", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "session", request.getSession()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearSession().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Transaction.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor commitMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.v1.Spanner/Commit") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:commit", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "session", request.getSession()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + 
ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearSession().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(CommitResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor rollbackMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.v1.Spanner/Rollback") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:rollback", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "session", request.getSession()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearSession().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Empty.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + partitionQueryMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.v1.Spanner/PartitionQuery") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + 
"/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionQuery", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "session", request.getSession()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearSession().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(PartitionResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + partitionReadMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.v1.Spanner/PartitionRead") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionRead", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "session", request.getSession()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearSession().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(PartitionResponse.getDefaultInstance()) + 
.setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + batchWriteMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.v1.Spanner/BatchWrite") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.SERVER_STREAMING) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:batchWrite", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "session", request.getSession()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearSession().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(BatchWriteResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private final UnaryCallable createSessionCallable; + private final UnaryCallable + batchCreateSessionsCallable; + private final UnaryCallable getSessionCallable; + private final UnaryCallable listSessionsCallable; + private final UnaryCallable + listSessionsPagedCallable; + private final UnaryCallable deleteSessionCallable; + private final UnaryCallable executeSqlCallable; + private final ServerStreamingCallable + executeStreamingSqlCallable; + private final UnaryCallable + executeBatchDmlCallable; + private final UnaryCallable readCallable; + private final ServerStreamingCallable streamingReadCallable; + private final UnaryCallable beginTransactionCallable; + private final UnaryCallable commitCallable; + private final 
UnaryCallable rollbackCallable; + private final UnaryCallable partitionQueryCallable; + private final UnaryCallable partitionReadCallable; + private final ServerStreamingCallable batchWriteCallable; + + private final BackgroundResource backgroundResources; + private final HttpJsonStubCallableFactory callableFactory; + + public static final HttpJsonSpannerStub create(SpannerStubSettings settings) throws IOException { + return new HttpJsonSpannerStub(settings, ClientContext.create(settings)); + } + + public static final HttpJsonSpannerStub create(ClientContext clientContext) throws IOException { + return new HttpJsonSpannerStub(SpannerStubSettings.newHttpJsonBuilder().build(), clientContext); + } + + public static final HttpJsonSpannerStub create( + ClientContext clientContext, HttpJsonStubCallableFactory callableFactory) throws IOException { + return new HttpJsonSpannerStub( + SpannerStubSettings.newHttpJsonBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of HttpJsonSpannerStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected HttpJsonSpannerStub(SpannerStubSettings settings, ClientContext clientContext) + throws IOException { + this(settings, clientContext, new HttpJsonSpannerCallableFactory()); + } + + /** + * Constructs an instance of HttpJsonSpannerStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. 
+ */ + protected HttpJsonSpannerStub( + SpannerStubSettings settings, + ClientContext clientContext, + HttpJsonStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + + HttpJsonCallSettings createSessionTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(createSessionMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("database", String.valueOf(request.getDatabase())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getDatabase()) + .build(); + HttpJsonCallSettings + batchCreateSessionsTransportSettings = + HttpJsonCallSettings + .newBuilder() + .setMethodDescriptor(batchCreateSessionsMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("database", String.valueOf(request.getDatabase())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getDatabase()) + .build(); + HttpJsonCallSettings getSessionTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(getSessionMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + HttpJsonCallSettings listSessionsTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(listSessionsMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("database", String.valueOf(request.getDatabase())); + return builder.build(); + }) + .setResourceNameExtractor(request -> 
request.getDatabase()) + .build(); + HttpJsonCallSettings deleteSessionTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(deleteSessionMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getName()) + .build(); + HttpJsonCallSettings executeSqlTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(executeSqlMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getSession()) + .build(); + HttpJsonCallSettings executeStreamingSqlTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(executeStreamingSqlMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getSession()) + .build(); + HttpJsonCallSettings + executeBatchDmlTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(executeBatchDmlMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getSession()) + .build(); + HttpJsonCallSettings readTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(readMethodDescriptor) + .setTypeRegistry(typeRegistry) + 
.setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getSession()) + .build(); + HttpJsonCallSettings streamingReadTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(streamingReadMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getSession()) + .build(); + HttpJsonCallSettings beginTransactionTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(beginTransactionMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getSession()) + .build(); + HttpJsonCallSettings commitTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(commitMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getSession()) + .build(); + HttpJsonCallSettings rollbackTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(rollbackMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + 
.setResourceNameExtractor(request -> request.getSession()) + .build(); + HttpJsonCallSettings partitionQueryTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(partitionQueryMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getSession()) + .build(); + HttpJsonCallSettings partitionReadTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(partitionReadMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getSession()) + .build(); + HttpJsonCallSettings batchWriteTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(batchWriteMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("session", String.valueOf(request.getSession())); + return builder.build(); + }) + .setResourceNameExtractor(request -> request.getSession()) + .build(); + + this.createSessionCallable = + callableFactory.createUnaryCallable( + createSessionTransportSettings, settings.createSessionSettings(), clientContext); + this.batchCreateSessionsCallable = + callableFactory.createUnaryCallable( + batchCreateSessionsTransportSettings, + settings.batchCreateSessionsSettings(), + clientContext); + this.getSessionCallable = + callableFactory.createUnaryCallable( + getSessionTransportSettings, settings.getSessionSettings(), clientContext); + this.listSessionsCallable = + callableFactory.createUnaryCallable( + listSessionsTransportSettings, 
settings.listSessionsSettings(), clientContext); + this.listSessionsPagedCallable = + callableFactory.createPagedCallable( + listSessionsTransportSettings, settings.listSessionsSettings(), clientContext); + this.deleteSessionCallable = + callableFactory.createUnaryCallable( + deleteSessionTransportSettings, settings.deleteSessionSettings(), clientContext); + this.executeSqlCallable = + callableFactory.createUnaryCallable( + executeSqlTransportSettings, settings.executeSqlSettings(), clientContext); + this.executeStreamingSqlCallable = + callableFactory.createServerStreamingCallable( + executeStreamingSqlTransportSettings, + settings.executeStreamingSqlSettings(), + clientContext); + this.executeBatchDmlCallable = + callableFactory.createUnaryCallable( + executeBatchDmlTransportSettings, settings.executeBatchDmlSettings(), clientContext); + this.readCallable = + callableFactory.createUnaryCallable( + readTransportSettings, settings.readSettings(), clientContext); + this.streamingReadCallable = + callableFactory.createServerStreamingCallable( + streamingReadTransportSettings, settings.streamingReadSettings(), clientContext); + this.beginTransactionCallable = + callableFactory.createUnaryCallable( + beginTransactionTransportSettings, settings.beginTransactionSettings(), clientContext); + this.commitCallable = + callableFactory.createUnaryCallable( + commitTransportSettings, settings.commitSettings(), clientContext); + this.rollbackCallable = + callableFactory.createUnaryCallable( + rollbackTransportSettings, settings.rollbackSettings(), clientContext); + this.partitionQueryCallable = + callableFactory.createUnaryCallable( + partitionQueryTransportSettings, settings.partitionQuerySettings(), clientContext); + this.partitionReadCallable = + callableFactory.createUnaryCallable( + partitionReadTransportSettings, settings.partitionReadSettings(), clientContext); + this.batchWriteCallable = + callableFactory.createServerStreamingCallable( + batchWriteTransportSettings, 
settings.batchWriteSettings(), clientContext); + + this.backgroundResources = + new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + @InternalApi + public static List getMethodDescriptors() { + List methodDescriptors = new ArrayList<>(); + methodDescriptors.add(createSessionMethodDescriptor); + methodDescriptors.add(batchCreateSessionsMethodDescriptor); + methodDescriptors.add(getSessionMethodDescriptor); + methodDescriptors.add(listSessionsMethodDescriptor); + methodDescriptors.add(deleteSessionMethodDescriptor); + methodDescriptors.add(executeSqlMethodDescriptor); + methodDescriptors.add(executeStreamingSqlMethodDescriptor); + methodDescriptors.add(executeBatchDmlMethodDescriptor); + methodDescriptors.add(readMethodDescriptor); + methodDescriptors.add(streamingReadMethodDescriptor); + methodDescriptors.add(beginTransactionMethodDescriptor); + methodDescriptors.add(commitMethodDescriptor); + methodDescriptors.add(rollbackMethodDescriptor); + methodDescriptors.add(partitionQueryMethodDescriptor); + methodDescriptors.add(partitionReadMethodDescriptor); + methodDescriptors.add(batchWriteMethodDescriptor); + return methodDescriptors; + } + + @Override + public UnaryCallable createSessionCallable() { + return createSessionCallable; + } + + @Override + public UnaryCallable + batchCreateSessionsCallable() { + return batchCreateSessionsCallable; + } + + @Override + public UnaryCallable getSessionCallable() { + return getSessionCallable; + } + + @Override + public UnaryCallable listSessionsCallable() { + return listSessionsCallable; + } + + @Override + public UnaryCallable listSessionsPagedCallable() { + return listSessionsPagedCallable; + } + + @Override + public UnaryCallable deleteSessionCallable() { + return deleteSessionCallable; + } + + @Override + public UnaryCallable executeSqlCallable() { + return executeSqlCallable; + } + + @Override + public ServerStreamingCallable + executeStreamingSqlCallable() { + return 
executeStreamingSqlCallable; + } + + @Override + public UnaryCallable executeBatchDmlCallable() { + return executeBatchDmlCallable; + } + + @Override + public UnaryCallable readCallable() { + return readCallable; + } + + @Override + public ServerStreamingCallable streamingReadCallable() { + return streamingReadCallable; + } + + @Override + public UnaryCallable beginTransactionCallable() { + return beginTransactionCallable; + } + + @Override + public UnaryCallable commitCallable() { + return commitCallable; + } + + @Override + public UnaryCallable rollbackCallable() { + return rollbackCallable; + } + + @Override + public UnaryCallable partitionQueryCallable() { + return partitionQueryCallable; + } + + @Override + public UnaryCallable partitionReadCallable() { + return partitionReadCallable; + } + + @Override + public ServerStreamingCallable batchWriteCallable() { + return batchWriteCallable; + } + + @Override + public final void close() { + try { + backgroundResources.close(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new IllegalStateException("Failed to close resource", e); + } + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/SpannerStub.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/SpannerStub.java new file mode 100644 index 000000000000..aa3c5b0fe152 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/SpannerStub.java @@ -0,0 +1,132 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.v1.stub; + +import static com.google.cloud.spanner.v1.SpannerClient.ListSessionsPagedResponse; + +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.protobuf.Empty; +import com.google.spanner.v1.BatchCreateSessionsRequest; +import com.google.spanner.v1.BatchCreateSessionsResponse; +import com.google.spanner.v1.BatchWriteRequest; +import com.google.spanner.v1.BatchWriteResponse; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.CommitResponse; +import com.google.spanner.v1.CreateSessionRequest; +import com.google.spanner.v1.DeleteSessionRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteBatchDmlResponse; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.GetSessionRequest; +import com.google.spanner.v1.ListSessionsRequest; +import com.google.spanner.v1.ListSessionsResponse; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.PartitionQueryRequest; +import com.google.spanner.v1.PartitionReadRequest; +import 
com.google.spanner.v1.PartitionResponse; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.Session; +import com.google.spanner.v1.Transaction; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Base stub class for the Spanner service API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator-java") +public abstract class SpannerStub implements BackgroundResource { + + public UnaryCallable createSessionCallable() { + throw new UnsupportedOperationException("Not implemented: createSessionCallable()"); + } + + public UnaryCallable + batchCreateSessionsCallable() { + throw new UnsupportedOperationException("Not implemented: batchCreateSessionsCallable()"); + } + + public UnaryCallable getSessionCallable() { + throw new UnsupportedOperationException("Not implemented: getSessionCallable()"); + } + + public UnaryCallable listSessionsPagedCallable() { + throw new UnsupportedOperationException("Not implemented: listSessionsPagedCallable()"); + } + + public UnaryCallable listSessionsCallable() { + throw new UnsupportedOperationException("Not implemented: listSessionsCallable()"); + } + + public UnaryCallable deleteSessionCallable() { + throw new UnsupportedOperationException("Not implemented: deleteSessionCallable()"); + } + + public UnaryCallable executeSqlCallable() { + throw new UnsupportedOperationException("Not implemented: executeSqlCallable()"); + } + + public ServerStreamingCallable + executeStreamingSqlCallable() { + throw new UnsupportedOperationException("Not implemented: executeStreamingSqlCallable()"); + } + + public UnaryCallable executeBatchDmlCallable() { + throw new UnsupportedOperationException("Not implemented: executeBatchDmlCallable()"); + } + + public UnaryCallable readCallable() { + throw new UnsupportedOperationException("Not implemented: readCallable()"); + } + + public ServerStreamingCallable streamingReadCallable() { + throw new UnsupportedOperationException("Not implemented: streamingReadCallable()"); + } + + public UnaryCallable beginTransactionCallable() { + throw new UnsupportedOperationException("Not implemented: beginTransactionCallable()"); + } + + public UnaryCallable commitCallable() { + throw new 
UnsupportedOperationException("Not implemented: commitCallable()"); + } + + public UnaryCallable rollbackCallable() { + throw new UnsupportedOperationException("Not implemented: rollbackCallable()"); + } + + public UnaryCallable partitionQueryCallable() { + throw new UnsupportedOperationException("Not implemented: partitionQueryCallable()"); + } + + public UnaryCallable partitionReadCallable() { + throw new UnsupportedOperationException("Not implemented: partitionReadCallable()"); + } + + public ServerStreamingCallable batchWriteCallable() { + throw new UnsupportedOperationException("Not implemented: batchWriteCallable()"); + } + + @Override + public abstract void close(); +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/SpannerStubSettings.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/SpannerStubSettings.java new file mode 100644 index 000000000000..6114827d0825 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/SpannerStubSettings.java @@ -0,0 +1,837 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.v1.stub; + +import static com.google.cloud.spanner.v1.SpannerClient.ListSessionsPagedResponse; + +import com.google.api.core.ApiFunction; +import com.google.api.core.ApiFuture; +import com.google.api.core.BetaApi; +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.httpjson.GaxHttpJsonProperties; +import com.google.api.gax.httpjson.HttpJsonTransportChannel; +import com.google.api.gax.httpjson.InstantiatingHttpJsonChannelProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.LibraryMetadata; +import com.google.api.gax.rpc.PageContext; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.PagedListDescriptor; +import com.google.api.gax.rpc.PagedListResponseFactory; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import com.google.protobuf.Empty; +import com.google.spanner.v1.BatchCreateSessionsRequest; +import com.google.spanner.v1.BatchCreateSessionsResponse; +import com.google.spanner.v1.BatchWriteRequest; +import 
com.google.spanner.v1.BatchWriteResponse; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.CommitResponse; +import com.google.spanner.v1.CreateSessionRequest; +import com.google.spanner.v1.DeleteSessionRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteBatchDmlResponse; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.GetSessionRequest; +import com.google.spanner.v1.ListSessionsRequest; +import com.google.spanner.v1.ListSessionsResponse; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.PartitionQueryRequest; +import com.google.spanner.v1.PartitionReadRequest; +import com.google.spanner.v1.PartitionResponse; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.Session; +import com.google.spanner.v1.Transaction; +import java.io.IOException; +import java.time.Duration; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +/** + * Settings class to configure an instance of {@link SpannerStub}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (spanner.googleapis.com) and default port (443) are used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of createSession: + * + *

{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * SpannerStubSettings.Builder spannerSettingsBuilder = SpannerStubSettings.newBuilder();
+ * spannerSettingsBuilder
+ *     .createSessionSettings()
+ *     .setRetrySettings(
+ *         spannerSettingsBuilder
+ *             .createSessionSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
+ *             .build());
+ * SpannerStubSettings spannerSettings = spannerSettingsBuilder.build();
+ * }
+ * + * Please refer to the [Client Side Retry + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. + */ +@Generated("by gapic-generator-java") +@SuppressWarnings("CanonicalDuration") +public class SpannerStubSettings extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder() + .add("https://www.googleapis.com/auth/cloud-platform") + .add("https://www.googleapis.com/auth/spanner.data") + .build(); + + private final UnaryCallSettings createSessionSettings; + private final UnaryCallSettings + batchCreateSessionsSettings; + private final UnaryCallSettings getSessionSettings; + private final PagedCallSettings< + ListSessionsRequest, ListSessionsResponse, ListSessionsPagedResponse> + listSessionsSettings; + private final UnaryCallSettings deleteSessionSettings; + private final UnaryCallSettings executeSqlSettings; + private final ServerStreamingCallSettings + executeStreamingSqlSettings; + private final UnaryCallSettings + executeBatchDmlSettings; + private final UnaryCallSettings readSettings; + private final ServerStreamingCallSettings streamingReadSettings; + private final UnaryCallSettings beginTransactionSettings; + private final UnaryCallSettings commitSettings; + private final UnaryCallSettings rollbackSettings; + private final UnaryCallSettings partitionQuerySettings; + private final UnaryCallSettings partitionReadSettings; + private final ServerStreamingCallSettings + batchWriteSettings; + + private static final PagedListDescriptor + LIST_SESSIONS_PAGE_STR_DESC = + new PagedListDescriptor() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListSessionsRequest injectToken(ListSessionsRequest payload, String token) { + return ListSessionsRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListSessionsRequest injectPageSize(ListSessionsRequest 
payload, int pageSize) { + return ListSessionsRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListSessionsRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListSessionsResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListSessionsResponse payload) { + return payload.getSessionsList(); + } + }; + + private static final PagedListResponseFactory< + ListSessionsRequest, ListSessionsResponse, ListSessionsPagedResponse> + LIST_SESSIONS_PAGE_STR_FACT = + new PagedListResponseFactory< + ListSessionsRequest, ListSessionsResponse, ListSessionsPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + UnaryCallable callable, + ListSessionsRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext pageContext = + PageContext.create(callable, LIST_SESSIONS_PAGE_STR_DESC, request, context); + return ListSessionsPagedResponse.createAsync(pageContext, futureResponse); + } + }; + + /** Returns the object with the settings used for calls to createSession. */ + public UnaryCallSettings createSessionSettings() { + return createSessionSettings; + } + + /** Returns the object with the settings used for calls to batchCreateSessions. */ + public UnaryCallSettings + batchCreateSessionsSettings() { + return batchCreateSessionsSettings; + } + + /** Returns the object with the settings used for calls to getSession. */ + public UnaryCallSettings getSessionSettings() { + return getSessionSettings; + } + + /** Returns the object with the settings used for calls to listSessions. */ + public PagedCallSettings + listSessionsSettings() { + return listSessionsSettings; + } + + /** Returns the object with the settings used for calls to deleteSession. 
*/ + public UnaryCallSettings deleteSessionSettings() { + return deleteSessionSettings; + } + + /** Returns the object with the settings used for calls to executeSql. */ + public UnaryCallSettings executeSqlSettings() { + return executeSqlSettings; + } + + /** Returns the object with the settings used for calls to executeStreamingSql. */ + public ServerStreamingCallSettings + executeStreamingSqlSettings() { + return executeStreamingSqlSettings; + } + + /** Returns the object with the settings used for calls to executeBatchDml. */ + public UnaryCallSettings + executeBatchDmlSettings() { + return executeBatchDmlSettings; + } + + /** Returns the object with the settings used for calls to read. */ + public UnaryCallSettings readSettings() { + return readSettings; + } + + /** Returns the object with the settings used for calls to streamingRead. */ + public ServerStreamingCallSettings streamingReadSettings() { + return streamingReadSettings; + } + + /** Returns the object with the settings used for calls to beginTransaction. */ + public UnaryCallSettings beginTransactionSettings() { + return beginTransactionSettings; + } + + /** Returns the object with the settings used for calls to commit. */ + public UnaryCallSettings commitSettings() { + return commitSettings; + } + + /** Returns the object with the settings used for calls to rollback. */ + public UnaryCallSettings rollbackSettings() { + return rollbackSettings; + } + + /** Returns the object with the settings used for calls to partitionQuery. */ + public UnaryCallSettings partitionQuerySettings() { + return partitionQuerySettings; + } + + /** Returns the object with the settings used for calls to partitionRead. */ + public UnaryCallSettings partitionReadSettings() { + return partitionReadSettings; + } + + /** Returns the object with the settings used for calls to batchWrite. 
*/ + public ServerStreamingCallSettings batchWriteSettings() { + return batchWriteSettings; + } + + public SpannerStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcSpannerStub.create(this); + } + if (getTransportChannelProvider() + .getTransportName() + .equals(HttpJsonTransportChannel.getHttpJsonTransportName())) { + return HttpJsonSpannerStub.create(this); + } + throw new UnsupportedOperationException( + String.format( + "Transport not supported: %s", getTransportChannelProvider().getTransportName())); + } + + /** Returns the default service name. */ + @Override + public String getServiceName() { + return "spanner"; + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") + public static String getDefaultEndpoint() { + return "spanner.googleapis.com:443"; + } + + /** Returns the default mTLS service endpoint. */ + public static String getDefaultMtlsEndpoint() { + return "spanner.mtls.googleapis.com:443"; + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder() + .setScopesToApply(DEFAULT_SERVICE_SCOPES) + .setUseJwtAccessWithScope(true); + } + + /** Returns a builder for the default gRPC ChannelProvider for this service. 
*/ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder() + .setMaxInboundMessageSize(Integer.MAX_VALUE); + } + + /** Returns a builder for the default REST ChannelProvider for this service. */ + @BetaApi + public static InstantiatingHttpJsonChannelProvider.Builder + defaultHttpJsonTransportProviderBuilder() { + return InstantiatingHttpJsonChannelProvider.newBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + public static ApiClientHeaderProvider.Builder defaultGrpcApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken("gapic", GaxProperties.getLibraryVersion(SpannerStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + public static ApiClientHeaderProvider.Builder defaultHttpJsonApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken("gapic", GaxProperties.getLibraryVersion(SpannerStubSettings.class)) + .setTransportToken( + GaxHttpJsonProperties.getHttpJsonTokenName(), + GaxHttpJsonProperties.getHttpJsonVersion()); + } + + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return SpannerStubSettings.defaultGrpcApiClientHeaderProviderBuilder(); + } + + /** Returns a new gRPC builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new REST builder for this class. */ + public static Builder newHttpJsonBuilder() { + return Builder.createHttpJsonDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. 
*/ + public Builder toBuilder() { + return new Builder(this); + } + + protected SpannerStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + createSessionSettings = settingsBuilder.createSessionSettings().build(); + batchCreateSessionsSettings = settingsBuilder.batchCreateSessionsSettings().build(); + getSessionSettings = settingsBuilder.getSessionSettings().build(); + listSessionsSettings = settingsBuilder.listSessionsSettings().build(); + deleteSessionSettings = settingsBuilder.deleteSessionSettings().build(); + executeSqlSettings = settingsBuilder.executeSqlSettings().build(); + executeStreamingSqlSettings = settingsBuilder.executeStreamingSqlSettings().build(); + executeBatchDmlSettings = settingsBuilder.executeBatchDmlSettings().build(); + readSettings = settingsBuilder.readSettings().build(); + streamingReadSettings = settingsBuilder.streamingReadSettings().build(); + beginTransactionSettings = settingsBuilder.beginTransactionSettings().build(); + commitSettings = settingsBuilder.commitSettings().build(); + rollbackSettings = settingsBuilder.rollbackSettings().build(); + partitionQuerySettings = settingsBuilder.partitionQuerySettings().build(); + partitionReadSettings = settingsBuilder.partitionReadSettings().build(); + batchWriteSettings = settingsBuilder.batchWriteSettings().build(); + } + + @Override + protected LibraryMetadata getLibraryMetadata() { + return LibraryMetadata.newBuilder() + .setArtifactName("com.google.cloud:google-cloud-spanner") + .setRepository("googleapis/google-cloud-java") + .setVersion(Version.VERSION) + .build(); + } + + /** Builder for SpannerStubSettings. 
*/ + public static class Builder extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + private final UnaryCallSettings.Builder createSessionSettings; + private final UnaryCallSettings.Builder + batchCreateSessionsSettings; + private final UnaryCallSettings.Builder getSessionSettings; + private final PagedCallSettings.Builder< + ListSessionsRequest, ListSessionsResponse, ListSessionsPagedResponse> + listSessionsSettings; + private final UnaryCallSettings.Builder deleteSessionSettings; + private final UnaryCallSettings.Builder executeSqlSettings; + private final ServerStreamingCallSettings.Builder + executeStreamingSqlSettings; + private final UnaryCallSettings.Builder + executeBatchDmlSettings; + private final UnaryCallSettings.Builder readSettings; + private final ServerStreamingCallSettings.Builder + streamingReadSettings; + private final UnaryCallSettings.Builder + beginTransactionSettings; + private final UnaryCallSettings.Builder commitSettings; + private final UnaryCallSettings.Builder rollbackSettings; + private final UnaryCallSettings.Builder + partitionQuerySettings; + private final UnaryCallSettings.Builder + partitionReadSettings; + private final ServerStreamingCallSettings.Builder + batchWriteSettings; + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put( + "retry_policy_3_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.UNAVAILABLE, StatusCode.Code.RESOURCE_EXHAUSTED))); + definitions.put( + "retry_policy_2_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.UNAVAILABLE, StatusCode.Code.RESOURCE_EXHAUSTED))); + definitions.put( + "retry_policy_1_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.UNAVAILABLE, StatusCode.Code.RESOURCE_EXHAUSTED))); + definitions.put( + "no_retry_0_codes", ImmutableSet.copyOf(Lists.newArrayList())); + 
RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(250L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(32000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(30000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(30000L)) + .setTotalTimeoutDuration(Duration.ofMillis(30000L)) + .build(); + definitions.put("retry_policy_3_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(250L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(32000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(60000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(60000L)) + .setTotalTimeoutDuration(Duration.ofMillis(60000L)) + .build(); + definitions.put("retry_policy_2_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(250L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(32000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setTotalTimeoutDuration(Duration.ofMillis(3600000L)) + .build(); + definitions.put("retry_policy_1_params", settings); + settings = + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setTotalTimeoutDuration(Duration.ofMillis(3600000L)) + .build(); + definitions.put("no_retry_0_params", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { + this(((ClientContext) 
null)); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + createSessionSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + batchCreateSessionsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + getSessionSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + listSessionsSettings = PagedCallSettings.newBuilder(LIST_SESSIONS_PAGE_STR_FACT); + deleteSessionSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + executeSqlSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + executeStreamingSqlSettings = ServerStreamingCallSettings.newBuilder(); + executeBatchDmlSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + readSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + streamingReadSettings = ServerStreamingCallSettings.newBuilder(); + beginTransactionSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + commitSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + rollbackSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + partitionQuerySettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + partitionReadSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + batchWriteSettings = ServerStreamingCallSettings.newBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createSessionSettings, + batchCreateSessionsSettings, + getSessionSettings, + listSessionsSettings, + deleteSessionSettings, + executeSqlSettings, + executeBatchDmlSettings, + readSettings, + beginTransactionSettings, + commitSettings, + rollbackSettings, + partitionQuerySettings, + partitionReadSettings); + initDefaults(this); + } + + protected Builder(SpannerStubSettings settings) { + super(settings); + + createSessionSettings = settings.createSessionSettings.toBuilder(); + batchCreateSessionsSettings = settings.batchCreateSessionsSettings.toBuilder(); + getSessionSettings = settings.getSessionSettings.toBuilder(); + listSessionsSettings = 
settings.listSessionsSettings.toBuilder(); + deleteSessionSettings = settings.deleteSessionSettings.toBuilder(); + executeSqlSettings = settings.executeSqlSettings.toBuilder(); + executeStreamingSqlSettings = settings.executeStreamingSqlSettings.toBuilder(); + executeBatchDmlSettings = settings.executeBatchDmlSettings.toBuilder(); + readSettings = settings.readSettings.toBuilder(); + streamingReadSettings = settings.streamingReadSettings.toBuilder(); + beginTransactionSettings = settings.beginTransactionSettings.toBuilder(); + commitSettings = settings.commitSettings.toBuilder(); + rollbackSettings = settings.rollbackSettings.toBuilder(); + partitionQuerySettings = settings.partitionQuerySettings.toBuilder(); + partitionReadSettings = settings.partitionReadSettings.toBuilder(); + batchWriteSettings = settings.batchWriteSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createSessionSettings, + batchCreateSessionsSettings, + getSessionSettings, + listSessionsSettings, + deleteSessionSettings, + executeSqlSettings, + executeBatchDmlSettings, + readSettings, + beginTransactionSettings, + commitSettings, + rollbackSettings, + partitionQuerySettings, + partitionReadSettings); + } + + private static Builder createDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder createHttpJsonDefault() { + Builder builder = new Builder(((ClientContext) null)); + + builder.setTransportChannelProvider(defaultHttpJsonTransportProviderBuilder().build()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + 
builder.setInternalHeaderProvider(defaultHttpJsonApiClientHeaderProviderBuilder().build()); + builder.setMtlsEndpoint(getDefaultMtlsEndpoint()); + builder.setSwitchToMtlsEndpointAllowed(true); + + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + builder + .createSessionSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_3_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_3_params")); + + builder + .batchCreateSessionsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_2_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_2_params")); + + builder + .getSessionSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_3_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_3_params")); + + builder + .listSessionsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_1_params")); + + builder + .deleteSessionSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_3_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_3_params")); + + builder + .executeSqlSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_3_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_3_params")); + + builder + .executeStreamingSqlSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_0_params")); + + builder + .executeBatchDmlSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_3_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_3_params")); + + builder + .readSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_3_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_3_params")); + 
+ builder + .streamingReadSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_0_params")); + + builder + .beginTransactionSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_3_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_3_params")); + + builder + .commitSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_1_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_1_params")); + + builder + .rollbackSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_3_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_3_params")); + + builder + .partitionQuerySettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_3_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_3_params")); + + builder + .partitionReadSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_3_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_3_params")); + + builder + .batchWriteSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_0_params")); + + return builder; + } + + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to createSession. */ + public UnaryCallSettings.Builder createSessionSettings() { + return createSessionSettings; + } + + /** Returns the builder for the settings used for calls to batchCreateSessions. */ + public UnaryCallSettings.Builder + batchCreateSessionsSettings() { + return batchCreateSessionsSettings; + } + + /** Returns the builder for the settings used for calls to getSession. */ + public UnaryCallSettings.Builder getSessionSettings() { + return getSessionSettings; + } + + /** Returns the builder for the settings used for calls to listSessions. */ + public PagedCallSettings.Builder< + ListSessionsRequest, ListSessionsResponse, ListSessionsPagedResponse> + listSessionsSettings() { + return listSessionsSettings; + } + + /** Returns the builder for the settings used for calls to deleteSession. */ + public UnaryCallSettings.Builder deleteSessionSettings() { + return deleteSessionSettings; + } + + /** Returns the builder for the settings used for calls to executeSql. */ + public UnaryCallSettings.Builder executeSqlSettings() { + return executeSqlSettings; + } + + /** Returns the builder for the settings used for calls to executeStreamingSql. */ + public ServerStreamingCallSettings.Builder + executeStreamingSqlSettings() { + return executeStreamingSqlSettings; + } + + /** Returns the builder for the settings used for calls to executeBatchDml. */ + public UnaryCallSettings.Builder + executeBatchDmlSettings() { + return executeBatchDmlSettings; + } + + /** Returns the builder for the settings used for calls to read. 
*/ + public UnaryCallSettings.Builder readSettings() { + return readSettings; + } + + /** Returns the builder for the settings used for calls to streamingRead. */ + public ServerStreamingCallSettings.Builder + streamingReadSettings() { + return streamingReadSettings; + } + + /** Returns the builder for the settings used for calls to beginTransaction. */ + public UnaryCallSettings.Builder + beginTransactionSettings() { + return beginTransactionSettings; + } + + /** Returns the builder for the settings used for calls to commit. */ + public UnaryCallSettings.Builder commitSettings() { + return commitSettings; + } + + /** Returns the builder for the settings used for calls to rollback. */ + public UnaryCallSettings.Builder rollbackSettings() { + return rollbackSettings; + } + + /** Returns the builder for the settings used for calls to partitionQuery. */ + public UnaryCallSettings.Builder + partitionQuerySettings() { + return partitionQuerySettings; + } + + /** Returns the builder for the settings used for calls to partitionRead. */ + public UnaryCallSettings.Builder + partitionReadSettings() { + return partitionReadSettings; + } + + /** Returns the builder for the settings used for calls to batchWrite. 
*/ + public ServerStreamingCallSettings.Builder + batchWriteSettings() { + return batchWriteSettings; + } + + @Override + public SpannerStubSettings build() throws IOException { + return new SpannerStubSettings(this); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/Version.java b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/Version.java new file mode 100644 index 000000000000..8d72ba6a4784 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/Version.java @@ -0,0 +1,27 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.v1.stub; + +import com.google.api.core.InternalApi; + +@InternalApi("For internal use only") +final class Version { + // {x-version-update-start:google-cloud-spanner:current} + static final String VERSION = "0.0.0-SNAPSHOT"; + // {x-version-update-end} + +} diff --git a/java-spanner/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.admin.database.v1/reflect-config.json b/java-spanner/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.admin.database.v1/reflect-config.json new file mode 100644 index 000000000000..2377603cc28b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.admin.database.v1/reflect-config.json @@ -0,0 +1,2936 @@ +[ + { + "name": "com.google.api.ClientLibraryDestination", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibraryOrganization", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, 
+ "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.FieldBehavior", + "queryAllDeclaredConstructors": 
true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings$Builder", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.LaunchStage", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$History", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Style", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfigDelta", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfigDelta$Action", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfigDelta$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditLogConfig", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditLogConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditLogConfig$LogType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.Binding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.Binding$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.BindingDelta", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.BindingDelta$Action", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.BindingDelta$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.iam.v1.GetIamPolicyRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.GetIamPolicyRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.GetPolicyOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.GetPolicyOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.Policy", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.Policy$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.PolicyDelta", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.PolicyDelta$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": 
true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.SetIamPolicyRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.SetIamPolicyRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.TestIamPermissionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.TestIamPermissionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.TestIamPermissionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.TestIamPermissionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.CancelOperationRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.CancelOperationRequest$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.DeleteOperationRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.DeleteOperationRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.GetOperationRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.GetOperationRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.ListOperationsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.ListOperationsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.ListOperationsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.ListOperationsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.Operation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.Operation$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.OperationInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.OperationInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.WaitOperationRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.WaitOperationRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Any", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Any$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$Edition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$VerificationState", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$EnforceNamingStyle", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$EnumType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$FieldPresence", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$JsonFormat", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$MessageEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$RepeatedFieldEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Utf8Validation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$VisibilityFeature", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$VisibilityFeature$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$FeatureSet$VisibilityFeature$DefaultSymbolVisibility", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Label", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$CType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, 
+ { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$FeatureSupport", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$FeatureSupport$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$JSType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionRetention", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionTargetType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + 
"name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$OptimizeMode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Semantic", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$MethodOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$IdempotencyLevel", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$ServiceDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SymbolVisibility", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Empty", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Empty$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.FieldMask", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.FieldMask$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.ListValue", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.ListValue$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.NullValue", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.Struct", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Struct$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Value", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Value$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.Status", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.Status$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + 
"name": "com.google.spanner.admin.database.v1.AddSplitPointsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.AddSplitPointsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.AddSplitPointsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.AddSplitPointsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.Backup", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.Backup$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.Backup$State", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupInfo", 
+ "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupInstancePartition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupInstancePartition$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupSchedule", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupSchedule$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupScheduleSpec", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupScheduleSpec$Builder", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig$EncryptionType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CopyBackupMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CopyBackupMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CopyBackupRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CopyBackupRequest$Builder", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig$EncryptionType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupRequest$Builder", + "queryAllDeclaredConstructors": 
true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupScheduleRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupScheduleRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateDatabaseMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateDatabaseMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateDatabaseRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateDatabaseRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CrontabSpec", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CrontabSpec$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.Database", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.Database$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.Database$State", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DatabaseDialect", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DatabaseRole", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DatabaseRole$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DdlStatementActionInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DdlStatementActionInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DeleteBackupRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DeleteBackupRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DropDatabaseRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DropDatabaseRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.EncryptionConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.EncryptionConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.EncryptionInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.EncryptionInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.EncryptionInfo$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.FullBackupSpec", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.FullBackupSpec$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.GetBackupRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.GetBackupRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.GetBackupScheduleRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.GetBackupScheduleRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.GetDatabaseDdlRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.GetDatabaseDdlRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true 
+ }, + { + "name": "com.google.spanner.admin.database.v1.GetDatabaseDdlResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.GetDatabaseDdlResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.GetDatabaseRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.GetDatabaseRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.IncrementalBackupSpec", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.IncrementalBackupSpec$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + 
"name": "com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupOperationsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupOperationsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupOperationsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupOperationsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupSchedulesRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupSchedulesRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupSchedulesResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupSchedulesResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabaseRolesRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabaseRolesRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabaseRolesResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabaseRolesResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabasesRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabasesRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabasesResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListDatabasesResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.OperationProgress", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": 
true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.OperationProgress$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig$EncryptionType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.RestoreDatabaseMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.RestoreDatabaseMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.RestoreDatabaseRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.RestoreDatabaseRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.RestoreInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.RestoreInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.RestoreSourceType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.SplitPoints", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.SplitPoints$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.SplitPoints$Key", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.SplitPoints$Key$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateBackupRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateBackupRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateDatabaseMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateDatabaseMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateDatabaseRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateDatabaseRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.type.Expr", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.type.Expr$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + } +] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.admin.instance.v1/reflect-config.json b/java-spanner/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.admin.instance.v1/reflect-config.json new file mode 100644 index 000000000000..489454bd09e0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.admin.instance.v1/reflect-config.json @@ -0,0 +1,2621 @@ +[ + { + "name": "com.google.api.ClientLibraryDestination", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibraryOrganization", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": 
true + }, + { + "name": "com.google.api.DotnetSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.FieldBehavior", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, 
+ { + "name": "com.google.api.HttpRule$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.LaunchStage", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$History", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Style", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfigDelta", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { 
+ "name": "com.google.iam.v1.AuditConfigDelta$Action", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditConfigDelta$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditLogConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditLogConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.AuditLogConfig$LogType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.Binding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.Binding$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.BindingDelta", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.BindingDelta$Action", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.BindingDelta$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.GetIamPolicyRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.GetIamPolicyRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.GetPolicyOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.GetPolicyOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.Policy", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.Policy$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.PolicyDelta", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.PolicyDelta$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.SetIamPolicyRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.SetIamPolicyRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.TestIamPermissionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.TestIamPermissionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.iam.v1.TestIamPermissionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.iam.v1.TestIamPermissionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.CancelOperationRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.CancelOperationRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.DeleteOperationRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.DeleteOperationRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.GetOperationRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.GetOperationRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.ListOperationsRequest", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.ListOperationsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.ListOperationsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.ListOperationsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.Operation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.Operation$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.OperationInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.OperationInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": 
true + }, + { + "name": "com.google.longrunning.WaitOperationRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.longrunning.WaitOperationRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Any", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Any$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$Edition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$ExtensionRangeOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$VerificationState", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$FeatureSet$EnforceNamingStyle", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$EnumType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$FieldPresence", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$JsonFormat", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$MessageEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$RepeatedFieldEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Utf8Validation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$FeatureSet$VisibilityFeature", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$VisibilityFeature$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$VisibilityFeature$DefaultSymbolVisibility", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Label", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$CType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$FeatureSupport", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$FeatureSupport$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$JSType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionRetention", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionTargetType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": 
true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$OptimizeMode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$GeneratedCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Semantic", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$MethodDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$IdempotencyLevel", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$OneofOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$SourceCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SymbolVisibility", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Empty", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Empty$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.FieldMask", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.FieldMask$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.Status", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.Status$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AsymmetricAutoscalingOption", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AsymmetricAutoscalingOption$AutoscalingConfigOverrides", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AsymmetricAutoscalingOption$AutoscalingConfigOverrides$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AsymmetricAutoscalingOption$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AutoscalingLimits", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AutoscalingLimits$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AutoscalingTargets", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AutoscalingTargets$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": 
true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstanceMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstanceMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata$Builder", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstanceRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.CreateInstanceRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.DeleteInstanceRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.DeleteInstanceRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.FreeInstanceMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.FreeInstanceMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.FreeInstanceMetadata$ExpireBehavior", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.FulfillmentPeriod", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.GetInstanceConfigRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.GetInstanceConfigRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.GetInstancePartitionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.GetInstancePartitionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.GetInstanceRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.GetInstanceRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.Instance", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.Instance$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.Instance$DefaultBackupScheduleType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.Instance$Edition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.Instance$InstanceType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.Instance$State", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.InstanceConfig", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.InstanceConfig$Builder", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.InstanceConfig$FreeInstanceAvailability", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.InstanceConfig$QuorumType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.InstanceConfig$State", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.InstanceConfig$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.InstancePartition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.InstancePartition$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.InstancePartition$State", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancesRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancesRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancesResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ListInstancesResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.OperationProgress", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.OperationProgress$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaComputeCapacity", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaComputeCapacity$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaInfo$ReplicaType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaSelection", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaSelection$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstanceMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstanceMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": 
true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstanceRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.UpdateInstanceRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.type.Expr", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.type.Expr$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + } +] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.v1/reflect-config.json b/java-spanner/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.v1/reflect-config.json new file mode 100644 index 000000000000..71bc0fe83f64 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.v1/reflect-config.json @@ -0,0 +1,2936 @@ +[ + { + "name": "com.google.api.ClientLibraryDestination", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibraryOrganization", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ClientLibrarySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, 
+ "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CommonLanguageSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CppSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.CustomHttpPattern$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.DotnetSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.FieldBehavior", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": 
true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.GoSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Http$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.HttpRule$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.JavaSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.LaunchStage", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.MethodSettings$LongRunning$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.NodeSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PhpSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.Publishing$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.api.ResourceDescriptor", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$History", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceDescriptor$Style", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.ResourceReference$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.RubySettings$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.SelectiveGapicGeneration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Any", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Any$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$DescriptorProto$ExtensionRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$DescriptorProto$ReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$Edition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$EnumDescriptorProto$EnumReservedRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$EnumValueOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$ExtensionRangeOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$Declaration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ExtensionRangeOptions$VerificationState", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$FeatureSet$EnforceNamingStyle", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$EnumType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$FieldPresence", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$JsonFormat", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$MessageEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$RepeatedFieldEncoding", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$Utf8Validation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$FeatureSet$VisibilityFeature", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$VisibilityFeature$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSet$VisibilityFeature$DefaultSymbolVisibility", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FeatureSetDefaults$FeatureSetEditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Label", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldDescriptorProto$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$CType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$EditionDefault$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$FeatureSupport", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$FeatureSupport$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$JSType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionRetention", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FieldOptions$OptionTargetType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": 
true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileDescriptorSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$FileOptions$OptimizeMode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$GeneratedCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Annotation$Semantic", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$GeneratedCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MessageOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$MethodDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$MethodOptions$IdempotencyLevel", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$OneofOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$OneofOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceDescriptorProto", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceDescriptorProto$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$ServiceOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$SourceCodeInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SourceCodeInfo$Location$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$SymbolVisibility", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.protobuf.DescriptorProtos$UninterpretedOption$NamePart$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Duration$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Empty", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Empty$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.ListValue", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.ListValue$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.NullValue", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Struct", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Struct$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Timestamp$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Value", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.protobuf.Value$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.Status", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.Status$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, 
+ "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BatchCreateSessionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BatchCreateSessionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BatchCreateSessionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BatchCreateSessionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BatchWriteRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BatchWriteRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BatchWriteRequest$MutationGroup", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.spanner.v1.BatchWriteRequest$MutationGroup$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BatchWriteResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BatchWriteResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BeginTransactionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.BeginTransactionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.CacheUpdate", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.CacheUpdate$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$DataChangeRecord", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$DataChangeRecord$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$DataChangeRecord$ColumnMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$DataChangeRecord$ColumnMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$DataChangeRecord$Mod", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$DataChangeRecord$Mod$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$DataChangeRecord$ModType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$DataChangeRecord$ModValue", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$DataChangeRecord$ModValue$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$DataChangeRecord$ValueCaptureType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$HeartbeatRecord", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$HeartbeatRecord$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$PartitionEndRecord", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": 
true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$PartitionEndRecord$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$PartitionEventRecord", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$PartitionEventRecord$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$PartitionEventRecord$MoveInEvent", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$PartitionEventRecord$MoveInEvent$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$PartitionEventRecord$MoveOutEvent", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$PartitionEventRecord$MoveOutEvent$Builder", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$PartitionStartRecord", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ChangeStreamRecord$PartitionStartRecord$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.CommitRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.CommitRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.CommitResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.CommitResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.CommitResponse$CommitStats", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.CommitResponse$CommitStats$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.CreateSessionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.CreateSessionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.DeleteSessionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.DeleteSessionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.DirectedReadOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.DirectedReadOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.spanner.v1.DirectedReadOptions$ExcludeReplicas", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.DirectedReadOptions$ExcludeReplicas$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.DirectedReadOptions$IncludeReplicas", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.DirectedReadOptions$IncludeReplicas$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.DirectedReadOptions$ReplicaSelection", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.DirectedReadOptions$ReplicaSelection$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.DirectedReadOptions$ReplicaSelection$Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.spanner.v1.ExecuteBatchDmlRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ExecuteBatchDmlRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ExecuteBatchDmlRequest$Statement", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ExecuteBatchDmlRequest$Statement$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ExecuteBatchDmlResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ExecuteBatchDmlResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ExecuteSqlRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ExecuteSqlRequest$Builder", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ExecuteSqlRequest$QueryMode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ExecuteSqlRequest$QueryOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ExecuteSqlRequest$QueryOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.GetSessionRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.GetSessionRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Group", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Group$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.KeyRange", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.KeyRange$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.KeyRecipe", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.KeyRecipe$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.KeyRecipe$Part", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.KeyRecipe$Part$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.KeyRecipe$Part$NullOrder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.KeyRecipe$Part$Order", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.KeySet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.KeySet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ListSessionsRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ListSessionsRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ListSessionsResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ListSessionsResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.MultiplexedSessionPrecommitToken", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.spanner.v1.MultiplexedSessionPrecommitToken$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Mutation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Mutation$Ack", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Mutation$Ack$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Mutation$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Mutation$Delete", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Mutation$Delete$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Mutation$Send", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + 
"allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Mutation$Send$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Mutation$Write", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Mutation$Write$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PartialResultSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PartialResultSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Partition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Partition$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PartitionOptions", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PartitionOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PartitionQueryRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PartitionQueryRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PartitionReadRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PartitionReadRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PartitionResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PartitionResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": 
true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PlanNode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PlanNode$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PlanNode$ChildLink", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PlanNode$ChildLink$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PlanNode$Kind", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PlanNode$ShortRepresentation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.PlanNode$ShortRepresentation$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.QueryAdvisorResult", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.QueryAdvisorResult$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.QueryAdvisorResult$IndexAdvice", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.QueryAdvisorResult$IndexAdvice$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.QueryPlan", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.QueryPlan$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Range", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Range$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ReadRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ReadRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ReadRequest$LockHint", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ReadRequest$OrderBy", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RecipeList", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RecipeList$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RequestOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RequestOptions$Builder", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RequestOptions$ClientContext", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RequestOptions$ClientContext$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RequestOptions$Priority", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ResultSet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ResultSet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ResultSetMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ResultSetMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ResultSetStats", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ResultSetStats$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RollbackRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RollbackRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RoutingHint", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RoutingHint$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RoutingHint$SkippedTablet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.RoutingHint$SkippedTablet$Builder", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Session", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Session$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.StructType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.StructType$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.StructType$Field", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.StructType$Field$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Tablet", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.spanner.v1.Tablet$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Tablet$Role", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Transaction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Transaction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionOptions", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionOptions$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionOptions$IsolationLevel", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionOptions$PartitionedDml", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": 
true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionOptions$PartitionedDml$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionOptions$ReadOnly", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionOptions$ReadOnly$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionOptions$ReadWrite", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionOptions$ReadWrite$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionOptions$ReadWrite$ReadLockMode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionSelector", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TransactionSelector$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Type", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.Type$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TypeAnnotationCode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.TypeCode", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + } +] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner/grpc-gcp-reflect-config.json b/java-spanner/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner/grpc-gcp-reflect-config.json new file mode 100644 index 000000000000..a92f2c29737f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner/grpc-gcp-reflect-config.json @@ -0,0 +1,56 @@ +[ + { + "name": "com.google.cloud.grpc.proto.ApiConfig", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + 
"name": "com.google.cloud.grpc.proto.ApiConfig$Builder", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.google.cloud.grpc.proto.ChannelPoolConfig", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.google.cloud.grpc.proto.ChannelPoolConfig$Builder", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.google.cloud.grpc.proto.MethodConfig", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.google.cloud.grpc.proto.MethodConfig$Builder", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.google.cloud.grpc.proto.AffinityConfig", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.google.cloud.grpc.proto.AffinityConfig$Builder", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, + { + "name": "com.google.cloud.grpc.proto.AffinityConfig$Command", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + } +] diff --git a/java-spanner/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner/reflect-config.json b/java-spanner/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner/reflect-config.json new file mode 100644 index 000000000000..b734ec5c1a42 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner/reflect-config.json @@ -0,0 +1,173 @@ +[ + { + "name": "com.google.rpc.LocalizedMessage", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + 
"allPublicClasses": true + }, + { + "name": "com.google.rpc.LocalizedMessage$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.BadRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.BadRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.BadRequest$FieldViolation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.Help", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.Help$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.Help$Link", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.QuotaFailure", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, 
+ "allPublicClasses": true + }, + { + "name": "com.google.rpc.QuotaFailure$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.QuotaFailure$Violation", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.RequestInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.RequestInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.ResourceInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.ResourceInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.RetryInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.RetryInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.ErrorInfo", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.rpc.ErrorInfo$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + } +] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/main/resources/META-INF/native-image/native-image.properties b/java-spanner/google-cloud-spanner/src/main/resources/META-INF/native-image/native-image.properties new file mode 100644 index 000000000000..566244d3e592 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/resources/META-INF/native-image/native-image.properties @@ -0,0 +1,6 @@ +Args = --initialize-at-build-time=com.google.cloud.spanner.IntegrationTestEnv,\ + org.junit.experimental.categories.CategoryValidator,\ + org.junit.validator.AnnotationValidator,\ + java.lang.annotation.Annotation \ + -H:ReflectionConfigurationResources=${.}/com.google.cloud.spanner/grpc-gcp-reflect-config.json \ + --features=com.google.cloud.spanner.nativeimage.SpannerFeature diff --git a/java-spanner/google-cloud-spanner/src/main/resources/META-INF/native-image/resource-config.json b/java-spanner/google-cloud-spanner/src/main/resources/META-INF/native-image/resource-config.json new file mode 100644 index 000000000000..91d913b239bd --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/resources/META-INF/native-image/resource-config.json @@ -0,0 +1,8 @@ +{ + "resources": [ + {"pattern": "\\Qcom/google/cloud/spanner/connection/ClientSideStatements.json\\E"}, + {"pattern": "\\Qcom/google/cloud/spanner/connection/PG_ClientSideStatements.json\\E"}, + {"pattern": 
"\\Qcom/google/cloud/spanner/spi/v1/grpc-gcp-apiconfig.json\\E"}, + {"pattern": "\\Qcom/google/cloud/spanner/connection/ITSqlScriptTest_TestQueryOptions.sql\\E"} + ] +} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/main/resources/com/google/cloud/spanner/connection/ClientSideStatements.json b/java-spanner/google-cloud-spanner/src/main/resources/com/google/cloud/spanner/connection/ClientSideStatements.json new file mode 100644 index 000000000000..bf7fb8968bc3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/resources/com/google/cloud/spanner/connection/ClientSideStatements.json @@ -0,0 +1,951 @@ +{ +"statements": + [ + { + "name": "SHOW VARIABLE AUTOCOMMIT", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_AUTOCOMMIT", + "regex": "(?is)\\A\\s*show\\s+variable\\s+autocommit\\s*\\z", + "method": "statementShowAutocommit", + "exampleStatements": ["show variable autocommit"] + }, + { + "name": "SHOW VARIABLE READONLY", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_READONLY", + "regex": "(?is)\\A\\s*show\\s+variable\\s+readonly\\s*\\z", + "method": "statementShowReadOnly", + "exampleStatements": ["show variable readonly"] + }, + { + "name": "SHOW VARIABLE RETRY_ABORTS_INTERNALLY", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_RETRY_ABORTS_INTERNALLY", + "regex": "(?is)\\A\\s*show\\s+variable\\s+retry_aborts_internally\\s*\\z", + "method": "statementShowRetryAbortsInternally", + "exampleStatements": ["show variable retry_aborts_internally"], + "examplePrerequisiteStatements": ["set readonly=false", "set autocommit=false"] + }, + { + "name": "SHOW VARIABLE AUTOCOMMIT_DML_MODE", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_AUTOCOMMIT_DML_MODE", + "regex": 
"(?is)\\A\\s*show\\s+variable\\s+autocommit_dml_mode\\s*\\z", + "method": "statementShowAutocommitDmlMode", + "exampleStatements": ["show variable autocommit_dml_mode"] + }, + { + "name": "SHOW VARIABLE STATEMENT_TIMEOUT", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_STATEMENT_TIMEOUT", + "regex": "(?is)\\A\\s*show\\s+variable\\s+statement_timeout\\s*\\z", + "method": "statementShowStatementTimeout", + "exampleStatements": ["show variable statement_timeout"] + }, + { + "name": "SHOW VARIABLE TRANSACTION_TIMEOUT", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_TRANSACTION_TIMEOUT", + "regex": "(?is)\\A\\s*show\\s+variable\\s+transaction_timeout\\s*\\z", + "method": "statementShowTransactionTimeout", + "exampleStatements": ["show variable transaction_timeout"] + }, + { + "name": "SHOW VARIABLE READ_TIMESTAMP", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_READ_TIMESTAMP", + "regex": "(?is)\\A\\s*show\\s+variable\\s+read_timestamp\\s*\\z", + "method": "statementShowReadTimestamp", + "exampleStatements": ["show variable read_timestamp"], + "examplePrerequisiteStatements": ["set readonly = true", "SELECT 1 AS TEST"] + }, + { + "name": "SHOW VARIABLE COMMIT_TIMESTAMP", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_COMMIT_TIMESTAMP", + "regex": "(?is)\\A\\s*show\\s+variable\\s+commit_timestamp\\s*\\z", + "method": "statementShowCommitTimestamp", + "exampleStatements": ["show variable commit_timestamp"], + "examplePrerequisiteStatements": ["update foo set bar=1"] + }, + { + "name": "SHOW VARIABLE READ_ONLY_STALENESS", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_READ_ONLY_STALENESS", + "regex": 
"(?is)\\A\\s*show\\s+variable\\s+read_only_staleness\\s*\\z", + "method": "statementShowReadOnlyStaleness", + "exampleStatements": ["show variable read_only_staleness"] + }, + { + "name": "SHOW VARIABLE DIRECTED_READ", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_DIRECTED_READ", + "regex": "(?is)\\A\\s*show\\s+variable\\s+directed_read\\s*\\z", + "method": "statementShowDirectedRead", + "exampleStatements": ["show variable directed_read"] + }, + { + "name": "SHOW VARIABLE OPTIMIZER_VERSION", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_OPTIMIZER_VERSION", + "regex": "(?is)\\A\\s*show\\s+variable\\s+optimizer_version\\s*\\z", + "method": "statementShowOptimizerVersion", + "exampleStatements": ["show variable optimizer_version"] + }, + { + "name": "SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_OPTIMIZER_STATISTICS_PACKAGE", + "regex": "(?is)\\A\\s*show\\s+variable\\s+optimizer_statistics_package\\s*\\z", + "method": "statementShowOptimizerStatisticsPackage", + "exampleStatements": ["show variable optimizer_statistics_package"] + }, + { + "name": "SHOW VARIABLE RETURN_COMMIT_STATS", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_RETURN_COMMIT_STATS", + "regex": "(?is)\\A\\s*show\\s+variable\\s+return_commit_stats\\s*\\z", + "method": "statementShowReturnCommitStats", + "exampleStatements": ["show variable return_commit_stats"] + }, + { + "name": "SHOW VARIABLE MAX_COMMIT_DELAY", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_MAX_COMMIT_DELAY", + "regex": "(?is)\\A\\s*show\\s+variable\\s+max_commit_delay\\s*\\z", + "method": "statementShowMaxCommitDelay", + "exampleStatements": ["show variable 
max_commit_delay"] + }, + { + "name": "SHOW VARIABLE COMMIT_RESPONSE", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_COMMIT_RESPONSE", + "regex": "(?is)\\A\\s*show\\s+variable\\s+commit_response\\s*\\z", + "method": "statementShowCommitResponse", + "exampleStatements": ["show variable commit_response"], + "examplePrerequisiteStatements": ["update foo set bar=1"] + }, + { + "name": "SHOW VARIABLE STATEMENT_TAG", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_STATEMENT_TAG", + "regex": "(?is)\\A\\s*show\\s+variable\\s+statement_tag\\s*\\z", + "method": "statementShowStatementTag", + "exampleStatements": ["show variable statement_tag"] + }, + { + "name": "SHOW VARIABLE TRANSACTION_TAG", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_TRANSACTION_TAG", + "regex": "(?is)\\A\\s*show\\s+variable\\s+transaction_tag\\s*\\z", + "method": "statementShowTransactionTag", + "exampleStatements": ["show variable transaction_tag"] + }, + { + "name": "SHOW VARIABLE EXCLUDE_TXN_FROM_CHANGE_STREAMS", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_EXCLUDE_TXN_FROM_CHANGE_STREAMS", + "regex": "(?is)\\A\\s*show\\s+variable\\s+exclude_txn_from_change_streams\\s*\\z", + "method": "statementShowExcludeTxnFromChangeStreams", + "exampleStatements": ["show variable exclude_txn_from_change_streams"] + }, + { + "name": "SHOW VARIABLE RPC_PRIORITY", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_RPC_PRIORITY", + "regex": "(?is)\\A\\s*show\\s+variable\\s+rpc_priority\\s*\\z", + "method": "statementShowRPCPriority", + "exampleStatements": ["show variable rpc_priority"] + }, + { + "name": "SHOW VARIABLE SAVEPOINT_SUPPORT", + "executorName": "ClientSideStatementNoParamExecutor", + 
"resultType": "RESULT_SET", + "statementType": "SHOW_SAVEPOINT_SUPPORT", + "regex": "(?is)\\A\\s*show\\s+variable\\s+savepoint_support\\s*\\z", + "method": "statementShowSavepointSupport", + "exampleStatements": ["show variable savepoint_support"] + }, + { + "name": "SHOW VARIABLE DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE", + "regex": "(?is)\\A\\s*show\\s+variable\\s+delay_transaction_start_until_first_write\\s*\\z", + "method": "statementShowDelayTransactionStartUntilFirstWrite", + "exampleStatements": ["show variable delay_transaction_start_until_first_write"] + }, + { + "name": "SHOW VARIABLE KEEP_TRANSACTION_ALIVE", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_KEEP_TRANSACTION_ALIVE", + "regex": "(?is)\\A\\s*show\\s+variable\\s+keep_transaction_alive\\s*\\z", + "method": "statementShowKeepTransactionAlive", + "exampleStatements": ["show variable keep_transaction_alive"] + }, + { + "name": "SHOW VARIABLE AUTO_BATCH_DML", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_AUTO_BATCH_DML", + "regex": "(?is)\\A\\s*show\\s+variable\\s+auto_batch_dml\\s*\\z", + "method": "statementShowAutoBatchDml", + "exampleStatements": ["show variable auto_batch_dml"] + }, + { + "name": "SHOW VARIABLE AUTO_BATCH_DML_UPDATE_COUNT", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_AUTO_BATCH_DML_UPDATE_COUNT", + "regex": "(?is)\\A\\s*show\\s+variable\\s+auto_batch_dml_update_count\\s*\\z", + "method": "statementShowAutoBatchDmlUpdateCount", + "exampleStatements": ["show variable auto_batch_dml_update_count"] + }, + { + "name": "SHOW VARIABLE AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION", + "executorName": "ClientSideStatementNoParamExecutor", + 
"resultType": "RESULT_SET", + "statementType": "SHOW_AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION", + "regex": "(?is)\\A\\s*show\\s+variable\\s+auto_batch_dml_update_count_verification\\s*\\z", + "method": "statementShowAutoBatchDmlUpdateCountVerification", + "exampleStatements": ["show variable auto_batch_dml_update_count_verification"] + }, + { + "name": "PARTITION ", + "executorName": "ClientSideStatementPartitionExecutor", + "resultType": "RESULT_SET", + "statementType": "PARTITION", + "regex": "(?is)\\A\\s*partition(\\s+|\\()(.*)\\z", + "method": "statementPartition", + "exampleStatements": ["partition select col1, col2 from my_table"] + }, + { + "name": "RUN PARTITION ['']", + "executorName": "ClientSideStatementRunPartitionExecutor", + "resultType": "RESULT_SET", + "statementType": "RUN_PARTITION", + "regex": "(?is)\\A\\s*run\\s+partition(?:\\s*'(.*)')?\\s*\\z", + "method": "statementRunPartition", + "exampleStatements": [] + }, + { + "name": "RUN PARTITIONED QUERY ", + "executorName": "ClientSideStatementRunPartitionedQueryExecutor", + "resultType": "RESULT_SET", + "statementType": "RUN_PARTITIONED_QUERY", + "regex": "(?is)\\A\\s*run\\s+partitioned\\s+query(\\s+|\\()(.*)\\z", + "method": "statementRunPartitionedQuery", + "exampleStatements": ["run partitioned query select col1, col2 from my_table"] + }, + { + "name": "BEGIN [TRANSACTION] [ISOLATION LEVEL isolation_level]", + "executorName": "ClientSideStatementBeginExecutor", + "resultType": "NO_RESULT", + "statementType": "BEGIN", + "regex": "(?is)\\A\\s*(?:begin|start)(?:\\s+transaction)?(?:\\s+isolation\\s+level\\s+(repeatable\\s+read|serializable))?\\s*\\z", + "method": "statementBeginTransaction", + "exampleStatements": [ + "begin", + "start", + "begin transaction", + "start transaction", + "begin isolation level repeatable read", + "begin transaction isolation level repeatable read", + "begin isolation level serializable", + "begin transaction isolation level serializable", + "start isolation level 
repeatable read", + "start transaction isolation level repeatable read", + "start isolation level serializable", + "start transaction isolation level serializable" + ] + }, + { + "name": "COMMIT TRANSACTION", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "NO_RESULT", + "statementType": "COMMIT", + "regex": "(?is)\\A\\s*(?:commit)(?:\\s+transaction)?\\s*\\z", + "method": "statementCommit", + "exampleStatements": ["commit", "commit transaction"], + "examplePrerequisiteStatements": ["begin transaction"] + }, + { + "name": "ROLLBACK TRANSACTION", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "NO_RESULT", + "statementType": "ROLLBACK", + "regex": "(?is)\\A\\s*(?:rollback)(?:\\s+transaction)?\\s*\\z", + "method": "statementRollback", + "exampleStatements": ["rollback", "rollback transaction"], + "examplePrerequisiteStatements": ["begin transaction"] + }, + { + "name": "START BATCH DDL", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "NO_RESULT", + "statementType": "START_BATCH_DDL", + "regex": "(?is)\\A\\s*(?:start)(?:\\s+batch)(?:\\s+ddl)\\s*\\z", + "method": "statementStartBatchDdl", + "exampleStatements": ["start batch ddl"] + }, + { + "name": "START BATCH DML", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "NO_RESULT", + "statementType": "START_BATCH_DML", + "regex": "(?is)\\A\\s*(?:start)(?:\\s+batch)(?:\\s+dml)\\s*\\z", + "method": "statementStartBatchDml", + "exampleStatements": ["start batch dml"] + }, + { + "name": "RUN BATCH", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "NO_RESULT", + "statementType": "RUN_BATCH", + "regex": "(?is)\\A\\s*(?:run)(?:\\s+batch)\\s*\\z", + "method": "statementRunBatch", + "exampleStatements": ["run batch"], + "examplePrerequisiteStatements": ["start batch ddl"] + }, + { + "name": "ABORT BATCH", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "NO_RESULT", + "statementType": 
"ABORT_BATCH", + "regex": "(?is)\\A\\s*(?:abort)(?:\\s+batch)\\s*\\z", + "method": "statementAbortBatch", + "exampleStatements": ["abort batch"], + "examplePrerequisiteStatements": ["start batch ddl"] + }, + { + "name": "RESET ALL", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "NO_RESULT", + "statementType": "RESET_ALL", + "regex": "(?is)\\A\\s*(?:reset)(?:\\s+all)\\s*\\z", + "method": "statementResetAll", + "exampleStatements": ["reset all"] + }, + { + "name": "SET AUTOCOMMIT = TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_AUTOCOMMIT", + "regex": "(?is)\\A\\s*set\\s+autocommit\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetAutocommit", + "exampleStatements": ["set autocommit = true", "set autocommit = false"], + "setStatement": { + "propertyName": "AUTOCOMMIT", + "separator": "=", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, + { + "name": "SET READONLY = TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_READONLY", + "regex": "(?is)\\A\\s*set\\s+readonly\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetReadOnly", + "exampleStatements": ["set readonly = true", "set readonly = false"], + "setStatement": { + "propertyName": "READONLY", + "separator": "=", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, + { + "name": "SET [LOCAL] RETRY_ABORTS_INTERNALLY = TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_RETRY_ABORTS_INTERNALLY", + "regex": "(?is)\\A\\s*set\\s+(local\\s+)?retry_aborts_internally\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetRetryAbortsInternally", + "exampleStatements": [ + "set retry_aborts_internally = true", + "set retry_aborts_internally = false", + "set local 
retry_aborts_internally = true", + "set local retry_aborts_internally = false" + ], + "examplePrerequisiteStatements": ["set readonly = false", "set autocommit = false"], + "setStatement": { + "propertyName": "RETRY_ABORTS_INTERNALLY", + "separator": "=", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, + { + "name": "SET AUTOCOMMIT_DML_MODE = 'PARTITIONED_NON_ATOMIC'|'TRANSACTIONAL'|'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_AUTOCOMMIT_DML_MODE", + "regex": "(?is)\\A\\s*set\\s+autocommit_dml_mode\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetAutocommitDmlMode", + "exampleStatements": ["set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'", "set autocommit_dml_mode='TRANSACTIONAL'", "set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'"], + "setStatement": { + "propertyName": "AUTOCOMMIT_DML_MODE", + "separator": "=", + "allowedValues": "'(PARTITIONED_NON_ATOMIC|TRANSACTIONAL|TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC)'", + "converterName": "ClientSideStatementValueConverters$AutocommitDmlModeConverter" + } + }, + { + "name": "SET STATEMENT_TIMEOUT = ''|NULL", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_STATEMENT_TIMEOUT", + "regex": "(?is)\\A\\s*set\\s+statement_timeout\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetStatementTimeout", + "exampleStatements": [ + "set statement_timeout=null", + "set statement_timeout = null ", + "set statement_timeout='1s'", + "set statement_timeout = '1s' ", + "set statement_timeout=100", + "set statement_timeout = 100 ", + "set statement_timeout='100ms'", + "set statement_timeout='10000us'", + "set statement_timeout='9223372036854775807ns'" + ], + "setStatement": { + "propertyName": "STATEMENT_TIMEOUT", + "separator": "=", + "allowedValues": 
"('(\\d{1,19})(s|ms|us|ns)'|\\d{1,19}|NULL)", + "converterName": "ClientSideStatementValueConverters$DurationConverter" + } + }, + { + "name": "SET TRANSACTION_TIMEOUT = ''|NULL", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_TRANSACTION_TIMEOUT", + "regex": "(?is)\\A\\s*set\\s+transaction_timeout\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetTransactionTimeout", + "exampleStatements": [ + "set transaction_timeout=null", + "set transaction_timeout = null ", + "set transaction_timeout='1s'", + "set transaction_timeout = '1s' ", + "set transaction_timeout=100", + "set transaction_timeout = 100 ", + "set transaction_timeout='100ms'", + "set transaction_timeout='10000us'", + "set transaction_timeout='9223372036854775807ns'" + ], + "setStatement": { + "propertyName": "TRANSACTION_TIMEOUT", + "separator": "=", + "allowedValues": "('(\\d{1,19})(s|ms|us|ns)'|\\d{1,19}|NULL)", + "converterName": "ClientSideStatementValueConverters$DurationConverter" + } + }, + { + "name": "SET TRANSACTION READ ONLY|READ WRITE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_TRANSACTION_MODE", + "regex": "(?is)\\A\\s*set\\s+transaction\\s*(?:\\s+)\\s*(.*)\\z", + "method": "statementSetTransactionMode", + "exampleStatements": ["set transaction read only", "set transaction read write"], + "examplePrerequisiteStatements": ["set autocommit = false"], + "setStatement": { + "propertyName": "TRANSACTION", + "separator": "\\s+", + "allowedValues": "(READ\\s+ONLY|READ\\s+WRITE)", + "converterName": "ClientSideStatementValueConverters$TransactionModeConverter" + } + }, + { + "name": "SET READ_ONLY_STALENESS = 'STRONG' | 'MIN_READ_TIMESTAMP ' | 'READ_TIMESTAMP ' | 'MAX_STALENESS s|ms|us|ns' | 'EXACT_STALENESS (s|ms|us|ns)'", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_READ_ONLY_STALENESS", + "regex": 
"(?is)\\A\\s*set\\s+read_only_staleness\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetReadOnlyStaleness", + "exampleStatements": ["set read_only_staleness='STRONG'", + "set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'", + "set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'", + "set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'", + "set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'", + "set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'", + "set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'", + "set read_only_staleness='MAX_STALENESS 12s'", + "set read_only_staleness='MAX_STALENESS 100ms'", + "set read_only_staleness='MAX_STALENESS 99999us'", + "set read_only_staleness='MAX_STALENESS 10ns'", + "set read_only_staleness='EXACT_STALENESS 15s'", + "set read_only_staleness='EXACT_STALENESS 1500ms'", + "set read_only_staleness='EXACT_STALENESS 15000000us'", + "set read_only_staleness='EXACT_STALENESS 9999ns'"], + "setStatement": { + "propertyName": "READ_ONLY_STALENESS", + "separator": "=", + "allowedValues": "'((STRONG)|(MIN_READ_TIMESTAMP)[\\t ]+((\\d{4})-(\\d{2})-(\\d{2})([Tt](\\d{2}):(\\d{2}):(\\d{2})(\\.\\d{1,9})?)([Zz]|([+-])(\\d{2}):(\\d{2})))|(READ_TIMESTAMP)[\\t ]+((\\d{4})-(\\d{2})-(\\d{2})([Tt](\\d{2}):(\\d{2}):(\\d{2})(\\.\\d{1,9})?)([Zz]|([+-])(\\d{2}):(\\d{2})))|(MAX_STALENESS)[\\t ]+((\\d{1,19})(s|ms|us|ns))|(EXACT_STALENESS)[\\t ]+((\\d{1,19})(s|ms|us|ns)))'", + "converterName": "ClientSideStatementValueConverters$ReadOnlyStalenessConverter" + } + }, + { + "name": "SET DIRECTED_READ = ''|''", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_DIRECTED_READ", + "regex": "(?is)\\A\\s*set\\s+directed_read\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetDirectedRead", + "exampleStatements": ["set 
directed_read='{\"includeReplicas\":{\"replicaSelections\":[{\"location\":\"eu-west1\",\"type\":\"READ_ONLY\"}]}}'", "set directed_read=''"], + "setStatement": { + "propertyName": "DIRECTED_READ", + "separator": "=", + "allowedValues": "'((\\S+)|())'", + "converterName": "ClientSideStatementValueConverters$DirectedReadOptionsConverter" + } + }, + { + "name": "SET OPTIMIZER_VERSION = ''|'LATEST'|''", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_OPTIMIZER_VERSION", + "regex": "(?is)\\A\\s*set\\s+optimizer_version\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetOptimizerVersion", + "exampleStatements": ["set optimizer_version='1'", "set optimizer_version='200'", "set optimizer_version='LATEST'", "set optimizer_version=''"], + "setStatement": { + "propertyName": "OPTIMIZER_VERSION", + "separator": "=", + "allowedValues": "'((\\d{1,20})|(LATEST)|())'", + "converterName": "ClientSideStatementValueConverters$StringValueConverter" + } + }, + { + "name": "SET OPTIMIZER_STATISTICS_PACKAGE = ''|''", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_OPTIMIZER_STATISTICS_PACKAGE", + "regex": "(?is)\\A\\s*set\\s+optimizer_statistics_package\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetOptimizerStatisticsPackage", + "exampleStatements": ["set optimizer_statistics_package='auto_20191128_14_47_22UTC'", "set optimizer_statistics_package=''"], + "setStatement": { + "propertyName": "OPTIMIZER_STATISTICS_PACKAGE", + "separator": "=", + "allowedValues": "'((\\S+)|())'", + "converterName": "ClientSideStatementValueConverters$StringValueConverter" + } + }, + { + "name": "SET RETURN_COMMIT_STATS = TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_RETURN_COMMIT_STATS", + "regex": "(?is)\\A\\s*set\\s+return_commit_stats\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetReturnCommitStats", + "exampleStatements": 
["set return_commit_stats = true", "set return_commit_stats = false"], + "setStatement": { + "propertyName": "RETURN_COMMIT_STATS", + "separator": "=", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, + { + "name": "SET MAX_COMMIT_DELAY = ''|NULL", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_MAX_COMMIT_DELAY", + "regex": "(?is)\\A\\s*set\\s+max_commit_delay\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetMaxCommitDelay", + "exampleStatements": [ + "set max_commit_delay=null", + "set max_commit_delay = null", + "set max_commit_delay = null ", + "set max_commit_delay=1000", + "set max_commit_delay = 1000", + "set max_commit_delay = 1000 ", + "set max_commit_delay='1s'", + "set max_commit_delay = '1s'", + "set max_commit_delay = '1s' ", + "set max_commit_delay='100ms'", + "set max_commit_delay='10000us'", + "set max_commit_delay='9223372036854775807ns'"], + "setStatement": { + "propertyName": "MAX_COMMIT_DELAY", + "separator": "=", + "allowedValues": "('(\\d{1,19})(s|ms|us|ns)'|\\d{1,19}|NULL)", + "converterName": "ClientSideStatementValueConverters$DurationConverter" + } + }, + { + "name": "SET STATEMENT_TAG = ''", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_STATEMENT_TAG", + "regex": "(?is)\\A\\s*set\\s+statement_tag\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetStatementTag", + "exampleStatements": ["set statement_tag='tag1'", "set statement_tag='tag2'", "set statement_tag=''", "set statement_tag='test_tag'"], + "setStatement": { + "propertyName": "STATEMENT_TAG", + "separator": "=", + "allowedValues": "'(.*)'", + "converterName": "ClientSideStatementValueConverters$StringValueConverter" + } + }, + { + "name": "SET TRANSACTION_TAG = ''", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_TRANSACTION_TAG", + "regex": 
"(?is)\\A\\s*set\\s+transaction_tag\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetTransactionTag", + "exampleStatements": ["set transaction_tag='tag1'", "set transaction_tag='tag2'", "set transaction_tag=''", "set transaction_tag='test_tag'"], + "examplePrerequisiteStatements": ["set autocommit = false"], + "setStatement": { + "propertyName": "TRANSACTION_TAG", + "separator": "=", + "allowedValues": "'(.*)'", + "converterName": "ClientSideStatementValueConverters$StringValueConverter" + } + }, + { + "name": "SET EXCLUDE_TXN_FROM_CHANGE_STREAMS = TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_EXCLUDE_TXN_FROM_CHANGE_STREAMS", + "regex": "(?is)\\A\\s*set\\s+exclude_txn_from_change_streams\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetExcludeTxnFromChangeStreams", + "exampleStatements": ["set exclude_txn_from_change_streams = true", "set exclude_txn_from_change_streams = false"], + "setStatement": { + "propertyName": "EXCLUDE_TXN_FROM_CHANGE_STREAMS", + "separator": "=", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, + { + "name": "SET RPC_PRIORITY = 'HIGH'|'MEDIUM'|'LOW'|'NULL'", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_RPC_PRIORITY", + "regex": "(?is)\\A\\s*set\\s+rpc_priority\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetRPCPriority", + "exampleStatements": [ + "set rpc_priority='HIGH'", + "set rpc_priority='MEDIUM'", + "set rpc_priority='LOW'", + "set rpc_priority='NULL'" + ], + "setStatement": { + "propertyName": "RPC_PRIORITY", + "separator": "=", + "allowedValues": "'(HIGH|MEDIUM|LOW|NULL)'", + "converterName": "ClientSideStatementValueConverters$RpcPriorityConverter" + } + }, + { + "name": "SET SAVEPOINT_SUPPORT = 'ENABLED'|'FAIL_AFTER_ROLLBACK'|'DISABLED'", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": 
"SET_SAVEPOINT_SUPPORT", + "regex": "(?is)\\A\\s*set\\s+savepoint_support\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetSavepointSupport", + "exampleStatements": [ + "set savepoint_support='ENABLED'", + "set savepoint_support='FAIL_AFTER_ROLLBACK'", + "set savepoint_support='DISABLED'"], + "setStatement": { + "propertyName": "SAVEPOINT_SUPPORT", + "separator": "=", + "allowedValues": "'(ENABLED|FAIL_AFTER_ROLLBACK|DISABLED)'", + "converterName": "ClientSideStatementValueConverters$SavepointSupportConverter" + } + }, + { + "name": "SET DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE = TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE", + "regex": "(?is)\\A\\s*set\\s+delay_transaction_start_until_first_write\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetDelayTransactionStartUntilFirstWrite", + "exampleStatements": ["set delay_transaction_start_until_first_write = true", "set delay_transaction_start_until_first_write = false"], + "setStatement": { + "propertyName": "DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE", + "separator": "=", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, + { + "name": "SET KEEP_TRANSACTION_ALIVE = TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_KEEP_TRANSACTION_ALIVE", + "regex": "(?is)\\A\\s*set\\s+keep_transaction_alive\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetKeepTransactionAlive", + "exampleStatements": ["set keep_transaction_alive = true", "set keep_transaction_alive = false"], + "setStatement": { + "propertyName": "KEEP_TRANSACTION_ALIVE", + "separator": "=", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, + { + "name": "SET AUTO_BATCH_DML = TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": 
"NO_RESULT", + "statementType": "SET_AUTO_BATCH_DML", + "regex": "(?is)\\A\\s*set\\s+auto_batch_dml\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetAutoBatchDml", + "exampleStatements": ["set auto_batch_dml = true", "set auto_batch_dml = false"], + "setStatement": { + "propertyName": "AUTO_BATCH_DML", + "separator": "=", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, + { + "name": "SET AUTO_BATCH_DML_UPDATE_COUNT = ", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_AUTO_BATCH_DML_UPDATE_COUNT", + "regex": "(?is)\\A\\s*set\\s+auto_batch_dml_update_count\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetAutoBatchDmlUpdateCount", + "exampleStatements": ["set auto_batch_dml_update_count = 0", "set auto_batch_dml_update_count = 100"], + "setStatement": { + "propertyName": "AUTO_BATCH_DML_UPDATE_COUNT", + "separator": "=", + "allowedValues": "(\\d{1,19})", + "converterName": "ClientSideStatementValueConverters$LongConverter" + } + }, + { + "name": "SET AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION = TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION", + "regex": "(?is)\\A\\s*set\\s+auto_batch_dml_update_count_verification\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetAutoBatchDmlUpdateCountVerification", + "exampleStatements": ["set auto_batch_dml_update_count_verification = true", "set auto_batch_dml_update_count_verification = false"], + "setStatement": { + "propertyName": "AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION", + "separator": "=", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, + { + "name": "SET [LOCAL] BATCH_DML_UPDATE_COUNT = ", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_BATCH_DML_UPDATE_COUNT", + "regex": 
"(?is)\\A\\s*set\\s+(local\\s+)?batch_dml_update_count\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetBatchDmlUpdateCount", + "exampleStatements": [ + "set local batch_dml_update_count = 0", + "set local batch_dml_update_count = 100", + "set batch_dml_update_count = 1", + "set batch_dml_update_count = 100" + ], + "examplePrerequisiteStatements": ["set readonly = false", "set autocommit = false"], + "setStatement": { + "propertyName": "BATCH_DML_UPDATE_COUNT", + "separator": "=", + "allowedValues": "(\\d{1,19})", + "converterName": "ClientSideStatementValueConverters$LongConverter" + } + }, + { + "name": "SHOW VARIABLE READ_LOCK_MODE", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_READ_LOCK_MODE", + "regex": "(?is)\\A\\s*show\\s+variable\\s+read_lock_mode\\s*\\z", + "method": "statementShowReadLockMode", + "exampleStatements": ["show variable read_lock_mode"] + }, + { + "name": "SET READ_LOCK_MODE = 'OPTIMISTIC'|'PESSIMISTIC'|'UNSPECIFIED'", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_READ_LOCK_MODE", + "regex": "(?is)\\A\\s*set\\s+read_lock_mode\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetReadLockMode", + "exampleStatements": [ + "set read_lock_mode='OPTIMISTIC'", + "set read_lock_mode='PESSIMISTIC'", + "set read_lock_mode='UNSPECIFIED'"], + "setStatement": { + "propertyName": "READ_LOCK_MODE", + "separator": "=", + "allowedValues": "'(OPTIMISTIC|PESSIMISTIC|UNSPECIFIED|READ_LOCK_MODE_UNSPECIFIED)'", + "converterName": "ClientSideStatementValueConverters$ReadLockModeConverter" + } + }, + { + "name": "SHOW VARIABLE DATA_BOOST_ENABLED", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_DATA_BOOST_ENABLED", + "regex": "(?is)\\A\\s*show\\s+variable\\s+data_boost_enabled\\s*\\z", + "method": "statementShowDataBoostEnabled", + "exampleStatements": ["show variable data_boost_enabled"] 
+ }, + { + "name": "SET DATA_BOOST_ENABLED = TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_DATA_BOOST_ENABLED", + "regex": "(?is)\\A\\s*set\\s+data_boost_enabled\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetDataBoostEnabled", + "exampleStatements": ["set data_boost_enabled = true", "set data_boost_enabled = false"], + "setStatement": { + "propertyName": "DATA_BOOST_ENABLED", + "separator": "=", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, + { + "name": "SHOW VARIABLE AUTO_PARTITION_MODE", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_AUTO_PARTITION_MODE", + "regex": "(?is)\\A\\s*show\\s+variable\\s+auto_partition_mode\\s*\\z", + "method": "statementShowAutoPartitionMode", + "exampleStatements": ["show variable auto_partition_mode"] + }, + { + "name": "SET AUTO_PARTITION_MODE = TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_AUTO_PARTITION_MODE", + "regex": "(?is)\\A\\s*set\\s+auto_partition_mode\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetAutoPartitionMode", + "exampleStatements": ["set auto_partition_mode = true", "set auto_partition_mode = false"], + "setStatement": { + "propertyName": "AUTO_PARTITION_MODE", + "separator": "=", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, + { + "name": "SHOW VARIABLE MAX_PARTITIONS", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_MAX_PARTITIONS", + "regex": "(?is)\\A\\s*show\\s+variable\\s+max_partitions\\s*\\z", + "method": "statementShowMaxPartitions", + "exampleStatements": ["show variable max_partitions"] + }, + { + "name": "SET MAX_PARTITIONS = ", + "executorName": "ClientSideStatementSetExecutor", + 
"resultType": "NO_RESULT", + "statementType": "SET_MAX_PARTITIONS", + "regex": "(?is)\\A\\s*set\\s+max_partitions\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetMaxPartitions", + "exampleStatements": ["set max_partitions = 0", "set max_partitions = 10"], + "setStatement": { + "propertyName": "MAX_PARTITIONS", + "separator": "=", + "allowedValues": "(\\d{1,9})", + "converterName": "ClientSideStatementValueConverters$NonNegativeIntegerConverter" + } + }, + { + "name": "SHOW VARIABLE MAX_PARTITIONED_PARALLELISM", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_MAX_PARTITIONED_PARALLELISM", + "regex": "(?is)\\A\\s*show\\s+variable\\s+max_partitioned_parallelism\\s*\\z", + "method": "statementShowMaxPartitionedParallelism", + "exampleStatements": ["show variable max_partitioned_parallelism"] + }, + { + "name": "SET MAX_PARTITIONED_PARALLELISM = ", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_MAX_PARTITIONED_PARALLELISM", + "regex": "(?is)\\A\\s*set\\s+max_partitioned_parallelism\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetMaxPartitionedParallelism", + "exampleStatements": ["set max_partitioned_parallelism = 0", "set max_partitioned_parallelism = 10"], + "setStatement": { + "propertyName": "MAX_PARTITIONED_PARALLELISM", + "separator": "=", + "allowedValues": "(\\d{1,9})", + "converterName": "ClientSideStatementValueConverters$NonNegativeIntegerConverter" + } + }, + { + "name": "SET PROTO_DESCRIPTORS = ''", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_PROTO_DESCRIPTORS", + "regex": "(?is)\\A\\s*set\\s+proto_descriptors\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetProtoDescriptors", + "exampleStatements": ["set proto_descriptors='protodescriptorsbase64'"], + "setStatement": { + "propertyName": "PROTO_DESCRIPTORS", + "separator": "=", + "allowedValues": "'((\\S+)|())'", + "converterName": 
"ClientSideStatementValueConverters$ProtoDescriptorsConverter" + } + }, + { + "name": "SET PROTO_DESCRIPTORS_FILE_PATH = ''", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_PROTO_DESCRIPTORS_FILE_PATH", + "regex": "(?is)\\A\\s*set\\s+proto_descriptors_file_path\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetProtoDescriptorsFilePath", + "exampleStatements": ["set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'"], + "setStatement": { + "propertyName": "PROTO_DESCRIPTORS_FILE_PATH", + "separator": "=", + "allowedValues": "'((\\S+)|())'", + "converterName": "ClientSideStatementValueConverters$ProtoDescriptorsFileConverter" + } + }, + { + "name": "SHOW VARIABLE PROTO_DESCRIPTORS", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_PROTO_DESCRIPTORS", + "regex": "(?is)\\A\\s*show\\s+variable\\s+proto_descriptors\\s*\\z", + "method": "statementShowProtoDescriptors", + "exampleStatements": ["show variable proto_descriptors"] + }, + { + "name": "SHOW VARIABLE PROTO_DESCRIPTORS_FILE_PATH", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_PROTO_DESCRIPTORS_FILE_PATH", + "regex": "(?is)\\A\\s*show\\s+variable\\s+proto_descriptors_file_path\\s*\\z", + "method": "statementShowProtoDescriptorsFilePath", + "exampleStatements": [ + "show variable proto_descriptors_file_path" + ] + } + ] +} diff --git a/java-spanner/google-cloud-spanner/src/main/resources/com/google/cloud/spanner/connection/PG_ClientSideStatements.json b/java-spanner/google-cloud-spanner/src/main/resources/com/google/cloud/spanner/connection/PG_ClientSideStatements.json new file mode 100644 index 000000000000..f5246d5a0cca --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/resources/com/google/cloud/spanner/connection/PG_ClientSideStatements.json @@ -0,0 +1,1113 @@ +{ + "statements": + [ 
+ { + "name": "SHOW [VARIABLE] AUTOCOMMIT", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_AUTOCOMMIT", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?autocommit\\s*\\z", + "method": "statementShowAutocommit", + "exampleStatements": ["show autocommit","show variable autocommit"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.READONLY", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_READONLY", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.readonly\\s*\\z", + "method": "statementShowReadOnly", + "exampleStatements": ["show spanner.readonly","show variable spanner.readonly"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.RETRY_ABORTS_INTERNALLY", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_RETRY_ABORTS_INTERNALLY", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.retry_aborts_internally\\s*\\z", + "method": "statementShowRetryAbortsInternally", + "exampleStatements": ["show spanner.retry_aborts_internally","show variable spanner.retry_aborts_internally"], + "examplePrerequisiteStatements": ["set spanner.readonly=false", "set autocommit=false"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.AUTOCOMMIT_DML_MODE", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_AUTOCOMMIT_DML_MODE", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.autocommit_dml_mode\\s*\\z", + "method": "statementShowAutocommitDmlMode", + "exampleStatements": ["show spanner.autocommit_dml_mode","show variable spanner.autocommit_dml_mode"] + }, + { + "name": "SHOW [VARIABLE] STATEMENT_TIMEOUT", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_STATEMENT_TIMEOUT", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?statement_timeout\\s*\\z", + "method": 
"statementShowStatementTimeout", + "exampleStatements": ["show statement_timeout","show variable statement_timeout"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.TRANSACTION_TIMEOUT", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_TRANSACTION_TIMEOUT", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.transaction_timeout\\s*\\z", + "method": "statementShowTransactionTimeout", + "exampleStatements": ["show spanner.transaction_timeout","show variable spanner.transaction_timeout"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.READ_TIMESTAMP", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_READ_TIMESTAMP", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.read_timestamp\\s*\\z", + "method": "statementShowReadTimestamp", + "exampleStatements": ["show spanner.read_timestamp","show variable spanner.read_timestamp"], + "examplePrerequisiteStatements": ["set spanner.readonly = true", "SELECT 1 AS TEST"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.COMMIT_TIMESTAMP", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_COMMIT_TIMESTAMP", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.commit_timestamp\\s*\\z", + "method": "statementShowCommitTimestamp", + "exampleStatements": ["show spanner.commit_timestamp","show variable spanner.commit_timestamp"], + "examplePrerequisiteStatements": ["update foo set bar=1"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.READ_ONLY_STALENESS", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_READ_ONLY_STALENESS", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.read_only_staleness\\s*\\z", + "method": "statementShowReadOnlyStaleness", + "exampleStatements": ["show spanner.read_only_staleness","show variable spanner.read_only_staleness"] + }, + { + "name": "SHOW 
[VARIABLE] SPANNER.DIRECTED_READ", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_DIRECTED_READ", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.directed_read\\s*\\z", + "method": "statementShowDirectedRead", + "exampleStatements": ["show spanner.directed_read", "show variable spanner.directed_read"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.OPTIMIZER_VERSION", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_OPTIMIZER_VERSION", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.optimizer_version\\s*\\z", + "method": "statementShowOptimizerVersion", + "exampleStatements": ["show spanner.optimizer_version","show variable spanner.optimizer_version"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.OPTIMIZER_STATISTICS_PACKAGE", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_OPTIMIZER_STATISTICS_PACKAGE", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.optimizer_statistics_package\\s*\\z", + "method": "statementShowOptimizerStatisticsPackage", + "exampleStatements": ["show spanner.optimizer_statistics_package","show variable spanner.optimizer_statistics_package"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.RETURN_COMMIT_STATS", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_RETURN_COMMIT_STATS", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.return_commit_stats\\s*\\z", + "method": "statementShowReturnCommitStats", + "exampleStatements": ["show spanner.return_commit_stats","show variable spanner.return_commit_stats"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.MAX_COMMIT_DELAY", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_MAX_COMMIT_DELAY", + "regex": 
"(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.max_commit_delay\\s*\\z", + "method": "statementShowMaxCommitDelay", + "exampleStatements": ["show spanner.max_commit_delay", "show variable spanner.max_commit_delay"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.COMMIT_RESPONSE", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_COMMIT_RESPONSE", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.commit_response\\s*\\z", + "method": "statementShowCommitResponse", + "exampleStatements": ["show spanner.commit_response","show variable spanner.commit_response"], + "examplePrerequisiteStatements": ["update foo set bar=1"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.STATEMENT_TAG", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_STATEMENT_TAG", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.statement_tag\\s*\\z", + "method": "statementShowStatementTag", + "exampleStatements": ["show spanner.statement_tag","show variable spanner.statement_tag"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.TRANSACTION_TAG", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_TRANSACTION_TAG", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.transaction_tag\\s*\\z", + "method": "statementShowTransactionTag", + "exampleStatements": ["show spanner.transaction_tag","show variable spanner.transaction_tag"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.EXCLUDE_TXN_FROM_CHANGE_STREAMS", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_EXCLUDE_TXN_FROM_CHANGE_STREAMS", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.exclude_txn_from_change_streams\\s*\\z", + "method": "statementShowExcludeTxnFromChangeStreams", + "exampleStatements": ["show spanner.exclude_txn_from_change_streams","show variable 
spanner.exclude_txn_from_change_streams"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.RPC_PRIORITY", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_RPC_PRIORITY", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.rpc_priority\\s*\\z", + "method": "statementShowRPCPriority", + "exampleStatements": ["show spanner.rpc_priority","show variable spanner.rpc_priority"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.SAVEPOINT_SUPPORT", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_SAVEPOINT_SUPPORT", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.savepoint_support\\s*\\z", + "method": "statementShowSavepointSupport", + "exampleStatements": ["show spanner.savepoint_support","show variable spanner.savepoint_support"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.READ_LOCK_MODE", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_READ_LOCK_MODE", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.read_lock_mode\\s*\\z", + "method": "statementShowReadLockMode", + "exampleStatements": ["show spanner.read_lock_mode","show variable spanner.read_lock_mode"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.delay_transaction_start_until_first_write\\s*\\z", + "method": "statementShowDelayTransactionStartUntilFirstWrite", + "exampleStatements": ["show spanner.delay_transaction_start_until_first_write","show variable spanner.delay_transaction_start_until_first_write"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.KEEP_TRANSACTION_ALIVE", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + 
"statementType": "SHOW_KEEP_TRANSACTION_ALIVE", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.keep_transaction_alive\\s*\\z", + "method": "statementShowKeepTransactionAlive", + "exampleStatements": ["show spanner.keep_transaction_alive","show variable spanner.keep_transaction_alive"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.AUTO_BATCH_DML", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_AUTO_BATCH_DML", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.auto_batch_dml\\s*\\z", + "method": "statementShowAutoBatchDml", + "exampleStatements": ["show spanner.auto_batch_dml", "show variable spanner.auto_batch_dml"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.AUTO_BATCH_DML_UPDATE_COUNT", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_AUTO_BATCH_DML_UPDATE_COUNT", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.auto_batch_dml_update_count\\s*\\z", + "method": "statementShowAutoBatchDmlUpdateCount", + "exampleStatements": ["show spanner.auto_batch_dml_update_count","show variable spanner.auto_batch_dml_update_count"] + }, + { + "name": "SHOW [VARIABLE] SPANNER.AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.auto_batch_dml_update_count_verification\\s*\\z", + "method": "statementShowAutoBatchDmlUpdateCountVerification", + "exampleStatements": ["show spanner.auto_batch_dml_update_count_verification","show variable spanner.auto_batch_dml_update_count_verification"] + }, + { + "name": "SHOW [VARIABLE] TRANSACTION ISOLATION LEVEL", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_TRANSACTION_ISOLATION_LEVEL", + "regex": 
"(?is)\\A\\s*show\\s+(?:variable\\s+)?transaction\\s*isolation\\s*level\\s*\\z", + "method": "statementShowTransactionIsolationLevel", + "exampleStatements": ["show transaction isolation level","show variable transaction isolation level"] + }, + { + "name": "SHOW [VARIABLE] DEFAULT_TRANSACTION_ISOLATION", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_DEFAULT_TRANSACTION_ISOLATION", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?default_transaction_isolation\\s*\\z", + "method": "statementShowDefaultTransactionIsolation", + "exampleStatements": ["show default_transaction_isolation","show variable default_transaction_isolation"] + }, + { + "name": "EXPLAIN ", + "executorName": "ClientSideStatementExplainExecutor", + "resultType": "RESULT_SET", + "statementType": "EXPLAIN", + "regex": "(?is)\\A\\s*explain(\\s+|\\()(.*)\\z", + "method": "statementExplain", + "exampleStatements": [] + }, + { + "name": "PARTITION ", + "executorName": "ClientSideStatementPartitionExecutor", + "resultType": "RESULT_SET", + "statementType": "PARTITION", + "regex": "(?is)\\A\\s*partition(\\s+|\\()(.*)\\z", + "method": "statementPartition", + "exampleStatements": [] + }, + { + "name": "RUN PARTITION ['']", + "executorName": "ClientSideStatementRunPartitionExecutor", + "resultType": "RESULT_SET", + "statementType": "RUN_PARTITION", + "regex": "(?is)\\A\\s*run\\s+partition(?:\\s*'(.*)')?\\s*\\z", + "method": "statementRunPartition", + "exampleStatements": [] + }, + { + "name": "RUN PARTITIONED QUERY ", + "executorName": "ClientSideStatementRunPartitionedQueryExecutor", + "resultType": "RESULT_SET", + "statementType": "RUN_PARTITIONED_QUERY", + "regex": "(?is)\\A\\s*run\\s+partitioned\\s+query(\\s+|\\()(.*)\\z", + "method": "statementRunPartitionedQuery", + "exampleStatements": [] + }, + { + "name": "{START | BEGIN} [TRANSACTION | WORK] [{ (READ ONLY|READ WRITE) [[,] (ISOLATION LEVEL (DEFAULT|SERIALIZABLE|REPEATABLE READ))] [[,] 
NOT DEFERRABLE]}]", + "executorName": "ClientSideStatementPgBeginExecutor", + "resultType": "NO_RESULT", + "statementType": "BEGIN", + "regex": "(?is)\\A\\s*(?:begin|start)(?:\\s+transaction|\\s+work)?((?:(?:\\s+|\\s*,\\s*)read\\s+only|(?:\\s+|\\s*,\\s*)read\\s+write|(?:\\s+|\\s*,\\s*)isolation\\s+level\\s+default|(?:\\s+|\\s*,\\s*)isolation\\s+level\\s+serializable|(?:\\s+|\\s*,\\s*)isolation\\s+level\\s+repeatable\\s+read|(?:\\s+|\\s*,\\s*)not\\s+deferrable)*)?\\s*\\z", + "method": "statementBeginPgTransaction", + "exampleStatements": [ + "begin", "start", "begin transaction", "start transaction", "begin work", "start work", + "begin read only", "start read only", "begin transaction read only", "start transaction read only", "begin work read only", "start work read only", + "begin read write", "start read write", "begin transaction read write", "start transaction read write", "begin work read write", "start work read write", + "begin isolation level default", "start isolation level default", "begin transaction isolation level default", "start transaction isolation level default", "begin work isolation level default", "start work isolation level default", + "begin isolation level serializable", "start isolation level serializable", "begin transaction isolation level serializable", "start transaction isolation level serializable", "begin work isolation level serializable", "start work isolation level serializable", + "begin isolation level repeatable read", "start isolation level repeatable read", "begin transaction isolation level repeatable read", "start transaction isolation level repeatable read", "begin work isolation level repeatable read", "start work isolation level repeatable read", + "begin isolation level default read write", "start isolation level default read only", "begin transaction isolation level default read only", "start transaction isolation level default read write", "begin work isolation level default read write", "start work isolation level 
default read only", + "begin isolation level serializable read write", "start isolation level serializable read write", "begin transaction isolation level serializable read only", "start transaction isolation level serializable read write", "begin work isolation level serializable read write", "start work isolation level serializable read only", + "begin isolation level repeatable read read write", "start isolation level repeatable read read write", "begin transaction isolation level repeatable read read only", "start transaction isolation level repeatable read read write", "begin work isolation level repeatable read read write", "start work isolation level repeatable read read only", + "begin isolation level serializable, read write", "start isolation level serializable, read write", "begin transaction isolation level serializable, read only", "start transaction isolation level serializable, read write", "begin work isolation level serializable, read write", "start work isolation level serializable, read only", + "begin isolation level repeatable read, read write", "start isolation level repeatable read, read write", "begin transaction isolation level repeatable read, read only", "start transaction isolation level repeatable read, read write", "begin work isolation level repeatable read, read write", "start work isolation level repeatable read, read only", + "begin not deferrable", "start not deferrable", "begin transaction not deferrable", "start transaction not deferrable", "begin work not deferrable", "start work not deferrable", + "begin read only not deferrable", "start read only not deferrable", "begin transaction read only not deferrable", "start transaction read only not deferrable", "begin work read only not deferrable", "start work read only not deferrable", + "begin read write not deferrable", "start read write not deferrable", "begin transaction read write not deferrable", "start transaction read write not deferrable", "begin work read write not 
deferrable", "start work read write not deferrable", + "begin isolation level default not deferrable", "start isolation level default not deferrable", "begin transaction isolation level default not deferrable", "start transaction isolation level default not deferrable", "begin work isolation level default not deferrable", "start work isolation level default not deferrable", + "begin isolation level serializable not deferrable", "start isolation level serializable not deferrable", "begin transaction isolation level serializable not deferrable", "start transaction isolation level serializable not deferrable", "begin work isolation level serializable not deferrable", "start work isolation level serializable not deferrable", + "begin isolation level default read write not deferrable", "start isolation level default read only not deferrable", "begin transaction isolation level default read only not deferrable", "start transaction isolation level default read write not deferrable", "begin work isolation level default read write not deferrable", "start work isolation level default read only not deferrable", + "begin isolation level serializable read write not deferrable", "start isolation level serializable read write not deferrable", "begin transaction isolation level serializable read only not deferrable", "start transaction isolation level serializable read write not deferrable", "begin work isolation level serializable read write not deferrable", "start work isolation level serializable read only not deferrable", + "begin isolation level serializable, read write, not deferrable", "start isolation level serializable, read write, not deferrable", "begin transaction isolation level serializable, read only, not deferrable", "start transaction isolation level serializable, read write, not deferrable", "begin work isolation level serializable, read write, not deferrable", "start work isolation level serializable, read only", + "begin transaction not deferrable", "start 
transaction not deferrable", "begin work not deferrable", "start work not deferrable", + "begin not deferrable read only", "start read only", "begin transaction not deferrable read only", "start transaction read only", "begin work not deferrable read only", "start work read only", + "begin not deferrable read write", "start read write", "begin transaction not deferrable read write", "start transaction read write", "begin work not deferrable read write", "start work read write", + "begin not deferrable isolation level default", "start isolation level default", "begin transaction not deferrable isolation level default", "start transaction isolation level default", "begin work not deferrable isolation level default", "start work isolation level default", + "begin not deferrable isolation level serializable", "start isolation level serializable", "begin transaction not deferrable isolation level serializable", "start transaction isolation level serializable", "begin work not deferrable isolation level serializable", "start work isolation level serializable", + "begin not deferrable isolation level default read write", "start isolation level default read only", "begin transaction not deferrable isolation level default read only", "start transaction isolation level default read write", "begin work not deferrable isolation level default read write", "start work isolation level default read only", + "begin not deferrable isolation level serializable read write", "start isolation level serializable read write", "begin transaction not deferrable isolation level serializable read only", "start transaction isolation level serializable read write", "begin work not deferrable isolation level serializable read write", "start work isolation level serializable read only", + "begin not deferrable isolation level serializable, read write", "start isolation level serializable, read write", "begin transaction not deferrable isolation level serializable, read only", "start transaction 
isolation level serializable, read write", "begin work not deferrable isolation level serializable, read write", "start work isolation level serializable, read only", + "begin not deferrable isolation level repeatable read, read write", "start isolation level repeatable read, read write", "begin transaction not deferrable isolation level repeatable read, read only", "start transaction isolation level repeatable read, read write", "begin work not deferrable isolation level repeatable read, read write", "start work isolation level repeatable read, read only" + ] + }, + { + "name": "{END | COMMIT} [TRANSACTION | WORK] [AND NO CHAIN]", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "NO_RESULT", + "statementType": "COMMIT", + "regex": "(?is)\\A\\s*(?:commit|end)(?:\\s+transaction|\\s+work)?(?:\\s+and\\s+no\\s+chain)?\\s*\\z", + "method": "statementCommit", + "exampleStatements": [ + "commit", "commit transaction", "commit work", "commit and no chain", "commit transaction and no chain", "commit work and no chain", + "end", "end transaction", "end work", "end and no chain", "end transaction and no chain", "end work and no chain" + ], + "examplePrerequisiteStatements": ["begin transaction"] + }, + { + "name": "{ROLLBACK | ABORT} [TRANSACTION | WORK] [AND NO CHAIN]", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "NO_RESULT", + "statementType": "ROLLBACK", + "regex": "(?is)\\A\\s*(?:rollback|abort)(?:\\s+transaction|\\s+work)?(?:\\s+and\\s+no\\s+chain)?\\s*\\z", + "method": "statementRollback", + "exampleStatements": [ + "rollback", "rollback transaction", "rollback work", "rollback and no chain", "rollback transaction and no chain", "rollback work and no chain", + "abort", "abort transaction", "abort work", "abort and no chain", "abort transaction and no chain", "abort work and no chain" + ], + "examplePrerequisiteStatements": ["begin transaction"] + }, + { + "name": "START BATCH DDL", + "executorName": 
"ClientSideStatementNoParamExecutor", + "resultType": "NO_RESULT", + "statementType": "START_BATCH_DDL", + "regex": "(?is)\\A\\s*(?:start)(?:\\s+batch)(?:\\s+ddl)\\s*\\z", + "method": "statementStartBatchDdl", + "exampleStatements": ["start batch ddl"] + }, + { + "name": "START BATCH DML", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "NO_RESULT", + "statementType": "START_BATCH_DML", + "regex": "(?is)\\A\\s*(?:start)(?:\\s+batch)(?:\\s+dml)\\s*\\z", + "method": "statementStartBatchDml", + "exampleStatements": ["start batch dml"] + }, + { + "name": "RUN BATCH", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "NO_RESULT", + "statementType": "RUN_BATCH", + "regex": "(?is)\\A\\s*(?:run)(?:\\s+batch)\\s*\\z", + "method": "statementRunBatch", + "exampleStatements": ["run batch"], + "examplePrerequisiteStatements": ["start batch ddl"] + }, + { + "name": "ABORT BATCH", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "NO_RESULT", + "statementType": "ABORT_BATCH", + "regex": "(?is)\\A\\s*(?:abort)(?:\\s+batch)\\s*\\z", + "method": "statementAbortBatch", + "exampleStatements": ["abort batch"], + "examplePrerequisiteStatements": ["start batch ddl"] + }, + { + "name": "RESET ALL", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "NO_RESULT", + "statementType": "RESET_ALL", + "regex": "(?is)\\A\\s*(?:reset)(?:\\s+all)\\s*\\z", + "method": "statementResetAll", + "exampleStatements": ["reset all"] + }, + { + "name": "SET AUTOCOMMIT =|TO TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_AUTOCOMMIT", + "regex": "(?is)\\A\\s*set\\s+autocommit(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetAutocommit", + "exampleStatements": ["set autocommit = true", "set autocommit = false", "set autocommit to true", "set autocommit to false"], + "setStatement": { + "propertyName": "AUTOCOMMIT", + "separator": 
"(?:=|\\s+TO\\s+)", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, + { + "name": "SET SPANNER.READONLY =|TO TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_READONLY", + "regex": "(?is)\\A\\s*set\\s+spanner\\.readonly(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetReadOnly", + "exampleStatements": ["set spanner.readonly = true", "set spanner.readonly = false", "set spanner.readonly to true", "set spanner.readonly to false"], + "setStatement": { + "propertyName": "SPANNER.READONLY", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, + { + "name": "SET [SESSION|LOCAL] SPANNER.RETRY_ABORTS_INTERNALLY =|TO TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_RETRY_ABORTS_INTERNALLY", + "regex": "(?is)\\A\\s*set\\s+((?:session|local)\\s+)?spanner\\.retry_aborts_internally(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetRetryAbortsInternally", + "exampleStatements": [ + "set spanner.retry_aborts_internally = true", + "set spanner.retry_aborts_internally = false", + "set spanner.retry_aborts_internally to true", + "set spanner.retry_aborts_internally to false", + "set local spanner.retry_aborts_internally = true", + "set local spanner.retry_aborts_internally = false", + "set local spanner.retry_aborts_internally to true", + "set local spanner.retry_aborts_internally to false", + "set session spanner.retry_aborts_internally = true", + "set session spanner.retry_aborts_internally = false", + "set session spanner.retry_aborts_internally to true", + "set session spanner.retry_aborts_internally to false" + ], + "examplePrerequisiteStatements": ["set spanner.readonly = false", "set autocommit = false"], + "setStatement": { + "propertyName": 
"SPANNER.RETRY_ABORTS_INTERNALLY", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, + { + "name": "SET SPANNER.AUTOCOMMIT_DML_MODE =|TO 'PARTITIONED_NON_ATOMIC'|'TRANSACTIONAL'|'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_AUTOCOMMIT_DML_MODE", + "regex": "(?is)\\A\\s*set\\s+spanner\\.autocommit_dml_mode(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetAutocommitDmlMode", + "exampleStatements": [ + "set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'", + "set spanner.autocommit_dml_mode='TRANSACTIONAL'", + "set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'", + "set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'", + "set spanner.autocommit_dml_mode to 'TRANSACTIONAL'", + "set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'" + ], + "setStatement": { + "propertyName": "SPANNER.AUTOCOMMIT_DML_MODE", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "'(PARTITIONED_NON_ATOMIC|TRANSACTIONAL|TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC)'", + "converterName": "ClientSideStatementValueConverters$AutocommitDmlModeConverter" + } + }, + { + "name": "SET STATEMENT_TIMEOUT =|TO ''|INT8|DEFAULT", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_STATEMENT_TIMEOUT", + "regex": "(?is)\\A\\s*set\\s+statement_timeout(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetStatementTimeout", + "exampleStatements": [ + "set statement_timeout=default", + "set statement_timeout = default ", + "set statement_timeout = DEFAULT ", + "set statement_timeout='1s'", + "set statement_timeout = '1s' ", + "set statement_timeout='100ms'", + "set statement_timeout=100", + "set statement_timeout = 100 ", + "set 
statement_timeout='10000us'", + "set statement_timeout='9223372036854775807ns'", + "set statement_timeout to default", + "set statement_timeout to '1s'", + "set statement_timeout to '100ms'", + "set statement_timeout to 100", + "set statement_timeout to '10000us'", + "set statement_timeout to '9223372036854775807ns'" + ], + "setStatement": { + "propertyName": "STATEMENT_TIMEOUT", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "('(\\d{1,19})(s|ms|us|ns)'|\\d{1,19}|DEFAULT)", + "converterName": "ClientSideStatementValueConverters$PgDurationConverter" + } + }, + { + "name": "SET SPANNER.TRANSACTION_TIMEOUT =|TO ''|INT8|DEFAULT", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_TRANSACTION_TIMEOUT", + "regex": "(?is)\\A\\s*set\\s+spanner\\.transaction_timeout(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetTransactionTimeout", + "exampleStatements": [ + "set spanner.transaction_timeout=default", + "set spanner.transaction_timeout = default ", + "set spanner.transaction_timeout = DEFAULT ", + "set spanner.transaction_timeout='1s'", + "set spanner.transaction_timeout = '1s' ", + "set spanner.transaction_timeout='100ms'", + "set spanner.transaction_timeout=100", + "set spanner.transaction_timeout = 100 ", + "set spanner.transaction_timeout='10000us'", + "set spanner.transaction_timeout='9223372036854775807ns'", + "set spanner.transaction_timeout to default", + "set spanner.transaction_timeout to '1s'", + "set spanner.transaction_timeout to '100ms'", + "set spanner.transaction_timeout to 100", + "set spanner.transaction_timeout to '10000us'", + "set spanner.transaction_timeout to '9223372036854775807ns'" + ], + "setStatement": { + "propertyName": "SPANNER.TRANSACTION_TIMEOUT", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "('(\\d{1,19})(s|ms|us|ns)'|\\d{1,19}|DEFAULT)", + "converterName": "ClientSideStatementValueConverters$PgDurationConverter" + } + }, + { + "name": "SET TRANSACTION { (READ 
ONLY|READ WRITE) [[,] (ISOLATION LEVEL (DEFAULT|SERIALIZABLE|REPEATABLE READ))] }", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_TRANSACTION_MODE", + "regex": "(?is)\\A\\s*set\\s+transaction\\s*(?:\\s+)\\s*(.*)\\z", + "method": "statementSetPgTransactionMode", + "exampleStatements": ["set transaction read only", "set transaction read write", "set transaction isolation level default", "set transaction isolation level serializable", "set transaction isolation level repeatable read"], + "examplePrerequisiteStatements": ["set autocommit = false"], + "setStatement": { + "propertyName": "TRANSACTION", + "separator": "\\s+", + "allowedValues": "(((?:\\s*|\\s*,\\s*)READ\\s+ONLY|(?:\\s*|\\s*,\\s*)READ\\s+WRITE|(?:\\s*|\\s*,\\s*)ISOLATION\\s+LEVEL\\s+DEFAULT|(?:\\s*|\\s*,\\s*)ISOLATION\\s+LEVEL\\s+SERIALIZABLE|(?:\\s*|\\s*,\\s*)ISOLATION\\s+LEVEL\\s+REPEATABLE\\s+READ)+)", + "converterName": "ClientSideStatementValueConverters$PgTransactionModeConverter" + } + }, + { + "name": "SET SESSION CHARACTERISTICS AS TRANSACTION { (READ ONLY|READ WRITE) [[,] (ISOLATION LEVEL (DEFAULT|SERIALIZABLE|REPEATABLE READ))] }", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_READONLY", + "regex": "(?is)\\A\\s*set\\s+session\\s+characteristics\\s+as\\s+transaction\\s*(?:\\s+)\\s*(.*)\\z", + "method": "statementSetPgSessionCharacteristicsTransactionMode", + "exampleStatements": ["set session characteristics as transaction read only", "set session characteristics as transaction read write", "set session characteristics as transaction isolation level default", "set session characteristics as transaction isolation level serializable", "set session characteristics as transaction isolation level repeatable read"], + "setStatement": { + "propertyName": "SESSION\\s+CHARACTERISTICS\\s+AS\\s+TRANSACTION", + "separator": "\\s+", + "allowedValues": 
"(((?:\\s*|\\s*,\\s*)READ\\s+ONLY|(?:\\s*|\\s*,\\s*)READ\\s+WRITE|(?:\\s*|\\s*,\\s*)ISOLATION\\s+LEVEL\\s+DEFAULT|(?:\\s*|\\s*,\\s*)ISOLATION\\s+LEVEL\\s+SERIALIZABLE|(?:\\s*|\\s*,\\s*)ISOLATION\\s+LEVEL\\s+REPEATABLE\\s+READ)+)", + "converterName": "ClientSideStatementValueConverters$PgTransactionModeConverter" + } + }, + { + "name": "SET DEFAULT_TRANSACTION_ISOLATION =|TO 'SERIALIZABLE'|SERIALIZABLE|'REPEATABLE READ'|REPEATABLE READ|DEFAULT", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_READONLY", + "regex": "(?is)\\A\\s*set\\s+default_transaction_isolation(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetPgDefaultTransactionIsolation", + "exampleStatements": [ + "set default_transaction_isolation=serializable", + "set default_transaction_isolation to serializable", + "set default_transaction_isolation to 'serializable'", + "set default_transaction_isolation = 'serializable'", + "set default_transaction_isolation = \"SERIALIZABLE\"", + "set default_transaction_isolation=repeatable read", + "set default_transaction_isolation to repeatable read", + "set default_transaction_isolation to 'repeatable read'", + "set default_transaction_isolation = 'repeatable read'", + "set default_transaction_isolation = \"REPEATABLE READ\"", + "set default_transaction_isolation = DEFAULT", + "set default_transaction_isolation to DEFAULT" + ], + "setStatement": { + "propertyName": "default_transaction_isolation", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "(SERIALIZABLE|'SERIALIZABLE'|\"SERIALIZABLE\"|REPEATABLE\\s+READ|'REPEATABLE\\s+READ'|\"REPEATABLE\\s+READ\"|DEFAULT)", + "converterName": "ClientSideStatementValueConverters$PgTransactionIsolationConverter" + } + }, + { + "name": "SET DEFAULT_TRANSACTION_READ_ONLY =|TO TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_READONLY", + "regex": 
"(?is)\\A\\s*set\\s+default_transaction_read_only(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetReadOnly", + "exampleStatements": [ + "set default_transaction_read_only = true", + "set default_transaction_read_only = false", + "set default_transaction_read_only = t", + "set default_transaction_read_only = f", + "set default_transaction_read_only to 't'", + "set default_transaction_read_only to \"f\"", + "set default_transaction_read_only = on", + "set default_transaction_read_only = off", + "set default_transaction_read_only = 1", + "set default_transaction_read_only = 0", + "set default_transaction_read_only = yes", + "set default_transaction_read_only = no", + "set default_transaction_read_only = y", + "set default_transaction_read_only = n" + ], + "setStatement": { + "propertyName": "DEFAULT_TRANSACTION_READ_ONLY", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "(.+)", + "converterName": "ClientSideStatementValueConverters$PgBooleanConverter" + } + }, + { + "name": "SET SPANNER.READ_ONLY_STALENESS =|TO 'STRONG' | 'MIN_READ_TIMESTAMP ' | 'READ_TIMESTAMP ' | 'MAX_STALENESS s|ms|us|ns' | 'EXACT_STALENESS (s|ms|us|ns)'", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_READ_ONLY_STALENESS", + "regex": "(?is)\\A\\s*set\\s+spanner\\.read_only_staleness(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetReadOnlyStaleness", + "exampleStatements": [ + "set spanner.read_only_staleness='STRONG'", + "set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'", + "set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'", + "set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'", + "set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'", + "set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'", + "set spanner.read_only_staleness='READ_TIMESTAMP 
2018-01-02T03:04:05.54321+05:30'", + "set spanner.read_only_staleness='MAX_STALENESS 12s'", + "set spanner.read_only_staleness='MAX_STALENESS 100ms'", + "set spanner.read_only_staleness='MAX_STALENESS 99999us'", + "set spanner.read_only_staleness='MAX_STALENESS 10ns'", + "set spanner.read_only_staleness='EXACT_STALENESS 15s'", + "set spanner.read_only_staleness='EXACT_STALENESS 1500ms'", + "set spanner.read_only_staleness='EXACT_STALENESS 15000000us'", + "set spanner.read_only_staleness='EXACT_STALENESS 9999ns'", + "set spanner.read_only_staleness to 'STRONG'", + "set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'", + "set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'", + "set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'", + "set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'", + "set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'", + "set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'", + "set spanner.read_only_staleness to 'MAX_STALENESS 12s'", + "set spanner.read_only_staleness to 'MAX_STALENESS 100ms'", + "set spanner.read_only_staleness to 'MAX_STALENESS 99999us'", + "set spanner.read_only_staleness to 'MAX_STALENESS 10ns'", + "set spanner.read_only_staleness to 'EXACT_STALENESS 15s'", + "set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'", + "set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'", + "set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'" + ], + "setStatement": { + "propertyName": "SPANNER.READ_ONLY_STALENESS", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "'((STRONG)|(MIN_READ_TIMESTAMP)[\\t ]+((\\d{4})-(\\d{2})-(\\d{2})([Tt](\\d{2}):(\\d{2}):(\\d{2})(\\.\\d{1,9})?)([Zz]|([+-])(\\d{2}):(\\d{2})))|(READ_TIMESTAMP)[\\t 
]+((\\d{4})-(\\d{2})-(\\d{2})([Tt](\\d{2}):(\\d{2}):(\\d{2})(\\.\\d{1,9})?)([Zz]|([+-])(\\d{2}):(\\d{2})))|(MAX_STALENESS)[\\t ]+((\\d{1,19})(s|ms|us|ns))|(EXACT_STALENESS)[\\t ]+((\\d{1,19})(s|ms|us|ns)))'", + "converterName": "ClientSideStatementValueConverters$ReadOnlyStalenessConverter" + } + }, + { + "name": "SET SPANNER.DIRECTED_READ =|TO ''|''", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_DIRECTED_READ", + "regex": "(?is)\\A\\s*set\\s+spanner\\.directed_read(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetDirectedRead", + "exampleStatements": ["set spanner.directed_read='{\"includeReplicas\":{\"replicaSelections\":[{\"location\":\"eu-west1\",\"type\":\"READ_ONLY\"}]}}'", "set spanner.directed_read=''"], + "setStatement": { + "propertyName": "SPANNER.DIRECTED_READ", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "'((\\S+)|())'", + "converterName": "ClientSideStatementValueConverters$DirectedReadOptionsConverter" + } + }, + { + "name": "SET SPANNER.OPTIMIZER_VERSION =|TO ''|'LATEST'|''", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_OPTIMIZER_VERSION", + "regex": "(?is)\\A\\s*set\\s+spanner\\.optimizer_version(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetOptimizerVersion", + "exampleStatements": [ + "set spanner.optimizer_version='1'", + "set spanner.optimizer_version='200'", + "set spanner.optimizer_version='LATEST'", + "set spanner.optimizer_version=''", + "set spanner.optimizer_version to '1'", + "set spanner.optimizer_version to '200'", + "set spanner.optimizer_version to 'LATEST'", + "set spanner.optimizer_version to ''" + ], + "setStatement": { + "propertyName": "SPANNER.OPTIMIZER_VERSION", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "'((\\d{1,20})|(LATEST)|())'", + "converterName": "ClientSideStatementValueConverters$StringValueConverter" + } + }, + { + "name": "SET 
SPANNER.OPTIMIZER_STATISTICS_PACKAGE =|TO ''|''", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_OPTIMIZER_STATISTICS_PACKAGE", + "regex": "(?is)\\A\\s*set\\s+spanner\\.optimizer_statistics_package(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetOptimizerStatisticsPackage", + "exampleStatements": [ + "set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'", + "set spanner.optimizer_statistics_package=''", + "set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'", + "set spanner.optimizer_statistics_package to ''" + ], + "setStatement": { + "propertyName": "SPANNER.OPTIMIZER_STATISTICS_PACKAGE", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "'((\\S+)|())'", + "converterName": "ClientSideStatementValueConverters$StringValueConverter" + } + }, + { + "name": "SET SPANNER.RETURN_COMMIT_STATS =|TO TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_RETURN_COMMIT_STATS", + "regex": "(?is)\\A\\s*set\\s+spanner\\.return_commit_stats(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetReturnCommitStats", + "exampleStatements": ["set spanner.return_commit_stats = true", "set spanner.return_commit_stats = false", "set spanner.return_commit_stats to true", "set spanner.return_commit_stats to false"], + "setStatement": { + "propertyName": "SPANNER.RETURN_COMMIT_STATS", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, + { + "name": "SET SPANNER.MAX_COMMIT_DELAY =|TO ''|NULL", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_MAX_COMMIT_DELAY", + "regex": "(?is)\\A\\s*set\\s+spanner\\.max_commit_delay(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetMaxCommitDelay", + "exampleStatements": [ + "set spanner.max_commit_delay=null", + "set 
spanner.max_commit_delay = NULL", + "set spanner.max_commit_delay = null ", + "set spanner.max_commit_delay='1s'", + "set spanner.max_commit_delay = '1s'", + "set spanner.max_commit_delay = '1s' ", + "set spanner.max_commit_delay=1000", + "set spanner.max_commit_delay = 1000", + "set spanner.max_commit_delay = 1000 ", + "set spanner.max_commit_delay='100ms'", + "set spanner.max_commit_delay to '10000us'", + "set spanner.max_commit_delay TO '9223372036854775807ns'"], + "setStatement": { + "propertyName": "SPANNER.MAX_COMMIT_DELAY", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "('(\\d{1,19})(s|ms|us|ns)'|\\d{1,19}|NULL)", + "converterName": "ClientSideStatementValueConverters$DurationConverter" + } + }, + { + "name": "SET SPANNER.STATEMENT_TAG =|TO ''", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_STATEMENT_TAG", + "regex": "(?is)\\A\\s*set\\s+spanner\\.statement_tag(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetStatementTag", + "exampleStatements": [ + "set spanner.statement_tag='tag1'", + "set spanner.statement_tag='tag2'", + "set spanner.statement_tag=''", + "set spanner.statement_tag to 'tag1'", + "set spanner.statement_tag to 'tag2'", + "set spanner.statement_tag to ''", + "set spanner.statement_tag to 'test_tag'" + ], + "setStatement": { + "propertyName": "SPANNER.STATEMENT_TAG", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "'(.*)'", + "converterName": "ClientSideStatementValueConverters$StringValueConverter" + } + }, + { + "name": "SET SPANNER.TRANSACTION_TAG =|TO ''", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_TRANSACTION_TAG", + "regex": "(?is)\\A\\s*set\\s+spanner\\.transaction_tag(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetTransactionTag", + "exampleStatements": [ + "set spanner.transaction_tag='tag1'", + "set spanner.transaction_tag='tag2'", + "set spanner.transaction_tag=''", + "set 
spanner.transaction_tag to 'tag1'", +      "set spanner.transaction_tag to 'tag2'", +      "set spanner.transaction_tag to ''", +      "set spanner.transaction_tag to 'test_tag'" +    ], +    "examplePrerequisiteStatements": ["set autocommit = false"], +    "setStatement": { +      "propertyName": "SPANNER.TRANSACTION_TAG", +      "separator": "(?:=|\\s+TO\\s+)", +      "allowedValues": "'(.*)'", +      "converterName": "ClientSideStatementValueConverters$StringValueConverter" +    } +  }, +  { +    "name": "SET SPANNER.EXCLUDE_TXN_FROM_CHANGE_STREAMS =|TO TRUE|FALSE", +    "executorName": "ClientSideStatementSetExecutor", +    "resultType": "NO_RESULT", +    "statementType": "SET_EXCLUDE_TXN_FROM_CHANGE_STREAMS", +    "regex": "(?is)\\A\\s*set\\s+spanner\\.exclude_txn_from_change_streams(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", +    "method": "statementSetExcludeTxnFromChangeStreams", +    "exampleStatements": ["set spanner.exclude_txn_from_change_streams = true", "set spanner.exclude_txn_from_change_streams = false", "set spanner.exclude_txn_from_change_streams to true", "set spanner.exclude_txn_from_change_streams to false"], +    "setStatement": { +      "propertyName": "SPANNER.EXCLUDE_TXN_FROM_CHANGE_STREAMS", +      "separator": "(?:=|\\s+TO\\s+)", +      "allowedValues": "(TRUE|FALSE)", +      "converterName": "ClientSideStatementValueConverters$BooleanConverter" +    } +  }, +  { +    "name": "SET SPANNER.RPC_PRIORITY =|TO 'HIGH'|'MEDIUM'|'LOW'|'NULL'", +    "executorName": "ClientSideStatementSetExecutor", +    "resultType": "NO_RESULT", +    "statementType": "SET_RPC_PRIORITY", +    "regex": "(?is)\\A\\s*set\\s+spanner\\.rpc_priority(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", +    "method": "statementSetRPCPriority", +    "exampleStatements": [ +      "set spanner.rpc_priority='HIGH'", +      "set spanner.rpc_priority='MEDIUM'", +      "set spanner.rpc_priority='LOW'", +      "set spanner.rpc_priority='NULL'", +      "set spanner.rpc_priority to 'HIGH'", +      "set spanner.rpc_priority to 'MEDIUM'", +      "set spanner.rpc_priority to 'LOW'", +      "set spanner.rpc_priority to 'NULL'" +    ], +    "setStatement": { +      "propertyName": 
"SPANNER.RPC_PRIORITY", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "'(HIGH|MEDIUM|LOW|NULL)'", + "converterName": "ClientSideStatementValueConverters$RpcPriorityConverter" + } + }, + { + "name": "SET SPANNER.SAVEPOINT_SUPPORT =|TO 'ENABLED'|'FAIL_AFTER_ROLLBACK'|'DISABLED'", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_SAVEPOINT_SUPPORT", + "regex": "(?is)\\A\\s*set\\s+spanner\\.savepoint_support(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetSavepointSupport", + "exampleStatements": [ + "set spanner.savepoint_support='ENABLED'", + "set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'", + "set spanner.savepoint_support='DISABLED'", + "set spanner.savepoint_support to 'ENABLED'", + "set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'", + "set spanner.savepoint_support to 'DISABLED'" + ], + "setStatement": { + "propertyName": "SPANNER.SAVEPOINT_SUPPORT", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "'(ENABLED|FAIL_AFTER_ROLLBACK|DISABLED)'", + "converterName": "ClientSideStatementValueConverters$SavepointSupportConverter" + } + }, + { + "name": "SET SPANNER.READ_LOCK_MODE =|TO 'OPTIMISTIC'|'PESSIMISTIC'|'UNSPECIFIED'", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_READ_LOCK_MODE", + "regex": "(?is)\\A\\s*set\\s+spanner\\.read_lock_mode(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetReadLockMode", + "exampleStatements": [ + "set spanner.read_lock_mode='OPTIMISTIC'", + "set spanner.read_lock_mode='PESSIMISTIC'", + "set spanner.read_lock_mode='UNSPECIFIED'", + "set spanner.read_lock_mode to 'OPTIMISTIC'", + "set spanner.read_lock_mode to 'PESSIMISTIC'", + "set spanner.read_lock_mode to 'UNSPECIFIED'" + ], + "setStatement": { + "propertyName": "SPANNER.READ_LOCK_MODE", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "'(OPTIMISTIC|PESSIMISTIC|UNSPECIFIED|READ_LOCK_MODE_UNSPECIFIED)'", + "converterName": 
"ClientSideStatementValueConverters$ReadLockModeConverter" + } + }, + { + "name": "SET SPANNER.DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE = TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE", + "regex": "(?is)\\A\\s*set\\s+spanner\\.delay_transaction_start_until_first_write(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetDelayTransactionStartUntilFirstWrite", + "exampleStatements": ["set spanner.delay_transaction_start_until_first_write = true", "set spanner.delay_transaction_start_until_first_write = false", "set spanner.delay_transaction_start_until_first_write to true", "set spanner.delay_transaction_start_until_first_write to false"], + "setStatement": { + "propertyName": "SPANNER.DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, + { + "name": "SET SPANNER.KEEP_TRANSACTION_ALIVE = TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_KEEP_TRANSACTION_ALIVE", + "regex": "(?is)\\A\\s*set\\s+spanner\\.keep_transaction_alive(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetKeepTransactionAlive", + "exampleStatements": ["set spanner.keep_transaction_alive = true", "set spanner.keep_transaction_alive = false", "set spanner.keep_transaction_alive to true", "set spanner.keep_transaction_alive to false"], + "setStatement": { + "propertyName": "SPANNER.KEEP_TRANSACTION_ALIVE", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, + { + "name": "SET SPANNER.AUTO_BATCH_DML = TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_AUTO_BATCH_DML", + "regex": 
"(?is)\\A\\s*set\\s+spanner\\.auto_batch_dml(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetAutoBatchDml", + "exampleStatements": ["set spanner.auto_batch_dml = true", "set spanner.auto_batch_dml = false", "set spanner.auto_batch_dml to true", "set spanner.auto_batch_dml to false", "set spanner.auto_batch_dml to off"], + "setStatement": { + "propertyName": "SPANNER.AUTO_BATCH_DML", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "(.+)", + "converterName": "ClientSideStatementValueConverters$PgBooleanConverter" + } + }, + { + "name": "SET SPANNER.AUTO_BATCH_DML_UPDATE_COUNT = ", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_AUTO_BATCH_DML_UPDATE_COUNT", + "regex": "(?is)\\A\\s*set\\s+spanner\\.auto_batch_dml_update_count(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetAutoBatchDmlUpdateCount", + "exampleStatements": ["set spanner.auto_batch_dml_update_count = 0", "set spanner.auto_batch_dml_update_count = 100", "set spanner.auto_batch_dml_update_count to 1"], + "setStatement": { + "propertyName": "SPANNER.AUTO_BATCH_DML_UPDATE_COUNT", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "(\\d{1,19})", + "converterName": "ClientSideStatementValueConverters$LongConverter" + } + }, + { + "name": "SET [LOCAL] SPANNER.BATCH_DML_UPDATE_COUNT =|TO ", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_BATCH_DML_UPDATE_COUNT", + "regex": "(?is)\\A\\s*set\\s+((?:session|local)\\s+)?spanner\\.batch_dml_update_count(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetBatchDmlUpdateCount", + "exampleStatements": [ + "set local spanner.batch_dml_update_count = 0", + "set local spanner.batch_dml_update_count = 100", + "set local spanner.batch_dml_update_count to 1", + "set spanner.batch_dml_update_count to 1", + "set spanner.batch_dml_update_count = 1" + ], + "examplePrerequisiteStatements": ["set spanner.readonly = false", "set 
autocommit = false"], + "setStatement": { + "propertyName": "SPANNER.BATCH_DML_UPDATE_COUNT", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "(\\d{1,19})", + "converterName": "ClientSideStatementValueConverters$LongConverter" + } + }, + { + "name": "SET SPANNER.AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION = TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION", + "regex": "(?is)\\A\\s*set\\s+spanner\\.auto_batch_dml_update_count_verification(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetAutoBatchDmlUpdateCountVerification", + "exampleStatements": ["set spanner.auto_batch_dml_update_count_verification = true", "set spanner.auto_batch_dml_update_count_verification = false", "set spanner.auto_batch_dml_update_count_verification to true", "set spanner.auto_batch_dml_update_count_verification to false", "set spanner.auto_batch_dml_update_count_verification to off"], + "setStatement": { + "propertyName": "SPANNER.AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "(.+)", + "converterName": "ClientSideStatementValueConverters$PgBooleanConverter" + } + }, + + { + "name": "SHOW [VARIABLE] SPANNER.DATA_BOOST_ENABLED", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_DATA_BOOST_ENABLED", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.data_boost_enabled\\s*\\z", + "method": "statementShowDataBoostEnabled", + "exampleStatements": ["show spanner.data_boost_enabled","show variable spanner.data_boost_enabled"] + }, + { + "name": "SET SPANNER.DATA_BOOST_ENABLED = TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_DATA_BOOST_ENABLED", + "regex": "(?is)\\A\\s*set\\s+spanner\\.data_boost_enabled(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetDataBoostEnabled", + 
"exampleStatements": ["set spanner.data_boost_enabled = true", "set spanner.data_boost_enabled = false", "set spanner.data_boost_enabled to true", "set spanner.data_boost_enabled to false"], + "setStatement": { + "propertyName": "SPANNER.DATA_BOOST_ENABLED", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, + { + "name": "SHOW [VARIABLE] SPANNER.AUTO_PARTITION_MODE", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_AUTO_PARTITION_MODE", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.auto_partition_mode\\s*\\z", + "method": "statementShowAutoPartitionMode", + "exampleStatements": ["show spanner.auto_partition_mode","show variable spanner.auto_partition_mode"] + }, + { + "name": "SET SPANNER.AUTO_PARTITION_MODE = TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_AUTO_PARTITION_MODE", + "regex": "(?is)\\A\\s*set\\s+spanner\\.auto_partition_mode(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetAutoPartitionMode", + "exampleStatements": ["set spanner.auto_partition_mode = true", "set spanner.auto_partition_mode = false", "set spanner.auto_partition_mode to true", "set spanner.auto_partition_mode to false"], + "setStatement": { + "propertyName": "SPANNER.AUTO_PARTITION_MODE", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, + { + "name": "SHOW [VARIABLE] SPANNER.MAX_PARTITIONS", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_MAX_PARTITIONS", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.max_partitions\\s*\\z", + "method": "statementShowMaxPartitions", + "exampleStatements": ["show spanner.max_partitions","show variable spanner.max_partitions"] + 
}, + { + "name": "SET SPANNER.MAX_PARTITIONS = ", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_MAX_PARTITIONS", + "regex": "(?is)\\A\\s*set\\s+spanner\\.max_partitions(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetMaxPartitions", + "exampleStatements": ["set spanner.max_partitions = 1", "set spanner.max_partitions = 10", "set spanner.max_partitions to 5", "set spanner.max_partitions to 20"], + "setStatement": { + "propertyName": "SPANNER.MAX_PARTITIONS", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "(\\d{1,9})", + "converterName": "ClientSideStatementValueConverters$NonNegativeIntegerConverter" + } + }, + { + "name": "SHOW [VARIABLE] SPANNER.MAX_PARTITIONED_PARALLELISM", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_MAX_PARTITIONED_PARALLELISM", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.max_partitioned_parallelism\\s*\\z", + "method": "statementShowMaxPartitionedParallelism", + "exampleStatements": ["show spanner.max_partitioned_parallelism","show variable spanner.max_partitioned_parallelism"] + }, + { + "name": "SET SPANNER.MAX_PARTITIONED_PARALLELISM = ", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_MAX_PARTITIONED_PARALLELISM", + "regex": "(?is)\\A\\s*set\\s+spanner\\.max_partitioned_parallelism(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetMaxPartitionedParallelism", + "exampleStatements": ["set spanner.max_partitioned_parallelism = 1", "set spanner.max_partitioned_parallelism = 10", "set spanner.max_partitioned_parallelism to 5", "set spanner.max_partitioned_parallelism to 20"], + "setStatement": { + "propertyName": "SPANNER.MAX_PARTITIONED_PARALLELISM", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "(\\d{1,9})", + "converterName": "ClientSideStatementValueConverters$NonNegativeIntegerConverter" + } + } + ] +} diff --git 
a/java-spanner/google-cloud-spanner/src/main/resources/com/google/cloud/spanner/spi/v1/grpc-gcp-apiconfig.json b/java-spanner/google-cloud-spanner/src/main/resources/com/google/cloud/spanner/spi/v1/grpc-gcp-apiconfig.json new file mode 100644 index 000000000000..1761bd2d382f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/main/resources/com/google/cloud/spanner/spi/v1/grpc-gcp-apiconfig.json @@ -0,0 +1,106 @@ +{ + "channelPool": { + "maxSize": 3, + "maxConcurrentStreamsLowWatermark": 0 + }, + "method": [ + { + "name": ["google.spanner.v1.Spanner/CreateSession"], + "affinity" : { + "command": "BIND", + "affinityKey": "name" + } + }, + { + "name": ["google.spanner.v1.Spanner/BatchCreateSessions"], + "affinity" : { + "command": "BIND", + "affinityKey": "session.name" + } + }, + { + "name": ["google.spanner.v1.Spanner/GetSession"], + "affinity": { + "command": "BOUND", + "affinityKey": "name" + } + }, + { + "name": ["google.spanner.v1.Spanner/DeleteSession"], + "affinity": { + "command": "UNBIND", + "affinityKey": "name" + } + }, + { + "name": ["google.spanner.v1.Spanner/ExecuteSql"], + "affinity": { + "command": "BOUND", + "affinityKey": "session" + } + }, + { + "name": ["google.spanner.v1.Spanner/ExecuteBatchDml"], + "affinity": { + "command": "BOUND", + "affinityKey": "session" + } + }, + { + "name": ["google.spanner.v1.Spanner/ExecuteStreamingSql"], + "affinity": { + "command": "BOUND", + "affinityKey": "session" + } + }, + { + "name": ["google.spanner.v1.Spanner/Read"], + "affinity": { + "command": "BOUND", + "affinityKey": "session" + } + }, + { + "name": ["google.spanner.v1.Spanner/StreamingRead"], + "affinity": { + "command": "BOUND", + "affinityKey": "session" + } + }, + { + "name": ["google.spanner.v1.Spanner/BeginTransaction"], + "affinity": { + "command": "BOUND", + "affinityKey": "session" + } + }, + { + "name": ["google.spanner.v1.Spanner/Commit"], + "affinity": { + "command": "BOUND", + "affinityKey": "session" + } + }, + { + "name": 
["google.spanner.v1.Spanner/PartitionRead"], + "affinity": { + "command": "BOUND", + "affinityKey": "session" + } + }, + { + "name": ["google.spanner.v1.Spanner/PartitionQuery"], + "affinity": { + "command": "BOUND", + "affinityKey": "session" + } + }, + { + "name": ["google.spanner.v1.Spanner/Rollback"], + "affinity": { + "command": "BOUND", + "affinityKey": "session" + } + } + ] +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractAsyncTransactionTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractAsyncTransactionTest.java new file mode 100644 index 000000000000..0474a807d2b6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractAsyncTransactionTest.java @@ -0,0 +1,113 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.MockSpannerTestUtil.EMPTY_KEY_VALUE_RESULTSET; +import static com.google.cloud.spanner.MockSpannerTestUtil.INVALID_UPDATE_STATEMENT; +import static com.google.cloud.spanner.MockSpannerTestUtil.READ_MULTIPLE_KEY_VALUE_RESULTSET; +import static com.google.cloud.spanner.MockSpannerTestUtil.READ_MULTIPLE_KEY_VALUE_STATEMENT; +import static com.google.cloud.spanner.MockSpannerTestUtil.READ_ONE_EMPTY_KEY_VALUE_STATEMENT; +import static com.google.cloud.spanner.MockSpannerTestUtil.READ_ONE_KEY_VALUE_RESULTSET; +import static com.google.cloud.spanner.MockSpannerTestUtil.READ_ONE_KEY_VALUE_STATEMENT; +import static com.google.cloud.spanner.MockSpannerTestUtil.TEST_DATABASE; +import static com.google.cloud.spanner.MockSpannerTestUtil.TEST_INSTANCE; +import static com.google.cloud.spanner.MockSpannerTestUtil.TEST_PROJECT; +import static com.google.cloud.spanner.MockSpannerTestUtil.UPDATE_ABORTED_STATEMENT; +import static com.google.cloud.spanner.MockSpannerTestUtil.UPDATE_COUNT; +import static com.google.cloud.spanner.MockSpannerTestUtil.UPDATE_STATEMENT; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Server; +import io.grpc.Status; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import java.net.InetSocketAddress; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +/** Base class for {@link AsyncRunnerTest} and {@link AsyncTransactionManagerTest}. 
*/ +public abstract class AbstractAsyncTransactionTest { + static MockSpannerServiceImpl mockSpanner; + private static Server server; + private static InetSocketAddress address; + static ExecutorService executor; + + Spanner spanner; + + @BeforeClass + public static void setup() throws Exception { + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); + mockSpanner.putStatementResult( + StatementResult.query(READ_ONE_EMPTY_KEY_VALUE_STATEMENT, EMPTY_KEY_VALUE_RESULTSET)); + mockSpanner.putStatementResult( + StatementResult.query(READ_ONE_KEY_VALUE_STATEMENT, READ_ONE_KEY_VALUE_RESULTSET)); + mockSpanner.putStatementResult( + StatementResult.query( + READ_MULTIPLE_KEY_VALUE_STATEMENT, READ_MULTIPLE_KEY_VALUE_RESULTSET)); + mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + mockSpanner.putStatementResult( + StatementResult.exception( + INVALID_UPDATE_STATEMENT, + Status.INVALID_ARGUMENT.withDescription("invalid statement").asRuntimeException())); + mockSpanner.putStatementResult( + StatementResult.exception( + UPDATE_ABORTED_STATEMENT, + Status.ABORTED.withDescription("Transaction was aborted").asRuntimeException())); + + address = new InetSocketAddress("localhost", 0); + server = NettyServerBuilder.forAddress(address).addService(mockSpanner).build().start(); + executor = Executors.newSingleThreadExecutor(); + } + + @AfterClass + public static void teardown() throws Exception { + server.shutdown(); + server.awaitTermination(); + executor.shutdown(); + } + + @Before + public void before() { + String endpoint = address.getHostString() + ":" + server.getPort(); + spanner = + SpannerOptions.newBuilder() + .setProjectId(TEST_PROJECT) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://" + endpoint) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption(SessionPoolOptions.newBuilder().setFailOnSessionLeak().build()) + .build() + .getService(); + } + + @After 
+ public void after() { + spanner.close(); + mockSpanner.removeAllExecutionTimes(); + mockSpanner.reset(); + } + + DatabaseClient client() { + return spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractLatencyBenchmark.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractLatencyBenchmark.java new file mode 100644 index 000000000000..80a376efa018 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractLatencyBenchmark.java @@ -0,0 +1,118 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.common.base.MoreObjects; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; + +public abstract class AbstractLatencyBenchmark { + + static final String SELECT_QUERY = "SELECT ID FROM FOO WHERE ID = @id"; + static final String UPDATE_QUERY = "UPDATE FOO SET BAR=1 WHERE ID = @id"; + static final String ID_COLUMN_NAME = "id"; + + /** + * Used to determine how many concurrent requests are allowed. For ex - To simulate a low QPS + * scenario, using 1 thread means there will be 1 request. Use a value > 1 to have concurrent + * requests. 
+ */ + static final int PARALLEL_THREADS = + Integer.valueOf( + MoreObjects.firstNonNull(System.getenv("SPANNER_TEST_JMH_NUM_PARALLEL_THREADS"), "30")); + + static final int NUM_GRPC_CHANNELS = + Integer.valueOf( + MoreObjects.firstNonNull(System.getenv("SPANNER_TEST_JMH_NUM_GRPC_CHANNELS"), "4")); + + /** + * Total number of reads per test run for 1 thread. Increasing the value here will increase the + * duration of the benchmark. For ex - With PARALLEL_THREADS = 2, TOTAL_READS_PER_RUN = 200, there + * will be 400 read requests (200 on each thread). + */ + static final int TOTAL_READS_PER_RUN = + Integer.valueOf( + MoreObjects.firstNonNull( + System.getenv("SPANNER_TEST_JMH_NUM_READS_PER_THREAD"), "48000")); + + /** + * Total number of writes per test run for 1 thread. Increasing the value here will increase the + * duration of the benchmark. For ex - With PARALLEL_THREADS = 2, TOTAL_WRITES_PER_RUN = 200, + * there will be 400 write requests (200 on each thread). + */ + static final int TOTAL_WRITES_PER_RUN = + Integer.valueOf( + MoreObjects.firstNonNull( + System.getenv("SPANNER_TEST_JMH_NUM_WRITES_PER_THREAD"), "4000")); + + /** + * Number of requests which are used to initialise/warmup the benchmark. The latency number of + * these runs are ignored from the final reported results. + */ + static final int WARMUP_REQUEST_COUNT = 1; + + /** + * Numbers of records in the sample table used in the benchmark. This is used in this benchmark to + * randomly choose a primary key and ensure that the reads are randomly distributed. This is done + * to ensure we don't end up reading/writing the same table record (leading to hot-spotting). + */ + static final int TOTAL_RECORDS = 1000000; + + /** Utility to print latency numbers. It computes metrics such as Average, P50, P95 and P99. 
*/ + public void printResults(List results) { + if (results == null) { + return; + } + List orderedResults = new ArrayList<>(results); + Collections.sort(orderedResults); + System.out.println(); + System.out.printf("Total number of queries: %d\n", orderedResults.size()); + System.out.printf("Avg: %fms\n", avg(results)); + System.out.printf("P50: %fms\n", percentile(50, orderedResults)); + System.out.printf("P95: %fms\n", percentile(95, orderedResults)); + System.out.printf("P99: %fms\n", percentile(99, orderedResults)); + } + + private double percentile(int percentile, List orderedResults) { + int index = percentile * orderedResults.size() / 100; + Duration value = orderedResults.get(index); + Double convertedValue = convertDurationToFractionInMilliSeconds(value); + return convertedValue; + } + + /** Returns the average duration in seconds from a list of duration values. */ + private double avg(List results) { + return results.stream() + .collect(Collectors.averagingDouble(this::convertDurationToFractionInMilliSeconds)); + } + + private double convertDurationToFractionInSeconds(Duration duration) { + long seconds = duration.getSeconds(); + long nanos = duration.getNano(); + double fraction = (double) nanos / 1_000_000_000; + double value = seconds + fraction; + return value; + } + + private double convertDurationToFractionInMilliSeconds(Duration duration) { + long nanoseconds = duration.toNanos(); + return nanoseconds / 1000000.0; + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractMockServerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractMockServerTest.java new file mode 100644 index 000000000000..7857054bcb04 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractMockServerTest.java @@ -0,0 +1,148 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use 
this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.admin.database.v1.MockDatabaseAdminImpl; +import com.google.cloud.spanner.admin.instance.v1.MockInstanceAdminImpl; +import com.google.longrunning.GetOperationRequest; +import com.google.longrunning.Operation; +import com.google.longrunning.OperationsGrpc.OperationsImplBase; +import com.google.protobuf.Any; +import com.google.protobuf.Empty; +import com.google.rpc.Code; +import com.google.rpc.Status; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import io.grpc.Server; +import io.grpc.inprocess.InProcessServerBuilder; +import io.grpc.stub.StreamObserver; +import java.util.concurrent.atomic.AtomicBoolean; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +abstract class AbstractMockServerTest { + protected static MockSpannerServiceImpl mockSpanner; + public static MockInstanceAdminImpl mockInstanceAdmin; + public static MockDatabaseAdminImpl mockDatabaseAdmin; + public static OperationsImplBase mockOperations; + protected static Server server; + protected static LocalChannelProvider channelProvider; + + protected Spanner spanner; + + @BeforeClass + public static void startMockServer() throws Exception { + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted 
transactions. + mockInstanceAdmin = new MockInstanceAdminImpl(); + mockDatabaseAdmin = new MockDatabaseAdminImpl(); + mockOperations = + new OperationsImplBase() { + AtomicBoolean done = new AtomicBoolean(false); + + @Override + public void getOperation( + GetOperationRequest request, + StreamObserver responseObserver) { + responseObserver.onNext( + Operation.newBuilder() + .setDone(done.getAndSet(!done.get())) + .setName(request.getName()) + .setMetadata( + Any.pack( + UpdateDatabaseDdlMetadata.newBuilder() + .setDatabase("projects/proj/instances/inst/databases/db") + .build())) + .setResponse(Any.pack(Empty.getDefaultInstance())) + .build()); + responseObserver.onCompleted(); + } + }; + + String uniqueName = InProcessServerBuilder.generateName(); + server = + InProcessServerBuilder.forName(uniqueName) + .addService(mockSpanner) + .addService(mockInstanceAdmin) + .addService(mockDatabaseAdmin) + .addService(mockOperations) + .build() + .start(); + channelProvider = LocalChannelProvider.create(uniqueName); + } + + @AfterClass + public static void stopMockServer() throws InterruptedException { + server.shutdown(); + server.awaitTermination(); + } + + @Before + public void createSpannerInstance() { + spanner = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption(SessionPoolOptions.newBuilder().setFailOnSessionLeak().build()) + .build() + .getService(); + } + + @After + public void cleanup() { + spanner.close(); + mockSpanner.reset(); + mockSpanner.removeAllExecutionTimes(); + } + + void addUpdateDdlResponse() { + mockDatabaseAdmin.addResponse( + Operation.newBuilder() + .setMetadata( + Any.pack( + UpdateDatabaseDdlMetadata.newBuilder() + .setDatabase("projects/proj/instances/inst/databases/db") + .build())) + .setName("projects/proj/instances/inst/databases/db/operations/1") + .setDone(false) + .setResponse(Any.pack(Empty.getDefaultInstance())) + 
.build()); + } + + void addUpdateDdlError() { + mockDatabaseAdmin.addResponse( + Operation.newBuilder() + .setMetadata( + Any.pack( + UpdateDatabaseDdlMetadata.newBuilder() + .setDatabase("projects/proj/instances/inst/databases/db") + .build())) + .setName("projects/proj/instances/inst/databases/db/operations/1") + .setDone(true) + .setResponse(Any.pack(Empty.getDefaultInstance())) + .setError( + Status.newBuilder() + .setCode(Code.FAILED_PRECONDITION_VALUE) + .setMessage("test error") + .build()) + .build()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractNettyMockServerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractNettyMockServerTest.java new file mode 100644 index 000000000000..a5d3b62d98d3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractNettyMockServerTest.java @@ -0,0 +1,117 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.cloud.NoCredentials; +import io.grpc.ForwardingServerCall; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Metadata; +import io.grpc.Server; +import io.grpc.ServerCall; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptor; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import java.net.InetSocketAddress; +import java.util.Random; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicReference; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +abstract class AbstractNettyMockServerTest { + protected static MockSpannerServiceImpl mockSpanner; + + protected static Server server; + protected static InetSocketAddress address; + static ExecutorService executor; + protected static LocalChannelProvider channelProvider; + protected static final AtomicReference fakeServerTiming = + new AtomicReference<>((float) (new Random().nextDouble() * 1000) + 1); + protected static final AtomicReference fakeAFEServerTiming = + new AtomicReference<>((float) new Random().nextInt(500) + 1); + + protected Spanner spanner; + + @BeforeClass + public static void startMockServer() throws Exception { + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. 
+ + address = new InetSocketAddress("localhost", 0); + server = + NettyServerBuilder.forAddress(address) + .addService(mockSpanner) + .intercept( + new ServerInterceptor() { + @Override + public ServerCall.Listener interceptCall( + ServerCall serverCall, + Metadata headers, + ServerCallHandler serverCallHandler) { + return serverCallHandler.startCall( + new ForwardingServerCall.SimpleForwardingServerCall( + serverCall) { + @Override + public void sendHeaders(Metadata headers) { + headers.put( + Metadata.Key.of("server-timing", Metadata.ASCII_STRING_MARSHALLER), + String.format( + "afe; dur=%f, gfet4t7; dur=%f", + fakeAFEServerTiming.get(), fakeServerTiming.get())); + super.sendHeaders(headers); + } + }, + headers); + } + }) + .build() + .start(); + executor = Executors.newSingleThreadExecutor(); + } + + @AfterClass + public static void stopMockServer() throws InterruptedException { + server.shutdown(); + server.awaitTermination(); + executor.shutdown(); + } + + @Before + public void createSpannerInstance() { + String endpoint = address.getHostString() + ":" + server.getPort(); + spanner = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://" + endpoint) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption(SessionPoolOptions.newBuilder().setFailOnSessionLeak().build()) + .build() + .getService(); + } + + @After + public void cleanup() { + spanner.close(); + mockSpanner.reset(); + mockSpanner.removeAllExecutionTimes(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractReadContextTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractReadContextTest.java new file mode 100644 index 000000000000..b4bc7bf7bb6c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractReadContextTest.java @@ -0,0 +1,378 @@ +/* + * Copyright 2020 
Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.api.gax.core.ExecutorProvider; +import com.google.cloud.spanner.Options.RpcPriority; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.cloud.spanner.spi.v1.SpannerRpc.Option; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.DirectedReadOptions.IncludeReplicas; +import com.google.spanner.v1.DirectedReadOptions.ReplicaSelection; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ExecuteSqlRequest.QueryMode; +import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.ReadRequest.LockHint; +import com.google.spanner.v1.ReadRequest.OrderBy; +import com.google.spanner.v1.RequestOptions; +import com.google.spanner.v1.RequestOptions.Priority; +import com.google.spanner.v1.SessionName; +import com.google.spanner.v1.TransactionSelector; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; 
+import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class AbstractReadContextTest { + private static final DirectedReadOptions DIRECTED_READ_OPTIONS = + DirectedReadOptions.newBuilder() + .setIncludeReplicas( + IncludeReplicas.newBuilder() + .addReplicaSelections( + ReplicaSelection.newBuilder().setLocation("us-west1").build())) + .build(); + + @Parameter(0) + public QueryOptions defaultQueryOptions; + + @Parameters(name = "SpannerOptions.DefaultQueryOptions = {0}") + public static Collection parameters() { + List params = new ArrayList<>(); + params.add(new Object[] {QueryOptions.getDefaultInstance()}); + params.add( + new Object[] { + QueryOptions.newBuilder() + .setOptimizerVersion("some-version") + .setOptimizerStatisticsPackage("some-package") + .build() + }); + return params; + } + + class TestReadContextBuilder + extends AbstractReadContext.Builder { + @Override + TestReadContext build() { + return new TestReadContext(this); + } + } + + class TestReadContextWithTagBuilder + extends AbstractReadContext.Builder { + @Override + TestReadContextWithTag build() { + return new TestReadContextWithTag(this); + } + } + + private final class TestReadContext extends AbstractReadContext { + TestReadContext(TestReadContextBuilder builder) { + super(builder); + } + + @Override + TransactionSelector getTransactionSelector() { + return TransactionSelector.getDefaultInstance(); + } + + @Override + Map getTransactionChannelHint() { + return null; + } + } + + private final class TestReadContextWithTag extends AbstractReadContext { + TestReadContextWithTag(TestReadContextWithTagBuilder builder) { + super(builder); + } + + @Override + TransactionSelector getTransactionSelector() { + return TransactionSelector.getDefaultInstance(); + } + + @Override + Map 
getTransactionChannelHint() { + return null; + } + + String getTransactionTag() { + return "app=spanner,env=test"; + } + } + + private TestReadContext context; + + @Before + public void setup() { + SessionImpl session = mock(SessionImpl.class); + when(session.getName()).thenReturn("session-1"); + SpannerImpl spanner = mock(SpannerImpl.class); + SpannerOptions spannerOptions = mock(SpannerOptions.class); + when(spanner.getOptions()).thenReturn(spannerOptions); + when(session.getSpanner()).thenReturn(spanner); + TestReadContextBuilder builder = new TestReadContextBuilder(); + context = + builder + .setSession(session) + .setRpc(mock(SpannerRpc.class)) + .setDefaultQueryOptions(defaultQueryOptions) + .setExecutorProvider(mock(ExecutorProvider.class)) + .build(); + } + + @Test + public void executeSqlRequestBuilderWithoutQueryOptions() { + ExecuteSqlRequest request = + context + .getExecuteSqlRequestBuilder( + Statement.of("SELECT FOO FROM BAR"), + QueryMode.NORMAL, + Options.fromQueryOptions(), + true) + .build(); + assertThat(request.getSql()).isEqualTo("SELECT FOO FROM BAR"); + assertThat(request.getQueryOptions()).isEqualTo(defaultQueryOptions); + } + + @Test + public void executeSqlRequestBuilderWithQueryOptions() { + ExecuteSqlRequest request = + context + .getExecuteSqlRequestBuilder( + Statement.newBuilder("SELECT FOO FROM BAR") + .withQueryOptions( + QueryOptions.newBuilder() + .setOptimizerVersion("2.0") + .setOptimizerStatisticsPackage("custom-package") + .build()) + .build(), + QueryMode.NORMAL, + Options.fromQueryOptions(), + true) + .build(); + assertThat(request.getSql()).isEqualTo("SELECT FOO FROM BAR"); + assertThat(request.getQueryOptions().getOptimizerVersion()).isEqualTo("2.0"); + assertThat(request.getQueryOptions().getOptimizerStatisticsPackage()) + .isEqualTo("custom-package"); + } + + @Test + public void testBuildRequestOptions() { + RequestOptions requestOptions = context.buildRequestOptions(Options.fromQueryOptions()); + 
assertEquals(RequestOptions.Priority.PRIORITY_UNSPECIFIED, requestOptions.getPriority()); + } + + @Test + public void testBuildRequestOptionsWithPriority() { + RequestOptions requestOptionsHighPriority = + context.buildRequestOptions(Options.fromQueryOptions(Options.priority(RpcPriority.HIGH))); + assertEquals(RequestOptions.Priority.PRIORITY_HIGH, requestOptionsHighPriority.getPriority()); + + RequestOptions requestOptionsMediumPriority = + context.buildRequestOptions(Options.fromQueryOptions(Options.priority(RpcPriority.MEDIUM))); + assertEquals( + RequestOptions.Priority.PRIORITY_MEDIUM, requestOptionsMediumPriority.getPriority()); + + RequestOptions requestOptionsLowPriority = + context.buildRequestOptions(Options.fromQueryOptions(Options.priority(RpcPriority.LOW))); + assertEquals(RequestOptions.Priority.PRIORITY_LOW, requestOptionsLowPriority.getPriority()); + } + + @Test + public void testGetExecuteSqlRequestBuilderWithPriority() { + ExecuteSqlRequest.Builder request = + context.getExecuteSqlRequestBuilder( + Statement.of("SELECT * FROM FOO"), + QueryMode.NORMAL, + Options.fromQueryOptions(Options.priority(RpcPriority.MEDIUM)), + false); + assertEquals(Priority.PRIORITY_MEDIUM, request.getRequestOptions().getPriority()); + } + + @Test + public void testGetExecuteSqlRequestBuilderWithDataBoost() { + ExecuteSqlRequest.Builder request = + context.getExecuteSqlRequestBuilder( + Statement.of("SELECT * FROM FOO"), + QueryMode.NORMAL, + Options.fromQueryOptions(Options.dataBoostEnabled(true)), + false); + assertTrue(request.getDataBoostEnabled()); + } + + @Test + public void testGetReadRequestBuilderWithOrderBy() { + ReadRequest request = + ReadRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setTable("table110115790") + .setIndex("index100346066") + .addAllColumns(new ArrayList()) + .setOrderByValue(2) + .build(); + 
assertEquals(OrderBy.ORDER_BY_NO_ORDER, request.getOrderBy()); + } + + @Test + public void testGetReadRequestBuilderWithLockHint() { + ReadRequest request = + ReadRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setTable("table110115790") + .setIndex("index100346066") + .addAllColumns(new ArrayList()) + .setLockHintValue(2) + .build(); + assertEquals(LockHint.LOCK_HINT_EXCLUSIVE, request.getLockHint()); + } + + @Test + public void testGetExecuteBatchDmlRequestBuilderWithPriority() { + ExecuteBatchDmlRequest.Builder request = + context.getExecuteBatchDmlRequestBuilder( + Collections.singleton(Statement.of("SELECT * FROM FOO")), + Options.fromQueryOptions(Options.priority(RpcPriority.LOW))); + assertEquals(Priority.PRIORITY_LOW, request.getRequestOptions().getPriority()); + } + + @Test + public void testExecuteSqlLastStatement() { + assertFalse( + context + .getExecuteSqlRequestBuilder( + Statement.of("insert into test (id) values (1)"), + QueryMode.NORMAL, + Options.fromUpdateOptions(), + false) + .getLastStatement()); + assertTrue( + context + .getExecuteSqlRequestBuilder( + Statement.of("insert into test (id) values (1)"), + QueryMode.NORMAL, + Options.fromUpdateOptions(Options.lastStatement()), + false) + .getLastStatement()); + } + + @Test + public void testExecuteBatchDmlLastStatement() { + assertFalse( + context + .getExecuteBatchDmlRequestBuilder( + Collections.singleton(Statement.of("insert into test (id) values (1)")), + Options.fromUpdateOptions()) + .getLastStatements()); + assertTrue( + context + .getExecuteBatchDmlRequestBuilder( + Collections.singleton(Statement.of("insert into test (id) values (1)")), + Options.fromUpdateOptions(Options.lastStatement())) + .getLastStatements()); + } + + @Test + public void executeSqlRequestBuilderWithRequestOptions() { + ExecuteSqlRequest request = + context + .getExecuteSqlRequestBuilder( + 
Statement.newBuilder("SELECT FOO FROM BAR").build(), + QueryMode.NORMAL, + Options.fromUpdateOptions(Options.tag("app=spanner,env=test,action=query")), + false) + .build(); + assertThat(request.getSql()).isEqualTo("SELECT FOO FROM BAR"); + assertThat(request.getRequestOptions().getRequestTag()) + .isEqualTo("app=spanner,env=test,action=query"); + assertThat(request.getRequestOptions().getTransactionTag()).isEmpty(); + } + + @Test + public void executeSqlRequestBuilderWithRequestOptionsWithTxnTag() { + SessionImpl session = mock(SessionImpl.class); + when(session.getName()).thenReturn("session-1"); + SpannerImpl spanner = mock(SpannerImpl.class); + SpannerOptions spannerOptions = mock(SpannerOptions.class); + when(spanner.getOptions()).thenReturn(spannerOptions); + when(session.getSpanner()).thenReturn(spanner); + TestReadContextWithTagBuilder builder = new TestReadContextWithTagBuilder(); + TestReadContextWithTag contextWithTag = + builder + .setSession(session) + .setRpc(mock(SpannerRpc.class)) + .setDefaultQueryOptions(defaultQueryOptions) + .setExecutorProvider(mock(ExecutorProvider.class)) + .build(); + + ExecuteSqlRequest request = + contextWithTag + .getExecuteSqlRequestBuilder( + Statement.newBuilder("SELECT FOO FROM BAR").build(), + QueryMode.NORMAL, + Options.fromUpdateOptions(Options.tag("app=spanner,env=test,action=query")), + false) + .build(); + assertThat(request.getSql()).isEqualTo("SELECT FOO FROM BAR"); + assertThat(request.getRequestOptions().getRequestTag()) + .isEqualTo("app=spanner,env=test,action=query"); + assertThat(request.getRequestOptions().getTransactionTag()).isEqualTo("app=spanner,env=test"); + } + + @Test + public void testBuildRequestOptionsWithClientContext() { + RequestOptions.ClientContext clientContext = + RequestOptions.ClientContext.newBuilder() + .putSecureContext( + "key", com.google.protobuf.Value.newBuilder().setStringValue("value").build()) + .build(); + RequestOptions requestOptions = + 
context.buildRequestOptions(Options.fromQueryOptions(Options.clientContext(clientContext))); + assertEquals(clientContext, requestOptions.getClientContext()); + } + + @Test + public void testGetExecuteSqlRequestBuilderWithDirectedReadOptions() { + ExecuteSqlRequest.Builder request = + context.getExecuteSqlRequestBuilder( + Statement.of("SELECT * FROM FOO"), + QueryMode.NORMAL, + Options.fromQueryOptions(Options.directedRead(DIRECTED_READ_OPTIONS)), + false); + assertEquals(DIRECTED_READ_OPTIONS, request.getDirectedReadOptions()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractStructReaderTypesTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractStructReaderTypesTest.java new file mode 100644 index 000000000000..66596cacb92c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractStructReaderTypesTest.java @@ -0,0 +1,623 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.Type.StructField; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.runners.Parameterized.Parameter; + +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.common.base.Throwables; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ProtocolMessageEnum; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.math.BigDecimal; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.function.Function; +import javax.annotation.Nullable; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.mockito.Mockito; + +/** Unit tests for {@link AbstractStructReader} that cover all type combinations. 
*/ +@RunWith(Parameterized.class) +public class AbstractStructReaderTypesTest { + private static class TestStructReader extends AbstractStructReader { + @Override + protected boolean getBooleanInternal(int columnIndex) { + return false; + } + + @Override + protected long getLongInternal(int columnIndex) { + return 0; + } + + @Override + protected float getFloatInternal(int columnIndex) { + return 0f; + } + + @Override + protected double getDoubleInternal(int columnIndex) { + return 0; + } + + @Override + protected BigDecimal getBigDecimalInternal(int columnIndex) { + return null; + } + + @Override + protected String getStringInternal(int columnIndex) { + return null; + } + + @Override + protected String getJsonInternal(int columnIndex) { + return null; + } + + @Override + protected String getPgJsonbInternal(int columnIndex) { + return null; + } + + @Override + protected ByteArray getBytesInternal(int columnIndex) { + return null; + } + + @Override + protected Timestamp getTimestampInternal(int columnIndex) { + return null; + } + + @Override + protected Date getDateInternal(int columnIndex) { + return null; + } + + @Override + protected UUID getUuidInternal(int columnIndex) { + return null; + } + + protected Interval getIntervalInternal(int columnIndex) { + return null; + } + + @Override + protected T getProtoMessageInternal(int columnIndex, T message) { + return null; + } + + @Override + protected T getProtoEnumInternal( + int columnIndex, Function method) { + return null; + } + + @Override + protected Value getValueInternal(int columnIndex) { + return null; + } + + @Override + protected boolean[] getBooleanArrayInternal(int columnIndex) { + return null; + } + + @Override + protected List getBooleanListInternal(int columnIndex) { + return null; + } + + @Override + protected long[] getLongArrayInternal(int columnIndex) { + return null; + } + + @Override + protected List getLongListInternal(int columnIndex) { + return null; + } + + @Override + protected float[] 
getFloatArrayInternal(int columnIndex) { + return null; + } + + @Override + protected List getFloatListInternal(int columnIndex) { + return null; + } + + @Override + protected double[] getDoubleArrayInternal(int columnIndex) { + return null; + } + + @Override + protected List getDoubleListInternal(int columnIndex) { + return null; + } + + @Override + protected List getBigDecimalListInternal(int columnIndex) { + return null; + } + + @Override + protected List getStringListInternal(int columnIndex) { + return null; + } + + @Override + protected List getJsonListInternal(int columnIndex) { + return null; + } + + @Override + protected List getPgJsonbListInternal(int columnIndex) { + return null; + } + + @Override + protected List getBytesListInternal(int columnIndex) { + return null; + } + + @Override + protected List getTimestampListInternal(int columnIndex) { + return null; + } + + @Override + protected List getProtoMessageListInternal( + int columnIndex, T message) { + return null; + } + + @Override + protected List getProtoEnumListInternal( + int columnIndex, Function method) { + return null; + } + + @Override + protected List getDateListInternal(int columnIndex) { + return null; + } + + @Override + protected List getUuidListInternal(int columnIndex) { + return null; + } + + @Override + protected List getIntervalListInternal(int columnIndex) { + return null; + } + + @Override + protected List getStructListInternal(int columnIndex) { + return null; + } + + @Override + public Type getType() { + return null; + } + + @Override + public boolean isNull(int columnIndex) { + return false; + } + } + + private static List NON_VALUE_GETTERS = + Arrays.asList("getType", "getColumnCount", "getColumnIndex", "getColumnType"); + + @Parameterized.Parameters(name = "{index}: {0}: {3}()={4}") + public static Collection parameters() { + return Arrays.asList( + new Object[][] { + { + Type.bool(), + "getBooleanInternal", + false, + "getBoolean", + Collections.singletonList("getValue") + 
}, + {Type.int64(), "getLongInternal", 123L, "getLong", Collections.singletonList("getValue")}, + { + Type.float32(), + "getFloatInternal", + 2.0f, + "getFloat", + Collections.singletonList("getValue") + }, + { + Type.float64(), + "getDoubleInternal", + 2.0, + "getDouble", + Collections.singletonList("getValue") + }, + { + Type.numeric(), + "getBigDecimalInternal", + BigDecimal.valueOf(21, 1), + "getBigDecimal", + Collections.singletonList("getValue") + }, + { + Type.pgNumeric(), + "getStringInternal", + "1.23", + "getString", + Collections.singletonList("getValue") + }, + { + Type.string(), + "getStringInternal", + "a", + "getString", + Collections.singletonList("getValue") + }, + { + Type.bytes(), + "getBytesInternal", + ByteArray.copyFrom(new byte[] {0}), + "getBytes", + Collections.singletonList("getValue") + }, + { + Type.json(), + "getJsonInternal", + "{\"color\":\"red\",\"value\":\"#f00\"}", + "getJson", + Collections.singletonList("getValue") + }, + {Type.pgOid(), "getLongInternal", 123L, "getLong", Collections.singletonList("getValue")}, + { + Type.timestamp(), + "getTimestampInternal", + Timestamp.parseTimestamp("2015-09-15T00:00:00Z"), + "getTimestamp", + Collections.singletonList("getValue") + }, + { + Type.date(), + "getDateInternal", + Date.parseDate("2015-09-15"), + "getDate", + Collections.singletonList("getValue") + }, + { + Type.uuid(), + "getUuidInternal", + UUID.randomUUID(), + "getUuid", + Collections.singletonList("getValue") + }, + { + Type.interval(), + "getIntervalInternal", + Interval.parseFromString("P1Y2M3DT4H5M6.78912345S"), + "getInterval", + Collections.singletonList("getValue") + }, + { + Type.array(Type.bool()), + "getBooleanArrayInternal", + new boolean[] {true, false}, + "getBooleanArray", + Arrays.asList("getBooleanList", "getValue") + }, + { + Type.array(Type.bool()), + "getBooleanListInternal", + Arrays.asList(false, true), + "getBooleanList", + Arrays.asList("getBooleanArray", "getValue") + }, + { + Type.array(Type.int64()), + 
"getLongArrayInternal", + new long[] {1, 2}, + "getLongArray", + Arrays.asList("getLongList", "getValue") + }, + { + Type.array(Type.int64()), + "getLongListInternal", + Arrays.asList(3L, 4L), + "getLongList", + Arrays.asList("getLongArray", "getValue") + }, + { + Type.array(Type.float32()), + "getFloatArrayInternal", + new float[] {1.0f, 2.0f}, + "getFloatArray", + Arrays.asList("getFloatList", "getValue") + }, + { + Type.array(Type.float32()), + "getFloatListInternal", + Arrays.asList(2.0f, 4.0f), + "getFloatList", + Arrays.asList("getFloatArray", "getValue") + }, + { + Type.array(Type.float64()), + "getDoubleArrayInternal", + new double[] {1.0, 2.0}, + "getDoubleArray", + Arrays.asList("getDoubleList", "getValue") + }, + { + Type.array(Type.float64()), + "getDoubleListInternal", + Arrays.asList(2.0, 4.0), + "getDoubleList", + Arrays.asList("getDoubleArray", "getValue") + }, + { + Type.array(Type.numeric()), + "getBigDecimalListInternal", + Arrays.asList(BigDecimal.valueOf(21, 1), BigDecimal.valueOf(41, 1)), + "getBigDecimalList", + Collections.singletonList("getValue") + }, + { + Type.array(Type.pgNumeric()), + "getStringListInternal", + Arrays.asList("1.23", "2.34"), + "getStringList", + Collections.singletonList("getValue") + }, + { + Type.array(Type.string()), + "getStringListInternal", + Arrays.asList("a", "b", "c"), + "getStringList", + Collections.singletonList("getValue") + }, + { + Type.array(Type.json()), + "getJsonListInternal", + Arrays.asList("{}", "{\"color\":\"red\",\"value\":\"#f00\"}", "[]"), + "getJsonList", + Collections.singletonList("getValue") + }, + { + Type.array(Type.pgOid()), + "getLongArrayInternal", + new long[] {1, 2}, + "getLongArray", + Arrays.asList("getLongList", "getValue") + }, + { + Type.array(Type.pgOid()), + "getLongListInternal", + Arrays.asList(3L, 4L), + "getLongList", + Arrays.asList("getLongArray", "getValue") + }, + { + Type.array(Type.bytes()), + "getBytesListInternal", + Arrays.asList( + ByteArray.copyFrom("a"), 
ByteArray.copyFrom("b"), ByteArray.copyFrom("c")), + "getBytesList", + Collections.singletonList("getValue") + }, + { + Type.array(Type.timestamp()), + "getTimestampListInternal", + Arrays.asList( + Timestamp.parseTimestamp("2015-09-15T00:00:00Z"), + Timestamp.parseTimestamp("2015-09-14T00:00:00Z")), + "getTimestampList", + Collections.singletonList("getValue") + }, + { + Type.array(Type.date()), + "getDateListInternal", + Arrays.asList(Date.parseDate("2015-09-15"), Date.parseDate("2015-09-14")), + "getDateList", + Collections.singletonList("getValue") + }, + { + Type.array(Type.uuid()), + "getUuidListInternal", + Arrays.asList(UUID.randomUUID(), UUID.randomUUID()), + "getUuidList", + Collections.singletonList("getValue") + }, + { + Type.array(Type.interval()), + "getIntervalListInternal", + Arrays.asList( + Interval.parseFromString("P1Y2M3DT4H5M6.78912345S"), + Interval.parseFromString("P0Y"), + Interval.parseFromString("P-1Y2M-3DT-4H5M6.78912345S")), + "getIntervalList", + Collections.singletonList("getValue") + }, + { + Type.array(Type.struct(StructField.of("f1", Type.int64()))), + "getStructListInternal", + Arrays.asList( + Struct.newBuilder().set("f1").to(1).build(), + Struct.newBuilder().set("f1").to(2).build(), + Struct.newBuilder().set("f1").to(3).build()), + "getStructList", + Collections.singletonList("getValue") + } + }); + } + + /** The type of the column being tested. */ + @Parameter(value = 0) + public Type type; + + /** The name of the implementation method to be called in {@code AbstractStructReader}. */ + @Parameter(value = 1) + public String implMethodName; + + /** + * The value that should be returned when {@code implMethodName} is called, and is expected from + * {@code getterMethodName}. + */ + @Parameter(value = 2) + public Object value; + + /** The name of the public getter method to be called. 
*/ + @Parameter(value = 3) + public String getterMethodName; + + @Parameter(value = 4) + @Nullable + public List otherAllowedGetters; + + private TestStructReader reader; + + @Before + public void setUp() { + // Partial mock allows us to set expectations for the abstract methods. + reader = Mockito.spy(new TestStructReader()); + } + + private Object getterByIndex(int columnIndex) { + return getterByIndex(getterMethodName, columnIndex); + } + + private Object getterByIndex(String methodName, int columnIndex) { + try { + return reader.getClass().getMethod(methodName, int.class).invoke(reader, columnIndex); + } catch (InvocationTargetException e) { + Throwables.throwIfUnchecked(e.getCause()); + throw new RuntimeException(e); + } catch (NoSuchMethodException | IllegalAccessException e) { + throw new RuntimeException(e); + } + } + + private Object getterByName(String columnName) { + return getterByName(getterMethodName, columnName); + } + + private Object getterByName(String methodName, String columnName) { + try { + return reader.getClass().getMethod(methodName, String.class).invoke(reader, columnName); + } catch (InvocationTargetException e) { + Throwables.throwIfUnchecked(e.getCause()); + throw new RuntimeException(e); + } catch (NoSuchMethodException | IllegalAccessException e) { + throw new RuntimeException(e); + } + } + + @Test + public void getter() throws Exception { + Mockito.when(reader.getType()) + .thenReturn(Type.struct(StructField.of("F0", Type.int64()), StructField.of("F1", type))); + int columnIndex = 1; + Mockito.when(reader.isNull(columnIndex)).thenReturn(false); + Mockito.when( + reader + .getClass() + .getDeclaredMethod(implMethodName, int.class) + .invoke(reader, columnIndex)) + .thenReturn(value); + assertThat(getterByIndex(columnIndex)).isEqualTo(value); + assertThat(getterByName("F1")).isEqualTo(value); + } + + @Test + public void getterForIncorrectType() { + Mockito.when(reader.getType()).thenReturn(Type.struct(StructField.of("F1", type))); + 
int columnIndex = 0; + Mockito.when(reader.isNull(columnIndex)).thenReturn(false); + for (Method method : StructReader.class.getMethods()) { + if (NON_VALUE_GETTERS.contains(method.getName())) { + continue; + } + if (!method.getName().startsWith("get") + || method.getParameterTypes().length != 1 + || method.getParameterTypes()[0] != int.class) { + // Skip non-column index getter methods. + continue; + } + if (method.getName().equals(getterMethodName) + || (otherAllowedGetters != null && otherAllowedGetters.contains(method.getName()))) { + // Skip allowed getters. + continue; + } + IllegalStateException getterByIndexException = + assertThrows( + IllegalStateException.class, () -> getterByIndex(method.getName(), columnIndex)); + assertWithMessage("Exception for " + method) + .that(getterByIndexException.getMessage()) + .contains("was " + type); + assertWithMessage("Exception for " + method) + .that(getterByIndexException.getMessage()) + .contains("Column " + columnIndex); + + IllegalStateException getterByNameException = + assertThrows(IllegalStateException.class, () -> getterByName(method.getName(), "F1")); + assertWithMessage("Exception for " + method) + .that(getterByNameException.getMessage()) + .contains("was " + type); + assertWithMessage("Exception for " + method) + .that(getterByNameException.getMessage()) + .contains("Column F1"); + } + } + + @Test + public void getterWhenNull() { + Mockito.when(reader.getType()).thenReturn(Type.struct(StructField.of("F1", type))); + Mockito.when(reader.isNull(0)).thenReturn(true); + NullPointerException ex = assertThrows(NullPointerException.class, () -> getterByIndex(0)); + assertNotNull(ex.getMessage()); + } + + @Test + public void getterByNameWhenNull() { + Mockito.when(reader.getType()).thenReturn(Type.struct(StructField.of("F1", type))); + Mockito.when(reader.isNull(0)).thenReturn(true); + NullPointerException ex = assertThrows(NullPointerException.class, () -> getterByName("F1")); + assertNotNull(ex.getMessage()); + } 
+} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncResultSetImplStressTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncResultSetImplStressTest.java new file mode 100644 index 000000000000..1a63c538653c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncResultSetImplStressTest.java @@ -0,0 +1,458 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.core.ExecutorProvider; +import com.google.cloud.spanner.AsyncResultSet.CallbackResponse; +import com.google.cloud.spanner.AsyncResultSet.CursorState; +import com.google.cloud.spanner.Type.StructField; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Objects; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.Timeout; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class AsyncResultSetImplStressTest { + private static final int TEST_RUNS = 25; + + /** Timeout is applied to each test case individually. */ + @Rule public Timeout timeout = new Timeout(240, TimeUnit.SECONDS); + + @Parameter(0) + public int resultSetSize; + + @Parameters(name = "rows = {0}") + public static Collection data() { + List params = new ArrayList<>(); + for (int rows : new int[] {0, 1, 5, 10}) { + params.add(new Object[] {rows}); + } + return params; + } + + /** POJO representing a row in the test {@link ResultSet}. 
*/ + private static final class Row { + private final Long id; + private final String name; + + static Row create(StructReader reader) { + return new Row(reader.getLong("ID"), reader.getString("NAME")); + } + + private Row(Long id, String name) { + this.id = id; + this.name = name; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof Row)) { + return false; + } + Row other = (Row) o; + return Objects.equals(this.id, other.id) && Objects.equals(this.name, other.name); + } + + @Override + public int hashCode() { + return Objects.hash(this.id, this.name); + } + + @Override + public String toString() { + return String.format("ID: %d, NAME: %s", id, name); + } + } + + private static final class ResultSetWithRandomErrors extends ForwardingResultSet { + private final Random random = new Random(); + private final double errorFraction; + + private ResultSetWithRandomErrors(ResultSet delegate, double errorFraction) { + super(delegate); + this.errorFraction = errorFraction; + } + + @Override + public boolean next() { + if (random.nextDouble() < errorFraction) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "random error"); + } + return super.next(); + } + } + + /** Creates a simple in-mem {@link ResultSet}. */ + private ResultSet createResultSet() { + List rows = new ArrayList<>(resultSetSize); + for (int i = 0; i < resultSetSize; i++) { + rows.add( + Struct.newBuilder() + .set("ID") + .to(i + 1) + .set("NAME") + .to(String.format("Row %d", (i + 1))) + .build()); + } + return ResultSets.forRows( + Type.struct(StructField.of("ID", Type.int64()), StructField.of("NAME", Type.string())), + rows); + } + + private ResultSet createResultSetWithErrors(double errorFraction) { + return new ResultSetWithRandomErrors(createResultSet(), errorFraction); + } + + /** + * Generates a list of {@link Row} instances that correspond with the rows in {@link + * #createResultSet()}. 
+ */ + private List createExpectedRows() { + List rows = new ArrayList<>(resultSetSize); + for (int i = 0; i < resultSetSize; i++) { + rows.add(new Row(i + 1L, String.format("Row %d", (i + 1)))); + } + return rows; + } + + /** Creates a single-threaded {@link ExecutorService}. */ + private static ScheduledExecutorService createExecService() { + return createExecService(1); + } + + /** Creates an {@link ExecutorService} using a bounded pool of threadCount threads. */ + private static ScheduledExecutorService createExecService(int threadCount) { + return Executors.newScheduledThreadPool( + threadCount, new ThreadFactoryBuilder().setDaemon(true).build()); + } + + @Test + public void toList() { + ExecutorProvider executorProvider = SpannerOptions.createDefaultAsyncExecutorProvider(); + for (int bufferSize = 1; bufferSize < resultSetSize * 2; bufferSize *= 2) { + for (int i = 0; i < TEST_RUNS; i++) { + try (AsyncResultSetImpl impl = + new AsyncResultSetImpl(executorProvider, createResultSet(), bufferSize)) { + List list = impl.toList(Row::create); + assertThat(list).containsExactlyElementsIn(createExpectedRows()); + } + } + } + } + + @Test + public void toListWithErrors() { + ExecutorProvider executorProvider = SpannerOptions.createDefaultAsyncExecutorProvider(); + for (int bufferSize = 1; bufferSize < resultSetSize * 2; bufferSize *= 2) { + for (int i = 0; i < TEST_RUNS; i++) { + try (AsyncResultSetImpl impl = + new AsyncResultSetImpl( + executorProvider, createResultSetWithErrors(1.0 / resultSetSize), bufferSize)) { + List list = impl.toList(Row::create); + assertThat(list).containsExactlyElementsIn(createExpectedRows()); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()).contains("random error"); + } + } + } + } + + @Test + public void asyncToList() throws Exception { + ExecutorProvider executorProvider = SpannerOptions.createDefaultAsyncExecutorProvider(); + for (int bufferSize = 1; 
bufferSize < resultSetSize * 2; bufferSize *= 2) { + List>> futures = new ArrayList<>(TEST_RUNS); + ExecutorService executor = createExecService(32); + for (int i = 0; i < TEST_RUNS; i++) { + try (AsyncResultSet impl = + new AsyncResultSetImpl(executorProvider, createResultSet(), bufferSize)) { + futures.add(impl.toListAsync(Row::create, executor)); + } + } + List> lists = ApiFutures.allAsList(futures).get(); + for (List list : lists) { + assertThat(list).containsExactlyElementsIn(createExpectedRows()); + } + executor.shutdown(); + } + } + + @Test + public void consume() throws Exception { + ExecutorProvider executorProvider = SpannerOptions.createDefaultAsyncExecutorProvider(); + final Random random = new Random(); + for (Executor executor : + new Executor[] { + MoreExecutors.directExecutor(), createExecService(), createExecService(32) + }) { + for (int bufferSize = 1; bufferSize < resultSetSize * 2; bufferSize *= 2) { + for (int i = 0; i < TEST_RUNS; i++) { + final SettableApiFuture> future = SettableApiFuture.create(); + try (AsyncResultSetImpl impl = + new AsyncResultSetImpl(executorProvider, createResultSet(), bufferSize)) { + final ImmutableList.Builder builder = ImmutableList.builder(); + impl.setCallback( + executor, + resultSet -> { + // Randomly do something with the received data or not. Not calling tryNext() in + // the onDataReady is not 'normal', but users may do it, and the result set + // should be able to handle that. 
+ if (random.nextBoolean()) { + CursorState state; + while ((state = resultSet.tryNext()) == CursorState.OK) { + builder.add(Row.create(resultSet)); + } + if (state == CursorState.DONE) { + future.set(builder.build()); + } + } + return CallbackResponse.CONTINUE; + }); + assertThat(future.get()).containsExactlyElementsIn(createExpectedRows()); + } + } + } + } + } + + @Test + public void returnDoneBeforeEnd() throws Exception { + ExecutorProvider executorProvider = SpannerOptions.createDefaultAsyncExecutorProvider(); + final Random random = new Random(); + for (Executor executor : + new Executor[] { + MoreExecutors.directExecutor(), createExecService(), createExecService(32) + }) { + for (int bufferSize = 1; bufferSize < resultSetSize * 2; bufferSize *= 2) { + for (int i = 0; i < TEST_RUNS; i++) { + try (AsyncResultSetImpl impl = + new AsyncResultSetImpl(executorProvider, createResultSet(), bufferSize)) { + ApiFuture res = + impl.setCallback( + executor, + resultSet -> { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return random.nextBoolean() + ? CallbackResponse.DONE + : CallbackResponse.CONTINUE; + case OK: + return random.nextInt(resultSetSize) <= 2 + ? 
CallbackResponse.DONE + : CallbackResponse.CONTINUE; + default: + throw new IllegalStateException(); + } + }); + assertThat(res.get(10L, TimeUnit.SECONDS)).isNull(); + } + } + } + } + } + + @Test + public void pauseResume() throws Exception { + ExecutorProvider executorProvider = SpannerOptions.createDefaultAsyncExecutorProvider(); + final Random random = new Random(); + List>> futures = new ArrayList<>(); + for (Executor executor : + new Executor[] { + MoreExecutors.directExecutor(), createExecService(), createExecService(32) + }) { + final List resultSets = Collections.synchronizedList(new ArrayList<>()); + for (int bufferSize = 1; bufferSize < resultSetSize * 2; bufferSize *= 2) { + for (int i = 0; i < TEST_RUNS; i++) { + final SettableApiFuture> future = SettableApiFuture.create(); + futures.add(future); + try (AsyncResultSetImpl impl = + new AsyncResultSetImpl(executorProvider, createResultSet(), bufferSize)) { + resultSets.add(impl); + final ImmutableList.Builder builder = ImmutableList.builder(); + impl.setCallback( + executor, + resultSet -> { + CursorState state; + while ((state = resultSet.tryNext()) == CursorState.OK) { + builder.add(Row.create(resultSet)); + // Randomly request the iterator to pause. + if (random.nextBoolean()) { + return CallbackResponse.PAUSE; + } + } + if (state == CursorState.DONE) { + future.set(builder.build()); + } + return CallbackResponse.CONTINUE; + }); + } + } + } + final AtomicBoolean finished = new AtomicBoolean(false); + ExecutorService resumeService = createExecService(); + resumeService.execute( + () -> { + while (!finished.get()) { + // Randomly resume result sets. 
+ resultSets.get(random.nextInt(resultSets.size())).resume(); + } + }); + List> lists = ApiFutures.allAsList(futures).get(); + for (ImmutableList list : lists) { + assertThat(list).containsExactlyElementsIn(createExpectedRows()); + } + if (executor instanceof ExecutorService) { + ((ExecutorService) executor).shutdown(); + } + finished.set(true); + resumeService.shutdown(); + } + } + + @Test + public void cancel() throws Exception { + ExecutorProvider executorProvider = SpannerOptions.createDefaultAsyncExecutorProvider(); + final Random random = new Random(); + for (Executor executor : + new Executor[] { + MoreExecutors.directExecutor(), createExecService(), createExecService(32) + }) { + List>> futures = new ArrayList<>(); + final List resultSets = Collections.synchronizedList(new ArrayList<>()); + final Set cancelledIndexes = new HashSet<>(); + for (int bufferSize = 1; bufferSize < resultSetSize * 2; bufferSize *= 2) { + for (int i = 0; i < TEST_RUNS; i++) { + final SettableApiFuture> future = SettableApiFuture.create(); + futures.add(future); + try (AsyncResultSetImpl impl = + new AsyncResultSetImpl(executorProvider, createResultSet(), bufferSize)) { + resultSets.add(impl); + final ImmutableList.Builder builder = ImmutableList.builder(); + impl.setCallback( + executor, + resultSet -> { + try { + CursorState state; + while ((state = resultSet.tryNext()) == CursorState.OK) { + builder.add(Row.create(resultSet)); + // Randomly request the iterator to pause. + if (random.nextBoolean()) { + return CallbackResponse.PAUSE; + } + } + if (state == CursorState.DONE) { + future.set(builder.build()); + } + return CallbackResponse.CONTINUE; + } catch (SpannerException e) { + future.setException(e); + throw e; + } + }); + } + } + } + final AtomicBoolean finished = new AtomicBoolean(false); + // Both resume and cancel result sets randomly. 
+ ExecutorService resumeService = createExecService(); + resumeService.execute( + () -> { + while (!finished.get()) { + // Randomly resume result sets. + resultSets.get(random.nextInt(resultSets.size())).resume(); + } + // Make sure all result sets finish. + for (AsyncResultSet rs : resultSets) { + rs.resume(); + } + }); + ExecutorService cancelService = createExecService(); + cancelService.execute( + () -> { + while (!finished.get()) { + // Randomly cancel result sets. + int index = random.nextInt(resultSets.size()); + resultSets.get(index).cancel(); + cancelledIndexes.add(index); + } + }); + + // First wait until all result sets have finished. + for (ApiFuture> future : futures) { + try { + future.get(); + } catch (Throwable e) { + // ignore for now. + } + } + finished.set(true); + cancelService.shutdown(); + cancelService.awaitTermination(10L, TimeUnit.SECONDS); + + int index = 0; + for (ApiFuture> future : futures) { + try { + ImmutableList list = future.get(30L, TimeUnit.SECONDS); + // Note that the fact that the call succeeded for this result set, does not + // necessarily mean that the result set was not cancelled. Cancelling a result set is a + // best-effort operation, and the entire result set may still be produced and returned to + // the user. 
+ assertThat(list).containsExactlyElementsIn(createExpectedRows()); + } catch (ExecutionException e) { + assertThat(e.getCause()).isInstanceOf(SpannerException.class); + SpannerException se = (SpannerException) e.getCause(); + assertThat(se.getErrorCode()).isEqualTo(ErrorCode.CANCELLED); + assertThat(cancelledIndexes).contains(index); + } + index++; + } + if (executor instanceof ExecutorService) { + ((ExecutorService) executor).shutdown(); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncResultSetImplTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncResultSetImplTest.java new file mode 100644 index 000000000000..74487283c1ce --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncResultSetImplTest.java @@ -0,0 +1,568 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.when; + +import com.google.api.core.ApiFuture; +import com.google.api.gax.core.ExecutorProvider; +import com.google.cloud.spanner.AsyncResultSet.CallbackResponse; +import com.google.cloud.spanner.AsyncResultSet.CursorState; +import com.google.cloud.spanner.AsyncResultSet.ReadyCallback; +import com.google.common.base.Function; +import com.google.common.collect.Range; +import com.google.protobuf.ByteString; +import com.google.protobuf.Value; +import com.google.spanner.v1.PartialResultSet; +import java.util.List; +import java.util.concurrent.BlockingDeque; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.LinkedBlockingDeque; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +@RunWith(JUnit4.class) +public class AsyncResultSetImplTest { + private ExecutorProvider mockedProvider; + private ExecutorProvider simpleProvider; + + @Before + public void setup() { + mockedProvider = 
mock(ExecutorProvider.class); + when(mockedProvider.getExecutor()).thenReturn(mock(ScheduledExecutorService.class)); + simpleProvider = SpannerOptions.createAsyncExecutorProvider(1, 1L, TimeUnit.SECONDS); + } + + @SuppressWarnings("unchecked") + @Test + public void close() { + AsyncResultSetImpl rs = + new AsyncResultSetImpl( + mockedProvider, mock(ResultSet.class), AsyncResultSetImpl.DEFAULT_BUFFER_SIZE); + rs.close(); + // Closing a second time should be a no-op. + rs.close(); + + // The following methods are not allowed to call after closing the result set. + assertThrows( + IllegalStateException.class, + () -> rs.setCallback(mock(Executor.class), mock(ReadyCallback.class))); + assertThrows(IllegalStateException.class, () -> rs.toList(mock(Function.class))); + assertThrows( + IllegalStateException.class, + () -> rs.toListAsync(mock(Function.class), mock(Executor.class))); + + // The following methods are allowed on a closed result set. + AsyncResultSetImpl rs2 = + new AsyncResultSetImpl( + mockedProvider, mock(ResultSet.class), AsyncResultSetImpl.DEFAULT_BUFFER_SIZE); + rs2.setCallback(mock(Executor.class), mock(ReadyCallback.class)); + rs2.close(); + rs2.cancel(); + rs2.resume(); + } + + @Test + public void tryNextNotAllowed() { + try (AsyncResultSetImpl rs = + new AsyncResultSetImpl( + mockedProvider, mock(ResultSet.class), AsyncResultSetImpl.DEFAULT_BUFFER_SIZE)) { + rs.setCallback(mock(Executor.class), mock(ReadyCallback.class)); + IllegalStateException e = assertThrows(IllegalStateException.class, rs::tryNext); + assertThat(e.getMessage()).contains("tryNext may only be called from a DataReady callback."); + } + } + + @Test + public void toList() { + ResultSet delegate = mock(ResultSet.class); + when(delegate.next()).thenReturn(true, true, true, false); + when(delegate.getCurrentRowAsStruct()).thenReturn(mock(Struct.class)); + try (AsyncResultSetImpl rs = + new AsyncResultSetImpl(simpleProvider, delegate, AsyncResultSetImpl.DEFAULT_BUFFER_SIZE)) { + List 
list = rs.toList(ignored -> new Object()); + assertThat(list).hasSize(3); + } + } + + @Test + public void toListPropagatesError() { + ResultSet delegate = mock(ResultSet.class); + when(delegate.next()) + .thenThrow( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "invalid query")); + try (AsyncResultSetImpl rs = + new AsyncResultSetImpl(simpleProvider, delegate, AsyncResultSetImpl.DEFAULT_BUFFER_SIZE)) { + SpannerException e = + assertThrows(SpannerException.class, () -> rs.toList(ignored -> new Object())); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()).contains("invalid query"); + } + } + + @Test + public void toListAsync() throws InterruptedException, ExecutionException { + ExecutorService executor = Executors.newFixedThreadPool(1); + ResultSet delegate = mock(ResultSet.class); + when(delegate.next()).thenReturn(true, true, true, false); + when(delegate.getCurrentRowAsStruct()).thenReturn(mock(Struct.class)); + try (AsyncResultSetImpl rs = + new AsyncResultSetImpl(simpleProvider, delegate, AsyncResultSetImpl.DEFAULT_BUFFER_SIZE)) { + ApiFuture> future = rs.toListAsync(ignored -> new Object(), executor); + assertThat(future.get()).hasSize(3); + } + executor.shutdown(); + } + + @Test + public void toListAsyncPropagatesError() { + ExecutorService executor = Executors.newFixedThreadPool(1); + ResultSet delegate = mock(ResultSet.class); + when(delegate.next()) + .thenThrow( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "invalid query")); + try (AsyncResultSetImpl rs = + new AsyncResultSetImpl(simpleProvider, delegate, AsyncResultSetImpl.DEFAULT_BUFFER_SIZE)) { + ExecutionException e = + assertThrows( + ExecutionException.class, + () -> rs.toListAsync(ignored -> new Object(), executor).get()); + assertThat(e.getCause()).isInstanceOf(SpannerException.class); + SpannerException se = (SpannerException) e.getCause(); + 
assertThat(se.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(se.getMessage()).contains("invalid query"); + } + executor.shutdown(); + } + + @Test + public void withCallback() throws InterruptedException { + Executor executor = Executors.newSingleThreadExecutor(); + ResultSet delegate = mock(ResultSet.class); + when(delegate.next()).thenReturn(true, true, true, false); + when(delegate.getCurrentRowAsStruct()).thenReturn(mock(Struct.class)); + final AtomicInteger callbackCounter = new AtomicInteger(); + final AtomicInteger rowCounter = new AtomicInteger(); + final CountDownLatch finishedLatch = new CountDownLatch(1); + try (AsyncResultSetImpl rs = + new AsyncResultSetImpl(simpleProvider, delegate, AsyncResultSetImpl.DEFAULT_BUFFER_SIZE)) { + rs.setCallback( + executor, + resultSet -> { + callbackCounter.incrementAndGet(); + CursorState state; + while ((state = resultSet.tryNext()) == CursorState.OK) { + rowCounter.incrementAndGet(); + } + if (state == CursorState.DONE) { + finishedLatch.countDown(); + } + return CallbackResponse.CONTINUE; + }); + } + finishedLatch.await(); + // There should be between 1 and 5 callbacks, depending on the timing of the threads. + // Normally, there should be just 1 callback. 
+ assertThat(callbackCounter.get()).isIn(Range.closed(1, 5)); + assertThat(rowCounter.get()).isEqualTo(3); + } + + @Test + public void callbackReceivesError() throws InterruptedException { + Executor executor = Executors.newSingleThreadExecutor(); + ResultSet delegate = mock(ResultSet.class); + when(delegate.next()) + .thenThrow( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "invalid query")); + final BlockingDeque receivedErr = new LinkedBlockingDeque<>(1); + try (AsyncResultSetImpl rs = + new AsyncResultSetImpl(simpleProvider, delegate, AsyncResultSetImpl.DEFAULT_BUFFER_SIZE)) { + rs.setCallback( + executor, + resultSet -> { + try { + resultSet.tryNext(); + receivedErr.push(new Exception("missing expected exception")); + } catch (SpannerException e) { + receivedErr.push(e); + } + return CallbackResponse.DONE; + }); + } + Exception e = receivedErr.take(); + assertThat(e).isInstanceOf(SpannerException.class); + SpannerException se = (SpannerException) e; + assertThat(se.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(se.getMessage()).contains("invalid query"); + } + + @Test + public void callbackReceivesErrorHalfwayThrough() throws InterruptedException { + Executor executor = Executors.newSingleThreadExecutor(); + ResultSet delegate = mock(ResultSet.class); + when(delegate.next()) + .thenReturn(true) + .thenThrow( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "invalid query")); + when(delegate.getCurrentRowAsStruct()).thenReturn(mock(Struct.class)); + final AtomicInteger rowCount = new AtomicInteger(); + final BlockingDeque receivedErr = new LinkedBlockingDeque<>(1); + try (AsyncResultSetImpl rs = + new AsyncResultSetImpl(simpleProvider, delegate, AsyncResultSetImpl.DEFAULT_BUFFER_SIZE)) { + rs.setCallback( + executor, + resultSet -> { + try { + if (resultSet.tryNext() != CursorState.DONE) { + rowCount.incrementAndGet(); + return CallbackResponse.CONTINUE; + } + } catch 
(SpannerException e) { + receivedErr.push(e); + } + return CallbackResponse.DONE; + }); + } + Exception e = receivedErr.take(); + assertThat(e).isInstanceOf(SpannerException.class); + SpannerException se = (SpannerException) e; + assertThat(se.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(se.getMessage()).contains("invalid query"); + assertThat(rowCount.get()).isEqualTo(1); + } + + @Test + public void pauseResume() throws InterruptedException { + Executor executor = Executors.newSingleThreadExecutor(); + ResultSet delegate = mock(ResultSet.class); + when(delegate.next()).thenReturn(true, true, true, false); + when(delegate.getCurrentRowAsStruct()).thenReturn(mock(Struct.class)); + final AtomicInteger callbackCounter = new AtomicInteger(); + final BlockingDeque queue = new LinkedBlockingDeque<>(1); + final AtomicBoolean finished = new AtomicBoolean(false); + try (AsyncResultSetImpl rs = + new AsyncResultSetImpl(simpleProvider, delegate, AsyncResultSetImpl.DEFAULT_BUFFER_SIZE)) { + rs.setCallback( + executor, + resultSet -> { + callbackCounter.incrementAndGet(); + CursorState state = resultSet.tryNext(); + if (state == CursorState.OK) { + try { + queue.put(new Object()); + } catch (InterruptedException e) { + // Finish early if an error occurs. + return CallbackResponse.DONE; + } + return CallbackResponse.PAUSE; + } + finished.set(true); + return CallbackResponse.DONE; + }); + int rowCounter = 0; + while (!finished.get()) { + Object o = queue.poll(1L, TimeUnit.MILLISECONDS); + if (o != null) { + rowCounter++; + } + rs.resume(); + } + // There should be exactly 4 callbacks as we only consume one row per callback. 
+ assertThat(callbackCounter.get()).isEqualTo(4); + assertThat(rowCounter).isEqualTo(3); + } + } + + @Test + public void testCallbackIsNotCalledWhilePaused() throws InterruptedException, ExecutionException { + Executor executor = Executors.newSingleThreadExecutor(); + final int simulatedRows = 100; + ResultSet delegate = mock(ResultSet.class); + when(delegate.next()) + .thenAnswer( + new Answer() { + int row = 0; + + @Override + public Boolean answer(InvocationOnMock invocation) throws Throwable { + row++; + return row <= simulatedRows; + } + }); + when(delegate.getCurrentRowAsStruct()).thenReturn(mock(Struct.class)); + final AtomicInteger callbackCounter = new AtomicInteger(); + final BlockingDeque queue = new LinkedBlockingDeque<>(1); + final AtomicBoolean paused = new AtomicBoolean(); + try (AsyncResultSetImpl rs = + new AsyncResultSetImpl(simpleProvider, delegate, AsyncResultSetImpl.DEFAULT_BUFFER_SIZE)) { + ApiFuture callbackResult = + rs.setCallback( + executor, + resultSet -> { + assertFalse(paused.get()); + callbackCounter.incrementAndGet(); + try { + switch (resultSet.tryNext()) { + case OK: + paused.set(true); + queue.put(new Object()); + return CallbackResponse.PAUSE; + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + default: + throw new IllegalStateException(); + } + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + }); + int rowCounter = 0; + while (!callbackResult.isDone()) { + Object o = queue.poll(1L, TimeUnit.MILLISECONDS); + if (o != null) { + rowCounter++; + } + Thread.yield(); + paused.set(false); + rs.resume(); + } + // Empty the queue to ensure we count all elements. + while (queue.poll() != null) { + rowCounter++; + } + // Assert that we can get the result from the callback future without any exceptions. That + // indicates that the callback function never failed with an unexpected exception. 
+ assertNull(callbackResult.get()); + assertThat(callbackCounter.get()).isEqualTo(simulatedRows + 1); + assertThat(rowCounter).isEqualTo(simulatedRows); + } + } + + @Test + public void testCallbackIsNotCalledWhilePausedAndCanceled() { + ExecutorService executor = Executors.newSingleThreadExecutor(); + StreamingResultSet delegate = mock(StreamingResultSet.class); + + final AtomicInteger callbackCounter = new AtomicInteger(); + ApiFuture callbackResult; + + try (AsyncResultSetImpl rs = + new AsyncResultSetImpl(simpleProvider, delegate, AsyncResultSetImpl.DEFAULT_BUFFER_SIZE)) { + + when(delegate.initiateStreaming(any(AsyncResultSet.StreamMessageListener.class))) + .thenAnswer( + answer -> { + rs.onStreamMessage(PartialResultSet.newBuilder().build(), false); + return null; + }); + callbackResult = + rs.setCallback( + executor, + resultSet -> { + callbackCounter.getAndIncrement(); + return CallbackResponse.PAUSE; + }); + + rs.cancel(); + + SpannerException exception = assertThrows(SpannerException.class, () -> get(callbackResult)); + assertEquals(ErrorCode.CANCELLED, exception.getErrorCode()); + assertEquals(1, callbackCounter.get()); + } finally { + executor.shutdown(); + } + } + + @Test + public void cancel() throws InterruptedException { + Executor executor = Executors.newSingleThreadExecutor(); + ResultSet delegate = mock(ResultSet.class); + when(delegate.next()).thenReturn(true, true, true, false); + when(delegate.getCurrentRowAsStruct()).thenReturn(mock(Struct.class)); + final AtomicInteger callbackCounter = new AtomicInteger(); + final BlockingDeque queue = new LinkedBlockingDeque<>(1); + final AtomicBoolean finished = new AtomicBoolean(false); + try (AsyncResultSetImpl rs = + new AsyncResultSetImpl(simpleProvider, delegate, AsyncResultSetImpl.DEFAULT_BUFFER_SIZE)) { + rs.setCallback( + executor, + resultSet -> { + callbackCounter.incrementAndGet(); + try { + CursorState state = resultSet.tryNext(); + if (state == CursorState.OK) { + try { + queue.put(new 
Object()); + } catch (InterruptedException e) { + // Finish early if an error occurs. + return CallbackResponse.DONE; + } + } + // Pause after 2 rows to make sure that no more data is consumed until the cancel + // call has been received. + return callbackCounter.get() == 2 + ? CallbackResponse.PAUSE + : CallbackResponse.CONTINUE; + } catch (SpannerException e) { + if (e.getErrorCode() == ErrorCode.CANCELLED) { + finished.set(true); + } + } + return CallbackResponse.DONE; + }); + int rowCounter = 0; + while (!finished.get()) { + Object o = queue.poll(1L, TimeUnit.MILLISECONDS); + if (o != null) { + rowCounter++; + } + if (rowCounter == 2) { + // Cancel the result set and then resume it to get the cancelled error. + rs.cancel(); + rs.resume(); + } + } + assertThat(callbackCounter.get()).isIn(Range.closed(2, 4)); + assertThat(rowCounter).isIn(Range.closed(2, 3)); + } + } + + @Test + public void callbackReturnsError() throws InterruptedException { + Executor executor = Executors.newSingleThreadExecutor(); + ResultSet delegate = mock(ResultSet.class); + when(delegate.next()).thenReturn(true, true, true, false); + when(delegate.getCurrentRowAsStruct()).thenReturn(mock(Struct.class)); + final AtomicInteger callbackCounter = new AtomicInteger(); + try (AsyncResultSetImpl rs = + new AsyncResultSetImpl(simpleProvider, delegate, AsyncResultSetImpl.DEFAULT_BUFFER_SIZE)) { + rs.setCallback( + executor, + resultSet -> { + callbackCounter.incrementAndGet(); + throw new RuntimeException("async test"); + }); + ExecutionException e = assertThrows(ExecutionException.class, () -> rs.getResult().get()); + assertThat(e.getCause()).isInstanceOf(SpannerException.class); + SpannerException se = (SpannerException) e.getCause(); + assertThat(se.getErrorCode()).isEqualTo(ErrorCode.UNKNOWN); + assertThat(se.getMessage()).contains("async test"); + assertThat(callbackCounter.get()).isEqualTo(1); + } + } + + @Test + public void callbackReturnsDoneBeforeEnd_shouldStopIteration() throws Exception 
{ + Executor executor = Executors.newSingleThreadExecutor(); + ResultSet delegate = mock(ResultSet.class); + when(delegate.next()).thenReturn(true, true, true, false); + when(delegate.getCurrentRowAsStruct()).thenReturn(mock(Struct.class)); + try (AsyncResultSetImpl rs = + new AsyncResultSetImpl(simpleProvider, delegate, AsyncResultSetImpl.DEFAULT_BUFFER_SIZE)) { + rs.setCallback( + executor, + // Not calling resultSet.tryNext() means that it will also never return DONE. + // Instead the callback indicates that it does not want any more rows. + ignored -> CallbackResponse.DONE); + rs.getResult().get(10L, TimeUnit.SECONDS); + } + } + + @Test + public void testOnStreamMessageWhenResumeTokenIsPresent() { + StreamingResultSet delegate = mock(StreamingResultSet.class); + try (AsyncResultSetImpl rs = + new AsyncResultSetImpl(mockedProvider, delegate, AsyncResultSetImpl.DEFAULT_BUFFER_SIZE)) { + // Marking Streaming as supported + Mockito.when( + delegate.initiateStreaming(Mockito.any(AsyncResultSet.StreamMessageListener.class))) + .thenReturn(true); + + rs.setCallback(Executors.newSingleThreadExecutor(), ignored -> CallbackResponse.DONE); + rs.onStreamMessage( + PartialResultSet.newBuilder().addValues(Value.newBuilder().build()).build(), false); + + rs.onStreamMessage( + PartialResultSet.newBuilder().setResumeToken(ByteString.copyFromUtf8("test")).build(), + false); + Mockito.verify(mockedProvider.getExecutor(), times(2)).execute(Mockito.any()); + } + } + + @Test + public void testOnStreamMessageWhenCurrentBufferSizeReachedPrefetchChunkSize() { + StreamingResultSet delegate = mock(StreamingResultSet.class); + try (AsyncResultSetImpl rs = + new AsyncResultSetImpl(mockedProvider, delegate, AsyncResultSetImpl.DEFAULT_BUFFER_SIZE)) { + // Marking Streaming as supported + Mockito.when( + delegate.initiateStreaming(Mockito.any(AsyncResultSet.StreamMessageListener.class))) + .thenReturn(true); + + rs.setCallback(Executors.newSingleThreadExecutor(), ignored -> 
CallbackResponse.DONE); + rs.onStreamMessage( + PartialResultSet.newBuilder().addValues(Value.newBuilder().build()).build(), true); + Mockito.verify(mockedProvider.getExecutor(), times(2)).execute(Mockito.any()); + } + } + + @Test + public void testOnStreamMessageWhenAsyncResultIsCancelled() { + StreamingResultSet delegate = mock(StreamingResultSet.class); + try (AsyncResultSetImpl rs = + new AsyncResultSetImpl(mockedProvider, delegate, AsyncResultSetImpl.DEFAULT_BUFFER_SIZE)) { + // Marking Streaming as supported + Mockito.when( + delegate.initiateStreaming(Mockito.any(AsyncResultSet.StreamMessageListener.class))) + .thenReturn(true); + + rs.setCallback(Executors.newSingleThreadExecutor(), ignored -> CallbackResponse.DONE); + rs.cancel(); + rs.onStreamMessage( + PartialResultSet.newBuilder().addValues(Value.newBuilder().build()).build(), false); + Mockito.verify(mockedProvider.getExecutor(), times(2)).execute(Mockito.any()); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncRunnerImplTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncRunnerImplTest.java new file mode 100644 index 000000000000..936e47118902 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncRunnerImplTest.java @@ -0,0 +1,117 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.cloud.spanner.TransactionRunner.TransactionCallable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import org.junit.AfterClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class AsyncRunnerImplTest { + private static final ExecutorService executor = Executors.newSingleThreadExecutor(); + + @AfterClass + public static void teardown() { + executor.shutdown(); + } + + @SuppressWarnings("unchecked") + @Test + public void testAsyncRunReturnsResultAndCommitResponse() { + final Object expectedResult = new Object(); + final CommitResponse expectedCommitResponse = mock(CommitResponse.class); + + TransactionRunnerImpl delegate = mock(TransactionRunnerImpl.class); + when(delegate.run(any(TransactionCallable.class))).thenReturn(expectedResult); + when(delegate.getCommitResponse()).thenReturn(expectedCommitResponse); + + AsyncRunnerImpl runner = new AsyncRunnerImpl(delegate); + ApiFuture result = + runner.runAsync(txn -> ApiFutures.immediateFuture(expectedResult), executor); + + assertSame(expectedResult, get(result)); + assertSame(expectedCommitResponse, get(runner.getCommitResponse())); + assertEquals( + get(runner.getCommitResponse()).getCommitTimestamp(), get(runner.getCommitTimestamp())); + } + + @Test + public void testGetCommitTimestampReturnsErrorBeforeRun() { + TransactionRunnerImpl delegate = mock(TransactionRunnerImpl.class); + AsyncRunnerImpl 
runner = new AsyncRunnerImpl(delegate); + IllegalStateException e = + assertThrows(IllegalStateException.class, () -> runner.getCommitTimestamp()); + assertTrue(e.getMessage().contains("runAsync() has not yet been called")); + } + + @Test + public void testGetCommitResponseReturnsErrorBeforeRun() { + TransactionRunnerImpl delegate = mock(TransactionRunnerImpl.class); + AsyncRunnerImpl runner = new AsyncRunnerImpl(delegate); + IllegalStateException e = + assertThrows(IllegalStateException.class, () -> runner.getCommitResponse()); + assertTrue(e.getMessage().contains("runAsync() has not yet been called")); + } + + @Test + public void testGetCommitResponseReturnsErrorIfRunFails() { + final SpannerException expectedException = + SpannerExceptionFactory.newSpannerException(ErrorCode.ALREADY_EXISTS, "Row already exists"); + + TransactionRunnerImpl delegate = mock(TransactionRunnerImpl.class); + when(delegate.getCommitResponse()).thenThrow(expectedException); + + AsyncRunnerImpl runner = new AsyncRunnerImpl(delegate); + runner.runAsync(txn -> ApiFutures.immediateFailedFuture(expectedException), executor); + + SpannerException e = + assertThrows(SpannerException.class, () -> get(runner.getCommitResponse())); + assertSame(expectedException, e); + } + + @SuppressWarnings("unchecked") + @Test + public void testRunAsyncFailsIfCalledMultipleTimes() { + final Object result = new Object(); + TransactionRunnerImpl delegate = mock(TransactionRunnerImpl.class); + when(delegate.run(any(TransactionCallable.class))).thenReturn(result); + + AsyncRunnerImpl runner = new AsyncRunnerImpl(delegate); + runner.runAsync(txn -> ApiFutures.immediateFuture(result), executor); + + IllegalStateException e = + assertThrows( + IllegalStateException.class, + () -> runner.runAsync(txn -> ApiFutures.immediateFuture(null), executor)); + assertTrue(e.getMessage().contains("runAsync() can only be called once")); + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncRunnerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncRunnerTest.java new file mode 100644 index 000000000000..562e90186cc2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncRunnerTest.java @@ -0,0 +1,643 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.MockSpannerTestUtil.*; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AsyncResultSet.CallbackResponse; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.spanner.v1.BatchCreateSessionsRequest; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.CreateSessionRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import 
com.google.spanner.v1.ExecuteSqlRequest; +import io.grpc.Status; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class AsyncRunnerTest extends AbstractAsyncTransactionTest { + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + @Test + public void testAsyncRunner_doesNotReturnCommitTimestampBeforeCommit() { + AsyncRunner runner = client().runAsync(); + if (isMultiplexedSessionsEnabledForRW()) { + Throwable e = assertThrows(Throwable.class, () -> runner.getCommitTimestamp().get()); + // If the error occurs within the future, it gets wrapped in an ExecutionException. + // This happens when DelayedAsyncRunner is invoked while the multiplexed session is not yet + // created. + // If the error occurs before the future is created, it may throw an IllegalStateException + // instead. 
+ assertTrue(e instanceof ExecutionException || e instanceof IllegalStateException); + if (e instanceof ExecutionException) { + Throwable cause = e.getCause(); + assertTrue(cause instanceof IllegalStateException); + assertTrue(cause.getMessage().contains("runAsync() has not yet been called")); + } else { + assertTrue(e.getMessage().contains("runAsync() has not yet been called")); + } + } else { + IllegalStateException e = + assertThrows(IllegalStateException.class, () -> runner.getCommitTimestamp()); + assertTrue(e.getMessage().contains("runAsync() has not yet been called")); + } + } + + @Test + public void testAsyncRunner_doesNotReturnCommitResponseBeforeCommit() { + AsyncRunner runner = client().runAsync(); + if (isMultiplexedSessionsEnabledForRW()) { + Throwable e = assertThrows(Throwable.class, () -> runner.getCommitResponse().get()); + // If the error occurs within the future, it gets wrapped in an ExecutionException. + // This happens when DelayedAsyncRunner is invoked while the multiplexed session is not yet + // created. + // If the error occurs before the future is created, it may throw an IllegalStateException + // instead. 
+ assertTrue(e instanceof ExecutionException || e instanceof IllegalStateException); + if (e instanceof ExecutionException) { + Throwable cause = e.getCause(); + assertTrue(cause instanceof IllegalStateException); + assertTrue(cause.getMessage().contains("runAsync() has not yet been called")); + } else { + assertTrue(e.getMessage().contains("runAsync() has not yet been called")); + } + } else { + IllegalStateException e = + assertThrows(IllegalStateException.class, () -> runner.getCommitResponse()); + assertTrue(e.getMessage().contains("runAsync() has not yet been called")); + } + } + + @Test + public void asyncRunnerUpdate() throws Exception { + AsyncRunner runner = client().runAsync(); + ApiFuture updateCount = + runner.runAsync(txn -> txn.executeUpdateAsync(UPDATE_STATEMENT), executor); + assertThat(updateCount.get()).isEqualTo(UPDATE_COUNT); + } + + @Test + public void asyncRunnerIsNonBlocking() throws Exception { + mockSpanner.freeze(); + AsyncRunner runner = client().runAsync(); + ApiFuture res = + runner.runAsync( + txn -> { + txn.executeUpdateAsync(UPDATE_STATEMENT); + return ApiFutures.immediateFuture(null); + }, + executor); + ApiFuture ts = runner.getCommitTimestamp(); + mockSpanner.unfreeze(); + assertThat(res.get()).isNull(); + assertThat(ts.get()).isNotNull(); + } + + @Test + public void asyncRunnerInvalidUpdate() throws Exception { + AsyncRunner runner = client().runAsync(); + ApiFuture updateCount = + runner.runAsync(txn -> txn.executeUpdateAsync(INVALID_UPDATE_STATEMENT), executor); + ExecutionException e = assertThrows(ExecutionException.class, () -> updateCount.get()); + assertThat(e.getCause()).isInstanceOf(SpannerException.class); + SpannerException se = (SpannerException) e.getCause(); + assertThat(se.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(se.getMessage()).contains("invalid statement"); + } + + @Test + public void asyncRunnerFireAndForgetInvalidUpdate() throws Exception { + AsyncRunner runner = client().runAsync(); 
+ ApiFuture res = + runner.runAsync( + txn -> { + txn.executeUpdateAsync(INVALID_UPDATE_STATEMENT); + return txn.executeUpdateAsync(UPDATE_STATEMENT); + }, + executor); + assertThat(res.get()).isEqualTo(UPDATE_COUNT); + } + + @Test + public void asyncRunnerUpdateAborted() throws Exception { + try { + // Temporarily set the result of the update to 2 rows. + mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT + 1L)); + final AtomicInteger attempt = new AtomicInteger(); + AsyncRunner runner = client().runAsync(); + ApiFuture updateCount = + runner.runAsync( + txn -> { + if (attempt.incrementAndGet() == 1) { + mockSpanner.abortNextStatement(); + } else { + // Set the result of the update statement back to 1 row. + mockSpanner.putStatementResult( + StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + } + return txn.executeUpdateAsync(UPDATE_STATEMENT); + }, + executor); + assertThat(updateCount.get()).isEqualTo(UPDATE_COUNT); + assertThat(attempt.get()).isEqualTo(2); + } finally { + mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + } + } + + @Test + public void asyncRunnerCommitAborted() throws Exception { + try { + // Temporarily set the result of the update to 2 rows. + mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT + 1L)); + final AtomicInteger attempt = new AtomicInteger(); + AsyncRunner runner = client().runAsync(); + ApiFuture updateCount = + runner.runAsync( + txn -> { + if (attempt.get() > 0) { + // Set the result of the update statement back to 1 row. 
+ mockSpanner.putStatementResult( + StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + } + ApiFuture updateCount1 = txn.executeUpdateAsync(UPDATE_STATEMENT); + if (attempt.incrementAndGet() == 1) { + mockSpanner.abortTransaction(txn); + } + return updateCount1; + }, + executor); + assertThat(updateCount.get()).isEqualTo(UPDATE_COUNT); + assertThat(attempt.get()).isEqualTo(2); + } finally { + mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + } + } + + @Test + public void asyncRunnerUpdateAbortedWithoutGettingResult() throws Exception { + final AtomicInteger attempt = new AtomicInteger(); + AsyncRunner runner = client().runAsync(); + ApiFuture result = + runner.runAsync( + txn -> { + if (attempt.incrementAndGet() == 1) { + mockSpanner.abortNextStatement(); + } + // This update statement will be aborted, but the error will not propagated to the + // transaction runner and cause the transaction to retry. Instead, the commit call + // will do that. + txn.executeUpdateAsync(UPDATE_STATEMENT); + // Resolving this future will not resolve the result of the entire transaction. The + // transaction result will be resolved when the commit has actually finished + // successfully. + return ApiFutures.immediateFuture(null); + }, + executor); + assertThat(result.get()).isNull(); + assertThat(attempt.get()).isEqualTo(2); + if (isMultiplexedSessionsEnabledForRW()) { + assertThat(mockSpanner.getRequestTypes()) + .containsExactly( + CreateSessionRequest.class, + ExecuteSqlRequest.class, + // The retry will use an explicit BeginTransaction RPC because the first statement of + // the transaction did not return a transaction id during the initial attempt. 
+ BeginTransactionRequest.class, + ExecuteSqlRequest.class, + CommitRequest.class); + } else if (isMultiplexedSessionsEnabled()) { + assertThat(mockSpanner.getRequestTypes()) + .containsExactly( + CreateSessionRequest.class, + BatchCreateSessionsRequest.class, + ExecuteSqlRequest.class, + // The retry will use an explicit BeginTransaction RPC because the first statement of + // the transaction did not return a transaction id during the initial attempt. + BeginTransactionRequest.class, + ExecuteSqlRequest.class, + CommitRequest.class); + } else { + assertThat(mockSpanner.getRequestTypes()) + .containsExactly( + BatchCreateSessionsRequest.class, + ExecuteSqlRequest.class, + // The retry will use an explicit BeginTransaction RPC because the first statement of + // the transaction did not return a transaction id during the initial attempt. + BeginTransactionRequest.class, + ExecuteSqlRequest.class, + CommitRequest.class); + } + } + + @Test + public void asyncRunnerCommitFails() throws Exception { + mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofException( + Status.INVALID_ARGUMENT + .withDescription("mutation limit exceeded") + .asRuntimeException())); + AsyncRunner runner = client().runAsync(); + ApiFuture updateCount = + runner.runAsync( + txn -> { + // This statement will succeed, but the commit will fail. The error from the commit + // will bubble up to the future that is returned by the transaction, and the update + // count returned here will never reach the user application. 
+ return txn.executeUpdateAsync(UPDATE_STATEMENT); + }, + executor); + ExecutionException e = assertThrows(ExecutionException.class, () -> updateCount.get()); + assertThat(e.getCause()).isInstanceOf(SpannerException.class); + SpannerException se = (SpannerException) e.getCause(); + assertThat(se.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(se.getMessage()).contains("mutation limit exceeded"); + } + + @Test + public void asyncRunnerWaitsUntilAsyncUpdateHasFinished() throws Exception { + AsyncRunner runner = client().runAsync(); + ApiFuture res = + runner.runAsync( + txn -> { + txn.executeUpdateAsync(UPDATE_STATEMENT); + return ApiFutures.immediateFuture(null); + }, + executor); + res.get(); + if (isMultiplexedSessionsEnabledForRW()) { + assertThat(mockSpanner.getRequestTypes()) + .containsAtLeast( + CreateSessionRequest.class, ExecuteSqlRequest.class, CommitRequest.class); + } else if (isMultiplexedSessionsEnabled()) { + // The mock server could have received a CreateSession request for a multiplexed session, but + // it could also be that that request has not yet reached the server. 
+ assertThat(mockSpanner.getRequestTypes()) + .containsAtLeast( + BatchCreateSessionsRequest.class, ExecuteSqlRequest.class, CommitRequest.class); + } else { + assertThat(mockSpanner.getRequestTypes()) + .containsExactly( + BatchCreateSessionsRequest.class, ExecuteSqlRequest.class, CommitRequest.class); + } + } + + @Test + public void asyncRunnerBatchUpdate() throws Exception { + AsyncRunner runner = client().runAsync(); + ApiFuture updateCount = + runner.runAsync( + txn -> txn.batchUpdateAsync(ImmutableList.of(UPDATE_STATEMENT, UPDATE_STATEMENT)), + executor); + assertThat(updateCount.get()).asList().containsExactly(UPDATE_COUNT, UPDATE_COUNT); + } + + @Test + public void asyncRunnerIsNonBlockingWithBatchUpdate() throws Exception { + mockSpanner.freeze(); + AsyncRunner runner = client().runAsync(); + ApiFuture res = + runner.runAsync( + txn -> { + txn.batchUpdateAsync(ImmutableList.of(UPDATE_STATEMENT)); + return ApiFutures.immediateFuture(null); + }, + executor); + ApiFuture ts = runner.getCommitTimestamp(); + mockSpanner.unfreeze(); + assertThat(res.get()).isNull(); + assertThat(ts.get()).isNotNull(); + } + + @Test + public void asyncRunnerInvalidBatchUpdate() throws Exception { + AsyncRunner runner = client().runAsync(); + ApiFuture updateCount = + runner.runAsync( + txn -> + txn.batchUpdateAsync(ImmutableList.of(UPDATE_STATEMENT, INVALID_UPDATE_STATEMENT)), + executor); + ExecutionException e = assertThrows(ExecutionException.class, () -> updateCount.get()); + assertThat(e.getCause()).isInstanceOf(SpannerException.class); + SpannerException se = (SpannerException) e.getCause(); + assertThat(se.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(se.getMessage()).contains("invalid statement"); + } + + @Test + public void asyncRunnerFireAndForgetInvalidBatchUpdate() throws Exception { + AsyncRunner runner = client().runAsync(); + ApiFuture res = + runner.runAsync( + txn -> { + txn.batchUpdateAsync(ImmutableList.of(UPDATE_STATEMENT, 
INVALID_UPDATE_STATEMENT)); + return txn.batchUpdateAsync(ImmutableList.of(UPDATE_STATEMENT, UPDATE_STATEMENT)); + }, + executor); + assertThat(res.get()).asList().containsExactly(UPDATE_COUNT, UPDATE_COUNT); + } + + @Test + public void asyncRunnerBatchUpdateAborted() throws Exception { + final AtomicInteger attempt = new AtomicInteger(); + AsyncRunner runner = client().runAsync(); + ApiFuture updateCount = + runner.runAsync( + txn -> { + if (attempt.incrementAndGet() == 1) { + return txn.batchUpdateAsync( + ImmutableList.of(UPDATE_STATEMENT, UPDATE_ABORTED_STATEMENT)); + } else { + return txn.batchUpdateAsync(ImmutableList.of(UPDATE_STATEMENT, UPDATE_STATEMENT)); + } + }, + executor); + assertThat(updateCount.get()).asList().containsExactly(UPDATE_COUNT, UPDATE_COUNT); + assertThat(attempt.get()).isEqualTo(2); + } + + @Test + public void asyncRunnerWithBatchUpdateCommitAborted() throws Exception { + try { + // Temporarily set the result of the update to 2 rows. + mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT + 1L)); + final AtomicInteger attempt = new AtomicInteger(); + AsyncRunner runner = client().runAsync(); + ApiFuture updateCount = + runner.runAsync( + txn -> { + if (attempt.get() > 0) { + // Set the result of the update statement back to 1 row. 
+ mockSpanner.putStatementResult( + StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + } + ApiFuture updateCount1 = + txn.batchUpdateAsync(ImmutableList.of(UPDATE_STATEMENT, UPDATE_STATEMENT)); + if (attempt.incrementAndGet() == 1) { + mockSpanner.abortTransaction(txn); + } + return updateCount1; + }, + executor); + assertThat(updateCount.get()).asList().containsExactly(UPDATE_COUNT, UPDATE_COUNT); + assertThat(attempt.get()).isEqualTo(2); + } finally { + mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + } + } + + @Test + public void asyncRunnerBatchUpdateAbortedWithoutGettingResult() throws Exception { + final AtomicInteger attempt = new AtomicInteger(); + AsyncRunner runner = client().runAsync(); + ApiFuture result = + runner.runAsync( + txn -> { + if (attempt.incrementAndGet() == 1) { + mockSpanner.abortNextTransaction(); + } + // This statement will succeed and return a transaction id. The transaction will be + // marked as aborted on the mock server. + txn.executeUpdate(UPDATE_STATEMENT); + + // This batch update statement will be aborted, but the error will not propagated to + // the + // transaction runner and cause the transaction to retry. Instead, the commit call + // will do that. + txn.batchUpdateAsync(ImmutableList.of(UPDATE_STATEMENT, UPDATE_STATEMENT)); + // Resolving this future will not resolve the result of the entire transaction. The + // transaction result will be resolved when the commit has actually finished + // successfully. 
+ return ApiFutures.immediateFuture(null); + }, + executor); + assertThat(result.get()).isNull(); + assertThat(attempt.get()).isEqualTo(2); + if (isMultiplexedSessionsEnabledForRW()) { + assertThat(mockSpanner.getRequestTypes()) + .containsExactly( + CreateSessionRequest.class, + ExecuteSqlRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class, + ExecuteSqlRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class); + } else if (isMultiplexedSessionsEnabled()) { + assertThat(mockSpanner.getRequestTypes()) + .containsExactly( + CreateSessionRequest.class, + BatchCreateSessionsRequest.class, + ExecuteSqlRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class, + ExecuteSqlRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class); + } else { + assertThat(mockSpanner.getRequestTypes()) + .containsExactly( + BatchCreateSessionsRequest.class, + ExecuteSqlRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class, + ExecuteSqlRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class); + } + } + + @Test + public void asyncRunnerWithBatchUpdateCommitFails() throws Exception { + mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofException( + Status.INVALID_ARGUMENT + .withDescription("mutation limit exceeded") + .asRuntimeException())); + AsyncRunner runner = client().runAsync(); + ApiFuture updateCount = + runner.runAsync( + txn -> { + // This statement will succeed, but the commit will fail. The error from the commit + // will bubble up to the future that is returned by the transaction, and the update + // count returned here will never reach the user application. 
+ return txn.batchUpdateAsync(ImmutableList.of(UPDATE_STATEMENT, UPDATE_STATEMENT)); + }, + executor); + ExecutionException e = assertThrows(ExecutionException.class, () -> updateCount.get()); + assertThat(e.getCause()).isInstanceOf(SpannerException.class); + SpannerException se = (SpannerException) e.getCause(); + assertThat(se.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(se.getMessage()).contains("mutation limit exceeded"); + } + + @Test + public void asyncRunnerWaitsUntilAsyncBatchUpdateHasFinished() throws Exception { + AsyncRunner runner = client().runAsync(); + ApiFuture res = + runner.runAsync( + txn -> { + txn.batchUpdateAsync(ImmutableList.of(UPDATE_STATEMENT)); + return ApiFutures.immediateFuture(null); + }, + executor); + res.get(); + if (isMultiplexedSessionsEnabledForRW()) { + assertThat(mockSpanner.getRequestTypes()) + .containsExactly( + CreateSessionRequest.class, ExecuteBatchDmlRequest.class, CommitRequest.class); + } else if (isMultiplexedSessionsEnabled()) { + assertThat(mockSpanner.getRequestTypes()) + .containsExactly( + CreateSessionRequest.class, + BatchCreateSessionsRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class); + } else { + assertThat(mockSpanner.getRequestTypes()) + .containsExactly( + BatchCreateSessionsRequest.class, ExecuteBatchDmlRequest.class, CommitRequest.class); + } + } + + @Test + public void closeTransactionBeforeEndOfAsyncQuery() throws Exception { + final BlockingQueue results = new SynchronousQueue<>(); + final SettableApiFuture finished = SettableApiFuture.create(); + DatabaseClientImpl clientImpl = (DatabaseClientImpl) client(); + + AsyncRunner runner = clientImpl.runAsync(); + final CountDownLatch dataReceived = new CountDownLatch(1); + final CountDownLatch dataChecked = new CountDownLatch(1); + ApiFuture res = + runner.runAsync( + txn -> { + try (AsyncResultSet rs = + txn.readAsync( + READ_TABLE_NAME, KeySet.all(), READ_COLUMN_NAMES, Options.bufferRows(1))) { + rs.setCallback( 
+ Executors.newSingleThreadExecutor(), + resultSet -> { + dataReceived.countDown(); + try { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + finished.set(true); + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + dataChecked.await(); + results.put(resultSet.getString(0)); + } + } + } catch (Throwable t) { + finished.setException(t); + return CallbackResponse.DONE; + } + }); + } + try { + dataReceived.await(); + return ApiFutures.immediateFuture(null); + } catch (InterruptedException e) { + return ApiFutures.immediateFailedFuture( + SpannerExceptionFactory.propagateInterrupt(e)); + } + }, + executor); + // Wait until at least one row has been fetched. At that moment there should be one session + // checked out. + dataReceived.await(); + assertThat(res.isDone()).isFalse(); + dataChecked.countDown(); + // Get the data from the transaction. + List resultList = new ArrayList<>(); + do { + results.drainTo(resultList); + } while (!finished.isDone() || results.size() > 0); + assertThat(finished.get()).isTrue(); + assertThat(resultList).containsExactly("k1", "k2", "k3"); + assertThat(res.get()).isNull(); + } + + @Test + public void asyncRunnerReadRow() throws Exception { + AsyncRunner runner = client().runAsync(); + ApiFuture val = + runner.runAsync( + txn -> + ApiFutures.transform( + txn.readRowAsync(READ_TABLE_NAME, Key.of(1L), READ_COLUMN_NAMES), + input -> input.getString("Value"), + MoreExecutors.directExecutor()), + executor); + assertThat(val.get()).isEqualTo("v1"); + } + + @Test + public void asyncRunnerRead() throws Exception { + AsyncRunner runner = client().runAsync(); + ApiFuture> val = + runner.runAsync( + txn -> + txn.readAsync(READ_TABLE_NAME, KeySet.all(), READ_COLUMN_NAMES) + .toListAsync(input -> input.getString("Value"), MoreExecutors.directExecutor()), + executor); + assertThat(val.get()).containsExactly("v1", "v2", "v3"); + } + + private boolean isMultiplexedSessionsEnabled() { + if 
(spanner.getOptions() == null || spanner.getOptions().getSessionPoolOptions() == null) { + return false; + } + return spanner.getOptions().getSessionPoolOptions().getUseMultiplexedSession(); + } + + private boolean isMultiplexedSessionsEnabledForRW() { + if (spanner.getOptions() == null || spanner.getOptions().getSessionPoolOptions() == null) { + return false; + } + return spanner.getOptions().getSessionPoolOptions().getUseMultiplexedSessionForRW(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncTransactionManagerImplTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncTransactionManagerImplTest.java new file mode 100644 index 000000000000..dd13c39abc8b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncTransactionManagerImplTest.java @@ -0,0 +1,59 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.api.core.ApiFutures; +import com.google.cloud.Timestamp; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.context.Scope; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class AsyncTransactionManagerImplTest { + + @Mock private SessionImpl session; + @Mock TransactionRunnerImpl.TransactionContextImpl transaction; + + @Test + public void testCommitReturnsCommitStats() { + Span oTspan = mock(Span.class); + ISpan span = new OpenTelemetrySpan(oTspan); + when(oTspan.makeCurrent()).thenReturn(mock(Scope.class)); + try (AsyncTransactionManagerImpl manager = + new AsyncTransactionManagerImpl(session, span, Options.commitStats())) { + when(session.newTransaction(eq(Options.fromTransactionOptions(Options.commitStats())), any())) + .thenReturn(transaction); + when(transaction.ensureTxnAsync()).thenReturn(ApiFutures.immediateFuture(null)); + Timestamp commitTimestamp = Timestamp.ofTimeMicroseconds(1); + CommitResponse response = mock(CommitResponse.class); + when(response.getCommitTimestamp()).thenReturn(commitTimestamp); + when(transaction.commitAsync()).thenReturn(ApiFutures.immediateFuture(response)); + manager.beginAsync(); + manager.commitAsync(); + verify(transaction).commitAsync(); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncTransactionManagerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncTransactionManagerTest.java new file mode 100644 index 000000000000..964fd9c80041 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncTransactionManagerTest.java @@ -0,0 +1,1139 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.MockSpannerTestUtil.INVALID_UPDATE_STATEMENT; +import static com.google.cloud.spanner.MockSpannerTestUtil.READ_COLUMN_NAMES; +import static com.google.cloud.spanner.MockSpannerTestUtil.READ_TABLE_NAME; +import static com.google.cloud.spanner.MockSpannerTestUtil.UPDATE_ABORTED_STATEMENT; +import static com.google.cloud.spanner.MockSpannerTestUtil.UPDATE_COUNT; +import static com.google.cloud.spanner.MockSpannerTestUtil.UPDATE_STATEMENT; +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeFalse; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.cloud.spanner.AsyncTransactionManager.AsyncTransactionFunction; +import com.google.cloud.spanner.AsyncTransactionManager.AsyncTransactionStep; +import 
com.google.cloud.spanner.AsyncTransactionManager.CommitTimestampFuture; +import com.google.cloud.spanner.AsyncTransactionManager.TransactionContextFuture; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.Options.ReadOption; +import com.google.cloud.spanner.TransactionRunnerImpl.TransactionContextImpl; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterables; +import com.google.common.collect.Range; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Message; +import com.google.spanner.v1.BatchCreateSessionsRequest; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.CreateSessionRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.TransactionSelector; +import io.grpc.Status; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class AsyncTransactionManagerTest extends AbstractAsyncTransactionTest { + + @Parameter public Executor executor; + + @Parameters(name = "executor = {0}") + public static Collection 
data() { + return Arrays.asList( + new Object[][] { + {MoreExecutors.directExecutor()}, + {Executors.newSingleThreadExecutor()}, + {Executors.newFixedThreadPool(4)} + }); + } + + /** + * Static helper methods that simplifies creating {@link AsyncTransactionFunction}s for Java7. + * Java8 and higher can use lambda expressions. + */ + public static class AsyncTransactionManagerHelper { + + public static AsyncTransactionFunction readAsync( + final String table, + final KeySet keys, + final Iterable columns, + final ReadOption... options) { + return (transaction, ignored) -> + ApiFutures.immediateFuture(transaction.readAsync(table, keys, columns, options)); + } + + public static AsyncTransactionFunction readRowAsync( + final String table, final Key key, final Iterable columns) { + return (transaction, ignored) -> transaction.readRowAsync(table, key, columns); + } + + public static AsyncTransactionFunction buffer(Mutation mutation) { + return buffer(ImmutableList.of(mutation)); + } + + public static AsyncTransactionFunction buffer(final Iterable mutations) { + return (transaction, ignored) -> { + transaction.buffer(mutations); + return ApiFutures.immediateFuture(null); + }; + } + + public static AsyncTransactionFunction executeUpdateAsync(Statement statement) { + return executeUpdateAsync(SettableApiFuture.create(), statement); + } + + public static AsyncTransactionFunction executeUpdateAsync( + final SettableApiFuture result, final Statement statement) { + return (transaction, ignored) -> { + ApiFuture updateCount = transaction.executeUpdateAsync(statement); + ApiFutures.addCallback( + updateCount, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable t) { + result.setException(t); + } + + @Override + public void onSuccess(Long input) { + result.set(input); + } + }, + MoreExecutors.directExecutor()); + return updateCount; + }; + } + + public static AsyncTransactionFunction batchUpdateAsync( + final Statement... 
statements) { + return batchUpdateAsync(SettableApiFuture.create(), statements); + } + + public static AsyncTransactionFunction batchUpdateAsync( + final SettableApiFuture result, final Statement... statements) { + return (transaction, ignored) -> { + ApiFuture updateCounts = transaction.batchUpdateAsync(Arrays.asList(statements)); + ApiFutures.addCallback( + updateCounts, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable t) { + result.setException(t); + } + + @Override + public void onSuccess(long[] input) { + result.set(input); + } + }, + MoreExecutors.directExecutor()); + return updateCounts; + }; + } + } + + @Test + public void asyncTransactionManager_shouldRollbackOnCloseAsync() throws Exception { + AsyncTransactionManager manager = client().transactionManagerAsync(); + TransactionContext txn = manager.beginAsync().get(); + txn.executeUpdateAsync(UPDATE_STATEMENT).get(); + TransactionContextImpl impl = (TransactionContextImpl) txn; + final TransactionSelector selector = impl.getTransactionSelector(); + + SpannerApiFutures.get(manager.closeAsync()); + // The mock server should already have the Rollback request, as we are waiting for the returned + // ApiFuture to be done. 
+ mockSpanner.waitForRequestsToContain( + input -> { + if (input instanceof RollbackRequest) { + RollbackRequest request = (RollbackRequest) input; + return request.getTransactionId().equals(selector.getId()); + } + return false; + }, + 0L); + } + + @Test + public void testAsyncTransactionManager_getCommitResponseReturnsErrorBeforeCommit() { + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + assertThrows(IllegalStateException.class, manager::getCommitResponse); + } + } + + @Test + public void testAsyncTransactionManager_returnsCommitStats() throws Exception { + try (AsyncTransactionManager manager = + client().transactionManagerAsync(Options.commitStats())) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + while (true) { + try { + CommitTimestampFuture commitTimestamp = + transactionContextFuture + .then( + (transactionContext, ignored) -> + transactionContext.bufferAsync( + Collections.singleton(Mutation.delete("FOO", Key.of("foo")))), + executor) + .commitAsync(); + assertNotNull(commitTimestamp.get()); + assertNotNull(manager.getCommitResponse().get()); + assertNotNull(manager.getCommitResponse().get().getCommitStats()); + assertEquals(1L, manager.getCommitResponse().get().getCommitStats().getMutationCount()); + break; + } catch (AbortedException e) { + transactionContextFuture = manager.resetForRetryAsync(); + } + } + } + } + + @Test + public void asyncTransactionManagerUpdate() throws Exception { + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + while (true) { + try { + AsyncTransactionStep updateCount = + transactionContextFuture.then( + (transactionContext, ignored) -> + transactionContext.executeUpdateAsync(UPDATE_STATEMENT), + executor); + CommitTimestampFuture commitTimestamp = updateCount.commitAsync(); + 
assertThat(updateCount.get()).isEqualTo(UPDATE_COUNT); + assertThat(commitTimestamp.get()).isNotNull(); + break; + } catch (AbortedException e) { + transactionContextFuture = manager.resetForRetryAsync(); + } + } + } + } + + @Test + public void asyncTransactionManagerIsNonBlocking() throws Exception { + // TODO: Remove this condition once DelayedAsyncTransactionManager is made non-blocking with + // multiplexed sessions. + assumeFalse( + "DelayedAsyncTransactionManager is currently blocking with multiplexed sessions.", + spanner.getOptions().getSessionPoolOptions().getUseMultiplexedSessionForRW()); + mockSpanner.freeze(); + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + while (true) { + try { + AsyncTransactionStep updateCount = + transactionContextFuture.then( + (transactionContext, ignored) -> + transactionContext.executeUpdateAsync(UPDATE_STATEMENT), + executor); + CommitTimestampFuture commitTimestamp = updateCount.commitAsync(); + mockSpanner.unfreeze(); + assertThat(updateCount.get(10L, TimeUnit.SECONDS)).isEqualTo(UPDATE_COUNT); + assertThat(commitTimestamp.get(10L, TimeUnit.SECONDS)).isNotNull(); + break; + } catch (AbortedException e) { + transactionContextFuture = manager.resetForRetryAsync(); + } + } + } + } + + @Test + public void asyncTransactionManagerInvalidUpdate() throws Exception { + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + CommitTimestampFuture commitTimestamp = + transactionContextFuture + .then( + (transaction, ignored) -> + transaction.executeUpdateAsync(INVALID_UPDATE_STATEMENT), + executor) + .commitAsync(); + SpannerException e = assertThrows(SpannerException.class, () -> get(commitTimestamp)); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()).contains("invalid statement"); 
+ } + } + + @Test + public void asyncTransactionManagerCommitAborted() throws Exception { + final AtomicInteger attempt = new AtomicInteger(); + CountDownLatch abortedLatch = new CountDownLatch(1); + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + while (true) { + try { + attempt.incrementAndGet(); + AsyncTransactionStep updateCount = + transactionContextFuture.then( + (transaction, ignored) -> transaction.executeUpdateAsync(UPDATE_STATEMENT), + executor); + updateCount.then( + (transaction, ignored) -> { + if (attempt.get() == 1) { + mockSpanner.abortTransaction(transaction); + abortedLatch.countDown(); + } + return ApiFutures.immediateFuture(null); + }, + executor); + abortedLatch.await(10L, TimeUnit.SECONDS); + CommitTimestampFuture commitTimestamp = updateCount.commitAsync(); + assertThat(updateCount.get()).isEqualTo(UPDATE_COUNT); + assertThat(commitTimestamp.get()).isNotNull(); + assertThat(attempt.get()).isEqualTo(2); + break; + } catch (AbortedException e) { + transactionContextFuture = manager.resetForRetryAsync(); + } + } + } + } + + @Test + public void asyncTransactionManagerFireAndForgetInvalidUpdate() throws Exception { + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + while (true) { + try { + AsyncTransactionStep transaction = + transactionContextFuture.then( + (transactionContext, ignored) -> { + // This fire-and-forget update statement should not fail the transaction. + // The exception will however cause the transaction to be retried, as the + // statement will not return a transaction id. 
+ transactionContext.executeUpdateAsync(INVALID_UPDATE_STATEMENT); + return transactionContext.executeUpdateAsync(UPDATE_STATEMENT); + }, + executor); + CommitTimestampFuture commitTimestamp = transaction.commitAsync(); + assertThat(commitTimestamp.get()).isNotNull(); + assertThat(transaction.get()).isEqualTo(UPDATE_COUNT); + break; + } catch (AbortedException e) { + transactionContextFuture = manager.resetForRetryAsync(); + } + } + } + ImmutableList> expectedRequestsWithMultiplexedSessionForRW = + ImmutableList.of( + CreateSessionRequest.class, + // The first update that fails. This will cause a transaction retry. + ExecuteSqlRequest.class, + // The retry will use an explicit BeginTransaction call. + BeginTransactionRequest.class, + // The first update will again fail, but now there is a transaction id, so the + // transaction can continue. + ExecuteSqlRequest.class, + ExecuteSqlRequest.class, + CommitRequest.class); + assertThat(mockSpanner.getRequestTypes()) + .containsExactlyElementsIn(expectedRequestsWithMultiplexedSessionForRW); + } + + @Test + public void asyncTransactionManagerChain() throws Exception { + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + while (true) { + try { + CommitTimestampFuture commitTimestamp = + transactionContextFuture + .then( + (transaction, ignored) -> transaction.executeUpdateAsync(UPDATE_STATEMENT), + executor) + .then( + (transactionContext, ignored) -> + transactionContext.readRowAsync( + READ_TABLE_NAME, Key.of(1L), READ_COLUMN_NAMES), + executor) + .then( + (ignored, input) -> ApiFutures.immediateFuture(input.getString("Value")), + executor) + .then( + (ignored, input) -> { + assertThat(input).isEqualTo("v1"); + return ApiFutures.immediateFuture(null); + }, + executor) + .commitAsync(); + assertThat(commitTimestamp.get()).isNotNull(); + break; + } catch (AbortedException e) { + transactionContextFuture = 
manager.resetForRetryAsync(); + } + } + } + } + + @Test + public void asyncTransactionManagerChainWithErrorInTheMiddle() throws Exception { + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + while (true) { + try { + CommitTimestampFuture commitTimestampFuture = + transactionContextFuture + .then( + (transactionContext, ignored) -> + transactionContext.executeUpdateAsync(INVALID_UPDATE_STATEMENT), + executor) + .then( + (ignored1, ignored2) -> { + throw new IllegalStateException("this should not be executed"); + }, + executor) + .commitAsync(); + SpannerException e = + assertThrows(SpannerException.class, () -> get(commitTimestampFuture)); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + break; + } catch (AbortedException e) { + transactionContextFuture = manager.resetForRetryAsync(); + } + } + } + } + + @Test + public void asyncTransactionManagerUpdateAborted() throws Exception { + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + // Temporarily set the result of the update to 2 rows. + mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT + 1L)); + final AtomicInteger attempt = new AtomicInteger(); + + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + while (true) { + try { + CommitTimestampFuture commitTimestampFuture = + transactionContextFuture + .then( + (ignored1, ignored2) -> { + if (attempt.incrementAndGet() == 1) { + // Abort the first attempt. + mockSpanner.abortNextStatement(); + } else { + // Set the result of the update statement back to 1 row. 
+ mockSpanner.putStatementResult( + StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + } + return ApiFutures.immediateFuture(null); + }, + executor) + .then( + (transactionContext, ignored) -> + transactionContext.executeUpdateAsync(UPDATE_STATEMENT), + executor) + .commitAsync(); + assertThat(commitTimestampFuture.get()).isNotNull(); + break; + } catch (AbortedException e) { + transactionContextFuture = manager.resetForRetryAsync(); + } + } + assertThat(attempt.get()).isEqualTo(2); + } finally { + mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + } + } + + @Test + public void asyncTransactionManagerUpdateAbortedWithoutGettingResult() throws Exception { + final AtomicInteger attempt = new AtomicInteger(); + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + while (true) { + try { + CommitTimestampFuture commitTimestampFuture = + transactionContextFuture + .then( + (transaction, ignored) -> { + if (attempt.incrementAndGet() == 1) { + mockSpanner.abortNextStatement(); + } + // This update statement will be aborted, but the error will not + // propagated to the transaction runner and cause the transaction to + // retry. Instead, the commit call will do that. + transaction.executeUpdateAsync(UPDATE_STATEMENT); + // Resolving this future will not resolve the result of the entire + // transaction. The transaction result will be resolved when the commit + // has actually finished successfully. + return ApiFutures.immediateFuture(null); + }, + executor) + .commitAsync(); + assertThat(commitTimestampFuture.get()).isNotNull(); + assertThat(attempt.get()).isEqualTo(2); + // The server may receive 1 or 2 commit requests depending on whether the call to + // commitAsync() already knows that the transaction has aborted. If it does, it will not + // attempt to call the Commit RPC and instead directly propagate the Aborted error. 
+ assertThat(mockSpanner.getRequestTypes()) + .containsAtLeast( + CreateSessionRequest.class, + ExecuteSqlRequest.class, + // The retry will use a BeginTransaction RPC. + BeginTransactionRequest.class, + ExecuteSqlRequest.class, + CommitRequest.class); + break; + } catch (AbortedException e) { + transactionContextFuture = manager.resetForRetryAsync(); + } + } + } + } + + @Test + public void asyncTransactionManagerCommitFails() throws Exception { + mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofException( + Status.INVALID_ARGUMENT + .withDescription("mutation limit exceeded") + .asRuntimeException())); + try (AsyncTransactionManager mgr = client().transactionManagerAsync()) { + TransactionContextFuture txn = mgr.beginAsync(); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + get( + txn.then( + AsyncTransactionManagerHelper.executeUpdateAsync(UPDATE_STATEMENT), + executor) + .commitAsync())); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()).contains("mutation limit exceeded"); + } + } + + @Test + public void asyncTransactionManagerWaitsUntilAsyncUpdateHasFinished() throws Exception { + try (AsyncTransactionManager mgr = client().transactionManagerAsync()) { + TransactionContextFuture txn = mgr.beginAsync(); + while (true) { + try { + txn.then( + (transaction, input) -> { + // Shoot-and-forget update. The commit will still wait for this request to + // finish. 
+ transaction.executeUpdateAsync(UPDATE_STATEMENT); + return ApiFutures.immediateFuture(null); + }, + executor) + .commitAsync() + .get(); + assertThat(mockSpanner.getRequestTypes()) + .containsExactly( + CreateSessionRequest.class, ExecuteSqlRequest.class, CommitRequest.class); + break; + } catch (AbortedException e) { + txn = mgr.resetForRetryAsync(); + } + } + } + } + + @Test + public void asyncTransactionManagerBatchUpdate() throws Exception { + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + while (true) { + try { + AsyncTransactionStep updateCounts = + transactionContextFuture.then( + (transaction, ignored) -> + transaction.batchUpdateAsync( + ImmutableList.of(UPDATE_STATEMENT, UPDATE_STATEMENT)), + executor); + get(updateCounts.commitAsync()); + assertThat(get(updateCounts)).asList().containsExactly(UPDATE_COUNT, UPDATE_COUNT); + break; + } catch (AbortedException e) { + transactionContextFuture = manager.resetForRetryAsync(); + } + } + } + } + + @Test + public void asyncTransactionManagerIsNonBlockingWithBatchUpdate() throws Exception { + // TODO: Remove this condition once DelayedAsyncTransactionManager is made non-blocking with + // multiplexed sessions. 
+ assumeFalse( + "DelayedAsyncTransactionManager is currently blocking with multiplexed sessions.", + spanner.getOptions().getSessionPoolOptions().getUseMultiplexedSessionForRW()); + mockSpanner.freeze(); + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + while (true) { + try { + AsyncTransactionStep updateCounts = + transactionContextFuture.then( + (transactionContext, ignored) -> + transactionContext.batchUpdateAsync(Collections.singleton(UPDATE_STATEMENT)), + executor); + CommitTimestampFuture commitTimestampFuture = updateCounts.commitAsync(); + mockSpanner.unfreeze(); + assertThat(commitTimestampFuture.get()).isNotNull(); + assertThat(updateCounts.get()).asList().containsExactly(UPDATE_COUNT); + break; + } catch (AbortedException e) { + transactionContextFuture = manager.resetForRetryAsync(); + } + } + } + } + + @Test + public void asyncTransactionManagerInvalidBatchUpdate() throws Exception { + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + get( + transactionContextFuture + .then( + (transactionContext, ignored) -> + transactionContext.batchUpdateAsync( + ImmutableList.of(UPDATE_STATEMENT, INVALID_UPDATE_STATEMENT)), + executor) + .commitAsync())); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()).contains("invalid statement"); + } + } + + @Test + public void asyncTransactionManagerFireAndForgetInvalidBatchUpdate() throws Exception { + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + while (true) { + try { + AsyncTransactionStep updateCounts = + transactionContextFuture + .then( + (transactionContext, ignored) -> { + 
transactionContext.batchUpdateAsync( + ImmutableList.of(UPDATE_STATEMENT, INVALID_UPDATE_STATEMENT)); + return ApiFutures.immediateFuture(null); + }, + executor) + .then( + (transactionContext, ignored) -> + transactionContext.batchUpdateAsync( + ImmutableList.of(UPDATE_STATEMENT, UPDATE_STATEMENT)), + executor); + updateCounts.commitAsync().get(); + assertThat(updateCounts.get()).asList().containsExactly(UPDATE_COUNT, UPDATE_COUNT); + break; + } catch (AbortedException e) { + transactionContextFuture = manager.resetForRetryAsync(); + } + } + } + ImmutableList> expectedRequests = + ImmutableList.of( + BatchCreateSessionsRequest.class, + ExecuteBatchDmlRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class); + ImmutableList> expectedRequestsWithMultiplexedSessionsRW = + ImmutableList.of( + CreateSessionRequest.class, + ExecuteBatchDmlRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class); + assertThat(mockSpanner.getRequestTypes()) + .containsExactlyElementsIn(expectedRequestsWithMultiplexedSessionsRW); + } + + @Test + public void asyncTransactionManagerBatchUpdateAborted() throws Exception { + final AtomicInteger attempt = new AtomicInteger(); + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + while (true) { + try { + transactionContextFuture + .then( + (transaction, ignored) -> { + if (attempt.incrementAndGet() == 1) { + return transaction.batchUpdateAsync( + ImmutableList.of(UPDATE_STATEMENT, UPDATE_ABORTED_STATEMENT)); + } else { + return transaction.batchUpdateAsync( + ImmutableList.of(UPDATE_STATEMENT, UPDATE_STATEMENT)); + } + }, + executor) + .commitAsync() + .get(); + break; + } catch (AbortedException e) { + transactionContextFuture = manager.resetForRetryAsync(); + } + } + } + assertThat(attempt.get()).isEqualTo(2); + // There should only be 1 CommitRequest, as the first attempt should abort already after the + // 
ExecuteBatchDmlRequest. + ImmutableList> expectedRequests = + ImmutableList.of( + BatchCreateSessionsRequest.class, + ExecuteBatchDmlRequest.class, + BeginTransactionRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class); + ImmutableList> expectedRequestsWithMultiplexedSessionsRW = + ImmutableList.of( + CreateSessionRequest.class, + ExecuteBatchDmlRequest.class, + BeginTransactionRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class); + assertThat(mockSpanner.getRequestTypes()) + .containsExactlyElementsIn(expectedRequestsWithMultiplexedSessionsRW); + } + + @Test + public void asyncTransactionManagerBatchUpdateAbortedBeforeFirstStatement() throws Exception { + final AtomicInteger attempt = new AtomicInteger(); + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + while (true) { + try { + transactionContextFuture + .then( + (transactionContext, ignored) -> { + if (attempt.incrementAndGet() == 1) { + mockSpanner.abortNextStatement(); + } + return transactionContext.batchUpdateAsync( + ImmutableList.of(UPDATE_STATEMENT, UPDATE_STATEMENT)); + }, + executor) + .commitAsync() + .get(); + break; + } catch (AbortedException e) { + transactionContextFuture = manager.resetForRetryAsync(); + } + } + } + assertThat(attempt.get()).isEqualTo(2); + // There should only be 1 CommitRequest, as the first attempt should abort already after the + // ExecuteBatchDmlRequest. + ImmutableList> expectedRequests = + ImmutableList.of( + BatchCreateSessionsRequest.class, + ExecuteBatchDmlRequest.class, + BeginTransactionRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class); + // When requests run using multiplexed session with read-write enabled, the + // BatchCreateSessionsRequest will not be + // triggered because we are creating an empty pool during initialization. 
+ ImmutableList> expectedRequestsWithMultiplexedSessionsRW = + ImmutableList.of( + CreateSessionRequest.class, + ExecuteBatchDmlRequest.class, + BeginTransactionRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class); + assertThat(mockSpanner.getRequestTypes()) + .containsExactlyElementsIn(expectedRequestsWithMultiplexedSessionsRW); + } + + @Test + public void asyncTransactionManagerWithBatchUpdateCommitAborted() throws Exception { + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + // Temporarily set the result of the update to 2 rows. + mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT + 1L)); + final AtomicInteger attempt = new AtomicInteger(); + TransactionContextFuture txn = manager.beginAsync(); + while (true) { + try { + AsyncTransactionStep updateCounts = + txn.then( + (ignored1, ignored2) -> { + if (attempt.get() > 0) { + // Set the result of the update statement back to 1 row. + mockSpanner.putStatementResult( + StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + } + return ApiFutures.immediateFuture(null); + }, + executor) + .then( + (transactionContext, ignored) -> + transactionContext.batchUpdateAsync( + ImmutableList.of(UPDATE_STATEMENT, UPDATE_STATEMENT)), + executor); + updateCounts + .then( + (transaction, ignored) -> { + if (attempt.incrementAndGet() == 1) { + mockSpanner.abortTransaction(transaction); + } + return ApiFutures.immediateFuture(null); + }, + executor) + .commitAsync() + .get(); + assertThat(updateCounts.get()).asList().containsExactly(UPDATE_COUNT, UPDATE_COUNT); + assertThat(attempt.get()).isEqualTo(2); + break; + } catch (AbortedException e) { + txn = manager.resetForRetryAsync(); + } + } + } finally { + mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + } + ImmutableList> expectedRequests = + ImmutableList.of( + BatchCreateSessionsRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class, + 
BeginTransactionRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class); + ImmutableList> expectedRequestsWithMultiplexedSessionsRW = + ImmutableList.of( + CreateSessionRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class, + BeginTransactionRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class); + assertThat(mockSpanner.getRequestTypes()) + .containsExactlyElementsIn(expectedRequestsWithMultiplexedSessionsRW); + } + + @Test + public void asyncTransactionManagerBatchUpdateAbortedWithoutGettingResult() throws Exception { + final AtomicInteger attempt = new AtomicInteger(); + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + while (true) { + try { + transactionContextFuture + .then( + (transactionContext, ignored) -> { + if (attempt.incrementAndGet() == 1) { + mockSpanner.abortNextStatement(); + } + // This update statement will be aborted, but the error will not propagated + // to the transaction manager and cause the transaction to retry. Instead, + // the commit call will do that. Depending on the timing, that will happen + // directly in the transaction manager if the ABORTED error has already been + // returned by the batch update call before the commit call starts. + // Otherwise, the backend will return an ABORTED error for the commit call. 
+ transactionContext.batchUpdateAsync( + ImmutableList.of(UPDATE_STATEMENT, UPDATE_STATEMENT)); + return ApiFutures.immediateFuture(null); + }, + executor) + .commitAsync() + .get(); + break; + } catch (AbortedException e) { + transactionContextFuture = manager.resetForRetryAsync(); + } + } + } + assertThat(attempt.get()).isEqualTo(2); + List> requests = mockSpanner.getRequestTypes(); + int size = Iterables.size(requests); + assertThat(size).isIn(Range.closed(5, 6)); + if (size == 5) { + assertThat(requests) + .containsExactly( + CreateSessionRequest.class, + ExecuteBatchDmlRequest.class, + BeginTransactionRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class); + } else { + assertThat(requests) + .containsExactly( + CreateSessionRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class, + BeginTransactionRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class); + } + } + + @Test + public void asyncTransactionManagerWithBatchUpdateCommitFails() { + mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofException( + Status.INVALID_ARGUMENT + .withDescription("mutation limit exceeded") + .asRuntimeException())); + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + get( + transactionContextFuture + .then( + (transactionContext, ignored) -> + transactionContext.batchUpdateAsync( + ImmutableList.of(UPDATE_STATEMENT, UPDATE_STATEMENT)), + executor) + .commitAsync())); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()).contains("mutation limit exceeded"); + } + ImmutableList> expectedRequests = + ImmutableList.of( + BatchCreateSessionsRequest.class, ExecuteBatchDmlRequest.class, CommitRequest.class); + ImmutableList> expectedRequestsWithMultiplexedSessionsRW = + ImmutableList.of( + 
CreateSessionRequest.class, ExecuteBatchDmlRequest.class, CommitRequest.class); + assertThat(mockSpanner.getRequestTypes()) + .containsExactlyElementsIn(expectedRequestsWithMultiplexedSessionsRW); + } + + @Test + public void asyncTransactionManagerWaitsUntilAsyncBatchUpdateHasFinished() throws Exception { + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + while (true) { + try { + transactionContextFuture + .then( + (transactionContext, ignored) -> { + transactionContext.batchUpdateAsync(ImmutableList.of(UPDATE_STATEMENT)); + return ApiFutures.immediateFuture(null); + }, + executor) + .commitAsync() + .get(); + break; + } catch (AbortedException e) { + transactionContextFuture = manager.resetForRetryAsync(); + } + } + } + ImmutableList> expectedRequests = + ImmutableList.of( + BatchCreateSessionsRequest.class, ExecuteBatchDmlRequest.class, CommitRequest.class); + ImmutableList> expectedRequestsWithMultiplexedSessionsRW = + ImmutableList.of( + CreateSessionRequest.class, ExecuteBatchDmlRequest.class, CommitRequest.class); + assertThat(mockSpanner.getRequestTypes()) + .containsExactlyElementsIn(expectedRequestsWithMultiplexedSessionsRW); + } + + @Test + public void asyncTransactionManagerReadRow() throws Exception { + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + while (true) { + try { + AsyncTransactionStep value = + transactionContextFuture + .then( + (transactionContext, ignored) -> + transactionContext.readRowAsync( + READ_TABLE_NAME, Key.of(1L), READ_COLUMN_NAMES), + executor) + .then( + (ignored, input) -> ApiFutures.immediateFuture(input.getString("Value")), + executor); + value.commitAsync().get(); + assertThat(value.get()).isEqualTo("v1"); + break; + } catch (AbortedException e) { + transactionContextFuture = manager.resetForRetryAsync(); + } + } + 
} + } + + @Test + public void asyncTransactionManagerRead() throws Exception { + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + while (true) { + try { + AsyncTransactionStep> values = + transactionContextFuture.then( + (transactionContext, ignored) -> + transactionContext + .readAsync(READ_TABLE_NAME, KeySet.all(), READ_COLUMN_NAMES) + .toListAsync( + input -> input.getString("Value"), MoreExecutors.directExecutor()), + executor); + // Commit the transaction. + values.commitAsync().get(); + assertThat(values.get()).containsExactly("v1", "v2", "v3"); + break; + } catch (AbortedException e) { + transactionContextFuture = manager.resetForRetryAsync(); + } + } + } + } + + @Test + public void asyncTransactionManagerQuery() throws Exception { + mockSpanner.putStatementResult( + StatementResult.query( + Statement.of("SELECT FirstName FROM Singers WHERE ID=1"), + MockSpannerTestUtil.READ_FIRST_NAME_SINGERS_RESULTSET)); + final long singerId = 1L; + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + while (true) { + final String column = "FirstName"; + CommitTimestampFuture commitTimestamp = + transactionContextFuture + .then( + (transactionContext, ignored) -> + transactionContext.readRowAsync( + "Singers", Key.of(singerId), Collections.singleton(column)), + executor) + .then( + (transaction, input) -> { + String name = input.getString(column); + return transaction.bufferAsync( + Mutation.newUpdateBuilder("Singers") + .set(column) + .to(name.toUpperCase()) + .build()); + }, + executor) + .commitAsync(); + try { + commitTimestamp.get(); + break; + } catch (AbortedException e) { + transactionContextFuture = manager.resetForRetryAsync(); + } + } + } + } + + @Test + public void asyncTransactionManager_shouldPropagateStatementFailure() + throws ExecutionException, 
InterruptedException, TimeoutException { + DatabaseClient dbClient = client(); + try (AsyncTransactionManager transactionManager = dbClient.transactionManagerAsync()) { + TransactionContextFuture txnContextFuture = transactionManager.beginAsync(); + AsyncTransactionStep updateFuture = + txnContextFuture.then( + (transaction, ignored) -> transaction.executeUpdateAsync(INVALID_UPDATE_STATEMENT), + executor); + final SettableApiFuture res = SettableApiFuture.create(); + ApiFutures.addCallback( + updateFuture, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable throwable) { + // Check that we got the expected failure. + try { + assertThat(throwable).isInstanceOf(SpannerException.class); + SpannerException e = (SpannerException) throwable; + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()).contains("invalid statement"); + res.set(null); + } catch (Throwable t) { + res.setException(t); + } + } + + @Override + public void onSuccess(Long aLong) { + res.setException(new AssertionError("Statement should not succeed.")); + } + }, + executor); + + assertThat(res.get(10L, TimeUnit.SECONDS)).isNull(); + } + } + + @Test + public void testAbandonedAsyncTransactionManager_rollbackFails() throws Exception { + mockSpanner.setRollbackExecutionTime( + SimulatedExecutionTime.ofException(Status.PERMISSION_DENIED.asRuntimeException())); + + boolean gotException = false; + try (AsyncTransactionManager manager = client().transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + while (true) { + try { + AsyncTransactionStep updateCount = + transactionContextFuture.then( + (transactionContext, ignored) -> + transactionContext.executeUpdateAsync(UPDATE_STATEMENT), + executor); + assertEquals(1L, updateCount.get().longValue()); + // Break without committing or rolling back the transaction. 
+ break; + } catch (AbortedException e) { + transactionContextFuture = manager.resetForRetryAsync(); + } + } + } catch (SpannerException spannerException) { + // The error from the automatically executed Rollback is surfaced when the + // AsyncTransactionManager is closed. + assertEquals(ErrorCode.PERMISSION_DENIED, spannerException.getErrorCode()); + gotException = true; + } + assertTrue(gotException); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BackupIdTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BackupIdTest.java new file mode 100644 index 000000000000..76ba64a2ff2c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BackupIdTest.java @@ -0,0 +1,49 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link com.google.cloud.spanner.BackupId}. 
*/ +@RunWith(JUnit4.class) +public class BackupIdTest { + + @Test + public void basics() { + String name = "projects/test-project/instances/test-instance/backups/backup-1"; + BackupId bid = BackupId.of(name); + assertThat(bid.getName()).isEqualTo(name); + assertThat(bid.getInstanceId().getInstance()).isEqualTo("test-instance"); + assertThat(bid.getBackup()).isEqualTo("backup-1"); + assertThat(BackupId.of("test-project", "test-instance", "backup-1")).isEqualTo(bid); + assertThat(BackupId.of(name)).isEqualTo(bid); + assertThat(BackupId.of(name).hashCode()).isEqualTo(bid.hashCode()); + assertThat(bid.toString()).isEqualTo(name); + } + + @Test + public void badName() { + IllegalArgumentException e = + assertThrows(IllegalArgumentException.class, () -> BackupId.of("bad name")); + assertThat(e.getMessage().contains("projects")); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BackupTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BackupTest.java new file mode 100644 index 000000000000..ea1d3724c411 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BackupTest.java @@ -0,0 +1,337 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.*; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.mockito.MockitoAnnotations.initMocks; + +import com.google.cloud.Identity; +import com.google.cloud.Policy; +import com.google.cloud.Role; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Backup.Builder; +import com.google.cloud.spanner.BackupInfo.State; +import com.google.cloud.spanner.encryption.EncryptionInfo; +import com.google.rpc.Code; +import com.google.rpc.Status; +import java.util.Collections; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.Mock; +import org.mockito.Mockito; + +@RunWith(JUnit4.class) +public class BackupTest { + + private static final String NAME = + "projects/test-project/instances/test-instance/backups/backup-1"; + private static final String REFERENCING_BACKUP_NAME = + "projects/test-project/instances/test-instance/backups/backup-2"; + private static final String DB = "projects/test-project/instances/test-instance/databases/db-1"; + private static final Timestamp EXP_TIME = Timestamp.ofTimeSecondsAndNanos(1000L, 1000); + private static final Timestamp VERSION_TIME = Timestamp.ofTimeSecondsAndNanos(2000L, 2000); + public static final String KMS_KEY_VERSION = "key-version"; + private static final com.google.spanner.admin.database.v1.EncryptionInfo ENCRYPTION_INFO = + com.google.spanner.admin.database.v1.EncryptionInfo.newBuilder() + .setEncryptionType( + com.google.spanner.admin.database.v1.EncryptionInfo.Type.CUSTOMER_MANAGED_ENCRYPTION) + .setEncryptionStatus(Status.newBuilder().setCode(Code.OK.getNumber())) + .setKmsKeyVersion(KMS_KEY_VERSION) + .build(); + + @Mock DatabaseAdminClient dbClient; + + @Before + public void setUp() { + initMocks(this); + 
when(dbClient.newBackupBuilder(Mockito.any(BackupId.class))) + .thenAnswer(invocation -> new Builder(dbClient, (BackupId) invocation.getArguments()[0])); + } + + @Test + public void build() { + Timestamp expireTime = Timestamp.now(); + Timestamp versionTime = Timestamp.ofTimeMicroseconds(10L); + Backup backup = + dbClient + .newBackupBuilder(BackupId.of("test-project", "instance-id", "backup-id")) + .setDatabase(DatabaseId.of("test-project", "instance-id", "src-database")) + .setExpireTime(expireTime) + .setVersionTime(versionTime) + .setSize(100L) + .setState(State.CREATING) + .build(); + Backup copy = backup.toBuilder().build(); + assertThat(copy.getId()).isEqualTo(backup.getId()); + assertThat(copy.getDatabase()).isEqualTo(backup.getDatabase()); + assertThat(copy.getExpireTime()).isEqualTo(backup.getExpireTime()); + assertThat(copy.getVersionTime()).isEqualTo(backup.getVersionTime()); + assertThat(copy.getSize()).isEqualTo(backup.getSize()); + assertThat(copy.getState()).isEqualTo(backup.getState()); + } + + @Test + public void create() { + Timestamp expireTime = Timestamp.now(); + Timestamp versionTime = Timestamp.ofTimeMicroseconds(10L); + Backup backup = + dbClient + .newBackupBuilder(BackupId.of("test-project", "instance-id", "backup-id")) + .setDatabase(DatabaseId.of("test-project", "instance-id", "src-database")) + .setExpireTime(expireTime) + .setVersionTime(versionTime) + .build(); + backup.create(); + verify(dbClient).createBackup(backup); + } + + @Test + public void createWithoutVersionTimeShouldSucceed() { + final Timestamp expireTime = Timestamp.now(); + Backup backup = + dbClient + .newBackupBuilder(BackupId.of("test-project", "instance-id", "backup-id")) + .setDatabase(DatabaseId.of("test-project", "instance-id", "src-database")) + .setExpireTime(expireTime) + .build(); + backup.create(); + verify(dbClient).createBackup(backup); + } + + @Test + public void exists() { + when(dbClient.getBackup("test-instance", "test-backup")) + .thenReturn( + new 
Backup.Builder( + dbClient, BackupId.of("test-project", "test-instance", "test-backup")) + .build()); + when(dbClient.getBackup("other-instance", "other-backup")) + .thenThrow( + SpannerExceptionFactory.newSpannerException(ErrorCode.NOT_FOUND, "backup not found")); + Backup backup = + dbClient + .newBackupBuilder(BackupId.of("test-project", "test-instance", "test-backup")) + .build(); + assertThat(backup.exists()).isTrue(); + Backup otherBackup = + dbClient + .newBackupBuilder(BackupId.of("test-project", "other-instance", "other-backup")) + .build(); + assertThat(otherBackup.exists()).isFalse(); + } + + @Test + public void isReady() { + when(dbClient.getBackup("test-instance", "test-backup")) + .thenReturn( + new Backup.Builder( + dbClient, BackupId.of("test-project", "test-instance", "test-backup")) + .setState(State.READY) + .build()); + when(dbClient.getBackup("other-instance", "other-backup")) + .thenReturn( + new Backup.Builder( + dbClient, BackupId.of("test-project", "other-instance", "other-backup")) + .setState(State.CREATING) + .build()); + Backup backup = + dbClient + .newBackupBuilder(BackupId.of("test-project", "test-instance", "test-backup")) + .setState(State.UNSPECIFIED) + .build(); + assertThat(backup.isReady()).isTrue(); + assertThat(backup.getState()).isEqualTo(State.UNSPECIFIED); + Backup otherBackup = + dbClient + .newBackupBuilder(BackupId.of("test-project", "other-instance", "other-backup")) + .setState(State.READY) + .build(); + assertThat(otherBackup.isReady()).isFalse(); + assertThat(otherBackup.getState()).isEqualTo(State.READY); + } + + @Test + public void reload() { + Backup backup = + dbClient + .newBackupBuilder(BackupId.of("test-project", "test-instance", "test-backup")) + .build(); + backup.reload(); + verify(dbClient).getBackup("test-instance", "test-backup"); + } + + @Test + public void delete() { + Backup backup = + dbClient + .newBackupBuilder(BackupId.of("test-project", "test-instance", "test-backup")) + .build(); + 
backup.delete(); + verify(dbClient).deleteBackup("test-instance", "test-backup"); + } + + @Test + public void updateExpireTime() { + Timestamp expireTime = Timestamp.now(); + Backup backup = + dbClient + .newBackupBuilder(BackupId.of("test-project", "test-instance", "test-backup")) + .setExpireTime(expireTime) + .build(); + backup.updateExpireTime(); + verify(dbClient).updateBackup("test-instance", "test-backup", expireTime); + } + + @Test + public void updateExpireTimeWithoutExpireTime() { + Backup backup = + dbClient + .newBackupBuilder(BackupId.of("test-project", "test-instance", "test-backup")) + .build(); + IllegalStateException e = + assertThrows(IllegalStateException.class, () -> backup.updateExpireTime()); + assertNotNull(e.getMessage()); + } + + @Test + public void restore() { + Backup backup = + dbClient + .newBackupBuilder(BackupId.of("test-project", "backup-instance", "test-backup")) + .build(); + backup.restore(DatabaseId.of("test-project", "db-instance", "test-database")); + verify(dbClient) + .restoreDatabase("backup-instance", "test-backup", "db-instance", "test-database"); + } + + @Test + public void restoreWithoutDestination() { + Backup backup = + dbClient + .newBackupBuilder(BackupId.of("test-project", "test-instance", "test-backup")) + .build(); + NullPointerException e = assertThrows(NullPointerException.class, () -> backup.restore(null)); + assertNull(e.getMessage()); + } + + @Test + public void listBackupOperations() { + Backup backup = + dbClient + .newBackupBuilder(BackupId.of("test-project", "test-instance", "backup-id")) + .build(); + backup.listBackupOperations(); + verify(dbClient) + .listBackupOperations("test-instance", Options.filter("name:backups/backup-id")); + } + + @Test + public void getIAMPolicy() { + Backup backup = + dbClient + .newBackupBuilder(BackupId.of("test-project", "test-instance", "test-backup")) + .build(); + backup.getIAMPolicy(); + verify(dbClient).getBackupIAMPolicy("test-instance", "test-backup"); + } + + @Test 
+ public void setIAMPolicy() { + Backup backup = + dbClient + .newBackupBuilder(BackupId.of("test-project", "test-instance", "test-backup")) + .build(); + Policy policy = + Policy.newBuilder().addIdentity(Role.editor(), Identity.user("joe@example.com")).build(); + backup.setIAMPolicy(policy); + verify(dbClient).setBackupIAMPolicy("test-instance", "test-backup", policy); + } + + @Test + public void testIAMPermissions() { + Backup backup = + dbClient + .newBackupBuilder(BackupId.of("test-project", "test-instance", "test-backup")) + .build(); + Iterable permissions = Collections.singletonList("read"); + backup.testIAMPermissions(permissions); + verify(dbClient).testBackupIAMPermissions("test-instance", "test-backup", permissions); + } + + @Test + public void fromProto() { + Backup backup = createBackup(); + assertThat(backup.getId().getName()).isEqualTo(NAME); + assertThat(backup.getState()).isEqualTo(BackupInfo.State.CREATING); + assertThat(backup.getExpireTime()).isEqualTo(EXP_TIME); + assertThat(backup.getVersionTime()).isEqualTo(VERSION_TIME); + assertThat(backup.getEncryptionInfo()) + .isEqualTo(EncryptionInfo.fromProtoOrNull(ENCRYPTION_INFO)); + } + + @Test + public void testEqualsAndHashCode() { + final Backup backup1 = createBackup(); + final Backup backup2 = createBackup(); + final Backup copyBackup1 = copyBackup(); + + assertEquals(backup1, backup2); + assertEquals(backup1.hashCode(), backup2.hashCode()); + assertEquals(backup1.hashCode(), copyBackup1.hashCode()); + } + + private Backup createBackup() { + com.google.spanner.admin.database.v1.Backup proto = + com.google.spanner.admin.database.v1.Backup.newBuilder() + .setName(NAME) + .setDatabase(DB) + .setExpireTime( + com.google.protobuf.Timestamp.newBuilder().setSeconds(1000L).setNanos(1000).build()) + .setVersionTime( + com.google.protobuf.Timestamp.newBuilder().setSeconds(2000L).setNanos(2000).build()) + .setEncryptionInfo(ENCRYPTION_INFO) + 
.setState(com.google.spanner.admin.database.v1.Backup.State.CREATING) + .setMaxExpireTime( + com.google.protobuf.Timestamp.newBuilder().setSeconds(3000L).setNanos(3000).build()) + .addAllReferencingBackups(Collections.singletonList(REFERENCING_BACKUP_NAME)) + .build(); + return Backup.fromProto(proto, dbClient); + } + + private Backup copyBackup() { + com.google.spanner.admin.database.v1.Backup proto = + com.google.spanner.admin.database.v1.Backup.newBuilder() + .setName(NAME) + .setDatabase(DB) + .setExpireTime( + com.google.protobuf.Timestamp.newBuilder().setSeconds(1000L).setNanos(1000).build()) + .setVersionTime( + com.google.protobuf.Timestamp.newBuilder().setSeconds(2000L).setNanos(2000).build()) + .setEncryptionInfo(ENCRYPTION_INFO) + .setState(com.google.spanner.admin.database.v1.Backup.State.CREATING) + .setMaxExpireTime( + com.google.protobuf.Timestamp.newBuilder().setSeconds(3000L).setNanos(3000).build()) + .addAllReferencingBackups(Collections.singletonList(REFERENCING_BACKUP_NAME)) + .build(); + return Backup.fromProto(proto, dbClient); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BatchClientImplTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BatchClientImplTest.java new file mode 100644 index 000000000000..7a9ed7dcd91d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BatchClientImplTest.java @@ -0,0 +1,141 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.anyMap; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.MockitoAnnotations.initMocks; + +import com.google.api.core.NanoClock; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.Timestamp; +import com.google.cloud.grpc.GrpcTransportOptions; +import com.google.cloud.grpc.GrpcTransportOptions.ExecutorFactory; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.protobuf.ByteString; +import com.google.protobuf.util.Timestamps; +import com.google.spanner.v1.Session; +import com.google.spanner.v1.Transaction; +import io.opentelemetry.api.OpenTelemetry; +import java.time.Duration; +import java.util.Collections; +import java.util.Map; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.Mock; +import org.mockito.Mockito; + +/** Unit tests for {@link com.google.cloud.spanner.BatchClientImpl}. 
*/ +@RunWith(JUnit4.class) +public final class BatchClientImplTest { + + private static final String DB_NAME = "projects/my-project/instances/my-instance/databases/my-db"; + private static final String SESSION_NAME = DB_NAME + "/sessions/s1"; + private static final ByteString TXN_ID = ByteString.copyFromUtf8("my-txn"); + private static final String TIMESTAMP = "2017-11-15T10:54:20Z"; + + @Mock private SpannerRpc gapicRpc; + @Mock private SpannerOptions spannerOptions; + @Captor private ArgumentCaptor> optionsCaptor; + @Mock private BatchTransactionId txnID; + + private BatchClient client; + + @BeforeClass + public static void setupOpenTelemetry() { + SpannerOptions.resetActiveTracingFramework(); + SpannerOptions.enableOpenTelemetryTraces(); + } + + @SuppressWarnings("unchecked") + @Before + public void setUp() { + initMocks(this); + DatabaseId db = DatabaseId.of(DB_NAME); + when(spannerOptions.getNumChannels()).thenReturn(4); + when(spannerOptions.getDatabaseRole()).thenReturn("role"); + when(spannerOptions.getPrefetchChunks()).thenReturn(1); + when(spannerOptions.getRetrySettings()).thenReturn(RetrySettings.newBuilder().build()); + when(spannerOptions.getClock()).thenReturn(NanoClock.getDefaultClock()); + when(spannerOptions.getSpannerRpcV1()).thenReturn(gapicRpc); + when(spannerOptions.getSessionLabels()).thenReturn(Collections.emptyMap()); + when(spannerOptions.getOpenTelemetry()).thenReturn(OpenTelemetry.noop()); + GrpcTransportOptions transportOptions = mock(GrpcTransportOptions.class); + when(transportOptions.getExecutorFactory()).thenReturn(mock(ExecutorFactory.class)); + when(spannerOptions.getTransportOptions()).thenReturn(transportOptions); + SessionPoolOptions sessionPoolOptions = mock(SessionPoolOptions.class); + when(sessionPoolOptions.getPoolMaintainerClock()).thenReturn(Clock.INSTANCE); + when(sessionPoolOptions.getUseMultiplexedSessionPartitionedOps()).thenReturn(true); + 
when(sessionPoolOptions.getMultiplexedSessionMaintenanceDuration()).thenReturn(Duration.ZERO); + when(spannerOptions.getSessionPoolOptions()).thenReturn(sessionPoolOptions); + @SuppressWarnings("resource") + SpannerImpl spanner = new SpannerImpl(gapicRpc, spannerOptions); + client = new BatchClientImpl(spanner.getSessionClient(db)); + } + + @SuppressWarnings("unchecked") + @Test + public void testBatchReadOnlyTxnWithBound() throws Exception { + Session sessionProto = Session.newBuilder().setName(SESSION_NAME).setMultiplexed(true).build(); + when(gapicRpc.createSession( + eq(DB_NAME), anyString(), anyMap(), optionsCaptor.capture(), eq(true))) + .thenReturn(sessionProto); + com.google.protobuf.Timestamp timestamp = Timestamps.parse(TIMESTAMP); + Transaction txnMetadata = + Transaction.newBuilder().setId(TXN_ID).setReadTimestamp(timestamp).build(); + when(gapicRpc.beginTransaction(Mockito.any(), optionsCaptor.capture(), eq(false))) + .thenReturn(txnMetadata); + + BatchReadOnlyTransaction batchTxn = client.batchReadOnlyTransaction(TimestampBound.strong()); + assertThat(batchTxn.getBatchTransactionId().getSessionId()).isEqualTo(SESSION_NAME); + assertThat(batchTxn.getBatchTransactionId().getTransactionId()).isEqualTo(TXN_ID); + Timestamp t = Timestamp.parseTimestamp(TIMESTAMP); + assertThat(batchTxn.getReadTimestamp()).isEqualTo(t); + assertThat(batchTxn.getReadTimestamp()) + .isEqualTo(batchTxn.getBatchTransactionId().getTimestamp()); + } + + @Test + public void testBatchReadOnlyTxnWithTxnId() { + when(txnID.getSessionId()).thenReturn(SESSION_NAME); + when(txnID.getTransactionId()).thenReturn(TXN_ID); + Timestamp t = Timestamp.parseTimestamp(TIMESTAMP); + when(txnID.getTimestamp()).thenReturn(t); + + BatchReadOnlyTransaction batchTxn = client.batchReadOnlyTransaction(txnID); + assertThat(batchTxn.getBatchTransactionId().getSessionId()).isEqualTo(SESSION_NAME); + assertThat(batchTxn.getBatchTransactionId().getTransactionId()).isEqualTo(TXN_ID); + 
assertThat(batchTxn.getReadTimestamp()).isEqualTo(t); + assertThat(batchTxn.getReadTimestamp()) + .isEqualTo(batchTxn.getBatchTransactionId().getTimestamp()); + } + + @Test + public void testGetDatabaseRole() { + assertEquals(client.getDatabaseRole(), "role"); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BatchCreateSessionsSlowTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BatchCreateSessionsSlowTest.java new file mode 100644 index 000000000000..38dcaa91d19d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BatchCreateSessionsSlowTest.java @@ -0,0 +1,215 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.MockSpannerTestUtil.READ_ONE_KEY_VALUE_RESULTSET; +import static com.google.cloud.spanner.MockSpannerTestUtil.READ_ONE_KEY_VALUE_STATEMENT; +import static com.google.cloud.spanner.MockSpannerTestUtil.SELECT1; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; +import static org.junit.Assume.assumeFalse; + +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import io.grpc.Server; +import io.grpc.inprocess.InProcessServerBuilder; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(SlowTest.class) +@RunWith(JUnit4.class) +public class BatchCreateSessionsSlowTest { + private static final String TEST_PROJECT = "my-project"; + private static final String TEST_DATABASE_ROLE = "my-role"; + private static MockSpannerServiceImpl mockSpanner; + private static Server server; + private static LocalChannelProvider channelProvider; + private Spanner spanner; + + @BeforeClass + public static void startStaticServer() throws Exception { + mockSpanner = new MockSpannerServiceImpl(); + 
mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. + mockSpanner.putStatementResult( + StatementResult.query(SELECT1, MockSpannerTestUtil.SELECT1_RESULTSET)); + mockSpanner.putStatementResult( + StatementResult.query(READ_ONE_KEY_VALUE_STATEMENT, READ_ONE_KEY_VALUE_RESULTSET)); + + String uniqueName = InProcessServerBuilder.generateName(); + server = + InProcessServerBuilder.forName(uniqueName) + // We need to use a real executor for timeouts to occur. + .scheduledExecutorService(new ScheduledThreadPoolExecutor(1)) + .addService(mockSpanner) + .build() + .start(); + channelProvider = LocalChannelProvider.create(uniqueName); + } + + @AfterClass + public static void stopServer() throws InterruptedException { + server.shutdown(); + server.awaitTermination(); + } + + @Before + public void setUp() { + SessionPoolOptions sessionPoolOptions = + SessionPoolOptions.newBuilder().setFailOnSessionLeak().build(); + spanner = + SpannerOptions.newBuilder() + .setProjectId(TEST_PROJECT) + .setDatabaseRole(TEST_DATABASE_ROLE) + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption(sessionPoolOptions) + .build() + .getService(); + // BatchCreateSessions RPC is not invoked when multiplexed sessions is enabled and just RO + // transactions is used. + // Use a different transaction shape (for ex - RW transactions) which is presently unsupported + // with multiplexed sessions. + assumeFalse(sessionPoolOptions.getUseMultiplexedSession()); + } + + @After + public void tearDown() { + mockSpanner.unfreeze(); + spanner.close(); + mockSpanner.reset(); + mockSpanner.removeAllExecutionTimes(); + } + + @Test + public void testBatchCreateSessionsTimesOut_whenDeadlineExceeded() throws Exception { + // Simulate a minimum execution time of 1000 milliseconds for the BatchCreateSessions RPC. 
+ mockSpanner.setBatchCreateSessionsExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(1000, 0)); + SpannerOptions.Builder builder = + SpannerOptions.newBuilder() + .setProjectId("my-project") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()); + // Set the timeout and retry settings for BatchCreateSessions to a simple + // single-attempt-and-timeout after 100ms. + builder + .getSpannerStubSettingsBuilder() + .batchCreateSessionsSettings() + .setSimpleTimeoutNoRetriesDuration(Duration.ofMillis(100)); + + try (Spanner spanner = builder.build().getService()) { + DatabaseId databaseId = DatabaseId.of("my-project", "my-instance", "my-database"); + DatabaseClient client = spanner.getDatabaseClient(databaseId); + + ListeningExecutorService service = + MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1000)); + List> futures = new ArrayList<>(5000); + AtomicInteger counter = new AtomicInteger(); + for (int i = 0; i < 5000; i++) { + final int index = i; + futures.add( + service.submit( + () -> { + // The following call is non-blocking and will not generate an exception. + ResultSet rs = client.singleUse().executeQuery(SELECT1); + // Actually trying to get any results will cause an exception. + // The DEADLINE_EXCEEDED error of the BatchCreateSessions RPC is in this case + // propagated to + // the application. + SpannerException e = assertThrows(SpannerException.class, rs::next); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + System.out.printf("finished test %d\n", counter.incrementAndGet()); + + return null; + })); + } + service.shutdown(); + assertEquals(5000, Futures.allAsList(futures).get().size()); + } + } + + @Test + public void testBatchCreateSessionsTimesOut_whenResourceExhausted() throws Exception { + // Simulate a minimum execution time of 2000 milliseconds for the BatchCreateSessions RPC. 
+ mockSpanner.setBatchCreateSessionsExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(2000, 0)); + // Add a timeout for the max amount of time (60ms) that a request waits when a session is + // unavailable. + SessionPoolOptions sessionPoolOptions = + SessionPoolOptions.newBuilder() + .setAcquireSessionTimeoutDuration(Duration.ofMillis(60)) + .build(); + SpannerOptions.Builder builder = + SpannerOptions.newBuilder() + .setProjectId("my-project") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption(sessionPoolOptions); + // Set the timeout and retry settings for BatchCreateSessions to a simple + // single-attempt-and-timeout after 1000ms. This will ensure that session acquisition timeout of + // 60ms will kick for all requests before the overall request RPC timeout is breached. + builder + .getSpannerStubSettingsBuilder() + .batchCreateSessionsSettings() + .setSimpleTimeoutNoRetriesDuration(Duration.ofMillis(1000)); + + try (Spanner spanner = builder.build().getService()) { + DatabaseId databaseId = DatabaseId.of("my-project", "my-instance", "my-database"); + DatabaseClient client = spanner.getDatabaseClient(databaseId); + + ListeningExecutorService service = + MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1000)); + List> futures = new ArrayList<>(5000); + AtomicInteger counter = new AtomicInteger(); + for (int i = 0; i < 5000; i++) { + futures.add( + service.submit( + () -> { + // The following call is non-blocking and will not generate an exception. + ResultSet rs = client.singleUse().executeQuery(SELECT1); + // Actually trying to get any results will cause an exception. + // When number of requests > MAX_SESSIONS, post setAcquireSessionTimeout + // a few requests will timeout with RESOURCE_EXHAUSTED error. 
+ SpannerException e = assertThrows(SpannerException.class, rs::next); + assertEquals(ErrorCode.RESOURCE_EXHAUSTED, e.getErrorCode()); + System.out.printf("finished test %d\n", counter.incrementAndGet()); + + return null; + })); + } + service.shutdown(); + assertEquals(5000, Futures.allAsList(futures).get().size()); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BatchTransactionIdTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BatchTransactionIdTest.java new file mode 100644 index 000000000000..81d4de41695c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BatchTransactionIdTest.java @@ -0,0 +1,57 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.testing.SerializableTester.reserializeAndAssert; + +import com.google.cloud.Timestamp; +import com.google.common.testing.EqualsTester; +import com.google.protobuf.ByteString; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link com.google.cloud.spanner.BatchTransactionId}. 
*/ +@RunWith(JUnit4.class) +public class BatchTransactionIdTest { + + @Test + public void equalAndHashCode() { + new EqualsTester() + .addEqualityGroup( + new BatchTransactionId( + "testSession", ByteString.copyFromUtf8("testTxn"), Timestamp.MIN_VALUE), + new BatchTransactionId( + "testSession", ByteString.copyFromUtf8("testTxn"), Timestamp.MIN_VALUE)) + .addEqualityGroup( + new BatchTransactionId( + "testSession", ByteString.copyFromUtf8("testTxn"), Timestamp.MAX_VALUE), + new BatchTransactionId( + "testSession", ByteString.copyFromUtf8("testTxn"), Timestamp.MAX_VALUE)) + .testEquals(); + } + + @Test + public void serialization() { + reserializeAndAssert( + new BatchTransactionId( + "testSession", ByteString.copyFromUtf8("testTxn"), Timestamp.MIN_VALUE)); + reserializeAndAssert( + new BatchTransactionId( + "testSession", ByteString.copyFromUtf8("testTxn"), Timestamp.MIN_VALUE)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BenchmarkingUtilityScripts.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BenchmarkingUtilityScripts.java new file mode 100644 index 000000000000..75e0ca3da0fd --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BenchmarkingUtilityScripts.java @@ -0,0 +1,142 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.gax.rpc.ServerStream; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningScheduledExecutorService; +import com.google.rpc.Code; +import com.google.spanner.v1.BatchWriteResponse; +import java.time.Duration; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Hosts a bunch of utility methods/scripts that can be used while performing benchmarks to load + * data, report latency metrics, etc. + * + *

Table schema used here: CREATE TABLE FOO ( id INT64 NOT NULL, BAZ INT64, BAR INT64, ) PRIMARY + * KEY(id); + */ +@Category(SlowTest.class) +@RunWith(JUnit4.class) +public class BenchmarkingUtilityScripts { + + // TODO(developer): Add your values here for PROJECT_ID, INSTANCE_ID, DATABASE_ID + // TODO(developer): By default these values are blank and should be replaced before a run. + private static final String PROJECT_ID = ""; + private static final String INSTANCE_ID = ""; + private static final String DATABASE_ID = ""; + private static final String SERVER_URL = "https://staging-wrenchworks.sandbox.googleapis.com"; + private static DatabaseClient client; + private static Spanner spanner; + + @BeforeClass + public static void beforeClass() { + final SpannerOptions.Builder optionsBuilder = + SpannerOptions.newBuilder() + .setProjectId(PROJECT_ID) + .setAutoThrottleAdministrativeRequests(); + if (!SERVER_URL.isEmpty()) { + optionsBuilder.setHost(SERVER_URL); + } + final SpannerOptions options = optionsBuilder.build(); + spanner = options.getService(); + client = spanner.getDatabaseClient(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DATABASE_ID)); + + // Delete all existing data from the table + client.write(ImmutableList.of(Mutation.delete("FOO", KeySet.all()))); + } + + @AfterClass + public static void afterClass() { + spanner.close(); + } + + /** + * A utility which bulk inserts 10^6 records into the database in batches. The method assumes that + * the instance/database/table is already created. It does not perform any admin operations. + * + *

Table schema used here: CREATE TABLE FOO ( id INT64 NOT NULL, BAZ INT64, BAR INT64, ) + * PRIMARY KEY(id); + */ + @Test + public void bulkInsertTestData() { + int key = 0; + List mutationGroups = new ArrayList<>(); + for (int batch = 0; batch < 100; batch++) { + List mutations = new LinkedList<>(); + for (int i = 0; i < 10000; i++) { + mutations.add( + Mutation.newInsertBuilder("FOO") + .set("id") + .to(key) + .set("BAZ") + .to(1) + .set("BAR") + .to(2) + .build()); + key++; + } + mutationGroups.add(MutationGroup.of(mutations)); + } + ServerStream responses = client.batchWriteAtLeastOnce(mutationGroups); + for (BatchWriteResponse response : responses) { + if (response.getStatus().getCode() == Code.OK_VALUE) { + System.out.printf( + "Mutation group indexes %s have been applied with commit timestamp %s", + response.getIndexesList(), response.getCommitTimestamp()); + } else { + System.out.printf( + "Mutation group indexes %s could not be applied with error code %s and " + + "error message %s", + response.getIndexesList(), + Code.forNumber(response.getStatus().getCode()), + response.getStatus().getMessage()); + } + } + } + + /** Collects all results from a collection of future objects. 
*/ + public static List collectResults( + final ListeningScheduledExecutorService service, + final List>> results, + final int numOperations, + final Duration timeoutDuration) + throws Exception { + service.shutdown(); + if (!service.awaitTermination(timeoutDuration.toMinutes(), TimeUnit.MINUTES)) { + throw new TimeoutException(); + } + List allResults = new ArrayList<>(numOperations); + for (Future> result : results) { + allResults.addAll(result.get()); + } + return allResults; + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BuiltInOpenTelemetryMetricsProviderTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BuiltInOpenTelemetryMetricsProviderTest.java new file mode 100644 index 000000000000..73185177de19 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BuiltInOpenTelemetryMetricsProviderTest.java @@ -0,0 +1,66 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class BuiltInOpenTelemetryMetricsProviderTest { + + @Test + public void testGenerateClientHashWithSimpleUid() { + String clientUid = "testClient"; + verifyHash(BuiltInMetricsProvider.generateClientHash(clientUid)); + } + + @Test + public void testGenerateClientHashWithEmptyUid() { + String clientUid = ""; + verifyHash(BuiltInMetricsProvider.generateClientHash(clientUid)); + } + + @Test + public void testGenerateClientHashWithNullUid() { + String clientUid = null; + verifyHash(BuiltInMetricsProvider.generateClientHash(clientUid)); + } + + @Test + public void testGenerateClientHashWithLongUid() { + String clientUid = "aVeryLongUniqueClientIdentifierThatIsUnusuallyLong"; + verifyHash(BuiltInMetricsProvider.generateClientHash(clientUid)); + } + + @Test + public void testGenerateClientHashWithSpecialCharacters() { + String clientUid = "273d60f2-5604-42f1-b687-f5f1b975fd07@2316645@test#"; + verifyHash(BuiltInMetricsProvider.generateClientHash(clientUid)); + } + + private void verifyHash(String hash) { + // Check if the hash length is 6 + assertEquals(hash.length(), 6); + // Check if the hash is in the range [000000, 0003ff] + long hashValue = Long.parseLong(hash, 16); // Convert hash from hex to decimal + assertTrue(hashValue >= 0 && hashValue <= 0x3FF); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ChannelUsageTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ChannelUsageTest.java new file mode 100644 index 000000000000..182c9cc35b5e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ChannelUsageTest.java @@ -0,0 +1,246 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache 
License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static java.util.stream.Collectors.toSet; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.protobuf.ListValue; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.SpannerGrpc; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.TypeCode; +import io.grpc.Context; +import io.grpc.Contexts; +import io.grpc.Metadata; +import io.grpc.Server; +import io.grpc.ServerCall; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptor; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import java.net.InetSocketAddress; +import java.util.Arrays; +import java.util.Collection; +import java.util.Deque; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.logging.Level; +import java.util.logging.Logger; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +/** + * Tests that the Spanner client opens 
multiple channels, and that each session is associated with + * one specific channel. + */ +@RunWith(Parameterized.class) +public class ChannelUsageTest { + + @Parameter(0) + public int numChannels; + + @Parameters(name = "num channels = {0}") + public static Collection data() { + return Arrays.asList(new Object[][] {{1}, {2}, {4}}); + } + + private static final Statement SELECT1 = Statement.of("SELECT 1 AS COL1"); + private static final ResultSetMetadata SELECT1_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("COL1") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.INT64) + .build()) + .build()) + .build()) + .build(); + private static final com.google.spanner.v1.ResultSet SELECT1_RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("1").build()) + .build()) + .setMetadata(SELECT1_METADATA) + .build(); + + private static MockSpannerServiceImpl mockSpanner; + private static Server server; + private static InetSocketAddress address; + // Track channel hints (from X-Goog-Spanner-Request-Id header) per RPC method + private static final Set batchCreateSessionChannelHints = ConcurrentHashMap.newKeySet(); + private static final Set executeSqlChannelHints = ConcurrentHashMap.newKeySet(); + private static final Deque allExecuteSqlChannelHints = new ConcurrentLinkedDeque<>(); + + private static Level originalLogLevel; + + @BeforeClass + public static void startServer() throws Exception { + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. 
+ mockSpanner.putStatementResult(StatementResult.query(SELECT1, SELECT1_RESULTSET)); + + address = new InetSocketAddress("localhost", 0); + server = + NettyServerBuilder.forAddress(address) + .addService(mockSpanner) + // Add a server interceptor to extract channel hints from X-Goog-Spanner-Request-Id + // header. This verifies that the client uses all configured channels. + .intercept( + new ServerInterceptor() { + @Override + public ServerCall.Listener interceptCall( + ServerCall call, + Metadata headers, + ServerCallHandler next) { + // Verify that the compressor name header is set. + assertEquals( + "gzip", + headers.get( + Metadata.Key.of( + "x-response-encoding", Metadata.ASCII_STRING_MARSHALLER))); + // Extract channel hint from X-Goog-Spanner-Request-Id header + String requestId = headers.get(XGoogSpannerRequestId.REQUEST_ID_HEADER_KEY); + if (requestId != null) { + // Format: + // ..... + String[] parts = requestId.split("\\."); + if (parts.length >= 4) { + try { + long channelHint = Long.parseLong(parts[3]); + if (call.getMethodDescriptor() + .equals(SpannerGrpc.getBatchCreateSessionsMethod())) { + batchCreateSessionChannelHints.add(channelHint); + } + if (call.getMethodDescriptor() + .equals(SpannerGrpc.getExecuteStreamingSqlMethod())) { + executeSqlChannelHints.add(channelHint); + allExecuteSqlChannelHints.add(channelHint); + } + } catch (NumberFormatException e) { + // Ignore parse errors + } + } + } + return Contexts.interceptCall(Context.current(), call, headers, next); + } + }) + .build() + .start(); + } + + @AfterClass + public static void stopServer() throws InterruptedException { + server.shutdown(); + server.awaitTermination(); + } + + @BeforeClass + public static void disableLogging() { + Logger logger = Logger.getLogger(""); + originalLogLevel = logger.getLevel(); + logger.setLevel(Level.OFF); + } + + @AfterClass + public static void resetLogging() { + Logger logger = Logger.getLogger(""); + logger.setLevel(originalLogLevel); + } + + @After + 
public void reset() { + mockSpanner.reset(); + batchCreateSessionChannelHints.clear(); + executeSqlChannelHints.clear(); + allExecuteSqlChannelHints.clear(); + } + + private SpannerOptions createSpannerOptions() { + String endpoint = address.getHostString() + ":" + server.getPort(); + SpannerOptions.Builder builder = + SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + .setChannelConfigurator( + input -> { + input.usePlaintext(); + return input; + }) + .setNumChannels(numChannels) + .setCompressorName("gzip") + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setMinSessions(numChannels * 2) + .setMaxSessions(numChannels * 2) + .build()) + .setHost("http://" + endpoint) + .setCredentials(NoCredentials.getInstance()); + + return builder.build(); + } + + @Test + public void testUsesAllChannels() throws InterruptedException { + final int multiplier = 10; + try (Spanner spanner = createSpannerOptions().getService()) { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + for (int run = 0; run < numChannels * multiplier; run++) { + try (ReadOnlyTransaction transaction = client.readOnlyTransaction()) { + for (int i = 0; i < 2; i++) { + try (ResultSet resultSet = transaction.executeQuery(SELECT1)) { + while (resultSet.next()) {} + } + } + } + } + } + // Bound the channel hints to numChannels (matching gRPC-GCP behavior) and verify + // that channels are being distributed. The raw channel hints may be unbounded (based on + // session index), but gRPC-GCP bounds them to the actual number of channels. 
+ assertEquals(2 * numChannels * multiplier, allExecuteSqlChannelHints.size()); + Set boundedChannelHints = + executeSqlChannelHints.stream().map(hint -> hint % numChannels).collect(toSet()); + // Verify that channel distribution is working: + // - For numChannels=1, exactly 1 channel should be used + // - For numChannels>1, multiple channels should be used (at least half) + if (numChannels == 1) { + assertEquals(1, boundedChannelHints.size()); + } else { + assertTrue( + "Expected at least " + + (numChannels / 2) + + " channels to be used, but got " + + boundedChannelHints.size(), + boundedChannelHints.size() >= numChannels / 2); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/CloseSpannerWithOpenResultSetTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/CloseSpannerWithOpenResultSetTest.java new file mode 100644 index 000000000000..ed9f21c46353 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/CloseSpannerWithOpenResultSetTest.java @@ -0,0 +1,125 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import com.google.cloud.spanner.spi.v1.GapicSpannerRpc; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Status; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class CloseSpannerWithOpenResultSetTest extends AbstractMockServerTest { + + Spanner createSpanner() { + return SpannerOptions.newBuilder() + .setProjectId("p") + .setHost(String.format("http://localhost:%d", getPort())) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setWaitForMinSessionsDuration(Duration.ofSeconds(5L)) + .build()) + .build() + .getService(); + } + + @BeforeClass + public static void setWatchdogTimeout() { + System.setProperty("com.google.cloud.spanner.watchdogTimeoutSeconds", "1"); + } + + @AfterClass + public static void clearWatchdogTimeout() { + System.clearProperty("com.google.cloud.spanner.watchdogTimeoutSeconds"); + } + + @After + public void cleanup() { + mockSpanner.unfreeze(); + mockSpanner.clearRequests(); + } + + @Test + public void testStreamsAreCleanedUp() throws Exception { + String invalidSql = 
"select * from foo"; + Statement invalidStatement = Statement.of(invalidSql); + mockSpanner.putStatementResult( + StatementResult.exception( + invalidStatement, + Status.NOT_FOUND.withDescription("Table not found: foo").asRuntimeException())); + int numThreads = 16; + int numQueries = 32; + try (Spanner spanner = createSpanner()) { + BatchClient client = spanner.getBatchClient(DatabaseId.of("p", "i", "d")); + ExecutorService service = Executors.newFixedThreadPool(numThreads); + List> futures = new ArrayList<>(numQueries); + for (int n = 0; n < numQueries; n++) { + futures.add( + service.submit( + () -> { + try (BatchReadOnlyTransaction transaction = + client.batchReadOnlyTransaction(TimestampBound.strong())) { + if (ThreadLocalRandom.current().nextInt(10) < 2) { + try (ResultSet resultSet = transaction.executeQuery(invalidStatement)) { + SpannerException exception = + assertThrows(SpannerException.class, resultSet::next); + assertEquals(ErrorCode.NOT_FOUND, exception.getErrorCode()); + } + } else { + try (ResultSet resultSet = + transaction.executeQuery(SELECT_RANDOM_STATEMENT)) { + while (resultSet.next()) { + assertNotNull(resultSet.getCurrentRowAsStruct()); + } + } + } + } + })); + } + service.shutdown(); + for (Future fut : futures) { + fut.get(); + } + assertTrue(service.awaitTermination(1L, TimeUnit.MINUTES)); + // Verify that all response observers have been unregistered. 
+ assertEquals( + 0, ((GapicSpannerRpc) ((SpannerImpl) spanner).getRpc()).getNumActiveResponseObservers()); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/CommitResponseTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/CommitResponseTest.java new file mode 100644 index 000000000000..6ac22a28937c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/CommitResponseTest.java @@ -0,0 +1,122 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.Timestamp; +import com.google.spanner.v1.CommitResponse.CommitStats; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class CommitResponseTest { + + @Test + public void testConstructWithTimestamp() { + Timestamp timestamp = Timestamp.ofTimeSecondsAndNanos(100L, 100); + CommitResponse response = new CommitResponse(timestamp); + assertEquals(timestamp, response.getCommitTimestamp()); + } + + @Test + public void testFromProto() { + long mutationCount = 5L; + com.google.protobuf.Timestamp timestamp = + com.google.protobuf.Timestamp.newBuilder().setSeconds(123L).setNanos(456).build(); + com.google.spanner.v1.CommitResponse proto = + com.google.spanner.v1.CommitResponse.newBuilder() + .setCommitStats( + com.google.spanner.v1.CommitResponse.CommitStats.newBuilder() + .setMutationCount(mutationCount) + .build()) + .setCommitTimestamp(timestamp) + .build(); + + CommitResponse response = new CommitResponse(proto); + + assertEquals(Timestamp.ofTimeSecondsAndNanos(123L, 456), response.getCommitTimestamp()); + assertEquals(mutationCount, response.getCommitStats().getMutationCount()); + } + + @Test + public void testEqualsAndHashCode() { + com.google.spanner.v1.CommitResponse proto1 = + com.google.spanner.v1.CommitResponse.newBuilder() + .setCommitTimestamp(com.google.protobuf.Timestamp.newBuilder().setSeconds(1L).build()) + .build(); + com.google.spanner.v1.CommitResponse proto2 = + com.google.spanner.v1.CommitResponse.newBuilder() + .setCommitTimestamp(com.google.protobuf.Timestamp.newBuilder().setSeconds(2L).build()) + .build(); + com.google.spanner.v1.CommitResponse proto3 = + com.google.spanner.v1.CommitResponse.newBuilder() + 
.setCommitTimestamp(com.google.protobuf.Timestamp.newBuilder().setSeconds(1L).build()) + .build(); + + CommitResponse response1 = new CommitResponse(proto1); + CommitResponse response2 = new CommitResponse(proto2); + CommitResponse response3 = new CommitResponse(proto3); + + assertEquals(response3, response1); + assertNotEquals(response2, response1); + assertNotEquals(response3, response2); + assertNotEquals(response1, null); + assertNotEquals(response1, new Object()); + + assertEquals(response3.hashCode(), response1.hashCode()); + assertNotEquals(response2.hashCode(), response1.hashCode()); + assertNotEquals(response3.hashCode(), response2.hashCode()); + } + + @Test + public void testHasCommitStats() { + com.google.spanner.v1.CommitResponse protoWithoutCommitStats = + com.google.spanner.v1.CommitResponse.getDefaultInstance(); + CommitResponse responseWithoutCommitStats = new CommitResponse(protoWithoutCommitStats); + assertFalse(responseWithoutCommitStats.hasCommitStats()); + + com.google.spanner.v1.CommitResponse protoWithCommitStats = + com.google.spanner.v1.CommitResponse.newBuilder() + .setCommitStats(CommitStats.getDefaultInstance()) + .build(); + CommitResponse responseWithCommitStats = new CommitResponse(protoWithCommitStats); + assertTrue(responseWithCommitStats.hasCommitStats()); + } + + @Test + public void testGetSnapshotTimestamp() { + com.google.spanner.v1.CommitResponse protoWithoutSnapshotTimestamp = + com.google.spanner.v1.CommitResponse.getDefaultInstance(); + CommitResponse responseWithoutSnapshotTimestamp = + new CommitResponse(protoWithoutSnapshotTimestamp); + assertEquals(null, responseWithoutSnapshotTimestamp.getSnapshotTimestamp()); + + com.google.protobuf.Timestamp timestamp = + com.google.protobuf.Timestamp.newBuilder().setSeconds(123L).setNanos(456).build(); + com.google.spanner.v1.CommitResponse protoWithSnapshotTimestamp = + com.google.spanner.v1.CommitResponse.newBuilder().setSnapshotTimestamp(timestamp).build(); + CommitResponse 
responseWithSnapshotTimestamp = new CommitResponse(protoWithSnapshotTimestamp); + assertEquals( + Timestamp.ofTimeSecondsAndNanos(123L, 456), + responseWithSnapshotTimestamp.getSnapshotTimestamp()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/CompositeTracerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/CompositeTracerTest.java new file mode 100644 index 000000000000..7f2e7fffc6f6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/CompositeTracerTest.java @@ -0,0 +1,277 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.api.gax.tracing.ApiTracer; +import com.google.api.gax.tracing.ApiTracer.Scope; +import com.google.api.gax.tracing.MetricsTracer; +import com.google.common.collect.ImmutableList; +import com.google.spanner.v1.ReadRequest; +import java.lang.reflect.Method; +import java.time.Duration; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnit; +import org.mockito.junit.MockitoRule; + +@RunWith(JUnit4.class) +public class CompositeTracerTest { + @Rule public final MockitoRule mockitoRule = MockitoJUnit.rule(); + + @Mock private ApiTracer child1; + @Mock private ApiTracer child2; + @Mock private OpenTelemetryApiTracer child3; + @Mock private MetricsTracer child4; + + private CompositeTracer compositeTracer; + + @Before + public void setup() { + compositeTracer = new CompositeTracer(ImmutableList.of(child1, child2, child3, child4)); + } + + @Test + public void testInScope() { + Scope scope1 = mock(Scope.class); + when(child1.inScope()).thenReturn(scope1); + + Scope scope2 = mock(Scope.class); + when(child2.inScope()).thenReturn(scope2); + + Scope scope3 = mock(Scope.class); + when(child3.inScope()).thenReturn(scope3); + + Scope scope4 = mock(Scope.class); + when(child4.inScope()).thenReturn(scope4); + + Scope parentScope = compositeTracer.inScope(); + + parentScope.close(); + verify(scope1, times(1)).close(); + verify(scope2, times(1)).close(); + verify(scope3, times(1)).close(); + verify(scope4, times(1)).close(); + } + + @Test + public void 
testOperationSucceeded() { + compositeTracer.operationSucceeded(); + verify(child1, times(1)).operationSucceeded(); + verify(child2, times(1)).operationSucceeded(); + verify(child3, times(1)).operationSucceeded(); + verify(child4, times(1)).operationSucceeded(); + } + + @Test + public void testOperationCancelled() { + compositeTracer.operationCancelled(); + verify(child1, times(1)).operationCancelled(); + verify(child2, times(1)).operationCancelled(); + verify(child3, times(1)).operationCancelled(); + verify(child4, times(1)).operationCancelled(); + } + + @Test + public void testOperationFailed() { + RuntimeException error = new RuntimeException(); + compositeTracer.operationFailed(error); + verify(child1, times(1)).operationFailed(error); + verify(child2, times(1)).operationFailed(error); + verify(child3, times(1)).operationFailed(error); + verify(child4, times(1)).operationFailed(error); + } + + @Test + public void testConnectionSelected() { + compositeTracer.connectionSelected("connection-one"); + verify(child1, times(1)).connectionSelected("connection-one"); + verify(child2, times(1)).connectionSelected("connection-one"); + verify(child3, times(1)).connectionSelected("connection-one"); + verify(child4, times(1)).connectionSelected("connection-one"); + } + + @Test + public void testAttemptStarted() { + ReadRequest request = ReadRequest.getDefaultInstance(); + compositeTracer.attemptStarted(request, 3); + verify(child1, times(1)).attemptStarted(request, 3); + verify(child2, times(1)).attemptStarted(request, 3); + verify(child3, times(1)).attemptStarted(request, 3); + verify(child4, times(1)).attemptStarted(request, 3); + } + + @Test + public void testAttemptSucceeded() { + compositeTracer.attemptSucceeded(); + verify(child1, times(1)).attemptSucceeded(); + verify(child2, times(1)).attemptSucceeded(); + verify(child3, times(1)).attemptSucceeded(); + verify(child4, times(1)).attemptSucceeded(); + } + + @Test + public void testAttemptCancelled() { + 
compositeTracer.attemptCancelled(); + verify(child1, times(1)).attemptCancelled(); + verify(child2, times(1)).attemptCancelled(); + verify(child3, times(1)).attemptCancelled(); + verify(child4, times(1)).attemptCancelled(); + } + + @Test + public void testAttemptFailed() { + RuntimeException error = new RuntimeException(); + Duration delay = Duration.ofMillis(10); + compositeTracer.attemptFailedDuration(error, delay); + verify(child1, times(1)).attemptFailedDuration(error, delay); + verify(child2, times(1)).attemptFailedDuration(error, delay); + verify(child3, times(1)).attemptFailedDuration(error, delay); + verify(child4, times(1)).attemptFailedDuration(error, delay); + } + + @Test + public void testAttemptFailedRetriesExhausted() { + RuntimeException error = new RuntimeException(); + compositeTracer.attemptFailedRetriesExhausted(error); + verify(child1, times(1)).attemptFailedRetriesExhausted(error); + verify(child2, times(1)).attemptFailedRetriesExhausted(error); + verify(child3, times(1)).attemptFailedRetriesExhausted(error); + verify(child4, times(1)).attemptFailedRetriesExhausted(error); + } + + @Test + public void testAttemptPermanentFailure() { + RuntimeException error = new RuntimeException(); + compositeTracer.attemptPermanentFailure(error); + verify(child1, times(1)).attemptPermanentFailure(error); + verify(child2, times(1)).attemptPermanentFailure(error); + verify(child3, times(1)).attemptPermanentFailure(error); + verify(child4, times(1)).attemptPermanentFailure(error); + } + + @Test + public void testLroStartFailed() { + RuntimeException error = new RuntimeException(); + compositeTracer.lroStartFailed(error); + verify(child1, times(1)).lroStartFailed(error); + verify(child2, times(1)).lroStartFailed(error); + verify(child3, times(1)).lroStartFailed(error); + verify(child4, times(1)).lroStartFailed(error); + } + + @Test + public void testLroStartSucceeded() { + compositeTracer.lroStartSucceeded(); + verify(child1, times(1)).lroStartSucceeded(); + 
verify(child2, times(1)).lroStartSucceeded(); + verify(child3, times(1)).lroStartSucceeded(); + verify(child4, times(1)).lroStartSucceeded(); + } + + @Test + public void testResponseReceived() { + compositeTracer.responseReceived(); + verify(child1, times(1)).responseReceived(); + verify(child2, times(1)).responseReceived(); + verify(child3, times(1)).responseReceived(); + verify(child4, times(1)).responseReceived(); + } + + @Test + public void testRequestSent() { + compositeTracer.requestSent(); + verify(child1, times(1)).requestSent(); + verify(child2, times(1)).requestSent(); + verify(child3, times(1)).requestSent(); + verify(child4, times(1)).requestSent(); + } + + @Test + public void testBatchRequestSent() { + compositeTracer.batchRequestSent(2, 20); + verify(child1, times(1)).batchRequestSent(2, 20); + verify(child2, times(1)).batchRequestSent(2, 20); + verify(child3, times(1)).batchRequestSent(2, 20); + verify(child4, times(1)).batchRequestSent(2, 20); + } + + @Test + public void testMethodsOverrideMetricsTracer() { + Method[] metricsTracerMethods = MetricsTracer.class.getDeclaredMethods(); + Method[] compositeTracerMethods = CompositeTracer.class.getDeclaredMethods(); + + List visibleForTestingMethods = Arrays.asList("getAttributes", "extractStatus"); + + Set compositeMethodsSet = new HashSet<>(Arrays.asList(compositeTracerMethods)); + + for (Method metricsMethod : metricsTracerMethods) { + if (!visibleForTestingMethods.contains(metricsMethod.getName()) + && !containsMethod(compositeMethodsSet, metricsMethod)) { + throw new AssertionError("Method not found in compositeTracerMethods: " + metricsMethod); + } + } + } + + @Test + public void testMethodsOverrideOpenTelemetryTracer() { + + Method[] compositeTracerMethods = CompositeTracer.class.getDeclaredMethods(); + + List openTelemetryTracerMethods = + Arrays.stream(OpenTelemetryApiTracer.class.getDeclaredMethods()) + .filter(method -> java.lang.reflect.Modifier.isPublic(method.getModifiers())) + 
.collect(Collectors.toList()); + + Set compositeMethodsSet = new HashSet<>(Arrays.asList(compositeTracerMethods)); + + for (Method metricsMethod : openTelemetryTracerMethods) { + if (!containsMethod(compositeMethodsSet, metricsMethod)) { + throw new AssertionError("Method not found in compositeTracerMethods: " + metricsMethod); + } + } + } + + private boolean compareMethods(Method actual, Method expected) { + return actual.getName().equals(expected.getName()) + && Arrays.equals(actual.getParameterTypes(), expected.getParameterTypes()) + && actual.getModifiers() == expected.getModifiers() + && actual.getReturnType().equals(expected.getReturnType()); + } + + public boolean containsMethod(Set methodSet, Method method) { + for (Method m : methodSet) { + if (compareMethods(m, method)) { + return true; + } + } + return false; + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseAdminClientImplTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseAdminClientImplTest.java new file mode 100644 index 000000000000..f889d5b5f6fc --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseAdminClientImplTest.java @@ -0,0 +1,757 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static com.google.spanner.admin.database.v1.DatabaseDialect.GOOGLE_STANDARD_SQL; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.mockito.MockitoAnnotations.initMocks; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.Identity; +import com.google.cloud.Role; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.DatabaseInfo.DatabaseField; +import com.google.cloud.spanner.DatabaseInfo.State; +import com.google.cloud.spanner.encryption.EncryptionConfigs; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.cloud.spanner.spi.v1.SpannerRpc.Paginated; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; +import com.google.common.io.BaseEncoding; +import com.google.iam.v1.Binding; +import com.google.iam.v1.Policy; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.protobuf.Any; +import com.google.protobuf.ByteString; +import com.google.protobuf.Empty; +import com.google.protobuf.FieldMask; +import com.google.protobuf.Message; +import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.CopyBackupMetadata; +import com.google.spanner.admin.database.v1.CreateBackupMetadata; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.DatabaseDialect; +import com.google.spanner.admin.database.v1.DatabaseRole; +import com.google.spanner.admin.database.v1.EncryptionInfo; +import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; +import 
com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseMetadata; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.Mock; + +@RunWith(JUnit4.class) +public class DatabaseAdminClientImplTest { + private static final String PROJECT_ID = "my-project"; + private static final String INSTANCE_ID = "my-instance"; + private static final String INSTANCE_ID_2 = "my-instance-2"; + private static final String INSTANCE_NAME = "projects/my-project/instances/my-instance"; + private static final String DB_ID = "my-db"; + private static final String DB_NAME = "projects/my-project/instances/my-instance/databases/my-db"; + private static final String DB_NAME2 = + "projects/my-project/instances/my-instance/databases/my-db2"; + private static final String BK_ID = "my-bk"; + private static final String SOURCE_BK = "my-source-bk"; + private static final String DB_ROLE = "dummy-role"; + private static final String DB_ROLE2 = "dummy-role-2"; + private static final String BK_NAME = "projects/my-project/instances/my-instance/backups/my-bk"; + private static final String BK_NAME2 = "projects/my-project/instances/my-instance/backups/my-bk2"; + private static final Timestamp EARLIEST_VERSION_TIME = Timestamp.now(); + private static final String VERSION_RETENTION_PERIOD = "7d"; + private static final String KMS_KEY_NAME = + "projects/my-project/locations/some-location/keyRings/my-keyring/cryptoKeys/my-key"; + private static final String KMS_KEY_VERSION = "1"; + private static final DatabaseDialect DIALECT = GOOGLE_STANDARD_SQL; + + @Mock SpannerRpc rpc; + DatabaseAdminClientImpl 
client; + + @Before + public void setUp() { + initMocks(this); + client = new DatabaseAdminClientImpl(PROJECT_ID, rpc); + } + + private Database getDatabaseProto() { + return Database.newBuilder() + .setName(DB_NAME) + .setState(Database.State.READY) + .setEarliestVersionTime(EARLIEST_VERSION_TIME.toProto()) + .setVersionRetentionPeriod(VERSION_RETENTION_PERIOD) + .setDatabaseDialect(DIALECT) + .build(); + } + + private DatabaseRole getDatabaseRoleProto() { + return DatabaseRole.newBuilder().setName(DB_ROLE).build(); + } + + private DatabaseRole getAnotherDatabaseRoleProto() { + return DatabaseRole.newBuilder().setName(DB_ROLE2).build(); + } + + private Database getEncryptedDatabaseProto() { + return getDatabaseProto().toBuilder() + .setEncryptionConfig( + com.google.spanner.admin.database.v1.EncryptionConfig.newBuilder() + .setKmsKeyName(KMS_KEY_NAME) + .build()) + .build(); + } + + private Database getAnotherDatabaseProto() { + return Database.newBuilder().setName(DB_NAME2).setState(Database.State.READY).build(); + } + + static Any toAny(Message message) { + return Any.newBuilder() + .setTypeUrl("type.googleapis.com/" + message.getDescriptorForType().getFullName()) + .setValue(message.toByteString()) + .build(); + } + + private Backup getBackupProto() { + return Backup.newBuilder() + .setName(BK_NAME) + .setDatabase(DB_NAME) + .setState(Backup.State.READY) + .build(); + } + + private Backup getEncryptedBackupProto() { + return Backup.newBuilder() + .setName(BK_NAME) + .setDatabase(DB_NAME) + .setState(Backup.State.READY) + .setEncryptionInfo(EncryptionInfo.newBuilder().setKmsKeyVersion(KMS_KEY_VERSION).build()) + .build(); + } + + private Backup getAnotherBackupProto() { + return Backup.newBuilder() + .setName(BK_NAME2) + .setDatabase(DB_NAME2) + .setState(Backup.State.READY) + .build(); + } + + @Test + public void getDatabase() { + when(rpc.getDatabase(DB_NAME)).thenReturn(getDatabaseProto()); + com.google.cloud.spanner.Database db = 
client.getDatabase(INSTANCE_ID, DB_ID); + assertThat(db.getId().getName()).isEqualTo(DB_NAME); + assertThat(db.getState()).isEqualTo(DatabaseInfo.State.READY); + assertThat(db.getEarliestVersionTime()).isEqualTo(EARLIEST_VERSION_TIME); + assertThat(db.getVersionRetentionPeriod()).isEqualTo(VERSION_RETENTION_PERIOD); + } + + @Test + public void createDatabase() throws Exception { + OperationFuture rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "createDatabase", getDatabaseProto(), CreateDatabaseMetadata.getDefaultInstance()); + when(rpc.createDatabase( + INSTANCE_NAME, + "CREATE DATABASE `" + DB_ID + "`", + Collections.emptyList(), + client + .newDatabaseBuilder(DatabaseId.of(DB_NAME)) + .setState(State.UNSPECIFIED) + .setDialect(Dialect.GOOGLE_STANDARD_SQL) + .build())) + .thenReturn(rawOperationFuture); + OperationFuture op = + client.createDatabase(INSTANCE_ID, DB_ID, Collections.emptyList()); + assertThat(op.isDone()).isTrue(); + assertThat(op.get().getId().getName()).isEqualTo(DB_NAME); + } + + @Test + public void createEncryptedDatabase() throws Exception { + com.google.cloud.spanner.Database database = + client + .newDatabaseBuilder(DatabaseId.of(DB_NAME)) + .setEncryptionConfig(EncryptionConfigs.customerManagedEncryption(KMS_KEY_NAME)) + .setDialect(Dialect.GOOGLE_STANDARD_SQL) + .build(); + + OperationFuture rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "createDatabase", + getEncryptedDatabaseProto(), + CreateDatabaseMetadata.getDefaultInstance()); + when(rpc.createDatabase( + INSTANCE_NAME, "CREATE DATABASE `" + DB_ID + "`", Collections.emptyList(), database)) + .thenReturn(rawOperationFuture); + OperationFuture op = + client.createDatabase(database, Collections.emptyList()); + assertThat(op.isDone()).isTrue(); + assertThat(op.get().getId().getName()).isEqualTo(DB_NAME); + } + + @Test + public void updateDatabaseDdl() throws Exception { + String opName = DB_NAME + "/operations/myop"; + String opId = "myop"; 
+ List ddl = ImmutableList.of(); + OperationFuture rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + opName, Empty.getDefaultInstance(), UpdateDatabaseDdlMetadata.getDefaultInstance()); + when(rpc.updateDatabaseDdl( + client.newDatabaseBuilder(DatabaseId.of(DB_NAME)).build(), ddl, opId)) + .thenReturn(rawOperationFuture); + OperationFuture op = + client.updateDatabaseDdl(INSTANCE_ID, DB_ID, ddl, opId); + assertThat(op.isDone()).isTrue(); + assertThat(op.getName()).isEqualTo(opName); + } + + @Test + public void updateDatabaseDdlOpAlreadyExists() throws Exception { + String originalOpName = DB_NAME + "/operations/originalop"; + List ddl = ImmutableList.of(); + OperationFuture originalOp = + OperationFutureUtil.immediateOperationFuture( + originalOpName, + Empty.getDefaultInstance(), + UpdateDatabaseDdlMetadata.getDefaultInstance()); + + String newOpId = "newop"; + when(rpc.updateDatabaseDdl( + client.newDatabaseBuilder(DatabaseId.of(DB_NAME)).build(), ddl, newOpId)) + .thenReturn(originalOp); + OperationFuture op = + client.updateDatabaseDdl(INSTANCE_ID, DB_ID, ddl, newOpId); + assertThat(op.getName()).isEqualTo(originalOpName); + } + + @Test + public void updateDatabase() throws Exception { + com.google.cloud.spanner.Database database = + client.newDatabaseBuilder(DatabaseId.of(DB_NAME)).enableDropProtection().build(); + Database databaseProto = database.toProto(); + OperationFuture rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "updateDatabase", databaseProto, UpdateDatabaseMetadata.getDefaultInstance()); + when(rpc.updateDatabase( + databaseProto, DatabaseField.toFieldMask(DatabaseField.DROP_PROTECTION))) + .thenReturn(rawOperationFuture); + OperationFuture op = + client.updateDatabase(database, DatabaseField.DROP_PROTECTION); + assertTrue(op.isDone()); + assertEquals(op.get().getId().getName(), DB_NAME); + assertTrue(op.get().isDropProtectionEnabled()); + } + + @Test + public void dropDatabase() { + 
client.dropDatabase(INSTANCE_ID, DB_ID); + verify(rpc).dropDatabase(DB_NAME); + } + + @Test + public void getDatabaseDdl() { + List ddl = ImmutableList.of("CREATE TABLE mytable()"); + when(rpc.getDatabaseDdl(DB_NAME)) + .thenReturn(GetDatabaseDdlResponse.newBuilder().addAllStatements(ddl).build()); + assertThat(client.getDatabaseDdl(INSTANCE_ID, DB_ID)).isEqualTo(ddl); + } + + @Test + public void getDatabaseDdlResponse() { + List ddl = ImmutableList.of("CREATE TABLE mytable()"); + when(rpc.getDatabaseDdl(DB_NAME)) + .thenReturn( + GetDatabaseDdlResponse.newBuilder() + .addAllStatements(ddl) + .setProtoDescriptors(ByteString.EMPTY) + .build()); + GetDatabaseDdlResponse response = client.getDatabaseDdlResponse(INSTANCE_ID, DB_ID); + assertThat(response.getStatementsList()).isEqualTo(ddl); + assertThat(response.getProtoDescriptors()).isEqualTo(ByteString.EMPTY); + } + + @Test + public void listDatabases() { + String pageToken = "token"; + when(rpc.listDatabases(INSTANCE_NAME, 1, null)) + .thenReturn(new Paginated<>(ImmutableList.of(getDatabaseProto()), pageToken)); + when(rpc.listDatabases(INSTANCE_NAME, 1, pageToken)) + .thenReturn(new Paginated<>(ImmutableList.of(getAnotherDatabaseProto()), "")); + List dbs = + Lists.newArrayList(client.listDatabases(INSTANCE_ID, Options.pageSize(1)).iterateAll()); + assertThat(dbs.get(0).getId().getName()).isEqualTo(DB_NAME); + assertThat(dbs.get(1).getId().getName()).isEqualTo(DB_NAME2); + assertThat(dbs.size()).isEqualTo(2); + } + + @Test + public void listDatabasesError() { + when(rpc.listDatabases(INSTANCE_NAME, 1, null)) + .thenThrow( + SpannerExceptionFactory.newSpannerException(ErrorCode.INVALID_ARGUMENT, "Test error")); + SpannerException e = + assertThrows( + SpannerException.class, () -> client.listDatabases(INSTANCE_ID, Options.pageSize(1))); + assertThat(e.getMessage()).contains(INSTANCE_NAME); + // Assert that the call was done without a page token. 
+ assertThat(e.getMessage()).contains("with pageToken "); + } + + @Test + public void listDatabaseErrorWithToken() { + String pageToken = "token"; + when(rpc.listDatabases(INSTANCE_NAME, 1, null)) + .thenReturn(new Paginated<>(ImmutableList.of(getDatabaseProto()), pageToken)); + when(rpc.listDatabases(INSTANCE_NAME, 1, pageToken)) + .thenThrow( + SpannerExceptionFactory.newSpannerException(ErrorCode.INVALID_ARGUMENT, "Test error")); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + Lists.newArrayList( + client.listDatabases(INSTANCE_ID, Options.pageSize(1)).iterateAll())); + assertThat(e.getMessage()).contains(INSTANCE_NAME); + // Assert that the call was done without a page token. + assertThat(e.getMessage()).contains(String.format("with pageToken %s", pageToken)); + } + + @Test + public void listDatabaseRoles() { + String pageToken = "token"; + when(rpc.listDatabaseRoles(DB_NAME, 1, null)) + .thenReturn(new Paginated<>(ImmutableList.of(getDatabaseRoleProto()), pageToken)); + when(rpc.listDatabaseRoles(DB_NAME, 1, pageToken)) + .thenReturn(new Paginated<>(ImmutableList.of(getAnotherDatabaseRoleProto()), "")); + + ArrayList databaseRoles = + Lists.newArrayList( + client.listDatabaseRoles(INSTANCE_ID, DB_ID, Options.pageSize(1)).iterateAll()); + assertThat(databaseRoles.get(0).getName()).isEqualTo(DB_ROLE); + assertThat(databaseRoles.get(1).getName()).isEqualTo(DB_ROLE2); + assertThat(databaseRoles.size()).isEqualTo(2); + } + + @Test + public void listDatabaseRolesError() { + when(rpc.listDatabaseRoles(DB_NAME, 1, null)) + .thenThrow( + SpannerExceptionFactory.newSpannerException(ErrorCode.INVALID_ARGUMENT, "Test error")); + SpannerException e = + assertThrows( + SpannerException.class, + () -> client.listDatabaseRoles(INSTANCE_ID, DB_ID, Options.pageSize(1))); + assertThat(e.getMessage()).contains(INSTANCE_NAME); + // Assert that the call was done without a page token. 
+ assertThat(e.getMessage()).contains("with pageToken "); + } + + @Test + public void listDatabaseRolesErrorWithToken() { + String pageToken = "token"; + when(rpc.listDatabaseRoles(DB_NAME, 1, null)) + .thenReturn(new Paginated<>(ImmutableList.of(getDatabaseRoleProto()), pageToken)); + when(rpc.listDatabaseRoles(DB_NAME, 1, pageToken)) + .thenThrow( + SpannerExceptionFactory.newSpannerException(ErrorCode.INVALID_ARGUMENT, "Test error")); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + Lists.newArrayList( + client + .listDatabaseRoles(INSTANCE_ID, DB_ID, Options.pageSize(1)) + .iterateAll())); + assertThat(e.getMessage()).contains(INSTANCE_NAME); + // Assert that the call was done without a page token. + assertThat(e.getMessage()).contains(String.format("with pageToken %s", pageToken)); + } + + @Test + public void getDatabaseIAMPolicy() { + when(rpc.getDatabaseAdminIAMPolicy(DB_NAME, null)) + .thenReturn( + Policy.newBuilder() + .addBindings( + Binding.newBuilder() + .addMembers("user:joe@example.com") + .setRole("roles/viewer") + .build()) + .build()); + com.google.cloud.Policy policy = client.getDatabaseIAMPolicy(INSTANCE_ID, DB_ID, 0); + assertThat(policy.getBindings()) + .containsExactly(Role.viewer(), Sets.newHashSet(Identity.user("joe@example.com"))); + + when(rpc.getDatabaseAdminIAMPolicy(DB_NAME, null)) + .thenReturn( + Policy.newBuilder() + .addBindings( + Binding.newBuilder() + .addAllMembers(Arrays.asList("allAuthenticatedUsers", "domain:google.com")) + .setRole("roles/viewer") + .build()) + .build()); + policy = client.getDatabaseIAMPolicy(INSTANCE_ID, DB_ID, 0); + assertThat(policy.getBindings()) + .containsExactly( + Role.viewer(), + Sets.newHashSet(Identity.allAuthenticatedUsers(), Identity.domain("google.com"))); + } + + @Test + public void setDatabaseIAMPolicy() { + ByteString etag = ByteString.copyFrom(BaseEncoding.base64().decode("v1")); + String etagEncoded = BaseEncoding.base64().encode(etag.toByteArray()); + Policy 
proto = + Policy.newBuilder() + .addBindings( + Binding.newBuilder() + .setRole("roles/viewer") + .addMembers("user:joe@example.com") + .build()) + .setEtag(etag) + .build(); + when(rpc.setDatabaseAdminIAMPolicy(DB_NAME, proto)).thenReturn(proto); + com.google.cloud.Policy policy = + com.google.cloud.Policy.newBuilder() + .addIdentity(Role.viewer(), Identity.user("joe@example.com")) + .setEtag(etagEncoded) + .build(); + com.google.cloud.Policy updated = client.setDatabaseIAMPolicy(INSTANCE_ID, DB_ID, policy); + assertThat(updated).isEqualTo(policy); + } + + @Test + public void testDatabaseIAMPermissions() { + Iterable permissions = + Arrays.asList("spanner.databases.select", "spanner.databases.write"); + when(rpc.testDatabaseAdminIAMPermissions(DB_NAME, permissions)) + .thenReturn( + TestIamPermissionsResponse.newBuilder() + .addPermissions("spanner.databases.select") + .build()); + Iterable allowed = client.testDatabaseIAMPermissions(INSTANCE_ID, DB_ID, permissions); + assertThat(allowed).containsExactly("spanner.databases.select"); + } + + @Test + public void createBackupWithParams() throws Exception { + OperationFuture rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "createBackup", getBackupProto(), CreateBackupMetadata.getDefaultInstance()); + Timestamp t = + Timestamp.ofTimeMicroseconds( + TimeUnit.MILLISECONDS.toMicros(System.currentTimeMillis()) + + TimeUnit.HOURS.toMicros(28)); + final com.google.cloud.spanner.Backup backup = + client + .newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, BK_ID)) + .setDatabase(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DB_ID)) + .setExpireTime(t) + .build(); + when(rpc.createBackup(backup)).thenReturn(rawOperationFuture); + OperationFuture op = + client.createBackup(INSTANCE_ID, BK_ID, DB_ID, t); + assertThat(op.isDone()).isTrue(); + assertThat(op.get().getId().getName()).isEqualTo(BK_NAME); + } + + @Test + public void createBackupWithBackupObject() throws ExecutionException, InterruptedException { + 
final OperationFuture rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "createBackup", getBackupProto(), CreateBackupMetadata.getDefaultInstance()); + final Timestamp expireTime = + Timestamp.ofTimeMicroseconds( + TimeUnit.MILLISECONDS.toMicros(System.currentTimeMillis()) + + TimeUnit.HOURS.toMicros(28)); + final Timestamp versionTime = + Timestamp.ofTimeMicroseconds( + TimeUnit.MILLISECONDS.toMicros(System.currentTimeMillis()) - TimeUnit.DAYS.toMicros(2)); + final com.google.cloud.spanner.Backup requestBackup = + client + .newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, BK_ID)) + .setDatabase(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DB_ID)) + .setExpireTime(expireTime) + .setVersionTime(versionTime) + .build(); + + when(rpc.createBackup(requestBackup)).thenReturn(rawOperationFuture); + + final OperationFuture op = + client.createBackup(requestBackup); + assertThat(op.isDone()).isTrue(); + assertThat(op.get().getId().getName()).isEqualTo(BK_NAME); + } + + @Test(expected = IllegalArgumentException.class) + public void testCreateBackupNoExpireTime() { + final com.google.cloud.spanner.Backup requestBackup = + client + .newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, BK_ID)) + .setDatabase(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DB_ID)) + .build(); + + client.createBackup(requestBackup); + } + + @Test(expected = IllegalArgumentException.class) + public void testCreateBackupNoDatabase() { + final com.google.cloud.spanner.Backup requestBackup = + client + .newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, BK_ID)) + .setExpireTime(Timestamp.now()) + .build(); + + client.createBackup(requestBackup); + } + + @Test + public void createEncryptedBackup() throws ExecutionException, InterruptedException { + final OperationFuture rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "createBackup", getEncryptedBackupProto(), CreateBackupMetadata.getDefaultInstance()); + final Timestamp t = + Timestamp.ofTimeMicroseconds( + 
TimeUnit.MILLISECONDS.toMicros(System.currentTimeMillis()) + + TimeUnit.HOURS.toMicros(28)); + final com.google.cloud.spanner.Backup backup = + client + .newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, BK_ID)) + .setDatabase(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DB_ID)) + .setExpireTime(t) + .setEncryptionConfig(EncryptionConfigs.customerManagedEncryption(KMS_KEY_NAME)) + .build(); + when(rpc.createBackup(backup)).thenReturn(rawOperationFuture); + final OperationFuture op = + client.createBackup(backup); + assertThat(op.isDone()).isTrue(); + assertThat(op.get().getId().getName()).isEqualTo(BK_NAME); + assertThat(op.get().getEncryptionInfo().getKmsKeyVersion()).isEqualTo(KMS_KEY_VERSION); + } + + @Test + public void copyBackupWithParams() throws Exception { + OperationFuture rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "copyBackup", getBackupProto(), CopyBackupMetadata.getDefaultInstance()); + Timestamp t = + Timestamp.ofTimeMicroseconds( + TimeUnit.MILLISECONDS.toMicros(System.currentTimeMillis()) + + TimeUnit.HOURS.toMicros(28)); + final com.google.cloud.spanner.Backup backup = + client + .newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, BK_ID)) + .setExpireTime(t) + .build(); + when(rpc.copyBackup(BackupId.of(PROJECT_ID, INSTANCE_ID, SOURCE_BK), backup)) + .thenReturn(rawOperationFuture); + OperationFuture op = + client.copyBackup(INSTANCE_ID, SOURCE_BK, BK_ID, t); + assertThat(op.isDone()).isTrue(); + assertThat(op.get().getId().getName()).isEqualTo(BK_NAME); + } + + @Test + public void copyBackupWithBackupObject() throws ExecutionException, InterruptedException { + final OperationFuture rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "copyBackup", getBackupProto(), CopyBackupMetadata.getDefaultInstance()); + final Timestamp expireTime = + Timestamp.ofTimeMicroseconds( + TimeUnit.MILLISECONDS.toMicros(System.currentTimeMillis()) + + TimeUnit.HOURS.toMicros(28)); + final Timestamp versionTime = + 
Timestamp.ofTimeMicroseconds( + TimeUnit.MILLISECONDS.toMicros(System.currentTimeMillis()) - TimeUnit.DAYS.toMicros(2)); + final com.google.cloud.spanner.Backup requestBackup = + client + .newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, BK_ID)) + .setExpireTime(expireTime) + .setVersionTime(versionTime) + .build(); + BackupId sourceBackupId = BackupId.of(PROJECT_ID, INSTANCE_ID, BK_ID); + + when(rpc.copyBackup(sourceBackupId, requestBackup)).thenReturn(rawOperationFuture); + + final OperationFuture op = + client.copyBackup(sourceBackupId, requestBackup); + assertThat(op.isDone()).isTrue(); + assertThat(op.get().getId().getName()).isEqualTo(BK_NAME); + } + + @Test + public void copyBackupWithBackupObject_onDifferentInstances() + throws ExecutionException, InterruptedException { + Backup testProto = + Backup.newBuilder() + .setName(BK_NAME) + .setDatabase("projects/my-project/instances/my-instance-2/databases/my-db") + .setState(Backup.State.READY) + .build(); + final OperationFuture rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "copyBackup", testProto, CopyBackupMetadata.getDefaultInstance()); + final Timestamp expireTime = + Timestamp.ofTimeMicroseconds( + TimeUnit.MILLISECONDS.toMicros(System.currentTimeMillis()) + + TimeUnit.HOURS.toMicros(28)); + final Timestamp versionTime = + Timestamp.ofTimeMicroseconds( + TimeUnit.MILLISECONDS.toMicros(System.currentTimeMillis()) - TimeUnit.DAYS.toMicros(2)); + final com.google.cloud.spanner.Backup requestBackup = + client + .newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID_2, BK_ID)) + .setExpireTime(expireTime) + .setVersionTime(versionTime) + .build(); + BackupId sourceBackupId = BackupId.of(PROJECT_ID, INSTANCE_ID, BK_ID); + + when(rpc.copyBackup(sourceBackupId, requestBackup)).thenReturn(rawOperationFuture); + + final OperationFuture op = + client.copyBackup(sourceBackupId, requestBackup); + assertThat(op.isDone()).isTrue(); + assertThat(op.get().getId().getName()).isEqualTo(BK_NAME); 
+ } + + @Test + public void copyEncryptedBackup() throws ExecutionException, InterruptedException { + final OperationFuture rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "copyBackup", getEncryptedBackupProto(), CopyBackupMetadata.getDefaultInstance()); + final Timestamp t = + Timestamp.ofTimeMicroseconds( + TimeUnit.MILLISECONDS.toMicros(System.currentTimeMillis()) + + TimeUnit.HOURS.toMicros(28)); + final com.google.cloud.spanner.Backup backup = + client + .newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, BK_ID)) + .setDatabase(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DB_ID)) + .setExpireTime(t) + .setEncryptionConfig(EncryptionConfigs.customerManagedEncryption(KMS_KEY_NAME)) + .build(); + BackupId sourceBackupId = BackupId.of(PROJECT_ID, INSTANCE_ID, BK_ID); + when(rpc.copyBackup(sourceBackupId, backup)).thenReturn(rawOperationFuture); + final OperationFuture op = + client.copyBackup(sourceBackupId, backup); + assertThat(op.isDone()).isTrue(); + assertThat(op.get().getId().getName()).isEqualTo(BK_NAME); + assertThat(op.get().getEncryptionInfo().getKmsKeyVersion()).isEqualTo(KMS_KEY_VERSION); + } + + @Test + public void deleteBackup() { + client.deleteBackup(INSTANCE_ID, BK_ID); + verify(rpc).deleteBackup(BK_NAME); + } + + @Test + public void getBackup() { + when(rpc.getBackup(BK_NAME)).thenReturn(getBackupProto()); + com.google.cloud.spanner.Backup bk = client.getBackup(INSTANCE_ID, BK_ID); + BackupId bid = BackupId.of(bk.getId().getName()); + assertThat(bid.getName()).isEqualTo(BK_NAME); + assertThat(bk.getState()).isEqualTo(com.google.cloud.spanner.Backup.State.READY); + } + + @Test + public void listBackups() { + String pageToken = "token"; + when(rpc.listBackups(INSTANCE_NAME, 1, null, null)) + .thenReturn(new Paginated<>(ImmutableList.of(getBackupProto()), pageToken)); + when(rpc.listBackups(INSTANCE_NAME, 1, null, pageToken)) + .thenReturn(new Paginated<>(ImmutableList.of(getAnotherBackupProto()), "")); + List backups = + 
Lists.newArrayList(client.listBackups(INSTANCE_ID, Options.pageSize(1)).iterateAll()); + assertThat(backups.get(0).getId().getName()).isEqualTo(BK_NAME); + assertThat(backups.get(1).getId().getName()).isEqualTo(BK_NAME2); + assertThat(backups.size()).isEqualTo(2); + } + + @Test + public void updateBackup() { + Timestamp t = + Timestamp.ofTimeMicroseconds( + TimeUnit.MILLISECONDS.toMicros(System.currentTimeMillis()) + + TimeUnit.HOURS.toMicros(28)); + Backup backup = Backup.newBuilder().setName(BK_NAME).setExpireTime(t.toProto()).build(); + when(rpc.updateBackup(backup, FieldMask.newBuilder().addPaths("expire_time").build())) + .thenReturn( + Backup.newBuilder() + .setName(BK_NAME) + .setDatabase(DB_NAME) + .setExpireTime(t.toProto()) + .build()); + com.google.cloud.spanner.Backup updatedBackup = client.updateBackup(INSTANCE_ID, BK_ID, t); + assertThat(updatedBackup.getExpireTime()).isEqualTo(t); + } + + @Test + public void restoreDatabase() throws Exception { + OperationFuture rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "restoreDatabase", getDatabaseProto(), RestoreDatabaseMetadata.getDefaultInstance()); + final Restore restore = + new Restore.Builder( + BackupId.of(PROJECT_ID, INSTANCE_ID, BK_ID), + DatabaseId.of(PROJECT_ID, INSTANCE_ID, DB_ID)) + .build(); + when(rpc.restoreDatabase(restore)).thenReturn(rawOperationFuture); + OperationFuture op = + client.restoreDatabase(restore); + assertThat(op.isDone()).isTrue(); + assertThat(op.get().getId().getName()).isEqualTo(DB_NAME); + } + + @Test + public void restoreEncryptedDatabase() throws Exception { + OperationFuture rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "restoreEncryptedDatabase", + getEncryptedDatabaseProto(), + RestoreDatabaseMetadata.getDefaultInstance()); + final Restore restore = + new Restore.Builder( + BackupId.of(PROJECT_ID, INSTANCE_ID, BK_ID), + DatabaseId.of(PROJECT_ID, INSTANCE_ID, DB_ID)) + .build(); + 
when(rpc.restoreDatabase(restore)).thenReturn(rawOperationFuture); + OperationFuture op = + client.restoreDatabase(restore); + assertThat(op.isDone()).isTrue(); + assertThat(op.get().getId().getName()).isEqualTo(DB_NAME); + assertThat(op.get().getEncryptionConfig().getKmsKeyName()).isEqualTo(KMS_KEY_NAME); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseAdminClientTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseAdminClientTest.java new file mode 100644 index 000000000000..752f4c524cc3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseAdminClientTest.java @@ -0,0 +1,967 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static com.google.cloud.spanner.testing.TimestampHelper.afterDays; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.longrunning.OperationSnapshot; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.paging.Page; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.retrying.RetryingFuture; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.Identity; +import com.google.cloud.NoCredentials; +import com.google.cloud.Policy; +import com.google.cloud.Role; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.DatabaseInfo.State; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.longrunning.Operation; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.rpc.ErrorInfo; +import com.google.spanner.admin.database.v1.CreateBackupMetadata; +import com.google.spanner.admin.database.v1.CreateBackupRequest; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.GetDatabaseRequest; +import com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata; +import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; +import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Metadata; +import io.grpc.Server; +import io.grpc.Status; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import io.grpc.protobuf.lite.ProtoLiteUtils; 
+import java.net.InetSocketAddress; +import java.time.Duration; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class DatabaseAdminClientTest { + private static final String PROJECT_ID = "my-project"; + private static final String INSTANCE_ID = "my-instance"; + private static final String DB_ID = "test-db"; + private static final String BCK_ID = "test-bck"; + private static final String RESTORED_ID = "restored-test-db"; + private static final String TEST_PARENT = "projects/my-project/instances/my-instance"; + private static final String TEST_BCK_NAME = String.format("%s/backups/test-bck", TEST_PARENT); + private static final List INITIAL_STATEMENTS = + Arrays.asList("CREATE TABLE FOO", "CREATE TABLE BAR"); + + private static MockOperationsServiceImpl mockOperations; + private static MockDatabaseAdminServiceImpl mockDatabaseAdmin; + private static Server server; + + private static Spanner spanner; + private static DatabaseAdminClient client; + private OperationFuture createDatabaseOperation; + private OperationFuture createBackupOperation; + private OperationFuture restoreDatabaseOperation; + + @BeforeClass + public static void startStaticServer() throws Exception { + mockOperations = new MockOperationsServiceImpl(); + mockDatabaseAdmin = new MockDatabaseAdminServiceImpl(mockOperations); + // This test uses a NettyServer to properly test network and timeout issues. 
+ InetSocketAddress address = new InetSocketAddress("localhost", 0); + server = + NettyServerBuilder.forAddress(address) + .addService(mockOperations) + .addService(mockDatabaseAdmin) + .build() + .start(); + SpannerOptions.Builder builder = SpannerOptions.newBuilder(); + RetrySettings longRunningInitialRetrySettings = + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(600L)) + .setMaxRpcTimeoutDuration(Duration.ofMillis(6000L)) + .setInitialRetryDelayDuration(Duration.ofMillis(20L)) + .setMaxRetryDelayDuration(Duration.ofMillis(45L)) + .setRetryDelayMultiplier(1.5) + .setRpcTimeoutMultiplier(1.5) + .setTotalTimeoutDuration(Duration.ofMinutes(48L)) + .build(); + builder + .getDatabaseAdminStubSettingsBuilder() + .createBackupOperationSettings() + .setInitialCallSettings( + UnaryCallSettings.newUnaryCallSettingsBuilder() + .setRetrySettings(longRunningInitialRetrySettings) + .build()); + builder + .getDatabaseAdminStubSettingsBuilder() + .createBackupOperationSettings() + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(20L)) + .setInitialRetryDelayDuration(Duration.ofMillis(10L)) + .setMaxRetryDelayDuration(Duration.ofMillis(150L)) + .setMaxRpcTimeoutDuration(Duration.ofMillis(150L)) + .setMaxAttempts(10) + .setTotalTimeoutDuration(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.3) + .setRpcTimeoutMultiplier(1.3) + .build())); + + builder + .getDatabaseAdminStubSettingsBuilder() + .createDatabaseOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetrySettings(longRunningInitialRetrySettings) + .build()); + builder + .getDatabaseAdminStubSettingsBuilder() + .createDatabaseOperationSettings() + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(20L)) + 
.setInitialRetryDelayDuration(Duration.ofMillis(10L)) + .setMaxRetryDelayDuration(Duration.ofMillis(150L)) + .setMaxRpcTimeoutDuration(Duration.ofMillis(150L)) + .setMaxAttempts(10) + .setTotalTimeoutDuration(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.3) + .setRpcTimeoutMultiplier(1.3) + .build())); + builder + .getDatabaseAdminStubSettingsBuilder() + .restoreDatabaseOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetrySettings(longRunningInitialRetrySettings) + .build()); + builder + .getDatabaseAdminStubSettingsBuilder() + .restoreDatabaseOperationSettings() + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(20L)) + .setInitialRetryDelayDuration(Duration.ofMillis(10L)) + .setMaxRetryDelayDuration(Duration.ofMillis(150L)) + .setMaxRpcTimeoutDuration(Duration.ofMillis(150L)) + .setMaxAttempts(10) + .setTotalTimeoutDuration(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.3) + .setRpcTimeoutMultiplier(1.3) + .build())); + builder.setRetryAdministrativeRequestsSettings( + SpannerOptions.Builder.DEFAULT_ADMIN_REQUESTS_LIMIT_EXCEEDED_RETRY_SETTINGS.toBuilder() + .setInitialRetryDelayDuration(Duration.ofNanos(1L)) + .build()); + spanner = + builder + .setHost("http://localhost:" + server.getPort()) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setCredentials(NoCredentials.getInstance()) + .setProjectId(PROJECT_ID) + .build() + .getService(); + client = spanner.getDatabaseAdminClient(); + } + + @AfterClass + public static void stopServer() throws Exception { + spanner.close(); + server.shutdown(); + server.awaitTermination(); + } + + @Before + public void setUp() { + mockDatabaseAdmin.reset(); + mockOperations.reset(); + createTestDatabase(); + createTestBackup(); + restoreTestBackup(); + } + + @After + public void tearDown() { + mockDatabaseAdmin.reset(); + 
mockDatabaseAdmin.removeAllExecutionTimes(); + mockOperations.reset(); + } + + @Test + public void dbAdminCreateBackup() throws InterruptedException, ExecutionException { + final String backupId = "other-backup-id"; + OperationFuture op = + client.createBackup(INSTANCE_ID, backupId, DB_ID, afterDays(7)); + Backup backup = op.get(); + assertThat(backup.getId().getName()) + .isEqualTo( + String.format( + "projects/%s/instances/%s/backups/%s", PROJECT_ID, INSTANCE_ID, backupId)); + assertThat(client.getBackup(INSTANCE_ID, backupId)).isEqualTo(backup); + } + + @Test + public void backupCreate() throws InterruptedException, ExecutionException { + final String backupId = "other-backup-id"; + Backup backup = + client + .newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, backupId)) + .setDatabase(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DB_ID)) + .setExpireTime(afterDays(7)) + .setVersionTime(sevenDaysAgo()) + .build(); + OperationFuture op = backup.create(); + backup = op.get(); + assertThat(backup.getId().getName()) + .isEqualTo( + String.format( + "projects/%s/instances/%s/backups/%s", PROJECT_ID, INSTANCE_ID, backupId)); + assertThat(client.getBackup(INSTANCE_ID, backupId)).isEqualTo(backup); + } + + @Test + public void databaseAdminBackupCreate() throws ExecutionException, InterruptedException { + final String backupId = "other-backup-id"; + Backup backup = + client + .newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, backupId)) + .setDatabase(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DB_ID)) + .setExpireTime(afterDays(7)) + .setVersionTime(sevenDaysAgo()) + .build(); + final OperationFuture op = client.createBackup(backup); + backup = op.get(); + assertThat(backup.getId().getName()) + .isEqualTo( + String.format( + "projects/%s/instances/%s/backups/%s", PROJECT_ID, INSTANCE_ID, backupId)); + assertThat(client.getBackup(INSTANCE_ID, backupId)).isEqualTo(backup); + } + + @Test + public void backupCreateCancel() { + final String backupId = "other-backup-id"; + // Set 
expire time to 14 days from now. + long currentTimeInMicroSeconds = + TimeUnit.MICROSECONDS.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS); + long deltaTimeInMicroseconds = TimeUnit.MICROSECONDS.convert(14L, TimeUnit.DAYS); + Timestamp expireTime = + Timestamp.ofTimeMicroseconds(currentTimeInMicroSeconds + deltaTimeInMicroseconds); + Backup backup = + client + .newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, backupId)) + .setDatabase(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DB_ID)) + .setExpireTime(expireTime) + .build(); + // Start a creation of a backup. + OperationFuture op = backup.create(); + try { + // Try to cancel the backup operation. + client.cancelOperation(op.getName()); + // Get a polling future for the running operation. This future will regularly poll the server + // for the current status of the backup operation. + RetryingFuture pollingFuture = op.getPollingFuture(); + // Wait for the operation to finish. + // isDone will return true if the operation has finished successfully or if it was cancelled + // or any other error occurred. + while (!pollingFuture.get().isDone()) { + Thread.sleep(TimeUnit.MILLISECONDS.convert(5, TimeUnit.SECONDS)); + } + } catch (CancellationException e) { + // ignore, this exception may also occur if the polling future has been cancelled. 
+ } catch (ExecutionException e) { + throw (RuntimeException) e.getCause(); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } finally { + backup.delete(); + } + } + + @Test + public void databaseBackup() throws InterruptedException, ExecutionException { + final String backupId = "other-backup-id"; + Database db = client.getDatabase(INSTANCE_ID, DB_ID); + Backup backup = + db.backup( + client + .newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, backupId)) + .setExpireTime(afterDays(7)) + .build()) + .get(); + assertThat(backup.getId().getName()) + .isEqualTo( + String.format( + "projects/%s/instances/%s/backups/%s", PROJECT_ID, INSTANCE_ID, backupId)); + assertThat(client.getBackup(INSTANCE_ID, backupId)).isEqualTo(backup); + } + + @Test + public void dbAdminCreateBackupAlreadyExists() { + OperationFuture op = + client.createBackup(INSTANCE_ID, BCK_ID, DB_ID, afterDays(7)); + SpannerException e = assertThrows(SpannerException.class, () -> get(op)); + assertEquals(ErrorCode.ALREADY_EXISTS, e.getErrorCode()); + } + + @Test + public void backupCreateAlreadyExists() { + Backup backup = + client + .newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, BCK_ID)) + .setDatabase(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DB_ID)) + .setExpireTime(afterDays(7)) + .build(); + SpannerException e = assertThrows(SpannerException.class, () -> get(backup.create())); + assertEquals(ErrorCode.ALREADY_EXISTS, e.getErrorCode()); + } + + @Test + public void databaseBackupAlreadyExists() { + Database db = client.getDatabase(INSTANCE_ID, DB_ID); + OperationFuture op = + db.backup( + client + .newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, BCK_ID)) + .setExpireTime(afterDays(7)) + .build()); + SpannerException e = assertThrows(SpannerException.class, () -> get(op)); + assertEquals(ErrorCode.ALREADY_EXISTS, e.getErrorCode()); + } + + @Test + public void dbAdminCreateBackupDbNotFound() { + final String backupId = "other-backup-id"; + 
OperationFuture op = + client.createBackup(INSTANCE_ID, backupId, "does-not-exist", afterDays(7)); + SpannerException e = assertThrows(SpannerException.class, () -> get(op)); + assertEquals(ErrorCode.NOT_FOUND, e.getErrorCode()); + } + + @Test + public void backupCreateDbNotFound() { + final String backupId = "other-backup-id"; + Backup backup = + client + .newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, backupId)) + .setDatabase(DatabaseId.of(PROJECT_ID, INSTANCE_ID, "does-not-exist")) + .setExpireTime(afterDays(7)) + .build(); + SpannerException e = assertThrows(SpannerException.class, () -> get(backup.create())); + assertEquals(ErrorCode.NOT_FOUND, e.getErrorCode()); + } + + @Test + public void databaseBackupDbNotFound() throws InterruptedException { + final String backupId = "other-backup-id"; + Database db = + new Database( + DatabaseId.of(PROJECT_ID, INSTANCE_ID, "does-not-exist"), State.UNSPECIFIED, client); + OperationFuture op = + db.backup( + client + .newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, backupId)) + .setExpireTime(afterDays(7)) + .build()); + SpannerException e = assertThrows(SpannerException.class, () -> get(op)); + assertEquals(ErrorCode.NOT_FOUND, e.getErrorCode()); + } + + @Test + public void dbAdminDeleteBackup() { + Backup backup = client.newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, BCK_ID)).build(); + assertThat(backup.exists()).isTrue(); + client.deleteBackup(INSTANCE_ID, BCK_ID); + assertThat(backup.exists()).isFalse(); + } + + @Test + public void backupDelete() { + Backup backup = client.newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, BCK_ID)).build(); + assertThat(backup.exists()).isTrue(); + backup.delete(); + assertThat(backup.exists()).isFalse(); + } + + @Test + public void dbAdminDeleteBackupNotFound() { + SpannerException e = + assertThrows( + SpannerException.class, () -> client.deleteBackup(INSTANCE_ID, "does-not-exist")); + assertEquals(ErrorCode.NOT_FOUND, e.getErrorCode()); + } + + @Test + 
public void backupDeleteNotFound() { + Backup backup = + client.newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, "does-not-exist")).build(); + SpannerException e = assertThrows(SpannerException.class, () -> backup.delete()); + assertEquals(ErrorCode.NOT_FOUND, e.getErrorCode()); + } + + @Test + public void dbAdminGetBackup() { + Backup backup = client.getBackup(INSTANCE_ID, BCK_ID); + assertThat(backup.getId().getName()).isEqualTo(TEST_BCK_NAME); + } + + @Test + public void backupReload() { + Backup backup = client.newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, BCK_ID)).build(); + assertThat(backup.getState()).isEqualTo(com.google.cloud.spanner.BackupInfo.State.UNSPECIFIED); + backup.reload(); + assertThat(backup.getId().getName()).isEqualTo(TEST_BCK_NAME); + } + + @Test + public void dbAdminGetBackupNotFound() { + SpannerException e = + assertThrows(SpannerException.class, () -> client.getBackup(INSTANCE_ID, "does-not-exist")); + assertEquals(ErrorCode.NOT_FOUND, e.getErrorCode()); + } + + @Test + public void backupReloadNotFound() { + Backup backup = + client.newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, "does-not-exist")).build(); + SpannerException e = assertThrows(SpannerException.class, () -> backup.reload()); + assertEquals(ErrorCode.NOT_FOUND, e.getErrorCode()); + } + + @Test + public void backupExists() { + Backup backup = + client.newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, "does-not-exist")).build(); + assertThat(backup.exists()).isFalse(); + backup = client.newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, BCK_ID)).build(); + assertThat(backup.exists()).isTrue(); + } + + @Test + public void dbClientListBackups() + throws SpannerException, InterruptedException, ExecutionException { + Backup backup = client.getBackup(INSTANCE_ID, BCK_ID); + assertThat(client.listBackups(INSTANCE_ID).iterateAll()).containsExactly(backup); + Backup backup2 = client.createBackup(INSTANCE_ID, "backup2", DB_ID, afterDays(7)).get(); + 
assertThat(client.listBackups(INSTANCE_ID).iterateAll()).containsExactly(backup, backup2); + backup2.delete(); + assertThat(client.listBackups(INSTANCE_ID).iterateAll()).containsExactly(backup); + } + + @Test + public void instanceListBackups() + throws SpannerException, InterruptedException, ExecutionException { + Instance instance = + spanner + .getInstanceAdminClient() + .newInstanceBuilder(InstanceId.of(PROJECT_ID, INSTANCE_ID)) + .build(); + Backup backup = client.getBackup(INSTANCE_ID, BCK_ID); + assertThat(instance.listBackups().iterateAll()).containsExactly(backup); + Backup backup2 = client.createBackup(INSTANCE_ID, "backup2", DB_ID, afterDays(7)).get(); + assertThat(instance.listBackups().iterateAll()).containsExactly(backup, backup2); + backup2.delete(); + assertThat(instance.listBackups().iterateAll()).containsExactly(backup); + } + + @Test + public void instanceListBackupsWithFilter() + throws SpannerException, InterruptedException, ExecutionException { + Instance instance = + spanner + .getInstanceAdminClient() + .newInstanceBuilder(InstanceId.of(PROJECT_ID, INSTANCE_ID)) + .build(); + + Backup backup = client.getBackup(INSTANCE_ID, BCK_ID); + assertThat(instance.listBackups().iterateAll()).containsExactly(backup); + Backup backup2 = client.createBackup(INSTANCE_ID, "backup2", DB_ID, afterDays(7)).get(); + + // All backups. + assertThat(instance.listBackups().iterateAll()).containsExactly(backup, backup2); + + // All backups with name containing 'backup2'. + String filter = "name:backup2"; + mockDatabaseAdmin.addFilterMatches(filter, backup2.getId().getName()); + assertThat(instance.listBackups(Options.filter(filter)).iterateAll()).containsExactly(backup2); + + // All backups for the database with the id DB_ID. 
+ filter = String.format("database:%s", DB_ID); + mockDatabaseAdmin.addFilterMatches(filter, backup.getId().getName(), backup2.getId().getName()); + assertThat(instance.listBackups(Options.filter(filter)).iterateAll()) + .containsExactly(backup, backup2); + + // All backups that expire before a certain time. + String ts = afterDays(14).toString(); + filter = String.format("expire_time < \"%s\"", ts); + mockDatabaseAdmin.addFilterMatches(filter, backup.getId().getName(), backup2.getId().getName()); + assertThat(instance.listBackups(Options.filter(filter)).iterateAll()) + .containsExactly(backup, backup2); + // All backups with size greater than a certain number of bytes. + long minBytes = Math.min(backup.getSize(), backup2.getSize()); + filter = String.format("size_bytes > %d", minBytes); + Backup backupWithLargestSize; + if (backup.getSize() == minBytes) { + backupWithLargestSize = backup2; + } else { + backupWithLargestSize = backup; + } + mockDatabaseAdmin.addFilterMatches(filter, backupWithLargestSize.getId().getName()); + assertThat(instance.listBackups(Options.filter(filter)).iterateAll()) + .containsExactly(backupWithLargestSize); + // All backups with a create time after a certain timestamp and that are also ready. 
+ ts = backup2.getProto().getCreateTime().toString(); + filter = String.format("create_time >= \"%s\" AND state:READY", ts); + mockDatabaseAdmin.addFilterMatches(filter, backup2.getId().getName()); + assertThat(instance.listBackups(Options.filter(filter)).iterateAll()).containsExactly(backup2); + } + + @Test + public void dbClientUpdateBackup() { + Timestamp oldExpireTime = client.getBackup(INSTANCE_ID, BCK_ID).getExpireTime(); + Timestamp newExpireTime = + Timestamp.ofTimeSecondsAndNanos( + Timestamp.now().getSeconds() + TimeUnit.SECONDS.convert(1, TimeUnit.DAYS), 0); + assertThat(oldExpireTime).isNotEqualTo(newExpireTime); + Backup backup = client.updateBackup(INSTANCE_ID, BCK_ID, newExpireTime); + assertThat(backup.getExpireTime()).isEqualTo(newExpireTime); + assertThat(client.getBackup(INSTANCE_ID, BCK_ID)).isEqualTo(backup); + } + + @Test + public void backupUpdate() { + Timestamp newExpireTime = + Timestamp.ofTimeSecondsAndNanos( + Timestamp.now().getSeconds() + TimeUnit.SECONDS.convert(1, TimeUnit.DAYS), 0); + Backup backup = client.getBackup(INSTANCE_ID, BCK_ID); + assertThat(backup.getExpireTime()).isNotEqualTo(newExpireTime); + backup.toBuilder().setExpireTime(newExpireTime).build().updateExpireTime(); + Backup updated = client.getBackup(INSTANCE_ID, BCK_ID); + assertThat(updated.getExpireTime()).isEqualTo(newExpireTime); + assertThat(updated).isNotEqualTo(backup); + assertThat(backup.reload()).isEqualTo(updated); + } + + @Test + public void dbClientRestoreDatabase() throws InterruptedException, ExecutionException { + OperationFuture op = + client.restoreDatabase(INSTANCE_ID, BCK_ID, "other-instance-id", "restored-db"); + Database restored = op.get(); + assertThat(restored.getId().getDatabase()).isEqualTo("restored-db"); + assertThat(restored.getId().getInstanceId().getInstance()).isEqualTo("other-instance-id"); + } + + @Test + public void backupRestoreDatabase() throws InterruptedException, ExecutionException { + Backup backup = 
client.newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, BCK_ID)).build(); + Database restored = + backup.restore(DatabaseId.of(PROJECT_ID, "other-instance-id", "restored-db")).get(); + assertThat(restored.getId().getDatabase()).isEqualTo("restored-db"); + assertThat(restored.getId().getInstanceId().getInstance()).isEqualTo("other-instance-id"); + } + + @Test + public void dbClientListDatabaseOperations() + throws SpannerException, InterruptedException, ExecutionException { + // Note: The mock server keeps all operations until the server is reset, including operations + // that have already finished. + // The setup method creates a test database --> 1 operation. + // + restores a database --> 2 operations. + assertThat(client.listDatabaseOperations(INSTANCE_ID).iterateAll()).hasSize(3); + // Create another database which should also create another operation. + client.createDatabase(INSTANCE_ID, "other-database", Collections.emptyList()).get(); + assertThat(client.listDatabaseOperations(INSTANCE_ID).iterateAll()).hasSize(4); + // Restore a backup. This should create 2 database operations: One to restore the database and + // one to optimize it. 
+ client.restoreDatabase(INSTANCE_ID, BCK_ID, INSTANCE_ID, "restored-db").get(); + assertThat(client.listDatabaseOperations(INSTANCE_ID).iterateAll()).hasSize(6); + } + + @Test + public void instanceListDatabaseOperations() + throws SpannerException, InterruptedException, ExecutionException { + Instance instance = + spanner + .getInstanceAdminClient() + .newInstanceBuilder(InstanceId.of(PROJECT_ID, INSTANCE_ID)) + .build(); + assertThat(instance.listDatabaseOperations().iterateAll()).hasSize(3); + instance.createDatabase("other-database", Collections.emptyList()).get(); + assertThat(instance.listDatabaseOperations().iterateAll()).hasSize(4); + client + .newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, BCK_ID)) + .build() + .restore(DatabaseId.of(PROJECT_ID, INSTANCE_ID, "restored-db")) + .get(); + assertThat(instance.listDatabaseOperations().iterateAll()).hasSize(6); + } + + @Test + public void instanceListDatabaseOperationsWithMetadata() throws Exception { + Instance instance = + spanner + .getInstanceAdminClient() + .newInstanceBuilder(InstanceId.of(PROJECT_ID, INSTANCE_ID)) + .build(); + String filter = + "(metadata.@type:type.googleapis.com/" + + "google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata)"; + mockDatabaseAdmin.addFilterMatches( + filter, restoreDatabaseOperation.getMetadata().get().getOptimizeDatabaseOperationName()); + Iterable operations = + instance.listDatabaseOperations(Options.filter(filter)).iterateAll(); + assertThat(operations).hasSize(1); + for (Operation op : operations) { + OptimizeRestoredDatabaseMetadata metadata = + op.getMetadata().unpack(OptimizeRestoredDatabaseMetadata.class); + String progress = + String.format( + "Restored database %s is optimized %d%%", + metadata.getName(), metadata.getProgress().getProgressPercent()); + assertThat(progress.contains("100%")); + } + } + + @Test + public void databaseListDatabaseOperations() + throws SpannerException, InterruptedException, ExecutionException { + Database 
database = client.getDatabase(INSTANCE_ID, DB_ID); + mockDatabaseAdmin.addFilterMatches( + "name:databases/" + DB_ID, createDatabaseOperation.getName()); + assertThat(database.listDatabaseOperations().iterateAll()).hasSize(1); + // Create another database which should also create another operation, but for a different + // database. + client.createDatabase(INSTANCE_ID, "other-database", Collections.emptyList()).get(); + assertThat(database.listDatabaseOperations().iterateAll()).hasSize(1); + // Update the database DDL. This should create an operation for this database. + OperationFuture op = + database.updateDdl(Collections.singletonList("DROP TABLE FOO"), null); + mockDatabaseAdmin.addFilterMatches("name:databases/" + DB_ID, op.getName()); + assertThat(database.listDatabaseOperations().iterateAll()).hasSize(2); + } + + @Test + public void dbClientListBackupOperations() + throws SpannerException, InterruptedException, ExecutionException { + assertThat(client.listBackupOperations(INSTANCE_ID).iterateAll()).hasSize(1); + client.createBackup(INSTANCE_ID, "other-backup", DB_ID, afterDays(7)).get(); + assertThat(client.listBackupOperations(INSTANCE_ID).iterateAll()).hasSize(2); + // Restore a backup. This creates 2 DATABASE operations: One to restore the database and + // one to optimize it. 
+ client.restoreDatabase(INSTANCE_ID, BCK_ID, INSTANCE_ID, "restored-db").get(); + assertThat(client.listBackupOperations(INSTANCE_ID).iterateAll()).hasSize(2); + } + + @Test + public void instanceListBackupOperations() + throws SpannerException, InterruptedException, ExecutionException { + Instance instance = + spanner + .getInstanceAdminClient() + .newInstanceBuilder(InstanceId.of(PROJECT_ID, INSTANCE_ID)) + .build(); + assertThat(instance.listBackupOperations().iterateAll()).hasSize(1); + instance + .getDatabase(DB_ID) + .backup( + client + .newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, "other-backup")) + .setExpireTime(afterDays(7)) + .build()) + .get(); + assertThat(instance.listBackupOperations().iterateAll()).hasSize(2); + client + .newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, BCK_ID)) + .build() + .restore(DatabaseId.of(PROJECT_ID, INSTANCE_ID, "restored-db")) + .get(); + assertThat(instance.listBackupOperations().iterateAll()).hasSize(2); + } + + @Test + public void instanceListBackupOperationsWithProgress() throws InvalidProtocolBufferException { + Instance instance = + spanner + .getInstanceAdminClient() + .newInstanceBuilder(InstanceId.of(PROJECT_ID, INSTANCE_ID)) + .build(); + String database = String.format("%s/databases/%s", TEST_PARENT, DB_ID); + String filter = + String.format( + "(metadata.database:%s) AND " + + "(metadata.@type:type.googleapis.com/" + + "google.spanner.admin.database.v1.CreateBackupMetadata)", + database); + Page operations = instance.listBackupOperations(Options.filter(filter)); + for (Operation op : operations.iterateAll()) { + CreateBackupMetadata metadata = op.getMetadata().unpack(CreateBackupMetadata.class); + String progress = + String.format( + "Backup %s on database %s pending: %d%% complete", + metadata.getName(), + metadata.getDatabase(), + metadata.getProgress().getProgressPercent()); + assertThat(progress.contains("100%")); + } + } + + @Test + public void backupListBackupOperations() + throws 
SpannerException, InterruptedException, ExecutionException { + Backup backup = client.newBackupBuilder(BackupId.of(PROJECT_ID, INSTANCE_ID, BCK_ID)).build(); + mockDatabaseAdmin.addFilterMatches("name:backups/" + BCK_ID, createBackupOperation.getName()); + assertThat(backup.listBackupOperations().iterateAll()).hasSize(1); + client.createBackup(INSTANCE_ID, "other-backup", DB_ID, afterDays(7)).get(); + assertThat(backup.listBackupOperations().iterateAll()).hasSize(1); + } + + @Test + public void getAndSetIAMPolicy() { + Policy policy = client.getDatabaseIAMPolicy(INSTANCE_ID, DB_ID, 1); + assertThat(policy).isEqualTo(Policy.newBuilder().build()); + Policy newPolicy = + Policy.newBuilder().addIdentity(Role.editor(), Identity.user("joe@example.com")).build(); + Policy returnedPolicy = client.setDatabaseIAMPolicy(INSTANCE_ID, DB_ID, newPolicy); + assertThat(returnedPolicy).isEqualTo(newPolicy); + assertThat(client.getDatabaseIAMPolicy(INSTANCE_ID, DB_ID, 1)).isEqualTo(newPolicy); + } + + @Test + public void testDatabaseIAMPermissions() { + Iterable permissions = + client.testDatabaseIAMPermissions( + INSTANCE_ID, DB_ID, Collections.singletonList("spanner.databases.select")); + assertThat(permissions).containsExactly("spanner.databases.select"); + } + + private Timestamp sevenDaysAgo() { + return Timestamp.ofTimeMicroseconds( + TimeUnit.MICROSECONDS.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS) + - TimeUnit.MICROSECONDS.convert(7, TimeUnit.DAYS)); + } + + private void createTestDatabase() { + try { + createDatabaseOperation = client.createDatabase(INSTANCE_ID, DB_ID, INITIAL_STATEMENTS); + createDatabaseOperation.get(); + } catch (InterruptedException | ExecutionException e) { + throw SpannerExceptionFactory.newSpannerException(e); + } + } + + private void createTestBackup() { + try { + createBackupOperation = client.createBackup(INSTANCE_ID, BCK_ID, DB_ID, afterDays(7)); + createBackupOperation.get(); + } catch (InterruptedException | ExecutionException e) 
{ + throw SpannerExceptionFactory.newSpannerException(e); + } + } + + private void restoreTestBackup() { + try { + restoreDatabaseOperation = + client.restoreDatabase(INSTANCE_ID, BCK_ID, INSTANCE_ID, RESTORED_ID); + restoreDatabaseOperation.get(); + } catch (InterruptedException | ExecutionException e) { + throw SpannerExceptionFactory.newSpannerException(e); + } + } + + @Test + public void retryCreateBackupSlowResponse() throws Exception { + // Throw a DEADLINE_EXCEEDED after the operation has been created. This should cause the retry + // to pick up the existing operation. + mockDatabaseAdmin.setCreateBackupResponseExecutionTime( + SimulatedExecutionTime.ofException(Status.DEADLINE_EXCEEDED.asRuntimeException())); + final String backupId = "other-backup-id"; + OperationFuture op = + client.createBackup(INSTANCE_ID, backupId, DB_ID, afterDays(7)); + Backup backup = op.get(); + assertThat(backup.getId().getName()) + .isEqualTo( + String.format( + "projects/%s/instances/%s/backups/%s", PROJECT_ID, INSTANCE_ID, backupId)); + assertThat(client.getBackup(INSTANCE_ID, backupId)).isEqualTo(backup); + // There should be exactly 2 requests. One from this test case and one from the setup of the + // test which also creates a test backup. 
+ assertThat(mockDatabaseAdmin.countRequestsOfType(CreateBackupRequest.class)).isEqualTo(2); + } + + @Test + public void retryCreateBackupSlowStartup() throws Exception { + mockDatabaseAdmin.setCreateBackupStartupExecutionTime( + SimulatedExecutionTime.ofException(Status.DEADLINE_EXCEEDED.asRuntimeException())); + final String backupId = "other-backup-id"; + OperationFuture op = + client.createBackup(INSTANCE_ID, backupId, DB_ID, afterDays(7)); + Backup backup = op.get(); + assertThat(backup.getId().getName()) + .isEqualTo( + String.format( + "projects/%s/instances/%s/backups/%s", PROJECT_ID, INSTANCE_ID, backupId)); + assertThat(client.getBackup(INSTANCE_ID, backupId)).isEqualTo(backup); + assertThat(mockDatabaseAdmin.countRequestsOfType(CreateBackupRequest.class)).isAtLeast(3); + } + + @Test + public void retryCreateDatabaseSlowResponse() throws Exception { + // Throw a DEADLINE_EXCEEDED after the operation has been created. This should cause the retry + // to pick up the existing operation. + mockDatabaseAdmin.setCreateDatabaseResponseExecutionTime( + SimulatedExecutionTime.ofException(Status.DEADLINE_EXCEEDED.asRuntimeException())); + final String databaseId = "other-database-id"; + OperationFuture op = + client.createDatabase(INSTANCE_ID, databaseId, Collections.emptyList()); + Database database = op.get(); + assertThat(database.getId().getName()) + .isEqualTo( + String.format( + "projects/%s/instances/%s/databases/%s", PROJECT_ID, INSTANCE_ID, databaseId)); + assertThat(client.getDatabase(INSTANCE_ID, databaseId)).isEqualTo(database); + // There should be exactly 2 requests. One from this test case and one from the setup of the + // test which also creates a test database. 
+ assertThat(mockDatabaseAdmin.countRequestsOfType(CreateDatabaseRequest.class)).isEqualTo(2); + } + + @Test + public void retryCreateDatabaseSlowStartup() throws Exception { + mockDatabaseAdmin.setCreateDatabaseStartupExecutionTime( + SimulatedExecutionTime.ofException(Status.DEADLINE_EXCEEDED.asRuntimeException())); + final String databaseId = "other-database-id"; + OperationFuture op = + client.createDatabase(INSTANCE_ID, databaseId, Collections.emptyList()); + Database database = op.get(); + assertThat(database.getId().getName()) + .isEqualTo( + String.format( + "projects/%s/instances/%s/databases/%s", PROJECT_ID, INSTANCE_ID, databaseId)); + assertThat(client.getDatabase(INSTANCE_ID, databaseId)).isEqualTo(database); + assertThat(mockDatabaseAdmin.countRequestsOfType(CreateDatabaseRequest.class)).isAtLeast(3); + } + + @Test + public void retryRestoreDatabaseSlowResponse() throws Exception { + // Throw a DEADLINE_EXCEEDED after the operation has been created. This should cause the retry + // to pick up the existing operation. + mockDatabaseAdmin.setRestoreDatabaseResponseExecutionTime( + SimulatedExecutionTime.ofException(Status.DEADLINE_EXCEEDED.asRuntimeException())); + final String databaseId = "other-database-id"; + OperationFuture op = + client.restoreDatabase(INSTANCE_ID, BCK_ID, INSTANCE_ID, databaseId); + Database database = op.get(); + assertThat(database.getId().getName()) + .isEqualTo( + String.format( + "projects/%s/instances/%s/databases/%s", PROJECT_ID, INSTANCE_ID, databaseId)); + Database retrieved = client.getDatabase(INSTANCE_ID, databaseId); + assertThat(retrieved.getCreateTime()).isEqualTo(database.getCreateTime()); + // There should be exactly 2 requests. One from this test case and one from the setup of the + // test which also restores a test database. 
+ assertThat(mockDatabaseAdmin.countRequestsOfType(RestoreDatabaseRequest.class)).isEqualTo(2); + } + + @Test + public void retryRestoreDatabaseSlowStartup() throws Exception { + mockDatabaseAdmin.setRestoreDatabaseStartupExecutionTime( + SimulatedExecutionTime.ofException(Status.DEADLINE_EXCEEDED.asRuntimeException())); + final String databaseId = "other-database-id"; + OperationFuture op = + client.restoreDatabase(INSTANCE_ID, BCK_ID, INSTANCE_ID, databaseId); + Database database = op.get(); + assertThat(database.getId().getName()) + .isEqualTo( + String.format( + "projects/%s/instances/%s/databases/%s", PROJECT_ID, INSTANCE_ID, databaseId)); + Database retrieved = client.getDatabase(INSTANCE_ID, databaseId); + assertThat(retrieved.getCreateTime()).isEqualTo(database.getCreateTime()); + assertThat(mockDatabaseAdmin.countRequestsOfType(RestoreDatabaseRequest.class)).isAtLeast(3); + } + + @Test + public void testRetryOperationOnAdminMethodQuotaPerMinutePerProjectExceeded() { + ErrorInfo info = + ErrorInfo.newBuilder() + .putMetadata("quota_limit", "AdminMethodQuotaPerMinutePerProject") + .build(); + Metadata.Key key = + Metadata.Key.of( + info.getDescriptorForType().getFullName() + Metadata.BINARY_HEADER_SUFFIX, + ProtoLiteUtils.metadataMarshaller(info)); + Metadata trailers = new Metadata(); + trailers.put(key, info); + mockDatabaseAdmin.addException( + Status.RESOURCE_EXHAUSTED.withDescription("foo").asRuntimeException(trailers)); + mockDatabaseAdmin.clearRequests(); + + Database database = client.getDatabase(INSTANCE_ID, DB_ID); + assertEquals(DB_ID, database.getId().getDatabase()); + assertEquals(2, mockDatabaseAdmin.countRequestsOfType(GetDatabaseRequest.class)); + } + + @Test + public void testRetriesDisabledForOperationOnAdminMethodQuotaPerMinutePerProjectExceeded() { + ErrorInfo info = + ErrorInfo.newBuilder() + .putMetadata("quota_limit", "AdminMethodQuotaPerMinutePerProject") + .build(); + Metadata.Key key = + Metadata.Key.of( + 
info.getDescriptorForType().getFullName() + Metadata.BINARY_HEADER_SUFFIX, + ProtoLiteUtils.metadataMarshaller(info)); + Metadata trailers = new Metadata(); + trailers.put(key, info); + mockDatabaseAdmin.addException( + Status.RESOURCE_EXHAUSTED.withDescription("foo").asRuntimeException(trailers)); + mockDatabaseAdmin.clearRequests(); + + try (Spanner spannerWithoutRetries = + spanner.getOptions().toBuilder() + .disableAdministrativeRequestRetries() + .build() + .getService()) { + AdminRequestsPerMinuteExceededException exception = + assertThrows( + AdminRequestsPerMinuteExceededException.class, + () -> spannerWithoutRetries.getDatabaseAdminClient().getDatabase(INSTANCE_ID, DB_ID)); + assertEquals(ErrorCode.RESOURCE_EXHAUSTED, exception.getErrorCode()); + // There should be only one request on the server, as the request was not retried. + assertEquals(1, mockDatabaseAdmin.countRequestsOfType(GetDatabaseRequest.class)); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseAdminGaxTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseAdminGaxTest.java new file mode 100644 index 000000000000..be9f07d7f798 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseAdminGaxTest.java @@ -0,0 +1,426 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.paging.Page; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.admin.database.v1.MockDatabaseAdminImpl; +import com.google.common.base.Throwables; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.Uninterruptibles; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Any; +import com.google.protobuf.Empty; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.ListDatabasesRequest; +import com.google.spanner.admin.database.v1.ListDatabasesResponse; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.admin.instance.v1.InstanceName; +import io.grpc.Server; +import io.grpc.StatusRuntimeException; +import io.grpc.inprocess.InProcessServerBuilder; +import java.io.IOException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class DatabaseAdminGaxTest { + public static class DelayedStatusRuntimeException extends RuntimeException { + private final long millis; + + public 
DelayedStatusRuntimeException(StatusRuntimeException cause, long millis) { + super(cause); + this.millis = millis; + } + + @Override + public synchronized Throwable getCause() { + Uninterruptibles.sleepUninterruptibly(millis, TimeUnit.MILLISECONDS); + return super.getCause(); + } + } + + private static final String PROJECT = "PROJECT"; + private static final String INSTANCE = "INSTANCE"; + private static final StatusRuntimeException UNAVAILABLE = + io.grpc.Status.UNAVAILABLE.withDescription("Retryable test exception.").asRuntimeException(); + private static final StatusRuntimeException FAILED_PRECONDITION = + io.grpc.Status.FAILED_PRECONDITION + .withDescription("Non-retryable test exception.") + .asRuntimeException(); + + private static Exception createDelayedInternal() { + return new DelayedStatusRuntimeException( + io.grpc.Status.INTERNAL.withDescription("Delayed test exception.").asRuntimeException(), + 500L); + } + + public enum ExceptionType { + RETRYABLE { + @Override + public Exception getException() { + return UNAVAILABLE; + } + + @Override + public ErrorCode getExpectedErrorCodeWithGax() { + return null; + } + + @Override + public ErrorCode getExpectedErrorCodeWithoutGax() { + return ErrorCode.UNAVAILABLE; + } + + @Override + public boolean isRetryable() { + return true; + } + }, + NON_RETRYABLE { + @Override + public Exception getException() { + return FAILED_PRECONDITION; + } + + @Override + public ErrorCode getExpectedErrorCodeWithGax() { + return ErrorCode.FAILED_PRECONDITION; + } + + @Override + public ErrorCode getExpectedErrorCodeWithoutGax() { + return ErrorCode.FAILED_PRECONDITION; + } + + @Override + public boolean isRetryable() { + return false; + } + }, + DELAYED { + @Override + public Exception getException() { + return createDelayedInternal(); + } + + @Override + public ErrorCode getExpectedErrorCodeWithGax() { + return ErrorCode.DEADLINE_EXCEEDED; + } + + @Override + public ErrorCode getExpectedErrorCodeWithoutGax() { + return 
ErrorCode.INTERNAL; + } + + @Override + public boolean isRetryable() { + return true; + } + }; + + public abstract Exception getException(); + + public abstract ErrorCode getExpectedErrorCodeWithGax(); + + public abstract ErrorCode getExpectedErrorCodeWithoutGax(); + + public abstract boolean isRetryable(); + } + + private static MockDatabaseAdminImpl mockDatabaseAdmin; + private static Server server; + private static Spanner spanner; + private static DatabaseAdminClient client; + private static LocalChannelProvider channelProvider; + + @Parameter(0) + public int exceptionAtCall; + + @Parameter(1) + public ExceptionType exceptionType; + + @Parameters(name = "exception at call = {0}, exception type = {1}") + public static Collection data() { + List params = new ArrayList<>(); + for (int exceptionAtCall : new int[] {0, 1}) { + for (ExceptionType exceptionType : ExceptionType.values()) { + params.add(new Object[] {exceptionAtCall, exceptionType}); + } + } + return params; + } + + @Rule public ExpectedException expectedException = ExpectedException.none(); + + @BeforeClass + public static void startStaticServer() throws IOException { + mockDatabaseAdmin = new MockDatabaseAdminImpl(); + String uniqueName = InProcessServerBuilder.generateName(); + server = + InProcessServerBuilder.forName(uniqueName) + // We need to use a real executor for timeouts to occur. 
+ .scheduledExecutorService(new ScheduledThreadPoolExecutor(1)) + .addService(mockDatabaseAdmin) + .build() + .start(); + channelProvider = LocalChannelProvider.create(uniqueName); + } + + @AfterClass + public static void stopServer() throws InterruptedException { + server.shutdown(); + server.awaitTermination(); + } + + @Before + public void setUp() throws Exception { + mockDatabaseAdmin.reset(); + RetrySettings retrySettingsWithLowTimeout = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1L)) + .setMaxRetryDelayDuration(Duration.ofMillis(1L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(20L)) + .setMaxRpcTimeoutDuration(Duration.ofMillis(1000L)) + .setRetryDelayMultiplier(2.0) + .setMaxAttempts(10) + .setTotalTimeoutDuration(Duration.ofMillis(200L)) + .build(); + RetrySettings retrySettingsWithHighTimeout = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1L)) + .setMaxRetryDelayDuration(Duration.ofMillis(1L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(2000L)) + .setMaxRpcTimeoutDuration(Duration.ofMillis(5000L)) + .setMaxAttempts(3) + .setTotalTimeoutDuration(Duration.ofMillis(15000L)) + .build(); + final RetrySettings retrySettingsToUse = + exceptionType == ExceptionType.DELAYED + ? 
retrySettingsWithLowTimeout + : retrySettingsWithHighTimeout; + SpannerOptions.Builder builder = + SpannerOptions.newBuilder() + .setProjectId(PROJECT) + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()); + builder + .getDatabaseAdminStubSettingsBuilder() + .applyToAllUnaryMethods( + input -> { + input.setRetrySettings(retrySettingsToUse); + return null; + }); + if (!builder + .getDatabaseAdminStubSettingsBuilder() + .createDatabaseOperationSettings() + .getInitialCallSettings() + .getRetryableCodes() + .isEmpty()) { + builder + .getDatabaseAdminStubSettingsBuilder() + .createDatabaseOperationSettings() + .setInitialCallSettings( + builder + .getDatabaseAdminStubSettingsBuilder() + .createDatabaseOperationSettings() + .getInitialCallSettings() + .toBuilder() + .setRetrySettings(retrySettingsToUse) + .build()); + } + if (!builder + .getDatabaseAdminStubSettingsBuilder() + .updateDatabaseDdlOperationSettings() + .getInitialCallSettings() + .getRetryableCodes() + .isEmpty()) { + builder + .getDatabaseAdminStubSettingsBuilder() + .updateDatabaseDdlOperationSettings() + .setInitialCallSettings( + builder + .getDatabaseAdminStubSettingsBuilder() + .updateDatabaseDdlOperationSettings() + .getInitialCallSettings() + .toBuilder() + .setRetrySettings(retrySettingsToUse) + .build()); + } + spanner = builder.build().getService(); + client = spanner.getDatabaseAdminClient(); + } + + @After + public void tearDown() { + spanner.close(); + } + + private Exception setupException() { + if (!exceptionType.isRetryable()) { + expectedException.expect( + SpannerMatchers.isSpannerException(exceptionType.getExpectedErrorCodeWithGax())); + } + return exceptionType.getException(); + } + + @Test + public void listDatabasesTest() { + Exception exception = setupException(); + String nextPageToken = "token%d"; + List databases = new ArrayList<>(2); + for (int i = 0; i < 2; i++) { + databases.add( + com.google.spanner.admin.database.v1.Database.newBuilder() + 
.setName( + String.format("projects/%s/instances/%s/databases/test%d", PROJECT, INSTANCE, i)) + .build()); + } + if (exceptionAtCall == 0) { + mockDatabaseAdmin.addException(exception); + } + for (int i = 0; i < 2; i++) { + ListDatabasesResponse.Builder builder = + ListDatabasesResponse.newBuilder() + .addAllDatabases(Collections.singletonList(databases.get(i))); + if (i < (databases.size() - 1)) { + builder.setNextPageToken(String.format(nextPageToken, i)); + } + if (exceptionAtCall == (i + 1)) { + mockDatabaseAdmin.addException(exception); + } + mockDatabaseAdmin.addResponse(builder.build()); + } + + InstanceName parent = InstanceName.of(PROJECT, INSTANCE); + Page pagedListResponse = client.listDatabases(INSTANCE); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + Assert.assertEquals(2, resources.size()); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(2, actualRequests.size()); + ListDatabasesRequest actualRequest = (ListDatabasesRequest) actualRequests.get(0); + + Assert.assertEquals(parent, InstanceName.parse(actualRequest.getParent())); + } + + @Test + public void getDatabaseTest() { + Exception exception = setupException(); + DatabaseName name2 = DatabaseName.of(PROJECT, INSTANCE, "DATABASE"); + com.google.spanner.admin.database.v1.Database expectedResponse = + com.google.spanner.admin.database.v1.Database.newBuilder() + .setName(name2.toString()) + .build(); + if (exceptionAtCall == 0) { + mockDatabaseAdmin.addException(exception); + } + mockDatabaseAdmin.addResponse(expectedResponse); + if (exceptionAtCall == 1) { + mockDatabaseAdmin.addException(exception); + } + mockDatabaseAdmin.addResponse(expectedResponse); + + for (int i = 0; i < 2; i++) { + Database actualResponse = client.getDatabase(INSTANCE, "DATABASE"); + Assert.assertEquals(name2.toString(), actualResponse.getId().getName()); + } + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(2, actualRequests.size()); + 
} + + @Test + public void updateDatabaseDdlTest() throws Exception { + Exception exception = setupException(); + com.google.longrunning.Operation resultOperation = + com.google.longrunning.Operation.newBuilder() + .setName("updateDatabaseDdlTest") + .setDone(true) + .setResponse(Any.pack(Empty.getDefaultInstance())) + .build(); + if (exceptionAtCall == 0) { + mockDatabaseAdmin.addException(exception); + } + mockDatabaseAdmin.addResponse(resultOperation); + if (exceptionAtCall == 1) { + mockDatabaseAdmin.addException(exception); + } + mockDatabaseAdmin.addResponse(resultOperation); + + for (int i = 0; i < 2; i++) { + OperationFuture actualResponse = + client.updateDatabaseDdl( + INSTANCE, + "DATABASE", + Collections.singletonList("CREATE TABLE FOO"), + "updateDatabaseDdlTest"); + try { + actualResponse.get(); + } catch (ExecutionException e) { + Throwables.throwIfUnchecked(e.getCause()); + throw e; + } + } + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(2, actualRequests.size()); + } + + @Test + public void deleteInstanceTest() { + Exception exception = setupException(); + Empty expectedResponse = Empty.newBuilder().build(); + if (exceptionAtCall == 0) { + mockDatabaseAdmin.addException(exception); + } + mockDatabaseAdmin.addResponse(expectedResponse); + if (exceptionAtCall == 1) { + mockDatabaseAdmin.addException(exception); + } + mockDatabaseAdmin.addResponse(expectedResponse); + for (int i = 0; i < 2; i++) { + client.dropDatabase(INSTANCE, "DATABASE"); + } + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(2, actualRequests.size()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseClientImplTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseClientImplTest.java new file mode 100644 index 000000000000..8acb0a7b725b --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseClientImplTest.java @@ -0,0 +1,4251 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.MockSpannerTestUtil.READ_COLUMN_NAMES; +import static com.google.cloud.spanner.MockSpannerTestUtil.READ_ONE_KEY_VALUE_RESULTSET; +import static com.google.cloud.spanner.MockSpannerTestUtil.READ_ONE_KEY_VALUE_STATEMENT; +import static com.google.cloud.spanner.MockSpannerTestUtil.READ_TABLE_NAME; +import static com.google.cloud.spanner.MockSpannerTestUtil.SELECT1; +import static com.google.cloud.spanner.MockSpannerTestUtil.SELECT1_FROM_TABLE; +import static com.google.cloud.spanner.MockSpannerTestUtil.SELECT1_RESULTSET; +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ServerStream; +import com.google.api.gax.rpc.StatusCode; +import 
com.google.cloud.ByteArray; +import com.google.cloud.NoCredentials; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AsyncResultSet.CallbackResponse; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.Options.RpcLockHint; +import com.google.cloud.spanner.Options.RpcOrderBy; +import com.google.cloud.spanner.Options.RpcPriority; +import com.google.cloud.spanner.ReadContext.QueryAnalyzeMode; +import com.google.cloud.spanner.SingerProto.Genre; +import com.google.cloud.spanner.SingerProto.SingerInfo; +import com.google.cloud.spanner.SpannerException.ResourceNotFoundException; +import com.google.cloud.spanner.SpannerOptions.CallContextConfigurator; +import com.google.cloud.spanner.SpannerOptions.SpannerCallContextTimeoutConfigurator; +import com.google.cloud.spanner.Type.Code; +import com.google.cloud.spanner.connection.RandomResultSetGenerator; +import com.google.common.base.Stopwatch; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.google.common.io.BaseEncoding; +import com.google.common.util.concurrent.SettableFuture; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ByteString; +import com.google.protobuf.ListValue; +import com.google.protobuf.NullValue; +import com.google.rpc.RetryInfo; +import com.google.spanner.v1.BatchWriteRequest; +import com.google.spanner.v1.BatchWriteResponse; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.CreateSessionRequest; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.DirectedReadOptions.IncludeReplicas; +import com.google.spanner.v1.DirectedReadOptions.ReplicaSelection; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import 
com.google.spanner.v1.ExecuteSqlRequest.QueryMode; +import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.ReadRequest.LockHint; +import com.google.spanner.v1.ReadRequest.OrderBy; +import com.google.spanner.v1.RequestOptions.Priority; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeAnnotationCode; +import com.google.spanner.v1.TypeCode; +import io.grpc.Context; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import io.grpc.Server; +import io.grpc.ServerInterceptors; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import io.grpc.protobuf.lite.ProtoLiteUtils; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Base64; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class DatabaseClientImplTest 
{ + + private static final String TEST_PROJECT = "my-project"; + private static final String TEST_INSTANCE = "my-instance"; + private static final String TEST_DATABASE = "my-database"; + private static final String TEST_DATABASE_ROLE = "my-role"; + private static final String INSTANCE_NAME = + String.format("projects/%s/instances/%s", TEST_PROJECT, TEST_INSTANCE); + private static final String DATABASE_NAME = + String.format( + "projects/%s/instances/%s/databases/%s", TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE); + private static XGoogSpannerRequestIdTest.ServerHeaderEnforcer xGoogReqIdInterceptor; + private static MockSpannerServiceImpl mockSpanner; + private static Server server; + private static final Statement UPDATE_STATEMENT = + Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=2"); + private static final Statement INVALID_UPDATE_STATEMENT = + Statement.of("UPDATE NON_EXISTENT_TABLE SET BAR=1 WHERE BAZ=2"); + private static final long UPDATE_COUNT = 1L; + private static final com.google.rpc.Status STATUS_OK = + com.google.rpc.Status.newBuilder().setCode(com.google.rpc.Code.OK_VALUE).build(); + private static final Iterable MUTATION_GROUPS = + ImmutableList.of( + MutationGroup.of( + Mutation.newInsertBuilder("FOO1").set("ID").to(1L).set("NAME").to("Bar1").build(), + Mutation.newInsertBuilder("FOO2").set("ID").to(2L).set("NAME").to("Bar2").build()), + MutationGroup.of( + Mutation.newInsertBuilder("FOO3").set("ID").to(3L).set("NAME").to("Bar3").build(), + Mutation.newInsertBuilder("FOO4").set("ID").to(4L).set("NAME").to("Bar4").build()), + MutationGroup.of( + Mutation.newInsertBuilder("FOO4").set("ID").to(4L).set("NAME").to("Bar4").build(), + Mutation.newInsertBuilder("FOO5").set("ID").to(5L).set("NAME").to("Bar5").build()), + MutationGroup.of( + Mutation.newInsertBuilder("FOO6").set("ID").to(6L).set("NAME").to("Bar6").build())); + private static final Iterable BATCH_WRITE_RESPONSES = + ImmutableList.of( + BatchWriteResponse.newBuilder() + .setStatus(STATUS_OK) + 
.addAllIndexes(ImmutableList.of(0, 1)) + .build(), + BatchWriteResponse.newBuilder() + .setStatus(STATUS_OK) + .addAllIndexes(ImmutableList.of(2, 3)) + .build()); + private static final DirectedReadOptions DIRECTED_READ_OPTIONS1 = + DirectedReadOptions.newBuilder() + .setIncludeReplicas( + IncludeReplicas.newBuilder() + .addReplicaSelections( + ReplicaSelection.newBuilder().setLocation("us-west1").build())) + .build(); + private static final DirectedReadOptions DIRECTED_READ_OPTIONS2 = + DirectedReadOptions.newBuilder() + .setIncludeReplicas( + IncludeReplicas.newBuilder() + .addReplicaSelections( + ReplicaSelection.newBuilder().setLocation("us-east1").build())) + .build(); + private Spanner spanner; + private static ExecutorService executor; + + @BeforeClass + public static void startStaticServer() throws Exception { + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. + mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + mockSpanner.putStatementResult( + StatementResult.query(SELECT1, MockSpannerTestUtil.SELECT1_RESULTSET)); + mockSpanner.putStatementResult( + StatementResult.query(READ_ONE_KEY_VALUE_STATEMENT, READ_ONE_KEY_VALUE_RESULTSET)); + mockSpanner.putStatementResult( + StatementResult.exception( + INVALID_UPDATE_STATEMENT, + Status.INVALID_ARGUMENT.withDescription("invalid statement").asRuntimeException())); + mockSpanner.putStatementResult( + StatementResult.query(SELECT1_FROM_TABLE, MockSpannerTestUtil.SELECT1_RESULTSET)); + mockSpanner.setBatchWriteResult(BATCH_WRITE_RESPONSES); + + Set checkMethods = + new HashSet( + Arrays.asList( + "google.spanner.v1.Spanner/BatchCreateSessions", + "google.spanner.v1.Spanner/BatchWrite", + "google.spanner.v1.Spanner/BeginTransaction", + "google.spanner.v1.Spanner/Commit", + "google.spanner.v1.Spanner/CreateSession", + "google.spanner.v1.Spanner/DeleteSession", + 
"google.spanner.v1.Spanner/ExecuteBatchDml", + "google.spanner.v1.Spanner/ExecuteSql", + "google.spanner.v1.Spanner/ExecuteStreamingSql", + "google.spanner.v1.Spanner/GetSession", + "google.spanner.v1.Spanner/ListSessions", + "google.spanner.v1.Spanner/PartitionQuery", + "google.spanner.v1.Spanner/PartitionRead", + "google.spanner.v1.Spanner/Read", + "google.spanner.v1.Spanner/Rollback", + "google.spanner.v1.Spanner/StreamingRead")); + xGoogReqIdInterceptor = new XGoogSpannerRequestIdTest.ServerHeaderEnforcer(checkMethods); + executor = Executors.newSingleThreadExecutor(); + InetSocketAddress address = new InetSocketAddress("localhost", 0); + server = + NettyServerBuilder.forAddress(address) + .addService(ServerInterceptors.intercept(mockSpanner, xGoogReqIdInterceptor)) + .build() + .start(); + } + + @AfterClass + public static void stopServer() throws InterruptedException { + server.shutdown(); + server.awaitTermination(); + executor.shutdown(); + } + + @Before + public void setUp() { + String endpoint = "localhost:" + server.getPort(); + spanner = + SpannerOptions.newBuilder() + .setProjectId(TEST_PROJECT) + .setDatabaseRole(TEST_DATABASE_ROLE) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://" + endpoint) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption(SessionPoolOptions.newBuilder().setFailOnSessionLeak().build()) + .build() + .getService(); + } + + @After + public void tearDown() { + mockSpanner.unfreeze(); + spanner.close(); + mockSpanner.reset(); + xGoogReqIdInterceptor.reset(); + mockSpanner.removeAllExecutionTimes(); + } + + @Test + public void testWrite() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + Timestamp timestamp = MockSpannerTestActions.writeInsertMutation(client); + assertNotNull(timestamp); + + List beginTransactions = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + 
assertThat(beginTransactions).hasSize(1); + BeginTransactionRequest beginTransaction = beginTransactions.get(0); + assertNotNull(beginTransaction.getOptions()); + assertTrue(beginTransaction.getOptions().hasReadWrite()); + assertFalse(beginTransaction.getOptions().getExcludeTxnFromChangeStreams()); + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(commitRequests).hasSize(1); + CommitRequest commit = commitRequests.get(0); + assertNotNull(commit.getRequestOptions()); + assertEquals(Priority.PRIORITY_UNSPECIFIED, commit.getRequestOptions().getPriority()); + } + + @Test + public void testWriteAborted() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + // Force the Commit RPC to return Aborted the first time it is called. The exception is cleared + // after the first call, so the retry should succeed. + mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + Timestamp timestamp = MockSpannerTestActions.writeInsertMutation(client); + assertNotNull(timestamp); + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertEquals(2, commitRequests.size()); + } + + @Test + public void testWriteAtLeastOnceAborted() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + // Force the Commit RPC to return Aborted the first time it is called. The exception is cleared + // after the first call, so the retry should succeed. 
+ mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + Timestamp timestamp = MockSpannerTestActions.writeAtLeastOnceInsertMutation(client); + assertNotNull(timestamp); + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertEquals(2, commitRequests.size()); + // TODO(@odeke-em): Enable in later PR. + // xGoogReqIdInterceptor.assertIntegrity(); + } + + @Test + public void testWriteWithOptions() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.writeInsertMutationWithOptions( + client, Options.priority(RpcPriority.HIGH)); + + List beginTransactions = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertThat(beginTransactions).hasSize(1); + BeginTransactionRequest beginTransaction = beginTransactions.get(0); + assertNotNull(beginTransaction.getOptions()); + assertTrue(beginTransaction.getOptions().hasReadWrite()); + assertFalse(beginTransaction.getOptions().getExcludeTxnFromChangeStreams()); + + List commits = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(commits).hasSize(1); + CommitRequest commit = commits.get(0); + assertNotNull(commit.getRequestOptions()); + assertEquals(Priority.PRIORITY_HIGH, commit.getRequestOptions().getPriority()); + } + + @Test + public void testWriteWithCommitStats() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + CommitResponse response = + client.writeWithOptions( + Collections.singletonList( + Mutation.newInsertBuilder("FOO").set("ID").to(1L).set("NAME").to("Bar").build()), + Options.commitStats()); + assertNotNull(response); + assertNotNull(response.getCommitTimestamp()); + assertNotNull(response.getCommitStats()); + } + + @Test + public void testWriteWithExcludeTxnFromChangeStreams() { + DatabaseClient client = + 
spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.writeInsertMutationWithOptions( + client, Options.excludeTxnFromChangeStreams()); + + List beginTransactions = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertThat(beginTransactions).hasSize(1); + BeginTransactionRequest beginTransaction = beginTransactions.get(0); + assertNotNull(beginTransaction.getOptions()); + assertTrue(beginTransaction.getOptions().hasReadWrite()); + assertTrue(beginTransaction.getOptions().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testWriteAtLeastOnce() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + Timestamp timestamp = MockSpannerTestActions.writeAtLeastOnceInsertMutation(client); + assertNotNull(timestamp); + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(commitRequests).hasSize(1); + CommitRequest commit = commitRequests.get(0); + assertNotNull(commit.getSingleUseTransaction()); + assertTrue(commit.getSingleUseTransaction().hasReadWrite()); + assertFalse(commit.getSingleUseTransaction().getExcludeTxnFromChangeStreams()); + assertNotNull(commit.getRequestOptions()); + assertEquals(Priority.PRIORITY_UNSPECIFIED, commit.getRequestOptions().getPriority()); + } + + @Test + public void testWriteAtLeastOnceWithCommitStats() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + CommitResponse response = + client.writeAtLeastOnceWithOptions( + Collections.singletonList( + Mutation.newInsertBuilder("FOO").set("ID").to(1L).set("NAME").to("Bar").build()), + Options.commitStats()); + assertNotNull(response); + assertNotNull(response.getCommitTimestamp()); + assertNotNull(response.getCommitStats()); + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(commitRequests).hasSize(1); + 
CommitRequest commit = commitRequests.get(0); + assertNotNull(commit.getSingleUseTransaction()); + assertTrue(commit.getSingleUseTransaction().hasReadWrite()); + assertFalse(commit.getSingleUseTransaction().getExcludeTxnFromChangeStreams()); + assertNotNull(commit.getRequestOptions()); + assertEquals(Priority.PRIORITY_UNSPECIFIED, commit.getRequestOptions().getPriority()); + } + + @Test + public void testWriteAtLeastOnceWithOptions() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.writeAtLeastOnceWithOptionsInsertMutation( + client, Options.priority(RpcPriority.LOW)); + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(commitRequests).hasSize(1); + CommitRequest commit = commitRequests.get(0); + assertNotNull(commit.getSingleUseTransaction()); + assertTrue(commit.getSingleUseTransaction().hasReadWrite()); + assertFalse(commit.getSingleUseTransaction().getExcludeTxnFromChangeStreams()); + assertNotNull(commit.getRequestOptions()); + assertEquals(Priority.PRIORITY_LOW, commit.getRequestOptions().getPriority()); + } + + @Test + public void testWriteAtLeastOnceWithTagOptions() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.writeAtLeastOnceWithOptionsInsertMutation( + client, Options.tag("app=spanner,env=test")); + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(commitRequests).hasSize(1); + CommitRequest commit = commitRequests.get(0); + assertNotNull(commit.getSingleUseTransaction()); + assertTrue(commit.getSingleUseTransaction().hasReadWrite()); + assertFalse(commit.getSingleUseTransaction().getExcludeTxnFromChangeStreams()); + assertNotNull(commit.getRequestOptions()); + assertThat(commit.getRequestOptions().getTransactionTag()).isEqualTo("app=spanner,env=test"); + 
assertThat(commit.getRequestOptions().getRequestTag()).isEmpty(); + } + + @Test + public void testWriteAtLeastOnceWithExcludeTxnFromChangeStreams() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.writeAtLeastOnceWithOptionsInsertMutation( + client, Options.excludeTxnFromChangeStreams()); + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(commitRequests).hasSize(1); + CommitRequest commit = commitRequests.get(0); + assertNotNull(commit.getSingleUseTransaction()); + assertTrue(commit.getSingleUseTransaction().hasReadWrite()); + assertTrue(commit.getSingleUseTransaction().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testBatchWriteAtLeastOnceWithoutOptions() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + + ServerStream responseStream = client.batchWriteAtLeastOnce(MUTATION_GROUPS); + int idx = 0; + for (BatchWriteResponse response : responseStream) { + assertEquals( + response.getStatus(), + com.google.rpc.Status.newBuilder().setCode(com.google.rpc.Code.OK_VALUE).build()); + assertEquals(response.getIndexesList(), ImmutableList.of(idx, idx + 1)); + idx += 2; + } + + assertNotNull(responseStream); + List requests = mockSpanner.getRequestsOfType(BatchWriteRequest.class); + assertEquals(requests.size(), 1); + BatchWriteRequest request = requests.get(0); + assertEquals(request.getMutationGroupsCount(), 4); + assertEquals(request.getRequestOptions().getPriority(), Priority.PRIORITY_UNSPECIFIED); + assertFalse(request.getExcludeTxnFromChangeStreams()); + } + + @Test + public void testBatchWriteAtLeastOnceWithOptions() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + consumeBatchWriteStream( + client.batchWriteAtLeastOnce(MUTATION_GROUPS, Options.priority(RpcPriority.LOW))); + + List 
requests = mockSpanner.getRequestsOfType(BatchWriteRequest.class); + assertEquals(requests.size(), 1); + BatchWriteRequest request = requests.get(0); + assertEquals(request.getMutationGroupsCount(), 4); + assertEquals(request.getRequestOptions().getPriority(), Priority.PRIORITY_LOW); + assertFalse(request.getExcludeTxnFromChangeStreams()); + } + + @Test + public void testBatchWriteAtLeastOnceWithTagOptions() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + consumeBatchWriteStream( + client.batchWriteAtLeastOnce(MUTATION_GROUPS, Options.tag("app=spanner,env=test"))); + + List requests = mockSpanner.getRequestsOfType(BatchWriteRequest.class); + assertEquals(requests.size(), 1); + BatchWriteRequest request = requests.get(0); + assertEquals(request.getMutationGroupsCount(), 4); + assertEquals(request.getRequestOptions().getTransactionTag(), "app=spanner,env=test"); + assertThat(request.getRequestOptions().getRequestTag()).isEmpty(); + assertFalse(request.getExcludeTxnFromChangeStreams()); + } + + @Test + public void testBatchWriteAtLeastOnceWithExcludeTxnFromChangeStreams() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + consumeBatchWriteStream( + client.batchWriteAtLeastOnce(MUTATION_GROUPS, Options.excludeTxnFromChangeStreams())); + + List requests = mockSpanner.getRequestsOfType(BatchWriteRequest.class); + assertEquals(requests.size(), 1); + BatchWriteRequest request = requests.get(0); + assertEquals(request.getMutationGroupsCount(), 4); + assertTrue(request.getExcludeTxnFromChangeStreams()); + } + + @Test + public void testExecuteQueryWithTag() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet resultSet = + client + .singleUse() + .executeQuery(SELECT1, Options.tag("app=spanner,env=test,action=query"))) { + consumeResults(resultSet); + } + + List 
requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertThat(requests).hasSize(1); + ExecuteSqlRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertThat(request.getRequestOptions().getRequestTag()) + .isEqualTo("app=spanner,env=test,action=query"); + assertThat(request.getRequestOptions().getTransactionTag()).isEmpty(); + } + + @Test + public void testExecuteQuery_withDirectedReadOptionsViaRequest() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet resultSet = + client.singleUse().executeQuery(SELECT1, Options.directedRead(DIRECTED_READ_OPTIONS1))) { + consumeResults(resultSet); + } + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(1, requests.size()); + ExecuteSqlRequest request = requests.get(0); + assertTrue(request.hasDirectedReadOptions()); + assertEquals(DIRECTED_READ_OPTIONS1, request.getDirectedReadOptions()); + } + + @Test + public void testExecuteQuery_withDirectedReadOptionsViaSpannerOptions() { + Spanner spannerWithDirectedReadOptions = + spanner.getOptions().toBuilder() + .setDirectedReadOptions(DIRECTED_READ_OPTIONS2) + .build() + .getService(); + DatabaseClient client = + spannerWithDirectedReadOptions.getDatabaseClient( + DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT1)) { + consumeResults(resultSet); + } + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(requests.size(), 1); + ExecuteSqlRequest request = requests.get(0); + assertTrue(request.hasDirectedReadOptions()); + assertEquals(DIRECTED_READ_OPTIONS2, request.getDirectedReadOptions()); + } + + @Test + public void testExecuteQuery_whenMultipleDirectedReadsOptions_preferRequestOption() { + Spanner spannerWithDirectedReadOptions = + spanner.getOptions().toBuilder() + 
.setDirectedReadOptions(DIRECTED_READ_OPTIONS2) + .build() + .getService(); + DatabaseClient client = + spannerWithDirectedReadOptions.getDatabaseClient( + DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet resultSet = + client.singleUse().executeQuery(SELECT1, Options.directedRead(DIRECTED_READ_OPTIONS1))) { + consumeResults(resultSet); + } + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(requests.size(), 1); + ExecuteSqlRequest request = requests.get(0); + assertTrue(request.hasDirectedReadOptions()); + assertEquals(DIRECTED_READ_OPTIONS1, request.getDirectedReadOptions()); + } + + @Test + public void testExecuteReadWithTag() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet resultSet = + client + .singleUse() + .read( + READ_TABLE_NAME, + KeySet.singleKey(Key.of(1L)), + READ_COLUMN_NAMES, + Options.tag("app=spanner,env=test,action=read"))) { + consumeResults(resultSet); + } + + List requests = mockSpanner.getRequestsOfType(ReadRequest.class); + assertThat(requests).hasSize(1); + ReadRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertThat(request.getRequestOptions().getRequestTag()) + .isEqualTo("app=spanner,env=test,action=read"); + assertThat(request.getRequestOptions().getTransactionTag()).isEmpty(); + } + + @Test + public void testExecuteReadWithOrderByOption() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet resultSet = + client + .singleUse() + .read( + READ_TABLE_NAME, + KeySet.singleKey(Key.of(1L)), + READ_COLUMN_NAMES, + Options.orderBy(RpcOrderBy.NO_ORDER))) { + consumeResults(resultSet); + } + + List requests = mockSpanner.getRequestsOfType(ReadRequest.class); + assertThat(requests).hasSize(1); + ReadRequest request = requests.get(0); + assertEquals(OrderBy.ORDER_BY_NO_ORDER, 
request.getOrderBy()); + } + + @Test + public void testUnsupportedTransactionWithLockHintOption() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet resultSet = + client + .singleUse() + .read( + READ_TABLE_NAME, + KeySet.singleKey(Key.of(1L)), + READ_COLUMN_NAMES, + Options.lockHint(RpcLockHint.EXCLUSIVE))) { + consumeResults(resultSet); + } + + List requests = mockSpanner.getRequestsOfType(ReadRequest.class); + assertThat(requests).hasSize(1); + ReadRequest request = requests.get(0); + // lock hint is only supported in ReadWriteTransaction + assertEquals(LockHint.LOCK_HINT_UNSPECIFIED, request.getLockHint()); + } + + @Test + public void testReadWriteTransactionWithLockHint() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + + TransactionRunner runner = client.readWriteTransaction(); + runner.run( + transaction -> { + try (ResultSet resultSet = + transaction.read( + READ_TABLE_NAME, + KeySet.singleKey(Key.of(1L)), + READ_COLUMN_NAMES, + Options.lockHint(RpcLockHint.EXCLUSIVE))) { + consumeResults(resultSet); + } + return null; + }); + + List requests = mockSpanner.getRequestsOfType(ReadRequest.class); + assertThat(requests).hasSize(1); + ReadRequest request = requests.get(0); + assertEquals(LockHint.LOCK_HINT_EXCLUSIVE, request.getLockHint()); + } + + @Test + public void testExecuteReadWithDirectedReadOptions() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet resultSet = + client + .singleUse() + .read( + READ_TABLE_NAME, + KeySet.singleKey(Key.of(1L)), + READ_COLUMN_NAMES, + Options.directedRead(DIRECTED_READ_OPTIONS1))) { + consumeResults(resultSet); + } + + List requests = mockSpanner.getRequestsOfType(ReadRequest.class); + assertEquals(1, requests.size()); + ReadRequest request = requests.get(0); + 
assertTrue(request.hasDirectedReadOptions()); + assertEquals(DIRECTED_READ_OPTIONS1, request.getDirectedReadOptions()); + } + + @Test + public void testExecuteReadWithDirectedReadOptionsViaSpannerOptions() { + Spanner spannerWithDirectedReadOptions = + spanner.getOptions().toBuilder() + .setDirectedReadOptions(DIRECTED_READ_OPTIONS2) + .build() + .getService(); + DatabaseClient client = + spannerWithDirectedReadOptions.getDatabaseClient( + DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet resultSet = + client.singleUse().read(READ_TABLE_NAME, KeySet.singleKey(Key.of(1L)), READ_COLUMN_NAMES)) { + consumeResults(resultSet); + } + + List requests = mockSpanner.getRequestsOfType(ReadRequest.class); + assertEquals(requests.size(), 1); + ReadRequest request = requests.get(0); + assertTrue(request.hasDirectedReadOptions()); + assertEquals(DIRECTED_READ_OPTIONS2, request.getDirectedReadOptions()); + } + + @Test + public void testReadWriteExecuteQueryWithDirectedReadOptionsViaSpannerOptions() { + Spanner spannerWithDirectedReadOptions = + spanner.getOptions().toBuilder() + .setDirectedReadOptions(DIRECTED_READ_OPTIONS2) + .build() + .getService(); + DatabaseClient client = + spannerWithDirectedReadOptions.getDatabaseClient( + DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + TransactionRunner runner = client.readWriteTransaction(); + runner.run( + transaction -> { + try (ResultSet resultSet = transaction.executeQuery(SELECT1)) { + consumeResults(resultSet); + } + return null; + }); + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(requests.size(), 1); + ExecuteSqlRequest request = requests.get(0); + assertFalse(request.hasDirectedReadOptions()); + } + + @Test + public void testReadWriteExecuteQueryWithTag() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + TransactionRunner runner = + 
client.readWriteTransaction(Options.tag("app=spanner,env=test,action=txn")); + runner.run( + transaction -> { + try (ResultSet resultSet = + transaction.executeQuery(SELECT1, Options.tag("app=spanner,env=test,action=query"))) { + consumeResults(resultSet); + } + return null; + }); + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertThat(requests).hasSize(1); + ExecuteSqlRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertThat(request.getRequestOptions().getRequestTag()) + .isEqualTo("app=spanner,env=test,action=query"); + assertThat(request.getRequestOptions().getTransactionTag()) + .isEqualTo("app=spanner,env=test,action=txn"); + } + + @Test + public void testBlindWriteWithTransactionTag() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + String transactionTag = "app=spanner,env=test,action=txn"; + TransactionRunner runner = client.readWriteTransaction(Options.tag(transactionTag)); + runner.run( + transaction -> { + transaction.buffer(Mutation.newInsertBuilder("abc").set("id").to(1L).build()); + return null; + }); + + List beginTransactionRequests = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertThat(beginTransactionRequests).hasSize(1); + assertThat(beginTransactionRequests.get(0).getRequestOptions().getTransactionTag()) + .isEqualTo(transactionTag); + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(commitRequests).hasSize(1); + assertThat(commitRequests.get(0).getRequestOptions().getTransactionTag()) + .isEqualTo(transactionTag); + } + + @Test + public void testReadWriteExecuteReadWithTag() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + TransactionRunner runner = + client.readWriteTransaction(Options.tag("app=spanner,env=test,action=txn")); + runner.run( + transaction -> { + try (ResultSet 
resultSet = + transaction.read( + READ_TABLE_NAME, + KeySet.singleKey(Key.of(1L)), + READ_COLUMN_NAMES, + Options.tag("app=spanner,env=test,action=read"))) { + consumeResults(resultSet); + } + return null; + }); + + List requests = mockSpanner.getRequestsOfType(ReadRequest.class); + assertThat(requests).hasSize(1); + ReadRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertThat(request.getRequestOptions().getRequestTag()) + .isEqualTo("app=spanner,env=test,action=read"); + assertThat(request.getRequestOptions().getTransactionTag()) + .isEqualTo("app=spanner,env=test,action=txn"); + assertEquals( + IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED, + request.getTransaction().getBegin().getIsolationLevel()); + } + + @Test + public void testExecuteUpdateWithTag() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + TransactionRunner runner = client.readWriteTransaction(); + runner.run( + transaction -> + transaction.executeUpdate( + UPDATE_STATEMENT, Options.tag("app=spanner,env=test,action=update"))); + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertThat(requests).hasSize(1); + ExecuteSqlRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertThat(request.getRequestOptions().getRequestTag()) + .isEqualTo("app=spanner,env=test,action=update"); + assertThat(request.getRequestOptions().getTransactionTag()).isEmpty(); + assertNotNull(request.getTransaction().getBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertFalse(request.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + assertEquals( + IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED, + request.getTransaction().getBegin().getIsolationLevel()); + } + + @Test + public void testBatchUpdateWithTag() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + 
TransactionRunner runner = + client.readWriteTransaction(Options.tag("app=spanner,env=test,action=txn")); + runner.run( + transaction -> + transaction.batchUpdate( + Collections.singletonList(UPDATE_STATEMENT), + Options.tag("app=spanner,env=test,action=batch"))); + + List requests = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class); + assertThat(requests).hasSize(1); + ExecuteBatchDmlRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertThat(request.getRequestOptions().getRequestTag()) + .isEqualTo("app=spanner,env=test,action=batch"); + assertThat(request.getRequestOptions().getTransactionTag()) + .isEqualTo("app=spanner,env=test,action=txn"); + assertNotNull(request.getTransaction().getBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertFalse(request.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + assertEquals( + IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED, + request.getTransaction().getBegin().getIsolationLevel()); + } + + @Test + public void testPartitionedDMLWithTag() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + client.executePartitionedUpdate( + UPDATE_STATEMENT, Options.tag("app=spanner,env=test,action=dml")); + + List beginTransactions = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertThat(beginTransactions).hasSize(1); + BeginTransactionRequest beginTransaction = beginTransactions.get(0); + assertNotNull(beginTransaction.getOptions()); + assertTrue(beginTransaction.getOptions().hasPartitionedDml()); + assertFalse(beginTransaction.getOptions().getExcludeTxnFromChangeStreams()); + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertThat(requests).hasSize(1); + ExecuteSqlRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertThat(request.getRequestOptions().getRequestTag()) + 
.isEqualTo("app=spanner,env=test,action=dml"); + assertThat(request.getRequestOptions().getTransactionTag()).isEmpty(); + } + + @Test + public void testCommitWithTag() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.commitDeleteTransaction( + client, Options.tag("app=spanner,env=test,action=commit")); + + List beginTransactions = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertThat(beginTransactions).hasSize(1); + BeginTransactionRequest beginTransaction = beginTransactions.get(0); + assertNotNull(beginTransaction.getOptions()); + assertTrue(beginTransaction.getOptions().hasReadWrite()); + assertFalse(beginTransaction.getOptions().getExcludeTxnFromChangeStreams()); + + List requests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(requests).hasSize(1); + CommitRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertThat(request.getRequestOptions().getRequestTag()).isEmpty(); + assertThat(request.getRequestOptions().getTransactionTag()) + .isEqualTo("app=spanner,env=test,action=commit"); + } + + @Test + public void testTransactionManagerCommitWithTag() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.transactionManagerCommit( + client, Options.tag("app=spanner,env=test,action=manager")); + + List beginTransactions = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertThat(beginTransactions).hasSize(1); + BeginTransactionRequest beginTransaction = beginTransactions.get(0); + assertNotNull(beginTransaction.getOptions()); + assertTrue(beginTransaction.getOptions().hasReadWrite()); + assertFalse(beginTransaction.getOptions().getExcludeTxnFromChangeStreams()); + assertEquals( + IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED, + beginTransaction.getOptions().getIsolationLevel()); + + List requests = 
mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(requests).hasSize(1); + CommitRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertThat(request.getRequestOptions().getRequestTag()).isEmpty(); + assertThat(request.getRequestOptions().getTransactionTag()) + .isEqualTo("app=spanner,env=test,action=manager"); + } + + @Test + public void testAsyncRunnerCommitWithTag() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.asyncRunnerCommit( + client, executor, Options.tag("app=spanner,env=test,action=runner")); + + List beginTransactions = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertThat(beginTransactions).hasSize(1); + BeginTransactionRequest beginTransaction = beginTransactions.get(0); + assertNotNull(beginTransaction.getOptions()); + assertTrue(beginTransaction.getOptions().hasReadWrite()); + assertFalse(beginTransaction.getOptions().getExcludeTxnFromChangeStreams()); + assertEquals( + IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED, + beginTransaction.getOptions().getIsolationLevel()); + + List requests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(requests).hasSize(1); + CommitRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertThat(request.getRequestOptions().getRequestTag()).isEmpty(); + assertThat(request.getRequestOptions().getTransactionTag()) + .isEqualTo("app=spanner,env=test,action=runner"); + } + + @Test + public void testAsyncTransactionManagerCommitWithTag() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.transactionManagerAsyncCommit( + client, executor, Options.tag("app=spanner,env=test,action=manager")); + + List beginTransactions = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertThat(beginTransactions).hasSize(1); + 
BeginTransactionRequest beginTransaction = beginTransactions.get(0); + assertNotNull(beginTransaction.getOptions()); + assertTrue(beginTransaction.getOptions().hasReadWrite()); + assertFalse(beginTransaction.getOptions().getExcludeTxnFromChangeStreams()); + assertEquals( + IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED, + beginTransaction.getOptions().getIsolationLevel()); + + List requests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(requests).hasSize(1); + CommitRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertThat(request.getRequestOptions().getRequestTag()).isEmpty(); + assertThat(request.getRequestOptions().getTransactionTag()) + .isEqualTo("app=spanner,env=test,action=manager"); + } + + @Test + public void testReadWriteTxnWithExcludeTxnFromChangeStreams_executeUpdate() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + TransactionRunner runner = client.readWriteTransaction(Options.excludeTxnFromChangeStreams()); + runner.run(transaction -> transaction.executeUpdate(UPDATE_STATEMENT)); + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertThat(requests).hasSize(1); + ExecuteSqlRequest request = requests.get(0); + assertNotNull(request.getTransaction().getBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertTrue(request.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testReadWriteTxnWithExcludeTxnFromChangeStreams_batchUpdate() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.executeBatchUpdateTransaction( + client, Options.excludeTxnFromChangeStreams()); + + List requests = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class); + assertThat(requests).hasSize(1); + ExecuteBatchDmlRequest request = requests.get(0); + 
assertNotNull(request.getTransaction().getBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertTrue(request.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testPartitionedDMLWithExcludeTxnFromChangeStreams() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + client.executePartitionedUpdate(UPDATE_STATEMENT, Options.excludeTxnFromChangeStreams()); + + List beginTransactions = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertThat(beginTransactions).hasSize(1); + BeginTransactionRequest beginTransaction = beginTransactions.get(0); + assertNotNull(beginTransaction.getOptions()); + assertTrue(beginTransaction.getOptions().hasPartitionedDml()); + assertTrue(beginTransaction.getOptions().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testCommitWithExcludeTxnFromChangeStreams() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.commitDeleteTransaction(client, Options.excludeTxnFromChangeStreams()); + + List beginTransactions = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertThat(beginTransactions).hasSize(1); + BeginTransactionRequest beginTransaction = beginTransactions.get(0); + assertNotNull(beginTransaction.getOptions()); + assertTrue(beginTransaction.getOptions().hasReadWrite()); + assertTrue(beginTransaction.getOptions().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testTransactionManagerCommitWithExcludeTxnFromChangeStreams() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.transactionManagerCommit(client, Options.excludeTxnFromChangeStreams()); + + List beginTransactions = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + 
assertThat(beginTransactions).hasSize(1); + BeginTransactionRequest beginTransaction = beginTransactions.get(0); + assertNotNull(beginTransaction.getOptions()); + assertTrue(beginTransaction.getOptions().hasReadWrite()); + assertTrue(beginTransaction.getOptions().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testAsyncRunnerCommitWithExcludeTxnFromChangeStreams() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.asyncRunnerCommit( + client, executor, Options.excludeTxnFromChangeStreams()); + + List beginTransactions = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertThat(beginTransactions).hasSize(1); + BeginTransactionRequest beginTransaction = beginTransactions.get(0); + assertNotNull(beginTransaction.getOptions()); + assertTrue(beginTransaction.getOptions().hasReadWrite()); + assertTrue(beginTransaction.getOptions().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testAsyncTransactionManagerCommitWithExcludeTxnFromChangeStreams() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.transactionManagerAsyncCommit( + client, executor, Options.excludeTxnFromChangeStreams()); + + List beginTransactions = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertThat(beginTransactions).hasSize(1); + BeginTransactionRequest beginTransaction = beginTransactions.get(0); + assertNotNull(beginTransaction.getOptions()); + assertTrue(beginTransaction.getOptions().hasReadWrite()); + assertTrue(beginTransaction.getOptions().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testExecuteUpdateWithExcludeTxnFromChangeStreams() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + TransactionRunner runner = client.readWriteTransaction(); + SpannerException e = + 
assertThrows( + SpannerException.class, + () -> + runner.run( + transaction -> + transaction.executeUpdate( + UPDATE_STATEMENT, Options.excludeTxnFromChangeStreams()))); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()) + .contains( + "Options.excludeTxnFromChangeStreams() cannot be specified for individual DML requests." + + " This option should be set at the transaction level."); + } + + @Test + public void testExecuteUpdateAsyncWithExcludeTxnFromChangeStreams() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + AsyncRunner runner = client.runAsync(); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + get( + runner.runAsync( + txn -> { + txn.executeUpdateAsync( + UPDATE_STATEMENT, Options.excludeTxnFromChangeStreams()); + return ApiFutures.immediateFuture(null); + }, + executor))); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()) + .contains( + "Options.excludeTxnFromChangeStreams() cannot be specified for individual DML requests." + + " This option should be set at the transaction level."); + } + + @Test + public void testAnalyzeUpdateWithExcludeTxnFromChangeStreams() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + TransactionRunner runner = client.readWriteTransaction(); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + runner.run( + transaction -> + transaction.analyzeUpdate( + UPDATE_STATEMENT, + QueryAnalyzeMode.PROFILE, + Options.excludeTxnFromChangeStreams()))); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()) + .contains( + "Options.excludeTxnFromChangeStreams() cannot be specified for individual DML requests." 
+ + " This option should be set at the transaction level."); + } + + @Test + public void testAnalyzeUpdateStatementWithExcludeTxnFromChangeStreams() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + TransactionRunner runner = client.readWriteTransaction(); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + runner.run( + transaction -> + transaction.analyzeUpdateStatement( + UPDATE_STATEMENT, + QueryAnalyzeMode.PROFILE, + Options.excludeTxnFromChangeStreams()))); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()) + .contains( + "Options.excludeTxnFromChangeStreams() cannot be specified for individual DML requests." + + " This option should be set at the transaction level."); + } + + @Test + public void testBatchUpdateWithExcludeTxnFromChangeStreams() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + TransactionRunner runner = client.readWriteTransaction(); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + runner.run( + transaction -> + transaction.batchUpdate( + Collections.singletonList(UPDATE_STATEMENT), + Options.excludeTxnFromChangeStreams()))); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()) + .contains( + "Options.excludeTxnFromChangeStreams() cannot be specified for individual DML requests." 
+ + " This option should be set at the transaction level."); + } + + @Test + public void testBatchUpdateAsyncWithExcludeTxnFromChangeStreams() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + AsyncRunner runner = client.runAsync(); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + get( + runner.runAsync( + txn -> { + txn.batchUpdateAsync( + Collections.singletonList(UPDATE_STATEMENT), + Options.excludeTxnFromChangeStreams()); + return ApiFutures.immediateFuture(null); + }, + executor))); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()) + .contains( + "Options.excludeTxnFromChangeStreams() cannot be specified for individual DML requests." + + " This option should be set at the transaction level."); + } + + @Test + public void singleUse() { + DatabaseClientImpl client = + (DatabaseClientImpl) + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet rs = client.singleUse().executeQuery(SELECT1)) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(0)).isEqualTo(1L); + assertThat(rs.next()).isFalse(); + } + } + + @Test + public void singleUseIsNonBlocking() { + mockSpanner.freeze(); + // Use a Spanner instance with no initial sessions in the pool to show that getting a session + // from the pool and then preparing a query is non-blocking (i.e. does not wait on a reply from + // the server). 
+ DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet rs = client.singleUse().executeQuery(SELECT1)) { + mockSpanner.unfreeze(); + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(0)).isEqualTo(1L); + assertThat(rs.next()).isFalse(); + } + } + + @Test + public void singleUseAsync() throws Exception { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + final AtomicInteger rowCount = new AtomicInteger(); + ApiFuture res; + try (AsyncResultSet rs = client.singleUse().executeQueryAsync(SELECT1)) { + res = + rs.setCallback( + executor, + resultSet -> { + while (true) { + switch (resultSet.tryNext()) { + case OK: + rowCount.incrementAndGet(); + break; + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + } + } + }); + } + res.get(); + assertThat(rowCount.get()).isEqualTo(1); + } + + @Test + public void singleUseAsyncWithoutCallback() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + int rowCount = 0; + try (AsyncResultSet rs = client.singleUse().executeQueryAsync(SELECT1)) { + while (rs.next()) { + rowCount++; + } + } + assertThat(rowCount).isEqualTo(1); + } + + @Test + public void singleUseBound() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet rs = + client + .singleUse(TimestampBound.ofExactStaleness(15L, TimeUnit.SECONDS)) + .executeQuery(SELECT1)) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(0)).isEqualTo(1L); + assertThat(rs.next()).isFalse(); + } + } + + @Test + public void singleUseBoundIsNonBlocking() { + mockSpanner.freeze(); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet rs = + client + 
.singleUse(TimestampBound.ofExactStaleness(15L, TimeUnit.SECONDS)) + .executeQuery(SELECT1)) { + mockSpanner.unfreeze(); + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(0)).isEqualTo(1L); + assertThat(rs.next()).isFalse(); + } + } + + @Test + public void singleUseBoundAsync() throws Exception { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + final AtomicInteger rowCount = new AtomicInteger(); + ApiFuture res; + try (AsyncResultSet rs = + client + .singleUse(TimestampBound.ofExactStaleness(15L, TimeUnit.SECONDS)) + .executeQueryAsync(SELECT1)) { + res = + rs.setCallback( + executor, + resultSet -> { + while (true) { + switch (resultSet.tryNext()) { + case OK: + rowCount.incrementAndGet(); + break; + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + } + } + }); + } + res.get(); + assertThat(rowCount.get()).isEqualTo(1); + } + + @Test + public void singleUseTransaction() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet rs = client.singleUseReadOnlyTransaction().executeQuery(SELECT1)) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(0)).isEqualTo(1L); + assertThat(rs.next()).isFalse(); + } + } + + @Test + public void singleUseTransactionIsNonBlocking() { + mockSpanner.freeze(); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet rs = client.singleUseReadOnlyTransaction().executeQuery(SELECT1)) { + mockSpanner.unfreeze(); + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(0)).isEqualTo(1L); + assertThat(rs.next()).isFalse(); + } + } + + @Test + public void singleUseTransactionBound() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet rs = + client + 
.singleUseReadOnlyTransaction(TimestampBound.ofExactStaleness(15L, TimeUnit.SECONDS)) + .executeQuery(SELECT1)) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(0)).isEqualTo(1L); + assertThat(rs.next()).isFalse(); + } + } + + @Test + public void singleUseTransactionBoundIsNonBlocking() { + mockSpanner.freeze(); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet rs = + client + .singleUseReadOnlyTransaction(TimestampBound.ofExactStaleness(15L, TimeUnit.SECONDS)) + .executeQuery(SELECT1)) { + mockSpanner.unfreeze(); + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(0)).isEqualTo(1L); + assertThat(rs.next()).isFalse(); + } + } + + @Test + public void readOnlyTransaction() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ReadOnlyTransaction tx = client.readOnlyTransaction()) { + try (ResultSet rs = tx.executeQuery(SELECT1)) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(0)).isEqualTo(1L); + assertThat(rs.next()).isFalse(); + } + } + } + + @Test + public void readOnlyTransactionIsNonBlocking() { + mockSpanner.freeze(); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ReadOnlyTransaction tx = client.readOnlyTransaction()) { + try (ResultSet rs = tx.executeQuery(SELECT1)) { + mockSpanner.unfreeze(); + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(0)).isEqualTo(1L); + assertThat(rs.next()).isFalse(); + } + } + } + + @Test + public void readOnlyTransactionBound() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ReadOnlyTransaction tx = + client.readOnlyTransaction(TimestampBound.ofExactStaleness(15L, TimeUnit.SECONDS))) { + try (ResultSet rs = tx.executeQuery(SELECT1)) { + assertThat(rs.next()).isTrue(); + 
assertThat(rs.getLong(0)).isEqualTo(1L); + assertThat(rs.next()).isFalse(); + } + } + } + + @Test + public void readOnlyTransactionBoundIsNonBlocking() { + mockSpanner.freeze(); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ReadOnlyTransaction tx = + client.readOnlyTransaction(TimestampBound.ofExactStaleness(15L, TimeUnit.SECONDS))) { + try (ResultSet rs = tx.executeQuery(SELECT1)) { + mockSpanner.unfreeze(); + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(0)).isEqualTo(1L); + assertThat(rs.next()).isFalse(); + } + } + } + + @Test + public void testReadWriteTransaction() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + TransactionRunner runner = client.readWriteTransaction(); + runner.run( + transaction -> { + transaction.executeUpdate(UPDATE_STATEMENT); + return null; + }); + assertNotNull(runner.getCommitTimestamp()); + } + + @Test + public void testReadWriteTransaction_returnsCommitStats() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + TransactionRunner runner = client.readWriteTransaction(Options.commitStats()); + runner.run( + transaction -> { + transaction.buffer(Mutation.delete("FOO", Key.of("foo"))); + return null; + }); + assertNotNull(runner.getCommitResponse()); + assertNotNull(runner.getCommitResponse().getCommitStats()); + assertEquals(1L, runner.getCommitResponse().getCommitStats().getMutationCount()); + } + + @Test + public void readWriteTransactionIsNonBlocking() { + mockSpanner.freeze(); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + TransactionRunner runner = client.readWriteTransaction(); + // The runner.run(...) method cannot be made non-blocking, as it returns the result of the + // transaction. 
+ mockSpanner.unfreeze(); + runner.run( + transaction -> { + transaction.executeUpdate(UPDATE_STATEMENT); + return null; + }); + } + + @Test + public void testRunAsync() throws Exception { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + ExecutorService executor = Executors.newSingleThreadExecutor(); + AsyncRunner runner = client.runAsync(); + ApiFuture result = + runner.runAsync( + txn -> ApiFutures.immediateFuture(txn.executeUpdate(UPDATE_STATEMENT)), executor); + assertEquals(UPDATE_COUNT, result.get().longValue()); + assertNotNull(runner.getCommitTimestamp().get()); + executor.shutdown(); + } + + @Test + public void testRunAsync_returnsCommitStats() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + ExecutorService executor = Executors.newSingleThreadExecutor(); + AsyncRunner runner = client.runAsync(Options.commitStats()); + ApiFuture result = + runner.runAsync( + txn -> { + txn.buffer(Mutation.delete("FOO", Key.of("foo"))); + return ApiFutures.immediateFuture(null); + }, + executor); + assertNull(get(result)); + assertNotNull(get(runner.getCommitResponse())); + assertNotNull(get(runner.getCommitResponse()).getCommitStats()); + assertEquals(1L, get(runner.getCommitResponse()).getCommitStats().getMutationCount()); + executor.shutdown(); + } + + @Test + public void runAsyncIsNonBlocking() throws Exception { + mockSpanner.freeze(); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + ExecutorService executor = Executors.newSingleThreadExecutor(); + AsyncRunner runner = client.runAsync(); + ApiFuture fut = + runner.runAsync( + txn -> ApiFutures.immediateFuture(txn.executeUpdate(UPDATE_STATEMENT)), executor); + mockSpanner.unfreeze(); + assertThat(fut.get()).isEqualTo(UPDATE_COUNT); + executor.shutdown(); + } + + @Test + public void runAsyncWithException() throws 
Exception { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + ExecutorService executor = Executors.newSingleThreadExecutor(); + AsyncRunner runner = client.runAsync(); + ApiFuture fut = + runner.runAsync( + txn -> ApiFutures.immediateFuture(txn.executeUpdate(INVALID_UPDATE_STATEMENT)), + executor); + + ExecutionException e = assertThrows(ExecutionException.class, fut::get); + assertThat(e.getCause()).isInstanceOf(SpannerException.class); + SpannerException se = (SpannerException) e.getCause(); + assertThat(se.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + + executor.shutdown(); + } + + @Test + public void testTransactionManager() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (TransactionManager manager = client.transactionManager()) { + TransactionContext transaction = manager.begin(); + while (true) { + try { + transaction.executeUpdate(UPDATE_STATEMENT); + manager.commit(); + assertNotNull(manager.getCommitTimestamp()); + break; + } catch (AbortedException e) { + transaction = manager.resetForRetry(); + } + } + } + } + + @Test + public void testTransactionManager_returnsCommitStats() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (TransactionManager manager = client.transactionManager(Options.commitStats())) { + TransactionContext transaction = manager.begin(); + while (true) { + try { + transaction.buffer(Mutation.delete("FOO", Key.of("foo"))); + manager.commit(); + assertNotNull(manager.getCommitResponse()); + assertNotNull(manager.getCommitResponse().getCommitStats()); + assertEquals(1L, manager.getCommitResponse().getCommitStats().getMutationCount()); + break; + } catch (AbortedException e) { + transaction = manager.resetForRetry(); + } + } + } + } + + @Test + public void transactionManagerIsNonBlocking() throws Exception { + 
mockSpanner.freeze(); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (TransactionManager txManager = client.transactionManager()) { + mockSpanner.unfreeze(); + TransactionContext transaction = txManager.begin(); + while (true) { + try { + transaction.executeUpdate(UPDATE_STATEMENT); + txManager.commit(); + break; + } catch (AbortedException e) { + //noinspection BusyWait + Thread.sleep(e.getRetryDelayInMillis()); + transaction = txManager.resetForRetry(); + } + } + } + } + + @Test + public void transactionManagerExecuteQueryAsync() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + final AtomicInteger rowCount = new AtomicInteger(); + try (TransactionManager txManager = client.transactionManager()) { + TransactionContext transaction = txManager.begin(); + while (true) { + try { + try (AsyncResultSet rs = transaction.executeQueryAsync(SELECT1)) { + rs.setCallback( + executor, + resultSet -> { + try { + while (true) { + switch (resultSet.tryNext()) { + case OK: + rowCount.incrementAndGet(); + break; + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + } + } + } catch (Throwable t) { + return CallbackResponse.DONE; + } + }); + } + txManager.commit(); + break; + } catch (AbortedException e) { + transaction = txManager.resetForRetry(); + } + } + } + assertThat(rowCount.get()).isEqualTo(1); + } + + /** + * Test that the update statement can be executed as a partitioned transaction that returns a + * lower bound update count. + */ + @Test + public void testExecutePartitionedDml() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + long updateCount = client.executePartitionedUpdate(UPDATE_STATEMENT); + assertThat(updateCount).isEqualTo(UPDATE_COUNT); + } + + /** {@link AbortedException} should automatically be retried. 
*/ + @Test + public void testExecutePartitionedDmlAborted() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + mockSpanner.abortNextTransaction(); + long updateCount = client.executePartitionedUpdate(UPDATE_STATEMENT); + assertThat(updateCount).isEqualTo(UPDATE_COUNT); + } + + /** + * A valid query that returns a {@link ResultSet} should not be accepted by a partitioned dml + * transaction. + */ + @Test(expected = SpannerException.class) + public void testExecutePartitionedDmlWithQuery() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + client.executePartitionedUpdate(SELECT1); + } + + /** Server side exceptions that are not {@link AbortedException}s should propagate to the user. */ + @Test(expected = SpannerException.class) + public void testExecutePartitionedDmlWithException() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + client.executePartitionedUpdate(INVALID_UPDATE_STATEMENT); + } + + @Test + public void testPartitionedDmlDoesNotTimeout() { + mockSpanner.setExecuteSqlExecutionTime(SimulatedExecutionTime.ofMinimumAndRandomTime(20, 0)); + final RetrySettings retrySettings = + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(1L)) + .setMaxRpcTimeoutDuration(Duration.ofMillis(1L)) + .setMaxAttempts(1) + .setTotalTimeoutDuration(Duration.ofMillis(1L)) + .build(); + SpannerOptions.Builder builder = + SpannerOptions.newBuilder() + .setProjectId(TEST_PROJECT) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://localhost:" + server.getPort()) + .setCredentials(NoCredentials.getInstance()); + // Set normal DML timeout value. 
+ builder.getSpannerStubSettingsBuilder().executeSqlSettings().setRetrySettings(retrySettings); + try (Spanner spanner = builder.build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + + assertThat(spanner.getOptions().getPartitionedDmlTimeoutDuration()) + .isEqualTo(Duration.ofHours(2L)); + + // PDML should not timeout with these settings. + long updateCount = client.executePartitionedUpdate(UPDATE_STATEMENT); + assertThat(updateCount).isEqualTo(UPDATE_COUNT); + + // Normal DML should timeout. + SpannerException e = + assertThrows( + SpannerException.class, + () -> + client + .readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate(UPDATE_STATEMENT); + return null; + })); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + + long NON_DETERMINISTIC = XGoogSpannerRequestIdTest.NON_DETERMINISTIC; + XGoogSpannerRequestIdTest.MethodAndRequestId[] wantStreamingValues = { + XGoogSpannerRequestIdTest.ofMethodAndRequestId( + "google.spanner.v1.Spanner/ExecuteStreamingSql", + new XGoogSpannerRequestId(NON_DETERMINISTIC, NON_DETERMINISTIC, 6, 1)), + }; + if (false) { // TODO(@odeke-em): enable in next PRs. + xGoogReqIdInterceptor.checkExpectedStreamingXGoogRequestIds(wantStreamingValues); + } + + XGoogSpannerRequestIdTest.MethodAndRequestId[] wantUnaryValues = { + XGoogSpannerRequestIdTest.ofMethodAndRequestId( + "google.spanner.v1.Spanner/BeginTransaction", + new XGoogSpannerRequestId(NON_DETERMINISTIC, NON_DETERMINISTIC, 7, 1)), + XGoogSpannerRequestIdTest.ofMethodAndRequestId( + "google.spanner.v1.Spanner/CreateSession", + new XGoogSpannerRequestId(NON_DETERMINISTIC, 0, 1, 1)), + XGoogSpannerRequestIdTest.ofMethodAndRequestId( + "google.spanner.v1.Spanner/ExecuteSql", + new XGoogSpannerRequestId(NON_DETERMINISTIC, NON_DETERMINISTIC, 8, 1)), + }; + if (false) { // TODO(@odeke-em): enable in next PRs. 
+ xGoogReqIdInterceptor.checkExpectedUnaryXGoogRequestIdsAsSuffixes(wantUnaryValues); + } + } + } + + @Test + public void testPartitionedDmlWithLowerTimeout() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(1000, 0)); + SpannerOptions.Builder builder = + SpannerOptions.newBuilder() + .setProjectId(TEST_PROJECT) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://localhost:" + server.getPort()) + .setCredentials(NoCredentials.getInstance()); + // Set PDML timeout value. + builder.setPartitionedDmlTimeoutDuration(Duration.ofMillis(10L)); + try (Spanner spanner = builder.build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + assertThat(spanner.getOptions().getPartitionedDmlTimeoutDuration()) + .isEqualTo(Duration.ofMillis(10L)); + // PDML should time out with these settings. + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(1000, 0)); + SpannerException e = + assertThrows( + SpannerException.class, () -> client.executePartitionedUpdate(UPDATE_STATEMENT)); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + + // Normal DML should not time out. 
+ mockSpanner.setExecuteSqlExecutionTime(SimulatedExecutionTime.ofMinimumAndRandomTime(10, 0)); + Long updateCount = + client + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(UPDATE_STATEMENT)); + assertThat(updateCount).isEqualTo(UPDATE_COUNT); + } + } + + @Test + public void testPartitionedDmlWithHigherTimeout() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(100, 0)); + SpannerOptions.Builder builder = + SpannerOptions.newBuilder() + .setProjectId(TEST_PROJECT) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://localhost:" + server.getPort()) + .setCredentials(NoCredentials.getInstance()); + // Set PDML timeout value to a value that should allow the statement to be executed. + builder.setPartitionedDmlTimeoutDuration(Duration.ofMillis(5000L)); + // Set the ExecuteSql RPC timeout value to a value lower than the time needed to execute the + // statement. The higher timeout value that is set above should be respected, and the value for + // the ExecuteSQL RPC should be ignored specifically for Partitioned DML. + builder + .getSpannerStubSettingsBuilder() + .executeSqlSettings() + .setRetrySettings( + builder + .getSpannerStubSettingsBuilder() + .executeSqlSettings() + .getRetrySettings() + .toBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(10L)) + .setMaxRpcTimeoutDuration(Duration.ofMillis(10L)) + .setInitialRetryDelayDuration(Duration.ofMillis(1L)) + .setMaxRetryDelayDuration(Duration.ofMillis(1L)) + .build()); + try (Spanner spanner = builder.build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + // PDML should not timeout with these settings. + long updateCount = client.executePartitionedUpdate(UPDATE_STATEMENT); + + // Normal DML should timeout as it should use the ExecuteSQL RPC settings. 
+ mockSpanner.setExecuteSqlExecutionTime(SimulatedExecutionTime.ofMinimumAndRandomTime(100, 0)); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + client + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(UPDATE_STATEMENT))); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.DEADLINE_EXCEEDED); + assertThat(updateCount).isEqualTo(UPDATE_COUNT); + + long NON_DETERMINISTIC = XGoogSpannerRequestIdTest.NON_DETERMINISTIC; + XGoogSpannerRequestIdTest.MethodAndRequestId[] wantStreamingValues = { + XGoogSpannerRequestIdTest.ofMethodAndRequestId( + "google.spanner.v1.Spanner/ExecuteStreamingSql", + new XGoogSpannerRequestId(NON_DETERMINISTIC, NON_DETERMINISTIC, 6, 1)), + }; + + if (false) { // TODO(@odeke-em): enable in next PRs. + xGoogReqIdInterceptor.checkExpectedStreamingXGoogRequestIds(wantStreamingValues); + } + + XGoogSpannerRequestIdTest.MethodAndRequestId[] wantUnaryValues = { + XGoogSpannerRequestIdTest.ofMethodAndRequestId( + "google.spanner.v1.Spanner/BeginTransaction", + new XGoogSpannerRequestId(NON_DETERMINISTIC, NON_DETERMINISTIC, 7, 1)), + XGoogSpannerRequestIdTest.ofMethodAndRequestId( + "google.spanner.v1.Spanner/CreateSession", + new XGoogSpannerRequestId(NON_DETERMINISTIC, 0, 1, 1)), + XGoogSpannerRequestIdTest.ofMethodAndRequestId( + "google.spanner.v1.Spanner/ExecuteSql", + new XGoogSpannerRequestId(NON_DETERMINISTIC, NON_DETERMINISTIC, 8, 1)), + }; + if (false) { // TODO(@odeke-em): enable in next PRs. 
+ xGoogReqIdInterceptor.checkExpectedUnaryXGoogRequestIdsAsSuffixes(wantUnaryValues); + } + } + } + + @Test + public void testPartitionedDmlRetriesOnUnavailable() { + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofException(Status.UNAVAILABLE.asRuntimeException())); + SpannerOptions.Builder builder = + SpannerOptions.newBuilder() + .setProjectId(TEST_PROJECT) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://localhost:" + server.getPort()) + .setCredentials(NoCredentials.getInstance()); + try (Spanner spanner = builder.build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + long updateCount = client.executePartitionedUpdate(UPDATE_STATEMENT); + assertThat(updateCount).isEqualTo(UPDATE_COUNT); + } + } + + @Test + public void testDatabaseOrInstanceDoesNotExistOnInitialization() throws Exception { + StatusRuntimeException[] exceptions = + new StatusRuntimeException[] { + SpannerExceptionFactoryTest.newStatusResourceNotFoundException( + "Database", SpannerExceptionFactory.DATABASE_RESOURCE_TYPE, DATABASE_NAME), + SpannerExceptionFactoryTest.newStatusResourceNotFoundException( + "Instance", SpannerExceptionFactory.INSTANCE_RESOURCE_TYPE, INSTANCE_NAME) + }; + for (StatusRuntimeException exception : exceptions) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(TEST_PROJECT) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://localhost:" + server.getPort()) + .setCredentials(NoCredentials.getInstance()) + .build() + .getService()) { + mockSpanner.setCreateSessionExecutionTime( + SimulatedExecutionTime.ofStickyException(exception)); + DatabaseClientImpl dbClient = + (DatabaseClientImpl) + spanner.getDatabaseClient( + DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + // Wait until session creation has finished. 
+ Stopwatch watch = Stopwatch.createStarted(); + while (watch.elapsed(TimeUnit.SECONDS) < 5 + && dbClient.multiplexedSessionDatabaseClient.isValid()) { + //noinspection BusyWait + Thread.sleep(1L); + } + // All session creation should fail and stop trying. + assertFalse(dbClient.isValid()); + mockSpanner.reset(); + mockSpanner.removeAllExecutionTimes(); + } + } + } + + @Test + public void testDatabaseOrInstanceDoesNotExistOnCreate() { + for (Duration waitForMinSessions : ImmutableList.of(Duration.ZERO, Duration.ofSeconds(5L))) { + StatusRuntimeException[] exceptions = + new StatusRuntimeException[] { + SpannerExceptionFactoryTest.newStatusResourceNotFoundException( + "Database", SpannerExceptionFactory.DATABASE_RESOURCE_TYPE, DATABASE_NAME), + SpannerExceptionFactoryTest.newStatusResourceNotFoundException( + "Instance", SpannerExceptionFactory.INSTANCE_RESOURCE_TYPE, INSTANCE_NAME) + }; + for (StatusRuntimeException exception : exceptions) { + mockSpanner.setCreateSessionExecutionTime( + SimulatedExecutionTime.ofStickyException(exception)); + mockSpanner.setBatchCreateSessionsExecutionTime( + SimulatedExecutionTime.ofStickyException(exception)); + // Ensure there are no sessions in the pool by default. 
+ try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(TEST_PROJECT) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://localhost:" + server.getPort()) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setMinSessions(0) + .setWaitForMinSessionsDuration(waitForMinSessions) + .build()) + .build() + .getService()) { + DatabaseId databaseId = DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE); + if (!waitForMinSessions.isZero()) { + assertThrows( + ResourceNotFoundException.class, () -> spanner.getDatabaseClient(databaseId)); + } else { + // Freeze the server until we try to execute ResultSet#next() to prevent the creation of + // a multiplexed session to finish before we try to use it. + mockSpanner.freeze(); + DatabaseClientImpl dbClient = + (DatabaseClientImpl) spanner.getDatabaseClient(databaseId); + // The CreateSession / BatchCreateSessions failure should propagate to the client and + // not retry. + try (ResultSet rs = dbClient.singleUse().executeQuery(SELECT1)) { + mockSpanner.unfreeze(); + assertThrows(ResourceNotFoundException.class, rs::next); + // The server should only receive one BatchCreateSessions request. + assertThat(mockSpanner.getRequests()).hasSize(1); + } + assertThrows( + ResourceNotFoundException.class, + () -> + dbClient + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(UPDATE_STATEMENT))); + // No additional requests should have been sent by the client. + assertThat(mockSpanner.getRequests()).hasSize(1); + } + } + mockSpanner.reset(); + mockSpanner.removeAllExecutionTimes(); + } + } + } + + /** + * Test showing that when a database is deleted while it is in use by a database client and then + * re-created with the same name, will continue to return {@link DatabaseNotFoundException}s until + * a new {@link DatabaseClient} is created. 
+ */ + @Test + public void testDatabaseOrInstanceIsDeletedAndThenRecreated() throws Exception { + StatusRuntimeException[] exceptions = + new StatusRuntimeException[] { + SpannerExceptionFactoryTest.newStatusResourceNotFoundException( + "Database", SpannerExceptionFactory.DATABASE_RESOURCE_TYPE, DATABASE_NAME), + SpannerExceptionFactoryTest.newStatusResourceNotFoundException( + "Instance", SpannerExceptionFactory.INSTANCE_RESOURCE_TYPE, INSTANCE_NAME) + }; + for (StatusRuntimeException exception : exceptions) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(TEST_PROJECT) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://localhost:" + server.getPort()) + .setCredentials(NoCredentials.getInstance()) + .build() + .getService()) { + DatabaseClientImpl dbClient = + (DatabaseClientImpl) + spanner.getDatabaseClient( + DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + // Wait until all sessions have been created and prepared. + Stopwatch watch = Stopwatch.createStarted(); + while (watch.elapsed(TimeUnit.SECONDS) < 5 + && (dbClient.multiplexedSessionDatabaseClient.getCurrentSessionReference() == null)) { + //noinspection BusyWait + Thread.sleep(1L); + } + // Simulate that the database or instance has been deleted. + mockSpanner.setStickyGlobalExceptions(true); + mockSpanner.addException(exception); + + // All subsequent calls should fail with a DatabaseNotFoundException. + try (ResultSet rs = dbClient.singleUse().executeQuery(SELECT1)) { + assertThrows(ResourceNotFoundException.class, rs::next); + } + assertThrows( + ResourceNotFoundException.class, + () -> dbClient.readWriteTransaction().run(transaction -> null)); + + // Now simulate that the database has been re-created. The database client should still + // throw DatabaseNotFoundExceptions, as it is not the same database. The server should not + // receive any new requests. 
+ mockSpanner.reset(); + // All subsequent calls should fail with a DatabaseNotFoundException. + + assertThat(mockSpanner.getRequests()).isEmpty(); + // Now get a new database client. Normally multiple calls to Spanner#getDatabaseClient will + // return the same instance, but not when the instance has been invalidated by a + // DatabaseNotFoundException. + DatabaseClientImpl newClient = + (DatabaseClientImpl) + spanner.getDatabaseClient( + DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + assertThat(newClient).isNotSameInstanceAs(dbClient); + // Executing a query should now work without problems. + try (ResultSet rs = newClient.singleUse().executeQuery(SELECT1)) { + consumeResults(rs); + } + assertThat(mockSpanner.getRequests()).isNotEmpty(); + } + mockSpanner.reset(); + mockSpanner.removeAllExecutionTimes(); + } + } + + @Test + public void testGetInvalidatedClientMultipleTimes() { + StatusRuntimeException[] exceptions = + new StatusRuntimeException[] { + SpannerExceptionFactoryTest.newStatusResourceNotFoundException( + "Database", SpannerExceptionFactory.DATABASE_RESOURCE_TYPE, DATABASE_NAME), + SpannerExceptionFactoryTest.newStatusResourceNotFoundException( + "Instance", SpannerExceptionFactory.INSTANCE_RESOURCE_TYPE, INSTANCE_NAME) + }; + for (StatusRuntimeException exception : exceptions) { + mockSpanner.setCreateSessionExecutionTime( + SimulatedExecutionTime.ofStickyException(exception)); + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(TEST_PROJECT) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://localhost:" + server.getPort()) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption(SessionPoolOptions.newBuilder().setMinSessions(0).build()) + .build() + .getService()) { + for (int run = 0; run < 2; run++) { + DatabaseClientImpl dbClient = + (DatabaseClientImpl) + spanner.getDatabaseClient( + DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + for (int useClient = 0; 
useClient < 2; useClient++) { + // The multiplexed session client tries to create a new session at every attempt. + assertThrows( + ResourceNotFoundException.class, + () -> dbClient.singleUse().executeQuery(SELECT1).next()); + // We should only receive 1 CreateSession request per attempt. + // The query should never be executed, as the session creation fails before it gets to + // executing a query. + assertEquals(run + 1, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + } + } + } + mockSpanner.reset(); + mockSpanner.removeAllExecutionTimes(); + } + } + + @Test + public void testAllowNestedTransactions() throws InterruptedException { + final DatabaseClientImpl client = + (DatabaseClientImpl) + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + // Wait until all sessions have been created. + final int minSessions = spanner.getOptions().getSessionPoolOptions().getMinSessions(); + Stopwatch watch = Stopwatch.createStarted(); + while (watch.elapsed(TimeUnit.SECONDS) < 5 + && client.multiplexedSessionDatabaseClient.getCurrentSessionReference() == null) { + //noinspection BusyWait + Thread.sleep(1L); + } + Long res = + client + .readWriteTransaction() + .allowNestedTransaction() + .run( + transaction -> { + return transaction.executeUpdate(UPDATE_STATEMENT); + }); + assertThat(res).isEqualTo(UPDATE_COUNT); + } + + @Test + public void testNestedTransactionsUsingTwoDatabases() throws InterruptedException { + final DatabaseClientImpl client1 = + (DatabaseClientImpl) + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, "my-database-1")); + final DatabaseClientImpl client2 = + (DatabaseClientImpl) + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, "my-database-2")); + // Wait until all sessions have been created so we can actually check the number of sessions + // checked out of the pools. 
+ final int minSessions = spanner.getOptions().getSessionPoolOptions().getMinSessions(); + Stopwatch watch = Stopwatch.createStarted(); + while (watch.elapsed(TimeUnit.SECONDS) < 5 + && (client1.multiplexedSessionDatabaseClient.getCurrentSessionReference() == null + || client2.multiplexedSessionDatabaseClient.getCurrentSessionReference() == null)) { + //noinspection BusyWait + Thread.sleep(1L); + } + Long res = + client1 + .readWriteTransaction() + .allowNestedTransaction() + .run( + transaction -> { + Long add = + client2 + .readWriteTransaction() + .run( + transaction1 -> { + try (ResultSet rs = transaction1.executeQuery(SELECT1)) { + if (rs.next()) { + return rs.getLong(0); + } + return 0L; + } + }); + assertNotNull(add); + try (ResultSet rs = transaction.executeQuery(SELECT1)) { + if (rs.next()) { + return add + rs.getLong(0); + } + return add; + } + }); + assertThat(res).isEqualTo(2L); + } + + @Test + public void testBackendQueryOptions() { + // Use a Spanner instance with MinSession=0 to prevent background requests + // from the session pool interfering with the test case. + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://localhost:" + server.getPort()) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption(SessionPoolOptions.newBuilder().setMinSessions(0).build()) + .build() + .getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE")); + try (ResultSet rs = + client + .singleUse() + .executeQuery( + Statement.newBuilder(SELECT1.getSql()) + .withQueryOptions( + QueryOptions.newBuilder() + .setOptimizerVersion("1") + .setOptimizerStatisticsPackage("custom-package") + .build()) + .build())) { + // Just iterate over the results to execute the query. 
+ consumeResults(rs); + } + // Check that the last query was executed using a custom optimizer version and statistics + // package. + List requests = mockSpanner.getRequests(); + assertThat(requests).isNotEmpty(); + assertThat(requests.get(requests.size() - 1)).isInstanceOf(ExecuteSqlRequest.class); + ExecuteSqlRequest request = (ExecuteSqlRequest) requests.get(requests.size() - 1); + assertThat(request.getQueryOptions()).isNotNull(); + assertThat(request.getQueryOptions().getOptimizerVersion()).isEqualTo("1"); + assertThat(request.getQueryOptions().getOptimizerStatisticsPackage()) + .isEqualTo("custom-package"); + } + } + + @Test + public void testBackendQueryOptionsWithAnalyzeQuery() { + // Use a Spanner instance with MinSession=0 to prevent background requests + // from the session pool interfering with the test case. + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://localhost:" + server.getPort()) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption(SessionPoolOptions.newBuilder().setMinSessions(0).build()) + .build() + .getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE")); + try (ReadOnlyTransaction tx = client.readOnlyTransaction()) { + try (ResultSet rs = + tx.analyzeQuery( + Statement.newBuilder(SELECT1.getSql()) + .withQueryOptions( + QueryOptions.newBuilder() + .setOptimizerVersion("1") + .setOptimizerStatisticsPackage("custom-package") + .build()) + .build(), + QueryAnalyzeMode.PROFILE)) { + // Just iterate over the results to execute the query. + consumeResults(rs); + } + } + // Check that the last query was executed using a custom optimizer version and statistics + // package. 
+ List requests = mockSpanner.getRequests(); + assertThat(requests).isNotEmpty(); + assertThat(requests.get(requests.size() - 1)).isInstanceOf(ExecuteSqlRequest.class); + ExecuteSqlRequest request = (ExecuteSqlRequest) requests.get(requests.size() - 1); + assertThat(request.getQueryOptions()).isNotNull(); + assertThat(request.getQueryOptions().getOptimizerVersion()).isEqualTo("1"); + assertThat(request.getQueryOptions().getOptimizerStatisticsPackage()) + .isEqualTo("custom-package"); + assertThat(request.getQueryMode()).isEqualTo(QueryMode.PROFILE); + } + } + + @Test + public void testBackendPartitionQueryOptions() { + // Use a Spanner instance with MinSession=0 to prevent background requests + // from the session pool interfering with the test case. + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://localhost:" + server.getPort()) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption(SessionPoolOptions.newBuilder().setMinSessions(0).build()) + .setDirectedReadOptions(DIRECTED_READ_OPTIONS2) + .build() + .getService()) { + BatchClient client = + spanner.getBatchClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE")); + BatchReadOnlyTransaction transaction = + client.batchReadOnlyTransaction(TimestampBound.strong()); + List partitions = + transaction.partitionQuery( + PartitionOptions.newBuilder().setMaxPartitions(10L).build(), + Statement.newBuilder(SELECT1.getSql()) + .withQueryOptions( + QueryOptions.newBuilder() + .setOptimizerVersion("1") + .setOptimizerStatisticsPackage("custom-package") + .build()) + .build(), + Options.directedRead(DIRECTED_READ_OPTIONS1)); + try (ResultSet rs = transaction.execute(partitions.get(0))) { + // Just iterate over the results to execute the query. 
+ consumeResults(rs); + } finally { + transaction.cleanup(); + } + // Check that the last request is an ExecuteSqlRequest and that it was executed using the + // custom optimizer version, statistics package and directed read options. + List requests = mockSpanner.getRequests(); + assert requests.size() >= 2 : "required to have at least 2 requests"; + assertThat(requests.get(requests.size() - 1)).isInstanceOf(ExecuteSqlRequest.class); + ExecuteSqlRequest executeSqlRequest = (ExecuteSqlRequest) requests.get(requests.size() - 1); + assertThat(executeSqlRequest.getQueryOptions()).isNotNull(); + assertThat(executeSqlRequest.getQueryOptions().getOptimizerVersion()).isEqualTo("1"); + assertThat(executeSqlRequest.getQueryOptions().getOptimizerStatisticsPackage()) + .isEqualTo("custom-package"); + assertThat(executeSqlRequest.getDirectedReadOptions()).isEqualTo(DIRECTED_READ_OPTIONS1); + } + } + + @Test + public void + testBackendPartitionQueryOptions_whenDirectedReadOptionsViaSpannerOptions_assertOptions() { + // Use a Spanner instance with MinSession=0 to prevent background requests + // from the session pool interfering with the test case. 
+ try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://localhost:" + server.getPort()) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption(SessionPoolOptions.newBuilder().setMinSessions(0).build()) + .setDirectedReadOptions(DIRECTED_READ_OPTIONS2) + .build() + .getService()) { + BatchClient client = + spanner.getBatchClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE")); + BatchReadOnlyTransaction transaction = + client.batchReadOnlyTransaction(TimestampBound.strong()); + List partitions = + transaction.partitionQuery( + PartitionOptions.newBuilder().setMaxPartitions(10L).build(), + Statement.newBuilder(SELECT1.getSql()) + .withQueryOptions( + QueryOptions.newBuilder() + .setOptimizerVersion("1") + .setOptimizerStatisticsPackage("custom-package") + .build()) + .build()); + try (ResultSet rs = transaction.execute(partitions.get(0))) { + // Just iterate over the results to execute the query. + consumeResults(rs); + } finally { + transaction.cleanup(); + } + // Check that the last request is an ExecuteSqlRequest and that it was executed using the + // custom optimizer version, statistics package and the directed read options that were + // set on SpannerOptions. 
+ List requests = mockSpanner.getRequests(); + assert requests.size() >= 2 : "required to have at least 2 requests"; + assertThat(requests.get(requests.size() - 1)).isInstanceOf(ExecuteSqlRequest.class); + ExecuteSqlRequest executeSqlRequest = (ExecuteSqlRequest) requests.get(requests.size() - 1); + assertThat(executeSqlRequest.getQueryOptions()).isNotNull(); + assertThat(executeSqlRequest.getQueryOptions().getOptimizerVersion()).isEqualTo("1"); + assertThat(executeSqlRequest.getQueryOptions().getOptimizerStatisticsPackage()) + .isEqualTo("custom-package"); + assertThat(executeSqlRequest.getDirectedReadOptions()).isEqualTo(DIRECTED_READ_OPTIONS2); + } + } + + @Test + public void testBackendPartitionReadOptions() { + // Use a Spanner instance with MinSession=0 to prevent background requests + // from the session pool interfering with the test case. + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://localhost:" + server.getPort()) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption(SessionPoolOptions.newBuilder().setMinSessions(0).build()) + .setDirectedReadOptions(DIRECTED_READ_OPTIONS2) + .build() + .getService()) { + BatchClient client = + spanner.getBatchClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE")); + BatchReadOnlyTransaction transaction = + client.batchReadOnlyTransaction(TimestampBound.strong()); + List partitions = + transaction.partitionRead( + PartitionOptions.newBuilder().setMaxPartitions(10L).build(), + "FOO", + KeySet.all(), + Lists.newArrayList("1"), + Options.directedRead(DIRECTED_READ_OPTIONS1)); + try (ResultSet rs = transaction.execute(partitions.get(0))) { + // Just iterate over the results to execute the query. 
+        consumeResults(rs);
+      } finally {
+        transaction.cleanup();
+      }
+      // Check that the last request that was executed is a ReadRequest, and that it was
+      // executed using the directed read options that were passed in for the partitioned
+      // read (and not the directed read options that were set on SpannerOptions).
+      List requests = mockSpanner.getRequests();
+      assert requests.size() >= 2 : "required to have at least 2 requests";
+      assertThat(requests.get(requests.size() - 1)).isInstanceOf(ReadRequest.class);
+      ReadRequest readRequest = (ReadRequest) requests.get(requests.size() - 1);
+      assertThat(readRequest.getDirectedReadOptions()).isEqualTo(DIRECTED_READ_OPTIONS1);
+    }
+  }
+
+  @Test
+  public void
+      testBackendPartitionReadOptions_whenDirectedReadOptionsViaSpannerOptions_assertOptions() {
+    // Use a Spanner instance with MinSession=0 to prevent background requests
+    // from the session pool interfering with the test case.
+    try (Spanner spanner =
+        SpannerOptions.newBuilder()
+            .setProjectId("[PROJECT]")
+            .setChannelConfigurator(ManagedChannelBuilder::usePlaintext)
+            .setHost("http://localhost:" + server.getPort())
+            .setCredentials(NoCredentials.getInstance())
+            .setSessionPoolOption(SessionPoolOptions.newBuilder().setMinSessions(0).build())
+            .setDirectedReadOptions(DIRECTED_READ_OPTIONS2)
+            .build()
+            .getService()) {
+      BatchClient client =
+          spanner.getBatchClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"));
+      BatchReadOnlyTransaction transaction =
+          client.batchReadOnlyTransaction(TimestampBound.strong());
+      List partitions =
+          transaction.partitionRead(
+              PartitionOptions.newBuilder().setMaxPartitions(10L).build(),
+              "FOO",
+              KeySet.all(),
+              Lists.newArrayList("1"));
+      try (ResultSet rs = transaction.execute(partitions.get(0))) {
+        // Just iterate over the results to execute the query.
+ //noinspection StatementWithEmptyBody + while (rs.next()) {} + } finally { + transaction.cleanup(); + } + // Check if the last query executed is a DeleteSessionRequest and the second last query + // executed is a ExecuteSqlRequest and was executed using a custom optimizer version, + // statistics package and directed read options. + List requests = mockSpanner.getRequests(); + assert requests.size() >= 2 : "required to have at least 2 requests"; + assertThat(requests.get(requests.size() - 1)).isInstanceOf(ReadRequest.class); + ReadRequest readRequest = (ReadRequest) requests.get(requests.size() - 1); + assertThat(readRequest.getDirectedReadOptions()).isEqualTo(DIRECTED_READ_OPTIONS2); + } + } + + @Test + public void testAsyncQuery() throws Exception { + final int EXPECTED_ROW_COUNT = 10; + com.google.cloud.spanner.connection.RandomResultSetGenerator generator = + new RandomResultSetGenerator(EXPECTED_ROW_COUNT); + com.google.spanner.v1.ResultSet resultSet = generator.generate(); + mockSpanner.putStatementResult( + StatementResult.query(Statement.of("SELECT * FROM RANDOM"), resultSet)); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + ExecutorService executor = Executors.newSingleThreadExecutor(); + ApiFuture resultSetClosed; + final SettableFuture finished = SettableFuture.create(); + final List receivedResults = new ArrayList<>(); + try (AsyncResultSet rs = + client.singleUse().executeQueryAsync(Statement.of("SELECT * FROM RANDOM"))) { + resultSetClosed = + rs.setCallback( + executor, + asyncResultSet -> { + try { + while (true) { + switch (rs.tryNext()) { + case DONE: + finished.set(true); + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + receivedResults.add(asyncResultSet.getCurrentRowAsStruct()); + break; + default: + throw new IllegalStateException("Unknown cursor state"); + } + } + } catch (Throwable t) { + finished.setException(t); + return 
CallbackResponse.DONE; + } + }); + } + assertThat(finished.get()).isTrue(); + assertThat(receivedResults.size()).isEqualTo(EXPECTED_ROW_COUNT); + resultSetClosed.get(); + } + + @Test + public void testClientIdReusedOnDatabaseNotFound() { + mockSpanner.setBatchCreateSessionsExecutionTime( + SimulatedExecutionTime.ofStickyException( + SpannerExceptionFactoryTest.newStatusResourceNotFoundException( + "my-database", + SpannerExceptionFactory.DATABASE_RESOURCE_TYPE, + "project/my-project/instances/my-instance/databases/my-database"))); + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId("my-project") + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://localhost:" + server.getPort()) + .setCredentials(NoCredentials.getInstance()) + .build() + .getService()) { + DatabaseId databaseId = DatabaseId.of("my-project", "my-instance", "my-database"); + String prevClientId = null; + for (int i = 0; i < 100; i++) { + try { + DatabaseClientImpl client = (DatabaseClientImpl) spanner.getDatabaseClient(databaseId); + if (prevClientId != null) { + assertThat(client.clientId).isEqualTo(prevClientId); + } + prevClientId = client.clientId; + client.singleUse().readRow("MyTable", Key.of(0), Collections.singletonList("MyColumn")); + } catch (Exception e) { + // ignore + } + } + } + } + + @Test + public void testBatchCreateSessionsPermissionDenied() { + for (Duration waitForMinSessions : ImmutableList.of(Duration.ZERO, Duration.ofSeconds(5L))) { + mockSpanner.setBatchCreateSessionsExecutionTime( + SimulatedExecutionTime.ofStickyException( + Status.PERMISSION_DENIED.withDescription("Not permitted").asRuntimeException())); + mockSpanner.setCreateSessionExecutionTime( + SimulatedExecutionTime.ofStickyException( + Status.PERMISSION_DENIED.withDescription("Not permitted").asRuntimeException())); + if (waitForMinSessions.isZero()) { + mockSpanner.freeze(); + } + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId("my-project") + 
.setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://localhost:" + server.getPort()) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setWaitForMinSessionsDuration(waitForMinSessions) + .build()) + .build() + .getService()) { + DatabaseId databaseId = DatabaseId.of("my-project", "my-instance", "my-database"); + SpannerException spannerException; + if (waitForMinSessions.isZero()) { + // The following call is non-blocking and will not generate an exception. + DatabaseClient client = spanner.getDatabaseClient(databaseId); + ResultSet resultSet = client.singleUse().executeQuery(SELECT1); + mockSpanner.unfreeze(); + // Actually trying to get any results will cause an exception. + spannerException = assertThrows(SpannerException.class, resultSet::next); + } else { + // This is blocking when we should wait for min sessions, and will therefore fail. + spannerException = + assertThrows(SpannerException.class, () -> spanner.getDatabaseClient(databaseId)); + } + assertEquals(ErrorCode.PERMISSION_DENIED, spannerException.getErrorCode()); + } finally { + mockSpanner.setBatchCreateSessionsExecutionTime(SimulatedExecutionTime.none()); + mockSpanner.setCreateSessionExecutionTime(SimulatedExecutionTime.none()); + } + } + } + + @Test + public void testExceptionIncludesStatement() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException( + Status.INVALID_ARGUMENT.withDescription("Invalid query").asRuntimeException())); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet rs = + client + .singleUse() + .executeQuery( + Statement.newBuilder("SELECT * FROM FOO WHERE ID=@id").bind("id").to(1L).build())) { + SpannerException e = assertThrows(SpannerException.class, rs::next); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()).contains("Statement: 
'SELECT * FROM FOO WHERE ID=@id'"); + // The error message should normally not include the parameter values to prevent sensitive + // information from accidentally being logged. + assertThat(e.getMessage()).doesNotContain("id: 1"); + } + + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException( + Status.INVALID_ARGUMENT.withDescription("Invalid query").asRuntimeException())); + Logger logger = Logger.getLogger(GrpcStreamIterator.class.getName()); + Level currentLevel = logger.getLevel(); + try (ResultSet rs = + client + .singleUse() + .executeQuery( + Statement.newBuilder("SELECT * FROM FOO WHERE ID=@id").bind("id").to(1L).build())) { + logger.setLevel(Level.FINEST); + SpannerException e = assertThrows(SpannerException.class, rs::next); + // With log level set to FINEST the error should also include the parameter values. + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()).contains("Statement: 'SELECT * FROM FOO WHERE ID=@id {id: 1}'"); + } finally { + logger.setLevel(currentLevel); + } + } + + @Test + public void testReadDoesNotIncludeStatement() { + mockSpanner.setStreamingReadExecutionTime( + SimulatedExecutionTime.ofException( + Status.INVALID_ARGUMENT.withDescription("Invalid read").asRuntimeException())); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet rs = + client.singleUse().read("FOO", KeySet.singleKey(Key.of(1L)), ImmutableList.of("BAR"))) { + SpannerException e = assertThrows(SpannerException.class, rs::next); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()).doesNotContain("Statement:"); + } + } + + @Test + public void testSpecificTimeout() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(10000, 0)); + final DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, 
TEST_INSTANCE, TEST_DATABASE)); + Context.current() + .withValue( + SpannerOptions.CALL_CONTEXT_CONFIGURATOR_KEY, + SpannerCallContextTimeoutConfigurator.create() + .withExecuteQueryTimeoutDuration(Duration.ofNanos(1L))) + .run( + () -> { + // Query should fail with a timeout. + try (ResultSet rs = client.singleUse().executeQuery(SELECT1)) { + SpannerException e = assertThrows(SpannerException.class, rs::next); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.DEADLINE_EXCEEDED); + } + // Update should succeed. + client + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(UPDATE_STATEMENT)); + }); + } + + @Test + public void testCreateSessionsFailure_shouldNotPropagateToCloseMethod() { + try { + // Simulate session creation failures on the backend. + mockSpanner.setCreateSessionExecutionTime( + SimulatedExecutionTime.ofStickyException(Status.PERMISSION_DENIED.asRuntimeException())); + // This will not cause any failure as getting a session from the pool is guaranteed to be + // non-blocking, and any exceptions will be delayed until actual query execution. 
+ mockSpanner.freeze(); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet rs = client.singleUse().executeQuery(SELECT1)) { + mockSpanner.unfreeze(); + SpannerException exception = assertThrows(SpannerException.class, rs::next); + assertEquals(ErrorCode.PERMISSION_DENIED, exception.getErrorCode()); + } + } finally { + mockSpanner.setCreateSessionExecutionTime(SimulatedExecutionTime.none()); + } + } + + @Test + public void testExecuteQueryWithPriority() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet resultSet = + client.singleUse().executeQuery(SELECT1, Options.priority(RpcPriority.HIGH))) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) {} + } + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertThat(requests).hasSize(1); + ExecuteSqlRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertEquals(Priority.PRIORITY_HIGH, request.getRequestOptions().getPriority()); + } + + @Test + public void testExecuteReadWithPriority() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet resultSet = + client + .singleUse() + .read( + READ_TABLE_NAME, + KeySet.singleKey(Key.of(1L)), + READ_COLUMN_NAMES, + Options.priority(RpcPriority.HIGH))) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) {} + } + + List requests = mockSpanner.getRequestsOfType(ReadRequest.class); + assertThat(requests).hasSize(1); + ReadRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertEquals(Priority.PRIORITY_HIGH, request.getRequestOptions().getPriority()); + } + + @Test + public void testReadWriteExecuteQueryWithPriority() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, 
TEST_DATABASE)); + TransactionRunner runner = client.readWriteTransaction(); + runner.run( + transaction -> { + try (ResultSet resultSet = + transaction.executeQuery(SELECT1, Options.priority(RpcPriority.HIGH))) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) {} + } + return null; + }); + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertThat(requests).hasSize(1); + ExecuteSqlRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertEquals(Priority.PRIORITY_HIGH, request.getRequestOptions().getPriority()); + } + + @Test + public void testReadWriteExecuteReadWithPriority() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + TransactionRunner runner = client.readWriteTransaction(); + runner.run( + transaction -> { + try (ResultSet resultSet = + transaction.read( + READ_TABLE_NAME, + KeySet.singleKey(Key.of(1L)), + READ_COLUMN_NAMES, + Options.priority(RpcPriority.HIGH))) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) {} + } + return null; + }); + + List requests = mockSpanner.getRequestsOfType(ReadRequest.class); + assertThat(requests).hasSize(1); + ReadRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertEquals(Priority.PRIORITY_HIGH, request.getRequestOptions().getPriority()); + } + + @Test + public void testExecuteUpdateWithPriority() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + TransactionRunner runner = client.readWriteTransaction(); + runner.run( + transaction -> + transaction.executeUpdate(UPDATE_STATEMENT, Options.priority(RpcPriority.HIGH))); + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertThat(requests).hasSize(1); + ExecuteSqlRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertEquals(Priority.PRIORITY_HIGH, 
request.getRequestOptions().getPriority()); + } + + @Test + public void testBatchUpdateWithPriority() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + TransactionRunner runner = client.readWriteTransaction(); + runner.run( + transaction -> + transaction.batchUpdate( + Collections.singletonList(UPDATE_STATEMENT), Options.priority(RpcPriority.HIGH))); + + List requests = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class); + assertThat(requests).hasSize(1); + ExecuteBatchDmlRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertEquals(Priority.PRIORITY_HIGH, request.getRequestOptions().getPriority()); + } + + @Test + public void testPartitionedDMLWithPriority() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + client.executePartitionedUpdate(UPDATE_STATEMENT, Options.priority(RpcPriority.HIGH)); + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertThat(requests).hasSize(1); + ExecuteSqlRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertEquals(Priority.PRIORITY_HIGH, request.getRequestOptions().getPriority()); + } + + @Test + public void testCommitWithPriority() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.commitDeleteTransaction(client, Options.priority(RpcPriority.HIGH)); + + List requests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(requests).hasSize(1); + CommitRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertEquals(Priority.PRIORITY_HIGH, request.getRequestOptions().getPriority()); + } + + @Test + public void testTransactionManagerCommitWithPriority() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, 
TEST_DATABASE)); + MockSpannerTestActions.transactionManagerCommit(client, Options.priority(RpcPriority.HIGH)); + + List requests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(requests).hasSize(1); + CommitRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertEquals(Priority.PRIORITY_HIGH, request.getRequestOptions().getPriority()); + } + + @Test + public void testAsyncRunnerCommitWithPriority() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.asyncRunnerCommit(client, executor, Options.priority(RpcPriority.HIGH)); + + List requests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(requests).hasSize(1); + CommitRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertEquals(Priority.PRIORITY_HIGH, request.getRequestOptions().getPriority()); + } + + @Test + public void testAsyncTransactionManagerCommitWithPriority() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.transactionManagerAsyncCommit( + client, executor, Options.priority(RpcPriority.HIGH)); + + List requests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(requests).hasSize(1); + CommitRequest request = requests.get(0); + assertNotNull(request.getRequestOptions()); + assertEquals(Priority.PRIORITY_HIGH, request.getRequestOptions().getPriority()); + } + + @Test + public void testCommitWithoutMaxCommitDelay() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.commitDeleteTransaction(client); + + List requests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(requests).hasSize(1); + CommitRequest request = requests.get(0); + assertFalse(request.hasMaxCommitDelay()); + } + + @Test + public void 
testCommitWithMaxCommitDelay() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.commitDeleteTransaction( + client, Options.maxCommitDelay(java.time.Duration.ofMillis(100))); + + List requests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(requests).hasSize(1); + CommitRequest request = requests.get(0); + assertNotNull(request.getMaxCommitDelay()); + assertEquals( + com.google.protobuf.Duration.newBuilder().setNanos(100000000).build(), + request.getMaxCommitDelay()); + } + + @Test + public void testTransactionManagerCommitWithMaxCommitDelay() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.transactionManagerCommit( + client, Options.maxCommitDelay(java.time.Duration.ofMillis(100))); + + List requests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(requests).hasSize(1); + CommitRequest request = requests.get(0); + assertNotNull(request.getMaxCommitDelay()); + assertEquals( + com.google.protobuf.Duration.newBuilder().setNanos(100000000).build(), + request.getMaxCommitDelay()); + } + + @Test + public void testAsyncRunnerCommitWithMaxCommitDelay() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.asyncRunnerCommit( + client, executor, Options.maxCommitDelay(java.time.Duration.ofMillis(100))); + + List requests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(requests).hasSize(1); + CommitRequest request = requests.get(0); + assertNotNull(request.getMaxCommitDelay()); + assertEquals( + com.google.protobuf.Duration.newBuilder().setNanos(100000000).build(), + request.getMaxCommitDelay()); + } + + @Test + public void testAsyncTransactionManagerCommitWithMaxCommitDelay() { + DatabaseClient client = + 
spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + MockSpannerTestActions.transactionManagerAsyncCommit( + client, executor, Options.maxCommitDelay(java.time.Duration.ofMillis(100))); + + List requests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(requests).hasSize(1); + CommitRequest request = requests.get(0); + assertNotNull(request.getMaxCommitDelay()); + assertEquals( + com.google.protobuf.Duration.newBuilder().setNanos(100000000).build(), + request.getMaxCommitDelay()); + } + + @Test + public void transactionContextFailsIfUsedMultipleTimes() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + + Function function = + new Function() { + TransactionContext ctx; + + @Override + public Long apply(TransactionContext transactionContext) { + if (ctx == null) { + ctx = transactionContext; + } + try (ResultSet rs = ctx.executeQuery(SELECT1)) { + //noinspection StatementWithEmptyBody + while (rs.next()) {} + } + return 1L; + } + }; + assertEquals(Long.valueOf(1L), client.readWriteTransaction().run(function::apply)); + SpannerException exception = + assertThrows( + SpannerException.class, () -> client.readWriteTransaction().run(function::apply)); + assertTrue(exception.getMessage().contains("Context has been closed")); + } + + @Test + public void testGetDialectDefault() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + assertEquals(Dialect.GOOGLE_STANDARD_SQL, client.getDialect()); + } + + @Test + public void testGetDialectDefaultPreloaded() { + try (Spanner spanner = + this.spanner.getOptions().toBuilder() + .setSessionPoolOption( + SessionPoolOptions.newBuilder().setAutoDetectDialect(true).build()) + .build() + .getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + 
assertEquals(Dialect.GOOGLE_STANDARD_SQL, client.getDialect()); + } + } + + @Test + public void testGetDialectPostgreSQL() { + mockSpanner.putStatementResult(StatementResult.detectDialectResult(Dialect.POSTGRESQL)); + try { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + assertEquals(Dialect.POSTGRESQL, client.getDialect()); + } finally { + mockSpanner.putStatementResult( + StatementResult.detectDialectResult(Dialect.GOOGLE_STANDARD_SQL)); + } + } + + @Test + public void testGetDialectPostgreSQLPreloaded() { + mockSpanner.putStatementResult(StatementResult.detectDialectResult(Dialect.POSTGRESQL)); + try (Spanner spanner = + this.spanner.getOptions().toBuilder() + .setSessionPoolOption( + SessionPoolOptions.newBuilder().setAutoDetectDialect(true).build()) + .build() + .getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + assertEquals(Dialect.POSTGRESQL, client.getDialect()); + } finally { + mockSpanner.putStatementResult( + StatementResult.detectDialectResult(Dialect.GOOGLE_STANDARD_SQL)); + } + } + + @Test + public void testGetDialect_FailsDirectlyIfDatabaseNotFound() { + mockSpanner.setBatchCreateSessionsExecutionTime( + SimulatedExecutionTime.stickyDatabaseNotFoundException("invalid-database")); + mockSpanner.setCreateSessionExecutionTime( + SimulatedExecutionTime.stickyDatabaseNotFoundException("invalid-database")); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + + SpannerException exception = assertThrows(SpannerException.class, client::getDialect); + assertEquals(ErrorCode.NOT_FOUND, exception.getErrorCode()); + assertTrue( + exception + .getMessage() + .contains( + "NOT_FOUND: Database not found: Database with id invalid-database not found")); + } + + @Test + public void testGetDialectDefaultPreloaded_FailsDirectlyIfDatabaseNotFound() { + 
mockSpanner.setBatchCreateSessionsExecutionTime( + SimulatedExecutionTime.stickyDatabaseNotFoundException("invalid-database")); + mockSpanner.setCreateSessionExecutionTime( + SimulatedExecutionTime.stickyDatabaseNotFoundException("invalid-database")); + try (Spanner spanner = + this.spanner.getOptions().toBuilder() + .setSessionPoolOption( + SessionPoolOptions.newBuilder().setAutoDetectDialect(true).build()) + .build() + .getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + SpannerException exception = assertThrows(SpannerException.class, client::getDialect); + assertEquals(ErrorCode.NOT_FOUND, exception.getErrorCode()); + assertTrue( + exception + .getMessage() + .contains( + "NOT_FOUND: Database not found: Database with id invalid-database not found")); + } + } + + @Test + public void testUntypedNullParameters() { + Statement statement = + Statement.newBuilder("INSERT INTO FOO (BAR) VALUES (@p)") + .bind("p") + .to((Value) null) + .build(); + mockSpanner.putStatementResult(StatementResult.update(statement, 1L)); + + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + Long updateCount = + client.readWriteTransaction().run(transaction -> transaction.executeUpdate(statement)); + + assertNotNull(updateCount); + assertEquals(1L, updateCount.longValue()); + } + + @Test + public void testGetDatabaseRole() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + assertEquals(TEST_DATABASE_ROLE, client.getDatabaseRole()); + } + + @Test + public void testAnalyzeUpdateStatement() { + String sql = "update foo set bar=1 where baz=@param"; + mockSpanner.putStatementResult( + StatementResult.query( + Statement.of(sql), + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setUndeclaredParameters( + StructType.newBuilder() + .addFields( 
+ Field.newBuilder() + .setName("param") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .build()) + .build()) + .setStats(ResultSetStats.newBuilder().setRowCountExact(0L).build()) + .build())); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + client + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet resultSet = + transaction.analyzeUpdateStatement(Statement.of(sql), QueryAnalyzeMode.PLAN)) { + assertFalse(resultSet.next()); + assertNotNull(resultSet.getStats()); + assertEquals(0L, resultSet.getStats().getRowCountExact()); + assertNotNull(resultSet.getMetadata()); + assertEquals(1, resultSet.getMetadata().getUndeclaredParameters().getFieldsCount()); + assertEquals( + "param", + resultSet.getMetadata().getUndeclaredParameters().getFields(0).getName()); + assertEquals( + Type.newBuilder().setCode(TypeCode.STRING).build(), + resultSet.getMetadata().getUndeclaredParameters().getFields(0).getType()); + } + return null; + }); + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertEquals(QueryMode.PLAN, request.getQueryMode()); + } + + @Test + public void testByteArray() { + Random random = new Random(); + byte[] bytes = new byte[random.nextInt(200)]; + int numRows = 5; + List rows = new ArrayList<>(numRows); + for (int i = 0; i < numRows; i++) { + random.nextBytes(bytes); + rows.add( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue( + // Use both the Guava and the JDK encoder to encode the values to ensure + // that encoding/decoding using both of them works. + i % 2 == 0 + ? 
Base64.getEncoder().encodeToString(bytes) + : BaseEncoding.base64().encode(bytes)) + .build()) + .build()); + } + Statement statement = Statement.of("select * from foo"); + mockSpanner.putStatementResult( + StatementResult.query( + statement, + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setType(Type.newBuilder().setCode(TypeCode.BYTES).build()) + .setName("f1") + .build()) + .build()) + .build()) + .addAllRows(rows) + .build())); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet resultSet = client.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + String base64String = resultSet.getValue(0).getAsString(); + ByteArray byteArray = resultSet.getBytes(0); + // Use the 'old' ByteArray.fromBase64(..) method that uses the Guava encoder to ensure that + // the two encoders (JDK and Guava) return the same values. 
+ assertEquals(ByteArray.fromBase64(base64String), byteArray); + } + } + } + + @Test + public void testGetAllTypesAsString() { + SingerInfo info = SingerInfo.newBuilder().setSingerId(1).build(); + for (Dialect dialect : Dialect.values()) { + Statement statement = Statement.of("select * from all_types"); + mockSpanner.putStatementResult( + StatementResult.query( + statement, + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata( + RandomResultSetGenerator.generateAllTypesMetadata( + RandomResultSetGenerator.generateAllTypes(dialect))) + .addRows(getRows(dialect)) + .build())); + + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet resultSet = client.singleUse().executeQuery(statement)) { + assertTrue(resultSet.next()); + int col = 0; + assertAsString("true", resultSet, col++); + assertAsString("100", resultSet, col++); + assertAsString("-3.14", resultSet, col++); + assertAsString("3.14", resultSet, col++); + assertAsString("6.626", resultSet, col++); + assertAsString("test-string", resultSet, col++); + assertAsString("{\"key1\": \"value1\"}", resultSet, col++); + assertAsString( + Base64.getEncoder().encodeToString("test-bytes".getBytes(StandardCharsets.UTF_8)), + resultSet, + col++); + assertAsString("2023-01-11", resultSet, col++); + assertAsString("b1153a48-cd31-498e-b770-f554bce48e05", resultSet, col++); + assertAsString("2023-01-11T11:55:18.123456789Z", resultSet, col++); + if (dialect == Dialect.POSTGRESQL) { + // Check PG_OID value + assertAsString("100", resultSet, col++); + } + assertAsString(ImmutableList.of("true", "NULL", "false"), resultSet, col++); + assertAsString( + ImmutableList.of( + String.format("%d", Long.MAX_VALUE), String.format("%d", Long.MIN_VALUE), "NULL"), + resultSet, + col++); + assertAsString( + ImmutableList.of( + "NULL", + Float.valueOf(Float.MAX_VALUE).toString(), + Float.valueOf(Float.MIN_VALUE).toString(), + "NaN", + "3.14"), + resultSet, + 
col++); + assertAsString(ImmutableList.of("NULL", "-12345.6789", "3.14"), resultSet, col++); + assertAsString(ImmutableList.of("6.626", "NULL", "-8.9123"), resultSet, col++); + assertAsString(ImmutableList.of("test-string1", "NULL", "test-string2"), resultSet, col++); + assertAsString( + ImmutableList.of("{\"key\": \"value1\"}", "{\"key\": \"value2\"}", "NULL"), + resultSet, + col++); + assertAsString( + ImmutableList.of( + String.format( + "%s", + Base64.getEncoder() + .encodeToString("test-bytes1".getBytes(StandardCharsets.UTF_8))), + String.format( + "%s", + Base64.getEncoder() + .encodeToString("test-bytes2".getBytes(StandardCharsets.UTF_8))), + "NULL"), + resultSet, + col++); + assertAsString(ImmutableList.of("2000-02-29", "NULL", "2000-01-01"), resultSet, col++); + assertAsString( + ImmutableList.of( + "b1153a48-cd31-498e-b770-f554bce48e05", + "NULL", + "11546309-8b37-4366-9a20-369381c7803a"), + resultSet, + col++); + assertAsString( + ImmutableList.of("2023-01-11T11:55:18.123456789Z", "NULL", "2023-01-12T11:55:18Z"), + resultSet, + col++); + if (dialect == Dialect.GOOGLE_STANDARD_SQL) { + assertAsString(Base64.getEncoder().encodeToString(info.toByteArray()), resultSet, col++); + assertAsString(String.valueOf(Genre.JAZZ_VALUE), resultSet, col++); + assertAsString( + ImmutableList.of( + String.format("%s", Base64.getEncoder().encodeToString(info.toByteArray())), + "NULL"), + resultSet, + col++); + assertAsString( + ImmutableList.of(String.format("%d", Genre.JAZZ_VALUE), "NULL"), resultSet, col++); + } + if (dialect == Dialect.POSTGRESQL) { + // Check ARRAY value + assertAsString( + ImmutableList.of( + String.format("%d", Long.MAX_VALUE), String.format("%d", Long.MIN_VALUE), "NULL"), + resultSet, + col++); + } + assertFalse(resultSet.next()); + } + } + } + + @Test + public void testSelectUnknownType() { + mockSpanner.putStatementResult( + StatementResult.query( + Statement.of("SELECT * FROM foo"), + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata( + 
ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("c") + .setType( + Type.newBuilder() + .setCodeValue(Integer.MAX_VALUE) + .build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder().setStringValue("bar").build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder().setBoolValue(true).build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder().setNumberValue(3.14d).build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("baz") + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setBoolValue(false) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setNumberValue(6.626) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .build()) + .build()) + .build()) + .build())); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet resultSet = client.singleUse().executeQuery(Statement.of("SELECT * FROM foo"))) { + assertTrue(resultSet.next()); + assertAsString("bar", resultSet, 0); + + assertTrue(resultSet.next()); + assertAsString("true", resultSet, 0); + + assertTrue(resultSet.next()); + assertAsString("3.14", resultSet, 0); + + assertTrue(resultSet.next()); + assertAsString("NULL", resultSet, 0); + + assertTrue(resultSet.next()); + assertAsString(ImmutableList.of("baz", "false", "6.626", "NULL"), resultSet, 0); + + 
assertFalse(resultSet.next()); + } + } + + @Test + public void testMetadataUnknownTypes() { + mockSpanner.putStatementResult( + StatementResult.query( + Statement.of("SELECT * FROM foo"), + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("c1") + .setType( + Type.newBuilder() + .setCodeValue(Integer.MAX_VALUE) + .build()) + .build()) + .addFields( + Field.newBuilder() + .setName("c2") + .setType( + Type.newBuilder() + .setCode(TypeCode.STRING) + .setTypeAnnotationValue(Integer.MAX_VALUE) + .build()) + .build()) + .addFields( + Field.newBuilder() + .setName("c3") + .setType( + Type.newBuilder() + .setCodeValue(Integer.MAX_VALUE) + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC) + .build()) + .build()) + .addFields( + Field.newBuilder() + .setName("c4") + .setType( + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + Type.newBuilder() + .setCodeValue(Integer.MAX_VALUE) + .build()) + .build()) + .build()) + .addFields( + Field.newBuilder() + .setName("c5") + .setType( + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + Type.newBuilder() + .setCode(TypeCode.STRING) + .setTypeAnnotationValue(Integer.MAX_VALUE) + .build()) + .build()) + .build()) + .addFields( + Field.newBuilder() + .setName("c6") + .setType( + Type.newBuilder() + // Set an unrecognized type with an array element + // type. The client should recognize this as an + // array. 
+ .setCodeValue(Integer.MAX_VALUE) + .setArrayElementType( + Type.newBuilder() + .setCodeValue(Integer.MAX_VALUE) + .build()) + .build()) + .build()) + .addFields( + Field.newBuilder() + .setName("c7") + .setType( + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + Type.newBuilder() + .setCodeValue(Integer.MAX_VALUE) + .setTypeAnnotation( + TypeAnnotationCode.PG_NUMERIC) + .build()) + .build()) + .build()) + .build()) + .build()) + .build())); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet resultSet = client.singleUse().executeQuery(Statement.of("SELECT * FROM foo"))) { + // There are no rows, but we need to call resultSet.next() before we can get the metadata. + assertFalse(resultSet.next()); + assertEquals( + "STRUCT, c3 UNRECOGNIZED, c4" + + " ARRAY, c5 ARRAY>, c6" + + " UNRECOGNIZED, c7 ARRAY>>", + resultSet.getType().toString()); + assertEquals( + "UNRECOGNIZED", resultSet.getType().getStructFields().get(0).getType().toString()); + assertEquals( + "STRING", + resultSet.getType().getStructFields().get(1).getType().toString()); + assertEquals( + "UNRECOGNIZED", + resultSet.getType().getStructFields().get(2).getType().toString()); + assertEquals( + "ARRAY", resultSet.getType().getStructFields().get(3).getType().toString()); + assertEquals(Code.ARRAY, resultSet.getType().getStructFields().get(3).getType().getCode()); + assertEquals( + Code.UNRECOGNIZED, + resultSet.getType().getStructFields().get(3).getType().getArrayElementType().getCode()); + assertEquals( + "ARRAY>", + resultSet.getType().getStructFields().get(4).getType().toString()); + assertEquals(Code.ARRAY, resultSet.getType().getStructFields().get(4).getType().getCode()); + assertEquals( + Code.UNRECOGNIZED, + resultSet.getType().getStructFields().get(4).getType().getArrayElementType().getCode()); + assertEquals( + "UNRECOGNIZED", + resultSet.getType().getStructFields().get(5).getType().toString()); + 
assertEquals( + Code.UNRECOGNIZED, resultSet.getType().getStructFields().get(5).getType().getCode()); + assertEquals( + Code.UNRECOGNIZED, + resultSet.getType().getStructFields().get(5).getType().getArrayElementType().getCode()); + assertEquals( + "ARRAY>", + resultSet.getType().getStructFields().get(6).getType().toString()); + assertEquals(Code.ARRAY, resultSet.getType().getStructFields().get(6).getType().getCode()); + assertEquals( + Code.UNRECOGNIZED, + resultSet.getType().getStructFields().get(6).getType().getArrayElementType().getCode()); + } + } + + @Test + public void testStatementWithUnnamedParameters() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + + Statement statement = + client.getStatementFactory().withUnnamedParameters("select id from test where b=?", true); + Statement generatedStatement = + Statement.newBuilder("select id from test where b=@p1").bind("p1").to(true).build(); + mockSpanner.putStatementResult(StatementResult.query(generatedStatement, SELECT1_RESULTSET)); + + try (ResultSet resultSet = client.singleUse().executeQuery(statement)) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + } + + @Test + public void testStatementWithUnnamedParametersAndSingleLineComment() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + + Statement statement = + client + .getStatementFactory() + .withUnnamedParameters( + "-- comment about ? in the statement\nselect id from test where b=?", true); + Statement generatedStatement = + Statement.newBuilder("-- comment about ? 
in the statement\nselect id from test where b=@p1") + .bind("p1") + .to(true) + .build(); + mockSpanner.putStatementResult(StatementResult.query(generatedStatement, SELECT1_RESULTSET)); + + try (ResultSet resultSet = client.singleUse().executeQuery(statement)) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + } + + @Test + public void testStatementWithUnnamedParametersAndSingleLineCommentWithHash() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + + Statement statement = + client + .getStatementFactory() + .withUnnamedParameters( + "# comment about ? in the statement\nselect id from test where b=?", true); + Statement generatedStatement = + Statement.newBuilder("# comment about ? in the statement\nselect id from test where b=@p1") + .bind("p1") + .to(true) + .build(); + mockSpanner.putStatementResult(StatementResult.query(generatedStatement, SELECT1_RESULTSET)); + + try (ResultSet resultSet = client.singleUse().executeQuery(statement)) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + } + + @Test + public void testStatementWithUnnamedParametersAndMultiLineComment() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + + Statement statement = + client + .getStatementFactory() + .withUnnamedParameters( + "# comment about ? in the statement\n" + + "select id from test\n" + + " /* This is a ? comment \n" + + " about ? */ \n" + + " where b=? # this is a inline command about ?", + true); + Statement generatedStatement = + Statement.newBuilder( + "# comment about ? in the statement\n" + + "select id from test\n" + + " /* This is a ? comment \n" + + " about ? 
*/ \n" + + " where b=@p1 # this is a inline command about ?") + .bind("p1") + .to(true) + .build(); + mockSpanner.putStatementResult(StatementResult.query(generatedStatement, SELECT1_RESULTSET)); + + try (ResultSet resultSet = client.singleUse().executeQuery(statement)) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + } + + @Test + public void testStatementWithUnnamedParametersAndStringLiteralWithQuestionMark() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + + Statement statement = + client + .getStatementFactory() + .withUnnamedParameters("select id from test where name = \"abc?\" AND b=?", true); + Statement generatedStatement = + Statement.newBuilder("select id from test where name = \"abc?\" AND b=@p1") + .bind("p1") + .to(true) + .build(); + mockSpanner.putStatementResult(StatementResult.query(generatedStatement, SELECT1_RESULTSET)); + + try (ResultSet resultSet = client.singleUse().executeQuery(statement)) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + } + + @Test + public void testStatementWithUnnamedParametersAndHint() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + + Statement statement = + client + .getStatementFactory() + .withUnnamedParameters("@{FORCE_INDEX=ABCDEF} select id from test where b=?", true); + Statement generatedStatement = + Statement.newBuilder("@{FORCE_INDEX=ABCDEF} select id from test where b=@p1") + .bind("p1") + .to(true) + .build(); + mockSpanner.putStatementResult(StatementResult.query(generatedStatement, SELECT1_RESULTSET)); + + try (ResultSet resultSet = client.singleUse().executeQuery(statement)) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + } + + @Test + public void 
testStatementWithBytesArrayParameter() { + Statement statement = + Statement.newBuilder("select id from test where b=@p1") + .bind("p1") + .toBytesArray( + Arrays.asList(ByteArray.copyFrom("test1"), null, ByteArray.copyFrom("test2"))) + .build(); + mockSpanner.putStatementResult(StatementResult.query(statement, SELECT1_RESULTSET)); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet resultSet = client.singleUse().executeQuery(statement)) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + } + + @Test + public void testStreamWaitTimeout() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + // Add a wait time to the mock server. Note that the test won't actually wait 100ms, as it uses + // a 1ns time out. + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(100, 0)); + // Create a custom call configuration that uses a 1 nanosecond stream timeout value. This will + // always time out, as a call to the mock server will always take more than 1 nanosecond. 
+ CallContextConfigurator configurator = + new CallContextConfigurator() { + @Override + public ApiCallContext configure( + ApiCallContext context, ReqT request, MethodDescriptor method) { + return context.withStreamWaitTimeoutDuration(Duration.ofNanos(1L)); + } + }; + Context context = + Context.current().withValue(SpannerOptions.CALL_CONTEXT_CONFIGURATOR_KEY, configurator); + context.run( + () -> { + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT1)) { + SpannerException exception = assertThrows(SpannerException.class, resultSet::next); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, exception.getErrorCode()); + assertTrue( + exception.getMessage(), exception.getMessage().contains("stream wait timeout")); + } + }); + } + + @Test + public void testZeroStreamWaitTimeout() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + // Create a custom call configuration that sets the stream timeout to zero. + // This should disable the timeout. + CallContextConfigurator configurator = + new CallContextConfigurator() { + @Override + public ApiCallContext configure( + ApiCallContext context, ReqT request, MethodDescriptor method) { + return context.withStreamWaitTimeoutDuration(Duration.ZERO); + } + }; + Context context = + Context.current().withValue(SpannerOptions.CALL_CONTEXT_CONFIGURATOR_KEY, configurator); + context.run( + () -> { + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT1)) { + // A zero timeout should not cause a timeout, and instead be ignored. 
+ assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + }); + } + + @Test + public void testRetryOnResourceExhausted() { + final RetrySettings retrySettings = + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofSeconds(60L)) + .setMaxRpcTimeoutDuration(Duration.ofSeconds(60L)) + .setTotalTimeoutDuration(Duration.ofSeconds(60L)) + .setRpcTimeoutMultiplier(1.0d) + .setInitialRetryDelayDuration(Duration.ZERO) + .setMaxRetryDelayDuration(Duration.ZERO) + .setMaxAttempts(100) + .build(); + SpannerOptions.Builder builder = + SpannerOptions.newBuilder() + .setProjectId(TEST_PROJECT) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://localhost:" + server.getPort()) + .setCredentials(NoCredentials.getInstance()); + RetryInfo retryInfo = + RetryInfo.newBuilder() + .setRetryDelay( + com.google.protobuf.Duration.newBuilder() + .setNanos((int) Duration.ofMillis(1).toNanos()) + .build()) + .build(); + Metadata.Key key = + Metadata.Key.of( + retryInfo.getDescriptorForType().getFullName() + Metadata.BINARY_HEADER_SUFFIX, + ProtoLiteUtils.metadataMarshaller(retryInfo)); + Metadata trailers = new Metadata(); + trailers.put(key, retryInfo); + builder + .getSpannerStubSettingsBuilder() + .executeStreamingSqlSettings() + .setRetryableCodes(StatusCode.Code.UNAVAILABLE, StatusCode.Code.RESOURCE_EXHAUSTED) + .setRetrySettings(retrySettings); + + try (Spanner spanner = builder.build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + final int expectedRowCount = 5; + RandomResultSetGenerator generator = new RandomResultSetGenerator(expectedRowCount); + Statement statement = Statement.of("select * from random_table"); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + + for (int errorIndex = 0; errorIndex < expectedRowCount - 1; errorIndex++) { + for (boolean withRetryInfo : new boolean[] {false, true}) { 
+ // RESOURCE_EXHAUSTED errors with and without retry-info should be retried. + StatusRuntimeException exception = + Status.RESOURCE_EXHAUSTED.asRuntimeException(withRetryInfo ? trailers : null); + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofStreamException(exception, errorIndex)); + try (ResultSet resultSet = client.singleUse().executeQuery(statement)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) {} + } + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + if (errorIndex == 0) { + // We should only have two requests without a resume token, as the error occurred before + // any resume token could be returned. + assertEquals( + 2, + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(request -> request.getResumeToken().isEmpty()) + .count()); + } else { + final int expectedResumeToken = errorIndex; + // Check that we have one request with a resume token that corresponds with the place in + // the stream where the error happened. 
+ assertEquals( + 1, + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter( + request -> + request + .getResumeToken() + .equals( + ByteString.copyFromUtf8( + String.format("%09d", expectedResumeToken)))) + .count()); + } + mockSpanner.clearRequests(); + } + } + } + } + + @Test + public void testSelectHasXGoogRequestIdHeader() { + Statement statement = + Statement.newBuilder("select id from test where b=@p1") + .bind("p1") + .toBytesArray( + Arrays.asList(ByteArray.copyFrom("test1"), null, ByteArray.copyFrom("test2"))) + .build(); + mockSpanner.putStatementResult(StatementResult.query(statement, SELECT1_RESULTSET)); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet resultSet = client.singleUse().executeQuery(statement)) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } finally { + // TODO(@odeke-em): Enable in later PR. + // xGoogReqIdInterceptor.assertIntegrity(); + } + } + + static void assertAsString(String expected, ResultSet resultSet, int col) { + assertEquals(expected, resultSet.getValue(col).getAsString()); + assertEquals(ImmutableList.of(expected), resultSet.getValue(col).getAsStringList()); + } + + static void assertAsString(ImmutableList expected, ResultSet resultSet, int col) { + assertEquals(expected, resultSet.getValue(col).getAsStringList()); + assertEquals( + expected.stream().collect(Collectors.joining(",", "[", "]")), + resultSet.getValue(col).getAsString()); + } + + private void consumeResults(ResultSet resultSet) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) {} + } + + private void consumeBatchWriteStream(ServerStream stream) { + //noinspection StatementWithEmptyBody + for (BatchWriteResponse ignore : stream) {} + } + + private ListValue getRows(Dialect dialect) { + SingerInfo info = SingerInfo.newBuilder().setSingerId(1).build(); + ListValue.Builder 
valuesBuilder = + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setBoolValue(true).build()) + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("100").build()) + .addValues(com.google.protobuf.Value.newBuilder().setNumberValue(-3.14f).build()) + .addValues(com.google.protobuf.Value.newBuilder().setNumberValue(3.14d).build()) + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("6.626").build()) + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("test-string").build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("{\"key1\": \"value1\"}") + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue( + Base64.getEncoder() + .encodeToString("test-bytes".getBytes(StandardCharsets.UTF_8))) + .build()) + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("2023-01-11").build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("b1153a48-cd31-498e-b770-f554bce48e05") + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("2023-01-11T11:55:18.123456789Z") + .build()); + if (dialect == Dialect.POSTGRESQL) { + // Add PG_OID value + valuesBuilder.addValues(com.google.protobuf.Value.newBuilder().setStringValue("100").build()); + } + valuesBuilder + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder().setBoolValue(true).build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder().setBoolValue(false).build()) + .build())) + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue(String.valueOf(Long.MAX_VALUE)) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + 
.setStringValue(String.valueOf(Long.MIN_VALUE)) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .build())) + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setNumberValue(Float.MAX_VALUE) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setNumberValue(Float.MIN_VALUE) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder().setStringValue("NaN").build()) + .addValues( + com.google.protobuf.Value.newBuilder().setNumberValue(3.14f).build()) + .build())) + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setNumberValue(-12345.6789d) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder().setNumberValue(3.14d).build()) + .build())) + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder().setStringValue("6.626").build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("-8.9123") + .build()) + .build())) + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("test-string1") + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("test-string2") + .build()) + .build())) + .addValues( 
+ com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("{\"key\": \"value1\"}") + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("{\"key\": \"value2\"}") + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .build())) + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue( + Base64.getEncoder() + .encodeToString( + "test-bytes1".getBytes(StandardCharsets.UTF_8))) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue( + Base64.getEncoder() + .encodeToString( + "test-bytes2".getBytes(StandardCharsets.UTF_8))) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .build())) + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("2000-02-29") + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("2000-01-01") + .build()) + .build())) + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("b1153a48-cd31-498e-b770-f554bce48e05") + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("11546309-8b37-4366-9a20-369381c7803a") + .build()) + .build())) + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + 
com.google.protobuf.Value.newBuilder() + .setStringValue("2023-01-11T11:55:18.123456789Z") + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("2023-01-12T11:55:18Z") + .build()) + .build())); + + if (dialect == Dialect.GOOGLE_STANDARD_SQL) { + // Proto columns is supported only for GOOGLE_STANDARD_SQL + valuesBuilder + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue(Base64.getEncoder().encodeToString(info.toByteArray())) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue(String.valueOf(Genre.JAZZ_VALUE)) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue( + Base64.getEncoder().encodeToString(info.toByteArray())) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .build())) + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue(String.valueOf(Genre.JAZZ_VALUE)) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .build())); + } + if (dialect == Dialect.POSTGRESQL) { + // Add ARRAY value + valuesBuilder.addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue(String.valueOf(Long.MAX_VALUE)) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue(String.valueOf(Long.MIN_VALUE)) + .build()) + .addValues( + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .build())); + } + + return valuesBuilder.build(); + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseClientImplWithDefaultRWTransactionOptionsTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseClientImplWithDefaultRWTransactionOptionsTest.java new file mode 100644 index 000000000000..d37bef01895d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseClientImplWithDefaultRWTransactionOptionsTest.java @@ -0,0 +1,620 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.MockSpannerTestUtil.INVALID_SELECT_STATEMENT; +import static com.google.cloud.spanner.MockSpannerTestUtil.SELECT1; +import static com.google.cloud.spanner.MockSpannerTestUtil.SELECT1_RESULTSET; +import static com.google.cloud.spanner.MockSpannerTestUtil.UPDATE_COUNT; +import static com.google.cloud.spanner.MockSpannerTestUtil.UPDATE_STATEMENT; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.Options.RpcPriority; +import com.google.cloud.spanner.Options.TransactionOption; +import com.google.cloud.spanner.SpannerOptions.Builder.DefaultReadWriteTransactionOptions; +import com.google.protobuf.AbstractMessage; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode; +import io.grpc.Server; +import io.grpc.Status; +import io.grpc.inprocess.InProcessServerBuilder; +import java.util.Collections; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.function.Consumer; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class DatabaseClientImplWithDefaultRWTransactionOptionsTest { + private static final TransactionOption 
SERIALIZABLE_ISOLATION_OPTION = + Options.isolationLevel(IsolationLevel.SERIALIZABLE); + private static final TransactionOption RR_ISOLATION_OPTION = + Options.isolationLevel(IsolationLevel.REPEATABLE_READ); + private static final TransactionOption OPTIMISTIC_READ_LOCK_OPTION = + Options.readLockMode(ReadLockMode.OPTIMISTIC); + private static final TransactionOption PESSIMISTIC_READ_LOCK_OPTION = + Options.readLockMode(ReadLockMode.PESSIMISTIC); + private static final DatabaseId DATABASE_ID = + DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + private static MockSpannerServiceImpl mockSpanner; + private static Server server; + private static ExecutorService executor; + private static LocalChannelProvider channelProvider; + private Spanner spanner; + private Spanner spannerWithRR; + private Spanner spannerWithRRPessimistic; + private Spanner spannerWithSerializable; + private Spanner spannerWithSerOptimistic; + private DatabaseClient client; + private DatabaseClient clientWithRepeatableReadOption; + private DatabaseClient clientWithRRPessimisticOption; + private DatabaseClient clientWithSerializableOption; + private DatabaseClient clientWithSerOptimisticOption; + + @BeforeClass + public static void startStaticServer() throws Exception { + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. 
+ mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + mockSpanner.putStatementResult(StatementResult.query(SELECT1, SELECT1_RESULTSET)); + mockSpanner.putStatementResult( + StatementResult.exception( + INVALID_SELECT_STATEMENT, + Status.INVALID_ARGUMENT.withDescription("invalid statement").asRuntimeException())); + mockSpanner.putStatementResult( + StatementResult.read( + "FOO", KeySet.all(), Collections.singletonList("ID"), SELECT1_RESULTSET)); + + String uniqueName = InProcessServerBuilder.generateName(); + executor = Executors.newSingleThreadExecutor(); + server = + InProcessServerBuilder.forName(uniqueName) + // We need to use a real executor for timeouts to occur. + .scheduledExecutorService(new ScheduledThreadPoolExecutor(1)) + .addService(mockSpanner) + .build() + .start(); + channelProvider = LocalChannelProvider.create(uniqueName); + } + + @AfterClass + public static void stopServer() throws InterruptedException { + server.shutdown(); + server.awaitTermination(); + } + + @Before + public void setUp() { + mockSpanner.reset(); + mockSpanner.removeAllExecutionTimes(); + spanner = getSpannerOptionsBuilder().build().getService(); + spannerWithRR = getSpannerOptionsBuilder(IsolationLevel.REPEATABLE_READ).build().getService(); + spannerWithRRPessimistic = + getSpannerOptionsBuilder(IsolationLevel.REPEATABLE_READ, ReadLockMode.PESSIMISTIC) + .build() + .getService(); + spannerWithSerializable = + getSpannerOptionsBuilder(IsolationLevel.SERIALIZABLE).build().getService(); + spannerWithSerOptimistic = + getSpannerOptionsBuilder(IsolationLevel.SERIALIZABLE, ReadLockMode.OPTIMISTIC) + .build() + .getService(); + client = spanner.getDatabaseClient(DATABASE_ID); + clientWithRepeatableReadOption = spannerWithRR.getDatabaseClient(DATABASE_ID); + clientWithRRPessimisticOption = spannerWithRRPessimistic.getDatabaseClient(DATABASE_ID); + clientWithSerializableOption = spannerWithSerializable.getDatabaseClient(DATABASE_ID); + 
clientWithSerOptimisticOption = spannerWithSerOptimistic.getDatabaseClient(DATABASE_ID); + } + + private static SpannerOptions.Builder getSpannerOptionsBuilder() { + return getSpannerOptionsBuilder( + IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED, ReadLockMode.READ_LOCK_MODE_UNSPECIFIED); + } + + private static SpannerOptions.Builder getSpannerOptionsBuilder(IsolationLevel isolationLevel) { + return getSpannerOptionsBuilder(isolationLevel, ReadLockMode.READ_LOCK_MODE_UNSPECIFIED); + } + + private static SpannerOptions.Builder getSpannerOptionsBuilder( + IsolationLevel isolationLevel, ReadLockMode readLockMode) { + SpannerOptions.Builder spannerOptionsBuilder = + SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()); + return spannerOptionsBuilder.setDefaultTransactionOptions( + DefaultReadWriteTransactionOptions.newBuilder() + .setIsolationLevel(isolationLevel) + .setReadLockMode(readLockMode) + .build()); + } + + private void executeTest( + Consumer testAction, IsolationLevel expectedIsolationLevel) { + testAction.accept(client); + validateIsolationLevel(expectedIsolationLevel, ReadLockMode.READ_LOCK_MODE_UNSPECIFIED); + } + + private void executeTest( + Consumer testAction, + IsolationLevel expectedIsolationLevel, + ReadLockMode readLockMode) { + testAction.accept(client); + validateIsolationLevel(expectedIsolationLevel, readLockMode); + } + + private void executeTestWithRR( + Consumer testAction, IsolationLevel expectedIsolationLevel) { + testAction.accept(clientWithRepeatableReadOption); + validateIsolationLevel(expectedIsolationLevel, ReadLockMode.READ_LOCK_MODE_UNSPECIFIED); + } + + private void executeTestWithRRPessimistic( + Consumer testAction, + IsolationLevel expectedIsolationLevel, + ReadLockMode expectedReadLockMode) { + testAction.accept(clientWithRRPessimisticOption); + validateIsolationLevel(expectedIsolationLevel, expectedReadLockMode); + } + + private void 
executeTestWithSerializable( + Consumer testAction, IsolationLevel expectedIsolationLevel) { + testAction.accept(clientWithSerializableOption); + validateIsolationLevel(expectedIsolationLevel, ReadLockMode.READ_LOCK_MODE_UNSPECIFIED); + } + + private void executeTestWithSerializableOptimistic( + Consumer testAction, + IsolationLevel expectedIsolationLevel, + ReadLockMode expectedReadLockMode) { + testAction.accept(clientWithSerOptimisticOption); + validateIsolationLevel(expectedIsolationLevel, expectedReadLockMode); + } + + @After + public void tearDown() { + spanner.close(); + spannerWithRR.close(); + spannerWithRRPessimistic.close(); + spannerWithSerializable.close(); + spannerWithSerOptimistic.close(); + } + + @Test + public void testWrite_WithNoIsolationLevel() { + executeTest( + MockSpannerTestActions::writeInsertMutation, IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED); + } + + @Test + public void testWrite_WithRRSpannerOptions() { + executeTestWithRR(MockSpannerTestActions::writeInsertMutation, IsolationLevel.REPEATABLE_READ); + } + + @Test + public void testWriteWithOptions_WithRRSpannerOptions() { + executeTestWithRR( + c -> + MockSpannerTestActions.writeInsertMutationWithOptions( + c, Options.priority(RpcPriority.HIGH)), + IsolationLevel.REPEATABLE_READ); + } + + @Test + public void testWriteWithOptions_WithRRPessimisticSpannerOptions() { + executeTestWithRRPessimistic( + c -> + MockSpannerTestActions.writeInsertMutationWithOptions( + c, Options.priority(RpcPriority.HIGH)), + IsolationLevel.REPEATABLE_READ, + ReadLockMode.PESSIMISTIC); + } + + @Test + public void testWriteWithOptions_WithSerializableTxnOption() { + executeTestWithRR( + c -> + MockSpannerTestActions.writeInsertMutationWithOptions(c, SERIALIZABLE_ISOLATION_OPTION), + IsolationLevel.SERIALIZABLE); + } + + @Test + public void testWriteWithOptions_WithSerializableOptimisticTxnOption() { + executeTestWithRRPessimistic( + c -> + MockSpannerTestActions.writeInsertMutationWithOptions( + c, 
SERIALIZABLE_ISOLATION_OPTION, OPTIMISTIC_READ_LOCK_OPTION), + IsolationLevel.SERIALIZABLE, + ReadLockMode.OPTIMISTIC); + } + + @Test + public void testWriteAtLeastOnce_WithSerializableSpannerOptions() { + executeTestWithSerializable( + MockSpannerTestActions::writeAtLeastOnceInsertMutation, IsolationLevel.SERIALIZABLE); + } + + @Test + public void testWriteAtLeastOnceWithOptions_WithRRTxnOption() { + executeTestWithSerializable( + c -> + MockSpannerTestActions.writeAtLeastOnceWithOptionsInsertMutation( + c, RR_ISOLATION_OPTION), + IsolationLevel.REPEATABLE_READ); + } + + @Test + public void testWriteAtLeastOnceWithOptions_WithRRPessimisticTxnOption() { + executeTestWithSerializableOptimistic( + c -> + MockSpannerTestActions.writeAtLeastOnceWithOptionsInsertMutation( + c, RR_ISOLATION_OPTION, PESSIMISTIC_READ_LOCK_OPTION), + IsolationLevel.REPEATABLE_READ, + ReadLockMode.PESSIMISTIC); + } + + @Test + public void testWriteAtLeastOnceWithOptions_WithPessimisticTxnOption() { + executeTestWithRRPessimistic( + c -> + MockSpannerTestActions.writeAtLeastOnceWithOptionsInsertMutation( + c, OPTIMISTIC_READ_LOCK_OPTION), + IsolationLevel.REPEATABLE_READ, + ReadLockMode.OPTIMISTIC); + } + + @Test + public void testReadWriteTxn_WithRRSpannerOption_batchUpdate() { + executeTestWithRR( + MockSpannerTestActions::executeBatchUpdateTransaction, IsolationLevel.REPEATABLE_READ); + } + + @Test + public void testReadWriteTxn_WithSerializableTxnOption_batchUpdate() { + executeTestWithRR( + c -> MockSpannerTestActions.executeBatchUpdateTransaction(c, SERIALIZABLE_ISOLATION_OPTION), + IsolationLevel.SERIALIZABLE); + } + + @Test + public void testReadWriteTxn_WithSerOptimisticTxnOption_batchUpdate() { + executeTestWithRRPessimistic( + c -> + MockSpannerTestActions.executeBatchUpdateTransaction( + c, SERIALIZABLE_ISOLATION_OPTION, OPTIMISTIC_READ_LOCK_OPTION), + IsolationLevel.SERIALIZABLE, + ReadLockMode.OPTIMISTIC); + } + + @Test + public void testPartitionedDML_WithRRSpannerOption() { + 
executeTestWithRR( + MockSpannerTestActions::executePartitionedUpdate, + IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED); + } + + @Test + public void testCommit_WithSerializableTxnOption() { + executeTest( + c -> MockSpannerTestActions.commitDeleteTransaction(c, SERIALIZABLE_ISOLATION_OPTION), + IsolationLevel.SERIALIZABLE); + } + + @Test + public void testCommit_WithSerializablePessimisticTxnOption() { + executeTest( + c -> + MockSpannerTestActions.commitDeleteTransaction( + c, SERIALIZABLE_ISOLATION_OPTION, PESSIMISTIC_READ_LOCK_OPTION), + IsolationLevel.SERIALIZABLE, + ReadLockMode.PESSIMISTIC); + } + + @Test + public void testCommit_WithSerializableOptimisticTxnOption() { + executeTest( + c -> + MockSpannerTestActions.commitDeleteTransaction( + c, SERIALIZABLE_ISOLATION_OPTION, OPTIMISTIC_READ_LOCK_OPTION), + IsolationLevel.SERIALIZABLE, + ReadLockMode.OPTIMISTIC); + } + + @Test + public void testTransactionManagerCommit_WithRRTxnOption() { + executeTestWithSerializable( + c -> MockSpannerTestActions.transactionManagerCommit(c, RR_ISOLATION_OPTION), + IsolationLevel.REPEATABLE_READ); + } + + @Test + public void testTransactionManagerCommit_WithRRTxnOptionAndSerOptimisticSpannerOptions() { + executeTestWithSerializableOptimistic( + c -> MockSpannerTestActions.transactionManagerCommit(c, RR_ISOLATION_OPTION), + IsolationLevel.REPEATABLE_READ, + ReadLockMode.OPTIMISTIC); + } + + @Test + public void testAsyncRunnerCommit_WithRRSpannerOption() { + executeTestWithRR( + c -> MockSpannerTestActions.asyncRunnerCommit(c, executor), IsolationLevel.REPEATABLE_READ); + } + + @Test + public void testAsyncRunnerCommit_WithSerOptimisticSpannerOption() { + executeTestWithSerializableOptimistic( + c -> MockSpannerTestActions.asyncRunnerCommit(c, executor), + IsolationLevel.SERIALIZABLE, + ReadLockMode.OPTIMISTIC); + } + + @Test + public void testAsyncTransactionManagerCommit_WithSerializableTxnOption() { + executeTestWithRR( + c -> + 
MockSpannerTestActions.transactionManagerAsyncCommit( + c, executor, SERIALIZABLE_ISOLATION_OPTION), + IsolationLevel.SERIALIZABLE); + } + + @Test + public void testAsyncTransactionManagerCommit_WithRRPessimisticSpannerOptions() { + executeTestWithRRPessimistic( + c -> MockSpannerTestActions.transactionManagerAsyncCommit(c, executor), + IsolationLevel.REPEATABLE_READ, + ReadLockMode.PESSIMISTIC); + } + + @Test + public void testAsyncTransactionManagerCommit_WithSerOptimisticTxnOption() { + executeTestWithRRPessimistic( + c -> + MockSpannerTestActions.transactionManagerAsyncCommit( + c, executor, SERIALIZABLE_ISOLATION_OPTION, OPTIMISTIC_READ_LOCK_OPTION), + IsolationLevel.SERIALIZABLE, + ReadLockMode.OPTIMISTIC); + } + + @Test + public void testReadWriteTxn_WithNoOptions() { + executeTest(MockSpannerTestActions::executeSelect1, IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED); + } + + @Test + public void executeSqlWithRWTransactionOptions_RepeatableRead() { + executeTest( + c -> MockSpannerTestActions.executeSelect1(c, RR_ISOLATION_OPTION), + IsolationLevel.REPEATABLE_READ); + } + + @Test + public void executeSqlWithRWTransactionOptions_RRPessimistic() { + executeTest( + c -> + MockSpannerTestActions.executeSelect1( + c, RR_ISOLATION_OPTION, PESSIMISTIC_READ_LOCK_OPTION), + IsolationLevel.REPEATABLE_READ, + ReadLockMode.PESSIMISTIC); + } + + @Test + public void executeSqlWithRWTransactionOptions_RROptimistic() { + executeTest( + c -> + MockSpannerTestActions.executeSelect1( + c, RR_ISOLATION_OPTION, PESSIMISTIC_READ_LOCK_OPTION), + IsolationLevel.REPEATABLE_READ, + ReadLockMode.PESSIMISTIC); + } + + @Test + public void + executeSqlWithDefaultSpannerOptions_SerializableAndRWTransactionOptions_RepeatableRead() { + executeTestWithSerializable( + c -> MockSpannerTestActions.executeSelect1(c, RR_ISOLATION_OPTION), + IsolationLevel.REPEATABLE_READ); + } + + @Test + public void + executeSqlWithDefaultSpannerOptions_RepeatableReadAndRWTransactionOptions_Serializable() { + 
executeTestWithRR( + c -> MockSpannerTestActions.executeSelect1(c, SERIALIZABLE_ISOLATION_OPTION), + IsolationLevel.SERIALIZABLE); + } + + @Test + public void executeSqlWithDefaultSpannerOptions_RepeatableReadAndNoRWTransactionOptions() { + executeTestWithRR(MockSpannerTestActions::executeSelect1, IsolationLevel.REPEATABLE_READ); + } + + @Test + public void executeSqlWithRWTransactionOptions_Serializable() { + executeTest( + c -> MockSpannerTestActions.executeSelect1(c, SERIALIZABLE_ISOLATION_OPTION), + IsolationLevel.SERIALIZABLE); + } + + @Test + public void executeSqlWithRWTransactionOptions_SerializablePessimistic() { + executeTest( + c -> + MockSpannerTestActions.executeSelect1( + c, SERIALIZABLE_ISOLATION_OPTION, PESSIMISTIC_READ_LOCK_OPTION), + IsolationLevel.SERIALIZABLE, + ReadLockMode.PESSIMISTIC); + } + + @Test + public void executeSqlWithRWTransactionOptions_SerializableOptimistic() { + executeTest( + c -> + MockSpannerTestActions.executeSelect1( + c, SERIALIZABLE_ISOLATION_OPTION, OPTIMISTIC_READ_LOCK_OPTION), + IsolationLevel.SERIALIZABLE, + ReadLockMode.OPTIMISTIC); + } + + @Test + public void readWithRWTransactionOptions_RepeatableRead() { + executeTest( + c -> MockSpannerTestActions.executeReadFoo(c, RR_ISOLATION_OPTION), + IsolationLevel.REPEATABLE_READ); + } + + @Test + public void readWithRWTransactionOptions_RepeatableReadPessimistic() { + executeTest( + c -> + MockSpannerTestActions.executeReadFoo( + c, RR_ISOLATION_OPTION, PESSIMISTIC_READ_LOCK_OPTION), + IsolationLevel.REPEATABLE_READ, + ReadLockMode.PESSIMISTIC); + } + + @Test + public void readWithRWTransactionOptions_RepeatableReadOptimistic() { + executeTest( + c -> + MockSpannerTestActions.executeReadFoo( + c, RR_ISOLATION_OPTION, OPTIMISTIC_READ_LOCK_OPTION), + IsolationLevel.REPEATABLE_READ, + ReadLockMode.OPTIMISTIC); + } + + @Test + public void readWithRWTransactionOptions_Serializable() { + executeTest( + c -> MockSpannerTestActions.executeReadFoo(c, SERIALIZABLE_ISOLATION_OPTION), 
+ IsolationLevel.SERIALIZABLE); + } + + @Test + public void beginTransactionWithRWTransactionOptions_RepeatableRead() { + executeTest( + c -> MockSpannerTestActions.executeInvalidAndValidSql(c, RR_ISOLATION_OPTION), + IsolationLevel.REPEATABLE_READ); + } + + @Test + public void beginTransactionWithRWTransactionOptions_Serializable() { + executeTest( + c -> MockSpannerTestActions.executeInvalidAndValidSql(c, SERIALIZABLE_ISOLATION_OPTION), + IsolationLevel.SERIALIZABLE); + } + + @Test + public void beginTransactionWithRWTransactionOptions_RROptimistic() { + executeTestWithRRPessimistic( + c -> MockSpannerTestActions.executeInvalidAndValidSql(c, OPTIMISTIC_READ_LOCK_OPTION), + IsolationLevel.REPEATABLE_READ, + ReadLockMode.OPTIMISTIC); + } + + @Test + public void beginTransactionWithRWTransactionOptions_SerPessimistic() { + executeTestWithRRPessimistic( + c -> MockSpannerTestActions.executeInvalidAndValidSql(c, SERIALIZABLE_ISOLATION_OPTION), + IsolationLevel.SERIALIZABLE, + ReadLockMode.PESSIMISTIC); + } + + @Test + public void beginTransactionWithRWTransactionOptions_SerOptimistic() { + executeTestWithRRPessimistic( + c -> + MockSpannerTestActions.executeInvalidAndValidSql( + c, SERIALIZABLE_ISOLATION_OPTION, OPTIMISTIC_READ_LOCK_OPTION), + IsolationLevel.SERIALIZABLE, + ReadLockMode.OPTIMISTIC); + } + + private void validateIsolationLevel(IsolationLevel isolationLevel, ReadLockMode readLockMode) { + boolean foundMatchingRequest = false; + for (AbstractMessage request : mockSpanner.getRequests()) { + if (request instanceof ExecuteSqlRequest) { + foundMatchingRequest = true; + assertEquals( + isolationLevel, + ((ExecuteSqlRequest) request).getTransaction().getBegin().getIsolationLevel()); + assertEquals( + readLockMode, + ((ExecuteSqlRequest) request) + .getTransaction() + .getBegin() + .getReadWrite() + .getReadLockMode()); + } else if (request instanceof BeginTransactionRequest) { + foundMatchingRequest = true; + assertEquals( + isolationLevel, 
((BeginTransactionRequest) request).getOptions().getIsolationLevel()); + assertEquals( + readLockMode, + ((BeginTransactionRequest) request).getOptions().getReadWrite().getReadLockMode()); + } else if (request instanceof ReadRequest) { + foundMatchingRequest = true; + assertEquals( + isolationLevel, + ((ReadRequest) request).getTransaction().getBegin().getIsolationLevel()); + assertEquals( + readLockMode, + ((ReadRequest) request).getTransaction().getBegin().getReadWrite().getReadLockMode()); + } else if (request instanceof CommitRequest) { + foundMatchingRequest = true; + assertEquals( + isolationLevel, + ((CommitRequest) request).getSingleUseTransaction().getIsolationLevel()); + assertEquals( + readLockMode, + ((CommitRequest) request).getSingleUseTransaction().getReadWrite().getReadLockMode()); + } else if (request instanceof ExecuteBatchDmlRequest) { + foundMatchingRequest = true; + assertEquals( + isolationLevel, + ((ExecuteBatchDmlRequest) request).getTransaction().getBegin().getIsolationLevel()); + assertEquals( + readLockMode, + ((ExecuteBatchDmlRequest) request) + .getTransaction() + .getBegin() + .getReadWrite() + .getReadLockMode()); + } + if (foundMatchingRequest) { + break; + } + } + assertTrue("No gRPC call is made", foundMatchingRequest); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseIdTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseIdTest.java new file mode 100644 index 000000000000..69d3dce0c93c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseIdTest.java @@ -0,0 +1,49 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link com.google.cloud.spanner.DatabaseId}. */ +@RunWith(JUnit4.class) +public class DatabaseIdTest { + + @Test + public void basics() { + String name = "projects/test-project/instances/test-instance/databases/database-1"; + DatabaseId db = DatabaseId.of(name); + assertThat(db.getName()).isEqualTo(name); + assertThat(db.getInstanceId().getInstance()).isEqualTo("test-instance"); + assertThat(db.getDatabase()).isEqualTo("database-1"); + assertThat(DatabaseId.of("test-project", "test-instance", "database-1")).isEqualTo(db); + assertThat(DatabaseId.of(name)).isEqualTo(db); + assertThat(DatabaseId.of(name).hashCode()).isEqualTo(db.hashCode()); + assertThat(db.toString()).isEqualTo(name); + } + + @Test + public void badName() { + IllegalArgumentException e = + assertThrows(IllegalArgumentException.class, () -> DatabaseId.of("bad name")); + assertThat(e.getMessage()).contains("projects"); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseTest.java new file mode 100644 index 000000000000..dc65f7386199 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseTest.java @@ -0,0 +1,314 @@ +/* + * Copyright 2017 
Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.DatabaseInfo.State.CREATING; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.mockito.MockitoAnnotations.initMocks; + +import com.google.cloud.Identity; +import com.google.cloud.Policy; +import com.google.cloud.Role; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.DatabaseInfo.State; +import com.google.cloud.spanner.encryption.EncryptionConfigs; +import com.google.common.io.ByteStreams; +import com.google.protobuf.ByteString; +import com.google.rpc.Code; +import com.google.rpc.Status; +import com.google.spanner.admin.database.v1.DatabaseDialect; +import com.google.spanner.admin.database.v1.EncryptionInfo; +import java.io.IOException; +import java.io.InputStream; +import java.util.Collections; +import java.util.List; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.Mock; +import org.mockito.Mockito; + +/** Unit tests for {@link com.google.cloud.spanner.Database}. 
*/ +@RunWith(JUnit4.class) +public class DatabaseTest { + private static final String NAME = + "projects/test-project/instances/test-instance/databases/database-1"; + + private static final Timestamp EARLIEST_VERSION_TIME = Timestamp.now(); + private static final String VERSION_RETENTION_PERIOD = "7d"; + private static final String KMS_KEY_NAME = "kms-key-name"; + private static final String KMS_KEY_VERSION = "kms-key-version"; + private static final com.google.spanner.admin.database.v1.EncryptionConfig ENCRYPTION_CONFIG = + com.google.spanner.admin.database.v1.EncryptionConfig.newBuilder() + .setKmsKeyName(KMS_KEY_NAME) + .build(); + private static final List ENCRYPTION_INFOS = + Collections.singletonList( + EncryptionInfo.newBuilder() + .setEncryptionType(EncryptionInfo.Type.CUSTOMER_MANAGED_ENCRYPTION) + .setEncryptionStatus(Status.newBuilder().setCode(Code.OK.getNumber())) + .setKmsKeyVersion(KMS_KEY_VERSION) + .build()); + private static final String DEFAULT_LEADER = "default-leader"; + private static final DatabaseDialect DEFAULT_DIALECT = DatabaseDialect.GOOGLE_STANDARD_SQL; + private static ByteString protoDescriptors; + private static byte[] protoDescriptorsByteArray; + private static final String PROTO_DESCRIPTORS_RESOURCE_PATH = + "com/google/cloud/spanner/descriptors.pb"; + + private static final boolean DROP_PROTECTION_ENABLED = true; + + private static final boolean RECONCILING = true; + + @Mock DatabaseAdminClient dbClient; + + @Before + public void setUp() { + initMocks(this); + when(dbClient.newBackupBuilder(Mockito.any(BackupId.class))) + .thenAnswer( + invocation -> new Backup.Builder(dbClient, (BackupId) invocation.getArguments()[0])); + when(dbClient.newDatabaseBuilder(Mockito.any(DatabaseId.class))) + .thenAnswer( + invocation -> + new Database.Builder(dbClient, (DatabaseId) invocation.getArguments()[0])); + try { + InputStream protoDescriptorsInputStream = + getClass().getClassLoader().getResourceAsStream(PROTO_DESCRIPTORS_RESOURCE_PATH); + 
assertNotNull(protoDescriptorsInputStream); + protoDescriptorsByteArray = ByteStreams.toByteArray(protoDescriptorsInputStream); + protoDescriptors = ByteString.copyFrom(protoDescriptorsByteArray); + } catch (IOException e) { + e.printStackTrace(); + } + } + + @Test + public void backup() { + Timestamp expireTime = Timestamp.now(); + Database db = createDatabase(); + Backup backup = + dbClient + .newBackupBuilder(BackupId.of("test-project", "test-instance", "test-backup")) + .setExpireTime(expireTime) + .build(); + db.backup(backup); + verify(dbClient).createBackup(backup.toBuilder().setDatabase(db.getId()).build()); + } + + @Test + public void listDatabaseOperations() { + Database db = createDatabase(); + db.listDatabaseOperations(); + verify(dbClient) + .listDatabaseOperations("test-instance", Options.filter("name:databases/database-1")); + } + + @Test + public void testFromProto() { + final Database database = createDatabase(); + assertEquals(NAME, database.getId().getName()); + assertEquals(CREATING, database.getState()); + assertEquals(VERSION_RETENTION_PERIOD, database.getVersionRetentionPeriod()); + assertEquals(EARLIEST_VERSION_TIME, database.getEarliestVersionTime()); + assertEquals( + EncryptionConfigs.customerManagedEncryption(KMS_KEY_NAME), database.getEncryptionConfig()); + assertEquals(DEFAULT_LEADER, database.getDefaultLeader()); + assertEquals(Dialect.GOOGLE_STANDARD_SQL, database.getDialect()); + assertEquals(DROP_PROTECTION_ENABLED, database.isDropProtectionEnabled()); + assertEquals(RECONCILING, database.getReconciling()); + } + + @Test + public void testToProto() { + final com.google.spanner.admin.database.v1.Database database = createDatabase().toProto(); + assertEquals(NAME, database.getName()); + assertEquals(com.google.spanner.admin.database.v1.Database.State.CREATING, database.getState()); + assertEquals(VERSION_RETENTION_PERIOD, database.getVersionRetentionPeriod()); + assertEquals(EARLIEST_VERSION_TIME.toProto(), 
database.getEarliestVersionTime()); + assertEquals(ENCRYPTION_CONFIG, database.getEncryptionConfig()); + assertEquals(DEFAULT_LEADER, database.getDefaultLeader()); + assertEquals(DEFAULT_DIALECT, database.getDatabaseDialect()); + assertEquals(DROP_PROTECTION_ENABLED, database.getEnableDropProtection()); + assertEquals(RECONCILING, database.getReconciling()); + } + + @Test + public void testUnspecifiedDialectDefaultsToGoogleStandardSqlDialect() { + final Database database = + Database.fromProto( + defaultProtoDatabase().toBuilder() + .setDatabaseDialect(DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED) + .build(), + dbClient); + + assertEquals(Dialect.GOOGLE_STANDARD_SQL, database.getDialect()); + } + + @Test + public void testUnrecognizedDialectThrowsException() { + assertThrows( + IllegalArgumentException.class, + () -> + Database.fromProto( + defaultProtoDatabase().toBuilder() + .setDatabaseDialect(DatabaseDialect.UNRECOGNIZED) + .build(), + dbClient)); + } + + @Test + public void testBuildWithEncryptionConfig() { + Database db = + dbClient + .newDatabaseBuilder(DatabaseId.of("my-project", "my-instance", "my-database")) + .setEncryptionConfig( + EncryptionConfigs.customerManagedEncryption( + "projects/my-project/locations/some-location/keyRings/my-keyring/cryptoKeys/my-key")) + .build(); + assertThat(db.getEncryptionConfig()).isNotNull(); + assertThat(db.getEncryptionConfig().getKmsKeyName()) + .isEqualTo( + "projects/my-project/locations/some-location/keyRings/my-keyring/cryptoKeys/my-key"); + } + + @Test + public void testBuildWithDefaultLeader() { + Database db = + dbClient + .newDatabaseBuilder(DatabaseId.of("my-project", "my-instance", "my-database")) + .setDefaultLeader(DEFAULT_LEADER) + .build(); + + assertEquals(DEFAULT_LEADER, db.getDefaultLeader()); + } + + @Test + public void testBuildWithDatabaseDialect() { + final Database database = + dbClient + .newDatabaseBuilder(DatabaseId.of("my-project", "my-instance", "my-database")) + 
.setDialect(Dialect.GOOGLE_STANDARD_SQL) + .build(); + + assertEquals(Dialect.GOOGLE_STANDARD_SQL, database.getDialect()); + } + + @Test + public void testBuildWithProtoDescriptorsFromInputStream() throws IOException { + InputStream in = + getClass().getClassLoader().getResourceAsStream(PROTO_DESCRIPTORS_RESOURCE_PATH); + assertNotNull(in); + final Database database = + dbClient + .newDatabaseBuilder(DatabaseId.of("my-project", "my-instance", "my-database")) + .setProtoDescriptors(in) + .build(); + + assertEquals(protoDescriptors, database.getProtoDescriptors()); + } + + @Test + public void testBuildWithProtoDescriptorsFromByteArray() { + final Database database = + dbClient + .newDatabaseBuilder(DatabaseId.of("my-project", "my-instance", "my-database")) + .setProtoDescriptors(protoDescriptorsByteArray) + .build(); + + assertEquals(protoDescriptors, database.getProtoDescriptors()); + } + + @Test + public void testBuildWithProtoDescriptorsThrowsException() throws IOException { + InputStream in = + getClass().getClassLoader().getResourceAsStream(PROTO_DESCRIPTORS_RESOURCE_PATH); + in.close(); + // case1: Test one of the IOException case, where InputStream is closed before read + assertThrows( + IOException.class, + () -> + dbClient + .newDatabaseBuilder(DatabaseId.of("my-project", "my-instance", "my-database")) + .setProtoDescriptors(in) + .build()); + } + + @Test + public void getIAMPolicy() { + Database database = + new Database( + DatabaseId.of("test-project", "test-instance", "test-database"), State.READY, dbClient); + database.getIAMPolicy(1); + verify(dbClient).getDatabaseIAMPolicy("test-instance", "test-database", 1); + } + + @Test + public void setIAMPolicy() { + Database database = + new Database( + DatabaseId.of("test-project", "test-instance", "test-database"), State.READY, dbClient); + Policy policy = + Policy.newBuilder().addIdentity(Role.editor(), Identity.user("joe@example.com")).build(); + database.setIAMPolicy(policy); + 
verify(dbClient).setDatabaseIAMPolicy("test-instance", "test-database", policy); + } + + @Test + public void testIAMPermissions() { + Database database = + new Database( + DatabaseId.of("test-project", "test-instance", "test-database"), State.READY, dbClient); + Iterable permissions = Collections.singletonList("read"); + database.testIAMPermissions(permissions); + verify(dbClient).testDatabaseIAMPermissions("test-instance", "test-database", permissions); + } + + @Test + public void testEqualsAndHashCode() { + final Database database1 = createDatabase(); + final Database database2 = createDatabase(); + + assertEquals(database1, database2); + assertEquals(database1.hashCode(), database2.hashCode()); + } + + private Database createDatabase() { + return Database.fromProto(defaultProtoDatabase(), dbClient); + } + + private com.google.spanner.admin.database.v1.Database defaultProtoDatabase() { + return com.google.spanner.admin.database.v1.Database.newBuilder() + .setName(NAME) + .setState(com.google.spanner.admin.database.v1.Database.State.CREATING) + .setEarliestVersionTime(EARLIEST_VERSION_TIME.toProto()) + .setVersionRetentionPeriod(VERSION_RETENTION_PERIOD) + .setEncryptionConfig(ENCRYPTION_CONFIG) + .addAllEncryptionInfo(ENCRYPTION_INFOS) + .setDefaultLeader(DEFAULT_LEADER) + .setDatabaseDialect(DEFAULT_DIALECT) + .setEnableDropProtection(DROP_PROTECTION_ENABLED) + .setReconciling(RECONCILING) + .build(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DefaultBenchmark.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DefaultBenchmark.java new file mode 100644 index 000000000000..28580f533658 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DefaultBenchmark.java @@ -0,0 +1,235 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.BenchmarkingUtilityScripts.collectResults; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import com.google.common.base.Stopwatch; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningScheduledExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.TearDown; +import org.openjdk.jmh.annotations.Warmup; + +/** + * Benchmarks for measuring existing latencies of various APIs using the Java Client. The benchmarks + * are bound to the Maven profile `benchmark` and can be executed like this: + * mvn clean test -DskipTests -Pbenchmark -Dbenchmark.name=DefaultBenchmark + * Test Table Schema : + * + *

CREATE TABLE FOO ( id INT64 NOT NULL, BAZ INT64, BAR INT64, ) PRIMARY KEY(id); + * + *

Below are a few considerations here: 1. We use all default options for this test because that + * is what most customers would be using. 2. The test schema uses a numeric primary key. To ensure + * that the reads/updates are distributed across a large query space, we insert 10^5 records. + * Utility at {@link BenchmarkingUtilityScripts} can be used for loading data. 3. For queries, we + * make sure that the query is sampled randomly across a large query space. This ensure we don't + * cause hot-spots. 4. For avoid cold start issues, we execute 1 query/update and ignore its latency + * from the final reported metrics. + */ +@BenchmarkMode(Mode.AverageTime) +@Fork(value = 1, warmups = 0) +@Measurement(batchSize = 1, iterations = 1, timeUnit = TimeUnit.MILLISECONDS) +@OutputTimeUnit(TimeUnit.SECONDS) +@Warmup(iterations = 0) +public class DefaultBenchmark extends AbstractLatencyBenchmark { + + @State(Scope.Benchmark) + public static class BenchmarkState { + + // TODO(developer): Add your values here for PROJECT_ID, INSTANCE_ID, DATABASE_ID + private static final String INSTANCE_ID = ""; + private static final String DATABASE_ID = ""; + private static final String SERVER_URL = "https://staging-wrenchworks.sandbox.googleapis.com"; + private Spanner spanner; + private DatabaseClientImpl client; + + @Param({"100"}) + int minSessions; + + @Param({"400"}) + int maxSessions; + + @Setup(Level.Iteration) + public void setup() throws Exception { + SpannerOptions options = + SpannerOptions.newBuilder() + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setMinSessions(minSessions) + .setMaxSessions(maxSessions) + .setWaitForMinSessionsDuration(Duration.ofSeconds(20)) + .build()) + .setHost(SERVER_URL) + .setNumChannels(NUM_GRPC_CHANNELS) + .build(); + spanner = options.getService(); + client = + (DatabaseClientImpl) + spanner.getDatabaseClient( + DatabaseId.of(options.getProjectId(), INSTANCE_ID, DATABASE_ID)); + } + + @TearDown(Level.Iteration) + public void 
teardown() throws Exception { + spanner.close(); + } + } + + /** Measures the time needed to execute a burst of queries. */ + @Benchmark + public void burstQueries(final BenchmarkState server) throws Exception { + final DatabaseClientImpl client = server.client; + + ListeningScheduledExecutorService service = + MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(PARALLEL_THREADS)); + List>> results = new ArrayList<>(PARALLEL_THREADS); + for (int i = 0; i < PARALLEL_THREADS; i++) { + results.add( + service.submit(() -> runBenchmarksForSingleUseQueries(server, TOTAL_READS_PER_RUN))); + } + collectResultsAndPrint(service, results, TOTAL_READS_PER_RUN); + } + + /** Measures the time needed to execute a burst of read and write requests. */ + @Benchmark + public void burstQueriesAndWrites(final BenchmarkState server) throws Exception { + final DatabaseClientImpl client = server.client; + + ListeningScheduledExecutorService service = + MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(PARALLEL_THREADS)); + List>> results = new ArrayList<>(PARALLEL_THREADS); + for (int i = 0; i < PARALLEL_THREADS; i++) { + results.add( + service.submit(() -> runBenchmarksForSingleUseQueries(server, TOTAL_READS_PER_RUN))); + } + for (int i = 0; i < PARALLEL_THREADS; i++) { + results.add(service.submit(() -> runBenchmarkForUpdates(server, TOTAL_WRITES_PER_RUN))); + } + + collectResultsAndPrint(service, results, TOTAL_READS_PER_RUN + TOTAL_WRITES_PER_RUN); + } + + /** Measures the time needed to execute a burst of read and write requests. 
*/ + @Benchmark + public void burstUpdates(final BenchmarkState server) throws Exception { + final DatabaseClientImpl client = server.client; + + ListeningScheduledExecutorService service = + MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(PARALLEL_THREADS)); + List>> results = new ArrayList<>(PARALLEL_THREADS); + for (int i = 0; i < PARALLEL_THREADS; i++) { + results.add(service.submit(() -> runBenchmarkForUpdates(server, TOTAL_WRITES_PER_RUN))); + } + + collectResultsAndPrint(service, results, TOTAL_WRITES_PER_RUN); + } + + private List runBenchmarksForSingleUseQueries( + final BenchmarkState server, int numberOfOperations) { + List results = new ArrayList<>(numberOfOperations); + // Execute one query to make sure everything has been warmed up. + executeWarmup(server); + + for (int i = 0; i < numberOfOperations; i++) { + results.add(executeSingleUseQuery(server)); + } + return results; + } + + private void executeWarmup(final BenchmarkState server) { + for (int i = 0; i < WARMUP_REQUEST_COUNT; i++) { + executeSingleUseQuery(server); + } + } + + private Duration executeSingleUseQuery(final BenchmarkState server) { + Stopwatch watch = Stopwatch.createStarted(); + + try (ResultSet rs = server.client.singleUse().executeQuery(getRandomisedReadStatement())) { + while (rs.next()) { + assertEquals(1, rs.getColumnCount()); + assertNotNull(rs.getValue(0)); + } + } + return watch.elapsed(); + } + + private List runBenchmarkForUpdates( + final BenchmarkState server, int numberOfOperations) { + List results = new ArrayList<>(numberOfOperations); + // Execute one query to make sure everything has been warmed up. + executeWarmup(server); + + // Execute one update to make sure everything has been warmed up. 
+ executeUpdate(server); + + for (int i = 0; i < numberOfOperations; i++) { + results.add(executeUpdate(server)); + } + return results; + } + + private Duration executeUpdate(final BenchmarkState server) { + Stopwatch watch = Stopwatch.createStarted(); + + TransactionRunner runner = server.client.readWriteTransaction(); + runner.run(transaction -> transaction.executeUpdate(getRandomisedUpdateStatement())); + + return watch.elapsed(); + } + + static Statement getRandomisedReadStatement() { + int randomKey = ThreadLocalRandom.current().nextInt(TOTAL_RECORDS); + return Statement.newBuilder(SELECT_QUERY).bind(ID_COLUMN_NAME).to(randomKey).build(); + } + + static Statement getRandomisedUpdateStatement() { + int randomKey = ThreadLocalRandom.current().nextInt(TOTAL_RECORDS); + return Statement.newBuilder(UPDATE_QUERY).bind(ID_COLUMN_NAME).to(randomKey).build(); + } + + void collectResultsAndPrint( + ListeningScheduledExecutorService service, + List>> results, + int numOperationsPerThread) + throws Exception { + final List collectResults = + collectResults( + service, results, numOperationsPerThread * PARALLEL_THREADS, Duration.ofMinutes(60)); + printResults(collectResults); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DialectTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DialectTest.java new file mode 100644 index 000000000000..01c0bc38fc9a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DialectTest.java @@ -0,0 +1,95 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; + +import com.google.spanner.admin.database.v1.DatabaseDialect; +import org.junit.Test; + +public class DialectTest { + + @Test + public void testCreateDatabaseStatementForGoogleStandardSQLDialect() { + assertEquals( + "CREATE DATABASE `my-database`", + Dialect.GOOGLE_STANDARD_SQL.createDatabaseStatementFor("my-database")); + } + + @Test + public void testCreateDatabaseStatementForPostgreSQLDialect() { + assertEquals( + "CREATE DATABASE \"my-database\"", + Dialect.POSTGRESQL.createDatabaseStatementFor("my-database")); + } + + @Test + public void testGoogleStandardSQLDialectToProto() { + assertEquals(DatabaseDialect.GOOGLE_STANDARD_SQL, Dialect.GOOGLE_STANDARD_SQL.toProto()); + } + + @Test + public void testPostgreSQLToProto() { + assertEquals(DatabaseDialect.POSTGRESQL, Dialect.POSTGRESQL.toProto()); + } + + @Test + public void testFromGoogleStandardSQLDialectProto() { + assertEquals( + Dialect.GOOGLE_STANDARD_SQL, Dialect.fromProto(DatabaseDialect.GOOGLE_STANDARD_SQL)); + } + + @Test + public void testFromUnspecifiedDialectProto() { + assertEquals( + Dialect.GOOGLE_STANDARD_SQL, + Dialect.fromProto(DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED)); + } + + @Test + public void testFromPostgreSQLDialectProto() { + assertEquals(Dialect.POSTGRESQL, Dialect.fromProto(DatabaseDialect.POSTGRESQL)); + } + + @Test + public void testFromUnrecognizedDialectProto() { + assertThrows( + IllegalArgumentException.class, () -> 
Dialect.fromProto(DatabaseDialect.UNRECOGNIZED)); + } + + @Test + public void testFromNullDialectProto() { + assertThrows( + IllegalArgumentException.class, () -> Dialect.fromProto(DatabaseDialect.UNRECOGNIZED)); + } + + @Test + public void testFromGoogleStandardSQLDialectName() { + assertEquals(Dialect.GOOGLE_STANDARD_SQL, Dialect.fromName("GOOGLE_STANDARD_SQL")); + } + + @Test + public void testFromPostgreSQLDialectName() { + assertEquals(Dialect.POSTGRESQL, Dialect.fromName("POSTGRESQL")); + } + + @Test + public void testFromInvalidDialectName() { + assertThrows(IllegalArgumentException.class, () -> Dialect.fromName("INVALID")); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ExcludeFromChangeStreamTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ExcludeFromChangeStreamTest.java new file mode 100644 index 000000000000..498e2fab107a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ExcludeFromChangeStreamTest.java @@ -0,0 +1,299 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import com.google.cloud.spanner.connection.RandomResultSetGenerator; +import com.google.common.collect.ImmutableList; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ReadRequest; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Status; +import java.util.concurrent.atomic.AtomicBoolean; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ExcludeFromChangeStreamTest extends AbstractMockServerTest { + + @BeforeClass + public static void setupReadResult() { + RandomResultSetGenerator generator = new RandomResultSetGenerator(10); + mockSpanner.putStatementResult( + StatementResult.query( + Statement.of("SELECT my-column FROM my-table WHERE 1=1"), generator.generate())); + } + + private Spanner createSpanner() { + return SpannerOptions.newBuilder() + .setProjectId("fake-project") + .setHost("http://localhost:" + getPort()) + .setCredentials(NoCredentials.getInstance()) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .build() + .getService(); + } + + @Test + public void testStandardTransaction() { + try (Spanner spanner = createSpanner()) { + for (int i = 0; i < 10; i++) { + DatabaseClient client = + spanner.getDatabaseClient( + DatabaseId.of("fake-project", "fake-instance", "fake-database")); + client + .readWriteTransaction(Options.tag("some-tag"), Options.excludeTxnFromChangeStreams()) + .run( + transaction -> { + try 
(ResultSet resultSet = + transaction.read("my-table", KeySet.all(), ImmutableList.of("my-column"))) { + while (resultSet.next()) {} + } + transaction.buffer( + Mutation.newInsertOrUpdateBuilder("my-table") + .set("my-column") + .to(1L) + .build()); + return null; + }); + assertEquals(0, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(ReadRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + + ReadRequest readRequest = mockSpanner.getRequestsOfType(ReadRequest.class).get(0); + assertTrue(readRequest.hasTransaction()); + assertTrue(readRequest.getTransaction().hasBegin()); + assertTrue(readRequest.getTransaction().getBegin().hasReadWrite()); + assertTrue(readRequest.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + + CommitRequest commitRequest = mockSpanner.getRequestsOfType(CommitRequest.class).get(0); + assertNotNull(commitRequest.getTransactionId()); + + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testTransactionAbortedDuringRead() { + try (Spanner spanner = createSpanner()) { + for (int i = 0; i < 10; i++) { + DatabaseClient client = + spanner.getDatabaseClient( + DatabaseId.of("fake-project", "fake-instance", "fake-database")); + AtomicBoolean hasAborted = new AtomicBoolean(false); + client + .readWriteTransaction(Options.tag("some-tag"), Options.excludeTxnFromChangeStreams()) + .run( + transaction -> { + if (hasAborted.compareAndSet(false, true)) { + mockSpanner.abortNextStatement(); + } + try (ResultSet resultSet = + transaction.read("my-table", KeySet.all(), ImmutableList.of("my-column"))) { + while (resultSet.next()) {} + } + transaction.buffer( + Mutation.newInsertOrUpdateBuilder("my-table") + .set("my-column") + .to(1L) + .build()); + return null; + }); + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(2, mockSpanner.countRequestsOfType(ReadRequest.class)); + 
assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + + BeginTransactionRequest beginRequest = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class).get(0); + assertTrue(beginRequest.getOptions().hasReadWrite()); + assertTrue(beginRequest.getOptions().getExcludeTxnFromChangeStreams()); + + ReadRequest firstReadRequest = mockSpanner.getRequestsOfType(ReadRequest.class).get(0); + assertTrue(firstReadRequest.hasTransaction()); + assertTrue(firstReadRequest.getTransaction().hasBegin()); + assertTrue(firstReadRequest.getTransaction().getBegin().hasReadWrite()); + assertTrue(firstReadRequest.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + + ReadRequest secondReadRequest = mockSpanner.getRequestsOfType(ReadRequest.class).get(1); + assertTrue(secondReadRequest.hasTransaction()); + assertTrue(secondReadRequest.getTransaction().hasId()); + + CommitRequest commitRequest = mockSpanner.getRequestsOfType(CommitRequest.class).get(0); + assertNotNull(commitRequest.getTransactionId()); + + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testTransactionAbortedDuringCommit() { + try (Spanner spanner = createSpanner()) { + for (int i = 0; i < 10; i++) { + DatabaseClient client = + spanner.getDatabaseClient( + DatabaseId.of("fake-project", "fake-instance", "fake-database")); + AtomicBoolean hasAborted = new AtomicBoolean(false); + client + .readWriteTransaction(Options.tag("some-tag"), Options.excludeTxnFromChangeStreams()) + .run( + transaction -> { + try (ResultSet resultSet = + transaction.read("my-table", KeySet.all(), ImmutableList.of("my-column"))) { + while (resultSet.next()) {} + } + if (hasAborted.compareAndSet(false, true)) { + mockSpanner.abortNextStatement(); + } + transaction.buffer( + Mutation.newInsertOrUpdateBuilder("my-table") + .set("my-column") + .to(1L) + .build()); + return null; + }); + assertEquals(0, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(2, 
mockSpanner.countRequestsOfType(ReadRequest.class)); + assertEquals(2, mockSpanner.countRequestsOfType(CommitRequest.class)); + + ReadRequest firstReadRequest = mockSpanner.getRequestsOfType(ReadRequest.class).get(0); + assertTrue(firstReadRequest.hasTransaction()); + assertTrue(firstReadRequest.getTransaction().hasBegin()); + assertTrue(firstReadRequest.getTransaction().getBegin().hasReadWrite()); + assertTrue(firstReadRequest.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + + ReadRequest secondReadRequest = mockSpanner.getRequestsOfType(ReadRequest.class).get(1); + assertTrue(secondReadRequest.hasTransaction()); + assertTrue(secondReadRequest.getTransaction().hasBegin()); + assertTrue(secondReadRequest.getTransaction().getBegin().hasReadWrite()); + assertTrue(secondReadRequest.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + + for (CommitRequest commitRequest : mockSpanner.getRequestsOfType(CommitRequest.class)) { + assertNotNull(commitRequest.getTransactionId()); + } + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testReadReturnsUnavailable() { + + try (Spanner spanner = createSpanner()) { + for (int i = 0; i < 10; i++) { + mockSpanner.setStreamingReadExecutionTime( + SimulatedExecutionTime.ofException(Status.UNAVAILABLE.asRuntimeException())); + DatabaseClient client = + spanner.getDatabaseClient( + DatabaseId.of("fake-project", "fake-instance", "fake-database")); + client + .readWriteTransaction(Options.tag("some-tag"), Options.excludeTxnFromChangeStreams()) + .run( + transaction -> { + try (ResultSet resultSet = + transaction.read("my-table", KeySet.all(), ImmutableList.of("my-column"))) { + while (resultSet.next()) {} + } + transaction.buffer( + Mutation.newInsertOrUpdateBuilder("my-table") + .set("my-column") + .to(1L) + .build()); + return null; + }); + assertEquals(0, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(2, mockSpanner.countRequestsOfType(ReadRequest.class)); + 
assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + + ReadRequest firstReadRequest = mockSpanner.getRequestsOfType(ReadRequest.class).get(0); + assertTrue(firstReadRequest.hasTransaction()); + assertTrue(firstReadRequest.getTransaction().hasBegin()); + assertTrue(firstReadRequest.getTransaction().getBegin().hasReadWrite()); + assertTrue(firstReadRequest.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + + ReadRequest secondReadRequest = mockSpanner.getRequestsOfType(ReadRequest.class).get(1); + assertTrue(secondReadRequest.hasTransaction()); + assertTrue(secondReadRequest.getTransaction().hasBegin()); + assertTrue(secondReadRequest.getTransaction().getBegin().hasReadWrite()); + assertTrue(secondReadRequest.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + + CommitRequest commitRequest = mockSpanner.getRequestsOfType(CommitRequest.class).get(0); + assertNotNull(commitRequest.getTransactionId()); + + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testReadReturnsUnavailableHalfway() { + try (Spanner spanner = createSpanner()) { + for (int i = 0; i < 10; i++) { + mockSpanner.setStreamingReadExecutionTime( + SimulatedExecutionTime.ofStreamException(Status.UNAVAILABLE.asRuntimeException(), 2)); + + DatabaseClient client = + spanner.getDatabaseClient( + DatabaseId.of("fake-project", "fake-instance", "fake-database")); + client + .readWriteTransaction(Options.tag("some-tag"), Options.excludeTxnFromChangeStreams()) + .run( + transaction -> { + try (ResultSet resultSet = + transaction.read("my-table", KeySet.all(), ImmutableList.of("my-column"))) { + while (resultSet.next()) {} + } + transaction.buffer( + Mutation.newInsertOrUpdateBuilder("my-table") + .set("my-column") + .to(1L) + .build()); + return null; + }); + assertEquals(0, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(2, mockSpanner.countRequestsOfType(ReadRequest.class)); + assertEquals(1, 
mockSpanner.countRequestsOfType(CommitRequest.class)); + + ReadRequest firstReadRequest = mockSpanner.getRequestsOfType(ReadRequest.class).get(0); + assertTrue(firstReadRequest.hasTransaction()); + assertTrue(firstReadRequest.getTransaction().hasBegin()); + assertTrue(firstReadRequest.getTransaction().getBegin().hasReadWrite()); + assertTrue(firstReadRequest.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + + ReadRequest secondReadRequest = mockSpanner.getRequestsOfType(ReadRequest.class).get(1); + assertTrue(secondReadRequest.hasTransaction()); + assertTrue(secondReadRequest.getTransaction().hasId()); + + CommitRequest commitRequest = mockSpanner.getRequestsOfType(CommitRequest.class).get(0); + assertNotNull(commitRequest.getTransactionId()); + + mockSpanner.clearRequests(); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ExperimentalHostMockServerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ExperimentalHostMockServerTest.java new file mode 100644 index 000000000000..423c3337ab82 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ExperimentalHostMockServerTest.java @@ -0,0 +1,88 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertFalse; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.protobuf.ListValue; +import com.google.protobuf.Value; +import com.google.spanner.v1.BatchCreateSessionsRequest; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.TypeCode; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ExperimentalHostMockServerTest extends AbstractMockServerTest { + + private static final String SQL_QUERY = "SELECT * FROM Singers"; + + private static final ResultSetMetadata SINGERS_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + StructType.Field.newBuilder() + .setName("FirstName") + .setType( + com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.STRING))) + .addFields( + StructType.Field.newBuilder() + .setName("LastName") + .setType( + com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.STRING))) + .build()) + .build(); + + private static final com.google.spanner.v1.ResultSet SINGERS_RESULT_SET = + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata(SINGERS_METADATA) + .addRows( + ListValue.newBuilder() + .addValues(Value.newBuilder().setStringValue("Jane")) + .addValues(Value.newBuilder().setStringValue("Doe")) + .build()) + .build(); + + @Test + public void testExperimentalHostPreventsBatchCreateSessions() { + mockSpanner.putStatementResult( + StatementResult.query(Statement.of(SQL_QUERY), SINGERS_RESULT_SET)); + + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("p") + .setCredentials(NoCredentials.getInstance()) + .setExperimentalHost(null) + .setChannelProvider(channelProvider) + .build(); + + try (Spanner spanner = options.getService()) { + DatabaseClient dbClient = 
spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + // Perform an operation to trigger session creation + ResultSet resultSet = dbClient.singleUse().executeQuery(Statement.of(SQL_QUERY)); + while (resultSet.next()) {} + + assertFalse(mockSpanner.getRequestTypes().contains(BatchCreateSessionsRequest.class)); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/FailOnOverkillTraceComponentImpl.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/FailOnOverkillTraceComponentImpl.java new file mode 100644 index 000000000000..14f575ef3d9e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/FailOnOverkillTraceComponentImpl.java @@ -0,0 +1,295 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.errorprone.annotations.concurrent.GuardedBy; +import io.opencensus.common.Clock; +import io.opencensus.internal.ZeroTimeClock; +import io.opencensus.trace.Annotation; +import io.opencensus.trace.AttributeValue; +import io.opencensus.trace.EndSpanOptions; +import io.opencensus.trace.Link; +import io.opencensus.trace.Sampler; +import io.opencensus.trace.Span; +import io.opencensus.trace.Span.Options; +import io.opencensus.trace.SpanBuilder; +import io.opencensus.trace.SpanContext; +import io.opencensus.trace.SpanId; +import io.opencensus.trace.Status; +import io.opencensus.trace.TraceComponent; +import io.opencensus.trace.TraceId; +import io.opencensus.trace.TraceOptions; +import io.opencensus.trace.Tracer; +import io.opencensus.trace.Tracestate; +import io.opencensus.trace.config.TraceConfig; +import io.opencensus.trace.config.TraceParams; +import io.opencensus.trace.export.ExportComponent; +import io.opencensus.trace.export.RunningSpanStore; +import io.opencensus.trace.export.SampledSpanStore; +import io.opencensus.trace.export.SpanExporter; +import io.opencensus.trace.propagation.BinaryFormat; +import io.opencensus.trace.propagation.PropagationComponent; +import io.opencensus.trace.propagation.TextFormat; +import java.util.ArrayList; +import java.util.Collections; +import java.util.EnumSet; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Random; +import javax.annotation.Nullable; + +/** + * Simple {@link TraceComponent} implementation that will throw an exception if a {@link Span} is + * ended more than once. 
+ */ +public class FailOnOverkillTraceComponentImpl extends TraceComponent { + private static final Random RANDOM = new Random(); + private final Tracer tracer = new TestTracer(); + private final PropagationComponent propagationComponent = new TestPropagationComponent(); + private final Clock clock = ZeroTimeClock.getInstance(); + private final ExportComponent exportComponent = new TestExportComponent(); + private final TraceConfig traceConfig = new TestTraceConfig(); + private static final Map spans = + Collections.synchronizedMap(new LinkedHashMap<>()); + private static final List spanList = Collections.synchronizedList(new LinkedList<>()); + + private static final List annotations = new ArrayList<>(); + + public static class TestSpan extends Span { + @GuardedBy("this") + private volatile boolean ended = false; + + private final String spanName; + + private Status status; + + private final List annotations = Collections.synchronizedList(new ArrayList<>()); + + private TestSpan(String spanName, SpanContext context, EnumSet options) { + super(context, options); + this.spanName = spanName; + spans.put(this.spanName, false); + spanList.add(this); + } + + public String getSpanName() { + return this.spanName; + } + + public List getAnnotations() { + return this.annotations; + } + + @Override + public void addAnnotation(String description, Map attributes) { + FailOnOverkillTraceComponentImpl.annotations.add(description); + this.annotations.add(description); + } + + @Override + public void addAnnotation(Annotation annotation) { + FailOnOverkillTraceComponentImpl.annotations.add(annotation.getDescription()); + this.annotations.add(annotation.getDescription()); + } + + @Override + public void putAttributes(Map attributes) {} + + @Override + public void addAttributes(Map attributes) {} + + @Override + public void addLink(Link link) {} + + @Nullable + public Status getStatus() { + return this.status; + } + + @Override + public void setStatus(Status status) { + this.status = 
status; + } + + @Override + public void end(EndSpanOptions options) { + synchronized (this) { + if (ended) { + throw new IllegalStateException(this.spanName + " already ended"); + } + if (spans.containsKey(this.spanName)) { + spans.put(this.spanName, true); + ended = true; + } + } + } + } + + public static class TestSpanBuilder extends SpanBuilder { + private String spanName; + + TestSpanBuilder(String spanName) { + this.spanName = spanName; + } + + @Override + public SpanBuilder setSampler(Sampler sampler) { + return this; + } + + @Override + public SpanBuilder setParentLinks(List parentLinks) { + return this; + } + + @Override + public SpanBuilder setRecordEvents(boolean recordEvents) { + return this; + } + + @Override + public Span startSpan() { + return new TestSpan( + this.spanName, + SpanContext.create( + TraceId.generateRandomId(RANDOM), + SpanId.generateRandomId(RANDOM), + TraceOptions.builder().setIsSampled(true).build(), + Tracestate.builder().build()), + EnumSet.of(Options.RECORD_EVENTS)); + } + } + + public static class TestTracer extends Tracer { + @Override + public SpanBuilder spanBuilderWithExplicitParent(String spanName, Span parent) { + return new TestSpanBuilder(spanName); + } + + @Override + public SpanBuilder spanBuilderWithRemoteParent( + String spanName, SpanContext remoteParentSpanContext) { + return new TestSpanBuilder(spanName); + } + } + + public static class TestPropagationComponent extends PropagationComponent { + @Override + public BinaryFormat getBinaryFormat() { + return null; + } + + @Override + public TextFormat getB3Format() { + return null; + } + + @Override + public TextFormat getTraceContextFormat() { + return null; + } + } + + public static class TestSpanExporter extends SpanExporter { + @Override + public void registerHandler(String name, Handler handler) {} + + @Override + public void unregisterHandler(String name) {} + } + + public static class TestExportComponent extends ExportComponent { + private final SpanExporter 
spanExporter = new TestSpanExporter(); + + @Override + public SpanExporter getSpanExporter() { + return spanExporter; + } + + @Override + public RunningSpanStore getRunningSpanStore() { + return null; + } + + @Override + public SampledSpanStore getSampledSpanStore() { + return null; + } + } + + public static class TestTraceConfig extends TraceConfig { + private volatile TraceParams activeTraceParams = TraceParams.DEFAULT; + + @Override + public TraceParams getActiveTraceParams() { + return activeTraceParams; + } + + @Override + public void updateActiveTraceParams(TraceParams traceParams) { + this.activeTraceParams = traceParams; + } + } + + @Override + public Tracer getTracer() { + return tracer; + } + + Map getSpans() { + return spans; + } + + List getTestSpans() { + return spanList; + } + + List getAnnotations() { + return annotations; + } + + void clearSpans() { + spans.clear(); + spanList.clear(); + } + + void clearAnnotations() { + annotations.clear(); + } + + @Override + public PropagationComponent getPropagationComponent() { + return propagationComponent; + } + + @Override + public Clock getClock() { + return clock; + } + + @Override + public ExportComponent getExportComponent() { + return exportComponent; + } + + @Override + public TraceConfig getTraceConfig() { + return traceConfig; + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/FakeClock.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/FakeClock.java new file mode 100644 index 000000000000..ab2e859adffb --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/FakeClock.java @@ -0,0 +1,32 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.spanner; + +import java.time.Instant; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Class which allows to mock {@link Clock} in unit tests and return custom time values within the + * tests. + */ +class FakeClock extends Clock { + final AtomicLong currentTimeMillis = new AtomicLong(); + + @Override + public Instant instant() { + return Instant.ofEpochMilli(currentTimeMillis.get()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/FlakyTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/FlakyTest.java new file mode 100644 index 000000000000..db0e250376d7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/FlakyTest.java @@ -0,0 +1,25 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +/** + * Annotation for JUnit {@link org.junit.experimental.categories.Category} that indicates a test is + * flaky. 
These will be excluded from integration tests. Use this annotation sparingly: typically it + * should only be used for a test where the flakiness is dependent on a fix in a module dependency + * (for example, grpc-java) and cannot be addressed locally. + */ +public interface FlakyTest {} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ForceCloseSpannerFunction.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ForceCloseSpannerFunction.java new file mode 100644 index 000000000000..a065a20bb52a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ForceCloseSpannerFunction.java @@ -0,0 +1,40 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.common.base.Function; +import java.util.concurrent.TimeUnit; + +/** Class for tests that need to be able to force-close a {@link Spanner} instance. 
*/ +public class ForceCloseSpannerFunction implements Function { + private final long timeout; + private final TimeUnit unit; + + public ForceCloseSpannerFunction(long timeout, TimeUnit unit) { + this.timeout = timeout; + this.unit = unit; + } + + public Void apply(Spanner spanner) { + if (spanner instanceof SpannerImpl) { + ((SpannerImpl) spanner).close(timeout, unit); + } else { + spanner.close(); + } + return null; + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/GceTestEnvConfig.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/GceTestEnvConfig.java new file mode 100644 index 000000000000..c48c5ec2f42f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/GceTestEnvConfig.java @@ -0,0 +1,241 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.testing.ExperimentalHostHelper.isExperimentalHost; +import static com.google.cloud.spanner.testing.ExperimentalHostHelper.setExperimentalHostSpannerOptions; +import static com.google.common.base.Preconditions.checkState; + +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.auth.oauth2.ServiceAccountCredentials; +import com.google.cloud.spanner.spi.v1.SpannerInterceptorProvider; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.ForwardingClientCall.SimpleForwardingClientCall; +import io.grpc.ForwardingClientCallListener.SimpleForwardingClientCallListener; +import io.grpc.Grpc; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import io.grpc.Status; +import java.io.FileInputStream; +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.util.Random; +import java.util.concurrent.atomic.AtomicBoolean; + +/** Configure TestEnv based on configuration provided. */ +public class GceTestEnvConfig implements TestEnvConfig { + public static final String GCE_PROJECT_ID = "spanner.gce.config.project_id"; + public static final String GCE_SERVER_URL = "spanner.gce.config.server_url"; + public static final String GCE_CREDENTIALS_FILE = "spanner.gce.config.credentials_file"; + public static final String GCE_STREAM_BROKEN_PROBABILITY = + "spanner.gce.config.stream_broken_probability"; + public static final String ENABLE_DIRECT_ACCESS = "spanner.enable_direct_access"; + public static final String DIRECT_PATH_TEST_SCENARIO = "spanner.directpath_test_scenario"; + + // IP address prefixes allocated for DirectPath backends. 
+ public static final String DP_IPV6_PREFIX = "2001:4860:8040"; + public static final String DP_IPV4_PREFIX = "34.126"; + + private static final String DIRECT_PATH_ENDPOINT = "wrenchworks-nonprod.googleapis.com:443"; + + private final SpannerOptions options; + + public GceTestEnvConfig() { + String projectId = System.getProperty(GCE_PROJECT_ID, ""); + String serverUrl = System.getProperty(GCE_SERVER_URL, ""); + String credentialsFile = System.getProperty(GCE_CREDENTIALS_FILE, ""); + double errorProbability = + Double.parseDouble(System.getProperty(GCE_STREAM_BROKEN_PROBABILITY, "0.0")); + checkState(errorProbability <= 1.0); + boolean enableDirectAccess = Boolean.getBoolean(ENABLE_DIRECT_ACCESS); + String directPathTestScenario = System.getProperty(DIRECT_PATH_TEST_SCENARIO, ""); + SpannerOptions.Builder builder = + SpannerOptions.newBuilder() + .setAutoThrottleAdministrativeRequests() + .setTrackTransactionStarter(); + if (!projectId.isEmpty()) { + builder.setProjectId(projectId); + } + if (!serverUrl.isEmpty()) { + builder.setHost(serverUrl); + } + if (!credentialsFile.isEmpty()) { + try { + builder.setCredentials( + ServiceAccountCredentials.fromStream(new FileInputStream(credentialsFile))); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + SpannerInterceptorProvider interceptorProvider = + SpannerInterceptorProvider.createDefault().with(new GrpcErrorInjector(errorProbability)); + if (enableDirectAccess) { + interceptorProvider = + interceptorProvider.with(new DirectPathAddressCheckInterceptor(directPathTestScenario)); + } + builder.setInterceptorProvider(interceptorProvider); + // DirectPath tests need to set a custom endpoint to the ChannelProvider + InstantiatingGrpcChannelProvider.Builder customChannelProviderBuilder = + InstantiatingGrpcChannelProvider.newBuilder(); + if (enableDirectAccess) { + customChannelProviderBuilder + .setEndpoint(DIRECT_PATH_ENDPOINT) + .setAttemptDirectPath(true) + .setAttemptDirectPathXds() + 
.setInterceptorProvider(interceptorProvider); + builder.setChannelProvider(customChannelProviderBuilder.build()); + } + + if (isExperimentalHost()) { + setExperimentalHostSpannerOptions(builder); + } + options = builder.build(); + } + + @Override + public SpannerOptions spannerOptions() { + return options; + } + + @Override + public void setUp() {} + + @Override + public void tearDown() {} + + /** Injects errors in streaming calls to simulate call restarts */ + private static class GrpcErrorInjector implements ClientInterceptor { + + private final double errorProbability; + private final Random random = new Random(); + + GrpcErrorInjector(double errorProbability) { + this.errorProbability = errorProbability; + } + + @Override + public ClientCall interceptCall( + final MethodDescriptor method, CallOptions callOptions, Channel next) { + // Only inject errors in the Cloud Spanner data API. + if (!method.getFullMethodName().startsWith("google.spanner.v1.Spanner")) { + return next.newCall(method, callOptions); + } + + final AtomicBoolean errorInjected = new AtomicBoolean(); + final ClientCall clientCall = next.newCall(method, callOptions); + + return new SimpleForwardingClientCall(clientCall) { + @Override + public void start(Listener responseListener, Metadata headers) { + super.start( + new SimpleForwardingClientCallListener(responseListener) { + @Override + public void onMessage(RespT message) { + super.onMessage(message); + if (mayInjectError()) { + // Cancel the call after at least one response has been received. + // This will cause the call to terminate, then we can set UNAVAILABLE + // in the onClose() handler to cause a retry. + errorInjected.set(true); + clientCall.cancel("Cancelling call for injected error", null); + } + } + + @Override + public void onClose(Status status, Metadata metadata) { + if (errorInjected.get()) { + // UNAVAILABLE error will cause the call to retry. 
+ status = Status.UNAVAILABLE.augmentDescription("INJECTED BY TEST"); + } + super.onClose(status, metadata); + } + }, + headers); + } + }; + } + + private boolean mayInjectError() { + return random.nextDouble() < errorProbability; + } + } + + /** + * Captures the request attributes "Grpc.TRANSPORT_ATTR_REMOTE_ADDR" when connection is + * established and verifies if the remote address is a DirectPath address. This is only used for + * DirectPath testing. {@link ClientCall#getAttributes()} + */ + private static class DirectPathAddressCheckInterceptor implements ClientInterceptor { + private final String directPathTestScenario; + + DirectPathAddressCheckInterceptor(String directPathTestScenario) { + this.directPathTestScenario = directPathTestScenario; + } + + @Override + public ClientCall interceptCall( + MethodDescriptor method, CallOptions callOptions, Channel next) { + final ClientCall clientCall = next.newCall(method, callOptions); + return new SimpleForwardingClientCall(clientCall) { + @Override + public void start(Listener responseListener, Metadata headers) { + super.start( + new SimpleForwardingClientCallListener(responseListener) { + @Override + public void onHeaders(Metadata headers) { + // Check peer IP after connection is established. + SocketAddress remoteAddr = + clientCall.getAttributes().get(Grpc.TRANSPORT_ATTR_REMOTE_ADDR); + if (!verifyRemoteAddress(remoteAddr)) { + throw new RuntimeException( + String.format( + "Synthetically aborting the current request because it did not adhere" + + " to the test environment's requirement for DirectPath." 
+ + " Expected test for DirectPath %s scenario," + + " but RPC was destined for %s", + directPathTestScenario, remoteAddr.toString())); + } + super.onHeaders(headers); + } + }, + headers); + } + }; + } + + private boolean verifyRemoteAddress(SocketAddress remoteAddr) { + if (remoteAddr instanceof InetSocketAddress) { + InetAddress inetAddress = ((InetSocketAddress) remoteAddr).getAddress(); + String addr = inetAddress.getHostAddress(); + if (directPathTestScenario.equals("ipv4")) { + // For ipv4-only VM, client should connect to ipv4 DirectPath addresses. + return addr.startsWith(DP_IPV4_PREFIX); + } else if (directPathTestScenario.equals("ipv6")) { + // For ipv6-enabled VM, client could connect to either ipv4 or ipv6 DirectPath addresses. + return addr.startsWith(DP_IPV6_PREFIX) || addr.startsWith(DP_IPV4_PREFIX); + } + } + // For all other scenarios(e.g. fallback), we should allow both DirectPath and CFE addresses. + return true; + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/GrpcResultSetTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/GrpcResultSetTest.java new file mode 100644 index 000000000000..4007c972c24e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/GrpcResultSetTest.java @@ -0,0 +1,1257 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.testing.SerializableTester.reserialize; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.SingerProto.Genre; +import com.google.cloud.spanner.SingerProto.SingerInfo; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.common.base.Function; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.protobuf.ByteString; +import com.google.spanner.v1.ExecuteSqlRequest.QueryMode; +import com.google.spanner.v1.MultiplexedSessionPrecommitToken; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.QueryPlan; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.Transaction; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Base64; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import javax.annotation.Nullable; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link GrpcResultSet}. 
*/ +@RunWith(JUnit4.class) +public class GrpcResultSetTest { + + private GrpcResultSet resultSet; + private SpannerRpc.ResultStreamConsumer consumer; + private GrpcStreamIterator stream; + private final Duration streamWaitTimeout = Duration.ofNanos(1L); + + private static class NoOpListener implements AbstractResultSet.Listener { + @Override + public void onTransactionMetadata(Transaction transaction, boolean shouldIncludeId) + throws SpannerException {} + + @Override + public SpannerException onError( + SpannerException e, boolean withBeginTransaction, boolean lastStatement) { + return e; + } + + @Override + public void onDone(boolean withBeginTransaction) {} + + @Override + public void onPrecommitToken(MultiplexedSessionPrecommitToken token) {} + } + + @Before + public void setUp() { + stream = + new GrpcStreamIterator( + /* lastStatement= */ false, 10, /* cancelQueryWhenClientIsClosed= */ false); + stream.setCall( + new SpannerRpc.StreamingCall() { + @Override + public ApiCallContext getCallContext() { + return GrpcCallContext.createDefault().withStreamWaitTimeoutDuration(streamWaitTimeout); + } + + @Override + public void cancel(@Nullable String message) {} + + @Override + public void request(int numMessages) {} + }, + false); + consumer = stream.consumer(); + resultSet = new GrpcResultSet(stream, new NoOpListener()); + } + + public GrpcResultSet resultSetWithMode(QueryMode queryMode) { + return new GrpcResultSet(stream, new NoOpListener()); + } + + @Test + public void testStreamTimeout() { + // We don't add any results to the stream. That means that it will time out after 1ns. 
+ SpannerException exception = assertThrows(SpannerException.class, resultSet::next); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, exception.getErrorCode()); + assertTrue(exception.getMessage(), exception.getMessage().contains("stream wait timeout")); + } + + @Test + public void metadata() { + Type rowType = Type.struct(Type.StructField.of("f", Type.string())); + ResultSetMetadata.Builder metadataBuilder = ResultSetMetadata.newBuilder(); + metadataBuilder + .setRowType(rowType.toProto().getStructType()) + .getTransactionBuilder() + .setId(ByteString.copyFromUtf8("t1")); + PartialResultSet partialResultSet = + PartialResultSet.newBuilder().setMetadata(metadataBuilder.build()).build(); + + consumer.onPartialResultSet(partialResultSet); + consumer.onCompleted(); + assertThat(resultSet.next()).isFalse(); + assertThat(resultSet.getType()).isEqualTo(rowType); + } + + @Test + public void metadataFailure() { + SpannerException t = + SpannerExceptionFactory.newSpannerException(ErrorCode.DEADLINE_EXCEEDED, "outatime"); + consumer.onError(t); + SpannerException e = assertThrows(SpannerException.class, () -> resultSet.next()); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + assertThat(e.getMessage()).contains("outatime"); + } + + @Test + public void noMetadata() { + consumer.onCompleted(); + SpannerException e = assertThrows(SpannerException.class, () -> resultSet.next()); + assertEquals(ErrorCode.INTERNAL, e.getErrorCode()); + } + + @Test + public void empty() { + Type type = Type.struct(Type.StructField.of("f", Type.string())); + PartialResultSet partialResultSet = + PartialResultSet.newBuilder().setMetadata(makeMetadata(type)).build(); + consumer.onPartialResultSet(partialResultSet); + consumer.onCompleted(); + assertThat(resultSet.next()).isFalse(); + assertThat(resultSet.getType()).isEqualTo(type); + } + + @Test + public void emptyMultipleResponses() { + PartialResultSet partialResultSet = + PartialResultSet.newBuilder() + 
.setMetadata(makeMetadata(Type.struct(Type.StructField.of("f", Type.string())))) + .build(); + consumer.onPartialResultSet(partialResultSet); + consumer.onPartialResultSet(PartialResultSet.getDefaultInstance()); + consumer.onPartialResultSet(PartialResultSet.getDefaultInstance()); + consumer.onCompleted(); + assertThat(resultSet.next()).isFalse(); + } + + private List consumeAllString() { + List results = new ArrayList<>(); + while (resultSet.next()) { + results.add(resultSet.getString(0)); + } + return results; + } + + @Test + public void singleResponse() { + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(Type.StructField.of("f", Type.string())))) + .addValues(Value.string("a").toProto()) + .addValues(Value.string("b").toProto()) + .addValues(Value.string("c").toProto()) + .build()); + consumer.onCompleted(); + assertThat(consumeAllString()).containsExactly("a", "b", "c").inOrder(); + } + + @Test + public void multiResponse() { + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(Type.StructField.of("f", Type.string())))) + .addValues(Value.string("a").toProto()) + .addValues(Value.string("b").toProto()) + .build()); + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .addValues(Value.string("c").toProto()) + .addValues(Value.string("d").toProto()) + .build()); + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .addValues(Value.string("e").toProto()) + .addValues(Value.string("f").toProto()) + .build()); + consumer.onCompleted(); + assertThat(consumeAllString()).containsExactly("a", "b", "c", "d", "e", "f").inOrder(); + } + + @Test + public void multiResponseChunkingStreamClosed() { + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(Type.StructField.of("f", Type.string())))) + .addValues(Value.string("abcdefg").toProto()) + .setChunkedValue(true) + .build()); + consumer.onCompleted(); 
+ SpannerException e = assertThrows(SpannerException.class, () -> resultSet.next()); + assertEquals(ErrorCode.INTERNAL, e.getErrorCode()); + } + + @Test + public void multiResponseChunkingStrings() { + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(Type.StructField.of("f", Type.string())))) + .addValues(Value.string("before").toProto()) + .addValues(Value.string("abcdefg").toProto()) + .setChunkedValue(true) + .build()); + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .addValues(Value.string("hijklmnop").toProto()) + .setChunkedValue(true) + .build()); + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .addValues(Value.string("qrstuvwxyz").toProto()) + .addValues(Value.string("after").toProto()) + .setChunkedValue(false) + .build()); + consumer.onCompleted(); + assertThat(consumeAllString()) + .containsExactly("before", "abcdefghijklmnopqrstuvwxyz", "after") + .inOrder(); + } + + @Test + public void multiResponseChunkingBytes() { + ByteArray expectedBytes = ByteArray.copyFrom("abcdefghijklmnopqrstuvwxyz"); + String base64 = expectedBytes.toBase64(); + String chunk1 = base64.substring(0, 10); + String chunk2 = base64.substring(10, 20); + String chunk3 = base64.substring(20); + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(Type.StructField.of("f", Type.bytes())))) + .addValues(Value.bytes(ByteArray.copyFrom("before")).toProto()) + .addValues(com.google.protobuf.Value.newBuilder().setStringValue(chunk1)) + .setChunkedValue(true) + .build()); + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue(chunk2)) + .setChunkedValue(true) + .build()); + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue(chunk3)) + .addValues(Value.bytes(ByteArray.copyFrom("after")).toProto()) + 
.setChunkedValue(false) + .build()); + consumer.onCompleted(); + List results = new ArrayList<>(); + while (resultSet.next()) { + results.add(resultSet.getBytes(0)); + } + assertThat(results) + .containsExactly(ByteArray.copyFrom("before"), expectedBytes, ByteArray.copyFrom("after")) + .inOrder(); + } + + @Test + public void multiResponseChunkingBoolArray() { + List beforeValue = Collections.singletonList(true); + List chunkedValue = Arrays.asList(false, null, true, true, true, null, null, false); + List afterValue = Collections.singletonList(true); + doArrayTest( + beforeValue, + chunkedValue, + afterValue, + Type.bool(), + input -> Value.boolArray(input).toProto(), + input -> input.getBooleanList(0)); + } + + @Test + public void multiResponseChunkingInt64Array() { + List beforeValue = Collections.singletonList(10L); + List chunkedValue = Arrays.asList(1L, 2L, null, null, 5L, null, 7L, 8L); + List afterValue = Collections.singletonList(20L); + doArrayTest( + beforeValue, + chunkedValue, + afterValue, + Type.int64(), + input -> Value.int64Array(input).toProto(), + input -> input.getLongList(0)); + } + + @Test + public void multiResponseChunkingFloat64Array() { + List beforeValue = Collections.singletonList(10.0); + List chunkedValue = Arrays.asList(null, 2.0, 3.0, 4.0, null, 6.0, 7.0, null); + List afterValue = Collections.singletonList(20.0); + doArrayTest( + beforeValue, + chunkedValue, + afterValue, + Type.float64(), + input -> Value.float64Array(input).toProto(), + input -> input.getDoubleList(0)); + } + + @Test + public void multiResponseChunkingStringArray() { + List beforeValue = Collections.singletonList("before"); + List chunkedValue = Arrays.asList("a", "b", null, "d", null, "f", null, "h"); + List afterValue = Collections.singletonList("after"); + doArrayTest( + beforeValue, + chunkedValue, + afterValue, + Type.string(), + input -> Value.stringArray(input).toProto(), + input -> input.getStringList(0)); + } + + private static ByteArray b(String data) { + 
return ByteArray.copyFrom(data); + } + + @Test + public void multiResponseChunkingBytesArray() { + List beforeValue = Collections.singletonList(b("before")); + List chunkedValue = + Arrays.asList(b("a"), b("b"), null, b("d"), null, b("f"), null, b("h")); + List afterValue = Collections.singletonList(b("after")); + doArrayTest( + beforeValue, + chunkedValue, + afterValue, + Type.bytes(), + input -> Value.bytesArray(input).toProto(), + input -> input.getBytesList(0)); + } + + private static Struct s(String a, long b) { + return Struct.newBuilder().set("a").to(a).set("b").to(b).build(); + } + + @Test + public void multiResponseChunkingStructArray() { + final Type elementType = + Type.struct( + Type.StructField.of("a", Type.string()), Type.StructField.of("b", Type.int64())); + List beforeValue = Collections.singletonList(s("before", 10)); + List chunkedValue = + Arrays.asList( + s("a", 1), s("b", 2), s("c", 3), null, s(null, 5), null, s("g", 7), s("h", 8)); + List afterValue = Collections.singletonList(s("after", 20)); + doArrayTest( + beforeValue, + chunkedValue, + afterValue, + elementType, + input -> Value.structArray(elementType, input).toProto(), + input -> input.getStructList(0)); + } + + @Test + public void profileResultInFinalResultSet() { + Map statsMap = + ImmutableMap.of( + "f1", Value.string("").toProto(), + "f2", Value.string("").toProto()); + ResultSetStats stats = + ResultSetStats.newBuilder() + .setQueryPlan(QueryPlan.newBuilder().build()) + .setQueryStats(com.google.protobuf.Struct.newBuilder().putAllFields(statsMap).build()) + .build(); + ArrayList dataType = new ArrayList<>(); + dataType.add(Type.StructField.of("data", Type.string())); + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(dataType))) + .addValues(Value.string("d1").toProto()) + .setChunkedValue(false) + .setStats(stats) + .build()); + resultSet = resultSetWithMode(QueryMode.PROFILE); + consumer.onCompleted(); + 
assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.next()).isFalse(); + ResultSetStats receivedStats = resultSet.getStats(); + assertThat(stats).isEqualTo(receivedStats); + resultSet.close(); + } + + @Test + public void profileResultInExtraFinalResultSet() { + Map statsMap = + ImmutableMap.of( + "f1", Value.string("").toProto(), + "f2", Value.string("").toProto()); + ResultSetStats stats = + ResultSetStats.newBuilder() + .setQueryPlan(QueryPlan.newBuilder().build()) + .setQueryStats(com.google.protobuf.Struct.newBuilder().putAllFields(statsMap).build()) + .build(); + ArrayList dataType = new ArrayList<>(); + dataType.add(Type.StructField.of("data", Type.string())); + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(dataType))) + .addValues(Value.string("d1").toProto()) + .setChunkedValue(false) + .build()); + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(dataType))) + .setChunkedValue(false) + .setStats(stats) + .build()); + resultSet = resultSetWithMode(QueryMode.PROFILE); + consumer.onCompleted(); + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.next()).isFalse(); + ResultSetStats receivedStats = resultSet.getStats(); + assertThat(stats).isEqualTo(receivedStats); + resultSet.close(); + } + + @Test + public void planResult() { + ResultSetStats stats = + ResultSetStats.newBuilder().setQueryPlan(QueryPlan.newBuilder().build()).build(); + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(new ArrayList<>()))) + .setChunkedValue(false) + .setStats(stats) + .build()); + resultSet = resultSetWithMode(QueryMode.PLAN); + consumer.onCompleted(); + assertThat(resultSet.next()).isFalse(); + ResultSetStats receivedStats = resultSet.getStats(); + assertThat(stats).isEqualTo(receivedStats); + resultSet.close(); + } + + @Test + public void statsUnavailable() { + ResultSetStats stats = 
ResultSetStats.newBuilder().build(); + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(new ArrayList<>()))) + .setChunkedValue(false) + .setStats(stats) + .build()); + resultSet = resultSetWithMode(QueryMode.PROFILE); + consumer.onCompleted(); + assertThat(resultSet.getStats()).isNull(); + } + + private void doArrayTest( + List beforeValue, + List chunkedValue, + List afterValue, + Type elementType, + Function, com.google.protobuf.Value> toProto, + Function> getter) { + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata( + makeMetadata(Type.struct(Type.StructField.of("f", Type.array(elementType))))) + .addValues(toProto.apply(beforeValue)) + .addValues(toProto.apply(chunkedValue.subList(0, 3))) + .setChunkedValue(true) + .build()); + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .addValues(toProto.apply(chunkedValue.subList(3, 5))) + .setChunkedValue(true) + .build()); + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .addValues(toProto.apply(chunkedValue.subList(5, chunkedValue.size()))) + .addValues(toProto.apply(afterValue)) + .setChunkedValue(false) + .build()); + consumer.onCompleted(); + assertThat(resultSet.next()).isTrue(); + assertThat(getter.apply(resultSet)).containsExactlyElementsIn(beforeValue).inOrder(); + assertThat(resultSet.next()).isTrue(); + assertThat(getter.apply(resultSet)).containsExactlyElementsIn(chunkedValue).inOrder(); + assertThat(resultSet.next()).isTrue(); + assertThat(getter.apply(resultSet)).containsExactlyElementsIn(afterValue).inOrder(); + assertThat(resultSet.next()).isFalse(); + } + + private static ResultSetMetadata makeMetadata(Type rowType) { + com.google.spanner.v1.Type typeProto = rowType.toProto(); + return ResultSetMetadata.newBuilder().setRowType(typeProto.getStructType()).build(); + } + + @Test + public void serialization() { + Type structType = + Type.struct( + Arrays.asList( + Type.StructField.of("a", 
Type.string()), Type.StructField.of("b", Type.int64()))); + + verifySerialization( + Value.string("a"), + Value.string(null), + Value.bool(true), + Value.bool(null), + Value.int64(1), + Value.int64(null), + Value.float64(1.0), + Value.float64(null), + Value.float32(1.0f), + Value.float32(null), + Value.bytes(ByteArray.fromBase64("abcd")), + Value.bytesFromBase64( + Base64.getEncoder().encodeToString("test".getBytes(StandardCharsets.UTF_8))), + Value.bytes(null), + Value.bytesFromBase64(null), + Value.timestamp(Timestamp.ofTimeSecondsAndNanos(1, 2)), + Value.timestamp(null), + Value.date(Date.fromYearMonthDay(2017, 4, 17)), + Value.date(null), + Value.uuid(UUID.randomUUID()), + Value.uuid(null), + Value.interval( + Interval.builder() + .setMonths(100) + .setDays(10) + .setNanos(BigInteger.valueOf(1000010)) + .build()), + Value.interval(null), + Value.stringArray(ImmutableList.of("one", "two")), + Value.stringArray(null), + Value.boolArray(new boolean[] {true, false}), + Value.boolArray((boolean[]) null), + Value.int64Array(new long[] {1, 2, 3}), + Value.int64Array((long[]) null), + Value.float64Array(new double[] {1.1, 2.2, 3.3}), + Value.float64Array((double[]) null), + Value.float32Array(new float[] {1.1f, 2.2f, 3.3f}), + Value.float32Array((float[]) null), + Value.bytesArray(Arrays.asList(ByteArray.fromBase64("abcd"), null)), + Value.bytesArrayFromBase64( + Arrays.asList( + Base64.getEncoder().encodeToString("test".getBytes(StandardCharsets.UTF_8)), null)), + Value.bytesArray(null), + Value.bytesArrayFromBase64(null), + Value.timestampArray(ImmutableList.of(Timestamp.MAX_VALUE, Timestamp.MAX_VALUE)), + Value.timestampArray(null), + Value.dateArray( + ImmutableList.of( + Date.fromYearMonthDay(2017, 4, 17), Date.fromYearMonthDay(2017, 5, 18))), + Value.dateArray(null), + Value.uuidArray(ImmutableList.of(UUID.randomUUID(), UUID.randomUUID())), + Value.uuidArray(null), + Value.intervalArray( + ImmutableList.of( + Interval.parseFromString("P0Y"), + 
Interval.fromMonthsDaysNanos(10, 20, BigInteger.valueOf(30000L)))), + Value.intervalArray(null), + Value.struct(s(null, 30)), + Value.struct(structType, null), + Value.structArray(structType, Arrays.asList(s("def", 10), null)), + Value.structArray(structType, Collections.singletonList(null)), + Value.structArray(structType, null)); + } + + @Test + public void nestedStructSerialization() { + Type structType = + Type.struct( + Arrays.asList( + Type.StructField.of("a", Type.string()), Type.StructField.of("b", Type.int64()))); + + Struct nestedStruct = s("1", 2L); + Value struct = Value.structArray(structType, Collections.singletonList(nestedStruct)); + verifySerialization( + new Function() { + + @Override + @Nullable + public com.google.protobuf.Value apply(@Nullable Value input) { + return input.toProto(); + } + }, + struct); + } + + private void verifySerialization(Value... values) { + verifySerialization( + new Function() { + + @Override + @Nullable + public com.google.protobuf.Value apply(@Nullable Value input) { + return input.toProto(); + } + }, + values); + } + + private void verifySerialization( + Function protoFn, Value... 
values) { + resultSet = new GrpcResultSet(stream, new NoOpListener()); + PartialResultSet.Builder builder = PartialResultSet.newBuilder(); + List types = new ArrayList<>(); + for (Value value : values) { + types.add(Type.StructField.of("f", value.getType())); + builder.addValues(protoFn.apply(value)); + } + consumer.onPartialResultSet(builder.setMetadata(makeMetadata(Type.struct(types))).build()); + consumer.onCompleted(); + assertThat(resultSet.next()).isTrue(); + Struct row = resultSet.getCurrentRowAsStruct(); + Struct copy = reserialize(row); + assertThat(row).isEqualTo(copy); + } + + @Test + public void getBoolean() { + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(Type.StructField.of("f", Type.bool())))) + .addValues(Value.bool(true).toProto()) + .addValues(Value.bool(false).toProto()) + .build()); + consumer.onCompleted(); + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getBoolean(0)).isTrue(); + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getBoolean(0)).isFalse(); + } + + @Test + public void getDouble() { + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(Type.StructField.of("f", Type.float64())))) + .addValues(Value.float64(Double.MIN_VALUE).toProto()) + .addValues(Value.float64(Double.MAX_VALUE).toProto()) + .build()); + consumer.onCompleted(); + + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getDouble(0)).isWithin(0.0).of(Double.MIN_VALUE); + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getDouble(0)).isWithin(0.0).of(Double.MAX_VALUE); + } + + @Test + public void getFloat() { + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(Type.StructField.of("f", Type.float32())))) + .addValues(Value.float32(Float.MIN_VALUE).toProto()) + .addValues(Value.float32(Float.MAX_VALUE).toProto()) + .build()); + consumer.onCompleted(); + + 
assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getFloat(0)).isWithin(0.0f).of(Float.MIN_VALUE); + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getFloat(0)).isWithin(0.0f).of(Float.MAX_VALUE); + } + + @Test + public void getBigDecimal() { + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(Type.StructField.of("f", Type.numeric())))) + .addValues( + Value.numeric( + new BigDecimal( + "-" + Strings.repeat("9", 29) + "." + Strings.repeat("9", 9))) + .toProto()) + .addValues( + Value.numeric( + new BigDecimal(Strings.repeat("9", 29) + "." + Strings.repeat("9", 9))) + .toProto()) + .addValues(Value.numeric(BigDecimal.ZERO).toProto()) + .addValues(Value.numeric(new BigDecimal("1.23456")).toProto()) + .build()); + consumer.onCompleted(); + + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getBigDecimal(0).toPlainString()) + .isEqualTo("-99999999999999999999999999999.999999999"); + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getBigDecimal(0).toPlainString()) + .isEqualTo("99999999999999999999999999999.999999999"); + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getBigDecimal(0)).isEqualTo(BigDecimal.ZERO); + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getBigDecimal(0)).isEqualTo(BigDecimal.valueOf(123456, 5)); + } + + @Test + public void getLong() { + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(Type.StructField.of("f", Type.int64())))) + .addValues(Value.int64(Long.MIN_VALUE).toProto()) + .addValues(Value.int64(Long.MAX_VALUE).toProto()) + .build()); + consumer.onCompleted(); + + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getLong(0)).isEqualTo(Long.MIN_VALUE); + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getLong(0)).isEqualTo(Long.MAX_VALUE); + } + + @Test + public void getDate() { + consumer.onPartialResultSet( + 
PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(Type.StructField.of("f", Type.date())))) + .addValues(Value.date(Date.fromYearMonthDay(2018, 5, 29)).toProto()) + .build()); + consumer.onCompleted(); + + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getDate(0)).isEqualTo(Date.fromYearMonthDay(2018, 5, 29)); + } + + @Test + public void getUuid() { + final UUID uuid = UUID.randomUUID(); + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(Type.StructField.of("f", Type.uuid())))) + .addValues(Value.uuid(uuid).toProto()) + .build()); + consumer.onCompleted(); + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getUuid(0)).isEqualTo(uuid); + } + + @Test + public void getInterval() { + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(Type.StructField.of("f", Type.interval())))) + .addValues( + Value.interval(Interval.fromMonthsDaysNanos(10, 20, BigInteger.valueOf(12345678))) + .toProto()) + .build()); + consumer.onCompleted(); + + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getInterval(0)) + .isEqualTo(Interval.fromMonthsDaysNanos(10, 20, BigInteger.valueOf(12345678))); + } + + @Test + public void getTimestamp() { + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(Type.StructField.of("f", Type.timestamp())))) + .addValues(Value.timestamp(Timestamp.parseTimestamp("0001-01-01T00:00:00Z")).toProto()) + .build()); + consumer.onCompleted(); + + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getTimestamp(0)) + .isEqualTo(Timestamp.parseTimestamp("0001-01-01T00:00:00Z")); + } + + @Test + public void getJson() { + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(Type.StructField.of("f", Type.json())))) + .addValues(Value.json("{\"color\":\"red\",\"value\":\"#f00\"}").toProto()) + 
.addValues(Value.json("{}").toProto()) + .addValues(Value.json("[]").toProto()) + .build()); + consumer.onCompleted(); + + assertTrue(resultSet.next()); + assertEquals("{\"color\":\"red\",\"value\":\"#f00\"}", resultSet.getJson(0)); + assertTrue(resultSet.next()); + assertEquals("{}", resultSet.getJson(0)); + assertTrue(resultSet.next()); + assertEquals("[]", resultSet.getJson(0)); + } + + @Test + public void getPgJsonb() { + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(Type.StructField.of("f", Type.pgJsonb())))) + .addValues(Value.pgJsonb("{\"color\":\"red\",\"value\":\"#f00\"}").toProto()) + .addValues(Value.pgJsonb("{}").toProto()) + .addValues(Value.pgJsonb("[]").toProto()) + .build()); + consumer.onCompleted(); + + assertTrue(resultSet.next()); + assertEquals("{\"color\":\"red\",\"value\":\"#f00\"}", resultSet.getPgJsonb(0)); + assertTrue(resultSet.next()); + assertEquals("{}", resultSet.getPgJsonb(0)); + assertTrue(resultSet.next()); + assertEquals("[]", resultSet.getPgJsonb(0)); + } + + @Test + public void getPgOid() { + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata(makeMetadata(Type.struct(Type.StructField.of("f", Type.pgOid())))) + .addValues(Value.pgOid(Long.MIN_VALUE).toProto()) + .addValues(Value.pgOid(Long.MAX_VALUE).toProto()) + .build()); + consumer.onCompleted(); + + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getLong(0)).isEqualTo(Long.MIN_VALUE); + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getLong(0)).isEqualTo(Long.MAX_VALUE); + } + + @Test + public void getProtoMessage() { + SingerInfo singerInfo1 = + SingerInfo.newBuilder() + .setSingerId(111) + .setNationality("COUNTRY1") + .setGenre(Genre.FOLK) + .build(); + SingerInfo singerInfo2 = SingerInfo.newBuilder().setSingerId(222).setGenre(Genre.JAZZ).build(); + String singerInfoFullName = SingerInfo.getDescriptor().getFullName(); + + consumer.onPartialResultSet( + 
PartialResultSet.newBuilder() + .setMetadata( + makeMetadata(Type.struct(Type.StructField.of("f", Type.proto(singerInfoFullName))))) + .addValues(Value.protoMessage(singerInfo1).toProto()) + .addValues( + Value.protoMessage( + ByteArray.copyFrom(singerInfo2.toByteArray()), singerInfoFullName) + .toProto()) + .addValues(Value.protoMessage(null, SingerInfo.getDescriptor().getFullName()).toProto()) + .build()); + consumer.onCompleted(); + + assertTrue(resultSet.next()); + assertEquals(singerInfo1, resultSet.getProtoMessage(0, SingerInfo.getDefaultInstance())); + assertTrue(resultSet.next()); + assertEquals(singerInfo2, resultSet.getProtoMessage(0, SingerInfo.getDefaultInstance())); + assertTrue(resultSet.next()); + assertThrows( + NullPointerException.class, + () -> { + resultSet.getProtoMessage(0, SingerInfo.getDefaultInstance()); + }); + } + + @Test + public void getProtoEnum() { + String genreFullyQualifiedName = Genre.getDescriptor().getFullName(); + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata( + makeMetadata( + Type.struct(Type.StructField.of("f", Type.protoEnum(genreFullyQualifiedName))))) + .addValues(Value.protoEnum(Genre.FOLK).toProto()) + .addValues(Value.protoEnum(Genre.JAZZ.getNumber(), genreFullyQualifiedName).toProto()) + .addValues(Value.protoEnum(null, genreFullyQualifiedName).toProto()) + .build()); + consumer.onCompleted(); + + assertTrue(resultSet.next()); + assertEquals(Genre.FOLK, resultSet.getProtoEnum(0, Genre::forNumber)); + assertTrue(resultSet.next()); + assertEquals(Genre.JAZZ, resultSet.getProtoEnum(0, Genre::forNumber)); + assertTrue(resultSet.next()); + assertThrows( + NullPointerException.class, + () -> { + resultSet.getProtoEnum(0, Genre::forNumber); + }); + } + + @Test + public void getBooleanArray() { + boolean[] boolArray = {true, true, false}; + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata( + makeMetadata(Type.struct(Type.StructField.of("f", Type.array(Type.bool()))))) + 
.addValues(Value.boolArray(boolArray).toProto()) + .build()); + consumer.onCompleted(); + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getBooleanArray(0)).isEqualTo(boolArray); + } + + @Test + public void getLongArray() { + long[] longArray = {111, 333, 444, 0, -1, -2234, Long.MAX_VALUE, Long.MIN_VALUE}; + + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata( + makeMetadata(Type.struct(Type.StructField.of("f", Type.array(Type.int64()))))) + .addValues(Value.int64Array(longArray).toProto()) + .build()); + consumer.onCompleted(); + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getLongArray(0)).isEqualTo(longArray); + } + + @Test + public void getDoubleArray() { + double[] doubleArray = {Double.MAX_VALUE, Double.MIN_VALUE, 111, 333, 444, 0, -1, -2234}; + + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata( + makeMetadata(Type.struct(Type.StructField.of("f", Type.array(Type.float64()))))) + .addValues(Value.float64Array(doubleArray).toProto()) + .build()); + consumer.onCompleted(); + + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getDoubleArray(0)) + .usingTolerance(0.0) + .containsExactly(doubleArray) + .inOrder(); + } + + @Test + public void getFloatArray() { + float[] floatArray = {Float.MAX_VALUE, Float.MIN_VALUE, 111, 333, 444, 0, -1, -2234}; + + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata( + makeMetadata(Type.struct(Type.StructField.of("f", Type.array(Type.float32()))))) + .addValues(Value.float32Array(floatArray).toProto()) + .build()); + consumer.onCompleted(); + + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getFloatArray(0)) + .usingTolerance(0.0) + .containsExactly(floatArray) + .inOrder(); + } + + @Test + public void getBigDecimalList() { + List bigDecimalsList = new ArrayList<>(); + bigDecimalsList.add(BigDecimal.valueOf(Double.MIN_VALUE)); + bigDecimalsList.add(BigDecimal.valueOf(Double.MAX_VALUE)); + 
bigDecimalsList.add(BigDecimal.ZERO); + bigDecimalsList.add(new BigDecimal("1.23456")); + + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata( + makeMetadata(Type.struct(Type.StructField.of("f", Type.array(Type.numeric()))))) + .addValues(Value.numericArray(bigDecimalsList).toProto()) + .build()); + consumer.onCompleted(); + + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getBigDecimalList(0)).isEqualTo(bigDecimalsList); + } + + @Test + public void getTimestampList() { + List timestampList = new ArrayList<>(); + timestampList.add(Timestamp.parseTimestamp("0001-01-01T00:00:00Z")); + timestampList.add(Timestamp.parseTimestamp("0002-02-02T02:00:00Z")); + + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata( + makeMetadata(Type.struct(Type.StructField.of("f", Type.array(Type.timestamp()))))) + .addValues(Value.timestampArray(timestampList).toProto()) + .build()); + consumer.onCompleted(); + + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getTimestampList(0)).isEqualTo(timestampList); + } + + @Test + public void getDateList() { + List dateList = new ArrayList<>(); + dateList.add(Date.fromYearMonthDay(1999, 8, 23)); + dateList.add(Date.fromYearMonthDay(1986, 3, 17)); + + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata( + makeMetadata(Type.struct(Type.StructField.of("f", Type.array(Type.date()))))) + .addValues(Value.dateArray(dateList).toProto()) + .build()); + consumer.onCompleted(); + + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getDateList(0)).isEqualTo(dateList); + } + + @Test + public void getUuidList() { + List uuidList = Arrays.asList(UUID.randomUUID(), UUID.randomUUID()); + + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata( + makeMetadata(Type.struct(Type.StructField.of("f", Type.array(Type.uuid()))))) + .addValues(Value.uuidArray(uuidList).toProto()) + .build()); + consumer.onCompleted(); + + 
assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getUuidList(0)).isEqualTo(uuidList); + } + + @Test + public void getIntervalList() { + List intervalList = new ArrayList<>(); + intervalList.add(Interval.fromMonthsDaysNanos(10, 20, BigInteger.valueOf(100))); + intervalList.add(Interval.fromMonthsDaysNanos(-10, -20, BigInteger.valueOf(134520))); + + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata( + makeMetadata(Type.struct(Type.StructField.of("f", Type.array(Type.interval()))))) + .addValues(Value.intervalArray(intervalList).toProto()) + .build()); + consumer.onCompleted(); + + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getIntervalList(0)).isEqualTo(intervalList); + } + + @Test + public void getJsonList() { + List jsonList = new ArrayList<>(); + jsonList.add("{\"color\":\"red\",\"value\":\"#f00\"}"); + jsonList.add("{\"special\":\"%😃∮πρότερονแผ่นดินฮั่นเสื่อมሰማይᚻᛖ\"}"); + jsonList.add("[]"); + + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata( + makeMetadata(Type.struct(Type.StructField.of("f", Type.array(Type.json()))))) + .addValues(Value.jsonArray(jsonList).toProto()) + .build()); + consumer.onCompleted(); + + assertTrue(resultSet.next()); + assertEquals(jsonList, resultSet.getJsonList(0)); + } + + @Test + public void getPgJsonbList() { + List jsonList = new ArrayList<>(); + jsonList.add("{\"color\":\"red\",\"value\":\"#f00\"}"); + jsonList.add("{\"special\":\"%😃∮πρότερονแผ่นดินฮั่นเสื่อมሰማይᚻᛖ\"}"); + jsonList.add("[]"); + + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata( + makeMetadata(Type.struct(Type.StructField.of("f", Type.array(Type.pgJsonb()))))) + .addValues(Value.pgJsonbArray(jsonList).toProto()) + .build()); + consumer.onCompleted(); + + assertTrue(resultSet.next()); + assertEquals(jsonList, resultSet.getPgJsonbList(0)); + } + + @Test + public void getPgOidArray() { + long[] longArray = {111, 333, 444, 0, -1, -2234, Long.MAX_VALUE, 
Long.MIN_VALUE}; + + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata( + makeMetadata(Type.struct(Type.StructField.of("f", Type.array(Type.pgOid()))))) + .addValues(Value.pgOidArray(longArray).toProto()) + .build()); + consumer.onCompleted(); + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getLongArray(0)).isEqualTo(longArray); + } + + @Test + public void getProtoMessageList() { + SingerInfo singerInfo1 = + SingerInfo.newBuilder() + .setSingerId(111) + .setNationality("COUNTRY1") + .setGenre(Genre.FOLK) + .build(); + SingerInfo singerInfo2 = SingerInfo.newBuilder().setSingerId(222).setGenre(Genre.JAZZ).build(); + String singerInfoFullName = SingerInfo.getDescriptor().getFullName(); + + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata( + makeMetadata( + Type.struct( + Type.StructField.of("f", Type.array(Type.proto(singerInfoFullName)))))) + .addValues( + Value.protoMessageArray( + Arrays.asList(singerInfo1, singerInfo2), SingerInfo.getDescriptor()) + .toProto()) + .addValues( + Value.protoMessageArray( + Arrays.asList(singerInfo2, null, singerInfo1), SingerInfo.getDescriptor()) + .toProto()) + .addValues(Value.protoMessageArray(null, SingerInfo.getDescriptor()).toProto()) + .build()); + consumer.onCompleted(); + + assertTrue(resultSet.next()); + assertEquals( + Arrays.asList(singerInfo1, singerInfo2), + resultSet.getProtoMessageList(0, SingerInfo.getDefaultInstance())); + assertTrue(resultSet.next()); + assertEquals( + Arrays.asList(singerInfo2, null, singerInfo1), + resultSet.getProtoMessageList(0, SingerInfo.getDefaultInstance())); + assertTrue(resultSet.next()); + assertThrows( + NullPointerException.class, + () -> { + resultSet.getProtoMessageList(0, SingerInfo.getDefaultInstance()); + }); + } + + @Test + public void getProtoEnumList() { + String genreFullyQualifiedName = Genre.getDescriptor().getFullName(); + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata( + 
makeMetadata( + Type.struct(Type.StructField.of("f", Type.protoEnum(genreFullyQualifiedName))))) + .addValues(Value.protoEnum(Genre.FOLK).toProto()) + .addValues(Value.protoEnum(Genre.JAZZ.getNumber(), genreFullyQualifiedName).toProto()) + .addValues(Value.protoEnum(null, genreFullyQualifiedName).toProto()) + .build()); + consumer.onCompleted(); + + assertTrue(resultSet.next()); + assertEquals(Genre.FOLK, resultSet.getProtoEnum(0, Genre::forNumber)); + assertTrue(resultSet.next()); + assertEquals(Genre.JAZZ, resultSet.getProtoEnum(0, Genre::forNumber)); + assertTrue(resultSet.next()); + assertThrows( + NullPointerException.class, + () -> { + resultSet.getProtoEnum(0, Genre::forNumber); + }); + } + + @Test + public void verifyResultSetWithLastTrue() { + long[] longArray = {111, 333, 444, 0, -1, -2234, Long.MAX_VALUE, Long.MIN_VALUE}; + + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata( + makeMetadata(Type.struct(Type.StructField.of("f", Type.array(Type.int64()))))) + .addValues(Value.int64Array(longArray).toProto()) + .setLast(false) + .build()); + assertTrue(resultSet.next()); + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata( + makeMetadata(Type.struct(Type.StructField.of("f", Type.array(Type.int64()))))) + .addValues(Value.int64Array(longArray).toProto()) + .setLast(true) + .build()); + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + consumer.onCompleted(); + } + + @Test + public void shouldThrowDeadlineExceededIfLastTrueIsNotReceived() { + long[] longArray = {111, 333, 444, 0, -1, -2234, Long.MAX_VALUE, Long.MIN_VALUE}; + + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata( + makeMetadata(Type.struct(Type.StructField.of("f", Type.array(Type.int64()))))) + .addValues(Value.int64Array(longArray).toProto()) + .setLast(false) + .build()); + assertTrue(resultSet.next()); + consumer.onPartialResultSet( + PartialResultSet.newBuilder() + .setMetadata( + 
makeMetadata(Type.struct(Type.StructField.of("f", Type.array(Type.int64()))))) + .addValues(Value.int64Array(longArray).toProto()) + .setLast(false) + .build()); + assertTrue(resultSet.next()); + SpannerException spannerException = + assertThrows( + SpannerException.class, + () -> { + assertThat(resultSet.next()).isFalse(); + }); + assertEquals("DEADLINE_EXCEEDED: stream wait timeout", spannerException.getMessage()); + consumer.onCompleted(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ITTransactionRetryTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ITTransactionRetryTest.java new file mode 100644 index 000000000000..e93abc3f8efa --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ITTransactionRetryTest.java @@ -0,0 +1,143 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static com.google.cloud.spanner.testing.ExperimentalHostHelper.isExperimentalHost; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeFalse; +import static org.junit.Assume.assumeTrue; + +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITTransactionRetryTest { + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + + @Test + public void TestRetryInfo() { + assumeFalse("emulator does not support parallel transaction", isUsingEmulator()); + // TODO(sakthivelmani) - Re-enable once b/422916293 is resolved + assumeFalse( + "Skipping the test due to a known bug b/422916293", + env.getTestHelper().getOptions().isEnableDirectAccess()); + assumeFalse("Skipping the test due to a known bug b/422916293", isExperimentalHost()); + + // Creating a database with the table which contains INT64 columns + Database db = + env.getTestHelper() + .createTestDatabase("CREATE TABLE Test(ID INT64, " + "EMPID INT64) PRIMARY KEY (ID)"); + DatabaseClient databaseClient = env.getTestHelper().getClient().getDatabaseClient(db.getId()); + + // Inserting one row + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.buffer( + Mutation.newInsertBuilder("Test").set("ID").to(1).set("EMPID").to(1).build()); + return null; + }); + + int numRetries = 10; + boolean isAbortedWithRetryInfo = false; + while (numRetries-- > 0) { + try (TransactionManager transactionManager1 = databaseClient.transactionManager()) { + try (TransactionManager transactionManager2 = 
databaseClient.transactionManager()) { + try { + TransactionContext transaction1 = transactionManager1.begin(); + TransactionContext transaction2 = transactionManager2.begin(); + transaction1.executeUpdate( + Statement.of("UPDATE Test SET EMPID = EMPID + 1 WHERE ID = 1")); + transaction2.executeUpdate( + Statement.of("UPDATE Test SET EMPID = EMPID + 1 WHERE ID = 1")); + transactionManager1.commit(); + transactionManager2.commit(); + } catch (AbortedException abortedException) { + assertThat(abortedException.getErrorCode()).isEqualTo(ErrorCode.ABORTED); + if (abortedException.getRetryDelayInMillis() > 0) { + isAbortedWithRetryInfo = true; + break; + } + } + } + } + } + + assertTrue("Transaction is not aborted with the trailers", isAbortedWithRetryInfo); + } + + @Test + public void TestRetryInfoWithDirectPath() { + assumeFalse("emulator does not support parallel transaction", isUsingEmulator()); + // TODO(sakthivelmani) - Re-enable once b/422916293 is resolved + assumeTrue( + "Enabling this test due to bug b/422916293", + env.getTestHelper().getOptions().isEnableDirectAccess()); + + // Creating a database with the table which contains INT64 columns + Database db = + env.getTestHelper() + .createTestDatabase("CREATE TABLE Test(ID INT64, " + "EMPID INT64) PRIMARY KEY (ID)"); + DatabaseClient databaseClient = env.getTestHelper().getClient().getDatabaseClient(db.getId()); + + // Inserting one row + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.buffer( + Mutation.newInsertBuilder("Test").set("ID").to(1).set("EMPID").to(1).build()); + return null; + }); + + int numRetries = 10; + boolean isAbortedWithRetryInfo = false; + while (numRetries-- > 0) { + try (TransactionManager transactionManager1 = databaseClient.transactionManager()) { + try (TransactionManager transactionManager2 = databaseClient.transactionManager()) { + try { + TransactionContext transaction1 = transactionManager1.begin(); + TransactionContext transaction2 = 
transactionManager2.begin(); + transaction1.executeUpdate( + Statement.of("UPDATE Test SET EMPID = EMPID + 1 WHERE ID = 1")); + transaction2.executeUpdate( + Statement.of("UPDATE Test SET EMPID = EMPID + 1 WHERE ID = 1")); + transactionManager1.commit(); + transactionManager2.commit(); + } catch (AbortedException abortedException) { + assertThat(abortedException.getErrorCode()).isEqualTo(ErrorCode.ABORTED); + if (abortedException.getRetryDelayInMillis() > 0) { + isAbortedWithRetryInfo = true; + break; + } + } + } + } + } + + assertFalse("Transaction is aborted with the trailers", isAbortedWithRetryInfo); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InlineBeginBenchmark.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InlineBeginBenchmark.java new file mode 100644 index 000000000000..c3063f4d6c54 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InlineBeginBenchmark.java @@ -0,0 +1,227 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.cloud.NoCredentials; +import com.google.common.base.Stopwatch; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningScheduledExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.TearDown; +import org.openjdk.jmh.annotations.Warmup; + +/** + * Benchmarks for inlining the BeginTransaction RPC with the first statement of a transaction. The + * simulated execution times are based on reasonable estimates and are primarily intended to keep + * the benchmarks comparable with each other before and after changes have been made to the pool. 
+ * The benchmarks are bound to the Maven profile `benchmark` and can be executed like this: + * mvn clean test -DskipTests -Pbenchmark -Dbenchmark.name=InlineBeginBenchmark + * + */ +@BenchmarkMode(Mode.AverageTime) +@Fork(value = 1, warmups = 0) +@Measurement(batchSize = 1, iterations = 1, timeUnit = TimeUnit.MILLISECONDS) +@Warmup(batchSize = 0, iterations = 0) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +public class InlineBeginBenchmark { + private static final String TEST_PROJECT = "my-project"; + private static final String TEST_INSTANCE = "my-instance"; + private static final String TEST_DATABASE = "my-database"; + private static final int HOLD_SESSION_TIME = 100; + private static final int RND_WAIT_TIME_BETWEEN_REQUESTS = 10; + private static final Random RND = new Random(); + + @State(Scope.Thread) + public static class BenchmarkState { + private final boolean useRealServer = Boolean.parseBoolean(System.getProperty("useRealServer")); + private final String instance = System.getProperty("instance", TEST_INSTANCE); + private final String database = System.getProperty("database", TEST_DATABASE); + private StandardBenchmarkMockServer mockServer; + private Spanner spanner; + private DatabaseClientImpl client; + + @Param({"true"}) + boolean inlineBegin; + + @Param({"0.2"}) + float writeFraction; + + @Setup(Level.Invocation) + public void setup() throws Exception { + System.out.println("useRealServer: " + System.getProperty("useRealServer")); + System.out.println("instance: " + System.getProperty("instance")); + SpannerOptions options; + if (useRealServer) { + System.out.println("running benchmark with **REAL** server"); + System.out.println("instance: " + instance); + System.out.println("database: " + database); + options = createRealServerOptions(); + } else { + System.out.println("running benchmark with **MOCK** server"); + mockServer = new StandardBenchmarkMockServer(); + TransportChannelProvider channelProvider = mockServer.start(); + options = 
createBenchmarkServerOptions(channelProvider); + } + + spanner = options.getService(); + client = + (DatabaseClientImpl) + spanner.getDatabaseClient(DatabaseId.of(options.getProjectId(), instance, database)); + Stopwatch watch = Stopwatch.createStarted(); + // Wait until the session pool has initialized. + while (client.multiplexedSessionDatabaseClient.getCurrentSessionReference() == null) { + Thread.sleep(1L); + if (watch.elapsed(TimeUnit.SECONDS) > 10L) { + break; + } + } + } + + SpannerOptions createBenchmarkServerOptions(TransportChannelProvider channelProvider) { + return SpannerOptions.newBuilder() + .setProjectId(TEST_PROJECT) + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder().setWriteSessionsFraction(writeFraction).build()) + .build(); + } + + SpannerOptions createRealServerOptions() { + return SpannerOptions.newBuilder() + .setSessionPoolOption( + SessionPoolOptions.newBuilder().setWriteSessionsFraction(writeFraction).build()) + .build(); + } + + @TearDown(Level.Invocation) + public void teardown() throws Exception { + spanner.close(); + if (mockServer != null) { + mockServer.shutdown(); + } + } + } + + /** Measures the time needed to execute a burst of read requests. 
*/ + @Benchmark + public void burstRead(final BenchmarkState server) throws Exception { + int totalQueries = server.spanner.getOptions().getSessionPoolOptions().getMaxSessions() * 8; + int parallelThreads = server.spanner.getOptions().getSessionPoolOptions().getMaxSessions() * 2; + + ListeningScheduledExecutorService service = + MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(parallelThreads)); + List> futures = new ArrayList<>(totalQueries); + for (int i = 0; i < totalQueries; i++) { + futures.add( + service.submit( + () -> { + Thread.sleep(RND.nextInt(RND_WAIT_TIME_BETWEEN_REQUESTS)); + try (ResultSet rs = + server.client.singleUse().executeQuery(StandardBenchmarkMockServer.SELECT1)) { + while (rs.next()) { + Thread.sleep(RND.nextInt(HOLD_SESSION_TIME)); + } + return null; + } + })); + } + Futures.allAsList(futures).get(); + service.shutdown(); + } + + /** Measures the time needed to execute a burst of write requests. */ + @Benchmark + public void burstWrite(final BenchmarkState server) throws Exception { + int totalWrites = server.spanner.getOptions().getSessionPoolOptions().getMaxSessions() * 8; + int parallelThreads = server.spanner.getOptions().getSessionPoolOptions().getMaxSessions() * 2; + + ListeningScheduledExecutorService service = + MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(parallelThreads)); + List> futures = new ArrayList<>(totalWrites); + for (int i = 0; i < totalWrites; i++) { + futures.add( + service.submit( + () -> { + Thread.sleep(RND.nextInt(RND_WAIT_TIME_BETWEEN_REQUESTS)); + TransactionRunner runner = server.client.readWriteTransaction(); + return runner.run( + transaction -> + transaction.executeUpdate(StandardBenchmarkMockServer.UPDATE_STATEMENT)); + })); + } + Futures.allAsList(futures).get(); + service.shutdown(); + } + + /** Measures the time needed to execute a burst of read and write requests. 
*/ + @Benchmark + public void burstReadAndWrite(final BenchmarkState server) throws Exception { + int totalWrites = server.spanner.getOptions().getSessionPoolOptions().getMaxSessions() * 4; + int totalReads = server.spanner.getOptions().getSessionPoolOptions().getMaxSessions() * 4; + int parallelThreads = server.spanner.getOptions().getSessionPoolOptions().getMaxSessions() * 2; + + ListeningScheduledExecutorService service = + MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(parallelThreads)); + List> futures = new ArrayList<>(totalReads + totalWrites); + for (int i = 0; i < totalWrites; i++) { + futures.add( + service.submit( + () -> { + Thread.sleep(RND.nextInt(RND_WAIT_TIME_BETWEEN_REQUESTS)); + TransactionRunner runner = server.client.readWriteTransaction(); + return runner.run( + transaction -> + transaction.executeUpdate(StandardBenchmarkMockServer.UPDATE_STATEMENT)); + })); + } + for (int i = 0; i < totalReads; i++) { + futures.add( + service.submit( + () -> { + Thread.sleep(RND.nextInt(RND_WAIT_TIME_BETWEEN_REQUESTS)); + try (ResultSet rs = + server.client.singleUse().executeQuery(StandardBenchmarkMockServer.SELECT1)) { + while (rs.next()) { + Thread.sleep(RND.nextInt(HOLD_SESSION_TIME)); + } + return null; + } + })); + } + Futures.allAsList(futures).get(); + service.shutdown(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InlineBeginTransactionTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InlineBeginTransactionTest.java new file mode 100644 index 000000000000..db1b39ac0a0b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InlineBeginTransactionTest.java @@ -0,0 +1,2041 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.AsyncResultSet.CallbackResponse; +import com.google.cloud.spanner.AsyncTransactionManager.AsyncTransactionStep; +import com.google.cloud.spanner.AsyncTransactionManager.CommitTimestampFuture; +import com.google.cloud.spanner.AsyncTransactionManager.TransactionContextFuture; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.TransactionRunner.TransactionCallable; +import com.google.cloud.spanner.TransactionRunnerImpl.TransactionContextImpl; +import com.google.cloud.spanner.connection.RandomResultSetGenerator; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ByteString; +import com.google.protobuf.ListValue; +import 
com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.TypeCode; +import io.grpc.Server; +import io.grpc.Status; +import io.grpc.inprocess.InProcessServerBuilder; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.runners.Enclosed; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Enclosed.class) +public class InlineBeginTransactionTest { + private static MockSpannerServiceImpl mockSpanner; + private static Server server; + private static LocalChannelProvider channelProvider; + private static final Statement UPDATE_STATEMENT = + Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=2"); + private static final Statement INVALID_UPDATE_STATEMENT = + Statement.of("UPDATE NON_EXISTENT_TABLE SET BAR=1 WHERE BAZ=2"); + private static final long 
UPDATE_COUNT = 1L; + private static final Statement SELECT1 = Statement.of("SELECT 1 AS COL1"); + private static final ResultSetMetadata SELECT1_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("COL1") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.INT64) + .build()) + .build()) + .build()) + .build(); + private static final com.google.spanner.v1.ResultSet SELECT1_RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("1").build()) + .build()) + .setMetadata(SELECT1_METADATA) + .build(); + private static final com.google.spanner.v1.ResultSet EMPTY_RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder().setMetadata(SELECT1_METADATA).build(); + private static final Statement SELECT1_UNION_ALL_SELECT2 = + Statement.of("SELECT 1 AS COL1 UNION ALL SELECT 2 AS COL1"); + private static final com.google.spanner.v1.ResultSet SELECT1_UNION_ALL_SELECT2_RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("1").build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("2").build()) + .build()) + .setMetadata(SELECT1_METADATA) + .build(); + private static final Statement INVALID_SELECT = Statement.of("SELECT * FROM NON_EXISTING_TABLE"); + private static final Statement READ_STATEMENT = Statement.of("SELECT ID FROM FOO WHERE 1=1"); + private static final Statement READ_ROW_STATEMENT = + Statement.of("SELECT BAR FROM FOO WHERE ID=1"); + + protected Spanner spanner; + + @BeforeClass + public static void startStaticServer() throws Exception { + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. 
+ mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + mockSpanner.putStatementResult(StatementResult.query(SELECT1, SELECT1_RESULTSET)); + mockSpanner.putStatementResult( + StatementResult.query(SELECT1_UNION_ALL_SELECT2, SELECT1_UNION_ALL_SELECT2_RESULTSET)); + mockSpanner.putStatementResult(StatementResult.query(READ_STATEMENT, SELECT1_RESULTSET)); + mockSpanner.putStatementResult(StatementResult.query(READ_ROW_STATEMENT, SELECT1_RESULTSET)); + mockSpanner.putStatementResult( + StatementResult.exception( + INVALID_UPDATE_STATEMENT, + Status.INVALID_ARGUMENT + .withDescription("invalid update statement") + .asRuntimeException())); + mockSpanner.putStatementResult( + StatementResult.exception( + INVALID_SELECT, + Status.INVALID_ARGUMENT + .withDescription("invalid select statement") + .asRuntimeException())); + + String uniqueName = InProcessServerBuilder.generateName(); + server = + InProcessServerBuilder.forName(uniqueName) + // We need to use a real executor for timeouts to occur. + .scheduledExecutorService(new ScheduledThreadPoolExecutor(1)) + .addService(mockSpanner) + .build() + .start(); + channelProvider = LocalChannelProvider.create(uniqueName); + } + + @AfterClass + public static void stopServer() throws InterruptedException { + server.shutdown(); + server.awaitTermination(); + } + + @Before + public void setUp() { + mockSpanner.reset(); + mockSpanner.removeAllExecutionTimes(); + // Create a Spanner instance that will inline BeginTransaction calls. It also has no prepared + // sessions in the pool to prevent session preparing from interfering with test cases. + spanner = + SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setTrackTransactionStarter() + // The extra BeginTransaction RPC for multiplexed session read-write is causing + // unexpected behavior in tests having a mock on the BeginTransaction RPC. 
Therefore, + // this is being skipped. + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setSkipVerifyingBeginTransactionForMuxRW(true) + .build()) + .build() + .getService(); + } + + @After + public void tearDown() { + spanner.close(); + mockSpanner.reset(); + mockSpanner.clearRequests(); + } + + @RunWith(Parameterized.class) + public static class InlineBeginTransactionWithExecutorTest extends InlineBeginTransactionTest { + @Parameter public Executor executor; + + @Parameters(name = "executor = {0}") + public static Collection data() { + return Arrays.asList( + new Object[][] { + {MoreExecutors.directExecutor()}, + {Executors.newSingleThreadExecutor()}, + {Executors.newFixedThreadPool(4)} + }); + } + + @Test + public void testInlinedBeginAsyncTx() throws InterruptedException, ExecutionException { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + ApiFuture updateCount = + client.runAsync().runAsync(txn -> txn.executeUpdateAsync(UPDATE_STATEMENT), executor); + assertThat(updateCount.get()).isEqualTo(UPDATE_COUNT); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void testInlinedBeginAsyncTxAborted() throws InterruptedException, ExecutionException { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + final AtomicBoolean firstAttempt = new AtomicBoolean(true); + ApiFuture updateCount = + client + .runAsync() + .runAsync( + txn -> { + ApiFuture res = txn.executeUpdateAsync(UPDATE_STATEMENT); + if (firstAttempt.getAndSet(false)) { + mockSpanner.abortTransaction(txn); + } + return res; + }, + executor); + assertThat(updateCount.get()).isEqualTo(UPDATE_COUNT); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + // We have started 2 transactions, because the first transaction aborted. 
+ assertThat(countTransactionsStarted()).isEqualTo(2); + } + + @Test + public void testInlinedBeginAsyncTxWithQuery() throws InterruptedException, ExecutionException { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + final ExecutorService queryExecutor = Executors.newSingleThreadExecutor(); + ApiFuture updateCount = + client + .runAsync() + .runAsync( + txn -> { + final SettableApiFuture res = SettableApiFuture.create(); + try (AsyncResultSet rs = txn.executeQueryAsync(SELECT1)) { + rs.setCallback( + executor, + resultSet -> { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + res.set(resultSet.getLong(0)); + default: + throw new IllegalStateException(); + } + }); + } + return res; + }, + queryExecutor); + assertThat(updateCount.get()).isEqualTo(1L); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(1); + queryExecutor.shutdown(); + } + + @Test + public void testInlinedBeginAsyncTxWithBatchDml() + throws InterruptedException, ExecutionException { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + ApiFuture updateCounts = + client + .runAsync() + .runAsync( + transaction -> + transaction.batchUpdateAsync( + Arrays.asList(UPDATE_STATEMENT, UPDATE_STATEMENT)), + executor); + assertThat(updateCounts.get()).asList().containsExactly(UPDATE_COUNT, UPDATE_COUNT); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void testInlinedBeginAsyncTxWithError() throws InterruptedException, ExecutionException { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + ApiFuture updateCount = + client + .runAsync() + .runAsync( + transaction 
-> { + transaction.executeUpdateAsync(INVALID_UPDATE_STATEMENT); + return transaction.executeUpdateAsync(UPDATE_STATEMENT); + }, + executor); + assertThat(updateCount.get()).isEqualTo(UPDATE_COUNT); + // The first statement will fail and not return a transaction id. This will trigger a retry of + // the entire transaction, and the retry will do an explicit BeginTransaction RPC. + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + // The first update will start a transaction, but then fail the update statement. This will + // start a transaction on the mock server, but that transaction will never be returned to the + // client. + assertThat(countTransactionsStarted()).isEqualTo(2); + } + + @Test + public void testInlinedBeginAsyncTxWithOnlyMutations() + throws InterruptedException, ExecutionException { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + client + .runAsync() + .runAsync( + transaction -> { + transaction.buffer(Mutation.newInsertBuilder("FOO").set("ID").to(1L).build()); + return ApiFutures.immediateFuture(null); + }, + executor) + .get(); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void testAsyncTransactionManagerInlinedBeginTx() + throws InterruptedException, ExecutionException { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + try (AsyncTransactionManager txMgr = client.transactionManagerAsync()) { + TransactionContextFuture txn = txMgr.beginAsync(); + while (true) { + AsyncTransactionStep updateCount = + txn.then( + (transaction, ignored) -> transaction.executeUpdateAsync(UPDATE_STATEMENT), + executor); + CommitTimestampFuture commitTimestamp = updateCount.commitAsync(); + try { + assertThat(updateCount.get()).isEqualTo(UPDATE_COUNT); + 
assertThat(commitTimestamp.get()).isNotNull(); + break; + } catch (AbortedException e) { + txn = txMgr.resetForRetryAsync(); + } + } + } + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void testAsyncTransactionManagerInlinedBeginTxAborted() + throws InterruptedException, ExecutionException { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + try (AsyncTransactionManager txMgr = client.transactionManagerAsync()) { + TransactionContextFuture txn = txMgr.beginAsync(); + boolean first = true; + while (true) { + try { + AsyncTransactionStep updateCount = + txn.then( + (transaction, ignored) -> transaction.executeUpdateAsync(UPDATE_STATEMENT), + executor); + if (first) { + // Abort the transaction after the statement has been executed to ensure that the + // transaction has actually been started before the test tries to abort it. + updateCount.then( + (ignored1, ignored2) -> { + mockSpanner.abortAllTransactions(); + return ApiFutures.immediateFuture(null); + }, + MoreExecutors.directExecutor()); + first = false; + } + assertThat(updateCount.commitAsync().get()).isNotNull(); + assertThat(updateCount.get()).isEqualTo(UPDATE_COUNT); + break; + } catch (AbortedException e) { + txn = txMgr.resetForRetryAsync(); + } + } + } + // The retry will use a BeginTransaction RPC. 
+ assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + assertThat(countTransactionsStarted()).isEqualTo(2); + } + + @Test + public void testAsyncTransactionManagerInlinedBeginTxWithOnlyMutations() + throws InterruptedException, ExecutionException { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + try (AsyncTransactionManager txMgr = client.transactionManagerAsync()) { + TransactionContextFuture txn = txMgr.beginAsync(); + while (true) { + try { + txn.then( + (transaction, ignored) -> { + transaction.buffer(Mutation.newInsertBuilder("FOO").set("ID").to(1L).build()); + return ApiFutures.immediateFuture(null); + }, + executor) + .commitAsync() + .get(); + break; + } catch (AbortedException e) { + txn = txMgr.resetForRetryAsync(); + } + } + } + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void testAsyncTransactionManagerInlinedBeginTxWithError() throws InterruptedException { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + try (AsyncTransactionManager txMgr = client.transactionManagerAsync()) { + TransactionContextFuture txn = txMgr.beginAsync(); + while (true) { + try { + AsyncTransactionStep updateCount = + txn.then( + (transaction, ignored) -> + transaction.executeUpdateAsync(INVALID_UPDATE_STATEMENT), + executor) + .then( + (transaction, ignored) -> transaction.executeUpdateAsync(UPDATE_STATEMENT), + executor); + SpannerException e = + assertThrows(SpannerException.class, () -> get(updateCount.commitAsync())); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + break; + } catch (AbortedException e) { + txn = txMgr.resetForRetryAsync(); + } + } + } + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + 
assertThat(countTransactionsStarted()).isEqualTo(1); + } + } + + @RunWith(JUnit4.class) + public static class InlineBeginTransactionWithoutExecutorTest extends InlineBeginTransactionTest { + @Test + public void testInlinedBeginTx() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + long updateCount = + client + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(UPDATE_STATEMENT)); + assertThat(updateCount).isEqualTo(UPDATE_COUNT); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(1); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void testInlinedBeginTxAborted() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + final AtomicBoolean firstAttempt = new AtomicBoolean(true); + long updateCount = + client + .readWriteTransaction() + .run( + transaction -> { + long res = transaction.executeUpdate(UPDATE_STATEMENT); + if (firstAttempt.getAndSet(false)) { + mockSpanner.abortTransaction(transaction); + } + return res; + }); + assertThat(updateCount).isEqualTo(UPDATE_COUNT); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(2); + // We have started 2 transactions, because the first transaction aborted during the commit. 
+ assertThat(countRequests(CommitRequest.class)).isEqualTo(2); + assertThat(countTransactionsStarted()).isEqualTo(2); + } + + @Test + public void testInlinedBeginFirstUpdateAborts() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + long updateCount = + client + .readWriteTransaction() + .run( + new TransactionCallable() { + boolean firstAttempt = true; + + @Override + public Long run(TransactionContext transaction) { + if (firstAttempt) { + firstAttempt = false; + mockSpanner.putStatementResult( + StatementResult.exception( + UPDATE_STATEMENT, + mockSpanner.createAbortedException( + ByteString.copyFromUtf8("some-tx")))); + } else { + mockSpanner.putStatementResult( + StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + } + return transaction.executeUpdate(UPDATE_STATEMENT); + } + }); + assertThat(updateCount).isEqualTo(UPDATE_COUNT); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(2); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + } + + @Test + public void testInlinedBeginFirstQueryAborts() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + long updateCount = + client + .readWriteTransaction() + .run( + new TransactionCallable() { + boolean firstAttempt = true; + + @Override + public Long run(TransactionContext transaction) { + if (firstAttempt) { + firstAttempt = false; + mockSpanner.putStatementResult( + StatementResult.exception( + SELECT1, + mockSpanner.createAbortedException( + ByteString.copyFromUtf8("some-tx")))); + } else { + mockSpanner.putStatementResult( + StatementResult.query(SELECT1, SELECT1_RESULTSET)); + } + try (ResultSet rs = transaction.executeQuery(SELECT1)) { + while (rs.next()) { + return rs.getLong(0); + } + } + return 0L; + } + }); + assertThat(updateCount).isEqualTo(1L); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + 
assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(2); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + } + + @Test + public void testInlinedBeginFirstQueryReturnsUnavailable() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofStreamException(Status.UNAVAILABLE.asRuntimeException(), 0)); + long value = MockSpannerTestActions.executeSelect1(client); + assertThat(value).isEqualTo(1L); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(2); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + } + + @Test + public void testInlinedBeginFirstReadReturnsUnavailable() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + mockSpanner.setStreamingReadExecutionTime( + SimulatedExecutionTime.ofStreamException(Status.UNAVAILABLE.asRuntimeException(), 0)); + Long value = MockSpannerTestActions.executeReadFoo(client); + assertThat(value).isEqualTo(1L); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ReadRequest.class)).isEqualTo(2); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + } + + @Test + public void testInlinedBeginFirstReadReturnsUnavailableRetryReturnsAborted() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + mockSpanner.setStreamingReadExecutionTime( + SimulatedExecutionTime.ofExceptions( + Arrays.asList( + Status.UNAVAILABLE.asRuntimeException(), Status.ABORTED.asRuntimeException()))); + Long value = MockSpannerTestActions.executeReadFoo(client); + assertThat(value).isEqualTo(1L); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + assertThat(countRequests(ReadRequest.class)).isEqualTo(3); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + } + + @Test + public void 
testInlinedBeginFirstQueryReturnsUnavailableRetryReturnsAborted() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofExceptions( + Arrays.asList( + Status.UNAVAILABLE.asRuntimeException(), Status.ABORTED.asRuntimeException()))); + Long value = MockSpannerTestActions.executeSelect1(client); + assertThat(value).isEqualTo(1L); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(3); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + } + + @Test + public void testInlinedBeginFirstDmlReturnsUnavailableRetryReturnsAborted() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofExceptions( + Arrays.asList( + Status.UNAVAILABLE.asRuntimeException(), Status.ABORTED.asRuntimeException()))); + Long value = + client + .readWriteTransaction() + .run( + transaction -> { + // The first attempt will return UNAVAILABLE and retry internally. + // The second attempt will return ABORTED and should cause the transaction to + // retry. 
+ return transaction.executeUpdate(UPDATE_STATEMENT); + }); + assertThat(value).isEqualTo(UPDATE_COUNT); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(3); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + } + + @Test + public void testInlinedBeginFirstReadReturnsUnavailableRetryReturnsAborted_WithCatchAll() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + mockSpanner.setStreamingReadExecutionTime( + SimulatedExecutionTime.ofExceptions( + Arrays.asList( + Status.UNAVAILABLE.asRuntimeException(), Status.ABORTED.asRuntimeException()))); + Long value = MockSpannerTestActions.executeReadFoo(client); + assertThat(value).isEqualTo(1L); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + assertThat(countRequests(ReadRequest.class)).isEqualTo(3); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + } + + @Test + public void testInlinedBeginFirstQueryReturnsUnavailableRetryReturnsAborted_WithCatchAll() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofExceptions( + Arrays.asList( + Status.UNAVAILABLE.asRuntimeException(), Status.ABORTED.asRuntimeException()))); + Long value = + client + .readWriteTransaction() + .run( + transaction -> { + // The first attempt will return UNAVAILABLE and retry internally. + // The second attempt will return ABORTED and should cause the transaction to + // retry. + try { + return transaction.executeUpdate(UPDATE_STATEMENT); + } catch (AbortedException e) { + // Ignore the AbortedException and let the commit handle it. 
+ } + return 0L; + }); + assertThat(value).isEqualTo(UPDATE_COUNT); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(3); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + } + + @Test + public void testInlinedBeginFirstDmlReturnsUnavailableRetryReturnsAborted_WithCatchAll() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofExceptions( + Arrays.asList( + Status.UNAVAILABLE.asRuntimeException(), Status.ABORTED.asRuntimeException()))); + Long value = MockSpannerTestActions.executeSelect1(client); + assertThat(value).isEqualTo(1L); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(3); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + } + + @Test + public void testInlinedBeginFirstReadCancelledSecondReadAborted_WithCatch() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + mockSpanner.setStreamingReadExecutionTime( + SimulatedExecutionTime.ofException(Status.CANCELLED.asRuntimeException())); + Long value = + client + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet rs = + transaction.read("FOO", KeySet.all(), Collections.singletonList("ID"))) { + if (rs.next()) { + return rs.getLong(0); + } + } catch (SpannerException e) { + if (e.getErrorCode() == ErrorCode.CANCELLED) { + // Ignore and let the transaction continue. + // Also make sure that the next read operation will return Aborted. + mockSpanner.abortNextTransaction(); + } else if (e.getErrorCode() == ErrorCode.ABORTED) { + // Ignore Aborted errors. This will cause the transaction to try to commit. + } else { + // Propagate any other errors (there should not be any in this test case). 
+ throw e; + } + } + return 0L; + }); + + assertThat(value).isEqualTo(1L); + // 1. The initial attempt will inline the BeginTransaction option. + // 2. The CANCELLED error during the first attempt will cause a retry with a BeginTransaction + // RPC. + // 3. The ABORTED error during the second attempt will NOT cause the next retry to use an + // explicit BeginTransaction RPC, because the previous attempt did return a transaction ID + // (the ID that was returned by the BeginTransaction RPC of that attempt). + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + // There will be 3 attempts to read: + // 1. The first will return CANCELLED. + // 2. The second will return ABORTED. + // 3. The third will return the results. + assertThat(countRequests(ReadRequest.class)).isEqualTo(3); + // There are two attempts to commit: + // 1. The initial attempt will NOT try to commit, because the initial Read operation did not + // return a transaction ID. + // 2. The second attempt will try to commit, because the BeginTransaction RPC did return a + // transaction ID, and the Aborted error that was returned by the Read operation was caught + // by the application. This means that the TransactionRunner does not know that the + // transaction was aborted. The Commit RPC will return an Aborted error. + // 3. The third attempt will commit, as the Read operation succeeded and returned a + // transaction ID. + assertThat(countRequests(CommitRequest.class)).isEqualTo(2); + } + + @Test + public void testInlinedBeginFirstReadCancelledSecondReadAborted_WithoutCatch() + throws InterruptedException, ExecutionException { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + mockSpanner.setStreamingReadExecutionTime( + SimulatedExecutionTime.ofException(Status.CANCELLED.asRuntimeException())); + // The CANCELLED error is not caught by the application, so it will bubble up and cause the + // transaction to fail. 
+ assertThrows( + SpannerException.class, + () -> + client + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet rs = + transaction.read( + "FOO", KeySet.all(), Collections.singletonList("ID"))) { + if (rs.next()) { + return rs.getLong(0); + } + } catch (SpannerException e) { + if (e.getErrorCode() == ErrorCode.CANCELLED) { + // Make sure that the next read operation will return Aborted. + mockSpanner.abortNextTransaction(); + } + // Always propagate the error to the TransactionRunner. + throw e; + } + return 0L; + })); + + // The initial attempt will inline the BeginTransaction option. + // There is no second attempt as the CANCELLED error is not caught. + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ReadRequest.class)).isEqualTo(1); + assertThat(countRequests(CommitRequest.class)).isEqualTo(0); + // The CANCELLED error means that there is no transaction ID returned by the Read operation. + // So there is also no transaction to rollback. + assertThat(countRequests(RollbackRequest.class)).isEqualTo(0); + } + + @Test + public void testInlinedBeginFirstReadCancelledSecondReadAborted_WithCatchForCancelled() + throws InterruptedException, ExecutionException { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + mockSpanner.setStreamingReadExecutionTime( + SimulatedExecutionTime.ofException(Status.CANCELLED.asRuntimeException())); + Long value = + client + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet rs = + transaction.read("FOO", KeySet.all(), Collections.singletonList("ID"))) { + if (rs.next()) { + return rs.getLong(0); + } + } catch (SpannerException e) { + if (e.getErrorCode() == ErrorCode.CANCELLED) { + // Do not propagate the CANCELLED error. + // Make sure that the next read operation will return Aborted. + mockSpanner.abortNextTransaction(); + } else { + // Propagate all other errors to the TransactionRunner. 
+ throw e; + } + } + return 0L; + }); + + assertThat(value).isEqualTo(1L); + // 1. The initial attempt will inline the BeginTransaction option. + // 2. The CANCELLED error during the first attempt will cause a retry with a BeginTransaction + // RPC, because the error was returned by the first statement in the transaction. + // 3. The ABORTED error during the second attempt will NOT cause the next retry to use an + // explicit BeginTransaction RPC, because the previous attempt did return a transaction ID + // (the ID that was returned by the BeginTransaction RPC of that attempt). + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + // There will be 3 attempts to read: + // 1. The first will return CANCELLED. + // 2. The second will return ABORTED. + // 3. The third will return the results. + assertThat(countRequests(ReadRequest.class)).isEqualTo(3); + // There is only one attempt to commit: + // 1. The initial attempt will NOT try to commit, because the initial Read operation did not + // return a transaction ID. + // 2. The second attempt will NOT try to commit, because the Aborted error from the Read + // operation is propagated to the TransactionRunner. This means that the TransactionRunner + // knows that the transaction was aborted, and will automatically initiate a retry without + // first trying to commit the transaction. + // 3. The third attempt will commit, as the Read operation succeeded and returned a + // transaction ID. 
+ assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + } + + @Test + public void testInlinedBeginCommitAfterReadReturnsUnavailable() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofException(Status.UNAVAILABLE.asRuntimeException())); + Long value = MockSpannerTestActions.executeReadFoo(client); + assertThat(value).isEqualTo(1L); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ReadRequest.class)).isEqualTo(1); + assertThat(countRequests(CommitRequest.class)).isEqualTo(2); + } + + @Test + public void testInlinedBeginFirstReadReturnsUnavailableAndCommitAborts() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + mockSpanner.setStreamingReadExecutionTime( + SimulatedExecutionTime.ofStreamException(Status.UNAVAILABLE.asRuntimeException(), 0)); + final AtomicBoolean firstAttempt = new AtomicBoolean(true); + Long value = + client + .readWriteTransaction() + .run( + transaction -> { + long res = 0L; + // The first attempt will return UNAVAILABLE and retry internally. 
+ try (ResultSet rs = + transaction.read("FOO", KeySet.all(), Collections.singletonList("ID"))) { + if (rs.next()) { + res = rs.getLong(0); + } + } + if (firstAttempt.compareAndSet(true, false)) { + mockSpanner.abortTransaction(transaction); + } + return res; + }); + assertThat(value).isEqualTo(1L); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ReadRequest.class)).isEqualTo(3); + assertThat(countRequests(CommitRequest.class)).isEqualTo(2); + } + + @Test + public void testInlinedBeginTxWithQuery() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + long updateCount = MockSpannerTestActions.executeSelect1(client); + assertThat(updateCount).isEqualTo(1L); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(1); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void testInlinedBeginTxWithRead() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + long updateCount = MockSpannerTestActions.executeReadFoo(client); + assertThat(updateCount).isEqualTo(1L); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ReadRequest.class)).isEqualTo(1); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void testInlinedBeginTxWithBatchDml() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + long[] updateCounts = + client + .readWriteTransaction() + .run( + transaction -> + transaction.batchUpdate(Arrays.asList(UPDATE_STATEMENT, UPDATE_STATEMENT))); + assertThat(updateCounts).asList().containsExactly(UPDATE_COUNT, UPDATE_COUNT); + 
assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ExecuteBatchDmlRequest.class)).isEqualTo(1); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void testInlinedBeginTxWithError() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + long updateCount = + client + .readWriteTransaction() + .run( + transaction -> { + SpannerException e = + assertThrows( + SpannerException.class, + () -> transaction.executeUpdate(INVALID_UPDATE_STATEMENT)); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + return transaction.executeUpdate(UPDATE_STATEMENT); + }); + assertThat(updateCount).isEqualTo(UPDATE_COUNT); + // The transaction will be retried because the first statement that also tried to include the + // BeginTransaction statement failed and did not return a transaction. That forces a retry of + // the entire transaction with an explicit BeginTransaction RPC. + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + // The update statement will be executed 3 times: + // 1. The invalid update statement will be executed during the first attempt and fail. The + // second update statement will not be executed, as the transaction runner sees that the + // initial + // statement failed and did not return a valid transaction id. + // 2. The invalid update statement is executed again during the retry. + // 3. The valid update statement is only executed after the first statement succeeded. + assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(3); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + // The first update will start a transaction, but then fail the update statement. This will + // start a transaction on the mock server, but that transaction will never be returned to the + // client. 
+ assertThat(countTransactionsStarted()).isEqualTo(2); + } + + @Test + public void testInlinedBeginTxWithErrorOnFirstStatement_andThenErrorOnBeginTransaction() { + mockSpanner.setBeginTransactionExecutionTime( + SimulatedExecutionTime.ofException( + Status.INTERNAL + .withDescription("Begin transaction failed due to an internal error") + .asRuntimeException())); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + SpannerException outerException = + assertThrows( + SpannerException.class, + () -> { + client + .readWriteTransaction() + .run( + transaction -> { + SpannerException innerException = + assertThrows( + SpannerException.class, + () -> transaction.executeUpdate(INVALID_UPDATE_STATEMENT)); + assertEquals(ErrorCode.INVALID_ARGUMENT, innerException.getErrorCode()); + return null; + }); + }); + assertEquals(ErrorCode.INTERNAL, outerException.getErrorCode()); + assertThat(outerException.getMessage()) + .contains("Begin transaction failed due to an internal error"); + // The transaction will be retried because the first statement that also tried to include the + // BeginTransaction statement failed and did not return a transaction. That forces a retry of + // the entire transaction with an explicit BeginTransaction RPC. + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(1); + assertThat(countRequests(CommitRequest.class)).isEqualTo(0); + // The explicit BeginTransaction RPC failed, so only one transaction was started. 
+ assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void testInlinedBeginTxWithUncaughtError() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + client + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(INVALID_UPDATE_STATEMENT))); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + // The first update will start a transaction, but then fail the update statement. This will + // start a transaction on the mock server, but that transaction will never be returned to the + // client. + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(CommitRequest.class)).isEqualTo(0); + assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(1); + // No rollback request will be initiated because the client does not receive any transaction + // id. + assertThat(countRequests(RollbackRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void testInlinedBeginTxWithUncaughtErrorAfterSuccessfulBegin() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + client + .readWriteTransaction() + .run( + transaction -> { + // This statement will start a transaction. + transaction.executeUpdate(UPDATE_STATEMENT); + // This statement will fail and cause a rollback as the exception is not + // caught. 
+ return transaction.executeUpdate(INVALID_UPDATE_STATEMENT); + })); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(CommitRequest.class)).isEqualTo(0); + assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(2); + assertThat(countRequests(RollbackRequest.class)).isEqualTo(1); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void testInlinedBeginTxBatchDmlWithErrorOnFirstStatement() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + Void res = + client + .readWriteTransaction() + .run( + transaction -> { + SpannerBatchUpdateException e = + assertThrows( + SpannerBatchUpdateException.class, + () -> + transaction.batchUpdate( + ImmutableList.of(INVALID_UPDATE_STATEMENT, UPDATE_STATEMENT))); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + assertEquals(0, e.getUpdateCounts().length); + return null; + }); + assertThat(res).isNull(); + // The first statement failed and could not return a transaction. The entire transaction is + // therefore retried with an explicit BeginTransaction RPC. 
+ assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + assertThat(countRequests(ExecuteBatchDmlRequest.class)).isEqualTo(2); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + assertThat(countTransactionsStarted()).isEqualTo(2); + } + + @Test + public void testInlinedBeginTxBatchDmlWithErrorOnSecondStatement() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + long updateCount = + client + .readWriteTransaction() + .run( + transaction -> { + SpannerBatchUpdateException e = + assertThrows( + SpannerBatchUpdateException.class, + () -> + transaction.batchUpdate( + ImmutableList.of(UPDATE_STATEMENT, INVALID_UPDATE_STATEMENT))); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + assertEquals(1, e.getUpdateCounts().length); + return e.getUpdateCounts()[0]; + }); + assertThat(updateCount).isEqualTo(UPDATE_COUNT); + // Although the batch DML returned an error, that error was for the second statement. That + // means that the transaction was started by the first statement. 
+ assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ExecuteBatchDmlRequest.class)).isEqualTo(1); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void testInlinedBeginTxWithErrorOnStreamingSql() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + Void res = + client + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet rs = transaction.executeQuery(INVALID_SELECT)) { + SpannerException e = assertThrows(SpannerException.class, () -> rs.next()); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + } + return null; + }); + assertThat(res).isNull(); + // The transaction will be retried because the first statement that also tried to include the + // BeginTransaction statement failed and did not return a transaction. That forces a retry of + // the entire transaction with an explicit BeginTransaction RPC. + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(2); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + // The first update will start a transaction, but then fail the update statement. This will + // start a transaction on the mock server, but that transaction will never be returned to the + // client. + assertThat(countTransactionsStarted()).isEqualTo(2); + } + + @Test + public void testInlinedBeginTxWithErrorOnSecondPartialResultSet() { + final Statement statement = Statement.of("SELECT * FROM BROKEN_TABLE"); + RandomResultSetGenerator generator = new RandomResultSetGenerator(2); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + // The first PartialResultSet will be returned successfully, and then a DATA_LOSS exception + // will be returned. 
+ mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofStreamException(Status.DATA_LOSS.asRuntimeException(), 1)); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + Void res = + client + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet rs = transaction.executeQuery(statement)) { + assertTrue(rs.next()); + SpannerException e = assertThrows(SpannerException.class, () -> rs.next()); + assertEquals(ErrorCode.DATA_LOSS, e.getErrorCode()); + } + return null; + }); + assertThat(res).isNull(); + // The transaction will not be retried, as the first PartialResultSet returns the transaction + // ID, and the second fails with an error code. + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(1); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void testInlinedBeginTxWithParallelQueries() { + final int numQueries = 100; + final ScheduledExecutorService executor = Executors.newScheduledThreadPool(16); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + long updateCount = + client + .readWriteTransaction() + .run( + transaction -> { + List> futures = new ArrayList<>(numQueries); + for (int i = 0; i < numQueries; i++) { + futures.add( + executor.submit( + () -> { + try (ResultSet rs = transaction.executeQuery(SELECT1)) { + while (rs.next()) { + return rs.getLong(0); + } + } + return 0L; + })); + } + Long res = 0L; + for (Future f : futures) { + res += f.get(); + } + return res; + }); + assertThat(updateCount).isEqualTo((long) numQueries); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void testInlinedBeginTxWithOnlyMutations() { + 
DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + client + .readWriteTransaction() + .run( + transaction -> { + transaction.buffer( + Arrays.asList( + Mutation.newInsertBuilder("FOO").set("ID").to(1L).build(), + Mutation.delete("FOO", Key.of(1L)))); + return null; + }); + // There should be 1 call to BeginTransaction because there is no statement that we can use to + // inline the BeginTransaction call with. + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void testInlinedBeginTxWithMutationsBeforeFailedSqlStatement() { + Statement insert = Statement.of("insert into foo (id) values (1)"); + Statement update = Statement.of("update foo set value='Two' where id=2"); + mockSpanner.putStatementResult(StatementResult.update(insert, 1L)); + mockSpanner.putStatementResult(StatementResult.update(update, 1L)); + // This error will be returned the first time the ExecuteSql method is called. The error is + // cleared after the first call, meaning that the second attempt will succeed. + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofException(Status.ALREADY_EXISTS.asRuntimeException())); + + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + AtomicInteger attempts = new AtomicInteger(); + client + .readWriteTransaction() + .run( + transaction -> { + attempts.incrementAndGet(); + // Buffer a blind write before executing a SQL statement. + transaction.buffer( + Collections.singletonList( + Mutation.newInsertBuilder("FOO").set("ID").to(1L).build())); + try { + transaction.executeUpdate(insert); + } catch (SpannerException exception) { + assertEquals(ErrorCode.ALREADY_EXISTS, exception.getErrorCode()); + // The error should only occur during the initial attempt. 
+ assertEquals(1, attempts.get()); + } + // We need to execute one more statement in the transaction in order to force a + // retry. + assertEquals(1L, transaction.executeUpdate(update)); + return null; + }); + // The transaction should be retried once. + assertEquals(2, attempts.get()); + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + // We get 3 ExecuteSql requests: + // 1. The initial attempt that carries a BeginTransaction option. + // 2. The retry attempt that does not use a BeginTransaction option. + // 3. The second UPDATE statement in the transaction that is only executed during the retry. + assertEquals(3, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + CommitRequest commit = mockSpanner.getRequestsOfType(CommitRequest.class).get(0); + // The mutations should only be applied once. + assertEquals(1, commit.getMutationsCount()); + } + + @SuppressWarnings("resource") + @Test + public void testTransactionManagerInlinedBeginTx() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + try (TransactionManager txMgr = client.transactionManager()) { + TransactionContext txn = txMgr.begin(); + while (true) { + try { + assertThat(txn.executeUpdate(UPDATE_STATEMENT)).isEqualTo(UPDATE_COUNT); + txMgr.commit(); + break; + } catch (AbortedException e) { + txn = txMgr.resetForRetry(); + } + } + } + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @SuppressWarnings("resource") + @Test + public void testTransactionManagerInlinedBeginTxAborted() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + try (TransactionManager txMgr = client.transactionManager()) { + TransactionContext txn = txMgr.begin(); + boolean first = true; + while (true) { + try { + 
assertThat(txn.executeUpdate(UPDATE_STATEMENT)).isEqualTo(UPDATE_COUNT); + if (first) { + mockSpanner.abortAllTransactions(); + first = false; + } + txMgr.commit(); + break; + } catch (AbortedException e) { + txn = txMgr.resetForRetry(); + } + } + } + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(2); + } + + @SuppressWarnings("resource") + @Test + public void testTransactionManagerInlinedBeginTxWithOnlyMutations() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + try (TransactionManager txMgr = client.transactionManager()) { + TransactionContext txn = txMgr.begin(); + while (true) { + try { + txn.buffer(Mutation.newInsertBuilder("FOO").set("ID").to(1L).build()); + txMgr.commit(); + break; + } catch (AbortedException e) { + txn = txMgr.resetForRetry(); + } + } + } + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void testTransactionManagerInlinedBeginTxWithError() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + try (TransactionManager txMgr = client.transactionManager()) { + TransactionContext txn = txMgr.begin(); + while (true) { + final TransactionContext txnToUse = txn; + try { + SpannerException e = + assertThrows( + SpannerException.class, () -> txnToUse.executeUpdate(INVALID_UPDATE_STATEMENT)); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + assertEquals(UPDATE_COUNT, txnToUse.executeUpdate(UPDATE_STATEMENT)); + txMgr.commit(); + break; + } catch (AbortedException e) { + txn = txMgr.resetForRetry(); + } + } + } + // The first statement will fail and not return a transaction id. 
This will trigger a retry of + // the entire transaction, and the retry will do an explicit BeginTransaction RPC. + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + // The first statement will start a transaction, but it will never be returned to the client + // as the update statement fails. + assertThat(countTransactionsStarted()).isEqualTo(2); + } + + @Test + public void testTransactionManagerInlinedBeginTxWithUncaughtError() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + try (TransactionManager txMgr = client.transactionManager()) { + TransactionContext txn = txMgr.begin(); + SpannerException e = + assertThrows(SpannerException.class, () -> txn.executeUpdate(INVALID_UPDATE_STATEMENT)); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + } + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void testInlinedBeginAsyncTxWithParallelQueries() + throws InterruptedException, ExecutionException { + final int numQueries = 100; + final ScheduledExecutorService executor = Executors.newScheduledThreadPool(16); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + ApiFuture updateCount = + client + .runAsync() + .runAsync( + txn -> { + List> futures = new ArrayList<>(numQueries); + for (int i = 0; i < numQueries; i++) { + final SettableApiFuture res = SettableApiFuture.create(); + try (AsyncResultSet rs = txn.executeQueryAsync(SELECT1)) { + rs.setCallback( + executor, + resultSet -> { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + res.set(resultSet.getLong(0)); + default: + throw new IllegalStateException(); + } + }); + } + futures.add(res); + } + return ApiFutures.transformAsync( + ApiFutures.allAsList(futures), + 
input -> { + long sum = 0L; + for (Long l : input) { + sum += l; + } + return ApiFutures.immediateFuture(sum); + }, + MoreExecutors.directExecutor()); + }, + executor); + assertThat(updateCount.get()).isEqualTo((long) numQueries); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void queryWithoutNext() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + assertThat( + client + .readWriteTransaction() + .run( + transaction -> { + // This will not actually send an RPC, so it will also not request a + // transaction. + transaction.executeQuery(SELECT1); + return transaction.executeUpdate(UPDATE_STATEMENT); + })) + .isEqualTo(UPDATE_COUNT); + assertThat(mockSpanner.countRequestsOfType(BeginTransactionRequest.class)).isEqualTo(0L); + assertThat(mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)).isEqualTo(1L); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void queryAsyncWithoutCallback() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + assertThat( + client + .readWriteTransaction() + .run( + transaction -> { + transaction.executeQueryAsync(SELECT1); + return transaction.executeUpdate(UPDATE_STATEMENT); + })) + .isEqualTo(UPDATE_COUNT); + assertThat(mockSpanner.countRequestsOfType(BeginTransactionRequest.class)).isEqualTo(0L); + assertThat(mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)).isEqualTo(1L); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void readWithoutNext() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + assertThat( + client + .readWriteTransaction() + .run( + transaction -> { + transaction.read("FOO", KeySet.all(), Collections.singletonList("ID")); + return transaction.executeUpdate(UPDATE_STATEMENT); + })) + .isEqualTo(UPDATE_COUNT); + 
assertThat(mockSpanner.countRequestsOfType(BeginTransactionRequest.class)).isEqualTo(0L); + assertThat(mockSpanner.countRequestsOfType(ReadRequest.class)).isEqualTo(0L); + assertThat(mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)).isEqualTo(1L); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void readAsyncWithoutCallback() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + assertThat( + client + .readWriteTransaction() + .run( + transaction -> { + transaction.readAsync("FOO", KeySet.all(), Collections.singletonList("ID")); + return transaction.executeUpdate(UPDATE_STATEMENT); + })) + .isEqualTo(UPDATE_COUNT); + assertThat(mockSpanner.countRequestsOfType(BeginTransactionRequest.class)).isEqualTo(0L); + assertThat(mockSpanner.countRequestsOfType(ReadRequest.class)).isEqualTo(0L); + assertThat(mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)).isEqualTo(1L); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void query_ThenUpdate_ThenConsumeResultSet() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + assertThat( + client + .readWriteTransaction() + .run( + transaction -> { + ResultSet rs = transaction.executeQuery(SELECT1); + long updateCount = transaction.executeUpdate(UPDATE_STATEMENT); + // Consume the result set. + while (rs.next()) {} + return updateCount; + })) + .isEqualTo(UPDATE_COUNT); + // The update statement should start the transaction, and the query should use the transaction + // id returned by the update. 
+ assertThat(mockSpanner.countRequestsOfType(BeginTransactionRequest.class)).isEqualTo(0L); + assertThat(mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)).isEqualTo(2L); + assertThat(mockSpanner.countRequestsOfType(CommitRequest.class)).isEqualTo(1L); + assertThat(countTransactionsStarted()).isEqualTo(1); + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertThat(requests.get(0)).isInstanceOf(ExecuteSqlRequest.class); + assertThat(requests.get(0).getSql()).isEqualTo(UPDATE_STATEMENT.getSql()); + assertThat(requests.get(1)).isInstanceOf(ExecuteSqlRequest.class); + assertThat(requests.get(1).getSql()).isEqualTo(SELECT1.getSql()); + } + + @Test + public void testInlinedBeginTxWithStreamRetry() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofStreamException(Status.UNAVAILABLE.asRuntimeException(), 1)); + + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + client + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet rs = transaction.executeQuery(SELECT1_UNION_ALL_SELECT2)) { + while (rs.next()) {} + } + return null; + }); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(2); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + assertThat(countTransactionsStarted()).isEqualTo(1); + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertThat(requests.get(0)).isInstanceOf(ExecuteSqlRequest.class); + ExecuteSqlRequest request1 = requests.get(0); + assertThat(request1.getSql()).isEqualTo(SELECT1_UNION_ALL_SELECT2.getSql()); + assertThat(request1.getTransaction().getBegin().hasReadWrite()).isTrue(); + assertThat(request1.getTransaction().getId()).isEqualTo(ByteString.EMPTY); + assertThat(request1.getResumeToken()).isEqualTo(ByteString.EMPTY); + + assertThat(requests.get(1)).isInstanceOf(ExecuteSqlRequest.class); + ExecuteSqlRequest 
request2 = requests.get(1); + assertThat(request2.getSql()).isEqualTo(SELECT1_UNION_ALL_SELECT2.getSql()); + assertThat(request2.getTransaction().hasBegin()).isFalse(); + assertThat(request2.getTransaction().getId()).isNotEqualTo(ByteString.EMPTY); + assertThat(request2.getResumeToken()).isNotEqualTo(ByteString.EMPTY); + } + + @Test + public void testWaitForTransactionTimeout() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + client + .readWriteTransaction() + .run( + new TransactionCallable() { + int attempt = 0; + + @Override + public Void run(TransactionContext transaction) { + attempt++; + TransactionContextImpl impl = (TransactionContextImpl) transaction; + if (attempt == 1) { + impl.waitForTransactionTimeoutMillis = 1L; + // Freeze the mock server to prevent the first (async) statement from returning + // a transaction. + mockSpanner.freeze(); + } else { + impl.waitForTransactionTimeoutMillis = 60_000L; + } + ApiFuture updateCount = transaction.executeUpdateAsync(UPDATE_STATEMENT); + + // Try to execute a query. This will timeout during the first attempt while + // waiting + // for the first statement to return a transaction, and then force a retry of the + // transaction. + try (ResultSet rs = transaction.executeQuery(SELECT1)) { + while (rs.next()) {} + } catch (Throwable t) { + mockSpanner.unfreeze(); + // Wait until the update actually finishes so it has returned a transaction. + // This ensures that the retry does not issue a BeginTransaction RPC. 
+ SpannerApiFutures.get(updateCount); + throw t; + } + return null; + } + }); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(3); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + } + + @Test + public void testWaitForTransactionTimeoutForCommit() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + AtomicBoolean firstAttempt = new AtomicBoolean(true); + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + client + .readWriteTransaction() + .run( + transaction -> { + TransactionContextImpl impl = (TransactionContextImpl) transaction; + Struct res = + transaction.readRow( + "FOO", Key.of(1L), Collections.singletonList("BAR")); + if (firstAttempt.compareAndSet(true, false)) { + impl.waitForTransactionTimeoutMillis = 1L; + // Simulate that the transaction id got lost. + impl.transactionIdFuture = SettableApiFuture.create(); + impl.transactionId = null; + } else { + impl.waitForTransactionTimeoutMillis = 60_000L; + } + return res; + })); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, exception.getErrorCode()); + assertEquals(0, countRequests(BeginTransactionRequest.class)); + assertEquals(1, countRequests(ReadRequest.class)); + assertEquals(0, countRequests(CommitRequest.class)); + } + + static void runWithIgnoreInlineBegin(Runnable runnable) { + // This will cause statements that requests a transaction to not return a transaction id. 
+ mockSpanner.setIgnoreInlineBeginRequest(true); + try { + runnable.run(); + } finally { + mockSpanner.setIgnoreInlineBeginRequest(false); + } + } + + @Test + public void testQueryWithInlineBeginDidNotReturnTransaction() { + runWithIgnoreInlineBegin( + () -> { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + client + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet rs = + transaction.executeQuery(SELECT1_UNION_ALL_SELECT2)) { + while (rs.next()) {} + } + return null; + })); + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + assertThat(e.getMessage()).contains(AbstractReadContext.NO_TRANSACTION_RETURNED_MSG); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(1); + assertThat(countRequests(CommitRequest.class)).isEqualTo(0); + }); + } + + @Test + public void testReadWithInlineBeginDidNotReturnTransaction() { + runWithIgnoreInlineBegin( + () -> { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + client + .readWriteTransaction() + .run( + transaction -> + transaction.readRow( + "FOO", Key.of(1L), Collections.singletonList("BAR")))); + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + assertThat(e.getMessage()).contains(AbstractReadContext.NO_TRANSACTION_RETURNED_MSG); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ReadRequest.class)).isEqualTo(1); + assertThat(countRequests(CommitRequest.class)).isEqualTo(0); + }); + } + + @Test + public void testUpdateWithInlineBeginDidNotReturnTransaction() { + runWithIgnoreInlineBegin( + () -> { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + SpannerException e = + assertThrows( + 
SpannerException.class, + () -> + client + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(UPDATE_STATEMENT))); + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + assertThat(e.getMessage()).contains(AbstractReadContext.NO_TRANSACTION_RETURNED_MSG); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(1); + assertThat(countRequests(CommitRequest.class)).isEqualTo(0); + }); + } + + @Test + public void testBatchUpdateWithInlineBeginDidNotReturnTransaction() { + runWithIgnoreInlineBegin( + () -> { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + client + .readWriteTransaction() + .run( + transaction -> { + transaction.batchUpdate( + Collections.singletonList(UPDATE_STATEMENT)); + return null; + })); + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + assertThat(e.getMessage()).contains(AbstractReadContext.NO_TRANSACTION_RETURNED_MSG); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ExecuteBatchDmlRequest.class)).isEqualTo(1); + assertThat(countRequests(CommitRequest.class)).isEqualTo(0); + }); + } + + @Test + public void testQueryAsyncWithInlineBeginDidNotReturnTransaction() { + runWithIgnoreInlineBegin( + () -> { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + final ExecutorService executor = Executors.newSingleThreadExecutor(); + SpannerException outerException = + assertThrows( + SpannerException.class, + () -> + client + .readWriteTransaction() + .run( + transaction -> { + try (AsyncResultSet rs = + transaction.executeQueryAsync(SELECT1_UNION_ALL_SELECT2)) { + return SpannerApiFutures.get( + rs.setCallback( + executor, + resultSet -> { + try { + while (true) { + switch (resultSet.tryNext()) { + case OK: + break; + case DONE: + return 
CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + } + } + } catch (SpannerException e) { + return CallbackResponse.DONE; + } + })); + } + })); + assertEquals(ErrorCode.FAILED_PRECONDITION, outerException.getErrorCode()); + assertThat(outerException.getMessage()) + .contains(AbstractReadContext.NO_TRANSACTION_RETURNED_MSG); + + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(1); + assertThat(countRequests(CommitRequest.class)).isEqualTo(0); + }); + } + + @Test + public void testUpdateAsyncWithInlineBeginDidNotReturnTransaction() { + runWithIgnoreInlineBegin( + () -> { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + client + .readWriteTransaction() + .run( + transaction -> + SpannerApiFutures.get( + transaction.executeUpdateAsync(UPDATE_STATEMENT)))); + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + assertThat(e.getMessage()).contains(AbstractReadContext.NO_TRANSACTION_RETURNED_MSG); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ExecuteSqlRequest.class)).isEqualTo(1); + assertThat(countRequests(CommitRequest.class)).isEqualTo(0); + }); + } + + @Test + public void testBatchUpdateAsyncWithInlineBeginDidNotReturnTransaction() { + runWithIgnoreInlineBegin( + () -> { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + client + .readWriteTransaction() + .run( + transaction -> + SpannerApiFutures.get( + transaction.batchUpdateAsync( + Collections.singletonList(UPDATE_STATEMENT))))); + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + assertThat(e.getMessage()).contains(AbstractReadContext.NO_TRANSACTION_RETURNED_MSG); + 
assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ExecuteBatchDmlRequest.class)).isEqualTo(1); + assertThat(countRequests(CommitRequest.class)).isEqualTo(0); + }); + } + + @Test + public void testInlinedBeginTx_withCancelledOnFirstStatement() { + final Statement statement = Statement.of("INSERT INTO FOO (Id) VALUES (1)"); + mockSpanner.putStatementResult( + StatementResult.exception( + statement, + Status.CANCELLED + .withDescription( + "Read/query was cancelled due to the enclosing transaction being invalidated" + + " by a later transaction in the same session.") + .asRuntimeException())); + + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + long updateCount = + client + .readWriteTransaction() + .run( + new TransactionCallable() { + int attempt = 0; + + @Override + public Long run(TransactionContext transaction) { + if (attempt > 0) { + mockSpanner.putStatementResult(StatementResult.update(statement, 1L)); + } + attempt++; + return transaction.executeUpdate(statement); + } + }); + assertEquals(1L, updateCount); + // The transaction will be retried because the first statement that also tried to include the + // BeginTransaction statement failed with the specific CANCELLED error and did not return a + // transaction. That forces a retry of the entire transaction with an explicit + // BeginTransaction RPC. + assertEquals(1, countRequests(BeginTransactionRequest.class)); + // The update statement will be executed 2 times: + assertEquals(2, countRequests(ExecuteSqlRequest.class)); + // The transaction will attempt to commit once. + assertEquals(1, countRequests(CommitRequest.class)); + // The first update will start a transaction, but then fail the update statement. This will + // start a transaction on the mock server, but that transaction will never be returned to the + // client. 
+ assertEquals(2, countTransactionsStarted()); + } + + @Test + public void testInlinedBeginTx_withStickyCancelledOnFirstStatement() { + final Statement statement = Statement.of("INSERT INTO FOO (Id) VALUES (1)"); + mockSpanner.putStatementResult( + StatementResult.exception( + statement, + Status.CANCELLED + .withDescription( + "Read/query was cancelled due to the enclosing transaction being invalidated" + + " by a later transaction in the same session.") + .asRuntimeException())); + + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + // The CANCELLED error is thrown both on the first and second attempt. The second attempt will + // not be retried, as it did not include a BeginTransaction option. + SpannerException e = + assertThrows( + SpannerException.class, + () -> + client + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(statement))); + assertEquals(ErrorCode.CANCELLED, e.getErrorCode()); + assertEquals(1, countRequests(BeginTransactionRequest.class)); + // The update statement will be executed 2 times: + assertEquals(2, countRequests(ExecuteSqlRequest.class)); + // The transaction will never attempt to commit. + assertEquals(0, countRequests(CommitRequest.class)); + assertEquals(2, countTransactionsStarted()); + } + + @Test + public void testReadRowAborted() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + // The retry behavior should be equal regardless whether the readRow operation returns a row + // or not. 
+ for (boolean emptyResult : new boolean[] {true, false}) { + AtomicBoolean firstAttempt = new AtomicBoolean(true); + Struct row = + client + .readWriteTransaction() + .run( + transaction -> { + if (firstAttempt.compareAndSet(true, false)) { + mockSpanner.putStatementResult( + StatementResult.exception( + READ_ROW_STATEMENT, + mockSpanner.createAbortedException(ByteString.copyFromUtf8("tx")))); + } else { + mockSpanner.putStatementResult( + StatementResult.query( + READ_ROW_STATEMENT, + emptyResult ? EMPTY_RESULTSET : SELECT1_RESULTSET)); + } + return transaction.readRow( + "FOO", Key.of(1L), Collections.singletonList("BAR")); + }); + if (emptyResult) { + assertNull(row); + } else { + assertNotNull(row); + assertEquals(1L, row.getLong(0)); + } + // The transaction is retried once, and the retry will use an explicit BeginTransaction RPC + // as the first attempt did not return a transaction id. + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + assertThat(countRequests(ReadRequest.class)).isEqualTo(2); + assertThat(countRequests(CommitRequest.class)).isEqualTo(1); + mockSpanner.clearRequests(); + } + } + + @Test + public void testReadRowCommitAborted() { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + // The retry behavior should be equal regardless whether the readRow operation returns a row + // or not. + for (boolean emptyResult : new boolean[] {true, false}) { + mockSpanner.putStatementResult( + StatementResult.query( + READ_ROW_STATEMENT, emptyResult ? EMPTY_RESULTSET : SELECT1_RESULTSET)); + AtomicBoolean firstAttempt = new AtomicBoolean(true); + Struct row = + client + .readWriteTransaction() + .run( + transaction -> { + Struct res = + transaction.readRow("FOO", Key.of(1L), Collections.singletonList("BAR")); + // This will cause the commit request to return Aborted. 
+ if (firstAttempt.compareAndSet(true, false)) { + mockSpanner.abortTransaction(transaction); + } + return res; + }); + if (emptyResult) { + assertNull(row); + } else { + assertNotNull(row); + assertEquals(1L, row.getLong(0)); + } + // The transaction is retried once, and will inline the BeginTransaction option with the + // read operation during the retry again, as the initial attempt did return a transaction + // id. + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countRequests(ReadRequest.class)).isEqualTo(2); + assertThat(countRequests(CommitRequest.class)).isEqualTo(2); + mockSpanner.clearRequests(); + } + } + } + + private static int countRequests(Class requestType) { + int count = 0; + for (AbstractMessage msg : mockSpanner.getRequests()) { + if (msg.getClass().equals(requestType)) { + count++; + } + } + return count; + } + + private static int countTransactionsStarted() { + return mockSpanner.getTransactionsStarted().size(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceAdminClientImplTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceAdminClientImplTest.java new file mode 100644 index 000000000000..a78982e8c35a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceAdminClientImplTest.java @@ -0,0 +1,709 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.mockito.MockitoAnnotations.initMocks; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.Identity; +import com.google.cloud.Role; +import com.google.cloud.spanner.InstanceConfigInfo.InstanceConfigField; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.cloud.spanner.spi.v1.SpannerRpc.Paginated; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; +import com.google.common.io.BaseEncoding; +import com.google.iam.v1.Binding; +import com.google.iam.v1.Policy; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.Any; +import com.google.protobuf.ByteString; +import com.google.protobuf.FieldMask; +import com.google.spanner.admin.instance.v1.AutoscalingConfig; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.CreateInstanceMetadata; +import com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata; +import java.util.Arrays; +import java.util.List; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.Mock; + +/** Unit tests for {@link com.google.cloud.spanner.InstanceAdminClientImpl}. 
*/ +@RunWith(JUnit4.class) +public class InstanceAdminClientImplTest { + private static final String PROJECT_ID = "my-project"; + private static final String INSTANCE_ID = "my-instance"; + private static final String INSTANCE_NAME = "projects/my-project/instances/my-instance"; + private static final String INSTANCE_NAME2 = "projects/my-project/instances/my-instance2"; + private static final String CONFIG_ID = "my-config"; + private static final String CONFIG_NAME = "projects/my-project/instanceConfigs/my-config"; + private static final String CONFIG_NAME2 = "projects/my-project/instanceConfigs/my-config2"; + private static final String BASE_CONFIG = "projects/my-project/instanceConfigs/my-base-config"; + + @Mock SpannerRpc rpc; + @Mock DatabaseAdminClient dbClient; + InstanceAdminClientImpl client; + + @Before + public void setUp() { + initMocks(this); + client = new InstanceAdminClientImpl(PROJECT_ID, rpc, dbClient); + } + + private List getAllReplicas() { + return Arrays.asList( + com.google.spanner.admin.instance.v1.ReplicaInfo.newBuilder() + .setLocation("Replica Location 1") + .setType(com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType.READ_WRITE) + .setDefaultLeaderLocation(true) + .build(), + com.google.spanner.admin.instance.v1.ReplicaInfo.newBuilder() + .setLocation("Replica Location 2") + .setType(com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType.READ_ONLY) + .setDefaultLeaderLocation(false) + .build(), + com.google.spanner.admin.instance.v1.ReplicaInfo.newBuilder() + .setLocation("Replica Location 3") + .setType(com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType.WITNESS) + .setDefaultLeaderLocation(false) + .build()); + } + + private com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfigProto() { + return com.google.spanner.admin.instance.v1.InstanceConfig.newBuilder() + .setName(CONFIG_NAME) + .setBaseConfig(BASE_CONFIG) + .addAllReplicas(getAllReplicas()) + .build(); + } + + @Test + public void 
createInstanceConfig() { + OperationFuture< + com.google.spanner.admin.instance.v1.InstanceConfig, CreateInstanceConfigMetadata> + rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "createInstanceConfig", + getInstanceConfigProto(), + CreateInstanceConfigMetadata.getDefaultInstance()); + when(rpc.createInstanceConfig( + "projects/" + PROJECT_ID, CONFIG_ID, getInstanceConfigProto(), false)) + .thenReturn(rawOperationFuture); + + InstanceConfigInfo instanceConfigInfo = + InstanceConfigInfo.fromProto(getInstanceConfigProto(), client); + + OperationFuture op = + client.createInstanceConfig(instanceConfigInfo, Options.validateOnly(false)); + assertThat(op.isDone()).isTrue(); + } + + @Test + public void updateInstanceConfig() throws Exception { + com.google.spanner.admin.instance.v1.InstanceConfig instanceConfig = + com.google.spanner.admin.instance.v1.InstanceConfig.newBuilder() + .setName(CONFIG_NAME) + .setDisplayName(CONFIG_NAME) + .build(); + OperationFuture< + com.google.spanner.admin.instance.v1.InstanceConfig, UpdateInstanceConfigMetadata> + rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "updateInstanceConfig", + getInstanceConfigProto(), + UpdateInstanceConfigMetadata.getDefaultInstance()); + when(rpc.updateInstanceConfig( + instanceConfig, false, FieldMask.newBuilder().addPaths("display_name").build())) + .thenReturn(rawOperationFuture); + InstanceConfigInfo instanceConfigInfo = + InstanceConfigInfo.newBuilder(InstanceConfigId.of(CONFIG_NAME)) + .setDisplayName(CONFIG_NAME) + .build(); + OperationFuture op = + client.updateInstanceConfig( + instanceConfigInfo, + ImmutableList.of(InstanceConfigField.DISPLAY_NAME), + Options.validateOnly(false)); + assertThat(op.isDone()).isTrue(); + assertThat(op.get().getId().getName()).isEqualTo(CONFIG_NAME); + } + + @Test + public void getInstanceConfig() { + when(rpc.getInstanceConfig(CONFIG_NAME)) + .thenReturn(InstanceConfig.newBuilder().setName(CONFIG_NAME).build()); + 
assertThat(client.getInstanceConfig(CONFIG_ID).getId().getName()).isEqualTo(CONFIG_NAME); + } + + @Test + public void dropInstanceConfig() { + client.deleteInstanceConfig(CONFIG_ID); + verify(rpc).deleteInstanceConfig(CONFIG_NAME, null, null); + } + + public Operation getInstanceConfigOperation(String instanceConfigId, Integer operationId) { + InstanceConfig instanceConfig = + com.google.spanner.admin.instance.v1.InstanceConfig.newBuilder() + .setName(instanceConfigId) + .setBaseConfig(BASE_CONFIG) + .addAllReplicas(getAllReplicas()) + .build(); + + CreateInstanceConfigMetadata metadata = + CreateInstanceConfigMetadata.newBuilder().setInstanceConfig(instanceConfig).build(); + + final String operationName = + String.format( + "projects/%s/instanceConfigs/%s/operations/%d", + PROJECT_ID, instanceConfigId, operationId); + return com.google.longrunning.Operation.newBuilder() + .setMetadata(Any.pack(metadata)) + .setResponse(Any.pack(instanceConfig)) + .setDone(false) + .setName(operationName) + .build(); + } + + @Test + public void listInstanceConfigOperations() { + String nextToken = "token"; + Operation operation1 = getInstanceConfigOperation("custom-instance-config-1", 1); + Operation operation2 = getInstanceConfigOperation("custom-instance-config-2", 2); + when(rpc.listInstanceConfigOperations(1, null, null)) + .thenReturn(new Paginated<>(ImmutableList.of(operation1), nextToken)); + when(rpc.listInstanceConfigOperations(1, null, nextToken)) + .thenReturn(new Paginated<>(ImmutableList.of(operation2), "")); + List operations = + Lists.newArrayList(client.listInstanceConfigOperations(Options.pageSize(1)).iterateAll()); + assertThat(operations.get(0).getName()).isEqualTo(operation1.getName()); + assertThat(operations.get(1).getName()).isEqualTo(operation2.getName()); + assertThat(operations.size()).isEqualTo(2); + } + + @Test + public void listInstanceConfigs() { + String nextToken = "token"; + when(rpc.listInstanceConfigs(1, null)) + .thenReturn( + new Paginated<>( + 
ImmutableList.of(InstanceConfig.newBuilder().setName(CONFIG_NAME).build()), + nextToken)); + when(rpc.listInstanceConfigs(1, nextToken)) + .thenReturn( + new Paginated<>( + ImmutableList.of(InstanceConfig.newBuilder().setName(CONFIG_NAME2).build()), "")); + List configs = + Lists.newArrayList(client.listInstanceConfigs(Options.pageSize(1)).iterateAll()); + assertThat(configs.get(0).getId().getName()).isEqualTo(CONFIG_NAME); + assertThat(configs.get(1).getId().getName()).isEqualTo(CONFIG_NAME2); + assertThat(configs.size()).isEqualTo(2); + } + + private com.google.spanner.admin.instance.v1.Instance getInstanceProto() { + return com.google.spanner.admin.instance.v1.Instance.newBuilder() + .setConfig(CONFIG_NAME) + .setName(INSTANCE_NAME) + .setNodeCount(1) + .setProcessingUnits(1000) + .build(); + } + + private com.google.spanner.admin.instance.v1.Instance getInstanceProtoWithProcessingUnits() { + return com.google.spanner.admin.instance.v1.Instance.newBuilder() + .setConfig(CONFIG_NAME) + .setName(INSTANCE_NAME) + .setProcessingUnits(10) + .build(); + } + + private AutoscalingConfig getAutoscalingConfigProto() { + return AutoscalingConfig.newBuilder() + .setAutoscalingLimits( + AutoscalingConfig.AutoscalingLimits.newBuilder().setMinNodes(2).setMaxNodes(10)) + .setAutoscalingTargets( + AutoscalingConfig.AutoscalingTargets.newBuilder() + .setHighPriorityCpuUtilizationPercent(65) + .setStorageUtilizationPercent(95)) + .build(); + } + + private com.google.spanner.admin.instance.v1.Instance getAutoscalingInstanceProto() { + + return com.google.spanner.admin.instance.v1.Instance.newBuilder() + .setConfig(CONFIG_NAME) + .setName(INSTANCE_NAME) + .setAutoscalingConfig(getAutoscalingConfigProto()) + .build(); + } + + private com.google.spanner.admin.instance.v1.Instance getAnotherInstanceProto() { + return com.google.spanner.admin.instance.v1.Instance.newBuilder() + .setConfig(CONFIG_NAME) + .setName(INSTANCE_NAME2) + .setNodeCount(2) + .setProcessingUnits(2000) + .build(); 
+ } + + @Test + public void createInstance() throws Exception { + OperationFuture + rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "createInstance", getInstanceProto(), CreateInstanceMetadata.getDefaultInstance()); + when(rpc.createInstance( + "projects/" + PROJECT_ID, + INSTANCE_ID, + getInstanceProto().toBuilder() + .setProcessingUnits(0) + .setEdition(com.google.spanner.admin.instance.v1.Instance.Edition.ENTERPRISE_PLUS) + .build())) + .thenReturn(rawOperationFuture); + OperationFuture op = + client.createInstance( + InstanceInfo.newBuilder(InstanceId.of(PROJECT_ID, INSTANCE_ID)) + .setInstanceConfigId(InstanceConfigId.of(PROJECT_ID, CONFIG_ID)) + .setEdition(com.google.spanner.admin.instance.v1.Instance.Edition.ENTERPRISE_PLUS) + .setNodeCount(1) + .build()); + assertThat(op.isDone()).isTrue(); + assertThat(op.get().getId().getName()).isEqualTo(INSTANCE_NAME); + } + + @Test + public void createInstanceWithOrgNameInProjectId() throws Exception { + String projectIdWithOrg = "my-org:my-project"; + String instanceNameWithOrg = "projects/my-org:my-project/instances/my-instance"; + String configNameWithOrg = "projects/my-org:my-project/instanceConfigs/my-config"; + + InstanceAdminClient universeClient = + new InstanceAdminClientImpl(projectIdWithOrg, rpc, dbClient); + com.google.spanner.admin.instance.v1.Instance instance = + com.google.spanner.admin.instance.v1.Instance.newBuilder() + .setConfig(configNameWithOrg) + .setName(instanceNameWithOrg) + .setNodeCount(1) + .setProcessingUnits(0) + .setEdition(com.google.spanner.admin.instance.v1.Instance.Edition.ENTERPRISE_PLUS) + .build(); + OperationFuture + rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "createInstance", instance, CreateInstanceMetadata.getDefaultInstance()); + when(rpc.createInstance("projects/" + projectIdWithOrg, INSTANCE_ID, instance)) + .thenReturn(rawOperationFuture); + OperationFuture op = + universeClient.createInstance( + 
InstanceInfo.newBuilder(InstanceId.of(projectIdWithOrg, INSTANCE_ID)) + .setInstanceConfigId(InstanceConfigId.of(projectIdWithOrg, CONFIG_ID)) + .setEdition(com.google.spanner.admin.instance.v1.Instance.Edition.ENTERPRISE_PLUS) + .setNodeCount(1) + .build()); + assertThat(op.isDone()).isTrue(); + assertThat(op.get().getId().getName()).isEqualTo(instanceNameWithOrg); + } + + @Test + public void testCreateInstanceWithProcessingUnits() throws Exception { + OperationFuture + rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "createInstance", + getInstanceProtoWithProcessingUnits(), + CreateInstanceMetadata.getDefaultInstance()); + when(rpc.createInstance( + "projects/" + PROJECT_ID, INSTANCE_ID, getInstanceProtoWithProcessingUnits())) + .thenReturn(rawOperationFuture); + OperationFuture operation = + client.createInstance( + InstanceInfo.newBuilder(InstanceId.of(PROJECT_ID, INSTANCE_ID)) + .setInstanceConfigId(InstanceConfigId.of(PROJECT_ID, CONFIG_ID)) + .setProcessingUnits(10) + .build()); + assertTrue(operation.isDone()); + assertEquals(INSTANCE_NAME, operation.get().getId().getName()); + } + + @Test + public void testCreateInstanceWithAutoscalingConfig() throws Exception { + OperationFuture + rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "createInstance", + getAutoscalingInstanceProto(), + CreateInstanceMetadata.getDefaultInstance()); + when(rpc.createInstance("projects/" + PROJECT_ID, INSTANCE_ID, getAutoscalingInstanceProto())) + .thenReturn(rawOperationFuture); + OperationFuture operation = + client.createInstance( + InstanceInfo.newBuilder(InstanceId.of(PROJECT_ID, INSTANCE_ID)) + .setInstanceConfigId(InstanceConfigId.of(PROJECT_ID, CONFIG_ID)) + .setAutoscalingConfig(getAutoscalingInstanceProto().getAutoscalingConfig()) + .build()); + assertTrue(operation.isDone()); + Instance instance = operation.get(); + assertEquals(INSTANCE_NAME, instance.getId().getName()); + assertEquals( + 
getAutoscalingInstanceProto().getAutoscalingConfig(), instance.getAutoscalingConfig()); + } + + @Test + public void testGetInstance() { + when(rpc.getInstance(INSTANCE_NAME)).thenReturn(getInstanceProto()); + Instance instance = client.getInstance(INSTANCE_ID); + assertEquals(INSTANCE_NAME, instance.getId().getName()); + assertEquals(1000, instance.getProcessingUnits()); + } + + @Test + public void testGetAutoscalingInstance() { + when(rpc.getInstance(INSTANCE_NAME)).thenReturn(getAutoscalingInstanceProto()); + Instance instance = client.getInstance(INSTANCE_ID); + assertEquals(INSTANCE_NAME, instance.getId().getName()); + assertEquals( + getAutoscalingInstanceProto().getAutoscalingConfig(), instance.getAutoscalingConfig()); + } + + @Test + public void dropInstance() { + client.deleteInstance(INSTANCE_ID); + verify(rpc).deleteInstance(INSTANCE_NAME); + } + + @Test + public void updateInstanceMetadata() throws Exception { + com.google.spanner.admin.instance.v1.Instance instance = + com.google.spanner.admin.instance.v1.Instance.newBuilder() + .setName(INSTANCE_NAME) + .setConfig(CONFIG_NAME) + .setNodeCount(2) + .build(); + OperationFuture + rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "updateInstance", getInstanceProto(), UpdateInstanceMetadata.getDefaultInstance()); + when(rpc.updateInstance(instance, FieldMask.newBuilder().addPaths("node_count").build())) + .thenReturn(rawOperationFuture); + InstanceInfo instanceInfo = + InstanceInfo.newBuilder(InstanceId.of(INSTANCE_NAME)) + .setInstanceConfigId(InstanceConfigId.of(CONFIG_NAME)) + .setNodeCount(2) + .build(); + OperationFuture op = + client.updateInstance(instanceInfo, InstanceInfo.InstanceField.NODE_COUNT); + assertThat(op.isDone()).isTrue(); + assertThat(op.get().getId().getName()).isEqualTo(INSTANCE_NAME); + } + + @Test + public void testUpdateInstanceProcessingUnits() throws Exception { + com.google.spanner.admin.instance.v1.Instance instance = + 
com.google.spanner.admin.instance.v1.Instance.newBuilder() + .setName(INSTANCE_NAME) + .setConfig(CONFIG_NAME) + .setProcessingUnits(10) + .build(); + OperationFuture + rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "updateInstance", + getInstanceProtoWithProcessingUnits(), + UpdateInstanceMetadata.getDefaultInstance()); + when(rpc.updateInstance(instance, FieldMask.newBuilder().addPaths("processing_units").build())) + .thenReturn(rawOperationFuture); + InstanceInfo instanceInfo = + InstanceInfo.newBuilder(InstanceId.of(INSTANCE_NAME)) + .setInstanceConfigId(InstanceConfigId.of(CONFIG_NAME)) + .setProcessingUnits(10) + .build(); + OperationFuture operationWithFieldMask = + client.updateInstance(instanceInfo, InstanceInfo.InstanceField.PROCESSING_UNITS); + assertTrue(operationWithFieldMask.isDone()); + assertEquals(INSTANCE_NAME, operationWithFieldMask.get().getId().getName()); + + when(rpc.updateInstance( + instance, + FieldMask.newBuilder() + .addAllPaths( + Arrays.asList( + "display_name", "autoscaling_config", "processing_units", "labels")) + .build())) + .thenReturn(rawOperationFuture); + OperationFuture operation = + client.updateInstance(instanceInfo); + assertTrue(operation.isDone()); + assertEquals(INSTANCE_NAME, operation.get().getId().getName()); + } + + @Test + public void testEnableInstanceAutoscaling() throws Exception { + com.google.spanner.admin.instance.v1.Instance instance = + com.google.spanner.admin.instance.v1.Instance.newBuilder() + .setName(INSTANCE_NAME) + .setConfig(CONFIG_NAME) + .setAutoscalingConfig(getAutoscalingConfigProto()) + .build(); + OperationFuture + rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "updateInstance", + getAutoscalingInstanceProto(), + UpdateInstanceMetadata.getDefaultInstance()); + when(rpc.updateInstance( + instance, FieldMask.newBuilder().addPaths("autoscaling_config").build())) + .thenReturn(rawOperationFuture); + InstanceInfo instanceInfo = + 
InstanceInfo.newBuilder(InstanceId.of(INSTANCE_NAME)) + .setInstanceConfigId(InstanceConfigId.of(CONFIG_NAME)) + .setAutoscalingConfig(getAutoscalingConfigProto()) + .build(); + OperationFuture operationWithFieldMask = + client.updateInstance(instanceInfo, InstanceInfo.InstanceField.AUTOSCALING_CONFIG); + assertTrue(operationWithFieldMask.isDone()); + assertEquals(INSTANCE_NAME, operationWithFieldMask.get().getId().getName()); + + when(rpc.updateInstance( + instance, + FieldMask.newBuilder() + .addAllPaths(Arrays.asList("display_name", "autoscaling_config", "labels")) + .build())) + .thenReturn(rawOperationFuture); + OperationFuture operation = + client.updateInstance(instanceInfo); + assertTrue(operation.isDone()); + assertEquals(INSTANCE_NAME, operation.get().getId().getName()); + } + + @Test + public void testDisableInstanceAutoscaling() throws Exception { + com.google.spanner.admin.instance.v1.Instance instance = + com.google.spanner.admin.instance.v1.Instance.newBuilder() + .setName(INSTANCE_NAME) + .setConfig(CONFIG_NAME) + .setProcessingUnits(10) + .build(); + OperationFuture + rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "updateInstance", + getInstanceProtoWithProcessingUnits(), + UpdateInstanceMetadata.getDefaultInstance()); + when(rpc.updateInstance( + instance, + FieldMask.newBuilder() + .addPaths("autoscaling_config") + .addPaths("processing_units") + .build())) + .thenReturn(rawOperationFuture); + InstanceInfo instanceInfo = + InstanceInfo.newBuilder(InstanceId.of(INSTANCE_NAME)) + .setInstanceConfigId(InstanceConfigId.of(CONFIG_NAME)) + .setProcessingUnits(10) + .build(); + OperationFuture operationWithFieldMask = + client.updateInstance( + instanceInfo, + InstanceInfo.InstanceField.AUTOSCALING_CONFIG, + InstanceInfo.InstanceField.PROCESSING_UNITS); + assertTrue(operationWithFieldMask.isDone()); + assertEquals(INSTANCE_NAME, operationWithFieldMask.get().getId().getName()); + + when(rpc.updateInstance( + instance, + 
FieldMask.newBuilder() + .addAllPaths( + Arrays.asList( + "display_name", "autoscaling_config", "processing_units", "labels")) + .build())) + .thenReturn(rawOperationFuture); + OperationFuture operation = + client.updateInstance(instanceInfo); + assertTrue(operation.isDone()); + assertEquals(INSTANCE_NAME, operation.get().getId().getName()); + } + + @Test + public void testUpdateInstanceWithNodeCountAndProcessingUnits() throws Exception { + com.google.spanner.admin.instance.v1.Instance instance = + com.google.spanner.admin.instance.v1.Instance.newBuilder() + .setName(INSTANCE_NAME) + .setConfig(CONFIG_NAME) + .setNodeCount(3) + .setProcessingUnits(3000) + .build(); + OperationFuture + rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "updateInstance", + getInstanceProtoWithProcessingUnits(), + UpdateInstanceMetadata.getDefaultInstance()); + // node_count should take precedence over processing_units when node_count>0 and no specific + // field mask is set by the caller. 
+ when(rpc.updateInstance( + instance, + FieldMask.newBuilder() + .addAllPaths( + Arrays.asList("display_name", "autoscaling_config", "node_count", "labels")) + .build())) + .thenReturn(rawOperationFuture); + InstanceInfo instanceInfo = + InstanceInfo.newBuilder(InstanceId.of(INSTANCE_NAME)) + .setInstanceConfigId(InstanceConfigId.of(CONFIG_NAME)) + .setNodeCount(3) + .setProcessingUnits(3000) + .build(); + OperationFuture operationWithFieldMask = + client.updateInstance(instanceInfo); + assertTrue(operationWithFieldMask.isDone()); + assertEquals(INSTANCE_NAME, operationWithFieldMask.get().getId().getName()); + } + + @Test + public void testUpdateInstanceWithNodeCountAndProcessingUnitsAndAutoscalingConfig() + throws Exception { + com.google.spanner.admin.instance.v1.Instance instance = + com.google.spanner.admin.instance.v1.Instance.newBuilder() + .setName(INSTANCE_NAME) + .setConfig(CONFIG_NAME) + .setNodeCount(3) + .setProcessingUnits(3000) + .setAutoscalingConfig(getAutoscalingConfigProto()) + .build(); + OperationFuture + rawOperationFuture = + OperationFutureUtil.immediateOperationFuture( + "updateInstance", + getAutoscalingInstanceProto(), + UpdateInstanceMetadata.getDefaultInstance()); + // autoscaling_config should take precedence over node_count or processing_units when + // autoscaling_config is not null and no specific field mask is set by the caller. 
+ when(rpc.updateInstance( + instance, + FieldMask.newBuilder() + .addAllPaths(Arrays.asList("display_name", "autoscaling_config", "labels")) + .build())) + .thenReturn(rawOperationFuture); + InstanceInfo instanceInfo = + InstanceInfo.newBuilder(InstanceId.of(INSTANCE_NAME)) + .setInstanceConfigId(InstanceConfigId.of(CONFIG_NAME)) + .setNodeCount(3) + .setProcessingUnits(3000) + .setAutoscalingConfig(getAutoscalingConfigProto()) + .build(); + OperationFuture operationWithFieldMask = + client.updateInstance(instanceInfo); + assertTrue(operationWithFieldMask.isDone()); + assertEquals(INSTANCE_NAME, operationWithFieldMask.get().getId().getName()); + } + + @Test + public void testListInstances() { + String nextToken = "token"; + String filter = "env:dev"; + when(rpc.listInstances(1, null, filter)) + .thenReturn(new Paginated<>(ImmutableList.of(getAutoscalingInstanceProto()), nextToken)); + when(rpc.listInstances(1, nextToken, filter)) + .thenReturn(new Paginated<>(ImmutableList.of(getAnotherInstanceProto()), "")); + List instances = + Lists.newArrayList( + client.listInstances(Options.pageSize(1), Options.filter(filter)).iterateAll()); + assertEquals(INSTANCE_NAME, instances.get(0).getId().getName()); + assertEquals(getAutoscalingConfigProto(), instances.get(0).getAutoscalingConfig()); + assertEquals(INSTANCE_NAME2, instances.get(1).getId().getName()); + assertEquals(2000, instances.get(1).getProcessingUnits()); + assertEquals(2, instances.size()); + } + + @Test + public void getInstanceIAMPolicy() { + when(rpc.getInstanceAdminIAMPolicy(INSTANCE_NAME)) + .thenReturn( + Policy.newBuilder() + .addBindings( + Binding.newBuilder() + .addMembers("user:joe@example.com") + .setRole("roles/viewer") + .build()) + .build()); + com.google.cloud.Policy policy = client.getInstanceIAMPolicy(INSTANCE_ID); + assertThat(policy.getBindings()) + .containsExactly(Role.viewer(), Sets.newHashSet(Identity.user("joe@example.com"))); + + when(rpc.getInstanceAdminIAMPolicy(INSTANCE_NAME)) + 
.thenReturn( + Policy.newBuilder() + .addBindings( + Binding.newBuilder() + .addAllMembers(Arrays.asList("allAuthenticatedUsers", "domain:google.com")) + .setRole("roles/viewer") + .build()) + .build()); + policy = client.getInstanceIAMPolicy(INSTANCE_ID); + assertThat(policy.getBindings()) + .containsExactly( + Role.viewer(), + Sets.newHashSet(Identity.allAuthenticatedUsers(), Identity.domain("google.com"))); + } + + @Test + public void setInstanceIAMPolicy() { + ByteString etag = ByteString.copyFrom(BaseEncoding.base64().decode("v1")); + String etagEncoded = BaseEncoding.base64().encode(etag.toByteArray()); + Policy proto = + Policy.newBuilder() + .addBindings( + Binding.newBuilder() + .setRole("roles/viewer") + .addMembers("user:joe@example.com") + .build()) + .setEtag(etag) + .build(); + when(rpc.setInstanceAdminIAMPolicy(INSTANCE_NAME, proto)).thenReturn(proto); + com.google.cloud.Policy policy = + com.google.cloud.Policy.newBuilder() + .addIdentity(Role.viewer(), Identity.user("joe@example.com")) + .setEtag(etagEncoded) + .build(); + com.google.cloud.Policy updated = client.setInstanceIAMPolicy(INSTANCE_ID, policy); + assertThat(updated).isEqualTo(policy); + } + + @Test + public void testInstanceIAMPermissions() { + Iterable permissions = + Arrays.asList("spanner.instances.list", "spanner.instances.create"); + when(rpc.testInstanceAdminIAMPermissions(INSTANCE_NAME, permissions)) + .thenReturn( + TestIamPermissionsResponse.newBuilder() + .addPermissions("spanner.instances.list") + .build()); + Iterable allowed = client.testInstanceIAMPermissions(INSTANCE_ID, permissions); + assertThat(allowed).containsExactly("spanner.instances.list"); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceAdminClientTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceAdminClientTest.java new file mode 100644 index 000000000000..1f62b8d5924c --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceAdminClientTest.java @@ -0,0 +1,97 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.cloud.Identity; +import com.google.cloud.NoCredentials; +import com.google.cloud.Policy; +import com.google.cloud.Role; +import java.util.Collections; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class InstanceAdminClientTest { + private static final String PROJECT_ID = "my-project"; + private static final String INSTANCE_ID = "my-instance"; + + private static MockInstanceAdminServiceImpl mockInstanceAdmin; + private static MockServiceHelper serviceHelper; + private LocalChannelProvider channelProvider; + private Spanner spanner; + private InstanceAdminClient client; + + @BeforeClass + public static void startStaticServer() { + mockInstanceAdmin = new MockInstanceAdminServiceImpl(); + serviceHelper = + new MockServiceHelper("in-process-1", Collections.singletonList(mockInstanceAdmin)); + serviceHelper.start(); + } + + @AfterClass 
+ public static void stopServer() { + serviceHelper.stop(); + } + + @Before + public void setUp() { + serviceHelper.reset(); + channelProvider = serviceHelper.createChannelProvider(); + spanner = + SpannerOptions.newBuilder() + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setProjectId(PROJECT_ID) + .build() + .getService(); + client = spanner.getInstanceAdminClient(); + } + + @After + public void tearDown() { + spanner.close(); + } + + @Test + public void getAndSetIAMPolicy() { + Policy policy = client.getInstanceIAMPolicy(INSTANCE_ID); + assertThat(policy).isEqualTo(Policy.newBuilder().build()); + Policy newPolicy = + Policy.newBuilder().addIdentity(Role.viewer(), Identity.user("joe@example.com")).build(); + Policy returnedPolicy = client.setInstanceIAMPolicy(INSTANCE_ID, newPolicy); + assertThat(returnedPolicy).isEqualTo(newPolicy); + assertThat(client.getInstanceIAMPolicy(INSTANCE_ID)).isEqualTo(newPolicy); + } + + @Test + public void testIAMPermissions() { + Iterable permissions = + client.testInstanceIAMPermissions( + INSTANCE_ID, Collections.singletonList("spanner.instances.list")); + assertThat(permissions).containsExactly("spanner.instances.list"); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceAdminGaxTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceAdminGaxTest.java new file mode 100644 index 000000000000..f619d9b461f1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceAdminGaxTest.java @@ -0,0 +1,616 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.fail; + +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.paging.Page; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.admin.instance.v1.MockInstanceAdminImpl; +import com.google.common.base.Throwables; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.Uninterruptibles; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Any; +import com.google.protobuf.Empty; +import com.google.spanner.admin.instance.v1.CreateInstanceMetadata; +import com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.InstanceConfigName; +import com.google.spanner.admin.instance.v1.InstanceName; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse; +import com.google.spanner.admin.instance.v1.ListInstancesRequest; +import com.google.spanner.admin.instance.v1.ListInstancesResponse; +import com.google.spanner.admin.instance.v1.ProjectName; +import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata; +import io.grpc.Server; +import io.grpc.StatusRuntimeException; +import io.grpc.inprocess.InProcessServerBuilder; +import java.io.IOException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import 
java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class InstanceAdminGaxTest { + public static class DelayedStatusRuntimeException extends RuntimeException { + private final long millis; + + public DelayedStatusRuntimeException(StatusRuntimeException cause, long millis) { + super(cause); + this.millis = millis; + } + + @Override + public synchronized Throwable getCause() { + Uninterruptibles.sleepUninterruptibly(millis, TimeUnit.MILLISECONDS); + return super.getCause(); + } + } + + private static final String PROJECT = "PROJECT"; + private static final StatusRuntimeException UNAVAILABLE = + io.grpc.Status.UNAVAILABLE.withDescription("Retryable test exception.").asRuntimeException(); + private static final StatusRuntimeException FAILED_PRECONDITION = + io.grpc.Status.FAILED_PRECONDITION + .withDescription("Non-retryable test exception.") + .asRuntimeException(); + + private static Exception createDelayedInternal() { + return new DelayedStatusRuntimeException( + io.grpc.Status.INTERNAL.withDescription("Delayed test exception.").asRuntimeException(), + 1000L); + } + + public enum ExceptionType { + RETRYABLE { + @Override + public Exception getException() { + return UNAVAILABLE; + } + + @Override + public ErrorCode getExpectedErrorCodeWithGax() { + return null; + } + + @Override + public ErrorCode getExpectedErrorCodeWithoutGax() { + return 
ErrorCode.UNAVAILABLE; + } + + @Override + public boolean isRetryable() { + return true; + } + }, + NON_RETRYABLE { + @Override + public Exception getException() { + return FAILED_PRECONDITION; + } + + @Override + public ErrorCode getExpectedErrorCodeWithGax() { + return ErrorCode.FAILED_PRECONDITION; + } + + @Override + public ErrorCode getExpectedErrorCodeWithoutGax() { + return ErrorCode.FAILED_PRECONDITION; + } + + @Override + public boolean isRetryable() { + return false; + } + }, + DELAYED { + @Override + public Exception getException() { + return createDelayedInternal(); + } + + @Override + public ErrorCode getExpectedErrorCodeWithGax() { + return ErrorCode.DEADLINE_EXCEEDED; + } + + @Override + public ErrorCode getExpectedErrorCodeWithoutGax() { + return ErrorCode.INTERNAL; + } + + @Override + public boolean isRetryable() { + return true; + } + }; + + public abstract Exception getException(); + + public abstract ErrorCode getExpectedErrorCodeWithGax(); + + public abstract ErrorCode getExpectedErrorCodeWithoutGax(); + + public abstract boolean isRetryable(); + } + + private static MockInstanceAdminImpl mockInstanceAdmin; + private static Server server; + private static Spanner spanner; + private static InstanceAdminClient client; + private static LocalChannelProvider channelProvider; + + @Parameter(0) + public int exceptionAtCall; + + @Parameter(1) + public ExceptionType exceptionType; + + @Parameters(name = "exception at call = {0}, exception type = {1}") + public static Collection data() { + List params = new ArrayList<>(); + for (int exceptionAtCall : new int[] {0, 1}) { + for (ExceptionType exceptionType : ExceptionType.values()) { + params.add(new Object[] {exceptionAtCall, exceptionType}); + } + } + return params; + } + + @Rule public ExpectedException expectedException = ExpectedException.none(); + + @BeforeClass + public static void startStaticServer() throws IOException { + mockInstanceAdmin = new MockInstanceAdminImpl(); + String uniqueName = 
InProcessServerBuilder.generateName(); + server = + InProcessServerBuilder.forName(uniqueName) + // We need to use a real executor for timeouts to occur. + .scheduledExecutorService(new ScheduledThreadPoolExecutor(1)) + .addService(mockInstanceAdmin) + .build() + .start(); + channelProvider = LocalChannelProvider.create(uniqueName); + } + + @AfterClass + public static void stopServer() throws InterruptedException { + server.shutdown(); + server.awaitTermination(); + } + + @Before + public void setUp() throws Exception { + mockInstanceAdmin.reset(); + RetrySettings retrySettingsWithLowTimeout = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1L)) + .setMaxRetryDelayDuration(Duration.ofMillis(10L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(20L)) + .setMaxRpcTimeoutDuration(Duration.ofMillis(200L)) + .setRetryDelayMultiplier(1.3d) + .setMaxAttempts(10) + .setTotalTimeoutDuration(Duration.ofMillis(20000L)) + .setJittered(false) + .build(); + RetrySettings retrySettingsWithHighTimeout = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1L)) + .setMaxRetryDelayDuration(Duration.ofMillis(1L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(2000L)) + .setMaxRpcTimeoutDuration(Duration.ofMillis(5000L)) + .setMaxAttempts(3) + .setTotalTimeoutDuration(Duration.ofMillis(15000L)) + .build(); + final RetrySettings retrySettingsToUse = + exceptionType == ExceptionType.DELAYED + ? 
retrySettingsWithLowTimeout + : retrySettingsWithHighTimeout; + SpannerOptions.Builder builder = + SpannerOptions.newBuilder() + .setProjectId(PROJECT) + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()); + builder + .getInstanceAdminStubSettingsBuilder() + .applyToAllUnaryMethods( + input -> { + input.setRetrySettings(retrySettingsToUse); + return null; + }); + if (!builder + .getInstanceAdminStubSettingsBuilder() + .createInstanceOperationSettings() + .getInitialCallSettings() + .getRetryableCodes() + .isEmpty()) { + builder + .getInstanceAdminStubSettingsBuilder() + .createInstanceOperationSettings() + .setInitialCallSettings( + builder + .getInstanceAdminStubSettingsBuilder() + .createInstanceOperationSettings() + .getInitialCallSettings() + .toBuilder() + .setRetrySettings(retrySettingsToUse) + .build()); + } + if (!builder + .getInstanceAdminStubSettingsBuilder() + .updateInstanceOperationSettings() + .getInitialCallSettings() + .getRetryableCodes() + .isEmpty()) { + builder + .getInstanceAdminStubSettingsBuilder() + .updateInstanceOperationSettings() + .setInitialCallSettings( + builder + .getInstanceAdminStubSettingsBuilder() + .updateInstanceOperationSettings() + .getInitialCallSettings() + .toBuilder() + .setRetrySettings(retrySettingsToUse) + .build()); + } + spanner = builder.build().getService(); + client = spanner.getInstanceAdminClient(); + } + + @After + public void tearDown() { + spanner.close(); + } + + private Exception setupException() { + if (!exceptionType.isRetryable()) { + expectedException.expect( + SpannerMatchers.isSpannerException(exceptionType.getExpectedErrorCodeWithGax())); + } + return exceptionType.getException(); + } + + @Test + public void listInstanceConfigsTest() { + Exception exception = setupException(); + String nextPageToken = "token%d"; + List configs = new ArrayList<>(2); + for (int i = 0; i < 2; i++) { + configs.add( + InstanceConfig.newBuilder() + .setDisplayName(String.format("TEST%d", 
i)) + .setName(String.format("projects/%s/instanceConfigs/test%d", PROJECT, i)) + .build()); + } + if (exceptionAtCall == 0) { + mockInstanceAdmin.addException(exception); + } + for (int i = 0; i < 2; i++) { + ListInstanceConfigsResponse.Builder builder = + ListInstanceConfigsResponse.newBuilder() + .addAllInstanceConfigs(Collections.singletonList(configs.get(i))); + if (i < (configs.size() - 1)) { + builder.setNextPageToken(String.format(nextPageToken, i)); + } + if (exceptionAtCall == (i + 1)) { + mockInstanceAdmin.addException(exception); + } + mockInstanceAdmin.addResponse(builder.build()); + } + ProjectName parent = ProjectName.of(PROJECT); + Page pagedListResponse = client.listInstanceConfigs(); + List resources = + Lists.newArrayList(pagedListResponse.iterateAll()); + Assert.assertEquals(2, resources.size()); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(2, actualRequests.size()); + ListInstanceConfigsRequest actualRequest = (ListInstanceConfigsRequest) actualRequests.get(0); + + Assert.assertEquals(parent, ProjectName.parse(actualRequest.getParent())); + } + + @Test + public void getInstanceConfigTest() { + Exception exception = setupException(); + for (int i = 0; i < 2; i++) { + InstanceConfigName name2 = InstanceConfigName.of(PROJECT, "INSTANCE_CONFIG"); + String displayName = "displayName1615086568"; + List leaderOptions = Arrays.asList("leader option 1", "leader option 2"); + InstanceConfig expectedResponse = + InstanceConfig.newBuilder() + .setName(name2.toString()) + .setDisplayName(displayName) + .addAllLeaderOptions(leaderOptions) + .build(); + if (exceptionAtCall == 0) { + mockInstanceAdmin.addException(exception); + } + mockInstanceAdmin.addResponse(expectedResponse); + if (exceptionAtCall == 1) { + mockInstanceAdmin.addException(exception); + } + + InstanceConfigName name = InstanceConfigName.of(PROJECT, "INSTANCE_CONFIG"); + com.google.cloud.spanner.InstanceConfig actualResponse = + 
client.getInstanceConfig(name.toString()); + + Assert.assertEquals(displayName, actualResponse.getDisplayName()); + Assert.assertEquals(leaderOptions, actualResponse.getLeaderOptions()); + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(i + 1, actualRequests.size()); + } + } + + @Test + public void listInstancesTest() { + Exception exception = setupException(); + String nextPageToken = "token%d"; + List instances = new ArrayList<>(2); + for (int i = 0; i < 2; i++) { + instances.add( + com.google.spanner.admin.instance.v1.Instance.newBuilder() + .setDisplayName(String.format("TEST%d", i)) + .setName(String.format("projects/%s/instances/test%d", PROJECT, i)) + .setConfig(String.format("projects/%s/instanceConfigs/test%d", PROJECT, i)) + .build()); + } + if (exceptionAtCall == 0) { + mockInstanceAdmin.addException(exception); + } + for (int i = 0; i < 2; i++) { + ListInstancesResponse.Builder builder = + ListInstancesResponse.newBuilder() + .addAllInstances(Collections.singletonList(instances.get(i))); + if (i < (instances.size() - 1)) { + builder.setNextPageToken(String.format(nextPageToken, i)); + } + if (exceptionAtCall == (i + 1)) { + mockInstanceAdmin.addException(exception); + } + mockInstanceAdmin.addResponse(builder.build()); + } + + ProjectName parent = ProjectName.of(PROJECT); + Page pagedListResponse = client.listInstances(); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + Assert.assertEquals(2, resources.size()); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(2, actualRequests.size()); + ListInstancesRequest actualRequest = (ListInstancesRequest) actualRequests.get(0); + + Assert.assertEquals(parent, ProjectName.parse(actualRequest.getParent())); + } + + @Test + public void getInstanceTest() { + Exception exception = setupException(); + InstanceName name2 = InstanceName.of(PROJECT, "INSTANCE"); + String displayName = "displayName1615086568"; + InstanceConfigName 
config = InstanceConfigName.of(PROJECT, "INSTANCE_CONFIG"); + com.google.spanner.admin.instance.v1.Instance expectedResponse = + com.google.spanner.admin.instance.v1.Instance.newBuilder() + .setName(name2.toString()) + .setConfig(config.toString()) + .setDisplayName(displayName) + .setNodeCount(3) + .build(); + if (exceptionAtCall == 0) { + mockInstanceAdmin.addException(exception); + } + mockInstanceAdmin.addResponse(expectedResponse); + if (exceptionAtCall == 1) { + mockInstanceAdmin.addException(exception); + } + mockInstanceAdmin.addResponse(expectedResponse); + + InstanceName name = InstanceName.of(PROJECT, "INSTANCE"); + for (int i = 0; i < 2; i++) { + Instance actualResponse = client.getInstance(name.toString()); + Assert.assertEquals(displayName, actualResponse.getDisplayName()); + } + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(2, actualRequests.size()); + } + + @Test + public void createInstanceTest() throws Exception { + boolean methodIsIdempotent = + !spanner + .getOptions() + .getInstanceAdminStubSettings() + .createInstanceOperationSettings() + .getInitialCallSettings() + .getRetryableCodes() + .isEmpty(); + if (!methodIsIdempotent && exceptionType == ExceptionType.DELAYED) { + // Skip this test as the method is non-idempotent and won't retry anyways. 
+ return; + } + + Exception exception = setupException(); + InstanceName name = InstanceName.of(PROJECT, "INSTANCE"); + InstanceConfigName config = InstanceConfigName.of(PROJECT, "INSTANCE_CONFIG"); + String displayName = "displayName1615086568"; + int nodeCount = 1539922066; + com.google.spanner.admin.instance.v1.Instance expectedResponse = + com.google.spanner.admin.instance.v1.Instance.newBuilder() + .setName(name.toString()) + .setConfig(config.toString()) + .setDisplayName(displayName) + .setNodeCount(nodeCount) + .build(); + com.google.longrunning.Operation resultOperation = + com.google.longrunning.Operation.newBuilder() + .setName("createInstanceTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + if (exceptionAtCall == 0) { + mockInstanceAdmin.addException(exception); + } + mockInstanceAdmin.addResponse(resultOperation); + if (exceptionAtCall == 1) { + mockInstanceAdmin.addException(exception); + } + mockInstanceAdmin.addResponse(resultOperation); + + for (int i = 0; i < 2; i++) { + OperationFuture actualResponse = + client.createInstance( + InstanceInfo.newBuilder(InstanceId.of(PROJECT, "INSTANCE")) + .setDisplayName(displayName) + .setNodeCount(nodeCount) + .build()); + try { + Instance returnedInstance = actualResponse.get(); + if (!methodIsIdempotent && i == exceptionAtCall) { + fail("missing expected exception"); + } + Assert.assertEquals(displayName, returnedInstance.getDisplayName()); + } catch (ExecutionException e) { + if (!exceptionType.isRetryable() || methodIsIdempotent || i != exceptionAtCall) { + Throwables.throwIfUnchecked(e.getCause()); + throw e; + } + } + } + List actualRequests = mockInstanceAdmin.getRequests(); + if (methodIsIdempotent) { + Assert.assertEquals(2, actualRequests.size()); + } else { + Assert.assertEquals(1, actualRequests.size()); + } + } + + @Test + public void updateInstanceTest() throws Exception { + boolean methodIsIdempotent = + !spanner + .getOptions() + .getInstanceAdminStubSettings() + 
.updateInstanceOperationSettings() + .getInitialCallSettings() + .getRetryableCodes() + .isEmpty(); + if (!methodIsIdempotent && exceptionType == ExceptionType.DELAYED) { + // Skip this test as the method is non-idempotent and won't retry anyways. + return; + } + + Exception exception = setupException(); + InstanceName name = InstanceName.of(PROJECT, "INSTANCE"); + InstanceConfigName config = InstanceConfigName.of(PROJECT, "INSTANCE_CONFIG"); + String displayName = "displayName1615086568"; + int nodeCount = 1539922066; + com.google.spanner.admin.instance.v1.Instance expectedResponse = + com.google.spanner.admin.instance.v1.Instance.newBuilder() + .setName(name.toString()) + .setConfig(config.toString()) + .setDisplayName(displayName) + .setNodeCount(nodeCount) + .build(); + com.google.longrunning.Operation resultOperation = + com.google.longrunning.Operation.newBuilder() + .setName("updateInstanceTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + if (exceptionAtCall == 0) { + mockInstanceAdmin.addException(exception); + } + mockInstanceAdmin.addResponse(resultOperation); + if (exceptionAtCall == 1) { + mockInstanceAdmin.addException(exception); + } + mockInstanceAdmin.addResponse(resultOperation); + + for (int i = 0; i < 2; i++) { + OperationFuture actualResponse = + client.updateInstance( + InstanceInfo.newBuilder(InstanceId.of(PROJECT, "INSTANCE")) + .setDisplayName(displayName) + .setNodeCount(nodeCount) + .build()); + try { + Instance returnedInstance = actualResponse.get(); + if (!methodIsIdempotent && i == exceptionAtCall) { + fail("missing expected exception"); + } + Assert.assertEquals(displayName, returnedInstance.getDisplayName()); + } catch (ExecutionException e) { + if (!exceptionType.isRetryable() || methodIsIdempotent || i != exceptionAtCall) { + Throwables.throwIfUnchecked(e.getCause()); + throw e; + } + } + } + List actualRequests = mockInstanceAdmin.getRequests(); + if (methodIsIdempotent) { + Assert.assertEquals(2, 
actualRequests.size()); + } else { + Assert.assertEquals(1, actualRequests.size()); + } + } + + @Test + public void deleteInstanceTest() { + Exception exception = setupException(); + Empty expectedResponse = Empty.newBuilder().build(); + if (exceptionAtCall == 0) { + mockInstanceAdmin.addException(exception); + } + mockInstanceAdmin.addResponse(expectedResponse); + if (exceptionAtCall == 1) { + mockInstanceAdmin.addException(exception); + } + mockInstanceAdmin.addResponse(expectedResponse); + for (int i = 0; i < 2; i++) { + client.deleteInstance("INSTANCE"); + } + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(2, actualRequests.size()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceConfigIdTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceConfigIdTest.java new file mode 100644 index 000000000000..3f5114aea153 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceConfigIdTest.java @@ -0,0 +1,49 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link InstanceConfigId}. */ +@RunWith(JUnit4.class) +public class InstanceConfigIdTest { + + @Test + public void basic() { + String name = "projects/test-project/instanceConfigs/test-instance-config"; + InstanceConfigId config = InstanceConfigId.of(name); + assertThat(config.getProject()).isEqualTo("test-project"); + assertThat(config.getInstanceConfig()).isEqualTo("test-instance-config"); + assertThat(config.getName()).isEqualTo(name); + assertThat(InstanceConfigId.of(name)).isEqualTo(config); + assertThat(InstanceConfigId.of("test-project", "test-instance-config")).isEqualTo(config); + assertThat(InstanceConfigId.of(name).hashCode()).isEqualTo(config.hashCode()); + } + + @Test + public void badName() { + IllegalArgumentException e = + assertThrows(IllegalArgumentException.class, () -> InstanceConfigId.of("bad name")); + assertNotNull(e.getMessage()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceConfigTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceConfigTest.java new file mode 100644 index 000000000000..234c322f7621 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceConfigTest.java @@ -0,0 +1,134 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.mock; + +import com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType; +import java.util.Arrays; +import org.junit.Before; +import org.junit.Test; + +public class InstanceConfigTest { + + private InstanceAdminClient client; + + @Before + public void setUp() { + client = mock(InstanceAdminClient.class); + } + + @Test + public void testInstanceConfigFromProto() { + final InstanceConfig instanceConfig = + InstanceConfig.fromProto( + com.google.spanner.admin.instance.v1.InstanceConfig.newBuilder() + .setDisplayName("Display Name") + .setName("projects/my-project/instanceConfigs/my-instance-config") + .setBaseConfig("projects/my-project/instanceConfigs/custom-base-config") + .addAllOptionalReplicas( + Arrays.asList( + com.google.spanner.admin.instance.v1.ReplicaInfo.newBuilder() + .setLocation("Optional Replica Location 1") + .setType(ReplicaType.READ_ONLY) + .setDefaultLeaderLocation(true) + .build(), + com.google.spanner.admin.instance.v1.ReplicaInfo.newBuilder() + .setLocation("Optional Replica Location 2") + .setType(ReplicaType.READ_ONLY) + .setDefaultLeaderLocation(false) + .build())) + .addAllLeaderOptions(Arrays.asList("Leader Option 1", "Leader Option 2")) + .addAllReplicas( + Arrays.asList( + com.google.spanner.admin.instance.v1.ReplicaInfo.newBuilder() + .setLocation("Replica Location 1") + .setType(ReplicaType.READ_WRITE) + .setDefaultLeaderLocation(true) + .build(), + 
com.google.spanner.admin.instance.v1.ReplicaInfo.newBuilder() + .setLocation("Replica Location 2") + .setType(ReplicaType.READ_ONLY) + .setDefaultLeaderLocation(false) + .build(), + com.google.spanner.admin.instance.v1.ReplicaInfo.newBuilder() + .setLocation("Replica Location 3") + .setType(ReplicaType.WITNESS) + .setDefaultLeaderLocation(false) + .build())) + .build(), + client); + + assertEquals( + new InstanceConfig( + new InstanceConfig.Builder( + client, InstanceConfigId.of("my-project", "my-instance-config")) + .setDisplayName("Display Name") + .setReplicas( + Arrays.asList( + ReplicaInfo.newBuilder() + .setLocation("Replica Location 1") + .setType(ReplicaInfo.ReplicaType.READ_WRITE) + .setDefaultLeaderLocation(true) + .build(), + ReplicaInfo.newBuilder() + .setLocation("Replica Location 2") + .setType(ReplicaInfo.ReplicaType.READ_ONLY) + .setDefaultLeaderLocation(false) + .build(), + ReplicaInfo.newBuilder() + .setLocation("Replica Location 3") + .setType(ReplicaInfo.ReplicaType.WITNESS) + .setDefaultLeaderLocation(false) + .build())) + .setLeaderOptions(Arrays.asList("Leader Option 1", "Leader Option 2")) + .setOptionalReplicas( + Arrays.asList( + ReplicaInfo.newBuilder() + .setLocation("Optional Replica Location 1") + .setType(ReplicaInfo.ReplicaType.READ_ONLY) + .setDefaultLeaderLocation(true) + .build(), + ReplicaInfo.newBuilder() + .setLocation("Optional Replica Location 2") + .setType(ReplicaInfo.ReplicaType.READ_ONLY) + .setDefaultLeaderLocation(false) + .build())) + .setBaseConfig( + new InstanceConfigInfo.BuilderImpl( + InstanceConfigId.of("my-project", "custom-base-config")) + .build())), + instanceConfig); + } + + @Test + public void testInstanceConfigFromProtoWithoutReplicasAndLeaderOptions() { + final InstanceConfig instanceConfig = + InstanceConfig.fromProto( + com.google.spanner.admin.instance.v1.InstanceConfig.newBuilder() + .setDisplayName("Display Name") + .setName("projects/my-project/instanceConfigs/my-instance-config") + .build(), + 
client); + + assertEquals( + new InstanceConfig( + InstanceConfigId.of("my-project", "my-instance-config"), "Display Name", client), + instanceConfig); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceIdTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceIdTest.java new file mode 100644 index 000000000000..01a609fb10f4 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceIdTest.java @@ -0,0 +1,49 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link InstanceId}. 
*/ +@RunWith(JUnit4.class) +public class InstanceIdTest { + + @Test + public void basic() { + String name = "projects/test-project/instances/test-instance"; + InstanceId instance = InstanceId.of(name); + assertThat(instance.getProject()).isEqualTo("test-project"); + assertThat(instance.getInstance()).isEqualTo("test-instance"); + assertThat(instance.getName()).isEqualTo(name); + assertThat(InstanceId.of(name)).isEqualTo(instance); + assertThat(InstanceId.of("test-project", "test-instance")).isEqualTo(instance); + assertThat(InstanceId.of(name).hashCode()).isEqualTo(instance.hashCode()); + } + + @Test + public void badName() { + IllegalArgumentException e = + assertThrows(IllegalArgumentException.class, () -> InstanceId.of("bad name")); + assertNotNull(e.getMessage()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceInfoTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceInfoTest.java new file mode 100644 index 000000000000..645c696e3e94 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceInfoTest.java @@ -0,0 +1,236 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.Timestamp; +import com.google.common.testing.EqualsTester; +import com.google.spanner.admin.instance.v1.AutoscalingConfig; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class InstanceInfoTest { + + @Test + public void testEmptyBuilder() { + InstanceId id = InstanceId.of("test-project", "test-instance"); + InstanceInfo.Builder builder = InstanceInfo.newBuilder(id); + InstanceInfo info = builder.build(); + assertNull(info.getDisplayName()); + assertEquals(InstanceId.of("test-project", "test-instance"), info.getId()); + assertNull(info.getInstanceConfigId()); + assertNull(info.getState()); + assertEquals(0, info.getNodeCount()); + assertEquals(0, info.getProcessingUnits()); + assertTrue(info.getLabels().isEmpty()); + assertNull(info.getUpdateTime()); + assertNull(info.getCreateTime()); + assertNull(info.getAutoscalingConfig()); + } + + @Test + public void testBuildInstanceInfo() { + InstanceId id = new InstanceId("test-project", "test-instance"); + InstanceConfigId configId = new InstanceConfigId("test-project", "test-instance-config"); + AutoscalingConfig autoscalingConfig = + AutoscalingConfig.newBuilder() + .setAutoscalingLimits( + AutoscalingConfig.AutoscalingLimits.newBuilder() + .setMinProcessingUnits(1000) + .setMaxProcessingUnits(5000)) + .setAutoscalingTargets( + AutoscalingConfig.AutoscalingTargets.newBuilder() + .setHighPriorityCpuUtilizationPercent(65) + .setStorageUtilizationPercent(95)) + .build(); + + InstanceInfo info = + InstanceInfo.newBuilder(id) + .setInstanceConfigId(configId) + .setDisplayName("test instance") + .setNodeCount(1) + .setProcessingUnits(2000) + .setAutoscalingConfig(autoscalingConfig) + 
.setState(InstanceInfo.State.READY) + .addLabel("env", "prod") + .addLabel("region", "us") + .setUpdateTime(Timestamp.ofTimeMicroseconds(86000)) + .setCreateTime(Timestamp.ofTimeMicroseconds(46000)) + .build(); + assertThat(info.getId()).isEqualTo(id); + assertThat(info.getInstanceConfigId()).isEqualTo(configId); + assertThat(info.getDisplayName()).isEqualTo("test instance"); + assertThat(info.getNodeCount()).isEqualTo(1); + assertThat(info.getProcessingUnits()).isEqualTo(2000); + assertThat(info.getAutoscalingConfig()).isEqualTo(autoscalingConfig); + assertThat(info.getState()).isEqualTo(InstanceInfo.State.READY); + assertThat(info.getLabels()).containsExactly("env", "prod", "region", "us"); + assertEquals(Timestamp.ofTimeMicroseconds(86000), info.getUpdateTime()); + assertEquals(Timestamp.ofTimeMicroseconds(46000), info.getCreateTime()); + + AutoscalingConfig newAutoscalingConfig = + autoscalingConfig.toBuilder() + .setAutoscalingLimits( + AutoscalingConfig.AutoscalingLimits.newBuilder().setMinNodes(10).setMaxNodes(100)) + .build(); + info = + info.toBuilder() + .setDisplayName("new test instance") + .setAutoscalingConfig(newAutoscalingConfig) + .build(); + assertThat(info.getId()).isEqualTo(id); + assertThat(info.getInstanceConfigId()).isEqualTo(configId); + assertThat(info.getDisplayName()).isEqualTo("new test instance"); + assertThat(info.getNodeCount()).isEqualTo(1); + assertThat(info.getProcessingUnits()).isEqualTo(2000); + assertThat(info.getAutoscalingConfig()).isEqualTo(newAutoscalingConfig); + assertThat(info.getState()).isEqualTo(InstanceInfo.State.READY); + assertThat(info.getLabels()).containsExactly("env", "prod", "region", "us"); + assertEquals(Timestamp.ofTimeMicroseconds(86000), info.getUpdateTime()); + assertEquals(Timestamp.ofTimeMicroseconds(46000), info.getCreateTime()); + } + + @Test + public void testToBuilder() { + InstanceId id = new InstanceId("test-project", "test-instance"); + InstanceConfigId configId = new 
InstanceConfigId("test-project", "test-instance-config"); + AutoscalingConfig autoscalingConfig = + AutoscalingConfig.newBuilder() + .setAutoscalingLimits( + AutoscalingConfig.AutoscalingLimits.newBuilder() + .setMinProcessingUnits(1000) + .setMaxProcessingUnits(5000)) + .setAutoscalingTargets( + AutoscalingConfig.AutoscalingTargets.newBuilder() + .setHighPriorityCpuUtilizationPercent(65) + .setStorageUtilizationPercent(95)) + .build(); + InstanceInfo info = + InstanceInfo.newBuilder(id) + .setInstanceConfigId(configId) + .setDisplayName("test instance") + .setNodeCount(1) + .setProcessingUnits(2000) + .setAutoscalingConfig(autoscalingConfig) + .setState(InstanceInfo.State.READY) + .addLabel("env", "prod") + .addLabel("region", "us") + .setUpdateTime(Timestamp.ofTimeMicroseconds(86000)) + .setCreateTime(Timestamp.ofTimeMicroseconds(46000)) + .build(); + + InstanceInfo rebuilt = info.toBuilder().setDisplayName("new test instance").build(); + assertThat(rebuilt.getId()).isEqualTo(id); + assertThat(rebuilt.getInstanceConfigId()).isEqualTo(configId); + assertThat(rebuilt.getDisplayName()).isEqualTo("new test instance"); + assertThat(rebuilt.getNodeCount()).isEqualTo(1); + assertThat(rebuilt.getProcessingUnits()).isEqualTo(2000); + assertThat(info.getAutoscalingConfig()).isEqualTo(autoscalingConfig); + assertThat(rebuilt.getState()).isEqualTo(InstanceInfo.State.READY); + assertThat(rebuilt.getLabels()).containsExactly("env", "prod", "region", "us"); + assertEquals(Timestamp.ofTimeMicroseconds(86000), rebuilt.getUpdateTime()); + assertEquals(Timestamp.ofTimeMicroseconds(46000), rebuilt.getCreateTime()); + } + + @Test + public void testEquals() { + InstanceId id = new InstanceId("test-project", "test-instance"); + InstanceConfigId configId1 = new InstanceConfigId("test-project", "test-instance-config"); + InstanceConfigId configId2 = new InstanceConfigId("test-project", "other-test-instance-config"); + + AutoscalingConfig autoscalingConfig1 = + 
AutoscalingConfig.newBuilder() + .setAutoscalingLimits( + AutoscalingConfig.AutoscalingLimits.newBuilder() + .setMinProcessingUnits(1000) + .setMaxProcessingUnits(5000)) + .setAutoscalingTargets( + AutoscalingConfig.AutoscalingTargets.newBuilder() + .setHighPriorityCpuUtilizationPercent(65) + .setStorageUtilizationPercent(95)) + .build(); + + AutoscalingConfig autoscalingConfig2 = + autoscalingConfig1.toBuilder() + .setAutoscalingLimits( + autoscalingConfig1.getAutoscalingLimits().toBuilder() + .setMinNodes(50) + .setMaxNodes(100)) + .build(); + + InstanceInfo instance = + InstanceInfo.newBuilder(id) + .setInstanceConfigId(configId1) + .setDisplayName("test instance") + .setNodeCount(1) + .setProcessingUnits(2000) + .setAutoscalingConfig(autoscalingConfig1) + .setState(InstanceInfo.State.READY) + .addLabel("env", "prod") + .addLabel("region", "us") + .setUpdateTime(Timestamp.ofTimeMicroseconds(86000)) + .setCreateTime(Timestamp.ofTimeMicroseconds(46000)) + .build(); + InstanceInfo instance2 = + InstanceInfo.newBuilder(id) + .setInstanceConfigId(configId1) + .setDisplayName("test instance") + .setNodeCount(1) + .setProcessingUnits(2000) + .setAutoscalingConfig(autoscalingConfig1) + .setState(InstanceInfo.State.READY) + .addLabel("region", "us") + .addLabel("env", "prod") + .setUpdateTime(Timestamp.ofTimeMicroseconds(86000)) + .setCreateTime(Timestamp.ofTimeMicroseconds(46000)) + .build(); + InstanceInfo instance3 = + InstanceInfo.newBuilder(id) + .setInstanceConfigId(configId2) + .setDisplayName("other test instance") + .setNodeCount(1) + .setProcessingUnits(2000) + .setState(InstanceInfo.State.READY) + .addLabel("env", "prod") + .setUpdateTime(Timestamp.ofTimeMicroseconds(8000)) + .setCreateTime(Timestamp.ofTimeMicroseconds(4000)) + .build(); + InstanceInfo instance4 = + InstanceInfo.newBuilder(id) + .setInstanceConfigId(configId2) + .setDisplayName("other test instance") + .setNodeCount(1) + .setProcessingUnits(2000) + .setAutoscalingConfig(autoscalingConfig2) + 
.setState(InstanceInfo.State.READY) + .addLabel("env", "prod") + .setUpdateTime(Timestamp.ofTimeMicroseconds(8000)) + .setCreateTime(Timestamp.ofTimeMicroseconds(4000)) + .build(); + EqualsTester tester = new EqualsTester(); + tester.addEqualityGroup(instance, instance2); + tester.addEqualityGroup(instance3); + tester.addEqualityGroup(instance4); + tester.testEquals(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceTest.java new file mode 100644 index 000000000000..692d92de8961 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceTest.java @@ -0,0 +1,296 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.verify; +import static org.mockito.MockitoAnnotations.initMocks; + +import com.google.cloud.Identity; +import com.google.cloud.Policy; +import com.google.cloud.Role; +import com.google.cloud.Timestamp; +import com.google.common.testing.EqualsTester; +import com.google.spanner.admin.instance.v1.AutoscalingConfig; +import java.util.Collections; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.Mock; + +/** Unit tests for {@link Instance}. */ +@RunWith(JUnit4.class) +public class InstanceTest { + + @Mock InstanceAdminClient instanceClient; + @Mock DatabaseAdminClient dbClient; + + @Before + public void setUp() { + initMocks(this); + } + + @Test + public void buildInstance() { + InstanceId id = new InstanceId("test-project", "test-instance"); + InstanceConfigId configId = new InstanceConfigId("test-project", "test-instance-config"); + AutoscalingConfig autoscalingConfig = + AutoscalingConfig.newBuilder() + .setAutoscalingLimits( + AutoscalingConfig.AutoscalingLimits.newBuilder() + .setMinProcessingUnits(1000) + .setMaxProcessingUnits(5000)) + .setAutoscalingTargets( + AutoscalingConfig.AutoscalingTargets.newBuilder() + .setHighPriorityCpuUtilizationPercent(65) + .setStorageUtilizationPercent(95)) + .build(); + Instance instance = + new Instance.Builder(instanceClient, dbClient, id) + .setInstanceConfigId(configId) + .setDisplayName("test instance") + .setNodeCount(1) + .setProcessingUnits(2000) + .setAutoscalingConfig(autoscalingConfig) + .setState(InstanceInfo.State.READY) + .addLabel("env", "prod") + .addLabel("region", "us") + .setUpdateTime(Timestamp.ofTimeMicroseconds(86000)) + .setCreateTime(Timestamp.ofTimeMicroseconds(46000)) + .build(); + assertThat(instance.getId()).isEqualTo(id); + 
assertThat(instance.getInstanceConfigId()).isEqualTo(configId); + assertThat(instance.getDisplayName()).isEqualTo("test instance"); + assertThat(instance.getNodeCount()).isEqualTo(1); + assertThat(instance.getProcessingUnits()).isEqualTo(2000); + assertThat(instance.getAutoscalingConfig()).isEqualTo(autoscalingConfig); + assertThat(instance.getState()).isEqualTo(InstanceInfo.State.READY); + assertThat(instance.getLabels()).containsExactly("env", "prod", "region", "us"); + assertEquals(Timestamp.ofTimeMicroseconds(86000), instance.getUpdateTime()); + assertEquals(Timestamp.ofTimeMicroseconds(46000), instance.getCreateTime()); + + AutoscalingConfig newAutoscalingConfig = + autoscalingConfig.toBuilder() + .setAutoscalingLimits( + AutoscalingConfig.AutoscalingLimits.newBuilder().setMinNodes(10).setMaxNodes(100)) + .build(); + instance = + instance.toBuilder() + .setDisplayName("new test instance") + .setAutoscalingConfig(newAutoscalingConfig) + .build(); + assertThat(instance.getId()).isEqualTo(id); + assertThat(instance.getInstanceConfigId()).isEqualTo(configId); + assertThat(instance.getDisplayName()).isEqualTo("new test instance"); + assertThat(instance.getNodeCount()).isEqualTo(1); + assertThat(instance.getProcessingUnits()).isEqualTo(2000); + assertThat(instance.getAutoscalingConfig()).isEqualTo(newAutoscalingConfig); + assertThat(instance.getState()).isEqualTo(InstanceInfo.State.READY); + assertThat(instance.getLabels()).containsExactly("env", "prod", "region", "us"); + assertEquals(Timestamp.ofTimeMicroseconds(86000), instance.getUpdateTime()); + assertEquals(Timestamp.ofTimeMicroseconds(46000), instance.getCreateTime()); + } + + @Test + public void equality() { + InstanceId id = new InstanceId("test-project", "test-instance"); + InstanceConfigId configId = new InstanceConfigId("test-project", "test-instance-config"); + + Instance instance = + new Instance.Builder(instanceClient, dbClient, id) + .setInstanceConfigId(configId) + .setDisplayName("test instance") + 
.setNodeCount(1) + .setProcessingUnits(2000) + .setState(InstanceInfo.State.READY) + .addLabel("env", "prod") + .addLabel("region", "us") + .setUpdateTime(Timestamp.ofTimeMicroseconds(86000)) + .setCreateTime(Timestamp.ofTimeMicroseconds(46000)) + .build(); + Instance instance2 = + new Instance.Builder(instanceClient, dbClient, id) + .setInstanceConfigId(configId) + .setDisplayName("test instance") + .setNodeCount(1) + .setProcessingUnits(2000) + .setState(InstanceInfo.State.READY) + .addLabel("region", "us") + .addLabel("env", "prod") + .setUpdateTime(Timestamp.ofTimeMicroseconds(86000)) + .setCreateTime(Timestamp.ofTimeMicroseconds(46000)) + .build(); + Instance instance3 = + new Instance.Builder(instanceClient, dbClient, id) + .setInstanceConfigId(configId) + .setDisplayName("test instance") + .setNodeCount(1) + .setProcessingUnits(2000) + .setState(InstanceInfo.State.READY) + .addLabel("env", "prod") + .setUpdateTime(Timestamp.ofTimeMicroseconds(8000)) + .setCreateTime(Timestamp.ofTimeMicroseconds(4000)) + .build(); + EqualsTester tester = new EqualsTester(); + tester.addEqualityGroup(instance, instance2); + tester.addEqualityGroup(instance3); + tester.testEquals(); + } + + @Test + public void equalityWithAutoscalingConfig() { + InstanceId id = new InstanceId("test-project", "test-instance"); + InstanceConfigId configId = new InstanceConfigId("test-project", "test-instance-config"); + AutoscalingConfig autoscalingConfig1 = + AutoscalingConfig.newBuilder() + .setAutoscalingLimits( + AutoscalingConfig.AutoscalingLimits.newBuilder() + .setMinProcessingUnits(1000) + .setMaxProcessingUnits(5000)) + .setAutoscalingTargets( + AutoscalingConfig.AutoscalingTargets.newBuilder() + .setHighPriorityCpuUtilizationPercent(65) + .setStorageUtilizationPercent(95)) + .build(); + + AutoscalingConfig autoscalingConfig2 = + autoscalingConfig1.toBuilder() + .setAutoscalingLimits( + autoscalingConfig1.getAutoscalingLimits().toBuilder() + .setMinNodes(50) + .setMaxNodes(100)) + 
.build(); + + Instance instance = + new Instance.Builder(instanceClient, dbClient, id) + .setInstanceConfigId(configId) + .setDisplayName("test instance") + .setNodeCount(1) + .setProcessingUnits(2000) + .setAutoscalingConfig(autoscalingConfig1) + .setState(InstanceInfo.State.READY) + .addLabel("env", "prod") + .addLabel("region", "us") + .setUpdateTime(Timestamp.ofTimeMicroseconds(86000)) + .setCreateTime(Timestamp.ofTimeMicroseconds(46000)) + .build(); + Instance instance2 = + new Instance.Builder(instanceClient, dbClient, id) + .setInstanceConfigId(configId) + .setDisplayName("test instance") + .setNodeCount(1) + .setProcessingUnits(2000) + .setAutoscalingConfig(autoscalingConfig1) + .setState(InstanceInfo.State.READY) + .addLabel("region", "us") + .addLabel("env", "prod") + .setUpdateTime(Timestamp.ofTimeMicroseconds(86000)) + .setCreateTime(Timestamp.ofTimeMicroseconds(46000)) + .build(); + Instance instance3 = + new Instance.Builder(instanceClient, dbClient, id) + .setInstanceConfigId(configId) + .setDisplayName("test instance") + .setNodeCount(1) + .setProcessingUnits(2000) + .setAutoscalingConfig(autoscalingConfig1) + .setState(InstanceInfo.State.READY) + .addLabel("env", "prod") + .setUpdateTime(Timestamp.ofTimeMicroseconds(8000)) + .setCreateTime(Timestamp.ofTimeMicroseconds(4000)) + .build(); + Instance instance4 = + new Instance.Builder(instanceClient, dbClient, id) + .setInstanceConfigId(configId) + .setDisplayName("test instance") + .setNodeCount(1) + .setProcessingUnits(2000) + .setAutoscalingConfig(autoscalingConfig2) + .setState(InstanceInfo.State.READY) + .addLabel("region", "us") + .addLabel("env", "prod") + .setUpdateTime(Timestamp.ofTimeMicroseconds(86000)) + .setCreateTime(Timestamp.ofTimeMicroseconds(46000)) + .build(); + EqualsTester tester = new EqualsTester(); + tester.addEqualityGroup(instance, instance2); + tester.addEqualityGroup(instance3); + tester.addEqualityGroup(instance4); + tester.testEquals(); + } + + @Test + public void 
listDatabases() { + InstanceId id = new InstanceId("test-project", "test-instance"); + Instance instance = new Instance.Builder(instanceClient, dbClient, id).build(); + instance.listDatabases(); + verify(dbClient).listDatabases("test-instance"); + } + + @Test + public void listBackups() { + InstanceId id = new InstanceId("test-project", "test-instance"); + Instance instance = new Instance.Builder(instanceClient, dbClient, id).build(); + instance.listBackups(); + verify(dbClient).listBackups("test-instance"); + } + + @Test + public void listDatabaseOperations() { + InstanceId id = new InstanceId("test-project", "test-instance"); + Instance instance = new Instance.Builder(instanceClient, dbClient, id).build(); + instance.listDatabaseOperations(); + verify(dbClient).listDatabaseOperations("test-instance"); + } + + @Test + public void listBackupOperations() { + InstanceId id = new InstanceId("test-project", "test-instance"); + Instance instance = new Instance.Builder(instanceClient, dbClient, id).build(); + instance.listBackupOperations(); + verify(dbClient).listBackupOperations("test-instance"); + } + + @Test + public void getIAMPolicy() { + InstanceId id = new InstanceId("test-project", "test-instance"); + Instance instance = new Instance.Builder(instanceClient, dbClient, id).build(); + instance.getIAMPolicy(); + verify(instanceClient).getInstanceIAMPolicy("test-instance"); + } + + @Test + public void setIAMPolicy() { + InstanceId id = new InstanceId("test-project", "test-instance"); + Instance instance = new Instance.Builder(instanceClient, dbClient, id).build(); + Policy policy = + Policy.newBuilder().addIdentity(Role.viewer(), Identity.user("joe@example.com")).build(); + instance.setIAMPolicy(policy); + verify(instanceClient).setInstanceIAMPolicy("test-instance", policy); + } + + @Test + public void testIAMPermissions() { + InstanceId id = new InstanceId("test-project", "test-instance"); + Instance instance = new Instance.Builder(instanceClient, dbClient, 
id).build(); + Iterable permissions = Collections.singletonList("read"); + instance.testIAMPermissions(permissions); + verify(instanceClient).testInstanceIAMPermissions("test-instance", permissions); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IntegrationTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IntegrationTest.java new file mode 100644 index 000000000000..f0e7480e4a43 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IntegrationTest.java @@ -0,0 +1,20 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +/** Integration Test interface. */ +public interface IntegrationTest {} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IntegrationTestEnv.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IntegrationTestEnv.java new file mode 100644 index 000000000000..ed59601f1867 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IntegrationTestEnv.java @@ -0,0 +1,381 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.testing.ExperimentalHostHelper.isExperimentalHost; +import static com.google.common.base.Preconditions.checkState; +import static org.junit.Assume.assumeFalse; + +import com.google.api.client.util.ExponentialBackOff; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.Timestamp; +import com.google.cloud.opentelemetry.trace.TraceConfiguration; +import com.google.cloud.opentelemetry.trace.TraceExporter; +import com.google.cloud.spanner.DatabaseInfo.DatabaseField; +import com.google.cloud.spanner.testing.EmulatorSpannerHelper; +import com.google.cloud.spanner.testing.RemoteSpannerHelper; +import com.google.common.collect.Iterators; +import com.google.spanner.admin.instance.v1.CreateInstanceMetadata; +import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator; +import io.opentelemetry.context.propagation.ContextPropagators; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.resources.Resource; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.export.BatchSpanProcessor; +import io.opentelemetry.sdk.trace.export.SpanExporter; +import io.opentelemetry.sdk.trace.samplers.Sampler; +import java.util.Collection; +import java.util.Collections; +import java.util.Objects; +import java.util.Random; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import java.util.logging.Level; +import 
java.util.logging.Logger; +import org.junit.rules.ExternalResource; + +/** + * JUnit 4 test rule that provides access to a Cloud Spanner instance to use for tests, and allows + * uniquely named (per {@code IntegrationTestEnv} instance) test databases to be created within that + * instance. An existing instance can be used by naming it in the {@link #TEST_INSTANCE_PROPERTY} + * property; if the property is not set, an instance will be created and destroyed by the rule. + * + *

This class is normally used as a {@code @ClassRule}. + */ +public class IntegrationTestEnv extends ExternalResource { + + /** Names a property that provides the class name of the {@link TestEnvConfig} to use. */ + public static final String TEST_ENV_CONFIG_CLASS_NAME = "spanner.testenv.config.class"; + + public static final String CONFIG_CLASS = System.getProperty(TEST_ENV_CONFIG_CLASS_NAME, null); + + /** + * Names a property that, if set, identifies an existing Cloud Spanner instance to use for tests. + */ + public static final String TEST_INSTANCE_PROPERTY = "spanner.testenv.instance"; + + public static final String MAX_CREATE_INSTANCE_ATTEMPTS = + "spanner.testenv.max_create_instance_attempts"; + + private static final Logger logger = Logger.getLogger(IntegrationTestEnv.class.getName()); + + private TestEnvConfig config; + private InstanceAdminClient instanceAdminClient; + private DatabaseAdminClient databaseAdminClient; + private boolean isOwnedInstance; + private final boolean alwaysCreateNewInstance; + private RemoteSpannerHelper testHelper; + + private Collection testEnvOptions = Collections.emptyList(); + + public enum TestEnvOptions { + USE_END_TO_END_TRACING; + // TODO : Move alwaysCreateNewInstance to TestEnvOptions + } + + public IntegrationTestEnv() { + this(false); + } + + public IntegrationTestEnv(Collection testEnvOptions) { + this(false); + this.testEnvOptions = testEnvOptions; + } + + public IntegrationTestEnv(final boolean alwaysCreateNewInstance) { + this.alwaysCreateNewInstance = alwaysCreateNewInstance; + } + + public RemoteSpannerHelper getTestHelper() { + checkInitialized(); + return testHelper; + } + + @SuppressWarnings("unchecked") + protected void initializeConfig() + throws ClassNotFoundException, InstantiationException, IllegalAccessException { + if (CONFIG_CLASS == null) { + throw new NullPointerException("Property " + TEST_ENV_CONFIG_CLASS_NAME + " needs to be set"); + } + Class configClass; + if 
(EmulatorSpannerHelper.isUsingEmulator()) { + // Make sure that we use an owned instance on the emulator. + System.setProperty(TEST_INSTANCE_PROPERTY, ""); + } + configClass = (Class) Class.forName(CONFIG_CLASS); + config = configClass.newInstance(); + } + + boolean isCloudDevel() { + return Objects.equals( + System.getProperty("spanner.gce.config.server_url"), + "https://staging-wrenchworks.sandbox.googleapis.com"); + } + + @Override + protected void before() throws Throwable { + this.initializeConfig(); + assumeFalse(alwaysCreateNewInstance && isCloudDevel()); + assumeFalse( + "Creating instances is not supported in experimental host", + alwaysCreateNewInstance && isExperimentalHost()); + + this.config.setUp(); + SpannerOptions options = config.spannerOptions(); + if (testEnvOptions.stream() + .anyMatch(testEnvOption -> TestEnvOptions.USE_END_TO_END_TRACING.equals(testEnvOption))) { + // OpenTelemetry set up for enabling End to End tracing for all integration test env. + // The gRPC stub and connections are created during test env set up using SpannerOptions and + // are reused for executing statements. 
+ options = spannerOptionsWithEndToEndTracing(options); + } + String instanceProperty = System.getProperty(TEST_INSTANCE_PROPERTY, ""); + InstanceId instanceId; + if (!instanceProperty.isEmpty() && !alwaysCreateNewInstance) { + instanceId = InstanceId.of(instanceProperty); + isOwnedInstance = false; + logger.log(Level.INFO, "Using existing test instance: {0}", instanceId); + } else { + instanceId = + InstanceId.of( + config.spannerOptions().getProjectId(), + String.format("test-instance-%08d", new Random().nextInt(100000000))); + isOwnedInstance = true; + } + testHelper = createTestHelper(options, instanceId); + instanceAdminClient = testHelper.getClient().getInstanceAdminClient(); + databaseAdminClient = testHelper.getClient().getDatabaseAdminClient(); + logger.log(Level.FINE, "Test env endpoint is {0}", options.getHost()); + if (isOwnedInstance) { + initializeInstance(instanceId); + } else { + cleanUpOldDatabases(instanceId); + } + } + + public SpannerOptions spannerOptionsWithEndToEndTracing(SpannerOptions options) { + assumeFalse("This test requires credentials", EmulatorSpannerHelper.isUsingEmulator()); + + TraceConfiguration.Builder traceConfigurationBuilder = TraceConfiguration.builder(); + if (options.getCredentials() != null) { + traceConfigurationBuilder.setCredentials(options.getCredentials()); + } + SpanExporter traceExporter = + TraceExporter.createWithConfiguration( + traceConfigurationBuilder.setProjectId(options.getProjectId()).build()); + + String serviceName = "java-spanner-integration-tests-" + ThreadLocalRandom.current().nextInt(); + SdkTracerProvider sdkTracerProvider = + SdkTracerProvider.builder() + // Always sample in this test to ensure we know what we get. 
+ .setSampler(Sampler.alwaysOn()) + .setResource(Resource.builder().put("service.name", serviceName).build()) + .addSpanProcessor(BatchSpanProcessor.builder(traceExporter).build()) + .build(); + OpenTelemetrySdk openTelemetry = + OpenTelemetrySdk.builder() + .setTracerProvider(sdkTracerProvider) + .setPropagators(ContextPropagators.create(W3CTraceContextPropagator.getInstance())) + .build(); + SpannerOptions.enableOpenTelemetryTraces(); + return options.toBuilder() + .setOpenTelemetry(openTelemetry) + .setEnableEndToEndTracing(true) + .build(); + } + + RemoteSpannerHelper createTestHelper(SpannerOptions options, InstanceId instanceId) + throws Throwable { + return RemoteSpannerHelper.create(options, instanceId); + } + + @Override + protected void after() { + cleanUpInstance(); + this.config.tearDown(); + } + + private void initializeInstance(InstanceId instanceId) throws Exception { + InstanceConfig instanceConfig; + try { + instanceConfig = instanceAdminClient.getInstanceConfig("regional-us-east4"); + } catch (Throwable ignore) { + instanceConfig = + Iterators.get(instanceAdminClient.listInstanceConfigs().iterateAll().iterator(), 0, null); + } + checkState(instanceConfig != null, "No instance configs found"); + + InstanceConfigId configId = instanceConfig.getId(); + logger.log(Level.FINE, "Creating instance using config {0}", configId); + InstanceInfo instance = + InstanceInfo.newBuilder(instanceId) + .setNodeCount(1) + .setDisplayName("Test instance") + .setEdition(com.google.spanner.admin.instance.v1.Instance.Edition.ENTERPRISE_PLUS) + .setInstanceConfigId(configId) + .build(); + OperationFuture op = + instanceAdminClient.createInstance(instance); + Instance createdInstance; + int maxAttempts = 25; + try { + maxAttempts = + Integer.parseInt( + System.getProperty(MAX_CREATE_INSTANCE_ATTEMPTS, String.valueOf(maxAttempts))); + } catch (NumberFormatException ignore) { + // Ignore and fall back to the default. 
+ } + ExponentialBackOff backOff = + new ExponentialBackOff.Builder() + .setInitialIntervalMillis(5_000) + .setMaxIntervalMillis(500_000) + .setMultiplier(2.0) + .build(); + int attempts = 0; + while (true) { + try { + createdInstance = op.get(); + } catch (Exception e) { + SpannerException spannerException = + (e instanceof ExecutionException && e.getCause() != null) + ? SpannerExceptionFactory.asSpannerException(e.getCause()) + : SpannerExceptionFactory.asSpannerException(e); + if (attempts < maxAttempts && isRetryableResourceExhaustedException(spannerException)) { + attempts++; + if (spannerException.getRetryDelayInMillis() > 0L) { + //noinspection BusyWait + Thread.sleep(spannerException.getRetryDelayInMillis()); + } else { + // The Math.max(...) prevents Backoff#STOP (=-1) to be used as the sleep value. + //noinspection BusyWait + Thread.sleep(Math.max(backOff.getMaxIntervalMillis(), backOff.nextBackOffMillis())); + } + continue; + } + throw SpannerExceptionFactory.newSpannerException( + spannerException.getErrorCode(), + String.format( + "Could not create test instance and giving up after %d attempts: %s", + attempts, e.getMessage()), + e); + } + logger.log(Level.INFO, "Created test instance: {0}", createdInstance.getId()); + break; + } + } + + static boolean isRetryableResourceExhaustedException(SpannerException exception) { + if (exception.getErrorCode() != ErrorCode.RESOURCE_EXHAUSTED) { + return false; + } + return exception + .getMessage() + .contains( + "Quota exceeded for quota metric 'Instance create requests' and limit 'Instance" + + " create requests per minute'") + || exception.getMessage().matches(".*cannot add \\d+ nodes in region.*"); + } + + private void cleanUpOldDatabases(InstanceId instanceId) { + long OLD_DB_THRESHOLD_SECS = TimeUnit.SECONDS.convert(2L, TimeUnit.HOURS); + Timestamp currentTimestamp = Timestamp.now(); + int numDropped = 0; + String TEST_DB_REGEX = "(testdb_(.*)_(.*))|(mysample-(.*))"; + + logger.log(Level.INFO, "Dropping old 
test databases from {0}", instanceId.getName()); + while (true) { + try { + for (Database db : + databaseAdminClient.listDatabases(instanceId.getInstance()).iterateAll()) { + try { + long timeDiff = currentTimestamp.getSeconds() - db.getCreateTime().getSeconds(); + // Delete all databases which are more than OLD_DB_THRESHOLD_SECS seconds old. + if ((db.getId().getDatabase().matches(TEST_DB_REGEX)) + && (timeDiff > OLD_DB_THRESHOLD_SECS)) { + logger.log(Level.INFO, "Dropping test database {0}", db.getId()); + if (db.isDropProtectionEnabled()) { + Database updatedDatabase = + databaseAdminClient + .newDatabaseBuilder(db.getId()) + .disableDropProtection() + .build(); + databaseAdminClient + .updateDatabase(updatedDatabase, DatabaseField.DROP_PROTECTION) + .get(); + } + db.drop(); + ++numDropped; + } + } catch (SpannerException | ExecutionException | InterruptedException e) { + logger.log(Level.SEVERE, "Failed to drop test database " + db.getId(), e); + } + } + break; + } catch (SpannerException exception) { + if (exception.getErrorCode() != ErrorCode.RESOURCE_EXHAUSTED) { + throw exception; + } + // Wait a little and try again. + try { + Thread.sleep(10_000); + } catch (InterruptedException interruptedException) { + throw SpannerExceptionFactory.propagateInterrupt(interruptedException); + } + } + } + logger.log(Level.INFO, "Dropped {0} test database(s)", numDropped); + } + + private void cleanUpInstance() { + try { + if (isOwnedInstance) { + // Delete the instance, which implicitly drops all databases in it. + try { + if (!EmulatorSpannerHelper.isUsingEmulator() && !isExperimentalHost()) { + // Backups must be explicitly deleted before the instance may be deleted. 
+ logger.log( + Level.FINE, "Deleting backups on test instance {0}", testHelper.getInstanceId()); + for (Backup backup : + testHelper + .getClient() + .getDatabaseAdminClient() + .listBackups(testHelper.getInstanceId().getInstance()) + .iterateAll()) { + logger.log(Level.FINE, "Deleting backup {0}", backup.getId()); + backup.delete(); + } + } + logger.log(Level.FINE, "Deleting test instance {0}", testHelper.getInstanceId()); + instanceAdminClient.deleteInstance(testHelper.getInstanceId().getInstance()); + logger.log(Level.INFO, "Deleted test instance {0}", testHelper.getInstanceId()); + } catch (SpannerException e) { + logger.log( + Level.SEVERE, "Failed to delete test instance " + testHelper.getInstanceId(), e); + } + } else { + testHelper.cleanUp(); + } + } finally { + testHelper.getClient().close(); + } + } + + void checkInitialized() { + checkState(testHelper != null, "Setup has not completed successfully"); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IntegrationTestEnvTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IntegrationTestEnvTest.java new file mode 100644 index 000000000000..5164c3c8b6b3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IntegrationTestEnvTest.java @@ -0,0 +1,61 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.IntegrationTestEnv.isRetryableResourceExhaustedException; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class IntegrationTestEnvTest { + + @Test + public void testIsRetryableResourceExhaustedException() { + assertFalse( + isRetryableResourceExhaustedException( + SpannerExceptionFactory.newSpannerException(ErrorCode.INVALID_ARGUMENT, "test"))); + assertFalse( + isRetryableResourceExhaustedException( + SpannerExceptionFactory.newSpannerException(ErrorCode.RESOURCE_EXHAUSTED, "test"))); + assertTrue( + isRetryableResourceExhaustedException( + SpannerExceptionFactory.newSpannerException( + ErrorCode.RESOURCE_EXHAUSTED, + "Operation with name" + + " \"projects/my-project/instances/my-instance/operations/32bb3dccf4243afc\"" + + " failed with status = GrpcStatusCode{transportCode=RESOURCE_EXHAUSTED} and" + + " message = Project 123 cannot add 1 nodes in region ."))); + assertTrue( + isRetryableResourceExhaustedException( + SpannerExceptionFactory.newSpannerException( + ErrorCode.RESOURCE_EXHAUSTED, + "Operation with name" + + " \"projects/my-project/instances/my-instance/operations/32bb3dccf4243afc\"" + + " failed with status = GrpcStatusCode{transportCode=RESOURCE_EXHAUSTED} and" + + " message = Project 123 cannot add 99 nodes in region ."))); + assertTrue( + isRetryableResourceExhaustedException( + SpannerExceptionFactory.newSpannerException( + ErrorCode.RESOURCE_EXHAUSTED, + "Could not create instance. 
Quota exceeded for quota metric 'Instance create" + + " requests' and limit 'Instance create requests per minute'"))); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IntervalTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IntervalTest.java new file mode 100644 index 000000000000..97a43eef2448 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IntervalTest.java @@ -0,0 +1,377 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.*; + +import java.math.BigInteger; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Tests for {@link Interval} */ +@RunWith(JUnit4.class) +public class IntervalTest { + + @Test + public void testOfMonths() { + Interval interval = Interval.ofMonths(10); + assertEquals(10, interval.getMonths()); + assertEquals(0, interval.getDays()); + assertEquals(BigInteger.ZERO, interval.getNanos()); + } + + @Test + public void testOfDays() { + Interval interval = Interval.ofDays(10); + assertEquals(0, interval.getMonths()); + assertEquals(10, interval.getDays()); + assertEquals(BigInteger.ZERO, interval.getNanos()); + } + + @Test + public void testOfSeconds() { + Interval interval = Interval.ofSeconds(10); + assertEquals(0, interval.getMonths()); + assertEquals(0, interval.getDays()); + assertEquals(BigInteger.valueOf(10000000000L), interval.getNanos()); + } + + @Test + public void testOfMillis() { + Interval interval = Interval.ofMillis(10); + assertEquals(0, interval.getMonths()); + assertEquals(0, interval.getDays()); + assertEquals(BigInteger.valueOf(10000000L), interval.getNanos()); + } + + @Test + public void testOfMicros() { + Interval interval = Interval.ofMicros(10); + assertEquals(0, interval.getMonths()); + assertEquals(0, interval.getDays()); + assertEquals(BigInteger.valueOf(10000), interval.getNanos()); + } + + @Test + public void testOfNanos() { + Interval interval = Interval.ofNanos(BigInteger.valueOf(10)); + assertEquals(0, interval.getMonths()); + assertEquals(0, interval.getDays()); + assertEquals(10, interval.getNanos().longValueExact()); + } + + @Test + public void testFromMonthsDaysNanoseconds() { + Interval interval = Interval.fromMonthsDaysNanos(10, 20, BigInteger.valueOf(1030)); + assertEquals(10, interval.getMonths()); + assertEquals(20, interval.getDays()); + assertEquals(1030, interval.getNanos().longValueExact()); + + Interval 
interval2 = Interval.fromMonthsDaysNanos(10, 20, BigInteger.valueOf(-1030)); + assertEquals(10, interval2.getMonths()); + assertEquals(20, interval2.getDays()); + assertEquals(-1030, interval2.getNanos().longValueExact()); + } + + @Test + public void testParseFromString() { + TestCase[] testCases = + new TestCase[] { + // Regular cases + new TestCase("P1Y2M3DT12H12M6.789000123S", 14, 3, 43926789000123L), + new TestCase("P1Y2M3DT13H-48M6S", 14, 3, 43926000000000L), + new TestCase("P1Y2M3D", 14, 3, 0L), + new TestCase("P1Y2M", 14, 0, 0L), + new TestCase("P1Y", 12, 0, 0L), + new TestCase("P2M", 2, 0, 0L), + new TestCase("P3D", 0, 3, 0L), + new TestCase("PT4H25M6.7890001S", 0, 0, 15906789000100L), + new TestCase("PT4H25M6S", 0, 0, 15906000000000L), + new TestCase("PT4H30S", 0, 0, 14430000000000L), + new TestCase("PT4H1M", 0, 0, 14460000000000L), + new TestCase("PT5M", 0, 0, 300000000000L), + new TestCase("PT6.789S", 0, 0, 6789000000L), + new TestCase("PT0.123S", 0, 0, 123000000L), + new TestCase("PT.000000123S", 0, 0, 123L), + new TestCase("P0Y", 0, 0, 0L), + new TestCase("P-1Y-2M-3DT-12H-12M-6.789000123S", -14, -3, -43926789000123L), + new TestCase("P1Y-2M3DT13H-51M6.789S", 10, 3, 43746789000000L), + new TestCase("P-1Y2M-3DT-13H49M-6.789S", -10, -3, -43866789000000L), + new TestCase("P1Y2M3DT-4H25M-6.7890001S", 14, 3, -12906789000100L), + new TestCase("PT100H100M100.5S", 0, 0, 366100500000000L), + new TestCase("P0Y", 0, 0, 0L), // Zero value + new TestCase("PT12H30M1S", 0, 0, 45001000000000L), // Only time components, with seconds + new TestCase("P1Y2M3D", 14, 3, 0L), // Only date components + new TestCase("P1Y2M3DT12H30M", 14, 3, 45000000000000L), // Date and time, no seconds + new TestCase("PT0.123456789S", 0, 0, 123456789L), // Fractional seconds with max digits + new TestCase("PT1H0.5S", 0, 0, 3600500000000L), // Hours and fractional seconds + new TestCase( + "P1Y2M3DT12H30M1.23456789S", 14, 3, 45001234567890L), // Years and months to months + new TestCase( + 
"P1Y2M3DT12H30M1,23456789S", 14, 3, 45001234567890L), // Comma as decimal point + new TestCase("PT.5S", 0, 0, 500000000L), // Fractional seconds without 0 before decimal + new TestCase("P-1Y2M3DT12H-30M1.234S", -10, 3, 41401234000000L), // Mixed signs + new TestCase("P1Y-2M3DT-12H30M-1.234S", 10, 3, -41401234000000L), // More mixed signs + new TestCase("PT1.234000S", 0, 0, 1234000000L), // Trailing zeros after decimal + new TestCase("PT1.000S", 0, 0, 1000000000L), // All zeros after decimal + + // Large values + new TestCase("PT87840000H", 0, 0, new BigInteger("316224000000000000000")), + new TestCase("PT-87840000H", 0, 0, new BigInteger("-316224000000000000000")), + new TestCase( + "P2Y1M15DT87839999H59M59.999999999S", + 25, + 15, + new BigInteger("316223999999999999999")), + new TestCase( + "P2Y1M15DT-87839999H-59M-59.999999999S", + 25, + 15, + new BigInteger("-316223999999999999999")), + }; + + for (TestCase testCase : testCases) { + Interval interval = Interval.parseFromString(testCase.intervalString); + assertEquals(testCase.months, interval.getMonths()); + assertEquals(testCase.days, interval.getDays()); + assertEquals(testCase.nanoseconds, interval.getNanos()); + } + } + + @Test + public void testParseFromString_InvalidString() { + String[] invalidStrings = + new String[] { + "invalid", + "P", + "PT", + "P1YM", + "P1Y2M3D4H5M6S", // Missing T + "P1Y2M3DT4H5M6.S", // Missing decimal value + "P1Y2M3DT4H5M6.789SS", // Extra S + "P1Y2M3DT4H5M6.", // Missing value after decimal point + "P1Y2M3DT4H5M6.ABC", // Non-digit characters after decimal point + "P1Y2M3", // Missing unit specifier + "P1Y2M3DT", // Missing time components + "P-T1H", // Invalid negative sign position + "PT1H-", // Invalid negative sign position + "P1Y2M3DT4H5M6.789123456789S", // Too many digits after decimal + "P1Y2M3DT4H5M6.123.456S", // Multiple decimal points + "P1Y2M3DT4H5M6.,789S", // Dot and comma both for decimal + }; + + for (String invalidString : invalidStrings) { + 
assertThrows(SpannerException.class, () -> Interval.parseFromString(invalidString)); + } + } + + @Test + public void testToISO8601() { + TestCase[] testCases = + new TestCase[] { + // Regular cases + new TestCase(14, 3, 43926789000123L, "P1Y2M3DT12H12M6.789000123S"), + new TestCase(14, 3, 14706789000000L, "P1Y2M3DT4H5M6.789S"), + new TestCase(14, 3, 0L, "P1Y2M3D"), + new TestCase(14, 0, 0L, "P1Y2M"), + new TestCase(12, 0, 0L, "P1Y"), + new TestCase(2, 0, 0L, "P2M"), + new TestCase(0, 3, 0L, "P3D"), + new TestCase(0, 0, 15906789000000L, "PT4H25M6.789S"), + new TestCase(0, 0, 14430000000000L, "PT4H30S"), + new TestCase(0, 0, 300000000000L, "PT5M"), + new TestCase(0, 0, 6789000000L, "PT6.789S"), + new TestCase(0, 0, 123000000L, "PT0.123S"), + new TestCase(0, 0, 123L, "PT0.000000123S"), + + // digits after decimal in multiple of 3s + new TestCase(0, 0, 100000000L, "PT0.100S"), + new TestCase(0, 0, 100100000L, "PT0.100100S"), + new TestCase(0, 0, 100100100L, "PT0.100100100S"), + new TestCase(0, 0, 9L, "PT0.000000009S"), + new TestCase(0, 0, 9000L, "PT0.000009S"), + new TestCase(0, 0, 9000000L, "PT0.009S"), + + // Zero value cases + new TestCase(0, 0, 0L, "P0Y"), + new TestCase(0, 0, 0L, "P0Y"), // All zero + new TestCase(1, 0, 0L, "P1M"), // Only month + new TestCase(0, 1, 0L, "P1D"), // Only day + new TestCase(0, 0, 10010L, "PT0.000010010S"), // Only nanoseconds + + // Negative value cases + new TestCase(-14, -3, -43926789000123L, "P-1Y-2M-3DT-12H-12M-6.789000123S"), + new TestCase(10, 3, 43746789100000L, "P10M3DT12H9M6.789100S"), + new TestCase(-10, -3, -43866789010000L, "P-10M-3DT-12H-11M-6.789010S"), + new TestCase(14, 3, -12906662400000L, "P1Y2M3DT-3H-35M-6.662400S"), + + // Fractional seconds cases + new TestCase(0, 0, 500000000L, "PT0.500S"), // Fractional seconds + new TestCase(0, 0, -500000000L, "PT-0.500S"), // Negative fractional seconds + + // Large values + new TestCase(0, 0, new BigInteger("316224000000000000000"), "PT87840000H"), + new TestCase(0, 0, new 
BigInteger("-316224000000000000000"), "PT-87840000H"), + new TestCase( + 25, + 15, + new BigInteger("316223999999999999999"), + "P2Y1M15DT87839999H59M59.999999999S"), + new TestCase( + 25, + 15, + new BigInteger("-316223999999999999999"), + "P2Y1M15DT-87839999H-59M-59.999999999S"), + new TestCase(13, 0, 0L, "P1Y1M"), // Months normalized to years + new TestCase(0, 0, 86400000000000L, "PT24H"), // 24 hours + new TestCase(0, 31, 0L, "P31D"), // 31 days + new TestCase(-12, 0, 0L, "P-1Y"), // Negative year + }; + + for (TestCase testCase : testCases) { + Interval interval = + Interval.builder() + .setMonths(testCase.months) + .setDays(testCase.days) + .setNanos(testCase.nanoseconds) + .build(); + + assertEquals(testCase.intervalString, interval.toISO8601()); + } + } + + @Test + public void testGetNanoseconds() { + Interval interval1 = Interval.fromMonthsDaysNanos(10, 20, BigInteger.valueOf(30040)); + assertEquals(30040, interval1.getNanos().longValueExact()); + + Interval interval2 = Interval.fromMonthsDaysNanos(0, 0, BigInteger.valueOf(123456789)); + assertEquals(123456789, interval2.getNanos().longValueExact()); + + Interval interval3 = Interval.fromMonthsDaysNanos(-10, -20, BigInteger.valueOf(-123456789)); + assertEquals(-123456789, interval3.getNanos().longValueExact()); + } + + @Test + public void testEquals() { + Interval interval1 = Interval.fromMonthsDaysNanos(10, 20, BigInteger.valueOf(30)); + Interval interval2 = Interval.fromMonthsDaysNanos(10, 20, BigInteger.valueOf(30)); + Interval interval3 = Interval.fromMonthsDaysNanos(10, 20, BigInteger.valueOf(31)); + Interval interval4 = Interval.fromMonthsDaysNanos(10, 21, BigInteger.valueOf(30)); + Interval interval5 = Interval.fromMonthsDaysNanos(11, 20, BigInteger.valueOf(30)); + Interval interval6 = Interval.fromMonthsDaysNanos(-10, -20, BigInteger.valueOf(-30)); + Interval interval7 = Interval.fromMonthsDaysNanos(-10, -20, BigInteger.valueOf(-30)); + + // Test with identical intervals + assertEquals(interval1, 
interval2); + assertEquals(interval2, interval1); // Check symmetry + + // Test with different intervals + assertNotEquals(interval1, interval3); + assertNotEquals(interval1, interval4); + assertNotEquals(interval1, interval5); + + // Test with negative values + assertEquals(interval6, interval7); + assertEquals(interval7, interval6); // Check symmetry + + // Test with different values for each field (including negative) + assertNotEquals(interval1, Interval.fromMonthsDaysNanos(1, 2, BigInteger.valueOf(3))); + assertNotEquals(interval1, Interval.fromMonthsDaysNanos(-10, 20, BigInteger.valueOf(30))); + assertNotEquals(interval1, Interval.fromMonthsDaysNanos(10, -20, BigInteger.valueOf(30))); + assertNotEquals(interval1, Interval.fromMonthsDaysNanos(10, 20, BigInteger.valueOf(-30))); + + // Test with null and an object that is not an Interval + assertNotEquals(interval1, null); + assertNotEquals(interval1, new Object()); + } + + @Test + public void testHashCode() { + // Test cases with different combinations of months, days, and nanoseconds + Interval interval1 = Interval.fromMonthsDaysNanos(10, 20, BigInteger.valueOf(30)); + Interval interval2 = Interval.fromMonthsDaysNanos(10, 20, BigInteger.valueOf(30)); + Interval interval3 = Interval.fromMonthsDaysNanos(11, 20, BigInteger.valueOf(30)); + Interval interval4 = Interval.fromMonthsDaysNanos(10, 21, BigInteger.valueOf(30)); + Interval interval5 = Interval.fromMonthsDaysNanos(10, 20, BigInteger.valueOf(31)); + Interval interval6 = Interval.fromMonthsDaysNanos(-10, -20, BigInteger.valueOf(-30)); + Interval interval7 = Interval.fromMonthsDaysNanos(-10, -20, BigInteger.valueOf(-30)); + Interval interval8 = Interval.fromMonthsDaysNanos(0, 0, BigInteger.ZERO); // Zero values + Interval interval9 = + Interval.fromMonthsDaysNanos(1000, 1000, BigInteger.valueOf(1234567890)); // Large values + + // Test with identical intervals + assertEquals(interval1.hashCode(), interval2.hashCode()); + assertEquals(interval6.hashCode(), 
interval7.hashCode()); + + // Test with different months + assertNotEquals(interval1.hashCode(), interval3.hashCode()); + + // Test with different days + assertNotEquals(interval1.hashCode(), interval4.hashCode()); + + // Test with different nanoseconds + assertNotEquals(interval1.hashCode(), interval5.hashCode()); + + // Test with zero values + assertNotEquals(interval1.hashCode(), interval8.hashCode()); + + // Test with large values + assertNotEquals(interval1.hashCode(), interval9.hashCode()); + + // Test for collision. + Interval interval10 = Interval.fromMonthsDaysNanos(20, 10, BigInteger.valueOf(50)); + Interval interval11 = Interval.fromMonthsDaysNanos(10, 20, BigInteger.valueOf(50)); + assertNotEquals(interval10.hashCode(), interval11.hashCode()); + } + + private static class TestCase { + private final String intervalString; + private final int months; + private final int days; + private final BigInteger nanoseconds; + + private TestCase(String intervalString, int months, int days, long nanoseconds) { + this.intervalString = intervalString; + this.months = months; + this.days = days; + this.nanoseconds = BigInteger.valueOf(nanoseconds); + } + + private TestCase(String intervalString, int months, int days, BigInteger nanoseconds) { + this.intervalString = intervalString; + this.months = months; + this.days = days; + this.nanoseconds = nanoseconds; + } + + private TestCase(int months, int days, long nanoseconds, String intervalString) { + this.intervalString = intervalString; + this.months = months; + this.days = days; + this.nanoseconds = BigInteger.valueOf(nanoseconds); + } + + private TestCase(int months, int days, BigInteger nanoseconds, String intervalString) { + this.intervalString = intervalString; + this.months = months; + this.days = days; + this.nanoseconds = nanoseconds; + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IsRetryableInternalErrorTest.java 
b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IsRetryableInternalErrorTest.java new file mode 100644 index 000000000000..514b1e96b7f3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IsRetryableInternalErrorTest.java @@ -0,0 +1,158 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertTrue; + +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.rpc.InternalException; +import com.google.common.base.Predicate; +import io.grpc.Status; +import io.grpc.Status.Code; +import io.grpc.StatusRuntimeException; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class IsRetryableInternalErrorTest { + + private Predicate predicate; + + @Before + public void setUp() { + predicate = new IsRetryableInternalError(); + } + + @Test + public void http2ErrorStatusRuntimeExceptionIsRetryable() { + final StatusRuntimeException e = + new StatusRuntimeException( + Status.fromCode(Code.INTERNAL) + .withDescription("INTERNAL: HTTP/2 error code: INTERNAL_ERROR.")); + + assertThat(predicate.apply(e)).isTrue(); + } + + @Test + public void http2ErrorInternalExceptionIsRetryable() { + final InternalException e = + new 
InternalException( + "INTERNAL: HTTP/2 error code: INTERNAL_ERROR.", + null, + GrpcStatusCode.of(Code.INTERNAL), + false); + + assertThat(predicate.apply(e)).isTrue(); + } + + @Test + public void connectionClosedStatusRuntimeExceptionIsRetryable() { + final StatusRuntimeException e = + new StatusRuntimeException( + Status.fromCode(Code.INTERNAL) + .withDescription("INTERNAL: Connection closed with unknown cause.")); + + assertThat(predicate.apply(e)).isTrue(); + } + + @Test + public void connectionClosedInternalExceptionIsRetryable() { + final InternalException e = + new InternalException( + "INTERNAL: Connection closed with unknown cause.", + null, + GrpcStatusCode.of(Code.INTERNAL), + false); + + assertThat(predicate.apply(e)).isTrue(); + } + + @Test + public void eosStatusRuntimeExceptionIsRetryable() { + final StatusRuntimeException e = + new StatusRuntimeException( + Status.fromCode(Code.INTERNAL) + .withDescription("INTERNAL: Received unexpected EOS on DATA frame from server.")); + + assertThat(predicate.apply(e)).isTrue(); + } + + @Test + public void eosInternalExceptionIsRetryable() { + final InternalException e = + new InternalException( + "INTERNAL: Received unexpected EOS on DATA frame from server.", + null, + GrpcStatusCode.of(Code.INTERNAL), + false); + + assertThat(predicate.apply(e)).isTrue(); + } + + @Test + public void genericInternalStatusRuntimeExceptionIsRetryable() { + final StatusRuntimeException e = + new StatusRuntimeException( + Status.fromCode(Code.INTERNAL).withDescription("INTERNAL: Generic.")); + + assertThat(predicate.apply(e)).isFalse(); + } + + @Test + public void rstStreamInternalExceptionIsRetryable() { + final InternalException e = + new InternalException( + "INTERNAL: stream terminated by RST_STREAM.", + null, + GrpcStatusCode.of(Code.INTERNAL), + false); + + assertTrue(predicate.apply(e)); + } + + @Test + public void testAuthenticationBackendInternalServerErrorIsRetryable() { + final StatusRuntimeException exception = + new 
StatusRuntimeException( + Status.fromCode(Code.INTERNAL) + .withDescription( + "INTERNAL: Authentication backend internal server error. Please retry.")); + + assertTrue(predicate.apply(exception)); + } + + @Test + public void genericInternalExceptionIsNotRetryable() { + final InternalException e = + new InternalException("INTERNAL: Generic.", null, GrpcStatusCode.of(Code.INTERNAL), false); + + assertThat(predicate.apply(e)).isFalse(); + } + + @Test + public void nullIsNotRetryable() { + assertThat(predicate.apply(null)).isFalse(); + } + + @Test + public void genericExceptionIsNotRetryable() { + assertThat(predicate.apply(new Exception())).isFalse(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IsSslHandshakeExceptionTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IsSslHandshakeExceptionTest.java new file mode 100644 index 000000000000..5dd52352c2ea --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IsSslHandshakeExceptionTest.java @@ -0,0 +1,52 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.common.base.Predicate; +import javax.net.ssl.SSLHandshakeException; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class IsSslHandshakeExceptionTest { + + private Predicate predicate; + + @Before + public void setUp() { + predicate = new IsSslHandshakeException(); + } + + @Test + public void sslHandshakeExceptionIsTrue() { + assertThat(predicate.apply(new SSLHandshakeException("test"))).isTrue(); + } + + @Test + public void genericExceptionIsNotSslHandshakeException() { + assertThat(predicate.apply(new Exception("test"))).isFalse(); + } + + @Test + public void nullIsNotSslHandshakeException() { + assertThat(predicate.apply(null)).isFalse(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/JavaVersionUtil.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/JavaVersionUtil.java new file mode 100644 index 000000000000..99f7a9932347 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/JavaVersionUtil.java @@ -0,0 +1,28 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.gson.internal.JavaVersion; + +/** Util class for getting the Java version the tests are executed on. */ +public class JavaVersionUtil { + + /** Returns the major Java version (e.g. 8, 11, 17) */ + public static int getJavaMajorVersion() { + return JavaVersion.getMajorJavaVersion(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/KeyRangeTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/KeyRangeTest.java new file mode 100644 index 000000000000..f11bb6295205 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/KeyRangeTest.java @@ -0,0 +1,135 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.KeyRange.Endpoint.CLOSED; +import static com.google.cloud.spanner.KeyRange.Endpoint.OPEN; +import static com.google.common.testing.SerializableTester.reserializeAndAssert; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.common.testing.EqualsTester; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link com.google.cloud.spanner.KeyRange}. 
*/ +@RunWith(JUnit4.class) +public class KeyRangeTest { + + @Test + public void basics() { + checkKeyRange(KeyRange.closedOpen(Key.of(1), Key.of(2)), Key.of(1), CLOSED, Key.of(2), OPEN); + checkKeyRange( + KeyRange.closedClosed(Key.of(1), Key.of(2)), Key.of(1), CLOSED, Key.of(2), CLOSED); + checkKeyRange(KeyRange.openOpen(Key.of(1), Key.of(2)), Key.of(1), OPEN, Key.of(2), OPEN); + checkKeyRange(KeyRange.openClosed(Key.of(1), Key.of(2)), Key.of(1), OPEN, Key.of(2), CLOSED); + } + + private static void checkKeyRange( + KeyRange r, Key start, KeyRange.Endpoint startType, Key end, KeyRange.Endpoint endType) { + assertThat(r.getStartType()).isEqualTo(startType); + assertThat(r.geEndType()).isEqualTo(endType); + assertThat(r.getStart()).isEqualTo(start); + assertThat(r.getEnd()).isEqualTo(end); + + KeyRange fromBuilder = + KeyRange.newBuilder() + .setStart(start) + .setStartType(startType) + .setEnd(end) + .setEndType(endType) + .build(); + assertThat(fromBuilder.getStartType()).isEqualTo(startType); + assertThat(fromBuilder.geEndType()).isEqualTo(endType); + assertThat(fromBuilder.getStart()).isEqualTo(start); + assertThat(fromBuilder.getEnd()).isEqualTo(end); + + assertThat(fromBuilder).isEqualTo(r); + assertThat(fromBuilder.hashCode()).isEqualTo(r.hashCode()); + } + + @Test + public void equalsAndHashCode() { + EqualsTester tester = new EqualsTester(); + + // Test factory method vs builder with defaults vs builder with all parts explicit. + tester.addEqualityGroup( + KeyRange.closedOpen(Key.of(1), Key.of(2)), + KeyRange.newBuilder().setStart(Key.of(1)).setEnd(Key.of(2)).build(), + KeyRange.newBuilder() + .setStart(Key.of(1)) + .setStartType(CLOSED) + .setEnd(Key.of(2)) + .setEndType(OPEN) + .build()); + + // Differing endpoint types. 
+ tester.addEqualityGroup(KeyRange.closedClosed(Key.of(1), Key.of(2))); + tester.addEqualityGroup(KeyRange.openOpen(Key.of(1), Key.of(2))); + tester.addEqualityGroup(KeyRange.openClosed(Key.of(1), Key.of(2))); + + // Differing start/end keys. + tester.addEqualityGroup(KeyRange.closedOpen(Key.of(2), Key.of(1))); + tester.addEqualityGroup(KeyRange.closedClosed(Key.of(), Key.of())); + + // Prefix range. + tester.addEqualityGroup( + KeyRange.prefix(Key.of(1, 2)), KeyRange.closedClosed(Key.of(1, 2), Key.of(1, 2))); + + tester.testEquals(); + } + + @Test + public void toBuilder() { + KeyRange r = KeyRange.closedOpen(Key.of(1), Key.of(2)); + r.toBuilder().setEndType(CLOSED).build(); + } + + @Test + public void builderRequiresStart() { + IllegalStateException e = + assertThrows( + IllegalStateException.class, () -> KeyRange.newBuilder().setEnd(Key.of("z")).build()); + assertThat(e.getMessage()).contains("start(Key)"); + } + + @Test + public void builderRequiresEnd() { + IllegalStateException e = + assertThrows( + IllegalStateException.class, () -> KeyRange.newBuilder().setStart(Key.of("a")).build()); + assertThat(e.getMessage()).contains("end(Key)"); + } + + @Test + public void testToString() { + assertThat(KeyRange.closedOpen(Key.of("a"), Key.of("b")).toString()).isEqualTo("[[a],[b])"); + assertThat(KeyRange.closedClosed(Key.of("a"), Key.of("b")).toString()).isEqualTo("[[a],[b]]"); + assertThat(KeyRange.openOpen(Key.of("a"), Key.of("b")).toString()).isEqualTo("([a],[b])"); + assertThat(KeyRange.openClosed(Key.of("a"), Key.of("b")).toString()).isEqualTo("([a],[b]]"); + assertThat(KeyRange.closedClosed(Key.of(), Key.of()).toString()).isEqualTo("[[],[]]"); + } + + @Test + public void serialization() { + reserializeAndAssert(KeyRange.closedOpen(Key.of(1), Key.of(2))); + reserializeAndAssert(KeyRange.closedClosed(Key.of(1), Key.of(2))); + reserializeAndAssert(KeyRange.openOpen(Key.of(1), Key.of(2))); + reserializeAndAssert(KeyRange.openClosed(Key.of(1), Key.of(2))); + } +} 
diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/KeySetTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/KeySetTest.java new file mode 100644 index 000000000000..1b1fc0222e59 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/KeySetTest.java @@ -0,0 +1,279 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.testing.SerializableTester.reserializeAndAssert; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.ByteArray; +import com.google.common.testing.EqualsTester; +import org.hamcrest.MatcherAssert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link com.google.cloud.spanner.KeySet}. 
*/ +@RunWith(JUnit4.class) +public class KeySetTest { + + @Test + public void singleKey() { + KeySet set = KeySet.singleKey(Key.of("a", "b", "c")); + assertThat(set.isAll()).isFalse(); + assertThat(set.getKeys()).containsExactly(Key.of("a", "b", "c")); + assertThat(set.getRanges()).isEmpty(); + } + + @Test + public void range() { + KeySet set = KeySet.range(KeyRange.closedOpen(Key.of("a"), Key.of("b"))); + assertThat(set.isAll()).isFalse(); + assertThat(set.getKeys()).isEmpty(); + assertThat(set.getRanges()).containsExactly(KeyRange.closedOpen(Key.of("a"), Key.of("b"))); + } + + @Test + public void prefixRange() { + KeySet set = KeySet.prefixRange(Key.of("a", "b")); + assertThat(set.isAll()).isFalse(); + assertThat(set.getKeys()).isEmpty(); + assertThat(set.getRanges()).containsExactly(KeyRange.prefix(Key.of("a", "b"))); + } + + @Test + public void all() { + KeySet set = KeySet.all(); + assertThat(set.isAll()).isTrue(); + assertThat(set.getKeys()).isEmpty(); + assertThat(set.getRanges()).isEmpty(); + } + + @Test + public void builder() { + KeySet set = + KeySet.newBuilder() + .addKey(Key.of("k1")) + .addKey(Key.of("k2")) + .addRange(KeyRange.closedOpen(Key.of("r1"), Key.of("rr1"))) + .addRange(KeyRange.closedOpen(Key.of("r2"), Key.of("rr2"))) + .build(); + assertThat(set.isAll()).isFalse(); + // Order isn't strictly important to the API, but it's helpful to preserve it. 
+ assertThat(set.getKeys()).containsExactly(Key.of("k1"), Key.of("k2")).inOrder(); + assertThat(set.getRanges()) + .containsExactly( + KeyRange.closedOpen(Key.of("r1"), Key.of("rr1")), + KeyRange.closedOpen(Key.of("r2"), Key.of("rr2"))) + .inOrder(); + assertThat(set.toString()).isEqualTo("{[k1],[k2],[[r1],[rr1]),[[r2],[rr2])}"); + } + + @Test + public void toBuilder() { + KeySet set = KeySet.singleKey(Key.of(1)).toBuilder().addKey(Key.of(2)).build(); + assertThat(set.isAll()).isFalse(); + assertThat(set.getKeys()).containsExactly(Key.of(1), Key.of(2)).inOrder(); + assertThat(set.getRanges()).isEmpty(); + + set = + KeySet.range(KeyRange.closedOpen(Key.of("a"), Key.of("b"))).toBuilder() + .addRange(KeyRange.closedOpen(Key.of("c"), Key.of("d"))) + .build(); + assertThat(set.isAll()).isFalse(); + assertThat(set.getKeys()).isEmpty(); + assertThat(set.getRanges()) + .containsExactly( + KeyRange.closedOpen(Key.of("a"), Key.of("b")), + KeyRange.closedOpen(Key.of("c"), Key.of("d"))) + .inOrder(); + + set = KeySet.all().toBuilder().addKey(Key.of(1)).build(); + assertThat(set.isAll()).isTrue(); + assertThat(set.getKeys()).containsExactly(Key.of(1)); + assertThat(set.getRanges()).isEmpty(); + } + + @Test + public void testToString() { + assertThat(KeySet.all().toString()).isEqualTo("{}"); + assertThat(KeySet.singleKey(Key.of("x")).toString()).isEqualTo("{[x]}"); + assertThat(KeySet.range(KeyRange.closedOpen(Key.of("a"), Key.of("z"))).toString()) + .isEqualTo("{[[a],[z])}"); + } + + @Test + public void equalsAndHashCode() { + EqualsTester tester = new EqualsTester(); + tester.addEqualityGroup(KeySet.newBuilder().build()); + tester.addEqualityGroup(KeySet.all(), KeySet.newBuilder().setAll().build()); + tester.addEqualityGroup( + KeySet.singleKey(Key.of("a")), KeySet.newBuilder().addKey(Key.of("a")).build()); + tester.addEqualityGroup( + KeySet.range(KeyRange.closedOpen(Key.of("a"), Key.of("b"))), + KeySet.newBuilder().addRange(KeyRange.closedOpen(Key.of("a"), 
Key.of("b"))).build()); + tester.addEqualityGroup(KeySet.newBuilder().addKey(Key.of(1)).addKey(Key.of(2)).build()); + // We currently consider order, although this doesn't affect visible results. + tester.addEqualityGroup(KeySet.newBuilder().addKey(Key.of(2)).addKey(Key.of(1)).build()); + tester.addEqualityGroup(KeySet.newBuilder().setAll().addKey(Key.of("a")).build()); + tester.addEqualityGroup( + KeySet.newBuilder() + .addKey(Key.of("a")) + .addRange(KeyRange.closedOpen(Key.of("a"), Key.of("b"))) + .build()); + tester.testEquals(); + } + + @Test + public void serializationEmpty() { + KeySet keySet = KeySet.singleKey(Key.of()); + checkProto(keySet, "keys {}"); + } + + @Test + public void serializationSingleKeyBool() { + KeySet keySet = KeySet.singleKey(Key.of(true)); + checkProto(keySet, "keys { values { bool_value: true } }"); + } + + @Test + public void serializationSingleKeyInt64() { + KeySet keySet = KeySet.singleKey(Key.of(1234L)); + checkProto(keySet, "keys { values { string_value: '1234' } }"); + } + + @Test + public void serializationSingleKeyFloat64() { + KeySet keySet = KeySet.singleKey(Key.of(2.0)); + checkProto(keySet, "keys { values { number_value: 2.0 } }"); + } + + @Test + public void serializationSingleKeyString() { + KeySet keySet = KeySet.singleKey(Key.of("abc")); + checkProto(keySet, "keys { values { string_value: 'abc' } }"); + } + + @Test + public void serializationSingleKeyBytes() { + KeySet keySet = KeySet.singleKey(Key.of(ByteArray.copyFrom(new byte[] {'a'}))); + checkProto(keySet, "keys { values { string_value: 'YQ==' } }"); + } + + @Test + public void serializationSingleKeyNull() { + KeySet keySet = KeySet.singleKey(Key.of((String) null)); + checkProto(keySet, "keys { values { null_value: NULL_VALUE } }"); + } + + @Test + public void serializationSingleKeyMultiPart() { + KeySet keySet = KeySet.singleKey(Key.of("a", false)); + checkProto(keySet, "keys { values { string_value: 'a' } values { bool_value: false } }"); + } + + @Test + public 
void serializationSingleRangeClosedOpen() { + KeySet keySet = KeySet.range(KeyRange.closedOpen(Key.of("a"), Key.of("z"))); + checkProto( + keySet, + "ranges { start_closed { values { string_value: 'a' } }" + + " end_open { values { string_value: 'z' } } }"); + } + + @Test + public void serializationSingleRangeClosedClosed() { + KeySet keySet = KeySet.range(KeyRange.closedClosed(Key.of("a"), Key.of("z"))); + checkProto( + keySet, + "ranges { start_closed { values { string_value: 'a' } }" + + " end_closed { values { string_value: 'z' } } }"); + } + + @Test + public void serializationSingleRangeOpenOpen() { + KeySet keySet = KeySet.range(KeyRange.openOpen(Key.of("a"), Key.of("z"))); + checkProto( + keySet, + "ranges { start_open { values { string_value: 'a' } }" + + " end_open { values { string_value: 'z' } } }"); + } + + @Test + public void serializationSingleRangeOpenClosed() { + KeySet keySet = KeySet.range(KeyRange.openClosed(Key.of("a"), Key.of("z"))); + checkProto( + keySet, + "ranges { start_open { values { string_value: 'a' } }" + + " end_closed { values { string_value: 'z' } } }"); + } + + @Test + public void serializationAll() { + KeySet keySet = KeySet.all(); + checkProto(keySet, "all: true"); + } + + @Test + public void serializationMulti() { + KeySet keySet = + KeySet.newBuilder() + .addKey(Key.of("d", 1)) + .addRange(KeyRange.closedOpen(Key.of("m"), Key.of("p"))) + .addKey(Key.of("a", 1)) + .addRange(KeyRange.closedClosed(Key.of("a"), Key.of("d"))) + .build(); + checkProto( + keySet, + "keys { values { string_value: 'd' } values { string_value: '1' } }" + + " keys { values { string_value: 'a' } values { string_value:'1' } }" + + " ranges { start_closed { values { string_value: 'm' } }" + + " end_open { values { string_value: 'p' } } }" + + " ranges { start_closed { values { string_value: 'a' } }" + + " end_closed { values { string_value: 'd' } } }"); + } + + @Test + public void serializationMultiWithAll() { + KeySet keySet = + KeySet.all().toBuilder() + 
.addKey(Key.of("a", 1)) + .addRange(KeyRange.closedOpen(Key.of("m"), Key.of("p"))) + .build(); + checkProto( + keySet, + "keys { values { string_value: 'a' } values { string_value: '1' } }" + + " ranges { start_closed { values { string_value: 'm' } }" + + " end_open { values { string_value: 'p' } } }" + + " all:true"); + } + + @Test + public void javaSerialization() { + reserializeAndAssert( + KeySet.all().toBuilder() + .addKey(Key.of("a", 1)) + .addRange(KeyRange.closedOpen(Key.of("m"), Key.of("p"))) + .build()); + } + + private static void checkProto(KeySet keySet, String proto) { + com.google.spanner.v1.KeySet.Builder builder = com.google.spanner.v1.KeySet.newBuilder(); + keySet.appendToProto(builder); + MatcherAssert.assertThat( + builder.build(), SpannerMatchers.matchesProto(com.google.spanner.v1.KeySet.class, proto)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/KeyTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/KeyTest.java new file mode 100644 index 000000000000..afd97d5e2abf --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/KeyTest.java @@ -0,0 +1,271 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
 */

package com.google.cloud.spanner;

import static com.google.common.testing.SerializableTester.reserializeAndAssert;
import static com.google.common.truth.Truth.assertThat;

import com.google.cloud.ByteArray;
import com.google.cloud.Date;
import com.google.cloud.Timestamp;
import com.google.common.testing.EqualsTester;
import com.google.protobuf.ListValue;
import com.google.protobuf.NullValue;
import java.math.BigDecimal;
import java.util.UUID;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;

/** Unit tests for {@link com.google.cloud.spanner.Key}. */
@RunWith(JUnit4.class)
public class KeyTest {
  // Verifies the varargs factory Key.of(...): size, part order, null parts, and
  // coercion of Java types to the canonical key-part types (int -> Long,
  // float -> Double, BigDecimal normalized by scale).
  @Test
  public void of() {
    Key k = Key.of();
    assertThat(k.size()).isEqualTo(0);
    assertThat(k.getParts()).isEmpty();

    k = Key.of("a", "b", "c");
    assertThat(k.size()).isEqualTo(3);
    assertThat(k.getParts()).containsExactly("a", "b", "c").inOrder();

    k = Key.of("a", null, "c");
    assertThat(k.size()).isEqualTo(3);
    assertThat(k.getParts()).containsExactly("a", null, "c").inOrder();

    // All supported Java types: note coercion to canonical types.
    String numeric = "3.141592";
    String timestamp = "2015-09-15T00:00:00Z";
    String date = "2015-09-15";
    String uuid = UUID.randomUUID().toString();
    String json = "{\"color\":\"red\",\"value\":\"#f00\"}";
    k =
        Key.of(
            null,
            true,
            32,
            64L,
            2.0f,
            4.0d,
            new BigDecimal(numeric),
            "x",
            json,
            ByteArray.copyFrom("y"),
            Timestamp.parseTimestamp(timestamp),
            Date.parseDate(date),
            UUID.fromString(uuid));
    assertThat(k.size()).isEqualTo(13);
    assertThat(k.getParts())
        .containsExactly(
            null,
            Boolean.TRUE,
            32L,
            64L,
            2.0d,
            4.0d,
            BigDecimal.valueOf(3141592, 6),
            "x",
            json,
            ByteArray.copyFrom("y"),
            Timestamp.parseTimestamp(timestamp),
            Date.parseDate(date),
            UUID.fromString(uuid))
        .inOrder();

    // Singleton null key.
    k = Key.of((Object) null);
    assertThat(k.size()).isEqualTo(1);
    assertThat(k.getParts()).contains(null);
  }

  // Same type-coercion expectations as of(), but built through the typed
  // Builder.append(...) overloads instead of the varargs factory.
  @Test
  public void builder() {
    String numeric = "3.141592";
    String timestamp = "2015-09-15T00:00:00Z";
    String date = "2015-09-15";
    String uuid = UUID.randomUUID().toString();
    String json = "{\"color\":\"red\",\"value\":\"#f00\"}";
    Key k =
        Key.newBuilder()
            .append((Boolean) null)
            .append(true)
            .append(32)
            .append(64L)
            .append(2.0f)
            .append(4.0d)
            .append(new BigDecimal(numeric))
            .append("x")
            .append(json)
            .append(ByteArray.copyFrom("y"))
            .append(Timestamp.parseTimestamp(timestamp))
            .append(Date.parseDate(date))
            .append(UUID.fromString(uuid))
            .build();
    assertThat(k.size()).isEqualTo(13);
    assertThat(k.getParts())
        .containsExactly(
            null,
            Boolean.TRUE,
            32L,
            64L,
            2.0d,
            4.0d,
            BigDecimal.valueOf(3141592, 6),
            "x",
            json,
            ByteArray.copyFrom("y"),
            Timestamp.parseTimestamp(timestamp),
            Date.parseDate(date),
            UUID.fromString(uuid))
        .inOrder();
  }

  // toBuilder() must seed the builder with the existing parts so new parts append.
  @Test
  public void toBuilder() {
    Key k = Key.of(1, 2).toBuilder().append(3).build();
    assertThat(k.size()).isEqualTo(3);
    assertThat(k.getParts()).containsExactly(1L, 2L, 3L).inOrder();
  }

  // toString() renders parts in brackets; note that a null part renders as empty.
  @Test
  public void testToString() {
    assertThat(Key.of().toString()).isEqualTo("[]");
    assertThat(Key.of(new Object[] {null}).toString()).isEqualTo("[]");
    assertThat(Key.of(true).toString()).isEqualTo("[true]");
    assertThat(Key.of(32).toString()).isEqualTo("[32]");
    assertThat(Key.of(2.0).toString()).isEqualTo("[2.0]");
    assertThat(Key.of(new BigDecimal("3.14")).toString()).isEqualTo("[3.14]");
    assertThat(Key.of("xyz").toString()).isEqualTo("[xyz]");
    assertThat(Key.of("{\"color\":\"red\",\"value\":\"#f00\"}").toString())
        .isEqualTo("[{\"color\":\"red\",\"value\":\"#f00\"}]");
    ByteArray b = ByteArray.copyFrom("xyz");
    assertThat(Key.of(b).toString()).isEqualTo("[" + b.toString() + "]");
    String timestamp = "2015-09-15T00:00:00Z";
    assertThat(Key.of(Timestamp.parseTimestamp(timestamp)).toString())
        .isEqualTo("[" + timestamp + "]");
    String date = "2015-09-15";
    assertThat(Key.of(Date.parseDate(date)).toString()).isEqualTo("[" + date + "]");
    String uuid = UUID.randomUUID().toString();
    assertThat(Key.of(UUID.fromString(uuid)).toString()).isEqualTo("[" + uuid + "]");
    assertThat(Key.of(1, 2, 3).toString()).isEqualTo("[1,2,3]");
  }

  // Equality groups: typed nulls all collapse into one null part, and numeric
  // widening (int/long, float/double) yields equal keys.
  @Test
  public void equalsAndHashCode() {
    EqualsTester tester = new EqualsTester();

    tester.addEqualityGroup(Key.of(), Key.newBuilder().build());

    // All types of null are considered equal.
    tester.addEqualityGroup(
        Key.of((Object) null),
        Key.newBuilder().append((Boolean) null).build(),
        Key.newBuilder().append((Long) null).build(),
        Key.newBuilder().append((Double) null).build(),
        Key.newBuilder().append((BigDecimal) null).build(),
        Key.newBuilder().append((String) null).build(),
        Key.newBuilder().append((ByteArray) null).build(),
        Key.newBuilder().append((Timestamp) null).build(),
        Key.newBuilder().append((Date) null).build(),
        Key.newBuilder().append((UUID) null).build(),
        Key.newBuilder().appendObject(null).build());

    tester.addEqualityGroup(Key.of(true), Key.newBuilder().append(true).build());
    tester.addEqualityGroup(Key.of(false), Key.newBuilder().append(false).build());
    tester.addEqualityGroup(Key.of(1), Key.of(1L), Key.newBuilder().append(1).build());
    tester.addEqualityGroup(Key.of(2), Key.of(2L), Key.newBuilder().append(2).build());
    tester.addEqualityGroup(Key.of(1, 2));
    tester.addEqualityGroup(Key.of(1.0f), Key.of(1.0d), Key.newBuilder().append(1.0).build());
    tester.addEqualityGroup(Key.of(2.0f), Key.of(2.0d), Key.newBuilder().append(2.0).build());
    tester.addEqualityGroup(
        Key.of(new BigDecimal("3.141592")),
        Key.of(BigDecimal.valueOf(3141592, 6)),
        Key.newBuilder().append(new BigDecimal("3141592e-6")).build());
    tester.addEqualityGroup(Key.of("a"), Key.newBuilder().append("a").build());
    tester.addEqualityGroup(Key.of("a", "b", "c"));
    tester.addEqualityGroup(
        Key.of(ByteArray.copyFrom("a")), Key.newBuilder().append(ByteArray.copyFrom("a")).build());
    tester.addEqualityGroup(
        Key.of("{\"color\":\"red\",\"value\":\"#f00\"}"),
        Key.newBuilder().append("{\"color\":\"red\",\"value\":\"#f00\"}").build());
    Timestamp t = Timestamp.parseTimestamp("2015-09-15T00:00:00Z");
    tester.addEqualityGroup(Key.of(t), Key.newBuilder().append(t).build());
    Date d = Date.parseDate("2016-09-15");
    tester.addEqualityGroup(Key.of(d), Key.newBuilder().append(d).build());
    UUID uuid = UUID.randomUUID();
    tester.addEqualityGroup(Key.of(uuid), Key.newBuilder().append(uuid).build());
    tester.addEqualityGroup(Key.of("a", 2, null));

    tester.testEquals();
  }

  // Round-trips keys of every supported part type through Java serialization.
  @Test
  public void serialization() {
    reserializeAndAssert(Key.of());
    reserializeAndAssert(Key.of(new Object[] {null}));
    reserializeAndAssert(Key.of(true));
    reserializeAndAssert(Key.of(32));
    reserializeAndAssert(Key.of(2.0));
    reserializeAndAssert(Key.of(new BigDecimal("3.141592")));
    reserializeAndAssert(Key.of("xyz"));
    reserializeAndAssert(Key.of("{\"color\":\"red\",\"value\":\"#f00\"}"));
    reserializeAndAssert(Key.of(ByteArray.copyFrom("xyz")));
    reserializeAndAssert(Key.of(Timestamp.parseTimestamp("2015-09-15T00:00:00Z")));
    reserializeAndAssert(Key.of(Date.parseDate("2015-09-15")));
    reserializeAndAssert(Key.of(UUID.randomUUID()));
    reserializeAndAssert(Key.of(1, 2, 3));
  }

  // Checks the proto wire encoding: int64 as decimal strings, bytes as base64,
  // BigDecimal in its canonical scientific form, null as NULL_VALUE.
  @Test
  public void toProto() {
    String timestamp = "2015-09-15T00:00:00Z";
    String date = "2015-09-15";
    String uuid = UUID.randomUUID().toString();
    Key k =
        Key.newBuilder()
            .append((Boolean) null)
            .append(true)
            .append(32)
            .append(64L)
            .append(2.0f)
            .append(4.0d)
            .append(new BigDecimal("6.62607004e-34"))
            .append("x")
            .append("{\"color\":\"red\",\"value\":\"#f00\"}")
            .append(ByteArray.copyFrom("y"))
            .append(Timestamp.parseTimestamp(timestamp))
            .append(Date.parseDate(date))
            .append(UUID.fromString(uuid))
            .build();
    ListValue.Builder builder = ListValue.newBuilder();
    builder.addValuesBuilder().setNullValue(NullValue.NULL_VALUE);
    builder.addValuesBuilder().setBoolValue(true);
    builder.addValuesBuilder().setStringValue("32");
    builder.addValuesBuilder().setStringValue("64");
    builder.addValuesBuilder().setNumberValue(2.0f);
    builder.addValuesBuilder().setNumberValue(4.0d);
    builder.addValuesBuilder().setStringValue("6.62607004E-34");
    builder.addValuesBuilder().setStringValue("x");
    builder.addValuesBuilder().setStringValue("{\"color\":\"red\",\"value\":\"#f00\"}");
    builder.addValuesBuilder().setStringValue("eQ==");
    builder.addValuesBuilder().setStringValue(timestamp);
    builder.addValuesBuilder().setStringValue(date);
    builder.addValuesBuilder().setStringValue(uuid);
    assertThat(k.toProto()).isEqualTo(builder.build());
  }
}
+ */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; + +import com.google.cloud.spanner.AbstractResultSet.LazyByteArray; +import java.util.Base64; +import java.util.Random; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class LazyByteArrayTest { + + @Test + public void testEqualsAndHashCode() { + Random random = new Random(); + byte[] bytes1 = new byte[random.nextInt(300) + 300]; + // Make sure the second byte array has a different length than the first to be absolutely sure + // that they can never contain the same value. + byte[] bytes2 = new byte[bytes1.length + 1]; + random.nextBytes(bytes1); + random.nextBytes(bytes2); + + LazyByteArray lazyByteArray1 = new LazyByteArray(Base64.getEncoder().encodeToString(bytes1)); + LazyByteArray lazyByteArray2 = new LazyByteArray(Base64.getEncoder().encodeToString(bytes2)); + LazyByteArray lazyByteArray3 = new LazyByteArray(Base64.getEncoder().encodeToString(bytes1)); + + assertEquals(lazyByteArray1, lazyByteArray3); + assertNotEquals(lazyByteArray1, lazyByteArray2); + assertNotEquals(lazyByteArray2, lazyByteArray3); + + assertEquals(lazyByteArray1.hashCode(), lazyByteArray3.hashCode()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/LazySpannerInitializerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/LazySpannerInitializerTest.java new file mode 100644 index 000000000000..aa4879875e32 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/LazySpannerInitializerTest.java @@ -0,0 +1,103 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; +import static org.mockito.Mockito.mock; + +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class LazySpannerInitializerTest { + + @Test + public void testGet_shouldReturnSameInstance() throws Throwable { + final LazySpannerInitializer initializer = + new LazySpannerInitializer() { + @Override + public Spanner initialize() { + return mock(Spanner.class); + } + }; + Spanner s1 = initializer.get(); + Spanner s2 = initializer.get(); + assertThat(s1).isSameInstanceAs(s2); + } + + @Test + public void testGet_shouldThrowErrorFromInitializeMethod() { + final LazySpannerInitializer initializer = + new LazySpannerInitializer() { + @Override + public Spanner initialize() throws IOException { + throw new IOException("Could not find credentials file"); + } + }; + Throwable t1 = assertThrows(Throwable.class, 
() -> initializer.get()); + Throwable t2 = assertThrows(Throwable.class, () -> initializer.get()); + assertThat(t1).isSameInstanceAs(t2); + } + + @Test + public void testGet_shouldInvokeInitializeOnlyOnce() + throws InterruptedException, ExecutionException { + final AtomicInteger count = new AtomicInteger(); + final LazySpannerInitializer initializer = + new LazySpannerInitializer() { + @Override + public Spanner initialize() { + count.incrementAndGet(); + return mock(Spanner.class); + } + }; + final int threads = 16; + final CountDownLatch latch = new CountDownLatch(threads); + ListeningExecutorService executor = + MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(threads)); + List> futures = new ArrayList<>(threads); + for (int i = 0; i < threads; i++) { + futures.add( + executor.submit( + () -> { + latch.countDown(); + latch.await(10L, TimeUnit.SECONDS); + return initializer.get(); + })); + } + assertThat(Futures.allAsList(futures).get()).hasSize(threads); + for (int i = 0; i < threads - 1; i++) { + assertThat(futures.get(i).get()).isSameInstanceAs(futures.get(i + 1).get()); + } + assertThat(count.get()).isEqualTo(1); + executor.shutdown(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/LocationAwareTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/LocationAwareTest.java new file mode 100644 index 000000000000..aa038d512ee9 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/LocationAwareTest.java @@ -0,0 +1,270 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.api.core.ApiFuture; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.AsyncResultSet.CallbackResponse; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.SpannerOptions.CallContextConfigurator; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import com.google.cloud.spanner.connection.RandomResultSetGenerator; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.SpannerGrpc; +import io.grpc.Context; +import io.grpc.ManagedChannelBuilder; +import io.grpc.MethodDescriptor; +import io.grpc.Status; +import java.time.Duration; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadLocalRandom; +import 
java.util.concurrent.TimeUnit; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class LocationAwareTest extends AbstractMockServerTest { + private static final Statement SELECT_RANDOM_STATEMENT = Statement.of("select * from random"); + private static final int RANDOM_RESULT_ROW_COUNT = 20; + private static Spanner spanner; + private static DatabaseClient client; + + private static final class TimeoutHolder { + private Duration timeout; + } + + @BeforeClass + public static void enableLocationApiAndSetupClient() { + SpannerOptions.useEnvironment( + new SpannerOptions.SpannerEnvironment() { + @Override + public boolean isEnableLocationApi() { + return true; + } + }); + spanner = + SpannerOptions.newBuilder() + .setProjectId("my-project") + .setHost(String.format("http://localhost:%d", getPort())) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setCredentials(NoCredentials.getInstance()) + .build() + .getService(); + client = spanner.getDatabaseClient(DatabaseId.of("my-project", "my-instance", "my-database")); + + RandomResultSetGenerator generator = new RandomResultSetGenerator(RANDOM_RESULT_ROW_COUNT); + mockSpanner.putStatementResult( + StatementResult.query(SELECT_RANDOM_STATEMENT, generator.generate())); + } + + @AfterClass + public static void cleanup() { + SpannerOptions.useDefaultEnvironment(); + if (spanner != null) { + spanner.close(); + } + } + + @Test + public void testSingleQuery() { + int rowCount = 0; + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT_RANDOM_STATEMENT)) { + while (resultSet.next()) { + rowCount++; + } + } + assertEquals(RANDOM_RESULT_ROW_COUNT, rowCount); + } + + @Test + public void testParallelQueries() throws Exception { + int numThreads = 10; + ListeningExecutorService executor = + MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(numThreads)); + List> 
results = new ArrayList<>(); + for (int i = 0; i < numThreads; i++) { + results.add( + executor.submit( + () -> { + try (ResultSet resultSet = + client.singleUse().executeQuery(SELECT_RANDOM_STATEMENT)) { + while (resultSet.next()) { + // Randomly stop consuming results somewhere halfway the results (sometimes). + if (ThreadLocalRandom.current().nextInt(RANDOM_RESULT_ROW_COUNT * 2) == 5) { + break; + } + } + } + return null; + })); + } + executor.shutdown(); + Futures.allAsList(results).get(); + } + + @Test + public void testSingleReadWriteTransaction() { + client.readWriteTransaction().run(transaction -> transaction.executeUpdate(INSERT_STATEMENT)); + } + + @Test + public void testParallelReadWriteTransactions() throws Exception { + int numThreads = 10; + ListeningExecutorService executor = + MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(numThreads)); + List> results = new ArrayList<>(); + for (int i = 0; i < numThreads; i++) { + results.add( + executor.submit( + () -> { + client + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(INSERT_STATEMENT)); + return null; + })); + } + executor.shutdown(); + Futures.allAsList(results).get(); + } + + @Test + public void testExecuteStreamingSqlCallContextTimeout_locationAware() { + final TimeoutHolder timeoutHolder = new TimeoutHolder(); + CallContextConfigurator configurator = + new CallContextConfigurator() { + @Override + public ApiCallContext configure( + ApiCallContext context, ReqT request, MethodDescriptor method) { + if (request instanceof ExecuteSqlRequest + && method.equals(SpannerGrpc.getExecuteStreamingSqlMethod())) { + return context.withTimeoutDuration(timeoutHolder.timeout); + } + return null; + } + }; + + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(10, 0)); + Context context = + Context.current().withValue(SpannerOptions.CALL_CONTEXT_CONFIGURATOR_KEY, configurator); + try { + context.run( + () -> { + 
timeoutHolder.timeout = Duration.ofNanos(1L); + SpannerException e = + assertThrows( + SpannerException.class, + () -> { + try (ResultSet rs = + client.singleUse().executeQuery(SELECT_RANDOM_STATEMENT)) { + rs.next(); + } + }); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + + timeoutHolder.timeout = Duration.ofMinutes(1L); + try (ResultSet rs = client.singleUse().executeQuery(SELECT_RANDOM_STATEMENT)) { + assertTrue(rs.next()); + } + }); + } finally { + mockSpanner.removeAllExecutionTimes(); + } + } + + @Test + public void testExecuteStreamingSqlInvalidArgumentPropagates_locationAware() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException( + Status.INVALID_ARGUMENT.withDescription("invalid request").asRuntimeException())); + try { + SpannerException e = + assertThrows( + SpannerException.class, + () -> { + try (ResultSet rs = client.singleUse().executeQuery(SELECT_RANDOM_STATEMENT)) { + rs.next(); + } + }); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + } finally { + mockSpanner.removeAllExecutionTimes(); + } + } + + @Test + public void testExecuteQueryAsyncCancelReturnsCancelled_locationAware() throws Exception { + final List values = new LinkedList<>(); + final CountDownLatch receivedFirstRow = new CountDownLatch(1); + final CountDownLatch cancelled = new CountDownLatch(1); + final ApiFuture callbackResult; + + ExecutorService executor = Executors.newSingleThreadExecutor(); + try (AsyncResultSet rs = client.singleUse().executeQueryAsync(SELECT_RANDOM_STATEMENT)) { + callbackResult = + rs.setCallback( + executor, + resultSet -> { + try { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + values.add(1); + receivedFirstRow.countDown(); + cancelled.await(); + break; + } + } + } catch (Throwable t) { + return CallbackResponse.DONE; + } + }); + + assertTrue(receivedFirstRow.await(30L, 
TimeUnit.SECONDS)); + rs.cancel(); + cancelled.countDown(); + SpannerException e = assertThrows(SpannerException.class, () -> get(callbackResult)); + assertEquals(ErrorCode.CANCELLED, e.getErrorCode()); + assertEquals(1, values.size()); + } finally { + executor.shutdownNow(); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MetricRegistryTestUtils.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MetricRegistryTestUtils.java new file mode 100644 index 000000000000..1bedda47e5f1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MetricRegistryTestUtils.java @@ -0,0 +1,196 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.spanner; + +import com.google.common.collect.Maps; +import io.opencensus.common.ToLongFunction; +import io.opencensus.metrics.DerivedDoubleCumulative; +import io.opencensus.metrics.DerivedDoubleGauge; +import io.opencensus.metrics.DerivedLongCumulative; +import io.opencensus.metrics.DerivedLongGauge; +import io.opencensus.metrics.DoubleCumulative; +import io.opencensus.metrics.DoubleGauge; +import io.opencensus.metrics.LabelKey; +import io.opencensus.metrics.LabelValue; +import io.opencensus.metrics.LongCumulative; +import io.opencensus.metrics.LongGauge; +import io.opencensus.metrics.MetricOptions; +import io.opencensus.metrics.MetricRegistry; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +class MetricRegistryTestUtils { + + static class PointWithFunction { + private final T ref; + private final ToLongFunction function; + private final List key; + private final List values; + + PointWithFunction( + T obj, ToLongFunction function, List keys, List values) { + this.ref = obj; + this.function = function; + this.key = keys; + this.values = values; + } + + long value() { + return function.applyAsLong(ref); + } + + List keys() { + return key; + } + + List values() { + return values; + } + } + + static class MetricsRecord { + private final Map> metrics; + + private MetricsRecord() { + this.metrics = Maps.newHashMap(); + } + + Map> getMetrics() { + return metrics; + } + } + + public static final class FakeDerivedLongGauge extends DerivedLongGauge { + private final MetricsRecord record; + private final String name; + private final List labelKeys; + + private FakeDerivedLongGauge( + FakeMetricRegistry metricRegistry, String name, List labelKeys) { + this.record = metricRegistry.record; + this.labelKeys = labelKeys; + this.name = name; + } + + @Override + public void createTimeSeries( + List labelValues, T t, ToLongFunction toLongFunction) { + if (!this.record.metrics.containsKey(this.name)) { + 
this.record.metrics.put(this.name, new ArrayList<>()); + } + this.record + .metrics + .get(this.name) + .add(new PointWithFunction<>(t, toLongFunction, labelKeys, labelValues)); + } + + @Override + public void removeTimeSeries(List list) {} + + @Override + public void clear() {} + } + + public static final class FakeDerivedLongCumulative extends DerivedLongCumulative { + private final MetricsRecord record; + private final String name; + private final List labelKeys; + + private FakeDerivedLongCumulative( + FakeMetricRegistry metricRegistry, String name, List labelKeys) { + this.record = metricRegistry.record; + this.labelKeys = labelKeys; + this.name = name; + } + + @Override + public void createTimeSeries( + List labelValues, T t, ToLongFunction toLongFunction) { + if (!this.record.metrics.containsKey(this.name)) { + this.record.metrics.put(this.name, new ArrayList<>()); + } + this.record + .metrics + .get(this.name) + .add(new PointWithFunction<>(t, toLongFunction, labelKeys, labelValues)); + } + + @Override + public void removeTimeSeries(List list) {} + + @Override + public void clear() {} + } + + /** + * A {@link MetricRegistry} implementation that saves metrics records to be accessible from {@link + * #pollRecord()}. 
+ */ + public static final class FakeMetricRegistry extends MetricRegistry { + + private MetricsRecord record; + + FakeMetricRegistry() { + record = new MetricsRecord(); + } + + MetricsRecord pollRecord() { + return record; + } + + @Override + public DerivedLongGauge addDerivedLongGauge(String s, MetricOptions metricOptions) { + return new FakeDerivedLongGauge(this, s, metricOptions.getLabelKeys()); + } + + @Override + public LongGauge addLongGauge(String s, MetricOptions metricOptions) { + throw new UnsupportedOperationException(); + } + + @Override + public DoubleGauge addDoubleGauge(String s, MetricOptions metricOptions) { + throw new UnsupportedOperationException(); + } + + @Override + public DerivedDoubleGauge addDerivedDoubleGauge(String s, MetricOptions metricOptions) { + throw new UnsupportedOperationException(); + } + + @Override + public LongCumulative addLongCumulative(String s, MetricOptions metricOptions) { + throw new UnsupportedOperationException(); + } + + @Override + public DoubleCumulative addDoubleCumulative(String s, MetricOptions metricOptions) { + throw new UnsupportedOperationException(); + } + + @Override + public DerivedLongCumulative addDerivedLongCumulative(String s, MetricOptions metricOptions) { + return new FakeDerivedLongCumulative(this, s, metricOptions.getLabelKeys()); + } + + @Override + public DerivedDoubleCumulative addDerivedDoubleCumulative( + String s, MetricOptions metricOptions) { + throw new UnsupportedOperationException(); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockDatabaseAdminServiceImpl.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockDatabaseAdminServiceImpl.java new file mode 100644 index 000000000000..8cd784b4f2c0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockDatabaseAdminServiceImpl.java @@ -0,0 +1,1011 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache 
License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.common.base.Strings; +import com.google.common.collect.Collections2; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Any; +import com.google.protobuf.Empty; +import com.google.protobuf.Timestamp; +import com.google.rpc.Code; +import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupInfo; +import com.google.spanner.admin.database.v1.CreateBackupMetadata; +import com.google.spanner.admin.database.v1.CreateBackupRequest; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.Database.State; +import com.google.spanner.admin.database.v1.DatabaseAdminGrpc.DatabaseAdminImplBase; +import com.google.spanner.admin.database.v1.DatabaseRole; +import com.google.spanner.admin.database.v1.DeleteBackupRequest; +import 
com.google.spanner.admin.database.v1.DropDatabaseRequest; +import com.google.spanner.admin.database.v1.GetBackupRequest; +import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; +import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; +import com.google.spanner.admin.database.v1.GetDatabaseRequest; +import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; +import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; +import com.google.spanner.admin.database.v1.ListBackupsRequest; +import com.google.spanner.admin.database.v1.ListBackupsResponse; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse; +import com.google.spanner.admin.database.v1.ListDatabaseRolesRequest; +import com.google.spanner.admin.database.v1.ListDatabaseRolesResponse; +import com.google.spanner.admin.database.v1.ListDatabasesRequest; +import com.google.spanner.admin.database.v1.ListDatabasesResponse; +import com.google.spanner.admin.database.v1.OperationProgress; +import com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata; +import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; +import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; +import com.google.spanner.admin.database.v1.RestoreInfo; +import com.google.spanner.admin.database.v1.RestoreSourceType; +import com.google.spanner.admin.database.v1.UpdateBackupRequest; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; +import io.grpc.ServerServiceDefinition; +import io.grpc.Status; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Map.Entry; +import java.util.Queue; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.Callable; 
+import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CountDownLatch; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class MockDatabaseAdminServiceImpl extends DatabaseAdminImplBase implements MockGrpcService { + private static final class MockDatabase { + private final String name; + private State state; + private final Timestamp createTime; + private final List ddl = new ArrayList<>(); + private final RestoreInfo restoreInfo; + + private MockDatabase(String name, List ddl, RestoreInfo restoreInfo) { + this.name = name; + this.state = State.CREATING; + this.createTime = + Timestamp.newBuilder().setSeconds(System.currentTimeMillis() / 1000L).build(); + this.ddl.addAll(ddl); + this.restoreInfo = restoreInfo; + } + + private Database toProto() { + return Database.newBuilder() + .setCreateTime(createTime) + .setName(name) + .setRestoreInfo(restoreInfo == null ? 
RestoreInfo.getDefaultInstance() : restoreInfo) + .setState(state) + .build(); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof MockDatabase)) { + return false; + } + return ((MockDatabase) o).name.equals(this.name); + } + + @Override + public int hashCode() { + return name.hashCode(); + } + } + + static final class MockBackup { + private final String name; + private Backup.State state; + private final Timestamp createTime; + private final String database; + private final List ddl = new ArrayList<>(); + private final List referencingDatabases = new ArrayList<>(); + private final long size; + private Timestamp expireTime; + + private MockBackup(String name, Backup backup, MockDatabase database) { + this.name = name; + this.state = Backup.State.CREATING; + this.createTime = + Timestamp.newBuilder().setSeconds(System.currentTimeMillis() / 1000L).build(); + this.database = database.name; + this.ddl.addAll(database.ddl); + this.size = RND.nextInt(Integer.MAX_VALUE); + this.expireTime = backup.getExpireTime(); + } + + private Backup toProto() { + return Backup.newBuilder() + .setCreateTime(createTime) + .setDatabase(database) + .setExpireTime(expireTime) + .setName(name) + .setSizeBytes(size) + .setState(state) + .addAllReferencingDatabases(referencingDatabases) + .build(); + } + + private BackupInfo toBackupInfo() { + return BackupInfo.newBuilder() + .setBackup(name) + .setCreateTime(createTime) + .setSourceDatabase(database) + .build(); + } + + public String getName() { + return name; + } + + public String getDatabase() { + return database; + } + + public Timestamp getExpireTime() { + return expireTime; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof MockDatabase)) { + return false; + } + return ((MockDatabase) o).name.equals(this.name); + } + + @Override + public int hashCode() { + return name.hashCode(); + } + } + + private final class CreateDatabaseCallable implements Callable { + private final String 
operationName; + private final String name; + + private CreateDatabaseCallable(String operationName, String name) { + this.operationName = operationName; + this.name = name; + } + + @Override + public Database call() { + MockDatabase db = databases.get(name); + db.state = State.READY; + Database proto = db.toProto(); + Operation operation = operations.get(operationName); + if (operation != null) { + operations.update(operation.toBuilder().setDone(true).setResponse(Any.pack(proto)).build()); + } + return proto; + } + } + + private final class UpdateDatabaseDdlCallable implements Callable { + private final String operationName; + + private UpdateDatabaseDdlCallable(String operationName) { + this.operationName = operationName; + } + + @Override + public Empty call() throws Exception { + Operation operation = operations.get(operationName); + if (operation != null) { + UpdateDatabaseDdlMetadata metadata = + operation.getMetadata().unpack(UpdateDatabaseDdlMetadata.class); + List commitTimestamps = new ArrayList<>(metadata.getStatementsCount()); + for (int i = 0; i < metadata.getStatementsCount(); i++) { + commitTimestamps.add(currentTime()); + } + metadata = metadata.toBuilder().addAllCommitTimestamps(commitTimestamps).build(); + operations.update( + operation.toBuilder() + .setMetadata(Any.pack(metadata)) + .setDone(true) + .setResponse(Any.pack(Empty.getDefaultInstance())) + .build()); + } + return Empty.getDefaultInstance(); + } + } + + private final class CreateBackupCallable implements Callable { + private final String operationName; + private final String name; + + private CreateBackupCallable(String operationName, String name) { + this.operationName = operationName; + this.name = name; + } + + @Override + public Backup call() throws Exception { + MockBackup backup = backups.get(name); + Backup proto = backup.toProto(); + Operation operation = operations.get(operationName); + for (int progress = 1; progress <= 100; progress++) { + operation = 
operations.get(operationName); + long sleep = createBackupOperationExecutionTime / 100; + if (progress == 100) { + sleep += createBackupOperationExecutionTime % 100; + } + Thread.sleep(sleep); + if (operation != null) { + CreateBackupMetadata metadata = + operation.getMetadata().unpack(CreateBackupMetadata.class); + metadata = + metadata.toBuilder() + .setProgress( + metadata.getProgress().toBuilder().setProgressPercent(progress).build()) + .build(); + operations.update( + operation.toBuilder() + .setMetadata(Any.pack(metadata)) + .setResponse(Any.pack(proto)) + .build()); + } + } + backup.state = Backup.State.READY; + proto = backup.toProto(); + if (operation != null) { + CreateBackupMetadata metadata = operation.getMetadata().unpack(CreateBackupMetadata.class); + metadata = + metadata.toBuilder() + .setProgress( + metadata.getProgress().toBuilder() + .setProgressPercent(100) + .setEndTime(currentTime()) + .build()) + .build(); + operations.update( + operation.toBuilder() + .setDone(true) + .setMetadata(Any.pack(metadata)) + .setResponse(Any.pack(proto)) + .build()); + } + return proto; + } + } + + private final class RestoreDatabaseCallable implements Callable { + private final String operationName; + private final String name; + + private RestoreDatabaseCallable(String operationName, String name) { + this.operationName = operationName; + this.name = name; + } + + @Override + public Database call() throws Exception { + MockDatabase db = databases.get(name); + db.state = State.READY_OPTIMIZING; + Database proto = db.toProto(); + Operation operation = operations.get(operationName); + for (int progress = 1; progress <= 100; progress++) { + long sleep = restoreDatabaseOperationExecutionTime / 100; + if (progress == 100) { + sleep += restoreDatabaseOperationExecutionTime % 100; + } + Thread.sleep(sleep); + if (operation != null) { + RestoreDatabaseMetadata metadata = + operation.getMetadata().unpack(RestoreDatabaseMetadata.class); + metadata = + metadata.toBuilder() + 
.setProgress( + metadata.getProgress().toBuilder().setProgressPercent(progress).build()) + .build(); + operations.update( + operation.toBuilder() + .setMetadata(Any.pack(metadata)) + .setResponse(Any.pack(proto)) + .build()); + } + } + db.state = State.READY_OPTIMIZING; + proto = db.toProto(); + if (operation != null) { + RestoreDatabaseMetadata metadata = + operation.getMetadata().unpack(RestoreDatabaseMetadata.class); + metadata = + metadata.toBuilder() + .setProgress( + metadata.getProgress().toBuilder() + .setEndTime(currentTime()) + .setProgressPercent(100) + .build()) + .build(); + operations.update( + operation.toBuilder() + .setDone(true) + .setMetadata(Any.pack(metadata)) + .setResponse(Any.pack(proto)) + .build()); + } + return proto; + } + } + + private final class OptimizeDatabaseCallable implements Callable { + private final String operationName; + private final String restoreOperationName; + private final String name; + + private OptimizeDatabaseCallable( + String operationName, String restoreOperationName, String name) { + this.operationName = operationName; + this.restoreOperationName = restoreOperationName; + this.name = name; + } + + @Override + public Database call() throws Exception { + MockDatabase db = databases.get(name); + Operation operation = operations.get(operationName); + try { + // Wait until the restore operation has finished. 
+ Operation restoreOperation = operations.get(restoreOperationName); + while (!restoreOperation.getDone()) { + Thread.sleep(10L); + restoreOperation = operations.get(restoreOperationName); + } + Thread.sleep(optimizeDatabaseOperationExecutionTime); + db.state = State.READY; + Database proto = db.toProto(); + if (operation != null) { + operations.update( + operation.toBuilder().setDone(true).setResponse(Any.pack(proto)).build()); + } + return proto; + } catch (Exception e) { + if (operation != null) { + Database proto = db.toProto(); + operations.update( + operation.toBuilder() + .setDone(true) + .setError(fromException(e)) + .setResponse(Any.pack(proto)) + .build()); + } + throw e; + } + } + } + + private com.google.rpc.Status fromException(Exception e) { + int code = Code.UNKNOWN_VALUE; + if (e instanceof InterruptedException) { + code = Code.CANCELLED_VALUE; + } + return com.google.rpc.Status.newBuilder().setCode(code).setMessage(e.getMessage()).build(); + } + + private final Queue requests = new ConcurrentLinkedQueue<>(); + private ConcurrentMap policies = new ConcurrentHashMap<>(); + private static final String EXPIRE_TIME_MASK = "expire_time"; + private static final Random RND = new Random(); + private final Queue exceptions = new ConcurrentLinkedQueue<>(); + private volatile CountDownLatch freezeLock = new CountDownLatch(0); + private final ConcurrentMap databases = new ConcurrentHashMap<>(); + private final ConcurrentMap backups = new ConcurrentHashMap<>(); + private final ConcurrentMap> filterMatches = new ConcurrentHashMap<>(); + private final List databaseRoles = new ArrayList<>(); + private final MockOperationsServiceImpl operations; + + private long createBackupOperationExecutionTime; + private long restoreDatabaseOperationExecutionTime; + private long optimizeDatabaseOperationExecutionTime; + + private SimulatedExecutionTime createBackupStartupExecutionTime = SimulatedExecutionTime.none(); + private SimulatedExecutionTime 
createBackupResponseExecutionTime = SimulatedExecutionTime.none(); + private SimulatedExecutionTime createDatabaseStartupExecutionTime = SimulatedExecutionTime.none(); + private SimulatedExecutionTime createDatabaseResponseExecutionTime = + SimulatedExecutionTime.none(); + private SimulatedExecutionTime getDatabaseExecutionTime = SimulatedExecutionTime.none(); + private SimulatedExecutionTime restoreDatabaseStartupExecutionTime = + SimulatedExecutionTime.none(); + private SimulatedExecutionTime restoreDatabaseResponseExecutionTime = + SimulatedExecutionTime.none(); + + public MockDatabaseAdminServiceImpl(MockOperationsServiceImpl operations) { + this.operations = operations; + } + + @Override + public void createDatabase( + CreateDatabaseRequest request, StreamObserver responseObserver) { + requests.add(request); + try { + createDatabaseStartupExecutionTime.simulateExecutionTime(exceptions, false, freezeLock); + String id = request.getCreateStatement().replace("CREATE DATABASE ", ""); + if (id.startsWith("`") && id.endsWith("`")) { + id = id.substring(1, id.length() - 1); + } + String name = String.format("%s/databases/%s", request.getParent(), id); + MockDatabase db = new MockDatabase(name, request.getExtraStatementsList(), null); + if (databases.putIfAbsent(name, db) == null) { + CreateDatabaseMetadata metadata = + CreateDatabaseMetadata.newBuilder().setDatabase(name).build(); + Database database = Database.newBuilder().setName(name).setState(db.state).build(); + Operation operation = + Operation.newBuilder() + .setMetadata(Any.pack(metadata)) + .setResponse(Any.pack(database)) + .setDone(false) + .setName(operations.generateOperationName(name)) + .build(); + operations.addOperation(operation, new CreateDatabaseCallable(operation.getName(), name)); + createDatabaseResponseExecutionTime.simulateExecutionTime(exceptions, false, freezeLock); + responseObserver.onNext(operation); + responseObserver.onCompleted(); + } else { + responseObserver.onError( + 
Status.ALREADY_EXISTS + .withDescription(String.format("Database with name %s already exists", name)) + .asRuntimeException()); + } + } catch (Throwable t) { + responseObserver.onError(t); + } + } + + @Override + public void dropDatabase(DropDatabaseRequest request, StreamObserver responseObserver) { + requests.add(request); + MockDatabase db = databases.get(request.getDatabase()); + if (databases.remove(request.getDatabase(), db)) { + responseObserver.onNext(Empty.getDefaultInstance()); + responseObserver.onCompleted(); + } else { + responseObserver.onError(Status.NOT_FOUND.asRuntimeException()); + } + } + + @Override + public void getDatabase(GetDatabaseRequest request, StreamObserver responseObserver) { + requests.add(request); + try { + getDatabaseExecutionTime.simulateExecutionTime(exceptions, false, freezeLock); + MockDatabase db = databases.get(request.getName()); + if (db != null) { + responseObserver.onNext( + Database.newBuilder() + .setName(request.getName()) + .setCreateTime(db.createTime) + .setState(State.READY) + .build()); + responseObserver.onCompleted(); + } else { + responseObserver.onError(Status.NOT_FOUND.asRuntimeException()); + } + } catch (Throwable t) { + responseObserver.onError(t); + } + } + + @Override + public void getDatabaseDdl( + GetDatabaseDdlRequest request, StreamObserver responseObserver) { + requests.add(request); + MockDatabase db = databases.get(request.getDatabase()); + if (db != null) { + responseObserver.onNext(GetDatabaseDdlResponse.newBuilder().addAllStatements(db.ddl).build()); + responseObserver.onCompleted(); + } else { + responseObserver.onError(Status.NOT_FOUND.asRuntimeException()); + } + } + + @Override + public void listDatabases( + ListDatabasesRequest request, StreamObserver responseObserver) { + requests.add(request); + List dbs = new ArrayList<>(databases.size()); + for (Entry entry : databases.entrySet()) { + dbs.add( + Database.newBuilder() + .setName(entry.getKey()) + 
.setCreateTime(entry.getValue().createTime) + .setState(State.READY) + .build()); + } + responseObserver.onNext(ListDatabasesResponse.newBuilder().addAllDatabases(dbs).build()); + responseObserver.onCompleted(); + } + + @Override + public void listDatabaseOperations( + ListDatabaseOperationsRequest request, + StreamObserver responseObserver) { + requests.add(request); + ListDatabaseOperationsResponse.Builder builder = ListDatabaseOperationsResponse.newBuilder(); + try { + for (Operation op : operations.iterable()) { + if (op.getName().matches(".*?/databases\\/.*?/operations/.*?") + && op.getName().startsWith(request.getParent())) { + if (matchesFilter(op, request.getFilter())) { + builder.addOperations(op); + } + } + } + responseObserver.onNext(builder.build()); + responseObserver.onCompleted(); + } catch (Exception e) { + responseObserver.onError(e); + } + } + + @Override + public void listDatabaseRoles( + ListDatabaseRolesRequest request, + StreamObserver responseObserver) { + requests.add(request); + List dbRoles = + new ArrayList<>(databaseRoles.size()); + for (DatabaseRole entry : databaseRoles) { + dbRoles.add( + com.google.spanner.admin.database.v1.DatabaseRole.newBuilder() + .setName(entry.getName()) + .build()); + } + + responseObserver.onNext( + ListDatabaseRolesResponse.newBuilder().addAllDatabaseRoles(dbRoles).build()); + responseObserver.onCompleted(); + } + + private boolean matchesFilter(Object obj, String filter) throws Exception { + if (!Strings.isNullOrEmpty(filter)) { + Set matches = filterMatches.get(filter); + if (matches != null) { + String name = (String) obj.getClass().getMethod("getName").invoke(obj); + return matches.contains(name); + } + if (obj instanceof Operation) { + Operation operation = (Operation) obj; + Pattern pattern = + Pattern.compile( + "(?:\\(metadata.@type:type.googleapis.com/(.*)\\)) AND" + + " (?:\\(metadata.(?:name|database):(.*)\\)|\\(name:(.*)/operations/\\))"); + Matcher matcher = pattern.matcher(filter); + if 
(matcher.matches()) { + String type = matcher.group(1); + String objectName = matcher.group(2); + if (objectName == null) { + objectName = matcher.group(3); + } + Any anyMetadata = operation.getMetadata(); + if (anyMetadata.getTypeUrl().endsWith(type)) { + if (type.equals(CreateBackupMetadata.getDescriptor().getFullName())) { + CreateBackupMetadata metadata = + operation.getMetadata().unpack(CreateBackupMetadata.class); + return metadata.getName().equals(objectName); + } else if (type.equals(CreateDatabaseMetadata.getDescriptor().getFullName())) { + CreateDatabaseMetadata metadata = + operation.getMetadata().unpack(CreateDatabaseMetadata.class); + return metadata.getDatabase().equals(objectName); + } else if (type.equals(RestoreDatabaseMetadata.getDescriptor().getFullName())) { + RestoreDatabaseMetadata metadata = + operation.getMetadata().unpack(RestoreDatabaseMetadata.class); + return metadata.getName().equals(objectName); + } + } + } + } + return false; + } + return true; + } + + @Override + public void updateDatabaseDdl( + UpdateDatabaseDdlRequest request, StreamObserver responseObserver) { + requests.add(request); + MockDatabase db = databases.get(request.getDatabase()); + if (db != null) { + db.ddl.addAll(request.getStatementsList()); + UpdateDatabaseDdlMetadata metadata = + UpdateDatabaseDdlMetadata.newBuilder() + .setDatabase(request.getDatabase()) + .addAllStatements(request.getStatementsList()) + .build(); + Operation operation = + Operation.newBuilder() + .setMetadata(Any.pack(metadata)) + .setResponse(Any.pack(Empty.getDefaultInstance())) + .setDone(true) + .setName(operations.generateOperationName(request.getDatabase())) + .build(); + operations.addOperation(operation, new UpdateDatabaseDdlCallable(operation.getName())); + responseObserver.onNext(operation); + responseObserver.onCompleted(); + } else { + responseObserver.onError(Status.NOT_FOUND.asRuntimeException()); + } + } + + @Override + public void createBackup( + CreateBackupRequest request, 
StreamObserver responseObserver) { + requests.add(request); + try { + createBackupStartupExecutionTime.simulateExecutionTime(exceptions, false, freezeLock); + String name = String.format("%s/backups/%s", request.getParent(), request.getBackupId()); + MockDatabase db = databases.get(request.getBackup().getDatabase()); + if (db == null) { + responseObserver.onError( + Status.NOT_FOUND + .withDescription( + String.format( + "Database with name %s not found", request.getBackup().getDatabase())) + .asRuntimeException()); + return; + } + MockBackup bck = new MockBackup(name, request.getBackup(), db); + if (backups.putIfAbsent(name, bck) == null) { + CreateBackupMetadata metadata = + CreateBackupMetadata.newBuilder() + .setName(name) + .setDatabase(bck.database) + .setProgress( + OperationProgress.newBuilder() + .setStartTime( + Timestamp.newBuilder() + .setSeconds(System.currentTimeMillis() / 1000L) + .build()) + .setProgressPercent(0)) + .build(); + Operation operation = + Operation.newBuilder() + .setMetadata(Any.pack(metadata)) + .setResponse(Any.pack(bck.toProto())) + .setName(operations.generateOperationName(name)) + .build(); + operations.addOperation(operation, new CreateBackupCallable(operation.getName(), name)); + + createBackupResponseExecutionTime.simulateExecutionTime(exceptions, false, freezeLock); + responseObserver.onNext(operation); + responseObserver.onCompleted(); + } else { + responseObserver.onError( + Status.ALREADY_EXISTS + .withDescription(String.format("Backup with name %s already exists", name)) + .asRuntimeException()); + } + } catch (Throwable t) { + responseObserver.onError(t); + } + } + + @Override + public void deleteBackup(DeleteBackupRequest request, StreamObserver responseObserver) { + requests.add(request); + MockBackup bck = backups.get(request.getName()); + if (backups.remove(request.getName(), bck)) { + responseObserver.onNext(Empty.getDefaultInstance()); + responseObserver.onCompleted(); + } else { + 
responseObserver.onError(Status.NOT_FOUND.asRuntimeException()); + } + } + + @Override + public void getBackup(GetBackupRequest request, StreamObserver responseObserver) { + requests.add(request); + MockBackup bck = backups.get(request.getName()); + if (bck != null) { + responseObserver.onNext( + Backup.newBuilder() + .setName(request.getName()) + .setCreateTime(bck.createTime) + .setDatabase(bck.database) + .setExpireTime(bck.expireTime) + .setSizeBytes(bck.size) + .setState(Backup.State.READY) + .build()); + responseObserver.onCompleted(); + } else { + responseObserver.onError(Status.NOT_FOUND.asRuntimeException()); + } + } + + @Override + public void listBackups( + ListBackupsRequest request, StreamObserver responseObserver) { + requests.add(request); + List bcks = new ArrayList<>(backups.size()); + try { + for (Entry entry : backups.entrySet()) { + if (matchesFilter(entry.getValue(), request.getFilter())) { + bcks.add( + Backup.newBuilder() + .setName(entry.getKey()) + .setCreateTime(entry.getValue().createTime) + .setDatabase(entry.getValue().database) + .setExpireTime(entry.getValue().expireTime) + .setSizeBytes(entry.getValue().size) + .setState(Backup.State.READY) + .build()); + } + } + responseObserver.onNext(ListBackupsResponse.newBuilder().addAllBackups(bcks).build()); + responseObserver.onCompleted(); + } catch (Exception e) { + responseObserver.onError(e); + } + } + + @Override + public void listBackupOperations( + ListBackupOperationsRequest request, + StreamObserver responseObserver) { + requests.add(request); + ListBackupOperationsResponse.Builder builder = ListBackupOperationsResponse.newBuilder(); + try { + for (Operation op : operations.iterable()) { + if (op.getName().matches(".*?/backups/.*?/operations/.*?") + && op.getName().startsWith(request.getParent())) { + if (matchesFilter(op, request.getFilter())) { + builder.addOperations(op); + } + } + } + responseObserver.onNext(builder.build()); + responseObserver.onCompleted(); + } catch (Exception 
e) { + responseObserver.onError(e); + } + } + + @Override + public void updateBackup(UpdateBackupRequest request, StreamObserver responseObserver) { + requests.add(request); + MockBackup bck = backups.get(request.getBackup().getName()); + if (bck != null) { + if (request.getUpdateMask().getPathsList().contains(EXPIRE_TIME_MASK)) { + bck.expireTime = request.getBackup().getExpireTime(); + } + responseObserver.onNext( + Backup.newBuilder() + .setName(bck.name) + .setCreateTime(bck.createTime) + .setDatabase(bck.database) + .setExpireTime(bck.expireTime) + .setSizeBytes(bck.size) + .setState(Backup.State.READY) + .build()); + responseObserver.onCompleted(); + } else { + responseObserver.onError(Status.NOT_FOUND.asRuntimeException()); + } + } + + @Override + public void restoreDatabase( + RestoreDatabaseRequest request, StreamObserver responseObserver) { + requests.add(request); + try { + restoreDatabaseStartupExecutionTime.simulateExecutionTime(exceptions, false, freezeLock); + MockBackup bck = backups.get(request.getBackup()); + if (bck != null) { + String name = + String.format("%s/databases/%s", request.getParent(), request.getDatabaseId()); + MockDatabase db = + new MockDatabase( + name, + bck.ddl, + RestoreInfo.newBuilder() + .setBackupInfo(bck.toBackupInfo()) + .setSourceType(RestoreSourceType.BACKUP) + .build()); + if (databases.putIfAbsent(name, db) == null) { + bck.referencingDatabases.add(db.name); + Operation optimizeOperation = + Operation.newBuilder() + .setDone(false) + .setName(operations.generateOperationName(name)) + .setMetadata( + Any.pack( + OptimizeRestoredDatabaseMetadata.newBuilder() + .setName(name) + .setProgress( + OperationProgress.newBuilder() + .setStartTime(currentTime()) + .setProgressPercent(0) + .build()) + .build())) + .setResponse(Any.pack(db.toProto())) + .build(); + RestoreDatabaseMetadata metadata = + RestoreDatabaseMetadata.newBuilder() + .setBackupInfo(bck.toBackupInfo()) + .setName(name) + .setProgress( + 
OperationProgress.newBuilder() + .setStartTime(currentTime()) + .setProgressPercent(0) + .build()) + .setOptimizeDatabaseOperationName(optimizeOperation.getName()) + .setSourceType(RestoreSourceType.BACKUP) + .build(); + Operation operation = + Operation.newBuilder() + .setMetadata(Any.pack(metadata)) + .setResponse(Any.pack(db.toProto())) + .setDone(false) + .setName(operations.generateOperationName(name)) + .build(); + operations.addOperation( + operation, new RestoreDatabaseCallable(operation.getName(), name)); + operations.addOperation( + optimizeOperation, + new OptimizeDatabaseCallable(optimizeOperation.getName(), operation.getName(), name)); + restoreDatabaseResponseExecutionTime.simulateExecutionTime(exceptions, false, freezeLock); + responseObserver.onNext(operation); + responseObserver.onCompleted(); + } else { + responseObserver.onError(Status.ALREADY_EXISTS.asRuntimeException()); + } + } else { + responseObserver.onError(Status.NOT_FOUND.asRuntimeException()); + } + } catch (Throwable t) { + responseObserver.onError(t); + } + } + + @Override + public void getIamPolicy(GetIamPolicyRequest request, StreamObserver responseObserver) { + requests.add(request); + Policy policy = policies.get(request.getResource()); + if (policy != null) { + responseObserver.onNext(policy); + } else { + responseObserver.onNext(Policy.getDefaultInstance()); + } + responseObserver.onCompleted(); + } + + @Override + public void setIamPolicy(SetIamPolicyRequest request, StreamObserver responseObserver) { + requests.add(request); + policies.put(request.getResource(), request.getPolicy()); + responseObserver.onNext(request.getPolicy()); + responseObserver.onCompleted(); + } + + @Override + public void testIamPermissions( + TestIamPermissionsRequest request, + StreamObserver responseObserver) { + requests.add(request); + // Just return the same permissions as in the request, as we don't have any credentials. 
+ responseObserver.onNext( + TestIamPermissionsResponse.newBuilder() + .addAllPermissions(request.getPermissionsList()) + .build()); + responseObserver.onCompleted(); + } + + @Override + public List getRequests() { + return new ArrayList<>(requests); + } + + public void clearRequests() { + requests.clear(); + } + + public int countRequestsOfType(final Class type) { + return Collections2.filter(getRequests(), input -> input.getClass().equals(type)).size(); + } + + @Override + public void addResponse(AbstractMessage response) { + throw new UnsupportedOperationException(); + } + + @Override + public void addException(Exception exception) { + exceptions.add(exception); + } + + public void addFilterMatches(String filter, String... names) { + Set matches = filterMatches.computeIfAbsent(filter, k -> new HashSet<>()); + matches.addAll(Arrays.asList(names)); + } + + public void clearFilterMatches() { + filterMatches.clear(); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return bindService(); + } + + @Override + public void reset() { + requests.clear(); + exceptions.clear(); + policies.clear(); + databases.clear(); + backups.clear(); + filterMatches.clear(); + } + + public void removeAllExecutionTimes() { + createBackupStartupExecutionTime = SimulatedExecutionTime.none(); + createBackupResponseExecutionTime = SimulatedExecutionTime.none(); + createBackupOperationExecutionTime = 0L; + createDatabaseStartupExecutionTime = SimulatedExecutionTime.none(); + createDatabaseResponseExecutionTime = SimulatedExecutionTime.none(); + restoreDatabaseStartupExecutionTime = SimulatedExecutionTime.none(); + restoreDatabaseResponseExecutionTime = SimulatedExecutionTime.none(); + restoreDatabaseOperationExecutionTime = 0L; + } + + private Timestamp currentTime() { + return Timestamp.newBuilder().setSeconds(System.currentTimeMillis() * 1000L).build(); + } + + public void setCreateBackupStartupExecutionTime(SimulatedExecutionTime exec) { + 
this.createBackupStartupExecutionTime = exec; + } + + public void setCreateBackupResponseExecutionTime(SimulatedExecutionTime exec) { + this.createBackupResponseExecutionTime = exec; + } + + public void setCreateDatabaseStartupExecutionTime(SimulatedExecutionTime exec) { + this.createDatabaseStartupExecutionTime = exec; + } + + public void setCreateDatabaseResponseExecutionTime(SimulatedExecutionTime exec) { + this.createDatabaseResponseExecutionTime = exec; + } + + public void setRestoreDatabaseStartupExecutionTime(SimulatedExecutionTime exec) { + this.restoreDatabaseStartupExecutionTime = exec; + } + + public void setRestoreDatabaseResponseExecutionTime(SimulatedExecutionTime exec) { + this.restoreDatabaseResponseExecutionTime = exec; + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockDatabaseAdminServiceImplTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockDatabaseAdminServiceImplTest.java new file mode 100644 index 000000000000..637f97cb097d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockDatabaseAdminServiceImplTest.java @@ -0,0 +1,518 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.CancelledException; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StatusCode.Code; +import com.google.cloud.spanner.OperationFutureUtil.FakeStatusCode; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupOperationsPagedResponse; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupsPagedResponse; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseOperationsPagedResponse; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabasesPagedResponse; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminSettings; +import com.google.iam.v1.Binding; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.Empty; +import com.google.protobuf.FieldMask; +import com.google.protobuf.Timestamp; +import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.CreateBackupMetadata; +import com.google.spanner.admin.database.v1.CreateBackupRequest; +import 
com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; +import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; +import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; +import java.io.IOException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import org.hamcrest.BaseMatcher; +import org.hamcrest.Description; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class MockDatabaseAdminServiceImplTest { + @Rule public ExpectedException exception = ExpectedException.none(); + + private static class ApiExceptionMatcher extends BaseMatcher { + private final StatusCode.Code expectedStatus; + + private static ApiExceptionMatcher forCode(StatusCode.Code status) { + return new ApiExceptionMatcher(status); + } + + private ApiExceptionMatcher(StatusCode.Code expectedStatus) { + this.expectedStatus = checkNotNull(expectedStatus); + } + + @Override + public boolean matches(Object item) { + ApiException ae = null; + if (item instanceof ExecutionException) { + ExecutionException e = (ExecutionException) item; + if (e.getCause() instanceof ApiException) { + ae = (ApiException) e.getCause(); + } + } else if (item instanceof ApiException) { + ae = (ApiException) item; + } + if (ae != null) { + return ae.getStatusCode().getCode() 
== expectedStatus; + } + return false; + } + + @Override + public void describeTo(Description description) { + description.appendText("ApiException[" + expectedStatus + "]"); + } + } + + private static final String TEST_PARENT = "projects/my-project/instances/my-instance"; + private static final String TEST_DB_NAME = String.format("%s/databases/test-db", TEST_PARENT); + private static final String TEST_BCK_NAME = String.format("%s/backups/test-bck", TEST_PARENT); + private static MockOperationsServiceImpl mockOperations; + private static MockDatabaseAdminServiceImpl mockDatabaseAdmin; + private static MockServiceHelper serviceHelper; + private DatabaseAdminClient client; + private LocalChannelProvider channelProvider; + + @BeforeClass + public static void startStaticServer() { + mockOperations = new MockOperationsServiceImpl(); + mockDatabaseAdmin = new MockDatabaseAdminServiceImpl(mockOperations); + serviceHelper = + new MockServiceHelper("in-process-1", Arrays.asList(mockOperations, mockDatabaseAdmin)); + serviceHelper.start(); + } + + @AfterClass + public static void stopServer() { + serviceHelper.stop(); + } + + @Before + public void setUp() throws IOException { + serviceHelper.reset(); + channelProvider = serviceHelper.createChannelProvider(); + DatabaseAdminSettings.Builder settingsBuilder = + DatabaseAdminSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()); + settingsBuilder + .createBackupOperationSettings() + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(20L)) + .setInitialRetryDelayDuration(Duration.ofMillis(10L)) + .setMaxRetryDelayDuration(Duration.ofMillis(150L)) + .setMaxRpcTimeoutDuration(Duration.ofMillis(150L)) + .setMaxAttempts(10) + .setTotalTimeoutDuration(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.3) + .setRpcTimeoutMultiplier(1.3) + .build())); + settingsBuilder + 
.createDatabaseOperationSettings() + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(20L)) + .setInitialRetryDelayDuration(Duration.ofMillis(10L)) + .setMaxRetryDelayDuration(Duration.ofMillis(150L)) + .setMaxRpcTimeoutDuration(Duration.ofMillis(150L)) + .setMaxAttempts(10) + .setTotalTimeoutDuration(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.3) + .setRpcTimeoutMultiplier(1.3) + .build())); + settingsBuilder + .restoreDatabaseOperationSettings() + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(20L)) + .setInitialRetryDelayDuration(Duration.ofMillis(10L)) + .setMaxRetryDelayDuration(Duration.ofMillis(150L)) + .setMaxRpcTimeoutDuration(Duration.ofMillis(150L)) + .setMaxAttempts(10) + .setTotalTimeoutDuration(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.3) + .setRpcTimeoutMultiplier(1.3) + .build())); + client = DatabaseAdminClient.create(settingsBuilder.build()); + } + + @After + public void tearDown() { + client.close(); + } + + private Database createTestDb() { + CreateDatabaseRequest request = + CreateDatabaseRequest.newBuilder() + .setCreateStatement("CREATE DATABASE `test-db`") + .addAllExtraStatements(Arrays.asList("CREATE TABLE FOO", "CREATE TABLE BAR")) + .setParent(TEST_PARENT) + .build(); + OperationFuture op = + client.createDatabaseOperationCallable().futureCall(request); + try { + return op.get(); + } catch (ExecutionException e) { + if (e.getCause() != null && e.getCause() instanceof RuntimeException) { + throw (RuntimeException) e.getCause(); + } + throw new RuntimeException(e); + } catch (InterruptedException e) { + throw new CancelledException(e, FakeStatusCode.of(Code.CANCELLED), false); + } + } + + @Test + public void createDatabase() { + Database db = createTestDb(); + assertThat(db.getName()).isEqualTo(TEST_DB_NAME); + } + + @Test + public void 
createDatabaseAlreadyExists() { + createTestDb(); + exception.expect(ApiExceptionMatcher.forCode(StatusCode.Code.ALREADY_EXISTS)); + createTestDb(); + } + + @Test + public void dropDatabase() { + createTestDb(); + Database db = client.getDatabase(TEST_DB_NAME); + assertThat(db.getName()).isEqualTo(TEST_DB_NAME); + client.dropDatabase(TEST_DB_NAME); + exception.expect(ApiExceptionMatcher.forCode(StatusCode.Code.NOT_FOUND)); + client.getDatabase(TEST_DB_NAME); + } + + @Test + public void dropDatabaseNotFound() { + exception.expect(ApiExceptionMatcher.forCode(StatusCode.Code.NOT_FOUND)); + client.dropDatabase(TEST_DB_NAME); + } + + @Test + public void getDatabase() { + createTestDb(); + Database db = client.getDatabase(TEST_DB_NAME); + assertThat(db.getName()).isEqualTo(TEST_DB_NAME); + } + + @Test + public void getDatabaseNotFound() { + exception.expect(ApiExceptionMatcher.forCode(StatusCode.Code.NOT_FOUND)); + client.getDatabase(TEST_DB_NAME); + } + + @Test + public void getDatabaseDdl() { + createTestDb(); + GetDatabaseDdlResponse ddl = client.getDatabaseDdl(TEST_DB_NAME); + assertThat(ddl.getStatementsList()).containsExactly("CREATE TABLE FOO", "CREATE TABLE BAR"); + } + + @Test + public void getDatabaseDdlNotFound() { + exception.expect(ApiExceptionMatcher.forCode(StatusCode.Code.NOT_FOUND)); + client.getDatabaseDdl(TEST_DB_NAME); + } + + @Test + public void listDatabases() { + createTestDb(); + ListDatabasesPagedResponse response = client.listDatabases(TEST_PARENT); + List databases = new ArrayList<>(); + for (Database db : response.iterateAll()) { + databases.add(db.getName()); + } + assertThat(databases).containsExactly(TEST_DB_NAME); + } + + @Test + public void listDatabaseOperations() { + createTestDb(); + ListDatabaseOperationsPagedResponse response = client.listDatabaseOperations(TEST_DB_NAME); + List operations = new ArrayList<>(); + for (Operation op : response.iterateAll()) { + operations.add(op); + } + assertThat(operations).hasSize(1); + } + + @Test + 
public void updateDatabaseDdl() throws InterruptedException, ExecutionException { + createTestDb(); + UpdateDatabaseDdlRequest request = + UpdateDatabaseDdlRequest.newBuilder() + .setDatabase(TEST_DB_NAME) + .addAllStatements(Arrays.asList("CREATE TABLE BAZ", "DROP TABLE FOO")) + .build(); + OperationFuture op = + client.updateDatabaseDdlOperationCallable().futureCall(request); + op.get(); + GetDatabaseDdlResponse response = client.getDatabaseDdl(TEST_DB_NAME); + assertThat(response.getStatementsList()) + .containsExactly( + "CREATE TABLE FOO", "CREATE TABLE BAR", "CREATE TABLE BAZ", "DROP TABLE FOO"); + } + + private Backup createTestBackup() { + CreateBackupRequest request = + CreateBackupRequest.newBuilder() + .setBackupId("test-bck") + .setBackup( + Backup.newBuilder() + .setDatabase(TEST_DB_NAME) + .setExpireTime( + Timestamp.newBuilder() + .setSeconds( + System.currentTimeMillis() * 1000L + + TimeUnit.MILLISECONDS.convert(7, TimeUnit.DAYS))) + .build()) + .setParent(TEST_PARENT) + .build(); + OperationFuture op = + client.createBackupOperationCallable().futureCall(request); + try { + return op.get(); + } catch (ExecutionException e) { + if (e.getCause() != null && e.getCause() instanceof RuntimeException) { + throw (RuntimeException) e.getCause(); + } + throw new RuntimeException(e); + } catch (InterruptedException e) { + throw new CancelledException(e, FakeStatusCode.of(Code.CANCELLED), false); + } + } + + @Test + public void createBackup() { + createTestDb(); + Backup bck = createTestBackup(); + assertThat(bck.getName()).isEqualTo(TEST_BCK_NAME); + } + + @Test + public void createBackupAlreadyExists() { + createTestDb(); + createTestBackup(); + exception.expect(ApiExceptionMatcher.forCode(StatusCode.Code.ALREADY_EXISTS)); + createTestBackup(); + } + + @Test + public void createBackupDatabaseDoesNotExist() { + exception.expect(ApiExceptionMatcher.forCode(StatusCode.Code.NOT_FOUND)); + createTestBackup(); + } + + @Test + public void deleteBackup() { + 
createTestDb(); + createTestBackup(); + Backup bck = client.getBackup(TEST_BCK_NAME); + assertThat(bck.getName()).isEqualTo(TEST_BCK_NAME); + client.deleteBackup(TEST_BCK_NAME); + exception.expect(ApiExceptionMatcher.forCode(StatusCode.Code.NOT_FOUND)); + client.getBackup(TEST_BCK_NAME); + } + + @Test + public void deleteBackupNotFound() { + exception.expect(ApiExceptionMatcher.forCode(StatusCode.Code.NOT_FOUND)); + client.deleteBackup(TEST_BCK_NAME); + } + + @Test + public void getBackup() { + createTestDb(); + createTestBackup(); + Backup bck = client.getBackup(TEST_BCK_NAME); + assertThat(bck.getName()).isEqualTo(TEST_BCK_NAME); + } + + @Test + public void getBackupNotFound() { + exception.expect(ApiExceptionMatcher.forCode(StatusCode.Code.NOT_FOUND)); + client.getBackup(TEST_BCK_NAME); + } + + @Test + public void listBackups() { + createTestDb(); + createTestBackup(); + ListBackupsPagedResponse response = client.listBackups(TEST_PARENT); + List backups = new ArrayList<>(); + for (Backup bck : response.iterateAll()) { + backups.add(bck.getName()); + } + assertThat(backups).containsExactly(TEST_BCK_NAME); + } + + @Test + public void listBackupOperations() { + createTestDb(); + createTestBackup(); + ListBackupOperationsPagedResponse response = client.listBackupOperations(TEST_BCK_NAME); + List operations = new ArrayList<>(); + for (Operation op : response.iterateAll()) { + operations.add(op); + } + assertThat(operations).hasSize(1); + } + + @Test + public void updateBackup() { + createTestDb(); + Backup backup = createTestBackup(); + Backup toBeUpdated = + backup.toBuilder().setExpireTime(Timestamp.newBuilder().setSeconds(1000L).build()).build(); + Backup updated = + client.updateBackup(toBeUpdated, FieldMask.newBuilder().addPaths("expire_time").build()); + assertThat(updated.getExpireTime()).isEqualTo(toBeUpdated.getExpireTime()); + assertThat(backup.getExpireTime()).isNotEqualTo(updated.getExpireTime()); + } + + @Test + public void restoreDatabase() throws 
InterruptedException, ExecutionException { + createTestDb(); + createTestBackup(); + RestoreDatabaseRequest request = + RestoreDatabaseRequest.newBuilder() + .setBackup(TEST_BCK_NAME) + .setDatabaseId("restored-db") + .setParent(TEST_PARENT) + .build(); + OperationFuture op = + client.restoreDatabaseOperationCallable().futureCall(request); + Database restoredDb = op.get(); + assertThat(restoredDb.getName()) + .isEqualTo(String.format("%s/databases/%s", TEST_PARENT, "restored-db")); + assertThat(restoredDb.getRestoreInfo().getBackupInfo().getBackup()).isEqualTo(TEST_BCK_NAME); + assertThat(restoredDb.getRestoreInfo().getBackupInfo().getSourceDatabase()) + .isEqualTo(TEST_DB_NAME); + } + + @Test + public void restoreDatabaseNotFound() throws InterruptedException, ExecutionException { + createTestDb(); + RestoreDatabaseRequest request = + RestoreDatabaseRequest.newBuilder() + .setBackup(TEST_BCK_NAME) + .setDatabaseId("restored-db") + .setParent(TEST_PARENT) + .build(); + OperationFuture op = + client.restoreDatabaseOperationCallable().futureCall(request); + exception.expect(ApiExceptionMatcher.forCode(StatusCode.Code.NOT_FOUND)); + op.get(); + } + + @Test + public void restoreDatabaseAlreadyExists() throws InterruptedException, ExecutionException { + createTestDb(); + createTestBackup(); + RestoreDatabaseRequest request = + RestoreDatabaseRequest.newBuilder() + .setBackup(TEST_BCK_NAME) + .setDatabaseId("test-db") + .setParent(TEST_PARENT) + .build(); + OperationFuture op = + client.restoreDatabaseOperationCallable().futureCall(request); + exception.expect(ApiExceptionMatcher.forCode(StatusCode.Code.ALREADY_EXISTS)); + op.get(); + } + + @Test + public void testIAMPolicy() { + TestIamPermissionsResponse response = + client.testIamPermissions( + TestIamPermissionsRequest.newBuilder() + .setResource(TEST_PARENT) + .addPermissions("spanner.databases.select") + .addPermissions("spanner.databases.write") + .build()); + assertThat(response.getPermissionsList()) + 
.containsExactly("spanner.databases.select", "spanner.databases.write"); + + GetIamPolicyRequest request = GetIamPolicyRequest.newBuilder().setResource(TEST_PARENT).build(); + Policy policy = client.getIamPolicy(request); + assertThat(policy).isNotNull(); + + Policy newPolicy = + Policy.newBuilder() + .addBindings( + Binding.newBuilder().setRole("roles/admin").addMembers("user:joe@example.com")) + .setEtag(policy.getEtag()) + .build(); + client.setIamPolicy( + SetIamPolicyRequest.newBuilder().setResource(TEST_PARENT).setPolicy(newPolicy).build()); + policy = client.getIamPolicy(TEST_PARENT); + assertThat(policy).isEqualTo(newPolicy); + + response = + client.testIamPermissions( + TestIamPermissionsRequest.newBuilder() + .setResource(TEST_PARENT) + .addPermissions("spanner.databases.select") + .addPermissions("spanner.databases.update") + .build()); + assertThat(response.getPermissionsList()) + .containsExactly("spanner.databases.select", "spanner.databases.update"); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockInstanceAdminServiceImpl.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockInstanceAdminServiceImpl.java new file mode 100644 index 000000000000..0ea3699b3d53 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockInstanceAdminServiceImpl.java @@ -0,0 +1,91 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.protobuf.AbstractMessage; +import com.google.spanner.admin.instance.v1.InstanceAdminGrpc.InstanceAdminImplBase; +import io.grpc.ServerServiceDefinition; +import io.grpc.stub.StreamObserver; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +public class MockInstanceAdminServiceImpl extends InstanceAdminImplBase implements MockGrpcService { + private ConcurrentMap policies = new ConcurrentHashMap<>(); + + @Override + public void getIamPolicy(GetIamPolicyRequest request, StreamObserver responseObserver) { + Policy policy = policies.get(request.getResource()); + if (policy != null) { + responseObserver.onNext(policy); + } else { + responseObserver.onNext(Policy.getDefaultInstance()); + } + responseObserver.onCompleted(); + } + + @Override + public void setIamPolicy(SetIamPolicyRequest request, StreamObserver responseObserver) { + policies.put(request.getResource(), request.getPolicy()); + responseObserver.onNext(request.getPolicy()); + responseObserver.onCompleted(); + } + + @Override + public void testIamPermissions( + TestIamPermissionsRequest request, + StreamObserver responseObserver) { + // Just return the same permissions as in the request, as we don't have any credentials. 
+ responseObserver.onNext( + TestIamPermissionsResponse.newBuilder() + .addAllPermissions(request.getPermissionsList()) + .build()); + responseObserver.onCompleted(); + } + + @Override + public List getRequests() { + return Collections.emptyList(); + } + + @Override + public void addResponse(AbstractMessage response) { + throw new UnsupportedOperationException(); + } + + @Override + public void addException(Exception exception) { + throw new UnsupportedOperationException(); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return bindService(); + } + + @Override + public void reset() { + policies.clear(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockOperationsServiceImpl.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockOperationsServiceImpl.java new file mode 100644 index 000000000000..35d6bcdce304 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockOperationsServiceImpl.java @@ -0,0 +1,187 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.google.longrunning.CancelOperationRequest; +import com.google.longrunning.DeleteOperationRequest; +import com.google.longrunning.GetOperationRequest; +import com.google.longrunning.ListOperationsRequest; +import com.google.longrunning.ListOperationsResponse; +import com.google.longrunning.Operation; +import com.google.longrunning.OperationsGrpc.OperationsImplBase; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Empty; +import io.grpc.ServerServiceDefinition; +import io.grpc.Status; +import io.grpc.stub.StreamObserver; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicLong; + +public class MockOperationsServiceImpl extends OperationsImplBase implements MockGrpcService { + private final AtomicLong operationCounter = new AtomicLong(); + private final ConcurrentMap operations = new ConcurrentHashMap<>(); + private final ConcurrentMap> futures = new ConcurrentHashMap<>(); + private final ExecutorService executor = + Executors.newScheduledThreadPool( + 8, + new ThreadFactoryBuilder() + .setThreadFactory(MoreExecutors.platformThreadFactory()) + .setNameFormat("mock-operations-%d") + .setDaemon(true) + .build()); + + String generateOperationName(String parent) { + return String.format("%s/operations/%d", parent, operationCounter.incrementAndGet()); + } + + Future addOperation(Operation operation, Callable task) { + operations.put(operation.getName(), operation); + Future future = executor.submit(task); + 
futures.put(operation.getName(), future); + return future; + } + + Operation get(String name) { + return operations.get(name); + } + + void update(Operation operation) { + Operation existing = operations.get(operation.getName()); + if (!existing.getDone()) { + operations.put(operation.getName(), operation); + } + } + + Iterable iterable() { + return operations.values(); + } + + @Override + public void listOperations( + ListOperationsRequest request, StreamObserver responseObserver) { + ListOperationsResponse.Builder builder = ListOperationsResponse.newBuilder(); + for (Operation op : iterable()) { + if (op.getName().startsWith(request.getName())) { + builder.addOperations(op); + } + } + responseObserver.onNext(builder.build()); + responseObserver.onCompleted(); + } + + @Override + public void getOperation( + GetOperationRequest request, StreamObserver responseObserver) { + Operation op = operations.get(request.getName()); + if (op != null) { + responseObserver.onNext(op); + responseObserver.onCompleted(); + } else { + responseObserver.onError(Status.NOT_FOUND.asRuntimeException()); + } + } + + @Override + public void deleteOperation( + DeleteOperationRequest request, StreamObserver responseObserver) { + Operation op = operations.get(request.getName()); + if (op != null) { + if (op.getDone()) { + if (operations.remove(request.getName(), op)) { + futures.remove(request.getName()); + responseObserver.onNext(Empty.getDefaultInstance()); + responseObserver.onCompleted(); + } else { + responseObserver.onError(Status.NOT_FOUND.asRuntimeException()); + } + } else { + responseObserver.onError( + Status.FAILED_PRECONDITION + .withDescription("Operation is not done") + .asRuntimeException()); + } + } else { + responseObserver.onError(Status.NOT_FOUND.asRuntimeException()); + } + } + + @Override + public void cancelOperation( + CancelOperationRequest request, StreamObserver responseObserver) { + Operation op = operations.get(request.getName()); + Future fut = 
futures.get(request.getName()); + if (op != null && fut != null) { + if (!op.getDone()) { + operations.put( + request.getName(), + op.toBuilder() + .clearResponse() + .setDone(true) + .setError( + com.google.rpc.Status.newBuilder() + .setCode(Status.CANCELLED.getCode().value()) + .setMessage("Operation was cancelled") + .build()) + .build()); + fut.cancel(true); + } + responseObserver.onNext(Empty.getDefaultInstance()); + responseObserver.onCompleted(); + } else { + responseObserver.onError(Status.NOT_FOUND.asRuntimeException()); + } + } + + @Override + public List getRequests() { + return Collections.emptyList(); + } + + @Override + public void addResponse(AbstractMessage response) { + throw new UnsupportedOperationException(); + } + + @Override + public void addException(Exception exception) { + throw new UnsupportedOperationException(); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return bindService(); + } + + @Override + public void reset() { + for (Future fut : futures.values()) { + fut.cancel(true); + } + operations.clear(); + futures.clear(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockSpannerServiceImpl.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockSpannerServiceImpl.java new file mode 100644 index 000000000000..782f54d30c2a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockSpannerServiceImpl.java @@ -0,0 +1,2619 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.spanner.AbstractResultSet.LazyByteArray; +import com.google.cloud.spanner.TransactionRunnerImpl.TransactionContextImpl; +import com.google.common.base.Optional; +import com.google.common.base.Preconditions; +import com.google.common.base.Predicate; +import com.google.common.base.Stopwatch; +import com.google.common.base.Throwables; +import com.google.common.collect.Iterables; +import com.google.common.util.concurrent.Uninterruptibles; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ByteString; +import com.google.protobuf.Duration; +import com.google.protobuf.Empty; +import com.google.protobuf.ListValue; +import com.google.protobuf.Timestamp; +import com.google.protobuf.Value.KindCase; +import com.google.rpc.Code; +import com.google.rpc.ResourceInfo; +import com.google.rpc.RetryInfo; +import com.google.spanner.v1.BatchCreateSessionsRequest; +import com.google.spanner.v1.BatchCreateSessionsResponse; +import com.google.spanner.v1.BatchWriteRequest; +import com.google.spanner.v1.BatchWriteResponse; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.CommitResponse; +import com.google.spanner.v1.CreateSessionRequest; +import com.google.spanner.v1.DeleteSessionRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import 
com.google.spanner.v1.ExecuteBatchDmlResponse; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.GetSessionRequest; +import com.google.spanner.v1.ListSessionsRequest; +import com.google.spanner.v1.ListSessionsResponse; +import com.google.spanner.v1.MultiplexedSessionPrecommitToken; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.Partition; +import com.google.spanner.v1.PartitionOptions; +import com.google.spanner.v1.PartitionQueryRequest; +import com.google.spanner.v1.PartitionReadRequest; +import com.google.spanner.v1.PartitionResponse; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.RequestOptions; +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.Session; +import com.google.spanner.v1.SpannerGrpc.SpannerImplBase; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.Transaction; +import com.google.spanner.v1.TransactionOptions; +import com.google.spanner.v1.TransactionOptions.ModeCase; +import com.google.spanner.v1.TransactionOptions.ReadWrite; +import com.google.spanner.v1.TransactionSelector; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeAnnotationCode; +import com.google.spanner.v1.TypeCode; +import io.grpc.Metadata; +import io.grpc.ServerServiceDefinition; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.grpc.protobuf.ProtoUtils; +import io.grpc.protobuf.lite.ProtoLiteUtils; +import io.grpc.stub.StreamObserver; +import java.math.BigDecimal; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.Deque; +import java.util.Iterator; +import java.util.LinkedList; +import 
java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Queue; +import java.util.Random; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; +import java.util.stream.LongStream; + +/** + * In-process mock implementation of a Cloud Spanner server. The user must specify the results the + * server should return for queries and updates by calling {@link + * MockSpannerServiceImpl#putStatementResult(StatementResult)}. Note that the SQL statements that + * are put as statement results are not parsed and are only used as a key know which result to + * return. This also means that small stylistic differences between two SQL statements will be + * regarded as two different SQL statements by the mock server. + * + *

Session and transaction creation is automatically managed and does not need to be mocked. + * + *

Usage: + * + *

{@code
+ * Statement statementSelect1 = Statement.of("SELECT 1 AS COL1");
+ * com.google.spanner.v1.ResultSetMetadata select1Metadata =
+ *     com.google.spanner.v1.ResultSetMetadata.newBuilder()
+ *         .setRowType(
+ *             StructType.newBuilder()
+ *                 .addFields(
+ *                     Field.newBuilder()
+ *                         .setName("COL1")
+ *                         .setType(
+ *                             com.google.spanner.v1.Type.newBuilder()
+ *                                 .setCode(TypeCode.INT64)
+ *                                 .build())
+ *                         .build())
+ *                 .build())
+ *         .build();
+ * com.google.spanner.v1.ResultSet select1ResultSet =
+ *     com.google.spanner.v1.ResultSet.newBuilder()
+ *         .addRows(
+ *             ListValue.newBuilder()
+ *                 .addValues(com.google.protobuf.Value.newBuilder().setStringValue("1").build())
+ *                 .build())
+ *         .setMetadata(select1Metadata)
+ *         .build();
+ * Statement updateStatement = Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=2");
+ * MockSpannerServiceImpl mockSpanner = new MockSpannerServiceImpl();
+ * mockSpanner.putStatementResult(StatementResult.of(statementSelect1, select1ResultSet));
+ * mockSpanner.putStatementResult(StatementResult.of(updateStatement, 1L));
+ * MockServiceHelper serviceHelper =
+ *     new MockServiceHelper("in-process-1", Arrays.asList(mockSpanner));
+ * serviceHelper.start();
+ *
+ * serviceHelper.reset();
+ * TransportChannelProvider channelProvider = serviceHelper.createChannelProvider();
+ * SpannerSettings settings =
+ *     SpannerSettings.newBuilder()
+ *         .setTransportChannelProvider(channelProvider)
+ *         .setCredentialsProvider(NoCredentialsProvider.create())
+ *         .build();
+ * SpannerClient spannerClient = SpannerClient.create(settings);
+ * Spanner spanner =
+ *     SpannerOptions.newBuilder()
+ *         .setChannelProvider(channelProvider)
+ *         .setCredentials(NoCredentials.getInstance())
+ *         .build()
+ *         .getService();
+ * DatabaseClient dbClient =
+ *     spanner.getDatabaseClient(DatabaseId.of("PROJECT_ID", "INSTANCE_ID", "DATABASE_ID"));
+ * try (ResultSet resultSet =
+ *     dbClient.singleUse().executeQuery(Statement.of("SELECT 1 AS COL1"))) {
+ *   while (resultSet.next()) {
+ *     System.out.println("COL1: " + resultSet.getLong("COL1"));
+ *   }
+ * }
+ * long updateCount =
+ *     dbClient
+ *         .readWriteTransaction()
+ *         .run(transaction ->
+ *             transaction.executeUpdate(Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=2"))
+ *          );
+ * System.out.println("Update count: " + updateCount);
+ * spannerClient.close();
+ * }
+ */ +public class MockSpannerServiceImpl extends SpannerImplBase implements MockGrpcService { + private static class PartialResultSetsIterator implements Iterator { + private static final int MAX_ROWS_IN_CHUNK = 1; + + private final ResultSet resultSet; + private boolean hasNext; + private boolean first = true; + private int currentRow = 0; + private final boolean setPrecommitToken; + private final ByteString transactionId; + + private PartialResultSetsIterator( + ResultSet resultSet, boolean setPrecommitToken, ByteString transactionId) { + this.resultSet = resultSet; + this.hasNext = true; + this.setPrecommitToken = setPrecommitToken; + this.transactionId = transactionId; + } + + @Override + public boolean hasNext() { + return hasNext; + } + + @Override + public PartialResultSet next() { + PartialResultSet.Builder builder = PartialResultSet.newBuilder(); + if (first) { + builder.setMetadata(resultSet.getMetadata()); + first = false; + } + int recordCount = 0; + while (recordCount < MAX_ROWS_IN_CHUNK && currentRow < resultSet.getRowsCount()) { + builder.addAllValues(resultSet.getRows(currentRow).getValuesList()); + builder.setResumeToken(ByteString.copyFromUtf8(String.format("%010d", currentRow))); + recordCount++; + currentRow++; + } + if (currentRow == resultSet.getRowsCount()) { + builder.setStats(resultSet.getStats()); + } + builder.setResumeToken(ByteString.copyFromUtf8(String.format("%09d", currentRow))); + hasNext = currentRow < resultSet.getRowsCount(); + if (this.setPrecommitToken) { + builder.setPrecommitToken(getPartialResultSetPrecommitToken(this.transactionId)); + } + return builder.build(); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + } + + /** The result of a statement that is executed on a {@link MockSpannerServiceImpl}. 
*/ + public static class StatementResult { + private enum StatementResultType { + RESULT_SET, + UPDATE_COUNT, + EXCEPTION + } + + private final StatementResultType type; + private final Statement statement; + private final Long updateCount; + private final Deque resultSets; + private final StatusRuntimeException exception; + + /** Creates a {@link StatementResult} for a query that returns a {@link ResultSet}. */ + public static StatementResult query(Statement statement, ResultSet resultSet) { + return new StatementResult(statement, resultSet); + } + + /** + * Creates a {@link StatementResult} for a query that returns a {@link ResultSet} the first + * time, and a different {@link ResultSet} for all subsequent calls. + */ + public static StatementResult queryAndThen( + Statement statement, ResultSet resultSet, ResultSet next) { + return new StatementResult(statement, resultSet, next); + } + + /** Creates a {@link StatementResult} for a read request. */ + public static StatementResult read( + String table, KeySet keySet, Iterable columns, ResultSet resultSet) { + return new StatementResult(table, keySet, columns, resultSet); + } + + /** Creates a {@link StatementResult} for a DML statement that returns an update count. */ + public static StatementResult update(Statement statement, long updateCount) { + return new StatementResult(statement, updateCount); + } + + /** + * Creates a {@link StatementResult} for a DML statement with returning clause that returns a + * ResultSet. + */ + public static StatementResult updateReturning(Statement statement, ResultSet resultSet) { + return new StatementResult(statement, resultSet); + } + + /** Creates a {@link StatementResult} for statement that should return an error. */ + public static StatementResult exception(Statement statement, StatusRuntimeException exception) { + return new StatementResult(statement, exception); + } + + /** Creates a result for the query that detects the dialect that is used for the database. 
*/ + public static StatementResult detectDialectResult(Dialect resultDialect) { + return StatementResult.query( + MultiplexedSessionDatabaseClient.DETERMINE_DIALECT_STATEMENT, + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("DIALECT") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue(resultDialect.toString()) + .build()) + .build()) + .build()); + } + + private static class KeepLastElementDeque extends LinkedList { + private static KeepLastElementDeque singleton(E item) { + return new KeepLastElementDeque<>(Collections.singleton(item)); + } + + private static KeepLastElementDeque of(E first, E second) { + return new KeepLastElementDeque<>(Arrays.asList(first, second)); + } + + private KeepLastElementDeque(Collection coll) { + super(coll); + } + + @Override + public E pop() { + return this.size() == 1 ? super.peek() : super.pop(); + } + } + + /** + * Creates a {@link Statement} for a read statement. This {@link Statement} can be used to mock + * a result for a read request. 
+ */ + public static Statement createReadStatement( + String table, KeySet keySet, Iterable columns) { + Preconditions.checkNotNull(table); + Preconditions.checkNotNull(keySet); + Preconditions.checkNotNull(columns); + Preconditions.checkArgument( + isValidKeySet(keySet), + "Currently only KeySet.all() and KeySet.singleKey(Key.of()) are supported for read" + + " statements"); + StringBuilder builder = new StringBuilder("SELECT "); + boolean first = true; + for (String col : columns) { + if (!first) { + builder.append(", "); + } + builder.append(col); + first = false; + } + builder.append(" FROM ").append(table); + if (keySet.isAll()) { + builder.append(" WHERE 1=1"); + } else { + builder.append(" WHERE ID=1"); + } + return Statement.of(builder.toString()); + } + + private static boolean isValidKeySet(KeySet keySet) { + if (keySet.isAll()) { + return true; + } + int keys = 0; + for (Key key : keySet.getKeys()) { + keys++; + if (key.size() != 0) { + return false; + } + } + return keys == 1; + } + + private StatementResult(Statement statement, Long updateCount) { + this.statement = Preconditions.checkNotNull(statement); + this.updateCount = Preconditions.checkNotNull(updateCount); + this.resultSets = null; + this.exception = null; + this.type = StatementResultType.UPDATE_COUNT; + } + + private StatementResult(Statement statement, ResultSet resultSet) { + this.statement = Preconditions.checkNotNull(statement); + this.resultSets = KeepLastElementDeque.singleton(Preconditions.checkNotNull(resultSet)); + this.updateCount = null; + this.exception = null; + this.type = StatementResultType.RESULT_SET; + } + + private StatementResult(Statement statement, ResultSet resultSet, ResultSet andThen) { + this.statement = Preconditions.checkNotNull(statement); + this.resultSets = + KeepLastElementDeque.of( + Preconditions.checkNotNull(resultSet), Preconditions.checkNotNull(andThen)); + this.updateCount = null; + this.exception = null; + this.type = StatementResultType.RESULT_SET; + } 
+ + private StatementResult( + String table, KeySet keySet, Iterable columns, ResultSet resultSet) { + this.statement = createReadStatement(table, keySet, columns); + this.resultSets = KeepLastElementDeque.singleton(Preconditions.checkNotNull(resultSet)); + this.updateCount = null; + this.exception = null; + this.type = StatementResultType.RESULT_SET; + } + + private StatementResult(Statement statement, StatusRuntimeException exception) { + this.statement = Preconditions.checkNotNull(statement); + this.exception = Preconditions.checkNotNull(exception); + this.resultSets = null; + this.updateCount = null; + this.type = StatementResultType.EXCEPTION; + } + + private StatementResultType getType() { + return type; + } + + private ResultSet getResultSet() { + Preconditions.checkState( + type == StatementResultType.RESULT_SET, + "This statement result does not contain a result set"); + return resultSets.pop(); + } + + private Long getUpdateCount() { + Preconditions.checkState( + type == StatementResultType.UPDATE_COUNT, + "This statement result does not contain an update count"); + return updateCount; + } + + private StatusRuntimeException getException() { + Preconditions.checkState( + type == StatementResultType.EXCEPTION, + "This statement result does not contain an exception"); + return exception; + } + } + + /** Class for simulating execution time of server calls. */ + public static class SimulatedExecutionTime { + private static final Random RANDOM = new Random(); + private final int minimumExecutionTime; + private final int randomExecutionTime; + private final Queue exceptions; + private final boolean stickyException; + private final Queue streamIndices; + + /** + * Creates a simulated execution time that will always be somewhere between + * minimumExecutionTime+randomExecutionTime milliseconds long. + * + * @param minimumExecutionTime The minimum number of milliseconds the execution of the method + * should be. 
+ * @param randomExecutionTime The maximum random number of milliseconds that should be added to + * the minimum execution time. + * @return a {@link SimulatedExecutionTime} that can be set as the execution time of a server + * call on a {@link MockSpannerServiceImpl}. + */ + public static SimulatedExecutionTime ofMinimumAndRandomTime( + int minimumExecutionTime, int randomExecutionTime) { + return new SimulatedExecutionTime(minimumExecutionTime, randomExecutionTime); + } + + public static SimulatedExecutionTime none() { + return new SimulatedExecutionTime(0, 0); + } + + public static SimulatedExecutionTime ofException(Exception exception) { + return new SimulatedExecutionTime( + 0, 0, Collections.singletonList(exception), false, Collections.emptySet()); + } + + public static SimulatedExecutionTime ofStickyException(Exception exception) { + return new SimulatedExecutionTime( + 0, 0, Collections.singletonList(exception), true, Collections.emptySet()); + } + + public static SimulatedExecutionTime ofStreamException(Exception exception, long streamIndex) { + return new SimulatedExecutionTime( + 0, 0, Collections.singletonList(exception), false, Collections.singleton(streamIndex)); + } + + public static SimulatedExecutionTime stickyDatabaseNotFoundException(String name) { + return ofStickyException( + SpannerExceptionFactoryTest.newStatusDatabaseNotFoundException(name)); + } + + public static SimulatedExecutionTime ofExceptions(Collection exceptions) { + return new SimulatedExecutionTime(0, 0, exceptions, false, Collections.emptySet()); + } + + public static SimulatedExecutionTime ofMinimumAndRandomTimeAndExceptions( + int minimumExecutionTime, + int randomExecutionTime, + Collection exceptions) { + return new SimulatedExecutionTime( + minimumExecutionTime, randomExecutionTime, exceptions, false, Collections.emptySet()); + } + + private SimulatedExecutionTime(int minimum, int random) { + this(minimum, random, Collections.emptyList(), false, Collections.emptySet()); + } 
+ + private SimulatedExecutionTime( + int minimum, + int random, + Collection exceptions, + boolean stickyException, + Collection streamIndices) { + Preconditions.checkArgument(minimum >= 0, "Minimum execution time must be >= 0"); + Preconditions.checkArgument(random >= 0, "Random execution time must be >= 0"); + this.minimumExecutionTime = minimum; + this.randomExecutionTime = random; + this.exceptions = new LinkedList<>(exceptions); + this.stickyException = stickyException; + this.streamIndices = new LinkedList<>(streamIndices); + } + + void simulateExecutionTime( + Queue globalExceptions, + boolean stickyGlobalExceptions, + CountDownLatch freezeLock) { + Uninterruptibles.awaitUninterruptibly(freezeLock); + if (minimumExecutionTime > 0 || randomExecutionTime > 0) { + Uninterruptibles.sleepUninterruptibly( + (randomExecutionTime == 0 ? 0 : RANDOM.nextInt(randomExecutionTime)) + + minimumExecutionTime, + TimeUnit.MILLISECONDS); + } + checkException(globalExceptions, stickyGlobalExceptions); + if (streamIndices.isEmpty()) { + checkException(this.exceptions, stickyException); + } + } + + private static void checkException(Queue exceptions, boolean keepException) { + Exception e = keepException ? 
exceptions.peek() : exceptions.poll(); + if (e != null) { + Throwables.throwIfUnchecked(e); + throw Status.INTERNAL.withDescription(e.getMessage()).withCause(e).asRuntimeException(); + } + } + + private static void checkStreamException( + long streamIndex, Queue exceptions, Queue streamIndices) { + Exception e = exceptions.peek(); + Long index = streamIndices.peek(); + if (e != null && index != null && index == streamIndex) { + exceptions.poll(); + streamIndices.poll(); + Throwables.throwIfUnchecked(e); + throw Status.INTERNAL.withDescription(e.getMessage()).withCause(e).asRuntimeException(); + } + } + } + + public static final SimulatedExecutionTime NO_EXECUTION_TIME = SimulatedExecutionTime.none(); + + private final Random random = new Random(); + private double abortProbability = 0.0010D; + + /** + * Flip this switch to true if you want the {@link + * MultiplexedSessionDatabaseClient#DETERMINE_DIALECT_STATEMENT} statement to be included in the + * recorded requests on the mock server. It is ignored by default to prevent tests that do not + * expect this request to suddenly start failing. 
+ */ + private boolean includeDetermineDialectStatementInRequests = false; + + private final Object lock = new Object(); + private Deque requests = new ConcurrentLinkedDeque<>(); + private volatile CountDownLatch freezeLock = new CountDownLatch(0); + private final AtomicInteger freezeAfterReturningNumRows = new AtomicInteger(); + private Queue exceptions = new ConcurrentLinkedQueue<>(); + private boolean stickyGlobalExceptions = false; + private ConcurrentMap statementResults = new ConcurrentHashMap<>(); + private ConcurrentMap statementGetCounts = new ConcurrentHashMap<>(); + private ConcurrentMap partialStatementResults = + new ConcurrentHashMap<>(); + private ConcurrentMap sessions = new ConcurrentHashMap<>(); + private ConcurrentMap multiplexedSessions = new ConcurrentHashMap<>(); + + private ConcurrentMap sessionLastUsed = new ConcurrentHashMap<>(); + private ConcurrentMap transactions = new ConcurrentHashMap<>(); + private final Queue transactionsStarted = new ConcurrentLinkedQueue<>(); + private ConcurrentMap isPartitionedDmlTransaction = + new ConcurrentHashMap<>(); + private ConcurrentMap abortedTransactions = new ConcurrentHashMap<>(); + private ConcurrentMap commitRetryTransactions = new ConcurrentHashMap<>(); + private final AtomicBoolean abortNextTransaction = new AtomicBoolean(); + private final AtomicBoolean abortNextStatement = new AtomicBoolean(); + private final AtomicBoolean ignoreInlineBeginRequest = new AtomicBoolean(); + private ConcurrentMap transactionCounters = new ConcurrentHashMap<>(); + private ConcurrentMap> partitionTokens = new ConcurrentHashMap<>(); + private ConcurrentMap transactionLastUsed = new ConcurrentHashMap<>(); + + // Stores the latest sequence number needed for the precommit token. + // The transaction entry is created only if the transaction is read-write and executed on a + // multiplexed session. 
+ private static ConcurrentMap transactionSequenceNo = + new ConcurrentHashMap<>(); + private int maxNumSessionsInOneBatch = 100; + private int maxTotalSessions = Integer.MAX_VALUE; + private Iterable batchWriteResult = new ArrayList<>(); + private AtomicInteger numSessionsCreated = new AtomicInteger(); + private SimulatedExecutionTime beginTransactionExecutionTime = NO_EXECUTION_TIME; + private SimulatedExecutionTime commitExecutionTime = NO_EXECUTION_TIME; + private SimulatedExecutionTime batchCreateSessionsExecutionTime = NO_EXECUTION_TIME; + private SimulatedExecutionTime createSessionExecutionTime = NO_EXECUTION_TIME; + private SimulatedExecutionTime deleteSessionExecutionTime = NO_EXECUTION_TIME; + private SimulatedExecutionTime executeBatchDmlExecutionTime = NO_EXECUTION_TIME; + private SimulatedExecutionTime executeSqlExecutionTime = NO_EXECUTION_TIME; + private SimulatedExecutionTime executeStreamingSqlExecutionTime = NO_EXECUTION_TIME; + private SimulatedExecutionTime getSessionExecutionTime = NO_EXECUTION_TIME; + private SimulatedExecutionTime listSessionsExecutionTime = NO_EXECUTION_TIME; + private SimulatedExecutionTime partitionQueryExecutionTime = NO_EXECUTION_TIME; + private SimulatedExecutionTime partitionReadExecutionTime = NO_EXECUTION_TIME; + private SimulatedExecutionTime readExecutionTime = NO_EXECUTION_TIME; + private SimulatedExecutionTime rollbackExecutionTime = NO_EXECUTION_TIME; + private SimulatedExecutionTime streamingReadExecutionTime = NO_EXECUTION_TIME; + + public MockSpannerServiceImpl() { + putStatementResult(StatementResult.detectDialectResult(Dialect.GOOGLE_STANDARD_SQL)); + } + + private String generateSessionName(String database) { + return String.format("%s/sessions/%s", database, UUID.randomUUID().toString()); + } + + private ByteString generateTransactionName(String session) { + AtomicLong counter = transactionCounters.get(session); + if (counter == null) { + counter = new AtomicLong(); + transactionCounters.put(session, 
counter); + } + return ByteString.copyFromUtf8( + String.format("%s/transactions/%d", session, counter.incrementAndGet())); + } + + private ByteString generatePartitionToken(String session, ByteString transactionId) { + ByteString token = ByteString.copyFromUtf8(UUID.randomUUID().toString()); + String key = partitionKey(session, transactionId); + List tokens = partitionTokens.computeIfAbsent(key, k -> new ArrayList<>(5)); + tokens.add(token); + return token; + } + + private String partitionKey(String session, ByteString transactionId) { + return String.format("%s/transactions/%s", session, transactionId.toStringUtf8()); + } + + private Timestamp getCurrentGoogleTimestamp() { + long current = System.currentTimeMillis(); + long seconds = TimeUnit.MILLISECONDS.toSeconds(current); + int nanos = (int) TimeUnit.MILLISECONDS.toNanos(current - TimeUnit.SECONDS.toMillis(seconds)); + return Timestamp.newBuilder().setSeconds(seconds).setNanos(nanos).build(); + } + + /** + * Puts a result that will be returned by this service. {@link StatementResult#statement} will be + * used as a key for the result, and any existing {@link StatementResult} for the same {@link + * Statement} will be overwritten. + */ + public void putStatementResult(StatementResult result) { + Preconditions.checkNotNull(result); + synchronized (lock) { + statementResults.put(result.statement, result); + } + } + + public void putStatementResults(StatementResult... 
results) { + synchronized (lock) { + for (StatementResult result : results) { + statementResults.put(result.statement, result); + } + } + } + + public void putPartialStatementResult(StatementResult result) { + synchronized (lock) { + partialStatementResults.put(result.statement.getSql(), result); + } + } + + public void setBatchWriteResult(final Iterable responses) { + synchronized (lock) { + this.batchWriteResult = responses; + } + } + + private StatementResult getResult(Statement statement) { + StatementResult res; + synchronized (lock) { + res = statementResults.get(statement); + if (statementGetCounts.containsKey(statement)) { + statementGetCounts.put(statement, statementGetCounts.get(statement) + 1L); + } else { + statementGetCounts.put(statement, 1L); + } + if (res == null) { + for (String partialSql : partialStatementResults.keySet()) { + if (statement.getSql().startsWith(partialSql)) { + res = partialStatementResults.get(partialSql); + } + } + } + } + if (res == null) { + throw Status.INTERNAL + .withDescription( + String.format( + "There is no result registered for the statement: %s\n" + + "Call TestSpannerImpl#addStatementResult(StatementResult) before executing" + + " the statement.", + statement.toString())) + .asRuntimeException(); + } + return res; + } + + /** Sets the probability that this mock server aborts a read/write transaction at random. */ + public void setAbortProbability(double probability) { + Preconditions.checkArgument( + probability >= 0D && probability <= 1D, "Probability must be >= 0 and <= 1"); + this.abortProbability = probability; + } + + /** + * Set this to true if you want the {@link + * MultiplexedSessionDatabaseClient#DETERMINE_DIALECT_STATEMENT} statement to be included in the + * recorded requests on the mock server. It is ignored by default to prevent tests that do not + * expect this request to suddenly start failing. 
+ */ + public void setIncludeDetermineDialectStatementInRequests(boolean include) { + this.includeDetermineDialectStatementInRequests = include; + } + + /** + * Instruct the mock server to abort the specified transaction. Use this method to test handling + * of {@link AbortedException} in your code. + */ + public void abortTransaction(TransactionContext transactionContext) { + Preconditions.checkNotNull(transactionContext); + if (transactionContext instanceof TransactionContextImpl) { + TransactionContextImpl impl = (TransactionContextImpl) transactionContext; + ByteString id = + impl.getTransactionSelector() == null ? null : impl.getTransactionSelector().getId(); + if (id != null) { + markAbortedTransaction(id); + } + } else { + throw new IllegalArgumentException( + "Unsupported TransactionContext type: " + transactionContext.getClass().getName()); + } + } + + /** Instruct the mock server to abort the next transaction that is created. */ + public void abortNextTransaction() { + abortNextTransaction.set(true); + } + + /** Instructs the mock server to abort the transaction of the next statement that is executed. */ + public void abortNextStatement() { + abortNextStatement.set(true); + } + + /** Instruct the mock server to abort all transactions currently active on the server. 
*/ + public void abortAllTransactions() { + for (ByteString id : transactions.keySet()) { + markAbortedTransaction(id); + } + } + + public void setIgnoreInlineBeginRequest(boolean ignore) { + ignoreInlineBeginRequest.set(ignore); + } + + public void freeze() { + freezeLock = new CountDownLatch(1); + } + + public void unfreeze() { + freezeLock.countDown(); + } + + public void freezeAfterReturningNumRows(int numRows) { + freezeAfterReturningNumRows.set(numRows); + } + + public void setMaxSessionsInOneBatch(int max) { + this.maxNumSessionsInOneBatch = max; + } + + public void setMaxTotalSessions(int max) { + this.maxTotalSessions = max; + } + + @Override + public void batchCreateSessions( + BatchCreateSessionsRequest request, + StreamObserver responseObserver) { + requests.add(request); + Preconditions.checkNotNull(request.getDatabase()); + String name = null; + try { + if (request.getSessionCount() <= 0) { + throw Status.INVALID_ARGUMENT + .withDescription("Session count must be >= 0") + .asRuntimeException(); + } + batchCreateSessionsExecutionTime.simulateExecutionTime( + exceptions, stickyGlobalExceptions, freezeLock); + if (sessions.size() >= maxTotalSessions) { + throw Status.FAILED_PRECONDITION + .withDescription("Maximum number of sessions reached") + .asRuntimeException(); + } + Timestamp now = getCurrentGoogleTimestamp(); + BatchCreateSessionsResponse.Builder response = BatchCreateSessionsResponse.newBuilder(); + int maxSessionsToCreate = Math.min(maxNumSessionsInOneBatch, request.getSessionCount()); + for (int i = 0; i < Math.min(maxTotalSessions - sessions.size(), maxSessionsToCreate); i++) { + name = generateSessionName(request.getDatabase()); + Session session = + Session.newBuilder() + .setCreateTime(now) + .setName(name) + .setApproximateLastUseTime(now) + .build(); + Session prev = sessions.putIfAbsent(name, session); + if (prev == null) { + if (sessions.size() <= maxTotalSessions) { + sessionLastUsed.put(name, Instant.now()); + 
response.addSession(session); + numSessionsCreated.incrementAndGet(); + } else { + removeSession(name); + } + } else { + // Someone else tried to create a session with the same id. This should not be possible + throw Status.ALREADY_EXISTS.asRuntimeException(); + } + } + responseObserver.onNext(response.build()); + responseObserver.onCompleted(); + } catch (StatusRuntimeException e) { + if (name != null) { + removeSession(name); + } + responseObserver.onError(e); + } catch (Throwable e) { + if (name != null) { + removeSession(name); + } + responseObserver.onError( + Status.INTERNAL + .withDescription("Batch create sessions failed: " + e.getMessage()) + .asRuntimeException()); + } + } + + @Override + public void createSession( + CreateSessionRequest request, StreamObserver responseObserver) { + requests.add(request); + Preconditions.checkNotNull(request.getDatabase()); + Preconditions.checkNotNull(request.getSession()); + String name = generateSessionName(request.getDatabase()); + Session requestSession = request.getSession(); + try { + createSessionExecutionTime.simulateExecutionTime( + exceptions, stickyGlobalExceptions, freezeLock); + Timestamp now = getCurrentGoogleTimestamp(); + Session session = + Session.newBuilder() + .setCreateTime(now) + .setName(name) + .setApproximateLastUseTime(now) + .setMultiplexed(requestSession.getMultiplexed()) + .build(); + Session prev = addSession(session); + if (prev == null) { + sessionLastUsed.put(name, Instant.now()); + numSessionsCreated.incrementAndGet(); + responseObserver.onNext(session); + responseObserver.onCompleted(); + } else { + // Someone else tried to create a session with the same id. 
This should not be possible + responseObserver.onError(Status.ALREADY_EXISTS.asRuntimeException()); + } + } catch (StatusRuntimeException e) { + removeSession(name); + responseObserver.onError(e); + } catch (Throwable e) { + removeSession(name); + responseObserver.onError( + Status.INTERNAL + .withDescription("Create session failed: " + e.getMessage()) + .asRuntimeException()); + } + } + + @Override + public void getSession(GetSessionRequest request, StreamObserver responseObserver) { + requests.add(request); + Preconditions.checkNotNull(request.getName()); + try { + getSessionExecutionTime.simulateExecutionTime(exceptions, stickyGlobalExceptions, freezeLock); + Session session = getSession(request.getName()); + if (session == null) { + setSessionNotFound(request.getName(), responseObserver); + } else { + session = + session.toBuilder().setApproximateLastUseTime(getCurrentGoogleTimestamp()).build(); + responseObserver.onNext(session); + responseObserver.onCompleted(); + } + } catch (StatusRuntimeException e) { + responseObserver.onError(e); + } catch (Throwable t) { + responseObserver.onError(Status.INTERNAL.asRuntimeException()); + } + } + + public StatusRuntimeException createSessionNotFoundException(String name) { + ResourceInfo resourceInfo = + ResourceInfo.newBuilder() + .setResourceType(SpannerExceptionFactory.SESSION_RESOURCE_TYPE) + .setResourceName(name) + .build(); + Metadata.Key key = + Metadata.Key.of( + resourceInfo.getDescriptorForType().getFullName() + Metadata.BINARY_HEADER_SUFFIX, + ProtoLiteUtils.metadataMarshaller(resourceInfo)); + Metadata trailers = new Metadata(); + trailers.put(key, resourceInfo); + return Status.NOT_FOUND + .withDescription(String.format("Session not found: Session with id %s not found", name)) + .asRuntimeException(trailers); + } + + private void setSessionNotFound(String name, StreamObserver responseObserver) { + final StatusRuntimeException statusRuntimeException = createSessionNotFoundException(name); + 
responseObserver.onError(statusRuntimeException); + } + + @Override + public void listSessions( + ListSessionsRequest request, StreamObserver responseObserver) { + requests.add(request); + try { + listSessionsExecutionTime.simulateExecutionTime( + exceptions, stickyGlobalExceptions, freezeLock); + List res = new ArrayList<>(); + for (Session session : sessions.values()) { + if (session.getName().startsWith(request.getDatabase())) { + res.add( + session.toBuilder().setApproximateLastUseTime(getCurrentGoogleTimestamp()).build()); + } + } + res.sort(Comparator.comparing(Session::getName)); + responseObserver.onNext(ListSessionsResponse.newBuilder().addAllSessions(res).build()); + responseObserver.onCompleted(); + } catch (StatusRuntimeException e) { + responseObserver.onError(e); + } catch (Throwable t) { + responseObserver.onError(Status.INTERNAL.asRuntimeException()); + } + } + + @Override + public void deleteSession(DeleteSessionRequest request, StreamObserver responseObserver) { + requests.add(request); + Preconditions.checkNotNull(request.getName()); + try { + deleteSessionExecutionTime.simulateExecutionTime( + exceptions, stickyGlobalExceptions, freezeLock); + Session session = getSession(request.getName()); + if (session != null) { + try { + doDeleteSession(session); + } catch (Throwable e) { + responseObserver.onError(Status.INTERNAL.asRuntimeException()); + return; + } + } + responseObserver.onNext(Empty.getDefaultInstance()); + responseObserver.onCompleted(); + } catch (StatusRuntimeException e) { + responseObserver.onError(e); + } + } + + void doDeleteSession(Session session) { + removeSession(session.getName()); + transactionCounters.remove(session.getName()); + sessionLastUsed.remove(session.getName()); + } + + @Override + public void executeSql(ExecuteSqlRequest request, StreamObserver responseObserver) { + requests.add(request); + Preconditions.checkNotNull(request.getSession()); + Session session = getSession(request.getSession()); + if (session == 
null) { + setSessionNotFound(request.getSession(), responseObserver); + return; + } + sessionLastUsed.put(session.getName(), Instant.now()); + try { + executeSqlExecutionTime.simulateExecutionTime(exceptions, stickyGlobalExceptions, freezeLock); + ByteString transactionId = getTransactionId(session, request.getTransaction()); + simulateAbort(session, transactionId); + Statement statement = + buildStatement(request.getSql(), request.getParamTypesMap(), request.getParams()); + StatementResult result = getResult(statement); + switch (result.getType()) { + case EXCEPTION: + throw result.getException(); + case RESULT_SET: + returnResultSet( + result.getResultSet(), + transactionId, + request.getTransaction(), + responseObserver, + session); + break; + case UPDATE_COUNT: + if (isPartitionedDmlTransaction(transactionId)) { + commitTransaction(transactionId); + responseObserver.onNext( + ResultSet.newBuilder() + .setStats( + ResultSetStats.newBuilder() + .setRowCountLowerBound(result.getUpdateCount()) + .build()) + .build()); + } else { + ResultSet.Builder resultSetBuilder = + ResultSet.newBuilder() + .setStats( + ResultSetStats.newBuilder() + .setRowCountExact(result.getUpdateCount()) + .build()) + .setMetadata( + ResultSetMetadata.newBuilder() + .setTransaction( + ignoreInlineBeginRequest.get() + ? 
Transaction.getDefaultInstance() + : Transaction.newBuilder().setId(transactionId).build()) + .build()); + if (session.getMultiplexed() && isReadWriteTransaction(transactionId)) { + resultSetBuilder.setPrecommitToken(getResultSetPrecommitToken(transactionId)); + } + responseObserver.onNext(resultSetBuilder.build()); + } + break; + default: + throw new IllegalStateException("Unknown result type: " + result.getType()); + } + responseObserver.onCompleted(); + } catch (StatusRuntimeException e) { + responseObserver.onError(e); + } catch (Throwable t) { + responseObserver.onError(Status.INTERNAL.asRuntimeException()); + } + } + + private void returnResultSet( + ResultSet resultSet, + ByteString transactionId, + TransactionSelector transactionSelector, + StreamObserver responseObserver, + Session session) { + ResultSetMetadata metadata = resultSet.getMetadata(); + if (transactionId != null) { + metadata = + metadata.toBuilder() + .setTransaction( + ignoreInlineBeginRequest.get() + ? Transaction.getDefaultInstance() + : Transaction.newBuilder().setId(transactionId).build()) + .build(); + } else if (transactionSelector.hasBegin() || transactionSelector.hasSingleUse()) { + Transaction transaction = getTemporaryTransactionOrNull(transactionSelector); + metadata = metadata.toBuilder().setTransaction(transaction).build(); + } + ResultSet.Builder resultSetBuilder = resultSet.toBuilder(); + resultSetBuilder.setMetadata(metadata); + if (session.getMultiplexed() && isReadWriteTransaction(transactionId)) { + resultSetBuilder.setPrecommitToken(getResultSetPrecommitToken(transactionId)); + } + resultSet = resultSetBuilder.build(); + responseObserver.onNext(resultSet); + } + + @Override + public void executeBatchDml( + ExecuteBatchDmlRequest request, StreamObserver responseObserver) { + requests.add(request); + Preconditions.checkNotNull(request.getSession()); + Session session = getSession(request.getSession()); + if (session == null) { + setSessionNotFound(request.getSession(), 
responseObserver); + return; + } + sessionLastUsed.put(session.getName(), Instant.now()); + try { + executeBatchDmlExecutionTime.simulateExecutionTime( + exceptions, stickyGlobalExceptions, freezeLock); + // Get or start transaction + ByteString transactionId = getTransactionId(session, request.getTransaction()); + if (isPartitionedDmlTransaction(transactionId)) { + throw Status.FAILED_PRECONDITION + .withDescription( + "This transaction is a partitioned DML transaction and cannot be used for batch DML" + + " updates.") + .asRuntimeException(); + } + simulateAbort(session, transactionId); + List results = new ArrayList<>(); + com.google.rpc.Status status = + com.google.rpc.Status.newBuilder().setCode(Code.OK_VALUE).build(); + resultLoop: + for (com.google.spanner.v1.ExecuteBatchDmlRequest.Statement statement : + request.getStatementsList()) { + try { + Statement spannerStatement = + buildStatement( + statement.getSql(), statement.getParamTypesMap(), statement.getParams()); + StatementResult res = getResult(spannerStatement); + switch (res.getType()) { + case EXCEPTION: + status = + com.google.rpc.Status.newBuilder() + .setCode(res.getException().getStatus().getCode().value()) + .setMessage(res.getException().getMessage()) + .build(); + break resultLoop; + case RESULT_SET: + case UPDATE_COUNT: + results.add(res); + break; + default: + throw new IllegalStateException("Unknown result type: " + res.getType()); + } + } catch (StatusRuntimeException e) { + status = + com.google.rpc.Status.newBuilder() + .setCode(e.getStatus().getCode().value()) + .setMessage(e.getMessage()) + .build(); + break; + } catch (Exception e) { + status = + com.google.rpc.Status.newBuilder() + .setCode(Code.UNKNOWN_VALUE) + .setMessage(e.getMessage()) + .build(); + break; + } + } + ExecuteBatchDmlResponse.Builder builder = ExecuteBatchDmlResponse.newBuilder(); + for (StatementResult res : results) { + Long updateCount; + switch (res.getType()) { + case UPDATE_COUNT: + updateCount = 
res.getUpdateCount(); + break; + case RESULT_SET: + updateCount = res.getResultSet().getStats().getRowCountExact(); + break; + default: + throw new IllegalStateException("Invalid result type: " + res.getType()); + } + builder.addResultSets( + ResultSet.newBuilder() + .setStats(ResultSetStats.newBuilder().setRowCountExact(updateCount).build()) + .setMetadata( + ResultSetMetadata.newBuilder() + .setTransaction( + ignoreInlineBeginRequest.get() + ? Transaction.getDefaultInstance() + : Transaction.newBuilder().setId(transactionId).build()) + .build()) + .build()); + } + builder.setStatus(status); + if (session.getMultiplexed() && isReadWriteTransaction(transactionId)) { + builder.setPrecommitToken(getExecuteBatchDmlResponsePrecommitToken(transactionId)); + } + responseObserver.onNext(builder.build()); + responseObserver.onCompleted(); + } catch (StatusRuntimeException e) { + responseObserver.onError(e); + } catch (Throwable t) { + responseObserver.onError(Status.INTERNAL.asRuntimeException()); + } + } + + @Override + public void executeStreamingSql( + ExecuteSqlRequest request, StreamObserver responseObserver) { + if (includeDetermineDialectStatementInRequests + || !request + .getSql() + .equals(MultiplexedSessionDatabaseClient.DETERMINE_DIALECT_STATEMENT.getSql())) { + requests.add(request); + } + Preconditions.checkNotNull(request.getSession()); + Session session = getSession(request.getSession()); + if (session == null) { + setSessionNotFound(request.getSession(), responseObserver); + return; + } + sessionLastUsed.put(session.getName(), Instant.now()); + try { + Statement statement = + buildStatement(request.getSql(), request.getParamTypesMap(), request.getParams()); + ByteString transactionId = getTransactionId(session, request.getTransaction()); + boolean isPartitioned = isPartitionedDmlTransaction(transactionId); + if (isPartitioned) { + StatementResult firstRes = getResult(statement); + switch (firstRes.getType()) { + case EXCEPTION: + throw 
firstRes.getException(); + case UPDATE_COUNT: + returnPartialResultSet( + session, + 0L, + !isPartitioned, + responseObserver, + request.getTransaction(), + transactionId, + false); + break; + case RESULT_SET: + default: + break; + } + } + executeStreamingSqlExecutionTime.simulateExecutionTime( + exceptions, stickyGlobalExceptions, freezeLock); + // Get or start transaction + if (!request.getPartitionToken().isEmpty()) { + List tokens = + partitionTokens.get(partitionKey(session.getName(), transactionId)); + if (tokens == null || !tokens.contains(request.getPartitionToken())) { + throw Status.INVALID_ARGUMENT + .withDescription( + String.format( + "Partition token %s is not a valid token for this transaction", + request.getPartitionToken())) + .asRuntimeException(); + } + } + simulateAbort(session, transactionId); + StatementResult res = getResult(statement); + switch (res.getType()) { + case EXCEPTION: + throw res.getException(); + case RESULT_SET: + returnPartialResultSet( + res.getResultSet(), + transactionId, + request.getTransaction(), + responseObserver, + getExecuteStreamingSqlExecutionTime(), + session.getMultiplexed()); + break; + case UPDATE_COUNT: + if (isPartitioned) { + commitTransaction(transactionId); + } + returnPartialResultSet( + session, + res.getUpdateCount(), + !isPartitioned, + responseObserver, + request.getTransaction(), + transactionId); + break; + default: + throw new IllegalStateException("Unknown result type: " + res.getType()); + } + } catch (StatusRuntimeException e) { + responseObserver.onError(e); + } catch (Throwable t) { + responseObserver.onError(Status.INTERNAL.withCause(t).asRuntimeException()); + } + } + + @SuppressWarnings("unchecked") + private Statement buildStatement( + String sql, Map paramTypes, com.google.protobuf.Struct params) { + Statement.Builder builder = Statement.newBuilder(sql); + // Set all untyped null values first. 
+ for (Entry entry : params.getFieldsMap().entrySet()) { + if (entry.getValue().hasNullValue() && !paramTypes.containsKey(entry.getKey())) { + builder.bind(entry.getKey()).to((Value) null); + } + } + + for (Entry entry : paramTypes.entrySet()) { + final String fieldName = entry.getKey(); + final Type fieldType = entry.getValue(); + final Type elementType = fieldType.getArrayElementType(); + com.google.protobuf.Value value = params.getFieldsOrThrow(fieldName); + if (value.getKindCase() == KindCase.NULL_VALUE) { + switch (fieldType.getCode()) { + case ARRAY: + switch (elementType.getCode()) { + case BOOL: + builder.bind(fieldName).toBoolArray((Iterable) null); + break; + case BYTES: + builder.bind(fieldName).toBytesArray(null); + break; + case DATE: + builder.bind(fieldName).toDateArray(null); + break; + case UUID: + builder.bind(fieldName).toUuidArray(null); + break; + case INTERVAL: + builder.bind(fieldName).toIntervalArray(null); + break; + case FLOAT32: + builder.bind(fieldName).toFloat32Array((Iterable) null); + break; + case FLOAT64: + builder.bind(fieldName).toFloat64Array((Iterable) null); + break; + case INT64: + builder.bind(fieldName).toInt64Array((Iterable) null); + break; + case STRING: + builder.bind(fieldName).toStringArray(null); + break; + case NUMERIC: + if (elementType.getTypeAnnotation() == TypeAnnotationCode.PG_NUMERIC) { + builder.bind(fieldName).toPgNumericArray(null); + } else { + builder.bind(fieldName).toNumericArray(null); + } + break; + case TIMESTAMP: + builder.bind(fieldName).toTimestampArray(null); + break; + case JSON: + if (elementType.getTypeAnnotation() == TypeAnnotationCode.PG_JSONB) { + builder.bind(fieldName).toPgJsonbArray(null); + } else { + builder.bind(fieldName).toJsonArray(null); + } + break; + case STRUCT: + case TYPE_CODE_UNSPECIFIED: + case UNRECOGNIZED: + default: + throw new IllegalArgumentException( + "Unknown or invalid array parameter type: " + elementType.getCode()); + } + break; + case BOOL: + 
builder.bind(fieldName).to((Boolean) null); + break; + case BYTES: + builder.bind(fieldName).to((ByteArray) null); + break; + case DATE: + builder.bind(fieldName).to((Date) null); + break; + case UUID: + builder.bind(fieldName).to((UUID) null); + break; + case INTERVAL: + builder.bind(fieldName).to((Interval) null); + break; + case FLOAT32: + builder.bind(fieldName).to((Float) null); + break; + case FLOAT64: + builder.bind(fieldName).to((Double) null); + break; + case INT64: + builder.bind(fieldName).to((Long) null); + break; + case STRING: + builder.bind(fieldName).to((String) null); + break; + case NUMERIC: + if (fieldType.getTypeAnnotation() == TypeAnnotationCode.PG_NUMERIC) { + builder.bind(fieldName).to(Value.pgNumeric(null)); + } else { + builder.bind(fieldName).to((BigDecimal) null); + } + break; + case STRUCT: + builder.bind(fieldName).to((Struct) null); + break; + case TIMESTAMP: + builder.bind(fieldName).to((com.google.cloud.Timestamp) null); + break; + case JSON: + if (fieldType.getTypeAnnotation() == TypeAnnotationCode.PG_JSONB) { + builder.bind(fieldName).to(Value.pgJsonb(null)); + } else { + builder.bind(fieldName).to(Value.json(null)); + } + break; + case TYPE_CODE_UNSPECIFIED: + case UNRECOGNIZED: + default: + throw new IllegalArgumentException("Unknown parameter type: " + fieldType.getCode()); + } + } else { + switch (fieldType.getCode()) { + case ARRAY: + switch (elementType.getCode()) { + case BOOL: + builder + .bind(fieldName) + .toBoolArray( + (Iterable) + GrpcStruct.decodeArrayValue( + com.google.cloud.spanner.Type.bool(), value.getListValue())); + break; + case BYTES: + builder + .bind(fieldName) + .toBytesArray( + Iterables.transform( + (Iterable) + GrpcStruct.decodeArrayValue( + com.google.cloud.spanner.Type.bytes(), value.getListValue()), + lazyByteArray -> + lazyByteArray == null ? 
null : lazyByteArray.getByteArray())); + break; + case DATE: + builder + .bind(fieldName) + .toDateArray( + (Iterable) + GrpcStruct.decodeArrayValue( + com.google.cloud.spanner.Type.date(), value.getListValue())); + break; + case UUID: + builder + .bind(fieldName) + .toUuidArray( + (Iterable) + GrpcStruct.decodeArrayValue( + com.google.cloud.spanner.Type.uuid(), value.getListValue())); + break; + case INTERVAL: + builder + .bind(fieldName) + .toIntervalArray( + (Iterable) + GrpcStruct.decodeArrayValue( + com.google.cloud.spanner.Type.interval(), value.getListValue())); + break; + case FLOAT32: + builder + .bind(fieldName) + .toFloat32Array( + (Iterable) + GrpcStruct.decodeArrayValue( + com.google.cloud.spanner.Type.float32(), value.getListValue())); + break; + case FLOAT64: + builder + .bind(fieldName) + .toFloat64Array( + (Iterable) + GrpcStruct.decodeArrayValue( + com.google.cloud.spanner.Type.float64(), value.getListValue())); + break; + case INT64: + builder + .bind(fieldName) + .toInt64Array( + (Iterable) + GrpcStruct.decodeArrayValue( + com.google.cloud.spanner.Type.int64(), value.getListValue())); + break; + case STRING: + builder + .bind(fieldName) + .toStringArray( + (Iterable) + GrpcStruct.decodeArrayValue( + com.google.cloud.spanner.Type.string(), value.getListValue())); + break; + case NUMERIC: + if (elementType.getTypeAnnotation() == TypeAnnotationCode.PG_NUMERIC) { + builder + .bind(fieldName) + .toPgNumericArray( + (Iterable) + GrpcStruct.decodeArrayValue( + com.google.cloud.spanner.Type.pgNumeric(), value.getListValue())); + } else { + builder + .bind(fieldName) + .toNumericArray( + (Iterable) + GrpcStruct.decodeArrayValue( + com.google.cloud.spanner.Type.numeric(), value.getListValue())); + } + break; + case TIMESTAMP: + builder + .bind(fieldName) + .toTimestampArray( + (Iterable) + GrpcStruct.decodeArrayValue( + com.google.cloud.spanner.Type.timestamp(), value.getListValue())); + break; + case JSON: + if (elementType.getTypeAnnotation() == 
TypeAnnotationCode.PG_JSONB) { + builder + .bind(fieldName) + .toPgJsonbArray( + (Iterable) + GrpcStruct.decodeArrayValue( + com.google.cloud.spanner.Type.pgJsonb(), value.getListValue())); + } else { + builder + .bind(fieldName) + .toJsonArray( + (Iterable) + GrpcStruct.decodeArrayValue( + com.google.cloud.spanner.Type.json(), value.getListValue())); + } + break; + case STRUCT: + case TYPE_CODE_UNSPECIFIED: + case UNRECOGNIZED: + default: + throw new IllegalArgumentException( + "Unknown or invalid array parameter type: " + elementType.getCode()); + } + break; + case BOOL: + builder.bind(fieldName).to(value.getBoolValue()); + break; + case BYTES: + builder.bind(fieldName).to(ByteArray.fromBase64(value.getStringValue())); + break; + case DATE: + builder.bind(fieldName).to(Date.parseDate(value.getStringValue())); + break; + case UUID: + builder.bind(fieldName).to(UUID.fromString(value.getStringValue())); + break; + case INTERVAL: + builder.bind(fieldName).to(Interval.parseFromString(value.getStringValue())); + break; + case FLOAT32: + builder.bind(fieldName).to((float) value.getNumberValue()); + break; + case FLOAT64: + builder.bind(fieldName).to(value.getNumberValue()); + break; + case INT64: + builder.bind(fieldName).to(Long.valueOf(value.getStringValue())); + break; + case STRING: + builder.bind(fieldName).to(value.getStringValue()); + break; + case NUMERIC: + if (fieldType.getTypeAnnotation() == TypeAnnotationCode.PG_NUMERIC) { + builder.bind(fieldName).to(Value.pgNumeric(value.getStringValue())); + } else { + builder.bind(fieldName).to(new BigDecimal(value.getStringValue())); + } + break; + case STRUCT: + throw new IllegalArgumentException("Struct parameters not (yet) supported"); + case TIMESTAMP: + builder + .bind(fieldName) + .to(com.google.cloud.Timestamp.parseTimestamp(value.getStringValue())); + break; + case JSON: + if (fieldType.getTypeAnnotation() == TypeAnnotationCode.PG_JSONB) { + builder.bind(fieldName).to(Value.pgJsonb(value.getStringValue())); + } 
else { + builder.bind(fieldName).to(Value.json(value.getStringValue())); + } + break; + case TYPE_CODE_UNSPECIFIED: + case UNRECOGNIZED: + default: + throw new IllegalArgumentException("Unknown parameter type: " + fieldType.getCode()); + } + } + } + return builder.build(); + } + + private void setTransactionNotFound( + ByteString transactionId, StreamObserver responseObserver) { + responseObserver.onError( + Status.ABORTED + .withDescription( + String.format( + "Transaction with id %s not found and has probably been aborted", + transactionId.toStringUtf8())) + .asRuntimeException()); + } + + private void throwTransactionNotFound(ByteString transactionId) { + Metadata.Key key = ProtoUtils.keyForProto(RetryInfo.getDefaultInstance()); + Metadata trailers = new Metadata(); + RetryInfo retryInfo = + RetryInfo.newBuilder() + .setRetryDelay( + Duration.newBuilder() + .setNanos((int) TimeUnit.MILLISECONDS.toNanos(1L)) + .setSeconds(0L)) + .build(); + trailers.put(key, retryInfo); + throw Status.ABORTED + .withDescription( + String.format( + "Transaction with id %s not found and has probably been aborted", + transactionId.toStringUtf8())) + .asRuntimeException(trailers); + } + + private void throwTransactionAborted(ByteString transactionId) { + Metadata.Key key = ProtoUtils.keyForProto(RetryInfo.getDefaultInstance()); + Metadata trailers = new Metadata(); + RetryInfo retryInfo = + RetryInfo.newBuilder() + .setRetryDelay( + Duration.newBuilder() + .setNanos((int) TimeUnit.MILLISECONDS.toNanos(1L)) + .setSeconds(0L)) + .build(); + trailers.put(key, retryInfo); + throw Status.ABORTED + .withDescription( + String.format("Transaction with id %s has been aborted", transactionId.toStringUtf8())) + .asRuntimeException(trailers); + } + + @Override + public void read(final ReadRequest request, StreamObserver responseObserver) { + requests.add(request); + Preconditions.checkNotNull(request.getSession()); + Session session = getSession(request.getSession()); + if (session == null) { + 
setSessionNotFound(request.getSession(), responseObserver); + return; + } + sessionLastUsed.put(session.getName(), Instant.now()); + try { + readExecutionTime.simulateExecutionTime(exceptions, stickyGlobalExceptions, freezeLock); + // Get or start transaction + ByteString transactionId = getTransactionId(session, request.getTransaction()); + simulateAbort(session, transactionId); + Iterable cols = () -> request.getColumnsList().iterator(); + Statement statement = + StatementResult.createReadStatement( + request.getTable(), + request.getKeySet().getAll() ? KeySet.all() : KeySet.singleKey(Key.of()), + cols); + StatementResult res = getResult(statement); + returnResultSet( + res.getResultSet(), transactionId, request.getTransaction(), responseObserver, session); + responseObserver.onCompleted(); + } catch (StatusRuntimeException e) { + responseObserver.onError(e); + } catch (Throwable t) { + responseObserver.onError(Status.INTERNAL.asRuntimeException()); + } + } + + @Override + public void streamingRead( + final ReadRequest request, StreamObserver responseObserver) { + requests.add(request); + Preconditions.checkNotNull(request.getSession()); + Session session = getSession(request.getSession()); + if (session == null) { + setSessionNotFound(request.getSession(), responseObserver); + return; + } + sessionLastUsed.put(session.getName(), Instant.now()); + try { + streamingReadExecutionTime.simulateExecutionTime( + exceptions, stickyGlobalExceptions, freezeLock); + // Get or start transaction + ByteString transactionId = getTransactionId(session, request.getTransaction()); + if (!request.getPartitionToken().isEmpty()) { + List tokens = + partitionTokens.get(partitionKey(session.getName(), transactionId)); + if (tokens == null || !tokens.contains(request.getPartitionToken())) { + throw Status.INVALID_ARGUMENT + .withDescription( + String.format( + "Partition token %s is not a valid token for this transaction", + request.getPartitionToken())) + .asRuntimeException(); + } + 
} + simulateAbort(session, transactionId); + Iterable cols = () -> request.getColumnsList().iterator(); + Statement statement = + StatementResult.createReadStatement( + request.getTable(), + request.getKeySet().getAll() ? KeySet.all() : KeySet.singleKey(Key.of()), + cols); + StatementResult res = getResult(statement); + if (res == null) { + throw Status.NOT_FOUND + .withDescription("No result found for " + statement.toString()) + .asRuntimeException(); + } + if (res.getType() == StatementResult.StatementResultType.EXCEPTION) { + throw res.getException(); + } + returnPartialResultSet( + res.getResultSet(), + transactionId, + request.getTransaction(), + responseObserver, + getStreamingReadExecutionTime(), + session.getMultiplexed()); + } catch (StatusRuntimeException e) { + responseObserver.onError(e); + } catch (Throwable t) { + responseObserver.onError(Status.INTERNAL.asRuntimeException()); + } + } + + private void returnPartialResultSet( + ResultSet resultSet, + ByteString transactionId, + TransactionSelector transactionSelector, + StreamObserver responseObserver, + SimulatedExecutionTime executionTime, + boolean isMultiplexedSession) + throws Exception { + ResultSetMetadata metadata = resultSet.getMetadata(); + if (transactionId == null) { + Transaction transaction = getTemporaryTransactionOrNull(transactionSelector); + metadata = metadata.toBuilder().setTransaction(transaction).build(); + } else { + metadata = + metadata.toBuilder() + .setTransaction( + ignoreInlineBeginRequest.get() + ? 
Transaction.getDefaultInstance() + : Transaction.newBuilder().setId(transactionId).build()) + .build(); + } + resultSet = resultSet.toBuilder().setMetadata(metadata).build(); + PartialResultSetsIterator iterator = + new PartialResultSetsIterator( + resultSet, + isMultiplexedSession && isReadWriteTransaction(transactionId), + transactionId); + long index = 0L; + while (iterator.hasNext()) { + SimulatedExecutionTime.checkStreamException( + index, executionTime.exceptions, executionTime.streamIndices); + responseObserver.onNext(iterator.next()); + if (freezeAfterReturningNumRows.get() > 0) { + if (freezeAfterReturningNumRows.decrementAndGet() == 0) { + freeze(); + freezeLock.await(); + } + } + index++; + } + responseObserver.onCompleted(); + } + + private void returnPartialResultSet( + Session session, + Long updateCount, + boolean exact, + StreamObserver responseObserver, + TransactionSelector transactionSelector, + ByteString transactionId) { + returnPartialResultSet( + session, updateCount, exact, responseObserver, transactionSelector, transactionId, true); + } + + private void returnPartialResultSet( + Session session, + Long updateCount, + boolean exact, + StreamObserver responseObserver, + TransactionSelector transactionSelector, + ByteString transactionId, + boolean complete) { + if (exact) { + responseObserver.onNext( + PartialResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType(StructType.newBuilder().build()) + .setTransaction( + ignoreInlineBeginRequest.get() || !transactionSelector.hasBegin() + ? 
Transaction.getDefaultInstance() + : Transaction.newBuilder().setId(transactionId).build()) + .build()) + .setStats(ResultSetStats.newBuilder().setRowCountExact(updateCount).build()) + .build()); + } else { + responseObserver.onNext( + PartialResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType(StructType.newBuilder().build()) + .setTransaction( + ignoreInlineBeginRequest.get() || !transactionSelector.hasBegin() + ? Transaction.getDefaultInstance() + : Transaction.newBuilder().setId(transactionId).build()) + .build()) + .setStats(ResultSetStats.newBuilder().setRowCountLowerBound(updateCount).build()) + .build()); + } + if (complete) { + responseObserver.onCompleted(); + } + } + + private boolean isPartitionedDmlTransaction(ByteString transactionId) { + return transactionId != null + && isPartitionedDmlTransaction.get(transactionId) != null + && isPartitionedDmlTransaction.get(transactionId); + } + + private boolean isReadWriteTransaction(ByteString transactionId) { + return transactionId != null + && transactions.get(transactionId) != null + && transactions.get(transactionId).getReadTimestamp().getSeconds() == 0; + } + + private ByteString getTransactionId(Session session, TransactionSelector tx) { + ByteString transactionId = null; + switch (tx.getSelectorCase()) { + case SELECTOR_NOT_SET: + case SINGLE_USE: + transactionId = null; + break; + case BEGIN: + transactionId = beginTransaction(session, tx.getBegin(), null, null).getId(); + break; + case ID: + Transaction transaction = transactions.get(tx.getId()); + if (transaction == null) { + Optional aborted = Optional.fromNullable(abortedTransactions.get(tx.getId())); + if (aborted.or(Boolean.FALSE)) { + throwTransactionAborted(tx.getId()); + } else { + throwTransactionNotFound(tx.getId()); + } + } else { + transactionId = transaction.getId(); + transactionLastUsed.put(transactionId, Instant.now()); + } + break; + default: + throw Status.UNIMPLEMENTED.asRuntimeException(); + } + 
return transactionId; + } + + private Transaction getTemporaryTransactionOrNull(TransactionSelector tx) { + Transaction.Builder builder; + switch (tx.getSelectorCase()) { + case SELECTOR_NOT_SET: + case SINGLE_USE: + builder = Transaction.newBuilder(); + setReadTimestamp(tx.getSingleUse(), builder); + return builder.build(); + case BEGIN: + builder = Transaction.newBuilder(); + setReadTimestamp(tx.getBegin(), builder); + return builder.build(); + case ID: + return transactions.get(tx.getId()); + default: + return null; + } + } + + @Override + public void beginTransaction( + BeginTransactionRequest request, StreamObserver responseObserver) { + // TODO: Remove once this is guaranteed to be available. + // Skip storing the explicit BeginTransactionRequest used to verify read-write transaction + // server availability on multiplexed sessions. + // This code will be removed once read-write multiplexed sessions are stable on the backend, + // hence the temporary trade-off. + if (!request + .getRequestOptions() + .getTransactionTag() + .equals("multiplexed-rw-background-begin-txn")) { + requests.add(request); + } + Preconditions.checkNotNull(request.getSession()); + Session session = getSession(request.getSession()); + if (session == null) { + setSessionNotFound(request.getSession(), responseObserver); + return; + } + sessionLastUsed.put(session.getName(), Instant.now()); + try { + beginTransactionExecutionTime.simulateExecutionTime( + exceptions, stickyGlobalExceptions, freezeLock); + Transaction transaction = + beginTransaction( + session, request.getOptions(), request.getMutationKey(), request.getRequestOptions()); + responseObserver.onNext(transaction); + responseObserver.onCompleted(); + } catch (StatusRuntimeException t) { + responseObserver.onError(t); + } catch (Throwable t) { + responseObserver.onError(Status.INTERNAL.asRuntimeException()); + } + } + + private Transaction beginTransaction( + Session session, + TransactionOptions options, + 
com.google.spanner.v1.Mutation mutationKey, + RequestOptions requestOptions) { + ByteString transactionId = generateTransactionName(session.getName()); + Transaction.Builder builder = Transaction.newBuilder().setId(transactionId); + if (options != null && options.getModeCase() == ModeCase.READ_ONLY) { + setReadTimestamp(options, builder); + } + if (session.getMultiplexed() + && options.getModeCase() == ModeCase.READ_WRITE + && mutationKey != null + && mutationKey != com.google.spanner.v1.Mutation.getDefaultInstance()) { + // Mutation only case in a read-write transaction. + builder.setPrecommitToken(getTransactionPrecommitToken(transactionId)); + } + Transaction transaction = builder.build(); + transactions.put(transaction.getId(), transaction); + // TODO: remove once UNIMPLEMENTED error is not thrown for read-write mux + // Do not consider the transaction if this request was from background thread + if (requestOptions == null + || !requestOptions.getTransactionTag().equals("multiplexed-rw-background-begin-txn")) { + transactionsStarted.add(transaction.getId()); + if (abortNextTransaction.getAndSet(false)) { + markAbortedTransaction(transaction.getId()); + } + } + isPartitionedDmlTransaction.put( + transaction.getId(), options.getModeCase() == ModeCase.PARTITIONED_DML); + return transaction; + } + + private void setReadTimestamp(TransactionOptions options, Transaction.Builder builder) { + if (options.getReadOnly().getStrong()) { + builder.setReadTimestamp(getCurrentGoogleTimestamp()); + } else if (options.getReadOnly().hasReadTimestamp()) { + builder.setReadTimestamp(options.getReadOnly().getReadTimestamp()); + } else if (options.getReadOnly().hasMinReadTimestamp()) { + builder.setReadTimestamp(options.getReadOnly().getMinReadTimestamp()); + } else if (options.getReadOnly().hasExactStaleness() + || options.getReadOnly().hasMaxStaleness()) { + Timestamp timestamp = getCurrentGoogleTimestamp(); + Duration staleness = + options.getReadOnly().hasExactStaleness() + ? 
options.getReadOnly().getExactStaleness() + : options.getReadOnly().getMaxStaleness(); + long seconds = timestamp.getSeconds() - staleness.getSeconds(); + int nanos = timestamp.getNanos() - staleness.getNanos(); + if (nanos < 0) { + seconds = seconds - 1; + nanos = 1000000000 + nanos; + } + timestamp = Timestamp.newBuilder().setSeconds(seconds).setNanos(nanos).build(); + builder.setReadTimestamp(timestamp); + } + } + + private void simulateAbort(Session session, ByteString transactionId) { + if (!session.getMultiplexed()) { + // multiplexed sessions allow concurrent transactions on a single session. + ensureMostRecentTransaction(session, transactionId); + } + if (isReadWriteTransaction(transactionId)) { + if (abortNextStatement.getAndSet(false) || abortProbability > random.nextDouble()) { + rollbackTransaction(transactionId); + throw createAbortedException(transactionId); + } + } + } + + public StatusRuntimeException createAbortedException(ByteString transactionId) { + RetryInfo retryInfo = + RetryInfo.newBuilder().setRetryDelay(Duration.newBuilder().setNanos(1).build()).build(); + Metadata.Key key = + Metadata.Key.of( + retryInfo.getDescriptorForType().getFullName() + Metadata.BINARY_HEADER_SUFFIX, + ProtoLiteUtils.metadataMarshaller(retryInfo)); + Metadata trailers = new Metadata(); + trailers.put(key, retryInfo); + return Status.ABORTED + .withDescription( + String.format("Transaction with id %s has been aborted", transactionId.toStringUtf8())) + .asRuntimeException(trailers); + } + + private void ensureMostRecentTransaction(Session session, ByteString transactionId) { + AtomicLong counter = transactionCounters.get(session.getName()); + if (transactionId != null && transactionId.toStringUtf8() != null && counter != null) { + int index = transactionId.toStringUtf8().lastIndexOf('/'); + if (index > -1) { + long id = Long.parseLong(transactionId.toStringUtf8().substring(index + 1)); + if (id != counter.get()) { + throw Status.FAILED_PRECONDITION + .withDescription( 
+ String.format( + "This transaction has been invalidated by a later transaction in the same" + + " session.\n" + + "Transaction id: " + + id + + "\nExpected: " + + counter.get(), + session.getName())) + .asRuntimeException(); + } + } + } + } + + @Override + public void commit(CommitRequest request, StreamObserver responseObserver) { + requests.add(request); + Preconditions.checkNotNull(request.getSession()); + Session session = getSession(request.getSession()); + if (session == null) { + setSessionNotFound(request.getSession(), responseObserver); + return; + } + sessionLastUsed.put(session.getName(), Instant.now()); + try { + commitExecutionTime.simulateExecutionTime(exceptions, stickyGlobalExceptions, freezeLock); + // Find or start a transaction + Transaction transaction; + if (request.hasSingleUseTransaction()) { + // Start a temporary transaction + transaction = + beginTransaction( + session, + TransactionOptions.newBuilder() + .setReadWrite(ReadWrite.getDefaultInstance()) + .build(), + null, + request.getRequestOptions()); + } else if (request.getTransactionId() != null) { + transaction = transactions.get(request.getTransactionId()); + Optional aborted = + Optional.fromNullable(abortedTransactions.get(request.getTransactionId())); + if (aborted.or(Boolean.FALSE)) { + throwTransactionAborted(request.getTransactionId()); + } + } else { + // No transaction mode specified + responseObserver.onError( + Status.INVALID_ARGUMENT + .withDescription("No transaction mode specified") + .asRuntimeException()); + return; + } + if (transaction == null) { + setTransactionNotFound(request.getTransactionId(), responseObserver); + return; + } + simulateAbort(session, request.getTransactionId()); + CommitResponse.Builder responseBuilder = CommitResponse.newBuilder(); + Optional commitRetry = + Optional.fromNullable(commitRetryTransactions.get(request.getTransactionId())); + if (commitRetry.or(Boolean.FALSE) && session.getMultiplexed()) { + responseBuilder.setPrecommitToken( + 
getCommitResponsePrecommitToken(request.getTransactionId())); + commitRetryTransactions.remove(request.getTransactionId()); + } else { + commitTransaction(transaction.getId()); + responseBuilder.setCommitTimestamp(getCurrentGoogleTimestamp()); + if (request.getReturnCommitStats()) { + responseBuilder.setCommitStats( + com.google.spanner.v1.CommitResponse.CommitStats.newBuilder() + // This is not really always equal, but at least it returns a value. + .setMutationCount(request.getMutationsCount()) + .build()); + } + } + responseObserver.onNext(responseBuilder.build()); + responseObserver.onCompleted(); + } catch (StatusRuntimeException t) { + responseObserver.onError(t); + } catch (Throwable t) { + responseObserver.onError(Status.INTERNAL.asRuntimeException()); + } + } + + @Override + public void batchWrite( + BatchWriteRequest request, StreamObserver responseObserver) { + requests.add(request); + Preconditions.checkNotNull(request.getSession()); + Session session = getSession(request.getSession()); + if (session == null) { + setSessionNotFound(request.getSession(), responseObserver); + return; + } + sessionLastUsed.put(session.getName(), Instant.now()); + try { + for (BatchWriteResponse response : batchWriteResult) { + responseObserver.onNext(response); + } + responseObserver.onCompleted(); + } catch (StatusRuntimeException t) { + responseObserver.onError(t); + } catch (Throwable t) { + responseObserver.onError(Status.INTERNAL.asRuntimeException()); + } + } + + private void commitTransaction(ByteString transactionId) { + transactions.remove(transactionId); + isPartitionedDmlTransaction.remove(transactionId); + transactionLastUsed.remove(transactionId); + transactionSequenceNo.remove(transactionId); + } + + @Override + public void rollback(RollbackRequest request, StreamObserver responseObserver) { + requests.add(request); + Preconditions.checkNotNull(request.getTransactionId()); + Session session = getSession(request.getSession()); + if (session == null) { + 
setSessionNotFound(request.getSession(), responseObserver); + return; + } + sessionLastUsed.put(session.getName(), Instant.now()); + try { + rollbackExecutionTime.simulateExecutionTime(exceptions, stickyGlobalExceptions, freezeLock); + Transaction transaction = transactions.get(request.getTransactionId()); + if (transaction != null) { + rollbackTransaction(transaction.getId()); + } + responseObserver.onNext(Empty.getDefaultInstance()); + responseObserver.onCompleted(); + } catch (StatusRuntimeException t) { + responseObserver.onError(t); + } catch (Throwable t) { + responseObserver.onError(Status.INTERNAL.asRuntimeException()); + } + } + + void rollbackTransaction(ByteString transactionId) { + transactions.remove(transactionId); + isPartitionedDmlTransaction.remove(transactionId); + transactionLastUsed.remove(transactionId); + transactionSequenceNo.remove(transactionId); + } + + void markAbortedTransaction(ByteString transactionId) { + abortedTransactions.put(transactionId, Boolean.TRUE); + transactions.remove(transactionId); + isPartitionedDmlTransaction.remove(transactionId); + transactionLastUsed.remove(transactionId); + transactionSequenceNo.remove(transactionId); + } + + public void markCommitRetryOnTransaction(ByteString transactionId) { + Transaction transaction = transactions.get(transactionId); + if (transaction == null || !isReadWriteTransaction(transactionId)) { + return; + } + commitRetryTransactions.putIfAbsent(transactionId, Boolean.TRUE); + } + + @Override + public void partitionQuery( + PartitionQueryRequest request, StreamObserver responseObserver) { + requests.add(request); + try { + partitionQueryExecutionTime.simulateExecutionTime( + exceptions, stickyGlobalExceptions, freezeLock); + partition( + request.getSession(), + request.getTransaction(), + request.getPartitionOptions(), + responseObserver); + } catch (StatusRuntimeException t) { + responseObserver.onError(t); + } catch (Throwable t) { + 
responseObserver.onError(Status.INTERNAL.asRuntimeException()); + } + } + + @Override + public void partitionRead( + PartitionReadRequest request, StreamObserver responseObserver) { + requests.add(request); + try { + partitionReadExecutionTime.simulateExecutionTime( + exceptions, stickyGlobalExceptions, freezeLock); + partition( + request.getSession(), + request.getTransaction(), + request.getPartitionOptions(), + responseObserver); + } catch (StatusRuntimeException t) { + responseObserver.onError(t); + } catch (Throwable t) { + responseObserver.onError(Status.INTERNAL.asRuntimeException()); + } + } + + private void partition( + String sessionName, + TransactionSelector transactionSelector, + PartitionOptions options, + StreamObserver responseObserver) { + Session session = getSession(sessionName); + if (session == null) { + setSessionNotFound(sessionName, responseObserver); + return; + } + sessionLastUsed.put(session.getName(), Instant.now()); + try { + ByteString transactionId = getTransactionId(session, transactionSelector); + responseObserver.onNext( + PartitionResponse.newBuilder() + .addAllPartitions( + LongStream.range( + 0L, options.getMaxPartitions() == 0L ? 
1L : options.getMaxPartitions()) + .mapToObj( + ignored -> + Partition.newBuilder() + .setPartitionToken( + generatePartitionToken(session.getName(), transactionId)) + .build()) + .collect(Collectors.toList())) + .build()); + responseObserver.onCompleted(); + } catch (StatusRuntimeException e) { + responseObserver.onError(e); + } catch (Throwable t) { + responseObserver.onError(Status.INTERNAL.asRuntimeException()); + } + } + + public int numSessionsCreated() { + return numSessionsCreated.get(); + } + + public Map getSessions() { + return sessions; + } + + @Override + public List getRequests() { + return new ArrayList<>(this.requests); + } + + public void clearRequests() { + this.requests.clear(); + } + + @SuppressWarnings("unchecked") + public List getRequestsOfType(Class type) { + List result = new ArrayList<>(); + for (AbstractMessage message : this.requests) { + if (message.getClass().equals(type)) { + result.add((T) message); + } + } + return result; + } + + public List> getRequestTypes() { + List> res = new LinkedList<>(); + for (AbstractMessage m : this.requests) { + res.add(m.getClass()); + } + return res; + } + + public int countRequestsOfType(Class type) { + int c = 0; + for (AbstractMessage m : this.requests) { + if (m.getClass().equals(type)) { + c++; + } + } + return c; + } + + public void waitForLastRequestToBe(Class type, long timeoutMillis) + throws InterruptedException, TimeoutException { + Stopwatch watch = Stopwatch.createStarted(); + while (!(this.requests.peekLast() != null + && this.requests.peekLast().getClass().equals(type))) { + Thread.sleep(1L); + if (watch.elapsed(TimeUnit.MILLISECONDS) > timeoutMillis) { + throw new TimeoutException( + "Timeout while waiting for last request to become " + type.getName()); + } + } + } + + public List getTransactionsStarted() { + return new ArrayList<>(transactionsStarted); + } + + public void waitForRequestsToContain(Class type, long timeoutMillis) + throws InterruptedException, TimeoutException { + 
Stopwatch watch = Stopwatch.createStarted(); + while (countRequestsOfType(type) == 0) { + //noinspection BusyWait + Thread.sleep(1L); + if (watch.elapsed(TimeUnit.MILLISECONDS) > timeoutMillis) { + throw new TimeoutException( + "Timeout while waiting for requests to contain " + type.getName()); + } + } + } + + public void waitForRequestsToContain( + Predicate predicate, long timeoutMillis) + throws InterruptedException, TimeoutException { + Stopwatch watch = Stopwatch.createStarted(); + while (true) { + Iterable msg = Iterables.filter(getRequests(), predicate); + if (msg.iterator().hasNext()) { + break; + } + Thread.sleep(1L); + if (watch.elapsed(TimeUnit.MILLISECONDS) > timeoutMillis) { + throw new TimeoutException( + "Timeout while waiting for requests to contain the wanted request"); + } + } + } + + @Override + public void addResponse(AbstractMessage response) { + throw new UnsupportedOperationException(); + } + + @Override + public void addException(Exception exception) { + exceptions.add(exception); + } + + public void clearExceptions() { + exceptions.clear(); + } + + public void setStickyGlobalExceptions(boolean sticky) { + this.stickyGlobalExceptions = sticky; + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return bindService(); + } + + /** Removes all sessions and transactions. Mocked results are not removed. 
*/ + @Override + public void reset() { + requests = new ConcurrentLinkedDeque<>(); + exceptions = new ConcurrentLinkedQueue<>(); + statementGetCounts = new ConcurrentHashMap<>(); + sessions = new ConcurrentHashMap<>(); + sessionLastUsed = new ConcurrentHashMap<>(); + transactions = new ConcurrentHashMap<>(); + transactionsStarted.clear(); + isPartitionedDmlTransaction = new ConcurrentHashMap<>(); + abortedTransactions = new ConcurrentHashMap<>(); + transactionCounters = new ConcurrentHashMap<>(); + partitionTokens = new ConcurrentHashMap<>(); + transactionLastUsed = new ConcurrentHashMap<>(); + transactionSequenceNo = new ConcurrentHashMap<>(); + + numSessionsCreated.set(0); + stickyGlobalExceptions = false; + freezeLock.countDown(); + } + + public void removeAllExecutionTimes() { + batchCreateSessionsExecutionTime = NO_EXECUTION_TIME; + beginTransactionExecutionTime = NO_EXECUTION_TIME; + commitExecutionTime = NO_EXECUTION_TIME; + createSessionExecutionTime = NO_EXECUTION_TIME; + deleteSessionExecutionTime = NO_EXECUTION_TIME; + executeBatchDmlExecutionTime = NO_EXECUTION_TIME; + executeSqlExecutionTime = NO_EXECUTION_TIME; + executeStreamingSqlExecutionTime = NO_EXECUTION_TIME; + getSessionExecutionTime = NO_EXECUTION_TIME; + listSessionsExecutionTime = NO_EXECUTION_TIME; + partitionQueryExecutionTime = NO_EXECUTION_TIME; + partitionReadExecutionTime = NO_EXECUTION_TIME; + readExecutionTime = NO_EXECUTION_TIME; + rollbackExecutionTime = NO_EXECUTION_TIME; + streamingReadExecutionTime = NO_EXECUTION_TIME; + } + + public void setBeginTransactionExecutionTime( + SimulatedExecutionTime beginTransactionExecutionTime) { + this.beginTransactionExecutionTime = Preconditions.checkNotNull(beginTransactionExecutionTime); + } + + public SimulatedExecutionTime getCommitExecutionTime() { + return commitExecutionTime; + } + + public void setCommitExecutionTime(SimulatedExecutionTime commitExecutionTime) { + this.commitExecutionTime = 
Preconditions.checkNotNull(commitExecutionTime); + } + + public SimulatedExecutionTime getBatchCreateSessionsExecutionTime() { + return batchCreateSessionsExecutionTime; + } + + public void setBatchCreateSessionsExecutionTime( + SimulatedExecutionTime batchCreateSessionsExecutionTime) { + this.batchCreateSessionsExecutionTime = + Preconditions.checkNotNull(batchCreateSessionsExecutionTime); + } + + public SimulatedExecutionTime getCreateSessionExecutionTime() { + return createSessionExecutionTime; + } + + public void setCreateSessionExecutionTime(SimulatedExecutionTime createSessionExecutionTime) { + this.createSessionExecutionTime = Preconditions.checkNotNull(createSessionExecutionTime); + } + + public SimulatedExecutionTime getDeleteSessionExecutionTime() { + return deleteSessionExecutionTime; + } + + public void setDeleteSessionExecutionTime(SimulatedExecutionTime deleteSessionExecutionTime) { + this.deleteSessionExecutionTime = Preconditions.checkNotNull(deleteSessionExecutionTime); + } + + public SimulatedExecutionTime getExecuteBatchDmlExecutionTime() { + return executeBatchDmlExecutionTime; + } + + public void setExecuteBatchDmlExecutionTime(SimulatedExecutionTime executeBatchDmlExecutionTime) { + this.executeBatchDmlExecutionTime = Preconditions.checkNotNull(executeBatchDmlExecutionTime); + } + + public SimulatedExecutionTime getExecuteSqlExecutionTime() { + return executeSqlExecutionTime; + } + + public void setExecuteSqlExecutionTime(SimulatedExecutionTime executeSqlExecutionTime) { + this.executeSqlExecutionTime = Preconditions.checkNotNull(executeSqlExecutionTime); + } + + public SimulatedExecutionTime getExecuteStreamingSqlExecutionTime() { + return executeStreamingSqlExecutionTime; + } + + public void setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime executeStreamingSqlExecutionTime) { + this.executeStreamingSqlExecutionTime = + Preconditions.checkNotNull(executeStreamingSqlExecutionTime); + } + + public SimulatedExecutionTime 
getGetSessionExecutionTime() { + return getSessionExecutionTime; + } + + public void setGetSessionExecutionTime(SimulatedExecutionTime getSessionExecutionTime) { + this.getSessionExecutionTime = Preconditions.checkNotNull(getSessionExecutionTime); + } + + public SimulatedExecutionTime getListSessionsExecutionTime() { + return listSessionsExecutionTime; + } + + public void setListSessionsExecutionTime(SimulatedExecutionTime listSessionsExecutionTime) { + this.listSessionsExecutionTime = Preconditions.checkNotNull(listSessionsExecutionTime); + } + + public SimulatedExecutionTime getPartitionQueryExecutionTime() { + return partitionQueryExecutionTime; + } + + public void setPartitionQueryExecutionTime(SimulatedExecutionTime partitionQueryExecutionTime) { + this.partitionQueryExecutionTime = Preconditions.checkNotNull(partitionQueryExecutionTime); + } + + public SimulatedExecutionTime getPartitionReadExecutionTime() { + return partitionReadExecutionTime; + } + + public void setPartitionReadExecutionTime(SimulatedExecutionTime partitionReadExecutionTime) { + this.partitionReadExecutionTime = Preconditions.checkNotNull(partitionReadExecutionTime); + } + + public SimulatedExecutionTime getReadExecutionTime() { + return readExecutionTime; + } + + public void setReadExecutionTime(SimulatedExecutionTime readExecutionTime) { + this.readExecutionTime = Preconditions.checkNotNull(readExecutionTime); + } + + public SimulatedExecutionTime getRollbackExecutionTime() { + return rollbackExecutionTime; + } + + public void setRollbackExecutionTime(SimulatedExecutionTime rollbackExecutionTime) { + this.rollbackExecutionTime = Preconditions.checkNotNull(rollbackExecutionTime); + } + + public SimulatedExecutionTime getStreamingReadExecutionTime() { + return streamingReadExecutionTime; + } + + public void setStreamingReadExecutionTime(SimulatedExecutionTime streamingReadExecutionTime) { + this.streamingReadExecutionTime = Preconditions.checkNotNull(streamingReadExecutionTime); + } + + 
Session addSession(Session session) { + Session prev; + if (session.getMultiplexed()) { + prev = multiplexedSessions.putIfAbsent(session.getName(), session); + } else { + prev = sessions.putIfAbsent(session.getName(), session); + } + return prev; + } + + void removeSession(String name) { + if (multiplexedSessions.containsKey(name)) { + multiplexedSessions.remove(name); + } else { + sessions.remove(name); + } + } + + Session getSession(String name) { + if (multiplexedSessions.containsKey(name)) { + return multiplexedSessions.get(name); + } else if (sessions.containsKey(name)) { + return sessions.get(name); + } + return null; + } + + static MultiplexedSessionPrecommitToken getTransactionPrecommitToken(ByteString transactionId) { + return getPrecommitToken("TransactionPrecommitToken", transactionId); + } + + static MultiplexedSessionPrecommitToken getResultSetPrecommitToken(ByteString transactionId) { + return getPrecommitToken("ResultSetPrecommitToken", transactionId); + } + + static MultiplexedSessionPrecommitToken getPartialResultSetPrecommitToken( + ByteString transactionId) { + return getPrecommitToken("PartialResultSetPrecommitToken", transactionId); + } + + static MultiplexedSessionPrecommitToken getExecuteBatchDmlResponsePrecommitToken( + ByteString transactionId) { + return getPrecommitToken("ExecuteBatchDmlResponsePrecommitToken", transactionId); + } + + static MultiplexedSessionPrecommitToken getCommitResponsePrecommitToken( + ByteString transactionId) { + return getPrecommitToken("CommitResponsePrecommitToken", transactionId); + } + + static MultiplexedSessionPrecommitToken getPrecommitToken( + String value, ByteString transactionId) { + transactionSequenceNo.putIfAbsent(transactionId, new AtomicInteger(0)); + + // Generates an incrementing sequence number + int seqNum = transactionSequenceNo.get(transactionId).incrementAndGet(); + return MultiplexedSessionPrecommitToken.newBuilder() + .setPrecommitToken(ByteString.copyFromUtf8(value)) + .setSeqNum(seqNum) 
+ .build(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockSpannerTestActions.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockSpannerTestActions.java new file mode 100644 index 000000000000..b7dbacff1180 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockSpannerTestActions.java @@ -0,0 +1,158 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.MockSpannerTestUtil.INVALID_SELECT_STATEMENT; +import static com.google.cloud.spanner.MockSpannerTestUtil.SELECT1; +import static com.google.cloud.spanner.MockSpannerTestUtil.UPDATE_STATEMENT; +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; + +import com.google.api.core.ApiFutures; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AsyncTransactionManager.TransactionContextFuture; +import com.google.cloud.spanner.Options.TransactionOption; +import java.util.Collections; +import java.util.concurrent.Executor; + +public class MockSpannerTestActions { + + static final Mutation TEST_MUTATION = + Mutation.newInsertBuilder("foo").set("id").to(1L).set("name").to("bar").build(); + + static Timestamp writeInsertMutation(DatabaseClient client) { + return client.write(Collections.singletonList(TEST_MUTATION)); + } + + static void writeInsertMutationWithOptions(DatabaseClient client, TransactionOption... options) { + client.writeWithOptions(Collections.singletonList(TEST_MUTATION), options); + } + + static Timestamp writeAtLeastOnceInsertMutation(DatabaseClient client) { + return client.writeAtLeastOnce(Collections.singletonList(TEST_MUTATION)); + } + + static void writeAtLeastOnceWithOptionsInsertMutation( + DatabaseClient client, TransactionOption... options) { + client.writeAtLeastOnceWithOptions(Collections.singletonList(TEST_MUTATION), options); + } + + static void executeBatchUpdateTransaction(DatabaseClient client, TransactionOption... 
options) { + client + .readWriteTransaction(options) + .run(transaction -> transaction.batchUpdate(Collections.singletonList(UPDATE_STATEMENT))); + } + + static void executePartitionedUpdate(DatabaseClient client) { + client.executePartitionedUpdate(UPDATE_STATEMENT); + } + + static void commitDeleteTransaction(DatabaseClient client, TransactionOption... options) { + client + .readWriteTransaction(options) + .run( + transaction -> { + transaction.buffer(Mutation.delete("TEST", KeySet.all())); + return null; + }); + } + + static void transactionManagerCommit(DatabaseClient client, TransactionOption... options) { + try (TransactionManager manager = client.transactionManager(options)) { + TransactionContext transaction = manager.begin(); + transaction.buffer(Mutation.delete("TEST", KeySet.all())); + manager.commit(); + } + } + + static void asyncRunnerCommit( + DatabaseClient client, Executor executor, TransactionOption... options) { + AsyncRunner runner = client.runAsync(options); + SpannerApiFutures.get( + runner.runAsync( + txn -> { + txn.buffer(Mutation.delete("TEST", KeySet.all())); + return ApiFutures.immediateFuture(null); + }, + executor)); + } + + static void transactionManagerAsyncCommit( + DatabaseClient client, Executor executor, TransactionOption... options) { + try (AsyncTransactionManager manager = client.transactionManagerAsync(options)) { + TransactionContextFuture transaction = manager.beginAsync(); + get( + transaction + .then( + (txn, input) -> { + txn.buffer(Mutation.delete("TEST", KeySet.all())); + return ApiFutures.immediateFuture(null); + }, + executor) + .commitAsync()); + } + } + + static Long executeSelect1(DatabaseClient client, TransactionOption... 
options) { + return client + .readWriteTransaction(options) + .run( + transaction -> { + try (ResultSet rs = transaction.executeQuery(SELECT1)) { + while (rs.next()) { + return rs.getLong(0); + } + } catch (AbortedException e) { + + } + return 0L; + }); + } + + static Long executeReadFoo(DatabaseClient client, TransactionOption... options) { + return client + .readWriteTransaction(options) + .run( + transaction -> { + try (ResultSet rs = + transaction.read("FOO", KeySet.all(), Collections.singletonList("ID"))) { + while (rs.next()) { + return rs.getLong(0); + } + } catch (AbortedException e) { + // Ignore the AbortedException and let the commit handle it. + } + return 0L; + }); + } + + static Long executeInvalidAndValidSql(DatabaseClient client, TransactionOption... options) { + return client + .readWriteTransaction(options) + .run( + transaction -> { + // This query carries the BeginTransaction, but fails. The BeginTransaction will + // then be carried by the subsequent statement. + try (ResultSet rs = transaction.executeQuery(INVALID_SELECT_STATEMENT)) { + SpannerException e = assertThrows(SpannerException.class, () -> rs.next()); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + } + return transaction.executeUpdate(UPDATE_STATEMENT); + }); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockSpannerTestUtil.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockSpannerTestUtil.java new file mode 100644 index 000000000000..e2e012f8ae0f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockSpannerTestUtil.java @@ -0,0 +1,153 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.cloud.spanner.Type.StructField; +import com.google.common.collect.ContiguousSet; +import com.google.protobuf.ListValue; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.TypeCode; +import java.util.Arrays; + +public class MockSpannerTestUtil { + public static final Statement SELECT1 = Statement.of("SELECT 1 AS COL1"); + private static final ResultSetMetadata SELECT1_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("COL1") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.INT64) + .build()) + .build()) + .build()) + .build(); + public static final com.google.spanner.v1.ResultSet SELECT1_RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("1").build()) + .build()) + .setMetadata(SELECT1_METADATA) + .build(); + public static final Statement SELECT1_FROM_TABLE = Statement.of("SELECT 1 FROM FOO WHERE 1=1"); + static final Statement INVALID_SELECT_STATEMENT = + Statement.of("SELECT * FROM NON_EXISTENT_TABLE"); + static final String TEST_PROJECT = "my-project"; + static final String TEST_INSTANCE = "my-instance"; + static final String TEST_DATABASE = "my-database"; + + static final Statement UPDATE_STATEMENT = Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=2"); + 
static final Statement INVALID_UPDATE_STATEMENT = + Statement.of("UPDATE NON_EXISTENT_TABLE SET BAR=1 WHERE BAZ=2"); + static final Statement UPDATE_ABORTED_STATEMENT = + Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=2 AND THIS_WILL_ABORT=TRUE"); + static final long UPDATE_COUNT = 1L; + + static final String READ_TABLE_NAME = "TestTable"; + static final String EMPTY_READ_TABLE_NAME = "EmptyTestTable"; + static final Iterable READ_COLUMN_NAMES = Arrays.asList("Key", "Value"); + static final Statement READ_ONE_KEY_VALUE_STATEMENT = + Statement.of("SELECT Key, Value FROM TestTable WHERE ID=1"); + static final Statement READ_MULTIPLE_KEY_VALUE_STATEMENT = + Statement.of("SELECT Key, Value FROM TestTable WHERE 1=1"); + static final Statement READ_ONE_EMPTY_KEY_VALUE_STATEMENT = + Statement.of("SELECT Key, Value FROM EmptyTestTable WHERE ID=1"); + static final Statement READ_ALL_EMPTY_KEY_VALUE_STATEMENT = + Statement.of("SELECT Key, Value FROM EmptyTestTable WHERE 1=1"); + static final ResultSetMetadata READ_KEY_VALUE_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("Key") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.STRING) + .build()) + .build()) + .addFields( + Field.newBuilder() + .setName("Value") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.STRING) + .build()) + .build()) + .build()) + .build(); + static final Type READ_TABLE_TYPE = + Type.struct(StructField.of("Key", Type.string()), StructField.of("Value", Type.string())); + static final com.google.spanner.v1.ResultSet EMPTY_KEY_VALUE_RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows(ListValue.newBuilder().build()) + .setMetadata(READ_KEY_VALUE_METADATA) + .build(); + static final com.google.spanner.v1.ResultSet READ_ONE_KEY_VALUE_RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + 
.addValues(com.google.protobuf.Value.newBuilder().setStringValue("k1").build()) + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("v1").build()) + .build()) + .setMetadata(READ_KEY_VALUE_METADATA) + .build(); + static final com.google.spanner.v1.ResultSet READ_MULTIPLE_KEY_VALUE_RESULTSET = + generateKeyValueResultSet(ContiguousSet.closed(1, 3)); + + static com.google.spanner.v1.ResultSet generateKeyValueResultSet(Iterable rows) { + com.google.spanner.v1.ResultSet.Builder builder = com.google.spanner.v1.ResultSet.newBuilder(); + for (Integer row : rows) { + builder.addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("k" + row).build()) + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("v" + row).build()) + .build()); + } + return builder.setMetadata(READ_KEY_VALUE_METADATA).build(); + } + + static final ResultSetMetadata READ_FIRST_NAME_SINGERS_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("FirstName") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.STRING) + .build()) + .build()) + .build()) + .build(); + static final com.google.spanner.v1.ResultSet READ_FIRST_NAME_SINGERS_RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder().setStringValue("FirstName").build()) + .build()) + .setMetadata(READ_FIRST_NAME_SINGERS_METADATA) + .build(); +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionDatabaseClientMockServerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionDatabaseClientMockServerTest.java new file mode 100644 index 000000000000..629b5611862d --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionDatabaseClientMockServerTest.java @@ -0,0 +1,2106 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.MockSpannerTestUtil.INVALID_UPDATE_STATEMENT; +import static com.google.cloud.spanner.MockSpannerTestUtil.UPDATE_COUNT; +import static com.google.cloud.spanner.MockSpannerTestUtil.UPDATE_STATEMENT; +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeFalse; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.rpc.ServerStream; +import com.google.cloud.NoCredentials; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AsyncTransactionManager.AsyncTransactionStep; +import com.google.cloud.spanner.AsyncTransactionManager.CommitTimestampFuture; +import com.google.cloud.spanner.AsyncTransactionManager.TransactionContextFuture; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; 
+import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.Options.RpcPriority; +import com.google.cloud.spanner.TransactionRunnerImpl.TransactionContextImpl; +import com.google.cloud.spanner.connection.RandomResultSetGenerator; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.ByteString; +import com.google.spanner.v1.*; +import com.google.spanner.v1.RequestOptions.Priority; +import com.google.spanner.v1.Session; +import io.grpc.Status; +import java.time.Duration; +import java.util.*; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class MultiplexedSessionDatabaseClientMockServerTest extends AbstractMockServerTest { + private static final Statement STATEMENT = Statement.of("select * from random"); + + @BeforeClass + public static void setupResults() { + assumeFalse(TestHelper.isMultiplexSessionDisabled()); + mockSpanner.putStatementResults( + StatementResult.query(STATEMENT, new RandomResultSetGenerator(1).generate())); + mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + mockSpanner.putStatementResult( + StatementResult.exception( + INVALID_UPDATE_STATEMENT, + Status.INVALID_ARGUMENT.withDescription("invalid statement").asRuntimeException())); + } + + @Before + public void createSpannerInstance() { + spanner = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + 
SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(true) + .setUseMultiplexedSessionForRW(true) + .setUseMultiplexedSessionPartitionedOps(true) + // Set the maintainer to loop once every 1ms + .setMultiplexedSessionMaintenanceLoopFrequency(Duration.ofMillis(1L)) + // Set multiplexed sessions to be replaced once every 1ms + .setMultiplexedSessionMaintenanceDuration(Duration.ofMillis(1L)) + .setFailOnSessionLeak() + .build()) + .build() + .getService(); + } + + @Test + public void testCreateSessionDeadlineExceeded() { + // Simulate a problem with the CreateSession RPC making it slow. + mockSpanner.setCreateSessionExecutionTime( + SimulatedExecutionTime.ofException(Status.DEADLINE_EXCEEDED.asRuntimeException())); + + Spanner testSpanner = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .build() + .getService(); + DatabaseClient client = testSpanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + // The first attempt should lead to a DEADLINE_EXCEEDED error being propagated from the + // CreateSession attempt. + try (ResultSet resultSet = client.singleUse().executeQuery(STATEMENT)) { + SpannerException exception = assertThrows(SpannerException.class, resultSet::next); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, exception.getErrorCode()); + } + + // Remove the simulated problem on the mock server. + // The next attempt should then succeed. + mockSpanner.removeAllExecutionTimes(); + try (ResultSet resultSet = client.singleUse().executeQuery(STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) {} + } + } + + @Test + public void testMultiUseReadOnlyTransactionUsesSameSession() { + // Execute two queries using the same transaction. Both queries should use the same + // session, also when the maintainer has executed in the meantime. 
+ DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + try (ReadOnlyTransaction transaction = client.readOnlyTransaction()) { + try (ResultSet resultSet = transaction.executeQuery(STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + + // Wait until the maintainer has replaced the current session. + waitForSessionToBeReplaced(client); + + try (ResultSet resultSet = transaction.executeQuery(STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + } + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(2, requests.size()); + assertEquals(requests.get(0).getSession(), requests.get(1).getSession()); + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testNewTransactionUsesNewSession() { + // Execute a single-use read-only transactions, then wait for the maintainer to replace the + // current session, and then run another single-use read-only transaction. The two transactions + // should use two different sessions. + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + try (ResultSet resultSet = client.singleUse().executeQuery(STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + + // Wait until the maintainer has replaced the current session. 
+ waitForSessionToBeReplaced(client); + + try (ResultSet resultSet = client.singleUse().executeQuery(STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(2, requests.size()); + assertNotEquals(requests.get(0).getSession(), requests.get(1).getSession()); + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(2L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(2L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testMaintainerMaintainsMultipleClients() { + // Verify that the single-threaded shared executor that is used by the multiplexed client + // maintains and replaces sessions from multiple clients. + DatabaseClientImpl client1 = + (DatabaseClientImpl) + spanner.getDatabaseClient(DatabaseId.of("p", "i", "d" + UUID.randomUUID())); + DatabaseClientImpl client2 = + (DatabaseClientImpl) + spanner.getDatabaseClient(DatabaseId.of("p", "i", "d" + UUID.randomUUID())); + + for (DatabaseClientImpl client : ImmutableList.of(client1, client2)) { + try (ResultSet resultSet = client.singleUse().executeQuery(STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + // Wait until the maintainer has replaced the current session. + waitForSessionToBeReplaced(client); + try (ResultSet resultSet = client.singleUse().executeQuery(STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + } + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(4, requests.size()); + // Put all session IDs in a Set to verify that they were all different. 
+ Set sessionIds = + requests.stream().map(ExecuteSqlRequest::getSession).collect(Collectors.toSet()); + assertEquals(4, sessionIds.size()); + + for (DatabaseClientImpl client : ImmutableList.of(client1, client2)) { + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(2L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(2L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + } + + @Test + public void testRetryWithTheSessionCreationWaitTime() { + mockSpanner.setCreateSessionExecutionTime( + SimulatedExecutionTime.ofExceptions( + Arrays.asList( + Status.DEADLINE_EXCEEDED + .withDescription( + "CallOptions deadline exceeded after 22.986872393s. " + + "Name resolution delay 6.911918521 seconds. [closed=[], " + + "open=[[connecting_and_lb_delay=32445014148ns, was_still_waiting]]]") + .asRuntimeException(), + Status.DEADLINE_EXCEEDED + .withDescription( + "CallOptions deadline exceeded after 22.986872393s. " + + "Name resolution delay 6.911918521 seconds. 
[closed=[], " + + "open=[[connecting_and_lb_delay=32445014148ns, was_still_waiting]]]") + .asRuntimeException()))); + + Spanner testSpanner = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(true) + .setUseMultiplexedSessionForRW(true) + .setUseMultiplexedSessionPartitionedOps(true) + .setWaitForMinSessionsDuration(Duration.ofSeconds(1)) + .setFailOnSessionLeak() + .build()) + .build() + .getService(); + + DatabaseClientImpl client = + (DatabaseClientImpl) testSpanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + try (ResultSet resultSet = client.singleUse().executeQuery(STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + + List createSessionRequests = + mockSpanner.getRequestsOfType(CreateSessionRequest.class); + assertEquals(3, createSessionRequests.size()); + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(1, requests.size()); + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + + testSpanner.close(); + } + + @Test + public void testRetryWithTheDatabaseNotFoundExceptionWithSessionCreationWaitTime() { + mockSpanner.setCreateSessionExecutionTime( + SimulatedExecutionTime.ofExceptions( + Collections.singletonList( + Status.NOT_FOUND.withDescription("Database not found.").asRuntimeException()))); + + Spanner testSpanner = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(true) + 
.setUseMultiplexedSessionForRW(true) + .setUseMultiplexedSessionPartitionedOps(true) + .setWaitForMinSessionsDuration(Duration.ofMillis(200)) + .setFailOnSessionLeak() + .build()) + .build() + .getService(); + + assertThrows( + SpannerException.class, () -> testSpanner.getDatabaseClient(DatabaseId.of("p", "i", "d"))); + + List createSessionRequests = + mockSpanner.getRequestsOfType(CreateSessionRequest.class); + assertEquals(1, createSessionRequests.size()); + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(0, requests.size()); + + testSpanner.close(); + } + + @Test + public void testRetryWithNoSessionCreationWaitTime() { + mockSpanner.setCreateSessionExecutionTime( + SimulatedExecutionTime.ofExceptions( + Collections.singletonList( + Status.DEADLINE_EXCEEDED + .withDescription( + "CallOptions deadline exceeded after 22.986872393s. " + + "Name resolution delay 6.911918521 seconds. [closed=[], " + + "open=[[connecting_and_lb_delay=32445014148ns, was_still_waiting]]]") + .asRuntimeException()))); + + Spanner testSpanner = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(true) + .setUseMultiplexedSessionForRW(true) + .setUseMultiplexedSessionPartitionedOps(true) + .setFailOnSessionLeak() + .build()) + .build() + .getService(); + + DatabaseClientImpl client = + (DatabaseClientImpl) testSpanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + SpannerException spannerException = + assertThrows( + SpannerException.class, + () -> { + try (ResultSet resultSet = client.singleUse().executeQuery(STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + }); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, spannerException.getErrorCode()); + + // The CreateSession RPC will be retried, and as the exception is 
removed by the first call, + // the second attempt will succeed. + try (ResultSet resultSet = client.singleUse().executeQuery(STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + + List createSessionRequests = + mockSpanner.getRequestsOfType(CreateSessionRequest.class); + assertEquals(2, createSessionRequests.size()); + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(1, requests.size()); + + testSpanner.close(); + } + + @Test + public void testRetryWithDelayedInResponseExceedsSessionCreationWaitTime() { + mockSpanner.setCreateSessionExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTimeAndExceptions( + 150, + 0, + Arrays.asList( + Status.DEADLINE_EXCEEDED + .withDescription( + "CallOptions deadline exceeded after 22.986872393s. " + + "Name resolution delay 6.911918521 seconds. [closed=[], " + + "open=[[connecting_and_lb_delay=32445014148ns, was_still_waiting]]]") + .asRuntimeException(), + Status.UNAVAILABLE + .withDescription( + "CallOptions deadline exceeded after 22.986872393s. " + + "Name resolution delay 6.911918521 seconds. 
[closed=[], " + + "open=[[connecting_and_lb_delay=32445014148ns, was_still_waiting]]]") + .asRuntimeException()))); + + Spanner testSpanner = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(true) + .setUseMultiplexedSessionForRW(true) + .setUseMultiplexedSessionPartitionedOps(true) + .setWaitForMinSessionsDuration(Duration.ofMillis(200)) + .setFailOnSessionLeak() + .build()) + .build() + .getService(); + + SpannerException spannerException = + assertThrows( + SpannerException.class, + () -> { + DatabaseClientImpl client = + (DatabaseClientImpl) testSpanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + try (ResultSet resultSet = client.singleUse().executeQuery(STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + }); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, spannerException.getErrorCode()); + + List createSessionRequests = + mockSpanner.getRequestsOfType(CreateSessionRequest.class); + assertEquals(2, createSessionRequests.size()); + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(0, requests.size()); + + testSpanner.close(); + } + + @Test + public void testRetryWithDelayInExceptionWithInSessionCreationWaitTime() { + mockSpanner.setCreateSessionExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTimeAndExceptions( + 50, + 0, + Arrays.asList( + Status.DEADLINE_EXCEEDED + .withDescription( + "CallOptions deadline exceeded after 22.986872393s. " + + "Name resolution delay 6.911918521 seconds. [closed=[], " + + "open=[[connecting_and_lb_delay=32445014148ns, was_still_waiting]]]") + .asRuntimeException(), + Status.DEADLINE_EXCEEDED + .withDescription( + "CallOptions deadline exceeded after 22.986872393s. " + + "Name resolution delay 6.911918521 seconds. 
[closed=[], " + + "open=[[connecting_and_lb_delay=32445014148ns, was_still_waiting]]]") + .asRuntimeException()))); + + Spanner testSpanner = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(true) + .setUseMultiplexedSessionForRW(true) + .setUseMultiplexedSessionPartitionedOps(true) + .setWaitForMinSessionsDuration(Duration.ofMillis(200)) + .setFailOnSessionLeak() + .build()) + .build() + .getService(); + + DatabaseClientImpl client = + (DatabaseClientImpl) testSpanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + try (ResultSet resultSet = client.singleUse().executeQuery(STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + + List createSessionRequests = + mockSpanner.getRequestsOfType(CreateSessionRequest.class); + assertEquals(3, createSessionRequests.size()); + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(1, requests.size()); + + testSpanner.close(); + } + + @Test + public void testUnimplementedErrorOnCreationIsPropagated() { + mockSpanner.setCreateSessionExecutionTime( + SimulatedExecutionTime.ofException( + Status.UNIMPLEMENTED + .withDescription("Multiplexed sessions are not implemented") + .asRuntimeException())); + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + // Get the current session reference. This will block until the CreateSession RPC has failed. 
+ assertNotNull(client.multiplexedSessionDatabaseClient); + SpannerException spannerException = + assertThrows( + SpannerException.class, + client.multiplexedSessionDatabaseClient::getCurrentSessionReference); + assertEquals(ErrorCode.UNIMPLEMENTED, spannerException.getErrorCode()); + + spannerException = + assertThrows(SpannerException.class, () -> client.singleUse().executeQuery(STATEMENT)); + assertEquals(ErrorCode.UNIMPLEMENTED, spannerException.getErrorCode()); + + // Verify that we received no ExecuteSqlRequests. + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + } + + @Test + public void testWriteAtLeastOnceAborted() { + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + // Force the Commit RPC to return Aborted the first time it is called. The exception is cleared + // after the first call, so the retry should succeed. + mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + Timestamp timestamp = MockSpannerTestActions.writeAtLeastOnceInsertMutation(client); + assertNotNull(timestamp); + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertEquals(2, commitRequests.size()); + for (CommitRequest request : commitRequests) { + assertTrue(mockSpanner.getSession(request.getSession()).getMultiplexed()); + } + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testWriteAtLeastOnce() { + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + Timestamp timestamp = MockSpannerTestActions.writeAtLeastOnceInsertMutation(client); + assertNotNull(timestamp); + + List commitRequests = 
mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(commitRequests).hasSize(1); + CommitRequest commit = commitRequests.get(0); + assertNotNull(commit.getSingleUseTransaction()); + assertTrue(commit.getSingleUseTransaction().hasReadWrite()); + assertFalse(commit.getSingleUseTransaction().getExcludeTxnFromChangeStreams()); + assertNotNull(commit.getRequestOptions()); + assertEquals(Priority.PRIORITY_UNSPECIFIED, commit.getRequestOptions().getPriority()); + assertTrue(mockSpanner.getSession(commit.getSession()).getMultiplexed()); + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testWriteAtLeastOnceWithCommitStats() { + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + CommitResponse response = + client.writeAtLeastOnceWithOptions( + Collections.singletonList( + Mutation.newInsertBuilder("FOO").set("ID").to(1L).set("NAME").to("Bar").build()), + Options.commitStats()); + assertNotNull(response); + assertNotNull(response.getCommitTimestamp()); + assertNotNull(response.getCommitStats()); + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(commitRequests).hasSize(1); + CommitRequest commit = commitRequests.get(0); + assertNotNull(commit.getSingleUseTransaction()); + assertTrue(commit.getSingleUseTransaction().hasReadWrite()); + assertFalse(commit.getSingleUseTransaction().getExcludeTxnFromChangeStreams()); + assertNotNull(commit.getRequestOptions()); + assertEquals(Priority.PRIORITY_UNSPECIFIED, commit.getRequestOptions().getPriority()); + assertTrue(mockSpanner.getSession(commit.getSession()).getMultiplexed()); + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, 
client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testWriteAtLeastOnceWithOptions() { + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + MockSpannerTestActions.writeAtLeastOnceWithOptionsInsertMutation( + client, Options.priority(RpcPriority.LOW)); + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(commitRequests).hasSize(1); + CommitRequest commit = commitRequests.get(0); + assertNotNull(commit.getSingleUseTransaction()); + assertTrue(commit.getSingleUseTransaction().hasReadWrite()); + assertFalse(commit.getSingleUseTransaction().getExcludeTxnFromChangeStreams()); + assertNotNull(commit.getRequestOptions()); + assertEquals(Priority.PRIORITY_LOW, commit.getRequestOptions().getPriority()); + assertTrue(mockSpanner.getSession(commit.getSession()).getMultiplexed()); + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testWriteAtLeastOnceWithTagOptions() { + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + MockSpannerTestActions.writeAtLeastOnceWithOptionsInsertMutation( + client, Options.tag("app=spanner,env=test")); + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(commitRequests).hasSize(1); + CommitRequest commit = commitRequests.get(0); + assertNotNull(commit.getSingleUseTransaction()); + assertTrue(commit.getSingleUseTransaction().hasReadWrite()); + assertFalse(commit.getSingleUseTransaction().getExcludeTxnFromChangeStreams()); + assertNotNull(commit.getRequestOptions()); + 
assertThat(commit.getRequestOptions().getTransactionTag()).isEqualTo("app=spanner,env=test"); + assertThat(commit.getRequestOptions().getRequestTag()).isEmpty(); + assertTrue(mockSpanner.getSession(commit.getSession()).getMultiplexed()); + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testWriteAtLeastOnceWithExcludeTxnFromChangeStreams() { + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + MockSpannerTestActions.writeAtLeastOnceWithOptionsInsertMutation( + client, Options.excludeTxnFromChangeStreams()); + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(commitRequests).hasSize(1); + CommitRequest commit = commitRequests.get(0); + assertNotNull(commit.getSingleUseTransaction()); + assertTrue(commit.getSingleUseTransaction().hasReadWrite()); + assertTrue(commit.getSingleUseTransaction().getExcludeTxnFromChangeStreams()); + assertTrue(mockSpanner.getSession(commit.getSession()).getMultiplexed()); + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testReadWriteTransactionUsingTransactionRunner() { + // Queries executed within a R/W transaction via TransactionRunner should use a multiplexed + // session. + // During a retry (due to an ABORTED error), the transaction should use the same multiplexed + // session as before, assuming the maintainer hasn't run in the meantime. 
+ DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + // Force the Commit RPC to return Aborted the first time it is called. The exception is cleared + // after the first call, so the retry should succeed. + mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + + client + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet resultSet = transaction.executeQuery(STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + return null; + }); + + List executeSqlRequests = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(2, executeSqlRequests.size()); + assertEquals(executeSqlRequests.get(0).getSession(), executeSqlRequests.get(1).getSession()); + + // Verify the requests are executed using multiplexed sessions + for (ExecuteSqlRequest request : executeSqlRequests) { + assertTrue(mockSpanner.getSession(request.getSession()).getMultiplexed()); + } + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testReadWriteTransactionUsingTransactionManager() { + // Queries executed within a R/W transaction via TransactionManager should use a multiplexed + // session. + // During a retry (due to an ABORTED error), the transaction should use the same multiplexed + // session as before, assuming the maintainer hasn't run in the meantime. + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + // Force the Commit RPC to return Aborted the first time it is called. The exception is cleared + // after the first call, so the retry should succeed. 
+ mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + + try (TransactionManager manager = client.transactionManager()) { + TransactionContext transaction = manager.begin(); + while (true) { + try { + try (ResultSet resultSet = transaction.executeQuery(STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + manager.commit(); + assertNotNull(manager.getCommitTimestamp()); + break; + } catch (AbortedException e) { + transaction = manager.resetForRetry(); + } + } + } + + List executeSqlRequests = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(2, executeSqlRequests.size()); + assertEquals(executeSqlRequests.get(0).getSession(), executeSqlRequests.get(1).getSession()); + + // Verify the requests are executed using multiplexed sessions + for (ExecuteSqlRequest request : executeSqlRequests) { + assertTrue(mockSpanner.getSession(request.getSession()).getMultiplexed()); + } + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testMutationUsingWrite() { + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + // Force the Commit RPC to return Aborted the first time it is called. The exception is cleared + // after the first call, so the retry should succeed. 
+ mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + Timestamp timestamp = MockSpannerTestActions.writeInsertMutation(client); + assertNotNull(timestamp); + + List beginTransactionRequests = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertEquals(2, beginTransactionRequests.size()); + for (BeginTransactionRequest request : beginTransactionRequests) { + // Verify that mutation key is set for mutations-only case in read-write transaction. + assertTrue(request.hasMutationKey()); + } + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertEquals(2, commitRequests.size()); + for (CommitRequest request : commitRequests) { + assertTrue(mockSpanner.getSession(request.getSession()).getMultiplexed()); + // Verify that the precommit token is set in CommitRequest + assertTrue(request.hasPrecommitToken()); + assertEquals( + ByteString.copyFromUtf8("TransactionPrecommitToken"), + request.getPrecommitToken().getPrecommitToken()); + } + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testMutationUsingWriteWithOptions() { + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + CommitResponse response = + client.writeWithOptions( + Collections.singletonList( + Mutation.newInsertBuilder("FOO").set("ID").to(1L).set("NAME").to("Bar").build()), + Options.tag("app=spanner,env=test")); + assertNotNull(response); + assertNotNull(response.getCommitTimestamp()); + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertEquals(1L, commitRequests.size()); + CommitRequest commit = commitRequests.get(0); + 
assertNotNull(commit.getRequestOptions()); + assertEquals("app=spanner,env=test", commit.getRequestOptions().getTransactionTag()); + assertTrue(mockSpanner.getSession(commit.getSession()).getMultiplexed()); + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testReadWriteTransactionUsingAsyncTransactionManager() throws Exception { + // Updates executed within a R/W transaction via AsyncTransactionManager should use a + // multiplexed session. + // During a retry (due to an ABORTED error), the transaction should use the same multiplexed + // session as before, assuming the maintainer hasn't run in the meantime. + final AtomicInteger attempt = new AtomicInteger(); + CountDownLatch abortedLatch = new CountDownLatch(1); + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + try (AsyncTransactionManager manager = client.transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + while (true) { + try { + attempt.incrementAndGet(); + AsyncTransactionStep updateCount = + transactionContextFuture.then( + (transaction, ignored) -> transaction.executeUpdateAsync(UPDATE_STATEMENT), + MoreExecutors.directExecutor()); + updateCount.then( + (transaction, ignored) -> { + if (attempt.get() == 1) { + mockSpanner.abortTransaction(transaction); + abortedLatch.countDown(); + } + return ApiFutures.immediateFuture(null); + }, + MoreExecutors.directExecutor()); + abortedLatch.await(10L, TimeUnit.SECONDS); + CommitTimestampFuture commitTimestamp = updateCount.commitAsync(); + assertEquals(UPDATE_COUNT, updateCount.get().longValue()); + assertNotNull(commitTimestamp.get()); + assertEquals(2L, attempt.get()); + break; + } catch (AbortedException e) { + 
transactionContextFuture = manager.resetForRetryAsync(); + } + } + } + + List executeSqlRequests = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(2, executeSqlRequests.size()); + assertEquals(executeSqlRequests.get(0).getSession(), executeSqlRequests.get(1).getSession()); + + // Verify the requests are executed using multiplexed sessions + for (ExecuteSqlRequest request : executeSqlRequests) { + assertTrue(mockSpanner.getSession(request.getSession()).getMultiplexed()); + } + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testReadWriteTransactionUsingAsyncRunner() throws Exception { + // Updates executed within a R/W transaction via AsyncRunner should use a multiplexed + // session. + // During a retry (due to an ABORTED error), the transaction should use the same multiplexed + // session as before, assuming the maintainer hasn't run in the meantime. 
+ final AtomicInteger attempt = new AtomicInteger(); + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + AsyncRunner runner = client.runAsync(); + ApiFuture updateCount = + runner.runAsync( + txn -> { + ApiFuture updateCount1 = txn.executeUpdateAsync(UPDATE_STATEMENT); + if (attempt.incrementAndGet() == 1) { + mockSpanner.abortTransaction(txn); + } + return updateCount1; + }, + MoreExecutors.directExecutor()); + assertEquals(UPDATE_COUNT, updateCount.get().longValue()); + assertEquals(2L, attempt.get()); + + List executeSqlRequests = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(2L, executeSqlRequests.size()); + assertEquals(executeSqlRequests.get(0).getSession(), executeSqlRequests.get(1).getSession()); + + // Verify the requests are executed using multiplexed sessions + for (ExecuteSqlRequest request : executeSqlRequests) { + assertTrue(mockSpanner.getSession(request.getSession()).getMultiplexed()); + } + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testAsyncRunnerIsNonBlockingWithMultiplexedSession() throws Exception { + mockSpanner.freeze(); + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + AsyncRunner runner = client.runAsync(); + ApiFuture res = + runner.runAsync( + txn -> { + txn.executeUpdateAsync(UPDATE_STATEMENT); + return ApiFutures.immediateFuture(null); + }, + MoreExecutors.directExecutor()); + ApiFuture ts = runner.getCommitTimestamp(); + mockSpanner.unfreeze(); + assertThat(res.get()).isNull(); + assertThat(ts.get()).isNotNull(); + + List executeSqlRequests = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(1L, executeSqlRequests.size()); + + // Verify the 
requests are executed using multiplexed sessions + for (ExecuteSqlRequest request : executeSqlRequests) { + assertTrue(mockSpanner.getSession(request.getSession()).getMultiplexed()); + } + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testAbortedReadWriteTxnUsesPreviousTxnIdOnRetryWithInlineBegin() { + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + // Force the Commit RPC to return Aborted the first time it is called. The exception is cleared + // after the first call, so the retry should succeed. + mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + TransactionRunner runner = client.readWriteTransaction(); + AtomicReference validTransactionId = new AtomicReference<>(); + runner.run( + transaction -> { + try (ResultSet resultSet = transaction.executeQuery(STATEMENT)) { + while (resultSet.next()) {} + } + + TransactionContextImpl impl = (TransactionContextImpl) transaction; + if (validTransactionId.get() == null) { + // Track the first not-null transactionId. This transaction gets ABORTED during commit + // operation and gets retried. 
+ validTransactionId.set(impl.transactionId); + } + return null; + }); + + List executeSqlRequests = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(2, executeSqlRequests.size()); + + // Verify the requests are executed using multiplexed sessions + for (ExecuteSqlRequest request : executeSqlRequests) { + assertTrue(mockSpanner.getSession(request.getSession()).getMultiplexed()); + } + + // Verify that the first request uses inline begin, and the previous transaction ID is set to + // ByteString.EMPTY + assertTrue(executeSqlRequests.get(0).hasTransaction()); + assertTrue(executeSqlRequests.get(0).getTransaction().hasBegin()); + assertTrue(executeSqlRequests.get(0).getTransaction().getBegin().hasReadWrite()); + assertNotNull( + executeSqlRequests + .get(0) + .getTransaction() + .getBegin() + .getReadWrite() + .getMultiplexedSessionPreviousTransactionId()); + assertEquals( + ByteString.EMPTY, + executeSqlRequests + .get(0) + .getTransaction() + .getBegin() + .getReadWrite() + .getMultiplexedSessionPreviousTransactionId()); + + // Verify that the second request uses inline begin, and the previous transaction ID is set + // appropriately + assertTrue(executeSqlRequests.get(1).hasTransaction()); + assertTrue(executeSqlRequests.get(1).getTransaction().hasBegin()); + assertTrue(executeSqlRequests.get(1).getTransaction().getBegin().hasReadWrite()); + assertNotNull( + executeSqlRequests + .get(1) + .getTransaction() + .getBegin() + .getReadWrite() + .getMultiplexedSessionPreviousTransactionId()); + assertNotEquals( + ByteString.EMPTY, + executeSqlRequests + .get(1) + .getTransaction() + .getBegin() + .getReadWrite() + .getMultiplexedSessionPreviousTransactionId()); + assertEquals( + validTransactionId.get(), + executeSqlRequests + .get(1) + .getTransaction() + .getBegin() + .getReadWrite() + .getMultiplexedSessionPreviousTransactionId()); + } + + @Test + public void testAbortedReadWriteTxnUsesPreviousTxnIdOnRetryWithExplicitBegin() { + 
DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + // Force the Commit RPC to return Aborted the first time it is called. The exception is cleared + // after the first call, so the retry should succeed. + mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + TransactionRunner runner = client.readWriteTransaction(); + AtomicReference validTransactionId = new AtomicReference<>(); + Long updateCount = + runner.run( + transaction -> { + // This update statement carries the BeginTransaction, but fails. This will + // cause the entire transaction to be retried with an explicit + // BeginTransaction RPC to ensure all statements in the transaction are + // actually executed against the same transaction. + TransactionContextImpl impl = (TransactionContextImpl) transaction; + if (validTransactionId.get() == null) { + // Track the first not-null transactionId. This transaction gets ABORTED during + // commit operation and gets retried. 
+ validTransactionId.set(impl.transactionId); + } + SpannerException e = + assertThrows( + SpannerException.class, + () -> transaction.executeUpdate(INVALID_UPDATE_STATEMENT)); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + return transaction.executeUpdate(UPDATE_STATEMENT); + }); + + assertThat(updateCount).isEqualTo(1L); + List beginTransactionRequests = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertEquals(2, beginTransactionRequests.size()); + + // Verify the requests are executed using multiplexed sessions + for (BeginTransactionRequest request : beginTransactionRequests) { + assertTrue(mockSpanner.getSession(request.getSession()).getMultiplexed()); + } + + // Verify that explicit begin transaction is called during retry, and the previous transaction + // ID is set to ByteString.EMPTY + assertTrue(beginTransactionRequests.get(0).hasOptions()); + assertTrue(beginTransactionRequests.get(0).getOptions().hasReadWrite()); + assertNotNull( + beginTransactionRequests + .get(0) + .getOptions() + .getReadWrite() + .getMultiplexedSessionPreviousTransactionId()); + assertEquals( + ByteString.EMPTY, + beginTransactionRequests + .get(0) + .getOptions() + .getReadWrite() + .getMultiplexedSessionPreviousTransactionId()); + + // The previous transaction with id (txn1) fails during commit operation with ABORTED error. 
+ // Verify that explicit begin transaction is called during retry, and the previous transaction + // ID is not ByteString.EMPTY (should be set to txn1) + assertTrue(beginTransactionRequests.get(1).hasOptions()); + assertTrue(beginTransactionRequests.get(1).getOptions().hasReadWrite()); + assertNotNull( + beginTransactionRequests + .get(1) + .getOptions() + .getReadWrite() + .getMultiplexedSessionPreviousTransactionId()); + assertNotEquals( + ByteString.EMPTY, + beginTransactionRequests + .get(1) + .getOptions() + .getReadWrite() + .getMultiplexedSessionPreviousTransactionId()); + assertEquals( + validTransactionId.get(), + beginTransactionRequests + .get(1) + .getOptions() + .getReadWrite() + .getMultiplexedSessionPreviousTransactionId()); + } + + @Test + public void testPrecommitTokenForResultSet() { + // This test verifies that the precommit token received from the ResultSet is properly tracked + // and set in the CommitRequest. + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + Long count = + client + .readWriteTransaction() + .run( + transaction -> { + long res = transaction.executeUpdate(UPDATE_STATEMENT); + + // Verify that the latest precommit token is tracked in the transaction context. 
+ TransactionContextImpl impl = (TransactionContextImpl) transaction; + assertNotNull(impl.getLatestPrecommitToken()); + assertEquals( + ByteString.copyFromUtf8("ResultSetPrecommitToken"), + impl.getLatestPrecommitToken().getPrecommitToken()); + return res; + }); + + assertNotNull(count); + assertEquals(1, count.longValue()); + + // Verify that the latest precommit token is set in the CommitRequest + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertEquals(1, commitRequests.size()); + assertTrue(mockSpanner.getSession(commitRequests.get(0).getSession()).getMultiplexed()); + assertNotNull(commitRequests.get(0).getPrecommitToken()); + assertEquals( + ByteString.copyFromUtf8("ResultSetPrecommitToken"), + commitRequests.get(0).getPrecommitToken().getPrecommitToken()); + } + + @Test + public void testPrecommitTokenForExecuteBatchDmlResponse() { + // This test verifies that the precommit token received from the ExecuteBatchDmlResponse is + // properly tracked and set in the CommitRequest. + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + long[] count = + client + .readWriteTransaction() + .run( + transaction -> { + long[] res = transaction.batchUpdate(Lists.newArrayList(UPDATE_STATEMENT)); + + // Verify that the latest precommit token is tracked in the transaction context. 
+ TransactionContextImpl impl = (TransactionContextImpl) transaction; + assertNotNull(impl.getLatestPrecommitToken()); + assertEquals( + ByteString.copyFromUtf8("ExecuteBatchDmlResponsePrecommitToken"), + impl.getLatestPrecommitToken().getPrecommitToken()); + return res; + }); + + assertNotNull(count); + assertEquals(1, count.length); + + // Verify that the latest precommit token is set in the CommitRequest + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertEquals(1, commitRequests.size()); + assertTrue(mockSpanner.getSession(commitRequests.get(0).getSession()).getMultiplexed()); + assertNotNull(commitRequests.get(0).getPrecommitToken()); + assertEquals( + ByteString.copyFromUtf8("ExecuteBatchDmlResponsePrecommitToken"), + commitRequests.get(0).getPrecommitToken().getPrecommitToken()); + } + + @Test + public void testPrecommitTokenForPartialResultSet() { + // This test verifies that the precommit token received from the PartialResultSet is properly + // tracked and set in the CommitRequest. + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + client + .readWriteTransaction() + .run( + transaction -> { + ResultSet resultSet = transaction.executeQuery(STATEMENT); + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + + // Verify that the latest precommit token is tracked in the transaction context. 
+ TransactionContextImpl impl = (TransactionContextImpl) transaction; + assertNotNull(impl.getLatestPrecommitToken()); + assertEquals( + ByteString.copyFromUtf8("PartialResultSetPrecommitToken"), + impl.getLatestPrecommitToken().getPrecommitToken()); + return null; + }); + + // Verify that the latest precommit token is set in the CommitRequest + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertEquals(1, commitRequests.size()); + assertTrue(mockSpanner.getSession(commitRequests.get(0).getSession()).getMultiplexed()); + assertNotNull(commitRequests.get(0).getPrecommitToken()); + assertEquals( + ByteString.copyFromUtf8("PartialResultSetPrecommitToken"), + commitRequests.get(0).getPrecommitToken().getPrecommitToken()); + } + + @Test + public void testTxnTracksPrecommitTokenWithLatestSeqNo() { + // This test ensures that the read-write transaction tracks the precommit token with the + // highest sequence number and sets it in the CommitRequest. + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + client + .readWriteTransaction() + .run( + transaction -> { + // Returns a ResultSet containing the precommit token (ResultSetPrecommitToken) + transaction.executeUpdate(UPDATE_STATEMENT); + + // Returns a PartialResultSet containing the precommit token + // (PartialResultSetPrecommitToken) + ResultSet resultSet = transaction.executeQuery(STATEMENT); + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + + // Returns an ExecuteBatchDmlResponse containing the precommit token + // (ExecuteBatchDmlResponsePrecommitToken). + // Since this is the last request received by the mock Spanner, it should be the most + // recent precommit token tracked by the transaction context. + transaction.batchUpdate(Lists.newArrayList(UPDATE_STATEMENT)); + + // Verify that the latest precommit token with highest sequence number is tracked in + // the transaction context. 
+ TransactionContextImpl impl = (TransactionContextImpl) transaction; + assertNotNull(impl.getLatestPrecommitToken()); + assertEquals( + ByteString.copyFromUtf8("ExecuteBatchDmlResponsePrecommitToken"), + impl.getLatestPrecommitToken().getPrecommitToken()); + return null; + }); + + // Verify that the latest precommit token is set in the CommitRequest + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertEquals(1, commitRequests.size()); + assertTrue(mockSpanner.getSession(commitRequests.get(0).getSession()).getMultiplexed()); + assertNotNull(commitRequests.get(0).getPrecommitToken()); + assertEquals( + ByteString.copyFromUtf8("ExecuteBatchDmlResponsePrecommitToken"), + commitRequests.get(0).getPrecommitToken().getPrecommitToken()); + } + + @Test + public void testPrecommitTokenForTransactionResponse() { + // This test verifies that + // 1. A random mutation from the list is set in BeginTransactionRequest. + // 2. The precommit token from the Transaction response is correctly tracked + // and applied in the CommitRequest. The Transaction response includes a precommit token + // only when the read-write transaction consists solely of mutations. + + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + client + .readWriteTransaction() + .run( + transaction -> { + Mutation mutation = + Mutation.newInsertBuilder("FOO").set("ID").to(1L).set("NAME").to("Bar").build(); + transaction.buffer(mutation); + return null; + }); + + // Verify that for mutation only case, a mutation key is set in BeginTransactionRequest. 
+ List beginTxnRequest = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertEquals(1, beginTxnRequest.size()); + assertTrue(mockSpanner.getSession(beginTxnRequest.get(0).getSession()).getMultiplexed()); + assertTrue(beginTxnRequest.get(0).hasMutationKey()); + assertTrue(beginTxnRequest.get(0).getMutationKey().hasInsert()); + + // Verify that the latest precommit token is set in the CommitRequest + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertEquals(1L, commitRequests.size()); + assertTrue(mockSpanner.getSession(commitRequests.get(0).getSession()).getMultiplexed()); + assertNotNull(commitRequests.get(0).getPrecommitToken()); + assertEquals( + ByteString.copyFromUtf8("TransactionPrecommitToken"), + commitRequests.get(0).getPrecommitToken().getPrecommitToken()); + } + + @Test + public void testMutationOnlyCaseAborted() { + // This test verifies that in the case of mutations-only, when a transaction is retried after an + // ABORT, the mutation key is correctly set in the BeginTransaction request. + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + // Force the Commit RPC to return Aborted the first time it is called. The exception is cleared + // after the first call, so the retry should succeed. + mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + client + .readWriteTransaction() + .run( + transaction -> { + Mutation mutation = + Mutation.newInsertBuilder("FOO").set("ID").to(1L).set("NAME").to("Bar").build(); + transaction.buffer(mutation); + return null; + }); + + // Verify that for mutation only case, a mutation key is set in BeginTransactionRequest. 
+ List beginTransactionRequests = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertEquals(2, beginTransactionRequests.size()); + // Verify the requests are executed using multiplexed sessions + for (BeginTransactionRequest request : beginTransactionRequests) { + assertTrue(mockSpanner.getSession(request.getSession()).getMultiplexed()); + assertTrue(request.hasMutationKey()); + assertTrue(request.getMutationKey().hasInsert()); + } + + // Verify that the latest precommit token is set in the CommitRequest + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertEquals(2L, commitRequests.size()); + for (CommitRequest request : commitRequests) { + assertTrue(mockSpanner.getSession(request.getSession()).getMultiplexed()); + assertNotNull(request.getPrecommitToken()); + assertEquals( + ByteString.copyFromUtf8("TransactionPrecommitToken"), + request.getPrecommitToken().getPrecommitToken()); + } + } + + @Test + public void testMutationOnlyUsingTransactionManager() { + // Test verifies mutation-only case within a R/W transaction via TransactionManager. + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + try (TransactionManager manager = client.transactionManager()) { + TransactionContext transaction = manager.begin(); + while (true) { + try { + Mutation mutation = + Mutation.newInsertBuilder("FOO").set("ID").to(1L).set("NAME").to("Bar").build(); + transaction.buffer(mutation); + manager.commit(); + assertNotNull(manager.getCommitTimestamp()); + break; + } catch (AbortedException e) { + transaction = manager.resetForRetry(); + } + } + } + + // Verify that for mutation only case, a mutation key is set in BeginTransactionRequest. 
+ List beginTransactionRequests = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertThat(beginTransactionRequests).hasSize(1); + BeginTransactionRequest beginTransaction = beginTransactionRequests.get(0); + assertTrue(mockSpanner.getSession(beginTransaction.getSession()).getMultiplexed()); + assertTrue(beginTransaction.hasMutationKey()); + assertTrue(beginTransaction.getMutationKey().hasInsert()); + + // Verify that the latest precommit token is set in the CommitRequest + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(commitRequests).hasSize(1); + CommitRequest commitRequest = commitRequests.get(0); + assertNotNull(commitRequest.getPrecommitToken()); + assertEquals( + ByteString.copyFromUtf8("TransactionPrecommitToken"), + commitRequest.getPrecommitToken().getPrecommitToken()); + } + + @Test + public void testMutationOnlyUsingAsyncRunner() { + // Test verifies mutation-only case within a R/W transaction via AsyncRunner. + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + MockSpannerTestActions.asyncRunnerCommit(client, MoreExecutors.directExecutor()); + // Verify that the mutation key is set in BeginTransactionRequest + List beginTransactions = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertThat(beginTransactions).hasSize(1); + BeginTransactionRequest beginTransaction = beginTransactions.get(0); + assertTrue(beginTransaction.hasMutationKey()); + assertTrue(beginTransaction.getMutationKey().hasDelete()); + + // Verify that the latest precommit token is set in the CommitRequest + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(commitRequests).hasSize(1); + CommitRequest commitRequest = commitRequests.get(0); + assertNotNull(commitRequest.getPrecommitToken()); + assertEquals( + ByteString.copyFromUtf8("TransactionPrecommitToken"), + 
commitRequest.getPrecommitToken().getPrecommitToken()); + } + + @Test + public void testMutationOnlyUsingAsyncTransactionManager() { + // Test verifies mutation-only case within a R/W transaction via AsyncTransactionManager. + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + MockSpannerTestActions.transactionManagerAsyncCommit(client, MoreExecutors.directExecutor()); + + // Verify that the mutation key is set in BeginTransactionRequest + List beginTransactions = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertThat(beginTransactions).hasSize(1); + BeginTransactionRequest beginTransaction = beginTransactions.get(0); + assertTrue(beginTransaction.hasMutationKey()); + assertTrue(beginTransaction.getMutationKey().hasDelete()); + + // Verify that the latest precommit token is set in the CommitRequest + List requests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(requests).hasSize(1); + CommitRequest request = requests.get(0); + assertNotNull(request.getPrecommitToken()); + assertEquals( + ByteString.copyFromUtf8("TransactionPrecommitToken"), + request.getPrecommitToken().getPrecommitToken()); + } + + private Spanner setupSpannerBySkippingBeginTransactionVerificationForMux() { + return SpannerOptions.newBuilder() + .setProjectId("test-project") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(true) + .setUseMultiplexedSessionForRW(true) + .setSkipVerifyingBeginTransactionForMuxRW(true) + .build()) + .build() + .getService(); + } + + private void verifyMutationKeySetInBeginTransactionRequests( + List beginTransactionRequests) { + assertEquals(2, beginTransactionRequests.size()); + // Verify the requests are executed using multiplexed sessions + for (BeginTransactionRequest request : beginTransactionRequests) { + 
assertTrue(mockSpanner.getSession(request.getSession()).getMultiplexed()); + assertTrue(request.hasMutationKey()); + assertTrue(request.getMutationKey().hasInsert()); + } + } + + private void verifyPreCommitTokenSetInCommitRequest(List commitRequests) { + assertEquals(1L, commitRequests.size()); + for (CommitRequest request : commitRequests) { + assertTrue(mockSpanner.getSession(request.getSession()).getMultiplexed()); + assertNotNull(request.getPrecommitToken()); + assertEquals( + ByteString.copyFromUtf8("TransactionPrecommitToken"), + request.getPrecommitToken().getPrecommitToken()); + } + } + + // The following 4 tests validate mutation-only cases where the BeginTransaction RPC fails with an + // ABORTED or retryable error + @Test + public void testMutationOnlyCaseAbortedDuringBeginTransaction() { + // This test ensures that when a transaction containing only mutations is retried after an + // ABORT error in the BeginTransaction RPC: + // 1. The mutation key is correctly included in the BeginTransaction request. + // 2. The precommit token is properly set in the Commit request. + Spanner spanner = setupSpannerBySkippingBeginTransactionVerificationForMux(); + + // Force the BeginTransaction RPC to return Aborted the first time it is called. The exception + // is cleared after the first call, so the retry should succeed. + mockSpanner.setBeginTransactionExecutionTime( + SimulatedExecutionTime.ofException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + client + .readWriteTransaction() + .run( + transaction -> { + Mutation mutation = + Mutation.newInsertBuilder("FOO").set("ID").to(1L).set("NAME").to("Bar").build(); + transaction.buffer(mutation); + return null; + }); + + // Verify that for mutation only case, a mutation key is set in BeginTransactionRequest. 
+ List beginTransactionRequests = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + verifyMutationKeySetInBeginTransactionRequests(beginTransactionRequests); + + // Verify that the latest precommit token is set in the CommitRequest + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + verifyPreCommitTokenSetInCommitRequest(commitRequests); + + spanner.close(); + } + + @Test + public void testMutationOnlyUsingTransactionManagerAbortedDuringBeginTransaction() { + // This test ensures that when a transaction containing only mutations is retried after an + // ABORT error in the BeginTransaction RPC: + // 1. The mutation key is correctly included in the BeginTransaction request. + // 2. The precommit token is properly set in the Commit request. + Spanner spanner = setupSpannerBySkippingBeginTransactionVerificationForMux(); + + // Force the BeginTransaction RPC to return Aborted the first time it is called. The exception + // is cleared after the first call, so the retry should succeed. + mockSpanner.setBeginTransactionExecutionTime( + SimulatedExecutionTime.ofException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + try (TransactionManager manager = client.transactionManager()) { + TransactionContext transaction = manager.begin(); + while (true) { + try { + Mutation mutation = + Mutation.newInsertBuilder("FOO").set("ID").to(1L).set("NAME").to("Bar").build(); + transaction.buffer(mutation); + manager.commit(); + assertNotNull(manager.getCommitTimestamp()); + break; + } catch (AbortedException e) { + transaction = manager.resetForRetry(); + } + } + } + + // Verify that for mutation only case, a mutation key is set in BeginTransactionRequest. 
+ List beginTransactionRequests = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + verifyMutationKeySetInBeginTransactionRequests(beginTransactionRequests); + + // Verify that the latest precommit token is set in the CommitRequest + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + verifyPreCommitTokenSetInCommitRequest(commitRequests); + + spanner.close(); + } + + @Test + public void testMutationOnlyUsingAsyncRunnerAbortedDuringBeginTransaction() { + // This test ensures that when a transaction containing only mutations is retried after an + // ABORT error in the BeginTransaction RPC: + // 1. The mutation key is correctly included in the BeginTransaction request. + // 2. The precommit token is properly set in the Commit request. + + Spanner spanner = setupSpannerBySkippingBeginTransactionVerificationForMux(); + + // Force the BeginTransaction RPC to return Aborted the first time it is called. The exception + // is cleared after the first call, so the retry should succeed. + mockSpanner.setBeginTransactionExecutionTime( + SimulatedExecutionTime.ofException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + AsyncRunner runner = client.runAsync(); + get( + runner.runAsync( + txn -> { + txn.buffer( + Mutation.newInsertBuilder("FOO").set("ID").to(1L).set("NAME").to("Bar").build()); + return ApiFutures.immediateFuture(null); + }, + MoreExecutors.directExecutor())); + + // Verify that for mutation only case, a mutation key is set in BeginTransactionRequest. 
+ List beginTransactionRequests = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + verifyMutationKeySetInBeginTransactionRequests(beginTransactionRequests); + + // Verify that the latest precommit token is set in the CommitRequest + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + verifyPreCommitTokenSetInCommitRequest(commitRequests); + + spanner.close(); + } + + @Test + public void testMutationOnlyUsingTransactionManagerAsyncAbortedDuringBeginTransaction() + throws Exception { + // This test verifies that in the case of mutations-only, when a transaction is retried after an + // ABORT in BeginTransaction RPC, the mutation key is correctly set in the BeginTransaction + // request + // and precommit token is set in Commit request. + Spanner spanner = setupSpannerBySkippingBeginTransactionVerificationForMux(); + + // Force the BeginTransaction RPC to return Aborted the first time it is called. The exception + // is cleared after the first call, so the retry should succeed. 
+ mockSpanner.setBeginTransactionExecutionTime( + SimulatedExecutionTime.ofException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + try (AsyncTransactionManager manager = client.transactionManagerAsync()) { + TransactionContextFuture transaction = manager.beginAsync(); + while (true) { + CommitTimestampFuture commitTimestamp = + transaction + .then( + (txn, input) -> { + txn.buffer( + Mutation.newInsertBuilder("FOO") + .set("ID") + .to(1L) + .set("NAME") + .to("Bar") + .build()); + return ApiFutures.immediateFuture(null); + }, + MoreExecutors.directExecutor()) + .commitAsync(); + try { + assertThat(commitTimestamp.get()).isNotNull(); + break; + } catch (AbortedException e) { + transaction = manager.resetForRetryAsync(); + } + } + } + + // Verify that for mutation only case, a mutation key is set in BeginTransactionRequest. + List beginTransactionRequests = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + verifyMutationKeySetInBeginTransactionRequests(beginTransactionRequests); + + // Verify that the latest precommit token is set in the CommitRequest + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + verifyPreCommitTokenSetInCommitRequest(commitRequests); + + spanner.close(); + } + + @Test + public void testOtherUnimplementedError_ReadWriteTransactionStillUsesMultiplexedSession() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException( + Status.UNIMPLEMENTED + .withDescription("Multiplexed sessions are not supported.") + .asRuntimeException())); + + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + // Try to execute a query using single use transaction. 
+ try (ResultSet resultSet = client.singleUse().executeQuery(STATEMENT)) { + SpannerException spannerException = assertThrows(SpannerException.class, resultSet::next); + assertEquals(ErrorCode.UNIMPLEMENTED, spannerException.getErrorCode()); + } + + // The read-write transaction should use multiplexed sessions and succeed. + client + .readWriteTransaction() + .run( + transaction -> { + // Returns a ResultSet containing the precommit token (ResultSetPrecommitToken) + transaction.executeUpdate(UPDATE_STATEMENT); + + // Verify that a precommit token is received. This guarantees that the read-write + // transaction was executed on a multiplexed session. + TransactionContextImpl impl = (TransactionContextImpl) transaction; + assertNotNull(impl.getLatestPrecommitToken()); + assertEquals( + ByteString.copyFromUtf8("ResultSetPrecommitToken"), + impl.getLatestPrecommitToken().getPrecommitToken()); + return null; + }); + + // Verify that two ExecuteSqlRequests were received and second one uses a multiplexed session. + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + + Session session2 = mockSpanner.getSession(requests.get(1).getSession()); + assertNotNull(session2); + assertTrue(session2.getMultiplexed()); + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(2L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(2L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testReadWriteTransactionWithCommitRetryProtocolExtensionSet() { + // This test simulates the commit retry protocol extension which occurs when a read-write + // transaction contains read/query + mutation operations. 
+ DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + client + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet resultSet = transaction.executeQuery(STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + + Mutation mutation = + Mutation.newInsertBuilder("FOO").set("ID").to(1L).set("NAME").to("Bar").build(); + transaction.buffer(mutation); + + TransactionContextImpl impl = (TransactionContextImpl) transaction; + // Force the Commit RPC to return a CommitResponse with MultiplexedSessionRetry field + // set. + // This scenario is only possible when a read-write transaction contains read/query + + // mutation operations. + mockSpanner.markCommitRetryOnTransaction(impl.transactionId); + return null; + }); + + List executeSqlRequests = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(1, executeSqlRequests.size()); + // Verify the request is executed using multiplexed sessions + assertTrue(mockSpanner.getSession(executeSqlRequests.get(0).getSession()).getMultiplexed()); + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertEquals(2, commitRequests.size()); + assertNotNull(commitRequests.get(0).getPrecommitToken()); + assertEquals( + ByteString.copyFromUtf8("PartialResultSetPrecommitToken"), + commitRequests.get(0).getPrecommitToken().getPrecommitToken()); + // Verify that the first request has mutations set + assertTrue(commitRequests.get(0).getMutationsCount() > 0); + + // Second CommitRequest should contain the latest precommit token received via the + // CommitResponse in previous attempt. 
+ assertNotNull(commitRequests.get(1).getPrecommitToken()); + assertEquals( + ByteString.copyFromUtf8("CommitResponsePrecommitToken"), + commitRequests.get(1).getPrecommitToken().getPrecommitToken()); + // Verify that the commit retry request does not have any mutations set + assertEquals(0, commitRequests.get(1).getMutationsCount()); + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testBatchWriteAtLeastOnce() { + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + Iterable MUTATION_GROUPS = + ImmutableList.of( + MutationGroup.of( + Mutation.newInsertBuilder("FOO1").set("ID").to(1L).set("NAME").to("Bar1").build(), + Mutation.newInsertBuilder("FOO2").set("ID").to(2L).set("NAME").to("Bar2").build()), + MutationGroup.of( + Mutation.newInsertBuilder("FOO3").set("ID").to(3L).set("NAME").to("Bar3").build(), + Mutation.newInsertBuilder("FOO4").set("ID").to(4L).set("NAME").to("Bar4").build())); + + ServerStream responseStream = client.batchWriteAtLeastOnce(MUTATION_GROUPS); + int idx = 0; + for (BatchWriteResponse response : responseStream) { + assertEquals( + response.getStatus(), + com.google.rpc.Status.newBuilder().setCode(com.google.rpc.Code.OK_VALUE).build()); + assertEquals(response.getIndexesList(), ImmutableList.of(idx, idx + 1)); + idx += 2; + } + + assertNotNull(responseStream); + List requests = mockSpanner.getRequestsOfType(BatchWriteRequest.class); + assertEquals(requests.size(), 1); + BatchWriteRequest request = requests.get(0); + assertTrue(mockSpanner.getSession(request.getSession()).getMultiplexed()); + assertEquals(request.getMutationGroupsCount(), 2); + assertEquals(request.getRequestOptions().getPriority(), Priority.PRIORITY_UNSPECIFIED); + 
assertFalse(request.getExcludeTxnFromChangeStreams()); + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void + testRWTransactionWithTransactionManager_CommitAborted_SetsTransactionId_AndUsedInNewInstance() { + // The below test verifies the behaviour of begin(AbortedException) method which is used to + // maintain transaction priority if resetForRetry() is not called. + + // This test performs the following steps: + // 1. Simulates an ABORTED exception during commit and verifies that the transaction ID is + // included in the AbortedException. + // 2. Passes the ABORTED exception to the begin(AbortedException) method of a new + // TransactionManager, and verifies that the transaction ID from the failed transaction is sent + // during the inline begin of the first request. + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + // Force the Commit RPC to return Aborted the first time it is called. The exception is cleared + // after the first call, so the retry should succeed. + mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + + ByteString abortedTransactionID = null; + AbortedException exception = null; + try (TransactionManager manager = client.transactionManager()) { + TransactionContext transaction = manager.begin(); + try { + try (ResultSet resultSet = transaction.executeQuery(STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + manager.commit(); + assertNotNull(manager.getCommitTimestamp()); + } catch (AbortedException e) { + // The transactionID of the Aborted transaction should be set in AbortedException class. 
+ assertNotNull(e.getTransactionID()); + abortedTransactionID = e.getTransactionID(); + exception = e; + } + } + // Verify that the transactionID of the aborted transaction is set. + assertNotNull(abortedTransactionID); + assertNotNull(exception); + mockSpanner.clearRequests(); + + // Pass AbortedException while invoking begin on the new manager instance. + try (TransactionManager manager = client.transactionManager()) { + TransactionContext transaction = manager.begin(exception); + while (true) { + try { + try (ResultSet resultSet = transaction.executeQuery(STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + manager.commit(); + assertNotNull(manager.getCommitTimestamp()); + break; + } catch (AbortedException e) { + transaction = manager.resetForRetry(); + } + } + } + + // Verify that the ExecuteSqlRequest with the inline begin passes the transactionID of the + // previously aborted transaction. + List executeSqlRequests = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(1, executeSqlRequests.size()); + assertTrue(mockSpanner.getSession(executeSqlRequests.get(0).getSession()).getMultiplexed()); + assertNotNull( + executeSqlRequests + .get(0) + .getTransaction() + .getBegin() + .getReadWrite() + .getMultiplexedSessionPreviousTransactionId()); + assertEquals( + executeSqlRequests + .get(0) + .getTransaction() + .getBegin() + .getReadWrite() + .getMultiplexedSessionPreviousTransactionId(), + abortedTransactionID); + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(2L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(2L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void + testRWTransactionWithTransactionManager_ExecuteSQLAborted_SetsTransactionId_AndUsedInNewInstance() { + // This test performs the following steps: + // 1. 
Simulates an ABORTED exception during ExecuteSQL and verifies that the transaction ID is + // included in the AbortedException. + // 2. Passes the ABORTED exception to the begin(AbortedException) method of a new + // TransactionManager, and verifies that the transaction ID from the failed transaction is sent + // during the inline begin of the first request. + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + ByteString abortedTransactionID = null; + AbortedException exception = null; + try (TransactionManager manager = client.transactionManager()) { + TransactionContext transaction = manager.begin(); + try { + try (ResultSet resultSet = transaction.executeQuery(STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + + // Simulate an ABORTED in next ExecuteSQL request. + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + + try (ResultSet resultSet = transaction.executeQuery(STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + manager.commit(); + assertNotNull(manager.getCommitTimestamp()); + } catch (AbortedException e) { + // The transactionID of the Aborted transaction should be set in AbortedException class. + assertNotNull(e.getTransactionID()); + abortedTransactionID = e.getTransactionID(); + exception = e; + } + } + // Verify that the transactionID of the aborted transaction is set. + assertNotNull(abortedTransactionID); + assertNotNull(exception); + mockSpanner.clearRequests(); + + // Pass AbortedException while invoking begin on the new manager instance. 
+ try (TransactionManager manager = client.transactionManager()) { + TransactionContext transaction = manager.begin(exception); + while (true) { + try { + try (ResultSet resultSet = transaction.executeQuery(STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + manager.commit(); + assertNotNull(manager.getCommitTimestamp()); + break; + } catch (AbortedException e) { + transaction = manager.resetForRetry(); + } + } + } + + // Verify that the ExecuteSqlRequest with inline begin includes the transaction ID from the + // previously aborted transaction. + List executeSqlRequests = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(1, executeSqlRequests.size()); + assertTrue(mockSpanner.getSession(executeSqlRequests.get(0).getSession()).getMultiplexed()); + assertNotNull( + executeSqlRequests + .get(0) + .getTransaction() + .getBegin() + .getReadWrite() + .getMultiplexedSessionPreviousTransactionId()); + assertEquals( + executeSqlRequests + .get(0) + .getTransaction() + .getBegin() + .getReadWrite() + .getMultiplexedSessionPreviousTransactionId(), + abortedTransactionID); + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(2L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(2L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void + testRWTransactionWithAsyncTransactionManager_CommitAborted_SetsTransactionId_AndUsedInNewInstance() + throws Exception { + // This test performs the following steps: + // 1. Simulates an ABORTED exception during ExecuteSQL and verifies that the transaction ID is + // included in the AbortedException. + // 2. Passes the ABORTED exception to the begin(AbortedException) method of a new + // AsyncTransactionManager, and verifies that the transaction ID from the failed transaction is + // sent + // during the inline begin of the first request. 
+ DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + // Force the Commit RPC to return Aborted the first time it is called. The exception is cleared + // after the first call, so the retry should succeed. + mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + ByteString abortedTransactionID = null; + AbortedException exception = null; + try (AsyncTransactionManager manager = client.transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(); + try { + AsyncTransactionStep updateCount = + transactionContextFuture.then( + (transaction, ignored) -> transaction.executeUpdateAsync(UPDATE_STATEMENT), + MoreExecutors.directExecutor()); + CommitTimestampFuture commitTimestamp = updateCount.commitAsync(); + assertEquals(UPDATE_COUNT, updateCount.get().longValue()); + assertNotNull(commitTimestamp.get()); + } catch (AbortedException e) { + assertNotNull(e.getTransactionID()); + exception = e; + abortedTransactionID = e.getTransactionID(); + } + } + + // Verify that the transactionID of the aborted transaction is set. 
+ assertNotNull(abortedTransactionID); + assertNotNull(exception); + mockSpanner.clearRequests(); + + try (AsyncTransactionManager manager = client.transactionManagerAsync()) { + TransactionContextFuture transactionContextFuture = manager.beginAsync(exception); + while (true) { + try { + AsyncTransactionStep updateCount = + transactionContextFuture.then( + (transaction, ignored) -> transaction.executeUpdateAsync(UPDATE_STATEMENT), + MoreExecutors.directExecutor()); + CommitTimestampFuture commitTimestamp = updateCount.commitAsync(); + assertEquals(UPDATE_COUNT, updateCount.get().longValue()); + assertNotNull(commitTimestamp.get()); + break; + } catch (AbortedException e) { + transactionContextFuture = manager.resetForRetryAsync(); + } + } + } + + List executeSqlRequests = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(1, executeSqlRequests.size()); + assertTrue(mockSpanner.getSession(executeSqlRequests.get(0).getSession()).getMultiplexed()); + assertNotNull( + executeSqlRequests + .get(0) + .getTransaction() + .getBegin() + .getReadWrite() + .getMultiplexedSessionPreviousTransactionId()); + assertEquals( + executeSqlRequests + .get(0) + .getTransaction() + .getBegin() + .getReadWrite() + .getMultiplexedSessionPreviousTransactionId(), + abortedTransactionID); + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(2L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(2L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + private void waitForSessionToBeReplaced(DatabaseClientImpl client) { + assertNotNull(client.multiplexedSessionDatabaseClient); + SessionReference sessionReference = + client.multiplexedSessionDatabaseClient.getCurrentSessionReference(); + while (sessionReference + == client.multiplexedSessionDatabaseClient.getCurrentSessionReference()) { + Thread.yield(); + } + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionDatabaseClientTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionDatabaseClientTest.java new file mode 100644 index 000000000000..9a43ad07cdfe --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionDatabaseClientTest.java @@ -0,0 +1,255 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeFalse; +import static org.junit.Assume.assumeTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.cloud.spanner.SessionClient.SessionConsumer; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.lang.reflect.Field; +import java.time.Clock; +import java.time.Duration; +import java.time.Instant; +import java.util.Map; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.stubbing.Answer; + +@RunWith(JUnit4.class) +public class MultiplexedSessionDatabaseClientTest { + + @Test + public void testMaintainer() { + // This fails for the native builds due to the extensive use of reflection. 
+ assumeTrue(isJava8()); + + Instant now = Instant.now(); + Clock clock = mock(Clock.class); + when(clock.instant()).thenReturn(now); + SessionClient sessionClient = mock(SessionClient.class); + SpannerImpl spanner = mock(SpannerImpl.class); + SpannerOptions spannerOptions = mock(SpannerOptions.class); + SessionPoolOptions sessionPoolOptions = mock(SessionPoolOptions.class); + when(sessionClient.getSpanner()).thenReturn(spanner); + when(spanner.getOptions()).thenReturn(spannerOptions); + when(spannerOptions.getSessionPoolOptions()).thenReturn(sessionPoolOptions); + when(sessionPoolOptions.getMultiplexedSessionMaintenanceDuration()) + .thenReturn(Duration.ofDays(7)); + when(sessionPoolOptions.getMultiplexedSessionMaintenanceLoopFrequency()) + .thenReturn(Duration.ofMinutes(10)); + + SessionImpl session1 = mock(SessionImpl.class); + SessionReference sessionReference1 = mock(SessionReference.class); + when(session1.getSessionReference()).thenReturn(sessionReference1); + + SessionImpl session2 = mock(SessionImpl.class); + SessionReference sessionReference2 = mock(SessionReference.class); + when(session2.getSessionReference()).thenReturn(sessionReference2); + + doAnswer( + (Answer) + invocationOnMock -> { + SessionConsumer consumer = invocationOnMock.getArgument(0); + // Return session1 the first time it is called. + consumer.onSessionReady(session1); + return null; + }) + .doAnswer( + (Answer) + invocationOnMock -> { + SessionConsumer consumer = invocationOnMock.getArgument(0); + // Return session2 the second time that it is called. + consumer.onSessionReady(session2); + return null; + }) + .when(sessionClient) + .asyncCreateMultiplexedSession(any(SessionConsumer.class)); + + // Create a client. This should get session1. + MultiplexedSessionDatabaseClient client = + new MultiplexedSessionDatabaseClient(sessionClient, clock); + + // Make sure that the client uses the initial session that is created. 
+ assertEquals(client.getCurrentSessionReference(), session1.getSessionReference()); + + // Run the maintainer without advancing the clock. We should still get the same session. + client.getMaintainer().maintain(); + assertEquals(client.getCurrentSessionReference(), session1.getSessionReference()); + + // Advance the clock by 1 day. We should still get the same session. + when(clock.instant()).thenReturn(now.plus(Duration.ofDays(1))); + client.getMaintainer().maintain(); + assertEquals(client.getCurrentSessionReference(), session1.getSessionReference()); + + // Advance the clock by 8 days. We should now get a new session. + when(clock.instant()).thenReturn(now.plus(Duration.ofDays(8))); + client.getMaintainer().maintain(); + assertEquals(client.getCurrentSessionReference(), session2.getSessionReference()); + } + + @Test + public void testDisableMultiplexedSessionEnvVar() throws Exception { + assumeTrue(isJava8() && !isWindows()); + assumeFalse(System.getenv().containsKey("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS")); + + // Assert that the mux sessions setting is respected by default. + assertTrue( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(true) + .build() + .getUseMultiplexedSession()); + + Class classOfMap = System.getenv().getClass(); + Field field = classOfMap.getDeclaredField("m"); + field.setAccessible(true); + Map writeableEnvironmentVariables = + (Map) field.get(System.getenv()); + + try { + writeableEnvironmentVariables.put("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS", "false"); + // Assert that the env var overrides the mux sessions setting. 
+ assertFalse( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(true) + .build() + .getUseMultiplexedSession()); + } finally { + writeableEnvironmentVariables.remove("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS"); + } + } + + @Test + public void testEnableMultiplexedSessionEnvVar() throws Exception { + assumeTrue(isJava8() && !isWindows()); + assumeFalse(System.getenv().containsKey("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS")); + + // Assert that the mux sessions setting is respected by default. + assertFalse( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(false) + .build() + .getUseMultiplexedSession()); + + Class classOfMap = System.getenv().getClass(); + Field field = classOfMap.getDeclaredField("m"); + field.setAccessible(true); + Map writeableEnvironmentVariables = + (Map) field.get(System.getenv()); + + try { + writeableEnvironmentVariables.put("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS", "true"); + // Assert that the env var overrides the mux sessions setting. + assertTrue( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(false) + .build() + .getUseMultiplexedSession()); + } finally { + writeableEnvironmentVariables.remove("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS"); + } + } + + @Test + public void testIgnoreMultiplexedSessionEnvVar() throws Exception { + assumeTrue(isJava8() && !isWindows()); + assumeFalse(System.getenv().containsKey("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS")); + + // Assert that the mux sessions setting is respected by default. 
+ assertFalse( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(false) + .build() + .getUseMultiplexedSession()); + + Class classOfMap = System.getenv().getClass(); + Field field = classOfMap.getDeclaredField("m"); + field.setAccessible(true); + Map writeableEnvironmentVariables = + (Map) field.get(System.getenv()); + + try { + writeableEnvironmentVariables.put("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS", ""); + // Assert that the env var overrides the mux sessions setting. + assertFalse( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(false) + .build() + .getUseMultiplexedSession()); + } finally { + writeableEnvironmentVariables.remove("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS"); + } + } + + @Test + public void testThrowExceptionMultiplexedSessionEnvVarInvalidValues() throws Exception { + assumeTrue(isJava8() && !isWindows()); + assumeFalse(System.getenv().containsKey("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS")); + + // Assert that the mux sessions setting is respected by default. + assertFalse( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(false) + .build() + .getUseMultiplexedSession()); + + Class classOfMap = System.getenv().getClass(); + Field field = classOfMap.getDeclaredField("m"); + field.setAccessible(true); + Map writeableEnvironmentVariables = + (Map) field.get(System.getenv()); + + try { + writeableEnvironmentVariables.put("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS", "test"); + + // setting an invalid GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS value throws error. 
+ IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(false) + .build() + .getUseMultiplexedSession()); + StringWriter sw = new StringWriter(); + e.printStackTrace(new PrintWriter(sw)); + assertThat(sw.toString()) + .contains("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS should be either true or false"); + } finally { + writeableEnvironmentVariables.remove("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS"); + } + } + + private boolean isJava8() { + return JavaVersionUtil.getJavaMajorVersion() == 8; + } + + private boolean isWindows() { + return System.getProperty("os.name").toLowerCase().contains("windows"); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionsBenchmark.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionsBenchmark.java new file mode 100644 index 000000000000..f71fdfe37a3e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionsBenchmark.java @@ -0,0 +1,179 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.BenchmarkingUtilityScripts.collectResults; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import com.google.common.base.Stopwatch; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningScheduledExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.TearDown; +import org.openjdk.jmh.annotations.Warmup; + +/** + * Benchmarks for measuring existing latencies of various APIs using the Java Client with + * multiplexed sessions. The benchmarks are bound to the Maven profile `benchmark` and can be + * executed like this: + * mvn clean test -DskipTests -Pbenchmark -Dbenchmark.name=MultiplexedSessionsBenchmark + * Test Table Schema : + * + *

CREATE TABLE FOO ( id INT64 NOT NULL, BAZ INT64, BAR INT64, ) PRIMARY KEY(id); + * + *

Below are a few considerations here: 1. We use all default options with multiplexed sessions + * for this test because that is what most customers would be using. 2. The test schema uses a + * numeric primary key. To ensure that the reads/updates are distributed across a large query space, + * we insert 10^5 records. Utility at {@link BenchmarkingUtilityScripts} can be used for loading + * data. 3. For queries, we make sure that the query is sampled randomly across a large query space. + * This ensure we don't cause hot-spots. 4. For avoid cold start issues, we execute 1 query/update + * and ignore its latency from the final reported metrics. + */ +@BenchmarkMode(Mode.AverageTime) +@Fork(value = 1, warmups = 0) +@Measurement(batchSize = 1, iterations = 1, timeUnit = TimeUnit.MILLISECONDS) +@OutputTimeUnit(TimeUnit.SECONDS) +@Warmup(iterations = 0) +public class MultiplexedSessionsBenchmark extends AbstractLatencyBenchmark { + + @State(Scope.Benchmark) + public static class BenchmarkState { + + // TODO(developer): Add your values here for PROJECT_ID, INSTANCE_ID, DATABASE_ID + private static final String INSTANCE_ID = ""; + private static final String DATABASE_ID = ""; + private static final String SERVER_URL = "https://staging-wrenchworks.sandbox.googleapis.com"; + private Spanner spanner; + private DatabaseClientImpl client; + + @Param({"100"}) + int minSessions; + + @Param({"400"}) + int maxSessions; + + @Setup(Level.Iteration) + public void setup() throws Exception { + SpannerOptions.enableOpenTelemetryMetrics(); + SpannerOptions.enableOpenTelemetryTraces(); + SpannerOptions options = + SpannerOptions.newBuilder() + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setMinSessions(minSessions) + .setMaxSessions(maxSessions) + .setWaitForMinSessionsDuration(Duration.ofSeconds(20)) + .setUseMultiplexedSession(true) + .build()) + .setHost(SERVER_URL) + .setNumChannels(NUM_GRPC_CHANNELS) + .build(); + spanner = options.getService(); + client = + 
(DatabaseClientImpl) + spanner.getDatabaseClient( + DatabaseId.of(options.getProjectId(), INSTANCE_ID, DATABASE_ID)); + } + + @TearDown(Level.Iteration) + public void teardown() throws Exception { + spanner.close(); + } + } + + /** Measures the time needed to execute a burst of queries. */ + @Benchmark + public void burstQueries(final BenchmarkState server) throws Exception { + final DatabaseClientImpl client = server.client; + + ListeningScheduledExecutorService service = + MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(PARALLEL_THREADS)); + List>> results = new ArrayList<>(PARALLEL_THREADS); + for (int i = 0; i < PARALLEL_THREADS; i++) { + results.add( + service.submit(() -> runBenchmarksForSingleUseQueries(server, TOTAL_READS_PER_RUN))); + } + collectResultsAndPrint(service, results, TOTAL_READS_PER_RUN); + } + + private List runBenchmarksForSingleUseQueries( + final BenchmarkState server, int numberOfOperations) { + List results = new ArrayList<>(numberOfOperations); + // Execute one query to make sure everything has been warmed up. 
+ executeWarmup(server); + + for (int i = 0; i < numberOfOperations; i++) { + results.add(executeSingleUseQuery(server)); + } + return results; + } + + private void executeWarmup(final BenchmarkState server) { + for (int i = 0; i < WARMUP_REQUEST_COUNT; i++) { + executeSingleUseQuery(server); + } + } + + private Duration executeSingleUseQuery(final BenchmarkState server) { + Stopwatch watch = Stopwatch.createStarted(); + + try (ResultSet rs = server.client.singleUse().executeQuery(getRandomisedReadStatement())) { + while (rs.next()) { + assertEquals(1, rs.getColumnCount()); + assertNotNull(rs.getValue(0)); + } + } catch (Throwable t) { + // ignore exception + System.out.println("Got exception = " + t); + } + return watch.elapsed(); + } + + static Statement getRandomisedReadStatement() { + int randomKey = ThreadLocalRandom.current().nextInt(TOTAL_RECORDS); + return Statement.newBuilder(SELECT_QUERY).bind(ID_COLUMN_NAME).to(randomKey).build(); + } + + void collectResultsAndPrint( + ListeningScheduledExecutorService service, + List>> results, + int numOperationsPerThread) + throws Exception { + final List collectResults = + collectResults( + service, results, numOperationsPerThread * PARALLEL_THREADS, Duration.ofMinutes(60)); + printResults(collectResults); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MutableCredentialsTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MutableCredentialsTest.java new file mode 100644 index 000000000000..dfa6d6695dd7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MutableCredentialsTest.java @@ -0,0 +1,196 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.auth.CredentialTypeForMetrics; +import com.google.auth.RequestMetadataCallback; +import com.google.auth.oauth2.ServiceAccountCredentials; +import java.io.IOException; +import java.net.URI; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Executor; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class MutableCredentialsTest { + ServiceAccountCredentials initialCredentials = mock(ServiceAccountCredentials.class); + ServiceAccountCredentials initialScopedCredentials = mock(ServiceAccountCredentials.class); + ServiceAccountCredentials updatedCredentials = mock(ServiceAccountCredentials.class); + ServiceAccountCredentials updatedScopedCredentials = mock(ServiceAccountCredentials.class); + Set scopes = new HashSet<>(Arrays.asList("scope-a", "scope-b")); + Map> initialMetadata = + Collections.singletonMap("Authorization", 
Collections.singletonList("v1")); + Map> updatedMetadata = + Collections.singletonMap("Authorization", Collections.singletonList("v2")); + String initialAuthType = "auth-1"; + String updatedAuthType = "auth-2"; + String initialUniverseDomain = "googleapis.com"; + String updatedUniverseDomain = "abc.goog"; + CredentialTypeForMetrics initialMetricsCredentialType = + CredentialTypeForMetrics.SERVICE_ACCOUNT_CREDENTIALS_JWT; + CredentialTypeForMetrics updatedMetricsCredentialType = + CredentialTypeForMetrics.SERVICE_ACCOUNT_CREDENTIALS_AT; + + @Test + public void testCreateMutableCredentials() throws IOException { + setupInitialCredentials(); + + MutableCredentials credentials = new MutableCredentials(initialCredentials, scopes); + URI testUri = URI.create("https://spanner.googleapis.com"); + Executor executor = mock(Executor.class); + RequestMetadataCallback callback = mock(RequestMetadataCallback.class); + + validateInitialDelegatedCredentialsAreSet(credentials, testUri); + + credentials.getRequestMetadata(testUri, executor, callback); + + credentials.refresh(); + + verify(initialScopedCredentials, times(1)).getRequestMetadata(testUri, executor, callback); + verify(initialScopedCredentials, times(1)).refresh(); + } + + @Test + public void testCreateMutableCredentialsWithDefaultScopes() throws IOException { + Set defaultScopes = SpannerOptions.SCOPES; + when(initialCredentials.createScoped(defaultScopes)).thenReturn(initialScopedCredentials); + when(initialScopedCredentials.getAuthenticationType()).thenReturn(initialAuthType); + when(initialScopedCredentials.getRequestMetadata(any(URI.class))).thenReturn(initialMetadata); + when(initialScopedCredentials.getUniverseDomain()).thenReturn(initialUniverseDomain); + when(initialScopedCredentials.getMetricsCredentialType()) + .thenReturn(initialMetricsCredentialType); + when(initialScopedCredentials.hasRequestMetadata()).thenReturn(true); + when(initialScopedCredentials.hasRequestMetadataOnly()).thenReturn(true); + + 
MutableCredentials credentials = new MutableCredentials(initialCredentials); + URI testUri = URI.create("https://spanner.googleapis.com"); + + validateInitialDelegatedCredentialsAreSet(credentials, testUri); + verify(initialCredentials).createScoped(defaultScopes); + } + + @Test + public void testUpdateMutableCredentials() throws IOException { + setupInitialCredentials(); + setupUpdatedCredentials(); + + MutableCredentials credentials = new MutableCredentials(initialCredentials, scopes); + URI testUri = URI.create("https://example.com"); + Executor executor = mock(Executor.class); + RequestMetadataCallback callback = mock(RequestMetadataCallback.class); + + validateInitialDelegatedCredentialsAreSet(credentials, testUri); + + credentials.updateCredentials(updatedCredentials); + + assertEquals(updatedAuthType, credentials.getAuthenticationType()); + assertFalse(credentials.hasRequestMetadata()); + assertFalse(credentials.hasRequestMetadataOnly()); + assertSame(updatedMetadata, credentials.getRequestMetadata(testUri)); + assertEquals(updatedUniverseDomain, credentials.getUniverseDomain()); + assertEquals(updatedMetricsCredentialType, credentials.getMetricsCredentialType()); + + credentials.getRequestMetadata(testUri, executor, callback); + + credentials.refresh(); + + verify(updatedScopedCredentials, times(1)).getRequestMetadata(testUri, executor, callback); + verify(updatedScopedCredentials, times(1)).refresh(); + } + + @Test(expected = IllegalArgumentException.class) + public void testCreateMutableCredentialsEmptyScopesThrowsError() { + new MutableCredentials(initialCredentials, Collections.emptySet()); + } + + @Test + public void testCreateMutableCredentialsNullCredentialsThrowsError() { + NullPointerException exception = + assertThrows(NullPointerException.class, () -> new MutableCredentials(null, scopes)); + assertEquals("credentials must not be null", exception.getMessage()); + } + + @Test + public void testCreateMutableCredentialsNullScopesThrowsError() { + 
NullPointerException exception = + assertThrows( + NullPointerException.class, () -> new MutableCredentials(initialCredentials, null)); + assertEquals("scopes must not be null", exception.getMessage()); + } + + @Test + public void testUpdateMutableCredentialsNullCredentialsThrowsError() throws IOException { + setupInitialCredentials(); + MutableCredentials credentials = new MutableCredentials(initialCredentials, scopes); + + NullPointerException exception = + assertThrows(NullPointerException.class, () -> credentials.updateCredentials(null)); + assertEquals("credentials must not be null", exception.getMessage()); + } + + private void validateInitialDelegatedCredentialsAreSet( + MutableCredentials credentials, URI testUri) throws IOException { + assertEquals(initialAuthType, credentials.getAuthenticationType()); + assertTrue(credentials.hasRequestMetadata()); + assertTrue(credentials.hasRequestMetadataOnly()); + assertEquals(initialMetadata, credentials.getRequestMetadata(testUri)); + assertEquals(initialUniverseDomain, credentials.getUniverseDomain()); + assertEquals(initialMetricsCredentialType, credentials.getMetricsCredentialType()); + } + + private void setupInitialCredentials() throws IOException { + when(initialCredentials.createScoped(scopes)).thenReturn(initialScopedCredentials); + when(initialCredentials.createScoped(Collections.emptyList())) + .thenReturn(initialScopedCredentials); + when(initialScopedCredentials.getAuthenticationType()).thenReturn(initialAuthType); + when(initialScopedCredentials.getRequestMetadata(any(URI.class))).thenReturn(initialMetadata); + when(initialScopedCredentials.getUniverseDomain()).thenReturn(initialUniverseDomain); + when(initialScopedCredentials.getMetricsCredentialType()) + .thenReturn(initialMetricsCredentialType); + when(initialScopedCredentials.hasRequestMetadata()).thenReturn(true); + when(initialScopedCredentials.hasRequestMetadataOnly()).thenReturn(true); + } + + private void setupUpdatedCredentials() throws 
IOException { + when(updatedCredentials.createScoped(scopes)).thenReturn(updatedScopedCredentials); + when(updatedScopedCredentials.getAuthenticationType()).thenReturn(updatedAuthType); + when(updatedScopedCredentials.getRequestMetadata(any(URI.class))).thenReturn(updatedMetadata); + when(updatedScopedCredentials.getUniverseDomain()).thenReturn(updatedUniverseDomain); + when(updatedScopedCredentials.getMetricsCredentialType()) + .thenReturn(updatedMetricsCredentialType); + when(updatedScopedCredentials.hasRequestMetadata()).thenReturn(false); + when(updatedScopedCredentials.hasRequestMetadataOnly()).thenReturn(false); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MutationGroupTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MutationGroupTest.java new file mode 100644 index 000000000000..fbf91b9fc16c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MutationGroupTest.java @@ -0,0 +1,114 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; + +import com.google.common.collect.ImmutableList; +import com.google.spanner.v1.BatchWriteRequest; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link MutationGroup}. */ +@RunWith(JUnit4.class) +public class MutationGroupTest { + private final Random random = new Random(); + + private Mutation getRandomMutation() { + return Mutation.newInsertBuilder(String.valueOf(random.nextInt())) + .set("ID") + .to(random.nextInt()) + .set("NAME") + .to(String.valueOf(random.nextInt())) + .build(); + } + + private BatchWriteRequest.MutationGroup getMutationGroupProto(ImmutableList mutations) { + List mutationsProto = new ArrayList<>(); + Mutation.toProtoAndReturnRandomMutation(mutations, mutationsProto); + return BatchWriteRequest.MutationGroup.newBuilder().addAllMutations(mutationsProto).build(); + } + + @Test + public void ofVarargTest() { + Mutation[] mutations = + new Mutation[] { + getRandomMutation(), getRandomMutation(), getRandomMutation(), getRandomMutation() + }; + MutationGroup mutationGroup = MutationGroup.of(mutations); + assertArrayEquals(mutations, mutationGroup.getMutations().toArray()); + assertEquals( + MutationGroup.toProto(mutationGroup), + getMutationGroupProto(ImmutableList.copyOf(mutations))); + } + + @Test + public void ofIterableTest() { + ImmutableList mutations = + ImmutableList.of( + getRandomMutation(), getRandomMutation(), getRandomMutation(), getRandomMutation()); + MutationGroup mutationGroup = MutationGroup.of(mutations); + assertEquals(mutations, mutationGroup.getMutations()); + assertEquals(MutationGroup.toProto(mutationGroup), getMutationGroupProto(mutations)); + } + + @Test + public void toProtoTest() { + Mutation[] mutations = + new Mutation[] { + 
getRandomMutation(), getRandomMutation(), getRandomMutation(), getRandomMutation() + }; + MutationGroup mutationGroup = MutationGroup.of(mutations); + assertEquals( + MutationGroup.toProto(mutationGroup), + getMutationGroupProto(ImmutableList.copyOf(mutations))); + } + + @Test + public void toListProtoTest() { + Mutation[] mutations1 = + new Mutation[] { + getRandomMutation(), getRandomMutation(), getRandomMutation(), getRandomMutation() + }; + Mutation[] mutations2 = + new Mutation[] { + getRandomMutation(), getRandomMutation(), getRandomMutation(), getRandomMutation() + }; + Mutation[] mutations3 = + new Mutation[] { + getRandomMutation(), getRandomMutation(), getRandomMutation(), getRandomMutation() + }; + List mutationGroups = + ImmutableList.of( + MutationGroup.of(mutations1), + MutationGroup.of(mutations2), + MutationGroup.of(mutations3)); + List mutationGroupsProto = + MutationGroup.toListProto(mutationGroups); + assertEquals( + mutationGroupsProto.get(0), getMutationGroupProto(ImmutableList.copyOf(mutations1))); + assertEquals( + mutationGroupsProto.get(1), getMutationGroupProto(ImmutableList.copyOf(mutations2))); + assertEquals( + mutationGroupsProto.get(2), getMutationGroupProto(ImmutableList.copyOf(mutations3))); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MutationTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MutationTest.java new file mode 100644 index 000000000000..fbc34a37daf6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MutationTest.java @@ -0,0 +1,898 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.testing.SerializableTester.reserializeAndAssert; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.SingerProto.Genre; +import com.google.cloud.spanner.SingerProto.SingerInfo; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.testing.EqualsTester; +import java.math.BigDecimal; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import org.hamcrest.Matcher; +import org.hamcrest.MatcherAssert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link com.google.cloud.spanner.Mutation}. 
*/ +@RunWith(JUnit4.class) +public class MutationTest { + + @Test + public void insertEmpty() { + Mutation m = Mutation.newInsertBuilder("T1").build(); + assertThat(m.getTable()).isEqualTo("T1"); + assertThat(m.getOperation()).isEqualTo(Mutation.Op.INSERT); + assertThat(m.getColumns()).isEmpty(); + assertThat(m.getValues()).isEmpty(); + assertThat(m.toString()).isEqualTo("insert(T1{})"); + } + + @Test + public void insert() { + Mutation m = Mutation.newInsertBuilder("T1").set("C1").to(true).set("C2").to(1234).build(); + assertThat(m.getTable()).isEqualTo("T1"); + assertThat(m.getOperation()).isEqualTo(Mutation.Op.INSERT); + assertThat(m.getColumns()).containsExactly("C1", "C2").inOrder(); + assertThat(m.getValues()).containsExactly(Value.bool(true), Value.int64(1234)).inOrder(); + assertThat(m.toString()).isEqualTo("insert(T1{C1=true,C2=1234})"); + } + + @Test + public void insertOrUpdateEmpty() { + Mutation m = Mutation.newInsertOrUpdateBuilder("T2").build(); + assertThat(m.getTable()).isEqualTo("T2"); + assertThat(m.getOperation()).isEqualTo(Mutation.Op.INSERT_OR_UPDATE); + assertThat(m.getColumns()).isEmpty(); + assertThat(m.getValues()).isEmpty(); + assertThat(m.toString()).isEqualTo("insert_or_update(T2{})"); + } + + @Test + public void insertOrUpdate() { + Mutation m = Mutation.newInsertOrUpdateBuilder("T1").set("C1").to(true).build(); + assertThat(m.getTable()).isEqualTo("T1"); + assertThat(m.getOperation()).isEqualTo(Mutation.Op.INSERT_OR_UPDATE); + assertThat(m.getColumns()).containsExactly("C1"); + assertThat(m.getValues()).containsExactly(Value.bool(true)); + assertThat(m.toString()).isEqualTo("insert_or_update(T1{C1=true})"); + } + + @Test + public void updateEmpty() { + Mutation m = Mutation.newUpdateBuilder("T2").build(); + assertThat(m.getTable()).isEqualTo("T2"); + assertThat(m.getOperation()).isEqualTo(Mutation.Op.UPDATE); + assertThat(m.getColumns()).isEmpty(); + assertThat(m.getValues()).isEmpty(); + 
assertThat(m.toString()).isEqualTo("update(T2{})"); + } + + @Test + public void update() { + Mutation m = Mutation.newUpdateBuilder("T1").set("C1").to(true).set("C2").to(1234).build(); + assertThat(m.getTable()).isEqualTo("T1"); + assertThat(m.getOperation()).isEqualTo(Mutation.Op.UPDATE); + assertThat(m.getColumns()).containsExactly("C1", "C2").inOrder(); + assertThat(m.getValues()).containsExactly(Value.bool(true), Value.int64(1234)).inOrder(); + assertThat(m.toString()).isEqualTo("update(T1{C1=true,C2=1234})"); + } + + @Test + public void replaceEmpty() { + Mutation m = Mutation.newReplaceBuilder("T2").build(); + assertThat(m.getTable()).isEqualTo("T2"); + assertThat(m.getOperation()).isEqualTo(Mutation.Op.REPLACE); + assertThat(m.getColumns()).isEmpty(); + assertThat(m.getValues()).isEmpty(); + assertThat(m.toString()).isEqualTo("replace(T2{})"); + } + + @Test + public void replace() { + Mutation m = Mutation.newReplaceBuilder("T1").set("C1").to(true).set("C2").to(1234).build(); + assertThat(m.getTable()).isEqualTo("T1"); + assertThat(m.getOperation()).isEqualTo(Mutation.Op.REPLACE); + assertThat(m.getColumns()).containsExactly("C1", "C2").inOrder(); + assertThat(m.getValues()).containsExactly(Value.bool(true), Value.int64(1234)).inOrder(); + assertThat(m.toString()).isEqualTo("replace(T1{C1=true,C2=1234})"); + } + + @Test + public void duplicateColumn() { + IllegalStateException e = + assertThrows( + IllegalStateException.class, + () -> Mutation.newInsertBuilder("T1").set("C1").to(true).set("C1").to(false).build()); + assertThat(e.getMessage()).contains("Duplicate column"); + } + + @Test + public void duplicateColumnCaseInsensitive() { + IllegalStateException e = + assertThrows( + IllegalStateException.class, + () -> Mutation.newInsertBuilder("T1").set("C1").to(true).set("c1").to(false).build()); + assertThat(e.getMessage()).contains("Duplicate column"); + } + + @Test + public void asMap() { + Mutation m = Mutation.newInsertBuilder("T").build(); + 
assertThat(m.asMap()).isEqualTo(ImmutableMap.of()); + + m = Mutation.newInsertBuilder("T").set("C1").to(true).set("C2").to(1234).build(); + assertThat(m.asMap()) + .isEqualTo(ImmutableMap.of("C1", Value.bool(true), "C2", Value.int64(1234))); + } + + @Test + public void unfinishedBindingV1() { + Mutation.WriteBuilder b = Mutation.newInsertBuilder("T1"); + b.set("C1"); + IllegalStateException e = assertThrows(IllegalStateException.class, () -> b.build()); + assertThat(e.getMessage()).contains("Incomplete binding for column C1"); + } + + @Test + public void unfinishedBindingV2() { + Mutation.WriteBuilder b = Mutation.newInsertBuilder("T1"); + b.set("C1"); + IllegalStateException e = assertThrows(IllegalStateException.class, () -> b.set("C2")); + assertThat(e.getMessage()).contains("Incomplete binding for column C1"); + } + + @Test + public void notInBinding() { + ValueBinder binder = Mutation.newInsertBuilder("T1").set("C1"); + binder.to(1234); + IllegalStateException e = assertThrows(IllegalStateException.class, () -> binder.to(5678)); + assertThat(e.getMessage()).contains("No binding currently active"); + } + + @Test + public void delete() { + KeySet keySet = KeySet.singleKey(Key.of("k1")); + Mutation m = Mutation.delete("T1", keySet); + assertThat(m.getOperation()).isEqualTo(Mutation.Op.DELETE); + assertThat(m.getKeySet()).isEqualTo(keySet); + assertThat(m.toString()).isEqualTo("delete(T1{[k1]})"); + } + + @Test + public void send() { + Key key = Key.of(123); + Value payload = Value.bytes(ByteArray.copyFrom("payload")); + Instant deliverAt = Instant.now().plusSeconds(3600); + Mutation m = + Mutation.newSendBuilder("TestQueue") + .setKey(key) + .setPayload(payload) + .setDeliveryTime(deliverAt) + .build(); + assertThat(m.getOperation()).isEqualTo(Mutation.Op.SEND); + assertThat(m.getQueue()).isEqualTo("TestQueue"); + assertThat(m.getKey()).isEqualTo(key); + assertThat(m.getPayload()).isEqualTo(payload); + assertThat(m.getDeliveryTime()).isEqualTo(deliverAt); + 
assertThat(m.toString()) + .isEqualTo( + "send(TestQueue{key=[123], payload=" + payload + ", deliveryTime=" + deliverAt + "})"); + } + + @Test + public void sendMissingKey() { + IllegalStateException e = + assertThrows( + IllegalStateException.class, + () -> Mutation.newSendBuilder("TestQueue").setPayload(Value.string("payload")).build()); + assertThat(e.getMessage()).contains("Key must be set"); + } + + @Test + public void sendMissingPayload() { + IllegalStateException e = + assertThrows( + IllegalStateException.class, + () -> Mutation.newSendBuilder("TestQueue").setKey(Key.of("k1")).build()); + assertThat(e.getMessage()).contains("Payload must be set"); + } + + @Test + public void ackIgnoreNotFound() { + Key key = Key.of("k1"); + Mutation m = Mutation.newAckBuilder("TestQueue").setKey(key).setIgnoreNotFound(true).build(); + assertThat(m.getOperation()).isEqualTo(Mutation.Op.ACK); + assertThat(m.getQueue()).isEqualTo("TestQueue"); + assertThat(m.getKey()).isEqualTo(key); + assertTrue(m.getIgnoreNotFound()); + assertThat(m.toString()).isEqualTo("ack(TestQueue{key=[k1], ignoreNotFound=true})"); + } + + @Test + public void ackMissingKey() { + IllegalStateException e = + assertThrows( + IllegalStateException.class, () -> Mutation.newAckBuilder("TestQueue").build()); + assertThat(e.getMessage()).contains("Key must be set"); + } + + @Test + public void equalsAndHashCode() { + EqualsTester tester = new EqualsTester(); + + // Equality, not identity. + tester.addEqualityGroup( + Mutation.newInsertBuilder("T1").build(), Mutation.newInsertBuilder("T1").build()); + + // Operation types are distinguished. + tester.addEqualityGroup(Mutation.newInsertOrUpdateBuilder("T1").build()); + tester.addEqualityGroup(Mutation.newUpdateBuilder("T1").build()); + tester.addEqualityGroup(Mutation.newReplaceBuilder("T1").build()); + + // Table is distinguished. 
+ tester.addEqualityGroup(Mutation.newInsertBuilder("T2").build()); + + // Columns/values are distinguished (but by equality, not identity). + tester.addEqualityGroup( + Mutation.newInsertBuilder("T1").set("C").to("V").build(), + Mutation.newInsertBuilder("T1").set("C").to("V").build()); + + // Deletes consider the key set. + tester.addEqualityGroup(Mutation.delete("T1", KeySet.all())); + tester.addEqualityGroup( + Mutation.delete("T1", KeySet.singleKey(Key.of("k"))), Mutation.delete("T1", Key.of("k"))); + + // Test NaNs + // Refer the comment in `Value.hashCode()` for more details on NaN equality. + tester.addEqualityGroup( + Mutation.newInsertBuilder("T1").set("C").to(Double.NaN).build(), + Mutation.newInsertBuilder("T1").set("C").to(Value.float64(Double.NaN)).build(), + Mutation.newInsertBuilder("T1").set("C").to(Float.NaN).build(), + Mutation.newInsertBuilder("T1").set("C").to(Value.float64(Float.NaN)).build(), + Mutation.newInsertBuilder("T1").set("C").to(Value.float32(Float.NaN)).build()); + + // Test NaN arrays + tester.addEqualityGroup( + Mutation.newInsertBuilder("T1").set("C").toFloat32Array(new float[] {Float.NaN}).build(), + Mutation.newInsertBuilder("T1") + .set("C") + .toFloat32Array(new float[] {Float.NaN}, 0, 1) + .build(), + Mutation.newInsertBuilder("T1") + .set("C") + .toFloat32Array(Collections.singletonList(Float.NaN)) + .build(), + Mutation.newInsertBuilder("T1") + .set("C") + .to(Value.float32Array(new float[] {Float.NaN})) + .build(), + Mutation.newInsertBuilder("T1") + .set("C") + .to(Value.float32Array(new float[] {Float.NaN}, 0, 1)) + .build(), + Mutation.newInsertBuilder("T1") + .set("C") + .to(Value.float32Array(Collections.singletonList(Float.NaN))) + .build()); + tester.addEqualityGroup( + Mutation.newInsertBuilder("T1").set("C").toFloat64Array(new double[] {Double.NaN}).build(), + Mutation.newInsertBuilder("T1").set("C").toFloat64Array(new double[] {Float.NaN}).build(), + Mutation.newInsertBuilder("T1") + .set("C") + 
.toFloat64Array(new double[] {Double.NaN}, 0, 1) + .build(), + Mutation.newInsertBuilder("T1") + .set("C") + .toFloat64Array(new double[] {Float.NaN}, 0, 1) + .build(), + Mutation.newInsertBuilder("T1") + .set("C") + .toFloat64Array(Collections.singletonList(Double.NaN)) + .build(), + Mutation.newInsertBuilder("T1") + .set("C") + .toFloat64Array(Collections.singletonList((double) Float.NaN)) + .build(), + Mutation.newInsertBuilder("T1") + .set("C") + .to(Value.float64Array(new double[] {Double.NaN})) + .build(), + Mutation.newInsertBuilder("T1") + .set("C") + .to(Value.float64Array(new double[] {Float.NaN})) + .build(), + Mutation.newInsertBuilder("T1") + .set("C") + .to(Value.float64Array(new double[] {Double.NaN}, 0, 1)) + .build(), + Mutation.newInsertBuilder("T1") + .set("C") + .to(Value.float64Array(new double[] {Float.NaN}, 0, 1)) + .build(), + Mutation.newInsertBuilder("T1") + .set("C") + .to(Value.float64Array(Collections.singletonList(Double.NaN))) + .build(), + Mutation.newInsertBuilder("T1") + .set("C") + .to(Value.float64Array(Collections.singletonList((double) Float.NaN))) + .build()); + // Test NaNs and nulls + tester.addEqualityGroup( + Mutation.newInsertBuilder("T1") + .set("C") + .toFloat64Array(Arrays.asList(null, Double.NaN)) + .build(), + Mutation.newInsertBuilder("T1") + .set("C") + .toFloat64Array(Arrays.asList(null, (double) Float.NaN)) + .build()); + tester.addEqualityGroup( + Mutation.newInsertBuilder("T1") + .set("C") + .toFloat32Array(Arrays.asList(null, Float.NaN)) + .build()); + + tester.testEquals(); + } + + @Test + public void equalsAndHashCode_sendAndAck() { + EqualsTester tester = new EqualsTester(); + + Key key1 = Key.of("k1"); + Key key2 = Key.of("k2"); + Value payload1 = Value.string("p1"); + Value payload2 = Value.string("p2"); + Instant time1 = Instant.now(); + Instant time2 = time1.plusSeconds(10); + + // SEND + tester.addEqualityGroup( + Mutation.newSendBuilder("TestQueue").setKey(key1).setPayload(payload1).build(), + 
Mutation.newSendBuilder("TestQueue").setKey(key1).setPayload(payload1).build()); + // Different key + tester.addEqualityGroup( + Mutation.newSendBuilder("TestQueue").setKey(key2).setPayload(payload1).build()); + // Different payload + tester.addEqualityGroup( + Mutation.newSendBuilder("TestQueue").setKey(key1).setPayload(payload2).build()); + // Different queue + tester.addEqualityGroup( + Mutation.newSendBuilder("TestQueue2").setKey(key1).setPayload(payload1).build()); + // Different time + tester.addEqualityGroup( + Mutation.newSendBuilder("TestQueue") + .setKey(key1) + .setPayload(payload1) + .setDeliveryTime(time1) + .build(), + Mutation.newSendBuilder("TestQueue") + .setKey(key1) + .setPayload(payload1) + .setDeliveryTime(time1) + .build()); + tester.addEqualityGroup( + Mutation.newSendBuilder("TestQueue") + .setKey(key1) + .setPayload(payload1) + .setDeliveryTime(time2) + .build()); + + // ACK + tester.addEqualityGroup( + Mutation.newAckBuilder("TestQueue").setKey(key1).build(), + Mutation.newAckBuilder("TestQueue").setKey(key1).build()); + // Different key + tester.addEqualityGroup(Mutation.newAckBuilder("TestQueue").setKey(key2).build()); + // Different queue + tester.addEqualityGroup(Mutation.newAckBuilder("TestQueue2").setKey(key1).build()); + // Different ignoreNotFound + tester.addEqualityGroup( + Mutation.newAckBuilder("TestQueue").setKey(key1).setIgnoreNotFound(true).build(), + Mutation.newAckBuilder("TestQueue").setKey(key1).setIgnoreNotFound(true).build()); + + // Distinct Op types + tester.addEqualityGroup(Mutation.newInsertBuilder("TestQueue").build()); + + tester.testEquals(); + } + + @Test + public void serializationBasic() { + Instant time = Instant.now(); + List mutations = + Arrays.asList( + Mutation.newInsertBuilder("T").set("C").to("V").build(), + Mutation.newUpdateBuilder("T").set("C").to("V").build(), + Mutation.newInsertOrUpdateBuilder("T").set("C").to("V").build(), + Mutation.newReplaceBuilder("T").set("C").to("V").build(), + 
Mutation.delete("T", KeySet.singleKey(Key.of("k"))), + Mutation.newSendBuilder("Q") + .setKey(Key.of("k")) + .setPayload(Value.string("p")) + .setDeliveryTime(time) + .build(), + Mutation.newAckBuilder("Q").setKey(Key.of("k")).setIgnoreNotFound(true).build()); + + List proto = new ArrayList<>(); + + // Include an existing element so that we know toProto() do not clear the list. + com.google.spanner.v1.Mutation existingProto = + com.google.spanner.v1.Mutation.getDefaultInstance(); + proto.add(existingProto); + + Mutation.toProtoAndReturnRandomMutation(mutations, proto); + + assertThat(proto.size()).isAtLeast(1); + assertThat(proto.get(0)).isSameInstanceAs(existingProto); + proto.remove(0); + + assertThat(proto.size()).isEqualTo(7); + MatcherAssert.assertThat( + proto.get(0), + matchesProto("insert { table: 'T' columns: 'C' values { values { string_value: 'V' } } }")); + MatcherAssert.assertThat( + proto.get(1), + matchesProto("update { table: 'T' columns: 'C' values { values { string_value: 'V' } } }")); + MatcherAssert.assertThat( + proto.get(2), + matchesProto( + "insert_or_update { table: 'T' columns: 'C'" + + " values { values { string_value: 'V' } } }")); + MatcherAssert.assertThat( + proto.get(3), + matchesProto( + "replace { table: 'T' columns: 'C' values { values { string_value: 'V' } } }")); + MatcherAssert.assertThat( + proto.get(4), + matchesProto("delete { table: 'T' key_set { keys { values { string_value: 'k' } } } }")); + MatcherAssert.assertThat( + proto.get(5), + matchesProto( + "send { queue: 'Q' key { values { string_value: 'k' } } deliver_time { seconds: " + + time.getEpochSecond() + + " nanos: " + + time.getNano() + + " } payload { string_value: 'p' } }")); + MatcherAssert.assertThat( + proto.get(6), + matchesProto( + "ack { queue: 'Q' key { values { string_value: 'k' } } ignore_not_found: true }")); + } + + @Test + public void toProtoCoalescingChangeOfTable() { + List mutations = + Arrays.asList( + 
Mutation.newInsertBuilder("T1").set("C").to("V1").build(), + Mutation.newInsertBuilder("T1").set("C").to("V2").build(), + Mutation.newInsertBuilder("T1").set("C").to("V3").build(), + Mutation.newInsertBuilder("T2").set("C").to("V4").build(), + Mutation.newInsertBuilder("T2").set("C").to("V5").build()); + + List proto = new ArrayList<>(); + com.google.spanner.v1.Mutation mutation = + Mutation.toProtoAndReturnRandomMutation(mutations, proto); + // Random mutation returned should be INSERT with large number of values + MatcherAssert.assertThat( + mutation, + matchesProto( + "insert { table: 'T1' columns: 'C' values { values { string_value: 'V1' } }" + + " values { values { string_value: 'V2' } }" + + " values { values { string_value: 'V3' } } }")); + + assertThat(proto.size()).isEqualTo(2); + MatcherAssert.assertThat( + proto.get(0), + matchesProto( + "insert { table: 'T1' columns: 'C' values { values { string_value: 'V1' } }" + + " values { values { string_value: 'V2' } }" + + " values { values { string_value: 'V3' } } }")); + MatcherAssert.assertThat( + proto.get(1), + matchesProto( + "insert { table: 'T2' columns: 'C' values { values { string_value: 'V4' } }" + + " values { values { string_value: 'V5' } } }")); + } + + @Test + public void toProtoCoalescingChangeOfOperation() { + List mutations = + Arrays.asList( + Mutation.newInsertBuilder("T").set("C").to("V1").build(), + Mutation.newInsertBuilder("T").set("C").to("V2").build(), + Mutation.newInsertBuilder("T").set("C").to("V3").build(), + Mutation.newUpdateBuilder("T").set("C").to("V4").build(), + Mutation.newUpdateBuilder("T").set("C").to("V5").build()); + + List proto = new ArrayList<>(); + com.google.spanner.v1.Mutation mutation = + Mutation.toProtoAndReturnRandomMutation(mutations, proto); + // Random mutation returned should be of UPDATE operation + MatcherAssert.assertThat( + mutation, + matchesProto( + "update { table: 'T' columns: 'C' values { values { string_value: 'V4' } }" + + " values { values { 
string_value: 'V5' } } }")); + + assertThat(proto.size()).isEqualTo(2); + MatcherAssert.assertThat( + proto.get(0), + matchesProto( + "insert { table: 'T' columns: 'C' values { values { string_value: 'V1' } }" + + " values { values { string_value: 'V2' } }" + + " values { values { string_value: 'V3' } } }")); + MatcherAssert.assertThat( + proto.get(1), + matchesProto( + "update { table: 'T' columns: 'C' values { values { string_value: 'V4' } }" + + " values { values { string_value: 'V5' } } }")); + } + + @Test + public void toProtoCoalescingChangeOfColumn() { + List mutations = + Arrays.asList( + Mutation.newInsertBuilder("T").set("C1").to("V1").build(), + Mutation.newInsertBuilder("T").set("C1").to("V2").build(), + Mutation.newInsertBuilder("T").set("C1").to("V3").build(), + Mutation.newInsertBuilder("T").set("C2").to("V4").build(), + Mutation.newInsertBuilder("T").set("C2").to("V5").build()); + + List proto = new ArrayList<>(); + com.google.spanner.v1.Mutation mutation = + Mutation.toProtoAndReturnRandomMutation(mutations, proto); + MatcherAssert.assertThat( + mutation, + matchesProto( + "insert { table: 'T' columns: 'C1' values { values { string_value: 'V1' } }" + + " values { values { string_value: 'V2' } }" + + " values { values { string_value: 'V3' } } }")); + + assertThat(proto.size()).isEqualTo(2); + MatcherAssert.assertThat( + proto.get(0), + matchesProto( + "insert { table: 'T' columns: 'C1' values { values { string_value: 'V1' } }" + + " values { values { string_value: 'V2' } }" + + " values { values { string_value: 'V3' } } }")); + MatcherAssert.assertThat( + proto.get(1), + matchesProto( + "insert { table: 'T' columns: 'C2' values { values { string_value: 'V4' } }" + + " values { values { string_value: 'V5' } } }")); + } + + @Test + public void toProtoCoalescingDelete() { + List mutations = + Arrays.asList( + Mutation.delete("T", Key.of("k1")), + Mutation.delete("T", Key.of("k2")), + Mutation.delete("T", KeySet.range(KeyRange.closedOpen(Key.of("ka"), 
Key.of("kb")))), + Mutation.delete("T", KeySet.range(KeyRange.closedClosed(Key.of("kc"), Key.of("kd"))))); + + List proto = new ArrayList<>(); + com.google.spanner.v1.Mutation mutation = + Mutation.toProtoAndReturnRandomMutation(mutations, proto); + // Random mutation returned should be of DELETE operation + assertTrue(mutation.hasDelete()); + + assertThat(proto.size()).isEqualTo(1); + MatcherAssert.assertThat( + proto.get(0), + matchesProto( + "delete {" + + " table: 'T'" + + " key_set {" + + " keys { values { string_value: 'k1' } }" + + " keys { values { string_value: 'k2' } }" + + " ranges { start_closed { values { string_value: 'ka' } } " + + " end_open { values { string_value: 'kb' } } }" + + " ranges { start_closed { values { string_value: 'kc' } } " + + " end_closed { values { string_value: 'kd' } } }" + + " }" + + "} ")); + } + + @Test + public void toProtoCoalescingDeleteChanges() { + List mutations = + Arrays.asList( + Mutation.newInsertBuilder("T1").set("C").to("V1").build(), + Mutation.delete("T1", Key.of("k1")), + Mutation.delete("T1", Key.of("k2")), + Mutation.delete("T2", Key.of("k3")), + Mutation.delete("T2", Key.of("k4")), + Mutation.newInsertBuilder("T2").set("C").to("V1").build()); + + List proto = new ArrayList<>(); + com.google.spanner.v1.Mutation mutation = + Mutation.toProtoAndReturnRandomMutation(mutations, proto); + assertTrue(mutation.hasDelete()); + + assertThat(proto.size()).isEqualTo(4); + MatcherAssert.assertThat( + proto.get(0), + matchesProto( + "insert { table: 'T1' columns: 'C' values { values { string_value: 'V1' } } }")); + MatcherAssert.assertThat( + proto.get(1), + matchesProto( + "delete { table: 'T1' key_set { keys { values { string_value: 'k1' } } " + + "keys { values { string_value: 'k2' } } } }")); + MatcherAssert.assertThat( + proto.get(2), + matchesProto( + "delete { table: 'T2' key_set { keys { values { string_value: 'k3' } } " + + "keys { values { string_value: 'k4' } } } }")); + MatcherAssert.assertThat( + 
proto.get(3), + matchesProto( + "insert { table: 'T2', columns: 'C', values { values { string_value: 'V1' } } }")); + } + + @Test + public void toProtoWithEmptyInsertMutations() { + List mutations = + Arrays.asList( + Mutation.newInsertBuilder("T").build(), Mutation.newInsertBuilder("A").build()); + + List proto = new ArrayList<>(); + com.google.spanner.v1.Mutation mutation = + Mutation.toProtoAndReturnRandomMutation(mutations, proto); + + // Random mutation returned should be of INSERT operation with empty values + MatcherAssert.assertThat(mutation, matchesProto("insert { table: 'T' values { } }")); + + assertThat(proto.size()).isEqualTo(2); + } + + @Test + public void javaSerialization() { + reserializeAndAssert(appendAllTypes(Mutation.newInsertBuilder("test")).build()); + reserializeAndAssert(appendAllTypes(Mutation.newUpdateBuilder("test")).build()); + reserializeAndAssert(appendAllTypes(Mutation.newReplaceBuilder("test")).build()); + reserializeAndAssert(appendAllTypes(Mutation.newInsertOrUpdateBuilder("test")).build()); + + reserializeAndAssert( + Mutation.delete( + "test", + Key.of( + "one", + 2, + null, + true, + 2.3, + ByteArray.fromBase64("abcd"), + Timestamp.ofTimeSecondsAndNanos(1, 2), + Date.fromYearMonthDay(2017, 4, 17)))); + reserializeAndAssert(Mutation.delete("test", KeySet.all())); + reserializeAndAssert( + Mutation.delete( + "test", + KeySet.newBuilder() + .addRange(KeyRange.closedClosed(Key.of("one", 2, null), Key.of("two", 3, null))) + .build())); + reserializeAndAssert( + Mutation.delete( + "test", + KeySet.newBuilder() + .addRange(KeyRange.closedOpen(Key.of("one", 2, null), Key.of("two", 3, null))) + .build())); + reserializeAndAssert( + Mutation.delete( + "test", + KeySet.newBuilder() + .addRange(KeyRange.openClosed(Key.of("one", 2, null), Key.of("two", 3, null))) + .build())); + reserializeAndAssert( + Mutation.delete( + "test", + KeySet.newBuilder() + .addRange(KeyRange.openOpen(Key.of("one", 2, null), Key.of("two", 3, null))) + 
.build())); + } + + private Mutation.WriteBuilder appendAllTypes(Mutation.WriteBuilder builder) { + return builder + .set("bool") + .to(true) + .set("boolNull") + .to((Boolean) null) + .set("boolValue") + .to(Value.bool(false)) + .set("int") + .to(42) + .set("intNull") + .to((Long) null) + .set("intValue") + .to(Value.int64(1L)) + .set("float32") + .to(42.1f) + .set("float32Null") + .to((Float) null) + .set("float32Value") + .to(Value.float32(10f)) + .set("float64") + .to(42.1) + .set("float64Null") + .to((Double) null) + .set("float64Value") + .to(Value.float64(10D)) + .set("string") + .to("str") + .set("stringNull") + .to((String) null) + .set("stringValue") + .to(Value.string("strValue")) + .set("bigDecimal") + .to(BigDecimal.valueOf(123, 2)) + .set("bigDecimalNull") + .to((BigDecimal) null) + .set("bigDecimalValueAsNumeric") + .to(Value.numeric(BigDecimal.TEN)) + .set("pgNumericValue") + .to(Value.pgNumeric("4.2")) + .set("json") + .to(Value.json("{\"key\": \"value\"}}")) + .set("jsonNull") + .to(Value.json(null)) + .set("protoMessage") + .to(SingerInfo.newBuilder().setSingerId(232).setGenre(Genre.POP).build()) + .set("protoMessageNull") + .to(Value.protoMessage(null, SingerInfo.getDescriptor().getFullName())) + .set("protoEnum") + .to(Genre.JAZZ) + .set("protoEnumNull") + .to(Value.protoEnum(null, SingerInfo.getDescriptor().getFullName())) + .set("pgJsonb") + .to(Value.pgJsonb("{\"key\": \"value\"}}")) + .set("pgJsonbNull") + .to(Value.pgJsonb(null)) + .set("pgOid") + .to(Value.pgOid(42)) + .set("pgOidNull") + .to(Value.pgOid(null)) + .set("timestamp") + .to(Timestamp.MAX_VALUE) + .set("timestampNull") + .to((Timestamp) null) + .set("timestampValue") + .to(Value.timestamp(Timestamp.MIN_VALUE)) + .set("date") + .to(Date.fromYearMonthDay(2017, 4, 17)) + .set("dateNull") + .to((Date) null) + .set("dateValue") + .to(Value.date(Date.fromYearMonthDay(2021, 1, 2))) + .set("boolArr") + .toBoolArray(new boolean[] {true, false}) + .set("boolArrNull") + 
.toBoolArray((boolean[]) null) + .set("boolArrValue") + .to(Value.boolArray(ImmutableList.of(false, true))) + .set("intArr") + .toInt64Array(new long[] {1, 2, 3}) + .set("intArrNull") + .toInt64Array((long[]) null) + .set("intArrValue") + .to(Value.int64Array(ImmutableList.of(1L, 2L))) + .set("float32Arr") + .toFloat32Array(new float[] {1.1f, 2.2f, 3.3f}) + .set("float32ArrNull") + .toFloat32Array((float[]) null) + .set("float32ArrValue") + .to(Value.float32Array(ImmutableList.of(10.1F, 10.2F, 10.3F))) + .set("float64Arr") + .toFloat64Array(new double[] {1.1, 2.2, 3.3}) + .set("float64ArrNull") + .toFloat64Array((double[]) null) + .set("float64ArrValue") + .to(Value.float64Array(ImmutableList.of(10.1D, 10.2D, 10.3D))) + .set("stringArr") + .toStringArray(ImmutableList.of("one", "two")) + .set("stringArrNull") + .toStringArray(null) + .set("stringArrValue") + .to(Value.stringArray(ImmutableList.of("uno", "dos"))) + .set("numericArr") + .toNumericArray(ImmutableList.of(BigDecimal.ONE, BigDecimal.TEN)) + .set("numericArrNull") + .toNumericArray(null) + .set("numericArrValue") + .to(Value.numericArray(ImmutableList.of(BigDecimal.ZERO, BigDecimal.valueOf(234, 2)))) + .set("pgNumericArr") + .toPgNumericArray(ImmutableList.of("1.23", "2.34")) + .set("pgNumericArrNull") + .toPgNumericArray(null) + .set("pgNumericArrValue") + .to(Value.pgNumericArray(ImmutableList.of("10.20", "20.30"))) + .set("jsonArr") + .toJsonArray(ImmutableList.of("{\"key\": \"value1\"}}", "{\"key\": \"value2\"}")) + .set("jsonArrNull") + .toJsonArray(null) + .set("jsonArrValue") + .to(Value.jsonArray(ImmutableList.of("{\"key\": \"value1\"}}", "{\"key\": \"value2\"}"))) + .set("protoMessageArr") + .toProtoMessageArray( + ImmutableList.of(SingerInfo.newBuilder().setSingerId(232).setGenre(Genre.POP).build()), + SingerInfo.getDescriptor()) + .set("protoMessageArrNull") + .toProtoMessageArray(null, SingerInfo.getDescriptor()) + .set("protoMessageArrValue") + .to( + Value.protoMessageArray( + 
ImmutableList.of( + SingerInfo.newBuilder().setSingerId(232).setGenre(Genre.POP).build()), + SingerInfo.getDescriptor())) + .set("protoEnumArr") + .toProtoEnumArray(ImmutableList.of(Genre.JAZZ), Genre.getDescriptor()) + .set("protoEnumArrNull") + .toProtoEnumArray(null, Genre.getDescriptor()) + .set("protoEnumArrValue") + .to(Value.protoEnumArray(ImmutableList.of(Genre.JAZZ), Genre.getDescriptor())) + .set("pgJsonbArr") + .toPgJsonbArray(ImmutableList.of("{\"key\": \"value1\"}}", "{\"key\": \"value2\"}")) + .set("pgJsonbArrNull") + .toPgJsonbArray(null) + .set("pgJsonbArrValue") + .to(Value.pgJsonbArray(ImmutableList.of("{\"key\": \"value1\"}}", "{\"key\": \"value2\"}"))) + .set("pgOidArr") + .toPgOidArray(new long[] {1, 2, 3}) + .set("pgOidArrNull") + .toPgOidArray((long[]) null) + .set("pgOidArrValue") + .to(Value.pgOidArray(ImmutableList.of(1L, 2L))) + .set("timestampArr") + .toTimestampArray(ImmutableList.of(Timestamp.MAX_VALUE, Timestamp.MAX_VALUE)) + .set("timestampArrNull") + .toTimestampArray(null) + .set("timestampArrValue") + .to(Value.timestampArray(ImmutableList.of(Timestamp.MIN_VALUE, Timestamp.MAX_VALUE))) + .set("dateArr") + .toDateArray( + ImmutableList.of( + Date.fromYearMonthDay(2017, 4, 17), Date.fromYearMonthDay(2017, 4, 18))) + .set("dateArrNull") + .toDateArray(null) + .set("dateArrValue") + .to( + Value.dateArray( + ImmutableList.of( + Date.fromYearMonthDay(2021, 1, 2), Date.fromYearMonthDay(2022, 2, 3)))); + } + + static Matcher matchesProto(String expected) { + return SpannerMatchers.matchesProto(com.google.spanner.v1.Mutation.class, expected); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenCensusApiTracerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenCensusApiTracerTest.java new file mode 100644 index 000000000000..ad27c775729b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenCensusApiTracerTest.java 
@@ -0,0 +1,427 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.FailOnOverkillTraceComponentImpl.TestSpan; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.SpannerOptions.SpannerEnvironment; +import com.google.cloud.spanner.connection.RandomResultSetGenerator; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import io.grpc.Status; +import io.opencensus.trace.Status.CanonicalCode; +import io.opencensus.trace.Tracing; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.trace.data.SpanData; +import java.lang.reflect.Modifier; +import java.time.Duration; +import java.util.List; +import 
java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.Assume; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(TracerTest.class) +@RunWith(JUnit4.class) +@Ignore("OpenCensus is too intrusive and affects other tests, so this test is by default disabled") +public class OpenCensusApiTracerTest extends AbstractMockServerTest { + private static final Statement SELECT_RANDOM = Statement.of("SELECT * FROM random"); + + private static final Statement UPDATE_RANDOM = Statement.of("UPDATE random SET foo=1 WHERE id=1"); + private static final FailOnOverkillTraceComponentImpl failOnOverkillTraceComponent = + new FailOnOverkillTraceComponentImpl(); + + private DatabaseClient client; + + @BeforeClass + public static void setupOpenTelemetry() throws Exception { + Assume.assumeTrue( + "This test is only supported on JDK11 and lower", + JavaVersionUtil.getJavaMajorVersion() < 12); + + SpannerOptions.resetActiveTracingFramework(); + SpannerOptions.enableOpenCensusTraces(); + + // Use a little reflection to set the test tracer. + // This is not possible in Java 12 and later. + java.lang.reflect.Field field = Tracing.class.getDeclaredField("traceComponent"); + field.setAccessible(true); + java.lang.reflect.Field modifiersField = null; + try { + modifiersField = java.lang.reflect.Field.class.getDeclaredField("modifiers"); + } catch (NoSuchFieldException e) { + // Halt the test and ignore it. + Assume.assumeTrue( + "Skipping test as reflection is not allowed on reflection class in this JDK build", + false); + } + modifiersField.setAccessible(true); + // Remove the final modifier from the 'traceComponent' field. 
+ modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL); + field.set(null, failOnOverkillTraceComponent); + } + + @BeforeClass + public static void setupResults() { + RandomResultSetGenerator generator = new RandomResultSetGenerator(1); + mockSpanner.putStatementResult(StatementResult.query(SELECT_RANDOM, generator.generate())); + mockSpanner.putStatementResults(StatementResult.update(UPDATE_RANDOM, 1L)); + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + failOnOverkillTraceComponent.clearSpans(); + failOnOverkillTraceComponent.clearAnnotations(); + } + + @Override + public void createSpannerInstance() { + SpannerOptions.Builder builder = SpannerOptions.newBuilder(); + // Set a quick polling algorithm to prevent this from slowing down the test unnecessarily. + builder + .getDatabaseAdminStubSettingsBuilder() + .updateDatabaseDdlOperationSettings() + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofNanos(1L)) + .setMaxRetryDelayDuration(Duration.ofNanos(1L)) + .setRetryDelayMultiplier(1.0) + .setTotalTimeoutDuration(Duration.ofMinutes(10L)) + .build())); + spanner = + builder + .setProjectId("test-project") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setWaitForMinSessionsDuration(Duration.ofSeconds(5L)) + .setFailOnSessionLeak() + .build()) + .setEnableApiTracing(true) + .build() + .getService(); + client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + } + + @Test + public void testSingleUseQuery() { + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + + Map spans = failOnOverkillTraceComponent.getSpans(); + assertContains("CloudSpanner.ReadOnlyTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteStreamingQuery", 
spans); + assertContains("Spanner.ExecuteStreamingSql", spans); + } + + @Test + public void testExecuteUpdate() { + assertNotNull( + client.readWriteTransaction().run(transaction -> transaction.executeUpdate(UPDATE_RANDOM))); + + Map spans = failOnOverkillTraceComponent.getSpans(); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteUpdate", spans); + assertContains("CloudSpannerOperation.Commit", spans); + assertContains("Spanner.ExecuteSql", spans); + assertContains("Spanner.Commit", spans); + } + + @Test + public void testBatchUpdate() { + assertNotNull( + client + .readWriteTransaction() + .run( + transaction -> + transaction.batchUpdate(ImmutableList.of(UPDATE_RANDOM, UPDATE_RANDOM)))); + + Map spans = failOnOverkillTraceComponent.getSpans(); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains("CloudSpannerOperation.BatchUpdate", spans); + assertContains("CloudSpannerOperation.Commit", spans); + assertContains("Spanner.ExecuteBatchDml", spans); + assertContains("Spanner.Commit", spans); + } + + @Test + public void testMultiUseReadOnlyQuery() { + try (ReadOnlyTransaction readOnlyTransaction = client.readOnlyTransaction()) { + try (ResultSet resultSet = readOnlyTransaction.executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + } + + Map spans = failOnOverkillTraceComponent.getSpans(); + assertContains("CloudSpanner.ReadOnlyTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteStreamingQuery", spans); + assertContains("Spanner.ExecuteStreamingSql", spans); + } + + @Test + public void testReadWriteTransactionQuery() { + client + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet resultSet = transaction.executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + return null; + }); + + Map spans = failOnOverkillTraceComponent.getSpans(); + 
assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteStreamingQuery", spans); + assertContains("CloudSpannerOperation.Commit", spans); + } + + // TODO: Enable test when the problem with overkilling the span has been fixed. + @Ignore("The client.write method overkills the span") + @Test + public void testRetryUnaryRpc() { + mockSpanner.setBeginTransactionExecutionTime( + SimulatedExecutionTime.ofException(Status.UNAVAILABLE.asRuntimeException())); + + // Execute a simple read/write transaction using only mutations. This will use the + // BeginTransaction RPC to start the transaction. That RPC will first return UNAVAILABLE, then + // be retried by Gax, and succeed. The retry should show up in the tracing. + client.write(ImmutableList.of(Mutation.newInsertBuilder("foo").set("bar").to(1L).build())); + + List spans = failOnOverkillTraceComponent.getTestSpans(); + TestSpan span = getSpan("Spanner.BeginTransaction", spans); + assertNotNull(span.getStatus()); + assertEquals(CanonicalCode.OK, span.getStatus().getCanonicalCode()); + } + + @Test + public void testRetryQuery() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException(Status.UNAVAILABLE.asRuntimeException())); + + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + + List spans = failOnOverkillTraceComponent.getTestSpans(); + // UNAVAILABLE errors for the ExecuteStreamingSql RPC are manually retried by the Spanner client + // library, and not by Gax. This means that we get two Gax spans, instead of one with a retry + // attempt. 
+ List executeStreamingSqlSpans = getSpans("Spanner.ExecuteStreamingSql", spans); + assertEquals(2, executeStreamingSqlSpans.size()); + TestSpan span1 = executeStreamingSqlSpans.get(0); + assertNull(span1.getStatus()); + TestSpan span2 = executeStreamingSqlSpans.get(1); + assertNull(span2.getStatus()); + } + + @Test + public void testLroSucceeded() throws Exception { + addUpdateDdlResponse(); + + OperationFuture operationFuture = + spanner + .getDatabaseAdminClient() + .updateDatabaseDdl( + "i", "d", ImmutableList.of("create table foo (id int64) primary key (id)"), null); + assertNull(operationFuture.get()); + + List spans = failOnOverkillTraceComponent.getTestSpans(); + TestSpan updateDatabaseDdl = getSpan("DatabaseAdmin.UpdateDatabaseDdl", spans); + assertNotNull(updateDatabaseDdl); + assertEquals(1, updateDatabaseDdl.getAnnotations().size()); + assertEquals("Attempt succeeded", updateDatabaseDdl.getAnnotations().get(0)); + + TestSpan updateDatabaseDdlOperation = + getSpan("DatabaseAdmin.UpdateDatabaseDdlOperation", spans); + assertTrue(updateDatabaseDdlOperation.getAnnotations().size() >= 2); + assertContainsEvent("Operation started", updateDatabaseDdlOperation.getAnnotations()); + if (updateDatabaseDdlOperation.getAnnotations().size() > 2) { + assertContainsEvent("Scheduling next poll", updateDatabaseDdlOperation.getAnnotations()); + } + assertContainsEvent("Polling completed", updateDatabaseDdlOperation.getAnnotations()); + + // Verify that there are two GetOperations calls for polling the lro. 
+ List polls = getSpans("Operations.GetOperation", spans); + assertEquals(2, polls.size()); + } + + @Test + public void testLroCreationFailed() { + mockDatabaseAdmin.addException(Status.INVALID_ARGUMENT.asRuntimeException()); + + OperationFuture operationFuture = + spanner + .getDatabaseAdminClient() + .updateDatabaseDdl( + "i", "d", ImmutableList.of("create table foo (id int64) primary key (id)"), null); + ExecutionException executionException = + assertThrows(ExecutionException.class, operationFuture::get); + assertEquals( + ErrorCode.INVALID_ARGUMENT, + SpannerExceptionFactory.asSpannerException(executionException.getCause()).getErrorCode()); + + List spans = failOnOverkillTraceComponent.getTestSpans(); + TestSpan updateDatabaseDdl = getSpan("DatabaseAdmin.UpdateDatabaseDdl", spans); + assertEquals(1, updateDatabaseDdl.getAnnotations().size()); + } + + @Test + public void testLroOperationFailed() { + addUpdateDdlError(); + + OperationFuture operationFuture = + spanner + .getDatabaseAdminClient() + .updateDatabaseDdl( + "i", "d", ImmutableList.of("create table foo (id int64) primary key (id)"), null); + ExecutionException executionException = + assertThrows(ExecutionException.class, operationFuture::get); + assertEquals( + ErrorCode.FAILED_PRECONDITION, + SpannerExceptionFactory.asSpannerException(executionException.getCause()).getErrorCode()); + + List spans = failOnOverkillTraceComponent.getTestSpans(); + // Creating the LRO succeeds. + TestSpan updateDatabaseDdl = getSpan("DatabaseAdmin.UpdateDatabaseDdl", spans); + assertEquals(1, updateDatabaseDdl.getAnnotations().size()); + assertEquals("Attempt succeeded", updateDatabaseDdl.getAnnotations().get(0)); + + // The LRO itself returns an error. 
+ TestSpan updateDatabaseDdlOperation = + getSpan("DatabaseAdmin.UpdateDatabaseDdlOperation", spans); + assertTrue(updateDatabaseDdlOperation.getAnnotations().size() >= 2); + assertContainsEvent("Operation started", updateDatabaseDdlOperation.getAnnotations()); + if (updateDatabaseDdlOperation.getAnnotations().size() > 2) { + assertContainsEvent("Starting poll attempt 0", updateDatabaseDdlOperation.getAnnotations()); + } + assertContainsEvent("Polling completed", updateDatabaseDdlOperation.getAnnotations()); + } + + @Test + public void testEnableWithEnvVar() { + SpannerOptions.useEnvironment( + new SpannerEnvironment() { + @Override + public boolean isEnableApiTracing() { + return true; + } + }); + // Create a Spanner instance without explicitly enabling API tracing. + Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setWaitForMinSessionsDuration(Duration.ofSeconds(5L)) + .setFailOnSessionLeak() + .build()) + .build() + .getService(); + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + + Map spans = failOnOverkillTraceComponent.getSpans(); + assertContains("CloudSpanner.ReadOnlyTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteStreamingQuery", spans); + assertContains("Spanner.ExecuteStreamingSql", spans); + } + + void assertContains(String expected, Map spans) { + assertTrue( + "Expected " + spansToString(spans) + " to contain " + expected, + spans.keySet().stream().anyMatch(span -> span.equals(expected))); + } + + void assertContainsEvent(String expected, List events) { + assertTrue( + "Expected " + eventsToString(events) + " to contain " + expected, + events.stream().anyMatch(event -> 
event.equals(expected))); + } + + boolean equalsSpan(SpanData span, String name, Attributes attributes) { + if (!span.getName().equals(name)) { + return false; + } + for (Entry, Object> entry : attributes.asMap().entrySet()) { + if (!span.getAttributes().asMap().containsKey(entry.getKey())) { + return false; + } + if (!Objects.equals(entry.getValue(), span.getAttributes().get(entry.getKey()))) { + return false; + } + } + return true; + } + + TestSpan getSpan(String name, List spans) { + return spans.stream() + .filter(span -> span.getSpanName().equals(name)) + .findAny() + .orElseThrow(() -> new IllegalArgumentException("Span " + name + " not found")); + } + + List getSpans(String name, List spans) { + return spans.stream() + .filter(span -> Objects.equals(span.getSpanName(), name)) + .collect(Collectors.toList()); + } + + private String spansToString(Map spans) { + return spans.keySet().stream().collect(Collectors.joining("\n", "\n", "\n")); + } + + private String eventsToString(List events) { + return events.stream().collect(Collectors.joining("\n", "\n", "\n")); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenTelemetryApiTracerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenTelemetryApiTracerTest.java new file mode 100644 index 000000000000..67012ed96225 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenTelemetryApiTracerTest.java @@ -0,0 +1,571 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.AsyncTransactionManager.CommitTimestampFuture; +import com.google.cloud.spanner.AsyncTransactionManager.TransactionContextFuture; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.SpannerOptions.SpannerEnvironment; +import com.google.cloud.spanner.connection.RandomResultSetGenerator; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import io.grpc.Status; +import io.opentelemetry.api.GlobalOpenTelemetry; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.StatusCode; +import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator; +import io.opentelemetry.context.propagation.ContextPropagators; +import io.opentelemetry.sdk.OpenTelemetrySdk; 
+import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.testing.exporter.InMemorySpanExporter; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.data.EventData; +import io.opentelemetry.sdk.trace.data.SpanData; +import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor; +import java.time.Duration; +import java.util.List; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class OpenTelemetryApiTracerTest extends AbstractMockServerTest { + private static final Statement SELECT_RANDOM = Statement.of("SELECT * FROM random"); + + private static final Statement UPDATE_RANDOM = Statement.of("UPDATE random SET foo=1 WHERE id=1"); + private static InMemorySpanExporter spanExporter; + + private static OpenTelemetrySdk openTelemetry; + + private DatabaseClient client; + + @BeforeClass + public static void setupOpenTelemetry() { + SpannerOptions.resetActiveTracingFramework(); + SpannerOptions.enableOpenTelemetryTraces(); + GlobalOpenTelemetry.resetForTest(); + + spanExporter = InMemorySpanExporter.create(); + + SdkTracerProvider tracerProvider = + SdkTracerProvider.builder() + .addSpanProcessor(SimpleSpanProcessor.create(spanExporter)) + .build(); + + openTelemetry = + OpenTelemetrySdk.builder() + .setPropagators(ContextPropagators.create(W3CTraceContextPropagator.getInstance())) + .setTracerProvider(tracerProvider) + .buildAndRegisterGlobal(); + } + + @BeforeClass + public static void setupResults() { + RandomResultSetGenerator generator = new RandomResultSetGenerator(1); + mockSpanner.putStatementResult(StatementResult.query(SELECT_RANDOM, generator.generate())); + 
mockSpanner.putStatementResults(StatementResult.update(UPDATE_RANDOM, 1L)); + } + + @AfterClass + public static void closeOpenTelemetry() { + if (openTelemetry != null) { + openTelemetry.close(); + } + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + spanExporter.reset(); + } + + @Override + public void createSpannerInstance() { + SpannerOptions.Builder builder = SpannerOptions.newBuilder(); + // Set a quick polling algorithm to prevent this from slowing down the test unnecessarily. + builder + .getDatabaseAdminStubSettingsBuilder() + .updateDatabaseDdlOperationSettings() + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofNanos(1L)) + .setMaxRetryDelayDuration(Duration.ofNanos(1L)) + .setRetryDelayMultiplier(1.0) + .setTotalTimeoutDuration(Duration.ofMinutes(10L)) + .build())); + spanner = + builder + .setProjectId("test-project") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setWaitForMinSessionsDuration(Duration.ofSeconds(5L)) + .setFailOnSessionLeak() + .setSkipVerifyingBeginTransactionForMuxRW(true) + .build()) + .setEnableApiTracing(true) + .build() + .getService(); + client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + } + + @Test + public void testSingleUseQuery() { + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpanner.ReadOnlyTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteStreamingQuery", spans); + assertContains("Spanner.ExecuteStreamingSql", spans); + assertParent( + "CloudSpanner.ReadOnlyTransaction", "CloudSpannerOperation.ExecuteStreamingQuery", spans); + 
assertParent( + "CloudSpannerOperation.ExecuteStreamingQuery", "Spanner.ExecuteStreamingSql", spans); + } + + @Test + public void testExecuteUpdate() { + assertNotNull( + client.readWriteTransaction().run(transaction -> transaction.executeUpdate(UPDATE_RANDOM))); + + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteUpdate", spans); + assertContains("CloudSpannerOperation.Commit", spans); + assertContains("Spanner.ExecuteSql", spans); + assertContains("Spanner.Commit", spans); + + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.ExecuteUpdate", spans); + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.Commit", spans); + assertParent("CloudSpannerOperation.ExecuteUpdate", "Spanner.ExecuteSql", spans); + } + + @Test + public void testBatchUpdate() { + assertNotNull( + client + .readWriteTransaction() + .run( + transaction -> + transaction.batchUpdate(ImmutableList.of(UPDATE_RANDOM, UPDATE_RANDOM)))); + + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains("CloudSpannerOperation.BatchUpdate", spans); + assertContains("CloudSpannerOperation.Commit", spans); + assertContains("Spanner.ExecuteBatchDml", spans); + assertContains("Spanner.Commit", spans); + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.BatchUpdate", spans); + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.Commit", spans); + assertParent("CloudSpannerOperation.BatchUpdate", "Spanner.ExecuteBatchDml", spans); + assertParent("CloudSpannerOperation.Commit", "Spanner.Commit", spans); + } + + @Test + public void testMultiUseReadOnlyQuery() { + try (ReadOnlyTransaction readOnlyTransaction 
= client.readOnlyTransaction()) { + try (ResultSet resultSet = readOnlyTransaction.executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + } + + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpanner.ReadOnlyTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteStreamingQuery", spans); + assertContains("Spanner.ExecuteStreamingSql", spans); + assertParent( + "CloudSpanner.ReadOnlyTransaction", + "CloudSpannerOperation.ExecuteStreamingQuery", + Attributes.empty(), + spans); + assertParent( + "CloudSpannerOperation.ExecuteStreamingQuery", + "Spanner.ExecuteStreamingSql", + Attributes.empty(), + spans); + } + + @Test + public void testReadWriteTransactionQuery() { + client + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet resultSet = transaction.executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + return null; + }); + + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteStreamingQuery", spans); + assertContains("CloudSpannerOperation.Commit", spans); + assertParent( + "CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.ExecuteStreamingQuery", spans); + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.Commit", spans); + assertParent( + "CloudSpannerOperation.ExecuteStreamingQuery", "Spanner.ExecuteStreamingSql", spans); + } + + @Test + public void testRetryUnaryRpc() { + mockSpanner.setBeginTransactionExecutionTime( + SimulatedExecutionTime.ofException(Status.UNAVAILABLE.asRuntimeException())); + + // Execute a simple read/write transaction using only mutations. This will use the + // BeginTransaction RPC to start the transaction. 
That RPC will first return UNAVAILABLE, then + // be retried by Gax, and succeed. The retry should show up in the tracing. + client.write(ImmutableList.of(Mutation.newInsertBuilder("foo").set("bar").to(1L).build())); + + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + SpanData span = getSpan("Spanner.BeginTransaction", spans); + assertEquals(StatusCode.OK, span.getStatus().getStatusCode()); + assertEquals(3, span.getTotalRecordedEvents()); + List events = span.getEvents(); + assertEquals("Attempt failed, scheduling next attempt", events.get(0).getName()); + assertEquals("Starting RPC retry 1", events.get(1).getName()); + assertEquals("Attempt succeeded", events.get(2).getName()); + } + + @Test + public void testRetryQuery() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException(Status.UNAVAILABLE.asRuntimeException())); + + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + // UNAVAILABLE errors for the ExecuteStreamingSql RPC are manually retried by the Spanner client + // library, and not by Gax. This means that we get two Gax spans, instead of one with a retry + // attempt. 
+ List executeStreamingSqlSpans = + getSpans("Spanner.ExecuteStreamingSql", Attributes.empty(), spans); + assertEquals(2, executeStreamingSqlSpans.size()); + SpanData span1 = executeStreamingSqlSpans.get(0); + assertEquals(StatusCode.ERROR, span1.getStatus().getStatusCode()); + SpanData span2 = executeStreamingSqlSpans.get(1); + assertEquals(StatusCode.OK, span2.getStatus().getStatusCode()); + } + + @Test + public void testLroSucceeded() throws Exception { + addUpdateDdlResponse(); + + OperationFuture operationFuture = + spanner + .getDatabaseAdminClient() + .updateDatabaseDdl( + "i", "d", ImmutableList.of("create table foo (id int64) primary key (id)"), null); + assertNull(operationFuture.get()); + + // Wait until the last span has been exported, which can take a few microseconds, as it is + // added by a gRPC executor thread. + do { + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + } while (getSpans( + "DatabaseAdmin.UpdateDatabaseDdlOperation", + Attributes.empty(), + spanExporter.getFinishedSpanItems()) + .isEmpty() + || getSpans( + "Operations.GetOperation", + Attributes.empty(), + spanExporter.getFinishedSpanItems()) + .size() + < 2); + List spans = spanExporter.getFinishedSpanItems(); + + SpanData updateDatabaseDdl = getSpan("DatabaseAdmin.UpdateDatabaseDdl", spans); + assertEquals(1, updateDatabaseDdl.getTotalRecordedEvents()); + assertEquals("Attempt succeeded", updateDatabaseDdl.getEvents().get(0).getName()); + + SpanData updateDatabaseDdlOperation = + getSpan("DatabaseAdmin.UpdateDatabaseDdlOperation", spans); + assertTrue(updateDatabaseDdlOperation.getTotalRecordedEvents() >= 5); + assertContainsEvent("Operation started", updateDatabaseDdlOperation.getEvents()); + assertContainsEvent("Starting poll attempt 0", updateDatabaseDdlOperation.getEvents()); + assertContainsEvent("Scheduling next poll", updateDatabaseDdlOperation.getEvents()); + assertContainsEvent("Starting poll attempt 1", updateDatabaseDdlOperation.getEvents()); + 
assertContainsEvent("Polling completed", updateDatabaseDdlOperation.getEvents()); + + // Verify that there are two GetOperations calls for polling the lro. + List polls = getSpans("Operations.GetOperation", Attributes.empty(), spans); + assertEquals(2, polls.size()); + } + + @Test + public void testLroCreationFailed() { + mockDatabaseAdmin.addException(Status.INVALID_ARGUMENT.asRuntimeException()); + + OperationFuture operationFuture = + spanner + .getDatabaseAdminClient() + .updateDatabaseDdl( + "i", "d", ImmutableList.of("create table foo (id int64) primary key (id)"), null); + ExecutionException executionException = + assertThrows(ExecutionException.class, operationFuture::get); + assertEquals( + ErrorCode.INVALID_ARGUMENT, + SpannerExceptionFactory.asSpannerException(executionException.getCause()).getErrorCode()); + + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + + SpanData updateDatabaseDdl = getSpan("DatabaseAdmin.UpdateDatabaseDdl", spans); + assertEquals(1, updateDatabaseDdl.getTotalRecordedEvents()); + assertEquals( + "Attempt failed, error not retryable", updateDatabaseDdl.getEvents().get(0).getName()); + assertEquals(StatusCode.ERROR, updateDatabaseDdl.getStatus().getStatusCode()); + } + + @Test + public void testLroOperationFailed() { + addUpdateDdlError(); + + OperationFuture operationFuture = + spanner + .getDatabaseAdminClient() + .updateDatabaseDdl( + "i", "d", ImmutableList.of("create table foo (id int64) primary key (id)"), null); + ExecutionException executionException = + assertThrows(ExecutionException.class, operationFuture::get); + assertEquals( + ErrorCode.FAILED_PRECONDITION, + SpannerExceptionFactory.asSpannerException(executionException.getCause()).getErrorCode()); + + // Wait until the last span has been exported, which can take a few microseconds, as it is + // added by a gRPC executor thread. 
+ do { + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + } while (getSpans( + "DatabaseAdmin.UpdateDatabaseDdlOperation", + Attributes.empty(), + spanExporter.getFinishedSpanItems()) + .isEmpty()); + List spans = spanExporter.getFinishedSpanItems(); + + // Creating the LRO succeeds. + SpanData updateDatabaseDdl = getSpan("DatabaseAdmin.UpdateDatabaseDdl", spans); + assertEquals(1, updateDatabaseDdl.getTotalRecordedEvents()); + assertEquals("Attempt succeeded", updateDatabaseDdl.getEvents().get(0).getName()); + assertEquals(StatusCode.OK, updateDatabaseDdl.getStatus().getStatusCode()); + + // The LRO itself returns an error. + SpanData updateDatabaseDdlOperation = + getSpan("DatabaseAdmin.UpdateDatabaseDdlOperation", spans); + assertEquals(3, updateDatabaseDdlOperation.getTotalRecordedEvents()); + assertContainsEvent("Operation started", updateDatabaseDdlOperation.getEvents()); + assertContainsEvent("Starting poll attempt 0", updateDatabaseDdlOperation.getEvents()); + assertContainsEvent("Polling completed", updateDatabaseDdlOperation.getEvents()); + assertEquals(StatusCode.ERROR, updateDatabaseDdlOperation.getStatus().getStatusCode()); + } + + @Test + public void testEnableWithEnvVar() { + SpannerOptions.useEnvironment( + new SpannerEnvironment() { + @Override + public boolean isEnableApiTracing() { + return true; + } + }); + // Create a Spanner instance without explicitly enabling API tracing. 
+ Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setWaitForMinSessionsDuration(Duration.ofSeconds(5L)) + .setFailOnSessionLeak() + .setSkipVerifyingBeginTransactionForMuxRW(true) + .build()) + .build() + .getService(); + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpanner.ReadOnlyTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteStreamingQuery", spans); + assertContains("Spanner.ExecuteStreamingSql", spans); + assertParent( + "CloudSpanner.ReadOnlyTransaction", "CloudSpannerOperation.ExecuteStreamingQuery", spans); + assertParent( + "CloudSpannerOperation.ExecuteStreamingQuery", "Spanner.ExecuteStreamingSql", spans); + } + + @Test + public void testAsyncTransactionManagerCommit() throws Exception { + try (AsyncTransactionManager manager = client.transactionManagerAsync()) { + TransactionContextFuture transactionFuture = manager.beginAsync(); + CommitTimestampFuture commitTimestamp = + transactionFuture + .then( + (transaction, __) -> transaction.executeUpdateAsync(UPDATE_RANDOM), + MoreExecutors.directExecutor()) + .commitAsync(); + commitTimestamp.get(); + } + + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteUpdate", spans); + assertContains("CloudSpannerOperation.Commit", spans); + assertContains("Spanner.ExecuteSql", spans); + 
assertContains("Spanner.Commit", spans); + + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.ExecuteUpdate", spans); + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.Commit", spans); + assertParent("CloudSpannerOperation.ExecuteUpdate", "Spanner.ExecuteSql", spans); + } + + @Test + public void testAsyncTransactionManagerRollback() throws Exception { + try (AsyncTransactionManager manager = client.transactionManagerAsync()) { + TransactionContextFuture transactionFuture = manager.beginAsync(); + transactionFuture + .then( + (transaction, __) -> transaction.executeUpdateAsync(UPDATE_RANDOM), + MoreExecutors.directExecutor()) + .get(); + manager.rollbackAsync().get(); + } + + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteUpdate", spans); + assertContains("Spanner.ExecuteSql", spans); + assertContains("Spanner.Rollback", spans); + + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.ExecuteUpdate", spans); + assertParent("CloudSpannerOperation.ExecuteUpdate", "Spanner.ExecuteSql", spans); + SpanData transactionSpan = getSpan("CloudSpanner.ReadWriteTransaction", spans); + assertNotNull(transactionSpan); + assertContainsEvent("Transaction rolled back", transactionSpan.getEvents()); + } + + void assertContains(String expected, List spans) { + assertTrue( + "Expected " + spansToString(spans) + " to contain " + expected, + spans.stream().anyMatch(span -> span.getName().equals(expected))); + } + + void assertContainsEvent(String expected, List events) { + assertTrue( + "Expected " + eventsToString(events) + " to contain " + expected, + events.stream().anyMatch(event -> event.getName().equals(expected))); + } + + boolean equalsSpan(SpanData span, String name, Attributes attributes) { + if (!span.getName().equals(name)) { 
+ return false; + } + for (Entry, Object> entry : attributes.asMap().entrySet()) { + if (!span.getAttributes().asMap().containsKey(entry.getKey())) { + return false; + } + if (!Objects.equals(entry.getValue(), span.getAttributes().get(entry.getKey()))) { + return false; + } + } + return true; + } + + void assertParent(String expectedParent, String child, List spans) { + SpanData parentSpan = getSpan(expectedParent, spans); + SpanData childSpan = getSpan(child, spans); + assertEquals(parentSpan.getSpanId(), childSpan.getParentSpanId()); + } + + void assertParent( + String expectedParent, String child, Attributes attributes, List spans) { + SpanData parentSpan = getSpan(expectedParent, spans); + List childSpans = getSpans(child, attributes, spans); + for (SpanData childSpan : childSpans) { + assertEquals(parentSpan.getSpanId(), childSpan.getParentSpanId()); + } + } + + SpanData getSpan(String name, List spans) { + return spans.stream() + .filter(span -> span.getName().equals(name)) + .findAny() + .orElseThrow(() -> new IllegalArgumentException("Span " + name + " not found")); + } + + List getSpans(String name, Attributes attributes, List spans) { + return spans.stream() + .filter(span -> equalsSpan(span, name, attributes)) + .collect(Collectors.toList()); + } + + private String spansToString(List spans) { + return spans.stream().map(SpanData::getName).collect(Collectors.joining("\n", "\n", "\n")); + } + + private String eventsToString(List events) { + return events.stream().map(EventData::getName).collect(Collectors.joining("\n", "\n", "\n")); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenTelemetryBuiltInMetricsTracerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenTelemetryBuiltInMetricsTracerTest.java new file mode 100644 index 000000000000..a3273c2a6aa0 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenTelemetryBuiltInMetricsTracerTest.java @@ -0,0 +1,469 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; + +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.tracing.ApiTracerFactory; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.connection.RandomResultSetGenerator; +import com.google.common.base.Stopwatch; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Range; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Server; +import io.grpc.Status; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import 
io.opencensus.trace.Tracing; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.data.LongPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.time.Duration; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class OpenTelemetryBuiltInMetricsTracerTest extends AbstractNettyMockServerTest { + private static final Statement SELECT_RANDOM = Statement.of("SELECT * FROM random"); + private static final Statement UPDATE_RANDOM = Statement.of("UPDATE random SET foo=1 WHERE id=1"); + private static InMemoryMetricReader metricReader; + private static final Map attributes = + BuiltInMetricsProvider.INSTANCE.createClientAttributes(); + private static final Attributes expectedCommonBaseAttributes = + Attributes.builder() + .put(BuiltInMetricsConstant.CLIENT_NAME_KEY, "spanner-java/") + .put(BuiltInMetricsConstant.CLIENT_UID_KEY, attributes.get("client_uid")) + .put(BuiltInMetricsConstant.INSTANCE_ID_KEY, "i") + .put(BuiltInMetricsConstant.DATABASE_KEY, "d") + .put(BuiltInMetricsConstant.DIRECT_PATH_ENABLED_KEY, "false") + .put(BuiltInMetricsConstant.DIRECT_PATH_USED_KEY, "false") + .build(); + private static final double MIN_LATENCY = 0; + + private DatabaseClient client; + + public ApiTracerFactory createMetricsTracerFactory() { + metricReader = InMemoryMetricReader.create(); + + SdkMeterProviderBuilder 
meterProvider = + SdkMeterProvider.builder().registerMetricReader(metricReader); + BuiltInMetricsConstant.getAllViews().forEach(meterProvider::registerView); + OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build(); + + return new BuiltInMetricsTracerFactory( + new BuiltInMetricsRecorder(openTelemetry, BuiltInMetricsConstant.METER_NAME), + attributes, + new TraceWrapper( + Tracing.getTracer(), + openTelemetry.getTracer( + MetricRegistryConstants.INSTRUMENTATION_SCOPE, + GaxProperties.getLibraryVersion(getClass())), + true)); + } + + @BeforeClass + public static void setupResults() { + RandomResultSetGenerator generator = new RandomResultSetGenerator(1); + mockSpanner.putStatementResult(StatementResult.query(SELECT_RANDOM, generator.generate())); + mockSpanner.putStatementResults(StatementResult.update(UPDATE_RANDOM, 1L)); + } + + @After + public void clearRequests() throws IOException { + mockSpanner.clearRequests(); + metricReader.close(); + } + + @Override + public void createSpannerInstance() { + SpannerOptions.Builder builder = SpannerOptions.newBuilder(); + + ApiTracerFactory metricsTracerFactory = + new BuiltInMetricsTracerFactory( + new BuiltInMetricsRecorder(OpenTelemetry.noop(), BuiltInMetricsConstant.METER_NAME), + attributes, + new TraceWrapper(Tracing.getTracer(), OpenTelemetry.noop().getTracer(""), true)); + // Set a quick polling algorithm to prevent this from slowing down the test unnecessarily. 
+ builder + .getDatabaseAdminStubSettingsBuilder() + .updateDatabaseDdlOperationSettings() + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofNanos(1L)) + .setMaxRetryDelayDuration(Duration.ofNanos(1L)) + .setRetryDelayMultiplier(1.0) + .setTotalTimeoutDuration(Duration.ofMinutes(10L)) + .build())); + String endpoint = address.getHostString() + ":" + server.getPort(); + spanner = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://" + endpoint) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setWaitForMinSessionsDuration(Duration.ofSeconds(5L)) + .setFailOnSessionLeak() + .setSkipVerifyingBeginTransactionForMuxRW(true) + .build()) + // Setting this to false so that Spanner Options does not register Metrics Tracer + // factory again. + .setBuiltInMetricsEnabled(false) + .setApiTracerFactory(createMetricsTracerFactory()) + .build() + .getService(); + client = spanner.getDatabaseClient(DatabaseId.of("test-project", "i", "d")); + } + + @Test + public void testMetricsSingleUseQuery() { + Stopwatch stopwatch = Stopwatch.createStarted(); + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + + double elapsed = stopwatch.elapsed(TimeUnit.MILLISECONDS); + Attributes expectedAttributes = + expectedCommonBaseAttributes.toBuilder() + .put(BuiltInMetricsConstant.STATUS_KEY, "OK") + .put(BuiltInMetricsConstant.METHOD_KEY, "Spanner.ExecuteStreamingSql") + .build(); + + MetricData operationLatencyMetricData = + getMetricData(metricReader, BuiltInMetricsConstant.OPERATION_LATENCIES_NAME); + assertNotNull(operationLatencyMetricData); + double operationLatencyValue = + getAggregatedValue(operationLatencyMetricData, expectedAttributes); + 
assertThat(operationLatencyValue).isIn(Range.closed(MIN_LATENCY, elapsed)); + + MetricData attemptLatencyMetricData = + getMetricData(metricReader, BuiltInMetricsConstant.ATTEMPT_LATENCIES_NAME); + assertNotNull(attemptLatencyMetricData); + double attemptLatencyValue = getAggregatedValue(attemptLatencyMetricData, expectedAttributes); + assertThat(attemptLatencyValue).isIn(Range.closed(MIN_LATENCY, elapsed)); + + MetricData operationCountMetricData = + getMetricData(metricReader, BuiltInMetricsConstant.OPERATION_COUNT_NAME); + assertNotNull(operationCountMetricData); + assertThat(getAggregatedValue(operationCountMetricData, expectedAttributes)).isEqualTo(1); + + MetricData attemptCountMetricData = + getMetricData(metricReader, BuiltInMetricsConstant.ATTEMPT_COUNT_NAME); + assertNotNull(attemptCountMetricData); + assertThat(getAggregatedValue(attemptCountMetricData, expectedAttributes)).isEqualTo(1); + + assertFalse( + checkIfMetricExists(metricReader, BuiltInMetricsConstant.GFE_CONNECTIVITY_ERROR_NAME)); + assertFalse( + checkIfMetricExists(metricReader, BuiltInMetricsConstant.AFE_CONNECTIVITY_ERROR_NAME)); + // AFE metrics are enabled for DirectPath. + MetricData afeLatencyMetricData = + getMetricData(metricReader, BuiltInMetricsConstant.AFE_LATENCIES_NAME); + double afeLatencyValue = getAggregatedValue(afeLatencyMetricData, expectedAttributes); + assertEquals(fakeAFEServerTiming.get(), afeLatencyValue, 1e-6); + } + + private boolean isJava8() { + return JavaVersionUtil.getJavaMajorVersion() == 8; + } + + private boolean isWindows() { + return System.getProperty("os.name").toLowerCase().contains("windows"); + } + + @Test + public void testMetricsWithGaxRetryUnaryRpc() { + Stopwatch stopwatch = Stopwatch.createStarted(); + mockSpanner.setBeginTransactionExecutionTime( + SimulatedExecutionTime.ofException(Status.UNAVAILABLE.asRuntimeException())); + + // Execute a simple read/write transaction using only mutations. 
This will use the + // BeginTransaction RPC to start the transaction. That RPC will first return UNAVAILABLE, then + // be retried by Gax, and succeed. The retry should show up in the tracing. + client.write(ImmutableList.of(Mutation.newInsertBuilder("foo").set("bar").to(1L).build())); + + stopwatch.elapsed(TimeUnit.MILLISECONDS); + + Attributes expectedAttributesBeginTransactionOK = + expectedCommonBaseAttributes.toBuilder() + .put(BuiltInMetricsConstant.STATUS_KEY, "OK") + .put(BuiltInMetricsConstant.METHOD_KEY, "Spanner.BeginTransaction") + .build(); + + Attributes expectedAttributesBeginTransactionFailed = + expectedCommonBaseAttributes.toBuilder() + .put(BuiltInMetricsConstant.STATUS_KEY, "UNAVAILABLE") + .put(BuiltInMetricsConstant.METHOD_KEY, "Spanner.BeginTransaction") + .build(); + + MetricData attemptCountMetricData = + getMetricData(metricReader, BuiltInMetricsConstant.ATTEMPT_COUNT_NAME); + assertNotNull(attemptCountMetricData); + assertThat(getAggregatedValue(attemptCountMetricData, expectedAttributesBeginTransactionOK)) + .isEqualTo(1); + // Attempt count should have a failed metric point for Begin Transaction. + assertThat(getAggregatedValue(attemptCountMetricData, expectedAttributesBeginTransactionFailed)) + .isEqualTo(1); + + MetricData operationCountMetricData = + getMetricData(metricReader, BuiltInMetricsConstant.OPERATION_COUNT_NAME); + assertNotNull(operationCountMetricData); + assertThat(getAggregatedValue(operationCountMetricData, expectedAttributesBeginTransactionOK)) + .isEqualTo(1); + // Operation count should not have a failed metric point for Begin Transaction as overall + // operation is success.. + assertThat( + getAggregatedValue(operationCountMetricData, expectedAttributesBeginTransactionFailed)) + .isEqualTo(0); + } + + @Test + public void testNoNetworkConnection() { + assumeFalse(TestHelper.isMultiplexSessionDisabled()); + // Create a Spanner instance that tries to connect to a server that does not exist. 
+ // This simulates a bad network connection. + SpannerOptions.Builder builder = SpannerOptions.newBuilder(); + + // Set up the client to fail fast. + builder + .getSpannerStubSettingsBuilder() + .applyToAllUnaryMethods( + input -> { + // This tells the Spanner client to fail directly if it gets an UNAVAILABLE exception. + // The 10-second deadline is chosen to ensure that: + // 1. The test fails within a reasonable amount of time if retries for whatever reason + // has been re-enabled. + // 2. The timeout is long enough to never be triggered during normal tests. + input.setSimpleTimeoutNoRetriesDuration(Duration.ofSeconds(10L)); + return null; + }); + + Spanner spanner = + builder + .setProjectId("test-project") + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://localhost:0") + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setMinSessions(0) + .setUseMultiplexedSession(true) + .setUseMultiplexedSessionForRW(true) + .setSkipVerifyingBeginTransactionForMuxRW(true) + .setFailOnSessionLeak() + .build()) + // Setting this to false so that Spanner Options does not register Metrics Tracer + // factory again. + .setBuiltInMetricsEnabled(false) + .setApiTracerFactory(createMetricsTracerFactory()) + .build() + .getService(); + String instance = "i"; + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("test-project", instance, "d")); + + // Using this client will return UNAVAILABLE, as the server is not reachable and we have + // disabled retries. 
+ SpannerException exception = + assertThrows( + SpannerException.class, () -> client.singleUse().executeQuery(SELECT_RANDOM).next()); + assertEquals(ErrorCode.UNAVAILABLE, exception.getErrorCode()); + + Attributes expectedAttributesCreateSessionOK = + expectedCommonBaseAttributes.toBuilder() + .put(BuiltInMetricsConstant.STATUS_KEY, "OK") + .put(BuiltInMetricsConstant.METHOD_KEY, "Spanner.CreateSession") + // Include the additional attributes that are added by the HeaderInterceptor in the + // filter. Note that the DIRECT_PATH_USED attribute is not added, as the request never + // leaves the client. + .build(); + + Attributes expectedAttributesCreateSessionFailed = + expectedCommonBaseAttributes.toBuilder() + .put(BuiltInMetricsConstant.STATUS_KEY, "UNAVAILABLE") + .put(BuiltInMetricsConstant.METHOD_KEY, "Spanner.CreateSession") + // Include the additional attributes that are added by the HeaderInterceptor in the + // filter. Note that the DIRECT_PATH_USED attribute is not added, as the request never + // leaves the client. + .build(); + + MetricData attemptCountMetricData = + getMetricData(metricReader, BuiltInMetricsConstant.ATTEMPT_COUNT_NAME); + assertNotNull(attemptCountMetricData); + + // Attempt count should have a failed metric point for CreateSession. 
+ assertEquals( + 1, getAggregatedValue(attemptCountMetricData, expectedAttributesCreateSessionFailed), 0); + assertTrue( + checkIfMetricExists(metricReader, BuiltInMetricsConstant.GFE_CONNECTIVITY_ERROR_NAME)); + assertTrue( + checkIfMetricExists(metricReader, BuiltInMetricsConstant.AFE_CONNECTIVITY_ERROR_NAME)); + } + + @Test + public void testNoServerTimingHeader() throws IOException, InterruptedException { + // Create Spanner Object without headers + InetSocketAddress addressNoHeader = new InetSocketAddress("localhost", 0); + Server serverNoHeader = + NettyServerBuilder.forAddress(addressNoHeader).addService(mockSpanner).build().start(); + String endpoint = address.getHostString() + ":" + serverNoHeader.getPort(); + Spanner spannerNoHeader = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://" + endpoint) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setWaitForMinSessionsDuration(Duration.ofSeconds(5L)) + .setFailOnSessionLeak() + .setSkipVerifyingBeginTransactionForMuxRW(true) + .build()) + // Setting this to false so that Spanner Options does not register Metrics Tracer + // factory again. 
+ .setBuiltInMetricsEnabled(false) + .setApiTracerFactory(createMetricsTracerFactory()) + .build() + .getService(); + DatabaseClient databaseClientNoHeader = + spannerNoHeader.getDatabaseClient(DatabaseId.of("test-project", "i", "d")); + + databaseClientNoHeader + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(UPDATE_RANDOM)); + + Attributes expectedAttributes = + expectedCommonBaseAttributes.toBuilder() + .put(BuiltInMetricsConstant.STATUS_KEY, "OK") + .put(BuiltInMetricsConstant.METHOD_KEY, "Spanner.ExecuteSql") + .build(); + + assertFalse(checkIfMetricExists(metricReader, BuiltInMetricsConstant.AFE_LATENCIES_NAME)); + assertFalse(checkIfMetricExists(metricReader, BuiltInMetricsConstant.GFE_LATENCIES_NAME)); + assertTrue( + checkIfMetricExists(metricReader, BuiltInMetricsConstant.GFE_CONNECTIVITY_ERROR_NAME)); + assertTrue( + checkIfMetricExists(metricReader, BuiltInMetricsConstant.AFE_CONNECTIVITY_ERROR_NAME)); + + spannerNoHeader.close(); + serverNoHeader.shutdown(); + serverNoHeader.awaitTermination(); + } + + private MetricData getMetricData(InMemoryMetricReader reader, String metricName) { + String fullMetricName = BuiltInMetricsConstant.METER_NAME + "/" + metricName; + Collection allMetricData; + + // Fetch the MetricData with retries + for (int attemptsLeft = 1000; attemptsLeft > 0; attemptsLeft--) { + allMetricData = reader.collectAllMetrics(); + List matchingMetadata = + allMetricData.stream() + .filter(md -> md.getName().equals(fullMetricName)) + .collect(Collectors.toList()); + assertWithMessage( + "Found multiple MetricData with the same name: %s, in: %s", + fullMetricName, matchingMetadata) + .that(matchingMetadata.size()) + .isAtMost(1); + + if (!matchingMetadata.isEmpty()) { + return matchingMetadata.get(0); + } + + try { + Thread.sleep(1); + } catch (InterruptedException interruptedException) { + Thread.currentThread().interrupt(); + throw new RuntimeException(interruptedException); + } + } + + 
fail(String.format("MetricData is missing for metric %s", fullMetricName)); + return null; + } + + private boolean checkIfMetricExists(InMemoryMetricReader reader, String metricName) { + String fullMetricName = BuiltInMetricsConstant.METER_NAME + "/" + metricName; + + for (int attemptsLeft = 1000; attemptsLeft > 0; attemptsLeft--) { + boolean exists = + reader.collectAllMetrics().stream().anyMatch(md -> md.getName().equals(fullMetricName)); + if (exists) { + return true; + } + try { + Thread.sleep(1); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException(e); + } + } + + return false; + } + + private float getAggregatedValue(MetricData metricData, Attributes attributes) { + switch (metricData.getType()) { + case HISTOGRAM: + return metricData.getHistogramData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .map(data -> (float) data.getSum() / data.getCount()) + .findFirst() + .orElse(0F); + case LONG_SUM: + return metricData.getLongSumData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .map(LongPointData::getValue) + .findFirst() + .orElse(0L); + default: + return 0; + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenTelemetrySpanTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenTelemetrySpanTest.java new file mode 100644 index 000000000000..8ff8827664da --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenTelemetrySpanTest.java @@ -0,0 +1,799 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.common.base.Stopwatch; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.ListValue; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.TypeCode; +import io.grpc.Server; +import io.grpc.Status; +import io.grpc.inprocess.InProcessServerBuilder; +import io.opencensus.trace.Tracing; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator; +import io.opentelemetry.context.propagation.ContextPropagators; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.testing.exporter.InMemorySpanExporter; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.data.EventData; +import io.opentelemetry.sdk.trace.data.SpanData; +import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor; +import 
java.lang.reflect.Modifier; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assume; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(TracerTest.class) +@RunWith(JUnit4.class) +public class OpenTelemetrySpanTest { + + private static final String TEST_PROJECT = "my-project"; + private static final String TEST_INSTANCE = "my-instance"; + private static final String TEST_DATABASE = "my-database"; + private static LocalChannelProvider channelProvider; + private static MockSpannerServiceImpl mockSpanner; + private Spanner spanner; + private Spanner spannerWithApiTracing; + private static Server server; + private static InMemorySpanExporter spanExporter; + + private static FailOnOverkillTraceComponentImpl failOnOverkillTraceComponent = + new FailOnOverkillTraceComponentImpl(); + + private static final Statement SELECT1 = Statement.of("SELECT 1 AS COL1"); + + private static final ResultSetMetadata SELECT1_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("COL1") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.INT64) + .build()) + .build()) + .build()) + .build(); + private static final com.google.spanner.v1.ResultSet SELECT1_RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("1").build()) + .build()) + .setMetadata(SELECT1_METADATA) + .build(); + private static final Statement UPDATE_STATEMENT = + Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=2"); + private static final long UPDATE_COUNT 
= 1L; + private static final Statement INVALID_UPDATE_STATEMENT = + Statement.of("UPDATE NON_EXISTENT_TABLE SET BAR=1 WHERE BAZ=2"); + + private List expectedCreateMultiplexedSessionsRequestEvents = + ImmutableList.of("Request for 1 multiplexed session returned 1 session"); + + private int expectedCreateMultiplexedSessionsRequestEventsCount = 1; + + private List expectedExecuteStreamingQueryEvents = + ImmutableList.of("Starting/Resuming stream"); + + private int expectedExecuteStreamingQueryEventsCount = 1; + + private List expectedReadWriteTransactionErrorEvents = + ImmutableList.of( + "Acquiring session", + "Acquired session", + "Using Session", + "Starting Transaction Attempt", + "Transaction Attempt Failed in user operation", + "exception"); + + private int expectedReadWriteTransactionErrorEventsCount = 6; + private List expectedReadWriteTransactionEvents = + ImmutableList.of( + "Acquiring session", + "Acquired session", + "Using Session", + "Starting Transaction Attempt", + "Starting Commit", + "Commit Done", + "Transaction Attempt Succeeded"); + + private int expectedReadWriteTransactionEventsCount = 7; + private List expectedReadWriteTransactionErrorWithBeginTransactionEvents = + ImmutableList.of( + "Acquiring session", + "Acquired session", + "Using Session", + "Starting Transaction Attempt", + "Transaction Attempt Aborted in user operation. Retrying", + "Creating Transaction", + "Transaction Creation Done", + "Starting Commit", + "Commit Done", + "Transaction Attempt Succeeded"); + + private int expectedReadWriteTransactionErrorWithBeginTransactionEventsCount = 11; + + @BeforeClass + public static void setupOpenTelemetry() { + SpannerOptions.resetActiveTracingFramework(); + SpannerOptions.enableOpenTelemetryTraces(); + } + + @BeforeClass + public static void startStaticServer() throws Exception { + // Incorporating OpenCensus tracer to ensure that OpenTraces traces are utilized if enabled, + // regardless of the presence of OpenCensus tracer. 
+ java.lang.reflect.Field field = Tracing.class.getDeclaredField("traceComponent"); + field.setAccessible(true); + java.lang.reflect.Field modifiersField = null; + try { + modifiersField = java.lang.reflect.Field.class.getDeclaredField("modifiers"); + } catch (NoSuchFieldException e) { + // Halt the test and ignore it. + Assume.assumeTrue( + "Skipping test as reflection is not allowed on reflection class in this JDK build", + false); + } + modifiersField.setAccessible(true); + // Remove the final modifier from the 'traceComponent' field. + modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL); + field.set(null, failOnOverkillTraceComponent); + + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. + mockSpanner.putStatementResult(StatementResult.query(SELECT1, SELECT1_RESULTSET)); + mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + mockSpanner.putStatementResult( + StatementResult.exception( + INVALID_UPDATE_STATEMENT, + Status.INVALID_ARGUMENT.withDescription("invalid statement").asRuntimeException())); + mockSpanner.putStatementResult( + StatementResult.read( + "FOO", KeySet.all(), Collections.singletonList("ID"), SELECT1_RESULTSET)); + String uniqueName = InProcessServerBuilder.generateName(); + server = InProcessServerBuilder.forName(uniqueName).addService(mockSpanner).build().start(); + + channelProvider = LocalChannelProvider.create(uniqueName); + failOnOverkillTraceComponent.clearSpans(); + failOnOverkillTraceComponent.clearAnnotations(); + } + + @AfterClass + public static void stopServer() throws InterruptedException { + if (server != null) { + server.shutdown(); + server.awaitTermination(); + } + } + + @Before + public void setUp() throws Exception { + spanExporter = InMemorySpanExporter.create(); + + SdkTracerProvider tracerProvider = + SdkTracerProvider.builder() + 
.addSpanProcessor(SimpleSpanProcessor.create(spanExporter)) + .build(); + + OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder() + .setPropagators(ContextPropagators.create(W3CTraceContextPropagator.getInstance())) + .setTracerProvider(tracerProvider) + .build(); + + SpannerOptions.Builder builder = + SpannerOptions.newBuilder() + .setProjectId(TEST_PROJECT) + .setChannelProvider(channelProvider) + .setOpenTelemetry(openTelemetry) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setMinSessions(2) + .setWaitForMinSessionsDuration(Duration.ofSeconds(10)) + .setSkipVerifyingBeginTransactionForMuxRW(true) + .build()); + + spanner = builder.build().getService(); + spannerWithApiTracing = builder.setEnableApiTracing(true).build().getService(); + } + + DatabaseClient getClient() { + return spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + } + + DatabaseClient getClientWithApiTracing() { + return spannerWithApiTracing.getDatabaseClient( + DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + } + + @After + public void tearDown() { + spanner.close(); + spannerWithApiTracing.close(); + mockSpanner.reset(); + mockSpanner.removeAllExecutionTimes(); + spanExporter.reset(); + } + + @Test + public void singleUse() { + List expectedReadOnlyTransactionSingleUseEvents = + getExpectedReadOnlyTransactionSingleUseEvents(); + List expectedReadOnlyTransactionSpans = + ImmutableList.of( + "CloudSpannerOperation.CreateMultiplexedSession", + "CloudSpannerOperation.ExecuteStreamingQuery", + "CloudSpanner.ReadOnlyTransaction"); + int expectedReadOnlyTransactionSingleUseEventsCount = + expectedReadOnlyTransactionSingleUseEvents.size(); + + DatabaseClient client = getClient(); + try (ResultSet rs = client.singleUse().executeQuery(SELECT1)) { + while (rs.next()) { + // Just consume the result set. + } + } + + // OpenCensus spans should be 0 as OpenTelemetry is enabled. 
+ assertEquals(failOnOverkillTraceComponent.getSpans().size(), 0); + + List actualSpanItems = new ArrayList<>(); + spanExporter + .getFinishedSpanItems() + .forEach( + spanItem -> { + actualSpanItems.add(spanItem.getName()); + switch (spanItem.getName()) { + case "CloudSpannerOperation.CreateMultiplexedSession": + verifyRequestEvents( + spanItem, + expectedCreateMultiplexedSessionsRequestEvents, + expectedCreateMultiplexedSessionsRequestEventsCount); + break; + case "CloudSpannerOperation.ExecuteStreamingQuery": + verifyRequestEvents( + spanItem, + expectedExecuteStreamingQueryEvents, + expectedExecuteStreamingQueryEventsCount); + break; + case "CloudSpanner.ReadOnlyTransaction": + verifyRequestEvents( + spanItem, + expectedReadOnlyTransactionSingleUseEvents, + expectedReadOnlyTransactionSingleUseEventsCount); + verifyCommonAttributes(spanItem); + break; + default: + assert false; + } + }); + + verifySpans(actualSpanItems, expectedReadOnlyTransactionSpans); + } + + private List getExpectedReadOnlyTransactionSingleUseEvents() { + List expectedReadOnlyTransactionSingleUseEvents; + if (isMultiplexedSessionsEnabled()) { + expectedReadOnlyTransactionSingleUseEvents = ImmutableList.of(); + } else { + expectedReadOnlyTransactionSingleUseEvents = + ImmutableList.of("Acquiring session", "Acquired session", "Using Session"); + } + return expectedReadOnlyTransactionSingleUseEvents; + } + + @Test + public void multiUse() { + List expectedReadOnlyTransactionSpans = + ImmutableList.of( + "CloudSpannerOperation.CreateMultiplexedSession", + "CloudSpannerOperation.ExecuteStreamingQuery", + "CloudSpanner.ReadOnlyTransaction"); + List expectedReadOnlyTransactionMultiUseEvents = + ImmutableList.of("Creating Transaction", "Transaction Creation Done"); + int expectedReadOnlyTransactionMultiUseEventsCount = + expectedReadOnlyTransactionMultiUseEvents.size(); + + DatabaseClient client = getClient(); + try (ReadOnlyTransaction tx = client.readOnlyTransaction()) { + try (ResultSet rs = 
tx.executeQuery(SELECT1)) { + while (rs.next()) { + // Just consume the result set. + } + } + } + + List actualSpanItems = new ArrayList<>(); + spanExporter + .getFinishedSpanItems() + .forEach( + spanItem -> { + actualSpanItems.add(spanItem.getName()); + switch (spanItem.getName()) { + case "CloudSpannerOperation.CreateMultiplexedSession": + verifyRequestEvents( + spanItem, + expectedCreateMultiplexedSessionsRequestEvents, + expectedCreateMultiplexedSessionsRequestEventsCount); + break; + case "CloudSpannerOperation.ExecuteStreamingQuery": + verifyRequestEvents( + spanItem, + expectedExecuteStreamingQueryEvents, + expectedExecuteStreamingQueryEventsCount); + break; + case "CloudSpanner.ReadOnlyTransaction": + verifyRequestEvents( + spanItem, + expectedReadOnlyTransactionMultiUseEvents, + expectedReadOnlyTransactionMultiUseEventsCount); + verifyCommonAttributes(spanItem); + break; + default: + assert false; + } + }); + + verifySpans(actualSpanItems, expectedReadOnlyTransactionSpans); + } + + @Test + public void transactionRunner() { + List expectedReadWriteTransactionWithCommitSpans = + ImmutableList.of( + "CloudSpannerOperation.CreateMultiplexedSession", + "CloudSpannerOperation.ExecuteUpdate", + "CloudSpannerOperation.Commit", + "CloudSpanner.ReadWriteTransaction"); + + expectedReadWriteTransactionEvents = + ImmutableList.of( + "Starting Transaction Attempt", + "Starting Commit", + "Commit Done", + "Transaction Attempt Succeeded"); + expectedReadWriteTransactionEventsCount = 4; + DatabaseClient client = getClient(); + TransactionRunner runner = client.readWriteTransaction(); + runner.run(transaction -> transaction.executeUpdate(UPDATE_STATEMENT)); + // Wait until the list of spans contains "CloudSpannerOperation.CreateSession", as this is + // an async operation. 
+ Stopwatch stopwatch = Stopwatch.createStarted(); + while (spanExporter.getFinishedSpanItems().stream() + .noneMatch(span -> span.getName().equals("CloudSpannerOperation.CreateSession")) + && stopwatch.elapsed(TimeUnit.MILLISECONDS) < 100) { + Thread.yield(); + } + List actualSpanItems = new ArrayList<>(); + spanExporter + .getFinishedSpanItems() + .forEach( + spanItem -> { + actualSpanItems.add(spanItem.getName()); + switch (spanItem.getName()) { + case "CloudSpannerOperation.CreateMultiplexedSession": + verifyRequestEvents( + spanItem, + expectedCreateMultiplexedSessionsRequestEvents, + expectedCreateMultiplexedSessionsRequestEventsCount); + break; + case "CloudSpannerOperation.Commit": + case "CloudSpannerOperation.ExecuteUpdate": + assertEquals(0, spanItem.getEvents().size()); + break; + case "CloudSpanner.ReadWriteTransaction": + verifyRequestEvents( + spanItem, + expectedReadWriteTransactionEvents, + expectedReadWriteTransactionEventsCount); + verifyCommonAttributes(spanItem); + break; + default: + assert false; + } + }); + + verifySpans(actualSpanItems, expectedReadWriteTransactionWithCommitSpans); + } + + @Test + public void transactionRunnerWithError() { + List expectedReadWriteTransactionSpans = + ImmutableList.of( + "CloudSpannerOperation.CreateMultiplexedSession", + "CloudSpannerOperation.ExecuteUpdate", + "CloudSpanner.ReadWriteTransaction"); + expectedReadWriteTransactionErrorEvents = + ImmutableList.of( + "Starting Transaction Attempt", + "Transaction Attempt Failed in user operation", + "exception"); + expectedReadWriteTransactionErrorEventsCount = 3; + DatabaseClient client = getClient(); + TransactionRunner runner = client.readWriteTransaction(); + SpannerException e = + assertThrows( + SpannerException.class, + () -> runner.run(transaction -> transaction.executeUpdate(INVALID_UPDATE_STATEMENT))); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + + List actualSpanItems = new ArrayList<>(); + spanExporter + .getFinishedSpanItems() + 
.forEach( + spanItem -> { + actualSpanItems.add(spanItem.getName()); + switch (spanItem.getName()) { + case "CloudSpannerOperation.CreateMultiplexedSession": + verifyRequestEvents( + spanItem, + expectedCreateMultiplexedSessionsRequestEvents, + expectedCreateMultiplexedSessionsRequestEventsCount); + break; + case "CloudSpanner.ReadWriteTransaction": + verifyRequestEvents( + spanItem, + expectedReadWriteTransactionErrorEvents, + expectedReadWriteTransactionErrorEventsCount); + verifyCommonAttributes(spanItem); + break; + case "CloudSpannerOperation.ExecuteUpdate": + assertEquals(0, spanItem.getEvents().size()); + break; + default: + assert false; + } + }); + + verifySpans(actualSpanItems, expectedReadWriteTransactionSpans); + } + + @Test + public void transactionRunnerWithFailedAndBeginTransaction() { + List expectedReadWriteTransactionWithCommitAndBeginTransactionSpans = + ImmutableList.of( + "CloudSpannerOperation.BeginTransaction", + "CloudSpannerOperation.ExecuteUpdate", + "CloudSpannerOperation.Commit", + "CloudSpanner.ReadWriteTransaction"); + expectedReadWriteTransactionErrorWithBeginTransactionEvents = + ImmutableList.of( + "Starting Transaction Attempt", + "Transaction Attempt Aborted in user operation. Retrying", + "Creating Transaction", + "Transaction Creation Done", + "Starting Commit", + "Commit Done", + "Transaction Attempt Succeeded"); + expectedReadWriteTransactionErrorWithBeginTransactionEventsCount = 8; + DatabaseClient client = getClient(); + assertEquals( + Long.valueOf(1L), + client + .readWriteTransaction() + .run( + transaction -> { + // This update statement carries the BeginTransaction, but fails. This will + // cause the entire transaction to be retried with an explicit + // BeginTransaction RPC to ensure all statements in the transaction are + // actually executed against the same transaction. 
+ SpannerException e = + assertThrows( + SpannerException.class, + () -> transaction.executeUpdate(INVALID_UPDATE_STATEMENT)); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + return transaction.executeUpdate(UPDATE_STATEMENT); + })); + // Wait for all spans to finish. Failing to do so can cause the test to miss the + // CreateSession span, as that span is executed asynchronously in the SessionClient, and + // the SessionClient returns the session to the pool before the span has finished fully. + Stopwatch stopwatch = Stopwatch.createStarted(); + while (spanExporter.getFinishedSpanItems().size() + < expectedReadWriteTransactionWithCommitAndBeginTransactionSpans.size() + && stopwatch.elapsed(TimeUnit.MILLISECONDS) < 2000) { + Thread.yield(); + } + + List actualSpanItems = new ArrayList<>(); + spanExporter + .getFinishedSpanItems() + .forEach( + spanItem -> { + // Ignore multiplexed sessions, as they are not used by this test and can therefore + // best be ignored, as it is not 100% certain that it has already been created. 
+ if (!"CloudSpannerOperation.CreateMultiplexedSession".equals(spanItem.getName())) { + actualSpanItems.add(spanItem.getName()); + } + switch (spanItem.getName()) { + case "CloudSpannerOperation.CreateMultiplexedSession": + verifyRequestEvents( + spanItem, + expectedCreateMultiplexedSessionsRequestEvents, + expectedCreateMultiplexedSessionsRequestEventsCount); + break; + case "CloudSpannerOperation.Commit": + case "CloudSpannerOperation.BeginTransaction": + case "CloudSpannerOperation.ExecuteUpdate": + assertEquals(0, spanItem.getEvents().size()); + break; + case "CloudSpanner.ReadWriteTransaction": + verifyRequestEvents( + spanItem, + expectedReadWriteTransactionErrorWithBeginTransactionEvents, + expectedReadWriteTransactionErrorWithBeginTransactionEventsCount); + verifyCommonAttributes(spanItem); + break; + default: + assert false; + } + }); + + verifySpans(actualSpanItems, expectedReadWriteTransactionWithCommitAndBeginTransactionSpans); + } + + @Test + public void testTransactionRunnerWithRetryOnBeginTransaction() { + // First get the client to ensure that the BatchCreateSessions request has been executed. + DatabaseClient clientWithApiTracing = getClientWithApiTracing(); + + // Register an UNAVAILABLE error on the server. This error will be returned the first time the + // BeginTransaction RPC is called. This RPC is then retried, and the transaction succeeds. + // The retry should be added as an event to the span. 
+ mockSpanner.addException(Status.UNAVAILABLE.asRuntimeException()); + + clientWithApiTracing + .readWriteTransaction() + .run( + transaction -> { + transaction.buffer(Mutation.newInsertBuilder("foo").set("id").to(1L).build()); + return null; + }); + + assertEquals(2, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + int numExpectedSpans = 7; + waitForFinishedSpans(numExpectedSpans); + List finishedSpans = spanExporter.getFinishedSpanItems(); + List finishedSpanNames = + finishedSpans.stream().map(SpanData::getName).collect(Collectors.toList()); + String actualSpanNames = + finishedSpans.stream().map(SpanData::getName).collect(Collectors.joining("\n", "\n", "\n")); + assertEquals(actualSpanNames, numExpectedSpans, finishedSpans.size()); + + assertTrue(actualSpanNames, finishedSpanNames.contains("CloudSpanner.ReadWriteTransaction")); + assertTrue( + actualSpanNames, finishedSpanNames.contains("CloudSpannerOperation.BeginTransaction")); + assertTrue(actualSpanNames, finishedSpanNames.contains("CloudSpannerOperation.Commit")); + + assertTrue(actualSpanNames, finishedSpanNames.contains("Spanner.BeginTransaction")); + assertTrue(actualSpanNames, finishedSpanNames.contains("Spanner.Commit")); + + SpanData beginTransactionSpan = + finishedSpans.stream() + .filter(span -> span.getName().equals("Spanner.BeginTransaction")) + .findAny() + .orElseThrow(IllegalStateException::new); + assertTrue( + beginTransactionSpan.toString(), + beginTransactionSpan.getEvents().stream() + .anyMatch(event -> event.getName().equals("Starting RPC retry 1"))); + } + + @Test + public void testSingleUseRetryOnExecuteStreamingSql() { + // First get the client to ensure that the BatchCreateSessions request has been executed. + DatabaseClient clientWithApiTracing = getClientWithApiTracing(); + + // Register an UNAVAILABLE error on the server. This error will be returned the first time the + // BeginTransaction RPC is called. This RPC is then retried, and the transaction succeeds. 
+ // The retry should be added as an event to the span. + mockSpanner.addException(Status.UNAVAILABLE.asRuntimeException()); + + try (ResultSet resultSet = clientWithApiTracing.singleUse().executeQuery(SELECT1)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + int numExpectedSpans = 6; + waitForFinishedSpans(numExpectedSpans); + List finishedSpans = spanExporter.getFinishedSpanItems(); + List finishedSpanNames = + finishedSpans.stream().map(SpanData::getName).collect(Collectors.toList()); + String actualSpanNames = + finishedSpans.stream().map(SpanData::getName).collect(Collectors.joining("\n", "\n", "\n")); + assertEquals(actualSpanNames, numExpectedSpans, finishedSpans.size()); + + assertTrue(actualSpanNames, finishedSpanNames.contains("CloudSpanner.ReadOnlyTransaction")); + assertTrue( + actualSpanNames, finishedSpanNames.contains("CloudSpannerOperation.ExecuteStreamingQuery")); + + assertTrue(actualSpanNames, finishedSpanNames.contains("Spanner.ExecuteStreamingSql")); + + // UNAVAILABLE errors on ExecuteStreamingSql are handled manually in the client library, which + // means that the retry event is on this span. + SpanData executeStreamingQuery = + finishedSpans.stream() + .filter(span -> span.getName().equals("CloudSpannerOperation.ExecuteStreamingQuery")) + .findAny() + .orElseThrow(IllegalStateException::new); + assertTrue( + executeStreamingQuery.toString(), + executeStreamingQuery.getEvents().stream() + .anyMatch(event -> event.getName().contains("Stream broken. Safe to retry"))); + } + + @Test + public void testRetryOnExecuteSql() { + // First get the client to ensure that the BatchCreateSessions request has been executed. + DatabaseClient clientWithApiTracing = getClientWithApiTracing(); + + // Register an UNAVAILABLE error on the server. This error will be returned the first time the + // ExecuteSql RPC is called. 
This RPC is then retried, and the statement succeeds. + // The retry should be added as an event to the span. + mockSpanner.addException(Status.UNAVAILABLE.asRuntimeException()); + + clientWithApiTracing + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(UPDATE_STATEMENT)); + + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + int numExpectedSpans = 7; + waitForFinishedSpans(numExpectedSpans); + List finishedSpans = spanExporter.getFinishedSpanItems(); + List finishedSpanNames = + finishedSpans.stream().map(SpanData::getName).collect(Collectors.toList()); + String actualSpanNames = + finishedSpans.stream().map(SpanData::getName).collect(Collectors.joining("\n", "\n", "\n")); + assertEquals(actualSpanNames, numExpectedSpans, finishedSpans.size()); + + assertTrue(actualSpanNames, finishedSpanNames.contains("CloudSpanner.ReadWriteTransaction")); + assertTrue(actualSpanNames, finishedSpanNames.contains("CloudSpannerOperation.Commit")); + assertTrue(actualSpanNames, finishedSpanNames.contains("Spanner.ExecuteSql")); + assertTrue(actualSpanNames, finishedSpanNames.contains("Spanner.Commit")); + + SpanData executeSqlSpan = + finishedSpans.stream() + .filter(span -> span.getName().equals("Spanner.ExecuteSql")) + .findAny() + .orElseThrow(IllegalStateException::new); + assertTrue( + executeSqlSpan.toString(), + executeSqlSpan.getEvents().stream() + .anyMatch(event -> event.getName().equals("Starting RPC retry 1"))); + } + + @Test + public void testTableAttributes() { + DatabaseClient client = getClient(); + client + .readWriteTransaction(Options.optimisticLock()) + .run( + transaction -> { + try (ResultSet rs = + transaction.read( + "FOO", + KeySet.all(), + Collections.singletonList("ID"), + Options.tag("test-tag"))) { + while (rs.next()) { + assertEquals(rs.getLong(0), 1); + } + } + return null; + }); + SpanData spanData = + spanExporter.getFinishedSpanItems().stream() + .filter(x -> 
x.getName().equals("CloudSpannerOperation.ExecuteStreamingRead")) + .findFirst() + .get(); + verifyTableAttributes(spanData); + } + + private void waitForFinishedSpans(int numExpectedSpans) { + // Wait for all spans to finish. Failing to do so can cause the test to miss the + // BatchCreateSessions span, as that span is executed asynchronously in the SessionClient, and + // the SessionClient returns the session to the pool before the span has finished fully. + Stopwatch stopwatch = Stopwatch.createStarted(); + while (spanExporter.getFinishedSpanItems().size() < numExpectedSpans + && stopwatch.elapsed().compareTo(java.time.Duration.ofMillis(1000)) < 0) { + Thread.yield(); + } + } + + private void verifyRequestEvents(SpanData spanItem, List expectedEvents, int eventCount) { + List eventNames = + spanItem.getEvents().stream().map(EventData::getName).collect(Collectors.toList()); + assertEquals(eventCount, spanItem.getEvents().size()); + assertEquals( + expectedEvents.stream().sorted().collect(Collectors.toList()), + eventNames.stream().distinct().sorted().collect(Collectors.toList())); + } + + private static void verifySpans(List actualSpanItems, List expectedSpansItems) { + assertEquals( + expectedSpansItems.stream().sorted().collect(Collectors.toList()), + actualSpanItems.stream().distinct().sorted().collect(Collectors.toList())); + } + + private static void verifyCommonAttributes(SpanData span) { + assertEquals(span.getAttributes().get(AttributeKey.stringKey("instance.name")), "my-instance"); + assertEquals(span.getAttributes().get(AttributeKey.stringKey("db.name")), "my-database"); + assertEquals(span.getAttributes().get(AttributeKey.stringKey("gcp.client.service")), "spanner"); + assertEquals( + span.getAttributes().get(AttributeKey.stringKey("gcp.client.repo")), + "googleapis/java-spanner"); + assertEquals( + span.getAttributes().get(AttributeKey.stringKey("gcp.client.version")), + GaxProperties.getLibraryVersion(TraceWrapper.class)); + assertEquals( + 
span.getAttributes().get(AttributeKey.stringKey("gcp.resource.name")), + String.format( + "//spanner.googleapis.com/projects/%s/instances/%s/databases/%s", + TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + } + + private static void verifyTableAttributes(SpanData span) { + assertEquals(span.getAttributes().get(AttributeKey.stringKey("statement.tag")), "test-tag"); + assertEquals(span.getAttributes().get(AttributeKey.stringKey("db.table")), "FOO"); + } + + private boolean isMultiplexedSessionsEnabled() { + if (spanner.getOptions() == null || spanner.getOptions().getSessionPoolOptions() == null) { + return false; + } + return spanner.getOptions().getSessionPoolOptions().getUseMultiplexedSession(); + } + + private boolean isMultiplexedSessionsEnabledForRW() { + if (spanner.getOptions() == null || spanner.getOptions().getSessionPoolOptions() == null) { + return false; + } + return spanner.getOptions().getSessionPoolOptions().getUseMultiplexedSessionForRW(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OperationFutureUtil.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OperationFutureUtil.java new file mode 100644 index 000000000000..b04d24413cbf --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OperationFutureUtil.java @@ -0,0 +1,273 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.spanner; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.longrunning.OperationSnapshot; +import com.google.api.gax.retrying.RetryingFuture; +import com.google.api.gax.retrying.TimedAttemptSettings; +import com.google.api.gax.rpc.StatusCode; +import com.google.common.base.Preconditions; +import com.google.protobuf.Any; +import com.google.protobuf.Message; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; + +// TODO(hzyi): add a public FakeOperationSnapshot in gax to support testing +class OperationFutureUtil { + + private OperationFutureUtil() { + // Utility class + } + + public static class FakeStatusCode implements StatusCode { + private final Code code; + + public FakeStatusCode(Code code) { + this.code = code; + } + + @Override + public Code getCode() { + return code; + } + + @Override + public Code getTransportCode() { + return getCode(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + FakeStatusCode that = (FakeStatusCode) o; + + return code == that.code; + } + + @Override + public int hashCode() { + return code != null ? 
code.hashCode() : 0; + } + + public static FakeStatusCode of(Code code) { + return new FakeStatusCode(code); + } + } + + public static + OperationSnapshot completedSnapshot( + final String name, final ResponseT response, final MetadataT metadata) { + return new OperationSnapshot() { + @Override + public String getName() { + return name; + } + + @Override + public Object getMetadata() { + return Any.pack(metadata); + } + + @Override + public Object getResponse() { + return Any.pack(response); + } + + @Override + public boolean isDone() { + return true; + } + + @Override + public StatusCode getErrorCode() { + return FakeStatusCode.of(StatusCode.Code.OK); + } + + @Override + public String getErrorMessage() { + return null; + } + }; + } + + /** Already-completed {@code ImmediateRetryingFuture}, useful for testing. */ + public static final class ImmediateRetryingFuture implements RetryingFuture { + + private final ApiFuture immediateFuture; + private ApiFuture attemptFuture; + + ImmediateRetryingFuture(V response) { + this.immediateFuture = ApiFutures.immediateFuture(response); + } + + @Override + public void addListener(Runnable runnable, Executor executor) { + immediateFuture.addListener(runnable, executor); + } + + @Override + public V get(long time, TimeUnit unit) throws ExecutionException, InterruptedException { + return get(); + } + + @Override + public V get() throws ExecutionException, InterruptedException { + return immediateFuture.get(); + } + + @Override + public boolean isDone() { + return true; + } + + @Override + public boolean isCancelled() { + return false; + } + + @Override + public boolean cancel(boolean b) { + return false; + } + + @Override + public void setAttemptFuture(ApiFuture attemptFuture) { + this.attemptFuture = attemptFuture; + } + + @Override + public ApiFuture getAttemptResult() { + return this.attemptFuture; + } + + @Override + public TimedAttemptSettings getAttemptSettings() { + throw new UnsupportedOperationException("Not implemented: 
getAttemptSettings()"); + } + + @Override + public Callable getCallable() { + throw new UnsupportedOperationException("Not implemented: getCallable()"); + } + + @Override + public ApiFuture peekAttemptResult() { + return this.attemptFuture; + } + } + + public static RetryingFuture immediateRetryingFuture( + final ResponseT response) { + return new ImmediateRetryingFuture<>(response); + } + + public static + OperationFuture immediateOperationFuture( + final String name, final ResponseT response, final MetadataT metadata) { + return immediateOperationFuture(completedSnapshot(name, response, metadata)); + } + + /** + * Creates an already-completed {@code OperationFuture}, useful for testing. + * + *

{@code completedSnapshot.isDone()} must return true. The snapshot's {@code getResponse()} + * and {@code getMetadata()} must be instances of {@code ResponseT} and {@code MetadataT}, + * respectively. + */ + @SuppressWarnings("unchecked") + public static + OperationFuture immediateOperationFuture( + final OperationSnapshot completedSnapshot) { + + Preconditions.checkArgument( + completedSnapshot.isDone(), "given snapshot must already be completed"); + final ApiFuture metadataFuture = + ApiFutures.immediateFuture((MetadataT) completedSnapshot.getMetadata()); + final ApiFuture initialFuture = + ApiFutures.immediateFuture(completedSnapshot); + final RetryingFuture pollingFuture = + immediateRetryingFuture(completedSnapshot); + + return new OperationFuture() { + @Override + public String getName() { + return completedSnapshot.getName(); + } + + @Override + public ApiFuture getMetadata() { + return metadataFuture; + } + + @Override + public ApiFuture peekMetadata() { + return metadataFuture; + } + + @Override + public ApiFuture getInitialFuture() { + return initialFuture; + } + + @Override + public RetryingFuture getPollingFuture() { + return pollingFuture; + } + + @Override + public void addListener(Runnable runnable, Executor executor) { + pollingFuture.addListener(runnable, executor); + } + + @Override + public ResponseT get(long time, TimeUnit unit) + throws ExecutionException, InterruptedException { + return get(); + } + + @Override + public ResponseT get() throws ExecutionException, InterruptedException { + return (ResponseT) pollingFuture.get().getResponse(); + } + + @Override + public boolean isDone() { + return true; + } + + @Override + public boolean isCancelled() { + return false; + } + + @Override + public boolean cancel(boolean b) { + return false; + } + }; + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OperationTest.java 
b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OperationTest.java new file mode 100644 index 000000000000..55211de19804 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OperationTest.java @@ -0,0 +1,188 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static com.google.longrunning.Operation.newBuilder; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; +import static org.mockito.Mockito.when; +import static org.mockito.MockitoAnnotations.initMocks; + +import com.google.api.core.ApiClock; +import com.google.cloud.RetryOption; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.protobuf.Any; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.rpc.Code; +import com.google.rpc.Status; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import java.time.Duration; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.Mock; + +/** Unit tests for {@link Operation}. 
*/ +@RunWith(JUnit4.class) +public class OperationTest { + private static final String NAME = + "projects/test-project/instances/test-instance/databases/database-1"; + + @Mock private SpannerRpc rpc; + @Mock private DatabaseAdminClient dbClient; + @Mock private ApiClock clock; + + private class ParserImpl implements Operation.Parser { + + @Override + public Database parseResult(Any response) { + try { + return Database.fromProto( + response.unpack(com.google.spanner.admin.database.v1.Database.class), dbClient); + } catch (InvalidProtocolBufferException e) { + return null; + } + } + + @Override + public String parseMetadata(Any metadata) { + try { + return metadata.unpack(CreateDatabaseMetadata.class).getDatabase(); + } catch (InvalidProtocolBufferException e) { + return null; + } + } + } + + @Before + public void setUp() { + initMocks(this); + } + + @Test + public void failedOperation() { + com.google.longrunning.Operation proto = + newBuilder() + .setName("op1") + .setDone(true) + .setError(Status.newBuilder().setCode(Code.NOT_FOUND.getNumber())) + .build(); + Operation op = Operation.create(rpc, proto, new ParserImpl()); + assertThat(op.getName()).isEqualTo("op1"); + assertThat(op.isDone()).isTrue(); + assertThat(op.isSuccessful()).isFalse(); + assertThat(op.getMetadata()).isNull(); + SpannerException e = assertThrows(SpannerException.class, () -> op.getResult()); + assertEquals(ErrorCode.NOT_FOUND, e.getErrorCode()); + } + + @Test + public void successfulOperation() { + com.google.spanner.admin.database.v1.Database db = + com.google.spanner.admin.database.v1.Database.newBuilder() + .setName(NAME) + .setState(com.google.spanner.admin.database.v1.Database.State.READY) + .build(); + + com.google.longrunning.Operation proto = + newBuilder().setName("op1").setDone(true).setResponse(Any.pack(db)).build(); + Operation op = Operation.create(rpc, proto, new ParserImpl()); + assertThat(op.getName()).isEqualTo("op1"); + assertThat(op.isDone()).isTrue(); + 
assertThat(op.isSuccessful()).isTrue(); + assertThat(op.getMetadata()).isNull(); + assertThat(op.getResult().getId().getName()).isEqualTo(NAME); + } + + @Test + public void pendingOperation() { + com.google.longrunning.Operation proto = + newBuilder() + .setName("op1") + .setDone(false) + .setMetadata(Any.pack(CreateDatabaseMetadata.newBuilder().setDatabase(NAME).build())) + .build(); + Operation op = Operation.create(rpc, proto, new ParserImpl()); + assertThat(op.getName()).isEqualTo("op1"); + assertThat(op.isDone()).isFalse(); + assertThat(op.isSuccessful()).isFalse(); + assertThat(op.getMetadata()).isEqualTo(NAME); + assertThat(op.getResult()).isNull(); + } + + @Test + public void reload() { + com.google.longrunning.Operation proto = newBuilder().setName("op1").setDone(false).build(); + Operation op = Operation.create(rpc, proto, new ParserImpl()); + com.google.spanner.admin.database.v1.Database db = + com.google.spanner.admin.database.v1.Database.newBuilder() + .setName(NAME) + .setState(com.google.spanner.admin.database.v1.Database.State.READY) + .build(); + proto = newBuilder().setName("op1").setDone(true).setResponse(Any.pack(db)).build(); + when(rpc.getOperation("op1")).thenReturn(proto); + op = op.reload(); + assertThat(op.getName()).isEqualTo("op1"); + assertThat(op.isDone()).isTrue(); + assertThat(op.isSuccessful()).isTrue(); + assertThat(op.getMetadata()).isNull(); + assertThat(op.getResult().getId().getName()).isEqualTo(NAME); + } + + @Test + public void waitForCompletes() { + com.google.longrunning.Operation proto = newBuilder().setName("op1").setDone(false).build(); + Operation op = Operation.create(rpc, proto, new ParserImpl()); + com.google.spanner.admin.database.v1.Database db = + com.google.spanner.admin.database.v1.Database.newBuilder() + .setName(NAME) + .setState(com.google.spanner.admin.database.v1.Database.State.READY) + .build(); + com.google.longrunning.Operation proto2 = + 
newBuilder().setName("op1").setDone(true).setResponse(Any.pack(db)).build(); + when(rpc.getOperation("op1")).thenReturn(proto, proto2); + + op = + op.waitFor( + RetryOption.totalTimeoutDuration(Duration.ofSeconds(3)), + RetryOption.initialRetryDelayDuration(Duration.ZERO)); + + assertThat(op.getName()).isEqualTo("op1"); + assertThat(op.isDone()).isTrue(); + assertThat(op.isSuccessful()).isTrue(); + assertThat(op.getMetadata()).isNull(); + assertThat(op.getResult().getId().getName()).isEqualTo(NAME); + } + + @Test + public void waitForTimeout() { + com.google.longrunning.Operation proto = newBuilder().setName("op1").setDone(false).build(); + Operation op = Operation.create(rpc, proto, new ParserImpl(), clock); + when(rpc.getOperation("op1")).thenReturn(proto); + when(clock.nanoTime()).thenReturn(0L, 50_000_000L, 100_000_000L, 150_000_000L); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + op.waitFor( + RetryOption.totalTimeoutDuration(Duration.ofMillis(100L)), + RetryOption.initialRetryDelayDuration(Duration.ZERO))); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OptionsTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OptionsTest.java new file mode 100644 index 000000000000..5bd594e83c12 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OptionsTest.java @@ -0,0 +1,983 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.Options.RpcLockHint; +import com.google.cloud.spanner.Options.RpcOrderBy; +import com.google.cloud.spanner.Options.RpcPriority; +import com.google.cloud.spanner.Options.TransactionOption; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.DirectedReadOptions.IncludeReplicas; +import com.google.spanner.v1.DirectedReadOptions.ReplicaSelection; +import com.google.spanner.v1.ReadRequest.LockHint; +import com.google.spanner.v1.ReadRequest.OrderBy; +import com.google.spanner.v1.RequestOptions; +import com.google.spanner.v1.RequestOptions.Priority; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import com.google.spanner.v1.TransactionOptions.ReadWrite; +import com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link Options}. 
*/ +@RunWith(JUnit4.class) +public class OptionsTest { + private static final DirectedReadOptions DIRECTED_READ_OPTIONS = + DirectedReadOptions.newBuilder() + .setIncludeReplicas( + IncludeReplicas.newBuilder() + .addReplicaSelections( + ReplicaSelection.newBuilder().setLocation("us-west1").build())) + .build(); + + @Test + public void testToRequestOptionsProto() { + RequestOptions.ClientContext clientContext = + RequestOptions.ClientContext.newBuilder() + .putSecureContext( + "key", com.google.protobuf.Value.newBuilder().setStringValue("value").build()) + .build(); + Options options = + Options.fromQueryOptions( + Options.priority(RpcPriority.HIGH), + Options.tag("tag"), + Options.clientContext(clientContext)); + + RequestOptions protoForStatement = options.toRequestOptionsProto(false); + assertEquals(RequestOptions.Priority.PRIORITY_HIGH, protoForStatement.getPriority()); + assertEquals("tag", protoForStatement.getRequestTag()); + assertEquals("", protoForStatement.getTransactionTag()); + assertEquals(clientContext, protoForStatement.getClientContext()); + + RequestOptions protoForTransaction = options.toRequestOptionsProto(true); + assertEquals(RequestOptions.Priority.PRIORITY_HIGH, protoForTransaction.getPriority()); + assertEquals("", protoForTransaction.getRequestTag()); + assertEquals("tag", protoForTransaction.getTransactionTag()); + assertEquals(clientContext, protoForTransaction.getClientContext()); + } + + @Test + public void negativeLimitsNotAllowed() { + IllegalArgumentException e = + assertThrows(IllegalArgumentException.class, () -> Options.limit(-1)); + assertNotNull(e.getMessage()); + } + + @Test + public void zeroLimitNotAllowed() { + IllegalArgumentException e = + assertThrows(IllegalArgumentException.class, () -> Options.limit(0)); + assertNotNull(e.getMessage()); + } + + @Test + public void negativePrefetchChunksNotAllowed() { + IllegalArgumentException e = + assertThrows(IllegalArgumentException.class, () -> Options.prefetchChunks(-1)); + 
assertNotNull(e.getMessage()); + } + + @Test + public void zeroPrefetchChunksNotAllowed() { + IllegalArgumentException e = + assertThrows(IllegalArgumentException.class, () -> Options.prefetchChunks(0)); + assertNotNull(e.getMessage()); + } + + @Test + public void allOptionsPresent() { + XGoogSpannerRequestId reqId1 = XGoogSpannerRequestId.of(2, 3, 4, 5); + Options options = + Options.fromReadOptions( + Options.limit(10), + Options.prefetchChunks(1), + Options.dataBoostEnabled(true), + Options.directedRead(DIRECTED_READ_OPTIONS), + Options.orderBy(RpcOrderBy.NO_ORDER), + Options.requestId(reqId1), + Options.lockHint(Options.RpcLockHint.SHARED)); + assertThat(options.hasLimit()).isTrue(); + assertThat(options.limit()).isEqualTo(10); + assertThat(options.hasPrefetchChunks()).isTrue(); + assertThat(options.prefetchChunks()).isEqualTo(1); + assertThat(options.hasDataBoostEnabled()).isTrue(); + assertTrue(options.dataBoostEnabled()); + assertTrue(options.hasDirectedReadOptions()); + assertTrue(options.hasOrderBy()); + assertTrue(options.hasLockHint()); + assertEquals(DIRECTED_READ_OPTIONS, options.directedReadOptions()); + assertEquals(options.reqId(), reqId1); + } + + @Test + public void allOptionsAbsent() { + Options options = Options.fromReadOptions(); + assertThat(options.hasLimit()).isFalse(); + assertThat(options.hasPrefetchChunks()).isFalse(); + assertThat(options.hasFilter()).isFalse(); + assertThat(options.hasPageToken()).isFalse(); + assertThat(options.hasPriority()).isFalse(); + assertThat(options.hasTag()).isFalse(); + assertThat(options.hasDataBoostEnabled()).isFalse(); + assertThat(options.hasDirectedReadOptions()).isFalse(); + assertThat(options.hasOrderBy()).isFalse(); + assertThat(options.hasLockHint()).isFalse(); + assertNull(options.withExcludeTxnFromChangeStreams()); + assertThat(options.toString()).isEqualTo(""); + assertThat(options.equals(options)).isTrue(); + assertThat(options.equals(null)).isFalse(); + 
assertThat(options.equals(this)).isFalse(); + assertNull(options.isolationLevel()); + assertThat(options.hashCode()).isEqualTo(31); + } + + @Test + public void listOptionsTest() { + int pageSize = 3; + String pageToken = "ptok"; + String filter = "env"; + Options options = + Options.fromListOptions( + Options.pageSize(pageSize), Options.pageToken(pageToken), Options.filter(filter)); + + assertThat(options.toString()) + .isEqualTo( + "pageSize: " + pageSize + " pageToken: " + pageToken + " filter: " + filter + " "); + + assertThat(options.hasPageSize()).isTrue(); + assertThat(options.hasPageToken()).isTrue(); + assertThat(options.hasFilter()).isTrue(); + + assertThat(options.pageSize()).isEqualTo(pageSize); + assertThat(options.pageToken()).isEqualTo(pageToken); + assertThat(options.filter()).isEqualTo(filter); + assertThat(options.hashCode()).isEqualTo(108027089); + } + + @Test + public void listEquality() { + Options o1; + Options o2; + Options o3; + + o1 = Options.fromListOptions(); + o2 = Options.fromListOptions(); + assertThat(o1.equals(o2)).isTrue(); + + o2 = Options.fromListOptions(Options.pageSize(1)); + assertThat(o1.equals(o2)).isFalse(); + assertThat(o2.equals(o1)).isFalse(); + + o3 = Options.fromListOptions(Options.pageSize(1)); + assertThat(o2.equals(o3)).isTrue(); + + o3 = Options.fromListOptions(Options.pageSize(2)); + assertThat(o2.equals(o3)).isFalse(); + + o2 = Options.fromListOptions(Options.pageToken("t1")); + assertThat(o1.equals(o2)).isFalse(); + + o3 = Options.fromListOptions(Options.pageToken("t1")); + assertThat(o2.equals(o3)).isTrue(); + + o3 = Options.fromListOptions(Options.pageToken("t2")); + assertThat(o2.equals(o3)).isFalse(); + + o2 = Options.fromListOptions(Options.filter("f1")); + assertThat(o1.equals(o2)).isFalse(); + + o3 = Options.fromListOptions(Options.filter("f1")); + assertThat(o2.equals(o3)).isTrue(); + + o3 = Options.fromListOptions(Options.filter("f2")); + assertThat(o2.equals(o3)).isFalse(); + } + + @Test + public void 
readOptionsTest() { + int limit = 3; + String tag = "app=spanner,env=test,action=read"; + boolean dataBoost = true; + Options options = + Options.fromReadOptions( + Options.limit(limit), + Options.tag(tag), + Options.dataBoostEnabled(true), + Options.directedRead(DIRECTED_READ_OPTIONS), + Options.orderBy(RpcOrderBy.NO_ORDER), + Options.lockHint(RpcLockHint.SHARED)); + + assertThat(options.toString()) + .isEqualTo( + "limit: " + + limit + + " " + + "tag: " + + tag + + " " + + "dataBoostEnabled: " + + dataBoost + + " " + + "directedReadOptions: " + + DIRECTED_READ_OPTIONS + + " " + + "orderBy: " + + RpcOrderBy.NO_ORDER + + " " + + "lockHint: " + + RpcLockHint.SHARED + + " "); + assertThat(options.tag()).isEqualTo(tag); + assertEquals(dataBoost, options.dataBoostEnabled()); + assertEquals(DIRECTED_READ_OPTIONS, options.directedReadOptions()); + assertEquals(OrderBy.ORDER_BY_NO_ORDER, options.orderBy()); + assertEquals(LockHint.LOCK_HINT_SHARED, options.lockHint()); + } + + @Test + public void readEquality() { + Options o1; + Options o2; + Options o3; + + o1 = Options.fromReadOptions(); + o2 = Options.fromReadOptions(); + assertThat(o1.equals(o2)).isTrue(); + + o2 = Options.fromReadOptions(Options.limit(1)); + assertThat(o1.equals(o2)).isFalse(); + assertThat(o2.equals(o1)).isFalse(); + + o3 = Options.fromReadOptions(Options.limit(1)); + assertThat(o2.equals(o3)).isTrue(); + + o3 = Options.fromReadOptions(Options.limit(2)); + assertThat(o2.equals(o3)).isFalse(); + } + + @Test + public void queryOptionsTest() { + int chunks = 3; + String tag = "app=spanner,env=test,action=query"; + boolean dataBoost = true; + Options options = + Options.fromQueryOptions( + Options.prefetchChunks(chunks), + Options.tag(tag), + Options.dataBoostEnabled(true), + Options.directedRead(DIRECTED_READ_OPTIONS)); + assertThat(options.toString()) + .isEqualTo( + "prefetchChunks: " + + chunks + + " " + + "tag: " + + tag + + " " + + "dataBoostEnabled: " + + dataBoost + + " " + + 
"directedReadOptions: " + + DIRECTED_READ_OPTIONS + + " "); + assertThat(options.prefetchChunks()).isEqualTo(chunks); + assertThat(options.tag()).isEqualTo(tag); + assertEquals(dataBoost, options.dataBoostEnabled()); + assertEquals(DIRECTED_READ_OPTIONS, options.directedReadOptions()); + } + + @Test + public void testReadOptionsDataBoost() { + boolean dataBoost = true; + Options options = Options.fromReadOptions(Options.dataBoostEnabled(true)); + assertTrue(options.hasDataBoostEnabled()); + assertEquals("dataBoostEnabled: " + dataBoost + " ", options.toString()); + } + + @Test + public void testQueryOptionsDataBoost() { + boolean dataBoost = true; + Options options = Options.fromQueryOptions(Options.dataBoostEnabled(true)); + assertTrue(options.hasDataBoostEnabled()); + assertEquals("dataBoostEnabled: " + dataBoost + " ", options.toString()); + } + + @Test + public void queryEquality() { + Options o1; + Options o2; + Options o3; + + o1 = Options.fromQueryOptions(); + o2 = Options.fromQueryOptions(); + assertThat(o1.equals(o2)).isTrue(); + + o2 = Options.fromReadOptions(Options.prefetchChunks(1)); + assertThat(o1.equals(o2)).isFalse(); + assertThat(o2.equals(o1)).isFalse(); + + o3 = Options.fromReadOptions(Options.prefetchChunks(1)); + assertThat(o2.equals(o3)).isTrue(); + + o3 = Options.fromReadOptions(Options.prefetchChunks(2)); + assertThat(o2.equals(o3)).isFalse(); + } + + @Test + public void testFromTransactionOptions_toStringNoOptions() { + Options options = Options.fromTransactionOptions(); + assertThat(options.toString()).isEqualTo(""); + } + + @Test + public void testFromTransactionOptions_toStringWithCommitStats() { + Options options = Options.fromTransactionOptions(Options.commitStats()); + assertThat(options.toString()).contains("withCommitStats: true"); + } + + @Test + public void testTransactionOptions_noOptionsAreEqual() { + Options option1 = Options.fromTransactionOptions(); + Options option2 = Options.fromTransactionOptions(); + 
assertEquals(option1, option2); + } + + @Test + public void testTransactionOptions_withCommitStatsAreEqual() { + Options option1 = Options.fromTransactionOptions(Options.commitStats()); + Options option2 = Options.fromTransactionOptions(Options.commitStats()); + assertEquals(option1, option2); + } + + @Test + public void testTransactionOptions_withCommitStatsAndOtherOptionAreNotEqual() { + Options option1 = Options.fromTransactionOptions(Options.commitStats()); + Options option2 = Options.fromQueryOptions(Options.prefetchChunks(10)); + assertNotEquals(option1, option2); + } + + @Test + public void testTransactionOptions_noOptionsHashCode() { + Options option1 = Options.fromTransactionOptions(); + Options option2 = Options.fromTransactionOptions(); + assertEquals(option2.hashCode(), option1.hashCode()); + } + + @Test + public void testTransactionOptions_withCommitStatsHashCode() { + Options option1 = Options.fromTransactionOptions(Options.commitStats()); + Options option2 = Options.fromTransactionOptions(Options.commitStats()); + assertEquals(option2.hashCode(), option1.hashCode()); + } + + @Test + public void testTransactionOptions_withCommitStatsAndOtherOptionHashCode() { + Options option1 = Options.fromTransactionOptions(Options.commitStats()); + Options option2 = Options.fromQueryOptions(Options.prefetchChunks(10)); + assertNotEquals(option2.hashCode(), option1.hashCode()); + } + + @Test + public void testTransactionOptionsPriority() { + RpcPriority priority = RpcPriority.HIGH; + Options options = Options.fromTransactionOptions(Options.priority(priority)); + assertTrue(options.hasPriority()); + assertEquals("priority: " + priority + " ", options.toString()); + } + + @Test + public void testTransactionOptionsIsolationLevel() { + Options options = + Options.fromTransactionOptions(Options.isolationLevel(IsolationLevel.REPEATABLE_READ)); + assertEquals(options.isolationLevel(), IsolationLevel.REPEATABLE_READ); + assertEquals( + "isolationLevel: " + 
IsolationLevel.REPEATABLE_READ.name() + " ", options.toString()); + } + + @Test + public void testReadOptionsOrderBy() { + RpcOrderBy orderBy = RpcOrderBy.NO_ORDER; + Options options = Options.fromReadOptions(Options.orderBy(orderBy)); + assertTrue(options.hasOrderBy()); + assertEquals("orderBy: " + orderBy + " ", options.toString()); + } + + @Test + public void testReadOptionsLockHint() { + RpcLockHint lockHint = RpcLockHint.SHARED; + Options options = Options.fromReadOptions(Options.lockHint(lockHint)); + assertTrue(options.hasLockHint()); + assertEquals("lockHint: " + lockHint + " ", options.toString()); + } + + @Test + public void testReadOptionsWithOrderByEquality() { + Options optionsWithNoOrderBy1 = Options.fromReadOptions(Options.orderBy(RpcOrderBy.NO_ORDER)); + Options optionsWithNoOrderBy2 = Options.fromReadOptions(Options.orderBy(RpcOrderBy.NO_ORDER)); + assertTrue(optionsWithNoOrderBy1.equals(optionsWithNoOrderBy2)); + + Options optionsWithPkOrder = Options.fromReadOptions(Options.orderBy(RpcOrderBy.PRIMARY_KEY)); + assertFalse(optionsWithNoOrderBy1.equals(optionsWithPkOrder)); + } + + @Test + public void testReadOptionsWithLockHintEquality() { + Options optionsWithSharedLockHint1 = + Options.fromReadOptions(Options.lockHint(RpcLockHint.SHARED)); + Options optionsWithSharedLockHint2 = + Options.fromReadOptions(Options.lockHint(RpcLockHint.SHARED)); + assertEquals(optionsWithSharedLockHint1, optionsWithSharedLockHint2); + + Options optionsWithExclusiveLock = + Options.fromReadOptions(Options.lockHint(RpcLockHint.EXCLUSIVE)); + assertNotEquals(optionsWithSharedLockHint1, optionsWithExclusiveLock); + } + + @Test + public void testQueryOptionsPriority() { + RpcPriority priority = RpcPriority.MEDIUM; + Options options = Options.fromQueryOptions(Options.priority(priority)); + assertTrue(options.hasPriority()); + assertEquals("priority: " + priority + " ", options.toString()); + } + + @Test + public void testReadOptionsPriority() { + RpcPriority priority = 
RpcPriority.LOW; + Options options = Options.fromReadOptions(Options.priority(priority)); + assertTrue(options.hasPriority()); + assertEquals("priority: " + priority + " ", options.toString()); + } + + @Test + public void testUpdateOptionsPriority() { + RpcPriority priority = RpcPriority.LOW; + Options options = Options.fromUpdateOptions(Options.priority(priority)); + assertTrue(options.hasPriority()); + assertEquals("priority: " + priority + " ", options.toString()); + } + + @Test + public void testRpcPriorityEnumFromProto() { + assertEquals(RpcPriority.fromProto(Priority.PRIORITY_LOW), RpcPriority.LOW); + assertEquals(RpcPriority.fromProto(Priority.PRIORITY_MEDIUM), RpcPriority.MEDIUM); + assertEquals(RpcPriority.fromProto(Priority.PRIORITY_HIGH), RpcPriority.HIGH); + assertEquals(RpcPriority.fromProto(Priority.PRIORITY_UNSPECIFIED), RpcPriority.UNSPECIFIED); + assertEquals(RpcPriority.fromProto(null), RpcPriority.UNSPECIFIED); + } + + @Test + public void testTransactionOptionsHashCode() { + Options option1 = Options.fromTransactionOptions(); + Options option2 = Options.fromTransactionOptions(); + assertEquals(option1.hashCode(), option2.hashCode()); + } + + @Test + public void testTransactionOptionsWithPriorityEquality() { + Options optionsWithHighPriority1 = + Options.fromTransactionOptions(Options.priority(RpcPriority.HIGH)); + Options optionsWithHighPriority2 = + Options.fromTransactionOptions(Options.priority(RpcPriority.HIGH)); + assertEquals(optionsWithHighPriority1, optionsWithHighPriority2); + + Options optionsWithMediumPriority = + Options.fromTransactionOptions(Options.priority(RpcPriority.MEDIUM)); + assertNotEquals(optionsWithHighPriority1, optionsWithMediumPriority); + } + + @Test + public void testTransactionOptionsWithPriorityHashCode() { + Options optionsWithHighPriority1 = + Options.fromTransactionOptions(Options.priority(RpcPriority.HIGH)); + Options optionsWithHighPriority2 = + 
Options.fromTransactionOptions(Options.priority(RpcPriority.HIGH)); + assertEquals(optionsWithHighPriority1.hashCode(), optionsWithHighPriority2.hashCode()); + + Options optionsWithMediumPriority = + Options.fromTransactionOptions(Options.priority(RpcPriority.MEDIUM)); + assertNotEquals(optionsWithHighPriority1.hashCode(), optionsWithMediumPriority.hashCode()); + } + + @Test + public void testUpdateOptionsEquality() { + Options option1 = Options.fromUpdateOptions(); + Options option2 = Options.fromUpdateOptions(); + assertEquals(option1, option2); + } + + @Test + public void testUpdateOptionsHashCode() { + Options option1 = Options.fromUpdateOptions(); + Options option2 = Options.fromUpdateOptions(); + assertEquals(option1.hashCode(), option2.hashCode()); + } + + @Test + public void testUpdateOptionsWithPriorityEquality() { + Options optionsWithHighPriority1 = + Options.fromUpdateOptions(Options.priority(RpcPriority.HIGH)); + Options optionsWithHighPriority2 = + Options.fromUpdateOptions(Options.priority(RpcPriority.HIGH)); + assertEquals(optionsWithHighPriority1, optionsWithHighPriority2); + + Options optionsWithMediumPriority = + Options.fromUpdateOptions(Options.priority(RpcPriority.MEDIUM)); + assertNotEquals(optionsWithHighPriority1, optionsWithMediumPriority); + } + + @Test + public void testUpdateOptionsWithPriorityHashCode() { + Options optionsWithHighPriority1 = + Options.fromUpdateOptions(Options.priority(RpcPriority.HIGH)); + Options optionsWithHighPriority2 = + Options.fromUpdateOptions(Options.priority(RpcPriority.HIGH)); + assertEquals(optionsWithHighPriority1.hashCode(), optionsWithHighPriority2.hashCode()); + + Options optionsWithMediumPriority = + Options.fromUpdateOptions(Options.priority(RpcPriority.MEDIUM)); + assertNotEquals(optionsWithHighPriority1.hashCode(), optionsWithMediumPriority.hashCode()); + } + + @Test + public void testQueryOptionsEquality() { + Options option1 = Options.fromQueryOptions(); + Options option2 = 
Options.fromQueryOptions(); + assertEquals(option1, option2); + } + + @Test + public void testQueryOptionsHashCode() { + Options option1 = Options.fromQueryOptions(); + Options option2 = Options.fromQueryOptions(); + assertEquals(option1.hashCode(), option2.hashCode()); + } + + @Test + public void testQueryOptionsWithPriorityEquality() { + Options optionsWithHighPriority1 = Options.fromQueryOptions(Options.priority(RpcPriority.HIGH)); + Options optionsWithHighPriority2 = Options.fromQueryOptions(Options.priority(RpcPriority.HIGH)); + assertEquals(optionsWithHighPriority1, optionsWithHighPriority2); + + Options optionsWithMediumPriority = + Options.fromQueryOptions(Options.priority(RpcPriority.MEDIUM)); + assertNotEquals(optionsWithHighPriority1, optionsWithMediumPriority); + + Options optionsWithHighPriorityAndBufferRows = + Options.fromQueryOptions(Options.priority(RpcPriority.HIGH), Options.bufferRows(10)); + assertNotEquals(optionsWithHighPriorityAndBufferRows, optionsWithHighPriority1); + } + + @Test + public void testQueryOptionsWithPriorityHashCode() { + Options optionsWithHighPriority1 = Options.fromQueryOptions(Options.priority(RpcPriority.HIGH)); + Options optionsWithHighPriority2 = Options.fromQueryOptions(Options.priority(RpcPriority.HIGH)); + assertEquals(optionsWithHighPriority1.hashCode(), optionsWithHighPriority2.hashCode()); + + Options optionsWithMediumPriority = + Options.fromQueryOptions(Options.priority(RpcPriority.MEDIUM)); + assertNotEquals(optionsWithHighPriority1.hashCode(), optionsWithMediumPriority.hashCode()); + + Options optionsWithHighPriorityAndBufferRows = + Options.fromQueryOptions(Options.priority(RpcPriority.HIGH), Options.bufferRows(10)); + assertNotEquals( + optionsWithHighPriorityAndBufferRows.hashCode(), optionsWithHighPriority1.hashCode()); + } + + @Test + public void testReadOptionsEquality() { + Options option1 = Options.fromReadOptions(); + Options option2 = Options.fromReadOptions(); + assertEquals(option1, option2); + } + 
+ @Test + public void testReadOptionsHashCode() { + Options option1 = Options.fromReadOptions(); + Options option2 = Options.fromReadOptions(); + assertEquals(option1.hashCode(), option2.hashCode()); + } + + @Test + public void testReadOptionsWithPriorityEquality() { + Options optionsWithHighPriority1 = Options.fromReadOptions(Options.priority(RpcPriority.HIGH)); + Options optionsWithHighPriority2 = Options.fromReadOptions(Options.priority(RpcPriority.HIGH)); + assertEquals(optionsWithHighPriority1, optionsWithHighPriority2); + + Options optionsWithMediumPriority = + Options.fromReadOptions(Options.priority(RpcPriority.MEDIUM)); + assertNotEquals(optionsWithHighPriority1, optionsWithMediumPriority); + + Options optionsWithHighPriorityAndBufferRows = + Options.fromReadOptions(Options.priority(RpcPriority.HIGH), Options.bufferRows(10)); + assertNotEquals(optionsWithHighPriorityAndBufferRows, optionsWithHighPriority1); + } + + @Test + public void testReadOptionsWithPriorityHashCode() { + Options optionsWithHighPriority1 = Options.fromReadOptions(Options.priority(RpcPriority.HIGH)); + Options optionsWithHighPriority2 = Options.fromReadOptions(Options.priority(RpcPriority.HIGH)); + assertEquals(optionsWithHighPriority1.hashCode(), optionsWithHighPriority2.hashCode()); + + Options optionsWithMediumPriority = + Options.fromReadOptions(Options.priority(RpcPriority.MEDIUM)); + assertNotEquals(optionsWithHighPriority1.hashCode(), optionsWithMediumPriority.hashCode()); + + Options optionsWithHighPriorityAndBufferRows = + Options.fromReadOptions(Options.priority(RpcPriority.HIGH), Options.bufferRows(10)); + assertNotEquals( + optionsWithHighPriorityAndBufferRows.hashCode(), optionsWithHighPriority1.hashCode()); + } + + @Test + public void testFromUpdateOptions() { + Options options = Options.fromUpdateOptions(); + assertThat(options.toString()).isEqualTo(""); + } + + @Test + public void testTransactionOptions() { + RpcPriority prio = RpcPriority.HIGH; + Options options = 
Options.fromTransactionOptions(Options.priority(prio)); + assertThat(options.toString()).isEqualTo("priority: " + prio + " "); + assertThat(options.priority()).isEqualTo(Priority.PRIORITY_HIGH); + } + + @Test + public void testTransactionOptionsDefaultEqual() { + Options options1 = Options.fromTransactionOptions(); + Options options2 = Options.fromTransactionOptions(); + assertEquals(options1, options2); + } + + @Test + public void testTransactionOptionsPriorityEquality() { + Options options1 = Options.fromTransactionOptions(Options.priority(RpcPriority.HIGH)); + Options options2 = Options.fromTransactionOptions(Options.priority(RpcPriority.HIGH)); + Options options3 = Options.fromTransactionOptions(); + Options options4 = Options.fromTransactionOptions(Options.priority(RpcPriority.LOW)); + + assertEquals(options1, options2); + assertNotEquals(options1, options3); + assertNotEquals(options1, options4); + assertNotEquals(options2, options3); + assertNotEquals(options2, options4); + } + + @Test + public void updateOptionsTest() { + String tag = "app=spanner,env=test"; + Options options = Options.fromUpdateOptions(Options.tag(tag)); + + assertEquals("tag: " + tag + " ", options.toString()); + assertTrue(options.hasTag()); + assertThat(options.tag()).isEqualTo(tag); + assertThat(options.hashCode()).isEqualTo(-2118248262); + } + + @Test + public void updateEquality() { + Options o1; + Options o2; + Options o3; + + o1 = Options.fromUpdateOptions(); + o2 = Options.fromUpdateOptions(); + assertThat(o1.equals(o2)).isTrue(); + + o2 = Options.fromUpdateOptions(Options.tag("app=spanner,env=test")); + assertThat(o1.equals(o2)).isFalse(); + assertThat(o2.equals(o1)).isFalse(); + + o3 = Options.fromUpdateOptions(Options.tag("app=spanner,env=test")); + assertThat(o2.equals(o3)).isTrue(); + + o3 = Options.fromUpdateOptions(Options.tag("app=spanner,env=stage")); + assertThat(o2.equals(o3)).isFalse(); + } + + @Test + public void transactionOptionsTest() { + String tag = 
"app=spanner,env=test"; + Options options = Options.fromTransactionOptions(Options.tag(tag)); + + assertEquals("tag: " + tag + " ", options.toString()); + assertTrue(options.hasTag()); + assertThat(options.tag()).isEqualTo(tag); + assertThat(options.hashCode()).isEqualTo(-2118248262); + } + + @Test + public void transactionEquality() { + Options o1; + Options o2; + Options o3; + + o1 = Options.fromTransactionOptions(); + o2 = Options.fromTransactionOptions(); + assertThat(o1.equals(o2)).isTrue(); + + o2 = Options.fromTransactionOptions(Options.tag("app=spanner,env=test")); + assertThat(o1.equals(o2)).isFalse(); + assertThat(o2.equals(o1)).isFalse(); + + o3 = Options.fromTransactionOptions(Options.tag("app=spanner,env=test")); + assertThat(o2.equals(o3)).isTrue(); + + o3 = Options.fromTransactionOptions(Options.tag("app=spanner,env=stage")); + assertThat(o2.equals(o3)).isFalse(); + } + + @Test + public void optimisticLockEquality() { + Options option1 = Options.fromTransactionOptions(Options.optimisticLock()); + Options option2 = Options.fromTransactionOptions(Options.optimisticLock()); + Options option3 = Options.fromReadOptions(); + + assertEquals(option1, option2); + assertNotEquals(option1, option3); + } + + @Test + public void readLockModeEquality() { + Options option1 = Options.fromTransactionOptions(Options.readLockMode(ReadLockMode.OPTIMISTIC)); + Options option2 = Options.fromTransactionOptions(Options.readLockMode(ReadLockMode.OPTIMISTIC)); + Options option3 = + Options.fromTransactionOptions(Options.readLockMode(ReadLockMode.PESSIMISTIC)); + Options option4 = Options.fromReadOptions(); + + assertEquals(option1, option2); + assertNotEquals(option1, option3); + assertNotEquals(option1, option4); + } + + @Test + public void optimisticLockHashCode() { + Options option1 = Options.fromTransactionOptions(Options.optimisticLock()); + Options option2 = Options.fromTransactionOptions(Options.optimisticLock()); + Options option3 = Options.fromReadOptions(); + + 
assertEquals(option1.hashCode(), option2.hashCode()); + assertNotEquals(option1.hashCode(), option3.hashCode()); + } + + @Test + public void readLockModeHashCode() { + Options option1 = Options.fromTransactionOptions(Options.readLockMode(ReadLockMode.OPTIMISTIC)); + Options option2 = Options.fromTransactionOptions(Options.readLockMode(ReadLockMode.OPTIMISTIC)); + Options option3 = + Options.fromTransactionOptions(Options.readLockMode(ReadLockMode.PESSIMISTIC)); + Options option4 = Options.fromReadOptions(); + + assertEquals(option1.hashCode(), option2.hashCode()); + assertNotEquals(option1.hashCode(), option3.hashCode()); + assertNotEquals(option1.hashCode(), option4.hashCode()); + } + + @Test + public void directedReadEquality() { + Options option1 = Options.fromReadOptions(Options.directedRead(DIRECTED_READ_OPTIONS)); + Options option2 = Options.fromReadOptions(Options.directedRead(DIRECTED_READ_OPTIONS)); + Options option3 = Options.fromTransactionOptions(); + + assertEquals(option1, option2); + assertNotEquals(option1, option3); + } + + @Test + public void directedReadHashCode() { + Options option1 = Options.fromReadOptions(Options.directedRead(DIRECTED_READ_OPTIONS)); + Options option2 = Options.fromReadOptions(Options.directedRead(DIRECTED_READ_OPTIONS)); + Options option3 = Options.fromTransactionOptions(); + + assertEquals(option1.hashCode(), option2.hashCode()); + assertNotEquals(option1.hashCode(), option3.hashCode()); + } + + @Test + public void directedReadsNullNotAllowed() { + assertThrows(NullPointerException.class, () -> Options.directedRead(null)); + } + + @Test + public void transactionOptionsExcludeTxnFromChangeStreams() { + Options option1 = Options.fromTransactionOptions(Options.excludeTxnFromChangeStreams()); + Options option2 = Options.fromTransactionOptions(Options.excludeTxnFromChangeStreams()); + Options option3 = Options.fromTransactionOptions(); + + assertEquals(option1, option2); + assertEquals(option1.hashCode(), option2.hashCode()); + 
assertNotEquals(option1, option3); + assertNotEquals(option1.hashCode(), option3.hashCode()); + + assertTrue(option1.withExcludeTxnFromChangeStreams()); + assertThat(option1.toString()).contains("withExcludeTxnFromChangeStreams: true"); + + assertNull(option3.withExcludeTxnFromChangeStreams()); + assertThat(option3.toString()).doesNotContain("withExcludeTxnFromChangeStreams: true"); + } + + @Test + public void transactionOptionsIsolationLevel() { + Options option1 = + Options.fromTransactionOptions(Options.isolationLevel(IsolationLevel.REPEATABLE_READ)); + Options option2 = + Options.fromTransactionOptions(Options.isolationLevel(IsolationLevel.REPEATABLE_READ)); + Options option3 = Options.fromTransactionOptions(); + + assertEquals(option1, option2); + assertEquals(option1.hashCode(), option2.hashCode()); + assertNotEquals(option1, option3); + assertNotEquals(option1.hashCode(), option3.hashCode()); + + assertEquals(option1.isolationLevel(), IsolationLevel.REPEATABLE_READ); + assertThat(option1.toString()) + .contains("isolationLevel: " + IsolationLevel.REPEATABLE_READ.name()); + + assertNull(option3.isolationLevel()); + assertThat(option3.toString()) + .doesNotContain("isolationLevel: " + IsolationLevel.REPEATABLE_READ.name()); + } + + @Test + public void updateOptionsExcludeTxnFromChangeStreams() { + Options option1 = Options.fromUpdateOptions(Options.excludeTxnFromChangeStreams()); + Options option2 = Options.fromUpdateOptions(Options.excludeTxnFromChangeStreams()); + Options option3 = Options.fromUpdateOptions(); + + assertEquals(option1, option2); + assertEquals(option1.hashCode(), option2.hashCode()); + assertNotEquals(option1, option3); + assertNotEquals(option1.hashCode(), option3.hashCode()); + + assertTrue(option1.withExcludeTxnFromChangeStreams()); + assertThat(option1.toString()).contains("withExcludeTxnFromChangeStreams: true"); + + assertNull(option3.withExcludeTxnFromChangeStreams()); + 
assertThat(option3.toString()).doesNotContain("withExcludeTxnFromChangeStreams: true"); + } + + @Test + public void testLastStatement() { + Options option1 = Options.fromUpdateOptions(Options.lastStatement()); + Options option2 = Options.fromUpdateOptions(Options.lastStatement()); + Options option3 = Options.fromUpdateOptions(); + + assertEquals(option1, option2); + assertEquals(option1.hashCode(), option2.hashCode()); + assertNotEquals(option1, option3); + assertNotEquals(option1.hashCode(), option3.hashCode()); + + assertTrue(option1.isLastStatement()); + assertThat(option1.toString()).contains("lastStatement: true"); + + assertNull(option3.isLastStatement()); + assertThat(option3.toString()).doesNotContain("lastStatement: true"); + } + + @Test + public void testTransactionOptionCombine_WithNoSpannerOptions() { + com.google.spanner.v1.TransactionOptions primaryOptions = + com.google.spanner.v1.TransactionOptions.newBuilder() + .setIsolationLevel(IsolationLevel.SERIALIZABLE) + .setExcludeTxnFromChangeStreams(true) + .setReadWrite(ReadWrite.newBuilder().setReadLockMode(ReadLockMode.PESSIMISTIC)) + .build(); + com.google.spanner.v1.TransactionOptions spannerOptions = + com.google.spanner.v1.TransactionOptions.newBuilder() + .setIsolationLevel(IsolationLevel.REPEATABLE_READ) + .build(); + com.google.spanner.v1.TransactionOptions combinedOptions = + spannerOptions.toBuilder().mergeFrom(primaryOptions).build(); + assertEquals(combinedOptions.getIsolationLevel(), IsolationLevel.SERIALIZABLE); + assertTrue(combinedOptions.getExcludeTxnFromChangeStreams()); + assertEquals( + combinedOptions.getReadWrite(), + ReadWrite.newBuilder().setReadLockMode(ReadLockMode.PESSIMISTIC).build()); + } + + @Test + public void testOptions_WithMultipleDifferentIsolationLevels() { + TransactionOption[] transactionOptions = { + Options.isolationLevel(IsolationLevel.REPEATABLE_READ), + Options.isolationLevel(IsolationLevel.SERIALIZABLE) + }; + Options options = 
Options.fromTransactionOptions(transactionOptions); + assertEquals(options.isolationLevel(), IsolationLevel.SERIALIZABLE); + } + + @Test + public void testRequestId() { + XGoogSpannerRequestId reqId1 = XGoogSpannerRequestId.of(1, 2, 3, 4); + XGoogSpannerRequestId reqId2 = XGoogSpannerRequestId.of(2, 3, 4, 5); + Options option1 = Options.fromUpdateOptions(Options.requestId(reqId1)); + Options option1Prime = Options.fromUpdateOptions(Options.requestId(reqId1)); + Options option2 = Options.fromUpdateOptions(Options.requestId(reqId2)); + Options option3 = Options.fromUpdateOptions(); + + assertEquals(option1, option1Prime); + assertNotEquals(option1, option2); + assertEquals(option1.hashCode(), option1Prime.hashCode()); + assertNotEquals(option1, option2); + assertNotEquals(option1, option3); + assertNotEquals(option1.hashCode(), option3.hashCode()); + + assertTrue(option1.hasReqId()); + assertThat(option1.toString()).contains("requestId: " + reqId1.toString()); + + assertFalse(option3.hasReqId()); + assertThat(option3.toString()).doesNotContain("requestId"); + } + + @Test + public void testRequestIdOptionEqualsAndHashCode() { + XGoogSpannerRequestId reqId1 = XGoogSpannerRequestId.of(1, 2, 3, 4); + XGoogSpannerRequestId reqId2 = XGoogSpannerRequestId.of(2, 3, 4, 5); + Options.RequestIdOption opt1 = Options.requestId(reqId1); + Options.RequestIdOption opt1Prime = Options.requestId(reqId1); + Options.RequestIdOption opt2 = Options.requestId(reqId2); + + assertTrue(opt1.equals(opt1)); + assertTrue(opt1.equals(opt1Prime)); + assertEquals(opt1.hashCode(), opt1Prime.hashCode()); + assertFalse(opt1.equals(opt2)); + assertNotEquals(opt1, opt2); + assertNotEquals(opt1.hashCode(), opt2.hashCode()); + } + + @Test + public void testOptions_WithMultipleDifferentRequestIds() { + XGoogSpannerRequestId reqId1 = XGoogSpannerRequestId.of(1, 1, 1, 1); + XGoogSpannerRequestId reqId2 = XGoogSpannerRequestId.of(1, 1, 1, 2); + TransactionOption[] transactionOptions = { + 
Options.requestId(reqId1), Options.requestId(reqId2), + }; + Options options = Options.fromTransactionOptions(transactionOptions); + assertNotEquals(options.reqId(), reqId1); + assertEquals(options.reqId(), reqId2); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OrphanedTransactionTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OrphanedTransactionTest.java new file mode 100644 index 000000000000..2e0a72086ed2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OrphanedTransactionTest.java @@ -0,0 +1,147 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertNull; + +import com.google.api.core.ApiFuture; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.AsyncTransactionManager.TransactionContextFuture; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import com.google.cloud.spanner.connection.RandomResultSetGenerator; +import com.google.common.base.Function; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.RollbackRequest; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Status; +import java.util.Objects; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.threeten.bp.Duration; + +@RunWith(JUnit4.class) +public class OrphanedTransactionTest extends AbstractMockServerTest { + private static final Statement STATEMENT = Statement.of("SELECT * FROM random"); + + @BeforeClass + public static void setupReadResult() { + com.google.cloud.spanner.connection.RandomResultSetGenerator generator = + new RandomResultSetGenerator(10); + mockSpanner.putStatementResult(StatementResult.query(STATEMENT, generator.generate())); + } + + private Spanner createSpanner() { + return SpannerOptions.newBuilder() + .setProjectId("fake-project") + .setHost("http://localhost:" + getPort()) + .setCredentials(NoCredentials.getInstance()) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setSessionPoolOption( + SessionPoolOptions.newBuilder().setWaitForMinSessions(Duration.ofSeconds(5L)).build()) + .build() + .getService(); + } + + @Test + public void testOrphanedTransaction() throws Exception { + ExecutorService executor = Executors.newCachedThreadPool(); + try 
(Spanner spanner = createSpanner()) { + DatabaseClient client = + spanner.getDatabaseClient( + DatabaseId.of("fake-project", "fake-instance", "fake-database")); + // Freeze the mock server to ensure that the request lands on the mock server before we + // proceed. + mockSpanner.freeze(); + AsyncTransactionManager manager = client.transactionManagerAsync(); + TransactionContextFuture context = manager.beginAsync(); + context.then( + (txn, input) -> { + try (AsyncResultSet resultSet = txn.executeQueryAsync(STATEMENT)) { + resultSet.toListAsync( + (Function) + row -> Objects.requireNonNull(row).getValue(0).getAsString(), + executor); + } + return null; + }, + executor); + // Wait for the ExecuteSqlRequest to land on the mock server. + mockSpanner.waitForRequestsToContain( + input -> + input instanceof ExecuteSqlRequest + && ((ExecuteSqlRequest) input).getSql().equals(STATEMENT.getSql()), + 5000L); + // Now close the transaction. This should (eventually) trigger a rollback, even though the + // client has not yet received a transaction ID. + manager.closeAsync(); + // Unfreeze the mock server and wait for the Rollback request to be received. + mockSpanner.unfreeze(); + mockSpanner.waitForLastRequestToBe(RollbackRequest.class, 5000L); + } finally { + executor.shutdown(); + } + } + + @Test + public void testOrphanedTransactionWithFailedFirstQuery() throws Exception { + ExecutorService executor = Executors.newCachedThreadPool(); + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException( + Status.INVALID_ARGUMENT.withDescription("table not found").asRuntimeException())); + try (Spanner spanner = createSpanner()) { + DatabaseClient client = + spanner.getDatabaseClient( + DatabaseId.of("fake-project", "fake-instance", "fake-database")); + // Freeze the mock server to ensure that the request lands on the mock server before we + // proceed. 
+ mockSpanner.freeze(); + AsyncTransactionManager manager = client.transactionManagerAsync(); + TransactionContextFuture context = manager.beginAsync(); + context.then( + (txn, input) -> { + try (AsyncResultSet resultSet = txn.executeQueryAsync(STATEMENT)) { + resultSet.toListAsync( + (Function) + row -> Objects.requireNonNull(row).getValue(0).getAsString(), + executor); + } + return null; + }, + executor); + // Wait for the ExecuteSqlRequest to land on the mock server. + mockSpanner.waitForRequestsToContain( + input -> + input instanceof ExecuteSqlRequest + && ((ExecuteSqlRequest) input).getSql().equals(STATEMENT.getSql()), + 5000L); + // Now close the transaction. This will not trigger a Rollback, as the statement failed. + // The closeResult will be done when the error for the failed statement is returned to the + // client. + ApiFuture closeResult = manager.closeAsync(); + mockSpanner.unfreeze(); + assertNull(closeResult.get()); + } finally { + executor.shutdown(); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ParallelIntegrationTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ParallelIntegrationTest.java new file mode 100644 index 000000000000..3553f331b315 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ParallelIntegrationTest.java @@ -0,0 +1,20 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +/** Parallel Integration Test interface. */ +public interface ParallelIntegrationTest extends IntegrationTest {} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/PartitionOptionsTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/PartitionOptionsTest.java new file mode 100644 index 000000000000..1ea0387417de --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/PartitionOptionsTest.java @@ -0,0 +1,103 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.testing.SerializableTester.reserializeAndAssert; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; + +import com.google.common.testing.EqualsTester; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link com.google.cloud.spanner.PartitionOptions}. 
*/ +@RunWith(JUnit4.class) +public class PartitionOptionsTest { + + @Test + public void builder() { + long desiredBytesPerBatch = 1 << 30; + long maxPartitionCount = 100; + PartitionOptions parameters = + PartitionOptions.newBuilder() + .setPartitionSizeBytes(desiredBytesPerBatch) + .setMaxPartitions(maxPartitionCount) + .build(); + assertThat(parameters.getPartitionSizeBytes()).isEqualTo(desiredBytesPerBatch); + assertThat(parameters.getMaxPartitions()).isEqualTo(maxPartitionCount); + + // Test serialization. + reserializeAndAssert(parameters); + } + + @Test + public void emptyBuilder() { + PartitionOptions parameters = PartitionOptions.newBuilder().build(); + assertThat(parameters.getPartitionSizeBytes()).isEqualTo(0); + assertThat(parameters.getMaxPartitions()).isEqualTo(0); + + // Test serialization. + reserializeAndAssert(parameters); + } + + @Test + public void defaultBuilder() { + PartitionOptions parameters = PartitionOptions.getDefaultInstance(); + assertThat(parameters.getPartitionSizeBytes()).isEqualTo(0); + assertThat(parameters.getMaxPartitions()).isEqualTo(0); + + // Test serialization. 
+ reserializeAndAssert(parameters); + } + + @Test + public void equalAndHashCode() { + new EqualsTester() + .addEqualityGroup( + PartitionOptions.newBuilder() + .setPartitionSizeBytes(1 << 30) + .setMaxPartitions(100) + .build(), + PartitionOptions.newBuilder() + .setPartitionSizeBytes(1 << 30) + .setMaxPartitions(100) + .build()) + .addEqualityGroup( + PartitionOptions.newBuilder().build(), PartitionOptions.getDefaultInstance()) + .testEquals(); + } + + @Test + public void invalidDesiredBytesPerBatch() { + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> PartitionOptions.newBuilder().setPartitionSizeBytes(-1).build()); + assertNotNull(e.getMessage()); + } + + @Test + public void invalidMaxPartitionCount() { + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> PartitionOptions.newBuilder().setMaxPartitions(-1).build()); + assertNotNull(e.getMessage()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/PartitionTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/PartitionTest.java new file mode 100644 index 000000000000..54954e53c33b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/PartitionTest.java @@ -0,0 +1,145 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.testing.SerializableTester.reserializeAndAssert; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import com.google.common.testing.EqualsTester; +import com.google.protobuf.ByteString; +import java.util.Arrays; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link com.google.cloud.spanner.Partition}. */ +@RunWith(JUnit4.class) +public class PartitionTest { + + ByteString partitionToken = ByteString.copyFromUtf8("partitionToken"); + PartitionOptions partitionOptions = PartitionOptions.getDefaultInstance(); + String table = "table"; + String index = "index"; + KeySet keys = KeySet.singleKey(Key.of("a", "b", "c")); + Iterable columns = Arrays.asList("c1", "c2"); + Options.ReadOption rOption = Options.limit(10); + Options readOptions = Options.fromReadOptions(rOption); + Statement stmt = + Statement.newBuilder("SELECT Name FROM Users") + .append(" WHERE Id = @id") + .bind("id") + .to(1234) + .append(" AND Status = @status") + .bind("status") + .to("ACTIVE") + .build(); + Options.QueryOption qOption = Options.prefetchChunks(10); + Options queryOptions = Options.fromQueryOptions(qOption); + + @Test + public void statementInstance() { + Partition p = + Partition.createQueryPartition(partitionToken, partitionOptions, stmt, queryOptions); + assertThat(p.getPartitionToken()).isEqualTo(partitionToken); + assertThat(p.getStatement()).isEqualTo(stmt); + assertThat(p.getQueryOptions().hasPrefetchChunks()).isTrue(); + assertThat(p.getQueryOptions().prefetchChunks()).isEqualTo(10); + + assertNull(p.getTable()); + assertNull(p.getColumns()); + assertNull(p.getKeys()); + assertNull(p.getIndex()); + assertNull(p.getReadOptions()); + + // Test serialization. 
+ reserializeAndAssert(p); + } + + @Test + public void readInstance() { + Partition p = + Partition.createReadPartition( + partitionToken, partitionOptions, table, null /*index*/, keys, columns, readOptions); + assertThat(p.getPartitionToken()).isEqualTo(partitionToken); + + assertThat(p.getTable()).isEqualTo(table); + assertThat(p.getKeys()).isEqualTo(keys); + assertThat(p.getColumns()).isEqualTo(columns); + assertTrue(p.getReadOptions().hasLimit()); + assertThat(p.getReadOptions().limit()).isEqualTo(10); + + assertNull(p.getIndex()); + assertNull(p.getStatement()); + assertNull(p.getQueryOptions()); + + // Test serialization. + reserializeAndAssert(p); + } + + @Test + public void readUsingIndexInstance() { + Partition p = + Partition.createReadPartition( + partitionToken, partitionOptions, table, index, keys, columns, readOptions); + assertThat(p.getPartitionToken()).isEqualTo(partitionToken); + assertThat(p.getTable()).isEqualTo(table); + assertThat(p.getIndex()).isEqualTo(index); + assertThat(p.getKeys()).isEqualTo(keys); + assertThat(p.getColumns()).isEqualTo(columns); + assertTrue(p.getReadOptions().hasLimit()); + assertThat(p.getReadOptions().limit()).isEqualTo(10); + + assertNull(p.getStatement()); + assertNull(p.getQueryOptions()); + + // Test serialization. 
+ reserializeAndAssert(p); + } + + @Test + public void equalAndHashCode() { + new EqualsTester() + .addEqualityGroup( + Partition.createQueryPartition(partitionToken, partitionOptions, stmt, queryOptions), + Partition.createQueryPartition(partitionToken, partitionOptions, stmt, queryOptions)) + .addEqualityGroup( + Partition.createReadPartition( + partitionToken, + partitionOptions, + table, + null /*index*/, + keys, + columns, + readOptions), + Partition.createReadPartition( + partitionToken, + partitionOptions, + table, + null /*index*/, + keys, + columns, + readOptions)) + .addEqualityGroup( + Partition.createReadPartition( + partitionToken, partitionOptions, table, index, keys, columns, readOptions), + Partition.createReadPartition( + partitionToken, partitionOptions, table, index, keys, columns, readOptions)) + .testEquals(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/PartitionedDmlTransactionTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/PartitionedDmlTransactionTest.java new file mode 100644 index 000000000000..c6155f0cbb65 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/PartitionedDmlTransactionTest.java @@ -0,0 +1,444 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyMap; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.rpc.AbortedException; +import com.google.api.gax.rpc.InternalException; +import com.google.api.gax.rpc.ServerStream; +import com.google.api.gax.rpc.UnavailableException; +import com.google.cloud.spanner.Options.RpcPriority; +import com.google.cloud.spanner.XGoogSpannerRequestId.NoopRequestIdCreator; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.common.base.Ticker; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.ByteString; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ExecuteSqlRequest.QueryMode; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.RequestOptions; +import com.google.spanner.v1.RequestOptions.Priority; +import com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.Transaction; +import com.google.spanner.v1.TransactionSelector; +import io.grpc.Status.Code; +import java.time.Duration; +import java.util.Collections; +import java.util.Iterator; +import java.util.concurrent.TimeUnit; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +@SuppressWarnings("unchecked") 
+@RunWith(JUnit4.class) +public class PartitionedDmlTransactionTest { + + @Mock private SpannerRpc rpc; + + @Mock private SessionImpl session; + + @Mock private Ticker ticker; + + private PartitionedDmlTransaction tx; + + private final String sessionId = "projects/p/instances/i/databases/d/sessions/s"; + private final ByteString txId = ByteString.copyFromUtf8("tx"); + private final ByteString resumeToken = ByteString.copyFromUtf8("resume"); + private final String sql = "UPDATE FOO SET BAR=1 WHERE TRUE"; + private final String tag = "app=spanner,env=test"; + private final ExecuteSqlRequest executeRequestWithoutResumeToken = + ExecuteSqlRequest.newBuilder() + .setQueryMode(QueryMode.NORMAL) + .setSession(sessionId) + .setSql(sql) + .setTransaction(TransactionSelector.newBuilder().setId(txId)) + .build(); + private final ExecuteSqlRequest executeRequestWithResumeToken = + executeRequestWithoutResumeToken.toBuilder().setResumeToken(resumeToken).build(); + private final ExecuteSqlRequest executeRequestWithRequestOptions = + executeRequestWithoutResumeToken.toBuilder() + .setRequestOptions(RequestOptions.newBuilder().setRequestTag(tag).build()) + .build(); + + @Before + public void setup() { + MockitoAnnotations.initMocks(this); + when(session.getName()).thenReturn(sessionId); + when(session.getRequestIdCreator()).thenReturn(NoopRequestIdCreator.INSTANCE); + when(session.getOptions()).thenReturn(Collections.EMPTY_MAP); + when(session.getRequestIdCreator()).thenReturn(NoopRequestIdCreator.INSTANCE); + when(rpc.beginTransaction(any(BeginTransactionRequest.class), anyMap(), eq(true))) + .thenReturn(Transaction.newBuilder().setId(txId).build()); + + tx = new PartitionedDmlTransaction(session, rpc, ticker); + } + + @Test + public void testExecuteStreamingPartitionedUpdate() { + ResultSetStats stats = ResultSetStats.newBuilder().setRowCountLowerBound(1000L).build(); + PartialResultSet p1 = PartialResultSet.newBuilder().setResumeToken(resumeToken).build(); + PartialResultSet p2 
= PartialResultSet.newBuilder().setStats(stats).build(); + ServerStream stream = mock(ServerStream.class); + when(stream.iterator()).thenReturn(ImmutableList.of(p1, p2).iterator()); + when(rpc.executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithoutResumeToken), anyMap(), any(), any(Duration.class))) + .thenReturn(stream); + + long count = tx.executeStreamingPartitionedUpdate(Statement.of(sql), Duration.ofMinutes(10)); + + assertThat(count).isEqualTo(1000L); + verify(rpc).beginTransaction(any(BeginTransactionRequest.class), anyMap(), eq(true)); + verify(rpc) + .executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithoutResumeToken), anyMap(), any(), any(Duration.class)); + } + + @Test + public void testExecuteStreamingPartitionedUpdateWithUpdateOptions() { + ResultSetStats stats = ResultSetStats.newBuilder().setRowCountLowerBound(1000L).build(); + PartialResultSet p1 = PartialResultSet.newBuilder().setResumeToken(resumeToken).build(); + PartialResultSet p2 = PartialResultSet.newBuilder().setStats(stats).build(); + ServerStream stream = mock(ServerStream.class); + when(stream.iterator()).thenReturn(ImmutableList.of(p1, p2).iterator()); + when(rpc.executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithRequestOptions), anyMap(), any(), any(Duration.class))) + .thenReturn(stream); + + long count = + tx.executeStreamingPartitionedUpdate( + Statement.of(sql), Duration.ofMinutes(10), Options.tag(tag)); + + assertThat(count).isEqualTo(1000L); + verify(rpc).beginTransaction(any(BeginTransactionRequest.class), anyMap(), eq(true)); + verify(rpc) + .executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithRequestOptions), anyMap(), any(), any(Duration.class)); + } + + @Test + public void testExecuteStreamingPartitionedUpdateAborted() { + ResultSetStats stats = ResultSetStats.newBuilder().setRowCountLowerBound(1000L).build(); + PartialResultSet p1 = PartialResultSet.newBuilder().setResumeToken(resumeToken).build(); + PartialResultSet p2 = 
PartialResultSet.newBuilder().setStats(stats).build(); + ServerStream stream1 = mock(ServerStream.class); + Iterator iterator = mock(Iterator.class); + when(iterator.hasNext()).thenReturn(true, true, false); + when(iterator.next()) + .thenReturn(p1) + .thenThrow( + new AbortedException( + "transaction aborted", null, GrpcStatusCode.of(Code.ABORTED), true)); + when(stream1.iterator()).thenReturn(iterator); + ServerStream stream2 = mock(ServerStream.class); + when(stream2.iterator()).thenReturn(ImmutableList.of(p1, p2).iterator()); + when(rpc.executeStreamingPartitionedDml( + any(ExecuteSqlRequest.class), anyMap(), any(), any(Duration.class))) + .thenReturn(stream1, stream2); + + long count = tx.executeStreamingPartitionedUpdate(Statement.of(sql), Duration.ofMinutes(10)); + + assertThat(count).isEqualTo(1000L); + verify(rpc, times(2)).beginTransaction(any(BeginTransactionRequest.class), anyMap(), eq(true)); + verify(rpc, times(2)) + .executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithoutResumeToken), anyMap(), any(), any(Duration.class)); + } + + @Test + public void testExecuteStreamingPartitionedUpdateUnavailable() { + ResultSetStats stats = ResultSetStats.newBuilder().setRowCountLowerBound(1000L).build(); + PartialResultSet p1 = PartialResultSet.newBuilder().setResumeToken(resumeToken).build(); + PartialResultSet p2 = PartialResultSet.newBuilder().setStats(stats).build(); + ServerStream stream1 = mock(ServerStream.class); + Iterator iterator = mock(Iterator.class); + when(iterator.hasNext()).thenReturn(true, true, false); + when(iterator.next()) + .thenReturn(p1) + .thenThrow( + new UnavailableException( + "temporary unavailable", null, GrpcStatusCode.of(Code.UNAVAILABLE), true)); + when(stream1.iterator()).thenReturn(iterator); + ServerStream stream2 = mock(ServerStream.class); + when(stream2.iterator()).thenReturn(ImmutableList.of(p1, p2).iterator()); + when(rpc.executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithoutResumeToken), anyMap(), 
any(), any(Duration.class))) + .thenReturn(stream1); + when(rpc.executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithResumeToken), anyMap(), any(), any(Duration.class))) + .thenReturn(stream2); + + long count = tx.executeStreamingPartitionedUpdate(Statement.of(sql), Duration.ofMinutes(10)); + + assertThat(count).isEqualTo(1000L); + verify(rpc).beginTransaction(any(BeginTransactionRequest.class), anyMap(), eq(true)); + verify(rpc) + .executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithoutResumeToken), anyMap(), any(), any(Duration.class)); + verify(rpc) + .executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithResumeToken), anyMap(), any(), any(Duration.class)); + } + + @Test + public void testExecuteStreamingPartitionedUpdateUnavailableAndThenDeadlineExceeded() { + PartialResultSet p1 = PartialResultSet.newBuilder().setResumeToken(resumeToken).build(); + ServerStream stream1 = mock(ServerStream.class); + Iterator iterator = mock(Iterator.class); + when(iterator.hasNext()).thenReturn(true, true, false); + when(iterator.next()) + .thenReturn(p1) + .thenThrow( + new UnavailableException( + "temporary unavailable", null, GrpcStatusCode.of(Code.UNAVAILABLE), true)); + when(stream1.iterator()).thenReturn(iterator); + when(rpc.executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithoutResumeToken), anyMap(), any(), any(Duration.class))) + .thenReturn(stream1); + when(ticker.read()).thenReturn(0L, 1L, TimeUnit.NANOSECONDS.convert(10L, TimeUnit.MINUTES)); + + SpannerException e = + assertThrows( + SpannerException.class, + () -> tx.executeStreamingPartitionedUpdate(Statement.of(sql), Duration.ofMinutes(10))); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + verify(rpc).beginTransaction(any(BeginTransactionRequest.class), anyMap(), eq(true)); + verify(rpc) + .executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithoutResumeToken), anyMap(), any(), any(Duration.class)); + } + + @Test + public void 
testExecuteStreamingPartitionedUpdateAbortedAndThenDeadlineExceeded() { + PartialResultSet p1 = PartialResultSet.newBuilder().setResumeToken(resumeToken).build(); + ServerStream stream1 = mock(ServerStream.class); + Iterator iterator = mock(Iterator.class); + when(iterator.hasNext()).thenReturn(true, true, false); + when(iterator.next()) + .thenReturn(p1) + .thenThrow( + new AbortedException( + "transaction aborted", null, GrpcStatusCode.of(Code.ABORTED), true)); + when(stream1.iterator()).thenReturn(iterator); + when(rpc.executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithoutResumeToken), anyMap(), any(), any(Duration.class))) + .thenReturn(stream1); + when(ticker.read()).thenReturn(0L, 1L, TimeUnit.NANOSECONDS.convert(10L, TimeUnit.MINUTES)); + + SpannerException e = + assertThrows( + SpannerException.class, + () -> tx.executeStreamingPartitionedUpdate(Statement.of(sql), Duration.ofMinutes(10))); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + verify(rpc, times(2)).beginTransaction(any(BeginTransactionRequest.class), anyMap(), eq(true)); + verify(rpc) + .executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithoutResumeToken), anyMap(), any(), any(Duration.class)); + } + + @Test + public void testExecuteStreamingPartitionedUpdateMultipleAbortsUntilDeadlineExceeded() { + PartialResultSet p1 = PartialResultSet.newBuilder().setResumeToken(resumeToken).build(); + ServerStream stream1 = mock(ServerStream.class); + Iterator iterator = mock(Iterator.class); + when(iterator.hasNext()).thenReturn(true); + when(iterator.next()) + .thenReturn(p1) + .thenThrow( + new AbortedException( + "transaction aborted", null, GrpcStatusCode.of(Code.ABORTED), true)); + when(stream1.iterator()).thenReturn(iterator); + when(rpc.executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithoutResumeToken), anyMap(), any(), any(Duration.class))) + .thenReturn(stream1); + when(ticker.read()) + .thenAnswer( + new Answer() { + long ticks = 0L; + + @Override + 
public Long answer(InvocationOnMock invocation) { + return TimeUnit.NANOSECONDS.convert(++ticks, TimeUnit.MINUTES); + } + }); + + SpannerException e = + assertThrows( + SpannerException.class, + () -> tx.executeStreamingPartitionedUpdate(Statement.of(sql), Duration.ofMinutes(10))); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + // It should start a transaction exactly 10 times (10 ticks == 10 minutes). + verify(rpc, times(10)).beginTransaction(any(BeginTransactionRequest.class), anyMap(), eq(true)); + // The last transaction should timeout before it starts the actual statement execution, which + // means that the execute method is only executed 9 times. + verify(rpc, times(9)) + .executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithoutResumeToken), anyMap(), any(), any(Duration.class)); + } + + @Test + public void testExecuteStreamingPartitionedUpdateUnexpectedEOS() { + ResultSetStats stats = ResultSetStats.newBuilder().setRowCountLowerBound(1000L).build(); + PartialResultSet p1 = PartialResultSet.newBuilder().setResumeToken(resumeToken).build(); + PartialResultSet p2 = PartialResultSet.newBuilder().setStats(stats).build(); + ServerStream stream1 = mock(ServerStream.class); + Iterator iterator = mock(Iterator.class); + when(iterator.hasNext()).thenReturn(true, true, false); + when(iterator.next()) + .thenReturn(p1) + .thenThrow( + new InternalException( + "INTERNAL: Received unexpected EOS on DATA frame from server.", + null, + GrpcStatusCode.of(Code.INTERNAL), + true)); + when(stream1.iterator()).thenReturn(iterator); + ServerStream stream2 = mock(ServerStream.class); + when(stream2.iterator()).thenReturn(ImmutableList.of(p1, p2).iterator()); + when(rpc.executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithoutResumeToken), anyMap(), any(), any(Duration.class))) + .thenReturn(stream1); + when(rpc.executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithResumeToken), anyMap(), any(), any(Duration.class))) + 
.thenReturn(stream2); + + PartitionedDmlTransaction tx = new PartitionedDmlTransaction(session, rpc, ticker); + long count = tx.executeStreamingPartitionedUpdate(Statement.of(sql), Duration.ofMinutes(10)); + + assertThat(count).isEqualTo(1000L); + verify(rpc).beginTransaction(any(BeginTransactionRequest.class), anyMap(), eq(true)); + verify(rpc) + .executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithoutResumeToken), anyMap(), any(), any(Duration.class)); + verify(rpc) + .executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithResumeToken), anyMap(), any(), any(Duration.class)); + } + + @Test + public void testExecuteStreamingPartitionedUpdateRSTstream() { + ResultSetStats stats = ResultSetStats.newBuilder().setRowCountLowerBound(1000L).build(); + PartialResultSet p1 = PartialResultSet.newBuilder().setResumeToken(resumeToken).build(); + PartialResultSet p2 = PartialResultSet.newBuilder().setStats(stats).build(); + ServerStream stream1 = mock(ServerStream.class); + Iterator iterator = mock(Iterator.class); + when(iterator.hasNext()).thenReturn(true, true, false); + when(iterator.next()) + .thenReturn(p1) + .thenThrow( + new InternalException( + "INTERNAL: stream terminated by RST_STREAM.", + null, + GrpcStatusCode.of(Code.INTERNAL), + true)); + when(stream1.iterator()).thenReturn(iterator); + ServerStream stream2 = mock(ServerStream.class); + when(stream2.iterator()).thenReturn(ImmutableList.of(p1, p2).iterator()); + when(rpc.executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithoutResumeToken), anyMap(), any(), any(Duration.class))) + .thenReturn(stream1); + when(rpc.executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithResumeToken), anyMap(), any(), any(Duration.class))) + .thenReturn(stream2); + + PartitionedDmlTransaction tx = new PartitionedDmlTransaction(session, rpc, ticker); + long count = tx.executeStreamingPartitionedUpdate(Statement.of(sql), Duration.ofMinutes(10)); + + assertThat(count).isEqualTo(1000L); + 
verify(rpc).beginTransaction(any(BeginTransactionRequest.class), anyMap(), eq(true)); + verify(rpc) + .executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithoutResumeToken), anyMap(), any(), any(Duration.class)); + verify(rpc) + .executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithResumeToken), anyMap(), any(), any(Duration.class)); + } + + @Test + public void testExecuteStreamingPartitionedUpdateGenericInternalException() { + PartialResultSet p1 = PartialResultSet.newBuilder().setResumeToken(resumeToken).build(); + ServerStream stream1 = mock(ServerStream.class); + Iterator iterator = mock(Iterator.class); + when(iterator.hasNext()).thenReturn(true, true, false); + when(iterator.next()) + .thenReturn(p1) + .thenThrow( + new InternalException( + "INTERNAL: Error", null, GrpcStatusCode.of(Code.INTERNAL), false)); + when(stream1.iterator()).thenReturn(iterator); + when(rpc.executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithoutResumeToken), anyMap(), any(), any(Duration.class))) + .thenReturn(stream1); + + PartitionedDmlTransaction tx = new PartitionedDmlTransaction(session, rpc, ticker); + SpannerException e = + assertThrows( + SpannerException.class, + () -> tx.executeStreamingPartitionedUpdate(Statement.of(sql), Duration.ofMinutes(10))); + assertEquals(ErrorCode.INTERNAL, e.getErrorCode()); + verify(rpc).beginTransaction(any(BeginTransactionRequest.class), anyMap(), eq(true)); + verify(rpc) + .executeStreamingPartitionedDml( + Mockito.eq(executeRequestWithoutResumeToken), anyMap(), any(), any(Duration.class)); + } + + @Test + public void testRequestWithoutPriority() { + ExecuteSqlRequest request = + tx.newTransactionRequestFrom( + Statement.of("UPDATE FOO SET BAR=1 WHERE TRUE"), Options.fromUpdateOptions()); + assertEquals(Priority.PRIORITY_UNSPECIFIED, request.getRequestOptions().getPriority()); + } + + @Test + public void testRequestWithPriority() { + ExecuteSqlRequest request = + tx.newTransactionRequestFrom( + 
Statement.of("UPDATE FOO SET BAR=1 WHERE TRUE"), + Options.fromUpdateOptions(Options.priority(RpcPriority.LOW))); + assertEquals(Priority.PRIORITY_LOW, request.getRequestOptions().getPriority()); + } + + @Test + public void testRequestWithPriorityAndRequestTag() { + ExecuteSqlRequest request = + tx.newTransactionRequestFrom( + Statement.of("UPDATE FOO SET BAR=1 WHERE TRUE"), + Options.fromUpdateOptions( + Options.priority(RpcPriority.LOW), Options.tag("app=spanner,env=test"))); + assertEquals(Priority.PRIORITY_LOW, request.getRequestOptions().getPriority()); + assertThat(request.getRequestOptions().getRequestTag()).isEqualTo("app=spanner,env=test"); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/PgNumericTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/PgNumericTest.java new file mode 100644 index 000000000000..40591d17fe8b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/PgNumericTest.java @@ -0,0 +1,372 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.common.collect.ImmutableMap; +import com.google.protobuf.ListValue; +import com.google.protobuf.NullValue; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.TypeAnnotationCode; +import com.google.spanner.v1.TypeCode; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Server; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +public class PgNumericTest { + + private static final String PROJECT = "my-project"; + private static final String INSTANCE = "my-instance"; + private static final String DATABASE = "database"; + private static final ResultSetMetadata RESULT_SET_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("PgNumeric") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.NUMERIC) + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC) + .build())) + .addFields( + Field.newBuilder() + .setName("PgNumericArray") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.NUMERIC) + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC)) + .build())) + .build()) + .build(); + private static 
MockSpannerServiceImpl mockSpanner; + private static InetSocketAddress address; + private static Server server; + private Spanner spanner; + private DatabaseClient databaseClient; + + @BeforeClass + public static void beforeClass() throws Exception { + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); + + address = new InetSocketAddress("localhost", 0); + server = NettyServerBuilder.forAddress(address).addService(mockSpanner).build().start(); + } + + @AfterClass + public static void afterClass() throws Exception { + server.shutdown(); + server.awaitTermination(); + } + + @Before + public void setUp() throws Exception { + final String endpoint = address.getHostString() + ":" + server.getPort(); + spanner = + SpannerOptions.newBuilder() + .setProjectId(PROJECT) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://" + endpoint) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption(SessionPoolOptions.newBuilder().setFailOnSessionLeak().build()) + .build() + .getService(); + databaseClient = spanner.getDatabaseClient(DatabaseId.of(PROJECT, INSTANCE, DATABASE)); + } + + @After + public void tearDown() throws Exception { + spanner.close(); + mockSpanner.removeAllExecutionTimes(); + mockSpanner.reset(); + } + + @Test + public void testQueryNoNullsAsStrings() { + final Statement statement = + Statement.of("SELECT PgNumeric, PgNumericArray FROM Table WHERE Id = 0"); + final com.google.spanner.v1.ResultSet result = + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata(RESULT_SET_METADATA) + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("1.23")) + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("2.34")) + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("3.45")) + .build())) + .build()) + 
.build(); + mockSpanner.putStatementResult(StatementResult.query(statement, result)); + + try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery( + Statement.of("SELECT PgNumeric, PgNumericArray FROM Table WHERE Id = 0"))) { + resultSet.next(); + + assertEquals("1.23", resultSet.getString("PgNumeric")); + assertEquals("1.23", resultSet.getString(0)); + assertEquals(Arrays.asList("2.34", "3.45"), resultSet.getStringList("PgNumericArray")); + assertEquals(Arrays.asList("2.34", "3.45"), resultSet.getStringList(1)); + } + } + + @Test + public void testQueryNoNullsAsValues() { + final Statement statement = + Statement.of("SELECT PgNumeric, PgNumericArray FROM Table WHERE Id = 0"); + final com.google.spanner.v1.ResultSet result = + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata(RESULT_SET_METADATA) + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("1.23")) + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("2.34")) + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("3.45")) + .build())) + .build()) + .build(); + mockSpanner.putStatementResult(StatementResult.query(statement, result)); + + try (ResultSet resultSet = databaseClient.singleUse().executeQuery(statement)) { + resultSet.next(); + + assertEquals(Value.pgNumeric("1.23"), resultSet.getValue("PgNumeric")); + assertEquals(Value.pgNumeric("1.23"), resultSet.getValue(0)); + assertEquals( + Value.pgNumericArray(Arrays.asList("2.34", "3.45")), + resultSet.getValue("PgNumericArray")); + assertEquals(Value.pgNumericArray(Arrays.asList("2.34", "3.45")), resultSet.getValue(1)); + } + } + + @Test + public void testQueryNullElements() { + final Statement statement = + Statement.of("SELECT PgNumeric, PgNumericArray FROM Table WHERE Id = 3"); + final com.google.spanner.v1.ResultSet result = + 
com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata(RESULT_SET_METADATA) + .addRows( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE)) + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("1.23")) + .addValues( + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE)) + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("2.34")) + .addValues( + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE)) + .build())) + .build()) + .build(); + mockSpanner.putStatementResult(StatementResult.query(statement, result)); + + try (ResultSet resultSet = databaseClient.singleUse().executeQuery(statement)) { + resultSet.next(); + + assertEquals( + Value.pgNumericArray(Arrays.asList("1.23", null, "2.34", null)), + resultSet.getValue("PgNumericArray")); + assertEquals( + Value.pgNumericArray(Arrays.asList("1.23", null, "2.34", null)), resultSet.getValue(1)); + } + } + + @Test + public void testQueryNaNs() { + final Statement statement = + Statement.of("SELECT PgNumeric, PgNumericArray FROM Table WHERE Id = 2"); + final com.google.spanner.v1.ResultSet result = + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata(RESULT_SET_METADATA) + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("NaN")) + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("NaN")) + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue("NaN")) + .build())) + .build()) + .build(); + mockSpanner.putStatementResult(StatementResult.query(statement, result)); + + try (ResultSet resultSet = databaseClient.singleUse().executeQuery(statement)) { + resultSet.next(); + + 
assertEquals(Value.pgNumeric("NaN"), resultSet.getValue("PgNumeric")); + assertEquals(Value.pgNumeric("NaN"), resultSet.getValue(0)); + assertEquals( + Value.pgNumericArray(Arrays.asList("NaN", "NaN")), resultSet.getValue("PgNumericArray")); + assertEquals(Value.pgNumericArray(Arrays.asList("NaN", "NaN")), resultSet.getValue(1)); + } + } + + @Test + public void testQueryNulls() { + final Statement statement = + Statement.of("SELECT PgNumeric, PgNumericArray FROM Table WHERE Id = 1"); + final com.google.spanner.v1.ResultSet result = + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata(RESULT_SET_METADATA) + .addRows( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE)) + .addValues( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE)) + .build()) + .build(); + mockSpanner.putStatementResult(StatementResult.query(statement, result)); + + try (ResultSet resultSet = databaseClient.singleUse().executeQuery(statement)) { + resultSet.next(); + + assertTrue(resultSet.isNull("PgNumeric")); + assertTrue(resultSet.isNull(0)); + assertTrue(resultSet.isNull("PgNumericArray")); + assertTrue(resultSet.isNull(1)); + } + } + + @Test + public void testMutation() { + final List mutations = + Collections.singletonList( + Mutation.newInsertBuilder("Table") + .set("PgNumeric") + .to("1.23") + .set("PgNumericNull") + .to((String) null) + .set("PgNumericNaN") + .to("NaN") + .set("PgNumericValue") + .to(Value.pgNumeric("2.34")) + .set("PgNumericArray") + .toStringArray(Arrays.asList("2.34", null, "3.45")) + .set("PgNumericArrayNull") + .toStringArray(null) + .build()); + final List expectedMutations = new ArrayList<>(); + Mutation.toProtoAndReturnRandomMutation(mutations, expectedMutations); + + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.buffer(mutations); + return null; + }); + + final List requests = mockSpanner.getRequestsOfType(CommitRequest.class); + 
final CommitRequest request = requests.get(0); + assertEquals(1, requests.size()); + assertEquals(expectedMutations, request.getMutationsList()); + } + + @Test + public void testParameterizedStatement() { + final Statement statement = + Statement.newBuilder("SELECT * FROM Table WHERE PgNumeric IN (@col1, @col2, @col3)") + .bind("col1") + .to(Value.pgNumeric("1.23")) + .bind("col2") + .to(Value.pgNumeric("NaN")) + .bind("col3") + .to(Value.pgNumeric(null)) + .build(); + final com.google.spanner.v1.ResultSet result = + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata(RESULT_SET_METADATA) + .addRows(ListValue.newBuilder().build()) + .build(); + mockSpanner.putStatementResult(StatementResult.query(statement, result)); + + try (ResultSet resultSet = databaseClient.singleUse().executeQuery(statement)) { + resultSet.next(); + + final List requests = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + final ExecuteSqlRequest request = requests.get(0); + + assertEquals(1, requests.size()); + assertEquals( + ImmutableMap.of( + "col1", Type.pgNumeric().toProto(), + "col2", Type.pgNumeric().toProto(), + "col3", Type.pgNumeric().toProto()), + request.getParamTypesMap()); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/RandomResultSetGenerator.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/RandomResultSetGenerator.java new file mode 100644 index 000000000000..051546352cbb --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/RandomResultSetGenerator.java @@ -0,0 +1,193 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.client.util.Base64; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.protobuf.ListValue; +import com.google.protobuf.NullValue; +import com.google.protobuf.Value; +import com.google.protobuf.util.Timestamps; +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeCode; +import java.math.BigInteger; +import java.util.Random; + +/** + * @deprecated Use {@link com.google.cloud.spanner.connection.RandomResultSetGenerator} instead. 
+ */ +@Deprecated +public class RandomResultSetGenerator { + private static final Type[] TYPES = + new Type[] { + Type.newBuilder().setCode(TypeCode.BOOL).build(), + Type.newBuilder().setCode(TypeCode.INT64).build(), + Type.newBuilder().setCode(TypeCode.FLOAT64).build(), + Type.newBuilder().setCode(TypeCode.FLOAT32).build(), + Type.newBuilder().setCode(TypeCode.STRING).build(), + Type.newBuilder().setCode(TypeCode.BYTES).build(), + Type.newBuilder().setCode(TypeCode.DATE).build(), + Type.newBuilder().setCode(TypeCode.INTERVAL).build(), + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build(), + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType(Type.newBuilder().setCode(TypeCode.BOOL)) + .build(), + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType(Type.newBuilder().setCode(TypeCode.INT64)) + .build(), + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType(Type.newBuilder().setCode(TypeCode.FLOAT64)) + .build(), + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType(Type.newBuilder().setCode(TypeCode.FLOAT32)) + .build(), + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType(Type.newBuilder().setCode(TypeCode.STRING)) + .build(), + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType(Type.newBuilder().setCode(TypeCode.BYTES)) + .build(), + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType(Type.newBuilder().setCode(TypeCode.DATE)) + .build(), + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType(Type.newBuilder().setCode(TypeCode.INTERVAL)) + .build(), + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType(Type.newBuilder().setCode(TypeCode.TIMESTAMP)) + .build(), + }; + + private static ResultSetMetadata generateMetadata() { + StructType.Builder rowTypeBuilder = StructType.newBuilder(); + for (int col = 0; col < TYPES.length; col++) { + rowTypeBuilder.addFields(Field.newBuilder().setName("COL" + 
col).setType(TYPES[col])).build(); + } + ResultSetMetadata.Builder builder = ResultSetMetadata.newBuilder(); + builder.setRowType(rowTypeBuilder.build()); + return builder.build(); + } + + private static final ResultSetMetadata METADATA = generateMetadata(); + + private final int rowCount; + private final Random random = new Random(); + + public RandomResultSetGenerator(int rowCount) { + this.rowCount = rowCount; + } + + public ResultSet generate() { + ResultSet.Builder builder = ResultSet.newBuilder(); + for (int row = 0; row < rowCount; row++) { + ListValue.Builder rowBuilder = ListValue.newBuilder(); + for (Type type : TYPES) { + Value.Builder valueBuilder = Value.newBuilder(); + setRandomValue(valueBuilder, type); + rowBuilder.addValues(valueBuilder.build()); + } + builder.addRows(rowBuilder.build()); + } + builder.setMetadata(METADATA); + return builder.build(); + } + + private void setRandomValue(Value.Builder builder, Type type) { + if (randomNull()) { + builder.setNullValue(NullValue.NULL_VALUE); + } else { + switch (type.getCode()) { + case ARRAY: + int length = random.nextInt(20) + 1; + ListValue.Builder arrayBuilder = ListValue.newBuilder(); + for (int i = 0; i < length; i++) { + Value.Builder valueBuilder = Value.newBuilder(); + setRandomValue(valueBuilder, type.getArrayElementType()); + arrayBuilder.addValues(valueBuilder.build()); + } + builder.setListValue(arrayBuilder.build()); + break; + case BOOL: + builder.setBoolValue(random.nextBoolean()); + break; + case STRING: + case BYTES: + byte[] bytes = new byte[random.nextInt(200)]; + random.nextBytes(bytes); + builder.setStringValue(Base64.encodeBase64String(bytes)); + break; + case DATE: + Date date = + Date.fromYearMonthDay( + random.nextInt(2019) + 1, random.nextInt(11) + 1, random.nextInt(28) + 1); + builder.setStringValue(date.toString()); + break; + case INTERVAL: + Interval interval = + Interval.builder() + .setMonths(random.nextInt(100) - 100) + .setDays(random.nextInt(100) - 100) + 
.setNanos(BigInteger.valueOf(random.nextInt(10000000) - 10000000)) + .build(); + builder.setStringValue(interval.toISO8601()); + break; + case FLOAT64: + builder.setNumberValue(random.nextDouble()); + break; + case FLOAT32: + builder.setNumberValue(random.nextFloat()); + break; + case INT64: + builder.setStringValue(String.valueOf(random.nextLong())); + break; + case TIMESTAMP: + com.google.protobuf.Timestamp ts = + Timestamps.add( + Timestamps.EPOCH, + com.google.protobuf.Duration.newBuilder() + .setSeconds(random.nextInt(100_000_000)) + .setNanos(random.nextInt(1000_000_000)) + .build()); + builder.setStringValue(Timestamp.fromProto(ts).toString()); + break; + case STRUCT: + case TYPE_CODE_UNSPECIFIED: + case UNRECOGNIZED: + default: + throw new IllegalArgumentException("Unknown or unsupported type: " + type.getCode()); + } + } + } + + private boolean randomNull() { + return random.nextInt(10) == 0; + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReadAsyncTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReadAsyncTest.java new file mode 100644 index 000000000000..1251ee270faf --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReadAsyncTest.java @@ -0,0 +1,440 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.MockSpannerTestUtil.*; +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.fail; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.AsyncResultSet.CallbackResponse; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.common.collect.ContiguousSet; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Iterables; +import io.grpc.Server; +import io.grpc.Status; +import io.grpc.inprocess.InProcessServerBuilder; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.Deque; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.SynchronousQueue; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ReadAsyncTest { + private static MockSpannerServiceImpl mockSpanner; + private static Server server; + private static LocalChannelProvider channelProvider; + + private static ExecutorService executor; + private Spanner spanner; + private DatabaseClient 
client; + + @BeforeClass + public static void setup() throws Exception { + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.putStatementResult( + StatementResult.query(READ_ONE_KEY_VALUE_STATEMENT, READ_ONE_KEY_VALUE_RESULTSET)); + mockSpanner.putStatementResult( + StatementResult.query(READ_ONE_EMPTY_KEY_VALUE_STATEMENT, EMPTY_KEY_VALUE_RESULTSET)); + mockSpanner.putStatementResult( + StatementResult.query( + READ_MULTIPLE_KEY_VALUE_STATEMENT, READ_MULTIPLE_KEY_VALUE_RESULTSET)); + + String uniqueName = InProcessServerBuilder.generateName(); + server = + InProcessServerBuilder.forName(uniqueName) + .scheduledExecutorService(new ScheduledThreadPoolExecutor(1)) + .addService(mockSpanner) + .build() + .start(); + channelProvider = LocalChannelProvider.create(uniqueName); + executor = Executors.newScheduledThreadPool(8); + } + + @AfterClass + public static void teardown() throws Exception { + executor.shutdown(); + server.shutdown(); + server.awaitTermination(); + } + + @Before + public void before() { + spanner = + SpannerOptions.newBuilder() + .setProjectId(TEST_PROJECT) + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder().setFailOnSessionLeak().setMinSessions(0).build()) + .build() + .getService(); + client = spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + } + + @After + public void after() { + spanner.close(); + mockSpanner.removeAllExecutionTimes(); + } + + @Test + public void readAsyncPropagatesError() throws Exception { + ApiFuture result; + try (AsyncResultSet resultSet = + client + .singleUse(TimestampBound.strong()) + .readAsync(EMPTY_READ_TABLE_NAME, KeySet.singleKey(Key.of("k99")), READ_COLUMN_NAMES)) { + result = + resultSet.setCallback( + executor, + ignored -> { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.CANCELLED, "Don't want the data"); + }); + } + SpannerException e = 
assertThrows(SpannerException.class, () -> get(result)); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.CANCELLED); + assertThat(e.getMessage()).contains("Don't want the data"); + } + + @Test + public void emptyReadAsync() throws Exception { + ApiFuture result; + try (AsyncResultSet resultSet = + client + .singleUse(TimestampBound.strong()) + .readAsync(EMPTY_READ_TABLE_NAME, KeySet.singleKey(Key.of("k99")), READ_COLUMN_NAMES)) { + result = + resultSet.setCallback( + executor, + rs -> { + while (true) { + switch (rs.tryNext()) { + case OK: + fail("received unexpected data"); + case NOT_READY: + return CallbackResponse.CONTINUE; + case DONE: + assertThat(rs.getType()).isEqualTo(READ_TABLE_TYPE); + return CallbackResponse.DONE; + } + } + }); + } + assertThat(result.get()).isNull(); + } + + @Test + public void pointReadAsync() throws Exception { + ApiFuture row = + client + .singleUse(TimestampBound.strong()) + .readRowAsync(READ_TABLE_NAME, Key.of("k1"), READ_COLUMN_NAMES); + assertThat(row.get()).isNotNull(); + assertThat(row.get().getString(0)).isEqualTo("k1"); + assertThat(row.get().getString(1)).isEqualTo("v1"); + } + + @Test + public void pointReadNotFound() throws Exception { + ApiFuture row = + client + .singleUse(TimestampBound.strong()) + .readRowAsync(EMPTY_READ_TABLE_NAME, Key.of("k999"), READ_COLUMN_NAMES); + assertThat(row.get()).isNull(); + } + + @Test + public void invalidDatabase() { + mockSpanner.setCreateSessionExecutionTime( + SimulatedExecutionTime.stickyDatabaseNotFoundException("invalid-database")); + mockSpanner.setBatchCreateSessionsExecutionTime( + SimulatedExecutionTime.stickyDatabaseNotFoundException("invalid-database")); + mockSpanner.freeze(); + DatabaseClient invalidClient = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, "invalid-database")); + ApiFuture row = + invalidClient + .singleUse(TimestampBound.strong()) + .readRowAsync(READ_TABLE_NAME, Key.of("k99"), READ_COLUMN_NAMES); + mockSpanner.unfreeze(); + 
assertThrows(DatabaseNotFoundException.class, () -> get(row)); + } + + @Test + public void tableNotFound() { + mockSpanner.setStreamingReadExecutionTime( + SimulatedExecutionTime.ofStickyException( + Status.NOT_FOUND + .withDescription("Table not found: BadTableName") + .asRuntimeException())); + ApiFuture row = + client + .singleUse(TimestampBound.strong()) + .readRowAsync("BadTableName", Key.of("k1"), READ_COLUMN_NAMES); + SpannerException e = assertThrows(SpannerException.class, () -> get(row)); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + assertThat(e.getMessage()).contains("BadTableName"); + } + + /** + * Ending a read-only transaction before an asynchronous query that was executed on that + * transaction has finished fetching all rows should keep the session checked out of the pool + * until all the rows have been returned. The session is then automatically returned to the + * session. + */ + @Test + public void closeTransactionBeforeEndOfAsyncQuery() throws Exception { + final BlockingQueue results = new SynchronousQueue<>(); + final SettableApiFuture finished = SettableApiFuture.create(); + ApiFuture closed; + DatabaseClientImpl clientImpl = (DatabaseClientImpl) client; + + final CountDownLatch dataReceived = new CountDownLatch(1); + try (ReadOnlyTransaction tx = client.readOnlyTransaction()) { + try (AsyncResultSet rs = + tx.readAsync(READ_TABLE_NAME, KeySet.all(), READ_COLUMN_NAMES, Options.bufferRows(1))) { + closed = + rs.setCallback( + executor, + resultSet -> { + try { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + finished.set(true); + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + dataReceived.countDown(); + results.put(resultSet.getString(0)); + } + } + } catch (Throwable t) { + finished.setException(t); + return CallbackResponse.DONE; + } + }); + } + // Wait until at least one row has been fetched. At that moment there should be one session + // checked out. 
+ dataReceived.await(); + } + List resultList = new ArrayList<>(); + do { + results.drainTo(resultList); + } while (!finished.isDone() || results.size() > 0); + assertThat(finished.get()).isTrue(); + assertThat(resultList).containsExactly("k1", "k2", "k3"); + closed.get(); + } + + @Test + public void readOnlyTransaction() throws Exception { + Statement statement1 = + Statement.of("SELECT * FROM TestTable WHERE Key IN ('k10', 'k11', 'k12')"); + Statement statement2 = Statement.of("SELECT * FROM TestTable WHERE Key IN ('k1', 'k2', 'k3"); + mockSpanner.putStatementResult( + StatementResult.query(statement1, generateKeyValueResultSet(ContiguousSet.closed(10, 12)))); + mockSpanner.putStatementResult( + StatementResult.query(statement2, generateKeyValueResultSet(ContiguousSet.closed(1, 3)))); + + ApiFuture> values1; + ApiFuture> values2; + try (ReadOnlyTransaction tx = client.readOnlyTransaction()) { + try (AsyncResultSet rs = tx.executeQueryAsync(statement1)) { + values1 = rs.toListAsync(input -> input.getString("Value"), executor); + } + try (AsyncResultSet rs = tx.executeQueryAsync(statement2)) { + values2 = rs.toListAsync(input -> input.getString("Value"), executor); + } + } + + ApiFuture>> allValuesAsList = + ApiFutures.allAsList(Arrays.asList(values1, values2)); + ApiFuture> allValues = + ApiFutures.transform( + allValuesAsList, + input -> + Iterables.mergeSorted( + input, + // Return in numerical order (i.e. without the preceding 'v'). 
+ Comparator.comparing(o -> Integer.valueOf(o.substring(1)))), + executor); + assertThat(allValues.get()).containsExactly("v1", "v2", "v3", "v10", "v11", "v12"); + } + + @Test + public void pauseResume() throws Exception { + Statement unevenStatement = + Statement.of("SELECT * FROM TestTable WHERE MOD(CAST(SUBSTR(Key, 2) AS INT64), 2) = 1"); + Statement evenStatement = + Statement.of("SELECT * FROM TestTable WHERE MOD(CAST(SUBSTR(Key, 2) AS INT64), 2) = 0"); + mockSpanner.putStatementResult( + StatementResult.query( + unevenStatement, generateKeyValueResultSet(ImmutableSet.of(1, 3, 5, 7, 9)))); + mockSpanner.putStatementResult( + StatementResult.query( + evenStatement, generateKeyValueResultSet(ImmutableSet.of(2, 4, 6, 8, 10)))); + + final Object lock = new Object(); + ApiFuture evenFinished; + ApiFuture unevenFinished; + final CountDownLatch unevenReturnedFirstRow = new CountDownLatch(1); + final Deque allValues = new ConcurrentLinkedDeque<>(); + try (ReadOnlyTransaction tx = client.readOnlyTransaction()) { + try (AsyncResultSet evenRs = tx.executeQueryAsync(evenStatement); + AsyncResultSet unevenRs = tx.executeQueryAsync(unevenStatement)) { + unevenFinished = + unevenRs.setCallback( + executor, + resultSet -> { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + synchronized (lock) { + allValues.add(resultSet.getString("Value")); + } + unevenReturnedFirstRow.countDown(); + return CallbackResponse.PAUSE; + } + } + }); + evenFinished = + evenRs.setCallback( + executor, + resultSet -> { + try { + // Make sure the uneven result set has returned the first before we start the + // even results. 
+ unevenReturnedFirstRow.await(); + while (true) { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + synchronized (lock) { + allValues.add(resultSet.getString("Value")); + } + return CallbackResponse.PAUSE; + } + } + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + }); + while (!(evenFinished.isDone() && unevenFinished.isDone())) { + synchronized (lock) { + if (allValues.peekLast() != null) { + if (Integer.parseInt(allValues.peekLast().substring(1)) % 2 == 1) { + evenRs.resume(); + } else { + unevenRs.resume(); + } + } + if (allValues.size() == 10) { + unevenRs.resume(); + evenRs.resume(); + } + } + } + } + } + assertThat(ApiFutures.allAsList(Arrays.asList(evenFinished, unevenFinished)).get()) + .containsExactly(null, null); + assertThat(allValues) + .containsExactly("v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10"); + } + + @Test + public void cancel() throws Exception { + final List values = new LinkedList<>(); + final CountDownLatch receivedFirstRow = new CountDownLatch(1); + final CountDownLatch cancelled = new CountDownLatch(1); + final ApiFuture res; + try (AsyncResultSet rs = + client.singleUse().readAsync(READ_TABLE_NAME, KeySet.all(), READ_COLUMN_NAMES)) { + res = + rs.setCallback( + executor, + resultSet -> { + try { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + values.add(resultSet.getString("Value")); + receivedFirstRow.countDown(); + cancelled.await(); + break; + } + } + } catch (Throwable t) { + return CallbackResponse.DONE; + } + }); + receivedFirstRow.await(); + rs.cancel(); + } + cancelled.countDown(); + SpannerException e = assertThrows(SpannerException.class, () -> get(res)); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.CANCELLED); + assertThat(values).containsExactly("v1"); + 
} + + private boolean isMultiplexedSessionsEnabled() { + if (spanner.getOptions() == null || spanner.getOptions().getSessionPoolOptions() == null) { + return false; + } + return spanner.getOptions().getSessionPoolOptions().getUseMultiplexedSession(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReadFormatTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReadFormatTest.java new file mode 100644 index 000000000000..1bbfa92a0190 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReadFormatTest.java @@ -0,0 +1,23 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import org.junit.runner.RunWith; + +/** Unit tests for running the test read format tests. 
*/ +@RunWith(ReadFormatTestRunner.class) +public class ReadFormatTest {} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReadFormatTestRunner.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReadFormatTestRunner.java new file mode 100644 index 000000000000..ff26f774b4b0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReadFormatTestRunner.java @@ -0,0 +1,273 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.cloud.ByteArray; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.common.io.Resources; +import com.google.protobuf.util.JsonFormat; +import com.google.spanner.v1.MultiplexedSessionPrecommitToken; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.Transaction; +import java.math.BigDecimal; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import javax.annotation.Nullable; +import org.json.JSONArray; +import org.json.JSONException; +import org.json.JSONObject; +import org.junit.Assert; +import org.junit.internal.runners.model.EachTestNotifier; +import org.junit.runner.Description; +import org.junit.runner.notification.RunNotifier; +import org.junit.runners.ParentRunner; +import org.junit.runners.model.InitializationError; + +/** Test runner that runs tests specified in json file */ +public class ReadFormatTestRunner extends ParentRunner { + + private static class NoOpListener implements AbstractResultSet.Listener { + @Override + public void onTransactionMetadata(Transaction transaction, boolean shouldIncludeId) + throws SpannerException {} + + @Override + public SpannerException onError( + SpannerException e, boolean withBeginTransaction, boolean lastStatement) { + return e; + } + + @Override + public void onDone(boolean withBeginTransaction) {} + + @Override + public void onPrecommitToken(MultiplexedSessionPrecommitToken token) {} + } + + public ReadFormatTestRunner(Class clazz) throws InitializationError { + super(clazz); + } + + @Override + protected Description describeChild(JSONObject child) { + try { + return Description.createTestDescription( + getTestClass().getJavaClass(), child.getString("name")); + } catch (JSONException e) { + throw new 
IllegalStateException("Illegal json object: " + child.toString(), e); + } + } + + @Override + protected void runChild(JSONObject child, RunNotifier notifier) { + EachTestNotifier eachNotifier = new EachTestNotifier(notifier, describeChild(child)); + eachNotifier.fireTestStarted(); + try { + new TestCaseRunner(child).run(); + eachNotifier.fireTestFinished(); + } catch (Exception | AssertionError e) { + eachNotifier.addFailure(e); + } + } + + @Override + protected List getChildren() { + try { + List children = new ArrayList<>(); + String jsonStr = + Resources.toString( + Resources.getResource(this.getClass(), "read_tests.json"), StandardCharsets.UTF_8); + JSONObject json = new JSONObject(jsonStr); + JSONArray testCases = json.getJSONArray("tests"); + for (int i = 0; i < testCases.length(); i++) { + JSONObject testCase = testCases.getJSONObject(i); + children.add(testCase); + } + return children; + } catch (Exception e) { + throw new IllegalStateException(e); + } + } + + private static class TestCaseRunner { + private GrpcResultSet resultSet; + private SpannerRpc.ResultStreamConsumer consumer; + private GrpcStreamIterator stream; + private JSONObject testCase; + + TestCaseRunner(JSONObject testCase) { + this.testCase = testCase; + } + + private void run() throws Exception { + stream = + new GrpcStreamIterator( + /* lastStatement= */ false, 10, /* cancelQueryWhenClientIsClosed= */ false); + stream.setCall( + new SpannerRpc.StreamingCall() { + @Override + public ApiCallContext getCallContext() { + return GrpcCallContext.createDefault(); + } + + @Override + public void cancel(@Nullable String message) {} + + @Override + public void request(int numMessages) {} + }, + false); + consumer = stream.consumer(); + resultSet = new GrpcResultSet(stream, new NoOpListener()); + + JSONArray chunks = testCase.getJSONArray("chunks"); + JSONObject expectedResult = testCase.getJSONObject("result"); + for (int i = 0; i < chunks.length(); i++) { + PartialResultSet.Builder builder = 
PartialResultSet.newBuilder(); + JsonFormat.parser().merge(chunks.getString(i), builder); + consumer.onPartialResultSet(builder.build()); + } + consumer.onCompleted(); + assertResultSet(resultSet, expectedResult.getJSONArray("value")); + } + + private void assertResultSet(GrpcResultSet actual, JSONArray expected) throws Exception { + int i = 0; + while (actual.next()) { + Struct actualRow = actual.getCurrentRowAsStruct(); + JSONArray expectedRow = expected.getJSONArray(i); + assertRow(actualRow, expectedRow); + i++; + } + assertThat(i).isEqualTo(expected.length()); + } + + private void assertRow(Struct actualRow, JSONArray expectedRow) throws Exception { + assertThat(actualRow.getColumnCount()).isEqualTo(expectedRow.length()); + for (int i = 0; i < expectedRow.length(); i++) { + switch (actualRow.getColumnType(i).getCode()) { + case BOOL: + assertThat(actualRow.getBoolean(i)).isEqualTo(expectedRow.getBoolean(i)); + break; + case STRING: + assertThat(actualRow.getString(i)).isEqualTo(expectedRow.getString(i)); + break; + case JSON: + assertThat(actualRow.getJson(i)).isEqualTo(expectedRow.getString(i)); + break; + case INT64: + assertThat(actualRow.getLong(i)).isEqualTo(expectedRow.getLong(i)); + break; + case FLOAT32: + assertThat(actualRow.getFloat(i)).isEqualTo(expectedRow.getFloat(i)); + break; + case FLOAT64: + assertThat(actualRow.getDouble(i)).isEqualTo(expectedRow.getDouble(i)); + break; + case NUMERIC: + assertThat(actualRow.getBigDecimal(i)).isEqualTo(expectedRow.getBigDecimal(i)); + break; + case BYTES: + assertThat(actualRow.getBytes(i)) + .isEqualTo(ByteArray.fromBase64(expectedRow.getString(i))); + break; + case ARRAY: + Type elementType = actualRow.getColumnType(i).getArrayElementType(); + assertArray(getRawList(actualRow, i, elementType), expectedRow.getJSONArray(i)); + break; + default: + Assert.fail("Unexpected type code:" + actualRow.getColumnType(i).getCode()); + } + } + } + + private List getRawList(Struct actualRow, int index, Type elementType) 
{ + List rawList = null; + switch (elementType.getCode()) { + case BOOL: + rawList = actualRow.getBooleanList(index); + break; + case STRING: + rawList = actualRow.getStringList(index); + break; + case JSON: + rawList = actualRow.getJsonList(index); + break; + case BYTES: + rawList = actualRow.getBytesList(index); + break; + case INT64: + rawList = actualRow.getLongList(index); + break; + case FLOAT32: + rawList = actualRow.getFloatList(index); + break; + case FLOAT64: + rawList = actualRow.getDoubleList(index); + break; + case NUMERIC: + rawList = actualRow.getBigDecimalList(index); + break; + case STRUCT: + rawList = actualRow.getStructList(index); + break; + default: + Assert.fail("Unexpected type code:" + elementType.getCode()); + } + return rawList; + } + + private void assertArray(List actualValues, JSONArray expectedList) throws Exception { + assertThat(actualValues.size()).isEqualTo(expectedList.length()); + for (int i = 0; i < actualValues.size(); i++) { + Object actualValue = actualValues.get(i); + if (actualValue == null) { + assertThat(expectedList.isNull(i)).isTrue(); + } else { + if (actualValue instanceof Boolean) { + assertThat((Boolean) actualValue).isEqualTo(expectedList.getBoolean(i)); + } else if (actualValue instanceof String) { + assertThat((String) actualValue).isEqualTo(expectedList.getString(i)); + } else if (actualValue instanceof Long) { + assertThat((Long) actualValue).isEqualTo(expectedList.getLong(i)); + } else if (actualValue instanceof Double) { + // "Infinity" is not a valid parseable json double value + if (expectedList.get(i) instanceof String) { + assertThat((Double) actualValue).isEqualTo(Double.valueOf(expectedList.getString(i))); + } else { + assertThat((Double) actualValue).isEqualTo(expectedList.getDouble(i)); + } + } else if (actualValue instanceof BigDecimal) { + assertThat((BigDecimal) actualValue).isEqualTo(expectedList.getBigDecimal(i)); + } else if (actualValue instanceof ByteArray) { + assertThat((ByteArray) 
actualValue) + .isEqualTo(ByteArray.fromBase64(expectedList.getString(i))); + } else if (actualValue instanceof Struct) { + Struct actualStruct = (Struct) actualValue; + JSONArray expectedFields = expectedList.getJSONArray(i); + assertRow(actualStruct, expectedFields); + } + } + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReadWriteTransactionWithInlineBeginTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReadWriteTransactionWithInlineBeginTest.java new file mode 100644 index 000000000000..492252d486cb --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReadWriteTransactionWithInlineBeginTest.java @@ -0,0 +1,495 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; + +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.common.collect.Iterables; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ListValue; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.TransactionOptions; +import com.google.spanner.v1.TransactionOptions.ReadWrite; +import com.google.spanner.v1.TypeCode; +import io.grpc.Server; +import io.grpc.Status; +import io.grpc.inprocess.InProcessServerBuilder; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ReadWriteTransactionWithInlineBeginTest { + private static MockSpannerServiceImpl mockSpanner; + private static Server server; + private static LocalChannelProvider channelProvider; + private static final Statement UPDATE_STATEMENT = + Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=2"); + private static final 
Statement INVALID_UPDATE_STATEMENT = + Statement.of("UPDATE NON_EXISTENT_TABLE SET BAR=1 WHERE BAZ=2"); + private static final Statement INVALID_SELECT_STATEMENT = + Statement.of("SELECT * FROM NON_EXISTENT_TABLE"); + private static final long UPDATE_COUNT = 1L; + private static final Statement SELECT1 = Statement.of("SELECT 1 AS COL1"); + private static final ResultSetMetadata SELECT1_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("COL1") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.INT64) + .build()) + .build()) + .build()) + .build(); + private static final com.google.spanner.v1.ResultSet SELECT1_RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("1").build()) + .build()) + .setMetadata(SELECT1_METADATA) + .build(); + private static final TransactionOptions OPTIMISTIC_LOCK_OPTIONS = + TransactionOptions.newBuilder() + .setReadWrite(ReadWrite.newBuilder().setReadLockMode(ReadWrite.ReadLockMode.OPTIMISTIC)) + .build(); + private Spanner spanner; + private DatabaseClient client; + + @BeforeClass + public static void startStaticServer() throws Exception { + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. 
+ mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + mockSpanner.putStatementResult(StatementResult.query(SELECT1, SELECT1_RESULTSET)); + mockSpanner.putStatementResult( + StatementResult.exception( + INVALID_UPDATE_STATEMENT, + Status.INVALID_ARGUMENT.withDescription("invalid statement").asRuntimeException())); + mockSpanner.putStatementResult( + StatementResult.exception( + INVALID_SELECT_STATEMENT, + Status.INVALID_ARGUMENT.withDescription("invalid statement").asRuntimeException())); + mockSpanner.putStatementResult( + StatementResult.read( + "FOO", KeySet.all(), Collections.singletonList("ID"), SELECT1_RESULTSET)); + + String uniqueName = InProcessServerBuilder.generateName(); + server = + InProcessServerBuilder.forName(uniqueName) + // We need to use a real executor for timeouts to occur. + .scheduledExecutorService(new ScheduledThreadPoolExecutor(1)) + .addService(mockSpanner) + .build() + .start(); + channelProvider = LocalChannelProvider.create(uniqueName); + } + + @AfterClass + public static void stopServer() throws InterruptedException { + server.shutdown(); + server.awaitTermination(); + } + + @Before + public void setUp() { + mockSpanner.reset(); + mockSpanner.removeAllExecutionTimes(); + spanner = + SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .build() + .getService(); + client = spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + } + + @After + public void tearDown() { + spanner.close(); + } + + @Test + public void singleUpdate() { + Long updateCount = + client + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(UPDATE_STATEMENT)); + assertThat(updateCount).isEqualTo(UPDATE_COUNT); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void singleBatchUpdate() { + 
long[] updateCounts = + client + .readWriteTransaction() + .run( + transaction -> + transaction.batchUpdate(Arrays.asList(UPDATE_STATEMENT, UPDATE_STATEMENT))); + assertThat(updateCounts).isEqualTo(new long[] {UPDATE_COUNT, UPDATE_COUNT}); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void singleQuery() { + Long value = MockSpannerTestActions.executeSelect1(client); + assertThat(value).isEqualTo(1L); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void updateAndQuery() { + long[] res = + client + .readWriteTransaction() + .run( + transaction -> { + long updateCount = transaction.executeUpdate(UPDATE_STATEMENT); + long val = 0L; + try (ResultSet rs = transaction.executeQuery(SELECT1)) { + while (rs.next()) { + val = rs.getLong(0); + } + } + return new long[] {updateCount, val}; + }); + assertThat(res).isEqualTo(new long[] {UPDATE_COUNT, 1L}); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void concurrentUpdates() { + final int updates = 100; + final ExecutorService service = Executors.newFixedThreadPool(8); + Long updateCount = + client + .readWriteTransaction() + .run( + transaction -> { + List> list = new ArrayList<>(updates); + for (int i = 0; i < updates; i++) { + list.add(service.submit(() -> transaction.executeUpdate(UPDATE_STATEMENT))); + } + long totalUpdateCount = 0L; + for (Future fut : list) { + totalUpdateCount += fut.get(); + } + return totalUpdateCount; + }); + assertThat(updateCount).isEqualTo(UPDATE_COUNT * updates); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void concurrentBatchUpdates() { + final int updates = 100; + final 
ExecutorService service = Executors.newFixedThreadPool(8); + Long updateCount = + client + .readWriteTransaction() + .run( + transaction -> { + List> list = new ArrayList<>(updates); + for (int i = 0; i < updates; i++) { + list.add( + service.submit( + () -> + transaction.batchUpdate( + Arrays.asList(UPDATE_STATEMENT, UPDATE_STATEMENT)))); + } + long totalUpdateCount = 0L; + for (Future fut : list) { + for (long l : fut.get()) { + totalUpdateCount += l; + } + } + return totalUpdateCount; + }); + assertThat(updateCount).isEqualTo(UPDATE_COUNT * updates * 2); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void concurrentQueries() { + final int queries = 100; + final ExecutorService service = Executors.newFixedThreadPool(8); + Long selectedTotal = + client + .readWriteTransaction() + .run( + transaction -> { + List> list = new ArrayList<>(queries); + for (int i = 0; i < queries; i++) { + list.add( + service.submit( + () -> { + try (ResultSet rs = transaction.executeQuery(SELECT1)) { + while (rs.next()) { + return rs.getLong(0); + } + } + return 0L; + })); + } + long selectedTotal1 = 0L; + for (Future fut : list) { + selectedTotal1 += fut.get(); + } + return selectedTotal1; + }); + assertThat(selectedTotal).isEqualTo(queries); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void failedUpdate() { + SpannerException e = + assertThrows( + SpannerException.class, + () -> + client + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(INVALID_UPDATE_STATEMENT))); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void failedBatchUpdate() { + SpannerException e = + assertThrows( + 
SpannerException.class, + () -> + client + .readWriteTransaction() + .run( + transaction -> + transaction.batchUpdate( + Arrays.asList(INVALID_UPDATE_STATEMENT, UPDATE_STATEMENT)))); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void failedQuery() { + SpannerException e = + assertThrows( + SpannerException.class, + () -> + client + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet rs = transaction.executeQuery(INVALID_SELECT_STATEMENT)) { + rs.next(); + } + return null; + })); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(1); + } + + @Test + public void failedUpdateAndThenUpdate() { + Long updateCount = + client + .readWriteTransaction() + .run( + transaction -> { + // This update statement carries the BeginTransaction, but fails. This will + // cause the entire transaction to be retried with an explicit + // BeginTransaction RPC to ensure all statements in the transaction are + // actually executed against the same transaction. + SpannerException e = + assertThrows( + SpannerException.class, + () -> transaction.executeUpdate(INVALID_UPDATE_STATEMENT)); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + return transaction.executeUpdate(UPDATE_STATEMENT); + }); + assertThat(updateCount).isEqualTo(1L); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + assertThat(countTransactionsStarted()).isEqualTo(2); + } + + @Test + public void failedBatchUpdateAndThenUpdate() { + Long updateCount = + client + .readWriteTransaction() + .run( + transaction -> { + // This update statement carries the BeginTransaction, but fails. 
This will + // cause the entire transaction to be retried with an explicit + // BeginTransaction RPC to ensure all statements in the transaction are + // actually executed against the same transaction. + SpannerException e = + assertThrows( + SpannerException.class, + () -> + transaction.batchUpdate( + Arrays.asList(INVALID_UPDATE_STATEMENT, UPDATE_STATEMENT))); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + return transaction.executeUpdate(UPDATE_STATEMENT); + }); + assertThat(updateCount).isEqualTo(1L); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + assertThat(countTransactionsStarted()).isEqualTo(2); + } + + @Test + public void executeSqlWithOptimisticConcurrencyControl() { + MockSpannerTestActions.executeSelect1(client, Options.optimisticLock()); + Collection requests = + mockSpanner.getRequests().stream() + .filter(msg -> msg.getClass().equals(ExecuteSqlRequest.class)) + .collect(Collectors.toList()); + assertEquals(requests.size(), 1); + ExecuteSqlRequest request = (ExecuteSqlRequest) Iterables.getOnlyElement(requests); + assertEquals(request.getTransaction().getBegin(), OPTIMISTIC_LOCK_OPTIONS); + } + + @Test + public void readWithOptimisticConcurrencyControl() { + Long updateCount = MockSpannerTestActions.executeReadFoo(client, Options.optimisticLock()); + assertThat(updateCount).isEqualTo(1L); + Collection requests = + mockSpanner.getRequests().stream() + .filter(msg -> msg.getClass().equals(ReadRequest.class)) + .collect(Collectors.toList()); + assertEquals(requests.size(), 1); + ReadRequest request = (ReadRequest) Iterables.getOnlyElement(requests); + assertThat(request.getTransaction().getBegin()).isEqualTo(OPTIMISTIC_LOCK_OPTIONS); + } + + @Test + public void beginTransactionWithOptimisticConcurrencyControl() { + MockSpannerTestActions.executeInvalidAndValidSql(client, Options.optimisticLock()); + Collection requests = + mockSpanner.getRequests().stream() + .filter(msg -> 
msg.getClass().equals(BeginTransactionRequest.class)) + .collect(Collectors.toList()); + assertEquals(requests.size(), 1); + BeginTransactionRequest request = (BeginTransactionRequest) Iterables.getOnlyElement(requests); + assertEquals(request.getOptions(), OPTIMISTIC_LOCK_OPTIONS); + } + + @Test + public void failedQueryAndThenUpdate() { + Long updateCount = MockSpannerTestActions.executeInvalidAndValidSql(client); + assertThat(updateCount).isEqualTo(1L); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(1); + assertThat(countTransactionsStarted()).isEqualTo(2); + } + + @Test + public void abortedUpdate() { + final AtomicInteger attempt = new AtomicInteger(); + Long updateCount = + client + .readWriteTransaction() + .run( + transaction -> { + if (attempt.incrementAndGet() == 1) { + // We use abortNextTransaction here, as the transaction context does not yet + // have a transaction (it will be requested by the first update statement). + mockSpanner.abortNextTransaction(); + } + return transaction.executeUpdate(UPDATE_STATEMENT); + }); + assertThat(updateCount).isEqualTo(UPDATE_COUNT); + assertThat(attempt.get()).isEqualTo(2); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(2); + } + + @Test + public void abortedBatchUpdate() { + final AtomicInteger attempt = new AtomicInteger(); + long[] updateCounts = + client + .readWriteTransaction() + .run( + transaction -> { + if (attempt.incrementAndGet() == 1) { + // We use abortNextTransaction here, as the transaction context does not yet + // have a transaction (it will be requested by the first update statement). 
+ mockSpanner.abortNextTransaction(); + } + return transaction.batchUpdate(Arrays.asList(UPDATE_STATEMENT, UPDATE_STATEMENT)); + }); + assertThat(updateCounts).isEqualTo(new long[] {UPDATE_COUNT, UPDATE_COUNT}); + assertThat(attempt.get()).isEqualTo(2); + assertThat(countRequests(BeginTransactionRequest.class)).isEqualTo(0); + assertThat(countTransactionsStarted()).isEqualTo(2); + } + + private int countRequests(Class requestType) { + int count = 0; + for (AbstractMessage msg : mockSpanner.getRequests()) { + if (msg.getClass().equals(requestType)) { + count++; + } + } + return count; + } + + private int countTransactionsStarted() { + return mockSpanner.getTransactionsStarted().size(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReplicaInfoTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReplicaInfoTest.java new file mode 100644 index 000000000000..96005791f3d8 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReplicaInfoTest.java @@ -0,0 +1,57 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.ReplicaInfo.ReplicaType.READ_WRITE; +import static org.junit.Assert.assertEquals; + +import com.google.cloud.spanner.ReplicaInfo.ReplicaType; +import org.junit.Test; + +public class ReplicaInfoTest { + + @Test + public void testBuildReplicaInfo() { + final String location = "Location"; + final ReplicaType type = READ_WRITE; + final boolean defaultLeaderLocation = true; + final com.google.spanner.admin.instance.v1.ReplicaInfo proto = + com.google.spanner.admin.instance.v1.ReplicaInfo.newBuilder().build(); + + assertEquals( + new ReplicaInfo(location, type, defaultLeaderLocation, proto), + ReplicaInfo.newBuilder() + .setLocation(location) + .setType(type) + .setDefaultLeaderLocation(defaultLeaderLocation) + .setProto(proto) + .build()); + } + + @Test + public void testFromProto() { + final com.google.spanner.admin.instance.v1.ReplicaInfo proto = + com.google.spanner.admin.instance.v1.ReplicaInfo.newBuilder() + .setLocation("Location") + .setType(com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType.READ_WRITE) + .setDefaultLeaderLocation(true) + .build(); + + assertEquals( + new ReplicaInfo("Location", READ_WRITE, true, proto), ReplicaInfo.fromProto(proto)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReplicaTypeTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReplicaTypeTest.java new file mode 100644 index 000000000000..065a94ba2266 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReplicaTypeTest.java @@ -0,0 +1,66 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; + +import com.google.cloud.spanner.ReplicaInfo.ReplicaType; +import org.junit.Test; + +public class ReplicaTypeTest { + + @Test + public void testTypeUnspecifiedReplicaType() { + final ReplicaType replicaType = + ReplicaType.fromProto( + com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType.TYPE_UNSPECIFIED); + + assertEquals(ReplicaType.TYPE_UNSPECIFIED, replicaType); + } + + @Test + public void testReadWriteReplicaType() { + final ReplicaType replicaType = + ReplicaType.fromProto( + com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType.READ_WRITE); + + assertEquals(ReplicaType.READ_WRITE, replicaType); + } + + @Test + public void testReadOnlyReplicaType() { + final ReplicaType replicaType = + ReplicaType.fromProto( + com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType.READ_ONLY); + + assertEquals(ReplicaType.READ_ONLY, replicaType); + } + + @Test + public void testWitnessReplicaType() { + final ReplicaType replicaType = + ReplicaType.fromProto(com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType.WITNESS); + + assertEquals(ReplicaType.WITNESS, replicaType); + } + + @Test(expected = IllegalArgumentException.class) + public void testUnrecognizedReplicaType() { + ReplicaType.fromProto( + com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType.UNRECOGNIZED); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/RequestIdMockServerTest.java 
b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/RequestIdMockServerTest.java new file mode 100644 index 000000000000..eac63010915f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/RequestIdMockServerTest.java @@ -0,0 +1,725 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeTrue; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.connection.RandomResultSetGenerator; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.ByteString; +import com.google.protobuf.ListValue; +import com.google.protobuf.Value; +import com.google.rpc.RetryInfo; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.CreateSessionRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.ResultSetMetadata; +import 
com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeCode; +import io.grpc.Context; +import io.grpc.Contexts; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Metadata; +import io.grpc.Server; +import io.grpc.ServerCall; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptor; +import io.grpc.Status; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import io.grpc.protobuf.ProtoUtils; +import java.net.InetSocketAddress; +import java.util.List; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.TimeUnit; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.threeten.bp.Duration; + +@SuppressWarnings({"StatementWithEmptyBody", "resource"}) +@RunWith(JUnit4.class) +public class RequestIdMockServerTest { + private static MockSpannerServiceImpl mockSpanner; + private static Server server; + private static Spanner spanner; + + private static final Statement SELECT1 = Statement.of("SELECT 1"); + private static final com.google.spanner.v1.ResultSet SELECT1_RESULT_SET = + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("c") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues(Value.newBuilder().setStringValue("1").build()) + .build()) + .build(); + private static final Statement DML = Statement.of("insert into test_table (id) values (1)"); + + private static final ConcurrentLinkedQueue requestIds = + new ConcurrentLinkedQueue<>(); + + @BeforeClass + public static void setup() throws Exception { + 
assumeTrue(System.getenv("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS") == null); + assumeTrue(System.getenv("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW") == null); + + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. + + InetSocketAddress address = new InetSocketAddress("localhost", 0); + server = + NettyServerBuilder.forAddress(address) + .addService(mockSpanner) + .intercept( + new ServerInterceptor() { + @Override + public ServerCall.Listener interceptCall( + ServerCall call, + Metadata headers, + ServerCallHandler next) { + try { + String requestId = headers.get(XGoogSpannerRequestId.REQUEST_ID_HEADER_KEY); + if (requestId != null) { + requestIds.add(XGoogSpannerRequestId.of(requestId)); + } else { + requestIds.add(XGoogSpannerRequestId.of(0, 0, 0, 0)); + } + } catch (Throwable t) { + // Ignore and continue + } + return Contexts.interceptCall(Context.current(), call, headers, next); + } + }) + .build() + .start(); + spanner = createSpanner(); + + setupResults(); + } + + private static Spanner createSpanner() { + return SpannerOptions.newBuilder() + .setProjectId("test-project") + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setHost("http://localhost:" + server.getPort()) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setFailOnSessionLeak() + .setMinSessions(0) + .setSkipVerifyingBeginTransactionForMuxRW(true) + .setWaitForMinSessions(Duration.ofSeconds(5)) + .build()) + .build() + .getService(); + } + + private static void setupResults() { + mockSpanner.putStatementResult(StatementResult.query(SELECT1, SELECT1_RESULT_SET)); + mockSpanner.putStatementResult(StatementResult.update(DML, 1L)); + } + + static Metadata createMinimalRetryInfo() { + Metadata trailers = new Metadata(); + RetryInfo retryInfo = + RetryInfo.newBuilder() + .setRetryDelay( + com.google.protobuf.Duration.newBuilder() 
+ .setNanos((int) TimeUnit.MILLISECONDS.toNanos(1L)) + .setSeconds(0L)) + .build(); + trailers.put(ProtoUtils.keyForProto(RetryInfo.getDefaultInstance()), retryInfo); + return trailers; + } + + @AfterClass + public static void teardown() throws InterruptedException { + if (spanner != null) { + spanner.close(); + } + if (server != null) { + server.shutdown(); + server.awaitTermination(); + } + } + + @Before + public void prepareTest() { + // Call getClient() to make sure the multiplexed session has been created. + // Then clear all requests that were received as part of that so we don't need to include + // that in the test verifications. + getClient(); + mockSpanner.reset(); + requestIds.clear(); + ((SpannerImpl) spanner).resetRequestIdCounters(); + } + + private DatabaseClient getClient() { + return spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + } + + private long getClientId() { + return ((SpannerImpl) spanner).getRequestIdClientId(); + } + + @Test + public void testSingleUseQuery() { + try (ResultSet resultSet = getClient().singleUse().executeQuery(SELECT1)) { + while (resultSet.next()) {} + } + + assertEquals(ImmutableList.of(ExecuteSqlRequest.class), mockSpanner.getRequestTypes()); + List actual = ImmutableList.copyOf(requestIds); + verifyRequestIds(ImmutableList.of(XGoogSpannerRequestId.of(getClientId(), -1, 1, 1)), actual); + } + + @Test + public void testQueryError() { + Statement query = Statement.of("select * from invalid_table"); + mockSpanner.putStatementResult( + StatementResult.exception( + query, Status.NOT_FOUND.withDescription("Table not found").asRuntimeException())); + + XGoogSpannerRequestId requestIdFromException; + try (ResultSet resultSet = getClient().singleUse().executeQuery(query)) { + SpannerException exception = assertThrows(SpannerException.class, resultSet::next); + assertEquals(ErrorCode.NOT_FOUND, exception.getErrorCode()); + assertNotNull(exception.getRequestId()); + assertNotEquals("Request ID should not be empty", "", 
exception.getRequestId()); + requestIdFromException = XGoogSpannerRequestId.of(exception.getRequestId()); + } + + assertEquals(ImmutableList.of(ExecuteSqlRequest.class), mockSpanner.getRequestTypes()); + List actual = ImmutableList.copyOf(requestIds); + verifyRequestIds(ImmutableList.of(XGoogSpannerRequestId.of(getClientId(), -1, 1, 1)), actual); + assertEquals(actual.get(0), requestIdFromException); + } + + @Test + public void testMultiUseReadOnlyTransaction() { + try (ReadOnlyTransaction transaction = getClient().readOnlyTransaction()) { + for (int i = 0; i < 2; i++) { + try (ResultSet resultSet = transaction.executeQuery(SELECT1)) { + while (resultSet.next()) {} + } + } + } + + assertEquals( + ImmutableList.of( + BeginTransactionRequest.class, ExecuteSqlRequest.class, ExecuteSqlRequest.class), + mockSpanner.getRequestTypes()); + List actual = ImmutableList.copyOf(requestIds); + verifyRequestIds( + ImmutableList.of( + XGoogSpannerRequestId.of(getClientId(), -1, 1, 1), + XGoogSpannerRequestId.of(getClientId(), -1, 2, 1), + XGoogSpannerRequestId.of(getClientId(), -1, 3, 1)), + actual); + verifySameChannelId(actual); + } + + @Test + public void testDml() { + getClient().readWriteTransaction().run(transaction -> transaction.executeUpdate(DML)); + + assertEquals( + ImmutableList.of(ExecuteSqlRequest.class, CommitRequest.class), + mockSpanner.getRequestTypes()); + List actual = ImmutableList.copyOf(requestIds); + verifyRequestIds( + ImmutableList.of( + XGoogSpannerRequestId.of(getClientId(), -1, 1, 1), + XGoogSpannerRequestId.of(getClientId(), -1, 2, 1)), + actual); + verifySameChannelId(actual); + } + + @Test + public void testDmlError() { + Statement invalidDml = Statement.of("insert into invalid_table (id) values (1)"); + mockSpanner.putStatementResult( + StatementResult.exception( + invalidDml, Status.NOT_FOUND.withDescription("Table not found").asRuntimeException())); + + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + getClient() 
+ .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(invalidDml))); + assertEquals(ErrorCode.NOT_FOUND, exception.getErrorCode()); + assertNotNull(exception.getRequestId()); + assertNotEquals("Request ID should not be empty", "", exception.getRequestId()); + XGoogSpannerRequestId requestIdFromException = + XGoogSpannerRequestId.of(exception.getRequestId()); + + assertEquals(ImmutableList.of(ExecuteSqlRequest.class), mockSpanner.getRequestTypes()); + List actual = ImmutableList.copyOf(requestIds); + verifyRequestIds(ImmutableList.of(XGoogSpannerRequestId.of(getClientId(), -1, 1, 1)), actual); + assertEquals(actual.get(0), requestIdFromException); + } + + @Test + public void testAbortedTransaction() { + mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofException( + Status.ABORTED.asRuntimeException(createMinimalRetryInfo()))); + getClient() + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet resultSet = transaction.executeQuery(SELECT1)) { + while (resultSet.next()) {} + } + return transaction.executeUpdate(DML); + }); + + assertEquals( + ImmutableList.of( + ExecuteSqlRequest.class, + ExecuteSqlRequest.class, + CommitRequest.class, + ExecuteSqlRequest.class, + ExecuteSqlRequest.class, + CommitRequest.class), + mockSpanner.getRequestTypes()); + List actual = ImmutableList.copyOf(requestIds); + int requestId = 0; + verifyRequestIds( + ImmutableList.of( + XGoogSpannerRequestId.of(getClientId(), -1, ++requestId, 1), + XGoogSpannerRequestId.of(getClientId(), -1, ++requestId, 1), + XGoogSpannerRequestId.of(getClientId(), -1, ++requestId, 1), + XGoogSpannerRequestId.of(getClientId(), -1, ++requestId, 1), + XGoogSpannerRequestId.of(getClientId(), -1, ++requestId, 1), + XGoogSpannerRequestId.of(getClientId(), -1, ++requestId, 1)), + actual); + verifySameChannelId(actual.subList(0, 3)); + verifySameChannelId(actual.subList(3, 6)); + } + + @Test + public void testMix() { + getClient() + .readWriteTransaction() + .run( + 
transaction -> { + try (ResultSet resultSet = transaction.executeQuery(SELECT1)) { + while (resultSet.next()) {} + } + return transaction.executeUpdate(DML); + }); + try (ReadOnlyTransaction transaction = getClient().readOnlyTransaction()) { + for (int i = 0; i < 2; i++) { + try (ResultSet resultSet = transaction.executeQuery(SELECT1)) { + while (resultSet.next()) {} + } + } + } + try (ResultSet resultSet = getClient().singleUse().executeQuery(SELECT1)) { + while (resultSet.next()) {} + } + mockSpanner.putStatementResult( + StatementResult.query( + Statement.of("SELECT my_column FROM my_table WHERE 1=1"), SELECT1_RESULT_SET)); + try (ResultSet resultSet = + getClient().singleUse().read("my_table", KeySet.all(), ImmutableList.of("my_column"))) { + while (resultSet.next()) {} + } + + assertEquals( + ImmutableList.of( + ExecuteSqlRequest.class, + ExecuteSqlRequest.class, + CommitRequest.class, + BeginTransactionRequest.class, + ExecuteSqlRequest.class, + ExecuteSqlRequest.class, + ExecuteSqlRequest.class, + ReadRequest.class), + mockSpanner.getRequestTypes()); + List actual = ImmutableList.copyOf(requestIds); + int requestId = 0; + verifyRequestIds( + ImmutableList.of( + XGoogSpannerRequestId.of(getClientId(), -1, ++requestId, 1), + XGoogSpannerRequestId.of(getClientId(), -1, ++requestId, 1), + XGoogSpannerRequestId.of(getClientId(), -1, ++requestId, 1), + XGoogSpannerRequestId.of(getClientId(), -1, ++requestId, 1), + XGoogSpannerRequestId.of(getClientId(), -1, ++requestId, 1), + XGoogSpannerRequestId.of(getClientId(), -1, ++requestId, 1), + XGoogSpannerRequestId.of(getClientId(), -1, ++requestId, 1), + XGoogSpannerRequestId.of(getClientId(), -1, ++requestId, 1)), + actual); + verifySameChannelId(actual.subList(0, 3)); + verifySameChannelId(actual.subList(3, 6)); + } + + @Test + public void testUnaryUnavailable() { + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofException( + Status.UNAVAILABLE.asRuntimeException(createMinimalRetryInfo()))); + + 
getClient().readWriteTransaction().run(transaction -> transaction.executeUpdate(DML)); + + assertEquals( + ImmutableList.of(ExecuteSqlRequest.class, ExecuteSqlRequest.class, CommitRequest.class), + mockSpanner.getRequestTypes()); + List actual = ImmutableList.copyOf(requestIds); + verifyRequestIds( + ImmutableList.of( + XGoogSpannerRequestId.of(getClientId(), -1, 1, 1), + XGoogSpannerRequestId.of(getClientId(), -1, 1, 2), + XGoogSpannerRequestId.of(getClientId(), -1, 2, 1)), + actual); + verifySameChannelId(actual); + } + + @Test + public void testStreamingQueryUnavailable() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException( + Status.UNAVAILABLE.asRuntimeException(createMinimalRetryInfo()))); + + try (ResultSet resultSet = getClient().singleUse().executeQuery(SELECT1)) { + while (resultSet.next()) {} + } + + assertEquals( + ImmutableList.of(ExecuteSqlRequest.class, ExecuteSqlRequest.class), + mockSpanner.getRequestTypes()); + List actual = ImmutableList.copyOf(requestIds); + verifyRequestIds( + ImmutableList.of( + XGoogSpannerRequestId.of(getClientId(), -1, 1, 1), + XGoogSpannerRequestId.of(getClientId(), -1, 1, 2)), + actual); + } + + @Test + public void testStreamingQueryUnavailableHalfway() { + int numRows = 5; + Statement statement = Statement.of("select * from random"); + mockSpanner.putStatementResult( + StatementResult.query(statement, new RandomResultSetGenerator(numRows).generate())); + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofStreamException( + Status.UNAVAILABLE.asRuntimeException(createMinimalRetryInfo()), 2)); + + try (ResultSet resultSet = getClient().singleUse().executeQuery(statement)) { + while (resultSet.next()) {} + } + + assertEquals( + ImmutableList.of(ExecuteSqlRequest.class, ExecuteSqlRequest.class), + mockSpanner.getRequestTypes()); + List actual = ImmutableList.copyOf(requestIds); + verifyRequestIds( + ImmutableList.of( + XGoogSpannerRequestId.of(getClientId(), -1, 
1, 1), + XGoogSpannerRequestId.of(getClientId(), -1, 1, 2)), + actual); + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(ByteString.empty(), requests.get(0).getResumeToken()); + assertNotEquals(ByteString.empty(), requests.get(1).getResumeToken()); + } + + @Test + public void testStreamingReadUnavailable() { + mockSpanner.setStreamingReadExecutionTime( + SimulatedExecutionTime.ofException( + Status.UNAVAILABLE.asRuntimeException(createMinimalRetryInfo()))); + + mockSpanner.putStatementResult( + StatementResult.query( + Statement.of("SELECT my_column FROM my_table WHERE 1=1"), SELECT1_RESULT_SET)); + try (ResultSet resultSet = + getClient().singleUse().read("my_table", KeySet.all(), ImmutableList.of("my_column"))) { + while (resultSet.next()) {} + } + + assertEquals( + ImmutableList.of(ReadRequest.class, ReadRequest.class), mockSpanner.getRequestTypes()); + List actual = ImmutableList.copyOf(requestIds); + verifyRequestIds( + ImmutableList.of( + XGoogSpannerRequestId.of(getClientId(), -1, 1, 1), + XGoogSpannerRequestId.of(getClientId(), -1, 1, 2)), + actual); + } + + @Test + public void testStreamingReadUnavailableHalfway() { + int numRows = 5; + Statement statement = Statement.of("SELECT my_column FROM my_table WHERE 1=1"); + mockSpanner.putStatementResult( + StatementResult.query(statement, new RandomResultSetGenerator(numRows).generate())); + mockSpanner.setStreamingReadExecutionTime( + SimulatedExecutionTime.ofStreamException( + Status.UNAVAILABLE.asRuntimeException(createMinimalRetryInfo()), 2)); + + try (ResultSet resultSet = + getClient().singleUse().read("my_table", KeySet.all(), ImmutableList.of("my_column"))) { + while (resultSet.next()) {} + } + + assertEquals( + ImmutableList.of(ReadRequest.class, ReadRequest.class), mockSpanner.getRequestTypes()); + List actual = ImmutableList.copyOf(requestIds); + verifyRequestIds( + ImmutableList.of( + XGoogSpannerRequestId.of(getClientId(), -1, 1, 1), + 
XGoogSpannerRequestId.of(getClientId(), -1, 1, 2)), + actual); + List requests = mockSpanner.getRequestsOfType(ReadRequest.class); + assertEquals(ByteString.empty(), requests.get(0).getResumeToken()); + assertNotEquals(ByteString.empty(), requests.get(1).getResumeToken()); + } + + @Test + public void testPartitionedDml() { + getClient().executePartitionedUpdate(DML); + + assertEquals( + ImmutableList.of(BeginTransactionRequest.class, ExecuteSqlRequest.class), + mockSpanner.getRequestTypes()); + List actual = ImmutableList.copyOf(requestIds); + verifyRequestIds( + ImmutableList.of( + XGoogSpannerRequestId.of(getClientId(), -1, 1, 1), + XGoogSpannerRequestId.of(getClientId(), -1, 2, 1)), + actual); + verifySameChannelId(actual); + } + + @Test + public void testPartitionedDmlError() { + Statement invalidDml = Statement.of("update invalid_table set col=true where col=false"); + mockSpanner.putStatementResult( + StatementResult.exception( + invalidDml, Status.NOT_FOUND.withDescription("Table not found").asRuntimeException())); + + SpannerException exception = + assertThrows( + SpannerException.class, () -> getClient().executePartitionedUpdate(invalidDml)); + assertEquals(ErrorCode.NOT_FOUND, exception.getErrorCode()); + assertNotNull(exception.getRequestId()); + assertNotEquals("", exception.getRequestId()); + + assertEquals( + ImmutableList.of(BeginTransactionRequest.class, ExecuteSqlRequest.class), + mockSpanner.getRequestTypes()); + List actual = ImmutableList.copyOf(requestIds); + verifyRequestIds( + ImmutableList.of( + XGoogSpannerRequestId.of(getClientId(), -1, 1, 1), + XGoogSpannerRequestId.of(getClientId(), -1, 2, 1)), + actual); + verifySameChannelId(actual); + assertEquals(XGoogSpannerRequestId.of(exception.getRequestId()), actual.get(1)); + } + + @Test + public void testPartitionedDmlAborted() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException( + Status.ABORTED.asRuntimeException(createMinimalRetryInfo()))); + + 
getClient().executePartitionedUpdate(DML); + + assertEquals( + ImmutableList.of( + BeginTransactionRequest.class, + ExecuteSqlRequest.class, + BeginTransactionRequest.class, + ExecuteSqlRequest.class), + mockSpanner.getRequestTypes()); + List actual = ImmutableList.copyOf(requestIds); + verifyRequestIds( + ImmutableList.of( + XGoogSpannerRequestId.of(getClientId(), -1, 1, 1), + XGoogSpannerRequestId.of(getClientId(), -1, 2, 1), + XGoogSpannerRequestId.of(getClientId(), -1, 3, 1), + XGoogSpannerRequestId.of(getClientId(), -1, 4, 1)), + actual); + verifySameChannelId(actual); + } + + @Test + public void testPartitionedDmlUnavailable() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException( + Status.UNAVAILABLE.asRuntimeException(createMinimalRetryInfo()))); + + getClient().executePartitionedUpdate(DML); + + assertEquals( + ImmutableList.of( + BeginTransactionRequest.class, + ExecuteSqlRequest.class, + BeginTransactionRequest.class, + ExecuteSqlRequest.class), + mockSpanner.getRequestTypes()); + List actual = ImmutableList.copyOf(requestIds); + verifyRequestIds( + ImmutableList.of( + XGoogSpannerRequestId.of(getClientId(), -1, 1, 1), + XGoogSpannerRequestId.of(getClientId(), -1, 2, 1), + XGoogSpannerRequestId.of(getClientId(), -1, 3, 1), + XGoogSpannerRequestId.of(getClientId(), -1, 4, 1)), + actual); + verifySameChannelId(actual); + } + + @Test + public void testPartitionedDmlUnavailableWithResumeToken() { + Statement update = Statement.of("UPDATE my_table SET active=true where 1=1"); + mockSpanner.putStatementResult( + StatementResult.query( + update, + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType(StructType.newBuilder().build()) + .build()) + .addRows(ListValue.newBuilder().build()) + .addRows(ListValue.newBuilder().build()) + .addRows(ListValue.newBuilder().build()) + .setStats(ResultSetStats.newBuilder().setRowCountLowerBound(100L).build()) + .build())); + 
mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofStreamException( + Status.UNAVAILABLE.asRuntimeException(createMinimalRetryInfo()), 2L)); + + getClient().executePartitionedUpdate(update); + + assertEquals( + ImmutableList.of( + BeginTransactionRequest.class, ExecuteSqlRequest.class, ExecuteSqlRequest.class), + mockSpanner.getRequestTypes()); + List actual = ImmutableList.copyOf(requestIds); + verifyRequestIds( + ImmutableList.of( + XGoogSpannerRequestId.of(getClientId(), -1, 1, 1), + XGoogSpannerRequestId.of(getClientId(), -1, 2, 1), + XGoogSpannerRequestId.of(getClientId(), -1, 2, 2)), + actual); + verifySameChannelId(actual); + } + + @Test + public void testOtherClientId() { + // Execute a query with the default client from this test class. + try (ResultSet resultSet = getClient().singleUse().executeQuery(SELECT1)) { + while (resultSet.next()) {} + } + // Create a new client and use that to execute a query. This should use a different client ID. + long otherClientId; + try (Spanner spanner = createSpanner()) { + otherClientId = ((SpannerImpl) spanner).getRequestIdClientId(); + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT1)) { + while (resultSet.next()) {} + } + } + // Execute another query with the default client. This should use the original client ID. + try (ResultSet resultSet = getClient().singleUse().executeQuery(SELECT1)) { + while (resultSet.next()) {} + } + assertEquals( + ImmutableList.of( + ExecuteSqlRequest.class, + CreateSessionRequest.class, + ExecuteSqlRequest.class, + ExecuteSqlRequest.class), + mockSpanner.getRequestTypes()); + List actual = ImmutableList.copyOf(requestIds); + verifyRequestIds( + ImmutableList.of( + XGoogSpannerRequestId.of(getClientId(), -1, 1, 1), + // The CreateSession RPC from the initialization of the second client is included in + // the requests that we see. 
This request does not include a channel hint, hence the + // zero value for the channel number in the request ID. + XGoogSpannerRequestId.of(otherClientId, 0, 1, 1), + XGoogSpannerRequestId.of(otherClientId, -1, 2, 1), + XGoogSpannerRequestId.of(getClientId(), -1, 2, 1)), + actual); + } + + private void verifyRequestIds( + List expectedIds, List actualIds) { + assertEquals(message(expectedIds, actualIds), expectedIds.size(), actualIds.size()); + int i = 0; + for (XGoogSpannerRequestId actual : actualIds) { + XGoogSpannerRequestId expected = expectedIds.get(i); + if (expected.getNthChannelId() > -1) { + assertEquals(expected, actual); + } else { + assertTrue(message(expectedIds, actualIds), equalsIgnoringChannelId(expected, actual)); + assertTrue(message(expectedIds, actualIds), actual.hasChannelId()); + } + i++; + } + } + + private void verifySameChannelId(List requestIds) { + for (int i = 0; i < requestIds.size() - 1; i++) { + XGoogSpannerRequestId requestId = requestIds.get(i); + assertTrue(requestId.hasChannelId()); + assertEquals(requestId.getNthChannelId(), requestIds.get(i + 1).getNthChannelId()); + } + } + + private boolean equalsIgnoringChannelId( + XGoogSpannerRequestId expected, XGoogSpannerRequestId actual) { + return expected.getNthClientId() == actual.getNthClientId() + && expected.getNthRequest() == actual.getNthRequest() + && expected.getAttempt() == actual.getAttempt(); + } + + private String message(List expected, List actual) { + return String.format("\n Got: %s\nWant: %s", actual, expected); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/RestoreTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/RestoreTest.java new file mode 100644 index 000000000000..409efb204693 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/RestoreTest.java @@ -0,0 +1,54 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, 
Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; + +import com.google.cloud.spanner.encryption.EncryptionConfigs; +import com.google.cloud.spanner.encryption.RestoreEncryptionConfig; +import org.junit.Test; + +/** Unit tests for {@link com.google.cloud.spanner.Restore} */ +public class RestoreTest { + + private static final BackupId BACKUP_ID = + BackupId.of("test-project", "test-instance", "test-backup"); + private static final DatabaseId DATABASE_ID = + DatabaseId.of("test-project", "test-instance", "test-database"); + private static final String KMS_KEY_NAME = "kms-key-name"; + private static final RestoreEncryptionConfig ENCRYPTION_CONFIG_INFO = + EncryptionConfigs.customerManagedEncryption(KMS_KEY_NAME); + + @Test + public void testRestore() { + final Restore actualRestore = + new Restore.Builder(BACKUP_ID, DATABASE_ID) + .setEncryptionConfig(ENCRYPTION_CONFIG_INFO) + .build(); + final Restore expectedRestore = new Restore(BACKUP_ID, DATABASE_ID, ENCRYPTION_CONFIG_INFO); + + assertEquals(expectedRestore, actualRestore); + } + + @Test + public void testEqualsAndHashCode() { + final Restore restore1 = new Restore(BACKUP_ID, DATABASE_ID, ENCRYPTION_CONFIG_INFO); + final Restore restore2 = new Restore(BACKUP_ID, DATABASE_ID, ENCRYPTION_CONFIG_INFO); + + assertEquals(restore1, restore2); + assertEquals(restore1.hashCode(), restore2.hashCode()); + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ResultSetsHelper.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ResultSetsHelper.java new file mode 100644 index 000000000000..4ab506f73bb1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ResultSetsHelper.java @@ -0,0 +1,97 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.cloud.spanner.AbstractResultSet.CloseableIterator; +import com.google.cloud.spanner.AbstractResultSet.Listener; +import com.google.protobuf.ListValue; +import com.google.spanner.v1.MultiplexedSessionPrecommitToken; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.Transaction; +import java.util.Iterator; +import javax.annotation.Nullable; + +public class ResultSetsHelper { + + /** + * Creates a {@link ResultSets} from a proto {@link com.google.spanner.v1.ResultSet}. + * + *

Note: The returned result holds a reference to the proto that is passed in to this method. + * Changing the proto will change the result that is returned. + */ + public static ResultSet fromProto(com.google.spanner.v1.ResultSet proto) { + Iterator iterator = proto.getRowsList().iterator(); + return new GrpcResultSet( + new CloseableIterator() { + private boolean first = true; + + @Override + public void close(@Nullable String message) {} + + @Override + public boolean isWithBeginTransaction() { + return false; + } + + @Override + public boolean isLastStatement() { + return false; + } + + @Override + public boolean hasNext() { + return first || iterator.hasNext(); + } + + @Override + public PartialResultSet next() { + if (!hasNext()) { + throw new IllegalStateException(); + } + PartialResultSet.Builder builder = PartialResultSet.newBuilder(); + if (first) { + builder.setMetadata(proto.getMetadata()); + first = false; + } + if (iterator.hasNext()) { + builder.addAllValues(iterator.next().getValuesList()); + } + if (!iterator.hasNext()) { + builder.setStats(proto.getStats()); + } + return builder.build(); + } + }, + new Listener() { + @Override + public void onTransactionMetadata(Transaction transaction, boolean shouldIncludeId) + throws SpannerException {} + + @Override + public SpannerException onError( + SpannerException e, boolean withBeginTransaction, boolean isLastStatement) { + return e; + } + + @Override + public void onDone(boolean withBeginTransaction) {} + + @Override + public void onPrecommitToken(MultiplexedSessionPrecommitToken token) {} + }); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ResultSetsTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ResultSetsTest.java new file mode 100644 index 000000000000..082cf30b8c28 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ResultSetsTest.java @@ -0,0 +1,733 @@ +/* + * Copyright 2017 
Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.core.ExecutorProvider; +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AsyncResultSet.CallbackResponse; +import com.google.cloud.spanner.SingerProto.Genre; +import com.google.cloud.spanner.SingerProto.SingerInfo; +import com.google.common.primitives.Doubles; +import com.google.common.primitives.Floats; +import com.google.common.primitives.Longs; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ProtocolMessageEnum; +import java.math.BigDecimal; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link ResultSets} */ +@RunWith(JUnit4.class) +public class ResultSetsTest { + + @Test + 
public void resultSetIteration() { + double doubleVal = 1.2; + float floatVal = 6.626f; + BigDecimal bigDecimalVal = BigDecimal.valueOf(123, 2); + String stringVal = "stringVal"; + String jsonVal = "{\"color\":\"red\",\"value\":\"#f00\"}"; + SingerInfo protoMessageVal = + SingerInfo.newBuilder() + .setSingerId(111) + .setNationality("COUNTRY1") + .setGenre(Genre.FOLK) + .build(); + ProtocolMessageEnum protoEnumVal = Genre.ROCK; + String byteVal = "101"; + long usecs = 32343; + int year = 2018; + int month = 5; + int day = 26; + UUID uuid = UUID.randomUUID(); + Interval interval = Interval.parseFromString("P1Y2M3DT5H7M8.967589762S"); + + boolean[] boolArray = {true, false, true, true, false}; + long[] longArray = {Long.MAX_VALUE, Long.MIN_VALUE, 0, 1, -1}; + double[] doubleArray = {Double.MIN_VALUE, Double.MAX_VALUE, 0, 1, -1, 1.2341}; + float[] floatArray = {Float.MIN_VALUE, Float.MAX_VALUE, 0, 1, -1, 1.2341f}; + BigDecimal[] bigDecimalArray = { + BigDecimal.valueOf(1, Integer.MAX_VALUE), + BigDecimal.valueOf(1, Integer.MIN_VALUE), + BigDecimal.ZERO, + BigDecimal.TEN, + BigDecimal.valueOf(3141592, 6) + }; + ByteArray[] byteArray = { + ByteArray.copyFrom("123"), ByteArray.copyFrom("456"), ByteArray.copyFrom("789") + }; + Timestamp[] timestampArray = { + Timestamp.ofTimeMicroseconds(101), + Timestamp.ofTimeMicroseconds(202), + Timestamp.ofTimeMicroseconds(303) + }; + Date[] dateArray = { + Date.fromYearMonthDay(1, 2, 3), Date.fromYearMonthDay(4, 5, 6), Date.fromYearMonthDay(7, 8, 9) + }; + + UUID[] uuidArray = {UUID.randomUUID(), UUID.randomUUID(), UUID.randomUUID()}; + + Interval[] intervalArray = { + Interval.parseFromString("P0Y"), Interval.parseFromString("P1Y2M3DT-5H-7M8.9675S") + }; + + String[] stringArray = {"abc", "def", "ghi"}; + String[] jsonArray = {"{}", "{\"color\":\"red\",\"value\":\"#f00\"}", "[]"}; + AbstractMessage[] protoMessageArray = { + protoMessageVal, SingerInfo.newBuilder().setSingerId(1).build() + }; + ProtocolMessageEnum[] protoEnumArray = 
{protoEnumVal, Genre.JAZZ}; + + Type type = + Type.struct( + Type.StructField.of("f1", Type.string()), + Type.StructField.of("f2", Type.int64()), + Type.StructField.of("f3", Type.bool()), + Type.StructField.of("doubleVal", Type.float64()), + Type.StructField.of("floatVal", Type.float32()), + Type.StructField.of("bigDecimalVal", Type.numeric()), + Type.StructField.of("stringVal", Type.string()), + Type.StructField.of("jsonVal", Type.json()), + Type.StructField.of("pgJsonbVal", Type.pgJsonb()), + Type.StructField.of("pgOidVal", Type.pgOid()), + Type.StructField.of("byteVal", Type.bytes()), + Type.StructField.of("timestamp", Type.timestamp()), + Type.StructField.of("date", Type.date()), + Type.StructField.of("uuid", Type.uuid()), + Type.StructField.of("interval", Type.interval()), + Type.StructField.of( + "protoMessage", Type.proto(protoMessageVal.getDescriptorForType().getFullName())), + Type.StructField.of( + "protoEnum", Type.protoEnum(protoEnumVal.getDescriptorForType().getFullName())), + Type.StructField.of("boolArray", Type.array(Type.bool())), + Type.StructField.of("longArray", Type.array(Type.int64())), + Type.StructField.of("doubleArray", Type.array(Type.float64())), + Type.StructField.of("floatArray", Type.array(Type.float32())), + Type.StructField.of("bigDecimalArray", Type.array(Type.numeric())), + Type.StructField.of("byteArray", Type.array(Type.bytes())), + Type.StructField.of("timestampArray", Type.array(Type.timestamp())), + Type.StructField.of("dateArray", Type.array(Type.date())), + Type.StructField.of("uuidArray", Type.array(Type.uuid())), + Type.StructField.of("intervalArray", Type.array(Type.interval())), + Type.StructField.of("stringArray", Type.array(Type.string())), + Type.StructField.of("jsonArray", Type.array(Type.json())), + Type.StructField.of("pgJsonbArray", Type.array(Type.pgJsonb())), + Type.StructField.of("pgOidArray", Type.array(Type.pgOid())), + Type.StructField.of( + "protoMessageArray", + 
Type.array(Type.proto(SingerInfo.getDescriptor().getFullName()))), + Type.StructField.of( + "protoEnumArray", Type.array(Type.protoEnum(Genre.getDescriptor().getFullName())))); + Struct struct1 = + Struct.newBuilder() + .set("f1") + .to("x") + .set("f2") + .to(2) + .set("f3") + .to(Value.bool(true)) + .set("doubleVal") + .to(Value.float64(doubleVal)) + .set("floatVal") + .to(Value.float32(floatVal)) + .set("bigDecimalVal") + .to(Value.numeric(bigDecimalVal)) + .set("stringVal") + .to(stringVal) + .set("jsonVal") + .to(Value.json(jsonVal)) + .set("pgJsonbVal") + .to(Value.pgJsonb(jsonVal)) + .set("pgOidVal") + .to(Value.pgOid(2)) + .set("byteVal") + .to(Value.bytes(ByteArray.copyFrom(byteVal))) + .set("timestamp") + .to(Timestamp.ofTimeMicroseconds(usecs)) + .set("date") + .to(Date.fromYearMonthDay(year, month, day)) + .set("uuid") + .to(uuid) + .set("interval") + .to(interval) + .set("protoMessage") + .to(protoMessageVal) + .set("protoEnum") + .to(protoEnumVal) + .set("boolArray") + .to(Value.boolArray(boolArray)) + .set("longArray") + .to(Value.int64Array(longArray)) + .set("doubleArray") + .to(Value.float64Array(doubleArray)) + .set("floatArray") + .to(Value.float32Array(floatArray)) + .set("bigDecimalArray") + .to(Value.numericArray(Arrays.asList(bigDecimalArray))) + .set("byteArray") + .to(Value.bytesArray(Arrays.asList(byteArray))) + .set("timestampArray") + .to(Value.timestampArray(Arrays.asList(timestampArray))) + .set("dateArray") + .to(Value.dateArray(Arrays.asList(dateArray))) + .set("uuidArray") + .to(Value.uuidArray(Arrays.asList(uuidArray))) + .set("intervalArray") + .to(Value.intervalArray(Arrays.asList(intervalArray))) + .set("stringArray") + .to(Value.stringArray(Arrays.asList(stringArray))) + .set("jsonArray") + .to(Value.jsonArray(Arrays.asList(jsonArray))) + .set("pgJsonbArray") + .to(Value.pgJsonbArray(Arrays.asList(jsonArray))) + .set("pgOidArray") + .to(Value.pgOidArray(longArray)) + .set("protoMessageArray") + .to( + Value.protoMessageArray( 
+ Arrays.asList(protoMessageArray), protoMessageVal.getDescriptorForType())) + .set("protoEnumArray") + .to( + Value.protoEnumArray( + Arrays.asList(protoEnumArray), protoEnumVal.getDescriptorForType())) + .build(); + Struct struct2 = + Struct.newBuilder() + .set("f1") + .to("y") + .set("f2") + .to(3) + .set("f3") + .to(Value.bool(null)) + .set("doubleVal") + .to(Value.float64(doubleVal)) + .set("floatVal") + .to(Value.float32(floatVal)) + .set("bigDecimalVal") + .to(Value.numeric(bigDecimalVal)) + .set("stringVal") + .to(stringVal) + .set("jsonVal") + .to(Value.json(jsonVal)) + .set("pgJsonbVal") + .to(Value.pgJsonb(jsonVal)) + .set("pgOidVal") + .to(Value.pgOid(3)) + .set("byteVal") + .to(Value.bytes(ByteArray.copyFrom(byteVal))) + .set("timestamp") + .to(Timestamp.ofTimeMicroseconds(usecs)) + .set("date") + .to(Date.fromYearMonthDay(year, month, day)) + .set("uuid") + .to(uuid) + .set("interval") + .to(Value.interval(interval)) + .set("protoMessage") + .to(protoMessageVal) + .set("protoEnum") + .to(protoEnumVal) + .set("boolArray") + .to(Value.boolArray(boolArray)) + .set("longArray") + .to(Value.int64Array(longArray)) + .set("doubleArray") + .to(Value.float64Array(doubleArray)) + .set("floatArray") + .to(Value.float32Array(floatArray)) + .set("bigDecimalArray") + .to(Value.numericArray(Arrays.asList(bigDecimalArray))) + .set("byteArray") + .to(Value.bytesArray(Arrays.asList(byteArray))) + .set("timestampArray") + .to(Value.timestampArray(Arrays.asList(timestampArray))) + .set("dateArray") + .to(Value.dateArray(Arrays.asList(dateArray))) + .set("uuidArray") + .to(Value.uuidArray(Arrays.asList(uuidArray))) + .set("intervalArray") + .to(Value.intervalArray(Arrays.asList(intervalArray))) + .set("stringArray") + .to(Value.stringArray(Arrays.asList(stringArray))) + .set("jsonArray") + .to(Value.jsonArray(Arrays.asList(jsonArray))) + .set("pgJsonbArray") + .to(Value.pgJsonbArray(Arrays.asList(jsonArray))) + .set("pgOidArray") + .to(Value.pgOidArray(longArray)) + 
.set("protoMessageArray") + .to( + Value.protoMessageArray( + Arrays.asList(protoMessageArray), protoMessageVal.getDescriptorForType())) + .set("protoEnumArray") + .to( + Value.protoEnumArray( + Arrays.asList(protoEnumArray), protoEnumVal.getDescriptorForType())) + .build(); + ResultSet rs = ResultSets.forRows(type, Arrays.asList(struct1, struct2)); + + IllegalStateException e = assertThrows(IllegalStateException.class, rs::getType); + assertThat(e.getMessage()).contains("Must be preceded by a next() call"); + + int columnIndex = 0; + assertThat(rs.next()).isTrue(); + assertThat(rs.getType()).isEqualTo(type); + assertThat(rs.getColumnCount()).isEqualTo(type.getStructFields().size()); + assertThat(rs.getColumnIndex("f1")).isEqualTo(0); + assertThat(rs.getColumnType("nonexistent")).isNull(); + assertThat(rs.getColumnType("f1")).isEqualTo(Type.string()); + assertThat(rs.getColumnType(0)).isEqualTo(Type.string()); + assertThat(rs.getColumnIndex("f2")).isEqualTo(1); + assertThat(rs.getColumnType("f2")).isEqualTo(Type.int64()); + assertThat(rs.getColumnType(1)).isEqualTo(Type.int64()); + assertThat(rs.getColumnIndex("f3")).isEqualTo(2); + assertThat(rs.getColumnType("f3")).isEqualTo(Type.bool()); + assertThat(rs.getColumnType(2)).isEqualTo(Type.bool()); + assertThat(rs.getCurrentRowAsStruct()).isEqualTo(struct1); + assertThat(rs.getString(columnIndex)).isEqualTo("x"); + assertThat(rs.getValue(columnIndex++)).isEqualTo(Value.string("x")); + assertThat(rs.getLong(columnIndex)).isEqualTo(2L); + assertThat(rs.getValue(columnIndex++)).isEqualTo(Value.int64(2L)); + assertThat(rs.getBoolean(columnIndex)).isTrue(); + assertThat(rs.getValue(columnIndex++)).isEqualTo(Value.bool(true)); + assertThat(rs.getBoolean("f3")).isTrue(); + assertThat(rs.getValue("f3")).isEqualTo(Value.bool(true)); + assertThat(rs.getDouble("doubleVal")).isWithin(0.0).of(doubleVal); + assertThat(rs.getValue("doubleVal").getFloat64()).isWithin(0.0).of(doubleVal); + 
assertThat(rs.getDouble(columnIndex)).isWithin(0.0).of(doubleVal); + assertThat(rs.getValue(columnIndex++).getFloat64()).isWithin(0.0).of(doubleVal); + assertThat(rs.getFloat(columnIndex)).isWithin(0.0f).of(floatVal); + assertThat(rs.getValue(columnIndex++).getFloat32()).isWithin(0.0f).of(floatVal); + assertThat(rs.getFloat("floatVal")).isWithin(0.0f).of(floatVal); + assertThat(rs.getValue("floatVal").getFloat32()).isWithin(0.0f).of(floatVal); + assertThat(rs.getBigDecimal("bigDecimalVal")).isEqualTo(new BigDecimal("1.23")); + assertThat(rs.getValue("bigDecimalVal")).isEqualTo(Value.numeric(new BigDecimal("1.23"))); + assertThat(rs.getBigDecimal(columnIndex)).isEqualTo(new BigDecimal("1.23")); + assertThat(rs.getValue(columnIndex++)).isEqualTo(Value.numeric(new BigDecimal("1.23"))); + assertThat(rs.getString(columnIndex)).isEqualTo(stringVal); + assertThat(rs.getValue(columnIndex++)).isEqualTo(Value.string(stringVal)); + assertThat(rs.getString("stringVal")).isEqualTo(stringVal); + assertThat(rs.getValue("stringVal")).isEqualTo(Value.string(stringVal)); + assertThat(rs.getJson(columnIndex)).isEqualTo(jsonVal); + assertThat(rs.getValue(columnIndex++)).isEqualTo(Value.json(jsonVal)); + assertThat(rs.getJson("jsonVal")).isEqualTo(jsonVal); + assertThat(rs.getValue("jsonVal")).isEqualTo(Value.json(jsonVal)); + + assertEquals(jsonVal, rs.getPgJsonb(columnIndex)); + assertEquals(Value.pgJsonb(jsonVal), rs.getValue(columnIndex++)); + assertEquals(jsonVal, rs.getPgJsonb("pgJsonbVal")); + assertEquals(Value.pgJsonb(jsonVal), rs.getValue("pgJsonbVal")); + + assertThat(rs.getLong(columnIndex)).isEqualTo(2L); + assertThat(rs.getValue(columnIndex++)).isEqualTo(Value.pgOid(2L)); + assertThat(rs.getColumnType("pgOidVal")).isEqualTo(Type.pgOid()); + + assertThat(rs.getBytes(columnIndex)).isEqualTo(ByteArray.copyFrom(byteVal)); + assertThat(rs.getValue(columnIndex++)).isEqualTo(Value.bytes(ByteArray.copyFrom(byteVal))); + 
assertThat(rs.getBytes("byteVal")).isEqualTo(ByteArray.copyFrom(byteVal)); + assertThat(rs.getValue("byteVal")).isEqualTo(Value.bytes(ByteArray.copyFrom(byteVal))); + assertThat(rs.getTimestamp(columnIndex)).isEqualTo(Timestamp.ofTimeMicroseconds(usecs)); + assertThat(rs.getValue(columnIndex++)) + .isEqualTo(Value.timestamp(Timestamp.ofTimeMicroseconds(usecs))); + assertThat(rs.getTimestamp("timestamp")).isEqualTo(Timestamp.ofTimeMicroseconds(usecs)); + assertThat(rs.getValue("timestamp")) + .isEqualTo(Value.timestamp(Timestamp.ofTimeMicroseconds(usecs))); + assertThat(rs.getDate(columnIndex)).isEqualTo(Date.fromYearMonthDay(year, month, day)); + assertThat(rs.getValue(columnIndex++)) + .isEqualTo(Value.date(Date.fromYearMonthDay(year, month, day))); + assertThat(rs.getDate("date")).isEqualTo(Date.fromYearMonthDay(year, month, day)); + assertThat(rs.getValue("date")).isEqualTo(Value.date(Date.fromYearMonthDay(year, month, day))); + + // UUID + assertThat(rs.getUuid(columnIndex)).isEqualTo(uuid); + assertThat(rs.getValue(columnIndex++)).isEqualTo(Value.uuid(uuid)); + assertThat(rs.getUuid("uuid")).isEqualTo(uuid); + assertThat(rs.getValue("uuid")).isEqualTo(Value.uuid(uuid)); + + // INTERVAL + assertThat(rs.getInterval(columnIndex)).isEqualTo(interval); + assertThat(rs.getValue(columnIndex++)).isEqualTo(Value.interval(interval)); + assertThat(rs.getInterval("interval")).isEqualTo(interval); + assertThat(rs.getValue("interval")).isEqualTo(Value.interval(interval)); + + assertEquals(protoMessageVal, rs.getProtoMessage(columnIndex, SingerInfo.getDefaultInstance())); + assertEquals(Value.protoMessage(protoMessageVal), rs.getValue(columnIndex++)); + assertEquals( + protoMessageVal, rs.getProtoMessage("protoMessage", SingerInfo.getDefaultInstance())); + assertEquals(Value.protoMessage(protoMessageVal), rs.getValue("protoMessage")); + + assertEquals(protoEnumVal, rs.getProtoEnum(columnIndex, Genre::forNumber)); + assertEquals(Value.protoEnum(protoEnumVal), 
rs.getValue(columnIndex++)); + assertEquals(protoEnumVal, rs.getProtoEnum("protoEnum", Genre::forNumber)); + assertEquals(Value.protoEnum(protoEnumVal), rs.getValue("protoEnum")); + + assertThat(rs.getBooleanArray(columnIndex)).isEqualTo(boolArray); + assertThat(rs.getValue(columnIndex++)).isEqualTo(Value.boolArray(boolArray)); + assertThat(rs.getBooleanArray("boolArray")).isEqualTo(boolArray); + assertThat(rs.getValue("boolArray")).isEqualTo(Value.boolArray(boolArray)); + assertThat(rs.getLongArray(columnIndex)).isEqualTo(longArray); + assertThat(rs.getValue(columnIndex)).isEqualTo(Value.int64Array(longArray)); + assertThat(rs.getLongArray("longArray")).isEqualTo(longArray); + assertThat(rs.getValue("longArray")).isEqualTo(Value.int64Array(longArray)); + assertThat(rs.getLongList(columnIndex++)).isEqualTo(Longs.asList(longArray)); + assertThat(rs.getLongList("longArray")).isEqualTo(Longs.asList(longArray)); + assertThat(rs.getDoubleArray(columnIndex)).usingTolerance(0.0).containsAtLeast(doubleArray); + assertThat(rs.getValue(columnIndex)).isEqualTo(Value.float64Array(doubleArray)); + assertThat(rs.getDoubleArray("doubleArray")) + .usingTolerance(0.0) + .containsExactly(doubleArray) + .inOrder(); + assertThat(rs.getValue("doubleArray")).isEqualTo(Value.float64Array(doubleArray)); + assertThat(rs.getDoubleList(columnIndex++)).isEqualTo(Doubles.asList(doubleArray)); + assertThat(rs.getDoubleList("doubleArray")).isEqualTo(Doubles.asList(doubleArray)); + + assertThat(rs.getFloatArray(columnIndex)).usingTolerance(0.0f).containsAtLeast(floatArray); + assertThat(rs.getValue(columnIndex)).isEqualTo(Value.float32Array(floatArray)); + assertThat(rs.getFloatArray("floatArray")) + .usingTolerance(0.0f) + .containsExactly(floatArray) + .inOrder(); + assertThat(rs.getValue("floatArray")).isEqualTo(Value.float32Array(floatArray)); + assertThat(rs.getFloatList(columnIndex++)).isEqualTo(Floats.asList(floatArray)); + 
assertThat(rs.getFloatList("floatArray")).isEqualTo(Floats.asList(floatArray)); + + assertThat(rs.getBigDecimalList(columnIndex)).isEqualTo(Arrays.asList(bigDecimalArray)); + assertThat(rs.getValue(columnIndex++)) + .isEqualTo(Value.numericArray(Arrays.asList(bigDecimalArray))); + assertThat(rs.getBigDecimalList("bigDecimalArray")).isEqualTo(Arrays.asList(bigDecimalArray)); + assertThat(rs.getValue("bigDecimalArray")) + .isEqualTo(Value.numericArray(Arrays.asList(bigDecimalArray))); + assertThat(rs.getBytesList(columnIndex)).isEqualTo(Arrays.asList(byteArray)); + assertThat(rs.getValue(columnIndex++)).isEqualTo(Value.bytesArray(Arrays.asList(byteArray))); + assertThat(rs.getBytesList("byteArray")).isEqualTo(Arrays.asList(byteArray)); + assertThat(rs.getValue("byteArray")).isEqualTo(Value.bytesArray(Arrays.asList(byteArray))); + assertThat(rs.getTimestampList(columnIndex)).isEqualTo(Arrays.asList(timestampArray)); + assertThat(rs.getValue(columnIndex++)) + .isEqualTo(Value.timestampArray(Arrays.asList(timestampArray))); + assertThat(rs.getTimestampList("timestampArray")).isEqualTo(Arrays.asList(timestampArray)); + assertThat(rs.getValue("timestampArray")) + .isEqualTo(Value.timestampArray(Arrays.asList(timestampArray))); + assertThat(rs.getDateList(columnIndex)).isEqualTo(Arrays.asList(dateArray)); + assertThat(rs.getValue(columnIndex++)).isEqualTo(Value.dateArray(Arrays.asList(dateArray))); + assertThat(rs.getDateList("dateArray")).isEqualTo(Arrays.asList(dateArray)); + assertThat(rs.getValue("dateArray")).isEqualTo(Value.dateArray(Arrays.asList(dateArray))); + + // UUID Array + assertThat(rs.getUuidList(columnIndex)).isEqualTo(Arrays.asList(uuidArray)); + assertThat(rs.getValue(columnIndex++)).isEqualTo(Value.uuidArray(Arrays.asList(uuidArray))); + assertThat(rs.getUuidList("uuidArray")).isEqualTo(Arrays.asList(uuidArray)); + assertThat(rs.getValue("uuidArray")).isEqualTo(Value.uuidArray(Arrays.asList(uuidArray))); + + // INTERVAL Array + 
assertThat(rs.getIntervalList(columnIndex)).isEqualTo(Arrays.asList(intervalArray)); + assertThat(rs.getValue(columnIndex++)) + .isEqualTo(Value.intervalArray(Arrays.asList(intervalArray))); + assertThat(rs.getIntervalList("intervalArray")).isEqualTo(Arrays.asList(intervalArray)); + assertThat(rs.getValue("intervalArray")) + .isEqualTo(Value.intervalArray(Arrays.asList(intervalArray))); + + assertThat(rs.getStringList(columnIndex)).isEqualTo(Arrays.asList(stringArray)); + assertThat(rs.getValue(columnIndex++)).isEqualTo(Value.stringArray(Arrays.asList(stringArray))); + assertThat(rs.getStringList("stringArray")).isEqualTo(Arrays.asList(stringArray)); + assertThat(rs.getValue("stringArray")).isEqualTo(Value.stringArray(Arrays.asList(stringArray))); + assertThat(rs.getJsonList(columnIndex++)).isEqualTo(Arrays.asList(jsonArray)); + assertThat(rs.getJsonList("jsonArray")).isEqualTo(Arrays.asList(jsonArray)); + + assertEquals(Arrays.asList(jsonArray), rs.getPgJsonbList(columnIndex++)); + assertEquals(Arrays.asList(jsonArray), rs.getPgJsonbList("pgJsonbArray")); + + assertThat(rs.getLongArray(columnIndex)).isEqualTo(longArray); + assertThat(rs.getValue(columnIndex)).isEqualTo(Value.pgOidArray(longArray)); + assertThat(rs.getLongArray("pgOidArray")).isEqualTo(longArray); + assertThat(rs.getValue("pgOidArray")).isEqualTo(Value.pgOidArray(longArray)); + assertThat(rs.getLongList(columnIndex++)).isEqualTo(Longs.asList(longArray)); + assertThat(rs.getLongList("pgOidArray")).isEqualTo(Longs.asList(longArray)); + + assertThat(rs.getProtoMessageList(columnIndex, SingerInfo.getDefaultInstance())) + .isEqualTo(Arrays.asList(protoMessageArray)); + assertThat(rs.getValue(columnIndex++)) + .isEqualTo( + Value.protoMessageArray(Arrays.asList(protoMessageArray), SingerInfo.getDescriptor())); + assertThat(rs.getProtoMessageList("protoMessageArray", SingerInfo.getDefaultInstance())) + .isEqualTo(Arrays.asList(protoMessageArray)); + assertThat(rs.getValue("protoMessageArray")) + 
.isEqualTo( + Value.protoMessageArray(Arrays.asList(protoMessageArray), SingerInfo.getDescriptor())); + + assertThat(rs.getProtoEnumList(columnIndex, Genre::forNumber)) + .isEqualTo(Arrays.asList(protoEnumArray)); + assertThat(rs.getValue(columnIndex)) + .isEqualTo(Value.protoEnumArray(Arrays.asList(protoEnumArray), Genre.getDescriptor())); + assertThat(rs.getProtoEnumList("protoEnumArray", Genre::forNumber)) + .isEqualTo(Arrays.asList(protoEnumArray)); + assertThat(rs.getValue("protoEnumArray")) + .isEqualTo(Value.protoEnumArray(Arrays.asList(protoEnumArray), Genre.getDescriptor())); + + assertThat(rs.next()).isTrue(); + assertThat(rs.getCurrentRowAsStruct()).isEqualTo(struct2); + assertThat(rs.getString(0)).isEqualTo("y"); + assertThat(rs.getLong(1)).isEqualTo(3L); + assertThat(rs.isNull(2)).isTrue(); + assertThat(rs.next()).isFalse(); + + UnsupportedOperationException unsupported = + assertThrows(UnsupportedOperationException.class, rs::getStats); + assertThat(unsupported.getMessage()) + .contains("ResultSetStats are available only for results returned from analyzeQuery"); + } + + @Test + public void resultSetIterationWithStructColumns() { + Type nestedStructType = Type.struct(Type.StructField.of("g1", Type.string())); + Type type = + Type.struct( + Type.StructField.of("f1", nestedStructType), Type.StructField.of("f2", Type.int64())); + + Struct value1 = Struct.newBuilder().set("g1").to("abc").build(); + + Struct struct1 = Struct.newBuilder().set("f1").to(value1).set("f2").to((Long) null).build(); + UnsupportedOperationException e = + assertThrows( + UnsupportedOperationException.class, + () -> ResultSets.forRows(type, Collections.singletonList(struct1))); + assertThat(e.getMessage()) + .contains("STRUCT-typed columns are not supported inside ResultSets."); + } + + @Test + public void resultSetIterationWithArrayStructColumns() { + Type nestedStructType = Type.struct(Type.StructField.of("g1", Type.string())); + Type type = + Type.struct( + 
Type.StructField.of("f1", Type.array(nestedStructType)), + Type.StructField.of("f2", Type.int64())); + + Struct value1 = Struct.newBuilder().set("g1").to("abc").build(); + + List arrayValue = Arrays.asList(value1, null); + + Struct struct1 = + Struct.newBuilder() + .set("f1") + .toStructArray(nestedStructType, arrayValue) + .set("f2") + .to((Long) null) + .build(); + + Struct struct2 = + Struct.newBuilder() + .set("f1") + .toStructArray(nestedStructType, null) + .set("f2") + .to(20) + .build(); + + ResultSet rs = ResultSets.forRows(type, Arrays.asList(struct1, struct2)); + + assertThat(rs.next()).isTrue(); + assertThat(rs.getType()).isEqualTo(type); + assertThat(rs.getColumnCount()).isEqualTo(2); + + assertThat(rs.getColumnIndex("f1")).isEqualTo(0); + assertThat(rs.getColumnType("f1")).isEqualTo(Type.array(nestedStructType)); + assertThat(rs.getColumnType(0)).isEqualTo(Type.array(nestedStructType)); + + assertThat(rs.getColumnIndex("f2")).isEqualTo(1); + assertThat(rs.getColumnType("f2")).isEqualTo(Type.int64()); + assertThat(rs.getColumnType(1)).isEqualTo(Type.int64()); + + assertThat(rs.getCurrentRowAsStruct()).isEqualTo(struct1); + + assertThat(rs.getStructList(0)).isEqualTo(arrayValue); + assertThat(rs.getValue(0)).isEqualTo(Value.structArray(nestedStructType, arrayValue)); + assertThat(rs.getStructList("f1")).isEqualTo(arrayValue); + assertThat(rs.getValue("f1")).isEqualTo(Value.structArray(nestedStructType, arrayValue)); + assertThat(rs.isNull(1)).isTrue(); + + assertThat(rs.next()).isTrue(); + assertThat(rs.getCurrentRowAsStruct()).isEqualTo(struct2); + + assertThat(rs.isNull(0)).isTrue(); + assertThat(rs.isNull("f1")).isTrue(); + assertThat(rs.getLong(1)).isEqualTo(20); + assertThat(rs.getValue(1)).isEqualTo(Value.int64(20)); + assertThat(rs.getLong("f2")).isEqualTo(20); + assertThat(rs.getValue("f2")).isEqualTo(Value.int64(20)); + + assertThat(rs.next()).isFalse(); + } + + @Test + public void closeResultSet() { + ResultSet rs = + ResultSets.forRows( + 
Type.struct(Type.StructField.of("f1", Type.string())), + Collections.singletonList(Struct.newBuilder().set("f1").to("x").build())); + rs.close(); + IllegalStateException e = + assertThrows(IllegalStateException.class, () -> rs.getCurrentRowAsStruct()); + assertNotNull(e.getMessage()); + } + + @Test + public void exceptionIfNextIsNotCalled() { + ResultSet rs = + ResultSets.forRows( + Type.struct(Type.StructField.of("f1", Type.string())), + Collections.singletonList(Struct.newBuilder().set("f1").to("x").build())); + IllegalStateException e = + assertThrows(IllegalStateException.class, () -> rs.getCurrentRowAsStruct()); + assertNotNull(e.getMessage()); + } + + @Test + public void testToAsyncResultSet() { + ResultSet delegate = + ResultSets.forRows( + Type.struct(Type.StructField.of("f1", Type.string())), + Collections.singletonList(Struct.newBuilder().set("f1").to("x").build())); + + final AtomicInteger count = new AtomicInteger(); + AsyncResultSet rs = ResultSets.toAsyncResultSet(delegate); + ApiFuture fut = + rs.setCallback( + MoreExecutors.directExecutor(), + resultSet -> { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + count.incrementAndGet(); + assertThat(resultSet.getString("f1")).isEqualTo("x"); + } + } + }); + SpannerApiFutures.get(fut); + assertThat(count.get()).isEqualTo(1); + } + + @Test + public void testToAsyncResultSetWithExecProvider() { + ResultSet delegate = + ResultSets.forRows( + Type.struct(Type.StructField.of("f1", Type.string())), + Collections.singletonList(Struct.newBuilder().set("f1").to("x").build())); + + ExecutorProvider provider = + new ExecutorProvider() { + final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); + + @Override + public boolean shouldAutoClose() { + return true; + } + + @Override + public ScheduledExecutorService getExecutor() { + return executor; + } + }; + final AtomicInteger 
count = new AtomicInteger(); + AsyncResultSet rs = ResultSets.toAsyncResultSet(delegate, provider); + ApiFuture fut = + rs.setCallback( + MoreExecutors.directExecutor(), + resultSet -> { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + count.incrementAndGet(); + assertThat(resultSet.getString("f1")).isEqualTo("x"); + } + } + }); + SpannerApiFutures.get(fut); + assertThat(count.get()).isEqualTo(1); + assertThat(provider.getExecutor().isShutdown()).isTrue(); + } + + @Test + public void testToAsyncResultSetWithFuture() { + ApiFuture delegateFuture = + ApiFutures.immediateFuture( + ResultSets.forRows( + Type.struct(Type.StructField.of("f1", Type.string())), + Collections.singletonList(Struct.newBuilder().set("f1").to("x").build()))); + + ExecutorProvider provider = + new ExecutorProvider() { + final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); + + @Override + public boolean shouldAutoClose() { + return false; + } + + @Override + public ScheduledExecutorService getExecutor() { + return executor; + } + }; + final AtomicInteger count = new AtomicInteger(); + AsyncResultSet rs = ResultSets.toAsyncResultSet(delegateFuture, provider); + ApiFuture fut = + rs.setCallback( + MoreExecutors.directExecutor(), + resultSet -> { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + count.incrementAndGet(); + assertThat(resultSet.getString("f1")).isEqualTo("x"); + } + } + }); + SpannerApiFutures.get(fut); + assertThat(count.get()).isEqualTo(1); + assertThat(provider.getExecutor().isShutdown()).isFalse(); + provider.getExecutor().shutdown(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ResumableStreamIteratorTest.java 
b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ResumableStreamIteratorTest.java new file mode 100644 index 000000000000..f13c0bb1237a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ResumableStreamIteratorTest.java @@ -0,0 +1,536 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.api.client.util.BackOff; +import com.google.cloud.spanner.ErrorHandler.DefaultErrorHandler; +import com.google.cloud.spanner.XGoogSpannerRequestId.NoopRequestIdCreator; +import com.google.cloud.spanner.v1.stub.SpannerStubSettings; +import com.google.common.collect.AbstractIterator; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.google.protobuf.ByteString; +import com.google.protobuf.Duration; +import com.google.protobuf.Value; +import com.google.rpc.RetryInfo; +import com.google.spanner.v1.PartialResultSet; +import io.grpc.Metadata; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.grpc.protobuf.ProtoUtils; +import io.opencensus.trace.Span; +import 
io.opencensus.trace.Tracing; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.context.Scope; +import java.io.IOException; +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.TimeUnit; +import javax.annotation.Nullable; +import org.junit.Assume; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; +import org.mockito.Mockito; + +/** Unit tests for {@link ResumableStreamIterator}. */ +@RunWith(Parameterized.class) +public class ResumableStreamIteratorTest { + interface Starter { + AbstractResultSet.CloseableIterator startStream( + @Nullable ByteString resumeToken, + AsyncResultSet.StreamMessageListener streamMessageListener); + } + + interface ResultSetStream { + PartialResultSet next(); + + void close(); + } + + @Parameter(0) + public ErrorCode errorCodeParameter; + + @Parameters(name = "errorCodeParameter = {0}") + public static List data() { + return ImmutableList.of(ErrorCode.UNAVAILABLE, ErrorCode.RESOURCE_EXHAUSTED); + } + + private static StatusRuntimeException statusWithRetryInfo(ErrorCode code) { + Metadata.Key key = ProtoUtils.keyForProto(RetryInfo.getDefaultInstance()); + Metadata trailers = new Metadata(); + RetryInfo retryInfo = + RetryInfo.newBuilder() + .setRetryDelay( + Duration.newBuilder() + .setNanos((int) TimeUnit.MILLISECONDS.toNanos(1L)) + .setSeconds(0L)) + .build(); + trailers.put(key, retryInfo); + return code.getGrpcStatus().asRuntimeException(trailers); + } + + static class RetryableException extends SpannerException { + RetryableException(ErrorCode code, @Nullable String message) { + // OK to instantiate SpannerException directly for this unit test. 
+ super(DoNotConstructDirectly.ALLOWED, code, true, message, statusWithRetryInfo(code)); + } + + RetryableException(ErrorCode code, @Nullable String message, StatusRuntimeException cause) { + // OK to instantiate SpannerException directly for this unit test. + super(DoNotConstructDirectly.ALLOWED, code, true, message, cause); + } + } + + static class NonRetryableException extends SpannerException { + NonRetryableException(ErrorCode code, @Nullable String message) { + super(DoNotConstructDirectly.ALLOWED, code, false, message, null); + } + } + + static class ResultSetIterator extends AbstractIterator + implements AbstractResultSet.CloseableIterator { + final ResultSetStream stream; + + ResultSetIterator(ResultSetStream stream) { + this.stream = stream; + } + + @Override + protected PartialResultSet computeNext() { + PartialResultSet next = stream.next(); + if (next == null) { + endOfData(); + } + return next; + } + + @Override + public void close(@Nullable String message) { + stream.close(); + } + + @Override + public boolean isWithBeginTransaction() { + return false; + } + + @Override + public boolean isLastStatement() { + return false; + } + } + + Starter starter = Mockito.mock(Starter.class); + ResumableStreamIterator resumableStreamIterator; + + @Before + public void setUp() { + SpannerOptions.resetActiveTracingFramework(); + SpannerOptions.enableOpenTelemetryTraces(); + initWithLimit(Integer.MAX_VALUE); + } + + private void initWithLimit(int maxBufferSize) { + + resumableStreamIterator = + new ResumableStreamIterator( + maxBufferSize, + "", + new OpenTelemetrySpan(mock(io.opentelemetry.api.trace.Span.class)), + new TraceWrapper(Tracing.getTracer(), OpenTelemetry.noop().getTracer(""), false), + DefaultErrorHandler.INSTANCE, + SpannerStubSettings.newBuilder().executeStreamingSqlSettings().getRetrySettings(), + SpannerStubSettings.newBuilder().executeStreamingSqlSettings().getRetryableCodes(), + NoopRequestIdCreator.INSTANCE) { + @Override + 
AbstractResultSet.CloseableIterator startStream( + @Nullable ByteString resumeToken, + AsyncResultSet.StreamMessageListener streamMessageListener, + XGoogSpannerRequestId requestId) { + return starter.startStream(resumeToken, null); + } + }; + } + + @Test + public void simple() { + ResultSetStream s1 = Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(null, null)).thenReturn(new ResultSetIterator(s1)); + Mockito.when(s1.next()) + .thenReturn(resultSet(null, "a")) + .thenReturn(resultSet(null, "b")) + .thenReturn(null); + assertThat(consume(resumableStreamIterator)).containsExactly("a", "b").inOrder(); + } + + @Test + public void closedOTSpan() { + SpannerOptions.resetActiveTracingFramework(); + SpannerOptions.enableOpenTelemetryTraces(); + Assume.assumeTrue( + "This test is only supported on JDK11 and lower", + JavaVersionUtil.getJavaMajorVersion() < 12); + + io.opentelemetry.api.trace.Span oTspan = mock(io.opentelemetry.api.trace.Span.class); + ISpan span = new OpenTelemetrySpan(oTspan); + when(oTspan.makeCurrent()).thenReturn(mock(Scope.class)); + setInternalState(ResumableStreamIterator.class, this.resumableStreamIterator, "span", span); + + ResultSetStream s1 = Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(null, null)).thenReturn(new ResultSetIterator(s1)); + Mockito.when(s1.next()) + .thenReturn(resultSet(ByteString.copyFromUtf8("r1"), "a")) + .thenReturn(resultSet(ByteString.copyFromUtf8("r2"), "b")) + .thenReturn(null); + assertThat(consume(resumableStreamIterator)).containsExactly("a", "b").inOrder(); + + resumableStreamIterator.close("closed"); + verify(oTspan).end(); + } + + @Test + public void closedOCSpan() { + SpannerOptions.resetActiveTracingFramework(); + SpannerOptions.enableOpenCensusTraces(); + Assume.assumeTrue( + "This test is only supported on JDK11 and lower", + JavaVersionUtil.getJavaMajorVersion() < 12); + Span mockSpan = mock(Span.class); + ISpan span = new OpenCensusSpan(mockSpan); + 
setInternalState(ResumableStreamIterator.class, this.resumableStreamIterator, "span", span); + + ResultSetStream s1 = Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(null, null)).thenReturn(new ResultSetIterator(s1)); + Mockito.when(s1.next()) + .thenReturn(resultSet(ByteString.copyFromUtf8("r1"), "a")) + .thenReturn(resultSet(ByteString.copyFromUtf8("r2"), "b")) + .thenReturn(null); + assertThat(consume(resumableStreamIterator)).containsExactly("a", "b").inOrder(); + + resumableStreamIterator.close("closed"); + verify(mockSpan).end(OpenCensusSpan.END_SPAN_OPTIONS); + } + + @Test + public void restart() { + ResultSetStream s1 = Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(null, null)).thenReturn(new ResultSetIterator(s1)); + Mockito.when(s1.next()) + .thenReturn(resultSet(ByteString.copyFromUtf8("r1"), "a")) + .thenReturn(resultSet(ByteString.copyFromUtf8("r2"), "b")) + .thenThrow(new RetryableException(errorCodeParameter, "failed by test")); + + ResultSetStream s2 = Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(ByteString.copyFromUtf8("r2"), null)) + .thenReturn(new ResultSetIterator(s2)); + Mockito.when(s2.next()) + .thenReturn(resultSet(ByteString.copyFromUtf8("r3"), "c")) + .thenReturn(resultSet(ByteString.copyFromUtf8("r4"), "d")) + .thenReturn(null); + assertThat(consume(resumableStreamIterator)).containsExactly("a", "b", "c", "d").inOrder(); + } + + @Test + public void restartWithHoldBack() { + ResultSetStream s1 = Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(null, null)).thenReturn(new ResultSetIterator(s1)); + Mockito.when(s1.next()) + .thenReturn(resultSet(ByteString.copyFromUtf8("r1"), "a")) + .thenReturn(resultSet(ByteString.copyFromUtf8("r2"), "b")) + .thenReturn(resultSet(null, "X")) + .thenReturn(resultSet(null, "X")) + .thenThrow(new RetryableException(errorCodeParameter, "failed by test")); + + ResultSetStream s2 = 
Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(ByteString.copyFromUtf8("r2"), null)) + .thenReturn(new ResultSetIterator(s2)); + Mockito.when(s2.next()) + .thenReturn(resultSet(ByteString.copyFromUtf8("r3"), "c")) + .thenReturn(resultSet(ByteString.copyFromUtf8("r4"), "d")) + .thenReturn(null); + assertThat(consume(resumableStreamIterator)).containsExactly("a", "b", "c", "d").inOrder(); + } + + @Test + public void restartWithHoldBackMidStream() { + ResultSetStream s1 = Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(null, null)).thenReturn(new ResultSetIterator(s1)); + Mockito.when(s1.next()) + .thenReturn(resultSet(ByteString.copyFromUtf8("r1"), "a")) + .thenReturn(resultSet(null, "b")) + .thenReturn(resultSet(null, "c")) + .thenReturn(resultSet(ByteString.copyFromUtf8("r2"), "d")) + .thenThrow(new RetryableException(errorCodeParameter, "failed by test")); + + ResultSetStream s2 = Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(ByteString.copyFromUtf8("r2"), null)) + .thenReturn(new ResultSetIterator(s2)); + Mockito.when(s2.next()) + .thenReturn(resultSet(ByteString.copyFromUtf8("r3"), "e")) + .thenReturn(resultSet(null, "f")) + .thenReturn(null); + assertThat(consume(resumableStreamIterator)) + .containsExactly("a", "b", "c", "d", "e", "f") + .inOrder(); + } + + @Test + public void retryableErrorWithoutRetryInfo() throws IOException { + Assume.assumeTrue( + "This test is only supported on JDK11 and lower", + JavaVersionUtil.getJavaMajorVersion() < 12); + + BackOff backOff = mock(BackOff.class); + Mockito.when(backOff.nextBackOffMillis()).thenReturn(1L); + setInternalState( + ResumableStreamIterator.class, this.resumableStreamIterator, "backOff", backOff); + + ResultSetStream s1 = Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(null, null)).thenReturn(new ResultSetIterator(s1)); + Mockito.when(s1.next()) + .thenReturn(resultSet(ByteString.copyFromUtf8("r1"), "a")) + 
.thenThrow( + new RetryableException( + ErrorCode.UNAVAILABLE, "failed by test", Status.UNAVAILABLE.asRuntimeException())); + + ResultSetStream s2 = Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(ByteString.copyFromUtf8("r1"), null)) + .thenReturn(new ResultSetIterator(s2)); + Mockito.when(s2.next()) + .thenReturn(resultSet(ByteString.copyFromUtf8("r2"), "b")) + .thenReturn(null); + assertThat(consume(resumableStreamIterator)).containsExactly("a", "b").inOrder(); + verify(backOff).nextBackOffMillis(); + } + + @Test + public void nonRetryableError() { + ResultSetStream s1 = Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(null, null)).thenReturn(new ResultSetIterator(s1)); + Mockito.when(s1.next()) + .thenReturn(resultSet(ByteString.copyFromUtf8("r1"), "a")) + .thenReturn(resultSet(ByteString.copyFromUtf8("r2"), "b")) + .thenReturn(resultSet(null, "X")) + .thenReturn(resultSet(null, "X")) + .thenThrow(new NonRetryableException(ErrorCode.FAILED_PRECONDITION, "failed by test")); + Iterator strings = stringIterator(resumableStreamIterator); + assertThat(strings.next()).isEqualTo("a"); + assertThat(strings.next()).isEqualTo("b"); + SpannerException e = assertThrows(SpannerException.class, () -> strings.next()); + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + + @Test + public void bufferLimitSimple() { + initWithLimit(1); + + ResultSetStream s1 = Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(null, null)).thenReturn(new ResultSetIterator(s1)); + Mockito.when(s1.next()) + .thenReturn(resultSet(null, "a")) + .thenReturn(resultSet(null, "b")) + .thenReturn(null); + assertThat(consume(resumableStreamIterator)).containsExactly("a", "b").inOrder(); + } + + @Test + public void bufferLimitSimpleWithRestartTokens() { + initWithLimit(1); + + ResultSetStream s1 = Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(null, null)).thenReturn(new ResultSetIterator(s1)); + 
Mockito.when(s1.next()) + .thenReturn(resultSet(ByteString.copyFromUtf8("r1"), "a")) + .thenReturn(resultSet(ByteString.copyFromUtf8("r2"), "b")) + .thenReturn(null); + assertThat(consume(resumableStreamIterator)).containsExactly("a", "b").inOrder(); + } + + @Test + public void bufferLimitRestart() { + initWithLimit(1); + + ResultSetStream s1 = Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(null, null)).thenReturn(new ResultSetIterator(s1)); + Mockito.when(s1.next()) + .thenReturn(resultSet(ByteString.copyFromUtf8("r1"), "a")) + .thenReturn(resultSet(ByteString.copyFromUtf8("r2"), "b")) + .thenThrow(new RetryableException(errorCodeParameter, "failed by test")); + + ResultSetStream s2 = Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(ByteString.copyFromUtf8("r2"), null)) + .thenReturn(new ResultSetIterator(s2)); + Mockito.when(s2.next()) + .thenReturn(resultSet(ByteString.copyFromUtf8("r3"), "c")) + .thenReturn(resultSet(ByteString.copyFromUtf8("r4"), "d")) + .thenReturn(null); + assertThat(consume(resumableStreamIterator)).containsExactly("a", "b", "c", "d").inOrder(); + } + + @Test + public void bufferLimitRestartWithinLimitAtStartOfResults() { + initWithLimit(1); + + ResultSetStream s1 = Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(null, null)).thenReturn(new ResultSetIterator(s1)); + Mockito.when(s1.next()) + .thenReturn(resultSet(null, "XXXXXX")) + .thenThrow(new RetryableException(errorCodeParameter, "failed by test")); + + ResultSetStream s2 = Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(null, null)).thenReturn(new ResultSetIterator(s2)); + Mockito.when(s2.next()) + .thenReturn(resultSet(null, "a")) + .thenReturn(resultSet(null, "b")) + .thenReturn(null); + assertThat(consume(resumableStreamIterator)).containsExactly("a", "b").inOrder(); + } + + @Test + public void bufferLimitRestartWithinLimitMidResults() { + initWithLimit(1); + + ResultSetStream s1 = 
Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(null, null)).thenReturn(new ResultSetIterator(s1)); + Mockito.when(s1.next()) + .thenReturn(resultSet(ByteString.copyFromUtf8("r1"), "a")) + .thenReturn(resultSet(null, "XXXXXX")) + .thenThrow(new RetryableException(errorCodeParameter, "failed by test")); + + ResultSetStream s2 = Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(ByteString.copyFromUtf8("r1"), null)) + .thenReturn(new ResultSetIterator(s2)); + Mockito.when(s2.next()) + .thenReturn(resultSet(null, "b")) + .thenReturn(resultSet(null, "c")) + .thenReturn(null); + assertThat(consume(resumableStreamIterator)).containsExactly("a", "b", "c").inOrder(); + } + + @Test + public void bufferLimitMissingTokensUnsafeToRetry() { + initWithLimit(1); + + ResultSetStream s1 = Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(null, null)).thenReturn(new ResultSetIterator(s1)); + Mockito.when(s1.next()) + .thenReturn(resultSet(ByteString.copyFromUtf8("r1"), "a")) + .thenReturn(resultSet(null, "b")) + .thenReturn(resultSet(null, "c")) + .thenThrow(new RetryableException(errorCodeParameter, "failed by test")); + + assertThat(consumeAtMost(3, resumableStreamIterator)).containsExactly("a", "b", "c").inOrder(); + SpannerException e = assertThrows(SpannerException.class, () -> resumableStreamIterator.next()); + assertThat(e.getErrorCode()).isEqualTo(errorCodeParameter); + } + + @Test + public void bufferLimitMissingTokensSafeToRetry() { + initWithLimit(1); + + ResultSetStream s1 = Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(null, null)).thenReturn(new ResultSetIterator(s1)); + Mockito.when(s1.next()) + .thenReturn(resultSet(ByteString.copyFromUtf8("r1"), "a")) + .thenReturn(resultSet(null, "b")) + .thenReturn(resultSet(ByteString.copyFromUtf8("r3"), "c")) + .thenThrow(new RetryableException(errorCodeParameter, "failed by test")); + + ResultSetStream s2 = 
Mockito.mock(ResultSetStream.class); + Mockito.when(starter.startStream(ByteString.copyFromUtf8("r3"), null)) + .thenReturn(new ResultSetIterator(s2)); + Mockito.when(s2.next()).thenReturn(resultSet(null, "d")).thenReturn(null); + + assertThat(consume(resumableStreamIterator)).containsExactly("a", "b", "c", "d").inOrder(); + } + + static PartialResultSet resultSet(@Nullable ByteString resumeToken, String... data) { + PartialResultSet.Builder builder = PartialResultSet.newBuilder(); + if (resumeToken != null) { + builder.setResumeToken(resumeToken); + } + for (String s : data) { + builder.addValuesBuilder().setStringValue(s); + } + return builder.build(); + } + + static Iterator stringIterator(final Iterator iterator) { + return new AbstractIterator() { + private final LinkedList buffer = new LinkedList<>(); + + @Override + protected String computeNext() { + while (true) { + if (!buffer.isEmpty()) { + return buffer.pop(); + } + if (!iterator.hasNext()) { + endOfData(); + return null; + } + for (Value value : iterator.next().getValuesList()) { + buffer.add(value.getStringValue()); + } + } + } + }; + } + + static List consume(Iterator iterator) { + return Lists.newArrayList(stringIterator(iterator)); + } + + static List consumeAtMost(int n, Iterator iterator) { + Iterator stringIterator = stringIterator(iterator); + List r = new ArrayList<>(n); + for (int i = 0; i < n; ++i) { + if (stringIterator.hasNext()) { + r.add(stringIterator.next()); + } + } + return r; + } + + /** + * Sets a private static final field to a specific value. This is only supported on Java11 and + * lower. 
+ */ + private static void setInternalState(Class c, Object target, String field, Object value) { + try { + Field f = c.getDeclaredField(field); + f.setAccessible(true); + f.set(target, value); + } catch (Exception e) { + throw new RuntimeException("Unable to set internal state on a private field.", e); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/RetryOnDifferentGrpcChannelMockServerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/RetryOnDifferentGrpcChannelMockServerTest.java new file mode 100644 index 000000000000..5f7227592299 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/RetryOnDifferentGrpcChannelMockServerTest.java @@ -0,0 +1,398 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeFalse; + +import com.google.api.gax.grpc.GrpcInterceptorProvider; +import com.google.cloud.NoCredentials; +import com.google.cloud.grpc.GcpManagedChannel; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import com.google.common.collect.ImmutableList; +import com.google.spanner.v1.BatchCreateSessionsRequest; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.Context; +import io.grpc.Deadline; +import io.grpc.ManagedChannelBuilder; +import io.grpc.MethodDescriptor; +import io.grpc.Status; +import java.time.Duration; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class RetryOnDifferentGrpcChannelMockServerTest extends AbstractMockServerTest { + /** Tracks the logical affinity keys before grpc-gcp routes the request. 
*/ + private static final Map> LOGICAL_AFFINITY_KEYS = new HashMap<>(); + + @BeforeClass + public static void setupAndStartServer() throws Exception { + System.setProperty("spanner.retry_deadline_exceeded_on_different_channel", "true"); + // Call the parent's startStaticServer to set up the mock server + AbstractMockServerTest.startStaticServer(); + } + + @AfterClass + public static void removeSystemProperty() { + System.clearProperty("spanner.retry_deadline_exceeded_on_different_channel"); + } + + @After + public void clearRequests() { + LOGICAL_AFFINITY_KEYS.clear(); + mockSpanner.clearRequests(); + mockSpanner.removeAllExecutionTimes(); + } + + /** + * Creates a client interceptor that captures the logical affinity key before grpc-gcp routes the + * request. This allows us to verify that retry logic uses distinct logical channel hints, even + * when DCP maps them to fewer physical channels. + */ + static GrpcInterceptorProvider createAffinityKeyInterceptorProvider() { + return () -> + ImmutableList.of( + new ClientInterceptor() { + @Override + public ClientCall interceptCall( + MethodDescriptor method, CallOptions callOptions, Channel next) { + // Capture the AFFINITY_KEY before grpc-gcp processes it + String affinityKey = callOptions.getOption(GcpManagedChannel.AFFINITY_KEY); + if (affinityKey != null) { + String methodName = method.getFullMethodName(); + synchronized (LOGICAL_AFFINITY_KEYS) { + Set keys = + LOGICAL_AFFINITY_KEYS.computeIfAbsent(methodName, k -> new HashSet<>()); + keys.add(affinityKey); + } + } + return next.newCall(method, callOptions); + } + }); + } + + SpannerOptions.Builder createSpannerOptionsBuilder() { + return SpannerOptions.newBuilder() + .setProjectId("my-project") + .setHost(String.format("http://localhost:%d", getPort())) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setCredentials(NoCredentials.getInstance()) + .setInterceptorProvider(createAffinityKeyInterceptorProvider()); + } + + @Test + public void 
testReadWriteTransaction_retriesOnNewChannel() { + SpannerOptions.Builder builder = createSpannerOptionsBuilder(); + builder.setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setWaitForMinSessionsDuration(Duration.ofSeconds(5L)) + .build()); + mockSpanner.setBeginTransactionExecutionTime( + SimulatedExecutionTime.ofStickyException(Status.DEADLINE_EXCEEDED.asRuntimeException())); + AtomicInteger attempts = new AtomicInteger(); + + try (Spanner spanner = builder.build().getService()) { + assumeFalse( + "RetryOnDifferentGrpcChannel handler is not implemented for read-write with multiplexed" + + " sessions", + isMultiplexedSessionsEnabledForRW(spanner)); + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + client + .readWriteTransaction() + .run( + transaction -> { + if (attempts.incrementAndGet() > 1) { + mockSpanner.setBeginTransactionExecutionTime( + MockSpannerServiceImpl.NO_EXECUTION_TIME); + } + transaction.buffer(Mutation.newInsertBuilder("foo").set("id").to(1L).build()); + return null; + }); + } + assertEquals(2, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + List requests = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertNotEquals(requests.get(0).getSession(), requests.get(1).getSession()); + // Verify that the retry used 2 distinct logical affinity keys (before grpc-gcp routing). 
+ assertEquals( + 2, + LOGICAL_AFFINITY_KEYS + .getOrDefault("google.spanner.v1.Spanner/BeginTransaction", new HashSet<>()) + .size()); + } + + @Test + public void testReadWriteTransaction_stopsRetrying() { + SpannerOptions.Builder builder = createSpannerOptionsBuilder(); + builder.setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setWaitForMinSessionsDuration(Duration.ofSeconds(5L)) + .build()); + mockSpanner.setBeginTransactionExecutionTime( + SimulatedExecutionTime.ofStickyException(Status.DEADLINE_EXCEEDED.asRuntimeException())); + + try (Spanner spanner = builder.build().getService()) { + assumeFalse( + "RetryOnDifferentGrpcChannel handler is not implemented for read-write with multiplexed" + + " sessions", + isMultiplexedSessionsEnabledForRW(spanner)); + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + client + .readWriteTransaction() + .run( + transaction -> { + transaction.buffer( + Mutation.newInsertBuilder("foo").set("id").to(1L).build()); + return null; + })); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, exception.getErrorCode()); + + int numChannels = spanner.getOptions().getNumChannels(); + assertEquals(numChannels, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + List requests = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + Set sessions = + requests.stream().map(BeginTransactionRequest::getSession).collect(Collectors.toSet()); + assertEquals(numChannels, sessions.size()); + // Verify that the retry logic used distinct logical affinity keys (before grpc-gcp routing). + // This confirms each retry attempt targeted a different logical channel. 
+ assertEquals( + numChannels, + LOGICAL_AFFINITY_KEYS + .getOrDefault("google.spanner.v1.Spanner/BeginTransaction", new HashSet<>()) + .size()); + } + } + + @Test + public void testDenyListedChannelIsCleared() { + FakeClock clock = new FakeClock(); + SpannerOptions.Builder builder = createSpannerOptionsBuilder(); + builder.setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setWaitForMinSessionsDuration(Duration.ofSeconds(5)) + .setPoolMaintainerClock(clock) + .build()); + mockSpanner.setBeginTransactionExecutionTime( + SimulatedExecutionTime.ofStickyException(Status.DEADLINE_EXCEEDED.asRuntimeException())); + + try (Spanner spanner = builder.build().getService()) { + assumeFalse( + "RetryOnDifferentGrpcChannel handler is not implemented for read-write with multiplexed" + + " sessions", + isMultiplexedSessionsEnabledForRW(spanner)); + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + // Retry until all channels have been deny-listed. + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + client + .readWriteTransaction() + .run( + transaction -> { + transaction.buffer( + Mutation.newInsertBuilder("foo").set("id").to(1L).build()); + return null; + })); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, exception.getErrorCode()); + + // Now advance the clock by 1 minute. This should clear all deny-listed channels. 
+ clock.currentTimeMillis.addAndGet(TimeUnit.MILLISECONDS.convert(2L, TimeUnit.MINUTES)); + AtomicInteger attempts = new AtomicInteger(); + client + .readWriteTransaction() + .run( + transaction -> { + if (attempts.incrementAndGet() > 1) { + mockSpanner.setBeginTransactionExecutionTime(SimulatedExecutionTime.none()); + } + transaction.buffer(Mutation.newInsertBuilder("foo").set("id").to(1L).build()); + return null; + }); + + int numChannels = spanner.getOptions().getNumChannels(); + // We should have numChannels BeginTransactionRequests from the first transaction, and 2 from + // the second transaction. + assertEquals(numChannels + 2, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + List requests = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + // The requests should all use different sessions, as deny-listing a session will bring it to + // the back of the session pool. + Set sessions = + requests.stream().map(BeginTransactionRequest::getSession).collect(Collectors.toSet()); + // We should have used numChannels+1==5 sessions. The reason for that is that first 3 attempts + // of the first transaction used 3 different sessions, that were then all deny-listed. The + // 4th attempt also failed, but as it would be the last channel to be deny-listed, it was not + // deny-listed and instead added to the front of the pool. + // The first attempt of the second transaction then uses the same session as the last attempt + // of the first transaction. That fails, the session is deny-listed, the transaction is + // retried on yet another session and succeeds. + assertEquals(numChannels + 1, sessions.size()); + // Verify that the retry logic used distinct logical affinity keys (before grpc-gcp routing). + // This confirms each retry attempt targeted a different logical channel. 
+ assertEquals( + numChannels, + LOGICAL_AFFINITY_KEYS + .getOrDefault("google.spanner.v1.Spanner/BeginTransaction", new HashSet<>()) + .size()); + assertEquals(numChannels, mockSpanner.countRequestsOfType(BatchCreateSessionsRequest.class)); + } + } + + @Test + public void testSingleUseQuery_retriesOnNewChannel() { + assumeFalse(TestHelper.isMultiplexSessionDisabled()); + SpannerOptions.Builder builder = createSpannerOptionsBuilder(); + builder.setSessionPoolOption( + SessionPoolOptions.newBuilder().setUseMultiplexedSession(true).build()); + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException(Status.DEADLINE_EXCEEDED.asRuntimeException())); + + try (Spanner spanner = builder.build().getService()) { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT1_STATEMENT)) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + } + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + // The requests use the same multiplexed session. + assertEquals(requests.get(0).getSession(), requests.get(1).getSession()); + // Verify that the retry used 2 distinct logical affinity keys (before grpc-gcp routing). 
+ assertEquals( + 2, + LOGICAL_AFFINITY_KEYS + .getOrDefault("google.spanner.v1.Spanner/ExecuteStreamingSql", new HashSet<>()) + .size()); + } + + @Test + public void testSingleUseQuery_stopsRetrying() { + assumeFalse(TestHelper.isMultiplexSessionDisabled()); + SpannerOptions.Builder builder = createSpannerOptionsBuilder(); + builder.setSessionPoolOption( + SessionPoolOptions.newBuilder().setUseMultiplexedSession(true).build()); + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofStickyException(Status.DEADLINE_EXCEEDED.asRuntimeException())); + + try (Spanner spanner = builder.build().getService()) { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT1_STATEMENT)) { + SpannerException exception = assertThrows(SpannerException.class, resultSet::next); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, exception.getErrorCode()); + } + int numChannels = spanner.getOptions().getNumChannels(); + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + // The requests use the same multiplexed session. + String session = requests.get(0).getSession(); + for (ExecuteSqlRequest request : requests) { + assertEquals(session, request.getSession()); + } + // Verify that the retry mechanism is working (made numChannels requests). + int totalRequests = mockSpanner.countRequestsOfType(ExecuteSqlRequest.class); + assertEquals(numChannels, totalRequests); + // Verify each attempt used a distinct logical affinity key (before grpc-gcp routing). 
+ int distinctLogicalKeys = + LOGICAL_AFFINITY_KEYS + .getOrDefault("google.spanner.v1.Spanner/ExecuteStreamingSql", new HashSet<>()) + .size(); + assertEquals(totalRequests, distinctLogicalKeys); + } + } + + @Test + public void testReadWriteTransaction_withGrpcContextDeadline_doesNotRetry() { + SpannerOptions.Builder builder = createSpannerOptionsBuilder(); + builder.setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setWaitForMinSessionsDuration(Duration.ofSeconds(5L)) + .build()); + mockSpanner.setBeginTransactionExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(500, 500)); + + try (Spanner spanner = builder.build().getService()) { + assumeFalse( + "RetryOnDifferentGrpcChannel handler is not implemented for read-write with multiplexed" + + " sessions", + isMultiplexedSessionsEnabledForRW(spanner)); + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + ScheduledExecutorService service = Executors.newScheduledThreadPool(1); + Context context = + Context.current().withDeadline(Deadline.after(50L, TimeUnit.MILLISECONDS), service); + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + context.run( + () -> + client + .readWriteTransaction() + .run( + transaction -> { + transaction.buffer( + Mutation.newInsertBuilder("foo").set("id").to(1L).build()); + return null; + }))); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, exception.getErrorCode()); + } + // A gRPC context deadline will still cause the underlying error handler to try to retry the + // transaction on a new channel, but as the deadline has been exceeded even before those RPCs + // are being executed, the RPC invocation will be skipped, and the error will eventually bubble + // up. 
+ assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + } + + private boolean isMultiplexedSessionsEnabledForRW(Spanner spanner) { + if (spanner.getOptions() == null || spanner.getOptions().getSessionPoolOptions() == null) { + return false; + } + return spanner.getOptions().getSessionPoolOptions().getUseMultiplexedSessionForRW(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/RetryableInternalErrorTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/RetryableInternalErrorTest.java new file mode 100644 index 000000000000..2e9d4185cb99 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/RetryableInternalErrorTest.java @@ -0,0 +1,95 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import com.google.spanner.v1.CreateSessionRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Status; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.threeten.bp.Duration; + +@RunWith(JUnit4.class) +public class RetryableInternalErrorTest extends AbstractMockServerTest { + @Test + public void testTranslateInternalException() { + mockSpanner.setCreateSessionExecutionTime( + SimulatedExecutionTime.ofException( + Status.INTERNAL + .withDescription("Authentication backend internal server error. Please retry.") + .asRuntimeException())); + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException( + Status.INTERNAL + .withDescription("Authentication backend internal server error. Please retry.") + .asRuntimeException())); + + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId("my-project") + .setHost(String.format("http://localhost:%d", getPort())) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setMinSessions(1) + .setMaxSessions(1) + .setWaitForMinSessions(Duration.ofSeconds(5)) + .build()) + .build() + .getService()) { + + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + // Execute a query. This will block until a BatchCreateSessions call has finished and then + // invoke ExecuteStreamingSql. Both of these RPCs should be retried. 
+ try (ResultSet resultSet = client.singleUse().executeQuery(SELECT1_STATEMENT)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + // Verify that both the CreateSession call and the ExecuteStreamingSql call were + // retried. + assertEquals(2, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + // Clear the requests before the next test. + mockSpanner.clearRequests(); + + // Execute a DML statement. This uses the ExecuteSql RPC. + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofException( + Status.INTERNAL + .withDescription("Authentication backend internal server error. Please retry.") + .asRuntimeException())); + assertEquals( + Long.valueOf(1L), + client + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(INSERT_STATEMENT))); + // Verify that also this request was retried. + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SamplesMockServerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SamplesMockServerTest.java new file mode 100644 index 000000000000..61c8d0573d7b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SamplesMockServerTest.java @@ -0,0 +1,76 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; + +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.StatusCode; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import java.time.Duration; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Tests for samples that use an in-mem mock server instead of running on real Cloud Spanner. */ +@RunWith(JUnit4.class) +public class SamplesMockServerTest extends AbstractMockServerTest { + + @Test + public void testSampleRetrySettings() { + String sql = + "INSERT INTO Singers (SingerId, FirstName, LastName)\n" + + "VALUES (20, 'George', 'Washington')"; + mockSpanner.putStatementResult(StatementResult.update(Statement.of(sql), 1L)); + + SpannerOptions.Builder builder = + SpannerOptions.newBuilder() + .setProjectId("p") + .setCredentials(NoCredentials.getInstance()) + .setChannelProvider(channelProvider); + // Set a timeout value for the ExecuteSql RPC that is so low that it will always be triggered. + // This should cause the RPC to fail with a DEADLINE_EXCEEDED error. 
+ builder + .getSpannerStubSettingsBuilder() + .executeSqlSettings() + .setRetryableCodes(StatusCode.Code.UNAVAILABLE) + .setRetrySettings( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(500)) + .setMaxRetryDelayDuration(Duration.ofSeconds(16)) + .setRetryDelayMultiplier(1.5) + .setInitialRpcTimeoutDuration(Duration.ofNanos(1L)) + .setMaxRpcTimeoutDuration(Duration.ofNanos(1L)) + .setRpcTimeoutMultiplier(1.0) + .setTotalTimeoutDuration(Duration.ofNanos(1L)) + .build()); + // Create a Spanner client using the custom retry and timeout settings. + try (Spanner spanner = builder.build().getService()) { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + client + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(Statement.of(sql)))); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, exception.getErrorCode()); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SelectRandomBenchmark.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SelectRandomBenchmark.java new file mode 100644 index 000000000000..e1f93d334dc8 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SelectRandomBenchmark.java @@ -0,0 +1,141 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.cloud.NoCredentials; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningScheduledExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import org.openjdk.jmh.annotations.AuxCounters; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.TearDown; +import org.openjdk.jmh.annotations.Warmup; + +/** + * Benchmarks for common session pool scenarios. The simulated execution times are based on + * reasonable estimates and are primarily intended to keep the benchmarks comparable with each other + * before and after changes have been made to the pool. 
The benchmarks are bound to the Maven + * profile `benchmark` and can be executed like this: + * mvn clean test -DskipTests -Pbenchmark -Dbenchmark.name=SelectRandomBenchmark + * + */ +@BenchmarkMode(Mode.AverageTime) +@Fork(value = 1, warmups = 0) +@Measurement(batchSize = 1, iterations = 1, timeUnit = TimeUnit.MILLISECONDS) +@Warmup(batchSize = 0, iterations = 0) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +public class SelectRandomBenchmark { + private static final String TEST_PROJECT = "my-project"; + private static final String TEST_INSTANCE = "my-instance"; + private static final String TEST_DATABASE = "my-database"; + private static final int RND_WAIT_TIME_BETWEEN_REQUESTS = 10; + private static final Random RND = new Random(); + + @State(Scope.Thread) + @AuxCounters(org.openjdk.jmh.annotations.AuxCounters.Type.EVENTS) + public static class BenchmarkState { + private StandardBenchmarkMockServer mockServer; + private Spanner spanner; + private DatabaseClientImpl client; + + @Param({"100"}) + int minSessions; + + @Param({"400"}) + int maxSessions; + + @Setup(Level.Invocation) + public void setup() throws Exception { + mockServer = new StandardBenchmarkMockServer(); + TransportChannelProvider channelProvider = mockServer.start(); + + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId(TEST_PROJECT) + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setMinSessions(minSessions) + .setMaxSessions(maxSessions) + .build()) + .build(); + + spanner = options.getService(); + client = + (DatabaseClientImpl) + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + // Wait until the session pool has initialized. 
+ while (client.multiplexedSessionDatabaseClient.getCurrentSessionReference() == null) { + Thread.sleep(1L); + } + } + + @TearDown(Level.Invocation) + public void teardown() throws Exception { + spanner.close(); + mockServer.shutdown(); + } + } + + /** Measures the time needed to execute a burst of read requests. */ + @Benchmark + public void burstRead(final BenchmarkState server) throws Exception { + int totalQueries = server.maxSessions * 8; + int parallelThreads = server.maxSessions * 2; + final DatabaseClient client = + server.spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + + ListeningScheduledExecutorService service = + MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(parallelThreads)); + List> futures = new ArrayList<>(totalQueries); + for (int i = 0; i < totalQueries; i++) { + futures.add( + service.submit( + () -> { + Thread.sleep(RND.nextInt(RND_WAIT_TIME_BETWEEN_REQUESTS)); + try (ResultSet rs = + client.singleUse().executeQuery(StandardBenchmarkMockServer.SELECT_RANDOM)) { + while (rs.next()) { + // Get the entire current row and convert to String. + rs.getCurrentRowAsStruct().toString(); + } + return null; + } + })); + } + Futures.allAsList(futures).get(); + service.shutdown(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SerialIntegrationTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SerialIntegrationTest.java new file mode 100644 index 000000000000..1aab60acf990 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SerialIntegrationTest.java @@ -0,0 +1,20 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +/** Serial Integration Test interface. */ +public interface SerialIntegrationTest {} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionClientTests.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionClientTests.java new file mode 100644 index 000000000000..07a76970bfd0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionClientTests.java @@ -0,0 +1,514 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.api.core.NanoClock; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.grpc.GrpcTransportOptions; +import com.google.cloud.grpc.GrpcTransportOptions.ExecutorFactory; +import com.google.cloud.spanner.SessionClient.SessionConsumer; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.cloud.spanner.spi.v1.SpannerRpc.Option; +import com.google.common.collect.ImmutableMap; +import io.opencensus.trace.Tracing; +import io.opentelemetry.api.OpenTelemetry; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; + +@RunWith(Parameterized.class) +public class SessionClientTests { + private final class TestExecutorFactory implements ExecutorFactory { + @Override + public ScheduledExecutorService get() { + return 
Executors.newScheduledThreadPool(spanner.getOptions().getNumChannels()); + } + + @Override + public void release(ScheduledExecutorService executor) { + executor.shutdown(); + try { + executor.awaitTermination(10_000L, TimeUnit.SECONDS); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + + @Parameters(name = "NumChannels = {0}") + public static Collection data() { + return Arrays.asList(new Object[][] {{1}, {2}, {4}, {8}}); + } + + @Parameter public int numChannels; + + private final String dbName = "projects/p1/instances/i1/databases/d1"; + @Mock private SpannerImpl spanner; + @Mock private SpannerRpc rpc; + @Mock private SpannerOptions spannerOptions; + private final TraceWrapper tracer = + new TraceWrapper(Tracing.getTracer(), OpenTelemetry.noop().getTracer(""), false); + @Mock private ISpan span; + @Captor ArgumentCaptor> options; + + @Before + public void setUp() { + MockitoAnnotations.initMocks(this); + GrpcTransportOptions transportOptions = mock(GrpcTransportOptions.class); + when(transportOptions.getExecutorFactory()) + .thenReturn( + new ExecutorFactory() { + @Override + public void release(ScheduledExecutorService executor) { + executor.shutdown(); + } + + @Override + public ScheduledExecutorService get() { + return new ScheduledThreadPoolExecutor(2); + } + }); + when(spannerOptions.getTransportOptions()).thenReturn(transportOptions); + when(spannerOptions.getNumChannels()).thenReturn(numChannels); + when(spannerOptions.getPrefetchChunks()).thenReturn(1); + when(spannerOptions.getDatabaseRole()).thenReturn("role"); + when(spannerOptions.getRetrySettings()).thenReturn(RetrySettings.newBuilder().build()); + when(spannerOptions.getClock()).thenReturn(NanoClock.getDefaultClock()); + when(spanner.getOptions()).thenReturn(spannerOptions); + when(spanner.getTracer()).thenReturn(tracer); + doNothing().when(span).setStatus(any(Throwable.class)); + doNothing().when(span).end(); + doNothing().when(span).addAnnotation("Starting 
Commit"); + when(spanner.getRpc()).thenReturn(rpc); + SessionPoolOptions sessionPoolOptions = mock(SessionPoolOptions.class); + when(sessionPoolOptions.getPoolMaintainerClock()).thenReturn(Clock.INSTANCE); + when(spannerOptions.getSessionPoolOptions()).thenReturn(sessionPoolOptions); + } + + @Test + public void createAndCloseSession() { + DatabaseId db = DatabaseId.of(dbName); + String sessionName = dbName + "/sessions/s1"; + Map labels = new HashMap<>(); + labels.put("env", "dev"); + String databaseRole = "role"; + when(spannerOptions.getSessionLabels()).thenReturn(labels); + when(spannerOptions.getDatabaseRole()).thenReturn(databaseRole); + com.google.spanner.v1.Session sessionProto = + com.google.spanner.v1.Session.newBuilder() + .setName(sessionName) + .putAllLabels(labels) + .build(); + when(rpc.createSession( + Mockito.eq(dbName), Mockito.eq(databaseRole), Mockito.eq(labels), options.capture())) + .thenReturn(sessionProto); + + try (SessionClient client = new SessionClient(spanner, db, new TestExecutorFactory())) { + Session session = client.createSession(); + assertThat(session.getName()).isEqualTo(sessionName); + + session.close(); + + final ArgumentCaptor> deleteOptionsCaptor = + ArgumentCaptor.forClass(Map.class); + final ArgumentCaptor sessionNameCaptor = ArgumentCaptor.forClass(String.class); + Mockito.verify(rpc).deleteSession(sessionNameCaptor.capture(), deleteOptionsCaptor.capture()); + assertEquals(sessionName, sessionNameCaptor.getValue()); + // The same channelHint is passed for deleteSession (contained in "options"). 
+ assertEquals( + deleteOptionsCaptor.getValue().get(SpannerRpc.Option.CHANNEL_HINT), + options.getValue().get(SpannerRpc.Option.CHANNEL_HINT)); + } + } + + @Test + public void createAndCloseMultiplexedSession() { + DatabaseId db = DatabaseId.of(dbName); + String sessionName = dbName + "/sessions/s1"; + Map labels = ImmutableMap.of("env", "dev"); + String databaseRole = "role"; + when(spannerOptions.getSessionLabels()).thenReturn(labels); + when(spannerOptions.getDatabaseRole()).thenReturn(databaseRole); + com.google.spanner.v1.Session sessionProto = + com.google.spanner.v1.Session.newBuilder() + .setName(sessionName) + .setMultiplexed(true) + .putAllLabels(labels) + .build(); + when(rpc.createSession( + Mockito.eq(dbName), + Mockito.eq(databaseRole), + Mockito.eq(labels), + options.capture(), + Mockito.eq(true))) + .thenReturn(sessionProto); + final AtomicInteger returnedSessionCount = new AtomicInteger(); + final SessionConsumer consumer = + new SessionConsumer() { + @Override + public void onSessionReady(SessionImpl session) { + assertEquals(sessionName, session.getName()); + returnedSessionCount.incrementAndGet(); + + session.close(); + Mockito.verify(rpc).deleteSession(sessionName, options.getValue()); + } + + @Override + public void onSessionCreateFailure(Throwable t, int createFailureForSessionCount) {} + }; + try (SessionClient client = new SessionClient(spanner, db, new TestExecutorFactory())) { + client.createMultiplexedSession(consumer); + } + // for multiplexed session there is no channel hint pass in the RPC options + assertNull(options.getValue()); + assertEquals(1, returnedSessionCount.get()); + } + + @Test + public void createAndCloseMultiplexedSession_whenRPCThrowsException_thenAssertException() { + DatabaseId db = DatabaseId.of(dbName); + Map labels = ImmutableMap.of("env", "dev"); + String databaseRole = "role"; + when(spannerOptions.getSessionLabels()).thenReturn(labels); + when(spannerOptions.getDatabaseRole()).thenReturn(databaseRole); + 
when(rpc.createSession( + Mockito.eq(dbName), + Mockito.eq(databaseRole), + Mockito.eq(labels), + options.capture(), + Mockito.eq(true))) + .thenThrow(RuntimeException.class); + final SessionConsumer consumer = + new SessionConsumer() { + @Override + public void onSessionReady(SessionImpl session) {} + + @Override + public void onSessionCreateFailure(Throwable t, int createFailureForSessionCount) { + assertTrue(t instanceof RuntimeException); + } + }; + try (SessionClient client = new SessionClient(spanner, db, new TestExecutorFactory())) { + client.createMultiplexedSession(consumer); + } + // for multiplexed session there is no channel hint pass in the RPC options + assertNull(options.getValue()); + } + + @SuppressWarnings("unchecked") + @Test + public void batchCreateAndCloseSessions() { + DatabaseId db = DatabaseId.of(dbName); + final String sessionName = dbName + "/sessions/s%d"; + final Map labels = new HashMap<>(); + labels.put("env", "dev"); + when(spannerOptions.getSessionLabels()).thenReturn(labels); + String databaseRole = new String("role"); + when(spannerOptions.getDatabaseRole()).thenReturn(databaseRole); + final List usedChannels = Collections.synchronizedList(new ArrayList<>()); + when(rpc.batchCreateSessions( + Mockito.eq(dbName), + Mockito.anyInt(), + Mockito.eq(databaseRole), + Mockito.eq(labels), + Mockito.anyMap())) + .then( + invocation -> { + Map options = invocation.getArgument(4, Map.class); + Long channelHint = (Long) options.get(Option.CHANNEL_HINT); + usedChannels.add(channelHint); + int sessionCount = invocation.getArgument(1, Integer.class); + List res = new ArrayList<>(); + for (int i = 1; i <= sessionCount; i++) { + res.add( + com.google.spanner.v1.Session.newBuilder() + .setName(String.format(sessionName, i)) + .putAllLabels(labels) + .build()); + } + return res; + }); + + final AtomicInteger returnedSessionCount = new AtomicInteger(); + SessionConsumer consumer = + new SessionConsumer() { + @Override + public void 
onSessionReady(SessionImpl session) { + assertThat(session.getName()).startsWith(dbName + "/sessions/s"); + returnedSessionCount.incrementAndGet(); + session.close(); + } + + @Override + public void onSessionCreateFailure(Throwable t, int createFailureForSessionCount) {} + }; + final int numSessions = 10; + try (SessionClient client = new SessionClient(spanner, db, new TestExecutorFactory())) { + client.asyncBatchCreateSessions(numSessions, true, consumer); + } + assertThat(returnedSessionCount.get()).isEqualTo(numSessions); + assertThat(usedChannels.size()).isEqualTo(spannerOptions.getNumChannels()); + List expectedChannels = new ArrayList<>(); + for (long l = 0; l < spannerOptions.getNumChannels(); l++) { + expectedChannels.add(l); + } + assertThat(usedChannels).containsExactlyElementsIn(expectedChannels); + } + + /** + * Tests that multiple consecutive calls to {@link SessionClient#asyncBatchCreateSessions(int, + * boolean, SessionConsumer)} with distributeOverChannels=false does not distribute one batch over + * multiple channels, but it does assign each new call to a new channel. This means that multiple + * calls to this method will still distribute the total set of sessions over all available + * channels. 
+ */ + @SuppressWarnings("unchecked") + @Test + public void batchCreateSessionsDistributesMultipleRequestsOverChannels() { + DatabaseId db = DatabaseId.of(dbName); + final String sessionName = dbName + "/sessions/s%d"; + final Map labels = Collections.emptyMap(); + when(spannerOptions.getSessionLabels()).thenReturn(labels); + when(spannerOptions.getDatabaseRole()).thenReturn("role"); + final Set usedChannelHints = Collections.synchronizedSet(new HashSet<>()); + when(rpc.batchCreateSessions( + Mockito.eq(dbName), + Mockito.anyInt(), + Mockito.anyString(), + Mockito.eq(labels), + Mockito.anyMap())) + .then( + invocation -> { + Map options = invocation.getArgument(4, Map.class); + Long channelHint = (Long) options.get(Option.CHANNEL_HINT); + usedChannelHints.add(channelHint); + int sessionCount = invocation.getArgument(1, Integer.class); + List res = new ArrayList<>(); + for (int i = 1; i <= sessionCount; i++) { + res.add( + com.google.spanner.v1.Session.newBuilder() + .setName(String.format(sessionName, i)) + .putAllLabels(labels) + .setCreatorRole("role") + .build()); + } + return res; + }); + + final AtomicInteger returnedSessionCount = new AtomicInteger(); + SessionConsumer consumer = + new SessionConsumer() { + @Override + public void onSessionReady(SessionImpl session) { + assertThat(session.getName()).startsWith(dbName + "/sessions/s"); + returnedSessionCount.incrementAndGet(); + session.close(); + } + + @Override + public void onSessionCreateFailure(Throwable t, int createFailureForSessionCount) {} + }; + final int numSessions = 10; + final int numBatches = spannerOptions.getNumChannels() * 2; + try (SessionClient client = new SessionClient(spanner, db, new TestExecutorFactory())) { + for (int batch = 0; batch < numBatches; batch++) { + client.asyncBatchCreateSessions(numSessions, false, consumer); + } + } + assertThat(returnedSessionCount.get()).isEqualTo(numSessions * numBatches); + 
assertThat(usedChannelHints.size()).isEqualTo(spannerOptions.getNumChannels() * 2); + List expectedChannels = new ArrayList<>(); + for (long l = 0; l < spannerOptions.getNumChannels() * 2; l++) { + expectedChannels.add(l); + } + assertThat(usedChannelHints).containsExactlyElementsIn(expectedChannels); + } + + private enum AddRemoveSetException { + SET, + ADD, + REMOVE + } + + @SuppressWarnings("unchecked") + @Test + public void batchCreateSessionsWithExceptions() { + for (AddRemoveSetException behavior : AddRemoveSetException.values()) { + final List errorOnChannels = new ArrayList<>(); + if (behavior == AddRemoveSetException.REMOVE) { + for (int c = 0; c < spannerOptions.getNumChannels(); c++) { + errorOnChannels.add((long) c); + } + } + for (int errorOnChannel = 0; + errorOnChannel < spannerOptions.getNumChannels(); + errorOnChannel++) { + switch (behavior) { + case SET: + errorOnChannels.clear(); + case ADD: + errorOnChannels.add((long) errorOnChannel); + break; + case REMOVE: + errorOnChannels.remove(Long.valueOf(errorOnChannel)); + break; + default: + throw new IllegalStateException(); + } + DatabaseId db = DatabaseId.of(dbName); + final String sessionName = dbName + "/sessions/s%d"; + when(rpc.batchCreateSessions( + Mockito.eq(dbName), + Mockito.anyInt(), + Mockito.anyString(), + Mockito.anyMap(), + Mockito.anyMap())) + .then( + invocation -> { + Map options = invocation.getArgument(4, Map.class); + Long channelHint = (Long) options.get(Option.CHANNEL_HINT); + if (errorOnChannels.contains(channelHint)) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.RESOURCE_EXHAUSTED, "could not create any more sessions"); + } else { + int sessionCount = invocation.getArgument(1, Integer.class); + List res = new ArrayList<>(); + for (int i = 1; i <= sessionCount; i++) { + res.add( + com.google.spanner.v1.Session.newBuilder() + .setName(String.format(sessionName, i)) + .build()); + } + return res; + } + }); + + final AtomicInteger errorForSessionsCount = 
new AtomicInteger(); + final AtomicInteger errorCount = new AtomicInteger(); + final AtomicInteger returnedSessionCount = new AtomicInteger(); + SessionConsumer consumer = + new SessionConsumer() { + @Override + public void onSessionReady(SessionImpl session) { + assertThat(session.getName()).startsWith(dbName + "/sessions/s"); + returnedSessionCount.incrementAndGet(); + session.close(); + } + + @Override + public void onSessionCreateFailure(Throwable t, int createFailureForSessionCount) { + assertThat(t).isInstanceOf(SpannerException.class); + SpannerException e = (SpannerException) t; + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.RESOURCE_EXHAUSTED); + errorCount.incrementAndGet(); + errorForSessionsCount.addAndGet(createFailureForSessionCount); + } + }; + final int numSessions = 10; + try (SessionClient client = new SessionClient(spanner, db, new TestExecutorFactory())) { + client.asyncBatchCreateSessions(numSessions, true, consumer); + } + assertThat(errorCount.get()).isEqualTo(errorOnChannels.size()); + assertThat(returnedSessionCount.get()) + .isAtLeast( + numSessions + - ((numSessions / spannerOptions.getNumChannels()) * errorOnChannels.size() + + numSessions % spannerOptions.getNumChannels())); + assertThat(returnedSessionCount.get() + errorForSessionsCount.get()).isEqualTo(numSessions); + } + } + } + + @SuppressWarnings("unchecked") + @Test + public void batchCreateSessionsServerReturnsLessSessionsPerBatch() { + final int MAX_SESSIONS_PER_BATCH = 5; + DatabaseId db = DatabaseId.of(dbName); + final String sessionName = dbName + "/sessions/s%d"; + when(rpc.batchCreateSessions( + Mockito.eq(dbName), + Mockito.anyInt(), + Mockito.anyString(), + Mockito.anyMap(), + Mockito.anyMap())) + .then( + invocation -> { + int sessionCount = invocation.getArgument(1, Integer.class); + List res = new ArrayList<>(); + for (int i = 1; i <= Math.min(MAX_SESSIONS_PER_BATCH, sessionCount); i++) { + res.add( + com.google.spanner.v1.Session.newBuilder() + 
.setName(String.format(sessionName, i)) + .build()); + } + return res; + }); + + final AtomicInteger returnedSessionCount = new AtomicInteger(); + SessionConsumer consumer = + new SessionConsumer() { + @Override + public void onSessionReady(SessionImpl session) { + assertThat(session.getName()).startsWith(dbName + "/sessions/s"); + returnedSessionCount.incrementAndGet(); + session.close(); + } + + @Override + public void onSessionCreateFailure(Throwable t, int createFailureForSessionCount) {} + }; + // We want 100 sessions, but each rpc will only return 5. The consumer should still get 100 + // sessions. + final int numSessions = 100; + try (SessionClient client = new SessionClient(spanner, db, new TestExecutorFactory())) { + client.asyncBatchCreateSessions(numSessions, true, consumer); + } + assertThat(returnedSessionCount.get()).isEqualTo(numSessions); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionImplTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionImplTest.java new file mode 100644 index 000000000000..53ab2c333d68 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionImplTest.java @@ -0,0 +1,551 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.api.core.ApiFutures; +import com.google.api.core.NanoClock; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.cloud.Timestamp; +import com.google.cloud.grpc.GrpcTransportOptions; +import com.google.cloud.grpc.GrpcTransportOptions.ExecutorFactory; +import com.google.cloud.spanner.XGoogSpannerRequestId.NoopRequestIdCreator; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.cloud.spanner.v1.stub.SpannerStubSettings; +import com.google.protobuf.ByteString; +import com.google.protobuf.Empty; +import com.google.protobuf.ListValue; +import com.google.protobuf.util.Timestamps; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.CommitResponse; +import com.google.spanner.v1.Mutation.Write; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.RequestOptions; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.Session; +import com.google.spanner.v1.Transaction; +import com.google.spanner.v1.TransactionOptions; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.context.Scope; +import java.text.ParseException; +import java.util.Calendar; +import java.util.Collections; +import 
java.util.GregorianCalendar; +import java.util.Map; +import java.util.TimeZone; +import java.util.concurrent.TimeUnit; +import javax.annotation.Nullable; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; + +/** Unit tests for {@link com.google.cloud.spanner.SessionImpl}. */ +@RunWith(JUnit4.class) +public class SessionImplTest { + @Mock private SpannerRpc rpc; + @Mock private SpannerOptions spannerOptions; + private com.google.cloud.spanner.Session session; + @Captor private ArgumentCaptor> optionsCaptor; + private Map options; + + @BeforeClass + public static void setupOpenTelemetry() { + SpannerOptions.resetActiveTracingFramework(); + SpannerOptions.enableOpenTelemetryTraces(); + } + + @SuppressWarnings("unchecked") + @Before + public void setUp() { + MockitoAnnotations.initMocks(this); + when(spannerOptions.getNumChannels()).thenReturn(4); + when(spannerOptions.getDefaultTransactionOptions()) + .thenReturn(TransactionOptions.getDefaultInstance()); + when(spannerOptions.getPrefetchChunks()).thenReturn(1); + when(spannerOptions.getDatabaseRole()).thenReturn("role"); + when(spannerOptions.getRetrySettings()).thenReturn(RetrySettings.newBuilder().build()); + when(spannerOptions.getClock()).thenReturn(NanoClock.getDefaultClock()); + when(spannerOptions.getSessionLabels()).thenReturn(Collections.emptyMap()); + GrpcTransportOptions transportOptions = mock(GrpcTransportOptions.class); + when(transportOptions.getExecutorFactory()).thenReturn(mock(ExecutorFactory.class)); + when(spannerOptions.getTransportOptions()).thenReturn(transportOptions); + SessionPoolOptions sessionPoolOptions = mock(SessionPoolOptions.class); + when(sessionPoolOptions.getPoolMaintainerClock()).thenReturn(Clock.INSTANCE); + 
when(spannerOptions.getSessionPoolOptions()).thenReturn(sessionPoolOptions); + when(spannerOptions.getOpenTelemetry()).thenReturn(OpenTelemetry.noop()); + @SuppressWarnings("resource") + SpannerImpl spanner = new SpannerImpl(rpc, spannerOptions); + String dbName = "projects/p1/instances/i1/databases/d1"; + String sessionName = dbName + "/sessions/s1"; + DatabaseId db = DatabaseId.of(dbName); + + Session sessionProto = Session.newBuilder().setName(sessionName).build(); + Mockito.when( + rpc.createSession( + Mockito.eq(dbName), Mockito.anyString(), Mockito.anyMap(), optionsCaptor.capture())) + .thenReturn(sessionProto); + Transaction txn = Transaction.newBuilder().setId(ByteString.copyFromUtf8("TEST")).build(); + Mockito.when( + rpc.beginTransactionAsync( + Mockito.any(BeginTransactionRequest.class), + Mockito.any(Map.class), + Mockito.anyBoolean())) + .thenReturn(ApiFutures.immediateFuture(txn)); + CommitResponse commitResponse = + CommitResponse.newBuilder() + .setCommitTimestamp(com.google.protobuf.Timestamp.getDefaultInstance()) + .build(); + Mockito.when(rpc.commitAsync(Mockito.any(CommitRequest.class), Mockito.any(Map.class))) + .thenReturn(ApiFutures.immediateFuture(commitResponse)); + Mockito.when(rpc.rollbackAsync(Mockito.any(RollbackRequest.class), Mockito.anyMap())) + .thenReturn(ApiFutures.immediateFuture(Empty.getDefaultInstance())); + when(rpc.getReadRetrySettings()) + .thenReturn(SpannerStubSettings.newBuilder().streamingReadSettings().getRetrySettings()); + when(rpc.getReadRetryableCodes()) + .thenReturn(SpannerStubSettings.newBuilder().streamingReadSettings().getRetryableCodes()); + when(rpc.getExecuteQueryRetrySettings()) + .thenReturn( + SpannerStubSettings.newBuilder().executeStreamingSqlSettings().getRetrySettings()); + when(rpc.getExecuteQueryRetryableCodes()) + .thenReturn( + SpannerStubSettings.newBuilder().executeStreamingSqlSettings().getRetryableCodes()); + when(rpc.getCommitRetrySettings()) + 
.thenReturn(SpannerStubSettings.newBuilder().commitSettings().getRetrySettings()); + when(rpc.getRequestIdCreator()).thenReturn(NoopRequestIdCreator.INSTANCE); + session = spanner.getSessionClient(db).createSession(); + Span oTspan = mock(Span.class); + ISpan span = new OpenTelemetrySpan(oTspan); + when(oTspan.makeCurrent()).thenReturn(mock(Scope.class)); + ((SessionImpl) session).setCurrentSpan(span); + // We expect the same options, "options", on all calls on "session". + options = optionsCaptor.getValue(); + } + + private void doNestedRwTransaction() { + session + .readWriteTransaction() + .run( + transaction -> { + session.readWriteTransaction().run(transaction1 -> null); + + return null; + }); + } + + @Test + public void testBeginTransactionWithClientContext() { + RequestOptions.ClientContext clientContext = + RequestOptions.ClientContext.newBuilder() + .putSecureContext( + "key", com.google.protobuf.Value.newBuilder().setStringValue("value").build()) + .build(); + Mockito.when( + rpc.beginTransactionAsync( + Mockito.any(BeginTransactionRequest.class), anyMap(), eq(true))) + .thenReturn( + ApiFutures.immediateFuture( + Transaction.newBuilder().setId(ByteString.copyFromUtf8("tx")).build())); + + ((SessionImpl) session) + .beginTransactionAsync( + Options.fromTransactionOptions( + Options.priority(Options.RpcPriority.HIGH), + Options.tag("tag"), + Options.clientContext(clientContext)), + true, + Collections.emptyMap(), + null, + null); + + ArgumentCaptor requestCaptor = + ArgumentCaptor.forClass(BeginTransactionRequest.class); + Mockito.verify(rpc).beginTransactionAsync(requestCaptor.capture(), anyMap(), eq(true)); + BeginTransactionRequest request = requestCaptor.getValue(); + RequestOptions requestOptions = request.getRequestOptions(); + assertEquals(RequestOptions.Priority.PRIORITY_HIGH, requestOptions.getPriority()); + // TransactionTag should NOT be set because session is not multiplexed. 
+ assertEquals("", requestOptions.getTransactionTag()); + assertEquals(clientContext, requestOptions.getClientContext()); + } + + @Test + public void nestedReadWriteTxnThrows() { + SpannerException e = assertThrows(SpannerException.class, () -> doNestedRwTransaction()); + assertEquals(ErrorCode.INTERNAL, e.getErrorCode()); + assertThat(e.getMessage()).contains("not supported"); + } + + @Test + public void nestedReadOnlyTxnThrows() { + SpannerException e = + assertThrows( + SpannerException.class, + () -> + session + .readWriteTransaction() + .run( + transaction -> { + session.readOnlyTransaction().getReadTimestamp(); + return null; + })); + assertEquals(ErrorCode.INTERNAL, e.getErrorCode()); + assertThat(e.getMessage()).contains("not supported"); + } + + @Test + public void nestedSingleUseReadTxnThrows() { + SpannerException e = + assertThrows( + SpannerException.class, + () -> + session + .readWriteTransaction() + .run( + transaction -> { + session.singleUseReadOnlyTransaction(); + return null; + })); + assertEquals(ErrorCode.INTERNAL, e.getErrorCode()); + assertThat(e.getMessage()).contains("not supported"); + } + + @Test + public void nestedTxnSucceedsWhenAllowed() { + session + .readWriteTransaction() + .allowNestedTransaction() + .run( + transaction -> { + session.singleUseReadOnlyTransaction(); + return null; + }); + } + + @Test + public void writeAtLeastOnce() throws ParseException { + String timestampString = "2015-10-01T10:54:20.021Z"; + com.google.protobuf.Timestamp t = Timestamps.parse(timestampString); + Transaction txnMetadata = Transaction.newBuilder().setReadTimestamp(t).build(); + Mockito.when(rpc.beginTransaction(Mockito.any(), Mockito.eq(options), eq(false))) + .thenReturn(txnMetadata); + ArgumentCaptor commit = ArgumentCaptor.forClass(CommitRequest.class); + CommitResponse response = + CommitResponse.newBuilder().setCommitTimestamp(Timestamps.parse(timestampString)).build(); + Mockito.when(rpc.commit(commit.capture(), 
anyMap())).thenReturn(response); + + Timestamp timestamp = + session.writeAtLeastOnce( + Collections.singletonList(Mutation.newInsertBuilder("T").set("C").to("x").build())); + assertThat(timestamp.getSeconds()) + .isEqualTo(utcTimeSeconds(2015, Calendar.OCTOBER, 1, 10, 54, 20)); + assertThat(timestamp.getNanos()).isEqualTo(TimeUnit.MILLISECONDS.toNanos(21)); + + CommitRequest request = commit.getValue(); + assertThat(request.getSingleUseTransaction()).isNotNull(); + assertThat(request.getSingleUseTransaction().getReadWrite()).isNotNull(); + com.google.spanner.v1.Mutation mutation = + com.google.spanner.v1.Mutation.newBuilder() + .setInsert( + Write.newBuilder() + .setTable("T") + .addColumns("C") + .addValues( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("x")))) + .build(); + assertThat(request.getMutationsList()).containsExactly(mutation); + } + + @Test + public void writeAtLeastOnceWithOptions() throws ParseException { + String tag = "app=spanner,env=test"; + String timestampString = "2015-10-01T10:54:20.021Z"; + ArgumentCaptor commit = ArgumentCaptor.forClass(CommitRequest.class); + CommitResponse response = + CommitResponse.newBuilder().setCommitTimestamp(Timestamps.parse(timestampString)).build(); + Mockito.when(rpc.commit(commit.capture(), anyMap())).thenReturn(response); + session.writeAtLeastOnceWithOptions( + Collections.singletonList(Mutation.newInsertBuilder("T").set("C").to("x").build()), + Options.tag(tag)); + + CommitRequest request = commit.getValue(); + assertThat(request.getRequestOptions().getTransactionTag()).isEqualTo(tag); + com.google.spanner.v1.Mutation mutation = + com.google.spanner.v1.Mutation.newBuilder() + .setInsert( + Write.newBuilder() + .setTable("T") + .addColumns("C") + .addValues( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("x")))) + .build(); + assertThat(request.getMutationsList()).containsExactly(mutation); + } + + private static long 
utcTimeSeconds(int year, int month, int day, int hour, int min, int secs) { + GregorianCalendar calendar = new GregorianCalendar(TimeZone.getTimeZone("UTC")); + calendar.set(year, month, day, hour, min, secs); + return calendar.getTimeInMillis() / 1000; + } + + @Test + public void newSingleUseContextClosesOldSingleUseContext() { + ReadContext ctx = session.singleUse(TimestampBound.strong()); + session.singleUse(TimestampBound.strong()); + IllegalStateException e = + assertThrows( + IllegalStateException.class, + () -> ctx.read("Dummy", KeySet.all(), Collections.singletonList("C"))); + assertThat(e.getMessage()).contains("invalidated"); + } + + @Test + public void newSingleUseContextClosesOldSingleUseReadOnlyTransactionContext() { + ReadContext ctx = session.singleUseReadOnlyTransaction(TimestampBound.strong()); + session.singleUse(TimestampBound.strong()); + IllegalStateException e = + assertThrows( + IllegalStateException.class, + () -> ctx.read("Dummy", KeySet.all(), Collections.singletonList("C"))); + assertThat(e.getMessage()).contains("invalidated"); + } + + @Test + public void newSingleUseContextClosesOldMultiUseReadOnlyTransactionContext() { + ReadContext ctx = session.singleUseReadOnlyTransaction(TimestampBound.strong()); + session.singleUse(TimestampBound.strong()); + IllegalStateException e = + assertThrows( + IllegalStateException.class, + () -> ctx.read("Dummy", KeySet.all(), Collections.singletonList("C"))); + assertThat(e.getMessage()).contains("invalidated"); + } + + @Test + public void newSingleUseReadOnlyTransactionContextClosesOldSingleUseContext() { + ReadContext ctx = session.singleUse(TimestampBound.strong()); + session.singleUseReadOnlyTransaction(TimestampBound.strong()); + IllegalStateException e = + assertThrows( + IllegalStateException.class, + () -> ctx.read("Dummy", KeySet.all(), Collections.singletonList("C"))); + assertThat(e.getMessage()).contains("invalidated"); + } + + @Test + public void 
newMultiUseReadOnlyTransactionContextClosesOldSingleUseContext() { + ReadContext ctx = session.singleUse(TimestampBound.strong()); + session.readOnlyTransaction(TimestampBound.strong()); + IllegalStateException e = + assertThrows( + IllegalStateException.class, + () -> ctx.read("Dummy", KeySet.all(), Collections.singletonList("C"))); + assertThat(e.getMessage()).contains("invalidated"); + } + + @Test + public void writeClosesOldSingleUseContext() throws ParseException { + ReadContext ctx = session.singleUse(TimestampBound.strong()); + + Mockito.when(rpc.commit(Mockito.any(), anyMap())) + .thenReturn( + CommitResponse.newBuilder() + .setCommitTimestamp(Timestamps.parse("2015-10-01T10:54:20.021Z")) + .build()); + session.writeAtLeastOnce(Collections.emptyList()); + IllegalStateException e = + assertThrows( + IllegalStateException.class, + () -> ctx.read("Dummy", KeySet.all(), Collections.singletonList("C"))); + assertThat(e.getMessage()).contains("invalidated"); + } + + @Test + public void transactionClosesOldSingleUseContext() { + ReadContext ctx = session.singleUse(TimestampBound.strong()); + + // Note that we don't even run the transaction - just preparing the runner is sufficient. 
+ session.readWriteTransaction(); + IllegalStateException e = + assertThrows( + IllegalStateException.class, + () -> ctx.read("Dummy", KeySet.all(), Collections.singletonList("C"))); + assertThat(e.getMessage()).contains("invalidated"); + } + + @Test + public void singleUseContextClosesTransaction() { + TransactionRunner runner = session.readWriteTransaction(); + + session.singleUse(TimestampBound.strong()); + IllegalStateException e = + assertThrows( + IllegalStateException.class, + () -> + runner.run( + transaction -> { + fail("Unexpected call to transaction body"); + return null; + })); + assertThat(e.getMessage()).contains("invalidated"); + } + + private static ResultSetMetadata newMetadata(Type type) { + return ResultSetMetadata.newBuilder().setRowType(type.toProto().getStructType()).build(); + } + + @Test + public void singleUseReadOnlyTransactionDoesntReturnTransactionMetadata() { + PartialResultSet resultSet = + PartialResultSet.newBuilder() + .setMetadata(newMetadata(Type.struct(Type.StructField.of("C", Type.string())))) + .build(); + mockRead(resultSet); + + ReadOnlyTransaction txn = session.singleUseReadOnlyTransaction(TimestampBound.strong()); + assertThat(txn.readRow("Dummy", Key.of(), Collections.singletonList("C"))).isNull(); + + // For now, getReadTimestamp() will raise an ISE because it hasn't seen a timestamp. It would + // be better for the read to fail with an INTERNAL error, but we can't do that until txn + // metadata is returned for failed reads (e.g., table-not-found) as well as successful ones. + // TODO(user): Fix this. 
+ IllegalStateException e = + assertThrows(IllegalStateException.class, () -> txn.getReadTimestamp()); + assertNotNull(e.getMessage()); + } + + @Test + public void singleUseReadOnlyTransactionReturnsEmptyTransactionMetadata() { + PartialResultSet resultSet = + PartialResultSet.newBuilder() + .setMetadata( + newMetadata(Type.struct(Type.StructField.of("C", Type.string()))).toBuilder() + .setTransaction(Transaction.getDefaultInstance())) + .build(); + mockRead(resultSet); + + ReadOnlyTransaction txn = session.singleUseReadOnlyTransaction(TimestampBound.strong()); + SpannerException e = + assertThrows( + SpannerException.class, + () -> txn.readRow("Dummy", Key.of(), Collections.singletonList("C"))); + assertEquals(ErrorCode.INTERNAL, e.getErrorCode()); + } + + private static class NoOpStreamingCall implements SpannerRpc.StreamingCall { + @Override + public ApiCallContext getCallContext() { + return GrpcCallContext.createDefault(); + } + + @Override + public void cancel(@Nullable String message) {} + + @Override + public void request(int numMessages) {} + } + + private void mockRead(final PartialResultSet myResultSet) { + final ArgumentCaptor consumer = + ArgumentCaptor.forClass(SpannerRpc.ResultStreamConsumer.class); + Mockito.when(rpc.read(Mockito.any(), consumer.capture(), anyMap(), any(), eq(false))) + .then( + invocation -> { + consumer.getValue().onPartialResultSet(myResultSet); + consumer.getValue().onCompleted(); + return new NoOpStreamingCall(); + }); + } + + @Test + public void multiUseReadOnlyTransactionReturnsEmptyTransactionMetadata() { + Transaction txnMetadata = Transaction.newBuilder().setId(ByteString.copyFromUtf8("x")).build(); + PartialResultSet resultSet = + PartialResultSet.newBuilder() + .setMetadata(newMetadata(Type.struct(Type.StructField.of("C", Type.string())))) + .build(); + Mockito.when(rpc.beginTransaction(Mockito.any(), anyMap(), eq(false))).thenReturn(txnMetadata); + mockRead(resultSet); + + ReadOnlyTransaction txn = 
session.readOnlyTransaction(TimestampBound.strong()); + SpannerException e = + assertThrows( + SpannerException.class, + () -> txn.readRow("Dummy", Key.of(), Collections.singletonList("C"))); + assertEquals(ErrorCode.INTERNAL, e.getErrorCode()); + } + + @Test + public void multiUseReadOnlyTransactionReturnsMissingTimestamp() { + Transaction txnMetadata = Transaction.newBuilder().setId(ByteString.copyFromUtf8("x")).build(); + PartialResultSet resultSet = + PartialResultSet.newBuilder() + .setMetadata(newMetadata(Type.struct(Type.StructField.of("C", Type.string())))) + .build(); + Mockito.when(rpc.beginTransaction(Mockito.any(), anyMap(), eq(false))).thenReturn(txnMetadata); + mockRead(resultSet); + + ReadOnlyTransaction txn = session.readOnlyTransaction(TimestampBound.strong()); + SpannerException e = + assertThrows( + SpannerException.class, + () -> txn.readRow("Dummy", Key.of(), Collections.singletonList("C"))); + assertEquals(ErrorCode.INTERNAL, e.getErrorCode()); + } + + @Test + public void multiUseReadOnlyTransactionReturnsMissingTransactionId() throws ParseException { + com.google.protobuf.Timestamp t = Timestamps.parse("2015-10-01T10:54:20.021Z"); + Transaction txnMetadata = Transaction.newBuilder().setReadTimestamp(t).build(); + PartialResultSet resultSet = + PartialResultSet.newBuilder() + .setMetadata(newMetadata(Type.struct(Type.StructField.of("C", Type.string())))) + .build(); + Mockito.when(rpc.beginTransaction(Mockito.any(), anyMap(), eq(false))).thenReturn(txnMetadata); + mockRead(resultSet); + + ReadOnlyTransaction txn = session.readOnlyTransaction(TimestampBound.strong()); + SpannerException e = + assertThrows( + SpannerException.class, + () -> txn.readRow("Dummy", Key.of(), Collections.singletonList("C"))); + assertEquals(ErrorCode.INTERNAL, e.getErrorCode()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionPoolOptionsTest.java 
b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionPoolOptionsTest.java new file mode 100644 index 000000000000..705783d78c0f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionPoolOptionsTest.java @@ -0,0 +1,469 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.spanner.SessionPoolOptions.InactiveTransactionRemovalOptions; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.ThreadLocalRandom; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +/** Unit tests for {@link com.google.cloud.spanner.SessionPoolOptions} */ +@RunWith(Parameterized.class) +public class SessionPoolOptionsTest { + @Parameter public int minSessions; + + @Parameter(1) + public int maxSessions; + + 
@Parameters(name = "min sessions = {0}, max sessions = {1}") + public static Collection data() { + List params = new ArrayList<>(); + params.add(new Object[] {1, 1}); + params.add(new Object[] {500, 600}); + params.add(new Object[] {600, 500}); + + return params; + } + + @Test + public void setMinMaxSessions() { + try { + SessionPoolOptions options = + SessionPoolOptions.newBuilder() + .setMinSessions(minSessions) + .setMaxSessions(maxSessions) + .build(); + if (minSessions > maxSessions) { + fail("Expected exception"); + } + assertThat(minSessions).isEqualTo(options.getMinSessions()); + assertThat(maxSessions).isEqualTo(options.getMaxSessions()); + } catch (IllegalArgumentException ex) { + if (minSessions <= maxSessions) { + throw ex; + } + assertNotNull(ex.getMessage()); + } + } + + /** + * Setting MaxSessions to a value lower than the default MinSessions should be allowed, and should + * cause the MinSessions to be set to the same value as the MaxSessions. + */ + @Test + public void setOnlyMaxSessions() { + final int defaultMinSessions = 100; + // Set max sessions > DEFAULT_MIN_SESSIONS. + SessionPoolOptions options = + SessionPoolOptions.newBuilder().setMaxSessions(defaultMinSessions + 1).build(); + assertThat(options.getMaxSessions()).isEqualTo(defaultMinSessions + 1); + assertThat(options.getMinSessions()).isEqualTo(defaultMinSessions); + // Set max sessions < DEFAULT_MIN_SESSIONS. 
+ options = SessionPoolOptions.newBuilder().setMaxSessions(defaultMinSessions - 1).build(); + assertThat(options.getMaxSessions()).isEqualTo(defaultMinSessions - 1); + assertThat(options.getMinSessions()).isEqualTo(defaultMinSessions - 1); + } + + @Test + public void setValidMinSessions() { + SessionPoolOptions.newBuilder().setMinSessions(0).build(); + SessionPoolOptions.newBuilder().setMinSessions(1).build(); + SessionPoolOptions.newBuilder().setMinSessions(400).build(); + SessionPoolOptions.newBuilder() + .setMaxSessions(Integer.MAX_VALUE) + .setMinSessions(Integer.MAX_VALUE) + .build(); + } + + @Test(expected = IllegalArgumentException.class) + public void setNegativeMinSessions() { + SessionPoolOptions.newBuilder().setMinSessions(-1); + } + + @Test + public void setValidMaxSessions() { + SessionPoolOptions.newBuilder().setMaxSessions(1).build(); + SessionPoolOptions.newBuilder().setMaxSessions(400).build(); + SessionPoolOptions.newBuilder().setMaxSessions(Integer.MAX_VALUE).build(); + } + + @Test(expected = IllegalArgumentException.class) + public void setZeroMaxSessions() { + SessionPoolOptions.newBuilder().setMaxSessions(0); + } + + @Test(expected = IllegalArgumentException.class) + public void setNegativeMaxSessions() { + SessionPoolOptions.newBuilder().setMaxSessions(-1); + } + + @Test + public void verifyDefaultInactiveTransactionRemovalOptions() { + SessionPoolOptions sessionPoolOptions = SessionPoolOptions.newBuilder().build(); + InactiveTransactionRemovalOptions inactiveTransactionRemovalOptions = + sessionPoolOptions.getInactiveTransactionRemovalOptions(); + + assertTrue(sessionPoolOptions.warnInactiveTransactions()); + assertFalse(sessionPoolOptions.warnAndCloseInactiveTransactions()); + assertFalse(sessionPoolOptions.closeInactiveTransactions()); + assertEquals(0.95, inactiveTransactionRemovalOptions.getUsedSessionsRatioThreshold(), 0.0); + assertEquals(Duration.ofMinutes(2), inactiveTransactionRemovalOptions.getExecutionFrequency()); + 
assertEquals(Duration.ofMinutes(60), inactiveTransactionRemovalOptions.getIdleTimeThreshold()); + } + + @Test + public void setWarnIfInactiveTransactions() { + SessionPoolOptions sessionPoolOptions = + SessionPoolOptions.newBuilder().setWarnIfInactiveTransactions().build(); + + assertTrue(sessionPoolOptions.warnInactiveTransactions()); + assertFalse(sessionPoolOptions.warnAndCloseInactiveTransactions()); + assertFalse(sessionPoolOptions.closeInactiveTransactions()); + } + + @Test + public void setWarnAndCloseIfInactiveTransactions() { + SessionPoolOptions sessionPoolOptions = + SessionPoolOptions.newBuilder().setWarnAndCloseIfInactiveTransactions().build(); + + assertFalse(sessionPoolOptions.warnInactiveTransactions()); + assertTrue(sessionPoolOptions.warnAndCloseInactiveTransactions()); + assertFalse(sessionPoolOptions.closeInactiveTransactions()); + } + + @Test + public void setCloseIfInactiveTransactions() { + SessionPoolOptions sessionPoolOptions = + SessionPoolOptions.newBuilder().setCloseIfInactiveTransactions().build(); + + assertFalse(sessionPoolOptions.warnInactiveTransactions()); + assertFalse(sessionPoolOptions.warnAndCloseInactiveTransactions()); + assertTrue(sessionPoolOptions.closeInactiveTransactions()); + } + + @Test + public void testSetUsedSessionsRatioThreshold() { + double threshold = ThreadLocalRandom.current().nextDouble(); + InactiveTransactionRemovalOptions inactiveTransactionRemovalOptions = + InactiveTransactionRemovalOptions.newBuilder() + .setUsedSessionsRatioThreshold(threshold) + .build(); + assertEquals( + threshold, inactiveTransactionRemovalOptions.getUsedSessionsRatioThreshold(), 0.0d); + } + + @Test + public void testBlockIfPoolExhausted() { + assertTrue(SessionPoolOptions.newBuilder().build().isBlockIfPoolExhausted()); + assertTrue( + SessionPoolOptions.newBuilder().setBlockIfPoolExhausted().build().isBlockIfPoolExhausted()); + assertFalse( + 
SessionPoolOptions.newBuilder().setFailIfPoolExhausted().build().isBlockIfPoolExhausted()); + } + + @Test + public void testFailIfSessionNotFound() { + assertFalse(SessionPoolOptions.newBuilder().build().isFailIfSessionNotFound()); + assertTrue( + SessionPoolOptions.newBuilder() + .setFailIfSessionNotFound() + .build() + .isFailIfSessionNotFound()); + } + + @Test(expected = IllegalArgumentException.class) + public void setNegativeExecutionFrequency() { + InactiveTransactionRemovalOptions inactiveTransactionRemovalOptions = + InactiveTransactionRemovalOptions.newBuilder() + .setExecutionFrequency(Duration.ofMillis(-1)) + .build(); + SessionPoolOptions.newBuilder() + .setInactiveTransactionRemovalOptions(inactiveTransactionRemovalOptions); + } + + @Test(expected = IllegalArgumentException.class) + public void setNegativeIdleTimeThreshold() { + InactiveTransactionRemovalOptions inactiveTransactionRemovalOptions = + InactiveTransactionRemovalOptions.newBuilder() + .setIdleTimeThreshold(Duration.ofMillis(-1)) + .build(); + SessionPoolOptions.newBuilder() + .setInactiveTransactionRemovalOptions(inactiveTransactionRemovalOptions); + } + + @Test + public void setAcquireSessionTimeout() { + SessionPoolOptions sessionPoolOptions1 = + SessionPoolOptions.newBuilder() + .setAcquireSessionTimeoutDuration(Duration.ofSeconds(20)) + .build(); + SessionPoolOptions sessionPoolOptions2 = + SessionPoolOptions.newBuilder() + .setAcquireSessionTimeoutDuration(Duration.ofMillis(Long.MAX_VALUE)) + .build(); + + assertEquals(Duration.ofSeconds(20), sessionPoolOptions1.getAcquireSessionTimeout()); + assertEquals(Duration.ofMillis(Long.MAX_VALUE), sessionPoolOptions2.getAcquireSessionTimeout()); + } + + @Test(expected = IllegalArgumentException.class) + public void setAcquireSessionTimeout_valueLessThanLowerBound() { + SessionPoolOptions.newBuilder().setAcquireSessionTimeoutDuration(Duration.ofMillis(0)).build(); + } + + @Test(expected = IllegalArgumentException.class) + public void 
setAcquireSessionTimeout_valueMoreThanUpperBound() { + SessionPoolOptions.newBuilder() + .setAcquireSessionTimeoutDuration(Duration.ofSeconds(Long.MAX_VALUE)) + .build(); + } + + @Test + public void verifyDefaultAcquireSessionTimeout() { + SessionPoolOptions sessionPoolOptions = SessionPoolOptions.newBuilder().build(); + + assertEquals(Duration.ofSeconds(60), sessionPoolOptions.getAcquireSessionTimeout()); + } + + @Test + public void testRandomizePositionQPSThreshold() { + assertEquals(0L, SessionPoolOptions.newBuilder().build().getRandomizePositionQPSThreshold()); + assertEquals( + 4L, + SessionPoolOptions.newBuilder() + .setRandomizePositionQPSThreshold(4L) + .build() + .getRandomizePositionQPSThreshold()); + assertEquals( + 10L, + SessionPoolOptions.newBuilder() + .setRandomizePositionQPSThreshold(4L) + .setRandomizePositionQPSThreshold(10L) + .build() + .getRandomizePositionQPSThreshold()); + assertEquals( + 0L, + SessionPoolOptions.newBuilder() + .setRandomizePositionQPSThreshold(0L) + .build() + .getRandomizePositionQPSThreshold()); + assertThrows( + IllegalArgumentException.class, + () -> SessionPoolOptions.newBuilder().setRandomizePositionQPSThreshold(-1L)); + } + + @Test + public void testUseMultiplexedSession() { + assumeFalse(TestHelper.isMultiplexSessionDisabled()); + // skip these tests since this configuration can have dual behaviour in different test-runners + assumeFalse(SessionPoolOptions.newBuilder().build().getUseMultiplexedSession()); + assertEquals(false, SessionPoolOptions.newBuilder().build().getUseMultiplexedSession()); + assertEquals( + true, + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(true) + .build() + .getUseMultiplexedSession()); + assertEquals( + false, + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(true) + .setUseMultiplexedSession(false) + .build() + .getUseMultiplexedSession()); + } + + @Test + public void testUseMultiplexedSessionForRW() { + // skip these tests since this configuration can have dual 
behaviour in different test-runners + assumeFalse(TestHelper.isMultiplexSessionDisabled()); + assumeFalse( + Boolean.parseBoolean(System.getenv("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW"))); + assumeFalse(SessionPoolOptions.newBuilder().build().getUseMultiplexedSession()); + assumeFalse(SessionPoolOptions.newBuilder().build().getUseMultiplexedSessionForRW()); + + // Verify default client behavior for multiplexed sessions in R/W transactions + assertEquals(false, SessionPoolOptions.newBuilder().build().getUseMultiplexedSessionForRW()); + + // Client will use multiplexed sessions for R/W transactions if both the fields are set to true. + assertEquals( + true, + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(true) + .setUseMultiplexedSessionForRW(true) + .build() + .getUseMultiplexedSessionForRW()); + // Client will not use multiplexed sessions for R/W transactions, since one of the field is set + // to false. + assertEquals( + false, + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(true) + .setUseMultiplexedSessionForRW(false) + .build() + .getUseMultiplexedSessionForRW()); + // Client will not use multiplexed sessions for R/W transactions, since one of the field is set + // to false. + assertEquals( + false, + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(false) + .setUseMultiplexedSessionForRW(true) + .build() + .getUseMultiplexedSessionForRW()); + // Client will not use multiplexed sessions for R/W transactions, since both the fields are set + // to false. 
+ assertEquals( + false, + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(false) + .setUseMultiplexedSessionForRW(false) + .build() + .getUseMultiplexedSessionForRW()); + } + + @Test + public void testMultiplexedSessionMaintenanceDuration() { + assertEquals( + Duration.ofDays(7), + SessionPoolOptions.newBuilder().build().getMultiplexedSessionMaintenanceDuration()); + assertEquals( + Duration.ofDays(2), + SessionPoolOptions.newBuilder() + .setMultiplexedSessionMaintenanceDuration(Duration.ofDays(2)) + .build() + .getMultiplexedSessionMaintenanceDuration()); + assertEquals( + Duration.ofDays(10), + SessionPoolOptions.newBuilder() + .setMultiplexedSessionMaintenanceDuration(Duration.ofDays(2)) + .setMultiplexedSessionMaintenanceDuration(Duration.ofDays(10)) + .build() + .getMultiplexedSessionMaintenanceDuration()); + } + + @Test + public void testToBuilder() { + assertToBuilderRoundtrip(SessionPoolOptions.newBuilder().build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(ThreadLocalRandom.current().nextBoolean()) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSessionForRW(ThreadLocalRandom.current().nextBoolean()) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setMinSessions(ThreadLocalRandom.current().nextInt(400)) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setMaxSessions(ThreadLocalRandom.current().nextInt(1, 1000)) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setIncStep(ThreadLocalRandom.current().nextInt(1, 1000)) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setMaxIdleSessions(ThreadLocalRandom.current().nextInt(1000)) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setWriteSessionsFraction(ThreadLocalRandom.current().nextFloat()) + .build()); + assertToBuilderRoundtrip( + 
SessionPoolOptions.newBuilder() + .setInactiveTransactionRemovalOptions( + InactiveTransactionRemovalOptions.newBuilder() + .setUsedSessionsRatioThreshold(ThreadLocalRandom.current().nextDouble()) + .build()) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setLoopFrequency(ThreadLocalRandom.current().nextInt(1000)) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setMultiplexedSessionMaintenanceLoopFrequency( + java.time.Duration.ofMillis(ThreadLocalRandom.current().nextInt(1000))) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setKeepAliveIntervalMinutes(ThreadLocalRandom.current().nextInt(60)) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setRemoveInactiveSessionAfterDuration( + Duration.ofMillis(ThreadLocalRandom.current().nextLong(10000))) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder().setCloseIfInactiveTransactions().build()); + assertToBuilderRoundtrip(SessionPoolOptions.newBuilder().setFailOnSessionLeak().build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setTrackStackTraceOfSessionCheckout(ThreadLocalRandom.current().nextBoolean()) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setInitialWaitForSessionTimeoutMillis(ThreadLocalRandom.current().nextLong(1000)) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setAutoDetectDialect(ThreadLocalRandom.current().nextBoolean()) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setWaitForMinSessionsDuration( + Duration.ofMillis(ThreadLocalRandom.current().nextLong(10000))) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setAcquireSessionTimeoutDuration( + Duration.ofMillis(ThreadLocalRandom.current().nextLong(1, 10000))) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + 
.setRandomizePositionQPSThreshold(ThreadLocalRandom.current().nextLong(10000)) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setMultiplexedSessionMaintenanceDuration( + Duration.ofMillis(ThreadLocalRandom.current().nextLong(10000))) + .build()); + } + + static void assertToBuilderRoundtrip(SessionPoolOptions options) { + assertEquals(options, options.toBuilder().build()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SingerProto.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SingerProto.java new file mode 100644 index 000000000000..a7aaa70ca2d0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SingerProto.java @@ -0,0 +1,1248 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: src/test/resources/com/google/cloud/spanner/singer.proto + +// Protobuf Java Version: 3.25.1 +package com.google.cloud.spanner; + +public final class SingerProto { + private SingerProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + /** Protobuf enum {@code examples.spanner.music.Genre} */ + public enum Genre implements com.google.protobuf.ProtocolMessageEnum { + /** POP = 0; */ + POP(0), + /** JAZZ = 1; */ + JAZZ(1), + /** FOLK = 2; */ + FOLK(2), + /** ROCK = 3; */ + ROCK(3), + UNRECOGNIZED(-1), + ; + + /** POP = 0; */ + public static final int POP_VALUE = 0; + + /** JAZZ = 1; */ + public static final int JAZZ_VALUE = 1; + + /** FOLK = 2; */ + public static final int FOLK_VALUE = 2; + + /** ROCK = 3; */ + public static final int ROCK_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new IllegalArgumentException("Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @Deprecated + public static Genre valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. 
+ */ + public static Genre forNumber(int value) { + switch (value) { + case 0: + return POP; + case 1: + return JAZZ; + case 2: + return FOLK; + case 3: + return ROCK; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Genre findValueByNumber(int number) { + return Genre.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new IllegalStateException("Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return SingerProto.getDescriptor().getEnumTypes().get(0); + } + + private static final Genre[] VALUES = values(); + + public static Genre valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Genre(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:examples.spanner.music.Genre) + } + + public interface SingerInfoOrBuilder + extends + // @@protoc_insertion_point(interface_extends:examples.spanner.music.SingerInfo) + com.google.protobuf.MessageOrBuilder { + + /** + * optional int64 singer_id = 1; + * + * @return Whether the singerId field is set. + */ + boolean hasSingerId(); + + /** + * optional int64 singer_id = 1; + * + * @return The singerId. 
+ */ + long getSingerId(); + + /** + * optional string birth_date = 2; + * + * @return Whether the birthDate field is set. + */ + boolean hasBirthDate(); + + /** + * optional string birth_date = 2; + * + * @return The birthDate. + */ + String getBirthDate(); + + /** + * optional string birth_date = 2; + * + * @return The bytes for birthDate. + */ + com.google.protobuf.ByteString getBirthDateBytes(); + + /** + * optional string nationality = 3; + * + * @return Whether the nationality field is set. + */ + boolean hasNationality(); + + /** + * optional string nationality = 3; + * + * @return The nationality. + */ + String getNationality(); + + /** + * optional string nationality = 3; + * + * @return The bytes for nationality. + */ + com.google.protobuf.ByteString getNationalityBytes(); + + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return Whether the genre field is set. + */ + boolean hasGenre(); + + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return The enum numeric value on the wire for genre. + */ + int getGenreValue(); + + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return The genre. + */ + Genre getGenre(); + } + + /** Protobuf type {@code examples.spanner.music.SingerInfo} */ + public static final class SingerInfo extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:examples.spanner.music.SingerInfo) + SingerInfoOrBuilder { + private static final long serialVersionUID = 0L; + + // Use SingerInfo.newBuilder() to construct. 
+ private SingerInfo(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private SingerInfo() { + birthDate_ = ""; + nationality_ = ""; + genre_ = 0; + } + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new SingerInfo(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return SingerProto.internal_static_examples_spanner_music_SingerInfo_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return SingerProto.internal_static_examples_spanner_music_SingerInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized(SingerInfo.class, Builder.class); + } + + private int bitField0_; + public static final int SINGER_ID_FIELD_NUMBER = 1; + private long singerId_ = 0L; + + /** + * optional int64 singer_id = 1; + * + * @return Whether the singerId field is set. + */ + @Override + public boolean hasSingerId() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * optional int64 singer_id = 1; + * + * @return The singerId. + */ + @Override + public long getSingerId() { + return singerId_; + } + + public static final int BIRTH_DATE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile Object birthDate_ = ""; + + /** + * optional string birth_date = 2; + * + * @return Whether the birthDate field is set. + */ + @Override + public boolean hasBirthDate() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * optional string birth_date = 2; + * + * @return The birthDate. + */ + @Override + public String getBirthDate() { + Object ref = birthDate_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + birthDate_ = s; + return s; + } + } + + /** + * optional string birth_date = 2; + * + * @return The bytes for birthDate. 
+ */ + @Override + public com.google.protobuf.ByteString getBirthDateBytes() { + Object ref = birthDate_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + birthDate_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int NATIONALITY_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile Object nationality_ = ""; + + /** + * optional string nationality = 3; + * + * @return Whether the nationality field is set. + */ + @Override + public boolean hasNationality() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * optional string nationality = 3; + * + * @return The nationality. + */ + @Override + public String getNationality() { + Object ref = nationality_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + nationality_ = s; + return s; + } + } + + /** + * optional string nationality = 3; + * + * @return The bytes for nationality. + */ + @Override + public com.google.protobuf.ByteString getNationalityBytes() { + Object ref = nationality_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + nationality_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int GENRE_FIELD_NUMBER = 4; + private int genre_ = 0; + + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return Whether the genre field is set. + */ + @Override + public boolean hasGenre() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return The enum numeric value on the wire for genre. 
+ */ + @Override + public int getGenreValue() { + return genre_; + } + + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return The genre. + */ + @Override + public Genre getGenre() { + Genre result = Genre.forNumber(genre_); + return result == null ? Genre.UNRECOGNIZED : result; + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeInt64(1, singerId_); + } + if (((bitField0_ & 0x00000002) != 0)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, birthDate_); + } + if (((bitField0_ & 0x00000004) != 0)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, nationality_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeEnum(4, genre_); + } + getUnknownFields().writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, singerId_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, birthDate_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, nationality_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(4, genre_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof SingerInfo)) { + 
return super.equals(obj); + } + SingerInfo other = (SingerInfo) obj; + + if (hasSingerId() != other.hasSingerId()) return false; + if (hasSingerId()) { + if (getSingerId() != other.getSingerId()) return false; + } + if (hasBirthDate() != other.hasBirthDate()) return false; + if (hasBirthDate()) { + if (!getBirthDate().equals(other.getBirthDate())) return false; + } + if (hasNationality() != other.hasNationality()) return false; + if (hasNationality()) { + if (!getNationality().equals(other.getNationality())) return false; + } + if (hasGenre() != other.hasGenre()) return false; + if (hasGenre()) { + if (genre_ != other.genre_) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasSingerId()) { + hash = (37 * hash) + SINGER_ID_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getSingerId()); + } + if (hasBirthDate()) { + hash = (37 * hash) + BIRTH_DATE_FIELD_NUMBER; + hash = (53 * hash) + getBirthDate().hashCode(); + } + if (hasNationality()) { + hash = (37 * hash) + NATIONALITY_FIELD_NUMBER; + hash = (53 * hash) + getNationality().hashCode(); + } + if (hasGenre()) { + hash = (37 * hash) + GENRE_FIELD_NUMBER; + hash = (53 * hash) + genre_; + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static SingerInfo parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static SingerInfo parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static SingerInfo 
parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static SingerInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static SingerInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static SingerInfo parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static SingerInfo parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static SingerInfo parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static SingerInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static SingerInfo parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static SingerInfo parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static SingerInfo parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(SingerInfo prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** Protobuf type {@code examples.spanner.music.SingerInfo} */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:examples.spanner.music.SingerInfo) + SingerInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return SingerProto.internal_static_examples_spanner_music_SingerInfo_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return SingerProto.internal_static_examples_spanner_music_SingerInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized(SingerInfo.class, Builder.class); + } + + // Construct using com.google.cloud.spanner.SingerProto.SingerInfo.newBuilder() + private Builder() {} + + private Builder(BuilderParent parent) { + super(parent); + } + + @Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + singerId_ = 0L; + birthDate_ = ""; + nationality_ = ""; + genre_ = 0; + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
SingerProto.internal_static_examples_spanner_music_SingerInfo_descriptor; + } + + @Override + public SingerInfo getDefaultInstanceForType() { + return SingerInfo.getDefaultInstance(); + } + + @Override + public SingerInfo build() { + SingerInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public SingerInfo buildPartial() { + SingerInfo result = new SingerInfo(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(SingerInfo result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.singerId_ = singerId_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.birthDate_ = birthDate_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.nationality_ = nationality_; + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.genre_ = genre_; + to_bitField0_ |= 0x00000008; + } + result.bitField0_ |= to_bitField0_; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return 
super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof SingerInfo) { + return mergeFrom((SingerInfo) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(SingerInfo other) { + if (other == SingerInfo.getDefaultInstance()) return this; + if (other.hasSingerId()) { + setSingerId(other.getSingerId()); + } + if (other.hasBirthDate()) { + birthDate_ = other.birthDate_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasNationality()) { + nationality_ = other.nationality_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasGenre()) { + setGenre(other.getGenre()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + singerId_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + birthDate_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + nationality_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 32: + { + genre_ = input.readEnum(); + bitField0_ |= 0x00000008; + break; + } // case 32 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + 
onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long singerId_; + + /** + * optional int64 singer_id = 1; + * + * @return Whether the singerId field is set. + */ + @Override + public boolean hasSingerId() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * optional int64 singer_id = 1; + * + * @return The singerId. + */ + @Override + public long getSingerId() { + return singerId_; + } + + /** + * optional int64 singer_id = 1; + * + * @param value The singerId to set. + * @return This builder for chaining. + */ + public Builder setSingerId(long value) { + + singerId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * optional int64 singer_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearSingerId() { + bitField0_ = (bitField0_ & ~0x00000001); + singerId_ = 0L; + onChanged(); + return this; + } + + private Object birthDate_ = ""; + + /** + * optional string birth_date = 2; + * + * @return Whether the birthDate field is set. + */ + public boolean hasBirthDate() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * optional string birth_date = 2; + * + * @return The birthDate. + */ + public String getBirthDate() { + Object ref = birthDate_; + if (!(ref instanceof String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + birthDate_ = s; + return s; + } else { + return (String) ref; + } + } + + /** + * optional string birth_date = 2; + * + * @return The bytes for birthDate. + */ + public com.google.protobuf.ByteString getBirthDateBytes() { + Object ref = birthDate_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + birthDate_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * optional string birth_date = 2; + * + * @param value The birthDate to set. 
+ * @return This builder for chaining. + */ + public Builder setBirthDate(String value) { + if (value == null) { + throw new NullPointerException(); + } + birthDate_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * optional string birth_date = 2; + * + * @return This builder for chaining. + */ + public Builder clearBirthDate() { + birthDate_ = getDefaultInstance().getBirthDate(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * optional string birth_date = 2; + * + * @param value The bytes for birthDate to set. + * @return This builder for chaining. + */ + public Builder setBirthDateBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + birthDate_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private Object nationality_ = ""; + + /** + * optional string nationality = 3; + * + * @return Whether the nationality field is set. + */ + public boolean hasNationality() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * optional string nationality = 3; + * + * @return The nationality. + */ + public String getNationality() { + Object ref = nationality_; + if (!(ref instanceof String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + nationality_ = s; + return s; + } else { + return (String) ref; + } + } + + /** + * optional string nationality = 3; + * + * @return The bytes for nationality. + */ + public com.google.protobuf.ByteString getNationalityBytes() { + Object ref = nationality_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + nationality_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * optional string nationality = 3; + * + * @param value The nationality to set. 
+ * @return This builder for chaining. + */ + public Builder setNationality(String value) { + if (value == null) { + throw new NullPointerException(); + } + nationality_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * optional string nationality = 3; + * + * @return This builder for chaining. + */ + public Builder clearNationality() { + nationality_ = getDefaultInstance().getNationality(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * optional string nationality = 3; + * + * @param value The bytes for nationality to set. + * @return This builder for chaining. + */ + public Builder setNationalityBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nationality_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private int genre_ = 0; + + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return Whether the genre field is set. + */ + @Override + public boolean hasGenre() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return The enum numeric value on the wire for genre. + */ + @Override + public int getGenreValue() { + return genre_; + } + + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @param value The enum numeric value on the wire for genre to set. + * @return This builder for chaining. + */ + public Builder setGenreValue(int value) { + genre_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return The genre. + */ + @Override + public Genre getGenre() { + Genre result = Genre.forNumber(genre_); + return result == null ? Genre.UNRECOGNIZED : result; + } + + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @param value The genre to set. 
+ * @return This builder for chaining. + */ + public Builder setGenre(Genre value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + genre_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return This builder for chaining. + */ + public Builder clearGenre() { + bitField0_ = (bitField0_ & ~0x00000008); + genre_ = 0; + onChanged(); + return this; + } + + @Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:examples.spanner.music.SingerInfo) + } + + // @@protoc_insertion_point(class_scope:examples.spanner.music.SingerInfo) + private static final SingerInfo DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new SingerInfo(); + } + + public static SingerInfo getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public SingerInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public SingerInfo getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_examples_spanner_music_SingerInfo_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_examples_spanner_music_SingerInfo_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + String[] descriptorData = { + "\n" + + "\014singer.proto\022\026examples.spanner.music\"\301\001\n\n" + + "SingerInfo\022\026\n" + + "\tsinger_id\030\001 \001(\003H\000\210\001\001\022\027\n" + + "\n" + + "birth_date\030\002 \001(\tH\001\210\001\001\022\030\n" + + "\013nationality\030\003 \001(\tH\002\210\001\001\0221\n" + + "\005genre\030\004" + + " \001(\0162\035.examples.spanner.music.GenreH\003\210\001\001B\014\n\n" + + "_singer_idB\r\n" + + "\013_birth_dateB\016\n" + + "\014_nationalityB\010\n" + + "\006_genre*.\n" + + "\005Genre\022\007\n" + + "\003POP\020\000\022\010\n" + + "\004JAZZ\020\001\022\010\n" + + "\004FOLK\020\002\022\010\n" + + "\004ROCK\020\003B)\n" + + "\030com.google.cloud.spannerB\013SingerProtoP\000b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] {}); + internal_static_examples_spanner_music_SingerInfo_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_examples_spanner_music_SingerInfo_fieldAccessorTable = + new 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_examples_spanner_music_SingerInfo_descriptor, + new String[] { + "SingerId", "BirthDate", "Nationality", "Genre", + }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SlowTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SlowTest.java new file mode 100644 index 000000000000..ba5e58f81f67 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SlowTest.java @@ -0,0 +1,20 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +/** Category of slow tests, to be run on the nightly build * */ +public interface SlowTest extends IntegrationTest {} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpanExceptionTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpanExceptionTest.java new file mode 100644 index 000000000000..d09e65dfc0b0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpanExceptionTest.java @@ -0,0 +1,88 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import io.grpc.ManagedChannelBuilder; +import java.time.Duration; +import java.util.ArrayList; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class SpanExceptionTest extends AbstractMockServerTest { + + @Test + public void testReadOnlyTransaction() throws InterruptedException, ExecutionException { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId("my-project") + .setHost(String.format("http://localhost:%d", getPort())) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setMaxSessions(10) + .setAcquireSessionTimeoutDuration(Duration.ofMillis(10)) + // .setAcquireSessionTimeout(null) + .build()) + .build() + .getService()) { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + int numThreads = 25; + ExecutorService service = Executors.newFixedThreadPool(numThreads); + ArrayList> futures = new ArrayList<>(numThreads); + try (ReadOnlyTransaction 
readOnlyTransaction = client.readOnlyTransaction()) { + for (int i = 0; i < numThreads; i++) { + futures.add(service.submit(() -> executeRandom(readOnlyTransaction))); + } + service.shutdown(); + assertTrue(service.awaitTermination(60L, TimeUnit.SECONDS)); + // Verify that all threads finished without any unexpected errors. + for (Future future : futures) { + assertNull(future.get()); + } + } + } + } + + private Void executeRandom(ReadOnlyTransaction readOnlyTransaction) { + try (ResultSet resultSet = readOnlyTransaction.executeQuery(SELECT_RANDOM_STATEMENT)) { + while (resultSet.next()) { + // ignore + } + } catch (SpannerException spannerException) { + if (spannerException.getErrorCode() == ErrorCode.RESOURCE_EXHAUSTED) { + // This is the expected error code, so ignore. + return null; + } + throw spannerException; + } + return null; + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpanTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpanTest.java new file mode 100644 index 000000000000..449e78cf6125 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpanTest.java @@ -0,0 +1,447 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; + +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.ListValue; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.TypeCode; +import io.grpc.Server; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.grpc.inprocess.InProcessServerBuilder; +import io.opencensus.trace.Tracing; +import io.opentelemetry.api.GlobalOpenTelemetry; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator; +import io.opentelemetry.context.propagation.ContextPropagators; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.testing.exporter.InMemorySpanExporter; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor; +import java.lang.reflect.Modifier; +import java.time.Duration; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assume; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(TracerTest.class) +@RunWith(JUnit4.class) +public class SpanTest { + + private static 
final String TEST_PROJECT = "my-project"; + private static final String TEST_INSTANCE = "my-instance"; + private static final String TEST_DATABASE = "my-database"; + private static MockSpannerServiceImpl mockSpanner; + private static Server server; + private static LocalChannelProvider channelProvider; + private static final Statement UPDATE_STATEMENT = + Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=2"); + private static final Statement INVALID_UPDATE_STATEMENT = + Statement.of("UPDATE NON_EXISTENT_TABLE SET BAR=1 WHERE BAZ=2"); + private static final long UPDATE_COUNT = 1L; + private static final Statement SELECT1 = Statement.of("SELECT 1 AS COL1"); + private static final ResultSetMetadata SELECT1_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("COL1") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.INT64) + .build()) + .build()) + .build()) + .build(); + private static final com.google.spanner.v1.ResultSet SELECT1_RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("1").build()) + .build()) + .setMetadata(SELECT1_METADATA) + .build(); + private Spanner spanner; + private DatabaseClient client; + private Spanner spannerWithTimeout; + private DatabaseClient clientWithTimeout; + + private static InMemorySpanExporter openTelemetrySpanExporter; + + private static FailOnOverkillTraceComponentImpl failOnOverkillTraceComponent = + new FailOnOverkillTraceComponentImpl(); + + private static final SimulatedExecutionTime ONE_SECOND = + SimulatedExecutionTime.ofMinimumAndRandomTime(1000, 0); + private static final StatusRuntimeException FAILED_PRECONDITION = + io.grpc.Status.FAILED_PRECONDITION + .withDescription("Non-retryable test exception.") + .asRuntimeException(); + + @BeforeClass + public static void startStaticServer() throws Exception { + 
Assume.assumeTrue( + "This test is only supported on JDK11 and lower", + JavaVersionUtil.getJavaMajorVersion() < 12); + + // Use a little reflection to set the test tracer. + // This is not possible in Java 12 and later. + java.lang.reflect.Field field = Tracing.class.getDeclaredField("traceComponent"); + field.setAccessible(true); + java.lang.reflect.Field modifiersField = null; + try { + modifiersField = java.lang.reflect.Field.class.getDeclaredField("modifiers"); + } catch (NoSuchFieldException e) { + // Halt the test and ignore it. + Assume.assumeTrue( + "Skipping test as reflection is not allowed on reflection class in this JDK build", + false); + } + modifiersField.setAccessible(true); + // Remove the final modifier from the 'traceComponent' field. + modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL); + field.set(null, failOnOverkillTraceComponent); + + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. + mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + mockSpanner.putStatementResult(StatementResult.query(SELECT1, SELECT1_RESULTSET)); + mockSpanner.putStatementResult( + StatementResult.exception( + INVALID_UPDATE_STATEMENT, + Status.INVALID_ARGUMENT.withDescription("invalid statement").asRuntimeException())); + + String uniqueName = InProcessServerBuilder.generateName(); + server = + InProcessServerBuilder.forName(uniqueName) + // We need to use a real executor for timeouts to occur. 
+ .scheduledExecutorService(new ScheduledThreadPoolExecutor(1)) + .addService(mockSpanner) + .build() + .start(); + channelProvider = LocalChannelProvider.create(uniqueName); + } + + @AfterClass + public static void stopServer() throws InterruptedException { + if (server != null) { + server.shutdown(); + server.awaitTermination(); + } + } + + @BeforeClass + public static void setupOpenTelemetry() { + SpannerOptions.resetActiveTracingFramework(); + SpannerOptions.enableOpenCensusTraces(); + } + + @Before + public void setUp() throws Exception { + failOnOverkillTraceComponent.clearSpans(); + failOnOverkillTraceComponent.clearAnnotations(); + // Incorporating OpenTelemetry configuration to ensure that OpenCensus traces are utilized by + // default, + // regardless of the presence of OpenTelemetry configuration. + openTelemetrySpanExporter = InMemorySpanExporter.create(); + SdkTracerProvider tracerProvider = + SdkTracerProvider.builder() + .addSpanProcessor(SimpleSpanProcessor.create(openTelemetrySpanExporter)) + .build(); + + GlobalOpenTelemetry.resetForTest(); + OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder() + .setPropagators(ContextPropagators.create(W3CTraceContextPropagator.getInstance())) + .setTracerProvider(tracerProvider) + .build(); + + SpannerOptions.Builder builder = + SpannerOptions.newBuilder() + .setProjectId(TEST_PROJECT) + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setOpenTelemetry(openTelemetry) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setMinSessions(2) + .setWaitForMinSessionsDuration(Duration.ofSeconds(10)) + .build()); + + spanner = builder.build().getService(); + + client = spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + + final RetrySettings retrySettings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1L)) + .setMaxRetryDelayDuration(Duration.ofMillis(1L)) + 
.setInitialRpcTimeoutDuration(Duration.ofMillis(75L)) + .setMaxRpcTimeoutDuration(Duration.ofMillis(75L)) + .setMaxAttempts(3) + .setTotalTimeoutDuration(Duration.ofMillis(200L)) + .build(); + RetrySettings commitRetrySettings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1L)) + .setMaxRetryDelayDuration(Duration.ofMillis(1L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(5000L)) + .setMaxRpcTimeoutDuration(Duration.ofMillis(10000L)) + .setMaxAttempts(1) + .setTotalTimeoutDuration(Duration.ofMillis(20000L)) + .build(); + builder + .getSpannerStubSettingsBuilder() + .applyToAllUnaryMethods( + input -> { + input.setRetrySettings(retrySettings); + return null; + }); + builder + .getSpannerStubSettingsBuilder() + .executeStreamingSqlSettings() + .setRetrySettings(retrySettings); + builder.getSpannerStubSettingsBuilder().commitSettings().setRetrySettings(commitRetrySettings); + builder + .getSpannerStubSettingsBuilder() + .executeStreamingSqlSettings() + .setRetrySettings(retrySettings); + builder.getSpannerStubSettingsBuilder().streamingReadSettings().setRetrySettings(retrySettings); + spannerWithTimeout = builder.build().getService(); + clientWithTimeout = + spannerWithTimeout.getDatabaseClient( + DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + } + + @After + public void tearDown() { + spanner.close(); + mockSpanner.reset(); + mockSpanner.removeAllExecutionTimes(); + } + + @Test + public void singleUseNonRetryableErrorOnNext() { + try (ResultSet rs = client.singleUse().executeQuery(SELECT1)) { + mockSpanner.addException(FAILED_PRECONDITION); + SpannerException e = assertThrows(SpannerException.class, () -> rs.next()); + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + + @Test + public void singleUseExecuteStreamingSqlTimeout() { + try (ResultSet rs = clientWithTimeout.singleUse().executeQuery(SELECT1)) { + mockSpanner.setExecuteStreamingSqlExecutionTime(ONE_SECOND); + SpannerException e = 
assertThrows(SpannerException.class, () -> rs.next()); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + } + + @Test + public void singleUse() { + try (ResultSet rs = client.singleUse().executeQuery(SELECT1)) { + while (rs.next()) { + // Just consume the result set. + } + } + verifySingleUseSpans(); + } + + @Test + public void singleUseWithoutTryWithResources() { + // NOTE: This is bad practice. Always use try-with-resources. This test is only here to verify + // that our safeguards work. + ResultSet rs = client.singleUse().executeQuery(SELECT1); + while (rs.next()) { + // Just consume the result set. + } + verifySingleUseSpans(); + } + + private void verifySingleUseSpans() { + + // OpenTelemetry spans should be 0 as OpenCensus is default enabled. + assertEquals(openTelemetrySpanExporter.getFinishedSpanItems().size(), 0); + + // OpenCensus spans and events verification + Map spans = failOnOverkillTraceComponent.getSpans(); + assertThat(spans).containsEntry("CloudSpanner.ReadOnlyTransaction", true); + assertThat(spans).containsEntry("CloudSpannerOperation.ExecuteStreamingQuery", true); + + List expectedAnnotationsForMultiplexedSession = + ImmutableList.of( + "Request for 1 multiplexed session returned 1 session", "Starting/Resuming stream"); + verifyAnnotations( + failOnOverkillTraceComponent.getAnnotations().stream() + .distinct() + .collect(Collectors.toList()), + expectedAnnotationsForMultiplexedSession); + } + + @Test + public void singleUseWithError() { + Statement invalidStatement = Statement.of("select * from foo"); + mockSpanner.putStatementResults( + StatementResult.exception(invalidStatement, Status.INVALID_ARGUMENT.asRuntimeException())); + + SpannerException spannerException = + assertThrows( + SpannerException.class, + () -> client.singleUse().executeQuery(INVALID_UPDATE_STATEMENT).next()); + assertEquals(ErrorCode.INVALID_ARGUMENT, spannerException.getErrorCode()); + + // OpenTelemetry spans should be 0 as OpenCensus is default 
enabled. + assertEquals(openTelemetrySpanExporter.getFinishedSpanItems().size(), 0); + + // OpenCensus spans and events verification + Map spans = failOnOverkillTraceComponent.getSpans(); + assertThat(spans).containsEntry("CloudSpanner.ReadOnlyTransaction", true); + assertThat(spans).containsEntry("CloudSpannerOperation.ExecuteStreamingQuery", true); + + List expectedAnnotationsForMultiplexedSession = + ImmutableList.of( + "Request for 1 multiplexed session returned 1 session", + "Starting/Resuming stream", + "Stream broken. Not safe to retry"); + verifyAnnotations( + failOnOverkillTraceComponent.getAnnotations().stream() + .distinct() + .collect(Collectors.toList()), + expectedAnnotationsForMultiplexedSession); + } + + @Test + public void multiUse() { + try (ReadOnlyTransaction tx = client.readOnlyTransaction()) { + try (ResultSet rs = tx.executeQuery(SELECT1)) { + while (rs.next()) { + // Just consume the result set. + } + } + } + + Map spans = failOnOverkillTraceComponent.getSpans(); + assertThat(spans).containsEntry("CloudSpanner.ReadOnlyTransaction", true); + assertThat(spans).containsEntry("CloudSpannerOperation.ExecuteStreamingQuery", true); + + List expectedAnnotationsForMultiplexedSession = + ImmutableList.of( + "Request for 1 multiplexed session returned 1 session", + "Starting/Resuming stream", + "Creating Transaction", + "Transaction Creation Done"); + verifyAnnotations( + failOnOverkillTraceComponent.getAnnotations().stream() + .distinct() + .collect(Collectors.toList()), + expectedAnnotationsForMultiplexedSession); + } + + @Test + public void transactionRunner() { + TransactionRunner runner = client.readWriteTransaction(); + runner.run(transaction -> transaction.executeUpdate(UPDATE_STATEMENT)); + Map spans = failOnOverkillTraceComponent.getSpans(); + assertThat(spans).containsEntry("CloudSpanner.ReadWriteTransaction", true); + assertThat(spans).containsEntry("CloudSpannerOperation.Commit", true); + + List expectedAnnotationsForMultiplexedSessionsRW = 
+ ImmutableList.of( + "Starting Transaction Attempt", + "Starting Commit", + "Commit Done", + "Transaction Attempt Succeeded", + "Request for 1 multiplexed session returned 1 session"); + verifyAnnotations( + failOnOverkillTraceComponent.getAnnotations().stream() + .distinct() + .collect(Collectors.toList()), + expectedAnnotationsForMultiplexedSessionsRW); + } + + @Test + public void transactionRunnerWithError() { + TransactionRunner runner = client.readWriteTransaction(); + SpannerException e = + assertThrows( + SpannerException.class, + () -> runner.run(transaction -> transaction.executeUpdate(INVALID_UPDATE_STATEMENT))); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + + Map spans = failOnOverkillTraceComponent.getSpans(); + + assertEquals(spans.toString(), 3, spans.size()); + assertThat(spans).containsEntry("CloudSpannerOperation.CreateMultiplexedSession", true); + assertThat(spans).containsEntry("CloudSpanner.ReadWriteTransaction", true); + assertThat(spans).containsEntry("CloudSpannerOperation.ExecuteUpdate", true); + + List expectedAnnotationsForMultiplexedSessionsRW = + ImmutableList.of( + "Starting Transaction Attempt", + "Transaction Attempt Failed in user operation", + "Request for 1 multiplexed session returned 1 session"); + verifyAnnotations( + failOnOverkillTraceComponent.getAnnotations().stream() + .distinct() + .collect(Collectors.toList()), + expectedAnnotationsForMultiplexedSessionsRW); + } + + private void verifyAnnotations(List actualAnnotations, List expectedAnnotations) { + assertEquals( + expectedAnnotations.stream().sorted().collect(Collectors.toList()), + actualAnnotations.stream().distinct().sorted().collect(Collectors.toList())); + } + + private boolean isMultiplexedSessionsEnabled() { + if (spanner.getOptions() == null || spanner.getOptions().getSessionPoolOptions() == null) { + return false; + } + return spanner.getOptions().getSessionPoolOptions().getUseMultiplexedSession(); + } + + private boolean 
isMultiplexedSessionsEnabledForRW() { + if (spanner.getOptions() == null || spanner.getOptions().getSessionPoolOptions() == null) { + return false; + } + return spanner.getOptions().getSessionPoolOptions().getUseMultiplexedSessionForRW(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerApiFuturesTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerApiFuturesTest.java new file mode 100644 index 000000000000..462464b2c38b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerApiFuturesTest.java @@ -0,0 +1,98 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.ForwardingApiFuture; +import java.util.concurrent.CancellationException; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class SpannerApiFuturesTest { + + @Test + public void testGet() { + ApiFuture fut = ApiFutures.immediateFuture(1L); + assertThat(get(fut)).isEqualTo(1L); + } + + @Test + public void testGetNull() { + assertThrows(NullPointerException.class, () -> get(null)); + } + + @Test + public void testGetOrNull() { + assertThat(SpannerApiFutures.getOrNull((ApiFuture) null)).isNull(); + } + + @Test + public void testGetSpannerException() { + ApiFuture fut = + ApiFutures.immediateFailedFuture( + SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "test exception")); + SpannerException e = assertThrows(SpannerException.class, () -> get(fut)); + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + assertThat(e.getMessage()).contains("test exception"); + } + + @Test + public void testGetOtherException() { + ApiFuture fut = + ApiFutures.immediateFailedFuture(new RuntimeException("test runtime exception")); + SpannerException e = assertThrows(SpannerException.class, () -> get(fut)); + assertEquals(ErrorCode.UNKNOWN, e.getErrorCode()); + assertThat(e.getMessage()).contains("test runtime exception"); + } + + @Test + public void testGetInterruptedException() { + ApiFuture fut = + new ForwardingApiFuture(ApiFutures.immediateFuture(null)) { + public Void get() throws InterruptedException { + throw new InterruptedException("test interrupted exception"); + } + }; + SpannerException e = 
assertThrows(SpannerException.class, () -> get(fut)); + assertEquals(ErrorCode.CANCELLED, e.getErrorCode()); + // The message of an interrupted exception is not included in the SpannerException. + assertThat(e.getMessage()).doesNotContain("test interrupted exception"); + } + + @Test + public void testGetCancellationException() { + ApiFuture fut = + new ForwardingApiFuture(ApiFutures.immediateFuture(null)) { + public Void get() { + throw new CancellationException("test cancellation exception"); + } + }; + SpannerException e = assertThrows(SpannerException.class, () -> get(fut)); + assertEquals(ErrorCode.CANCELLED, e.getErrorCode()); + // The message of a cancellation exception is included in the SpannerException. + assertThat(e.getMessage()).contains("test cancellation exception"); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerCloudMonitoringExporterTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerCloudMonitoringExporterTest.java new file mode 100644 index 000000000000..590d62db7b5e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerCloudMonitoringExporterTest.java @@ -0,0 +1,491 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.BuiltInMetricsConstant.CLIENT_HASH_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.CLIENT_NAME_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.CLIENT_UID_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.DATABASE_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.DIRECT_PATH_ENABLED_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.DIRECT_PATH_USED_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.GAX_METER_NAME; +import static com.google.cloud.spanner.BuiltInMetricsConstant.INSTANCE_CONFIG_ID_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.INSTANCE_ID_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.LOCATION_ID_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.OPERATION_COUNT_NAME; +import static com.google.cloud.spanner.BuiltInMetricsConstant.OPERATION_LATENCIES_NAME; +import static com.google.cloud.spanner.BuiltInMetricsConstant.PROJECT_ID_KEY; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.api.Distribution; +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.monitoring.v3.MetricServiceClient; +import com.google.cloud.monitoring.v3.MetricServiceSettings; +import com.google.cloud.monitoring.v3.stub.MetricServiceStub; +import com.google.common.collect.ImmutableList; +import com.google.monitoring.v3.CreateTimeSeriesRequest; +import com.google.monitoring.v3.DroppedLabels; +import com.google.monitoring.v3.TimeSeries; +import com.google.protobuf.Empty; +import com.google.protobuf.InvalidProtocolBufferException; +import 
io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.SpanContext; +import io.opentelemetry.api.trace.TraceFlags; +import io.opentelemetry.api.trace.TraceState; +import io.opentelemetry.sdk.common.InstrumentationScopeInfo; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.DoubleExemplarData; +import io.opentelemetry.sdk.metrics.data.HistogramPointData; +import io.opentelemetry.sdk.metrics.data.LongPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableDoubleExemplarData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableHistogramData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableHistogramPointData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableLongPointData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableMetricData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableSumData; +import io.opentelemetry.sdk.resources.Resource; +import java.io.IOException; +import java.util.*; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnit; +import org.mockito.junit.MockitoRule; + +public class SpannerCloudMonitoringExporterTest { + + private static final String projectId = "fake-project"; + private static final String instanceId = "fake-instance"; + private static final String locationId = "global"; + private static final String databaseId = "fake-database"; + private static final String clientName = "spanner-java/"; + + private static final String clientHash = "spanner-test"; + private static final String instanceConfigId = "fake-instance-config-id"; + + @Rule public final MockitoRule mockitoRule = 
MockitoJUnit.rule(); + + @Mock private MetricServiceStub mockMetricServiceStub; + private MetricServiceClient fakeMetricServiceClient; + private SpannerCloudMonitoringExporter exporter; + + private Attributes attributes; + + private Attributes resourceAttributes; + private Resource resource; + private InstrumentationScopeInfo scope; + + private String client_uid; + + @Before + public void setUp() { + fakeMetricServiceClient = new FakeMetricServiceClient(mockMetricServiceStub); + exporter = new SpannerCloudMonitoringExporter(projectId, fakeMetricServiceClient); + + this.client_uid = BuiltInMetricsProvider.INSTANCE.createClientAttributes().get("client_uid"); + + attributes = + Attributes.builder() + .put(INSTANCE_ID_KEY, instanceId) + .put(DATABASE_KEY, databaseId) + .put(CLIENT_NAME_KEY, clientName) + .put(CLIENT_UID_KEY, this.client_uid) + .put(String.valueOf(DIRECT_PATH_ENABLED_KEY), true) + .put(String.valueOf(DIRECT_PATH_USED_KEY), true) + .build(); + + resourceAttributes = + Attributes.builder() + .put(PROJECT_ID_KEY, projectId) + .put(LOCATION_ID_KEY, locationId) + .put(CLIENT_HASH_KEY, clientHash) + .put(INSTANCE_CONFIG_ID_KEY, instanceConfigId) + .build(); + resource = Resource.create(resourceAttributes); + + scope = InstrumentationScopeInfo.create(GAX_METER_NAME); + } + + @After + public void tearDown() {} + + @Test + public void testExportingSumData() { + ArgumentCaptor argumentCaptor = + ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); + + UnaryCallable mockCallable = Mockito.mock(UnaryCallable.class); + Mockito.when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); + ApiFuture future = ApiFutures.immediateFuture(Empty.getDefaultInstance()); + Mockito.when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future); + + long fakeValue = 11L; + + long startEpoch = 10; + long endEpoch = 15; + LongPointData longPointData = + ImmutableLongPointData.create(startEpoch, endEpoch, attributes, fakeValue); + + 
MetricData longData = + ImmutableMetricData.createLongSum( + resource, + scope, + "spanner.googleapis.com/internal/client/" + OPERATION_COUNT_NAME, + "description", + "1", + ImmutableSumData.create( + true, AggregationTemporality.CUMULATIVE, ImmutableList.of(longPointData))); + + exporter.export(Collections.singletonList(longData)); + assertFalse(exporter.lastExportSkippedData()); + + CreateTimeSeriesRequest request = argumentCaptor.getValue(); + + assertThat(request.getTimeSeriesList()).hasSize(1); + + TimeSeries timeSeries = request.getTimeSeriesList().get(0); + + assertThat(timeSeries.getResource().getLabelsMap()) + .containsExactly( + PROJECT_ID_KEY.getKey(), projectId, + INSTANCE_ID_KEY.getKey(), instanceId, + LOCATION_ID_KEY.getKey(), locationId, + INSTANCE_CONFIG_ID_KEY.getKey(), instanceConfigId, + CLIENT_HASH_KEY.getKey(), clientHash); + + assertThat(timeSeries.getResource().getLabelsMap()).hasSize(5); + + assertThat(timeSeries.getMetric().getLabelsMap()) + .containsExactly( + DATABASE_KEY.getKey(), + databaseId, + CLIENT_NAME_KEY.getKey(), + clientName, + DIRECT_PATH_ENABLED_KEY.getKey(), + "true", + DIRECT_PATH_USED_KEY.getKey(), + "true", + CLIENT_UID_KEY.getKey(), + this.client_uid); + assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(5); + + assertThat(timeSeries.getPoints(0).getValue().getInt64Value()).isEqualTo(fakeValue); + assertThat(timeSeries.getPoints(0).getInterval().getStartTime().getNanos()) + .isEqualTo(startEpoch); + assertThat(timeSeries.getPoints(0).getInterval().getEndTime().getNanos()).isEqualTo(endEpoch); + } + + @Test + public void testExportingHistogramData() { + ArgumentCaptor argumentCaptor = + ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); + + UnaryCallable mockCallable = mock(UnaryCallable.class); + when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); + ApiFuture future = ApiFutures.immediateFuture(Empty.getDefaultInstance()); + 
when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future); + + long startEpoch = 10; + long endEpoch = 15; + HistogramPointData histogramPointData = + ImmutableHistogramPointData.create( + startEpoch, + endEpoch, + attributes, + 3d, + true, + 1d, // min + true, + 2d, // max + Collections.singletonList(1.0), + Arrays.asList(1L, 2L)); + + MetricData histogramData = + ImmutableMetricData.createDoubleHistogram( + resource, + scope, + "spanner.googleapis.com/internal/client/" + OPERATION_LATENCIES_NAME, + "description", + "ms", + ImmutableHistogramData.create( + AggregationTemporality.CUMULATIVE, ImmutableList.of(histogramPointData))); + + exporter.export(Collections.singletonList(histogramData)); + assertFalse(exporter.lastExportSkippedData()); + + CreateTimeSeriesRequest request = argumentCaptor.getValue(); + + assertThat(request.getTimeSeriesList()).hasSize(1); + + TimeSeries timeSeries = request.getTimeSeriesList().get(0); + + assertThat(timeSeries.getResource().getLabelsMap()).hasSize(5); + assertThat(timeSeries.getResource().getLabelsMap()) + .containsExactly( + PROJECT_ID_KEY.getKey(), projectId, + INSTANCE_ID_KEY.getKey(), instanceId, + LOCATION_ID_KEY.getKey(), locationId, + INSTANCE_CONFIG_ID_KEY.getKey(), instanceConfigId, + CLIENT_HASH_KEY.getKey(), clientHash); + + assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(5); + assertThat(timeSeries.getMetric().getLabelsMap()) + .containsExactly( + DATABASE_KEY.getKey(), + databaseId, + CLIENT_NAME_KEY.getKey(), + clientName, + DIRECT_PATH_ENABLED_KEY.getKey(), + "true", + DIRECT_PATH_USED_KEY.getKey(), + "true", + CLIENT_UID_KEY.getKey(), + this.client_uid); + + Distribution distribution = timeSeries.getPoints(0).getValue().getDistributionValue(); + assertThat(distribution.getCount()).isEqualTo(3); + assertThat(timeSeries.getPoints(0).getInterval().getStartTime().getNanos()) + .isEqualTo(startEpoch); + 
assertThat(timeSeries.getPoints(0).getInterval().getEndTime().getNanos()).isEqualTo(endEpoch); + } + + @Test + public void testExportingSumDataInBatches() { + ArgumentCaptor argumentCaptor = + ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); + + UnaryCallable mockCallable = mock(UnaryCallable.class); + when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); + ApiFuture future = ApiFutures.immediateFuture(Empty.getDefaultInstance()); + when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future); + + long startEpoch = 10; + long endEpoch = 15; + + Collection toExport = new ArrayList<>(); + for (int i = 0; i < 250; i++) { + LongPointData longPointData = + ImmutableLongPointData.create(startEpoch, endEpoch, attributes, i); + + MetricData longData = + ImmutableMetricData.createLongSum( + resource, + scope, + "spanner.googleapis.com/internal/client/" + OPERATION_COUNT_NAME, + "description", + "1", + ImmutableSumData.create( + true, AggregationTemporality.CUMULATIVE, ImmutableList.of(longPointData))); + toExport.add(longData); + } + + exporter.export(toExport); + + assertThat(argumentCaptor.getAllValues()).hasSize(2); + CreateTimeSeriesRequest firstRequest = argumentCaptor.getAllValues().get(0); + CreateTimeSeriesRequest secondRequest = argumentCaptor.getAllValues().get(1); + + assertThat(firstRequest.getTimeSeriesList()).hasSize(200); + assertThat(secondRequest.getTimeSeriesList()).hasSize(50); + assertFalse(exporter.lastExportSkippedData()); + + for (int i = 0; i < 250; i++) { + TimeSeries timeSeries; + if (i < 200) { + timeSeries = firstRequest.getTimeSeriesList().get(i); + } else { + timeSeries = secondRequest.getTimeSeriesList().get(i - 200); + } + + assertThat(timeSeries.getResource().getLabelsMap()).hasSize(5); + assertThat(timeSeries.getResource().getLabelsMap()) + .containsExactly( + PROJECT_ID_KEY.getKey(), projectId, + INSTANCE_ID_KEY.getKey(), instanceId, + LOCATION_ID_KEY.getKey(), locationId, + 
INSTANCE_CONFIG_ID_KEY.getKey(), instanceConfigId, + CLIENT_HASH_KEY.getKey(), clientHash); + + assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(5); + assertThat(timeSeries.getMetric().getLabelsMap()) + .containsExactly( + DATABASE_KEY.getKey(), + databaseId, + CLIENT_NAME_KEY.getKey(), + clientName, + DIRECT_PATH_ENABLED_KEY.getKey(), + "true", + DIRECT_PATH_USED_KEY.getKey(), + "true", + CLIENT_UID_KEY.getKey(), + this.client_uid); + + assertThat(timeSeries.getPoints(0).getValue().getInt64Value()).isEqualTo(i); + assertThat(timeSeries.getPoints(0).getInterval().getStartTime().getNanos()) + .isEqualTo(startEpoch); + assertThat(timeSeries.getPoints(0).getInterval().getEndTime().getNanos()).isEqualTo(endEpoch); + } + } + + @Test + public void testExportingHistogramDataWithExemplars() { + ArgumentCaptor argumentCaptor = + ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); + + UnaryCallable mockCallable = mock(UnaryCallable.class); + when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); + ApiFuture future = ApiFutures.immediateFuture(Empty.getDefaultInstance()); + when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future); + + long startEpoch = 10 * 1_000_000_000L; + long endEpoch = 15 * 1_000_000_000L; + long recordTimeEpoch = 12_123_456_789L; + + DoubleExemplarData exemplar = + ImmutableDoubleExemplarData.create( + Attributes.builder() + .put(XGoogSpannerRequestId.REQUEST_ID_HEADER_NAME, "test") + .put("lang", "java") + .build(), + recordTimeEpoch, + SpanContext.create( + "0123456789abcdef0123456789abcdef", + "0123456789abcdef", + TraceFlags.getSampled(), + TraceState.getDefault()), + 1.5); + + HistogramPointData histogramPointData = + ImmutableHistogramPointData.create( + startEpoch, + endEpoch, + attributes, + 3d, + true, + 1d, + true, + 2d, + Collections.singletonList(1.0), + Arrays.asList(1L, 2L), + Collections.singletonList(exemplar) // ← add exemplar + ); + + MetricData histogramData = + 
ImmutableMetricData.createDoubleHistogram( + resource, + scope, + "spanner.googleapis.com/internal/client/" + OPERATION_LATENCIES_NAME, + "description", + "ms", + ImmutableHistogramData.create( + AggregationTemporality.CUMULATIVE, ImmutableList.of(histogramPointData))); + + exporter.export(Collections.singletonList(histogramData)); + assertFalse(exporter.lastExportSkippedData()); + + CreateTimeSeriesRequest request = argumentCaptor.getValue(); + TimeSeries timeSeries = request.getTimeSeriesList().get(0); + Distribution distribution = timeSeries.getPoints(0).getValue().getDistributionValue(); + + // Assert exemplar exists and has expected value + assertThat(distribution.getExemplarsCount()).isEqualTo(1); + Distribution.Exemplar exportedExemplar = distribution.getExemplars(0); + assertThat(exportedExemplar.getValue()).isEqualTo(1.5); + + // Assert timestamp mapping + assertThat(exportedExemplar.getTimestamp().getSeconds()) + .isEqualTo(recordTimeEpoch / 1_000_000_000L); + assertThat(exportedExemplar.getTimestamp().getNanos()) + .isEqualTo((int) (recordTimeEpoch % 1_000_000_000L)); + + // Assert attachments: SpanContext + boolean hasSpanAttachment = + exportedExemplar.getAttachmentsList().stream() + .anyMatch(any -> any.is(com.google.monitoring.v3.SpanContext.class)); + assertThat(hasSpanAttachment).isTrue(); + + // Assert attachments: DroppedLabels (filtered attributes) + List filterAttributes = + exportedExemplar.getAttachmentsList().stream() + .filter(any -> any.is(DroppedLabels.class)) + .map( + any -> { + try { + return any.unpack(DroppedLabels.class); + } catch (InvalidProtocolBufferException e) { + throw new RuntimeException("Failed to unpack SpanContext", e); + } + }) + .collect(Collectors.toList()); + + // Assert only 1 attachment is there with 1 label for request_id. 
+ assertThat(filterAttributes.size()).isEqualTo(1); + assertThat(filterAttributes.get(0).getLabelCount()).isEqualTo(1); + assertThat(filterAttributes.get(0).containsLabel(XGoogSpannerRequestId.REQUEST_ID_HEADER_NAME)) + .isTrue(); + assertThat( + filterAttributes.get(0).getLabelOrThrow(XGoogSpannerRequestId.REQUEST_ID_HEADER_NAME)) + .isEqualTo("test"); + } + + @Test + public void getAggregationTemporality() throws IOException { + SpannerCloudMonitoringExporter actualExporter = + SpannerCloudMonitoringExporter.create(projectId, null, null, null); + assertThat(actualExporter.getAggregationTemporality(InstrumentType.COUNTER)) + .isEqualTo(AggregationTemporality.CUMULATIVE); + } + + @Test + public void testUniverseDomain() throws IOException { + SpannerCloudMonitoringExporter actualExporter = + SpannerCloudMonitoringExporter.create(projectId, null, null, "abc.goog"); + MetricServiceSettings metricServiceSettings = + actualExporter.getMetricServiceClient().getSettings(); + + assertEquals("abc.goog", metricServiceSettings.getUniverseDomain()); + assertEquals("monitoring.abc.goog:443", metricServiceSettings.getEndpoint()); + + actualExporter = + SpannerCloudMonitoringExporter.create( + projectId, null, "monitoringa.abc.goog:443", "abc.goog"); + metricServiceSettings = actualExporter.getMetricServiceClient().getSettings(); + + assertEquals("abc.goog", metricServiceSettings.getUniverseDomain()); + assertEquals("monitoringa.abc.goog:443", metricServiceSettings.getEndpoint()); + } + + private static class FakeMetricServiceClient extends MetricServiceClient { + + protected FakeMetricServiceClient(MetricServiceStub stub) { + super(stub); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerExceptionFactoryTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerExceptionFactoryTest.java new file mode 100644 index 000000000000..55b9523d7db0 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerExceptionFactoryTest.java @@ -0,0 +1,254 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.rpc.ApiExceptionFactory; +import com.google.cloud.spanner.SpannerException.ResourceNotFoundException; +import com.google.protobuf.Duration; +import com.google.rpc.ResourceInfo; +import com.google.rpc.RetryInfo; +import io.grpc.Context; +import io.grpc.Metadata; +import io.grpc.Status; +import io.grpc.Status.Code; +import io.grpc.StatusRuntimeException; +import io.grpc.protobuf.ProtoUtils; +import io.grpc.protobuf.lite.ProtoLiteUtils; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.Mockito; + +/** Unit tests for {@link SpannerExceptionFactory}. 
*/ +@RunWith(JUnit4.class) +public class SpannerExceptionFactoryTest { + + static SessionNotFoundException newSessionNotFoundException(String name) { + return (SessionNotFoundException) + newResourceNotFoundException( + "Session", SpannerExceptionFactory.SESSION_RESOURCE_TYPE, name); + } + + static DatabaseNotFoundException newDatabaseNotFoundException(String name) { + return (DatabaseNotFoundException) + newResourceNotFoundException( + "Database", SpannerExceptionFactory.DATABASE_RESOURCE_TYPE, name); + } + + static StatusRuntimeException newStatusDatabaseNotFoundException(String name) { + return newStatusResourceNotFoundException( + "Database", SpannerExceptionFactory.DATABASE_RESOURCE_TYPE, name); + } + + static InstanceNotFoundException newInstanceNotFoundException(String name) { + return (InstanceNotFoundException) + newResourceNotFoundException( + "Instance", SpannerExceptionFactory.INSTANCE_RESOURCE_TYPE, name); + } + + static StatusRuntimeException newStatusResourceNotFoundException( + String shortName, String resourceType, String resourceName) { + ResourceInfo resourceInfo = + ResourceInfo.newBuilder() + .setResourceType(resourceType) + .setResourceName(resourceName) + .build(); + Metadata.Key key = + Metadata.Key.of( + resourceInfo.getDescriptorForType().getFullName() + Metadata.BINARY_HEADER_SUFFIX, + ProtoLiteUtils.metadataMarshaller(resourceInfo)); + Metadata trailers = new Metadata(); + trailers.put(key, resourceInfo); + String message = + String.format("%s not found: %s with id %s not found", shortName, shortName, resourceName); + return Status.NOT_FOUND.withDescription(message).asRuntimeException(trailers); + } + + private static ResourceNotFoundException newResourceNotFoundException( + String shortName, String resourceType, String resourceName) { + return (ResourceNotFoundException) + SpannerExceptionFactory.newSpannerException( + newStatusResourceNotFoundException(shortName, resourceType, resourceName)); + } + + @Test + public void 
http2InternalErrorIsRetryable() { + Status status = + Status.fromCodeValue(Status.Code.INTERNAL.value()) + .withDescription("HTTP/2 error code: INTERNAL_ERROR"); + SpannerException e = + SpannerExceptionFactory.newSpannerException(new StatusRuntimeException(status)); + assertThat(e.isRetryable()).isTrue(); + } + + @Test + public void connectionClosedIsRetryable() { + Status status = + Status.fromCodeValue(Status.Code.INTERNAL.value()) + .withDescription("Connection closed with unknown cause"); + SpannerException e = + SpannerExceptionFactory.newSpannerException(new StatusRuntimeException(status)); + assertThat(e.isRetryable()).isTrue(); + } + + @Test + public void resourceExhausted() { + Status status = + Status.fromCodeValue(Status.Code.RESOURCE_EXHAUSTED.value()) + .withDescription("Memory pushback"); + SpannerException e = + SpannerExceptionFactory.newSpannerException(new StatusRuntimeException(status)); + assertThat(e.isRetryable()).isFalse(); + } + + @Test + public void resourceExhaustedWithBackoff() { + Status status = + Status.fromCodeValue(Status.Code.RESOURCE_EXHAUSTED.value()) + .withDescription("Memory pushback"); + Metadata trailers = new Metadata(); + Metadata.Key key = ProtoUtils.keyForProto(RetryInfo.getDefaultInstance()); + RetryInfo retryInfo = + RetryInfo.newBuilder() + .setRetryDelay(Duration.newBuilder().setNanos(1000000).setSeconds(1L)) + .build(); + trailers.put(key, retryInfo); + SpannerException e = + SpannerExceptionFactory.newSpannerException(new StatusRuntimeException(status, trailers)); + assertThat(e.isRetryable()).isTrue(); + assertThat(e.getRetryDelayInMillis()).isEqualTo(1001); + } + + @Test + public void abortWithRetryInfo() { + Metadata.Key key = ProtoUtils.keyForProto(RetryInfo.getDefaultInstance()); + Status status = Status.fromCodeValue(Status.Code.ABORTED.value()); + Metadata trailers = new Metadata(); + RetryInfo retryInfo = + RetryInfo.newBuilder() + .setRetryDelay(Duration.newBuilder().setNanos(1000000).setSeconds(1L)) + 
.build(); + trailers.put(key, retryInfo); + SpannerException e = + SpannerExceptionFactory.newSpannerException(new StatusRuntimeException(status, trailers)); + assertThat(e).isInstanceOf(AbortedException.class); + assertThat(e.getRetryDelayInMillis()).isEqualTo(1001L); + } + + @Test + public void abortWithoutRetryInfo() { + Status status = Status.fromCodeValue(Status.Code.ABORTED.value()); + SpannerException e = + SpannerExceptionFactory.newSpannerException(new StatusRuntimeException(status)); + assertThat(e).isInstanceOf(AbortedException.class); + assertThat(e.getRetryDelayInMillis()).isEqualTo(-1L); + } + + @Test + public void abortWithoutDuration() { + Metadata.Key key = ProtoUtils.keyForProto(RetryInfo.getDefaultInstance()); + Status status = Status.fromCodeValue(Status.Code.ABORTED.value()); + Metadata trailers = new Metadata(); + trailers.put(key, RetryInfo.getDefaultInstance()); + SpannerException e = + SpannerExceptionFactory.newSpannerException(new StatusRuntimeException(status, trailers)); + assertThat(e).isInstanceOf(AbortedException.class); + assertThat(e.getRetryDelayInMillis()).isEqualTo(-1L); + } + + @Test + public void nullCancel() { + Context context = Mockito.mock(Context.class); + Mockito.when(context.isCancelled()).thenReturn(true); + Mockito.when(context.cancellationCause()).thenReturn(null); + SpannerException spannerException = + SpannerExceptionFactory.newSpannerExceptionForCancellation(context, null); + assertThat(spannerException.getMessage()).isEqualTo("CANCELLED: Current context was cancelled"); + } + + @Test + public void sessionNotFound() { + SessionNotFoundException e = + newSessionNotFoundException("projects/p/instances/i/databases/d/sessions/s"); + assertThat(e.getResourceName()).isEqualTo("projects/p/instances/i/databases/d/sessions/s"); + } + + @Test + public void databaseNotFound() { + DatabaseNotFoundException e = + newDatabaseNotFoundException("projects/p/instances/i/databases/d"); + 
assertThat(e.getResourceName()).isEqualTo("projects/p/instances/i/databases/d"); + } + + @Test + public void instanceNotFound() { + InstanceNotFoundException e = newInstanceNotFoundException("projects/p/instances/i"); + assertThat(e.getResourceName()).isEqualTo("projects/p/instances/i"); + } + + @Test + public void statusRuntimeExceptionSessionNotFound() { + SpannerException spannerException = + SpannerExceptionFactory.newSpannerException( + Status.NOT_FOUND + .withDescription( + "NOT_FOUND: Session not found: projects/p/instances/i/databases/d/sessions/s") + .asRuntimeException( + createResourceTypeMetadata( + SpannerExceptionFactory.SESSION_RESOURCE_TYPE, + "projects/p/instances/i/databases/d/sessions/s"))); + assertThat(spannerException).isInstanceOf(SessionNotFoundException.class); + } + + @Test + public void apiExceptionSessionNotFound() { + SpannerException spannerException = + SpannerExceptionFactory.newSpannerException( + ApiExceptionFactory.createException( + "NOT_FOUND: Session not found: projects/p/instances/i/databases/d/sessions/s", + Status.NOT_FOUND + .withDescription( + "NOT_FOUND: Session not found:" + + " projects/p/instances/i/databases/d/sessions/s") + .asRuntimeException( + createResourceTypeMetadata( + SpannerExceptionFactory.SESSION_RESOURCE_TYPE, + "projects/p/instances/i/databases/d/sessions/s")), + GrpcStatusCode.of(Code.NOT_FOUND), + false)); + assertThat(spannerException).isInstanceOf(SessionNotFoundException.class); + } + + private Metadata createResourceTypeMetadata(String resourceType, String resourceName) { + ResourceInfo resourceInfo = + ResourceInfo.newBuilder() + .setResourceType(resourceType) + .setResourceName(resourceName) + .build(); + Metadata.Key key = + Metadata.Key.of( + resourceInfo.getDescriptorForType().getFullName() + Metadata.BINARY_HEADER_SUFFIX, + ProtoLiteUtils.metadataMarshaller(resourceInfo)); + Metadata trailers = new Metadata(); + trailers.put(key, resourceInfo); + + return trailers; + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerGaxRetryTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerGaxRetryTest.java new file mode 100644 index 000000000000..fc25832860a8 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerGaxRetryTest.java @@ -0,0 +1,464 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.protobuf.ListValue; +import com.google.rpc.RetryInfo; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.TypeCode; +import io.grpc.Metadata; +import io.grpc.Server; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; 
+import io.grpc.inprocess.InProcessServerBuilder; +import io.grpc.protobuf.ProtoUtils; +import java.time.Duration; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class SpannerGaxRetryTest { + private static final Statement SELECT1AND2 = + Statement.of("SELECT 1 AS COL1 UNION ALL SELECT 2 AS COL1"); + private static final ResultSetMetadata SELECT1AND2_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("COL1") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.INT64) + .build()) + .build()) + .build()) + .build(); + private static final com.google.spanner.v1.ResultSet SELECT1_RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("1").build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("2").build()) + .build()) + .setMetadata(SELECT1AND2_METADATA) + .build(); + private static final Statement UPDATE_STATEMENT = + Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=2"); + private static final long UPDATE_COUNT = 1L; + private static final SimulatedExecutionTime ONE_SECOND = + SimulatedExecutionTime.ofMinimumAndRandomTime(1000, 0); + private static final StatusRuntimeException UNAVAILABLE = + io.grpc.Status.UNAVAILABLE.withDescription("Retryable test exception.").asRuntimeException(); + private static final StatusRuntimeException RESOURCE_EXHAUSTED_NON_RETRYABLE = + Status.RESOURCE_EXHAUSTED + .withDescription("Non-retryable test exception.") + 
.asRuntimeException(); + private static final StatusRuntimeException RESOURCE_EXHAUSTED_RETRYABLE = + Status.RESOURCE_EXHAUSTED + .withDescription("Retryable test exception.") + .asRuntimeException(createRetryInfo()); + private static final StatusRuntimeException FAILED_PRECONDITION = + io.grpc.Status.FAILED_PRECONDITION + .withDescription("Non-retryable test exception.") + .asRuntimeException(); + private static MockSpannerServiceImpl mockSpanner; + private static Server server; + private static LocalChannelProvider channelProvider; + private Spanner spanner; + private DatabaseClient client; + private Spanner spannerWithTimeout; + private DatabaseClient clientWithTimeout; + + @BeforeClass + public static void startStaticServer() throws Exception { + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. + mockSpanner.putStatementResult(StatementResult.query(SELECT1AND2, SELECT1_RESULTSET)); + mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + + String uniqueName = InProcessServerBuilder.generateName(); + server = + InProcessServerBuilder.forName(uniqueName) + // We need to use a real executor for timeouts to occur. + .scheduledExecutorService(new ScheduledThreadPoolExecutor(1)) + .addService(mockSpanner) + .build() + .start(); + channelProvider = LocalChannelProvider.create(uniqueName); + } + + @AfterClass + public static void stopServer() throws InterruptedException { + server.shutdown(); + server.awaitTermination(); + } + + @Before + public void setUp() throws Exception { + mockSpanner.reset(); + mockSpanner.removeAllExecutionTimes(); + SpannerOptions.Builder builder = + SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()); + // Make sure the session pool is empty by default. 
+ SessionPoolOptions sessionPoolOptions = + SessionPoolOptions.newBuilder().setMinSessions(0).build(); + + // Add a wait time for sessions to be initialized. In this case, since minSessions = 0, the + // wait time is for multiplexed sessions + if (sessionPoolOptions.getUseMultiplexedSession()) { + sessionPoolOptions = + sessionPoolOptions.toBuilder() + .setWaitForMinSessionsDuration(Duration.ofSeconds(5)) + .build(); + } + builder.setSessionPoolOption(sessionPoolOptions); + // Create one client with default timeout values and one with short timeout values specifically + // for the test cases that expect a DEADLINE_EXCEEDED. + spanner = builder.build().getService(); + client = spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + + final RetrySettings retrySettings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1L)) + .setMaxRetryDelayDuration(Duration.ofMillis(1L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(175L)) + .setMaxRpcTimeoutDuration(Duration.ofMillis(175L)) + .setMaxAttempts(3) + .setTotalTimeoutDuration(Duration.ofMillis(200L)) + .build(); + RetrySettings commitRetrySettings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1L)) + .setMaxRetryDelayDuration(Duration.ofMillis(1L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(5000L)) + .setMaxRpcTimeoutDuration(Duration.ofMillis(10000L)) + .setMaxAttempts(1) + .setTotalTimeoutDuration(Duration.ofMillis(20000L)) + .build(); + builder + .getSpannerStubSettingsBuilder() + .applyToAllUnaryMethods( + input -> { + input.setRetrySettings(retrySettings); + return null; + }); + builder + .getSpannerStubSettingsBuilder() + .executeStreamingSqlSettings() + .setRetrySettings(retrySettings); + builder.getSpannerStubSettingsBuilder().commitSettings().setRetrySettings(commitRetrySettings); + builder + .getSpannerStubSettingsBuilder() + .executeStreamingSqlSettings() + .setRetrySettings(retrySettings); + 
builder.getSpannerStubSettingsBuilder().streamingReadSettings().setRetrySettings(retrySettings); + spannerWithTimeout = builder.build().getService(); + clientWithTimeout = + spannerWithTimeout.getDatabaseClient( + DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + } + + @After + public void tearDown() { + spannerWithTimeout.close(); + spanner.close(); + } + + static Metadata createRetryInfo() { + Metadata trailers = new Metadata(); + RetryInfo retryInfo = + RetryInfo.newBuilder() + .setRetryDelay( + com.google.protobuf.Duration.newBuilder() + .setNanos((int) TimeUnit.MILLISECONDS.toNanos(1L)) + .setSeconds(0L)) + .build(); + trailers.put(ProtoUtils.keyForProto(RetryInfo.getDefaultInstance()), retryInfo); + return trailers; + } + + private void warmUpSessionPool(DatabaseClient client) { + for (int i = 0; i < 10; i++) { + int retryCount = 0; + while (true) { + try { + TransactionRunner runner = client.readWriteTransaction(); + long updateCount = runner.run(transaction -> transaction.executeUpdate(UPDATE_STATEMENT)); + assertThat(updateCount, is(equalTo(UPDATE_COUNT))); + break; + } catch (SpannerException e) { + // On slow systems there is a chance of DEADLINE_EXCEEDED errors. + // These should be retried. 
+ retryCount++; + if (e.getErrorCode() != ErrorCode.DEADLINE_EXCEEDED || retryCount > 10) { + throw e; + } + } + } + } + } + + @Test + public void singleUseTimeout() { + if (isMultiplexedSessionsEnabled()) { + // for multiplexed sessions CreateSessions RPC is already completed during start-up + // hence, we are setting a strict delay with the next RPC + mockSpanner.setExecuteStreamingSqlExecutionTime(ONE_SECOND); + } + mockSpanner.setBatchCreateSessionsExecutionTime(ONE_SECOND); + try (ResultSet rs = clientWithTimeout.singleUse().executeQuery(SELECT1AND2)) { + SpannerException e = assertThrows(SpannerException.class, () -> rs.next()); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + } + + @Test + public void singleUseUnavailable() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException(UNAVAILABLE)); + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT1AND2)) { + assertTrue(resultSet.next()); + } + } + + @Test + public void singleUseResourceExhausted_nonRetryable() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException(RESOURCE_EXHAUSTED_NON_RETRYABLE)); + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT1AND2)) { + SpannerException exception = assertThrows(SpannerException.class, resultSet::next); + assertEquals(ErrorCode.RESOURCE_EXHAUSTED, exception.getErrorCode()); + } + } + + @Test + public void singleUseResourceExhausted_retryable() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException(RESOURCE_EXHAUSTED_RETRYABLE)); + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT1AND2)) { + assertTrue(resultSet.next()); + } + } + + @Test + public void singleUseNonRetryableError() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException(FAILED_PRECONDITION)); + try (ResultSet rs = client.singleUse().executeQuery(SELECT1AND2)) { + SpannerException e = 
assertThrows(SpannerException.class, () -> rs.next()); + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + + @Test + public void singleUseNonRetryableErrorOnNext() { + try (ResultSet rs = client.singleUse().executeQuery(SELECT1AND2)) { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException(FAILED_PRECONDITION)); + SpannerException e = assertThrows(SpannerException.class, () -> rs.next()); + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + + @Test + public void singleUseInternal() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException(new IllegalArgumentException())); + try (ResultSet rs = client.singleUse().executeQuery(SELECT1AND2)) { + SpannerException e = assertThrows(SpannerException.class, () -> rs.next()); + assertEquals(ErrorCode.INTERNAL, e.getErrorCode()); + } + } + + @Test + public void singleUseReadOnlyTransactionTimeout() { + if (isMultiplexedSessionsEnabled()) { + // for multiplexed sessions CreateSessions RPC is already completed during start-up + // hence, we are setting a strict delay with the next RPC + mockSpanner.setExecuteStreamingSqlExecutionTime(ONE_SECOND); + } + mockSpanner.setBatchCreateSessionsExecutionTime(ONE_SECOND); + try (ResultSet rs = + clientWithTimeout.singleUseReadOnlyTransaction().executeQuery(SELECT1AND2)) { + SpannerException e = assertThrows(SpannerException.class, () -> rs.next()); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + } + + @Test + public void singleUseReadOnlyTransactionUnavailable() { + mockSpanner.addException(UNAVAILABLE); + try (ResultSet rs = client.singleUseReadOnlyTransaction().executeQuery(SELECT1AND2)) { + while (rs.next()) {} + } + } + + @Test + public void singleUseExecuteStreamingSqlTimeout() { + try (ResultSet rs = clientWithTimeout.singleUse().executeQuery(SELECT1AND2)) { + mockSpanner.setExecuteStreamingSqlExecutionTime(ONE_SECOND); + SpannerException e = 
assertThrows(SpannerException.class, () -> rs.next()); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + } + + @Test + public void singleUseExecuteStreamingSqlUnavailable() { + try (ResultSet rs = client.singleUse().executeQuery(SELECT1AND2)) { + mockSpanner.addException(UNAVAILABLE); + while (rs.next()) {} + } + } + + @Test + public void readWriteTransactionTimeout() { + mockSpanner.setBeginTransactionExecutionTime(ONE_SECOND); + SpannerException e = + assertThrows( + SpannerException.class, + () -> clientWithTimeout.readWriteTransaction().run(transaction -> null)); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + + @Test + public void readWriteTransactionUnavailable() { + warmUpSessionPool(client); + mockSpanner.addException(UNAVAILABLE); + TransactionRunner runner = client.readWriteTransaction(); + long updateCount = runner.run(transaction -> transaction.executeUpdate(UPDATE_STATEMENT)); + assertThat(updateCount, is(equalTo(UPDATE_COUNT))); + } + + @Test + public void readWriteTransactionStatementAborted() { + TransactionRunner runner = client.readWriteTransaction(); + final AtomicInteger attempts = new AtomicInteger(); + long updateCount = + runner.run( + transaction -> { + if (attempts.getAndIncrement() == 0) { + mockSpanner.abortNextStatement(); + } + return transaction.executeUpdate(UPDATE_STATEMENT); + }); + assertThat(updateCount, is(equalTo(UPDATE_COUNT))); + assertThat(attempts.get(), is(equalTo(2))); + } + + @Test + public void readWriteTransactionCommitAborted() { + TransactionRunner runner = client.readWriteTransaction(); + final AtomicInteger attempts = new AtomicInteger(); + long updateCount = + runner.run( + transaction -> { + long res = transaction.executeUpdate(UPDATE_STATEMENT); + if (attempts.getAndIncrement() == 0) { + mockSpanner.abortTransaction(transaction); + } + return res; + }); + assertThat(updateCount, is(equalTo(UPDATE_COUNT))); + assertThat(attempts.get(), is(equalTo(2))); + } + + @Test(expected 
= Exception.class) + public void readWriteTransactionCheckedException() { + TransactionRunner runner = client.readWriteTransaction(); + runner.run( + transaction -> { + transaction.executeUpdate(UPDATE_STATEMENT); + throw new Exception("test"); + }); + } + + @Test(expected = SpannerException.class) + public void readWriteTransactionUncheckedException() { + TransactionRunner runner = client.readWriteTransaction(); + runner.run( + transaction -> { + transaction.executeUpdate(UPDATE_STATEMENT); + throw SpannerExceptionFactory.newSpannerException(ErrorCode.INVALID_ARGUMENT, "test"); + }); + } + + @Test + public void transactionManagerTimeout() { + mockSpanner.setExecuteSqlExecutionTime(ONE_SECOND); + try (TransactionManager txManager = clientWithTimeout.transactionManager()) { + TransactionContext tx = txManager.begin(); + SpannerException e = + assertThrows(SpannerException.class, () -> tx.executeUpdate(UPDATE_STATEMENT)); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + } + + @SuppressWarnings("resource") + @Test + public void transactionManagerUnavailable() { + warmUpSessionPool(client); + mockSpanner.addException(UNAVAILABLE); + try (TransactionManager txManager = client.transactionManager()) { + TransactionContext tx = txManager.begin(); + while (true) { + try { + assertThat(tx.executeUpdate(UPDATE_STATEMENT), is(equalTo(UPDATE_COUNT))); + txManager.commit(); + break; + } catch (AbortedException e) { + tx = txManager.resetForRetry(); + } + } + } + } + + private boolean isMultiplexedSessionsEnabled() { + if (spanner.getOptions() == null || spanner.getOptions().getSessionPoolOptions() == null) { + return false; + } + return spanner.getOptions().getSessionPoolOptions().getUseMultiplexedSession(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerImplTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerImplTest.java new file mode 100644 index 
000000000000..a675605e7685 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerImplTest.java @@ -0,0 +1,312 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.mockito.Mockito.when; + +import com.google.api.core.NanoClock; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.NoCredentials; +import com.google.cloud.ServiceRpc; +import com.google.cloud.grpc.GrpcTransportOptions; +import com.google.cloud.spanner.SpannerImpl.ClosedException; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.cloud.spanner.admin.database.v1.stub.DatabaseAdminStub; +import com.google.cloud.spanner.admin.database.v1.stub.DatabaseAdminStubSettings; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient; +import com.google.cloud.spanner.admin.instance.v1.stub.InstanceAdminStub; +import com.google.cloud.spanner.admin.instance.v1.stub.InstanceAdminStubSettings; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; +import io.opentelemetry.api.OpenTelemetry; 
+import java.io.IOException; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; + +/** Unit tests for {@link SpannerImpl}. */ +@RunWith(JUnit4.class) +public class SpannerImplTest { + @Mock private SpannerRpc rpc; + @Mock private SpannerOptions spannerOptions; + @Mock private DatabaseAdminStubSettings databaseAdminStubSettings; + @Mock private DatabaseAdminStub databaseAdminStub; + @Mock private InstanceAdminStubSettings instanceAdminStubSettings; + @Mock private InstanceAdminStub instanceAdminStub; + private SpannerImpl impl; + + @Captor ArgumentCaptor> options; + + @BeforeClass + public static void setupOpenTelemetry() { + SpannerOptions.resetActiveTracingFramework(); + SpannerOptions.enableOpenTelemetryTraces(); + } + + @Before + public void setUp() { + MockitoAnnotations.initMocks(this); + when(spannerOptions.getNumChannels()).thenReturn(4); + when(spannerOptions.getDatabaseRole()).thenReturn("role"); + when(spannerOptions.getPrefetchChunks()).thenReturn(1); + when(spannerOptions.getRetrySettings()).thenReturn(RetrySettings.newBuilder().build()); + when(spannerOptions.getClock()).thenReturn(NanoClock.getDefaultClock()); + when(spannerOptions.getSessionLabels()).thenReturn(Collections.emptyMap()); + when(spannerOptions.getOpenTelemetry()).thenReturn(OpenTelemetry.noop()); + impl = new SpannerImpl(rpc, spannerOptions); + } + + @After + public void teardown() { + impl.close(); + } + + @Test + public void getDbclientAgainGivesSame() { + Map labels = new HashMap<>(); + labels.put("env", "dev"); + 
Mockito.when(spannerOptions.getSessionLabels()).thenReturn(labels); + Mockito.when(spannerOptions.getDatabaseRole()).thenReturn("role"); + String dbName = "projects/p1/instances/i1/databases/d1"; + DatabaseId db = DatabaseId.of(dbName); + + Mockito.when(spannerOptions.getTransportOptions()) + .thenReturn(GrpcTransportOptions.newBuilder().build()); + Mockito.when(spannerOptions.getSessionPoolOptions()) + .thenReturn(SessionPoolOptions.newBuilder().setMinSessions(0).build()); + + DatabaseClient databaseClient = impl.getDatabaseClient(db); + + // Get db client again + DatabaseClient databaseClient1 = impl.getDatabaseClient(db); + + assertThat(databaseClient1).isSameInstanceAs(databaseClient); + } + + @Test + public void queryOptions() { + QueryOptions queryOptions = + QueryOptions.newBuilder() + .setOptimizerVersion("2") + .setOptimizerStatisticsPackage("custom-package") + .build(); + QueryOptions defaultOptions = QueryOptions.getDefaultInstance(); + DatabaseId db = DatabaseId.of("p", "i", "d"); + DatabaseId otherDb = DatabaseId.of("p", "i", "other"); + + // Create a SpannerOptions with and without default query options. + SpannerOptions optionsWithQueryOptions = + new SpannerOptions.Builder( + SpannerOptions.newBuilder() + .setProjectId("some-project") + .setCredentials(NoCredentials.getInstance()) + .build()) { + @Override + QueryOptions getEnvironmentQueryOptions() { + // Override and return default instance to prevent environment variables from + // interfering with the test case. + return QueryOptions.getDefaultInstance(); + } + }.setDefaultQueryOptions(db, queryOptions).build(); + SpannerOptions optionsWithoutQueryOptions = + new SpannerOptions.Builder( + SpannerOptions.newBuilder() + .setProjectId("some-project") + .setCredentials(NoCredentials.getInstance()) + .build()) { + @Override + QueryOptions getEnvironmentQueryOptions() { + // Override and return default instance to prevent environment variables from + // interfering with the test case. 
+ return QueryOptions.getDefaultInstance(); + } + }.build(); + + try (SpannerImpl implWithQueryOptions = new SpannerImpl(rpc, optionsWithQueryOptions); + SpannerImpl implWithoutQueryOptions = new SpannerImpl(rpc, optionsWithoutQueryOptions)) { + + // Default query options are on a per-database basis, so we should only get the custom options + // for 'db' and not for 'otherDb'. + assertThat(implWithQueryOptions.getDefaultQueryOptions(db)).isEqualTo(queryOptions); + assertThat(implWithQueryOptions.getDefaultQueryOptions(otherDb)).isEqualTo(defaultOptions); + + // The other Spanner instance should return default options for both databases. + assertThat(implWithoutQueryOptions.getDefaultQueryOptions(db)).isEqualTo(defaultOptions); + assertThat(implWithoutQueryOptions.getDefaultQueryOptions(otherDb)).isEqualTo(defaultOptions); + } + } + + @Test + public void getDbclientAfterCloseThrows() { + SpannerImpl imp = new SpannerImpl(rpc, spannerOptions); + Map labels = new HashMap<>(); + labels.put("env", "dev"); + Mockito.when(spannerOptions.getSessionLabels()).thenReturn(labels); + String dbName = "projects/p1/instances/i1/databases/d1"; + DatabaseId db = DatabaseId.of(dbName); + + Mockito.when(spannerOptions.getTransportOptions()) + .thenReturn(GrpcTransportOptions.newBuilder().build()); + Mockito.when(spannerOptions.getSessionPoolOptions()) + .thenReturn(SessionPoolOptions.newBuilder().build()); + Mockito.when(spannerOptions.getDatabaseRole()).thenReturn("role"); + imp.close(); + + IllegalStateException e = + assertThrows(IllegalStateException.class, () -> imp.getDatabaseClient(db)); + assertThat(e.getMessage()).contains("Cloud Spanner client has been closed"); + } + + @Test + public void testSpannerClosed() { + SpannerOptions options = createSpannerOptions(); + Spanner spanner1 = options.getService(); + Spanner spanner2 = options.getService(); + ServiceRpc rpc1 = options.getRpc(); + ServiceRpc rpc2 = options.getRpc(); + // The SpannerOptions object should return the same 
instance. + assertThat(spanner1 == spanner2, is(true)); + assertThat(rpc1 == rpc2, is(true)); + spanner1.close(); + // A new instance should be returned as the Spanner instance has been closed. + Spanner spanner3 = options.getService(); + assertThat(spanner1 == spanner3, is(false)); + // A new instance should be returned as the Spanner instance has been closed. + ServiceRpc rpc3 = options.getRpc(); + assertThat(rpc1 == rpc3, is(false)); + // Creating a copy of the SpannerOptions should result in new instances. + options = options.toBuilder().build(); + Spanner spanner4 = options.getService(); + ServiceRpc rpc4 = options.getRpc(); + assertThat(spanner4 == spanner3, is(false)); + assertThat(rpc4 == rpc3, is(false)); + Spanner spanner5 = options.getService(); + ServiceRpc rpc5 = options.getRpc(); + assertThat(spanner4 == spanner5, is(true)); + assertThat(rpc4 == rpc5, is(true)); + spanner3.close(); + spanner4.close(); + } + + @Test + public void testClosedException() { + Spanner spanner = new SpannerImpl(rpc, spannerOptions); + assertThat(spanner.isClosed()).isFalse(); + // Close the Spanner instance in a different method so we can actually verify that the entire + // stacktrace of the method that closed the instance is included in the exception that will be + // thrown by the instance after it has been closed. 
+ closeSpannerAndIncludeStacktrace(spanner); + assertThat(spanner.isClosed()).isTrue(); + IllegalStateException e = + assertThrows( + IllegalStateException.class, + () -> spanner.getDatabaseClient(DatabaseId.of("p", "i", "d"))); + assertThat(e.getCause()).isInstanceOf(ClosedException.class); + StringWriter sw = new StringWriter(); + e.getCause().printStackTrace(new PrintWriter(sw)); + assertThat(sw.toString()).contains("closeSpannerAndIncludeStacktrace"); + } + + @Test + public void testCreateDatabaseAdminClient_whenNullAdminSettings_assertPreconditionFailure() { + Spanner spanner = new SpannerImpl(rpc, spannerOptions); + assertThrows(NullPointerException.class, spanner::createDatabaseAdminClient); + } + + @Test + public void testCreateDatabaseAdminClient_whenMockAdminSettings_assertMethodInvocation() + throws IOException { + when(rpc.getDatabaseAdminStubSettings()).thenReturn(databaseAdminStubSettings); + when(databaseAdminStubSettings.createStub()).thenReturn(databaseAdminStub); + + Spanner spanner = new SpannerImpl(rpc, spannerOptions); + + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient(); + assertNotNull(databaseAdminClient); + } + + @Test(expected = SpannerException.class) + public void testCreateDatabaseAdminClient_whenMockAdminSettings_assertException() + throws IOException { + when(rpc.getDatabaseAdminStubSettings()).thenReturn(databaseAdminStubSettings); + when(databaseAdminStubSettings.createStub()).thenThrow(IOException.class); + + Spanner spanner = new SpannerImpl(rpc, spannerOptions); + + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient(); + assertNotNull(databaseAdminClient); + } + + @Test + public void testCreateInstanceAdminClient_whenNullAdminSettings_assertPreconditionFailure() { + Spanner spanner = new SpannerImpl(rpc, spannerOptions); + assertThrows(NullPointerException.class, spanner::createInstanceAdminClient); + } + + @Test + public void 
testCreateInstanceAdminClient_whenMockAdminSettings_assertMethodInvocation() + throws IOException { + when(rpc.getInstanceAdminStubSettings()).thenReturn(instanceAdminStubSettings); + when(instanceAdminStubSettings.createStub()).thenReturn(instanceAdminStub); + + Spanner spanner = new SpannerImpl(rpc, spannerOptions); + + InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient(); + assertNotNull(instanceAdminClient); + } + + @Test(expected = SpannerException.class) + public void testCreateInstanceAdminClient_whenMockAdminSettings_assertException() + throws IOException { + when(rpc.getInstanceAdminStubSettings()).thenReturn(instanceAdminStubSettings); + when(instanceAdminStubSettings.createStub()).thenThrow(IOException.class); + + Spanner spanner = new SpannerImpl(rpc, spannerOptions); + + InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient(); + assertNotNull(instanceAdminClient); + } + + private void closeSpannerAndIncludeStacktrace(Spanner spanner) { + spanner.close(); + } + + private SpannerOptions createSpannerOptions() { + return SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + .setCredentials(NoCredentials.getInstance()) + .build(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerMatchers.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerMatchers.java new file mode 100644 index 000000000000..4723497a4727 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerMatchers.java @@ -0,0 +1,150 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.protobuf.Message; +import com.google.protobuf.TextFormat; +import java.lang.reflect.InvocationTargetException; +import java.util.concurrent.ExecutionException; +import org.hamcrest.BaseMatcher; +import org.hamcrest.Description; +import org.hamcrest.Matcher; + +/** Various {@link Matcher} implementations used by Cloud Spanner unit tests. */ +public final class SpannerMatchers { + private SpannerMatchers() {} + + /** + * Returns a matcher that checks that a protocol buffer message is equivalent to that described in + * text format by {@code expected}. + */ + static Matcher matchesProto(Class clazz, String expected) { + return new ProtoTextMatcher<>(clazz, expected); + } + + /** + * Returns a matcher that checks that a {@code Throwable} is a {@link SpannerException} where + * {@link SpannerException#getErrorCode()} is equal to {@code code}. + * + *

TODO(user): Move this method to testing. + */ + public static Matcher isSpannerException(ErrorCode code) { + return new SpannerExceptionMatcher<>(code); + } + + /** + * Returns a method that checks that a {@link Throwable} is an {@link ExecutionException} where + * the cause is a {@link SpannerException} with an error code to {@code code}. + */ + public static Matcher isExecutionExceptionWithSpannerCause( + ErrorCode code) { + return new ExecutionExceptionWithSpannerCauseMatcher<>(code); + } + + private static class ProtoTextMatcher extends BaseMatcher { + private final T expected; + + ProtoTextMatcher(Class clazz, String expectedTextFormat) { + T defaultInstance = getDefaultInstance(clazz); + Message.Builder builder = defaultInstance.toBuilder(); + try { + TextFormat.merge(expectedTextFormat, builder); + } catch (TextFormat.ParseException e) { + throw new IllegalArgumentException("Invalid text format for " + clazz.getName(), e); + } + @SuppressWarnings("unchecked") // T.builder().build() always returns T. + T expectedInstance = (T) builder.build(); + expected = expectedInstance; + } + + private T getDefaultInstance(Class clazz) { + try { + @SuppressWarnings("unchecked") // T.getDefaultInstance() always returns T. + T instance = (T) clazz.getMethod("getDefaultInstance").invoke(clazz); + return instance; + } catch (IllegalAccessException | InvocationTargetException | NoSuchMethodException e) { + throw new AssertionError("Invalid proto class " + clazz, e); + } + } + + @Override + public boolean matches(Object item) { + if (item == null || !(expected.getClass().isAssignableFrom(item.getClass()))) { + return false; + } + @SuppressWarnings("unchecked") // Type checked above. 
+ T actual = (T) item; + return expected.equals(actual); + } + + @Override + public void describeTo(Description description) { + description.appendText(expected.toString()); + } + } + + private static class SpannerExceptionMatcher extends BaseMatcher { + private final ErrorCode expectedCode; + + SpannerExceptionMatcher(ErrorCode expectedCode) { + this.expectedCode = checkNotNull(expectedCode); + } + + @Override + public boolean matches(Object item) { + if (!(item instanceof SpannerException)) { + return false; + } + SpannerException e = (SpannerException) item; + return e.getErrorCode() == expectedCode; + } + + @Override + public void describeTo(Description description) { + description.appendText("SpannerException[" + expectedCode + "]"); + } + } + + private static class ExecutionExceptionWithSpannerCauseMatcher + extends BaseMatcher { + private final ErrorCode expectedCode; + + ExecutionExceptionWithSpannerCauseMatcher(ErrorCode expectedCode) { + this.expectedCode = checkNotNull(expectedCode); + } + + @Override + public boolean matches(Object item) { + if (!(item instanceof ExecutionException)) { + return false; + } + ExecutionException ee = (ExecutionException) item; + if (!(ee.getCause() instanceof SpannerException)) { + return false; + } + SpannerException e = (SpannerException) ee.getCause(); + return e.getErrorCode() == expectedCode; + } + + @Override + public void describeTo(Description description) { + description.appendText("ExecutionException[SpannerException[" + expectedCode + "]]"); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerOptionsHelper.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerOptionsHelper.java new file mode 100644 index 000000000000..db02c625099f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerOptionsHelper.java @@ -0,0 +1,29 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the 
Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +/** Helper to configure SpannerOptions for tests. */ +public class SpannerOptionsHelper { + + /** + * Resets the activeTracingFramework. This variable is used for internal testing, and is not a + * valid production scenario. + */ + public static void resetActiveTracingFramework() { + SpannerOptions.resetActiveTracingFramework(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerOptionsTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerOptionsTest.java new file mode 100644 index 000000000000..759888a6673a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerOptionsTest.java @@ -0,0 +1,1386 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; + +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.NoCredentials; +import com.google.cloud.ServiceOptions; +import com.google.cloud.TransportOptions; +import com.google.cloud.grpc.GcpManagedChannelOptions.GcpChannelPoolOptions; +import com.google.cloud.spanner.SpannerOptions.Builder.DefaultReadWriteTransactionOptions; +import com.google.cloud.spanner.SpannerOptions.FixedCloseableExecutorProvider; +import com.google.cloud.spanner.SpannerOptions.SpannerCallContextTimeoutConfigurator; +import com.google.cloud.spanner.admin.database.v1.stub.DatabaseAdminStubSettings; +import com.google.cloud.spanner.admin.instance.v1.stub.InstanceAdminStubSettings; +import com.google.cloud.spanner.v1.stub.SpannerStubSettings; +import com.google.common.base.Strings; +import com.google.spanner.v1.BatchCreateSessionsRequest; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.CreateSessionRequest; +import com.google.spanner.v1.DeleteSessionRequest; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.DirectedReadOptions.IncludeReplicas; +import 
com.google.spanner.v1.DirectedReadOptions.ReplicaSelection; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; +import com.google.spanner.v1.GetSessionRequest; +import com.google.spanner.v1.ListSessionsRequest; +import com.google.spanner.v1.PartitionQueryRequest; +import com.google.spanner.v1.PartitionReadRequest; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.SpannerGrpc; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import io.opentelemetry.api.GlobalOpenTelemetry; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader; +import java.time.Duration; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.ScheduledExecutorService; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nonnull; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.Mockito; + +/** Unit tests for {@link com.google.cloud.spanner.SpannerOptions}. 
*/ +@RunWith(JUnit4.class) +public class SpannerOptionsTest { + private static Level originalLogLevel; + + @BeforeClass + public static void disableLogging() { + Logger logger = Logger.getLogger(""); + originalLogLevel = logger.getLevel(); + logger.setLevel(Level.OFF); + } + + @AfterClass + public static void resetLogging() { + Logger logger = Logger.getLogger(""); + logger.setLevel(originalLogLevel); + } + + @Test + public void defaultBuilder() { + // We need to set the project id and credentials since in test environments we cannot guarantee + // that a default project id and credentials are available. + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setCredentials(NoCredentials.getInstance()) + .build(); + if (Strings.isNullOrEmpty(System.getenv("SPANNER_EMULATOR_HOST"))) { + assertEquals("https://spanner.googleapis.com", options.getHost()); + } else { + assertEquals("http://" + System.getenv("SPANNER_EMULATOR_HOST"), options.getHost()); + } + assertEquals(4, options.getPrefetchChunks()); + assertNull(options.getSessionLabels()); + assertEquals(DecodeMode.DIRECT, options.getDecodeMode()); + } + + @Test + public void builder() { + String host = "http://localhost:8000/"; + String projectId = "test-project"; + Map labels = new HashMap<>(); + labels.put("env", "dev"); + InMemoryMetricReader inMemoryMetricReader = InMemoryMetricReader.create(); + SdkMeterProvider sdkMeterProvider = + SdkMeterProvider.builder().registerMetricReader(inMemoryMetricReader).build(); + OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder().setMeterProvider(sdkMeterProvider).build(); + SpannerOptions options = + SpannerOptions.newBuilder() + .setHost(host) + .setProjectId(projectId) + .setPrefetchChunks(2) + .setSessionLabels(labels) + .setOpenTelemetry(openTelemetry) + .build(); + assertThat(options.getHost()).isEqualTo(host); + assertThat(options.getProjectId()).isEqualTo(projectId); + assertThat(options.getPrefetchChunks()).isEqualTo(2); + 
assertThat(options.getSessionLabels()).containsExactlyEntriesIn(labels); + assertThat(options.getOpenTelemetry()).isEqualTo(openTelemetry); + } + + @Test + public void testSpannerDefaultRetrySettings() { + RetrySettings witRetryPolicy1 = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(250L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(32000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setTotalTimeoutDuration(Duration.ofMillis(3600000L)) + .build(); + RetrySettings witRetryPolicy2 = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(250L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(32000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(60000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(60000L)) + .setTotalTimeoutDuration(Duration.ofMillis(60000L)) + .build(); + RetrySettings witRetryPolicy3 = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(250L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(32000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(30000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(30000L)) + .setTotalTimeoutDuration(Duration.ofMillis(30000L)) + .build(); + RetrySettings noRetry1 = + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setTotalTimeoutDuration(Duration.ofMillis(3600000L)) + .build(); + SpannerOptions options = SpannerOptions.newBuilder().setProjectId("test-project").build(); + SpannerStubSettings stubSettings = options.getSpannerStubSettings(); + List> callsWithRetry1 = + Arrays.asList(stubSettings.listSessionsSettings(), 
stubSettings.commitSettings()); + List> callsWithRetry2 = + Collections.singletonList(stubSettings.batchCreateSessionsSettings()); + List> callsWithRetry3 = + Arrays.asList( + stubSettings.createSessionSettings(), + stubSettings.getSessionSettings(), + stubSettings.deleteSessionSettings(), + stubSettings.executeSqlSettings(), + stubSettings.executeBatchDmlSettings(), + stubSettings.readSettings(), + stubSettings.beginTransactionSettings(), + stubSettings.rollbackSettings(), + stubSettings.partitionQuerySettings(), + stubSettings.partitionReadSettings()); + List> callsWithNoRetry1 = + Arrays.asList( + stubSettings.executeStreamingSqlSettings(), stubSettings.streamingReadSettings()); + + for (UnaryCallSettings callSettings : callsWithRetry1) { + assertThat(callSettings.getRetrySettings()).isEqualTo(witRetryPolicy1); + } + for (UnaryCallSettings callSettings : callsWithRetry2) { + assertThat(callSettings.getRetrySettings()).isEqualTo(witRetryPolicy2); + } + for (UnaryCallSettings callSettings : callsWithRetry3) { + assertThat(callSettings.getRetrySettings()).isEqualTo(witRetryPolicy3); + } + for (ServerStreamingCallSettings callSettings : callsWithNoRetry1) { + assertThat(callSettings.getRetrySettings()).isEqualTo(noRetry1); + } + } + + @Test + public void testSpannerCustomRetrySettings() { + RetrySettings retrySettings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofSeconds(9999L)) + .setRetryDelayMultiplier(9999.99D) + .setMaxRetryDelayDuration(Duration.ofSeconds(9999L)) + .setInitialRpcTimeoutDuration(Duration.ofSeconds(9999L)) + .setRpcTimeoutMultiplier(9999.99D) + .setMaxRpcTimeoutDuration(Duration.ofSeconds(9999L)) + .setTotalTimeoutDuration(Duration.ofSeconds(9999L)) + .build(); + SpannerOptions.Builder builder = SpannerOptions.newBuilder().setProjectId("test-project"); + SpannerStubSettings.Builder stubSettingsBuilder = builder.getSpannerStubSettingsBuilder(); + List> unaryCallSettingsBuilders = + Arrays.asList( + 
stubSettingsBuilder.beginTransactionSettings(), + stubSettingsBuilder.createSessionSettings(), + stubSettingsBuilder.deleteSessionSettings(), + stubSettingsBuilder.executeBatchDmlSettings(), + stubSettingsBuilder.executeSqlSettings(), + stubSettingsBuilder.getSessionSettings(), + stubSettingsBuilder.listSessionsSettings(), + stubSettingsBuilder.partitionQuerySettings(), + stubSettingsBuilder.partitionReadSettings(), + stubSettingsBuilder.readSettings(), + stubSettingsBuilder.rollbackSettings(), + stubSettingsBuilder.commitSettings()); + for (UnaryCallSettings.Builder callSettingsBuilder : unaryCallSettingsBuilders) { + callSettingsBuilder.setRetrySettings(retrySettings); + } + List> streamingCallSettingsBuilders = + Arrays.asList( + stubSettingsBuilder.executeStreamingSqlSettings(), + stubSettingsBuilder.streamingReadSettings()); + for (ServerStreamingCallSettings.Builder callSettingsBuilder : + streamingCallSettingsBuilders) { + callSettingsBuilder.setRetrySettings(retrySettings); + } + + SpannerOptions options = builder.build(); + SpannerStubSettings stubSettings = options.getSpannerStubSettings(); + List> callsWithDefaultSettings = + Arrays.asList( + stubSettings.beginTransactionSettings(), + stubSettings.createSessionSettings(), + stubSettings.deleteSessionSettings(), + stubSettings.executeBatchDmlSettings(), + stubSettings.executeSqlSettings(), + stubSettings.getSessionSettings(), + stubSettings.listSessionsSettings(), + stubSettings.partitionQuerySettings(), + stubSettings.partitionReadSettings(), + stubSettings.readSettings(), + stubSettings.rollbackSettings(), + stubSettings.commitSettings()); + List> callsWithStreamingSettings = + Arrays.asList( + stubSettings.executeStreamingSqlSettings(), stubSettings.streamingReadSettings()); + + for (UnaryCallSettings callSettings : callsWithDefaultSettings) { + assertThat(callSettings.getRetrySettings()).isEqualTo(retrySettings); + } + for (ServerStreamingCallSettings callSettings : callsWithStreamingSettings) { + 
assertThat(callSettings.getRetrySettings()).isEqualTo(retrySettings); + } + } + + @Test + public void testDatabaseAdminDefaultRetrySettings() { + RetrySettings withRetryPolicy1 = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1000L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(32000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setTotalTimeoutDuration(Duration.ofMillis(3600000L)) + .build(); + RetrySettings withRetryPolicy2 = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1000L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(32000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(30000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(30000L)) + .setTotalTimeoutDuration(Duration.ofMillis(30000L)) + .build(); + RetrySettings noRetryPolicy2 = + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(30000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(30000L)) + .setTotalTimeoutDuration(Duration.ofMillis(30000L)) + .build(); + SpannerOptions options = SpannerOptions.newBuilder().setProjectId("test-project").build(); + DatabaseAdminStubSettings stubSettings = options.getDatabaseAdminStubSettings(); + List> callsWithRetryPolicy1 = + Arrays.asList( + stubSettings.dropDatabaseSettings(), + stubSettings.getDatabaseSettings(), + stubSettings.getDatabaseDdlSettings()); + List> callsWithRetryPolicy2 = + Collections.singletonList(stubSettings.getIamPolicySettings()); + List> callsWithNoRetry2 = + Arrays.asList( + stubSettings.setIamPolicySettings(), stubSettings.testIamPermissionsSettings()); + + for (UnaryCallSettings callSettings : callsWithRetryPolicy1) { + assertThat(callSettings.getRetrySettings()).isEqualTo(withRetryPolicy1); + } + for 
(UnaryCallSettings callSettings : callsWithRetryPolicy2) { + assertThat(callSettings.getRetrySettings()).isEqualTo(withRetryPolicy2); + } + for (UnaryCallSettings callSettings : callsWithNoRetry2) { + assertThat(callSettings.getRetrySettings()).isEqualTo(noRetryPolicy2); + } + } + + @Test + public void testDatabaseAdminCustomRetrySettings() { + RetrySettings retrySettings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofSeconds(9999L)) + .setRetryDelayMultiplier(9999.99D) + .setMaxRetryDelayDuration(Duration.ofSeconds(9999L)) + .setInitialRpcTimeoutDuration(Duration.ofSeconds(9999L)) + .setRpcTimeoutMultiplier(9999.99D) + .setMaxRpcTimeoutDuration(Duration.ofSeconds(9999L)) + .setTotalTimeoutDuration(Duration.ofSeconds(9999L)) + .build(); + SpannerOptions.Builder builder = SpannerOptions.newBuilder().setProjectId("test-project"); + DatabaseAdminStubSettings.Builder stubSettingsBuilder = + builder.getDatabaseAdminStubSettingsBuilder(); + List> unaryCallSettingsBuilders = + Arrays.asList( + stubSettingsBuilder.dropDatabaseSettings(), + stubSettingsBuilder.getDatabaseDdlSettings(), + stubSettingsBuilder.getDatabaseSettings()); + for (UnaryCallSettings.Builder callSettingsBuilder : unaryCallSettingsBuilders) { + callSettingsBuilder.setRetrySettings(retrySettings); + } + + SpannerOptions options = builder.build(); + DatabaseAdminStubSettings stubSettings = options.getDatabaseAdminStubSettings(); + List> callsWithDefaultSettings = + Arrays.asList( + stubSettings.dropDatabaseSettings(), + stubSettings.getDatabaseDdlSettings(), + stubSettings.getDatabaseSettings()); + + for (UnaryCallSettings callSettings : callsWithDefaultSettings) { + assertThat(callSettings.getRetrySettings()).isEqualTo(retrySettings); + } + } + + @Test + public void testInstanceAdminDefaultRetrySettings() { + RetrySettings withRetryPolicy1 = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1000L)) + .setRetryDelayMultiplier(1.3) + 
.setMaxRetryDelayDuration(Duration.ofMillis(32000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setTotalTimeoutDuration(Duration.ofMillis(3600000L)) + .build(); + RetrySettings withRetryPolicy2 = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1000L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelayDuration(Duration.ofMillis(32000L)) + .setInitialRpcTimeoutDuration(Duration.ofMillis(30000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(30000L)) + .setTotalTimeoutDuration(Duration.ofMillis(30000L)) + .build(); + RetrySettings noRetryPolicy1 = + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(3600000L)) + .setTotalTimeoutDuration(Duration.ofMillis(3600000L)) + .build(); + RetrySettings noRetryPolicy2 = + RetrySettings.newBuilder() + .setInitialRpcTimeoutDuration(Duration.ofMillis(30000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeoutDuration(Duration.ofMillis(30000L)) + .setTotalTimeoutDuration(Duration.ofMillis(30000L)) + .build(); + SpannerOptions options = SpannerOptions.newBuilder().setProjectId("test-project").build(); + InstanceAdminStubSettings stubSettings = options.getInstanceAdminStubSettings(); + List> callsWithRetryPolicy1 = + Arrays.asList( + stubSettings.getInstanceConfigSettings(), + stubSettings.listInstanceConfigsSettings(), + stubSettings.deleteInstanceSettings(), + stubSettings.getInstanceSettings(), + stubSettings.listInstancesSettings()); + List> callsWithRetryPolicy2 = + Collections.singletonList(stubSettings.getIamPolicySettings()); + List> callsWithNoRetryPolicy1 = + Arrays.asList(stubSettings.createInstanceSettings(), stubSettings.updateInstanceSettings()); + List> callsWithNoRetryPolicy2 = + Arrays.asList( + stubSettings.setIamPolicySettings(), 
stubSettings.testIamPermissionsSettings()); + + for (UnaryCallSettings callSettings : callsWithRetryPolicy1) { + assertThat(callSettings.getRetrySettings()).isEqualTo(withRetryPolicy1); + } + for (UnaryCallSettings callSettings : callsWithRetryPolicy2) { + assertThat(callSettings.getRetrySettings()).isEqualTo(withRetryPolicy2); + } + for (UnaryCallSettings callSettings : callsWithNoRetryPolicy1) { + assertThat(callSettings.getRetrySettings()).isEqualTo(noRetryPolicy1); + } + for (UnaryCallSettings callSettings : callsWithNoRetryPolicy2) { + assertThat(callSettings.getRetrySettings()).isEqualTo(noRetryPolicy2); + } + } + + @Test + public void testInstanceAdminCustomRetrySettings() { + RetrySettings retrySettings = + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofSeconds(9999L)) + .setRetryDelayMultiplier(9999.99D) + .setMaxRetryDelayDuration(Duration.ofSeconds(9999L)) + .setInitialRpcTimeoutDuration(Duration.ofSeconds(9999L)) + .setRpcTimeoutMultiplier(9999.99D) + .setMaxRpcTimeoutDuration(Duration.ofSeconds(9999L)) + .setTotalTimeoutDuration(Duration.ofSeconds(9999L)) + .build(); + SpannerOptions.Builder builder = SpannerOptions.newBuilder().setProjectId("test-project"); + InstanceAdminStubSettings.Builder stubSettingsBuilder = + builder.getInstanceAdminStubSettingsBuilder(); + List> unaryCallSettingsBuilders = + Arrays.asList( + stubSettingsBuilder.deleteInstanceSettings(), + stubSettingsBuilder.getInstanceConfigSettings(), + stubSettingsBuilder.getInstanceSettings(), + stubSettingsBuilder.listInstanceConfigsSettings(), + stubSettingsBuilder.listInstancesSettings()); + for (UnaryCallSettings.Builder callSettingsBuilder : unaryCallSettingsBuilders) { + callSettingsBuilder.setRetrySettings(retrySettings); + } + + SpannerOptions options = builder.build(); + InstanceAdminStubSettings stubSettings = options.getInstanceAdminStubSettings(); + List> callsWithDefaultSettings = + Arrays.asList( + stubSettings.getInstanceConfigSettings(), + 
stubSettings.listInstanceConfigsSettings(), + stubSettings.deleteInstanceSettings(), + stubSettings.getInstanceSettings(), + stubSettings.listInstancesSettings()); + + for (UnaryCallSettings callSettings : callsWithDefaultSettings) { + assertThat(callSettings.getRetrySettings()).isEqualTo(retrySettings); + } + } + + @Test + public void testInvalidTransport() { + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + SpannerOptions.newBuilder() + .setTransportOptions(Mockito.mock(TransportOptions.class))); + assertNotNull(e.getMessage()); + } + + @Test + public void testInvalidSessionLabels() { + Map labels = new HashMap<>(); + labels.put("env", null); + NullPointerException e = + assertThrows( + NullPointerException.class, () -> SpannerOptions.newBuilder().setSessionLabels(labels)); + assertNotNull(e.getMessage()); + } + + @Test + public void testNullSessionLabels() { + NullPointerException e = + assertThrows( + NullPointerException.class, () -> SpannerOptions.newBuilder().setSessionLabels(null)); + assertNotNull(e.getMessage()); + } + + @Test + public void testDoNotCacheClosedSpannerInstance() { + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + .setCredentials(NoCredentials.getInstance()) + .build(); + // Getting a service twice should give the same instance. + Spanner service1 = options.getService(); + Spanner service2 = options.getService(); + assertThat(service1 == service2, is(true)); + assertThat(service1.isClosed()).isFalse(); + // Closing a service instance should cause the SpannerOptions to create a new service. + service1.close(); + Spanner service3 = options.getService(); + assertThat(service3 == service1, is(false)); + assertThat(service1.isClosed()).isTrue(); + assertThat(service3.isClosed()).isFalse(); + + // Getting another service from the SpannerOptions should return the new cached instance. 
+ Spanner service4 = options.getService(); + assertThat(service3 == service4, is(true)); + assertThat(service3.isClosed()).isFalse(); + service3.close(); + } + + @Test + public void testSetClientLibToken() { + final String jdbcToken = "sp-jdbc"; + final String hibernateToken = "sp-hib"; + final String pgAdapterToken = "pg-adapter"; + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("some-project") + .setCredentials(NoCredentials.getInstance()) + .setClientLibToken(jdbcToken) + .build(); + // Verify that the client lib token that will actually be used contains both the JDBC token and + // the standard Java client library token ('gccl'). + assertEquals("sp-jdbc gccl", options.getClientLibToken()); + + options = + SpannerOptions.newBuilder() + .setProjectId("some-project") + .setCredentials(NoCredentials.getInstance()) + .setClientLibToken(hibernateToken) + .build(); + assertEquals("sp-hib gccl", options.getClientLibToken()); + + options = + SpannerOptions.newBuilder() + .setProjectId("some-project") + .setCredentials(NoCredentials.getInstance()) + .setClientLibToken(pgAdapterToken) + .build(); + assertEquals("pg-adapter gccl", options.getClientLibToken()); + + options = + SpannerOptions.newBuilder() + .setProjectId("some-project") + .setCredentials(NoCredentials.getInstance()) + .build(); + assertEquals(options.getClientLibToken(), ServiceOptions.getGoogApiClientLibName()); + } + + @Test(expected = IllegalArgumentException.class) + public void testSetInvalidClientLibToken() { + SpannerOptions.newBuilder() + .setProjectId("some-project") + .setCredentials(NoCredentials.getInstance()) + .setClientLibToken("foo"); + } + + @Test + public void testSetEmulatorHostWithoutProtocol() { + // If the host doesn't have a protocol as a prefix, it will automatically be prefixed with + // "http://". 
+ SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + .setEmulatorHost("localhost:1234") + .build(); + assertThat(options.getHost()).isEqualTo("http://localhost:1234"); + assertThat(options.getEndpoint()).isEqualTo("localhost:1234"); + } + + @Test + public void testSetEmulatorHostWithProtocol() { + // If the host has a protocol, it should not be modified. + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + .setEmulatorHost("http://localhost:1234") + .build(); + assertThat(options.getHost()).isEqualTo("http://localhost:1234"); + assertThat(options.getEndpoint()).isEqualTo("localhost:1234"); + } + + @Test + public void testDefaultQueryOptions() { + SpannerOptions.useEnvironment( + new SpannerOptions.SpannerEnvironment() { + @Override + public String getOptimizerVersion() { + return ""; + } + + @Nonnull + @Override + public String getOptimizerStatisticsPackage() { + return ""; + } + }); + SpannerOptions options = + SpannerOptions.newBuilder() + .setDefaultQueryOptions( + DatabaseId.of("p", "i", "d"), + QueryOptions.newBuilder() + .setOptimizerVersion("1") + .setOptimizerStatisticsPackage("custom-package") + .build()) + .setProjectId("p") + .setCredentials(NoCredentials.getInstance()) + .build(); + assertThat(options.getDefaultQueryOptions(DatabaseId.of("p", "i", "d"))) + .isEqualTo( + QueryOptions.newBuilder() + .setOptimizerVersion("1") + .setOptimizerStatisticsPackage("custom-package") + .build()); + assertThat(options.getDefaultQueryOptions(DatabaseId.of("p", "i", "o"))) + .isEqualTo(QueryOptions.getDefaultInstance()); + + // Now simulate that the user has set an environment variable for the query optimizer version + // and statistics package. 
+ SpannerOptions.useEnvironment( + new SpannerOptions.SpannerEnvironment() { + @Override + public String getOptimizerVersion() { + return "2"; + } + + @Nonnull + @Override + public String getOptimizerStatisticsPackage() { + return "env-package"; + } + }); + // Create options with '1' as the default query optimizer version and 'custom-package' as the + // default query optimizer statistics package. These values should be overridden by + // the environment variable. + options = + SpannerOptions.newBuilder() + .setDefaultQueryOptions( + DatabaseId.of("p", "i", "d"), + QueryOptions.newBuilder() + .setOptimizerVersion("1") + .setOptimizerStatisticsPackage("custom-package") + .build()) + .setProjectId("p") + .setCredentials(NoCredentials.getInstance()) + .build(); + assertThat(options.getDefaultQueryOptions(DatabaseId.of("p", "i", "d"))) + .isEqualTo( + QueryOptions.newBuilder() + .setOptimizerVersion("2") + .setOptimizerStatisticsPackage("env-package") + .build()); + assertThat(options.getDefaultQueryOptions(DatabaseId.of("p", "i", "o"))) + .isEqualTo( + QueryOptions.newBuilder() + .setOptimizerVersion("2") + .setOptimizerStatisticsPackage("env-package") + .build()); + } + + @Test + public void testCompressorName() { + assertThat( + SpannerOptions.newBuilder() + .setProjectId("p") + .setCompressorName("gzip") + .build() + .getCompressorName()) + .isEqualTo("gzip"); + assertThat( + SpannerOptions.newBuilder() + .setProjectId("p") + .setCompressorName("identity") + .build() + .getCompressorName()) + .isEqualTo("identity"); + assertNull( + SpannerOptions.newBuilder() + .setProjectId("p") + .setCompressorName(null) + .build() + .getCompressorName()); + assertThrows( + IllegalArgumentException.class, () -> SpannerOptions.newBuilder().setCompressorName("foo")); + } + + @Test + public void testLeaderAwareRoutingEnablement() { + assertTrue(SpannerOptions.newBuilder().setProjectId("p").build().isLeaderAwareRoutingEnabled()); + assertTrue( + SpannerOptions.newBuilder() + 
.setProjectId("p") + .enableLeaderAwareRouting() + .build() + .isLeaderAwareRoutingEnabled()); + assertFalse( + SpannerOptions.newBuilder() + .setProjectId("p") + .disableLeaderAwareRouting() + .build() + .isLeaderAwareRoutingEnabled()); + } + + @Test + public void testEndToEndTracingEnablement() { + // Test that end-to-end tracing is disabled by default. + assertFalse(SpannerOptions.newBuilder().setProjectId("p").build().isEndToEndTracingEnabled()); + assertTrue( + SpannerOptions.newBuilder() + .setProjectId("p") + .setEnableEndToEndTracing(true) + .build() + .isEndToEndTracingEnabled()); + assertFalse( + SpannerOptions.newBuilder() + .setProjectId("p") + .setEnableEndToEndTracing(false) + .build() + .isEndToEndTracingEnabled()); + } + + @Test + public void testMonitoringHost() { + String metricsEndpoint = "test-endpoint:443"; + assertNull(SpannerOptions.newBuilder().setProjectId("p").build().getMonitoringHost()); + assertThat( + SpannerOptions.newBuilder() + .setProjectId("p") + .setMonitoringHost(metricsEndpoint) + .build() + .getMonitoringHost()) + .isEqualTo(metricsEndpoint); + } + + @Test + public void testTransactionOptions() { + DefaultReadWriteTransactionOptions transactionOptions = + DefaultReadWriteTransactionOptions.newBuilder() + .setIsolationLevel(IsolationLevel.SERIALIZABLE) + .build(); + assertNotNull( + SpannerOptions.newBuilder().setProjectId("p").build().getDefaultTransactionOptions()); + assertThat( + SpannerOptions.newBuilder() + .setProjectId("p") + .setDefaultTransactionOptions(transactionOptions) + .build() + .getDefaultTransactionOptions() + .getIsolationLevel()) + .isEqualTo(IsolationLevel.SERIALIZABLE); + } + + @Test + public void testSetDirectedReadOptions() { + final DirectedReadOptions directedReadOptions = + DirectedReadOptions.newBuilder() + .setIncludeReplicas( + IncludeReplicas.newBuilder() + .addReplicaSelections( + ReplicaSelection.newBuilder().setLocation("us-west1").build()) + .build()) + .build(); + SpannerOptions options = + 
SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + .setDirectedReadOptions(directedReadOptions) + .build(); + assertEquals(options.getDirectedReadOptions(), directedReadOptions); + assertThrows( + NullPointerException.class, + () -> SpannerOptions.newBuilder().setDirectedReadOptions(null).build()); + } + + @Test + public void testSpannerCallContextTimeoutConfigurator_NullValues() { + SpannerCallContextTimeoutConfigurator configurator = + SpannerCallContextTimeoutConfigurator.create(); + ApiCallContext inputCallContext = GrpcCallContext.createDefault(); + + assertNull( + configurator.configure( + inputCallContext, + BatchCreateSessionsRequest.getDefaultInstance(), + SpannerGrpc.getBatchCreateSessionsMethod())); + assertNull( + configurator.configure( + inputCallContext, + CreateSessionRequest.getDefaultInstance(), + SpannerGrpc.getCreateSessionMethod())); + assertNull( + configurator.configure( + inputCallContext, + DeleteSessionRequest.getDefaultInstance(), + SpannerGrpc.getDeleteSessionMethod())); + assertNull( + configurator.configure( + inputCallContext, + GetSessionRequest.getDefaultInstance(), + SpannerGrpc.getGetSessionMethod())); + assertNull( + configurator.configure( + inputCallContext, + DeleteSessionRequest.getDefaultInstance(), + SpannerGrpc.getDeleteSessionMethod())); + assertNull( + configurator.configure( + inputCallContext, + ListSessionsRequest.getDefaultInstance(), + SpannerGrpc.getListSessionsMethod())); + + assertNull( + configurator.configure( + inputCallContext, + BeginTransactionRequest.getDefaultInstance(), + SpannerGrpc.getBeginTransactionMethod())); + assertNull( + configurator.configure( + inputCallContext, CommitRequest.getDefaultInstance(), SpannerGrpc.getCommitMethod())); + assertNull( + configurator.configure( + inputCallContext, + RollbackRequest.getDefaultInstance(), + SpannerGrpc.getRollbackMethod())); + + assertNull( + configurator.configure( + inputCallContext, + ExecuteSqlRequest.getDefaultInstance(), + 
SpannerGrpc.getExecuteSqlMethod())); + assertNull( + configurator.configure( + inputCallContext, + ExecuteSqlRequest.getDefaultInstance(), + SpannerGrpc.getExecuteStreamingSqlMethod())); + assertNull( + configurator.configure( + inputCallContext, + ExecuteBatchDmlRequest.getDefaultInstance(), + SpannerGrpc.getExecuteBatchDmlMethod())); + assertNull( + configurator.configure( + inputCallContext, ReadRequest.getDefaultInstance(), SpannerGrpc.getReadMethod())); + assertNull( + configurator.configure( + inputCallContext, + ReadRequest.getDefaultInstance(), + SpannerGrpc.getStreamingReadMethod())); + + assertNull( + configurator.configure( + inputCallContext, + PartitionQueryRequest.getDefaultInstance(), + SpannerGrpc.getPartitionQueryMethod())); + assertNull( + configurator.configure( + inputCallContext, + PartitionReadRequest.getDefaultInstance(), + SpannerGrpc.getPartitionReadMethod())); + } + + @Test + public void testSpannerCallContextTimeoutConfigurator_WithTimeouts() { + SpannerCallContextTimeoutConfigurator configurator = + SpannerCallContextTimeoutConfigurator.create(); + configurator.withBatchUpdateTimeoutDuration(Duration.ofSeconds(1L)); + configurator.withCommitTimeoutDuration(Duration.ofSeconds(2L)); + configurator.withExecuteQueryTimeoutDuration(Duration.ofSeconds(3L)); + configurator.withExecuteUpdateTimeoutDuration(Duration.ofSeconds(4L)); + configurator.withPartitionQueryTimeoutDuration(Duration.ofSeconds(5L)); + configurator.withPartitionReadTimeoutDuration(Duration.ofSeconds(6L)); + configurator.withReadTimeoutDuration(Duration.ofSeconds(7L)); + configurator.withRollbackTimeoutDuration(Duration.ofSeconds(8L)); + + ApiCallContext inputCallContext = GrpcCallContext.createDefault(); + + assertNull( + configurator.configure( + inputCallContext, + BatchCreateSessionsRequest.getDefaultInstance(), + SpannerGrpc.getBatchCreateSessionsMethod())); + assertNull( + configurator.configure( + inputCallContext, + CreateSessionRequest.getDefaultInstance(), + 
SpannerGrpc.getCreateSessionMethod())); + assertNull( + configurator.configure( + inputCallContext, + DeleteSessionRequest.getDefaultInstance(), + SpannerGrpc.getDeleteSessionMethod())); + assertNull( + configurator.configure( + inputCallContext, + GetSessionRequest.getDefaultInstance(), + SpannerGrpc.getGetSessionMethod())); + assertNull( + configurator.configure( + inputCallContext, + DeleteSessionRequest.getDefaultInstance(), + SpannerGrpc.getDeleteSessionMethod())); + assertNull( + configurator.configure( + inputCallContext, + ListSessionsRequest.getDefaultInstance(), + SpannerGrpc.getListSessionsMethod())); + + assertNull( + configurator.configure( + inputCallContext, + BeginTransactionRequest.getDefaultInstance(), + SpannerGrpc.getBeginTransactionMethod())); + assertThat( + configurator + .configure( + inputCallContext, + CommitRequest.getDefaultInstance(), + SpannerGrpc.getCommitMethod()) + .getTimeoutDuration()) + .isEqualTo(Duration.ofSeconds(2L)); + assertThat( + configurator + .configure( + inputCallContext, + RollbackRequest.getDefaultInstance(), + SpannerGrpc.getRollbackMethod()) + .getTimeoutDuration()) + .isEqualTo(Duration.ofSeconds(8L)); + + assertNull( + configurator.configure( + inputCallContext, + ExecuteSqlRequest.getDefaultInstance(), + SpannerGrpc.getExecuteSqlMethod())); + assertThat( + configurator + .configure( + inputCallContext, + ExecuteSqlRequest.getDefaultInstance(), + SpannerGrpc.getExecuteStreamingSqlMethod()) + .getTimeoutDuration()) + .isEqualTo(Duration.ofSeconds(3L)); + assertThat( + configurator + .configure( + inputCallContext, + ExecuteBatchDmlRequest.getDefaultInstance(), + SpannerGrpc.getExecuteBatchDmlMethod()) + .getTimeoutDuration()) + .isEqualTo(Duration.ofSeconds(1L)); + assertNull( + configurator.configure( + inputCallContext, ReadRequest.getDefaultInstance(), SpannerGrpc.getReadMethod())); + assertThat( + configurator + .configure( + inputCallContext, + ReadRequest.getDefaultInstance(), + 
SpannerGrpc.getStreamingReadMethod()) + .getTimeoutDuration()) + .isEqualTo(Duration.ofSeconds(7L)); + + assertThat( + configurator + .configure( + inputCallContext, + PartitionQueryRequest.getDefaultInstance(), + SpannerGrpc.getPartitionQueryMethod()) + .getTimeoutDuration()) + .isEqualTo(Duration.ofSeconds(5L)); + assertThat( + configurator + .configure( + inputCallContext, + PartitionReadRequest.getDefaultInstance(), + SpannerGrpc.getPartitionReadMethod()) + .getTimeoutDuration()) + .isEqualTo(Duration.ofSeconds(6L)); + } + + @Test + public void testCustomAsyncExecutorProvider() { + ScheduledExecutorService service = mock(ScheduledExecutorService.class); + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setCredentials(NoCredentials.getInstance()) + .setAsyncExecutorProvider(FixedCloseableExecutorProvider.create(service)) + .build(); + assertSame(service, options.getAsyncExecutorProvider().getExecutor()); + } + + @Test + public void testAsyncExecutorProviderCoreThreadCount() throws Exception { + assertEquals(8, SpannerOptions.getDefaultAsyncExecutorProviderCoreThreadCount()); + String propertyName = "com.google.cloud.spanner.async_num_core_threads"; + assertEquals( + Integer.valueOf(8), + runWithSystemProperty( + propertyName, null, SpannerOptions::getDefaultAsyncExecutorProviderCoreThreadCount)); + assertEquals( + Integer.valueOf(16), + runWithSystemProperty( + propertyName, "16", SpannerOptions::getDefaultAsyncExecutorProviderCoreThreadCount)); + assertEquals( + Integer.valueOf(1), + runWithSystemProperty( + propertyName, "1", SpannerOptions::getDefaultAsyncExecutorProviderCoreThreadCount)); + assertThrows( + SpannerException.class, + () -> + runWithSystemProperty( + propertyName, + "foo", + SpannerOptions::getDefaultAsyncExecutorProviderCoreThreadCount)); + assertThrows( + SpannerException.class, + () -> + runWithSystemProperty( + propertyName, + "-1", + SpannerOptions::getDefaultAsyncExecutorProviderCoreThreadCount)); 
+ assertThrows( + SpannerException.class, + () -> + runWithSystemProperty( + propertyName, "", SpannerOptions::getDefaultAsyncExecutorProviderCoreThreadCount)); + } + + static V runWithSystemProperty( + String propertyName, String propertyValue, Callable callable) throws Exception { + String currentValue = System.getProperty(propertyName); + if (propertyValue == null) { + System.clearProperty(propertyName); + } else { + System.setProperty(propertyName, propertyValue); + } + try { + return callable.call(); + } finally { + if (currentValue == null) { + System.clearProperty(propertyName); + } else { + System.setProperty(propertyName, currentValue); + } + } + } + + @Test + public void testDefaultNumChannelsWithGrpcGcpExtensionEnabled() { + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setCredentials(NoCredentials.getInstance()) + .enableGrpcGcpExtension() + .build(); + + assertEquals(SpannerOptions.GRPC_GCP_ENABLED_DEFAULT_CHANNELS, options.getNumChannels()); + } + + @Test + public void testDefaultNumChannelsWithGrpcGcpExtensionDisabled() { + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setCredentials(NoCredentials.getInstance()) + .disableGrpcGcpExtension() + .build(); + + assertEquals(SpannerOptions.DEFAULT_CHANNELS, options.getNumChannels()); + } + + @Test + public void testNumChannelsWithGrpcGcpExtensionEnabled() { + // Set number of channels explicitly, before enabling gRPC-GCP channel pool in SpannerOptions + // builder. + int numChannels = 5; + SpannerOptions options1 = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setCredentials(NoCredentials.getInstance()) + .setNumChannels(numChannels) + .enableGrpcGcpExtension() + .build(); + + assertEquals(numChannels, options1.getNumChannels()); + + // Set number of channels explicitly, after enabling gRPC-GCP channel pool in SpannerOptions + // builder. 
+ SpannerOptions options2 = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setCredentials(NoCredentials.getInstance()) + .enableGrpcGcpExtension() + .setNumChannels(numChannels) + .build(); + + assertEquals(numChannels, options2.getNumChannels()); + } + + @Test + public void checkCreatedInstanceWhenGrpcGcpExtensionDisabled() { + SpannerOptions options = + SpannerOptions.newBuilder().setProjectId("test-project").disableGrpcGcpExtension().build(); + SpannerOptions options1 = options.toBuilder().build(); + assertEquals(false, options.isGrpcGcpExtensionEnabled()); + assertEquals(options.isGrpcGcpExtensionEnabled(), options1.isGrpcGcpExtensionEnabled()); + + Spanner spanner1 = options.getService(); + Spanner spanner2 = options1.getService(); + + assertNotSame(spanner1, spanner2); + + spanner1.close(); + spanner2.close(); + } + + @Test + public void checkCreatedInstanceWhenGrpcGcpExtensionEnabled() { + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setCredentials(NoCredentials.getInstance()) + .enableGrpcGcpExtension() + .build(); + SpannerOptions options1 = options.toBuilder().build(); + assertEquals(true, options.isGrpcGcpExtensionEnabled()); + assertEquals(options.isGrpcGcpExtensionEnabled(), options1.isGrpcGcpExtensionEnabled()); + + Spanner spanner1 = options.getService(); + Spanner spanner2 = options1.getService(); + + assertNotSame(spanner1, spanner2); + + spanner1.close(); + spanner2.close(); + } + + @Test + public void checkGlobalOpenTelemetryWhenNotInjected() { + GlobalOpenTelemetry.resetForTest(); + InMemoryMetricReader inMemoryMetricReader = InMemoryMetricReader.create(); + SdkMeterProvider sdkMeterProvider = + SdkMeterProvider.builder().registerMetricReader(inMemoryMetricReader).build(); + OpenTelemetrySdk.builder().setMeterProvider(sdkMeterProvider).buildAndRegisterGlobal(); + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("test-project") + 
.setCredentials(NoCredentials.getInstance()) + .build(); + assertEquals(GlobalOpenTelemetry.get(), options.getOpenTelemetry()); + } + + @Test + public void testExperimentalHostOptions() { + SpannerOptions options = + SpannerOptions.newBuilder() + .setExperimentalHost("localhost:8080") + .setCredentials(NoCredentials.getInstance()) + .build(); + assertEquals("default", options.getProjectId()); + assertEquals(0, options.getSessionPoolOptions().getMinSessions()); + assertEquals(0, options.getSessionPoolOptions().getMaxSessions()); + assertTrue(options.getSessionPoolOptions().getUseMultiplexedSession()); + assertTrue(options.getSessionPoolOptions().getUseMultiplexedSessionForRW()); + assertTrue(options.getSessionPoolOptions().getUseMultiplexedSessionPartitionedOps()); + } + + @Test + public void testDynamicChannelPoolingDisabledByDefault() { + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setCredentials(NoCredentials.getInstance()) + .build(); + assertFalse(options.isDynamicChannelPoolEnabled()); + } + + @Test + public void testDynamicChannelPoolingEnabledExplicitly() { + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setCredentials(NoCredentials.getInstance()) + .enableDynamicChannelPool() + .build(); + assertTrue(options.isDynamicChannelPoolEnabled()); + + // Verify Spanner-specific defaults are applied + GcpChannelPoolOptions poolOptions = options.getGcpChannelPoolOptions(); + assertNotNull(poolOptions); + assertEquals(SpannerOptions.DEFAULT_DYNAMIC_POOL_INITIAL_SIZE, poolOptions.getInitSize()); + assertEquals(SpannerOptions.DEFAULT_DYNAMIC_POOL_MAX_CHANNELS, poolOptions.getMaxSize()); + assertEquals(SpannerOptions.DEFAULT_DYNAMIC_POOL_MIN_CHANNELS, poolOptions.getMinSize()); + assertEquals(SpannerOptions.DEFAULT_DYNAMIC_POOL_MAX_RPC, poolOptions.getMaxRpcPerChannel()); + assertEquals(SpannerOptions.DEFAULT_DYNAMIC_POOL_MIN_RPC, poolOptions.getMinRpcPerChannel()); + assertEquals( 
+ SpannerOptions.DEFAULT_DYNAMIC_POOL_SCALE_DOWN_INTERVAL, + poolOptions.getScaleDownInterval()); + } + + @Test + public void testDynamicChannelPoolingDisabledWhenNumChannelsSet() { + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setCredentials(NoCredentials.getInstance()) + .enableDynamicChannelPool() + .setNumChannels(5) // Explicitly setting numChannels should disable DCP. + .build(); + assertFalse(options.isDynamicChannelPoolEnabled()); + assertEquals(5, options.getNumChannels()); + } + + @Test + public void testDynamicChannelPoolingDisabledExplicitly() { + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setCredentials(NoCredentials.getInstance()) + .enableDynamicChannelPool() + .disableDynamicChannelPool() + .build(); + assertFalse(options.isDynamicChannelPoolEnabled()); + } + + @Test + public void testDynamicChannelPoolingCustomSettings() { + Duration scaleDownInterval = Duration.ofMinutes(5); + GcpChannelPoolOptions customPoolOptions = + GcpChannelPoolOptions.newBuilder() + .setInitSize(6) + .setMaxSize(15) + .setMinSize(3) + .setDynamicScaling(10, 50, scaleDownInterval) + .build(); + + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setCredentials(NoCredentials.getInstance()) + .enableDynamicChannelPool() + .setGcpChannelPoolOptions(customPoolOptions) + .build(); + + assertTrue(options.isDynamicChannelPoolEnabled()); + GcpChannelPoolOptions poolOptions = options.getGcpChannelPoolOptions(); + assertEquals(6, poolOptions.getInitSize()); + assertEquals(15, poolOptions.getMaxSize()); + assertEquals(3, poolOptions.getMinSize()); + assertEquals(50, poolOptions.getMaxRpcPerChannel()); + assertEquals(10, poolOptions.getMinRpcPerChannel()); + assertEquals(scaleDownInterval, poolOptions.getScaleDownInterval()); + } + + @Test + public void testAffinityKeySettings() { + Duration affinityKeyLifetime = Duration.ofMinutes(10); + Duration 
cleanupInterval = Duration.ofMinutes(5); + GcpChannelPoolOptions poolOptions = + GcpChannelPoolOptions.newBuilder() + .setAffinityKeyLifetime(affinityKeyLifetime) + .setCleanupInterval(cleanupInterval) + .build(); + + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setCredentials(NoCredentials.getInstance()) + .enableGrpcGcpExtension() + .setGcpChannelPoolOptions(poolOptions) + .build(); + + assertEquals(affinityKeyLifetime, options.getGcpChannelPoolOptions().getAffinityKeyLifetime()); + assertEquals(cleanupInterval, options.getGcpChannelPoolOptions().getCleanupInterval()); + } + + @Test + public void testAffinityKeySettingsDefaults() { + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setCredentials(NoCredentials.getInstance()) + .enableGrpcGcpExtension() + .build(); + + // Verify default affinity key settings from Spanner defaults + GcpChannelPoolOptions poolOptions = options.getGcpChannelPoolOptions(); + assertEquals( + SpannerOptions.DEFAULT_DYNAMIC_POOL_AFFINITY_KEY_LIFETIME, + poolOptions.getAffinityKeyLifetime()); + assertEquals( + SpannerOptions.DEFAULT_DYNAMIC_POOL_CLEANUP_INTERVAL, poolOptions.getCleanupInterval()); + } + + @Test + public void testDynamicChannelPoolingDisabledWhenGrpcGcpDisabled() { + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setCredentials(NoCredentials.getInstance()) + .disableGrpcGcpExtension() + .build(); + // DCP should be disabled when grpc-gcp is disabled. 
+ assertFalse(options.isDynamicChannelPoolEnabled()); + } + + @Test + public void testCreateDefaultDynamicChannelPoolOptions() { + // Test the static factory method for creating default options + GcpChannelPoolOptions defaults = SpannerOptions.createDefaultDynamicChannelPoolOptions(); + assertNotNull(defaults); + assertEquals(SpannerOptions.DEFAULT_DYNAMIC_POOL_MAX_CHANNELS, defaults.getMaxSize()); + assertEquals(SpannerOptions.DEFAULT_DYNAMIC_POOL_MIN_CHANNELS, defaults.getMinSize()); + assertEquals(SpannerOptions.DEFAULT_DYNAMIC_POOL_INITIAL_SIZE, defaults.getInitSize()); + assertEquals(SpannerOptions.DEFAULT_DYNAMIC_POOL_MAX_RPC, defaults.getMaxRpcPerChannel()); + assertEquals(SpannerOptions.DEFAULT_DYNAMIC_POOL_MIN_RPC, defaults.getMinRpcPerChannel()); + assertEquals( + SpannerOptions.DEFAULT_DYNAMIC_POOL_SCALE_DOWN_INTERVAL, defaults.getScaleDownInterval()); + assertEquals( + SpannerOptions.DEFAULT_DYNAMIC_POOL_AFFINITY_KEY_LIFETIME, + defaults.getAffinityKeyLifetime()); + assertEquals( + SpannerOptions.DEFAULT_DYNAMIC_POOL_CLEANUP_INTERVAL, defaults.getCleanupInterval()); + } + + @Test + public void testPlainTextOptions() { + SpannerOptions options = + SpannerOptions.newBuilder().setExperimentalHost("localhost:8080").usePlainText().build(); + assertEquals("http://localhost:8080", options.getHost()); + assertEquals(NoCredentials.getInstance(), options.getCredentials()); + options = + SpannerOptions.newBuilder() + .setExperimentalHost("http://localhost:8080") + .usePlainText() + .build(); + assertEquals("http://localhost:8080", options.getHost()); + options = + SpannerOptions.newBuilder().usePlainText().setExperimentalHost("localhost:8080").build(); + assertEquals("http://localhost:8080", options.getHost()); + options = + SpannerOptions.newBuilder() + .usePlainText() + .setExperimentalHost("http://localhost:8080") + .build(); + assertEquals("http://localhost:8080", options.getHost()); + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerOptionsTestHelper.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerOptionsTestHelper.java new file mode 100644 index 000000000000..8f8c0a30a8ca --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerOptionsTestHelper.java @@ -0,0 +1,23 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.spanner; + +public class SpannerOptionsTestHelper { + + public static void resetActiveTracingFramework() { + SpannerOptions.resetActiveTracingFramework(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerRetryHelperTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerRetryHelperTest.java new file mode 100644 index 000000000000..a62355923be5 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerRetryHelperTest.java @@ -0,0 +1,259 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; + +import com.google.api.core.ApiClock; +import com.google.common.base.Stopwatch; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.google.protobuf.Duration; +import com.google.rpc.RetryInfo; +import io.grpc.Context; +import io.grpc.Context.CancellableContext; +import io.grpc.Deadline; +import io.grpc.Metadata; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.grpc.protobuf.ProtoUtils; +import java.util.concurrent.Callable; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class SpannerRetryHelperTest { + private static class FakeClock implements ApiClock { + private long currentTime; + + @Override + public long nanoTime() { + return TimeUnit.NANOSECONDS.convert(currentTime, TimeUnit.MILLISECONDS); + } + + @Override + public long millisTime() { + return currentTime; + } + } + + @Test + public void testRetryDoesNotTimeoutAfterTenMinutes() { + final FakeClock clock = new FakeClock(); + final AtomicInteger attempts = new AtomicInteger(); + Callable callable = + () -> { + if (attempts.getAndIncrement() == 0) { + 
clock.currentTime += TimeUnit.MILLISECONDS.convert(10L, TimeUnit.MINUTES); + throw SpannerExceptionFactory.newSpannerException(ErrorCode.ABORTED, "test"); + } + return 1 + 1; + }; + assertEquals( + 2, + SpannerRetryHelper.runTxWithRetriesOnAborted( + callable, SpannerRetryHelper.txRetrySettings, clock) + .intValue()); + } + + @Test + public void testRetryDoesFailAfterMoreThanOneDay() { + final FakeClock clock = new FakeClock(); + final AtomicInteger attempts = new AtomicInteger(); + Callable callable = + () -> { + if (attempts.getAndIncrement() == 0) { + clock.currentTime += TimeUnit.MILLISECONDS.convert(25L, TimeUnit.HOURS); + throw SpannerExceptionFactory.newSpannerException(ErrorCode.ABORTED, "test"); + } + return 1 + 1; + }; + SpannerException e = + assertThrows( + SpannerException.class, + () -> + SpannerRetryHelper.runTxWithRetriesOnAborted( + callable, SpannerRetryHelper.txRetrySettings, clock)); + assertEquals(ErrorCode.ABORTED, e.getErrorCode()); + assertEquals(1, attempts.get()); + } + + @Test + public void testCancelledContext() { + final CancellableContext withCancellation = Context.current().withCancellation(); + final CountDownLatch latch = new CountDownLatch(1); + final Callable callable = + () -> { + latch.countDown(); + throw SpannerExceptionFactory.newSpannerException(ErrorCode.ABORTED, "test"); + }; + ScheduledExecutorService service = Executors.newScheduledThreadPool(1); + service.submit( + () -> { + latch.await(); + withCancellation.cancel(new InterruptedException()); + return null; + }); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + withCancellation.run(() -> SpannerRetryHelper.runTxWithRetriesOnAborted(callable))); + assertEquals(ErrorCode.CANCELLED, e.getErrorCode()); + } + + @Test + public void testTimedOutContext() { + ScheduledExecutorService service = Executors.newScheduledThreadPool(1); + final Callable callable = + () -> { + throw SpannerExceptionFactory.newSpannerException(ErrorCode.ABORTED, "test"); + }; 
+ final CancellableContext withDeadline = + Context.current().withDeadline(Deadline.after(1L, TimeUnit.MILLISECONDS), service); + SpannerException e = + assertThrows( + SpannerException.class, + () -> withDeadline.run(() -> SpannerRetryHelper.runTxWithRetriesOnAborted(callable))); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + + @Test + public void noException() { + Callable callable = () -> 1 + 1; + assertThat(SpannerRetryHelper.runTxWithRetriesOnAborted(callable)).isEqualTo(2); + } + + @Test(expected = IllegalStateException.class) + public void propagateUncheckedException() { + Callable callable = + () -> { + throw new IllegalStateException("test"); + }; + SpannerRetryHelper.runTxWithRetriesOnAborted(callable); + } + + @Test + public void retryOnAborted() { + final AtomicInteger attempts = new AtomicInteger(); + Callable callable = + () -> { + if (attempts.getAndIncrement() == 0) { + throw abortedWithRetryInfo((int) TimeUnit.MILLISECONDS.toNanos(1L)); + } + return 1 + 1; + }; + assertThat(SpannerRetryHelper.runTxWithRetriesOnAborted(callable)).isEqualTo(2); + } + + @Test + public void retryMultipleTimesOnAborted() { + final AtomicInteger attempts = new AtomicInteger(); + Callable callable = + () -> { + if (attempts.getAndIncrement() < 2) { + throw abortedWithRetryInfo((int) TimeUnit.MILLISECONDS.toNanos(1)); + } + return 1 + 1; + }; + assertThat(SpannerRetryHelper.runTxWithRetriesOnAborted(callable)).isEqualTo(2); + } + + @Test(expected = IllegalStateException.class) + public void retryOnAbortedAndThenPropagateUnchecked() { + final AtomicInteger attempts = new AtomicInteger(); + Callable callable = + () -> { + if (attempts.getAndIncrement() == 0) { + throw abortedWithRetryInfo((int) TimeUnit.MILLISECONDS.toNanos(1L)); + } + throw new IllegalStateException("test"); + }; + SpannerRetryHelper.runTxWithRetriesOnAborted(callable); + } + + @Test + public void testExceptionWithRetryInfo() { + // Workaround from 
https://bugs.java.com/bugdatabase/view_bug.do?bug_id=6435126. + // See also https://stackoverflow.com/questions/824110/accurate-sleep-for-java-on-windows + // Note that this is a daemon thread, so it will not prevent the JVM from shutting down. + new ThreadFactoryBuilder() + .setDaemon(true) + .build() + .newThread( + () -> { + while (true) { + try { + Thread.sleep(Long.MAX_VALUE); + } catch (InterruptedException e) { + // Ignored exception + } + } + }); + final int RETRY_DELAY_MILLIS = 100; + Metadata.Key key = ProtoUtils.keyForProto(RetryInfo.getDefaultInstance()); + Status status = Status.fromCodeValue(Status.Code.ABORTED.value()); + Metadata trailers = new Metadata(); + RetryInfo retryInfo = + RetryInfo.newBuilder() + .setRetryDelay( + Duration.newBuilder() + .setNanos( + (int) + TimeUnit.NANOSECONDS.convert(RETRY_DELAY_MILLIS, TimeUnit.MILLISECONDS)) + .build()) + .build(); + trailers.put(key, retryInfo); + final SpannerException e = + SpannerExceptionFactory.newSpannerException(new StatusRuntimeException(status, trailers)); + final AtomicInteger attempts = new AtomicInteger(); + Callable callable = + () -> { + if (attempts.getAndIncrement() == 0) { + throw e; + } + return 1 + 1; + }; + // The following call should take at least 100ms, as that is the retry delay specified in the + // retry info of the exception. + Stopwatch watch = Stopwatch.createStarted(); + assertThat(SpannerRetryHelper.runTxWithRetriesOnAborted(callable)).isEqualTo(2); + long elapsed = watch.elapsed(TimeUnit.MILLISECONDS); + // Allow 1ms difference as that should be the accuracy of the sleep method. 
+ assertThat(elapsed).isAtLeast(RETRY_DELAY_MILLIS - 1); + } + + private SpannerException abortedWithRetryInfo(int nanos) { + Metadata.Key key = ProtoUtils.keyForProto(RetryInfo.getDefaultInstance()); + Status status = Status.fromCodeValue(Status.Code.ABORTED.value()); + Metadata trailers = new Metadata(); + RetryInfo retryInfo = + RetryInfo.newBuilder() + .setRetryDelay(Duration.newBuilder().setNanos(nanos).setSeconds(0L)) + .build(); + trailers.put(key, retryInfo); + return SpannerExceptionFactory.newSpannerException( + ErrorCode.ABORTED, "test", new StatusRuntimeException(status, trailers)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerThreadsTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerThreadsTest.java new file mode 100644 index 000000000000..9b6ffaf19ca7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerThreadsTest.java @@ -0,0 +1,329 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static org.hamcrest.CoreMatchers.*; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assume.assumeTrue; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.admin.database.v1.MockDatabaseAdminImpl; +import com.google.cloud.spanner.admin.instance.v1.MockInstanceAdminImpl; +import com.google.common.base.Stopwatch; +import com.google.protobuf.ListValue; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.instance.v1.Instance; +import com.google.spanner.admin.instance.v1.InstanceConfigName; +import com.google.spanner.admin.instance.v1.InstanceName; +import com.google.spanner.v1.*; +import com.google.spanner.v1.StructType.Field; +import io.grpc.*; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import java.net.InetSocketAddress; +import java.util.*; +import java.util.concurrent.TimeUnit; +import java.util.regex.Pattern; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Tests that opening and closing multiple Spanner instances does not leak any threads. 
*/ +@RunWith(JUnit4.class) +public class SpannerThreadsTest { + + private static final Statement SELECT1AND2 = + Statement.of("SELECT 1 AS COL1 UNION ALL SELECT 2 AS COL1"); + private static final ResultSetMetadata SELECT1AND2_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("COL1") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.INT64) + .build()) + .build()) + .build()) + .build(); + private static final com.google.spanner.v1.ResultSet SELECT1_RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("1").build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("2").build()) + .build()) + .setMetadata(SELECT1AND2_METADATA) + .build(); + + private static MockSpannerServiceImpl mockSpanner; + private static MockInstanceAdminImpl mockInstanceAdmin; + private static MockDatabaseAdminImpl mockDatabaseAdmin; + private static Server server; + private static InetSocketAddress address; + + @BeforeClass + public static void startServer() throws Exception { + assumeTrue( + "Skip tests when emulator is enabled as this test interferes with the check whether the" + + " emulator is running", + System.getenv("SPANNER_EMULATOR_HOST") == null); + + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. 
+ mockSpanner.putStatementResult(StatementResult.query(SELECT1AND2, SELECT1_RESULTSET)); + + mockInstanceAdmin = new MockInstanceAdminImpl(); + mockDatabaseAdmin = new MockDatabaseAdminImpl(); + address = new InetSocketAddress("localhost", 0); + server = + NettyServerBuilder.forAddress(address) + .addService(mockSpanner) + .addService(mockInstanceAdmin) + .addService(mockDatabaseAdmin) + .build() + .start(); + } + + @AfterClass + public static void stopServer() throws InterruptedException { + if (server != null) { + server.shutdown(); + server.awaitTermination(); + } + } + + @After + public void reset() { + mockSpanner.reset(); + } + + private static final int NUMBER_OF_TEST_RUNS = 2; + private static final int NUM_THREADS_PER_CHANNEL = 4; + private static final String THREAD_PATTERN = "%s-[0-9]+"; + + private static String generateRandomThreadNameFormat() { + return UUID.randomUUID() + "-%d"; + } + + @Test + public void testCloseAllThreadsWhenClosingSpanner() throws InterruptedException { + String threadName = generateRandomThreadNameFormat(); + int initialNumberOfThreads = getNumberOfThreadsWithName(threadName, false, 0); + assertEquals(0, initialNumberOfThreads); + + for (int i = 0; i < NUMBER_OF_TEST_RUNS; i++) { + // Create Spanner instance. + SpannerOptions options = createSpannerOptions(threadName); + Spanner spanner = options.getService(); + // Get a database client and do a query. This should initiate threads for the Spanner service. + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + List resultSets = new ArrayList<>(); + // SpannerStub affiliates a channel with a session, so we need to use multiple sessions + // to ensure we also hit multiple channels. + for (int i2 = 0; i2 < options.getSessionPoolOptions().getMaxSessions(); i2++) { + ResultSet rs = client.singleUse().executeQuery(SELECT1AND2); + // Execute ResultSet#next() to send the query to Spanner. 
+ rs.next(); + // Delay closing the result set in order to force the use of multiple sessions. + // As each session is linked to one transport channel, using multiple different + // sessions should initialize multiple transport channels. + resultSets.add(rs); + // Check whether the number of expected threads has been reached. + if (getNumberOfThreadsWithName(threadName, false, initialNumberOfThreads) + == options.getNumChannels() * NUM_THREADS_PER_CHANNEL + initialNumberOfThreads) { + break; + } + } + for (ResultSet rs : resultSets) { + rs.close(); + } + // Then do a request to the InstanceAdmin service and check the number of threads. + // Doing a request should initialize a thread pool for the underlying InstanceAdminClient. + for (int i2 = 0; i2 < options.getNumChannels() * 2; i2++) { + mockGetInstanceResponse(); + InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient(); + instanceAdminClient.getInstance("projects/[PROJECT]/instances/[INSTANCE]"); + } + // Then do a request to the DatabaseAdmin service and check the number of threads. + // Doing a request should initialize a thread pool for the underlying DatabaseAdminClient. + for (int i2 = 0; i2 < options.getNumChannels() * 2; i2++) { + mockGetDatabaseResponse(); + DatabaseAdminClient databaseAdminClient = spanner.getDatabaseAdminClient(); + databaseAdminClient.getDatabase("projects/[PROJECT]/instances/[INSTANCE]", "[DATABASE]"); + } + // Now close the Spanner instance and check whether the threads are shutdown or not. + spanner.close(); + // Wait for up to two seconds to allow the threads to actually shutdown. 
+ Stopwatch watch = Stopwatch.createStarted(); + while (getNumberOfThreadsWithName(threadName, false, initialNumberOfThreads) + > initialNumberOfThreads + && watch.elapsed(TimeUnit.SECONDS) < 2) { + //noinspection BusyWait + Thread.sleep(10L); + } + assertThat( + getNumberOfThreadsWithName(threadName, true, initialNumberOfThreads), + is(equalTo(initialNumberOfThreads))); + } + } + + /** + * Tests that multiple open {@link Spanner} objects at the same time does not share any executors + * or worker threads, and that all of them are shutdown when the {@link Spanner} object is closed. + */ + @Test + public void testMultipleOpenSpanners() throws InterruptedException { + String threadName = generateRandomThreadNameFormat(); + List spanners = new ArrayList<>(); + int initialNumberOfThreads = getNumberOfThreadsWithName(threadName, false, 0); + assertEquals(0, initialNumberOfThreads); + + for (int openSpanners = 1; openSpanners <= 3; openSpanners++) { + // Create Spanner instance. + SpannerOptions options = createSpannerOptions(threadName); + Spanner spanner = options.getService(); + spanners.add(spanner); + // Get a database client and do a query. This should initiate threads for the Spanner service. + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + List resultSets = new ArrayList<>(); + // SpannerStub affiliates a channel with a session, so we need to use multiple sessions + // to ensure we also hit multiple channels. + for (int sessionCount = 0; + sessionCount < options.getSessionPoolOptions().getMaxSessions() + && getNumberOfThreadsWithName(threadName, false, initialNumberOfThreads) + < options.getNumChannels() * NUM_THREADS_PER_CHANNEL * openSpanners + + initialNumberOfThreads; + sessionCount++) { + ResultSet resultSet = client.singleUse().executeQuery(SELECT1AND2); + // Execute ResultSet#next() to send the query to Spanner. 
+ resultSet.next(); + // Delay closing the result set in order to force the use of multiple sessions. + // As each session is linked to one transport channel, using multiple different + // sessions should initialize multiple transport channels. + resultSets.add(resultSet); + } + for (ResultSet resultSet : resultSets) { + resultSet.close(); + } + } + for (Spanner spanner : spanners) { + spanner.close(); + } + // Wait a little to allow the threads to actually shutdown. + Stopwatch watch = Stopwatch.createStarted(); + while (getNumberOfThreadsWithName(threadName, false, initialNumberOfThreads) + > initialNumberOfThreads + && watch.elapsed(TimeUnit.SECONDS) < 5) { + //noinspection BusyWait + Thread.sleep(10L); + } + assertEquals( + initialNumberOfThreads, + getNumberOfThreadsWithName(threadName, true, initialNumberOfThreads)); + } + + private static SpannerOptions createSpannerOptions(String threadNameFormat) { + String endpoint = address.getHostString() + ":" + server.getPort(); + return SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + // Set a custom channel configurator to allow http instead of https. 
+ .setChannelConfigurator( + input -> { + input.usePlaintext(); + return input; + }) + .setHost("http://" + endpoint) + .setCredentials(NoCredentials.getInstance()) + .setTransportChannelExecutorThreadNameFormat(threadNameFormat) + .build(); + } + + private int getNumberOfThreadsWithName(String serviceName, boolean dumpStack, int expected) { + Pattern pattern = Pattern.compile(String.format(THREAD_PATTERN, serviceName)); + ThreadGroup group = Thread.currentThread().getThreadGroup(); + while (group.getParent() != null) { + group = group.getParent(); + } + Thread[] threads = new Thread[100 * NUMBER_OF_TEST_RUNS]; + int numberOfThreads = group.enumerate(threads); + int res = 0; + List found = new ArrayList<>(); + for (int i = 0; i < numberOfThreads; i++) { + if (pattern.matcher(threads[i].getName()).matches()) { + if (dumpStack) { + found.add(threads[i]); + } + res++; + } + } + if (dumpStack && res > expected) { + found.forEach(this::dumpThread); + } + return res; + } + + private void dumpThread(Thread thread) { + StringBuilder dump = new StringBuilder(); + dump.append('"'); + dump.append(thread.getName()); + dump.append("\" "); + final Thread.State state = thread.getState(); + dump.append("\n java.lang.Thread.State: "); + dump.append(state); + final StackTraceElement[] stackTraceElements = thread.getStackTrace(); + for (final StackTraceElement stackTraceElement : stackTraceElements) { + dump.append("\n at "); + dump.append(stackTraceElement); + } + dump.append("\n\n"); + System.out.print(dump.toString()); + } + + private void mockGetInstanceResponse() { + InstanceName name2 = InstanceName.of("[PROJECT]", "[INSTANCE]"); + InstanceConfigName config = InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]"); + String displayName = "displayName1615086568"; + int nodeCount = 1539922066; + Instance expectedResponse = + Instance.newBuilder() + .setName(name2.toString()) + .setConfig(config.toString()) + .setDisplayName(displayName) + .setNodeCount(nodeCount) + .build(); + 
mockInstanceAdmin.addResponse(expectedResponse); + } + + private void mockGetDatabaseResponse() { + DatabaseName name2 = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + Database expectedResponse = Database.newBuilder().setName(name2.toString()).build(); + mockDatabaseAdmin.addResponse(expectedResponse); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/StandardBenchmarkMockServer.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/StandardBenchmarkMockServer.java new file mode 100644 index 000000000000..83255fcf3af2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/StandardBenchmarkMockServer.java @@ -0,0 +1,135 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.connection.RandomResultSetGenerator; +import com.google.common.collect.Collections2; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ListValue; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.TypeCode; +import io.grpc.Server; +import io.grpc.Status; +import io.grpc.inprocess.InProcessServerBuilder; + +/** Standard mock server used for benchmarking. */ +class StandardBenchmarkMockServer { + private static final int NETWORK_LATENCY_TIME = 10; + private static final int BATCH_CREATE_SESSIONS_MIN_TIME = 10; + private static final int BATCH_CREATE_SESSIONS_RND_TIME = 10; + private static final int BEGIN_TRANSACTION_MIN_TIME = 1; + private static final int BEGIN_TRANSACTION_RND_TIME = 1; + private static final int COMMIT_TRANSACTION_MIN_TIME = 5; + private static final int COMMIT_TRANSACTION_RND_TIME = 5; + private static final int ROLLBACK_TRANSACTION_MIN_TIME = 1; + private static final int ROLLBACK_TRANSACTION_RND_TIME = 1; + private static final int EXECUTE_STREAMING_SQL_MIN_TIME = 10; + private static final int EXECUTE_STREAMING_SQL_RND_TIME = 10; + private static final int EXECUTE_SQL_MIN_TIME = 10; + private static final int EXECUTE_SQL_RND_TIME = 10; + + static final Statement UPDATE_STATEMENT = Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=2"); + static final Statement INVALID_UPDATE_STATEMENT = + Statement.of("UPDATE NON_EXISTENT_TABLE SET BAR=1 WHERE BAZ=2"); + static final long UPDATE_COUNT = 1L; + static final Statement SELECT1 = Statement.of("SELECT 1 AS COL1"); + private 
static final ResultSetMetadata SELECT1_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("COL1") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.INT64) + .build()) + .build()) + .build()) + .build(); + private static final com.google.spanner.v1.ResultSet SELECT1_RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("1").build()) + .build()) + .setMetadata(SELECT1_METADATA) + .build(); + static final Statement SELECT_RANDOM = Statement.of("SELECT * FROM RANDOM_TABLE"); + private static final com.google.spanner.v1.ResultSet SELECT_RANDOM_RESULTSET = + new RandomResultSetGenerator(100).generate(); + private MockSpannerServiceImpl mockSpanner; + private Server server; + private LocalChannelProvider channelProvider; + + TransportChannelProvider start() throws Exception { + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. 
+ mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + mockSpanner.putStatementResult(StatementResult.query(SELECT1, SELECT1_RESULTSET)); + mockSpanner.putStatementResult(StatementResult.query(SELECT_RANDOM, SELECT_RANDOM_RESULTSET)); + mockSpanner.putStatementResult( + StatementResult.exception( + INVALID_UPDATE_STATEMENT, + Status.INVALID_ARGUMENT.withDescription("invalid statement").asRuntimeException())); + + mockSpanner.setBatchCreateSessionsExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime( + NETWORK_LATENCY_TIME + BATCH_CREATE_SESSIONS_MIN_TIME, BATCH_CREATE_SESSIONS_RND_TIME)); + mockSpanner.setBeginTransactionExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime( + NETWORK_LATENCY_TIME + BEGIN_TRANSACTION_MIN_TIME, BEGIN_TRANSACTION_RND_TIME)); + mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime( + NETWORK_LATENCY_TIME + COMMIT_TRANSACTION_MIN_TIME, COMMIT_TRANSACTION_RND_TIME)); + mockSpanner.setRollbackExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime( + NETWORK_LATENCY_TIME + ROLLBACK_TRANSACTION_MIN_TIME, ROLLBACK_TRANSACTION_RND_TIME)); + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime( + NETWORK_LATENCY_TIME + EXECUTE_STREAMING_SQL_MIN_TIME, EXECUTE_STREAMING_SQL_RND_TIME)); + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime( + NETWORK_LATENCY_TIME + EXECUTE_SQL_MIN_TIME, EXECUTE_SQL_RND_TIME)); + + String uniqueName = InProcessServerBuilder.generateName(); + server = InProcessServerBuilder.forName(uniqueName).addService(mockSpanner).build().start(); + channelProvider = LocalChannelProvider.create(uniqueName); + + return channelProvider; + } + + void shutdown() throws InterruptedException { + server.shutdown(); + server.awaitTermination(); + } + + MockSpannerServiceImpl getMockSpanner() { + return mockSpanner; + } + + int countRequests(final Class type) { + 
return Collections2.filter(mockSpanner.getRequests(), input -> input.getClass().equals(type)) + .size(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/StatementTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/StatementTest.java new file mode 100644 index 000000000000..e4a036673bc4 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/StatementTest.java @@ -0,0 +1,188 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.testing.SerializableTester.reserializeAndAssert; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.ByteArray; +import com.google.common.collect.ImmutableMap; +import com.google.common.testing.EqualsTester; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link com.google.cloud.spanner.Statement}. 
*/ +@RunWith(JUnit4.class) +public class StatementTest { + + @Test + public void basic() { + String sql = "SELECT 1"; + Statement stmt = Statement.of(sql); + assertThat(stmt.getSql()).isEqualTo(sql); + assertThat(stmt.getParameters()).isEmpty(); + assertThat(stmt.toString()).isEqualTo(sql); + reserializeAndAssert(stmt); + } + + @Test + public void basicWithParameters() { + String sql = "SELECT @name"; + Statement stmt = Statement.of(sql, ImmutableMap.of("name", Value.string("hello"))); + assertEquals(sql, stmt.getSql()); + assertFalse(stmt.getParameters().isEmpty()); + assertEquals(Value.string("hello"), stmt.getParameters().get("name")); + assertEquals(sql + " {name: hello}", stmt.toString()); + reserializeAndAssert(stmt); + } + + @Test + public void serialization() { + Statement stmt = + Statement.newBuilder("SELECT * FROM table WHERE ") + .append("bool_field = @bool_field ") + .bind("bool_field") + .to(true) + .append("long_field = @long_field ") + .bind("long_field") + .to(1L) + .append("float_field = @float_field ") + .bind("float_field") + .to(1.) 
+ .append("string_field = @string_field ") + .bind("string_field") + .to("abc") + .append("bytes_field = @bytes_field ") + .bind("bytes_field") + .to(ByteArray.fromBase64("abcd")) + .bind("untyped_null_field") + .to((Value) null) + .build(); + reserializeAndAssert(stmt); + } + + @Test + public void append() { + Statement stmt = + Statement.newBuilder("SELECT Name FROM Users") + .append(" WHERE Id = @id") + .bind("id") + .to(1234) + .append(" AND Status = @status") + .bind("status") + .to("ACTIVE") + .build(); + String expectedSql = "SELECT Name FROM Users WHERE Id = @id AND Status = @status"; + assertThat(stmt.getSql()).isEqualTo(expectedSql); + assertThat(stmt.hasBinding("id")).isTrue(); + assertThat(stmt.hasBinding("status")).isTrue(); + assertThat(stmt.getParameters()) + .containsExactlyEntriesIn( + ImmutableMap.of("id", Value.int64(1234), "status", Value.string("ACTIVE"))); + assertThat(stmt.toString()).startsWith(expectedSql); + assertThat(stmt.toString()).contains("id: 1234"); + assertThat(stmt.toString()).contains("status: ACTIVE"); + } + + @Test + public void bindReplacement() { + String sql = "SELECT Name FROM Users WHERE Id = @id"; + Statement stmt = Statement.newBuilder(sql).bind("id").to(1).bind("id").to(2).build(); + assertThat(stmt.hasBinding("id")).isTrue(); + assertThat(stmt.getSql()).isEqualTo(sql); + assertThat(stmt.getParameters()).isEqualTo(ImmutableMap.of("id", Value.int64(2))); + assertThat(stmt.toString()).isEqualTo(sql + " {id: 2}"); + } + + @Test + public void incompleteBinding() { + Statement.Builder builder = Statement.newBuilder("SELECT @v"); + builder.bind("v"); + IllegalStateException e = assertThrows(IllegalStateException.class, () -> builder.build()); + assertNotNull(e.getMessage()); + } + + @Test + public void bindingInProgress() { + Statement.Builder builder = Statement.newBuilder("SELECT @v"); + builder.bind("v"); + IllegalStateException e = assertThrows(IllegalStateException.class, () -> builder.bind("y")); + 
assertNotNull(e.getMessage()); + } + + @Test + public void alreadyBound() { + ValueBinder binder = Statement.newBuilder("SELECT @v").bind("v"); + binder.to("abc"); + IllegalStateException e = assertThrows(IllegalStateException.class, () -> binder.to("xyz")); + assertNotNull(e.getMessage()); + } + + @Test + public void bindCommitTimestampFails() { + ValueBinder binder = Statement.newBuilder("SELECT @v").bind("v"); + IllegalArgumentException e = + assertThrows(IllegalArgumentException.class, () -> binder.to(Value.COMMIT_TIMESTAMP)); + assertNotNull(e.getMessage()); + } + + @Test + public void gettersAreSnapshot() { + Statement stmt = + Statement.newBuilder("SELECT Name FROM Users WHERE Id = @id") + .append(" AND Status = @status") + .bind("status") + .to("ACTIVE") + .bind("id") + .to(1234) + .bind("status") + .to("ACTIVE") + .build(); + assertThat(stmt.getSql()) + .isEqualTo("SELECT Name FROM Users WHERE Id = @id AND Status = @status"); + assertThat(stmt.getParameters()) + .isEqualTo(ImmutableMap.of("id", Value.int64(1234), "status", Value.string("ACTIVE"))); + } + + @Test + public void equalsAndHashCode() { + EqualsTester tester = new EqualsTester(); + tester.addEqualityGroup( + Statement.of("SELECT 1"), + Statement.of("SELECT 1"), + Statement.newBuilder("SELECT ").append("1").build()); + tester.addEqualityGroup(Statement.of("SELECT 2")); + // Note that some of the following are incomplete bindings: they would fail if executed. 
+ tester.addEqualityGroup( + Statement.newBuilder("SELECT @x, @y").bind("x").to(1).build(), + Statement.newBuilder("SELECT @x, @y").bind("x").to(1).build()); + tester.addEqualityGroup(Statement.newBuilder("SELECT @x, @y").bind("x").to("1").build()); + tester.addEqualityGroup(Statement.newBuilder("SELECT @x, @y").bind("x").to(2).build()); + tester.addEqualityGroup(Statement.newBuilder("SELECT @x, @y").bind("y").to(2).build()); + tester.addEqualityGroup( + Statement.newBuilder("SELECT @x, @y").bind("x").to(1).bind("y").to(2).build()); + tester.addEqualityGroup( + Statement.newBuilder("SELECT @x, @y").bind("x").to((Value) null).build(), + Statement.newBuilder("SELECT @x, @y").bind("x").to((Value) null).build()); + tester.testEquals(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/StructTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/StructTest.java new file mode 100644 index 000000000000..55d066e165e1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/StructTest.java @@ -0,0 +1,314 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.testing.SerializableTester.reserializeAndAssert; +import static com.google.common.truth.Truth.assertThat; + +import com.google.common.testing.EqualsTester; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link com.google.cloud.spanner.Struct}. */ +@RunWith(JUnit4.class) +public class StructTest { + + @Test + public void builder() { + // These tests are basic: AbstractStructReaderTypesTest already covers all type getters. + Struct struct = + Struct.newBuilder() + .set("f1") + .to("x") + .set("f2") + .to(2) + .set("f3") + .to(Value.bool(null)) + .build(); + assertThat(struct.getType()) + .isEqualTo( + Type.struct( + Type.StructField.of("f1", Type.string()), + Type.StructField.of("f2", Type.int64()), + Type.StructField.of("f3", Type.bool()))); + assertThat(struct.isNull(0)).isFalse(); + assertThat(struct.isNull(1)).isFalse(); + assertThat(struct.isNull(2)).isTrue(); + assertThat(struct.getString(0)).isEqualTo("x"); + assertThat(struct.getLong(1)).isEqualTo(2); + } + + @Test + public void getOrNullTests() { + Struct struct = + Struct.newBuilder() + .set("f1") + .to("x") + .set("f2") + .to(2) + .set("f3") + .to(Value.bool(null)) + .build(); + String column1 = struct.getOrNull(0, StructReader::getString); + assertThat(column1).isEqualTo("x"); + + Long column2 = struct.getOrNull(1, StructReader::getLong); + assertThat(column2).isEqualTo(2); + + String column3 = struct.getOrNull("f3", StructReader::getString); + assertThat(column3).isNull(); + } + + @Test + public void getOrDefaultTests() { + Struct struct = + Struct.newBuilder() + .set("f1") + .to("x") + .set("f2") + .to(2) + .set("f3") + .to(Value.bool(null)) + .build(); + String column1 = struct.getOrDefault(0, StructReader::getString, ""); + 
assertThat(column1).isEqualTo("x"); + + Long column2 = struct.getOrDefault("f2", StructReader::getLong, -1L); + assertThat(column2).isEqualTo(2); + + String column3 = struct.getOrDefault(2, StructReader::getString, ""); + assertThat(column3).isEqualTo(""); + } + + @Test + public void duplicateFields() { + // Duplicate fields are allowed - some SQL queries produce this type of value. + Struct struct = Struct.newBuilder().set("").to("x").set("").to(Value.int64(2)).build(); + assertThat(struct.getType()) + .isEqualTo( + Type.struct( + Type.StructField.of("", Type.string()), Type.StructField.of("", Type.int64()))); + assertThat(struct.isNull(0)).isFalse(); + assertThat(struct.isNull(1)).isFalse(); + assertThat(struct.getString(0)).isEqualTo("x"); + assertThat(struct.getLong(1)).isEqualTo(2); + } + + @Test + public void unnamedFields() { + Struct struct = Struct.newBuilder().add(Value.int64(2)).add(Value.int64(3)).build(); + assertThat(struct.getType()) + .isEqualTo( + Type.struct( + Type.StructField.of("", Type.int64()), Type.StructField.of("", Type.int64()))); + assertThat(struct.getLong(0)).isEqualTo(2); + assertThat(struct.getLong(1)).isEqualTo(3); + } + + @Test + public void structWithStructField() { + Struct nestedStruct = Struct.newBuilder().set("f2f1").to(10).build(); + Struct struct = + Struct.newBuilder() + .set("f1") + .to("v1") + .set("f2") + .to(nestedStruct) + .set("f3") + .to(nestedStruct.getType(), null) + .build(); + assertThat(struct.getType()) + .isEqualTo( + Type.struct( + Type.StructField.of("f1", Type.string()), + Type.StructField.of("f2", Type.struct(Type.StructField.of("f2f1", Type.int64()))), + Type.StructField.of("f3", Type.struct(Type.StructField.of("f2f1", Type.int64()))))); + assertThat(struct.isNull(0)).isFalse(); + assertThat(struct.isNull(1)).isFalse(); + assertThat(struct.isNull(2)).isTrue(); + assertThat(struct.getString(0)).isEqualTo("v1"); + assertThat(struct.getString("f1")).isEqualTo("v1"); + 
assertThat(struct.getStruct(1)).isEqualTo(nestedStruct); + assertThat(struct.getStruct("f2")).isEqualTo(nestedStruct); + } + + @Test + public void structWithArrayOfStructField() { + Type elementType = + Type.struct( + Arrays.asList( + Type.StructField.of("ff1", Type.string()), + Type.StructField.of("ff2", Type.int64()))); + List arrayElements = + Arrays.asList( + Struct.newBuilder().set("ff1").to("v1").set("ff2").to(1).build(), + Struct.newBuilder().set("ff1").to("v1").set("ff2").to(1).build()); + Struct struct = + Struct.newBuilder() + .set("f1") + .to("x") + .set("f2") + .toStructArray(elementType, arrayElements) + .build(); + assertThat(struct.getType()) + .isEqualTo( + Type.struct( + Type.StructField.of("f1", Type.string()), + Type.StructField.of("f2", Type.array(elementType)))); + assertThat(struct.isNull(0)).isFalse(); + assertThat(struct.isNull(1)).isFalse(); + assertThat(struct.getString(0)).isEqualTo("x"); + assertThat(struct.getStructList(1)).isEqualTo(arrayElements); + } + + @Test + public void equalsAndHashCode() { + EqualsTester tester = new EqualsTester(); + tester.addEqualityGroup( + Struct.newBuilder().set("x").to(1).build(), + Struct.newBuilder().set("x").to(Value.int64(1)).build()); + tester.addEqualityGroup(Struct.newBuilder().set("x").to((Long) null).build()); + tester.addEqualityGroup(Struct.newBuilder().set("x").to((String) null).build()); + tester.addEqualityGroup(Struct.newBuilder().set("x").to(1).set("y").to(2).build()); + tester.addEqualityGroup(Struct.newBuilder().set("x").to(1).set("y").to("2").build()); + tester.addEqualityGroup(Struct.newBuilder().set("y").to(2).set("x").to(1).build()); + + // Equality comparison with empty structs. + tester.addEqualityGroup(Struct.newBuilder().build(), Struct.newBuilder().build()); + + // Equality comparison with structs with struct-typed fields. 
+ Struct nestedStruct = Struct.newBuilder().set("f").to(1).build(); + Struct structFieldStruct1 = + Struct.newBuilder() + .set("sf") + .to(nestedStruct) + .set("nullsf") + .to(nestedStruct.getType(), null) + .build(); + Struct structFieldStruct2 = + Struct.newBuilder() + .set("sf") + .to(Value.struct(nestedStruct)) + .set("nullsf") + .to(Value.struct(nestedStruct.getType(), null)) + .build(); + tester.addEqualityGroup(structFieldStruct1, structFieldStruct2); + + // Equality comparison with array-of-struct typed fields. + Struct arrayStructFieldStruct1 = + Struct.newBuilder() + .set("arraysf") + .toStructArray(nestedStruct.getType(), Arrays.asList(null, nestedStruct)) + .set("nullarraysf") + .toStructArray(nestedStruct.getType(), null) + .build(); + Struct arrayStructFieldStruct2 = + Struct.newBuilder() + .set("arraysf") + .to(Value.structArray(nestedStruct.getType(), Arrays.asList(null, nestedStruct))) + .set("nullarraysf") + .to(Value.structArray(nestedStruct.getType(), null)) + .build(); + tester.addEqualityGroup(arrayStructFieldStruct1, arrayStructFieldStruct2); + + // Equality comparison of structs with duplicate fields. + Struct duplicateFieldStruct1 = + Struct.newBuilder().set("f1").to(3).set("f1").to(nestedStruct).build(); + Struct duplicateFieldStruct2 = + Struct.newBuilder() + .set("f1") + .to(Value.int64(3)) + .set("f1") + .to(Value.struct(nestedStruct)) + .build(); + tester.addEqualityGroup(duplicateFieldStruct1, duplicateFieldStruct2); + + // Equality comparison of structs with unnamed fields. 
+ Struct emptyFieldStruct1 = Struct.newBuilder().set("").to(3).set("").to(nestedStruct).build(); + Struct emptyFieldStruct2 = + Struct.newBuilder().add(Value.int64(3)).add(Value.struct(nestedStruct)).build(); + tester.addEqualityGroup(emptyFieldStruct1, emptyFieldStruct2); + + tester.testEquals(); + + // PgNumeric + tester.addEqualityGroup( + Struct.newBuilder().set("x").to(Value.pgNumeric("1.23")).build(), + Struct.newBuilder().set("x").to(Value.pgNumeric("1.23")).build()); + tester.addEqualityGroup( + Struct.newBuilder().set("x").to(Value.pgNumeric("NaN")).build(), + Struct.newBuilder().set("x").to(Value.pgNumeric("NaN")).build()); + tester.addEqualityGroup( + Struct.newBuilder().set("x").to(Value.pgNumeric(null)).build(), + Struct.newBuilder().set("x").to(Value.pgNumeric(null)).build()); + tester.addEqualityGroup( + Struct.newBuilder() + .set("x") + .to(Value.pgNumericArray(Arrays.asList(null, "1.23", "NaN"))) + .build(), + Struct.newBuilder() + .set("x") + .to(Value.pgNumericArray(Arrays.asList(null, "1.23", "NaN"))) + .build()); + tester.addEqualityGroup( + Struct.newBuilder().set("x").to(Value.pgNumericArray(Collections.emptyList())).build(), + Struct.newBuilder().set("x").to(Value.pgNumericArray(Collections.emptyList())).build()); + tester.addEqualityGroup( + Struct.newBuilder().set("x").to(Value.pgNumericArray(null)).build(), + Struct.newBuilder().set("x").to(Value.pgNumericArray(null)).build()); + } + + @Test + public void serialization() { + // Simple struct. + Struct simpleStruct = Struct.newBuilder().set("x").to(1).build(); + reserializeAndAssert(simpleStruct); + simpleStruct = Struct.newBuilder().set("x").to((Long) null).build(); + reserializeAndAssert(simpleStruct); + + // Struct with struct field. 
+ Struct structFieldStruct = Struct.newBuilder().set("f1").to(simpleStruct).build(); + reserializeAndAssert(structFieldStruct); + structFieldStruct = Struct.newBuilder().set("f1").to(simpleStruct.getType(), null).build(); + reserializeAndAssert(structFieldStruct); + + // Struct with array-of-struct field + Struct arrayStructFieldStruct = + Struct.newBuilder() + .set("f1") + .toStructArray(simpleStruct.getType(), new ArrayList<>()) + .build(); + reserializeAndAssert(arrayStructFieldStruct); + arrayStructFieldStruct = + Struct.newBuilder().set("f1").toStructArray(simpleStruct.getType(), null).build(); + reserializeAndAssert(arrayStructFieldStruct); + + // Struct with no field. + reserializeAndAssert(Struct.newBuilder().build()); + + // Struct with duplicate field names. + reserializeAndAssert(Struct.newBuilder().set("f1").to(3).set("f1").to(30).build()); + + // Struct with unnamed fields. + reserializeAndAssert(Struct.newBuilder().add(Value.int64(3)).add(Value.int64(30)).build()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TestEnvConfig.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TestEnvConfig.java new file mode 100644 index 000000000000..0d46d664fb20 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TestEnvConfig.java @@ -0,0 +1,31 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import java.io.IOException; + +/** Interface for TestEnvConfig. */ +public interface TestEnvConfig { + /** Returns the options to use to create the Cloud Spanner client for integration tests. */ + SpannerOptions spannerOptions(); + + /** Custom setup. */ + void setUp() throws IOException; + + /** Custom tear down. */ + void tearDown(); +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TestHelper.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TestHelper.java new file mode 100644 index 000000000000..eb72238e8a55 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TestHelper.java @@ -0,0 +1,25 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.spanner; + +class TestHelper { + + static boolean isMultiplexSessionDisabled() { + return System.getenv() + .getOrDefault("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS", "") + .equalsIgnoreCase("false"); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ThreadFactoryUtilTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ThreadFactoryUtilTest.java new file mode 100644 index 000000000000..faf70ff577a5 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ThreadFactoryUtilTest.java @@ -0,0 +1,76 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.ThreadFactoryUtil.createVirtualOrPlatformDaemonThreadFactory; +import static com.google.cloud.spanner.ThreadFactoryUtil.tryCreateVirtualThreadFactory; +import static com.google.cloud.spanner.ThreadFactoryUtil.tryCreateVirtualThreadPerTaskExecutor; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import com.google.common.util.concurrent.SettableFuture; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ThreadFactoryUtilTest { + + @Test + public void testCreateThreadFactory() throws Exception { + ThreadFactory threadFactory = createVirtualOrPlatformDaemonThreadFactory("test-thread", true); + assertNotNull(threadFactory); + SettableFuture<Boolean> future = SettableFuture.create(); + Thread thread = threadFactory.newThread(() -> future.set(true)); + assertNotNull(thread); + // Virtual threads are by definition always daemon threads.
+ assertTrue(thread.isDaemon()); + thread.start(); + assertTrue(future.get(1L, TimeUnit.SECONDS)); + + if (isJava21OrHigher()) { + ThreadFactory virtualFactory = tryCreateVirtualThreadFactory("test-thread"); + assertNotNull(virtualFactory); + assertEquals(virtualFactory.getClass(), threadFactory.getClass()); + } else { + assertNull(tryCreateVirtualThreadFactory("test-thread")); + } + } + + @Test + public void testTryCreateVirtualThreadPerTaskExecutor() { + if (isJava21OrHigher()) { + assertNotNull(tryCreateVirtualThreadPerTaskExecutor("test-virtual-thread")); + } else { + assertNull(tryCreateVirtualThreadPerTaskExecutor("test-virtual-thread")); + } + } + + private static boolean isJava21OrHigher() { + String[] versionElements = System.getProperty("java.version").split("\\."); + int majorVersion = Integer.parseInt(versionElements[0]); + // Java 1.8 (Java 8) and lower used the format 1.8 etc. + // Java 9 and higher use the format 9.x + if (majorVersion == 1) { + majorVersion = Integer.parseInt(versionElements[1]); + } + return majorVersion >= 21; + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TimestampBoundTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TimestampBoundTest.java new file mode 100644 index 000000000000..e56f85321bbc --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TimestampBoundTest.java @@ -0,0 +1,159 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.testing.SerializableTester.reserializeAndAssert; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.TimestampBound.Mode; +import com.google.common.testing.EqualsTester; +import com.google.spanner.v1.TransactionOptions; +import java.util.concurrent.TimeUnit; +import org.hamcrest.MatcherAssert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link com.google.cloud.spanner.TimestampBound}. */ +@RunWith(JUnit4.class) +public class TimestampBoundTest { + private static final long TEST_TIME_SECONDS = 1444662894L; + private static final String TEST_TIME_ISO = "2015-10-12T15:14:54Z"; + + @Test + public void serialization() { + reserializeAndAssert(TimestampBound.strong()); + reserializeAndAssert(TimestampBound.ofExactStaleness(10, TimeUnit.NANOSECONDS)); + reserializeAndAssert(TimestampBound.ofMaxStaleness(100, TimeUnit.DAYS)); + reserializeAndAssert(TimestampBound.ofMinReadTimestamp(Timestamp.now())); + reserializeAndAssert(TimestampBound.ofReadTimestamp(Timestamp.now())); + } + + @Test + public void strong() { + TimestampBound bound = TimestampBound.strong(); + assertThat(bound.getMode()).isEqualTo(Mode.STRONG); + assertThat(bound.toString()).isEqualTo("strong"); + assertProto(bound, "strong: true"); + } + + @Test + public void readTimestamp() { + Timestamp ts = Timestamp.ofTimeSecondsAndNanos(TEST_TIME_SECONDS, 0); + TimestampBound bound = TimestampBound.ofReadTimestamp(ts); + assertThat(bound.getMode()).isEqualTo(Mode.READ_TIMESTAMP); + assertThat(bound.getReadTimestamp()).isEqualTo(ts); + 
assertThat(bound.toString()).isEqualTo("exact_timestamp: " + TEST_TIME_ISO); + assertProto(bound, "read_timestamp { seconds: " + TEST_TIME_SECONDS + " }"); + } + + @Test + public void minReadTimestamp() { + Timestamp ts = Timestamp.ofTimeSecondsAndNanos(TEST_TIME_SECONDS, 0); + TimestampBound bound = TimestampBound.ofMinReadTimestamp(ts); + assertThat(bound.getMode()).isEqualTo(Mode.MIN_READ_TIMESTAMP); + assertThat(bound.getMinReadTimestamp()).isEqualTo(ts); + assertThat(bound.toString()).isEqualTo("min_read_timestamp: " + TEST_TIME_ISO); + assertProto(bound, "min_read_timestamp { seconds: " + TEST_TIME_SECONDS + " }"); + } + + @Test + public void exactStaleness() { + TimestampBound bound = TimestampBound.ofExactStaleness(3140, TimeUnit.MILLISECONDS); + assertThat(bound.getMode()).isEqualTo(Mode.EXACT_STALENESS); + assertThat(bound.getExactStaleness(TimeUnit.SECONDS)).isEqualTo(3); + assertThat(bound.getExactStaleness(TimeUnit.MILLISECONDS)).isEqualTo(3140); + assertThat(bound.getExactStaleness(TimeUnit.MICROSECONDS)).isEqualTo(3140000); + assertThat(bound.getExactStaleness(TimeUnit.NANOSECONDS)).isEqualTo(3140000000L); + assertThat(bound.toString()).isEqualTo("exact_staleness: 3.140s"); + assertProto(bound, "exact_staleness { seconds: 3 nanos: 140000000 }"); + } + + @Test + public void exactStalenessNegative() { + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> TimestampBound.ofExactStaleness(-1, TimeUnit.SECONDS)); + assertNotNull(e.getMessage()); + } + + @Test + public void maxStaleness() { + TimestampBound bound = TimestampBound.ofMaxStaleness(3140, TimeUnit.MILLISECONDS); + assertThat(bound.getMode()).isEqualTo(Mode.MAX_STALENESS); + assertThat(bound.getMaxStaleness(TimeUnit.SECONDS)).isEqualTo(3); + assertThat(bound.getMaxStaleness(TimeUnit.MILLISECONDS)).isEqualTo(3140); + assertThat(bound.getMaxStaleness(TimeUnit.MICROSECONDS)).isEqualTo(3140000); + 
assertThat(bound.getMaxStaleness(TimeUnit.NANOSECONDS)).isEqualTo(3140000000L); + assertThat(bound.toString()).isEqualTo("max_staleness: 3.140s"); + assertProto(bound, "max_staleness { seconds: 3 nanos: 140000000 }"); + } + + @Test + public void stalenessSourceUnits() { + long num = 7; + for (TimeUnit units : TimeUnit.values()) { + TimestampBound bound = TimestampBound.ofExactStaleness(num, units); + assertWithMessage(units.toString()) + .that(bound.getExactStaleness(TimeUnit.NANOSECONDS)) + .isEqualTo(units.toNanos(num)); + } + } + + @Test + public void maxStalenessNegative() { + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> TimestampBound.ofMaxStaleness(-1, TimeUnit.SECONDS)); + assertNotNull(e.getMessage()); + } + + @Test + public void equalsAndHashCode() { + Timestamp ts = Timestamp.ofTimeSecondsAndNanos(1444662894L, 0); + Timestamp ts2 = Timestamp.ofTimeSecondsAndNanos(1444662895L, 0); + int staleness = 5; + EqualsTester tester = new EqualsTester(); + tester.addEqualityGroup(TimestampBound.strong(), TimestampBound.strong()); + tester.addEqualityGroup(TimestampBound.ofReadTimestamp(ts), TimestampBound.ofReadTimestamp(ts)); + tester.addEqualityGroup(TimestampBound.ofReadTimestamp(ts2)); + tester.addEqualityGroup( + TimestampBound.ofMinReadTimestamp(ts), TimestampBound.ofMinReadTimestamp(ts)); + tester.addEqualityGroup(TimestampBound.ofMinReadTimestamp(ts2)); + tester.addEqualityGroup( + TimestampBound.ofExactStaleness(staleness, TimeUnit.SECONDS), + TimestampBound.ofExactStaleness(staleness, TimeUnit.SECONDS)); + tester.addEqualityGroup(TimestampBound.ofExactStaleness(staleness, TimeUnit.MILLISECONDS)); + tester.addEqualityGroup( + TimestampBound.ofMaxStaleness(staleness, TimeUnit.SECONDS), + TimestampBound.ofMaxStaleness(staleness, TimeUnit.SECONDS)); + tester.addEqualityGroup(TimestampBound.ofMaxStaleness(staleness, TimeUnit.MILLISECONDS)); + tester.testEquals(); + } + + private static void assertProto(TimestampBound 
bound, String expectedProtoTextFormat) { + MatcherAssert.assertThat( + bound.toProto(), + SpannerMatchers.matchesProto(TransactionOptions.ReadOnly.class, expectedProtoTextFormat)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TracerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TracerTest.java new file mode 100644 index 000000000000..c52070b02abb --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TracerTest.java @@ -0,0 +1,27 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +/** + * Tests marked with this {@link org.junit.experimental.categories.Category} will be executed in a + * separate execution with the maven-surefire plugin. The tests will be excluded from execution with + * the maven-failsafe plugin. + * + *

Separate execution prevents the injection of any custom tracing configuration from interfering + * with other tests, as most tracing configuration is stored in static final variables. + */ +public interface TracerTest {} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionChannelHintTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionChannelHintTest.java new file mode 100644 index 000000000000..cdb0039ccd5b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionChannelHintTest.java @@ -0,0 +1,320 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.MockSpannerTestUtil.READ_COLUMN_NAMES; +import static com.google.cloud.spanner.MockSpannerTestUtil.READ_ONE_KEY_VALUE_RESULTSET; +import static com.google.cloud.spanner.MockSpannerTestUtil.READ_ONE_KEY_VALUE_STATEMENT; +import static com.google.cloud.spanner.MockSpannerTestUtil.READ_TABLE_NAME; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import com.google.api.gax.grpc.GrpcInterceptorProvider; +import com.google.cloud.NoCredentials; +import com.google.cloud.grpc.GcpManagedChannel; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.Options.RpcPriority; +import com.google.cloud.spanner.ReadContext.QueryAnalyzeMode; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.ListValue; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.SpannerGrpc; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.TypeCode; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.MethodDescriptor; +import io.grpc.Server; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import java.net.InetSocketAddress; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.logging.Level; +import java.util.logging.Logger; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Transaction always utilize a channel hint to ensure multiple RPCs that are part of the same + * transaction, they go via same channel. For regular session, the hint is stored per session. 
For + * multiplexed sessions this hint is stored per transaction. + * + *

The below tests assert this behavior by verifying that all operations within a transaction use + * the same channel hint (extracted from the X-Goog-Spanner-Request-Id header). + */ +@RunWith(JUnit4.class) +public class TransactionChannelHintTest { + + private static final Statement SELECT1 = Statement.of("SELECT 1 AS COL1"); + private static final ResultSetMetadata SELECT1_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("COL1") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.INT64) + .build()) + .build()) + .build()) + .build(); + private static final com.google.spanner.v1.ResultSet SELECT1_RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("1").build()) + .build()) + .setMetadata(SELECT1_METADATA) + .build(); + + private static MockSpannerServiceImpl mockSpanner; + private static Server server; + private static InetSocketAddress address; + // Track logical affinity keys (before grpc-gcp routing) per RPC method. + // These are captured by a client interceptor to verify channel affinity consistency. + private static final Set executeSqlAffinityKeys = ConcurrentHashMap.newKeySet(); + private static final Set beginTransactionAffinityKeys = ConcurrentHashMap.newKeySet(); + private static final Set streamingReadAffinityKeys = ConcurrentHashMap.newKeySet(); + private static Level originalLogLevel; + + @BeforeClass + public static void startServer() throws Exception { + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. 
+ mockSpanner.putStatementResult(StatementResult.query(SELECT1, SELECT1_RESULTSET)); + mockSpanner.putStatementResult( + StatementResult.query(READ_ONE_KEY_VALUE_STATEMENT, READ_ONE_KEY_VALUE_RESULTSET)); + + address = new InetSocketAddress("localhost", 0); + server = NettyServerBuilder.forAddress(address).addService(mockSpanner).build().start(); + } + + /** + * Creates a client interceptor that captures the logical affinity key before grpc-gcp routes the + * request. This allows us to verify that all operations within a transaction use the same logical + * channel affinity, even though the physical channel ID may vary. + */ + private static GrpcInterceptorProvider createAffinityKeyInterceptorProvider() { + return () -> + ImmutableList.of( + new ClientInterceptor() { + @Override + public ClientCall interceptCall( + MethodDescriptor method, CallOptions callOptions, Channel next) { + // Capture the AFFINITY_KEY before grpc-gcp processes it + String affinityKey = callOptions.getOption(GcpManagedChannel.AFFINITY_KEY); + if (affinityKey != null) { + String methodName = method.getFullMethodName(); + if (methodName.equals( + SpannerGrpc.getExecuteStreamingSqlMethod().getFullMethodName())) { + executeSqlAffinityKeys.add(affinityKey); + } + if (methodName.equals(SpannerGrpc.getStreamingReadMethod().getFullMethodName())) { + streamingReadAffinityKeys.add(affinityKey); + } + if (methodName.equals( + SpannerGrpc.getBeginTransactionMethod().getFullMethodName())) { + beginTransactionAffinityKeys.add(affinityKey); + } + } + return next.newCall(method, callOptions); + } + }); + } + + @AfterClass + public static void stopServer() throws InterruptedException { + server.shutdown(); + server.awaitTermination(); + } + + @BeforeClass + public static void disableLogging() { + Logger logger = Logger.getLogger(""); + originalLogLevel = logger.getLevel(); + logger.setLevel(Level.OFF); + } + + @AfterClass + public static void resetLogging() { + Logger logger = Logger.getLogger(""); + 
logger.setLevel(originalLogLevel); + } + + @After + public void reset() { + mockSpanner.reset(); + executeSqlAffinityKeys.clear(); + streamingReadAffinityKeys.clear(); + beginTransactionAffinityKeys.clear(); + } + + private SpannerOptions createSpannerOptions() { + String endpoint = address.getHostString() + ":" + server.getPort(); + return SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + .setChannelConfigurator( + input -> { + input.usePlaintext(); + return input; + }) + .setCompressorName("gzip") + .setHost("http://" + endpoint) + .setCredentials(NoCredentials.getInstance()) + .setInterceptorProvider(createAffinityKeyInterceptorProvider()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder().setSkipVerifyingBeginTransactionForMuxRW(true).build()) + .build(); + } + + @Test + public void testSingleUseReadOnlyTransaction_usesSingleChannelHint() { + try (Spanner spanner = createSpannerOptions().getService()) { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + try (ResultSet resultSet = client.singleUseReadOnlyTransaction().executeQuery(SELECT1)) { + while (resultSet.next()) {} + } + } + // All ExecuteSql calls should use the same logical affinity key + assertEquals(1, executeSqlAffinityKeys.size()); + } + + @Test + public void testSingleUseReadOnlyTransaction_withTimestampBound_usesSingleChannelHint() { + try (Spanner spanner = createSpannerOptions().getService()) { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + try (ResultSet resultSet = + client + .singleUseReadOnlyTransaction(TimestampBound.ofExactStaleness(15L, TimeUnit.SECONDS)) + .executeQuery(SELECT1)) { + while (resultSet.next()) {} + } + } + // All ExecuteSql calls should use the same logical affinity key + assertEquals(1, executeSqlAffinityKeys.size()); + } + + @Test + public void testReadOnlyTransaction_usesSingleChannelHint() { + try (Spanner spanner = createSpannerOptions().getService()) { + DatabaseClient client = 
spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + try (ReadOnlyTransaction transaction = client.readOnlyTransaction()) { + try (ResultSet resultSet = transaction.executeQuery(SELECT1)) { + while (resultSet.next()) {} + } + try (ResultSet resultSet = transaction.executeQuery(SELECT1)) { + while (resultSet.next()) {} + } + } + } + // All ExecuteSql calls within the transaction should use the same logical affinity key + assertEquals(1, executeSqlAffinityKeys.size()); + // BeginTransaction should use a single logical affinity key + assertEquals(1, beginTransactionAffinityKeys.size()); + } + + @Test + public void testReadOnlyTransaction_withTimestampBound_usesSingleChannelHint() { + try (Spanner spanner = createSpannerOptions().getService()) { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + try (ReadOnlyTransaction transaction = + client.readOnlyTransaction(TimestampBound.ofExactStaleness(15L, TimeUnit.SECONDS))) { + try (ResultSet resultSet = transaction.executeQuery(SELECT1)) { + while (resultSet.next()) {} + } + try (ResultSet resultSet = transaction.executeQuery(SELECT1)) { + while (resultSet.next()) {} + } + } + } + // All ExecuteSql calls within the transaction should use the same logical affinity key + assertEquals(1, executeSqlAffinityKeys.size()); + // BeginTransaction should use a single logical affinity key + assertEquals(1, beginTransactionAffinityKeys.size()); + } + + @Test + public void testTransactionManager_usesSingleChannelHint() { + try (Spanner spanner = createSpannerOptions().getService()) { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + try (TransactionManager manager = client.transactionManager()) { + TransactionContext transaction = manager.begin(); + while (true) { + try { + try (ResultSet resultSet = + transaction.analyzeQuery(SELECT1, QueryAnalyzeMode.PROFILE)) { + while (resultSet.next()) {} + } + + try (ResultSet resultSet = + transaction.analyzeQuery(SELECT1, 
QueryAnalyzeMode.PROFILE)) { + while (resultSet.next()) {} + } + manager.commit(); + assertNotNull(manager.getCommitTimestamp()); + break; + } catch (AbortedException e) { + transaction = manager.resetForRetry(); + } + } + } + } + // All ExecuteSql calls within the transaction should use the same logical affinity key + assertEquals(1, executeSqlAffinityKeys.size()); + } + + @Test + public void testTransactionRunner_usesSingleChannelHint() { + try (Spanner spanner = createSpannerOptions().getService()) { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + TransactionRunner runner = client.readWriteTransaction(); + runner.run( + transaction -> { + try (ResultSet resultSet = + transaction.read( + READ_TABLE_NAME, + KeySet.singleKey(Key.of(1L)), + READ_COLUMN_NAMES, + Options.priority(RpcPriority.HIGH))) { + while (resultSet.next()) {} + } + + try (ResultSet resultSet = + transaction.read( + READ_TABLE_NAME, + KeySet.singleKey(Key.of(1L)), + READ_COLUMN_NAMES, + Options.priority(RpcPriority.HIGH))) { + while (resultSet.next()) {} + } + return null; + }); + } + // All StreamingRead calls within the transaction should use the same logical affinity key + assertEquals(1, streamingReadAffinityKeys.size()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionContextImplTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionContextImplTest.java new file mode 100644 index 000000000000..49a47364a58a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionContextImplTest.java @@ -0,0 +1,246 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertThrows; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyMap; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.api.core.ApiFutures; +import com.google.cloud.spanner.TransactionRunnerImpl.TransactionContextImpl; +import com.google.cloud.spanner.XGoogSpannerRequestId.NoopRequestIdCreator; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.cloud.spanner.v1.stub.SpannerStubSettings; +import com.google.protobuf.ByteString; +import com.google.protobuf.Timestamp; +import com.google.rpc.Code; +import com.google.rpc.Status; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteBatchDmlResponse; +import io.opentelemetry.api.common.Attributes; +import java.util.Collections; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; + +@RunWith(JUnit4.class) +public class TransactionContextImplTest { + + @Mock private SpannerRpc rpc; + + @Mock private SessionImpl session; + + @Mock private ISpan span; + @Mock private TraceWrapper tracer; + + @SuppressWarnings("unchecked") + @Before + public void setup() { + 
MockitoAnnotations.initMocks(this); + when(rpc.commitAsync(any(CommitRequest.class), anyMap())) + .thenReturn( + ApiFutures.immediateFuture( + com.google.spanner.v1.CommitResponse.newBuilder() + .setCommitTimestamp(Timestamp.newBuilder().setSeconds(99L).setNanos(10).build()) + .build())); + when(rpc.getRequestIdCreator()).thenReturn(NoopRequestIdCreator.INSTANCE); + when(session.getName()).thenReturn("test"); + when(session.getRequestIdCreator()).thenReturn(NoopRequestIdCreator.INSTANCE); + SpannerImpl spanner = mock(SpannerImpl.class); + SpannerOptions spannerOptions = mock(SpannerOptions.class); + when(spanner.getOptions()).thenReturn(spannerOptions); + when(session.getSpanner()).thenReturn(spanner); + doNothing().when(span).setStatus(any(Throwable.class)); + doNothing().when(span).end(); + doNothing().when(span).addAnnotation("Starting Commit"); + when(tracer.createStatementAttributes(any(Statement.class), any())) + .thenReturn(Attributes.empty()); + when(tracer.createStatementBatchAttributes(any(Iterable.class), any())) + .thenReturn(Attributes.empty()); + when(tracer.spanBuilderWithExplicitParent(SpannerImpl.COMMIT, span)).thenReturn(span); + when(tracer.spanBuilderWithExplicitParent( + eq(SpannerImpl.COMMIT), eq(span), any(Attributes.class))) + .thenReturn(span); + when(tracer.spanBuilderWithExplicitParent( + eq(SpannerImpl.BATCH_UPDATE), eq(span), any(Attributes.class))) + .thenReturn(span); + when(rpc.getCommitRetrySettings()) + .thenReturn(SpannerStubSettings.newBuilder().commitSettings().getRetrySettings()); + } + + private TransactionContextImpl createContext() { + return TransactionContextImpl.newBuilder() + .setSession(session) + .setRpc(rpc) + .setSpan(span) + .setTracer(tracer) + .setTransactionId(ByteString.copyFromUtf8("test")) + .setOptions(Options.fromTransactionOptions()) + .build(); + } + + @Test + public void testCanBufferBeforeCommit() { + try (TransactionContextImpl context = createContext()) { + context.buffer(Mutation.delete("test", 
KeySet.all())); + } + } + + @Test + public void testCanBufferAsyncBeforeCommit() { + try (TransactionContextImpl context = createContext()) { + context.bufferAsync(Mutation.delete("test", KeySet.all())); + } + } + + @Test + public void testCanBufferIterableBeforeCommit() { + try (TransactionContextImpl context = createContext()) { + context.buffer(Collections.singleton(Mutation.delete("test", KeySet.all()))); + } + } + + @Test + public void testCanBufferIterableAsyncBeforeCommit() { + try (TransactionContextImpl context = createContext()) { + context.bufferAsync(Collections.singleton(Mutation.delete("test", KeySet.all()))); + } + } + + @Test + public void testCannotBufferAfterCommit() { + try (TransactionContextImpl context = createContext()) { + context.commit(); + assertThrows( + IllegalStateException.class, () -> context.buffer(Mutation.delete("test", KeySet.all()))); + } + } + + @Test + public void testCannotBufferAsyncAfterCommit() { + try (TransactionContextImpl context = createContext()) { + context.commit(); + assertThrows( + IllegalStateException.class, + () -> context.bufferAsync(Mutation.delete("test", KeySet.all()))); + } + } + + @Test + public void testCannotBufferIterableAfterCommit() { + try (TransactionContextImpl context = createContext()) { + context.commit(); + assertThrows( + IllegalStateException.class, + () -> context.buffer(Collections.singleton(Mutation.delete("test", KeySet.all())))); + } + } + + @Test + public void testCannotBufferIterableAsyncAfterCommit() { + try (TransactionContextImpl context = createContext()) { + context.commit(); + assertThrows( + IllegalStateException.class, + () -> context.bufferAsync(Collections.singleton(Mutation.delete("test", KeySet.all())))); + } + } + + @Test + public void testCannotCommitTwice() { + try (TransactionContextImpl context = createContext()) { + context.commit(); + assertThrows(IllegalStateException.class, () -> context.commit()); + } + } + + @Test(expected = AbortedException.class) + public 
void batchDmlAborted() { + batchDml(Code.ABORTED_VALUE); + } + + @Test(expected = SpannerBatchUpdateException.class) + public void batchDmlException() { + batchDml(Code.FAILED_PRECONDITION_VALUE); + } + + @SuppressWarnings("unchecked") + @Test + public void testReturnCommitStats() { + ByteString transactionId = ByteString.copyFromUtf8("test"); + + try (TransactionContextImpl context = + TransactionContextImpl.newBuilder() + .setSession(session) + .setRpc(rpc) + .setSpan(span) + .setTracer(tracer) + .setTransactionId(transactionId) + .setOptions(Options.fromTransactionOptions(Options.commitStats())) + .build()) { + context.commitAsync(); + CommitRequest request = + CommitRequest.newBuilder() + .setReturnCommitStats(true) + .setSession(session.getName()) + .setTransactionId(transactionId) + .build(); + verify(rpc).commitAsync(eq(request), anyMap()); + } + } + + @SuppressWarnings("unchecked") + private void batchDml(int status) { + SessionImpl session = mock(SessionImpl.class); + when(session.getName()).thenReturn("test"); + when(session.getRequestIdCreator()).thenReturn(NoopRequestIdCreator.INSTANCE); + SpannerImpl spanner = mock(SpannerImpl.class); + SpannerOptions spannerOptions = mock(SpannerOptions.class); + when(spanner.getOptions()).thenReturn(spannerOptions); + when(session.getSpanner()).thenReturn(spanner); + SpannerRpc rpc = mock(SpannerRpc.class); + ExecuteBatchDmlResponse response = + ExecuteBatchDmlResponse.newBuilder() + .setStatus(Status.newBuilder().setCode(status).build()) + .build(); + Statement statement = Statement.of("UPDATE FOO SET BAR=1"); + + when(rpc.executeBatchDml(Mockito.any(ExecuteBatchDmlRequest.class), Mockito.anyMap())) + .thenReturn(response); + try (TransactionContextImpl impl = + TransactionContextImpl.newBuilder() + .setSession(session) + .setRpc(rpc) + .setTransactionId(ByteString.copyFromUtf8("test")) + .setOptions(Options.fromTransactionOptions()) + .setTracer(tracer) + .setSpan(span) + .build()) { + 
impl.batchUpdate(Collections.singletonList(statement)); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionContextTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionContextTest.java new file mode 100644 index 000000000000..24ec4b59d2a4 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionContextTest.java @@ -0,0 +1,150 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertThrows; + +import com.google.api.core.ApiFuture; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.Options.ReadOption; +import com.google.cloud.spanner.Options.UpdateOption; +import java.util.Collections; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class TransactionContextTest { + + @Test + public void testDefaultImplementations() { + try (TransactionContext context = + new TransactionContext() { + @Override + public AsyncResultSet readUsingIndexAsync( + String table, + String index, + KeySet keys, + Iterable columns, + ReadOption... options) { + return null; + } + + @Override + public ResultSet readUsingIndex( + String table, + String index, + KeySet keys, + Iterable columns, + ReadOption... 
options) { + return null; + } + + @Override + public ApiFuture readRowUsingIndexAsync( + String table, String index, Key key, Iterable columns) { + return null; + } + + @Override + public Struct readRowUsingIndex( + String table, String index, Key key, Iterable columns) { + return null; + } + + @Override + public ApiFuture readRowAsync(String table, Key key, Iterable columns) { + return null; + } + + @Override + public Struct readRow(String table, Key key, Iterable columns) { + return null; + } + + @Override + public AsyncResultSet readAsync( + String table, KeySet keys, Iterable columns, ReadOption... options) { + return null; + } + + @Override + public ResultSet read( + String table, KeySet keys, Iterable columns, ReadOption... options) { + return null; + } + + @Override + public AsyncResultSet executeQueryAsync(Statement statement, QueryOption... options) { + return null; + } + + @Override + public ResultSet executeQuery(Statement statement, QueryOption... options) { + return null; + } + + @Override + public void close() {} + + @Override + public ResultSet analyzeQuery(Statement statement, QueryAnalyzeMode queryMode) { + return null; + } + + @Override + public ApiFuture executeUpdateAsync(Statement statement, UpdateOption... options) { + return null; + } + + @Override + public ResultSet analyzeUpdateStatement( + Statement statement, QueryAnalyzeMode analyzeMode, UpdateOption... options) { + return null; + } + + @Override + public long executeUpdate(Statement statement, UpdateOption... options) { + return 0; + } + + @Override + public void buffer(Iterable mutations) {} + + @Override + public void buffer(Mutation mutation) {} + + @Override + public ApiFuture batchUpdateAsync( + Iterable statements, UpdateOption... options) { + return null; + } + + @Override + public long[] batchUpdate(Iterable statements, UpdateOption... 
options) { + return null; + } + }) { + assertThrows( + UnsupportedOperationException.class, + () -> context.bufferAsync(Mutation.delete("foo", KeySet.all()))); + assertThrows( + UnsupportedOperationException.class, + () -> context.bufferAsync(Collections.singleton(Mutation.delete("foo", KeySet.all())))); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionManagerAbortedTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionManagerAbortedTest.java new file mode 100644 index 000000000000..ae24eeb7696c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionManagerAbortedTest.java @@ -0,0 +1,447 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.fail; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.v1.SpannerClient; +import com.google.cloud.spanner.v1.SpannerSettings; +import com.google.protobuf.ByteString; +import com.google.protobuf.ListValue; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.TypeCode; +import io.grpc.Server; +import io.grpc.inprocess.InProcessServerBuilder; +import java.util.Arrays; +import java.util.Collections; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Test aborted transaction behavior of {@link TransactionManager}. 
*/ +@RunWith(JUnit4.class) +public class TransactionManagerAbortedTest { + private static final String PROJECT_ID = "PROJECT"; + private static final String INSTANCE_ID = "INSTANCE"; + private static final String DATABASE_ID = "DATABASE"; + private static final ResultSetMetadata READ_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("BAR") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.INT64) + .build()) + .build()) + .build()) + .build(); + private static final com.google.spanner.v1.ResultSet READ_RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("1").build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("2").build()) + .build()) + .setMetadata(READ_METADATA) + .build(); + private static final com.google.spanner.v1.ResultSet READ_ROW_RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("1").build()) + .build()) + .setMetadata(READ_METADATA) + .build(); + private static final Statement SELECT1AND2 = + Statement.of("SELECT 1 AS COL1 UNION ALL SELECT 2 AS COL1"); + private static final ResultSetMetadata SELECT1AND2_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("COL1") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.INT64) + .build()) + .build()) + .build()) + .build(); + private static final com.google.spanner.v1.ResultSet SELECT1AND2_RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("1").build()) + .build()) + .addRows( + ListValue.newBuilder() + 
.addValues(com.google.protobuf.Value.newBuilder().setStringValue("2").build()) + .build()) + .setMetadata(SELECT1AND2_METADATA) + .build(); + private static final Statement UPDATE_STATEMENT = + Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=2"); + private static final long UPDATE_COUNT = 1L; + private static final Statement UPDATE_ABORTED_STATEMENT = + Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=2 AND THIS_WILL_ABORT=TRUE"); + private static MockSpannerServiceImpl mockSpanner; + private static Server server; + private static LocalChannelProvider channelProvider; + private static SpannerClient spannerClient; + private static Spanner spanner; + + @BeforeClass + public static void startStaticServer() throws Exception { + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. + mockSpanner.putStatementResult( + StatementResult.read( + "FOO", KeySet.all(), Collections.singletonList("BAR"), READ_RESULTSET)); + mockSpanner.putStatementResult( + StatementResult.read( + "FOO", + KeySet.singleKey(Key.of()), + Collections.singletonList("BAR"), + READ_ROW_RESULTSET)); + mockSpanner.putStatementResult(StatementResult.query(SELECT1AND2, SELECT1AND2_RESULTSET)); + mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + mockSpanner.putStatementResult( + StatementResult.exception( + UPDATE_ABORTED_STATEMENT, + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + + String uniqueName = InProcessServerBuilder.generateName(); + server = + InProcessServerBuilder.forName(uniqueName) + // We need to use a real executor for timeouts to occur. 
+ .scheduledExecutorService(new ScheduledThreadPoolExecutor(1)) + .addService(mockSpanner) + .build() + .start(); + channelProvider = LocalChannelProvider.create(uniqueName); + + SpannerSettings settings = + SpannerSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + spannerClient = SpannerClient.create(settings); + } + + @AfterClass + public static void stopServer() throws InterruptedException { + spannerClient.close(); + server.shutdown(); + server.awaitTermination(); + } + + @Before + public void setUp() { + mockSpanner.reset(); + mockSpanner.removeAllExecutionTimes(); + SpannerOptions.Builder builder = + SpannerOptions.newBuilder() + .setProjectId(PROJECT_ID) + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()); + spanner = builder.build().getService(); + } + + @After + public void tearDown() { + spanner.close(); + } + + @Test + public void testTransactionManagerAbortOnCommit() throws InterruptedException { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DATABASE_ID)); + int attempts = 0; + try (TransactionManager manager = client.transactionManager()) { + manager.begin(); + while (true) { + attempts++; + try { + if (attempts == 1) { + mockSpanner.abortNextTransaction(); + } + manager.commit(); + break; + } catch (AbortedException e) { + Thread.sleep(e.getRetryDelayInMillis()); + manager.resetForRetry(); + } + } + } + assertThat(attempts, is(equalTo(2))); + } + + @SuppressWarnings("resource") + @Test + public void testTransactionManagerAbortOnUpdate() throws InterruptedException { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DATABASE_ID)); + int attempts = 0; + try (TransactionManager manager = client.transactionManager()) { + TransactionContext txn = manager.begin(); + while (true) { + attempts++; + try { + if (attempts == 1) { + 
mockSpanner.abortNextTransaction(); + } + long updateCount = txn.executeUpdate(UPDATE_STATEMENT); + assertThat(updateCount, is(equalTo(UPDATE_COUNT))); + manager.commit(); + break; + } catch (AbortedException e) { + Thread.sleep(e.getRetryDelayInMillis()); + txn = manager.resetForRetry(); + } + } + } + assertThat(attempts, is(equalTo(2))); + } + + @SuppressWarnings("resource") + @Test + public void testTransactionManagerAbortOnBatchUpdate() throws InterruptedException { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DATABASE_ID)); + int attempts = 0; + try (TransactionManager manager = client.transactionManager()) { + TransactionContext txn = manager.begin(); + while (true) { + attempts++; + try { + if (attempts == 1) { + mockSpanner.abortNextTransaction(); + } + long[] updateCounts = txn.batchUpdate(Arrays.asList(UPDATE_STATEMENT, UPDATE_STATEMENT)); + assertThat(updateCounts, is(equalTo(new long[] {UPDATE_COUNT, UPDATE_COUNT}))); + manager.commit(); + break; + } catch (AbortedException e) { + Thread.sleep(e.getRetryDelayInMillis()); + txn = manager.resetForRetry(); + } + } + } + assertThat(attempts, is(equalTo(2))); + } + + @SuppressWarnings("resource") + @Test + public void testTransactionManagerAbortOnBatchUpdateHalfway() throws InterruptedException { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DATABASE_ID)); + int attempts = 0; + try (TransactionManager manager = client.transactionManager()) { + TransactionContext txn = manager.begin(); + while (true) { + attempts++; + try { + if (attempts == 1) { + txn.batchUpdate(Arrays.asList(UPDATE_STATEMENT, UPDATE_ABORTED_STATEMENT)); + fail("missing expected AbortedException"); + } + long[] updateCounts = txn.batchUpdate(Arrays.asList(UPDATE_STATEMENT, UPDATE_STATEMENT)); + assertThat(updateCounts, is(equalTo(new long[] {UPDATE_COUNT, UPDATE_COUNT}))); + manager.commit(); + break; + } catch (AbortedException e) { + 
Thread.sleep(e.getRetryDelayInMillis()); + txn = manager.resetForRetry(); + } + } + } + assertThat(attempts, is(equalTo(2))); + } + + @SuppressWarnings("resource") + @Test + public void testTransactionManagerAbortOnSelect() throws InterruptedException { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DATABASE_ID)); + int attempts = 0; + try (TransactionManager manager = client.transactionManager()) { + TransactionContext txn = manager.begin(); + while (true) { + attempts++; + try { + if (attempts == 1) { + mockSpanner.abortNextTransaction(); + } + try (ResultSet rs = txn.executeQuery(SELECT1AND2)) { + int rows = 0; + while (rs.next()) { + rows++; + } + assertThat(rows, is(equalTo(2))); + } + manager.commit(); + break; + } catch (AbortedException e) { + Thread.sleep(e.getRetryDelayInMillis()); + txn = manager.resetForRetry(); + } + } + } + assertThat(attempts, is(equalTo(2))); + } + + @SuppressWarnings("resource") + @Test + public void testTransactionManagerAbortOnRead() throws InterruptedException { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DATABASE_ID)); + int attempts = 0; + try (TransactionManager manager = client.transactionManager()) { + TransactionContext txn = manager.begin(); + while (true) { + attempts++; + try { + if (attempts == 1) { + mockSpanner.abortNextTransaction(); + } + try (ResultSet rs = txn.read("FOO", KeySet.all(), Collections.singletonList("BAR"))) { + int rows = 0; + while (rs.next()) { + rows++; + } + assertThat(rows, is(equalTo(2))); + } + manager.commit(); + break; + } catch (AbortedException e) { + Thread.sleep(e.getRetryDelayInMillis()); + txn = manager.resetForRetry(); + } + } + } + assertThat(attempts, is(equalTo(2))); + } + + @SuppressWarnings("resource") + @Test + public void testTransactionManagerAbortOnReadUsingIndex() throws InterruptedException { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(PROJECT_ID, 
INSTANCE_ID, DATABASE_ID)); + int attempts = 0; + try (TransactionManager manager = client.transactionManager()) { + TransactionContext txn = manager.begin(); + while (true) { + attempts++; + try { + if (attempts == 1) { + mockSpanner.abortNextTransaction(); + } + try (ResultSet rs = + txn.readUsingIndex("FOO", "INDEX", KeySet.all(), Collections.singletonList("BAR"))) { + int rows = 0; + while (rs.next()) { + rows++; + } + assertThat(rows, is(equalTo(2))); + } + manager.commit(); + break; + } catch (AbortedException e) { + Thread.sleep(e.getRetryDelayInMillis()); + txn = manager.resetForRetry(); + } + } + } + assertThat(attempts, is(equalTo(2))); + } + + @SuppressWarnings("resource") + @Test + public void testTransactionManagerAbortOnReadRow() throws InterruptedException { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DATABASE_ID)); + int attempts = 0; + try (TransactionManager manager = client.transactionManager()) { + TransactionContext txn = manager.begin(); + while (true) { + attempts++; + try { + if (attempts == 1) { + mockSpanner.abortNextTransaction(); + } + Struct row = txn.readRow("FOO", Key.of(), Collections.singletonList("BAR")); + assertThat(row.getLong(0), is(equalTo(1L))); + manager.commit(); + break; + } catch (AbortedException e) { + Thread.sleep(e.getRetryDelayInMillis()); + txn = manager.resetForRetry(); + } + } + } + assertThat(attempts, is(equalTo(2))); + } + + @SuppressWarnings("resource") + @Test + public void testTransactionManagerAbortOnReadRowUsingIndex() throws InterruptedException { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DATABASE_ID)); + int attempts = 0; + try (TransactionManager manager = client.transactionManager()) { + TransactionContext txn = manager.begin(); + while (true) { + attempts++; + try { + if (attempts == 1) { + mockSpanner.abortNextTransaction(); + } + Struct row = + txn.readRowUsingIndex("FOO", "INDEX", Key.of(), 
Collections.singletonList("BAR")); + assertThat(row.getLong(0), is(equalTo(1L))); + manager.commit(); + break; + } catch (AbortedException e) { + Thread.sleep(e.getRetryDelayInMillis()); + txn = manager.resetForRetry(); + } + } + } + assertThat(attempts, is(equalTo(2))); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionManagerImplTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionManagerImplTest.java new file mode 100644 index 000000000000..547f6b70a22d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionManagerImplTest.java @@ -0,0 +1,460 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.clearInvocations; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.mockito.MockitoAnnotations.initMocks; + +import com.google.api.core.ApiFutures; +import com.google.cloud.Timestamp; +import com.google.cloud.grpc.GrpcTransportOptions; +import com.google.cloud.grpc.GrpcTransportOptions.ExecutorFactory; +import com.google.cloud.spanner.TransactionManager.TransactionState; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.cloud.spanner.v1.stub.SpannerStubSettings; +import com.google.protobuf.ByteString; +import com.google.protobuf.Empty; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.Session; +import com.google.spanner.v1.Transaction; +import com.google.spanner.v1.TransactionOptions; +import io.opentelemetry.api.OpenTelemetry; +import java.util.Collections; +import java.util.UUID; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; 
+import org.junit.runners.JUnit4; +import org.mockito.Mock; +import org.mockito.Mockito; + +@RunWith(JUnit4.class) +public class TransactionManagerImplTest { + private static final class TestExecutorFactory + implements ExecutorFactory { + @Override + public ScheduledExecutorService get() { + return Executors.newSingleThreadScheduledExecutor(); + } + + @Override + public void release(ScheduledExecutorService exec) { + exec.shutdown(); + } + } + + @Mock private SessionImpl session; + @Mock TransactionRunnerImpl.TransactionContextImpl txn; + private TransactionManagerImpl manager; + + @BeforeClass + public static void setupOpenTelemetry() { + SpannerOptions.resetActiveTracingFramework(); + SpannerOptions.enableOpenTelemetryTraces(); + } + + @Before + public void setUp() { + initMocks(this); + manager = + new TransactionManagerImpl( + session, + new OpenTelemetrySpan(mock(io.opentelemetry.api.trace.Span.class)), + mock(TraceWrapper.class)); + } + + @Test + public void beginCalledTwiceFails() { + when(session.newTransaction(eq(Options.fromTransactionOptions()), any())).thenReturn(txn); + assertThat(manager.begin()).isEqualTo(txn); + assertThat(manager.getState()).isEqualTo(TransactionState.STARTED); + IllegalStateException e = assertThrows(IllegalStateException.class, () -> manager.begin()); + assertNotNull(e.getMessage()); + } + + @Test + public void commitBeforeBeginFails() { + IllegalStateException e = assertThrows(IllegalStateException.class, () -> manager.commit()); + assertNotNull(e.getMessage()); + } + + @Test + public void rollbackBeforeBeginFails() { + IllegalStateException e = assertThrows(IllegalStateException.class, () -> manager.rollback()); + assertNotNull(e.getMessage()); + } + + @Test + public void resetBeforeBeginFails() { + IllegalStateException e = + assertThrows(IllegalStateException.class, () -> manager.resetForRetry()); + assertNotNull(e.getMessage()); + } + + @Test + public void transactionRolledBackOnClose() { + 
when(session.newTransaction(eq(Options.fromTransactionOptions()), any())).thenReturn(txn); + when(txn.isAborted()).thenReturn(false); + manager.begin(); + manager.close(); + verify(txn).rollback(); + } + + @Test + public void commitSucceeds() { + when(session.newTransaction(eq(Options.fromTransactionOptions()), any())).thenReturn(txn); + Timestamp commitTimestamp = Timestamp.ofTimeMicroseconds(1); + CommitResponse response = new CommitResponse(commitTimestamp); + when(txn.getCommitResponse()).thenReturn(response); + manager.begin(); + manager.commit(); + assertThat(manager.getState()).isEqualTo(TransactionState.COMMITTED); + assertThat(manager.getCommitTimestamp()).isEqualTo(commitTimestamp); + } + + @Test + public void resetAfterSuccessfulCommitFails() { + when(session.newTransaction(eq(Options.fromTransactionOptions()), any())).thenReturn(txn); + manager.begin(); + manager.commit(); + IllegalStateException e = + assertThrows(IllegalStateException.class, () -> manager.resetForRetry()); + assertNotNull(e.getMessage()); + } + + @Test + public void resetAfterAbortSucceeds() { + when(session.newTransaction(eq(Options.fromTransactionOptions()), any())).thenReturn(txn); + manager.begin(); + doThrow(SpannerExceptionFactory.newSpannerException(ErrorCode.ABORTED, "")).when(txn).commit(); + assertThrows(AbortedException.class, () -> manager.commit()); + assertEquals(TransactionState.ABORTED, manager.getState()); + + txn = Mockito.mock(TransactionRunnerImpl.TransactionContextImpl.class); + when(session.newTransaction(eq(Options.fromTransactionOptions()), any())).thenReturn(txn); + assertThat(manager.resetForRetry()).isEqualTo(txn); + assertThat(manager.getState()).isEqualTo(TransactionState.STARTED); + } + + @Test + public void resetAfterErrorFails() { + when(session.newTransaction(eq(Options.fromTransactionOptions()), any())).thenReturn(txn); + manager.begin(); + doThrow(SpannerExceptionFactory.newSpannerException(ErrorCode.UNKNOWN, "")).when(txn).commit(); + 
SpannerException e = assertThrows(SpannerException.class, () -> manager.commit()); + assertEquals(ErrorCode.UNKNOWN, e.getErrorCode()); + + IllegalStateException illegal = + assertThrows(IllegalStateException.class, () -> manager.resetForRetry()); + assertNotNull(illegal.getMessage()); + } + + @Test + public void rollbackAfterCommitFails() { + when(session.newTransaction(eq(Options.fromTransactionOptions()), any())).thenReturn(txn); + manager.begin(); + manager.commit(); + IllegalStateException e = assertThrows(IllegalStateException.class, () -> manager.rollback()); + assertNotNull(e.getMessage()); + } + + @Test + public void commitAfterRollbackFails() { + when(session.newTransaction(eq(Options.fromTransactionOptions()), any())).thenReturn(txn); + manager.begin(); + manager.rollback(); + IllegalStateException e = assertThrows(IllegalStateException.class, () -> manager.commit()); + assertNotNull(e.getMessage()); + } + + @SuppressWarnings("unchecked") + @Test + public void usesPreparedTransaction() { + SpannerOptions options = mock(SpannerOptions.class); + when(options.getNumChannels()).thenReturn(4); + when(options.getDefaultTransactionOptions()) + .thenReturn(TransactionOptions.getDefaultInstance()); + GrpcTransportOptions transportOptions = mock(GrpcTransportOptions.class); + when(transportOptions.getExecutorFactory()).thenReturn(new TestExecutorFactory()); + when(options.getTransportOptions()).thenReturn(transportOptions); + SessionPoolOptions sessionPoolOptions = + SessionPoolOptions.newBuilder().setMinSessions(0).setIncStep(1).build(); + when(options.getSessionPoolOptions()).thenReturn(sessionPoolOptions); + when(options.getSessionLabels()).thenReturn(Collections.emptyMap()); + when(options.getDatabaseRole()).thenReturn("role"); + when(options.getOpenTelemetry()).thenReturn(OpenTelemetry.noop()); + SpannerRpc rpc = mock(SpannerRpc.class); + when(rpc.asyncDeleteSession(Mockito.anyString(), Mockito.anyMap())) + 
.thenReturn(ApiFutures.immediateFuture(Empty.getDefaultInstance())); + when(rpc.batchCreateSessions( + Mockito.anyString(), + Mockito.eq(1), + Mockito.anyString(), + Mockito.anyMap(), + Mockito.anyMap())) + .thenAnswer( + invocation -> + Collections.singletonList( + Session.newBuilder() + .setName(invocation.getArguments()[0] + "/sessions/1") + .setCreateTime( + com.google.protobuf.Timestamp.newBuilder() + .setSeconds(System.currentTimeMillis() * 1000)) + .build())); + when(rpc.createSession( + Mockito.anyString(), + Mockito.anyString(), + Mockito.anyMap(), + Mockito.eq(null), + Mockito.eq(true))) + .thenAnswer( + invocation -> + Session.newBuilder() + .setName(invocation.getArguments()[0] + "/sessions/1") + .setMultiplexed(true) + .setCreateTime( + com.google.protobuf.Timestamp.newBuilder() + .setSeconds(System.currentTimeMillis() * 1000)) + .build()); + when(rpc.beginTransactionAsync( + Mockito.any(BeginTransactionRequest.class), Mockito.anyMap(), eq(true))) + .thenAnswer( + invocation -> + ApiFutures.immediateFuture( + Transaction.newBuilder() + .setId(ByteString.copyFromUtf8(UUID.randomUUID().toString())) + .build())); + when(rpc.commitAsync(Mockito.any(CommitRequest.class), Mockito.anyMap())) + .thenAnswer( + invocation -> + ApiFutures.immediateFuture( + com.google.spanner.v1.CommitResponse.newBuilder() + .setCommitTimestamp( + com.google.protobuf.Timestamp.newBuilder() + .setSeconds(System.currentTimeMillis() * 1000)) + .build())); + when(rpc.getCommitRetrySettings()) + .thenReturn(SpannerStubSettings.newBuilder().commitSettings().getRetrySettings()); + DatabaseId db = DatabaseId.of("test", "test", "test"); + try (SpannerImpl spanner = new SpannerImpl(rpc, options)) { + DatabaseClient client = spanner.getDatabaseClient(db); + try (TransactionManager mgr = client.transactionManager()) { + mgr.begin(); + mgr.commit(); + } + verify(rpc, times(1)) + .beginTransactionAsync( + Mockito.any(BeginTransactionRequest.class), Mockito.anyMap(), eq(true)); + } + } + + 
@SuppressWarnings({"unchecked", "resource"}) + @Test + public void inlineBegin() { + SpannerOptions options = mock(SpannerOptions.class); + when(options.getNumChannels()).thenReturn(4); + GrpcTransportOptions transportOptions = mock(GrpcTransportOptions.class); + when(transportOptions.getExecutorFactory()).thenReturn(new TestExecutorFactory()); + when(options.getDefaultTransactionOptions()) + .thenReturn(TransactionOptions.getDefaultInstance()); + when(options.getTransportOptions()).thenReturn(transportOptions); + SessionPoolOptions sessionPoolOptions = + SessionPoolOptions.newBuilder().setMinSessions(0).setIncStep(1).build(); + when(options.getSessionPoolOptions()).thenReturn(sessionPoolOptions); + when(options.getSessionLabels()).thenReturn(Collections.emptyMap()); + when(options.getDefaultQueryOptions(Mockito.any(DatabaseId.class))) + .thenReturn(QueryOptions.getDefaultInstance()); + when(options.getOpenTelemetry()).thenReturn(OpenTelemetry.noop()); + SpannerRpc rpc = mock(SpannerRpc.class); + when(rpc.asyncDeleteSession(Mockito.anyString(), Mockito.anyMap())) + .thenReturn(ApiFutures.immediateFuture(Empty.getDefaultInstance())); + when(options.getDatabaseRole()).thenReturn("role"); + when(rpc.batchCreateSessions( + Mockito.anyString(), + Mockito.eq(1), + Mockito.anyString(), + Mockito.anyMap(), + Mockito.anyMap())) + .thenAnswer( + invocation -> + Collections.singletonList( + Session.newBuilder() + .setName(invocation.getArguments()[0] + "/sessions/1") + .setCreateTime( + com.google.protobuf.Timestamp.newBuilder() + .setSeconds(System.currentTimeMillis() * 1000)) + .build())); + when(rpc.createSession( + Mockito.anyString(), + Mockito.anyString(), + Mockito.anyMap(), + Mockito.eq(null), + Mockito.eq(true))) + .thenAnswer( + invocation -> + Session.newBuilder() + .setName(invocation.getArguments()[0] + "/sessions/1") + .setMultiplexed(true) + .setCreateTime( + com.google.protobuf.Timestamp.newBuilder() + .setSeconds(System.currentTimeMillis() * 1000)) + 
.build()); + when(rpc.beginTransactionAsync( + Mockito.any(BeginTransactionRequest.class), Mockito.anyMap(), eq(true))) + .thenAnswer( + invocation -> + ApiFutures.immediateFuture( + Transaction.newBuilder() + .setId(ByteString.copyFromUtf8(UUID.randomUUID().toString())) + .build())); + final AtomicInteger transactionsStarted = new AtomicInteger(); + when(rpc.executeQuery(Mockito.any(ExecuteSqlRequest.class), Mockito.anyMap(), eq(true))) + .thenAnswer( + invocation -> { + ResultSet.Builder builder = + ResultSet.newBuilder() + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()); + ExecuteSqlRequest request = invocation.getArgument(0, ExecuteSqlRequest.class); + if (request.getTransaction() != null && request.getTransaction().hasBegin()) { + transactionsStarted.incrementAndGet(); + builder.setMetadata( + ResultSetMetadata.newBuilder() + .setTransaction( + Transaction.newBuilder() + .setId(ByteString.copyFromUtf8("test-tx")) + .build()) + .build()); + } + return builder.build(); + }); + when(rpc.commitAsync(Mockito.any(CommitRequest.class), Mockito.anyMap())) + .thenAnswer( + invocation -> + ApiFutures.immediateFuture( + com.google.spanner.v1.CommitResponse.newBuilder() + .setCommitTimestamp( + com.google.protobuf.Timestamp.newBuilder() + .setSeconds(System.currentTimeMillis() * 1000)) + .build())); + when(rpc.getCommitRetrySettings()) + .thenReturn(SpannerStubSettings.newBuilder().commitSettings().getRetrySettings()); + DatabaseId db = DatabaseId.of("test", "test", "test"); + try (SpannerImpl spanner = new SpannerImpl(rpc, options)) { + DatabaseClient client = spanner.getDatabaseClient(db); + try (TransactionManager mgr = client.transactionManager()) { + TransactionContext tx = mgr.begin(); + while (true) { + try { + tx.executeUpdate(Statement.of("UPDATE FOO SET BAR=1")); + tx.executeUpdate(Statement.of("UPDATE FOO SET BAZ=2")); + mgr.commit(); + break; + } catch (AbortedException e) { + tx = mgr.resetForRetry(); + } + } + } + // BeginTransaction 
should not be called, as we are inlining it with the ExecuteSql request. + verify(rpc, Mockito.never()) + .beginTransaction(Mockito.any(BeginTransactionRequest.class), Mockito.anyMap(), eq(true)); + // We should have 2 ExecuteSql requests. + verify(rpc, times(2)) + .executeQuery(Mockito.any(ExecuteSqlRequest.class), Mockito.anyMap(), eq(true)); + // But only 1 with a BeginTransaction. + assertThat(transactionsStarted.get()).isEqualTo(1); + } + } + + // This test ensures that when a transaction is aborted in a multiplexed session, + // the transaction ID of the aborted transaction is saved during the retry when a new transaction + // is created. + @Test + public void storePreviousTxnIdOnAbortForMultiplexedSession() { + txn = Mockito.mock(TransactionRunnerImpl.TransactionContextImpl.class); + final ByteString mockTransactionId = ByteString.copyFromUtf8("mockTransactionId"); + txn.transactionId = mockTransactionId; + when(session.newTransaction(Options.fromTransactionOptions(), ByteString.EMPTY)) + .thenReturn(txn); + manager.begin(); + // Verify that for the first transaction attempt, the `previousTransactionId` is + // ByteString.EMPTY. + // This is because no transaction has been previously aborted at this point. + verify(session).newTransaction(Options.fromTransactionOptions(), ByteString.EMPTY); + doThrow(SpannerExceptionFactory.newSpannerException(ErrorCode.ABORTED, "")).when(txn).commit(); + assertThrows(AbortedException.class, () -> manager.commit()); + + txn = Mockito.mock(TransactionRunnerImpl.TransactionContextImpl.class); + when(txn.getPreviousTransactionId()).thenReturn(mockTransactionId); + when(session.newTransaction(Options.fromTransactionOptions(), mockTransactionId)) + .thenReturn(txn); + when(session.getIsMultiplexed()).thenReturn(true); + assertThat(manager.resetForRetry()).isEqualTo(txn); + // Verify that in the first retry attempt, the `previousTransactionId` is passed to the new + // transaction. 
+ // This allows Spanner to retry the transaction using the ID of the aborted transaction. + verify(session).newTransaction(Options.fromTransactionOptions(), mockTransactionId); + } + + // This test ensures that when a transaction is aborted in a regular session, + // the transaction ID of the aborted transaction is not saved during the retry when a new + // transaction is created. + @Test + public void skipTxnIdStorageOnAbortForRegularSession() { + txn = Mockito.mock(TransactionRunnerImpl.TransactionContextImpl.class); + final ByteString mockTransactionId = ByteString.copyFromUtf8("mockTransactionId"); + txn.transactionId = mockTransactionId; + when(session.newTransaction(Options.fromTransactionOptions(), ByteString.EMPTY)) + .thenReturn(txn); + manager.begin(); + verify(session).newTransaction(Options.fromTransactionOptions(), ByteString.EMPTY); + doThrow(SpannerExceptionFactory.newSpannerException(ErrorCode.ABORTED, "")).when(txn).commit(); + assertThrows(AbortedException.class, () -> manager.commit()); + clearInvocations(session); + + txn = Mockito.mock(TransactionRunnerImpl.TransactionContextImpl.class); + when(session.newTransaction(Options.fromTransactionOptions(), ByteString.EMPTY)) + .thenReturn(txn); + when(session.getIsMultiplexed()).thenReturn(false); + assertThat(manager.resetForRetry()).isEqualTo(txn); + // Verify that in the first retry attempt, the `previousTransactionId` is not passed to the new + // transaction + // in case of regular sessions. 
+ verify(session).newTransaction(Options.fromTransactionOptions(), ByteString.EMPTY); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionRunnerImplTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionRunnerImplTest.java new file mode 100644 index 000000000000..1dd2418aa05a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionRunnerImplTest.java @@ -0,0 +1,478 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.api.core.ApiFutures; +import com.google.cloud.grpc.GrpcTransportOptions; +import com.google.cloud.grpc.GrpcTransportOptions.ExecutorFactory; +import com.google.cloud.spanner.ErrorHandler.DefaultErrorHandler; +import com.google.cloud.spanner.SessionClient.SessionId; +import com.google.cloud.spanner.TransactionRunnerImpl.TransactionContextImpl; +import com.google.cloud.spanner.XGoogSpannerRequestId.NoopRequestIdCreator; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.cloud.spanner.v1.stub.SpannerStubSettings; +import com.google.common.base.Preconditions; +import com.google.protobuf.ByteString; +import com.google.protobuf.Duration; +import com.google.protobuf.Empty; +import com.google.protobuf.Timestamp; +import com.google.rpc.Code; +import com.google.rpc.RetryInfo; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.CommitResponse; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteBatchDmlResponse; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; +import com.google.spanner.v1.RequestOptions; +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import 
com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.Session; +import com.google.spanner.v1.Transaction; +import com.google.spanner.v1.TransactionOptions; +import io.grpc.Metadata; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.grpc.protobuf.ProtoUtils; +import io.opencensus.trace.Tracing; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.context.Scope; +import java.util.Arrays; +import java.util.Collections; +import java.util.UUID; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; + +/** Unit test for {@link com.google.cloud.spanner.TransactionRunnerImpl} */ +@RunWith(JUnit4.class) +public class TransactionRunnerImplTest { + private static final class TestExecutorFactory + implements ExecutorFactory { + @Override + public ScheduledExecutorService get() { + return Executors.newSingleThreadScheduledExecutor(); + } + + @Override + public void release(ScheduledExecutorService exec) { + exec.shutdown(); + } + } + + @Mock private SpannerRpc rpc; + @Mock private SessionImpl session; + @Mock private TransactionRunnerImpl.TransactionContextImpl txn; + private TransactionRunnerImpl transactionRunner; + private boolean firstRun; + private boolean usedInlinedBegin; + private TraceWrapper tracer; + private ISpan span; + + @BeforeClass + public static void setupOpenTelemetry() { + SpannerOptions.resetActiveTracingFramework(); + SpannerOptions.enableOpenTelemetryTraces(); + } + + @Before + public void setUp() { + MockitoAnnotations.initMocks(this); + tracer = new TraceWrapper(Tracing.getTracer(), 
OpenTelemetry.noop().getTracer(""), false); + firstRun = true; + when(session.getErrorHandler()).thenReturn(DefaultErrorHandler.INSTANCE); + when(session.newTransaction(eq(Options.fromTransactionOptions()), any())).thenReturn(txn); + when(session.getTracer()).thenReturn(tracer); + when(session.getRequestIdCreator()).thenReturn(NoopRequestIdCreator.INSTANCE); + when(rpc.getRequestIdCreator()).thenReturn(NoopRequestIdCreator.INSTANCE); + SpannerImpl spanner = mock(SpannerImpl.class); + SpannerOptions spannerOptions = mock(SpannerOptions.class); + when(spanner.getOptions()).thenReturn(spannerOptions); + when(session.getSpanner()).thenReturn(spanner); + when(rpc.executeQuery(Mockito.any(ExecuteSqlRequest.class), Mockito.anyMap(), eq(true))) + .thenAnswer( + invocation -> { + ResultSet.Builder builder = + ResultSet.newBuilder() + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()); + ExecuteSqlRequest request = invocation.getArgument(0, ExecuteSqlRequest.class); + if (request.getTransaction().hasBegin() + && request.getTransaction().getBegin().hasReadWrite()) { + builder.setMetadata( + ResultSetMetadata.newBuilder() + .setTransaction( + Transaction.newBuilder().setId(ByteString.copyFromUtf8("test"))) + .build()); + usedInlinedBegin = true; + } + return builder.build(); + }); + transactionRunner = new TransactionRunnerImpl(session); + when(rpc.commitAsync(Mockito.any(CommitRequest.class), Mockito.anyMap())) + .thenReturn( + ApiFutures.immediateFuture( + CommitResponse.newBuilder() + .setCommitTimestamp(Timestamp.getDefaultInstance()) + .build())); + when(rpc.getCommitRetrySettings()) + .thenReturn(SpannerStubSettings.newBuilder().commitSettings().getRetrySettings()); + when(rpc.rollbackAsync(Mockito.any(RollbackRequest.class), Mockito.anyMap())) + .thenReturn(ApiFutures.immediateFuture(Empty.getDefaultInstance())); + Span oTspan = mock(Span.class); + span = new OpenTelemetrySpan(oTspan); + when(oTspan.makeCurrent()).thenReturn(mock(Scope.class)); + 
transactionRunner.setSpan(span); + } + + @Test + public void testCommitWithClientContext() { + RequestOptions.ClientContext clientContext = + RequestOptions.ClientContext.newBuilder() + .putSecureContext( + "key", com.google.protobuf.Value.newBuilder().setStringValue("value").build()) + .build(); + when(session.getName()).thenReturn("projects/p/instances/i/databases/d/sessions/s"); + when(session.newTransaction(any(Options.class), any())).thenReturn(txn); + Mockito.clearInvocations(session); + transactionRunner = + new TransactionRunnerImpl( + session, + Options.priority(Options.RpcPriority.HIGH), + Options.tag("tag"), + Options.clientContext(clientContext)); + transactionRunner.setSpan(span); + + transactionRunner.run( + transaction -> { + return null; + }); + + ArgumentCaptor optionsCaptor = ArgumentCaptor.forClass(Options.class); + verify(session).newTransaction(optionsCaptor.capture(), any()); + Options capturedOptions = optionsCaptor.getValue(); + assertEquals(RequestOptions.Priority.PRIORITY_HIGH, capturedOptions.priority()); + assertEquals("tag", capturedOptions.tag()); + assertEquals(clientContext, capturedOptions.clientContext()); + } + + @SuppressWarnings("unchecked") + @Test + public void usesPreparedTransaction() { + SpannerOptions options = mock(SpannerOptions.class); + when(options.getNumChannels()).thenReturn(4); + when(options.getDefaultTransactionOptions()) + .thenReturn(TransactionOptions.getDefaultInstance()); + GrpcTransportOptions transportOptions = mock(GrpcTransportOptions.class); + when(transportOptions.getExecutorFactory()).thenReturn(new TestExecutorFactory()); + when(options.getTransportOptions()).thenReturn(transportOptions); + SessionPoolOptions sessionPoolOptions = + SessionPoolOptions.newBuilder().setMinSessions(0).setIncStep(1).build(); + when(options.getSessionPoolOptions()).thenReturn(sessionPoolOptions); + when(options.getSessionLabels()).thenReturn(Collections.emptyMap()); + when(options.getDatabaseRole()).thenReturn("role"); + 
when(options.getOpenTelemetry()).thenReturn(OpenTelemetry.noop()); + SpannerRpc rpc = mock(SpannerRpc.class); + when(rpc.asyncDeleteSession(Mockito.anyString(), Mockito.anyMap())) + .thenReturn(ApiFutures.immediateFuture(Empty.getDefaultInstance())); + when(rpc.batchCreateSessions( + Mockito.anyString(), + Mockito.eq(1), + Mockito.anyString(), + Mockito.anyMap(), + Mockito.anyMap())) + .thenAnswer( + invocation -> + Collections.singletonList( + Session.newBuilder() + .setName(invocation.getArguments()[0] + "/sessions/1") + .setCreateTime( + Timestamp.newBuilder().setSeconds(System.currentTimeMillis() * 1000)) + .build())); + when(rpc.createSession( + Mockito.anyString(), + Mockito.anyString(), + Mockito.anyMap(), + Mockito.eq(null), + Mockito.eq(true))) + .thenAnswer( + invocation -> + Session.newBuilder() + .setName(invocation.getArguments()[0] + "/sessions/1") + .setMultiplexed(true) + .setCreateTime( + com.google.protobuf.Timestamp.newBuilder() + .setSeconds(System.currentTimeMillis() * 1000)) + .build()); + when(rpc.beginTransactionAsync( + Mockito.any(BeginTransactionRequest.class), Mockito.anyMap(), eq(true))) + .thenAnswer( + invocation -> + ApiFutures.immediateFuture( + Transaction.newBuilder() + .setId(ByteString.copyFromUtf8(UUID.randomUUID().toString())) + .build())); + when(rpc.commitAsync(Mockito.any(CommitRequest.class), Mockito.anyMap())) + .thenAnswer( + invocation -> + ApiFutures.immediateFuture( + CommitResponse.newBuilder() + .setCommitTimestamp( + Timestamp.newBuilder().setSeconds(System.currentTimeMillis() * 1000)) + .build())); + when(rpc.getCommitRetrySettings()) + .thenReturn(SpannerStubSettings.newBuilder().commitSettings().getRetrySettings()); + DatabaseId db = DatabaseId.of("test", "test", "test"); + try (SpannerImpl spanner = new SpannerImpl(rpc, options)) { + DatabaseClient client = spanner.getDatabaseClient(db); + client.readWriteTransaction().run(transaction -> null); + verify(rpc, times(1)) + .beginTransactionAsync( + 
Mockito.any(BeginTransactionRequest.class), Mockito.anyMap(), eq(true)); + } + } + + @Test + public void commitSucceeds() { + final AtomicInteger numCalls = new AtomicInteger(0); + transactionRunner.run( + transaction -> { + numCalls.incrementAndGet(); + return null; + }); + assertThat(numCalls.get()).isEqualTo(1); + verify(txn, never()).ensureTxn(); + verify(txn).commit(); + } + + @Test + public void runAbort() { + when(txn.isAborted()).thenReturn(true); + runTransaction(abortedWithRetryInfo()); + verify(txn).ensureTxn(); + } + + @Test + public void commitAbort() { + final SpannerException error = + SpannerExceptionFactory.newSpannerException(abortedWithRetryInfo()); + doThrow(error).doNothing().when(txn).commit(); + final AtomicInteger numCalls = new AtomicInteger(0); + transactionRunner.run( + transaction -> { + numCalls.incrementAndGet(); + return null; + }); + assertThat(numCalls.get()).isEqualTo(2); + // ensureTxn() is only called during retry. + verify(txn).ensureTxn(); + } + + @Test + public void commitFailsWithNonAbort() { + final SpannerException error = + SpannerExceptionFactory.newSpannerException( + SpannerExceptionFactory.newSpannerException(ErrorCode.UNKNOWN, "")); + doThrow(error).when(txn).commit(); + final AtomicInteger numCalls = new AtomicInteger(0); + SpannerException e = + assertThrows( + SpannerException.class, + () -> transactionRunner.run(transaction -> numCalls.incrementAndGet())); + assertEquals(ErrorCode.UNKNOWN, e.getErrorCode()); + assertEquals(1, numCalls.get()); + verify(txn, never()).ensureTxn(); + verify(txn, times(1)).commit(); + } + + @Test + public void runResourceExhaustedNoRetry() { + assertThrows( + SpannerException.class, + () -> + runTransaction( + new StatusRuntimeException( + Status.fromCodeValue(Status.Code.RESOURCE_EXHAUSTED.value())))); + verify(txn).rollback(); + } + + @Test + public void batchDmlAborted() { + long[] updateCount = batchDmlException(Code.ABORTED_VALUE); + assertThat(updateCount.length).isEqualTo(2); + 
assertThat(updateCount[0]).isEqualTo(1L); + assertThat(updateCount[1]).isEqualTo(1L); + } + + @Test + public void batchDmlFailedPrecondition() { + SpannerBatchUpdateException e = + assertThrows( + SpannerBatchUpdateException.class, + () -> batchDmlException(Code.FAILED_PRECONDITION_VALUE)); + assertArrayEquals(new long[] {1L}, e.getUpdateCounts()); + assertEquals(Code.FAILED_PRECONDITION_VALUE, e.getCode()); + } + + @SuppressWarnings("unchecked") + @Test + public void inlineBegin() { + SpannerImpl spanner = mock(SpannerImpl.class); + SpannerOptions options = mock(SpannerOptions.class); + when(options.getDefaultTransactionOptions()) + .thenReturn(TransactionOptions.getDefaultInstance()); + when(spanner.getRpc()).thenReturn(rpc); + when(spanner.getDefaultQueryOptions(Mockito.any(DatabaseId.class))) + .thenReturn(QueryOptions.getDefaultInstance()); + when(spanner.getOptions()).thenReturn(options); + when(spanner.getTracer()).thenReturn(tracer); + SessionPoolOptions sessionPoolOptions = SessionPoolOptions.newBuilder().build(); + when(options.getSessionPoolOptions()).thenReturn(sessionPoolOptions); + + SessionImpl session = + new SessionImpl( + spanner, + new SessionReference( + "projects/p/instances/i/databases/d/sessions/s", null, Collections.EMPTY_MAP)) {}; + session.setCurrentSpan(new OpenTelemetrySpan(mock(io.opentelemetry.api.trace.Span.class))); + TransactionRunnerImpl runner = new TransactionRunnerImpl(session); + runner.setSpan(span); + assertThat(usedInlinedBegin).isFalse(); + runner.run( + transaction -> { + transaction.executeUpdate(Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=2")); + return null; + }); + verify(rpc, Mockito.never()) + .beginTransaction(Mockito.any(BeginTransactionRequest.class), Mockito.anyMap(), eq(true)); + verify(rpc, Mockito.never()) + .beginTransactionAsync( + Mockito.any(BeginTransactionRequest.class), Mockito.anyMap(), eq(true)); + assertThat(usedInlinedBegin).isTrue(); + } + + @SuppressWarnings("unchecked") + private long[] 
batchDmlException(int status) { + Preconditions.checkArgument(status != Code.OK_VALUE); + TransactionContextImpl transaction = + TransactionContextImpl.newBuilder() + .setSession(session) + .setTransactionId(ByteString.copyFromUtf8(UUID.randomUUID().toString())) + .setOptions(Options.fromTransactionOptions()) + .setRpc(rpc) + .setTracer(session.getTracer()) + .setSpan(session.getTracer().getCurrentSpan()) + .build(); + when(session.newTransaction(eq(Options.fromTransactionOptions()), any())) + .thenReturn(transaction); + when(session.getName()).thenReturn(SessionId.of("p", "i", "d", "test").getName()); + TransactionRunnerImpl runner = new TransactionRunnerImpl(session); + runner.setSpan(span); + ExecuteBatchDmlResponse response1 = + ExecuteBatchDmlResponse.newBuilder() + .addResultSets( + ResultSet.newBuilder() + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L)) + .build()) + .setStatus(com.google.rpc.Status.newBuilder().setCode(status).build()) + .build(); + ExecuteBatchDmlResponse response2 = + ExecuteBatchDmlResponse.newBuilder() + .addResultSets( + ResultSet.newBuilder() + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L)) + .build()) + .addResultSets( + ResultSet.newBuilder() + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L)) + .build()) + .setStatus(com.google.rpc.Status.newBuilder().setCode(Code.OK_VALUE).build()) + .build(); + when(rpc.executeBatchDml(Mockito.any(ExecuteBatchDmlRequest.class), Mockito.anyMap())) + .thenReturn(response1, response2); + CommitResponse commitResponse = + CommitResponse.newBuilder().setCommitTimestamp(Timestamp.getDefaultInstance()).build(); + when(rpc.commitAsync(Mockito.any(CommitRequest.class), Mockito.anyMap())) + .thenReturn(ApiFutures.immediateFuture(commitResponse)); + final Statement statement = Statement.of("UPDATE FOO SET BAR=1"); + final AtomicInteger numCalls = new AtomicInteger(0); + long[] updateCount = + runner.run( + transaction1 -> { + numCalls.incrementAndGet(); + return 
transaction1.batchUpdate(Arrays.asList(statement, statement)); + }); + if (status == Code.ABORTED_VALUE) { + // Assert that the method ran twice because the first response aborted. + assertThat(numCalls.get()).isEqualTo(2); + } + return updateCount; + } + + private void runTransaction(final Exception exception) { + transactionRunner.run( + transaction -> { + if (firstRun) { + firstRun = false; + throw SpannerExceptionFactory.newSpannerException(exception); + } + return null; + }); + } + + private SpannerException abortedWithRetryInfo() { + Status status = Status.fromCodeValue(Status.Code.ABORTED.value()); + return SpannerExceptionFactory.newSpannerException( + ErrorCode.ABORTED, "test", new StatusRuntimeException(status, createRetryTrailers())); + } + + private Metadata createRetryTrailers() { + Metadata.Key key = ProtoUtils.keyForProto(RetryInfo.getDefaultInstance()); + Metadata trailers = new Metadata(); + RetryInfo retryInfo = + RetryInfo.newBuilder() + .setRetryDelay(Duration.newBuilder().setNanos(0).setSeconds(0L)) + .build(); + trailers.put(key, retryInfo); + return trailers; + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TypeTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TypeTest.java new file mode 100644 index 000000000000..8fc168eae964 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TypeTest.java @@ -0,0 +1,728 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.Type.StructField; +import static com.google.common.testing.SerializableTester.reserializeAndAssert; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.spanner.Type.Code; +import com.google.spanner.v1.TypeAnnotationCode; +import com.google.spanner.v1.TypeCode; +import org.hamcrest.MatcherAssert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link com.google.cloud.spanner.Type}. */ +@RunWith(JUnit4.class) +public class TypeTest { + + private abstract static class ScalarTypeTester { + private final Type.Code expectedCode; + private final TypeCode expectedTypeCode; + private final TypeAnnotationCode expectedTypeAnnotationCode; + private String protoTypeFqn = ""; + + ScalarTypeTester(Type.Code expectedCode, TypeCode expectedTypeCode) { + this(expectedCode, expectedTypeCode, TypeAnnotationCode.TYPE_ANNOTATION_CODE_UNSPECIFIED); + } + + ScalarTypeTester( + Type.Code expectedCode, + TypeCode expectedTypeCode, + TypeAnnotationCode expectedTypeAnnotationCode) { + this.expectedCode = expectedCode; + this.expectedTypeCode = expectedTypeCode; + this.expectedTypeAnnotationCode = expectedTypeAnnotationCode; + } + + ScalarTypeTester(Type.Code expectedCode, TypeCode expectedTypeCode, String protoTypeFqn) { + this(expectedCode, expectedTypeCode); + this.protoTypeFqn = protoTypeFqn; + } + + abstract Type newType(); + + void test() { + Type t = newType(); + assertThat(t.getCode()).isEqualTo(expectedCode); + assertThat(newType()).isEqualTo(t); // Interned. 
+ // String form is deliberately the same as the corresponding type enum in the public API. + if (expectedTypeAnnotationCode != TypeAnnotationCode.TYPE_ANNOTATION_CODE_UNSPECIFIED) { + assertThat(t.toString()) + .isEqualTo( + expectedTypeCode.toString() + "<" + expectedTypeAnnotationCode.toString() + ">"); + } else { + assertThat(t.toString()).isEqualTo(expectedTypeCode.toString()); + } + + com.google.spanner.v1.Type proto = t.toProto(); + assertThat(proto.getCode()).isEqualTo(expectedTypeCode); + assertThat(proto.getTypeAnnotation()).isEqualTo(expectedTypeAnnotationCode); + assertThat(proto.getProtoTypeFqn()).isEqualTo(protoTypeFqn); + assertThat(proto.hasArrayElementType()).isFalse(); + assertThat(proto.hasStructType()).isFalse(); + + // Round trip. + Type fromProto = Type.fromProto(proto); + assertThat(fromProto).isEqualTo(t); + + reserializeAndAssert(t); + } + } + + @Test + public void bool() { + new ScalarTypeTester(Type.Code.BOOL, TypeCode.BOOL) { + @Override + Type newType() { + return Type.bool(); + } + }.test(); + } + + @Test + public void int64() { + new ScalarTypeTester(Type.Code.INT64, TypeCode.INT64) { + @Override + Type newType() { + return Type.int64(); + } + }.test(); + } + + @Test + public void float32() { + new ScalarTypeTester(Type.Code.FLOAT32, TypeCode.FLOAT32) { + @Override + Type newType() { + return Type.float32(); + } + }.test(); + } + + @Test + public void float64() { + new ScalarTypeTester(Type.Code.FLOAT64, TypeCode.FLOAT64) { + @Override + Type newType() { + return Type.float64(); + } + }.test(); + } + + @Test + public void numeric() { + new ScalarTypeTester(Type.Code.NUMERIC, TypeCode.NUMERIC) { + @Override + Type newType() { + return Type.numeric(); + } + }.test(); + } + + @Test + public void pgNumeric() { + new ScalarTypeTester(Type.Code.PG_NUMERIC, TypeCode.NUMERIC, TypeAnnotationCode.PG_NUMERIC) { + @Override + Type newType() { + return Type.pgNumeric(); + } + }.test(); + } + + @Test + public void string() { + new 
ScalarTypeTester(Type.Code.STRING, TypeCode.STRING) { + @Override + Type newType() { + return Type.string(); + } + }.test(); + } + + @Test + public void json() { + new ScalarTypeTester(Code.JSON, TypeCode.JSON) { + @Override + Type newType() { + return Type.json(); + } + }.test(); + } + + @Test + public void pgJsonb() { + new ScalarTypeTester(Code.PG_JSONB, TypeCode.JSON, TypeAnnotationCode.PG_JSONB) { + @Override + Type newType() { + return Type.pgJsonb(); + } + }.test(); + } + + @Test + public void pgOid() { + new ScalarTypeTester(Type.Code.PG_OID, TypeCode.INT64, TypeAnnotationCode.PG_OID) { + @Override + Type newType() { + return Type.pgOid(); + } + }.test(); + } + + @Test + public void bytes() { + new ScalarTypeTester(Type.Code.BYTES, TypeCode.BYTES) { + @Override + Type newType() { + return Type.bytes(); + } + }.test(); + } + + @Test + public void proto() { + new ScalarTypeTester(Type.Code.PROTO, TypeCode.PROTO, "com.google.temp") { + @Override + Type newType() { + return Type.proto("com.google.temp"); + } + }.test(); + } + + @Test + public void protoEnum() { + new ScalarTypeTester(Type.Code.ENUM, TypeCode.ENUM, "com.google.temp.enum") { + @Override + Type newType() { + return Type.protoEnum("com.google.temp.enum"); + } + }.test(); + } + + @Test + public void timestamp() { + new ScalarTypeTester(Type.Code.TIMESTAMP, TypeCode.TIMESTAMP) { + @Override + Type newType() { + return Type.timestamp(); + } + }.test(); + } + + @Test + public void date() { + new ScalarTypeTester(Type.Code.DATE, TypeCode.DATE) { + @Override + Type newType() { + return Type.date(); + } + }.test(); + } + + @Test + public void uuid() { + new ScalarTypeTester(Type.Code.UUID, TypeCode.UUID) { + @Override + Type newType() { + return Type.uuid(); + } + }.test(); + } + + @Test + public void interval() { + new ScalarTypeTester(Code.INTERVAL, TypeCode.INTERVAL) { + @Override + Type newType() { + return Type.interval(); + } + }.test(); + } + + abstract static class ArrayTypeTester { + private 
final Type.Code expectedElementCode; + private final TypeCode expectedElementTypeCode; + private final TypeAnnotationCode expectedTypeAnnotationCode; + private final boolean expectInterned; + private String protoTypeFqn = ""; + + ArrayTypeTester( + Type.Code expectedElementCode, TypeCode expectedElementTypeCode, boolean expectInterned) { + this( + expectedElementCode, + expectedElementTypeCode, + TypeAnnotationCode.TYPE_ANNOTATION_CODE_UNSPECIFIED, + expectInterned); + } + + ArrayTypeTester( + Type.Code expectedElementCode, + TypeCode expectedElementTypeCode, + String protoTypeFqn, + boolean expectInterned) { + this( + expectedElementCode, + expectedElementTypeCode, + TypeAnnotationCode.TYPE_ANNOTATION_CODE_UNSPECIFIED, + expectInterned); + this.protoTypeFqn = protoTypeFqn; + } + + ArrayTypeTester( + Type.Code expectedElementCode, + TypeCode expectedElementTypeCode, + TypeAnnotationCode expectedTypeAnnotationCode, + boolean expectInterned) { + this.expectedElementCode = expectedElementCode; + this.expectedElementTypeCode = expectedElementTypeCode; + this.expectedTypeAnnotationCode = expectedTypeAnnotationCode; + this.expectInterned = expectInterned; + } + + abstract Type newElementType(); + + void test() { + Type elementType = newElementType(); + Type t = Type.array(elementType); + assertThat(t.getCode()).isEqualTo(Type.Code.ARRAY); + assertThat(t.getArrayElementType()).isEqualTo(elementType); + if (expectInterned) { + assertThat(Type.array(newElementType())).isSameInstanceAs(t); + } + assertThat(t.toString()).isEqualTo("ARRAY<" + elementType.toString() + ">"); + + com.google.spanner.v1.Type proto = t.toProto(); + assertThat(proto.getCode()).isEqualTo(TypeCode.ARRAY); + assertThat(proto.getArrayElementType()).isEqualTo(elementType.toProto()); + assertThat(proto.hasStructType()).isFalse(); + + Type fromProto = Type.fromProto(proto); + assertThat(fromProto).isEqualTo(t); + + if (expectInterned) { + assertThat(fromProto).isSameInstanceAs(t); + } + 
reserializeAndAssert(t); + } + } + + @Test + public void boolArray() { + new ArrayTypeTester(Type.Code.BOOL, TypeCode.BOOL, true) { + @Override + Type newElementType() { + return Type.bool(); + } + }.test(); + } + + @Test + public void int64Array() { + new ArrayTypeTester(Type.Code.INT64, TypeCode.INT64, true) { + @Override + Type newElementType() { + return Type.int64(); + } + }.test(); + } + + @Test + public void float32Array() { + new ArrayTypeTester(Type.Code.FLOAT32, TypeCode.FLOAT32, true) { + @Override + Type newElementType() { + return Type.float32(); + } + }.test(); + } + + @Test + public void float64Array() { + new ArrayTypeTester(Type.Code.FLOAT64, TypeCode.FLOAT64, true) { + @Override + Type newElementType() { + return Type.float64(); + } + }.test(); + } + + @Test + public void numericArray() { + new ArrayTypeTester(Type.Code.NUMERIC, TypeCode.NUMERIC, true) { + @Override + Type newElementType() { + return Type.numeric(); + } + }.test(); + } + + @Test + public void pgNumericArray() { + new ArrayTypeTester( + Type.Code.PG_NUMERIC, TypeCode.NUMERIC, TypeAnnotationCode.PG_NUMERIC, true) { + @Override + Type newElementType() { + return Type.pgNumeric(); + } + }.test(); + } + + @Test + public void stringArray() { + new ArrayTypeTester(Type.Code.STRING, TypeCode.STRING, true) { + @Override + Type newElementType() { + return Type.string(); + } + }.test(); + } + + @Test + public void jsonArray() { + new ArrayTypeTester(Code.JSON, TypeCode.JSON, true) { + @Override + Type newElementType() { + return Type.json(); + } + }.test(); + } + + @Test + public void pgJsonbArray() { + new ArrayTypeTester(Code.PG_JSONB, TypeCode.JSON, TypeAnnotationCode.PG_JSONB, true) { + @Override + Type newElementType() { + return Type.pgJsonb(); + } + }.test(); + } + + @Test + public void bytesArray() { + new ArrayTypeTester(Type.Code.BYTES, TypeCode.BYTES, true) { + @Override + Type newElementType() { + return Type.bytes(); + } + }.test(); + } + + @Test + public void timestampArray() { 
+ new ArrayTypeTester(Type.Code.TIMESTAMP, TypeCode.TIMESTAMP, true) { + @Override + Type newElementType() { + return Type.timestamp(); + } + }.test(); + } + + @Test + public void dateArray() { + new ArrayTypeTester(Type.Code.DATE, TypeCode.DATE, true) { + @Override + Type newElementType() { + return Type.date(); + } + }.test(); + } + + @Test + public void uuidArray() { + new ArrayTypeTester(Type.Code.UUID, TypeCode.UUID, true) { + @Override + Type newElementType() { + return Type.uuid(); + } + }.test(); + } + + @Test + public void intervalArray() { + new ArrayTypeTester(Type.Code.INTERVAL, TypeCode.INTERVAL, true) { + @Override + Type newElementType() { + return Type.interval(); + } + }.test(); + } + + @Test + public void protoArray() { + new ArrayTypeTester(Type.Code.PROTO, TypeCode.PROTO, "com.google.temp", false) { + @Override + Type newElementType() { + return Type.proto("com.google.temp"); + } + }.test(); + } + + @Test + public void protoEnumArray() { + new ArrayTypeTester(Type.Code.ENUM, TypeCode.ENUM, "com.google.temp.enum", false) { + @Override + Type newElementType() { + return Type.protoEnum("com.google.temp.enum"); + } + }.test(); + } + + @Test + public void arrayOfArray() { + new ArrayTypeTester(Type.Code.ARRAY, TypeCode.ARRAY, false /* not interned */) { + @Override + Type newElementType() { + return Type.array(Type.int64()); + } + }.test(); + } + + @Test + public void struct() { + Type t = + Type.struct( + StructField.of("f1", Type.int64()), + StructField.of("f2", Type.string()), + StructField.of("f3", Type.pgNumeric())); + assertThat(t.getCode()).isEqualTo(Type.Code.STRUCT); + // Exercise StructField equality. + assertThat(t.getStructFields()) + .containsExactly( + StructField.of("f1", Type.int64()), + StructField.of("f2", Type.string()), + StructField.of("f3", Type.pgNumeric())) + .inOrder(); + // Exercise StructField getters. 
+ assertThat(t.getStructFields().get(0).getName()).isEqualTo("f1"); + assertThat(t.getStructFields().get(0).getType()).isEqualTo(Type.int64()); + assertThat(t.getStructFields().get(1).getName()).isEqualTo("f2"); + assertThat(t.getStructFields().get(1).getType()).isEqualTo(Type.string()); + assertThat(t.getStructFields().get(2).getName()).isEqualTo("f3"); + assertThat(t.getStructFields().get(2).getType()).isEqualTo(Type.pgNumeric()); + assertThat(t.toString()).isEqualTo("STRUCT>"); + assertThat(t.getFieldIndex("f1")).isEqualTo(0); + assertThat(t.getFieldIndex("f2")).isEqualTo(1); + assertThat(t.getFieldIndex("f3")).isEqualTo(2); + + assertProtoEquals( + t.toProto(), + "code: STRUCT struct_type { fields { name: 'f1' type { code: INT64 } }" + + " fields { name: 'f2' type { code: STRING } } " + + " fields { name: 'f3' type { code: NUMERIC, type_annotation: PG_NUMERIC } } }"); + } + + @Test + public void emptyStruct() { + Type t = Type.struct(); + assertThat(t.getCode()).isEqualTo(Type.Code.STRUCT); + assertThat(t.getStructFields()).isEmpty(); + assertThat(t.toString()).isEqualTo("STRUCT<>"); + assertProtoEquals(t.toProto(), "code: STRUCT struct_type {}"); + } + + @Test + public void structFieldIndexNotFound() { + Type t = Type.struct(StructField.of("f1", Type.int64())); + IllegalArgumentException e = + assertThrows(IllegalArgumentException.class, () -> t.getFieldIndex("f2")); + assertThat(e.getMessage().contains("Field not found: f2")); + } + + @Test + public void structFieldIndexAmbiguous() { + Type t = Type.struct(StructField.of("f1", Type.int64()), StructField.of("f1", Type.string())); + IllegalArgumentException e = + assertThrows(IllegalArgumentException.class, () -> t.getFieldIndex("f1")); + assertThat(e.getMessage().contains("Ambiguous field name: f1")); + } + + @Test + public void parseErrorMissingTypeCode() { + com.google.spanner.v1.Type proto = com.google.spanner.v1.Type.newBuilder().build(); + assertEquals(Code.UNRECOGNIZED, Type.fromProto(proto).getCode()); 
+ } + + @Test + public void parseErrorMissingArrayElementTypeProto() { + com.google.spanner.v1.Type proto = + com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.ARRAY).build(); + IllegalArgumentException e = + assertThrows(IllegalArgumentException.class, () -> Type.fromProto(proto)); + assertNotNull(e.getMessage()); + } + + @Test + public void testUnrecognized() { + Type unrecognized = Type.fromProto(com.google.spanner.v1.Type.newBuilder().build()); + assertEquals("TYPE_CODE_UNSPECIFIED", unrecognized.toString()); + assertEquals(unrecognized, Type.fromProto(com.google.spanner.v1.Type.newBuilder().build())); + assertNotEquals(unrecognized, Type.int64()); + } + + @Test + public void testUnrecognizedWithAnnotation() { + Type unrecognized = + Type.fromProto( + com.google.spanner.v1.Type.newBuilder() + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC) + .build()); + assertEquals("TYPE_CODE_UNSPECIFIED", unrecognized.toString()); + assertEquals( + unrecognized, + Type.fromProto( + com.google.spanner.v1.Type.newBuilder() + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC) + .build())); + assertNotEquals( + unrecognized, + Type.fromProto( + com.google.spanner.v1.Type.newBuilder() + .setTypeAnnotation(TypeAnnotationCode.PG_JSONB) + .build())); + assertNotEquals(unrecognized, Type.int64()); + } + + @Test + public void testUnrecognizedArray() { + Type unrecognizedArray = + Type.fromProto( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType(com.google.spanner.v1.Type.newBuilder().build()) + .build()); + assertEquals("ARRAY", unrecognizedArray.toString()); + assertEquals( + unrecognizedArray, + Type.fromProto( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType(com.google.spanner.v1.Type.newBuilder().build()) + .build())); + assertNotEquals(unrecognizedArray, Type.array(Type.int64())); + } + + @Test + public void testUnrecognizedArrayWithAnnotation() { + Type unrecognizedArray = + 
Type.fromProto( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + com.google.spanner.v1.Type.newBuilder() + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC) + .build()) + .build()); + assertEquals("ARRAY>", unrecognizedArray.toString()); + assertEquals( + unrecognizedArray, + Type.fromProto( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + com.google.spanner.v1.Type.newBuilder() + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC) + .build()) + .build())); + assertNotEquals(unrecognizedArray, Type.array(Type.int64())); + } + + @Test + public void testGoogleSQLTypeNames() { + assertEquals("INT64", Type.int64().getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + assertEquals("BOOL", Type.bool().getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + assertEquals("FLOAT64", Type.float64().getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + assertEquals("STRING", Type.string().getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + assertEquals("BYTES", Type.bytes().getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + assertEquals("DATE", Type.date().getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + assertEquals("UUID", Type.uuid().getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + assertEquals("INTERVAL", Type.interval().getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + assertEquals("TIMESTAMP", Type.timestamp().getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + assertEquals("JSON", Type.json().getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + assertEquals("NUMERIC", Type.numeric().getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + + assertEquals( + "ARRAY", Type.array(Type.int64()).getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + assertEquals( + "ARRAY", Type.array(Type.bool()).getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + assertEquals( + "ARRAY", + Type.array(Type.float64()).getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + assertEquals( + "ARRAY", 
Type.array(Type.string()).getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + assertEquals( + "ARRAY", Type.array(Type.bytes()).getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + assertEquals( + "ARRAY", Type.array(Type.date()).getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + assertEquals( + "ARRAY", Type.array(Type.uuid()).getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + assertEquals( + "ARRAY", + Type.array(Type.interval()).getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + assertEquals( + "ARRAY", + Type.array(Type.timestamp()).getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + assertEquals( + "ARRAY", Type.array(Type.json()).getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + assertEquals( + "ARRAY", + Type.array(Type.numeric()).getSpannerTypeName(Dialect.GOOGLE_STANDARD_SQL)); + } + + @Test + public void testPostgreSQLTypeNames() { + assertEquals("bigint", Type.int64().getSpannerTypeName(Dialect.POSTGRESQL)); + assertEquals("boolean", Type.bool().getSpannerTypeName(Dialect.POSTGRESQL)); + assertEquals("double precision", Type.float64().getSpannerTypeName(Dialect.POSTGRESQL)); + assertEquals("character varying", Type.string().getSpannerTypeName(Dialect.POSTGRESQL)); + assertEquals("bytea", Type.bytes().getSpannerTypeName(Dialect.POSTGRESQL)); + assertEquals("date", Type.date().getSpannerTypeName(Dialect.POSTGRESQL)); + assertEquals("uuid", Type.uuid().getSpannerTypeName(Dialect.POSTGRESQL)); + assertEquals("interval", Type.interval().getSpannerTypeName(Dialect.POSTGRESQL)); + assertEquals( + "timestamp with time zone", Type.timestamp().getSpannerTypeName(Dialect.POSTGRESQL)); + assertEquals("jsonb", Type.pgJsonb().getSpannerTypeName(Dialect.POSTGRESQL)); + assertEquals("numeric", Type.pgNumeric().getSpannerTypeName(Dialect.POSTGRESQL)); + + assertEquals("bigint[]", Type.array(Type.int64()).getSpannerTypeName(Dialect.POSTGRESQL)); + assertEquals("boolean[]", Type.array(Type.bool()).getSpannerTypeName(Dialect.POSTGRESQL)); + assertEquals( + "double 
precision[]", Type.array(Type.float64()).getSpannerTypeName(Dialect.POSTGRESQL)); + assertEquals( + "character varying[]", Type.array(Type.string()).getSpannerTypeName(Dialect.POSTGRESQL)); + assertEquals("bytea[]", Type.array(Type.bytes()).getSpannerTypeName(Dialect.POSTGRESQL)); + assertEquals("date[]", Type.array(Type.date()).getSpannerTypeName(Dialect.POSTGRESQL)); + assertEquals("uuid[]", Type.array(Type.uuid()).getSpannerTypeName(Dialect.POSTGRESQL)); + assertEquals("interval[]", Type.array(Type.interval()).getSpannerTypeName(Dialect.POSTGRESQL)); + assertEquals( + "timestamp with time zone[]", + Type.array(Type.timestamp()).getSpannerTypeName(Dialect.POSTGRESQL)); + assertEquals("jsonb[]", Type.array(Type.pgJsonb()).getSpannerTypeName(Dialect.POSTGRESQL)); + assertEquals("numeric[]", Type.array(Type.pgNumeric()).getSpannerTypeName(Dialect.POSTGRESQL)); + } + + private static void assertProtoEquals(com.google.spanner.v1.Type proto, String expected) { + MatcherAssert.assertThat( + proto, SpannerMatchers.matchesProto(com.google.spanner.v1.Type.class, expected)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ValueBinderTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ValueBinderTest.java new file mode 100644 index 000000000000..d85f816f1474 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ValueBinderTest.java @@ -0,0 +1,446 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.ValueBinderTest.DefaultValues.defaultBytesBase64; +import static com.google.cloud.spanner.ValueBinderTest.DefaultValues.defaultJson; +import static com.google.cloud.spanner.ValueBinderTest.DefaultValues.defaultLongWrapper; +import static com.google.cloud.spanner.ValueBinderTest.DefaultValues.defaultPgJsonb; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; + +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.SingerProto.Genre; +import com.google.cloud.spanner.SingerProto.SingerInfo; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Descriptors.Descriptor; +import com.google.protobuf.Descriptors.EnumDescriptor; +import com.google.protobuf.ProtocolMessageEnum; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Base64; +import java.util.Collections; +import java.util.UUID; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link com.google.cloud.spanner.ValueBinder}. 
*/ +@RunWith(JUnit4.class) +public class ValueBinderTest { + private static final String JSON_METHOD_NAME = "json"; + private static final String PG_JSONB_METHOD_NAME = "pgJsonb"; + private static final String PG_OID_METHOD_NAME = "pgOid"; + private static final String PG_NUMERIC_METHOD_NAME = "pgNumeric"; + private static final String PROTO_MESSAGE_METHOD_NAME = "protoMessage"; + private static final String PROTO_ENUM_METHOD_NAME = "protoEnum"; + private static final String BYTES_BASE64_METHOD_NAME = "bytesFromBase64"; + public static final String DEFAULT_PG_NUMERIC = "1.23"; + + private Value lastValue; + private int lastReturnValue; + private ValueBinder binder = new BinderImpl(); + + private class BinderImpl extends ValueBinder { + @Override + Integer handle(Value value) { + lastValue = value; + return ++lastReturnValue; + } + } + + @Test + public void reflection() + throws InvocationTargetException, IllegalAccessException, NoSuchMethodException { + // Test that every Value factory method has a counterpart in ValueBinder, and that invoking it + // produces the expected Value. The only exception is for untyped values, which must be + // constructed manually as an untyped value and then assigned as a parameter. + for (Method method : Value.class.getMethods()) { + if (!Modifier.isStatic(method.getModifiers()) + || !method.getReturnType().equals(Value.class) + || (method.getParameterTypes().length > 0 + && method.getParameterTypes()[0].equals(com.google.protobuf.Value.class))) { + continue; + } + Method binderMethod = findBinderMethod(method); + assertWithMessage("Binder for " + method.toString()).that(binderMethod).isNotNull(); + + if (method.getName().toLowerCase().contains("struct")) { + // Struct / Array-of-struct binding methods. + Struct struct = Struct.newBuilder().set("f1").to("abc").build(); + Type structType = struct.getType(); + + if (binderMethod.getName().equals("toStructArray")) { + // Array of structs. 
+ assertThat(binderMethod.getParameterTypes()).hasLength(2); + + Value expected = + (Value) method.invoke(Value.class, structType, Collections.singletonList(struct)); + assertThat(binderMethod.invoke(binder, structType, Collections.singletonList(struct))) + .isEqualTo(lastReturnValue); + assertThat(lastValue).isEqualTo(expected); + + // Test ValueBinder.to(value) + assertThat(binder.to(expected)).isEqualTo(lastReturnValue); + assertThat(lastValue).isEqualTo(expected); + + // Null Array-of-structs + Value expectedNull = (Value) method.invoke(Value.class, structType, null); + assertThat(binderMethod.invoke(binder, structType, null)).isEqualTo(lastReturnValue); + assertThat(lastValue).isEqualTo(expectedNull); + + assertThat(binder.to(expectedNull)).isEqualTo(lastReturnValue); + assertThat(lastValue).isEqualTo(expectedNull); + } else if (binderMethod.getParameterTypes().length == 2) { + // NULL struct. + assertThat(binderMethod.getParameterTypes()[0]).isEqualTo(Type.class); + assertThat(binderMethod.getParameterTypes()[1]).isEqualTo(Struct.class); + + Value expectedNull = (Value) method.invoke(Value.class, structType, null); + assertThat(binderMethod.invoke(binder, structType, null)).isEqualTo(lastReturnValue); + assertThat(lastValue).isEqualTo(expectedNull); + + assertThat(binder.to(expectedNull)).isEqualTo(lastReturnValue); + assertThat(lastValue).isEqualTo(expectedNull); + } else { + // non-NULL struct. + assertThat(binderMethod.getParameterTypes()).hasLength(1); + assertThat(binderMethod.getParameterTypes()[0]).isEqualTo(Struct.class); + + Value expected = (Value) method.invoke(Value.class, struct); + assertThat(binderMethod.invoke(binder, struct)).isEqualTo(lastReturnValue); + assertThat(lastValue).isEqualTo(expected); + + assertThat(binder.to(expected)).isEqualTo(lastReturnValue); + assertThat(lastValue).isEqualTo(expected); + } + } else if (binderMethod.getParameterTypes().length == 1) { + // Test unary null. 
+ if (!binderMethod.getParameterTypes()[0].isPrimitive() + && (!method.getName().equalsIgnoreCase(PROTO_MESSAGE_METHOD_NAME) + && !method.getName().equalsIgnoreCase(PROTO_ENUM_METHOD_NAME))) { + if (method.getName().equalsIgnoreCase(JSON_METHOD_NAME)) { + // Special case for json to change the method from ValueBinder.to(String) to + // ValueBinder.to(Value) + binderMethod = ValueBinder.class.getMethod("to", Value.class); + assertThat(binderMethod.invoke(binder, Value.json(null))).isEqualTo(lastReturnValue); + } else if (method.getName().equalsIgnoreCase(PG_JSONB_METHOD_NAME)) { + binderMethod = ValueBinder.class.getMethod("to", Value.class); + assertThat(binderMethod.invoke(binder, Value.pgJsonb(null))).isEqualTo(lastReturnValue); + } else if (method.getName().equalsIgnoreCase(PG_OID_METHOD_NAME)) { + binderMethod = ValueBinder.class.getMethod("to", Value.class); + assertThat(binderMethod.invoke(binder, Value.pgOid(null))).isEqualTo(lastReturnValue); + } else if (method.getName().equalsIgnoreCase(PG_NUMERIC_METHOD_NAME)) { + binderMethod = ValueBinder.class.getMethod("to", Value.class); + assertThat(binderMethod.invoke(binder, Value.pgNumeric(null))) + .isEqualTo(lastReturnValue); + } else if (method.getName().equalsIgnoreCase(BYTES_BASE64_METHOD_NAME)) { + binderMethod = ValueBinder.class.getMethod("to", Value.class); + assertThat(binderMethod.invoke(binder, Value.bytesFromBase64(null))) + .isEqualTo(lastReturnValue); + } else { + assertThat(binderMethod.invoke(binder, (Object) null)).isEqualTo(lastReturnValue); + } + Value expected = (Value) method.invoke(Value.class, (Object) null); + assertThat(lastValue).isEqualTo(expected); + assertThat(binder.to(expected)).isEqualTo(lastReturnValue); + assertThat(lastValue).isEqualTo(expected); + } + // Test unary non-null. 
+ Object defaultObject; + if (method.getName().equalsIgnoreCase(JSON_METHOD_NAME)) { + defaultObject = defaultJson(); + binderMethod = ValueBinder.class.getMethod("to", Value.class); + assertThat(binderMethod.invoke(binder, Value.json(defaultJson()))) + .isEqualTo(lastReturnValue); + } else if (method.getName().equalsIgnoreCase(PG_JSONB_METHOD_NAME)) { + defaultObject = defaultPgJsonb(); + binderMethod = ValueBinder.class.getMethod("to", Value.class); + assertThat(binderMethod.invoke(binder, Value.pgJsonb(defaultPgJsonb()))) + .isEqualTo(lastReturnValue); + } else if (method.getName().equalsIgnoreCase(PG_OID_METHOD_NAME)) { + defaultObject = defaultLongWrapper(); + binderMethod = ValueBinder.class.getMethod("to", Value.class); + assertThat(binderMethod.invoke(binder, Value.pgOid(defaultLongWrapper()))) + .isEqualTo(lastReturnValue); + } else if (method.getName().equalsIgnoreCase(PG_NUMERIC_METHOD_NAME)) { + defaultObject = DEFAULT_PG_NUMERIC; + binderMethod = ValueBinder.class.getMethod("to", Value.class); + assertThat(binderMethod.invoke(binder, Value.pgNumeric(DEFAULT_PG_NUMERIC))) + .isEqualTo(lastReturnValue); + } else if (method.getName().equalsIgnoreCase(BYTES_BASE64_METHOD_NAME)) { + defaultObject = defaultBytesBase64(); + binderMethod = ValueBinder.class.getMethod("to", Value.class); + assertThat(binderMethod.invoke(binder, Value.bytesFromBase64(defaultBytesBase64()))) + .isEqualTo(lastReturnValue); + } else { + defaultObject = DefaultValues.getDefault(method.getGenericParameterTypes()[0]); + assertThat(binderMethod.invoke(binder, defaultObject)).isEqualTo(lastReturnValue); + } + Value expected = (Value) method.invoke(Value.class, defaultObject); + assertThat(lastValue).isEqualTo(expected); + + assertThat(binder.to(expected)).isEqualTo(lastReturnValue); + assertThat(lastValue).isEqualTo(expected); + } else if (binderMethod.getParameterTypes().length == 2 + && (method.getName().contains(PROTO_MESSAGE_METHOD_NAME) + || 
method.getName().contains(PROTO_ENUM_METHOD_NAME))) { + // Test unary null. + Object firstArgument = null; + if (binderMethod.getParameterTypes()[0].isPrimitive()) { + firstArgument = 0; + } + + Object secondArgument = "com.proto.example"; + if (binderMethod.getParameterTypes()[1] == Descriptor.class) { + secondArgument = SingerInfo.getDescriptor(); + } else if (binderMethod.getParameterTypes()[1] == EnumDescriptor.class) { + secondArgument = Genre.getDescriptor(); + } + assertThat(binderMethod.invoke(binder, firstArgument, secondArgument)) + .isEqualTo(lastReturnValue); + Value expected = (Value) method.invoke(Value.class, firstArgument, secondArgument); + assertThat(lastValue).isEqualTo(expected); + assertThat(binder.to(expected)).isEqualTo(lastReturnValue); + assertThat(lastValue).isEqualTo(expected); + } else { + // Array slice method: depends on DefaultValues returning arrays of length 2. + assertThat(binderMethod.getParameterTypes().length).isEqualTo(3); + assertThat(binderMethod.getParameterTypes()[1]).isEqualTo(int.class); + assertThat(binderMethod.getParameterTypes()[2]).isEqualTo(int.class); + + Value expectedNull = (Value) method.invoke(Value.class, null, -1, -1); + assertThat(binderMethod.invoke(binder, null, 0, 0)).isEqualTo(lastReturnValue); + assertThat(lastValue).isEqualTo(expectedNull); + + Object defaultObject = DefaultValues.getDefault(method.getGenericParameterTypes()[0]); + Value expectedNonNull = (Value) method.invoke(Value.class, defaultObject, 0, 2); + assertThat(binderMethod.invoke(binder, defaultObject, 0, 2)).isEqualTo(lastReturnValue); + assertThat(lastValue).isEqualTo(expectedNonNull); + } + } + } + + static Method findBinderMethod(Method valueMethod) { + String name; + if (valueMethod.getName().contains("Array")) { + // int64Array -> toInt64Array. 
+ name = + "to" + + valueMethod.getName().substring(0, 1).toUpperCase() + + valueMethod.getName().substring(1); + } else { + name = "to"; + } + try { + return ValueBinder.class.getMethod(name, valueMethod.getParameterTypes()); + } catch (NoSuchMethodException e) { + return null; + } + } + + static class DefaultValues { + public static boolean defaultBooleanPrimitive() { + return true; + } + + public static Boolean defaultBooleanWrapper() { + return true; + } + + public static long defaultLongPrimitive() { + return 1234L; + } + + public static Long defaultLongWrapper() { + return 1234L; + } + + public static float defaultFloatPrimitive() { + return 1.0f; + } + + public static Float defaultFloatWrapper() { + return 1.0f; + } + + public static double defaultDoublePrimitive() { + return 1.0; + } + + public static Double defaultDoubleWrapper() { + return 1.0; + } + + public static BigDecimal defaultBigDecimal() { + return BigDecimal.valueOf(123, 2); + } + + public static AbstractMessage defaultAbstractMessage() { + return SingerInfo.newBuilder().setSingerId(323).build(); + } + + public static ProtocolMessageEnum defaultProtocolMessageEnum() { + return Genre.FOLK; + } + + public static String defaultString() { + return "x"; + } + + public static String defaultJson() { + return "{\"color\":\"red\",\"value\":\"#f00\"}"; + } + + public static String defaultPgJsonb() { + return "{\"color\":\"red\",\"value\":\"#f00\"}"; + } + + public static String defaultBytesBase64() { + return Base64.getEncoder().encodeToString("test-bytes".getBytes(StandardCharsets.UTF_8)); + } + + public static ByteArray defaultByteArray() { + return ByteArray.copyFrom(new byte[] {'x'}); + } + + public static Timestamp defaultTimestamp() { + return Timestamp.ofTimeSecondsAndNanos(0, 0); + } + + public static Date defaultDate() { + return Date.fromYearMonthDay(2016, 9, 15); + } + + public static UUID defaultUuid() { + return UUID.fromString("db09330e-cc05-472c-a54e-b2784deebac3"); + } + + public static 
Interval defaultInterval() { + return Interval.parseFromString("P0Y"); + } + + public static boolean[] defaultBooleanArray() { + return new boolean[] {false, true}; + } + + public static Iterable defaultBooleanIterable() { + return Arrays.asList(false, true); + } + + public static long[] defaultLongArray() { + return new long[] {1, 2}; + } + + public static Iterable defaultLongIterable() { + return Arrays.asList(1L, 2L); + } + + public static float[] defaultFloatArray() { + return new float[] {1.0f, 2.0f}; + } + + public static Iterable defaultFloatIterable() { + return Arrays.asList(1.0f, 2.0f); + } + + public static double[] defaultDoubleArray() { + return new double[] {1.0, 2.0}; + } + + public static Iterable defaultDoubleIterable() { + return Arrays.asList(1.0, 2.0); + } + + public static Iterable defaultBigDecimalIterable() { + return Arrays.asList(BigDecimal.valueOf(123, 2), BigDecimal.valueOf(456, 2)); + } + + public static Iterable defaultStringIterable() { + return Arrays.asList("a", "b"); + } + + public static Iterable defaultJsonIterable() { + return Arrays.asList("{}", "[]", "{\"color\":\"red\",\"value\":\"#f00\"}"); + } + + public static Iterable defaultByteArrayIterable() { + return Arrays.asList(ByteArray.copyFrom("x"), ByteArray.copyFrom("y")); + } + + public static Iterable defaultTimestampIterable() { + return Arrays.asList( + Timestamp.ofTimeSecondsAndNanos(0, 0), Timestamp.ofTimeSecondsAndNanos(0, 1)); + } + + public static Iterable defaultDateIterable() { + return Arrays.asList(Date.fromYearMonthDay(2016, 9, 15), Date.fromYearMonthDay(2016, 9, 14)); + } + + public static Iterable defaultUuidIterable() { + return Arrays.asList( + UUID.fromString("8ebe9153-2747-4c92-a462-6da13eb25ebb"), + UUID.fromString("12c154ca-6500-4be0-89c8-160bcfa8c3f6")); + } + + public static Interval[] defaultIntervalArray() { + return new Interval[] { + Interval.builder() + .setMonths(-10) + .setDays(-100) + .setNanos(BigInteger.valueOf(-9999999L)) + .build(), + 
Interval.parseFromString("P0Y"), + Interval.builder().setMonths(10).setDays(100).setNanos(BigInteger.valueOf(9999999L)).build() + }; + } + + public static Iterable defaultIntervalIterable() { + return Arrays.asList( + Interval.builder() + .setMonths(-10) + .setDays(-100) + .setNanos(BigInteger.valueOf(-9999999L)) + .build(), + Interval.parseFromString("P0Y"), + Interval.builder() + .setMonths(10) + .setDays(100) + .setNanos(BigInteger.valueOf(9999999L)) + .build()); + } + + static Object getDefault(java.lang.reflect.Type type) + throws InvocationTargetException, IllegalAccessException { + for (Method method : DefaultValues.class.getMethods()) { + if (method.getName().startsWith("default") + && Modifier.isStatic(method.getModifiers()) + && method.getGenericReturnType().equals(type)) { + return method.invoke(DefaultValues.class); + } + } + throw new AssertionError("Could not find default value for " + type); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ValueTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ValueTest.java new file mode 100644 index 000000000000..17f31434f76b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ValueTest.java @@ -0,0 +1,2986 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.common.testing.SerializableTester.reserializeAndAssert; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AbstractResultSet.LazyByteArray; +import com.google.cloud.spanner.SingerProto.Genre; +import com.google.cloud.spanner.SingerProto.SingerInfo; +import com.google.cloud.spanner.Type.StructField; +import com.google.common.base.Strings; +import com.google.common.collect.ForwardingList; +import com.google.common.collect.Lists; +import com.google.common.testing.EqualsTester; +import com.google.protobuf.ListValue; +import com.google.protobuf.NullValue; +import com.google.protobuf.ProtocolMessageEnum; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import java.io.Serializable; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.nio.charset.StandardCharsets; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.OffsetDateTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Base64; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Random; +import java.util.Set; +import java.util.TimeZone; +import java.util.UUID; +import 
java.util.function.Supplier; +import java.util.stream.Collectors; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link com.google.cloud.spanner.Value}. */ +@RunWith(JUnit4.class) +public class ValueTest { + private static final String NULL_STRING = "NULL"; + + private static ByteArray newByteArray(String data) { + return ByteArray.copyFrom(data); + } + + /** Returns an {@code Iterable} over {@code values} that is not a {@code Collection}. */ + @SafeVarargs + private static Iterable plainIterable(T... values) { + return Lists.newArrayList(values); + } + + @Test + public void untyped() { + com.google.protobuf.Value proto = + com.google.protobuf.Value.newBuilder().setStringValue("test").build(); + Value v = Value.untyped(proto); + assertNull(v.getType()); + assertFalse(v.isNull()); + assertSame(proto, v.toProto()); + assertNotEquals(0, v.hashCode()); + assertEquals(v, Value.untyped(proto)); + + assertEquals( + v, Value.untyped(com.google.protobuf.Value.newBuilder().setStringValue("test").build())); + assertEquals( + Value.untyped(com.google.protobuf.Value.newBuilder().setNumberValue(3.14d).build()), + Value.untyped(com.google.protobuf.Value.newBuilder().setNumberValue(3.14d).build())); + assertEquals( + Value.untyped(com.google.protobuf.Value.newBuilder().setBoolValue(true).build()), + Value.untyped(com.google.protobuf.Value.newBuilder().setBoolValue(true).build())); + + assertNotEquals( + v, Value.untyped(com.google.protobuf.Value.newBuilder().setStringValue("foo").build())); + assertNotEquals( + Value.untyped(com.google.protobuf.Value.newBuilder().setNumberValue(3.14d).build()), + Value.untyped(com.google.protobuf.Value.newBuilder().setNumberValue(0.14d).build())); + assertNotEquals( + Value.untyped(com.google.protobuf.Value.newBuilder().setBoolValue(false).build()), + Value.untyped(com.google.protobuf.Value.newBuilder().setBoolValue(true).build())); + + assertEquals("test", v.getAsString()); + } + 
+ @Test + public void bool() { + Value v = Value.bool(true); + assertThat(v.getType()).isEqualTo(Type.bool()); + assertThat(v.isNull()).isFalse(); + assertThat(v.getBool()).isTrue(); + assertThat(v.toString()).isEqualTo("true"); + assertEquals("true", v.getAsString()); + } + + @Test + public void boolWrapper() { + Value v = Value.bool(Boolean.FALSE); + assertThat(v.getType()).isEqualTo(Type.bool()); + assertThat(v.isNull()).isFalse(); + assertThat(v.getBool()).isFalse(); + assertThat(v.toString()).isEqualTo("false"); + assertEquals("false", v.getAsString()); + } + + @Test + public void boolWrapperNull() { + Value v = Value.bool(null); + assertThat(v.getType()).isEqualTo(Type.bool()); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getBool); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void int64() { + Value v = Value.int64(123); + assertThat(v.getType()).isEqualTo(Type.int64()); + assertThat(v.isNull()).isFalse(); + assertThat(v.getInt64()).isEqualTo(123); + assertThat(v.toString()).isEqualTo("123"); + assertEquals("123", v.getAsString()); + } + + @Test + public void int64TryGetBool() { + Value value = Value.int64(1234); + IllegalStateException e = assertThrows(IllegalStateException.class, value::getBool); + assertThat(e.getMessage()).contains("Expected: BOOL actual: INT64"); + } + + @Test + public void int64NullTryGetBool() { + Value value = Value.int64(null); + IllegalStateException e = assertThrows(IllegalStateException.class, value::getBool); + assertThat(e.getMessage()).contains("Expected: BOOL actual: INT64"); + } + + @Test + public void int64TryGetInt64Array() { + Value value = Value.int64(1234); + IllegalStateException e = assertThrows(IllegalStateException.class, value::getInt64Array); + assertThat(e.getMessage()).contains("Expected: ARRAY actual: INT64"); + } + + @Test + 
public void int64Wrapper() { + Value v = Value.int64(Long.valueOf(123)); + assertThat(v.getType()).isEqualTo(Type.int64()); + assertThat(v.isNull()).isFalse(); + assertThat(v.getInt64()).isEqualTo(123); + assertThat(v.toString()).isEqualTo("123"); + assertEquals("123", v.getAsString()); + } + + @Test + public void int64WrapperNull() { + Value v = Value.int64(null); + assertThat(v.getType()).isEqualTo(Type.int64()); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getInt64); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void float32() { + Value v = Value.float32(1.23f); + assertThat(v.getType()).isEqualTo(Type.float32()); + assertThat(v.isNull()).isFalse(); + assertThat(v.getFloat32()).isWithin(0.0001f).of(1.23f); + assertThat(v.toString()).isEqualTo("1.23"); + assertEquals("1.23", v.getAsString()); + assertEquals(Value.float32(Float.NaN), Value.float32(Float.NaN)); + } + + @Test + public void float32Wrapper() { + Value v = Value.float32(Float.valueOf(1.23f)); + assertThat(v.getType()).isEqualTo(Type.float32()); + assertThat(v.isNull()).isFalse(); + assertThat(v.getFloat32()).isWithin(0.0001f).of(1.23f); + assertThat(v.toString()).isEqualTo("1.23"); + assertEquals("1.23", v.getAsString()); + } + + @Test + public void float32WrapperNull() { + Value v = Value.float32(null); + assertThat(v.getType()).isEqualTo(Type.float32()); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getFloat32); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void float64() { + Value v = Value.float64(1.23); + assertThat(v.getType()).isEqualTo(Type.float64()); + assertThat(v.isNull()).isFalse(); + 
assertThat(v.getFloat64()).isWithin(0.0001).of(1.23); + assertThat(v.toString()).isEqualTo("1.23"); + assertEquals("1.23", v.getAsString()); + assertEquals(Value.float64(Double.NaN), Value.float64(Double.NaN)); + } + + @Test + public void float64Wrapper() { + Value v = Value.float64(Double.valueOf(1.23)); + assertThat(v.getType()).isEqualTo(Type.float64()); + assertThat(v.isNull()).isFalse(); + assertThat(v.getFloat64()).isWithin(0.0001).of(1.23); + assertThat(v.toString()).isEqualTo("1.23"); + assertEquals("1.23", v.getAsString()); + } + + @Test + public void float64WrapperNull() { + Value v = Value.float64(null); + assertThat(v.getType()).isEqualTo(Type.float64()); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getFloat64); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void numeric() { + Value v = Value.numeric(new BigDecimal("1.23")); + assertThat(v.getType()).isEqualTo(Type.numeric()); + assertThat(v.isNull()).isFalse(); + assertThat(v.getNumeric()).isEqualTo(BigDecimal.valueOf(123, 2)); + assertThat(v.toString()).isEqualTo("1.23"); + assertEquals("1.23", v.getAsString()); + } + + @Test + public void pgNumeric() { + final Value value = Value.pgNumeric("1234.5678"); + assertEquals(Type.pgNumeric(), value.getType()); + assertFalse("pgNumeric value should not be null", value.isNull()); + assertEquals("1234.5678", value.getString()); + assertEquals(BigDecimal.valueOf(12345678, 4), value.getNumeric()); + assertEquals(1234.5678D, value.getFloat64(), 0.00001); + assertEquals("1234.5678", value.toString()); + assertEquals("1234.5678", value.getAsString()); + assertEquals(Value.pgNumeric("NaN"), Value.pgNumeric("NaN")); + } + + @Test + public void pgNumericNaN() { + final Value value = Value.pgNumeric("NaN"); + assertEquals(Type.pgNumeric(), value.getType()); + assertFalse("pgNumeric value 
should not be null", value.isNull()); + assertEquals("NaN", value.getString()); + assertThrows(NumberFormatException.class, value::getNumeric); + assertEquals(Double.NaN, value.getFloat64(), 0.00001); + assertEquals("NaN", value.toString()); + assertEquals("NaN", value.getAsString()); + } + + @Test + public void testNumericFormats() { + // The following is copied from the Numeric proto documentation. + // Encoded as `string`, in decimal format or scientific notation format. + //
Decimal format: + //
`[+-]Digits[.[Digits]]` or + //
`[+-][Digits].Digits` + // + // Scientific notation: + //
`[+-]Digits[.[Digits]][ExponentIndicator[+-]Digits]` or + //
`[+-][Digits].Digits[ExponentIndicator[+-]Digits]` + //
(ExponentIndicator is `"e"` or `"E"`) + + // The following is copied from the BigDecimal#toString() documentation. + //

  • There is a one-to-one mapping between the distinguishable + // {@code BigDecimal} values and the result of this conversion. + // That is, every distinguishable {@code BigDecimal} value + // (unscaled value and scale) has a unique string representation + // as a result of using {@code toString}. If that string + // representation is converted back to a {@code BigDecimal} using + // the {@link #BigDecimal(String)} constructor, then the original + // value will be recovered. + // + //
  • The string produced for a given number is always the same; + // it is not affected by locale. This means that it can be used + // as a canonical string representation for exchanging decimal + // data, or as a key for a Hashtable, etc. Locale-sensitive + // number formatting and parsing is handled by the {@link + // java.text.NumberFormat} class and its subclasses. + + // Test that BigDecimal supports all formats that are supported by Cloud Spanner. + assertThat(new BigDecimal("1").toString()).isEqualTo("1"); + assertThat(new BigDecimal("01").toString()).isEqualTo("1"); + assertThat(new BigDecimal("1.").toString()).isEqualTo("1"); + assertThat(new BigDecimal("+1").toString()).isEqualTo("1"); + assertThat(new BigDecimal("+1.").toString()).isEqualTo("1"); + assertThat(new BigDecimal("-1").toString()).isEqualTo("-1"); + assertThat(new BigDecimal("-1.").toString()).isEqualTo("-1"); + + assertThat(new BigDecimal("0.1").toString()).isEqualTo("0.1"); + assertThat(new BigDecimal("00.1").toString()).isEqualTo("0.1"); + assertThat(new BigDecimal(".1").toString()).isEqualTo("0.1"); + assertThat(new BigDecimal("+0.1").toString()).isEqualTo("0.1"); + assertThat(new BigDecimal("+.1").toString()).isEqualTo("0.1"); + assertThat(new BigDecimal("-0.1").toString()).isEqualTo("-0.1"); + assertThat(new BigDecimal("-.1").toString()).isEqualTo("-0.1"); + + assertThat(new BigDecimal("1E+1").toString()).isEqualTo("1E+1"); + assertThat(new BigDecimal("1e+1").toString()).isEqualTo("1E+1"); + assertThat(new BigDecimal("1E1").toString()).isEqualTo("1E+1"); + assertThat(new BigDecimal("1e1").toString()).isEqualTo("1E+1"); + assertThat(new BigDecimal("01E+1").toString()).isEqualTo("1E+1"); + assertThat(new BigDecimal("01e+1").toString()).isEqualTo("1E+1"); + assertThat(new BigDecimal("01E1").toString()).isEqualTo("1E+1"); + assertThat(new BigDecimal("01e1").toString()).isEqualTo("1E+1"); + assertThat(new BigDecimal("1E+01").toString()).isEqualTo("1E+1"); + assertThat(new 
BigDecimal("1e+01").toString()).isEqualTo("1E+1"); + assertThat(new BigDecimal("1E01").toString()).isEqualTo("1E+1"); + assertThat(new BigDecimal("1e01").toString()).isEqualTo("1E+1"); + + assertThat(new BigDecimal("1E-1").toString()).isEqualTo("0.1"); + assertThat(new BigDecimal("1e-1").toString()).isEqualTo("0.1"); + assertThat(new BigDecimal("01E-1").toString()).isEqualTo("0.1"); + assertThat(new BigDecimal("01e-1").toString()).isEqualTo("0.1"); + assertThat(new BigDecimal("1E-01").toString()).isEqualTo("0.1"); + assertThat(new BigDecimal("1e-01").toString()).isEqualTo("0.1"); + } + + @Test + public void numericPrecisionAndScale() { + for (long s : new long[] {1L, -1L}) { + BigDecimal sign = new BigDecimal(s); + assertThat(Value.numeric(new BigDecimal(Strings.repeat("9", 29)).multiply(sign)).toString()) + .isEqualTo((s == -1L ? "-" : "") + Strings.repeat("9", 29)); + SpannerException e1 = + assertThrows( + SpannerException.class, + () -> Value.numeric(new BigDecimal(Strings.repeat("9", 30)).multiply(sign))); + assertThat(e1.getErrorCode()).isEqualTo(ErrorCode.OUT_OF_RANGE); + SpannerException e2 = + assertThrows( + SpannerException.class, + () -> Value.numeric(new BigDecimal("1" + Strings.repeat("0", 29)).multiply(sign))); + assertThat(e2.getErrorCode()).isEqualTo(ErrorCode.OUT_OF_RANGE); + + assertThat( + Value.numeric(new BigDecimal("0." + Strings.repeat("9", 9)).multiply(sign)) + .toString()) + .isEqualTo((s == -1L ? "-" : "") + "0." + Strings.repeat("9", 9)); + assertThat( + Value.numeric(new BigDecimal("0.1" + Strings.repeat("0", 8)).multiply(sign)) + .toString()) + .isEqualTo((s == -1L ? "-" : "") + "0.1" + Strings.repeat("0", 8)); + // Cloud Spanner does not store precision and considers 0.1 to be equal to 0.10. + // 0.100000000000000000000000000 is therefore also a valid value, as it will be capped to 0.1. + assertThat( + Value.numeric(new BigDecimal("0.1" + Strings.repeat("0", 20)).multiply(sign)) + .toString()) + .isEqualTo((s == -1L ? 
"-" : "") + "0.1" + Strings.repeat("0", 20)); + SpannerException e3 = + assertThrows( + SpannerException.class, + () -> Value.numeric(new BigDecimal("0." + Strings.repeat("9", 10)).multiply(sign))); + assertThat(e3.getErrorCode()).isEqualTo(ErrorCode.OUT_OF_RANGE); + + assertThat( + Value.numeric( + new BigDecimal(Strings.repeat("9", 29) + "." + Strings.repeat("9", 9)) + .multiply(sign)) + .toString()) + .isEqualTo( + (s == -1L ? "-" : "") + Strings.repeat("9", 29) + "." + Strings.repeat("9", 9)); + + SpannerException e4 = + assertThrows( + SpannerException.class, + () -> + Value.numeric( + new BigDecimal(Strings.repeat("9", 30) + "." + Strings.repeat("9", 9)) + .multiply(sign))); + assertThat(e4.getErrorCode()).isEqualTo(ErrorCode.OUT_OF_RANGE); + SpannerException e5 = + assertThrows( + SpannerException.class, + () -> + Value.numeric( + new BigDecimal("1" + Strings.repeat("0", 29) + "." + Strings.repeat("9", 9)) + .multiply(sign))); + assertThat(e5.getErrorCode()).isEqualTo(ErrorCode.OUT_OF_RANGE); + + SpannerException e6 = + assertThrows( + SpannerException.class, + () -> + Value.numeric( + new BigDecimal(Strings.repeat("9", 29) + "." + Strings.repeat("9", 10)) + .multiply(sign))); + assertThat(e6.getErrorCode()).isEqualTo(ErrorCode.OUT_OF_RANGE); + SpannerException e7 = + assertThrows( + SpannerException.class, + () -> Value.numeric(new BigDecimal("1." 
+ Strings.repeat("9", 10)).multiply(sign))); + assertThat(e7.getErrorCode()).isEqualTo(ErrorCode.OUT_OF_RANGE); + } + } + + @Test + public void numericNull() { + Value v = Value.numeric(null); + assertThat(v.getType()).isEqualTo(Type.numeric()); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + + IllegalStateException e = assertThrows(IllegalStateException.class, v::getNumeric); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void pgNumericNull() { + final Value value = Value.pgNumeric(null); + assertEquals(Type.pgNumeric(), value.getType()); + assertTrue("pgNumeric value should be null", value.isNull()); + assertEquals(NULL_STRING, value.toString()); + + final IllegalStateException e1 = assertThrows(IllegalStateException.class, value::getString); + assertTrue("exception should mention value is null", e1.getMessage().contains("null value")); + final IllegalStateException e2 = assertThrows(IllegalStateException.class, value::getNumeric); + assertTrue("exception should mention value is null", e2.getMessage().contains("null value")); + final IllegalStateException e3 = assertThrows(IllegalStateException.class, value::getFloat64); + assertTrue("exception should mention value is null", e3.getMessage().contains("null value")); + assertEquals("NULL", value.getAsString()); + } + + @Test + public void pgNumericInvalid() { + final Value value = Value.pgNumeric("INVALID"); + assertEquals(Type.pgNumeric(), value.getType()); + assertFalse("pgNumeric value should not be null", value.isNull()); + assertEquals("INVALID", value.toString()); + + assertEquals("INVALID", value.getString()); + assertThrows(NumberFormatException.class, value::getNumeric); + assertThrows(NumberFormatException.class, value::getFloat64); + assertEquals("INVALID", value.getAsString()); + } + + @Test + public void string() { + Value v = Value.string("abc"); + 
assertThat(v.getType()).isEqualTo(Type.string()); + assertThat(v.isNull()).isFalse(); + assertThat(v.getString()).isEqualTo("abc"); + assertEquals("abc", v.getAsString()); + } + + @Test + public void stringNull() { + Value v = Value.string(null); + assertThat(v.getType()).isEqualTo(Type.string()); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getString); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void stringLong() { + String str = "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeee"; + Value v = Value.string(str); + assertThat(v.getString()).isEqualTo(str); + assertThat(v.toString()).hasLength(36); + assertThat(v.toString()).startsWith(str.substring(0, 36 - 3)); + assertThat(v.toString()).endsWith("..."); + assertEquals(str, v.getAsString()); + } + + @Test + public void json() { + String json = "{\"color\":\"red\",\"value\":\"#f00\"}"; + Value v = Value.json(json); + assertEquals(Type.json(), v.getType()); + assertFalse(v.isNull()); + assertEquals(json, v.getJson()); + assertEquals(json, v.getString()); + assertEquals(json, v.getAsString()); + } + + @Test + public void jsonNull() { + Value v = Value.json(null); + assertEquals(Type.json(), v.getType()); + assertTrue(v.isNull()); + assertEquals(NULL_STRING, v.toString()); + assertThrowsWithMessage(v::getJson, "null value"); + assertThrowsWithMessage(v::getString, "null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void jsonEmpty() { + String json = "{}"; + Value v = Value.json(json); + assertEquals(json, v.getJson()); + assertEquals(json, v.getAsString()); + } + + @Test + public void jsonWithEmptyArray() { + String json = "[]"; + Value v = Value.json(json); + assertEquals(json, v.getJson()); + assertEquals(json, v.getAsString()); + } + + @Test + public void jsonWithArray() { + String json = + 
"[{\"color\":\"red\",\"value\":\"#f00\"},{\"color\":\"green\",\"value\":\"#0f0\"},{\"color\":\"blue\",\"value\":\"#00f\"},{\"color\":\"cyan\",\"value\":\"#0ff\"},{\"color\":\"magenta\",\"value\":\"#f0f\"},{\"color\":\"yellow\",\"value\":\"#ff0\"},{\"color\":\"black\",\"value\":\"#000\"}]"; + Value v = Value.json(json); + assertEquals(json, v.getJson()); + assertEquals(json, v.getAsString()); + } + + @Test + public void jsonNested() { + String json = + "[{\"id\":\"0001\",\"type\":\"donut\",\"name\":\"Cake\",\"ppu\":0.55,\"batters\":{\"batter\":[{\"id\":\"1001\",\"type\":\"Regular\"},{\"id\":\"1002\",\"type\":\"Chocolate\"},{\"id\":\"1003\",\"type\":\"Blueberry\"},{\"id\":\"1004\",\"type\":\"Devil's" + + " Food\"}]},\"topping\":[{\"id\":\"5001\",\"type\":\"None\"},{\"id\":\"5002\",\"type\":\"Glazed\"},{\"id\":\"5005\",\"type\":\"Sugar\"},{\"id\":\"5007\",\"type\":\"Powdered" + + " Sugar\"},{\"id\":\"5006\",\"type\":\"Chocolate with" + + " Sprinkles\"},{\"id\":\"5003\",\"type\":\"Chocolate\"},{\"id\":\"5004\",\"type\":\"Maple\"}]},{\"id\":\"0002\",\"type\":\"donut\",\"name\":\"Raised\",\"ppu\":0.55,\"batters\":{\"batter\":[{\"id\":\"1001\",\"type\":\"Regular\"}]},\"topping\":[{\"id\":\"5001\",\"type\":\"None\"},{\"id\":\"5002\",\"type\":\"Glazed\"},{\"id\":\"5005\",\"type\":\"Sugar\"},{\"id\":\"5003\",\"type\":\"Chocolate\"},{\"id\":\"5004\",\"type\":\"Maple\"}]},{\"id\":\"0003\",\"type\":\"donut\",\"name\":\"Old" + + " Fashioned\",\"ppu\":0.55,\"batters\":{\"batter\":[{\"id\":\"1001\",\"type\":\"Regular\"},{\"id\":\"1002\",\"type\":\"Chocolate\"}]},\"topping\":[{\"id\":\"5001\",\"type\":\"None\"},{\"id\":\"5002\",\"type\":\"Glazed\"},{\"id\":\"5003\",\"type\":\"Chocolate\"},{\"id\":\"5004\",\"type\":\"Maple\"}]}]"; + Value v = Value.json(json); + assertEquals(json, v.getJson()); + assertEquals(json, v.getAsString()); + } + + @Test + public void testPgJsonb() { + String json = "{\"color\":\"red\",\"value\":\"#f00\"}"; + Value v = Value.pgJsonb(json); + 
assertEquals(Type.pgJsonb(), v.getType()); + assertFalse(v.isNull()); + assertEquals(json, v.getPgJsonb()); + assertEquals(json, v.getString()); + assertEquals(json, v.getAsString()); + } + + @Test + public void testPgJsonbNull() { + Value v = Value.pgJsonb(null); + assertEquals(Type.pgJsonb(), v.getType()); + assertTrue(v.isNull()); + assertEquals(NULL_STRING, v.toString()); + assertThrowsWithMessage(v::getPgJsonb, "null value"); + assertThrowsWithMessage(v::getString, "null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void testPgJsonbEmpty() { + String json = "{}"; + Value v = Value.pgJsonb(json); + assertEquals(json, v.getPgJsonb()); + assertEquals(json, v.getAsString()); + } + + @Test + public void testPgJsonbWithEmptyArray() { + String json = "[]"; + Value v = Value.pgJsonb(json); + assertEquals(json, v.getPgJsonb()); + assertEquals(json, v.getAsString()); + } + + @Test + public void testPgJsonbWithArray() { + String json = + "[{\"color\":\"red\",\"value\":\"#f00\"},{\"color\":\"green\",\"value\":\"#0f0\"},{\"color\":\"blue\",\"value\":\"#00f\"},{\"color\":\"cyan\",\"value\":\"#0ff\"},{\"color\":\"magenta\",\"value\":\"#f0f\"},{\"color\":\"yellow\",\"value\":\"#ff0\"},{\"color\":\"black\",\"value\":\"#000\"}]"; + Value v = Value.pgJsonb(json); + assertEquals(json, v.getPgJsonb()); + assertEquals(json, v.getAsString()); + } + + @Test + public void testPgJsonbNested() { + String json = + "[{\"id\":\"0001\",\"type\":\"donut\",\"name\":\"Cake\",\"ppu\":0.55,\"batters\":{\"batter\":[{\"id\":\"1001\",\"type\":\"Regular\"},{\"id\":\"1002\",\"type\":\"Chocolate\"},{\"id\":\"1003\",\"type\":\"Blueberry\"},{\"id\":\"1004\",\"type\":\"Devil's" + + " Food\"}]},\"topping\":[{\"id\":\"5001\",\"type\":\"None\"},{\"id\":\"5002\",\"type\":\"Glazed\"},{\"id\":\"5005\",\"type\":\"Sugar\"},{\"id\":\"5007\",\"type\":\"Powdered" + + " Sugar\"},{\"id\":\"5006\",\"type\":\"Chocolate with" + + " 
Sprinkles\"},{\"id\":\"5003\",\"type\":\"Chocolate\"},{\"id\":\"5004\",\"type\":\"Maple\"}]},{\"id\":\"0002\",\"type\":\"donut\",\"name\":\"Raised\",\"ppu\":0.55,\"batters\":{\"batter\":[{\"id\":\"1001\",\"type\":\"Regular\"}]},\"topping\":[{\"id\":\"5001\",\"type\":\"None\"},{\"id\":\"5002\",\"type\":\"Glazed\"},{\"id\":\"5005\",\"type\":\"Sugar\"},{\"id\":\"5003\",\"type\":\"Chocolate\"},{\"id\":\"5004\",\"type\":\"Maple\"}]},{\"id\":\"0003\",\"type\":\"donut\",\"name\":\"Old" + + " Fashioned\",\"ppu\":0.55,\"batters\":{\"batter\":[{\"id\":\"1001\",\"type\":\"Regular\"},{\"id\":\"1002\",\"type\":\"Chocolate\"}]},\"topping\":[{\"id\":\"5001\",\"type\":\"None\"},{\"id\":\"5002\",\"type\":\"Glazed\"},{\"id\":\"5003\",\"type\":\"Chocolate\"},{\"id\":\"5004\",\"type\":\"Maple\"}]}]"; + Value v = Value.pgJsonb(json); + assertEquals(json, v.getPgJsonb()); + assertEquals(json, v.getAsString()); + } + + @Test + public void testPgOid() { + Value v = Value.pgOid(Long.valueOf(123)); + assertThat(v.getType()).isEqualTo(Type.pgOid()); + assertThat(v.isNull()).isFalse(); + assertThat(v.getInt64()).isEqualTo(123); + assertThat(v.toString()).isEqualTo("123"); + assertEquals("123", v.getAsString()); + } + + @Test + public void testPgOidNull() { + Value v = Value.pgOid(null); + assertThat(v.getType()).isEqualTo(Type.pgOid()); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getInt64); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void bytes() { + ByteArray bytes = newByteArray("abc"); + Value v = Value.bytes(bytes); + assertThat(v.getType()).isEqualTo(Type.bytes()); + assertThat(v.isNull()).isFalse(); + assertThat(v.getBytes()).isSameInstanceAs(bytes); + assertThat(v.toString()).isEqualTo(bytes.toBase64()); + assertEquals(Base64.getEncoder().encodeToString(bytes.toByteArray()), v.getAsString()); + } + + 
@Test + public void bytesUnprintable() { + ByteArray bytes = ByteArray.copyFrom(new byte[] {'a', 0, 15, -1, 'e'}); + Value v = Value.bytes(bytes); + assertThat(v.getBytes()).isSameInstanceAs(bytes); + assertThat(v.toString()).isEqualTo(bytes.toBase64()); + assertEquals(Base64.getEncoder().encodeToString(bytes.toByteArray()), v.getAsString()); + } + + @Test + public void bytesNull() { + Value v = Value.bytes(null); + assertThat(v.getType()).isEqualTo(Type.bytes()); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getBytes); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void timestamp() { + String timestamp = "2016-09-15T00:00:00Z"; + Timestamp t = Timestamp.parseTimestamp(timestamp); + Value v = Value.timestamp(t); + assertThat(v.getType()).isEqualTo(Type.timestamp()); + assertThat(v.isNull()).isFalse(); + assertThat(v.isCommitTimestamp()).isFalse(); + assertThat(v.getTimestamp()).isSameInstanceAs(t); + assertThat(v.toString()).isEqualTo(timestamp); + assertEquals(timestamp, v.getAsString()); + } + + @Test + public void timestampNull() { + Value v = Value.timestamp(null); + assertThat(v.getType()).isEqualTo(Type.timestamp()); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + assertThat(v.isCommitTimestamp()).isFalse(); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getTimestamp); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void commitTimestamp() { + Value v = Value.timestamp(Value.COMMIT_TIMESTAMP); + assertThat(v.getType()).isEqualTo(Type.timestamp()); + assertThat(v.isNull()).isFalse(); + assertThat(v.isCommitTimestamp()).isTrue(); + assertThat(v.toString()).isEqualTo("spanner.commit_timestamp()"); + assertThat(v.toProto()) + .isEqualTo( + 
com.google.protobuf.Value.newBuilder() + .setStringValue("spanner.commit_timestamp()") + .build()); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getTimestamp); + assertThat(e.getMessage()).contains("Commit timestamp value"); + assertEquals("spanner.commit_timestamp()", v.getAsString()); + } + + @Test + public void date() { + String date = "2016-09-15"; + Date t = Date.parseDate(date); + Value v = Value.date(t); + assertThat(v.getType()).isEqualTo(Type.date()); + assertThat(v.isNull()).isFalse(); + assertThat(v.getDate()).isSameInstanceAs(t); + assertThat(v.toString()).isEqualTo(date); + assertEquals(date, v.getAsString()); + } + + @Test + public void dateNull() { + Value v = Value.date(null); + assertThat(v.getType()).isEqualTo(Type.date()); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getDate); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void uuid() { + UUID uuid = UUID.randomUUID(); + Value v = Value.uuid(uuid); + assertThat(v.getType()).isEqualTo(Type.uuid()); + assertThat(v.isNull()).isFalse(); + assertThat(v.getUuid()).isSameInstanceAs(uuid); + assertThat(v.toString()).isEqualTo(uuid.toString()); + assertEquals(uuid.toString(), v.getAsString()); + } + + @Test + public void uuidNull() { + Value v = Value.uuid(null); + assertThat(v.getType()).isEqualTo(Type.uuid()); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getUuid); + } + + public void interval() { + String interval = "P1Y2M3DT67H45M5.123478678S"; + Interval t = Interval.parseFromString(interval); + Value v = Value.interval(t); + assertThat(v.getType()).isEqualTo(Type.interval()); + assertThat(v.isNull()).isFalse(); + assertThat(v.getInterval()).isSameInstanceAs(t); + 
assertThat(v.toString()).isEqualTo(interval); + assertEquals(interval, v.getAsString()); + } + + @Test + public void intervalNull() { + Value v = Value.interval(null); + assertThat(v.getType()).isEqualTo(Type.interval()); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getInterval); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void protoMessage() { + SingerInfo singerInfo = SingerInfo.newBuilder().setSingerId(111).setGenre(Genre.FOLK).build(); + Value v = Value.protoMessage(singerInfo); + assertThat(v.getType()).isEqualTo(Type.proto(SingerInfo.getDescriptor().getFullName())); + assertThat(v.isNull()).isFalse(); + assertThat(v.getProtoMessage(SingerInfo.getDefaultInstance())).isEqualTo(singerInfo); + assertThat(v.getBytes().toByteArray()).isEqualTo(singerInfo.toByteArray()); + } + + @Test + public void protoMessageNull() { + Value v = Value.protoMessage(null, SingerInfo.getDescriptor().getFullName()); + assertThat(v.getType()).isEqualTo(Type.proto(SingerInfo.getDescriptor().getFullName())); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = + assertThrows( + IllegalStateException.class, + () -> { + v.getProtoMessage(SingerInfo.getDefaultInstance()); + }); + assertThat(e.getMessage()).contains("null value"); + } + + @Test + public void protoEnum() { + Genre genre = Genre.FOLK; + Value v = Value.protoEnum(genre); + assertThat(v.getType()).isEqualTo(Type.protoEnum(Genre.getDescriptor().getFullName())); + assertThat(v.isNull()).isFalse(); + assertThat(v.getInt64()).isEqualTo(genre.getNumber()); + assertEquals(genre, v.getProtoEnum(Genre::forNumber)); + } + + @Test + public void protoEnumNull() { + Value v = Value.protoEnum(null, Genre.getDescriptor().getFullName()); + 
assertThat(v.getType()).isEqualTo(Type.protoEnum(Genre.getDescriptor().getFullName())); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = + assertThrows( + IllegalStateException.class, + () -> { + v.getProtoEnum(Genre::forNumber); + }); + assertThat(e.getMessage()).contains("null value"); + } + + @Test + public void boolArray() { + Value v = Value.boolArray(new boolean[] {true, false}); + assertThat(v.isNull()).isFalse(); + assertThat(v.getBoolArray()).containsExactly(true, false).inOrder(); + assertThat(v.toString()).isEqualTo("[true,false]"); + assertEquals("[true,false]", v.getAsString()); + } + + @Test + public void boolArrayRange() { + Value v = Value.boolArray(new boolean[] {true, false, false, true, false}, 1, 3); + assertThat(v.isNull()).isFalse(); + assertThat(v.getBoolArray()).containsExactly(false, false, true).inOrder(); + assertThat(v.toString()).isEqualTo("[false,false,true]"); + assertEquals("[false,false,true]", v.getAsString()); + } + + @Test + public void boolArrayNull() { + Value v = Value.boolArray((boolean[]) null); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getBoolArray); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void boolArrayFromList() { + Value v = Value.boolArray(Arrays.asList(true, null, false)); + assertThat(v.isNull()).isFalse(); + assertThat(v.getBoolArray()).containsExactly(true, null, false).inOrder(); + assertThat(v.toString()).isEqualTo("[true,NULL,false]"); + assertEquals("[true,NULL,false]", v.getAsString()); + } + + @Test + public void boolArrayFromListNull() { + Value v = Value.boolArray((Iterable) null); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getBoolArray); + 
assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void boolArrayFromPlainIterable() { + // Test to ensure that PrimitiveArrayFactory understands how to create an appropriate backing + // arrays from various sizes of plain Iterable input. This test also covers the code paths + // used by int64Array() and float64Array(). + for (int i = 0; i < 50; ++i) { + Boolean[] data = new Boolean[i]; + for (int j = 0; j < data.length; ++j) { + data[j] = (j % 3 == 2) ? null : ((j % 3) == 1); + } + String name = "boolArray() of length " + i; + Value v = Value.boolArray(plainIterable(data)); + assertWithMessage(name).that(v.isNull()).isFalse(); + assertWithMessage(name).that(v.getBoolArray()).containsExactly((Object[]) data).inOrder(); + assertEquals( + Arrays.stream(data) + .map(element -> String.valueOf(element).replace("null", "NULL")) + .collect(Collectors.joining(",", "[", "]")), + v.getAsString()); + } + } + + @Test + public void boolArrayTryGetInt64Array() { + Value value = Value.boolArray(Collections.singletonList(true)); + IllegalStateException e = assertThrows(IllegalStateException.class, value::getInt64Array); + assertThat(e.getMessage()).contains("Expected: ARRAY actual: ARRAY"); + } + + @Test + public void int64Array() { + Value v = Value.int64Array(new long[] {1, 2}); + assertThat(v.isNull()).isFalse(); + assertThat(v.getInt64Array()).containsExactly(1L, 2L).inOrder(); + assertThat(v.toString()).isEqualTo("[1,2]"); + assertEquals("[1,2]", v.getAsString()); + } + + @Test + public void int64ArrayRange() { + Value v = Value.int64Array(new long[] {1, 2, 3, 4, 5}, 1, 3); + assertThat(v.isNull()).isFalse(); + assertThat(v.getInt64Array()).containsExactly(2L, 3L, 4L).inOrder(); + assertThat(v.toString()).isEqualTo("[2,3,4]"); + assertEquals("[2,3,4]", v.getAsString()); + } + + @Test + public void int64ArrayNull() { + Value v = Value.int64Array((long[]) null); + assertThat(v.isNull()).isTrue(); + 
assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getInt64Array); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void int64ArrayWrapper() { + Value v = Value.int64Array(Arrays.asList(1L, null, 3L)); + assertThat(v.isNull()).isFalse(); + assertThat(v.getInt64Array()).containsExactly(1L, null, 3L).inOrder(); + assertThat(v.toString()).isEqualTo("[1,NULL,3]"); + assertEquals("[1,NULL,3]", v.getAsString()); + } + + @Test + public void int64ArrayWrapperNull() { + Value v = Value.int64Array((Iterable) null); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getInt64Array); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void int64ArrayTryGetBool() { + Value value = Value.int64Array(Collections.singletonList(1234L)); + IllegalStateException e = assertThrows(IllegalStateException.class, value::getBool); + assertThat(e.getMessage()).contains("Expected: BOOL actual: ARRAY"); + } + + @Test + public void int64ArrayNullTryGetBool() { + Value value = Value.int64Array((Iterable) null); + IllegalStateException e = assertThrows(IllegalStateException.class, value::getBool); + assertThat(e.getMessage()).contains("Expected: BOOL actual: ARRAY"); + } + + @Test + public void float32Array() { + Value v = Value.float32Array(new float[] {.1f, .2f}); + assertThat(v.isNull()).isFalse(); + assertThat(v.getFloat32Array()).containsExactly(.1f, .2f).inOrder(); + assertThat(v.toString()).isEqualTo("[0.1,0.2]"); + assertEquals("[0.1,0.2]", v.getAsString()); + } + + @Test + public void float32ArrayRange() { + Value v = Value.float32Array(new float[] {.1f, .2f, .3f, .4f, .5f}, 1, 3); + assertThat(v.isNull()).isFalse(); + assertThat(v.getFloat32Array()).containsExactly(.2f, .3f, 
.4f).inOrder(); + assertThat(v.toString()).isEqualTo("[0.2,0.3,0.4]"); + assertEquals("[0.2,0.3,0.4]", v.getAsString()); + } + + @Test + public void float32ArrayNull() { + Value v = Value.float32Array((float[]) null); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getFloat32Array); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void float32ArrayWrapper() { + Value v = Value.float32Array(Arrays.asList(.1f, null, .3f)); + assertThat(v.isNull()).isFalse(); + assertThat(v.getFloat32Array()).containsExactly(.1f, null, .3f).inOrder(); + assertThat(v.toString()).isEqualTo("[0.1,NULL,0.3]"); + assertEquals("[0.1,NULL,0.3]", v.getAsString()); + } + + @Test + public void float32ArrayWrapperNull() { + Value v = Value.float32Array((Iterable) null); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getFloat32Array); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void float32ArrayTryGetFloat64Array() { + Value value = Value.float32Array(Collections.singletonList(.1f)); + IllegalStateException e = assertThrows(IllegalStateException.class, value::getFloat64Array); + assertThat(e.getMessage()).contains("Expected: ARRAY actual: ARRAY"); + } + + @Test + public void float64Array() { + Value v = Value.float64Array(new double[] {.1, .2}); + assertThat(v.isNull()).isFalse(); + assertThat(v.getFloat64Array()).containsExactly(.1d, .2d).inOrder(); + assertThat(v.toString()).isEqualTo("[0.1,0.2]"); + assertEquals("[0.1,0.2]", v.getAsString()); + } + + @Test + public void float64ArrayRange() { + Value v = Value.float64Array(new double[] {.1, .2, .3, .4, .5}, 1, 3); + assertThat(v.isNull()).isFalse(); + 
assertThat(v.getFloat64Array()).containsExactly(.2d, .3d, .4d).inOrder(); + assertThat(v.toString()).isEqualTo("[0.2,0.3,0.4]"); + assertEquals("[0.2,0.3,0.4]", v.getAsString()); + } + + @Test + public void float64ArrayNull() { + Value v = Value.float64Array((double[]) null); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getFloat64Array); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void float64ArrayWrapper() { + Value v = Value.float64Array(Arrays.asList(.1, null, .3)); + assertThat(v.isNull()).isFalse(); + assertThat(v.getFloat64Array()).containsExactly(.1d, null, .3d).inOrder(); + assertThat(v.toString()).isEqualTo("[0.1,NULL,0.3]"); + assertEquals("[0.1,NULL,0.3]", v.getAsString()); + } + + @Test + public void float64ArrayWrapperNull() { + Value v = Value.float64Array((Iterable) null); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getFloat64Array); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void float64ArrayTryGetInt64Array() { + Value value = Value.float64Array(Collections.singletonList(.1)); + IllegalStateException e = assertThrows(IllegalStateException.class, value::getInt64Array); + assertThat(e.getMessage()).contains("Expected: ARRAY actual: ARRAY"); + } + + @Test + public void numericArray() { + Value v = + Value.numericArray(Arrays.asList(BigDecimal.valueOf(1, 1), null, BigDecimal.valueOf(3, 1))); + assertThat(v.isNull()).isFalse(); + assertThat(v.getNumericArray()) + .containsExactly(new BigDecimal("0.1"), null, new BigDecimal("0.3")) + .inOrder(); + assertThat(v.toString()).isEqualTo("[0.1,NULL,0.3]"); + assertEquals("[0.1,NULL,0.3]", v.getAsString()); + } + + @Test + public void 
pgNumericArray() { + final Value value = Value.pgNumericArray(Arrays.asList("1.23", null, "1.24")); + assertFalse("pgNumericArray value should not be null", value.isNull()); + assertEquals(Arrays.asList("1.23", null, "1.24"), value.getStringArray()); + assertEquals( + Arrays.asList(new BigDecimal("1.23"), null, new BigDecimal("1.24")), + value.getNumericArray()); + final List float64Array = value.getFloat64Array(); + assertEquals(1.23D, float64Array.get(0), 0.001); + assertNull(float64Array.get(1)); + assertEquals(1.24D, float64Array.get(2), 0.001); + assertEquals("[1.23,NULL,1.24]", value.getAsString()); + } + + @Test + public void pgNumericArrayWithNaNs() { + final Value value = Value.pgNumericArray(Arrays.asList("1.23", null, Value.NAN)); + assertFalse("pgNumericArray value should not be null", value.isNull()); + assertEquals(Arrays.asList("1.23", null, "NaN"), value.getStringArray()); + assertThrows(NumberFormatException.class, value::getNumericArray); + final List float64Array = value.getFloat64Array(); + assertEquals(1.23D, float64Array.get(0), 0.001); + assertNull(float64Array.get(1)); + assertEquals(Double.NaN, float64Array.get(2), 0.001); + assertEquals("[1.23,NULL,NaN]", value.getAsString()); + } + + @Test + public void numericArrayNull() { + Value v = Value.numericArray(null); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + + IllegalStateException e = assertThrows(IllegalStateException.class, v::getNumericArray); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void pgNumericArrayNull() { + final Value value = Value.pgNumericArray(null); + assertTrue("pgNumericArray value should be null", value.isNull()); + assertEquals(NULL_STRING, value.toString()); + + final IllegalStateException e1 = + assertThrows(IllegalStateException.class, value::getStringArray); + assertTrue("exception should mention value is null", e1.getMessage().contains("null value")); 
+ final IllegalStateException e2 = + assertThrows(IllegalStateException.class, value::getNumericArray); + assertTrue("exception should mention value is null", e2.getMessage().contains("null value")); + final IllegalStateException e3 = + assertThrows(IllegalStateException.class, value::getFloat64Array); + assertTrue("exception should mention value is null", e3.getMessage().contains("null value")); + assertEquals("NULL", value.getAsString()); + } + + @Test + public void numericArrayTryGetInt64Array() { + Value value = Value.numericArray(Collections.singletonList(BigDecimal.valueOf(1, 1))); + + IllegalStateException e = assertThrows(IllegalStateException.class, value::getInt64Array); + assertThat(e.getMessage()).contains("Expected: ARRAY actual: ARRAY"); + } + + @Test + public void pgNumericArrayTryGetInt64Array() { + final Value value = Value.pgNumericArray(Collections.singletonList("1.23")); + + final IllegalStateException e = assertThrows(IllegalStateException.class, value::getInt64Array); + assertTrue( + "exception should mention type expectation", + e.getMessage().contains("Expected: ARRAY actual: ARRAY>")); + } + + @Test + public void stringArray() { + Value v = Value.stringArray(Arrays.asList("a", null, "c")); + assertThat(v.isNull()).isFalse(); + assertThat(v.getStringArray()).containsExactly("a", null, "c").inOrder(); + assertThat(v.toString()).isEqualTo("[a,NULL,c]"); + assertEquals("[a,NULL,c]", v.getAsString()); + } + + @Test + public void stringArrayNull() { + Value v = Value.stringArray(null); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getStringArray); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void stringArrayTryGetBytesArray() { + Value value = Value.stringArray(Collections.singletonList("a")); + IllegalStateException e = assertThrows(IllegalStateException.class, 
value::getBytesArray); + assertThat(e.getMessage()).contains("Expected: ARRAY actual: ARRAY"); + } + + @Test + public void jsonArray() { + String one = "{}"; + String two = null; + String three = "{\"color\":\"red\",\"value\":\"#f00\"}"; + Value v = Value.jsonArray(Arrays.asList(one, two, three)); + assertFalse(v.isNull()); + assertArrayEquals(new String[] {one, two, three}, v.getJsonArray().toArray()); + assertEquals("[{},NULL,{\"color\":\"red\",\"value\":\"#f00\"}]", v.toString()); + assertArrayEquals(new String[] {one, two, three}, v.getStringArray().toArray()); + assertEquals("[{},NULL,{\"color\":\"red\",\"value\":\"#f00\"}]", v.getAsString()); + } + + @Test + public void jsonArrayNull() { + Value v = Value.jsonArray(null); + assertTrue(v.isNull()); + assertEquals(NULL_STRING, v.toString()); + assertThrowsWithMessage(v::getJsonArray, "null value"); + assertThrowsWithMessage(v::getStringArray, "null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void jsonArrayTryGetBytesArray() { + Value value = Value.jsonArray(Collections.singletonList("{}")); + assertThrowsWithMessage(value::getBytesArray, "Expected: ARRAY actual: ARRAY"); + } + + @Test + public void jsonArrayTryGetFloat64Array() { + Value value = Value.jsonArray(Collections.singletonList("{}")); + assertThrowsWithMessage(value::getFloat64Array, "Expected: ARRAY actual: ARRAY"); + } + + @Test + public void testPgJsonbArray() { + String one = "{}"; + String two = null; + String three = "{\"color\":\"red\",\"value\":\"#f00\"}"; + Value v = Value.pgJsonbArray(Arrays.asList(one, two, three)); + assertFalse(v.isNull()); + assertArrayEquals(new String[] {one, two, three}, v.getPgJsonbArray().toArray()); + assertEquals("[{},NULL,{\"color\":\"red\",\"value\":\"#f00\"}]", v.toString()); + assertArrayEquals(new String[] {one, two, three}, v.getStringArray().toArray()); + assertEquals("[{},NULL,{\"color\":\"red\",\"value\":\"#f00\"}]", v.getAsString()); + } + + @Test + public void 
testPgJsonbArrayNull() { + Value v = Value.pgJsonbArray(null); + assertTrue(v.isNull()); + assertEquals(NULL_STRING, v.toString()); + assertThrowsWithMessage(v::getPgJsonbArray, "null value"); + assertThrowsWithMessage(v::getStringArray, "null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void testPgJsonbArrayTryGetBytesArray() { + Value value = Value.pgJsonbArray(Collections.singletonList("{}")); + assertThrowsWithMessage( + value::getBytesArray, "Expected: ARRAY actual: ARRAY>"); + } + + @Test + public void testPgJsonbArrayTryGetFloat64Array() { + Value value = Value.pgJsonbArray(Collections.singletonList("{}")); + assertThrowsWithMessage( + value::getFloat64Array, "Expected: ARRAY actual: ARRAY>"); + } + + @Test + public void testPgOidArray() { + Value v = Value.pgOidArray(new long[] {1, 2}); + assertThat(v.isNull()).isFalse(); + assertThat(v.getInt64Array()).containsExactly(1L, 2L).inOrder(); + assertThat(v.toString()).isEqualTo("[1,2]"); + assertEquals("[1,2]", v.getAsString()); + } + + @Test + public void testPgOidArrayRange() { + Value v = Value.pgOidArray(new long[] {1, 2, 3, 4, 5}, 1, 3); + assertThat(v.isNull()).isFalse(); + assertThat(v.getInt64Array()).containsExactly(2L, 3L, 4L).inOrder(); + assertThat(v.toString()).isEqualTo("[2,3,4]"); + assertEquals("[2,3,4]", v.getAsString()); + } + + @Test + public void pgOidArrayNull() { + Value v = Value.pgOidArray((long[]) null); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getInt64Array); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void testPgOidArrayWrapper() { + Value v = Value.pgOidArray(Arrays.asList(1L, null, 3L)); + assertThat(v.isNull()).isFalse(); + assertThat(v.getInt64Array()).containsExactly(1L, null, 3L).inOrder(); + assertThat(v.toString()).isEqualTo("[1,NULL,3]"); + 
assertEquals("[1,NULL,3]", v.getAsString()); + } + + @Test + public void testPgOidArrayWrapperNull() { + Value v = Value.pgOidArray((Iterable) null); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getInt64Array); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void testPgOidArrayTryGetBool() { + Value value = Value.pgOidArray(Collections.singletonList(1234L)); + IllegalStateException e = assertThrows(IllegalStateException.class, value::getBool); + assertThat(e.getMessage()).contains("Expected: BOOL actual: ARRAY>"); + } + + @Test + public void testPgOidArrayNullTryGetBool() { + Value value = Value.pgOidArray((Iterable) null); + IllegalStateException e = assertThrows(IllegalStateException.class, value::getBoolArray); + assertThat(e.getMessage()).contains("Expected: ARRAY actual: ARRAY>"); + } + + @Test + public void bytesArray() { + ByteArray a = newByteArray("a"); + ByteArray c = newByteArray("c"); + Value v = Value.bytesArray(Arrays.asList(a, null, c)); + assertThat(v.isNull()).isFalse(); + assertThat(v.getBytesArray()).containsExactly(a, null, c).inOrder(); + assertThat(v.toString()) + .isEqualTo( + String.format( + "[%s,NULL,%s]", + Base64.getEncoder().encodeToString("a".getBytes(StandardCharsets.UTF_8)), + Base64.getEncoder().encodeToString("c".getBytes(StandardCharsets.UTF_8)))); + assertEquals( + String.format( + "[%s,NULL,%s]", + Base64.getEncoder().encodeToString("a".getBytes(StandardCharsets.UTF_8)), + Base64.getEncoder().encodeToString("c".getBytes(StandardCharsets.UTF_8))), + v.getAsString()); + } + + @Test + public void bytesArrayNull() { + Value v = Value.bytesArray(null); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getBytesArray); + 
assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void bytesArrayTryGetStringArray() { + Value value = Value.bytesArray(Collections.singletonList(newByteArray("a"))); + IllegalStateException e = assertThrows(IllegalStateException.class, value::getStringArray); + assertThat(e.getMessage()).contains("Expected: ARRAY actual: ARRAY"); + } + + @Test + public void timestampArray() { + String t1 = "2015-09-15T00:00:00Z"; + String t2 = "2015-09-14T00:00:00Z"; + Value v = + Value.timestampArray( + Arrays.asList(Timestamp.parseTimestamp(t1), null, Timestamp.parseTimestamp(t2))); + assertThat(v.isNull()).isFalse(); + assertThat(v.getTimestampArray()) + .containsExactly(Timestamp.parseTimestamp(t1), null, Timestamp.parseTimestamp(t2)) + .inOrder(); + assertThat(v.toString()).isEqualTo("[" + t1 + ",NULL," + t2 + "]"); + assertEquals(String.format("[%s,NULL,%s]", t1, t2), v.getAsString()); + } + + @Test + public void timestampArrayNull() { + Value v = Value.timestampArray(null); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getTimestampArray); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void dateArray() { + String d1 = "2016-09-15"; + String d2 = "2016-09-14"; + + Value v = Value.dateArray(Arrays.asList(Date.parseDate(d1), null, Date.parseDate(d2))); + assertThat(v.isNull()).isFalse(); + assertThat(v.getDateArray()) + .containsExactly(Date.parseDate(d1), null, Date.parseDate(d2)) + .inOrder(); + assertThat(v.toString()).isEqualTo("[" + d1 + ",NULL," + d2 + "]"); + assertEquals(String.format("[%s,NULL,%s]", d1, d2), v.getAsString()); + } + + @Test + public void dateArrayNull() { + Value v = Value.dateArray(null); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = 
assertThrows(IllegalStateException.class, v::getDateArray); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void uuidArray() { + UUID uuid1 = UUID.randomUUID(); + UUID uuid2 = UUID.randomUUID(); + + Value v = Value.uuidArray(Arrays.asList(uuid1, null, uuid2)); + assertThat(v.isNull()).isFalse(); + assertThat(v.getUuidArray()).containsExactly(uuid1, null, uuid2).inOrder(); + assertThat(v.toString()).isEqualTo("[" + uuid1.toString() + ",NULL," + uuid2.toString() + "]"); + assertEquals( + String.format("[%s,NULL,%s]", uuid1.toString(), uuid2.toString()), v.getAsString()); + } + + @Test + public void uuidArrayNull() { + Value v = Value.uuidArray(null); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getUuidArray); + } + + @Test + public void intervalArray() { + Interval interval1 = Interval.parseFromString("P123Y34M678DT478H345M345.76857863S"); + Interval interval2 = Interval.parseFromString("P-123Y-34M678DT-478H-345M-345.76857863S"); + + Value v = Value.intervalArray(Arrays.asList(interval1, null, interval2)); + assertThat(v.isNull()).isFalse(); + assertThat(v.getIntervalArray()).containsExactly(interval1, null, interval2).inOrder(); + assertThat(v.toString()) + .isEqualTo("[" + interval1.toISO8601() + ",NULL," + interval2.toISO8601() + "]"); + assertEquals( + String.format("[%s,NULL,%s]", interval1.toISO8601(), interval2.toISO8601()), + v.getAsString()); + } + + @Test + public void intervalArrayNull() { + Value v = Value.intervalArray(null); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getIntervalArray); + assertThat(e.getMessage()).contains("null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void protoMessageArray() { + SingerInfo singerInfo1 = 
SingerInfo.newBuilder().setSingerId(111).setGenre(Genre.FOLK).build(); + SingerInfo singerInfo2 = SingerInfo.newBuilder().setSingerId(222).build(); + Value v = + Value.protoMessageArray( + Arrays.asList(singerInfo1, null, singerInfo2), SingerInfo.getDescriptor()); + assertThat(v.getType()) + .isEqualTo(Type.array(Type.proto(SingerInfo.getDescriptor().getFullName()))); + assertThat(v.isNull()).isFalse(); + assertThat(v.getProtoMessageArray(SingerInfo.getDefaultInstance())) + .containsExactly(singerInfo1, null, singerInfo2); + assertThat(v.getBytesArray()) + .containsExactly( + ByteArray.copyFrom(singerInfo1.toByteArray()), + null, + ByteArray.copyFrom(singerInfo2.toByteArray())); + } + + @Test + public void protoMessageNullArray() { + Value v = Value.protoMessageArray(null, SingerInfo.getDescriptor()); + assertThat(v.getType()) + .isEqualTo(Type.array(Type.proto(SingerInfo.getDescriptor().getFullName()))); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = + assertThrows( + IllegalStateException.class, + () -> { + v.getProtoMessageArray(SingerInfo.getDefaultInstance()); + }); + assertThat(e.getMessage()).contains("null value"); + } + + @Test + public void protoEnumArray() { + Genre genre1 = Genre.ROCK; + Genre genre2 = Genre.JAZZ; + Value v = Value.protoEnumArray(Arrays.asList(genre1, null, genre2), Genre.getDescriptor()); + assertThat(v.getType()) + .isEqualTo(Type.array(Type.protoEnum(Genre.getDescriptor().getFullName()))); + assertThat(v.isNull()).isFalse(); + assertThat(v.getProtoEnumArray(Genre::forNumber)).containsExactly(genre1, null, genre2); + assertThat(v.getInt64Array()) + .containsExactly((long) genre1.getNumber(), null, (long) genre2.getNumber()); + } + + @Test + public void protoEnumNullArray() { + Value v = Value.protoEnumArray(null, Genre.getDescriptor()); + assertThat(v.getType()) + .isEqualTo(Type.array(Type.protoEnum(Genre.getDescriptor().getFullName()))); + 
assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = + assertThrows( + IllegalStateException.class, + () -> { + v.getProtoEnumArray(Genre::forNumber); + }); + assertThat(e.getMessage()).contains("null value"); + } + + @Test + public void struct() { + Struct struct = Struct.newBuilder().set("f1").to("v1").set("f2").to(30).build(); + Value v1 = Value.struct(struct); + assertThat(v1.getType()).isEqualTo(struct.getType()); + assertThat(v1.isNull()).isFalse(); + assertThat(v1.getStruct()).isEqualTo(struct); + assertThat(v1.toString()).isEqualTo("[v1, 30]"); + assertEquals("[v1, 30]", v1.getAsString()); + + Value v2 = Value.struct(struct.getType(), struct); + assertThat(v2).isEqualTo(v1); + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + Value.struct( + Type.struct(Collections.singletonList(StructField.of("f3", Type.string()))), + struct)); + assertThat(e.getMessage()).contains("Mismatch between struct value and type."); + } + + @Test + public void nullStruct() { + List fieldTypes = + Arrays.asList( + Type.StructField.of("f1", Type.string()), Type.StructField.of("f2", Type.int64())); + + Value v = Value.struct(Type.struct(fieldTypes), null); + assertThat(v.getType().getStructFields()).isEqualTo(fieldTypes); + assertThat(v.isNull()).isTrue(); + assertThat(v.toString()).isEqualTo(NULL_STRING); + NullPointerException e = assertThrows(NullPointerException.class, () -> Value.struct(null)); + assertThat(e.getMessage()).contains("Illegal call to create a NULL struct value."); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void nullStructGetter() { + List fieldTypes = + Arrays.asList( + Type.StructField.of("f1", Type.string()), Type.StructField.of("f2", Type.int64())); + + Value v = Value.struct(Type.struct(fieldTypes), null); + assertThat(v.isNull()).isTrue(); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getStruct); + 
assertThat(e.getMessage()).contains("Illegal call to getter of null value."); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void structArrayField() { + Type elementType = + Type.struct( + Arrays.asList( + Type.StructField.of("ff1", Type.string()), + Type.StructField.of("ff2", Type.int64()))); + List arrayElements = + Arrays.asList( + Struct.newBuilder().set("ff1").to("v1").set("ff2").to(1).build(), + null, + Struct.newBuilder().set("ff1").to("v3").set("ff2").to(3).build()); + Struct struct = + Struct.newBuilder() + .set("f1") + .to("x") + .set("f2") + .toStructArray(elementType, arrayElements) + .build(); + assertThat(struct.getType()) + .isEqualTo( + Type.struct( + Type.StructField.of("f1", Type.string()), + Type.StructField.of("f2", Type.array(elementType)))); + assertThat(struct.isNull(0)).isFalse(); + assertThat(struct.isNull(1)).isFalse(); + assertThat(struct.getString(0)).isEqualTo("x"); + assertThat(struct.getStructList(1)).isEqualTo(arrayElements); + } + + @Test + public void structArrayFieldNull() { + Type elementType = + Type.struct( + Arrays.asList( + Type.StructField.of("ff1", Type.string()), + Type.StructField.of("ff2", Type.int64()))); + Struct struct = + Struct.newBuilder().set("f1").to("x").set("f2").toStructArray(elementType, null).build(); + assertThat(struct.getType()) + .isEqualTo( + Type.struct( + Type.StructField.of("f1", Type.string()), + Type.StructField.of("f2", Type.array(elementType)))); + assertThat(struct.isNull(0)).isFalse(); + assertThat(struct.isNull(1)).isTrue(); + } + + @Test + public void structArray() { + Type elementType = + Type.struct( + Arrays.asList( + Type.StructField.of("ff1", Type.string()), + Type.StructField.of("ff2", Type.int64()))); + List arrayElements = + Arrays.asList( + Struct.newBuilder().set("ff1").to("v1").set("ff2").to(1).build(), + null, + null, + Struct.newBuilder().set("ff1").to("v3").set("ff2").to(3).build()); + Value v = Value.structArray(elementType, arrayElements); + 
assertThat(v.isNull()).isFalse(); + assertThat(v.getType().getArrayElementType()).isEqualTo(elementType); + assertThat(v.getStructArray()).isEqualTo(arrayElements); + assertThat(v.toString()).isEqualTo("[[v1, 1],NULL,NULL,[v3, 3]]"); + assertEquals("[[v1, 1],NULL,NULL,[v3, 3]]", v.getAsString()); + } + + @Test + public void structArrayNull() { + Type elementType = + Type.struct( + Arrays.asList( + Type.StructField.of("ff1", Type.string()), + Type.StructField.of("ff2", Type.int64()))); + Value v = Value.structArray(elementType, null); + assertThat(v.isNull()).isTrue(); + assertThat(v.getType().getArrayElementType()).isEqualTo(elementType); + assertThat(v.toString()).isEqualTo(NULL_STRING); + IllegalStateException e = assertThrows(IllegalStateException.class, v::getStructArray); + assertThat(e.getMessage()).contains("Illegal call to getter of null value"); + assertEquals("NULL", v.getAsString()); + } + + @Test + public void structArrayInvalidType() { + Type elementType = + Type.struct( + Arrays.asList( + Type.StructField.of("ff1", Type.string()), + Type.StructField.of("ff2", Type.int64()))); + // Second element has INT64 first field, not STRING. + List arrayElements = + Arrays.asList( + Struct.newBuilder().set("ff1").to("1").set("ff2").to(1).build(), + Struct.newBuilder().set("ff1").to(2).set("ff2").to(3).build()); + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, () -> Value.structArray(elementType, arrayElements)); + assertThat(e.getMessage()).contains("must have type STRUCT"); + } + + @Test + public void testValueToProto() { + // BASE types. 
+ assertEquals( + com.google.protobuf.Value.newBuilder().setBoolValue(true).build(), + Value.bool(true).toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder().setBoolValue(false).build(), + Value.bool(false).toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build(), + Value.bool(null).toProto()); + + assertEquals( + com.google.protobuf.Value.newBuilder().setStringValue("1").build(), + Value.int64(1L).toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build(), + Value.int64(null).toProto()); + + assertEquals( + com.google.protobuf.Value.newBuilder().setNumberValue(3.14f).build(), + Value.float32(3.14f).toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build(), + Value.float32(null).toProto()); + + assertEquals( + com.google.protobuf.Value.newBuilder().setNumberValue(3.14d).build(), + Value.float64(3.14d).toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build(), + Value.float64(null).toProto()); + + assertEquals( + com.google.protobuf.Value.newBuilder().setStringValue("test").build(), + Value.string("test").toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build(), + Value.string(null).toProto()); + + assertEquals( + com.google.protobuf.Value.newBuilder().setStringValue("{}").build(), + Value.json("{}").toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build(), + Value.json(null).toProto()); + + assertEquals( + com.google.protobuf.Value.newBuilder() + .setStringValue(ByteArray.copyFrom("test").toBase64()) + .build(), + Value.bytes(ByteArray.copyFrom("test")).toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build(), + Value.bytes(null).toProto()); + + assertEquals( + 
com.google.protobuf.Value.newBuilder().setStringValue("3.14").build(), + Value.numeric(new BigDecimal("3.14")).toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build(), + Value.numeric(null).toProto()); + + assertEquals( + com.google.protobuf.Value.newBuilder().setStringValue("1234.5678").build(), + Value.pgNumeric("1234.5678").toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build(), + Value.pgNumeric(null).toProto()); + + assertEquals( + com.google.protobuf.Value.newBuilder().setStringValue("2010-02-28").build(), + Value.date(Date.fromYearMonthDay(2010, 2, 28)).toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build(), + Value.date(null).toProto()); + + assertEquals( + com.google.protobuf.Value.newBuilder() + .setStringValue("e0d8a283-29d8-49ce-8d4c-e1d8cb0ea047") + .build(), + Value.uuid(UUID.fromString("e0d8a283-29d8-49ce-8d4c-e1d8cb0ea047")).toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build(), + Value.uuid(null).toProto()); + + assertEquals( + com.google.protobuf.Value.newBuilder().setStringValue("P1Y2M3DT5H6M3.624567878S").build(), + Value.interval(Interval.fromMonthsDaysNanos(14, 3, BigInteger.valueOf(18363624567878L))) + .toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build(), + Value.interval(null).toProto()); + + assertEquals( + com.google.protobuf.Value.newBuilder() + .setStringValue("2012-04-10T15:16:17.123456789Z") + .build(), + Value.timestamp(Timestamp.parseTimestamp("2012-04-10T15:16:17.123456789Z")).toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build(), + Value.timestamp(null).toProto()); + + // ARRAY types. 
+ assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder().setBoolValue(true).build(), + com.google.protobuf.Value.newBuilder().setBoolValue(false).build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()))) + .build(), + Value.boolArray(Arrays.asList(true, false, null)).toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder().setStringValue("1").build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()))) + .build(), + Value.int64Array(Arrays.asList(1L, null)).toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder().setNumberValue(3.14f).build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()))) + .build(), + Value.float32Array(Arrays.asList(3.14f, null)).toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder().setNumberValue(3.14d).build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()))) + .build(), + Value.float64Array(Arrays.asList(3.14d, null)).toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder().setStringValue("test").build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()))) + .build(), + Value.stringArray(Arrays.asList("test", null)).toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + 
ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder().setStringValue("{}").build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()))) + .build(), + Value.jsonArray(Arrays.asList("{}", null)).toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder() + .setStringValue(ByteArray.copyFrom("test").toBase64()) + .build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()))) + .build(), + Value.bytesArray(Arrays.asList(ByteArray.copyFrom("test"), null)).toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder().setStringValue("3.14").build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()))) + .build(), + Value.numericArray(Arrays.asList(new BigDecimal("3.14"), null)).toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder().setStringValue("1.23").build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build(), + com.google.protobuf.Value.newBuilder().setStringValue("NaN").build()))) + .build(), + Value.pgNumericArray(Arrays.asList("1.23", null, Value.NAN)).toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder() + .setStringValue("2010-02-28") + .build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()))) + .build(), + Value.dateArray(Arrays.asList(Date.fromYearMonthDay(2010, 2, 28), null)).toProto()); + + assertEquals( + 
com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder() + .setStringValue("3fb10ff0-4a9a-428a-bc20-a947181fd76d") + .build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()))) + .build(), + Value.uuidArray( + Arrays.asList(UUID.fromString("3fb10ff0-4a9a-428a-bc20-a947181fd76d"), null)) + .toProto()); + + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder() + .setStringValue("P1Y2M3DT5H6M2.456787800S") + .build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()))) + .build(), + Value.intervalArray( + Arrays.asList( + Interval.fromMonthsDaysNanos(14, 3, new BigInteger("18362456787800")), null)) + .toProto()); + + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder() + .setStringValue("2012-04-10T15:16:17.123456789Z") + .build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()))) + .build(), + Value.timestampArray( + Arrays.asList(Timestamp.parseTimestamp("2012-04-10T15:16:17.123456789Z"), null)) + .toProto()); + + // STRUCT type with array field. 
+ assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder() + .setBoolValue(true) + .build(), + com.google.protobuf.Value.newBuilder() + .setBoolValue(false) + .build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build())) + .build()) + .build()) + .build()) + .build(), + Value.struct( + Struct.newBuilder().add(Value.boolArray(Arrays.asList(true, false, null))).build()) + .toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder() + .setStringValue("1") + .build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build())) + .build()) + .build()) + .build()) + .build(), + Value.struct(Struct.newBuilder().add(Value.int64Array(Arrays.asList(1L, null))).build()) + .toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder() + .setNumberValue(3.14f) + .build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build())) + .build()) + .build()) + .build()) + .build(), + Value.struct( + Struct.newBuilder().add(Value.float32Array(Arrays.asList(3.14f, null))).build()) + .toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + 
com.google.protobuf.Value.newBuilder() + .setNumberValue(3.14d) + .build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build())) + .build()) + .build()) + .build()) + .build(), + Value.struct( + Struct.newBuilder().add(Value.float64Array(Arrays.asList(3.14d, null))).build()) + .toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder() + .setStringValue("test") + .build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build())) + .build()) + .build()) + .build()) + .build(), + Value.struct( + Struct.newBuilder().add(Value.stringArray(Arrays.asList("test", null))).build()) + .toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder() + .setStringValue( + ByteArray.copyFrom("test").toBase64()) + .build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build())) + .build()) + .build()) + .build()) + .build(), + Value.struct( + Struct.newBuilder() + .add(Value.bytesArray(Arrays.asList(ByteArray.copyFrom("test"), null))) + .build()) + .toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder() + .setStringValue("3.14") + .build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build())) + .build()) + .build()) + .build()) + .build(), + Value.struct( + 
Struct.newBuilder() + .add(Value.numericArray(Arrays.asList(new BigDecimal("3.14"), null))) + .build()) + .toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder() + .setStringValue("2010-02-28") + .build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build())) + .build()) + .build()) + .build()) + .build(), + Value.struct( + Struct.newBuilder() + .add(Value.dateArray(Arrays.asList(Date.fromYearMonthDay(2010, 2, 28), null))) + .build()) + .toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder() + .setStringValue( + "9e2f9eac-8d6f-45c1-ac1d-c589daad8821") + .build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build())) + .build()) + .build()) + .build()) + .build(), + Value.struct( + Struct.newBuilder() + .add( + Value.uuidArray( + Arrays.asList( + UUID.fromString("9e2f9eac-8d6f-45c1-ac1d-c589daad8821"), null))) + .build()) + .toProto()); + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder() + .setStringValue("2012-04-10T15:16:17.123456789Z") + .build(), + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build())) + .build()) + .build()) + .build()) + .build(), + Value.struct( + Struct.newBuilder() + .add( + Value.timestampArray( + Arrays.asList( + 
Timestamp.parseTimestamp("2012-04-10T15:16:17.123456789Z"), null))) + .build()) + .toProto()); + // Struct with pgNumeric + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder().setStringValue("1.23").build()) + .build()) + .build(), + Value.struct(Struct.newBuilder().set("x").to(Value.pgNumeric("1.23")).build()).toProto()); + // Struct with pgNumeric Array + assertEquals( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + Arrays.asList( + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build(), + com.google.protobuf.Value.newBuilder() + .setStringValue("1.23") + .build(), + com.google.protobuf.Value.newBuilder() + .setStringValue("NaN") + .build())) + .build()) + .build()) + .build()) + .build(), + Value.struct( + Struct.newBuilder() + .add(Value.pgNumericArray(Arrays.asList(null, "1.23", "NaN"))) + .build()) + .toProto()); + } + + @Test + public void testEqualsHashCode() { + EqualsTester tester = new EqualsTester(); + String emptyJson = "{}"; + String simpleJson = "{\"color\":\"red\",\"value\":\"#f00\"}"; + + tester.addEqualityGroup(Value.bool(true), Value.bool(Boolean.TRUE)); + tester.addEqualityGroup(Value.bool(false)); + tester.addEqualityGroup(Value.bool(null)); + + tester.addEqualityGroup(Value.int64(123), Value.int64(Long.valueOf(123))); + tester.addEqualityGroup(Value.int64(456)); + tester.addEqualityGroup(Value.int64(null)); + + tester.addEqualityGroup(Value.float32(1.23f), Value.float32(Float.valueOf(1.23f))); + tester.addEqualityGroup(Value.float32(4.56f)); + tester.addEqualityGroup(Value.float32(null)); + + tester.addEqualityGroup(Value.float64(1.23), Value.float64(Double.valueOf(1.23))); + tester.addEqualityGroup(Value.float64(4.56)); + 
tester.addEqualityGroup(Value.float64(null)); + + tester.addEqualityGroup( + Value.numeric(BigDecimal.valueOf(123, 2)), Value.numeric(new BigDecimal("1.23"))); + tester.addEqualityGroup(Value.numeric(BigDecimal.valueOf(456, 2))); + tester.addEqualityGroup(Value.numeric(null)); + + tester.addEqualityGroup(Value.pgNumeric("1234.5678"), Value.pgNumeric("1234.5678")); + tester.addEqualityGroup(Value.pgNumeric("NaN"), Value.pgNumeric(Value.NAN)); + tester.addEqualityGroup(Value.pgNumeric("8765.4321")); + tester.addEqualityGroup(Value.pgNumeric(null)); + + tester.addEqualityGroup(Value.pgOid(123L), Value.pgOid(Long.valueOf(123))); + tester.addEqualityGroup(Value.pgOid(456L)); + tester.addEqualityGroup(Value.pgOid(null)); + + tester.addEqualityGroup(Value.string("abc"), Value.string("abc")); + tester.addEqualityGroup(Value.string("def")); + tester.addEqualityGroup(Value.string(null)); + + tester.addEqualityGroup(Value.json(simpleJson), Value.json(simpleJson)); + tester.addEqualityGroup(Value.json("{}")); + tester.addEqualityGroup(Value.json("[]")); + tester.addEqualityGroup(Value.json(null)); + + tester.addEqualityGroup(Value.bytes(newByteArray("abc")), Value.bytes(newByteArray("abc"))); + tester.addEqualityGroup(Value.bytes(newByteArray("def"))); + tester.addEqualityGroup(Value.bytes(null)); + + tester.addEqualityGroup(Value.timestamp(null), Value.timestamp(null)); + tester.addEqualityGroup( + Value.timestamp(Value.COMMIT_TIMESTAMP), Value.timestamp(Value.COMMIT_TIMESTAMP)); + Timestamp now = Timestamp.now(); + tester.addEqualityGroup(Value.timestamp(now), Value.timestamp(now)); + tester.addEqualityGroup(Value.timestamp(Timestamp.ofTimeMicroseconds(0))); + + tester.addEqualityGroup(Value.date(null), Value.date(null)); + tester.addEqualityGroup( + Value.date(Date.fromYearMonthDay(2018, 2, 26)), + Value.date(Date.fromYearMonthDay(2018, 2, 26))); + tester.addEqualityGroup(Value.date(Date.fromYearMonthDay(2018, 2, 27))); + + UUID uuid = UUID.randomUUID(); + 
tester.addEqualityGroup(Value.uuid(null), Value.uuid(null)); + tester.addEqualityGroup(Value.uuid(uuid), Value.uuid(uuid)); + tester.addEqualityGroup(Value.uuid(UUID.randomUUID())); + + Struct structValue1 = Struct.newBuilder().set("f1").to(20).set("f2").to("def").build(); + Struct structValue2 = Struct.newBuilder().set("f1").to(20).set("f2").to("def").build(); + assertThat(Value.struct(structValue1).equals(Value.struct(structValue2))).isTrue(); + tester.addEqualityGroup(Value.struct(structValue1), Value.struct(structValue2)); + + Type structType1 = structValue1.getType(); + Type structType2 = Type.struct(Collections.singletonList(StructField.of("f1", Type.string()))); + tester.addEqualityGroup(Value.struct(structType1, null), Value.struct(structType1, null)); + tester.addEqualityGroup(Value.struct(structType2, null), Value.struct(structType2, null)); + + tester.addEqualityGroup( + Value.boolArray(Arrays.asList(false, true)), + Value.boolArray(new boolean[] {false, true}), + Value.boolArray(new boolean[] {true, false, true, false}, 1, 2), + Value.boolArray(plainIterable(false, true))); + tester.addEqualityGroup(Value.boolArray(Collections.singletonList(false))); + tester.addEqualityGroup(Value.boolArray((Iterable) null)); + + tester.addEqualityGroup( + Value.int64Array(Arrays.asList(1L, 2L)), + Value.int64Array(new long[] {1L, 2L}), + Value.int64Array(new long[] {0L, 1L, 2L, 3L}, 1, 2), + Value.int64Array(plainIterable(1L, 2L))); + tester.addEqualityGroup(Value.int64Array(Collections.singletonList(3L))); + tester.addEqualityGroup(Value.int64Array((Iterable) null)); + + tester.addEqualityGroup( + Value.float32Array(Arrays.asList(.1f, .2f)), + Value.float32Array(new float[] {.1f, .2f}), + Value.float32Array(new float[] {.0f, .1f, .2f, .3f}, 1, 2), + Value.float32Array(plainIterable(.1f, .2f))); + tester.addEqualityGroup(Value.float32Array(Collections.singletonList(.3f))); + tester.addEqualityGroup(Value.float32Array((Iterable) null)); + + tester.addEqualityGroup( + 
Value.float64Array(Arrays.asList(.1, .2)), + Value.float64Array(new double[] {.1, .2}), + Value.float64Array(new double[] {.0, .1, .2, .3}, 1, 2), + Value.float64Array(plainIterable(.1, .2))); + tester.addEqualityGroup(Value.float64Array(Collections.singletonList(.3))); + tester.addEqualityGroup(Value.float64Array((Iterable) null)); + + tester.addEqualityGroup( + Value.numericArray(Arrays.asList(BigDecimal.valueOf(1, 1), BigDecimal.valueOf(2, 1)))); + tester.addEqualityGroup( + Value.numericArray(Collections.singletonList(BigDecimal.valueOf(3, 1)))); + tester.addEqualityGroup(Value.numericArray(null)); + + tester.addEqualityGroup( + Value.pgNumericArray(Arrays.asList("1.23", null, Value.NAN)), + Value.pgNumericArray(Arrays.asList("1.23", null, "NaN"))); + tester.addEqualityGroup(Value.pgNumericArray(Collections.singletonList("1.25"))); + tester.addEqualityGroup(Value.pgNumericArray(null), Value.pgNumericArray(null)); + + tester.addEqualityGroup( + Value.pgOidArray(Arrays.asList(1L, 2L)), + Value.pgOidArray(new long[] {1L, 2L}), + Value.pgOidArray(new long[] {0L, 1L, 2L, 3L}, 1, 2), + Value.pgOidArray(plainIterable(1L, 2L))); + tester.addEqualityGroup(Value.pgOidArray(Collections.singletonList(3L))); + tester.addEqualityGroup(Value.pgOidArray(Collections.singletonList(null))); + tester.addEqualityGroup(Value.pgOidArray((Iterable) null)); + + tester.addEqualityGroup( + Value.stringArray(Arrays.asList("a", "b")), Value.stringArray(Arrays.asList("a", "b"))); + tester.addEqualityGroup(Value.stringArray(Collections.singletonList("c"))); + tester.addEqualityGroup(Value.stringArray(null)); + + tester.addEqualityGroup( + Value.jsonArray(Arrays.asList(emptyJson, simpleJson)), + Value.jsonArray(Arrays.asList(emptyJson, simpleJson))); + tester.addEqualityGroup(Value.jsonArray(Arrays.asList("[]"))); + tester.addEqualityGroup(Value.jsonArray(null)); + + tester.addEqualityGroup( + Value.bytesArray(Arrays.asList(newByteArray("a"), newByteArray("b"))), + 
Value.bytesArray(Arrays.asList(newByteArray("a"), newByteArray("b")))); + tester.addEqualityGroup(Value.bytesArray(Collections.singletonList(newByteArray("c")))); + tester.addEqualityGroup(Value.bytesArray(null)); + + tester.addEqualityGroup( + Value.timestampArray(Arrays.asList(null, now)), + Value.timestampArray(Arrays.asList(null, now))); + tester.addEqualityGroup(Value.timestampArray(null)); + + tester.addEqualityGroup( + Value.dateArray(Arrays.asList(null, Date.fromYearMonthDay(2018, 2, 26))), + Value.dateArray(Arrays.asList(null, Date.fromYearMonthDay(2018, 2, 26)))); + tester.addEqualityGroup(Value.dateArray(null)); + + tester.addEqualityGroup( + Value.uuidArray(Arrays.asList(null, uuid)), Value.uuidArray(Arrays.asList(null, uuid))); + tester.addEqualityGroup(Value.uuidArray(null)); + + tester.addEqualityGroup( + Value.intervalArray( + Arrays.asList(null, Interval.fromMonthsDaysNanos(14, 3, BigInteger.valueOf(0)))), + Value.intervalArray( + Arrays.asList(null, Interval.fromMonthsDaysNanos(14, 3, BigInteger.valueOf(0))))); + tester.addEqualityGroup(Value.intervalArray(null)); + + tester.addEqualityGroup( + Value.structArray(structType1, Arrays.asList(structValue1, null)), + Value.structArray(structType1, Arrays.asList(structValue2, null))); + tester.addEqualityGroup( + Value.structArray(structType1, Collections.singletonList(null)), + Value.structArray(structType1, Collections.singletonList(null))); + tester.addEqualityGroup( + Value.structArray(structType1, null), Value.structArray(structType1, null)); + tester.addEqualityGroup( + Value.structArray(structType1, new ArrayList<>()), + Value.structArray(structType1, new ArrayList<>())); + + tester.testEquals(); + } + + @Test + public void testGetAsString() { + assertEquals("true", Value.bool(true).getAsString()); + assertEquals("false", Value.bool(false).getAsString()); + + assertEquals("1", Value.int64(1L).getAsString()); + assertEquals(String.valueOf(Long.MAX_VALUE), 
Value.int64(Long.MAX_VALUE).getAsString()); + assertEquals(String.valueOf(Long.MIN_VALUE), Value.int64(Long.MIN_VALUE).getAsString()); + + assertEquals("3.14", Value.float32(3.14f).getAsString()); + assertEquals("NaN", Value.float32(Float.NaN).getAsString()); + assertEquals(String.valueOf(Float.MIN_VALUE), Value.float32(Float.MIN_VALUE).getAsString()); + assertEquals(String.valueOf(Float.MAX_VALUE), Value.float32(Float.MAX_VALUE).getAsString()); + + assertEquals("3.14", Value.float64(3.14d).getAsString()); + assertEquals("NaN", Value.float64(Double.NaN).getAsString()); + assertEquals(String.valueOf(Double.MIN_VALUE), Value.float64(Double.MIN_VALUE).getAsString()); + assertEquals(String.valueOf(Double.MAX_VALUE), Value.float64(Double.MAX_VALUE).getAsString()); + + assertEquals("3.14", Value.numeric(new BigDecimal("3.14")).getAsString()); + assertEquals( + "123456789.123456789", Value.numeric(new BigDecimal("123456789.123456789")).getAsString()); + + assertEquals("3.14", Value.pgNumeric("3.14").getAsString()); + assertEquals("123456789.123456789", Value.pgNumeric("123456789.123456789").getAsString()); + assertEquals("NaN", Value.pgNumeric("NaN").getAsString()); + + assertEquals("1", Value.pgOid(1L).getAsString()); + assertEquals(String.valueOf(Long.MAX_VALUE), Value.pgOid(Long.MAX_VALUE).getAsString()); + assertEquals(String.valueOf(Long.MIN_VALUE), Value.pgOid(Long.MIN_VALUE).getAsString()); + + assertEquals(Strings.repeat("foo", 36), Value.string(Strings.repeat("foo", 36)).getAsString()); + assertEquals(Strings.repeat("foo", 36), Value.json(Strings.repeat("foo", 36)).getAsString()); + assertEquals(Strings.repeat("foo", 36), Value.pgJsonb(Strings.repeat("foo", 36)).getAsString()); + + assertEquals( + "2023-01-10T18:59:00Z", + Value.timestamp(Timestamp.parseTimestamp("2023-01-10T18:59:00Z")).getAsString()); + assertEquals("2023-01-10", Value.date(Date.parseDate("2023-01-10")).getAsString()); + assertEquals( + "4ef8ba78-3bb5-4a8f-ae39-bf59a89a491d", + 
Value.uuid(UUID.fromString("4ef8ba78-3bb5-4a8f-ae39-bf59a89a491d")).getAsString()); + assertEquals( + "P1Y2M3DT4H5M6.789123456S", + Value.interval(Interval.parseFromString("P1Y2M3DT4H5M6.789123456S")).getAsString()); + + Random random = new Random(); + byte[] bytes = new byte[random.nextInt(256)]; + assertEquals( + Base64.getEncoder().encodeToString(bytes), + Value.bytes(ByteArray.copyFrom(bytes)).getAsString()); + assertEquals( + Base64.getEncoder().encodeToString(bytes), + Value.internalBytes(new LazyByteArray(Base64.getEncoder().encodeToString(bytes))) + .getAsString()); + } + + @Test + public void serialization() { + + reserializeAndAssert(Value.bool(true)); + reserializeAndAssert(Value.bool(false)); + reserializeAndAssert(Value.bool(null)); + + reserializeAndAssert(Value.int64(123)); + reserializeAndAssert(Value.int64(null)); + + reserializeAndAssert(Value.float32(1.23f)); + reserializeAndAssert(Value.float32(null)); + + reserializeAndAssert(Value.float64(1.23)); + reserializeAndAssert(Value.float64(null)); + + reserializeAndAssert(Value.numeric(BigDecimal.valueOf(123, 2))); + reserializeAndAssert(Value.numeric(null)); + + reserializeAndAssert(Value.pgNumeric("1.23")); + reserializeAndAssert(Value.pgNumeric(Value.NAN)); + reserializeAndAssert(Value.pgNumeric(null)); + + reserializeAndAssert(Value.pgOid(123L)); + reserializeAndAssert(Value.pgOid(null)); + + reserializeAndAssert(Value.string("abc")); + reserializeAndAssert(Value.string(null)); + + reserializeAndAssert(Value.json("{\"color\":\"red\",\"value\":\"#f00\"}")); + reserializeAndAssert(Value.json(null)); + + reserializeAndAssert(Value.bytes(newByteArray("abc"))); + reserializeAndAssert(Value.bytes(null)); + + reserializeAndAssert( + Value.struct(Struct.newBuilder().set("f").to(3).set("f").to((Date) null).build())); + reserializeAndAssert( + Value.struct( + Type.struct( + Arrays.asList( + Type.StructField.of("a", Type.string()), + Type.StructField.of("b", Type.int64()))), + null)); + + 
reserializeAndAssert(Value.boolArray(new boolean[] {false, true})); + reserializeAndAssert(Value.boolArray(BrokenSerializationList.of(true, false))); + reserializeAndAssert(Value.boolArray((Iterable) null)); + + reserializeAndAssert(Value.int64Array(BrokenSerializationList.of(1L, 2L))); + reserializeAndAssert(Value.int64Array(new long[] {1L, 2L})); + reserializeAndAssert(Value.int64Array((Iterable) null)); + + reserializeAndAssert(Value.float32Array(new float[] {.1f, .2f})); + reserializeAndAssert(Value.float32Array(BrokenSerializationList.of(.1f, .2f, .3f))); + reserializeAndAssert(Value.float32Array((Iterable) null)); + + reserializeAndAssert(Value.float64Array(new double[] {.1, .2})); + reserializeAndAssert(Value.float64Array(BrokenSerializationList.of(.1, .2, .3))); + reserializeAndAssert(Value.float64Array((Iterable) null)); + + reserializeAndAssert( + Value.numericArray( + Arrays.asList(BigDecimal.valueOf(1, 1), null, BigDecimal.valueOf(2, 1)))); + reserializeAndAssert( + Value.numericArray( + BrokenSerializationList.of( + BigDecimal.valueOf(1, 1), BigDecimal.valueOf(2, 1), BigDecimal.valueOf(3, 1)))); + reserializeAndAssert(Value.numericArray(null)); + + reserializeAndAssert(Value.pgNumericArray(Arrays.asList("1.23", null, Value.NAN))); + reserializeAndAssert( + Value.pgNumericArray(BrokenSerializationList.of("1.23", "1.24", Value.NAN))); + reserializeAndAssert(Value.pgNumericArray(null)); + + reserializeAndAssert( + Value.pgOidArray(BrokenSerializationList.of(Long.valueOf(1L), Long.valueOf(2L)))); + reserializeAndAssert( + Value.pgOidArray(BrokenSerializationList.of(Long.valueOf(1L), Long.valueOf(2L), null))); + reserializeAndAssert(Value.pgOidArray((Iterable) null)); + + reserializeAndAssert(Value.timestamp(null)); + reserializeAndAssert(Value.timestamp(Value.COMMIT_TIMESTAMP)); + reserializeAndAssert(Value.timestamp(Timestamp.now())); + reserializeAndAssert(Value.timestampArray(Arrays.asList(null, Timestamp.now()))); + + 
reserializeAndAssert(Value.date(null)); + reserializeAndAssert(Value.date(Date.fromYearMonthDay(2018, 2, 26))); + reserializeAndAssert(Value.dateArray(Arrays.asList(null, Date.fromYearMonthDay(2018, 2, 26)))); + + reserializeAndAssert(Value.uuid(null)); + reserializeAndAssert(Value.uuid(UUID.fromString("20d55f8b-5cd4-46ae-81bc-38f6b53c243b"))); + reserializeAndAssert( + Value.uuidArray( + Arrays.asList(null, UUID.fromString("20d55f8b-5cd4-46ae-81bc-38f6b53c243b")))); + + reserializeAndAssert(Value.interval(null)); + reserializeAndAssert( + Value.interval(Interval.fromMonthsDaysNanos(15, 7, BigInteger.valueOf(1234567891)))); + reserializeAndAssert( + Value.intervalArray( + Arrays.asList( + null, Interval.fromMonthsDaysNanos(15, 7, BigInteger.valueOf(1234567891))))); + + BrokenSerializationList of = BrokenSerializationList.of("a", "b"); + reserializeAndAssert(Value.stringArray(of)); + reserializeAndAssert(Value.stringArray(null)); + + BrokenSerializationList json = + BrokenSerializationList.of("{}", "{\"color\":\"red\",\"value\":\"#f00\"}"); + reserializeAndAssert(Value.jsonArray(json)); + reserializeAndAssert(Value.jsonArray(null)); + + reserializeAndAssert( + Value.bytesArray(BrokenSerializationList.of(newByteArray("a"), newByteArray("b")))); + reserializeAndAssert(Value.bytesArray(null)); + + Struct s1 = Struct.newBuilder().set("f1").to(1).build(); + Struct s2 = Struct.newBuilder().set("f1").to(2).build(); + reserializeAndAssert(Value.structArray(s1.getType(), BrokenSerializationList.of(s1, null, s2))); + reserializeAndAssert(Value.structArray(s1.getType(), null)); + } + + @Test(expected = IllegalStateException.class) + public void verifyBrokenSerialization() { + reserializeAndAssert(BrokenSerializationList.of(1, 2, 3)); + } + + @Test + public void testToValue() { + Value value = Value.toValue(null); + assertNull(value.getType()); + assertEquals("NULL", value.getAsString()); + + int i = 10; + value = Value.toValue(i); + assertNull(value.getType()); + 
assertEquals("10", value.getAsString()); + + Integer j = 10; + value = Value.toValue(j); + assertNull(value.getType()); + assertEquals("10", value.getAsString()); + + long k = 10L; + value = Value.toValue(k); + assertNull(value.getType()); + assertEquals("10", value.getAsString()); + + Long l = 10L; + value = Value.toValue(i); + assertNull(value.getType()); + assertEquals("10", value.getAsString()); + + boolean m = true; + value = Value.toValue(m); + assertEquals(Type.bool(), value.getType()); + assertTrue(value.getBool()); + + Boolean n = true; + value = Value.toValue(n); + assertEquals(Type.bool(), value.getType()); + assertTrue(value.getBool()); + + Float o = 0.3f; + value = Value.toValue(o); + assertEquals(Type.float32(), value.getType()); + assertEquals(0.3f, value.getFloat32(), 0); + + float p = 0.3f; + value = Value.toValue(p); + assertEquals(Type.float32(), value.getType()); + assertEquals(0.3f, value.getFloat32(), 0); + + Double q = 0.4d; + value = Value.toValue(q); + assertEquals(Type.float64(), value.getType()); + assertEquals(0.4d, value.getFloat64(), 0); + + double s = 0.5d; + value = Value.toValue(s); + assertEquals(Type.float64(), value.getType()); + assertEquals(0.5d, value.getFloat64(), 0); + + BigDecimal t = BigDecimal.valueOf(0.6d); + value = Value.toValue(t); + assertEquals(Type.numeric(), value.getType()); + assertEquals(t, value.getNumeric()); + + ByteArray bytes = ByteArray.copyFrom("hello"); + value = Value.toValue(bytes); + assertEquals(Type.bytes(), value.getType()); + assertEquals(bytes, value.getBytes()); + + byte[] byteArray = "hello".getBytes(); + value = Value.toValue(byteArray); + assertEquals(Type.bytes(), value.getType()); + assertEquals(bytes, value.getBytes()); + + Date date = Date.fromYearMonthDay(2018, 2, 26); + value = Value.toValue(date); + assertEquals(Type.date(), value.getType()); + assertEquals(date, value.getDate()); + + UUID uuid = UUID.randomUUID(); + value = Value.toValue(uuid); + assertEquals(Type.uuid(), 
value.getType()); + assertEquals(uuid, value.getUuid()); + + LocalDate localDate = LocalDate.of(2018, 2, 26); + value = Value.toValue(localDate); + assertEquals(Type.date(), value.getType()); + assertEquals(date, value.getDate()); + + TimeZone defaultTimezone = TimeZone.getDefault(); + TimeZone.setDefault(TimeZone.getTimeZone("Europe/Paris")); + LocalDateTime localDateTime = LocalDateTime.of(2018, 2, 26, 11, 30, 10); + value = Value.toValue(localDateTime); + assertNull(value.getType()); + assertEquals("2018-02-26T10:30:10.000Z", value.getAsString()); + TimeZone.setDefault(defaultTimezone); + + OffsetDateTime offsetDateTime = OffsetDateTime.of(localDateTime, ZoneOffset.ofHours(10)); + value = Value.toValue(offsetDateTime); + assertNull(value.getType()); + assertEquals("2018-02-26T01:30:10.000Z", value.getAsString()); + + ZonedDateTime zonedDateTime = ZonedDateTime.of(localDateTime, ZoneId.of("Asia/Kolkata")); + value = Value.toValue(zonedDateTime); + assertNull(value.getType()); + assertEquals("2018-02-26T06:00:10.000Z", value.getAsString()); + + ProtocolMessageEnum protocolMessageEnum = IsolationLevel.SERIALIZABLE; + value = Value.toValue(protocolMessageEnum); + assertEquals( + Type.protoEnum("google.spanner.v1.TransactionOptions.IsolationLevel"), value.getType()); + assertEquals( + protocolMessageEnum, + value.getProtoEnum( + (val -> { + switch (val) { + case 1: + return IsolationLevel.SERIALIZABLE; + case 2: + return IsolationLevel.REPEATABLE_READ; + default: + return IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED; + } + }))); + + PartialResultSet partialResultSet = + PartialResultSet.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("hello").build()) + .build(); + value = Value.toValue(partialResultSet); + assertEquals(Type.proto("google.spanner.v1.PartialResultSet"), value.getType()); + assertEquals(partialResultSet, value.getProtoMessage(PartialResultSet.getDefaultInstance())); + + Interval interval = Interval.ofDays(10); + value = 
Value.toValue(interval); + assertEquals(Type.interval(), value.getType()); + assertEquals(interval, value.getInterval()); + + Struct struct = Struct.newBuilder().set("name").to(10L).build(); + value = Value.toValue(struct); + assertEquals(Type.struct(StructField.of("name", Type.int64())), value.getType()); + assertEquals(struct, value.getStruct()); + + Timestamp timestamp = Timestamp.now(); + value = Value.toValue(timestamp); + assertEquals(Type.timestamp(), value.getType()); + assertEquals(timestamp, value.getTimestamp()); + + List expectedBoolArray = Arrays.asList(true, false); + boolean[] bools1 = {true, false}; + value = Value.toValue(bools1); + assertEquals(Type.array(Type.bool()), value.getType()); + assertEquals(expectedBoolArray, value.getBoolArray()); + + Boolean[] bools2 = {true, false}; + value = Value.toValue(bools2); + assertEquals(Type.array(Type.bool()), value.getType()); + assertEquals(expectedBoolArray, value.getBoolArray()); + + List expectedFloatArray = Arrays.asList(0.1f, 0.2f, 0.3f); + Float[] floats1 = {0.1f, 0.2f, 0.3f}; + value = Value.toValue(floats1); + assertEquals(Type.array(Type.float32()), value.getType()); + assertEquals(expectedFloatArray, value.getFloat32Array()); + + float[] floats2 = {0.1f, 0.2f, 0.3f}; + value = Value.toValue(floats2); + assertEquals(Type.array(Type.float32()), value.getType()); + assertEquals(expectedFloatArray, value.getFloat32Array()); + + List expectedDoubleArray = Arrays.asList(0.1d, 0.2d, 0.3d, 0.4d); + Double[] doubles1 = {0.1d, 0.2d, 0.3d, 0.4d}; + value = Value.toValue(doubles1); + assertEquals(Type.array(Type.float64()), value.getType()); + assertEquals(expectedDoubleArray, value.getFloat64Array()); + + double[] doubles2 = {0.1d, 0.2d, 0.3d, 0.4d}; + value = Value.toValue(doubles2); + assertEquals(Type.array(Type.float64()), value.getType()); + assertEquals(expectedDoubleArray, value.getFloat64Array()); + + List expectedIntLongArray = Arrays.asList("1", "2", "3"); + int[] ints1 = {1, 2, 3}; + value = 
Value.toValue(ints1); + assertNull(value.getType()); + assertEquals(expectedIntLongArray, value.getAsStringList()); + + Integer[] ints2 = {1, 2, 3}; + value = Value.toValue(ints2); + assertNull(value.getType()); + assertEquals(expectedIntLongArray, value.getAsStringList()); + + Long[] longs1 = {1L, 2L, 3L}; + value = Value.toValue(longs1); + assertNull(value.getType()); + assertEquals(expectedIntLongArray, value.getAsStringList()); + + long[] longs2 = {1L, 2L, 3L}; + value = Value.toValue(longs2); + assertNull(value.getType()); + assertEquals(expectedIntLongArray, value.getAsStringList()); + + String string = "hello"; + value = Value.toValue(string); + assertNull(value.getType()); + assertEquals("hello", value.getAsString()); + } + + @Test + public void testToValueIterable() { + List booleans = Arrays.asList(true, false); + Value value = Value.toValue(booleans); + assertEquals(Type.array(Type.bool()), value.getType()); + assertEquals(booleans, value.getBoolArray()); + + List ints = Arrays.asList(1, 2, 3); + value = Value.toValue(ints); + assertNull(value.getType()); + assertEquals(Arrays.asList("1", "2", "3"), value.getAsStringList()); + + List longs = Arrays.asList(1L, 2L, 3L); + value = Value.toValue(longs); + assertNull(value.getType()); + assertEquals(Arrays.asList("1", "2", "3"), value.getAsStringList()); + + Set floats = new HashSet<>(Arrays.asList(0.1f, 0.2f, 0.3f)); + value = Value.toValue(floats); + assertEquals(Type.array(Type.float32()), value.getType()); + assertEquals(Arrays.asList(0.1f, 0.2f, 0.3f), value.getFloat32Array()); + + List doubles = Arrays.asList(0.1d, 0.2d, 0.3d, 0.4d); + value = Value.toValue(doubles); + assertEquals(Type.array(Type.float64()), value.getType()); + assertEquals(doubles, value.getFloat64Array()); + + List bigDecimals = + Arrays.asList(BigDecimal.valueOf(0.1d), BigDecimal.valueOf(0.2d)); + value = Value.toValue(bigDecimals); + assertEquals(Type.array(Type.numeric()), value.getType()); + assertEquals(bigDecimals, 
value.getNumericArray()); + + List byteArrays = + Arrays.asList(ByteArray.copyFrom("hello"), ByteArray.copyFrom("world")); + value = Value.toValue(byteArrays); + assertEquals(Type.array(Type.bytes()), value.getType()); + assertEquals(byteArrays, value.getBytesArray()); + + List bytes = Arrays.asList("hello".getBytes(), "world".getBytes()); + value = Value.toValue(bytes); + assertEquals(Type.array(Type.bytes()), value.getType()); + assertEquals(byteArrays, value.getBytesArray()); + + List intervals = Arrays.asList(Interval.ofDays(10), Interval.ofDays(20)); + value = Value.toValue(intervals); + assertEquals(Type.array(Type.interval()), value.getType()); + assertEquals(intervals, value.getIntervalArray()); + + List timestamps = Arrays.asList(Timestamp.now(), Timestamp.now()); + value = Value.toValue(timestamps); + assertEquals(Type.array(Type.timestamp()), value.getType()); + assertEquals(timestamps, value.getTimestampArray()); + + List dates = + Arrays.asList(Date.fromYearMonthDay(2024, 8, 23), Date.fromYearMonthDay(2024, 12, 27)); + value = Value.toValue(dates); + assertEquals(Type.array(Type.date()), value.getType()); + assertEquals(dates, value.getDateArray()); + + List uuids = Arrays.asList(UUID.randomUUID(), UUID.randomUUID()); + value = Value.toValue(uuids); + assertEquals(Type.array(Type.uuid()), value.getType()); + assertEquals(uuids, value.getUuidArray()); + + List localDates = + Arrays.asList(LocalDate.of(2024, 8, 23), LocalDate.of(2024, 12, 27)); + value = Value.toValue(localDates); + assertEquals(Type.array(Type.date()), value.getType()); + assertEquals(dates, value.getDateArray()); + + TimeZone defaultTimezone = TimeZone.getDefault(); + TimeZone.setDefault(TimeZone.getTimeZone("Asia/Kolkata")); + List localDateTimes = + Arrays.asList( + LocalDateTime.of(2024, 8, 23, 1, 49, 52, 10), + LocalDateTime.of(2024, 12, 27, 1, 49, 52, 10)); + value = Value.toValue(localDateTimes); + assertNull(value.getType()); + assertEquals( + 
Arrays.asList("2024-08-22T20:19:52.000Z", "2024-12-26T20:19:52.000Z"), + value.getAsStringList()); + TimeZone.setDefault(defaultTimezone); + + List offsetDateTimes = + Arrays.asList( + LocalDateTime.of(2024, 8, 23, 1, 49, 52, 10).atOffset(ZoneOffset.ofHours(1)), + LocalDateTime.of(2024, 12, 27, 1, 49, 52, 10).atOffset(ZoneOffset.ofHours(1))); + value = Value.toValue(offsetDateTimes); + assertNull(value.getType()); + assertEquals( + Arrays.asList("2024-08-23T00:49:52.000Z", "2024-12-27T00:49:52.000Z"), + value.getAsStringList()); + + List zonedDateTimes = + Arrays.asList( + LocalDateTime.of(2024, 8, 23, 1, 49, 52, 10).atZone(ZoneId.of("UTC")), + LocalDateTime.of(2024, 12, 27, 1, 49, 52, 10).atZone(ZoneId.of("UTC"))); + value = Value.toValue(zonedDateTimes); + assertNull(value.getType()); + assertEquals( + Arrays.asList("2024-08-23T01:49:52.000Z", "2024-12-27T01:49:52.000Z"), + value.getAsStringList()); + } + + private static class BrokenSerializationList extends ForwardingList + implements Serializable { + private static final long serialVersionUID = 1L; + private final List delegate; + + public static BrokenSerializationList of(T... 
values) { + return new BrokenSerializationList<>(Arrays.asList(values)); + } + + private BrokenSerializationList(List delegate) { + this.delegate = delegate; + } + + @Override + protected List delegate() { + return delegate; + } + + private void readObject(@SuppressWarnings("unused") java.io.ObjectInputStream unusedStream) { + throw new IllegalStateException("Serialization disabled"); + } + + private void writeObject(@SuppressWarnings("unused") java.io.ObjectOutputStream unusedStream) { + throw new IllegalStateException("Serialization disabled"); + } + } + + private void assertThrowsWithMessage(Supplier supplier, String message) { + try { + supplier.get(); + fail("Expected exception"); + } catch (Exception e) { + assertTrue( + "Expected exception message to contain: \"" + + message + + "\", actual: \"" + + e.getMessage() + + "\"", + e.getMessage().contains(message)); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/XGoogSpannerRequestIdTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/XGoogSpannerRequestIdTest.java new file mode 100644 index 000000000000..32d1ac29d2ef --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/XGoogSpannerRequestIdTest.java @@ -0,0 +1,316 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import io.grpc.Metadata; +import io.grpc.MethodDescriptor.MethodType; +import io.grpc.ServerCall; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptor; +import io.grpc.Status; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.regex.Matcher; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class XGoogSpannerRequestIdTest { + public static long NON_DETERMINISTIC = -1; + + @Test + public void testEquals() { + XGoogSpannerRequestId reqID1 = XGoogSpannerRequestId.of(1, 1, 1, 1); + XGoogSpannerRequestId reqID2 = XGoogSpannerRequestId.of(1, 1, 1, 1); + assertEquals(reqID1, reqID2); + assertEquals(reqID1, reqID1); + assertEquals(reqID2, reqID2); + + XGoogSpannerRequestId reqID3 = XGoogSpannerRequestId.of(1, 1, 1, 2); + assertNotEquals(reqID1, reqID3); + assertNotEquals(reqID3, reqID1); + assertEquals(reqID3, reqID3); + } + + @Test + public void testEnsureHexadecimalFormatForRandProcessID() { + String str = XGoogSpannerRequestId.of(1, 2, 3, 4).toString(); + Matcher m = XGoogSpannerRequestId.REGEX.matcher(str); + assertTrue(m.matches()); + } + + public static class ServerHeaderEnforcer implements ServerInterceptor { + private final Map> unaryResults = + new ConcurrentHashMap<>(); + private final Map> streamingResults = + new ConcurrentHashMap<>(); + private final List gotValues = new CopyOnWriteArrayList<>(); + private final Set checkMethods; + + 
ServerHeaderEnforcer(Set checkMethods) { + this.checkMethods = checkMethods; + } + + @Override + public ServerCall.Listener interceptCall( + ServerCall call, + final Metadata requestHeaders, + ServerCallHandler next) { + boolean isUnary = call.getMethodDescriptor().getType() == MethodType.UNARY; + String methodName = call.getMethodDescriptor().getFullMethodName(); + String gotReqIdStr = requestHeaders.get(XGoogSpannerRequestId.REQUEST_ID_HEADER_KEY); + if (!this.checkMethods.contains(methodName)) { + return next.startCall(call, requestHeaders); + } + + Map> saver = this.streamingResults; + if (isUnary) { + saver = this.unaryResults; + } + + if (Objects.equals(gotReqIdStr, null) || Objects.equals(gotReqIdStr, "")) { + Status status = + Status.fromCode(Status.Code.INVALID_ARGUMENT) + .augmentDescription( + methodName + " lacks " + XGoogSpannerRequestId.REQUEST_ID_HEADER_KEY); + call.close(status, requestHeaders); + return next.startCall(call, requestHeaders); + } + + assertNotNull(gotReqIdStr); + // Firstly assert and validate that at least we've got a requestId. + Matcher m = XGoogSpannerRequestId.REGEX.matcher(gotReqIdStr); + assertTrue(m.matches()); + + XGoogSpannerRequestId reqId = XGoogSpannerRequestId.of(gotReqIdStr); + if (!saver.containsKey(methodName)) { + saver.put(methodName, new CopyOnWriteArrayList()); + } + + saver.get(methodName).add(reqId); + + // Finally proceed with the call. 
+ return next.startCall(call, requestHeaders); + } + + public String[] accumulatedValues() { + return this.gotValues.toArray(new String[0]); + } + + public void assertIntegrity() { + this.unaryResults.forEach(this::assertMonotonicityOfIds); + this.streamingResults.forEach(this::assertMonotonicityOfIds); + } + + private void assertMonotonicityOfIds(String prefix, List reqIds) { + int size = reqIds.size(); + + List violations = new ArrayList<>(); + for (int i = 1; i < size; i++) { + XGoogSpannerRequestId prev = reqIds.get(i - 1); + XGoogSpannerRequestId curr = reqIds.get(i); + if (prev.isGreaterThan(curr)) { + violations.add(String.format("#%d(%s) > #%d(%s)", i - 1, prev, i, curr)); + } + } + + if (violations.isEmpty()) { + return; + } + + throw new IllegalStateException( + prefix + + " monotonicity violation:" + + String.join("\n\t", violations.toArray(new String[0]))); + } + + public MethodAndRequestId[] accumulatedUnaryValues() { + List accumulated = new ArrayList<>(); + this.unaryResults.forEach( + (String method, CopyOnWriteArrayList values) -> { + for (XGoogSpannerRequestId value : values) { + accumulated.add(new MethodAndRequestId(method, value)); + } + }); + return accumulated.toArray(new MethodAndRequestId[0]); + } + + public MethodAndRequestId[] accumulatedStreamingValues() { + List accumulated = new ArrayList<>(); + this.streamingResults.forEach( + (String method, CopyOnWriteArrayList values) -> { + for (XGoogSpannerRequestId value : values) { + accumulated.add(new MethodAndRequestId(method, value)); + } + }); + return accumulated.toArray(new MethodAndRequestId[0]); + } + + public void checkExpectedUnaryXGoogRequestIds(MethodAndRequestId... 
wantUnaryValues) { + MethodAndRequestId[] gotUnaryValues = this.accumulatedUnaryValues(); + sortValues(gotUnaryValues); + for (int i = 0; i < gotUnaryValues.length && false; i++) { + System.out.println("\033[33misUnary: #" + i + ":: " + gotUnaryValues[i] + "\033[00m"); + } + assertArrayEquals(wantUnaryValues, gotUnaryValues); + } + + public void checkAtLeastHasExpectedUnaryXGoogRequestIds(MethodAndRequestId... wantUnaryValues) { + MethodAndRequestId[] gotUnaryValues = this.accumulatedUnaryValues(); + sortValues(gotUnaryValues); + for (int i = 0; i < gotUnaryValues.length && false; i++) { + System.out.println("\033[33misUnary: #" + i + ":: " + gotUnaryValues[i] + "\033[00m"); + } + if (wantUnaryValues.length < gotUnaryValues.length) { + MethodAndRequestId[] gotSliced = + Arrays.copyOfRange(gotUnaryValues, 0, wantUnaryValues.length); + assertArrayEquals(wantUnaryValues, gotSliced); + } else { + assertArrayEquals(wantUnaryValues, gotUnaryValues); + } + } + + public void checkExpectedUnaryXGoogRequestIdsAsSuffixes(MethodAndRequestId... wantUnaryValues) { + MethodAndRequestId[] gotUnaryValues = this.accumulatedUnaryValues(); + sortValues(gotUnaryValues); + for (int i = 0; i < gotUnaryValues.length && false; i++) { + System.out.println("\033[33misUnary: #" + i + ":: " + gotUnaryValues[i] + "\033[00m"); + } + if (wantUnaryValues.length < gotUnaryValues.length) { + MethodAndRequestId[] gotSliced = + Arrays.copyOfRange( + gotUnaryValues, + gotUnaryValues.length - wantUnaryValues.length, + gotUnaryValues.length); + assertArrayEquals(wantUnaryValues, gotSliced); + } else { + assertArrayEquals(wantUnaryValues, gotUnaryValues); + } + } + + private void sortValues(MethodAndRequestId[] values) { + massageValues(values); + Arrays.sort(values, new MethodAndRequestIdComparator()); + } + + public void checkExpectedStreamingXGoogRequestIds(MethodAndRequestId... 
wantStreamingValues) { + MethodAndRequestId[] gotStreamingValues = this.accumulatedStreamingValues(); + for (int i = 0; i < gotStreamingValues.length && false; i++) { + System.out.println( + "\033[32misStreaming: #" + i + ":: " + gotStreamingValues[i] + "\033[00m"); + } + sortValues(gotStreamingValues); + assertArrayEquals(wantStreamingValues, gotStreamingValues); + } + + public void reset() { + this.gotValues.clear(); + this.unaryResults.clear(); + this.streamingResults.clear(); + } + } + + public static class MethodAndRequestId { + String method; + XGoogSpannerRequestId requestId; + + public MethodAndRequestId(String method, XGoogSpannerRequestId requestId) { + this.method = method; + this.requestId = requestId; + } + + public String toString() { + return "{" + this.method + ":" + this.requestId.debugToString() + "}"; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof MethodAndRequestId)) { + return false; + } + MethodAndRequestId other = (MethodAndRequestId) o; + return Objects.equals(this.method, other.method) + && Objects.equals(this.requestId, other.requestId); + } + } + + static class MethodAndRequestIdComparator implements Comparator { + @Override + public int compare(MethodAndRequestId mr1, MethodAndRequestId mr2) { + int cmpMethod = mr1.method.compareTo(mr2.method); + if (cmpMethod != 0) { + return cmpMethod; + } + + if (Objects.equals(mr1.requestId, mr2.requestId)) { + return 0; + } + if (mr1.requestId.isGreaterThan(mr2.requestId)) { + return +1; + } + return -1; + } + } + + static void massageValues(MethodAndRequestId[] mreqs) { + for (int i = 0; i < mreqs.length; i++) { + MethodAndRequestId mreq = mreqs[i]; + // BatchCreateSessions is so hard to control as the round-robin doling out + // hence we might need to be able to scrub the nth_request that won't match + // nth_req in consecutive order of nth_client. 
+ if (mreq.method.compareTo("google.spanner.v1.Spanner/BatchCreateSessions") == 0) { + mreqs[i] = + new MethodAndRequestId( + mreq.method, + mreq.requestId + .withNthRequest(NON_DETERMINISTIC) + .withChannelId(NON_DETERMINISTIC) + .withNthClientId(NON_DETERMINISTIC)); + } else if (mreq.method.compareTo("google.spanner.v1.Spanner/BeginTransaction") == 0 + || mreq.method.compareTo("google.spanner.v1.Spanner/ExecuteStreamingSql") == 0 + || mreq.method.compareTo("google.spanner.v1.Spanner/ExecuteSql") == 0 + || mreq.method.compareTo("google.spanner.v1.Spanner/CreateSession") == 0 + || mreq.method.compareTo("google.spanner.v1.Spanner/Commit") == 0) { + mreqs[i] = + new MethodAndRequestId(mreq.method, mreq.requestId.withNthClientId(NON_DETERMINISTIC)); + } + } + } + + public static MethodAndRequestId ofMethodAndRequestId(String method, String reqId) { + return new MethodAndRequestId(method, XGoogSpannerRequestId.of(reqId)); + } + + public static MethodAndRequestId ofMethodAndRequestId( + String method, XGoogSpannerRequestId reqId) { + return new MethodAndRequestId(method, reqId); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientHttpJsonTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientHttpJsonTest.java new file mode 100644 index 000000000000..f878f2c7e3b4 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientHttpJsonTest.java @@ -0,0 +1,3030 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.admin.database.v1; + +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupOperationsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupSchedulesPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseOperationsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseRolesPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabasesPagedResponse; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.httpjson.GaxHttpJsonProperties; +import com.google.api.gax.httpjson.testing.MockHttpService; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ApiExceptionFactory; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.testing.FakeStatusCode; +import com.google.api.resourcenames.ResourceName; +import com.google.cloud.spanner.admin.database.v1.stub.HttpJsonDatabaseAdminStub; +import com.google.common.collect.Lists; +import com.google.iam.v1.AuditConfig; +import com.google.iam.v1.Binding; +import com.google.iam.v1.Policy; +import com.google.iam.v1.TestIamPermissionsResponse; +import 
com.google.longrunning.Operation; +import com.google.protobuf.Any; +import com.google.protobuf.ByteString; +import com.google.protobuf.Duration; +import com.google.protobuf.Empty; +import com.google.protobuf.FieldMask; +import com.google.protobuf.Timestamp; +import com.google.spanner.admin.database.v1.AddSplitPointsResponse; +import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupInstancePartition; +import com.google.spanner.admin.database.v1.BackupName; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.BackupScheduleName; +import com.google.spanner.admin.database.v1.BackupScheduleSpec; +import com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.DatabaseDialect; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.DatabaseRole; +import com.google.spanner.admin.database.v1.EncryptionConfig; +import com.google.spanner.admin.database.v1.EncryptionInfo; +import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; +import com.google.spanner.admin.database.v1.InstanceName; +import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; +import com.google.spanner.admin.database.v1.ListBackupSchedulesResponse; +import com.google.spanner.admin.database.v1.ListBackupsResponse; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse; +import com.google.spanner.admin.database.v1.ListDatabaseRolesResponse; +import com.google.spanner.admin.database.v1.ListDatabasesResponse; +import com.google.spanner.admin.database.v1.RestoreInfo; +import com.google.spanner.admin.database.v1.SplitPoints; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.ExecutionException; +import 
javax.annotation.Generated; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@Generated("by gapic-generator-java") +public class DatabaseAdminClientHttpJsonTest { + private static MockHttpService mockService; + private static DatabaseAdminClient client; + + @BeforeClass + public static void startStaticServer() throws IOException { + mockService = + new MockHttpService( + HttpJsonDatabaseAdminStub.getMethodDescriptors(), + DatabaseAdminSettings.getDefaultEndpoint()); + DatabaseAdminSettings settings = + DatabaseAdminSettings.newHttpJsonBuilder() + .setTransportChannelProvider( + DatabaseAdminSettings.defaultHttpJsonTransportProviderBuilder() + .setHttpTransport(mockService) + .build()) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = DatabaseAdminClient.create(settings); + } + + @AfterClass + public static void stopServer() { + client.close(); + } + + @Before + public void setUp() {} + + @After + public void tearDown() throws Exception { + mockService.reset(); + } + + @Test + public void listDatabasesTest() throws Exception { + Database responsesElement = Database.newBuilder().build(); + ListDatabasesResponse expectedResponse = + ListDatabasesResponse.newBuilder() + .setNextPageToken("") + .addAllDatabases(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + + ListDatabasesPagedResponse pagedListResponse = client.listDatabases(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getDatabasesList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + 
.get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listDatabasesExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + client.listDatabases(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void listDatabasesTest2() throws Exception { + Database responsesElement = Database.newBuilder().build(); + ListDatabasesResponse expectedResponse = + ListDatabasesResponse.newBuilder() + .setNextPageToken("") + .addAllDatabases(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + String parent = "projects/project-8887/instances/instance-8887"; + + ListDatabasesPagedResponse pagedListResponse = client.listDatabases(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getDatabasesList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listDatabasesExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); 
+ mockService.addException(exception); + + try { + String parent = "projects/project-8887/instances/instance-8887"; + client.listDatabases(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void createDatabaseTest() throws Exception { + Database expectedResponse = + Database.newBuilder() + .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setRestoreInfo(RestoreInfo.newBuilder().build()) + .setEncryptionConfig(EncryptionConfig.newBuilder().build()) + .addAllEncryptionInfo(new ArrayList()) + .setVersionRetentionPeriod("versionRetentionPeriod-629783929") + .setEarliestVersionTime(Timestamp.newBuilder().build()) + .setDefaultLeader("defaultLeader759009962") + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .setEnableDropProtection(true) + .setReconciling(true) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createDatabaseTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + String createStatement = "createStatement744686547"; + + Database actualResponse = client.createDatabaseAsync(parent, createStatement).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createDatabaseExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), 
FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + String createStatement = "createStatement744686547"; + client.createDatabaseAsync(parent, createStatement).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void createDatabaseTest2() throws Exception { + Database expectedResponse = + Database.newBuilder() + .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setRestoreInfo(RestoreInfo.newBuilder().build()) + .setEncryptionConfig(EncryptionConfig.newBuilder().build()) + .addAllEncryptionInfo(new ArrayList()) + .setVersionRetentionPeriod("versionRetentionPeriod-629783929") + .setEarliestVersionTime(Timestamp.newBuilder().build()) + .setDefaultLeader("defaultLeader759009962") + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .setEnableDropProtection(true) + .setReconciling(true) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createDatabaseTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + String parent = "projects/project-8887/instances/instance-8887"; + String createStatement = "createStatement744686547"; + + Database actualResponse = client.createDatabaseAsync(parent, createStatement).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createDatabaseExceptionTest2() throws 
Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-8887/instances/instance-8887"; + String createStatement = "createStatement744686547"; + client.createDatabaseAsync(parent, createStatement).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void getDatabaseTest() throws Exception { + Database expectedResponse = + Database.newBuilder() + .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setRestoreInfo(RestoreInfo.newBuilder().build()) + .setEncryptionConfig(EncryptionConfig.newBuilder().build()) + .addAllEncryptionInfo(new ArrayList()) + .setVersionRetentionPeriod("versionRetentionPeriod-629783929") + .setEarliestVersionTime(Timestamp.newBuilder().build()) + .setDefaultLeader("defaultLeader759009962") + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .setEnableDropProtection(true) + .setReconciling(true) + .build(); + mockService.addResponse(expectedResponse); + + DatabaseName name = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + + Database actualResponse = client.getDatabase(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getDatabaseExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), 
FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + DatabaseName name = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + client.getDatabase(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getDatabaseTest2() throws Exception { + Database expectedResponse = + Database.newBuilder() + .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setRestoreInfo(RestoreInfo.newBuilder().build()) + .setEncryptionConfig(EncryptionConfig.newBuilder().build()) + .addAllEncryptionInfo(new ArrayList()) + .setVersionRetentionPeriod("versionRetentionPeriod-629783929") + .setEarliestVersionTime(Timestamp.newBuilder().build()) + .setDefaultLeader("defaultLeader759009962") + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .setEnableDropProtection(true) + .setReconciling(true) + .build(); + mockService.addResponse(expectedResponse); + + String name = "projects/project-5262/instances/instance-5262/databases/database-5262"; + + Database actualResponse = client.getDatabase(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getDatabaseExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = 
"projects/project-5262/instances/instance-5262/databases/database-5262"; + client.getDatabase(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void updateDatabaseTest() throws Exception { + Database expectedResponse = + Database.newBuilder() + .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setRestoreInfo(RestoreInfo.newBuilder().build()) + .setEncryptionConfig(EncryptionConfig.newBuilder().build()) + .addAllEncryptionInfo(new ArrayList()) + .setVersionRetentionPeriod("versionRetentionPeriod-629783929") + .setEarliestVersionTime(Timestamp.newBuilder().build()) + .setDefaultLeader("defaultLeader759009962") + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .setEnableDropProtection(true) + .setReconciling(true) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("updateDatabaseTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + Database database = + Database.newBuilder() + .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setRestoreInfo(RestoreInfo.newBuilder().build()) + .setEncryptionConfig(EncryptionConfig.newBuilder().build()) + .addAllEncryptionInfo(new ArrayList()) + .setVersionRetentionPeriod("versionRetentionPeriod-629783929") + .setEarliestVersionTime(Timestamp.newBuilder().build()) + .setDefaultLeader("defaultLeader759009962") + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .setEnableDropProtection(true) + .setReconciling(true) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + Database actualResponse = client.updateDatabaseAsync(database, updateMask).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + 
Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void updateDatabaseExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + Database database = + Database.newBuilder() + .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setRestoreInfo(RestoreInfo.newBuilder().build()) + .setEncryptionConfig(EncryptionConfig.newBuilder().build()) + .addAllEncryptionInfo(new ArrayList()) + .setVersionRetentionPeriod("versionRetentionPeriod-629783929") + .setEarliestVersionTime(Timestamp.newBuilder().build()) + .setDefaultLeader("defaultLeader759009962") + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .setEnableDropProtection(true) + .setReconciling(true) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateDatabaseAsync(database, updateMask).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void updateDatabaseDdlTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + Operation resultOperation = + Operation.newBuilder() + .setName("updateDatabaseDdlTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + List statements = new ArrayList<>(); + + client.updateDatabaseDdlAsync(database, statements).get(); + + List actualRequests = mockService.getRequestPaths(); + 
Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void updateDatabaseDdlExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + List statements = new ArrayList<>(); + client.updateDatabaseDdlAsync(database, statements).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void updateDatabaseDdlTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + Operation resultOperation = + Operation.newBuilder() + .setName("updateDatabaseDdlTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + String database = "projects/project-3102/instances/instance-3102/databases/database-3102"; + List statements = new ArrayList<>(); + + client.updateDatabaseDdlAsync(database, statements).get(); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void updateDatabaseDdlExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), 
FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String database = "projects/project-3102/instances/instance-3102/databases/database-3102"; + List statements = new ArrayList<>(); + client.updateDatabaseDdlAsync(database, statements).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void dropDatabaseTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + + client.dropDatabase(database); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void dropDatabaseExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + client.dropDatabase(database); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void dropDatabaseTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + String database = "projects/project-3102/instances/instance-3102/databases/database-3102"; + + client.dropDatabase(database); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void dropDatabaseExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String database = "projects/project-3102/instances/instance-3102/databases/database-3102"; + client.dropDatabase(database); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getDatabaseDdlTest() throws Exception { + GetDatabaseDdlResponse expectedResponse = + GetDatabaseDdlResponse.newBuilder() + .addAllStatements(new ArrayList()) + .setProtoDescriptors(ByteString.EMPTY) + .build(); + mockService.addResponse(expectedResponse); + + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + + GetDatabaseDdlResponse actualResponse = client.getDatabaseDdl(database); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getDatabaseDdlExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + client.getDatabaseDdl(database); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getDatabaseDdlTest2() throws Exception { + GetDatabaseDdlResponse expectedResponse = + GetDatabaseDdlResponse.newBuilder() + .addAllStatements(new ArrayList()) + .setProtoDescriptors(ByteString.EMPTY) + .build(); + mockService.addResponse(expectedResponse); + + String database = "projects/project-3102/instances/instance-3102/databases/database-3102"; + + GetDatabaseDdlResponse actualResponse = client.getDatabaseDdl(database); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getDatabaseDdlExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String database = "projects/project-3102/instances/instance-3102/databases/database-3102"; + client.getDatabaseDdl(database); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void setIamPolicyTest() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockService.addResponse(expectedResponse); + + ResourceName resource = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + Policy policy = Policy.newBuilder().build(); + + Policy actualResponse = client.setIamPolicy(resource, policy); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void setIamPolicyExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ResourceName resource = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + Policy policy = Policy.newBuilder().build(); + client.setIamPolicy(resource, policy); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void setIamPolicyTest2() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockService.addResponse(expectedResponse); + + String resource = "projects/project-1023/instances/instance-1023/databases/database-1023"; + Policy policy = Policy.newBuilder().build(); + + Policy actualResponse = client.setIamPolicy(resource, policy); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void setIamPolicyExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String resource = "projects/project-1023/instances/instance-1023/databases/database-1023"; + Policy policy = Policy.newBuilder().build(); + client.setIamPolicy(resource, policy); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getIamPolicyTest() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockService.addResponse(expectedResponse); + + ResourceName resource = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + + Policy actualResponse = client.getIamPolicy(resource); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getIamPolicyExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ResourceName resource = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + client.getIamPolicy(resource); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getIamPolicyTest2() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockService.addResponse(expectedResponse); + + String resource = "projects/project-1023/instances/instance-1023/databases/database-1023"; + + Policy actualResponse = client.getIamPolicy(resource); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getIamPolicyExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String resource = "projects/project-1023/instances/instance-1023/databases/database-1023"; + client.getIamPolicy(resource); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void testIamPermissionsTest() throws Exception { + TestIamPermissionsResponse expectedResponse = + TestIamPermissionsResponse.newBuilder().addAllPermissions(new ArrayList()).build(); + mockService.addResponse(expectedResponse); + + ResourceName resource = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + List permissions = new ArrayList<>(); + + TestIamPermissionsResponse actualResponse = client.testIamPermissions(resource, permissions); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void testIamPermissionsExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ResourceName resource = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + List permissions = new ArrayList<>(); + client.testIamPermissions(resource, permissions); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void testIamPermissionsTest2() throws Exception { + TestIamPermissionsResponse expectedResponse = + TestIamPermissionsResponse.newBuilder().addAllPermissions(new ArrayList()).build(); + mockService.addResponse(expectedResponse); + + String resource = "projects/project-1023/instances/instance-1023/databases/database-1023"; + List permissions = new ArrayList<>(); + + TestIamPermissionsResponse actualResponse = client.testIamPermissions(resource, permissions); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void testIamPermissionsExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String resource = "projects/project-1023/instances/instance-1023/databases/database-1023"; + List permissions = new ArrayList<>(); + client.testIamPermissions(resource, permissions); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createBackupTest() throws Exception { + Backup expectedResponse = + Backup.newBuilder() + .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setVersionTime(Timestamp.newBuilder().build()) + .setExpireTime(Timestamp.newBuilder().build()) + .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) + .addAllReferencingDatabases(new ArrayList()) + .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .addAllReferencingBackups(new ArrayList()) + .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) + .addAllInstancePartitions(new ArrayList()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createBackupTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + Backup backup = Backup.newBuilder().build(); + String backupId = "backupId2121930365"; + + Backup actualResponse = client.createBackupAsync(parent, backup, backupId).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void 
createBackupExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + Backup backup = Backup.newBuilder().build(); + String backupId = "backupId2121930365"; + client.createBackupAsync(parent, backup, backupId).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void createBackupTest2() throws Exception { + Backup expectedResponse = + Backup.newBuilder() + .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setVersionTime(Timestamp.newBuilder().build()) + .setExpireTime(Timestamp.newBuilder().build()) + .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) + .addAllReferencingDatabases(new ArrayList()) + .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .addAllReferencingBackups(new ArrayList()) + .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) + .addAllInstancePartitions(new ArrayList()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createBackupTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + String parent = "projects/project-8887/instances/instance-8887"; + Backup backup = Backup.newBuilder().build(); + String backupId = "backupId2121930365"; + + Backup actualResponse = 
client.createBackupAsync(parent, backup, backupId).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createBackupExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-8887/instances/instance-8887"; + Backup backup = Backup.newBuilder().build(); + String backupId = "backupId2121930365"; + client.createBackupAsync(parent, backup, backupId).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void copyBackupTest() throws Exception { + Backup expectedResponse = + Backup.newBuilder() + .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setVersionTime(Timestamp.newBuilder().build()) + .setExpireTime(Timestamp.newBuilder().build()) + .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) + .addAllReferencingDatabases(new ArrayList()) + .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .addAllReferencingBackups(new ArrayList()) + .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + 
.setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) + .addAllInstancePartitions(new ArrayList()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("copyBackupTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + String backupId = "backupId2121930365"; + BackupName sourceBackup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + Timestamp expireTime = Timestamp.newBuilder().build(); + + Backup actualResponse = + client.copyBackupAsync(parent, backupId, sourceBackup, expireTime).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void copyBackupExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + String backupId = "backupId2121930365"; + BackupName sourceBackup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + Timestamp expireTime = Timestamp.newBuilder().build(); + client.copyBackupAsync(parent, backupId, sourceBackup, expireTime).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void copyBackupTest2() throws Exception { + Backup expectedResponse = + Backup.newBuilder() + 
.setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setVersionTime(Timestamp.newBuilder().build()) + .setExpireTime(Timestamp.newBuilder().build()) + .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) + .addAllReferencingDatabases(new ArrayList()) + .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .addAllReferencingBackups(new ArrayList()) + .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) + .addAllInstancePartitions(new ArrayList()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("copyBackupTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + String backupId = "backupId2121930365"; + String sourceBackup = "sourceBackup823134653"; + Timestamp expireTime = Timestamp.newBuilder().build(); + + Backup actualResponse = + client.copyBackupAsync(parent, backupId, sourceBackup, expireTime).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void copyBackupExceptionTest2() throws Exception { + ApiException 
exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + String backupId = "backupId2121930365"; + String sourceBackup = "sourceBackup823134653"; + Timestamp expireTime = Timestamp.newBuilder().build(); + client.copyBackupAsync(parent, backupId, sourceBackup, expireTime).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void copyBackupTest3() throws Exception { + Backup expectedResponse = + Backup.newBuilder() + .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setVersionTime(Timestamp.newBuilder().build()) + .setExpireTime(Timestamp.newBuilder().build()) + .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) + .addAllReferencingDatabases(new ArrayList()) + .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .addAllReferencingBackups(new ArrayList()) + .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) + .addAllInstancePartitions(new ArrayList()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("copyBackupTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + String parent = "projects/project-8887/instances/instance-8887"; + String backupId = "backupId2121930365"; + BackupName sourceBackup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + 
Timestamp expireTime = Timestamp.newBuilder().build(); + + Backup actualResponse = + client.copyBackupAsync(parent, backupId, sourceBackup, expireTime).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void copyBackupExceptionTest3() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-8887/instances/instance-8887"; + String backupId = "backupId2121930365"; + BackupName sourceBackup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + Timestamp expireTime = Timestamp.newBuilder().build(); + client.copyBackupAsync(parent, backupId, sourceBackup, expireTime).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void copyBackupTest4() throws Exception { + Backup expectedResponse = + Backup.newBuilder() + .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setVersionTime(Timestamp.newBuilder().build()) + .setExpireTime(Timestamp.newBuilder().build()) + .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) + .addAllReferencingDatabases(new ArrayList()) + .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) + 
.setDatabaseDialect(DatabaseDialect.forNumber(0)) + .addAllReferencingBackups(new ArrayList()) + .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) + .addAllInstancePartitions(new ArrayList()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("copyBackupTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + String parent = "projects/project-8887/instances/instance-8887"; + String backupId = "backupId2121930365"; + String sourceBackup = "sourceBackup823134653"; + Timestamp expireTime = Timestamp.newBuilder().build(); + + Backup actualResponse = + client.copyBackupAsync(parent, backupId, sourceBackup, expireTime).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void copyBackupExceptionTest4() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-8887/instances/instance-8887"; + String backupId = "backupId2121930365"; + String sourceBackup = "sourceBackup823134653"; + Timestamp expireTime = Timestamp.newBuilder().build(); + client.copyBackupAsync(parent, backupId, sourceBackup, expireTime).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public 
void getBackupTest() throws Exception { + Backup expectedResponse = + Backup.newBuilder() + .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setVersionTime(Timestamp.newBuilder().build()) + .setExpireTime(Timestamp.newBuilder().build()) + .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) + .addAllReferencingDatabases(new ArrayList()) + .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .addAllReferencingBackups(new ArrayList()) + .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) + .addAllInstancePartitions(new ArrayList()) + .build(); + mockService.addResponse(expectedResponse); + + BackupName name = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + + Backup actualResponse = client.getBackup(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getBackupExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + BackupName name = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + 
client.getBackup(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getBackupTest2() throws Exception { + Backup expectedResponse = + Backup.newBuilder() + .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setVersionTime(Timestamp.newBuilder().build()) + .setExpireTime(Timestamp.newBuilder().build()) + .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) + .addAllReferencingDatabases(new ArrayList()) + .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .addAllReferencingBackups(new ArrayList()) + .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) + .addAllInstancePartitions(new ArrayList()) + .build(); + mockService.addResponse(expectedResponse); + + String name = "projects/project-3271/instances/instance-3271/backups/backup-3271"; + + Backup actualResponse = client.getBackup(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getBackupExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), 
FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = "projects/project-3271/instances/instance-3271/backups/backup-3271"; + client.getBackup(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void updateBackupTest() throws Exception { + Backup expectedResponse = + Backup.newBuilder() + .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setVersionTime(Timestamp.newBuilder().build()) + .setExpireTime(Timestamp.newBuilder().build()) + .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) + .addAllReferencingDatabases(new ArrayList()) + .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .addAllReferencingBackups(new ArrayList()) + .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) + .addAllInstancePartitions(new ArrayList()) + .build(); + mockService.addResponse(expectedResponse); + + Backup backup = + Backup.newBuilder() + .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setVersionTime(Timestamp.newBuilder().build()) + .setExpireTime(Timestamp.newBuilder().build()) + .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) + .addAllReferencingDatabases(new ArrayList()) + .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + 
.addAllEncryptionInformation(new ArrayList()) + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .addAllReferencingBackups(new ArrayList()) + .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) + .addAllInstancePartitions(new ArrayList()) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + Backup actualResponse = client.updateBackup(backup, updateMask); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void updateBackupExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + Backup backup = + Backup.newBuilder() + .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setVersionTime(Timestamp.newBuilder().build()) + .setExpireTime(Timestamp.newBuilder().build()) + .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) + .addAllReferencingDatabases(new ArrayList()) + .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .addAllReferencingBackups(new ArrayList()) + 
.setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) + .addAllInstancePartitions(new ArrayList()) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateBackup(backup, updateMask); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void deleteBackupTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + BackupName name = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + + client.deleteBackup(name); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteBackupExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + BackupName name = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + client.deleteBackup(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteBackupTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + String name = "projects/project-3271/instances/instance-3271/backups/backup-3271"; + + client.deleteBackup(name); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteBackupExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = "projects/project-3271/instances/instance-3271/backups/backup-3271"; + client.deleteBackup(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listBackupsTest() throws Exception { + Backup responsesElement = Backup.newBuilder().build(); + ListBackupsResponse expectedResponse = + ListBackupsResponse.newBuilder() + .setNextPageToken("") + .addAllBackups(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + + ListBackupsPagedResponse pagedListResponse = client.listBackups(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getBackupsList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listBackupsExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + client.listBackups(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listBackupsTest2() throws Exception { + Backup responsesElement = Backup.newBuilder().build(); + ListBackupsResponse expectedResponse = + ListBackupsResponse.newBuilder() + .setNextPageToken("") + .addAllBackups(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + String parent = "projects/project-8887/instances/instance-8887"; + + ListBackupsPagedResponse pagedListResponse = client.listBackups(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getBackupsList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listBackupsExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-8887/instances/instance-8887"; + client.listBackups(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void restoreDatabaseTest() throws Exception { + Database expectedResponse = + Database.newBuilder() + .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setRestoreInfo(RestoreInfo.newBuilder().build()) + .setEncryptionConfig(EncryptionConfig.newBuilder().build()) + .addAllEncryptionInfo(new ArrayList()) + .setVersionRetentionPeriod("versionRetentionPeriod-629783929") + .setEarliestVersionTime(Timestamp.newBuilder().build()) + .setDefaultLeader("defaultLeader759009962") + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .setEnableDropProtection(true) + .setReconciling(true) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("restoreDatabaseTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + String databaseId = "databaseId1688905718"; + BackupName backup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + + Database actualResponse = client.restoreDatabaseAsync(parent, databaseId, backup).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void restoreDatabaseExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + String databaseId = 
"databaseId1688905718"; + BackupName backup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + client.restoreDatabaseAsync(parent, databaseId, backup).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void restoreDatabaseTest2() throws Exception { + Database expectedResponse = + Database.newBuilder() + .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setRestoreInfo(RestoreInfo.newBuilder().build()) + .setEncryptionConfig(EncryptionConfig.newBuilder().build()) + .addAllEncryptionInfo(new ArrayList()) + .setVersionRetentionPeriod("versionRetentionPeriod-629783929") + .setEarliestVersionTime(Timestamp.newBuilder().build()) + .setDefaultLeader("defaultLeader759009962") + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .setEnableDropProtection(true) + .setReconciling(true) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("restoreDatabaseTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + String databaseId = "databaseId1688905718"; + String backup = "backup-1396673086"; + + Database actualResponse = client.restoreDatabaseAsync(parent, databaseId, backup).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void restoreDatabaseExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new 
Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + String databaseId = "databaseId1688905718"; + String backup = "backup-1396673086"; + client.restoreDatabaseAsync(parent, databaseId, backup).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void restoreDatabaseTest3() throws Exception { + Database expectedResponse = + Database.newBuilder() + .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setRestoreInfo(RestoreInfo.newBuilder().build()) + .setEncryptionConfig(EncryptionConfig.newBuilder().build()) + .addAllEncryptionInfo(new ArrayList()) + .setVersionRetentionPeriod("versionRetentionPeriod-629783929") + .setEarliestVersionTime(Timestamp.newBuilder().build()) + .setDefaultLeader("defaultLeader759009962") + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .setEnableDropProtection(true) + .setReconciling(true) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("restoreDatabaseTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + String parent = "projects/project-8887/instances/instance-8887"; + String databaseId = "databaseId1688905718"; + BackupName backup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + + Database actualResponse = client.restoreDatabaseAsync(parent, databaseId, backup).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + 
GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void restoreDatabaseExceptionTest3() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-8887/instances/instance-8887"; + String databaseId = "databaseId1688905718"; + BackupName backup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + client.restoreDatabaseAsync(parent, databaseId, backup).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void restoreDatabaseTest4() throws Exception { + Database expectedResponse = + Database.newBuilder() + .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setRestoreInfo(RestoreInfo.newBuilder().build()) + .setEncryptionConfig(EncryptionConfig.newBuilder().build()) + .addAllEncryptionInfo(new ArrayList()) + .setVersionRetentionPeriod("versionRetentionPeriod-629783929") + .setEarliestVersionTime(Timestamp.newBuilder().build()) + .setDefaultLeader("defaultLeader759009962") + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .setEnableDropProtection(true) + .setReconciling(true) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("restoreDatabaseTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + String parent = "projects/project-8887/instances/instance-8887"; + String databaseId = "databaseId1688905718"; + String backup = "backup-1396673086"; + + Database actualResponse = client.restoreDatabaseAsync(parent, databaseId, backup).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, 
actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void restoreDatabaseExceptionTest4() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-8887/instances/instance-8887"; + String databaseId = "databaseId1688905718"; + String backup = "backup-1396673086"; + client.restoreDatabaseAsync(parent, databaseId, backup).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void listDatabaseOperationsTest() throws Exception { + Operation responsesElement = Operation.newBuilder().build(); + ListDatabaseOperationsResponse expectedResponse = + ListDatabaseOperationsResponse.newBuilder() + .setNextPageToken("") + .addAllOperations(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + + ListDatabaseOperationsPagedResponse pagedListResponse = client.listDatabaseOperations(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getOperationsList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + 
.matches()); + } + + @Test + public void listDatabaseOperationsExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + client.listDatabaseOperations(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void listDatabaseOperationsTest2() throws Exception { + Operation responsesElement = Operation.newBuilder().build(); + ListDatabaseOperationsResponse expectedResponse = + ListDatabaseOperationsResponse.newBuilder() + .setNextPageToken("") + .addAllOperations(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + String parent = "projects/project-8887/instances/instance-8887"; + + ListDatabaseOperationsPagedResponse pagedListResponse = client.listDatabaseOperations(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getOperationsList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listDatabaseOperationsExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-8887/instances/instance-8887"; + 
client.listDatabaseOperations(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void listBackupOperationsTest() throws Exception { + Operation responsesElement = Operation.newBuilder().build(); + ListBackupOperationsResponse expectedResponse = + ListBackupOperationsResponse.newBuilder() + .setNextPageToken("") + .addAllOperations(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + + ListBackupOperationsPagedResponse pagedListResponse = client.listBackupOperations(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getOperationsList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listBackupOperationsExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + client.listBackupOperations(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listBackupOperationsTest2() throws Exception { + Operation responsesElement = Operation.newBuilder().build(); + ListBackupOperationsResponse expectedResponse = + ListBackupOperationsResponse.newBuilder() + .setNextPageToken("") + .addAllOperations(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + String parent = "projects/project-8887/instances/instance-8887"; + + ListBackupOperationsPagedResponse pagedListResponse = client.listBackupOperations(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getOperationsList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listBackupOperationsExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-8887/instances/instance-8887"; + client.listBackupOperations(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listDatabaseRolesTest() throws Exception { + DatabaseRole responsesElement = DatabaseRole.newBuilder().build(); + ListDatabaseRolesResponse expectedResponse = + ListDatabaseRolesResponse.newBuilder() + .setNextPageToken("") + .addAllDatabaseRoles(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + + ListDatabaseRolesPagedResponse pagedListResponse = client.listDatabaseRoles(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getDatabaseRolesList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listDatabaseRolesExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + client.listDatabaseRoles(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listDatabaseRolesTest2() throws Exception { + DatabaseRole responsesElement = DatabaseRole.newBuilder().build(); + ListDatabaseRolesResponse expectedResponse = + ListDatabaseRolesResponse.newBuilder() + .setNextPageToken("") + .addAllDatabaseRoles(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + String parent = "projects/project-9347/instances/instance-9347/databases/database-9347"; + + ListDatabaseRolesPagedResponse pagedListResponse = client.listDatabaseRoles(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getDatabaseRolesList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listDatabaseRolesExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-9347/instances/instance-9347/databases/database-9347"; + client.listDatabaseRoles(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void addSplitPointsTest() throws Exception { + AddSplitPointsResponse expectedResponse = AddSplitPointsResponse.newBuilder().build(); + mockService.addResponse(expectedResponse); + + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + List splitPoints = new ArrayList<>(); + + AddSplitPointsResponse actualResponse = client.addSplitPoints(database, splitPoints); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void addSplitPointsExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + List splitPoints = new ArrayList<>(); + client.addSplitPoints(database, splitPoints); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void addSplitPointsTest2() throws Exception { + AddSplitPointsResponse expectedResponse = AddSplitPointsResponse.newBuilder().build(); + mockService.addResponse(expectedResponse); + + String database = "projects/project-3102/instances/instance-3102/databases/database-3102"; + List splitPoints = new ArrayList<>(); + + AddSplitPointsResponse actualResponse = client.addSplitPoints(database, splitPoints); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void addSplitPointsExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String database = "projects/project-3102/instances/instance-3102/databases/database-3102"; + List splitPoints = new ArrayList<>(); + client.addSplitPoints(database, splitPoints); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createBackupScheduleTest() throws Exception { + BackupSchedule expectedResponse = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + BackupSchedule backupSchedule = BackupSchedule.newBuilder().build(); + String backupScheduleId = "backupScheduleId1704974708"; + + BackupSchedule actualResponse = + client.createBackupSchedule(parent, backupSchedule, backupScheduleId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createBackupScheduleExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + BackupSchedule backupSchedule = BackupSchedule.newBuilder().build(); + String backupScheduleId = "backupScheduleId1704974708"; + client.createBackupSchedule(parent, backupSchedule, backupScheduleId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createBackupScheduleTest2() throws Exception { + BackupSchedule expectedResponse = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + String parent = "projects/project-9347/instances/instance-9347/databases/database-9347"; + BackupSchedule backupSchedule = BackupSchedule.newBuilder().build(); + String backupScheduleId = "backupScheduleId1704974708"; + + BackupSchedule actualResponse = + client.createBackupSchedule(parent, backupSchedule, backupScheduleId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createBackupScheduleExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-9347/instances/instance-9347/databases/database-9347"; + BackupSchedule backupSchedule = BackupSchedule.newBuilder().build(); + String backupScheduleId = "backupScheduleId1704974708"; + client.createBackupSchedule(parent, backupSchedule, backupScheduleId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getBackupScheduleTest() throws Exception { + BackupSchedule expectedResponse = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + BackupScheduleName name = + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]"); + + BackupSchedule actualResponse = client.getBackupSchedule(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getBackupScheduleExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + BackupScheduleName name = + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]"); + client.getBackupSchedule(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getBackupScheduleTest2() throws Exception { + BackupSchedule expectedResponse = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + String name = + "projects/project-8764/instances/instance-8764/databases/database-8764/backupSchedules/backupSchedule-8764"; + + BackupSchedule actualResponse = client.getBackupSchedule(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getBackupScheduleExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = + "projects/project-8764/instances/instance-8764/databases/database-8764/backupSchedules/backupSchedule-8764"; + client.getBackupSchedule(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void updateBackupScheduleTest() throws Exception { + BackupSchedule expectedResponse = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + BackupSchedule backupSchedule = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + BackupSchedule actualResponse = client.updateBackupSchedule(backupSchedule, updateMask); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void updateBackupScheduleExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + BackupSchedule backupSchedule = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + 
.setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateBackupSchedule(backupSchedule, updateMask); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void deleteBackupScheduleTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + BackupScheduleName name = + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]"); + + client.deleteBackupSchedule(name); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteBackupScheduleExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + BackupScheduleName name = + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]"); + client.deleteBackupSchedule(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteBackupScheduleTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + String name = + "projects/project-8764/instances/instance-8764/databases/database-8764/backupSchedules/backupSchedule-8764"; + + client.deleteBackupSchedule(name); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteBackupScheduleExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = + "projects/project-8764/instances/instance-8764/databases/database-8764/backupSchedules/backupSchedule-8764"; + client.deleteBackupSchedule(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listBackupSchedulesTest() throws Exception { + BackupSchedule responsesElement = BackupSchedule.newBuilder().build(); + ListBackupSchedulesResponse expectedResponse = + ListBackupSchedulesResponse.newBuilder() + .setNextPageToken("") + .addAllBackupSchedules(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + + ListBackupSchedulesPagedResponse pagedListResponse = client.listBackupSchedules(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getBackupSchedulesList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listBackupSchedulesExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + client.listBackupSchedules(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listBackupSchedulesTest2() throws Exception { + BackupSchedule responsesElement = BackupSchedule.newBuilder().build(); + ListBackupSchedulesResponse expectedResponse = + ListBackupSchedulesResponse.newBuilder() + .setNextPageToken("") + .addAllBackupSchedules(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + String parent = "projects/project-9347/instances/instance-9347/databases/database-9347"; + + ListBackupSchedulesPagedResponse pagedListResponse = client.listBackupSchedules(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getBackupSchedulesList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listBackupSchedulesExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-9347/instances/instance-9347/databases/database-9347"; + client.listBackupSchedules(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void internalUpdateGraphOperationUnsupportedMethodTest() throws Exception { + // The internalUpdateGraphOperation() method is not supported in REST transport. + // This empty test is generated for technical reasons. 
+ } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientTest.java new file mode 100644 index 000000000000..380a0dd4d9ba --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientTest.java @@ -0,0 +1,2827 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.admin.database.v1; + +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupOperationsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupSchedulesPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseOperationsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseRolesPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabasesPagedResponse; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.resourcenames.ResourceName; +import com.google.common.collect.Lists; +import com.google.iam.v1.AuditConfig; +import com.google.iam.v1.Binding; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Any; +import com.google.protobuf.ByteString; +import com.google.protobuf.Duration; +import com.google.protobuf.Empty; +import com.google.protobuf.FieldMask; +import com.google.protobuf.Timestamp; +import com.google.spanner.admin.database.v1.AddSplitPointsRequest; +import 
com.google.spanner.admin.database.v1.AddSplitPointsResponse; +import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupInstancePartition; +import com.google.spanner.admin.database.v1.BackupName; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.BackupScheduleName; +import com.google.spanner.admin.database.v1.BackupScheduleSpec; +import com.google.spanner.admin.database.v1.CopyBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig; +import com.google.spanner.admin.database.v1.CreateBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupScheduleRequest; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.DatabaseDialect; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.DatabaseRole; +import com.google.spanner.admin.database.v1.DeleteBackupRequest; +import com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest; +import com.google.spanner.admin.database.v1.DropDatabaseRequest; +import com.google.spanner.admin.database.v1.EncryptionConfig; +import com.google.spanner.admin.database.v1.EncryptionInfo; +import com.google.spanner.admin.database.v1.GetBackupRequest; +import com.google.spanner.admin.database.v1.GetBackupScheduleRequest; +import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; +import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; +import com.google.spanner.admin.database.v1.GetDatabaseRequest; +import com.google.spanner.admin.database.v1.InstanceName; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse; +import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; +import 
com.google.spanner.admin.database.v1.ListBackupOperationsResponse; +import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; +import com.google.spanner.admin.database.v1.ListBackupSchedulesResponse; +import com.google.spanner.admin.database.v1.ListBackupsRequest; +import com.google.spanner.admin.database.v1.ListBackupsResponse; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse; +import com.google.spanner.admin.database.v1.ListDatabaseRolesRequest; +import com.google.spanner.admin.database.v1.ListDatabaseRolesResponse; +import com.google.spanner.admin.database.v1.ListDatabasesRequest; +import com.google.spanner.admin.database.v1.ListDatabasesResponse; +import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; +import com.google.spanner.admin.database.v1.RestoreInfo; +import com.google.spanner.admin.database.v1.SplitPoints; +import com.google.spanner.admin.database.v1.UpdateBackupRequest; +import com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; +import com.google.spanner.admin.database.v1.UpdateDatabaseRequest; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import javax.annotation.Generated; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@Generated("by gapic-generator-java") +public class DatabaseAdminClientTest { + private static MockDatabaseAdmin mockDatabaseAdmin; + private static MockServiceHelper mockServiceHelper; + private LocalChannelProvider channelProvider; + private DatabaseAdminClient client; + + @BeforeClass + public static void startStaticServer() { + 
mockDatabaseAdmin = new MockDatabaseAdmin(); + mockServiceHelper = + new MockServiceHelper( + UUID.randomUUID().toString(), Arrays.asList(mockDatabaseAdmin)); + mockServiceHelper.start(); + } + + @AfterClass + public static void stopServer() { + mockServiceHelper.stop(); + } + + @Before + public void setUp() throws IOException { + mockServiceHelper.reset(); + channelProvider = mockServiceHelper.createChannelProvider(); + DatabaseAdminSettings settings = + DatabaseAdminSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = DatabaseAdminClient.create(settings); + } + + @After + public void tearDown() throws Exception { + client.close(); + } + + @Test + public void listDatabasesTest() throws Exception { + Database responsesElement = Database.newBuilder().build(); + ListDatabasesResponse expectedResponse = + ListDatabasesResponse.newBuilder() + .setNextPageToken("") + .addAllDatabases(Arrays.asList(responsesElement)) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + + ListDatabasesPagedResponse pagedListResponse = client.listDatabases(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getDatabasesList().get(0), resources.get(0)); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListDatabasesRequest actualRequest = ((ListDatabasesRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listDatabasesExceptionTest() throws Exception { + StatusRuntimeException exception = new 
StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + client.listDatabases(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void listDatabasesTest2() throws Exception { + Database responsesElement = Database.newBuilder().build(); + ListDatabasesResponse expectedResponse = + ListDatabasesResponse.newBuilder() + .setNextPageToken("") + .addAllDatabases(Arrays.asList(responsesElement)) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListDatabasesPagedResponse pagedListResponse = client.listDatabases(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getDatabasesList().get(0), resources.get(0)); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListDatabasesRequest actualRequest = ((ListDatabasesRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listDatabasesExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + client.listDatabases(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createDatabaseTest() throws Exception { + Database expectedResponse = + Database.newBuilder() + .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setRestoreInfo(RestoreInfo.newBuilder().build()) + .setEncryptionConfig(EncryptionConfig.newBuilder().build()) + .addAllEncryptionInfo(new ArrayList()) + .setVersionRetentionPeriod("versionRetentionPeriod-629783929") + .setEarliestVersionTime(Timestamp.newBuilder().build()) + .setDefaultLeader("defaultLeader759009962") + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .setEnableDropProtection(true) + .setReconciling(true) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createDatabaseTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockDatabaseAdmin.addResponse(resultOperation); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + String createStatement = "createStatement744686547"; + + Database actualResponse = client.createDatabaseAsync(parent, createStatement).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateDatabaseRequest actualRequest = ((CreateDatabaseRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(createStatement, actualRequest.getCreateStatement()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createDatabaseExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + 
String createStatement = "createStatement744686547"; + client.createDatabaseAsync(parent, createStatement).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void createDatabaseTest2() throws Exception { + Database expectedResponse = + Database.newBuilder() + .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setRestoreInfo(RestoreInfo.newBuilder().build()) + .setEncryptionConfig(EncryptionConfig.newBuilder().build()) + .addAllEncryptionInfo(new ArrayList()) + .setVersionRetentionPeriod("versionRetentionPeriod-629783929") + .setEarliestVersionTime(Timestamp.newBuilder().build()) + .setDefaultLeader("defaultLeader759009962") + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .setEnableDropProtection(true) + .setReconciling(true) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createDatabaseTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockDatabaseAdmin.addResponse(resultOperation); + + String parent = "parent-995424086"; + String createStatement = "createStatement744686547"; + + Database actualResponse = client.createDatabaseAsync(parent, createStatement).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateDatabaseRequest actualRequest = ((CreateDatabaseRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(createStatement, actualRequest.getCreateStatement()); + Assert.assertTrue( + channelProvider.isHeaderSent( + 
ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createDatabaseExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + String createStatement = "createStatement744686547"; + client.createDatabaseAsync(parent, createStatement).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void getDatabaseTest() throws Exception { + Database expectedResponse = + Database.newBuilder() + .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setRestoreInfo(RestoreInfo.newBuilder().build()) + .setEncryptionConfig(EncryptionConfig.newBuilder().build()) + .addAllEncryptionInfo(new ArrayList()) + .setVersionRetentionPeriod("versionRetentionPeriod-629783929") + .setEarliestVersionTime(Timestamp.newBuilder().build()) + .setDefaultLeader("defaultLeader759009962") + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .setEnableDropProtection(true) + .setReconciling(true) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + DatabaseName name = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + + Database actualResponse = client.getDatabase(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetDatabaseRequest actualRequest = ((GetDatabaseRequest) actualRequests.get(0)); + + 
Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getDatabaseExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + DatabaseName name = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + client.getDatabase(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getDatabaseTest2() throws Exception { + Database expectedResponse = + Database.newBuilder() + .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setRestoreInfo(RestoreInfo.newBuilder().build()) + .setEncryptionConfig(EncryptionConfig.newBuilder().build()) + .addAllEncryptionInfo(new ArrayList()) + .setVersionRetentionPeriod("versionRetentionPeriod-629783929") + .setEarliestVersionTime(Timestamp.newBuilder().build()) + .setDefaultLeader("defaultLeader759009962") + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .setEnableDropProtection(true) + .setReconciling(true) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String name = "name3373707"; + + Database actualResponse = client.getDatabase(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetDatabaseRequest actualRequest = ((GetDatabaseRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + 
public void getDatabaseExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String name = "name3373707"; + client.getDatabase(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void updateDatabaseTest() throws Exception { + Database expectedResponse = + Database.newBuilder() + .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setRestoreInfo(RestoreInfo.newBuilder().build()) + .setEncryptionConfig(EncryptionConfig.newBuilder().build()) + .addAllEncryptionInfo(new ArrayList()) + .setVersionRetentionPeriod("versionRetentionPeriod-629783929") + .setEarliestVersionTime(Timestamp.newBuilder().build()) + .setDefaultLeader("defaultLeader759009962") + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .setEnableDropProtection(true) + .setReconciling(true) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("updateDatabaseTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockDatabaseAdmin.addResponse(resultOperation); + + Database database = Database.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + Database actualResponse = client.updateDatabaseAsync(database, updateMask).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + UpdateDatabaseRequest actualRequest = ((UpdateDatabaseRequest) actualRequests.get(0)); + + Assert.assertEquals(database, actualRequest.getDatabase()); + Assert.assertEquals(updateMask, actualRequest.getUpdateMask()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + 
GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void updateDatabaseExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + Database database = Database.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateDatabaseAsync(database, updateMask).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void updateDatabaseDdlTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + Operation resultOperation = + Operation.newBuilder() + .setName("updateDatabaseDdlTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockDatabaseAdmin.addResponse(resultOperation); + + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + List statements = new ArrayList<>(); + + client.updateDatabaseDdlAsync(database, statements).get(); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + UpdateDatabaseDdlRequest actualRequest = ((UpdateDatabaseDdlRequest) actualRequests.get(0)); + + Assert.assertEquals(database.toString(), actualRequest.getDatabase()); + Assert.assertEquals(statements, actualRequest.getStatementsList()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void updateDatabaseDdlExceptionTest() throws Exception { + StatusRuntimeException exception = new 
StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + List statements = new ArrayList<>(); + client.updateDatabaseDdlAsync(database, statements).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void updateDatabaseDdlTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + Operation resultOperation = + Operation.newBuilder() + .setName("updateDatabaseDdlTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockDatabaseAdmin.addResponse(resultOperation); + + String database = "database1789464955"; + List statements = new ArrayList<>(); + + client.updateDatabaseDdlAsync(database, statements).get(); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + UpdateDatabaseDdlRequest actualRequest = ((UpdateDatabaseDdlRequest) actualRequests.get(0)); + + Assert.assertEquals(database, actualRequest.getDatabase()); + Assert.assertEquals(statements, actualRequest.getStatementsList()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void updateDatabaseDdlExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String database = "database1789464955"; + List statements = new ArrayList<>(); + client.updateDatabaseDdlAsync(database, 
statements).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void dropDatabaseTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + + client.dropDatabase(database); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DropDatabaseRequest actualRequest = ((DropDatabaseRequest) actualRequests.get(0)); + + Assert.assertEquals(database.toString(), actualRequest.getDatabase()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void dropDatabaseExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + client.dropDatabase(database); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void dropDatabaseTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String database = "database1789464955"; + + client.dropDatabase(database); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DropDatabaseRequest actualRequest = ((DropDatabaseRequest) actualRequests.get(0)); + + Assert.assertEquals(database, actualRequest.getDatabase()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void dropDatabaseExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String database = "database1789464955"; + client.dropDatabase(database); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getDatabaseDdlTest() throws Exception { + GetDatabaseDdlResponse expectedResponse = + GetDatabaseDdlResponse.newBuilder() + .addAllStatements(new ArrayList()) + .setProtoDescriptors(ByteString.EMPTY) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + + GetDatabaseDdlResponse actualResponse = client.getDatabaseDdl(database); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetDatabaseDdlRequest actualRequest = ((GetDatabaseDdlRequest) actualRequests.get(0)); + + Assert.assertEquals(database.toString(), actualRequest.getDatabase()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getDatabaseDdlExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + client.getDatabaseDdl(database); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getDatabaseDdlTest2() throws Exception { + GetDatabaseDdlResponse expectedResponse = + GetDatabaseDdlResponse.newBuilder() + .addAllStatements(new ArrayList()) + .setProtoDescriptors(ByteString.EMPTY) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String database = "database1789464955"; + + GetDatabaseDdlResponse actualResponse = client.getDatabaseDdl(database); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetDatabaseDdlRequest actualRequest = ((GetDatabaseDdlRequest) actualRequests.get(0)); + + Assert.assertEquals(database, actualRequest.getDatabase()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getDatabaseDdlExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String database = "database1789464955"; + client.getDatabaseDdl(database); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void setIamPolicyTest() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + ResourceName resource = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + Policy policy = Policy.newBuilder().build(); + + Policy actualResponse = client.setIamPolicy(resource, policy); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + SetIamPolicyRequest actualRequest = ((SetIamPolicyRequest) actualRequests.get(0)); + + Assert.assertEquals(resource.toString(), actualRequest.getResource()); + Assert.assertEquals(policy, actualRequest.getPolicy()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void setIamPolicyExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + ResourceName resource = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + Policy policy = Policy.newBuilder().build(); + client.setIamPolicy(resource, policy); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void setIamPolicyTest2() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String resource = "resource-341064690"; + Policy policy = Policy.newBuilder().build(); + + Policy actualResponse = client.setIamPolicy(resource, policy); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + SetIamPolicyRequest actualRequest = ((SetIamPolicyRequest) actualRequests.get(0)); + + Assert.assertEquals(resource, actualRequest.getResource()); + Assert.assertEquals(policy, actualRequest.getPolicy()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void setIamPolicyExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String resource = "resource-341064690"; + Policy policy = Policy.newBuilder().build(); + client.setIamPolicy(resource, policy); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getIamPolicyTest() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + ResourceName resource = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + + Policy actualResponse = client.getIamPolicy(resource); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetIamPolicyRequest actualRequest = ((GetIamPolicyRequest) actualRequests.get(0)); + + Assert.assertEquals(resource.toString(), actualRequest.getResource()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getIamPolicyExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + ResourceName resource = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + client.getIamPolicy(resource); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getIamPolicyTest2() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String resource = "resource-341064690"; + + Policy actualResponse = client.getIamPolicy(resource); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetIamPolicyRequest actualRequest = ((GetIamPolicyRequest) actualRequests.get(0)); + + Assert.assertEquals(resource, actualRequest.getResource()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getIamPolicyExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String resource = "resource-341064690"; + client.getIamPolicy(resource); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void testIamPermissionsTest() throws Exception { + TestIamPermissionsResponse expectedResponse = + TestIamPermissionsResponse.newBuilder().addAllPermissions(new ArrayList()).build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + ResourceName resource = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + List permissions = new ArrayList<>(); + + TestIamPermissionsResponse actualResponse = client.testIamPermissions(resource, permissions); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + TestIamPermissionsRequest actualRequest = ((TestIamPermissionsRequest) actualRequests.get(0)); + + Assert.assertEquals(resource.toString(), actualRequest.getResource()); + Assert.assertEquals(permissions, actualRequest.getPermissionsList()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void testIamPermissionsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + ResourceName resource = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + List permissions = new ArrayList<>(); + client.testIamPermissions(resource, permissions); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void testIamPermissionsTest2() throws Exception { + TestIamPermissionsResponse expectedResponse = + TestIamPermissionsResponse.newBuilder().addAllPermissions(new ArrayList()).build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String resource = "resource-341064690"; + List permissions = new ArrayList<>(); + + TestIamPermissionsResponse actualResponse = client.testIamPermissions(resource, permissions); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + TestIamPermissionsRequest actualRequest = ((TestIamPermissionsRequest) actualRequests.get(0)); + + Assert.assertEquals(resource, actualRequest.getResource()); + Assert.assertEquals(permissions, actualRequest.getPermissionsList()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void testIamPermissionsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String resource = "resource-341064690"; + List permissions = new ArrayList<>(); + client.testIamPermissions(resource, permissions); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createBackupTest() throws Exception { + Backup expectedResponse = + Backup.newBuilder() + .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setVersionTime(Timestamp.newBuilder().build()) + .setExpireTime(Timestamp.newBuilder().build()) + .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) + .addAllReferencingDatabases(new ArrayList()) + .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .addAllReferencingBackups(new ArrayList()) + .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) + .addAllInstancePartitions(new ArrayList()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createBackupTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockDatabaseAdmin.addResponse(resultOperation); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + Backup backup = Backup.newBuilder().build(); + String backupId = "backupId2121930365"; + + Backup actualResponse = client.createBackupAsync(parent, backup, backupId).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateBackupRequest actualRequest = ((CreateBackupRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(backup, actualRequest.getBackup()); + Assert.assertEquals(backupId, actualRequest.getBackupId()); + Assert.assertTrue( + 
channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createBackupExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + Backup backup = Backup.newBuilder().build(); + String backupId = "backupId2121930365"; + client.createBackupAsync(parent, backup, backupId).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void createBackupTest2() throws Exception { + Backup expectedResponse = + Backup.newBuilder() + .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setVersionTime(Timestamp.newBuilder().build()) + .setExpireTime(Timestamp.newBuilder().build()) + .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) + .addAllReferencingDatabases(new ArrayList()) + .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .addAllReferencingBackups(new ArrayList()) + .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) + .addAllInstancePartitions(new ArrayList()) + .build(); + Operation 
resultOperation = + Operation.newBuilder() + .setName("createBackupTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockDatabaseAdmin.addResponse(resultOperation); + + String parent = "parent-995424086"; + Backup backup = Backup.newBuilder().build(); + String backupId = "backupId2121930365"; + + Backup actualResponse = client.createBackupAsync(parent, backup, backupId).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateBackupRequest actualRequest = ((CreateBackupRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(backup, actualRequest.getBackup()); + Assert.assertEquals(backupId, actualRequest.getBackupId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createBackupExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + Backup backup = Backup.newBuilder().build(); + String backupId = "backupId2121930365"; + client.createBackupAsync(parent, backup, backupId).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void copyBackupTest() throws Exception { + Backup expectedResponse = + Backup.newBuilder() + .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + 
.setVersionTime(Timestamp.newBuilder().build()) + .setExpireTime(Timestamp.newBuilder().build()) + .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) + .addAllReferencingDatabases(new ArrayList()) + .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .addAllReferencingBackups(new ArrayList()) + .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) + .addAllInstancePartitions(new ArrayList()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("copyBackupTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockDatabaseAdmin.addResponse(resultOperation); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + String backupId = "backupId2121930365"; + BackupName sourceBackup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + Timestamp expireTime = Timestamp.newBuilder().build(); + + Backup actualResponse = + client.copyBackupAsync(parent, backupId, sourceBackup, expireTime).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CopyBackupRequest actualRequest = ((CopyBackupRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(backupId, actualRequest.getBackupId()); + Assert.assertEquals(sourceBackup.toString(), actualRequest.getSourceBackup()); + Assert.assertEquals(expireTime, actualRequest.getExpireTime()); + Assert.assertTrue( + channelProvider.isHeaderSent( + 
ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void copyBackupExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + String backupId = "backupId2121930365"; + BackupName sourceBackup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + Timestamp expireTime = Timestamp.newBuilder().build(); + client.copyBackupAsync(parent, backupId, sourceBackup, expireTime).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void copyBackupTest2() throws Exception { + Backup expectedResponse = + Backup.newBuilder() + .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setVersionTime(Timestamp.newBuilder().build()) + .setExpireTime(Timestamp.newBuilder().build()) + .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) + .addAllReferencingDatabases(new ArrayList()) + .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .addAllReferencingBackups(new ArrayList()) + .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) 
+ .addAllInstancePartitions(new ArrayList()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("copyBackupTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockDatabaseAdmin.addResponse(resultOperation); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + String backupId = "backupId2121930365"; + String sourceBackup = "sourceBackup823134653"; + Timestamp expireTime = Timestamp.newBuilder().build(); + + Backup actualResponse = + client.copyBackupAsync(parent, backupId, sourceBackup, expireTime).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CopyBackupRequest actualRequest = ((CopyBackupRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(backupId, actualRequest.getBackupId()); + Assert.assertEquals(sourceBackup, actualRequest.getSourceBackup()); + Assert.assertEquals(expireTime, actualRequest.getExpireTime()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void copyBackupExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + String backupId = "backupId2121930365"; + String sourceBackup = "sourceBackup823134653"; + Timestamp expireTime = Timestamp.newBuilder().build(); + client.copyBackupAsync(parent, backupId, sourceBackup, expireTime).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = 
((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void copyBackupTest3() throws Exception { + Backup expectedResponse = + Backup.newBuilder() + .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setVersionTime(Timestamp.newBuilder().build()) + .setExpireTime(Timestamp.newBuilder().build()) + .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) + .addAllReferencingDatabases(new ArrayList()) + .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .addAllReferencingBackups(new ArrayList()) + .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) + .addAllInstancePartitions(new ArrayList()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("copyBackupTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockDatabaseAdmin.addResponse(resultOperation); + + String parent = "parent-995424086"; + String backupId = "backupId2121930365"; + BackupName sourceBackup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + Timestamp expireTime = Timestamp.newBuilder().build(); + + Backup actualResponse = + client.copyBackupAsync(parent, backupId, sourceBackup, expireTime).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CopyBackupRequest actualRequest = ((CopyBackupRequest) actualRequests.get(0)); + + 
Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(backupId, actualRequest.getBackupId()); + Assert.assertEquals(sourceBackup.toString(), actualRequest.getSourceBackup()); + Assert.assertEquals(expireTime, actualRequest.getExpireTime()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void copyBackupExceptionTest3() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + String backupId = "backupId2121930365"; + BackupName sourceBackup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + Timestamp expireTime = Timestamp.newBuilder().build(); + client.copyBackupAsync(parent, backupId, sourceBackup, expireTime).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void copyBackupTest4() throws Exception { + Backup expectedResponse = + Backup.newBuilder() + .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setVersionTime(Timestamp.newBuilder().build()) + .setExpireTime(Timestamp.newBuilder().build()) + .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) + .addAllReferencingDatabases(new ArrayList()) + .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) + 
.setDatabaseDialect(DatabaseDialect.forNumber(0)) + .addAllReferencingBackups(new ArrayList()) + .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) + .addAllInstancePartitions(new ArrayList()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("copyBackupTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockDatabaseAdmin.addResponse(resultOperation); + + String parent = "parent-995424086"; + String backupId = "backupId2121930365"; + String sourceBackup = "sourceBackup823134653"; + Timestamp expireTime = Timestamp.newBuilder().build(); + + Backup actualResponse = + client.copyBackupAsync(parent, backupId, sourceBackup, expireTime).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CopyBackupRequest actualRequest = ((CopyBackupRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(backupId, actualRequest.getBackupId()); + Assert.assertEquals(sourceBackup, actualRequest.getSourceBackup()); + Assert.assertEquals(expireTime, actualRequest.getExpireTime()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void copyBackupExceptionTest4() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + String backupId = "backupId2121930365"; + String sourceBackup = "sourceBackup823134653"; + Timestamp expireTime = Timestamp.newBuilder().build(); + client.copyBackupAsync(parent, backupId, 
sourceBackup, expireTime).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void getBackupTest() throws Exception { + Backup expectedResponse = + Backup.newBuilder() + .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setVersionTime(Timestamp.newBuilder().build()) + .setExpireTime(Timestamp.newBuilder().build()) + .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) + .addAllReferencingDatabases(new ArrayList()) + .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .addAllReferencingBackups(new ArrayList()) + .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) + .addAllInstancePartitions(new ArrayList()) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + BackupName name = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + + Backup actualResponse = client.getBackup(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetBackupRequest actualRequest = ((GetBackupRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + 
ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getBackupExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + BackupName name = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + client.getBackup(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getBackupTest2() throws Exception { + Backup expectedResponse = + Backup.newBuilder() + .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setVersionTime(Timestamp.newBuilder().build()) + .setExpireTime(Timestamp.newBuilder().build()) + .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) + .addAllReferencingDatabases(new ArrayList()) + .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .addAllReferencingBackups(new ArrayList()) + .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) + .addAllInstancePartitions(new ArrayList()) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String name = "name3373707"; + + Backup actualResponse = client.getBackup(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetBackupRequest actualRequest = ((GetBackupRequest) actualRequests.get(0)); + + 
Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getBackupExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String name = "name3373707"; + client.getBackup(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void updateBackupTest() throws Exception { + Backup expectedResponse = + Backup.newBuilder() + .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setVersionTime(Timestamp.newBuilder().build()) + .setExpireTime(Timestamp.newBuilder().build()) + .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) + .addAllReferencingDatabases(new ArrayList()) + .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .addAllReferencingBackups(new ArrayList()) + .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) + .addAllInstancePartitions(new ArrayList()) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + Backup backup = Backup.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + Backup actualResponse = client.updateBackup(backup, updateMask); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = 
mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + UpdateBackupRequest actualRequest = ((UpdateBackupRequest) actualRequests.get(0)); + + Assert.assertEquals(backup, actualRequest.getBackup()); + Assert.assertEquals(updateMask, actualRequest.getUpdateMask()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void updateBackupExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + Backup backup = Backup.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateBackup(backup, updateMask); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void deleteBackupTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + BackupName name = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + + client.deleteBackup(name); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteBackupRequest actualRequest = ((DeleteBackupRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteBackupExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + BackupName name = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + client.deleteBackup(name); + 
Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void deleteBackupTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String name = "name3373707"; + + client.deleteBackup(name); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteBackupRequest actualRequest = ((DeleteBackupRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteBackupExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String name = "name3373707"; + client.deleteBackup(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listBackupsTest() throws Exception { + Backup responsesElement = Backup.newBuilder().build(); + ListBackupsResponse expectedResponse = + ListBackupsResponse.newBuilder() + .setNextPageToken("") + .addAllBackups(Arrays.asList(responsesElement)) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + + ListBackupsPagedResponse pagedListResponse = client.listBackups(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getBackupsList().get(0), resources.get(0)); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListBackupsRequest actualRequest = ((ListBackupsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listBackupsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + client.listBackups(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listBackupsTest2() throws Exception { + Backup responsesElement = Backup.newBuilder().build(); + ListBackupsResponse expectedResponse = + ListBackupsResponse.newBuilder() + .setNextPageToken("") + .addAllBackups(Arrays.asList(responsesElement)) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListBackupsPagedResponse pagedListResponse = client.listBackups(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getBackupsList().get(0), resources.get(0)); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListBackupsRequest actualRequest = ((ListBackupsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listBackupsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + client.listBackups(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void restoreDatabaseTest() throws Exception { + Database expectedResponse = + Database.newBuilder() + .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setRestoreInfo(RestoreInfo.newBuilder().build()) + .setEncryptionConfig(EncryptionConfig.newBuilder().build()) + .addAllEncryptionInfo(new ArrayList()) + .setVersionRetentionPeriod("versionRetentionPeriod-629783929") + .setEarliestVersionTime(Timestamp.newBuilder().build()) + .setDefaultLeader("defaultLeader759009962") + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .setEnableDropProtection(true) + .setReconciling(true) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("restoreDatabaseTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockDatabaseAdmin.addResponse(resultOperation); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + String databaseId = "databaseId1688905718"; + BackupName backup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + + Database actualResponse = client.restoreDatabaseAsync(parent, databaseId, backup).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + RestoreDatabaseRequest actualRequest = ((RestoreDatabaseRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(databaseId, actualRequest.getDatabaseId()); + Assert.assertEquals(backup.toString(), actualRequest.getBackup()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void restoreDatabaseExceptionTest() throws Exception { + StatusRuntimeException exception = new 
StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + String databaseId = "databaseId1688905718"; + BackupName backup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + client.restoreDatabaseAsync(parent, databaseId, backup).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void restoreDatabaseTest2() throws Exception { + Database expectedResponse = + Database.newBuilder() + .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setRestoreInfo(RestoreInfo.newBuilder().build()) + .setEncryptionConfig(EncryptionConfig.newBuilder().build()) + .addAllEncryptionInfo(new ArrayList()) + .setVersionRetentionPeriod("versionRetentionPeriod-629783929") + .setEarliestVersionTime(Timestamp.newBuilder().build()) + .setDefaultLeader("defaultLeader759009962") + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .setEnableDropProtection(true) + .setReconciling(true) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("restoreDatabaseTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockDatabaseAdmin.addResponse(resultOperation); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + String databaseId = "databaseId1688905718"; + String backup = "backup-1396673086"; + + Database actualResponse = client.restoreDatabaseAsync(parent, databaseId, backup).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + 
Assert.assertEquals(1, actualRequests.size()); + RestoreDatabaseRequest actualRequest = ((RestoreDatabaseRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(databaseId, actualRequest.getDatabaseId()); + Assert.assertEquals(backup, actualRequest.getBackup()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void restoreDatabaseExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + String databaseId = "databaseId1688905718"; + String backup = "backup-1396673086"; + client.restoreDatabaseAsync(parent, databaseId, backup).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void restoreDatabaseTest3() throws Exception { + Database expectedResponse = + Database.newBuilder() + .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setRestoreInfo(RestoreInfo.newBuilder().build()) + .setEncryptionConfig(EncryptionConfig.newBuilder().build()) + .addAllEncryptionInfo(new ArrayList()) + .setVersionRetentionPeriod("versionRetentionPeriod-629783929") + .setEarliestVersionTime(Timestamp.newBuilder().build()) + .setDefaultLeader("defaultLeader759009962") + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .setEnableDropProtection(true) + .setReconciling(true) + .build(); + 
Operation resultOperation = + Operation.newBuilder() + .setName("restoreDatabaseTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockDatabaseAdmin.addResponse(resultOperation); + + String parent = "parent-995424086"; + String databaseId = "databaseId1688905718"; + BackupName backup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + + Database actualResponse = client.restoreDatabaseAsync(parent, databaseId, backup).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + RestoreDatabaseRequest actualRequest = ((RestoreDatabaseRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(databaseId, actualRequest.getDatabaseId()); + Assert.assertEquals(backup.toString(), actualRequest.getBackup()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void restoreDatabaseExceptionTest3() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + String databaseId = "databaseId1688905718"; + BackupName backup = BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]"); + client.restoreDatabaseAsync(parent, databaseId, backup).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void restoreDatabaseTest4() throws Exception { + Database expectedResponse = + 
Database.newBuilder() + .setName(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString()) + .setCreateTime(Timestamp.newBuilder().build()) + .setRestoreInfo(RestoreInfo.newBuilder().build()) + .setEncryptionConfig(EncryptionConfig.newBuilder().build()) + .addAllEncryptionInfo(new ArrayList()) + .setVersionRetentionPeriod("versionRetentionPeriod-629783929") + .setEarliestVersionTime(Timestamp.newBuilder().build()) + .setDefaultLeader("defaultLeader759009962") + .setDatabaseDialect(DatabaseDialect.forNumber(0)) + .setEnableDropProtection(true) + .setReconciling(true) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("restoreDatabaseTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockDatabaseAdmin.addResponse(resultOperation); + + String parent = "parent-995424086"; + String databaseId = "databaseId1688905718"; + String backup = "backup-1396673086"; + + Database actualResponse = client.restoreDatabaseAsync(parent, databaseId, backup).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + RestoreDatabaseRequest actualRequest = ((RestoreDatabaseRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(databaseId, actualRequest.getDatabaseId()); + Assert.assertEquals(backup, actualRequest.getBackup()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void restoreDatabaseExceptionTest4() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + String databaseId = "databaseId1688905718"; + String backup = "backup-1396673086"; + 
client.restoreDatabaseAsync(parent, databaseId, backup).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void listDatabaseOperationsTest() throws Exception { + Operation responsesElement = Operation.newBuilder().build(); + ListDatabaseOperationsResponse expectedResponse = + ListDatabaseOperationsResponse.newBuilder() + .setNextPageToken("") + .addAllOperations(Arrays.asList(responsesElement)) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + + ListDatabaseOperationsPagedResponse pagedListResponse = client.listDatabaseOperations(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getOperationsList().get(0), resources.get(0)); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListDatabaseOperationsRequest actualRequest = + ((ListDatabaseOperationsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listDatabaseOperationsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + client.listDatabaseOperations(parent); + Assert.fail("No exception raised"); + } 
catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void listDatabaseOperationsTest2() throws Exception { + Operation responsesElement = Operation.newBuilder().build(); + ListDatabaseOperationsResponse expectedResponse = + ListDatabaseOperationsResponse.newBuilder() + .setNextPageToken("") + .addAllOperations(Arrays.asList(responsesElement)) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListDatabaseOperationsPagedResponse pagedListResponse = client.listDatabaseOperations(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getOperationsList().get(0), resources.get(0)); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListDatabaseOperationsRequest actualRequest = + ((ListDatabaseOperationsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listDatabaseOperationsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + client.listDatabaseOperations(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listBackupOperationsTest() throws Exception { + Operation responsesElement = Operation.newBuilder().build(); + ListBackupOperationsResponse expectedResponse = + ListBackupOperationsResponse.newBuilder() + .setNextPageToken("") + .addAllOperations(Arrays.asList(responsesElement)) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + + ListBackupOperationsPagedResponse pagedListResponse = client.listBackupOperations(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getOperationsList().get(0), resources.get(0)); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListBackupOperationsRequest actualRequest = + ((ListBackupOperationsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listBackupOperationsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + client.listBackupOperations(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listBackupOperationsTest2() throws Exception { + Operation responsesElement = Operation.newBuilder().build(); + ListBackupOperationsResponse expectedResponse = + ListBackupOperationsResponse.newBuilder() + .setNextPageToken("") + .addAllOperations(Arrays.asList(responsesElement)) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListBackupOperationsPagedResponse pagedListResponse = client.listBackupOperations(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getOperationsList().get(0), resources.get(0)); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListBackupOperationsRequest actualRequest = + ((ListBackupOperationsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listBackupOperationsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + client.listBackupOperations(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listDatabaseRolesTest() throws Exception { + DatabaseRole responsesElement = DatabaseRole.newBuilder().build(); + ListDatabaseRolesResponse expectedResponse = + ListDatabaseRolesResponse.newBuilder() + .setNextPageToken("") + .addAllDatabaseRoles(Arrays.asList(responsesElement)) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + + ListDatabaseRolesPagedResponse pagedListResponse = client.listDatabaseRoles(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getDatabaseRolesList().get(0), resources.get(0)); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListDatabaseRolesRequest actualRequest = ((ListDatabaseRolesRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listDatabaseRolesExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + client.listDatabaseRoles(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listDatabaseRolesTest2() throws Exception { + DatabaseRole responsesElement = DatabaseRole.newBuilder().build(); + ListDatabaseRolesResponse expectedResponse = + ListDatabaseRolesResponse.newBuilder() + .setNextPageToken("") + .addAllDatabaseRoles(Arrays.asList(responsesElement)) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListDatabaseRolesPagedResponse pagedListResponse = client.listDatabaseRoles(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getDatabaseRolesList().get(0), resources.get(0)); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListDatabaseRolesRequest actualRequest = ((ListDatabaseRolesRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listDatabaseRolesExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + client.listDatabaseRoles(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void addSplitPointsTest() throws Exception { + AddSplitPointsResponse expectedResponse = AddSplitPointsResponse.newBuilder().build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + List splitPoints = new ArrayList<>(); + + AddSplitPointsResponse actualResponse = client.addSplitPoints(database, splitPoints); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + AddSplitPointsRequest actualRequest = ((AddSplitPointsRequest) actualRequests.get(0)); + + Assert.assertEquals(database.toString(), actualRequest.getDatabase()); + Assert.assertEquals(splitPoints, actualRequest.getSplitPointsList()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void addSplitPointsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + List splitPoints = new ArrayList<>(); + client.addSplitPoints(database, splitPoints); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void addSplitPointsTest2() throws Exception { + AddSplitPointsResponse expectedResponse = AddSplitPointsResponse.newBuilder().build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String database = "database1789464955"; + List splitPoints = new ArrayList<>(); + + AddSplitPointsResponse actualResponse = client.addSplitPoints(database, splitPoints); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + AddSplitPointsRequest actualRequest = ((AddSplitPointsRequest) actualRequests.get(0)); + + Assert.assertEquals(database, actualRequest.getDatabase()); + Assert.assertEquals(splitPoints, actualRequest.getSplitPointsList()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void addSplitPointsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String database = "database1789464955"; + List splitPoints = new ArrayList<>(); + client.addSplitPoints(database, splitPoints); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createBackupScheduleTest() throws Exception { + BackupSchedule expectedResponse = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + BackupSchedule backupSchedule = BackupSchedule.newBuilder().build(); + String backupScheduleId = "backupScheduleId1704974708"; + + BackupSchedule actualResponse = + client.createBackupSchedule(parent, backupSchedule, backupScheduleId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateBackupScheduleRequest actualRequest = + ((CreateBackupScheduleRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(backupSchedule, actualRequest.getBackupSchedule()); + Assert.assertEquals(backupScheduleId, actualRequest.getBackupScheduleId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createBackupScheduleExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + BackupSchedule backupSchedule = BackupSchedule.newBuilder().build(); + String backupScheduleId = "backupScheduleId1704974708"; + 
client.createBackupSchedule(parent, backupSchedule, backupScheduleId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void createBackupScheduleTest2() throws Exception { + BackupSchedule expectedResponse = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String parent = "parent-995424086"; + BackupSchedule backupSchedule = BackupSchedule.newBuilder().build(); + String backupScheduleId = "backupScheduleId1704974708"; + + BackupSchedule actualResponse = + client.createBackupSchedule(parent, backupSchedule, backupScheduleId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateBackupScheduleRequest actualRequest = + ((CreateBackupScheduleRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(backupSchedule, actualRequest.getBackupSchedule()); + Assert.assertEquals(backupScheduleId, actualRequest.getBackupScheduleId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createBackupScheduleExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + BackupSchedule backupSchedule = BackupSchedule.newBuilder().build(); + 
String backupScheduleId = "backupScheduleId1704974708"; + client.createBackupSchedule(parent, backupSchedule, backupScheduleId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getBackupScheduleTest() throws Exception { + BackupSchedule expectedResponse = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + BackupScheduleName name = + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]"); + + BackupSchedule actualResponse = client.getBackupSchedule(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetBackupScheduleRequest actualRequest = ((GetBackupScheduleRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getBackupScheduleExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + BackupScheduleName name = + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]"); + client.getBackupSchedule(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getBackupScheduleTest2() throws Exception { + BackupSchedule expectedResponse = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String name = "name3373707"; + + BackupSchedule actualResponse = client.getBackupSchedule(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetBackupScheduleRequest actualRequest = ((GetBackupScheduleRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getBackupScheduleExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String name = "name3373707"; + client.getBackupSchedule(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void updateBackupScheduleTest() throws Exception { + BackupSchedule expectedResponse = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + BackupSchedule backupSchedule = BackupSchedule.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + BackupSchedule actualResponse = client.updateBackupSchedule(backupSchedule, updateMask); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + UpdateBackupScheduleRequest actualRequest = + ((UpdateBackupScheduleRequest) actualRequests.get(0)); + + Assert.assertEquals(backupSchedule, actualRequest.getBackupSchedule()); + Assert.assertEquals(updateMask, actualRequest.getUpdateMask()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void updateBackupScheduleExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + BackupSchedule backupSchedule = BackupSchedule.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateBackupSchedule(backupSchedule, updateMask); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteBackupScheduleTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + BackupScheduleName name = + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]"); + + client.deleteBackupSchedule(name); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteBackupScheduleRequest actualRequest = + ((DeleteBackupScheduleRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteBackupScheduleExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + BackupScheduleName name = + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]"); + client.deleteBackupSchedule(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteBackupScheduleTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String name = "name3373707"; + + client.deleteBackupSchedule(name); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteBackupScheduleRequest actualRequest = + ((DeleteBackupScheduleRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteBackupScheduleExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String name = "name3373707"; + client.deleteBackupSchedule(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listBackupSchedulesTest() throws Exception { + BackupSchedule responsesElement = BackupSchedule.newBuilder().build(); + ListBackupSchedulesResponse expectedResponse = + ListBackupSchedulesResponse.newBuilder() + .setNextPageToken("") + .addAllBackupSchedules(Arrays.asList(responsesElement)) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + + ListBackupSchedulesPagedResponse pagedListResponse = client.listBackupSchedules(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getBackupSchedulesList().get(0), resources.get(0)); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListBackupSchedulesRequest actualRequest = ((ListBackupSchedulesRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listBackupSchedulesExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + client.listBackupSchedules(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listBackupSchedulesTest2() throws Exception { + BackupSchedule responsesElement = BackupSchedule.newBuilder().build(); + ListBackupSchedulesResponse expectedResponse = + ListBackupSchedulesResponse.newBuilder() + .setNextPageToken("") + .addAllBackupSchedules(Arrays.asList(responsesElement)) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListBackupSchedulesPagedResponse pagedListResponse = client.listBackupSchedules(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getBackupSchedulesList().get(0), resources.get(0)); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListBackupSchedulesRequest actualRequest = ((ListBackupSchedulesRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listBackupSchedulesExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + client.listBackupSchedules(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void internalUpdateGraphOperationTest() throws Exception { + InternalUpdateGraphOperationResponse expectedResponse = + InternalUpdateGraphOperationResponse.newBuilder().build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + String operationId = "operationId129704162"; + + InternalUpdateGraphOperationResponse actualResponse = + client.internalUpdateGraphOperation(database, operationId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + InternalUpdateGraphOperationRequest actualRequest = + ((InternalUpdateGraphOperationRequest) actualRequests.get(0)); + + Assert.assertEquals(database.toString(), actualRequest.getDatabase()); + Assert.assertEquals(operationId, actualRequest.getOperationId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void internalUpdateGraphOperationExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + String operationId = "operationId129704162"; + client.internalUpdateGraphOperation(database, operationId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void internalUpdateGraphOperationTest2() throws Exception { + InternalUpdateGraphOperationResponse expectedResponse = + InternalUpdateGraphOperationResponse.newBuilder().build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String database = "database1789464955"; + String operationId = "operationId129704162"; + + InternalUpdateGraphOperationResponse actualResponse = + client.internalUpdateGraphOperation(database, operationId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + InternalUpdateGraphOperationRequest actualRequest = + ((InternalUpdateGraphOperationRequest) actualRequests.get(0)); + + Assert.assertEquals(database, actualRequest.getDatabase()); + Assert.assertEquals(operationId, actualRequest.getOperationId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void internalUpdateGraphOperationExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String database = "database1789464955"; + String operationId = "operationId129704162"; + client.internalUpdateGraphOperation(database, operationId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/MockDatabaseAdmin.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/MockDatabaseAdmin.java new file mode 100644 index 000000000000..3a689c8cde86 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/MockDatabaseAdmin.java @@ -0,0 +1,59 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.admin.database.v1; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.protobuf.AbstractMessage; +import io.grpc.ServerServiceDefinition; +import java.util.List; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockDatabaseAdmin implements MockGrpcService { + private final MockDatabaseAdminImpl serviceImpl; + + public MockDatabaseAdmin() { + serviceImpl = new MockDatabaseAdminImpl(); + } + + @Override + public List getRequests() { + return serviceImpl.getRequests(); + } + + @Override + public void addResponse(AbstractMessage response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addException(exception); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/MockDatabaseAdminImpl.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/MockDatabaseAdminImpl.java new file mode 100644 index 000000000000..b262db7d4e0a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/MockDatabaseAdminImpl.java @@ -0,0 +1,674 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.admin.database.v1; + +import com.google.api.core.BetaApi; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Empty; +import com.google.spanner.admin.database.v1.AddSplitPointsRequest; +import com.google.spanner.admin.database.v1.AddSplitPointsResponse; +import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.CopyBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupScheduleRequest; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.DatabaseAdminGrpc.DatabaseAdminImplBase; +import com.google.spanner.admin.database.v1.DeleteBackupRequest; +import com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest; +import com.google.spanner.admin.database.v1.DropDatabaseRequest; +import com.google.spanner.admin.database.v1.GetBackupRequest; +import com.google.spanner.admin.database.v1.GetBackupScheduleRequest; +import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; +import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; +import com.google.spanner.admin.database.v1.GetDatabaseRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse; +import 
com.google.spanner.admin.database.v1.ListBackupOperationsRequest; +import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; +import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; +import com.google.spanner.admin.database.v1.ListBackupSchedulesResponse; +import com.google.spanner.admin.database.v1.ListBackupsRequest; +import com.google.spanner.admin.database.v1.ListBackupsResponse; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse; +import com.google.spanner.admin.database.v1.ListDatabaseRolesRequest; +import com.google.spanner.admin.database.v1.ListDatabaseRolesResponse; +import com.google.spanner.admin.database.v1.ListDatabasesRequest; +import com.google.spanner.admin.database.v1.ListDatabasesResponse; +import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; +import com.google.spanner.admin.database.v1.UpdateBackupRequest; +import com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; +import com.google.spanner.admin.database.v1.UpdateDatabaseRequest; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockDatabaseAdminImpl extends DatabaseAdminImplBase { + private List requests; + private Queue responses; + + public MockDatabaseAdminImpl() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + public List getRequests() { + return requests; + } + + public void addResponse(AbstractMessage response) { + responses.add(response); + } + + public void setResponses(List responses) { + this.responses = new LinkedList(responses); + } + + public void addException(Exception exception) { + responses.add(exception); + } + + public void 
reset() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + @Override + public void listDatabases( + ListDatabasesRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListDatabasesResponse) { + requests.add(request); + responseObserver.onNext(((ListDatabasesResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListDatabases, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + ListDatabasesResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void createDatabase( + CreateDatabaseRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext(((Operation) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CreateDatabase, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Operation.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getDatabase(GetDatabaseRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Database) { + requests.add(request); + responseObserver.onNext(((Database) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetDatabase, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Database.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void updateDatabase( + UpdateDatabaseRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext(((Operation) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method UpdateDatabase, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Operation.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void updateDatabaseDdl( + UpdateDatabaseDdlRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext(((Operation) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method UpdateDatabaseDdl, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Operation.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void dropDatabase(DropDatabaseRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Empty) { + requests.add(request); + responseObserver.onNext(((Empty) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method DropDatabase, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Empty.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getDatabaseDdl( + GetDatabaseDdlRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof GetDatabaseDdlResponse) { + requests.add(request); + responseObserver.onNext(((GetDatabaseDdlResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetDatabaseDdl, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + GetDatabaseDdlResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void setIamPolicy(SetIamPolicyRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Policy) { + requests.add(request); + responseObserver.onNext(((Policy) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method SetIamPolicy, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Policy.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getIamPolicy(GetIamPolicyRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Policy) { + requests.add(request); + responseObserver.onNext(((Policy) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetIamPolicy, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Policy.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void testIamPermissions( + TestIamPermissionsRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof TestIamPermissionsResponse) { + requests.add(request); + responseObserver.onNext(((TestIamPermissionsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method TestIamPermissions, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + TestIamPermissionsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void createBackup( + CreateBackupRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext(((Operation) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CreateBackup, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Operation.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void copyBackup(CopyBackupRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext(((Operation) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CopyBackup, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Operation.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getBackup(GetBackupRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Backup) { + requests.add(request); + responseObserver.onNext(((Backup) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetBackup, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Backup.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void updateBackup(UpdateBackupRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Backup) { + requests.add(request); + responseObserver.onNext(((Backup) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method UpdateBackup, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Backup.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void deleteBackup(DeleteBackupRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Empty) { + requests.add(request); + responseObserver.onNext(((Empty) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method DeleteBackup, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Empty.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listBackups( + ListBackupsRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListBackupsResponse) { + requests.add(request); + responseObserver.onNext(((ListBackupsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListBackups, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + ListBackupsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void restoreDatabase( + RestoreDatabaseRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext(((Operation) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method RestoreDatabase, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Operation.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listDatabaseOperations( + ListDatabaseOperationsRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListDatabaseOperationsResponse) { + requests.add(request); + responseObserver.onNext(((ListDatabaseOperationsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListDatabaseOperations, expected %s or" + + " %s", + response == null ? 
"null" : response.getClass().getName(), + ListDatabaseOperationsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listBackupOperations( + ListBackupOperationsRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListBackupOperationsResponse) { + requests.add(request); + responseObserver.onNext(((ListBackupOperationsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListBackupOperations, expected %s or" + + " %s", + response == null ? "null" : response.getClass().getName(), + ListBackupOperationsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listDatabaseRoles( + ListDatabaseRolesRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListDatabaseRolesResponse) { + requests.add(request); + responseObserver.onNext(((ListDatabaseRolesResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListDatabaseRoles, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + ListDatabaseRolesResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void addSplitPoints( + AddSplitPointsRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof AddSplitPointsResponse) { + requests.add(request); + responseObserver.onNext(((AddSplitPointsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method AddSplitPoints, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + AddSplitPointsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void createBackupSchedule( + CreateBackupScheduleRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof BackupSchedule) { + requests.add(request); + responseObserver.onNext(((BackupSchedule) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CreateBackupSchedule, expected %s or" + + " %s", + response == null ? 
"null" : response.getClass().getName(), + BackupSchedule.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getBackupSchedule( + GetBackupScheduleRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof BackupSchedule) { + requests.add(request); + responseObserver.onNext(((BackupSchedule) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetBackupSchedule, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + BackupSchedule.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void updateBackupSchedule( + UpdateBackupScheduleRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof BackupSchedule) { + requests.add(request); + responseObserver.onNext(((BackupSchedule) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method UpdateBackupSchedule, expected %s or" + + " %s", + response == null ? 
"null" : response.getClass().getName(), + BackupSchedule.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void deleteBackupSchedule( + DeleteBackupScheduleRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Empty) { + requests.add(request); + responseObserver.onNext(((Empty) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method DeleteBackupSchedule, expected %s or" + + " %s", + response == null ? "null" : response.getClass().getName(), + Empty.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listBackupSchedules( + ListBackupSchedulesRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListBackupSchedulesResponse) { + requests.add(request); + responseObserver.onNext(((ListBackupSchedulesResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListBackupSchedules, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + ListBackupSchedulesResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void internalUpdateGraphOperation( + InternalUpdateGraphOperationRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof InternalUpdateGraphOperationResponse) { + requests.add(request); + responseObserver.onNext(((InternalUpdateGraphOperationResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method InternalUpdateGraphOperation, expected" + + " %s or %s", + response == null ? "null" : response.getClass().getName(), + InternalUpdateGraphOperationResponse.class.getName(), + Exception.class.getName()))); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminClientHttpJsonTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminClientHttpJsonTest.java new file mode 100644 index 000000000000..adb7056c08a9 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminClientHttpJsonTest.java @@ -0,0 +1,2168 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.admin.instance.v1; + +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstanceConfigOperationsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstanceConfigsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstancePartitionOperationsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstancePartitionsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstancesPagedResponse; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.httpjson.GaxHttpJsonProperties; +import com.google.api.gax.httpjson.testing.MockHttpService; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ApiExceptionFactory; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.testing.FakeStatusCode; +import com.google.api.resourcenames.ResourceName; +import com.google.cloud.spanner.admin.instance.v1.stub.HttpJsonInstanceAdminStub; +import com.google.common.collect.Lists; +import com.google.iam.v1.AuditConfig; +import com.google.iam.v1.Binding; +import com.google.iam.v1.Policy; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.Any; +import com.google.protobuf.ByteString; +import com.google.protobuf.Empty; +import com.google.protobuf.FieldMask; +import com.google.protobuf.Timestamp; +import com.google.spanner.admin.instance.v1.AutoscalingConfig; +import com.google.spanner.admin.instance.v1.FreeInstanceMetadata; +import com.google.spanner.admin.instance.v1.Instance; +import 
com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.InstanceConfigName; +import com.google.spanner.admin.instance.v1.InstanceName; +import com.google.spanner.admin.instance.v1.InstancePartition; +import com.google.spanner.admin.instance.v1.InstancePartitionName; +import com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse; +import com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse; +import com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse; +import com.google.spanner.admin.instance.v1.ListInstancesResponse; +import com.google.spanner.admin.instance.v1.MoveInstanceRequest; +import com.google.spanner.admin.instance.v1.MoveInstanceResponse; +import com.google.spanner.admin.instance.v1.ProjectName; +import com.google.spanner.admin.instance.v1.ReplicaComputeCapacity; +import com.google.spanner.admin.instance.v1.ReplicaInfo; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.ExecutionException; +import javax.annotation.Generated; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@Generated("by gapic-generator-java") +public class InstanceAdminClientHttpJsonTest { + private static MockHttpService mockService; + private static InstanceAdminClient client; + + @BeforeClass + public static void startStaticServer() throws IOException { + mockService = + new MockHttpService( + HttpJsonInstanceAdminStub.getMethodDescriptors(), + InstanceAdminSettings.getDefaultEndpoint()); + InstanceAdminSettings settings = + InstanceAdminSettings.newHttpJsonBuilder() + .setTransportChannelProvider( + InstanceAdminSettings.defaultHttpJsonTransportProviderBuilder() + 
.setHttpTransport(mockService) + .build()) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = InstanceAdminClient.create(settings); + } + + @AfterClass + public static void stopServer() { + client.close(); + } + + @Before + public void setUp() {} + + @After + public void tearDown() throws Exception { + mockService.reset(); + } + + @Test + public void listInstanceConfigsTest() throws Exception { + InstanceConfig responsesElement = InstanceConfig.newBuilder().build(); + ListInstanceConfigsResponse expectedResponse = + ListInstanceConfigsResponse.newBuilder() + .setNextPageToken("") + .addAllInstanceConfigs(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + ProjectName parent = ProjectName.of("[PROJECT]"); + + ListInstanceConfigsPagedResponse pagedListResponse = client.listInstanceConfigs(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getInstanceConfigsList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listInstanceConfigsExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ProjectName parent = ProjectName.of("[PROJECT]"); + client.listInstanceConfigs(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listInstanceConfigsTest2() throws Exception { + InstanceConfig responsesElement = InstanceConfig.newBuilder().build(); + ListInstanceConfigsResponse expectedResponse = + ListInstanceConfigsResponse.newBuilder() + .setNextPageToken("") + .addAllInstanceConfigs(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + String parent = "projects/project-2353"; + + ListInstanceConfigsPagedResponse pagedListResponse = client.listInstanceConfigs(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getInstanceConfigsList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listInstanceConfigsExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-2353"; + client.listInstanceConfigs(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getInstanceConfigTest() throws Exception { + InstanceConfig expectedResponse = + InstanceConfig.newBuilder() + .setName(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .addAllReplicas(new ArrayList()) + .addAllOptionalReplicas(new ArrayList()) + .setBaseConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .putAllLabels(new HashMap()) + .setEtag("etag3123477") + .addAllLeaderOptions(new ArrayList()) + .setReconciling(true) + .setStorageLimitPerProcessingUnit(-1769187130) + .build(); + mockService.addResponse(expectedResponse); + + InstanceConfigName name = InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]"); + + InstanceConfig actualResponse = client.getInstanceConfig(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getInstanceConfigExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + InstanceConfigName name = InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]"); + client.getInstanceConfig(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getInstanceConfigTest2() throws Exception { + InstanceConfig expectedResponse = + InstanceConfig.newBuilder() + .setName(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .addAllReplicas(new ArrayList()) + .addAllOptionalReplicas(new ArrayList()) + .setBaseConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .putAllLabels(new HashMap()) + .setEtag("etag3123477") + .addAllLeaderOptions(new ArrayList()) + .setReconciling(true) + .setStorageLimitPerProcessingUnit(-1769187130) + .build(); + mockService.addResponse(expectedResponse); + + String name = "projects/project-3640/instanceConfigs/instanceConfig-3640"; + + InstanceConfig actualResponse = client.getInstanceConfig(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getInstanceConfigExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = "projects/project-3640/instanceConfigs/instanceConfig-3640"; + client.getInstanceConfig(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createInstanceConfigTest() throws Exception { + InstanceConfig expectedResponse = + InstanceConfig.newBuilder() + .setName(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .addAllReplicas(new ArrayList()) + .addAllOptionalReplicas(new ArrayList()) + .setBaseConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .putAllLabels(new HashMap()) + .setEtag("etag3123477") + .addAllLeaderOptions(new ArrayList()) + .setReconciling(true) + .setStorageLimitPerProcessingUnit(-1769187130) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createInstanceConfigTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + ProjectName parent = ProjectName.of("[PROJECT]"); + InstanceConfig instanceConfig = InstanceConfig.newBuilder().build(); + String instanceConfigId = "instanceConfigId1750947762"; + + InstanceConfig actualResponse = + client.createInstanceConfigAsync(parent, instanceConfig, instanceConfigId).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createInstanceConfigExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ProjectName parent = ProjectName.of("[PROJECT]"); + InstanceConfig instanceConfig = InstanceConfig.newBuilder().build(); + String instanceConfigId = 
"instanceConfigId1750947762"; + client.createInstanceConfigAsync(parent, instanceConfig, instanceConfigId).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void createInstanceConfigTest2() throws Exception { + InstanceConfig expectedResponse = + InstanceConfig.newBuilder() + .setName(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .addAllReplicas(new ArrayList()) + .addAllOptionalReplicas(new ArrayList()) + .setBaseConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .putAllLabels(new HashMap()) + .setEtag("etag3123477") + .addAllLeaderOptions(new ArrayList()) + .setReconciling(true) + .setStorageLimitPerProcessingUnit(-1769187130) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createInstanceConfigTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + String parent = "projects/project-2353"; + InstanceConfig instanceConfig = InstanceConfig.newBuilder().build(); + String instanceConfigId = "instanceConfigId1750947762"; + + InstanceConfig actualResponse = + client.createInstanceConfigAsync(parent, instanceConfig, instanceConfigId).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createInstanceConfigExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + 
mockService.addException(exception); + + try { + String parent = "projects/project-2353"; + InstanceConfig instanceConfig = InstanceConfig.newBuilder().build(); + String instanceConfigId = "instanceConfigId1750947762"; + client.createInstanceConfigAsync(parent, instanceConfig, instanceConfigId).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void updateInstanceConfigTest() throws Exception { + InstanceConfig expectedResponse = + InstanceConfig.newBuilder() + .setName(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .addAllReplicas(new ArrayList()) + .addAllOptionalReplicas(new ArrayList()) + .setBaseConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .putAllLabels(new HashMap()) + .setEtag("etag3123477") + .addAllLeaderOptions(new ArrayList()) + .setReconciling(true) + .setStorageLimitPerProcessingUnit(-1769187130) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("updateInstanceConfigTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + InstanceConfig instanceConfig = + InstanceConfig.newBuilder() + .setName(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .addAllReplicas(new ArrayList()) + .addAllOptionalReplicas(new ArrayList()) + .setBaseConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .putAllLabels(new HashMap()) + .setEtag("etag3123477") + .addAllLeaderOptions(new ArrayList()) + .setReconciling(true) + .setStorageLimitPerProcessingUnit(-1769187130) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + InstanceConfig actualResponse = + client.updateInstanceConfigAsync(instanceConfig, updateMask).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = 
mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void updateInstanceConfigExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + InstanceConfig instanceConfig = + InstanceConfig.newBuilder() + .setName(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .addAllReplicas(new ArrayList()) + .addAllOptionalReplicas(new ArrayList()) + .setBaseConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .putAllLabels(new HashMap()) + .setEtag("etag3123477") + .addAllLeaderOptions(new ArrayList()) + .setReconciling(true) + .setStorageLimitPerProcessingUnit(-1769187130) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateInstanceConfigAsync(instanceConfig, updateMask).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void deleteInstanceConfigTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + InstanceConfigName name = InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]"); + + client.deleteInstanceConfig(name); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + 
GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteInstanceConfigExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + InstanceConfigName name = InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]"); + client.deleteInstanceConfig(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void deleteInstanceConfigTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + String name = "projects/project-3640/instanceConfigs/instanceConfig-3640"; + + client.deleteInstanceConfig(name); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteInstanceConfigExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = "projects/project-3640/instanceConfigs/instanceConfig-3640"; + client.deleteInstanceConfig(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listInstanceConfigOperationsTest() throws Exception { + Operation responsesElement = Operation.newBuilder().build(); + ListInstanceConfigOperationsResponse expectedResponse = + ListInstanceConfigOperationsResponse.newBuilder() + .setNextPageToken("") + .addAllOperations(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + ProjectName parent = ProjectName.of("[PROJECT]"); + + ListInstanceConfigOperationsPagedResponse pagedListResponse = + client.listInstanceConfigOperations(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getOperationsList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listInstanceConfigOperationsExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ProjectName parent = ProjectName.of("[PROJECT]"); + client.listInstanceConfigOperations(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listInstanceConfigOperationsTest2() throws Exception { + Operation responsesElement = Operation.newBuilder().build(); + ListInstanceConfigOperationsResponse expectedResponse = + ListInstanceConfigOperationsResponse.newBuilder() + .setNextPageToken("") + .addAllOperations(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + String parent = "projects/project-2353"; + + ListInstanceConfigOperationsPagedResponse pagedListResponse = + client.listInstanceConfigOperations(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getOperationsList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listInstanceConfigOperationsExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-2353"; + client.listInstanceConfigOperations(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listInstancesTest() throws Exception { + Instance responsesElement = Instance.newBuilder().build(); + ListInstancesResponse expectedResponse = + ListInstancesResponse.newBuilder() + .setNextPageToken("") + .addAllInstances(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + ProjectName parent = ProjectName.of("[PROJECT]"); + + ListInstancesPagedResponse pagedListResponse = client.listInstances(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getInstancesList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listInstancesExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ProjectName parent = ProjectName.of("[PROJECT]"); + client.listInstances(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listInstancesTest2() throws Exception { + Instance responsesElement = Instance.newBuilder().build(); + ListInstancesResponse expectedResponse = + ListInstancesResponse.newBuilder() + .setNextPageToken("") + .addAllInstances(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + String parent = "projects/project-2353"; + + ListInstancesPagedResponse pagedListResponse = client.listInstances(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getInstancesList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listInstancesExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-2353"; + client.listInstances(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listInstancePartitionsTest() throws Exception { + InstancePartition responsesElement = InstancePartition.newBuilder().build(); + ListInstancePartitionsResponse expectedResponse = + ListInstancePartitionsResponse.newBuilder() + .setNextPageToken("") + .addAllInstancePartitions(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + + ListInstancePartitionsPagedResponse pagedListResponse = client.listInstancePartitions(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getInstancePartitionsList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listInstancePartitionsExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + client.listInstancePartitions(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listInstancePartitionsTest2() throws Exception { + InstancePartition responsesElement = InstancePartition.newBuilder().build(); + ListInstancePartitionsResponse expectedResponse = + ListInstancePartitionsResponse.newBuilder() + .setNextPageToken("") + .addAllInstancePartitions(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + String parent = "projects/project-8887/instances/instance-8887"; + + ListInstancePartitionsPagedResponse pagedListResponse = client.listInstancePartitions(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getInstancePartitionsList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listInstancePartitionsExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-8887/instances/instance-8887"; + client.listInstancePartitions(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getInstanceTest() throws Exception { + Instance expectedResponse = + Instance.newBuilder() + .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setNodeCount(1539922066) + .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .putAllLabels(new HashMap()) + .addAllEndpointUris(new ArrayList()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFreeInstanceMetadata(FreeInstanceMetadata.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + InstanceName name = InstanceName.of("[PROJECT]", "[INSTANCE]"); + + Instance actualResponse = client.getInstance(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getInstanceExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + InstanceName name = InstanceName.of("[PROJECT]", "[INSTANCE]"); + client.getInstance(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getInstanceTest2() throws Exception { + Instance expectedResponse = + Instance.newBuilder() + .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setNodeCount(1539922066) + .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .putAllLabels(new HashMap()) + .addAllEndpointUris(new ArrayList()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFreeInstanceMetadata(FreeInstanceMetadata.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + String name = "projects/project-3514/instances/instance-3514"; + + Instance actualResponse = client.getInstance(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getInstanceExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = "projects/project-3514/instances/instance-3514"; + client.getInstance(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createInstanceTest() throws Exception { + Instance expectedResponse = + Instance.newBuilder() + .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setNodeCount(1539922066) + .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .putAllLabels(new HashMap()) + .addAllEndpointUris(new ArrayList()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFreeInstanceMetadata(FreeInstanceMetadata.newBuilder().build()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createInstanceTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + ProjectName parent = ProjectName.of("[PROJECT]"); + String instanceId = "instanceId902024336"; + Instance instance = Instance.newBuilder().build(); + + Instance actualResponse = client.createInstanceAsync(parent, instanceId, instance).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createInstanceExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ProjectName parent = ProjectName.of("[PROJECT]"); + String instanceId = 
"instanceId902024336"; + Instance instance = Instance.newBuilder().build(); + client.createInstanceAsync(parent, instanceId, instance).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void createInstanceTest2() throws Exception { + Instance expectedResponse = + Instance.newBuilder() + .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setNodeCount(1539922066) + .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .putAllLabels(new HashMap()) + .addAllEndpointUris(new ArrayList()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFreeInstanceMetadata(FreeInstanceMetadata.newBuilder().build()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createInstanceTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + String parent = "projects/project-2353"; + String instanceId = "instanceId902024336"; + Instance instance = Instance.newBuilder().build(); + + Instance actualResponse = client.createInstanceAsync(parent, instanceId, instance).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createInstanceExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new 
Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-2353"; + String instanceId = "instanceId902024336"; + Instance instance = Instance.newBuilder().build(); + client.createInstanceAsync(parent, instanceId, instance).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void updateInstanceTest() throws Exception { + Instance expectedResponse = + Instance.newBuilder() + .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setNodeCount(1539922066) + .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .putAllLabels(new HashMap()) + .addAllEndpointUris(new ArrayList()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFreeInstanceMetadata(FreeInstanceMetadata.newBuilder().build()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("updateInstanceTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + Instance instance = + Instance.newBuilder() + .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setNodeCount(1539922066) + .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .putAllLabels(new HashMap()) + .addAllEndpointUris(new ArrayList()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFreeInstanceMetadata(FreeInstanceMetadata.newBuilder().build()) 
+ .build(); + FieldMask fieldMask = FieldMask.newBuilder().build(); + + Instance actualResponse = client.updateInstanceAsync(instance, fieldMask).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void updateInstanceExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + Instance instance = + Instance.newBuilder() + .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setNodeCount(1539922066) + .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .putAllLabels(new HashMap()) + .addAllEndpointUris(new ArrayList()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFreeInstanceMetadata(FreeInstanceMetadata.newBuilder().build()) + .build(); + FieldMask fieldMask = FieldMask.newBuilder().build(); + client.updateInstanceAsync(instance, fieldMask).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void deleteInstanceTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + InstanceName name = InstanceName.of("[PROJECT]", "[INSTANCE]"); + + client.deleteInstance(name); + + List 
actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteInstanceExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + InstanceName name = InstanceName.of("[PROJECT]", "[INSTANCE]"); + client.deleteInstance(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void deleteInstanceTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + String name = "projects/project-3514/instances/instance-3514"; + + client.deleteInstance(name); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteInstanceExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = "projects/project-3514/instances/instance-3514"; + client.deleteInstance(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void setIamPolicyTest() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockService.addResponse(expectedResponse); + + ResourceName resource = InstanceName.of("[PROJECT]", "[INSTANCE]"); + Policy policy = Policy.newBuilder().build(); + + Policy actualResponse = client.setIamPolicy(resource, policy); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void setIamPolicyExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ResourceName resource = InstanceName.of("[PROJECT]", "[INSTANCE]"); + Policy policy = Policy.newBuilder().build(); + client.setIamPolicy(resource, policy); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void setIamPolicyTest2() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockService.addResponse(expectedResponse); + + String resource = "projects/project-3043/instances/instance-3043"; + Policy policy = Policy.newBuilder().build(); + + Policy actualResponse = client.setIamPolicy(resource, policy); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void setIamPolicyExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String resource = "projects/project-3043/instances/instance-3043"; + Policy policy = Policy.newBuilder().build(); + client.setIamPolicy(resource, policy); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getIamPolicyTest() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockService.addResponse(expectedResponse); + + ResourceName resource = InstanceName.of("[PROJECT]", "[INSTANCE]"); + + Policy actualResponse = client.getIamPolicy(resource); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getIamPolicyExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ResourceName resource = InstanceName.of("[PROJECT]", "[INSTANCE]"); + client.getIamPolicy(resource); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getIamPolicyTest2() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockService.addResponse(expectedResponse); + + String resource = "projects/project-3043/instances/instance-3043"; + + Policy actualResponse = client.getIamPolicy(resource); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getIamPolicyExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String resource = "projects/project-3043/instances/instance-3043"; + client.getIamPolicy(resource); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void testIamPermissionsTest() throws Exception { + TestIamPermissionsResponse expectedResponse = + TestIamPermissionsResponse.newBuilder().addAllPermissions(new ArrayList()).build(); + mockService.addResponse(expectedResponse); + + ResourceName resource = InstanceName.of("[PROJECT]", "[INSTANCE]"); + List permissions = new ArrayList<>(); + + TestIamPermissionsResponse actualResponse = client.testIamPermissions(resource, permissions); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void testIamPermissionsExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ResourceName resource = InstanceName.of("[PROJECT]", "[INSTANCE]"); + List permissions = new ArrayList<>(); + client.testIamPermissions(resource, permissions); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void testIamPermissionsTest2() throws Exception { + TestIamPermissionsResponse expectedResponse = + TestIamPermissionsResponse.newBuilder().addAllPermissions(new ArrayList()).build(); + mockService.addResponse(expectedResponse); + + String resource = "projects/project-3043/instances/instance-3043"; + List permissions = new ArrayList<>(); + + TestIamPermissionsResponse actualResponse = client.testIamPermissions(resource, permissions); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void testIamPermissionsExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String resource = "projects/project-3043/instances/instance-3043"; + List permissions = new ArrayList<>(); + client.testIamPermissions(resource, permissions); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getInstancePartitionTest() throws Exception { + InstancePartition expectedResponse = + InstancePartition.newBuilder() + .setName( + InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]") + .toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .addAllReferencingDatabases(new ArrayList()) + .addAllReferencingBackups(new ArrayList()) + .setEtag("etag3123477") + .build(); + mockService.addResponse(expectedResponse); + + InstancePartitionName name = + InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]"); + + InstancePartition actualResponse = client.getInstancePartition(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getInstancePartitionExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + InstancePartitionName name = + InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]"); + client.getInstancePartition(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getInstancePartitionTest2() throws Exception { + InstancePartition expectedResponse = + InstancePartition.newBuilder() + .setName( + InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]") + .toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .addAllReferencingDatabases(new ArrayList()) + .addAllReferencingBackups(new ArrayList()) + .setEtag("etag3123477") + .build(); + mockService.addResponse(expectedResponse); + + String name = + "projects/project-9266/instances/instance-9266/instancePartitions/instancePartition-9266"; + + InstancePartition actualResponse = client.getInstancePartition(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getInstancePartitionExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = + "projects/project-9266/instances/instance-9266/instancePartitions/instancePartition-9266"; + client.getInstancePartition(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createInstancePartitionTest() throws Exception { + InstancePartition expectedResponse = + InstancePartition.newBuilder() + .setName( + InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]") + .toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .addAllReferencingDatabases(new ArrayList()) + .addAllReferencingBackups(new ArrayList()) + .setEtag("etag3123477") + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createInstancePartitionTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + InstancePartition instancePartition = InstancePartition.newBuilder().build(); + String instancePartitionId = "instancePartitionId1364450768"; + + InstancePartition actualResponse = + client.createInstancePartitionAsync(parent, instancePartition, instancePartitionId).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createInstancePartitionExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + InstanceName parent = 
InstanceName.of("[PROJECT]", "[INSTANCE]"); + InstancePartition instancePartition = InstancePartition.newBuilder().build(); + String instancePartitionId = "instancePartitionId1364450768"; + client.createInstancePartitionAsync(parent, instancePartition, instancePartitionId).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void createInstancePartitionTest2() throws Exception { + InstancePartition expectedResponse = + InstancePartition.newBuilder() + .setName( + InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]") + .toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .addAllReferencingDatabases(new ArrayList()) + .addAllReferencingBackups(new ArrayList()) + .setEtag("etag3123477") + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createInstancePartitionTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + String parent = "projects/project-8887/instances/instance-8887"; + InstancePartition instancePartition = InstancePartition.newBuilder().build(); + String instancePartitionId = "instancePartitionId1364450768"; + + InstancePartition actualResponse = + client.createInstancePartitionAsync(parent, instancePartition, instancePartitionId).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + 
.matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createInstancePartitionExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-8887/instances/instance-8887"; + InstancePartition instancePartition = InstancePartition.newBuilder().build(); + String instancePartitionId = "instancePartitionId1364450768"; + client.createInstancePartitionAsync(parent, instancePartition, instancePartitionId).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void deleteInstancePartitionTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + InstancePartitionName name = + InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]"); + + client.deleteInstancePartition(name); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteInstancePartitionExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + InstancePartitionName name = + InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]"); + client.deleteInstancePartition(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteInstancePartitionTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + String name = + "projects/project-9266/instances/instance-9266/instancePartitions/instancePartition-9266"; + + client.deleteInstancePartition(name); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteInstancePartitionExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = + "projects/project-9266/instances/instance-9266/instancePartitions/instancePartition-9266"; + client.deleteInstancePartition(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void updateInstancePartitionTest() throws Exception { + InstancePartition expectedResponse = + InstancePartition.newBuilder() + .setName( + InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]") + .toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .addAllReferencingDatabases(new ArrayList()) + .addAllReferencingBackups(new ArrayList()) + .setEtag("etag3123477") + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("updateInstancePartitionTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + InstancePartition instancePartition = + InstancePartition.newBuilder() + .setName( + InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]") + .toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .addAllReferencingDatabases(new ArrayList()) + .addAllReferencingBackups(new ArrayList()) + .setEtag("etag3123477") + .build(); + FieldMask fieldMask = FieldMask.newBuilder().build(); + + InstancePartition actualResponse = + client.updateInstancePartitionAsync(instancePartition, fieldMask).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + 
GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void updateInstancePartitionExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + InstancePartition instancePartition = + InstancePartition.newBuilder() + .setName( + InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]") + .toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .addAllReferencingDatabases(new ArrayList()) + .addAllReferencingBackups(new ArrayList()) + .setEtag("etag3123477") + .build(); + FieldMask fieldMask = FieldMask.newBuilder().build(); + client.updateInstancePartitionAsync(instancePartition, fieldMask).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } + + @Test + public void listInstancePartitionOperationsTest() throws Exception { + Operation responsesElement = Operation.newBuilder().build(); + ListInstancePartitionOperationsResponse expectedResponse = + ListInstancePartitionOperationsResponse.newBuilder() + .setNextPageToken("") + .addAllOperations(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + + ListInstancePartitionOperationsPagedResponse pagedListResponse = + client.listInstancePartitionOperations(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getOperationsList().get(0), resources.get(0)); + + List 
actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listInstancePartitionOperationsExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + client.listInstancePartitionOperations(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void listInstancePartitionOperationsTest2() throws Exception { + Operation responsesElement = Operation.newBuilder().build(); + ListInstancePartitionOperationsResponse expectedResponse = + ListInstancePartitionOperationsResponse.newBuilder() + .setNextPageToken("") + .addAllOperations(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + String parent = "projects/project-8887/instances/instance-8887"; + + ListInstancePartitionOperationsPagedResponse pagedListResponse = + client.listInstancePartitionOperations(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getOperationsList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + 
GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listInstancePartitionOperationsExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-8887/instances/instance-8887"; + client.listInstancePartitionOperations(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void moveInstanceTest() throws Exception { + MoveInstanceResponse expectedResponse = MoveInstanceResponse.newBuilder().build(); + Operation resultOperation = + Operation.newBuilder() + .setName("moveInstanceTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + MoveInstanceRequest request = + MoveInstanceRequest.newBuilder() + .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString()) + .setTargetConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .build(); + + MoveInstanceResponse actualResponse = client.moveInstanceAsync(request).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void moveInstanceExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + 
try { + MoveInstanceRequest request = + MoveInstanceRequest.newBuilder() + .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString()) + .setTargetConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .build(); + client.moveInstanceAsync(request).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminClientTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminClientTest.java new file mode 100644 index 000000000000..5181dbe256f0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminClientTest.java @@ -0,0 +1,1950 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.admin.instance.v1; + +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstanceConfigOperationsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstanceConfigsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstancePartitionOperationsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstancePartitionsPagedResponse; +import static com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient.ListInstancesPagedResponse; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.resourcenames.ResourceName; +import com.google.common.collect.Lists; +import com.google.iam.v1.AuditConfig; +import com.google.iam.v1.Binding; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Any; +import com.google.protobuf.ByteString; +import com.google.protobuf.Empty; +import com.google.protobuf.FieldMask; +import com.google.protobuf.Timestamp; +import com.google.spanner.admin.instance.v1.AutoscalingConfig; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest; +import 
com.google.spanner.admin.instance.v1.CreateInstanceRequest; +import com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.DeleteInstanceRequest; +import com.google.spanner.admin.instance.v1.FreeInstanceMetadata; +import com.google.spanner.admin.instance.v1.GetInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.GetInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.GetInstanceRequest; +import com.google.spanner.admin.instance.v1.Instance; +import com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.InstanceConfigName; +import com.google.spanner.admin.instance.v1.InstanceName; +import com.google.spanner.admin.instance.v1.InstancePartition; +import com.google.spanner.admin.instance.v1.InstancePartitionName; +import com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest; +import com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse; +import com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest; +import com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse; +import com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest; +import com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse; +import com.google.spanner.admin.instance.v1.ListInstancesRequest; +import com.google.spanner.admin.instance.v1.ListInstancesResponse; +import com.google.spanner.admin.instance.v1.MoveInstanceRequest; +import com.google.spanner.admin.instance.v1.MoveInstanceResponse; +import com.google.spanner.admin.instance.v1.ProjectName; +import com.google.spanner.admin.instance.v1.ReplicaComputeCapacity; +import 
com.google.spanner.admin.instance.v1.ReplicaInfo; +import com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.UpdateInstanceRequest; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import javax.annotation.Generated; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@Generated("by gapic-generator-java") +public class InstanceAdminClientTest { + private static MockInstanceAdmin mockInstanceAdmin; + private static MockServiceHelper mockServiceHelper; + private LocalChannelProvider channelProvider; + private InstanceAdminClient client; + + @BeforeClass + public static void startStaticServer() { + mockInstanceAdmin = new MockInstanceAdmin(); + mockServiceHelper = + new MockServiceHelper( + UUID.randomUUID().toString(), Arrays.asList(mockInstanceAdmin)); + mockServiceHelper.start(); + } + + @AfterClass + public static void stopServer() { + mockServiceHelper.stop(); + } + + @Before + public void setUp() throws IOException { + mockServiceHelper.reset(); + channelProvider = mockServiceHelper.createChannelProvider(); + InstanceAdminSettings settings = + InstanceAdminSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = InstanceAdminClient.create(settings); + } + + @After + public void tearDown() throws Exception { + client.close(); + } + + @Test + public void listInstanceConfigsTest() throws Exception { + InstanceConfig responsesElement = InstanceConfig.newBuilder().build(); + ListInstanceConfigsResponse expectedResponse = + 
ListInstanceConfigsResponse.newBuilder() + .setNextPageToken("") + .addAllInstanceConfigs(Arrays.asList(responsesElement)) + .build(); + mockInstanceAdmin.addResponse(expectedResponse); + + ProjectName parent = ProjectName.of("[PROJECT]"); + + ListInstanceConfigsPagedResponse pagedListResponse = client.listInstanceConfigs(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getInstanceConfigsList().get(0), resources.get(0)); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListInstanceConfigsRequest actualRequest = ((ListInstanceConfigsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listInstanceConfigsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + ProjectName parent = ProjectName.of("[PROJECT]"); + client.listInstanceConfigs(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listInstanceConfigsTest2() throws Exception { + InstanceConfig responsesElement = InstanceConfig.newBuilder().build(); + ListInstanceConfigsResponse expectedResponse = + ListInstanceConfigsResponse.newBuilder() + .setNextPageToken("") + .addAllInstanceConfigs(Arrays.asList(responsesElement)) + .build(); + mockInstanceAdmin.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListInstanceConfigsPagedResponse pagedListResponse = client.listInstanceConfigs(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getInstanceConfigsList().get(0), resources.get(0)); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListInstanceConfigsRequest actualRequest = ((ListInstanceConfigsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listInstanceConfigsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + client.listInstanceConfigs(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getInstanceConfigTest() throws Exception { + InstanceConfig expectedResponse = + InstanceConfig.newBuilder() + .setName(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .addAllReplicas(new ArrayList()) + .addAllOptionalReplicas(new ArrayList()) + .setBaseConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .putAllLabels(new HashMap()) + .setEtag("etag3123477") + .addAllLeaderOptions(new ArrayList()) + .setReconciling(true) + .setStorageLimitPerProcessingUnit(-1769187130) + .build(); + mockInstanceAdmin.addResponse(expectedResponse); + + InstanceConfigName name = InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]"); + + InstanceConfig actualResponse = client.getInstanceConfig(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetInstanceConfigRequest actualRequest = ((GetInstanceConfigRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getInstanceConfigExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + InstanceConfigName name = InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]"); + client.getInstanceConfig(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getInstanceConfigTest2() throws Exception { + InstanceConfig expectedResponse = + InstanceConfig.newBuilder() + .setName(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .addAllReplicas(new ArrayList()) + .addAllOptionalReplicas(new ArrayList()) + .setBaseConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .putAllLabels(new HashMap()) + .setEtag("etag3123477") + .addAllLeaderOptions(new ArrayList()) + .setReconciling(true) + .setStorageLimitPerProcessingUnit(-1769187130) + .build(); + mockInstanceAdmin.addResponse(expectedResponse); + + String name = "name3373707"; + + InstanceConfig actualResponse = client.getInstanceConfig(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetInstanceConfigRequest actualRequest = ((GetInstanceConfigRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getInstanceConfigExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + String name = "name3373707"; + client.getInstanceConfig(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createInstanceConfigTest() throws Exception { + InstanceConfig expectedResponse = + InstanceConfig.newBuilder() + .setName(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .addAllReplicas(new ArrayList()) + .addAllOptionalReplicas(new ArrayList()) + .setBaseConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .putAllLabels(new HashMap()) + .setEtag("etag3123477") + .addAllLeaderOptions(new ArrayList()) + .setReconciling(true) + .setStorageLimitPerProcessingUnit(-1769187130) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createInstanceConfigTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockInstanceAdmin.addResponse(resultOperation); + + ProjectName parent = ProjectName.of("[PROJECT]"); + InstanceConfig instanceConfig = InstanceConfig.newBuilder().build(); + String instanceConfigId = "instanceConfigId1750947762"; + + InstanceConfig actualResponse = + client.createInstanceConfigAsync(parent, instanceConfig, instanceConfigId).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateInstanceConfigRequest actualRequest = + ((CreateInstanceConfigRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(instanceConfig, actualRequest.getInstanceConfig()); + Assert.assertEquals(instanceConfigId, actualRequest.getInstanceConfigId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createInstanceConfigExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + 
mockInstanceAdmin.addException(exception); + + try { + ProjectName parent = ProjectName.of("[PROJECT]"); + InstanceConfig instanceConfig = InstanceConfig.newBuilder().build(); + String instanceConfigId = "instanceConfigId1750947762"; + client.createInstanceConfigAsync(parent, instanceConfig, instanceConfigId).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void createInstanceConfigTest2() throws Exception { + InstanceConfig expectedResponse = + InstanceConfig.newBuilder() + .setName(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .addAllReplicas(new ArrayList()) + .addAllOptionalReplicas(new ArrayList()) + .setBaseConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .putAllLabels(new HashMap()) + .setEtag("etag3123477") + .addAllLeaderOptions(new ArrayList()) + .setReconciling(true) + .setStorageLimitPerProcessingUnit(-1769187130) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createInstanceConfigTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockInstanceAdmin.addResponse(resultOperation); + + String parent = "parent-995424086"; + InstanceConfig instanceConfig = InstanceConfig.newBuilder().build(); + String instanceConfigId = "instanceConfigId1750947762"; + + InstanceConfig actualResponse = + client.createInstanceConfigAsync(parent, instanceConfig, instanceConfigId).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateInstanceConfigRequest actualRequest = + 
((CreateInstanceConfigRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(instanceConfig, actualRequest.getInstanceConfig()); + Assert.assertEquals(instanceConfigId, actualRequest.getInstanceConfigId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createInstanceConfigExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + InstanceConfig instanceConfig = InstanceConfig.newBuilder().build(); + String instanceConfigId = "instanceConfigId1750947762"; + client.createInstanceConfigAsync(parent, instanceConfig, instanceConfigId).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void updateInstanceConfigTest() throws Exception { + InstanceConfig expectedResponse = + InstanceConfig.newBuilder() + .setName(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .addAllReplicas(new ArrayList()) + .addAllOptionalReplicas(new ArrayList()) + .setBaseConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .putAllLabels(new HashMap()) + .setEtag("etag3123477") + .addAllLeaderOptions(new ArrayList()) + .setReconciling(true) + .setStorageLimitPerProcessingUnit(-1769187130) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("updateInstanceConfigTest") + .setDone(true) + 
.setResponse(Any.pack(expectedResponse)) + .build(); + mockInstanceAdmin.addResponse(resultOperation); + + InstanceConfig instanceConfig = InstanceConfig.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + InstanceConfig actualResponse = + client.updateInstanceConfigAsync(instanceConfig, updateMask).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + UpdateInstanceConfigRequest actualRequest = + ((UpdateInstanceConfigRequest) actualRequests.get(0)); + + Assert.assertEquals(instanceConfig, actualRequest.getInstanceConfig()); + Assert.assertEquals(updateMask, actualRequest.getUpdateMask()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void updateInstanceConfigExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + InstanceConfig instanceConfig = InstanceConfig.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateInstanceConfigAsync(instanceConfig, updateMask).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void deleteInstanceConfigTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockInstanceAdmin.addResponse(expectedResponse); + + InstanceConfigName name = InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]"); + + client.deleteInstanceConfig(name); + 
+ List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteInstanceConfigRequest actualRequest = + ((DeleteInstanceConfigRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteInstanceConfigExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + InstanceConfigName name = InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]"); + client.deleteInstanceConfig(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void deleteInstanceConfigTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockInstanceAdmin.addResponse(expectedResponse); + + String name = "name3373707"; + + client.deleteInstanceConfig(name); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteInstanceConfigRequest actualRequest = + ((DeleteInstanceConfigRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteInstanceConfigExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + String name = "name3373707"; + client.deleteInstanceConfig(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected 
exception. + } + } + + @Test + public void listInstanceConfigOperationsTest() throws Exception { + Operation responsesElement = Operation.newBuilder().build(); + ListInstanceConfigOperationsResponse expectedResponse = + ListInstanceConfigOperationsResponse.newBuilder() + .setNextPageToken("") + .addAllOperations(Arrays.asList(responsesElement)) + .build(); + mockInstanceAdmin.addResponse(expectedResponse); + + ProjectName parent = ProjectName.of("[PROJECT]"); + + ListInstanceConfigOperationsPagedResponse pagedListResponse = + client.listInstanceConfigOperations(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getOperationsList().get(0), resources.get(0)); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListInstanceConfigOperationsRequest actualRequest = + ((ListInstanceConfigOperationsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listInstanceConfigOperationsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + ProjectName parent = ProjectName.of("[PROJECT]"); + client.listInstanceConfigOperations(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listInstanceConfigOperationsTest2() throws Exception { + Operation responsesElement = Operation.newBuilder().build(); + ListInstanceConfigOperationsResponse expectedResponse = + ListInstanceConfigOperationsResponse.newBuilder() + .setNextPageToken("") + .addAllOperations(Arrays.asList(responsesElement)) + .build(); + mockInstanceAdmin.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListInstanceConfigOperationsPagedResponse pagedListResponse = + client.listInstanceConfigOperations(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getOperationsList().get(0), resources.get(0)); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListInstanceConfigOperationsRequest actualRequest = + ((ListInstanceConfigOperationsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listInstanceConfigOperationsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + client.listInstanceConfigOperations(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listInstancesTest() throws Exception { + Instance responsesElement = Instance.newBuilder().build(); + ListInstancesResponse expectedResponse = + ListInstancesResponse.newBuilder() + .setNextPageToken("") + .addAllInstances(Arrays.asList(responsesElement)) + .build(); + mockInstanceAdmin.addResponse(expectedResponse); + + ProjectName parent = ProjectName.of("[PROJECT]"); + + ListInstancesPagedResponse pagedListResponse = client.listInstances(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getInstancesList().get(0), resources.get(0)); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListInstancesRequest actualRequest = ((ListInstancesRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listInstancesExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + ProjectName parent = ProjectName.of("[PROJECT]"); + client.listInstances(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listInstancesTest2() throws Exception { + Instance responsesElement = Instance.newBuilder().build(); + ListInstancesResponse expectedResponse = + ListInstancesResponse.newBuilder() + .setNextPageToken("") + .addAllInstances(Arrays.asList(responsesElement)) + .build(); + mockInstanceAdmin.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListInstancesPagedResponse pagedListResponse = client.listInstances(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getInstancesList().get(0), resources.get(0)); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListInstancesRequest actualRequest = ((ListInstancesRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listInstancesExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + client.listInstances(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listInstancePartitionsTest() throws Exception { + InstancePartition responsesElement = InstancePartition.newBuilder().build(); + ListInstancePartitionsResponse expectedResponse = + ListInstancePartitionsResponse.newBuilder() + .setNextPageToken("") + .addAllInstancePartitions(Arrays.asList(responsesElement)) + .build(); + mockInstanceAdmin.addResponse(expectedResponse); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + + ListInstancePartitionsPagedResponse pagedListResponse = client.listInstancePartitions(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getInstancePartitionsList().get(0), resources.get(0)); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListInstancePartitionsRequest actualRequest = + ((ListInstancePartitionsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listInstancePartitionsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + client.listInstancePartitions(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listInstancePartitionsTest2() throws Exception { + InstancePartition responsesElement = InstancePartition.newBuilder().build(); + ListInstancePartitionsResponse expectedResponse = + ListInstancePartitionsResponse.newBuilder() + .setNextPageToken("") + .addAllInstancePartitions(Arrays.asList(responsesElement)) + .build(); + mockInstanceAdmin.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListInstancePartitionsPagedResponse pagedListResponse = client.listInstancePartitions(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getInstancePartitionsList().get(0), resources.get(0)); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListInstancePartitionsRequest actualRequest = + ((ListInstancePartitionsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listInstancePartitionsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + client.listInstancePartitions(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getInstanceTest() throws Exception { + Instance expectedResponse = + Instance.newBuilder() + .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setNodeCount(1539922066) + .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .putAllLabels(new HashMap()) + .addAllEndpointUris(new ArrayList()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFreeInstanceMetadata(FreeInstanceMetadata.newBuilder().build()) + .build(); + mockInstanceAdmin.addResponse(expectedResponse); + + InstanceName name = InstanceName.of("[PROJECT]", "[INSTANCE]"); + + Instance actualResponse = client.getInstance(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetInstanceRequest actualRequest = ((GetInstanceRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getInstanceExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + InstanceName name = InstanceName.of("[PROJECT]", "[INSTANCE]"); + client.getInstance(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getInstanceTest2() throws Exception { + Instance expectedResponse = + Instance.newBuilder() + .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setNodeCount(1539922066) + .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .putAllLabels(new HashMap()) + .addAllEndpointUris(new ArrayList()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFreeInstanceMetadata(FreeInstanceMetadata.newBuilder().build()) + .build(); + mockInstanceAdmin.addResponse(expectedResponse); + + String name = "name3373707"; + + Instance actualResponse = client.getInstance(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetInstanceRequest actualRequest = ((GetInstanceRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getInstanceExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + String name = "name3373707"; + client.getInstance(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createInstanceTest() throws Exception { + Instance expectedResponse = + Instance.newBuilder() + .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setNodeCount(1539922066) + .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .putAllLabels(new HashMap()) + .addAllEndpointUris(new ArrayList()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFreeInstanceMetadata(FreeInstanceMetadata.newBuilder().build()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createInstanceTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockInstanceAdmin.addResponse(resultOperation); + + ProjectName parent = ProjectName.of("[PROJECT]"); + String instanceId = "instanceId902024336"; + Instance instance = Instance.newBuilder().build(); + + Instance actualResponse = client.createInstanceAsync(parent, instanceId, instance).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateInstanceRequest actualRequest = ((CreateInstanceRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(instanceId, actualRequest.getInstanceId()); + Assert.assertEquals(instance, actualRequest.getInstance()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createInstanceExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + 
mockInstanceAdmin.addException(exception); + + try { + ProjectName parent = ProjectName.of("[PROJECT]"); + String instanceId = "instanceId902024336"; + Instance instance = Instance.newBuilder().build(); + client.createInstanceAsync(parent, instanceId, instance).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void createInstanceTest2() throws Exception { + Instance expectedResponse = + Instance.newBuilder() + .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setNodeCount(1539922066) + .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .putAllLabels(new HashMap()) + .addAllEndpointUris(new ArrayList()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFreeInstanceMetadata(FreeInstanceMetadata.newBuilder().build()) + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createInstanceTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockInstanceAdmin.addResponse(resultOperation); + + String parent = "parent-995424086"; + String instanceId = "instanceId902024336"; + Instance instance = Instance.newBuilder().build(); + + Instance actualResponse = client.createInstanceAsync(parent, instanceId, instance).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateInstanceRequest actualRequest = 
((CreateInstanceRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(instanceId, actualRequest.getInstanceId()); + Assert.assertEquals(instance, actualRequest.getInstance()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createInstanceExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + String instanceId = "instanceId902024336"; + Instance instance = Instance.newBuilder().build(); + client.createInstanceAsync(parent, instanceId, instance).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void updateInstanceTest() throws Exception { + Instance expectedResponse = + Instance.newBuilder() + .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setNodeCount(1539922066) + .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .putAllLabels(new HashMap()) + .addAllEndpointUris(new ArrayList()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .setFreeInstanceMetadata(FreeInstanceMetadata.newBuilder().build()) + .build(); + Operation resultOperation = + Operation.newBuilder() + 
.setName("updateInstanceTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockInstanceAdmin.addResponse(resultOperation); + + Instance instance = Instance.newBuilder().build(); + FieldMask fieldMask = FieldMask.newBuilder().build(); + + Instance actualResponse = client.updateInstanceAsync(instance, fieldMask).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + UpdateInstanceRequest actualRequest = ((UpdateInstanceRequest) actualRequests.get(0)); + + Assert.assertEquals(instance, actualRequest.getInstance()); + Assert.assertEquals(fieldMask, actualRequest.getFieldMask()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void updateInstanceExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + Instance instance = Instance.newBuilder().build(); + FieldMask fieldMask = FieldMask.newBuilder().build(); + client.updateInstanceAsync(instance, fieldMask).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void deleteInstanceTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockInstanceAdmin.addResponse(expectedResponse); + + InstanceName name = InstanceName.of("[PROJECT]", "[INSTANCE]"); + + client.deleteInstance(name); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, 
actualRequests.size()); + DeleteInstanceRequest actualRequest = ((DeleteInstanceRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteInstanceExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + InstanceName name = InstanceName.of("[PROJECT]", "[INSTANCE]"); + client.deleteInstance(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void deleteInstanceTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockInstanceAdmin.addResponse(expectedResponse); + + String name = "name3373707"; + + client.deleteInstance(name); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteInstanceRequest actualRequest = ((DeleteInstanceRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteInstanceExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + String name = "name3373707"; + client.deleteInstance(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void setIamPolicyTest() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockInstanceAdmin.addResponse(expectedResponse); + + ResourceName resource = InstanceName.of("[PROJECT]", "[INSTANCE]"); + Policy policy = Policy.newBuilder().build(); + + Policy actualResponse = client.setIamPolicy(resource, policy); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + SetIamPolicyRequest actualRequest = ((SetIamPolicyRequest) actualRequests.get(0)); + + Assert.assertEquals(resource.toString(), actualRequest.getResource()); + Assert.assertEquals(policy, actualRequest.getPolicy()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void setIamPolicyExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + ResourceName resource = InstanceName.of("[PROJECT]", "[INSTANCE]"); + Policy policy = Policy.newBuilder().build(); + client.setIamPolicy(resource, policy); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void setIamPolicyTest2() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockInstanceAdmin.addResponse(expectedResponse); + + String resource = "resource-341064690"; + Policy policy = Policy.newBuilder().build(); + + Policy actualResponse = client.setIamPolicy(resource, policy); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + SetIamPolicyRequest actualRequest = ((SetIamPolicyRequest) actualRequests.get(0)); + + Assert.assertEquals(resource, actualRequest.getResource()); + Assert.assertEquals(policy, actualRequest.getPolicy()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void setIamPolicyExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + String resource = "resource-341064690"; + Policy policy = Policy.newBuilder().build(); + client.setIamPolicy(resource, policy); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getIamPolicyTest() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockInstanceAdmin.addResponse(expectedResponse); + + ResourceName resource = InstanceName.of("[PROJECT]", "[INSTANCE]"); + + Policy actualResponse = client.getIamPolicy(resource); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetIamPolicyRequest actualRequest = ((GetIamPolicyRequest) actualRequests.get(0)); + + Assert.assertEquals(resource.toString(), actualRequest.getResource()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getIamPolicyExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + ResourceName resource = InstanceName.of("[PROJECT]", "[INSTANCE]"); + client.getIamPolicy(resource); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getIamPolicyTest2() throws Exception { + Policy expectedResponse = + Policy.newBuilder() + .setVersion(351608024) + .addAllBindings(new ArrayList()) + .addAllAuditConfigs(new ArrayList()) + .setEtag(ByteString.EMPTY) + .build(); + mockInstanceAdmin.addResponse(expectedResponse); + + String resource = "resource-341064690"; + + Policy actualResponse = client.getIamPolicy(resource); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetIamPolicyRequest actualRequest = ((GetIamPolicyRequest) actualRequests.get(0)); + + Assert.assertEquals(resource, actualRequest.getResource()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getIamPolicyExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + String resource = "resource-341064690"; + client.getIamPolicy(resource); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void testIamPermissionsTest() throws Exception { + TestIamPermissionsResponse expectedResponse = + TestIamPermissionsResponse.newBuilder().addAllPermissions(new ArrayList()).build(); + mockInstanceAdmin.addResponse(expectedResponse); + + ResourceName resource = InstanceName.of("[PROJECT]", "[INSTANCE]"); + List permissions = new ArrayList<>(); + + TestIamPermissionsResponse actualResponse = client.testIamPermissions(resource, permissions); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + TestIamPermissionsRequest actualRequest = ((TestIamPermissionsRequest) actualRequests.get(0)); + + Assert.assertEquals(resource.toString(), actualRequest.getResource()); + Assert.assertEquals(permissions, actualRequest.getPermissionsList()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void testIamPermissionsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + ResourceName resource = InstanceName.of("[PROJECT]", "[INSTANCE]"); + List permissions = new ArrayList<>(); + client.testIamPermissions(resource, permissions); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void testIamPermissionsTest2() throws Exception { + TestIamPermissionsResponse expectedResponse = + TestIamPermissionsResponse.newBuilder().addAllPermissions(new ArrayList()).build(); + mockInstanceAdmin.addResponse(expectedResponse); + + String resource = "resource-341064690"; + List permissions = new ArrayList<>(); + + TestIamPermissionsResponse actualResponse = client.testIamPermissions(resource, permissions); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + TestIamPermissionsRequest actualRequest = ((TestIamPermissionsRequest) actualRequests.get(0)); + + Assert.assertEquals(resource, actualRequest.getResource()); + Assert.assertEquals(permissions, actualRequest.getPermissionsList()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void testIamPermissionsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + String resource = "resource-341064690"; + List permissions = new ArrayList<>(); + client.testIamPermissions(resource, permissions); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getInstancePartitionTest() throws Exception { + InstancePartition expectedResponse = + InstancePartition.newBuilder() + .setName( + InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]") + .toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .addAllReferencingDatabases(new ArrayList()) + .addAllReferencingBackups(new ArrayList()) + .setEtag("etag3123477") + .build(); + mockInstanceAdmin.addResponse(expectedResponse); + + InstancePartitionName name = + InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]"); + + InstancePartition actualResponse = client.getInstancePartition(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetInstancePartitionRequest actualRequest = + ((GetInstancePartitionRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getInstancePartitionExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + InstancePartitionName name = + InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]"); + client.getInstancePartition(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getInstancePartitionTest2() throws Exception { + InstancePartition expectedResponse = + InstancePartition.newBuilder() + .setName( + InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]") + .toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .addAllReferencingDatabases(new ArrayList()) + .addAllReferencingBackups(new ArrayList()) + .setEtag("etag3123477") + .build(); + mockInstanceAdmin.addResponse(expectedResponse); + + String name = "name3373707"; + + InstancePartition actualResponse = client.getInstancePartition(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetInstancePartitionRequest actualRequest = + ((GetInstancePartitionRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getInstancePartitionExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + String name = "name3373707"; + client.getInstancePartition(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createInstancePartitionTest() throws Exception { + InstancePartition expectedResponse = + InstancePartition.newBuilder() + .setName( + InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]") + .toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .addAllReferencingDatabases(new ArrayList()) + .addAllReferencingBackups(new ArrayList()) + .setEtag("etag3123477") + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createInstancePartitionTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockInstanceAdmin.addResponse(resultOperation); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + InstancePartition instancePartition = InstancePartition.newBuilder().build(); + String instancePartitionId = "instancePartitionId1364450768"; + + InstancePartition actualResponse = + client.createInstancePartitionAsync(parent, instancePartition, instancePartitionId).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateInstancePartitionRequest actualRequest = + ((CreateInstancePartitionRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(instancePartition, actualRequest.getInstancePartition()); + Assert.assertEquals(instancePartitionId, actualRequest.getInstancePartitionId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createInstancePartitionExceptionTest() throws 
Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + InstancePartition instancePartition = InstancePartition.newBuilder().build(); + String instancePartitionId = "instancePartitionId1364450768"; + client.createInstancePartitionAsync(parent, instancePartition, instancePartitionId).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void createInstancePartitionTest2() throws Exception { + InstancePartition expectedResponse = + InstancePartition.newBuilder() + .setName( + InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]") + .toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .addAllReferencingDatabases(new ArrayList()) + .addAllReferencingBackups(new ArrayList()) + .setEtag("etag3123477") + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("createInstancePartitionTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockInstanceAdmin.addResponse(resultOperation); + + String parent = "parent-995424086"; + InstancePartition instancePartition = InstancePartition.newBuilder().build(); + String instancePartitionId = "instancePartitionId1364450768"; + + InstancePartition actualResponse = + client.createInstancePartitionAsync(parent, instancePartition, 
instancePartitionId).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateInstancePartitionRequest actualRequest = + ((CreateInstancePartitionRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(instancePartition, actualRequest.getInstancePartition()); + Assert.assertEquals(instancePartitionId, actualRequest.getInstancePartitionId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createInstancePartitionExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + InstancePartition instancePartition = InstancePartition.newBuilder().build(); + String instancePartitionId = "instancePartitionId1364450768"; + client.createInstancePartitionAsync(parent, instancePartition, instancePartitionId).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void deleteInstancePartitionTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockInstanceAdmin.addResponse(expectedResponse); + + InstancePartitionName name = + InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]"); + + client.deleteInstancePartition(name); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + 
DeleteInstancePartitionRequest actualRequest = + ((DeleteInstancePartitionRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteInstancePartitionExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + InstancePartitionName name = + InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]"); + client.deleteInstancePartition(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void deleteInstancePartitionTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockInstanceAdmin.addResponse(expectedResponse); + + String name = "name3373707"; + + client.deleteInstancePartition(name); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteInstancePartitionRequest actualRequest = + ((DeleteInstancePartitionRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteInstancePartitionExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + String name = "name3373707"; + client.deleteInstancePartition(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void updateInstancePartitionTest() throws Exception { + InstancePartition expectedResponse = + InstancePartition.newBuilder() + .setName( + InstancePartitionName.of("[PROJECT]", "[INSTANCE]", "[INSTANCE_PARTITION]") + .toString()) + .setConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .setDisplayName("displayName1714148973") + .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) + .setCreateTime(Timestamp.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .addAllReferencingDatabases(new ArrayList()) + .addAllReferencingBackups(new ArrayList()) + .setEtag("etag3123477") + .build(); + Operation resultOperation = + Operation.newBuilder() + .setName("updateInstancePartitionTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockInstanceAdmin.addResponse(resultOperation); + + InstancePartition instancePartition = InstancePartition.newBuilder().build(); + FieldMask fieldMask = FieldMask.newBuilder().build(); + + InstancePartition actualResponse = + client.updateInstancePartitionAsync(instancePartition, fieldMask).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + UpdateInstancePartitionRequest actualRequest = + ((UpdateInstancePartitionRequest) actualRequests.get(0)); + + Assert.assertEquals(instancePartition, actualRequest.getInstancePartition()); + Assert.assertEquals(fieldMask, actualRequest.getFieldMask()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void updateInstancePartitionExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + InstancePartition 
instancePartition = InstancePartition.newBuilder().build(); + FieldMask fieldMask = FieldMask.newBuilder().build(); + client.updateInstancePartitionAsync(instancePartition, fieldMask).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void listInstancePartitionOperationsTest() throws Exception { + Operation responsesElement = Operation.newBuilder().build(); + ListInstancePartitionOperationsResponse expectedResponse = + ListInstancePartitionOperationsResponse.newBuilder() + .setNextPageToken("") + .addAllOperations(Arrays.asList(responsesElement)) + .build(); + mockInstanceAdmin.addResponse(expectedResponse); + + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + + ListInstancePartitionOperationsPagedResponse pagedListResponse = + client.listInstancePartitionOperations(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getOperationsList().get(0), resources.get(0)); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListInstancePartitionOperationsRequest actualRequest = + ((ListInstancePartitionOperationsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listInstancePartitionOperationsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + 
mockInstanceAdmin.addException(exception); + + try { + InstanceName parent = InstanceName.of("[PROJECT]", "[INSTANCE]"); + client.listInstancePartitionOperations(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void listInstancePartitionOperationsTest2() throws Exception { + Operation responsesElement = Operation.newBuilder().build(); + ListInstancePartitionOperationsResponse expectedResponse = + ListInstancePartitionOperationsResponse.newBuilder() + .setNextPageToken("") + .addAllOperations(Arrays.asList(responsesElement)) + .build(); + mockInstanceAdmin.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListInstancePartitionOperationsPagedResponse pagedListResponse = + client.listInstancePartitionOperations(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getOperationsList().get(0), resources.get(0)); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListInstancePartitionOperationsRequest actualRequest = + ((ListInstancePartitionOperationsRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listInstancePartitionOperationsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + client.listInstancePartitionOperations(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void moveInstanceTest() throws Exception { + MoveInstanceResponse expectedResponse = MoveInstanceResponse.newBuilder().build(); + Operation resultOperation = + Operation.newBuilder() + .setName("moveInstanceTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockInstanceAdmin.addResponse(resultOperation); + + MoveInstanceRequest request = + MoveInstanceRequest.newBuilder() + .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString()) + .setTargetConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .build(); + + MoveInstanceResponse actualResponse = client.moveInstanceAsync(request).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + MoveInstanceRequest actualRequest = ((MoveInstanceRequest) actualRequests.get(0)); + + Assert.assertEquals(request.getName(), actualRequest.getName()); + Assert.assertEquals(request.getTargetConfig(), actualRequest.getTargetConfig()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void moveInstanceExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + MoveInstanceRequest request = + MoveInstanceRequest.newBuilder() + .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString()) + .setTargetConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .build(); + client.moveInstanceAsync(request).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + 
Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/MockInstanceAdmin.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/MockInstanceAdmin.java new file mode 100644 index 000000000000..a5871925a1b0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/MockInstanceAdmin.java @@ -0,0 +1,59 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.admin.instance.v1; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.protobuf.AbstractMessage; +import io.grpc.ServerServiceDefinition; +import java.util.List; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockInstanceAdmin implements MockGrpcService { + private final MockInstanceAdminImpl serviceImpl; + + public MockInstanceAdmin() { + serviceImpl = new MockInstanceAdminImpl(); + } + + @Override + public List getRequests() { + return serviceImpl.getRequests(); + } + + @Override + public void addResponse(AbstractMessage response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addException(exception); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/MockInstanceAdminImpl.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/MockInstanceAdminImpl.java new file mode 100644 index 000000000000..b0d4f9a857bd --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/MockInstanceAdminImpl.java @@ -0,0 +1,546 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.admin.instance.v1; + +import com.google.api.core.BetaApi; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.iam.v1.TestIamPermissionsRequest; +import com.google.iam.v1.TestIamPermissionsResponse; +import com.google.longrunning.Operation; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Empty; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.CreateInstanceRequest; +import com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.DeleteInstanceRequest; +import com.google.spanner.admin.instance.v1.GetInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.GetInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.GetInstanceRequest; +import com.google.spanner.admin.instance.v1.Instance; +import com.google.spanner.admin.instance.v1.InstanceAdminGrpc.InstanceAdminImplBase; +import com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.InstancePartition; +import com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest; +import com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest; +import com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse; +import com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest; +import com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse; +import 
com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest; +import com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse; +import com.google.spanner.admin.instance.v1.ListInstancesRequest; +import com.google.spanner.admin.instance.v1.ListInstancesResponse; +import com.google.spanner.admin.instance.v1.MoveInstanceRequest; +import com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.UpdateInstanceRequest; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockInstanceAdminImpl extends InstanceAdminImplBase { + private List requests; + private Queue responses; + + public MockInstanceAdminImpl() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + public List getRequests() { + return requests; + } + + public void addResponse(AbstractMessage response) { + responses.add(response); + } + + public void setResponses(List responses) { + this.responses = new LinkedList(responses); + } + + public void addException(Exception exception) { + responses.add(exception); + } + + public void reset() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + @Override + public void listInstanceConfigs( + ListInstanceConfigsRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListInstanceConfigsResponse) { + requests.add(request); + responseObserver.onNext(((ListInstanceConfigsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized 
response type %s for method ListInstanceConfigs, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + ListInstanceConfigsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getInstanceConfig( + GetInstanceConfigRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof InstanceConfig) { + requests.add(request); + responseObserver.onNext(((InstanceConfig) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetInstanceConfig, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + InstanceConfig.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void createInstanceConfig( + CreateInstanceConfigRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext(((Operation) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CreateInstanceConfig, expected %s or" + + " %s", + response == null ? 
"null" : response.getClass().getName(), + Operation.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void updateInstanceConfig( + UpdateInstanceConfigRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext(((Operation) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method UpdateInstanceConfig, expected %s or" + + " %s", + response == null ? "null" : response.getClass().getName(), + Operation.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void deleteInstanceConfig( + DeleteInstanceConfigRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Empty) { + requests.add(request); + responseObserver.onNext(((Empty) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method DeleteInstanceConfig, expected %s or" + + " %s", + response == null ? 
"null" : response.getClass().getName(), + Empty.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listInstanceConfigOperations( + ListInstanceConfigOperationsRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListInstanceConfigOperationsResponse) { + requests.add(request); + responseObserver.onNext(((ListInstanceConfigOperationsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListInstanceConfigOperations, expected" + + " %s or %s", + response == null ? "null" : response.getClass().getName(), + ListInstanceConfigOperationsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listInstances( + ListInstancesRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListInstancesResponse) { + requests.add(request); + responseObserver.onNext(((ListInstancesResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListInstances, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + ListInstancesResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listInstancePartitions( + ListInstancePartitionsRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListInstancePartitionsResponse) { + requests.add(request); + responseObserver.onNext(((ListInstancePartitionsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListInstancePartitions, expected %s or" + + " %s", + response == null ? "null" : response.getClass().getName(), + ListInstancePartitionsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getInstance(GetInstanceRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Instance) { + requests.add(request); + responseObserver.onNext(((Instance) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetInstance, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Instance.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void createInstance( + CreateInstanceRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext(((Operation) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CreateInstance, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Operation.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void updateInstance( + UpdateInstanceRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext(((Operation) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method UpdateInstance, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Operation.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void deleteInstance( + DeleteInstanceRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Empty) { + requests.add(request); + responseObserver.onNext(((Empty) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method DeleteInstance, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Empty.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void setIamPolicy(SetIamPolicyRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Policy) { + requests.add(request); + responseObserver.onNext(((Policy) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method SetIamPolicy, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Policy.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getIamPolicy(GetIamPolicyRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Policy) { + requests.add(request); + responseObserver.onNext(((Policy) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetIamPolicy, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Policy.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void testIamPermissions( + TestIamPermissionsRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof TestIamPermissionsResponse) { + requests.add(request); + responseObserver.onNext(((TestIamPermissionsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method TestIamPermissions, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + TestIamPermissionsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getInstancePartition( + GetInstancePartitionRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof InstancePartition) { + requests.add(request); + responseObserver.onNext(((InstancePartition) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetInstancePartition, expected %s or" + + " %s", + response == null ? "null" : response.getClass().getName(), + InstancePartition.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void createInstancePartition( + CreateInstancePartitionRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext(((Operation) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CreateInstancePartition, expected %s or" + + " %s", + response == null ? 
"null" : response.getClass().getName(), + Operation.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void deleteInstancePartition( + DeleteInstancePartitionRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Empty) { + requests.add(request); + responseObserver.onNext(((Empty) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method DeleteInstancePartition, expected %s or" + + " %s", + response == null ? "null" : response.getClass().getName(), + Empty.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void updateInstancePartition( + UpdateInstancePartitionRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext(((Operation) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method UpdateInstancePartition, expected %s or" + + " %s", + response == null ? 
"null" : response.getClass().getName(), + Operation.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listInstancePartitionOperations( + ListInstancePartitionOperationsRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListInstancePartitionOperationsResponse) { + requests.add(request); + responseObserver.onNext(((ListInstancePartitionOperationsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListInstancePartitionOperations," + + " expected %s or %s", + response == null ? "null" : response.getClass().getName(), + ListInstancePartitionOperationsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void moveInstance( + MoveInstanceRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext(((Operation) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method MoveInstance, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + Operation.class.getName(), + Exception.class.getName()))); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/benchmarking/BenchmarkValidator.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/benchmarking/BenchmarkValidator.java new file mode 100644 index 000000000000..225197af6c1d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/benchmarking/BenchmarkValidator.java @@ -0,0 +1,156 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.benchmarking; + +import com.google.cloud.spanner.benchmarking.BenchmarkValidator.BaselineResult.BenchmarkResult; +import com.google.cloud.spanner.benchmarking.BenchmarkValidator.BaselineResult.BenchmarkResult.Percentile; +import com.google.gson.Gson; +import com.google.gson.reflect.TypeToken; +import java.io.File; +import java.io.IOException; +import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +public class BenchmarkValidator { + + private final BaselineResult expectedResults; + private final List actualResults; + + public BenchmarkValidator(String baselineFile, String actualFile) { + Gson gson = new Gson(); + // Load expected result JSON from resource folder + this.expectedResults = gson.fromJson(loadJsonFromResources(baselineFile), BaselineResult.class); + // Load the actual result from current benchmarking run + this.actualResults = + gson.fromJson( + loadJsonFromFile(actualFile), + new TypeToken>() {}.getType()); + } + + void validate() { + // Validating the resultant percentile against expected percentile with allowed threshold + for (ActualBenchmarkResult actualResult : actualResults) { + BenchmarkResult expectResult = expectedResults.benchmarkResultMap.get(actualResult.benchmark); + if (expectResult == null) { + throw new ValidationException( + "Missing expected benchmark configuration for actual benchmarking"); + } + Map actualPercentilesMap = actualResult.primaryMetric.scorePercentiles; + // We will only be comparing the percentiles(p50, p90, p90) which are configured in the + // expected percentiles. This allows some checks to be disabled if required. 
+ for (Percentile expectedPercentile : expectResult.scorePercentiles) { + String percentile = expectedPercentile.percentile; + double difference = + calculatePercentageDifference( + expectedPercentile.baseline, actualPercentilesMap.get(percentile)); + // if an absolute different in percentage is greater than allowed difference + // Then we are throwing validation error + if (Math.abs(Math.ceil(difference)) > expectedPercentile.difference) { + throw new ValidationException( + String.format( + "[%s][%s] Expected percentile %s[+/-%s] but got %s", + actualResult.benchmark, + percentile, + expectedPercentile.baseline, + expectedPercentile.difference, + actualPercentilesMap.get(percentile))); + } + } + } + } + + public static double calculatePercentageDifference(double base, double compareWith) { + if (base == 0) { + return 0.0; + } + return ((compareWith - base) / base) * 100; + } + + private String loadJsonFromFile(String file) { + try { + return new String(Files.readAllBytes(Paths.get(file))); + } catch (IOException e) { + throw new ValidationException("Failed to read file: " + file, e); + } + } + + private String loadJsonFromResources(String baselineFile) { + URL resourceUrl = getClass().getClassLoader().getResource(baselineFile); + if (resourceUrl == null) { + throw new ValidationException("File not found: " + baselineFile); + } + File file = new File(resourceUrl.getFile()); + return loadJsonFromFile(file.getAbsolutePath()); + } + + static class ActualBenchmarkResult { + String benchmark; + PrimaryMetric primaryMetric; + + static class PrimaryMetric { + Map scorePercentiles; + } + } + + static class BaselineResult { + Map benchmarkResultMap; + + static class BenchmarkResult { + List scorePercentiles; + + static class Percentile { + String percentile; + Double baseline; + Double difference; + } + } + } + + static class ValidationException extends RuntimeException { + ValidationException(String message) { + super(message); + } + + ValidationException(String message, 
Throwable cause) { + super(message, cause); + } + } + + private static String parseCommandLineArgs(String[] args, String key) { + if (args == null) { + return ""; + } + for (String arg : args) { + if (arg.startsWith("--" + key)) { + String[] splits = arg.split("="); + if (splits.length == 2) { + return splits[1].trim(); + } + } + } + return ""; + } + + public static void main(String[] args) { + String actualFile = parseCommandLineArgs(args, "file"); + new BenchmarkValidator("com/google/cloud/spanner/jmh/jmh-baseline.json", actualFile).validate(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/benchmarking/MonitoringServiceImpl.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/benchmarking/MonitoringServiceImpl.java new file mode 100644 index 000000000000..aaa738761250 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/benchmarking/MonitoringServiceImpl.java @@ -0,0 +1,39 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.benchmarking; + +import com.google.monitoring.v3.CreateTimeSeriesRequest; +import com.google.monitoring.v3.MetricServiceGrpc.MetricServiceImplBase; +import com.google.protobuf.Empty; +import io.grpc.Status; +import io.grpc.stub.StreamObserver; + +class MonitoringServiceImpl extends MetricServiceImplBase { + + @Override + public void createServiceTimeSeries( + CreateTimeSeriesRequest request, StreamObserver responseObserver) { + try { + Thread.sleep(100); + responseObserver.onNext(Empty.getDefaultInstance()); + responseObserver.onCompleted(); + } catch (InterruptedException e) { + responseObserver.onError( + Status.CANCELLED.withCause(e).withDescription(e.getMessage()).asException()); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/benchmarking/ReadBenchmark.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/benchmarking/ReadBenchmark.java new file mode 100644 index 000000000000..eed461fc8972 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/benchmarking/ReadBenchmark.java @@ -0,0 +1,228 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.benchmarking; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.ReadContext; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.protobuf.ListValue; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.TypeCode; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Server; +import io.grpc.ServerBuilder; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.TearDown; +import org.openjdk.jmh.annotations.Threads; +import org.openjdk.jmh.annotations.Timeout; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; +import org.openjdk.jmh.results.format.ResultFormatType; +import org.openjdk.jmh.runner.Runner; +import org.openjdk.jmh.runner.RunnerException; +import 
org.openjdk.jmh.runner.options.Options; +import org.openjdk.jmh.runner.options.OptionsBuilder; + +@BenchmarkMode(Mode.SampleTime) +@OutputTimeUnit(TimeUnit.MICROSECONDS) +@Threads(10) +@Fork(1) +public class ReadBenchmark { + + @State(Scope.Benchmark) + public static class BenchmarkState { + + // Spanner state + Spanner spanner; + DatabaseClient databaseClient; + + // gRPC server + Server gRPCServer; + Server gRPCMonitoringServer; + + // Executors for handling parallel requests by gRPC server + ExecutorService gRPCServerExecutor; + + // Table + List columns = Arrays.asList("id", "name"); + String selectQuery = "SELECT * FROM [TABLE] WHERE ID = 1"; + + @Setup(Level.Trial) + public void setup() throws IOException { + // Enable JMH system property + System.setProperty("jmh.enabled", "true"); + + // Initializing mock spanner service + MockSpannerServiceImpl mockSpannerService = new MockSpannerServiceImpl(); + mockSpannerService.setAbortProbability(0.0D); + + // Initializing mock monitoring service + MonitoringServiceImpl mockMonitoringService = new MonitoringServiceImpl(); + + // Create a thread pool to handle concurrent requests + gRPCServerExecutor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors()); + + // Creating Spanner Inprocess gRPC server + gRPCServer = + ServerBuilder.forPort(0) + .addService(mockSpannerService) + .executor(gRPCServerExecutor) + .build() + .start(); + + registerMocks(mockSpannerService); + + // Creating Monitoring Inprocess gRPC server + gRPCMonitoringServer = + ServerBuilder.forPort(0).addService(mockMonitoringService).build().start(); + + // Set the monitoring host port for exporter to forward requests to local netty gRPC server + System.setProperty( + "jmh.monitoring-server-port", String.valueOf(gRPCMonitoringServer.getPort())); + + spanner = + SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + .setCredentials(NoCredentials.getInstance()) + .setChannelConfigurator( + managedChannelBuilder -> + 
ManagedChannelBuilder.forAddress("0.0.0.0", gRPCServer.getPort()) + .usePlaintext()) + .build() + .getService(); + databaseClient = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE_ID]", "[DATABASE_ID]")); + } + + private void registerMocks(MockSpannerServiceImpl mockSpannerService) { + ResultSetMetadata selectMetadata = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.INT64) + .build()) + .build()) + .addFields( + Field.newBuilder() + .setName("name") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.STRING) + .build()) + .build()) + .build()) + .build(); + com.google.spanner.v1.ResultSet selectResultSet = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("1").build()) + .addValues( + com.google.protobuf.Value.newBuilder().setStringValue("[NAME]").build()) + .build()) + .setMetadata(selectMetadata) + .build(); + mockSpannerService.putStatementResult( + StatementResult.read( + "[TABLE]", KeySet.singleKey(Key.of()), this.columns, selectResultSet)); + mockSpannerService.putStatementResult( + StatementResult.query(Statement.of(this.selectQuery), selectResultSet)); + } + + @TearDown(Level.Trial) + public void tearDown() throws InterruptedException { + spanner.close(); + gRPCServer.shutdown(); + gRPCServerExecutor.shutdown(); + + // awaiting termination for servers and executors + gRPCServer.awaitTermination(10, TimeUnit.SECONDS); + gRPCServerExecutor.awaitTermination(10, TimeUnit.SECONDS); + } + } + + @Benchmark + @Warmup(time = 5, timeUnit = TimeUnit.MINUTES, iterations = 1) + @Measurement(time = 15, timeUnit = TimeUnit.MINUTES, iterations = 1) + @Timeout(time = 30, timeUnit = TimeUnit.MINUTES) + public void readBenchmark(BenchmarkState benchmarkState, Blackhole blackhole) { + try 
(ReadContext readContext = benchmarkState.databaseClient.singleUse()) { + try (ResultSet resultSet = + readContext.read("[TABLE]", KeySet.singleKey(Key.of("2")), benchmarkState.columns)) { + while (resultSet.next()) { + blackhole.consume(resultSet.getLong("id")); + } + } + } + } + + @Benchmark + @Warmup(time = 5, timeUnit = TimeUnit.MINUTES, iterations = 1) + @Measurement(time = 15, timeUnit = TimeUnit.MINUTES, iterations = 1) + @Timeout(time = 30, timeUnit = TimeUnit.MINUTES) + public void queryBenchmark(BenchmarkState benchmarkState, Blackhole blackhole) { + try (ReadContext readContext = benchmarkState.databaseClient.singleUse()) { + try (ResultSet resultSet = + readContext.executeQuery(Statement.of(benchmarkState.selectQuery))) { + while (resultSet.next()) { + blackhole.consume(resultSet.getLong("id")); + } + } + } + } + + public static void main(String[] args) throws RunnerException { + Options opt = + new OptionsBuilder() + .include(ReadBenchmark.class.getSimpleName()) + .result("jmh-result.json") + .resultFormat(ResultFormatType.JSON) + .build(); + new Runner(opt).run(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AbortedTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AbortedTest.java new file mode 100644 index 000000000000..8fec34c267e0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AbortedTest.java @@ -0,0 +1,676 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.common.truth.Truth.assertThat; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.fail; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AbortedDueToConcurrentModificationException; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.ReadContext.QueryAnalyzeMode; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest.AbortInterceptor; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest.ITConnection; +import com.google.cloud.spanner.connection.it.ITTransactionRetryTest.CountTransactionRetryListener; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.ByteString; +import com.google.protobuf.ListValue; +import com.google.protobuf.Value; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ExecuteSqlRequest.QueryMode; +import 
com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeCode; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.LongStream; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class AbortedTest extends AbstractMockServerTest { + + @Test + public void testCommitAborted() { + // Do two iterations to ensure that each iteration gets its own transaction, and that each + // transaction is the most recent transaction of that session. + for (int i = 0; i < 2; i++) { + mockSpanner.putStatementResult( + StatementResult.query(SELECT_COUNT_STATEMENT, SELECT_COUNT_RESULTSET_BEFORE_INSERT)); + mockSpanner.putStatementResult(StatementResult.update(INSERT_STATEMENT, UPDATE_COUNT)); + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // verify that the there is no test record + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE ID=1"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(0L))); + assertThat(rs.next(), is(false)); + } + // do an insert + connection.executeUpdate( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test aborted')")); + // indicate that the next statement should abort + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + // do a commit that will first abort, and then on retry will succeed + connection.commit(); + mockSpanner.putStatementResult( + 
StatementResult.query(SELECT_COUNT_STATEMENT, SELECT_COUNT_RESULTSET_AFTER_INSERT)); + // verify that the insert succeeded + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE ID=1"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(1L))); + assertThat(rs.next(), is(false)); + } + } + } + } + + @Test + public void testCommitAbortedDuringUpdateWithReturning() { + // Do two iterations to ensure that each iteration gets its own transaction, and that each + // transaction is the most recent transaction of that session. + for (int i = 0; i < 2; i++) { + mockSpanner.putStatementResult( + StatementResult.query(SELECT_COUNT_STATEMENT, SELECT_COUNT_RESULTSET_BEFORE_INSERT)); + mockSpanner.putStatementResult( + StatementResult.updateReturning(INSERT_RETURNING_STATEMENT, UPDATE_RETURNING_RESULTSET)); + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // verify that the there is no test record + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE ID=1"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(0L))); + assertThat(rs.next(), is(false)); + } + // do an insert with returning + connection.executeQuery( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test aborted') THEN RETURN *")); + // indicate that the next statement should abort + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + // do a commit that will first abort, and then on retry will succeed + connection.commit(); + mockSpanner.putStatementResult( + StatementResult.query(SELECT_COUNT_STATEMENT, SELECT_COUNT_RESULTSET_AFTER_INSERT)); + // verify that the insert succeeded + try (ResultSet rs = + 
connection.executeQuery(Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE ID=1"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(1L))); + assertThat(rs.next(), is(false)); + } + } + } + } + + @Test + public void testAbortedDuringRetryOfFailedQuery() { + final Statement invalidStatement = Statement.of("SELECT * FROM FOO"); + StatusRuntimeException notFound = + Status.NOT_FOUND.withDescription("Table not found").asRuntimeException(); + mockSpanner.putStatementResult(StatementResult.exception(invalidStatement, notFound)); + try (ITConnection connection = + createConnection(createAbortFirstRetryListener(invalidStatement, notFound))) { + connection.execute(INSERT_STATEMENT); + try (ResultSet rs = connection.executeQuery(invalidStatement)) { + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + } + // Force an abort and retry. + mockSpanner.abortNextStatement(); + connection.commit(); + } + assertThat(mockSpanner.countRequestsOfType(CommitRequest.class)).isEqualTo(2); + // The transaction will be executed 3 times, which means that there will be 6 + // ExecuteSqlRequests: + // 1. The initial attempt. + // 2. The first retry attempt. This will fail on the invalid statement as it is aborted. + // 3. the second retry attempt. This will succeed. 
+ assertThat(mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)).isEqualTo(6); + } + + @Test + public void testAbortedDuringRetryOfFailedUpdate() { + final Statement invalidStatement = Statement.of("INSERT INTO FOO"); + StatusRuntimeException notFound = + Status.NOT_FOUND.withDescription("Table not found").asRuntimeException(); + mockSpanner.putStatementResult(StatementResult.exception(invalidStatement, notFound)); + try (ITConnection connection = + createConnection(createAbortFirstRetryListener(invalidStatement, notFound))) { + connection.execute(INSERT_STATEMENT); + try { + connection.execute(invalidStatement); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + } + // Force an abort and retry. + mockSpanner.abortNextStatement(); + connection.commit(); + } + assertThat(mockSpanner.countRequestsOfType(CommitRequest.class)).isEqualTo(2); + // The transaction will be executed 3 times, which means that there will be 6 + // ExecuteSqlRequests: + // 1. The initial attempt. + // 2. The first retry attempt. This will fail on the invalid statement as it is aborted. + // 3. the second retry attempt. This will succeed. 
+ assertThat(mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)).isEqualTo(6); + } + + @Test + public void testAbortedDuringRetryOfFailedUpdateWithReturning() { + final Statement invalidStatement = Statement.of("INSERT INTO FOO THEN RETURN *"); + StatusRuntimeException notFound = + Status.NOT_FOUND.withDescription("Table not found").asRuntimeException(); + mockSpanner.putStatementResult(StatementResult.exception(invalidStatement, notFound)); + try (ITConnection connection = + createConnection(createAbortFirstRetryListener(invalidStatement, notFound))) { + connection.execute(INSERT_STATEMENT); + try { + connection.execute(invalidStatement); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + } + // Force an abort and retry. + mockSpanner.abortNextStatement(); + connection.commit(); + } + assertThat(mockSpanner.countRequestsOfType(CommitRequest.class)).isEqualTo(2); + // The transaction will be executed 3 times, which means that there will be 6 + // ExecuteSqlRequests: + // 1. The initial attempt. + // 2. The first retry attempt. This will fail on the invalid statement as it is aborted. + // 3. the second retry attempt. This will succeed. 
+ assertThat(mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)).isEqualTo(6); + } + + @Test + public void testAbortedDuringRetryOfFailedBatchUpdate() { + final Statement invalidStatement = Statement.of("INSERT INTO FOO"); + StatusRuntimeException notFound = + Status.NOT_FOUND.withDescription("Table not found").asRuntimeException(); + mockSpanner.putStatementResult(StatementResult.exception(invalidStatement, notFound)); + try (ITConnection connection = + createConnection(createAbortFirstRetryListener(invalidStatement, notFound))) { + connection.execute(INSERT_STATEMENT); + try { + connection.executeBatchUpdate(Collections.singletonList(invalidStatement)); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + } + // Force an abort and retry. + mockSpanner.abortNextStatement(); + connection.commit(); + } + assertThat(mockSpanner.countRequestsOfType(CommitRequest.class)).isEqualTo(2); + assertThat(mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)).isEqualTo(3); + } + + @Test + public void testAbortedDuringRetryOfFailedBatchUpdateWithReturning() { + final Statement invalidStatement = Statement.of("INSERT INTO FOO THEN RETURN *"); + StatusRuntimeException notFound = + Status.NOT_FOUND.withDescription("Table not found").asRuntimeException(); + mockSpanner.putStatementResult(StatementResult.exception(invalidStatement, notFound)); + try (ITConnection connection = + createConnection(createAbortFirstRetryListener(invalidStatement, notFound))) { + connection.execute(INSERT_STATEMENT); + try { + connection.executeBatchUpdate(Collections.singletonList(invalidStatement)); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + } + // Force an abort and retry. 
+ mockSpanner.abortNextStatement(); + connection.commit(); + } + assertThat(mockSpanner.countRequestsOfType(CommitRequest.class)).isEqualTo(2); + assertThat(mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)).isEqualTo(3); + } + + @Test + public void testAbortedDuringRetryOfFailedQueryAsFirstStatement() { + final Statement invalidStatement = Statement.of("SELECT * FROM FOO"); + StatusRuntimeException notFound = + Status.NOT_FOUND.withDescription("Table not found").asRuntimeException(); + mockSpanner.putStatementResult(StatementResult.exception(invalidStatement, notFound)); + // Abort the invalid statement on the third retry (listener counts from 0). The first retry will + // be triggered by the client library because the first statement of the transaction failed. + // That means that it also failed to return a transaction, and the first retry is only executed + // in order to execute an explicit BeginTransaction RPC: + + // 1: First statement fails => Retry because no transaction was returned + // 2: BeginTransaction + Invalid statement + Insert + Commit (aborted) => Retry + // 3: First statement fails => Retry because no transaction was returned + // 4: BeginTransaction + Invalid statement (aborted) => Retry + // 5: First statement fails => Retry because no transaction was returned + // 6: BeginTransaction + Invalid statement + Insert + Commit => Success + + try (ITConnection connection = + createConnection(createAbortRetryListener(2, invalidStatement, notFound))) { + try (ResultSet rs = connection.executeQuery(invalidStatement)) { + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + } + connection.executeUpdate(INSERT_STATEMENT); + // Force an abort and retry. + mockSpanner.abortNextStatement(); + connection.commit(); + } + assertThat(mockSpanner.countRequestsOfType(CommitRequest.class)).isEqualTo(2); + // 6 times invalid query + 2 times INSERT. 
+ assertThat(mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)).isEqualTo(8); + } + + @Test + public void testAbortedDuringRetryOfFailedUpdateAsFirstStatement() { + final Statement invalidStatement = Statement.of("INSERT INTO FOO"); + StatusRuntimeException notFound = + Status.NOT_FOUND.withDescription("Table not found").asRuntimeException(); + mockSpanner.putStatementResult(StatementResult.exception(invalidStatement, notFound)); + try (ITConnection connection = + createConnection(createAbortRetryListener(2, invalidStatement, notFound))) { + try { + connection.execute(invalidStatement); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + } + connection.execute(INSERT_STATEMENT); + // Force an abort and retry. + mockSpanner.abortNextStatement(); + connection.commit(); + } + assertThat(mockSpanner.countRequestsOfType(CommitRequest.class)).isEqualTo(2); + assertThat(mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)).isEqualTo(8); + } + + @Test + public void testAbortedDuringRetryOfFailedUpdateWithReturningAsFirstStatement() { + final Statement invalidStatement = Statement.of("INSERT INTO FOO THEN RETURN *"); + StatusRuntimeException notFound = + Status.NOT_FOUND.withDescription("Table not found").asRuntimeException(); + mockSpanner.putStatementResult(StatementResult.exception(invalidStatement, notFound)); + try (ITConnection connection = + createConnection(createAbortRetryListener(2, invalidStatement, notFound))) { + try { + connection.execute(invalidStatement); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + } + connection.execute(INSERT_STATEMENT); + // Force an abort and retry. 
+ mockSpanner.abortNextStatement(); + connection.commit(); + } + assertThat(mockSpanner.countRequestsOfType(CommitRequest.class)).isEqualTo(2); + assertThat(mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)).isEqualTo(8); + } + + @Test + public void testAbortedDuringRetryOfFailedBatchUpdateAsFirstStatement() { + final Statement invalidStatement = Statement.of("INSERT INTO FOO"); + StatusRuntimeException notFound = + Status.NOT_FOUND.withDescription("Table not found").asRuntimeException(); + mockSpanner.putStatementResult(StatementResult.exception(invalidStatement, notFound)); + try (ITConnection connection = + createConnection(createAbortFirstRetryListener(invalidStatement, notFound))) { + try { + connection.executeBatchUpdate(Collections.singletonList(invalidStatement)); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + } + connection.execute(INSERT_STATEMENT); + // Force an abort and retry. + mockSpanner.abortNextStatement(); + connection.commit(); + } + assertThat(mockSpanner.countRequestsOfType(CommitRequest.class)).isEqualTo(2); + assertThat(mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)).isEqualTo(6); + } + + @Test + public void testAbortedDuringRetryOfFailedBatchUpdateWithReturningAsFirstStatement() { + final Statement invalidStatement = Statement.of("INSERT INTO FOO THEN RETURN *"); + StatusRuntimeException notFound = + Status.NOT_FOUND.withDescription("Table not found").asRuntimeException(); + mockSpanner.putStatementResult(StatementResult.exception(invalidStatement, notFound)); + try (ITConnection connection = + createConnection(createAbortFirstRetryListener(invalidStatement, notFound))) { + try { + connection.executeBatchUpdate(Collections.singletonList(invalidStatement)); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + } + connection.execute(INSERT_STATEMENT); 
+ // Force an abort and retry. + mockSpanner.abortNextStatement(); + connection.commit(); + } + assertThat(mockSpanner.countRequestsOfType(CommitRequest.class)).isEqualTo(2); + assertThat(mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)).isEqualTo(6); + } + + @Test + public void testRetryUsesTags() { + mockSpanner.putStatementResult( + StatementResult.query(SELECT_COUNT_STATEMENT, SELECT_COUNT_RESULTSET_BEFORE_INSERT)); + mockSpanner.putStatementResult(StatementResult.update(INSERT_STATEMENT, UPDATE_COUNT)); + try (ITConnection connection = createConnection()) { + connection.setTransactionTag("transaction-tag"); + connection.setStatementTag("statement-tag"); + connection.executeUpdate(INSERT_STATEMENT); + connection.setStatementTag("statement-tag"); + connection.executeBatchUpdate(Collections.singleton(INSERT_STATEMENT)); + connection.setStatementTag("statement-tag"); + connection.executeQuery(SELECT_COUNT_STATEMENT); + + mockSpanner.abortNextStatement(); + connection.commit(); + } + long executeSqlRequestCount = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter( + request -> + request.getRequestOptions().getRequestTag().equals("statement-tag") + && request + .getRequestOptions() + .getTransactionTag() + .equals("transaction-tag")) + .count(); + assertEquals(4L, executeSqlRequestCount); + + long executeBatchSqlRequestCount = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).stream() + .filter( + request -> + request.getRequestOptions().getRequestTag().equals("statement-tag") + && request + .getRequestOptions() + .getTransactionTag() + .equals("transaction-tag")) + .count(); + assertEquals(2L, executeBatchSqlRequestCount); + + long commitRequestCount = + mockSpanner.getRequestsOfType(CommitRequest.class).stream() + .filter( + request -> + request.getRequestOptions().getRequestTag().equals("") + && request + .getRequestOptions() + .getTransactionTag() + .equals("transaction-tag")) + .count(); + assertEquals(2L, 
commitRequestCount); + } + + @Test + public void testRetryUsesTagsWithUpdateReturning() { + mockSpanner.putStatementResult( + StatementResult.query(SELECT_COUNT_STATEMENT, SELECT_COUNT_RESULTSET_BEFORE_INSERT)); + mockSpanner.putStatementResult(StatementResult.update(INSERT_STATEMENT, UPDATE_COUNT)); + mockSpanner.putStatementResult( + StatementResult.updateReturning(INSERT_RETURNING_STATEMENT, UPDATE_RETURNING_RESULTSET)); + try (ITConnection connection = createConnection()) { + connection.setTransactionTag("transaction-tag"); + connection.setStatementTag("statement-tag"); + connection.executeUpdate(INSERT_STATEMENT); + connection.setStatementTag("statement-tag"); + connection.executeBatchUpdate(ImmutableList.of(INSERT_STATEMENT, INSERT_RETURNING_STATEMENT)); + connection.setStatementTag("statement-tag"); + connection.executeQuery(SELECT_COUNT_STATEMENT); + connection.setStatementTag("statement-tag"); + connection.executeQuery(INSERT_RETURNING_STATEMENT); + + mockSpanner.abortNextStatement(); + connection.commit(); + } + long executeSqlRequestCount = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter( + request -> + request.getRequestOptions().getRequestTag().equals("statement-tag") + && request + .getRequestOptions() + .getTransactionTag() + .equals("transaction-tag")) + .count(); + assertEquals(6L, executeSqlRequestCount); + + long executeBatchSqlRequestCount = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).stream() + .filter( + request -> + request.getRequestOptions().getRequestTag().equals("statement-tag") + && request + .getRequestOptions() + .getTransactionTag() + .equals("transaction-tag")) + .count(); + assertEquals(2L, executeBatchSqlRequestCount); + + long commitRequestCount = + mockSpanner.getRequestsOfType(CommitRequest.class).stream() + .filter( + request -> + request.getRequestOptions().getRequestTag().equals("") + && request + .getRequestOptions() + .getTransactionTag() + .equals("transaction-tag")) + 
.count(); + assertEquals(2L, commitRequestCount); + } + + @Test + public void testRetryUsesAnalyzeModeForUpdate() { + mockSpanner.putStatementResult( + StatementResult.query(SELECT_COUNT_STATEMENT, SELECT_COUNT_RESULTSET_BEFORE_INSERT)); + mockSpanner.putStatementResult(StatementResult.update(INSERT_STATEMENT, 0)); + try (ITConnection connection = createConnection()) { + assertEquals( + 0L, connection.analyzeUpdate(INSERT_STATEMENT, QueryAnalyzeMode.PLAN).getRowCountExact()); + + mockSpanner.abortNextStatement(); + connection.executeQuery(SELECT_COUNT_STATEMENT); + + mockSpanner.putStatementResult(StatementResult.update(INSERT_STATEMENT, 1)); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + + connection.commit(); + } + // 5 requests because: + // 1. Analyze INSERT + // 2. Execute SELECT COUNT(*) (Aborted) + // 3. Analyze INSERT (retry) + // 4. Execute SELECT COUNT(*) (retry) + // 5. Execute INSERT + assertEquals(5, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(QueryMode.PLAN, requests.get(0).getQueryMode()); + assertEquals(QueryMode.NORMAL, requests.get(1).getQueryMode()); + + // This used NORMAL because of https://github.com/googleapis/java-spanner/issues/2009. + assertEquals(QueryMode.PLAN, requests.get(2).getQueryMode()); + + assertEquals(QueryMode.NORMAL, requests.get(3).getQueryMode()); + assertEquals(QueryMode.NORMAL, requests.get(4).getQueryMode()); + } + + @Test + public void testAbortedWithBitReversedSequence() { + // A bit-reversed sequence can only be used in a read/write transaction. However, calling + // get_next_sequence_value will update the sequence durably, even if the transaction is aborted. + // That means that retrying a transaction that called get_next_sequence_value will always fail. 
+ String getSequenceValuesSql = + "WITH t AS (\n" + + "\tselect get_next_sequence_value(sequence enhanced_sequence) AS n\n" + + "\tUNION ALL\n" + + "\tselect get_next_sequence_value(sequence enhanced_sequence) AS n\n" + + "\tUNION ALL\n" + + "\tselect get_next_sequence_value(sequence enhanced_sequence) AS n\n" + + "\tUNION ALL\n" + + "\tselect get_next_sequence_value(sequence enhanced_sequence) AS n\n" + + "\tUNION ALL\n" + + "\tselect get_next_sequence_value(sequence enhanced_sequence) AS n\n" + + ")\n" + + "SELECT n FROM t"; + mockSpanner.putStatementResult( + StatementResult.queryAndThen( + Statement.of(getSequenceValuesSql), + createBitReversedSequenceResultSet(1L, 5L), + createBitReversedSequenceResultSet(6L, 10L))); + long currentValue = 0L; + try (ITConnection connection = createConnection()) { + try (ResultSet resultSet = connection.executeQuery(Statement.of(getSequenceValuesSql))) { + while (resultSet.next()) { + assertEquals(Long.reverse(++currentValue), resultSet.getLong(0)); + } + } + mockSpanner.abortNextStatement(); + // The retry should fail, because the sequence will return new values during the retry. + assertThrows(AbortedDueToConcurrentModificationException.class, connection::commit); + } + } + + @Test + public void testTimeoutWithRetries() { + // Verifies that even though a single execution of a statement does not exceed the deadline, + // repeated retries of the statement does cause the deadline to be exceeded. 
+ try (ITConnection connection = createConnection()) { + for (boolean autoCommit : new boolean[] {true, false}) { + connection.setAutocommit(autoCommit); + mockSpanner.setAbortProbability(1.0); + mockSpanner.setExecuteSqlExecutionTime(SimulatedExecutionTime.ofMinimumAndRandomTime(1, 0)); + + connection.setStatementTimeout(10, TimeUnit.MILLISECONDS); + SpannerException exception = + assertThrows(SpannerException.class, () -> connection.execute(INSERT_STATEMENT)); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, exception.getErrorCode()); + if (!autoCommit) { + connection.rollback(); + } + } + } finally { + mockSpanner.setAbortProbability(0.0); + } + } + + static com.google.spanner.v1.ResultSet createBitReversedSequenceResultSet( + long startValue, long endValue) { + return com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("n") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .build()) + .build()) + .addAllRows( + LongStream.range(startValue, endValue) + .map(Long::reverse) + .mapToObj( + id -> + ListValue.newBuilder() + .addValues( + Value.newBuilder().setStringValue(String.valueOf(id)).build()) + .build()) + .collect(Collectors.toList())) + .build(); + } + + ITConnection createConnection(TransactionRetryListener listener) { + ITConnection connection = + super.createConnection(ImmutableList.of(), ImmutableList.of(listener)); + connection.setAutocommit(false); + return connection; + } + + /** Creates a retry listener that will abort the first retry as well. */ + TransactionRetryListener createAbortFirstRetryListener( + final Statement invalidStatement, final StatusRuntimeException statementException) { + return createAbortRetryListener(0, invalidStatement, statementException); + } + + /** Creates a retry listener that will abort the n'th retry. 
*/ + TransactionRetryListener createAbortRetryListener( + final int onAttempt, + final Statement invalidStatement, + final StatusRuntimeException statementException) { + return new TransactionRetryListener() { + @Override + public void retryStarting( + Timestamp transactionStarted, long transactionId, int retryAttempt) { + if (retryAttempt == onAttempt) { + mockSpanner.putStatementResult( + StatementResult.exception( + invalidStatement, + mockSpanner.createAbortedException(ByteString.copyFromUtf8("some-transaction")))); + } else { + mockSpanner.putStatementResult( + StatementResult.exception(invalidStatement, statementException)); + } + } + + @Override + public void retryFinished( + Timestamp transactionStarted, long transactionId, int retryAttempt, RetryResult result) {} + }; + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AbstractConnectionImplTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AbstractConnectionImplTest.java new file mode 100644 index 000000000000..0ad0588b68b0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AbstractConnectionImplTest.java @@ -0,0 +1,1052 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.DialectNamespaceMapper.getNamespace; +import static com.google.cloud.spanner.connection.ReadOnlyStalenessUtil.getTimeUnitAbbreviation; +import static com.google.cloud.spanner.connection.SpannerExceptionMatcher.matchCode; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ReadContext.QueryAnalyzeMode; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.io.PrintWriter; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +/** + * This test class and all its subclasses are used to generate the file + * ConnectionImplGeneratedSqlScriptTest.sql. 
+ */ +@RunWith(Parameterized.class) +public abstract class AbstractConnectionImplTest { + public static final String UPDATE = "UPDATE foo SET bar=1"; + public static final String SELECT = "SELECT 1 AS TEST"; + public static final String DDL = + "CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id)"; + + @Parameter public Dialect dialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + interface ConnectionConsumer { + void accept(Connection connection); + } + + @Rule public ExpectedException exception = ExpectedException.none(); + + /** + * This test class can generate a large sql file that represents all the statements and + * verifications that are executed by this test class. This file can be fed into other test cases + * (in other programming languages) to execute the same tests as the tests covered by all the + * subclasses of {@link AbstractConnectionImplTest}. + */ + private static final String LOG_FILE = + "src/test/resources/com/google/cloud/spanner/connection/ConnectionImplGeneratedSqlScriptTest.sql"; + + private static final String PG_LOG_FILE = + "src/test/resources/com/google/cloud/spanner/connection/postgresql/ConnectionImplGeneratedSqlScriptTest.sql"; + + private static String getLogFile(Dialect dialect) { + switch (dialect) { + case GOOGLE_STANDARD_SQL: + return LOG_FILE; + case POSTGRESQL: + return PG_LOG_FILE; + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Unknown or unsupported dialect: " + dialect); + } + } + + private static final String DO_LOG_PROPERTY = "do_log_statements"; + private boolean doLog; + private PrintWriter writer; + + abstract Connection getConnection(); + + static void expectSpannerException( + String reason, ConnectionConsumer consumer, Connection connection) { + expectSpannerException(reason, consumer, connection, ErrorCode.FAILED_PRECONDITION); + } + + static void expectSpannerException( + String 
reason, ConnectionConsumer consumer, Connection connection, ErrorCode errorCode) { + SpannerException exception = null; + try { + consumer.accept(connection); + } catch (SpannerException e) { + exception = e; + } + assertThat(reason, exception, is(notNullValue())); + assertThat(reason, exception.getErrorCode(), is(equalTo(errorCode))); + } + + AbstractConnectionImplTest() {} + + /** Makes an empty test script. Can be called before a new script is to be generated. */ + void emptyScript(Dialect dialect) { + openLog(false, dialect); + closeLog(); + } + + void log(String statement) { + if (doLog) { + writer.println(statement); + } + } + + void logWithNamespace(String statement) { + if (doLog) { + writer.printf(statement, getNamespace(dialect)); + writer.println(); + } + } + + @Before + public void openLog() { + doLog = Boolean.parseBoolean(System.getProperty(DO_LOG_PROPERTY, "false")); + if (doLog) { + if (writer == null) { + openLog(true, this.dialect); + } + } else { + writer = null; + } + } + + private void openLog(boolean append, Dialect dialect) { + try { + writer = + new PrintWriter( + new OutputStreamWriter( + new FileOutputStream(getLogFile(dialect), append), StandardCharsets.UTF_8), + true); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @After + public void closeLog() { + if (writer != null) { + writer.close(); + writer = null; + } + } + + @Test + public void testClose() { + getConnection().close(); + } + + @Test + public void testIsClosed() { + Connection connection = getConnection(); + assertThat(connection.isClosed(), is(false)); + connection.close(); + assertThat(connection.isClosed(), is(true)); + } + + abstract boolean isSetAutocommitAllowed(); + + @Test + public void testSetAutocommit() { + try (Connection connection = getConnection()) { + if (isSetAutocommitAllowed()) { + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + + log("@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE"); + log("SHOW VARIABLE AUTOCOMMIT;"); + 
assertThat(connection.isAutocommit(), is(false)); + + log("SET AUTOCOMMIT=TRUE;"); + connection.setAutocommit(true); + + log("@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE"); + log("SHOW VARIABLE AUTOCOMMIT;"); + assertThat(connection.isAutocommit(), is(true)); + } else { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + log("SET AUTOCOMMIT=" + (connection.isAutocommit() ? "FALSE;" : "TRUE;")); + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + connection.setAutocommit(!connection.isAutocommit()); + } + } + } + + abstract boolean isSetReadOnlyAllowed(); + + @Test + public void testSetReadOnly() { + try (Connection connection = getConnection()) { + if (isSetReadOnlyAllowed()) { + logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + + logWithNamespace("@EXPECT RESULT_SET '%sREADONLY',FALSE"); + logWithNamespace("SHOW VARIABLE %sREADONLY;"); + assertThat(connection.isReadOnly(), is(false)); + + logWithNamespace("SET %sREADONLY=TRUE;"); + connection.setReadOnly(true); + + logWithNamespace("@EXPECT RESULT_SET '%sREADONLY',TRUE"); + logWithNamespace("SHOW VARIABLE %sREADONLY;"); + assertThat(connection.isReadOnly(), is(true)); + } else { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + logWithNamespace("SET %sREADONLY=" + (connection.isAutocommit() ? 
"FALSE;" : "TRUE;")); + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + connection.setReadOnly(!connection.isReadOnly()); + } + } + } + + @Test + public void testSetStatementTimeout() { + try (Connection connection = getConnection()) { + for (TimeUnit unit : ReadOnlyStalenessUtil.SUPPORTED_UNITS) { + log(String.format("SET STATEMENT_TIMEOUT='1%s';", getTimeUnitAbbreviation(unit))); + connection.setStatementTimeout(1L, unit); + + log( + String.format( + "@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1%s'", getTimeUnitAbbreviation(unit))); + log("SHOW VARIABLE STATEMENT_TIMEOUT;"); + assertThat(connection.getStatementTimeout(unit), is(equalTo(1L))); + + log( + String.format( + "SET STATEMENT_TIMEOUT=%s;", dialect == Dialect.POSTGRESQL ? "DEFAULT" : "null")); + connection.clearStatementTimeout(); + + log( + String.format( + "@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',%s", + dialect == Dialect.POSTGRESQL ? "'0'" : "null")); + log("SHOW VARIABLE STATEMENT_TIMEOUT;"); + assertThat(connection.getStatementTimeout(unit), is(equalTo(0L))); + assertThat(connection.hasStatementTimeout(), is(false)); + boolean gotException = false; + try { + // log("@EXPECT EXCEPTION INVALID_ARGUMENT"); + log(String.format("SET STATEMENT_TIMEOUT='0%s';", getTimeUnitAbbreviation(unit))); + connection.clearStatementTimeout(); + // connection.setStatementTimeout(0L, unit); + } catch (IllegalArgumentException e) { + gotException = true; + } + assertThat(gotException, is(false)); + log( + String.format( + "@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',%s", + dialect == Dialect.POSTGRESQL ? 
"'0'" : "null")); + log("SHOW VARIABLE STATEMENT_TIMEOUT;"); + assertThat(connection.getStatementTimeout(unit), is(equalTo(0L))); + assertThat(connection.hasStatementTimeout(), is(false)); + } + } + } + + abstract boolean isStartBatchDmlAllowed(); + + @Test + public void testStartBatchDml() { + try (Connection connection = getConnection()) { + if (isStartBatchDmlAllowed()) { + assertThat(connection.isReadOnly(), is(false)); + assertThat(connection.isDdlBatchActive() || connection.isDmlBatchActive(), is(false)); + + log("START BATCH DML;"); + connection.startBatchDml(); + assertThat(connection.isDmlBatchActive(), is(true)); + + expectSpannerException( + "Select should not be allowed after startBatchDml()", + t -> { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + log(SELECT + ";"); + t.execute(Statement.of(SELECT)); + }, + connection); + expectSpannerException( + "DDL should not be allowed after startBatchDml()", + t -> { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + log(DDL + ";"); + t.execute(Statement.of(DDL)); + }, + connection); + log(UPDATE + ";"); + connection.execute(Statement.of(UPDATE)); + assertThat(connection.isDmlBatchActive(), is(true)); + } + // startBatchDml is not allowed as a batch has already been started. 
+ log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + log("START BATCH DML;"); + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + connection.startBatchDml(); + } + } + + abstract boolean isStartBatchDdlAllowed(); + + @Test + public void testStartBatchDdl() { + try (Connection connection = getConnection()) { + if (isStartBatchDdlAllowed()) { + assertThat(connection.isTransactionStarted(), is(false)); + assertThat(connection.isInTransaction(), is(equalTo(!connection.isAutocommit()))); + assertThat(connection.isDdlBatchActive() || connection.isDmlBatchActive(), is(false)); + + log("START BATCH DDL;"); + connection.startBatchDdl(); + assertThat(connection.isTransactionStarted(), is(false)); + assertThat(connection.isInTransaction(), is(false)); + assertThat(connection.isDdlBatchActive(), is(true)); + + expectSpannerException( + "Select should not be allowed after startBatchDdl()", + t -> { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + log(SELECT + ";"); + t.execute(Statement.of(SELECT)); + }, + connection); + expectSpannerException( + "Update should not be allowed after startBatchDdl()", + t -> { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + log(UPDATE + ";"); + t.execute(Statement.of(UPDATE)); + }, + connection); + log(DDL + ";"); + connection.execute(Statement.of(DDL)); + assertThat(connection.isTransactionStarted(), is(false)); + assertThat(connection.isDdlBatchActive(), is(true)); + } + // startBatchDdl is no longer allowed as a batch has already been started + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + log("START BATCH DDL;"); + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + connection.startBatchDdl(); + } + } + + abstract boolean isRunBatchAllowed(); + + @Test + public void testRunBatch() { + try (Connection connection = getConnection()) { + if (!isRunBatchAllowed()) { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + } + log("RUN BATCH;"); + connection.runBatch(); 
+ } + } + + abstract boolean isAbortBatchAllowed(); + + @Test + public void testAbortBatch() { + try (Connection connection = getConnection()) { + if (!isAbortBatchAllowed()) { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + } + log("ABORT BATCH;"); + connection.abortBatch(); + } + } + + abstract boolean isBeginTransactionAllowed(); + + abstract boolean isSelectAllowedAfterBeginTransaction(); + + abstract boolean isDmlAllowedAfterBeginTransaction(); + + abstract boolean isDdlAllowedAfterBeginTransaction(); + + @Test + public void testBeginTransaction() { + try (Connection connection = getConnection()) { + if (isBeginTransactionAllowed()) { + assertThat(connection.isTransactionStarted(), is(false)); + assertThat(connection.isInTransaction(), is(equalTo(!connection.isAutocommit()))); + + log("BEGIN TRANSACTION;"); + connection.beginTransaction(); + assertThat(connection.isTransactionStarted(), is(false)); + assertThat(connection.isInTransaction(), is(true)); + + if (isSelectAllowedAfterBeginTransaction()) { + log(SELECT + ";"); + connection.execute(Statement.of(SELECT)); + } else { + expectSpannerException( + "Select should not be allowed after beginTransaction", + t -> { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + log(SELECT + ";"); + t.execute(Statement.of(SELECT)); + }, + connection); + } + if (isDmlAllowedAfterBeginTransaction()) { + log(UPDATE + ";"); + connection.execute(Statement.of(UPDATE)); + } else { + expectSpannerException( + "Update should not be allowed after beginTransaction", + t -> { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + log(UPDATE + ";"); + t.execute(Statement.of(UPDATE)); + }, + connection); + } + if (isDdlAllowedAfterBeginTransaction()) { + log(DDL + ";"); + connection.execute(Statement.of(DDL)); + } else { + expectSpannerException( + "DDL should not be allowed after beginTransaction", + t -> { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + log(DDL + ";"); + 
t.execute(Statement.of(DDL)); + }, + connection); + } + assertThat(connection.isTransactionStarted(), is(true)); + } + // beginTransaction is no longer allowed as the transaction has already started + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + log("BEGIN TRANSACTION;"); + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + connection.beginTransaction(); + } + } + + abstract boolean isSetTransactionTagAllowed(); + + @Test + public void testSetTransactionTag() { + try (Connection connection = getConnection()) { + String tag = "some-tag"; + if (isSetTransactionTagAllowed()) { + log( + String.format( + "SET %sTRANSACTION_TAG = '%s';", + DialectNamespaceMapper.getNamespace(dialect), tag)); + connection.setTransactionTag(tag); + assertEquals(tag, connection.getTransactionTag()); + } else { + expectSpannerException( + "SET TRANSACTION_TAG should not be allowed", + new ConnectionConsumer() { + @Override + public void accept(Connection t) { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + log( + String.format( + "SET %sTRANSACTION_TAG = '%s';", + DialectNamespaceMapper.getNamespace(dialect), tag)); + t.setTransactionTag(tag); + } + }, + connection); + } + } + } + + abstract boolean isSetTransactionModeAllowed(TransactionMode mode); + + @Test + public void testSetTransactionMode() { + for (TransactionMode mode : TransactionMode.values()) { + testSetTransactionMode(mode); + } + } + + private void testSetTransactionMode(final TransactionMode mode) { + try (Connection connection = getConnection()) { + if (isSetTransactionModeAllowed(mode)) { + log("SET TRANSACTION " + mode.toString() + ";"); + connection.setTransactionMode(mode); + assertThat(connection.getTransactionMode(), is(equalTo(mode))); + } else { + expectSpannerException( + mode + " should not be allowed", + t -> { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + log("SET TRANSACTION " + mode.getStatementString() + ";"); + t.setTransactionMode(mode); + }, + connection); + } + } + } + + abstract boolean 
isGetTransactionModeAllowed(); + + @Test + public void testGetTransactionMode() { + try (Connection connection = getConnection()) { + if (isGetTransactionModeAllowed()) { + assertThat(connection.getTransactionMode(), is(notNullValue())); + } else { + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + connection.getTransactionMode(); + } + } + } + + abstract boolean isSetAutocommitDmlModeAllowed(); + + @Test + public void testSetAutocommitDmlMode() { + try (Connection connection = getConnection()) { + if (isSetAutocommitDmlModeAllowed()) { + for (AutocommitDmlMode mode : AutocommitDmlMode.values()) { + logWithNamespace("SET %sAUTOCOMMIT_DML_MODE='" + mode.toString() + "';"); + connection.setAutocommitDmlMode(mode); + + logWithNamespace("@EXPECT RESULT_SET '%sAUTOCOMMIT_DML_MODE','" + mode.toString() + "'"); + logWithNamespace("SHOW VARIABLE %sAUTOCOMMIT_DML_MODE;"); + assertThat(connection.getAutocommitDmlMode(), is(equalTo(mode))); + } + } else { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + logWithNamespace( + "SET %sAUTOCOMMIT_DML_MODE='" + + AutocommitDmlMode.PARTITIONED_NON_ATOMIC.toString() + + "';"); + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + connection.setAutocommitDmlMode(AutocommitDmlMode.PARTITIONED_NON_ATOMIC); + } + } + } + + abstract boolean isGetAutocommitDmlModeAllowed(); + + @Test + public void testGetAutocommitDmlMode() { + try (Connection connection = getConnection()) { + if (isGetAutocommitDmlModeAllowed()) { + logWithNamespace("@EXPECT RESULT_SET '%sAUTOCOMMIT_DML_MODE'"); + logWithNamespace("SHOW VARIABLE %sAUTOCOMMIT_DML_MODE;"); + assertThat(connection.getAutocommitDmlMode(), is(notNullValue())); + } else { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + logWithNamespace("SHOW VARIABLE %sAUTOCOMMIT_DML_MODE;"); + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + connection.getAutocommitDmlMode(); + } + } + } + + abstract boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode); + + 
@Test + public void testSetReadOnlyStaleness() { + for (TimestampBound staleness : getTestTimestampBounds()) { + testSetReadOnlyStaleness(staleness); + } + } + + private List getTestTimestampBounds() { + return Arrays.asList( + TimestampBound.strong(), + TimestampBound.ofReadTimestamp(Timestamp.now()), + TimestampBound.ofMinReadTimestamp(Timestamp.now()), + TimestampBound.ofExactStaleness(1L, TimeUnit.SECONDS), + TimestampBound.ofMaxStaleness(100L, TimeUnit.MILLISECONDS), + TimestampBound.ofExactStaleness(100L, TimeUnit.MICROSECONDS)); + } + + private void testSetReadOnlyStaleness(final TimestampBound staleness) { + try (Connection connection = getConnection()) { + if (isSetReadOnlyStalenessAllowed(staleness.getMode())) { + logWithNamespace( + "SET %sREAD_ONLY_STALENESS='" + + ReadOnlyStalenessUtil.timestampBoundToString(staleness) + + "';"); + connection.setReadOnlyStaleness(staleness); + + logWithNamespace( + "@EXPECT RESULT_SET '%sREAD_ONLY_STALENESS','" + + ReadOnlyStalenessUtil.timestampBoundToString(staleness) + + "'"); + logWithNamespace("SHOW VARIABLE %sREAD_ONLY_STALENESS;"); + assertThat(connection.getReadOnlyStaleness(), is(equalTo(staleness))); + } else { + expectSpannerException( + staleness.getMode() + " should not be allowed", + t -> { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + logWithNamespace( + "SET %sREAD_ONLY_STALENESS='" + + ReadOnlyStalenessUtil.timestampBoundToString(staleness) + + "';"); + t.setReadOnlyStaleness(staleness); + }, + connection); + } + } + } + + abstract boolean isGetReadOnlyStalenessAllowed(); + + @Test + public void testGetReadOnlyStaleness() { + try (Connection connection = getConnection()) { + if (isGetReadOnlyStalenessAllowed()) { + logWithNamespace("@EXPECT RESULT_SET '%sREAD_ONLY_STALENESS'"); + logWithNamespace("SHOW VARIABLE %sREAD_ONLY_STALENESS;"); + assertThat(connection.getReadOnlyStaleness(), is(notNullValue())); + } else { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + logWithNamespace("SHOW VARIABLE 
%sREAD_ONLY_STALENESS;"); + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + connection.getReadOnlyStaleness(); + } + } + } + + boolean isSetOptimizerVersionAllowed() { + return !getConnection().isClosed(); + } + + @Test + public void testSetOptimizerVersion() { + try (Connection connection = getConnection()) { + if (isSetOptimizerVersionAllowed()) { + for (String version : new String[] {"1", "2", "latest", ""}) { + logWithNamespace("SET %sOPTIMIZER_VERSION='" + version + "';"); + connection.setOptimizerVersion(version); + + logWithNamespace("@EXPECT RESULT_SET '%sOPTIMIZER_VERSION','" + version + "'"); + logWithNamespace("SHOW VARIABLE %sOPTIMIZER_VERSION;"); + assertThat(connection.getOptimizerVersion(), is(equalTo(version))); + } + } else { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + logWithNamespace("SET %sOPTIMIZER_VERSION='1';"); + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + connection.setOptimizerVersion("1"); + } + } + } + + boolean isGetOptimizerVersionAllowed() { + return !getConnection().isClosed(); + } + + @Test + public void testGetOptimizerVersion() { + try (Connection connection = getConnection()) { + if (isGetOptimizerVersionAllowed()) { + logWithNamespace("@EXPECT RESULT_SET '%sOPTIMIZER_VERSION'"); + logWithNamespace("SHOW VARIABLE %sOPTIMIZER_VERSION;"); + assertThat(connection.getOptimizerVersion(), is(notNullValue())); + } else { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + logWithNamespace("SHOW VARIABLE %sOPTIMIZER_VERSION;"); + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + connection.getOptimizerVersion(); + } + } + } + + boolean isSetOptimizerStatisticsPackageAllowed() { + return !getConnection().isClosed(); + } + + @Test + public void testSetOptimizerStatisticsPackage() { + try (Connection connection = getConnection()) { + if (isSetOptimizerStatisticsPackageAllowed()) { + for (String statisticsPackage : new String[] {"custom-package", ""}) { + logWithNamespace("SET 
%sOPTIMIZER_STATISTICS_PACKAGE='" + statisticsPackage + "';"); + connection.setOptimizerStatisticsPackage(statisticsPackage); + + logWithNamespace( + "@EXPECT RESULT_SET '%sOPTIMIZER_STATISTICS_PACKAGE','" + statisticsPackage + "'"); + logWithNamespace("SHOW VARIABLE %sOPTIMIZER_STATISTICS_PACKAGE;"); + assertThat(connection.getOptimizerStatisticsPackage(), is(equalTo(statisticsPackage))); + } + } else { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + logWithNamespace("SET %sOPTIMIZER_STATISTICS_PACKAGE='custom-package';"); + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + connection.setOptimizerStatisticsPackage("custom-package"); + } + } + } + + boolean isGetOptimizerStatisticsPackageAllowed() { + return !getConnection().isClosed(); + } + + @Test + public void testGetOptimizerStatisticsPackage() { + try (Connection connection = getConnection()) { + if (isGetOptimizerStatisticsPackageAllowed()) { + logWithNamespace("@EXPECT RESULT_SET '%sOPTIMIZER_STATISTICS_PACKAGE'"); + logWithNamespace("SHOW VARIABLE %sOPTIMIZER_STATISTICS_PACKAGE;"); + assertThat(connection.getOptimizerStatisticsPackage(), is(notNullValue())); + } else { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + logWithNamespace("SHOW VARIABLE %sOPTIMIZER_STATISTICS_PACKAGE;"); + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + connection.getOptimizerStatisticsPackage(); + } + } + } + + abstract boolean isCommitAllowed(); + + @Test + public void testCommit() { + try (Connection connection = getConnection()) { + if (!isCommitAllowed()) { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + } + log("COMMIT;"); + connection.commit(); + } + } + + abstract boolean isRollbackAllowed(); + + @Test + public void testRollback() { + try (Connection connection = getConnection()) { + if (!isRollbackAllowed()) { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + } + 
log("ROLLBACK;"); + connection.rollback(); + } + } + + abstract boolean expectedIsInTransaction(); + + @Test + public void testIsInTransaction() { + try (Connection connection = getConnection()) { + assertThat(connection.isInTransaction(), is(expectedIsInTransaction())); + } + } + + abstract boolean expectedIsTransactionStarted(); + + @Test + public void testIsTransactionStarted() { + try (Connection connection = getConnection()) { + assertThat(connection.isTransactionStarted(), is(expectedIsTransactionStarted())); + } + } + + abstract boolean isGetReadTimestampAllowed(); + + @Test + public void testGetReadTimestamp() { + try (Connection connection = getConnection()) { + if (isGetReadTimestampAllowed()) { + logWithNamespace("@EXPECT RESULT_SET '%sREAD_TIMESTAMP'"); + logWithNamespace("SHOW VARIABLE %sREAD_TIMESTAMP;"); + assertThat(connection.getReadTimestamp(), is(notNullValue())); + } else { + logWithNamespace("@EXPECT RESULT_SET '%sREAD_TIMESTAMP',null"); + logWithNamespace("SHOW VARIABLE %sREAD_TIMESTAMP;"); + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + connection.getReadTimestamp(); + } + } + } + + abstract boolean isGetCommitTimestampAllowed(); + + @Test + public void testGetCommitTimestamp() { + try (Connection connection = getConnection()) { + if (isGetCommitTimestampAllowed()) { + logWithNamespace("@EXPECT RESULT_SET '%sCOMMIT_TIMESTAMP'"); + logWithNamespace("SHOW VARIABLE %sCOMMIT_TIMESTAMP;"); + assertThat(connection.getCommitTimestamp(), is(notNullValue())); + } else { + logWithNamespace("@EXPECT RESULT_SET '%sCOMMIT_TIMESTAMP',null"); + logWithNamespace("SHOW VARIABLE %sCOMMIT_TIMESTAMP;"); + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + connection.getCommitTimestamp(); + } + } + } + + @Test + public void testGetCommitResponse() { + try (Connection connection = getConnection()) { + if (isGetCommitTimestampAllowed()) { + logWithNamespace("@EXPECT RESULT_SET '%sCOMMIT_TIMESTAMP'"); + logWithNamespace("SHOW VARIABLE 
%sCOMMIT_RESPONSE;"); + assertThat(connection.getCommitResponse(), is(notNullValue())); + } else { + logWithNamespace("@EXPECT RESULT_SET '%sCOMMIT_TIMESTAMP',null"); + logWithNamespace("SHOW VARIABLE %sCOMMIT_RESPONSE;"); + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + connection.getCommitResponse(); + } + } + } + + abstract boolean isExecuteAllowed(StatementType type); + + @Test + public void testExecute() { + for (StatementType type : + new StatementType[] {StatementType.QUERY, StatementType.UPDATE, StatementType.DDL}) { + testExecute(type); + } + } + + private void testExecute(final StatementType type) { + try (Connection connection = getConnection()) { + if (isExecuteAllowed(type)) { + log(getTestStatement(type).getSql() + ";"); + assertThat(connection.execute(getTestStatement(type)), is(notNullValue())); + } else { + expectSpannerException( + type + " should not be allowed", + t -> { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + log(getTestStatement(type).getSql() + ";"); + t.execute(getTestStatement(type)); + }, + connection); + } + } + } + + private Statement getTestStatement(StatementType type) { + switch (type) { + case QUERY: + return Statement.of(SELECT); + case UPDATE: + return Statement.of(UPDATE); + case DDL: + return Statement.of(DDL); + case CLIENT_SIDE: + case UNKNOWN: + default: + throw new IllegalArgumentException("Unsupported type: " + type); + } + } + + @Test + public void testExecuteQuery() { + for (StatementType type : + new StatementType[] {StatementType.QUERY, StatementType.UPDATE, StatementType.DDL}) { + testExecuteQuery(type); + } + } + + private void testExecuteQuery(final StatementType type) { + try (Connection connection = getConnection()) { + if (type == StatementType.QUERY && isExecuteAllowed(StatementType.QUERY)) { + log("@EXPECT RESULT_SET 'TEST',1"); + log(getTestStatement(type).getSql() + ";"); + ResultSet rs = connection.executeQuery(getTestStatement(type)); + assertThat(rs, is(notNullValue())); + 
assertThat(rs.getStats(), is(nullValue())); + } else if (type == StatementType.QUERY) { + // it is a query, but queries are not allowed for this connection state + expectSpannerException( + type + " should not be allowed", + t -> { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + log(getTestStatement(type).getSql() + ";"); + t.executeQuery(getTestStatement(type)); + }, + connection, + ErrorCode.FAILED_PRECONDITION); + } else { + expectSpannerException( + type + " should be an invalid argument", + t -> t.executeQuery(getTestStatement(type)), + connection, + ErrorCode.INVALID_ARGUMENT); + } + } + } + + @Test + public void testAnalyzeQuery() { + for (StatementType type : + new StatementType[] {StatementType.QUERY, StatementType.UPDATE, StatementType.DDL}) { + testAnalyzeQuery(type); + } + } + + private void testAnalyzeQuery(final StatementType type) { + // TODO: add log statements when ANALYZE ... sql statements are supported + try (Connection connection = getConnection()) { + for (QueryAnalyzeMode mode : QueryAnalyzeMode.values()) { + final QueryAnalyzeMode currentMode = mode; + if (type == StatementType.QUERY && isExecuteAllowed(StatementType.QUERY)) { + ResultSet rs = connection.analyzeQuery(getTestStatement(type), currentMode); + assertThat(rs, is(notNullValue())); + while (rs.next()) {} + assertThat(rs.getStats(), is(notNullValue())); + } else if (type == StatementType.QUERY) { + // it is a query, but queries are not allowed for this connection state + expectSpannerException( + type + " should not be allowed", + t -> t.analyzeQuery(getTestStatement(type), currentMode), + connection, + ErrorCode.FAILED_PRECONDITION); + } else { + expectSpannerException( + type + " should be an invalid argument", + t -> t.analyzeQuery(getTestStatement(type), currentMode), + connection, + ErrorCode.INVALID_ARGUMENT); + } + } + } + } + + @Test + public void testExecuteUpdate() { + for (StatementType type : + new StatementType[] {StatementType.QUERY, StatementType.UPDATE, 
StatementType.DDL}) { + testExecuteUpdate(type); + } + } + + private void testExecuteUpdate(final StatementType type) { + try (Connection connection = getConnection()) { + if (type == StatementType.UPDATE && isExecuteAllowed(StatementType.UPDATE)) { + log("@EXPECT UPDATE_COUNT 1"); + log(getTestStatement(type).getSql() + ";"); + assertThat(connection.executeUpdate(getTestStatement(type)), is(notNullValue())); + } else if (type == StatementType.UPDATE) { + // it is an update statement, but updates are not allowed for this connection state + expectSpannerException( + type + "should not be allowed", + t -> { + log("@EXPECT EXCEPTION FAILED_PRECONDITION"); + log(getTestStatement(type).getSql() + ";"); + t.executeUpdate(getTestStatement(type)); + }, + connection, + ErrorCode.FAILED_PRECONDITION); + } else { + expectSpannerException( + type + " should be an invalid argument", + t -> t.executeUpdate(getTestStatement(type)), + connection, + ErrorCode.INVALID_ARGUMENT); + } + } + } + + abstract boolean isWriteAllowed(); + + @Test + public void testWrite() { + try (Connection connection = getConnection()) { + if (!isWriteAllowed() || !connection.isAutocommit()) { + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + } + connection.write(createTestMutation()); + } + } + + @Test + public void testWriteIterable() { + try (Connection connection = getConnection()) { + if (!isWriteAllowed() || !connection.isAutocommit()) { + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + } + connection.write(Collections.singletonList(createTestMutation())); + } + } + + @Test + public void testBufferedWrite() { + try (Connection connection = getConnection()) { + if (!isWriteAllowed() || connection.isAutocommit()) { + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + } + connection.bufferedWrite(createTestMutation()); + } + } + + @Test + public void testBufferedWriteIterable() { + try (Connection connection = getConnection()) { + if (!isWriteAllowed() || 
connection.isAutocommit()) { + exception.expect(matchCode(ErrorCode.FAILED_PRECONDITION)); + } + connection.bufferedWrite(Collections.singletonList(createTestMutation())); + } + } + + private Mutation createTestMutation() { + return Mutation.newInsertBuilder("foo").set("id").to(1L).set("name").to("bar").build(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AbstractMockServerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AbstractMockServerTest.java new file mode 100644 index 000000000000..67b4c8e05595 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AbstractMockServerTest.java @@ -0,0 +1,352 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ForceCloseSpannerFunction; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.admin.database.v1.MockDatabaseAdminImpl; +import com.google.cloud.spanner.admin.instance.v1.MockInstanceAdminImpl; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest.AbortInterceptor; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest.ITConnection; +import com.google.cloud.spanner.connection.SpannerPool.CheckAndCloseSpannersMode; +import com.google.common.util.concurrent.AbstractFuture; +import com.google.longrunning.GetOperationRequest; +import com.google.longrunning.Operation; +import com.google.longrunning.OperationsGrpc.OperationsImplBase; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Any; +import com.google.protobuf.Empty; +import com.google.protobuf.ListValue; +import com.google.protobuf.Value; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeCode; +import io.grpc.Metadata; +import io.grpc.Server; +import io.grpc.ServerCall; +import io.grpc.ServerCall.Listener; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptor; +import io.grpc.internal.LogExceptionRunnable; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import io.grpc.stub.StreamObserver; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.sql.DriverManager; +import java.sql.SQLException; +import 
java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public abstract class AbstractMockServerTest { + public static final long COUNT_BEFORE_INSERT = 0L; + public static final long COUNT_AFTER_INSERT = 1L; + public static final Statement SELECT_COUNT_STATEMENT = + Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE ID=1"); + + protected static final Statement SELECT1_STATEMENT = Statement.of("SELECT 1"); + + private static final ResultSetMetadata SINGLE_COL_INT64_RESULTSET_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("C") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .build()) + .build(); + public static final com.google.spanner.v1.ResultSet SELECT_COUNT_RESULTSET_BEFORE_INSERT = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue(String.valueOf(COUNT_BEFORE_INSERT)) + .build()) + .build()) + .setMetadata(SINGLE_COL_INT64_RESULTSET_METADATA) + .build(); + public static final com.google.spanner.v1.ResultSet SELECT_COUNT_RESULTSET_AFTER_INSERT = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues( + Value.newBuilder().setStringValue(String.valueOf(COUNT_AFTER_INSERT)).build()) + .build()) + .setMetadata(SINGLE_COL_INT64_RESULTSET_METADATA) + .build(); + public static final com.google.spanner.v1.ResultSet UPDATE_RETURNING_RESULTSET = + ResultSet.newBuilder() + .setStats(ResultSetStats.newBuilder().setRowCountExact(1)) + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("col") + 
.setType(Type.newBuilder().setCodeValue(TypeCode.INT64_VALUE)) + .build()))) + .addRows( + ListValue.newBuilder() + .addValues(Value.newBuilder().setStringValue("1").build()) + .build()) + .build(); + + protected static final ResultSet SELECT1_RESULTSET = + ResultSet.newBuilder() + .setMetadata(SINGLE_COL_INT64_RESULTSET_METADATA) + .addRows( + ListValue.newBuilder() + .addValues(Value.newBuilder().setStringValue("1").build()) + .build()) + .build(); + + public static final Statement INSERT_STATEMENT = + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test aborted')"); + public static final Statement INSERT_RETURNING_STATEMENT = + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test aborted') THEN RETURN *"); + public static final Statement PG_INSERT_RETURNING_STATEMENT = + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test aborted') RETURNING *"); + public static final long UPDATE_COUNT = 1L; + + public static final int RANDOM_RESULT_SET_ROW_COUNT = 100; + public static final Statement SELECT_RANDOM_STATEMENT = Statement.of("SELECT * FROM RANDOM"); + public static final com.google.spanner.v1.ResultSet RANDOM_RESULT_SET = + new RandomResultSetGenerator(RANDOM_RESULT_SET_ROW_COUNT).generate(); + + public static MockSpannerServiceImpl mockSpanner; + public static MockInstanceAdminImpl mockInstanceAdmin; + public static MockDatabaseAdminImpl mockDatabaseAdmin; + public static OperationsImplBase mockOperations; + private static Server server; + private static InetSocketAddress address; + + private static boolean futureParentHandlers; + private static boolean exceptionRunnableParentHandlers; + private static boolean nettyServerParentHandlers; + private static boolean clientStreamParentHandlers; + + @BeforeClass + public static void startStaticServer() throws Exception { + startStaticServer(createServerInterceptor()); + } + + public static void startStaticServer(ServerInterceptor interceptor) throws IOException { + mockSpanner = new 
MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. + mockInstanceAdmin = new MockInstanceAdminImpl(); + mockDatabaseAdmin = new MockDatabaseAdminImpl(); + mockOperations = + new OperationsImplBase() { + @Override + public void getOperation( + GetOperationRequest request, StreamObserver responseObserver) { + responseObserver.onNext( + Operation.newBuilder() + .setDone(false) + .setName(request.getName()) + .setMetadata(Any.pack(Empty.getDefaultInstance())) + .build()); + responseObserver.onCompleted(); + } + }; + address = new InetSocketAddress("localhost", 0); + server = + NettyServerBuilder.forAddress(address) + .addService(mockSpanner) + .addService(mockInstanceAdmin) + .addService(mockDatabaseAdmin) + .addService(mockOperations) + .intercept(interceptor) + .build() + .start(); + mockSpanner.putStatementResult( + StatementResult.query(SELECT_COUNT_STATEMENT, SELECT_COUNT_RESULTSET_BEFORE_INSERT)); + mockSpanner.putStatementResult(StatementResult.update(INSERT_STATEMENT, UPDATE_COUNT)); + mockSpanner.putStatementResult( + StatementResult.updateReturning(INSERT_RETURNING_STATEMENT, UPDATE_RETURNING_RESULTSET)); + mockSpanner.putStatementResult( + StatementResult.updateReturning(PG_INSERT_RETURNING_STATEMENT, UPDATE_RETURNING_RESULTSET)); + mockSpanner.putStatementResult( + StatementResult.query(SELECT_RANDOM_STATEMENT, RANDOM_RESULT_SET)); + mockSpanner.putStatementResult(StatementResult.query(SELECT1_STATEMENT, SELECT1_RESULTSET)); + mockSpanner.putStatementResult( + StatementResult.detectDialectResult(Dialect.GOOGLE_STANDARD_SQL)); + + futureParentHandlers = Logger.getLogger(AbstractFuture.class.getName()).getUseParentHandlers(); + exceptionRunnableParentHandlers = + Logger.getLogger(LogExceptionRunnable.class.getName()).getUseParentHandlers(); + nettyServerParentHandlers = + Logger.getLogger("io.grpc.netty.shaded.io.grpc.netty.NettyServerHandler") + .getUseParentHandlers(); + 
clientStreamParentHandlers = + Logger.getLogger("io.grpc.netty.shaded.io.grpc.netty.NettyServerHandler") + .getUseParentHandlers(); + Logger.getLogger(AbstractFuture.class.getName()).setUseParentHandlers(false); + Logger.getLogger(LogExceptionRunnable.class.getName()).setUseParentHandlers(false); + Logger.getLogger("io.grpc.netty.shaded.io.grpc.netty.NettyServerHandler") + .setUseParentHandlers(false); + Logger.getLogger("io.grpc.internal.AbstractClientStream").setUseParentHandlers(false); + } + + static ServerInterceptor createServerInterceptor() { + return new ServerInterceptor() { + @Override + public Listener interceptCall( + ServerCall call, Metadata headers, ServerCallHandler next) { + return next.startCall(call, headers); + } + }; + } + + @AfterClass + public static void stopServer() { + try { + SpannerPool.INSTANCE.checkAndCloseSpanners( + CheckAndCloseSpannersMode.ERROR, + new ForceCloseSpannerFunction(500L, TimeUnit.MILLISECONDS)); + } finally { + Logger.getLogger(AbstractFuture.class.getName()).setUseParentHandlers(futureParentHandlers); + Logger.getLogger(LogExceptionRunnable.class.getName()) + .setUseParentHandlers(exceptionRunnableParentHandlers); + Logger.getLogger("io.grpc.netty.shaded.io.grpc.netty.NettyServerHandler") + .setUseParentHandlers(nettyServerParentHandlers); + Logger.getLogger("io.grpc.internal.AbstractClientStream") + .setUseParentHandlers(clientStreamParentHandlers); + } + server.shutdown(); + } + + @Before + public void setupResults() { + mockSpanner.clearRequests(); + mockSpanner.removeAllExecutionTimes(); + mockDatabaseAdmin.getRequests().clear(); + mockInstanceAdmin.getRequests().clear(); + } + + protected java.sql.Connection createJdbcConnection() throws SQLException { + return DriverManager.getConnection("jdbc:" + getBaseUrl()); + } + + protected ITConnection createConnection() { + return createConnection(Collections.emptyList(), Collections.emptyList()); + } + + ITConnection createConnection(String additionalUrlOptions) { + 
return createConnection(Collections.emptyList(), Collections.emptyList(), additionalUrlOptions); + } + + ITConnection createConnection( + AbortInterceptor interceptor, TransactionRetryListener transactionRetryListener) { + return createConnection( + Collections.singletonList(interceptor), + Collections.singletonList(transactionRetryListener)); + } + + ITConnection createConnection( + List interceptors, + List transactionRetryListeners) { + return createConnection(interceptors, transactionRetryListeners, ""); + } + + ITConnection createConnection( + List interceptors, + List transactionRetryListeners, + String additionalUrlOptions) { + ConnectionOptions.Builder builder = + ConnectionOptions.newBuilder() + .setUri(getBaseUrl() + additionalUrlOptions) + .setStatementExecutionInterceptors(interceptors); + configureConnectionOptions(builder); + ConnectionOptions options = builder.build(); + ITConnection connection = createITConnection(options); + for (TransactionRetryListener listener : transactionRetryListeners) { + connection.addTransactionRetryListener(listener); + } + return connection; + } + + protected ConnectionOptions.Builder configureConnectionOptions( + ConnectionOptions.Builder builder) { + return builder; + } + + protected String getBaseUrl() { + return String.format( + "cloudspanner://localhost:%d/projects/proj/instances/inst/databases/db?usePlainText=true;autocommit=false;retryAbortsInternally=true", + server.getPort()); + } + + protected static int getPort() { + return server.getPort(); + } + + protected ExecuteSqlRequest getLastExecuteSqlRequest() { + List requests = mockSpanner.getRequests(); + for (int i = requests.size() - 1; i >= 0; i--) { + if (requests.get(i) instanceof ExecuteSqlRequest) { + return (ExecuteSqlRequest) requests.get(i); + } + } + throw new IllegalStateException("No ExecuteSqlRequest found in requests"); + } + + ITConnection createITConnection(ConnectionOptions options) { + return new ITConnectionImpl(options); + } + + boolean 
isMultiplexedSessionsEnabled(Spanner spanner) { + if (spanner.getOptions() == null || spanner.getOptions().getSessionPoolOptions() == null) { + return false; + } + return spanner.getOptions().getSessionPoolOptions().getUseMultiplexedSession(); + } + + boolean isMultiplexedSessionsEnabledForPartitionedOps(Spanner spanner) { + if (spanner.getOptions() == null || spanner.getOptions().getSessionPoolOptions() == null) { + return false; + } + return spanner.getOptions().getSessionPoolOptions().getUseMultiplexedSessionPartitionedOps(); + } + + boolean isMultiplexedSessionsEnabledForRW(Spanner spanner) { + if (spanner.getOptions() == null || spanner.getOptions().getSessionPoolOptions() == null) { + return false; + } + return spanner.getOptions().getSessionPoolOptions().getUseMultiplexedSessionForRW(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AbstractSqlScriptTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AbstractSqlScriptTest.java new file mode 100644 index 000000000000..f508f7c44c40 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AbstractSqlScriptTest.java @@ -0,0 +1,58 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.connection.AbstractSqlScriptVerifier.GenericConnection; +import com.google.cloud.spanner.connection.AbstractSqlScriptVerifier.GenericConnectionProvider; +import com.google.cloud.spanner.connection.SqlScriptVerifier.SpannerGenericConnection; +import com.google.common.base.Preconditions; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public abstract class AbstractSqlScriptTest { + + @Parameter public Dialect dialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + static class TestConnectionProvider implements GenericConnectionProvider { + final Dialect dialect; + + TestConnectionProvider(Dialect dialect) { + this.dialect = Preconditions.checkNotNull(dialect); + } + + @Override + public GenericConnection getConnection() { + return SpannerGenericConnection.of( + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build(), + dialect)); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AbstractSqlScriptVerifier.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AbstractSqlScriptVerifier.java new file mode 100644 index 000000000000..29cc8b73206a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AbstractSqlScriptVerifier.java @@ -0,0 +1,536 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Scanner; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Stream; + +/** + * Base class for SQL Script verifiers for both the generic Connection API and JDBC connections + * + *

    Simple parser/verifier for sql statements. This verifier is able to parse additional @EXPECT + * statements that defines the expected behavior of a sql statement. Possible uses are: + * + *

      + *
    • @EXPECT NO_RESULT: The following statement should not return a result (no {@link ResultSet} + * and no update count) + *
    • @EXPECT UPDATE_COUNT count: The following statement should return the specified + * update count + *
    • @EXPECT RESULT_SET: The following statement should return a {@link ResultSet} with two + * columns with the names ACTUAL and EXPECTED and containing at least one row. For each row, + * the values of ACTUAL and EXPECTED must be equal + *
    • @EXPECT RESULT_SET 'columnName': The following statement should return a {@link ResultSet} + * with a column with the specified name and containing at least one row (additional columns + * in the {@link ResultSet} are allowed). For each row, the value of the column must be not + * null + *
    • @EXPECT RESULT_SET 'columnName',value: The following statement should return a {@link + * ResultSet} with a column with the specified name and containing at least one row + * (additional columns in the {@link ResultSet} are allowed). For each row, the value of the + * column must be equal to the specified value + *
    • @EXPECT EXCEPTION code ['messagePrefix']: The following statement should throw a {@link + * SpannerException} with the specified code and starting with the (optional) message prefix + *
    • @EXPECT EQUAL 'variable1','variable2': The values of the two given variables should be + * equal. The value of a variable can be set using a @PUT statement. + *
    + * + * The parser can set a temporary variable value using a @PUT statement: {@code @PUT + * 'variable_name'\nSQL statement} The SQL statement must be a statement that returns a {@link + * ResultSet} containing exactly one row and one column. + * + *

    In addition the verifier can create new connections if the script contains NEW_CONNECTION; + * statements and the verifier has been created with a {@link GenericConnectionProvider}. See {@link + * ConnectionImplGeneratedSqlScriptTest} for an example for this. + */ +public abstract class AbstractSqlScriptVerifier { + private static final Pattern VERIFY_PATTERN = + Pattern.compile( + "(?is)\\s*(?:@EXPECT)\\s+(?NO_RESULT|RESULT_SET\\s*(?'.*?'(?,.*?)?)?|UPDATE_COUNT\\s*(?-?\\d{1,19})|EXCEPTION\\s*(?(?CANCELLED|UNKNOWN|INVALID_ARGUMENT|DEADLINE_EXCEEDED|NOT_FOUND|ALREADY_EXISTS|PERMISSION_DENIED|UNAUTHENTICATED|RESOURCE_EXHAUSTED|FAILED_PRECONDITION|ABORTED|OUT_OF_RANGE|UNIMPLEMENTED|INTERNAL|UNAVAILABLE|DATA_LOSS)(?:\\s*)(?'.*?')?)|EQUAL\\s+(?'.+?')\\s*,\\s*(?'.+?'))(\\n" + + "(?.*))?"); + + private static final String PUT_CONDITION = + "@PUT can only be used in combination with a statement that returns a" + + " result set containing exactly one row and one column"; + private static final Pattern PUT_PATTERN = + Pattern.compile("(?is)\\s*(?:@PUT)\\s+(?'.*?')" + "\\n(?.*)"); + + protected enum ExpectedResultType { + RESULT_SET, + UPDATE_COUNT, + NO_RESULT, + EXCEPTION, + EQUAL; + + StatementResult.ResultType getStatementResultType() { + switch (this) { + case NO_RESULT: + return StatementResult.ResultType.NO_RESULT; + case RESULT_SET: + return StatementResult.ResultType.RESULT_SET; + case UPDATE_COUNT: + return StatementResult.ResultType.UPDATE_COUNT; + case EXCEPTION: + case EQUAL: + default: + throw new IllegalArgumentException("not supported"); + } + } + } + + /** Result of an executed statement */ + protected abstract static class GenericStatementResult { + protected abstract StatementResult.ResultType getResultType(); + + protected abstract GenericResultSet getResultSet(); + + protected abstract long getUpdateCount(); + } + + /** + * Generic wrapper around a connection to a database. 
The underlying connection could be a Spanner + * {@link com.google.cloud.spanner.connection.Connection} or a JDBC {@link java.sql.Connection} + */ + public abstract static class GenericConnection implements AutoCloseable { + protected abstract GenericStatementResult execute(String sql) throws Exception; + + @Override + public abstract void close() throws Exception; + + public abstract Dialect getDialect(); + } + + /** + * Generic wrapper around a result set. The underlying result set could be a Spanner {@link + * ResultSet} or a JDBC {@link java.sql.ResultSet} + */ + protected abstract static class GenericResultSet { + protected abstract boolean next() throws Exception; + + protected abstract Object getValue(String col) throws Exception; + + protected abstract int getColumnCount() throws Exception; + + protected abstract Object getFirstValue() throws Exception; + } + + public interface GenericConnectionProvider { + GenericConnection getConnection(); + } + + /** Reads SQL statements from a file. Any copyright header in the file will be stripped away. 
*/ + public static List readStatementsFromFile(String filename, Class resourceClass) { + try (InputStream is = resourceClass.getResourceAsStream(filename)) { + StringBuilder builder = new StringBuilder(); + try (Scanner scanner = new Scanner(is)) { + while (scanner.hasNextLine()) { + String line = scanner.nextLine(); + builder.append(line).append("\n"); + } + } + String script = builder.toString().replaceAll(StatementParserTest.COPYRIGHT_PATTERN, ""); + String[] array = script.split(";"); + List res = new ArrayList<>(array.length); + for (String statement : array) { + if (statement != null && statement.trim().length() > 0) { + res.add(statement); + } + } + return res; + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private final GenericConnectionProvider connectionProvider; + + private final boolean logStatements; + + /** + * Constructor for a verifier that will take a {@link GenericConnection} as a parameter to the + * {@link AbstractSqlScriptVerifier#verifyStatementsInFile(GenericConnection, String, Class, + * boolean)} + */ + public AbstractSqlScriptVerifier() { + this(null); + } + + /** Constructor for a verifier that will use a connection provider for connections */ + public AbstractSqlScriptVerifier(GenericConnectionProvider provider) { + this.connectionProvider = provider; + this.logStatements = Boolean.parseBoolean(System.getProperty("log_sql_statements", "false")); + } + + /** + * Reads sql statements from the specified file name and executes and verifies these. Statements + * that are preceded by an @EXPECT statement are verified against the @EXPECT specification. + * Statements without an @EXPECT statement will be executed and its result will be ignored, unless + * the statement throws an exception, which will fail the test case. + * + *

    The {@link com.google.cloud.spanner.connection.Connection}s that the statements are executed + * on must be created by a {@link GenericConnectionProvider} + * + * @param filename The file name containing the statements. Statements must be separated by a + * semicolon (;) + * @param resourceClass The class that should be used to locate the resource specified by the file + * name + * @param allowParallel indicates whether the batches in the given script may be executed in + * parallel + */ + public void verifyStatementsInFile(String filename, Class resourceClass, boolean allowParallel) + throws Exception { + verifyStatementsInFile(null, filename, resourceClass, allowParallel); + } + + /** + * Reads sql statements from the specified file name and executes and verifies these. Statements + * that are preceded by an @EXPECT statement are verified against the @EXPECT specification. + * Statements without an @EXPECT statement will be executed and its result will be ignored, unless + * the statement throws an exception, which will fail the test case. + * + *

    The {@link com.google.cloud.spanner.connection.Connection}s that the statements are executed + * on must be created by a {@link GenericConnectionProvider} + * + * @param filename The file name containing the statements. Statements must be separated by a + * semicolon (;) + * @param resourceClass The class that should be used to locate the resource specified by the file + * name + * @deprecated use {@link AbstractSqlScriptVerifier#verifyStatementsInFile(String, Class, + * boolean)} instead. This method does not allow parallel batch execution. + */ + @Deprecated + public void verifyStatementsInFile(String filename, Class resourceClass) throws Exception { + this.verifyStatementsInFile(filename, resourceClass, false); + } + + /** + * Reads sql statements from the specified file name and executes and verifies these. Statements + * that are preceded by an @EXPECT statement are verified against the @EXPECT specification. + * Statements without an @EXPECT statement will be executed and its result will be ignored, unless + * the statement throws an exception, which will fail the test case. + * + * @param providedConnection The {@link com.google.cloud.spanner.connection.Connection} to execute + * the statements against + * @param filename The file name containing the statements. Statements must be separated by a + * semicolon (;) + * @param resourceClass The class that defines the package where to find the input file + * @deprecated use {@link AbstractSqlScriptVerifier#verifyStatementsInFile(GenericConnection, + * String, Class)})} instead. This method does not allow parallel batch execution. + */ + @Deprecated + public void verifyStatementsInFile( + GenericConnection providedConnection, String filename, Class resourceClass) + throws Exception { + this.verifyStatementsInFile(providedConnection, filename, resourceClass, false); + } + + /** + * Reads sql statements from the specified file name and executes and verifies these. 
Statements + * that are preceded by an @EXPECT statement are verified against the @EXPECT specification. + * Statements without an @EXPECT statement will be executed and its result will be ignored, unless + * the statement throws an exception, which will fail the test case. + * + * @param providedConnection The {@link com.google.cloud.spanner.connection.Connection} to execute + * the statements against + * @param filename The file name containing the statements. Statements must be separated by a + * semicolon (;) + * @param resourceClass The class that defines the package where to find the input file + * @param allowParallel indicates whether the batches in the given script may be executed in + * parallel + */ + public void verifyStatementsInFile( + GenericConnection providedConnection, + String filename, + Class resourceClass, + boolean allowParallel) + throws Exception { + List statements = readStatementsFromFile(filename, resourceClass); + List> batches = toBatches(statements); + + Stream> stream; + if (!allowParallel || logStatements) { + stream = batches.stream(); + } else { + stream = batches.parallelStream(); + } + stream.forEach( + batch -> { + try { + Map variables = new HashMap<>(); + GenericConnection connection; + if (providedConnection == null) { + connection = connectionProvider.getConnection(); + } else { + connection = providedConnection; + } + for (String sql : batch) { + if (logStatements) { + System.out.println( + "\n------------------------------------------------------\n" + + new Date() + + " ---- verifying statement:"); + System.out.println(sql); + } + verifyStatement(variables, connection, sql, connection.getDialect()); + } + connection.close(); + } catch (Exception e) { + throw SpannerExceptionFactory.asSpannerException(e); + } + }); + } + + private List> toBatches(List statements) { + List> batches = new ArrayList<>(); + List currentBatch = new ArrayList<>(); + for (String statement : statements) { + String sql = statement.trim(); + if 
(sql.equalsIgnoreCase("NEW_CONNECTION")) { + if (!currentBatch.isEmpty()) { + batches.add(currentBatch); + } + currentBatch = new ArrayList<>(); + } else { + currentBatch.add(sql); + } + } + if (!currentBatch.isEmpty()) { + batches.add(currentBatch); + } + return batches; + } + + private void verifyStatement( + Map variables, + GenericConnection connection, + String statement, + Dialect dialect) + throws Exception { + statement = replaceVariables(variables, statement); + String statementWithoutComments = + AbstractStatementParser.getInstance(dialect).removeCommentsAndTrim(statement); + Matcher verifyMatcher = VERIFY_PATTERN.matcher(statementWithoutComments); + Matcher putMatcher = PUT_PATTERN.matcher(statementWithoutComments); + if (verifyMatcher.matches()) { + String sql = verifyMatcher.group("statement"); + String typeName = verifyMatcher.group("type"); + int endIndex = getFirstSpaceChar(typeName); + ExpectedResultType type = ExpectedResultType.valueOf(typeName.substring(0, endIndex)); + if (type == ExpectedResultType.EXCEPTION) { + String code = verifyMatcher.group("code"); + String messagePrefix = verifyMatcher.group("messagePrefix"); + try { + connection.execute(sql); + fail("expected exception: " + sql); + } catch (Exception e) { + verifyExpectedException(statementWithoutComments, e, code, messagePrefix); + } + } else if (type == ExpectedResultType.EQUAL) { + String variable1 = verifyMatcher.group("variable1"); + String variable2 = verifyMatcher.group("variable2"); + // get rid of the single quotes + variable1 = variable1.substring(1, variable1.length() - 1); + variable2 = variable2.substring(1, variable2.length() - 1); + assertThat( + "No variable with name " + variable1, variables.containsKey(variable1), is(true)); + assertThat( + "No variable with name " + variable2, variables.containsKey(variable2), is(true)); + Object value1 = variables.get(variable1); + Object value2 = variables.get(variable2); + if ((value1 instanceof Timestamp) && (value2 instanceof 
Timestamp)) { + // read timestamps are rounded + Timestamp ts1 = (Timestamp) value1; + Timestamp ts2 = (Timestamp) value2; + value1 = + Timestamp.ofTimeSecondsAndNanos(ts1.getSeconds(), (ts1.getNanos() / 1000) * 1000); + value2 = + Timestamp.ofTimeSecondsAndNanos(ts2.getSeconds(), (ts2.getNanos() / 1000) * 1000); + } + assertThat(value1, is(equalTo(value2))); + } else { + GenericStatementResult result = connection.execute(sql); + assertThat(statement, result.getResultType(), is(equalTo(type.getStatementResultType()))); + switch (type.getStatementResultType()) { + case NO_RESULT: + break; + case RESULT_SET: + String column = verifyMatcher.group("column"); + if (column == null) { + verifyActualVsExpectedResultSet(statement, result.getResultSet()); + } else { + String value = verifyMatcher.group("value"); + if (value != null) { + String[] parts = column.split(",", 2); + column = parts[0].trim(); + value = parts[1].trim(); + column = column.substring(1, column.length() - 1); + verifyResultSetValue(statement, result.getResultSet(), column, parseValue(value)); + } else { + // get rid of the quotation marks + column = column.substring(1, column.length() - 1); + verifyResultSetColumnNotNull(statement, result.getResultSet(), column); + } + } + break; + case UPDATE_COUNT: + long expectedUpdateCount = Long.parseLong(verifyMatcher.group("count").trim()); + assertThat(statement, result.getUpdateCount(), is(equalTo(expectedUpdateCount))); + break; + } + } + } else if (putMatcher.matches()) { + String sql = putMatcher.group("statement"); + String variable = putMatcher.group("variable"); + // get rid of the single quotes + variable = variable.substring(1, variable.length() - 1); + GenericStatementResult result = connection.execute(sql); + assertThat( + PUT_CONDITION, + result.getResultType(), + is(equalTo(com.google.cloud.spanner.connection.StatementResult.ResultType.RESULT_SET))); + GenericResultSet rs = result.getResultSet(); + assertThat(PUT_CONDITION, rs.next(), is(true)); + 
assertThat(PUT_CONDITION, rs.getColumnCount(), is(equalTo(1))); + variables.put(variable, rs.getFirstValue()); + assertThat(PUT_CONDITION, rs.next(), is(false)); + } else { + // just execute the statement + connection.execute(statement); + } + } + + private String replaceVariables(Map variables, String sql) { + for (String key : variables.keySet()) { + sql = sql.replaceAll("%%" + key + "%%", variables.get(key).toString()); + } + return sql; + } + + protected abstract void verifyExpectedException( + String statement, Exception e, String code, String messagePrefix); + + private static final Pattern INT64_PATTERN = Pattern.compile("\\d{1,19}"); + private static final Pattern ARRAY_INT64_PATTERN = + Pattern.compile("\\[\\s*\\d{1,19}(\\s*,\\s*\\d{1,19})*\\s*\\]"); + private static final Pattern FLOAT64_PATTERN = Pattern.compile("\\d{1,19}.\\d{1,19}"); + private static final String TS_PREFIX = "ts'"; + private static final String TS_SUFFIX = "'"; + private static final Pattern BOOLEAN_PATTERN = Pattern.compile("(?is)true|false"); + + private Object parseValue(String valueString) { + if (valueString == null || "".equals(valueString) || "null".equalsIgnoreCase(valueString)) { + return null; + } + if (valueString.startsWith("'") && valueString.endsWith("'")) { + return valueString.substring(1, valueString.length() - 1); + } + if (INT64_PATTERN.matcher(valueString).matches()) { + return Long.valueOf(valueString); + } + if (ARRAY_INT64_PATTERN.matcher(valueString).matches()) { + String[] stringArray = valueString.substring(1, valueString.length() - 1).split(","); + List res = new ArrayList<>(); + for (String string : stringArray) { + res.add(Long.valueOf(string)); + } + return res; + } + if (FLOAT64_PATTERN.matcher(valueString).matches()) { + return Double.valueOf(valueString); + } + if (valueString.startsWith(TS_PREFIX) && valueString.endsWith(TS_SUFFIX)) { + try { + return ReadOnlyStalenessUtil.parseRfc3339( + valueString.substring(TS_PREFIX.length(), valueString.length() - 
TS_SUFFIX.length())); + } catch (IllegalArgumentException e) { + // ignore, apparently not a valid a timestamp after all. + } + } + if (BOOLEAN_PATTERN.matcher(valueString).matches()) { + return Boolean.valueOf(valueString); + } + return valueString; + } + + private int getFirstSpaceChar(String input) { + for (int index = 0; index < input.length(); index++) { + if (Character.isWhitespace(input.charAt(index))) { + return index; + } + } + return input.length(); + } + + private void verifyResultSetColumnNotNull(String statement, GenericResultSet rs, String column) + throws Exception { + int count = 0; + while (rs.next()) { + assertThat(statement, getValue(rs, column), is(notNullValue())); + count++; + } + assertThat(count, is(not(equalTo(0)))); + } + + private void verifyResultSetValue( + String statement, GenericResultSet rs, String column, Object value) throws Exception { + int count = 0; + while (rs.next()) { + if (value == null) { + assertThat(statement, getValue(rs, column), is(nullValue())); + } else { + assertEquals(statement, getValue(rs, column), value); + } + count++; + } + assertThat(count, is(not(equalTo(0)))); + } + + private void verifyActualVsExpectedResultSet(String statement, GenericResultSet rs) + throws Exception { + int count = 0; + while (rs.next()) { + assertThat(statement, getValue(rs, "ACTUAL"), is(equalTo(getValue(rs, "EXPECTED")))); + count++; + } + assertThat(count, is(not(equalTo(0)))); + } + + private Object getValue(GenericResultSet rs, String col) throws Exception { + return rs.getValue(col); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AllTypesMockServerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AllTypesMockServerTest.java new file mode 100644 index 000000000000..1f75885aaa44 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AllTypesMockServerTest.java @@ -0,0 +1,789 @@ +/* 
+ * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.*; + +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SingerProto.Genre; +import com.google.cloud.spanner.SingerProto.SingerInfo; +import com.google.cloud.spanner.Statement; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.ListValue; +import com.google.protobuf.NullValue; +import com.google.protobuf.Value; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeCode; +import java.math.BigDecimal; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Base64; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +/** Tests that all types can be read from Spanner and sent to Spanner. 
*/ +@RunWith(Parameterized.class) +public class AllTypesMockServerTest extends AbstractMockServerTest { + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + @Parameter public Dialect dialect; + + private Dialect currentDialect; + + public static final Statement SELECT_STATEMENT = Statement.of("select * from all_types"); + + public static final boolean BOOL_VALUE = true; + public static final long INT64_VALUE = 1L; + public static final float FLOAT32_VALUE = 3.14f; + public static final double FLOAT64_VALUE = 3.14d; + public static final BigDecimal NUMERIC_VALUE = new BigDecimal("3.14"); + public static final String PG_NUMERIC_VALUE = "3.14"; + public static final String STRING_VALUE = "test-string"; + public static final String JSON_VALUE = "{\"key1\":\"value1\", \"key2\":\"value2\"}"; + public static final long PG_OID_VALUE = 1L; + public static final byte[] BYTES_VALUE = "test-bytes".getBytes(StandardCharsets.UTF_8); + public static final Date DATE_VALUE = Date.fromYearMonthDay(2024, 3, 2); + public static final UUID UUID_VALUE = UUID.randomUUID(); + public static final Timestamp TIMESTAMP_VALUE = + Timestamp.parseTimestamp("2024-03-02T07:07:00.20982735Z"); + + public static final List BOOL_ARRAY_VALUE = Arrays.asList(true, null, false); + public static final List INT64_ARRAY_VALUE = + Arrays.asList(100L, null, 200L, Long.MIN_VALUE, Long.MAX_VALUE); + public static final List FLOAT32_ARRAY_VALUE = + Arrays.asList( + -3.14f, null, 6.626f, Float.MIN_VALUE, Float.MAX_VALUE, Float.MIN_NORMAL, Float.NaN); + public static final List FLOAT64_ARRAY_VALUE = + Arrays.asList( + -3.14d, null, 6.626d, Double.MIN_VALUE, Double.MAX_VALUE, Double.MIN_NORMAL, Double.NaN); + public static final List NUMERIC_ARRAY_VALUE = + Arrays.asList( + new BigDecimal("-3.14"), + null, + new BigDecimal("99.99"), + BigDecimal.ZERO, + new BigDecimal("1e-9"), + new BigDecimal("-9.9999999999999999999999999999999999999E+28"), + new 
BigDecimal("9.9999999999999999999999999999999999999E+28")); + public static final List PG_NUMERIC_ARRAY_VALUE = + Arrays.asList( + "-3.14", + null, + "99.99", + "NaN", + "1e-9", + "-9.9999999999999999999999999999999999999E+28", + "9.9999999999999999999999999999999999999E+28"); + public static final List STRING_ARRAY_VALUE = + Arrays.asList("test-string1", null, "test-string2"); + public static final List JSON_ARRAY_VALUE = + Arrays.asList( + "{\"key1\":\"value1.1\", \"key2\":\"value1.2\"}", + null, + "{\"key1\":\"value3.1\", \"key2\":\"value3.2\"}"); + public static final List PG_OID_ARRAY_VALUE = + Arrays.asList(100L, null, 200L, Long.MIN_VALUE, Long.MAX_VALUE); + public static final List BYTES_ARRAY_VALUE = + Arrays.asList(ByteArray.copyFrom("test-bytes1"), null, ByteArray.copyFrom("test-bytes2")); + public static final List DATE_ARRAY_VALUE = + Arrays.asList( + Date.fromYearMonthDay(2024, 3, 1), + null, + Date.fromYearMonthDay(2024, 3, 3), + Date.fromYearMonthDay(1, 1, 1), + Date.fromYearMonthDay(9999, 12, 31)); + + public static final List UUID_ARRAY_VALUE = + Arrays.asList(UUID.randomUUID(), null, UUID.randomUUID()); + public static final List TIMESTAMP_ARRAY_VALUE = + Arrays.asList( + Timestamp.parseTimestamp("2024-03-01T07:07:00.20982735Z"), + null, + Timestamp.parseTimestamp("2024-03-03T07:07:00Z"), + Timestamp.MIN_VALUE, + Timestamp.MAX_VALUE); + + @Before + public void setupDialect() { + if (currentDialect != dialect) { + mockSpanner.putStatementResult(StatementResult.detectDialectResult(dialect)); + setupAllTypesResultSet(dialect); + mockSpanner.putStatementResult(StatementResult.update(createInsertStatement(dialect), 1L)); + SpannerPool.closeSpannerPool(); + currentDialect = dialect; + } + } + + private void setupAllTypesResultSet(Dialect dialect) { + // Use RandomResultSetGenerator to generate metadata for a ResultSet with all types. 
+ // This guarantees that this test will fail if a new type is added to RandomResultSetGenerator, + // but not added to this test. + // The columns in the result set are: + // COL1: BOOL + // COL2: INT64 + // COL3: FLOAT32 + // COL4: FLOAT64 + // COL5: NUMERIC / PG_NUMERIC + // COL6: STRING + // COL7: JSON / PG_JSONB + // COL8: BYTES + // COL9: DATE + // COL10: UUID + // COL11: TIMESTAMP + // COL12: PG_OID (added only for POSTGRESQL dialect) + // COL13-22: ARRAY<..> for the types above. + // Only for GoogleSQL: + // COL23: PROTO + // COL24: ENUM + // COL25: ARRAY + // COL26: ARRAY + // COL27: ARRAY (added only for POSTGRESQL dialect) + ListValue.Builder row1Builder = + ListValue.newBuilder() + .addValues(Value.newBuilder().setBoolValue(BOOL_VALUE)) + .addValues(Value.newBuilder().setStringValue(String.valueOf(INT64_VALUE)).build()) + .addValues(Value.newBuilder().setNumberValue(FLOAT32_VALUE)) + .addValues(Value.newBuilder().setNumberValue(FLOAT64_VALUE)) + .addValues( + Value.newBuilder() + .setStringValue( + dialect == Dialect.POSTGRESQL + ? PG_NUMERIC_VALUE + : NUMERIC_VALUE.toEngineeringString())) + .addValues(Value.newBuilder().setStringValue(STRING_VALUE)) + .addValues(Value.newBuilder().setStringValue(JSON_VALUE)) + .addValues( + Value.newBuilder().setStringValue(Base64.getEncoder().encodeToString(BYTES_VALUE))) + .addValues(Value.newBuilder().setStringValue(DATE_VALUE.toString())) + .addValues(Value.newBuilder().setStringValue(UUID_VALUE.toString())) + .addValues(Value.newBuilder().setStringValue(TIMESTAMP_VALUE.toString())); + if (dialect == Dialect.POSTGRESQL) { + row1Builder.addValues( + Value.newBuilder().setStringValue(String.valueOf(PG_OID_VALUE)).build()); + } + + row1Builder + .addValues( + Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + BOOL_ARRAY_VALUE.stream() + .map( + b -> + b == null + ? 
Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build() + : Value.newBuilder().setBoolValue(b).build()) + .collect(Collectors.toList())) + .build())) + .addValues( + Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + INT64_ARRAY_VALUE.stream() + .map( + l -> + l == null + ? Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build() + : Value.newBuilder() + .setStringValue(String.valueOf(l)) + .build()) + .collect(Collectors.toList())) + .build())) + .addValues( + Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + FLOAT32_ARRAY_VALUE.stream() + .map( + f -> { + if (f == null) { + return Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build(); + } else if (Float.isNaN(f)) { + return Value.newBuilder().setStringValue("NaN").build(); + } else { + return Value.newBuilder().setNumberValue(f).build(); + } + }) + .collect(Collectors.toList())) + .build())) + .addValues( + Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + FLOAT64_ARRAY_VALUE.stream() + .map( + d -> { + if (d == null) { + return Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build(); + } else if (Double.isNaN(d)) { + return Value.newBuilder().setStringValue("NaN").build(); + } else { + return Value.newBuilder().setNumberValue(d).build(); + } + }) + .collect(Collectors.toList())) + .build())) + .addValues( + Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + dialect == Dialect.POSTGRESQL + ? PG_NUMERIC_ARRAY_VALUE.stream() + .map( + string -> + string == null + ? Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build() + : Value.newBuilder().setStringValue(string).build()) + .collect(Collectors.toList()) + : NUMERIC_ARRAY_VALUE.stream() + .map( + bigDecimal -> + bigDecimal == null + ? 
Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build() + : Value.newBuilder() + .setStringValue( + bigDecimal.toEngineeringString()) + .build()) + .collect(Collectors.toList())) + .build())) + .addValues( + Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + STRING_ARRAY_VALUE.stream() + .map( + string -> + string == null + ? Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build() + : Value.newBuilder().setStringValue(string).build()) + .collect(Collectors.toList())) + .build())) + .addValues( + Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + JSON_ARRAY_VALUE.stream() + .map( + json -> + json == null + ? Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build() + : Value.newBuilder().setStringValue(json).build()) + .collect(Collectors.toList())) + .build())) + .addValues( + Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + BYTES_ARRAY_VALUE.stream() + .map( + byteArray -> + byteArray == null + ? Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build() + : Value.newBuilder() + .setStringValue( + Base64.getEncoder() + .encodeToString(byteArray.toByteArray())) + .build()) + .collect(Collectors.toList())) + .build())) + .addValues( + Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + DATE_ARRAY_VALUE.stream() + .map( + date -> + date == null + ? Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build() + : Value.newBuilder() + .setStringValue(date.toString()) + .build()) + .collect(Collectors.toList())) + .build())) + .addValues( + Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + UUID_ARRAY_VALUE.stream() + .map( + uuid -> + uuid == null + ? 
Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build() + : Value.newBuilder() + .setStringValue(uuid.toString()) + .build()) + .collect(Collectors.toList())) + .build())) + .addValues( + Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + TIMESTAMP_ARRAY_VALUE.stream() + .map( + timestamp -> + timestamp == null + ? Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build() + : Value.newBuilder() + .setStringValue(timestamp.toString()) + .build()) + .collect(Collectors.toList())) + .build())); + + if (dialect == Dialect.GOOGLE_STANDARD_SQL) { + // Add PROTO values. + row1Builder + .addValues( + Value.newBuilder() + .setStringValue( + Base64.getEncoder() + .encodeToString( + SingerInfo.newBuilder() + .setSingerId(1L) + .setNationality("unknown") + .setBirthDate("1986-09-30") + .setGenre(Genre.POP) + .build() + .toByteArray())) + .build()) + .addValues(Value.newBuilder().setStringValue(String.valueOf(Genre.JAZZ_VALUE)).build()) + .addValues( + Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue( + Base64.getEncoder() + .encodeToString( + SingerInfo.newBuilder() + .setSingerId(1L) + .setGenre(Genre.FOLK) + .setBirthDate("200-01-01") + .setNationality("no") + .build() + .toByteArray())) + .build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE)) + .addValues( + Value.newBuilder() + .setStringValue( + Base64.getEncoder() + .encodeToString( + SingerInfo.newBuilder() + .setSingerId(2L) + .setGenre(Genre.JAZZ) + .setBirthDate("200-01-02") + .setNationality("dk") + .build() + .toByteArray())) + .build()) + .build()) + .build()) + .addValues( + Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue(String.valueOf(Genre.ROCK_VALUE)) + .build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE)) + .addValues( + Value.newBuilder() + 
.setStringValue(String.valueOf(Genre.ROCK_VALUE)) + .build()) + .build()) + .build()); + } + + if (dialect == Dialect.POSTGRESQL) { + // Add ARRAY values. + row1Builder.addValues( + Value.newBuilder() + .setListValue( + ListValue.newBuilder() + .addAllValues( + PG_OID_ARRAY_VALUE.stream() + .map( + l -> + l == null + ? Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build() + : Value.newBuilder() + .setStringValue(String.valueOf(l)) + .build()) + .collect(Collectors.toList())) + .build())); + } + + com.google.spanner.v1.ResultSet resultSet = + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata( + RandomResultSetGenerator.generateAllTypesMetadata( + RandomResultSetGenerator.generateAllTypes(dialect))) + .addRows(row1Builder.build()) + .build(); + mockSpanner.putStatementResults(StatementResult.query(SELECT_STATEMENT, resultSet)); + } + + public static Statement createInsertStatement(Dialect dialect) { + Statement.Builder builder = Statement.newBuilder("insert into all_types ("); + builder.append( + IntStream.rangeClosed(1, RandomResultSetGenerator.generateAllTypes(dialect).length) + .mapToObj(col -> "COL" + col) + .collect(Collectors.joining(", ", "", ") values ("))); + builder.append( + IntStream.rangeClosed(1, RandomResultSetGenerator.generateAllTypes(dialect).length) + .mapToObj(col -> "@p" + col) + .collect(Collectors.joining(", ", "", ")"))); + int param = 0; + builder + .bind("p" + ++param) + .to(BOOL_VALUE) + .bind("p" + ++param) + .to(INT64_VALUE) + .bind("p" + ++param) + .to(FLOAT32_VALUE) + .bind("p" + ++param) + .to(FLOAT64_VALUE) + .bind("p" + ++param) + .to( + dialect == Dialect.POSTGRESQL + ? com.google.cloud.spanner.Value.pgNumeric(PG_NUMERIC_VALUE) + : com.google.cloud.spanner.Value.numeric(NUMERIC_VALUE)) + .bind("p" + ++param) + .to(STRING_VALUE) + .bind("p" + ++param) + .to( + dialect == Dialect.POSTGRESQL + ? 
com.google.cloud.spanner.Value.pgJsonb(JSON_VALUE) + : com.google.cloud.spanner.Value.json(JSON_VALUE)) + .bind("p" + ++param) + .to(ByteArray.copyFrom(BYTES_VALUE)) + .bind("p" + ++param) + .to(DATE_VALUE) + .bind("p" + ++param) + .to(UUID_VALUE) + .bind("p" + ++param) + .to(TIMESTAMP_VALUE); + if (dialect == Dialect.POSTGRESQL) { + builder.bind("p" + ++param).to(PG_OID_VALUE); + } + builder + .bind("p" + ++param) + .toBoolArray(BOOL_ARRAY_VALUE) + .bind("p" + ++param) + .toInt64Array(INT64_ARRAY_VALUE) + .bind("p" + ++param) + .toFloat32Array(FLOAT32_ARRAY_VALUE) + .bind("p" + ++param) + .toFloat64Array(FLOAT64_ARRAY_VALUE) + .bind("p" + ++param) + .to( + dialect == Dialect.POSTGRESQL + ? com.google.cloud.spanner.Value.pgNumericArray(PG_NUMERIC_ARRAY_VALUE) + : com.google.cloud.spanner.Value.numericArray(NUMERIC_ARRAY_VALUE)) + .bind("p" + ++param) + .toStringArray(STRING_ARRAY_VALUE) + .bind("p" + ++param) + .to( + dialect == Dialect.POSTGRESQL + ? com.google.cloud.spanner.Value.pgJsonbArray(JSON_ARRAY_VALUE) + : com.google.cloud.spanner.Value.jsonArray(JSON_ARRAY_VALUE)) + .bind("p" + ++param) + .toBytesArray(BYTES_ARRAY_VALUE) + .bind("p" + ++param) + .toDateArray(DATE_ARRAY_VALUE) + .bind("p" + ++param) + .toUuidArray(UUID_ARRAY_VALUE) + .bind("p" + ++param) + .toTimestampArray(TIMESTAMP_ARRAY_VALUE); + if (dialect == Dialect.POSTGRESQL) { + builder.bind("p" + ++param).toInt64Array(PG_OID_ARRAY_VALUE); + } + return builder.build(); + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + @Test + public void testSelectAllTypes() { + try (Connection connection = createConnection()) { + try (ResultSet resultSet = connection.executeQuery(SELECT_STATEMENT)) { + assertTrue(resultSet.next()); + + int col = -1; + assertEquals(BOOL_VALUE, resultSet.getBoolean(++col)); + assertEquals(INT64_VALUE, resultSet.getLong(++col)); + assertEquals(FLOAT32_VALUE, resultSet.getFloat(++col), 0.0f); + assertEquals(FLOAT64_VALUE, 
resultSet.getDouble(++col), 0.0d); + if (dialect == Dialect.POSTGRESQL) { + assertEquals(PG_NUMERIC_VALUE, resultSet.getString(++col)); + } else { + assertEquals(NUMERIC_VALUE, resultSet.getBigDecimal(++col)); + } + assertEquals(STRING_VALUE, resultSet.getString(++col)); + assertEquals( + JSON_VALUE, + dialect == Dialect.POSTGRESQL ? resultSet.getPgJsonb(++col) : resultSet.getJson(++col)); + assertArrayEquals(BYTES_VALUE, resultSet.getBytes(++col).toByteArray()); + assertEquals(DATE_VALUE, resultSet.getDate(++col)); + assertEquals(UUID_VALUE, resultSet.getUuid(++col)); + assertEquals(TIMESTAMP_VALUE, resultSet.getTimestamp(++col)); + if (dialect == Dialect.POSTGRESQL) { + assertEquals(PG_OID_VALUE, resultSet.getLong(++col)); + } + + assertEquals(BOOL_ARRAY_VALUE, resultSet.getBooleanList(++col)); + assertEquals(INT64_ARRAY_VALUE, resultSet.getLongList(++col)); + assertEquals(FLOAT32_ARRAY_VALUE, resultSet.getFloatList(++col)); + assertEquals(FLOAT64_ARRAY_VALUE, resultSet.getDoubleList(++col)); + if (dialect == Dialect.POSTGRESQL) { + assertEquals(PG_NUMERIC_ARRAY_VALUE, resultSet.getStringList(++col)); + } else { + assertEquals(NUMERIC_ARRAY_VALUE, resultSet.getBigDecimalList(++col)); + } + assertEquals(STRING_ARRAY_VALUE, resultSet.getStringList(++col)); + assertEquals( + JSON_ARRAY_VALUE, + dialect == Dialect.POSTGRESQL + ? 
resultSet.getPgJsonbList(++col) + : resultSet.getJsonList(++col)); + assertEquals(BYTES_ARRAY_VALUE, resultSet.getBytesList(++col)); + assertEquals(DATE_ARRAY_VALUE, resultSet.getDateList(++col)); + assertEquals(UUID_ARRAY_VALUE, resultSet.getUuidList(++col)); + assertEquals(TIMESTAMP_ARRAY_VALUE, resultSet.getTimestampList(++col)); + if (dialect == Dialect.POSTGRESQL) { + assertEquals(PG_OID_ARRAY_VALUE, resultSet.getLongList(++col)); + } + assertFalse(resultSet.next()); + } + } + } + + @Test + public void testInsertAllTypes() { + try (Connection connection = createConnection()) { + assertEquals(1L, connection.executeUpdate(createInsertStatement(dialect))); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + Map paramTypes = request.getParamTypesMap(); + Map params = request.getParams().getFieldsMap(); + assertEquals(dialect == Dialect.POSTGRESQL ? 24 : 22, paramTypes.size()); + assertEquals(dialect == Dialect.POSTGRESQL ? 24 : 22, params.size()); + + // Verify param types. 
+ ImmutableList expectedTypes; + if (dialect == Dialect.POSTGRESQL) { + expectedTypes = + ImmutableList.of( + TypeCode.BOOL, + TypeCode.INT64, + TypeCode.FLOAT32, + TypeCode.FLOAT64, + TypeCode.NUMERIC, + TypeCode.STRING, + TypeCode.JSON, + TypeCode.BYTES, + TypeCode.DATE, + TypeCode.UUID, + TypeCode.TIMESTAMP, + TypeCode.INT64); + } else { + expectedTypes = + ImmutableList.of( + TypeCode.BOOL, + TypeCode.INT64, + TypeCode.FLOAT32, + TypeCode.FLOAT64, + TypeCode.NUMERIC, + TypeCode.STRING, + TypeCode.JSON, + TypeCode.BYTES, + TypeCode.DATE, + TypeCode.UUID, + TypeCode.TIMESTAMP); + } + for (int col = 0; col < expectedTypes.size(); col++) { + assertEquals(expectedTypes.get(col), paramTypes.get("p" + (col + 1)).getCode()); + int arrayCol = col + expectedTypes.size(); + assertEquals(TypeCode.ARRAY, paramTypes.get("p" + (arrayCol + 1)).getCode()); + assertEquals( + expectedTypes.get(col), + paramTypes.get("p" + (arrayCol + 1)).getArrayElementType().getCode()); + } + + // Verify param values. + int col = 0; + assertEquals(BOOL_VALUE, params.get("p" + ++col).getBoolValue()); + assertEquals(String.valueOf(INT64_VALUE), params.get("p" + ++col).getStringValue()); + assertEquals(FLOAT32_VALUE, params.get("p" + ++col).getNumberValue(), 0.0d); + assertEquals(FLOAT64_VALUE, params.get("p" + ++col).getNumberValue(), 0.0d); + assertEquals( + dialect == Dialect.POSTGRESQL ? 
PG_NUMERIC_VALUE : NUMERIC_VALUE.toEngineeringString(), + params.get("p" + ++col).getStringValue()); + assertEquals(STRING_VALUE, params.get("p" + ++col).getStringValue()); + assertEquals(JSON_VALUE, params.get("p" + ++col).getStringValue()); + assertEquals( + Base64.getEncoder().encodeToString(BYTES_VALUE), + params.get("p" + ++col).getStringValue()); + assertEquals(DATE_VALUE.toString(), params.get("p" + ++col).getStringValue()); + assertEquals(UUID_VALUE.toString(), params.get("p" + ++col).getStringValue()); + assertEquals(TIMESTAMP_VALUE.toString(), params.get("p" + ++col).getStringValue()); + if (dialect == Dialect.POSTGRESQL) { + assertEquals(String.valueOf(PG_OID_VALUE), params.get("p" + ++col).getStringValue()); + } + + assertEquals( + BOOL_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map(value -> value.hasNullValue() ? null : value.getBoolValue()) + .collect(Collectors.toList())); + assertEquals( + INT64_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map(value -> value.hasNullValue() ? null : Long.valueOf(value.getStringValue())) + .collect(Collectors.toList())); + assertEquals( + FLOAT32_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map(value -> value.hasNullValue() ? null : (float) value.getNumberValue()) + .collect(Collectors.toList())); + assertEquals( + FLOAT64_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map(value -> value.hasNullValue() ? null : value.getNumberValue()) + .collect(Collectors.toList())); + if (dialect == Dialect.POSTGRESQL) { + assertEquals( + PG_NUMERIC_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map(value -> value.hasNullValue() ? null : value.getStringValue()) + .collect(Collectors.toList())); + } else { + assertEquals( + NUMERIC_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map(value -> value.hasNullValue() ? 
null : new BigDecimal(value.getStringValue())) + .collect(Collectors.toList())); + } + assertEquals( + STRING_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map(value -> value.hasNullValue() ? null : value.getStringValue()) + .collect(Collectors.toList())); + assertEquals( + JSON_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map(value -> value.hasNullValue() ? null : value.getStringValue()) + .collect(Collectors.toList())); + assertEquals( + BYTES_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map( + value -> + value.hasNullValue() ? null : ByteArray.fromBase64(value.getStringValue())) + .collect(Collectors.toList())); + assertEquals( + DATE_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map(value -> value.hasNullValue() ? null : Date.parseDate(value.getStringValue())) + .collect(Collectors.toList())); + assertEquals( + UUID_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map(value -> value.hasNullValue() ? null : UUID.fromString(value.getStringValue())) + .collect(Collectors.toList())); + assertEquals( + TIMESTAMP_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map( + value -> + value.hasNullValue() + ? null + : Timestamp.parseTimestamp(value.getStringValue())) + .collect(Collectors.toList())); + if (dialect == Dialect.POSTGRESQL) { + assertEquals( + PG_OID_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map(value -> value.hasNullValue() ? 
null : Long.valueOf(value.getStringValue())) + .collect(Collectors.toList())); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AnalyzeStatementsTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AnalyzeStatementsTest.java new file mode 100644 index 000000000000..6f76963fef7e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AnalyzeStatementsTest.java @@ -0,0 +1,418 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.ReadContext.QueryAnalyzeMode; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ExecuteSqlRequest.QueryMode; +import com.google.spanner.v1.PlanNode; +import com.google.spanner.v1.QueryPlan; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeCode; +import java.util.List; +import org.junit.After; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class AnalyzeStatementsTest extends AbstractMockServerTest { + private static final Statement PLAN_QUERY = + Statement.of("SELECT * FROM SomeTable WHERE Key LIKE @param ORDER BY Value"); + private static final Statement PLAN_UPDATE = + Statement.of("UPDATE SomeTable SET Value=Value+1 WHERE Key LIKE @param"); + + @BeforeClass + public static void setupAnalyzeResults() { + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.query( + PLAN_QUERY, + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + 
.setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .setName("Key") + .build()) + .addFields( + Field.newBuilder() + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .setName("Value") + .build()) + .build()) + .setUndeclaredParameters( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .setName("param") + .build()) + .build()) + .build()) + .setStats( + ResultSetStats.newBuilder() + .setQueryPlan( + QueryPlan.newBuilder() + .addPlanNodes( + PlanNode.newBuilder().setDisplayName("some-plan-node").build()) + .build()) + .build()) + .build())); + mockSpanner.putStatementResults( + MockSpannerServiceImpl.StatementResult.query( + PLAN_UPDATE, + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setUndeclaredParameters( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .setName("param") + .build()) + .build()) + .build()) + .setStats( + ResultSetStats.newBuilder() + .setQueryPlan( + QueryPlan.newBuilder() + .addPlanNodes( + PlanNode.newBuilder().setDisplayName("some-plan-node").build()) + .build()) + .build()) + .build())); + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + @Test + public void testAnalyzeQuery() { + for (boolean readOnly : new boolean[] {true, false}) { + for (boolean autocommit : new boolean[] {true, false}) { + mockSpanner.clearRequests(); + + try (Connection connection = createConnection()) { + connection.setAutocommit(autocommit); + connection.setReadOnly(readOnly); + + try (ResultSet resultSet = connection.analyzeQuery(PLAN_QUERY, QueryAnalyzeMode.PLAN)) { + // Stats are only available after ResultSet#next() has returned false. 
+ assertFalse(resultSet.next()); + + assertNotNull(resultSet.getStats()); + assertNotNull(resultSet.getStats().getQueryPlan()); + + assertNotNull(resultSet.getMetadata()); + assertEquals(1, resultSet.getMetadata().getUndeclaredParameters().getFieldsCount()); + assertEquals( + Type.newBuilder().setCode(TypeCode.STRING).build(), + resultSet.getMetadata().getUndeclaredParameters().getFields(0).getType()); + assertEquals( + "param", resultSet.getMetadata().getUndeclaredParameters().getFields(0).getName()); + } + } + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(1, requests.size()); + ExecuteSqlRequest request = requests.get(0); + assertEquals(PLAN_QUERY.getSql(), request.getSql()); + assertEquals(QueryMode.PLAN, request.getQueryMode()); + if (autocommit) { + // Autocommit should use a single-use read-only transaction. + assertTrue(request.getTransaction().hasSingleUse()); + assertTrue(request.getTransaction().getSingleUse().hasReadOnly()); + } else { + // Non-autocommit should start a transaction. + if (readOnly) { + // Read-only transaction begin is not inlined. 
+ BeginTransactionRequest beginTransactionRequest = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class).stream() + .findFirst() + .orElse(null); + assertNotNull(beginTransactionRequest); + assertTrue(beginTransactionRequest.getOptions().hasReadOnly()); + assertTrue(request.getTransaction().hasId()); + } else { + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + } + } + } + } + } + + @Test + public void testAnalyzeUpdate() { + for (boolean autocommit : new boolean[] {true, false}) { + mockSpanner.clearRequests(); + + try (Connection connection = createConnection()) { + connection.setAutocommit(autocommit); + + ResultSetStats stats = connection.analyzeUpdate(PLAN_UPDATE, QueryAnalyzeMode.PLAN); + assertNotNull(stats); + assertNotNull(stats.getQueryPlan()); + } + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(1, requests.size()); + ExecuteSqlRequest request = requests.get(0); + assertEquals(PLAN_UPDATE.getSql(), request.getSql()); + assertEquals(QueryMode.PLAN, request.getQueryMode()); + + // As it is a DML statement, we should always start a read/write transaction, even though it + // is not executed. This is required by Cloud Spanner. + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + + if (autocommit) { + // The read/write transaction should automatically be committed in case of autocommit. 
+ assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } else { + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + } + } + + @Test + public void testAnalyzeUpdateStatement() { + for (boolean autocommit : new boolean[] {true, false}) { + mockSpanner.clearRequests(); + + try (Connection connection = createConnection()) { + connection.setAutocommit(autocommit); + + try (ResultSet resultSet = + connection.analyzeUpdateStatement(PLAN_UPDATE, QueryAnalyzeMode.PLAN)) { + assertFalse(resultSet.next()); + + ResultSetStats stats = resultSet.getStats(); + assertNotNull(stats); + assertNotNull(stats.getQueryPlan()); + + assertNotNull(resultSet.getMetadata()); + assertEquals(1, resultSet.getMetadata().getUndeclaredParameters().getFieldsCount()); + assertEquals( + Type.newBuilder().setCode(TypeCode.STRING).build(), + resultSet.getMetadata().getUndeclaredParameters().getFields(0).getType()); + assertEquals( + "param", resultSet.getMetadata().getUndeclaredParameters().getFields(0).getName()); + } + } + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(1, requests.size()); + ExecuteSqlRequest request = requests.get(0); + assertEquals(PLAN_UPDATE.getSql(), request.getSql()); + assertEquals(QueryMode.PLAN, request.getQueryMode()); + + // As it is a DML statement, we should always start a read/write transaction, even though it + // is not executed. This is required by Cloud Spanner. + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + + if (autocommit) { + // The read/write transaction should automatically be committed in case of autocommit. 
+ assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } else { + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + } + } + + @Test + public void testAnalyzeUpdateReadOnly() { + for (boolean autocommit : new boolean[] {true, false}) { + mockSpanner.clearRequests(); + + try (Connection connection = createConnection()) { + connection.setReadOnly(true); + connection.setAutocommit(autocommit); + + SpannerException exception = + assertThrows( + SpannerException.class, + () -> connection.analyzeUpdate(PLAN_UPDATE, QueryAnalyzeMode.PLAN)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } + + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + } + + @Test + public void testAnalyzeUpdateStatementWithQuery() { + for (boolean autocommit : new boolean[] {true, false}) { + mockSpanner.clearRequests(); + + try (Connection connection = createConnection()) { + connection.setReadOnly(true); + connection.setAutocommit(autocommit); + + SpannerException exception = + assertThrows( + SpannerException.class, + () -> connection.analyzeUpdateStatement(PLAN_QUERY, QueryAnalyzeMode.PLAN)); + assertEquals(ErrorCode.INVALID_ARGUMENT, exception.getErrorCode()); + } + + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + } + + @Test + public void testAnalyzeUpdateStatementReadOnly() { + for (boolean autocommit : new boolean[] {true, false}) { + mockSpanner.clearRequests(); + + try (Connection connection = createConnection()) { + connection.setReadOnly(true); + connection.setAutocommit(autocommit); + + SpannerException exception = + assertThrows( + SpannerException.class, + () -> connection.analyzeUpdateStatement(PLAN_UPDATE, QueryAnalyzeMode.PLAN)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); 
+ } + + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + } + + @Test + public void testAnalyzeUpdateDdlBatch() { + try (Connection connection = createConnection()) { + connection.startBatchDdl(); + + SpannerException exception = + assertThrows( + SpannerException.class, + () -> connection.analyzeUpdate(PLAN_UPDATE, QueryAnalyzeMode.PLAN)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } + + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testAnalyzeUpdateStatementDdlBatch() { + try (Connection connection = createConnection()) { + connection.startBatchDdl(); + + SpannerException exception = + assertThrows( + SpannerException.class, + () -> connection.analyzeUpdateStatement(PLAN_UPDATE, QueryAnalyzeMode.PLAN)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } + + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testAnalyzeUpdateDmlBatch_AutoCommit() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.startBatchDml(); + + SpannerException exception = + assertThrows( + SpannerException.class, + () -> connection.analyzeUpdate(PLAN_UPDATE, QueryAnalyzeMode.PLAN)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } + + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testAnalyzeUpdateDmlBatch_Transactional() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.startBatchDml(); + + 
assertNotNull(connection.analyzeUpdate(PLAN_UPDATE, QueryAnalyzeMode.PLAN)); + assertEquals(-1L, connection.executeUpdate(INSERT_STATEMENT)); + connection.runBatch(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + } + } + + @Test + public void testAnalyzeUpdateStatementDmlBatch_AutoCommit() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.startBatchDml(); + + SpannerException exception = + assertThrows( + SpannerException.class, + () -> connection.analyzeUpdateStatement(PLAN_UPDATE, QueryAnalyzeMode.PLAN)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } + + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testAnalyzeUpdateStatementDmlBatch_Transactional() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.startBatchDml(); + + connection.analyzeUpdateStatement(PLAN_UPDATE, QueryAnalyzeMode.PLAN); + assertEquals(-1L, connection.executeUpdate(INSERT_STATEMENT)); + connection.runBatch(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AsyncStatementResultImplTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AsyncStatementResultImplTest.java new file mode 100644 index 000000000000..0a47e3ea2df9 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AsyncStatementResultImplTest.java @@ -0,0 +1,99 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.mock; + +import com.google.api.core.ApiFutures; +import com.google.cloud.spanner.AsyncResultSet; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.connection.StatementResult.ResultType; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class AsyncStatementResultImplTest { + + @Test + public void testNoResultGetResultSetAsync() { + AsyncStatementResult subject = + AsyncStatementResultImpl.noResult(ApiFutures.immediateFuture(null)); + assertThat(subject.getResultType()).isEqualTo(ResultType.NO_RESULT); + try { + subject.getResultSetAsync(); + fail("Expected exception"); + } catch (SpannerException ex) { + assertThat(ex.getErrorCode()).isEqualTo(ErrorCode.FAILED_PRECONDITION); + } + } + + @Test + public void testNoResultGetUpdateCountAsync() { + AsyncStatementResult subject = + AsyncStatementResultImpl.noResult(ApiFutures.immediateFuture(null)); + assertThat(subject.getResultType()).isEqualTo(ResultType.NO_RESULT); + try { + subject.getUpdateCountAsync(); + fail("Expected exception"); + } catch (SpannerException ex) { + assertThat(ex.getErrorCode()).isEqualTo(ErrorCode.FAILED_PRECONDITION); + } + } + + @Test + public void testResultSetGetResultSetAsync() { 
+ AsyncStatementResult subject = AsyncStatementResultImpl.of(mock(AsyncResultSet.class)); + assertThat(subject.getResultType()).isEqualTo(ResultType.RESULT_SET); + assertThat(subject.getResultSetAsync()).isNotNull(); + } + + @Test + public void testResultSetGetUpdateCountAsync() { + AsyncStatementResult subject = AsyncStatementResultImpl.of(mock(AsyncResultSet.class)); + assertThat(subject.getResultType()).isEqualTo(ResultType.RESULT_SET); + try { + subject.getUpdateCountAsync(); + fail("Expected exception"); + } catch (SpannerException ex) { + assertThat(ex.getErrorCode()).isEqualTo(ErrorCode.FAILED_PRECONDITION); + } + } + + @Test + public void testUpdateCountGetResultSetAsync() { + AsyncStatementResult subject = AsyncStatementResultImpl.of(ApiFutures.immediateFuture(1L)); + assertThat(subject.getResultType()).isEqualTo(ResultType.UPDATE_COUNT); + try { + subject.getResultSetAsync(); + fail("Expected exception"); + } catch (SpannerException ex) { + assertThat(ex.getErrorCode()).isEqualTo(ErrorCode.FAILED_PRECONDITION); + } + } + + @Test + public void testUpdateCountGetUpdateCountAsync() { + AsyncStatementResult subject = AsyncStatementResultImpl.of(ApiFutures.immediateFuture(1L)); + assertThat(subject.getResultType()).isEqualTo(ResultType.UPDATE_COUNT); + assertThat(get(subject.getUpdateCountAsync())).isEqualTo(1L); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AutoCommitMockServerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AutoCommitMockServerTest.java new file mode 100644 index 000000000000..961d90e14d85 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AutoCommitMockServerTest.java @@ -0,0 +1,263 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.ConnectionProperties.DEFAULT_ISOLATION_LEVEL; +import static com.google.cloud.spanner.connection.ConnectionProperties.READ_LOCK_MODE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest.ITConnection; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode; +import io.grpc.Status; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class AutoCommitMockServerTest extends AbstractMockServerTest 
{ + + @Parameter(0) + public IsolationLevel isolationLevel; + + @Parameter(1) + public ReadLockMode readLockMode; + + @Parameters(name = "isolationLevel = {0}, readLockMode = {1}") + public static Collection data() { + List result = new ArrayList<>(); + for (IsolationLevel isolationLevel : DEFAULT_ISOLATION_LEVEL.getValidValues()) { + for (ReadLockMode readLockMode : READ_LOCK_MODE.getValidValues()) { + result.add(new Object[] {isolationLevel, readLockMode}); + } + } + return result; + } + + @Override + protected ITConnection createConnection() { + return createConnection( + Collections.emptyList(), + Collections.emptyList(), + String.format( + ";default_isolation_level=%s;read_lock_mode=%s", isolationLevel, readLockMode)); + } + + @Test + public void testQuery() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + //noinspection EmptyTryBlock + try (ResultSet ignore = connection.executeQuery(SELECT1_STATEMENT)) {} + try (ResultSet ignore = + connection.executeQuery(Statement.of("SHOW VARIABLE READ_LOCK_MODE"))) {} + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.getTransaction().hasSingleUse()); + assertTrue(request.getTransaction().getSingleUse().hasReadOnly()); + assertEquals( + IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED, + request.getTransaction().getSingleUse().getIsolationLevel()); + assertFalse(request.getLastStatement()); + } + + @Test + public void testDml() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.executeUpdate(INSERT_STATEMENT); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); 
+ assertEquals(isolationLevel, request.getTransaction().getBegin().getIsolationLevel()); + assertEquals( + readLockMode, request.getTransaction().getBegin().getReadWrite().getReadLockMode()); + assertTrue(request.getLastStatement()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testDmlFailed() { + Statement invalidInsert = Statement.of("insert into my_table (id, name) values (1, 'test')"); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.exception( + invalidInsert, + Status.ALREADY_EXISTS.withDescription("Row 1 already exists").asRuntimeException())); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + SpannerException exception = + assertThrows(SpannerException.class, () -> connection.executeUpdate(invalidInsert)); + assertEquals(ErrorCode.ALREADY_EXISTS, exception.getErrorCode()); + } + assertEquals(0, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertEquals(isolationLevel, request.getTransaction().getBegin().getIsolationLevel()); + assertEquals( + readLockMode, request.getTransaction().getBegin().getReadWrite().getReadLockMode()); + assertTrue(request.getLastStatement()); + // There should be no rollback request on the server, as there was no transaction ID returned + // to the client. 
+ assertEquals(0, mockSpanner.countRequestsOfType(RollbackRequest.class)); + } + + @Test + public void testDmlReturning() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + //noinspection EmptyTryBlock + try (ResultSet ignore = connection.executeQuery(INSERT_RETURNING_STATEMENT)) {} + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertEquals(isolationLevel, request.getTransaction().getBegin().getIsolationLevel()); + assertEquals( + readLockMode, request.getTransaction().getBegin().getReadWrite().getReadLockMode()); + assertTrue(request.getLastStatement()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testBatchDml() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.startBatchDml(); + connection.executeUpdate(INSERT_STATEMENT); + connection.executeUpdate(INSERT_STATEMENT); + connection.runBatch(); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertEquals(isolationLevel, request.getTransaction().getBegin().getIsolationLevel()); + assertEquals( + readLockMode, request.getTransaction().getBegin().getReadWrite().getReadLockMode()); + assertTrue(request.getLastStatements()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testPartitionedDml() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + 
connection.setAutocommitDmlMode(AutocommitDmlMode.PARTITIONED_NON_ATOMIC); + connection.executeUpdate(INSERT_STATEMENT); + } + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + BeginTransactionRequest beginRequest = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class).get(0); + assertTrue(beginRequest.getOptions().hasPartitionedDml()); + assertEquals( + IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED, beginRequest.getOptions().getIsolationLevel()); + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.getTransaction().hasId()); + assertFalse(request.getLastStatement()); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testDmlAborted() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + mockSpanner.abortNextTransaction(); + connection.executeUpdate(INSERT_STATEMENT); + } + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + for (ExecuteSqlRequest request : mockSpanner.getRequestsOfType(ExecuteSqlRequest.class)) { + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertEquals(isolationLevel, request.getTransaction().getBegin().getIsolationLevel()); + assertEquals( + readLockMode, request.getTransaction().getBegin().getReadWrite().getReadLockMode()); + assertTrue(request.getLastStatement()); + } + assertEquals(2, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testDmlReturningAborted() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + mockSpanner.abortNextTransaction(); + //noinspection EmptyTryBlock + try (ResultSet ignore = connection.executeQuery(INSERT_RETURNING_STATEMENT)) {} + } + assertEquals(2, 
mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + for (ExecuteSqlRequest request : mockSpanner.getRequestsOfType(ExecuteSqlRequest.class)) { + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertEquals(isolationLevel, request.getTransaction().getBegin().getIsolationLevel()); + assertEquals( + readLockMode, request.getTransaction().getBegin().getReadWrite().getReadLockMode()); + assertTrue(request.getLastStatement()); + } + assertEquals(2, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testBatchDmlAborted() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + mockSpanner.abortNextTransaction(); + connection.startBatchDml(); + connection.executeUpdate(INSERT_STATEMENT); + connection.executeUpdate(INSERT_STATEMENT); + connection.runBatch(); + } + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + for (ExecuteBatchDmlRequest request : + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class)) { + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertEquals(isolationLevel, request.getTransaction().getBegin().getIsolationLevel()); + assertEquals( + readLockMode, request.getTransaction().getBegin().getReadWrite().getReadLockMode()); + assertTrue(request.getLastStatements()); + } + assertEquals(2, mockSpanner.countRequestsOfType(CommitRequest.class)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AutoDmlBatchMockServerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AutoDmlBatchMockServerTest.java new file mode 100644 index 000000000000..b359bcb60cc4 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AutoDmlBatchMockServerTest.java @@ -0,0 +1,822 @@ +/* + * 
Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.AbortedDueToConcurrentModificationException; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.ReadContext.QueryAnalyzeMode; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerBatchUpdateException; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest.ITConnection; +import com.google.cloud.spanner.connection.StatementResult.ResultType; +import com.google.common.collect.ImmutableList; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.RollbackRequest; +import io.grpc.Status; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class AutoDmlBatchMockServerTest extends AbstractMockServerTest { + + protected 
ITConnection createConnection() { + return createConnection(";auto_batch_dml=true"); + } + + @Test + public void testDmlInAutocommit_doesNotUseAutoBatching() { + // auto_batch_dml does not have any effect in auto-commit mode, as there is no guarantee that + // the application would ever call commit() or any other statement that would automatically + // flush the batch. + try (Connection connection = createConnection()) { + assertTrue(connection.isAutoBatchDml()); + connection.setAutocommit(true); + + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + } + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(2, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testQueryAfterDml() { + try (Connection connection = createConnection()) { + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + try (ResultSet resultSet = connection.executeQuery(SELECT1_STATEMENT)) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + + connection.commit(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(2, request.getStatementsCount()); + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testDmlWithReturningAfterDml() { + try (Connection connection = createConnection()) { + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + // DML with a THEN RETURN clause cannot be batched. 
This therefore flushes the batch and + // executes the INSERT ... THEN RETURN statement as a separate ExecuteSqlRequest. + try (ResultSet resultSet = connection.executeQuery(INSERT_RETURNING_STATEMENT)) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + + connection.commit(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(2, request.getStatementsCount()); + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testDmlWithReturningAfterDml_usingExecute() { + try (Connection connection = createConnection()) { + // Both execute(..) and executeUpdate(..) should trigger an auto-DML-batch, as long as the + // statement is suited for that. + connection.execute(INSERT_STATEMENT); + connection.execute(INSERT_STATEMENT); + // INSERT ... THEN RETURN is not suited for DML batching. This therefore automatically + // flushes the DML batch and executes it as a separate request. 
+ StatementResult result = connection.execute(INSERT_RETURNING_STATEMENT); + assertEquals(ResultType.RESULT_SET, result.getResultType()); + try (ResultSet resultSet = result.getResultSet()) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + + connection.commit(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(2, request.getStatementsCount()); + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testDmlAfterQuery() { + try (Connection connection = createConnection()) { + try (ResultSet resultSet = connection.executeQuery(SELECT1_STATEMENT)) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + + connection.commit(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(2, request.getStatementsCount()); + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testCommitAfterDml() { + try (Connection connection = createConnection()) { + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + connection.commit(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + 
mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(2, request.getStatementsCount()); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testRollbackAfterDml() { + try (Connection connection = createConnection()) { + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + connection.rollback(); + } + + // Rolling back the transaction in the middle of an auto-batch should abort the batch. + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + // The rollback should be a no-op as there are no statements executed. + assertEquals(0, mockSpanner.countRequestsOfType(RollbackRequest.class)); + } + + @Test + public void testSetAfterDml() { + try (Connection connection = createConnection()) { + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + connection.execute(Statement.of("set auto_partition_mode=true")); + + connection.commit(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(2, request.getStatementsCount()); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testSetBetweenDml() { + try (Connection connection = createConnection()) { + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + // A SET ... statement does not auto-flush a DML batch. 
+ connection.execute(Statement.of("set auto_partition_mode=true")); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + + connection.commit(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(2, request.getStatementsCount()); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testShowAfterDml() { + try (Connection connection = createConnection()) { + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + connection.execute(Statement.of("show variable auto_partition_mode")); + + connection.commit(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(2, request.getStatementsCount()); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testShowBetweenDml() { + try (Connection connection = createConnection()) { + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + // A SHOW ... statement does not auto-flush a DML batch. 
+ connection.execute(Statement.of("show variable auto_partition_mode")); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + + connection.commit(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(2, request.getStatementsCount()); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testRunBatchInAutoBatch() { + try (Connection connection = createConnection()) { + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + + // This flushes the current batch. + assertArrayEquals(new long[] {1L, 1L}, connection.runBatch()); + + // This starts a new batch. + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + + connection.commit(); + } + + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request1 = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(2, request1.getStatementsCount()); + ExecuteBatchDmlRequest request2 = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(1); + assertEquals(1, request2.getStatementsCount()); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testStartBatchDmlInAutoBatch() { + try (Connection connection = createConnection()) { + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + + // Explicitly starting a new batch when an auto-batch is already active is not supported. 
+ SpannerException exception = assertThrows(SpannerException.class, connection::startBatchDml); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + assertEquals( + "FAILED_PRECONDITION: Cannot start a DML batch when a batch is already active", + exception.getMessage()); + + // The above error does not invalidate the transaction, so we can still commit. + connection.commit(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(2, request.getStatementsCount()); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testStartBatchDdlInAutoBatch() { + try (Connection connection = createConnection()) { + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + + // Explicitly starting a DDL batch when an auto-batch is already active is not supported. + SpannerException exception = assertThrows(SpannerException.class, connection::startBatchDdl); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + assertEquals( + "FAILED_PRECONDITION: Cannot start a DDL batch when a batch is already active", + exception.getMessage()); + + // The above error does not invalidate the transaction, so we can still commit. 
+ connection.commit(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(2, request.getStatementsCount()); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testExecuteDdlInAutoBatch() { + try (Connection connection = createConnection()) { + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + + SpannerException exception = + assertThrows( + SpannerException.class, () -> connection.execute(Statement.of("CREATE TABLE foo"))); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + assertEquals( + "FAILED_PRECONDITION: DDL-statements are not allowed inside a read/write transaction.", + exception.getMessage()); + + // The above error does not invalidate the transaction, so we can still commit. + connection.commit(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(2, request.getStatementsCount()); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testSavepointBetweenDml() { + try (Connection connection = createConnection()) { + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + // Setting a savepoint in the middle of an auto-DML-batch flushes the batch. 
+ connection.savepoint("s1"); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + + connection.commit(); + } + + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testRollbackToSavepointBetweenDml() { + try (Connection connection = createConnection()) { + connection.setSavepointSupport(SavepointSupport.ENABLED); + + connection.savepoint("s1"); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + // Rolling back to a savepoint aborts the current batch. + connection.rollbackToSavepoint("s1"); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + + connection.commit(); + } + + // We only get one batch, as the rollback to savepoint statement aborts the first part of the + // batch. + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(1, request.getStatementsCount()); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testReleaseSavepointBetweenDml() { + try (Connection connection = createConnection()) { + connection.savepoint("s1"); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + // Releasing a savepoint during a batch also flushes the batch. 
+ connection.releaseSavepoint("s1"); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + + connection.commit(); + } + + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testAbortBatchAfterDml() { + try (Connection connection = createConnection()) { + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + connection.abortBatch(); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + + connection.commit(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(1, request.getStatementsCount()); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testExecuteBatchDmlAfterDml() { + try (Connection connection = createConnection()) { + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + + // Executing a batch of DML statements should join the existing DML batch. 
+ assertArrayEquals( + new long[] {1L, 1L}, + connection.executeBatchUpdate(ImmutableList.of(INSERT_STATEMENT, INSERT_STATEMENT))); + + connection.commit(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(3, request.getStatementsCount()); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testExecuteBatchDmlAndThenDml() { + try (Connection connection = createConnection()) { + // Executing a batch of DML statements should also initiate an auto-DML-batch. + connection.executeBatchUpdate(ImmutableList.of(INSERT_STATEMENT, INSERT_STATEMENT)); + + // Executing a single DML statement should join the existing DML batch. + connection.executeUpdate(INSERT_STATEMENT); + + connection.commit(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(3, request.getStatementsCount()); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testAnalyzeUpdateInAutoDmlBatch() { + try (Connection connection = createConnection()) { + connection.executeUpdate(INSERT_STATEMENT); + // Analyzing a DML statement does not flush the batch. 
+ connection.analyzeUpdateStatement(INSERT_STATEMENT, QueryAnalyzeMode.PLAN); + connection.executeUpdate(INSERT_STATEMENT); + + connection.commit(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(2, request.getStatementsCount()); + // The analyzeUpdateStatement(..) call is executed as a separate ExecuteSqlRequest. + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testQueryWithErrorAfterDml() { + Statement invalidSelect = Statement.of("SELECT * FROM foo"); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.exception( + invalidSelect, + Status.NOT_FOUND.withDescription("Table foo not found").asRuntimeException())); + + try (Connection connection = createConnection()) { + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + SpannerException exception = + assertThrows(SpannerException.class, () -> connection.executeQuery(invalidSelect)); + assertEquals(ErrorCode.NOT_FOUND, exception.getErrorCode()); + + connection.commit(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(2, request.getStatementsCount()); + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testDmlWithErrorInBatch() { + Statement invalidInsert = Statement.of("INSERT INTO foo (id) values (1)"); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.exception( + invalidInsert, + Status.NOT_FOUND.withDescription("Table foo not 
found").asRuntimeException())); + + try (Connection connection = createConnection()) { + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + // This statement is invalid and will eventually throw an exception. This does not happen + // until the batch is flushed. + assertEquals(1L, connection.executeUpdate(invalidInsert)); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + + // This SELECT statement flushes the batch and is the one that gets the exception, even + // though the statement itself is valid. + SpannerBatchUpdateException exception = + assertThrows( + SpannerBatchUpdateException.class, () -> connection.executeQuery(SELECT1_STATEMENT)); + assertEquals(ErrorCode.NOT_FOUND, exception.getErrorCode()); + // The batch exception contains the update count for the successful DML statements. + assertArrayEquals(new long[] {1L}, exception.getUpdateCounts()); + + connection.commit(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(3, request.getStatementsCount()); + // The query is never executed, as the DML batch that is being flushed before the query is + // executed fails. + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testUpdateCount() { + try (Connection connection = createConnection()) { + + // Setting a different update count is reflected in the update count that is returned by + // an auto-batch. + try { + // Disable update count verification to prevent errors. + connection.setAutoBatchDmlUpdateCountVerification(false); + + connection.setAutoBatchDmlUpdateCount(2L); + assertEquals(2L, connection.executeUpdate(INSERT_STATEMENT)); + // The update count can be modified during the batch. 
+ connection.setAutoBatchDmlUpdateCount(3L); + assertEquals(3L, connection.executeUpdate(INSERT_STATEMENT)); + + connection.commit(); + + // The auto-batch update count setting is not used for explicit batches. + connection.startBatchDml(); + assertEquals(-1L, connection.executeUpdate(INSERT_STATEMENT)); + connection.runBatch(); + connection.commit(); + } finally { + // TODO: Remove once a normal connection variable is used for this. + System.clearProperty("spanner.auto_batch_dml_update_count"); + System.clearProperty("spanner.auto_batch_dml_update_count_verification"); + } + } + + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals(2, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testUpdateCountVerification_failsIfDifferent() { + try (Connection connection = createConnection()) { + try { + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + // Set a different (expected) update count. This will cause the batch to fail, as the + // actual update count will be 1. + connection.setAutoBatchDmlUpdateCount(3L); + assertEquals(3L, connection.executeUpdate(INSERT_STATEMENT)); + + assertThrows(SpannerException.class, connection::commit); + } finally { + // TODO: Remove once a normal connection variable is used for this. 
+ System.clearProperty("spanner.auto_batch_dml_update_count"); + } + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testUpdateCountVerification_succeedsIfSame() { + Statement statement1 = Statement.of("insert into foo (id, value) values (1, 'One')"); + Statement statement2 = Statement.of("insert into foo (id, value) values (2, 'Two')"); + Statement statement3 = Statement.of("insert into foo (id, value) values (3, 'Three')"); + Statement statement4 = Statement.of("insert into foo (id, value) values (4, 'Four')"); + Statement statement5 = Statement.of("insert into foo (id, value) values (5, 'Five')"); + mockSpanner.putStatementResult(MockSpannerServiceImpl.StatementResult.update(statement1, 1L)); + mockSpanner.putStatementResult(MockSpannerServiceImpl.StatementResult.update(statement2, 2L)); + mockSpanner.putStatementResult(MockSpannerServiceImpl.StatementResult.update(statement3, 3L)); + mockSpanner.putStatementResult(MockSpannerServiceImpl.StatementResult.update(statement4, 3L)); + mockSpanner.putStatementResult(MockSpannerServiceImpl.StatementResult.update(statement5, 4L)); + + try (Connection connection = createConnection()) { + try { + connection.setAutoBatchDmlUpdateCount(1L); + assertEquals(1L, connection.executeUpdate(statement1)); + + connection.setAutoBatchDmlUpdateCount(2L); + assertEquals(2L, connection.executeUpdate(statement2)); + + connection.setAutoBatchDmlUpdateCount(3L); + assertArrayEquals( + new long[] {3L, 3L}, + connection.executeBatchUpdate(ImmutableList.of(statement3, statement4))); + + connection.setAutoBatchDmlUpdateCount(4L); + assertEquals(4L, connection.executeUpdate(statement5)); + + connection.commit(); + } finally { + // TODO: Remove once a normal connection variable is used for this. 
+ System.clearProperty("spanner.auto_batch_dml_update_count"); + } + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(5, request.getStatementsCount()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testTransactionRetry() { + try (Connection connection = createConnection()) { + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + try (ResultSet resultSet = connection.executeQuery(SELECT1_STATEMENT)) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + + mockSpanner.abortNextStatement(); + connection.commit(); + } + + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(2, request.getStatementsCount()); + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(2, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testTransactionRetryFails() { + try (Connection connection = createConnection()) { + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + try (ResultSet resultSet = connection.executeQuery(SELECT1_STATEMENT)) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + + // Modify the update count that is returned by the insert statement. This will cause the + // retry attempt to fail. 
+ mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.update(INSERT_STATEMENT, 2L)); + mockSpanner.abortNextStatement(); + assertThrows(AbortedDueToConcurrentModificationException.class, connection::commit); + } finally { + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.update(INSERT_STATEMENT, 1L)); + } + + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(2, request.getStatementsCount()); + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testSqlStatements() { + try (Connection connection = createConnection()) { + for (boolean enable : new boolean[] {true, false}) { + connection.execute(Statement.of("set auto_batch_dml = " + enable)); + try (ResultSet resultSet = + connection.executeQuery(Statement.of("show variable auto_batch_dml"))) { + assertTrue(resultSet.next()); + assertEquals(enable, resultSet.getBoolean("AUTO_BATCH_DML")); + assertFalse(resultSet.next()); + } + } + for (boolean enable : new boolean[] {true, false}) { + connection.execute( + Statement.of("set auto_batch_dml_update_count_verification = " + enable)); + try (ResultSet resultSet = + connection.executeQuery( + Statement.of("show variable auto_batch_dml_update_count_verification"))) { + assertTrue(resultSet.next()); + assertEquals(enable, resultSet.getBoolean("AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION")); + assertFalse(resultSet.next()); + } + } + for (long updateCount : new long[] {0L, 5L, 100L}) { + connection.execute(Statement.of("set auto_batch_dml_update_count = " + updateCount)); + try (ResultSet resultSet = + connection.executeQuery(Statement.of("show variable auto_batch_dml_update_count"))) { + assertTrue(resultSet.next()); + assertEquals(updateCount, 
resultSet.getLong("AUTO_BATCH_DML_UPDATE_COUNT")); + assertFalse(resultSet.next()); + } + } + SpannerException exception = + assertThrows( + SpannerException.class, + () -> connection.execute(Statement.of("set auto_batch_dml_update_count=-1"))); + assertEquals(ErrorCode.INVALID_ARGUMENT, exception.getErrorCode()); + assertEquals( + "INVALID_ARGUMENT: Unknown value for AUTO_BATCH_DML_UPDATE_COUNT: -1", + exception.getMessage()); + } + } + + @Test + public void testPostgreSQLStatements() { + SpannerPool.closeSpannerPool(); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.detectDialectResult(Dialect.POSTGRESQL)); + try { + try (Connection connection = createConnection()) { + for (boolean enable : new boolean[] {true, false}) { + connection.execute( + Statement.of("set spanner.auto_batch_dml to " + (enable ? "on" : "off"))); + try (ResultSet resultSet = + connection.executeQuery(Statement.of("show variable spanner.auto_batch_dml"))) { + assertTrue(resultSet.next()); + assertEquals(enable, resultSet.getBoolean("SPANNER.AUTO_BATCH_DML")); + assertFalse(resultSet.next()); + } + } + for (boolean enable : new boolean[] {true, false}) { + connection.execute( + Statement.of("set spanner.auto_batch_dml_update_count_verification = " + enable)); + try (ResultSet resultSet = + connection.executeQuery( + Statement.of("show spanner.auto_batch_dml_update_count_verification"))) { + assertTrue(resultSet.next()); + assertEquals( + enable, resultSet.getBoolean("SPANNER.AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION")); + assertFalse(resultSet.next()); + } + } + for (long updateCount : new long[] {0L, 5L, 100L}) { + connection.execute( + Statement.of("set spanner.auto_batch_dml_update_count to " + updateCount)); + try (ResultSet resultSet = + connection.executeQuery(Statement.of("show spanner.auto_batch_dml_update_count"))) { + assertTrue(resultSet.next()); + assertEquals(updateCount, resultSet.getLong("SPANNER.AUTO_BATCH_DML_UPDATE_COUNT")); + 
assertFalse(resultSet.next()); + } + } + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + connection.execute(Statement.of("set spanner.auto_batch_dml_update_count=-1"))); + assertEquals(ErrorCode.INVALID_ARGUMENT, exception.getErrorCode()); + assertEquals( + "INVALID_ARGUMENT: Unknown value for SPANNER.AUTO_BATCH_DML_UPDATE_COUNT: -1", + exception.getMessage()); + } + } finally { + SpannerPool.closeSpannerPool(); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.detectDialectResult(Dialect.GOOGLE_STANDARD_SQL)); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AutocommitDmlModeConverterTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AutocommitDmlModeConverterTest.java new file mode 100644 index 000000000000..dcfeb139c325 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AutocommitDmlModeConverterTest.java @@ -0,0 +1,67 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.AutocommitDmlModeConverter; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class AutocommitDmlModeConverterTest { + @Parameter public Dialect dialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + @Test + public void testConvert() throws CompileException { + String allowedValues = + ReadOnlyStalenessConverterTest.getAllowedValues(AutocommitDmlModeConverter.class, dialect); + assertThat(allowedValues, is(notNullValue())); + AutocommitDmlModeConverter converter = new AutocommitDmlModeConverter(allowedValues); + assertThat(converter.convert("transactional"), is(equalTo(AutocommitDmlMode.TRANSACTIONAL))); + assertThat(converter.convert("TRANSACTIONAL"), is(equalTo(AutocommitDmlMode.TRANSACTIONAL))); + assertThat(converter.convert("Transactional"), is(equalTo(AutocommitDmlMode.TRANSACTIONAL))); + + assertThat( + converter.convert("partitioned_non_atomic"), + is(equalTo(AutocommitDmlMode.PARTITIONED_NON_ATOMIC))); + assertThat( + converter.convert("Partitioned_Non_Atomic"), + is(equalTo(AutocommitDmlMode.PARTITIONED_NON_ATOMIC))); + assertThat( + converter.convert("PARTITIONED_NON_ATOMIC"), + is(equalTo(AutocommitDmlMode.PARTITIONED_NON_ATOMIC))); + + assertThat(converter.convert(""), is(nullValue())); + 
assertThat(converter.convert(" "), is(nullValue())); + assertThat(converter.convert("random string"), is(nullValue())); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AutocommitDmlModeTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AutocommitDmlModeTest.java new file mode 100644 index 000000000000..bb845746e138 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AutocommitDmlModeTest.java @@ -0,0 +1,117 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.BatchClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TransactionContext; +import com.google.cloud.spanner.TransactionManager; +import com.google.cloud.spanner.TransactionRunner; +import com.google.cloud.spanner.TransactionRunner.TransactionCallable; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class AutocommitDmlModeTest { + private static final String UPDATE = "UPDATE foo SET bar=1"; + private static final String URI = + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database"; + + private DatabaseClient dbClient; + private TransactionContext txContext; + + @SuppressWarnings("unchecked") + private ConnectionImpl createConnection(ConnectionOptions options) { + dbClient = mock(DatabaseClient.class); + when(dbClient.getDialect()).thenReturn(Dialect.GOOGLE_STANDARD_SQL); + txContext = mock(TransactionContext.class); + Spanner spanner = mock(Spanner.class); + SpannerPool spannerPool = mock(SpannerPool.class); + when(spannerPool.getSpanner(any(ConnectionOptions.class), any(ConnectionImpl.class))) + .thenReturn(spanner); + DdlClient ddlClient = mock(DdlClient.class); + TransactionRunner 
txRunner = mock(TransactionRunner.class); + when(dbClient.readWriteTransaction()).thenReturn(txRunner); + when(txRunner.run(any(TransactionCallable.class))) + .thenAnswer( + invocation -> { + TransactionCallable callable = + (TransactionCallable) invocation.getArguments()[0]; + return callable.run(txContext); + }); + + TransactionManager txManager = mock(TransactionManager.class); + when(txManager.begin()).thenReturn(txContext); + when(dbClient.transactionManager()).thenReturn(txManager); + + return new ConnectionImpl(options, spannerPool, ddlClient, dbClient, mock(BatchClient.class)); + } + + @Test + public void testAutocommitDmlModeTransactional() { + try (Connection connection = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertTrue(connection.isAutocommit()); + assertFalse(connection.isReadOnly()); + assertEquals(AutocommitDmlMode.TRANSACTIONAL, connection.getAutocommitDmlMode()); + + connection.execute(Statement.of(UPDATE)); + verify(txContext).executeUpdate(Statement.of(UPDATE), Options.lastStatement()); + verify(dbClient, never()).executePartitionedUpdate(Statement.of(UPDATE)); + } + } + + @Test + public void testAutocommitDmlModePartitioned() { + try (Connection connection = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(connection.isAutocommit(), is(true)); + assertThat(connection.isReadOnly(), is(false)); + connection.setAutocommitDmlMode(AutocommitDmlMode.PARTITIONED_NON_ATOMIC); + assertThat(connection.getAutocommitDmlMode(), is(AutocommitDmlMode.PARTITIONED_NON_ATOMIC)); + + connection.execute(Statement.of(UPDATE)); + verify(txContext, never()).executeUpdate(Statement.of(UPDATE)); + verify(dbClient).executePartitionedUpdate(Statement.of(UPDATE)); + } + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/BeginPgTransactionTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/BeginPgTransactionTest.java new file mode 100644 index 000000000000..068da4385fb6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/BeginPgTransactionTest.java @@ -0,0 +1,218 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; +import com.google.common.collect.ImmutableList; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class BeginPgTransactionTest { + private final AbstractStatementParser parser = + AbstractStatementParser.getInstance(Dialect.POSTGRESQL); + + @Test + public void testBeginWithNoOption() { + ConnectionImpl connection = mock(ConnectionImpl.class); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + + int index = 1; + int withIsolationLevel = 0; + int withoutIsolationLevel = 0; + for (String sql : + ImmutableList.of( + "begin", + "begin transaction", + "begin work", + "start", + "start transaction", + "start work", + "begin isolation level default", + "begin transaction isolation level default", + "begin work isolation level default", + "start isolation level default", + "start transaction isolation level default", + "start work isolation level default", + "begin isolation level serializable", + "begin transaction isolation level serializable", + "begin work isolation level serializable", + "start isolation level serializable", + "start transaction isolation level serializable", + "start work isolation level serializable")) { + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertEquals(sql, 
StatementType.CLIENT_SIDE, statement.getType()); + statement.getClientSideStatement().execute(executor, statement); + + if (sql.contains("isolation") && !sql.contains("default")) { + withIsolationLevel++; + verify(connection, times(withIsolationLevel)).beginTransaction(any(IsolationLevel.class)); + } else { + withoutIsolationLevel++; + verify(connection, times(withoutIsolationLevel)).beginTransaction(); + } + verify(connection, never()).setTransactionMode(any()); + index++; + } + } + + @Test + public void testBeginReadOnly() { + ConnectionImpl connection = mock(ConnectionImpl.class); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + + int index = 1; + for (String sql : + ImmutableList.of( + "begin read only", + "begin transaction read only", + "begin work read only", + "start read only", + "start transaction read only", + "start work read only")) { + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertEquals(sql, StatementType.CLIENT_SIDE, statement.getType()); + statement.getClientSideStatement().execute(executor, statement); + + verify(connection, times(index)).beginTransaction(); + verify(connection, times(index)).setTransactionMode(TransactionMode.READ_ONLY_TRANSACTION); + verify(connection, never()).setTransactionMode(TransactionMode.READ_WRITE_TRANSACTION); + index++; + } + } + + @Test + public void testBeginReadWrite() { + ConnectionImpl connection = mock(ConnectionImpl.class); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + + int index = 1; + int withIsolationLevel = 0; + int withoutIsolationLevel = 0; + for (String sql : + ImmutableList.of( + "begin read write", + "begin transaction read write", + "begin work read write", + "start read write", + "start transaction read write", + "start work read write")) { + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertEquals(sql, StatementType.CLIENT_SIDE, statement.getType()); + 
statement.getClientSideStatement().execute(executor, statement); + + if (sql.contains("isolation") && !sql.contains("default")) { + withIsolationLevel++; + verify(connection, times(withIsolationLevel)).beginTransaction(any(IsolationLevel.class)); + } else { + withoutIsolationLevel++; + verify(connection, times(withoutIsolationLevel)).beginTransaction(); + } + verify(connection, times(index)).setTransactionMode(TransactionMode.READ_WRITE_TRANSACTION); + verify(connection, never()).setTransactionMode(TransactionMode.READ_ONLY_TRANSACTION); + index++; + } + } + + @Test + public void testBeginReadOnlyWithIsolationLevel() { + ConnectionImpl connection = mock(ConnectionImpl.class); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + + int index = 1; + int withIsolationLevel = 0; + int withoutIsolationLevel = 0; + for (String sql : + ImmutableList.of( + "begin read only isolation level serializable", + "begin read only isolation level default", + "begin isolation level serializable read only", + "begin isolation level default read only", + "begin read write isolation level default read only", + "begin read write, isolation level default, read only", + "begin read write , \nisolation level default\n\t,read only")) { + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertEquals(sql, StatementType.CLIENT_SIDE, statement.getType()); + statement.getClientSideStatement().execute(executor, statement); + + if (sql.contains("isolation") && !sql.contains("default")) { + withIsolationLevel++; + verify(connection, times(withIsolationLevel)).beginTransaction(any(IsolationLevel.class)); + } else { + withoutIsolationLevel++; + verify(connection, times(withoutIsolationLevel)).beginTransaction(); + } + verify(connection, times(index)).setTransactionMode(TransactionMode.READ_ONLY_TRANSACTION); + verify(connection, never()).setTransactionMode(TransactionMode.READ_WRITE_TRANSACTION); + index++; + } + } + + @Test + public void 
testBeginWithNotDeferrable() { + ConnectionImpl connection = mock(ConnectionImpl.class); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + + int index = 1; + int withIsolationLevel = 0; + int withoutIsolationLevel = 0; + for (String sql : + ImmutableList.of( + "begin read only isolation level serializable not deferrable", + "begin read only isolation level default not deferrable", + "begin isolation level serializable read only not deferrable", + "begin isolation level default read only not deferrable", + "begin read write isolation level default read only not deferrable", + "begin read write, isolation level default, read only not deferrable", + "begin read write , \nisolation level default\n\t,read only \n\n not \t deferrable", + "begin not deferrable read only isolation level serializable", + "begin not deferrable read only isolation level default", + "begin not deferrable isolation level serializable read only", + "begin not deferrable isolation level default read only", + "begin not deferrable read write isolation level default read only", + "begin not deferrable read write, isolation level default, read only", + "begin not deferrable read write , \nisolation level default\n\t,read only")) { + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertEquals(sql, StatementType.CLIENT_SIDE, statement.getType()); + statement.getClientSideStatement().execute(executor, statement); + + if (sql.contains("isolation") && !sql.contains("default")) { + withIsolationLevel++; + verify(connection, times(withIsolationLevel)).beginTransaction(any(IsolationLevel.class)); + } else { + withoutIsolationLevel++; + verify(connection, times(withoutIsolationLevel)).beginTransaction(); + } + verify(connection, times(index)).setTransactionMode(TransactionMode.READ_ONLY_TRANSACTION); + verify(connection, never()).setTransactionMode(TransactionMode.READ_WRITE_TRANSACTION); + + index++; + } + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/BeginTransactionTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/BeginTransactionTest.java new file mode 100644 index 000000000000..510d97dd83e2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/BeginTransactionTest.java @@ -0,0 +1,129 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; +import com.google.common.collect.ImmutableList; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class BeginTransactionTest { + private final AbstractStatementParser parser = + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL); + + @Test + public void testBeginNoIsolationLevel() { + ConnectionImpl connection = mock(ConnectionImpl.class); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + + int index = 1; + for (String sql : + ImmutableList.of( + "begin", + "begin transaction", + "start", + "start transaction", + "\t\n begin\n \ttransaction \n")) { + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertEquals(sql, StatementType.CLIENT_SIDE, statement.getType()); + statement.getClientSideStatement().execute(executor, statement); + + verify(connection, times(index)).beginTransaction(); + verify(connection, never()).setTransactionMode(any()); + index++; + } + } + + @Test + public void testBeginRepeatableRead() { + ConnectionImpl connection = mock(ConnectionImpl.class); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + + int index = 1; + for (String sql : + ImmutableList.of( + "begin isolation level repeatable read", + "begin transaction isolation 
level repeatable read", + "start isolation level repeatable read", + "start transaction isolation level repeatable read", + "start transaction isolation level repeatable read", + "start\n \ttransaction \t\nisolation\n\t level \t \nrepeatable \n \t read")) { + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertEquals(sql, StatementType.CLIENT_SIDE, statement.getType()); + statement.getClientSideStatement().execute(executor, statement); + + verify(connection, times(index)).beginTransaction(IsolationLevel.REPEATABLE_READ); + verify(connection, never()).setTransactionMode(any()); + index++; + } + } + + @Test + public void testBeginSerializable() { + ConnectionImpl connection = mock(ConnectionImpl.class); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + + int index = 1; + for (String sql : + ImmutableList.of( + "begin isolation level serializable", + "begin transaction isolation level serializable", + "start isolation level serializable", + "start transaction isolation level serializable", + "start transaction isolation level serializable", + "start\n \ttransaction \t\nisolation\n\t level \t \nserializable \n \t ")) { + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertEquals(sql, StatementType.CLIENT_SIDE, statement.getType()); + statement.getClientSideStatement().execute(executor, statement); + + verify(connection, times(index)).beginTransaction(IsolationLevel.SERIALIZABLE); + verify(connection, never()).setTransactionMode(any()); + index++; + } + } + + @Test + public void testInvalidStatements() { + for (String sql : + ImmutableList.of( + "begin isolation level", + "begin transaction level serializable", + "start isolation serializable", + "start transaction repeatable read", + "begin isolation level read committed", + "begin isloation level serializable", + "begin transaction isolation level repeatable", + "begin transaction isolation level serializable read", + "begin transaction 
isolation level repeatable_read")) { + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertEquals(sql, StatementType.UNKNOWN, statement.getType()); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/BooleanConverterTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/BooleanConverterTest.java new file mode 100644 index 000000000000..551848dea9b2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/BooleanConverterTest.java @@ -0,0 +1,61 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.BooleanConverter; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class BooleanConverterTest { + @Parameter public Dialect dialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + @Test + public void testConvert() throws CompileException { + String allowedValues = + ReadOnlyStalenessConverterTest.getAllowedValues(BooleanConverter.class, dialect); + assertThat(allowedValues, is(notNullValue())); + BooleanConverter converter = new BooleanConverter(allowedValues); + assertThat(converter.convert("true"), is(equalTo(Boolean.TRUE))); + assertThat(converter.convert("TRUE"), is(equalTo(Boolean.TRUE))); + assertThat(converter.convert("True"), is(equalTo(Boolean.TRUE))); + + assertThat(converter.convert("false"), is(equalTo(Boolean.FALSE))); + assertThat(converter.convert("FALSE"), is(equalTo(Boolean.FALSE))); + assertThat(converter.convert("False"), is(equalTo(Boolean.FALSE))); + + assertThat(converter.convert(""), is(nullValue())); + assertThat(converter.convert(" "), is(nullValue())); + assertThat(converter.convert("random string"), is(nullValue())); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/CallTest.java 
b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/CallTest.java new file mode 100644 index 000000000000..5a46d4cd58f0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/CallTest.java @@ -0,0 +1,65 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Statement; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeCode; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class CallTest extends AbstractMockServerTest { + + @Test + public void testCancelQuery() { + // 'CALL' should be recognized as a valid query keyword. 
+ Statement statement = Statement.of("call cancel_query('1234')"); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.query( + statement, + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("call_result_tvf") + .setType(Type.newBuilder().setCode(TypeCode.BOOL).build()) + .build()) + .build()) + .build()) + .build())); + + try (Connection connection = createConnection()) { + try (ResultSet resultSet = connection.executeQuery(statement)) { + assertFalse(resultSet.next()); + assertEquals(1, resultSet.getColumnCount()); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ChecksumResultSetTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ChecksumResultSetTest.java new file mode 100644 index 000000000000..6201200ec076 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ChecksumResultSetTest.java @@ -0,0 +1,451 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AbortedDueToConcurrentModificationException; +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.Interval; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.ResultSets; +import com.google.cloud.spanner.SingerProto.Genre; +import com.google.cloud.spanner.SingerProto.SingerInfo; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.Struct.Builder; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.StructField; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.common.collect.ImmutableList; +import java.math.BigDecimal; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.UUID; +import java.util.concurrent.Callable; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ChecksumResultSetTest { + private static final Struct DIFFERENT_NON_NULL_VALUES = + Struct.newBuilder() + .set("boolVal") + .to(false) + .set("longVal") + .to(2 * 2) + .set("doubleVal") + .to(Value.float64(3.14d * 2d)) + .set("floatVal") + .to(Value.float32(3.14f * 3f)) + .set("bigDecimalVal") + .to(Value.numeric(BigDecimal.valueOf(123 * 2, 2))) + .set("pgNumericVal") + .to(Value.pgNumeric("2.46")) + .set("stringVal") + .to("testtest") + .set("jsonVal") + .to(Value.json("{\"color\":\"red\",\"value\":\"#ff0\"}")) + .set("pgJsonbVal") + 
.to(Value.pgJsonb("{\"color\":\"red\",\"value\":\"#00f\"}")) + .set("pgOidVal") + .to(Value.pgOid(2 * 2)) + .set("protoMessageVal") + .to(SingerInfo.newBuilder().setSingerId(23).build()) + .set("protoEnumVal") + .to(Genre.JAZZ) + .set("byteVal") + .to(Value.bytes(ByteArray.copyFrom("bytes".getBytes(StandardCharsets.UTF_8)))) + .set("timestamp") + .to(Timestamp.parseTimestamp("2022-08-04T11:20:00.123456789Z")) + .set("date") + .to(Date.fromYearMonthDay(2022, 8, 3)) + .set("uuid") + .to(UUID.randomUUID()) + .set("interval") + .to(Interval.parseFromString("P8Y2M3DT4H5M6.789123456S")) + .set("boolArray") + .to(Value.boolArray(Arrays.asList(Boolean.FALSE, null, Boolean.TRUE))) + .set("longArray") + .to(Value.int64Array(Arrays.asList(2L, null, 1L, 0L))) + .set("doubleArray") + .to(Value.float64Array(Arrays.asList(3.14d, null, 6.6626d, 10.1d))) + .set("floatArray") + .to(Value.float32Array(Arrays.asList(2.71f, null, 6.6626f, 10.1f))) + .set("bigDecimalArray") + .to(Value.numericArray(Arrays.asList(BigDecimal.TEN, null, BigDecimal.ONE))) + .set("pgNumericArray") + .to(Value.pgNumericArray(Arrays.asList("10", null, "1", "NaN"))) + .set("byteArray") + .to( + Value.bytesArray( + Arrays.asList(ByteArray.copyFrom("test2"), null, ByteArray.copyFrom("test1")))) + .set("timestampArray") + .to( + Value.timestampArray( + Arrays.asList( + Timestamp.parseTimestamp("2000-01-01T00:00:00Z"), + null, + Timestamp.parseTimestamp("2022-07-04T10:24:00.123456789Z")))) + .set("dateArray") + .to( + Value.dateArray( + Arrays.asList(Date.parseDate("2000-01-01"), null, Date.parseDate("2022-08-03")))) + .set("uuidArray") + .to(Value.uuidArray(Arrays.asList(UUID.randomUUID(), UUID.randomUUID()))) + .set("intervalArray") + .to( + Value.intervalArray( + Arrays.asList( + Interval.parseFromString("P1Y2M-3DT4H5M6.789123456S"), + null, + Interval.parseFromString("P-1Y-2M-3DT-4H-5M-6.789123456S")))) + .set("stringArray") + .to(Value.stringArray(Arrays.asList("test2", null, "test1"))) + .set("jsonArray") + 
.to(Value.jsonArray(Arrays.asList("{\"color\":\"red\",\"value\":\"#f00\"}", null, "[]"))) + .set("pgJsonbArray") + .to( + Value.pgJsonbArray( + Arrays.asList("{\"color\":\"red\",\"value\":\"#f00\"}", null, "[]"))) + .set("pgOidArray") + .to(Value.pgOidArray(Arrays.asList(2L, null, 1L, 0L))) + .set("protoMessageArray") + .to( + Value.protoMessageArray( + Arrays.asList( + SingerInfo.newBuilder().setSingerId(23).build(), + SingerInfo.getDefaultInstance()), + SingerInfo.getDescriptor())) + .set("protoEnumArray") + .to(Value.protoEnumArray(Arrays.asList(Genre.JAZZ, Genre.ROCK), Genre.getDescriptor())) + .build(); + + @Test + public void testRetry() { + Type type = + Type.struct( + Type.StructField.of("boolVal", Type.bool()), + Type.StructField.of("longVal", Type.int64()), + Type.StructField.of("doubleVal", Type.float64()), + Type.StructField.of("floatVal", Type.float32()), + Type.StructField.of("bigDecimalVal", Type.numeric()), + Type.StructField.of("pgNumericVal", Type.pgNumeric()), + Type.StructField.of("stringVal", Type.string()), + Type.StructField.of("jsonVal", Type.json()), + Type.StructField.of("pgJsonbVal", Type.pgJsonb()), + Type.StructField.of("pgOidVal", Type.pgOid()), + Type.StructField.of( + "protoMessageVal", Type.proto(SingerInfo.getDescriptor().getFullName())), + Type.StructField.of( + "protoEnumVal", Type.protoEnum(Genre.getDescriptor().getFullName())), + Type.StructField.of("byteVal", Type.bytes()), + Type.StructField.of("timestamp", Type.timestamp()), + Type.StructField.of("date", Type.date()), + Type.StructField.of("uuid", Type.uuid()), + Type.StructField.of("interval", Type.interval()), + Type.StructField.of("boolArray", Type.array(Type.bool())), + Type.StructField.of("longArray", Type.array(Type.int64())), + Type.StructField.of("doubleArray", Type.array(Type.float64())), + Type.StructField.of("floatArray", Type.array(Type.float32())), + Type.StructField.of("bigDecimalArray", Type.array(Type.numeric())), + Type.StructField.of("pgNumericArray", 
Type.array(Type.pgNumeric())), + Type.StructField.of("byteArray", Type.array(Type.bytes())), + Type.StructField.of("timestampArray", Type.array(Type.timestamp())), + Type.StructField.of("dateArray", Type.array(Type.date())), + Type.StructField.of("uuidArray", Type.array(Type.uuid())), + Type.StructField.of("intervalArray", Type.array(Type.interval())), + Type.StructField.of("stringArray", Type.array(Type.string())), + Type.StructField.of("jsonArray", Type.array(Type.json())), + Type.StructField.of("pgJsonbArray", Type.array(Type.pgJsonb())), + Type.StructField.of("pgOidArray", Type.array(Type.pgOid())), + Type.StructField.of( + "protoMessageArray", + Type.array(Type.proto(SingerInfo.getDescriptor().getFullName()))), + Type.StructField.of( + "protoEnumArray", Type.array(Type.protoEnum(Genre.getDescriptor().getFullName())))); + Struct rowNonNullValues = + Struct.newBuilder() + .set("boolVal") + .to(true) + .set("longVal") + .to(2) + .set("doubleVal") + .to(Value.float64(3.14d)) + .set("floatVal") + .to(Value.float32(2.71f)) + .set("bigDecimalVal") + .to(Value.numeric(BigDecimal.valueOf(123, 2))) + .set("pgNumericVal") + .to(Value.pgNumeric("1.23")) + .set("stringVal") + .to("test") + .set("jsonVal") + .to(Value.json("{\"color\":\"red\",\"value\":\"#f00\"}")) + .set("pgJsonbVal") + .to(Value.pgJsonb("{\"color\":\"red\",\"value\":\"#f00\"}")) + .set("pgOidVal") + .to(Value.pgOid(2)) + .set("protoMessageVal") + .to(SingerInfo.newBuilder().setSingerId(98).setNationality("C1").build()) + .set("protoEnumVal") + .to(Genre.POP) + .set("byteVal") + .to(Value.bytes(ByteArray.copyFrom("test".getBytes(StandardCharsets.UTF_8)))) + .set("timestamp") + .to(Timestamp.parseTimestamp("2022-08-04T10:19:00.123456789Z")) + .set("date") + .to(Date.fromYearMonthDay(2022, 8, 4)) + .set("uuid") + .to(UUID.randomUUID()) + .set("interval") + .to(Interval.parseFromString("P1Y2M3DT4H5M6.789123456S")) + .set("boolArray") + .to(Value.boolArray(Arrays.asList(Boolean.TRUE, null, Boolean.FALSE))) + 
.set("longArray") + .to(Value.int64Array(Arrays.asList(1L, null, 2L))) + .set("doubleArray") + .to(Value.float64Array(Arrays.asList(3.14d, null, 6.6626d))) + .set("floatArray") + .to(Value.float32Array(Arrays.asList(2.71f, null, 6.6626f))) + .set("bigDecimalArray") + .to(Value.numericArray(Arrays.asList(BigDecimal.ONE, null, BigDecimal.TEN))) + .set("pgNumericArray") + .to(Value.pgNumericArray(Arrays.asList("1", null, "10"))) + .set("byteArray") + .to( + Value.bytesArray( + Arrays.asList(ByteArray.copyFrom("test1"), null, ByteArray.copyFrom("test2")))) + .set("timestampArray") + .to( + Value.timestampArray( + Arrays.asList( + Timestamp.parseTimestamp("2000-01-01T00:00:00Z"), + null, + Timestamp.parseTimestamp("2022-08-04T10:24:00.123456789Z")))) + .set("dateArray") + .to( + Value.dateArray( + Arrays.asList( + Date.parseDate("2000-01-01"), null, Date.parseDate("2022-08-04")))) + .set("uuidArray") + .to(Value.uuidArray(Arrays.asList(UUID.randomUUID(), UUID.randomUUID()))) + .set("intervalArray") + .to( + Value.intervalArray( + Arrays.asList( + Interval.parseFromString("P1Y2M3DT4H5M6.789123456S"), + null, + Interval.parseFromString("P-1Y-2M-3DT-4H-5M-6.789123456S")))) + .set("stringArray") + .to(Value.stringArray(Arrays.asList("test1", null, "test2"))) + .set("jsonArray") + .to( + Value.jsonArray( + Arrays.asList("{\"color\":\"red\",\"value\":\"#f00\"}", null, "{}"))) + .set("pgJsonbArray") + .to( + Value.pgJsonbArray( + Arrays.asList("{\"color\":\"red\",\"value\":\"#f00\"}", null, "{}"))) + .set("pgOidArray") + .to(Value.pgOidArray(Arrays.asList(1L, null, 2L))) + .set("protoMessageArray") + .to( + Value.protoMessageArray( + Arrays.asList( + SingerInfo.newBuilder().setSingerId(11).setNationality("C1").build(), + SingerInfo.getDefaultInstance()), + SingerInfo.getDescriptor())) + .set("protoEnumArray") + .to(Value.protoEnumArray(Arrays.asList(Genre.POP, Genre.ROCK), Genre.getDescriptor())) + .build(); + Struct rowNullValues = + Struct.newBuilder() + .set("boolVal") + 
.to((Boolean) null) + .set("longVal") + .to((Long) null) + .set("doubleVal") + .to((Double) null) + .set("floatVal") + .to((Float) null) + .set("bigDecimalVal") + .to((BigDecimal) null) + .set("pgNumericVal") + .to(Value.pgNumeric(null)) + .set("stringVal") + .to((String) null) + .set("jsonVal") + .to(Value.json(null)) + .set("pgJsonbVal") + .to(Value.pgJsonb(null)) + .set("pgOidVal") + .to(Value.pgOid(null)) + .set("protoMessageVal") + .to(Value.protoMessage(null, SingerInfo.getDescriptor().getFullName())) + .set("protoEnumVal") + .to(Value.protoEnum(null, Genre.getDescriptor().getFullName())) + .set("byteVal") + .to((ByteArray) null) + .set("timestamp") + .to((Timestamp) null) + .set("date") + .to((Date) null) + .set("uuid") + .to((UUID) null) + .set("interval") + .to((Interval) null) + .set("boolArray") + .toBoolArray((Iterable) null) + .set("longArray") + .toInt64Array((Iterable) null) + .set("doubleArray") + .toFloat64Array((Iterable) null) + .set("floatArray") + .toFloat32Array((Iterable) null) + .set("bigDecimalArray") + .toNumericArray(null) + .set("pgNumericArray") + .toPgNumericArray(null) + .set("byteArray") + .toBytesArray(null) + .set("timestampArray") + .toTimestampArray(null) + .set("dateArray") + .toDateArray(null) + .set("uuidArray") + .toUuidArray(null) + .set("intervalArray") + .toIntervalArray(null) + .set("stringArray") + .toStringArray(null) + .set("jsonArray") + .toJsonArray(null) + .set("pgJsonbArray") + .toPgJsonbArray(null) + .set("pgOidArray") + .toPgOidArray((Iterable) null) + .set("protoMessageArray") + .to(Value.protoMessageArray(null, SingerInfo.getDescriptor())) + .set("protoEnumArray") + .to(Value.protoEnumArray(null, Genre.getDescriptor())) + .build(); + + ParsedStatement parsedStatement = mock(ParsedStatement.class); + Statement statement = Statement.of("select * from foo"); + when(parsedStatement.getStatement()).thenReturn(statement); + AbortedException abortedException = mock(AbortedException.class); + ReadWriteTransaction 
transaction = mock(ReadWriteTransaction.class); + when(transaction.runWithRetry(any(Callable.class))) + .thenAnswer(invocationOnMock -> ((Callable) invocationOnMock.getArgument(0)).call()); + when(transaction.getStatementExecutor()).thenReturn(mock(StatementExecutor.class)); + + ResultSet queryResult = + ResultSets.forRows(type, ImmutableList.of(rowNonNullValues, rowNullValues)); + ChecksumResultSet resultSet = + new ChecksumResultSet( + transaction, + DirectExecuteResultSet.ofResultSet(queryResult), + parsedStatement, + AnalyzeMode.NONE); + assertTrue(resultSet.next()); + assertTrue(resultSet.next()); + + // Ensure that retrying will return the same result. + ResultSet retryResult = + ResultSets.forRows(type, ImmutableList.of(rowNonNullValues, rowNullValues)); + when(transaction.internalExecuteQuery(parsedStatement, AnalyzeMode.NONE)) + .thenReturn(retryResult); + + // There have been no changes, so the retry should succeed. + resultSet.retry(abortedException); + + // Change field value from one non-null value to another non-null value. + for (StructField fieldToChange : rowNonNullValues.getType().getStructFields()) { + Builder builder = Struct.newBuilder(); + for (StructField field : rowNonNullValues.getType().getStructFields()) { + if (field.equals(fieldToChange)) { + builder.set(field.getName()).to(DIFFERENT_NON_NULL_VALUES.getValue(field.getName())); + } else { + builder.set(field.getName()).to(rowNonNullValues.getValue(field.getName())); + } + } + ResultSet newRetryResult = + ResultSets.forRows(type, ImmutableList.of(builder.build(), rowNullValues)); + when(transaction.internalExecuteQuery(parsedStatement, AnalyzeMode.NONE)) + .thenReturn(newRetryResult); + // The query result has changed, so this should now fail. + assertThrows( + "Missing exception for " + fieldToChange.getName(), + AbortedDueToConcurrentModificationException.class, + () -> resultSet.retry(abortedException)); + } + + // Change field value from non-null value to null value. 
+ for (StructField fieldToChange : rowNonNullValues.getType().getStructFields()) { + Builder builder = Struct.newBuilder(); + for (StructField field : rowNonNullValues.getType().getStructFields()) { + if (field.equals(fieldToChange)) { + builder.set(field.getName()).to(rowNullValues.getValue(field.getName())); + } else { + builder.set(field.getName()).to(rowNonNullValues.getValue(field.getName())); + } + } + ResultSet newRetryResult = + ResultSets.forRows(type, ImmutableList.of(builder.build(), rowNullValues)); + when(transaction.internalExecuteQuery(parsedStatement, AnalyzeMode.NONE)) + .thenReturn(newRetryResult); + // The query result has changed, so this should now fail. + assertThrows( + "Missing exception for " + fieldToChange.getName(), + AbortedDueToConcurrentModificationException.class, + () -> resultSet.retry(abortedException)); + } + + // Change field value from null value to non-null value. + for (StructField fieldToChange : rowNonNullValues.getType().getStructFields()) { + Builder builder = Struct.newBuilder(); + for (StructField field : rowNullValues.getType().getStructFields()) { + if (field.equals(fieldToChange)) { + builder.set(field.getName()).to(rowNonNullValues.getValue(field.getName())); + } else { + builder.set(field.getName()).to(rowNullValues.getValue(field.getName())); + } + } + // In this case the modified values are in the second row that first only contained null + // values. + ResultSet newRetryResult = + ResultSets.forRows(type, ImmutableList.of(rowNonNullValues, builder.build())); + when(transaction.internalExecuteQuery(parsedStatement, AnalyzeMode.NONE)) + .thenReturn(newRetryResult); + // The query result has changed, so this should now fail. 
+ assertThrows( + "Missing exception for " + fieldToChange.getName(), + AbortedDueToConcurrentModificationException.class, + () -> resultSet.retry(abortedException)); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ClientContextMockServerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ClientContextMockServerTest.java new file mode 100644 index 000000000000..093af070ea6a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ClientContextMockServerTest.java @@ -0,0 +1,353 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.protobuf.Value; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.RequestOptions; +import java.util.Collections; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class ClientContextMockServerTest extends AbstractMockServerTest { + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + @Parameter public Dialect dialect; + + private Dialect currentDialect; + + private static final RequestOptions.ClientContext CLIENT_CONTEXT = + RequestOptions.ClientContext.newBuilder() + .putSecureContext("test-key", Value.newBuilder().setStringValue("test-value").build()) + .build(); + + @Before + public void setupDialect() { + if (currentDialect != dialect) { + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.detectDialectResult(dialect)); + SpannerPool.closeSpannerPool(); + currentDialect = dialect; + } + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + @Test + public void testQuery_PropagatesClientContext() { + try 
(Connection connection = createConnection()) { + connection.setClientContext(CLIENT_CONTEXT); + try (ResultSet ignore = connection.executeQuery(SELECT_COUNT_STATEMENT)) {} + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + CLIENT_CONTEXT, + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getClientContext()); + } + } + + @Test + public void testUpdate_PropagatesClientContext() { + try (Connection connection = createConnection()) { + connection.setClientContext(CLIENT_CONTEXT); + connection.executeUpdate(INSERT_STATEMENT); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + CLIENT_CONTEXT, + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getClientContext()); + } + } + + @Test + public void testBatchUpdate_PropagatesClientContext() { + try (Connection connection = createConnection()) { + connection.setClientContext(CLIENT_CONTEXT); + connection.executeBatchUpdate(Collections.singletonList(INSERT_STATEMENT)); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals( + CLIENT_CONTEXT, + mockSpanner + .getRequestsOfType(ExecuteBatchDmlRequest.class) + .get(0) + .getRequestOptions() + .getClientContext()); + } + } + + @Test + public void testCommit_PropagatesClientContext() { + try (Connection connection = createConnection()) { + connection.setClientContext(CLIENT_CONTEXT); + connection.executeUpdate(INSERT_STATEMENT); + connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals( + CLIENT_CONTEXT, + mockSpanner + .getRequestsOfType(CommitRequest.class) + .get(0) + .getRequestOptions() + .getClientContext()); + } + } + + @Test + public void testBeginTransaction_PropagatesClientContextWithLazyStart() { + // The 
BeginTransaction option is inlined with the first statement. + try (Connection connection = createConnection()) { + connection.setClientContext(CLIENT_CONTEXT); + connection.beginTransaction(); + connection.executeUpdate(INSERT_STATEMENT); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertEquals(CLIENT_CONTEXT, request.getRequestOptions().getClientContext()); + assertEquals(0, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + } + } + + @Test + public void testBeginTransaction_PropagatesClientContextWithEagerStartAborted() { + // We can force an explicit BeginTransaction RPC by failing the first statement with an ABORTED + // error. If the statement fails before returning a transaction ID, the retry will use an + // explicit BeginTransaction RPC. + // Note: This relies on triggering a retry logic which is the only way to force explicit + // BeginTransaction in the standard Connection API flow without additional configuration (like + // setting delayTransactionStartUntilFirstWrite=false which is not exposed publicly here). + try (Connection connection = createConnection()) { + // Abort the next statement. This will cause the ExecuteSql request (which carries the + // BeginTransaction option) to fail with an ABORTED error. + // Since the request fails, the client does not receive the transaction ID. + // The retry logic in TransactionRunnerImpl/ReadWriteTransaction will then force an + // explicit BeginTransaction RPC to ensure a transaction is started before retrying the + // statement. + mockSpanner.abortNextStatement(); + + connection.setClientContext(CLIENT_CONTEXT); + connection.beginTransaction(); + connection.executeUpdate(INSERT_STATEMENT); + + // We expect two ExecuteSqlRequests. + // 1. The first one fails with ABORTED. This request includes the BeginTransaction option. + // 2. The retry. 
+ int executeSqlCount = mockSpanner.countRequestsOfType(ExecuteSqlRequest.class); + assertEquals(2, executeSqlCount); + + for (ExecuteSqlRequest req : mockSpanner.getRequestsOfType(ExecuteSqlRequest.class)) { + assertEquals(CLIENT_CONTEXT, req.getRequestOptions().getClientContext()); + } + + // We also expect 1 BeginTransactionRequest because the retry used explicit BeginTransaction. + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + BeginTransactionRequest beginRequest = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class).get(0); + assertEquals(CLIENT_CONTEXT, beginRequest.getRequestOptions().getClientContext()); + } + } + + @Test + public void testBeginTransaction_PropagatesClientContextWithEagerStartMutations() { + // We can also force an explicit BeginTransaction RPC by constructing a transaction + // that only issues mutations. Mutation RPCs cannot start a transaction, so + // if they are the only RPCs in the transaction, then an explicit BeginTransaction + // must be issued. 
+ try (Connection connection = createConnection()) { + connection.setClientContext(CLIENT_CONTEXT); + connection.beginTransaction(); + connection.bufferedWrite(Mutation.newInsertBuilder("my-table").set("my-col").to(1L).build()); + connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + BeginTransactionRequest request = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class).get(0); + assertEquals(CLIENT_CONTEXT, request.getRequestOptions().getClientContext()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + CommitRequest commitRequest = mockSpanner.getRequestsOfType(CommitRequest.class).get(0); + assertEquals(CLIENT_CONTEXT, commitRequest.getRequestOptions().getClientContext()); + } + } + + @Test + public void testDatabaseClient_ClientContextMerging() { + String projectId = "test-project"; + String instanceId = "test-instance"; + String databaseId = "test-database"; + + // 1. Define the default ClientContext in SpannerOptions. + RequestOptions.ClientContext defaultContext = + RequestOptions.ClientContext.newBuilder() + .putSecureContext("key1", Value.newBuilder().setStringValue("default_value1").build()) + .putSecureContext("key2", Value.newBuilder().setStringValue("default_value2").build()) + .build(); + + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .setHost("http://localhost:" + getPort()) + .usePlainText() + .setDefaultClientContext(defaultContext) + .build(); + + try (Spanner spanner = options.getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + + // 2. Define the request-specific ClientContext that overrides one key and adds a new one. 
+ RequestOptions.ClientContext requestContext = + RequestOptions.ClientContext.newBuilder() + .putSecureContext("key2", Value.newBuilder().setStringValue("request_value2").build()) + .putSecureContext("key3", Value.newBuilder().setStringValue("request_value3").build()) + .build(); + + // 3. Define the expected merged ClientContext (Union + Overwrite). + RequestOptions.ClientContext expectedContext = + RequestOptions.ClientContext.newBuilder() + .putSecureContext("key1", Value.newBuilder().setStringValue("default_value1").build()) + .putSecureContext("key2", Value.newBuilder().setStringValue("request_value2").build()) + .putSecureContext("key3", Value.newBuilder().setStringValue("request_value3").build()) + .build(); + + // Execute a query with the request context. + try (ResultSet rs = + client + .singleUse() + .executeQuery( + SELECT_COUNT_STATEMENT, + com.google.cloud.spanner.Options.clientContext(requestContext))) { + rs.next(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + RequestOptions.ClientContext actualContext = + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getClientContext(); + + assertEquals(expectedContext, actualContext); + + // Verify specifically that key2 was overwritten and key1 was preserved. 
+ assertEquals( + "request_value2", actualContext.getSecureContextOrThrow("key2").getStringValue()); + assertEquals( + "default_value1", actualContext.getSecureContextOrThrow("key1").getStringValue()); + } + } + + @Test + public void testPersistence() { + try (Connection connection = createConnection()) { + connection.setClientContext(CLIENT_CONTEXT); + try (ResultSet ignore = connection.executeQuery(SELECT_COUNT_STATEMENT)) {} + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + CLIENT_CONTEXT, + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getClientContext()); + + connection.executeUpdate(INSERT_STATEMENT); + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + CLIENT_CONTEXT, + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(1) + .getRequestOptions() + .getClientContext()); + + connection.commit(); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals( + CLIENT_CONTEXT, + mockSpanner + .getRequestsOfType(CommitRequest.class) + .get(0) + .getRequestOptions() + .getClientContext()); + } + } + + @Test + public void testClearClientContext() { + try (Connection connection = createConnection()) { + connection.setClientContext(CLIENT_CONTEXT); + try (ResultSet ignore = connection.executeQuery(SELECT_COUNT_STATEMENT)) {} + + assertEquals( + CLIENT_CONTEXT, + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getClientContext()); + + connection.setClientContext(null); + try (ResultSet ignore = connection.executeQuery(SELECT_COUNT_STATEMENT)) {} + + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertFalse( + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(1) + .getRequestOptions() + .hasClientContext()); + } + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ClientSideStatementsTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ClientSideStatementsTest.java new file mode 100644 index 000000000000..4055f8e949c4 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ClientSideStatementsTest.java @@ -0,0 +1,414 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.io.PrintWriter; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.time.temporal.ChronoUnit; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.junit.AfterClass; +import org.junit.Test; + +/** + * Test that runs a pre-generated sql script for {@link ClientSideStatement}s. The sql script can be + * generated by running + * mvn -Ddo_log_statements=true exec:java -Dexec.mainClass=com.google.cloud.spanner.connection.SqlTestScriptsGenerator -Dexec.classpathScope="test" + * It is only necessary to generate a new test script if a new {@link ClientSideStatement} + * has been added, or the behavior of an existing {@link ClientSideStatement} has changed. + * + *

    This class does not need to be implemented for the client libraries of other programming + * languages. All test cases are covered by the sql file ClientSideStatementsTest.sql. + */ +public class ClientSideStatementsTest extends AbstractSqlScriptTest { + + private static String getScriptFile(Dialect dialect) { + switch (dialect) { + case GOOGLE_STANDARD_SQL: + return "ClientSideStatementsTest.sql"; + case POSTGRESQL: + return "postgresql/ClientSideStatementsTest.sql"; + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Unknown or unsupported dialect: " + dialect); + } + } + + @Test + public void testIsQuery() { + AbstractStatementParser parser = AbstractStatementParser.getInstance(dialect); + ParsedStatement parsedStatement = parser.parse(Statement.of("show/spanner.statement_tag;")); + assertTrue(parsedStatement.isQuery()); + } + + @Test + public void testExecuteClientSideStatementsScript() throws Exception { + SqlScriptVerifier verifier = new SqlScriptVerifier(new TestConnectionProvider(dialect)); + verifier.verifyStatementsInFile(getScriptFile(dialect), getClass(), true); + } + + @Test + public void testClientSideStatementType() { + AbstractStatementParser parser = AbstractStatementParser.getInstance(dialect); + + assertEquals( + ClientSideStatementType.BEGIN, + parser.parse(Statement.of("BEGIN TRANSACTION")).getClientSideStatementType()); + assertEquals( + ClientSideStatementType.COMMIT, + parser.parse(Statement.of("COMMIT TRANSACTION")).getClientSideStatementType()); + assertEquals( + ClientSideStatementType.ROLLBACK, + parser.parse(Statement.of("ROLLBACK TRANSACTION")).getClientSideStatementType()); + if (dialect == Dialect.POSTGRESQL) { + assertEquals( + ClientSideStatementType.ROLLBACK, + parser.parse(Statement.of("ABORT")).getClientSideStatementType()); + assertEquals( + ClientSideStatementType.ROLLBACK, + parser.parse(Statement.of("ABORT TRANSACTION")).getClientSideStatementType()); + assertEquals( + 
ClientSideStatementType.ROLLBACK, + parser.parse(Statement.of("ABORT WORK")).getClientSideStatementType()); + assertEquals( + ClientSideStatementType.ROLLBACK, + parser + .parse(Statement.of("ABORT TRANSACTION and no chain")) + .getClientSideStatementType()); + } + + for (ClientSideStatementImpl statement : parser.getClientSideStatements()) { + assertNotNull( + statement.toString() + " misses a statement type", statement.getStatementType()); + } + } + + private static class DurationTestData { + final String sql; + final Duration expected; + + DurationTestData(String sql, Duration expected) { + this.sql = sql; + this.expected = expected; + } + } + + @Test + public void testSetStatementTimeout() { + AbstractStatementParser parser = AbstractStatementParser.getInstance(dialect); + + String resetValue = dialect == Dialect.POSTGRESQL ? "default" : "null"; + for (DurationTestData data : + new DurationTestData[] { + new DurationTestData("set statement_timeout=10", Duration.ofMillis(10)), + new DurationTestData("set statement_timeout = 10", Duration.ofMillis(10)), + new DurationTestData("set statement_timeout = 10 ", Duration.ofMillis(10)), + new DurationTestData("set statement_timeout='10ms'", Duration.ofMillis(10)), + new DurationTestData("set statement_timeout = '10ms'", Duration.ofMillis(10)), + new DurationTestData("set statement_timeout = '10ms' ", Duration.ofMillis(10)), + new DurationTestData("set statement_timeout='10ns'", Duration.ofNanos(10)), + new DurationTestData("set statement_timeout = '10ns'", Duration.ofNanos(10)), + new DurationTestData("set statement_timeout = '10ns' ", Duration.ofNanos(10)), + new DurationTestData("set statement_timeout='10us'", Duration.of(10, ChronoUnit.MICROS)), + new DurationTestData( + "set statement_timeout = '10us'", Duration.of(10, ChronoUnit.MICROS)), + new DurationTestData( + "set statement_timeout = '10us' ", Duration.of(10, ChronoUnit.MICROS)), + new DurationTestData("set statement_timeout='10s'", Duration.ofSeconds(10)), + 
new DurationTestData("set statement_timeout = '10s'", Duration.ofSeconds(10)), + new DurationTestData("set statement_timeout = '10s' ", Duration.ofSeconds(10)), + new DurationTestData("set statement_timeout=" + resetValue, Duration.ZERO), + new DurationTestData("set statement_timeout = " + resetValue, Duration.ZERO), + new DurationTestData("set statement_timeout = " + resetValue + " ", Duration.ZERO), + }) { + ConnectionStatementExecutor executor = mock(ConnectionStatementExecutor.class); + when(executor.getDialect()).thenReturn(dialect); + ParsedStatement statement = parser.parse(Statement.of(data.sql)); + assertEquals( + ClientSideStatementType.SET_STATEMENT_TIMEOUT, statement.getClientSideStatementType()); + statement.getClientSideStatement().execute(executor, statement); + verify(executor).statementSetStatementTimeout(data.expected); + } + } + + @Test + public void testSetMaxCommitDelay() { + AbstractStatementParser parser = AbstractStatementParser.getInstance(dialect); + + String prefix = dialect == Dialect.POSTGRESQL ? "spanner." 
: ""; + for (DurationTestData data : + new DurationTestData[] { + new DurationTestData("set " + prefix + "max_commit_delay=10", Duration.ofMillis(10)), + new DurationTestData("set " + prefix + "max_commit_delay = 10", Duration.ofMillis(10)), + new DurationTestData("set " + prefix + "max_commit_delay = 10 ", Duration.ofMillis(10)), + new DurationTestData("set " + prefix + "max_commit_delay='10ms'", Duration.ofMillis(10)), + new DurationTestData( + "set " + prefix + "max_commit_delay = '10ms'", Duration.ofMillis(10)), + new DurationTestData( + "set " + prefix + "max_commit_delay = '10ms' ", Duration.ofMillis(10)), + new DurationTestData("set " + prefix + "max_commit_delay='10ns'", Duration.ofNanos(10)), + new DurationTestData("set " + prefix + "max_commit_delay = '10ns'", Duration.ofNanos(10)), + new DurationTestData( + "set " + prefix + "max_commit_delay = '10ns' ", Duration.ofNanos(10)), + new DurationTestData( + "set " + prefix + "max_commit_delay='10us'", Duration.of(10, ChronoUnit.MICROS)), + new DurationTestData( + "set " + prefix + "max_commit_delay = '10us'", Duration.of(10, ChronoUnit.MICROS)), + new DurationTestData( + "set " + prefix + "max_commit_delay = '10us' ", Duration.of(10, ChronoUnit.MICROS)), + new DurationTestData("set " + prefix + "max_commit_delay='10s'", Duration.ofSeconds(10)), + new DurationTestData( + "set " + prefix + "max_commit_delay = '10s'", Duration.ofSeconds(10)), + new DurationTestData( + "set " + prefix + "max_commit_delay = '10s' ", Duration.ofSeconds(10)), + new DurationTestData("set " + prefix + "max_commit_delay=null", Duration.ZERO), + new DurationTestData("set " + prefix + "max_commit_delay = null", Duration.ZERO), + new DurationTestData("set " + prefix + "max_commit_delay = null ", Duration.ZERO), + }) { + ConnectionStatementExecutor executor = mock(ConnectionStatementExecutor.class); + when(executor.getDialect()).thenReturn(dialect); + ParsedStatement statement = parser.parse(Statement.of(data.sql)); + assertEquals( + 
ClientSideStatementType.SET_MAX_COMMIT_DELAY, statement.getClientSideStatementType()); + statement.getClientSideStatement().execute(executor, statement); + verify(executor).statementSetMaxCommitDelay(data.expected); + } + } + + private static PrintWriter writer; + + /** Generates the test script file */ + static void generateTestScript(Dialect dialect) throws Exception { + AbstractStatementParser parser = AbstractStatementParser.getInstance(dialect); + try { + openLog(dialect); + ClientSideStatements statements = ClientSideStatements.getInstance(dialect); + for (ClientSideStatementImpl statement : statements.getCompiledStatements()) { + generateTestStatements(parser, statement); + } + } finally { + closeLog(); + } + } + + /** Writes the prerequisite statements + the given sql statement to a script file */ + private static void log(List pre, String sql) { + writeLog("NEW_CONNECTION"); + for (String prerequisite : pre) { + writeLog(prerequisite); + } + writeLog(sql); + } + + /** + * Writes the prerequisite statements + the given sql statement to a script file preceded by + * an @EXPECT EXCEPTION error statement + */ + private static void log(List pre, String statement, ErrorCode error) { + log(pre, "@EXPECT EXCEPTION " + error.name() + "\n" + statement); + } + + /** Writes the actual statement to the script file */ + private static void writeLog(String statement) { + writer.println(statement + ";"); + } + + private static void openLog(Dialect dialect) { + try { + writer = + new PrintWriter( + new OutputStreamWriter( + new FileOutputStream( + "src/test/resources/com/google/cloud/spanner/connection/" + + getScriptFile(dialect), + false), + StandardCharsets.UTF_8), + true); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @AfterClass + public static void closeLog() { + if (writer != null) { + writer.close(); + } + } + + /** Generates test statements for all {@link ClientSideStatement}s */ + private static void generateTestStatements( + 
AbstractStatementParser parser, ClientSideStatementImpl statement) { + for (String sql : statement.getExampleStatements()) { + log(statement.getExamplePrerequisiteStatements(), sql); + if (statement.getStatementType() != ClientSideStatementType.RUN_PARTITION + && statement.getStatementType() != ClientSideStatementType.SET_DIRECTED_READ) { + // Partition ids are case-sensitive. + // DirectedReadOptions are case-sensitive. + log(statement.getExamplePrerequisiteStatements(), upper(sql)); + log(statement.getExamplePrerequisiteStatements(), lower(sql)); + } + log(statement.getExamplePrerequisiteStatements(), withLeadingSpaces(sql)); + log(statement.getExamplePrerequisiteStatements(), withLeadingTabs(sql)); + log(statement.getExamplePrerequisiteStatements(), withLeadingLinefeeds(sql)); + log(statement.getExamplePrerequisiteStatements(), withTrailingSpaces(sql)); + log(statement.getExamplePrerequisiteStatements(), withTrailingTabs(sql)); + log(statement.getExamplePrerequisiteStatements(), withTrailingLinefeeds(sql)); + log(statement.getExamplePrerequisiteStatements(), withSpaces(sql)); + log(statement.getExamplePrerequisiteStatements(), withTabs(sql)); + log(statement.getExamplePrerequisiteStatements(), withLinefeeds(sql)); + + log( + statement.getExamplePrerequisiteStatements(), + withInvalidPrefix(sql), + ErrorCode.INVALID_ARGUMENT); + + boolean anySuffixAllowed = + statement.getStatementType() == ClientSideStatementType.PARTITION + || statement.getStatementType() == ClientSideStatementType.RUN_PARTITIONED_QUERY; + if (anySuffixAllowed) { + log(statement.getExamplePrerequisiteStatements(), withInvalidSuffix(sql)); + } else { + log( + statement.getExamplePrerequisiteStatements(), + withInvalidSuffix(sql), + parser.parse(Statement.of(withInvalidSuffix(sql))).isQuery() + ? 
ErrorCode.UNIMPLEMENTED + : ErrorCode.INVALID_ARGUMENT); + } + + final String[] replacements = { + "%", "_", "&", "$", "@", "!", "*", "(", ")", "-", "+", "-#", "/", "\\", "?", "-/", "/#", + "/-" + }; + for (String replacement : replacements) { + log( + statement.getExamplePrerequisiteStatements(), + withPrefix(replacement, sql), + ErrorCode.INVALID_ARGUMENT); + if (anySuffixAllowed) { + log(statement.getExamplePrerequisiteStatements(), withSuffix(replacement, sql)); + } else { + log( + statement.getExamplePrerequisiteStatements(), + withSuffix(replacement, sql), + parser.parse(Statement.of(withSuffix(replacement, sql))).isQuery() + ? ErrorCode.UNIMPLEMENTED + : ErrorCode.INVALID_ARGUMENT); + log( + statement.getExamplePrerequisiteStatements(), + replaceLastSpaceWith(replacement, sql), + parser.parse(Statement.of(replaceLastSpaceWith(replacement, sql))).isQuery() + ? ErrorCode.UNIMPLEMENTED + : ErrorCode.INVALID_ARGUMENT); + } + } + } + } + + private static String upper(String statement) { + return statement.toUpperCase(); + } + + private static String lower(String statement) { + return statement.toLowerCase(); + } + + private static String withLeadingSpaces(String statement) { + return " " + statement; + } + + private static String withLeadingTabs(String statement) { + return "\t\t\t" + statement; + } + + private static String withLeadingLinefeeds(String statement) { + return "\n\n\n" + statement; + } + + private static String withTrailingSpaces(String statement) { + return statement + " "; + } + + private static String withTrailingTabs(String statement) { + return statement + "\t\t"; + } + + private static String withTrailingLinefeeds(String statement) { + return statement + "\n\n"; + } + + private static String withSpaces(String statement) { + return statement.replaceAll(" ", " "); + } + + private static String withTabs(String statement) { + return statement.replaceAll(" ", "\t"); + } + + private static String withLinefeeds(String statement) { + // Do not replace 
spaces inside quotes + Matcher matcher = Pattern.compile("(.*)('.*')").matcher(statement); + if (matcher.matches()) { + return matcher.group(1).replaceAll(" ", "\n") + matcher.group(2); + } + return statement.replaceAll(" ", "\n"); + } + + private static String withInvalidPrefix(String statement) { + return "foo " + statement; + } + + private static String withInvalidSuffix(String statement) { + return statement + " bar"; + } + + private static String withPrefix(String prefix, String statement) { + return prefix + statement; + } + + private static String withSuffix(String suffix, String statement) { + return statement + suffix; + } + + private static String replaceLastSpaceWith(String replacement, String statement) { + if (statement.lastIndexOf(' ') > -1) { + return statement.substring(0, statement.lastIndexOf(' ')) + + replacement + + statement.substring(statement.lastIndexOf(' ') + 1); + } + return statement + replacement; + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionAsyncApiAbortedTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionAsyncApiAbortedTest.java new file mode 100644 index 000000000000..1f4378ecd86b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionAsyncApiAbortedTest.java @@ -0,0 +1,676 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.fail; + +import com.google.api.core.ApiFuture; +import com.google.api.core.SettableApiFuture; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AbortedDueToConcurrentModificationException; +import com.google.cloud.spanner.AsyncResultSet; +import com.google.cloud.spanner.AsyncResultSet.CallbackResponse; +import com.google.cloud.spanner.AsyncResultSet.ReadyCallback; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.ConnectionOptions.Builder; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest.ITConnection; +import com.google.cloud.spanner.connection.StatementExecutor.StatementExecutorType; +import com.google.common.collect.Collections2; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ByteString; +import com.google.spanner.v1.ExecuteSqlRequest; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import 
org.junit.Test; + +/** Tests retry handling of read/write transactions using the Async Connection API. */ +public class ConnectionAsyncApiAbortedTest extends AbstractMockServerTest { + private static final class QueryResult { + final ApiFuture finished; + final AtomicInteger rowCount; + + QueryResult(ApiFuture finished, AtomicInteger rowCount) { + this.finished = finished; + this.rowCount = rowCount; + } + } + + private static final class RetryCounter implements TransactionRetryListener { + final CountDownLatch latch; + int retryCount = 0; + + RetryCounter() { + this(0); + } + + RetryCounter(int countDown) { + latch = new CountDownLatch(countDown); + } + + @Override + public void retryStarting(Timestamp transactionStarted, long transactionId, int retryAttempt) { + retryCount++; + latch.countDown(); + } + + @Override + public void retryFinished( + Timestamp transactionStarted, long transactionId, int retryAttempt, RetryResult result) {} + } + + private static final ExecutorService singleThreadedExecutor = Executors.newSingleThreadExecutor(); + private static final ExecutorService multiThreadedExecutor = Executors.newFixedThreadPool(8); + public static final int RANDOM_RESULT_SET_ROW_COUNT_2 = 50; + public static final Statement SELECT_RANDOM_STATEMENT_2 = Statement.of("SELECT * FROM RANDOM2"); + public static final com.google.spanner.v1.ResultSet RANDOM_RESULT_SET_2 = + new RandomResultSetGenerator(RANDOM_RESULT_SET_ROW_COUNT_2).generate(); + + @BeforeClass + public static void setupAdditionalResults() { + mockSpanner.putStatementResult( + StatementResult.query(SELECT_RANDOM_STATEMENT_2, RANDOM_RESULT_SET_2)); + } + + @AfterClass + public static void stopExecutor() { + singleThreadedExecutor.shutdown(); + multiThreadedExecutor.shutdown(); + } + + @Before + public void setup() { + try (Connection connection = createConnection()) { + connection.getDialect(); + } + } + + @After + public void reset() { + mockSpanner.removeAllExecutionTimes(); + } + + ITConnection 
createConnection(TransactionRetryListener listener) { + ITConnection connection = + super.createConnection(ImmutableList.of(), ImmutableList.of(listener)); + connection.setAutocommit(false); + return connection; + } + + @Override + protected Builder configureConnectionOptions(Builder builder) { + return builder.setStatementExecutorType(StatementExecutorType.PLATFORM_THREAD); + } + + @Test + public void testSingleQueryAborted() { + RetryCounter counter = new RetryCounter(); + try (Connection connection = createConnection(counter)) { + assertThat(counter.retryCount).isEqualTo(0); + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + QueryResult res = executeQueryAsync(connection, SELECT_RANDOM_STATEMENT); + + assertThat(get(res.finished)).isNull(); + assertThat(res.rowCount.get()).isEqualTo(RANDOM_RESULT_SET_ROW_COUNT); + assertThat(counter.retryCount).isEqualTo(1); + } + } + + @Test + public void testTwoQueriesSecondAborted() { + RetryCounter counter = new RetryCounter(); + try (Connection connection = createConnection(counter)) { + assertThat(counter.retryCount).isEqualTo(0); + QueryResult res1 = executeQueryAsync(connection, SELECT_RANDOM_STATEMENT); + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + QueryResult res2 = executeQueryAsync(connection, SELECT_RANDOM_STATEMENT_2); + + assertThat(get(res1.finished)).isNull(); + assertThat(res1.rowCount.get()).isEqualTo(RANDOM_RESULT_SET_ROW_COUNT); + assertThat(get(res2.finished)).isNull(); + assertThat(res2.rowCount.get()).isEqualTo(RANDOM_RESULT_SET_ROW_COUNT_2); + assertThat(counter.retryCount).isEqualTo(1); + } + } + + @Test + public void testTwoQueriesBothAborted() throws InterruptedException { + RetryCounter counter = new RetryCounter(1); + try (Connection connection = createConnection(counter)) 
{ + assertThat(counter.retryCount).isEqualTo(0); + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + QueryResult res1 = executeQueryAsync(connection, SELECT_RANDOM_STATEMENT); + // Wait until the first query aborted. + assertThat(counter.latch.await(10L, TimeUnit.SECONDS)).isTrue(); + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + QueryResult res2 = executeQueryAsync(connection, SELECT_RANDOM_STATEMENT_2); + + assertThat(get(res1.finished)).isNull(); + assertThat(res1.rowCount.get()).isEqualTo(RANDOM_RESULT_SET_ROW_COUNT); + assertThat(get(res2.finished)).isNull(); + assertThat(res2.rowCount.get()).isEqualTo(RANDOM_RESULT_SET_ROW_COUNT_2); + assertThat(counter.retryCount).isEqualTo(2); + } + } + + @Test + public void testSingleQueryAbortedMidway() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofStreamException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")), + RANDOM_RESULT_SET_ROW_COUNT / 2)); + RetryCounter counter = new RetryCounter(); + try (Connection connection = createConnection(counter)) { + assertThat(counter.retryCount).isEqualTo(0); + QueryResult res = executeQueryAsync(connection, SELECT_RANDOM_STATEMENT); + + assertThat(get(res.finished)).isNull(); + assertThat(res.rowCount.get()).isEqualTo(RANDOM_RESULT_SET_ROW_COUNT); + assertThat(counter.retryCount).isEqualTo(1); + } + } + + @Test + public void testTwoQueriesSecondAbortedMidway() { + RetryCounter counter = new RetryCounter(); + try (Connection connection = createConnection(counter)) { + assertThat(counter.retryCount).isEqualTo(0); + QueryResult res1 = executeQueryAsync(connection, SELECT_RANDOM_STATEMENT); + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofStreamException( + 
mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")), + RANDOM_RESULT_SET_ROW_COUNT_2 / 2)); + QueryResult res2 = executeQueryAsync(connection, SELECT_RANDOM_STATEMENT_2); + + assertThat(get(res1.finished)).isNull(); + assertThat(res1.rowCount.get()).isEqualTo(RANDOM_RESULT_SET_ROW_COUNT); + assertThat(get(res2.finished)).isNull(); + assertThat(res2.rowCount.get()).isEqualTo(RANDOM_RESULT_SET_ROW_COUNT_2); + assertThat(counter.retryCount).isEqualTo(1); + } + } + + @Test + public void testTwoQueriesOneAbortedMidway() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofStreamException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")), + Math.min(RANDOM_RESULT_SET_ROW_COUNT / 2, RANDOM_RESULT_SET_ROW_COUNT_2 / 2))); + RetryCounter counter = new RetryCounter(); + try (Connection connection = createConnection(counter)) { + assertThat(counter.retryCount).isEqualTo(0); + // These AsyncResultSets will be consumed in parallel. One of them will (at random) abort + // halfway. 
+ QueryResult res1 = + executeQueryAsync(connection, SELECT_RANDOM_STATEMENT, multiThreadedExecutor); + QueryResult res2 = + executeQueryAsync(connection, SELECT_RANDOM_STATEMENT_2, multiThreadedExecutor); + + assertThat(get(res1.finished)).isNull(); + assertThat(res1.rowCount.get()).isEqualTo(RANDOM_RESULT_SET_ROW_COUNT); + assertThat(get(res2.finished)).isNull(); + assertThat(res2.rowCount.get()).isEqualTo(RANDOM_RESULT_SET_ROW_COUNT_2); + assertThat(counter.retryCount).isEqualTo(1); + } + } + + @Test + public void testUpdateAndQueryAbortedMidway() throws InterruptedException { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofStreamException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")), + RANDOM_RESULT_SET_ROW_COUNT / 2)); + final RetryCounter counter = new RetryCounter(); + try (Connection connection = createConnection(counter)) { + assertThat(counter.retryCount).isEqualTo(0); + final SettableApiFuture rowCount = SettableApiFuture.create(); + final CountDownLatch updateLatch = new CountDownLatch(1); + final CountDownLatch queryLatch = new CountDownLatch(1); + ApiFuture finished; + try (AsyncResultSet rs = + connection.executeQueryAsync( + SELECT_RANDOM_STATEMENT, Options.bufferRows(RANDOM_RESULT_SET_ROW_COUNT / 2 - 1))) { + finished = + rs.setCallback( + singleThreadedExecutor, + new ReadyCallback() { + long count; + + @Override + public CallbackResponse cursorReady(AsyncResultSet resultSet) { + // Indicate that the query has been executed. + queryLatch.countDown(); + try { + // Wait until the update is on its way. 
+ updateLatch.await(10L, TimeUnit.SECONDS); + while (true) { + switch (resultSet.tryNext()) { + case OK: + count++; + break; + case DONE: + rowCount.set(count); + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + } + } + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + }); + } + // Wait until the query has actually executed. + queryLatch.await(10L, TimeUnit.SECONDS); + ApiFuture updateCount = connection.executeUpdateAsync(INSERT_STATEMENT); + updateCount.addListener(updateLatch::countDown, MoreExecutors.directExecutor()); + + // We should not commit before the AsyncResultSet has finished. + assertThat(get(finished)).isNull(); + ApiFuture commit = connection.commitAsync(); + + assertThat(get(rowCount)).isEqualTo(RANDOM_RESULT_SET_ROW_COUNT); + assertThat(get(updateCount)).isEqualTo(UPDATE_COUNT); + assertThat(get(commit)).isNull(); + assertThat(counter.retryCount).isEqualTo(1); + + // Verify the order of the statements on the server. + List requests = + Lists.newArrayList( + Collections2.filter( + mockSpanner.getRequests(), input -> input instanceof ExecuteSqlRequest)); + // The entire transaction should be retried. 
+ assertThat(requests).hasSize(4); + assertThat(((ExecuteSqlRequest) requests.get(0)).getSeqno()).isEqualTo(1L); + assertThat(((ExecuteSqlRequest) requests.get(0)).getSql()) + .isEqualTo(SELECT_RANDOM_STATEMENT.getSql()); + assertThat(((ExecuteSqlRequest) requests.get(1)).getSeqno()).isEqualTo(2L); + assertThat(((ExecuteSqlRequest) requests.get(1)).getSql()) + .isEqualTo(INSERT_STATEMENT.getSql()); + assertThat(((ExecuteSqlRequest) requests.get(2)).getSeqno()).isEqualTo(1L); + assertThat(((ExecuteSqlRequest) requests.get(2)).getSql()) + .isEqualTo(SELECT_RANDOM_STATEMENT.getSql()); + assertThat(((ExecuteSqlRequest) requests.get(3)).getSeqno()).isEqualTo(2L); + assertThat(((ExecuteSqlRequest) requests.get(3)).getSql()) + .isEqualTo(INSERT_STATEMENT.getSql()); + } + } + + @Test + public void testUpdateAndQueryAbortedMidway_UpdateCountChanged() throws InterruptedException { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofStreamException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")), + RANDOM_RESULT_SET_ROW_COUNT / 2)); + final RetryCounter counter = new RetryCounter(); + try (Connection connection = createConnection(counter)) { + assertThat(counter.retryCount).isEqualTo(0); + final CountDownLatch updateLatch = new CountDownLatch(1); + final CountDownLatch queryLatch = new CountDownLatch(1); + ApiFuture finished; + try (AsyncResultSet rs = + connection.executeQueryAsync( + SELECT_RANDOM_STATEMENT, Options.bufferRows(RANDOM_RESULT_SET_ROW_COUNT / 2 - 1))) { + finished = + rs.setCallback( + singleThreadedExecutor, + resultSet -> { + // Indicate that the query has been executed. + queryLatch.countDown(); + try { + // Wait until the update is on its way. 
+ updateLatch.await(10L, TimeUnit.SECONDS); + while (true) { + switch (resultSet.tryNext()) { + case OK: + break; + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + } + } + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + }); + } + // Wait until the query has actually executed. + queryLatch.await(10L, TimeUnit.SECONDS); + // Execute an update statement and wait until it has finished before allowing the + // AsyncResultSet to continue processing. Also change the result of the update statement after + // it has finished. The AsyncResultSet will see an aborted transaction halfway, and then + // during the retry, it will get a different result for this update statement. That will cause + // the retry to be aborted. + get(connection.executeUpdateAsync(INSERT_STATEMENT)); + try { + mockSpanner.putStatementResult(StatementResult.update(INSERT_STATEMENT, UPDATE_COUNT + 1)); + updateLatch.countDown(); + get(finished); + fail("Missing expected exception"); + } catch (AbortedDueToConcurrentModificationException e) { + assertThat(counter.retryCount).isEqualTo(1); + } finally { + mockSpanner.putStatementResult(StatementResult.update(INSERT_STATEMENT, UPDATE_COUNT)); + } + + // Verify the order of the statements on the server. + List requests = + Lists.newArrayList( + Collections2.filter( + mockSpanner.getRequests(), input -> input instanceof ExecuteSqlRequest)); + // The entire transaction should be retried, but will not succeed as the result of the update + // statement was different during the retry. 
+ assertThat(requests).hasSize(4); + assertThat(((ExecuteSqlRequest) requests.get(0)).getSeqno()).isEqualTo(1L); + assertThat(((ExecuteSqlRequest) requests.get(0)).getSql()) + .isEqualTo(SELECT_RANDOM_STATEMENT.getSql()); + assertThat(((ExecuteSqlRequest) requests.get(1)).getSeqno()).isEqualTo(2L); + assertThat(((ExecuteSqlRequest) requests.get(1)).getSql()) + .isEqualTo(INSERT_STATEMENT.getSql()); + assertThat(((ExecuteSqlRequest) requests.get(2)).getSeqno()).isEqualTo(1L); + assertThat(((ExecuteSqlRequest) requests.get(2)).getSql()) + .isEqualTo(SELECT_RANDOM_STATEMENT.getSql()); + assertThat(((ExecuteSqlRequest) requests.get(3)).getSeqno()).isEqualTo(2L); + assertThat(((ExecuteSqlRequest) requests.get(3)).getSql()) + .isEqualTo(INSERT_STATEMENT.getSql()); + } + } + + @Test + public void testQueriesAbortedMidway_ResultsChanged() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofStreamException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")), + RANDOM_RESULT_SET_ROW_COUNT - 1)); + final Statement statement = Statement.of("SELECT * FROM TEST_TABLE"); + final RandomResultSetGenerator generator = + new RandomResultSetGenerator(RANDOM_RESULT_SET_ROW_COUNT - 10); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + + final CountDownLatch latch = new CountDownLatch(1); + final RetryCounter counter = new RetryCounter(); + try (Connection connection = createConnection(counter)) { + ApiFuture res1; + try (AsyncResultSet rs = + connection.executeQueryAsync(SELECT_RANDOM_STATEMENT, Options.bufferRows(5))) { + res1 = + rs.setCallback( + multiThreadedExecutor, + resultSet -> { + try { + latch.await(10L, TimeUnit.SECONDS); + while (true) { + switch (resultSet.tryNext()) { + case OK: + break; + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + } + } + } catch (Throwable t) { + throw SpannerExceptionFactory.asSpannerException(t); + } + }); + 
} + try (AsyncResultSet rs = connection.executeQueryAsync(statement, Options.bufferRows(5))) { + rs.setCallback( + multiThreadedExecutor, + new ReadyCallback() { + boolean replaced; + + @Override + public CallbackResponse cursorReady(AsyncResultSet resultSet) { + if (!replaced) { + // Replace the result of the query on the server after the first execution. + mockSpanner.putStatementResult( + StatementResult.query(statement, generator.generate())); + replaced = true; + } + while (true) { + switch (resultSet.tryNext()) { + case OK: + break; + case DONE: + latch.countDown(); + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + } + } + } + }); + } + try { + get(res1); + fail("Missing expected exception"); + } catch (AbortedDueToConcurrentModificationException e) { + assertThat(counter.retryCount).isEqualTo(1); + } + } + } + + @Test + public void testBlindUpdateAborted() { + RetryCounter counter = new RetryCounter(); + try (Connection connection = createConnection(counter)) { + mockSpanner.abortNextStatement(); + ApiFuture updateCount = connection.executeUpdateAsync(INSERT_STATEMENT); + get(connection.commitAsync()); + + assertThat(get(updateCount)).isEqualTo(UPDATE_COUNT); + assertThat(counter.retryCount).isEqualTo(1); + } + } + + @Test + public void testBlindUpdateAborted_WithConcurrentModification() { + Statement update1 = Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=100"); + mockSpanner.putStatementResult(StatementResult.update(update1, 100)); + + RetryCounter counter = new RetryCounter(); + try (Connection connection = createConnection(counter)) { + // Execute an update statement and then change the result for the next time it is executed. + get(connection.executeUpdateAsync(update1)); + mockSpanner.putStatementResult(StatementResult.update(update1, 200)); + + // Abort on the next statement. The retry should now fail because of the changed result of the + // first update. 
+ mockSpanner.abortNextStatement(); + connection.executeUpdateAsync(INSERT_STATEMENT); + + try { + get(connection.commitAsync()); + fail("Missing expected exception"); + } catch (AbortedDueToConcurrentModificationException e) { + assertThat(counter.retryCount).isEqualTo(1); + } + } + } + + @Test + public void testMultipleBlindUpdatesAborted_WithConcurrentModification() { + Statement update1 = Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=100"); + mockSpanner.putStatementResult(StatementResult.update(update1, 100)); + + RetryCounter counter = new RetryCounter(); + try (Connection connection = createConnection(counter)) { + // Execute an update statement and then change the result for the next time it is executed. + get(connection.executeUpdateAsync(update1)); + mockSpanner.putStatementResult(StatementResult.update(update1, 200)); + + // Abort the transaction on the next statement. The retry should now fail because of the + // changed result of the first update. + mockSpanner.abortNextStatement(); + + // Continue to (try to) execute blind updates. This should not cause any exceptions, although + // all of the returned futures will fail. + List> futures = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + futures.add(connection.executeUpdateAsync(INSERT_STATEMENT)); + } + + for (ApiFuture fut : futures) { + try { + get(fut); + fail("Missing expected exception"); + } catch (AbortedDueToConcurrentModificationException e) { + assertThat(counter.retryCount).isEqualTo(1); + } + } + } + } + + @Test + public void testBlindUpdateAborted_ThenAsyncQuery_WithConcurrentModification() { + Statement update1 = Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=100"); + mockSpanner.putStatementResult(StatementResult.update(update1, 100)); + + RetryCounter counter = new RetryCounter(); + try (Connection connection = createConnection(counter)) { + // Execute an update statement and then change the result for the next time it is executed. 
+ get(connection.executeUpdateAsync(update1)); + mockSpanner.putStatementResult(StatementResult.update(update1, 200)); + + // Abort on the next statement. The retry should now fail because of the changed result of the + // first update. + mockSpanner.abortNextStatement(); + connection.executeUpdateAsync(INSERT_STATEMENT); + + // Try to execute an async query. The callback should also receive the + // AbortedDueToConcurrentModificationException. + try (AsyncResultSet rs = connection.executeQueryAsync(SELECT_RANDOM_STATEMENT)) { + ApiFuture fut = + rs.setCallback( + singleThreadedExecutor, + resultSet -> { + // The following line should throw AbortedDueToConcurrentModificationException. + resultSet.tryNext(); + return CallbackResponse.DONE; + }); + try { + assertThat(get(fut)).isNull(); + fail("Missing expected exception"); + } catch (AbortedDueToConcurrentModificationException e) { + assertThat(counter.retryCount).isEqualTo(1); + } + } + + // Ensure that a rollback and then a new statement does succeed. + connection.rollbackAsync(); + try (AsyncResultSet rs = connection.executeQueryAsync(SELECT_RANDOM_STATEMENT)) { + ApiFuture fut = + rs.setCallback( + singleThreadedExecutor, + resultSet -> { + resultSet.tryNext(); + return CallbackResponse.DONE; + }); + assertThat(get(fut)).isNull(); + } + get(connection.commitAsync()); + } + } + + @Test + public void testBlindUpdateAborted_SelectResults() { + final Statement update1 = Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=100"); + mockSpanner.putStatementResult(StatementResult.update(update1, 100)); + + RetryCounter counter = new RetryCounter(); + try (Connection connection = createConnection(counter)) { + // Execute an update statement and then change the result for the next time it is executed. + connection.executeUpdate(update1); + // Abort on the next statement. The retry should now fail because of the changed result of the + // first update. 
+ mockSpanner.abortNextStatement(); + mockSpanner.putStatementResult(StatementResult.update(update1, 200)); + connection.executeUpdateAsync(INSERT_STATEMENT); + ApiFuture commit = connection.commitAsync(); + + try (AsyncResultSet rs = connection.executeQueryAsync(SELECT_RANDOM_STATEMENT)) { + while (rs.next()) {} + } + get(connection.commitAsync()); + + try { + get(commit); + fail("Missing expected exception"); + } catch (AbortedDueToConcurrentModificationException e) { + assertThat(counter.retryCount).isEqualTo(1); + } + } + } + + private QueryResult executeQueryAsync(Connection connection, Statement statement) { + return executeQueryAsync(connection, statement, singleThreadedExecutor); + } + + private QueryResult executeQueryAsync( + Connection connection, Statement statement, Executor executor) { + ApiFuture res; + final AtomicInteger rowCount = new AtomicInteger(); + try (AsyncResultSet rs = connection.executeQueryAsync(statement, Options.bufferRows(5))) { + res = + rs.setCallback( + executor, + resultSet -> { + while (true) { + switch (resultSet.tryNext()) { + case OK: + rowCount.incrementAndGet(); + break; + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + } + } + }); + return new QueryResult(res, rowCount); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionAsyncApiTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionAsyncApiTest.java new file mode 100644 index 000000000000..a2b176742e17 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionAsyncApiTest.java @@ -0,0 +1,886 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.fail; + +import com.google.api.core.ApiFuture; +import com.google.api.core.SettableApiFuture; +import com.google.cloud.spanner.AsyncResultSet; +import com.google.cloud.spanner.AsyncResultSet.CallbackResponse; +import com.google.cloud.spanner.AsyncResultSet.ReadyCallback; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.ForceCloseSpannerFunction; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerApiFutures; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.ConnectionOptions.Builder; +import com.google.cloud.spanner.connection.SpannerPool.CheckAndCloseSpannersMode; +import com.google.cloud.spanner.connection.StatementExecutor.StatementExecutorType; +import com.google.cloud.spanner.connection.StatementResult.ResultType; +import com.google.common.base.Function; +import com.google.common.collect.Collections2; +import 
com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.google.protobuf.AbstractMessage; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ConnectionAsyncApiTest extends AbstractMockServerTest { + private static ExecutorService executor = Executors.newSingleThreadExecutor(); + private static final Function AUTOCOMMIT = + input -> { + input.setAutocommit(true); + return null; + }; + private static final Function READ_ONLY = + input -> { + input.setReadOnly(true); + return null; + }; + private static final Function READ_WRITE = input -> null; + + @AfterClass + public static void stopExecutor() { + executor.shutdown(); + } + + @Before + public void setup() { + try (Connection connection = createConnection()) { + connection.getDialect(); + } + } + + @Override + protected Builder configureConnectionOptions(Builder builder) { + return builder.setStatementExecutorType(StatementExecutorType.PLATFORM_THREAD); + } + + @After + public void reset() { + mockSpanner.removeAllExecutionTimes(); + executor.shutdownNow(); + executor = Executors.newSingleThreadExecutor(); + } + + @Test + public void testExecuteQueryAsyncAutocommit() { + testExecuteQueryAsync(AUTOCOMMIT); + } + + @Test + public void testExecuteQueryAsyncAutocommitIsNonBlocking() { + testExecuteQueryAsyncIsNonBlocking(AUTOCOMMIT); + } + + @Test + public void testExecuteQueryAsStatementAsyncAutocommit() { 
+ testExecuteQueryAsync(AUTOCOMMIT, true); + } + + @Test + public void testExecuteQueryAutocommit() { + testExecuteQuery(AUTOCOMMIT); + } + + @Test + public void testExecuteUpdateAsyncAutocommit() { + testExecuteUpdateAsync(AUTOCOMMIT); + } + + @Test + public void testExecuteUpdateAsyncAutocommitIsNonBlocking() { + testExecuteUpdateAsyncIsNonBlocking(AUTOCOMMIT); + } + + @Test + public void testExecuteUpdateAsStatementAsyncAutocommit() { + testExecuteUpdateAsync(AUTOCOMMIT, true); + } + + @Test + public void testExecuteUpdateAutocommit() { + testExecuteUpdate(AUTOCOMMIT); + } + + @Test + public void testExecuteBatchUpdateAsyncAutocommit() { + testExecuteBatchUpdateAsync(AUTOCOMMIT); + } + + @Test + public void testExecuteBatchUpdateAsyncAutocommitIsNonBlocking() { + testExecuteBatchUpdateAsyncIsNonBlocking(AUTOCOMMIT); + } + + @Test + public void testExecuteBatchUpdateAutocommit() { + testExecuteBatchUpdate(AUTOCOMMIT); + } + + @Test + public void testWriteAsyncAutocommit() { + testWriteAsync(AUTOCOMMIT); + } + + @Test + public void testWriteAutocommit() { + testWrite(AUTOCOMMIT); + } + + @Test + public void testExecuteQueryAsyncReadOnly() { + testExecuteQueryAsync(READ_ONLY); + } + + @Test + public void testExecuteQueryAsyncReadOnlyIsNonBlocking() { + testExecuteQueryAsyncIsNonBlocking(READ_ONLY); + } + + @Test + public void testExecuteQueryAsStatementAsyncReadOnly() { + testExecuteQueryAsync(READ_ONLY, true); + } + + @Test + public void testExecuteQueryReadOnly() { + testExecuteQuery(READ_ONLY); + } + + @Test + public void testExecuteQueryAsyncReadWrite() { + testExecuteQueryAsync(READ_WRITE); + } + + @Test + public void testExecuteQueryAsyncReadWriteIsNonBlocking() { + testExecuteQueryAsyncIsNonBlocking(READ_WRITE); + } + + @Test + public void testExecuteQueryAsStatementAsyncReadWrite() { + testExecuteQueryAsync(READ_WRITE, true); + } + + @Test + public void testExecuteQueryReadWrite() { + testExecuteQuery(READ_WRITE); + } + + @Test + public void 
testExecuteUpdateAsyncReadWrite() { + testExecuteUpdateAsync(READ_WRITE); + } + + @Test + public void testExecuteUpdateAsyncReadWriteIsNonBlocking() { + testExecuteUpdateAsyncIsNonBlocking(READ_WRITE); + } + + @Test + public void testExecuteUpdateAsStatementAsyncReadWrite() { + testExecuteUpdateAsync(READ_WRITE, true); + } + + @Test + public void testExecuteUpdateReadWrite() { + testExecuteUpdate(READ_WRITE); + } + + @Test + public void testExecuteBatchUpdateAsyncReadWrite() { + testExecuteBatchUpdateAsync(READ_WRITE); + } + + @Test + public void testExecuteBatchUpdateAsyncReadWriteIsNonBlocking() { + testExecuteBatchUpdateAsyncIsNonBlocking(READ_WRITE); + } + + @Test + public void testExecuteBatchUpdateReadWrite() { + testExecuteBatchUpdate(READ_WRITE); + } + + @Test + public void testBufferedWriteReadWrite() { + testBufferedWrite(READ_WRITE); + } + + @Test + public void testReadWriteMultipleAsyncStatements() { + try (Connection connection = createConnection()) { + assertThat(connection.isAutocommit()).isFalse(); + ApiFuture update1 = connection.executeUpdateAsync(INSERT_STATEMENT); + ApiFuture update2 = connection.executeUpdateAsync(INSERT_STATEMENT); + ApiFuture batch = + connection.executeBatchUpdateAsync(ImmutableList.of(INSERT_STATEMENT, INSERT_STATEMENT)); + final SettableApiFuture rowCount = SettableApiFuture.create(); + try (AsyncResultSet rs = connection.executeQueryAsync(SELECT_RANDOM_STATEMENT)) { + rs.setCallback( + executor, + new ReadyCallback() { + int count = 0; + + @Override + public CallbackResponse cursorReady(AsyncResultSet resultSet) { + try { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + rowCount.set(count); + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + count++; + } + } + } catch (SpannerException e) { + rowCount.setException(e); + return CallbackResponse.DONE; + } + } + }); + } + ApiFuture commit = connection.commitAsync(); + 
assertThat(get(update1)).isEqualTo(UPDATE_COUNT); + assertThat(get(update2)).isEqualTo(UPDATE_COUNT); + assertThat(get(batch)).asList().containsExactly(1L, 1L); + assertThat(get(rowCount)).isEqualTo(RANDOM_RESULT_SET_ROW_COUNT); + assertNull(get(commit)); + + // Get the last commit request. + CommitRequest commitRequest = + mockSpanner.getRequestsOfType(CommitRequest.class).stream() + .reduce((first, second) -> second) + .get(); + // Verify the order of the statements on the server. + List requests = + Lists.newArrayList( + Collections2.filter( + mockSpanner.getRequests(), + input -> + (input instanceof ExecuteSqlRequest + && ((ExecuteSqlRequest) input) + .getSession() + .equals(commitRequest.getSession())) + || (input instanceof ExecuteBatchDmlRequest + && ((ExecuteBatchDmlRequest) input) + .getSession() + .equals(commitRequest.getSession())))); + assertThat(requests).hasSize(4); + assertThat(requests.get(0)).isInstanceOf(ExecuteSqlRequest.class); + assertThat(((ExecuteSqlRequest) requests.get(0)).getSeqno()).isEqualTo(1L); + assertThat(requests.get(1)).isInstanceOf(ExecuteSqlRequest.class); + assertThat(((ExecuteSqlRequest) requests.get(1)).getSeqno()).isEqualTo(2L); + assertThat(requests.get(2)).isInstanceOf(ExecuteBatchDmlRequest.class); + assertThat(((ExecuteBatchDmlRequest) requests.get(2)).getSeqno()).isEqualTo(3L); + assertThat(requests.get(3)).isInstanceOf(ExecuteSqlRequest.class); + assertThat(((ExecuteSqlRequest) requests.get(3)).getSeqno()).isEqualTo(4L); + } + } + + @Test + public void testAutocommitRunBatch() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.execute(Statement.of("START BATCH DML")); + connection.execute(INSERT_STATEMENT); + connection.execute(INSERT_STATEMENT); + StatementResult res = connection.execute(Statement.of("RUN BATCH")); + assertThat(res.getResultType()).isEqualTo(ResultType.RESULT_SET); + try (ResultSet rs = res.getResultSet()) { + assertThat(rs.next()).isTrue(); + 
assertThat(rs.getLongList(0)).containsExactly(1L, 1L); + assertThat(rs.next()).isFalse(); + } + } + } + + @Test + public void testDmlBatchUpdateCount() { + Arrays.asList(Dialect.POSTGRESQL, Dialect.GOOGLE_STANDARD_SQL) + .forEach( + dialect -> { + String prefix = dialect == Dialect.POSTGRESQL ? "spanner." : ""; + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.detectDialectResult(dialect)); + SpannerPool.closeSpannerPool(); + try { + try (Connection connection = createConnection()) { + connection.execute( + Statement.of("set local " + prefix + "batch_dml_update_count = 1")); + connection.execute(Statement.of("START BATCH DML")); + List statements = Arrays.asList(INSERT_STATEMENT, INSERT_STATEMENT); + long[] updateCounts = connection.executeBatchUpdate(statements); + assertThat(updateCounts).asList().containsExactly(1L, 1L); + connection.execute(Statement.of("RUN BATCH")); + connection.commit(); + + connection.execute(Statement.of("START BATCH DML")); + statements = Arrays.asList(INSERT_STATEMENT, INSERT_STATEMENT); + updateCounts = connection.executeBatchUpdate(statements); + assertThat(updateCounts).asList().containsExactly(-1L, -1L); + connection.execute(Statement.of("RUN BATCH")); + connection.commit(); + } + } finally { + SpannerPool.closeSpannerPool(); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.detectDialectResult( + Dialect.GOOGLE_STANDARD_SQL)); + } + }); + } + + @Test + public void testAutocommitRunBatchAsync() { + try (Connection connection = createConnection()) { + connection.executeAsync(Statement.of("SET AUTOCOMMIT = TRUE")); + connection.executeAsync(Statement.of("START BATCH DML")); + connection.executeAsync(INSERT_STATEMENT); + connection.executeAsync(INSERT_STATEMENT); + ApiFuture res = connection.runBatchAsync(); + assertThat(get(res)).asList().containsExactly(1L, 1L); + } + } + + @Test + public void testExecuteDdlAsync() { + try (Connection connection = createConnection()) { + 
connection.executeAsync(Statement.of("SET AUTOCOMMIT = TRUE")); + connection.executeAsync(Statement.of("START BATCH DDL")); + connection.executeAsync(Statement.of("CREATE TABLE FOO (ID INT64) PRIMARY KEY (ID)")); + connection.executeAsync(Statement.of("ABORT BATCH")); + } + } + + @Test + public void testExecuteInvalidStatementAsync() { + try (Connection connection = createConnection()) { + SpannerException e = + assertThrows( + SpannerException.class, + () -> + connection.executeAsync( + Statement.of("UPSERT INTO FOO (ID, VAL) VALUES (1, 'foo')"))); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + } + } + + @Test + public void testExecuteClientSideQueryAsync() { + try (Connection connection = createConnection()) { + connection.executeAsync(Statement.of("SET AUTOCOMMIT = TRUE")); + final SettableApiFuture autocommit = SettableApiFuture.create(); + try (AsyncResultSet rs = + connection.executeQueryAsync(Statement.of("SHOW VARIABLE AUTOCOMMIT"))) { + rs.setCallback( + executor, + resultSet -> { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + autocommit.set(resultSet.getBoolean("AUTOCOMMIT")); + } + } + }); + } + assertThat(get(autocommit)).isTrue(); + } + } + + @Test + public void testExecuteInvalidQueryAsync() { + try (Connection connection = createConnection()) { + try { + connection.executeQueryAsync(INSERT_STATEMENT); + fail("Missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + } + } + } + + @Test + public void testExecuteInvalidUpdateAsync() { + try (Connection connection = createConnection()) { + try { + connection.executeUpdateAsync(SELECT_RANDOM_STATEMENT); + fail("Missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + } + } + } + + @Test + public void 
testExecuteInvalidBatchUpdateAsync() { + try (Connection connection = createConnection()) { + try { + connection.executeBatchUpdateAsync( + ImmutableList.of(INSERT_STATEMENT, SELECT_RANDOM_STATEMENT)); + fail("Missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + } + } + } + + @Test + public void testRunEmptyBatchAsync() { + try (Connection connection = createConnection()) { + connection.startBatchDml(); + assertThat(get(connection.runBatchAsync())).isEqualTo(new long[0]); + } + } + + private void testExecuteQueryAsync(Function connectionConfigurator) { + testExecuteQueryAsync(connectionConfigurator, false); + } + + private void testExecuteQueryAsync( + Function connectionConfigurator, boolean executeAsStatement) { + ApiFuture res; + try (Connection connection = createConnection()) { + connectionConfigurator.apply(connection); + for (boolean timeout : new boolean[] {true, false}) { + final AtomicInteger rowCount = new AtomicInteger(); + final AtomicBoolean receivedTimeout = new AtomicBoolean(); + if (timeout) { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(10, 0)); + connection.setStatementTimeout(1L, TimeUnit.NANOSECONDS); + } else { + mockSpanner.removeAllExecutionTimes(); + connection.clearStatementTimeout(); + } + try (AsyncResultSet rs = + executeAsStatement + ? 
connection.executeAsync(SELECT_RANDOM_STATEMENT).getResultSetAsync() + : connection.executeQueryAsync(SELECT_RANDOM_STATEMENT)) { + res = + rs.setCallback( + executor, + resultSet -> { + try { + while (true) { + switch (resultSet.tryNext()) { + case OK: + rowCount.incrementAndGet(); + break; + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + } + } + } catch (SpannerException e) { + receivedTimeout.set(e.getErrorCode() == ErrorCode.DEADLINE_EXCEEDED); + throw e; + } + }); + } + try { + SpannerApiFutures.get(res); + assertThat(rowCount.get()).isEqualTo(RANDOM_RESULT_SET_ROW_COUNT); + if (connection.isReadOnly() || !connection.isInTransaction()) { + assertThat(connection.getReadTimestamp()).isNotNull(); + } + assertThat(timeout).isFalse(); + } catch (SpannerException e) { + assertThat(e.getSuppressed()).hasLength(1); + assertThat(e.getSuppressed()[0].getMessage()).contains(SELECT_RANDOM_STATEMENT.getSql()); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.DEADLINE_EXCEEDED); + assertThat(timeout).isTrue(); + assertThat(receivedTimeout.get()).isTrue(); + // Start a new transaction if a timeout occurred on a read/write transaction, as that will + // invalidate that transaction. 
+ if (!connection.isReadOnly() && connection.isInTransaction()) { + connection.clearStatementTimeout(); + connection.rollback(); + } + } + } + } + } + + private void testExecuteQuery(Function connectionConfigurator) { + long rowCount = 0L; + try (Connection connection = createConnection()) { + connectionConfigurator.apply(connection); + for (boolean timeout : new boolean[] {true, false}) { + if (timeout) { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(10, 0)); + connection.setStatementTimeout(1L, TimeUnit.NANOSECONDS); + } else { + mockSpanner.removeAllExecutionTimes(); + connection.clearStatementTimeout(); + } + try (ResultSet rs = connection.executeQuery(SELECT_RANDOM_STATEMENT)) { + while (rs.next()) { + rowCount++; + } + assertThat(rowCount).isEqualTo(RANDOM_RESULT_SET_ROW_COUNT); + if (connection.isReadOnly() || !connection.isInTransaction()) { + assertThat(connection.getReadTimestamp()).isNotNull(); + } + assertThat(timeout).isFalse(); + } catch (SpannerException e) { + assertThat(timeout).isTrue(); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.DEADLINE_EXCEEDED); + // Start a new transaction if a timeout occurred on a read/write transaction, as that will + // invalidate that transaction. 
+ if (!connection.isReadOnly() && connection.isInTransaction()) { + connection.clearStatementTimeout(); + connection.rollback(); + } + } + } + } + } + + private void testExecuteUpdateAsync(Function connectionConfigurator) { + testExecuteUpdateAsync(connectionConfigurator, false); + } + + private void testExecuteUpdateAsync( + Function connectionConfigurator, boolean executeAsStatement) { + try (Connection connection = createConnection()) { + connectionConfigurator.apply(connection); + for (boolean timeout : new boolean[] {true, false}) { + if (timeout) { + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(10, 0)); + connection.setStatementTimeout(1L, TimeUnit.NANOSECONDS); + } else { + mockSpanner.removeAllExecutionTimes(); + connection.clearStatementTimeout(); + } + ApiFuture updateCount = + executeAsStatement + ? connection.executeAsync(INSERT_STATEMENT).getUpdateCountAsync() + : connection.executeUpdateAsync(INSERT_STATEMENT); + try { + assertThat(get(updateCount)).isEqualTo(1L); + if (connection.isInTransaction()) { + connection.commitAsync(); + } + assertThat(connection.getCommitTimestamp()).isNotNull(); + assertThat(timeout).isFalse(); + } catch (SpannerException e) { + assertThat(timeout).isTrue(); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.DEADLINE_EXCEEDED); + // Start a new transaction if a timeout occurred on a read/write transaction, as that will + // invalidate that transaction. 
+ if (!connection.isReadOnly() && connection.isInTransaction()) { + connection.clearStatementTimeout(); + connection.rollback(); + } + } + } + } + } + + private void testExecuteUpdate(Function connectionConfigurator) { + try (Connection connection = createConnection()) { + connectionConfigurator.apply(connection); + for (boolean timeout : new boolean[] {true, false}) { + if (timeout) { + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(10, 0)); + connection.setStatementTimeout(1L, TimeUnit.NANOSECONDS); + } else { + mockSpanner.removeAllExecutionTimes(); + connection.clearStatementTimeout(); + } + try { + long updateCount = connection.executeUpdate(INSERT_STATEMENT); + assertThat(updateCount).isEqualTo(1L); + if (connection.isInTransaction()) { + connection.commit(); + } + assertThat(connection.getCommitTimestamp()).isNotNull(); + assertThat(timeout).isFalse(); + } catch (SpannerException e) { + assertThat(timeout).isTrue(); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.DEADLINE_EXCEEDED); + // Start a new transaction if a timeout occurred on a read/write transaction, as that will + // invalidate that transaction. 
+ if (!connection.isReadOnly() && connection.isInTransaction()) { + connection.clearStatementTimeout(); + connection.rollback(); + } + } + } + } + } + + private void testExecuteBatchUpdateAsync(Function connectionConfigurator) { + try (Connection connection = createConnection()) { + connectionConfigurator.apply(connection); + for (boolean timeout : new boolean[] {true, false}) { + if (timeout) { + mockSpanner.setExecuteBatchDmlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(10, 0)); + connection.setStatementTimeout(1L, TimeUnit.NANOSECONDS); + } else { + mockSpanner.removeAllExecutionTimes(); + connection.clearStatementTimeout(); + } + ApiFuture updateCounts = + connection.executeBatchUpdateAsync( + ImmutableList.of(INSERT_STATEMENT, INSERT_STATEMENT)); + try { + assertThat(get(updateCounts)).asList().containsExactly(1L, 1L); + if (connection.isInTransaction()) { + connection.commitAsync(); + } + assertThat(connection.getCommitTimestamp()).isNotNull(); + assertThat(timeout).isFalse(); + } catch (SpannerException e) { + assertThat(timeout).isTrue(); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.DEADLINE_EXCEEDED); + // Start a new transaction if a timeout occurred on a read/write transaction, as that will + // invalidate that transaction. 
+ if (!connection.isReadOnly() && connection.isInTransaction()) { + connection.clearStatementTimeout(); + connection.rollback(); + } + } + } + } + } + + private void testExecuteBatchUpdate(Function connectionConfigurator) { + try (Connection connection = createConnection()) { + connectionConfigurator.apply(connection); + for (boolean timeout : new boolean[] {true, false}) { + if (timeout) { + mockSpanner.setExecuteBatchDmlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(10, 0)); + connection.setStatementTimeout(1L, TimeUnit.NANOSECONDS); + } else { + mockSpanner.removeAllExecutionTimes(); + connection.clearStatementTimeout(); + } + try { + long[] updateCounts = + connection.executeBatchUpdate(ImmutableList.of(INSERT_STATEMENT, INSERT_STATEMENT)); + assertThat(updateCounts).asList().containsExactly(1L, 1L); + if (connection.isInTransaction()) { + connection.commit(); + } + assertThat(connection.getCommitTimestamp()).isNotNull(); + assertThat(timeout).isFalse(); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.DEADLINE_EXCEEDED); + assertThat(timeout).isTrue(); + // Start a new transaction if a timeout occurred on a read/write transaction, as that will + // invalidate that transaction. + if (!connection.isReadOnly() && connection.isInTransaction()) { + connection.clearStatementTimeout(); + connection.rollback(); + } + } + } + } + // Close the Spanner pool to prevent requests from this test from interfering with other tests. 
+ SpannerPool.INSTANCE.checkAndCloseSpanners( + CheckAndCloseSpannersMode.ERROR, + new ForceCloseSpannerFunction(100L, TimeUnit.MILLISECONDS)); + } + + private void testWriteAsync(Function connectionConfigurator) { + try (Connection connection = createConnection()) { + connectionConfigurator.apply(connection); + for (boolean timeout : new boolean[] {true, false}) { + if (timeout) { + mockSpanner.setCommitExecutionTime(SimulatedExecutionTime.ofMinimumAndRandomTime(10, 0)); + connection.setStatementTimeout(1L, TimeUnit.NANOSECONDS); + } else { + mockSpanner.removeAllExecutionTimes(); + connection.clearStatementTimeout(); + } + ApiFuture fut = + connection.writeAsync( + ImmutableList.of( + Mutation.newInsertBuilder("foo").build(), + Mutation.newInsertBuilder("bar").build())); + try { + assertThat(get(fut)).isNull(); + assertThat(connection.getCommitTimestamp()).isNotNull(); + assertThat(timeout).isFalse(); + } catch (SpannerException e) { + assertThat(timeout).isTrue(); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.DEADLINE_EXCEEDED); + } + } + } + } + + private void testWrite(Function connectionConfigurator) { + try (Connection connection = createConnection()) { + connectionConfigurator.apply(connection); + for (boolean timeout : new boolean[] {true, false}) { + if (timeout) { + mockSpanner.setCommitExecutionTime(SimulatedExecutionTime.ofMinimumAndRandomTime(10, 0)); + connection.setStatementTimeout(1L, TimeUnit.NANOSECONDS); + } else { + mockSpanner.removeAllExecutionTimes(); + connection.clearStatementTimeout(); + } + try { + connection.write( + ImmutableList.of( + Mutation.newInsertBuilder("foo").build(), + Mutation.newInsertBuilder("bar").build())); + assertThat(connection.getCommitTimestamp()).isNotNull(); + assertThat(timeout).isFalse(); + } catch (SpannerException e) { + assertThat(timeout).isTrue(); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.DEADLINE_EXCEEDED); + } + } + } + } + + private void testBufferedWrite(Function connectionConfigurator) { 
+ try (Connection connection = createConnection()) { + connectionConfigurator.apply(connection); + for (boolean timeout : new boolean[] {true, false}) { + if (timeout) { + mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(1000, 0)); + connection.setStatementTimeout(1L, TimeUnit.NANOSECONDS); + } else { + mockSpanner.removeAllExecutionTimes(); + connection.clearStatementTimeout(); + } + try { + connection.bufferedWrite( + ImmutableList.of( + Mutation.newInsertBuilder("foo").build(), + Mutation.newInsertBuilder("bar").build())); + connection.commitAsync(); + assertThat(connection.getCommitTimestamp()).isNotNull(); + assertThat(timeout).isFalse(); + } catch (SpannerException e) { + assertThat(timeout).isTrue(); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.DEADLINE_EXCEEDED); + connection.clearStatementTimeout(); + connection.rollbackAsync(); + } + } + } + } + + private void testExecuteQueryAsyncIsNonBlocking( + Function connectionConfigurator) { + ApiFuture res; + final AtomicInteger rowCount = new AtomicInteger(); + mockSpanner.freeze(); + try (Connection connection = createConnection()) { + connectionConfigurator.apply(connection); + try (AsyncResultSet rs = connection.executeQueryAsync(SELECT_RANDOM_STATEMENT)) { + res = + rs.setCallback( + executor, + resultSet -> { + while (true) { + switch (resultSet.tryNext()) { + case OK: + rowCount.incrementAndGet(); + break; + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + } + } + }); + mockSpanner.unfreeze(); + } + SpannerApiFutures.get(res); + assertThat(rowCount.get()).isEqualTo(RANDOM_RESULT_SET_ROW_COUNT); + } + } + + private void testExecuteUpdateAsyncIsNonBlocking( + Function connectionConfigurator) { + mockSpanner.freeze(); + try (Connection connection = createConnection()) { + connectionConfigurator.apply(connection); + ApiFuture updateCount = connection.executeUpdateAsync(INSERT_STATEMENT); + if 
(connection.isInTransaction()) { + connection.commitAsync(); + } + mockSpanner.unfreeze(); + assertThat(get(updateCount)).isEqualTo(1L); + assertThat(connection.getCommitTimestamp()).isNotNull(); + } + } + + private void testExecuteBatchUpdateAsyncIsNonBlocking( + Function connectionConfigurator) { + mockSpanner.freeze(); + try (Connection connection = createConnection()) { + connectionConfigurator.apply(connection); + ApiFuture updateCounts = + connection.executeBatchUpdateAsync(ImmutableList.of(INSERT_STATEMENT, INSERT_STATEMENT)); + if (connection.isInTransaction()) { + connection.commitAsync(); + } + mockSpanner.unfreeze(); + assertThat(get(updateCounts)).asList().containsExactly(1L, 1L); + assertThat(connection.getCommitTimestamp()).isNotNull(); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplAutocommitReadOnlyTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplAutocommitReadOnlyTest.java new file mode 100644 index 000000000000..58e2c6f3e77e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplAutocommitReadOnlyTest.java @@ -0,0 +1,938 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.TimestampBound.Mode; +import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; +import java.util.concurrent.TimeUnit; +import org.junit.experimental.runners.Enclosed; +import org.junit.runner.RunWith; + +/** + * The tests in this class do not need to be implemented for client libraries in other programming + * languages, as all test cases are covered by the file ConnectionImplGeneratedSqlScriptTest.sql + */ +@RunWith(Enclosed.class) +public class ConnectionImplAutocommitReadOnlyTest { + + public static class ConnectionImplAutocommitReadOnlyNoActionsTest + extends AbstractConnectionImplTest { + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=TRUE;"); + connection.setReadOnly(true); + log("SET AUTOCOMMIT=TRUE;"); + connection.setAutocommit(true); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + // there is no transaction + return false; + } + + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return false; + } + + 
@Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return true; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return false; + } + + @Override + boolean isRollbackAllowed() { + return false; + } + + @Override + boolean expectedIsInTransaction() { + return false; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + // no query has been executed yet + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // read-only + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE || type == StatementType.QUERY; + } + + @Override + boolean isWriteAllowed() { + return false; + } + + @Override + boolean isStartBatchDmlAllowed() { + return false; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplAutocommitReadOnlyAfterSelectTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=TRUE;"); + connection.setReadOnly(true); + log("SET AUTOCOMMIT=TRUE;"); + connection.setAutocommit(true); + // no call to next() on ResultSet + log(SELECT + ";"); + connection.executeQuery(Statement.of(SELECT)); + return connection; + } + + @Override + boolean 
isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + // there is no transaction + return false; + } + + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return false; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return true; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return false; + } + + @Override + boolean isRollbackAllowed() { + return false; + } + + @Override + boolean expectedIsInTransaction() { + return false; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + // last statement was a query, next() has not yet been called, but as the connection api + // returns a directly executed resultset, the read timestamp is already available + return true; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // read-only + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE || type == StatementType.QUERY; + } + + @Override + boolean isWriteAllowed() { + return false; + } + + @Override + boolean isStartBatchDmlAllowed() { + return false; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + 
+ @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplAutocommitReadOnlyAfterSelectAndResultSetNextTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=TRUE;"); + connection.setReadOnly(true); + log("SET AUTOCOMMIT=TRUE;"); + connection.setAutocommit(true); + log(SELECT + ";"); + connection.executeQuery(Statement.of(SELECT)).next(); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + // there is no transaction + return false; + } + + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return false; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return true; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return false; + } + + @Override + boolean isRollbackAllowed() { + return false; + } + + @Override + boolean 
expectedIsInTransaction() { + return false; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + // last statement was a query + return true; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // read-only + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE || type == StatementType.QUERY; + } + + @Override + boolean isWriteAllowed() { + return false; + } + + @Override + boolean isStartBatchDmlAllowed() { + return false; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplAutocommitReadOnlyAfterBeginTransactionTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=TRUE;"); + connection.setReadOnly(true); + log("SET AUTOCOMMIT=TRUE;"); + connection.setAutocommit(true); + log("BEGIN TRANSACTION;"); + connection.beginTransaction(); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return false; + } + + @Override + boolean isSetReadOnlyAllowed() { + return false; + } + + @Override + boolean isBeginTransactionAllowed() { + return false; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + // connection is in read-only mode + 
return mode == TransactionMode.READ_ONLY_TRANSACTION; + } + + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return true; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + // in a transaction, only exact allowed + return mode == Mode.STRONG || mode == Mode.EXACT_STALENESS || mode == Mode.READ_TIMESTAMP; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean isRollbackAllowed() { + return true; + } + + @Override + boolean expectedIsInTransaction() { + return true; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + // no query executed yet + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // read-only + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE || type == StatementType.QUERY; + } + + @Override + boolean isWriteAllowed() { + return false; + } + + @Override + boolean isStartBatchDmlAllowed() { + return false; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplAutocommitReadOnlyAfterTemporaryTransactionTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + 
logWithNamespace("SET %sREADONLY=TRUE;"); + connection.setReadOnly(true); + log("SET AUTOCOMMIT=TRUE;"); + connection.setAutocommit(true); + log("BEGIN TRANSACTION;"); + connection.beginTransaction(); + log(SELECT + ";"); + connection.execute(Statement.of(SELECT)).getResultSet().next(); + log("COMMIT;"); + connection.commit(); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + // no transaction + return false; + } + + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return false; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + // readonly + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return true; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return false; + } + + @Override + boolean isRollbackAllowed() { + return false; + } + + @Override + boolean expectedIsInTransaction() { + return false; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + // last action was a transaction that ended with a select query + return true; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // read-only + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return 
type == StatementType.CLIENT_SIDE || type == StatementType.QUERY; + } + + @Override + boolean isWriteAllowed() { + return false; + } + + @Override + boolean isStartBatchDmlAllowed() { + return false; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplAutocommitReadOnlyAfterSetReadOnlyMaxStalenessTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=TRUE;"); + connection.setReadOnly(true); + log("SET AUTOCOMMIT=TRUE;"); + connection.setAutocommit(true); + TimestampBound staleness = TimestampBound.ofMaxStaleness(10L, TimeUnit.SECONDS); + logWithNamespace( + "SET %sREAD_ONLY_STALENESS='" + + ReadOnlyStalenessUtil.timestampBoundToString(staleness) + + "';"); + connection.setReadOnlyStaleness(staleness); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + // there is no transaction + return false; + } + + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return false; + } + + @Override + boolean 
isSetAutocommitDmlModeAllowed() { + // readonly + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return true; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return false; + } + + @Override + boolean isRollbackAllowed() { + return false; + } + + @Override + boolean expectedIsInTransaction() { + return false; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + // no query executed yet + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // read-only + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE || type == StatementType.QUERY; + } + + @Override + boolean isWriteAllowed() { + return false; + } + + @Override + boolean isStartBatchDmlAllowed() { + return false; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplAutocommitReadWriteTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplAutocommitReadWriteTest.java new file mode 100644 index 000000000000..7edfe704c757 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplAutocommitReadWriteTest.java @@ -0,0 +1,1362 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.TimestampBound.Mode; +import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; +import org.junit.experimental.runners.Enclosed; +import org.junit.runner.RunWith; + +/** + * The tests in this class do not need to be implemented for client libraries in other programming + * languages, as all test cases are covered by the file ConnectionImplGeneratedSqlScriptTest.sql + */ +@RunWith(Enclosed.class) +public class ConnectionImplAutocommitReadWriteTest { + + public static class ConnectionImplAutocommitReadWriteNoActionsTest + extends AbstractConnectionImplTest { + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + log("SET AUTOCOMMIT=TRUE;"); + connection.setAutocommit(true); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + 
boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + // there is no transaction + return false; + } + + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return false; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return true; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return false; + } + + @Override + boolean isRollbackAllowed() { + return false; + } + + @Override + boolean expectedIsInTransaction() { + return false; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + // no query has been executed + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return true; + } + + @Override + boolean isWriteAllowed() { + return true; + } + + @Override + boolean isStartBatchDmlAllowed() { + return true; + } + + @Override + boolean isStartBatchDdlAllowed() { + return true; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplAutocommitReadWriteAfterSelectTest + extends AbstractConnectionImplTest { + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + 
logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + log("SET AUTOCOMMIT=TRUE;"); + connection.setAutocommit(true); + // no next() called + log(SELECT + ";"); + connection.execute(Statement.of(SELECT)); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + // there is no transaction + return false; + } + + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return false; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return true; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return false; + } + + @Override + boolean isRollbackAllowed() { + return false; + } + + @Override + boolean expectedIsInTransaction() { + return false; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + // last statement was a query, next() has not yet been called, but as the connection api + // returns a directly executed resultset, the read timestamp is already available + return true; + } + + @Override + boolean isGetCommitTimestampAllowed() { + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return true; + } + + 
@Override + boolean isWriteAllowed() { + return true; + } + + @Override + boolean isStartBatchDmlAllowed() { + return true; + } + + @Override + boolean isStartBatchDdlAllowed() { + return true; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplAutocommitReadWriteAfterSelectAndResultSetNextTest + extends AbstractConnectionImplTest { + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + log("SET AUTOCOMMIT=TRUE;"); + connection.setAutocommit(true); + // the @expect ensures next() is called + log("@EXPECT RESULT_SET 'TEST',1"); + log(SELECT + ";"); + connection.execute(Statement.of(SELECT)).getResultSet().next(); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + // there is no transaction + return false; + } + + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return false; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean 
isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return true; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return false; + } + + @Override + boolean isRollbackAllowed() { + return false; + } + + @Override + boolean expectedIsInTransaction() { + return false; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + // the last action was a query that has retrieved data + return true; + } + + @Override + boolean isGetCommitTimestampAllowed() { + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return true; + } + + @Override + boolean isWriteAllowed() { + return true; + } + + @Override + boolean isStartBatchDmlAllowed() { + return true; + } + + @Override + boolean isStartBatchDdlAllowed() { + return true; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplAutocommitReadWriteAfterUpdateTest + extends AbstractConnectionImplTest { + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + log("SET AUTOCOMMIT=TRUE;"); + connection.setAutocommit(true); + log(UPDATE + ";"); + connection.execute(Statement.of(UPDATE)); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + 
boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + // there is no transaction + return false; + } + + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return false; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return true; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return false; + } + + @Override + boolean isRollbackAllowed() { + return false; + } + + @Override + boolean expectedIsInTransaction() { + return false; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + return true; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return true; + } + + @Override + boolean isWriteAllowed() { + return true; + } + + @Override + boolean isStartBatchDmlAllowed() { + return true; + } + + @Override + boolean isStartBatchDdlAllowed() { + return true; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplAutocommitReadWriteAfterDdlTest + extends AbstractConnectionImplTest { + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=FALSE;"); + 
connection.setReadOnly(false); + log("SET AUTOCOMMIT=TRUE;"); + connection.setAutocommit(true); + log(DDL + ";"); + connection.execute(Statement.of(DDL)); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + // there is no transaction + return false; + } + + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return false; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return true; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return false; + } + + @Override + boolean isRollbackAllowed() { + return false; + } + + @Override + boolean expectedIsInTransaction() { + return false; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return true; + } + + @Override + boolean isWriteAllowed() { + return true; + } + + @Override + boolean isStartBatchDmlAllowed() { + return true; + } + + @Override + boolean isStartBatchDdlAllowed() { + return true; + } + + @Override + boolean isRunBatchAllowed() { + 
return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplAutocommitReadWriteAfterBeginTransactionTest + extends AbstractConnectionImplTest { + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + log("SET AUTOCOMMIT=TRUE;"); + connection.setAutocommit(true); + log("BEGIN TRANSACTION;"); + connection.beginTransaction(); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return false; + } + + @Override + boolean isSetReadOnlyAllowed() { + return false; + } + + @Override + boolean isBeginTransactionAllowed() { + return false; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + // in temporary transaction + return true; + } + + boolean isSetTransactionTagAllowed() { + return true; + } + + @Override + boolean isGetTransactionModeAllowed() { + return true; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return mode == Mode.STRONG || mode == Mode.EXACT_STALENESS || mode == Mode.READ_TIMESTAMP; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean isRollbackAllowed() { + return true; + } + + @Override + boolean 
expectedIsInTransaction() { + return true; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + // default is a read-write transaction + return type == StatementType.CLIENT_SIDE + || type == StatementType.QUERY + || type == StatementType.UPDATE; + } + + @Override + boolean isWriteAllowed() { + return true; + } + + @Override + boolean isStartBatchDmlAllowed() { + return true; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplAutocommitReadWriteAfterTemporaryTransactionTest + extends AbstractConnectionImplTest { + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + log("SET AUTOCOMMIT=TRUE;"); + connection.setAutocommit(true); + log("BEGIN TRANSACTION;"); + connection.beginTransaction(); + log(UPDATE + ";"); + connection.execute(Statement.of(UPDATE)); + log("COMMIT;"); + connection.commit(); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + 
return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return false; + } + + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return true; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return false; + } + + @Override + boolean isRollbackAllowed() { + return false; + } + + @Override + boolean expectedIsInTransaction() { + return false; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + return true; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return true; + } + + @Override + boolean isWriteAllowed() { + return true; + } + + @Override + boolean isStartBatchDmlAllowed() { + return true; + } + + @Override + boolean isStartBatchDdlAllowed() { + return true; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplAutocommitReadWriteAfterBeginReadOnlyTransactionTest + extends AbstractConnectionImplTest { + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + log("SET AUTOCOMMIT=TRUE;"); + connection.setAutocommit(true); + log("BEGIN TRANSACTION;"); + 
connection.beginTransaction(); + log("SET TRANSACTION READ ONLY;"); + connection.setTransactionMode(TransactionMode.READ_ONLY_TRANSACTION); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return false; + } + + @Override + boolean isSetReadOnlyAllowed() { + return false; + } + + @Override + boolean isBeginTransactionAllowed() { + return false; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + // in temporary transaction + return true; + } + + boolean isSetTransactionTagAllowed() { + // Transaction is read-only + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return true; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return mode == Mode.STRONG || mode == Mode.EXACT_STALENESS || mode == Mode.READ_TIMESTAMP; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean isRollbackAllowed() { + return true; + } + + @Override + boolean expectedIsInTransaction() { + return true; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + // it's a read-only transaction + return type == StatementType.CLIENT_SIDE || type == StatementType.QUERY; + } + + @Override + boolean isWriteAllowed() { + return false; + } + + 
@Override + boolean isStartBatchDmlAllowed() { + return false; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplAutocommitReadWriteAfterStartDdlBatchTest + extends AbstractConnectionImplTest { + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + log("SET AUTOCOMMIT=TRUE;"); + connection.setAutocommit(true); + log("START BATCH DDL;"); + connection.startBatchDdl(); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isSetAutocommitAllowed() { + return false; + } + + @Override + boolean isSetReadOnlyAllowed() { + return false; + } + + @Override + boolean isBeginTransactionAllowed() { + return false; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + return false; + } + + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return false; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return false; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return false; + } + + @Override + 
boolean isCommitAllowed() { + return false; + } + + @Override + boolean isRollbackAllowed() { + return false; + } + + @Override + boolean expectedIsInTransaction() { + return false; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + // it's a DDL batch + return type == StatementType.CLIENT_SIDE || type == StatementType.DDL; + } + + @Override + boolean isWriteAllowed() { + return false; + } + + @Override + boolean isStartBatchDmlAllowed() { + return false; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + return true; + } + + @Override + boolean isAbortBatchAllowed() { + return true; + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplGeneratedSqlScriptTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplGeneratedSqlScriptTest.java new file mode 100644 index 000000000000..b9cfac150802 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplGeneratedSqlScriptTest.java @@ -0,0 +1,122 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.connection.ConnectionImplAutocommitReadOnlyTest.ConnectionImplAutocommitReadOnlyNoActionsTest; +import com.google.common.collect.ImmutableSet; +import com.google.common.reflect.ClassPath; +import com.google.common.reflect.ClassPath.ClassInfo; +import java.io.IOException; +import java.lang.reflect.Modifier; +import java.util.ArrayList; +import java.util.List; +import org.junit.Test; +import org.junit.runner.JUnitCore; +import org.junit.runner.Result; + +/** + * This test executes a SQL script that has been generated from the log of all the subclasses of + * {@link AbstractConnectionImplTest} and covers the same test cases. Its aim is to verify that the + * connection reacts correctly in all possible states (i.e. DML statements should not be allowed + * when the connection is in read-only mode, or when a read-only transaction has started etc.) + * + *

    A new test script can be generated by running: + * mvn -Ddo_log_statements=true exec:java -Dexec.mainClass=com.google.cloud.spanner.connection.SqlTestScriptsGenerator -Dexec.classpathScope="test" + * It is only necessary to generate a new test script if the behavior of {@link + * com.google.cloud.spanner.connection.Connection} has changed (for example calling COMMIT is + * currently not allowed in AUTOCOMMIT mode, but this has changed to be a no-op). A new test script + * must also be generated if additional test cases have been added to {@link + * AbstractConnectionImplTest}. + */ +public class ConnectionImplGeneratedSqlScriptTest extends AbstractSqlScriptTest { + private String getFileName() { + switch (dialect) { + case POSTGRESQL: + return "postgresql/ConnectionImplGeneratedSqlScriptTest.sql"; + case GOOGLE_STANDARD_SQL: + default: + return "ConnectionImplGeneratedSqlScriptTest.sql"; + } + } + + @Test + public void testGeneratedScript() throws Exception { + SqlScriptVerifier verifier = new SqlScriptVerifier(new TestConnectionProvider(dialect)); + verifier.verifyStatementsInFile(getFileName(), getClass(), true); + } + + /** + * Generates the test SQL script. It should be noted that running this method multiple times + * without having changed anything in the underlying code, could still yield different script + * files, as the script is generated by running a number of JUnit test cases. The order in which + * these test cases are run is non-deterministic. That means that the generated sql script will + * still contain exactly the same test cases after each generation, but the order of the test + * cases in the script file is equal to the order in which the test cases were run the last time + * the script was generated. It is therefore also not recommended including this generation in an + * automatic build, but to generate the script only when there has been some fundamental change in + * the code. + * + *

    The sql test scripts can be generated by running + * mvn -Ddo_log_statements=true exec:java -Dexec.mainClass=com.google.cloud.spanner.connection.SqlTestScriptsGenerator -Dexec.classpathScope="test" + * + */ + static void generateTestScript() throws ClassNotFoundException, IOException { + // first make the current script file empty + AbstractConnectionImplTest test = new ConnectionImplAutocommitReadOnlyNoActionsTest(); + for (Dialect dialect : Dialect.values()) { + test.emptyScript(dialect); + } + JUnitCore junit = new JUnitCore(); + Class[] testClasses = getAbstractConnectionImplTestSubclasses(); + Result result = junit.run(testClasses); + if (!result.wasSuccessful()) { + throw new RuntimeException("Generating test script failed!"); + } + } + + private static Class[] getAbstractConnectionImplTestSubclasses() + throws IOException, ClassNotFoundException { + List> list = new ArrayList<>(); + ClassPath cp = ClassPath.from(ConnectionImplGeneratedSqlScriptTest.class.getClassLoader()); + ImmutableSet classes = + cp.getTopLevelClassesRecursive( + ConnectionImplGeneratedSqlScriptTest.class.getPackage().getName()); + for (ClassInfo c : classes) { + Class clazz = + ConnectionImplGeneratedSqlScriptTest.class.getClassLoader().loadClass(c.getName()); + addAbstractConnectionImplTestSubclassesToList(list, clazz); + } + Class[] res = new Class[list.size()]; + for (int i = 0; i < list.size(); i++) { + res[i] = list.get(i); + } + return res; + } + + private static void addAbstractConnectionImplTestSubclassesToList( + List> list, Class clazz) { + for (Class innerClass : clazz.getDeclaredClasses()) { + addAbstractConnectionImplTestSubclassesToList(list, innerClass); + } + if (!clazz.isInterface() + && !Modifier.isAbstract(clazz.getModifiers()) + && AbstractConnectionImplTest.class.isAssignableFrom(clazz)) { + list.add(clazz); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplTest.java 
b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplTest.java new file mode 100644 index 000000000000..c1a5e2873dec --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplTest.java @@ -0,0 +1,2089 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.AbstractConnectionImplTest.DDL; +import static com.google.cloud.spanner.connection.AbstractConnectionImplTest.SELECT; +import static com.google.cloud.spanner.connection.AbstractConnectionImplTest.UPDATE; +import static com.google.cloud.spanner.connection.AbstractConnectionImplTest.expectSpannerException; +import static com.google.cloud.spanner.connection.ConnectionImpl.checkResultTypeAllowed; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static 
org.mockito.ArgumentMatchers.isNull; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyList; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.NoCredentials; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.BatchClient; +import com.google.cloud.spanner.BatchReadOnlyTransaction; +import com.google.cloud.spanner.BatchTransactionId; +import com.google.cloud.spanner.CommitResponse; +import com.google.cloud.spanner.CommitStats; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.ForwardingResultSet; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.ReadContext.QueryAnalyzeMode; +import com.google.cloud.spanner.ReadOnlyTransaction; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.TimestampBound.Mode; +import com.google.cloud.spanner.TransactionContext; +import com.google.cloud.spanner.TransactionManager; +import com.google.cloud.spanner.TransactionRunner; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; +import com.google.cloud.spanner.connection.ConnectionImpl.UnitOfWorkType; 
+import com.google.cloud.spanner.connection.ConnectionStatementExecutorImpl.StatementTimeoutGetter; +import com.google.cloud.spanner.connection.ReadOnlyStalenessUtil.GetExactStaleness; +import com.google.cloud.spanner.connection.StatementResult.ResultType; +import com.google.cloud.spanner.connection.UnitOfWork.CallType; +import com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.io.ByteStreams; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; +import com.google.spanner.v1.ResultSetStats; +import java.io.InputStream; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import javax.annotation.Nonnull; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +@RunWith(JUnit4.class) +public class ConnectionImplTest { + public static final String URI = + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database"; + + static class SimpleTransactionManager implements TransactionManager { + private TransactionState state; + private CommitResponse commitResponse; + private TransactionContext txContext; + private final boolean returnCommitStats; + + private SimpleTransactionManager(TransactionContext txContext, boolean returnCommitStats) { + this.txContext = txContext; + this.returnCommitStats = returnCommitStats; + } + + @Override + public TransactionContext begin() { + state = TransactionState.STARTED; + return txContext; + } + + @Override + public TransactionContext begin(AbortedException exception) { + return begin(); + } + + @Override + public void commit() { + Timestamp commitTimestamp = 
Timestamp.now(); + commitResponse = mock(CommitResponse.class); + when(commitResponse.getCommitTimestamp()).thenReturn(commitTimestamp); + if (returnCommitStats) { + CommitStats stats = mock(CommitStats.class); + when(commitResponse.hasCommitStats()).thenReturn(true); + when(stats.getMutationCount()).thenReturn(5L); + when(commitResponse.getCommitStats()).thenReturn(stats); + } + state = TransactionState.COMMITTED; + } + + @Override + public void rollback() { + state = TransactionState.ROLLED_BACK; + } + + @Override + public TransactionContext resetForRetry() { + return txContext; + } + + @Override + public Timestamp getCommitTimestamp() { + return commitResponse == null ? null : commitResponse.getCommitTimestamp(); + } + + @Override + public CommitResponse getCommitResponse() { + return commitResponse; + } + + @Override + public TransactionState getState() { + return state; + } + + @Override + public void close() { + if (state != TransactionState.COMMITTED) { + state = TransactionState.ROLLED_BACK; + } + } + } + + private static class SimpleResultSet extends ForwardingResultSet { + private boolean nextCalled = false; + private boolean onValidRow = false; + private boolean hasNextReturnedFalse = false; + + SimpleResultSet(ResultSet delegate) { + super(delegate); + } + + @Override + public boolean next() { + nextCalled = true; + onValidRow = super.next(); + hasNextReturnedFalse = !onValidRow; + return onValidRow; + } + + boolean isNextCalled() { + return nextCalled; + } + + @Override + public ResultSetStats getStats() { + if (hasNextReturnedFalse) { + return super.getStats(); + } + return null; + } + + @Override + public long getLong(int columnIndex) { + if (onValidRow) { + return super.getLong(columnIndex); + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "ResultSet is not positioned on a valid row"); + } + } + + private static ResultSet createSelect1MockResultSet() { + ResultSet mockResultSet = mock(ResultSet.class); + 
when(mockResultSet.next()).thenReturn(true, false); + when(mockResultSet.getLong(0)).thenReturn(1L); + when(mockResultSet.getLong("TEST")).thenReturn(1L); + when(mockResultSet.getType()).thenReturn(Type.struct()); + when(mockResultSet.getColumnType(0)).thenReturn(Type.int64()); + when(mockResultSet.getColumnType("TEST")).thenReturn(Type.int64()); + return mockResultSet; + } + + private static DdlClient createDefaultMockDdlClient() { + try { + DdlClient ddlClient = mock(DdlClient.class); + @SuppressWarnings("unchecked") + final OperationFuture operation = + mock(OperationFuture.class); + when(operation.get()).thenReturn(null); + UpdateDatabaseDdlMetadata metadata = UpdateDatabaseDdlMetadata.getDefaultInstance(); + ApiFuture futureMetadata = ApiFutures.immediateFuture(metadata); + when(operation.getMetadata()).thenReturn(futureMetadata); + when(ddlClient.executeDdl(anyString(), isNull())).thenCallRealMethod(); + when(ddlClient.executeDdl(anyList(), isNull())).thenReturn(operation); + return ddlClient; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + static ConnectionImpl createConnection(final ConnectionOptions options) { + return createConnection(options, Dialect.GOOGLE_STANDARD_SQL); + } + + public static ConnectionImpl createConnection(final ConnectionOptions options, Dialect dialect) { + Spanner spanner = mock(Spanner.class); + SpannerPool spannerPool = mock(SpannerPool.class); + when(spannerPool.getSpanner(any(ConnectionOptions.class), any(ConnectionImpl.class))) + .thenReturn(spanner); + DdlClient ddlClient = createDefaultMockDdlClient(); + DatabaseClient dbClient = mock(DatabaseClient.class); + when(dbClient.getDialect()).thenReturn(dialect); + ReadOnlyTransaction singleUseReadOnlyTx = mock(ReadOnlyTransaction.class); + + ResultSet mockResultSetWithStats = createSelect1MockResultSet(); + when(mockResultSetWithStats.getStats()).thenReturn(ResultSetStats.getDefaultInstance()); + + final SimpleResultSet select1ResultSet = new 
SimpleResultSet(createSelect1MockResultSet()); + final SimpleResultSet select1ResultSetWithStats = new SimpleResultSet(mockResultSetWithStats); + when(singleUseReadOnlyTx.executeQuery( + Mockito.argThat(statement -> statement.getSql().toUpperCase().startsWith("SHOW")))) + .thenThrow( + SpannerExceptionFactory.newSpannerException( + ErrorCode.UNIMPLEMENTED, "SHOW queries are not supported")); + when(singleUseReadOnlyTx.executeQuery(Statement.of(SELECT))) + .thenAnswer( + invocation -> { + if (select1ResultSet.nextCalled) { + // create a new mock + return new SimpleResultSet(createSelect1MockResultSet()); + } + return select1ResultSet; + }); + when(singleUseReadOnlyTx.analyzeQuery(Statement.of(SELECT), QueryAnalyzeMode.PLAN)) + .thenReturn(select1ResultSetWithStats); + when(singleUseReadOnlyTx.analyzeQuery(Statement.of(SELECT), QueryAnalyzeMode.PROFILE)) + .thenReturn(select1ResultSetWithStats); + when(singleUseReadOnlyTx.getReadTimestamp()) + .then( + invocation -> { + if (select1ResultSet.isNextCalled() || select1ResultSetWithStats.isNextCalled()) { + return Timestamp.now(); + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "No query has returned with any data yet"); + }); + when(dbClient.singleUseReadOnlyTransaction(any(TimestampBound.class))) + .thenReturn(singleUseReadOnlyTx); + + when(dbClient.transactionManager(any())) + .thenAnswer( + invocation -> { + TransactionContext txContext = mock(TransactionContext.class); + when(txContext.executeQuery(Statement.of(SELECT))) + .thenAnswer( + ignored -> { + if (select1ResultSet.nextCalled) { + // create a new mock + return new SimpleResultSet(createSelect1MockResultSet()); + } + return select1ResultSet; + }); + when(txContext.executeQuery( + Mockito.argThat( + statement -> statement.getSql().toUpperCase().startsWith("SHOW")))) + .thenThrow( + SpannerExceptionFactory.newSpannerException( + ErrorCode.UNIMPLEMENTED, "SHOW queries are not supported")); + 
when(txContext.analyzeQuery(Statement.of(SELECT), QueryAnalyzeMode.PLAN)) + .thenReturn(select1ResultSetWithStats); + when(txContext.analyzeQuery(Statement.of(SELECT), QueryAnalyzeMode.PROFILE)) + .thenReturn(select1ResultSetWithStats); + when(txContext.executeUpdate(Statement.of(UPDATE))).thenReturn(1L); + return new SimpleTransactionManager(txContext, options.isReturnCommitStats()); + }); + + when(dbClient.readOnlyTransaction(any(TimestampBound.class))) + .thenAnswer( + invocation -> { + ReadOnlyTransaction tx = mock(ReadOnlyTransaction.class); + when(tx.executeQuery(Statement.of(SELECT))) + .thenAnswer( + ignored -> { + if (select1ResultSet.nextCalled) { + // create a new mock + return new SimpleResultSet(createSelect1MockResultSet()); + } + return select1ResultSet; + }); + when(tx.analyzeQuery(Statement.of(SELECT), QueryAnalyzeMode.PLAN)) + .thenReturn(select1ResultSetWithStats); + when(tx.analyzeQuery(Statement.of(SELECT), QueryAnalyzeMode.PROFILE)) + .thenReturn(select1ResultSetWithStats); + when(tx.getReadTimestamp()) + .then( + ignored -> { + if (select1ResultSet.isNextCalled() + || select1ResultSetWithStats.isNextCalled()) { + return Timestamp.now(); + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "No query has returned with any data yet"); + }); + return tx; + }); + + when(dbClient.readWriteTransaction()) + .thenAnswer( + new Answer() { + @Override + public TransactionRunner answer(InvocationOnMock invocation) { + return new TransactionRunner() { + private CommitResponse commitResponse; + + @Override + public T run(TransactionCallable callable) { + commitResponse = new CommitResponse(Timestamp.ofTimeSecondsAndNanos(1, 1)); + TransactionContext transaction = mock(TransactionContext.class); + when(transaction.executeUpdate(Statement.of(UPDATE), Options.lastStatement())) + .thenReturn(1L); + try { + return callable.run(transaction); + } catch (Exception e) { + throw SpannerExceptionFactory.newSpannerException(e); + 
} + } + + @Override + public Timestamp getCommitTimestamp() { + return commitResponse == null ? null : commitResponse.getCommitTimestamp(); + } + + @Override + public CommitResponse getCommitResponse() { + return commitResponse; + } + + @Override + public TransactionRunner allowNestedTransaction() { + return this; + } + }; + } + }); + BatchClient batchClient = mock(BatchClient.class); + BatchReadOnlyTransaction batchReadOnlyTransaction = mock(BatchReadOnlyTransaction.class); + when(batchClient.batchReadOnlyTransaction(any(TimestampBound.class))) + .thenReturn(batchReadOnlyTransaction); + when(batchClient.batchReadOnlyTransaction(any(BatchTransactionId.class))) + .thenReturn(batchReadOnlyTransaction); + return new ConnectionImpl(options, spannerPool, ddlClient, dbClient, batchClient); + } + + @Test + public void testExecuteSetAutocommitOn() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI + ";autocommit=false") + .build())) { + assertThat(subject.isAutocommit(), is(false)); + + StatementResult res = subject.execute(Statement.of("set autocommit = true")); + assertThat(res.getResultType(), is(equalTo(ResultType.NO_RESULT))); + assertThat(subject.isAutocommit(), is(true)); + } + } + + @Test + public void testExecuteSetAutocommitOff() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.isAutocommit(), is(true)); + + StatementResult res = subject.execute(Statement.of("set autocommit = false")); + assertThat(res.getResultType(), is(equalTo(ResultType.NO_RESULT))); + assertThat(subject.isAutocommit(), is(false)); + } + } + + @Test + public void testSetAutocommitToTrue_inAutoCommitAndNotInTransaction_noop() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + 
.setUri(URI) + .build())) { + assertThat(subject.isAutocommit(), is(true)); + + subject.setAutocommit(true); + + assertTrue(subject.isAutocommit()); + } + } + + @Test + public void testSetAutocommitToTrue_inAutoCommitAndInTransaction_noop() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.isAutocommit(), is(true)); + subject.execute(Statement.of("begin transaction")); + + subject.setAutocommit(true); + + assertTrue(subject.isAutocommit()); + } + } + + @Test + public void testSetAutocommitToFalse_inAutoCommitAndNotInTransaction_autocommitModeChanged() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.isAutocommit(), is(true)); + + subject.setAutocommit(false); + + assertFalse(subject.isAutocommit()); + } + } + + @Test + public void testSetAutocommitToFalse_inAutoCommitAndInTransaction_throwsException() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.isAutocommit(), is(true)); + subject.execute(Statement.of("begin transaction")); + + SpannerException exception = + assertThrows(SpannerException.class, () -> subject.setAutocommit(false)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + assertTrue( + exception + .getMessage() + .contains("Cannot set autocommit while in a temporary transaction")); + } + } + + @Test + public void testSetAutocommitToFalse_notInAutoCommitAndTransactionNotStarted_noop() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI + ";autocommit=false") + .build())) { + assertThat(subject.isAutocommit(), is(false)); + + 
subject.setAutocommit(false); + + assertFalse(subject.isAutocommit()); + } + } + + @Test + public void testSetAutocommitToFalse_notInAutoCommitAndTransactionStarted_noop() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI + ";autocommit=false") + .build())) { + assertThat(subject.isAutocommit(), is(false)); + subject.executeQuery(Statement.of(SELECT)); + + subject.setAutocommit(false); + + assertFalse(subject.isAutocommit()); + } + } + + @Test + public void + testSetAutocommitToTrue_notInAutoCommitAndTransactionNotStarted_autocommitModeChanged() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI + ";autocommit=false") + .build())) { + assertThat(subject.isAutocommit(), is(false)); + + subject.setAutocommit(true); + + assertTrue(subject.isAutocommit()); + } + } + + @Test + public void testSetAutocommitToTrue_notInAutoCommitAndTransactionStarted_throwsException() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI + ";autocommit=false") + .build())) { + assertThat(subject.isAutocommit(), is(false)); + subject.executeQuery(Statement.of(SELECT)); + + SpannerException exception = + assertThrows(SpannerException.class, () -> subject.setAutocommit(true)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + assertTrue( + exception.getMessage().contains("Cannot set autocommit while a transaction is active")); + } + } + + @Test + public void testExecuteGetAutocommit() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + + // assert that autocommit is true (default) + assertThat(subject.isAutocommit(), is(true)); + StatementResult res = 
subject.execute(Statement.of("show variable autocommit")); + assertThat(res.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat(res.getResultSet().next(), is(true)); + assertThat(res.getResultSet().getBoolean("AUTOCOMMIT"), is(true)); + + // set autocommit to false and assert that autocommit is false + subject.execute(Statement.of("set autocommit = false")); + assertThat(subject.isAutocommit(), is(false)); + res = subject.execute(Statement.of("show variable autocommit")); + assertThat(res.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat(res.getResultSet().next(), is(true)); + assertThat(res.getResultSet().getBoolean("AUTOCOMMIT"), is(false)); + } + } + + @Test + public void testExecuteSetReadOnlyOn() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.isReadOnly(), is(false)); + + StatementResult res = subject.execute(Statement.of("set readonly = true")); + assertThat(res.getResultType(), is(equalTo(ResultType.NO_RESULT))); + assertThat(subject.isReadOnly(), is(true)); + } + } + + @Test + public void testExecuteSetReadOnlyOff() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI + ";readonly=true") + .build())) { + assertThat(subject.isReadOnly(), is(true)); + + StatementResult res = subject.execute(Statement.of("set readonly = false")); + assertThat(res.getResultType(), is(equalTo(ResultType.NO_RESULT))); + assertThat(subject.isReadOnly(), is(false)); + } + } + + @Test + public void testExecuteGetReadOnly() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + + // assert that read only is false (default) + assertThat(subject.isReadOnly(), is(false)); + StatementResult res = 
subject.execute(Statement.of("show variable readonly")); + assertThat(res.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat(res.getResultSet().next(), is(true)); + assertThat(res.getResultSet().getBoolean("READONLY"), is(false)); + + // set read only to true and assert that read only is true + subject.execute(Statement.of("set readonly = true")); + assertThat(subject.isReadOnly(), is(true)); + res = subject.execute(Statement.of("show variable readonly")); + assertThat(res.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat(res.getResultSet().next(), is(true)); + assertThat(res.getResultSet().getBoolean("READONLY"), is(true)); + } + } + + @Test + public void testExecuteSetAutocommitDmlMode() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.isAutocommit(), is(true)); + assertThat(subject.getAutocommitDmlMode(), is(equalTo(AutocommitDmlMode.TRANSACTIONAL))); + + StatementResult res = + subject.execute(Statement.of("set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'")); + assertThat(res.getResultType(), is(equalTo(ResultType.NO_RESULT))); + assertThat( + subject.getAutocommitDmlMode(), is(equalTo(AutocommitDmlMode.PARTITIONED_NON_ATOMIC))); + + res = subject.execute(Statement.of("set autocommit_dml_mode='TRANSACTIONAL'")); + assertThat(res.getResultType(), is(equalTo(ResultType.NO_RESULT))); + assertThat(subject.getAutocommitDmlMode(), is(equalTo(AutocommitDmlMode.TRANSACTIONAL))); + } + } + + @Test + public void testExecuteSetAutocommitDmlModeInvalidValue() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.isAutocommit(), is(true)); + assertThat(subject.getAutocommitDmlMode(), is(equalTo(AutocommitDmlMode.TRANSACTIONAL))); + + ErrorCode expected = null; + try { + 
subject.execute(Statement.of("set autocommit_dml_mode='NON_EXISTENT_VALUE'")); + } catch (SpannerException e) { + expected = e.getErrorCode(); + } + assertThat(expected, is(equalTo(ErrorCode.INVALID_ARGUMENT))); + } + } + + @Test + public void testExecuteGetAutocommitDmlMode() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.isAutocommit(), is(true)); + assertThat(subject.getAutocommitDmlMode(), is(equalTo(AutocommitDmlMode.TRANSACTIONAL))); + + StatementResult res = subject.execute(Statement.of("show variable autocommit_dml_mode")); + assertThat(res.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat(res.getResultSet().next(), is(true)); + assertThat( + res.getResultSet().getString("AUTOCOMMIT_DML_MODE"), + is(equalTo(AutocommitDmlMode.TRANSACTIONAL.toString()))); + + subject.execute(Statement.of("set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'")); + res = subject.execute(Statement.of("show variable autocommit_dml_mode")); + assertThat(res.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat(res.getResultSet().next(), is(true)); + assertThat( + res.getResultSet().getString("AUTOCOMMIT_DML_MODE"), + is(equalTo(AutocommitDmlMode.PARTITIONED_NON_ATOMIC.toString()))); + } + } + + @Test + public void testExecuteSetOptimizerVersion() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.getOptimizerVersion(), is(equalTo(""))); + + StatementResult res = subject.execute(Statement.of("set optimizer_version='1'")); + assertThat(res.getResultType(), is(equalTo(ResultType.NO_RESULT))); + assertThat(subject.getOptimizerVersion(), is(equalTo("1"))); + + res = subject.execute(Statement.of("set optimizer_version='1000'")); + assertThat(res.getResultType(), 
is(equalTo(ResultType.NO_RESULT))); + assertThat(subject.getOptimizerVersion(), is(equalTo("1000"))); + + res = subject.execute(Statement.of("set optimizer_version='latest'")); + assertThat(res.getResultType(), is(equalTo(ResultType.NO_RESULT))); + assertThat(subject.getOptimizerVersion(), is(equalTo("latest"))); + + res = subject.execute(Statement.of("set optimizer_version=''")); + assertThat(res.getResultType(), is(equalTo(ResultType.NO_RESULT))); + assertThat(subject.getOptimizerVersion(), is(equalTo(""))); + } + } + + @Test + public void testExecuteSetOptimizerVersionInvalidValue() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.getOptimizerVersion(), is(equalTo(""))); + + try { + subject.execute(Statement.of("set optimizer_version='NOT_A_VERSION'")); + fail("Missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode(), is(equalTo(ErrorCode.INVALID_ARGUMENT))); + } + } + } + + @Test + public void testExecuteGetOptimizerVersion() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.getOptimizerVersion(), is(equalTo(""))); + + StatementResult res = subject.execute(Statement.of("show variable optimizer_version")); + assertThat(res.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat(res.getResultSet().next(), is(true)); + assertThat(res.getResultSet().getString("OPTIMIZER_VERSION"), is(equalTo(""))); + + subject.execute(Statement.of("set optimizer_version='1'")); + res = subject.execute(Statement.of("show variable optimizer_version")); + assertThat(res.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat(res.getResultSet().next(), is(true)); + assertThat(res.getResultSet().getString("OPTIMIZER_VERSION"), is(equalTo("1"))); + } + } + + @Test 
+ public void testExecuteSetOptimizerStatisticsPackage() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.getOptimizerStatisticsPackage(), is(equalTo(""))); + + StatementResult res = + subject.execute(Statement.of("set optimizer_statistics_package='custom-package'")); + assertThat(res.getResultType(), is(equalTo(ResultType.NO_RESULT))); + assertThat(subject.getOptimizerStatisticsPackage(), is(equalTo("custom-package"))); + + res = subject.execute(Statement.of("set optimizer_statistics_package=''")); + assertThat(res.getResultType(), is(equalTo(ResultType.NO_RESULT))); + assertThat(subject.getOptimizerStatisticsPackage(), is(equalTo(""))); + } + } + + @Test + public void testExecuteSetOptimizerStatisticsPackageInvalidValue() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.getOptimizerVersion(), is(equalTo(""))); + + try { + subject.execute(Statement.of("set optimizer_statistics_package=' '")); + fail("Missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode(), is(equalTo(ErrorCode.INVALID_ARGUMENT))); + } + } + } + + @Test + public void testExecuteGetOptimizerStatisticsPackage() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.getOptimizerStatisticsPackage(), is(equalTo(""))); + + StatementResult res = + subject.execute(Statement.of("show variable optimizer_statistics_package")); + assertThat(res.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat(res.getResultSet().next(), is(true)); + assertThat(res.getResultSet().getString("OPTIMIZER_STATISTICS_PACKAGE"), is(equalTo(""))); + + subject.execute(Statement.of("set 
optimizer_statistics_package='custom-package'")); + res = subject.execute(Statement.of("show variable optimizer_statistics_package")); + assertThat(res.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat(res.getResultSet().next(), is(true)); + assertThat( + res.getResultSet().getString("OPTIMIZER_STATISTICS_PACKAGE"), + is(equalTo("custom-package"))); + } + } + + @Test + public void testExecuteSetReturnCommitStats() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertFalse(subject.isReturnCommitStats()); + + StatementResult result = subject.execute(Statement.of("set return_commit_stats=true")); + assertEquals(ResultType.NO_RESULT, result.getResultType()); + assertTrue(subject.isReturnCommitStats()); + + result = subject.execute(Statement.of("set return_commit_stats=false")); + assertEquals(ResultType.NO_RESULT, result.getResultType()); + assertFalse(subject.isReturnCommitStats()); + } + } + + @Test + public void testExecuteSetReturnCommitStatsInvalidValue() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertFalse(subject.isReturnCommitStats()); + + try { + subject.execute(Statement.of("set return_commit_stats=yes")); + fail("Missing expected exception"); + } catch (SpannerException e) { + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + } + } + } + + @Test + public void testExecuteGetReturnCommitStats() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertFalse(subject.isReturnCommitStats()); + + StatementResult returnCommitStatsFalse = + subject.execute(Statement.of("show variable return_commit_stats")); + assertEquals(ResultType.RESULT_SET, returnCommitStatsFalse.getResultType()); + 
assertTrue(returnCommitStatsFalse.getResultSet().next()); + assertFalse(returnCommitStatsFalse.getResultSet().getBoolean("RETURN_COMMIT_STATS")); + + subject.execute(Statement.of("set return_commit_stats=true")); + StatementResult returnCommitStatsTrue = + subject.execute(Statement.of("show variable return_commit_stats")); + assertEquals(ResultType.RESULT_SET, returnCommitStatsTrue.getResultType()); + assertTrue(returnCommitStatsTrue.getResultSet().next()); + assertTrue(returnCommitStatsTrue.getResultSet().getBoolean("RETURN_COMMIT_STATS")); + } + } + + @Test + public void testExecuteSetStatementTimeout() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.getStatementTimeout(TimeUnit.MILLISECONDS), is(equalTo(0L))); + + for (TimeUnit unit : ReadOnlyStalenessUtil.SUPPORTED_UNITS) { + for (Long timeout : new Long[] {1L, 100L, 10000L, 315576000000L}) { + StatementResult res = + subject.execute( + Statement.of( + String.format( + "set statement_timeout='%d%s'", + timeout, ReadOnlyStalenessUtil.getTimeUnitAbbreviation(unit)))); + assertThat(res.getResultType(), is(equalTo(ResultType.NO_RESULT))); + assertThat(subject.getStatementTimeout(unit), is(equalTo(timeout))); + assertThat(subject.hasStatementTimeout(), is(true)); + + StatementResult resNoTimeout = + subject.execute(Statement.of("set statement_timeout=null")); + assertThat(resNoTimeout.getResultType(), is(equalTo(ResultType.NO_RESULT))); + assertThat(subject.getStatementTimeout(unit), is(equalTo(0L))); + assertThat(subject.hasStatementTimeout(), is(false)); + } + } + } + } + + @Test + public void testExecuteSetStatementTimeoutInvalidValue() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.getStatementTimeout(TimeUnit.MILLISECONDS), is(equalTo(0L))); 
+ + ErrorCode expected = null; + try { + subject.execute(Statement.of("set statement_timeout=-1")); + } catch (SpannerException e) { + expected = e.getErrorCode(); + } + assertThat(expected, is(equalTo(ErrorCode.INVALID_ARGUMENT))); + } + } + + @Test + public void testExecuteGetStatementTimeout() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.getStatementTimeout(TimeUnit.MILLISECONDS), is(equalTo(0L))); + + for (TimeUnit unit : ReadOnlyStalenessUtil.SUPPORTED_UNITS) { + for (Long timeout : new Long[] {1L, 100L, 10000L, 315576000000L}) { + subject.execute( + Statement.of( + String.format( + "set statement_timeout='%d%s'", + timeout, ReadOnlyStalenessUtil.getTimeUnitAbbreviation(unit)))); + StatementResult res = subject.execute(Statement.of("show variable statement_timeout")); + assertThat(res.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat(res.getResultSet().next(), is(true)); + TimeUnit appropriateUnit = + ReadOnlyStalenessUtil.getAppropriateTimeUnit(new StatementTimeoutGetter(subject)); + assertThat( + res.getResultSet().getString("STATEMENT_TIMEOUT"), + is( + equalTo( + subject.getStatementTimeout(appropriateUnit) + + ReadOnlyStalenessUtil.getTimeUnitAbbreviation(appropriateUnit)))); + + subject.execute(Statement.of("set statement_timeout=null")); + StatementResult resNoTimeout = + subject.execute(Statement.of("show variable statement_timeout")); + assertThat(resNoTimeout.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat(resNoTimeout.getResultSet().next(), is(true)); + assertThat(resNoTimeout.getResultSet().isNull("STATEMENT_TIMEOUT"), is(true)); + } + } + } + } + + @Test + public void testExecuteGetReadTimestamp() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + 
subject.beginTransaction(); + subject.setTransactionMode(TransactionMode.READ_ONLY_TRANSACTION); + subject.executeQuery(Statement.of(AbstractConnectionImplTest.SELECT)); + StatementResult res = subject.execute(Statement.of("show variable read_timestamp")); + assertThat(res.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat(res.getResultSet().next(), is(true)); + assertThat(res.getResultSet().getTimestamp("READ_TIMESTAMP"), is(notNullValue())); + subject.commit(); + } + } + + @Test + public void testExecuteGetCommitTimestamp() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + subject.beginTransaction(); + subject.executeQuery(Statement.of(AbstractConnectionImplTest.SELECT)).next(); + subject.commit(); + StatementResult res = subject.execute(Statement.of("show variable commit_timestamp")); + assertThat(res.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat(res.getResultSet().next(), is(true)); + assertThat(res.getResultSet().getTimestamp("COMMIT_TIMESTAMP"), is(notNullValue())); + } + } + + @Test + public void testExecuteGetCommitResponse() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + subject.beginTransaction(); + subject.executeQuery(Statement.of(AbstractConnectionImplTest.SELECT)).next(); + subject.commit(); + StatementResult response = subject.execute(Statement.of("show variable commit_response")); + assertEquals(ResultType.RESULT_SET, response.getResultType()); + assertTrue(response.getResultSet().next()); + assertNotNull(response.getResultSet().getTimestamp("COMMIT_TIMESTAMP")); + assertTrue(response.getResultSet().isNull("MUTATION_COUNT")); + assertFalse(response.getResultSet().next()); + } + + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + 
.setCredentials(NoCredentials.getInstance()) + .setUri(URI + ";returnCommitStats=true") + .build())) { + subject.beginTransaction(); + subject.executeQuery(Statement.of(AbstractConnectionImplTest.SELECT)).next(); + subject.commit(); + StatementResult response = subject.execute(Statement.of("show variable commit_response")); + assertEquals(ResultType.RESULT_SET, response.getResultType()); + assertTrue(response.getResultSet().next()); + assertNotNull(response.getResultSet().getTimestamp("COMMIT_TIMESTAMP")); + assertFalse(response.getResultSet().isNull("MUTATION_COUNT")); + assertFalse(response.getResultSet().next()); + } + } + + private static final class StalenessDuration { + private final long duration; + private final TimeUnit unit; + + private StalenessDuration(long duration, TimeUnit unit) { + this.duration = duration; + this.unit = unit; + } + + @Override + public String toString() { + GetExactStaleness getExactStalenessFunction = + new GetExactStaleness(TimestampBound.ofExactStaleness(duration, unit)); + return ReadOnlyStalenessUtil.durationToString(getExactStalenessFunction); + } + } + + @Test + public void testExecuteGetReadOnlyStaleness() { + Map timestamps = new HashMap<>(); + timestamps.put(Mode.READ_TIMESTAMP, ReadOnlyStalenessUtil.parseRfc3339("2018-10-08T14:05:10Z")); + timestamps.put( + Mode.MIN_READ_TIMESTAMP, ReadOnlyStalenessUtil.parseRfc3339("2018-10-08T14:05:10.12345Z")); + Map durations = new HashMap<>(); + durations.put(Mode.EXACT_STALENESS, new StalenessDuration(1000L, TimeUnit.MILLISECONDS)); + durations.put(Mode.MAX_STALENESS, new StalenessDuration(1234567L, TimeUnit.MICROSECONDS)); + List stalenesses = + Arrays.asList( + TimestampBound.strong(), + TimestampBound.ofReadTimestamp(timestamps.get(Mode.READ_TIMESTAMP)), + TimestampBound.ofMinReadTimestamp(timestamps.get(Mode.MIN_READ_TIMESTAMP)), + TimestampBound.ofExactStaleness( + durations.get(Mode.EXACT_STALENESS).duration, + durations.get(Mode.EXACT_STALENESS).unit), + 
TimestampBound.ofMaxStaleness( + durations.get(Mode.MAX_STALENESS).duration, + durations.get(Mode.MAX_STALENESS).unit)); + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + for (TimestampBound staleness : stalenesses) { + subject.setReadOnlyStaleness(staleness); + StatementResult res = subject.execute(Statement.of("show variable read_only_staleness")); + assertThat(res.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat(res.getResultSet().next(), is(true)); + assertThat( + res.getResultSet().getString("READ_ONLY_STALENESS"), + is(equalTo(ReadOnlyStalenessUtil.timestampBoundToString(staleness)))); + } + } + } + + @Test + public void testExecuteSetReadOnlyStaleness() { + Map timestamps = new HashMap<>(); + timestamps.put(Mode.READ_TIMESTAMP, ReadOnlyStalenessUtil.parseRfc3339("2018-10-08T12:13:14Z")); + timestamps.put( + Mode.MIN_READ_TIMESTAMP, + ReadOnlyStalenessUtil.parseRfc3339("2018-10-08T14:13:14.1234+02:00")); + Map durations = new HashMap<>(); + durations.put(Mode.EXACT_STALENESS, new StalenessDuration(1000L, TimeUnit.MILLISECONDS)); + durations.put(Mode.MAX_STALENESS, new StalenessDuration(1234567L, TimeUnit.MICROSECONDS)); + List stalenesses = + Arrays.asList( + TimestampBound.strong(), + TimestampBound.ofReadTimestamp(timestamps.get(Mode.READ_TIMESTAMP)), + TimestampBound.ofMinReadTimestamp(timestamps.get(Mode.MIN_READ_TIMESTAMP)), + TimestampBound.ofExactStaleness( + durations.get(Mode.EXACT_STALENESS).duration, + durations.get(Mode.EXACT_STALENESS).unit), + TimestampBound.ofMaxStaleness( + durations.get(Mode.MAX_STALENESS).duration, + durations.get(Mode.MAX_STALENESS).unit)); + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + for (TimestampBound staleness : stalenesses) { + StatementResult res = + subject.execute( + 
Statement.of( + String.format( + "set read_only_staleness='%s'", + ReadOnlyStalenessUtil.timestampBoundToString(staleness)))); + assertThat(res.getResultType(), is(equalTo(ResultType.NO_RESULT))); + assertThat(subject.getReadOnlyStaleness(), is(equalTo(staleness))); + } + } + } + + @Test + public void testExecuteBeginTransaction() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.isInTransaction(), is(false)); + + StatementResult res = subject.execute(Statement.of("begin transaction")); + assertThat(res.getResultType(), is(equalTo(ResultType.NO_RESULT))); + assertThat(subject.isInTransaction(), is(true)); + } + } + + @Test + public void testExecuteCommitTransaction() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + subject.execute(Statement.of("begin transaction")); + assertThat(subject.isInTransaction(), is(true)); + + StatementResult res = subject.execute(Statement.of("commit")); + assertThat(res.getResultType(), is(equalTo(ResultType.NO_RESULT))); + assertThat(subject.isInTransaction(), is(false)); + } + } + + @Test + public void testExecuteRollbackTransaction() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + subject.execute(Statement.of("begin")); + assertThat(subject.isInTransaction(), is(true)); + + StatementResult res = subject.execute(Statement.of("rollback")); + assertThat(res.getResultType(), is(equalTo(ResultType.NO_RESULT))); + assertThat(subject.isInTransaction(), is(false)); + } + } + + @Test + public void testExecuteSetTransactionReadOnly() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) 
+ .build())) { + subject.execute(Statement.of("begin")); + assertThat(subject.getTransactionMode(), is(equalTo(TransactionMode.READ_WRITE_TRANSACTION))); + assertThat(subject.isInTransaction(), is(true)); + + StatementResult res = subject.execute(Statement.of("set transaction read only")); + assertThat(res.getResultType(), is(equalTo(ResultType.NO_RESULT))); + assertThat(subject.getTransactionMode(), is(equalTo(TransactionMode.READ_ONLY_TRANSACTION))); + } + } + + @Test + public void testExecuteSetTransactionReadWrite() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI + ";readonly=true") + .build())) { + subject.execute(Statement.of("begin")); + assertThat(subject.getTransactionMode(), is(equalTo(TransactionMode.READ_ONLY_TRANSACTION))); + assertThat(subject.isInTransaction(), is(true)); + + // end the current temporary transaction and turn off read-only mode + subject.execute(Statement.of("commit")); + subject.execute(Statement.of("set readonly = false")); + + subject.execute(Statement.of("begin")); + StatementResult res = subject.execute(Statement.of("set transaction read only")); + assertThat(res.getResultType(), is(equalTo(ResultType.NO_RESULT))); + assertThat(subject.getTransactionMode(), is(equalTo(TransactionMode.READ_ONLY_TRANSACTION))); + res = subject.execute(Statement.of("set transaction read write")); + assertThat(res.getResultType(), is(equalTo(ResultType.NO_RESULT))); + assertThat(subject.getTransactionMode(), is(equalTo(TransactionMode.READ_WRITE_TRANSACTION))); + } + } + + @Test + public void testExecuteStartDdlBatch() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + StatementResult res = subject.execute(Statement.of("start batch ddl")); + assertThat(res.getResultType(), is(equalTo(ResultType.NO_RESULT))); + 
assertThat(subject.getUnitOfWorkType(), is(equalTo(UnitOfWorkType.DDL_BATCH))); + assertThat(subject.isInTransaction(), is(false)); + } + } + + @Test + public void testDefaultIsAutocommit() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.isAutocommit(), is(true)); + assertThat(subject.isInTransaction(), is(false)); + } + } + + @Test + public void testDefaultIsReadWrite() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.isReadOnly(), is(false)); + } + } + + @Test + public void testDefaultTransactionIsReadWrite() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + for (boolean autocommit : new Boolean[] {true, false}) { + subject.setAutocommit(autocommit); + subject.execute(Statement.of("begin")); + assertThat( + subject.getTransactionMode(), is(equalTo(TransactionMode.READ_WRITE_TRANSACTION))); + subject.commit(); + + subject.execute(Statement.of("begin")); + subject.execute(Statement.of("set transaction read only")); + assertThat( + subject.getTransactionMode(), is(equalTo(TransactionMode.READ_ONLY_TRANSACTION))); + subject.commit(); + + subject.execute(Statement.of("begin")); + assertThat( + subject.getTransactionMode(), is(equalTo(TransactionMode.READ_WRITE_TRANSACTION))); + subject.commit(); + + subject.execute(Statement.of("start batch ddl")); + assertThat(subject.getUnitOfWorkType(), is(equalTo(UnitOfWorkType.DDL_BATCH))); + subject.runBatch(); + + subject.execute(Statement.of("begin")); + assertThat( + subject.getTransactionMode(), is(equalTo(TransactionMode.READ_WRITE_TRANSACTION))); + subject.commit(); + } + } + } + + @Test + public void testDefaultTransactionIsReadOnly() { + 
try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI + ";readOnly=true") + .build())) { + for (boolean autocommit : new Boolean[] {true, false}) { + subject.setAutocommit(autocommit); + subject.execute(Statement.of("begin")); + assertThat( + subject.getTransactionMode(), is(equalTo(TransactionMode.READ_ONLY_TRANSACTION))); + subject.commit(); + } + } + } + + /** + * ReadOnlyStaleness is a session setting for a connection. However, certain settings are only + * allowed when the connection is in autocommit mode. The setting therefore must be reset to its + * default {@link TimestampBound#strong()} when the current setting is not compatible with + * transactional mode. + */ + @Test + public void testResetReadOnlyStaleness() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.isAutocommit(), is(true)); + assertThat(subject.getReadOnlyStaleness().getMode(), is(equalTo(TimestampBound.Mode.STRONG))); + + // the following values are always allowed + subject.setReadOnlyStaleness(TimestampBound.strong()); + assertThat(subject.getReadOnlyStaleness().getMode(), is(equalTo(TimestampBound.Mode.STRONG))); + subject.setAutocommit(false); + assertThat(subject.getReadOnlyStaleness().getMode(), is(equalTo(TimestampBound.Mode.STRONG))); + subject.setAutocommit(true); + assertThat(subject.getReadOnlyStaleness().getMode(), is(equalTo(TimestampBound.Mode.STRONG))); + + subject.setReadOnlyStaleness(TimestampBound.ofReadTimestamp(Timestamp.MAX_VALUE)); + subject.setAutocommit(false); + assertThat( + subject.getReadOnlyStaleness(), + is(equalTo(TimestampBound.ofReadTimestamp(Timestamp.MAX_VALUE)))); + subject.setAutocommit(true); + assertThat( + subject.getReadOnlyStaleness(), + is(equalTo(TimestampBound.ofReadTimestamp(Timestamp.MAX_VALUE)))); + + 
subject.setReadOnlyStaleness(TimestampBound.ofExactStaleness(10L, TimeUnit.SECONDS)); + subject.setAutocommit(false); + assertThat( + subject.getReadOnlyStaleness(), + is(equalTo(TimestampBound.ofExactStaleness(10L, TimeUnit.SECONDS)))); + subject.setAutocommit(true); + assertThat( + subject.getReadOnlyStaleness(), + is(equalTo(TimestampBound.ofExactStaleness(10L, TimeUnit.SECONDS)))); + + // the following values are only allowed in autocommit mode. Turning off autocommit will + // return the setting to its default + subject.setReadOnlyStaleness(TimestampBound.ofMinReadTimestamp(Timestamp.MAX_VALUE)); + assertThat( + subject.getReadOnlyStaleness(), + is(equalTo(TimestampBound.ofMinReadTimestamp(Timestamp.MAX_VALUE)))); + subject.setAutocommit(false); + assertThat(subject.getReadOnlyStaleness().getMode(), is(equalTo(TimestampBound.Mode.STRONG))); + subject.setAutocommit(true); + assertThat(subject.getReadOnlyStaleness().getMode(), is(equalTo(TimestampBound.Mode.STRONG))); + + subject.setReadOnlyStaleness(TimestampBound.ofMaxStaleness(10L, TimeUnit.SECONDS)); + assertThat( + subject.getReadOnlyStaleness(), + is(equalTo(TimestampBound.ofMaxStaleness(10L, TimeUnit.SECONDS)))); + subject.setAutocommit(false); + assertThat(subject.getReadOnlyStaleness().getMode(), is(equalTo(TimestampBound.Mode.STRONG))); + subject.setAutocommit(true); + assertThat(subject.getReadOnlyStaleness().getMode(), is(equalTo(TimestampBound.Mode.STRONG))); + } + } + + @Test + public void testChangeReadOnlyModeInAutocommit() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + subject.execute(Statement.of(UPDATE)); + assertThat(subject.getCommitTimestamp(), is(notNullValue())); + + // change to read-only + subject.setReadOnly(true); + expectSpannerException( + "Updates should not be allowed in read-only mode", + connection -> connection.execute(Statement.of(UPDATE)), + subject); + 
assertThat(subject.executeQuery(Statement.of(SELECT)), is(notNullValue())); + + // change back to read-write + subject.setReadOnly(false); + subject.execute(Statement.of(UPDATE)); + assertThat(subject.getCommitTimestamp(), is(notNullValue())); + + // and back to read-only + subject.setReadOnly(true); + expectSpannerException( + "DDL should not be allowed in read-only mode", + connection -> connection.execute(Statement.of(DDL)), + subject); + assertThat(subject.executeQuery(Statement.of(SELECT)), is(notNullValue())); + } + } + + @Test + public void testChangeReadOnlyModeInTransactionalMode() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + subject.setAutocommit(false); + + subject.execute(Statement.of(UPDATE)); + subject.commit(); + assertThat(subject.getCommitTimestamp(), is(notNullValue())); + + // change to read-only + subject.setReadOnly(true); + expectSpannerException( + "Updates should not be allowed in read-only mode", + connection -> connection.execute(Statement.of(UPDATE)), + subject); + assertThat(subject.executeQuery(Statement.of(SELECT)), is(notNullValue())); + subject.commit(); + + // change back to read-write + subject.setReadOnly(false); + subject.execute(Statement.of(UPDATE)); + subject.commit(); + assertThat(subject.getCommitTimestamp(), is(notNullValue())); + + // and back to read-only + subject.setReadOnly(true); + expectSpannerException( + "DDL should not be allowed in read-only mode", + connection -> connection.execute(Statement.of(DDL)), + subject); + assertThat(subject.executeQuery(Statement.of(SELECT)), is(notNullValue())); + } + } + + @Test + public void testAddRemoveTransactionRetryListener() { + try (ConnectionImpl subject = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(subject.getTransactionRetryListeners().hasNext(), 
is(false)); + TransactionRetryListener listener = mock(TransactionRetryListener.class); + subject.addTransactionRetryListener(listener); + assertThat(subject.getTransactionRetryListeners().hasNext(), is(true)); + assertThat(subject.removeTransactionRetryListener(listener), is(true)); + assertThat(subject.getTransactionRetryListeners().hasNext(), is(false)); + assertThat(subject.removeTransactionRetryListener(listener), is(false)); + } + } + + @Test + public void testMergeQueryOptions() { + ConnectionOptions connectionOptions = mock(ConnectionOptions.class); + when(connectionOptions.getInitialConnectionPropertyValues()).thenReturn(ImmutableMap.of()); + SpannerPool spannerPool = mock(SpannerPool.class); + DdlClient ddlClient = mock(DdlClient.class); + DatabaseClient dbClient = mock(DatabaseClient.class); + when(dbClient.getDialect()).thenReturn(Dialect.GOOGLE_STANDARD_SQL); + final UnitOfWork unitOfWork = mock(UnitOfWork.class); + when(unitOfWork.executeQueryAsync( + any(), any(ParsedStatement.class), any(AnalyzeMode.class), Mockito.any())) + .thenReturn(ApiFutures.immediateFuture(mock(ResultSet.class))); + try (ConnectionImpl impl = + new ConnectionImpl( + connectionOptions, spannerPool, ddlClient, dbClient, mock(BatchClient.class)) { + @Override + UnitOfWork getCurrentUnitOfWorkOrStartNewUnitOfWork( + @Nonnull ParsedStatement parsedStatement, boolean isInternalMetadataQuery) { + return unitOfWork; + } + }) { + // Execute query with an optimizer version and statistics package set on the connection. 
+ impl.setOptimizerVersion("1"); + impl.setOptimizerStatisticsPackage("custom-package-1"); + impl.executeQuery(Statement.of("SELECT FOO FROM BAR")); + verify(unitOfWork) + .executeQueryAsync( + CallType.SYNC, + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + .parse( + Statement.newBuilder("SELECT FOO FROM BAR") + .withQueryOptions( + QueryOptions.newBuilder() + .setOptimizerVersion("1") + .setOptimizerStatisticsPackage("custom-package-1") + .build()) + .build()), + AnalyzeMode.NONE); + + // Execute query with an optimizer version and statistics package set on the connection. + impl.setOptimizerVersion("2"); + impl.setOptimizerStatisticsPackage("custom-package-2"); + impl.executeQuery(Statement.of("SELECT FOO FROM BAR")); + verify(unitOfWork) + .executeQueryAsync( + CallType.SYNC, + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + .parse( + Statement.newBuilder("SELECT FOO FROM BAR") + .withQueryOptions( + QueryOptions.newBuilder() + .setOptimizerVersion("2") + .setOptimizerStatisticsPackage("custom-package-2") + .build()) + .build()), + AnalyzeMode.NONE); + + // Execute query with an optimizer version and statistics package set on the connection and + // PrefetchChunks query + // option specified for the query. 
+ QueryOption prefetchOption = Options.prefetchChunks(100); + impl.setOptimizerVersion("3"); + impl.setOptimizerStatisticsPackage("custom-package-3"); + impl.executeQuery(Statement.of("SELECT FOO FROM BAR"), prefetchOption); + verify(unitOfWork) + .executeQueryAsync( + CallType.SYNC, + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + .parse( + Statement.newBuilder("SELECT FOO FROM BAR") + .withQueryOptions( + QueryOptions.newBuilder() + .setOptimizerVersion("3") + .setOptimizerStatisticsPackage("custom-package-3") + .build()) + .build()), + AnalyzeMode.NONE, + prefetchOption); + + // Execute query with an optimizer version and statistics package set on the connection, and + // the same options also + // passed in to the query. The specific options passed in to the query should take precedence. + impl.setOptimizerVersion("4"); + impl.setOptimizerStatisticsPackage("custom-package-4"); + impl.executeQuery( + Statement.newBuilder("SELECT FOO FROM BAR") + .withQueryOptions( + QueryOptions.newBuilder() + .setOptimizerVersion("5") + .setOptimizerStatisticsPackage("custom-package-5") + .build()) + .build(), + prefetchOption); + verify(unitOfWork) + .executeQueryAsync( + CallType.SYNC, + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + .parse( + Statement.newBuilder("SELECT FOO FROM BAR") + .withQueryOptions( + QueryOptions.newBuilder() + .setOptimizerVersion("5") + .setOptimizerStatisticsPackage("custom-package-5") + .build()) + .build()), + AnalyzeMode.NONE, + prefetchOption); + } + } + + @Test + public void testStatementTagAlwaysAllowed() { + ConnectionOptions connectionOptions = mock(ConnectionOptions.class); + when(connectionOptions.isAutocommit()).thenReturn(true); + when(connectionOptions.getInitialConnectionPropertyValues()).thenReturn(ImmutableMap.of()); + SpannerPool spannerPool = mock(SpannerPool.class); + DdlClient ddlClient = mock(DdlClient.class); + DatabaseClient dbClient = mock(DatabaseClient.class); + 
when(dbClient.getDialect()).thenReturn(Dialect.GOOGLE_STANDARD_SQL); + final UnitOfWork unitOfWork = mock(UnitOfWork.class); + when(unitOfWork.executeQueryAsync( + any(), any(ParsedStatement.class), any(AnalyzeMode.class), Mockito.any())) + .thenReturn(ApiFutures.immediateFuture(mock(ResultSet.class))); + try (ConnectionImpl connection = + new ConnectionImpl( + connectionOptions, spannerPool, ddlClient, dbClient, mock(BatchClient.class)) { + @Override + UnitOfWork getCurrentUnitOfWorkOrStartNewUnitOfWork( + @Nonnull ParsedStatement parsedStatement, boolean isInternalMetadataQuery) { + return unitOfWork; + } + }) { + assertTrue(connection.isAutocommit()); + + assertNull(connection.getStatementTag()); + connection.setStatementTag("tag"); + assertEquals("tag", connection.getStatementTag()); + connection.setStatementTag(null); + assertNull(connection.getStatementTag()); + + connection.setAutocommit(false); + + connection.setStatementTag("tag"); + assertEquals("tag", connection.getStatementTag()); + connection.setStatementTag(null); + assertNull(connection.getStatementTag()); + + // Start a transaction + connection.execute(Statement.of("SELECT FOO FROM BAR")); + connection.setStatementTag("tag"); + assertEquals("tag", connection.getStatementTag()); + connection.setStatementTag(null); + assertNull(connection.getStatementTag()); + } + } + + @Test + public void testTransactionTagAllowedInTransaction() { + ConnectionOptions connectionOptions = mock(ConnectionOptions.class); + when(connectionOptions.isAutocommit()).thenReturn(false); + when(connectionOptions.getInitialConnectionPropertyValues()).thenReturn(ImmutableMap.of()); + SpannerPool spannerPool = mock(SpannerPool.class); + DdlClient ddlClient = mock(DdlClient.class); + DatabaseClient dbClient = mock(DatabaseClient.class); + when(dbClient.getDialect()).thenReturn(Dialect.GOOGLE_STANDARD_SQL); + try (ConnectionImpl connection = + new ConnectionImpl( + connectionOptions, spannerPool, ddlClient, dbClient, 
mock(BatchClient.class))) { + assertFalse(connection.isAutocommit()); + + assertNull(connection.getTransactionTag()); + connection.setTransactionTag("tag"); + assertEquals("tag", connection.getTransactionTag()); + connection.setTransactionTag(null); + assertNull(connection.getTransactionTag()); + + // Committing or rolling back a transaction should clear the transaction tag for the next + // transaction. + connection.setTransactionTag("tag"); + assertEquals("tag", connection.getTransactionTag()); + connection.commit(); + assertNull(connection.getTransactionTag()); + + connection.setTransactionTag("tag"); + assertEquals("tag", connection.getTransactionTag()); + connection.rollback(); + assertNull(connection.getTransactionTag()); + + // Temporary transactions should also allow transaction tags. + connection.setAutocommit(false); + connection.beginTransaction(); + assertNull(connection.getTransactionTag()); + connection.setTransactionTag("tag"); + assertEquals("tag", connection.getTransactionTag()); + connection.commit(); + assertNull(connection.getTransactionTag()); + } + } + + @Test + public void testTransactionTagNotAllowedWithoutTransaction() { + ConnectionOptions connectionOptions = mock(ConnectionOptions.class); + when(connectionOptions.isAutocommit()).thenReturn(true); + when(connectionOptions.getInitialConnectionPropertyValues()).thenReturn(ImmutableMap.of()); + SpannerPool spannerPool = mock(SpannerPool.class); + DdlClient ddlClient = mock(DdlClient.class); + DatabaseClient dbClient = mock(DatabaseClient.class); + when(dbClient.getDialect()).thenReturn(Dialect.GOOGLE_STANDARD_SQL); + try (ConnectionImpl connection = + new ConnectionImpl( + connectionOptions, spannerPool, ddlClient, dbClient, mock(BatchClient.class))) { + assertTrue(connection.isAutocommit()); + + try { + connection.setTransactionTag("tag"); + fail("missing expected exception"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + } + + 
@Test + public void testTransactionTagNotAllowedAfterTransactionStarted() { + ConnectionOptions connectionOptions = mock(ConnectionOptions.class); + when(connectionOptions.isAutocommit()).thenReturn(false); + when(connectionOptions.getInitialConnectionPropertyValues()).thenReturn(ImmutableMap.of()); + SpannerPool spannerPool = mock(SpannerPool.class); + DdlClient ddlClient = mock(DdlClient.class); + DatabaseClient dbClient = mock(DatabaseClient.class); + when(dbClient.getDialect()).thenReturn(Dialect.GOOGLE_STANDARD_SQL); + final UnitOfWork unitOfWork = mock(UnitOfWork.class); + // Indicate that a transaction has been started. + when(unitOfWork.getState()).thenReturn(UnitOfWorkState.STARTED); + when(unitOfWork.executeQueryAsync( + any(), any(ParsedStatement.class), any(AnalyzeMode.class), Mockito.any())) + .thenReturn(ApiFutures.immediateFuture(mock(ResultSet.class))); + when(unitOfWork.rollbackAsync(any(), any())).thenReturn(ApiFutures.immediateFuture(null)); + try (ConnectionImpl connection = + new ConnectionImpl( + connectionOptions, spannerPool, ddlClient, dbClient, mock(BatchClient.class)) { + @Override + UnitOfWork createNewUnitOfWork( + boolean isInternalMetadataQuery, + boolean forceSingleUse, + boolean autoBatchDml, + StatementType statementType) { + return unitOfWork; + } + }) { + // Start a transaction + connection.execute(Statement.of("SELECT FOO FROM BAR")); + try { + connection.setTransactionTag("tag"); + fail("missing expected exception"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + assertNull(connection.getTransactionTag()); + } + } + + @Test + public void testCheckResultTypeAllowed() { + AbstractStatementParser parser = + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL); + String query = "select * from foo"; + String dml = "update foo set bar=1 where true"; + String dmlReturning = "insert into foo (id, value) values (1, 'One') then return id"; + String ddl = "create table 
foo"; + String set = "set readonly=true"; + String show = "show variable readonly"; + String start = "start batch dml"; + + // null means all statements should be allowed. + ImmutableSet allowedResultTypes = null; + checkResultTypeAllowed(parser.parse(Statement.of(query)), allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(dml)), allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(dmlReturning)), allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(ddl)), allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(set)), allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(show)), allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(start)), allowedResultTypes); + + allowedResultTypes = ImmutableSet.of(); + assertThrowResultNotAllowed(parser, query, allowedResultTypes); + assertThrowResultNotAllowed(parser, dml, allowedResultTypes); + assertThrowResultNotAllowed(parser, dmlReturning, allowedResultTypes); + assertThrowResultNotAllowed(parser, ddl, allowedResultTypes); + assertThrowResultNotAllowed(parser, set, allowedResultTypes); + assertThrowResultNotAllowed(parser, show, allowedResultTypes); + assertThrowResultNotAllowed(parser, start, allowedResultTypes); + + allowedResultTypes = ImmutableSet.of(ResultType.RESULT_SET); + checkResultTypeAllowed(parser.parse(Statement.of(query)), allowedResultTypes); + assertThrowResultNotAllowed(parser, dml, allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(dmlReturning)), allowedResultTypes); + assertThrowResultNotAllowed(parser, ddl, allowedResultTypes); + assertThrowResultNotAllowed(parser, set, allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(show)), allowedResultTypes); + assertThrowResultNotAllowed(parser, start, allowedResultTypes); + + allowedResultTypes = ImmutableSet.of(ResultType.UPDATE_COUNT); + assertThrowResultNotAllowed(parser, query, allowedResultTypes); + 
checkResultTypeAllowed(parser.parse(Statement.of(dml)), allowedResultTypes); + assertThrowResultNotAllowed(parser, dmlReturning, allowedResultTypes); + assertThrowResultNotAllowed(parser, ddl, allowedResultTypes); + assertThrowResultNotAllowed(parser, set, allowedResultTypes); + assertThrowResultNotAllowed(parser, show, allowedResultTypes); + assertThrowResultNotAllowed(parser, start, allowedResultTypes); + + allowedResultTypes = ImmutableSet.of(ResultType.NO_RESULT); + assertThrowResultNotAllowed(parser, query, allowedResultTypes); + assertThrowResultNotAllowed(parser, dml, allowedResultTypes); + assertThrowResultNotAllowed(parser, dmlReturning, allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(ddl)), allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(set)), allowedResultTypes); + assertThrowResultNotAllowed(parser, show, allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(start)), allowedResultTypes); + + allowedResultTypes = ImmutableSet.of(ResultType.RESULT_SET, ResultType.UPDATE_COUNT); + checkResultTypeAllowed(parser.parse(Statement.of(query)), allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(dml)), allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(dmlReturning)), allowedResultTypes); + assertThrowResultNotAllowed(parser, ddl, allowedResultTypes); + assertThrowResultNotAllowed(parser, set, allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(show)), allowedResultTypes); + assertThrowResultNotAllowed(parser, start, allowedResultTypes); + + allowedResultTypes = ImmutableSet.of(ResultType.RESULT_SET, ResultType.NO_RESULT); + checkResultTypeAllowed(parser.parse(Statement.of(query)), allowedResultTypes); + assertThrowResultNotAllowed(parser, dml, allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(dmlReturning)), allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(ddl)), allowedResultTypes); + 
checkResultTypeAllowed(parser.parse(Statement.of(set)), allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(show)), allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(start)), allowedResultTypes); + + allowedResultTypes = ImmutableSet.of(ResultType.UPDATE_COUNT, ResultType.NO_RESULT); + assertThrowResultNotAllowed(parser, query, allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(dml)), allowedResultTypes); + assertThrowResultNotAllowed(parser, dmlReturning, allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(ddl)), allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(set)), allowedResultTypes); + assertThrowResultNotAllowed(parser, show, allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(start)), allowedResultTypes); + + allowedResultTypes = + ImmutableSet.of(ResultType.RESULT_SET, ResultType.UPDATE_COUNT, ResultType.NO_RESULT); + checkResultTypeAllowed(parser.parse(Statement.of(query)), allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(dml)), allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(dmlReturning)), allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(ddl)), allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(set)), allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(show)), allowedResultTypes); + checkResultTypeAllowed(parser.parse(Statement.of(start)), allowedResultTypes); + } + + @Test + public void testSetRetryAbortsInternally() { + try (ConnectionImpl connection = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertFalse("Read-only should be disabled by default", connection.isReadOnly()); + assertTrue("Autocommit should be enabled by default", connection.isAutocommit()); + assertTrue( + "Retry aborts internally should be enabled by default on 
test connections", + connection.isRetryAbortsInternally()); + + // It should be possible to change this value also when in auto-commit mode. + connection.setRetryAbortsInternally(false); + assertFalse(connection.isRetryAbortsInternally()); + + // It should be possible to change this value also when in transactional mode, as long as + // there is no active transaction. + connection.setAutocommit(false); + connection.setRetryAbortsInternally(true); + assertTrue(connection.isRetryAbortsInternally()); + + // It should be possible to change the value when in read-only mode. + connection.setReadOnly(true); + connection.setRetryAbortsInternally(false); + assertFalse(connection.isRetryAbortsInternally()); + + // It should not be possible to change the value when there is an active transaction. + connection.setReadOnly(false); + connection.setAutocommit(false); + connection.execute(Statement.of(SELECT)); + assertThrows(SpannerException.class, () -> connection.setRetryAbortsInternally(true)); + // Verify that the value did not change. + assertFalse(connection.isRetryAbortsInternally()); + + // Rolling back the connection should allow us to set the property again. 
+ connection.rollback(); + connection.setRetryAbortsInternally(true); + assertTrue(connection.isRetryAbortsInternally()); + } + } + + private void assertThrowResultNotAllowed( + AbstractStatementParser parser, String sql, ImmutableSet allowedResultTypes) { + SpannerException exception = + assertThrows( + SpannerException.class, + () -> checkResultTypeAllowed(parser.parse(Statement.of(sql)), allowedResultTypes)); + assertEquals(ErrorCode.INVALID_ARGUMENT, exception.getErrorCode()); + assertTrue( + exception.getMessage(), + exception + .getMessage() + .contains( + "Only statements that return a result of one of the following types are allowed")); + } + + @Test + public void testSetAndGetClientContext() { + try (Connection connection = + createConnection( + ConnectionOptions.newBuilder() + .setUri(URI) + .setCredentials(NoCredentials.getInstance()) + .build())) { + com.google.spanner.v1.RequestOptions.ClientContext context = + com.google.spanner.v1.RequestOptions.ClientContext.newBuilder() + .putSecureContext( + "key", com.google.protobuf.Value.newBuilder().setStringValue("test").build()) + .build(); + connection.setClientContext(context); + assertEquals(context, connection.getClientContext()); + } + } + + @Test + public void testResetClearsClientContext() { + try (Connection connection = + createConnection( + ConnectionOptions.newBuilder() + .setUri(URI) + .setCredentials(NoCredentials.getInstance()) + .build())) { + com.google.spanner.v1.RequestOptions.ClientContext context = + com.google.spanner.v1.RequestOptions.ClientContext.newBuilder() + .putSecureContext( + "key", com.google.protobuf.Value.newBuilder().setStringValue("test").build()) + .build(); + connection.setClientContext(context); + assertEquals(context, connection.getClientContext()); + + connection.reset(); + assertNull(connection.getClientContext()); + } + } + + @Test + public void testProtoDescriptorsAlwaysAllowed() { + ConnectionOptions connectionOptions = mock(ConnectionOptions.class); + 
when(connectionOptions.isAutocommit()).thenReturn(true); + when(connectionOptions.getInitialConnectionPropertyValues()).thenReturn(ImmutableMap.of()); + SpannerPool spannerPool = mock(SpannerPool.class); + DdlClient ddlClient = mock(DdlClient.class); + DatabaseClient dbClient = mock(DatabaseClient.class); + when(dbClient.getDialect()).thenReturn(Dialect.GOOGLE_STANDARD_SQL); + final UnitOfWork unitOfWork = mock(UnitOfWork.class); + final String protoDescriptorsFilePath = + "src/test/resources/com/google/cloud/spanner/descriptors.pb"; + when(unitOfWork.executeDdlAsync(any(), any(ParsedStatement.class))) + .thenReturn(ApiFutures.immediateFuture(null)); + when(unitOfWork.executeQueryAsync( + any(), any(ParsedStatement.class), any(AnalyzeMode.class), Mockito.any())) + .thenReturn(ApiFutures.immediateFuture(mock(ResultSet.class))); + try (ConnectionImpl connection = + new ConnectionImpl( + connectionOptions, spannerPool, ddlClient, dbClient, mock(BatchClient.class)) { + @Override + UnitOfWork getCurrentUnitOfWorkOrStartNewUnitOfWork( + StatementType statementType, + ParsedStatement parsedStatement, + boolean isInternalMetadataQuery) { + return unitOfWork; + } + }) { + byte[] protoDescriptors; + try { + InputStream in = + ConnectionImplTest.class + .getClassLoader() + .getResourceAsStream("com/google/cloud/spanner/descriptors.pb"); + assertNotNull(in); + protoDescriptors = ByteStreams.toByteArray(in); + } catch (Exception e) { + throw SpannerExceptionFactory.newSpannerException(e); + } + + assertTrue(connection.isAutocommit()); + + assertNull(connection.getProtoDescriptors()); + connection.setProtoDescriptors(protoDescriptors); + assertArrayEquals(protoDescriptors, connection.getProtoDescriptors()); + + connection.setAutocommit(false); + + connection.setProtoDescriptors(protoDescriptors); + assertArrayEquals(protoDescriptors, connection.getProtoDescriptors()); + + // proto descriptor should reset after executing a DDL statement + 
connection.setProtoDescriptors(protoDescriptors); + assertArrayEquals(protoDescriptors, connection.getProtoDescriptors()); + connection.execute(Statement.of("CREATE PROTO BUNDLE (examples.spanner.music.SingerInfo)")); + assertNull(connection.getProtoDescriptors()); + + // proto descriptor should not reset if the statement is not a DDL statement + connection.setProtoDescriptors(protoDescriptors); + assertArrayEquals(protoDescriptors, connection.getProtoDescriptors()); + connection.execute(Statement.of("SELECT FOO FROM BAR")); + assertArrayEquals(protoDescriptors, connection.getProtoDescriptors()); + + // proto descriptor file path should reset after executing a DDL statement + connection.setProtoDescriptorsFilePath(protoDescriptorsFilePath); + assertArrayEquals(protoDescriptors, connection.getProtoDescriptors()); + connection.execute(Statement.of("CREATE PROTO BUNDLE (examples.spanner.music.SingerInfo)")); + assertNull(connection.getProtoDescriptors()); + assertNull(connection.getProtoDescriptorsFilePath()); + + // proto descriptor file path should not reset if the statement is not a DDL statement + connection.setProtoDescriptorsFilePath(protoDescriptorsFilePath); + assertArrayEquals(protoDescriptors, connection.getProtoDescriptors()); + connection.execute(Statement.of("SELECT FOO FROM BAR")); + assertArrayEquals(protoDescriptors, connection.getProtoDescriptors()); + assertEquals(protoDescriptorsFilePath, connection.getProtoDescriptorsFilePath()); + + // test proto descriptor file path as input + connection.setProtoDescriptorsFilePath(protoDescriptorsFilePath); + assertArrayEquals(protoDescriptors, connection.getProtoDescriptors()); + connection.execute(Statement.of("CREATE PROTO BUNDLE (examples.spanner.music.SingerInfo)")); + assertNull(connection.getProtoDescriptors()); + + // proto descriptor set through file path should overwrite the proto descriptor set from + // byte[] + connection.setProtoDescriptors("protoDescriptors".getBytes()); + 
connection.setProtoDescriptorsFilePath(protoDescriptorsFilePath); + assertArrayEquals(protoDescriptors, connection.getProtoDescriptors()); + connection.execute(Statement.of("CREATE PROTO BUNDLE (examples.spanner.music.SingerInfo)")); + assertNull(connection.getProtoDescriptors()); + + // proto descriptor set through byte[] should overwrite the proto descriptor from file path + connection.setProtoDescriptorsFilePath(protoDescriptorsFilePath); + connection.setProtoDescriptors("protoDescriptors".getBytes()); + assertArrayEquals("protoDescriptors".getBytes(), connection.getProtoDescriptors()); + connection.execute(Statement.of("CREATE PROTO BUNDLE (examples.spanner.music.SingerInfo)")); + assertNull(connection.getProtoDescriptors()); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplTransactionalReadOnlyTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplTransactionalReadOnlyTest.java new file mode 100644 index 000000000000..627fadd79eb9 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplTransactionalReadOnlyTest.java @@ -0,0 +1,1236 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.TimestampBound.Mode; +import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; +import java.util.concurrent.TimeUnit; +import org.junit.experimental.runners.Enclosed; +import org.junit.runner.RunWith; + +/** + * The tests in this class do not need to be implemented for client libraries in other programming + * languages, as all test cases are covered by the file ConnectionImplGeneratedSqlScriptTest.sql + */ +@RunWith(Enclosed.class) +public class ConnectionImplTransactionalReadOnlyTest { + + public static class ConnectionImplTransactionalReadOnlyNoActionsTest + extends AbstractConnectionImplTest { + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=TRUE;"); + connection.setReadOnly(true); + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + return mode == TransactionMode.READ_ONLY_TRANSACTION; + } + + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + 
return true; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return mode == Mode.STRONG || mode == Mode.EXACT_STALENESS || mode == Mode.READ_TIMESTAMP; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean isRollbackAllowed() { + return true; + } + + @Override + boolean expectedIsInTransaction() { + return true; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + // no query has been executed yet + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // read-only + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE || type == StatementType.QUERY; + } + + @Override + boolean isWriteAllowed() { + return false; + } + + @Override + boolean isStartBatchDmlAllowed() { + return false; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplTransactionalReadOnlyAfterSelectTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=TRUE;"); + connection.setReadOnly(true); + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + // no call to next() on ResultSet + log(SELECT + ";"); + 
connection.executeQuery(Statement.of(SELECT)); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isSetAutocommitAllowed() { + return false; + } + + @Override + boolean isSetReadOnlyAllowed() { + return false; + } + + @Override + boolean isBeginTransactionAllowed() { + return false; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + return false; + } + + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return true; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + // transaction has started + return false; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean isRollbackAllowed() { + return true; + } + + @Override + boolean expectedIsInTransaction() { + return true; + } + + @Override + boolean expectedIsTransactionStarted() { + return true; + } + + @Override + boolean isGetReadTimestampAllowed() { + // last statement was a query, next() has not yet been called, but as the connection api + // returns a directly executed resultset, the read timestamp is already available + return true; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // read-only + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE || type == StatementType.QUERY; + } + + @Override + boolean isWriteAllowed() { + 
return false; + } + + @Override + boolean isStartBatchDmlAllowed() { + return false; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplTransactionalReadOnlyAfterSelectAndResultSetNextTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=TRUE;"); + connection.setReadOnly(true); + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + log("@EXPECT RESULT_SET 'TEST',1"); + log(SELECT + ";"); + connection.executeQuery(Statement.of(SELECT)).next(); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isSetAutocommitAllowed() { + return false; + } + + @Override + boolean isSetReadOnlyAllowed() { + return false; + } + + @Override + boolean isBeginTransactionAllowed() { + return false; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + // transaction is running + return false; + } + + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return true; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { 
+ // transaction has started + return false; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean isRollbackAllowed() { + return true; + } + + @Override + boolean expectedIsInTransaction() { + return true; + } + + @Override + boolean expectedIsTransactionStarted() { + return true; + } + + @Override + boolean isGetReadTimestampAllowed() { + // last statement was a query + return true; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // read-only + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE || type == StatementType.QUERY; + } + + @Override + boolean isWriteAllowed() { + return false; + } + + @Override + boolean isStartBatchDmlAllowed() { + return false; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplTransactionalReadOnlyAfterBeginTransactionTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=TRUE;"); + connection.setReadOnly(true); + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + log("BEGIN TRANSACTION;"); + connection.beginTransaction(); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + throw new IllegalArgumentException(); + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + throw new IllegalArgumentException(); + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + throw new 
IllegalArgumentException(); + } + + @Override + boolean isSetAutocommitAllowed() { + return false; + } + + @Override + boolean isSetReadOnlyAllowed() { + return false; + } + + @Override + boolean isBeginTransactionAllowed() { + return false; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + // connection is in read-only mode + return mode == TransactionMode.READ_ONLY_TRANSACTION; + } + + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return true; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + // in a transaction, only exact allowed + return mode == Mode.STRONG || mode == Mode.EXACT_STALENESS || mode == Mode.READ_TIMESTAMP; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean isRollbackAllowed() { + return true; + } + + @Override + boolean expectedIsInTransaction() { + return true; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + // no query executed yet + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // read-only + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE || type == StatementType.QUERY; + } + + @Override + boolean isWriteAllowed() { + return false; + } + + @Override + boolean isStartBatchDmlAllowed() { + return false; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class 
ConnectionImplTransactionalReadOnlyAfterTransactionTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=TRUE;"); + connection.setReadOnly(true); + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + log("BEGIN TRANSACTION;"); + connection.beginTransaction(); + log("@EXPECT RESULT_SET 'TEST',1"); + log(SELECT + ";"); + connection.execute(Statement.of(SELECT)).getResultSet().next(); + log("COMMIT;"); + connection.commit(); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + return mode == TransactionMode.READ_ONLY_TRANSACTION; + } + + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return true; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return mode == Mode.STRONG || mode == Mode.EXACT_STALENESS || mode == Mode.READ_TIMESTAMP; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean 
isRollbackAllowed() { + return true; + } + + @Override + boolean expectedIsInTransaction() { + return true; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + // last action was a transaction that ended with a select query + return true; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // read-only + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE || type == StatementType.QUERY; + } + + @Override + boolean isWriteAllowed() { + return false; + } + + @Override + boolean isStartBatchDmlAllowed() { + return false; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplTransactionalReadOnlyAfterRollbackTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=TRUE;"); + connection.setReadOnly(true); + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + log("BEGIN TRANSACTION;"); + connection.beginTransaction(); + log("@EXPECT RESULT_SET 'TEST',1"); + log(SELECT + ";"); + connection.execute(Statement.of(SELECT)).getResultSet().next(); + log("ROLLBACK;"); + connection.rollback(); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + 
} + + @Override + boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + return mode == TransactionMode.READ_ONLY_TRANSACTION; + } + + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return true; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return mode == Mode.STRONG || mode == Mode.EXACT_STALENESS || mode == Mode.READ_TIMESTAMP; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean isRollbackAllowed() { + return true; + } + + @Override + boolean expectedIsInTransaction() { + return true; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + // transaction was rolled back + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // read-only + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE || type == StatementType.QUERY; + } + + @Override + boolean isWriteAllowed() { + return false; + } + + @Override + boolean isStartBatchDmlAllowed() { + return false; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplTransactionalReadOnlyAfterSetReadOnlyMaxStalenessTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + 
Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=TRUE;"); + connection.setReadOnly(true); + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + TimestampBound staleness = TimestampBound.ofExactStaleness(10L, TimeUnit.SECONDS); + logWithNamespace( + "SET %sREAD_ONLY_STALENESS='" + + ReadOnlyStalenessUtil.timestampBoundToString(staleness) + + "';"); + connection.setReadOnlyStaleness(staleness); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + return mode == TransactionMode.READ_ONLY_TRANSACTION; + } + + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return true; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return mode == Mode.STRONG || mode == Mode.EXACT_STALENESS || mode == Mode.READ_TIMESTAMP; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean isRollbackAllowed() { + return true; + } + + @Override + boolean expectedIsInTransaction() { + return true; + } + + @Override + boolean 
expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + // no query executed yet + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // read-only + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE || type == StatementType.QUERY; + } + + @Override + boolean isWriteAllowed() { + return false; + } + + @Override + boolean isStartBatchDmlAllowed() { + return false; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplTransactionalReadOnlyAfterEmptyCommitTest + extends AbstractConnectionImplTest { + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=TRUE;"); + connection.setReadOnly(true); + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + log("COMMIT;"); + connection.commit(); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + return mode == TransactionMode.READ_ONLY_TRANSACTION; + } + + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + 
boolean isGetTransactionModeAllowed() { + return true; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return mode == Mode.STRONG || mode == Mode.EXACT_STALENESS || mode == Mode.READ_TIMESTAMP; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean isRollbackAllowed() { + return true; + } + + @Override + boolean expectedIsInTransaction() { + return true; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + // last commit was empty + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // read-only + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE || type == StatementType.QUERY; + } + + @Override + boolean isWriteAllowed() { + return false; + } + + @Override + boolean isStartBatchDmlAllowed() { + return false; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplTransactionalReadWriteTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplTransactionalReadWriteTest.java new file mode 100644 index 000000000000..ba47421287a1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplTransactionalReadWriteTest.java @@ -0,0 +1,2017 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache 
License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.TimestampBound.Mode; +import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; +import java.util.concurrent.TimeUnit; +import org.junit.experimental.runners.Enclosed; +import org.junit.runner.RunWith; + +/** + * The tests in this class do not need to be implemented for client libraries in other programming + * languages, as all test cases are covered by the file ConnectionImplGeneratedSqlScriptTest.sql + */ +@RunWith(Enclosed.class) +public class ConnectionImplTransactionalReadWriteTest { + + public static class ConnectionImplTransactionalReadWriteNoActionsTest + extends AbstractConnectionImplTest { + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return true; + } + + @Override + 
boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + return true; + } + + @Override + boolean isSetTransactionTagAllowed() { + return true; + } + + @Override + boolean isGetTransactionModeAllowed() { + return true; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return mode == Mode.STRONG || mode == Mode.EXACT_STALENESS || mode == Mode.READ_TIMESTAMP; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean isRollbackAllowed() { + return true; + } + + @Override + boolean expectedIsInTransaction() { + return true; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + // no query has been executed yet + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // no commit + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE + || type == StatementType.QUERY + || type == StatementType.UPDATE + || type == StatementType.DDL; + } + + @Override + boolean isWriteAllowed() { + return true; + } + + @Override + boolean isStartBatchDmlAllowed() { + return true; + } + + @Override + boolean isStartBatchDdlAllowed() { + return true; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class 
ConnectionImplTransactionalReadWriteAfterSelectTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + // no call to next() on ResultSet + log(SELECT + ";"); + connection.executeQuery(Statement.of(SELECT)); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isSetAutocommitAllowed() { + return false; + } + + @Override + boolean isSetReadOnlyAllowed() { + return false; + } + + @Override + boolean isBeginTransactionAllowed() { + return false; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + return false; + } + + @Override + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return true; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + // transaction has started + return false; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean isRollbackAllowed() { + return true; + } + + @Override + boolean expectedIsInTransaction() { + return true; + } + + @Override + boolean 
expectedIsTransactionStarted() { + return true; + } + + @Override + boolean isGetReadTimestampAllowed() { + // read-write transactions never have a read-timestamp + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // no commit yet + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE + || type == StatementType.QUERY + || type == StatementType.UPDATE; + } + + @Override + boolean isWriteAllowed() { + return true; + } + + @Override + boolean isStartBatchDmlAllowed() { + return true; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplTransactionalReadWriteAfterSelectAndResultSetNextTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + log("@EXPECT RESULT_SET 'TEST',1"); + log(SELECT + ";"); + connection.executeQuery(Statement.of(SELECT)).next(); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isSetAutocommitAllowed() { + return false; + } + + @Override + boolean isSetReadOnlyAllowed() { + return false; + } + + @Override + boolean isBeginTransactionAllowed() { + return false; + 
} + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + // transaction is running + return false; + } + + @Override + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return true; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + // transaction has started + return false; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean isRollbackAllowed() { + return true; + } + + @Override + boolean expectedIsInTransaction() { + return true; + } + + @Override + boolean expectedIsTransactionStarted() { + return true; + } + + @Override + boolean isGetReadTimestampAllowed() { + // read-write transactions never have a read-timestamp + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // no commit yet + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE + || type == StatementType.QUERY + || type == StatementType.UPDATE; + } + + @Override + boolean isWriteAllowed() { + return true; + } + + @Override + boolean isStartBatchDmlAllowed() { + return true; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplTransactionalReadWriteAfterBeginTransactionTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) 
+ .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + log("BEGIN TRANSACTION;"); + connection.beginTransaction(); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + throw new IllegalArgumentException(); + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + throw new IllegalArgumentException(); + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + throw new IllegalArgumentException(); + } + + @Override + boolean isSetAutocommitAllowed() { + return false; + } + + @Override + boolean isSetReadOnlyAllowed() { + return false; + } + + @Override + boolean isBeginTransactionAllowed() { + return false; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + return true; + } + + @Override + boolean isSetTransactionTagAllowed() { + return true; + } + + @Override + boolean isGetTransactionModeAllowed() { + return true; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + // in a transaction, only exact allowed + return mode == Mode.STRONG || mode == Mode.EXACT_STALENESS || mode == Mode.READ_TIMESTAMP; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean isRollbackAllowed() { + return true; + } + + @Override + boolean expectedIsInTransaction() { + return true; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + // read-write transaction never have a read-timestamp + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // no commit yet + 
return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE + || type == StatementType.QUERY + || type == StatementType.UPDATE; + } + + @Override + boolean isWriteAllowed() { + return true; + } + + @Override + boolean isStartBatchDmlAllowed() { + return true; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplTransactionalReadWriteAfterTransactionTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + log("BEGIN TRANSACTION;"); + connection.beginTransaction(); + log("@EXPECT RESULT_SET 'TEST',1"); + log(SELECT + ";"); + connection.execute(Statement.of(SELECT)).getResultSet().next(); + log("COMMIT;"); + connection.commit(); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + return true; + } + + @Override + boolean isSetTransactionTagAllowed() { + return true; + } + + @Override + boolean isGetTransactionModeAllowed() 
{ + return true; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return mode == Mode.STRONG || mode == Mode.EXACT_STALENESS || mode == Mode.READ_TIMESTAMP; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean isRollbackAllowed() { + return true; + } + + @Override + boolean expectedIsInTransaction() { + return true; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + // last action was a read-write transaction + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + return true; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE + || type == StatementType.QUERY + || type == StatementType.UPDATE + || type == StatementType.DDL; + } + + @Override + boolean isWriteAllowed() { + return true; + } + + @Override + boolean isStartBatchDmlAllowed() { + return true; + } + + @Override + boolean isStartBatchDdlAllowed() { + return true; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplTransactionalReadWriteAfterRollbackTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + log("BEGIN 
TRANSACTION;"); + connection.beginTransaction(); + log("@EXPECT RESULT_SET 'TEST',1"); + log(SELECT + ";"); + connection.execute(Statement.of(SELECT)).getResultSet().next(); + log("ROLLBACK;"); + connection.rollback(); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + return true; + } + + @Override + boolean isSetTransactionTagAllowed() { + return true; + } + + @Override + boolean isGetTransactionModeAllowed() { + return true; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return mode == Mode.STRONG || mode == Mode.EXACT_STALENESS || mode == Mode.READ_TIMESTAMP; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean isRollbackAllowed() { + return true; + } + + @Override + boolean expectedIsInTransaction() { + return true; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // transaction was rolled back + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE + || type == StatementType.QUERY + || type == StatementType.UPDATE + || type 
== StatementType.DDL; + } + + @Override + boolean isWriteAllowed() { + return true; + } + + @Override + boolean isStartBatchDmlAllowed() { + return true; + } + + @Override + boolean isStartBatchDdlAllowed() { + return true; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplTransactionalReadWriteAfterSetReadOnlyMaxStalenessTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + TimestampBound staleness = TimestampBound.ofExactStaleness(10L, TimeUnit.SECONDS); + logWithNamespace( + "SET %sREAD_ONLY_STALENESS='" + + ReadOnlyStalenessUtil.timestampBoundToString(staleness) + + "';"); + connection.setReadOnlyStaleness(staleness); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + return true; + } + + @Override + boolean isSetTransactionTagAllowed() { + return true; + } + + @Override + boolean isGetTransactionModeAllowed() { + return true; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean 
isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return mode == Mode.STRONG || mode == Mode.EXACT_STALENESS || mode == Mode.READ_TIMESTAMP; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean isRollbackAllowed() { + return true; + } + + @Override + boolean expectedIsInTransaction() { + return true; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // no commit yet + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE + || type == StatementType.QUERY + || type == StatementType.UPDATE + || type == StatementType.DDL; + } + + @Override + boolean isWriteAllowed() { + return true; + } + + @Override + boolean isStartBatchDmlAllowed() { + return true; + } + + @Override + boolean isStartBatchDdlAllowed() { + return true; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplTransactionalReadWriteAfterSetTransactionReadOnlyTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + log("SET TRANSACTION READ ONLY;"); + connection.setTransactionMode(TransactionMode.READ_ONLY_TRANSACTION); + return connection; + } + + @Override + boolean 
isSelectAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isSetAutocommitAllowed() { + return false; + } + + @Override + boolean isSetReadOnlyAllowed() { + return false; + } + + @Override + boolean isBeginTransactionAllowed() { + return false; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + return true; + } + + @Override + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return true; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return mode == Mode.STRONG || mode == Mode.EXACT_STALENESS || mode == Mode.READ_TIMESTAMP; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean isRollbackAllowed() { + return true; + } + + @Override + boolean expectedIsInTransaction() { + return true; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // no commit yet + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE || type == StatementType.QUERY; + } + + @Override + boolean isWriteAllowed() { + return false; + } + + @Override + boolean isStartBatchDmlAllowed() { + return false; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + 
return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplTransactionalReadWriteAfterCommittedReadOnlyTransactionTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + log("SET TRANSACTION READ ONLY;"); + connection.setTransactionMode(TransactionMode.READ_ONLY_TRANSACTION); + // ensure there will be a read-timestamp available by calling next() + log("@EXPECT RESULT_SET 'TEST',1"); + log(SELECT + ";"); + connection.execute(Statement.of(SELECT)).getResultSet().next(); + log("COMMIT;"); + connection.commit(); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + return true; + } + + @Override + boolean isSetTransactionTagAllowed() { + return true; + } + + @Override + boolean isGetTransactionModeAllowed() { + return true; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return mode == Mode.STRONG || mode == 
Mode.EXACT_STALENESS || mode == Mode.READ_TIMESTAMP; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean isRollbackAllowed() { + return true; + } + + @Override + boolean expectedIsInTransaction() { + return true; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + return true; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // last transaction was a read-only transaction + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE + || type == StatementType.QUERY + || type == StatementType.UPDATE + || type == StatementType.DDL; + } + + @Override + boolean isWriteAllowed() { + return true; + } + + @Override + boolean isStartBatchDmlAllowed() { + return true; + } + + @Override + boolean isStartBatchDdlAllowed() { + return true; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplTransactionalReadWriteAfterStartDdlBatchTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + log("START BATCH DDL;"); + connection.startBatchDdl(); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + 
boolean isDdlAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isSetAutocommitAllowed() { + return false; + } + + @Override + boolean isSetReadOnlyAllowed() { + return false; + } + + @Override + boolean isBeginTransactionAllowed() { + return false; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + return false; + } + + @Override + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return false; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return false; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return false; + } + + @Override + boolean isCommitAllowed() { + return false; + } + + @Override + boolean isRollbackAllowed() { + return false; + } + + @Override + boolean expectedIsInTransaction() { + return false; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // no commit yet + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE || type == StatementType.DDL; + } + + @Override + boolean isWriteAllowed() { + return false; + } + + @Override + boolean isStartBatchDmlAllowed() { + return false; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + return true; + } + + @Override + boolean isAbortBatchAllowed() { + return true; + } + } + + public static class ConnectionImplTransactionalReadWriteInDdlBatchTransactionTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + 
log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + log("START BATCH DDL;"); + connection.startBatchDdl(); + log(DDL + ";"); + connection.execute(Statement.of(DDL)); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + throw new IllegalStateException(); + } + + @Override + boolean isSetAutocommitAllowed() { + return false; + } + + @Override + boolean isSetReadOnlyAllowed() { + return false; + } + + @Override + boolean isBeginTransactionAllowed() { + return false; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + return false; + } + + @Override + boolean isSetTransactionTagAllowed() { + return false; + } + + @Override + boolean isGetTransactionModeAllowed() { + return false; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return false; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return false; + } + + @Override + boolean isCommitAllowed() { + return false; + } + + @Override + boolean isRollbackAllowed() { + return false; + } + + @Override + boolean expectedIsInTransaction() { + return false; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + return false; + } + + @Override + boolean 
isGetCommitTimestampAllowed() { + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE || type == StatementType.DDL; + } + + @Override + boolean isWriteAllowed() { + return false; + } + + @Override + boolean isStartBatchDmlAllowed() { + return false; + } + + @Override + boolean isStartBatchDdlAllowed() { + return false; + } + + @Override + boolean isRunBatchAllowed() { + return true; + } + + @Override + boolean isAbortBatchAllowed() { + return true; + } + } + + public static class ConnectionImplTransactionalReadWriteAfterRanDdlBatchTest + extends AbstractConnectionImplTest { + + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + log("START BATCH DDL;"); + connection.startBatchDdl(); + log(DDL + ";"); + connection.execute(Statement.of(DDL)); + log("RUN BATCH;"); + connection.runBatch(); + return connection; + } + + @Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + return true; + } + + @Override + boolean isSetTransactionTagAllowed() { + return true; + } + + @Override + boolean isGetTransactionModeAllowed() { + return true; + } + + @Override + boolean 
isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return mode == Mode.STRONG || mode == Mode.EXACT_STALENESS || mode == Mode.READ_TIMESTAMP; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean isRollbackAllowed() { + return true; + } + + @Override + boolean expectedIsInTransaction() { + return true; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // ddl-batch has no commit timestamp + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE + || type == StatementType.QUERY + || type == StatementType.UPDATE + || type == StatementType.DDL; + } + + @Override + boolean isWriteAllowed() { + return true; + } + + @Override + boolean isStartBatchDmlAllowed() { + return true; + } + + @Override + boolean isStartBatchDdlAllowed() { + return true; + } + + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } + + public static class ConnectionImplTransactionalReadWriteAfterEmptyCommitTest + extends AbstractConnectionImplTest { + @Override + Connection getConnection() { + log("NEW_CONNECTION;"); + Connection connection = + ConnectionImplTest.createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(ConnectionImplTest.URI) + .build()); + logWithNamespace("SET %sREADONLY=FALSE;"); + connection.setReadOnly(false); + log("SET AUTOCOMMIT=FALSE;"); + connection.setAutocommit(false); + log("COMMIT;"); + connection.commit(); + return connection; + } + + 
@Override + boolean isSelectAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDmlAllowedAfterBeginTransaction() { + return true; + } + + @Override + boolean isDdlAllowedAfterBeginTransaction() { + return false; + } + + @Override + boolean isSetAutocommitAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyAllowed() { + return true; + } + + @Override + boolean isBeginTransactionAllowed() { + return true; + } + + @Override + boolean isSetTransactionModeAllowed(TransactionMode mode) { + return true; + } + + @Override + boolean isSetTransactionTagAllowed() { + return true; + } + + @Override + boolean isGetTransactionModeAllowed() { + return true; + } + + @Override + boolean isSetAutocommitDmlModeAllowed() { + return false; + } + + @Override + boolean isGetAutocommitDmlModeAllowed() { + return true; + } + + @Override + boolean isSetReadOnlyStalenessAllowed(TimestampBound.Mode mode) { + return mode == Mode.STRONG || mode == Mode.EXACT_STALENESS || mode == Mode.READ_TIMESTAMP; + } + + @Override + boolean isGetReadOnlyStalenessAllowed() { + return true; + } + + @Override + boolean isCommitAllowed() { + return true; + } + + @Override + boolean isRollbackAllowed() { + return true; + } + + @Override + boolean expectedIsInTransaction() { + return true; + } + + @Override + boolean expectedIsTransactionStarted() { + return false; + } + + @Override + boolean isGetReadTimestampAllowed() { + // no query has been executed yet + return false; + } + + @Override + boolean isGetCommitTimestampAllowed() { + // empty commit + return false; + } + + @Override + boolean isExecuteAllowed(StatementType type) { + return type == StatementType.CLIENT_SIDE + || type == StatementType.QUERY + || type == StatementType.UPDATE + || type == StatementType.DDL; + } + + @Override + boolean isWriteAllowed() { + return true; + } + + @Override + boolean isStartBatchDmlAllowed() { + return true; + } + + @Override + boolean isStartBatchDdlAllowed() { + return true; + } 
+ + @Override + boolean isRunBatchAllowed() { + return false; + } + + @Override + boolean isAbortBatchAllowed() { + return false; + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionOptionsTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionOptionsTest.java new file mode 100644 index 000000000000..f745e9ad63ed --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionOptionsTest.java @@ -0,0 +1,1530 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.ConnectionOptions.Builder.EXTERNAL_HOST_PATTERN; +import static com.google.cloud.spanner.connection.ConnectionOptions.Builder.SPANNER_URI_PATTERN; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_ENDPOINT; +import static com.google.cloud.spanner.connection.ConnectionOptions.determineHost; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; + +import com.google.api.gax.core.CredentialsProvider; +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.auth.Credentials; +import com.google.auth.oauth2.GoogleCredentials; +import com.google.auth.oauth2.ServiceAccountCredentials; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.ImmutableMap; +import com.google.common.io.BaseEncoding; +import com.google.common.io.Files; +import java.io.File; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.Arrays; +import java.util.Collections; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.regex.Matcher; +import org.junit.Test; +import org.junit.function.ThrowingRunnable; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ConnectionOptionsTest { + private static final String FILE_TEST_PATH = + 
Objects.requireNonNull(ConnectionOptionsTest.class.getResource("test-key.json")).getFile(); + private static final String DEFAULT_HOST = null; + private static final String TEST_PROJECT = "test-project-123"; + private static final String TEST_INSTANCE = "test-instance-123"; + private static final String TEST_DATABASE = "test-database-123"; + + @Test + public void testBuildWithURIWithDots() { + ConnectionOptions.Builder builder = ConnectionOptions.newBuilder(); + builder.setUri( + "cloudspanner:/projects/some-company.com:test-project-123/instances/test-instance-123/databases/test-database-123"); + builder.setCredentialsUrl(FILE_TEST_PATH); + ConnectionOptions options = builder.build(); + assertThat(options.getHost()).isEqualTo(DEFAULT_HOST); + assertThat(options.getProjectId()).isEqualTo("some-company.com:test-project-123"); + assertThat(options.getInstanceId()).isEqualTo("test-instance-123"); + assertThat(options.getDatabaseName()).isEqualTo("test-database-123"); + assertThat(options.getCredentials()) + .isEqualTo(new CredentialsService().createCredentials(FILE_TEST_PATH)); + assertThat(options.isAutocommit()).isEqualTo(ConnectionOptions.DEFAULT_AUTOCOMMIT); + assertThat(options.isReadOnly()).isEqualTo(ConnectionOptions.DEFAULT_READONLY); + } + + @Test + public void testBuildWithValidURIAndCredentialsFileURL() { + ConnectionOptions.Builder builder = ConnectionOptions.newBuilder(); + builder.setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance-123/databases/test-database-123"); + builder.setCredentialsUrl(FILE_TEST_PATH); + ConnectionOptions options = builder.build(); + assertThat(options.getHost()).isEqualTo(DEFAULT_HOST); + assertThat(options.getProjectId()).isEqualTo("test-project-123"); + assertThat(options.getInstanceId()).isEqualTo("test-instance-123"); + assertThat(options.getDatabaseName()).isEqualTo("test-database-123"); + assertThat(options.getCredentials()) + .isEqualTo(new CredentialsService().createCredentials(FILE_TEST_PATH)); + 
assertThat(options.isAutocommit()).isEqualTo(ConnectionOptions.DEFAULT_AUTOCOMMIT); + assertThat(options.isReadOnly()).isEqualTo(ConnectionOptions.DEFAULT_READONLY); + } + + @Test + public void testBuildWithValidURIAndProperties() { + ConnectionOptions.Builder builder = ConnectionOptions.newBuilder(); + builder.setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance-123/databases/test-database-123?autocommit=false;readonly=true"); + builder.setCredentialsUrl(FILE_TEST_PATH); + ConnectionOptions options = builder.build(); + assertThat(options.getHost()).isEqualTo(DEFAULT_HOST); + assertThat(options.getProjectId()).isEqualTo("test-project-123"); + assertThat(options.getInstanceId()).isEqualTo("test-instance-123"); + assertThat(options.getDatabaseName()).isEqualTo("test-database-123"); + assertThat(options.getCredentials()) + .isEqualTo(new CredentialsService().createCredentials(FILE_TEST_PATH)); + assertThat(options.isAutocommit()).isEqualTo(false); + assertThat(options.isReadOnly()).isEqualTo(true); + } + + @Test + public void testBuildWithHostAndValidURI() { + ConnectionOptions.Builder builder = ConnectionOptions.newBuilder(); + builder.setUri( + "cloudspanner://test-spanner.googleapis.com/projects/test-project-123/instances/test-instance-123/databases/test-database-123"); + builder.setCredentialsUrl(FILE_TEST_PATH); + ConnectionOptions options = builder.build(); + assertThat(options.getHost()).isEqualTo("https://test-spanner.googleapis.com"); + assertThat(options.getProjectId()).isEqualTo("test-project-123"); + assertThat(options.getInstanceId()).isEqualTo("test-instance-123"); + assertThat(options.getDatabaseName()).isEqualTo("test-database-123"); + assertThat(options.getCredentials()) + .isEqualTo(new CredentialsService().createCredentials(FILE_TEST_PATH)); + assertThat(options.isAutocommit()).isEqualTo(ConnectionOptions.DEFAULT_AUTOCOMMIT); + assertThat(options.isReadOnly()).isEqualTo(ConnectionOptions.DEFAULT_READONLY); + } + + @Test + 
public void testBuildWithLocalhostPortAndValidURI() { + ConnectionOptions.Builder builder = ConnectionOptions.newBuilder(); + builder.setUri( + "cloudspanner://localhost:8443/projects/test-project-123/instances/test-instance-123/databases/test-database-123"); + builder.setCredentialsUrl(FILE_TEST_PATH); + ConnectionOptions options = builder.build(); + assertThat(options.getHost()).isEqualTo("https://localhost:8443"); + assertThat(options.getProjectId()).isEqualTo("test-project-123"); + assertThat(options.getInstanceId()).isEqualTo("test-instance-123"); + assertThat(options.getDatabaseName()).isEqualTo("test-database-123"); + assertThat(options.getCredentials()) + .isEqualTo(new CredentialsService().createCredentials(FILE_TEST_PATH)); + assertThat(options.isAutocommit()).isEqualTo(ConnectionOptions.DEFAULT_AUTOCOMMIT); + assertThat(options.isReadOnly()).isEqualTo(ConnectionOptions.DEFAULT_READONLY); + } + + @Test + public void testBuildWithAutoConfigEmulator() { + ConnectionOptions.Builder builder = ConnectionOptions.newBuilder(); + builder.setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance-123/databases/test-database-123?autoConfigEmulator=true"); + ConnectionOptions options = builder.build(); + assertEquals("http://localhost:9010", options.getHost()); + assertEquals("test-project-123", options.getProjectId()); + assertEquals("test-instance-123", options.getInstanceId()); + assertEquals("test-database-123", options.getDatabaseName()); + assertEquals(NoCredentials.getInstance(), options.getCredentials()); + assertTrue(options.isUsePlainText()); + } + + @Test + public void testDetermineHost() { + final String uriWithoutHost = + "cloudspanner:/projects/test-project-123/instances/test-instance-123/databases/test-database-123"; + Matcher matcherWithoutHost = SPANNER_URI_PATTERN.matcher(uriWithoutHost); + assertTrue(matcherWithoutHost.find()); + final String uriWithHost = + 
"cloudspanner://custom.host.domain:1234/projects/test-project-123/instances/test-instance-123/databases/test-database-123"; + Matcher matcherWithHost = SPANNER_URI_PATTERN.matcher(uriWithHost); + assertTrue(matcherWithHost.find()); + + assertEquals( + DEFAULT_HOST, + determineHost( + matcherWithoutHost, + DEFAULT_ENDPOINT, + /* autoConfigEmulator= */ false, + /* usePlainText= */ false, + ImmutableMap.of())); + assertEquals( + DEFAULT_HOST, + determineHost( + matcherWithoutHost, + DEFAULT_ENDPOINT, + /* autoConfigEmulator= */ false, + /* usePlainText= */ false, + ImmutableMap.of("FOO", "bar"))); + assertEquals( + "http://localhost:9010", + determineHost( + matcherWithoutHost, + DEFAULT_ENDPOINT, + /* autoConfigEmulator= */ true, + /* usePlainText= */ false, + ImmutableMap.of())); + assertEquals( + "http://localhost:9011", + determineHost( + matcherWithoutHost, + DEFAULT_ENDPOINT, + /* autoConfigEmulator= */ true, + /* usePlainText= */ false, + ImmutableMap.of("SPANNER_EMULATOR_HOST", "localhost:9011"))); + assertEquals( + "http://localhost:9010", + determineHost( + matcherWithoutHost, + DEFAULT_ENDPOINT, + /* autoConfigEmulator= */ true, + /* usePlainText= */ true, + ImmutableMap.of())); + assertEquals( + "http://localhost:9011", + determineHost( + matcherWithoutHost, + DEFAULT_ENDPOINT, + /* autoConfigEmulator= */ true, + /* usePlainText= */ true, + ImmutableMap.of("SPANNER_EMULATOR_HOST", "localhost:9011"))); + + // A host in the connection string has precedence over all other options. 
+ assertEquals( + "https://custom.host.domain:1234", + determineHost( + matcherWithHost, + DEFAULT_ENDPOINT, + /* autoConfigEmulator= */ false, + /* usePlainText= */ false, + ImmutableMap.of())); + assertEquals( + "http://custom.host.domain:1234", + determineHost( + matcherWithHost, + DEFAULT_ENDPOINT, + /* autoConfigEmulator= */ false, + /* usePlainText= */ true, + ImmutableMap.of())); + assertEquals( + "http://custom.host.domain:1234", + determineHost( + matcherWithHost, + DEFAULT_ENDPOINT, + /* autoConfigEmulator= */ false, + /* usePlainText= */ true, + ImmutableMap.of())); + assertEquals( + "https://custom.host.domain:1234", + determineHost( + matcherWithHost, + DEFAULT_ENDPOINT, + /* autoConfigEmulator= */ true, + /* usePlainText= */ false, + ImmutableMap.of())); + assertEquals( + "http://custom.host.domain:1234", + determineHost( + matcherWithHost, + DEFAULT_ENDPOINT, + /* autoConfigEmulator= */ false, + /* usePlainText= */ true, + ImmutableMap.of("SPANNER_EMULATOR_HOST", "localhost:9011"))); + assertEquals( + "https://custom.host.domain:1234", + determineHost( + matcherWithHost, + DEFAULT_ENDPOINT, + /* autoConfigEmulator= */ true, + /* usePlainText= */ false, + ImmutableMap.of("SPANNER_EMULATOR_HOST", "localhost:9011"))); + + // The 'endpoint' connection URL property can also be used to connect to the emulator. + // Using this property is sometimes easier than adding the URL to the host part of the + // connection string, for example because it can be added to the Properties object that + // is used by JDBC. + assertEquals( + "http://localhost:9010", + determineHost( + matcherWithoutHost, + "localhost:9010", + /* autoConfigEmulator= */ false, + /* usePlainText= */ true, + ImmutableMap.of())); + // A value for the 'endpoint' connection property overrides any value in the host group. 
+ assertEquals( + "https://my.endpoint:1234", + determineHost( + matcherWithHost, + "my.endpoint:1234", + /* autoConfigEmulator= */ false, + /* usePlainText= */ false, + ImmutableMap.of("SPANNER_EMULATOR_HOST", "localhost:9011"))); + assertEquals( + "http://my.endpoint.local:1234", + determineHost( + matcherWithHost, + "my.endpoint.local:1234", + /* autoConfigEmulator= */ false, + /* usePlainText= */ true, + ImmutableMap.of())); + } + + @Test + public void testBuildWithRouteToLeader() { + final String BASE_URI = + "cloudspanner:/projects/test-project-123/instances/test-instance-123/databases/test-database-123"; + ConnectionOptions.Builder builder = ConnectionOptions.newBuilder(); + builder.setUri(BASE_URI + "?routeToLeader=false"); + builder.setCredentialsUrl(FILE_TEST_PATH); + ConnectionOptions options = builder.build(); + assertEquals(options.getHost(), DEFAULT_HOST); + assertEquals(options.getProjectId(), TEST_PROJECT); + assertEquals(options.getInstanceId(), TEST_INSTANCE); + assertEquals(options.getDatabaseName(), TEST_DATABASE); + assertFalse(options.isRouteToLeader()); + + // Test for default behavior for routeToLeader property. 
+ builder = ConnectionOptions.newBuilder().setUri(BASE_URI); + builder.setCredentialsUrl(FILE_TEST_PATH); + options = builder.build(); + assertTrue(options.isRouteToLeader()); + } + + @Test + public void testBuildWithEndToEndTracingEnabled() { + final String BASE_URI = + "cloudspanner:/projects/test-project-123/instances/test-instance-123/databases/test-database-123"; + ConnectionOptions.Builder builder = ConnectionOptions.newBuilder(); + builder.setUri(BASE_URI + "?enableEndToEndTracing=true"); + builder.setCredentialsUrl(FILE_TEST_PATH); + ConnectionOptions options = builder.build(); + assertEquals(options.getHost(), DEFAULT_HOST); + assertEquals(options.getProjectId(), TEST_PROJECT); + assertEquals(options.getInstanceId(), TEST_INSTANCE); + assertEquals(options.getDatabaseName(), TEST_DATABASE); + assertTrue(options.isEndToEndTracingEnabled()); + + // Test for default behavior for enableEndToEndTracing property. + builder = ConnectionOptions.newBuilder().setUri(BASE_URI); + builder.setCredentialsUrl(FILE_TEST_PATH); + options = builder.build(); + assertFalse(options.isEndToEndTracingEnabled()); + } + + @Test + public void testBuildWithAutoConfigEmulatorAndHost() { + ConnectionOptions.Builder builder = ConnectionOptions.newBuilder(); + builder.setUri( + "cloudspanner://central-emulator.local:8080/projects/test-project-123/instances/test-instance-123/databases/test-database-123?autoConfigEmulator=true"); + ConnectionOptions options = builder.build(); + assertEquals("http://central-emulator.local:8080", options.getHost()); + assertEquals("test-project-123", options.getProjectId()); + assertEquals("test-instance-123", options.getInstanceId()); + assertEquals("test-database-123", options.getDatabaseName()); + assertEquals(NoCredentials.getInstance(), options.getCredentials()); + assertTrue(options.isUsePlainText()); + } + + @Test + public void testBuildWithAutoConfigEmulatorAndEndpoint() { + ConnectionOptions.Builder builder = ConnectionOptions.newBuilder(); + 
builder.setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance-123/databases/test-database-123?autoConfigEmulator=true;endpoint=central-emulator.local:8080"); + ConnectionOptions options = builder.build(); + assertEquals("http://central-emulator.local:8080", options.getHost()); + assertEquals("test-project-123", options.getProjectId()); + assertEquals("test-instance-123", options.getInstanceId()); + assertEquals("test-database-123", options.getDatabaseName()); + assertEquals(NoCredentials.getInstance(), options.getCredentials()); + assertTrue(options.isUsePlainText()); + } + + @Test + public void testBuildWithDefaultProjectPlaceholder() { + ConnectionOptions.Builder builder = ConnectionOptions.newBuilder(); + builder.setUri( + "cloudspanner:/projects/default_project_id/instances/test-instance-123/databases/test-database-123"); + builder.setCredentialsUrl(FILE_TEST_PATH); + ConnectionOptions options = builder.build(); + assertThat(options.getHost()).isEqualTo(DEFAULT_HOST); + String projectId = SpannerOptions.getDefaultProjectId(); + if (projectId == null) { + projectId = + ((ServiceAccountCredentials) new CredentialsService().createCredentials(FILE_TEST_PATH)) + .getProjectId(); + } + assertThat(options.getProjectId()).isEqualTo(projectId); + assertThat(options.getInstanceId()).isEqualTo("test-instance-123"); + assertThat(options.getDatabaseName()).isEqualTo("test-database-123"); + assertThat(options.getCredentials()) + .isEqualTo(new CredentialsService().createCredentials(FILE_TEST_PATH)); + assertThat(options.isAutocommit()).isEqualTo(ConnectionOptions.DEFAULT_AUTOCOMMIT); + assertThat(options.isReadOnly()).isEqualTo(ConnectionOptions.DEFAULT_READONLY); + } + + @Test + public void testBuilderSetUri() { + ConnectionOptions.Builder builder = ConnectionOptions.newBuilder(); + + // set valid uri's + builder.setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database"); + 
builder.setUri("cloudspanner:/projects/test-project-123/instances/test-instance"); + builder.setUri("cloudspanner:/projects/test-project-123"); + builder.setUri( + "cloudspanner://spanner.googleapis.com/projects/test-project-123/instances/test-instance/databases/test-database"); + builder.setUri( + "cloudspanner://spanner.googleapis.com/projects/test-project-123/instances/test-instance"); + builder.setUri("cloudspanner://spanner.googleapis.com/projects/test-project-123"); + + builder.setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?autocommit=true"); + builder.setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance?autocommit=true"); + builder.setUri("cloudspanner:/projects/test-project-123?autocommit=true"); + builder.setUri( + "cloudspanner://spanner.googleapis.com/projects/test-project-123/instances/test-instance/databases/test-database?autocommit=true"); + builder.setUri( + "cloudspanner://spanner.googleapis.com/projects/test-project-123/instances/test-instance?autocommit=true"); + builder.setUri( + "cloudspanner://spanner.googleapis.com/projects/test-project-123?autocommit=true"); + + builder.setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?autocommit=true;readonly=false"); + builder.setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance?autocommit=true;readonly=false"); + builder.setUri("cloudspanner:/projects/test-project-123?autocommit=true;readonly=false"); + builder.setUri( + "cloudspanner://spanner.googleapis.com/projects/test-project-123/instances/test-instance/databases/test-database?autocommit=true;readonly=false"); + builder.setUri( + "cloudspanner://spanner.googleapis.com/projects/test-project-123/instances/test-instance?autocommit=true;readonly=false"); + builder.setUri( + "cloudspanner://spanner.googleapis.com/projects/test-project-123?autocommit=true;readonly=false"); + builder.setUri( + 
"cloudspanner://spanner.googleapis.com/projects/test-project-123?statement_timeout='10s';transaction_timeout='60s'"); + + // set invalid uri's + setInvalidUri( + builder, "/projects/test-project-123/instances/test-instance/databases/test-database"); + setInvalidUri(builder, "cloudspanner:/test-project-123/test-instance/test-database"); + setInvalidUri( + builder, + "cloudspanner:spanner.googleapis.com/projects/test-project-123/instances/test-instance/databases/test-database"); + setInvalidUri( + builder, + "cloudspanner://spanner.googleapis.com/projects/test-project-$$$/instances/test-instance/databases/test-database"); + setInvalidUri( + builder, + "cloudspanner://spanner.googleapis.com/projects/test-project-123/databases/test-database"); + setInvalidUri( + builder, + "cloudspanner:/projects/test_project_123/instances/test-instance/databases/test-database"); + + // Set URI's that are valid, but that contain unknown properties. + setInvalidProperty( + builder, + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?read=false", + "read"); + setInvalidProperty( + builder, + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?read=false;autocommit=true", + "read"); + setInvalidProperty( + builder, + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?read=false;auto=true", + "read, auto"); + } + + private void setInvalidUri(ConnectionOptions.Builder builder, String uri) { + assertThrows(IllegalArgumentException.class, () -> builder.setUri(uri)); + } + + private void setInvalidProperty( + ConnectionOptions.Builder builder, String uri, String expectedInvalidProperties) { + IllegalArgumentException exception = + assertThrows(IllegalArgumentException.class, () -> builder.setUri(uri)); + assertTrue(exception.getMessage(), exception.getMessage().contains(expectedInvalidProperties)); + } + + @Test + public void testParseUriProperty() { + final String baseUri = + 
"cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database"; + + assertThat(ConnectionOptions.parseUriProperty(baseUri, "autocommit")).isNull(); + assertThat(ConnectionOptions.parseUriProperty(baseUri + "?autocommit=true", "autocommit")) + .isEqualTo("true"); + assertThat(ConnectionOptions.parseUriProperty(baseUri + "?autocommit=false", "autocommit")) + .isEqualTo("false"); + assertThat(ConnectionOptions.parseUriProperty(baseUri + "?autocommit=true;", "autocommit")) + .isEqualTo("true"); + assertThat(ConnectionOptions.parseUriProperty(baseUri + "?autocommit=false;", "autocommit")) + .isEqualTo("false"); + assertThat( + ConnectionOptions.parseUriProperty( + baseUri + "?autocommit=true;readOnly=false", "autocommit")) + .isEqualTo("true"); + assertThat( + ConnectionOptions.parseUriProperty( + baseUri + "?autocommit=false;readOnly=false", "autocommit")) + .isEqualTo("false"); + assertThat( + ConnectionOptions.parseUriProperty( + baseUri + "?readOnly=false;autocommit=true", "autocommit")) + .isEqualTo("true"); + assertThat( + ConnectionOptions.parseUriProperty( + baseUri + "?readOnly=false;autocommit=false", "autocommit")) + .isEqualTo("false"); + assertThat( + ConnectionOptions.parseUriProperty( + baseUri + "?readOnly=false;autocommit=true;foo=bar", "autocommit")) + .isEqualTo("true"); + assertThat( + ConnectionOptions.parseUriProperty( + baseUri + "?readOnly=false;autocommit=false;foo=bar", "autocommit")) + .isEqualTo("false"); + + // case insensitive + assertThat(ConnectionOptions.parseUriProperty(baseUri + "?AutoCommit=true", "autocommit")) + .isEqualTo("true"); + assertThat(ConnectionOptions.parseUriProperty(baseUri + "?AutoCommit=false", "autocommit")) + .isEqualTo("false"); + + // ; instead of ? before the properties is ok + assertThat(ConnectionOptions.parseUriProperty(baseUri + ";autocommit=true", "autocommit")) + .isEqualTo("true"); + + // forgot the ? 
or ; before the properties + assertThat(ConnectionOptions.parseUriProperty(baseUri + "autocommit=true", "autocommit")) + .isNull(); + // substring is not ok + assertThat(ConnectionOptions.parseUriProperty(baseUri + "?isautocommit=true", "autocommit")) + .isNull(); + } + + @Test + public void testParseProperties() { + final String baseUri = + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database"; + assertThat(ConnectionOptions.parseProperties(baseUri + "?autocommit=true")) + .isEqualTo(Collections.singletonList("autocommit")); + assertThat(ConnectionOptions.parseProperties(baseUri + "?autocommit=true;readonly=false")) + .isEqualTo(Arrays.asList("autocommit", "readonly")); + assertThat(ConnectionOptions.parseProperties(baseUri + "?autocommit=true;READONLY=false")) + .isEqualTo(Arrays.asList("autocommit", "READONLY")); + assertThat(ConnectionOptions.parseProperties(baseUri + ";autocommit=true;readonly=false")) + .isEqualTo(Arrays.asList("autocommit", "readonly")); + assertThat(ConnectionOptions.parseProperties(baseUri + ";autocommit=true;readonly=false;")) + .isEqualTo(Arrays.asList("autocommit", "readonly")); + } + + @Test + public void testParsePropertiesSpecifiedMultipleTimes() { + final String baseUri = + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database"; + assertThat( + ConnectionOptions.parseUriProperty( + baseUri + "?autocommit=true;autocommit=false", "autocommit")) + .isEqualTo("true"); + assertThat( + ConnectionOptions.parseUriProperty( + baseUri + "?autocommit=false;autocommit=true", "autocommit")) + .isEqualTo("false"); + assertThat( + ConnectionOptions.parseUriProperty( + baseUri + ";autocommit=false;readonly=false;autocommit=true", "autocommit")) + .isEqualTo("false"); + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database" + + ";autocommit=false;readonly=false;autocommit=true"); + } + + @Test + public 
void testParseOAuthToken() { + assertThat( + ConnectionOptions.parseUriProperty( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database" + + "?oauthtoken=RsT5OjbzRn430zqMLgV3Ia", + "OAuthToken")) + .isEqualTo("RsT5OjbzRn430zqMLgV3Ia"); + // Try to use both credentials and an OAuth token. That should fail. + ConnectionOptions.Builder builder = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database" + + "?OAuthToken=RsT5OjbzRn430zqMLgV3Ia;credentials=/path/to/credentials.json"); + IllegalArgumentException exception = + assertThrows(IllegalArgumentException.class, builder::build); + assertTrue( + exception.getMessage(), + exception + .getMessage() + .contains( + "Specify only one of credentialsUrl, encodedCredentials, credentialsProvider and" + + " OAuth token")); + + // Now try to use only an OAuth token. + builder = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database" + + "?OAuthToken=RsT5OjbzRn430zqMLgV3Ia"); + ConnectionOptions options = builder.build(); + assertThat(options.getCredentials()).isInstanceOf(GoogleCredentials.class); + GoogleCredentials credentials = (GoogleCredentials) options.getCredentials(); + assertThat(credentials.getAccessToken().getTokenValue()).isEqualTo("RsT5OjbzRn430zqMLgV3Ia"); + } + + @Test + public void testSetOAuthToken() { + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database") + .setOAuthToken("RsT5OjbzRn430zqMLgV3Ia") + .build(); + assertThat(options.getCredentials()).isInstanceOf(GoogleCredentials.class); + GoogleCredentials credentials = (GoogleCredentials) options.getCredentials(); + assertThat(credentials.getAccessToken()).isNotNull(); + 
assertThat(credentials.getAccessToken().getTokenValue()).isEqualTo("RsT5OjbzRn430zqMLgV3Ia"); + } + + @Test + public void testSetOAuthTokenAndCredentials() { + IllegalArgumentException exception = + assertThrows( + IllegalArgumentException.class, + () -> + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database") + .setOAuthToken("RsT5OjbzRn430zqMLgV3Ia") + .setCredentialsUrl(FILE_TEST_PATH) + .build()); + assertTrue( + exception.getMessage(), + exception + .getMessage() + .contains( + "Specify only one of credentialsUrl, encodedCredentials, credentialsProvider and" + + " OAuth token")); + } + + @Test + public void testLenient() { + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?lenient=true;foo=bar") + .setCredentialsUrl(FILE_TEST_PATH) + .build(); + assertThat(options.getWarnings()).isNotNull(); + assertThat(options.getWarnings()).contains("foo"); + assertThat(options.getWarnings()).doesNotContain("lenient"); + + options = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?bar=foo;lenient=true") + .setCredentialsUrl(FILE_TEST_PATH) + .build(); + assertThat(options.getWarnings()).isNotNull(); + assertThat(options.getWarnings()).contains("bar"); + assertThat(options.getWarnings()).doesNotContain("lenient"); + + IllegalArgumentException exception = + assertThrows( + IllegalArgumentException.class, + () -> + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?bar=foo") + .setCredentialsUrl(FILE_TEST_PATH) + .build()); + assertTrue(exception.getMessage(), exception.getMessage().contains("bar")); + } + + @Test + public void testMinSessions() { + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri( + 
"cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?minSessions=400") + .setCredentialsUrl(FILE_TEST_PATH) + .build(); + assertThat(options.getMinSessions()).isEqualTo(400); + assertThat(options.getSessionPoolOptions().getMinSessions()).isEqualTo(400); + } + + @Test + public void testMaxSessions() { + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?maxSessions=4000") + .setCredentialsUrl(FILE_TEST_PATH) + .build(); + assertThat(options.getMaxSessions()).isEqualTo(4000); + assertThat(options.getSessionPoolOptions().getMaxSessions()).isEqualTo(4000); + } + + @Test + public void testTrackSessionLeaks() { + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?trackSessionLeaks=false") + .setCredentialsUrl(FILE_TEST_PATH) + .build(); + assertFalse(options.getSessionPoolOptions().isTrackStackTraceOfSessionCheckout()); + } + + @Test + public void testTrackSessionLeaksDefault() { + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database") + .setCredentialsUrl(FILE_TEST_PATH) + .build(); + assertTrue(options.getSessionPoolOptions().isTrackStackTraceOfSessionCheckout()); + } + + @Test + public void testTrackConnectionLeaks() { + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?trackConnectionLeaks=false") + .setCredentialsUrl(FILE_TEST_PATH) + .build(); + assertFalse(options.isTrackConnectionLeaks()); + } + + @Test + public void testTrackConnectionLeaksDefault() { + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri( + 
"cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database") + .setCredentialsUrl(FILE_TEST_PATH) + .build(); + assertTrue(options.isTrackConnectionLeaks()); + } + + @Test + public void testDataBoostEnabled() { + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?dataBoostEnabled=true") + .setCredentialsUrl(FILE_TEST_PATH) + .build(); + assertTrue(options.isDataBoostEnabled()); + } + + @Test + public void testDataBoostEnabledDefault() { + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database") + .setCredentialsUrl(FILE_TEST_PATH) + .build(); + assertFalse(options.isDataBoostEnabled()); + } + + @Test + public void testAutoPartitionMode() { + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?autoPartitionMode=true") + .setCredentialsUrl(FILE_TEST_PATH) + .build(); + assertTrue(options.isAutoPartitionMode()); + } + + @Test + public void testAutoPartitionModeDefault() { + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database") + .setCredentialsUrl(FILE_TEST_PATH) + .build(); + assertFalse(options.isAutoPartitionMode()); + } + + @Test + public void testMaxPartitions() { + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?maxPartitions=4") + .setCredentialsUrl(FILE_TEST_PATH) + .build(); + assertEquals(4, options.getMaxPartitions()); + } + + @Test + public void testMaxPartitionsDefault() { + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri( + 
"cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database") + .setCredentialsUrl(FILE_TEST_PATH) + .build(); + assertEquals(0, options.getMaxPartitions()); + } + + @Test + public void testMaxPartitionsInvalidValue() { + assertThrows( + SpannerException.class, + () -> + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?maxPartitions=-1") + .setCredentialsUrl(FILE_TEST_PATH) + .build()); + } + + @Test + public void testMaxPartitionsNonNumeric() { + assertThrows( + SpannerException.class, + () -> + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?maxPartitions=four") + .setCredentialsUrl(FILE_TEST_PATH) + .build()); + } + + @Test + public void testMaxPartitionedParallelism() { + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?maxPartitionedParallelism=4") + .setCredentialsUrl(FILE_TEST_PATH) + .build(); + assertEquals(4, options.getMaxPartitionedParallelism()); + } + + @Test + public void testMaxPartitionedParallelismDefault() { + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database") + .setCredentialsUrl(FILE_TEST_PATH) + .build(); + assertEquals(1, options.getMaxPartitionedParallelism()); + } + + @Test + public void testMaxPartitionedParallelismInvalidValue() { + assertThrows( + SpannerException.class, + () -> + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?maxPartitionedParallelism=-1") + .setCredentialsUrl(FILE_TEST_PATH) + .build()); + } + + @Test + public void testMaxPartitionedParallelismNonNumeric() { + assertThrows( + SpannerException.class, + () -> + 
ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?maxPartitionedParallelism=four") + .setCredentialsUrl(FILE_TEST_PATH) + .build()); + } + + @Test + public void testLocalConnectionError() { + String uri = + "cloudspanner://localhost:1/projects/test-project/instances/test-instance/databases/test-database?usePlainText=true"; + ConnectionOptions options = ConnectionOptions.newBuilder().setUri(uri).build(); + SpannerException exception = assertThrows(SpannerException.class, options::getConnection); + assertEquals(ErrorCode.UNAVAILABLE, exception.getErrorCode()); + assertTrue( + exception.getMessage(), + exception + .getMessage() + .contains( + String.format( + "The connection string '%s' contains host 'localhost:1', but no running", + uri))); + } + + @Test + public void testInvalidCredentials() { + String uri = + "cloudspanner:/projects/test-project/instances/test-instance/databases/test-database?credentials=/some/non/existing/path"; + SpannerException exception = + assertThrows( + SpannerException.class, () -> ConnectionOptions.newBuilder().setUri(uri).build()); + assertEquals(ErrorCode.INVALID_ARGUMENT, exception.getErrorCode()); + assertTrue( + exception.getMessage(), + exception + .getMessage() + .contains("Invalid credentials path specified: /some/non/existing/path")); + } + + @Test + public void testNonBase64EncodedCredentials() throws Throwable { + runWithSystemPropertyEnabled( + ConnectionOptions.ENABLE_ENCODED_CREDENTIALS_SYSTEM_PROPERTY, + () -> { + String uri = + "cloudspanner:/projects/test-project/instances/test-instance/databases/test-database?encodedCredentials=not-a-base64-string/"; + SpannerException e = + assertThrows( + SpannerException.class, () -> ConnectionOptions.newBuilder().setUri(uri).build()); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + assertThat(e.getMessage()) + .contains("The encoded credentials could not be decoded as a base64 
string."); + }); + } + + @Test + public void testInvalidEncodedCredentials() throws Throwable { + runWithSystemPropertyEnabled( + ConnectionOptions.ENABLE_ENCODED_CREDENTIALS_SYSTEM_PROPERTY, + () -> { + String uri = + String.format( + "cloudspanner:/projects/test-project/instances/test-instance/databases/test-database?encodedCredentials=%s", + BaseEncoding.base64Url() + .encode("not-a-credentials-JSON-string".getBytes(StandardCharsets.UTF_8))); + SpannerException e = + assertThrows( + SpannerException.class, () -> ConnectionOptions.newBuilder().setUri(uri).build()); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + assertThat(e.getMessage()) + .contains( + "The encoded credentials do not contain a valid Google Cloud credentials JSON" + + " string."); + }); + } + + @Test + public void testValidEncodedCredentials() throws Throwable { + runWithSystemPropertyEnabled( + ConnectionOptions.ENABLE_ENCODED_CREDENTIALS_SYSTEM_PROPERTY, + () -> { + String encoded = + BaseEncoding.base64Url().encode(Files.asByteSource(new File(FILE_TEST_PATH)).read()); + String uri = + String.format( + "cloudspanner:/projects/test-project/instances/test-instance/databases/test-database?encodedCredentials=%s", + encoded); + + ConnectionOptions options = ConnectionOptions.newBuilder().setUri(uri).build(); + assertEquals( + new CredentialsService().createCredentials(FILE_TEST_PATH), options.getCredentials()); + }); + } + + @Test + public void testValidEncodedCredentials_WithoutEnablingProperty() throws Throwable { + String encoded = + BaseEncoding.base64Url().encode(Files.asByteSource(new File(FILE_TEST_PATH)).read()); + String uri = + String.format( + "cloudspanner:/projects/test-project/instances/test-instance/databases/test-database?encodedCredentials=%s", + encoded); + + SpannerException exception = + assertThrows( + SpannerException.class, () -> ConnectionOptions.newBuilder().setUri(uri).build()); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } + + 
@Test + public void testSetCredentialsAndEncodedCredentials() throws Throwable { + runWithSystemPropertyEnabled( + ConnectionOptions.ENABLE_ENCODED_CREDENTIALS_SYSTEM_PROPERTY, + () -> { + String encoded = + BaseEncoding.base64Url().encode(Files.asByteSource(new File(FILE_TEST_PATH)).read()); + String uri = + String.format( + "cloudspanner:/projects/test-project/instances/test-instance/databases/test-database?credentials=%s;encodedCredentials=%s", + FILE_TEST_PATH, encoded); + + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> ConnectionOptions.newBuilder().setUri(uri).build()); + assertTrue( + e.getMessage(), + e.getMessage() + .contains( + "Specify only one of credentialsUrl, encodedCredentials, credentialsProvider" + + " and OAuth token")); + }); + } + + public static class TestCredentialsProvider implements CredentialsProvider { + @Override + public Credentials getCredentials() { + return NoCredentials.getInstance(); + } + } + + static void runWithSystemPropertyEnabled(String systemPropertyName, ThrowingRunnable runnable) + throws Throwable { + String originalValue = System.getProperty(systemPropertyName); + System.setProperty(systemPropertyName, "true"); + try { + runnable.run(); + } finally { + if (originalValue == null) { + System.clearProperty(systemPropertyName); + } else { + System.setProperty(systemPropertyName, originalValue); + } + } + } + + @Test + public void testValidCredentialsProvider() throws Throwable { + runWithSystemPropertyEnabled( + ConnectionOptions.ENABLE_CREDENTIALS_PROVIDER_SYSTEM_PROPERTY, + () -> { + String uri = + String.format( + "cloudspanner:/projects/test-project/instances/test-instance/databases/test-database?credentialsProvider=%s", + TestCredentialsProvider.class.getName()); + + ConnectionOptions options = ConnectionOptions.newBuilder().setUri(uri).build(); + assertEquals(NoCredentials.getInstance(), options.getCredentials()); + }); + } + + @Test + public void 
testValidCredentialsProvider_WithoutEnablingSystemProperty() { + String uri = + String.format( + "cloudspanner:/projects/test-project/instances/test-instance/databases/test-database?credentialsProvider=%s", + TestCredentialsProvider.class.getName()); + SpannerException exception = + assertThrows( + SpannerException.class, () -> ConnectionOptions.newBuilder().setUri(uri).build()); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + assertEquals( + "FAILED_PRECONDITION: credentialsProvider can only be used if the system property" + + " ENABLE_CREDENTIALS_PROVIDER has been set to true. Start the application with the" + + " JVM command line option -DENABLE_CREDENTIALS_PROVIDER=true", + exception.getMessage()); + } + + @Test + public void testSetCredentialsAndCredentialsProvider() throws Throwable { + runWithSystemPropertyEnabled( + ConnectionOptions.ENABLE_CREDENTIALS_PROVIDER_SYSTEM_PROPERTY, + () -> { + String uri = + String.format( + "cloudspanner:/projects/test-project/instances/test-instance/databases/test-database?credentials=%s;credentialsProvider=%s", + FILE_TEST_PATH, NoCredentialsProvider.class.getName()); + + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> ConnectionOptions.newBuilder().setUri(uri).build()); + assertTrue( + e.getMessage(), + e.getMessage() + .contains( + "Specify only one of credentialsUrl, encodedCredentials, credentialsProvider" + + " and OAuth token")); + }); + } + + @Test + public void testExternalChannelProvider() throws Throwable { + runWithSystemPropertyEnabled( + ConnectionOptions.ENABLE_CHANNEL_PROVIDER_SYSTEM_PROPERTY, + () -> { + String baseUri = + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database"; + + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri( + baseUri + + "?channelProvider=com.google.cloud.spanner.connection.TestChannelProvider") + .setCredentials(NoCredentials.getInstance()) + .build(); + + 
TransportChannelProvider provider = options.getChannelProvider(); + assertTrue(provider instanceof InstantiatingGrpcChannelProvider); + }); + } + + @Test + public void testExternalChannelProvider_WithoutEnablingProperty() throws Throwable { + String baseUri = + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database"; + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + ConnectionOptions.newBuilder() + .setUri( + baseUri + + "?channelProvider=com.google.cloud.spanner.connection.TestChannelProvider") + .setCredentials(NoCredentials.getInstance()) + .build()); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } + + @Test + public void testUseVirtualThreads() { + assertTrue( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?useVirtualThreads=true") + .setCredentials(NoCredentials.getInstance()) + .build() + .isUseVirtualThreads()); + assertFalse( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?useVirtualThreads=false") + .setCredentials(NoCredentials.getInstance()) + .build() + .isUseVirtualThreads()); + assertEquals( + ConnectionOptions.DEFAULT_USE_VIRTUAL_THREADS, + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database") + .setCredentials(NoCredentials.getInstance()) + .build() + .isUseVirtualThreads()); + } + + @Test + public void testUseVirtualGrpcTransportThreads() { + assertTrue( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?useVirtualGrpcTransportThreads=true") + .setCredentials(NoCredentials.getInstance()) + .build() + .isUseVirtualGrpcTransportThreads()); + assertFalse( + ConnectionOptions.newBuilder() + .setUri( + 
"cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?useVirtualGrpcTransportThreads=false") + .setCredentials(NoCredentials.getInstance()) + .build() + .isUseVirtualGrpcTransportThreads()); + assertEquals( + ConnectionOptions.DEFAULT_USE_VIRTUAL_GRPC_TRANSPORT_THREADS, + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database") + .setCredentials(NoCredentials.getInstance()) + .build() + .isUseVirtualGrpcTransportThreads()); + } + + @Test + public void testMaxCommitDelay() { + assertNull( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database") + .setCredentials(NoCredentials.getInstance()) + .build() + .getMaxCommitDelay()); + assertEquals( + Duration.ofMillis(10L), + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?maxCommitDelay=10") + .setCredentials(NoCredentials.getInstance()) + .build() + .getMaxCommitDelay()); + } + + @Test + public void testEnableApiTracing() { + assertNull( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database") + .setCredentials(NoCredentials.getInstance()) + .build() + .isEnableApiTracing()); + assertTrue( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?enableApiTracing=true") + .setCredentials(NoCredentials.getInstance()) + .build() + .isEnableApiTracing()); + assertFalse( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?enableApiTracing=false") + .setCredentials(NoCredentials.getInstance()) + .build() + .isEnableApiTracing()); + } + + @Test + public void testExternalHostPatterns() { + Matcher matcherWithoutInstance = + 
EXTERNAL_HOST_PATTERN.matcher("cloudspanner://localhost:15000/databases/test-db"); + assertTrue(matcherWithoutInstance.matches()); + assertNull(matcherWithoutInstance.group("INSTANCEGROUP")); + assertEquals("test-db", matcherWithoutInstance.group("DATABASEGROUP")); + Matcher matcherWithProperty = + EXTERNAL_HOST_PATTERN.matcher( + "cloudspanner://localhost:15000/instances/default/databases/singers-db?usePlainText=true"); + assertTrue(matcherWithProperty.matches()); + assertEquals("default", matcherWithProperty.group("INSTANCEGROUP")); + assertEquals("singers-db", matcherWithProperty.group("DATABASEGROUP")); + Matcher matcherWithoutPort = + EXTERNAL_HOST_PATTERN.matcher( + "cloudspanner://localhost/instances/default/databases/test-db"); + assertTrue(matcherWithoutPort.matches()); + assertEquals("default", matcherWithoutPort.group("INSTANCEGROUP")); + assertEquals("test-db", matcherWithoutPort.group("DATABASEGROUP")); + assertEquals( + "http://localhost:15000", + determineHost( + matcherWithoutPort, + DEFAULT_ENDPOINT, + /* autoConfigEmulator= */ true, + /* usePlainText= */ true, + ImmutableMap.of())); + Matcher matcherWithProject = + EXTERNAL_HOST_PATTERN.matcher( + "cloudspanner://localhost:15000/projects/default/instances/default/databases/singers-db"); + assertFalse(matcherWithProject.matches()); + Matcher matcherWithoutHost = + EXTERNAL_HOST_PATTERN.matcher("cloudspanner:/instances/default/databases/singers-db"); + assertFalse(matcherWithoutHost.matches()); + Matcher matcherWithPrefixSpanner = + EXTERNAL_HOST_PATTERN.matcher("spanner://localhost:15000/databases/test-db"); + assertTrue(matcherWithPrefixSpanner.matches()); + assertNull(matcherWithPrefixSpanner.group("INSTANCEGROUP")); + assertEquals("test-db", matcherWithPrefixSpanner.group("DATABASEGROUP")); + } + + @Test + public void testBuildWithValidURIWithPrefixSpanner() { + ConnectionOptions.Builder builder = ConnectionOptions.newBuilder(); + builder.setUri( + 
"spanner:/projects/test-project-123/instances/test-instance-123/databases/test-database-123?autocommit=false;readonly=true"); + builder.setCredentialsUrl(FILE_TEST_PATH); + ConnectionOptions options = builder.build(); + assertThat(options.getHost()).isEqualTo(DEFAULT_HOST); + assertThat(options.getProjectId()).isEqualTo("test-project-123"); + assertThat(options.getInstanceId()).isEqualTo("test-instance-123"); + assertThat(options.getDatabaseName()).isEqualTo("test-database-123"); + assertThat(options.getCredentials()) + .isEqualTo(new CredentialsService().createCredentials(FILE_TEST_PATH)); + assertThat(options.isAutocommit()).isEqualTo(false); + assertThat(options.isReadOnly()).isEqualTo(true); + } + + @Test + public void testExperimentalHost() { + ConnectionOptions.Builder builderWithoutExperimentalHostParam = ConnectionOptions.newBuilder(); + builderWithoutExperimentalHostParam.setUri( + "spanner://localhost:15000/instances/default/databases/singers-db;usePlainText=true"); + ConnectionOptions optionsWithoutExperimentalHostParam = + builderWithoutExperimentalHostParam.build(); + assertFalse(optionsWithoutExperimentalHostParam.isExperimentalHost()); + assertEquals(0, optionsWithoutExperimentalHostParam.getSessionPoolOptions().getMinSessions()); + assertTrue( + optionsWithoutExperimentalHostParam.getSessionPoolOptions().getUseMultiplexedSession()); + assertTrue( + optionsWithoutExperimentalHostParam + .getSessionPoolOptions() + .getUseMultiplexedSessionForRW()); + assertTrue( + optionsWithoutExperimentalHostParam + .getSessionPoolOptions() + .getUseMultiplexedSessionPartitionedOps()); + + ConnectionOptions.Builder builderWithExperimentalHostParam = ConnectionOptions.newBuilder(); + builderWithExperimentalHostParam.setUri( + "spanner://localhost:15000/projects/default/instances/default/databases/singers-db;usePlainText=true;isExperimentalHost=true"); + ConnectionOptions optionsWithExperimentalHostParam = builderWithExperimentalHostParam.build(); + 
assertTrue(optionsWithExperimentalHostParam.isExperimentalHost()); + assertEquals(0, optionsWithExperimentalHostParam.getSessionPoolOptions().getMinSessions()); + assertTrue(optionsWithExperimentalHostParam.getSessionPoolOptions().getUseMultiplexedSession()); + assertTrue( + optionsWithExperimentalHostParam.getSessionPoolOptions().getUseMultiplexedSessionForRW()); + assertTrue( + optionsWithExperimentalHostParam + .getSessionPoolOptions() + .getUseMultiplexedSessionPartitionedOps()); + } + + @Test + public void testEnableDirectAccess() { + ConnectionOptions.Builder builderWithoutDirectPathParam = ConnectionOptions.newBuilder(); + builderWithoutDirectPathParam.setUri( + "spanner://localhost:15000/instances/default/databases/singers-db;usePlainText=true"); + assertNull(builderWithoutDirectPathParam.build().isEnableDirectAccess()); + + ConnectionOptions.Builder builderWithDirectPathParamFalse = ConnectionOptions.newBuilder(); + builderWithDirectPathParamFalse.setUri( + "spanner://localhost:15000/instances/default/databases/singers-db;usePlainText=true;enableDirectAccess=false"); + assertFalse(builderWithDirectPathParamFalse.build().isEnableDirectAccess()); + + ConnectionOptions.Builder builderWithDirectPathParam = ConnectionOptions.newBuilder(); + builderWithDirectPathParam.setUri( + "spanner://localhost:15000/projects/default/instances/default/databases/singers-db;usePlainText=true;enableDirectAccess=true"); + assertTrue(builderWithDirectPathParam.build().isEnableDirectAccess()); + } + + @Test + public void testUniverseDomain() { + ConnectionImpl connection = mock(ConnectionImpl.class); + + // No universeDomain + AtomicBoolean executedConfigurator = new AtomicBoolean(false); + ConnectionOptions optionsWithNoUniverseDomainParam = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/default/instances/default/databases/singers-db?usePlainText=true") + .setConfigurator( + optionsBuilder -> { + executedConfigurator.set(true); + SpannerOptions 
spannerOptions = optionsBuilder.build(); + assertEquals("googleapis.com", spannerOptions.getUniverseDomain()); + assertEquals("https://spanner.googleapis.com", spannerOptions.getHost()); + }) + .build(); + Spanner spanner = SpannerPool.INSTANCE.getSpanner(optionsWithNoUniverseDomainParam, connection); + spanner.close(); + SpannerPool.INSTANCE.removeConnection(optionsWithNoUniverseDomainParam, connection); + assertTrue(executedConfigurator.get()); + + // only configuring universal domain + executedConfigurator.set(false); + ConnectionOptions optionsWithUniverseDomainParam = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/default/instances/default/databases/singers-db;universeDomain=abc.goog;usePlainText=true") + .setConfigurator( + optionsBuilder -> { + executedConfigurator.set(true); + SpannerOptions spannerOptions = optionsBuilder.build(); + assertEquals("abc.goog", spannerOptions.getUniverseDomain()); + assertEquals("https://spanner.abc.goog", spannerOptions.getHost()); + }) + .build(); + spanner = SpannerPool.INSTANCE.getSpanner(optionsWithUniverseDomainParam, connection); + spanner.close(); + SpannerPool.INSTANCE.removeConnection(optionsWithUniverseDomainParam, connection); + assertTrue(executedConfigurator.get()); + + // configuring both universal domain and host + executedConfigurator.set(false); + ConnectionOptions optionsWithHostAndUniverseDomainParam = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner://spanner.abc.goog/projects/default/instances/default/databases/singers-db;universeDomain=abc.goog;usePlainText=true") + .setConfigurator( + optionsBuilder -> { + executedConfigurator.set(true); + SpannerOptions spannerOptions = optionsBuilder.build(); + assertEquals("abc.goog", spannerOptions.getUniverseDomain()); + assertEquals("http://spanner.abc.goog", spannerOptions.getHost()); + }) + .build(); + spanner = SpannerPool.INSTANCE.getSpanner(optionsWithHostAndUniverseDomainParam, connection); + spanner.close(); + 
SpannerPool.INSTANCE.removeConnection(optionsWithHostAndUniverseDomainParam, connection); + assertTrue(executedConfigurator.get()); + + // configuring both universal domain and host(localhost) + executedConfigurator.set(false); + ConnectionOptions optionsWithLocalHostAndUniverseDomainParam = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner://localhost:15000/projects/default/instances/default/databases/singers-db;usePlainText=true;universeDomain=abc.goog") + .setConfigurator( + optionsBuilder -> { + executedConfigurator.set(true); + SpannerOptions spannerOptions = optionsBuilder.build(); + assertEquals("abc.goog", spannerOptions.getUniverseDomain()); + assertEquals("http://localhost:15000", spannerOptions.getHost()); + }) + .build(); + spanner = + SpannerPool.INSTANCE.getSpanner(optionsWithLocalHostAndUniverseDomainParam, connection); + spanner.close(); + SpannerPool.INSTANCE.removeConnection(optionsWithLocalHostAndUniverseDomainParam, connection); + assertTrue(executedConfigurator.get()); + + connection.close(); + } + + @Test + public void testEnableDynamicChannelPool() { + // Default value + assertNull( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database") + .setCredentials(NoCredentials.getInstance()) + .build() + .isEnableDynamicChannelPool()); + // Enabled + assertTrue( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?enableDynamicChannelPool=true") + .setCredentials(NoCredentials.getInstance()) + .build() + .isEnableDynamicChannelPool()); + } + + @Test + public void testDisableDynamicChannelPool() { + assertFalse( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?enableDynamicChannelPool=false") + .setCredentials(NoCredentials.getInstance()) + .build() + .isEnableDynamicChannelPool()); + } + + @Test 
+ public void testDcpMinChannels() { + // Default value + assertNull( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database") + .setCredentials(NoCredentials.getInstance()) + .build() + .getDcpMinChannels()); + // Custom value + assertEquals( + Integer.valueOf(3), + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?dcpMinChannels=3") + .setCredentials(NoCredentials.getInstance()) + .build() + .getDcpMinChannels()); + } + + @Test + public void testDcpMaxChannels() { + // Default value + assertNull( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database") + .setCredentials(NoCredentials.getInstance()) + .build() + .getDcpMaxChannels()); + // Custom value + assertEquals( + Integer.valueOf(15), + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?dcpMaxChannels=15") + .setCredentials(NoCredentials.getInstance()) + .build() + .getDcpMaxChannels()); + } + + @Test + public void testDcpInitialChannels() { + // Default value + assertNull( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database") + .setCredentials(NoCredentials.getInstance()) + .build() + .getDcpInitialChannels()); + // Custom value + assertEquals( + Integer.valueOf(5), + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?dcpInitialChannels=5") + .setCredentials(NoCredentials.getInstance()) + .build() + .getDcpInitialChannels()); + } + + @Test + public void testDcpWithAllOptions() { + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri( + 
"cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database" + + "?enableDynamicChannelPool=true;dcpMinChannels=3;dcpMaxChannels=15;dcpInitialChannels=5") + .setCredentials(NoCredentials.getInstance()) + .build(); + assertTrue(options.isEnableDynamicChannelPool()); + assertEquals(Integer.valueOf(3), options.getDcpMinChannels()); + assertEquals(Integer.valueOf(15), options.getDcpMaxChannels()); + assertEquals(Integer.valueOf(5), options.getDcpInitialChannels()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionPropertyTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionPropertyTest.java new file mode 100644 index 000000000000..f86809ce13f6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionPropertyTest.java @@ -0,0 +1,301 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.ConnectionProperty.create; +import static com.google.cloud.spanner.connection.ConnectionProperty.createKey; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.NonNegativeIntegerConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.StringValueConverter; +import com.google.cloud.spanner.connection.ConnectionProperty.Context; +import java.util.Objects; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ConnectionPropertyTest { + + @Test + public void testCreateKey() { + assertEquals("my_property", createKey(/* extension= */ null, "my_property")); + assertEquals("my_property", createKey(/* extension= */ null, "My_Property")); + assertEquals("my_property", createKey(/* extension= */ null, "MY_PROPERTY")); + assertEquals("my_extension.my_property", createKey("my_extension", "my_property")); + assertEquals("my_extension.my_property", createKey("My_Extension", "My_Property")); + assertEquals("my_extension.my_property", createKey("MY_EXTENSION", "MY_PROPERTY")); + + //noinspection DataFlowIssue + assertThrows(SpannerException.class, () -> createKey("my_extension", /* name= */ null)); + assertThrows(SpannerException.class, () -> createKey("my_extension", "")); + } + + @Test + public void testCreate() { + ConnectionProperty property = + create( + "my_property", + "Description of my_property", + "default_value", + StringValueConverter.INSTANCE, + Context.USER); + assertEquals("my_property", property.getName()); + assertEquals("Description of my_property", property.getDescription()); + 
assertEquals("default_value", property.getDefaultValue()); + assertEquals("my_value", Objects.requireNonNull(property.convert("my_value")).getValue()); + assertEquals(property.getContext(), Context.USER); + assertEquals("my_property", property.getKey()); + + ConnectionProperty startupProperty = + create( + "STARTUP_PROPERTY", + "Description of STARTUP_PROPERTY", + 1, + NonNegativeIntegerConverter.INSTANCE, + Context.STARTUP); + // The name is folded to lower-case. + assertEquals("startup_property", startupProperty.getName()); + assertEquals("Description of STARTUP_PROPERTY", startupProperty.getDescription()); + assertEquals(Integer.valueOf(1), startupProperty.getDefaultValue()); + assertEquals( + Integer.valueOf(2), Objects.requireNonNull(startupProperty.convert("2")).getValue()); + assertEquals(startupProperty.getContext(), Context.STARTUP); + assertEquals("startup_property", startupProperty.getKey()); + } + + @Test + public void testEquals() { + ConnectionProperty property1 = + new ConnectionProperty<>( + /* extension= */ null, + "my_property", + "Description of property1", + "default_value_1", + null, + StringValueConverter.INSTANCE, + Context.STARTUP); + ConnectionProperty property2 = + new ConnectionProperty<>( + /* extension= */ null, + "my_property", + "Description of property2", + "default_value_2", + null, + StringValueConverter.INSTANCE, + Context.USER); + ConnectionProperty property3 = + new ConnectionProperty<>( + "my_extension", + "my_property", + "Description of property3", + "default_value_3", + null, + StringValueConverter.INSTANCE, + Context.STARTUP); + ConnectionProperty property4 = + new ConnectionProperty<>( + "my_extension", + "my_property", + "Description of property4", + "default_value_4", + null, + StringValueConverter.INSTANCE, + Context.USER); + ConnectionProperty property5 = + new ConnectionProperty<>( + /* extension= */ null, + "my_other_property", + "Description of property5", + "default_value_5", + null, + 
StringValueConverter.INSTANCE, + Context.STARTUP); + ConnectionProperty property6 = + new ConnectionProperty<>( + "my_extension", + "my_other_property", + "Description of property6", + "default_value_6", + null, + StringValueConverter.INSTANCE, + Context.STARTUP); + ConnectionProperty property7 = + new ConnectionProperty<>( + /* extension= */ null, + "MY_PROPERTY", + "Description of property7", + "default_value_7", + null, + StringValueConverter.INSTANCE, + Context.STARTUP); + ConnectionProperty property8 = + new ConnectionProperty<>( + "MY_EXTENSION", + "my_property", + "Description of property8", + "default_value_8", + null, + StringValueConverter.INSTANCE, + Context.STARTUP); + ConnectionProperty property9 = + new ConnectionProperty<>( + "my_extension", + "MY_PROPERTY", + "Description of property9", + "default_value_9", + null, + StringValueConverter.INSTANCE, + Context.STARTUP); + + // Equality is based only on the key. + // The key is the lower case combination of extension and name. + // If extension is null, then only the name is the key. 
+ + // property1 = my_property + assertEquals(property1, property2); + assertNotEquals(property1, property3); + assertNotEquals(property1, property4); + assertNotEquals(property1, property5); + assertNotEquals(property1, property6); + assertEquals(property1, property7); + assertNotEquals(property1, property8); + assertNotEquals(property1, property9); + + // property2 = my_property + assertEquals(property2, property1); + assertNotEquals(property2, property3); + assertNotEquals(property2, property4); + assertNotEquals(property2, property5); + assertNotEquals(property2, property6); + assertEquals(property2, property7); + assertNotEquals(property2, property8); + assertNotEquals(property2, property9); + + // property3 = my_extension.my_property + assertNotEquals(property3, property1); + assertNotEquals(property3, property2); + assertEquals(property3, property4); + assertNotEquals(property3, property5); + assertNotEquals(property3, property6); + assertNotEquals(property3, property7); + assertEquals(property3, property8); + assertEquals(property3, property9); + + // property4 = my_extension.my_property + assertNotEquals(property4, property1); + assertNotEquals(property4, property2); + assertEquals(property4, property3); + assertNotEquals(property4, property5); + assertNotEquals(property4, property6); + assertNotEquals(property4, property7); + assertEquals(property4, property8); + assertEquals(property4, property9); + + // property5 = my_other_property + assertNotEquals(property5, property1); + assertNotEquals(property5, property2); + assertNotEquals(property5, property3); + assertNotEquals(property5, property4); + assertNotEquals(property5, property6); + assertNotEquals(property5, property7); + assertNotEquals(property5, property8); + assertNotEquals(property5, property9); + + // property6 = my_extension.my_other_property + assertNotEquals(property6, property1); + assertNotEquals(property6, property2); + assertNotEquals(property6, property3); + assertNotEquals(property6, 
property4); + assertNotEquals(property6, property5); + assertNotEquals(property6, property7); + assertNotEquals(property6, property8); + assertNotEquals(property6, property9); + + // property7 = MY_PROPERTY (same as property1 and property2) + assertEquals(property7, property1); + assertEquals(property7, property2); + assertNotEquals(property7, property3); + assertNotEquals(property7, property4); + assertNotEquals(property7, property5); + assertNotEquals(property7, property6); + assertNotEquals(property7, property8); + assertNotEquals(property7, property9); + + // property8 = MY_EXTENSION.my_property (same as property4) + assertNotEquals(property8, property1); + assertNotEquals(property8, property2); + assertEquals(property8, property3); + assertEquals(property8, property4); + assertNotEquals(property8, property5); + assertNotEquals(property8, property6); + assertNotEquals(property8, property7); + assertEquals(property8, property9); + + // property9 = my_extension.MY_PROPERTY (same as property4 and property8) + assertNotEquals(property9, property1); + assertNotEquals(property9, property2); + assertEquals(property9, property3); + assertEquals(property9, property4); + assertNotEquals(property9, property5); + assertNotEquals(property9, property6); + assertNotEquals(property9, property7); + assertEquals(property9, property8); + } + + @Test + public void testConvert() { + ConnectionProperty property = + create( + "my_property", + "Description of my_property", + 1, + NonNegativeIntegerConverter.INSTANCE, + Context.STARTUP); + assertEquals(Integer.valueOf(100), Objects.requireNonNull(property.convert("100")).getValue()); + assertThrows(SpannerException.class, () -> property.convert("foo")); + assertThrows(SpannerException.class, () -> property.convert("-100")); + } + + @Test + public void testCreateInitialValue() { + ConnectionProperty property = + create( + "my_property", + "Description of my_property", + "default_value", + StringValueConverter.INSTANCE, + Context.USER); 
+ + ConnectionPropertyValue initialValue = property.createInitialValue(null); + assertEquals(property.getDefaultValue(), initialValue.getValue()); + assertEquals(property.getDefaultValue(), initialValue.getResetValue()); + assertSame(initialValue.getProperty(), property); + + ConnectionPropertyValue startupValue = + new ConnectionPropertyValue<>(property, "other_value", "other_value"); + ConnectionPropertyValue initialValueWithStartupValue = + property.createInitialValue(startupValue); + assertEquals("other_value", initialValueWithStartupValue.getValue()); + assertEquals("other_value", initialValueWithStartupValue.getResetValue()); + assertSame(initialValueWithStartupValue.getProperty(), property); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionPropertyValueTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionPropertyValueTest.java new file mode 100644 index 000000000000..39cc47552a12 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionPropertyValueTest.java @@ -0,0 +1,100 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.ConnectionProperties.AUTOCOMMIT_DML_MODE; +import static com.google.cloud.spanner.connection.ConnectionProperties.CONNECTION_STATE_TYPE; +import static com.google.cloud.spanner.connection.ConnectionProperties.READONLY; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.connection.ConnectionProperty.Context; +import com.google.cloud.spanner.connection.ConnectionState.Type; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ConnectionPropertyValueTest { + + @Test + public void testSetValue() { + // This value can be set at any time. + ConnectionPropertyValue value = READONLY.createInitialValue(null); + assertEquals(READONLY.getDefaultValue(), value.getValue()); + + value.setValue(Boolean.FALSE, Context.STARTUP); + assertEquals(Boolean.FALSE, value.getValue()); + + value.setValue(Boolean.TRUE, Context.USER); + assertEquals(Boolean.TRUE, value.getValue()); + + value.setValue(Boolean.FALSE, Context.USER); + assertEquals(Boolean.FALSE, value.getValue()); + + // This value may only be set outside transactions. 
+ ConnectionPropertyValue outsideTransactionOnlyValue = + AUTOCOMMIT_DML_MODE.createInitialValue(null); + assertEquals(AUTOCOMMIT_DML_MODE.getDefaultValue(), outsideTransactionOnlyValue.getValue()); + + outsideTransactionOnlyValue.setValue(AutocommitDmlMode.PARTITIONED_NON_ATOMIC, Context.STARTUP); + assertEquals(AutocommitDmlMode.PARTITIONED_NON_ATOMIC, outsideTransactionOnlyValue.getValue()); + + outsideTransactionOnlyValue.setValue(AutocommitDmlMode.TRANSACTIONAL, Context.USER); + assertEquals(AutocommitDmlMode.TRANSACTIONAL, outsideTransactionOnlyValue.getValue()); + + // This value may only be set at startup. + ConnectionPropertyValue startupOnlyValue = + CONNECTION_STATE_TYPE.createInitialValue(null); + assertEquals(CONNECTION_STATE_TYPE.getDefaultValue(), startupOnlyValue.getValue()); + + startupOnlyValue.setValue(Type.TRANSACTIONAL, Context.STARTUP); + assertEquals(Type.TRANSACTIONAL, startupOnlyValue.getValue()); + + // This property may not be set after startup.. + assertThrows( + SpannerException.class, + () -> startupOnlyValue.setValue(Type.NON_TRANSACTIONAL, Context.USER)); + // The value should not have changed. + assertEquals(Type.TRANSACTIONAL, startupOnlyValue.getValue()); + + // This property may not be set in a transaction. + assertThrows( + SpannerException.class, + () -> startupOnlyValue.setValue(Type.NON_TRANSACTIONAL, Context.USER)); + // The value should not have changed. 
+ assertEquals(Type.TRANSACTIONAL, startupOnlyValue.getValue()); + } + + @Test + public void testCopy() { + ConnectionPropertyValue value = + new ConnectionPropertyValue<>( + /* property= */ AUTOCOMMIT_DML_MODE, + /* resetValue= */ AutocommitDmlMode.PARTITIONED_NON_ATOMIC, + /* value= */ AutocommitDmlMode.TRANSACTIONAL); + ConnectionPropertyValue copy = value.copy(); + + assertEquals(value, copy); + assertNotSame(value, copy); + assertEquals(value.getProperty(), copy.getProperty()); + assertEquals(value.getValue(), copy.getValue()); + assertEquals(value.getResetValue(), copy.getResetValue()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStateMockServerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStateMockServerTest.java new file mode 100644 index 000000000000..2b48c64d2e03 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStateMockServerTest.java @@ -0,0 +1,303 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.ConnectionProperties.CONNECTION_STATE_TYPE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeTrue; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.ConnectionState.Type; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest.ITConnection; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class ConnectionStateMockServerTest extends AbstractMockServerTest { + + @Parameters(name = "dialect = {0})") + public static Object[] data() { + return Dialect.values(); + } + + @Parameter public Dialect dialect; + + private Dialect currentDialect; + + @BeforeClass + public static void enableTransactionalConnectionStateForPostgreSQL() { + System.setProperty( + ConnectionOptions.ENABLE_TRANSACTIONAL_CONNECTION_STATE_FOR_POSTGRESQL_PROPERTY, "true"); + } + + @AfterClass + public static void disableTransactionalConnectionStateForPostgreSQL() { + System.clearProperty( + ConnectionOptions.ENABLE_TRANSACTIONAL_CONNECTION_STATE_FOR_POSTGRESQL_PROPERTY); + } + + @Before + public void setupDialect() { + if (currentDialect != dialect) { + // Reset the dialect result. 
+ SpannerPool.closeSpannerPool(); + mockSpanner.putStatementResult(StatementResult.detectDialectResult(dialect)); + currentDialect = dialect; + } + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + protected String getBaseUrl() { + return String.format( + "cloudspanner://localhost:%d/projects/proj/instances/inst/databases/db?usePlainText=true", + getPort()); + } + + ITConnection createConnection(ConnectionState.Type type) { + return createConnection(";" + CONNECTION_STATE_TYPE.getKey() + "=" + type.name()); + } + + String getPrefix() { + return dialect == Dialect.POSTGRESQL ? "SPANNER." : ""; + } + + @Test + public void testConnectionStateType() { + try (Connection connection = createConnection()) { + // The default ConnectionState.Type should depend on the dialect. + assertEquals( + dialect == Dialect.POSTGRESQL ? Type.TRANSACTIONAL : Type.NON_TRANSACTIONAL, + ((ConnectionImpl) connection).getConnectionStateType()); + } + // It should be possible to override the default ConnectionState.Type, irrespective of the + // database dialect. 
+ try (Connection connection = createConnection(Type.TRANSACTIONAL)) { + assertEquals(Type.TRANSACTIONAL, ((ConnectionImpl) connection).getConnectionStateType()); + } + try (Connection connection = createConnection(Type.NON_TRANSACTIONAL)) { + assertEquals(Type.NON_TRANSACTIONAL, ((ConnectionImpl) connection).getConnectionStateType()); + } + } + + @Test + public void testAutocommitPersistsConnectionState() { + try (Connection connection = createConnection(";autocommit=true")) { + assertTrue(connection.isAutocommit()); + + assertEquals(AutocommitDmlMode.TRANSACTIONAL, connection.getAutocommitDmlMode()); + connection.setAutocommitDmlMode(AutocommitDmlMode.PARTITIONED_NON_ATOMIC); + assertEquals(AutocommitDmlMode.PARTITIONED_NON_ATOMIC, connection.getAutocommitDmlMode()); + } + } + + @Test + public void testNonTransactionalState_commitsAutomatically() { + try (Connection connection = + createConnection(";connection_state_type=non_transactional;autocommit=false")) { + assertEquals(((ConnectionImpl) connection).getConnectionStateType(), Type.NON_TRANSACTIONAL); + assertFalse(connection.isAutocommit()); + + // Verify the initial default value. + assertFalse(connection.isReturnCommitStats()); + + // Change the value and read it back in the same transaction. + connection.setReturnCommitStats(true); + assertTrue(connection.isReturnCommitStats()); + + // Rolling back should not have any impact on the connection state, as the connection state is + // non-transactional. + connection.rollback(); + assertTrue(connection.isReturnCommitStats()); + + // Verify that the behavior is the same with autocommit=true and a temporary transaction. 
+ assertTrue(connection.isReturnCommitStats()); + connection.setAutocommit(true); + connection.beginTransaction(); + connection.setReturnCommitStats(false); + assertFalse(connection.isReturnCommitStats()); + connection.rollback(); + assertFalse(connection.isReturnCommitStats()); + } + } + + @Test + public void testTransactionalState_rollBacksConnectionState() { + try (Connection connection = + createConnection(";connection_state_type=transactional;autocommit=false")) { + assertEquals(((ConnectionImpl) connection).getConnectionStateType(), Type.TRANSACTIONAL); + assertFalse(connection.isAutocommit()); + + // Verify the initial default value. + assertFalse(connection.isReturnCommitStats()); + + // Change the value and read it back in the same transaction. + connection.setReturnCommitStats(true); + assertTrue(connection.isReturnCommitStats()); + + // Rolling back will undo the connection state change. + connection.rollback(); + assertFalse(connection.isReturnCommitStats()); + + // Verify that the behavior is the same with autocommit=true and a temporary transaction. + assertFalse(connection.isReturnCommitStats()); + connection.setAutocommit(true); + connection.beginTransaction(); + connection.setReturnCommitStats(true); + assertTrue(connection.isReturnCommitStats()); + connection.rollback(); + assertFalse(connection.isReturnCommitStats()); + } + } + + @Test + public void testTransactionalState_commitsConnectionState() { + try (Connection connection = + createConnection(";connection_state_type=transactional;autocommit=false")) { + assertEquals(((ConnectionImpl) connection).getConnectionStateType(), Type.TRANSACTIONAL); + assertFalse(connection.isAutocommit()); + + // Verify the initial default value. + assertFalse(connection.isReturnCommitStats()); + + // Change the value and read it back in the same transaction. + connection.setReturnCommitStats(true); + assertTrue(connection.isReturnCommitStats()); + + // Committing will persist the connection state change. 
+ connection.commit(); + assertTrue(connection.isReturnCommitStats()); + + // Verify that the behavior is the same with autocommit=true and a temporary transaction. + assertTrue(connection.isReturnCommitStats()); + connection.setAutocommit(true); + connection.beginTransaction(); + connection.setReturnCommitStats(false); + assertFalse(connection.isReturnCommitStats()); + connection.commit(); + assertFalse(connection.isReturnCommitStats()); + } + } + + @Test + public void testLocalChangeIsLostAfterTransaction() { + // SET LOCAL ... has the same effect regardless of connection state type. + for (ConnectionState.Type type : Type.values()) { + try (ConnectionImpl connection = (ConnectionImpl) createConnection()) { + assertTrue(connection.isAutocommit()); + + for (boolean commit : new boolean[] {true, false}) { + // Verify the initial default value. + assertFalse(connection.isReturnCommitStats()); + + connection.beginTransaction(); + // Change the value and read it back in the same transaction. + connection.setReturnCommitStats(true, /* local= */ true); + assertTrue(connection.isReturnCommitStats()); + // Both rolling back and committing will undo the connection state change. + if (commit) { + connection.commit(); + } else { + connection.rollback(); + } + // The local change should now be undone. 
+ assertFalse(connection.isReturnCommitStats()); + } + } + } + } + + @Test + public void testSetLocalWithSqlStatement() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + + assertTrue(connection.isRetryAbortsInternally()); + connection.execute( + Statement.of(String.format("set local %sretry_aborts_internally=false", getPrefix()))); + assertFalse(connection.isRetryAbortsInternally()); + connection.commit(); + assertTrue(connection.isRetryAbortsInternally()); + } + } + + @Test + public void testSetSessionWithSqlStatement() { + assumeTrue("Only PostgreSQL supports the 'session' keyword", dialect == Dialect.POSTGRESQL); + + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + + assertTrue(connection.isRetryAbortsInternally()); + connection.execute( + Statement.of(String.format("set session %sretry_aborts_internally=false", getPrefix()))); + assertFalse(connection.isRetryAbortsInternally()); + connection.commit(); + assertFalse(connection.isRetryAbortsInternally()); + } + } + + @Test + public void testSetLocalInvalidValue() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + + assertTrue(connection.isRetryAbortsInternally()); + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + connection.execute( + Statement.of( + String.format("set local %sretry_aborts_internally=foo", getPrefix())))); + assertEquals(ErrorCode.INVALID_ARGUMENT, exception.getErrorCode()); + assertTrue( + exception.getMessage(), + exception + .getMessage() + .endsWith( + String.format("Unknown value for %sRETRY_ABORTS_INTERNALLY: foo", getPrefix()))); + assertTrue(connection.isRetryAbortsInternally()); + } + } + + @Test + public void testGetConnectionProperty() { + try (Connection connection = createConnection()) { + ConnectionProperty unknownLength = ConnectionProperties.UNKNOWN_LENGTH; + assertEquals( + unknownLength.getDefaultValue(), 
connection.getConnectionPropertyValue(unknownLength)); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStateTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStateTest.java new file mode 100644 index 000000000000..cac113ea51d2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStateTest.java @@ -0,0 +1,267 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.ConnectionProperties.AUTOCOMMIT_DML_MODE; +import static com.google.cloud.spanner.connection.ConnectionProperties.CONNECTION_STATE_TYPE; +import static com.google.cloud.spanner.connection.ConnectionProperties.READONLY; +import static com.google.cloud.spanner.connection.ConnectionProperties.RETRY_ABORTS_INTERNALLY; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.connection.ConnectionProperty.Context; +import com.google.cloud.spanner.connection.ConnectionState.Type; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class ConnectionStateTest { + + @Parameters(name = "connectionStateType = {0}") + public static Object[] data() { + return ConnectionState.Type.values(); + } + + @SuppressWarnings("ClassEscapesDefinedScope") + @Parameter + public ConnectionState.Type connectionStateType; + + ConnectionState getNonTransactionalState() { + return new ConnectionState( + createConnectionOptionsBuilder().build().getInitialConnectionPropertyValues()); + } + + ConnectionState getTransactionalState() { + return new ConnectionState( + createConnectionOptionsBuilder() + .setConnectionPropertyValue(CONNECTION_STATE_TYPE, Type.TRANSACTIONAL) + .build() + .getInitialConnectionPropertyValues()); + } + + ConnectionOptions.Builder createConnectionOptionsBuilder() { + return ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d") + .setCredentials(NoCredentials.getInstance()); + } + + ConnectionState 
getConnectionState() { + return connectionStateType == Type.TRANSACTIONAL + ? getTransactionalState() + : getNonTransactionalState(); + } + + @Test + public void testSetOutsideTransaction() { + ConnectionState state = getConnectionState(); + assertEquals(connectionStateType, state.getType()); + + assertEquals(false, state.getValue(READONLY).getValue()); + state.setValue(READONLY, true, Context.USER, /* inTransaction= */ false); + assertEquals(true, state.getValue(READONLY).getValue()); + } + + @Test + public void testSetToNullOutsideTransaction() { + ConnectionState state = getConnectionState(); + assertEquals(AutocommitDmlMode.TRANSACTIONAL, state.getValue(AUTOCOMMIT_DML_MODE).getValue()); + state.setValue(AUTOCOMMIT_DML_MODE, null, Context.USER, /* inTransaction= */ false); + assertNull(state.getValue(AUTOCOMMIT_DML_MODE).getValue()); + } + + @Test + public void testSetInTransactionCommit() { + ConnectionState state = getConnectionState(); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.setValue(RETRY_ABORTS_INTERNALLY, false, Context.USER, /* inTransaction= */ true); + assertEquals(false, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + + // Verify that the change is persisted if the transaction is committed. + state.commit(); + assertEquals(false, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + } + + @Test + public void testSetInTransactionRollback() { + ConnectionState state = getConnectionState(); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.setValue(RETRY_ABORTS_INTERNALLY, false, Context.USER, /* inTransaction= */ true); + assertEquals(false, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + + // Verify that the change is rolled back if the transaction is rolled back and the connection + // state is transactional. + state.rollback(); + // The value should rolled back to true if the state is transactional. 
+ // The value should (still) be false if the state is non-transactional. + boolean expectedValue = connectionStateType == Type.TRANSACTIONAL; + assertEquals(expectedValue, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + } + + @Test + public void testResetInTransactionCommit() { + ConnectionState state = getConnectionState(); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.setValue(RETRY_ABORTS_INTERNALLY, false, Context.USER, /* inTransaction= */ true); + assertEquals(false, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.commit(); + + // Reset the value to the default (true). + state.resetValue(RETRY_ABORTS_INTERNALLY, Context.USER, /* inTransaction= */ true); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + + // Verify that the change is persisted if the transaction is committed. + state.commit(); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + } + + @Test + public void testResetInTransactionRollback() { + ConnectionState state = getConnectionState(); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.setValue(RETRY_ABORTS_INTERNALLY, false, Context.USER, /* inTransaction= */ true); + assertEquals(false, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.commit(); + + // Reset the value to the default (true). + state.resetValue(RETRY_ABORTS_INTERNALLY, Context.USER, /* inTransaction= */ true); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + + // Verify that the change is rolled back if the transaction is rolled back and the connection + // state is transactional. + state.rollback(); + // The value should rolled back to false if the state is transactional. + // The value should (still) be true if the state is non-transactional. 
+ boolean expectedValue = connectionStateType != Type.TRANSACTIONAL; + assertEquals(expectedValue, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + } + + @Test + public void testSetLocal() { + ConnectionState state = getConnectionState(); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.setLocalValue(RETRY_ABORTS_INTERNALLY, false); + assertEquals(false, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + + // Verify that the change is no longer visible once the transaction has ended, even if the + // transaction was committed. + state.commit(); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + } + + @Test + public void testSetLocalForStartupProperty() { + ConnectionState state = getConnectionState(); + SpannerException exception = + assertThrows( + SpannerException.class, + () -> state.setLocalValue(CONNECTION_STATE_TYPE, Type.TRANSACTIONAL)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } + + @Test + public void testSetInTransactionForStartupProperty() { + ConnectionState state = getConnectionState(); + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + state.setValue( + CONNECTION_STATE_TYPE, + Type.TRANSACTIONAL, + Context.USER, + /* inTransaction= */ true)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } + + @Test + public void testSetStartupOnlyProperty() { + ConnectionState state = getConnectionState(); + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + state.setValue( + CONNECTION_STATE_TYPE, + Type.TRANSACTIONAL, + Context.USER, + /* inTransaction= */ false)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } + + @Test + public void testReset() { + ConnectionState state = getConnectionState(); + // The default should be true. 
+ assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.setValue(RETRY_ABORTS_INTERNALLY, false, Context.USER, /* inTransaction= */ false); + assertEquals(false, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + + // Resetting the property should reset it to the default value. + state.resetValue(RETRY_ABORTS_INTERNALLY, Context.USER, /* inTransaction= */ false); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + } + + @Test + public void testResetInTransaction() { + ConnectionState state = getConnectionState(); + // The default should be true. + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.setValue(RETRY_ABORTS_INTERNALLY, false, Context.USER, /* inTransaction= */ true); + assertEquals(false, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.commit(); + + // Resetting the property should reset it to the default value. + state.resetValue(RETRY_ABORTS_INTERNALLY, Context.USER, /* inTransaction= */ true); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + } + + @Test + public void testResetStartupOnlyProperty() { + ConnectionState state = getConnectionState(); + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + state.resetValue(CONNECTION_STATE_TYPE, Context.USER, /* inTransaction= */ false)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } + + @Test + public void testInitialValueInConnectionUrl() { + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d?retryAbortsInternally=false") + .setCredentials(NoCredentials.getInstance()) + .build(); + ConnectionState state = new ConnectionState(options.getInitialConnectionPropertyValues()); + + assertEquals(false, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.setValue(RETRY_ABORTS_INTERNALLY, true, Context.USER, /* inTransaction= */ false); + assertEquals(true, 
state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + + // Resetting the property should reset it to the value that was set in the connection URL. + state.resetValue(RETRY_ABORTS_INTERNALLY, Context.USER, /* inTransaction= */ false); + assertEquals(false, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStatementExecutorTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStatementExecutorTest.java new file mode 100644 index 000000000000..3d386e7569d0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStatementExecutorTest.java @@ -0,0 +1,280 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.connection.PgTransactionMode.AccessMode; +import com.google.cloud.spanner.connection.PgTransactionMode.IsolationLevel; +import java.time.Duration; +import java.util.concurrent.TimeUnit; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class ConnectionStatementExecutorTest { + @Parameter public Dialect dialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + private ConnectionImpl connection; + private ConnectionStatementExecutorImpl subject; + + @Before + public void createSubject() { + connection = mock(ConnectionImpl.class); + when(connection.getAutocommitDmlMode()).thenReturn(AutocommitDmlMode.TRANSACTIONAL); + when(connection.getReadOnlyStaleness()).thenReturn(TimestampBound.strong()); + when(connection.getDialect()).thenReturn(dialect); + subject = new ConnectionStatementExecutorImpl(connection); + } + + @Test + public void testGetConnection() { + assertThat(subject.getConnection(), is(equalTo(connection))); + } + + @Test + public void testStatementBeginTransaction() { + subject.statementBeginTransaction(null); + verify(connection).beginTransaction(); + } + + @Test + public 
void testStatementCommit() { + subject.statementCommit(); + verify(connection).commit(); + } + + @Test + public void testStatementGetAutocommit() { + subject.statementShowAutocommit(); + verify(connection).isAutocommit(); + } + + @Test + public void testStatementGetAutocommitDmlMode() { + subject.statementShowAutocommitDmlMode(); + verify(connection).getAutocommitDmlMode(); + } + + @Test + public void testStatementGetCommitTimestamp() { + subject.statementShowCommitTimestamp(); + verify(connection).getCommitTimestampOrNull(); + } + + @Test + public void testStatementGetReadOnly() { + subject.statementShowReadOnly(); + verify(connection).isReadOnly(); + } + + @Test + public void testStatementGetReadOnlyStaleness() { + subject.statementShowReadOnlyStaleness(); + verify(connection).getReadOnlyStaleness(); + } + + @Test + public void testStatementGetOptimizerVersion() { + subject.statementShowOptimizerVersion(); + verify(connection).getOptimizerVersion(); + } + + @Test + public void testStatementGetOptimizerStatisticsPackage() { + subject.statementShowOptimizerStatisticsPackage(); + verify(connection).getOptimizerStatisticsPackage(); + } + + @Test + public void testStatementGetReadTimestamp() { + subject.statementShowReadTimestamp(); + verify(connection).getReadTimestampOrNull(); + } + + @Test + public void testStatementGetStatementTimeout() { + subject.statementSetStatementTimeout(Duration.ofSeconds(1L)); + when(connection.hasStatementTimeout()).thenReturn(true); + subject.statementShowStatementTimeout(); + verify(connection, atLeastOnce()).getStatementTimeout(any(TimeUnit.class)); + subject.statementSetStatementTimeout(Duration.ZERO); + when(connection.hasStatementTimeout()).thenReturn(false); + } + + @Test + public void testStatementRollback() { + subject.statementRollback(); + verify(connection).rollback(); + } + + @Test + public void testStatementSetAutocommit() { + subject.statementSetAutocommit(Boolean.TRUE); + verify(connection).setAutocommit(true); + 
subject.statementSetAutocommit(Boolean.FALSE); + verify(connection).setAutocommit(false); + } + + @Test + public void testStatementSetAutocommitDmlMode() { + subject.statementSetAutocommitDmlMode(AutocommitDmlMode.PARTITIONED_NON_ATOMIC); + verify(connection).setAutocommitDmlMode(AutocommitDmlMode.PARTITIONED_NON_ATOMIC); + subject.statementSetAutocommitDmlMode(AutocommitDmlMode.TRANSACTIONAL); + verify(connection).setAutocommitDmlMode(AutocommitDmlMode.TRANSACTIONAL); + } + + @Test + public void testStatementSetReadOnly() { + subject.statementSetReadOnly(Boolean.TRUE); + verify(connection).setReadOnly(true); + subject.statementSetReadOnly(Boolean.FALSE); + verify(connection).setReadOnly(false); + } + + @Test + public void testStatementSetReadOnlyStaleness() { + subject.statementSetReadOnlyStaleness(TimestampBound.strong()); + verify(connection).setReadOnlyStaleness(TimestampBound.strong()); + + subject.statementSetReadOnlyStaleness( + TimestampBound.ofReadTimestamp(Timestamp.parseTimestamp("2018-10-31T10:11:12.123Z"))); + verify(connection) + .setReadOnlyStaleness( + TimestampBound.ofReadTimestamp(Timestamp.parseTimestamp("2018-10-31T10:11:12.123Z"))); + + subject.statementSetReadOnlyStaleness( + TimestampBound.ofMinReadTimestamp(Timestamp.parseTimestamp("2018-10-31T10:11:12.123Z"))); + verify(connection) + .setReadOnlyStaleness( + TimestampBound.ofReadTimestamp(Timestamp.parseTimestamp("2018-10-31T10:11:12.123Z"))); + + subject.statementSetReadOnlyStaleness(TimestampBound.ofExactStaleness(10L, TimeUnit.SECONDS)); + verify(connection).setReadOnlyStaleness(TimestampBound.ofExactStaleness(10L, TimeUnit.SECONDS)); + + subject.statementSetReadOnlyStaleness( + TimestampBound.ofMaxStaleness(20L, TimeUnit.MILLISECONDS)); + verify(connection) + .setReadOnlyStaleness(TimestampBound.ofMaxStaleness(20L, TimeUnit.MILLISECONDS)); + } + + @Test + public void testStatementSetOptimizerVersion() { + subject.statementSetOptimizerVersion("1"); + 
verify(connection).setOptimizerVersion("1"); + subject.statementSetOptimizerVersion(""); + verify(connection).setOptimizerVersion(""); + subject.statementSetOptimizerVersion("LATEST"); + verify(connection).setOptimizerVersion("LATEST"); + } + + @Test + public void testStatementSetOptimizerStatisticsPackage() { + subject.statementSetOptimizerStatisticsPackage("custom-package"); + verify(connection).setOptimizerStatisticsPackage("custom-package"); + subject.statementSetOptimizerStatisticsPackage(""); + verify(connection).setOptimizerStatisticsPackage(""); + } + + @Test + public void testStatementSetStatementTimeout() { + subject.statementSetStatementTimeout(Duration.ofNanos(100)); + verify(connection).setStatementTimeout(100L, TimeUnit.NANOSECONDS); + } + + @Test + public void testStatementSetTransactionMode() { + subject.statementSetTransactionMode(TransactionMode.READ_ONLY_TRANSACTION); + verify(connection).setTransactionMode(TransactionMode.READ_ONLY_TRANSACTION); + subject.statementSetTransactionMode(TransactionMode.READ_WRITE_TRANSACTION); + verify(connection).setTransactionMode(TransactionMode.READ_WRITE_TRANSACTION); + } + + @Test + public void testStatementSetPgTransactionMode() { + PgTransactionMode readOnlyMode = new PgTransactionMode(); + readOnlyMode.setAccessMode(AccessMode.READ_ONLY_TRANSACTION); + subject.statementSetPgTransactionMode(readOnlyMode); + verify(connection).setTransactionMode(TransactionMode.READ_ONLY_TRANSACTION); + + PgTransactionMode readWriteMode = new PgTransactionMode(); + readWriteMode.setAccessMode(AccessMode.READ_WRITE_TRANSACTION); + subject.statementSetPgTransactionMode(readWriteMode); + verify(connection).setTransactionMode(TransactionMode.READ_WRITE_TRANSACTION); + } + + @Test + public void testStatementSetPgTransactionModeNoOp() { + PgTransactionMode noMode = new PgTransactionMode(); + PgTransactionMode defaultMode = new PgTransactionMode(); + defaultMode.setIsolationLevel(IsolationLevel.ISOLATION_LEVEL_DEFAULT); + 
PgTransactionMode serializableMode = new PgTransactionMode(); + serializableMode.setIsolationLevel(IsolationLevel.ISOLATION_LEVEL_SERIALIZABLE); + + subject.statementSetPgTransactionMode(noMode); + subject.statementSetPgTransactionMode(defaultMode); + subject.statementSetPgTransactionMode(serializableMode); + + verify(connection, never()).setTransactionMode(TransactionMode.READ_ONLY_TRANSACTION); + verify(connection, never()).setTransactionMode(TransactionMode.READ_WRITE_TRANSACTION); + } + + @Test + public void testStatementSetProtoDescriptors() { + subject.statementSetProtoDescriptors("protoDescriptor".getBytes()); + verify(connection).setProtoDescriptors("protoDescriptor".getBytes()); + } + + @Test + public void testStatementSetProtoDescriptorsFilePath() { + String filePath = "com/google/cloud/spanner/descriptors.pb"; + subject.statementSetProtoDescriptorsFilePath(filePath); + verify(connection).setProtoDescriptorsFilePath(filePath); + } + + @Test + public void testStatementGetProtoDescriptors() { + subject.statementShowProtoDescriptors(); + verify(connection).getProtoDescriptors(); + } + + @Test + public void testStatementGetProtoDescriptorsFilePath() { + subject.statementShowProtoDescriptorsFilePath(); + verify(connection).getProtoDescriptorsFilePath(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStatementWithNoParametersTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStatementWithNoParametersTest.java new file mode 100644 index 000000000000..7e6376d671bb --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStatementWithNoParametersTest.java @@ -0,0 +1,219 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.DialectNamespaceMapper.getNamespace; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import java.util.concurrent.TimeUnit; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class ConnectionStatementWithNoParametersTest { + @Parameter public Dialect dialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + private AbstractStatementParser parser; + + @Before + public void setup() { + parser = AbstractStatementParser.getInstance(dialect); + } + + ParsedStatement parse(String sql) { + return parser.parse(Statement.of(sql)); + } + + @Test + public void testExecuteGetAutocommit() { + ParsedStatement statement = parser.parse(Statement.of("show variable autocommit")); + ConnectionImpl connection 
= mock(ConnectionImpl.class); + ConnectionStatementExecutorImpl executor = mock(ConnectionStatementExecutorImpl.class); + when(executor.getConnection()).thenReturn(connection); + when(executor.statementShowAutocommit()).thenCallRealMethod(); + statement.getClientSideStatement().execute(executor, statement); + verify(connection, times(1)).isAutocommit(); + } + + @Test + public void testExecuteGetReadOnly() { + ParsedStatement statement = + parser.parse( + Statement.of(String.format("show variable %sreadonly", getNamespace(dialect)))); + ConnectionImpl connection = mock(ConnectionImpl.class); + when(connection.getDialect()).thenReturn(dialect); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + statement.getClientSideStatement().execute(executor, statement); + verify(connection, times(1)).isReadOnly(); + } + + @Test + public void testExecuteGetAutocommitDmlMode() { + ParsedStatement statement = + parser.parse( + Statement.of( + String.format("show variable %sautocommit_dml_mode", getNamespace(dialect)))); + ConnectionImpl connection = mock(ConnectionImpl.class); + when(connection.getDialect()).thenReturn(dialect); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + when(connection.getAutocommitDmlMode()).thenReturn(AutocommitDmlMode.TRANSACTIONAL); + statement.getClientSideStatement().execute(executor, statement); + verify(connection, times(1)).getAutocommitDmlMode(); + } + + @Test + public void testExecuteGetStatementTimeout() { + ParsedStatement statement = parser.parse(Statement.of("show variable statement_timeout")); + ConnectionImpl connection = mock(ConnectionImpl.class); + ConnectionStatementExecutorImpl executor = mock(ConnectionStatementExecutorImpl.class); + when(executor.getConnection()).thenReturn(connection); + when(executor.statementShowStatementTimeout()).thenCallRealMethod(); + when(connection.hasStatementTimeout()).thenReturn(true); + 
when(connection.getStatementTimeout(TimeUnit.NANOSECONDS)).thenReturn(1L); + statement.getClientSideStatement().execute(executor, statement); + verify(connection, times(2)).getStatementTimeout(TimeUnit.NANOSECONDS); + } + + @Test + public void testExecuteGetReadTimestamp() { + ParsedStatement statement = + parser.parse( + Statement.of(String.format("show variable %sread_timestamp", getNamespace(dialect)))); + ConnectionImpl connection = mock(ConnectionImpl.class); + when(connection.getDialect()).thenReturn(dialect); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + when(connection.getReadTimestampOrNull()).thenReturn(Timestamp.now()); + statement.getClientSideStatement().execute(executor, statement); + verify(connection, times(1)).getReadTimestampOrNull(); + } + + @Test + public void testExecuteGetCommitTimestamp() { + ParsedStatement statement = + parser.parse( + Statement.of(String.format("show variable %scommit_timestamp", getNamespace(dialect)))); + ConnectionImpl connection = mock(ConnectionImpl.class); + when(connection.getDialect()).thenReturn(dialect); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + when(connection.getCommitTimestampOrNull()).thenReturn(Timestamp.now()); + statement.getClientSideStatement().execute(executor, statement); + verify(connection, times(1)).getCommitTimestampOrNull(); + } + + @Test + public void testExecuteGetReadOnlyStaleness() { + ParsedStatement statement = + parser.parse( + Statement.of( + String.format("show variable %sread_only_staleness", getNamespace(dialect)))); + ConnectionImpl connection = mock(ConnectionImpl.class); + when(connection.getDialect()).thenReturn(dialect); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + when(connection.getReadOnlyStaleness()).thenReturn(TimestampBound.strong()); + statement.getClientSideStatement().execute(executor, statement); + verify(connection, 
times(1)).getReadOnlyStaleness(); + } + + @Test + public void testExecuteGetOptimizerVersion() { + ParsedStatement statement = + parser.parse( + Statement.of( + String.format("show variable %soptimizer_version", getNamespace(dialect)))); + ConnectionImpl connection = mock(ConnectionImpl.class); + when(connection.getDialect()).thenReturn(dialect); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + when(connection.getOptimizerVersion()).thenReturn("1"); + statement.getClientSideStatement().execute(executor, statement); + verify(connection, times(1)).getOptimizerVersion(); + } + + @Test + public void testExecuteGetOptimizerStatisticsPackage() { + ParsedStatement statement = + parser.parse( + Statement.of( + String.format( + "show variable %soptimizer_statistics_package", getNamespace(dialect)))); + ConnectionImpl connection = mock(ConnectionImpl.class); + when(connection.getDialect()).thenReturn(dialect); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + when(connection.getOptimizerStatisticsPackage()).thenReturn("custom-package"); + statement.getClientSideStatement().execute(executor, statement); + verify(connection, times(1)).getOptimizerStatisticsPackage(); + } + + @Test + public void testExecuteBegin() { + ParsedStatement subject = parser.parse(Statement.of("begin")); + for (String statement : subject.getClientSideStatement().getExampleStatements()) { + ConnectionImpl connection = mock(ConnectionImpl.class); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + subject.getClientSideStatement().execute(executor, parse(statement)); + if (statement.contains("isolation") && !statement.contains("default")) { + verify(connection, times(1)).beginTransaction(any(IsolationLevel.class)); + } else { + verify(connection, times(1)).beginTransaction(); + } + } + } + + @Test + public void testExecuteCommit() { + ParsedStatement subject = 
parser.parse(Statement.of("commit")); + for (String statement : subject.getClientSideStatement().getExampleStatements()) { + ConnectionImpl connection = mock(ConnectionImpl.class); + ConnectionStatementExecutorImpl executor = mock(ConnectionStatementExecutorImpl.class); + when(executor.getConnection()).thenReturn(connection); + when(executor.statementCommit()).thenCallRealMethod(); + subject.getClientSideStatement().execute(executor, parse(statement)); + verify(connection, times(1)).commit(); + } + } + + @Test + public void testExecuteRollback() { + ParsedStatement subject = parser.parse(Statement.of("rollback")); + for (String statement : subject.getClientSideStatement().getExampleStatements()) { + ConnectionImpl connection = mock(ConnectionImpl.class); + ConnectionStatementExecutorImpl executor = mock(ConnectionStatementExecutorImpl.class); + when(executor.getConnection()).thenReturn(connection); + when(executor.statementRollback()).thenCallRealMethod(); + subject.getClientSideStatement().execute(executor, parse(statement)); + verify(connection, times(1)).rollback(); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStatementWithOneParameterTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStatementWithOneParameterTest.java new file mode 100644 index 000000000000..0c86da54de10 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStatementWithOneParameterTest.java @@ -0,0 +1,278 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.DialectNamespaceMapper.getNamespace; +import static org.junit.Assume.assumeTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import java.time.Duration; +import java.util.concurrent.TimeUnit; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; +import org.mockito.Mockito; + +@RunWith(Parameterized.class) +public class ConnectionStatementWithOneParameterTest { + @Parameter public Dialect dialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + private AbstractStatementParser parser; + + @Before + public void setup() { + parser = AbstractStatementParser.getInstance(dialect); + } + + ParsedStatement parse(String sql) { + return parser.parse(Statement.of(sql)); + } + + @Test + public void testExecuteSetAutocommit() { + ParsedStatement subject = parser.parse(Statement.of("set autocommit = true")); + 
ConnectionImpl connection = mock(ConnectionImpl.class); + ConnectionStatementExecutorImpl executor = mock(ConnectionStatementExecutorImpl.class); + when(executor.getDialect()).thenReturn(dialect); + when(executor.getConnection()).thenReturn(connection); + when(executor.statementSetAutocommit(any(Boolean.class))).thenCallRealMethod(); + for (Boolean mode : new Boolean[] {Boolean.FALSE, Boolean.TRUE}) { + subject + .getClientSideStatement() + .execute(executor, parse(String.format("set autocommit = %s", mode))); + verify(connection, times(1)).setAutocommit(mode); + } + } + + @Test + public void testExecuteSetReadOnly() { + ParsedStatement subject = + parser.parse(Statement.of(String.format("set %sreadonly = true", getNamespace(dialect)))); + ConnectionImpl connection = mock(ConnectionImpl.class); + ConnectionStatementExecutorImpl executor = mock(ConnectionStatementExecutorImpl.class); + when(executor.getDialect()).thenReturn(dialect); + when(executor.getConnection()).thenReturn(connection); + when(executor.statementSetReadOnly(any(Boolean.class))).thenCallRealMethod(); + for (Boolean mode : new Boolean[] {Boolean.FALSE, Boolean.TRUE}) { + subject + .getClientSideStatement() + .execute( + executor, parse(String.format("set %sreadonly = %s", getNamespace(dialect), mode))); + verify(connection, times(1)).setReadOnly(mode); + } + } + + @Test + public void testExecuteSetReadOnlyTo() { + assumeTrue("TO is only supported in PostgreSQL dialect", dialect == Dialect.POSTGRESQL); + ParsedStatement subject = + parser.parse(Statement.of(String.format("set %sreadonly to true", getNamespace(dialect)))); + ConnectionImpl connection = mock(ConnectionImpl.class); + ConnectionStatementExecutorImpl executor = mock(ConnectionStatementExecutorImpl.class); + when(executor.getDialect()).thenReturn(dialect); + when(executor.getConnection()).thenReturn(connection); + when(executor.statementSetReadOnly(any(Boolean.class))).thenCallRealMethod(); + for (Boolean mode : new Boolean[] 
{Boolean.FALSE, Boolean.TRUE}) { + subject + .getClientSideStatement() + .execute( + executor, parse(String.format("set %sreadonly to %s", getNamespace(dialect), mode))); + verify(connection, times(1)).setReadOnly(mode); + } + } + + @Test + public void testExecuteSetAutocommitDmlMode() { + ParsedStatement subject = + parser.parse( + Statement.of(String.format("set %sautocommit_dml_mode='foo'", getNamespace(dialect)))); + ConnectionImpl connection = mock(ConnectionImpl.class); + ConnectionStatementExecutorImpl executor = mock(ConnectionStatementExecutorImpl.class); + when(executor.getDialect()).thenReturn(dialect); + when(executor.getConnection()).thenReturn(connection); + when(executor.statementSetAutocommitDmlMode(any(AutocommitDmlMode.class))).thenCallRealMethod(); + for (AutocommitDmlMode mode : AutocommitDmlMode.values()) { + subject + .getClientSideStatement() + .execute( + executor, + parse( + String.format( + "set %sautocommit_dml_mode='%s'", getNamespace(dialect), mode.name()))); + verify(connection, times(1)).setAutocommitDmlMode(mode); + } + } + + @Test + public void testExecuteSetStatementTimeout() { + ConnectionStatementExecutorImpl executor = mock(ConnectionStatementExecutorImpl.class); + when(executor.statementSetStatementTimeout(any(Duration.class))).thenCallRealMethod(); + ConnectionImpl connection = mock(ConnectionImpl.class); + when(executor.getDialect()).thenReturn(dialect); + when(executor.getConnection()).thenReturn(connection); + for (TimeUnit unit : ReadOnlyStalenessUtil.SUPPORTED_UNITS) { + for (Long val : new Long[] {1L, 100L, 999L}) { + ParsedStatement subject = + parser.parse( + Statement.of( + String.format( + "set statement_timeout='%d%s'", + val, ReadOnlyStalenessUtil.getTimeUnitAbbreviation(unit)))); + subject.getClientSideStatement().execute(executor, subject); + verify(connection, times(1)).setStatementTimeout(val, unit); + } + } + if (dialect == Dialect.POSTGRESQL) { + for (Long val : new Long[] {1L, 100L, 999L}) { + 
Mockito.clearInvocations(connection); + ParsedStatement subject = + parser.parse(Statement.of(String.format("set statement_timeout=%d", val))); + subject.getClientSideStatement().execute(executor, subject); + verify(connection, times(1)).setStatementTimeout(val, TimeUnit.MILLISECONDS); + } + + ParsedStatement subject = parser.parse(Statement.of("set statement_timeout=default")); + subject.getClientSideStatement().execute(executor, subject); + } else { + ParsedStatement subject = parser.parse(Statement.of("set statement_timeout=null")); + subject.getClientSideStatement().execute(executor, subject); + } + verify(connection, times(1)).clearStatementTimeout(); + } + + @Test + public void testExecuteSetReadOnlyStaleness() { + ParsedStatement subject = + parser.parse( + Statement.of(String.format("set %sread_only_staleness='foo'", getNamespace(dialect)))); + ConnectionImpl connection = mock(ConnectionImpl.class); + ConnectionStatementExecutorImpl executor = mock(ConnectionStatementExecutorImpl.class); + when(executor.getDialect()).thenReturn(dialect); + when(executor.getConnection()).thenReturn(connection); + when(executor.statementSetReadOnlyStaleness(any(TimestampBound.class))).thenCallRealMethod(); + for (TimestampBound val : + new TimestampBound[] { + TimestampBound.strong(), + TimestampBound.ofReadTimestamp(Timestamp.now()), + TimestampBound.ofMinReadTimestamp(Timestamp.now()), + TimestampBound.ofExactStaleness(1000L, TimeUnit.SECONDS), + TimestampBound.ofMaxStaleness(2000L, TimeUnit.MICROSECONDS) + }) { + subject + .getClientSideStatement() + .execute( + executor, + parse( + String.format( + "set %sread_only_staleness='%s'", + getNamespace(dialect), timestampBoundToString(val)))); + verify(connection, times(1)).setReadOnlyStaleness(val); + } + } + + private String timestampBoundToString(TimestampBound staleness) { + switch (staleness.getMode()) { + case STRONG: + return "strong"; + case READ_TIMESTAMP: + return "read_timestamp " + 
staleness.getReadTimestamp().toString(); + case MIN_READ_TIMESTAMP: + return "min_read_timestamp " + staleness.getMinReadTimestamp().toString(); + case EXACT_STALENESS: + return "exact_staleness " + staleness.getExactStaleness(TimeUnit.SECONDS) + "s"; + case MAX_STALENESS: + return "max_staleness " + staleness.getMaxStaleness(TimeUnit.MICROSECONDS) + "us"; + default: + throw new IllegalStateException("Unknown mode: " + staleness.getMode()); + } + } + + @Test + public void testExecuteSetOptimizerVersion() { + ParsedStatement subject = + parser.parse( + Statement.of(String.format("set %soptimizer_version='foo'", getNamespace(dialect)))); + ConnectionImpl connection = mock(ConnectionImpl.class); + ConnectionStatementExecutorImpl executor = mock(ConnectionStatementExecutorImpl.class); + when(executor.getDialect()).thenReturn(dialect); + when(executor.getConnection()).thenReturn(connection); + when(executor.statementSetOptimizerVersion(any(String.class))).thenCallRealMethod(); + for (String version : new String[] {"1", "200", "", "LATEST"}) { + subject + .getClientSideStatement() + .execute( + executor, + parse(String.format("set %soptimizer_version='%s'", getNamespace(dialect), version))); + verify(connection, times(1)).setOptimizerVersion(version); + } + } + + @Test + public void testExecuteSetOptimizerStatisticsPackage() { + ParsedStatement subject = + parser.parse( + Statement.of( + String.format("set %soptimizer_statistics_package='foo'", getNamespace(dialect)))); + ConnectionImpl connection = mock(ConnectionImpl.class); + ConnectionStatementExecutorImpl executor = mock(ConnectionStatementExecutorImpl.class); + when(executor.getDialect()).thenReturn(dialect); + when(executor.getConnection()).thenReturn(connection); + when(executor.statementSetOptimizerStatisticsPackage(any(String.class))).thenCallRealMethod(); + for (String statisticsPackage : new String[] {"custom-package", ""}) { + subject + .getClientSideStatement() + .execute( + executor, + parse( + 
String.format( + "set %soptimizer_statistics_package='%s'", + getNamespace(dialect), statisticsPackage))); + verify(connection, times(1)).setOptimizerStatisticsPackage(statisticsPackage); + } + } + + @Test + public void testExecuteSetTransaction() { + ParsedStatement subject = parser.parse(Statement.of("set transaction read_only")); + ConnectionImpl connection = mock(ConnectionImpl.class); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + when(executor.getDialect()).thenReturn(dialect); + for (TransactionMode mode : TransactionMode.values()) { + subject + .getClientSideStatement() + .execute(executor, parse(String.format("set transaction %s", mode.getStatementString()))); + verify(connection, times(1)).setTransactionMode(mode); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionTest.java new file mode 100644 index 000000000000..c8469ae08a92 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionTest.java @@ -0,0 +1,602 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.Options.RpcPriority; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TimestampBound; +import com.google.common.collect.ImmutableList; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas; +import com.google.spanner.v1.DirectedReadOptions.ReplicaSelection; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; +import com.google.spanner.v1.RequestOptions; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.Collections; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; +import java.util.function.Supplier; +import javax.annotation.Nonnull; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Test; +import org.junit.experimental.runners.Enclosed; +import org.junit.runner.RunWith; + +@RunWith(Enclosed.class) +public class ConnectionTest { + public static class EnvironmentConnectionOptionsTest extends AbstractMockServerTest { + @Test + public void 
testUseOptimizerVersionAndStatisticsPackageFromEnvironment() { + try { + SpannerOptions.useEnvironment( + new SpannerOptions.SpannerEnvironment() { + @Nonnull + @Override + public String getOptimizerVersion() { + return "20"; + } + + @Nonnull + @Override + public String getOptimizerStatisticsPackage() { + return "env-package"; + } + }); + try (Connection connection = createConnection()) { + // Do a query and verify that the version from the environment is used. + try (ResultSet rs = connection.executeQuery(SELECT_COUNT_STATEMENT)) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(0)).isEqualTo(COUNT_BEFORE_INSERT); + assertThat(rs.next()).isFalse(); + // Verify query options from the environment. + ExecuteSqlRequest request = getLastExecuteSqlRequest(); + assertThat(request.getQueryOptions().getOptimizerVersion()).isEqualTo("20"); + assertThat(request.getQueryOptions().getOptimizerStatisticsPackage()) + .isEqualTo("env-package"); + } + // Now set one of the query options on the connection. That option should be used in + // combination with the other option from the environment. + connection.execute(Statement.of("SET OPTIMIZER_VERSION='30'")); + connection.execute(Statement.of("SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'")); + try (ResultSet rs = connection.executeQuery(SELECT_COUNT_STATEMENT)) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(0)).isEqualTo(COUNT_BEFORE_INSERT); + assertThat(rs.next()).isFalse(); + + ExecuteSqlRequest request = getLastExecuteSqlRequest(); + // Optimizer version should come from the connection. + assertThat(request.getQueryOptions().getOptimizerVersion()).isEqualTo("30"); + // Optimizer statistics package should come from the connection. + assertThat(request.getQueryOptions().getOptimizerStatisticsPackage()) + .isEqualTo("custom-package"); + } + // Now specify options directly for the query. These should override both the environment + // and what is set on the connection. 
+ try (ResultSet rs = + connection.executeQuery( + Statement.newBuilder(SELECT_COUNT_STATEMENT.getSql()) + .withQueryOptions( + QueryOptions.newBuilder() + .setOptimizerVersion("user-defined-version") + .setOptimizerStatisticsPackage("user-defined-statistics-package") + .build()) + .build())) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(0)).isEqualTo(COUNT_BEFORE_INSERT); + assertThat(rs.next()).isFalse(); + + ExecuteSqlRequest request = getLastExecuteSqlRequest(); + // Optimizer version should come from the query. + assertThat(request.getQueryOptions().getOptimizerVersion()) + .isEqualTo("user-defined-version"); + // Optimizer statistics package should come from the query. + assertThat(request.getQueryOptions().getOptimizerStatisticsPackage()) + .isEqualTo("user-defined-statistics-package"); + } + } + } finally { + SpannerOptions.useDefaultEnvironment(); + } + } + } + + public static class DefaultConnectionOptionsTest extends AbstractMockServerTest { + @Test + public void testDefaultOptimizerVersion() { + try (Connection connection = createConnection()) { + try (ResultSet rs = + connection.executeQuery(Statement.of("SHOW VARIABLE OPTIMIZER_VERSION"))) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getString("OPTIMIZER_VERSION")).isEqualTo(""); + assertThat(rs.next()).isFalse(); + } + } + } + + @Test + public void testDefaultOptimizerStatisticsPackage() { + try (Connection connection = createConnection()) { + try (ResultSet rs = + connection.executeQuery(Statement.of("SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE"))) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getString("OPTIMIZER_STATISTICS_PACKAGE")).isEqualTo(""); + assertThat(rs.next()).isFalse(); + } + } + } + + @Test + public void testExecuteInvalidBatchUpdate() { + try (Connection connection = createConnection()) { + try { + connection.executeBatchUpdate( + ImmutableList.of(INSERT_STATEMENT, SELECT_RANDOM_STATEMENT)); + fail("Missing expected exception"); + } catch (SpannerException 
e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + } + } + } + + @Test + public void testQueryAborted() { + try (Connection connection = createConnection()) { + connection.setRetryAbortsInternally(false); + for (boolean abort : new Boolean[] {true, false}) { + try { + if (abort) { + mockSpanner.abortNextStatement(); + } + connection.executeQuery(SELECT_RANDOM_STATEMENT); + assertThat(abort).isFalse(); + connection.commit(); + } catch (AbortedException e) { + assertThat(abort).isTrue(); + connection.rollback(); + } + } + } + } + + @Test + public void testUpdateAborted() { + try (Connection connection = createConnection()) { + connection.setRetryAbortsInternally(false); + for (boolean abort : new Boolean[] {true, false}) { + try { + if (abort) { + mockSpanner.abortNextStatement(); + } + connection.executeUpdate(INSERT_STATEMENT); + assertThat(abort).isFalse(); + connection.commit(); + } catch (AbortedException e) { + assertThat(abort).isTrue(); + connection.rollback(); + } + } + } + } + + @Test + public void testBatchUpdateAborted() { + try (Connection connection = createConnection()) { + connection.setRetryAbortsInternally(false); + for (boolean abort : new Boolean[] {true, false}) { + try { + if (abort) { + mockSpanner.abortNextStatement(); + } + connection.executeBatchUpdate(ImmutableList.of(INSERT_STATEMENT, INSERT_STATEMENT)); + assertThat(abort).isFalse(); + connection.commit(); + } catch (AbortedException e) { + assertThat(abort).isTrue(); + connection.rollback(); + } + } + } + } + + @Test + public void testReset() { + try (ConnectionImpl connection = (ConnectionImpl) createConnection()) { + assertResetBooleanProperty( + connection, + true, + connection::setRetryAbortsInternally, + connection::isRetryAbortsInternally); + assertResetBooleanProperty( + connection, false, connection::setReadOnly, connection::isReadOnly); + assertResetBooleanProperty( + connection, false, connection::setAutocommit, connection::isAutocommit); + 
assertResetBooleanProperty( + connection, false, connection::setReturnCommitStats, connection::isReturnCommitStats); + assertResetBooleanProperty( + connection, + false, + connection::setDelayTransactionStartUntilFirstWrite, + connection::isDelayTransactionStartUntilFirstWrite); + assertResetBooleanProperty( + connection, + false, + connection::setKeepTransactionAlive, + connection::isKeepTransactionAlive); + assertResetBooleanProperty( + connection, false, connection::setDataBoostEnabled, connection::isDataBoostEnabled); + assertResetBooleanProperty( + connection, false, connection::setAutoPartitionMode, connection::isAutoPartitionMode); + assertResetBooleanProperty( + connection, + false, + connection::setExcludeTxnFromChangeStreams, + connection::isExcludeTxnFromChangeStreams); + + assertResetProperty( + connection, "", "1", connection::setOptimizerVersion, connection::getOptimizerVersion); + assertResetProperty( + connection, + null, + RpcPriority.LOW, + connection::setRPCPriority, + connection::getRPCPriority); + assertResetProperty( + connection, + DdlInTransactionMode.ALLOW_IN_EMPTY_TRANSACTION, + DdlInTransactionMode.AUTO_COMMIT_TRANSACTION, + connection::setDdlInTransactionMode, + connection::getDdlInTransactionMode); + assertResetProperty( + connection, 0, 4, connection::setMaxPartitions, connection::getMaxPartitions); + assertResetProperty( + connection, + 1, + 8, + connection::setMaxPartitionedParallelism, + connection::getMaxPartitionedParallelism); + assertResetProperty( + connection, + null, + Duration.ofMillis(20), + connection::setMaxCommitDelay, + connection::getMaxCommitDelay); + assertResetProperty( + connection, + TimestampBound.strong(), + TimestampBound.ofExactStaleness(10L, TimeUnit.SECONDS), + connection::setReadOnlyStaleness, + connection::getReadOnlyStaleness); + assertResetProperty( + connection, null, "tag", connection::setStatementTag, connection::getStatementTag); + assertResetProperty( + connection, null, "tag", 
connection::setTransactionTag, connection::getTransactionTag); + assertResetProperty( + connection, + null, + DirectedReadOptions.newBuilder() + .setExcludeReplicas( + ExcludeReplicas.newBuilder() + .addReplicaSelections( + ReplicaSelection.newBuilder().setLocation("foo").build()) + .build()) + .build(), + connection::setDirectedRead, + connection::getDirectedRead); + assertResetProperty( + connection, + SavepointSupport.FAIL_AFTER_ROLLBACK, + SavepointSupport.ENABLED, + connection::setSavepointSupport, + connection::getSavepointSupport); + assertResetProperty( + connection, + null, + "descriptor".getBytes(StandardCharsets.UTF_8), + connection::setProtoDescriptors, + connection::getProtoDescriptors); + assertResetProperty( + connection, + null, + "filename", + connection::setProtoDescriptorsFilePath, + connection::getProtoDescriptorsFilePath); + + // Test the AutocommitDmlMode property that is only supported in auto-commit mode. + connection.rollback(); + connection.setAutocommit(true); + assertResetProperty( + connection, + AutocommitDmlMode.TRANSACTIONAL, + AutocommitDmlMode.PARTITIONED_NON_ATOMIC, + connection::setAutocommitDmlMode, + connection::getAutocommitDmlMode); + connection.setAutocommit(false); + + // Statement timeouts use a customer getter/setter, so we need to manually test that. 
+ assertEquals(0L, connection.getStatementTimeout(TimeUnit.MILLISECONDS)); + connection.setStatementTimeout(10L, TimeUnit.SECONDS); + assertEquals(10L, connection.getStatementTimeout(TimeUnit.SECONDS)); + connection.reset(); + assertEquals(0L, connection.getStatementTimeout(TimeUnit.MILLISECONDS)); + } + } + + private void assertResetBooleanProperty( + ConnectionImpl connection, + boolean defaultValue, + Consumer setter, + Supplier getter) { + assertResetProperty(connection, defaultValue, !defaultValue, setter, getter); + } + + private void assertResetProperty( + ConnectionImpl connection, + T defaultValue, + T alternativeValue, + Consumer setter, + Supplier getter) { + assertEquals(defaultValue, getter.get()); + setter.accept(alternativeValue); + assertEquals(alternativeValue, getter.get()); + connection.reset(); + assertEquals(defaultValue, getter.get()); + } + } + + public static class ConnectionRPCPriorityTest extends AbstractMockServerTest { + + @AfterClass + public static void reset() { + mockSpanner.reset(); + } + + protected String getBaseUrl() { + return super.getBaseUrl() + ";rpcPriority=MEDIUM"; + } + + @Test + public void testQuery_RPCPriority() { + try (Connection connection = createConnection()) { + for (boolean autocommit : new boolean[] {true, false}) { + connection.setAutocommit(autocommit); + try (ResultSet rs = connection.executeQuery(SELECT_COUNT_STATEMENT)) {} + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + RequestOptions.Priority.PRIORITY_MEDIUM, + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getPriority()); + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testUpdate_RPCPriority() { + try (Connection connection = createConnection()) { + for (boolean autocommit : new boolean[] {true, false}) { + connection.setAutocommit(autocommit); + connection.executeUpdate(INSERT_STATEMENT); + + assertEquals(1, 
mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + RequestOptions.Priority.PRIORITY_MEDIUM, + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getPriority()); + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testPartitionedUpdate_RPCPriority() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setAutocommitDmlMode(AutocommitDmlMode.PARTITIONED_NON_ATOMIC); + connection.executeUpdate(INSERT_STATEMENT); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + RequestOptions.Priority.PRIORITY_MEDIUM, + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getPriority()); + mockSpanner.clearRequests(); + } + } + + @Test + public void testBatchUpdate_RPCPriority() { + try (Connection connection = createConnection()) { + connection.executeBatchUpdate(Collections.singleton(INSERT_STATEMENT)); + connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals( + RequestOptions.Priority.PRIORITY_MEDIUM, + mockSpanner + .getRequestsOfType(ExecuteBatchDmlRequest.class) + .get(0) + .getRequestOptions() + .getPriority()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals( + RequestOptions.Priority.PRIORITY_MEDIUM, + mockSpanner + .getRequestsOfType(CommitRequest.class) + .get(0) + .getRequestOptions() + .getPriority()); + mockSpanner.clearRequests(); + } + } + + @Test + public void testDmlBatch_RPCPriority() { + try (Connection connection = createConnection()) { + for (boolean autocommit : new boolean[] {true, false}) { + connection.setAutocommit(autocommit); + + connection.startBatchDml(); + connection.execute(INSERT_STATEMENT); + connection.execute(INSERT_STATEMENT); + connection.runBatch(); + + assertEquals(1, 
mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals( + RequestOptions.Priority.PRIORITY_MEDIUM, + mockSpanner + .getRequestsOfType(ExecuteBatchDmlRequest.class) + .get(0) + .getRequestOptions() + .getPriority()); + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testRunBatch_RPCPriority() { + try (Connection connection = createConnection()) { + connection.startBatchDml(); + connection.execute(INSERT_STATEMENT); + connection.execute(INSERT_STATEMENT); + connection.runBatch(); + connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals( + RequestOptions.Priority.PRIORITY_MEDIUM, + mockSpanner + .getRequestsOfType(ExecuteBatchDmlRequest.class) + .get(0) + .getRequestOptions() + .getPriority()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals( + RequestOptions.Priority.PRIORITY_MEDIUM, + mockSpanner + .getRequestsOfType(CommitRequest.class) + .get(0) + .getRequestOptions() + .getPriority()); + mockSpanner.clearRequests(); + } + } + + @Test + public void testShowSetRPCPriority() { + try (Connection connection = createConnection()) { + connection.setRPCPriority(null); + try (ResultSet rs = + connection.execute(Statement.of("SHOW VARIABLE RPC_PRIORITY")).getResultSet()) { + assertTrue(rs.next()); + assertEquals("PRIORITY_UNSPECIFIED", rs.getString("RPC_PRIORITY")); + assertFalse(rs.next()); + } + connection.execute(Statement.of("SET RPC_PRIORITY='LOW'")); + try (ResultSet rs = + connection.execute(Statement.of("SHOW VARIABLE RPC_PRIORITY")).getResultSet()) { + assertTrue(rs.next()); + assertEquals("LOW", rs.getString("RPC_PRIORITY")); + assertFalse(rs.next()); + } + connection.execute(Statement.of("SET RPC_PRIORITY='HIGH'")); + try (ResultSet rs = + connection.execute(Statement.of("SHOW VARIABLE RPC_PRIORITY")).getResultSet()) { + assertTrue(rs.next()); + assertEquals("HIGH", rs.getString("RPC_PRIORITY")); + 
assertFalse(rs.next()); + } + } + } + } + + public static class DialectDetectionTest extends AbstractMockServerTest { + protected String getBaseUrl() { + return super.getBaseUrl() + ";minSessions=1"; + } + + @After + public void reset() { + // Reset dialect to default. + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.detectDialectResult(Dialect.GOOGLE_STANDARD_SQL)); + mockSpanner.reset(); + mockSpanner.removeAllExecutionTimes(); + // Close all open Spanner instances to ensure that each test run gets a fresh instance. + SpannerPool.closeSpannerPool(); + } + + @Test + public void testDefaultGetDialect() { + try (Connection connection = createConnection()) { + assertEquals(Dialect.GOOGLE_STANDARD_SQL, connection.getDialect()); + } + } + + @Test + public void testPostgreSQLGetDialect() { + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.detectDialectResult(Dialect.POSTGRESQL)); + try (Connection connection = createConnection()) { + assertEquals(Dialect.POSTGRESQL, connection.getDialect()); + } + } + + @Test + public void testGetDialect_DatabaseNotFound() throws Exception { + mockSpanner.setBatchCreateSessionsExecutionTime( + SimulatedExecutionTime.stickyDatabaseNotFoundException("invalid-database")); + mockSpanner.setCreateSessionExecutionTime( + SimulatedExecutionTime.stickyDatabaseNotFoundException("invalid-database")); + try (Connection connection = createConnection()) { + SpannerException exception = assertThrows(SpannerException.class, connection::getDialect); + assertEquals(ErrorCode.NOT_FOUND, exception.getErrorCode()); + assertTrue(exception.getMessage().contains("Database with id invalid-database not found")); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/CredentialsProviderTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/CredentialsProviderTest.java new file mode 100644 index 
000000000000..f082fa7042a7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/CredentialsProviderTest.java @@ -0,0 +1,156 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.ConnectionOptionsTest.runWithSystemPropertyEnabled; +import static org.junit.Assert.assertEquals; + +import com.google.api.gax.core.CredentialsProvider; +import com.google.auth.Credentials; +import com.google.auth.oauth2.AccessToken; +import com.google.auth.oauth2.OAuth2Credentials; +import io.grpc.ManagedChannelBuilder; +import java.io.ObjectStreamException; +import java.util.Date; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class CredentialsProviderTest extends AbstractMockServerTest { + private static final AtomicInteger COUNTER = new AtomicInteger(); + + @BeforeClass + public static void resetCounter() { + COUNTER.set(0); + } + + private static final class TestCredentials extends OAuth2Credentials { + private final int id; + + private TestCredentials(int id) { + this.id = id; + } + + private Object readResolve() throws ObjectStreamException { + return this; + } + + @Override + public AccessToken refreshAccessToken() { + return 
AccessToken.newBuilder() + .setTokenValue("foo") + .setExpirationTime(new Date(Long.MAX_VALUE)) + .build(); + } + + public boolean equals(Object obj) { + if (!(obj instanceof TestCredentials)) { + return false; + } + return this.id == ((TestCredentials) obj).id; + } + + public int hashCode() { + return System.identityHashCode(this.id); + } + } + + static final class TestCredentialsProvider implements CredentialsProvider { + @Override + public Credentials getCredentials() { + return new TestCredentials(COUNTER.incrementAndGet()); + } + } + + @Test + public void testCredentialsProvider() throws Throwable { + runWithSystemPropertyEnabled( + ConnectionOptions.ENABLE_CREDENTIALS_PROVIDER_SYSTEM_PROPERTY, + () -> { + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri( + String.format( + "cloudspanner://localhost:%d/projects/proj/instances/inst/databases/db?credentialsProvider=%s", + getPort(), TestCredentialsProvider.class.getName())) + .setConfigurator( + spannerOptions -> { + spannerOptions.setChannelConfigurator(ManagedChannelBuilder::usePlaintext); + spannerOptions.setEnableDirectAccess(false); + }) + .build(); + + try (Connection connection = options.getConnection()) { + assertEquals( + TestCredentials.class, + ((ConnectionImpl) connection) + .getSpanner() + .getOptions() + .getCredentials() + .getClass()); + TestCredentials credentials = + (TestCredentials) + ((ConnectionImpl) connection).getSpanner().getOptions().getCredentials(); + assertEquals(1, credentials.id); + } + // The second connection should get the same credentials from the provider. 
+ try (Connection connection = options.getConnection()) { + assertEquals( + TestCredentials.class, + ((ConnectionImpl) connection) + .getSpanner() + .getOptions() + .getCredentials() + .getClass()); + TestCredentials credentials = + (TestCredentials) + ((ConnectionImpl) connection).getSpanner().getOptions().getCredentials(); + assertEquals(1, credentials.id); + } + + // Creating new ConnectionOptions should refresh the credentials. + options = + ConnectionOptions.newBuilder() + .setUri( + String.format( + "cloudspanner://localhost:%d/projects/proj/instances/inst/databases/db?credentialsProvider=%s", + getPort(), TestCredentialsProvider.class.getName())) + .setConfigurator( + spannerOptions -> { + spannerOptions.setChannelConfigurator(ManagedChannelBuilder::usePlaintext); + spannerOptions.setEnableDirectAccess(false); + }) + .build(); + try (Connection connection = options.getConnection()) { + assertEquals( + TestCredentials.class, + ((ConnectionImpl) connection) + .getSpanner() + .getOptions() + .getCredentials() + .getClass()); + TestCredentials credentials = + (TestCredentials) + ((ConnectionImpl) connection).getSpanner().getOptions().getCredentials(); + assertEquals(2, credentials.id); + } + }); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/CredentialsServiceTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/CredentialsServiceTest.java new file mode 100644 index 000000000000..7b7c41817e45 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/CredentialsServiceTest.java @@ -0,0 +1,90 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.fail; + +import com.google.auth.oauth2.GoogleCredentials; +import com.google.auth.oauth2.ServiceAccountCredentials; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerException; +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Tests for reading and parsing test key files and getting service accounts. */ +@RunWith(JUnit4.class) +public class CredentialsServiceTest { + private static final String FILE_TEST_PATH = + CredentialsServiceTest.class.getResource("test-key.json").getPath(); + private static final String SA_APP_DEFAULT_FILE_TEST_PATH = + CredentialsServiceTest.class.getResource("test-key-app-default.json").getPath(); + + private static final String TEST_PROJECT_ID = "test-project"; + private static final String APP_DEFAULT_PROJECT_ID = "app-default-test-project"; + + private final CredentialsService service = + new CredentialsService() { + + @Override + GoogleCredentials internalGetApplicationDefault() throws IOException { + // Read application default credentials directly from a specific file instead of actually + // fetching the default from the environment. 
+ return ServiceAccountCredentials.fromStream( + // Calling `getResource().getPath()` on Windows returns a string that might start with + // something like `/C:/...`. Paths.get() interprets the leading / as part of the path + // and would be invalid. Use `new File().toPath()` to read from these files. + Files.newInputStream(new File(SA_APP_DEFAULT_FILE_TEST_PATH).toPath())); + } + }; + + @Test + public void testCreateCredentialsDefault() { + ServiceAccountCredentials credentials = + (ServiceAccountCredentials) service.createCredentials(null); + assertThat(credentials.getProjectId(), is(equalTo(APP_DEFAULT_PROJECT_ID))); + } + + @Test + public void testCreateCredentialsFile() { + ServiceAccountCredentials credentials = + (ServiceAccountCredentials) service.createCredentials(FILE_TEST_PATH); + assertThat(credentials.getProjectId(), is(equalTo(TEST_PROJECT_ID))); + } + + @Test(expected = SpannerException.class) + public void testCreateCredentialsInvalidFile() { + service.createCredentials("invalid_file_path.json"); + } + + @Test + public void testCreateCredentialsInvalidCloudStorage() { + try { + service.createCredentials("gs://test-bucket/test-blob"); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode(), is(equalTo(ErrorCode.INVALID_ARGUMENT))); + assertThat(e.getCause().getMessage(), is(equalTo(CredentialsService.GCS_NOT_SUPPORTED_MSG))); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DdlBatchTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DdlBatchTest.java new file mode 100644 index 000000000000..e500851b275d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DdlBatchTest.java @@ -0,0 +1,712 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.fail; +import static org.mockito.ArgumentMatchers.isNull; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyList; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.argThat; +import static org.mockito.Mockito.doCallRealMethod; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.SpannerBatchUpdateException; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Statement; +import 
com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; +import com.google.cloud.spanner.connection.UnitOfWork.CallType; +import com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState; +import com.google.common.io.ByteStreams; +import com.google.protobuf.Timestamp; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import io.grpc.Status; +import io.opentelemetry.api.trace.Span; +import java.io.InputStream; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.ArgumentMatcher; + +@RunWith(JUnit4.class) +public class DdlBatchTest { + + private DdlClient createDefaultMockDdlClient() { + return createDefaultMockDdlClient(false, 0L); + } + + private DdlClient createDefaultMockDdlClient(boolean exceptionOnGetResult) { + return createDefaultMockDdlClient(exceptionOnGetResult, 0L); + } + + private DdlClient createDefaultMockDdlClient(long waitForMillis) { + return createDefaultMockDdlClient(false, waitForMillis); + } + + private DdlClient createDefaultMockDdlClient( + boolean exceptionOnGetResult, final long waitForMillis) { + try { + DdlClient ddlClient = mock(DdlClient.class); + @SuppressWarnings("unchecked") + final OperationFuture operation = + mock(OperationFuture.class); + if (waitForMillis > 0L) { + when(operation.get()) + .thenAnswer( + invocation -> { + Thread.sleep(waitForMillis); + return null; + }); + } else if (exceptionOnGetResult) { + when(operation.get()) + .thenThrow( + SpannerExceptionFactory.newSpannerException( + ErrorCode.UNKNOWN, "ddl statement failed")); + } else { + when(operation.get()).thenReturn(null); + } + 
UpdateDatabaseDdlMetadata.Builder metadataBuilder = UpdateDatabaseDdlMetadata.newBuilder(); + if (!exceptionOnGetResult) { + metadataBuilder.addCommitTimestamps( + Timestamp.newBuilder().setSeconds(System.currentTimeMillis() * 1000L)); + } + ApiFuture metadataFuture = + ApiFutures.immediateFuture(metadataBuilder.build()); + when(operation.getMetadata()).thenReturn(metadataFuture); + when(ddlClient.executeDdl(anyString(), any())).thenReturn(operation); + when(ddlClient.executeDdl(anyList(), any())).thenReturn(operation); + doCallRealMethod() + .when(ddlClient) + .runWithRetryForMissingDefaultSequenceKind(any(), any(), any(), any()); + return ddlClient; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private DdlBatch createSubject() { + return createSubject(createDefaultMockDdlClient()); + } + + private DdlBatch createSubject(DdlClient ddlClient) { + return createSubject(ddlClient, mock(DatabaseClient.class)); + } + + private DdlBatch createSubject(DdlClient ddlClient, DatabaseClient dbClient) { + when(dbClient.getDialect()).thenReturn(Dialect.GOOGLE_STANDARD_SQL); + return DdlBatch.newBuilder() + .setDdlClient(ddlClient) + .setDatabaseClient(dbClient) + .withStatementExecutor(new StatementExecutor()) + .setSpan(Span.getInvalid()) + .setConnectionState(new ConnectionState(new HashMap<>())) + .build(); + } + + @Test + public void testExecuteQuery() { + DdlBatch batch = createSubject(); + try { + batch.executeQueryAsync(CallType.SYNC, mock(ParsedStatement.class), AnalyzeMode.NONE); + fail("expected FAILED_PRECONDITION"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + + @Test + public void testExecuteCreateDatabase() { + DdlBatch batch = createSubject(); + assertThrows( + IllegalArgumentException.class, + () -> + batch.executeDdlAsync( + CallType.SYNC, + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + .parse(Statement.of("CREATE DATABASE foo")))); + } + + @Test + 
public void testExecuteUpdate() { + DdlBatch batch = createSubject(); + try { + batch.executeUpdateAsync(CallType.SYNC, mock(ParsedStatement.class)); + fail("expected FAILED_PRECONDITION"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + + @Test + public void testExecuteBatchUpdate() { + DdlBatch batch = createSubject(); + try { + batch.executeBatchUpdateAsync( + CallType.SYNC, Collections.singleton(mock(ParsedStatement.class))); + fail("expected FAILED_PRECONDITION"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + + @Test + public void testGetCommitTimestamp() { + DdlBatch batch = createSubject(); + get(batch.runBatchAsync(CallType.SYNC)); + try { + batch.getCommitTimestamp(); + fail("expected FAILED_PRECONDITION"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + + @Test + public void testGetCommitResponse() { + DdlBatch batch = createSubject(); + get(batch.runBatchAsync(CallType.SYNC)); + try { + batch.getCommitResponse(); + fail("expected FAILED_PRECONDITION"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + assertNull(batch.getCommitResponseOrNull()); + } + + @Test + public void testGetReadTimestamp() { + DdlBatch batch = createSubject(); + get(batch.runBatchAsync(CallType.SYNC)); + try { + batch.getReadTimestamp(); + fail("expected FAILED_PRECONDITION"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + + @Test + public void testWriteIterable() { + DdlBatch batch = createSubject(); + try { + batch.writeAsync( + CallType.SYNC, Collections.singletonList(Mutation.newInsertBuilder("foo").build())); + fail("expected FAILED_PRECONDITION"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + + @Test + public void 
testIsReadOnly() { + DdlBatch batch = createSubject(); + assertThat(batch.isReadOnly(), is(false)); + } + + @Test + public void testGetStateAndIsActive() { + DdlBatch batch = createSubject(); + assertThat(batch.getState(), is(UnitOfWorkState.STARTED)); + assertThat(batch.isActive(), is(true)); + get(batch.runBatchAsync(CallType.SYNC)); + assertThat(batch.getState(), is(UnitOfWorkState.RAN)); + assertThat(batch.isActive(), is(false)); + + batch = createSubject(); + assertThat(batch.getState(), is(UnitOfWorkState.STARTED)); + assertThat(batch.isActive(), is(true)); + batch.abortBatch(); + assertThat(batch.getState(), is(UnitOfWorkState.ABORTED)); + assertThat(batch.isActive(), is(false)); + + DdlClient client = mock(DdlClient.class); + SpannerException exception = + SpannerExceptionFactory.newSpannerException(ErrorCode.FAILED_PRECONDITION, "test"); + doThrow(exception).when(client).executeDdl(anyList(), isNull()); + doCallRealMethod() + .when(client) + .runWithRetryForMissingDefaultSequenceKind(any(), any(), any(), any()); + batch = createSubject(client); + assertThat(batch.getState(), is(UnitOfWorkState.STARTED)); + assertThat(batch.isActive(), is(true)); + ParsedStatement statement = mock(ParsedStatement.class); + when(statement.getStatement()).thenReturn(Statement.of("CREATE TABLE FOO")); + when(statement.getSql()).thenReturn("CREATE TABLE FOO"); + when(statement.getType()).thenReturn(StatementType.DDL); + batch.executeDdlAsync(CallType.SYNC, statement); + try { + get(batch.runBatchAsync(CallType.SYNC)); + fail("Missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode(), is(equalTo(ErrorCode.FAILED_PRECONDITION))); + } + assertThat(batch.getState(), is(UnitOfWorkState.RUN_FAILED)); + assertThat(batch.isActive(), is(false)); + } + + private static IsListOfStringsWithSize isEmptyListOfStrings() { + return new IsListOfStringsWithSize(0); + } + + private static IsListOfStringsWithSize isListOfStringsWithSize(int size) { + return new 
IsListOfStringsWithSize(size); + } + + private static class IsListOfStringsWithSize implements ArgumentMatcher> { + private final int size; + + private IsListOfStringsWithSize(int size) { + this.size = size; + } + + @Override + public boolean matches(List list) { + return list.size() == size; + } + } + + @Test + public void testRunBatch() { + DdlClient client = createDefaultMockDdlClient(); + DdlBatch batch = createSubject(client); + get(batch.runBatchAsync(CallType.SYNC)); + assertThat(batch.getState(), is(UnitOfWorkState.RAN)); + verify(client, never()).executeDdl(anyString(), isNull()); + verify(client, never()).executeDdl(argThat(isEmptyListOfStrings()), isNull()); + + ParsedStatement statement = mock(ParsedStatement.class); + when(statement.getType()).thenReturn(StatementType.DDL); + when(statement.getStatement()).thenReturn(Statement.of("CREATE TABLE FOO")); + when(statement.getSql()).thenReturn("CREATE TABLE FOO"); + + client = createDefaultMockDdlClient(); + batch = createSubject(client); + batch.executeDdlAsync(CallType.SYNC, statement); + get(batch.runBatchAsync(CallType.SYNC)); + verify(client).executeDdl(argThat(isListOfStringsWithSize(1)), isNull()); + + client = createDefaultMockDdlClient(); + batch = createSubject(client); + batch.executeDdlAsync(CallType.SYNC, statement); + batch.executeDdlAsync(CallType.SYNC, statement); + get(batch.runBatchAsync(CallType.SYNC)); + verify(client).executeDdl(argThat(isListOfStringsWithSize(2)), isNull()); + assertThat(batch.getState(), is(UnitOfWorkState.RAN)); + boolean exception = false; + try { + get(batch.runBatchAsync(CallType.SYNC)); + } catch (SpannerException e) { + if (e.getErrorCode() != ErrorCode.FAILED_PRECONDITION) { + throw e; + } + exception = true; + } + assertThat(exception, is(true)); + assertThat(batch.getState(), is(UnitOfWorkState.RAN)); + exception = false; + try { + batch.executeDdlAsync(CallType.SYNC, statement); + } catch (SpannerException e) { + if (e.getErrorCode() != 
ErrorCode.FAILED_PRECONDITION) { + throw e; + } + exception = true; + } + assertThat(exception, is(true)); + exception = false; + try { + batch.executeDdlAsync(CallType.SYNC, statement); + } catch (SpannerException e) { + if (e.getErrorCode() != ErrorCode.FAILED_PRECONDITION) { + throw e; + } + exception = true; + } + assertThat(exception, is(true)); + + client = createDefaultMockDdlClient(true); + batch = createSubject(client); + batch.executeDdlAsync(CallType.SYNC, statement); + batch.executeDdlAsync(CallType.SYNC, statement); + exception = false; + try { + get(batch.runBatchAsync(CallType.SYNC)); + } catch (SpannerException e) { + exception = true; + } + assertThat(exception, is(true)); + assertThat(batch.getState(), is(UnitOfWorkState.RUN_FAILED)); + verify(client).executeDdl(argThat(isListOfStringsWithSize(2)), isNull()); + + // verify when protoDescriptors is null + client = createDefaultMockDdlClient(); + DatabaseClient dbClient = mock(DatabaseClient.class); + when(dbClient.getDialect()).thenReturn(Dialect.GOOGLE_STANDARD_SQL); + batch = + DdlBatch.newBuilder() + .setDdlClient(client) + .setDatabaseClient(dbClient) + .withStatementExecutor(new StatementExecutor()) + .setSpan(Span.getInvalid()) + .setProtoDescriptors(null) + .setConnectionState(new ConnectionState(new HashMap<>())) + .build(); + batch.executeDdlAsync(CallType.SYNC, statement); + batch.executeDdlAsync(CallType.SYNC, statement); + get(batch.runBatchAsync(CallType.SYNC)); + verify(client).executeDdl(argThat(isListOfStringsWithSize(2)), isNull()); + + // verify when protoDescriptors is not null + byte[] protoDescriptors; + try { + InputStream in = + DdlBatchTest.class + .getClassLoader() + .getResourceAsStream("com/google/cloud/spanner/descriptors.pb"); + assertNotNull(in); + protoDescriptors = ByteStreams.toByteArray(in); + } catch (Exception e) { + throw SpannerExceptionFactory.newSpannerException(e); + } + client = createDefaultMockDdlClient(); + batch = + DdlBatch.newBuilder() + 
.setDdlClient(client) + .setDatabaseClient(dbClient) + .withStatementExecutor(new StatementExecutor()) + .setSpan(Span.getInvalid()) + .setProtoDescriptors(protoDescriptors) + .setConnectionState(new ConnectionState(new HashMap<>())) + .build(); + batch.executeDdlAsync(CallType.SYNC, statement); + batch.executeDdlAsync(CallType.SYNC, statement); + get(batch.runBatchAsync(CallType.SYNC)); + verify(client).executeDdl(argThat(isListOfStringsWithSize(2)), any(byte[].class)); + } + + @Test + public void testUpdateCount() throws InterruptedException, ExecutionException { + DdlClient client = mock(DdlClient.class); + UpdateDatabaseDdlMetadata metadata = + UpdateDatabaseDdlMetadata.newBuilder() + .addCommitTimestamps( + Timestamp.newBuilder().setSeconds(System.currentTimeMillis() * 1000L - 1L)) + .addCommitTimestamps( + Timestamp.newBuilder().setSeconds(System.currentTimeMillis() * 1000L)) + .addAllStatements(Arrays.asList("CREATE TABLE FOO", "CREATE TABLE BAR")) + .build(); + ApiFuture metadataFuture = ApiFutures.immediateFuture(metadata); + @SuppressWarnings("unchecked") + OperationFuture operationFuture = mock(OperationFuture.class); + when(operationFuture.get()).thenReturn(null); + when(operationFuture.getMetadata()).thenReturn(metadataFuture); + when(client.executeDdl(argThat(isListOfStringsWithSize(2)), isNull())) + .thenReturn(operationFuture); + DatabaseClient dbClient = mock(DatabaseClient.class); + when(dbClient.getDialect()).thenReturn(Dialect.GOOGLE_STANDARD_SQL); + DdlBatch batch = + DdlBatch.newBuilder() + .withStatementExecutor(new StatementExecutor()) + .setDdlClient(client) + .setDatabaseClient(dbClient) + .setSpan(Span.getInvalid()) + .setConnectionState(new ConnectionState(new HashMap<>())) + .build(); + batch.executeDdlAsync( + CallType.SYNC, + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + .parse(Statement.of("CREATE TABLE FOO"))); + batch.executeDdlAsync( + CallType.SYNC, + 
AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + .parse(Statement.of("CREATE TABLE BAR"))); + long[] updateCounts = get(batch.runBatchAsync(CallType.SYNC)); + assertThat(updateCounts.length, is(equalTo(2))); + assertThat(updateCounts[0], is(equalTo(1L))); + assertThat(updateCounts[1], is(equalTo(1L))); + } + + @Test + public void testFailedUpdateCount() throws InterruptedException, ExecutionException { + DdlClient client = mock(DdlClient.class); + UpdateDatabaseDdlMetadata metadata = + UpdateDatabaseDdlMetadata.newBuilder() + .addCommitTimestamps( + Timestamp.newBuilder().setSeconds(System.currentTimeMillis() * 1000L - 1L)) + .addAllStatements(Arrays.asList("CREATE TABLE FOO", "CREATE TABLE INVALID_TABLE")) + .build(); + ApiFuture metadataFuture = ApiFutures.immediateFuture(metadata); + @SuppressWarnings("unchecked") + OperationFuture operationFuture = mock(OperationFuture.class); + when(operationFuture.get()) + .thenThrow( + new ExecutionException( + "ddl statement failed", Status.INVALID_ARGUMENT.asRuntimeException())); + when(operationFuture.getMetadata()).thenReturn(metadataFuture); + doCallRealMethod() + .when(client) + .runWithRetryForMissingDefaultSequenceKind(any(), any(), any(), any()); + when(client.executeDdl(argThat(isListOfStringsWithSize(2)), isNull())) + .thenReturn(operationFuture); + DatabaseClient dbClient = mock(DatabaseClient.class); + when(dbClient.getDialect()).thenReturn(Dialect.GOOGLE_STANDARD_SQL); + DdlBatch batch = + DdlBatch.newBuilder() + .withStatementExecutor(new StatementExecutor()) + .setDdlClient(client) + .setDatabaseClient(dbClient) + .setSpan(Span.getInvalid()) + .setConnectionState(new ConnectionState(new HashMap<>())) + .build(); + batch.executeDdlAsync( + CallType.SYNC, + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + .parse(Statement.of("CREATE TABLE FOO"))); + batch.executeDdlAsync( + CallType.SYNC, + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + 
.parse(Statement.of("CREATE TABLE INVALID_TABLE"))); + try { + get(batch.runBatchAsync(CallType.SYNC)); + fail("missing expected exception"); + } catch (SpannerBatchUpdateException e) { + assertThat(e.getUpdateCounts().length, is(equalTo(2))); + assertThat(e.getUpdateCounts()[0], is(equalTo(1L))); + assertThat(e.getUpdateCounts()[1], is(equalTo(0L))); + } + } + + @Test + public void testFailedAfterFirstStatement() throws InterruptedException, ExecutionException { + DdlClient client = mock(DdlClient.class); + doCallRealMethod() + .when(client) + .runWithRetryForMissingDefaultSequenceKind(any(), any(), any(), any()); + UpdateDatabaseDdlMetadata metadata = + UpdateDatabaseDdlMetadata.newBuilder() + .addCommitTimestamps( + Timestamp.newBuilder().setSeconds(System.currentTimeMillis() * 1000L - 1L)) + .addAllStatements(Arrays.asList("CREATE TABLE FOO", "CREATE TABLE INVALID_TABLE")) + .build(); + ApiFuture metadataFuture = ApiFutures.immediateFuture(metadata); + @SuppressWarnings("unchecked") + OperationFuture operationFuture = mock(OperationFuture.class); + when(operationFuture.get()) + .thenThrow( + new ExecutionException( + "ddl statement failed", Status.INVALID_ARGUMENT.asRuntimeException())); + when(operationFuture.getMetadata()).thenReturn(metadataFuture); + when(client.executeDdl(argThat(isListOfStringsWithSize(2)), isNull())) + .thenReturn(operationFuture); + DatabaseClient dbClient = mock(DatabaseClient.class); + when(dbClient.getDialect()).thenReturn(Dialect.GOOGLE_STANDARD_SQL); + DdlBatch batch = + DdlBatch.newBuilder() + .withStatementExecutor(new StatementExecutor()) + .setDdlClient(client) + .setDatabaseClient(dbClient) + .setSpan(Span.getInvalid()) + .setConnectionState(new ConnectionState(new HashMap<>())) + .build(); + batch.executeDdlAsync( + CallType.SYNC, + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + .parse(Statement.of("CREATE TABLE FOO"))); + batch.executeDdlAsync( + CallType.SYNC, + 
AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + .parse(Statement.of("CREATE TABLE INVALID_TABLE"))); + try { + get(batch.runBatchAsync(CallType.SYNC)); + fail("missing expected exception"); + } catch (SpannerBatchUpdateException e) { + assertThat(e.getUpdateCounts().length, is(equalTo(2))); + assertThat(e.getUpdateCounts()[0], is(equalTo(1L))); + assertThat(e.getUpdateCounts()[1], is(equalTo(0L))); + } + } + + @Test + public void testAbort() { + DdlClient client = createDefaultMockDdlClient(); + DdlBatch batch = createSubject(client); + batch.abortBatch(); + assertThat(batch.getState(), is(UnitOfWorkState.ABORTED)); + verify(client, never()).executeDdl(anyString(), isNull()); + verify(client, never()).executeDdl(anyList(), isNull()); + + ParsedStatement statement = mock(ParsedStatement.class); + when(statement.getType()).thenReturn(StatementType.DDL); + when(statement.getStatement()).thenReturn(Statement.of("CREATE TABLE FOO")); + when(statement.getSql()).thenReturn("CREATE TABLE FOO"); + + client = createDefaultMockDdlClient(); + batch = createSubject(client); + batch.executeDdlAsync(CallType.SYNC, statement); + batch.abortBatch(); + verify(client, never()).executeDdl(anyList(), isNull()); + + client = createDefaultMockDdlClient(); + batch = createSubject(client); + batch.executeDdlAsync(CallType.SYNC, statement); + batch.executeDdlAsync(CallType.SYNC, statement); + batch.abortBatch(); + verify(client, never()).executeDdl(anyList(), isNull()); + + client = createDefaultMockDdlClient(); + batch = createSubject(client); + batch.executeDdlAsync(CallType.SYNC, statement); + batch.executeDdlAsync(CallType.SYNC, statement); + batch.abortBatch(); + verify(client, never()).executeDdl(anyList(), isNull()); + boolean exception = false; + try { + get(batch.runBatchAsync(CallType.SYNC)); + } catch (SpannerException e) { + if (e.getErrorCode() != ErrorCode.FAILED_PRECONDITION) { + throw e; + } + exception = true; + } + assertThat(exception, is(true)); + 
verify(client, never()).executeDdl(anyList(), isNull()); + } + + @Test + public void testCancel() { + ParsedStatement statement = mock(ParsedStatement.class); + when(statement.getType()).thenReturn(StatementType.DDL); + when(statement.getStatement()).thenReturn(Statement.of("CREATE TABLE FOO")); + when(statement.getSql()).thenReturn("CREATE TABLE FOO"); + + DdlClient client = createDefaultMockDdlClient(10000L); + final DdlBatch batch = createSubject(client); + batch.executeDdlAsync(CallType.SYNC, statement); + Executors.newSingleThreadScheduledExecutor() + .schedule(batch::cancel, 100, TimeUnit.MILLISECONDS); + try { + get(batch.runBatchAsync(CallType.SYNC)); + fail("expected CANCELLED"); + } catch (SpannerException e) { + assertEquals(ErrorCode.CANCELLED, e.getErrorCode()); + } + } + + @Test + public void testCommit() { + DdlBatch batch = createSubject(); + try { + batch.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE); + fail("expected FAILED_PRECONDITION"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + + @Test + public void testRollback() { + DdlBatch batch = createSubject(); + try { + batch.rollbackAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE); + fail("expected FAILED_PRECONDITION"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + + @Test + public void testExtractUpdateCounts() { + DdlBatch batch = createSubject(); + UpdateDatabaseDdlMetadata metadata = + UpdateDatabaseDdlMetadata.newBuilder() + .addCommitTimestamps(Timestamp.newBuilder().setSeconds(1000L).build()) + .addCommitTimestamps(Timestamp.newBuilder().setSeconds(2000L).build()) + .addStatements("CREATE TABLE FOO") + .addStatements("CREATE TABLE BAR") + .addStatements("CREATE TABLE BAZ") + .build(); + long[] updateCounts = batch.extractUpdateCounts(metadata); + assertThat(updateCounts, is(equalTo(new long[] {1L, 1L, 0L}))); + + metadata = + 
UpdateDatabaseDdlMetadata.newBuilder() + .addCommitTimestamps(Timestamp.newBuilder().setSeconds(1000L).build()) + .addCommitTimestamps(Timestamp.newBuilder().setSeconds(2000L).build()) + .addCommitTimestamps(Timestamp.newBuilder().setSeconds(3000L).build()) + .addStatements("CREATE TABLE FOO") + .addStatements("CREATE TABLE BAR") + .addStatements("CREATE TABLE BAZ") + .build(); + updateCounts = batch.extractUpdateCounts(metadata); + assertThat(updateCounts, is(equalTo(new long[] {1L, 1L, 1L}))); + + metadata = + UpdateDatabaseDdlMetadata.newBuilder() + .addCommitTimestamps(Timestamp.newBuilder().setSeconds(1000L).build()) + .addCommitTimestamps(Timestamp.newBuilder().setSeconds(2000L).build()) + .addCommitTimestamps(Timestamp.newBuilder().setSeconds(3000L).build()) + .addStatements("CREATE TABLE FOO") + .addStatements("CREATE TABLE BAR") + .addStatements("CREATE TABLE BAZ") + .build(); + updateCounts = batch.extractUpdateCounts(metadata); + assertThat(updateCounts, is(equalTo(new long[] {1L, 1L, 1L}))); + + // This is not something Cloud Spanner should return, but the method can handle it. 
+ metadata = + UpdateDatabaseDdlMetadata.newBuilder() + .addCommitTimestamps(Timestamp.newBuilder().setSeconds(1000L).build()) + .addCommitTimestamps(Timestamp.newBuilder().setSeconds(2000L).build()) + .addCommitTimestamps(Timestamp.newBuilder().setSeconds(3000L).build()) + .addCommitTimestamps(Timestamp.newBuilder().setSeconds(4000L).build()) + .addStatements("CREATE TABLE FOO") + .addStatements("CREATE TABLE BAR") + .addStatements("CREATE TABLE BAZ") + .build(); + updateCounts = batch.extractUpdateCounts(metadata); + assertThat(updateCounts, is(equalTo(new long[] {1L, 1L, 1L}))); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DdlClientTests.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DdlClientTests.java new file mode 100644 index 000000000000..3a25437354fb --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DdlClientTests.java @@ -0,0 +1,132 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyList; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.isNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.common.base.Suppliers; +import com.google.common.io.ByteStreams; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import java.io.InputStream; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class DdlClientTests { + + private final String projectId = "test-project"; + private final String instanceId = "test-instance"; + private final String databaseId = "test-database"; + + private DdlClient createSubject(DatabaseAdminClient client) { + return DdlClient.newBuilder() + .setDialectSupplier(Suppliers.ofInstance(Dialect.GOOGLE_STANDARD_SQL)) + .setProjectId(projectId) + .setInstanceId(instanceId) + .setDatabaseName(databaseId) + .setDatabaseAdminClient(client) + .build(); + } + + @Test + public void testExecuteDdl() throws InterruptedException, ExecutionException { + byte[] protoDescriptors; + try { + InputStream in = + DdlBatchTest.class + .getClassLoader() + 
.getResourceAsStream("com/google/cloud/spanner/descriptors.pb"); + assertNotNull(in); + protoDescriptors = ByteStreams.toByteArray(in); + } catch (Exception e) { + throw SpannerExceptionFactory.newSpannerException(e); + } + + DatabaseAdminClient client = mock(DatabaseAdminClient.class); + Database database = mock(Database.class); + Database.Builder databaseBuilder = mock(Database.Builder.class); + @SuppressWarnings("unchecked") + OperationFuture operation = mock(OperationFuture.class); + + when(operation.get()).thenReturn(null); + when(client.newDatabaseBuilder((DatabaseId.of(projectId, instanceId, databaseId)))) + .thenReturn(databaseBuilder); + when(databaseBuilder.setProtoDescriptors(protoDescriptors)).thenReturn(databaseBuilder); + when(databaseBuilder.build()).thenReturn(database); + when(client.updateDatabaseDdl(eq(database), anyList(), isNull())).thenReturn(operation); + + DdlClient subject = createSubject(client); + String ddl = "CREATE TABLE FOO"; + subject.executeDdl(ddl, null); + verify(databaseBuilder, never()).setProtoDescriptors(any(byte[].class)); + verify(client).updateDatabaseDdl(database, Collections.singletonList(ddl), null); + + subject = createSubject(client); + List ddlList = Arrays.asList("CREATE TABLE FOO", "DROP TABLE FOO"); + subject.executeDdl(ddlList, null); + verify(databaseBuilder, never()).setProtoDescriptors(any(byte[].class)); + verify(client).updateDatabaseDdl(database, ddlList, null); + + subject = createSubject(client); + ddlList = Arrays.asList("CREATE PROTO BUNDLE", "CREATE TABLE FOO"); + subject.executeDdl(ddlList, protoDescriptors); + verify(databaseBuilder).setProtoDescriptors(protoDescriptors); + verify(client).updateDatabaseDdl(database, ddlList, null); + } + + @Test + public void testIsCreateDatabase() { + for (Dialect dialect : Dialect.values()) { + assertTrue(DdlClient.isCreateDatabaseStatement(dialect, "CREATE DATABASE foo")); + assertTrue(DdlClient.isCreateDatabaseStatement(dialect, "CREATE DATABASE \"foo\"")); + 
assertTrue(DdlClient.isCreateDatabaseStatement(dialect, "CREATE DATABASE `foo`")); + assertTrue(DdlClient.isCreateDatabaseStatement(dialect, "CREATE DATABASE\tfoo")); + assertTrue(DdlClient.isCreateDatabaseStatement(dialect, "CREATE DATABASE\n foo")); + assertTrue(DdlClient.isCreateDatabaseStatement(dialect, "CREATE DATABASE\t\n foo")); + assertTrue(DdlClient.isCreateDatabaseStatement(dialect, "CREATE DATABASE")); + assertTrue(DdlClient.isCreateDatabaseStatement(dialect, "CREATE\t \n DATABASE foo")); + assertTrue(DdlClient.isCreateDatabaseStatement(dialect, "create\t \n DATABASE foo")); + assertTrue(DdlClient.isCreateDatabaseStatement(dialect, "create database foo")); + + assertFalse(DdlClient.isCreateDatabaseStatement(dialect, "CREATE VIEW foo")); + assertFalse(DdlClient.isCreateDatabaseStatement(dialect, "CREATE DATABAS foo")); + assertFalse(DdlClient.isCreateDatabaseStatement(dialect, "CREATE DATABASEfoo")); + assertFalse(DdlClient.isCreateDatabaseStatement(dialect, "CREATE foo")); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DdlTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DdlTest.java new file mode 100644 index 000000000000..3585421e32cf --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DdlTest.java @@ -0,0 +1,410 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.MissingDefaultSequenceKindException; +import com.google.cloud.spanner.SpannerBatchUpdateException; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.StatementResult.ResultType; +import com.google.longrunning.Operation; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Any; +import com.google.protobuf.Empty; +import com.google.rpc.Code; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; +import com.google.spanner.v1.CommitRequest; +import java.util.List; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class DdlTest extends AbstractMockServerTest { + + @After + public void reset() { + mockDatabaseAdmin.reset(); + } + + @Override + protected String getBaseUrl() { + return String.format( + "cloudspanner://localhost:%d/projects/proj/instances/inst/databases/db?usePlainText=true", + getPort()); + } + + private void addUpdateDdlResponse() { + mockDatabaseAdmin.addResponse( + Operation.newBuilder() + .setMetadata( + Any.pack( + UpdateDatabaseDdlMetadata.newBuilder() + .setDatabase("projects/proj/instances/inst/databases/db") + .build())) + .setName("projects/proj/instances/inst/databases/db/operations/1") + .setDone(true) + .setResponse(Any.pack(Empty.getDefaultInstance())) + .build()); + } + + private void addUpdateDdlResponse(com.google.rpc.Status error) { + 
mockDatabaseAdmin.addResponse( + Operation.newBuilder() + .setMetadata( + Any.pack( + UpdateDatabaseDdlMetadata.newBuilder() + .setDatabase("projects/proj/instances/inst/databases/db") + .build())) + .setName("projects/proj/instances/inst/databases/db/operations/1") + .setDone(true) + // .setResponse(Any.pack(Empty.getDefaultInstance())) + .setError(error) + .build()); + } + + @Test + public void testSingleAnalyzeStatement() { + addUpdateDdlResponse(); + + try (Connection connection = createConnection()) { + StatementResult result = connection.execute(Statement.of("analyze")); + assertEquals(ResultType.NO_RESULT, result.getResultType()); + } + + List requests = + mockDatabaseAdmin.getRequests().stream() + .filter(request -> request instanceof UpdateDatabaseDdlRequest) + .map(request -> (UpdateDatabaseDdlRequest) request) + .collect(Collectors.toList()); + assertEquals(1, requests.size()); + assertEquals(1, requests.get(0).getStatementsCount()); + assertEquals("analyze", requests.get(0).getStatements(0)); + } + + @Test + public void testBatchedAnalyzeStatement() { + addUpdateDdlResponse(); + + try (Connection connection = createConnection()) { + connection.startBatchDdl(); + assertEquals( + ResultType.NO_RESULT, + connection + .execute(Statement.of("create table foo (id int64) primary key (id)")) + .getResultType()); + assertEquals( + ResultType.NO_RESULT, connection.execute(Statement.of("analyze")).getResultType()); + connection.runBatch(); + } + + List requests = + mockDatabaseAdmin.getRequests().stream() + .filter(request -> request instanceof UpdateDatabaseDdlRequest) + .map(request -> (UpdateDatabaseDdlRequest) request) + .collect(Collectors.toList()); + assertEquals(1, requests.size()); + assertEquals(2, requests.get(0).getStatementsCount()); + assertEquals("create table foo (id int64) primary key (id)", requests.get(0).getStatements(0)); + assertEquals("analyze", requests.get(0).getStatements(1)); + } + + @Test + public void testDdlAtStartOfTransaction() { + 
Statement statement = Statement.of("create table foo (id int64) primary key (id)"); + for (DdlInTransactionMode mode : DdlInTransactionMode.values()) { + mockDatabaseAdmin.getRequests().clear(); + if (mode != DdlInTransactionMode.FAIL) { + addUpdateDdlResponse(); + } + + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setDdlInTransactionMode(mode); + + if (mode == DdlInTransactionMode.FAIL) { + SpannerException exception = + assertThrows(SpannerException.class, () -> connection.execute(statement)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } else { + assertEquals(ResultType.NO_RESULT, connection.execute(statement).getResultType()); + assertEquals(1, mockDatabaseAdmin.getRequests().size()); + } + } + } + } + + @Test + public void testDdlBatchAtStartOfTransaction() { + for (DdlInTransactionMode mode : DdlInTransactionMode.values()) { + mockDatabaseAdmin.getRequests().clear(); + if (mode != DdlInTransactionMode.FAIL) { + addUpdateDdlResponse(); + } + + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setDdlInTransactionMode(mode); + + if (mode == DdlInTransactionMode.FAIL) { + SpannerException exception = + assertThrows( + SpannerException.class, + () -> connection.execute(Statement.of("start batch ddl"))); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } else { + connection.execute(Statement.of("start batch ddl")); + connection.execute(Statement.of("create table foo")); + connection.execute(Statement.of("alter table bar")); + connection.execute(Statement.of("run batch")); + assertEquals(1, mockDatabaseAdmin.getRequests().size()); + UpdateDatabaseDdlRequest request = + (UpdateDatabaseDdlRequest) mockDatabaseAdmin.getRequests().get(0); + assertEquals(2, request.getStatementsCount()); + } + } + } + } + + @Test + public void testDdlInTransaction() { + Statement statement = Statement.of("create table foo (id 
int64) primary key (id)"); + for (DdlInTransactionMode mode : DdlInTransactionMode.values()) { + mockDatabaseAdmin.getRequests().clear(); + if (mode == DdlInTransactionMode.AUTO_COMMIT_TRANSACTION) { + addUpdateDdlResponse(); + } + + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setDdlInTransactionMode(mode); + + connection.execute(INSERT_STATEMENT); + + if (mode != DdlInTransactionMode.AUTO_COMMIT_TRANSACTION) { + SpannerException exception = + assertThrows(SpannerException.class, () -> connection.execute(statement)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } else { + assertEquals(ResultType.NO_RESULT, connection.execute(statement).getResultType()); + assertEquals(1, mockDatabaseAdmin.getRequests().size()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + } + } + } + + @Test + public void testDdlBatchInTransaction() { + for (DdlInTransactionMode mode : DdlInTransactionMode.values()) { + mockDatabaseAdmin.getRequests().clear(); + if (mode == DdlInTransactionMode.AUTO_COMMIT_TRANSACTION) { + addUpdateDdlResponse(); + } + + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setDdlInTransactionMode(mode); + + connection.execute(INSERT_STATEMENT); + + if (mode != DdlInTransactionMode.AUTO_COMMIT_TRANSACTION) { + SpannerException exception = + assertThrows( + SpannerException.class, + () -> connection.execute(Statement.of("start batch ddl"))); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } else { + connection.execute(Statement.of("start batch ddl")); + connection.execute(Statement.of("create table foo")); + connection.execute(Statement.of("alter table bar")); + connection.execute(Statement.of("run batch")); + assertEquals(1, mockDatabaseAdmin.getRequests().size()); + UpdateDatabaseDdlRequest request = + (UpdateDatabaseDdlRequest) mockDatabaseAdmin.getRequests().get(0); + 
assertEquals(2, request.getStatementsCount()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + } + } + } + + @Test + public void testMissingDefaultSequenceKindException() { + addUpdateDdlResponse( + com.google.rpc.Status.newBuilder() + .setCode(Code.INVALID_ARGUMENT_VALUE) + .setMessage( + "The sequence kind of an identity column id2 is not specified. Please specify the" + + " sequence kind explicitly or set the database option" + + " `default_sequence_kind`.") + .build()); + try (Connection connection = createConnection()) { + assertNull(connection.getDefaultSequenceKind()); + assertThrows( + MissingDefaultSequenceKindException.class, + () -> + connection.execute( + Statement.of("create table foo (id2 int64 auto_increment primary key"))); + } + // The request should not be retried. + assertEquals(1, mockDatabaseAdmin.getRequests().size()); + } + + @Test + public void testSetsDefaultSequenceKindAndRetriesStatement() { + addUpdateDdlResponse( + com.google.rpc.Status.newBuilder() + .setCode(Code.INVALID_ARGUMENT_VALUE) + .setMessage( + "The sequence kind of an identity column id2 is not specified. Please specify the" + + " sequence kind explicitly or set the database option" + + " `default_sequence_kind`.") + .build()); + // This will be the response for the 'alter database' statement. + addUpdateDdlResponse(); + // This will be the response for the 'create table' statement after the retry. 
+ addUpdateDdlResponse(); + try (Connection connection = createConnection()) { + connection.setDefaultSequenceKind("bit_reversed_positive"); + connection.execute(Statement.of("create table foo (id2 int64 auto_increment primary key")); + } + List requests = mockDatabaseAdmin.getRequests(); + assertEquals(3, requests.size()); + assertEquals( + "create table foo (id2 int64 auto_increment primary key", + ((UpdateDatabaseDdlRequest) requests.get(0)).getStatements(0)); + assertEquals( + "alter database `db` set options (default_sequence_kind='bit_reversed_positive')", + ((UpdateDatabaseDdlRequest) requests.get(1)).getStatements(0)); + assertEquals( + "create table foo (id2 int64 auto_increment primary key", + ((UpdateDatabaseDdlRequest) requests.get(2)).getStatements(0)); + } + + @Test + public void testMissingDefaultSequenceKindExceptionInBatch() { + addUpdateDdlResponse( + com.google.rpc.Status.newBuilder() + .setCode(Code.INVALID_ARGUMENT_VALUE) + .setMessage( + "The sequence kind of an identity column id2 is not specified. Please specify the" + + " sequence kind explicitly or set the database option" + + " `default_sequence_kind`.") + .build()); + try (Connection connection = createConnection()) { + assertNull(connection.getDefaultSequenceKind()); + connection.startBatchDdl(); + connection.execute(Statement.of("create table foo (id2 int64 auto_increment primary key")); + SpannerBatchUpdateException exception = + assertThrows(SpannerBatchUpdateException.class, connection::runBatch); + } + // The request should not be retried. + assertEquals(1, mockDatabaseAdmin.getRequests().size()); + } + + @Test + public void testSetsDefaultSequenceKindAndRetriesBatch() { + addUpdateDdlResponse( + com.google.rpc.Status.newBuilder() + .setCode(Code.INVALID_ARGUMENT_VALUE) + .setMessage( + "The sequence kind of an identity column id2 is not specified. 
Please specify the" + + " sequence kind explicitly or set the database option" + + " `default_sequence_kind`.") + .build()); + // This will be the response for the 'alter database' statement. + addUpdateDdlResponse(); + // This will be the response for the 'create table' statements after the retry. + addUpdateDdlResponse(); + try (Connection connection = createConnection()) { + connection.setDefaultSequenceKind("bit_reversed_positive"); + connection.startBatchDdl(); + connection.execute(Statement.of("create table foo (id1 int64 auto_increment primary key")); + connection.execute(Statement.of("create table bar (id2 int64 auto_increment primary key")); + connection.runBatch(); + } + List requests = mockDatabaseAdmin.getRequests(); + assertEquals(3, requests.size()); + assertEquals( + "create table foo (id1 int64 auto_increment primary key", + ((UpdateDatabaseDdlRequest) requests.get(0)).getStatements(0)); + assertEquals( + "create table bar (id2 int64 auto_increment primary key", + ((UpdateDatabaseDdlRequest) requests.get(0)).getStatements(1)); + assertEquals( + "alter database `db` set options (default_sequence_kind='bit_reversed_positive')", + ((UpdateDatabaseDdlRequest) requests.get(1)).getStatements(0)); + assertEquals( + "create table foo (id1 int64 auto_increment primary key", + ((UpdateDatabaseDdlRequest) requests.get(0)).getStatements(0)); + assertEquals( + "create table bar (id2 int64 auto_increment primary key", + ((UpdateDatabaseDdlRequest) requests.get(0)).getStatements(1)); + } + + @Test + public void testStripTrailingSemicolon() { + addUpdateDdlResponse(); + addUpdateDdlResponse(); + addUpdateDdlResponse(); + addUpdateDdlResponse(); + try (Connection connection = createConnection()) { + connection.execute(Statement.of("drop table foo;")); + connection.execute(Statement.of("drop table foo \n\t;\n\t ")); + connection.execute(Statement.of("drop table foo")); + + connection.startBatchDdl(); + connection.execute(Statement.of("create table foo (id1 int64 
auto_increment primary key;")); + connection.execute( + Statement.of("create table foo (id1 int64 auto_increment primary key \n\t;\n\t ")); + connection.execute(Statement.of("create table foo (id2 int64 auto_increment primary key")); + connection.runBatch(); + } + assertEquals(4, mockDatabaseAdmin.getRequests().size()); + assertEquals( + "drop table foo", + ((UpdateDatabaseDdlRequest) mockDatabaseAdmin.getRequests().get(0)).getStatements(0)); + assertEquals( + "drop table foo \n\t", + ((UpdateDatabaseDdlRequest) mockDatabaseAdmin.getRequests().get(1)).getStatements(0)); + assertEquals( + "drop table foo", + ((UpdateDatabaseDdlRequest) mockDatabaseAdmin.getRequests().get(2)).getStatements(0)); + + assertEquals( + 3, + ((UpdateDatabaseDdlRequest) mockDatabaseAdmin.getRequests().get(3)).getStatementsCount()); + assertEquals( + "create table foo (id1 int64 auto_increment primary key", + ((UpdateDatabaseDdlRequest) mockDatabaseAdmin.getRequests().get(3)).getStatements(0)); + assertEquals( + "create table foo (id1 int64 auto_increment primary key \n\t", + ((UpdateDatabaseDdlRequest) mockDatabaseAdmin.getRequests().get(3)).getStatements(1)); + assertEquals( + "create table foo (id2 int64 auto_increment primary key", + ((UpdateDatabaseDdlRequest) mockDatabaseAdmin.getRequests().get(3)).getStatements(2)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DecodeModeTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DecodeModeTest.java new file mode 100644 index 000000000000..b187b5d601e3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DecodeModeTest.java @@ -0,0 +1,163 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.DecodeMode; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadLocalRandom; +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class DecodeModeTest extends AbstractMockServerTest { + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + @Test + public void testAllDecodeModes() throws Exception { + int numRows = 10; + RandomResultSetGenerator generator = new RandomResultSetGenerator(numRows); + String sql = "select * from random"; + Statement statement = Statement.of(sql); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.query(statement, generator.generate())); + + try (Connection connection = createConnection()) { + for (boolean multiThreaded : new boolean[] {true, false}) { + for (boolean readonly : new boolean[] {true, false}) { + for 
(boolean autocommit : new boolean[] {true, false}) { + connection.setReadOnly(readonly); + connection.setAutocommit(autocommit); + + int receivedRows = 0; + // DecodeMode#DIRECT is not supported in read/write transactions, as the protobuf value + // is + // used for checksum calculation. + try (ResultSet direct = + connection.executeQuery( + statement, + !readonly && !autocommit + ? Options.decodeMode(DecodeMode.LAZY_PER_ROW) + : Options.decodeMode(DecodeMode.DIRECT)); + ResultSet lazyPerRow = + connection.executeQuery( + statement, Options.decodeMode(DecodeMode.LAZY_PER_ROW)); + ResultSet lazyPerCol = + connection.executeQuery( + statement, Options.decodeMode(DecodeMode.LAZY_PER_COL))) { + while (direct.next() && lazyPerRow.next() && lazyPerCol.next()) { + assertEquals(direct.getColumnCount(), lazyPerRow.getColumnCount()); + assertEquals(direct.getColumnCount(), lazyPerCol.getColumnCount()); + if (multiThreaded) { + ExecutorService service = Executors.newFixedThreadPool(direct.getColumnCount()); + List> futures = new ArrayList<>(direct.getColumnCount()); + for (int col = 0; col < direct.getColumnCount(); col++) { + final int colNumber = col; + futures.add( + service.submit( + () -> checkRowValues(colNumber, direct, lazyPerRow, lazyPerCol))); + } + service.shutdown(); + for (Future future : futures) { + future.get(); + } + } else { + for (int col = 0; col < direct.getColumnCount(); col++) { + checkRowValues(col, direct, lazyPerRow, lazyPerCol); + } + } + receivedRows++; + } + assertEquals(numRows, receivedRows); + } + if (!autocommit) { + connection.commit(); + } + } + } + } + } + } + + private void checkRowValues( + int col, ResultSet direct, ResultSet lazyPerRow, ResultSet lazyPerCol) { + // Randomly decode and get a column to trigger parallel decoding of one column. 
+ lazyPerCol.getValue(ThreadLocalRandom.current().nextInt(lazyPerCol.getColumnCount())); + + // Test getting the entire row as a struct both as the first thing we do, and as the + // last thing we do. This ensures that the method works as expected both when a row + // is lazily decoded by this method, and when it has already been decoded by another + // method. + if (col % 2 == 0) { + assertEquals(direct.getCurrentRowAsStruct(), lazyPerRow.getCurrentRowAsStruct()); + assertEquals(direct.getCurrentRowAsStruct(), lazyPerCol.getCurrentRowAsStruct()); + } + assertEquals(direct.isNull(col), lazyPerRow.isNull(col)); + assertEquals(direct.isNull(col), lazyPerCol.isNull(col)); + assertEquals(direct.getValue(col), lazyPerRow.getValue(col)); + assertEquals(direct.getValue(col), lazyPerCol.getValue(col)); + if (col % 2 == 1) { + assertEquals(direct.getCurrentRowAsStruct(), lazyPerRow.getCurrentRowAsStruct()); + assertEquals(direct.getCurrentRowAsStruct(), lazyPerCol.getCurrentRowAsStruct()); + } + } + + @Test + public void testDecodeModeDirect_failsInReadWriteTransaction() { + int numRows = 1; + RandomResultSetGenerator generator = new RandomResultSetGenerator(numRows); + String sql = "select * from random"; + Statement statement = Statement.of(sql); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.query(statement, generator.generate())); + + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + try (ResultSet resultSet = + connection.executeQuery(statement, Options.decodeMode(DecodeMode.DIRECT))) { + SpannerException exception = assertThrows(SpannerException.class, resultSet::next); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + assertTrue( + exception.getMessage(), + exception + .getMessage() + .contains( + "Executing queries with DecodeMode#DIRECT is not supported in read/write" + + " transactions.")); + } + } + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DelayTransactionStartUntilFirstWriteMockServerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DelayTransactionStartUntilFirstWriteMockServerTest.java new file mode 100644 index 000000000000..00fb90bf8853 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DelayTransactionStartUntilFirstWriteMockServerTest.java @@ -0,0 +1,575 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.AbortedDueToConcurrentModificationException; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Statement; +import com.google.common.collect.ImmutableList; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.RollbackRequest; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class DelayTransactionStartUntilFirstWriteMockServerTest extends AbstractMockServerTest { + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + @Parameter public Dialect dialect; + + @Before + public void setupDialect() { + mockSpanner.putStatementResult(StatementResult.detectDialectResult(dialect)); + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + SpannerPool.closeSpannerPool(); + } + + @Test + public void testEnable() { + String prefix = dialect == Dialect.POSTGRESQL ? "spanner." 
: ""; + try (Connection connection = createConnection(";delayTransactionStartUntilFirstWrite=true")) { + assertTrue(connection.isDelayTransactionStartUntilFirstWrite()); + try (ResultSet resultSet = + connection.executeQuery( + Statement.of( + String.format( + "show variable %sdelay_transaction_start_until_first_write", prefix)))) { + assertTrue(resultSet.next()); + assertTrue(resultSet.getBoolean(0)); + assertTrue( + resultSet.getBoolean( + prefix.toUpperCase() + "DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE")); + assertFalse(resultSet.next()); + } + } + try (Connection connection = createConnection()) { + assertFalse(connection.isDelayTransactionStartUntilFirstWrite()); + connection.execute( + Statement.of( + String.format("set %sdelay_transaction_start_until_first_write=true", prefix))); + assertTrue(connection.isDelayTransactionStartUntilFirstWrite()); + } + } + + @Test + public void testDisable() { + String prefix = dialect == Dialect.POSTGRESQL ? "spanner." : ""; + try (Connection connection = createConnection()) { + assertFalse(connection.isDelayTransactionStartUntilFirstWrite()); + try (ResultSet resultSet = + connection.executeQuery( + Statement.of( + String.format( + "show variable %sdelay_transaction_start_until_first_write", prefix)))) { + assertTrue(resultSet.next()); + assertFalse(resultSet.getBoolean(0)); + assertFalse( + resultSet.getBoolean( + prefix.toUpperCase() + "DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE")); + assertFalse(resultSet.next()); + } + } + try (Connection connection = createConnection(";delayTransactionStartUntilFirstWrite=true")) { + assertTrue(connection.isDelayTransactionStartUntilFirstWrite()); + connection.execute( + Statement.of( + String.format("set %sdelay_transaction_start_until_first_write=false", prefix))); + assertFalse(connection.isDelayTransactionStartUntilFirstWrite()); + } + } + + @Test + public void testDefaultUsesRealTransactions() { + try (Connection connection = createConnection()) { + executeRandomQuery(connection); 
+ connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + } + + @Test + public void testTransactionWithOneQuery() { + try (Connection connection = createConnection()) { + connection.setDelayTransactionStartUntilFirstWrite(true); + + for (boolean commit : new boolean[] {true, false}) { + executeRandomQuery(connection); + if (commit) { + connection.commit(); + } else { + connection.rollback(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertFalse(request.hasTransaction()); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(RollbackRequest.class)); + + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testTransactionWithTwoQueries() { + try (Connection connection = createConnection()) { + connection.setDelayTransactionStartUntilFirstWrite(true); + + for (boolean commit : new boolean[] {true, false}) { + executeRandomQuery(connection); + executeRandomQuery(connection); + if (commit) { + connection.commit(); + } else { + connection.rollback(); + } + + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertFalse(mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0).hasTransaction()); + assertFalse(mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(1).hasTransaction()); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testTransactionWithSingleDml() { + 
try (Connection connection = createConnection()) { + connection.setDelayTransactionStartUntilFirstWrite(true); + + connection.executeUpdate(INSERT_STATEMENT); + connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + } + + @Test + public void testTransactionWithQueryFollowedByDml() { + try (Connection connection = createConnection()) { + connection.setDelayTransactionStartUntilFirstWrite(true); + + executeRandomQuery(connection); + connection.executeUpdate(INSERT_STATEMENT); + connection.commit(); + + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest queryRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertFalse(queryRequest.hasTransaction()); + ExecuteSqlRequest dmlRequest = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(1); + assertTrue(dmlRequest.hasTransaction()); + assertTrue(dmlRequest.getTransaction().hasBegin()); + assertTrue(dmlRequest.getTransaction().getBegin().hasReadWrite()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + } + + @Test + public void testTransactionWithDmlFollowedByQuery() { + try (Connection connection = createConnection()) { + connection.setDelayTransactionStartUntilFirstWrite(true); + + connection.executeUpdate(INSERT_STATEMENT); + executeRandomQuery(connection); + connection.commit(); + + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest dmlRequest = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(dmlRequest.hasTransaction()); + assertTrue(dmlRequest.getTransaction().hasBegin()); 
+ assertTrue(dmlRequest.getTransaction().getBegin().hasReadWrite()); + ExecuteSqlRequest queryRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(1); + assertTrue(queryRequest.hasTransaction()); + assertTrue(queryRequest.getTransaction().hasId()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + } + + @Test + public void testTransactionWithQueryFollowedByBatchDml() { + try (Connection connection = createConnection()) { + connection.setDelayTransactionStartUntilFirstWrite(true); + + executeRandomQuery(connection); + connection.executeBatchUpdate(ImmutableList.of(INSERT_STATEMENT, INSERT_STATEMENT)); + connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest queryRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertFalse(queryRequest.hasTransaction()); + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest dmlRequest = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertTrue(dmlRequest.hasTransaction()); + assertTrue(dmlRequest.getTransaction().hasBegin()); + assertTrue(dmlRequest.getTransaction().getBegin().hasReadWrite()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + } + + @Test + public void testTransactionWithBatchDmlFollowedByQuery() { + try (Connection connection = createConnection()) { + connection.setDelayTransactionStartUntilFirstWrite(true); + + connection.executeBatchUpdate(ImmutableList.of(INSERT_STATEMENT, INSERT_STATEMENT)); + executeRandomQuery(connection); + connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest dmlRequest = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertTrue(dmlRequest.hasTransaction()); + assertTrue(dmlRequest.getTransaction().hasBegin()); + 
assertTrue(dmlRequest.getTransaction().getBegin().hasReadWrite()); + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest queryRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(queryRequest.hasTransaction()); + assertTrue(queryRequest.getTransaction().hasId()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + } + + @Test + public void testTransactionWithQueryFollowedByDmlReturning() { + try (Connection connection = createConnection()) { + connection.setDelayTransactionStartUntilFirstWrite(true); + + executeRandomQuery(connection); + try (ResultSet resultSet = + connection.executeQuery( + dialect == Dialect.POSTGRESQL + ? PG_INSERT_RETURNING_STATEMENT + : INSERT_RETURNING_STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + connection.commit(); + + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest queryRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertFalse(queryRequest.hasTransaction()); + ExecuteSqlRequest dmlRequest = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(1); + assertTrue(dmlRequest.hasTransaction()); + assertTrue(dmlRequest.getTransaction().hasBegin()); + assertTrue(dmlRequest.getTransaction().getBegin().hasReadWrite()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + } + + @Test + public void testTransactionWithDmlReturningFollowedByQuery() { + try (Connection connection = createConnection()) { + connection.setDelayTransactionStartUntilFirstWrite(true); + + try (ResultSet resultSet = + connection.executeQuery( + dialect == Dialect.POSTGRESQL + ? 
PG_INSERT_RETURNING_STATEMENT + : INSERT_RETURNING_STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + executeRandomQuery(connection); + connection.commit(); + + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest dmlRequest = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(dmlRequest.hasTransaction()); + assertTrue(dmlRequest.getTransaction().hasBegin()); + assertTrue(dmlRequest.getTransaction().getBegin().hasReadWrite()); + ExecuteSqlRequest queryRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(1); + assertTrue(queryRequest.hasTransaction()); + assertTrue(queryRequest.getTransaction().hasId()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + } + + @Test + public void testTransactionWithQueryFollowedByMutations() { + try (Connection connection = createConnection()) { + connection.setDelayTransactionStartUntilFirstWrite(true); + + executeRandomQuery(connection); + // Mutations don't start a transaction, as they are only included in the commit call anyways. + connection.bufferedWrite( + Mutation.newInsertOrUpdateBuilder("foo").set("id").to(1L).set("value").to("one").build()); + connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest queryRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertFalse(queryRequest.hasTransaction()); + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + } + + @Test + public void testTransactionWithMutationsFollowedByQuery() { + try (Connection connection = createConnection()) { + connection.setDelayTransactionStartUntilFirstWrite(true); + + // Mutations don't start a transaction, as they are only included in the commit call anyways. 
+ connection.bufferedWrite( + Mutation.newInsertOrUpdateBuilder("foo").set("id").to(1L).set("value").to("one").build()); + executeRandomQuery(connection); + connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest queryRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertFalse(queryRequest.hasTransaction()); + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + } + + @Test + public void testTransactionWithQueryFollowedByDmlAborted() { + try (Connection connection = createConnection()) { + connection.setDelayTransactionStartUntilFirstWrite(true); + + executeRandomQuery(connection); + connection.executeUpdate(INSERT_STATEMENT); + mockSpanner.abortNextStatement(); + connection.commit(); + + // There should be 3 ExecuteSqlRequests: + // 1. The initial query. + // 2. The initial DML statement. + // 3. The retried DML statement. The initial query is not part of the transaction, and + // therefore also not retried. 
+ assertEquals(3, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest queryRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertFalse(queryRequest.hasTransaction()); + ExecuteSqlRequest firstDmlRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(1); + assertTrue(firstDmlRequest.hasTransaction()); + assertTrue(firstDmlRequest.getTransaction().hasBegin()); + assertTrue(firstDmlRequest.getTransaction().getBegin().hasReadWrite()); + ExecuteSqlRequest secondDmlRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(2); + assertTrue(secondDmlRequest.hasTransaction()); + assertTrue(secondDmlRequest.getTransaction().hasBegin()); + assertTrue(secondDmlRequest.getTransaction().getBegin().hasReadWrite()); + assertEquals(2, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + } + + @Test + public void testTransactionWithDmlFollowedByQueryAborted() { + try (Connection connection = createConnection()) { + connection.setDelayTransactionStartUntilFirstWrite(true); + + connection.executeUpdate(INSERT_STATEMENT); + executeRandomQuery(connection); + mockSpanner.abortNextStatement(); + connection.commit(); + + // There should be 4 ExecuteSqlRequests: + // 1. The initial query. + // 2. The initial DML statement. + // 3. The retried query. + // 4. The retried DML statement. 
+ assertEquals(4, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest firstQueryRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(firstQueryRequest.hasTransaction()); + assertTrue(firstQueryRequest.getTransaction().hasBegin()); + assertTrue(firstQueryRequest.getTransaction().getBegin().hasReadWrite()); + ExecuteSqlRequest firstDmlRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(1); + assertTrue(firstDmlRequest.hasTransaction()); + assertTrue(firstDmlRequest.getTransaction().hasId()); + + ExecuteSqlRequest secondQueryRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(2); + assertTrue(secondQueryRequest.hasTransaction()); + assertTrue(firstQueryRequest.getTransaction().hasBegin()); + assertTrue(firstQueryRequest.getTransaction().getBegin().hasReadWrite()); + ExecuteSqlRequest secondDmlRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(3); + assertTrue(secondDmlRequest.hasTransaction()); + assertTrue(secondDmlRequest.getTransaction().hasId()); + assertEquals(2, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + } + + @Test + public void testTransactionWithDmlFollowedByQueryWithFailedRetry() { + try (Connection connection = createConnection()) { + connection.setDelayTransactionStartUntilFirstWrite(true); + + connection.executeUpdate(INSERT_STATEMENT); + executeRandomQuery(connection); + mockSpanner.abortNextStatement(); + // Change the results that is returned by the query. This will make the retry fail. + mockSpanner.putStatementResult( + StatementResult.query( + SELECT_RANDOM_STATEMENT, new RandomResultSetGenerator(10).generate())); + assertThrows(AbortedDueToConcurrentModificationException.class, connection::commit); + + // There should be 4 ExecuteSqlRequests: + // 1. The initial query. + // 2. The initial DML statement. + // 3. The retried query. + // 4. The retried DML statement. 
+      assertEquals(4, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class));
+      // The DML statement was executed first, so it starts the read/write transaction and
+      // carries the BeginTransaction option.
+      ExecuteSqlRequest firstDmlRequest =
+          mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0);
+      assertTrue(firstDmlRequest.hasTransaction());
+      assertTrue(firstDmlRequest.getTransaction().hasBegin());
+      assertTrue(firstDmlRequest.getTransaction().getBegin().hasReadWrite());
+      // The query uses the transaction ID that was returned for the DML statement.
+      ExecuteSqlRequest firstQueryRequest =
+          mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(1);
+      assertTrue(firstQueryRequest.hasTransaction());
+      assertTrue(firstQueryRequest.getTransaction().hasId());
+
+      // The retry replays the statements in the original order: the retried DML statement
+      // begins a new transaction, and the retried query uses that transaction's ID.
+      // (Fixed copy-paste bug: these assertions previously re-checked the first request.)
+      ExecuteSqlRequest secondDmlRequest =
+          mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(2);
+      assertTrue(secondDmlRequest.hasTransaction());
+      assertTrue(secondDmlRequest.getTransaction().hasBegin());
+      assertTrue(secondDmlRequest.getTransaction().getBegin().hasReadWrite());
+      ExecuteSqlRequest secondQueryRequest =
+          mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(3);
+      assertTrue(secondQueryRequest.hasTransaction());
+      assertTrue(secondQueryRequest.getTransaction().hasId());
+
+      // There is only one Commit request, as the retry fails on the query that returns different
+      // results during the retry attempt.
+      assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class));
+    }
+  }
+
+  @Test
+  public void testTransactionWithQueryFollowedByDmlAborted_RetrySucceedsWithModifiedQueryResults() {
+    try (Connection connection = createConnection()) {
+      connection.setDelayTransactionStartUntilFirstWrite(true);
+
+      executeRandomQuery(connection);
+      connection.executeUpdate(INSERT_STATEMENT);
+      mockSpanner.abortNextStatement();
+      // Change the results that are returned by the query. This will not affect the retry, as the
+      // query is not part of the transaction.
+      mockSpanner.putStatementResult(
+          StatementResult.query(
+              SELECT_RANDOM_STATEMENT, new RandomResultSetGenerator(10).generate()));
+      connection.commit();
+
+      // There should be 3 ExecuteSqlRequests:
+      // 1.
The initial query. + // 2. The initial DML statement. + // 3. The retried DML statement. + assertEquals(3, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest queryRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertFalse(queryRequest.hasTransaction()); + ExecuteSqlRequest firstDmlRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(1); + assertTrue(firstDmlRequest.hasTransaction()); + assertTrue(firstDmlRequest.getTransaction().hasBegin()); + assertTrue(firstDmlRequest.getTransaction().getBegin().hasReadWrite()); + ExecuteSqlRequest secondDmlRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(2); + assertTrue(secondDmlRequest.hasTransaction()); + assertTrue(secondDmlRequest.getTransaction().hasBegin()); + assertTrue(secondDmlRequest.getTransaction().getBegin().hasReadWrite()); + assertEquals(2, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + } + + @Test + public void testTransactionWithRollbackToSavepointWithoutRealTransaction() { + try (Connection connection = createConnection()) { + connection.setDelayTransactionStartUntilFirstWrite(true); + + executeRandomQuery(connection); + connection.savepoint("s1"); + executeRandomQuery(connection); + connection.rollbackToSavepoint("s1"); + executeRandomQuery(connection); + + connection.commit(); + + assertEquals(3, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertFalse(mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0).hasTransaction()); + assertFalse(mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(1).hasTransaction()); + assertFalse(mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(2).hasTransaction()); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(RollbackRequest.class)); + } + } + + private void executeRandomQuery(Connection connection) { + try (ResultSet resultSet = 
connection.executeQuery(SELECT_RANDOM_STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DirectExecuteResultSetTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DirectExecuteResultSetTest.java new file mode 100644 index 000000000000..b14f837ff7bb --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DirectExecuteResultSetTest.java @@ -0,0 +1,350 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.ResultSets; +import com.google.cloud.spanner.SingerProto.Genre; +import com.google.cloud.spanner.SingerProto.SingerInfo; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.StructField; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ProtocolMessageEnum; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.function.Function; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class DirectExecuteResultSetTest { + + private DirectExecuteResultSet createSubject() { + ResultSet delegate = + ResultSets.forRows( + Type.struct(StructField.of("test", Type.int64())), + Collections.singletonList(Struct.newBuilder().set("test").to(1L).build())); + return DirectExecuteResultSet.ofResultSet(delegate); + } + + @Test + public void testMethodCallBeforeNext() + throws IllegalAccessException, IllegalArgumentException, InvocationTargetException { + List excludedMethods = + Arrays.asList( + "canGetProtobufValue", + "getStats", + "getMetadata", + "next", + "close", + "ofResultSet", + "equals", + "hashCode", + "getType", + "getColumnCount", + "getColumnIndex", + "getColumnType"); + DirectExecuteResultSet subject = createSubject(); + callMethods(subject, excludedMethods, IllegalStateException.class); + } + + @Test + public void testMethodCallAfterClose() + throws 
IllegalAccessException, IllegalArgumentException, InvocationTargetException { + List excludedMethods = + Arrays.asList( + "canGetProtobufValue", + "getStats", + "getMetadata", + "next", + "close", + "getType", + "getColumnCount", + "getColumnIndex", + "getColumnType", + "ofResultSet", + "equals", + "hashCode"); + DirectExecuteResultSet subject = createSubject(); + subject.next(); + subject.close(); + callMethods(subject, excludedMethods, IllegalStateException.class); + } + + @Test + public void testMethodCallAfterNextHasReturnedFalse() + throws IllegalAccessException, IllegalArgumentException, InvocationTargetException { + List excludedMethods = + Arrays.asList( + "canGetProtobufValue", + "getStats", + "getMetadata", + "next", + "close", + "getType", + "getColumnCount", + "getColumnIndex", + "getColumnType", + "ofResultSet", + "equals", + "hashCode"); + DirectExecuteResultSet subject = createSubject(); + subject.next(); + subject.next(); + callMethods(subject, excludedMethods, IndexOutOfBoundsException.class); + } + + private void callMethods( + DirectExecuteResultSet subject, + List excludedMethods, + Class expectedException) + throws IllegalAccessException, IllegalArgumentException, InvocationTargetException { + for (Method method : DirectExecuteResultSet.class.getDeclaredMethods()) { + if (Modifier.isPublic(method.getModifiers()) && !excludedMethods.contains(method.getName())) { + boolean exception = false; + int numberOfParameters = method.getParameterTypes().length; + Class firstParameterType = null; + if (numberOfParameters >= 1) { + firstParameterType = method.getParameterTypes()[0]; + } + try { + switch (numberOfParameters) { + case 0: + method.invoke(subject); + break; + case 1: + if (firstParameterType == String.class) { + method.invoke(subject, "test"); + } else if (firstParameterType == int.class) { + method.invoke(subject, 0); + } else { + fail("unknown parameter type"); + } + break; + case 2: + Class secondParameterType = 
method.getParameterTypes()[1]; + Object firstArgument = null, secondArgument = null; + + if (firstParameterType == String.class) { + firstArgument = "test"; + } else if (firstParameterType == int.class) { + firstArgument = 0; + } + + if (secondParameterType == Function.class) { + Function lambdaFunction = + (val) -> Genre.forNumber(val.intValue()); + secondArgument = lambdaFunction; + } else if (secondParameterType == AbstractMessage.class) { + secondArgument = SingerInfo.getDefaultInstance(); + } + + if (firstArgument != null && secondArgument != null) { + method.invoke(subject, firstArgument, secondArgument); + } else { + fail("unknown parameter type"); + } + break; + default: + fail("method with more than 2 parameters is unknown"); + } + } catch (InvocationTargetException e) { + if (e.getCause().getClass().equals(expectedException)) { + // expected + exception = true; + } else { + throw e; + } + } + assertThat( + method.getName() + " did not throw an IllegalStateException", exception, is(true)); + } + } + } + + @Test + public void testValidMethodCall() throws IllegalArgumentException { + ResultSet delegate = mock(ResultSet.class); + when(delegate.next()).thenReturn(true, true, false); + DirectExecuteResultSet subject = DirectExecuteResultSet.ofResultSet(delegate); + subject.next(); + + subject.getBoolean(0); + verify(delegate).getBoolean(0); + subject.getBoolean("test0"); + verify(delegate).getBoolean("test0"); + subject.getBooleanArray(1); + verify(delegate).getBooleanArray(1); + subject.getBooleanArray("test1"); + verify(delegate).getBooleanArray("test1"); + subject.getBooleanList(2); + verify(delegate).getBooleanList(2); + subject.getBooleanList("test2"); + verify(delegate).getBooleanList("test2"); + + subject.getBytes(0); + verify(delegate).getBytes(0); + subject.getBytes("test0"); + verify(delegate).getBytes("test0"); + subject.getBytesList(2); + verify(delegate).getBytesList(2); + subject.getBytesList("test2"); + verify(delegate).getBytesList("test2"); + + 
subject.getDate(0); + verify(delegate).getDate(0); + subject.getDate("test0"); + verify(delegate).getDate("test0"); + subject.getDateList(2); + verify(delegate).getDateList(2); + subject.getDateList("test2"); + verify(delegate).getDateList("test2"); + + subject.getDouble(0); + verify(delegate).getDouble(0); + subject.getDouble("test0"); + verify(delegate).getDouble("test0"); + subject.getDoubleArray(1); + verify(delegate).getDoubleArray(1); + subject.getDoubleArray("test1"); + verify(delegate).getDoubleArray("test1"); + subject.getDoubleList(2); + verify(delegate).getDoubleList(2); + subject.getDoubleList("test2"); + verify(delegate).getDoubleList("test2"); + + subject.getBigDecimal(0); + verify(delegate).getBigDecimal(0); + subject.getBigDecimal("test0"); + verify(delegate).getBigDecimal("test0"); + subject.getBigDecimalList(1); + verify(delegate).getBigDecimalList(1); + subject.getBigDecimalList("test1"); + verify(delegate).getBigDecimalList("test1"); + subject.getBigDecimalList(2); + verify(delegate).getBigDecimalList(2); + subject.getBigDecimalList("test2"); + verify(delegate).getBigDecimalList("test2"); + + subject.getLong(0); + verify(delegate).getLong(0); + subject.getLong("test0"); + verify(delegate).getLong("test0"); + subject.getLongArray(1); + verify(delegate).getLongArray(1); + subject.getLongArray("test1"); + verify(delegate).getLongArray("test1"); + subject.getLongList(2); + verify(delegate).getLongList(2); + subject.getLongList("test2"); + verify(delegate).getLongList("test2"); + + subject.getString(0); + verify(delegate).getString(0); + subject.getString("test0"); + verify(delegate).getString("test0"); + subject.getStringList(2); + verify(delegate).getStringList(2); + subject.getStringList("test2"); + verify(delegate).getStringList("test2"); + + subject.getJson(0); + verify(delegate).getJson(0); + subject.getJson("test0"); + verify(delegate).getJson("test0"); + subject.getJsonList(2); + verify(delegate).getJsonList(2); + 
subject.getJsonList("test2"); + verify(delegate).getJsonList("test2"); + + subject.getPgJsonb(0); + verify(delegate).getPgJsonb(0); + subject.getPgJsonb("test0"); + verify(delegate).getPgJsonb("test0"); + subject.getPgJsonbList(2); + verify(delegate).getPgJsonbList(2); + subject.getPgJsonbList("test2"); + verify(delegate).getPgJsonbList("test2"); + + subject.getProtoMessage(0, SingerInfo.getDefaultInstance()); + verify(delegate).getProtoMessage(0, SingerInfo.getDefaultInstance()); + subject.getProtoMessage("test0", SingerInfo.getDefaultInstance()); + verify(delegate).getProtoMessage("test0", SingerInfo.getDefaultInstance()); + subject.getProtoMessageList(0, SingerInfo.getDefaultInstance()); + verify(delegate).getProtoMessageList(0, SingerInfo.getDefaultInstance()); + subject.getProtoMessageList("test0", SingerInfo.getDefaultInstance()); + verify(delegate).getProtoMessageList("test0", SingerInfo.getDefaultInstance()); + + Function lambdaFunction = Genre::forNumber; + subject.getProtoEnum(0, lambdaFunction); + verify(delegate).getProtoEnum(0, lambdaFunction); + subject.getProtoEnum("test0", lambdaFunction); + verify(delegate).getProtoEnum("test0", lambdaFunction); + subject.getProtoEnumList(0, lambdaFunction); + verify(delegate).getProtoEnumList(0, lambdaFunction); + subject.getProtoEnumList("test0", lambdaFunction); + verify(delegate).getProtoEnumList("test0", lambdaFunction); + + subject.getStructList(0); + subject.getStructList("test0"); + + subject.getTimestamp(0); + verify(delegate).getTimestamp(0); + subject.getTimestamp("test0"); + verify(delegate).getTimestamp("test0"); + subject.getTimestampList(2); + verify(delegate).getTimestampList(2); + subject.getTimestampList("test2"); + verify(delegate).getTimestampList("test2"); + + subject.getColumnCount(); + verify(delegate).getColumnCount(); + subject.getColumnIndex("test"); + verify(delegate).getColumnIndex("test"); + subject.getColumnType(100); + verify(delegate).getColumnType(100); + 
subject.getColumnType("test"); + verify(delegate).getColumnType("test"); + subject.getCurrentRowAsStruct(); + verify(delegate).getCurrentRowAsStruct(); + subject.getType(); + verify(delegate).getType(); + subject.isNull(50); + verify(delegate).isNull(50); + subject.isNull("test"); + verify(delegate).isNull("test"); + + while (subject.next()) { + // ignore + } + subject.getStats(); + verify(delegate).getStats(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DirectedReadOptionsUtilTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DirectedReadOptionsUtilTest.java new file mode 100644 index 000000000000..0669cb5aad6f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DirectedReadOptionsUtilTest.java @@ -0,0 +1,77 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static junit.framework.TestCase.assertEquals; + +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas; +import com.google.spanner.v1.DirectedReadOptions.IncludeReplicas; +import com.google.spanner.v1.DirectedReadOptions.ReplicaSelection; +import com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Tests converting {@link DirectedReadOptions} to/from JSON. The test cases here are not very + * extensive, as it a very simple wrapper around the {@link com.google.protobuf.util.JsonFormat} + * class, which has its own test suite. The tests in this class only serve as a simple verification + * that the formatter works as expected. + */ +@RunWith(JUnit4.class) +public class DirectedReadOptionsUtilTest { + + @Test + public void testToString() { + assertRoundTrip("", DirectedReadOptions.newBuilder().build()); + assertRoundTrip( + "{\"includeReplicas\":{}}", + DirectedReadOptions.newBuilder() + .setIncludeReplicas(IncludeReplicas.newBuilder().build()) + .build()); + assertRoundTrip( + "{\"includeReplicas\":{\"replicaSelections\":[{\"location\":\"eu-west1\",\"type\":\"READ_ONLY\"}]}}", + DirectedReadOptions.newBuilder() + .setIncludeReplicas( + IncludeReplicas.newBuilder() + .addReplicaSelections( + ReplicaSelection.newBuilder() + .setType(Type.READ_ONLY) + .setLocation("eu-west1") + .build()) + .build()) + .build()); + assertRoundTrip( + "{\"excludeReplicas\":{\"replicaSelections\":[{\"location\":\"eu-west1\",\"type\":\"READ_ONLY\"}]}}", + DirectedReadOptions.newBuilder() + .setExcludeReplicas( + ExcludeReplicas.newBuilder() + .addReplicaSelections( + ReplicaSelection.newBuilder() + .setType(Type.READ_ONLY) + .setLocation("eu-west1") + .build()) + .build()) + .build()); + } + + private void assertRoundTrip(String json, DirectedReadOptions 
options) { + assertEquals(json, DirectedReadOptionsUtil.toString(options)); + assertEquals(options, DirectedReadOptionsUtil.parse(json)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DirectedReadTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DirectedReadTest.java new file mode 100644 index 000000000000..9c784913e5d2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DirectedReadTest.java @@ -0,0 +1,315 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static junit.framework.TestCase.assertEquals; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Statement; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.ListValue; +import com.google.protobuf.Value; +import com.google.spanner.v1.*; +import com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas; +import com.google.spanner.v1.DirectedReadOptions.IncludeReplicas; +import com.google.spanner.v1.DirectedReadOptions.ReplicaSelection; +import com.google.spanner.v1.StructType.Field; +import java.util.Collection; +import java.util.List; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class DirectedReadTest extends AbstractMockServerTest { + private static final Statement READ_STATEMENT = Statement.of("SELECT 1 AS C"); + + private static final Statement GOOGLESQL_DML_STATEMENT = + Statement.of("INSERT INTO T (id) VALUES (1) THEN RETURN ID"); + private static final Statement POSTGRESQL_DML_STATEMENT = + Statement.of("INSERT INTO T (id) VALUES (1) RETURNING ID"); + + @Parameters(name = "dialect = {0}") + public static Collection data() { + ImmutableList.Builder builder = ImmutableList.builder(); + for (Dialect dialect : Dialect.values()) { + builder.add(new Object[] {dialect}); + } + return builder.build(); + } + + @Parameter public Dialect dialect; + + private Dialect currentDialect; + + @BeforeClass + public static void setupQueryResults() { + com.google.spanner.v1.ResultSet resultSet = + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + 
.setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("C") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues(Value.newBuilder().setStringValue("1").build()) + .build()) + .build(); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.query(READ_STATEMENT, resultSet)); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.query( + GOOGLESQL_DML_STATEMENT, + resultSet.toBuilder() + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()) + .build())); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.query( + POSTGRESQL_DML_STATEMENT, + resultSet.toBuilder() + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()) + .build())); + } + + @Before + public void setupDialect() { + if (currentDialect != dialect) { + // Reset the dialect result. + SpannerPool.closeSpannerPool(); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.detectDialectResult(dialect)); + currentDialect = dialect; + } + } + + private String getVariablePrefix() { + return dialect == Dialect.POSTGRESQL ? "spanner." : ""; + } + + private Statement getDmlStatement() { + return dialect == Dialect.POSTGRESQL ? 
POSTGRESQL_DML_STATEMENT : GOOGLESQL_DML_STATEMENT; + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + @Test + public void testNoDirectedReadByDefault() { + try (Connection connection = createConnection()) { + for (boolean readOnly : new boolean[] {true, false}) { + for (boolean autocommit : new boolean[] {true, false}) { + connection.setAutocommit(autocommit); + connection.setReadOnly(readOnly); + + executeReadQuery(connection); + assertDirectedReadOptions(DirectedReadOptions.getDefaultInstance()); + + if (!autocommit) { + connection.commit(); + } + mockSpanner.clearRequests(); + } + } + } + } + + @Test + public void testSetDirectedRead() { + for (DirectedReadOptions expected : + new DirectedReadOptions[] { + DirectedReadOptions.newBuilder() + .setIncludeReplicas( + IncludeReplicas.newBuilder() + .addReplicaSelections( + ReplicaSelection.newBuilder() + .setLocation("eu-west1") + .setType(ReplicaSelection.Type.READ_ONLY) + .build()) + .build()) + .build(), + DirectedReadOptions.newBuilder() + .setExcludeReplicas( + ExcludeReplicas.newBuilder() + .addReplicaSelections( + ReplicaSelection.newBuilder() + .setLocation("eu-west1") + .setType(ReplicaSelection.Type.READ_ONLY) + .build()) + .build()) + .build(), + DirectedReadOptions.newBuilder().build() + }) { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.execute( + Statement.of( + String.format( + "set %sdirected_read='%s'", + getVariablePrefix(), DirectedReadOptionsUtil.toString(expected)))); + Repeat.twice( + () -> { + executeReadQuery(connection); + assertDirectedReadOptions(expected); + mockSpanner.clearRequests(); + }); + + // Reset to default. 
+ connection.execute( + Statement.of(String.format("set %sdirected_read=''", getVariablePrefix()))); + executeReadQuery(connection); + assertDirectedReadOptions(DirectedReadOptions.getDefaultInstance()); + + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testDirectedReadIsIgnoredForDmlInAutoCommit() { + DirectedReadOptions options = + DirectedReadOptions.newBuilder() + .setExcludeReplicas( + ExcludeReplicas.newBuilder() + .addReplicaSelections( + ReplicaSelection.newBuilder() + .setLocation("eu-west1") + .setType(ReplicaSelection.Type.READ_ONLY) + .build()) + .build()) + .build(); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.execute( + Statement.of( + String.format( + "set %sdirected_read='%s'", + getVariablePrefix(), DirectedReadOptionsUtil.toString(options)))); + // DML should not use directed read. + executeDmlQuery(connection); + assertDirectedReadOptions(DirectedReadOptions.getDefaultInstance()); + } + } + + @Test + public void testDirectedReadIsIgnoredInReadWriteTransaction() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(false); + + for (Statement statement : new Statement[] {READ_STATEMENT, getDmlStatement()}) { + connection.execute( + Statement.of( + String.format( + "set %sdirected_read='%s'", + getVariablePrefix(), + DirectedReadOptionsUtil.toString( + DirectedReadOptions.newBuilder() + .setIncludeReplicas( + IncludeReplicas.newBuilder() + .addReplicaSelections( + ReplicaSelection.newBuilder() + .setType(ReplicaSelection.Type.READ_WRITE) + .setLocation("us-west1") + .build()) + .build()) + .build())))); + // This uses a read/write transaction, which will ignore any DirectedReadOptions. 
+ executeQuery(connection, statement); + assertDirectedReadOptions(DirectedReadOptions.getDefaultInstance()); + + connection.commit(); + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testDirectedReadIsUsedInReadOnlyTransaction() { + DirectedReadOptions expected = + DirectedReadOptions.newBuilder() + .setIncludeReplicas( + IncludeReplicas.newBuilder() + .addReplicaSelections( + ReplicaSelection.newBuilder() + .setType(ReplicaSelection.Type.READ_WRITE) + .setLocation("us-west1") + .build()) + .build()) + .build(); + + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(true); + + connection.execute( + Statement.of( + String.format( + "set %sdirected_read='%s'", + getVariablePrefix(), DirectedReadOptionsUtil.toString(expected)))); + // This uses a read-only transaction, which will use the DirectedReadOptions. + // Repeatedly executing a query on the same connection will also continue to use the + // DirectedReadOptions that have been set for the connection. 
+ Repeat.twice( + () -> { + executeReadQuery(connection); + assertDirectedReadOptions(expected); + mockSpanner.clearRequests(); + }); + } + } + + private void executeReadQuery(Connection connection) { + executeQuery(connection, READ_STATEMENT); + } + + private void executeDmlQuery(Connection connection) { + executeQuery(connection, getDmlStatement()); + } + + private void executeQuery(Connection connection, Statement statement) { + try (ResultSet resultSet = connection.executeQuery(statement)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + } + + private void assertDirectedReadOptions(DirectedReadOptions expected) { + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(1, requests.size()); + ExecuteSqlRequest request = requests.get(0); + assertEquals(expected, request.getDirectedReadOptions()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DmlBatchTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DmlBatchTest.java new file mode 100644 index 000000000000..ab04bb61a54a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DmlBatchTest.java @@ -0,0 +1,215 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.anyList; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.api.core.ApiFutures; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; +import com.google.cloud.spanner.connection.UnitOfWork.CallType; +import com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState; +import io.opentelemetry.api.trace.Span; +import java.util.Arrays; +import java.util.Collections; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class DmlBatchTest { + + private final ParsedStatement statement1 = + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + .parse(Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=2")); + private final ParsedStatement statement2 = + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + .parse(Statement.of("UPDATE FOO SET BAR=2 WHERE BAZ=3")); + + private DmlBatch createSubject() { + UnitOfWork transaction = mock(UnitOfWork.class); + when(transaction.executeBatchUpdateAsync(any(), eq(Arrays.asList(statement1, statement2)))) + .thenReturn(ApiFutures.immediateFuture(new long[] {3L, 5L})); + return 
createSubject(transaction); + } + + private DmlBatch createSubject(UnitOfWork transaction) { + return DmlBatch.newBuilder() + .setTransaction(transaction) + .withStatementExecutor(new StatementExecutor()) + .setSpan(Span.getInvalid()) + .build(); + } + + @Test + public void testExecuteQuery() { + DmlBatch batch = createSubject(); + try { + batch.executeQueryAsync(CallType.SYNC, mock(ParsedStatement.class), AnalyzeMode.NONE); + fail("Expected exception"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + + @Test + public void testExecuteDdl() { + DmlBatch batch = createSubject(); + try { + batch.executeDdlAsync(CallType.SYNC, mock(ParsedStatement.class)); + fail("Expected exception"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + + @Test + public void testGetReadTimestamp() { + DmlBatch batch = createSubject(); + get(batch.runBatchAsync(CallType.SYNC)); + try { + batch.getReadTimestamp(); + fail("Expected exception"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + + @Test + public void testIsReadOnly() { + DmlBatch batch = createSubject(); + assertThat(batch.isReadOnly(), is(false)); + } + + @Test + public void testGetCommitTimestamp() { + DmlBatch batch = createSubject(); + get(batch.runBatchAsync(CallType.SYNC)); + try { + batch.getCommitTimestamp(); + fail("Expected exception"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + + @Test + public void testGetCommitResponse() { + DmlBatch batch = createSubject(); + get(batch.runBatchAsync(CallType.SYNC)); + try { + batch.getCommitResponse(); + fail("Expected exception"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + + @Test + public void testGetCommitResponseOrNull() { + DmlBatch batch = createSubject(); + 
get(batch.runBatchAsync(CallType.SYNC)); + assertNull(batch.getCommitResponseOrNull()); + } + + @Test + public void testWriteIterable() { + DmlBatch batch = createSubject(); + try { + batch.writeAsync( + CallType.SYNC, Collections.singletonList(Mutation.newInsertBuilder("foo").build())); + fail("Expected exception"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + + @Test + public void testGetStateAndIsActive() { + DmlBatch batch = createSubject(); + assertThat(batch.getState(), is(UnitOfWorkState.STARTED)); + assertThat(batch.isActive(), is(true)); + get(batch.runBatchAsync(CallType.SYNC)); + assertThat(batch.getState(), is(UnitOfWorkState.RAN)); + assertThat(batch.isActive(), is(false)); + + batch = createSubject(); + assertThat(batch.getState(), is(UnitOfWorkState.STARTED)); + assertThat(batch.isActive(), is(true)); + batch.abortBatch(); + assertThat(batch.getState(), is(UnitOfWorkState.ABORTED)); + assertThat(batch.isActive(), is(false)); + + UnitOfWork tx = mock(UnitOfWork.class); + when(tx.executeBatchUpdateAsync(any(), anyList())) + .thenReturn(ApiFutures.immediateFailedFuture(mock(SpannerException.class))); + batch = createSubject(tx); + assertThat(batch.getState(), is(UnitOfWorkState.STARTED)); + assertThat(batch.isActive(), is(true)); + ParsedStatement statement = mock(ParsedStatement.class); + when(statement.getStatement()).thenReturn(Statement.of("UPDATE TEST SET COL1=2")); + when(statement.getSql()).thenReturn("UPDATE TEST SET COL1=2"); + when(statement.getType()).thenReturn(StatementType.UPDATE); + get(batch.executeUpdateAsync(CallType.SYNC, statement)); + boolean exception = false; + try { + get(batch.runBatchAsync(CallType.SYNC)); + } catch (SpannerException e) { + exception = true; + } + assertThat(exception, is(true)); + assertThat(batch.getState(), is(UnitOfWorkState.RUN_FAILED)); + assertThat(batch.isActive(), is(false)); + } + + @Test + public void testCommit() { + DmlBatch batch = 
createSubject(); + try { + batch.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE); + fail("Expected exception"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + + @Test + public void testRollback() { + DmlBatch batch = createSubject(); + try { + batch.rollbackAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE); + fail("Expected exception"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DurationConverterTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DurationConverterTest.java new file mode 100644 index 000000000000..e494f9df8b10 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DurationConverterTest.java @@ -0,0 +1,71 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertNotNull; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.DurationConverter; +import java.time.Duration; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class DurationConverterTest { + @Test + public void testConvert() throws CompileException { + String allowedValues = + ReadOnlyStalenessConverterTest.getAllowedValues( + DurationConverter.class, Dialect.GOOGLE_STANDARD_SQL); + assertNotNull(allowedValues); + DurationConverter converter = new DurationConverter(allowedValues); + assertThat(converter.convert("'100ms'"), is(equalTo(Duration.ofMillis(100L)))); + assertThat(converter.convert("100"), is(equalTo(Duration.ofMillis(100)))); + assertThat(converter.convert("'0ms'"), is(Duration.ZERO)); + assertThat(converter.convert("'-100ms'"), is(nullValue())); + assertThat( + converter.convert("'315576000000000ms'"), is(equalTo(Duration.ofSeconds(315576000000L)))); + assertThat(converter.convert("'1000ms'"), is(equalTo(Duration.ofSeconds(1L)))); + assertThat( + converter.convert("'1001ms'"), + is(equalTo(Duration.ofSeconds(1L, TimeUnit.MILLISECONDS.toNanos(1L))))); + + assertThat(converter.convert("'1ns'"), is(equalTo(Duration.ofNanos(1)))); + assertThat(converter.convert("'1us'"), is(equalTo(Duration.ofNanos(1000)))); + assertThat(converter.convert("'1ms'"), is(equalTo(Duration.ofNanos(1000000)))); + assertThat(converter.convert("'999999999ns'"), is(equalTo(Duration.ofNanos(999999999)))); + 
assertThat(converter.convert("'1s'"), is(equalTo(Duration.ofSeconds(1L)))); + + assertThat(converter.convert("''"), is(nullValue())); + assertThat(converter.convert("' '"), is(nullValue())); + assertThat(converter.convert("'random string'"), is(nullValue())); + + assertThat(converter.convert("null"), is(equalTo(Duration.ZERO))); + assertThat(converter.convert("NULL"), is(equalTo(Duration.ZERO))); + assertThat(converter.convert("Null"), is(equalTo(Duration.ZERO))); + assertThat(converter.convert("'null'"), is(nullValue())); + assertThat(converter.convert("'NULL'"), is(nullValue())); + assertThat(converter.convert("'Null'"), is(nullValue())); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/EmulatorUtilTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/EmulatorUtilTest.java new file mode 100644 index 000000000000..db1b232a37f9 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/EmulatorUtilTest.java @@ -0,0 +1,324 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.EmulatorUtil.maybeCreateInstanceAndDatabase; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Instance; +import com.google.cloud.spanner.InstanceAdminClient; +import com.google.cloud.spanner.InstanceConfigId; +import com.google.cloud.spanner.InstanceId; +import com.google.cloud.spanner.InstanceInfo; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import com.google.spanner.admin.instance.v1.CreateInstanceMetadata; +import java.util.concurrent.ExecutionException; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class EmulatorUtilTest { + @Parameter public Dialect dialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + @Test + public void testCreateInstanceAndDatabase_bothSucceed() + throws InterruptedException, ExecutionException { + Spanner spanner = 
mock(Spanner.class); + SpannerOptions options = mock(SpannerOptions.class); + when(spanner.getOptions()).thenReturn(options); + when(options.getCredentials()).thenReturn(NoCredentials.getInstance()); + + InstanceAdminClient instanceClient = mock(InstanceAdminClient.class); + @SuppressWarnings("unchecked") + OperationFuture instanceOperationFuture = + mock(OperationFuture.class); + + when(spanner.getInstanceAdminClient()).thenReturn(instanceClient); + when(instanceClient.createInstance(any(InstanceInfo.class))) + .thenReturn(instanceOperationFuture); + when(instanceOperationFuture.get()).thenReturn(mock(Instance.class)); + + DatabaseAdminClient databaseClient = mock(DatabaseAdminClient.class); + @SuppressWarnings("unchecked") + OperationFuture databaseOperationFuture = + mock(OperationFuture.class); + + when(spanner.getDatabaseAdminClient()).thenReturn(databaseClient); + when(databaseClient.createDatabase( + eq("test-instance"), + eq(dialect.createDatabaseStatementFor("test-database")), + eq(dialect), + eq(ImmutableList.of()))) + .thenReturn(databaseOperationFuture); + when(databaseOperationFuture.get()).thenReturn(mock(Database.class)); + + maybeCreateInstanceAndDatabase( + spanner, DatabaseId.of("test-project", "test-instance", "test-database"), dialect); + + // Verify that both the instance and the database was created. 
+ verify(instanceClient) + .createInstance( + InstanceInfo.newBuilder(InstanceId.of("test-project", "test-instance")) + .setDisplayName("Automatically Generated Test Instance") + .setInstanceConfigId(InstanceConfigId.of("test-project", "emulator-config")) + .setNodeCount(1) + .build()); + verify(databaseClient) + .createDatabase( + "test-instance", + dialect.createDatabaseStatementFor("test-database"), + dialect, + ImmutableList.of()); + } + + @Test + public void testCreateInstanceAndDatabase_bothFailWithAlreadyExists() + throws InterruptedException, ExecutionException { + Spanner spanner = mock(Spanner.class); + SpannerOptions options = mock(SpannerOptions.class); + when(spanner.getOptions()).thenReturn(options); + when(options.getCredentials()).thenReturn(NoCredentials.getInstance()); + + InstanceAdminClient instanceClient = mock(InstanceAdminClient.class); + @SuppressWarnings("unchecked") + OperationFuture instanceOperationFuture = + mock(OperationFuture.class); + + when(spanner.getInstanceAdminClient()).thenReturn(instanceClient); + when(instanceClient.createInstance(any(InstanceInfo.class))) + .thenReturn(instanceOperationFuture); + when(instanceOperationFuture.get()) + .thenThrow( + new ExecutionException( + SpannerExceptionFactory.newSpannerException( + ErrorCode.ALREADY_EXISTS, "Instance already exists"))); + + DatabaseAdminClient databaseClient = mock(DatabaseAdminClient.class); + @SuppressWarnings("unchecked") + OperationFuture databaseOperationFuture = + mock(OperationFuture.class); + + when(spanner.getDatabaseAdminClient()).thenReturn(databaseClient); + when(databaseClient.createDatabase( + eq("test-instance"), + eq(dialect.createDatabaseStatementFor("test-database")), + eq(dialect), + eq(ImmutableList.of()))) + .thenReturn(databaseOperationFuture); + when(databaseOperationFuture.get()) + .thenThrow( + new ExecutionException( + SpannerExceptionFactory.newSpannerException( + ErrorCode.ALREADY_EXISTS, "Database already exists"))); + + 
maybeCreateInstanceAndDatabase( + spanner, DatabaseId.of("test-project", "test-instance", "test-database"), dialect); + + // Verify that both the instance and the database was created. + verify(instanceClient) + .createInstance( + InstanceInfo.newBuilder(InstanceId.of("test-project", "test-instance")) + .setDisplayName("Automatically Generated Test Instance") + .setInstanceConfigId(InstanceConfigId.of("test-project", "emulator-config")) + .setNodeCount(1) + .build()); + verify(databaseClient) + .createDatabase( + "test-instance", + dialect.createDatabaseStatementFor("test-database"), + dialect, + ImmutableList.of()); + } + + @Test + public void testCreateInstanceAndDatabase_propagatesOtherErrorsOnInstanceCreation() + throws InterruptedException, ExecutionException { + Spanner spanner = mock(Spanner.class); + SpannerOptions options = mock(SpannerOptions.class); + when(spanner.getOptions()).thenReturn(options); + when(options.getCredentials()).thenReturn(NoCredentials.getInstance()); + + InstanceAdminClient instanceClient = mock(InstanceAdminClient.class); + @SuppressWarnings("unchecked") + OperationFuture instanceOperationFuture = + mock(OperationFuture.class); + + when(spanner.getInstanceAdminClient()).thenReturn(instanceClient); + when(instanceClient.createInstance(any(InstanceInfo.class))) + .thenReturn(instanceOperationFuture); + when(instanceOperationFuture.get()) + .thenThrow( + new ExecutionException( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Invalid instance options"))); + + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + maybeCreateInstanceAndDatabase( + spanner, + DatabaseId.of("test-project", "test-instance", "test-database"), + dialect)); + assertEquals(ErrorCode.INVALID_ARGUMENT, exception.getErrorCode()); + } + + @Test + public void testCreateInstanceAndDatabase_propagatesInterruptsOnInstanceCreation() + throws InterruptedException, ExecutionException { + Spanner spanner = 
mock(Spanner.class); + SpannerOptions options = mock(SpannerOptions.class); + when(spanner.getOptions()).thenReturn(options); + when(options.getCredentials()).thenReturn(NoCredentials.getInstance()); + + InstanceAdminClient instanceClient = mock(InstanceAdminClient.class); + @SuppressWarnings("unchecked") + OperationFuture instanceOperationFuture = + mock(OperationFuture.class); + + when(spanner.getInstanceAdminClient()).thenReturn(instanceClient); + when(instanceClient.createInstance(any(InstanceInfo.class))) + .thenReturn(instanceOperationFuture); + when(instanceOperationFuture.get()).thenThrow(new InterruptedException()); + + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + maybeCreateInstanceAndDatabase( + spanner, + DatabaseId.of("test-project", "test-instance", "test-database"), + dialect)); + assertEquals(ErrorCode.CANCELLED, exception.getErrorCode()); + } + + @Test + public void testCreateInstanceAndDatabase_propagatesOtherErrorsOnDatabaseCreation() + throws InterruptedException, ExecutionException { + Spanner spanner = mock(Spanner.class); + SpannerOptions options = mock(SpannerOptions.class); + when(spanner.getOptions()).thenReturn(options); + when(options.getCredentials()).thenReturn(NoCredentials.getInstance()); + + InstanceAdminClient instanceClient = mock(InstanceAdminClient.class); + @SuppressWarnings("unchecked") + OperationFuture instanceOperationFuture = + mock(OperationFuture.class); + + when(spanner.getInstanceAdminClient()).thenReturn(instanceClient); + when(instanceClient.createInstance(any(InstanceInfo.class))) + .thenReturn(instanceOperationFuture); + when(instanceOperationFuture.get()).thenReturn(mock(Instance.class)); + + DatabaseAdminClient databaseClient = mock(DatabaseAdminClient.class); + @SuppressWarnings("unchecked") + OperationFuture databaseOperationFuture = + mock(OperationFuture.class); + + when(spanner.getDatabaseAdminClient()).thenReturn(databaseClient); + when(databaseClient.createDatabase( + 
eq("test-instance"), + eq(dialect.createDatabaseStatementFor("test-database")), + eq(dialect), + eq(ImmutableList.of()))) + .thenReturn(databaseOperationFuture); + when(databaseOperationFuture.get()) + .thenThrow( + new ExecutionException( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Invalid database options"))); + + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + maybeCreateInstanceAndDatabase( + spanner, + DatabaseId.of("test-project", "test-instance", "test-database"), + dialect)); + assertEquals(ErrorCode.INVALID_ARGUMENT, exception.getErrorCode()); + } + + @Test + public void testCreateInstanceAndDatabase_propagatesInterruptsOnDatabaseCreation() + throws InterruptedException, ExecutionException { + Spanner spanner = mock(Spanner.class); + SpannerOptions options = mock(SpannerOptions.class); + when(spanner.getOptions()).thenReturn(options); + when(options.getCredentials()).thenReturn(NoCredentials.getInstance()); + + InstanceAdminClient instanceClient = mock(InstanceAdminClient.class); + @SuppressWarnings("unchecked") + OperationFuture instanceOperationFuture = + mock(OperationFuture.class); + + when(spanner.getInstanceAdminClient()).thenReturn(instanceClient); + when(instanceClient.createInstance(any(InstanceInfo.class))) + .thenReturn(instanceOperationFuture); + when(instanceOperationFuture.get()).thenReturn(mock(Instance.class)); + + DatabaseAdminClient databaseClient = mock(DatabaseAdminClient.class); + @SuppressWarnings("unchecked") + OperationFuture databaseOperationFuture = + mock(OperationFuture.class); + + when(spanner.getDatabaseAdminClient()).thenReturn(databaseClient); + when(databaseClient.createDatabase( + eq("test-instance"), + eq(dialect.createDatabaseStatementFor("test-database")), + eq(dialect), + eq(ImmutableList.of()))) + .thenReturn(databaseOperationFuture); + when(databaseOperationFuture.get()).thenThrow(new InterruptedException()); + + SpannerException exception = + 
assertThrows( + SpannerException.class, + () -> + maybeCreateInstanceAndDatabase( + spanner, + DatabaseId.of("test-project", "test-instance", "test-database"), + dialect)); + assertEquals(ErrorCode.CANCELLED, exception.getErrorCode()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ExceptionMockServerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ExceptionMockServerTest.java new file mode 100644 index 000000000000..fcc05405b2d9 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ExceptionMockServerTest.java @@ -0,0 +1,280 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.StatementResult.ResultType; +import com.google.common.collect.ImmutableSet; +import com.google.longrunning.Operation; +import com.google.protobuf.Any; +import com.google.protobuf.Empty; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import java.util.Arrays; +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ExceptionMockServerTest extends AbstractMockServerTest { + private static final Statement UPDATE_STATEMENT = + Statement.of("update foo set bar=1 where baz=1"); + private static final Statement SELECT_STATEMENT = Statement.of("select * from foo"); + private static final StatusRuntimeException NOT_FOUND_EXCEPTION = + Status.INVALID_ARGUMENT.withDescription("Table 'foo' not found").asRuntimeException(); + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + @Test + public void testUpdateAsyncException() { + mockSpanner.putStatementResult( + StatementResult.exception(UPDATE_STATEMENT, NOT_FOUND_EXCEPTION)); + + try (Connection connection = createConnection()) { + SpannerException exception = + assertThrows( + SpannerException.class, () -> 
get(connection.executeUpdateAsync(UPDATE_STATEMENT))); + assertNotNull(exception.getSuppressed()); + assertEquals(1, exception.getSuppressed().length); + Throwable suppressed = exception.getSuppressed()[0]; + String methodName = new Object() {}.getClass().getEnclosingMethod().getName(); + assertTrue( + Arrays.stream(suppressed.getStackTrace()) + .anyMatch( + element -> + element.getClassName().equals(ExceptionMockServerTest.class.getName()) + && element.getMethodName().equals(methodName))); + } + } + + @Test + public void testUpdateException() { + mockSpanner.putStatementResult( + StatementResult.exception(UPDATE_STATEMENT, NOT_FOUND_EXCEPTION)); + + try (Connection connection = createConnection()) { + SpannerException exception = + assertThrows(SpannerException.class, () -> connection.executeUpdate(UPDATE_STATEMENT)); + assertNotNull(exception.getSuppressed()); + assertEquals(0, exception.getSuppressed().length); + } + } + + @Test + public void testQueryAsyncException() { + mockSpanner.putStatementResult( + StatementResult.exception(SELECT_STATEMENT, NOT_FOUND_EXCEPTION)); + + try (Connection connection = createConnection()) { + SpannerException exception = + assertThrows( + SpannerException.class, + () -> connection.executeQueryAsync(SELECT_STATEMENT).toList(row -> row)); + assertNotNull(exception.getSuppressed()); + assertEquals(1, exception.getSuppressed().length); + Throwable suppressed = exception.getSuppressed()[0]; + String methodName = new Object() {}.getClass().getEnclosingMethod().getName(); + assertTrue( + Arrays.stream(suppressed.getStackTrace()) + .anyMatch( + element -> + element.getClassName().equals(ExceptionMockServerTest.class.getName()) + && element.getMethodName().equals(methodName))); + } + } + + @Test + public void testQueryException() { + mockSpanner.putStatementResult( + StatementResult.exception(SELECT_STATEMENT, NOT_FOUND_EXCEPTION)); + + try (Connection connection = createConnection()) { + SpannerException exception = + assertThrows( + 
SpannerException.class, () -> connection.executeQuery(SELECT_STATEMENT).next()); + assertNotNull(exception.getSuppressed()); + assertEquals(0, exception.getSuppressed().length); + } + } + + @Test + public void testCommitAsyncException() { + mockSpanner.setCommitExecutionTime(SimulatedExecutionTime.ofException(NOT_FOUND_EXCEPTION)); + + try (Connection connection = createConnection()) { + connection.bufferedWrite(Mutation.newInsertBuilder("foo").set("id").to(1L).build()); + SpannerException exception = + assertThrows(SpannerException.class, () -> get(connection.commitAsync())); + assertNotNull(exception.getSuppressed()); + assertEquals(1, exception.getSuppressed().length); + Throwable suppressed = exception.getSuppressed()[0]; + String methodName = new Object() {}.getClass().getEnclosingMethod().getName(); + assertTrue( + Arrays.stream(suppressed.getStackTrace()) + .anyMatch( + element -> + element.getClassName().equals(ExceptionMockServerTest.class.getName()) + && element.getMethodName().equals(methodName))); + } + } + + @Test + public void testCommitException() { + mockSpanner.setCommitExecutionTime(SimulatedExecutionTime.ofException(NOT_FOUND_EXCEPTION)); + + try (Connection connection = createConnection()) { + connection.bufferedWrite(Mutation.newInsertBuilder("foo").set("id").to(1L).build()); + SpannerException exception = assertThrows(SpannerException.class, connection::commit); + assertNotNull(exception.getSuppressed()); + assertEquals(0, exception.getSuppressed().length); + } + } + + @Test + public void testAllowedResultType() { + mockSpanner.putStatementResult( + StatementResult.query(SELECT_COUNT_STATEMENT, SELECT_COUNT_RESULTSET_BEFORE_INSERT)); + mockSpanner.putStatementResult(StatementResult.update(INSERT_STATEMENT, 1L)); + mockSpanner.putStatementResult( + StatementResult.updateReturning( + INSERT_RETURNING_STATEMENT, SELECT_COUNT_RESULTSET_AFTER_INSERT)); + mockDatabaseAdmin.addResponse( + Operation.newBuilder() + .setDone(true) + 
.setResponse(Any.pack(Empty.getDefaultInstance())) + .setMetadata(Any.pack(UpdateDatabaseDdlMetadata.getDefaultInstance())) + .build()); + Statement ddl = Statement.of("create table foo"); + + try (Connection connection = createConnection()) { + assertEquals( + ErrorCode.INVALID_ARGUMENT, + assertThrows( + SpannerException.class, + () -> connection.execute(SELECT_COUNT_STATEMENT, ImmutableSet.of())) + .getErrorCode()); + assertEquals( + ErrorCode.INVALID_ARGUMENT, + assertThrows( + SpannerException.class, + () -> + connection.execute( + SELECT_COUNT_STATEMENT, ImmutableSet.of(ResultType.UPDATE_COUNT))) + .getErrorCode()); + assertEquals( + ErrorCode.INVALID_ARGUMENT, + assertThrows( + SpannerException.class, + () -> + connection.execute( + SELECT_COUNT_STATEMENT, ImmutableSet.of(ResultType.NO_RESULT))) + .getErrorCode()); + assertNotNull( + connection + .execute(SELECT_COUNT_STATEMENT, ImmutableSet.of(ResultType.RESULT_SET)) + .getResultSet()); + + assertEquals( + ErrorCode.INVALID_ARGUMENT, + assertThrows( + SpannerException.class, + () -> connection.execute(INSERT_STATEMENT, ImmutableSet.of())) + .getErrorCode()); + assertEquals( + ErrorCode.INVALID_ARGUMENT, + assertThrows( + SpannerException.class, + () -> + connection.execute(INSERT_STATEMENT, ImmutableSet.of(ResultType.RESULT_SET))) + .getErrorCode()); + assertEquals( + ErrorCode.INVALID_ARGUMENT, + assertThrows( + SpannerException.class, + () -> connection.execute(INSERT_STATEMENT, ImmutableSet.of(ResultType.NO_RESULT))) + .getErrorCode()); + assertNotNull( + connection + .execute(INSERT_STATEMENT, ImmutableSet.of(ResultType.UPDATE_COUNT)) + .getUpdateCount()); + + assertEquals( + ErrorCode.INVALID_ARGUMENT, + assertThrows( + SpannerException.class, + () -> connection.execute(INSERT_RETURNING_STATEMENT, ImmutableSet.of())) + .getErrorCode()); + assertEquals( + ErrorCode.INVALID_ARGUMENT, + assertThrows( + SpannerException.class, + () -> + connection.execute( + INSERT_RETURNING_STATEMENT, 
ImmutableSet.of(ResultType.UPDATE_COUNT))) + .getErrorCode()); + assertEquals( + ErrorCode.INVALID_ARGUMENT, + assertThrows( + SpannerException.class, + () -> + connection.execute( + INSERT_RETURNING_STATEMENT, ImmutableSet.of(ResultType.NO_RESULT))) + .getErrorCode()); + assertNotNull( + connection + .execute(INSERT_RETURNING_STATEMENT, ImmutableSet.of(ResultType.RESULT_SET)) + .getResultSet()); + + // Commit the current transaction and switch to autocommit to allow DDL. + connection.commit(); + connection.setAutocommit(true); + assertEquals( + ErrorCode.INVALID_ARGUMENT, + assertThrows(SpannerException.class, () -> connection.execute(ddl, ImmutableSet.of())) + .getErrorCode()); + assertEquals( + ErrorCode.INVALID_ARGUMENT, + assertThrows( + SpannerException.class, + () -> connection.execute(ddl, ImmutableSet.of(ResultType.RESULT_SET))) + .getErrorCode()); + assertEquals( + ErrorCode.INVALID_ARGUMENT, + assertThrows( + SpannerException.class, + () -> connection.execute(ddl, ImmutableSet.of(ResultType.UPDATE_COUNT))) + .getErrorCode()); + assertNotNull(connection.execute(ddl, ImmutableSet.of(ResultType.NO_RESULT))); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ExcludeTxnFromChangeStreamsMockServerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ExcludeTxnFromChangeStreamsMockServerTest.java new file mode 100644 index 000000000000..5fc8fc7cfef6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ExcludeTxnFromChangeStreamsMockServerTest.java @@ -0,0 +1,236 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.common.collect.ImmutableList; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ExcludeTxnFromChangeStreamsMockServerTest extends AbstractMockServerTest { + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + @Test + public void testAutoCommit_includedByDefault() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.executeUpdate(INSERT_STATEMENT); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertFalse(request.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testAutoCommitUpdate() { + try 
(Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setExcludeTxnFromChangeStreams(true); + connection.executeUpdate(INSERT_STATEMENT); + + // Verify that the setting is reset after executing a transaction. + assertFalse(connection.isExcludeTxnFromChangeStreams()); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertTrue(request.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testAutoCommitBatchDml() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setExcludeTxnFromChangeStreams(true); + connection.executeBatchUpdate(ImmutableList.of(INSERT_STATEMENT, INSERT_STATEMENT)); + + assertFalse(connection.isExcludeTxnFromChangeStreams()); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertTrue(request.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testAutoCommitUpdateReturning() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setExcludeTxnFromChangeStreams(true); + connection.executeQuery(INSERT_RETURNING_STATEMENT); + + assertFalse(connection.isExcludeTxnFromChangeStreams()); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + 
assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertTrue(request.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testPartitionedDml() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setAutocommitDmlMode(AutocommitDmlMode.PARTITIONED_NON_ATOMIC); + connection.setExcludeTxnFromChangeStreams(true); + connection.executeUpdate(INSERT_STATEMENT); + + // Verify that the setting is reset after executing a transaction. + assertFalse(connection.isExcludeTxnFromChangeStreams()); + } + + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + BeginTransactionRequest request = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class).get(0); + assertTrue(request.hasOptions()); + assertTrue(request.getOptions().hasPartitionedDml()); + assertTrue(request.getOptions().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testTransaction_includedByDefault() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.executeUpdate(INSERT_STATEMENT); + connection.commit(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertFalse(request.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testTransactionUpdate() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setExcludeTxnFromChangeStreams(true); + connection.executeUpdate(INSERT_STATEMENT); + connection.commit(); + + // Verify that the setting is reset after executing a 
transaction. + assertFalse(connection.isExcludeTxnFromChangeStreams()); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertTrue(request.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testTransactionBatchDml() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setExcludeTxnFromChangeStreams(true); + connection.executeBatchUpdate(ImmutableList.of(INSERT_STATEMENT, INSERT_STATEMENT)); + connection.commit(); + + assertFalse(connection.isExcludeTxnFromChangeStreams()); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertTrue(request.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testTransactionUpdateReturning() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setExcludeTxnFromChangeStreams(true); + connection.executeQuery(INSERT_RETURNING_STATEMENT); + connection.commit(); + + assertFalse(connection.isExcludeTxnFromChangeStreams()); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + 
assertTrue(request.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testSqlStatements() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + + connection.execute(Statement.of("set exclude_txn_from_change_streams = true")); + assertTrue(connection.isExcludeTxnFromChangeStreams()); + + try (ResultSet resultSet = + connection + .execute(Statement.of("show variable exclude_txn_from_change_streams")) + .getResultSet()) { + assertTrue(resultSet.next()); + assertTrue(resultSet.getBoolean("EXCLUDE_TXN_FROM_CHANGE_STREAMS")); + assertFalse(resultSet.next()); + } + + connection.setAutocommit(false); + connection.execute(Statement.of("set exclude_txn_from_change_streams = true")); + assertTrue(connection.isExcludeTxnFromChangeStreams()); + connection.execute(INSERT_STATEMENT); + assertThrows( + SpannerException.class, + () -> connection.execute(Statement.of("set exclude_txn_from_change_streams=false"))); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ExplainCommandConverterTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ExplainCommandConverterTest.java new file mode 100644 index 000000000000..b927668d4722 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ExplainCommandConverterTest.java @@ -0,0 +1,84 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; + +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.ExplainCommandConverter; +import org.junit.Test; + +public class ExplainCommandConverterTest { + @Test + public void testConvert() { + ExplainCommandConverter explainCommandConverter = new ExplainCommandConverter(); + assertEquals( + "select * from table1", explainCommandConverter.convert("explain select * from table1")); + assertEquals( + "select * \t from table1", + explainCommandConverter.convert("explain \tselect * \t from table1")); + assertEquals( + "select * \t from table1", + explainCommandConverter.convert("EXPLAIN \tselect * \t from table1")); + assertEquals( + "select * \t from table1", + explainCommandConverter.convert("ExplAIn \tselect * \t from table1")); + assertEquals( + "select * \t from table1", + explainCommandConverter.convert("explain \n select * \t from table1")); + assertEquals( + "select * \t from table1", + explainCommandConverter.convert("explain \n \t select * \t from table1")); + assertEquals("foo", explainCommandConverter.convert("explain foo")); + assertEquals(null, explainCommandConverter.convert("explain")); + + assertEquals( + "analyze select * from table1", + explainCommandConverter.convert("explain analyze select * from table1")); + assertEquals( + "analyze \tselect * \t from table1", + explainCommandConverter.convert("explain \t analyze \tselect * \t from table1")); + assertEquals( + "analyze \n select * \t from table1", + explainCommandConverter.convert("explain \n analyze \n select * \t from table1")); + assertEquals( + "ANALYZE \n select * \t from table1", + explainCommandConverter.convert("EXPLAIN \n ANALYZE \n select * \t from table1")); + assertEquals( + "aNALyzE \n select * \t from table1", + explainCommandConverter.convert("ExPLaiN \n 
aNALyzE \n select * \t from table1")); + assertEquals( + "analyze \t select * \t from table1", + explainCommandConverter.convert("explain \n analyze \t select * \t from table1")); + assertEquals("analyze foo", explainCommandConverter.convert("explain analyze foo")); + assertEquals("analyze", explainCommandConverter.convert("explain analyze")); + assertEquals( + "(analyze \t select * \t from table1", + explainCommandConverter.convert("explain \n (analyze \t select * \t from table1")); + assertEquals( + "(analyze \t select * \t from table1)", + explainCommandConverter.convert("explain \n (analyze \t select * \t from table1)")); + + assertEquals( + "analyse select * from table1", + explainCommandConverter.convert("explain analyse select * from table1")); + assertEquals( + "analyse \tselect * \t from table1", + explainCommandConverter.convert("explain \t analyse \tselect * \t from table1")); + assertEquals("analyse foo", explainCommandConverter.convert("explain analyse foo")); + assertEquals("analyse", explainCommandConverter.convert("explain analyse")); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ExplainTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ExplainTest.java new file mode 100644 index 000000000000..1aeb394cd9e2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ExplainTest.java @@ -0,0 +1,330 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.fail; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.protobuf.Struct; +import com.google.protobuf.Value; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ExecuteSqlRequest.QueryMode; +import com.google.spanner.v1.PlanNode; +import com.google.spanner.v1.QueryPlan; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeCode; +import java.util.List; +import org.junit.After; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ExplainTest extends AbstractMockServerTest { + private static final Statement EXPLAIN_STATEMENT_QUERY = + Statement.of("SELECT * FROM SomeTable ORDER BY Value"); + + @BeforeClass + public static void setupAnalyzeResults() { + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.detectDialectResult(Dialect.POSTGRESQL)); + Struct metadata = + Struct.newBuilder() + .putFields("subquery_cluster_node", Value.newBuilder().setStringValue("1").build()) + .build(); + Struct cpuTime = + Struct.newBuilder() + .putFields("unit", Value.newBuilder().setStringValue("msec").build()) + .putFields("total_time", Value.newBuilder().setStringValue("10").build()) + .build(); + 
Struct executionStats = + Struct.newBuilder() + .putFields("cpu_time", Value.newBuilder().setStructValue(cpuTime).build()) + .build(); + ResultSetStats resultSetStats = + ResultSetStats.newBuilder() + .setQueryPlan( + QueryPlan.newBuilder() + .addPlanNodes( + PlanNode.newBuilder() + .setDisplayName("some-plan-node") + .setMetadata(metadata) + .setExecutionStats(executionStats) + .build()) + .build()) + .build(); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.query( + EXPLAIN_STATEMENT_QUERY, + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .setName("Key") + .build()) + .addFields( + Field.newBuilder() + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .setName("Value") + .build()) + .build()) + .build()) + .setStats(resultSetStats) + .build())); + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + private void testExplain(String statement) { + mockSpanner.clearRequests(); + final Statement explainStatement = Statement.of(statement); + + try (Connection connection = createConnection()) { + + try (ResultSet resultSet = connection.execute(explainStatement).getResultSet()) { + int count = 0; + + while (resultSet.next()) { + if (count == 1) { + fail( + "The resultset was expected t contains exactly 1 row but it contains more than 1" + + " row"); + } + + ++count; + + com.google.cloud.spanner.Struct row = resultSet.getCurrentRowAsStruct(); + + assertEquals(1, row.getColumnCount()); + assertNotNull(row.getString("QUERY PLAN")); + String expectedQueryPlan = "some-plan-node : { subquery_cluster_node : 1 }"; + assertEquals(expectedQueryPlan, row.getString("QUERY PLAN")); + } + } + } + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(1, requests.size()); + ExecuteSqlRequest 
request = requests.get(0); + assertEquals(EXPLAIN_STATEMENT_QUERY.getSql(), request.getSql()); + assertEquals(QueryMode.PLAN, request.getQueryMode()); + } + + private void testExplainAnalyze(String statement) { + mockSpanner.clearRequests(); + final Statement explainAnalyzeStatement = Statement.of(statement); + try (Connection connection = createConnection()) { + + try (ResultSet resultSet = connection.execute(explainAnalyzeStatement).getResultSet()) { + int count = 0; + + while (resultSet.next()) { + if (count == 1) { + fail( + "The resultset was expected t contains exactly 1 row but it contains more than 1" + + " row"); + } + + ++count; + + com.google.cloud.spanner.Struct row = resultSet.getCurrentRowAsStruct(); + + assertEquals(2, row.getColumnCount()); + + assertNotNull(row.getString("QUERY PLAN")); + String expectedQueryPlan = "some-plan-node : { subquery_cluster_node : 1 }"; + assertEquals(expectedQueryPlan, row.getString("QUERY PLAN")); + + assertNotNull(row.getString("EXECUTION STATS")); + String expectedExecutionStats = "cpu_time : { unit : msec , total_time : 10 }"; + assertEquals(expectedExecutionStats, row.getString("EXECUTION STATS")); + } + } + } + + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + assertEquals(1, requests.size()); + ExecuteSqlRequest request = requests.get(0); + assertEquals(EXPLAIN_STATEMENT_QUERY.getSql(), request.getSql()); + assertEquals(QueryMode.PROFILE, request.getQueryMode()); + } + + @Test + public void testValidExplain() { + String statement = "Explain " + EXPLAIN_STATEMENT_QUERY; + testExplain(statement); + + statement = "explain " + EXPLAIN_STATEMENT_QUERY; + testExplain(statement); + + statement = "explain " + EXPLAIN_STATEMENT_QUERY; + testExplain(statement); + + statement = "explain \t (" + EXPLAIN_STATEMENT_QUERY + ") "; + testExplain(statement); + + statement = " explain \t ( \n " + EXPLAIN_STATEMENT_QUERY + " ) "; + testExplain(statement); + + statement = " ExpLAin ( " + 
EXPLAIN_STATEMENT_QUERY + " ) "; + testExplain(statement); + + statement = " EXPLAIN ( " + EXPLAIN_STATEMENT_QUERY + " ) "; + testExplain(statement); + } + + @Test + public void testValidExplainWithFalseAnalyze() { + String statement = " explain (analyze false) " + EXPLAIN_STATEMENT_QUERY; + testExplain(statement); + + statement = " explain (analyze FALSE) " + EXPLAIN_STATEMENT_QUERY + " "; + testExplain(statement); + + statement = " explain (analyze fAlsE) ( " + EXPLAIN_STATEMENT_QUERY + " ) "; + testExplain(statement); + + statement = " explain (analyze 0) ( " + EXPLAIN_STATEMENT_QUERY + " ) "; + testExplain(statement); + + statement = " explain (analyze off) ( " + EXPLAIN_STATEMENT_QUERY + " ) "; + testExplain(statement); + + statement = + " explain (analyze false, analyze true, analyze false, analyze false) " + + EXPLAIN_STATEMENT_QUERY; + testExplain(statement); + + statement = + " explain ( analyze off , analyze true , analyze 0 ) ( " + + EXPLAIN_STATEMENT_QUERY + + " ) "; + testExplain(statement); + + statement = + " explain ( analyze off , analyze 0 , analyze 0 ) ( " + + EXPLAIN_STATEMENT_QUERY + + " ) "; + testExplain(statement); + + statement = + " explain ( analyze off , analyze, analyze 0 , analyze false ) ( " + + EXPLAIN_STATEMENT_QUERY + + " ) "; + testExplain(statement); + } + + @Test + public void testValidExplainAnalyze() { + String statement = "Explain analyze " + EXPLAIN_STATEMENT_QUERY; + testExplainAnalyze(statement); + + statement = "explain analyze " + EXPLAIN_STATEMENT_QUERY; + testExplainAnalyze(statement); + + statement = "explain analyze " + EXPLAIN_STATEMENT_QUERY; + testExplainAnalyze(statement); + + statement = "explain analyze (" + EXPLAIN_STATEMENT_QUERY + ") "; + testExplainAnalyze(statement); + + statement = " explain ( analyze true ) ( " + EXPLAIN_STATEMENT_QUERY + " ) "; + testExplainAnalyze(statement); + + statement = " ExpLAin( analyze 1 ) ( " + EXPLAIN_STATEMENT_QUERY + " ) "; + testExplainAnalyze(statement); + + statement = " 
ExpLAin( analyze On ) ( " + EXPLAIN_STATEMENT_QUERY + " ) "; + testExplainAnalyze(statement); + + statement = " EXPLAIN(analyze)( " + EXPLAIN_STATEMENT_QUERY + " ) "; + testExplainAnalyze(statement); + + statement = + " EXPLAIN(analyze , analyze false , analyze 1)( " + EXPLAIN_STATEMENT_QUERY + " ) "; + testExplainAnalyze(statement); + + statement = + " EXPLAIN(analyze , aNAlyzE false , analyze )( " + EXPLAIN_STATEMENT_QUERY + " ) "; + testExplainAnalyze(statement); + + statement = + " EXPLAIN(analyze off , analyze false , AnalYZE )( " + + EXPLAIN_STATEMENT_QUERY + + " ) "; + testExplainAnalyze(statement); + + statement = + " EXPLAIN(analyze \n off , analyze false , analyze )( " + + EXPLAIN_STATEMENT_QUERY + + " \t ) "; + testExplainAnalyze(statement); + + statement = + " EXPLAIN(analyse \n off , analyze false , analyse )( " + + EXPLAIN_STATEMENT_QUERY + + " \t ) "; + testExplainAnalyze(statement); + } + + @Test + public void testInvalidExplain() { + + String statement = " explain verbose " + EXPLAIN_STATEMENT_QUERY; + assertThrows(SpannerException.class, () -> testExplain(statement)); + + String statement2 = " explain foo " + EXPLAIN_STATEMENT_QUERY; + assertThrows(SpannerException.class, () -> testExplain(statement2)); + + String statement3 = " explain analyze analyze " + EXPLAIN_STATEMENT_QUERY; + assertThrows(SpannerException.class, () -> testExplain(statement3)); + + String statement4 = " explain analyze true " + EXPLAIN_STATEMENT_QUERY; + assertThrows(SpannerException.class, () -> testExplain(statement4)); + + String statement5 = " explain (analyze true , verbose ) " + EXPLAIN_STATEMENT_QUERY; + assertThrows(SpannerException.class, () -> testExplain(statement5)); + + String statement6 = " explain (analyze hello) " + EXPLAIN_STATEMENT_QUERY; + assertThrows(SpannerException.class, () -> testExplain(statement6)); + + String statement7 = " explain (analyze true , verbose , costs ) " + EXPLAIN_STATEMENT_QUERY; + assertThrows(SpannerException.class, () -> 
testExplain(statement7)); + + String statement8 = " explain (analyze true , verbose , costs " + EXPLAIN_STATEMENT_QUERY; + assertThrows(SpannerException.class, () -> testExplain(statement8)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/GrpcInterceptorProviderTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/GrpcInterceptorProviderTest.java new file mode 100644 index 000000000000..0845d1d9c360 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/GrpcInterceptorProviderTest.java @@ -0,0 +1,117 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.api.gax.grpc.GrpcInterceptorProvider; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.common.collect.ImmutableList; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.MethodDescriptor; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class GrpcInterceptorProviderTest extends AbstractMockServerTest { + private static final AtomicBoolean INTERCEPTOR_CALLED = new AtomicBoolean(false); + + public static final class TestGrpcInterceptorProvider implements GrpcInterceptorProvider { + @Override + public List getInterceptors() { + return ImmutableList.of( + new ClientInterceptor() { + @Override + public ClientCall interceptCall( + MethodDescriptor method, CallOptions callOptions, Channel next) { + INTERCEPTOR_CALLED.set(true); + return next.newCall(method, callOptions); + } + }); + } + } + + @Before + public void clearInterceptorUsedFlag() { + INTERCEPTOR_CALLED.set(false); + } + + @Test + public void testGrpcInterceptorProviderIsNotUsedByDefault() { + assertFalse(INTERCEPTOR_CALLED.get()); + try (Connection connection = createConnection()) { + try (ResultSet resultSet = connection.executeQuery(SELECT1_STATEMENT)) { + while (resultSet.next()) { + // ignore + } + } + } + assertFalse(INTERCEPTOR_CALLED.get()); + } + + @Test + public void testGrpcInterceptorProviderIsUsedWhenConfigured() { + System.setProperty("ENABLE_GRPC_INTERCEPTOR_PROVIDER", "true"); + 
assertFalse(INTERCEPTOR_CALLED.get()); + try (Connection connection = + createConnection( + ";grpc_interceptor_provider=" + TestGrpcInterceptorProvider.class.getName())) { + try (ResultSet resultSet = connection.executeQuery(SELECT1_STATEMENT)) { + while (resultSet.next()) { + // ignore + } + } + } finally { + System.clearProperty("ENABLE_GRPC_INTERCEPTOR_PROVIDER"); + } + assertTrue(INTERCEPTOR_CALLED.get()); + } + + @Test + public void testGrpcInterceptorProviderRequiresSystemProperty() { + assertFalse(INTERCEPTOR_CALLED.get()); + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + createConnection( + ";grpc_interceptor_provider=" + TestGrpcInterceptorProvider.class.getName())); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + assertTrue( + exception.getMessage(), + exception + .getMessage() + .contains( + "grpc_interceptor_provider can only be used if the system property" + + " ENABLE_GRPC_INTERCEPTOR_PROVIDER has been set to true. Start the" + + " application with the JVM command line option" + + " -DENABLE_GRPC_INTERCEPTOR_PROVIDER=true")); + assertFalse(INTERCEPTOR_CALLED.get()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ITAbstractSpannerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ITAbstractSpannerTest.java new file mode 100644 index 000000000000..5194d64eef6e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ITAbstractSpannerTest.java @@ -0,0 +1,391 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.testing.ExperimentalHostHelper.appendExperimentalHost; +import static com.google.cloud.spanner.testing.ExperimentalHostHelper.isExperimentalHost; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.GceTestEnvConfig; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TransactionManager; +import com.google.cloud.spanner.TransactionManager.TransactionState; +import com.google.cloud.spanner.connection.AbstractSqlScriptVerifier.GenericConnection; +import com.google.cloud.spanner.connection.AbstractSqlScriptVerifier.GenericConnectionProvider; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.SqlScriptVerifier.SpannerGenericConnection; +import com.google.common.base.Preconditions; +import com.google.common.base.Stopwatch; +import com.google.common.base.Strings; +import com.google.rpc.RetryInfo; +import io.grpc.Metadata; +import io.grpc.StatusRuntimeException; +import io.grpc.protobuf.ProtoUtils; +import java.lang.reflect.Field; +import java.nio.file.Files; +import java.nio.file.Paths; +import 
java.util.Collections; +import java.util.List; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; + +/** + * Base class for integration tests. This class is located in this package to be able to access + * package-private methods of the Connection API + */ +public abstract class ITAbstractSpannerTest { + protected class ITConnectionProvider implements GenericConnectionProvider { + public ITConnectionProvider() {} + + @Override + public GenericConnection getConnection() { + return SpannerGenericConnection.of(createConnection()); + } + } + + public interface ITConnection extends Connection {} + + private ITConnection createITConnection(ConnectionOptions options) { + return new ITConnectionImpl(options); + } + + protected void closeSpanner() { + ConnectionOptions.closeSpanner(); + } + + public static class AbortInterceptor implements StatementExecutionInterceptor { + /** We need to replicate the enum here as it is not visible outside the connection package */ + public enum ExecutionStep { + /** The initial execution of a statement (DML/Query) */ + EXECUTE_STATEMENT, + /** A call to {@link ResultSet#next()} */ + CALL_NEXT_ON_RESULT_SET, + /** Execution of the statement during a transaction retry */ + RETRY_STATEMENT, + /** A call to {@link ResultSet#next()} during transaction retry */ + RETRY_NEXT_ON_RESULT_SET; + + static ExecutionStep of(StatementExecutionStep step) { + return ExecutionStep.valueOf(step.name()); + } + } + + private double probability; + private boolean onlyInjectOnce = false; + private final Random random = new Random(); + + private boolean usingMultiplexedsession = false; + + public AbortInterceptor(double probability) { + Preconditions.checkArgument(probability >= 0.0D && probability <= 1.0D); + this.probability = probability; + } + + public void setProbability(double probability) { + 
Preconditions.checkArgument(probability >= 0.0D && probability <= 1.0D); + this.probability = probability; + } + + /** Set this value to true to automatically set the probability to zero after an abort */ + public void setOnlyInjectOnce(boolean value) { + this.onlyInjectOnce = value; + } + + /** + * Set this value to true if a multiplexed session is being used. Determining this directly from + * TransactionManagerImpl is challenging as it is a private class. + */ + public void setUsingMultiplexedSession(boolean value) { + this.usingMultiplexedsession = value; + } + + protected boolean shouldAbort(String statement, ExecutionStep step) { + return probability > random.nextDouble(); + } + + @Override + public void intercept( + ParsedStatement statement, StatementExecutionStep step, UnitOfWork transaction) { + if (shouldAbort(statement.getSql(), ExecutionStep.of(step))) { + // ugly hack warning: inject the aborted state into the transaction manager to simulate an + // abort + if (transaction instanceof ReadWriteTransaction) { + try { + Field field = ReadWriteTransaction.class.getDeclaredField("txManager"); + field.setAccessible(true); + Stopwatch watch = Stopwatch.createStarted(); + while (field.get(transaction) == null && watch.elapsed(TimeUnit.MILLISECONDS) < 100) { + Thread.sleep(1L); + } + TransactionManager tx = (TransactionManager) field.get(transaction); + if (tx == null) { + return; + } + Class cls = Class.forName("com.google.cloud.spanner.TransactionManagerImpl"); + if (usingMultiplexedsession) { + Field stateField = cls.getDeclaredField("txnState"); + stateField.setAccessible(true); + if (tx.getState() == null) { + return; + } + tx.rollback(); + stateField.set(tx, TransactionState.ABORTED); + } else { + Class cls2 = + Class.forName( + "com.google.cloud.spanner.SessionPool$AutoClosingTransactionManager"); + Field delegateField = cls2.getDeclaredField("delegate"); + delegateField.setAccessible(true); + watch = watch.reset().start(); + while 
(delegateField.get(tx) == null && watch.elapsed(TimeUnit.MILLISECONDS) < 100) { + Thread.sleep(1L); + } + TransactionManager delegate = (TransactionManager) delegateField.get(tx); + if (delegate == null) { + return; + } + Field stateField = cls.getDeclaredField("txnState"); + stateField.setAccessible(true); + + // First rollback the delegate, and then pretend it aborted. + // We should call rollback on the delegate and not the wrapping + // AutoClosingTransactionManager, as the latter would cause the session to be returned + // to the session pool. + delegate.rollback(); + stateField.set(delegate, TransactionState.ABORTED); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + if (onlyInjectOnce) { + probability = 0; + } + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.ABORTED, + "Transaction was aborted by interceptor", + createAbortedExceptionWithMinimalRetry()); + } + } + } + + private static StatusRuntimeException createAbortedExceptionWithMinimalRetry() { + Metadata.Key key = ProtoUtils.keyForProto(RetryInfo.getDefaultInstance()); + Metadata trailers = new Metadata(); + RetryInfo retryInfo = + RetryInfo.newBuilder() + .setRetryDelay(com.google.protobuf.Duration.newBuilder().setNanos(1).setSeconds(0L)) + .build(); + trailers.put(key, retryInfo); + return io.grpc.Status.ABORTED.asRuntimeException(trailers); + } + } + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static final String DEFAULT_KEY_FILE = null; + public static Database database; + + public static String getKeyFile() { + return System.getProperty(GceTestEnvConfig.GCE_CREDENTIALS_FILE, DEFAULT_KEY_FILE); + } + + public static boolean hasValidKeyFile() { + return getKeyFile() != null && Files.exists(Paths.get(getKeyFile())); + } + + protected static IntegrationTestEnv getTestEnv() { + return env; + } + + protected static Database getDatabase() { + return database; + } + + /** + * Returns a connection URL that is extracted from the 
given {@link SpannerOptions} and database + * in the form + * cloudspanner:[//host]/projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID + */ + public static StringBuilder extractConnectionUrl(SpannerOptions options, Database database) { + StringBuilder url = new StringBuilder("cloudspanner:"); + if (options.getHost() != null) { + url.append(options.getHost().substring(options.getHost().indexOf(':') + 1)); + } + url.append("/").append(database.getId().getName()); + if (options.getCredentials() == NoCredentials.getInstance()) { + url.append(";usePlainText=true"); + } + if (isExperimentalHost()) { + appendExperimentalHost(url); + } + return url; + } + + @BeforeClass + public static void setup() { + database = env.getTestHelper().createTestDatabase(); + } + + @AfterClass + public static void teardown() { + ConnectionOptions.closeSpanner(); + } + + /** + * Creates a new default connection to a test database. Use the method {@link + * ITAbstractSpannerTest#appendConnectionUri(StringBuilder)} to append additional connection + * options to the connection URI. + * + * @return the newly opened connection. + */ + public ITConnection createConnection() { + return createConnection(Collections.emptyList(), Collections.emptyList()); + } + + public ITConnection createConnection(AbortInterceptor interceptor) { + return createConnection(Collections.singletonList(interceptor), Collections.emptyList()); + } + + public ITConnection createConnection( + AbortInterceptor interceptor, TransactionRetryListener transactionRetryListener) { + return createConnection( + Collections.singletonList(interceptor), + Collections.singletonList(transactionRetryListener)); + } + + /** + * Creates a new default connection to a test database. Use the method {@link + * ITAbstractSpannerTest#appendConnectionUri(StringBuilder)} to append additional connection + * options to the connection URI. 
+ * + * @param interceptors Interceptors that should be executed after each statement + * @param transactionRetryListeners Transaction retry listeners that should be added to the {@link + * Connection} + * @return the newly opened connection. + */ + public ITConnection createConnection( + List interceptors, + List transactionRetryListeners) { + StringBuilder url = + extractConnectionUrl(getTestEnv().getTestHelper().getOptions(), getDatabase()); + appendConnectionUri(url); + ConnectionOptions.Builder builder = + ConnectionOptions.newBuilder() + .setUri(url.toString()) + .setStatementExecutionInterceptors(interceptors); + if (hasValidKeyFile()) { + builder.setCredentialsUrl(getKeyFile()); + } + ConnectionOptions options = builder.build(); + ITConnection connection = createITConnection(options); + for (TransactionRetryListener listener : transactionRetryListeners) { + connection.addTransactionRetryListener(listener); + } + return connection; + } + + protected void appendConnectionUri(StringBuilder uri) {} + + /** + * Override this method to instruct the test to create a default test table in the form: + * + *

    +   * CREATE TABLE TEST (ID INT64 NOT NULL, NAME STRING(100) NOT NULL) PRIMARY KEY (ID)
    +   * 
    + * + * Note that the table is not re-created for each test case, but is preserved between test cases. + * It is the responsibility of the test class to either empty the table at the end of each test + * case, or keep track of the state of the test table and execute the test cases in a specific + * order. + * + * @return true if the default test table should be created. + */ + protected boolean doCreateDefaultTestTable() { + return false; + } + + @Before + public void createTestTable() { + if (doCreateDefaultTestTable()) { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + if (!tableExists(connection, "TEST")) { + connection.setAutocommit(false); + connection.startBatchDdl(); + connection.execute( + Statement.of( + "CREATE TABLE TEST (ID INT64 NOT NULL, NAME STRING(100) NOT NULL) PRIMARY KEY" + + " (ID)")); + connection.runBatch(); + } + } + } + } + + protected boolean tableExists(Connection connection, String table) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(table)); + try (ResultSet rs = + connection.executeQuery( + Statement.newBuilder( + String.format( + "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE" + + " UPPER(TABLE_NAME)=UPPER(\'%s\')", + table)) + .build())) { + while (rs.next()) { + return true; + } + } + return false; + } + + protected boolean indexExists(Connection connection, String table, String index) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(index)); + try (ResultSet rs = + connection.executeQuery( + Statement.newBuilder( + "SELECT INDEX_NAME FROM INFORMATION_SCHEMA.INDEXES WHERE" + + " UPPER(TABLE_NAME)=@table_name AND UPPER(INDEX_NAME)=@index_name") + .bind("table_name") + .to(table) + .bind("index_name") + .to(index.toUpperCase()) + .build())) { + while (rs.next()) { + return true; + } + } + return false; + } + + protected boolean isMultiplexedSessionsEnabledForRW(Spanner spanner) { + if (spanner.getOptions() == null || spanner.getOptions().getSessionPoolOptions() == 
null) { + return false; + } + return spanner.getOptions().getSessionPoolOptions().getUseMultiplexedSessionForRW(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ITConnectionImpl.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ITConnectionImpl.java new file mode 100644 index 000000000000..343b24a10150 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ITConnectionImpl.java @@ -0,0 +1,25 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.connection.ITAbstractSpannerTest.ITConnection; + +/** Implementation of {@link ITConnection} for Spanner generic (not JDBC) connections. 
*/ +public class ITConnectionImpl extends ConnectionImpl implements ITConnection { + ITConnectionImpl(ConnectionOptions options) { + super(options); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/InternalMetadataQueryMockServerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/InternalMetadataQueryMockServerTest.java new file mode 100644 index 000000000000..89c4dd0b0386 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/InternalMetadataQueryMockServerTest.java @@ -0,0 +1,156 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.connection.Connection.InternalMetadataQuery; +import com.google.protobuf.ByteString; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.PartitionQueryRequest; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class InternalMetadataQueryMockServerTest extends AbstractMockServerTest { + private static final Statement STATEMENT = + Statement.of("SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES"); + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + @Parameter public Dialect dialect; + + @BeforeClass + public static void setupInternalMetadataQueryResults() { + mockSpanner.putStatementResult( + StatementResult.query(STATEMENT, SELECT_COUNT_RESULTSET_BEFORE_INSERT)); + } + + @Before + public void setupDialect() { + mockSpanner.putStatementResult(StatementResult.detectDialectResult(dialect)); + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + @Test + public void testInternalMetadataQueryInAutocommit() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + verifyInternalMetadataQuery(connection); + } + } + + @Test + public void 
testInternalMetadataQueryWithStaleness() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setReadOnlyStaleness(TimestampBound.ofMaxStaleness(10L, TimeUnit.SECONDS)); + verifyInternalMetadataQuery(connection); + } + } + + @Test + public void testInternalMetadataQueryReadOnlyTransaction() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(true); + verifyInternalMetadataQuery(connection); + } + } + + @Test + public void testInternalMetadataQueryReadOnlyTransactionWithStaleness() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(true); + connection.setReadOnlyStaleness(TimestampBound.ofExactStaleness(10L, TimeUnit.SECONDS)); + verifyInternalMetadataQuery(connection); + } + } + + @Test + public void testInternalMetadataQueryReadWriteTransaction() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(false); + verifyInternalMetadataQuery(connection); + } + } + + @Test + public void testInternalMetadataQueryInDmlBatch() { + try (Connection connection = createConnection()) { + connection.startBatchDml(); + verifyInternalMetadataQuery(connection); + connection.runBatch(); + } + } + + @Test + public void testInternalMetadataQueryInDdlBatch() { + try (Connection connection = createConnection()) { + connection.startBatchDdl(); + verifyInternalMetadataQuery(connection); + connection.runBatch(); + } + } + + @Test + public void testInternalMetadataQueryInAutoPartitionMode() { + try (Connection connection = createConnection()) { + connection.setAutoPartitionMode(true); + verifyInternalMetadataQuery(connection); + } + } + + private void verifyInternalMetadataQuery(Connection connection) { + try (ResultSet resultSet = connection.executeQuery(STATEMENT, InternalMetadataQuery.INSTANCE)) { + assertTrue(resultSet.next()); + assertEquals(0L, 
resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasSingleUse()); + assertTrue(request.getTransaction().getSingleUse().hasReadOnly()); + assertTrue(request.getTransaction().getSingleUse().getReadOnly().hasStrong()); + assertEquals(ByteString.EMPTY, request.getPartitionToken()); + assertEquals(0, mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/LocalConnectionCheckerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/LocalConnectionCheckerTest.java new file mode 100644 index 000000000000..daf42b55af46 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/LocalConnectionCheckerTest.java @@ -0,0 +1,101 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.MockSpannerTestUtil.SELECT1; +import static com.google.cloud.spanner.MockSpannerTestUtil.SELECT1_RESULTSET; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import io.grpc.Server; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import java.net.InetSocketAddress; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +public class LocalConnectionCheckerTest { + + private static MockSpannerServiceImpl mockSpanner; + private static Server server; + private LocalConnectionChecker connectionChecker; + + @BeforeClass + public static void beforeClass() throws Exception { + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. 
+ + final InetSocketAddress address = new InetSocketAddress("localhost", 0); + server = NettyServerBuilder.forAddress(address).addService(mockSpanner).build(); + server.start(); + } + + @AfterClass + public static void afterClass() throws Exception { + server.shutdown(); + server.awaitTermination(); + } + + @Before + public void setUp() { + mockSpanner.reset(); + connectionChecker = new LocalConnectionChecker(); + } + + @Test + public void testMockSpanner() { + final String uri = + String.format( + "cloudspanner://localhost:%d/projects/proj/instances/inst/databases/db?usePlainText=true", + server.getPort()); + final ConnectionOptions connectionOptions = ConnectionOptions.newBuilder().setUri(uri).build(); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.query(SELECT1, SELECT1_RESULTSET)); + + try (Connection connection = connectionOptions.getConnection(); + ResultSet resultSet = connection.executeQuery(SELECT1)) { + while (resultSet.next()) {} + } + } + + @Test + public void testNoRunningEmulator() { + final int port = server.getPort() - 1; + final String uri = + String.format( + "cloudspanner://localhost:%d/projects/proj/instances/inst/databases/db?usePlainText=true", + port); + final ConnectionOptions connectionOptions = ConnectionOptions.newBuilder().setUri(uri).build(); + + try { + connectionChecker.checkLocalConnection(connectionOptions); + fail("Unavailable exception expected"); + } catch (SpannerException e) { + assertEquals( + "UNAVAILABLE: The connection string '" + + uri + + "' contains host 'localhost:" + + port + + "', but no running emulator or other server could be found at that address.\n" + + "Please check the connection string and/or that the emulator is running.", + e.getMessage()); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/MaxCommitDelayTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/MaxCommitDelayTest.java 
new file mode 100644 index 000000000000..ca7fa18e97a8 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/MaxCommitDelayTest.java @@ -0,0 +1,196 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static junit.framework.TestCase.assertEquals; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Statement; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.Duration; +import com.google.spanner.v1.CommitRequest; +import java.time.temporal.ChronoUnit; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class MaxCommitDelayTest extends AbstractMockServerTest { + + @Parameters(name = "dialect = {0}") + public static Collection data() { + ImmutableList.Builder builder = ImmutableList.builder(); + for (Dialect dialect : Dialect.values()) { + builder.add(new Object[] {dialect}); + } + return builder.build(); + } + + @Parameter public Dialect 
dialect; + + private Dialect currentDialect; + + @Before + public void setupDialect() { + if (currentDialect != dialect) { + // Reset the dialect result. + SpannerPool.closeSpannerPool(); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.detectDialectResult(dialect)); + currentDialect = dialect; + } + } + + private Mutation createMutation() { + return Mutation.newInsertBuilder("foo").set("id").to(1L).build(); + } + + private String getVariablePrefix() { + return dialect == Dialect.POSTGRESQL ? "spanner." : ""; + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + @Test + public void testNoMaxCommitDelayByDefault() { + try (Connection connection = createConnection()) { + for (boolean autocommit : new boolean[] {true, false}) { + connection.setAutocommit(autocommit); + executeCommit(connection); + assertMaxCommitDelay(Duration.getDefaultInstance(), false); + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testZeroMaxCommitDelay() { + try (Connection connection = createConnection()) { + for (boolean autocommit : new boolean[] {true, false}) { + connection.setAutocommit(autocommit); + connection.setMaxCommitDelay(java.time.Duration.ZERO); + executeCommit(connection); + assertMaxCommitDelay(Duration.getDefaultInstance(), true); + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testMaxCommitDelayInConnectionString() { + try (Connection connection = createConnection(";maxCommitDelay=1000")) { + for (boolean autocommit : new boolean[] {true, false}) { + connection.setAutocommit(autocommit); + executeCommit(connection); + assertMaxCommitDelay(Duration.newBuilder().setSeconds(1).build(), true); + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testZeroMaxCommitDelayInConnectionString() { + try (Connection connection = createConnection(";maxCommitDelay=0")) { + for (boolean autocommit : new boolean[] {true, false}) { + connection.setAutocommit(autocommit); + 
executeCommit(connection); + assertMaxCommitDelay(Duration.getDefaultInstance(), true); + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testSetMaxCommitDelay() { + try (Connection connection = createConnection()) { + for (boolean autocommit : new boolean[] {true, false}) { + for (boolean useSql : new boolean[] {true, false}) { + connection.setAutocommit(autocommit); + + if (useSql) { + connection.execute( + Statement.of(String.format("set %smax_commit_delay='40ms'", getVariablePrefix()))); + } else { + connection.setMaxCommitDelay(java.time.Duration.of(40_000, ChronoUnit.MICROS)); + } + + // Execute two transactions to verify that the new setting is applied to all transactions, + // and not only the first one that is executed after setting the max_commit_delay value. + Repeat.twice( + () -> { + executeCommit(connection); + assertMaxCommitDelay( + Duration.newBuilder().setNanos((int) TimeUnit.MILLISECONDS.toNanos(40)).build(), + true); + mockSpanner.clearRequests(); + }); + + if (useSql) { + // This is translated to Duration.ZERO. + connection.execute( + Statement.of(String.format("set %smax_commit_delay=null", getVariablePrefix()))); + } else { + connection.setMaxCommitDelay(null); + } + executeCommit(connection); + // The SQL statement set max_commit_delay=null is translated to Duration.ZERO. 
+ assertMaxCommitDelay(Duration.getDefaultInstance(), useSql); + mockSpanner.clearRequests(); + + if (useSql) { + connection.execute( + Statement.of(String.format("set %smax_commit_delay=0", getVariablePrefix()))); + } else { + connection.setMaxCommitDelay(java.time.Duration.ZERO); + } + executeCommit(connection); + assertMaxCommitDelay(Duration.getDefaultInstance(), true); + mockSpanner.clearRequests(); + } + } + } + } + + void executeCommit(Connection connection) { + if (connection.isAutocommit()) { + connection.write(createMutation()); + } else { + connection.bufferedWrite(createMutation()); + connection.commit(); + } + } + + private void assertMaxCommitDelay(Duration expected, boolean hasMaxCommitDelay) { + List requests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertEquals(1, requests.size()); + CommitRequest request = requests.get(0); + assertEquals(expected, request.getMaxCommitDelay()); + assertEquals(hasMaxCommitDelay, request.hasMaxCommitDelay()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/MergedResultSetTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/MergedResultSetTest.java new file mode 100644 index 000000000000..6d3950efbc37 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/MergedResultSetTest.java @@ -0,0 +1,254 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.ForwardingResultSet; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.ResultSetsHelper; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.Type; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.StructType; +import java.util.ArrayList; +import java.util.BitSet; +import java.util.Collection; +import java.util.List; +import java.util.Random; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class MergedResultSetTest { + + private static final class MockedResults { + final Connection connection; + final List partitions; + final List allRows; + final int minErrorIndex; + + MockedResults( + Connection connection, List partitions, List allRows, int minErrorIndex) { + this.connection = connection; + this.partitions = partitions; + this.allRows = allRows; + this.minErrorIndex = minErrorIndex; + } + } + + private static final class ResultSetWithError extends ForwardingResultSet { + private final int errorIndex; + private int currentIndex = 0; + + ResultSetWithError(ResultSet delegate, int errorIndex) { + 
super(delegate); + this.errorIndex = errorIndex; + } + + @Override + public boolean next() { + if (currentIndex == errorIndex) { + throw SpannerExceptionFactory.newSpannerException(ErrorCode.INTERNAL, "test error"); + } + currentIndex++; + return super.next(); + } + } + + @Parameter(0) + public int numPartitions; + + @Parameter(1) + public int maxRowsPerPartition; + + @Parameter(2) + public int maxParallelism; + + @Parameters(name = "numPartitions = {0}, maxRowsPerPartition = {1}, maxParallelism = {2}") + public static Collection parameters() { + List params = new ArrayList<>(); + for (int numPartitions : new int[] {0, 1, 2, 5, 8}) { + for (int maxRowsPerPartition : new int[] {0, 1, 5, 10, 100}) { + for (int maxParallelism : new int[] {0, 1, 2, 4, 8}) { + params.add(new Object[] {numPartitions, maxRowsPerPartition, maxParallelism}); + } + } + } + return params; + } + + private MockedResults setupResults(boolean withErrors, boolean withEmptyResults) { + Random random = new Random(); + Connection connection = mock(Connection.class); + List partitions = new ArrayList<>(); + List allRows = new ArrayList<>(); + int minErrorIndex = Integer.MAX_VALUE; + for (int index = 0; index < numPartitions; index++) { + String partition = String.valueOf(index); + partitions.add(partition); + int numRows = maxRowsPerPartition == 0 ? 0 : random.nextInt(maxRowsPerPartition) + 1; + RandomResultSetGenerator generator = new RandomResultSetGenerator(numRows); + com.google.spanner.v1.ResultSet proto = generator.generate(); + if (withErrors) { + // Add a random error somewhere in the result. + int errorIndex = numRows == 0 ? 
0 : random.nextInt(numRows); + minErrorIndex = Math.min(minErrorIndex, errorIndex); + when(connection.runPartition(partition)) + .thenReturn(new ResultSetWithError(ResultSetsHelper.fromProto(proto), errorIndex)); + } else { + if (withEmptyResults && numPartitions > 1 && index == 0) { + when(connection.runPartition(partition)) + .thenReturn( + ResultSetsHelper.fromProto( + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType(StructType.newBuilder().build()) + .build()) + .build())); + } else { + when(connection.runPartition(partition)).thenReturn(ResultSetsHelper.fromProto(proto)); + try (ResultSet resultSet = ResultSetsHelper.fromProto(proto)) { + while (resultSet.next()) { + allRows.add(resultSet.getCurrentRowAsStruct()); + } + } + } + } + } + return new MockedResults(connection, partitions, allRows, minErrorIndex); + } + + @Test + public void testAllResultsAreReturned() { + MockedResults results = setupResults(/* withErrors= */ false, /* withEmptyResults= */ false); + BitSet rowsFound = new BitSet(results.allRows.size()); + try (MergedResultSet resultSet = + new MergedResultSet(results.connection, results.partitions, maxParallelism)) { + while (resultSet.next()) { + assertRowExists(results.allRows, resultSet.getCurrentRowAsStruct(), rowsFound); + } + // Verify that we can get the metadata after having gotten all rows. + // This failed in the initial release of this feature for result sets that were empty. The + // reason for that was that the initial implementation would do a call to currentRowAsStruct, + // which would always be null for result sets that never returned any data. 
+ assertNotNull(resultSet.getMetadata()); + if (numPartitions == 0) { + assertEquals(0, resultSet.getColumnCount()); + } else { + assertEquals(26, resultSet.getColumnCount()); + assertEquals(Type.bool(), resultSet.getColumnType(0)); + assertEquals(Type.bool(), resultSet.getColumnType("COL0")); + assertEquals(10, resultSet.getColumnIndex("COL10")); + } + // Check that all rows were found. + assertEquals(results.allRows.size(), rowsFound.nextClearBit(0)); + // Check extended metadata. + assertEquals(numPartitions, resultSet.getNumPartitions()); + if (maxParallelism > 0) { + assertEquals(Math.min(numPartitions, maxParallelism), resultSet.getParallelism()); + } else { + int processors = Runtime.getRuntime().availableProcessors(); + assertEquals(Math.min(numPartitions, processors), resultSet.getParallelism()); + } + } + } + + @Test + public void testResultSetStopsAfterFirstError() { + MockedResults results = setupResults(/* withErrors= */ true, /* withEmptyResults= */ false); + try (MergedResultSet resultSet = + new MergedResultSet(results.connection, results.partitions, maxParallelism)) { + if (numPartitions > 0) { + AtomicInteger rowCount = new AtomicInteger(); + SpannerException exception = + assertThrows( + SpannerException.class, + () -> { + while (resultSet.next()) { + rowCount.getAndIncrement(); + } + }); + assertEquals(ErrorCode.INTERNAL, exception.getErrorCode()); + assertTrue(exception.getMessage(), exception.getMessage().contains("test error")); + // The result set should continue to throw the same error if we continue to call next(). + SpannerException nextException = assertThrows(SpannerException.class, resultSet::next); + assertEquals(exception, nextException); + // We should see at least minErrorIndex rows before an error. 
+ assertTrue(rowCount.get() >= results.minErrorIndex); + } + } + } + + @Test + public void testResultSetReturnsNonEmptyMetadata() { + MockedResults results = setupResults(/* withErrors= */ false, /* withEmptyResults= */ true); + BitSet rowsFound = new BitSet(results.allRows.size()); + try (MergedResultSet resultSet = + new MergedResultSet(results.connection, results.partitions, maxParallelism)) { + if (numPartitions > 0) { + assertNotNull(resultSet.getMetadata()); + assertEquals(26, resultSet.getMetadata().getRowType().getFieldsCount()); + } + while (resultSet.next()) { + assertRowExists(results.allRows, resultSet.getCurrentRowAsStruct(), rowsFound); + } + if (numPartitions == 0) { + assertEquals(0, resultSet.getColumnCount()); + } else { + assertEquals(26, resultSet.getColumnCount()); + assertEquals(Type.bool(), resultSet.getColumnType(0)); + assertEquals(Type.bool(), resultSet.getColumnType("COL0")); + assertEquals(10, resultSet.getColumnIndex("COL10")); + } + // Check that all rows were found. + assertEquals(results.allRows.size(), rowsFound.nextClearBit(0)); + // Check extended metadata. 
+ assertEquals(numPartitions, resultSet.getNumPartitions()); + if (maxParallelism > 0) { + assertEquals(Math.min(numPartitions, maxParallelism), resultSet.getParallelism()); + } else { + int processors = Runtime.getRuntime().availableProcessors(); + assertEquals(Math.min(numPartitions, processors), resultSet.getParallelism()); + } + } + } + + private void assertRowExists(List expectedRows, Struct row, BitSet rowsFound) { + for (int i = 0; i < expectedRows.size(); i++) { + if (row.equals(expectedRows.get(i))) { + rowsFound.set(i); + return; + } + } + fail("row not found: " + row); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/NoopEndTransactionCallback.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/NoopEndTransactionCallback.java new file mode 100644 index 000000000000..6145d9770b5c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/NoopEndTransactionCallback.java @@ -0,0 +1,31 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.connection.UnitOfWork.EndTransactionCallback; + +class NoopEndTransactionCallback implements EndTransactionCallback { + static final NoopEndTransactionCallback INSTANCE = new NoopEndTransactionCallback(); + + private NoopEndTransactionCallback() {} + + @Override + public void onSuccess() {} + + @Override + public void onFailure() {} +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/OpenTelemetryTracingTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/OpenTelemetryTracingTest.java new file mode 100644 index 000000000000..a8e579feb1a3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/OpenTelemetryTracingTest.java @@ -0,0 +1,681 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.Repeat.twice; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.SpannerOptions.SpannerEnvironment; +import com.google.cloud.spanner.SpannerOptionsTestHelper; +import com.google.cloud.spanner.Statement; +import com.google.common.collect.ImmutableList; +import com.google.longrunning.Operation; +import com.google.protobuf.Any; +import com.google.protobuf.Empty; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import io.opentelemetry.api.GlobalOpenTelemetry; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator; +import io.opentelemetry.context.propagation.ContextPropagators; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.testing.exporter.InMemorySpanExporter; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.data.EventData; +import io.opentelemetry.sdk.trace.data.SpanData; +import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor; +import java.util.List; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class OpenTelemetryTracingTest extends AbstractMockServerTest { + private static InMemorySpanExporter spanExporter; + + private static OpenTelemetrySdk openTelemetry; + + 
@BeforeClass + public static void setupOpenTelemetry() { + SpannerOptionsTestHelper.resetActiveTracingFramework(); + SpannerOptions.enableOpenTelemetryTraces(); + GlobalOpenTelemetry.resetForTest(); + + spanExporter = InMemorySpanExporter.create(); + + SdkTracerProvider tracerProvider = + SdkTracerProvider.builder() + .addSpanProcessor(SimpleSpanProcessor.create(spanExporter)) + .build(); + + openTelemetry = + OpenTelemetrySdk.builder() + .setPropagators(ContextPropagators.create(W3CTraceContextPropagator.getInstance())) + .setTracerProvider(tracerProvider) + .buildAndRegisterGlobal(); + } + + @AfterClass + public static void closeOpenTelemetry() { + SpannerPool.closeSpannerPool(); + if (openTelemetry != null) { + openTelemetry.close(); + } + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + spanExporter.reset(); + } + + Connection createTestConnection() { + return createTestConnection(getBaseUrl() + ";enableExtendedTracing=true"); + } + + Connection createTestConnection(String url) { + return ConnectionOptions.newBuilder() + .setTracingPrefix("CloudSpannerJdbc") + .setUri(url) + .build() + .getConnection(); + } + + @Test + public void testSingleUseQuery_withoutSqlStatement() { + try (Connection connection = createTestConnection(getBaseUrl())) { + connection.setAutocommit(true); + try (ResultSet resultSet = connection.executeQuery(SELECT1_STATEMENT)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + // There are two query spans: One for getting the database dialect, and one for the actual + // test query. + assertEquals( + 2, + spans.stream() + .filter(span -> span.getName().equals("CloudSpannerOperation.ExecuteStreamingQuery")) + .count()); + // Verify that both do not have any db.statement attribute. 
+ assertEquals( + 2, + spans.stream() + .filter( + span -> + span.getName().equals("CloudSpannerOperation.ExecuteStreamingQuery") + && span.getAttributes().get(AttributeKey.stringKey("db.statement")) == null) + .count()); + } + + @Test + public void testSingleUseQuery_withoutSqlStatement_usingEnvVar() { + SpannerPool.closeSpannerPool(); + SpannerOptions.useEnvironment( + new SpannerEnvironment() { + @Override + public boolean isEnableExtendedTracing() { + return true; + } + }); + + try (Connection connection = createTestConnection(getBaseUrl())) { + connection.setAutocommit(true); + try (ResultSet resultSet = connection.executeQuery(SELECT1_STATEMENT)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + } finally { + SpannerOptions.useDefaultEnvironment(); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpannerJdbc.SingleUseTransaction", spans); + assertContains("CloudSpanner.ReadOnlyTransaction", spans); + assertContains( + "CloudSpannerOperation.ExecuteStreamingQuery", + Attributes.of(AttributeKey.stringKey("db.statement"), SELECT1_STATEMENT.getSql()), + spans); + SpanData executeQuerySpan = + getSpan( + "CloudSpannerOperation.ExecuteStreamingQuery", + Attributes.of( + AttributeKey.stringKey("db.statement"), SELECT1_STATEMENT.getSql(), + AttributeKey.stringKey("thread.name"), Thread.currentThread().getName()), + spans); + + assertParent( + "CloudSpannerJdbc.SingleUseTransaction", "CloudSpanner.ReadOnlyTransaction", spans); + assertParent( + "CloudSpanner.ReadOnlyTransaction", + "CloudSpannerOperation.ExecuteStreamingQuery", + Attributes.of(AttributeKey.stringKey("db.statement"), SELECT1_STATEMENT.getSql()), + spans); + } + + @Test + public void testSingleUseQuery() { + try (Connection connection = createTestConnection()) { + connection.setAutocommit(true); + try (ResultSet resultSet = connection.executeQuery(SELECT1_STATEMENT)) { + 
assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpannerJdbc.SingleUseTransaction", spans); + assertContains("CloudSpanner.ReadOnlyTransaction", spans); + assertContains( + "CloudSpannerOperation.ExecuteStreamingQuery", + Attributes.of(AttributeKey.stringKey("db.statement"), SELECT1_STATEMENT.getSql()), + spans); + SpanData executeQuerySpan = + getSpan( + "CloudSpannerOperation.ExecuteStreamingQuery", + Attributes.of( + AttributeKey.stringKey("db.statement"), SELECT1_STATEMENT.getSql(), + AttributeKey.stringKey("thread.name"), Thread.currentThread().getName()), + spans); + + assertParent( + "CloudSpannerJdbc.SingleUseTransaction", "CloudSpanner.ReadOnlyTransaction", spans); + assertParent( + "CloudSpanner.ReadOnlyTransaction", + "CloudSpannerOperation.ExecuteStreamingQuery", + Attributes.of(AttributeKey.stringKey("db.statement"), SELECT1_STATEMENT.getSql()), + spans); + } + + @Test + public void testSingleUseUpdate() { + try (Connection connection = createTestConnection()) { + connection.setAutocommit(true); + connection.executeUpdate(INSERT_STATEMENT); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpannerJdbc.SingleUseTransaction", spans); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains( + "CloudSpannerOperation.ExecuteUpdate", + Attributes.of(AttributeKey.stringKey("db.statement"), INSERT_STATEMENT.getSql()), + spans); + assertContains("CloudSpannerOperation.Commit", spans); + + assertParent( + "CloudSpannerJdbc.SingleUseTransaction", "CloudSpanner.ReadWriteTransaction", spans); + assertParent( + "CloudSpanner.ReadWriteTransaction", + "CloudSpannerOperation.ExecuteUpdate", + Attributes.of(AttributeKey.stringKey("db.statement"), INSERT_STATEMENT.getSql()), + 
spans); + SpanData executeQuerySpan = + getSpan( + "CloudSpannerOperation.ExecuteUpdate", + Attributes.of( + AttributeKey.stringKey("db.statement"), INSERT_STATEMENT.getSql(), + AttributeKey.stringKey("thread.name"), Thread.currentThread().getName()), + spans); + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.Commit", spans); + } + + @Test + public void testSingleUseBatchUpdate() { + try (Connection connection = createTestConnection()) { + connection.setAutocommit(true); + connection.startBatchDml(); + connection.executeUpdate(INSERT_STATEMENT); + connection.executeUpdate(INSERT_STATEMENT); + connection.runBatch(); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpannerJdbc.SingleUseTransaction", spans); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains( + "CloudSpannerOperation.BatchUpdate", + Attributes.of( + AttributeKey.stringArrayKey("db.statement"), + ImmutableList.of(INSERT_STATEMENT.getSql(), INSERT_STATEMENT.getSql())), + spans); + SpanData executeQuerySpan = + getSpan( + "CloudSpannerOperation.BatchUpdate", + Attributes.of( + AttributeKey.stringArrayKey("db.statement"), + ImmutableList.of(INSERT_STATEMENT.getSql(), INSERT_STATEMENT.getSql())), + spans); + String threadName = executeQuerySpan.getAttributes().get(AttributeKey.stringKey("thread.name")); + assertEquals(Thread.currentThread().getName(), threadName); + assertContains("CloudSpannerOperation.Commit", spans); + + assertParent( + "CloudSpannerJdbc.SingleUseTransaction", "CloudSpanner.ReadWriteTransaction", spans); + assertParent( + "CloudSpanner.ReadWriteTransaction", + "CloudSpannerOperation.BatchUpdate", + Attributes.of( + AttributeKey.stringArrayKey("db.statement"), + ImmutableList.of(INSERT_STATEMENT.getSql(), INSERT_STATEMENT.getSql())), + spans); + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.Commit", spans); 
+ } + + @Test + public void testSingleUseDdl() { + String ddl = "CREATE TABLE foo (id int64) PRIMARY KEY (id)"; + addUpdateDdlResponse(); + + try (Connection connection = createTestConnection()) { + connection.setAutocommit(true); + connection.execute(Statement.of(ddl)); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains( + "CloudSpannerJdbc.DdlStatement", + Attributes.of(AttributeKey.stringKey("db.statement"), ddl), + spans); + } + + @Test + public void testSingleUseDdlBatch() { + String ddl1 = "CREATE TABLE foo (id int64, value string(max)) PRIMARY KEY (id)"; + String ddl2 = "CREATE INDEX idx_foo ON foo (value)"; + addUpdateDdlResponse(); + + try (Connection connection = createTestConnection()) { + connection.setAutocommit(true); + connection.startBatchDdl(); + connection.execute(Statement.of(ddl1)); + connection.execute(Statement.of(ddl2)); + connection.runBatch(); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains( + "CloudSpannerJdbc.DdlBatch", + Attributes.of(AttributeKey.stringArrayKey("db.statement"), ImmutableList.of(ddl1, ddl2)), + spans); + } + + @Test + public void testMultiUseReadOnlyQueries() { + try (Connection connection = createTestConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(true); + twice( + () -> { + try (ResultSet resultSet = connection.executeQuery(SELECT1_STATEMENT)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + }); + connection.commit(); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpannerJdbc.ReadOnlyTransaction", spans); + assertContains("CloudSpanner.ReadOnlyTransaction", spans); + assertContains( + "CloudSpannerOperation.ExecuteStreamingQuery", + 2, + 
Attributes.of(AttributeKey.stringKey("db.statement"), SELECT1_STATEMENT.getSql()), + spans); + assertParent("CloudSpannerJdbc.ReadOnlyTransaction", "CloudSpanner.ReadOnlyTransaction", spans); + assertParent( + "CloudSpanner.ReadOnlyTransaction", + "CloudSpannerOperation.ExecuteStreamingQuery", + Attributes.of(AttributeKey.stringKey("db.statement"), SELECT1_STATEMENT.getSql()), + spans); + } + + @Test + public void testMultiUseReadWriteQueries() { + try (Connection connection = createTestConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(false); + twice( + () -> { + try (ResultSet resultSet = connection.executeQuery(SELECT1_STATEMENT)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + }); + connection.commit(); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpannerJdbc.ReadWriteTransaction", spans); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains( + "CloudSpannerOperation.ExecuteStreamingQuery", + 2, + Attributes.of(AttributeKey.stringKey("db.statement"), SELECT1_STATEMENT.getSql()), + spans); + assertContains("CloudSpannerOperation.Commit", spans); + assertParent( + "CloudSpannerJdbc.ReadWriteTransaction", "CloudSpanner.ReadWriteTransaction", spans); + assertParent( + "CloudSpanner.ReadWriteTransaction", + "CloudSpannerOperation.ExecuteStreamingQuery", + Attributes.of(AttributeKey.stringKey("db.statement"), SELECT1_STATEMENT.getSql()), + spans); + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.Commit", spans); + } + + @Test + public void testMultiUseReadWriteUpdates() { + try (Connection connection = createTestConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(false); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + connection.commit(); + } + 
assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpannerJdbc.ReadWriteTransaction", spans); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains( + "CloudSpannerOperation.ExecuteUpdate", + 2, + Attributes.of(AttributeKey.stringKey("db.statement"), INSERT_STATEMENT.getSql()), + spans); + assertContains("CloudSpannerOperation.Commit", spans); + assertParent( + "CloudSpannerJdbc.ReadWriteTransaction", "CloudSpanner.ReadWriteTransaction", spans); + assertParent( + "CloudSpanner.ReadWriteTransaction", + "CloudSpannerOperation.ExecuteUpdate", + Attributes.of(AttributeKey.stringKey("db.statement"), INSERT_STATEMENT.getSql()), + spans); + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.Commit", spans); + } + + @Test + public void testMultiUseReadWriteBatchUpdates() { + try (Connection connection = createTestConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(false); + + twice( + () -> { + connection.startBatchDml(); + connection.executeUpdate(INSERT_STATEMENT); + connection.executeUpdate(INSERT_STATEMENT); + connection.runBatch(); + }); + + connection.commit(); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpannerJdbc.ReadWriteTransaction", spans); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains( + "CloudSpannerOperation.BatchUpdate", + 2, + Attributes.of( + AttributeKey.stringArrayKey("db.statement"), + ImmutableList.of(INSERT_STATEMENT.getSql(), INSERT_STATEMENT.getSql())), + spans); + assertContains("CloudSpannerOperation.Commit", spans); + assertParent( + "CloudSpannerJdbc.ReadWriteTransaction", "CloudSpanner.ReadWriteTransaction", spans); + assertParent( + "CloudSpanner.ReadWriteTransaction", + "CloudSpannerOperation.BatchUpdate", + Attributes.of( + 
AttributeKey.stringArrayKey("db.statement"), + ImmutableList.of(INSERT_STATEMENT.getSql(), INSERT_STATEMENT.getSql())), + spans); + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.Commit", spans); + } + + @Test + public void testMultiUseReadWriteAborted() { + try (Connection connection = createTestConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(false); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + mockSpanner.abortNextStatement(); + connection.commit(); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpannerJdbc.ReadWriteTransaction", spans); + assertContains("CloudSpanner.ReadWriteTransaction", 1, Attributes.empty(), spans); + SpanData transactionSpan = + spans.stream() + .filter(span -> span.getName().equals("CloudSpannerJdbc.ReadWriteTransaction")) + .findFirst() + .orElseThrow(IllegalStateException::new); + assertEquals( + Boolean.TRUE, + transactionSpan.getAttributes().get(AttributeKey.booleanKey("transaction.retried"))); + assertEquals(1, transactionSpan.getTotalRecordedEvents()); + EventData event = transactionSpan.getEvents().get(0); + assertEquals( + "Transaction aborted. Backing off for 0 milliseconds and retrying.", event.getName()); + // The transaction is retried, so we get the ExecuteUpdate and Commit spans twice. 
+ assertContains( + "CloudSpannerOperation.ExecuteUpdate", + 2, + Attributes.of(AttributeKey.stringKey("db.statement"), INSERT_STATEMENT.getSql()), + spans); + assertContains("CloudSpannerOperation.Commit", 2, Attributes.empty(), spans); + + assertParent( + "CloudSpannerJdbc.ReadWriteTransaction", "CloudSpanner.ReadWriteTransaction", spans); + assertParent( + "CloudSpanner.ReadWriteTransaction", + "CloudSpannerOperation.ExecuteUpdate", + Attributes.of(AttributeKey.stringKey("db.statement"), INSERT_STATEMENT.getSql()), + spans); + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.Commit", spans); + } + + @Test + public void testSavepoint() { + Statement statement1 = Statement.of("insert into foo (id) values (1)"); + Statement statement2 = Statement.of("insert into foo (id) values (2)"); + mockSpanner.putStatementResult(MockSpannerServiceImpl.StatementResult.update(statement1, 1)); + mockSpanner.putStatementResult(MockSpannerServiceImpl.StatementResult.update(statement2, 1)); + + try (Connection connection = createTestConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(false); + connection.setSavepointSupport(SavepointSupport.ENABLED); + assertEquals(1L, connection.executeUpdate(statement1)); + connection.savepoint("test"); + assertEquals(1L, connection.executeUpdate(statement2)); + connection.rollbackToSavepoint("test"); + connection.commit(); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpannerJdbc.ReadWriteTransaction", spans); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + // Statement 1 is executed 2 times, because the original transaction needs to be + // retried after the transaction was rolled back to the savepoint. 
+ assertContains( + "CloudSpannerOperation.ExecuteUpdate", + 2, + Attributes.of(AttributeKey.stringKey("db.statement"), statement1.getSql()), + spans); + assertContains( + "CloudSpannerOperation.ExecuteUpdate", + 1, + Attributes.of(AttributeKey.stringKey("db.statement"), statement2.getSql()), + spans); + assertContains("CloudSpannerOperation.Commit", spans); + + // Verify that we have two Cloud Spanner transactions, and that these are both children of one + // JDBC transaction. + List transactionSpans = + getSpans("CloudSpanner.ReadWriteTransaction", Attributes.empty(), spans); + assertEquals(2, transactionSpans.size()); + assertEquals( + transactionSpans.get(0).getParentSpanId(), transactionSpans.get(1).getParentSpanId()); + List jdbcTransactionSpans = + getSpans("CloudSpannerJdbc.ReadWriteTransaction", Attributes.empty(), spans); + assertEquals(1, jdbcTransactionSpans.size()); + assertEquals( + jdbcTransactionSpans.get(0).getSpanId(), transactionSpans.get(0).getParentSpanId()); + List commitSpans = + getSpans("CloudSpannerOperation.Commit", Attributes.empty(), spans); + assertEquals(1, commitSpans.size()); + assertEquals(transactionSpans.get(1).getSpanId(), commitSpans.get(0).getParentSpanId()); + } + + @Test + public void testTransactionTag() { + try (Connection connection = createTestConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(false); + connection.setTransactionTag("my_tag"); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + connection.commit(); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains( + "CloudSpanner.ReadWriteTransaction", + 1, + Attributes.of(AttributeKey.stringKey("transaction.tag"), "my_tag"), + spans); + } + + @Test + public void testStatementTag() { + try (Connection connection = createTestConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(false); + 
connection.setStatementTag("my_tag"); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + connection.commit(); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains( + "CloudSpannerOperation.ExecuteUpdate", + 1, + Attributes.of(AttributeKey.stringKey("statement.tag"), "my_tag"), + spans); + } + + void assertContains(String expected, List spans) { + assertTrue( + "Expected " + spansToString(spans) + " to contain " + expected, + spans.stream().anyMatch(span -> span.getName().equals(expected))); + } + + void assertContains(String expected, Attributes attributes, List spans) { + assertContains(expected, 1, attributes, spans); + } + + void assertContains(String expected, int count, Attributes attributes, List spans) { + assertEquals( + "Expected " + spansToString(spans) + " to contain " + expected, + count, + spans.stream().filter(span -> equalsSpan(span, expected, attributes)).count()); + } + + boolean equalsSpan(SpanData span, String name, Attributes attributes) { + if (!span.getName().equals(name)) { + return false; + } + for (Entry, Object> entry : attributes.asMap().entrySet()) { + if (!span.getAttributes().asMap().containsKey(entry.getKey())) { + return false; + } + if (!Objects.equals(entry.getValue(), span.getAttributes().get(entry.getKey()))) { + return false; + } + } + return true; + } + + void assertParent(String expectedParent, String child, List spans) { + SpanData parentSpan = getSpan(expectedParent, spans); + SpanData childSpan = getSpan(child, spans); + assertEquals(parentSpan.getSpanId(), childSpan.getParentSpanId()); + } + + void assertParent( + String expectedParent, String child, Attributes attributes, List spans) { + SpanData parentSpan = getSpan(expectedParent, spans); + List childSpans = getSpans(child, attributes, spans); + for (SpanData childSpan : childSpans) { + assertEquals(parentSpan.getSpanId(), childSpan.getParentSpanId()); + } + } + + 
SpanData getSpan(String name, List spans) { + return spans.stream() + .filter(span -> span.getName().equals(name)) + .findAny() + .orElseThrow(() -> new IllegalArgumentException("Span " + name + " not found")); + } + + SpanData getSpan(String name, Attributes attributes, List spans) { + return spans.stream() + .filter(span -> equalsSpan(span, name, attributes)) + .findAny() + .orElseThrow(() -> new IllegalArgumentException("Span " + name + " not found")); + } + + List getSpans(String name, Attributes attributes, List spans) { + return spans.stream() + .filter(span -> equalsSpan(span, name, attributes)) + .collect(Collectors.toList()); + } + + private String spansToString(List spans) { + return spans.stream().map(SpanData::getName).collect(Collectors.joining("\n", "\n", "\n")); + } + + private void addUpdateDdlResponse() { + mockDatabaseAdmin.addResponse( + Operation.newBuilder() + .setMetadata( + Any.pack( + UpdateDatabaseDdlMetadata.newBuilder() + .setDatabase("projects/proj/instances/inst/databases/db") + .build())) + .setName("projects/proj/instances/inst/databases/db/operations/1") + .setDone(true) + .setResponse(Any.pack(Empty.getDefaultInstance())) + .build()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/PartitionIdTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/PartitionIdTest.java new file mode 100644 index 000000000000..850d0cedfaa7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/PartitionIdTest.java @@ -0,0 +1,66 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerException; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectOutputStream; +import java.io.Serializable; +import java.util.Base64; +import java.util.zip.GZIPOutputStream; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class PartitionIdTest { + private static class TestObject implements Serializable { + private final String id; + + TestObject(String id) { + this.id = id; + } + + public String getId() { + return id; + } + } + + @Test + public void testDeserializeInvalid() throws IOException { + ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); + try (ObjectOutputStream objectOutputStream = + new ObjectOutputStream(new GZIPOutputStream(byteArrayOutputStream))) { + objectOutputStream.writeObject(new TestObject("foo")); + } + String base64 = Base64.getUrlEncoder().encodeToString(byteArrayOutputStream.toByteArray()); + + SpannerException exception = + assertThrows(SpannerException.class, () -> PartitionId.decodeFromString(base64)); + assertEquals(ErrorCode.INVALID_ARGUMENT, exception.getErrorCode()); + assertEquals( + ErrorCode.INVALID_ARGUMENT + + ": The id does not contain a valid PartitionId instance; " + + "com.google.cloud.spanner.connection.PartitionIdTest$TestObject", + 
exception.getMessage()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/PartitionedQueryMockServerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/PartitionedQueryMockServerTest.java new file mode 100644 index 000000000000..d3fb5181247d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/PartitionedQueryMockServerTest.java @@ -0,0 +1,791 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.PartitionOptions; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CreateSessionRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.PartitionQueryRequest; +import io.grpc.Status; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class PartitionedQueryMockServerTest extends AbstractMockServerTest { + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + @Parameter public Dialect dialect; + + private Dialect currentDialect; + + @Before + public void setupDialect() { + if (currentDialect != dialect) { + mockSpanner.putStatementResult(StatementResult.detectDialectResult(dialect)); + SpannerPool.closeSpannerPool(); + currentDialect = dialect; + } + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + @Test + public void testPartitionQuery() { + 
try (Connection connection = createConnection()) { + connection.setAutocommit(true); + for (int i = 0; i < 2; i++) { + try (ResultSet resultSet = + connection.partitionQuery( + Statement.newBuilder("select * from my_table where id=@id") + .bind("p1") + .to(1L) + .build(), + PartitionOptions.newBuilder().build())) { + assertTrue(resultSet.next()); + assertEquals(1, resultSet.getColumnCount()); + PartitionId partitionId = PartitionId.decodeFromString(resultSet.getString("PARTITION")); + assertNotNull(partitionId); + assertFalse(resultSet.next()); + } + } + if (isMultiplexedSessionsEnabledForPartitionedOps(connection.getSpanner())) { + assertEquals(2, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } else if (isMultiplexedSessionsEnabled(connection.getSpanner())) { + assertEquals(3, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } else { + assertEquals(2, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } + } + assertEquals(2, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(2, mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); + } + + @Test + public void testPartitionQueryInReadOnlyTransaction() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(true); + for (int i = 0; i < 2; i++) { + try (ResultSet resultSet = + connection.partitionQuery( + Statement.newBuilder("select * from my_table where id=@id") + .bind("p1") + .to(1L) + .build(), + PartitionOptions.newBuilder().build())) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + } + if (isMultiplexedSessionsEnabled(connection.getSpanner())) { + assertEquals(2, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } else { + assertEquals(1, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } + } + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(2, 
mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); + } + + @Test + public void testMixNormalAndPartitionQueryInReadOnlyTransaction() { + List readTimestamps = new ArrayList<>(); + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(true); + for (int i = 0; i < 2; i++) { + try (ResultSet resultSet = connection.executeQuery(SELECT_COUNT_STATEMENT)) { + assertTrue(resultSet.next()); + assertEquals(0L, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + try (ResultSet resultSet = + connection.partitionQuery( + Statement.newBuilder("select * from my_table where id=@id") + .bind("p1") + .to(1L) + .build(), + PartitionOptions.newBuilder().build())) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + readTimestamps.add(connection.getReadTimestamp()); + connection.commit(); + } + if (isMultiplexedSessionsEnabledForPartitionedOps(connection.getSpanner())) { + assertEquals(2, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } else if (isMultiplexedSessionsEnabled(connection.getSpanner())) { + assertEquals(3, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } else { + assertEquals(2, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } + } + // The above will start two transactions: + // 1. The initial 'normal' read-only transaction. + // 2. The batch read-only transaction. The latter will use the same read timestamp as the normal + // read-only transaction. 
+ assertEquals(4, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(2, mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); + + List beginRequests = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertTrue(beginRequests.get(0).getOptions().getReadOnly().hasStrong()); + assertTrue(beginRequests.get(1).getOptions().getReadOnly().hasReadTimestamp()); + assertTrue(beginRequests.get(2).getOptions().getReadOnly().hasStrong()); + assertTrue(beginRequests.get(3).getOptions().getReadOnly().hasReadTimestamp()); + + assertEquals( + readTimestamps.get(0), + Timestamp.fromProto(beginRequests.get(1).getOptions().getReadOnly().getReadTimestamp())); + assertEquals( + readTimestamps.get(1), + Timestamp.fromProto(beginRequests.get(3).getOptions().getReadOnly().getReadTimestamp())); + } + + @Test + public void testRunPartition() { + int generatedRowCount = 20; + RandomResultSetGenerator generator = new RandomResultSetGenerator(generatedRowCount); + Statement statement = Statement.of("select * from random_table"); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + + int maxPartitions = 5; + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + boolean isMultiplexedSessionCreated = false; + for (boolean dataBoostEnabled : new boolean[] {false, true}) { + connection.setDataBoostEnabled(dataBoostEnabled); + assertEquals(dataBoostEnabled, connection.isDataBoostEnabled()); + int rowCount = 0; + try (ResultSet partitions = + connection.partitionQuery( + statement, PartitionOptions.newBuilder().setMaxPartitions(maxPartitions).build())) { + + int partitionCount = 0; + while (partitions.next()) { + try (ResultSet resultSet = connection.runPartition(partitions.getString("PARTITION"))) { + while (resultSet.next()) { + rowCount++; + } + } + partitionCount++; + } + assertEquals(maxPartitions, partitionCount); + // The mock server is not smart enough to actually only 
return a partition of a query, so + // each partition just returns all rows of the query. + assertEquals(generatedRowCount * maxPartitions, rowCount); + } + int createSessionRequestCounts = + mockSpanner.countRequestsOfType(CreateSessionRequest.class); + int expectedCreateSessionsRPC = 1; + if (isMultiplexedSessionsEnabled(connection.getSpanner()) && !isMultiplexedSessionCreated) { + // in one of the iterations there will be one additional RPC to create a multiplexed + // session + assertTrue( + createSessionRequestCounts >= expectedCreateSessionsRPC + && createSessionRequestCounts <= expectedCreateSessionsRPC + 1); + if (createSessionRequestCounts == expectedCreateSessionsRPC + 1) { + isMultiplexedSessionCreated = true; + } + } else if (isMultiplexedSessionsEnabledForPartitionedOps(connection.getSpanner()) + && isMultiplexedSessionCreated) { + // When multiplexed session will be reused for each iteration. + assertEquals(0, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } else { + assertEquals( + expectedCreateSessionsRPC, + mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); + assertEquals(maxPartitions, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertFalse( + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .anyMatch(request -> request.getPartitionToken().isEmpty())); + assertFalse( + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .anyMatch(request -> request.getDataBoostEnabled() != dataBoostEnabled)); + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testRunPartitionUsingSql() { + int generatedRowCount = 20; + RandomResultSetGenerator generator = new RandomResultSetGenerator(generatedRowCount); + String paramName = dialect == Dialect.POSTGRESQL ? 
"$1" : "@p1"; + Statement statement = + Statement.newBuilder(String.format("select * from random_table where active=%s", paramName)) + .bind("p1") + .to(true) + .build(); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + String prefix = dialect == Dialect.POSTGRESQL ? "spanner." : ""; + + int maxPartitions = 5; + boolean isMultiplexedSessionCreated = false; + try (Connection connection = createConnection()) { + connection.execute(Statement.of("set autocommit=true")); + assertTrue(connection.isAutocommit()); + for (boolean dataBoostEnabled : new boolean[] {false, true}) { + connection.execute( + Statement.of(String.format("set %sdata_boost_enabled=%s", prefix, dataBoostEnabled))); + try (ResultSet resultSet = + connection.executeQuery( + Statement.of(String.format("show variable %sdata_boost_enabled", prefix)))) { + assertTrue(resultSet.next()); + assertEquals(dataBoostEnabled, resultSet.getBoolean(0)); + assertFalse(resultSet.next()); + } + connection.execute( + Statement.of(String.format("set %smax_partitions=%d", prefix, maxPartitions))); + try (ResultSet resultSet = + connection.executeQuery( + Statement.of(String.format("show variable %smax_partitions", prefix)))) { + assertTrue(resultSet.next()); + assertEquals(maxPartitions, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + + for (boolean useLiteral : new boolean[] {true, false}) { + try (ResultSet partitions = + connection.executeQuery( + Statement.newBuilder("partition " + statement.getSql()) + .bind("p1") + .to(true) + .build())) { + int rowCount = 0; + int partitionCount = 0; + while (partitions.next()) { + try (ResultSet resultSet = + connection.executeQuery( + useLiteral + ? 
Statement.of( + "RUN\tPARTITION\n '" + partitions.getString("PARTITION") + "'") + : Statement.newBuilder("RUN PARTITION") + .bind("PARTITION") + .to(partitions.getString("PARTITION")) + .build())) { + while (resultSet.next()) { + rowCount++; + } + } + partitionCount++; + } + assertEquals(maxPartitions, partitionCount); + // The mock server is not smart enough to actually only return a partition of a query, + // so each partition just returns all rows of the query. + assertEquals(generatedRowCount * maxPartitions, rowCount); + } + int createSessionRequestCounts = + mockSpanner.countRequestsOfType(CreateSessionRequest.class); + int expectedCreateSessionsRPC = 1; + if (isMultiplexedSessionsEnabled(connection.getSpanner()) + && !isMultiplexedSessionCreated) { + // in one of the iterations there will be one additional RPC to create a multiplexed + // session + assertTrue( + createSessionRequestCounts >= expectedCreateSessionsRPC + && createSessionRequestCounts <= expectedCreateSessionsRPC + 1); + if (createSessionRequestCounts == expectedCreateSessionsRPC + 1) { + isMultiplexedSessionCreated = true; + } + } else if (isMultiplexedSessionsEnabledForPartitionedOps(connection.getSpanner()) + && isMultiplexedSessionCreated) { + // When multiplexed session will be reused for each iteration. 
+ assertEquals(0, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } else { + assertEquals( + expectedCreateSessionsRPC, + mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); + assertEquals(maxPartitions, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertFalse( + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .anyMatch(request -> request.getPartitionToken().isEmpty())); + assertFalse( + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .anyMatch(request -> request.getDataBoostEnabled() != dataBoostEnabled)); + mockSpanner.clearRequests(); + } + } + } + } + + @Test + public void testRunPartitionedQuery() { + int generatedRowCount = 20; + RandomResultSetGenerator generator = new RandomResultSetGenerator(generatedRowCount); + Statement statement = + Statement.newBuilder("select * from random_table where active=@active") + .bind("active") + .to(true) + .build(); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + + int maxPartitions = 5; + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + try (PartitionedQueryResultSet resultSet = + connection.runPartitionedQuery( + statement, PartitionOptions.newBuilder().setMaxPartitions(maxPartitions).build())) { + int rowCount = 0; + while (resultSet.next()) { + rowCount++; + } + // The mock server is not smart enough to actually return only a partition of the query. + // Instead, the server returns the same query result for each partition, so the actual row + // count will be maxPartitions * generatedRowCount. 
+ assertEquals(maxPartitions * generatedRowCount, rowCount); + } + if (isMultiplexedSessionsEnabled(connection.getSpanner())) { + assertEquals(2, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } else { + assertEquals(1, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } + } + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); + } + + @Test + public void testRunEmptyPartitionedQuery() { + int generatedRowCount = 0; + RandomResultSetGenerator generator = new RandomResultSetGenerator(generatedRowCount); + Statement statement = + Statement.newBuilder("select * from random_table where active=@active") + .bind("active") + .to(true) + .build(); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + + int maxPartitions = 5; + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + try (PartitionedQueryResultSet resultSet = + connection.runPartitionedQuery( + statement, PartitionOptions.newBuilder().setMaxPartitions(maxPartitions).build())) { + assertFalse(resultSet.next()); + assertNotNull(resultSet.getMetadata()); + assertEquals(26, resultSet.getMetadata().getRowType().getFieldsCount()); + assertNotNull(resultSet.getType()); + assertEquals(26, resultSet.getType().getStructFields().size()); + } + if (isMultiplexedSessionsEnabled(connection.getSpanner())) { + assertEquals(2, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } else { + assertEquals(1, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } + } + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); + } + + @Test + public void testGetMetadataWithoutNextCall() { + int generatedRowCount = 1; + RandomResultSetGenerator generator = new 
RandomResultSetGenerator(generatedRowCount); + Statement statement = + Statement.newBuilder("select * from random_table where active=@active") + .bind("active") + .to(true) + .build(); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + + int maxPartitions = 1; + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + try (PartitionedQueryResultSet resultSet = + connection.runPartitionedQuery( + statement, PartitionOptions.newBuilder().setMaxPartitions(maxPartitions).build())) { + assertNotNull(resultSet.getMetadata()); + assertEquals(26, resultSet.getMetadata().getRowType().getFieldsCount()); + assertNotNull(resultSet.getType()); + assertEquals(26, resultSet.getType().getStructFields().size()); + + assertTrue(resultSet.next()); + assertNotNull(resultSet.getMetadata()); + assertEquals(26, resultSet.getMetadata().getRowType().getFieldsCount()); + assertNotNull(resultSet.getType()); + assertEquals(26, resultSet.getType().getStructFields().size()); + + assertFalse(resultSet.next()); + } + } + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); + } + + @Test + public void testGetMetadataWithoutNextCallOnEmptyResultSet() { + int generatedRowCount = 0; + RandomResultSetGenerator generator = new RandomResultSetGenerator(generatedRowCount); + Statement statement = + Statement.newBuilder("select * from random_table where active=@active") + .bind("active") + .to(true) + .build(); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + + int maxPartitions = 1; + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + try (PartitionedQueryResultSet resultSet = + connection.runPartitionedQuery( + statement, PartitionOptions.newBuilder().setMaxPartitions(maxPartitions).build())) { + assertNotNull(resultSet.getMetadata()); + 
assertEquals(26, resultSet.getMetadata().getRowType().getFieldsCount()); + assertNotNull(resultSet.getType()); + assertEquals(26, resultSet.getType().getStructFields().size()); + + assertFalse(resultSet.next()); + } + } + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); + } + + @Test + public void testGetMetadataWithoutNextCallOnResultSetWithError() { + Statement statement = + Statement.newBuilder("select * from random_table where active=@active") + .bind("active") + .to(true) + .build(); + mockSpanner.putStatementResult( + StatementResult.exception(statement, Status.NOT_FOUND.asRuntimeException())); + + int maxPartitions = 1; + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + try (PartitionedQueryResultSet resultSet = + connection.runPartitionedQuery( + statement, PartitionOptions.newBuilder().setMaxPartitions(maxPartitions).build())) { + assertThrows(SpannerException.class, resultSet::getMetadata); + assertThrows(SpannerException.class, resultSet::getType); + } + } + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); + } + + @Test + public void testRunPartitionedQueryUsingSql() { + int generatedRowCount = 20; + RandomResultSetGenerator generator = new RandomResultSetGenerator(generatedRowCount); + Statement statement = + Statement.newBuilder("select * from random_table where active=@active") + .bind("active") + .to(true) + .build(); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + String prefix = dialect == Dialect.POSTGRESQL ? "spanner." 
: ""; + + int maxPartitions = 5; + int maxParallelism = 4; + try (Connection connection = createConnection()) { + connection.execute(Statement.of("set autocommit=true")); + assertTrue(connection.isAutocommit()); + for (boolean dataBoostEnabled : new boolean[] {false, true}) { + connection.execute( + Statement.of(String.format("set %sdata_boost_enabled=%s", prefix, dataBoostEnabled))); + try (ResultSet resultSet = + connection.executeQuery( + Statement.of(String.format("show variable %sdata_boost_enabled", prefix)))) { + assertTrue(resultSet.next()); + assertEquals(dataBoostEnabled, resultSet.getBoolean(0)); + assertFalse(resultSet.next()); + } + connection.execute( + Statement.of(String.format("set %smax_partitions=%d", prefix, maxPartitions))); + try (ResultSet resultSet = + connection.executeQuery( + Statement.of(String.format("show variable %smax_partitions", prefix)))) { + assertTrue(resultSet.next()); + assertEquals(maxPartitions, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + connection.execute( + Statement.of( + String.format("set %smax_partitioned_parallelism=%d", prefix, maxParallelism))); + try (ResultSet resultSet = + connection.executeQuery( + Statement.of( + String.format("show variable %smax_partitioned_parallelism", prefix)))) { + assertTrue(resultSet.next()); + assertEquals(maxParallelism, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + + try (ResultSet resultSet = + connection.executeQuery( + Statement.newBuilder( + "run\tpartitioned query\n" + + " select * from random_table where active=@active") + .bind("active") + .to(true) + .build())) { + int rowCount = 0; + while (resultSet.next()) { + rowCount++; + } + // The mock server is not smart enough to actually return only a partition of the query. + // Instead, the server returns the same query result for each partition, so the actual row + // count will be maxPartitions * generatedRowCount. 
+ assertEquals(maxPartitions * generatedRowCount, rowCount); + } + } + if (isMultiplexedSessionsEnabledForPartitionedOps(connection.getSpanner())) { + assertEquals(2, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } else if (isMultiplexedSessionsEnabled(connection.getSpanner())) { + assertEquals(3, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } else { + assertEquals(2, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } + } + // We have 2 requests of each, as we run the query with data boost both enabled and disabled. + assertEquals(2, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(2, mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); + } + + @Test + public void testRunPartitionedQueryWithError() { + int generatedRowCount = 20; + RandomResultSetGenerator generator = new RandomResultSetGenerator(generatedRowCount); + Statement statement = + Statement.newBuilder("select * from random_table where active=@active") + .bind("active") + .to(true) + .build(); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + + int maxPartitions = 5; + try (Connection connection = createConnection()) { + // Make sure the query that determines the dialect has been executed before setting any query + // errors. This ensures that the error is returned for the partitioned query, and not for the + // query that determines the dialect. 
+ assertEquals(dialect, connection.getDialect()); + int errorIndex = new Random().nextInt(generatedRowCount); + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofStreamException( + Status.RESOURCE_EXHAUSTED.withDescription("test error").asRuntimeException(), + errorIndex)); + connection.setAutocommit(true); + + int rowCount = 0; + try (ResultSet resultSet = + connection.runPartitionedQuery( + statement, PartitionOptions.newBuilder().setMaxPartitions(maxPartitions).build())) { + while (resultSet.next()) { + rowCount++; + } + fail( + "missing expected exception for error index " + + errorIndex + + ". Got row count " + + rowCount); + } catch (SpannerException exception) { + assertEquals(ErrorCode.RESOURCE_EXHAUSTED, exception.getErrorCode()); + assertTrue(exception.getMessage().contains("test error")); + // The mock server is not smart enough to actually return only a partition of the query. + // Instead, the server returns the same query result for each partition, so the actual row + // count will be (at most): + // maxPartitions * generatedRowCount - (generatedRowCount - errorIndex). + // The error that is added to the stream is removed the first time it is encountered, so + // only one of the partition executors will see it. 
+ assertTrue( + String.format( + "rowCount (%d) should be <= maxPartitions (%d) * generatedRowCount (%d) -" + + " (generatedRowCount (%d) - errorIndex (%d))", + rowCount, maxPartitions, generatedRowCount, generatedRowCount, errorIndex), + rowCount <= (maxPartitions * generatedRowCount - (generatedRowCount - errorIndex))); + } + if (isMultiplexedSessionsEnabled(connection.getSpanner())) { + assertEquals(2, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } else { + assertEquals(1, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } + } + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); + } + + @Test + public void testRunPartitionedQueryWithMaxParallelism() { + int generatedRowCount = 20; + RandomResultSetGenerator generator = new RandomResultSetGenerator(generatedRowCount); + Statement statement = + Statement.newBuilder("select * from random_table where active=@active") + .bind("active") + .to(true) + .build(); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + + int maxPartitions = 15; + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + for (int maxParallelism : new int[] {0, 1, 2, 5, 20}) { + connection.setMaxPartitionedParallelism(maxParallelism); + try (PartitionedQueryResultSet resultSet = + connection.runPartitionedQuery( + statement, PartitionOptions.newBuilder().setMaxPartitions(maxPartitions).build())) { + int expectedParallelism; + if (maxParallelism == 0) { + expectedParallelism = + Math.min(maxPartitions, Runtime.getRuntime().availableProcessors()); + } else { + expectedParallelism = Math.min(maxParallelism, maxPartitions); + } + assertEquals(expectedParallelism, resultSet.getParallelism()); + int rowCount = 0; + while (resultSet.next()) { + rowCount++; + } + // The mock server is not smart enough to actually return only a partition of 
the query. + // Instead, the server returns the same query result for each partition, so the actual row + // count will be maxPartitions * generatedRowCount. + assertEquals(maxPartitions * generatedRowCount, rowCount); + } + } + if (isMultiplexedSessionsEnabledForPartitionedOps(connection.getSpanner())) { + assertEquals(2, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } else if (isMultiplexedSessionsEnabled(connection.getSpanner())) { + assertEquals(6, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } else { + assertEquals(5, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } + } + assertEquals(5, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(5, mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); + } + + @Test + public void testAutoPartitionMode() { + int generatedRowCount = 5; + RandomResultSetGenerator generator = new RandomResultSetGenerator(generatedRowCount); + Statement statement = + Statement.newBuilder("select * from random_table where active=@active") + .bind("active") + .to(true) + .build(); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + String prefix = dialect == Dialect.POSTGRESQL ? "spanner." 
: ""; + + int maxPartitions = 4; + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setMaxPartitions(maxPartitions); + + connection.execute(Statement.of(String.format("set %sauto_partition_mode=true", prefix))); + try (ResultSet resultSet = + connection.executeQuery( + Statement.of(String.format("show variable %sauto_partition_mode", prefix)))) { + assertTrue(resultSet.next()); + assertTrue(resultSet.getBoolean(0)); + assertFalse(resultSet.next()); + } + + try (ResultSet resultSet = connection.executeQuery(statement)) { + int rowCount = 0; + while (resultSet.next()) { + rowCount++; + } + assertEquals(maxPartitions * generatedRowCount, rowCount); + } + try (ResultSet resultSet = connection.execute(statement).getResultSet()) { + int rowCount = 0; + while (resultSet.next()) { + rowCount++; + } + assertEquals(maxPartitions * generatedRowCount, rowCount); + } + SpannerException exception = + assertThrows(SpannerException.class, () -> connection.executeQueryAsync(statement)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + assertTrue( + exception.getMessage(), + exception.getMessage().contains("Partitioned queries cannot be executed asynchronously")); + exception = assertThrows(SpannerException.class, () -> connection.executeAsync(statement)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + assertTrue( + exception.getMessage(), + exception.getMessage().contains("Partitioned queries cannot be executed asynchronously")); + + // Turn off autocommit mode. This will cause the next query to start a read/write transaction. + // These also do not support partitioned queries. 
+ connection.setAutocommit(false); + exception = assertThrows(SpannerException.class, () -> connection.executeQuery(statement)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + assertTrue( + exception.getMessage(), + exception + .getMessage() + .contains("Partition query is not supported for read/write transaction")); + exception = assertThrows(SpannerException.class, () -> connection.execute(statement)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + assertTrue( + exception.getMessage(), + exception + .getMessage() + .contains("Partition query is not supported for read/write transaction")); + + if (isMultiplexedSessionsEnabledForPartitionedOps(connection.getSpanner())) { + assertEquals(2, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } else if (isMultiplexedSessionsEnabled(connection.getSpanner())) { + assertEquals(3, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } else { + assertEquals(2, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + } + } + assertEquals(2, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(2, mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/PgDurationConverterTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/PgDurationConverterTest.java new file mode 100644 index 000000000000..ca0ae403e50c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/PgDurationConverterTest.java @@ -0,0 +1,73 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.PgDurationConverter; +import java.time.Duration; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class PgDurationConverterTest { + @Test + public void testConvert() throws CompileException { + String allowedValues = + ReadOnlyStalenessConverterTest.getAllowedValues( + PgDurationConverter.class, Dialect.POSTGRESQL); + assertNotNull(allowedValues); + PgDurationConverter converter = new PgDurationConverter(allowedValues); + + assertEquals(Duration.ofNanos(1000000), converter.convert("1")); + assertEquals(Duration.ofSeconds(1L), converter.convert("1000")); + assertEquals(Duration.ofSeconds(1L, 1000000), converter.convert("1001")); + + assertEquals( + Duration.ofNanos((int) TimeUnit.MILLISECONDS.toNanos(100L)), converter.convert("'100ms'")); + assertEquals(Duration.ZERO, converter.convert("'0ms'")); + assertNull(converter.convert("'-100ms'")); + assertEquals(Duration.ofSeconds(315576000000L), converter.convert("'315576000000000ms'")); + assertEquals(Duration.ofSeconds(1L), converter.convert("'1s'")); + assertEquals( + Duration.ofSeconds(1L, 
(int) TimeUnit.MILLISECONDS.toNanos(1L)), + converter.convert("'1001ms'")); + + assertEquals(Duration.ofNanos(1), converter.convert("'1ns'")); + assertEquals(Duration.ofNanos(1000), converter.convert("'1us'")); + assertEquals(Duration.ofNanos(1000000), converter.convert("'1ms'")); + assertEquals(Duration.ofNanos(999999999), converter.convert("'999999999ns'")); + assertEquals(Duration.ofSeconds(1L), converter.convert("'1s'")); + + assertNull(converter.convert("''")); + assertNull(converter.convert("' '")); + assertNull(converter.convert("'random string'")); + + assertEquals(Duration.ZERO, converter.convert("default")); + assertEquals(Duration.ZERO, converter.convert("DEFAULT")); + assertEquals(Duration.ZERO, converter.convert("Default")); + assertNull(converter.convert("'default'")); + assertNull(converter.convert("'DEFAULT'")); + assertNull(converter.convert("'Default'")); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/PgTransactionModeConverterTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/PgTransactionModeConverterTest.java new file mode 100644 index 000000000000..f65628def7a9 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/PgTransactionModeConverterTest.java @@ -0,0 +1,190 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.PgTransactionMode.AccessMode.READ_ONLY_TRANSACTION; +import static com.google.cloud.spanner.connection.PgTransactionMode.AccessMode.READ_WRITE_TRANSACTION; +import static com.google.cloud.spanner.connection.PgTransactionMode.IsolationLevel.ISOLATION_LEVEL_DEFAULT; +import static com.google.cloud.spanner.connection.PgTransactionMode.IsolationLevel.ISOLATION_LEVEL_REPEATABLE_READ; +import static com.google.cloud.spanner.connection.PgTransactionMode.IsolationLevel.ISOLATION_LEVEL_SERIALIZABLE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.PgTransactionModeConverter; +import com.google.cloud.spanner.connection.PgTransactionMode.AccessMode; +import com.google.cloud.spanner.connection.PgTransactionMode.IsolationLevel; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class PgTransactionModeConverterTest { + + static PgTransactionMode create(AccessMode accessMode) { + return create(accessMode, null); + } + + static PgTransactionMode create(IsolationLevel isolationLevel) { + return create(null, isolationLevel); + } + + static PgTransactionMode create(AccessMode accessMode, IsolationLevel isolationLevel) { + PgTransactionMode mode = new PgTransactionMode(); + mode.setAccessMode(accessMode); + mode.setIsolationLevel(isolationLevel); + return mode; + } + + @SuppressWarnings("ClassEscapesDefinedScope") + @Test + public void testConvert() throws CompileException { + String allowedValues = + ReadOnlyStalenessConverterTest.getAllowedValues( + PgTransactionModeConverter.class, Dialect.POSTGRESQL); + + 
assertNotNull(allowedValues); + PgTransactionModeConverter converter = new PgTransactionModeConverter(allowedValues); + + assertEquals(create(READ_WRITE_TRANSACTION), converter.convert("read write")); + assertEquals(create(READ_WRITE_TRANSACTION), converter.convert("READ WRITE")); + assertEquals(create(READ_WRITE_TRANSACTION), converter.convert("Read Write")); + assertEquals(create(READ_WRITE_TRANSACTION), converter.convert("read write")); + assertEquals(create(READ_WRITE_TRANSACTION), converter.convert("READ\nWRITE")); + assertEquals(create(READ_WRITE_TRANSACTION), converter.convert("Read\tWrite")); + + assertEquals(create(READ_ONLY_TRANSACTION), converter.convert("read only")); + assertEquals(create(READ_ONLY_TRANSACTION), converter.convert("READ ONLY")); + assertEquals(create(READ_ONLY_TRANSACTION), converter.convert("Read Only")); + assertEquals(create(READ_ONLY_TRANSACTION), converter.convert("read only")); + assertEquals(create(READ_ONLY_TRANSACTION), converter.convert("READ\nONLY")); + assertEquals(create(READ_ONLY_TRANSACTION), converter.convert("Read\tOnly")); + + assertEquals(create(ISOLATION_LEVEL_DEFAULT), converter.convert("isolation level default")); + assertEquals(create(ISOLATION_LEVEL_DEFAULT), converter.convert("ISOLATION LEVEL DEFAULT")); + assertEquals(create(ISOLATION_LEVEL_DEFAULT), converter.convert("Isolation Level Default")); + assertEquals(create(ISOLATION_LEVEL_DEFAULT), converter.convert("isolation level default")); + assertEquals(create(ISOLATION_LEVEL_DEFAULT), converter.convert("ISOLATION\nLEVEL\nDEFAULT")); + assertEquals(create(ISOLATION_LEVEL_DEFAULT), converter.convert("Isolation\tLevel\tDefault")); + + assertEquals( + create(ISOLATION_LEVEL_SERIALIZABLE), converter.convert("isolation level serializable")); + assertEquals( + create(ISOLATION_LEVEL_SERIALIZABLE), converter.convert("ISOLATION LEVEL SERIALIZABLE")); + assertEquals( + create(ISOLATION_LEVEL_SERIALIZABLE), converter.convert("Isolation Level Serializable")); + 
assertEquals( + create(ISOLATION_LEVEL_SERIALIZABLE), + converter.convert("isolation level serializable")); + assertEquals( + create(ISOLATION_LEVEL_SERIALIZABLE), converter.convert("ISOLATION\nLEVEL\nSERIALIZABLE")); + assertEquals( + create(ISOLATION_LEVEL_SERIALIZABLE), converter.convert("Isolation\tLevel\tSerializable")); + + assertEquals( + create(ISOLATION_LEVEL_REPEATABLE_READ), + converter.convert("isolation level repeatable read")); + assertEquals( + create(ISOLATION_LEVEL_REPEATABLE_READ), + converter.convert("ISOLATION LEVEL REPEATABLE READ")); + assertEquals( + create(ISOLATION_LEVEL_REPEATABLE_READ), + converter.convert("Isolation Level Repeatable Read")); + assertEquals( + create(ISOLATION_LEVEL_REPEATABLE_READ), + converter.convert("isolation level repeatable read")); + assertEquals( + create(ISOLATION_LEVEL_REPEATABLE_READ), + converter.convert("ISOLATION\nLEVEL\nREPEATABLE\nREAD")); + assertEquals( + create(ISOLATION_LEVEL_REPEATABLE_READ), + converter.convert("Isolation\tLevel\tRepeatable\tRead")); + + assertEquals(new PgTransactionMode(), converter.convert("")); + assertEquals(new PgTransactionMode(), converter.convert(" ")); + assertNull(converter.convert("random string")); + assertNull(converter.convert("read_write")); + assertNull(converter.convert("READ_WRITE")); + assertNull(converter.convert("read_only")); + assertNull(converter.convert("Read_Only")); + assertNull(converter.convert("READ_ONLY")); + + assertNull(converter.convert("isolation_level default")); + assertNull(converter.convert("isolationlevel default")); + assertNull(converter.convert("isolation level read committed")); + assertNull(converter.convert("isolation level ")); + assertNull(converter.convert("isolation level_default")); + + assertEquals( + create(READ_WRITE_TRANSACTION, ISOLATION_LEVEL_DEFAULT), + converter.convert("read write isolation level default")); + assertEquals( + create(READ_WRITE_TRANSACTION, ISOLATION_LEVEL_DEFAULT), + converter.convert(" read write 
isolation level default ")); + assertEquals( + create(READ_WRITE_TRANSACTION, ISOLATION_LEVEL_DEFAULT), + converter.convert("read write, isolation level default")); + assertEquals( + create(READ_WRITE_TRANSACTION, ISOLATION_LEVEL_DEFAULT), + converter.convert("read write,isolation level default")); + assertEquals( + create(READ_WRITE_TRANSACTION, ISOLATION_LEVEL_DEFAULT), + converter.convert("read write , isolation level default")); + assertEquals( + create(READ_WRITE_TRANSACTION, ISOLATION_LEVEL_DEFAULT), + converter.convert("read write\nisolation level default")); + assertEquals( + create(READ_WRITE_TRANSACTION, ISOLATION_LEVEL_DEFAULT), + converter.convert("isolation level default read write ")); + assertEquals( + create(READ_WRITE_TRANSACTION, ISOLATION_LEVEL_DEFAULT), + converter.convert("isolation level default, read write")); + assertEquals( + create(READ_WRITE_TRANSACTION, ISOLATION_LEVEL_DEFAULT), + converter.convert("isolation level default\nread write")); + assertEquals( + create(READ_ONLY_TRANSACTION, ISOLATION_LEVEL_DEFAULT), + converter.convert("read only isolation level default")); + assertEquals( + create(READ_ONLY_TRANSACTION, ISOLATION_LEVEL_SERIALIZABLE), + converter.convert("read only isolation level serializable")); + assertEquals( + create(READ_ONLY_TRANSACTION, ISOLATION_LEVEL_REPEATABLE_READ), + converter.convert("read only isolation level repeatable read")); + + assertNull(converter.convert("isolation level default, read-only")); + assertNull(converter.convert("isolation level default, read")); + + // This is consistent with the behavior of PostgreSQL. Specifying multiple access modes or + // isolation levels in the same string will return the last mode that is specified. 
+ assertEquals( + create(READ_ONLY_TRANSACTION, ISOLATION_LEVEL_DEFAULT), + converter.convert("read write, isolation level default, read only")); + assertEquals( + create(READ_ONLY_TRANSACTION, ISOLATION_LEVEL_SERIALIZABLE), + converter.convert("isolation level default, read only, isolation level serializable")); + assertEquals( + create(READ_ONLY_TRANSACTION, ISOLATION_LEVEL_REPEATABLE_READ), + converter.convert("isolation level default, read only, isolation level repeatable read")); + assertEquals( + create(READ_ONLY_TRANSACTION, ISOLATION_LEVEL_REPEATABLE_READ), + converter.convert( + "read write, isolation level default, read only isolation level repeatable read")); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ProtoDescriptorsConverterTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ProtoDescriptorsConverterTest.java new file mode 100644 index 000000000000..766bc987d37c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ProtoDescriptorsConverterTest.java @@ -0,0 +1,64 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.ProtoDescriptorsConverter; +import com.google.common.io.ByteStreams; +import java.io.InputStream; +import java.util.Base64; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ProtoDescriptorsConverterTest { + @Test + public void testConvert() throws CompileException { + String allowedValues = + ReadOnlyStalenessConverterTest.getAllowedValues( + ProtoDescriptorsConverter.class, Dialect.GOOGLE_STANDARD_SQL); + assertNotNull(allowedValues); + ProtoDescriptorsConverter converter = new ProtoDescriptorsConverter(allowedValues); + + byte[] protoDescriptors; + try { + InputStream in = + ProtoDescriptorsConverterTest.class + .getClassLoader() + .getResourceAsStream("com/google/cloud/spanner/descriptors.pb"); + assertNotNull(in); + protoDescriptors = ByteStreams.toByteArray(in); + } catch (Exception e) { + throw SpannerExceptionFactory.newSpannerException(e); + } + + assertNull(converter.convert("")); + assertNull(converter.convert("null")); + assertNull(converter.convert(null)); + assertNull(converter.convert("random string")); + + assertArrayEquals( + converter.convert(Base64.getEncoder().encodeToString(protoDescriptors)), protoDescriptors); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ProtoDescriptorsFileConverterTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ProtoDescriptorsFileConverterTest.java new file mode 100644 index 
000000000000..29e4a2b591ba --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ProtoDescriptorsFileConverterTest.java @@ -0,0 +1,46 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.ProtoDescriptorsFileConverter; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ProtoDescriptorsFileConverterTest { + @Test + public void testConvert() throws CompileException { + String allowedValues = + ReadOnlyStalenessConverterTest.getAllowedValues( + ProtoDescriptorsFileConverter.class, Dialect.GOOGLE_STANDARD_SQL); + assertNotNull(allowedValues); + ProtoDescriptorsFileConverter converter = new ProtoDescriptorsFileConverter(allowedValues); + + assertNull(converter.convert("")); + assertNull(converter.convert(null)); + + String filePath = "com/google/cloud/spanner/descriptors.pb"; + assertEquals(converter.convert(filePath), filePath); + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/QueryOptionsTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/QueryOptionsTest.java new file mode 100644 index 000000000000..6d5d4106638f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/QueryOptionsTest.java @@ -0,0 +1,75 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; + +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Statement; +import com.google.spanner.v1.ExecuteSqlRequest; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class QueryOptionsTest extends AbstractMockServerTest { + + @Test + public void testUseOptimizerVersionFromConnectionUrl() { + try (Connection connection = createConnection(";optimizerVersion=10")) { + Repeat.twice( + () -> { + executeSelect1AndConsumeResults(connection); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertEquals("10", request.getQueryOptions().getOptimizerVersion()); + + mockSpanner.clearRequests(); + }); + } + } + + @Test + public void testUseOptimizerVersionFromStatement() { + try (Connection connection = createConnection()) { + connection.execute(Statement.of("set optimizer_version='7'")); + + Repeat.twice( + () -> { + executeSelect1AndConsumeResults(connection); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertEquals("7", request.getQueryOptions().getOptimizerVersion()); + + mockSpanner.clearRequests(); + }); + } + } + + private void executeSelect1AndConsumeResults(Connection connection) { + try (ResultSet resultSet = connection.executeQuery(SELECT1_STATEMENT)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/RandomResultSetGenerator.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/RandomResultSetGenerator.java new file mode 100644 index 
000000000000..e21e0020f6e6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/RandomResultSetGenerator.java @@ -0,0 +1,322 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.SingerProto.Genre; +import com.google.cloud.spanner.SingerProto.SingerInfo; +import com.google.common.io.BaseEncoding; +import com.google.protobuf.ListValue; +import com.google.protobuf.NullValue; +import com.google.protobuf.Value; +import com.google.protobuf.util.Timestamps; +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeAnnotationCode; +import com.google.spanner.v1.TypeCode; +import java.math.BigDecimal; +import java.math.RoundingMode; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Random; +import java.util.UUID; + +/** + * Utility class for generating {@link ResultSet}s containing columns with all possible data types + * of Cloud Spanner filled with random data. 
+ */ +public class RandomResultSetGenerator { + public static Type[] generateAllTypes(Dialect dialect) { + List types = + new ArrayList( + Arrays.asList( + Type.newBuilder().setCode(TypeCode.BOOL).build(), + Type.newBuilder().setCode(TypeCode.INT64).build(), + Type.newBuilder().setCode(TypeCode.FLOAT32).build(), + Type.newBuilder().setCode(TypeCode.FLOAT64).build(), + dialect == Dialect.POSTGRESQL + ? Type.newBuilder() + .setCode(TypeCode.NUMERIC) + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC) + .build() + : Type.newBuilder().setCode(TypeCode.NUMERIC).build(), + Type.newBuilder().setCode(TypeCode.STRING).build(), + dialect == Dialect.POSTGRESQL + ? Type.newBuilder() + .setCode(TypeCode.JSON) + .setTypeAnnotation(TypeAnnotationCode.PG_JSONB) + .build() + : Type.newBuilder().setCode(TypeCode.JSON).build(), + Type.newBuilder().setCode(TypeCode.BYTES).build(), + Type.newBuilder().setCode(TypeCode.DATE).build(), + Type.newBuilder().setCode(TypeCode.UUID).build(), + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build())); + if (dialect == Dialect.POSTGRESQL) { + types.add( + Type.newBuilder() + .setCode(TypeCode.INT64) + .setTypeAnnotation(TypeAnnotationCode.PG_OID) + .build()); + } + types.addAll( + Arrays.asList( + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType(Type.newBuilder().setCode(TypeCode.BOOL)) + .build(), + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType(Type.newBuilder().setCode(TypeCode.INT64)) + .build(), + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType(Type.newBuilder().setCode(TypeCode.FLOAT32)) + .build(), + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType(Type.newBuilder().setCode(TypeCode.FLOAT64)) + .build(), + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + dialect == Dialect.POSTGRESQL + ? 
Type.newBuilder() + .setCode(TypeCode.NUMERIC) + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC) + : Type.newBuilder().setCode(TypeCode.NUMERIC)) + .build(), + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType(Type.newBuilder().setCode(TypeCode.STRING)) + .build(), + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + dialect == Dialect.POSTGRESQL + ? Type.newBuilder() + .setCode(TypeCode.JSON) + .setTypeAnnotation(TypeAnnotationCode.PG_JSONB) + : Type.newBuilder().setCode(TypeCode.JSON)) + .build(), + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType(Type.newBuilder().setCode(TypeCode.BYTES)) + .build(), + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType(Type.newBuilder().setCode(TypeCode.DATE)) + .build(), + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType(Type.newBuilder().setCode(TypeCode.UUID)) + .build(), + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType(Type.newBuilder().setCode(TypeCode.TIMESTAMP)) + .build())); + + appendProtoTypes(types, dialect); + + if (dialect == Dialect.POSTGRESQL) { + types.add( + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + Type.newBuilder() + .setCode(TypeCode.INT64) + .setTypeAnnotation(TypeAnnotationCode.PG_OID)) + .build()); + } + + Type[] typeArray = new Type[types.size()]; + typeArray = types.toArray(typeArray); + return typeArray; + } + + /** To append Proto & Enum types * */ + private static void appendProtoTypes(List types, Dialect dialect) { + if (dialect == Dialect.GOOGLE_STANDARD_SQL) { + types.add( + Type.newBuilder() + .setCode(TypeCode.PROTO) + .setProtoTypeFqn(SingerInfo.getDescriptor().getFullName()) + .build()); + types.add( + Type.newBuilder() + .setCode(TypeCode.ENUM) + .setProtoTypeFqn(Genre.getDescriptor().getFullName()) + .build()); + types.add( + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + Type.newBuilder() + .setCode(TypeCode.PROTO) + 
.setProtoTypeFqn(SingerInfo.getDescriptor().getFullName())) + .build()); + types.add( + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + Type.newBuilder() + .setCode(TypeCode.ENUM) + .setProtoTypeFqn(Genre.getDescriptor().getFullName())) + .build()); + } + } + + public static ResultSetMetadata generateAllTypesMetadata(Type[] types) { + StructType.Builder rowTypeBuilder = StructType.newBuilder(); + for (int col = 0; col < types.length; col++) { + rowTypeBuilder.addFields(Field.newBuilder().setName("COL" + col).setType(types[col])).build(); + } + ResultSetMetadata.Builder builder = ResultSetMetadata.newBuilder(); + builder.setRowType(rowTypeBuilder.build()); + return builder.build(); + } + + private final ResultSetMetadata metadata; + private final Dialect dialect; + private final Type[] types; + private final int rowCount; + private final Random random = new Random(); + + public RandomResultSetGenerator(int rowCount) { + this(rowCount, Dialect.GOOGLE_STANDARD_SQL); + } + + public RandomResultSetGenerator(int rowCount, Dialect dialect) { + this.rowCount = rowCount; + this.dialect = dialect; + this.types = generateAllTypes(dialect); + this.metadata = generateAllTypesMetadata(types); + } + + public ResultSet generate() { + ResultSet.Builder builder = ResultSet.newBuilder(); + for (int row = 0; row < rowCount; row++) { + ListValue.Builder rowBuilder = ListValue.newBuilder(); + for (Type type : types) { + Value.Builder valueBuilder = Value.newBuilder(); + setRandomValue(valueBuilder, type); + rowBuilder.addValues(valueBuilder.build()); + } + builder.addRows(rowBuilder.build()); + } + builder.setMetadata(metadata); + return builder.build(); + } + + private void setRandomValue(Value.Builder builder, Type type) { + if (randomNull()) { + builder.setNullValue(NullValue.NULL_VALUE); + } else { + switch (type.getCode()) { + case ARRAY: + int length = random.nextInt(20) + 1; + ListValue.Builder arrayBuilder = ListValue.newBuilder(); + for (int i = 0; i < 
length; i++) { + Value.Builder valueBuilder = Value.newBuilder(); + setRandomValue(valueBuilder, type.getArrayElementType()); + arrayBuilder.addValues(valueBuilder.build()); + } + builder.setListValue(arrayBuilder.build()); + break; + case BOOL: + builder.setBoolValue(random.nextBoolean()); + break; + case STRING: + case BYTES: + case PROTO: + byte[] bytes = new byte[random.nextInt(200)]; + random.nextBytes(bytes); + builder.setStringValue(BaseEncoding.base64().encode(bytes)); + break; + case JSON: + builder.setStringValue("\"" + random.nextInt(200) + "\":\"" + random.nextInt(200) + "\""); + break; + case DATE: + Date date = + Date.fromYearMonthDay( + random.nextInt(2019) + 1, random.nextInt(11) + 1, random.nextInt(28) + 1); + builder.setStringValue(date.toString()); + break; + case UUID: + UUID uuid = UUID.randomUUID(); + builder.setStringValue(uuid.toString()); + break; + case FLOAT32: + if (randomNaN()) { + builder.setNumberValue(Float.NaN); + } else { + builder.setNumberValue(random.nextFloat()); + } + break; + case FLOAT64: + if (randomNaN()) { + builder.setNumberValue(Double.NaN); + } else { + builder.setNumberValue(random.nextDouble()); + } + break; + case NUMERIC: + if (dialect == Dialect.POSTGRESQL && randomNaN()) { + builder.setStringValue("NaN"); + } else { + builder.setStringValue( + BigDecimal.valueOf(random.nextDouble()) + .setScale(9, RoundingMode.HALF_UP) + .toString()); + } + break; + case INT64: + case ENUM: + builder.setStringValue(String.valueOf(random.nextLong())); + break; + case TIMESTAMP: + com.google.protobuf.Timestamp ts = + Timestamps.add( + Timestamps.EPOCH, + com.google.protobuf.Duration.newBuilder() + .setSeconds(random.nextInt(100_000_000)) + .setNanos(random.nextInt(1000_000_000)) + .build()); + builder.setStringValue(Timestamp.fromProto(ts).toString()); + break; + case STRUCT: + case TYPE_CODE_UNSPECIFIED: + case UNRECOGNIZED: + default: + throw new IllegalArgumentException("Unknown or unsupported type: " + type.getCode()); + } + } 
+ } + + private boolean randomNull() { + return random.nextInt(10) == 0; + } + + private boolean randomNaN() { + return random.nextInt(10) == 0; + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadOnlyStalenessConverterTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadOnlyStalenessConverterTest.java new file mode 100644 index 000000000000..5c62765466df --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadOnlyStalenessConverterTest.java @@ -0,0 +1,176 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.ReadOnlyStalenessConverter; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class ReadOnlyStalenessConverterTest { + @Parameter public Dialect dialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + static String getAllowedValues( + Class> converterClass, Dialect dialect) + throws CompileException { + Set statements = + ClientSideStatements.getInstance(dialect).getCompiledStatements(); + for (ClientSideStatementImpl statement : statements) { + if (statement.getSetStatement() != null + && converterClass.getName().endsWith(statement.getSetStatement().getConverterName())) { + return statement.getSetStatement().getAllowedValues(); + } + } + return null; + } + + @Test + public void testConvert() throws CompileException { + String allowedValues = getAllowedValues(ReadOnlyStalenessConverter.class, dialect); + assertThat(allowedValues, is(notNullValue())); + ReadOnlyStalenessConverter converter = new ReadOnlyStalenessConverter(allowedValues); + + assertThat(converter.convert("strong"), is(equalTo(TimestampBound.strong()))); + assertThat(converter.convert("Strong"), 
is(equalTo(TimestampBound.strong()))); + assertThat(converter.convert("STRONG"), is(equalTo(TimestampBound.strong()))); + + assertThat( + converter.convert("read_timestamp 2018-10-01T23:11:15.10001Z"), + is( + equalTo( + TimestampBound.ofReadTimestamp( + Timestamp.parseTimestamp("2018-10-01T23:11:15.10001Z"))))); + assertThat( + converter.convert("Read_Timestamp 2018-10-01T23:11:15.999Z"), + is( + equalTo( + TimestampBound.ofReadTimestamp( + Timestamp.parseTimestamp("2018-10-01T23:11:15.999Z"))))); + assertThat( + converter.convert("READ_TIMESTAMP 2018-10-01T23:11:15.1000Z"), + is( + equalTo( + TimestampBound.ofReadTimestamp( + Timestamp.parseTimestamp("2018-10-01T23:11:15.1000Z"))))); + assertThat( + converter.convert("read_timestamp 2018-10-01T23:11:15.999999999Z"), + is( + equalTo( + TimestampBound.ofReadTimestamp( + Timestamp.parseTimestamp("2018-10-01T23:11:15.999999999Z"))))); + assertThat( + converter.convert("read_timestamp\t2018-10-01T23:11:15.10001Z"), + is( + equalTo( + TimestampBound.ofReadTimestamp( + Timestamp.parseTimestamp("2018-10-01T23:11:15.10001Z"))))); + assertThat(converter.convert("read_timestamp\n2018-10-01T23:11:15.10001Z"), is(nullValue())); + + assertThat( + converter.convert("min_read_timestamp 2018-10-01T23:11:15.10001Z"), + is( + equalTo( + TimestampBound.ofMinReadTimestamp( + Timestamp.parseTimestamp("2018-10-01T23:11:15.10001Z"))))); + assertThat( + converter.convert("Min_Read_Timestamp 2018-10-01T23:11:15.999Z"), + is( + equalTo( + TimestampBound.ofMinReadTimestamp( + Timestamp.parseTimestamp("2018-10-01T23:11:15.999Z"))))); + assertThat( + converter.convert("MIN_READ_TIMESTAMP 2018-10-01T23:11:15.1000Z"), + is( + equalTo( + TimestampBound.ofMinReadTimestamp( + Timestamp.parseTimestamp("2018-10-01T23:11:15.1000Z"))))); + assertThat( + converter.convert("min_read_timestamp 2018-10-01T23:11:15.999999999Z"), + is( + equalTo( + TimestampBound.ofMinReadTimestamp( + Timestamp.parseTimestamp("2018-10-01T23:11:15.999999999Z"))))); + 
assertThat( + converter.convert("min_read_timestamp\t2018-10-01T23:11:15.10001Z"), + is( + equalTo( + TimestampBound.ofMinReadTimestamp( + Timestamp.parseTimestamp("2018-10-01T23:11:15.10001Z"))))); + assertThat( + converter.convert("min_read_timestamp\n2018-10-01T23:11:15.10001Z"), is(nullValue())); + + assertThat( + converter.convert("exact_staleness 10s"), + is(equalTo(TimestampBound.ofExactStaleness(10L, TimeUnit.SECONDS)))); + assertThat( + converter.convert("Exact_Staleness 100ms"), + is(equalTo(TimestampBound.ofExactStaleness(100L, TimeUnit.MILLISECONDS)))); + assertThat( + converter.convert("EXACT_STALENESS 99999us"), + is(equalTo(TimestampBound.ofExactStaleness(99999L, TimeUnit.MICROSECONDS)))); + assertThat( + converter.convert("exact_staleness 999999999ns"), + is(equalTo(TimestampBound.ofExactStaleness(999999999L, TimeUnit.NANOSECONDS)))); + assertThat( + converter.convert("exact_staleness " + Long.MAX_VALUE + "ns"), + is(equalTo(TimestampBound.ofExactStaleness(Long.MAX_VALUE, TimeUnit.NANOSECONDS)))); + + assertThat( + converter.convert("max_staleness 10s"), + is(equalTo(TimestampBound.ofMaxStaleness(10L, TimeUnit.SECONDS)))); + assertThat( + converter.convert("Max_Staleness 100ms"), + is(equalTo(TimestampBound.ofMaxStaleness(100L, TimeUnit.MILLISECONDS)))); + assertThat( + converter.convert("MAX_STALENESS 99999us"), + is(equalTo(TimestampBound.ofMaxStaleness(99999L, TimeUnit.MICROSECONDS)))); + assertThat( + converter.convert("max_staleness 999999999ns"), + is(equalTo(TimestampBound.ofMaxStaleness(999999999L, TimeUnit.NANOSECONDS)))); + assertThat( + converter.convert("max_staleness " + Long.MAX_VALUE + "ns"), + is(equalTo(TimestampBound.ofMaxStaleness(Long.MAX_VALUE, TimeUnit.NANOSECONDS)))); + + assertThat(converter.convert(""), is(nullValue())); + assertThat(converter.convert(" "), is(nullValue())); + assertThat(converter.convert("random string"), is(nullValue())); + assertThat(converter.convert("read_timestamp"), is(nullValue())); + 
assertThat(converter.convert("min_read_timestamp"), is(nullValue())); + assertThat(converter.convert("exact_staleness"), is(nullValue())); + assertThat(converter.convert("max_staleness"), is(nullValue())); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadOnlyStalenessTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadOnlyStalenessTest.java new file mode 100644 index 000000000000..18116475f950 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadOnlyStalenessTest.java @@ -0,0 +1,201 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.cloud.NoCredentials; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.BatchClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ReadOnlyTransaction; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TimestampBound; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ReadOnlyStalenessTest { + private static final String URI = + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?readOnly=true"; + private static final String SELECT = "select foo from bar"; + + private final DatabaseClient dbClient = mock(DatabaseClient.class); + + private ConnectionImpl createConnection(ConnectionOptions options) { + when(dbClient.getDialect()).thenReturn(Dialect.GOOGLE_STANDARD_SQL); + Spanner spanner = mock(Spanner.class); + SpannerPool spannerPool = mock(SpannerPool.class); + when(spannerPool.getSpanner(any(ConnectionOptions.class), any(ConnectionImpl.class))) + .thenReturn(spanner); + DdlClient ddlClient = mock(DdlClient.class); + ReadOnlyTransaction singleUseReadOnlyTx = mock(ReadOnlyTransaction.class); + when(singleUseReadOnlyTx.executeQuery(Statement.of(SELECT))).thenReturn(mock(ResultSet.class)); + when(dbClient.singleUseReadOnlyTransaction(any(TimestampBound.class))) + .thenReturn(singleUseReadOnlyTx); + ReadOnlyTransaction readOnlyTx = 
mock(ReadOnlyTransaction.class); + when(readOnlyTx.executeQuery(Statement.of(SELECT))).thenReturn(mock(ResultSet.class)); + when(dbClient.readOnlyTransaction(any(TimestampBound.class))).thenReturn(readOnlyTx); + + return new ConnectionImpl(options, spannerPool, ddlClient, dbClient, mock(BatchClient.class)); + } + + @Test + public void testDefaultReadOnlyStalenessAutocommitOnce() { + try (Connection connection = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(connection.isAutocommit(), is(true)); + assertThat(connection.isReadOnly(), is(true)); + connection.execute(Statement.of(SELECT)); + verify(dbClient).singleUseReadOnlyTransaction(TimestampBound.strong()); + } + } + + @Test + public void testDefaultReadOnlyStalenessAutocommitTwice() { + try (Connection connection = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(connection.isAutocommit(), is(true)); + assertThat(connection.isReadOnly(), is(true)); + connection.execute(Statement.of(SELECT)); + connection.execute(Statement.of(SELECT)); + verify(dbClient, times(2)).singleUseReadOnlyTransaction(TimestampBound.strong()); + } + } + + @Test + public void testDefaultReadOnlyStalenessAutocommitChanging() { + try (Connection connection = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + assertThat(connection.isAutocommit(), is(true)); + assertThat(connection.isReadOnly(), is(true)); + connection.execute(Statement.of(SELECT)); + verify(dbClient).singleUseReadOnlyTransaction(TimestampBound.strong()); + + connection.setReadOnlyStaleness(TimestampBound.ofExactStaleness(10L, TimeUnit.SECONDS)); + connection.execute(Statement.of(SELECT)); + verify(dbClient) + .singleUseReadOnlyTransaction(TimestampBound.ofExactStaleness(10L, TimeUnit.SECONDS)); + + 
connection.setReadOnlyStaleness(TimestampBound.ofMaxStaleness(5L, TimeUnit.SECONDS)); + connection.execute(Statement.of(SELECT)); + verify(dbClient) + .singleUseReadOnlyTransaction(TimestampBound.ofMaxStaleness(5L, TimeUnit.SECONDS)); + + connection.setReadOnlyStaleness(TimestampBound.ofReadTimestamp(Timestamp.MIN_VALUE)); + connection.execute(Statement.of(SELECT)); + verify(dbClient) + .singleUseReadOnlyTransaction(TimestampBound.ofReadTimestamp(Timestamp.MIN_VALUE)); + + connection.setReadOnlyStaleness(TimestampBound.ofMinReadTimestamp(Timestamp.MAX_VALUE)); + connection.execute(Statement.of(SELECT)); + verify(dbClient) + .singleUseReadOnlyTransaction(TimestampBound.ofMinReadTimestamp(Timestamp.MAX_VALUE)); + } + } + + @Test + public void testDefaultReadOnlyStalenessTransactionalOnce() { + try (Connection connection = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + connection.setAutocommit(false); + assertThat(connection.isAutocommit(), is(false)); + assertThat(connection.isReadOnly(), is(true)); + connection.execute(Statement.of(SELECT)); + verify(dbClient).readOnlyTransaction(TimestampBound.strong()); + } + } + + @Test + public void testDefaultReadOnlyStalenessTransactionalTwice() { + try (Connection connection = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + connection.setAutocommit(false); + assertThat(connection.isAutocommit(), is(false)); + assertThat(connection.isReadOnly(), is(true)); + connection.execute(Statement.of(SELECT)); + connection.execute(Statement.of(SELECT)); + connection.commit(); + // one transaction + verify(dbClient, times(1)).readOnlyTransaction(TimestampBound.strong()); + + connection.execute(Statement.of(SELECT)); + connection.commit(); + connection.execute(Statement.of(SELECT)); + // two transactions (plus one above) + verify(dbClient, 
times(3)).readOnlyTransaction(TimestampBound.strong()); + } + } + + @Test + public void testDefaultReadOnlyStalenessTransactionalChanging() { + try (Connection connection = + createConnection( + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setUri(URI) + .build())) { + connection.setAutocommit(false); + assertThat(connection.isAutocommit(), is(false)); + assertThat(connection.isReadOnly(), is(true)); + connection.execute(Statement.of(SELECT)); + verify(dbClient).readOnlyTransaction(TimestampBound.strong()); + connection.commit(); + + connection.setReadOnlyStaleness(TimestampBound.ofExactStaleness(10L, TimeUnit.SECONDS)); + connection.execute(Statement.of(SELECT)); + verify(dbClient).readOnlyTransaction(TimestampBound.ofExactStaleness(10L, TimeUnit.SECONDS)); + connection.commit(); + + connection.setReadOnlyStaleness(TimestampBound.ofReadTimestamp(Timestamp.MIN_VALUE)); + connection.execute(Statement.of(SELECT)); + verify(dbClient).readOnlyTransaction(TimestampBound.ofReadTimestamp(Timestamp.MIN_VALUE)); + connection.commit(); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadOnlyStalenessUtilTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadOnlyStalenessUtilTest.java new file mode 100644 index 000000000000..abd88bb6c8ba --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadOnlyStalenessUtilTest.java @@ -0,0 +1,170 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.ReadOnlyStalenessUtil.durationToString; +import static com.google.cloud.spanner.connection.ReadOnlyStalenessUtil.getTimeUnitAbbreviation; +import static com.google.cloud.spanner.connection.ReadOnlyStalenessUtil.parseRfc3339; +import static com.google.cloud.spanner.connection.ReadOnlyStalenessUtil.parseTimeUnit; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.MatcherAssert.assertThat; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.TimestampBound; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ReadOnlyStalenessUtilTest { + + @Test + public void testParseRfc3339() { + Map timestamps = new HashMap<>(); + timestamps.put( + "2018-03-01T10:11:12.999Z", Timestamp.ofTimeSecondsAndNanos(1519899072L, 999000000)); + timestamps.put("2018-10-28T02:00:00+02:00", Timestamp.ofTimeSecondsAndNanos(1540684800L, 0)); + timestamps.put("2018-10-28T03:00:00+01:00", Timestamp.ofTimeSecondsAndNanos(1540692000L, 0)); + timestamps.put( + "2018-01-01T00:00:00.000000001Z", 
Timestamp.ofTimeSecondsAndNanos(1514764800L, 1)); + timestamps.put("2018-10-28T02:00:00Z", Timestamp.ofTimeSecondsAndNanos(1540692000L, 0)); + timestamps.put( + "2018-12-31T23:59:59.999999999Z", Timestamp.ofTimeSecondsAndNanos(1546300799L, 999999999)); + timestamps.put( + "2018-03-01T10:11:12.9999Z", Timestamp.ofTimeSecondsAndNanos(1519899072L, 999900000)); + timestamps.put( + "2018-03-01T10:11:12.000000001Z", Timestamp.ofTimeSecondsAndNanos(1519899072L, 1)); + timestamps.put( + "2018-03-01T10:11:12.100000000Z", Timestamp.ofTimeSecondsAndNanos(1519899072L, 100000000)); + timestamps.put( + "2018-03-01T10:11:12.100000001Z", Timestamp.ofTimeSecondsAndNanos(1519899072L, 100000001)); + timestamps.put("2018-03-01T10:11:12-10:00", Timestamp.ofTimeSecondsAndNanos(1519935072L, 0)); + timestamps.put( + "2018-03-01T10:11:12.999999999Z", Timestamp.ofTimeSecondsAndNanos(1519899072L, 999999999)); + timestamps.put("2018-03-01T10:11:12-12:00", Timestamp.ofTimeSecondsAndNanos(1519942272L, 0)); + timestamps.put("2018-10-28T03:00:00Z", Timestamp.ofTimeSecondsAndNanos(1540695600L, 0)); + timestamps.put("2018-10-28T02:30:00Z", Timestamp.ofTimeSecondsAndNanos(1540693800L, 0)); + timestamps.put( + "2018-03-01T10:11:12.123Z", Timestamp.ofTimeSecondsAndNanos(1519899072L, 123000000)); + timestamps.put("2018-10-28T02:30:00+02:00", Timestamp.ofTimeSecondsAndNanos(1540686600L, 0)); + timestamps.put( + "2018-03-01T10:11:12.123456789Z", Timestamp.ofTimeSecondsAndNanos(1519899072L, 123456789)); + timestamps.put( + "2018-03-01T10:11:12.1000Z", Timestamp.ofTimeSecondsAndNanos(1519899072L, 100000000)); + + for (Entry ts : timestamps.entrySet()) { + Timestamp gTimestamp = parseRfc3339(ts.getKey()); + assertThat( + "Seconds for timestamp " + ts + " do not match", + gTimestamp.getSeconds(), + is(equalTo(ts.getValue().getSeconds()))); + assertThat( + "Nanos for timestamp " + ts + " do not match", + gTimestamp.getNanos(), + is(equalTo(ts.getValue().getNanos()))); + } + } + + @Test + public void 
testParseTimeUnit() { + assertThat(parseTimeUnit("s"), is(equalTo(TimeUnit.SECONDS))); + assertThat(parseTimeUnit("ms"), is(equalTo(TimeUnit.MILLISECONDS))); + assertThat(parseTimeUnit("us"), is(equalTo(TimeUnit.MICROSECONDS))); + assertThat(parseTimeUnit("ns"), is(equalTo(TimeUnit.NANOSECONDS))); + } + + @Test + public void testGetTimeUnitAbbreviation() { + assertThat(getTimeUnitAbbreviation(TimeUnit.SECONDS), is(equalTo("s"))); + assertThat(getTimeUnitAbbreviation(TimeUnit.MILLISECONDS), is(equalTo("ms"))); + assertThat(getTimeUnitAbbreviation(TimeUnit.MICROSECONDS), is(equalTo("us"))); + assertThat(getTimeUnitAbbreviation(TimeUnit.NANOSECONDS), is(equalTo("ns"))); + + List supportedTimeUnits = + Arrays.asList( + TimeUnit.SECONDS, TimeUnit.MILLISECONDS, TimeUnit.MICROSECONDS, TimeUnit.NANOSECONDS); + for (TimeUnit unit : TimeUnit.values()) { + if (supportedTimeUnits.contains(unit)) { + assertThat(getTimeUnitAbbreviation(unit), is(notNullValue())); + } else { + String value = null; + try { + value = getTimeUnitAbbreviation(unit); + } catch (SpannerException e) { + if (e.getErrorCode() == ErrorCode.INVALID_ARGUMENT) { + value = "unsupported"; + } + } + assertThat(value, is(equalTo("unsupported"))); + } + } + } + + @Test + public void testStalenessToString() { + assertThat( + durationToString( + new ReadOnlyStalenessUtil.MaxStalenessGetter( + TimestampBound.ofMaxStaleness(10L, TimeUnit.NANOSECONDS))), + is(equalTo("10ns"))); + assertThat( + durationToString( + new ReadOnlyStalenessUtil.MaxStalenessGetter( + TimestampBound.ofMaxStaleness(1000L, TimeUnit.NANOSECONDS))), + is(equalTo("1us"))); + assertThat( + durationToString( + new ReadOnlyStalenessUtil.MaxStalenessGetter( + TimestampBound.ofMaxStaleness(100000L, TimeUnit.NANOSECONDS))), + is(equalTo("100us"))); + assertThat( + durationToString( + new ReadOnlyStalenessUtil.MaxStalenessGetter( + TimestampBound.ofMaxStaleness(999999L, TimeUnit.NANOSECONDS))), + is(equalTo("999999ns"))); + assertThat( + durationToString( 
+ new ReadOnlyStalenessUtil.MaxStalenessGetter( + TimestampBound.ofMaxStaleness(1L, TimeUnit.SECONDS))), + is(equalTo("1s"))); + assertThat( + durationToString( + new ReadOnlyStalenessUtil.MaxStalenessGetter( + TimestampBound.ofMaxStaleness(1000L, TimeUnit.MILLISECONDS))), + is(equalTo("1s"))); + assertThat( + durationToString( + new ReadOnlyStalenessUtil.MaxStalenessGetter( + TimestampBound.ofMaxStaleness(1001L, TimeUnit.MILLISECONDS))), + is(equalTo("1001ms"))); + assertThat( + durationToString( + new ReadOnlyStalenessUtil.MaxStalenessGetter( + TimestampBound.ofMaxStaleness(1000000000L, TimeUnit.NANOSECONDS))), + is(equalTo("1s"))); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadOnlyTransactionTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadOnlyTransactionTest.java new file mode 100644 index 000000000000..0c592d858041 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadOnlyTransactionTest.java @@ -0,0 +1,488 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.api.core.ApiFuture; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AsyncResultSet; +import com.google.cloud.spanner.BatchClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.Options.ReadOption; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; +import com.google.cloud.spanner.connection.UnitOfWork.CallType; +import com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState; +import com.google.spanner.v1.ResultSetStats; +import io.opentelemetry.api.trace.Span; +import java.util.Arrays; +import java.util.Calendar; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class 
ReadOnlyTransactionTest { + + private static final class SimpleReadOnlyTransaction + implements com.google.cloud.spanner.ReadOnlyTransaction { + private Timestamp readTimestamp = null; + private final TimestampBound staleness; + + private SimpleReadOnlyTransaction(TimestampBound staleness) { + this.staleness = staleness; + } + + @Override + public ResultSet read( + String table, KeySet keys, Iterable columns, ReadOption... options) { + return null; + } + + @Override + public ResultSet readUsingIndex( + String table, String index, KeySet keys, Iterable columns, ReadOption... options) { + return null; + } + + @Override + public Struct readRow(String table, Key key, Iterable columns) { + return null; + } + + @Override + public Struct readRowUsingIndex(String table, String index, Key key, Iterable columns) { + return null; + } + + @Override + public ResultSet executeQuery(Statement statement, QueryOption... options) { + if (readTimestamp == null) { + switch (staleness.getMode()) { + case STRONG: + readTimestamp = Timestamp.now(); + break; + case READ_TIMESTAMP: + readTimestamp = staleness.getReadTimestamp(); + break; + case MIN_READ_TIMESTAMP: + readTimestamp = staleness.getMinReadTimestamp(); + break; + case EXACT_STALENESS: + Calendar cal = Calendar.getInstance(); + cal.add( + Calendar.MILLISECOND, (int) -staleness.getExactStaleness(TimeUnit.MILLISECONDS)); + readTimestamp = Timestamp.of(cal.getTime()); + break; + case MAX_STALENESS: + cal = Calendar.getInstance(); + cal.add(Calendar.MILLISECOND, (int) -staleness.getMaxStaleness(TimeUnit.MILLISECONDS)); + readTimestamp = Timestamp.of(cal.getTime()); + break; + default: + throw new IllegalStateException(); + } + } + return mock(ResultSet.class); + } + + @Override + public ResultSet analyzeQuery(Statement statement, QueryAnalyzeMode queryMode) { + ResultSet res = executeQuery(statement); + when(res.getStats()).thenReturn(ResultSetStats.getDefaultInstance()); + return res; + } + + @Override + public void close() {} + + 
@Override + public Timestamp getReadTimestamp() { + return readTimestamp; + } + + @Override + public AsyncResultSet readAsync( + String table, KeySet keys, Iterable columns, ReadOption... options) { + return null; + } + + @Override + public AsyncResultSet readUsingIndexAsync( + String table, String index, KeySet keys, Iterable columns, ReadOption... options) { + return null; + } + + @Override + public ApiFuture readRowAsync(String table, Key key, Iterable columns) { + return null; + } + + @Override + public ApiFuture readRowUsingIndexAsync( + String table, String index, Key key, Iterable columns) { + return null; + } + + @Override + public AsyncResultSet executeQueryAsync(Statement statement, QueryOption... options) { + return null; + } + } + + private ReadOnlyTransaction createSubject() { + return createSubject(TimestampBound.strong()); + } + + private ReadOnlyTransaction createSubject(TimestampBound staleness) { + DatabaseClient client = mock(DatabaseClient.class); + when(client.readOnlyTransaction(staleness)) + .thenReturn(new SimpleReadOnlyTransaction(staleness)); + return ReadOnlyTransaction.newBuilder() + .setDatabaseClient(client) + .setBatchClient(mock(BatchClient.class)) + .setReadOnlyStaleness(staleness) + .withStatementExecutor(new StatementExecutor()) + .setSpan(Span.getInvalid()) + .build(); + } + + @Test + public void testExecuteDdl() { + ParsedStatement ddl = mock(ParsedStatement.class); + when(ddl.getType()).thenReturn(StatementType.DDL); + try { + createSubject().executeDdlAsync(CallType.SYNC, ddl); + fail("Expected exception"); + } catch (SpannerException ex) { + assertEquals(ErrorCode.FAILED_PRECONDITION, ex.getErrorCode()); + } + } + + @Test + public void testExecuteUpdate() { + ParsedStatement update = mock(ParsedStatement.class); + when(update.getType()).thenReturn(StatementType.UPDATE); + try { + createSubject().executeUpdateAsync(CallType.SYNC, update); + fail("Expected exception"); + } catch (SpannerException ex) { + 
assertEquals(ErrorCode.FAILED_PRECONDITION, ex.getErrorCode()); + } + } + + @Test + public void testWriteIterable() { + Mutation mutation = Mutation.newInsertBuilder("foo").build(); + try { + createSubject().writeAsync(CallType.SYNC, Arrays.asList(mutation, mutation)); + fail("Expected exception"); + } catch (SpannerException ex) { + assertEquals(ErrorCode.FAILED_PRECONDITION, ex.getErrorCode()); + } + } + + @Test + public void testRunBatch() { + ReadOnlyTransaction subject = createSubject(); + try { + subject.runBatchAsync(CallType.SYNC); + fail("Expected exception"); + } catch (SpannerException ex) { + assertEquals(ErrorCode.FAILED_PRECONDITION, ex.getErrorCode()); + } + } + + @Test + public void testAbortBatch() { + ReadOnlyTransaction subject = createSubject(); + try { + subject.abortBatch(); + fail("Expected exception"); + } catch (SpannerException ex) { + assertEquals(ErrorCode.FAILED_PRECONDITION, ex.getErrorCode()); + } + } + + @Test + public void testGetCommitTimestamp() { + ReadOnlyTransaction transaction = createSubject(); + get(transaction.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); + assertThat(transaction.getState(), is(UnitOfWorkState.COMMITTED)); + try { + transaction.getCommitTimestamp(); + fail("Expected exception"); + } catch (SpannerException ex) { + assertEquals(ErrorCode.FAILED_PRECONDITION, ex.getErrorCode()); + } + } + + @Test + public void testGetCommitResponse() { + ReadOnlyTransaction transaction = createSubject(); + get(transaction.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); + try { + transaction.getCommitResponse(); + fail("expected FAILED_PRECONDITION"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + + @Test + public void testGetCommitResponseOrNull() { + ReadOnlyTransaction transaction = createSubject(); + get(transaction.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); + 
assertNull(transaction.getCommitResponseOrNull()); + } + + @Test + public void testIsReadOnly() { + assertThat(createSubject().isReadOnly(), is(true)); + } + + @Test + public void testExecuteQuery() { + for (TimestampBound staleness : getTestTimestampBounds()) { + ParsedStatement parsedStatement = mock(ParsedStatement.class); + when(parsedStatement.getType()).thenReturn(StatementType.QUERY); + when(parsedStatement.isQuery()).thenReturn(true); + Statement statement = Statement.of("SELECT * FROM FOO"); + when(parsedStatement.getStatement()).thenReturn(statement); + when(parsedStatement.getSql()).thenReturn(statement.getSql()); + + ReadOnlyTransaction transaction = createSubject(staleness); + ResultSet rs = + get(transaction.executeQueryAsync(CallType.SYNC, parsedStatement, AnalyzeMode.NONE)); + assertThat(rs, is(notNullValue())); + assertThat(rs.getStats(), is(nullValue())); + } + } + + @Test + public void testExecuteQueryWithOptionsTest() { + String sql = "SELECT * FROM FOO"; + QueryOption option = Options.prefetchChunks(10000); + ParsedStatement parsedStatement = mock(ParsedStatement.class); + when(parsedStatement.getType()).thenReturn(StatementType.QUERY); + when(parsedStatement.isQuery()).thenReturn(true); + Statement statement = Statement.of(sql); + when(parsedStatement.getStatement()).thenReturn(statement); + when(parsedStatement.getSql()).thenReturn(statement.getSql()); + DatabaseClient client = mock(DatabaseClient.class); + com.google.cloud.spanner.ReadOnlyTransaction tx = + mock(com.google.cloud.spanner.ReadOnlyTransaction.class); + ResultSet resWithOptions = mock(ResultSet.class); + ResultSet resWithoutOptions = mock(ResultSet.class); + when(tx.executeQuery(Statement.of(sql), option)).thenReturn(resWithOptions); + when(tx.executeQuery(Statement.of(sql))).thenReturn(resWithoutOptions); + when(client.readOnlyTransaction(TimestampBound.strong())).thenReturn(tx); + + ReadOnlyTransaction transaction = + ReadOnlyTransaction.newBuilder() + 
.setDatabaseClient(client) + .setBatchClient(mock(BatchClient.class)) + .setReadOnlyStaleness(TimestampBound.strong()) + .withStatementExecutor(new StatementExecutor()) + .setSpan(Span.getInvalid()) + .build(); + ResultSet expectedWithOptions = DirectExecuteResultSet.ofResultSet(resWithOptions); + assertThat( + get( + transaction.executeQueryAsync( + CallType.SYNC, parsedStatement, AnalyzeMode.NONE, option)), + is(equalTo(expectedWithOptions))); + ResultSet expectedWithoutOptions = DirectExecuteResultSet.ofResultSet(resWithoutOptions); + assertThat( + get(transaction.executeQueryAsync(CallType.SYNC, parsedStatement, AnalyzeMode.NONE)), + is(equalTo(expectedWithoutOptions))); + } + + @Test + public void testPlanQuery() { + for (TimestampBound staleness : getTestTimestampBounds()) { + ParsedStatement parsedStatement = mock(ParsedStatement.class); + when(parsedStatement.getType()).thenReturn(StatementType.QUERY); + when(parsedStatement.isQuery()).thenReturn(true); + Statement statement = Statement.of("SELECT * FROM FOO"); + when(parsedStatement.getStatement()).thenReturn(statement); + when(parsedStatement.getSql()).thenReturn(statement.getSql()); + + ReadOnlyTransaction transaction = createSubject(staleness); + ResultSet rs = + get(transaction.executeQueryAsync(CallType.SYNC, parsedStatement, AnalyzeMode.PLAN)); + assertThat(rs, is(notNullValue())); + // get all results and then get the stats + while (rs.next()) { + // do nothing + } + assertThat(rs.getStats(), is(notNullValue())); + } + } + + @Test + public void testProfileQuery() { + for (TimestampBound staleness : getTestTimestampBounds()) { + ParsedStatement parsedStatement = mock(ParsedStatement.class); + when(parsedStatement.getType()).thenReturn(StatementType.QUERY); + when(parsedStatement.isQuery()).thenReturn(true); + Statement statement = Statement.of("SELECT * FROM FOO"); + when(parsedStatement.getStatement()).thenReturn(statement); + when(parsedStatement.getSql()).thenReturn(statement.getSql()); + + 
ReadOnlyTransaction transaction = createSubject(staleness); + ResultSet rs = + get(transaction.executeQueryAsync(CallType.SYNC, parsedStatement, AnalyzeMode.PROFILE)); + assertThat(rs, is(notNullValue())); + // get all results and then get the stats + while (rs.next()) { + // do nothing + } + assertThat(rs.getStats(), is(notNullValue())); + } + } + + @Test + public void testGetReadTimestamp() { + for (TimestampBound staleness : getTestTimestampBounds()) { + ParsedStatement parsedStatement = mock(ParsedStatement.class); + when(parsedStatement.getType()).thenReturn(StatementType.QUERY); + when(parsedStatement.isQuery()).thenReturn(true); + Statement statement = Statement.of("SELECT * FROM FOO"); + when(parsedStatement.getStatement()).thenReturn(statement); + when(parsedStatement.getSql()).thenReturn(statement.getSql()); + + ReadOnlyTransaction transaction = createSubject(staleness); + boolean expectedException = false; + try { + transaction.getReadTimestamp(); + } catch (SpannerException e) { + if (e.getErrorCode() == ErrorCode.FAILED_PRECONDITION) { + expectedException = true; + } + } + assertThat(expectedException, is(true)); + assertThat( + get(transaction.executeQueryAsync(CallType.SYNC, parsedStatement, AnalyzeMode.NONE)), + is(notNullValue())); + assertThat(transaction.getReadTimestamp(), is(notNullValue())); + } + } + + private List getTestTimestampBounds() { + return Arrays.asList( + TimestampBound.strong(), + TimestampBound.ofReadTimestamp(Timestamp.now()), + TimestampBound.ofMinReadTimestamp(Timestamp.now()), + TimestampBound.ofExactStaleness(1L, TimeUnit.SECONDS), + TimestampBound.ofMaxStaleness(100L, TimeUnit.MILLISECONDS)); + } + + @Test + public void testState() { + ParsedStatement parsedStatement = mock(ParsedStatement.class); + when(parsedStatement.getType()).thenReturn(StatementType.QUERY); + when(parsedStatement.isQuery()).thenReturn(true); + Statement statement = Statement.of("SELECT * FROM FOO"); + 
when(parsedStatement.getStatement()).thenReturn(statement); + when(parsedStatement.getSql()).thenReturn(statement.getSql()); + + ReadOnlyTransaction transaction = createSubject(); + assertThat( + transaction.getState(), + is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.STARTED))); + assertThat(transaction.isActive(), is(true)); + get(transaction.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); + assertThat( + transaction.getState(), + is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.COMMITTED))); + assertThat(transaction.isActive(), is(false)); + + transaction = createSubject(); + assertThat( + transaction.getState(), + is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.STARTED))); + assertThat(transaction.isActive(), is(true)); + assertThat( + get(transaction.executeQueryAsync(CallType.SYNC, parsedStatement, AnalyzeMode.NONE)), + is(notNullValue())); + assertThat( + transaction.getState(), + is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.STARTED))); + assertThat(transaction.isActive(), is(true)); + + get(transaction.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); + assertThat( + transaction.getState(), + is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.COMMITTED))); + assertThat(transaction.isActive(), is(false)); + + // start a new transaction + transaction = createSubject(); + assertThat( + transaction.getState(), + is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.STARTED))); + assertThat(transaction.isActive(), is(true)); + get(transaction.rollbackAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); + assertThat( + transaction.getState(), + is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.ROLLED_BACK))); + assertThat(transaction.isActive(), is(false)); + + transaction = createSubject(); + assertThat( + transaction.getState(), + 
is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.STARTED))); + assertThat(transaction.isActive(), is(true)); + assertThat( + get(transaction.executeQueryAsync(CallType.SYNC, parsedStatement, AnalyzeMode.NONE)), + is(notNullValue())); + assertThat( + transaction.getState(), + is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.STARTED))); + assertThat(transaction.isActive(), is(true)); + get(transaction.rollbackAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); + assertThat( + transaction.getState(), + is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.ROLLED_BACK))); + assertThat(transaction.isActive(), is(false)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadWriteTransactionTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadWriteTransactionTest.java new file mode 100644 index 000000000000..7d0fa94c9b0b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadWriteTransactionTest.java @@ -0,0 +1,870 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.CommitResponse; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.ProtobufResultSet; +import com.google.cloud.spanner.ReadContext.QueryAnalyzeMode; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.ResultSets; +import com.google.cloud.spanner.SingerProto.Genre; +import com.google.cloud.spanner.SingerProto.SingerInfo; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TransactionContext; +import com.google.cloud.spanner.TransactionManager; +import com.google.cloud.spanner.TransactionManager.TransactionState; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.StructField; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; +import 
com.google.cloud.spanner.connection.UnitOfWork.CallType; +import com.google.protobuf.ProtocolMessageEnum; +import com.google.rpc.RetryInfo; +import com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import io.grpc.Metadata; +import io.grpc.StatusRuntimeException; +import io.grpc.protobuf.ProtoUtils; +import io.opentelemetry.api.trace.Span; +import java.math.BigDecimal; +import java.util.Arrays; +import java.util.Collections; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ReadWriteTransactionTest { + + private enum CommitBehavior { + SUCCEED, + FAIL, + ABORT + } + + private static class SimpleTransactionManager implements TransactionManager { + private TransactionState state; + private CommitResponse commitResponse; + private TransactionContext txContext; + private CommitBehavior commitBehavior; + + private SimpleTransactionManager(TransactionContext txContext, CommitBehavior commitBehavior) { + this.txContext = txContext; + this.commitBehavior = commitBehavior; + } + + @Override + public TransactionContext begin() { + state = TransactionState.STARTED; + return txContext; + } + + @Override + public TransactionContext begin(AbortedException exception) { + return begin(); + } + + @Override + public void commit() { + switch (commitBehavior) { + case SUCCEED: + commitResponse = new CommitResponse(Timestamp.ofTimeSecondsAndNanos(1, 1)); + state = TransactionState.COMMITTED; + break; + case FAIL: + state = TransactionState.COMMIT_FAILED; + throw SpannerExceptionFactory.newSpannerException(ErrorCode.UNKNOWN, "commit failed"); + case ABORT: + state = TransactionState.COMMIT_FAILED; + commitBehavior = CommitBehavior.SUCCEED; + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.ABORTED, "commit aborted", createAbortedExceptionWithMinimalRetry()); + default: + throw new IllegalStateException(); + } + } + + @Override + public void 
rollback() { + state = TransactionState.ROLLED_BACK; + } + + @Override + public TransactionContext resetForRetry() { + return txContext; + } + + @Override + public Timestamp getCommitTimestamp() { + return commitResponse == null ? null : commitResponse.getCommitTimestamp(); + } + + public CommitResponse getCommitResponse() { + return commitResponse; + } + + @Override + public TransactionState getState() { + return state; + } + + @Override + public void close() { + if (state != TransactionState.COMMITTED) { + state = TransactionState.ROLLED_BACK; + } + } + } + + private ReadWriteTransaction createSubject() { + return createSubject(CommitBehavior.SUCCEED, false); + } + + private ReadWriteTransaction createSubject(CommitBehavior commitBehavior) { + return createSubject(commitBehavior, false); + } + + private ReadWriteTransaction createSubject( + final CommitBehavior commitBehavior, boolean withRetry) { + DatabaseClient client = mock(DatabaseClient.class); + when(client.transactionManager()) + .thenAnswer( + invocation -> { + TransactionContext txContext = mock(TransactionContext.class); + when(txContext.executeQuery(any(Statement.class))).thenReturn(mock(ResultSet.class)); + ResultSet rsWithStats = mock(ResultSet.class); + when(rsWithStats.getStats()).thenReturn(ResultSetStats.getDefaultInstance()); + when(txContext.analyzeQuery(any(Statement.class), any(QueryAnalyzeMode.class))) + .thenReturn(rsWithStats); + when(txContext.executeUpdate(any(Statement.class))).thenReturn(1L); + return new SimpleTransactionManager(txContext, commitBehavior); + }); + return ReadWriteTransaction.newBuilder() + .setDatabaseClient(client) + .setRetryAbortsInternally(withRetry) + .setIsolationLevel(IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED) + .setSavepointSupport(SavepointSupport.FAIL_AFTER_ROLLBACK) + .setTransactionRetryListeners(Collections.emptyList()) + .withStatementExecutor(new StatementExecutor()) + .setSpan(Span.getInvalid()) + .build(); + } + + @Test + public void 
testExecuteDdl() { + ParsedStatement statement = mock(ParsedStatement.class); + when(statement.getType()).thenReturn(StatementType.DDL); + + ReadWriteTransaction transaction = createSubject(); + try { + transaction.executeDdlAsync(CallType.SYNC, statement); + fail("Expected exception"); + } catch (SpannerException ex) { + assertEquals(ErrorCode.FAILED_PRECONDITION, ex.getErrorCode()); + } + } + + @Test + public void testRunBatch() { + ReadWriteTransaction subject = createSubject(); + try { + subject.runBatchAsync(CallType.SYNC); + fail("Expected exception"); + } catch (SpannerException ex) { + assertEquals(ErrorCode.FAILED_PRECONDITION, ex.getErrorCode()); + } + } + + @Test + public void testAbortBatch() { + ReadWriteTransaction subject = createSubject(); + try { + subject.abortBatch(); + fail("Expected exception"); + } catch (SpannerException ex) { + assertEquals(ErrorCode.FAILED_PRECONDITION, ex.getErrorCode()); + } + } + + @Test + public void testExecuteQuery() { + ParsedStatement parsedStatement = mock(ParsedStatement.class); + when(parsedStatement.getType()).thenReturn(StatementType.QUERY); + when(parsedStatement.isQuery()).thenReturn(true); + Statement statement = Statement.of("SELECT * FROM FOO"); + when(parsedStatement.getStatement()).thenReturn(statement); + + ReadWriteTransaction transaction = createSubject(); + ResultSet rs = + get(transaction.executeQueryAsync(CallType.SYNC, parsedStatement, AnalyzeMode.NONE)); + assertThat(rs, is(notNullValue())); + assertThat(rs.getStats(), is(nullValue())); + } + + @Test + public void testPlanQuery() { + ParsedStatement parsedStatement = mock(ParsedStatement.class); + when(parsedStatement.getType()).thenReturn(StatementType.QUERY); + when(parsedStatement.isQuery()).thenReturn(true); + Statement statement = Statement.of("SELECT * FROM FOO"); + when(parsedStatement.getStatement()).thenReturn(statement); + + ReadWriteTransaction transaction = createSubject(); + ResultSet rs = + 
get(transaction.executeQueryAsync(CallType.SYNC, parsedStatement, AnalyzeMode.PLAN)); + assertThat(rs, is(notNullValue())); + while (rs.next()) { + // do nothing + } + assertThat(rs.getStats(), is(notNullValue())); + } + + @Test + public void testProfileQuery() { + ParsedStatement parsedStatement = mock(ParsedStatement.class); + when(parsedStatement.getType()).thenReturn(StatementType.QUERY); + when(parsedStatement.isQuery()).thenReturn(true); + Statement statement = Statement.of("SELECT * FROM FOO"); + when(parsedStatement.getStatement()).thenReturn(statement); + + ReadWriteTransaction transaction = createSubject(); + ResultSet rs = + get(transaction.executeQueryAsync(CallType.SYNC, parsedStatement, AnalyzeMode.PROFILE)); + assertThat(rs, is(notNullValue())); + while (rs.next()) { + // do nothing + } + assertThat(rs.getStats(), is(notNullValue())); + } + + @Test + public void testExecuteUpdate() { + ParsedStatement parsedStatement = mock(ParsedStatement.class); + when(parsedStatement.getType()).thenReturn(StatementType.UPDATE); + when(parsedStatement.isUpdate()).thenReturn(true); + Statement statement = Statement.of("UPDATE FOO SET BAR=1 WHERE ID=2"); + when(parsedStatement.getStatement()).thenReturn(statement); + + ReadWriteTransaction transaction = createSubject(); + assertThat(get(transaction.executeUpdateAsync(CallType.SYNC, parsedStatement)), is(1L)); + } + + @Test + public void testExecuteQueryWithDmlReturningWithoutRetry() { + ParsedStatement parsedStatement = mock(ParsedStatement.class); + when(parsedStatement.getType()).thenReturn(StatementType.UPDATE); + when(parsedStatement.isUpdate()).thenReturn(true); + when(parsedStatement.hasReturningClause()).thenReturn(true); + Statement statement = Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'x') THEN RETURN *"); + when(parsedStatement.getStatement()).thenReturn(statement); + + ReadWriteTransaction transaction = createSubject(/* commitBehavior= */ CommitBehavior.SUCCEED); + ResultSet rs = + 
get(transaction.executeQueryAsync(CallType.SYNC, parsedStatement, AnalyzeMode.NONE)); + assertThat(rs, is(notNullValue())); + } + + @Test + public void testGetCommitTimestampBeforeCommit() { + ParsedStatement parsedStatement = mock(ParsedStatement.class); + when(parsedStatement.getType()).thenReturn(StatementType.UPDATE); + when(parsedStatement.isUpdate()).thenReturn(true); + Statement statement = Statement.of("UPDATE FOO SET BAR=1 WHERE ID=2"); + when(parsedStatement.getStatement()).thenReturn(statement); + + ReadWriteTransaction transaction = createSubject(); + assertThat(get(transaction.executeUpdateAsync(CallType.SYNC, parsedStatement)), is(1L)); + try { + transaction.getCommitTimestamp(); + fail("Expected exception"); + } catch (SpannerException ex) { + assertEquals(ErrorCode.FAILED_PRECONDITION, ex.getErrorCode()); + } + } + + @Test + public void testGetCommitTimestampAfterCommit() { + ParsedStatement parsedStatement = mock(ParsedStatement.class); + when(parsedStatement.getType()).thenReturn(StatementType.UPDATE); + when(parsedStatement.isUpdate()).thenReturn(true); + Statement statement = Statement.of("UPDATE FOO SET BAR=1 WHERE ID=2"); + when(parsedStatement.getStatement()).thenReturn(statement); + + ReadWriteTransaction transaction = createSubject(); + assertThat(get(transaction.executeUpdateAsync(CallType.SYNC, parsedStatement)), is(1L)); + get(transaction.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); + + assertThat(transaction.getCommitTimestamp(), is(notNullValue())); + } + + @Test + public void testGetReadTimestamp() { + ParsedStatement parsedStatement = mock(ParsedStatement.class); + when(parsedStatement.getType()).thenReturn(StatementType.QUERY); + when(parsedStatement.isQuery()).thenReturn(true); + Statement statement = Statement.of("SELECT * FROM FOO"); + when(parsedStatement.getStatement()).thenReturn(statement); + + ReadWriteTransaction transaction = createSubject(); + assertThat( + 
get(transaction.executeQueryAsync(CallType.SYNC, parsedStatement, AnalyzeMode.NONE)), + is(notNullValue())); + try { + transaction.getReadTimestamp(); + fail("Expected exception"); + } catch (SpannerException ex) { + assertEquals(ErrorCode.FAILED_PRECONDITION, ex.getErrorCode()); + } + } + + @Test + public void testState() { + ParsedStatement parsedStatement = mock(ParsedStatement.class); + when(parsedStatement.getType()).thenReturn(StatementType.QUERY); + when(parsedStatement.isQuery()).thenReturn(true); + Statement statement = Statement.of("SELECT * FROM FOO"); + when(parsedStatement.getStatement()).thenReturn(statement); + + ReadWriteTransaction transaction = createSubject(); + assertThat( + transaction.getState(), + is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.STARTED))); + assertThat(transaction.isActive(), is(true)); + assertThat( + get(transaction.executeQueryAsync(CallType.SYNC, parsedStatement, AnalyzeMode.NONE)), + is(notNullValue())); + assertThat( + transaction.getState(), + is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.STARTED))); + assertThat(transaction.isActive(), is(true)); + + get(transaction.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); + assertThat( + transaction.getState(), + is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.COMMITTED))); + assertThat(transaction.isActive(), is(false)); + + // start a new transaction + transaction = createSubject(); + assertThat( + transaction.getState(), + is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.STARTED))); + assertThat(transaction.isActive(), is(true)); + get(transaction.rollbackAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); + assertThat( + transaction.getState(), + is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.ROLLED_BACK))); + assertThat(transaction.isActive(), is(false)); + + // start a new transaction that will fail on commit + 
transaction = createSubject(CommitBehavior.FAIL); + assertThat( + transaction.getState(), + is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.STARTED))); + assertThat(transaction.isActive(), is(true)); + try { + get(transaction.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); + } catch (SpannerException e) { + // ignore + } + assertThat( + transaction.getState(), + is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.COMMIT_FAILED))); + assertThat(transaction.isActive(), is(false)); + + // start a new transaction that will abort on commit + transaction = createSubject(CommitBehavior.ABORT); + assertThat( + transaction.getState(), + is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.STARTED))); + assertThat(transaction.isActive(), is(true)); + try { + get(transaction.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); + } catch (AbortedException e) { + // ignore + } + assertThat( + transaction.getState(), + is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.COMMIT_FAILED))); + assertThat(transaction.isActive(), is(false)); + + // Start a new transaction that will abort on commit, but with internal retry enabled, so it + // will in the end succeed. 
+ transaction = createSubject(CommitBehavior.ABORT, true); + assertThat( + transaction.getState(), + is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.STARTED))); + assertThat(transaction.isActive(), is(true)); + get(transaction.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); + assertThat( + transaction.getState(), + is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.COMMITTED))); + assertThat(transaction.isActive(), is(false)); + } + + @Test + public void testIsReadOnly() { + assertThat(createSubject().isReadOnly(), is(false)); + } + + private enum RetryResults { + SAME, + DIFFERENT + } + + @Test + public void testRetry() { + for (RetryResults results : RetryResults.values()) { + String sql1 = "UPDATE FOO SET BAR=1 WHERE BAZ>=100 AND BAZ<200"; + String sql2 = "UPDATE FOO SET BAR=2 WHERE BAZ>=200 AND BAZ<300"; + DatabaseClient client = mock(DatabaseClient.class); + ParsedStatement update1 = mock(ParsedStatement.class); + when(update1.getType()).thenReturn(StatementType.UPDATE); + when(update1.isUpdate()).thenReturn(true); + when(update1.getStatement()).thenReturn(Statement.of(sql1)); + ParsedStatement update2 = mock(ParsedStatement.class); + when(update2.getType()).thenReturn(StatementType.UPDATE); + when(update2.isUpdate()).thenReturn(true); + when(update2.getStatement()).thenReturn(Statement.of(sql2)); + + TransactionManager txManager = mock(TransactionManager.class); + TransactionContext txContext1 = mock(TransactionContext.class); + when(txManager.begin()).thenReturn(txContext1); + when(txManager.getState()).thenReturn(null, TransactionState.STARTED); + when(client.transactionManager()).thenReturn(txManager); + when(txContext1.executeUpdate(Statement.of(sql1))).thenReturn(90L); + when(txContext1.executeUpdate(Statement.of(sql2))).thenReturn(80L); + + TransactionContext txContext2 = mock(TransactionContext.class); + when(txManager.resetForRetry()).thenReturn(txContext2); + 
when(client.transactionManager()).thenReturn(txManager); + if (results == RetryResults.SAME) { + when(txContext2.executeUpdate(Statement.of(sql1))).thenReturn(90L); + when(txContext2.executeUpdate(Statement.of(sql2))).thenReturn(80L); + } else if (results == RetryResults.DIFFERENT) { + when(txContext2.executeUpdate(Statement.of(sql1))).thenReturn(90L); + when(txContext2.executeUpdate(Statement.of(sql2))).thenReturn(90L); + } + + // first abort, then do nothing + doThrow( + SpannerExceptionFactory.newSpannerException( + ErrorCode.ABORTED, "commit aborted", createAbortedExceptionWithMinimalRetry())) + .doNothing() + .when(txManager) + .commit(); + + ReadWriteTransaction subject = + ReadWriteTransaction.newBuilder() + .setRetryAbortsInternally(true) + .setIsolationLevel(IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED) + .setSavepointSupport(SavepointSupport.FAIL_AFTER_ROLLBACK) + .setTransactionRetryListeners(Collections.emptyList()) + .setDatabaseClient(client) + .withStatementExecutor(new StatementExecutor()) + .setSpan(Span.getInvalid()) + .build(); + subject.executeUpdateAsync(CallType.SYNC, update1); + subject.executeUpdateAsync(CallType.SYNC, update2); + boolean expectedException = false; + try { + get(subject.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); + } catch (SpannerException e) { + if (results == RetryResults.DIFFERENT && e.getErrorCode() == ErrorCode.ABORTED) { + // expected + expectedException = true; + } else { + throw e; + } + } + assertThat(expectedException, is(results == RetryResults.DIFFERENT)); + } + } + + @Test + public void testChecksumResultSet() { + DatabaseClient client = mock(DatabaseClient.class); + ReadWriteTransaction transaction = + ReadWriteTransaction.newBuilder() + .setRetryAbortsInternally(true) + .setIsolationLevel(IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED) + .setSavepointSupport(SavepointSupport.FAIL_AFTER_ROLLBACK) + .setTransactionRetryListeners(Collections.emptyList()) + .setDatabaseClient(client) + 
.withStatementExecutor(new StatementExecutor()) + .setSpan(Span.getInvalid()) + .build(); + ParsedStatement parsedStatement = mock(ParsedStatement.class); + Statement statement = Statement.of("SELECT * FROM FOO"); + when(parsedStatement.getStatement()).thenReturn(statement); + + String arrayJson = + "[{\"color\":\"red\",\"value\":\"#f00\"},{\"color\":\"green\",\"value\":\"#0f0\"},{\"color\":\"blue\",\"value\":\"#00f\"},{\"color\":\"cyan\",\"value\":\"#0ff\"},{\"color\":\"magenta\",\"value\":\"#f0f\"},{\"color\":\"yellow\",\"value\":\"#ff0\"},{\"color\":\"black\",\"value\":\"#000\"}]"; + String emptyArrayJson = "[]"; + String simpleJson = "{\"color\":\"red\",\"value\":\"#f00\"}"; + SingerInfo protoMessageVal = + SingerInfo.newBuilder() + .setSingerId(111) + .setNationality("COUNTRY1") + .setGenre(Genre.FOLK) + .build(); + ProtocolMessageEnum protoEnumVal = Genre.ROCK; + ProtobufResultSet delegate1 = + (ProtobufResultSet) + ResultSets.forRows( + Type.struct( + StructField.of("ID", Type.int64()), + StructField.of("NAME", Type.string()), + StructField.of("AMOUNT", Type.numeric()), + StructField.of("JSON", Type.json()), + StructField.of( + "PROTO", Type.proto(protoMessageVal.getDescriptorForType().getFullName())), + StructField.of( + "PROTOENUM", + Type.protoEnum(protoEnumVal.getDescriptorForType().getFullName()))), + Arrays.asList( + Struct.newBuilder() + .set("ID") + .to(1L) + .set("NAME") + .to("TEST 1") + .set("AMOUNT") + .to(BigDecimal.valueOf(550, 2)) + .set("JSON") + .to(Value.json(simpleJson)) + .set("PROTO") + .to(protoMessageVal) + .set("PROTOENUM") + .to(protoEnumVal) + .build(), + Struct.newBuilder() + .set("ID") + .to(2L) + .set("NAME") + .to("TEST 2") + .set("AMOUNT") + .to(BigDecimal.valueOf(750, 2)) + .set("JSON") + .to(Value.json(arrayJson)) + .set("PROTO") + .to(protoMessageVal) + .set("PROTOENUM") + .to(Genre.JAZZ) + .build())); + ChecksumResultSet rs1 = + transaction.createChecksumResultSet(delegate1, parsedStatement, AnalyzeMode.NONE); + 
ProtobufResultSet delegate2 = + (ProtobufResultSet) + ResultSets.forRows( + Type.struct( + StructField.of("ID", Type.int64()), + StructField.of("NAME", Type.string()), + StructField.of("AMOUNT", Type.numeric()), + StructField.of("JSON", Type.json()), + StructField.of( + "PROTO", Type.proto(protoMessageVal.getDescriptorForType().getFullName())), + StructField.of( + "PROTOENUM", + Type.protoEnum(protoEnumVal.getDescriptorForType().getFullName()))), + Arrays.asList( + Struct.newBuilder() + .set("ID") + .to(1L) + .set("NAME") + .to("TEST 1") + .set("AMOUNT") + .to(new BigDecimal("5.50")) + .set("JSON") + .to(Value.json(simpleJson)) + .set("PROTO") + .to(protoMessageVal) + .set("PROTOENUM") + .to(protoEnumVal) + .build(), + Struct.newBuilder() + .set("ID") + .to(2L) + .set("NAME") + .to("TEST 2") + .set("AMOUNT") + .to(new BigDecimal("7.50")) + .set("JSON") + .to(Value.json(arrayJson)) + .set("PROTO") + .to(protoMessageVal) + .set("PROTOENUM") + .to(Genre.JAZZ) + .build())); + ChecksumResultSet rs2 = + transaction.createChecksumResultSet(delegate2, parsedStatement, AnalyzeMode.NONE); + // rs1 and rs2 are equal, rs3 contains the same rows, but in a different order + ProtobufResultSet delegate3 = + (ProtobufResultSet) + ResultSets.forRows( + Type.struct( + StructField.of("ID", Type.int64()), + StructField.of("NAME", Type.string()), + StructField.of("AMOUNT", Type.numeric()), + StructField.of("JSON", Type.json()), + StructField.of( + "PROTO", Type.proto(protoMessageVal.getDescriptorForType().getFullName())), + StructField.of( + "PROTOENUM", + Type.protoEnum(protoEnumVal.getDescriptorForType().getFullName()))), + Arrays.asList( + Struct.newBuilder() + .set("ID") + .to(2L) + .set("NAME") + .to("TEST 2") + .set("AMOUNT") + .to(new BigDecimal("7.50")) + .set("JSON") + .to(Value.json(arrayJson)) + .set("PROTO") + .to(protoMessageVal) + .set("PROTOENUM") + .to(Genre.JAZZ) + .build(), + Struct.newBuilder() + .set("ID") + .to(1L) + .set("NAME") + .to("TEST 1") + .set("AMOUNT") + 
.to(new BigDecimal("5.50")) + .set("JSON") + .to(Value.json(simpleJson)) + .set("PROTO") + .to(protoMessageVal) + .set("PROTOENUM") + .to(protoEnumVal) + .build())); + ChecksumResultSet rs3 = + transaction.createChecksumResultSet(delegate3, parsedStatement, AnalyzeMode.NONE); + + // rs4 contains the same rows as rs1 and rs2, but also an additional row + ProtobufResultSet delegate4 = + (ProtobufResultSet) + ResultSets.forRows( + Type.struct( + StructField.of("ID", Type.int64()), + StructField.of("NAME", Type.string()), + StructField.of("AMOUNT", Type.numeric()), + StructField.of("JSON", Type.json()), + StructField.of( + "PROTO", Type.proto(protoMessageVal.getDescriptorForType().getFullName())), + StructField.of( + "PROTOENUM", + Type.protoEnum(protoEnumVal.getDescriptorForType().getFullName()))), + Arrays.asList( + Struct.newBuilder() + .set("ID") + .to(1L) + .set("NAME") + .to("TEST 1") + .set("AMOUNT") + .to(new BigDecimal("5.50")) + .set("JSON") + .to(Value.json(simpleJson)) + .set("PROTO") + .to(protoMessageVal) + .set("PROTOENUM") + .to(protoEnumVal) + .build(), + Struct.newBuilder() + .set("ID") + .to(2L) + .set("NAME") + .to("TEST 2") + .set("AMOUNT") + .to(new BigDecimal("7.50")) + .set("JSON") + .to(Value.json(arrayJson)) + .set("PROTO") + .to(protoMessageVal) + .set("PROTOENUM") + .to(Genre.JAZZ) + .build(), + Struct.newBuilder() + .set("ID") + .to(3L) + .set("NAME") + .to("TEST 3") + .set("AMOUNT") + .to(new BigDecimal("9.99")) + .set("JSON") + .to(Value.json(emptyArrayJson)) + .set("PROTO") + .to(null, SingerInfo.getDescriptor()) + .set("PROTOENUM") + .to(Genre.POP) + .build())); + ChecksumResultSet rs4 = + transaction.createChecksumResultSet(delegate4, parsedStatement, AnalyzeMode.NONE); + + assertThat(rs1.getChecksum(), is(equalTo(rs2.getChecksum()))); + while (rs1.next() && rs2.next() && rs3.next() && rs4.next()) { + assertThat(rs1.getChecksum(), is(equalTo(rs2.getChecksum()))); + assertThat(rs1.getChecksum(), is(not(equalTo(rs3.getChecksum())))); + 
assertThat(rs1.getChecksum(), is(equalTo(rs4.getChecksum()))); + } + assertThat(rs1.getChecksum(), is(equalTo(rs2.getChecksum()))); + assertThat(rs1.getChecksum(), is(not(equalTo(rs3.getChecksum())))); + // rs4 contains one more row than rs1, but the last row of rs4 hasn't been consumed yet + assertThat(rs1.getChecksum(), is(equalTo(rs4.getChecksum()))); + assertThat(rs4.next(), is(true)); + assertThat(rs1.getChecksum(), is(not(equalTo(rs4.getChecksum())))); + } + + @Test + public void testChecksumResultSetWithArray() { + DatabaseClient client = mock(DatabaseClient.class); + ReadWriteTransaction transaction = + ReadWriteTransaction.newBuilder() + .setRetryAbortsInternally(true) + .setIsolationLevel(IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED) + .setSavepointSupport(SavepointSupport.FAIL_AFTER_ROLLBACK) + .setTransactionRetryListeners(Collections.emptyList()) + .setDatabaseClient(client) + .withStatementExecutor(new StatementExecutor()) + .setSpan(Span.getInvalid()) + .build(); + ParsedStatement parsedStatement = mock(ParsedStatement.class); + Statement statement = Statement.of("SELECT * FROM FOO"); + when(parsedStatement.getStatement()).thenReturn(statement); + ProtobufResultSet delegate1 = + (ProtobufResultSet) + ResultSets.forRows( + Type.struct( + StructField.of("ID", Type.int64()), + StructField.of("PRICES", Type.array(Type.int64()))), + Arrays.asList( + Struct.newBuilder() + .set("ID") + .to(1L) + .set("PRICES") + .toInt64Array(new long[] {1L, 2L}) + .build(), + Struct.newBuilder() + .set("ID") + .to(2L) + .set("PRICES") + .toInt64Array(new long[] {3L, 4L}) + .build())); + ChecksumResultSet rs1 = + transaction.createChecksumResultSet(delegate1, parsedStatement, AnalyzeMode.NONE); + ProtobufResultSet delegate2 = + (ProtobufResultSet) + ResultSets.forRows( + Type.struct( + StructField.of("ID", Type.int64()), + StructField.of("PRICES", Type.array(Type.int64()))), + Arrays.asList( + Struct.newBuilder() + .set("ID") + .to(1L) + .set("PRICES") + .toInt64Array(new 
long[] {1L, 2L}) + .build(), + Struct.newBuilder() + .set("ID") + .to(2L) + .set("PRICES") + .toInt64Array(new long[] {3L, 5L}) + .build())); + ChecksumResultSet rs2 = + transaction.createChecksumResultSet(delegate2, parsedStatement, AnalyzeMode.NONE); + + rs1.next(); + rs2.next(); + assertThat(rs1.getChecksum(), is(equalTo(rs2.getChecksum()))); + rs1.next(); + rs2.next(); + assertThat(rs1.getChecksum(), is(not(equalTo(rs2.getChecksum())))); + } + + @Test + public void testGetCommitResponseBeforeCommit() { + ParsedStatement parsedStatement = mock(ParsedStatement.class); + when(parsedStatement.getType()).thenReturn(StatementType.UPDATE); + when(parsedStatement.isUpdate()).thenReturn(true); + Statement statement = Statement.of("UPDATE FOO SET BAR=1 WHERE ID=2"); + when(parsedStatement.getStatement()).thenReturn(statement); + + ReadWriteTransaction transaction = createSubject(); + get(transaction.executeUpdateAsync(CallType.SYNC, parsedStatement)); + try { + transaction.getCommitResponse(); + fail("Expected exception"); + } catch (SpannerException ex) { + assertEquals(ErrorCode.FAILED_PRECONDITION, ex.getErrorCode()); + } + assertNull(transaction.getCommitResponseOrNull()); + } + + @Test + public void testGetCommitResponseAfterCommit() { + ParsedStatement parsedStatement = mock(ParsedStatement.class); + when(parsedStatement.getType()).thenReturn(StatementType.UPDATE); + when(parsedStatement.isUpdate()).thenReturn(true); + Statement statement = Statement.of("UPDATE FOO SET BAR=1 WHERE ID=2"); + when(parsedStatement.getStatement()).thenReturn(statement); + + ReadWriteTransaction transaction = createSubject(); + get(transaction.executeUpdateAsync(CallType.SYNC, parsedStatement)); + get(transaction.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); + + assertNotNull(transaction.getCommitResponse()); + assertNotNull(transaction.getCommitResponseOrNull()); + } + + private static StatusRuntimeException createAbortedExceptionWithMinimalRetry() { + Metadata.Key 
key = ProtoUtils.keyForProto(RetryInfo.getDefaultInstance()); + Metadata trailers = new Metadata(); + RetryInfo retryInfo = + RetryInfo.newBuilder() + .setRetryDelay(com.google.protobuf.Duration.newBuilder().setNanos(1).setSeconds(0L)) + .build(); + trailers.put(key, retryInfo); + return io.grpc.Status.ABORTED.asRuntimeException(trailers); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/Repeat.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/Repeat.java new file mode 100644 index 000000000000..b1a4559d8c56 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/Repeat.java @@ -0,0 +1,33 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +/** Simple util class for repeating (test) functions. */ +class Repeat { + + /** Repeat the given {@link Runnable} twice. */ + static void twice(Runnable runnable) { + repeat(runnable, 2); + } + + /** Repeat the given {@link Runnable} n times. 
*/ + static void repeat(Runnable runnable, int n) { + for (int ignore = 0; ignore < n; ignore++) { + runnable.run(); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReplaceableForwardingResultSetTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReplaceableForwardingResultSetTest.java new file mode 100644 index 000000000000..4617c47bc6b1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReplaceableForwardingResultSetTest.java @@ -0,0 +1,392 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.ResultSets; +import com.google.cloud.spanner.SingerProto.Genre; +import com.google.cloud.spanner.SingerProto.SingerInfo; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.StructField; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ProtocolMessageEnum; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.function.Function; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ReplaceableForwardingResultSetTest { + + private ReplaceableForwardingResultSet createSubject() { + ResultSet delegate = + ResultSets.forRows( + Type.struct(StructField.of("test", Type.int64())), + Collections.singletonList(Struct.newBuilder().set("test").to(1L).build())); + return new ReplaceableForwardingResultSet(delegate); + } + + @Test + public void testReplace() { + ResultSet delegate1 = + ResultSets.forRows( + Type.struct(StructField.of("test", Type.int64())), + Arrays.asList( + Struct.newBuilder().set("test").to(1L).build(), + Struct.newBuilder().set("test").to(2L).build())); + // First verify the behavior without replacing. 
+ try (ResultSet rs = new ReplaceableForwardingResultSet(delegate1)) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("test"), is(equalTo(1L))); + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("test"), is(equalTo(2L))); + assertThat(rs.next(), is(false)); + } + + delegate1 = + ResultSets.forRows( + Type.struct(StructField.of("test", Type.int64())), + Arrays.asList( + Struct.newBuilder().set("test").to(1L).build(), + Struct.newBuilder().set("test").to(2L).build())); + ResultSet delegate2 = + ResultSets.forRows( + Type.struct(StructField.of("test", Type.int64())), + Arrays.asList( + Struct.newBuilder().set("test").to(1L).build(), + Struct.newBuilder().set("test").to(3L).build())); + // Then verify the behavior with replacing. + try (ReplaceableForwardingResultSet rs = new ReplaceableForwardingResultSet(delegate1)) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("test"), is(equalTo(1L))); + // Advance the delegate result set that will be used as replacement. + delegate2.next(); + // Replace the result set. + rs.replaceDelegate(delegate2); + // Verify that the replacement is being used. + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("test"), is(equalTo(3L))); + assertThat(rs.next(), is(false)); + } + } + + @Test + public void testMethodCallBeforeNext() + throws IllegalAccessException, IllegalArgumentException, InvocationTargetException { + List excludedMethods = + Arrays.asList( + "canGetProtobufValue", + "getStats", + "getMetadata", + "next", + "close", + "equals", + "hashCode"); + ReplaceableForwardingResultSet subject = createSubject(); + // Test that all methods throw an IllegalStateException except the excluded methods when called + // before a call to ResultSet#next(). 
+ callMethods(subject, excludedMethods, IllegalStateException.class); + } + + @Test + public void testMethodCallAfterClose() + throws IllegalAccessException, IllegalArgumentException, InvocationTargetException { + List excludedMethods = + Arrays.asList( + "canGetProtobufValue", + "getStats", + "getMetadata", + "next", + "close", + "getType", + "getColumnCount", + "getColumnIndex", + "getColumnType", + "ofResultSet", + "equals", + "hashCode"); + ReplaceableForwardingResultSet subject = createSubject(); + subject.next(); + subject.close(); + // Test that all methods throw an SpannerException except the excluded methods when called on a + // closed ResultSet. + callMethods(subject, excludedMethods, SpannerException.class); + } + + @Test + public void testMethodCallAfterNextHasReturnedFalse() + throws IllegalAccessException, IllegalArgumentException, InvocationTargetException { + List excludedMethods = + Arrays.asList( + "canGetProtobufValue", + "getStats", + "getMetadata", + "next", + "close", + "getType", + "getColumnCount", + "getColumnIndex", + "getColumnType", + "ofResultSet", + "equals", + "hashCode"); + ReplaceableForwardingResultSet subject = createSubject(); + subject.next(); + subject.next(); + // Test that all methods throw an IndexOutOfBoundsException except the excluded methods when + // called after a call to ResultSet#next() has returned false. 
+ callMethods(subject, excludedMethods, IndexOutOfBoundsException.class); + } + + private void callMethods( + ReplaceableForwardingResultSet subject, + List excludedMethods, + Class expectedException) + throws IllegalAccessException, IllegalArgumentException, InvocationTargetException { + for (Method method : ReplaceableForwardingResultSet.class.getDeclaredMethods()) { + if (Modifier.isPublic(method.getModifiers()) && !excludedMethods.contains(method.getName())) { + boolean exception = false; + int numberOfParameters = method.getParameterTypes().length; + Class firstParameterType = null; + if (numberOfParameters >= 1) { + firstParameterType = method.getParameterTypes()[0]; + } + try { + switch (numberOfParameters) { + case 0: + method.invoke(subject); + break; + case 1: + if (firstParameterType == String.class) { + method.invoke(subject, "test"); + } else if (firstParameterType == int.class) { + method.invoke(subject, 0); + } else { + fail("unknown parameter type"); + } + break; + case 2: + Class secondParameterType = method.getParameterTypes()[1]; + Object firstArgument = null, secondArgument = null; + + if (firstParameterType == String.class) { + firstArgument = "test"; + } else if (firstParameterType == int.class) { + firstArgument = 0; + } + + if (secondParameterType == Function.class) { + Function lambdaFunction = + (val) -> Genre.forNumber(val.intValue()); + secondArgument = lambdaFunction; + } else if (secondParameterType == AbstractMessage.class) { + secondArgument = SingerInfo.getDefaultInstance(); + } + + if (firstArgument != null && secondArgument != null) { + method.invoke(subject, firstArgument, secondArgument); + } else { + fail("unknown parameter type"); + } + break; + default: + fail("method with more than 2 parameters is unknown"); + } + } catch (InvocationTargetException e) { + if (e.getCause().getClass().equals(expectedException)) { + // expected + exception = true; + } else { + throw e; + } + } + assertThat( + method.getName() + " did not throw 
an IllegalStateException", exception, is(true)); + } + } + } + + @Test + public void testValidMethodCall() throws IllegalArgumentException { + ResultSet delegate = mock(ResultSet.class); + when(delegate.next()).thenReturn(true, true, false); + try (ReplaceableForwardingResultSet subject = new ReplaceableForwardingResultSet(delegate)) { + subject.next(); + + // Cloud Spanner result sets use zero-based column indices, as opposed to the one-based column + // indices used by JDBC. The subject.getBoolean(0) and further zero-based calls below should + // therefore not cause any exceptions. + subject.getBoolean(0); + verify(delegate).getBoolean(0); + subject.getBoolean("test0"); + verify(delegate).getBoolean("test0"); + subject.getBooleanArray(1); + verify(delegate).getBooleanArray(1); + subject.getBooleanArray("test1"); + verify(delegate).getBooleanArray("test1"); + subject.getBooleanList(2); + verify(delegate).getBooleanList(2); + subject.getBooleanList("test2"); + verify(delegate).getBooleanList("test2"); + + subject.getBytes(0); + verify(delegate).getBytes(0); + subject.getBytes("test0"); + verify(delegate).getBytes("test0"); + subject.getBytesList(2); + verify(delegate).getBytesList(2); + subject.getBytesList("test2"); + verify(delegate).getBytesList("test2"); + + subject.getDate(0); + verify(delegate).getDate(0); + subject.getDate("test0"); + verify(delegate).getDate("test0"); + subject.getDateList(2); + verify(delegate).getDateList(2); + subject.getDateList("test2"); + verify(delegate).getDateList("test2"); + + subject.getDouble(0); + verify(delegate).getDouble(0); + subject.getDouble("test0"); + verify(delegate).getDouble("test0"); + subject.getDoubleArray(1); + verify(delegate).getDoubleArray(1); + subject.getDoubleArray("test1"); + verify(delegate).getDoubleArray("test1"); + subject.getDoubleList(2); + verify(delegate).getDoubleList(2); + subject.getDoubleList("test2"); + verify(delegate).getDoubleList("test2"); + + subject.getBigDecimal(0); + 
verify(delegate).getBigDecimal(0); + subject.getBigDecimal("test0"); + verify(delegate).getBigDecimal("test0"); + subject.getBigDecimalList(1); + verify(delegate).getBigDecimalList(1); + subject.getBigDecimalList("test1"); + verify(delegate).getBigDecimalList("test1"); + subject.getBigDecimalList(2); + verify(delegate).getBigDecimalList(2); + subject.getBigDecimalList("test2"); + verify(delegate).getBigDecimalList("test2"); + + subject.getLong(0); + verify(delegate).getLong(0); + subject.getLong("test0"); + verify(delegate).getLong("test0"); + subject.getLongArray(1); + verify(delegate).getLongArray(1); + subject.getLongArray("test1"); + verify(delegate).getLongArray("test1"); + subject.getLongList(2); + verify(delegate).getLongList(2); + subject.getLongList("test2"); + verify(delegate).getLongList("test2"); + + subject.getString(0); + verify(delegate).getString(0); + subject.getString("test0"); + verify(delegate).getString("test0"); + subject.getStringList(2); + verify(delegate).getStringList(2); + subject.getStringList("test2"); + verify(delegate).getStringList("test2"); + + subject.getJson(0); + verify(delegate).getJson(0); + subject.getJson("test0"); + verify(delegate).getJson("test0"); + subject.getJsonList(2); + verify(delegate).getJsonList(2); + subject.getJsonList("test2"); + verify(delegate).getJsonList("test2"); + + subject.getProtoMessage(0, SingerInfo.getDefaultInstance()); + verify(delegate).getProtoMessage(0, SingerInfo.getDefaultInstance()); + subject.getProtoMessage("test0", SingerInfo.getDefaultInstance()); + verify(delegate).getProtoMessage("test0", SingerInfo.getDefaultInstance()); + subject.getProtoMessageList(0, SingerInfo.getDefaultInstance()); + verify(delegate).getProtoMessageList(0, SingerInfo.getDefaultInstance()); + subject.getProtoMessageList("test0", SingerInfo.getDefaultInstance()); + verify(delegate).getProtoMessageList("test0", SingerInfo.getDefaultInstance()); + + Function lambdaFunction = Genre::forNumber; + subject.getProtoEnum(0, 
lambdaFunction); + verify(delegate).getProtoEnum(0, lambdaFunction); + subject.getProtoEnum("test0", lambdaFunction); + verify(delegate).getProtoEnum("test0", lambdaFunction); + subject.getProtoEnumList(0, lambdaFunction); + verify(delegate).getProtoEnumList(0, lambdaFunction); + subject.getProtoEnumList("test0", lambdaFunction); + verify(delegate).getProtoEnumList("test0", lambdaFunction); + + subject.getStructList(0); + subject.getStructList("test0"); + + subject.getTimestamp(0); + verify(delegate).getTimestamp(0); + subject.getTimestamp("test0"); + verify(delegate).getTimestamp("test0"); + subject.getTimestampList(2); + verify(delegate).getTimestampList(2); + subject.getTimestampList("test2"); + verify(delegate).getTimestampList("test2"); + + subject.getColumnCount(); + verify(delegate).getColumnCount(); + subject.getColumnIndex("test"); + verify(delegate).getColumnIndex("test"); + subject.getColumnType(100); + verify(delegate).getColumnType(100); + subject.getColumnType("test"); + verify(delegate).getColumnType("test"); + subject.getCurrentRowAsStruct(); + verify(delegate).getCurrentRowAsStruct(); + subject.getType(); + verify(delegate).getType(); + subject.isNull(50); + verify(delegate).isNull(50); + subject.isNull("test"); + verify(delegate).isNull("test"); + + while (subject.next()) { + // ignore + } + subject.getStats(); + verify(delegate).getStats(); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/RetryDmlAsPartitionedDmlMockServerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/RetryDmlAsPartitionedDmlMockServerTest.java new file mode 100644 index 000000000000..d5e44fdcefc9 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/RetryDmlAsPartitionedDmlMockServerTest.java @@ -0,0 +1,284 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you 
may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerBatchUpdateException; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TransactionMutationLimitExceededException; +import com.google.protobuf.Any; +import com.google.rpc.Help; +import com.google.rpc.Help.Link; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import io.grpc.Metadata; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class RetryDmlAsPartitionedDmlMockServerTest extends AbstractMockServerTest { + private enum 
ExceptionType { + MutationLimitExceeded { + @Override + StatusRuntimeException createException() { + return createTransactionMutationLimitExceededException(); + } + }, + ResourceLimitExceeded { + @Override + StatusRuntimeException createException() { + return createTransactionResourceLimitExceededException(); + } + }; + + abstract StatusRuntimeException createException(); + } + + @Parameters(name = "exception = {0}") + public static Object[] data() { + return ExceptionType.values(); + } + + @SuppressWarnings("ClassEscapesDefinedScope") + @Parameter + public ExceptionType exceptionType; + + static StatusRuntimeException createTransactionMutationLimitExceededException() { + Metadata.Key key = + Metadata.Key.of("grpc-status-details-bin", Metadata.BINARY_BYTE_MARSHALLER); + Help help = + Help.newBuilder() + .addLinks( + Link.newBuilder() + .setDescription("Cloud Spanner limits documentation.") + .setUrl("https://cloud.google.com/spanner/docs/limits") + .build()) + .build(); + com.google.rpc.Status status = + com.google.rpc.Status.newBuilder().addDetails(Any.pack(help)).build(); + + Metadata trailers = new Metadata(); + trailers.put(key, status.toByteArray()); + + return Status.INVALID_ARGUMENT + .withDescription("The transaction contains too many mutations.") + .asRuntimeException(trailers); + } + + static StatusRuntimeException createTransactionResourceLimitExceededException() { + return Status.INVALID_ARGUMENT + .withDescription("Transaction resource limits exceeded") + .asRuntimeException(); + } + + @Test + public void testTransactionMutationLimitExceeded_isNotRetriedByDefault() { + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofException(exceptionType.createException())); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + assertEquals(AutocommitDmlMode.TRANSACTIONAL, connection.getAutocommitDmlMode()); + + TransactionMutationLimitExceededException exception = + assertThrows( + 
TransactionMutationLimitExceededException.class, + () -> connection.executeUpdate(Statement.of("update test set value=1 where true"))); + assertEquals(0, exception.getSuppressed().length); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.getLastStatement()); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testTransactionMutationLimitExceeded_canBeRetriedAsPDML() { + Statement statement = Statement.of("update test set value=1 where true"); + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofException(exceptionType.createException())); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.update(statement, 100000L)); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setAutocommitDmlMode( + AutocommitDmlMode.TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC); + + long updateCount = connection.executeUpdate(statement); + assertEquals(100000L, updateCount); + } + // Verify that the request is retried as Partitioned DML. + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + // The transactional request uses inline-begin. + ExecuteSqlRequest transactionalRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(transactionalRequest.getTransaction().getBegin().hasReadWrite()); + assertTrue(transactionalRequest.getLastStatement()); + + // Partitioned DML uses an explicit BeginTransaction RPC. 
+ assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + BeginTransactionRequest beginRequest = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class).get(0); + assertTrue(beginRequest.getOptions().hasPartitionedDml()); + ExecuteSqlRequest partitionedDmlRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(1); + assertTrue(partitionedDmlRequest.getTransaction().hasId()); + assertFalse(partitionedDmlRequest.getLastStatement()); + + // Partitioned DML transactions are not committed. + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testTransactionMutationLimitExceeded_retryAsPDMLFails() { + Statement statement = Statement.of("insert into test (id, value) select -id, value from test"); + // The transactional update statement uses ExecuteSql(..). + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofException(exceptionType.createException())); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.exception( + statement, + Status.INVALID_ARGUMENT + .withDescription("This statement is not supported with Partitioned DML") + .asRuntimeException())); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setAutocommitDmlMode( + AutocommitDmlMode.TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC); + + // The connection throws TransactionMutationLimitExceededException if the retry using + // partitioned DML fails. The exception from the failed retry is returned as a suppressed + // exception of the TransactionMutationLimitExceededException. 
+ TransactionMutationLimitExceededException exception = + assertThrows( + TransactionMutationLimitExceededException.class, + () -> connection.executeUpdate(statement)); + assertEquals(1, exception.getSuppressed().length); + assertEquals(SpannerException.class, exception.getSuppressed()[0].getClass()); + SpannerException spannerException = (SpannerException) exception.getSuppressed()[0]; + assertEquals(ErrorCode.INVALID_ARGUMENT, spannerException.getErrorCode()); + assertTrue( + spannerException.getMessage(), + spannerException + .getMessage() + .contains("This statement is not supported with Partitioned DML")); + } + // Verify that the request was retried as Partitioned DML. + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + // The transactional request uses inline-begin. + ExecuteSqlRequest transactionalRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(transactionalRequest.getTransaction().getBegin().hasReadWrite()); + assertTrue(transactionalRequest.getLastStatement()); + + // Partitioned DML uses an explicit BeginTransaction RPC. + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + BeginTransactionRequest beginRequest = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class).get(0); + assertTrue(beginRequest.getOptions().hasPartitionedDml()); + ExecuteSqlRequest partitionedDmlRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(1); + assertTrue(partitionedDmlRequest.getTransaction().hasId()); + assertFalse(partitionedDmlRequest.getLastStatement()); + + // Partitioned DML transactions are not committed. 
+ assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testSqlStatements() { + for (Dialect dialect : Dialect.values()) { + SpannerPool.closeSpannerPool(); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.detectDialectResult(dialect)); + String prefix = dialect == Dialect.POSTGRESQL ? "SPANNER." : ""; + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + try (ResultSet resultSet = + connection.executeQuery( + Statement.of(String.format("show variable %sautocommit_dml_mode", prefix)))) { + assertTrue(resultSet.next()); + assertEquals( + AutocommitDmlMode.TRANSACTIONAL.name(), + resultSet.getString(String.format("%sAUTOCOMMIT_DML_MODE", prefix))); + assertFalse(resultSet.next()); + } + connection.execute( + Statement.of( + String.format( + "set %sautocommit_dml_mode =" + + " 'transactional_with_fallback_to_partitioned_non_atomic'", + prefix))); + try (ResultSet resultSet = + connection.executeQuery( + Statement.of(String.format("show variable %sautocommit_dml_mode", prefix)))) { + assertTrue(resultSet.next()); + assertEquals( + AutocommitDmlMode.TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC.name(), + resultSet.getString(String.format("%sAUTOCOMMIT_DML_MODE", prefix))); + assertFalse(resultSet.next()); + } + } + } + } + + @Test + public void testTransactionMutationLimitExceeded_isWrappedAsCauseOfBatchUpdateException() { + String sql = "update test set value=1 where true"; + Statement statement = Statement.of(sql); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.exception( + statement, exceptionType.createException())); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + assertEquals(AutocommitDmlMode.TRANSACTIONAL, connection.getAutocommitDmlMode()); + + connection.startBatchDml(); + connection.execute(statement); + SpannerBatchUpdateException batchUpdateException = + 
assertThrows(SpannerBatchUpdateException.class, connection::runBatch); + assertNotNull(batchUpdateException.getCause()); + assertEquals( + TransactionMutationLimitExceededException.class, + batchUpdateException.getCause().getClass()); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/RpcPriorityConverterTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/RpcPriorityConverterTest.java new file mode 100644 index 000000000000..84fd326d08a1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/RpcPriorityConverterTest.java @@ -0,0 +1,51 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import com.google.cloud.spanner.Options.RpcPriority; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.RpcPriorityConverter; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class RpcPriorityConverterTest { + + @Test + public void testConvert() throws CompileException { + String allowedValues = "'(HIGH|MEDIUM|LOW|NULL)'"; + RpcPriorityConverter converter = + new ClientSideStatementValueConverters.RpcPriorityConverter(allowedValues); + assertEquals(RpcPriority.HIGH, converter.convert("high")); + assertEquals(RpcPriority.HIGH, converter.convert("HIGH")); + assertEquals(RpcPriority.HIGH, converter.convert("High")); + + assertEquals(RpcPriority.MEDIUM, converter.convert("medium")); + assertEquals(RpcPriority.MEDIUM, converter.convert("Medium")); + + assertEquals(RpcPriority.LOW, converter.convert("Low")); + + assertNull(converter.convert("")); + assertNull(converter.convert(" ")); + assertNull(converter.convert("random string")); + assertEquals(RpcPriority.UNSPECIFIED, converter.convert("NULL")); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/RunTransactionMockServerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/RunTransactionMockServerTest.java new file mode 100644 index 000000000000..d4af5dd4e7e8 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/RunTransactionMockServerTest.java @@ -0,0 +1,282 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.ConnectionProperties.DEFAULT_ISOLATION_LEVEL; +import static com.google.cloud.spanner.connection.ConnectionProperties.READ_LOCK_MODE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.TransactionOptions; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode; +import io.grpc.Status; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class RunTransactionMockServerTest extends AbstractMockServerTest { + + @Test + public void testRunTransaction() { + for (IsolationLevel isolationLevel : DEFAULT_ISOLATION_LEVEL.getValidValues()) { + for (ReadLockMode readLockMode : READ_LOCK_MODE.getValidValues()) { + try (Connection connection = createConnection()) { + 
connection.setDefaultIsolationLevel(isolationLevel); + connection.setReadLockMode(readLockMode); + connection.runTransaction( + transaction -> { + assertEquals(1L, transaction.executeUpdate(INSERT_STATEMENT)); + assertEquals(1L, transaction.executeUpdate(INSERT_STATEMENT)); + return null; + }); + } + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + TransactionOptions transactionOptions = + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getTransaction() + .getBegin(); + assertEquals(isolationLevel, transactionOptions.getIsolationLevel()); + assertEquals(readLockMode, transactionOptions.getReadWrite().getReadLockMode()); + + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testRunTransactionInAutoCommit() { + for (IsolationLevel isolationLevel : DEFAULT_ISOLATION_LEVEL.getValidValues()) { + for (ReadLockMode readLockMode : READ_LOCK_MODE.getValidValues()) { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setDefaultIsolationLevel(isolationLevel); + connection.setReadLockMode(readLockMode); + + connection.runTransaction( + transaction -> { + assertEquals(1L, transaction.executeUpdate(INSERT_STATEMENT)); + assertEquals(1L, transaction.executeUpdate(INSERT_STATEMENT)); + return null; + }); + } + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + TransactionOptions transactionOptions = + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getTransaction() + .getBegin(); + assertEquals(isolationLevel, transactionOptions.getIsolationLevel()); + assertEquals(readLockMode, transactionOptions.getReadWrite().getReadLockMode()); + + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testRunTransactionInReadOnly() { + try (Connection connection = 
createConnection()) { + connection.setReadOnly(true); + connection.setAutocommit(false); + + assertEquals( + RANDOM_RESULT_SET_ROW_COUNT, + connection + .runTransaction( + transaction -> { + int rows = 0; + try (ResultSet resultSet = transaction.executeQuery(SELECT_RANDOM_STATEMENT)) { + while (resultSet.next()) { + rows++; + } + } + return rows; + }) + .intValue()); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(RollbackRequest.class)); + } + + @Test + public void testRunTransaction_rollbacksAfterException() { + try (Connection connection = createConnection()) { + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + connection.runTransaction( + transaction -> { + assertEquals(1L, transaction.executeUpdate(INSERT_STATEMENT)); + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofException( + Status.INVALID_ARGUMENT + .withDescription("invalid statement") + .asRuntimeException())); + // This statement will fail. 
+ transaction.executeUpdate(INSERT_STATEMENT); + return null; + })); + assertEquals(ErrorCode.INVALID_ARGUMENT, exception.getErrorCode()); + assertTrue(exception.getMessage(), exception.getMessage().contains("invalid statement")); + } + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(RollbackRequest.class)); + } + + @Test + public void testRunTransactionCommitAborted() { + for (IsolationLevel isolationLevel : DEFAULT_ISOLATION_LEVEL.getValidValues()) { + for (ReadLockMode readLockMode : READ_LOCK_MODE.getValidValues()) { + final AtomicInteger attempts = new AtomicInteger(); + try (Connection connection = createConnection()) { + connection.setDefaultIsolationLevel(isolationLevel); + connection.setReadLockMode(readLockMode); + connection.runTransaction( + transaction -> { + assertEquals(1L, transaction.executeUpdate(INSERT_STATEMENT)); + assertEquals(1L, transaction.executeUpdate(INSERT_STATEMENT)); + if (attempts.incrementAndGet() == 1) { + mockSpanner.abortNextStatement(); + } + return null; + }); + } + assertEquals(2, attempts.get()); + assertEquals(4, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(2, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + + for (int i : new int[] {0, 2}) { + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(i); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertEquals(isolationLevel, request.getTransaction().getBegin().getIsolationLevel()); + assertEquals( + readLockMode, request.getTransaction().getBegin().getReadWrite().getReadLockMode()); + } + + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testRunTransactionDmlAborted() { + final AtomicInteger attempts = new 
AtomicInteger(); + try (Connection connection = createConnection()) { + assertTrue(connection.isRetryAbortsInternally()); + connection.runTransaction( + transaction -> { + assertFalse(transaction.isRetryAbortsInternally()); + if (attempts.incrementAndGet() == 1) { + mockSpanner.abortNextStatement(); + } + assertEquals(1L, transaction.executeUpdate(INSERT_STATEMENT)); + assertEquals(1L, transaction.executeUpdate(INSERT_STATEMENT)); + return null; + }); + assertTrue(connection.isRetryAbortsInternally()); + } + assertEquals(2, attempts.get()); + assertEquals(3, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testRunTransactionQueryAborted() { + final AtomicInteger attempts = new AtomicInteger(); + try (Connection connection = createConnection()) { + int rowCount = + connection.runTransaction( + transaction -> { + if (attempts.incrementAndGet() == 1) { + mockSpanner.abortNextStatement(); + } + int rows = 0; + try (ResultSet resultSet = transaction.executeQuery(SELECT_RANDOM_STATEMENT)) { + while (resultSet.next()) { + rows++; + } + } + return rows; + }); + assertEquals(RANDOM_RESULT_SET_ROW_COUNT, rowCount); + } + assertEquals(2, attempts.get()); + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testCommitInRunTransaction() { + try (Connection connection = createConnection()) { + connection.runTransaction( + transaction -> { + assertEquals(1L, transaction.executeUpdate(INSERT_STATEMENT)); + SpannerException exception = assertThrows(SpannerException.class, transaction::commit); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + assertEquals( + "FAILED_PRECONDITION: Cannot call commit when a transaction runner is active", + exception.getMessage()); + return null; + }); + } + assertEquals(1, 
mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testRollbackInRunTransaction() { + try (Connection connection = createConnection()) { + connection.runTransaction( + transaction -> { + assertEquals(1L, transaction.executeUpdate(INSERT_STATEMENT)); + SpannerException exception = + assertThrows(SpannerException.class, transaction::rollback); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + assertEquals( + "FAILED_PRECONDITION: Cannot call rollback when a transaction runner is active", + exception.getMessage()); + return null; + }); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(RollbackRequest.class)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SavepointMockServerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SavepointMockServerTest.java new file mode 100644 index 000000000000..7e7fde96c1e7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SavepointMockServerTest.java @@ -0,0 +1,740 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.AbortedDueToConcurrentModificationException; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest.ITConnection; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.AbstractMessage; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.RollbackRequest; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class SavepointMockServerTest extends AbstractMockServerTest { + + // This test uses both platform threads and virtual threads (when available). We use specifically + // this test for testing virtual threads, because it relies heavily on the internal checksum retry + // strategy. This is the only significant calculation that is executed by the StatementExecutor + // thread, meaning that this shows that using a virtual thread for those calculations also works. 
+ @Parameters(name = "dialect = {0}, useVirtualThreads = {1}") + public static Collection data() { + ImmutableList.Builder builder = ImmutableList.builder(); + for (Dialect dialect : Dialect.values()) { + for (boolean useVirtualThreads : new boolean[] {true, false}) { + builder.add(new Object[] {dialect, useVirtualThreads}); + } + } + return builder.build(); + } + + @Parameter(0) + public Dialect dialect; + + @Parameter(1) + public boolean useVirtualThreads; + + private Dialect currentDialect; + + @Before + public void setupDialect() { + if (currentDialect != dialect) { + // Reset the dialect result. + SpannerPool.closeSpannerPool(); + mockSpanner.putStatementResult(StatementResult.detectDialectResult(dialect)); + currentDialect = dialect; + } + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + @SuppressWarnings("ClassEscapesDefinedScope") + @Override + public ITConnection createConnection() { + return createConnection( + ";useVirtualThreads=" + + useVirtualThreads + + ";useVirtualGrpcTransportThreads=" + + useVirtualThreads); + } + + @Test + public void testCreateSavepoint() { + try (Connection connection = createConnection()) { + connection.savepoint("s1"); + + if (dialect == Dialect.POSTGRESQL) { + // PostgreSQL allows multiple savepoints with the same name. + connection.savepoint("s1"); + } else { + assertThrows(SpannerException.class, () -> connection.savepoint("s1")); + } + + // Test invalid identifiers. 
+ assertThrows(SpannerException.class, () -> connection.savepoint(null)); + assertThrows(SpannerException.class, () -> connection.savepoint("")); + assertThrows(SpannerException.class, () -> connection.savepoint("1")); + assertThrows(SpannerException.class, () -> connection.savepoint("-foo")); + assertThrows(SpannerException.class, () -> connection.savepoint(Strings.repeat("t", 129))); + } + } + + @Test + public void testCreateSavepointWhenDisabled() { + try (Connection connection = createConnection()) { + connection.setSavepointSupport(SavepointSupport.DISABLED); + assertThrows(SpannerException.class, () -> connection.savepoint("s1")); + } + } + + @Test + public void testReleaseSavepoint() { + try (Connection connection = createConnection()) { + connection.savepoint("s1"); + connection.releaseSavepoint("s1"); + assertThrows(SpannerException.class, () -> connection.releaseSavepoint("s1")); + + connection.savepoint("s1"); + connection.savepoint("s2"); + connection.releaseSavepoint("s1"); + // Releasing a savepoint also removes all savepoints after it. + assertThrows(SpannerException.class, () -> connection.releaseSavepoint("s2")); + + if (dialect == Dialect.POSTGRESQL) { + // PostgreSQL allows multiple savepoints with the same name. 
+ connection.savepoint("s1"); + connection.savepoint("s2"); + connection.savepoint("s1"); + connection.releaseSavepoint("s1"); + connection.releaseSavepoint("s2"); + connection.releaseSavepoint("s1"); + assertThrows(SpannerException.class, () -> connection.releaseSavepoint("s1")); + } + } + } + + @Test + public void testRollbackToSavepoint() { + for (SavepointSupport savepointSupport : + new SavepointSupport[] {SavepointSupport.ENABLED, SavepointSupport.FAIL_AFTER_ROLLBACK}) { + try (Connection connection = createConnection()) { + connection.setSavepointSupport(savepointSupport); + + connection.savepoint("s1"); + connection.rollbackToSavepoint("s1"); + // Rolling back to a savepoint does not remove it, so we can roll back multiple times to the + // same savepoint. + connection.rollbackToSavepoint("s1"); + + connection.savepoint("s2"); + connection.rollbackToSavepoint("s1"); + // Rolling back to a savepoint removes all savepoints after it. + assertThrows(SpannerException.class, () -> connection.rollbackToSavepoint("s2")); + + if (dialect == Dialect.POSTGRESQL) { + // PostgreSQL allows multiple savepoints with the same name. + connection.savepoint("s2"); + connection.savepoint("s1"); + connection.rollbackToSavepoint("s1"); + connection.rollbackToSavepoint("s2"); + connection.rollbackToSavepoint("s1"); + connection.rollbackToSavepoint("s1"); + connection.releaseSavepoint("s1"); + assertThrows(SpannerException.class, () -> connection.rollbackToSavepoint("s1")); + } + } + } + } + + @Test + public void testSavepointInAutoCommit() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + assertThrows(SpannerException.class, () -> connection.savepoint("s1")); + + // Starting a 'manual' transaction in autocommit mode should enable savepoints. 
+ connection.beginTransaction(); + connection.savepoint("s1"); + connection.releaseSavepoint("s1"); + } + } + + @Test + public void testRollbackToSavepointInReadOnlyTransaction() { + for (SavepointSupport savepointSupport : + new SavepointSupport[] {SavepointSupport.ENABLED, SavepointSupport.FAIL_AFTER_ROLLBACK}) { + try (Connection connection = createConnection()) { + connection.setSavepointSupport(savepointSupport); + connection.setReadOnly(true); + + // Read-only transactions also support savepoints, but they do not do anything. This feature + // is here purely for compatibility. + connection.savepoint("s1"); + try (ResultSet resultSet = connection.executeQuery(SELECT_RANDOM_STATEMENT)) { + int count = 0; + while (resultSet.next()) { + count++; + } + assertEquals(RANDOM_RESULT_SET_ROW_COUNT, count); + } + + connection.rollbackToSavepoint("s1"); + try (ResultSet resultSet = connection.executeQuery(SELECT_RANDOM_STATEMENT)) { + int count = 0; + while (resultSet.next()) { + count++; + } + assertEquals(RANDOM_RESULT_SET_ROW_COUNT, count); + } + // Committing a read-only transaction is necessary to mark the end of the transaction. + // It is a no-op on Cloud Spanner. 
+ connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + BeginTransactionRequest beginRequest = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class).get(0); + assertTrue(beginRequest.getOptions().hasReadOnly()); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + mockSpanner.clearRequests(); + } + } + + @Test + public void testRollbackToSavepointInReadWriteTransaction() { + try (Connection connection = createConnection()) { + connection.setSavepointSupport(SavepointSupport.ENABLED); + + connection.savepoint("s1"); + try (ResultSet resultSet = connection.executeQuery(SELECT_RANDOM_STATEMENT)) { + int count = 0; + while (resultSet.next()) { + count++; + } + assertEquals(RANDOM_RESULT_SET_ROW_COUNT, count); + } + + connection.rollbackToSavepoint("s1"); + try (ResultSet resultSet = connection.executeQuery(SELECT_RANDOM_STATEMENT)) { + int count = 0; + while (resultSet.next()) { + count++; + } + assertEquals(RANDOM_RESULT_SET_ROW_COUNT, count); + } + connection.commit(); + + // Read/write transactions are started with inlined Begin transaction options. 
+ assertEquals(0, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(RollbackRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + + List requests = + mockSpanner.getRequests().stream() + .filter( + request -> + request instanceof ExecuteSqlRequest + || request instanceof RollbackRequest + || request instanceof CommitRequest) + .collect(Collectors.toList()); + assertEquals(4, requests.size()); + int index = 0; + assertEquals(ExecuteSqlRequest.class, requests.get(index++).getClass()); + assertEquals(RollbackRequest.class, requests.get(index++).getClass()); + assertEquals(ExecuteSqlRequest.class, requests.get(index++).getClass()); + assertEquals(CommitRequest.class, requests.get(index++).getClass()); + } + } + + @Test + public void testRollbackToSavepointWithDmlStatements() { + try (Connection connection = createConnection()) { + connection.setSavepointSupport(SavepointSupport.ENABLED); + + // First do a query that is included in the transaction. + try (ResultSet resultSet = connection.executeQuery(SELECT_RANDOM_STATEMENT)) { + int count = 0; + while (resultSet.next()) { + count++; + } + assertEquals(RANDOM_RESULT_SET_ROW_COUNT, count); + } + // Set a savepoint and execute a couple of DML statements. + connection.savepoint("s1"); + connection.executeUpdate(INSERT_STATEMENT); + connection.savepoint("s2"); + connection.executeUpdate(INSERT_STATEMENT); + // Rollback the last DML statement and commit. + connection.rollbackToSavepoint("s2"); + + connection.commit(); + + // Read/write transactions are started with inlined Begin transaction options. 
+ assertEquals(0, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(RollbackRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals(5, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + + List requests = + mockSpanner.getRequests().stream() + .filter( + request -> + request instanceof ExecuteSqlRequest + || request instanceof RollbackRequest + || request instanceof CommitRequest) + .collect(Collectors.toList()); + assertEquals(7, requests.size()); + int index = 0; + assertEquals(ExecuteSqlRequest.class, requests.get(index++).getClass()); + assertEquals(ExecuteSqlRequest.class, requests.get(index++).getClass()); + assertEquals(ExecuteSqlRequest.class, requests.get(index++).getClass()); + assertEquals(RollbackRequest.class, requests.get(index++).getClass()); + assertEquals(ExecuteSqlRequest.class, requests.get(index++).getClass()); + assertEquals(ExecuteSqlRequest.class, requests.get(index++).getClass()); + assertEquals(CommitRequest.class, requests.get(index++).getClass()); + } + } + + @Test + public void testRollbackToSavepointFails() { + Statement statement = Statement.of("select * from foo where bar=true"); + int numRows = 10; + RandomResultSetGenerator generator = new RandomResultSetGenerator(numRows); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + try (Connection connection = createConnection()) { + connection.setSavepointSupport(SavepointSupport.ENABLED); + + try (ResultSet resultSet = connection.executeQuery(statement)) { + int count = 0; + while (resultSet.next()) { + count++; + } + assertEquals(numRows, count); + } + // Set a savepoint and execute a couple of DML statements. + connection.savepoint("s1"); + connection.executeUpdate(INSERT_STATEMENT); + connection.executeUpdate(INSERT_STATEMENT); + // Change the result of the initial query. 
+ mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + // Rollback to before the DML statements. + // This will succeed as long as we don't execute any further statements. + connection.rollbackToSavepoint("s1"); + + // Trying to commit the transaction or execute any other statements on the transaction will + // fail. + assertThrows(AbortedDueToConcurrentModificationException.class, connection::commit); + + // Read/write transactions are started with inlined Begin transaction options. + assertEquals(0, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(2, mockSpanner.countRequestsOfType(RollbackRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals(4, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + + List requests = + mockSpanner.getRequests().stream() + .filter( + request -> + request instanceof ExecuteSqlRequest + || request instanceof RollbackRequest + || request instanceof CommitRequest) + .collect(Collectors.toList()); + assertEquals(6, requests.size()); + int index = 0; + assertEquals(ExecuteSqlRequest.class, requests.get(index++).getClass()); + assertEquals(ExecuteSqlRequest.class, requests.get(index++).getClass()); + assertEquals(ExecuteSqlRequest.class, requests.get(index++).getClass()); + assertEquals(RollbackRequest.class, requests.get(index++).getClass()); + assertEquals(ExecuteSqlRequest.class, requests.get(index++).getClass()); + assertEquals(RollbackRequest.class, requests.get(index++).getClass()); + } + } + + @Test + public void testRollbackToSavepointWithFailAfterRollback() { + Statement statement = Statement.of("select * from foo where bar=true"); + int numRows = 10; + RandomResultSetGenerator generator = new RandomResultSetGenerator(numRows); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + try (Connection connection = createConnection()) { + 
connection.setSavepointSupport(SavepointSupport.FAIL_AFTER_ROLLBACK); + + try (ResultSet resultSet = connection.executeQuery(statement)) { + int count = 0; + while (resultSet.next()) { + count++; + } + assertEquals(numRows, count); + } + // Set a savepoint and execute a couple of DML statements. + connection.savepoint("s1"); + connection.executeUpdate(INSERT_STATEMENT); + connection.executeUpdate(INSERT_STATEMENT); + // Rollback to before the DML statements. + // This will succeed as long as we don't execute any further statements. + connection.rollbackToSavepoint("s1"); + + // Trying to commit the transaction or execute any other statements on the transaction will + // fail with an FAILED_PRECONDITION error, as using a transaction after a rollback to + // savepoint has been disabled. + SpannerException exception = assertThrows(SpannerException.class, connection::commit); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + assertEquals( + "FAILED_PRECONDITION: Using a read/write transaction after rolling back to a " + + "savepoint is not supported with SavepointSupport=FAIL_AFTER_ROLLBACK", + exception.getMessage()); + } + } + + @Test + public void testRollbackToSavepointSucceedsWithRollback() { + for (SavepointSupport savepointSupport : + new SavepointSupport[] {SavepointSupport.ENABLED, SavepointSupport.FAIL_AFTER_ROLLBACK}) { + Statement statement = Statement.of("select * from foo where bar=true"); + int numRows = 10; + RandomResultSetGenerator generator = new RandomResultSetGenerator(numRows); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + try (Connection connection = createConnection()) { + connection.setSavepointSupport(savepointSupport); + + try (ResultSet resultSet = connection.executeQuery(statement)) { + int count = 0; + while (resultSet.next()) { + count++; + } + assertEquals(numRows, count); + } + // Change the result of the initial query and set a savepoint. 
+ connection.savepoint("s1"); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + // This will succeed as long as we don't execute any further statements. + connection.rollbackToSavepoint("s1"); + + // Rolling back the transaction should now be a no-op, as it has already been rolled back. + connection.rollback(); + + // Read/write transactions are started with inlined Begin transaction options. + assertEquals(0, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(RollbackRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + } + mockSpanner.clearRequests(); + } + } + + @Test + public void testMultipleRollbacksWithChangedResults() { + Statement statement = Statement.of("select * from foo where bar=true"); + int numRows = 10; + RandomResultSetGenerator generator = new RandomResultSetGenerator(numRows); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + try (Connection connection = createConnection()) { + try (ResultSet resultSet = connection.executeQuery(statement)) { + int count = 0; + while (resultSet.next()) { + count++; + } + assertEquals(numRows, count); + } + connection.savepoint("s1"); + connection.executeUpdate(INSERT_STATEMENT); + connection.savepoint("s2"); + connection.executeUpdate(INSERT_STATEMENT); + + // Change the result of the initial query to make sure that any retry will fail. + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + // This will succeed as long as we don't execute any further statements. + connection.rollbackToSavepoint("s2"); + // Rolling back one further should also work. + connection.rollbackToSavepoint("s1"); + + // Rolling back the transaction should now be a no-op, as it has already been rolled back. 
+ connection.rollback(); + + assertEquals(1, mockSpanner.countRequestsOfType(RollbackRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals(3, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + } + } + + @Test + public void testMultipleRollbacks() { + Statement statement = Statement.of("select * from foo where bar=true"); + int numRows = 10; + RandomResultSetGenerator generator = new RandomResultSetGenerator(numRows); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + try (Connection connection = createConnection()) { + connection.setSavepointSupport(SavepointSupport.ENABLED); + + try (ResultSet resultSet = connection.executeQuery(statement)) { + int count = 0; + while (resultSet.next()) { + count++; + } + assertEquals(numRows, count); + } + connection.savepoint("s1"); + connection.executeUpdate(INSERT_STATEMENT); + connection.savepoint("s2"); + connection.executeUpdate(INSERT_STATEMENT); + + // First roll back one step and then one more. + connection.rollbackToSavepoint("s2"); + connection.rollbackToSavepoint("s1"); + + // This will only commit the SELECT query. 
+ connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(RollbackRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals(4, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + + List requests = + mockSpanner.getRequests().stream() + .filter( + request -> + request instanceof ExecuteSqlRequest + || request instanceof RollbackRequest + || request instanceof CommitRequest) + .collect(Collectors.toList()); + assertEquals(6, requests.size()); + int index = 0; + assertEquals(ExecuteSqlRequest.class, requests.get(index++).getClass()); + assertEquals(ExecuteSqlRequest.class, requests.get(index++).getClass()); + assertEquals(ExecuteSqlRequest.class, requests.get(index++).getClass()); + assertEquals(RollbackRequest.class, requests.get(index++).getClass()); + assertEquals(ExecuteSqlRequest.class, requests.get(index++).getClass()); + assertEquals(CommitRequest.class, requests.get(index++).getClass()); + } + } + + @Test + public void testRollbackMutations() { + try (Connection connection = createConnection()) { + connection.setSavepointSupport(SavepointSupport.ENABLED); + + connection.bufferedWrite(Mutation.newInsertBuilder("foo1").build()); + connection.savepoint("s1"); + connection.executeUpdate(INSERT_STATEMENT); + connection.bufferedWrite(Mutation.newInsertBuilder("foo2").build()); + connection.savepoint("s2"); + connection.executeUpdate(INSERT_STATEMENT); + connection.bufferedWrite(Mutation.newInsertBuilder("foo3").build()); + + connection.rollbackToSavepoint("s1"); + + // This will only commit the first mutation. 
+ connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(RollbackRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + CommitRequest commitRequest = mockSpanner.getRequestsOfType(CommitRequest.class).get(0); + assertEquals(1, commitRequest.getMutationsCount()); + assertEquals("foo1", commitRequest.getMutations(0).getInsert().getTable()); + } + } + + @Test + public void testRollbackBatchDml() { + try (Connection connection = createConnection()) { + connection.setSavepointSupport(SavepointSupport.ENABLED); + + connection.executeUpdate(INSERT_STATEMENT); + connection.savepoint("s1"); + connection.executeBatchUpdate(ImmutableList.of(INSERT_STATEMENT, INSERT_STATEMENT)); + connection.savepoint("s2"); + + connection.executeUpdate(INSERT_STATEMENT); + connection.savepoint("s3"); + connection.executeBatchUpdate(ImmutableList.of(INSERT_STATEMENT, INSERT_STATEMENT)); + connection.savepoint("s4"); + + connection.rollbackToSavepoint("s2"); + + connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(RollbackRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals(3, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(3, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + + List requests = + mockSpanner.getRequests().stream() + .filter( + request -> + request instanceof ExecuteSqlRequest + || request instanceof RollbackRequest + || request instanceof CommitRequest + || request instanceof ExecuteBatchDmlRequest) + .collect(Collectors.toList()); + assertEquals(8, requests.size()); + int index = 0; + assertEquals(ExecuteSqlRequest.class, requests.get(index++).getClass()); + assertEquals(ExecuteBatchDmlRequest.class, requests.get(index++).getClass()); + assertEquals(ExecuteSqlRequest.class, requests.get(index++).getClass()); + 
assertEquals(ExecuteBatchDmlRequest.class, requests.get(index++).getClass()); + assertEquals(RollbackRequest.class, requests.get(index++).getClass()); + assertEquals(ExecuteSqlRequest.class, requests.get(index++).getClass()); + assertEquals(ExecuteBatchDmlRequest.class, requests.get(index++).getClass()); + assertEquals(CommitRequest.class, requests.get(index++).getClass()); + } + } + + @Test + public void testRollbackToSavepointWithoutInternalRetries() { + Statement statement = Statement.of("select * from foo where bar=true"); + int numRows = 10; + RandomResultSetGenerator generator = new RandomResultSetGenerator(numRows); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + try (Connection connection = createConnection()) { + connection.setRetryAbortsInternally(false); + + connection.savepoint("s1"); + try (ResultSet resultSet = connection.executeQuery(statement)) { + int count = 0; + while (resultSet.next()) { + count++; + } + assertEquals(numRows, count); + } + // This should work. + connection.rollbackToSavepoint("s1"); + // Resuming after a rollback is not supported without internal retries enabled. 
+ assertThrows(SpannerException.class, () -> connection.executeUpdate(INSERT_STATEMENT)); + } + } + + @Test + public void testRollbackToSavepointWithoutInternalRetriesInReadOnlyTransaction() { + Statement statement = Statement.of("select * from foo where bar=true"); + int numRows = 10; + RandomResultSetGenerator generator = new RandomResultSetGenerator(numRows); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + try (Connection connection = createConnection()) { + connection.setRetryAbortsInternally(false); + connection.setReadOnly(true); + + connection.savepoint("s1"); + try (ResultSet resultSet = connection.executeQuery(statement)) { + int count = 0; + while (resultSet.next()) { + count++; + } + assertEquals(numRows, count); + } + + // Both rolling back and resuming after a rollback are supported in a read-only transaction, + // even if internal retries have been disabled. + connection.rollbackToSavepoint("s1"); + try (ResultSet resultSet = connection.executeQuery(statement)) { + int count = 0; + while (resultSet.next()) { + count++; + } + assertEquals(numRows, count); + } + } + } + + @Test + public void testKeepAlive() throws InterruptedException, TimeoutException { + String keepAliveTag = "test_keep_alive_tag"; + System.setProperty("spanner.connection.keep_alive_interval_millis", "1"); + System.setProperty("spanner.connection.keep_alive_query_tag", keepAliveTag); + try (Connection connection = createConnection()) { + connection.setSavepointSupport(SavepointSupport.ENABLED); + connection.setKeepTransactionAlive(true); + // Start a transaction by executing a statement. + connection.execute(INSERT_STATEMENT); + // Verify that we get a keep-alive request. + verifyHasKeepAliveRequest(keepAliveTag); + // Set a savepoint, execute another statement, and rollback to the savepoint. + // The keep-alive should not be sent after the transaction has been rolled back to the + // savepoint. 
+ connection.savepoint("s1"); + connection.execute(INSERT_STATEMENT); + connection.rollbackToSavepoint("s1"); + String keepAliveTagAfterRollback = "test_keep_alive_tag_after_rollback"; + System.setProperty("spanner.connection.keep_alive_query_tag", keepAliveTagAfterRollback); + mockSpanner.waitForRequestsToContain(RollbackRequest.class, 1000L); + mockSpanner.clearRequests(); + + // Verify that we don't get any new keep-alive requests from this point. + Thread.sleep(2L); + assertEquals(0, countKeepAliveRequest(keepAliveTagAfterRollback)); + // Resume the transaction and verify that we get a keep-alive again. + connection.execute(INSERT_STATEMENT); + verifyHasKeepAliveRequest(keepAliveTagAfterRollback); + } finally { + System.clearProperty("spanner.connection.keep_alive_interval_millis"); + System.clearProperty("spanner.connection.keep_alive_query_tag"); + } + } + + private void verifyHasKeepAliveRequest(String tag) throws InterruptedException, TimeoutException { + mockSpanner.waitForRequestsToContain( + r -> { + if (!(r instanceof ExecuteSqlRequest)) { + return false; + } + ExecuteSqlRequest request = (ExecuteSqlRequest) r; + return request.getSql().equals("SELECT 1") + && request.getRequestOptions().getRequestTag().equals(tag); + }, + 1000L); + } + + private long countKeepAliveRequest(String tag) { + return mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter( + request -> + request.getSql().equals("SELECT 1") + && request.getRequestOptions().getRequestTag().equals(tag)) + .count(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SavepointTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SavepointTest.java new file mode 100644 index 000000000000..1ea76845c401 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SavepointTest.java @@ -0,0 +1,217 @@ +/* + * Copyright 2023 Google LLC + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; +import static org.mockito.Mockito.mock; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.connection.AbstractMultiUseTransaction.Savepoint; +import com.google.common.collect.ImmutableList; +import io.opentelemetry.api.trace.Span; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class SavepointTest { + static class TestTransaction extends ReadOnlyTransaction { + TestTransaction() { + super( + ReadOnlyTransaction.newBuilder() + .setSpan(Span.getInvalid()) + .withStatementExecutor(mock(StatementExecutor.class))); + } + } + + @Test + public void testCreateSavepoint_GoogleSql() { + Dialect dialect = Dialect.GOOGLE_STANDARD_SQL; + TestTransaction transaction = new TestTransaction(); + + transaction.savepoint("s1", dialect); + assertEquals(ImmutableList.of(Savepoint.of("s1")), transaction.getSavepoints()); + + transaction.savepoint("s2", dialect); + assertEquals( + ImmutableList.of(Savepoint.of("s1"), Savepoint.of("s2")), transaction.getSavepoints()); + + // GoogleSql does not allow duplicate savepoint names. 
+ assertThrows(SpannerException.class, () -> transaction.savepoint("s1", dialect)); + assertThrows(SpannerException.class, () -> transaction.savepoint("s2", dialect)); + } + + @Test + public void testCreateSavepoint_PostgreSQL() { + Dialect dialect = Dialect.POSTGRESQL; + TestTransaction transaction = new TestTransaction(); + + transaction.savepoint("s1", dialect); + assertEquals(ImmutableList.of(Savepoint.of("s1")), transaction.getSavepoints()); + + transaction.savepoint("s2", dialect); + assertEquals( + ImmutableList.of(Savepoint.of("s1"), Savepoint.of("s2")), transaction.getSavepoints()); + + // PostgreSQL allows duplicate savepoint names. + transaction.savepoint("s2", dialect); + assertEquals( + ImmutableList.of(Savepoint.of("s1"), Savepoint.of("s2"), Savepoint.of("s2")), + transaction.getSavepoints()); + transaction.savepoint("s1", dialect); + assertEquals( + ImmutableList.of( + Savepoint.of("s1"), Savepoint.of("s2"), Savepoint.of("s2"), Savepoint.of("s1")), + transaction.getSavepoints()); + } + + @Test + public void testReleaseSavepoint_GoogleSql() { + Dialect dialect = Dialect.GOOGLE_STANDARD_SQL; + TestTransaction transaction = new TestTransaction(); + + transaction.savepoint("s1", dialect); + assertEquals(ImmutableList.of(Savepoint.of("s1")), transaction.getSavepoints()); + transaction.releaseSavepoint("s1"); + assertEquals(ImmutableList.of(), transaction.getSavepoints()); + + transaction.savepoint("s1", dialect); + transaction.savepoint("s2", dialect); + assertEquals( + ImmutableList.of(Savepoint.of("s1"), Savepoint.of("s2")), transaction.getSavepoints()); + transaction.releaseSavepoint("s2"); + assertEquals(ImmutableList.of(Savepoint.of("s1")), transaction.getSavepoints()); + + transaction.savepoint("s2", dialect); + assertEquals( + ImmutableList.of(Savepoint.of("s1"), Savepoint.of("s2")), transaction.getSavepoints()); + transaction.releaseSavepoint("s1"); + assertEquals(ImmutableList.of(), transaction.getSavepoints()); + + 
assertThrows(SpannerException.class, () -> transaction.releaseSavepoint("s1")); + + transaction.savepoint("s1", dialect); + assertThrows(SpannerException.class, () -> transaction.releaseSavepoint("s2")); + assertEquals(ImmutableList.of(Savepoint.of("s1")), transaction.getSavepoints()); + } + + @Test + public void testReleaseSavepoint_PostgreSQL() { + Dialect dialect = Dialect.POSTGRESQL; + TestTransaction transaction = new TestTransaction(); + + transaction.savepoint("s1", dialect); + assertEquals(ImmutableList.of(Savepoint.of("s1")), transaction.getSavepoints()); + transaction.releaseSavepoint("s1"); + assertEquals(ImmutableList.of(), transaction.getSavepoints()); + + transaction.savepoint("s1", dialect); + transaction.savepoint("s2", dialect); + assertEquals( + ImmutableList.of(Savepoint.of("s1"), Savepoint.of("s2")), transaction.getSavepoints()); + transaction.releaseSavepoint("s2"); + assertEquals(ImmutableList.of(Savepoint.of("s1")), transaction.getSavepoints()); + + transaction.savepoint("s2", dialect); + assertEquals( + ImmutableList.of(Savepoint.of("s1"), Savepoint.of("s2")), transaction.getSavepoints()); + transaction.releaseSavepoint("s1"); + assertEquals(ImmutableList.of(), transaction.getSavepoints()); + + assertThrows(SpannerException.class, () -> transaction.releaseSavepoint("s1")); + + transaction.savepoint("s1", dialect); + assertThrows(SpannerException.class, () -> transaction.releaseSavepoint("s2")); + assertEquals(ImmutableList.of(Savepoint.of("s1")), transaction.getSavepoints()); + + transaction.savepoint("s2", dialect); + transaction.savepoint("s1", dialect); + assertEquals( + ImmutableList.of(Savepoint.of("s1"), Savepoint.of("s2"), Savepoint.of("s1")), + transaction.getSavepoints()); + transaction.releaseSavepoint("s1"); + assertEquals( + ImmutableList.of(Savepoint.of("s1"), Savepoint.of("s2")), transaction.getSavepoints()); + } + + @Test + public void testRollbackToSavepoint_GoogleSql() { + Dialect dialect = Dialect.GOOGLE_STANDARD_SQL; + 
TestTransaction transaction = new TestTransaction(); + + transaction.savepoint("s1", dialect); + assertEquals(ImmutableList.of(Savepoint.of("s1")), transaction.getSavepoints()); + transaction.rollbackToSavepoint("s1", SavepointSupport.ENABLED); + assertEquals(ImmutableList.of(Savepoint.of("s1")), transaction.getSavepoints()); + + transaction.savepoint("s2", dialect); + assertEquals( + ImmutableList.of(Savepoint.of("s1"), Savepoint.of("s2")), transaction.getSavepoints()); + transaction.rollbackToSavepoint("s2", SavepointSupport.ENABLED); + assertEquals( + ImmutableList.of(Savepoint.of("s1"), Savepoint.of("s2")), transaction.getSavepoints()); + + assertEquals( + ImmutableList.of(Savepoint.of("s1"), Savepoint.of("s2")), transaction.getSavepoints()); + transaction.rollbackToSavepoint("s1", SavepointSupport.ENABLED); + assertEquals(ImmutableList.of(Savepoint.of("s1")), transaction.getSavepoints()); + + assertThrows( + SpannerException.class, + () -> transaction.rollbackToSavepoint("s2", SavepointSupport.ENABLED)); + } + + @Test + public void testRollbackToSavepoint_PostgreSQL() { + Dialect dialect = Dialect.POSTGRESQL; + TestTransaction transaction = new TestTransaction(); + + transaction.savepoint("s1", dialect); + assertEquals(ImmutableList.of(Savepoint.of("s1")), transaction.getSavepoints()); + transaction.rollbackToSavepoint("s1", SavepointSupport.ENABLED); + assertEquals(ImmutableList.of(Savepoint.of("s1")), transaction.getSavepoints()); + + transaction.savepoint("s2", dialect); + assertEquals( + ImmutableList.of(Savepoint.of("s1"), Savepoint.of("s2")), transaction.getSavepoints()); + transaction.rollbackToSavepoint("s2", SavepointSupport.ENABLED); + assertEquals( + ImmutableList.of(Savepoint.of("s1"), Savepoint.of("s2")), transaction.getSavepoints()); + + assertEquals( + ImmutableList.of(Savepoint.of("s1"), Savepoint.of("s2")), transaction.getSavepoints()); + transaction.rollbackToSavepoint("s1", SavepointSupport.ENABLED); + 
assertEquals(ImmutableList.of(Savepoint.of("s1")), transaction.getSavepoints()); + + assertThrows( + SpannerException.class, + () -> transaction.rollbackToSavepoint("s2", SavepointSupport.ENABLED)); + assertEquals(ImmutableList.of(Savepoint.of("s1")), transaction.getSavepoints()); + + transaction.savepoint("s2", dialect); + transaction.savepoint("s1", dialect); + assertEquals( + ImmutableList.of(Savepoint.of("s1"), Savepoint.of("s2"), Savepoint.of("s1")), + transaction.getSavepoints()); + transaction.rollbackToSavepoint("s1", SavepointSupport.ENABLED); + assertEquals( + ImmutableList.of(Savepoint.of("s1"), Savepoint.of("s2"), Savepoint.of("s1")), + transaction.getSavepoints()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SetPgSessionCharacteristicsTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SetPgSessionCharacteristicsTest.java new file mode 100644 index 000000000000..fc509b42a1fd --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SetPgSessionCharacteristicsTest.java @@ -0,0 +1,271 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class SetPgSessionCharacteristicsTest { + private final AbstractStatementParser parser = + AbstractStatementParser.getInstance(Dialect.POSTGRESQL); + + @Test + public void testSetIsolationLevelDefault() { + ConnectionImpl connection = mock(ConnectionImpl.class); + when(connection.getDialect()).thenReturn(Dialect.POSTGRESQL); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + + String sql = "set session characteristics as transaction isolation level default"; + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertEquals(sql, StatementType.CLIENT_SIDE, statement.getType()); + statement.getClientSideStatement().execute(executor, statement); + + verify(connection, never()).setReadOnly(anyBoolean()); + verify(connection).setDefaultIsolationLevel(IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED); + } + + @Test + public void testSetIsolationLevelSerializable() { + ConnectionImpl connection = mock(ConnectionImpl.class); + when(connection.getDialect()).thenReturn(Dialect.POSTGRESQL); + ConnectionStatementExecutorImpl executor = new 
ConnectionStatementExecutorImpl(connection); + + String sql = "set session characteristics as transaction isolation level serializable"; + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertEquals(sql, StatementType.CLIENT_SIDE, statement.getType()); + statement.getClientSideStatement().execute(executor, statement); + + verify(connection, never()).setReadOnly(anyBoolean()); + verify(connection).setDefaultIsolationLevel(IsolationLevel.SERIALIZABLE); + } + + @Test + public void testSetIsolationLevelRepeatableRead() { + ConnectionImpl connection = mock(ConnectionImpl.class); + when(connection.getDialect()).thenReturn(Dialect.POSTGRESQL); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + + String sql = "set session characteristics as transaction isolation level repeatable read"; + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertEquals(sql, StatementType.CLIENT_SIDE, statement.getType()); + statement.getClientSideStatement().execute(executor, statement); + + verify(connection, never()).setReadOnly(anyBoolean()); + verify(connection).setDefaultIsolationLevel(IsolationLevel.REPEATABLE_READ); + } + + @Test + public void testSetIsolationLevelReadOnly() { + ConnectionImpl connection = mock(ConnectionImpl.class); + when(connection.getDialect()).thenReturn(Dialect.POSTGRESQL); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + + String sql = "set\tsession\ncharacteristics as transaction read only"; + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertEquals(sql, StatementType.CLIENT_SIDE, statement.getType()); + statement.getClientSideStatement().execute(executor, statement); + + verify(connection).setReadOnly(true); + verify(connection, never()).setReadOnly(false); + verify(connection, never()).setDefaultIsolationLevel(any(IsolationLevel.class)); + } + + @Test + public void testSetIsolationLevelReadWrite() { + ConnectionImpl 
connection = mock(ConnectionImpl.class); + when(connection.getDialect()).thenReturn(Dialect.POSTGRESQL); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + + String sql = "set session characteristics as transaction read write"; + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertEquals(sql, StatementType.CLIENT_SIDE, statement.getType()); + statement.getClientSideStatement().execute(executor, statement); + + verify(connection).setReadOnly(false); + verify(connection, never()).setReadOnly(true); + verify(connection, never()).setDefaultIsolationLevel(any(IsolationLevel.class)); + } + + @Test + public void testSetIsolationLevelSerializableReadWrite() { + ConnectionImpl connection = mock(ConnectionImpl.class); + when(connection.getDialect()).thenReturn(Dialect.POSTGRESQL); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + + String sql = + "set session characteristics as transaction isolation level serializable read" + + " write"; + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertEquals(sql, StatementType.CLIENT_SIDE, statement.getType()); + statement.getClientSideStatement().execute(executor, statement); + + verify(connection).setReadOnly(false); + verify(connection, never()).setReadOnly(true); + verify(connection).setDefaultIsolationLevel(IsolationLevel.SERIALIZABLE); + } + + @Test + public void testSetIsolationLevelSerializableReadOnly() { + ConnectionImpl connection = mock(ConnectionImpl.class); + when(connection.getDialect()).thenReturn(Dialect.POSTGRESQL); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + + String sql = + "set session characteristics as transaction isolation level serializable read" + + " only"; + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertEquals(sql, StatementType.CLIENT_SIDE, statement.getType()); + 
statement.getClientSideStatement().execute(executor, statement); + + verify(connection).setReadOnly(true); + verify(connection).setDefaultIsolationLevel(IsolationLevel.SERIALIZABLE); + } + + @Test + public void testSetMultipleTransactionModes() { + ConnectionImpl connection = mock(ConnectionImpl.class); + when(connection.getDialect()).thenReturn(Dialect.POSTGRESQL); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + + String sql = + "set session characteristics as transaction isolation level default, read only, isolation" + + " level serializable, read write"; + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertEquals(sql, StatementType.CLIENT_SIDE, statement.getType()); + statement.getClientSideStatement().execute(executor, statement); + + verify(connection).setReadOnly(false); + verify(connection, never()).setReadOnly(true); + verify(connection).setDefaultIsolationLevel(IsolationLevel.SERIALIZABLE); + } + + @Test + public void testDefaultTransactionIsolation() { + ConnectionImpl connection = mock(ConnectionImpl.class); + when(connection.getDialect()).thenReturn(Dialect.POSTGRESQL); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + + int count = 0; + for (String sql : + new String[] { + "set default_transaction_isolation = serializable", + "set default_transaction_isolation = 'serializable'", + "set default_transaction_isolation to serializable", + "set default_transaction_isolation to 'serializable'", + "set default_transaction_isolation to 'SERIALIZABLE'", + "set default_transaction_isolation to \"SERIALIZABLE\"", + "set default_transaction_isolation to default", + "set default_transaction_isolation to DEFAULT", + "set default_transaction_isolation = default", + "set default_transaction_isolation = DEFAULT" + }) { + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertEquals(sql, StatementType.CLIENT_SIDE, statement.getType()); + 
statement.getClientSideStatement().execute(executor, statement); + count++; + } + + verify(connection, never()).setReadOnly(anyBoolean()); + verify(connection, times(count)).setDefaultIsolationLevel(any(IsolationLevel.class)); + } + + @Test + public void testDefaultTransactionReadOnlyTrue() { + ConnectionImpl connection = mock(ConnectionImpl.class); + when(connection.getDialect()).thenReturn(Dialect.POSTGRESQL); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + String[] statements = + new String[] { + "set default_transaction_read_only = true", + "set default_transaction_read_only = 'true'", + "set default_transaction_read_only = \"true\"", + "set default_transaction_read_only to true", + "set default_transaction_read_only to 'true'", + "set default_transaction_read_only to \"true\"", + "set default_transaction_read_only = t", + "set default_transaction_read_only = 'tr'", + "set default_transaction_read_only = \"tru\"", + "set default_transaction_read_only to tru", + "set default_transaction_read_only to 'tr'", + "set default_transaction_read_only to \"t\"", + "set default_transaction_read_only = on", + "set default_transaction_read_only = 1", + "set default_transaction_read_only = yes", + "set default_transaction_read_only = ye", + "set default_transaction_read_only = y", + }; + + for (String sql : statements) { + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertEquals(sql, StatementType.CLIENT_SIDE, statement.getType()); + statement.getClientSideStatement().execute(executor, statement); + } + + verify(connection, times(statements.length)).setReadOnly(true); + } + + @Test + public void testDefaultTransactionReadOnlyFalse() { + ConnectionImpl connection = mock(ConnectionImpl.class); + when(connection.getDialect()).thenReturn(Dialect.POSTGRESQL); + ConnectionStatementExecutorImpl executor = new ConnectionStatementExecutorImpl(connection); + String[] statements = + new String[] { + "set 
default_transaction_read_only = false", + "set default_transaction_read_only = 'false'", + "set default_transaction_read_only = \"false\"", + "set default_transaction_read_only to false", + "set default_transaction_read_only to 'false'", + "set default_transaction_read_only to \"false\"", + "set default_transaction_read_only = f", + "set default_transaction_read_only = 'fa'", + "set default_transaction_read_only = \"fal\"", + "set default_transaction_read_only to fal", + "set default_transaction_read_only to 'fa'", + "set default_transaction_read_only to \"f\"", + "set default_transaction_read_only = off", + "set default_transaction_read_only = of", + "set default_transaction_read_only = 0", + "set default_transaction_read_only = no", + "set default_transaction_read_only = n", + }; + + for (String sql : statements) { + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertEquals(sql, StatementType.CLIENT_SIDE, statement.getType()); + statement.getClientSideStatement().execute(executor, statement); + } + + verify(connection, times(statements.length)).setReadOnly(false); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SetReadOnlyStalenessSqlScriptTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SetReadOnlyStalenessSqlScriptTest.java new file mode 100644 index 000000000000..ec0f225bf9c8 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SetReadOnlyStalenessSqlScriptTest.java @@ -0,0 +1,37 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import org.junit.Test; + +public class SetReadOnlyStalenessSqlScriptTest extends AbstractSqlScriptTest { + private String getFileName() { + switch (dialect) { + case POSTGRESQL: + return "postgresql/SetReadOnlyStalenessTest.sql"; + case GOOGLE_STANDARD_SQL: + default: + return "SetReadOnlyStalenessTest.sql"; + } + } + + @Test + public void testSetReadOnlyStalenessScript() throws Exception { + SqlScriptVerifier verifier = new SqlScriptVerifier(new TestConnectionProvider(dialect)); + verifier.verifyStatementsInFile(getFileName(), getClass(), true); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SetStatementTimeoutSqlScriptTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SetStatementTimeoutSqlScriptTest.java new file mode 100644 index 000000000000..1a55618787d3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SetStatementTimeoutSqlScriptTest.java @@ -0,0 +1,39 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.Dialect; +import org.junit.Test; + +public class SetStatementTimeoutSqlScriptTest extends AbstractSqlScriptTest { + + private String getFile(Dialect dialect) { + switch (dialect) { + case POSTGRESQL: + return "postgresql/SetStatementTimeoutTest.sql"; + case GOOGLE_STANDARD_SQL: + default: + return "SetStatementTimeoutTest.sql"; + } + } + + @Test + public void testSetStatementTimeoutScript() throws Exception { + SqlScriptVerifier verifier = new SqlScriptVerifier(new TestConnectionProvider(dialect)); + verifier.verifyStatementsInFile(getFile(dialect), getClass(), true); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SimpleParserTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SimpleParserTest.java new file mode 100644 index 000000000000..4747af2093f5 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SimpleParserTest.java @@ -0,0 +1,250 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.SimpleParser.Result.NOT_FOUND; +import static com.google.cloud.spanner.connection.SimpleParser.Result.found; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeTrue; + +import com.google.cloud.spanner.Dialect; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class SimpleParserTest { + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + @Parameter public Dialect dialect; + + SimpleParser parserFor(String sql) { + return new SimpleParser(dialect, sql); + } + + @Test + public void testResultHashCode() { + assertEquals(0, NOT_FOUND.hashCode()); + assertEquals(found("foo").hashCode(), found("foo").hashCode()); + assertNotEquals(found("foo").hashCode(), found("bar").hashCode()); + assertNotEquals(NOT_FOUND.hashCode(), found("bar").hashCode()); + } + + @Test + public void testResultEquals() { + assertEquals(found("foo"), found("foo")); + assertNotEquals(found("foo"), found("bar")); + assertNotEquals(NOT_FOUND, found("bar")); + assertNotEquals(found("foo"), new Object()); + 
assertNotEquals(NOT_FOUND, new Object()); + } + + @Test + public void testResultToString() { + assertEquals("foo", found("foo").toString()); + assertEquals("NOT FOUND", NOT_FOUND.toString()); + } + + @Test + public void testResultGetValue() { + assertEquals("foo", found("foo").getValue()); + assertNull(NOT_FOUND.getValue()); + } + + @Test + public void testEatToken() { + assertTrue(parserFor("(foo").eatToken('(')); + assertTrue(parserFor("(").eatToken('(')); + assertTrue(parserFor("( ").eatToken('(')); + assertTrue(parserFor("\t( foo").eatToken('(')); + + assertFalse(parserFor("foo(").eatToken('(')); + assertFalse(parserFor("").eatToken('(')); + } + + @Test + public void testEatTokenAdvancesPosition() { + SimpleParser parser = parserFor("@{test=value}"); + assertEquals(0, parser.getPos()); + assertTrue(parser.eatToken('@')); + assertEquals(1, parser.getPos()); + + assertFalse(parser.eatToken('(')); + assertEquals(1, parser.getPos()); + + assertTrue(parser.eatToken('{')); + assertEquals(2, parser.getPos()); + } + + @Test + public void testEatTokensAdvancesPosition() { + SimpleParser parser = parserFor("@{test=value}"); + assertEquals(0, parser.getPos()); + assertTrue(parser.eatTokens('@', '{')); + assertEquals(2, parser.getPos()); + + assertFalse(parser.eatTokens('@', '{')); + assertEquals(2, parser.getPos()); + + parser = parserFor("@ /* comment */ { test=value}"); + assertEquals(0, parser.getPos()); + assertTrue(parser.eatTokens('@', '{')); + assertEquals("@ /* comment */ {".length(), parser.getPos()); + } + + @Test + public void testPeekTokenKeepsPosition() { + SimpleParser parser = parserFor("@{test=value}"); + assertEquals(0, parser.getPos()); + assertTrue(parser.peekToken('@')); + assertEquals(0, parser.getPos()); + + assertFalse(parser.peekToken('{')); + assertEquals(0, parser.getPos()); + } + + @Test + public void testPeekTokensKeepsPosition() { + SimpleParser parser = parserFor("@{test=value}"); + assertEquals(0, parser.getPos()); + 
assertTrue(parser.peekTokens('@', '{')); + assertEquals(0, parser.getPos()); + } + + @Test + public void testEatIdentifier() { + assertEquals(found("foo"), parserFor("foo").eatIdentifier()); + assertEquals(found("foo"), parserFor("foo(id)").eatIdentifier()); + assertEquals(found("foo"), parserFor("foo bar").eatIdentifier()); + + assertEquals(found("foo"), parserFor(" foo bar").eatIdentifier()); + assertEquals(found("foo"), parserFor("\tfoo").eatIdentifier()); + assertEquals(found("bar"), parserFor("\n bar").eatIdentifier()); + assertEquals(found("foo"), parserFor(" foo").eatIdentifier()); + assertEquals(found("foo"), parserFor("foo\"bar\"").eatIdentifier()); + assertEquals(found("foo"), parserFor("foo.bar").eatIdentifier()); + + assertEquals(found("foo"), parserFor("foo) bar").eatIdentifier()); + assertEquals(found("foo"), parserFor("foo- bar").eatIdentifier()); + assertEquals(found("foo"), parserFor("foo/ bar").eatIdentifier()); + assertEquals(found("foo$"), parserFor("foo$ bar").eatIdentifier()); + assertEquals(found("f$oo"), parserFor("f$oo bar").eatIdentifier()); + assertEquals(found("_foo"), parserFor("_foo bar").eatIdentifier()); + assertEquals(found("øfoo"), parserFor("øfoo bar").eatIdentifier()); + + assertEquals(NOT_FOUND, parserFor("\"foo").eatIdentifier()); + assertEquals(NOT_FOUND, parserFor("\\foo").eatIdentifier()); + assertEquals(NOT_FOUND, parserFor("1foo").eatIdentifier()); + assertEquals(NOT_FOUND, parserFor("-foo").eatIdentifier()); + assertEquals(NOT_FOUND, parserFor("$foo").eatIdentifier()); + assertEquals(NOT_FOUND, parserFor("").eatIdentifier()); + assertEquals(NOT_FOUND, parserFor(" ").eatIdentifier()); + assertEquals(NOT_FOUND, parserFor("\n").eatIdentifier()); + assertEquals(NOT_FOUND, parserFor("/* comment */").eatIdentifier()); + assertEquals(NOT_FOUND, parserFor("-- comment").eatIdentifier()); + assertEquals(NOT_FOUND, parserFor("/* comment").eatIdentifier()); + + String nestedCommentFollowedByIdentifier = + "/* comment /* nested 
comment */ " + + "still a comment if nested comments are supported, " + + "and otherwise the start of an identifier. */ test"; + if (AbstractStatementParser.getInstance(dialect).supportsNestedComments()) { + assertEquals(found("test"), parserFor(nestedCommentFollowedByIdentifier).eatIdentifier()); + } else { + // The parser does not look ahead if the rest of the SQL string is malformed. It just reads + // from the current position. + assertEquals(found("still"), parserFor(nestedCommentFollowedByIdentifier).eatIdentifier()); + } + + if (AbstractStatementParser.getInstance(dialect).supportsHashSingleLineComments()) { + assertEquals(found("test"), parserFor("# comment\ntest").eatIdentifier()); + } else { + // '#' is not a valid start of an identifier. + assertEquals(NOT_FOUND, parserFor("# not a comment\ntest").eatIdentifier()); + } + } + + @Test + public void testEatSingleQuotedString() { + assertEquals(found("test"), parserFor("'test'").eatSingleQuotedString()); + assertEquals(found("test"), parserFor(" 'test' ").eatSingleQuotedString()); + assertEquals(found("test"), parserFor("\n'test'").eatSingleQuotedString()); + assertEquals(found("test"), parserFor("\t'test'").eatSingleQuotedString()); + assertEquals(found("test test"), parserFor(" 'test test' ").eatSingleQuotedString()); + assertEquals(found("test\t"), parserFor("'test\t'").eatSingleQuotedString()); + assertEquals( + found("test"), parserFor("/* comment */'test'/*comment*/").eatSingleQuotedString()); + assertEquals(found("test"), parserFor("-- comment\n'test'--comment\n").eatSingleQuotedString()); + assertEquals( + found("test /* not a comment */"), + parserFor("'test /* not a comment */'").eatSingleQuotedString()); + + assertEquals(NOT_FOUND, parserFor("test").eatSingleQuotedString()); + assertEquals(NOT_FOUND, parserFor("'test").eatSingleQuotedString()); + assertEquals(NOT_FOUND, parserFor("test'").eatSingleQuotedString()); + assertEquals(NOT_FOUND, parserFor("\"test\"").eatSingleQuotedString()); + 
assertEquals(NOT_FOUND, parserFor("'test\n'").eatSingleQuotedString()); + assertEquals(NOT_FOUND, parserFor("'\ntest'").eatSingleQuotedString()); + assertEquals(NOT_FOUND, parserFor("'te\nst'").eatSingleQuotedString()); + } + + @Test + public void testEatSingleQuotedStringAdvancesPosition() { + SimpleParser parser = parserFor("'test 1' 'test 2' "); + assertEquals(found("test 1"), parser.eatSingleQuotedString()); + assertEquals("'test 1'".length(), parser.getPos()); + assertEquals(found("test 2"), parser.eatSingleQuotedString()); + assertEquals("'test 1' 'test 2'".length(), parser.getPos()); + assertEquals(NOT_FOUND, parser.eatSingleQuotedString()); + assertEquals(parser.getSql().length(), parser.getPos()); + } + + @Test + public void testSkipHint() { + assumeTrue("Hints in PostgreSQL are comments", dialect == Dialect.GOOGLE_STANDARD_SQL); + + assertEquals("SELECT 1", skipHint("SELECT 1")); + assertEquals("SELECT 1", skipHint("@{rpc_priority=HIGH}SELECT 1")); + assertEquals("SELECT 1", skipHint("@{statement_tag='test'}SELECT 1")); + assertEquals(" \nSELECT 1", skipHint(" @{statement_tag = 'test'} \nSELECT 1")); + assertEquals( + " /* comment after */ SELECT 1", + skipHint("/* comment before */ @{statement_tag='test'} /* comment after */ SELECT 1")); + assertEquals( + " -- comment after\nSELECT 1", + skipHint("-- comment before\n @{statement_tag='test'} -- comment after\nSELECT 1")); + assertEquals( + "-- comment @{statement_tag='test'}\n -- also a comment\nSELECT 1", + skipHint("-- comment @{statement_tag='test'}\n -- also a comment\nSELECT 1")); + } + + static String skipHint(String sql) { + SimpleParser parser = new SimpleParser(Dialect.GOOGLE_STANDARD_SQL, sql); + parser.skipHint(); + return parser.getSql().substring(parser.getPos()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SingleUseTransactionTest.java 
b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SingleUseTransactionTest.java new file mode 100644 index 000000000000..bf4d8655d095 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SingleUseTransactionTest.java @@ -0,0 +1,896 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static com.google.cloud.spanner.connection.ConnectionProperties.AUTOCOMMIT_DML_MODE; +import static com.google.cloud.spanner.connection.ConnectionProperties.READONLY; +import static com.google.cloud.spanner.connection.ConnectionProperties.READ_ONLY_STALENESS; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyList; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.doCallRealMethod; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.api.core.ApiFuture; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.Timestamp; +import 
com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.AsyncResultSet; +import com.google.cloud.spanner.BatchClient; +import com.google.cloud.spanner.CommitResponse; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.Options.ReadOption; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.TransactionContext; +import com.google.cloud.spanner.TransactionManager; +import com.google.cloud.spanner.TransactionRunner; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; +import com.google.cloud.spanner.connection.ConnectionProperty.Context; +import com.google.cloud.spanner.connection.StatementExecutor.StatementTimeout; +import com.google.cloud.spanner.connection.UnitOfWork.CallType; +import com.google.common.base.Preconditions; +import com.google.common.io.ByteStreams; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.v1.ResultSetStats; +import io.opentelemetry.api.trace.Span; +import java.io.InputStream; +import java.util.Arrays; +import java.util.Calendar; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import 
org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +@RunWith(JUnit4.class) +public class SingleUseTransactionTest { + private static final String VALID_QUERY = "SELECT * FROM FOO"; + private static final String INVALID_QUERY = "SELECT * FROM BAR"; + private static final String SLOW_QUERY = "SELECT * FROM SLOW_TABLE"; + private static final String VALID_UPDATE = "UPDATE FOO SET BAR=1"; + private static final String INVALID_UPDATE = "UPDATE BAR SET FOO=1"; + private static final String SLOW_UPDATE = "UPDATE SLOW_TABLE SET FOO=1"; + private static final String VALID_DDL = "CREATE TABLE FOO"; + private static final long VALID_UPDATE_COUNT = 99L; + + private final StatementExecutor executor = new StatementExecutor(); + + private enum CommitBehavior { + SUCCEED, + FAIL, + ABORT + } + + /** Creates a {@link StatementTimeout} that will never timeout. */ + static StatementTimeout nullTimeout() { + return new StatementTimeout(); + } + + /** Creates a {@link StatementTimeout} with the given duration. 
*/ + static StatementTimeout timeout(long timeout, TimeUnit unit) { + Preconditions.checkArgument(timeout > 0L); + Preconditions.checkArgument(StatementTimeout.isValidTimeoutUnit(unit)); + StatementTimeout res = new StatementTimeout(); + res.setTimeoutValue(timeout, unit); + return res; + } + + private static class SimpleTransactionManager implements TransactionManager { + private TransactionState state; + private CommitResponse commitResponse; + private TransactionContext txContext; + private CommitBehavior commitBehavior; + + private SimpleTransactionManager(TransactionContext txContext, CommitBehavior commitBehavior) { + this.txContext = txContext; + this.commitBehavior = commitBehavior; + } + + @Override + public TransactionContext begin() { + state = TransactionState.STARTED; + return txContext; + } + + @Override + public TransactionContext begin(AbortedException exception) { + return begin(); + } + + @Override + public void commit() { + switch (commitBehavior) { + case SUCCEED: + commitResponse = new CommitResponse(Timestamp.ofTimeSecondsAndNanos(1, 1)); + state = TransactionState.COMMITTED; + break; + case FAIL: + state = TransactionState.COMMIT_FAILED; + throw SpannerExceptionFactory.newSpannerException(ErrorCode.UNKNOWN, "commit failed"); + case ABORT: + state = TransactionState.COMMIT_FAILED; + commitBehavior = CommitBehavior.SUCCEED; + throw SpannerExceptionFactory.newSpannerException(ErrorCode.ABORTED, "commit aborted"); + default: + throw new IllegalStateException(); + } + } + + @Override + public void rollback() { + state = TransactionState.ROLLED_BACK; + } + + @Override + public TransactionContext resetForRetry() { + return txContext; + } + + @Override + public Timestamp getCommitTimestamp() { + return commitResponse.getCommitTimestamp(); + } + + @Override + public CommitResponse getCommitResponse() { + return commitResponse; + } + + @Override + public TransactionState getState() { + return state; + } + + @Override + public void close() { + if (state 
!= TransactionState.COMMITTED) { + state = TransactionState.ROLLED_BACK; + } + } + } + + private static final class SimpleReadOnlyTransaction + implements com.google.cloud.spanner.ReadOnlyTransaction { + private Timestamp readTimestamp = null; + private final TimestampBound staleness; + + private SimpleReadOnlyTransaction(TimestampBound staleness) { + this.staleness = staleness; + } + + @Override + public ResultSet read( + String table, KeySet keys, Iterable columns, ReadOption... options) { + return null; + } + + @Override + public ResultSet readUsingIndex( + String table, String index, KeySet keys, Iterable columns, ReadOption... options) { + return null; + } + + @Override + public Struct readRow(String table, Key key, Iterable columns) { + return null; + } + + @Override + public Struct readRowUsingIndex(String table, String index, Key key, Iterable columns) { + return null; + } + + @Override + public ResultSet executeQuery(Statement statement, QueryOption... options) { + if (statement.equals(Statement.of(VALID_QUERY))) { + if (readTimestamp == null) { + switch (staleness.getMode()) { + case STRONG: + readTimestamp = Timestamp.now(); + break; + case READ_TIMESTAMP: + readTimestamp = staleness.getReadTimestamp(); + break; + case MIN_READ_TIMESTAMP: + readTimestamp = staleness.getMinReadTimestamp(); + break; + case EXACT_STALENESS: + Calendar cal = Calendar.getInstance(); + cal.add( + Calendar.MILLISECOND, (int) -staleness.getExactStaleness(TimeUnit.MILLISECONDS)); + readTimestamp = Timestamp.of(cal.getTime()); + break; + case MAX_STALENESS: + cal = Calendar.getInstance(); + cal.add( + Calendar.MILLISECOND, (int) -staleness.getMaxStaleness(TimeUnit.MILLISECONDS)); + readTimestamp = Timestamp.of(cal.getTime()); + break; + default: + throw new IllegalStateException(); + } + } + return mock(ResultSet.class); + } else if (statement.equals(Statement.of(SLOW_QUERY))) { + try { + Thread.sleep(10L); + } catch (InterruptedException e) { + // ignore + } + readTimestamp = 
Timestamp.now(); + return mock(ResultSet.class); + } else if (statement.equals(Statement.of(INVALID_QUERY))) { + throw SpannerExceptionFactory.newSpannerException(ErrorCode.UNKNOWN, "invalid query"); + } else { + throw new IllegalArgumentException(); + } + } + + @Override + public ResultSet analyzeQuery(Statement statement, QueryAnalyzeMode queryMode) { + ResultSet rs = executeQuery(statement); + when(rs.getStats()).thenReturn(ResultSetStats.getDefaultInstance()); + return rs; + } + + @Override + public void close() {} + + @Override + public Timestamp getReadTimestamp() { + return readTimestamp; + } + + @Override + public AsyncResultSet readAsync( + String table, KeySet keys, Iterable columns, ReadOption... options) { + return null; + } + + @Override + public AsyncResultSet readUsingIndexAsync( + String table, String index, KeySet keys, Iterable columns, ReadOption... options) { + return null; + } + + @Override + public ApiFuture readRowAsync(String table, Key key, Iterable columns) { + return null; + } + + @Override + public ApiFuture readRowUsingIndexAsync( + String table, String index, Key key, Iterable columns) { + return null; + } + + @Override + public AsyncResultSet executeQueryAsync(Statement statement, QueryOption... 
options) { + return null; + } + } + + private DdlClient createDefaultMockDdlClient() { + try { + DdlClient ddlClient = mock(DdlClient.class); + @SuppressWarnings("unchecked") + final OperationFuture operation = + mock(OperationFuture.class); + when(operation.get()).thenReturn(null); + when(ddlClient.executeDdl(anyString(), any())).thenCallRealMethod(); + when(ddlClient.executeDdl(anyList(), any())).thenReturn(operation); + doCallRealMethod() + .when(ddlClient) + .runWithRetryForMissingDefaultSequenceKind(any(), any(), any(), any()); + return ddlClient; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private SingleUseTransaction createSubject() { + return createSubject( + createDefaultMockDdlClient(), + false, + TimestampBound.strong(), + AutocommitDmlMode.TRANSACTIONAL, + CommitBehavior.SUCCEED, + 0L, + null); + } + + private SingleUseTransaction createSubject(AutocommitDmlMode dmlMode) { + return createSubject( + createDefaultMockDdlClient(), + false, + TimestampBound.strong(), + dmlMode, + CommitBehavior.SUCCEED, + 0L, + null); + } + + private SingleUseTransaction createSubject(CommitBehavior commitBehavior) { + return createSubject( + createDefaultMockDdlClient(), + false, + TimestampBound.strong(), + AutocommitDmlMode.TRANSACTIONAL, + commitBehavior, + 0L, + null); + } + + private SingleUseTransaction createDdlSubject(DdlClient ddlClient) { + return createSubject( + ddlClient, + false, + TimestampBound.strong(), + AutocommitDmlMode.TRANSACTIONAL, + CommitBehavior.SUCCEED, + 0L, + null); + } + + private SingleUseTransaction createProtoDescriptorsSubject( + DdlClient ddlClient, byte[] protoDescriptors) { + return createSubject( + ddlClient, + false, + TimestampBound.strong(), + AutocommitDmlMode.TRANSACTIONAL, + CommitBehavior.SUCCEED, + 0L, + protoDescriptors); + } + + private SingleUseTransaction createReadOnlySubject(TimestampBound staleness) { + return createSubject( + createDefaultMockDdlClient(), + true, + staleness, + 
AutocommitDmlMode.TRANSACTIONAL, + CommitBehavior.SUCCEED, + 0L, + null); + } + + private SingleUseTransaction createSubject( + DdlClient ddlClient, + boolean readOnly, + TimestampBound staleness, + AutocommitDmlMode dmlMode, + final CommitBehavior commitBehavior, + long timeout, + byte[] protoDescriptors) { + DatabaseClient dbClient = mock(DatabaseClient.class); + com.google.cloud.spanner.ReadOnlyTransaction singleUse = + new SimpleReadOnlyTransaction(staleness); + when(dbClient.getDialect()).thenReturn(Dialect.GOOGLE_STANDARD_SQL); + when(dbClient.singleUseReadOnlyTransaction(staleness)).thenReturn(singleUse); + + final TransactionContext txContext = mock(TransactionContext.class); + when(txContext.executeUpdate(Statement.of(VALID_UPDATE))).thenReturn(VALID_UPDATE_COUNT); + when(txContext.executeUpdate(Statement.of(VALID_UPDATE), Options.lastStatement())) + .thenReturn(VALID_UPDATE_COUNT); + when(txContext.executeUpdate(Statement.of(SLOW_UPDATE))) + .thenAnswer( + invocation -> { + Thread.sleep(1000L); + return VALID_UPDATE_COUNT; + }); + when(txContext.executeUpdate(Statement.of(INVALID_UPDATE))) + .thenThrow( + SpannerExceptionFactory.newSpannerException(ErrorCode.UNKNOWN, "invalid update")); + when(txContext.executeUpdate(Statement.of(INVALID_UPDATE), Options.lastStatement())) + .thenThrow( + SpannerExceptionFactory.newSpannerException(ErrorCode.UNKNOWN, "invalid update")); + SimpleTransactionManager txManager = new SimpleTransactionManager(txContext, commitBehavior); + when(dbClient.transactionManager()).thenReturn(txManager); + + when(dbClient.executePartitionedUpdate(Statement.of(VALID_UPDATE))) + .thenReturn(VALID_UPDATE_COUNT); + when(dbClient.executePartitionedUpdate(Statement.of(INVALID_UPDATE))) + .thenThrow( + SpannerExceptionFactory.newSpannerException(ErrorCode.UNKNOWN, "invalid update")); + + ConnectionState connectionState = new ConnectionState(new HashMap<>()); + connectionState.setValue(AUTOCOMMIT_DML_MODE, dmlMode, Context.STARTUP, false); + 
connectionState.setValue(READONLY, readOnly, Context.STARTUP, false); + connectionState.setValue(READ_ONLY_STALENESS, staleness, Context.STARTUP, false); + + when(dbClient.readWriteTransaction()) + .thenAnswer( + new Answer() { + @Override + public TransactionRunner answer(InvocationOnMock invocation) { + return new TransactionRunner() { + private CommitResponse commitResponse; + + @Override + public T run(TransactionCallable callable) { + if (commitBehavior == CommitBehavior.SUCCEED) { + T res; + try { + res = callable.run(txContext); + } catch (Exception e) { + throw SpannerExceptionFactory.newSpannerException(e); + } + commitResponse = new CommitResponse(Timestamp.ofTimeSecondsAndNanos(1, 1)); + return res; + } else if (commitBehavior == CommitBehavior.FAIL) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.UNKNOWN, "commit failed"); + } else { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.ABORTED, "commit aborted"); + } + } + + @Override + public Timestamp getCommitTimestamp() { + if (commitResponse == null) { + throw new IllegalStateException("no commit timestamp"); + } + return commitResponse.getCommitTimestamp(); + } + + public CommitResponse getCommitResponse() { + if (commitResponse == null) { + throw new IllegalStateException("no commit response"); + } + return commitResponse; + } + + @Override + public TransactionRunner allowNestedTransaction() { + return this; + } + }; + } + }); + + return SingleUseTransaction.newBuilder() + .setDatabaseClient(dbClient) + .setBatchClient(mock(BatchClient.class)) + .setDdlClient(ddlClient) + .setConnectionState(connectionState) + .setStatementTimeout( + timeout == 0L ? 
nullTimeout() : timeout(timeout, TimeUnit.MILLISECONDS)) + .withStatementExecutor(executor) + .setSpan(Span.getInvalid()) + .setProtoDescriptors(protoDescriptors) + .build(); + } + + private ParsedStatement createParsedDdl(String sql) { + ParsedStatement statement = mock(ParsedStatement.class); + when(statement.getType()).thenReturn(StatementType.DDL); + when(statement.getStatement()).thenReturn(Statement.of(sql)); + when(statement.getSql()).thenReturn(sql); + return statement; + } + + private ParsedStatement createParsedQuery(String sql) { + ParsedStatement statement = mock(ParsedStatement.class); + when(statement.getType()).thenReturn(StatementType.QUERY); + when(statement.isQuery()).thenReturn(true); + when(statement.getStatement()).thenReturn(Statement.of(sql)); + return statement; + } + + private ParsedStatement createParsedUpdate(String sql) { + ParsedStatement statement = mock(ParsedStatement.class); + when(statement.getType()).thenReturn(StatementType.UPDATE); + when(statement.isUpdate()).thenReturn(true); + when(statement.getStatement()).thenReturn(Statement.of(sql)); + return statement; + } + + private List getTestTimestampBounds() { + return Arrays.asList( + TimestampBound.strong(), + TimestampBound.ofReadTimestamp(Timestamp.now()), + TimestampBound.ofMinReadTimestamp(Timestamp.now()), + TimestampBound.ofExactStaleness(1L, TimeUnit.SECONDS), + TimestampBound.ofMaxStaleness(100L, TimeUnit.MILLISECONDS)); + } + + @Test + public void testCommit() { + SingleUseTransaction subject = createSubject(); + try { + subject.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.FAILED_PRECONDITION); + } + } + + @Test + public void testRollback() { + SingleUseTransaction subject = createSubject(); + try { + subject.rollbackAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE); + fail("missing expected exception"); + } catch 
(SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.FAILED_PRECONDITION); + } + } + + @Test + public void testRunBatch() { + SingleUseTransaction subject = createSubject(); + try { + subject.runBatchAsync(CallType.SYNC); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.FAILED_PRECONDITION); + } + } + + @Test + public void testAbortBatch() { + SingleUseTransaction subject = createSubject(); + try { + subject.abortBatch(); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.FAILED_PRECONDITION); + } + } + + @Test + public void testExecuteDdl() { + String sql = "CREATE TABLE FOO"; + ParsedStatement ddl = createParsedDdl(sql); + DdlClient ddlClient = createDefaultMockDdlClient(); + SingleUseTransaction subject = createDdlSubject(ddlClient); + get(subject.executeDdlAsync(CallType.SYNC, ddl)); + verify(ddlClient).executeDdl(sql, null); + } + + @Test + public void testExecuteDdlWithProtoDescriptors() { + String sql = "CREATE TABLE FOO"; + ParsedStatement ddl = createParsedDdl(sql); + DdlClient ddlClient = createDefaultMockDdlClient(); + // verify when protoDescriptors value is null + SingleUseTransaction subject = createProtoDescriptorsSubject(ddlClient, null); + get(subject.executeDdlAsync(CallType.SYNC, ddl)); + verify(ddlClient).executeDdl(sql, null); + + // verify when protoDescriptors value is not null + byte[] protoDescriptors; + try { + InputStream in = + SingleUseTransactionTest.class + .getClassLoader() + .getResourceAsStream("com/google/cloud/spanner/descriptors.pb"); + assertNotNull(in); + protoDescriptors = ByteStreams.toByteArray(in); + } catch (Exception e) { + throw SpannerExceptionFactory.newSpannerException(e); + } + subject = createProtoDescriptorsSubject(ddlClient, protoDescriptors); + get(subject.executeDdlAsync(CallType.SYNC, ddl)); + verify(ddlClient).executeDdl(sql, protoDescriptors); + 
} + + @Test + public void testExecuteCreateDatabase() { + String sql = "CREATE DATABASE FOO"; + ParsedStatement ddl = createParsedDdl(sql); + DdlClient ddlClient = createDefaultMockDdlClient(); + when(ddlClient.executeCreateDatabase(sql, Dialect.GOOGLE_STANDARD_SQL)) + .thenReturn(mock(OperationFuture.class)); + + SingleUseTransaction singleUseTransaction = createDdlSubject(ddlClient); + get(singleUseTransaction.executeDdlAsync(CallType.SYNC, ddl)); + verify(ddlClient).executeCreateDatabase(sql, Dialect.GOOGLE_STANDARD_SQL); + } + + @Test + public void testExecuteQuery() { + for (TimestampBound staleness : getTestTimestampBounds()) { + for (AnalyzeMode analyzeMode : AnalyzeMode.values()) { + SingleUseTransaction subject = createReadOnlySubject(staleness); + ResultSet rs = + get( + subject.executeQueryAsync( + CallType.SYNC, createParsedQuery(VALID_QUERY), analyzeMode)); + assertThat(rs).isNotNull(); + assertThat(subject.getReadTimestamp()).isNotNull(); + assertThat(subject.getState()) + .isEqualTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.COMMITTED); + while (rs.next()) { + // just loop to the end to get stats + } + if (analyzeMode == AnalyzeMode.NONE) { + assertThat(rs.getStats()).isNull(); + } else { + assertThat(rs.getStats()).isNotNull(); + } + } + } + for (TimestampBound staleness : getTestTimestampBounds()) { + SingleUseTransaction subject = createReadOnlySubject(staleness); + try { + get( + subject.executeQueryAsync( + CallType.SYNC, createParsedQuery(INVALID_QUERY), AnalyzeMode.NONE)); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.UNKNOWN); + } + assertThat(subject.getState()) + .isEqualTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.COMMIT_FAILED); + } + } + + @Test + public void testExecuteQueryWithOptionsTest() { + String sql = "SELECT * FROM FOO"; + QueryOption option = Options.prefetchChunks(10000); + ParsedStatement parsedStatement = 
mock(ParsedStatement.class); + when(parsedStatement.getType()).thenReturn(StatementType.QUERY); + when(parsedStatement.isQuery()).thenReturn(true); + Statement statement = Statement.of(sql); + when(parsedStatement.getStatement()).thenReturn(statement); + DatabaseClient client = mock(DatabaseClient.class); + com.google.cloud.spanner.ReadOnlyTransaction tx = + mock(com.google.cloud.spanner.ReadOnlyTransaction.class); + when(tx.executeQuery(Statement.of(sql), option)).thenReturn(mock(ResultSet.class)); + when(client.singleUseReadOnlyTransaction(TimestampBound.strong())).thenReturn(tx); + + ConnectionState connectionState = new ConnectionState(new HashMap<>()); + connectionState.setValue( + AUTOCOMMIT_DML_MODE, AutocommitDmlMode.TRANSACTIONAL, Context.STARTUP, false); + connectionState.setValue(READ_ONLY_STALENESS, TimestampBound.strong(), Context.STARTUP, false); + + SingleUseTransaction transaction = + SingleUseTransaction.newBuilder() + .setDatabaseClient(client) + .setBatchClient(mock(BatchClient.class)) + .setDdlClient(mock(DdlClient.class)) + .setConnectionState(connectionState) + .withStatementExecutor(executor) + .setSpan(Span.getInvalid()) + .build(); + assertThat( + get( + transaction.executeQueryAsync( + CallType.SYNC, parsedStatement, AnalyzeMode.NONE, option))) + .isNotNull(); + } + + @Test + public void testExecuteUpdate_Transactional_Valid() { + ParsedStatement update = createParsedUpdate(VALID_UPDATE); + SingleUseTransaction subject = createSubject(); + long updateCount = get(subject.executeUpdateAsync(CallType.SYNC, update)); + assertThat(updateCount).isEqualTo(VALID_UPDATE_COUNT); + assertThat(subject.getCommitTimestamp()).isNotNull(); + assertThat(subject.getState()) + .isEqualTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.COMMITTED); + } + + @Test + public void testExecuteUpdate_Transactional_Invalid() { + ParsedStatement update = createParsedUpdate(INVALID_UPDATE); + SingleUseTransaction subject = createSubject(); + try { + 
get(subject.executeUpdateAsync(CallType.SYNC, update)); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.UNKNOWN); + assertThat(e.getMessage()).contains("invalid update"); + } + } + + @Test + public void testExecuteUpdate_Transactional_Valid_FailedCommit() { + ParsedStatement update = createParsedUpdate(VALID_UPDATE); + SingleUseTransaction subject = createSubject(CommitBehavior.FAIL); + try { + get(subject.executeUpdateAsync(CallType.SYNC, update)); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.UNKNOWN); + assertThat(e.getMessage()).contains("commit failed"); + } + } + + @Test + public void testExecuteUpdate_Partitioned_Valid() { + ParsedStatement update = createParsedUpdate(VALID_UPDATE); + SingleUseTransaction subject = createSubject(AutocommitDmlMode.PARTITIONED_NON_ATOMIC); + long updateCount = get(subject.executeUpdateAsync(CallType.SYNC, update)); + assertThat(updateCount).isEqualTo(VALID_UPDATE_COUNT); + assertThat(subject.getState()) + .isEqualTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.COMMITTED); + } + + @Test + public void testExecuteUpdate_Partitioned_Invalid() { + ParsedStatement update = createParsedUpdate(INVALID_UPDATE); + SingleUseTransaction subject = createSubject(AutocommitDmlMode.PARTITIONED_NON_ATOMIC); + try { + get(subject.executeUpdateAsync(CallType.SYNC, update)); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.UNKNOWN); + assertThat(e.getMessage()).contains("invalid update"); + } + } + + @Test + public void testWriteIterable() { + SingleUseTransaction subject = createSubject(); + Mutation mutation = Mutation.newInsertBuilder("FOO").build(); + get(subject.writeAsync(CallType.SYNC, Arrays.asList(mutation, mutation))); + assertThat(subject.getCommitTimestamp()).isNotNull(); + 
assertThat(subject.getState()) + .isEqualTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.COMMITTED); + } + + @Test + public void testWriteIterableFail() { + SingleUseTransaction subject = createSubject(CommitBehavior.FAIL); + Mutation mutation = Mutation.newInsertBuilder("FOO").build(); + try { + get(subject.writeAsync(CallType.SYNC, Arrays.asList(mutation, mutation))); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.UNKNOWN); + assertThat(e.getMessage()).contains("commit failed"); + } + } + + @Test + public void testMultiUse() { + for (TimestampBound staleness : getTestTimestampBounds()) { + SingleUseTransaction subject = createReadOnlySubject(staleness); + ResultSet rs = + get( + subject.executeQueryAsync( + CallType.SYNC, createParsedQuery(VALID_QUERY), AnalyzeMode.NONE)); + assertThat(rs).isNotNull(); + assertThat(subject.getReadTimestamp()).isNotNull(); + try { + get( + subject.executeQueryAsync( + CallType.SYNC, createParsedQuery(VALID_QUERY), AnalyzeMode.NONE)); + fail("missing expected exception"); + } catch (IllegalStateException e) { + // Expected exception + } + } + + String sql = "CREATE TABLE FOO"; + ParsedStatement ddl = createParsedDdl(sql); + DdlClient ddlClient = createDefaultMockDdlClient(); + SingleUseTransaction subject = createDdlSubject(ddlClient); + get(subject.executeDdlAsync(CallType.SYNC, ddl)); + verify(ddlClient).executeDdl(sql, null); + try { + get(subject.executeDdlAsync(CallType.SYNC, ddl)); + fail("missing expected exception"); + } catch (IllegalStateException e) { + // Expected exception + } + + ParsedStatement update = createParsedUpdate(VALID_UPDATE); + subject = createSubject(); + long updateCount = get(subject.executeUpdateAsync(CallType.SYNC, update)); + assertThat(updateCount).isEqualTo(VALID_UPDATE_COUNT); + assertThat(subject.getCommitTimestamp()).isNotNull(); + try { + get(subject.executeUpdateAsync(CallType.SYNC, update)); + 
fail("missing expected exception"); + } catch (IllegalStateException e) { + // Expected exception + } + + subject = createSubject(); + get( + subject.writeAsync( + CallType.SYNC, Collections.singleton(Mutation.newInsertBuilder("FOO").build()))); + assertThat(subject.getCommitTimestamp()).isNotNull(); + try { + get( + subject.writeAsync( + CallType.SYNC, Collections.singleton(Mutation.newInsertBuilder("FOO").build()))); + fail("missing expected exception"); + } catch (IllegalStateException e) { + // Expected exception + } + + subject = createSubject(); + Mutation mutation = Mutation.newInsertBuilder("FOO").build(); + get(subject.writeAsync(CallType.SYNC, Arrays.asList(mutation, mutation))); + assertThat(subject.getCommitTimestamp()).isNotNull(); + try { + get(subject.writeAsync(CallType.SYNC, Arrays.asList(mutation, mutation))); + fail("missing expected exception"); + } catch (IllegalStateException e) { + // Expected exception + } + } + + @Test + public void testGetCommitResponseAfterUpdate() { + ParsedStatement update = createParsedUpdate(VALID_UPDATE); + SingleUseTransaction transaction = createSubject(); + get(transaction.executeUpdateAsync(CallType.SYNC, update)); + assertNotNull(transaction.getCommitResponse()); + assertNotNull(transaction.getCommitResponseOrNull()); + } + + @Test + public void testGetCommitResponseAfterQuery() { + ParsedStatement query = createParsedQuery(VALID_QUERY); + SingleUseTransaction transaction = createSubject(); + get(transaction.executeQueryAsync(CallType.SYNC, query, AnalyzeMode.NONE)); + try { + transaction.getCommitResponse(); + fail("missing expected exception"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + assertNull(transaction.getCommitResponseOrNull()); + } + + @Test + public void testGetCommitResponseAfterDdl() { + ParsedStatement ddl = createParsedDdl(VALID_DDL); + SingleUseTransaction transaction = createSubject(); + get(transaction.executeDdlAsync(CallType.SYNC, 
ddl)); + try { + transaction.getCommitResponse(); + fail("missing expected exception"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + assertNull(transaction.getCommitResponseOrNull()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SpannerExceptionMatcher.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SpannerExceptionMatcher.java new file mode 100644 index 000000000000..b12f65872686 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SpannerExceptionMatcher.java @@ -0,0 +1,65 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerException; +import com.google.common.base.Preconditions; +import org.hamcrest.BaseMatcher; +import org.hamcrest.Description; + +public final class SpannerExceptionMatcher extends BaseMatcher { + private final ErrorCode errorCode; + private final String message; + + public static SpannerExceptionMatcher matchCode(ErrorCode errorCode) { + Preconditions.checkNotNull(errorCode); + return new SpannerExceptionMatcher(errorCode, null); + } + + public static SpannerExceptionMatcher matchCodeAndMessage(ErrorCode errorCode, String message) { + Preconditions.checkNotNull(errorCode); + Preconditions.checkNotNull(message); + return new SpannerExceptionMatcher(errorCode, message); + } + + private SpannerExceptionMatcher(ErrorCode errorCode, String message) { + this.errorCode = errorCode; + this.message = message; + } + + @Override + public boolean matches(Object item) { + if (item instanceof SpannerException) { + SpannerException exception = (SpannerException) item; + if (message == null) { + return exception.getErrorCode().equals(errorCode); + } + return exception.getErrorCode().equals(errorCode) + && exception.getMessage().equals(errorCode.name() + ": " + message); + } + return false; + } + + @Override + public void describeTo(Description description) { + description.appendText(SpannerException.class.getName() + " with code " + errorCode.name()); + if (message != null) { + description.appendText(" - " + SpannerException.class.getName() + " with message " + message); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SpannerPoolTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SpannerPoolTest.java new file mode 100644 index 000000000000..b03288354b24 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SpannerPoolTest.java @@ -0,0 +1,774 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.auth.Credentials; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.BatchClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SessionPoolOptions; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.connection.ConnectionImpl.LeakedConnectionException; +import com.google.cloud.spanner.connection.SpannerPool.CheckAndCloseSpannersMode; +import com.google.cloud.spanner.connection.SpannerPool.SpannerPoolKey; +import com.google.common.base.Ticker; +import com.google.common.testing.FakeTicker; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import java.io.ByteArrayOutputStream; 
+import java.io.OutputStream; +import java.util.concurrent.TimeUnit; +import java.util.logging.ConsoleHandler; +import java.util.logging.Handler; +import java.util.logging.Logger; +import java.util.logging.StreamHandler; +import org.junit.AfterClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class SpannerPoolTest { + private static final String URI = + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database"; + private ConnectionImpl connection1 = mock(ConnectionImpl.class); + private ConnectionImpl connection2 = mock(ConnectionImpl.class); + private ConnectionImpl connection3 = mock(ConnectionImpl.class); + private String credentials1 = "credentials1"; + private String credentials2 = "credentials2"; + private ConnectionOptions options1 = mock(ConnectionOptions.class); + private ConnectionOptions options2 = mock(ConnectionOptions.class); + private ConnectionOptions options3 = mock(ConnectionOptions.class); + private ConnectionOptions options4 = mock(ConnectionOptions.class); + + private ConnectionOptions options5 = mock(ConnectionOptions.class); + private ConnectionOptions options6 = mock(ConnectionOptions.class); + private ConnectionOptions options7 = mock(ConnectionOptions.class); + private ConnectionOptions options8 = mock(ConnectionOptions.class); + + private ConnectionOptions optionsOpenTelemetry1 = mock(ConnectionOptions.class); + private ConnectionOptions optionsOpenTelemetry2 = mock(ConnectionOptions.class); + private ConnectionOptions optionsOpenTelemetry3 = mock(ConnectionOptions.class); + + private SpannerPool createSubjectAndMocks() { + return createSubjectAndMocks(0L, Ticker.systemTicker()); + } + + private SpannerPool createSubjectAndMocks( + long closeSpannerAfterMillisecondsUnused, Ticker ticker) { + SpannerPool pool = + new SpannerPool(closeSpannerAfterMillisecondsUnused, ticker) { + @Override + Spanner createSpanner(SpannerPoolKey 
key, ConnectionOptions options) { + return mock(Spanner.class); + } + }; + + OpenTelemetry openTelemetry1 = OpenTelemetrySdk.builder().build(); + OpenTelemetry openTelemetry2 = OpenTelemetrySdk.builder().build(); + + when(options1.getCredentialsUrl()).thenReturn(credentials1); + when(options1.getProjectId()).thenReturn("test-project-1"); + when(options2.getCredentialsUrl()).thenReturn(credentials2); + when(options2.getProjectId()).thenReturn("test-project-1"); + + when(options3.getCredentialsUrl()).thenReturn(credentials1); + when(options3.getProjectId()).thenReturn("test-project-2"); + when(options4.getCredentialsUrl()).thenReturn(credentials2); + when(options4.getProjectId()).thenReturn("test-project-2"); + + // ConnectionOptions with no specific credentials. + when(options5.getProjectId()).thenReturn("test-project-3"); + when(options6.getProjectId()).thenReturn("test-project-3"); + when(options7.getProjectId()).thenReturn("test-project-3"); + when(options7.isRouteToLeader()).thenReturn(true); + when(options8.getProjectId()).thenReturn("test-project-3"); + when(options8.isRouteToLeader()).thenReturn(false); + + when(optionsOpenTelemetry1.getProjectId()).thenReturn("test-project-1"); + when(optionsOpenTelemetry1.getOpenTelemetry()).thenReturn(openTelemetry1); + when(optionsOpenTelemetry2.getProjectId()).thenReturn("test-project-1"); + when(optionsOpenTelemetry2.getOpenTelemetry()).thenReturn(openTelemetry1); + when(optionsOpenTelemetry3.getProjectId()).thenReturn("test-project-1"); + when(optionsOpenTelemetry3.getOpenTelemetry()).thenReturn(openTelemetry2); + + return pool; + } + + @AfterClass + public static void closeSpannerPool() { + SpannerPool.closeSpannerPool(); + } + + @Test + public void testGetSpanner() { + SpannerPool pool = createSubjectAndMocks(); + Spanner spanner1; + Spanner spanner2; + + // assert equal + spanner1 = pool.getSpanner(options1, connection1); + spanner2 = pool.getSpanner(options1, connection2); + assertEquals(spanner1, spanner2); + 
spanner1 = pool.getSpanner(options2, connection1); + spanner2 = pool.getSpanner(options2, connection2); + assertEquals(spanner1, spanner2); + spanner1 = pool.getSpanner(options3, connection1); + spanner2 = pool.getSpanner(options3, connection2); + assertEquals(spanner1, spanner2); + spanner1 = pool.getSpanner(options4, connection1); + spanner2 = pool.getSpanner(options4, connection2); + assertEquals(spanner1, spanner2); + // Options 5 and 6 both use default credentials. + spanner1 = pool.getSpanner(options5, connection1); + spanner2 = pool.getSpanner(options6, connection2); + assertEquals(spanner1, spanner2); + + // assert not equal + spanner1 = pool.getSpanner(options1, connection1); + spanner2 = pool.getSpanner(options2, connection2); + assertNotEquals(spanner1, spanner2); + spanner1 = pool.getSpanner(options1, connection1); + spanner2 = pool.getSpanner(options3, connection2); + assertNotEquals(spanner1, spanner2); + spanner1 = pool.getSpanner(options1, connection1); + spanner2 = pool.getSpanner(options4, connection2); + assertNotEquals(spanner1, spanner2); + spanner1 = pool.getSpanner(options2, connection1); + spanner2 = pool.getSpanner(options3, connection2); + assertNotEquals(spanner1, spanner2); + spanner1 = pool.getSpanner(options2, connection1); + spanner2 = pool.getSpanner(options4, connection2); + assertNotEquals(spanner1, spanner2); + spanner1 = pool.getSpanner(options3, connection1); + spanner2 = pool.getSpanner(options4, connection2); + assertNotEquals(spanner1, spanner2); + spanner1 = pool.getSpanner(options7, connection1); + spanner2 = pool.getSpanner(options8, connection2); + assertNotEquals(spanner1, spanner2); + } + + @Test + public void testRemoveConnection() { + SpannerPool pool = createSubjectAndMocks(); + Spanner spanner1; + Spanner spanner2; + + // assert equal + spanner1 = pool.getSpanner(options1, connection1); + spanner2 = pool.getSpanner(options1, connection2); + assertThat(spanner1).isEqualTo(spanner2); + // one connection removed, 
assert that we would still get the same Spanner + pool.removeConnection(options1, connection1); + spanner1 = pool.getSpanner(options1, connection1); + assertThat(spanner1).isEqualTo(spanner2); + // remove two connections, assert that we would still get the same Spanner, as Spanners are not + // directly closed and removed. + pool.removeConnection(options1, connection1); + pool.removeConnection(options1, connection2); + spanner1 = pool.getSpanner(options1, connection1); + assertThat(spanner1).isEqualTo(spanner2); + // remove the last connection again + pool.removeConnection(options1, connection1); + } + + private static Logger log = Logger.getLogger(SpannerPool.class.getName()); + private static OutputStream logCapturingStream; + private static StreamHandler customLogHandler; + private static boolean useParentHandlers; + + private void attachLogCapturer() { + logCapturingStream = new ByteArrayOutputStream(); + Logger currentLogger = log; + Handler[] handlers = new Handler[0]; + while (handlers.length == 0 && currentLogger != null) { + handlers = currentLogger.getHandlers(); + currentLogger = currentLogger.getParent(); + } + if (handlers.length == 0) { + handlers = new Handler[1]; + handlers[0] = new ConsoleHandler(); + } + customLogHandler = new StreamHandler(logCapturingStream, handlers[0].getFormatter()); + useParentHandlers = log.getUseParentHandlers(); + log.setUseParentHandlers(false); + log.addHandler(customLogHandler); + } + + private String getTestCapturedLog() { + customLogHandler.flush(); + return logCapturingStream.toString(); + } + + @AfterClass + public static void resetUseParentHandlers() { + if (useParentHandlers) { + log.setUseParentHandlers(true); + } + } + + @Test + public void testRemoveConnectionOptionsNotRegistered() { + attachLogCapturer(); + final String expectedLogPart = "There is no Spanner registered for ConnectionOptions"; + SpannerPool pool = createSubjectAndMocks(); + pool.getSpanner(options1, connection1); + 
pool.removeConnection(options2, connection1); + String capturedLog = getTestCapturedLog(); + assertThat(capturedLog.contains(expectedLogPart)).isTrue(); + } + + @Test + public void testRemoveConnectionConnectionNotRegistered() { + attachLogCapturer(); + final String expectedLogPart = "There are no connections registered for ConnectionOptions"; + SpannerPool pool = createSubjectAndMocks(); + pool.getSpanner(options1, connection1); + pool.removeConnection(options1, connection2); + String capturedLog = getTestCapturedLog(); + assertThat(capturedLog.contains(expectedLogPart)).isTrue(); + } + + @Test + public void testRemoveConnectionConnectionAlreadyRemoved() { + attachLogCapturer(); + final String expectedLogPart = "There are no connections registered for ConnectionOptions"; + SpannerPool pool = createSubjectAndMocks(); + pool.getSpanner(options1, connection1); + pool.removeConnection(options1, connection1); + pool.removeConnection(options1, connection1); + String capturedLog = getTestCapturedLog(); + assertThat(capturedLog.contains(expectedLogPart)).isTrue(); + } + + @Test + public void testCloseSpanner() { + attachLogCapturer(); + SpannerPool pool = createSubjectAndMocks(); + Spanner spanner = pool.getSpanner(options1, connection1); + // verify that closing is not possible until all connections have been removed + boolean exception = false; + try { + pool.checkAndCloseSpanners(); + } catch (SpannerException e) { + exception = e.getErrorCode() == ErrorCode.FAILED_PRECONDITION; + } + assertThat(exception).isTrue(); + + // remove the connection and verify that it is possible to close + pool.removeConnection(options1, connection1); + pool.checkAndCloseSpanners(); + verify(spanner).close(); + + final String expectedLogPart = + "WARNING: There is/are 1 connection(s) still open. 
Close all connections before stopping" + + " the application"; + Spanner spanner2 = pool.getSpanner(options1, connection1); + pool.checkAndCloseSpanners(CheckAndCloseSpannersMode.WARN); + String capturedLog = getTestCapturedLog(); + assertThat(capturedLog.contains(expectedLogPart)).isTrue(); + verify(spanner2, never()).close(); + + // remove the connection and verify that it is possible to close + pool.removeConnection(options1, connection1); + pool.checkAndCloseSpanners(CheckAndCloseSpannersMode.WARN); + verify(spanner2).close(); + } + + @Test + public void testLeakedConnection() { + attachLogCapturer(); + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOptions( + SessionPoolOptions.newBuilder() + .setMinSessions(0) + .setAutoDetectDialect(false) + .build()) + .setUri(URI) + .build(); + DdlClient ddlClient = mock(DdlClient.class); + DatabaseClient dbClient = mock(DatabaseClient.class); + when(dbClient.getDialect()).thenReturn(Dialect.GOOGLE_STANDARD_SQL); + // create an actual connection object but not in a try-with-resources block + Connection connection = + new ConnectionImpl( + options, SpannerPool.INSTANCE, ddlClient, dbClient, mock(BatchClient.class)); + // try to close the application which should fail + try { + ConnectionOptions.closeSpanner(); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.FAILED_PRECONDITION); + } + String capturedLog = getTestCapturedLog(); + assertThat(capturedLog.contains(LeakedConnectionException.class.getName())).isTrue(); + assertThat(capturedLog.contains("testLeakedConnection")).isTrue(); + // Now close the connection to avoid trouble with other test cases. 
+ connection.close(); + } + + @Test + public void testCloseUnusedSpanners() { + SpannerPool pool = createSubjectAndMocks(); + Spanner spanner1; + Spanner spanner2; + Spanner spanner3; + + // create two connections that use the same Spanner + spanner1 = pool.getSpanner(options1, connection1); + spanner2 = pool.getSpanner(options1, connection2); + assertThat(spanner1).isEqualTo(spanner2); + + // all spanners are in use, this should have no effect + pool.closeUnusedSpanners(-1L); + verify(spanner1, never()).close(); + + // close one connection. This should also have no effect. + pool.removeConnection(options1, connection1); + pool.closeUnusedSpanners(-1L); + verify(spanner1, never()).close(); + + // close the other connection as well, the Spanner object should now be closed. + pool.removeConnection(options1, connection2); + pool.closeUnusedSpanners(-1L); + verify(spanner1).close(); + + // create three connections that use two different Spanners + spanner1 = pool.getSpanner(options1, connection1); + spanner2 = pool.getSpanner(options2, connection2); + spanner3 = pool.getSpanner(options2, connection3); + assertThat(spanner1).isNotEqualTo(spanner2); + assertThat(spanner2).isEqualTo(spanner3); + + // all spanners are in use, this should have no effect + pool.closeUnusedSpanners(-1L); + verify(spanner1, never()).close(); + verify(spanner2, never()).close(); + verify(spanner3, never()).close(); + + // close connection1. That should also mark spanner1 as no longer in use + pool.removeConnection(options1, connection1); + pool.closeUnusedSpanners(-1L); + verify(spanner1).close(); + verify(spanner2, never()).close(); + verify(spanner3, never()).close(); + + // close connection2. That should have no effect, as connection3 is still using spanner2 + pool.removeConnection(options2, connection2); + pool.closeUnusedSpanners(-1L); + verify(spanner1).close(); + verify(spanner2, never()).close(); + verify(spanner3, never()).close(); + + // close connection3. Now all should be closed. 
+ pool.removeConnection(options2, connection3); + pool.closeUnusedSpanners(-1L); + verify(spanner1).close(); + verify(spanner2).close(); + verify(spanner3).close(); + } + + private static final long TEST_AUTOMATIC_CLOSE_TIMEOUT_MILLIS = 60_000L; + private static final long TEST_AUTOMATIC_CLOSE_TIMEOUT_NANOS = + TimeUnit.NANOSECONDS.convert(TEST_AUTOMATIC_CLOSE_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS); + private static final long MILLISECOND = TimeUnit.NANOSECONDS.convert(1L, TimeUnit.MILLISECONDS); + + @Test + public void testAutomaticCloser() { + FakeTicker ticker = new FakeTicker(); + SpannerPool pool = createSubjectAndMocks(TEST_AUTOMATIC_CLOSE_TIMEOUT_MILLIS, ticker); + Spanner spanner1; + Spanner spanner2; + Spanner spanner3; + + // create two connections that use the same Spanner + spanner1 = pool.getSpanner(options1, connection1); + spanner2 = pool.getSpanner(options1, connection2); + assertThat(spanner1).isEqualTo(spanner2); + + // all spanners are in use, this should have no effect + ticker.advance(TEST_AUTOMATIC_CLOSE_TIMEOUT_NANOS + MILLISECOND); + pool.closeUnusedSpanners(TEST_AUTOMATIC_CLOSE_TIMEOUT_MILLIS); + verify(spanner1, never()).close(); + + // close one connection. This should also have no effect. + pool.removeConnection(options1, connection1); + ticker.advance(TEST_AUTOMATIC_CLOSE_TIMEOUT_NANOS + MILLISECOND); + pool.closeUnusedSpanners(TEST_AUTOMATIC_CLOSE_TIMEOUT_MILLIS); + verify(spanner1, never()).close(); + + // close the other connection as well, the Spanner object should now be closed. 
+ pool.removeConnection(options1, connection2); + ticker.advance(TEST_AUTOMATIC_CLOSE_TIMEOUT_NANOS + MILLISECOND); + pool.closeUnusedSpanners(TEST_AUTOMATIC_CLOSE_TIMEOUT_MILLIS); + verify(spanner1).close(); + + // create three connections that use two different Spanners + spanner1 = pool.getSpanner(options1, connection1); + spanner2 = pool.getSpanner(options2, connection2); + spanner3 = pool.getSpanner(options2, connection3); + assertThat(spanner1).isNotEqualTo(spanner2); + assertThat(spanner2).isEqualTo(spanner3); + + // all spanners are in use, this should have no effect + ticker.advance(TEST_AUTOMATIC_CLOSE_TIMEOUT_NANOS + MILLISECOND); + pool.closeUnusedSpanners(TEST_AUTOMATIC_CLOSE_TIMEOUT_MILLIS); + verify(spanner1, never()).close(); + verify(spanner2, never()).close(); + verify(spanner3, never()).close(); + + // close connection1. That should also mark spanner1 as no longer in use + pool.removeConnection(options1, connection1); + ticker.advance(TEST_AUTOMATIC_CLOSE_TIMEOUT_NANOS + MILLISECOND); + pool.closeUnusedSpanners(TEST_AUTOMATIC_CLOSE_TIMEOUT_MILLIS); + verify(spanner1).close(); + verify(spanner2, never()).close(); + verify(spanner3, never()).close(); + + // close connection2. That should have no effect, as connection3 is still using spanner2 + pool.removeConnection(options2, connection2); + ticker.advance(TEST_AUTOMATIC_CLOSE_TIMEOUT_NANOS + MILLISECOND); + pool.closeUnusedSpanners(TEST_AUTOMATIC_CLOSE_TIMEOUT_MILLIS); + verify(spanner1).close(); + verify(spanner2, never()).close(); + verify(spanner3, never()).close(); + + // close connection3. Now all should be closed. 
+ pool.removeConnection(options2, connection3); + ticker.advance(TEST_AUTOMATIC_CLOSE_TIMEOUT_NANOS + MILLISECOND); + pool.closeUnusedSpanners(TEST_AUTOMATIC_CLOSE_TIMEOUT_MILLIS); + verify(spanner1).close(); + verify(spanner2).close(); + verify(spanner3).close(); + } + + @Test + public void testSpannerPoolKeyEquality() { + ConnectionOptions options1 = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner://localhost:9010/projects/p1/instances/i/databases/d" + + "?minSessions=200;maxSessions=400;numChannels=8;usePlainText=true;userAgent=test-agent") + .setCredentials(mock(Credentials.class)) + .build(); + // options2 equals the default session pool options, and is therefore equal to ConnectionOptions + // without any session pool configuration. + ConnectionOptions options2 = + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/p/instances/i/databases/d?minSessions=100;maxSessions=400") + .setCredentials(NoCredentials.getInstance()) + .build(); + ConnectionOptions options3 = + ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d") + .setCredentials(NoCredentials.getInstance()) + .build(); + // Not passing in routeToLeader in Connection URI is equivalent to passing it as true, + // as routeToLeader is true by default. 
+ ConnectionOptions options4 = + ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d?routeToLeader=true") + .setCredentials(NoCredentials.getInstance()) + .build(); + ConnectionOptions options5 = + ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d?routeToLeader=false") + .setCredentials(NoCredentials.getInstance()) + .build(); + + SpannerPoolKey key1 = SpannerPoolKey.of(options1); + SpannerPoolKey key2 = SpannerPoolKey.of(options2); + SpannerPoolKey key3 = SpannerPoolKey.of(options3); + SpannerPoolKey key4 = SpannerPoolKey.of(options4); + SpannerPoolKey key5 = SpannerPoolKey.of(options5); + + assertNotEquals(key1, key2); + assertEquals(key2, key3); + assertNotEquals(key1, key3); + assertNotEquals(key1, new Object()); + assertEquals(key3, key4); + assertNotEquals(key4, key5); + } + + @Test + public void testEnableApiTracing() { + SpannerPoolKey keyWithoutApiTracingConfig = + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d") + .setCredentials(NoCredentials.getInstance()) + .build()); + SpannerPoolKey keyWithApiTracingEnabled = + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d?enableApiTracing=true") + .setCredentials(NoCredentials.getInstance()) + .build()); + SpannerPoolKey keyWithApiTracingDisabled = + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d?enableApiTracing=false") + .setCredentials(NoCredentials.getInstance()) + .build()); + + assertNotEquals(keyWithoutApiTracingConfig, keyWithApiTracingEnabled); + assertNotEquals(keyWithoutApiTracingConfig, keyWithApiTracingDisabled); + assertNotEquals(keyWithApiTracingEnabled, keyWithApiTracingDisabled); + + assertEquals( + keyWithApiTracingEnabled, + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + 
.setUri("cloudspanner:/projects/p/instances/i/databases/d?enableApiTracing=true") + .setCredentials(NoCredentials.getInstance()) + .build())); + assertEquals( + keyWithApiTracingDisabled, + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d?enableApiTracing=false") + .setCredentials(NoCredentials.getInstance()) + .build())); + assertEquals( + keyWithoutApiTracingConfig, + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d") + .setCredentials(NoCredentials.getInstance()) + .build())); + } + + @Test + public void testEnableEndToEndTracing() { + SpannerPoolKey keyWithoutApiTracingConfig = + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d") + .setCredentials(NoCredentials.getInstance()) + .build()); + SpannerPoolKey keyWithApiTracingEnabled = + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/p/instances/i/databases/d?enableEndToEndTracing=true") + .setCredentials(NoCredentials.getInstance()) + .build()); + SpannerPoolKey keyWithApiTracingDisabled = + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/p/instances/i/databases/d?enableEndToEndTracing=false") + .setCredentials(NoCredentials.getInstance()) + .build()); + + assertNotEquals(keyWithoutApiTracingConfig, keyWithApiTracingEnabled); + assertEquals(keyWithoutApiTracingConfig, keyWithApiTracingDisabled); + assertNotEquals(keyWithApiTracingEnabled, keyWithApiTracingDisabled); + + assertEquals( + keyWithApiTracingEnabled, + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/p/instances/i/databases/d?enableEndToEndTracing=true") + .setCredentials(NoCredentials.getInstance()) + .build())); + assertEquals( + keyWithApiTracingDisabled, + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri( + 
"cloudspanner:/projects/p/instances/i/databases/d?enableEndToEndTracing=false") + .setCredentials(NoCredentials.getInstance()) + .build())); + assertEquals( + keyWithoutApiTracingConfig, + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d") + .setCredentials(NoCredentials.getInstance()) + .build())); + } + + @Test + public void testOpenTelemetry() { + SpannerPool pool = createSubjectAndMocks(); + Spanner spanner1; + Spanner spanner2; + + // assert equal + spanner1 = pool.getSpanner(optionsOpenTelemetry1, connection1); + spanner2 = pool.getSpanner(optionsOpenTelemetry2, connection2); + assertEquals(spanner1, spanner2); + + // assert not equal + spanner1 = pool.getSpanner(optionsOpenTelemetry1, connection1); + spanner2 = pool.getSpanner(optionsOpenTelemetry3, connection2); + assertNotEquals(spanner1, spanner2); + } + + @Test + public void testDynamicChannelPoolSettings() { + SpannerPoolKey keyWithoutDcp = + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d") + .setCredentials(NoCredentials.getInstance()) + .build()); + SpannerPoolKey keyWithDcpEnabled = + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/p/instances/i/databases/d?enableDynamicChannelPool=true") + .setCredentials(NoCredentials.getInstance()) + .build()); + SpannerPoolKey keyWithDcpDisabled = + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/p/instances/i/databases/d?enableDynamicChannelPool=false") + .setCredentials(NoCredentials.getInstance()) + .build()); + SpannerPoolKey keyWithDcpAndMinChannels = + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/p/instances/i/databases/d?enableDynamicChannelPool=true;dcpMinChannels=3") + .setCredentials(NoCredentials.getInstance()) + .build()); + SpannerPoolKey keyWithDcpAndMaxChannels = + SpannerPoolKey.of( + 
ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/p/instances/i/databases/d?enableDynamicChannelPool=true;dcpMaxChannels=15") + .setCredentials(NoCredentials.getInstance()) + .build()); + SpannerPoolKey keyWithDcpAndInitialChannels = + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/p/instances/i/databases/d?enableDynamicChannelPool=true;dcpInitialChannels=5") + .setCredentials(NoCredentials.getInstance()) + .build()); + + // DCP settings should affect the SpannerPoolKey + assertNotEquals(keyWithoutDcp, keyWithDcpEnabled); + assertNotEquals(keyWithoutDcp, keyWithDcpDisabled); + assertNotEquals(keyWithDcpEnabled, keyWithDcpDisabled); + + // Different channel settings should create different keys + assertNotEquals(keyWithDcpEnabled, keyWithDcpAndMinChannels); + assertNotEquals(keyWithDcpEnabled, keyWithDcpAndMaxChannels); + assertNotEquals(keyWithDcpEnabled, keyWithDcpAndInitialChannels); + + // Same configuration should create equal keys + assertEquals( + keyWithDcpEnabled, + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/p/instances/i/databases/d?enableDynamicChannelPool=true") + .setCredentials(NoCredentials.getInstance()) + .build())); + assertEquals( + keyWithDcpAndMinChannels, + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/p/instances/i/databases/d?enableDynamicChannelPool=true;dcpMinChannels=3") + .setCredentials(NoCredentials.getInstance()) + .build())); + } + + @Test + public void testDynamicChannelPoolWithAllSettings() { + SpannerPoolKey keyWithAllDcpSettings = + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/p/instances/i/databases/d" + + "?enableDynamicChannelPool=true;dcpMinChannels=3;dcpMaxChannels=15;dcpInitialChannels=5") + .setCredentials(NoCredentials.getInstance()) + .build()); + SpannerPoolKey keyWithDifferentMaxChannels = + SpannerPoolKey.of( + 
ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/p/instances/i/databases/d" + + "?enableDynamicChannelPool=true;dcpMinChannels=3;dcpMaxChannels=20;dcpInitialChannels=5") + .setCredentials(NoCredentials.getInstance()) + .build()); + + assertNotEquals(keyWithAllDcpSettings, keyWithDifferentMaxChannels); + + // Same configuration should be equal + assertEquals( + keyWithAllDcpSettings, + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/p/instances/i/databases/d" + + "?enableDynamicChannelPool=true;dcpMinChannels=3;dcpMaxChannels=15;dcpInitialChannels=5") + .setCredentials(NoCredentials.getInstance()) + .build())); + } + + @Test + public void testExplicitlyDisabledDynamicChannelPool() { + SpannerPoolKey keyWithoutDcpSetting = + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d") + .setCredentials(NoCredentials.getInstance()) + .build()); + SpannerPoolKey keyWithDcpExplicitlyDisabled = + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/p/instances/i/databases/d?enableDynamicChannelPool=false") + .setCredentials(NoCredentials.getInstance()) + .build()); + + // Keys should be different because one has explicit false and one has null (default) + assertNotEquals(keyWithoutDcpSetting, keyWithDcpExplicitlyDisabled); + + // Verify the explicit false setting is preserved + assertEquals( + keyWithDcpExplicitlyDisabled, + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/p/instances/i/databases/d?enableDynamicChannelPool=false") + .setCredentials(NoCredentials.getInstance()) + .build())); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SpannerStatementParserTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SpannerStatementParserTest.java new file mode 100644 index 
000000000000..048f95ed7784 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SpannerStatementParserTest.java @@ -0,0 +1,298 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.ErrorCode.INVALID_ARGUMENT; +import static com.google.cloud.spanner.connection.StatementParserTest.assertUnclosedLiteral; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.StatementParserTest.CommentInjector; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class SpannerStatementParserTest { + + static String skip(String sql) { + return skip(sql, 0); + } + + static String skip(String sql, int currentIndex) { + int position = + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + .skip(sql, currentIndex, null); + return sql.substring(currentIndex, position); + } + + @Test + public void testRemoveCommentsAndTrim() { + AbstractStatementParser parser = + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL); + 
+ // Statements that should parse correctly + String[] validStatements = + new String[] { + "SELECT '\\\\'", // SELECT '\\' (escaped backslash, followed by quote) + "SELECT '\\''", // SELECT '\'' (escaped quote, followed by an actual closing quote) + "SELECT '\\\\\\\\'" // SELECT '\\\\' (two escaped backslashes) + }; + for (String sql : validStatements) { + assertEquals(sql, parser.removeCommentsAndTrim(sql)); + } + + // Statements that contain an unclosed literal because the final quote is + // escaped + String[] invalidStatements = + new String[] { + "SELECT '\\'" // SELECT '\' (escaped closing quote) + }; + + for (String sql : invalidStatements) { + try { + parser.removeCommentsAndTrim(sql); + fail("Expected SpannerException for unclosed literal: " + sql); + } catch (SpannerException e) { + assertEquals(INVALID_ARGUMENT, e.getErrorCode()); + } + } + } + + @Test + public void testReturningClauseWithBackslashes() { + AbstractStatementParser parser = + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL); + + // Valid returning clause, double backslash in string literal should be handled + // correctly. + String sqlWithReturning = "INSERT INTO my_table (value) VALUES ('foo \\\\ bar') THEN RETURN id"; + assertTrue(parser.parse(Statement.of(sqlWithReturning)).hasReturningClause()); + + // No returning clause, `then return` is inside a string literal with a double + // backslash. 
+ String sqlWithoutReturning = "INSERT INTO my_table (value) VALUES ('then \\\\ return')"; + assertFalse(parser.parse(Statement.of(sqlWithoutReturning)).hasReturningClause()); + } + + @Test + public void testSkip() { + assertEquals("", skip("")); + assertEquals("1", skip("1 ")); + assertEquals("1", skip("12 ")); + assertEquals("2", skip("12 ", 1)); + assertEquals("", skip("12", 2)); + + assertEquals("'foo'", skip("'foo' ", 0)); + assertEquals("'foo'", skip("'foo''bar' ", 0)); + assertEquals("'foo'", skip("'foo' 'bar' ", 0)); + assertEquals("'bar'", skip("'foo''bar' ", 5)); + assertEquals("'foo\"bar\"'", skip("'foo\"bar\"' ", 0)); + assertEquals("\"foo'bar'\"", skip("\"foo'bar'\" ", 0)); + assertEquals("`foo'bar'`", skip("`foo'bar'` ", 0)); + assertEquals("'test\\\\'", skip("'test\\\\'", 0)); + + assertEquals("'''foo'bar'''", skip("'''foo'bar''' ", 0)); + assertEquals("'''foo\\'bar'''", skip("'''foo\\'bar''' ", 0)); + assertEquals("'''foo\\'\\'bar'''", skip("'''foo\\'\\'bar''' ", 0)); + assertEquals("'''foo\\'\\'\\'bar'''", skip("'''foo\\'\\'\\'bar''' ", 0)); + assertEquals("\"\"\"foo'bar\"\"\"", skip("\"\"\"foo'bar\"\"\"", 0)); + assertEquals("```foo'bar```", skip("```foo'bar```", 0)); + + assertEquals("-- comment\n", skip("-- comment\nselect * from foo", 0)); + assertEquals("# comment\n", skip("# comment\nselect * from foo", 0)); + assertEquals("/* comment */", skip("/* comment */ select * from foo", 0)); + assertEquals( + "/* comment /* GoogleSQL does not support nested comments */", + skip("/* comment /* GoogleSQL does not support nested comments */ select * from foo", 0)); + // GoogleSQL does not support dollar-quoted strings. 
+ assertEquals("$", skip("$tag$not a string$tag$ select * from foo", 0)); + + assertEquals("/* 'test' */", skip("/* 'test' */ foo")); + assertEquals("-- 'test' \n", skip("-- 'test' \n foo")); + assertEquals("'/* test */'", skip("'/* test */' foo")); + + // Raw strings do not consider '\' as something that starts an escape sequence, but any + // quote character following it is still preserved within the string, as the definition of a + // raw string says that 'both characters are preserved'. + assertEquals("'foo\\''", skip("'foo\\'' ", 0)); + assertEquals("'foo\\''", skip("r'foo\\'' ", 1)); + assertEquals("'''foo\\'\\'\\'bar'''", skip("'''foo\\'\\'\\'bar''' ", 0)); + } + + @Test + public void testConvertPositionalParametersToNamedParameters() { + AbstractStatementParser parser = + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL); + + for (String comment : + new String[] { + "-- test comment\n", + "/* another test comment */", + "/* comment\nwith\nmultiple\nlines\n */", + "/* comment /* with nested */ comment */" + }) { + for (CommentInjector injector : CommentInjector.values()) { + assertEquals( + injector.inject("select * %sfrom foo where name=@p1", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("select * %sfrom foo where name=?", comment)) + .sqlWithNamedParameters); + assertEquals( + injector.inject("@p1%s'?test?\"?test?\"?'@p2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("?%s'?test?\"?test?\"?'?", comment)) + .sqlWithNamedParameters); + assertEquals( + injector.inject("@p1'?it\\'?s'%s@p2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("?'?it\\'?s'%s?", comment)) + .sqlWithNamedParameters); + assertEquals( + injector.inject("@p1'?it\\\"?s'%s@p2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("?'?it\\\"?s'%s?", comment)) + .sqlWithNamedParameters); + assertEquals( + 
 injector.inject("@p1\"?it\\\"?s\"%s@p2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("?\"?it\\\"?s\"%s?", comment)) + .sqlWithNamedParameters); + assertEquals( + injector.inject("@p1%s'''?it\\''?s'''@p2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("?%s'''?it\\''?s'''?", comment)) + .sqlWithNamedParameters); + assertEquals( + injector.inject("@p1\"\"\"?it\\\"\"?s\"\"\"%s@p2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("?\"\"\"?it\\\"\"?s\"\"\"%s?", comment)) + .sqlWithNamedParameters); + + // GoogleSQL does not support dollar-quoted strings, so these are all ignored. + assertEquals( + injector.inject("@p1$$@p2it$@p3s$$%s@p4", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("?$$?it$?s$$%s?", comment)) + .sqlWithNamedParameters); + assertEquals( + injector.inject("@p1$tag$@p2it$$@p3s$tag$%s@p4", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("?$tag$?it$$?s$tag$%s?", comment)) + .sqlWithNamedParameters); + assertEquals( + injector.inject("@p1%s$$@p2it\\'?s \t ?it\\'?s'$$@p3", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("?%s$$?it\\'?s \t ?it\\'?s'$$?", comment)) + .sqlWithNamedParameters); + + // Note: GoogleSQL does not allow a single-quoted string literal to contain line feeds. 
+ assertUnclosedLiteral(parser, injector.inject("?'?it\\''?s \n ?it\\''?s'%s?", comment)); + assertEquals( + "@p1'?it\\''@p2s \n @p3it\\''@p4s@p5", + parser.convertPositionalParametersToNamedParameters('?', "?'?it\\''?s \n ?it\\''?s?") + .sqlWithNamedParameters); + assertEquals( + injector.inject("@p1%s'''?it\\''?s \n ?it\\''?s'''@p2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("?%s'''?it\\''?s \n ?it\\''?s'''?", comment)) + .sqlWithNamedParameters); + + assertEquals( + injector.inject( + "select 1, @p1, 'test?test', \"test?test\", %sfoo.* from `foo` where col1=@p2 and" + + " col2='test' and col3=@p3 and col4='?' and col5=\"?\" and col6='?''?''?'", + comment), + parser.convertPositionalParametersToNamedParameters( + '?', + injector.inject( + "select 1, ?, 'test?test', \"test?test\", %sfoo.* from `foo` where col1=?" + + " and col2='test' and col3=? and col4='?' and col5=\"?\" and" + + " col6='?''?''?'", + comment)) + .sqlWithNamedParameters); + + assertEquals( + injector.inject( + "select * " + + "%sfrom foo " + + "where name=@p1 " + + "and col2 like @p2 " + + "and col3 > @p3", + comment), + parser.convertPositionalParametersToNamedParameters( + '?', + injector.inject( + "select * " + + "%sfrom foo " + + "where name=? " + + "and col2 like ? " + + "and col3 > ?", + comment)) + .sqlWithNamedParameters); + assertEquals( + injector.inject("select * " + "from foo " + "where id between @p1%s and @p2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', + injector.inject( + "select * " + "from foo " + "where id between ?%s and ?", comment)) + .sqlWithNamedParameters); + assertEquals( + injector.inject("select * " + "from foo " + "limit @p1 %s offset @p2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', + injector.inject("select * " + "from foo " + "limit ? 
%s offset ?", comment)) + .sqlWithNamedParameters); + assertEquals( + injector.inject( + "select * " + + "from foo " + + "where col1=@p1 " + + "and col2 like @p2 " + + " %s " + + "and col3 > @p3 " + + "and col4 < @p4 " + + "and col5 != @p5 " + + "and col6 not in (@p6, @p7, @p8) " + + "and col7 in (@p9, @p10, @p11) " + + "and col8 between @p12 and @p13", + comment), + parser.convertPositionalParametersToNamedParameters( + '?', + injector.inject( + "select * " + + "from foo " + + "where col1=? " + + "and col2 like ? " + + " %s " + + "and col3 > ? " + + "and col4 < ? " + + "and col5 != ? " + + "and col6 not in (?, ?, ?) " + + "and col7 in (?, ?, ?) " + + "and col8 between ? and ?", + comment)) + .sqlWithNamedParameters); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SqlScriptVerifier.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SqlScriptVerifier.java new file mode 100644 index 000000000000..8e88d4a14200 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SqlScriptVerifier.java @@ -0,0 +1,189 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.connection.StatementResult.ResultType; + +/** + * SQL script verifier implementation for Spanner {@link + * com.google.cloud.spanner.connection.Connection} + * + * @see AbstractSqlScriptVerifier for more information + */ +public class SqlScriptVerifier extends AbstractSqlScriptVerifier { + + static class ConnectionGenericStatementResult extends GenericStatementResult { + private final StatementResult result; + + private ConnectionGenericStatementResult(StatementResult result) { + this.result = result; + } + + @Override + protected ResultType getResultType() { + return result.getResultType(); + } + + @Override + protected GenericResultSet getResultSet() { + return new ConnectionGenericResultSet(result.getResultSet()); + } + + @Override + protected long getUpdateCount() { + return result.getUpdateCount(); + } + } + + static class ConnectionGenericResultSet extends GenericResultSet { + private final ResultSet resultSet; + + private ConnectionGenericResultSet(ResultSet resultSet) { + this.resultSet = resultSet; + } + + @Override + protected boolean next() { + return resultSet.next(); + } + + @Override + protected Object getValue(String col) { + if (resultSet.isNull(col)) { + return null; + } + Type type = resultSet.getColumnType(col); + switch (type.getCode()) { + case ARRAY: + return getArrayValue(resultSet, col, type.getArrayElementType()); + case BOOL: + return resultSet.getBoolean(col); + case BYTES: + return resultSet.getBytes(col); + case DATE: + return resultSet.getDate(col); + case FLOAT64: + return resultSet.getDouble(col); 
+ case INT64: + return resultSet.getLong(col); + case STRING: + return resultSet.getString(col); + case TIMESTAMP: + return resultSet.getTimestamp(col); + case STRUCT: + throw new IllegalArgumentException("type struct not supported"); + } + throw new IllegalArgumentException("unknown type: " + type); + } + + private Object getArrayValue(ResultSet rs, String col, Type type) { + switch (type.getCode()) { + case BOOL: + return rs.getBooleanList(col); + case BYTES: + return rs.getBytesList(col); + case DATE: + return rs.getDateList(col); + case FLOAT64: + return rs.getDoubleList(col); + case INT64: + return rs.getLongList(col); + case STRING: + return rs.getStringList(col); + case STRUCT: + return rs.getStructList(col); + case TIMESTAMP: + return rs.getTimestampList(col); + case ARRAY: + throw new IllegalArgumentException("array of array not supported"); + } + throw new IllegalArgumentException("unknown type: " + type); + } + + @Override + protected int getColumnCount() { + return resultSet.getColumnCount(); + } + + @Override + protected Object getFirstValue() { + return getValue(resultSet.getType().getStructFields().get(0).getName()); + } + } + + public static class SpannerGenericConnection extends GenericConnection { + private final Connection connection; + + public static SpannerGenericConnection of(Connection connection) { + return new SpannerGenericConnection(connection); + } + + private SpannerGenericConnection(Connection connection) { + this.connection = connection; + } + + @Override + protected GenericStatementResult execute(String sql) { + return new ConnectionGenericStatementResult(connection.execute(Statement.of(sql))); + } + + @Override + public void close() { + if (this.connection != null) { + this.connection.close(); + } + } + + @Override + public Dialect getDialect() { + return connection.getDialect(); + } + } + + public SqlScriptVerifier() { + this(null); + } + + public SqlScriptVerifier(GenericConnectionProvider provider) { + super(provider); + } + + 
@Override + protected void verifyExpectedException( + String statement, Exception e, String code, String messagePrefix) { + assertTrue(e instanceof SpannerException); + SpannerException spannerException = (SpannerException) e; + assertEquals( + statement + " resulted in " + spannerException, + ErrorCode.valueOf(code), + spannerException.getErrorCode()); + if (messagePrefix != null) { + assertTrue( + statement, + e.getMessage().startsWith(messagePrefix.substring(1, messagePrefix.length() - 1))); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SqlTestScriptsGenerator.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SqlTestScriptsGenerator.java new file mode 100644 index 000000000000..654ffb9dc15e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SqlTestScriptsGenerator.java @@ -0,0 +1,39 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.Dialect; + +/** + * Class that runs all generators of SQL test scripts for the Connection API. + * + *

    Run this generator if new client side statements have been added, or if any existing client + * side statements have been modified. The generator can be executed from the command line in the + * google-cloud-spanner directory like this: + * mvn -Ddo_log_statements=true exec:java -Dexec.mainClass=com.google.cloud.spanner.connection.SqlTestScriptsGenerator -Dexec.classpathScope="test" + * + */ +public class SqlTestScriptsGenerator { + + /** Main method for generating the test script */ + public static void main(String[] args) throws Exception { + for (Dialect dialect : Dialect.values()) { + ClientSideStatementsTest.generateTestScript(dialect); + } + ConnectionImplGeneratedSqlScriptTest.generateTestScript(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/StatementHintParserTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/StatementHintParserTest.java new file mode 100644 index 000000000000..d1f276849f01 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/StatementHintParserTest.java @@ -0,0 +1,210 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.StatementHintParser.NO_HINTS; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.Dialect; +import com.google.common.collect.ImmutableMap; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class StatementHintParserTest { + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + @Parameter public Dialect dialect; + + StatementHintParser parserFor(String sql) { + return new StatementHintParser(dialect, sql); + } + + String getStartHint() { + return dialect == Dialect.POSTGRESQL ? "/*@" : "@{"; + } + + String getEndHint() { + return dialect == Dialect.POSTGRESQL ? 
"*/" : "}"; + } + + String encloseInHint(String sql) { + return getStartHint() + sql + getEndHint(); + } + + @Test + public void testNoHints() { + assertFalse(parserFor("select foo from bar").hasStatementHints()); + assertFalse(parserFor("/* comment */ select foo from bar").hasStatementHints()); + assertFalse(parserFor("select foo from bar").hasStatementHints()); + assertFalse(parserFor("select foo from bar").hasStatementHints()); + } + + @Test + public void testExtractHints() { + StatementHintParser parser; + + parser = parserFor(encloseInHint("statement_tag=tag1") + " select 1"); + assertTrue(parser.hasStatementHints()); + assertEquals(ImmutableMap.of("statement_tag", "tag1"), parser.getClientSideStatementHints()); + assertEquals(" select 1", parser.getSqlWithoutClientSideHints()); + + parser = parserFor(encloseInHint("statement_tag=tag1, other_hint=value") + " select 1"); + assertTrue(parser.hasStatementHints()); + assertEquals(ImmutableMap.of("statement_tag", "tag1"), parser.getClientSideStatementHints()); + assertEquals( + encloseInHint(" other_hint=value") + " select 1", parser.getSqlWithoutClientSideHints()); + + parser = parserFor(encloseInHint("other_hint=value") + " select 1"); + assertTrue(parser.hasStatementHints()); + assertEquals(NO_HINTS, parser.getClientSideStatementHints()); + assertEquals( + encloseInHint("other_hint=value") + " select 1", parser.getSqlWithoutClientSideHints()); + + parser = parserFor(encloseInHint("statement_tag=tag1, rpc_priority=high") + " select 1"); + assertTrue(parser.hasStatementHints()); + assertEquals( + ImmutableMap.of("statement_tag", "tag1", "rpc_priority", "high"), + parser.getClientSideStatementHints()); + assertEquals(" select 1", parser.getSqlWithoutClientSideHints()); + + parser = parserFor(encloseInHint("rpc_priority=medium, statement_tag='value 2'") + " select 1"); + assertTrue(parser.hasStatementHints()); + assertEquals( + ImmutableMap.of("rpc_priority", "medium", "statement_tag", "value 2"), + 
parser.getClientSideStatementHints()); + assertEquals(" select 1", parser.getSqlWithoutClientSideHints()); + + parser = + parserFor( + "/* comment */ " + + encloseInHint( + "/*comment*/statement_tag--comment\n" + + "=--comment\nvalue1\n,rpc_priority=Low/*comment*/") + + " /* yet another comment */ select 1"); + assertTrue(parser.hasStatementHints()); + assertEquals( + ImmutableMap.of("statement_tag", "value1", "rpc_priority", "Low"), + parser.getClientSideStatementHints()); + assertEquals(" /* yet another comment */ select 1", parser.getSqlWithoutClientSideHints()); + + parser = + parserFor( + "/* comment */ " + + encloseInHint( + "/*comment*/statement_tag--comment\n" + + "=--comment\nvalue1\n," + + "/* other hint comment */ other_hint='some value',\n" + + "rpc_priority=Low/*comment*/") + + " /* yet another comment */ select 1"); + assertTrue(parser.hasStatementHints()); + assertEquals( + ImmutableMap.of("statement_tag", "value1", "rpc_priority", "Low"), + parser.getClientSideStatementHints()); + assertEquals( + "/* comment */ " + + encloseInHint( + "/*comment*//* other hint comment */ other_hint='some value',\n" + "/*comment*/") + + " /* yet another comment */ select 1", + parser.getSqlWithoutClientSideHints()); + + parser = + parserFor( + encloseInHint( + "statement_tag=tag1,\n" + + "other_hint1='some value',\n" + + "rpc_priority=low,\n" + + "other_hint2=value") + + "\nselect 1"); + assertTrue(parser.hasStatementHints()); + assertEquals( + ImmutableMap.of("statement_tag", "tag1", "rpc_priority", "low"), + parser.getClientSideStatementHints()); + assertEquals( + encloseInHint("\nother_hint1='some value',\n" + "\n" + "other_hint2=value") + "\nselect 1", + parser.getSqlWithoutClientSideHints()); + + parser = + parserFor( + encloseInHint( + "hint1=value1,\n" + + "other_hint1='some value',\n" + + "rpc_priority=low,\n" + + "other_hint2=value") + + "\nselect 1"); + assertTrue(parser.hasStatementHints()); + assertEquals(ImmutableMap.of("rpc_priority", "low"), 
parser.getClientSideStatementHints()); + assertEquals( + encloseInHint( + "hint1=value1,\n" + "other_hint1='some value',\n" + "\n" + "other_hint2=value") + + "\nselect 1", + parser.getSqlWithoutClientSideHints()); + + parser = + parserFor( + encloseInHint( + "hint1=value1,\n" + + "hint2=value2,\n" + + "rpc_priority=low,\n" + + "statement_tag=tag") + + "\nselect 1"); + assertTrue(parser.hasStatementHints()); + assertEquals( + ImmutableMap.of("rpc_priority", "low", "statement_tag", "tag"), + parser.getClientSideStatementHints()); + assertEquals( + encloseInHint("hint1=value1,\nhint2=value2,\n\n") + "\nselect 1", + parser.getSqlWithoutClientSideHints()); + } + + @Test + public void testExtractInvalidHints() { + assertInvalidHints("@{statement_tag=value value}"); + assertInvalidHints("@statement_tag=value"); + assertInvalidHints("{statement_tag=value}"); + assertInvalidHints("@{statement_tag=value"); + assertInvalidHints("@{statement_tag=value,"); + assertInvalidHints("@{statement_tag=value,}"); + assertInvalidHints("@statement_tag=value}"); + assertInvalidHints("@{statement_tag=}"); + assertInvalidHints("@{=value}"); + assertInvalidHints("@{}"); + assertInvalidHints("@{statement_tag=value,}"); + assertInvalidHints("@{statement_tag=value1,hint2=value2,}"); + assertInvalidHints("@{@statement_tag=value1}"); + assertInvalidHints("@{statement_tag=@value1}"); + assertInvalidHints("@{statement_tag value1}"); + assertInvalidHints("@{statement_tag='value1}"); + assertInvalidHints("@{statement_tag=value1'}"); + } + + private void assertInvalidHints(String sql) { + StatementHintParser parser = parserFor(sql); + assertEquals(NO_HINTS, parser.getClientSideStatementHints()); + assertSame(sql, parser.getSqlWithoutClientSideHints()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/StatementParserBenchmark.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/StatementParserBenchmark.java new 
file mode 100644 index 000000000000..e028f8027cf2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/StatementParserBenchmark.java @@ -0,0 +1,81 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Warmup; + +@Fork(value = 1, warmups = 0) +@Warmup(iterations = 1, time = 5) +@Measurement(iterations = 5, time = 5) +public class StatementParserBenchmark { + private static final Dialect dialect = Dialect.POSTGRESQL; + private static final AbstractStatementParser PARSER = + AbstractStatementParser.getInstance(dialect); + + private static final String LONG_QUERY_TEXT = + generateLongStatement("SELECT * FROM foo WHERE 1", 100 * 1024); // 100kb + + private static final String LONG_DML_TEXT = + generateLongStatement("update foo set bar=1 WHERE 1", 100 * 1024); // 100kb + + /** Generates a long SQL-looking string. 
*/ + private static String generateLongStatement(String prefix, int length) { + StringBuilder sb = new StringBuilder(length + 50); + sb.append(prefix); + while (sb.length() < length) { + sb.append(" OR abcdefghijklmnopqrstuvwxyz='abcdefghijklmnopqrstuvwxyz'"); + } + return sb.toString(); + } + + @Benchmark + public ParsedStatement isQueryTest() { + return PARSER.internalParse( + Statement.of("CREATE TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)"), + QueryOptions.getDefaultInstance()); + } + + @Benchmark + public ParsedStatement longQueryTest() { + return PARSER.internalParse(Statement.of(LONG_QUERY_TEXT), QueryOptions.getDefaultInstance()); + } + + @Benchmark + public ParsedStatement longDmlTest() { + return PARSER.internalParse(Statement.of(LONG_DML_TEXT), QueryOptions.getDefaultInstance()); + } + + public static void main(String[] args) throws Exception { + for (int i = 0; i < 100000; i++) { + if (PARSER.internalParse(Statement.of(LONG_QUERY_TEXT), QueryOptions.getDefaultInstance()) + == null) { + throw new AssertionError(); + } + if (PARSER.internalParse(Statement.of(LONG_DML_TEXT), QueryOptions.getDefaultInstance()) + == null) { + throw new AssertionError(); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/StatementParserTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/StatementParserTest.java new file mode 100644 index 000000000000..300517faaf0c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/StatementParserTest.java @@ -0,0 +1,1980 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType; +import com.google.common.cache.CacheStats; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.truth.Truth; +import java.io.File; +import java.io.FileNotFoundException; +import java.util.ArrayList; +import java.util.List; +import java.util.Scanner; +import java.util.Set; +import java.util.UUID; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import 
org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class StatementParserTest { + public static final String COPYRIGHT_PATTERN = + "\\/\\*\n" + + " \\* Copyright \\d{4} Google LLC\n" + + " \\*\n" + + " \\* Licensed under the Apache License, Version 2.0 \\(the \"License\"\\);\n" + + " \\* you may not use this file except in compliance with the License.\n" + + " \\* You may obtain a copy of the License at\n" + + " \\*\n" + + " \\* http://www.apache.org/licenses/LICENSE-2.0\n" + + " \\*\n" + + " \\* Unless required by applicable law or agreed to in writing, software\n" + + " \\* distributed under the License is distributed on an \"AS IS\" BASIS,\n" + + " \\* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" + + " \\* See the License for the specific language governing permissions and\n" + + " \\* limitations under the License.\n" + + " \\*\\/\n"; + private static final Pattern EXPECT_PATTERN = Pattern.compile("(?is)\\s*(?:@EXPECT)\\s+'(.*)'"); + + @Parameter public Dialect dialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + private AbstractStatementParser parser; + + @BeforeClass + public static void enableStatementCacheStats() { + AbstractStatementParser.resetParsers(); + System.setProperty("spanner.record_statement_cache_stats", "true"); + } + + @AfterClass + public static void disableStatementCacheStats() { + System.clearProperty("spanner.record_statement_cache_stats"); + } + + @Before + public void setupParser() { + parser = AbstractStatementParser.getInstance(dialect); + } + + private static final ImmutableMap COMMENTS_SCRIPTS = + ImmutableMap.of( + Dialect.GOOGLE_STANDARD_SQL, + "CommentsTest.sql", + Dialect.POSTGRESQL, + "postgresql/CommentsTest.sql"); + + @Test + public void testRemoveCommentsInScript() { + List statements = readStatementsFromFile(COMMENTS_SCRIPTS.get(dialect)); + String 
currentlyExpected = ""; + for (String statement : statements) { + String sql = statement.trim(); + if (sql.startsWith("@EXPECT")) { + Matcher matcher = EXPECT_PATTERN.matcher(sql); + if (matcher.matches()) { + currentlyExpected = matcher.group(1); + } else { + throw new IllegalArgumentException("Unknown @EXPECT statement: " + sql); + } + } else { + assertThat(parser.removeCommentsAndTrim(statement)).isEqualTo(currentlyExpected); + } + } + } + + @Test + public void testRemoveComments() { + assertThat(parser.removeCommentsAndTrim("")).isEqualTo(""); + assertThat(parser.removeCommentsAndTrim("SELECT * FROM FOO")).isEqualTo("SELECT * FROM FOO"); + assertThat(parser.removeCommentsAndTrim("-- This is a one line comment\nSELECT * FROM FOO")) + .isEqualTo("SELECT * FROM FOO"); + assertThat( + parser.removeCommentsAndTrim( + "/* This is a simple multi line comment */\nSELECT * FROM FOO")) + .isEqualTo("SELECT * FROM FOO"); + assertThat( + parser.removeCommentsAndTrim("/* This is a \nmulti line comment */\nSELECT * FROM FOO")) + .isEqualTo("SELECT * FROM FOO"); + assertThat( + parser.removeCommentsAndTrim( + "/* This\nis\na\nmulti\nline\ncomment */\nSELECT * FROM FOO")) + .isEqualTo("SELECT * FROM FOO"); + + assertEquals( + "SELECT \"FOO\" FROM \"BAR\" WHERE name='test'", + parser.removeCommentsAndTrim( + "-- Single line comment\nSELECT \"FOO\" FROM \"BAR\" WHERE name='test'")); + assertEquals( + "SELECT \"FOO\" FROM \"BAR\" WHERE name='test' and id=1", + parser.removeCommentsAndTrim( + "/* Multi\n" + + "line\n" + + "comment\n" + + "*/SELECT \"FOO\" FROM \"BAR\" WHERE name='test' and id=1")); + + if (dialect == Dialect.POSTGRESQL) { + // PostgreSQL allows string literals and quoted identifiers to contain newline characters. 
+ assertEquals( + "SELECT \"FOO\nBAR\" FROM \"BAR\" WHERE name='test\ntest'", + parser.removeCommentsAndTrim( + "-- Single line comment\nSELECT \"FOO\nBAR\" FROM \"BAR\" WHERE name='test\ntest'")); + assertEquals( + "SELECT \"FOO\nBAR\" FROM \"BAR\" WHERE name='test\ntest' and id=1", + parser.removeCommentsAndTrim( + "/* Multi\n" + + "line\n" + + "comment\n" + + "*/SELECT \"FOO\nBAR\" FROM \"BAR\" WHERE name='test\ntest' and id=1")); + assertEquals( + "SELECT 1", + parser.removeCommentsAndTrim( + "/* This block comment surrounds a query which itself has a block comment...\n" + + "SELECT /* embedded single line */ 'embedded' AS x2;\n" + + "*/\n" + + "SELECT 1")); + } + } + + @Test + public void testGoogleStandardSQLRemoveCommentsGsql() { + assumeTrue(dialect == Dialect.GOOGLE_STANDARD_SQL); + + assertThat(parser.removeCommentsAndTrim("/*GSQL*/")).isEqualTo(""); + assertThat(parser.removeCommentsAndTrim("/*GSQL*/SELECT * FROM FOO")) + .isEqualTo("SELECT * FROM FOO"); + assertThat( + parser.removeCommentsAndTrim( + "/*GSQL*/-- This is a one line comment\nSELECT * FROM FOO")) + .isEqualTo("SELECT * FROM FOO"); + assertThat( + parser.removeCommentsAndTrim( + "/*GSQL*//* This is a simple multi line comment */\nSELECT * FROM FOO")) + .isEqualTo("SELECT * FROM FOO"); + assertThat( + parser.removeCommentsAndTrim( + "/*GSQL*//* This is a \nmulti line comment */\nSELECT * FROM FOO")) + .isEqualTo("SELECT * FROM FOO"); + assertThat( + parser.removeCommentsAndTrim( + "/*GSQL*//* This\nis\na\nmulti\nline\ncomment */\nSELECT * FROM FOO")) + .isEqualTo("SELECT * FROM FOO"); + } + + @Test + public void testStatementWithCommentContainingSlash() { + String sql = + "/*\n" + + " * Script for testing invalid/unrecognized statements\n" + + " */\n" + + "\n" + + "-- MERGE into test comment MERGE -- \n" + + "@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown statement'\n" + + "MERGE INTO Singers s\n" + + "/*** test ****/" + + "USING (VALUES (1, 'John', 'Doe')) v\n" + + "ON 
v.column1 = s.SingerId\n" + + "WHEN NOT MATCHED \n" + + " INSERT VALUES (v.column1, v.column2, v.column3)\n" + + "WHEN MATCHED\n" + + " UPDATE SET FirstName = v.column2,\n" + + " LastName = v.column3;"; + String sqlWithoutComments = + "@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown statement'\n" + + "MERGE INTO Singers s\n" + + "USING (VALUES (1, 'John', 'Doe')) v\n" + + "ON v.column1 = s.SingerId\n" + + "WHEN NOT MATCHED \n" + + " INSERT VALUES (v.column1, v.column2, v.column3)\n" + + "WHEN MATCHED\n" + + " UPDATE SET FirstName = v.column2,\n" + + " LastName = v.column3"; + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertThat(statement.getSqlWithoutComments()).isEqualTo(sqlWithoutComments); + } + + @Test + public void testStatementWithCommentContainingSlashAndNoAsteriskOnNewLine() { + String sql = + "/*\n" + + " * Script for testing invalid/unrecognized statements\n" + + " foo bar baz" + + " */\n" + + "\n" + + "-- MERGE INTO test comment MERGE\n" + + "@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown statement'\n" + + "MERGE INTO Singers s\n" + + "USING (VALUES (1, 'John', 'Doe')) v\n" + + "ON v.column1 = s.SingerId\n" + + "-- test again --\n" + + "WHEN NOT MATCHED \n" + + " INSERT VALUES (v.column1, v.column2, v.column3)\n" + + "WHEN MATCHED\n" + + " UPDATE SET FirstName = v.column2,\n" + + " LastName = v.column3;"; + String sqlWithoutComments = + "@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown statement'\n" + + "MERGE INTO Singers s\n" + + "USING (VALUES (1, 'John', 'Doe')) v\n" + + "ON v.column1 = s.SingerId\n" + + "\nWHEN NOT MATCHED \n" + + " INSERT VALUES (v.column1, v.column2, v.column3)\n" + + "WHEN MATCHED\n" + + " UPDATE SET FirstName = v.column2,\n" + + " LastName = v.column3"; + ParsedStatement statement = parser.parse(Statement.of(sql)); + assertThat(statement.getSqlWithoutComments()).isEqualTo(sqlWithoutComments); + } + + @Test + public void testPostgresSQLDialectDollarQuoted() { + 
assumeTrue(dialect == Dialect.POSTGRESQL); + + assertThat(parser.removeCommentsAndTrim("$$foo$$")).isEqualTo("$$foo$$"); + assertThat(parser.removeCommentsAndTrim("$$--foo$$")).isEqualTo("$$--foo$$"); + assertThat(parser.removeCommentsAndTrim("$$\nline 1\n--line2$$")) + .isEqualTo("$$\nline 1\n--line2$$"); + assertThat(parser.removeCommentsAndTrim("$bar$--foo$bar$")).isEqualTo("$bar$--foo$bar$"); + assertThat( + parser.removeCommentsAndTrim( + "$bar$\nThis is a valid string\n -- That could contain special characters$bar$")) + .isEqualTo("$bar$\nThis is a valid string\n -- That could contain special characters$bar$"); + + assertThat(parser.removeCommentsAndTrim("SELECT FOO$BAR FROM SOME_TABLE")) + .isEqualTo("SELECT FOO$BAR FROM SOME_TABLE"); + assertThat(parser.removeCommentsAndTrim("SELECT FOO$BAR -- This is a comment\nFROM SOME_TABLE")) + .isEqualTo("SELECT FOO$BAR \nFROM SOME_TABLE"); + assertThat( + parser.removeCommentsAndTrim("SELECT FOO, $BAR -- This is a comment\nFROM SOME_TABLE")) + .isEqualTo("SELECT FOO, $BAR \nFROM SOME_TABLE"); + } + + @Test + public void testPostgreSQLDialectUnicodeEscapedIdentifiers() { + assumeTrue(dialect == Dialect.POSTGRESQL); + + assertEquals( + "SELECT 'tricky' AS \"\\\"", parser.removeCommentsAndTrim("SELECT 'tricky' AS \"\\\"")); + assertEquals( + "SELECT 'tricky' AS U&\"\\\" UESCAPE '!'", + parser.removeCommentsAndTrim("SELECT 'tricky' AS U&\"\\\" UESCAPE '!'")); + assertEquals( + "SELECT '\\' AS \"tricky\"", parser.removeCommentsAndTrim("SELECT '\\' AS \"tricky\"")); + assertEquals("SELECT 'foo''bar'", parser.removeCommentsAndTrim("SELECT 'foo''bar'")); + assertEquals("SELECT 'foo\"bar'", parser.removeCommentsAndTrim("SELECT 'foo\"bar'")); + assertEquals("SELECT 'foo\"\"bar'", parser.removeCommentsAndTrim("SELECT 'foo\"\"bar'")); + assertEquals( + "SELECT 'foo'", parser.removeCommentsAndTrim("SELECT /* This is a 'comment' */ 'foo'")); + assertEquals( + "SELECT 'foo'", + parser.removeCommentsAndTrim("SELECT /* This is a 
'''comment''' */ 'foo'")); + assertEquals( + "SELECT '''foo''' FROM bar", + parser.removeCommentsAndTrim("SELECT /* This is a '''comment''' */ '''foo''' FROM bar")); + assertEquals( + "SELECT '''foo''' FROM \"\"\"\\bar\\\"\"\"", + parser.removeCommentsAndTrim( + "SELECT /* This is a '''comment''' */ '''foo''' FROM \"\"\"\\bar\\\"\"\"")); + } + + @Test + public void testPostgreSQLDialectSupportsEmbeddedComments() { + assumeTrue(dialect == Dialect.POSTGRESQL); + + final String sql = + "/* This is a comment /* This is an embedded comment */ This is after the embedded comment" + + " */ SELECT 1"; + assertEquals("SELECT 1", parser.removeCommentsAndTrim(sql)); + } + + @Test + public void testGoogleStandardSQLDialectDoesNotSupportEmbeddedComments() { + assumeTrue(dialect == Dialect.GOOGLE_STANDARD_SQL); + + final String sql = + "/* This is a comment /* This is an embedded comment */ This is after the embedded comment" + + " */ SELECT 1"; + assertEquals( + "This is after the embedded comment */ SELECT 1", parser.removeCommentsAndTrim(sql)); + } + + @Test + public void testPostgreSQLDialectUnterminatedComment() { + assumeTrue(dialect == Dialect.POSTGRESQL); + + final String sql = + "/* This is a comment /* This is still a comment */ this is unterminated SELECT 1"; + try { + // Cloud Spanner would see this as a valid comment, while PostgreSQL + // requires 'embedded' comments to be properly terminated. 
+ parser.removeCommentsAndTrim(sql); + fail("missing expected exception"); + } catch (SpannerException e) { + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + assertTrue( + "Message should contain 'unterminated block comment'", + e.getMessage().contains("unterminated block comment")); + } + } + + @Test + public void testGoogleStandardSqlDialectDialectUnterminatedComment() { + assumeTrue(dialect == Dialect.GOOGLE_STANDARD_SQL); + + final String sql = + "/* This is a comment /* This is still a comment */ this is unterminated SELECT 1"; + assertEquals("this is unterminated SELECT 1", parser.removeCommentsAndTrim(sql)); + } + + @Test + public void testShowStatements() { + AbstractStatementParser parser = AbstractStatementParser.getInstance(dialect); + + assertThat(parser.parse(Statement.of("show variable autocommit bar")).getType()) + .isEqualTo(StatementType.QUERY); + assertThat(parser.parse(Statement.of("show variable autocommit")).getType()) + .isEqualTo(StatementType.CLIENT_SIDE); + if (dialect == Dialect.POSTGRESQL) { + assertThat(parser.parse(Statement.of("show autocommit")).getType()) + .isEqualTo(StatementType.CLIENT_SIDE); + assertThat( + parser.parse(Statement.of("show variable spanner.retry_aborts_internally")).getType()) + .isEqualTo(StatementType.CLIENT_SIDE); + } else { + assertThat(parser.parse(Statement.of("show autocommit")).getType()) + .isEqualTo(StatementType.QUERY); + assertThat(parser.parse(Statement.of("show variable retry_aborts_internally")).getType()) + .isEqualTo(StatementType.CLIENT_SIDE); + } + + assertThat(parser.parse(Statement.of("show variable retry_aborts_internally bar")).getType()) + .isEqualTo(StatementType.QUERY); + } + + @Test + public void testGoogleStandardSQLDialectStatementWithHashTagSingleLineComment() { + assumeTrue(dialect == Dialect.GOOGLE_STANDARD_SQL); + + // Supports # based comments + assertThat( + parser + .parse(Statement.of("# this is a comment\nselect * from foo")) + .getSqlWithoutComments()) + 
.isEqualTo("select * from foo"); + assertThat( + parser + .parse(Statement.of("select * from foo\n#this is a comment")) + .getSqlWithoutComments()) + .isEqualTo("select * from foo"); + assertThat( + parser + .parse(Statement.of("select *\nfrom foo # this is a comment\nwhere bar=1")) + .getSqlWithoutComments()) + .isEqualTo("select *\nfrom foo \nwhere bar=1"); + } + + @Test + public void testPostgreSQLDialectStatementWithHashTagSingleLineComment() { + assumeTrue(dialect == Dialect.POSTGRESQL); + + // Does not support # based comments + assertThat( + parser + .parse(Statement.of("# this is a comment\nselect * from foo")) + .getSqlWithoutComments()) + .isEqualTo("# this is a comment\nselect * from foo"); + assertThat( + parser + .parse(Statement.of("select * from foo\n#this is a comment")) + .getSqlWithoutComments()) + .isEqualTo("select * from foo\n#this is a comment"); + assertThat( + parser + .parse(Statement.of("select *\nfrom foo # this is a comment\nwhere bar=1")) + .getSqlWithoutComments()) + .isEqualTo("select *\nfrom foo # this is a comment\nwhere bar=1"); + } + + @Test + public void testIsDdlStatement() { + assertThat(parser.isDdlStatement("")).isFalse(); + assertThat(parser.isDdlStatement("random text")).isFalse(); + assertThat(parser.isDdlStatement("CREATETABLE")).isFalse(); + assertThat(parser.isDdlStatement("CCREATE TABLE")).isFalse(); + assertThat(parser.isDdlStatement("SELECT 1")).isFalse(); + assertThat(parser.isDdlStatement("SELECT FOO FROM BAR")).isFalse(); + assertThat(parser.isDdlStatement("INSERT INTO FOO (ID, NAME) VALUES (1, 'NAME')")).isFalse(); + assertThat(parser.isDdlStatement("UPDATE FOO SET NAME='NAME' WHERE ID=1")).isFalse(); + assertThat(parser.isDdlStatement("DELETE FROM FOO")).isFalse(); + + assertThat( + parser.isDdlStatement("CREATE TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)")) + .isTrue(); + assertThat(parser.isDdlStatement("alter table foo add Description string(100)")).isTrue(); + 
assertThat(parser.isDdlStatement("drop table foo")).isTrue(); + assertThat(parser.isDdlStatement("Create index BAR on foo (name)")).isTrue(); + + assertThat( + parser + .parse( + Statement.of( + "\t\tCREATE\n" + + "\t TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)")) + .isDdl()) + .isTrue(); + assertThat( + parser + .parse( + Statement.of( + "\n\n\nCREATE TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)")) + .isDdl()) + .isTrue(); + assertThat( + parser + .parse( + Statement.of( + "-- this is a comment\n" + + "CREATE TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)")) + .isDdl()) + .isTrue(); + assertThat( + parser + .parse( + Statement.of( + "/* multi line comment\n" + + "* with more information on the next line\n" + + "*/\n" + + "CREATE TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)")) + .isDdl()) + .isTrue(); + assertThat( + parser + .parse( + Statement.of( + "/** java doc comment\n" + + "* with more information on the next line\n" + + "*/\n" + + "CREATE TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)")) + .isDdl()) + .isTrue(); + assertThat( + parser + .parse( + Statement.of( + "-- SELECT in a single line comment \n" + + "CREATE TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)")) + .isDdl()) + .isTrue(); + assertThat( + parser + .parse( + Statement.of( + "/* SELECT in a multi line comment\n" + + "* with more information on the next line\n" + + "*/\n" + + "CREATE TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)")) + .isDdl()) + .isTrue(); + assertThat( + parser + .parse( + Statement.of( + "/** SELECT in a java doc comment\n" + + "* with more information on the next line\n" + + "*/\n" + + "CREATE TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)")) + .isDdl()) + .isTrue(); + + assertTrue( + parser + .parse( + Statement.of( + "CREATE VIEW SingerNames\n" + + "SQL SECURITY INVOKER\n" + + "AS SELECT SingerId as SingerId,\n" + + " CONCAT(Singers.FirstName, Singers.LastName) as Name\n" + + " FROM Singers")) + 
.isDdl()); + assertTrue( + parser + .parse(Statement.of("create view SingerNames as select FullName from Singers")) + .isDdl()); + assertTrue( + parser + .parse( + Statement.of( + "/* this is a comment */ create view SingerNames as select FullName from" + + " Singers")) + .isDdl()); + assertTrue( + parser + .parse( + Statement.of( + "create /* this is a comment */ view SingerNames as select FullName from" + + " Singers")) + .isDdl()); + assertTrue( + parser + .parse( + Statement.of( + "create \n" + + " -- This is a comment \n" + + " view SingerNames as select FullName from Singers")) + .isDdl()); + assertTrue( + parser + .parse( + Statement.of( + " \t \n" + + " create \n" + + " \t view \n" + + " \t SingerNames as select FullName from Singers")) + .isDdl()); + assertTrue(parser.parse(Statement.of("DROP VIEW SingerNames")).isDdl()); + assertTrue( + parser + .parse( + Statement.of( + "ALTER VIEW SingerNames\n" + + "AS SELECT SingerId as SingerId,\n" + + " CONCAT(Singers.FirstName, Singers.LastName) as Name\n" + + " FROM Singers")) + .isDdl()); + + assertTrue(parser.parse(Statement.of("analyze")).isDdl()); + assertTrue(parser.parse(Statement.of("Analyze")).isDdl()); + assertTrue(parser.parse(Statement.of("ANALYZE")).isDdl()); + assertTrue(parser.parse(Statement.of("\t ANALYZE\n ")).isDdl()); + assertTrue(parser.parse(Statement.of("/* This is a comment */ ANALYZE ")).isDdl()); + assertTrue(parser.parse(Statement.of("-- comment\n ANALYZE ")).isDdl()); + assertTrue(parser.parse(Statement.of("RENAME TABLE foo TO foo2")).isDdl()); + assertTrue( + parser.parse(Statement.of("/* Fix typo */ RENAME TABLE studens TO students")).isDdl()); + assertTrue( + parser.parse(Statement.of("/* Fix typo */ rename TABLE studens TO students")).isDdl()); + assertTrue( + parser.parse(Statement.of("RENAME INDEX idx_foo TO idx_students_last_name")).isDdl()); + assertTrue( + parser.parse(Statement.of("/* Fix typo */ Rename TABLE studens TO students")).isDdl()); + assertTrue( + parser + 
.parse(Statement.of(" \t\nRENAME\n INDEX idx_foo TO idx_students_last_name")) + .isDdl()); + } + + @Test + public void testIsQuery() { + assertFalse(parser.isQuery("")); + assertFalse(parser.isQuery("random text")); + assertFalse(parser.isQuery("SELECT1")); + assertFalse(parser.isQuery("SSELECT 1")); + + assertTrue(parser.isQuery("SELECT 1")); + assertTrue(parser.isQuery("select 1")); + assertTrue(parser.isQuery("SELECT foo FROM bar WHERE id=@id")); + + assertFalse(parser.isQuery("INSERT INTO FOO (ID, NAME) VALUES (1, 'NAME')")); + assertFalse(parser.isQuery("UPDATE FOO SET NAME='NAME' WHERE ID=1")); + assertFalse(parser.isQuery("DELETE FROM FOO")); + assertFalse(parser.isQuery("CREATE TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)")); + assertFalse(parser.isQuery("alter table foo add Description string(100)")); + assertFalse(parser.isQuery("drop table foo")); + assertFalse(parser.isQuery("Create index BAR on foo (name)")); + + assertTrue(parser.isQuery("select * from foo")); + + assertFalse(parser.isQuery("INSERT INTO FOO (ID, NAME) SELECT ID+1, NAME FROM FOO")); + + assertTrue( + parser.isQuery( + "WITH subQ1 AS (SELECT SchoolID FROM Roster),\n" + + " subQ2 AS (SELECT OpponentID FROM PlayerStats)\n" + + "SELECT * FROM subQ1\n" + + "UNION ALL\n" + + "SELECT * FROM subQ2")); + assertTrue( + parser.isQuery( + "with subQ1 AS (SELECT SchoolID FROM Roster),\n" + + " subQ2 AS (SELECT OpponentID FROM PlayerStats)\n" + + "select * FROM subQ1\n" + + "UNION ALL\n" + + "SELECT * FROM subQ2")); + assertTrue( + parser + .parse( + Statement.of( + "-- this is a comment\nwith foo as (select * from bar)\nselect * from foo")) + .isQuery()); + + assertTrue(parser.parse(Statement.of("-- this is a comment\nselect * from foo")).isQuery()); + assertTrue( + parser + .parse( + Statement.of( + "/* multi line comment\n" + + "* with more information on the next line\n" + + "*/\n" + + "SELECT ID, NAME\n" + + "FROM\tTEST\n" + + "\tWHERE ID=1")) + .isQuery()); + assertTrue( + parser + 
.parse( + Statement.of( + "/** java doc comment\n" + + "* with more information on the next line\n" + + "*/\n" + + "select max(id) from test")) + .isQuery()); + assertTrue( + parser.parse(Statement.of("-- INSERT in a single line comment \n select 1")).isQuery()); + assertTrue( + parser + .parse( + Statement.of( + "/* UPDATE in a multi line comment\n" + + "* with more information on the next line\n" + + "*/\n" + + "SELECT 1")) + .isQuery()); + assertTrue( + parser + .parse( + Statement.of( + "/** DELETE in a java doc comment\n" + + "* with more information on the next line\n" + + "*/\n\n\n\n" + + " -- UPDATE test\n" + + "SELECT 1")) + .isQuery()); + + assertTrue( + parser + .parse( + Statement.of( + "GRAPH FinGraph\n" + "MATCH (n)\n" + "RETURN LABELS(n) AS label, n.id")) + .isQuery()); + assertTrue( + parser.parse(Statement.of("FROM Produce\n" + "|> WHERE item != 'bananas'")).isQuery()); + + assertTrue( + parser + .parse( + Statement.of( + "(\n" + + " SELECT * FROM Foo\n" + + " EXCEPT ALL\n" + + " SELECT 1\n" + + ")\n" + + "EXCEPT ALL\n" + + "SELECT 2")) + .isQuery()); + assertTrue( + parser + .parse( + Statement.of( + "(\n" + + " (SELECT * FROM Foo)\n" + + " EXCEPT ALL\n" + + " SELECT 1\n" + + ")\n" + + "EXCEPT ALL\n" + + "SELECT 2")) + .isQuery()); + assertFalse(parser.parse(Statement.of("(show variable autocommit;\n")).isQuery()); + } + + @Test + public void testGoogleStandardSQLDialectIsQuery_QueryHints() { + assumeTrue(dialect == Dialect.GOOGLE_STANDARD_SQL); + + // Supports query hints, PostgreSQL dialect does NOT + // Valid query hints. 
+ assertTrue( + parser + .parse(Statement.of("@{JOIN_METHOD=HASH_JOIN} SELECT * FROM PersonsTable")) + .isQuery()); + assertTrue( + parser + .parse(Statement.of("@ {JOIN_METHOD=HASH_JOIN} SELECT * FROM PersonsTable")) + .isQuery()); + assertTrue( + parser + .parse(Statement.of("@{ JOIN_METHOD=HASH_JOIN} SELECT * FROM PersonsTable")) + .isQuery()); + assertTrue( + parser + .parse(Statement.of("@{JOIN_METHOD=HASH_JOIN } SELECT * FROM PersonsTable")) + .isQuery()); + assertTrue( + parser + .parse(Statement.of("@{JOIN_METHOD=HASH_JOIN}\nSELECT * FROM PersonsTable")) + .isQuery()); + assertTrue( + parser + .parse( + Statement.of("@{\nJOIN_METHOD = HASH_JOIN \t}\n\t SELECT * FROM PersonsTable")) + .isQuery()); + assertTrue( + parser + .parse( + Statement.of( + "@{JOIN_METHOD=HASH_JOIN}\n" + + " -- Single line comment\n" + + "SELECT * FROM PersonsTable")) + .isQuery()); + assertTrue( + parser + .parse( + Statement.of( + "@{JOIN_METHOD=HASH_JOIN}\n" + + " /* Multi line comment\n" + + " with more comments\n" + + " */SELECT * FROM PersonsTable")) + .isQuery()); + assertTrue( + parser + .parse( + Statement.of( + "@{JOIN_METHOD=HASH_JOIN} WITH subQ1 AS (SELECT SchoolID FROM Roster),\n" + + " subQ2 AS (SELECT OpponentID FROM PlayerStats)\n" + + "SELECT * FROM subQ1\n" + + "UNION ALL\n" + + "SELECT * FROM subQ2")) + .isQuery()); + + // Multiple query hints. + assertTrue( + parser + .parse( + Statement.of("@{FORCE_INDEX=index_name, JOIN_METHOD=HASH_JOIN} SELECT * FROM tbl")) + .isQuery()); + assertTrue( + parser + .parse( + Statement.of("@{FORCE_INDEX=index_name, JOIN_METHOD=HASH_JOIN} Select * FROM tbl")) + .isQuery()); + assertTrue( + parser + .parse( + Statement.of( + "@{FORCE_INDEX=index_name,\n" + + "JOIN_METHOD=HASH_JOIN}\n" + + "WITH subQ1 AS (SELECT SchoolID FROM Roster),\n" + + " subQ2 AS (SELECT OpponentID FROM PlayerStats)\n" + + "SELECT * FROM subQ1\n" + + "UNION ALL\n" + + "SELECT * FROM subQ2")) + .isQuery()); + + // Invalid query hints. 
+ assertFalse( + parser.parse(Statement.of("@{JOIN_METHOD=HASH_JOIN SELECT * FROM PersonsTable")).isQuery()); + assertFalse( + parser.parse(Statement.of("@JOIN_METHOD=HASH_JOIN} SELECT * FROM PersonsTable")).isQuery()); + assertFalse( + parser.parse(Statement.of("@JOIN_METHOD=HASH_JOIN SELECT * FROM PersonsTable")).isQuery()); + assertFalse( + parser + .parse( + Statement.of( + "@{FORCE_INDEX=index_name} @{JOIN_METHOD=HASH_JOIN} UPDATE tbl set FOO=1 WHERE" + + " ID=2")) + .isQuery()); + } + + @Test + public void testIsUpdate_QueryHints() { + assumeTrue(dialect == Dialect.GOOGLE_STANDARD_SQL); + + // Supports query hints, PostgreSQL dialect does NOT + // Valid query hints. + assertTrue( + parser.isUpdateStatement( + "@{LOCK_SCANNED_RANGES=exclusive} UPDATE FOO SET NAME='foo' WHERE ID=1")); + assertTrue( + parser.isUpdateStatement( + "@ {LOCK_SCANNED_RANGES=exclusive} UPDATE FOO SET NAME='foo' WHERE ID=1")); + assertTrue( + parser.isUpdateStatement( + "@{ LOCK_SCANNED_RANGES=exclusive} UPDATE FOO SET NAME='foo' WHERE ID=1")); + assertTrue( + parser.isUpdateStatement( + "@{LOCK_SCANNED_RANGES=exclusive } UPDATE FOO SET NAME='foo' WHERE ID=1")); + assertTrue( + parser.isUpdateStatement( + "@{LOCK_SCANNED_RANGES=exclusive}\nUPDATE FOO SET NAME='foo' WHERE ID=1")); + assertTrue( + parser.isUpdateStatement( + "@{\nLOCK_SCANNED_RANGES = exclusive \t}\n\t UPDATE FOO SET NAME='foo' WHERE ID=1")); + assertTrue( + parser.isUpdateStatement( + "@{LOCK_SCANNED_RANGES=exclusive}\n" + + " -- Single line comment\n" + + "UPDATE FOO SET NAME='foo' WHERE ID=1")); + assertTrue( + parser.isUpdateStatement( + "@{LOCK_SCANNED_RANGES=exclusive}\n" + + " /* Multi line comment\n" + + " with more comments\n" + + " */UPDATE FOO SET NAME='foo' WHERE ID=1")); + + // Multiple query hints. + assertTrue( + parser.isUpdateStatement( + "@{LOCK_SCANNED_RANGES=exclusive} @{USE_ADDITIONAL_PARALLELISM=TRUE} UPDATE FOO SET" + + " NAME='foo' WHERE ID=1")); + + // Invalid query hints. 
+ assertFalse( + parser.isUpdateStatement( + "@{LOCK_SCANNED_RANGES=exclusive UPDATE FOO SET NAME='foo' WHERE ID=1")); + assertFalse( + parser.isUpdateStatement( + "@LOCK_SCANNED_RANGES=exclusive} UPDATE FOO SET NAME='foo' WHERE ID=1")); + assertFalse( + parser.isUpdateStatement( + "@LOCK_SCANNED_RANGES=exclusive UPDATE FOO SET NAME='foo' WHERE ID=1")); + } + + @Test + public void testIsUpdate_InsertStatements() { + assertFalse(parser.isUpdateStatement("")); + assertFalse(parser.isUpdateStatement("random text")); + assertFalse(parser.isUpdateStatement("INSERTINTO FOO (ID) VALUES (1)")); + assertFalse(parser.isUpdateStatement("IINSERT INTO FOO (ID) VALUES (1)")); + assertTrue(parser.isUpdateStatement("INSERT INTO FOO (ID) VALUES (1)")); + assertTrue(parser.isUpdateStatement("insert into foo (id) values (1)")); + assertTrue(parser.isUpdateStatement("INSERT into Foo (id)\nSELECT id FROM bar WHERE id=@id")); + assertFalse(parser.isUpdateStatement("SELECT 1")); + assertFalse(parser.isUpdateStatement("SELECT NAME FROM FOO WHERE ID=1")); + assertFalse( + parser.isUpdateStatement("CREATE TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)")); + assertFalse(parser.isUpdateStatement("alter table foo add Description string(100)")); + assertFalse(parser.isUpdateStatement("drop table foo")); + assertFalse(parser.isUpdateStatement("Create index BAR on foo (name)")); + assertFalse(parser.isUpdateStatement("select * from foo")); + assertTrue(parser.isUpdateStatement("INSERT INTO FOO (ID, NAME) SELECT ID+1, NAME FROM FOO")); + assertTrue( + parser + .parse(Statement.of("-- this is a comment\ninsert into foo (id) values (1)")) + .isUpdate()); + assertTrue( + parser + .parse( + Statement.of( + "/* multi line comment\n" + + "* with more information on the next line\n" + + "*/\n" + + "INSERT INTO FOO\n" + + "(ID)\tVALUES\n" + + "\t(1)")) + .isUpdate()); + assertTrue( + parser + .parse( + Statement.of( + "/** java doc comment\n" + + "* with more information on the next line\n" + + 
"*/\n" + + "Insert intO foo (id) select 1")) + .isUpdate()); + assertTrue( + parser + .parse( + Statement.of( + "-- SELECT in a single line comment \n insert into foo (id) values (1)")) + .isUpdate()); + assertTrue( + parser + .parse( + Statement.of( + "/* CREATE in a multi line comment\n" + + "* with more information on the next line\n" + + "*/\n" + + "INSERT INTO FOO (ID) VALUES (1)")) + .isUpdate()); + assertTrue( + parser + .parse( + Statement.of( + "/** DROP in a java doc comment\n" + + "* with more information on the next line\n" + + "*/\n\n\n\n" + + " -- SELECT test\n" + + "insert into foo (id) values (1)")) + .isUpdate()); + } + + @Test + public void testIsUpdate_UpdateStatements() { + assertFalse(parser.isUpdateStatement("")); + assertFalse(parser.isUpdateStatement("random text")); + assertFalse(parser.isUpdateStatement("UPDATEFOO SET NAME='foo' WHERE ID=1")); + assertFalse(parser.isUpdateStatement("UUPDATE FOO SET NAME='foo' WHERE ID=1")); + assertTrue(parser.isUpdateStatement("UPDATE FOO SET NAME='foo' WHERE ID=1")); + assertTrue(parser.isUpdateStatement("update foo set name='foo' where id=1")); + assertTrue( + parser.isUpdateStatement("update foo set name=\n(SELECT name FROM bar WHERE id=@id)")); + assertFalse(parser.isUpdateStatement("SELECT 1")); + assertFalse(parser.isUpdateStatement("SELECT NAME FROM FOO WHERE ID=1")); + assertFalse( + parser.isUpdateStatement("CREATE TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)")); + assertFalse(parser.isUpdateStatement("alter table foo add Description string(100)")); + assertFalse(parser.isUpdateStatement("drop table foo")); + assertFalse(parser.isUpdateStatement("Create index BAR on foo (name)")); + assertFalse(parser.isUpdateStatement("select * from foo")); + assertTrue( + parser.isUpdateStatement( + "UPDATE FOO SET NAME=(SELECT NAME FROM FOO) WHERE ID=(SELECT ID+1 FROM FOO)")); + + assertTrue( + parser + .parse(Statement.of("-- this is a comment\nupdate foo set name='foo' where id=@id")) + 
.isUpdate()); + assertTrue( + parser + .parse( + Statement.of( + "/* multi line comment\n" + + "* with more information on the next line\n" + + "*/\n" + + "UPDATE FOO\n" + + "SET NAME=\t'foo'\n" + + "\tWHERE ID=1")) + .isUpdate()); + assertTrue( + parser + .parse( + Statement.of( + "/** java doc comment\n" + + "* with more information on the next line\n" + + "*/\n" + + "UPDATE FOO SET NAME=(select 'bar')")) + .isUpdate()); + assertTrue( + parser + .parse( + Statement.of("-- SELECT in a single line comment \n update foo set name='bar'")) + .isUpdate()); + assertTrue( + parser + .parse( + Statement.of( + "/* CREATE in a multi line comment\n" + + "* with more information on the next line\n" + + "*/\n" + + "UPDATE FOO SET NAME='BAR'")) + .isUpdate()); + assertTrue( + parser + .parse( + Statement.of( + "/** DROP in a java doc comment\n" + + "* with more information on the next line\n" + + "*/\n\n\n\n" + + " -- SELECT test\n" + + "update foo set bar='foo'")) + .isUpdate()); + } + + @Test + public void testIsUpdate_DeleteStatements() { + assertFalse(parser.isUpdateStatement("")); + assertFalse(parser.isUpdateStatement("random text")); + assertFalse(parser.isUpdateStatement("DELETEFROM FOO WHERE ID=1")); + assertFalse(parser.isUpdateStatement("DDELETE FROM FOO WHERE ID=1")); + assertTrue(parser.isUpdateStatement("DELETE FROM FOO WHERE ID=1")); + assertTrue(parser.isUpdateStatement("delete from foo where id=1")); + assertTrue( + parser.isUpdateStatement( + "delete from foo where name=\n(SELECT name FROM bar WHERE id=@id)")); + assertFalse(parser.isUpdateStatement("SELECT 1")); + assertFalse(parser.isUpdateStatement("SELECT NAME FROM FOO WHERE ID=1")); + assertFalse( + parser.isUpdateStatement("CREATE TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)")); + assertFalse(parser.isUpdateStatement("alter table foo add Description string(100)")); + assertFalse(parser.isUpdateStatement("drop table foo")); + assertFalse(parser.isUpdateStatement("Create index BAR on foo 
(name)")); + assertFalse(parser.isUpdateStatement("select * from foo")); + assertTrue( + parser.isUpdateStatement( + "UPDATE FOO SET NAME=(SELECT NAME FROM FOO) WHERE ID=(SELECT ID+1 FROM FOO)")); + + assertTrue( + parser + .parse(Statement.of("-- this is a comment\ndelete from foo where id=@id")) + .isUpdate()); + assertTrue( + parser + .parse( + Statement.of( + "/* multi line comment\n" + + "* with more information on the next line\n" + + "*/\n" + + "DELETE FROM FOO\n\n" + + "\tWHERE ID=1")) + .isUpdate()); + assertTrue( + parser + .parse( + Statement.of( + "/** java doc comment\n" + + "* with more information on the next line\n" + + "*/\n" + + "DELETE FROM FOO WHERE NAME=(select 'bar')")) + .isUpdate()); + assertTrue( + parser + .parse( + Statement.of( + "-- SELECT in a single line comment \n delete from foo where name='bar'")) + .isUpdate()); + assertTrue( + parser + .parse( + Statement.of( + "/* CREATE in a multi line comment\n" + + "* with more information on the next line\n" + + "*/\n" + + "DELETE FROM FOO WHERE NAME='BAR'")) + .isUpdate()); + assertTrue( + parser + .parse( + Statement.of( + "/** DROP in a java doc comment\n" + + "* with more information on the next line\n" + + "*/\n\n\n\n" + + " -- SELECT test\n" + + "delete from foo where bar='foo'")) + .isUpdate()); + } + + @Test + public void testParseStatementsWithNoParameters() throws CompileException { + for (ClientSideStatementImpl statement : getAllStatements()) { + if (statement.getSetStatement() == null) { + for (String testStatement : statement.getExampleStatements()) { + testParseStatement(testStatement, statement.getClass()); + } + } + } + } + + @Test + public void testParseStatementsWithOneParameterAtTheEnd() throws CompileException { + for (ClientSideStatementImpl statement : getAllStatements()) { + if (statement.getSetStatement() != null) { + for (String testStatement : statement.getExampleStatements()) { + testParseStatementWithOneParameterAtTheEnd(testStatement, statement.getClass()); + } 
+ } + } + } + + private Set getAllStatements() throws CompileException { + return ClientSideStatements.getInstance(dialect).getCompiledStatements(); + } + + private void assertParsing( + String value, Class statementClass) { + assertThat(this.parse(value)).isEqualTo(statementClass); + } + + private void testParseStatement( + String statement, Class statementClass) { + Truth.assertWithMessage("\"" + statement + "\" should be " + statementClass.getName()) + .that(this.parse(statement)) + .isEqualTo(statementClass); + assertParsing(upper(statement), statementClass); + assertParsing(lower(statement), statementClass); + assertParsing(withSpaces(statement), statementClass); + assertParsing(withTabs(statement), statementClass); + assertParsing(withLinefeeds(statement), statementClass); + assertParsing(withLeadingSpaces(statement), statementClass); + assertParsing(withLeadingTabs(statement), statementClass); + assertParsing(withLeadingLinefeeds(statement), statementClass); + assertParsing(withTrailingSpaces(statement), statementClass); + assertParsing(withTrailingTabs(statement), statementClass); + assertParsing(withTrailingLinefeeds(statement), statementClass); + + assertThat(parse(withInvalidPrefix(statement))).isNull(); + + ClientSideStatementImpl parseClientSideStatement = parser.parseClientSideStatement(statement); + boolean anySuffixAllowed = + parseClientSideStatement.getStatementType() == ClientSideStatementType.PARTITION + || parseClientSideStatement.getStatementType() + == ClientSideStatementType.RUN_PARTITIONED_QUERY; + if (anySuffixAllowed) { + assertThat(parse(withInvalidSuffix(statement))).isNotNull(); + } else { + assertThat(parse(withInvalidSuffix(statement))).isNull(); + } + + assertThat(parse(withPrefix("%", statement))).isNull(); + assertThat(parse(withPrefix("_", statement))).isNull(); + assertThat(parse(withPrefix("&", statement))).isNull(); + assertThat(parse(withPrefix("$", statement))).isNull(); + assertThat(parse(withPrefix("@", 
statement))).isNull(); + assertThat(parse(withPrefix("!", statement))).isNull(); + assertThat(parse(withPrefix("*", statement))).isNull(); + assertThat(parse(withPrefix("(", statement))).isNull(); + assertThat(parse(withPrefix(")", statement))).isNull(); + + if (!anySuffixAllowed) { + Truth.assertWithMessage(withSuffix("%", statement) + " is not a valid statement") + .that(parse(withSuffix("%", statement))) + .isNull(); + assertThat(parse(withSuffix("_", statement))).isNull(); + assertThat(parse(withSuffix("&", statement))).isNull(); + assertThat(parse(withSuffix("$", statement))).isNull(); + assertThat(parse(withSuffix("@", statement))).isNull(); + assertThat(parse(withSuffix("!", statement))).isNull(); + assertThat(parse(withSuffix("*", statement))).isNull(); + assertThat(parse(withSuffix("(", statement))).isNull(); + assertThat(parse(withSuffix(")", statement))).isNull(); + } + } + + private void testParseStatementWithOneParameterAtTheEnd( + String statement, Class statementClass) { + Truth.assertWithMessage("\"" + statement + "\" should be " + statementClass.getName()) + .that(this.parse(statement)) + .isEqualTo(statementClass); + assertParsing(upper(statement), statementClass); + assertParsing(lower(statement), statementClass); + assertParsing(withSpaces(statement), statementClass); + assertParsing(withTabs(statement), statementClass); + assertParsing(withLinefeeds(statement), statementClass); + assertParsing(withLeadingSpaces(statement), statementClass); + assertParsing(withLeadingTabs(statement), statementClass); + assertParsing(withLeadingLinefeeds(statement), statementClass); + assertParsing(withTrailingSpaces(statement), statementClass); + assertParsing(withTrailingTabs(statement), statementClass); + assertParsing(withTrailingLinefeeds(statement), statementClass); + + assertThat(parse(withInvalidPrefix(statement))).isNull(); + assertParsing(withInvalidSuffix(statement), statementClass); + + assertThat(parse(withPrefix("%", statement))).isNull(); + 
assertThat(parse(withPrefix("_", statement))).isNull(); + assertThat(parse(withPrefix("&", statement))).isNull(); + assertThat(parse(withPrefix("$", statement))).isNull(); + assertThat(parse(withPrefix("@", statement))).isNull(); + assertThat(parse(withPrefix("!", statement))).isNull(); + assertThat(parse(withPrefix("*", statement))).isNull(); + assertThat(parse(withPrefix("(", statement))).isNull(); + assertThat(parse(withPrefix(")", statement))).isNull(); + + assertParsing(withSuffix("%", statement), statementClass); + assertParsing(withSuffix("_", statement), statementClass); + assertParsing(withSuffix("&", statement), statementClass); + assertParsing(withSuffix("$", statement), statementClass); + assertParsing(withSuffix("@", statement), statementClass); + assertParsing(withSuffix("!", statement), statementClass); + assertParsing(withSuffix("*", statement), statementClass); + assertParsing(withSuffix("(", statement), statementClass); + assertParsing(withSuffix(")", statement), statementClass); + } + + @Test + public void testGoogleStandardSQLDialectConvertPositionalParametersToNamedParameters() { + assumeTrue(dialect == Dialect.GOOGLE_STANDARD_SQL); + + assertEquals( + "select * from foo where name=@p1", + parser.convertPositionalParametersToNamedParameters('?', "select * from foo where name=?") + .sqlWithNamedParameters); + assertEquals( + "@p1'?test?\"?test?\"?'@p2", + parser.convertPositionalParametersToNamedParameters('?', "?'?test?\"?test?\"?'?") + .sqlWithNamedParameters); + assertEquals( + "@p1'?it\\'?s'@p2", + parser.convertPositionalParametersToNamedParameters('?', "?'?it\\'?s'?") + .sqlWithNamedParameters); + assertEquals( + "@p1'?it\\\"?s'@p2", + parser.convertPositionalParametersToNamedParameters('?', "?'?it\\\"?s'?") + .sqlWithNamedParameters); + assertEquals( + "@p1\"?it\\\"?s\"@p2", + parser.convertPositionalParametersToNamedParameters('?', "?\"?it\\\"?s\"?") + .sqlWithNamedParameters); + assertEquals( + "@p1'''?it\\'?s'''@p2", + 
parser.convertPositionalParametersToNamedParameters('?', "?'''?it\\'?s'''?") + .sqlWithNamedParameters); + assertEquals( + "@p1\"\"\"?it\\\"?s\"\"\"@p2", + parser.convertPositionalParametersToNamedParameters('?', "?\"\"\"?it\\\"?s\"\"\"?") + .sqlWithNamedParameters); + + assertEquals( + "@p1`?it\\`?s`@p2", + parser.convertPositionalParametersToNamedParameters('?', "?`?it\\`?s`?") + .sqlWithNamedParameters); + assertEquals( + "@p1```?it\\`?s```@p2", + parser.convertPositionalParametersToNamedParameters('?', "?```?it\\`?s```?") + .sqlWithNamedParameters); + assertEquals( + "@p1'''?it\\'?s \n ?it\\'?s'''@p2", + parser.convertPositionalParametersToNamedParameters('?', "?'''?it\\'?s \n ?it\\'?s'''?") + .sqlWithNamedParameters); + assertEquals( + "@p1'?test?\\\\'@p2", + parser.convertPositionalParametersToNamedParameters('?', "?'?test?\\\\'?") + .sqlWithNamedParameters); + + assertUnclosedLiteral(parser, "?'?it\\'?s \n ?it\\'?s'?"); + assertUnclosedLiteral(parser, "?'?it\\'?s \n ?it\\'?s?"); + assertUnclosedLiteral(parser, "?'''?it\\'?s \n ?it\\'?s'?"); + + assertEquals( + "select 1, @p1, 'test?test', \"test?test\", foo.* from `foo` where col1=@p2 and col2='test'" + + " and col3=@p3 and col4='?' and col5=\"?\" and col6='?''?''?'", + parser.convertPositionalParametersToNamedParameters( + '?', + "select 1, ?, 'test?test', \"test?test\", foo.* from `foo` where col1=? and" + + " col2='test' and col3=? and col4='?' and col5=\"?\" and col6='?''?''?'") + .sqlWithNamedParameters); + + assertEquals( + "select * " + "from foo " + "where name=@p1 " + "and col2 like @p2 " + "and col3 > @p3", + parser.convertPositionalParametersToNamedParameters( + '?', + "select * " + "from foo " + "where name=? " + "and col2 like ? " + "and col3 > ?") + .sqlWithNamedParameters); + assertEquals( + "select * " + "from foo " + "where id between @p1 and @p2", + parser.convertPositionalParametersToNamedParameters( + '?', "select * " + "from foo " + "where id between ? 
and ?") + .sqlWithNamedParameters); + assertEquals( + "select * " + "from foo " + "limit @p1 offset @p2", + parser.convertPositionalParametersToNamedParameters( + '?', "select * " + "from foo " + "limit ? offset ?") + .sqlWithNamedParameters); + assertEquals( + "select * " + + "from foo " + + "where col1=@p1 " + + "and col2 like @p2 " + + "and col3 > @p3 " + + "and col4 < @p4 " + + "and col5 != @p5 " + + "and col6 not in (@p6, @p7, @p8) " + + "and col7 in (@p9, @p10, @p11) " + + "and col8 between @p12 and @p13", + parser.convertPositionalParametersToNamedParameters( + '?', + "select * " + + "from foo " + + "where col1=? " + + "and col2 like ? " + + "and col3 > ? " + + "and col4 < ? " + + "and col5 != ? " + + "and col6 not in (?, ?, ?) " + + "and col7 in (?, ?, ?) " + + "and col8 between ? and ?") + .sqlWithNamedParameters); + } + + enum CommentInjector { + NONE { + @Override + String inject(String sql, String comment) { + return String.format(sql, ""); + } + }, + BEFORE { + @Override + String inject(String sql, String comment) { + return comment + String.format(sql, ""); + } + }, + IN_THE_MIDDLE { + @Override + String inject(String sql, String comment) { + return String.format(sql, comment); + } + }, + AFTER { + @Override + String inject(String sql, String comment) { + return String.format(sql, "") + comment; + } + }; + + abstract String inject(String sql, String comment); + } + + @Test + public void testPostgreSQLDialectDialectConvertPositionalParametersToNamedParameters() { + assumeTrue(dialect == Dialect.POSTGRESQL); + + for (String comment : + new String[] { + "-- test comment\n", + "/* another test comment */", + "/* comment\nwith\nmultiple\nlines\n */", + "/* comment /* with nested */ comment */" + }) { + for (CommentInjector injector : CommentInjector.values()) { + assertEquals( + injector.inject("select * %sfrom foo where name=$1", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("select * %sfrom foo where name=?", 
comment)) + .sqlWithNamedParameters); + assertEquals( + injector.inject("$1%s'?test?\"?test?\"?'$2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("?%s'?test?\"?test?\"?'?", comment)) + .sqlWithNamedParameters); + assertEquals( + injector.inject("$1'?it\\''?s'%s$2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("?'?it\\''?s'%s?", comment)) + .sqlWithNamedParameters); + assertEquals( + injector.inject("$1'?it\\\"?s'%s$2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("?'?it\\\"?s'%s?", comment)) + .sqlWithNamedParameters); + assertEquals( + injector.inject("$1\"?it\\\"\"?s\"%s$2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("?\"?it\\\"\"?s\"%s?", comment)) + .sqlWithNamedParameters); + assertEquals( + injector.inject("$1%s'''?it\\''?s'''$2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("?%s'''?it\\''?s'''?", comment)) + .sqlWithNamedParameters); + assertEquals( + injector.inject("$1\"\"\"?it\\\"\"?s\"\"\"%s$2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("?\"\"\"?it\\\"\"?s\"\"\"%s?", comment)) + .sqlWithNamedParameters); + + assertEquals( + injector.inject("$1$$?it$?s$$%s$2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("?$$?it$?s$$%s?", comment)) + .sqlWithNamedParameters); + assertEquals( + injector.inject("$1$tag$?it$$?s$tag$%s$2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("?$tag$?it$$?s$tag$%s?", comment)) + .sqlWithNamedParameters); + assertEquals( + injector.inject("$1%s$$?it\\'?s \n ?it\\'?s$$$2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("?%s$$?it\\'?s \n ?it\\'?s$$?", comment)) + .sqlWithNamedParameters); + + // Note: PostgreSQL allows a single-quoted string literal to 
contain line feeds. + assertEquals( + injector.inject("$1'?it\\''?s \n ?it\\''?s'%s$2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("?'?it\\''?s \n ?it\\''?s'%s?", comment)) + .sqlWithNamedParameters); + assertUnclosedLiteral(parser, "?'?it\\''?s \n ?it\\''?s?"); + assertEquals( + injector.inject("$1%s'''?it\\''?s \n ?it\\''?s'$2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', injector.inject("?%s'''?it\\''?s \n ?it\\''?s'?", comment)) + .sqlWithNamedParameters); + + assertEquals( + injector.inject( + "select 1, $1, 'test?test', \"test?test\", %sfoo.* from `foo` where col1=$2 and" + + " col2='test' and col3=$3 and col4='?' and col5=\"?\" and col6='?''?''?'", + comment), + parser.convertPositionalParametersToNamedParameters( + '?', + injector.inject( + "select 1, ?, 'test?test', \"test?test\", %sfoo.* from `foo` where col1=?" + + " and col2='test' and col3=? and col4='?' and col5=\"?\" and" + + " col6='?''?''?'", + comment)) + .sqlWithNamedParameters); + + assertEquals( + injector.inject( + "select * " + + "%sfrom foo " + + "where name=$1 " + + "and col2 like $2 " + + "and col3 > $3", + comment), + parser.convertPositionalParametersToNamedParameters( + '?', + injector.inject( + "select * " + + "%sfrom foo " + + "where name=? " + + "and col2 like ? " + + "and col3 > ?", + comment)) + .sqlWithNamedParameters); + assertEquals( + injector.inject("select * " + "from foo " + "where id between $1%s and $2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', + injector.inject( + "select * " + "from foo " + "where id between ?%s and ?", comment)) + .sqlWithNamedParameters); + assertEquals( + injector.inject("select * " + "from foo " + "limit $1 %s offset $2", comment), + parser.convertPositionalParametersToNamedParameters( + '?', + injector.inject("select * " + "from foo " + "limit ? 
%s offset ?", comment)) + .sqlWithNamedParameters); + assertEquals( + injector.inject( + "select * " + + "from foo " + + "where col1=$1 " + + "and col2 like $2 " + + " %s " + + "and col3 > $3 " + + "and col4 < $4 " + + "and col5 != $5 " + + "and col6 not in ($6, $7, $8) " + + "and col7 in ($9, $10, $11) " + + "and col8 between $12 and $13", + comment), + parser.convertPositionalParametersToNamedParameters( + '?', + injector.inject( + "select * " + + "from foo " + + "where col1=? " + + "and col2 like ? " + + " %s " + + "and col3 > ? " + + "and col4 < ? " + + "and col5 != ? " + + "and col6 not in (?, ?, ?) " + + "and col7 in (?, ?, ?) " + + "and col8 between ? and ?", + comment)) + .sqlWithNamedParameters); + } + } + } + + @Test + public void testPostgreSQLGetQueryParameters() { + assumeTrue(dialect == Dialect.POSTGRESQL); + + PostgreSQLStatementParser parser = (PostgreSQLStatementParser) this.parser; + assertEquals(ImmutableSet.of(), parser.getQueryParameters("select * from foo")); + assertEquals( + ImmutableSet.of("$1"), parser.getQueryParameters("select * from foo where bar=$1")); + assertEquals( + ImmutableSet.of("$1", "$2", "$3"), + parser.getQueryParameters("select $2 from foo where bar=$1 and baz=$3")); + assertEquals( + ImmutableSet.of("$1", "$3"), + parser.getQueryParameters("select '$2' from foo where bar=$1 and baz in ($1, $3)")); + assertEquals( + ImmutableSet.of("$1"), + parser.getQueryParameters("select '$2' from foo where bar=$1 and baz=$foo")); + assertEquals( + ImmutableSet.of("$1"), + parser.getQueryParameters( + "/* @lock_scanned_ranges = exclusive */ select -- random comment\n" + + " '$2' from foo /* comment /* with nested comment */ outside of nested comment */" + + " where bar=$1 and baz=$foo")); + } + + @Test + public void testGoogleSQLReturningClause() { + assumeTrue(dialect == Dialect.GOOGLE_STANDARD_SQL); + + SpannerStatementParser parser = (SpannerStatementParser) this.parser; + assertTrue( + parser + .parse(Statement.of("insert into x 
(a,b) values (1,2) then return *")) + .hasReturningClause()); + assertTrue( + parser + .parse(Statement.of("insert into x (a,b) values (1,2) then\nreturn *")) + .hasReturningClause()); + assertTrue( + parser + .parse(Statement.of("insert into x (a,b) values (1,2)\nthen\n\n\nreturn\n*")) + .hasReturningClause()); + assertTrue( + parser + .parse(Statement.of("insert into x (a,b) values (1,2)then return *")) + .hasReturningClause()); + assertTrue( + parser + .parse(Statement.of("insert into x (a,b) values (1,2) then return(a)")) + .hasReturningClause()); + assertTrue( + parser + .parse(Statement.of("insert into x (a,b) values (1,2)then return(a)")) + .hasReturningClause()); + assertTrue( + parser + .parse(Statement.of("insert into x (a,b) values (1,2) then/*comment*/return *")) + .hasReturningClause()); + assertTrue( + parser + .parse(Statement.of("insert into x (a,b) values (1,2) then return /*then return*/ *")) + .hasReturningClause()); + assertTrue( + parser + .parse(Statement.of("insert into x (a,b) values (1,2)then/*comment*/return *")) + .hasReturningClause()); + assertTrue( + parser + .parse(Statement.of("insert into x (a,b) values (1,2)then/*comment*/return(a)")) + .hasReturningClause()); + assertTrue( + parser + .parse(Statement.of("insert into x (a,b) values (1,2)then /*comment*/return(a)")) + .hasReturningClause()); + assertTrue( + parser + .parse( + Statement.of("insert into x (a,b) values (1,2)/*comment*/then/*comment*/return(a)")) + .hasReturningClause()); + assertTrue( + parser + .parse( + Statement.of( + "insert into x (a,b) values" + + " (1,2)/*comment*/then/*comment*/return/*comment*/(a)")) + .hasReturningClause()); + assertTrue( + parser + .parse( + Statement.of( + "insert into x (a,b) values (1,2)/*comment" + + "*/then" + + "/*comment" + + "*/return/*" + + "comment*/(a)")) + .hasReturningClause()); + assertTrue( + parser + .parse(Statement.of("delete from x where y=\"z\"then return *")) + .hasReturningClause()); + assertTrue( + parser + 
.parse(Statement.of("insert into x select 'then return' as returning then return *")) + .hasReturningClause()); + assertTrue( + parser.parse(Statement.of("delete from x where 10=`z`then return *")).hasReturningClause()); + assertFalse( + parser + .parse(Statement.of("insert into x (a,b) values (1,2) returning (a)")) + .hasReturningClause()); + assertFalse( + parser + .parse(Statement.of("insert into x (a,b) values (1,2) /*then return **/")) + .hasReturningClause()); + assertFalse( + parser.parse(Statement.of("insert into x (a,b) values (1,2)")).hasReturningClause()); + assertFalse( + parser + .parse(Statement.of("insert into x (a,b) values (1,2)thenreturn*")) + .hasReturningClause()); + assertTrue( + parser + .parse(Statement.of("insert into t(a) select \"x\"then return*")) + .hasReturningClause()); + } + + @Test + public void testPostgreSQLReturningClause() { + assumeTrue(dialect == Dialect.POSTGRESQL); + + PostgreSQLStatementParser parser = (PostgreSQLStatementParser) this.parser; + assertTrue( + parser + .parse(Statement.of("insert into x (a,b) values (1,2) returning *")) + .hasReturningClause()); + assertTrue( + parser + .parse(Statement.of("insert into x (a,b) values (1,2)returning *")) + .hasReturningClause()); + assertTrue( + parser + .parse(Statement.of("insert into x (a,b) values (1,2) returning(a)")) + .hasReturningClause()); + assertTrue( + parser + .parse(Statement.of("insert into x (a,b) values (1,2)returning(a)")) + .hasReturningClause()); + assertTrue( + parser + .parse(Statement.of("insert into x (a,b) values (1,2)/*comment*/returning(a)")) + .hasReturningClause()); + assertTrue( + parser + .parse( + Statement.of("insert into x (a,b) values (1,2)/*comment*/returning/*comment*/(a)")) + .hasReturningClause()); + assertTrue( + parser + .parse( + Statement.of( + "insert into x (a,b) values (1,2)/*comment" + "*/returning/*" + "comment*/(a)")) + .hasReturningClause()); + assertTrue( + parser + .parse(Statement.of("insert into x select 1 as returning 
returning *")) + .hasReturningClause()); + assertTrue( + parser + .parse(Statement.of("insert into x select 'returning' as returning returning *")) + .hasReturningClause()); + assertTrue( + parser + .parse(Statement.of("insert into x select 'returning'as returning returning *")) + .hasReturningClause()); + assertTrue( + parser.parse(Statement.of("delete from x where y=\"z\"returning *")).hasReturningClause()); + assertTrue( + parser.parse(Statement.of("delete from x where y='z'returning *")).hasReturningClause()); + assertTrue( + parser + .parse(Statement.of("insert into t1 select 1/*as /*returning*/ returning*/returning *")) + .hasReturningClause()); + assertFalse( + parser + .parse(Statement.of("insert into x (a,b) values (1,2) then return (a)")) + .hasReturningClause()); + assertFalse( + parser.parse(Statement.of("insert into x (a,b) values (1,2)")).hasReturningClause()); + assertFalse( + parser.parse(Statement.of("insert into t1 select 1 as returning")).hasReturningClause()); + assertFalse( + parser + .parse(Statement.of("insert into t1 select 1\nas\n\nreturning")) + .hasReturningClause()); + assertFalse( + parser.parse(Statement.of("insert into t1 select 1asreturning")).hasReturningClause()); + assertTrue( + parser + .parse(Statement.of("insert into t1 select 1 as/*eomment*/returning returning *")) + .hasReturningClause()); + assertFalse( + parser + .parse(Statement.of("UPDATE x SET y = $$ RETURNING a, b, c$$ WHERE z = 123")) + .hasReturningClause()); + assertFalse( + parser + .parse( + Statement.of("UPDATE x SET y = $foobar$ RETURNING a, b, c$foobar$ WHERE z = 123")) + .hasReturningClause()); + assertFalse( + parser + .parse(Statement.of("UPDATE x SET y = $returning$ returning $returning$ WHERE z = 123")) + .hasReturningClause()); + assertTrue( + parser + .parse( + Statement.of( + "UPDATE x SET y = $returning$returning$returning$ WHERE z = 123 ReTuRnInG *")) + .hasReturningClause()); + assertTrue( + parser.parse(Statement.of("insert into t1 select 1 
returning*")).hasReturningClause()); + assertTrue( + parser.parse(Statement.of("insert into t1 select 2returning*")).hasReturningClause()); + assertTrue( + parser.parse(Statement.of("insert into t1 select 10e2returning*")).hasReturningClause()); + assertFalse( + parser + .parse(Statement.of("insert into t1 select 'test''returning *'")) + .hasReturningClause()); + assertTrue( + parser.parse(Statement.of("insert into t select 2,3returning*")).hasReturningClause()); + assertTrue( + parser.parse(Statement.of("insert into t1 select 10.returning*")).hasReturningClause()); + } + + int skipSingleLineComment(String sql, int prefixLength, int startIndex) { + return AbstractStatementParser.skipSingleLineComment( + dialect, sql, prefixLength, startIndex, null); + } + + int skipMultiLineComment(String sql, int startIndex) { + return parser.skipMultiLineComment(sql, startIndex, null); + } + + @Test + public void testConcatenatedLiterals() { + assumeTrue(dialect == Dialect.GOOGLE_STANDARD_SQL); + + assertTrue(parser.isUpdateStatement("UPDATE foo SET name='foo' 'bar' WHERE ID=1")); + assertTrue(parser.isQuery("SELECT 'boo' 'bar' FROM my_table WHERE value='lit1''lit2'")); + assertTrue( + parser.checkReturningClause( + "insert into my_table (id, value) values (1, 'value' '1') then return id")); + assertTrue(parser.isUpdateStatement("UPDATE foo SET name=b'foo' b'bar' WHERE ID=1")); + + assertEquals( + 1, + parser.convertPositionalParametersToNamedParameters( + '?', "select 'foo?''bar?' where id=? and value='?' 
'?'") + .numberOfParameters, + 1); + } + + @Test + public void testSkipSingleLineComment() { + assumeTrue(dialect == Dialect.POSTGRESQL); + + assertEquals(7, skipSingleLineComment("-- foo\n", 2, 0)); + assertEquals(7, skipSingleLineComment("-- foo\nbar", 2, 0)); + assertEquals(6, skipSingleLineComment("-- foo", 2, 0)); + assertEquals(11, skipSingleLineComment("bar -- foo\n", 2, 4)); + assertEquals(11, skipSingleLineComment("bar -- foo\nbar", 2, 4)); + assertEquals(10, skipSingleLineComment("bar -- foo", 2, 4)); + } + + @Test + public void testSkipMultiLineComment() { + assumeTrue(dialect == Dialect.POSTGRESQL); + + assertEquals(9, skipMultiLineComment("/* foo */", 0)); + assertEquals(9, skipMultiLineComment("/* foo */ bar", 0)); + assertEquals(6, skipMultiLineComment("/* foo", 0)); + assertEquals(8, skipMultiLineComment("/* foo *", 0)); + assertEquals(9, skipMultiLineComment("/* foo **", 0)); + assertEquals(10, skipMultiLineComment("/* foo **/ ", 0)); + assertEquals(13, skipMultiLineComment("bar /* foo */", 4)); + assertEquals(13, skipMultiLineComment("bar /* foo */bar", 4)); + assertEquals(10, skipMultiLineComment("bar /* foo", 4)); + + assertEquals( + "/* foo /* inner comment */ not in inner comment */".length(), + skipMultiLineComment("/* foo /* inner comment */ not in inner comment */ bar", 0)); + } + + @Test + public void testStatementCache_NonParameterizedStatement() { + CacheStats statsBefore = parser.getStatementCacheStats(); + + String sql = "select foo from bar where id=" + UUID.randomUUID(); + ParsedStatement parsedStatement1 = parser.parse(Statement.of(sql)); + assertEquals(StatementType.QUERY, parsedStatement1.getType()); + + ParsedStatement parsedStatement2 = parser.parse(Statement.of(sql)); + assertEquals(StatementType.QUERY, parsedStatement2.getType()); + + // Even though the parsed statements are cached, the returned instances are not the same. 
+ // This makes sure that statements with the same SQL string and different parameter values + // can use the cache. + assertNotSame(parsedStatement1, parsedStatement2); + + CacheStats statsAfter = parser.getStatementCacheStats(); + CacheStats stats = statsAfter.minus(statsBefore); + + // The first query had a cache miss. The second a cache hit. + assertEquals(1, stats.missCount()); + assertEquals(1, stats.hitCount()); + } + + @Test + public void testStatementCache_ParameterizedStatement() { + CacheStats statsBefore = parser.getStatementCacheStats(); + + String sql = + "select " + + UUID.randomUUID() + + " from bar where id=" + + (dialect == Dialect.POSTGRESQL ? "$1" : "@p1"); + Statement statement1 = Statement.newBuilder(sql).bind("p1").to(1L).build(); + Statement statement2 = Statement.newBuilder(sql).bind("p1").to(2L).build(); + + ParsedStatement parsedStatement1 = parser.parse(statement1); + assertEquals(StatementType.QUERY, parsedStatement1.getType()); + assertEquals(parsedStatement1.getStatement(), statement1); + + ParsedStatement parsedStatement2 = parser.parse(statement2); + assertEquals(StatementType.QUERY, parsedStatement2.getType()); + assertEquals(parsedStatement2.getStatement(), statement2); + + // Even though the parsed statements are cached, the returned instances are not the same. + // This makes sure that statements with the same SQL string and different parameter values + // can use the cache. + assertNotSame(parsedStatement1, parsedStatement2); + + CacheStats statsAfter = parser.getStatementCacheStats(); + CacheStats stats = statsAfter.minus(statsBefore); + + // The first query had a cache miss. The second a cache hit. 
+ assertEquals(1, stats.missCount()); + assertEquals(1, stats.hitCount()); + } + + @Test + public void testClientSideStatementWithComment() { + String sql = "-- Null (no timeout)\n" + "SET STATEMENT_TIMEOUT=null"; + ParsedStatement parsedStatement = parser.parse(Statement.of(sql)); + assertEquals(StatementType.CLIENT_SIDE, parsedStatement.getType()); + assertEquals( + ClientSideStatementType.SET_STATEMENT_TIMEOUT, + parsedStatement.getClientSideStatementType()); + } + + static void assertUnclosedLiteral(AbstractStatementParser parser, String sql) { + SpannerException exception = + assertThrows( + SpannerException.class, + () -> parser.convertPositionalParametersToNamedParameters('?', sql)); + assertEquals(ErrorCode.INVALID_ARGUMENT, exception.getErrorCode()); + assertTrue( + exception.getMessage(), + exception + .getMessage() + .startsWith( + ErrorCode.INVALID_ARGUMENT.name() + + ": SQL statement contains an unclosed literal: " + + sql)); + } + + @SuppressWarnings("unchecked") + private Class parse(String statement) { + ClientSideStatementImpl optional = parser.parseClientSideStatement(statement); + return optional != null ? 
(Class) optional.getClass() : null; + } + + private String upper(String statement) { + return statement.toUpperCase(); + } + + private String lower(String statement) { + return statement.toLowerCase(); + } + + private String withLeadingSpaces(String statement) { + return " " + statement; + } + + private String withLeadingTabs(String statement) { + return "\t\t\t" + statement; + } + + private String withLeadingLinefeeds(String statement) { + return "\n\n\n" + statement; + } + + private String withTrailingSpaces(String statement) { + return statement + " "; + } + + private String withTrailingTabs(String statement) { + return statement + "\t\t"; + } + + private String withTrailingLinefeeds(String statement) { + return statement + "\n\n"; + } + + private String withSpaces(String statement) { + return statement.replaceAll(" ", " "); + } + + private String withTabs(String statement) { + return statement.replaceAll(" ", "\t"); + } + + private String withLinefeeds(String statement) { + return statement.replaceAll(" ", "\n"); + } + + private String withInvalidPrefix(String statement) { + return "foo " + statement; + } + + private String withInvalidSuffix(String statement) { + return statement + " bar"; + } + + private String withPrefix(String prefix, String statement) { + return prefix + statement; + } + + private String withSuffix(String suffix, String statement) { + return statement + suffix; + } + + private List readStatementsFromFile(String filename) { + File file = new File(getClass().getResource(filename).getFile()); + StringBuilder builder = new StringBuilder(); + try (Scanner scanner = new Scanner(file)) { + while (scanner.hasNextLine()) { + String line = scanner.nextLine(); + builder.append(line).append("\n"); + } + } catch (FileNotFoundException e) { + throw new RuntimeException(e); + } + String script = builder.toString().replaceAll(COPYRIGHT_PATTERN, ""); + String[] array = script.split(";"); + List res = new ArrayList<>(array.length); + for (String statement : 
array) { + if (statement != null && statement.trim().length() > 0) { + res.add(statement); + } + } + return res; + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/StatementResultImplTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/StatementResultImplTest.java new file mode 100644 index 000000000000..0a55ff2c4202 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/StatementResultImplTest.java @@ -0,0 +1,224 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.mock; + +import com.google.cloud.ByteArray; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType; +import com.google.cloud.spanner.connection.StatementResult.ResultType; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class StatementResultImplTest { + + @Test + public void testNoResultGetResultSet() { + StatementResult subject = StatementResultImpl.noResult(); + assertThat(subject.getResultType(), is(equalTo(ResultType.NO_RESULT))); + try { + subject.getResultSet(); + fail("Expected exception"); + } catch (SpannerException ex) { + assertEquals(ErrorCode.FAILED_PRECONDITION, ex.getErrorCode()); + } + } + + @Test + public void testNoResultGetUpdateCount() { + StatementResult subject = StatementResultImpl.noResult(); + assertThat(subject.getResultType(), is(equalTo(ResultType.NO_RESULT))); + try { + subject.getUpdateCount(); + fail("Expected exception"); + } catch (SpannerException ex) { + assertEquals(ErrorCode.FAILED_PRECONDITION, ex.getErrorCode()); + } + } + + @Test + public void testResultSetGetResultSet() { + StatementResult subject = StatementResultImpl.of(mock(ResultSet.class)); + assertThat(subject.getResultType(), is(equalTo(ResultType.RESULT_SET))); + 
assertThat(subject.getResultSet(), is(notNullValue())); + } + + @Test + public void testResultSetGetUpdateCount() { + StatementResult subject = StatementResultImpl.of(mock(ResultSet.class)); + assertThat(subject.getResultType(), is(equalTo(ResultType.RESULT_SET))); + try { + subject.getUpdateCount(); + fail("Expected exception"); + } catch (SpannerException ex) { + assertEquals(ErrorCode.FAILED_PRECONDITION, ex.getErrorCode()); + } + } + + @Test + public void testUpdateCountGetResultSet() { + StatementResult subject = StatementResultImpl.of(1L); + assertThat(subject.getResultType(), is(equalTo(ResultType.UPDATE_COUNT))); + try { + subject.getResultSet(); + fail("Expected exception"); + } catch (SpannerException ex) { + assertEquals(ErrorCode.FAILED_PRECONDITION, ex.getErrorCode()); + } + } + + @Test + public void testUpdateCountGetUpdateCount() { + StatementResult subject = StatementResultImpl.of(1L); + assertThat(subject.getResultType(), is(equalTo(ResultType.UPDATE_COUNT))); + assertThat(subject.getUpdateCount(), is(notNullValue())); + } + + @Test + public void testBooleanResultSetGetResultSet() { + StatementResult subject = + StatementResultImpl.resultSet("foo", Boolean.TRUE, ClientSideStatementType.SHOW_AUTOCOMMIT); + assertThat(subject.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat( + subject.getClientSideStatementType(), is(equalTo(ClientSideStatementType.SHOW_AUTOCOMMIT))); + assertThat(subject.getResultSet(), is(notNullValue())); + assertThat(subject.getResultSet().next(), is(true)); + assertThat(subject.getResultSet().getBoolean("foo"), is(true)); + assertThat(subject.getResultSet().next(), is(false)); + } + + @Test + public void testLongResultSetGetResultSet() { + StatementResult subject = + StatementResultImpl.resultSet("foo", 10L, ClientSideStatementType.SHOW_READ_ONLY_STALENESS); + assertThat(subject.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat( + subject.getClientSideStatementType(), + 
is(equalTo(ClientSideStatementType.SHOW_READ_ONLY_STALENESS))); + assertThat(subject.getResultSet(), is(notNullValue())); + assertThat(subject.getResultSet().next(), is(true)); + assertThat(subject.getResultSet().getLong("foo"), is(equalTo(10L))); + assertThat(subject.getResultSet().next(), is(false)); + } + + @Test + public void testLongArrayResultSetGetResultSet() { + StatementResult subject = + StatementResultImpl.resultSet( + "foo", new long[] {1L, 2L, 3L}, ClientSideStatementType.SHOW_RETRY_ABORTS_INTERNALLY); + assertThat(subject.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat( + subject.getClientSideStatementType(), + is(equalTo(ClientSideStatementType.SHOW_RETRY_ABORTS_INTERNALLY))); + assertThat(subject.getResultSet(), is(notNullValue())); + assertThat(subject.getResultSet().next(), is(true)); + assertThat(subject.getResultSet().getLongArray("foo"), is(equalTo(new long[] {1L, 2L, 3L}))); + assertThat(subject.getResultSet().next(), is(false)); + } + + @Test + public void testStringResultSetGetResultSet() { + StatementResult subject = + StatementResultImpl.resultSet( + "foo", "bar", ClientSideStatementType.SHOW_READ_ONLY_STALENESS); + assertThat(subject.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat( + subject.getClientSideStatementType(), + is(equalTo(ClientSideStatementType.SHOW_READ_ONLY_STALENESS))); + assertThat(subject.getResultSet(), is(notNullValue())); + assertThat(subject.getResultSet().next(), is(true)); + assertThat(subject.getResultSet().getString("foo"), is(equalTo("bar"))); + assertThat(subject.getResultSet().next(), is(false)); + + subject = + StatementResultImpl.resultSet( + "path", "descriptors.pb", ClientSideStatementType.SHOW_PROTO_DESCRIPTORS_FILE_PATH); + assertThat(subject.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat( + subject.getClientSideStatementType(), + is(equalTo(ClientSideStatementType.SHOW_PROTO_DESCRIPTORS_FILE_PATH))); + assertThat(subject.getResultSet(), 
is(notNullValue())); + assertThat(subject.getResultSet().next(), is(true)); + assertThat(subject.getResultSet().getString("path"), is(equalTo("descriptors.pb"))); + assertThat(subject.getResultSet().next(), is(false)); + } + + @Test + public void testEnumResultSetGetResultSet() { + StatementResult subject = + StatementResultImpl.resultSet( + "foo", TransactionMode.READ_ONLY_TRANSACTION, ClientSideStatementType.SHOW_READONLY); + assertThat(subject.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat( + subject.getClientSideStatementType(), is(equalTo(ClientSideStatementType.SHOW_READONLY))); + assertThat(subject.getResultSet(), is(notNullValue())); + assertThat(subject.getResultSet().next(), is(true)); + assertThat( + subject.getResultSet().getString("foo"), + is(equalTo(TransactionMode.READ_ONLY_TRANSACTION.toString()))); + assertThat(subject.getResultSet().next(), is(false)); + } + + @Test + public void testTimestampResultSetGetResultSet() { + StatementResult subject = + StatementResultImpl.resultSet( + "foo", + Timestamp.ofTimeSecondsAndNanos(10L, 10), + ClientSideStatementType.SHOW_READ_TIMESTAMP); + assertThat(subject.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat( + subject.getClientSideStatementType(), + is(equalTo(ClientSideStatementType.SHOW_READ_TIMESTAMP))); + assertThat(subject.getResultSet(), is(notNullValue())); + assertThat(subject.getResultSet().next(), is(true)); + assertThat( + subject.getResultSet().getTimestamp("foo"), + is(equalTo(Timestamp.ofTimeSecondsAndNanos(10L, 10)))); + assertThat(subject.getResultSet().next(), is(false)); + } + + @Test + public void testBytesResultSetGetResultSet() { + StatementResult subject = + StatementResultImpl.resultSet( + "foo", "protoDescriptors".getBytes(), ClientSideStatementType.SHOW_PROTO_DESCRIPTORS); + assertEquals(subject.getResultType(), ResultType.RESULT_SET); + assertEquals( + subject.getClientSideStatementType(), ClientSideStatementType.SHOW_PROTO_DESCRIPTORS); + 
assertNotNull(subject.getResultSet()); + assertTrue(subject.getResultSet().next()); + assertEquals( + subject.getResultSet().getBytes("foo"), ByteArray.copyFrom("protoDescriptors".getBytes())); + assertFalse(subject.getResultSet().next()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/StatementTimeoutTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/StatementTimeoutTest.java new file mode 100644 index 000000000000..e854b3d9d907 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/StatementTimeoutTest.java @@ -0,0 +1,1196 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeFalse; +import static org.junit.Assume.assumeTrue; + +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.ForceCloseSpannerFunction; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SessionPoolOptions; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.AbstractConnectionImplTest.ConnectionConsumer; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest.ITConnection; +import com.google.cloud.spanner.connection.SpannerPool.CheckAndCloseSpannersMode; +import com.google.cloud.spanner.connection.StatementExecutor.StatementExecutorType; +import com.google.common.base.Stopwatch; +import com.google.longrunning.Operation; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Any; +import com.google.protobuf.Empty; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import io.grpc.Status; +import java.time.Duration; +import java.util.ArrayList; +import java.util.ConcurrentModificationException; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; 
+import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.Timeout; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class StatementTimeoutTest extends AbstractMockServerTest { + + private static final String SLOW_SELECT = "SELECT foo FROM bar"; + private static final String INVALID_SELECT = "SELECT FROM bar"; // missing columns / * + private static final String SLOW_DDL = "CREATE TABLE foo"; + private static final String FAST_DDL = "CREATE TABLE fast_table"; + private static final String SLOW_UPDATE = "UPDATE foo SET col1=1 WHERE id=2"; + + /** Execution time for statements that have been defined as slow. */ + private static final int EXECUTION_TIME_SLOW_STATEMENT = 10_000; + + /** + * This timeout should be high enough that it will never be exceeded, even on a slow build + * environment, but still significantly lower than the expected execution time of the slow + * statements. + */ + private static final long TIMEOUT_FOR_FAST_STATEMENTS = 1000L; + + /** + * This timeout should be low enough that it will not make the test case unnecessarily slow, but + * still high enough that it would normally not be exceeded for a statement that is executed + * directly. + */ + private static final int TIMEOUT_FOR_SLOW_STATEMENTS = 50; + + // Set a global timeout to ensure that tests that freeze the mock server fail within a reasonable + // amount of time if they misbehave. 
+ @Rule public Timeout globalTimeout = Timeout.seconds(10); + + @Parameters(name = "statementExecutorType = {0}") + public static Object[] parameters() { + return StatementExecutorType.values(); + } + + @SuppressWarnings("ClassEscapesDefinedScope") + @Parameter + public StatementExecutorType statementExecutorType; + + protected ITConnection createConnection(String additionalUrlOptions) { + String urlSuffix = + ";trackSessionLeaks=false" + (additionalUrlOptions == null ? "" : additionalUrlOptions); + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri(getBaseUrl() + urlSuffix) + .setStatementExecutorType(statementExecutorType) + .setConfigurator( + optionsConfigurator -> { + optionsConfigurator + .getDatabaseAdminStubSettingsBuilder() + .updateDatabaseDdlOperationSettings() + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelayDuration(Duration.ofMillis(1L)) + .setMaxRetryDelayDuration(Duration.ofMillis(1L)) + .setRetryDelayMultiplier(1.0) + .setTotalTimeoutDuration(Duration.ofMinutes(10L)) + .build())); + optionsConfigurator.setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setWaitForMinSessionsDuration(Duration.ofSeconds(5L)) + .build()); + }) + .build(); + return createITConnection(options); + } + + protected ITConnection createConnection() { + return createConnection(""); + } + + @Before + public void setup() { + // Set up a connection and get the dialect to ensure that the auto-detect-dialect query has + // already been executed when any of the test cases start. This is necessary to ensure that any + // errors or slow execution times that are used by the different test cases are not applied to + // the query that is used for automatic dialect detection. 
+ try (Connection connection = createConnection()) { + connection.getDialect(); + } + } + + @After + public void clearExecutionTimes() { + mockSpanner.removeAllExecutionTimes(); + SpannerPool.INSTANCE.checkAndCloseSpanners( + CheckAndCloseSpannersMode.ERROR, new ForceCloseSpannerFunction(5L, TimeUnit.MILLISECONDS)); + } + + @Test + public void testTimeoutExceptionReadOnlyAutocommit() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setReadOnly(true); + connection.setStatementTimeout(TIMEOUT_FOR_SLOW_STATEMENTS, TimeUnit.MILLISECONDS); + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.executeQuery(SELECT_RANDOM_STATEMENT)); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + } + + @Test + public void testUrlTimeoutExceptionReadOnlyAutocommit() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = + createConnection(";statement_timeout='" + TIMEOUT_FOR_SLOW_STATEMENTS + "ms'")) { + connection.setAutocommit(true); + connection.setReadOnly(true); + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.executeQuery(SELECT_RANDOM_STATEMENT)); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + } + + @Test + public void testTimeoutExceptionReadOnlyAutocommitMultipleStatements() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setReadOnly(true); + connection.setStatementTimeout(TIMEOUT_FOR_SLOW_STATEMENTS, TimeUnit.MILLISECONDS); + // assert that multiple statements after each other also time out 
+ for (int i = 0; i < 2; i++) { + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.executeQuery(SELECT_RANDOM_STATEMENT)); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + // try to do a new query that is fast. + mockSpanner.removeAllExecutionTimes(); + connection.setStatementTimeout(TIMEOUT_FOR_FAST_STATEMENTS, TimeUnit.MILLISECONDS); + try (ResultSet rs = connection.executeQuery(SELECT_RANDOM_STATEMENT)) { + assertNotNull(rs); + } + } + } + + @Test + public void testTimeoutExceptionReadOnlyTransactional() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setReadOnly(true); + connection.setAutocommit(false); + connection.setStatementTimeout(TIMEOUT_FOR_SLOW_STATEMENTS, TimeUnit.MILLISECONDS); + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.executeQuery(SELECT_RANDOM_STATEMENT)); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + } + + @Test + public void testTimeoutExceptionReadOnlyTransactionMultipleStatements() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setReadOnly(true); + connection.setAutocommit(false); + connection.setStatementTimeout(TIMEOUT_FOR_SLOW_STATEMENTS, TimeUnit.MILLISECONDS); + // assert that multiple statements after each other also time out + for (int i = 0; i < 2; i++) { + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.executeQuery(SELECT_RANDOM_STATEMENT)); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + // do a rollback without any chance of a timeout + connection.clearStatementTimeout(); + connection.rollback(); + // try to do a new query that is fast. 
+ mockSpanner.removeAllExecutionTimes(); + connection.setStatementTimeout(TIMEOUT_FOR_FAST_STATEMENTS, TimeUnit.MILLISECONDS); + try (ResultSet rs = connection.executeQuery(SELECT_RANDOM_STATEMENT)) { + assertNotNull(rs); + } + } + } + + @Test + public void testTimeoutExceptionReadWriteAutocommit() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setStatementTimeout(TIMEOUT_FOR_SLOW_STATEMENTS, TimeUnit.MILLISECONDS); + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.executeQuery(SELECT_RANDOM_STATEMENT)); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + } + + @Test + public void testTimeoutExceptionReadWriteAutocommitMultipleStatements() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setStatementTimeout(TIMEOUT_FOR_SLOW_STATEMENTS, TimeUnit.MILLISECONDS); + // assert that multiple statements after each other also time out + for (int i = 0; i < 2; i++) { + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.executeQuery(SELECT_RANDOM_STATEMENT)); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + // try to do a new query that is fast. 
+ mockSpanner.removeAllExecutionTimes(); + connection.setStatementTimeout(TIMEOUT_FOR_FAST_STATEMENTS, TimeUnit.MILLISECONDS); + try (ResultSet rs = connection.executeQuery(SELECT_RANDOM_STATEMENT)) { + assertNotNull(rs); + } + } + } + + @Test + public void testUrlStatementTimeoutOverrideToSucceed() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = + createConnection(";statement_timeout='" + TIMEOUT_FOR_SLOW_STATEMENTS + "ms'")) { + connection.setAutocommit(true); + for (int i = 0; i < 2; i++) { + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.executeQuery(SELECT_RANDOM_STATEMENT)); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + + // Remove slow behavior and verify a fast query succeeds after overriding the timeout. + mockSpanner.removeAllExecutionTimes(); + connection.setStatementTimeout(TIMEOUT_FOR_FAST_STATEMENTS, TimeUnit.MILLISECONDS); + try (ResultSet rs = connection.executeQuery(SELECT_RANDOM_STATEMENT)) { + assertNotNull(rs); + } + } + } + + @Test + public void testTimeoutExceptionReadWriteAutocommitSlowUpdate() { + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setStatementTimeout(TIMEOUT_FOR_SLOW_STATEMENTS, TimeUnit.MILLISECONDS); + SpannerException e = + assertThrows(SpannerException.class, () -> connection.execute(INSERT_STATEMENT)); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + } + + @Test + public void testTimeoutExceptionReadWriteAutocommitSlowUpdateMultipleStatements() { + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + 
connection.setAutocommit(true); + connection.setStatementTimeout(TIMEOUT_FOR_SLOW_STATEMENTS, TimeUnit.MILLISECONDS); + + // assert that multiple statements after each other also time out + for (int i = 0; i < 2; i++) { + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.execute(Statement.of(SLOW_UPDATE))); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + // try to do a new update that is fast. + mockSpanner.removeAllExecutionTimes(); + connection.setStatementTimeout(TIMEOUT_FOR_FAST_STATEMENTS, TimeUnit.MILLISECONDS); + assertEquals(UPDATE_COUNT, connection.execute(INSERT_STATEMENT).getUpdateCount().longValue()); + } + } + + @Test + public void testTimeoutExceptionReadWriteAutocommitSlowCommit() { + mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setStatementTimeout(TIMEOUT_FOR_FAST_STATEMENTS, TimeUnit.MILLISECONDS); + // First verify that the fast update does not timeout when in transactional mode (as it is the + // commit that is slow). + connection.setAutocommit(false); + connection.execute(INSERT_STATEMENT); + connection.rollback(); + + // Then verify that the update does timeout when executed in autocommit mode, as the commit + // gRPC call will be slow. 
+ connection.setStatementTimeout(TIMEOUT_FOR_SLOW_STATEMENTS, TimeUnit.MILLISECONDS); + connection.setAutocommit(true); + SpannerException e = + assertThrows(SpannerException.class, () -> connection.execute(INSERT_STATEMENT)); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + } + + @Test + public void testTimeoutExceptionReadWriteAutocommitSlowCommitMultipleStatements() { + mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setStatementTimeout(TIMEOUT_FOR_SLOW_STATEMENTS, TimeUnit.MILLISECONDS); + // assert that multiple statements after each other also time out + for (int i = 0; i < 2; i++) { + SpannerException e = + assertThrows(SpannerException.class, () -> connection.execute(INSERT_STATEMENT)); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + // try to do a query in autocommit mode. This will use a single-use read-only transaction that + // does not need to commit, i.e. it should succeed. + connection.setStatementTimeout(TIMEOUT_FOR_FAST_STATEMENTS, TimeUnit.MILLISECONDS); + try (ResultSet rs = connection.executeQuery(SELECT_RANDOM_STATEMENT)) { + assertNotNull(rs); + } + } + } + + @Test + public void testTimeoutExceptionReadWriteAutocommitPartitioned() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setAutocommitDmlMode(AutocommitDmlMode.PARTITIONED_NON_ATOMIC); + // First verify that the statement will not timeout by default. + connection.setStatementTimeout(TIMEOUT_FOR_FAST_STATEMENTS, TimeUnit.MILLISECONDS); + connection.execute(INSERT_STATEMENT); + + // Now slow down the execution and verify that it times out. PDML uses the ExecuteStreamingSql + // RPC. 
+ mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + connection.setStatementTimeout(TIMEOUT_FOR_SLOW_STATEMENTS, TimeUnit.MILLISECONDS); + SpannerException e = + assertThrows(SpannerException.class, () -> connection.execute(INSERT_STATEMENT)); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + } + + @Test + public void testTimeoutExceptionReadWriteTransactional() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setStatementTimeout(TIMEOUT_FOR_SLOW_STATEMENTS, TimeUnit.MILLISECONDS); + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.executeQuery(SELECT_RANDOM_STATEMENT)); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + } + + @Test + public void testTimeoutExceptionReadWriteTransactionMultipleStatements() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setStatementTimeout(TIMEOUT_FOR_SLOW_STATEMENTS, TimeUnit.MILLISECONDS); + // Assert that multiple statements after each other will timeout the first time, and then + // throw a SpannerException with code FAILED_PRECONDITION. + for (int i = 0; i < 2; i++) { + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.executeQuery(SELECT_RANDOM_STATEMENT)); + if (i == 0) { + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } else { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + // do a rollback without any chance of a timeout + connection.clearStatementTimeout(); + connection.rollback(); + // try to do a new query that is fast. 
+ mockSpanner.removeAllExecutionTimes(); + connection.setStatementTimeout(TIMEOUT_FOR_FAST_STATEMENTS, TimeUnit.MILLISECONDS); + try (ResultSet rs = connection.executeQuery(SELECT_RANDOM_STATEMENT)) { + assertNotNull(rs); + } + } + } + + @Test + public void testTimeoutExceptionReadWriteTransactionalSlowCommit() { + mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + + connection.setStatementTimeout(TIMEOUT_FOR_FAST_STATEMENTS, TimeUnit.MILLISECONDS); + try (ResultSet rs = connection.executeQuery(SELECT_RANDOM_STATEMENT)) { + assertNotNull(rs); + } + + connection.setStatementTimeout(TIMEOUT_FOR_SLOW_STATEMENTS, TimeUnit.MILLISECONDS); + SpannerException e = assertThrows(SpannerException.class, connection::commit); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + } + + @Test + public void testTimeoutExceptionReadWriteTransactionalSlowRollback() { + mockSpanner.setRollbackExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + + connection.setStatementTimeout(TIMEOUT_FOR_FAST_STATEMENTS, TimeUnit.MILLISECONDS); + try (ResultSet rs = connection.executeQuery(SELECT_RANDOM_STATEMENT)) { + assertNotNull(rs); + } + connection.setStatementTimeout(TIMEOUT_FOR_SLOW_STATEMENTS, TimeUnit.MILLISECONDS); + // Rollback timeouts are not propagated as exceptions, as all errors during a Rollback RPC are + // ignored by the client library. 
+ connection.rollback(); + } + } + + private static final class ConnectionReadOnlyAutocommit implements ConnectionConsumer { + @Override + public void accept(Connection t) { + t.setAutocommit(true); + t.setReadOnly(true); + } + } + + @Test + public void testInterruptedExceptionReadOnlyAutocommit() + throws InterruptedException, ExecutionException { + testInterruptedException(new ConnectionReadOnlyAutocommit()); + } + + private static final class ConnectionReadOnlyTransactional implements ConnectionConsumer { + @Override + public void accept(Connection t) { + t.setReadOnly(true); + t.setAutocommit(false); + } + } + + @Test + public void testInterruptedExceptionReadOnlyTransactional() + throws InterruptedException, ExecutionException { + testInterruptedException(new ConnectionReadOnlyTransactional()); + } + + private static final class ConnectionReadWriteAutocommit implements ConnectionConsumer { + @Override + public void accept(Connection t) { + t.setAutocommit(true); + t.setReadOnly(false); + } + } + + @Test + public void testInterruptedExceptionReadWriteAutocommit() + throws InterruptedException, ExecutionException { + testInterruptedException(new ConnectionReadWriteAutocommit()); + } + + private static final class ConnectionReadWriteTransactional implements ConnectionConsumer { + @Override + public void accept(Connection t) { + t.setAutocommit(false); + t.setReadOnly(false); + } + } + + @Test + public void testInterruptedExceptionReadWriteTransactional() + throws InterruptedException, ExecutionException { + testInterruptedException(new ConnectionReadWriteTransactional()); + } + + private void testInterruptedException(final ConnectionConsumer consumer) + throws InterruptedException, ExecutionException { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + CountDownLatch latch = new CountDownLatch(1); + SettableApiFuture thread = SettableApiFuture.create(); + ExecutorService 
executor = Executors.newSingleThreadExecutor(); + try { + Future future = + executor.submit( + () -> { + try (Connection connection = createConnection()) { + consumer.accept(connection); + connection.setStatementTimeout(10000L, TimeUnit.MILLISECONDS); + + thread.set(Thread.currentThread()); + latch.countDown(); + //noinspection EmptyTryBlock + try (ResultSet ignore = connection.executeQuery(SELECT_RANDOM_STATEMENT)) {} + return false; + } catch (SpannerException e) { + return e.getErrorCode() == ErrorCode.CANCELLED; + } + }); + assertTrue(latch.await(10L, TimeUnit.SECONDS)); + waitForRequestsToContain(ExecuteSqlRequest.class); + thread.get().interrupt(); + assertTrue(future.get()); + } finally { + executor.shutdownNow(); + } + } + + @Test + public void testInvalidQueryReadOnlyAutocommit() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException(Status.INVALID_ARGUMENT.asRuntimeException())); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setReadOnly(true); + connection.setStatementTimeout(TIMEOUT_FOR_FAST_STATEMENTS, TimeUnit.MILLISECONDS); + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.executeQuery(Statement.of(INVALID_SELECT))); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + } + } + + @Test + public void testInvalidQueryReadOnlyTransactional() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException(Status.INVALID_ARGUMENT.asRuntimeException())); + + try (Connection connection = createConnection()) { + connection.setReadOnly(true); + connection.setAutocommit(false); + connection.setStatementTimeout(TIMEOUT_FOR_FAST_STATEMENTS, TimeUnit.MILLISECONDS); + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.executeQuery(Statement.of(INVALID_SELECT))); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + } + } + + @Test + public void 
testInvalidQueryReadWriteAutocommit() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException(Status.INVALID_ARGUMENT.asRuntimeException())); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setStatementTimeout(TIMEOUT_FOR_FAST_STATEMENTS, TimeUnit.MILLISECONDS); + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.executeQuery(Statement.of(INVALID_SELECT))); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + } + } + + @Test + public void testInvalidQueryReadWriteTransactional() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException(Status.INVALID_ARGUMENT.asRuntimeException())); + + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setStatementTimeout(TIMEOUT_FOR_FAST_STATEMENTS, TimeUnit.MILLISECONDS); + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.executeQuery(Statement.of(INVALID_SELECT))); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + } + } + + static void waitForRequestsToContain(Class request) { + try { + mockSpanner.waitForRequestsToContain(request, EXECUTION_TIME_SLOW_STATEMENT); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (TimeoutException e) { + throw SpannerExceptionFactory.propagateTimeout(e); + } + } + + private void waitForDdlRequestOnServer() { + try { + Stopwatch watch = Stopwatch.createStarted(); + while (watch.elapsed(TimeUnit.MILLISECONDS) < EXECUTION_TIME_SLOW_STATEMENT) { + try { + List requests = new ArrayList<>(mockDatabaseAdmin.getRequests()); + if (requests.stream().anyMatch(request -> request instanceof UpdateDatabaseDdlRequest)) { + break; + } + } catch (ConcurrentModificationException ignore) { + // Just ignore and retry. 
+ } + //noinspection BusyWait + Thread.sleep(1L); + } + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + + @Test + public void testCancelReadOnlyAutocommit() { + assumeFalse( + "Direct executor does not yet support cancelling statements", + statementExecutorType == StatementExecutorType.DIRECT_EXECUTOR); + + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setReadOnly(true); + ExecutorService executor = Executors.newSingleThreadExecutor(); + try { + executor.execute( + () -> { + waitForRequestsToContain(ExecuteSqlRequest.class); + connection.cancel(); + }); + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.executeQuery(SELECT_RANDOM_STATEMENT)); + assertEquals(ErrorCode.CANCELLED, e.getErrorCode()); + } finally { + executor.shutdownNow(); + } + } + } + + @Test + public void testCancelReadOnlyAutocommitMultipleStatements() { + assumeFalse( + "Direct executor does not yet support cancelling statements", + statementExecutorType == StatementExecutorType.DIRECT_EXECUTOR); + // TODO: Look into this for multiplexed sessions. 
+ assumeTrue(System.getenv("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS") == null); + + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setReadOnly(true); + ExecutorService executor = Executors.newSingleThreadExecutor(); + try { + executor.execute( + () -> { + waitForRequestsToContain(ExecuteSqlRequest.class); + connection.cancel(); + }); + + SpannerException exception = + assertThrows( + SpannerException.class, () -> connection.executeQuery(SELECT_RANDOM_STATEMENT)); + assertEquals(ErrorCode.CANCELLED, exception.getErrorCode()); + + mockSpanner.removeAllExecutionTimes(); + connection.setStatementTimeout(TIMEOUT_FOR_FAST_STATEMENTS, TimeUnit.MILLISECONDS); + try (ResultSet rs = connection.executeQuery(SELECT_RANDOM_STATEMENT)) { + assertNotNull(rs); + } + } finally { + executor.shutdownNow(); + } + } + } + + @Test + public void testCancelReadOnlyTransactional() { + assumeFalse( + "Direct executor does not yet support cancelling statements", + statementExecutorType == StatementExecutorType.DIRECT_EXECUTOR); + + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setReadOnly(true); + connection.setAutocommit(false); + ExecutorService executor = Executors.newSingleThreadExecutor(); + try { + executor.execute( + () -> { + waitForRequestsToContain(ExecuteSqlRequest.class); + connection.cancel(); + }); + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.executeQuery(SELECT_RANDOM_STATEMENT)); + assertEquals(ErrorCode.CANCELLED, e.getErrorCode()); + } finally { + executor.shutdownNow(); + } + } + } + + @Test + public void testCancelReadOnlyTransactionalMultipleStatements() { + assumeFalse( + "Direct executor does 
not yet support cancelling statements", + statementExecutorType == StatementExecutorType.DIRECT_EXECUTOR); + + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setReadOnly(true); + connection.setAutocommit(false); + ExecutorService executor = Executors.newSingleThreadExecutor(); + try { + executor.execute( + () -> { + waitForRequestsToContain(ExecuteSqlRequest.class); + connection.cancel(); + }); + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.executeQuery(Statement.of(SLOW_SELECT))); + assertEquals(ErrorCode.CANCELLED, e.getErrorCode()); + + // try to do a new query that is fast. + mockSpanner.removeAllExecutionTimes(); + connection.setStatementTimeout(TIMEOUT_FOR_FAST_STATEMENTS, TimeUnit.MILLISECONDS); + try (ResultSet rs = connection.executeQuery(SELECT_RANDOM_STATEMENT)) { + assertNotNull(rs); + } + // rollback and do another fast query + connection.rollback(); + try (ResultSet rs = connection.executeQuery(SELECT_RANDOM_STATEMENT)) { + assertNotNull(rs); + } + } finally { + executor.shutdownNow(); + } + } + } + + @Test + public void testCancelReadWriteAutocommit() { + assumeFalse( + "Direct executor does not yet support cancelling statements", + statementExecutorType == StatementExecutorType.DIRECT_EXECUTOR); + + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + ExecutorService executor = Executors.newSingleThreadExecutor(); + try { + executor.execute( + () -> { + waitForRequestsToContain(ExecuteSqlRequest.class); + connection.cancel(); + }); + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.executeQuery(SELECT_RANDOM_STATEMENT)); + assertEquals(ErrorCode.CANCELLED, 
e.getErrorCode()); + } finally { + executor.shutdownNow(); + } + } + } + + @Test + public void testCancelReadWriteAutocommitMultipleStatements() { + assumeFalse( + "Direct executor does not yet support cancelling statements", + statementExecutorType == StatementExecutorType.DIRECT_EXECUTOR); + + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + ExecutorService executor = Executors.newSingleThreadExecutor(); + try { + executor.execute( + () -> { + waitForRequestsToContain(ExecuteSqlRequest.class); + connection.cancel(); + }); + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.executeQuery(SELECT_RANDOM_STATEMENT)); + assertEquals(ErrorCode.CANCELLED, e.getErrorCode()); + + // try to do a new query that is fast. + mockSpanner.removeAllExecutionTimes(); + connection.setStatementTimeout(TIMEOUT_FOR_FAST_STATEMENTS, TimeUnit.MILLISECONDS); + try (ResultSet rs = connection.executeQuery(SELECT_RANDOM_STATEMENT)) { + assertNotNull(rs); + } + } finally { + executor.shutdownNow(); + } + } + } + + @Test + public void testCancelReadWriteAutocommitSlowUpdate() { + assumeFalse( + "Direct executor does not yet support cancelling statements", + statementExecutorType == StatementExecutorType.DIRECT_EXECUTOR); + + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + ExecutorService executor = Executors.newSingleThreadExecutor(); + try { + executor.execute( + () -> { + waitForRequestsToContain(ExecuteSqlRequest.class); + connection.cancel(); + }); + SpannerException e = + assertThrows(SpannerException.class, () -> connection.execute(INSERT_STATEMENT)); + assertEquals(ErrorCode.CANCELLED, e.getErrorCode()); + } finally { + 
executor.shutdownNow(); + } + } + } + + @Test + public void testCancelReadWriteAutocommitSlowCommit() { + assumeFalse( + "Direct executor does not yet support cancelling statements", + statementExecutorType == StatementExecutorType.DIRECT_EXECUTOR); + + mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + ExecutorService executor = Executors.newSingleThreadExecutor(); + try { + executor.execute( + () -> { + waitForRequestsToContain(CommitRequest.class); + connection.cancel(); + }); + SpannerException e = + assertThrows(SpannerException.class, () -> connection.execute(INSERT_STATEMENT)); + assertEquals(ErrorCode.CANCELLED, e.getErrorCode()); + } finally { + executor.shutdownNow(); + } + } + } + + @Test + public void testCancelReadWriteTransactional() { + assumeFalse( + "Direct executor does not yet support cancelling statements", + statementExecutorType == StatementExecutorType.DIRECT_EXECUTOR); + + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + ExecutorService executor = Executors.newSingleThreadExecutor(); + try { + executor.execute( + () -> { + waitForRequestsToContain(ExecuteSqlRequest.class); + connection.cancel(); + }); + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.executeQuery(SELECT_RANDOM_STATEMENT)); + assertEquals(ErrorCode.CANCELLED, e.getErrorCode()); + } finally { + executor.shutdownNow(); + } + } + } + + @Test + public void testCancelReadWriteTransactionalMultipleStatements() { + assumeFalse( + "Direct executor does not yet support cancelling statements", + statementExecutorType == StatementExecutorType.DIRECT_EXECUTOR); + + mockSpanner.setExecuteStreamingSqlExecutionTime( + 
SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + ExecutorService executor = Executors.newSingleThreadExecutor(); + try { + executor.execute( + () -> { + waitForRequestsToContain(ExecuteSqlRequest.class); + connection.cancel(); + }); + SpannerException e = + assertThrows( + SpannerException.class, () -> connection.executeQuery(SELECT_RANDOM_STATEMENT)); + assertEquals(ErrorCode.CANCELLED, e.getErrorCode()); + // Rollback the transaction as it is no longer usable. + connection.rollback(); + + // Try to do a new query that is fast. + mockSpanner.removeAllExecutionTimes(); + connection.setStatementTimeout(TIMEOUT_FOR_FAST_STATEMENTS, TimeUnit.MILLISECONDS); + try (ResultSet rs = connection.executeQuery(SELECT_RANDOM_STATEMENT)) { + assertNotNull(rs); + } + } finally { + executor.shutdownNow(); + } + } + } + + static void addSlowMockDdlOperation() { + addSlowMockDdlOperations(1); + } + + static void addSlowMockDdlOperations(int count) { + addMockDdlOperations(count, false); + } + + static void addFastMockDdlOperation() { + addFastMockDdlOperations(1); + } + + static void addFastMockDdlOperations(int count) { + addMockDdlOperations(count, true); + } + + static void addMockDdlOperations(int count, boolean done) { + for (int i = 0; i < count; i++) { + mockDatabaseAdmin.addResponse( + Operation.newBuilder() + .setMetadata( + Any.pack( + UpdateDatabaseDdlMetadata.newBuilder() + .addStatements(SLOW_DDL) + .setDatabase("projects/proj/instances/inst/databases/db") + .build())) + .setName("projects/proj/instances/inst/databases/db/operations/1") + .setDone(done) + .setResponse(Any.pack(Empty.getDefaultInstance())) + .build()); + } + } + + @Test + public void testCancelDdlBatch() { + assumeFalse( + "Direct executor does not yet support cancelling statements", + statementExecutorType == StatementExecutorType.DIRECT_EXECUTOR); + + addSlowMockDdlOperation(); + 
+ try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.startBatchDdl(); + connection.execute(Statement.of(SLOW_DDL)); + ExecutorService executor = Executors.newSingleThreadExecutor(); + try { + executor.execute( + () -> { + waitForDdlRequestOnServer(); + connection.cancel(); + }); + SpannerException e = assertThrows(SpannerException.class, connection::runBatch); + assertEquals(ErrorCode.CANCELLED, e.getErrorCode()); + } finally { + executor.shutdownNow(); + } + connection.closeAsync(); + } + } + + @Test + public void testCancelDdlAutocommit() { + assumeFalse( + "Direct executor does not yet support cancelling statements", + statementExecutorType == StatementExecutorType.DIRECT_EXECUTOR); + + addSlowMockDdlOperation(); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + ExecutorService executor = Executors.newSingleThreadExecutor(); + try { + executor.execute( + () -> { + waitForDdlRequestOnServer(); + connection.cancel(); + }); + SpannerException e = + assertThrows(SpannerException.class, () -> connection.execute(Statement.of(SLOW_DDL))); + assertEquals(ErrorCode.CANCELLED, e.getErrorCode()); + } finally { + executor.shutdownNow(); + } + connection.closeAsync(); + } + } + + @Test + public void testTimeoutExceptionDdlAutocommit() { + addSlowMockDdlOperations(10); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setStatementTimeout(TIMEOUT_FOR_SLOW_STATEMENTS, TimeUnit.MILLISECONDS); + SpannerException e = + assertThrows(SpannerException.class, () -> connection.execute(Statement.of(SLOW_DDL))); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + + connection.closeAsync(); + } + } + + @Test + public void testTimeoutExceptionDdlAutocommitMultipleStatements() { + addSlowMockDdlOperations(20); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + 
connection.setStatementTimeout(TIMEOUT_FOR_SLOW_STATEMENTS, TimeUnit.MILLISECONDS); + + // assert that multiple statements after each other also time out + for (int i = 0; i < 2; i++) { + SpannerException e = + assertThrows(SpannerException.class, () -> connection.execute(Statement.of(SLOW_DDL))); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + // try to do a new DDL statement that is fast. + mockDatabaseAdmin.reset(); + addFastMockDdlOperation(); + connection.setStatementTimeout(TIMEOUT_FOR_FAST_STATEMENTS, TimeUnit.MILLISECONDS); + assertNotNull(connection.execute(Statement.of(FAST_DDL))); + } + } + + @Test + public void testTimeoutExceptionDdlBatch() { + addSlowMockDdlOperations(10); + + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.startBatchDdl(); + connection.setStatementTimeout(TIMEOUT_FOR_SLOW_STATEMENTS, TimeUnit.MILLISECONDS); + + // the following statement will NOT time out as the statement is only buffered locally + connection.execute(Statement.of(SLOW_DDL)); + // the runBatch() statement sends the statement to the server and should time out + SpannerException e = assertThrows(SpannerException.class, connection::runBatch); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + } + + @Test + public void testTimeoutExceptionDdlBatchMultipleStatements() { + addSlowMockDdlOperations(20); + + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setStatementTimeout(TIMEOUT_FOR_SLOW_STATEMENTS, TimeUnit.MILLISECONDS); + + // assert that multiple statements after each other also time out + for (int i = 0; i < 2; i++) { + connection.startBatchDdl(); + connection.execute(Statement.of(SLOW_DDL)); + SpannerException e = assertThrows(SpannerException.class, connection::runBatch); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + } + // try to do a new DDL statement that is fast. 
+ mockDatabaseAdmin.reset(); + addFastMockDdlOperation(); + connection.setStatementTimeout(TIMEOUT_FOR_FAST_STATEMENTS, TimeUnit.MILLISECONDS); + connection.startBatchDdl(); + assertNotNull(connection.execute(Statement.of(FAST_DDL))); + connection.runBatch(); + } + } + + @Test + public void testTimeoutDifferentTimeUnits() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(EXECUTION_TIME_SLOW_STATEMENT, 0)); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + for (TimeUnit unit : ReadOnlyStalenessUtil.SUPPORTED_UNITS) { + // Only set the timeout, don't execute a statement with the timeout to prevent unnecessarily + // slowing down the build time. + connection.setStatementTimeout(1L, unit); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/TaggingTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/TaggingTest.java new file mode 100644 index 000000000000..80210cb70945 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/TaggingTest.java @@ -0,0 +1,894 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import java.util.Collections; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class TaggingTest extends AbstractMockServerTest { + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + @Parameter public Dialect dialect; + + private Dialect currentDialect; + + @Before + public void setupDialect() { + if (currentDialect != dialect) { + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.detectDialectResult(dialect)); + SpannerPool.closeSpannerPool(); + currentDialect = dialect; + } + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + @Test + public void testStatementTagNotAllowedForCommit() { + try (Connection connection = createConnection()) { + connection.setStatementTag("tag-1"); + SpannerException exception = assertThrows(SpannerException.class, connection::commit); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } + } + + @Test + public void testStatementTagNotAllowedForRollback() { + try (Connection connection = createConnection()) 
{ + connection.setStatementTag("tag-1"); + SpannerException exception = assertThrows(SpannerException.class, connection::rollback); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } + } + + @Test + public void testStatementTagNotAllowedInsideBatch() { + try (Connection connection = createConnection()) { + for (boolean autocommit : new boolean[] {true, false}) { + connection.setAutocommit(autocommit); + connection.startBatchDml(); + + SpannerException exception = + assertThrows(SpannerException.class, () -> connection.setStatementTag("tag-1")); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + + connection.abortBatch(); + } + } + } + + @Test + public void testQuery_NoTags() { + try (Connection connection = createConnection()) { + for (boolean autocommit : new boolean[] {true, false}) { + connection.setAutocommit(autocommit); + //noinspection EmptyTryBlock + try (ResultSet ignore = connection.executeQuery(SELECT_COUNT_STATEMENT)) {} + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testUpdate_NoTags() { + try (Connection connection = createConnection()) { + for (boolean autocommit : new boolean[] {true, false}) { + connection.setAutocommit(autocommit); + connection.executeUpdate(INSERT_STATEMENT); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + 
mockSpanner.clearRequests(); + } + } + } + + @Test + public void testPartitionedUpdate_NoTags() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setAutocommitDmlMode(AutocommitDmlMode.PARTITIONED_NON_ATOMIC); + connection.executeUpdate(INSERT_STATEMENT); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + } + } + + @Test + public void testBatchUpdate_NoTags() { + try (Connection connection = createConnection()) { + for (boolean autocommit : new boolean[] {true, false}) { + connection.setAutocommit(autocommit); + connection.executeBatchUpdate(Collections.singletonList(INSERT_STATEMENT)); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteBatchDmlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteBatchDmlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testQuery_StatementTag() { + try (Connection connection = createConnection()) { + for (boolean autocommit : new boolean[] {true, false}) { + connection.setAutocommit(autocommit); + connection.setStatementTag("tag-1"); + //noinspection EmptyTryBlock + try (ResultSet ignore = connection.executeQuery(SELECT_COUNT_STATEMENT)) {} + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + "tag-1", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + 
mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + + // The tag should automatically be cleared after a statement. + //noinspection EmptyTryBlock + try (ResultSet ignore = connection.executeQuery(SELECT_COUNT_STATEMENT)) {} + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testUpdate_StatementTag() { + try (Connection connection = createConnection()) { + for (boolean autocommit : new boolean[] {true, false}) { + connection.setAutocommit(autocommit); + connection.setStatementTag("tag-2"); + connection.executeUpdate(INSERT_STATEMENT); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + "tag-2", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + + connection.executeUpdate(INSERT_STATEMENT); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testPartitionedUpdate_StatementTag() { + try (Connection connection = createConnection()) { + 
connection.setAutocommit(true); + connection.setAutocommitDmlMode(AutocommitDmlMode.PARTITIONED_NON_ATOMIC); + connection.setStatementTag("tag-4"); + connection.executeUpdate(INSERT_STATEMENT); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + "tag-4", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + + connection.executeUpdate(INSERT_STATEMENT); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + } + } + + @Test + public void testBatchUpdate_StatementTag() { + try (Connection connection = createConnection()) { + for (boolean autocommit : new boolean[] {true, false}) { + connection.setAutocommit(autocommit); + connection.setStatementTag("tag-3"); + connection.executeBatchUpdate(Collections.singletonList(INSERT_STATEMENT)); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals( + "tag-3", + mockSpanner + .getRequestsOfType(ExecuteBatchDmlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteBatchDmlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + + connection.executeBatchUpdate(Collections.singletonList(INSERT_STATEMENT)); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals( + "", + mockSpanner + 
.getRequestsOfType(ExecuteBatchDmlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteBatchDmlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testQuery_TransactionTag() { + try (Connection connection = createConnection()) { + connection.setTransactionTag("tag-1"); + //noinspection EmptyTryBlock + try (ResultSet ignore = connection.executeQuery(SELECT_COUNT_STATEMENT)) {} + connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "tag-1", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals( + "", + mockSpanner + .getRequestsOfType(CommitRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "tag-1", + mockSpanner + .getRequestsOfType(CommitRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + + // The tag should automatically be cleared after a statement. 
+ //noinspection EmptyTryBlock + try (ResultSet ignore = connection.executeQuery(SELECT_COUNT_STATEMENT)) {} + connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals( + "", + mockSpanner + .getRequestsOfType(CommitRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + mockSpanner + .getRequestsOfType(CommitRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + } + } + + @Test + public void testUpdate_TransactionTag() { + try (Connection connection = createConnection()) { + connection.setTransactionTag("tag-2"); + connection.executeUpdate(INSERT_STATEMENT); + connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "tag-2", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals( + "", + mockSpanner + .getRequestsOfType(CommitRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "tag-2", + mockSpanner + .getRequestsOfType(CommitRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + + connection.executeUpdate(INSERT_STATEMENT); + connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + "", + 
mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals( + "", + mockSpanner + .getRequestsOfType(CommitRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + mockSpanner + .getRequestsOfType(CommitRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + } + } + + @Test + public void testBatchUpdate_TransactionTag() { + try (Connection connection = createConnection()) { + connection.setTransactionTag("tag-3"); + connection.executeBatchUpdate(Collections.singletonList(INSERT_STATEMENT)); + connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteBatchDmlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "tag-3", + mockSpanner + .getRequestsOfType(ExecuteBatchDmlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals( + "", + mockSpanner + .getRequestsOfType(CommitRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "tag-3", + mockSpanner + .getRequestsOfType(CommitRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + + connection.executeBatchUpdate(Collections.singletonList(INSERT_STATEMENT)); + connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteBatchDmlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + 
mockSpanner + .getRequestsOfType(ExecuteBatchDmlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals( + "", + mockSpanner + .getRequestsOfType(CommitRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + mockSpanner + .getRequestsOfType(CommitRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + } + } + + @Test + public void testDmlBatch_StatementTag() { + try (Connection connection = createConnection()) { + for (boolean autocommit : new boolean[] {true, false}) { + connection.setAutocommit(autocommit); + + connection.setStatementTag("batch-tag"); + connection.startBatchDml(); + connection.execute(INSERT_STATEMENT); + connection.execute(INSERT_STATEMENT); + connection.runBatch(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals( + "batch-tag", + mockSpanner + .getRequestsOfType(ExecuteBatchDmlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteBatchDmlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testRunBatch_TransactionTag() { + try (Connection connection = createConnection()) { + connection.setTransactionTag("batch-tag"); + connection.startBatchDml(); + connection.execute(INSERT_STATEMENT); + connection.execute(INSERT_STATEMENT); + connection.runBatch(); + connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteBatchDmlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "batch-tag", + mockSpanner + .getRequestsOfType(ExecuteBatchDmlRequest.class) + .get(0) + .getRequestOptions() + 
.getTransactionTag()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + assertEquals( + "", + mockSpanner + .getRequestsOfType(CommitRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "batch-tag", + mockSpanner + .getRequestsOfType(CommitRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + } + } + + @Test + public void testShowSetTags() { + try (Connection connection = createConnection()) { + connection.execute(Statement.of(String.format("SET %sSTATEMENT_TAG='tag1'", prefix()))); + try (ResultSet rs = + connection + .execute(Statement.of(String.format("SHOW VARIABLE %sSTATEMENT_TAG", prefix()))) + .getResultSet()) { + assertTrue(rs.next()); + assertEquals("tag1", rs.getString(String.format("%sSTATEMENT_TAG", prefix()))); + assertFalse(rs.next()); + } + connection.execute(Statement.of(String.format("SET %sSTATEMENT_TAG=''", prefix()))); + try (ResultSet rs = + connection + .execute(Statement.of(String.format("SHOW VARIABLE %sSTATEMENT_TAG", prefix()))) + .getResultSet()) { + assertTrue(rs.next()); + assertEquals("", rs.getString(String.format("%sSTATEMENT_TAG", prefix()))); + assertFalse(rs.next()); + } + connection.execute(Statement.of(String.format("SET %sTRANSACTION_TAG='tag2'", prefix()))); + try (ResultSet rs = + connection + .execute(Statement.of(String.format("SHOW VARIABLE %sTRANSACTION_TAG", prefix()))) + .getResultSet()) { + assertTrue(rs.next()); + assertEquals("tag2", rs.getString(String.format("%sTRANSACTION_TAG", prefix()))); + assertFalse(rs.next()); + } + connection.execute(Statement.of(String.format("SET %sTRANSACTION_TAG=''", prefix()))); + try (ResultSet rs = + connection + .execute(Statement.of(String.format("SHOW VARIABLE %sTRANSACTION_TAG", prefix()))) + .getResultSet()) { + assertTrue(rs.next()); + assertEquals("", rs.getString(String.format("%sTRANSACTION_TAG", prefix()))); + assertFalse(rs.next()); + } + } + } + + @Test + 
public void testQuery_StatementTagHint() { + String sql = SELECT_COUNT_STATEMENT.getSql(); + + try (Connection connection = createConnection()) { + for (boolean autocommit : new boolean[] {true, false}) { + connection.setAutocommit(autocommit); + //noinspection EmptyTryBlock + try (ResultSet ignore = + connection.executeQuery(Statement.of(statementTagHint("tag-1") + sql))) {} + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + "tag-1", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testUpdate_StatementTagHint() { + String sql = INSERT_STATEMENT.getSql(); + + try (Connection connection = createConnection()) { + for (boolean autocommit : new boolean[] {true, false}) { + connection.setAutocommit(autocommit); + connection.executeUpdate(Statement.of(statementTagHint("tag-2") + sql)); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + "tag-2", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testPartitionedUpdate_StatementTagHint() { + String sql = INSERT_STATEMENT.getSql(); + + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setAutocommitDmlMode(AutocommitDmlMode.PARTITIONED_NON_ATOMIC); + connection.executeUpdate(Statement.of(statementTagHint("tag-4") + sql)); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + "tag-4", + mockSpanner + .getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + mockSpanner + 
.getRequestsOfType(ExecuteSqlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + } + } + + @Test + public void testBatchUpdate_StatementTagHint() { + String sql = INSERT_STATEMENT.getSql(); + + try (Connection connection = createConnection()) { + for (boolean autocommit : new boolean[] {true, false}) { + connection.setAutocommit(autocommit); + connection.executeBatchUpdate( + Collections.singletonList(Statement.of(statementTagHint("tag-3") + sql))); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals( + "tag-3", + mockSpanner + .getRequestsOfType(ExecuteBatchDmlRequest.class) + .get(0) + .getRequestOptions() + .getRequestTag()); + assertEquals( + "", + mockSpanner + .getRequestsOfType(ExecuteBatchDmlRequest.class) + .get(0) + .getRequestOptions() + .getTransactionTag()); + + mockSpanner.clearRequests(); + } + } + } + + private String statementTagHint(String tag) { + switch (dialect) { + case POSTGRESQL: + return "/*@statement_tag='" + tag + "'*/"; + case GOOGLE_STANDARD_SQL: + default: + return "@{statement_tag='" + tag + "'}"; + } + } + + private String prefix() { + return dialect == Dialect.POSTGRESQL ? "SPANNER." : ""; + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/TestChannelProvider.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/TestChannelProvider.java new file mode 100644 index 000000000000..7331c8d03c77 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/TestChannelProvider.java @@ -0,0 +1,26 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.cloud.spanner.v1.SpannerSettings; + +public final class TestChannelProvider implements ConnectionOptions.ExternalChannelProvider { + public TransportChannelProvider getChannelProvider(String host, int port) { + return SpannerSettings.defaultTransportChannelProvider(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/TransactionMockServerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/TransactionMockServerTest.java new file mode 100644 index 000000000000..45f68b11a5b6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/TransactionMockServerTest.java @@ -0,0 +1,402 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.ConnectionProperties.DEFAULT_ISOLATION_LEVEL; +import static com.google.cloud.spanner.connection.ConnectionProperties.READ_LOCK_MODE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest.ITConnection; +import com.google.cloud.spanner.connection.StatementResult.ResultType; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode; +import io.grpc.Deadline.Ticker; +import io.grpc.Status; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class TransactionMockServerTest extends AbstractMockServerTest { + + @Parameter(0) + public IsolationLevel isolationLevel; + + @Parameter(1) + public ReadLockMode readLockMode; + + @Parameters(name = "isolationLevel = {0}, 
readLockMode = {1}") + public static Collection data() { + List result = new ArrayList<>(); + for (IsolationLevel isolationLevel : DEFAULT_ISOLATION_LEVEL.getValidValues()) { + for (ReadLockMode readLockMode : READ_LOCK_MODE.getValidValues()) { + result.add(new Object[] {isolationLevel, readLockMode}); + } + } + return result; + } + + @Override + protected ITConnection createConnection() { + return createConnection( + Collections.emptyList(), + Collections.emptyList(), + String.format( + ";default_isolation_level=%s;read_lock_mode=%s", isolationLevel, readLockMode)); + } + + @Test + public void testQuery() { + try (Connection connection = createConnection()) { + //noinspection EmptyTryBlock + try (ResultSet ignore = connection.executeQuery(SELECT1_STATEMENT)) {} + connection.commit(); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertEquals(isolationLevel, request.getTransaction().getBegin().getIsolationLevel()); + assertEquals( + readLockMode, request.getTransaction().getBegin().getReadWrite().getReadLockMode()); + assertFalse(request.getLastStatement()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testDml() { + try (Connection connection = createConnection()) { + connection.executeUpdate(INSERT_STATEMENT); + connection.commit(); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertEquals(isolationLevel, request.getTransaction().getBegin().getIsolationLevel()); + assertEquals( + readLockMode, 
request.getTransaction().getBegin().getReadWrite().getReadLockMode()); + assertFalse(request.getLastStatement()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testFailedFirstDml() { + Statement invalidInsert = Statement.of("insert into my_table (id, name) values (1, 'test')"); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.exception( + invalidInsert, + Status.ALREADY_EXISTS.withDescription("Row 1 already exists").asRuntimeException())); + + try (Connection connection = createConnection()) { + SpannerException exception = + assertThrows(SpannerException.class, () -> connection.executeUpdate(invalidInsert)); + assertEquals(ErrorCode.ALREADY_EXISTS, exception.getErrorCode()); + connection.commit(); + } + // The transaction should be internally retried with an explicit BeginTransaction request, as + // the first statement in the transaction failed. + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testFailedFirstAndLastDml() { + Statement invalidInsert = + Statement.of("insert into my_table (id, name) values (1, 'test') then return id"); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.exception( + invalidInsert, + Status.ALREADY_EXISTS.withDescription("Row 1 already exists").asRuntimeException())); + + try (Connection connection = createConnection()) { + SpannerException exception = + assertThrows( + SpannerException.class, + () -> connection.executeQuery(invalidInsert, Options.lastStatement())); + assertEquals(ErrorCode.ALREADY_EXISTS, exception.getErrorCode()); + + // The same error should be repeated for the commit. 
+ exception = assertThrows(SpannerException.class, connection::commit); + assertEquals(ErrorCode.ALREADY_EXISTS, exception.getErrorCode()); + } + // The transaction should not be retried, as the last_statement flag was set. + assertEquals(0, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + // There is no CommitRequest, because the statement never returned a transaction ID. + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testDmlReturning() { + try (Connection connection = createConnection()) { + //noinspection EmptyTryBlock + try (ResultSet ignore = connection.executeQuery(INSERT_RETURNING_STATEMENT)) {} + connection.commit(); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertEquals(isolationLevel, request.getTransaction().getBegin().getIsolationLevel()); + assertEquals( + readLockMode, request.getTransaction().getBegin().getReadWrite().getReadLockMode()); + assertFalse(request.getLastStatement()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testBatchDml() { + try (Connection connection = createConnection()) { + connection.startBatchDml(); + connection.executeUpdate(INSERT_STATEMENT); + connection.executeUpdate(INSERT_STATEMENT); + connection.runBatch(); + connection.commit(); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertEquals(isolationLevel, 
request.getTransaction().getBegin().getIsolationLevel()); + assertEquals( + readLockMode, request.getTransaction().getBegin().getReadWrite().getReadLockMode()); + assertFalse(request.getLastStatements()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testBeginTransactionIsolationLevel() { + SpannerPool.closeSpannerPool(); + for (Dialect dialect : new Dialect[] {Dialect.POSTGRESQL, Dialect.GOOGLE_STANDARD_SQL}) { + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.detectDialectResult(dialect)); + + try (Connection connection = super.createConnection()) { + for (IsolationLevel isolationLevel : + new IsolationLevel[] {IsolationLevel.REPEATABLE_READ, IsolationLevel.SERIALIZABLE}) { + for (ReadLockMode readLockMode : + new ReadLockMode[] {ReadLockMode.PESSIMISTIC, ReadLockMode.OPTIMISTIC}) { + for (boolean useSql : new boolean[] {true, false}) { + if (useSql) { + connection.execute( + Statement.of( + "begin transaction isolation level " + + isolationLevel.name().replace("_", " "))); + } else { + connection.beginTransaction(isolationLevel); + } + if (dialect == Dialect.POSTGRESQL) { + connection.execute( + Statement.of("set spanner.read_lock_mode = '" + readLockMode.name() + "'")); + } else { + connection.execute( + Statement.of("set read_lock_mode = '" + readLockMode.name() + "'")); + } + connection.executeUpdate(INSERT_STATEMENT); + connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertEquals(isolationLevel, request.getTransaction().getBegin().getIsolationLevel()); + assertEquals( + readLockMode, + request.getTransaction().getBegin().getReadWrite().getReadLockMode()); + assertFalse(request.getLastStatement()); + assertEquals(1, 
mockSpanner.countRequestsOfType(CommitRequest.class)); + + mockSpanner.clearRequests(); + } + } + } + } + SpannerPool.closeSpannerPool(); + } + } + + @Test + public void testSetTransactionIsolationLevel() { + SpannerPool.closeSpannerPool(); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.detectDialectResult(Dialect.POSTGRESQL)); + + try (Connection connection = super.createConnection()) { + for (boolean autocommit : new boolean[] {true, false}) { + connection.setAutocommit(autocommit); + + for (IsolationLevel isolationLevel : + new IsolationLevel[] {IsolationLevel.REPEATABLE_READ, IsolationLevel.SERIALIZABLE}) { + for (ReadLockMode readLockMode : + new ReadLockMode[] {ReadLockMode.OPTIMISTIC, ReadLockMode.PESSIMISTIC}) { + // Manually start a transaction if autocommit is enabled. + if (autocommit) { + connection.execute(Statement.of("begin")); + } + connection.execute( + Statement.of( + "set transaction isolation level " + isolationLevel.name().replace("_", " "))); + connection.execute( + Statement.of("set spanner.read_lock_mode = '" + readLockMode.name() + "'")); + connection.executeUpdate(INSERT_STATEMENT); + connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertEquals(isolationLevel, request.getTransaction().getBegin().getIsolationLevel()); + assertEquals( + readLockMode, request.getTransaction().getBegin().getReadWrite().getReadLockMode()); + assertFalse(request.getLastStatement()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + + mockSpanner.clearRequests(); + } + } + } + } + SpannerPool.closeSpannerPool(); + } + + @Test + public void testTransactionTimeout() { + // Use a fake ticker to be able to advance the clock without having to sleep for X 
ms. + AtomicLong nanos = new AtomicLong(); + Ticker ticker = + new Ticker() { + @Override + public long nanoTime() { + return nanos.get(); + } + }; + ConnectionOptions options = + ConnectionOptions.newBuilder().setUri(getBaseUrl()).setTicker(ticker).build(); + + try (Connection connection = options.getConnection()) { + // Set the transaction timeout to 500 milliseconds. + connection.setTransactionTimeout(Duration.ofMillis(500)); + + //noinspection EmptyTryBlock + try (ResultSet ignore = connection.executeQuery(SELECT1_STATEMENT)) {} + // Advance the time by 100ms. + nanos.addAndGet(TimeUnit.MILLISECONDS.toNanos(100)); + // Execute another statement. This should still succeed. + connection.execute(INSERT_STATEMENT); + + // Advance the time by 401ms. The deadline has now been exceeded and the commit should fail. + nanos.addAndGet(TimeUnit.MILLISECONDS.toNanos(401)); + SpannerException exception = assertThrows(SpannerException.class, connection::commit); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, exception.getErrorCode()); + } + // Verify that a transaction timeout does not apply to statements in auto-commit. + // Create a connection without a fake ticker. + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + // Set the transaction timeout so low that it will always be exceeded. + connection.setTransactionTimeout(Duration.ofNanos(1)); + + // This statement should succeed, as it does not use a transaction. + //noinspection EmptyTryBlock + try (ResultSet ignore = connection.executeQuery(SELECT1_STATEMENT)) {} + + // This statement also succeeds, because it uses a read-only transaction. + connection.setAutocommit(false); + connection.setReadOnly(true); + //noinspection EmptyTryBlock + try (ResultSet ignore = connection.executeQuery(SELECT1_STATEMENT)) {} + connection.commit(); + + // This statement fails, because it uses a read/write transaction. 
+ connection.setReadOnly(false); + SpannerException exception = + assertThrows(SpannerException.class, () -> connection.executeQuery(SELECT1_STATEMENT)); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, exception.getErrorCode()); + } + } + + @Test + public void testCanUseAllMethodsWithInternalRetriesDisabled() { + // Verify that all query/update methods work as expected when internal retries have been + // disabled. + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setRetryAbortsInternally(false); + + try (ResultSet result = connection.executeQuery(SELECT1_STATEMENT)) { + assertTrue(result.next()); + assertEquals(1L, result.getLong(0)); + assertFalse(result.next()); + } + assertEquals(1, connection.executeUpdate(INSERT_STATEMENT)); + try (ResultSet result = connection.executeQuery(INSERT_RETURNING_STATEMENT)) { + assertTrue(result.next()); + assertEquals(1L, result.getLong(0)); + assertFalse(result.next()); + } + + StatementResult statementResult = connection.execute(SELECT1_STATEMENT); + assertEquals(ResultType.RESULT_SET, statementResult.getResultType()); + try (ResultSet result = statementResult.getResultSet()) { + assertTrue(result.next()); + assertEquals(1L, result.getLong(0)); + assertFalse(result.next()); + } + + statementResult = connection.execute(INSERT_STATEMENT); + assertEquals(ResultType.UPDATE_COUNT, statementResult.getResultType()); + assertEquals(1L, statementResult.getUpdateCount().longValue()); + + statementResult = connection.execute(INSERT_RETURNING_STATEMENT); + assertEquals(ResultType.RESULT_SET, statementResult.getResultType()); + try (ResultSet result = statementResult.getResultSet()) { + assertTrue(result.next()); + assertEquals(1L, result.getLong(0)); + assertFalse(result.next()); + } + connection.commit(); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/TransactionModeConverterTest.java 
b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/TransactionModeConverterTest.java new file mode 100644 index 000000000000..f19b4b2eea16 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/TransactionModeConverterTest.java @@ -0,0 +1,73 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.TransactionModeConverter; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class TransactionModeConverterTest { + + @Test + public void testConvert() throws CompileException { + String allowedValues = + ReadOnlyStalenessConverterTest.getAllowedValues( + TransactionModeConverter.class, Dialect.GOOGLE_STANDARD_SQL); + assertThat(allowedValues, is(notNullValue())); + TransactionModeConverter converter = new TransactionModeConverter(allowedValues); + assertThat( + 
converter.convert("read write"), is(equalTo(TransactionMode.READ_WRITE_TRANSACTION))); + assertThat( + converter.convert("READ WRITE"), is(equalTo(TransactionMode.READ_WRITE_TRANSACTION))); + assertThat( + converter.convert("Read Write"), is(equalTo(TransactionMode.READ_WRITE_TRANSACTION))); + assertThat( + converter.convert("read write"), is(equalTo(TransactionMode.READ_WRITE_TRANSACTION))); + assertThat( + converter.convert("READ\nWRITE"), is(equalTo(TransactionMode.READ_WRITE_TRANSACTION))); + assertThat( + converter.convert("Read\tWrite"), is(equalTo(TransactionMode.READ_WRITE_TRANSACTION))); + + assertThat(converter.convert("read only"), is(equalTo(TransactionMode.READ_ONLY_TRANSACTION))); + assertThat(converter.convert("READ ONLY"), is(equalTo(TransactionMode.READ_ONLY_TRANSACTION))); + assertThat(converter.convert("Read Only"), is(equalTo(TransactionMode.READ_ONLY_TRANSACTION))); + assertThat( + converter.convert("read only"), is(equalTo(TransactionMode.READ_ONLY_TRANSACTION))); + assertThat(converter.convert("READ\nONLY"), is(equalTo(TransactionMode.READ_ONLY_TRANSACTION))); + assertThat(converter.convert("Read\tOnly"), is(equalTo(TransactionMode.READ_ONLY_TRANSACTION))); + + assertThat(converter.convert(""), is(nullValue())); + assertThat(converter.convert(" "), is(nullValue())); + assertThat(converter.convert("random string"), is(nullValue())); + assertThat(converter.convert("read_write"), is(nullValue())); + assertThat(converter.convert("Read_Write"), is(nullValue())); + assertThat(converter.convert("READ_WRITE"), is(nullValue())); + assertThat(converter.convert("read_only"), is(nullValue())); + assertThat(converter.convert("Read_Only"), is(nullValue())); + assertThat(converter.convert("READ_ONLY"), is(nullValue())); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITAsyncTransactionRetryTest.java 
b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITAsyncTransactionRetryTest.java new file mode 100644 index 000000000000..e25e376ca222 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITAsyncTransactionRetryTest.java @@ -0,0 +1,1011 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection.it; + +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.fail; + +import com.google.api.core.ApiFuture; +import com.google.api.core.SettableApiFuture; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AbortedDueToConcurrentModificationException; +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.AsyncResultSet; +import com.google.cloud.spanner.AsyncResultSet.CallbackResponse; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.connection.Connection; +import 
com.google.cloud.spanner.connection.ITAbstractSpannerTest; +import com.google.cloud.spanner.connection.TransactionRetryListener; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.logging.Level; +import java.util.logging.Logger; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * This integration test tests the different scenarios for automatically retrying read/write + * transactions, both when possible and when the transaction must abort because of a concurrent + * update. + */ +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITAsyncTransactionRetryTest extends ITAbstractSpannerTest { + private static final Logger logger = + Logger.getLogger(ITAsyncTransactionRetryTest.class.getName()); + + @Rule public TestName testName = new TestName(); + + private static final ExecutorService executor = Executors.newFixedThreadPool(4); + + @AfterClass + public static void shutdownExecutor() { + executor.shutdown(); + } + + @Override + protected void appendConnectionUri(StringBuilder uri) { + uri.append(";autocommit=false;retryAbortsInternally=true"); + } + + @Override + public boolean doCreateDefaultTestTable() { + return true; + } + + /** Clear the test table before each test run */ + @Before + public void clearTable() { + try (ITConnection connection = createConnection()) { + connection.bufferedWrite(Mutation.delete("TEST", KeySet.all())); + get(connection.commitAsync()); + } + } + + @Before + public void clearStatistics() { + RETRY_STATISTICS.clear(); + } + + @Before + 
public void logStart() { + logger.fine( + "--------------------------------------------------------------\n" + + testName.getMethodName() + + " started"); + } + + @After + public void logFinished() { + logger.fine( + "--------------------------------------------------------------\n" + + testName.getMethodName() + + " finished"); + } + + /** Simple data structure to keep track of retry statistics */ + private static class RetryStatistics { + private int totalRetryAttemptsStarted; + private int totalRetryAttemptsFinished; + private int totalSuccessfulRetries; + private int totalErroredRetries; + private int totalNestedAborts; + private int totalMaxAttemptsExceeded; + private int totalConcurrentModifications; + + private void clear() { + totalRetryAttemptsStarted = 0; + totalRetryAttemptsFinished = 0; + totalSuccessfulRetries = 0; + totalErroredRetries = 0; + totalNestedAborts = 0; + totalMaxAttemptsExceeded = 0; + totalConcurrentModifications = 0; + } + } + + /** + * Static to allow access from the {@link CountTransactionRetryListener}. Statistics are + * automatically cleared before each test case. + */ + public static final RetryStatistics RETRY_STATISTICS = new RetryStatistics(); + + /** + * Simple {@link TransactionRetryListener} that keeps track of the total count of the different + * transaction retry events of a {@link Connection}. Note that as {@link + * TransactionRetryListener}s are instantiated once per connection, the listener keeps track of + * the total statistics of a connection and not only of the last transaction. 
+ */ + public static class CountTransactionRetryListener implements TransactionRetryListener { + + @Override + public void retryStarting(Timestamp transactionStarted, long transactionId, int retryAttempt) { + RETRY_STATISTICS.totalRetryAttemptsStarted++; + } + + @Override + public void retryFinished( + Timestamp transactionStarted, long transactionId, int retryAttempt, RetryResult result) { + RETRY_STATISTICS.totalRetryAttemptsFinished++; + switch (result) { + case RETRY_ABORTED_AND_MAX_ATTEMPTS_EXCEEDED: + RETRY_STATISTICS.totalMaxAttemptsExceeded++; + break; + case RETRY_ABORTED_AND_RESTARTING: + RETRY_STATISTICS.totalNestedAborts++; + break; + case RETRY_ABORTED_DUE_TO_CONCURRENT_MODIFICATION: + RETRY_STATISTICS.totalConcurrentModifications++; + break; + case RETRY_ERROR: + RETRY_STATISTICS.totalErroredRetries++; + break; + case RETRY_SUCCESSFUL: + RETRY_STATISTICS.totalSuccessfulRetries++; + break; + default: + break; + } + } + } + + private ApiFuture getTestRecordCountAsync(Connection connection) { + final SettableApiFuture count = SettableApiFuture.create(); + try (AsyncResultSet rs = + connection.executeQueryAsync(Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE ID=1"))) { + rs.setCallback( + executor, + resultSet -> { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + count.set(resultSet.getLong("C")); + break; + } + } + }); + } + return count; + } + + private void verifyRecordCount(Connection connection, long expected) { + try (AsyncResultSet rs = + connection.executeQueryAsync(Statement.of("SELECT COUNT(*) AS C FROM TEST"))) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong("C")).isEqualTo(expected); + assertThat(rs.next()).isFalse(); + } + } + + /** Test successful retry when the commit aborts */ + @Test + public void testCommitAborted() { + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + 
createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + ApiFuture count = getTestRecordCountAsync(connection); + // do an insert + ApiFuture updateCount = + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test aborted')")); + // indicate that the next statement should abort + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + // do a commit that will first abort, and then on retry will succeed + ApiFuture commit = connection.commitAsync(); + + assertThat(get(count)).isEqualTo(0L); + // Wait until the commit has finished before checking retry stats. + assertThat(get(commit)).isNull(); + assertThat(get(updateCount)).isEqualTo(1L); + assertThat(RETRY_STATISTICS.totalRetryAttemptsStarted >= 1).isTrue(); + assertThat(RETRY_STATISTICS.totalRetryAttemptsFinished >= 1).isTrue(); + assertThat(RETRY_STATISTICS.totalSuccessfulRetries >= 1).isTrue(); + assertThat(RETRY_STATISTICS.totalErroredRetries).isEqualTo(0); + assertThat(RETRY_STATISTICS.totalConcurrentModifications).isEqualTo(0); + assertThat(RETRY_STATISTICS.totalMaxAttemptsExceeded).isEqualTo(0); + // verify that the insert succeeded + verifyRecordCount(connection, 1L); + } + } + + /** Test successful retry when an insert statement aborts */ + @Test + public void testInsertAborted() { + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + ApiFuture count = getTestRecordCountAsync(connection); + // indicate that the next statement should abort + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + // do an insert that will abort + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) 
VALUES (1, 'test aborted')")); + // do a commit + ApiFuture commit = connection.commitAsync(); + assertThat(get(count)).isEqualTo(0L); + assertThat(get(commit)).isNull(); + assertThat(RETRY_STATISTICS.totalSuccessfulRetries >= 1).isTrue(); + // verify that the insert succeeded + verifyRecordCount(connection, 1L); + } + } + + /** Test successful retry when an update statement aborts */ + @Test + public void testUpdateAborted() { + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + ApiFuture count = getTestRecordCountAsync(connection); + // insert a test record + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test aborted')")); + // indicate that the next statement should abort + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + // do an update that will abort + connection.executeUpdateAsync( + Statement.of("UPDATE TEST SET NAME='update aborted' WHERE ID=1")); + // do a commit + ApiFuture commit = connection.commitAsync(); + assertThat(get(count)).isEqualTo(0L); + assertThat(get(commit)).isNull(); + assertThat(RETRY_STATISTICS.totalSuccessfulRetries >= 1).isTrue(); + // verify that the update succeeded + try (AsyncResultSet rs = + connection.executeQueryAsync( + Statement.of( + "SELECT COUNT(*) AS C FROM TEST WHERE ID=1 AND NAME='update aborted'"))) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong("C")).isEqualTo(1L); + assertThat(rs.next()).isFalse(); + } + } + } + + /** Test successful retry when a query aborts */ + @Test + public void testQueryAborted() { + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + 
isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // insert a test record + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test aborted')")); + // indicate that the next statement should abort + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + // do a query that will abort + final SettableApiFuture countAfterInsert = SettableApiFuture.create(); + try (AsyncResultSet rs = + connection.executeQueryAsync(Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE ID=1"))) { + rs.setCallback( + executor, + resultSet -> { + try { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + countAfterInsert.set(resultSet.getLong("C")); + break; + } + } + } catch (Throwable t) { + countAfterInsert.setException(t); + return CallbackResponse.DONE; + } + }); + } + connection.commitAsync(); + assertThat(get(countAfterInsert)).isEqualTo(1L); + assertThat(RETRY_STATISTICS.totalSuccessfulRetries >= 1).isTrue(); + // verify that the update succeeded + try (ResultSet rs = + connection.executeQueryAsync(Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE ID=1"))) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong("C")).isEqualTo(1L); + assertThat(rs.next()).isFalse(); + } + } + } + + /** Test successful retry when a call to {@link ResultSet#next()} aborts */ + @Test + public void testNextCallAborted() { + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // insert two test records + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + // do a 
query + try (AsyncResultSet rs = + connection.executeQueryAsync(Statement.of("SELECT * FROM TEST ORDER BY ID"))) { + // the first record should be accessible without any problems + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong("ID")).isEqualTo(1L); + + // indicate that the next statement should abort + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong("ID")).isEqualTo(2L); + assertThat(RETRY_STATISTICS.totalSuccessfulRetries >= 1).isTrue(); + // there should be only two records + assertThat(rs.next()).isFalse(); + } + connection.commitAsync(); + // verify that the transaction succeeded + verifyRecordCount(connection, 2L); + } + } + + /** Test successful retry after multiple aborts */ + @Test + public void testMultipleAborts() { + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + ApiFuture count = getTestRecordCountAsync(connection); + // do three inserts which all will abort and retry + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + get( + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')"))); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + get( + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')"))); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + get( + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')"))); + + ApiFuture commit = connection.commitAsync(); + assertThat(get(count)).isEqualTo(0L); + assertThat(get(commit)).isNull(); + assertThat(RETRY_STATISTICS.totalSuccessfulRetries).isAtLeast(3); + // verify that the inserts succeeded + 
verifyRecordCount(connection, 3L); + } + } + + /** + * Tests that a transaction retry can be successful after a select, as long as the select returns + * the same results during the retry + */ + @Test + public void testAbortAfterSelect() { + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + ApiFuture count = getTestRecordCountAsync(connection); + // insert a test record + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + // select the test record + final SettableApiFuture initialRecord = SettableApiFuture.create(); + try (AsyncResultSet rs = + connection.executeQueryAsync(Statement.of("SELECT * FROM TEST WHERE ID=1"))) { + rs.setCallback( + executor, + resultSet -> { + try { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + initialRecord.set(resultSet.getCurrentRowAsStruct()); + } + } + } catch (Throwable t) { + initialRecord.setException(t); + return CallbackResponse.DONE; + } + }); + } + // do another insert that will abort and retry + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + + // select the first test record again + final SettableApiFuture secondRecord = SettableApiFuture.create(); + try (AsyncResultSet rs = + connection.executeQueryAsync(Statement.of("SELECT * FROM TEST WHERE ID=1"))) { + rs.setCallback( + executor, + resultSet -> { + try { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + secondRecord.set(resultSet.getCurrentRowAsStruct()); + 
} + } + } catch (Throwable t) { + secondRecord.setException(t); + return CallbackResponse.DONE; + } + }); + } + ApiFuture commit = connection.commitAsync(); + assertThat(get(count)).isEqualTo(0L); + assertThat(get(initialRecord)).isEqualTo(get(secondRecord)); + assertThat(get(commit)).isNull(); + assertThat(RETRY_STATISTICS.totalSuccessfulRetries >= 1).isTrue(); + } + } + + /** + * Test a successful retry when a {@link ResultSet} has been consumed half way. The {@link + * ResultSet} should still be at the same position and still behave as if the original transaction + * did not abort. + */ + @Test + public void testAbortWithResultSetHalfway() { + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // insert two test records + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + // select the test records + try (AsyncResultSet rs = + connection.executeQueryAsync(Statement.of("SELECT * FROM TEST ORDER BY ID"))) { + // iterate one step + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong("ID")).isEqualTo(1L); + // do another insert that will abort and retry + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')")); + // iterate another step + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong("ID")).isEqualTo(2L); + // ensure we are at the end of the result set + assertThat(rs.next()).isFalse(); + } + get(connection.commitAsync()); + assertThat(RETRY_STATISTICS.totalSuccessfulRetries).isAtLeast(1); + // verify that all the inserts succeeded + verifyRecordCount(connection, 3L); + } + 
} + + /** Test successful retry after a {@link ResultSet} has been fully consumed. */ + @Test + public void testAbortWithResultSetFullyConsumed() { + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // insert two test records + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + // select the test records and iterate over them + try (AsyncResultSet rs = + connection.executeQueryAsync(Statement.of("SELECT * FROM TEST ORDER BY ID"))) { + // do nothing, just consume the result set + rs.setCallback( + executor, + resultSet -> { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + break; + } + } + }); + } + // do another insert that will abort and retry + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')")); + get(connection.commitAsync()); + + assertThat(RETRY_STATISTICS.totalSuccessfulRetries).isAtLeast(1); + // verify that all the inserts succeeded + verifyRecordCount(connection, 3L); + } + } + + @Test + public void testAbortWithConcurrentInsert() { + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // insert two test records + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdateAsync( + 
Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + // select the test records and consume the entire result set + try (AsyncResultSet rs = + connection.executeQueryAsync(Statement.of("SELECT * FROM TEST ORDER BY ID"))) { + get( + rs.setCallback( + executor, + resultSet -> { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + break; + } + } + })); + } + // open a new connection and transaction and do an additional insert + try (ITConnection connection2 = createConnection()) { + connection2.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')")); + get(connection2.commitAsync()); + } + // now try to do an insert that will abort. The retry should now fail as there has been a + // concurrent modification + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + ApiFuture updateCount = + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (4, 'test 4')")); + try { + get(updateCount); + fail("Missing expected exception"); + } catch (AbortedDueToConcurrentModificationException e) { + assertRetryStatistics(1, 1, 0); + } + } + } + + @Test + public void testAbortWithConcurrentDelete() { + AbortInterceptor interceptor = new AbortInterceptor(0); + // first insert two test records + try (ITConnection connection = createConnection()) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + get(connection.commitAsync()); + } + // open a new connection and select the two test records + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + 
interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // select the test records and consume the entire result set + try (AsyncResultSet rs = + connection.executeQueryAsync(Statement.of("SELECT * FROM TEST ORDER BY ID"))) { + get( + rs.setCallback( + executor, + resultSet -> { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + break; + } + } + })); + } + // open a new connection and transaction and remove one of the test records + try (ITConnection connection2 = createConnection()) { + connection2.executeUpdateAsync(Statement.of("DELETE FROM TEST WHERE ID=1")); + get(connection2.commitAsync()); + } + // now try to do an insert that will abort. The retry should now fail as there has been a + // concurrent modification + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + try { + get( + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')"))); + fail("Missing expected exception"); + } catch (AbortedDueToConcurrentModificationException e) { + assertRetryStatistics(1, 1, 0); + } + } + } + + @Test + public void testAbortWithConcurrentUpdate() { + AbortInterceptor interceptor = new AbortInterceptor(0); + // first insert two test records + try (ITConnection connection = createConnection()) { + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + get(connection.commitAsync()); + } + // open a new connection and select the two test records + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // select the test records and consume the entire result 
set + try (AsyncResultSet rs = + connection.executeQueryAsync(Statement.of("SELECT * FROM TEST ORDER BY ID"))) { + get( + rs.setCallback( + executor, + resultSet -> { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + break; + } + } + })); + } + // open a new connection and transaction and update one of the test records + try (ITConnection connection2 = createConnection()) { + connection2.executeUpdateAsync( + Statement.of("UPDATE TEST SET NAME='test updated' WHERE ID=2")); + get(connection2.commitAsync()); + } + // now try to do an insert that will abort. The retry should now fail as there has been a + // concurrent modification + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + try { + get( + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')"))); + fail("Missing expected exception"); + } catch (AbortedDueToConcurrentModificationException e) { + assertRetryStatistics(1, 1, 0); + } + } + } + + /** + * Test that shows that a transaction retry is possible even when there is a concurrent insert + * that has an impact on a query that has been executed, as long as the user hasn't actually seen + * the relevant part of the result of the query + */ + @Test + public void testAbortWithUnseenConcurrentInsert() throws InterruptedException { + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // insert three test records + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + connection.executeUpdateAsync( + Statement.of("INSERT INTO 
TEST (ID, NAME) VALUES (3, 'test 3')")); + // select the test records and consume part of the result set + final AtomicInteger count = new AtomicInteger(); + final AtomicLong lastSeenId = new AtomicLong(); + final CountDownLatch latch1 = new CountDownLatch(1); + final CountDownLatch latch2 = new CountDownLatch(1); + // Use buffer size 1. This means that the underlying result set will see 2 records (1 in the + // buffer and 1 waiting to be put in the buffer). + try (AsyncResultSet rs = + connection.executeQueryAsync( + Statement.of("SELECT * FROM TEST ORDER BY ID"), Options.bufferRows(1))) { + ApiFuture finished = + rs.setCallback( + executor, + resultSet -> { + try { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + count.incrementAndGet(); + lastSeenId.set(resultSet.getLong("ID")); + break; + } + if (count.get() == 1) { + // Let the other transaction proceed. + latch1.countDown(); + // Wait until the transaction has been aborted and retried. + if (!latch2.await(120L, TimeUnit.SECONDS)) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.DEADLINE_EXCEEDED, "Timeout while waiting for latch2"); + } + } + } + } catch (Throwable t) { + throw SpannerExceptionFactory.asSpannerException(t); + } + }); + // Open a new connection and transaction and do an additional insert. This insert will be + // included in a retry of the above query, but this has not yet been 'seen' by the user, + // hence is not a problem for retrying the transaction. + try (ITConnection connection2 = createConnection()) { + assertThat(latch1.await(60L, TimeUnit.SECONDS)).isTrue(); + connection2.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (4, 'test 4')")); + get(connection2.commitAsync()); + } + // now try to do an insert that will abort. The retry should still succeed. 
+ interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + int currentRetryCount = RETRY_STATISTICS.totalRetryAttemptsStarted; + get( + connection.executeUpdateAsync( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (5, 'test 5')"))); + assertThat(RETRY_STATISTICS.totalRetryAttemptsStarted).isAtLeast(currentRetryCount + 1); + // Consume the rest of the result set. The insert by the other transaction should now be + // included in the result set as the transaction retried. Although this means that the + // result + // is different after a retry, it is not different as seen by the user, as the user didn't + // know that the result set did not have any more results before the transaction retry. + latch2.countDown(); + get(finished); + // record with id 5 should not be visible, as it was added to the transaction after the + // query + // was executed + assertThat(count.get()).isEqualTo(4); + assertThat(lastSeenId.get()).isEqualTo(4L); + } + get(connection.commitAsync()); + assertThat(RETRY_STATISTICS.totalSuccessfulRetries).isAtLeast(1); + } + } + + /** Test the successful retry of a transaction with a large {@link ResultSet} */ + @Test + public void testRetryLargeResultSet() { + final int NUMBER_OF_TEST_RECORDS = 100000; + final long UPDATED_RECORDS = 1000L; + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = createConnection()) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // insert test records + for (int i = 0; i < NUMBER_OF_TEST_RECORDS; i++) { + connection.bufferedWrite( + Mutation.newInsertBuilder("TEST").set("ID").to(i).set("NAME").to("test " + i).build()); + if (i % 1000 == 0) { + connection.commitAsync(); + } + } + get(connection.commitAsync()); + } + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + 
isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // select the test records and iterate over them + try (AsyncResultSet rs = + connection.executeQueryAsync(Statement.of("SELECT * FROM TEST ORDER BY ID"))) { + ApiFuture finished = + rs.setCallback( + executor, + resultSet -> { + // do nothing, just consume the result set + while (true) { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + break; + } + } + }); + // Wait until the entire result set has been consumed. + get(finished); + } + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // Do an update that will abort and retry. + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + connection.executeUpdateAsync( + Statement.newBuilder("UPDATE TEST SET NAME='updated' WHERE ID<@max_id") + .bind("max_id") + .to(UPDATED_RECORDS) + .build()); + connection.commitAsync(); + // verify that the update succeeded + try (AsyncResultSet rs = + connection.executeQueryAsync( + Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE NAME='updated'"))) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong("C")).isEqualTo(UPDATED_RECORDS); + assertThat(rs.next()).isFalse(); + } + // Verify that the transaction retried. 
+ assertRetryStatistics(1, 0, 1); + } + } + + /** Test the successful retry of a transaction with a high chance of multiple aborts */ + @Test + public void testRetryHighAbortRate() { + final int NUMBER_OF_TEST_RECORDS = 10000; + final long UPDATED_RECORDS = 1000L; + // abort on 25% of all statements + AbortInterceptor interceptor = new AbortInterceptor(0.25D); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // insert test records + for (int i = 0; i < NUMBER_OF_TEST_RECORDS; i++) { + connection.bufferedWrite( + Mutation.newInsertBuilder("TEST").set("ID").to(i).set("NAME").to("test " + i).build()); + if (i % 1000 == 0) { + connection.commitAsync(); + } + } + connection.commitAsync(); + // select the test records and iterate over them + // reduce the abort rate to 0.01% as each next() call could abort + interceptor.setProbability(0.0001D); + try (AsyncResultSet rs = + connection.executeQueryAsync(Statement.of("SELECT * FROM TEST ORDER BY ID"))) { + ApiFuture finished = + rs.setCallback( + executor, + resultSet -> { + // do nothing, just consume the result set + while (true) { + switch (resultSet.tryNext()) { + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + break; + } + } + }); + // Wait until the entire result set has been consumed. + get(finished); + } + // increase the abort rate to 50% + interceptor.setProbability(0.50D); + connection.executeUpdateAsync( + Statement.newBuilder("UPDATE TEST SET NAME='updated' WHERE ID<@max_id") + .bind("max_id") + .to(UPDATED_RECORDS) + .build()); + // Wait for the commit to finish, as it could be that the transaction is aborted so many times + // that the last update does not succeed. 
+ get(connection.commitAsync()); + // verify that the update succeeded + try (AsyncResultSet rs = + connection.executeQueryAsync( + Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE NAME='updated'"))) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong("C")).isEqualTo(UPDATED_RECORDS); + assertThat(rs.next()).isFalse(); + } + get(connection.commitAsync()); + } catch (AbortedException e) { + // This could happen if the number of aborts exceeds the max number of retries. + logger.log(Level.FINE, "testRetryHighAbortRate aborted because of too many retries", e); + } + logger.fine("Total number of retries started: " + RETRY_STATISTICS.totalRetryAttemptsStarted); + logger.fine("Total number of retries finished: " + RETRY_STATISTICS.totalRetryAttemptsFinished); + logger.fine("Total number of retries successful: " + RETRY_STATISTICS.totalSuccessfulRetries); + logger.fine("Total number of retries aborted: " + RETRY_STATISTICS.totalNestedAborts); + logger.fine( + "Total number of times the max retry count was exceeded: " + + RETRY_STATISTICS.totalMaxAttemptsExceeded); + } + + private void assertRetryStatistics( + int minAttemptsStartedExpected, + int concurrentModificationsExpected, + int successfulRetriesExpected) { + assertThat(RETRY_STATISTICS.totalRetryAttemptsStarted).isAtLeast(minAttemptsStartedExpected); + assertThat(RETRY_STATISTICS.totalConcurrentModifications) + .isEqualTo(concurrentModificationsExpected); + assertThat(RETRY_STATISTICS.totalSuccessfulRetries).isAtLeast(successfulRetriesExpected); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITBulkConnectionTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITBulkConnectionTest.java new file mode 100644 index 000000000000..42358a647a1a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITBulkConnectionTest.java @@ -0,0 +1,88 @@ +/* + * 
Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection.it; + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.MatcherAssert.assertThat; + +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SerialIntegrationTest; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Test opening multiple generic (not JDBC) Spanner connections. This test should not be run in + * parallel with other tests in the same JVM, as it tries to close all active connections, and + * should not try to close connections of other integration tests. 
+ */ +@Category(SerialIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITBulkConnectionTest extends ITAbstractSpannerTest { + private static final int NUMBER_OF_TEST_CONNECTIONS = 250; + + @Test + public void testBulkCreateConnectionsSingleThreaded() { + List connections = new ArrayList<>(); + for (int i = 0; i < NUMBER_OF_TEST_CONNECTIONS; i++) { + connections.add(createConnection()); + } + for (ITConnection connection : connections) { + try (ResultSet rs = connection.executeQuery(Statement.of("select 1"))) { + assertThat(rs.next(), is(true)); + assertThat(connection.getReadTimestamp(), is(notNullValue())); + } + } + for (ITConnection connection : connections) { + connection.close(); + } + // close Spanner instances explicitly. This method will throw an exception if there are any + // connections still open in the pool + closeSpanner(); + } + + @Test + public void testBulkCreateConnectionsMultiThreaded() throws InterruptedException { + ExecutorService executor = Executors.newFixedThreadPool(50); + for (int i = 0; i < NUMBER_OF_TEST_CONNECTIONS; i++) { + executor.submit( + () -> { + try (ITConnection connection = createConnection()) { + try (ResultSet rs = connection.executeQuery(Statement.of("select 1"))) { + assertThat(rs.next(), is(true)); + assertThat(connection.getReadTimestamp(), is(notNullValue())); + } + } + return null; + }); + } + executor.shutdown(); + executor.awaitTermination(10L, TimeUnit.SECONDS); + // close Spanner instances explicitly. 
This method will throw an exception if there are any + // connections still open in the pool + closeSpanner(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITCommitResponseTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITCommitResponseTest.java new file mode 100644 index 000000000000..91393eed3014 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITCommitResponseTest.java @@ -0,0 +1,156 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITCommitResponseTest extends ITAbstractSpannerTest { + @Override + public void appendConnectionUri(StringBuilder uri) { + uri.append(";autocommit=false"); + } + + @Override + public boolean doCreateDefaultTestTable() { + return true; + } + + @Before + public void clearTestData() { + try (ITConnection connection = createConnection()) { + connection.bufferedWrite(Mutation.delete("TEST", KeySet.all())); + connection.commit(); + } + } + + @Test + public void testDefaultNoCommitStats() { + try (ITConnection connection = createConnection()) { + connection.bufferedWrite( + Mutation.newInsertBuilder("TEST").set("ID").to(1L).set("NAME").to("TEST").build()); + connection.commit(); + assertNotNull(connection.getCommitResponse()); + assertNotNull(connection.getCommitResponse().getCommitTimestamp()); + assertFalse(connection.getCommitResponse().hasCommitStats()); + } + } + + @Test + public void testReturnCommitStats() { + assumeFalse("Emulator does not return commit statistics", isUsingEmulator()); + try (ITConnection connection = createConnection()) { + 
connection.setReturnCommitStats(true); + connection.bufferedWrite( + Mutation.newInsertBuilder("TEST").set("ID").to(1L).set("NAME").to("TEST").build()); + connection.commit(); + assertNotNull(connection.getCommitResponse()); + assertNotNull(connection.getCommitResponse().getCommitTimestamp()); + assertTrue(connection.getCommitResponse().hasCommitStats()); + assertEquals(2L, connection.getCommitResponse().getCommitStats().getMutationCount()); + } + } + + @Test + public void testReturnCommitStatsUsingSql() { + assumeFalse("Emulator does not return commit statistics", isUsingEmulator()); + try (ITConnection connection = createConnection()) { + connection.execute(Statement.of("SET RETURN_COMMIT_STATS=TRUE")); + connection.bufferedWrite( + Mutation.newInsertBuilder("TEST").set("ID").to(1L).set("NAME").to("TEST").build()); + connection.commit(); + assertNotNull(connection.getCommitResponse()); + assertNotNull(connection.getCommitResponse().getCommitTimestamp()); + assertTrue(connection.getCommitResponse().hasCommitStats()); + assertEquals(2L, connection.getCommitResponse().getCommitStats().getMutationCount()); + try (ResultSet resultSet = + connection.execute(Statement.of("SHOW VARIABLE COMMIT_RESPONSE")).getResultSet()) { + assertTrue(resultSet.next()); + assertNotNull(resultSet.getTimestamp("COMMIT_TIMESTAMP")); + assertEquals(2L, resultSet.getLong("MUTATION_COUNT")); + assertFalse(resultSet.next()); + } + } + } + + @Test + public void testAutocommitDefaultNoCommitStats() { + try (ITConnection connection = createConnection()) { + connection.setAutocommit(true); + connection.write( + Mutation.newInsertBuilder("TEST").set("ID").to(1L).set("NAME").to("TEST").build()); + assertNotNull(connection.getCommitResponse()); + assertNotNull(connection.getCommitResponse().getCommitTimestamp()); + assertFalse(connection.getCommitResponse().hasCommitStats()); + } + } + + @Test + public void testAutocommitReturnCommitStats() { + assumeFalse("Emulator does not return commit 
statistics", isUsingEmulator()); + try (ITConnection connection = createConnection()) { + connection.setAutocommit(true); + connection.setReturnCommitStats(true); + connection.write( + Mutation.newInsertBuilder("TEST").set("ID").to(1L).set("NAME").to("TEST").build()); + assertNotNull(connection.getCommitResponse()); + assertNotNull(connection.getCommitResponse().getCommitTimestamp()); + assertTrue(connection.getCommitResponse().hasCommitStats()); + assertEquals(2L, connection.getCommitResponse().getCommitStats().getMutationCount()); + } + } + + @Test + public void testAutocommitReturnCommitStatsUsingSql() { + assumeFalse("Emulator does not return commit statistics", isUsingEmulator()); + try (ITConnection connection = createConnection()) { + connection.execute(Statement.of("SET AUTOCOMMIT=TRUE")); + connection.execute(Statement.of("SET RETURN_COMMIT_STATS=TRUE")); + connection.write( + Mutation.newInsertBuilder("TEST").set("ID").to(1L).set("NAME").to("TEST").build()); + assertNotNull(connection.getCommitResponse()); + assertNotNull(connection.getCommitResponse().getCommitTimestamp()); + assertTrue(connection.getCommitResponse().hasCommitStats()); + assertEquals(2L, connection.getCommitResponse().getCommitStats().getMutationCount()); + try (ResultSet resultSet = + connection.execute(Statement.of("SHOW VARIABLE COMMIT_RESPONSE")).getResultSet()) { + assertTrue(resultSet.next()); + assertNotNull(resultSet.getTimestamp("COMMIT_TIMESTAMP")); + assertEquals(2L, resultSet.getLong("MUTATION_COUNT")); + assertFalse(resultSet.next()); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITDdlTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITDdlTest.java new file mode 100644 index 000000000000..affc7ad2a181 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITDdlTest.java @@ -0,0 +1,212 @@ +/* + * Copyright 
2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection.it; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseNotFoundException; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.MissingDefaultSequenceKindException; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerBatchUpdateException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.Connection; +import com.google.cloud.spanner.connection.ConnectionOptions; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest; +import com.google.cloud.spanner.connection.SqlScriptVerifier; +import com.google.cloud.spanner.testing.EmulatorSpannerHelper; +import java.util.Arrays; +import java.util.Collections; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Execute DDL statements using 
the generic connection API. */ +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITDdlTest extends ITAbstractSpannerTest { + @BeforeClass + public static void setup() { + // This overrides the default behavior that creates a single database for the test class. This + // test needs a separate database per method. + } + + @Before + public void createTestDatabase() { + database = env.getTestHelper().createTestDatabase(); + } + + @Test + public void testSqlScript() throws Exception { + SqlScriptVerifier verifier = new SqlScriptVerifier(new ITConnectionProvider()); + verifier.verifyStatementsInFile("ITDdlTest.sql", SqlScriptVerifier.class, false); + } + + @Test + public void testCreateDatabase() { + DatabaseAdminClient client = getTestEnv().getTestHelper().getClient().getDatabaseAdminClient(); + String instance = getTestEnv().getTestHelper().getInstanceId().getInstance(); + String name = getTestEnv().getTestHelper().getUniqueDatabaseId(); + + assertThrows(DatabaseNotFoundException.class, () -> client.getDatabase(instance, name)); + + try (Connection connection = createConnection()) { + connection.execute(Statement.of(String.format("CREATE DATABASE `%s`", name))); + assertNotNull(client.getDatabase(instance, name)); + } finally { + client.dropDatabase(instance, name); + } + } + + @Test + public void testDefaultSequenceKind() { + try (Connection connection = createConnection()) { + Statement statement = + Statement.of( + "create table test (id int64 auto_increment primary key, value string(max))"); + + // Creating a table with an auto_increment column fails if no default sequence kind has been + // set. + assertNull(connection.getDefaultSequenceKind()); + assertThrows(MissingDefaultSequenceKindException.class, () -> connection.execute(statement)); + + // Setting a default sequence kind on the connection should make the statement succeed. 
+ connection.setDefaultSequenceKind("bit_reversed_positive"); + connection.execute(statement); + + assertEquals( + 1L, connection.executeUpdate(Statement.of("insert into test (value) values ('One')"))); + try (ResultSet resultSet = connection.executeQuery(Statement.of("select * from test"))) { + assertTrue(resultSet.next()); + assertEquals("One", resultSet.getString(1)); + assertFalse(resultSet.next()); + } + } + } + + @Test + public void testDefaultSequenceKind_PostgreSQL() throws Exception { + DatabaseAdminClient client = getTestEnv().getTestHelper().getClient().getDatabaseAdminClient(); + String instance = getTestEnv().getTestHelper().getInstanceId().getInstance(); + String name = getTestEnv().getTestHelper().getUniqueDatabaseId(); + + Database database = + client + .createDatabase( + instance, + "create database \"" + name + "\"", + Dialect.POSTGRESQL, + Collections.emptyList()) + .get(); + + StringBuilder url = extractConnectionUrl(getTestEnv().getTestHelper().getOptions(), database); + ConnectionOptions.Builder builder = ConnectionOptions.newBuilder().setUri(url.toString()); + if (hasValidKeyFile()) { + builder.setCredentialsUrl(getKeyFile()); + } + ConnectionOptions options = builder.build(); + + try (Connection connection = options.getConnection()) { + Statement statement = + Statement.of("create table test (id serial primary key, value varchar)"); + + // Creating a table with an auto_increment column fails if no default sequence kind has been + // set. + assertNull(connection.getDefaultSequenceKind()); + assertThrows(MissingDefaultSequenceKindException.class, () -> connection.execute(statement)); + + // Setting a default sequence kind on the connection should make the statement succeed. 
+ connection.setDefaultSequenceKind("bit_reversed_positive"); + connection.execute(statement); + + assertEquals( + 1L, connection.executeUpdate(Statement.of("insert into test (value) values ('One')"))); + try (ResultSet resultSet = connection.executeQuery(Statement.of("select * from test"))) { + assertTrue(resultSet.next()); + assertEquals("One", resultSet.getString(1)); + assertFalse(resultSet.next()); + } + } finally { + client.dropDatabase(instance, name); + } + } + + @Test + public void testDefaultSequenceKindInBatch() { + try (Connection connection = createConnection()) { + Statement statement1 = + Statement.of("create table testseq1 (id1 int64 primary key, value string(max))"); + Statement statement2 = + Statement.of( + "create table testseq2 (id2 int64 auto_increment primary key, value string(max))"); + + // Creating a table with an auto_increment column fails if no default sequence kind has been + // set. + assertNull(connection.getDefaultSequenceKind()); + connection.startBatchDdl(); + connection.execute(statement1); + connection.execute(statement2); + SpannerBatchUpdateException exception = + assertThrows(SpannerBatchUpdateException.class, connection::runBatch); + long updateCount = Arrays.stream(exception.getUpdateCounts()).sum(); + // The emulator refuses the entire batch. Spanner executes the first statement and fails on + // the second statement. + if (EmulatorSpannerHelper.isUsingEmulator()) { + assertEquals(0, updateCount); + } else { + assertEquals(1, updateCount); + } + + // Setting a default sequence kind on the connection should make the statement succeed. 
+ connection.setDefaultSequenceKind("bit_reversed_positive"); + connection.startBatchDdl(); + if (updateCount == 0) { + connection.execute(statement1); + } + connection.execute(statement2); + connection.runBatch(); + } + } + + @Test + public void testDefaultSequenceKindRetriesBatchCorrectly() { + try (Connection connection = createConnection()) { + Statement statement1 = + Statement.of("create table testseq1 (id1 int64 primary key, value string(max))"); + Statement statement2 = + Statement.of( + "create table testseq2 (id2 int64 auto_increment primary key, value string(max))"); + + connection.setDefaultSequenceKind("bit_reversed_positive"); + connection.startBatchDdl(); + connection.execute(statement1); + connection.execute(statement2); + connection.runBatch(); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITDelayBeginTransactionTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITDelayBeginTransactionTest.java new file mode 100644 index 000000000000..e3a134d83d89 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITDelayBeginTransactionTest.java @@ -0,0 +1,296 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection.it; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.spanner.AbortedDueToConcurrentModificationException; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest; +import com.google.cloud.spanner.testing.EmulatorSpannerHelper; +import com.google.common.collect.ImmutableList; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITDelayBeginTransactionTest extends ITAbstractSpannerTest { + @Override + public void appendConnectionUri(StringBuilder uri) { + uri.append(";autocommit=false;delayTransactionStartUntilFirstWrite=true"); + } + + @Override + public boolean doCreateDefaultTestTable() { + return true; + } + + @Before + public void setupTestData() { + try (ITConnection connection = createConnection()) { + connection.bufferedWrite(Mutation.delete("TEST", KeySet.all())); + connection.commit(); + + connection.bufferedWrite( + ImmutableList.of( + Mutation.newInsertBuilder("TEST").set("id").to(1L).set("name").to("One").build(), + Mutation.newInsertBuilder("TEST").set("id").to(2L).set("name").to("Two").build())); + connection.commit(); + } + } + + @Test + public void testReadExistingData() { + try (ITConnection connection = createConnection()) { + try (ResultSet resultSet = + connection.executeQuery(Statement.of("select * from test order by id"))) { + assertTrue(resultSet.next()); + 
assertEquals(1L, resultSet.getLong("ID")); + assertEquals("One", resultSet.getString("NAME")); + + assertTrue(resultSet.next()); + assertEquals(2L, resultSet.getLong("ID")); + assertEquals("Two", resultSet.getString("NAME")); + + assertFalse(resultSet.next()); + } + // This is effectively a no-op when using delayed-begin-transaction and the transaction has + // only executed queries. + connection.commit(); + } + } + + @Test + public void testReadThenWrite() { + try (ITConnection connection = createConnection()) { + try (ResultSet resultSet = + connection.executeQuery(Statement.of("select * from test order by id"))) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong("ID")); + assertEquals("One", resultSet.getString("NAME")); + + assertTrue(resultSet.next()); + assertEquals(2L, resultSet.getLong("ID")); + assertEquals("Two", resultSet.getString("NAME")); + + assertFalse(resultSet.next()); + } + connection.executeUpdate( + Statement.newBuilder("insert into test (id, name) values (@id, @name)") + .bind("id") + .to(3L) + .bind("name") + .to("Three") + .build()); + + connection.commit(); + + // Verify that the new row was inserted. + try (ResultSet resultSet = + connection.executeQuery(Statement.of("select count(*) from test"))) { + assertTrue(resultSet.next()); + assertEquals(3L, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + } + } + + @Test + public void testWriteThenRead() { + try (ITConnection connection = createConnection()) { + connection.executeUpdate( + Statement.newBuilder("insert into test (id, name) values (@id, @name)") + .bind("id") + .to(3L) + .bind("name") + .to("Three") + .build()); + + // Verify that we can read both existing data and the row that we just inserted. 
+ try (ResultSet resultSet = + connection.executeQuery( + Statement.of("select * from test where id in (2,3) order by id"))) { + assertTrue(resultSet.next()); + assertEquals(2L, resultSet.getLong("ID")); + assertEquals("Two", resultSet.getString("NAME")); + + assertTrue(resultSet.next()); + assertEquals(3L, resultSet.getLong("ID")); + assertEquals("Three", resultSet.getString("NAME")); + + assertFalse(resultSet.next()); + } + + connection.commit(); + + // Verify that the new row was inserted. + try (ResultSet resultSet = + connection.executeQuery(Statement.of("select count(*) from test"))) { + assertTrue(resultSet.next()); + assertEquals(3L, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + } + } + + @Test + public void testConcurrentReadAndWrites() { + try (ITConnection connection1 = createConnection(); + ITConnection connection2 = createConnection()) { + // Scan all the rows in the test table using the first connection. + int originalRowCount = 0; + try (ResultSet resultSet = + connection1.executeQuery(Statement.of("select * from test order by id"))) { + while (resultSet.next()) { + originalRowCount++; + } + } + // Insert a new row using the other transaction. + connection2.executeUpdate( + Statement.newBuilder("insert into test (id, name) values (@id, @name)") + .bind("id") + .to(3L) + .bind("name") + .to("Three") + .build()); + // Rescan the table using the first transaction. This should not return the new row yet, as it + // has not been committed. It should also not cause any concurrency issues with the other + // transaction. + int rowCount = 0; + try (ResultSet resultSet = + connection1.executeQuery(Statement.of("select * from test order by id"))) { + while (resultSet.next()) { + rowCount++; + } + } + assertEquals(originalRowCount, rowCount); + // Commit the transaction that inserted a new row. + connection2.commit(); + // The new row should now be visible to the first transaction. 
+ rowCount = 0; + try (ResultSet resultSet = + connection1.executeQuery(Statement.of("select * from test order by id"))) { + while (resultSet.next()) { + rowCount++; + } + } + assertEquals(originalRowCount + 1, rowCount); + connection1.commit(); + } + } + + @Test + public void testConcurrentWrites() { + assumeFalse( + "The emulator does not support concurrent transactions", + EmulatorSpannerHelper.isUsingEmulator()); + + try (ITConnection connection1 = createConnection(); + ITConnection connection2 = createConnection()) { + // Scan all the rows in the test table using the first connection. + int originalRowCount = 0; + try (ResultSet resultSet = + connection1.executeQuery(Statement.of("select * from test order by id"))) { + while (resultSet.next()) { + originalRowCount++; + } + } + // Insert new rows using both transactions. These are non-conflicting and should therefore be + // possible. + connection2.executeUpdate( + Statement.newBuilder("insert into test (id, name) values (@id, @name)") + .bind("id") + .to(3L) + .bind("name") + .to("Three") + .build()); + connection1.executeUpdate( + Statement.newBuilder("insert into test (id, name) values (@id, @name)") + .bind("id") + .to(4L) + .bind("name") + .to("Four") + .build()); + // Commit the transaction for connection2. This should make the row it inserted visible to + // connection1, as connection1's transaction started after connection2. + connection2.commit(); + + // Rescan the table using the first transaction. This should now return both the above new + // rows. 
+ int rowCount = 0; + try (ResultSet resultSet = + connection1.executeQuery(Statement.of("select * from test order by id"))) { + while (resultSet.next()) { + rowCount++; + } + } + assertEquals(originalRowCount + 2, rowCount); + connection1.commit(); + } + } + + @Test + public void testConflictingTransactions() { + assumeFalse( + "The emulator does not support concurrent transactions", + EmulatorSpannerHelper.isUsingEmulator()); + + try (ITConnection connection1 = createConnection(); + ITConnection connection2 = createConnection()) { + // Insert new rows using both transactions. These are non-conflicting and should therefore be + // possible. + connection2.executeUpdate( + Statement.newBuilder("insert into test (id, name) values (@id, @name)") + .bind("id") + .to(3L) + .bind("name") + .to("Three") + .build()); + connection1.executeUpdate( + Statement.newBuilder("insert into test (id, name) values (@id, @name)") + .bind("id") + .to(4L) + .bind("name") + .to("Four") + .build()); + // Scan all the rows in the test table using the first connection. This will now use the + // read/write transaction and will conflict with the other transaction. + try (ResultSet resultSet = + connection1.executeQuery(Statement.of("select * from test order by id"))) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // ignore and just consume the results + } + } + // Commit the transaction for connection2. + connection2.commit(); + // The first transaction will be aborted. 
+ assertThrows(AbortedDueToConcurrentModificationException.class, connection1::commit); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITDmlReturningTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITDmlReturningTest.java new file mode 100644 index 000000000000..83f9beb7cf73 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITDmlReturningTest.java @@ -0,0 +1,333 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection.it; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.AsyncResultSet; +import com.google.cloud.spanner.AsyncResultSet.CallbackResponse; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.AsyncStatementResult; +import com.google.cloud.spanner.connection.Connection; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest; +import com.google.cloud.spanner.connection.StatementResult; +import com.google.cloud.spanner.connection.StatementResult.ResultType; +import com.google.cloud.spanner.connection.TransactionMode; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +/** Execute DML Returning statements using the generic connection API. 
*/ +@Category(ParallelIntegrationTest.class) +@RunWith(Parameterized.class) +public class ITDmlReturningTest extends ITAbstractSpannerTest { + private final ImmutableMap UPDATE_RETURNING_MAP = + ImmutableMap.of( + Dialect.GOOGLE_STANDARD_SQL, + Statement.of("UPDATE Singers SET LastName = 'XYZ' WHERE FirstName = 'ABC' THEN RETURN *"), + Dialect.POSTGRESQL, + Statement.of("UPDATE Singers SET LastName = 'XYZ' WHERE FirstName = 'ABC' RETURNING *")); + private final ImmutableMap DDL_MAP = + ImmutableMap.of( + Dialect.GOOGLE_STANDARD_SQL, + "CREATE TABLE Singers (" + + " SingerId INT64," + + " FirstName STRING(1024)," + + " LastName STRING(1024)" + + ") PRIMARY KEY(SingerId)", + Dialect.POSTGRESQL, + "CREATE TABLE Singers (" + + " SingerId BIGINT PRIMARY KEY," + + " FirstName character varying(1024)," + + " LastName character varying(1024))"); + private static final Set IS_INITIALIZED = new HashSet<>(); + + @Parameter public Dialect dialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + private boolean checkAndSetInitialized() { + return !IS_INITIALIZED.add(dialect); + } + + @Before + public void setupTable() { + if (!checkAndSetInitialized()) { + database = + env.getTestHelper() + .createTestDatabase(dialect, Collections.singleton(DDL_MAP.get(dialect))); + } + DatabaseClient client = env.getTestHelper().getDatabaseClient(database); + client.write(ImmutableList.of(Mutation.delete("SINGERS", KeySet.all()))); + + List firstNames = Arrays.asList("ABC", "ABC", "DEF", "PQR", "ABC"); + List lastNames = Arrays.asList("XYZ", "DEF", "XYZ", "ABC", "GHI"); + List mutations = new ArrayList<>(); + for (int id = 1; id <= 5; id++) { + mutations.add( + Mutation.newInsertBuilder("SINGERS") + .set("SINGERID") + .to(id) + .set("FIRSTNAME") + .to(firstNames.get(id - 1)) + .set("LASTNAME") + .to(lastNames.get(id - 1)) + .build()); + } + env.getTestHelper().getDatabaseClient(database).write(mutations); + } + + @Test + public void 
testDmlReturningExecuteQuery() { + try (Connection connection = createConnection()) { + try (ResultSet rs = connection.executeQuery(UPDATE_RETURNING_MAP.get(dialect))) { + assertEquals(rs.getColumnCount(), 3); + assertTrue(rs.next()); + assertEquals(rs.getString(1), "ABC"); + assertTrue(rs.next()); + assertEquals(rs.getString(1), "ABC"); + assertTrue(rs.next()); + assertEquals(rs.getString(1), "ABC"); + assertFalse(rs.next()); + assertNotNull(rs.getStats()); + assertEquals(rs.getStats().getRowCountExact(), 3); + } + } + } + + @Test + public void testDmlReturningExecuteQueryAsync() { + try (Connection connection = createConnection()) { + try (AsyncResultSet rs = connection.executeQueryAsync(UPDATE_RETURNING_MAP.get(dialect))) { + rs.setCallback( + Executors.newSingleThreadExecutor(), + resultSet -> { + try { + while (true) { + switch (resultSet.tryNext()) { + case OK: + assertEquals(resultSet.getColumnCount(), 3); + assertEquals(resultSet.getString(1), "ABC"); + break; + case DONE: + assertNotNull(resultSet.getStats()); + assertEquals(resultSet.getStats().getRowCountExact(), 3); + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + default: + throw new IllegalStateException(); + } + } + } catch (SpannerException e) { + return CallbackResponse.DONE; + } + }); + } + } + } + + @Test + public void testDmlReturningExecuteUpdate() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + SpannerException e = + assertThrows( + SpannerException.class, + () -> connection.executeUpdate(UPDATE_RETURNING_MAP.get(dialect))); + assertEquals(e.getErrorCode(), ErrorCode.FAILED_PRECONDITION); + } + } + + @Test + public void testDmlReturningExecuteUpdateAsync() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + SpannerException e = + assertThrows( + SpannerException.class, + () -> connection.executeUpdateAsync(UPDATE_RETURNING_MAP.get(dialect))); + 
assertEquals(e.getErrorCode(), ErrorCode.FAILED_PRECONDITION); + } + } + + @Test + public void testDmlReturningExecuteBatchUpdate() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + final Statement updateStmt = Preconditions.checkNotNull(UPDATE_RETURNING_MAP.get(dialect)); + long[] counts = + connection.executeBatchUpdate(ImmutableList.of(updateStmt, updateStmt, updateStmt)); + assertArrayEquals(counts, new long[] {3, 3, 3}); + } + } + + @Test + public void testDmlReturningExecuteBatchUpdateAsync() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + final Statement updateStmt = Preconditions.checkNotNull(UPDATE_RETURNING_MAP.get(dialect)); + long[] counts = + connection + .executeBatchUpdateAsync(ImmutableList.of(updateStmt, updateStmt, updateStmt)) + .get(); + assertArrayEquals(counts, new long[] {3, 3, 3}); + } catch (ExecutionException | InterruptedException e) { + // ignore + } + } + + @Test + public void testDmlReturningExecute() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + StatementResult res = connection.execute(UPDATE_RETURNING_MAP.get(dialect)); + assertEquals(res.getResultType(), ResultType.RESULT_SET); + try (ResultSet rs = res.getResultSet()) { + assertEquals(rs.getColumnCount(), 3); + assertTrue(rs.next()); + assertEquals(rs.getString(1), "ABC"); + assertTrue(rs.next()); + assertEquals(rs.getString(1), "ABC"); + assertTrue(rs.next()); + assertEquals(rs.getString(1), "ABC"); + assertFalse(rs.next()); + assertNotNull(rs.getStats()); + assertEquals(rs.getStats().getRowCountExact(), 3); + } + } + } + + @Test + public void testDmlReturningExecuteAsync() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + AsyncStatementResult res = connection.executeAsync(UPDATE_RETURNING_MAP.get(dialect)); + assertEquals(res.getResultType(), ResultType.RESULT_SET); + try (AsyncResultSet rs = 
res.getResultSetAsync()) { + rs.setCallback( + Executors.newSingleThreadExecutor(), + resultSet -> { + try { + while (true) { + switch (resultSet.tryNext()) { + case OK: + assertEquals(resultSet.getColumnCount(), 3); + assertEquals(resultSet.getString(1), "ABC"); + break; + case DONE: + assertNotNull(resultSet.getStats()); + assertEquals(resultSet.getStats().getRowCountExact(), 3); + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + default: + throw new IllegalStateException(); + } + } + } catch (SpannerException e) { + System.out.printf("Error in callback: %s%n", e.getMessage()); + return CallbackResponse.DONE; + } + }); + } + } + } + + @Test + public void testDmlReturningExecuteQueryReadOnlyMode() { + try (Connection connection = createConnection()) { + connection.setReadOnly(true); + SpannerException e = + assertThrows( + SpannerException.class, + () -> connection.executeQuery(UPDATE_RETURNING_MAP.get(dialect))); + assertEquals(e.getErrorCode(), ErrorCode.FAILED_PRECONDITION); + } + } + + @Test + public void testDmlReturningExecuteQueryReadOnlyTransaction() { + try (Connection connection = createConnection()) { + connection.setReadOnly(false); + connection.setAutocommit(false); + connection.setTransactionMode(TransactionMode.READ_ONLY_TRANSACTION); + SpannerException e = + assertThrows( + SpannerException.class, + () -> connection.executeQuery(UPDATE_RETURNING_MAP.get(dialect))); + assertEquals(e.getErrorCode(), ErrorCode.FAILED_PRECONDITION); + } + } + + @Test + public void testDmlReturningExecuteQueryAsyncReadOnlyMode() { + try (Connection connection = createConnection()) { + connection.setReadOnly(true); + SpannerException e = + assertThrows( + SpannerException.class, + () -> connection.executeQueryAsync(UPDATE_RETURNING_MAP.get(dialect))); + assertEquals(e.getErrorCode(), ErrorCode.FAILED_PRECONDITION); + } + } + + @Test + public void testDmlReturningExecuteQueryAsyncReadOnlyTransaction() { + try (Connection connection = 
createConnection()) { + connection.setReadOnly(false); + connection.setAutocommit(false); + connection.setTransactionMode(TransactionMode.READ_ONLY_TRANSACTION); + SpannerException e = + assertThrows( + SpannerException.class, + () -> connection.executeQueryAsync(UPDATE_RETURNING_MAP.get(dialect))); + assertEquals(e.getErrorCode(), ErrorCode.FAILED_PRECONDITION); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITEmulatorConcurrentTransactionsTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITEmulatorConcurrentTransactionsTest.java new file mode 100644 index 000000000000..fe651554dd5a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITEmulatorConcurrentTransactionsTest.java @@ -0,0 +1,201 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeTrue; + +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.Connection; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@Category(ParallelIntegrationTest.class) +@RunWith(Parameterized.class) +public class ITEmulatorConcurrentTransactionsTest extends ITAbstractSpannerTest { + @Parameters(name = "Use auto-savepoints={0}") + public static Object[] parameters() { + return new Object[] {Boolean.TRUE, Boolean.FALSE}; + } + + @Parameter public boolean useAutoSavepointsForEmulator; + + @Override + public void appendConnectionUri(StringBuilder uri) { + uri.append(";autoConfigEmulator=true;autoCommit=false;useAutoSavepointsForEmulator=") + .append(useAutoSavepointsForEmulator); + } + + @Override + public 
boolean doCreateDefaultTestTable() { + return true; + } + + @BeforeClass + public static void onlyOnEmulator() { + assumeTrue("This test is only intended for the emulator", isUsingEmulator()); + } + + @Before + public void clearTestData() { + try (ITConnection connection = createConnection()) { + connection.bufferedWrite(Mutation.delete("TEST", KeySet.all())); + connection.commit(); + } + } + + @Test + public void testInnerTransaction() { + try (Connection connection1 = createConnection(); + Connection connection2 = createConnection()) { + // Run two transactions: + // 1. First start one transaction. + // 2. Then start another transaction and commit that transaction. + // 3. Then commit the first transaction. + assertEquals( + 1, + connection1.executeUpdate(Statement.of("insert into test (id, name) values (1, 'One')"))); + assertEquals( + 1, + connection2.executeUpdate(Statement.of("insert into test (id, name) values (2, 'Two')"))); + connection2.commit(); + connection1.commit(); + } + verifyRowCount(2L); + } + + @Test + public void testOverlappingTransactions() { + try (Connection connection1 = createConnection(); + Connection connection2 = createConnection()) { + // Run two transactions: + // 1. First start one transaction. + // 2. Then start another transaction. + // 3. Then commit the first transaction. + // 4. Then commit the second transaction. 
+ assertEquals( + 1, + connection1.executeUpdate(Statement.of("insert into test (id, name) values (1, 'One')"))); + assertEquals( + 1, + connection2.executeUpdate(Statement.of("insert into test (id, name) values (2, 'Two')"))); + connection1.commit(); + connection2.commit(); + } + verifyRowCount(2L); + } + + @Test + public void testSingleThreadRandomTransactions() { + AtomicInteger numRowsInserted = new AtomicInteger(); + runRandomTransactions(numRowsInserted); + verifyRowCount(numRowsInserted.get()); + } + + @Test + public void testMultiThreadedRandomTransactions() throws Exception { + int numThreads = ThreadLocalRandom.current().nextInt(10) + 5; + ExecutorService executor = Executors.newFixedThreadPool(numThreads); + AtomicInteger numRowsInserted = new AtomicInteger(); + List> futures = new ArrayList<>(numThreads); + for (int thread = 0; thread < numThreads; thread++) { + futures.add(executor.submit(() -> runRandomTransactions(numRowsInserted))); + } + executor.shutdown(); + assertTrue(executor.awaitTermination(60L, TimeUnit.SECONDS)); + // Get the results of each transaction so the test case fails with a logical error message if + // any of the transactions failed. 
+ for (Future future : futures) { + assertNull(future.get()); + } + verifyRowCount(numRowsInserted.get()); + } + + private void runRandomTransactions(AtomicInteger numRowsInserted) { + int numTransactions = ThreadLocalRandom.current().nextInt(25) + 5; + String sql = "insert into test (id, name) values (@id, 'test')"; + List connections = new ArrayList<>(numTransactions); + try { + for (int i = 0; i < numTransactions; i++) { + connections.add(createConnection()); + } + while (!connections.isEmpty()) { + int index = ThreadLocalRandom.current().nextInt(connections.size()); + Connection connection = connections.get(index); + if (ThreadLocalRandom.current().nextInt(10) < 5) { + connection.commit(); + connection.close(); + assertEquals(connection, connections.remove(index)); + } else { + assertEquals( + 1, + connection.executeUpdate( + Statement.newBuilder(sql) + .bind("id") + .to(ThreadLocalRandom.current().nextLong()) + .build())); + numRowsInserted.incrementAndGet(); + } + try { + // Make sure to have a small wait between statements. 
+ Thread.sleep(ThreadLocalRandom.current().nextInt(1, 5)); + } catch (InterruptedException interruptedException) { + throw SpannerExceptionFactory.propagateInterrupt(interruptedException); + } + } + } finally { + for (Connection connection : connections) { + connection.close(); + } + } + } + + private void verifyRowCount(long expected) { + try (Connection connection = createConnection()) { + try (ResultSet resultSet = + connection.executeQuery(Statement.of("select count(1) from test"))) { + assertTrue(resultSet.next()); + assertEquals(expected, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITExplainTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITExplainTest.java new file mode 100644 index 000000000000..2cb0bb61ef9b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITExplainTest.java @@ -0,0 +1,107 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.Connection; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest; +import java.util.Arrays; +import java.util.Collections; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITExplainTest extends ITAbstractSpannerTest { + @Override + public void appendConnectionUri(StringBuilder uri) { + uri.append(";autocommit=false"); + } + + @BeforeClass + public static void setupPostgreSQL() { + assumeFalse("Emulator does not support PostgreSQL Dialect", isUsingEmulator()); + database = env.getTestHelper().createTestDatabase(Dialect.POSTGRESQL, Collections.emptyList()); + } + + @Before + public void createTestTable() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + if (!tableExists(connection, "TEST")) { + connection.setAutocommit(false); + connection.startBatchDdl(); + connection.execute( + Statement.of( + "CREATE TABLE TEST (ID INT NOT NULL PRIMARY KEY, NAME VARCHAR(100) NOT NULL)")); + connection.runBatch(); + } + } + } + + @Test + public void testExplainStatement() { + assumeFalse("Emulator does not support PostgreSQL Dialect", isUsingEmulator()); + try (ITConnection connection = createConnection()) { + connection.bufferedWrite( + 
Arrays.asList( + Mutation.newInsertBuilder("TEST").set("ID").to(3L).set("NAME").to("TEST-3").build(), + Mutation.newInsertBuilder("TEST").set("ID").to(4L).set("NAME").to("TEST-4").build())); + connection.commit(); + + ResultSet resultSet = + connection.execute(Statement.of("EXPLAIN SELECT * from TEST")).getResultSet(); + while (resultSet.next()) { + assertNotNull(resultSet.getString("QUERY PLAN")); + } + assertEquals(1, resultSet.getColumnCount()); + } + } + + @Test + public void testExplainAnalyzeStatement() { + assumeFalse("Emulator does not support PostgreSQL Dialect", isUsingEmulator()); + try (ITConnection connection = createConnection()) { + connection.bufferedWrite( + Arrays.asList( + Mutation.newInsertBuilder("TEST").set("ID").to(1L).set("NAME").to("TEST-1").build(), + Mutation.newInsertBuilder("TEST").set("ID").to(2L).set("NAME").to("TEST-2").build())); + connection.commit(); + + ResultSet resultSet = + connection.execute(Statement.of("EXPLAIN ANALYZE SELECT * from TEST")).getResultSet(); + while (resultSet.next()) { + assertNotNull(resultSet.getString("QUERY PLAN")); + assertNotNull(resultSet.getString("EXECUTION STATS")); + } + assertEquals(2, resultSet.getColumnCount()); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITQueryOptionsTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITQueryOptionsTest.java new file mode 100644 index 000000000000..62eba4f4315a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITQueryOptionsTest.java @@ -0,0 +1,49 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection.it; + +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest; +import com.google.cloud.spanner.connection.SqlScriptVerifier; +import com.google.cloud.spanner.connection.SqlScriptVerifier.SpannerGenericConnection; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITQueryOptionsTest extends ITAbstractSpannerTest { + + private static final String TEST_QUERY_OPTIONS = "ITSqlScriptTest_TestQueryOptions.sql"; + + private SqlScriptVerifier verifier; + + @Before + public void setUp() { + verifier = new SqlScriptVerifier(); + } + + @Test + public void verifiesQueryOptions() throws Exception { + try (ITConnection connection = createConnection()) { + verifier.verifyStatementsInFile( + SpannerGenericConnection.of(connection), TEST_QUERY_OPTIONS, SqlScriptVerifier.class); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITReadOnlySpannerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITReadOnlySpannerTest.java new file mode 100644 index 000000000000..c86b8ec34e18 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITReadOnlySpannerTest.java @@ -0,0 +1,246 @@ +/* + * Copyright 
2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ReadContext.QueryAnalyzeMode; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest; +import com.google.cloud.spanner.connection.SqlScriptVerifier; +import java.math.BigInteger; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * This test class runs a SQL script for 
testing a connection in read-only mode, but also contains a + * number of separate test methods that cannot be expressed in a pure SQL test. + */ +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITReadOnlySpannerTest extends ITAbstractSpannerTest { + private static final long TEST_ROWS_COUNT = 1000L; + + @Override + protected void appendConnectionUri(StringBuilder url) { + url.append(";readOnly=true"); + } + + @Before + public void createTestTables() throws Exception { + try (ITConnection connection = createConnection()) { + if (!(tableExists(connection, "NUMBERS") && tableExists(connection, "PRIME_NUMBERS"))) { + // create tables + SqlScriptVerifier verifier = new SqlScriptVerifier(new ITConnectionProvider()); + verifier.verifyStatementsInFile( + "ITReadOnlySpannerTest_CreateTables.sql", SqlScriptVerifier.class, false); + + // fill tables with data + connection.setAutocommit(false); + connection.setReadOnly(false); + for (long number = 1L; number <= TEST_ROWS_COUNT; number++) { + connection.bufferedWrite( + Mutation.newInsertBuilder("NUMBERS") + .set("number") + .to(number) + .set("name") + .to(Long.toBinaryString(number)) + .build()); + } + for (long number = 1L; number <= TEST_ROWS_COUNT; number++) { + if (BigInteger.valueOf(number).isProbablePrime(Integer.MAX_VALUE)) { + connection.bufferedWrite( + Mutation.newInsertBuilder("PRIME_NUMBERS") + .set("prime_number") + .to(number) + .set("binary_representation") + .to(Long.toBinaryString(number)) + .build()); + } + } + connection.commit(); + } + } + } + + @Test + public void testSqlScript() throws Exception { + // Wait 100ms to ensure that staleness tests in the script succeed. 
+ Thread.sleep(100L); + SqlScriptVerifier verifier = new SqlScriptVerifier(new ITConnectionProvider()); + verifier.verifyStatementsInFile("ITReadOnlySpannerTest.sql", SqlScriptVerifier.class, false); + } + + @Test + public void testStatementTimeoutTransactional() { + try (ITConnection connection = createConnection()) { + connection.beginTransaction(); + connection.setStatementTimeout(1L, TimeUnit.MILLISECONDS); + try (ResultSet rs = + connection.executeQuery( + Statement.of( + "SELECT (SELECT COUNT(*) FROM PRIME_NUMBERS)/(SELECT COUNT(*) FROM NUMBERS) AS" + + " PRIME_NUMBER_RATIO"))) { + fail("Expected exception"); + } + // should never be reached + connection.commit(); + } catch (SpannerException ex) { + assertEquals(ErrorCode.DEADLINE_EXCEEDED, ex.getErrorCode()); + } + } + + @Test + public void testStatementTimeoutTransactionalMultipleStatements() { + try (ITConnection connection = createConnection()) { + connection.beginTransaction(); + for (int i = 0; i < 3; i++) { + connection.setStatementTimeout(1L, TimeUnit.MICROSECONDS); + try (ResultSet rs = + connection.executeQuery( + Statement.of( + "SELECT (SELECT COUNT(*) FROM PRIME_NUMBERS)/(SELECT COUNT(*) FROM NUMBERS) AS" + + " PRIME_NUMBER_RATIO"))) { + fail("Missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode(), is(ErrorCode.DEADLINE_EXCEEDED)); + } + } + connection.commit(); + } + } + + @Test + public void testStatementTimeoutAutocommit() { + try (ITConnection connection = createConnection()) { + assertThat(connection.isAutocommit(), is(true)); + connection.setStatementTimeout(1L, TimeUnit.MILLISECONDS); + try (ResultSet rs = + connection.executeQuery( + Statement.of( + "SELECT (SELECT COUNT(*) FROM PRIME_NUMBERS)/(SELECT COUNT(*) FROM NUMBERS) AS" + + " PRIME_NUMBER_RATIO"))) { + fail("Expected exception"); + } catch (SpannerException ex) { + assertEquals(ErrorCode.DEADLINE_EXCEEDED, ex.getErrorCode()); + } + } + } + + @Test + public void testAnalyzeQuery() { + 
assumeFalse("analyze query is not supported on the emulator", isUsingEmulator()); + try (ITConnection connection = createConnection()) { + for (QueryAnalyzeMode mode : QueryAnalyzeMode.values()) { + try (ResultSet rs = + connection.analyzeQuery( + Statement.of( + "SELECT (SELECT COUNT(*) FROM PRIME_NUMBERS)/(SELECT COUNT(*) FROM NUMBERS) AS" + + " PRIME_NUMBER_RATIO"), + mode)) { + // next has not yet returned false + assertThat(rs.getStats(), is(nullValue())); + while (rs.next()) { + // ignore + } + assertThat(rs.getStats(), is(notNullValue())); + } + } + } + } + + @Test + public void testQueryWithOptions() { + try (ITConnection connection = createConnection()) { + try (ResultSet rs = + connection.executeQuery( + Statement.of( + "SELECT (SELECT CAST(COUNT(*) AS FLOAT64) FROM PRIME_NUMBERS)/(SELECT COUNT(*)" + + " FROM NUMBERS) AS PRIME_NUMBER_RATIO"), + Options.prefetchChunks(100000))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getDouble(0), is(notNullValue())); + assertThat(rs.next(), is(false)); + } + } + } + + @Test + public void testMultipleOpenResultSets() throws InterruptedException { + try (ITConnection connection = createConnection()) { + final ResultSet rs1 = connection.executeQuery(Statement.of("SELECT * FROM PRIME_NUMBERS")); + final ResultSet rs2 = connection.executeQuery(Statement.of("SELECT * FROM NUMBERS")); + ExecutorService exec = Executors.newFixedThreadPool(2); + exec.submit( + () -> { + while (rs1.next()) {} + }); + exec.submit( + () -> { + while (rs2.next()) {} + }); + exec.shutdown(); + exec.awaitTermination(1000L, TimeUnit.SECONDS); + rs1.close(); + rs2.close(); + } + } + + @Test + public void testGetMetadataFromAnalyzeQuery() { + assumeFalse("analyze query is not supported on the emulator", isUsingEmulator()); + try (ITConnection connection = createConnection()) { + // Request a query plan without executing the query and verify that we can get the column + // metadata of the query without calling resultSet.next() first. 
+ try (ResultSet resultSet = + connection.analyzeQuery( + Statement.of("SELECT number, name FROM NUMBERS"), QueryAnalyzeMode.PLAN)) { + assertEquals(2, resultSet.getColumnCount()); + + assertEquals(0, resultSet.getColumnIndex("number")); + assertEquals("number", resultSet.getType().getStructFields().get(0).getName()); + assertEquals(Type.int64(), resultSet.getColumnType(0)); + assertEquals(Type.int64(), resultSet.getColumnType("number")); + + assertEquals(1, resultSet.getColumnIndex("name")); + assertEquals("name", resultSet.getType().getStructFields().get(1).getName()); + assertEquals(Type.string(), resultSet.getColumnType(1)); + assertEquals(Type.string(), resultSet.getColumnType("name")); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITReadWriteAutocommitSpannerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITReadWriteAutocommitSpannerTest.java new file mode 100644 index 000000000000..8c9aaba823ba --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITReadWriteAutocommitSpannerTest.java @@ -0,0 +1,220 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection.it; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ReadContext.QueryAnalyzeMode; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerBatchUpdateException; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest; +import com.google.cloud.spanner.connection.SqlScriptVerifier; +import com.google.cloud.spanner.testing.EmulatorSpannerHelper; +import com.google.spanner.v1.ResultSetStats; +import java.util.Arrays; +import java.util.concurrent.TimeUnit; +import org.junit.FixMethodOrder; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.junit.runners.MethodSorters; + +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +@FixMethodOrder(MethodSorters.NAME_ASCENDING) +public class ITReadWriteAutocommitSpannerTest extends ITAbstractSpannerTest { + + @Override + protected void appendConnectionUri(StringBuilder uri) { + uri.append(";autocommit=true"); + } + + @Override + public boolean doCreateDefaultTestTable() { + return true; + } + + @Test + public void test01_SqlScript() throws Exception { + SqlScriptVerifier verifier = new SqlScriptVerifier(new ITConnectionProvider()); + 
verifier.verifyStatementsInFile( + "ITReadWriteAutocommitSpannerTest.sql", SqlScriptVerifier.class, false); + } + + @Test + public void test02_WriteMutation() { + try (ITConnection connection = createConnection()) { + connection.write( + Mutation.newInsertBuilder("TEST").set("ID").to(9999L).set("NAME").to("FOO").build()); + assertThat(connection.getCommitTimestamp(), is(notNullValue())); + } + } + + @Test + public void test03_MultipleStatements_WithTimeouts() { + try (ITConnection connection = createConnection()) { + // do an insert that should succeed + assertThat( + connection.executeUpdate( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1000, 'test')")), + is(equalTo(1L))); + // check that the insert succeeded + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT * FROM TEST WHERE ID=1000"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getString("NAME"), is(equalTo("test"))); + assertThat(rs.next(), is(false)); + } + + // do an update that should always time out (both on real Spanner as well as on the emulator) + connection.setStatementTimeout(1L, TimeUnit.NANOSECONDS); + try { + connection.executeUpdate(Statement.of("UPDATE TEST SET NAME='test18' WHERE ID=1000")); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode(), is(equalTo(ErrorCode.DEADLINE_EXCEEDED))); + } + // remove the timeout setting + connection.clearStatementTimeout(); + + // do a delete that should succeed + connection.executeUpdate(Statement.of("DELETE FROM TEST WHERE ID=1000")); + // verify that the delete did succeed + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT * FROM TEST WHERE ID=1000"))) { + assertThat(rs.next(), is(false)); + } + } + } + + @Test + public void test04_BatchUpdate() { + try (ITConnection connection = createConnection()) { + long[] updateCounts = + connection.executeBatchUpdate( + Arrays.asList( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (10, 'Batch value 1')"), + 
Statement.of("INSERT INTO TEST (ID, NAME) VALUES (11, 'Batch value 2')"), + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (12, 'Batch value 3')"), + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (13, 'Batch value 4')"), + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (14, 'Batch value 5')"), + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (15, 'Batch value 6')"), + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (16, 'Batch value 7')"), + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (17, 'Batch value 8')"), + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (18, 'Batch value 9')"), + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (19, 'Batch value 10')"), + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (20, 'Batch value 11')"))); + assertThat( + updateCounts, is(equalTo(new long[] {1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L}))); + try (ResultSet rs = + connection.executeQuery( + Statement.of("SELECT COUNT(*) FROM TEST WHERE ID>=10 AND ID<=20"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong(0), is(equalTo(11L))); + } + } + } + + @Test + public void test05_BatchUpdateWithException() { + try (ITConnection con1 = createConnection(); + ITConnection con2 = createConnection()) { + try { + con1.executeBatchUpdate( + Arrays.asList( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (21, 'Batch value 1')"), + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (22, 'Batch value 2')"), + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (23, 'Batch value 3')"), + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (24, 'Batch value 4')"), + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (25, 'Batch value 5')"), + Statement.of("INSERT INTO TEST_NOT_FOUND (ID, NAME) VALUES (26, 'Batch value 6')"), + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (27, 'Batch value 7')"), + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (28, 'Batch value 8')"), + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (29, 'Batch value 9')"), + Statement.of("INSERT 
INTO TEST (ID, NAME) VALUES (30, 'Batch value 10')"))); + fail("Missing batch update exception"); + } catch (SpannerBatchUpdateException e) { + assertThat(e.getUpdateCounts(), is(equalTo(new long[] {1L, 1L, 1L, 1L, 1L}))); + } + // Verify that the values cannot be read on the connection that did the insert. + try (ResultSet rs = + con1.executeQuery(Statement.of("SELECT COUNT(*) FROM TEST WHERE ID>=21 AND ID<=30"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong(0), is(equalTo(0L))); + } + // Verify that the values can also not be read on another connection. + try (ResultSet rs = + con2.executeQuery(Statement.of("SELECT COUNT(*) FROM TEST WHERE ID>=21 AND ID<=30"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong(0), is(equalTo(0L))); + } + } + } + + @Test + public void test06_AnalyzeUpdate() { + assumeFalse( + "Emulator does not support PLAN and PROFILE", EmulatorSpannerHelper.isUsingEmulator()); + + // PLAN should not execute the update. + try (ITConnection connection = createConnection()) { + ResultSetStats resultSetStats = + connection.analyzeUpdate( + Statement.of("UPDATE TEST SET NAME='test_updated' WHERE ID > 0"), + QueryAnalyzeMode.PLAN); + + assertNotNull(resultSetStats); + assertTrue(resultSetStats.hasQueryPlan()); + assertFalse(resultSetStats.hasQueryStats()); + + // The backend indicates that the statement would return an exact row count, but as the + // statement is not executed, the actual row count is zero. 
+ assertTrue(resultSetStats.hasRowCountExact()); + assertEquals(0, resultSetStats.getRowCountExact()); + } + + try (ITConnection connection = createConnection()) { + ResultSetStats resultSetStats = + connection.analyzeUpdate( + Statement.of("UPDATE TEST SET NAME='test_updated' WHERE ID > 0"), + QueryAnalyzeMode.PROFILE); + + // Executing the update in PROFILE mode should execute the update + assertNotNull(resultSetStats); + assertTrue(resultSetStats.hasQueryPlan()); + assertTrue(resultSetStats.hasQueryStats()); + + assertTrue(resultSetStats.hasRowCountExact()); + assertTrue(resultSetStats.getRowCountExact() > 0); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITRetryDmlAsPartitionedDmlTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITRetryDmlAsPartitionedDmlTest.java new file mode 100644 index 000000000000..d09bcf9b64d4 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITRetryDmlAsPartitionedDmlTest.java @@ -0,0 +1,207 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static com.google.cloud.spanner.testing.ExperimentalHostHelper.isExperimentalHost; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeFalse; + +import com.google.api.core.SettableApiFuture; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TransactionMutationLimitExceededException; +import com.google.cloud.spanner.connection.AutocommitDmlMode; +import com.google.cloud.spanner.connection.Connection; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest; +import com.google.cloud.spanner.connection.TransactionRetryListenerImpl; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITRetryDmlAsPartitionedDmlTest extends ITAbstractSpannerTest { + private static final int NUM_ROWS = 100000; + + @BeforeClass + public static void setup() { + // This shadows the setup() method in the super class and prevents it from being executed. + // That allows us to have a custom setup method in this class. 
+ assumeFalse("Skipping the test due to a known bug b/422916293", isExperimentalHost()); + } + + @BeforeClass + public static void setupTestData() { + assumeFalse("The emulator does not enforce the mutation limit", isUsingEmulator()); + + database = + env.getTestHelper() + .createTestDatabase( + "CREATE TABLE TEST (ID INT64 NOT NULL, NAME STRING(100) NOT NULL) PRIMARY KEY" + + " (ID)"); + DatabaseClient client = env.getTestHelper().getClient().getDatabaseClient(database.getId()); + int rowsCreated = 0; + int batchSize = 5000; + while (rowsCreated < NUM_ROWS) { + List mutations = new ArrayList<>(batchSize); + for (int row = rowsCreated; row < rowsCreated + batchSize; row++) { + mutations.add( + Mutation.newInsertOrUpdateBuilder("TEST") + .set("id") + .to(row) + .set("name") + .to("Row " + row) + .build()); + } + client.writeAtLeastOnce(mutations); + rowsCreated += batchSize; + } + } + + @Test + public void testDmlFailsIfMutationLimitExceeded() { + // TODO(sakthivelmani) - Re-enable once b/422916293 is resolved + assumeFalse( + "Skipping the test due to a known bug b/422916293", + env.getTestHelper().getOptions().isEnableDirectAccess()); + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + assertThrows( + TransactionMutationLimitExceededException.class, + () -> + connection.executeUpdate( + Statement.of("update test set name=name || ' - updated' where true"))); + } + } + + @Test + public void testRetryDmlAsPartitionedDml() throws Exception { + // TODO(sakthivelmani) - Re-enable once b/422916293 is resolved + assumeFalse( + "Skipping the test due to a known bug b/422916293", + env.getTestHelper().getOptions().isEnableDirectAccess()); + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setAutocommitDmlMode( + AutocommitDmlMode.TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC); + + // Set up a listener that gets a callback when a DML statement is retried as Partitioned DML. 
+ SettableApiFuture startExecutionIdFuture = SettableApiFuture.create(); + SettableApiFuture finishedExecutionIdFuture = SettableApiFuture.create(); + SettableApiFuture lowerBoundUpdateCountFuture = SettableApiFuture.create(); + connection.addTransactionRetryListener( + new TransactionRetryListenerImpl() { + @Override + public void retryDmlAsPartitionedDmlStarting( + UUID executionId, + Statement statement, + TransactionMutationLimitExceededException exception) { + startExecutionIdFuture.set(executionId); + } + + @Override + public void retryDmlAsPartitionedDmlFinished( + UUID executionId, Statement statement, long updateCount) { + finishedExecutionIdFuture.set(executionId); + lowerBoundUpdateCountFuture.set(updateCount); + } + }); + + long updateCount = + connection.executeUpdate( + Statement.of("update test set name=name || ' - updated' where true")); + assertEquals(NUM_ROWS, updateCount); + assertEquals( + startExecutionIdFuture.get(1, TimeUnit.SECONDS), + finishedExecutionIdFuture.get(1, TimeUnit.SECONDS)); + assertEquals(updateCount, lowerBoundUpdateCountFuture.get(1, TimeUnit.SECONDS).longValue()); + } + } + + @Test + public void testRetryDmlAsPartitionedDml_failsForLargeInserts() throws Exception { + // TODO(sakthivelmani) - Re-enable once b/422916293 is resolved + assumeFalse( + "Skipping the test due to a known bug b/422916293", + env.getTestHelper().getOptions().isEnableDirectAccess()); + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setAutocommitDmlMode( + AutocommitDmlMode.TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC); + + // Set up a listener that gets a callback when a DML statement is retried as Partitioned DML. 
+ SettableApiFuture startExecutionIdFuture = SettableApiFuture.create(); + SettableApiFuture failedExecutionIdFuture = SettableApiFuture.create(); + SettableApiFuture executionExceptionFuture = SettableApiFuture.create(); + connection.addTransactionRetryListener( + new TransactionRetryListenerImpl() { + @Override + public void retryDmlAsPartitionedDmlStarting( + UUID executionId, + Statement statement, + TransactionMutationLimitExceededException exception) { + startExecutionIdFuture.set(executionId); + } + + @Override + public void retryDmlAsPartitionedDmlFailed( + UUID executionId, Statement statement, Throwable exception) { + failedExecutionIdFuture.set(executionId); + executionExceptionFuture.set(exception); + } + }); + + // Note that the executeUpdate method throws the original + // TransactionMutationLimitExceededException, and not the exception that is thrown when the + // statement is retried as Partitioned DML. + TransactionMutationLimitExceededException mutationLimitExceededException = + assertThrows( + TransactionMutationLimitExceededException.class, + () -> + connection.executeUpdate( + Statement.of("insert into test (id, name) select -id, name from test"))); + assertEquals( + startExecutionIdFuture.get(1, TimeUnit.SECONDS), + failedExecutionIdFuture.get(1, TimeUnit.SECONDS)); + Throwable executionException = executionExceptionFuture.get(1L, TimeUnit.SECONDS); + assertEquals(SpannerException.class, executionException.getClass()); + SpannerException spannerException = (SpannerException) executionException; + // Verify that this exception indicates that the INSERT statement could not be executed as + // Partitioned DML. 
+ assertEquals(ErrorCode.INVALID_ARGUMENT, spannerException.getErrorCode()); + assertTrue( + spannerException.getMessage(), + spannerException.getMessage().contains("INSERT is not supported for Partitioned DML.")); + assertEquals(1, mutationLimitExceededException.getSuppressed().length); + assertSame(spannerException, mutationLimitExceededException.getSuppressed()[0]); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITSavepointTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITSavepointTest.java new file mode 100644 index 000000000000..2a0995713e18 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITSavepointTest.java @@ -0,0 +1,181 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection.it; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest; +import com.google.cloud.spanner.connection.SavepointSupport; +import com.google.common.collect.ImmutableList; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITSavepointTest extends ITAbstractSpannerTest { + @Override + public void appendConnectionUri(StringBuilder uri) { + uri.append(";autocommit=false"); + } + + @Override + public boolean doCreateDefaultTestTable() { + return true; + } + + @Before + public void clearTestData() { + try (ITConnection connection = createConnection()) { + connection.bufferedWrite(Mutation.delete("TEST", KeySet.all())); + connection.commit(); + } + } + + @Test + public void testRollbackDmlStatement() { + try (ITConnection connection = createConnection()) { + connection.setSavepointSupport(SavepointSupport.ENABLED); + assertEquals( + 1L, + connection.executeUpdate( + Statement.newBuilder("insert into test (id, name) values (@id, @name)") + .bind("id") + .to(1L) + .bind("name") + .to("One") + .build())); + connection.savepoint("s1"); + assertEquals( + 1L, + connection.executeUpdate( + Statement.newBuilder("insert into test (id, name) values (@id, @name)") + .bind("id") + .to(2L) + .bind("name") + .to("Two") + .build())); + + connection.rollbackToSavepoint("s1"); + connection.commit(); + + try (ResultSet 
resultSet = connection.executeQuery(Statement.of("select * from test"))) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong(0)); + assertEquals("One", resultSet.getString(1)); + assertFalse(resultSet.next()); + } + } + } + + @Test + public void testRollbackMutations() { + try (ITConnection connection = createConnection()) { + connection.setSavepointSupport(SavepointSupport.ENABLED); + connection.bufferedWrite( + Mutation.newInsertBuilder("test").set("id").to(1L).set("name").to("One").build()); + connection.savepoint("s1"); + connection.bufferedWrite( + Mutation.newInsertBuilder("test").set("id").to(2L).set("name").to("Two").build()); + connection.savepoint("s2"); + connection.bufferedWrite( + Mutation.newInsertBuilder("test").set("id").to(3L).set("name").to("Three").build()); + connection.savepoint("s3"); + connection.bufferedWrite( + Mutation.newInsertBuilder("test").set("id").to(4L).set("name").to("Four").build()); + connection.savepoint("s4"); + + connection.rollbackToSavepoint("s2"); + connection.commit(); + + try (ResultSet resultSet = + connection.executeQuery(Statement.of("select * from test order by id"))) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong(0)); + assertEquals("One", resultSet.getString(1)); + assertTrue(resultSet.next()); + assertEquals(2L, resultSet.getLong(0)); + assertEquals("Two", resultSet.getString(1)); + assertFalse(resultSet.next()); + } + } + } + + @Test + public void testRollbackBatchDmlStatement() { + try (ITConnection connection = createConnection()) { + connection.setSavepointSupport(SavepointSupport.ENABLED); + assertArrayEquals( + new long[] {1L, 1L}, + connection.executeBatchUpdate( + ImmutableList.of( + Statement.newBuilder("insert into test (id, name) values (@id, @name)") + .bind("id") + .to(1L) + .bind("name") + .to("One") + .build(), + Statement.newBuilder("insert into test (id, name) values (@id, @name)") + .bind("id") + .to(2L) + .bind("name") + .to("Two") + .build()))); + 
connection.savepoint("s1"); + assertArrayEquals( + new long[] {1L, 1L}, + connection.executeBatchUpdate( + ImmutableList.of( + Statement.newBuilder("insert into test (id, name) values (@id, @name)") + .bind("id") + .to(3L) + .bind("name") + .to("Three") + .build(), + Statement.newBuilder("insert into test (id, name) values (@id, @name)") + .bind("id") + .to(4L) + .bind("name") + .to("Four") + .build()))); + + connection.rollbackToSavepoint("s1"); + connection.commit(); + + try (ResultSet resultSet = + connection.executeQuery(Statement.of("select * from test order by id"))) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong(0)); + assertEquals("One", resultSet.getString(1)); + assertTrue(resultSet.next()); + assertEquals(2L, resultSet.getLong(0)); + assertEquals("Two", resultSet.getString(1)); + assertFalse(resultSet.next()); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITSqlMusicScriptTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITSqlMusicScriptTest.java new file mode 100644 index 000000000000..9d9a9d9310a2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITSqlMusicScriptTest.java @@ -0,0 +1,215 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.spanner.AbortedDueToConcurrentModificationException; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.AbstractSqlScriptVerifier.GenericConnection; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest; +import com.google.cloud.spanner.connection.SqlScriptVerifier; +import com.google.cloud.spanner.connection.SqlScriptVerifier.SpannerGenericConnection; +import java.util.ArrayList; +import java.util.List; +import org.junit.FixMethodOrder; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.junit.runners.MethodSorters; + +/** + * Integration test that runs one long sql script using the default Singers/Albums/Songs/Concerts + * data model + */ +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +@FixMethodOrder(MethodSorters.NAME_ASCENDING) +public class ITSqlMusicScriptTest extends ITAbstractSpannerTest { + private static final String SCRIPT_FILE = "ITSqlMusicScriptTest.sql"; + + @Test + public void test01_RunScript() throws Exception { + SqlScriptVerifier verifier = new SqlScriptVerifier(); + try (GenericConnection connection = SpannerGenericConnection.of(createConnection())) { + verifier.verifyStatementsInFile(connection, SCRIPT_FILE, SqlScriptVerifier.class, false); + } + } + + @Test + public void test02_RunAbortedTest() { + assumeFalse("concurrent transactions are not supported on the emulator", 
isUsingEmulator()); + + final long SINGER_ID = 2L; + final long VENUE_ID = 68L; + final long NUMBER_OF_SINGERS = 30L; + final long NUMBER_OF_ALBUMS = 60L; + final long NUMBER_OF_SONGS = 149L; + final long NUMBER_OF_CONCERTS = 100L; + long numberOfSongs = 0L; + AbortInterceptor interceptor = new AbortInterceptor(0.0D); + try (ITConnection connection = createConnection(interceptor)) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + connection.setAutocommit(false); + connection.setRetryAbortsInternally(true); + // Read all data from the different music tables in the transaction + // The previous test deleted the first two Singers records. + long expectedId = 3L; + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT * FROM Singers ORDER BY SingerId"))) { + while (rs.next()) { + assertThat(rs.getLong("SingerId"), is(equalTo(expectedId))); + expectedId++; + } + } + assertThat(expectedId, is(equalTo(NUMBER_OF_SINGERS + 1L))); + expectedId = 3L; + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT * FROM Albums ORDER BY AlbumId"))) { + while (rs.next()) { + assertThat(rs.getLong("AlbumId"), is(equalTo(expectedId))); + expectedId++; + // 31 and 32 were deleted by the first test script. + if (expectedId == 31L || expectedId == 32L) { + expectedId = 33L; + } + } + } + assertThat(expectedId, is(equalTo(NUMBER_OF_ALBUMS + 1L))); + expectedId = 1L; + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT * FROM Songs ORDER BY TrackId"))) { + while (rs.next()) { + assertThat(rs.getLong("TrackId"), is(equalTo(expectedId))); + expectedId++; + numberOfSongs++; + // 40, 64, 76, 86 and 96 were deleted by the first test script. 
+ if (expectedId == 40L + || expectedId == 64L + || expectedId == 76L + || expectedId == 86L + || expectedId == 96L) { + expectedId++; + } + } + } + assertThat(expectedId, is(equalTo(NUMBER_OF_SONGS + 1L))); + // Concerts are not in the table hierarchy, so no records have been deleted. + expectedId = 1L; + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT * FROM Concerts ORDER BY VenueId"))) { + while (rs.next()) { + assertThat(rs.getLong("VenueId"), is(equalTo(expectedId))); + expectedId++; + } + } + assertThat(expectedId, is(equalTo(NUMBER_OF_CONCERTS + 1L))); + + // make one small concurrent change in a different transaction + List originalPrices; + List newPrices; + try (ITConnection connection2 = createConnection()) { + assertThat(connection2.isAutocommit(), is(true)); + try (ResultSet rs = + connection2.executeQuery( + Statement.newBuilder( + "SELECT TicketPrices FROM Concerts WHERE SingerId=@singer AND" + + " VenueId=@venue") + .bind("singer") + .to(SINGER_ID) + .bind("venue") + .to(VENUE_ID) + .build())) { + assertThat(rs.next(), is(true)); + originalPrices = rs.getLongList(0); + // increase one of the prices by 1 + newPrices = new ArrayList<>(originalPrices); + newPrices.set(1, originalPrices.get(1) + 1); + connection2.executeUpdate( + Statement.newBuilder( + "UPDATE Concerts SET TicketPrices=@prices WHERE SingerId=@singer AND" + + " VenueId=@venue") + .bind("prices") + .toInt64Array(newPrices) + .bind("singer") + .to(SINGER_ID) + .bind("venue") + .to(VENUE_ID) + .build()); + } + } + + // try to add a new song and then try to commit, but trigger an abort on commit + connection.bufferedWrite( + Mutation.newInsertBuilder("Songs") + .set("SingerId") + .to(3L) + .set("AlbumId") + .to(3L) + .set("TrackId") + .to(1L) + .set("SongName") + .to("Aborted") + .set("Duration") + .to(1L) + .set("SongGenre") + .to("Unknown") + .build()); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + // the transaction retry should fail 
because of the concurrent modification + boolean expectedException = false; + try { + connection.commit(); + } catch (AbortedDueToConcurrentModificationException e) { + expectedException = true; + } + // verify that the commit aborted, an internal retry was started and then aborted because of + // the concurrent modification + assertThat(expectedException, is(true)); + // Rollback the transaction to start a new one. + connection.rollback(); + // verify that the prices were changed + try (ResultSet rs = + connection.executeQuery( + Statement.newBuilder( + "SELECT TicketPrices FROM Concerts WHERE SingerId=@singer AND VenueId=@venue") + .bind("singer") + .to(SINGER_ID) + .bind("venue") + .to(VENUE_ID) + .build())) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLongList(0), is(equalTo(newPrices))); + } + // verify that the new song was not written to the database + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT COUNT(*) FROM Songs"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong(0), is(equalTo(numberOfSongs))); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITSqlScriptTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITSqlScriptTest.java new file mode 100644 index 000000000000..026495605eda --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITSqlScriptTest.java @@ -0,0 +1,218 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest; +import com.google.cloud.spanner.connection.SqlScriptVerifier; +import com.google.cloud.spanner.connection.SqlScriptVerifier.SpannerGenericConnection; +import org.junit.FixMethodOrder; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.junit.runners.MethodSorters; + +/** + * Integration test that creates and fills a test database entirely using only sql scripts, and then + * performs all possible operations on this test database using only sql scripts. This test uses the + * generic connection API. 
+ */ +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +@FixMethodOrder(MethodSorters.NAME_ASCENDING) +public class ITSqlScriptTest extends ITAbstractSpannerTest { + private static final String CREATE_TABLES_FILE = "ITSqlScriptTest_CreateTables.sql"; + private static final String INSERT_AND_VERIFY_TEST_DATA = "ITSqlScriptTest_InsertTestData.sql"; + private static final String TEST_GET_READ_TIMESTAMP = "ITSqlScriptTest_TestGetReadTimestamp.sql"; + private static final String TEST_GET_COMMIT_TIMESTAMP = + "ITSqlScriptTest_TestGetCommitTimestamp.sql"; + private static final String TEST_TEMPORARY_TRANSACTIONS = + "ITSqlScriptTest_TestTemporaryTransactions.sql"; + private static final String TEST_TRANSACTION_MODE = "ITSqlScriptTest_TestTransactionMode.sql"; + private static final String TEST_TRANSACTION_MODE_READ_ONLY = + "ITSqlScriptTest_TestTransactionMode_ReadOnly.sql"; + private static final String TEST_READ_ONLY_STALENESS = + "ITSqlScriptTest_TestReadOnlyStaleness.sql"; + private static final String TEST_AUTOCOMMIT_DML_MODE = + "ITSqlScriptTest_TestAutocommitDmlMode.sql"; + private static final String TEST_AUTOCOMMIT_READ_ONLY = + "ITSqlScriptTest_TestAutocommitReadOnly.sql"; + private static final String TEST_STATEMENT_TIMEOUT = "ITSqlScriptTest_TestStatementTimeout.sql"; + private static final String TEST_SET_STATEMENTS = "ITSqlScriptTest_TestSetStatements.sql"; + private static final String TEST_INVALID_STATEMENTS = "ITSqlScriptTest_TestInvalidStatements.sql"; + + private final SqlScriptVerifier verifier = new SqlScriptVerifier(); + + @Test + public void test01_CreateTables() throws Exception { + try (ITConnection connection = createConnection()) { + verifier.verifyStatementsInFile( + SpannerGenericConnection.of(connection), + CREATE_TABLES_FILE, + SqlScriptVerifier.class, + false); + } + } + + @Test + public void test02_InsertTestData() throws Exception { + try (ITConnection connection = createConnection()) { + verifier.verifyStatementsInFile( + 
SpannerGenericConnection.of(connection), + INSERT_AND_VERIFY_TEST_DATA, + SqlScriptVerifier.class, + false); + } catch (SpannerException e) { + if (isUsingEmulator() && e.getErrorCode() == ErrorCode.ALREADY_EXISTS) { + // Errors in a transaction are 'sticky' on the emulator, so any query in the same + // transaction will return the same error as the error generated by a previous (update) + // statement. + } + } + } + + @Test + public void test03_TestGetReadTimestamp() throws Exception { + try (ITConnection connection = createConnection()) { + verifier.verifyStatementsInFile( + SpannerGenericConnection.of(connection), + TEST_GET_READ_TIMESTAMP, + SqlScriptVerifier.class, + false); + } + } + + @Test + public void test04_TestGetCommitTimestamp() throws Exception { + try (ITConnection connection = createConnection()) { + verifier.verifyStatementsInFile( + SpannerGenericConnection.of(connection), + TEST_GET_COMMIT_TIMESTAMP, + SqlScriptVerifier.class, + false); + } catch (SpannerException e) { + if (isUsingEmulator() && e.getErrorCode() == ErrorCode.INVALID_ARGUMENT) { + // Errors in a transaction are 'sticky' on the emulator, so any query in the same + // transaction will return the same error as the error generated by a previous statement. 
+ } + } + } + + @Test + public void test05_TestTemporaryTransactions() throws Exception { + try (ITConnection connection = createConnection()) { + verifier.verifyStatementsInFile( + SpannerGenericConnection.of(connection), + TEST_TEMPORARY_TRANSACTIONS, + SqlScriptVerifier.class, + false); + } + } + + @Test + public void test06_TestTransactionMode() throws Exception { + try (ITConnection connection = createConnection()) { + verifier.verifyStatementsInFile( + SpannerGenericConnection.of(connection), + TEST_TRANSACTION_MODE, + SqlScriptVerifier.class, + false); + } + } + + @Test + public void test07_TestTransactionModeReadOnly() throws Exception { + try (ITConnection connection = createConnection()) { + verifier.verifyStatementsInFile( + SpannerGenericConnection.of(connection), + TEST_TRANSACTION_MODE_READ_ONLY, + SqlScriptVerifier.class, + false); + } + } + + @Test + public void test08_TestReadOnlyStaleness() throws Exception { + try (ITConnection connection = createConnection()) { + verifier.verifyStatementsInFile( + SpannerGenericConnection.of(connection), + TEST_READ_ONLY_STALENESS, + SqlScriptVerifier.class, + false); + } + } + + @Test + public void test09_TestAutocommitDmlMode() throws Exception { + try (ITConnection connection = createConnection()) { + verifier.verifyStatementsInFile( + SpannerGenericConnection.of(connection), + TEST_AUTOCOMMIT_DML_MODE, + SqlScriptVerifier.class, + false); + } + } + + @Test + public void test10_TestAutocommitReadOnly() throws Exception { + try (ITConnection connection = createConnection()) { + verifier.verifyStatementsInFile( + SpannerGenericConnection.of(connection), + TEST_AUTOCOMMIT_READ_ONLY, + SqlScriptVerifier.class, + false); + } + } + + @Test + public void test11_TestStatementTimeout() throws Exception { + try (ITConnection connection = createConnection()) { + verifier.verifyStatementsInFile( + SpannerGenericConnection.of(connection), + TEST_STATEMENT_TIMEOUT, + SqlScriptVerifier.class, + false); + } + } + + @Test + 
public void test12_TestSetStatements() throws Exception { + try (ITConnection connection = createConnection()) { + verifier.verifyStatementsInFile( + SpannerGenericConnection.of(connection), + TEST_SET_STATEMENTS, + SqlScriptVerifier.class, + false); + } + } + + @Test + public void test13_TestInvalidStatements() throws Exception { + try (ITConnection connection = createConnection()) { + verifier.verifyStatementsInFile( + SpannerGenericConnection.of(connection), + TEST_INVALID_STATEMENTS, + SqlScriptVerifier.class, + false); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITTransactionModeTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITTransactionModeTest.java new file mode 100644 index 000000000000..33b059c8bf90 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITTransactionModeTest.java @@ -0,0 +1,201 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection.it; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest; +import com.google.cloud.spanner.connection.SqlScriptVerifier; +import java.util.Arrays; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITTransactionModeTest extends ITAbstractSpannerTest { + @Override + public void appendConnectionUri(StringBuilder uri) { + uri.append(";autocommit=false"); + } + + @Override + public boolean doCreateDefaultTestTable() { + return true; + } + + @Test + public void testSqlScript() throws Exception { + SqlScriptVerifier verifier = new SqlScriptVerifier(new ITConnectionProvider()); + verifier.verifyStatementsInFile("ITTransactionModeTest.sql", SqlScriptVerifier.class, false); + } + + @Test + public void testDoAllowBufferedWriteInReadWriteTransaction() { + try (ITConnection connection = createConnection()) { + assertThat(connection.isAutocommit(), is(false)); + connection.bufferedWrite( + Mutation.newInsertBuilder("TEST").set("ID").to(1L).set("NAME").to("TEST").build()); + connection.commit(); + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT NAME FROM TEST WHERE ID=1"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getString("NAME"), is(equalTo("TEST"))); + 
assertThat(rs.next(), is(false)); + } + connection.bufferedWrite( + Mutation.newUpdateBuilder("TEST").set("ID").to(1L).set("NAME").to("TEST2").build()); + connection.commit(); + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT NAME FROM TEST WHERE ID=1"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getString("NAME"), is(equalTo("TEST2"))); + assertThat(rs.next(), is(false)); + } + connection.bufferedWrite(Mutation.delete("TEST", Key.of(1L))); + connection.commit(); + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT NAME FROM TEST WHERE ID=1"))) { + assertThat(rs.next(), is(false)); + } + } + } + + @Test + public void testDoAllowBufferedWriteIterableInReadWriteTransaction() { + try (ITConnection connection = createConnection()) { + assertThat(connection.isAutocommit(), is(false)); + connection.bufferedWrite( + Arrays.asList( + Mutation.newInsertBuilder("TEST").set("ID").to(1L).set("NAME").to("TEST-1").build(), + Mutation.newInsertBuilder("TEST").set("ID").to(2L).set("NAME").to("TEST-2").build())); + connection.commit(); + try (ResultSet rs = + connection.executeQuery( + Statement.of("SELECT NAME FROM TEST WHERE ID IN (1,2) ORDER BY ID"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getString("NAME"), is(equalTo("TEST-1"))); + assertThat(rs.next(), is(true)); + assertThat(rs.getString("NAME"), is(equalTo("TEST-2"))); + assertThat(rs.next(), is(false)); + } + connection.bufferedWrite( + Arrays.asList( + Mutation.newUpdateBuilder("TEST").set("ID").to(1L).set("NAME").to("TEST-1-2").build(), + Mutation.newUpdateBuilder("TEST") + .set("ID") + .to(2L) + .set("NAME") + .to("TEST-2-2") + .build())); + connection.commit(); + try (ResultSet rs = + connection.executeQuery( + Statement.of("SELECT NAME FROM TEST WHERE ID IN (1,2) ORDER BY ID"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getString("NAME"), is(equalTo("TEST-1-2"))); + assertThat(rs.next(), is(true)); + assertThat(rs.getString("NAME"), 
is(equalTo("TEST-2-2"))); + assertThat(rs.next(), is(false)); + } + connection.bufferedWrite( + Arrays.asList(Mutation.delete("TEST", Key.of(1L)), Mutation.delete("TEST", Key.of(2L)))); + connection.commit(); + try (ResultSet rs = + connection.executeQuery( + Statement.of("SELECT NAME FROM TEST WHERE ID IN (1,2) ORDER BY ID"))) { + assertThat(rs.next(), is(false)); + } + } + } + + @Test + public void testDoNotAllowBufferedWriteInReadOnlyTransaction() { + try (ITConnection connection = createConnection()) { + connection.execute(Statement.of("SET TRANSACTION READ ONLY")); + assertThat(connection.isAutocommit(), is(false)); + try { + connection.bufferedWrite(Mutation.newInsertBuilder("FOO").set("ID").to(1L).build()); + fail("Expected exception"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + } + + @Test + public void testDoNotAllowBufferedWriteIterableInReadOnlyTransaction() { + try (ITConnection connection = createConnection()) { + connection.execute(Statement.of("SET TRANSACTION READ ONLY")); + assertThat(connection.isAutocommit(), is(false)); + try { + connection.bufferedWrite( + Arrays.asList( + Mutation.newInsertBuilder("FOO").set("ID").to(1L).build(), + Mutation.newInsertBuilder("FOO").set("ID").to(2L).build())); + fail("Expected exception"); + } catch (SpannerException ex) { + assertEquals(ErrorCode.FAILED_PRECONDITION, ex.getErrorCode()); + } + } + } + + @Test + public void testDoNotAllowBufferedWriteInDdlBatch() { + try (ITConnection connection = createConnection()) { + connection.startBatchDdl(); + assertThat(connection.isAutocommit(), is(false)); + assertThat(connection.isDdlBatchActive(), is(true)); + try { + connection.bufferedWrite(Mutation.newInsertBuilder("FOO").set("ID").to(1L).build()); + fail("Expected exception"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + } + + @Test + public void 
testDoNotAllowBufferedWriteIterableInDdlBatch() { + try (ITConnection connection = createConnection()) { + connection.startBatchDdl(); + assertThat(connection.isAutocommit(), is(false)); + assertThat(connection.isDdlBatchActive(), is(true)); + try { + connection.bufferedWrite( + Arrays.asList( + Mutation.newInsertBuilder("FOO").set("ID").to(1L).build(), + Mutation.newInsertBuilder("FOO").set("ID").to(2L).build())); + fail("Expected exception"); + } catch (SpannerException e) { + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITTransactionRetryTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITTransactionRetryTest.java new file mode 100644 index 000000000000..54f714a13aa3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITTransactionRetryTest.java @@ -0,0 +1,1655 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AbortedDueToConcurrentModificationException; +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest; +import com.google.cloud.spanner.connection.TransactionRetryListener; +import java.sql.Connection; +import java.util.logging.Level; +import java.util.logging.Logger; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * This integration test tests the different scenarios for automatically retrying read/write + * transactions, both when possible and when the transaction must abort because of a concurrent + * update. + */ +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITTransactionRetryTest extends ITAbstractSpannerTest { + private static final Logger logger = Logger.getLogger(ITTransactionRetryTest.class.getName()); + + @Rule public TestName testName = new TestName(); + + @Override + protected void appendConnectionUri(StringBuilder uri) { + // This test uses virtual threads when available to verify that the checksum calculation can + // reliably be executed by virtual threads. 
+ uri.append( + ";autocommit=false;retryAbortsInternally=true;useVirtualThreads=true;useVirtualGrpcTransportThreads=true"); + } + + @Override + public boolean doCreateDefaultTestTable() { + return true; + } + + /** Clear the test table before each test run */ + @Before + public void clearTable() { + try (ITConnection connection = createConnection()) { + connection.bufferedWrite(Mutation.delete("TEST", KeySet.all())); + connection.commit(); + } + } + + @Before + public void clearStatistics() { + RETRY_STATISTICS.clear(); + } + + @Before + public void logStart() { + logger.fine( + "--------------------------------------------------------------\n" + + testName.getMethodName() + + " started"); + } + + @After + public void logFinished() { + logger.fine( + "--------------------------------------------------------------\n" + + testName.getMethodName() + + " finished"); + } + + /** Simple data structure to keep track of retry statistics */ + private static class RetryStatistics { + private int totalRetryAttemptsStarted; + private int totalRetryAttemptsFinished; + private int totalSuccessfulRetries; + private int totalErroredRetries; + private int totalNestedAborts; + private int totalMaxAttemptsExceeded; + private int totalConcurrentModifications; + + private void clear() { + totalRetryAttemptsStarted = 0; + totalRetryAttemptsFinished = 0; + totalSuccessfulRetries = 0; + totalErroredRetries = 0; + totalNestedAborts = 0; + totalMaxAttemptsExceeded = 0; + totalConcurrentModifications = 0; + } + } + + /** + * Static to allow access from the {@link CountTransactionRetryListener}. Statistics are + * automatically cleared before each test case. + */ + public static final RetryStatistics RETRY_STATISTICS = new RetryStatistics(); + + /** + * Simple {@link TransactionRetryListener} that keeps track of the total count of the different + * transaction retry events of a {@link Connection}. 
Note that as {@link + * TransactionRetryListener}s are instantiated once per connection, the listener keeps track of + * the total statistics of a connection and not only of the last transaction. + */ + public static class CountTransactionRetryListener implements TransactionRetryListener { + + @Override + public void retryStarting(Timestamp transactionStarted, long transactionId, int retryAttempt) { + RETRY_STATISTICS.totalRetryAttemptsStarted++; + } + + @Override + public void retryFinished( + Timestamp transactionStarted, long transactionId, int retryAttempt, RetryResult result) { + RETRY_STATISTICS.totalRetryAttemptsFinished++; + switch (result) { + case RETRY_ABORTED_AND_MAX_ATTEMPTS_EXCEEDED: + RETRY_STATISTICS.totalMaxAttemptsExceeded++; + break; + case RETRY_ABORTED_AND_RESTARTING: + RETRY_STATISTICS.totalNestedAborts++; + break; + case RETRY_ABORTED_DUE_TO_CONCURRENT_MODIFICATION: + RETRY_STATISTICS.totalConcurrentModifications++; + break; + case RETRY_ERROR: + RETRY_STATISTICS.totalErroredRetries++; + break; + case RETRY_SUCCESSFUL: + RETRY_STATISTICS.totalSuccessfulRetries++; + break; + default: + break; + } + } + } + + /** Test successful retry when the commit aborts */ + @Test + public void testCommitAborted() { + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // verify that the there is no test record + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE ID=1"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(0L))); + assertThat(rs.next(), is(false)); + } + // do an insert + connection.executeUpdate( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test aborted')")); + // indicate that the next statement should abort + interceptor.setProbability(1.0); + 
interceptor.setOnlyInjectOnce(true); + // do a commit that will first abort, and then on retry will succeed + connection.commit(); + assertThat(RETRY_STATISTICS.totalRetryAttemptsStarted >= 1, is(true)); + assertThat(RETRY_STATISTICS.totalRetryAttemptsFinished >= 1, is(true)); + assertThat(RETRY_STATISTICS.totalSuccessfulRetries >= 1, is(true)); + assertThat(RETRY_STATISTICS.totalErroredRetries, is(equalTo(0))); + assertThat(RETRY_STATISTICS.totalConcurrentModifications, is(equalTo(0))); + assertThat(RETRY_STATISTICS.totalMaxAttemptsExceeded, is(equalTo(0))); + // verify that the insert succeeded + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE ID=1"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(1L))); + assertThat(rs.next(), is(false)); + } + } + } + + /** Test successful retry when an insert statement aborts */ + @Test + public void testInsertAborted() { + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + // verify that the there is no test record + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE ID=1"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(0L))); + assertThat(rs.next(), is(false)); + } + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // indicate that the next statement should abort + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + // do an insert that will abort + connection.executeUpdate( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test aborted')")); + // do a commit + connection.commit(); + assertThat(RETRY_STATISTICS.totalSuccessfulRetries >= 1, is(true)); + // verify that the insert succeeded + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT COUNT(*) AS C FROM 
TEST WHERE ID=1"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(1L))); + assertThat(rs.next(), is(false)); + } + } + } + + /** Test successful retry when an update statement aborts */ + @Test + public void testUpdateAborted() { + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // verify that the there is no test record + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE ID=1"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(0L))); + assertThat(rs.next(), is(false)); + } + // insert a test record + connection.executeUpdate( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test aborted')")); + // indicate that the next statement should abort + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + // do an update that will abort + connection.executeUpdate(Statement.of("UPDATE TEST SET NAME='update aborted' WHERE ID=1")); + // do a commit + connection.commit(); + assertThat(RETRY_STATISTICS.totalSuccessfulRetries >= 1, is(true)); + // verify that the update succeeded + try (ResultSet rs = + connection.executeQuery( + Statement.of( + "SELECT COUNT(*) AS C FROM TEST WHERE ID=1 AND NAME='update aborted'"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(1L))); + assertThat(rs.next(), is(false)); + } + } + } + + /** Test successful retry when a query aborts */ + @Test + public void testQueryAborted() { + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + // verify that the there is no test record + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT COUNT(*) AS C 
FROM TEST WHERE ID=1"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(0L))); + assertThat(rs.next(), is(false)); + } + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // insert a test record + connection.executeUpdate( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test aborted')")); + // indicate that the next statement should abort + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + // do a query that will abort + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE ID=1"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(1L))); + assertThat(rs.next(), is(false)); + } + // do a commit + connection.commit(); + assertThat(RETRY_STATISTICS.totalSuccessfulRetries >= 1, is(true)); + // verify that the update succeeded + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE ID=1"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(1L))); + assertThat(rs.next(), is(false)); + } + } + } + + /** Test successful retry when a call to {@link ResultSet#next()} aborts */ + @Test + public void testNextCallAborted() { + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + // insert two test records + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + // do a query + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST ORDER BY ID"))) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // the first record should be accessible without any problems + assertThat(rs.next(), is(true)); + 
assertThat(rs.getLong("ID"), is(equalTo(1L))); + + // indicate that the next statement should abort + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("ID"), is(equalTo(2L))); + assertThat(RETRY_STATISTICS.totalSuccessfulRetries >= 1, is(true)); + // there should be only two records + assertThat(rs.next(), is(false)); + } + connection.commit(); + assertThat(RETRY_STATISTICS.totalSuccessfulRetries >= 1, is(true)); + // verify that the transaction succeeded + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT COUNT(*) AS C FROM TEST"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(2L))); + assertThat(rs.next(), is(false)); + } + } + } + + /** Test successful retry after multiple aborts */ + @Test + public void testMultipleAborts() { + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + // verify that the there is no test record + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE ID=1"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(0L))); + assertThat(rs.next(), is(false)); + } + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // do three inserts which all will abort and retry + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')")); + + connection.commit(); + 
assertThat(RETRY_STATISTICS.totalSuccessfulRetries >= 3, is(true)); + // verify that the insert succeeded + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT COUNT(*) AS C FROM TEST"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(3L))); + assertThat(rs.next(), is(false)); + } + } + } + + /** + * Tests that a transaction retry can be successful after a select, as long as the select returns + * the same results during the retry + */ + @Test + public void testAbortAfterSelect() { + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + // verify that the there is no test record + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE ID=1"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(0L))); + assertThat(rs.next(), is(false)); + } + // insert a test record + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + // select the test record + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST WHERE ID=1"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("ID"), is(equalTo(1L))); + assertThat(rs.getString("NAME"), is(equalTo("test 1"))); + assertThat(rs.next(), is(false)); + } + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // do another insert that will abort and retry + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + // select the first test record again + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST WHERE ID=1"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("ID"), is(equalTo(1L))); + assertThat(rs.getString("NAME"), is(equalTo("test 1"))); 
+ assertThat(rs.next(), is(false)); + } + connection.commit(); + assertThat(RETRY_STATISTICS.totalSuccessfulRetries >= 1, is(true)); + } + } + + /** + * Test a successful retry when a {@link ResultSet} has been consumed half way. The {@link + * ResultSet} should still be at the same position and still behave as if the original transaction + * did not abort. + */ + @Test + public void testAbortWithResultSetHalfway() { + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + // insert two test records + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + // select the test records + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST ORDER BY ID"))) { + // iterate one step + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("ID"), is(equalTo(1L))); + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // do another insert that will abort and retry + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')")); + // iterate another step + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("ID"), is(equalTo(2L))); + // ensure we are at the end of the result set + assertThat(rs.next(), is(false)); + } + connection.commit(); + assertThat(RETRY_STATISTICS.totalSuccessfulRetries >= 1, is(true)); + // verify that all the inserts succeeded + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT COUNT(*) AS C FROM TEST"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(3L))); + assertThat(rs.next(), is(false)); + } + } + } + + /** Test successful retry after a {@link ResultSet} has been fully consumed. 
*/ + @Test + public void testAbortWithResultSetFullyConsumed() { + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + // insert two test records + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + // select the test records and iterate over them + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST ORDER BY ID"))) { + while (rs.next()) { + // do nothing, just consume the result set + } + } + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // do another insert that will abort and retry + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')")); + connection.commit(); + assertThat(RETRY_STATISTICS.totalSuccessfulRetries >= 1, is(true)); + // verify that all the inserts succeeded + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT COUNT(*) AS C FROM TEST"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(3L))); + assertThat(rs.next(), is(false)); + } + } + } + + @Test + public void testAbortWithConcurrentInsert() { + assumeFalse("concurrent transactions are not supported on the emulator", isUsingEmulator()); + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + // insert two test records + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + // select the test records and consume the entire result set + try (ResultSet rs = 
connection.executeQuery(Statement.of("SELECT * FROM TEST ORDER BY ID"))) { + while (rs.next()) { + // do nothing + } + } + // open a new connection and transaction and do an additional insert + try (ITConnection connection2 = createConnection()) { + connection2.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')")); + connection2.commit(); + } + // now try to do an insert that will abort. The retry should now fail as there has been a + // concurrent modification + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + boolean expectedException = false; + try { + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (4, 'test 4')")); + } catch (AbortedDueToConcurrentModificationException e) { + expectedException = true; + } + assertThat(expectedException, is(true)); + assertRetryStatistics(1, 1, 0); + } + } + + @Test + public void testAbortWithConcurrentDelete() { + assumeFalse("concurrent transactions are not supported on the emulator", isUsingEmulator()); + AbortInterceptor interceptor = new AbortInterceptor(0); + // first insert two test records + try (ITConnection connection = createConnection()) { + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + connection.commit(); + } + // open a new connection and select the two test records + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + // select the test records and consume the entire result set + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST ORDER BY ID"))) { + while (rs.next()) { + // do nothing + } + } + // open a new connection and transaction and remove one of the test records + try (ITConnection connection2 = 
createConnection()) { + connection2.executeUpdate(Statement.of("DELETE FROM TEST WHERE ID=1")); + connection2.commit(); + } + // now try to do an insert that will abort. The retry should now fail as there has been a + // concurrent modification + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + boolean expectedException = false; + try { + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')")); + } catch (AbortedDueToConcurrentModificationException e) { + expectedException = true; + } + assertThat(expectedException, is(true)); + assertRetryStatistics(1, 1, 0); + } + } + + @Test + public void testAbortWithConcurrentUpdate() { + assumeFalse("concurrent transactions are not supported on the emulator", isUsingEmulator()); + AbortInterceptor interceptor = new AbortInterceptor(0); + // first insert two test records + try (ITConnection connection = createConnection()) { + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + connection.commit(); + } + // open a new connection and select the two test records + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + // select the test records and consume the entire result set + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST ORDER BY ID"))) { + while (rs.next()) { + // do nothing + } + } + // open a new connection and transaction and update one of the test records + try (ITConnection connection2 = createConnection()) { + connection2.executeUpdate(Statement.of("UPDATE TEST SET NAME='test updated' WHERE ID=2")); + connection2.commit(); + } + // now try to do an insert that will abort. 
The retry should now fail as there has been a + // concurrent modification + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + boolean expectedException = false; + try { + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')")); + } catch (AbortedDueToConcurrentModificationException e) { + expectedException = true; + } + assertThat(expectedException, is(true)); + assertRetryStatistics(1, 1, 0); + } + } + + /** + * Test that shows that a transaction retry is possible even when there is a concurrent insert + * that has an impact on a query that has been executed, as long as the user hasn't actually seen + * the relevant part of the result of the query + */ + @Test + public void testAbortWithUnseenConcurrentInsert() { + assumeFalse("concurrent transactions are not supported on the emulator", isUsingEmulator()); + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + // insert two test records + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + // select the test records and consume part of the result set + ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST ORDER BY ID")); + assertThat(rs.next(), is(true)); + assertThat(rs.next(), is(true)); + // Open a new connection and transaction and do an additional insert. This insert will be + // included in a retry of the above query, but this has not yet been 'seen' by the user, + // hence is not a problem for retrying the transaction. 
+ try (ITConnection connection2 = createConnection()) { + connection2.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')")); + connection2.commit(); + } + // now try to do an insert that will abort. The retry should still succeed. + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + int currentRetryCount = RETRY_STATISTICS.totalRetryAttemptsStarted; + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (4, 'test 4')")); + assertThat(RETRY_STATISTICS.totalRetryAttemptsStarted >= currentRetryCount + 1, is(true)); + // Consume the rest of the result set. The insert by the other transaction should now be + // included in the result set as the transaction retried. Although this means that the result + // is different after a retry, it is not different as seen by the user, as the user didn't + // know that the result set did not have any more results before the transaction retry. + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("ID"), is(equalTo(3L))); + // record with id 4 should not be visible, as it was added to the transaction after the query + // was executed + assertThat(rs.next(), is(false)); + rs.close(); + connection.commit(); + assertThat(RETRY_STATISTICS.totalSuccessfulRetries >= 1, is(true)); + } + } + + /** + * This test shows what happens when an abort occurs on a call to {@link ResultSet#next()} on a + * {@link ResultSet} that has an concurrent insert. As long as the user hasn't consumed the {@link + * ResultSet} so far that the concurrent insert has been seen, the retry will succeed. When the + * user has consumed the {@link ResultSet} to the point where the concurrent insert is visible, + * the retry will fail. 
+ */ + @Test + public void testAbortWithUnseenConcurrentInsertAbortOnNext() { + assumeFalse("concurrent transactions are not supported on the emulator", isUsingEmulator()); + // no calls to next(), this should succeed + assertThat(testAbortWithUnseenConcurrentInsertAbortOnNext(0) >= 1, is(true)); + // 1 call to next() should also succeed, as there were 2 records in the original result set + assertThat(testAbortWithUnseenConcurrentInsertAbortOnNext(1) >= 1, is(true)); + // 2 calls to next() should also succeed, as there were 2 records in the original result set and + // the user doesn't know yet that the next call to next() will return true instead of false + // after the concurrent insert + assertThat(testAbortWithUnseenConcurrentInsertAbortOnNext(2) >= 1, is(true)); + + boolean expectedException = false; + try { + // 3 calls to next() should fail, as the user would now see the inserted record + testAbortWithUnseenConcurrentInsertAbortOnNext(3); + } catch (AbortedDueToConcurrentModificationException e) { + expectedException = true; + } + assertThat(expectedException, is(true)); + } + + private int testAbortWithUnseenConcurrentInsertAbortOnNext(int callsToNext) + throws AbortedDueToConcurrentModificationException { + assumeFalse("concurrent transactions are not supported on the emulator", isUsingEmulator()); + int retries = 0; + clearTable(); + clearStatistics(); + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + int totalRecordsSeen = 0; + // insert two test records + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + // select the test records and consume part or all of the result set + ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST ORDER BY ID")); + for (int counter = 0; counter < 
callsToNext; counter++) { + if (rs.next()) { + totalRecordsSeen++; + } + } + // Open a new connection and transaction and do an additional insert. This insert will be + // included in a retry of the above query. Any transaction retry will fail/succeed depending + // on whether the user has consumed enough of the result set to potentially have seen this + // insert. + try (ITConnection connection2 = createConnection()) { + connection2.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')")); + connection2.commit(); + } + // Now consume the rest of the result set, but trigger a transaction retry by aborting the + // first next() call. Without a retry, the result set should only contain 2 records. With a + // successful retry, the result set contains 3 results. The retry will only succeed as long + // as the user has not consumed enough of the result set to know whether there should have + // been a record with ID 3 or not. + + // First verify that the transaction has not yet retried. + int currentRetryCount = RETRY_STATISTICS.totalRetryAttemptsStarted; + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + + // Try to consume the rest of the result set. + // This will fail with an AbortedDueToConcurrentModificationException if the retry fails. + while (rs.next()) { + totalRecordsSeen++; + if (totalRecordsSeen == 3) { + assertThat(rs.getLong("ID"), is(equalTo(3L))); + } + } + // Verify that the transaction retried. 
+ assertThat(RETRY_STATISTICS.totalSuccessfulRetries > currentRetryCount, is(true)); + rs.close(); + connection.commit(); + retries = RETRY_STATISTICS.totalSuccessfulRetries; + } + return retries; + } + + /** + * Test that shows that a transaction that has aborted is considered to be rolled back, and new + * statements will be executed in a new transaction + */ + @Test + public void testAbortWithConcurrentInsertAndContinue() { + assumeFalse("concurrent transactions are not supported on the emulator", isUsingEmulator()); + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + // insert two test records + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + // Select the test records and consume the entire result set. + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST ORDER BY ID"))) { + while (rs.next()) { + // do nothing + } + } + // Open a new connection and transaction and do an additional insert + try (ITConnection connection2 = createConnection()) { + connection2.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')")); + connection2.commit(); + } + // Now try to do an insert that will abort. The retry should now fail as there has been a + // concurrent modification. 
+ interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + boolean expectedException = false; + try { + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (4, 'test 4')")); + } catch (AbortedDueToConcurrentModificationException e) { + expectedException = true; + } + assertThat(expectedException, is(true)); + assertRetryStatistics(1, 1, 0); + // Rollback the aborted transaction to start a new one. + connection.rollback(); + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST"))) { + // there should be one record from the transaction on connection2 + assertThat(rs.next(), is(true)); + assertThat(rs.next(), is(false)); + } + } + } + + /** + * Test that shows the following: + * + *

+   * <ol>
+   *   <li>The transaction aborts at commit
+   *   <li>A retry starts and succeeds
+   *   <li>The commit is applied again and aborts again
+   *   <li>The retry is started again and then succeeds
+   * </ol>
    + */ + @Test + public void testAbortTwiceOnCommit() { + AbortInterceptor interceptor = + new AbortInterceptor(0) { + private int commitCount = 0; + + @Override + protected boolean shouldAbort(String statement, ExecutionStep step) { + if ("COMMIT".equalsIgnoreCase(statement)) { + commitCount++; + return commitCount <= 2; + } + return false; + } + }; + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + connection.executeUpdate( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test aborted')")); + connection.commit(); + // Assert that the transaction was retried twice. + assertRetryStatistics(2, 0, 2); + // Verify that the insert succeeded. + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE ID=1"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(1L))); + assertThat(rs.next(), is(false)); + } + } + } + + /** + * Test that shows the following: + * + *
+   * <ol>
+   *   <li>The transaction aborts at commit
+   *   <li>A retry starts and then aborts at the insert statement
+   *   <li>The retry is restarted and then succeeds
+   * </ol>
    + */ + @Test + public void testNestedAbortOnInsert() { + AbortInterceptor interceptor = + new AbortInterceptor(0) { + private int commitCount = 0; + private int insertCount = 0; + + @Override + protected boolean shouldAbort(String statement, ExecutionStep step) { + if ("COMMIT".equalsIgnoreCase(statement)) { + commitCount++; + return commitCount == 1; + } else if (statement.startsWith("INSERT INTO TEST")) { + insertCount++; + return insertCount == 2; + } + return false; + } + }; + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + connection.executeUpdate( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test aborted')")); + connection.commit(); + // Assert that the transaction was retried (a restarted retry is counted as one successful + // retry). + assertRetryStatistics(2, 0, 1); + assertThat(RETRY_STATISTICS.totalNestedAborts > 0, is(true)); + // Verify that the insert succeeded. + try (ResultSet rs = + connection.executeQuery(Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE ID=1"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(1L))); + assertThat(rs.next(), is(false)); + } + } + } + + /** + * Test that shows the following: + * + *
+   * <ol>
+   *   <li>The transaction aborts at commit
+   *   <li>A retry starts and then aborts at a next call in a result set
+   *   <li>The retry is restarted and then succeeds
+   * </ol>
    + */ + @Test + public void testNestedAbortOnNextCall() { + AbortInterceptor interceptor = + new AbortInterceptor(0) { + private int nextCallsDuringRetry = 0; + private int commitCount = 0; + + @Override + protected boolean shouldAbort(String statement, ExecutionStep step) { + if ("COMMIT".equalsIgnoreCase(statement)) { + // Note that commit always has ExecutionStep == EXECUTE_STATEMENT, as a commit can + // never + // really be retried (it is always the last statement in a transaction, and if it + // fails + // because of an aborted exception, the entire transaction is retried, and the commit + // statement is then applied again). + commitCount++; + return commitCount == 1; + } else if (statement.equals("SELECT * FROM TEST ORDER BY ID") + && step == ExecutionStep.RETRY_NEXT_ON_RESULT_SET) { + nextCallsDuringRetry++; + return nextCallsDuringRetry == 1; + } + return false; + } + }; + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // Insert two test records. + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + // Select the test records. + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST ORDER BY ID"))) { + // Iterate one step. This step should abort during the retry the first time. + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("ID"), is(equalTo(1L))); + // Do another insert that will not be visible to the result set. + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')")); + // iterate another step + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("ID"), is(equalTo(2L))); + // Ensure we are at the end of the result set. 
+ assertThat(rs.next(), is(false)); + } + connection.commit(); + // Verify that the transaction retried. + assertRetryStatistics(2, 0, 1); + assertThat(RETRY_STATISTICS.totalNestedAborts > 0, is(true)); + // Verify that all the inserts succeeded. + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT COUNT(*) AS C FROM TEST"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(3L))); + assertThat(rs.next(), is(false)); + } + } + } + + /** + * Test that shows the following: + * + *
+   * <ol>
+   *   <li>Transaction 1 does two inserts in table TEST
+   *   <li>Transaction 1 selects all records from table TEST
+   *   <li>Transaction 2 inserts a record into TEST
+   *   <li>Transaction 1 does another insert into TEST that aborts
+   *   <li>Transaction 1 starts a retry that aborts at the SELECT statement (i.e. before the
+   *       concurrent modification has been seen)
+   *   <li>Transaction 1 restarts the retry that now aborts due to a concurrent modification
+   *       exception
+   * </ol>
    + */ + @Test + public void testNestedAbortWithConcurrentInsert() { + assumeFalse("concurrent transactions are not supported on the emulator", isUsingEmulator()); + AbortInterceptor interceptor = + new AbortInterceptor(0) { + private boolean alreadyAborted = false; + + @Override + protected boolean shouldAbort(String statement, ExecutionStep step) { + // Abort during retry on the select statement. + if (!alreadyAborted + && statement.equals("SELECT * FROM TEST ORDER BY ID") + && step == ExecutionStep.RETRY_STATEMENT) { + alreadyAborted = true; + return true; + } + return super.shouldAbort(statement, step); + } + }; + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + // insert two test records + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + // select the test records and consume the entire result set + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST ORDER BY ID"))) { + while (rs.next()) { + // do nothing + } + } + // open a new connection and transaction and do an additional insert + try (ITConnection connection2 = createConnection()) { + connection2.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')")); + connection2.commit(); + } + // Now try to do an insert that will abort. The retry should now fail as there has been a + // concurrent modification. 
+ interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + boolean expectedException = false; + try { + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (4, 'test 4')")); + } catch (AbortedDueToConcurrentModificationException e) { + expectedException = true; + } + assertThat(expectedException, is(true)); + assertRetryStatistics(2, 1, 0); + assertThat(RETRY_STATISTICS.totalNestedAborts > 0, is(true)); + } + } + + /** + * Test that shows the following: + * + *
+   * <ol>
+   *   <li>Insert two records into table TEST and commit
+   *   <li>Transaction 1 updates the names of all records in the TEST table
+   *   <li>Transaction 2 inserts a record in the TEST table and commits
+   *   <li>Transaction 1 does another insert into TEST that aborts
+   *   <li>Transaction 1 starts a retry that aborts due to a concurrent modification exception as
+   *       the number of updated records will be different
+   * </ol>
    + */ + @Test + public void testAbortWithDifferentUpdateCount() { + assumeFalse("concurrent transactions are not supported on the emulator", isUsingEmulator()); + AbortInterceptor interceptor = new AbortInterceptor(0); + // first insert two test records + try (ITConnection connection = createConnection()) { + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + connection.commit(); + } + // open a new connection and update one of the records + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + connection.executeUpdate( + Statement.of("UPDATE TEST SET NAME='test update that will fail' WHERE TRUE")); + // open a new connection and transaction and update the same test record + try (ITConnection connection2 = createConnection()) { + connection2.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')")); + connection2.commit(); + } + // Now try to do an insert that will abort. The retry should now fail as there has been a + // concurrent modification. + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + boolean expectedException = false; + try { + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (4, 'test 4')")); + } catch (AbortedDueToConcurrentModificationException e) { + expectedException = true; + } + assertRetryStatistics(1, 1, 0); + assertThat(expectedException, is(true)); + } + } + + /** + * Test that shows the following: + * + *
+   * <ol>
+   *   <li>Insert two records into table TEST and commit
+   *   <li>Try to query a non-existing table. This will lead to an exception.
+   *   <li>Query all the records from the TEST table and consume the result set
+   *   <li>Insert another record into TEST that aborts
+   *   <li>The transaction successfully retries
+   * </ol>
    + */ + @Test + public void testAbortWithExceptionOnSelect() { + assumeFalse( + "resume after error in transaction is not supported on the emulator", isUsingEmulator()); + AbortInterceptor interceptor = new AbortInterceptor(0); + // first insert two test records + try (ITConnection connection = createConnection()) { + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + connection.commit(); + } + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + // do a select that will fail + boolean expectedException = false; + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM FOO"))) { + while (rs.next()) { + // do nothing + } + } catch (SpannerException e) { + // expected + expectedException = true; + } + assertThat(expectedException, is(true)); + // do a select that will succeed + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST"))) { + while (rs.next()) { + // do nothing + } + } + // now try to do an insert that will abort. + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')")); + assertRetryStatistics(1, 0, 1); + } + } + + /** + * Test that shows the following: + * + *
+   * <ol>
+   *   <li>Insert two records into table TEST and commit.
+   *   <li>Try to query the non-existing table FOO. This will lead to an exception.
+   *   <li>Query all the records from the TEST table and consume the result set.
+   *   <li>Open another connection and create the table FOO.
+   *   <li>Insert another record into TEST that aborts.
+   *   <li>The transaction is internally retried. The retry fails as the SELECT statement on FOO
+   *       will now succeed.
+   * </ol>
    + */ + @Test + public void testAbortWithExceptionOnSelectAndConcurrentModification() { + assumeFalse("concurrent transactions are not supported on the emulator", isUsingEmulator()); + boolean abortedDueToConcurrentModification = false; + AbortInterceptor interceptor = new AbortInterceptor(0); + // first insert two test records + try (ITConnection connection = createConnection()) { + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + connection.commit(); + } + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + // do a select that will fail + boolean expectedException = false; + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM FOO"))) { + while (rs.next()) { + // do nothing + } + } catch (SpannerException e) { + // expected + expectedException = true; + } + assertThat(expectedException, is(true)); + // do a select that will succeed + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST"))) { + while (rs.next()) { + // do nothing + } + } + // CREATE FOO + try (ITConnection connection2 = createConnection()) { + connection2.setAutocommit(true); + connection2.execute( + Statement.of("CREATE TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)")); + } + // Now try to do an insert that will abort. The subsequent retry will fail as the SELECT * + // FROM FOO now returns a result. 
+ interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + try { + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')")); + } catch (AbortedDueToConcurrentModificationException e) { + abortedDueToConcurrentModification = true; + } + } + // DROP FOO regardless of the result to avoid any interference with other test cases + try (ITConnection connection2 = createConnection()) { + connection2.setAutocommit(true); + connection2.execute(Statement.of("DROP TABLE FOO")); + } + assertThat(abortedDueToConcurrentModification, is(true)); + assertRetryStatistics(1, 1, 0); + } + + /** + * Test that shows the following: + * + *
+   * <ol>
+   *   <li>Insert two records into table TEST and commit.
+   *   <li>Try to insert a record in the non-existing table FOO. This will lead to an exception.
+   *   <li>Query all the records from the TEST table and consume the result set.
+   *   <li>Open another connection and create the table FOO.
+   *   <li>Insert another record into TEST that aborts.
+   *   <li>The transaction is internally retried. The retry fails as the insert statement on FOO
+   *       will now succeed.
+   * </ol>
    + */ + @Test + public void testAbortWithExceptionOnInsertAndConcurrentModification() { + assumeFalse("concurrent transactions are not supported on the emulator", isUsingEmulator()); + boolean abortedDueToConcurrentModification = false; + AbortInterceptor interceptor = new AbortInterceptor(0); + // first insert two test records + try (ITConnection connection = createConnection()) { + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + connection.commit(); + } + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + // do an insert that will fail + boolean expectedException = false; + try { + connection.executeUpdate(Statement.of("INSERT INTO FOO (ID, NAME) VALUES (1, 'test 1')")); + } catch (SpannerException e) { + // expected + expectedException = true; + } + assertThat(expectedException, is(true)); + // do a select that will succeed + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST"))) { + while (rs.next()) { + // do nothing + } + } + // CREATE FOO + try (ITConnection connection2 = createConnection()) { + connection2.setAutocommit(true); + connection2.execute( + Statement.of("CREATE TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)")); + } + // Now try to do an insert that will abort. The subsequent retry will fail as the INSERT INTO + // FOO now succeeds. 
+ interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + try { + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')")); + } catch (AbortedDueToConcurrentModificationException e) { + abortedDueToConcurrentModification = true; + } + } + // DROP FOO regardless of the result to avoid any interference with other test cases + try (ITConnection connection2 = createConnection()) { + connection2.setAutocommit(true); + connection2.execute(Statement.of("DROP TABLE FOO")); + } + assertThat(abortedDueToConcurrentModification, is(true)); + assertRetryStatistics(1, 1, 0); + } + + /** + * Test that shows the following: + * + *
+   * <ol>
+   *   <li>Insert two records into table TEST and commit.
+   *   <li>Create the table FOO and insert a test record.
+   *   <li>Query the table FOO.
+   *   <li>Query all the records from the TEST table and consume the result set.
+   *   <li>Open another connection and drop the table FOO.
+   *   <li>Insert another record into TEST that aborts.
+   *   <li>The transaction is internally retried. The retry fails as the SELECT statement on FOO
+   *       will now fail.
+   * </ol>
    + */ + @Test + public void testAbortWithDroppedTableConcurrentModification() { + assumeFalse("concurrent transactions are not supported on the emulator", isUsingEmulator()); + boolean abortedDueToConcurrentModification = false; + AbortInterceptor interceptor = new AbortInterceptor(0); + // first insert two test records + try (ITConnection connection = createConnection()) { + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + connection.commit(); + } + // CREATE FOO + try (ITConnection connection2 = createConnection()) { + connection2.setAutocommit(true); + connection2.execute( + Statement.of("CREATE TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)")); + connection2.executeUpdate(Statement.of("INSERT INTO FOO (ID, NAME) VALUES (1, 'test 1')")); + } + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM FOO"))) { + while (rs.next()) { + // do nothing + } + } + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST"))) { + while (rs.next()) { + // do nothing + } + } + // DROP FOO using a different connection + try (ITConnection connection2 = createConnection()) { + connection2.setAutocommit(true); + connection2.execute(Statement.of("DROP TABLE FOO")); + } + // Now try to do an insert that will abort. The subsequent retry will fail as the SELECT * + // FROM FOO now fails. 
+ interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + try { + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')")); + } catch (AbortedDueToConcurrentModificationException e) { + abortedDueToConcurrentModification = true; + } + } + assertThat(abortedDueToConcurrentModification, is(true)); + assertRetryStatistics(1, 1, 0); + } + + /** + * Test that shows the following: + * + *
+   * <ol>
+   *   <li>Insert two records into table TEST and commit.
+   *   <li>Create the table FOO and insert a test record and commit.
+   *   <li>Insert another record into the table FOO.
+   *   <li>Query all the records from the TEST table and consume the result set.
+   *   <li>Open another connection and drop the table FOO.
+   *   <li>Insert another record into TEST that aborts.
+   *   <li>The transaction is internally retried. The retry fails as the INSERT statement on FOO
+   *       will now fail.
+   * </ol>
    + */ + @Test + public void testAbortWithInsertOnDroppedTableConcurrentModification() { + assumeFalse("concurrent transactions are not supported on the emulator", isUsingEmulator()); + boolean abortedDueToConcurrentModification = false; + AbortInterceptor interceptor = new AbortInterceptor(0); + // first insert two test records + try (ITConnection connection = createConnection()) { + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + connection.commit(); + } + // CREATE FOO + try (ITConnection connection2 = createConnection()) { + connection2.setAutocommit(true); + connection2.execute( + Statement.of("CREATE TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)")); + connection2.executeUpdate(Statement.of("INSERT INTO FOO (ID, NAME) VALUES (1, 'test 1')")); + } + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + // insert a record into FOO + connection.executeUpdate(Statement.of("INSERT INTO FOO (ID, NAME) VALUES (2, 'test 2')")); + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST"))) { + while (rs.next()) { + // do nothing + } + } + // DROP FOO using a different connection + try (ITConnection connection2 = createConnection()) { + connection2.setAutocommit(true); + connection2.execute(Statement.of("DROP TABLE FOO")); + } + // Now try to do an insert that will abort. The subsequent retry will fail as the INSERT INTO + // FOO now fails. 
+ interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + try { + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (3, 'test 3')")); + } catch (AbortedDueToConcurrentModificationException e) { + abortedDueToConcurrentModification = true; + } + } + assertThat(abortedDueToConcurrentModification, is(true)); + assertRetryStatistics(1, 1, 0); + } + + /** + * Test that shows the following: + * + *
      + *
    1. Insert two records into table TEST and commit. + *
    2. Create the table FOO and insert two test records and commit. + *
    3. Query all the records from the TEST table and consume the result set. + *
    4. Query all the records from the FOO table and consume only part of the result set. + *
    5. Open another connection and drop the table FOO. + *
    6. Try to consume the rest of the FOO result set. This aborts. + *
    7. The transaction is internally retried. The retry fails as the SELECT statement on FOO + * will now fail. + *
    + */ + @Test + public void testAbortWithCursorHalfwayDroppedTableConcurrentModification() { + assumeFalse("concurrent transactions are not supported on the emulator", isUsingEmulator()); + boolean abortedDueToConcurrentModification = false; + AbortInterceptor interceptor = new AbortInterceptor(0); + // first insert two test records + try (ITConnection connection = createConnection()) { + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection.executeUpdate(Statement.of("INSERT INTO TEST (ID, NAME) VALUES (2, 'test 2')")); + connection.commit(); + } + // CREATE FOO + try (ITConnection connection2 = createConnection()) { + connection2.setAutocommit(true); + connection2.execute( + Statement.of("CREATE TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)")); + connection2.executeUpdate(Statement.of("INSERT INTO FOO (ID, NAME) VALUES (1, 'test 1')")); + connection2.executeUpdate(Statement.of("INSERT INTO FOO (ID, NAME) VALUES (2, 'test 2')")); + } + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST"))) { + while (rs.next()) { + // do nothing + } + } + // SELECT FROM FOO and consume part of the result set + ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM FOO")); + assertThat(rs.next(), is(true)); + // DROP FOO using a different connection + try (ITConnection connection2 = createConnection()) { + connection2.setAutocommit(true); + connection2.execute(Statement.of("DROP TABLE FOO")); + } + // try to continue to consume the result set, but this will now abort. + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + try { + // This will fail as the retry will not succeed. 
+ rs.next(); + } catch (AbortedDueToConcurrentModificationException e) { + abortedDueToConcurrentModification = true; + } finally { + rs.close(); + } + } + assertThat(abortedDueToConcurrentModification, is(true)); + assertRetryStatistics(1, 1, 0); + } + + /** Test the successful retry of a transaction with a large {@link ResultSet} */ + @Test + public void testRetryLargeResultSet() { + final int NUMBER_OF_TEST_RECORDS = 100000; + final long UPDATED_RECORDS = 1000L; + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = createConnection()) { + // insert test records + for (int i = 0; i < NUMBER_OF_TEST_RECORDS; i++) { + connection.bufferedWrite( + Mutation.newInsertBuilder("TEST").set("ID").to(i).set("NAME").to("test " + i).build()); + if (i % 1000 == 0) { + connection.commit(); + } + } + connection.commit(); + } + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + // select the test records and iterate over them + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST ORDER BY ID"))) { + while (rs.next()) { + // do nothing, just consume the result set + } + } + // Do an update that will abort and retry. + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + connection.executeUpdate( + Statement.newBuilder("UPDATE TEST SET NAME='updated' WHERE ID<@max_id") + .bind("max_id") + .to(UPDATED_RECORDS) + .build()); + connection.commit(); + // verify that the update succeeded + try (ResultSet rs = + connection.executeQuery( + Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE NAME='updated'"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(UPDATED_RECORDS))); + assertThat(rs.next(), is(false)); + } + // Verify that the transaction retried. 
+ assertRetryStatistics(1, 0, 1); + } + } + + /** Test the successful retry of a transaction with a high chance of multiple aborts */ + @Test + public void testRetryHighAbortRate() { + // TODO(sriharshach): Remove this skip once backend support empty transactions to commit. + assumeFalse( + "Skipping for multiplexed sessions since it does not allow empty transactions to commit", + env.getTestHelper().getOptions().getSessionPoolOptions().getUseMultiplexedSessionForRW()); + final int NUMBER_OF_TEST_RECORDS = 10000; + final long UPDATED_RECORDS = 1000L; + // abort on 25% of all statements + AbortInterceptor interceptor = new AbortInterceptor(0.25D); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + // insert test records + for (int i = 0; i < NUMBER_OF_TEST_RECORDS; i++) { + connection.bufferedWrite( + Mutation.newInsertBuilder("TEST").set("ID").to(i).set("NAME").to("test " + i).build()); + if (i % 1000 == 0) { + connection.commit(); + } + } + connection.commit(); + // select the test records and iterate over them + // reduce the abort rate to 0.01% as each next() call could abort + interceptor.setProbability(0.0001D); + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST ORDER BY ID"))) { + while (rs.next()) { + // do nothing, just consume the result set + } + } + // increase the abort rate to 50% + interceptor.setProbability(0.50D); + connection.executeUpdate( + Statement.newBuilder("UPDATE TEST SET NAME='updated' WHERE ID<@max_id") + .bind("max_id") + .to(UPDATED_RECORDS) + .build()); + connection.commit(); + // verify that the update succeeded + try (ResultSet rs = + connection.executeQuery( + Statement.of("SELECT COUNT(*) AS C FROM TEST WHERE NAME='updated'"))) { + assertThat(rs.next(), is(true)); + assertThat(rs.getLong("C"), is(equalTo(UPDATED_RECORDS))); + 
assertThat(rs.next(), is(false)); + } + connection.commit(); + } catch (AbortedException e) { + // This could happen if the number of aborts exceeds the max number of retries. + logger.log(Level.FINE, "testRetryHighAbortRate aborted because of too many retries", e); + } + logger.fine("Total number of retries started: " + RETRY_STATISTICS.totalRetryAttemptsStarted); + logger.fine("Total number of retries finished: " + RETRY_STATISTICS.totalRetryAttemptsFinished); + logger.fine("Total number of retries successful: " + RETRY_STATISTICS.totalSuccessfulRetries); + logger.fine("Total number of retries aborted: " + RETRY_STATISTICS.totalNestedAborts); + logger.fine( + "Total number of times the max retry count was exceeded: " + + RETRY_STATISTICS.totalMaxAttemptsExceeded); + } + + @Test + public void testAbortWithConcurrentInsertOnEmptyTable() { + assumeFalse("concurrent transactions are not supported on the emulator", isUsingEmulator()); + + AbortInterceptor interceptor = new AbortInterceptor(0); + try (ITConnection connection = + createConnection(interceptor, new CountTransactionRetryListener())) { + // select the test records but do not consume the result set + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST ORDER BY ID"))) { + // Open a new connection and transaction and do an insert. This insert will be + // included in a retry of the above query, but this has not yet been 'seen' by the user, + // hence is not a problem for retrying the transaction. + try (ITConnection connection2 = createConnection()) { + connection2.executeUpdate( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection2.commit(); + } + // Now try to consume the result set, but the call to next() will throw an AbortedException. + // The retry should still succeed. 
+ interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + int currentSuccessfulRetryCount = RETRY_STATISTICS.totalSuccessfulRetries; + assertThat(rs.next(), is(true)); + assertThat( + RETRY_STATISTICS.totalSuccessfulRetries, is(equalTo(currentSuccessfulRetryCount + 1))); + assertThat(rs.next(), is(false)); + } + connection.commit(); + + // Now do the same, but this time we will consume the empty result set. The retry should now + // fail. + clearTable(); + clearStatistics(); + try (ResultSet rs = connection.executeQuery(Statement.of("SELECT * FROM TEST ORDER BY ID"))) { + assertThat(rs.next(), is(false)); + // Open a new connection and transaction and do an insert. This insert will be + // included in a retry of the above query, and this time it will cause the retry to fail. + try (ITConnection connection2 = createConnection()) { + connection2.executeUpdate( + Statement.of("INSERT INTO TEST (ID, NAME) VALUES (1, 'test 1')")); + connection2.commit(); + } + // this time the abort will occur on the call to commit() + interceptor.setUsingMultiplexedSession( + isMultiplexedSessionsEnabledForRW(connection.getSpanner())); + interceptor.setProbability(1.0); + interceptor.setOnlyInjectOnce(true); + boolean expectedException = false; + try { + connection.commit(); + } catch (AbortedDueToConcurrentModificationException e) { + expectedException = true; + } + // No successful retries. 
+ assertRetryStatistics(1, 1, 0); + assertThat(expectedException, is(true)); + } + } + } + + private void assertRetryStatistics( + int minAttemptsStartedExpected, + int concurrentModificationsExpected, + int successfulRetriesExpected) { + assertThat(RETRY_STATISTICS.totalRetryAttemptsStarted >= minAttemptsStartedExpected, is(true)); + assertThat( + RETRY_STATISTICS.totalConcurrentModifications, + is(equalTo(concurrentModificationsExpected))); + assertThat(RETRY_STATISTICS.totalSuccessfulRetries >= successfulRetriesExpected, is(true)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/encryption/CustomerManagedEncryptionTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/encryption/CustomerManagedEncryptionTest.java new file mode 100644 index 000000000000..5a8fe204fb35 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/encryption/CustomerManagedEncryptionTest.java @@ -0,0 +1,56 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.spanner.encryption; + +import static org.junit.Assert.*; + +import com.google.spanner.admin.database.v1.EncryptionConfig; +import org.junit.Test; + +/** Unit tests for {@link CustomerManagedEncryption}. 
*/ +public class CustomerManagedEncryptionTest { + + @Test + public void testFromProtoWithDefaultInstance() { + final CustomerManagedEncryption actual = + CustomerManagedEncryption.fromProtoOrNull(EncryptionConfig.getDefaultInstance()); + + assertNull(actual); + } + + @Test + public void testFromProto() { + final CustomerManagedEncryption expected = new CustomerManagedEncryption("kms-key-name"); + final EncryptionConfig encryptionConfig = + EncryptionConfig.newBuilder().setKmsKeyName("kms-key-name").build(); + + final CustomerManagedEncryption actual = + CustomerManagedEncryption.fromProtoOrNull(encryptionConfig); + + assertEquals(expected, actual); + } + + @Test + public void testEqualsAndHashCode() { + final CustomerManagedEncryption customerManagedEncryption1 = + new CustomerManagedEncryption("kms-key-name"); + final CustomerManagedEncryption customerManagedEncryption2 = + new CustomerManagedEncryption("kms-key-name"); + + assertEquals(customerManagedEncryption1, customerManagedEncryption2); + assertEquals(customerManagedEncryption1.hashCode(), customerManagedEncryption2.hashCode()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/encryption/EncryptionConfigProtoMapperTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/encryption/EncryptionConfigProtoMapperTest.java new file mode 100644 index 000000000000..4ce300c2f668 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/encryption/EncryptionConfigProtoMapperTest.java @@ -0,0 +1,139 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.spanner.encryption; + +import static org.junit.Assert.assertEquals; + +import com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig; +import com.google.spanner.admin.database.v1.EncryptionConfig; +import com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig; +import org.junit.Test; + +/** Unit tests for {@link com.google.cloud.spanner.encryption.EncryptionConfigProtoMapper} */ +public class EncryptionConfigProtoMapperTest { + + public static final String KMS_KEY_NAME = "kms-key-name"; + + @Test + public void testEncryptionConfig() { + final EncryptionConfig expected = + EncryptionConfig.newBuilder().setKmsKeyName(KMS_KEY_NAME).build(); + + final EncryptionConfig actual = + EncryptionConfigProtoMapper.encryptionConfig(new CustomerManagedEncryption(KMS_KEY_NAME)); + + assertEquals(expected, actual); + } + + @Test + public void testCreateBackupConfigCustomerManagedEncryption() { + final CreateBackupEncryptionConfig expected = + CreateBackupEncryptionConfig.newBuilder() + .setEncryptionType( + CreateBackupEncryptionConfig.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION) + .setKmsKeyName(KMS_KEY_NAME) + .build(); + + final CreateBackupEncryptionConfig actual = + EncryptionConfigProtoMapper.createBackupEncryptionConfig( + new CustomerManagedEncryption(KMS_KEY_NAME)); + + assertEquals(expected, actual); + } + + @Test + public void testCreateBackupConfigGoogleDefaultEncryption() { + final CreateBackupEncryptionConfig expected = + CreateBackupEncryptionConfig.newBuilder() + .setEncryptionType( + 
CreateBackupEncryptionConfig.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION) + .build(); + + final CreateBackupEncryptionConfig actual = + EncryptionConfigProtoMapper.createBackupEncryptionConfig(GoogleDefaultEncryption.INSTANCE); + + assertEquals(expected, actual); + } + + @Test + public void testCreateBackupConfigUseDatabaseEncryption() { + final CreateBackupEncryptionConfig expected = + CreateBackupEncryptionConfig.newBuilder() + .setEncryptionType(CreateBackupEncryptionConfig.EncryptionType.USE_DATABASE_ENCRYPTION) + .build(); + + final CreateBackupEncryptionConfig actual = + EncryptionConfigProtoMapper.createBackupEncryptionConfig(UseDatabaseEncryption.INSTANCE); + + assertEquals(expected, actual); + } + + @Test(expected = IllegalArgumentException.class) + public void testCreateBackupInvalidEncryption() { + EncryptionConfigProtoMapper.createBackupEncryptionConfig(null); + } + + @Test + public void testRestoreDatabaseConfigCustomerManagedEncryption() { + final RestoreDatabaseEncryptionConfig expected = + RestoreDatabaseEncryptionConfig.newBuilder() + .setEncryptionType( + RestoreDatabaseEncryptionConfig.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION) + .setKmsKeyName(KMS_KEY_NAME) + .build(); + + final RestoreDatabaseEncryptionConfig actual = + EncryptionConfigProtoMapper.restoreDatabaseEncryptionConfig( + new CustomerManagedEncryption(KMS_KEY_NAME)); + + assertEquals(expected, actual); + } + + @Test + public void testRestoreDatabaseConfigGoogleDefaultEncryption() { + final RestoreDatabaseEncryptionConfig expected = + RestoreDatabaseEncryptionConfig.newBuilder() + .setEncryptionType( + RestoreDatabaseEncryptionConfig.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION) + .build(); + + final RestoreDatabaseEncryptionConfig actual = + EncryptionConfigProtoMapper.restoreDatabaseEncryptionConfig( + GoogleDefaultEncryption.INSTANCE); + + assertEquals(expected, actual); + } + + @Test + public void testRestoreDatabaseConfigUseBackupEncryption() { + final RestoreDatabaseEncryptionConfig 
expected = + RestoreDatabaseEncryptionConfig.newBuilder() + .setEncryptionType( + RestoreDatabaseEncryptionConfig.EncryptionType + .USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION) + .build(); + + final RestoreDatabaseEncryptionConfig actual = + EncryptionConfigProtoMapper.restoreDatabaseEncryptionConfig(UseBackupEncryption.INSTANCE); + + assertEquals(expected, actual); + } + + @Test(expected = IllegalArgumentException.class) + public void testRestoreDatabaseConfigInvalidEncryption() { + EncryptionConfigProtoMapper.restoreDatabaseEncryptionConfig(null); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/encryption/EncryptionConfigsTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/encryption/EncryptionConfigsTest.java new file mode 100644 index 000000000000..82f997a1c40a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/encryption/EncryptionConfigsTest.java @@ -0,0 +1,55 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.spanner.encryption; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +import org.junit.Test; + +/** Unit tests for {@link EncryptionConfigs} */ +public class EncryptionConfigsTest { + + @Test + public void testCustomerManagedEncryption() { + final CustomerManagedEncryption expected = new CustomerManagedEncryption("kms-key-name"); + + final CustomerManagedEncryption actual = + EncryptionConfigs.customerManagedEncryption("kms-key-name"); + + assertEquals(expected, actual); + } + + @Test(expected = IllegalArgumentException.class) + public void testCustomerManagedEncryptionNullKeyName() { + EncryptionConfigs.customerManagedEncryption(null); + } + + @Test + public void testGoogleDefaultEncryption() { + assertSame(EncryptionConfigs.googleDefaultEncryption(), GoogleDefaultEncryption.INSTANCE); + } + + @Test + public void testUseDatabaseEncryption() { + assertSame(EncryptionConfigs.useDatabaseEncryption(), UseDatabaseEncryption.INSTANCE); + } + + @Test + public void testUseBackupEncryption() { + assertSame(EncryptionConfigs.useBackupEncryption(), UseBackupEncryption.INSTANCE); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/encryption/EncryptionInfoTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/encryption/EncryptionInfoTest.java new file mode 100644 index 000000000000..88a11d19c8f0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/encryption/EncryptionInfoTest.java @@ -0,0 +1,79 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.spanner.encryption; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import com.google.rpc.Code; +import com.google.rpc.Status; +import org.junit.Test; + +/** Unit tests for {@link com.google.cloud.spanner.encryption.EncryptionInfo} */ +public class EncryptionInfoTest { + + private static final String KMS_KEY_VERSION = "kms-key-version"; + private static final com.google.spanner.admin.database.v1.EncryptionInfo.Type + CUSTOMER_MANAGED_ENCRYPTION = + com.google.spanner.admin.database.v1.EncryptionInfo.Type.CUSTOMER_MANAGED_ENCRYPTION; + private static final Status OK_STATUS = Status.newBuilder().setCode(Code.OK_VALUE).build(); + + @Test + public void testEncryptionInfoFromProtoDefaultInstance() { + final EncryptionInfo encryptionInfo = + EncryptionInfo.fromProtoOrNull( + com.google.spanner.admin.database.v1.EncryptionInfo.getDefaultInstance()); + + assertNull(encryptionInfo); + } + + @Test + public void testEncryptionInfoFromProto() { + final EncryptionInfo actualEncryptionInfo = + EncryptionInfo.fromProtoOrNull( + com.google.spanner.admin.database.v1.EncryptionInfo.newBuilder() + .setEncryptionStatus(OK_STATUS) + .setEncryptionTypeValue(CUSTOMER_MANAGED_ENCRYPTION.getNumber()) + .setKmsKeyVersion(KMS_KEY_VERSION) + .build()); + + final EncryptionInfo expectedEncryptionInfo = + new EncryptionInfo(KMS_KEY_VERSION, CUSTOMER_MANAGED_ENCRYPTION, OK_STATUS); + + assertEquals(expectedEncryptionInfo, actualEncryptionInfo); + } + + @Test + public void testEqualsAndHashCode() { + 
final EncryptionInfo encryptionInfo1 = + EncryptionInfo.fromProtoOrNull( + com.google.spanner.admin.database.v1.EncryptionInfo.newBuilder() + .setEncryptionStatus(OK_STATUS) + .setEncryptionTypeValue(CUSTOMER_MANAGED_ENCRYPTION.getNumber()) + .setKmsKeyVersion(KMS_KEY_VERSION) + .build()); + final EncryptionInfo encryptionInfo2 = + EncryptionInfo.fromProtoOrNull( + com.google.spanner.admin.database.v1.EncryptionInfo.newBuilder() + .setEncryptionStatus(OK_STATUS) + .setEncryptionTypeValue(CUSTOMER_MANAGED_ENCRYPTION.getNumber()) + .setKmsKeyVersion(KMS_KEY_VERSION) + .build()); + + assertEquals(encryptionInfo1, encryptionInfo2); + assertEquals(encryptionInfo1.hashCode(), encryptionInfo2.hashCode()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/DialectTestParameter.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/DialectTestParameter.java new file mode 100644 index 000000000000..bb17bed55122 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/DialectTestParameter.java @@ -0,0 +1,50 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.it; + +import com.google.cloud.spanner.Dialect; +import java.util.Map; + +public class DialectTestParameter { + + final Dialect dialect; + final String createTableFile; + final Map executeQueriesFiles; + final String[] queries; + + DialectTestParameter( + Dialect dialect, + String createTableFile, + Map executeQueriesFiles, + String[] queries) { + this.dialect = dialect; + this.createTableFile = createTableFile; + this.executeQueriesFiles = executeQueriesFiles; + this.queries = queries; + } + + DialectTestParameter(Dialect dialect) { + this.dialect = dialect; + this.createTableFile = ""; + this.executeQueriesFiles = null; + this.queries = null; + } + + public String toString() { + return this.dialect.name(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITAsyncAPITest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITAsyncAPITest.java new file mode 100644 index 000000000000..1ca69fe975d8 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITAsyncAPITest.java @@ -0,0 +1,372 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.SpannerApiFutures.get; +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.AsyncResultSet; +import com.google.cloud.spanner.AsyncResultSet.CallbackResponse; +import com.google.cloud.spanner.AsyncRunner; +import com.google.cloud.spanner.AsyncTransactionManager; +import com.google.cloud.spanner.AsyncTransactionManager.TransactionContextFuture; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeyRange; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.StructField; +import com.google.cloud.spanner.testing.RemoteSpannerHelper; +import com.google.common.util.concurrent.SettableFuture; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import 
java.util.concurrent.Executors; +import java.util.concurrent.ThreadLocalRandom; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Integration tests for asynchronous APIs. */ +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITAsyncAPITest { + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static final String TABLE_NAME = "TestTable"; + private static final String INDEX_NAME = "TestTableByValue"; + private static final List ALL_COLUMNS = Arrays.asList("Key", "StringValue"); + private static final Type TABLE_TYPE = + Type.struct( + StructField.of("Key", Type.string()), StructField.of("StringValue", Type.string())); + + private static Database db; + private static DatabaseClient client; + private static ExecutorService executor; + + @BeforeClass + public static void setUpDatabase() { + db = + env.getTestHelper() + .createTestDatabase( + "CREATE TABLE TestTable (" + + " Key STRING(MAX) NOT NULL," + + " StringValue STRING(MAX)," + + ") PRIMARY KEY (Key)", + "CREATE INDEX TestTableByValue ON TestTable(StringValue)", + "CREATE INDEX TestTableByValueDesc ON TestTable(StringValue DESC)"); + client = env.getTestHelper().getDatabaseClient(db); + executor = Executors.newSingleThreadExecutor(); + } + + @AfterClass + public static void cleanup() { + executor.shutdown(); + } + + @Before + public void setupData() { + client.write(Collections.singletonList(Mutation.delete(TABLE_NAME, KeySet.all()))); + // Includes k0..k14. Note that strings k{10,14} sort between k1 and k2. 
+ List mutations = new ArrayList<>(); + for (int i = 0; i < 15; ++i) { + mutations.add( + Mutation.newInsertOrUpdateBuilder(TABLE_NAME) + .set("Key") + .to("k" + i) + .set("StringValue") + .to("v" + i) + .build()); + } + client.write(mutations); + } + + @Test + public void emptyReadAsync() throws Exception { + final SettableFuture result = SettableFuture.create(); + AsyncResultSet resultSet = + client + .singleUse(TimestampBound.strong()) + .readAsync( + TABLE_NAME, + KeySet.range(KeyRange.closedOpen(Key.of("k99"), Key.of("z"))), + ALL_COLUMNS); + resultSet.setCallback( + executor, + rs -> { + try { + while (true) { + switch (rs.tryNext()) { + case OK: + fail("received unexpected data"); + case NOT_READY: + return CallbackResponse.CONTINUE; + case DONE: + assertThat(rs.getType()).isEqualTo(TABLE_TYPE); + result.set(true); + return CallbackResponse.DONE; + } + } + } catch (Throwable t) { + result.setException(t); + return CallbackResponse.DONE; + } + }); + assertThat(result.get()).isTrue(); + } + + @Test + public void indexEmptyReadAsync() throws Exception { + final SettableFuture result = SettableFuture.create(); + AsyncResultSet resultSet = + client + .singleUse(TimestampBound.strong()) + .readUsingIndexAsync( + TABLE_NAME, + INDEX_NAME, + KeySet.range(KeyRange.closedOpen(Key.of("v99"), Key.of("z"))), + ALL_COLUMNS); + resultSet.setCallback( + executor, + rs -> { + try { + while (true) { + switch (rs.tryNext()) { + case OK: + fail("received unexpected data"); + case NOT_READY: + return CallbackResponse.CONTINUE; + case DONE: + assertThat(rs.getType()).isEqualTo(TABLE_TYPE); + result.set(true); + return CallbackResponse.DONE; + } + } + } catch (Throwable t) { + result.setException(t); + return CallbackResponse.DONE; + } + }); + assertThat(result.get()).isTrue(); + } + + @Test + public void pointReadAsync() throws Exception { + ApiFuture row = + client + .singleUse(TimestampBound.strong()) + .readRowAsync(TABLE_NAME, Key.of("k1"), ALL_COLUMNS); + 
assertThat(row.get()).isNotNull(); + assertThat(row.get().getString(0)).isEqualTo("k1"); + assertThat(row.get().getString(1)).isEqualTo("v1"); + // Ensure that the Struct implementation supports equality properly. + assertThat(row.get()) + .isEqualTo(Struct.newBuilder().set("Key").to("k1").set("StringValue").to("v1").build()); + } + + @Test + public void indexPointReadAsync() throws Exception { + ApiFuture row = + client + .singleUse(TimestampBound.strong()) + .readRowUsingIndexAsync(TABLE_NAME, INDEX_NAME, Key.of("v1"), ALL_COLUMNS); + assertThat(row.get()).isNotNull(); + assertThat(row.get().getString(0)).isEqualTo("k1"); + assertThat(row.get().getString(1)).isEqualTo("v1"); + } + + @Test + public void pointReadNotFound() throws Exception { + ApiFuture row = + client + .singleUse(TimestampBound.strong()) + .readRowAsync(TABLE_NAME, Key.of("k999"), ALL_COLUMNS); + assertThat(row.get()).isNull(); + } + + @Test + public void indexPointReadNotFound() throws Exception { + ApiFuture row = + client + .singleUse(TimestampBound.strong()) + .readRowUsingIndexAsync(TABLE_NAME, INDEX_NAME, Key.of("v999"), ALL_COLUMNS); + assertThat(row.get()).isNull(); + } + + @Test + public void invalidDatabase() throws Exception { + RemoteSpannerHelper helper = env.getTestHelper(); + DatabaseClient invalidClient = + helper.getClient().getDatabaseClient(DatabaseId.of(helper.getInstanceId(), "invalid")); + Thread.sleep(ThreadLocalRandom.current().nextLong(100L)); + try { + // The NOT_FOUND error can come from both the call to invalidClient.singleUse() as well as + // from the call to row.get(), which is why both need to be inside the try block. 
+ ApiFuture row = + invalidClient + .singleUse(TimestampBound.strong()) + .readRowAsync(TABLE_NAME, Key.of("k99"), ALL_COLUMNS); + row.get(); + fail("missing expected exception"); + } catch (ExecutionException | SpannerException thrownException) { + SpannerException spannerException; + if (thrownException instanceof ExecutionException) { + spannerException = (SpannerException) thrownException.getCause(); + } else { + spannerException = (SpannerException) thrownException; + } + assertEquals(ErrorCode.NOT_FOUND, spannerException.getErrorCode()); + } + } + + @Test + public void tableNotFound() throws Exception { + ApiFuture row = + client + .singleUse(TimestampBound.strong()) + .readRowAsync("BadTableName", Key.of("k1"), ALL_COLUMNS); + try { + row.get(); + } catch (ExecutionException e) { + assertThat(e.getCause()).isInstanceOf(SpannerException.class); + SpannerException se = (SpannerException) e.getCause(); + assertThat(se.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + assertThat(se.getMessage()).contains("BadTableName"); + } + } + + @Test + public void columnNotFound() throws Exception { + ApiFuture row = + client + .singleUse(TimestampBound.strong()) + .readRowAsync(TABLE_NAME, Key.of("k1"), Arrays.asList("Key", "BadColumnName")); + try { + row.get(); + } catch (ExecutionException e) { + assertThat(e.getCause()).isInstanceOf(SpannerException.class); + SpannerException se = (SpannerException) e.getCause(); + assertThat(se.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + assertThat(se.getMessage()).contains("BadColumnName"); + } + } + + @Test + public void asyncRunnerFireAndForgetInvalidUpdate() throws Exception { + try { + assertThat(client.singleUse().readRow("TestTable", Key.of("k999"), ALL_COLUMNS)).isNull(); + AsyncRunner runner = client.runAsync(); + ApiFuture res = + runner.runAsync( + txn -> { + // The error returned by this update statement will not bubble up and fail the + // transaction. 
+ txn.executeUpdateAsync(Statement.of("UPDATE BadTableName SET FOO=1 WHERE ID=2")); + return txn.executeUpdateAsync( + Statement.of( + "INSERT INTO TestTable (Key, StringValue) VALUES ('k999', 'v999')")); + }, + executor); + assertThat(res.get()).isEqualTo(1L); + assertThat(client.singleUse().readRow("TestTable", Key.of("k999"), ALL_COLUMNS)).isNotNull(); + } finally { + client.writeAtLeastOnce( + Collections.singletonList(Mutation.delete("TestTable", Key.of("k999")))); + assertThat(client.singleUse().readRow("TestTable", Key.of("k999"), ALL_COLUMNS)).isNull(); + } + } + + @Test + public void testAsyncRunnerReturnsCommitStats() { + assumeFalse("Emulator does not return commit statistics", isUsingEmulator()); + AsyncRunner runner = client.runAsync(Options.commitStats()); + runner.runAsync( + transaction -> { + transaction.buffer( + Mutation.newInsertOrUpdateBuilder(TABLE_NAME) + .set("Key") + .to("k_commit_stats") + .set("StringValue") + .to("Should return commit stats") + .build()); + return ApiFutures.immediateFuture(null); + }, + executor); + assertNotNull(get(runner.getCommitResponse()).getCommitStats()); + // MutationCount = 2 columns + 2 secondary indexes. 
+ assertEquals(4L, get(runner.getCommitResponse()).getCommitStats().getMutationCount()); + } + + @Test + public void testAsyncTransactionManagerReturnsCommitStats() throws InterruptedException { + assumeFalse("Emulator does not return commit statistics", isUsingEmulator()); + try (AsyncTransactionManager manager = client.transactionManagerAsync(Options.commitStats())) { + TransactionContextFuture context = manager.beginAsync(); + while (true) { + try { + get( + context + .then( + (transaction, ignored) -> { + transaction.buffer( + Mutation.newInsertOrUpdateBuilder(TABLE_NAME) + .set("Key") + .to("k_commit_stats") + .set("StringValue") + .to("Should return commit stats") + .build()); + return ApiFutures.immediateFuture(null); + }, + executor) + .commitAsync()); + assertNotNull(get(manager.getCommitResponse()).getCommitStats()); + assertEquals(4L, get(manager.getCommitResponse()).getCommitStats().getMutationCount()); + break; + } catch (AbortedException e) { + Thread.sleep(e.getRetryDelayInMillis()); + context = manager.resetForRetryAsync(); + } + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITAsyncExamplesTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITAsyncExamplesTest.java new file mode 100644 index 000000000000..82493bfdbe2b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITAsyncExamplesTest.java @@ -0,0 +1,511 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.fail; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.cloud.spanner.AsyncResultSet; +import com.google.cloud.spanner.AsyncResultSet.CallbackResponse; +import com.google.cloud.spanner.AsyncResultSet.ReadyCallback; +import com.google.cloud.spanner.AsyncRunner; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ReadOnlyTransaction; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterables; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.Deque; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Integration tests for asynchronous APIs. 
*/ +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITAsyncExamplesTest { + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static final String TABLE_NAME = "TestTable"; + private static final String INDEX_NAME = "TestTableByValue"; + private static final List ALL_COLUMNS = Arrays.asList("Key", "StringValue"); + private static final ImmutableList ALL_VALUES_IN_PK_ORDER = + ImmutableList.of( + "v0", "v1", "v10", "v11", "v12", "v13", "v14", "v2", "v3", "v4", "v5", "v6", "v7", "v8", + "v9"); + + private static Database db; + private static DatabaseClient client; + private static ExecutorService executor; + + @BeforeClass + public static void setUpDatabase() { + db = + env.getTestHelper() + .createTestDatabase( + "CREATE TABLE TestTable (" + + " Key STRING(MAX) NOT NULL," + + " StringValue STRING(MAX)," + + ") PRIMARY KEY (Key)", + "CREATE INDEX TestTableByValue ON TestTable(StringValue)", + "CREATE INDEX TestTableByValueDesc ON TestTable(StringValue DESC)"); + client = env.getTestHelper().getDatabaseClient(db); + + // Includes k0..k14. Note that strings k{10,14} sort between k1 and k2. 
+ List mutations = new ArrayList<>(); + for (int i = 0; i < 15; ++i) { + mutations.add( + Mutation.newInsertOrUpdateBuilder(TABLE_NAME) + .set("Key") + .to("k" + i) + .set("StringValue") + .to("v" + i) + .build()); + } + client.write(mutations); + executor = Executors.newScheduledThreadPool(8); + } + + @AfterClass + public static void cleanup() { + executor.shutdown(); + } + + @Test + public void readAsync() throws Exception { + final SettableApiFuture> future = SettableApiFuture.create(); + try (AsyncResultSet rs = client.singleUse().readAsync(TABLE_NAME, KeySet.all(), ALL_COLUMNS)) { + rs.setCallback( + executor, + new ReadyCallback() { + final List values = new LinkedList<>(); + + @Override + public CallbackResponse cursorReady(AsyncResultSet resultSet) { + try { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + future.set(values); + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + values.add(resultSet.getString("StringValue")); + break; + } + } + } catch (Throwable t) { + future.setException(t); + return CallbackResponse.DONE; + } + } + }); + } + assertThat(future.get()).containsExactlyElementsIn(ALL_VALUES_IN_PK_ORDER); + } + + @Test + public void readUsingIndexAsync() throws Exception { + final SettableApiFuture> future = SettableApiFuture.create(); + try (AsyncResultSet rs = + client.singleUse().readUsingIndexAsync(TABLE_NAME, INDEX_NAME, KeySet.all(), ALL_COLUMNS)) { + rs.setCallback( + executor, + new ReadyCallback() { + final List values = new LinkedList<>(); + + @Override + public CallbackResponse cursorReady(AsyncResultSet resultSet) { + try { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + future.set(values); + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + values.add(resultSet.getString("StringValue")); + break; + } + } + } catch (Throwable t) { + future.setException(t); + return CallbackResponse.DONE; + } + } + }); + } + 
assertThat(future.get()).containsExactlyElementsIn(ALL_VALUES_IN_PK_ORDER); + } + + @Test + public void readRowAsync() throws Exception { + ApiFuture row = client.singleUse().readRowAsync(TABLE_NAME, Key.of("k1"), ALL_COLUMNS); + assertThat(row.get().getString("StringValue")).isEqualTo("v1"); + } + + @Test + public void readRowUsingIndexAsync() throws Exception { + ApiFuture row = + client + .singleUse() + .readRowUsingIndexAsync(TABLE_NAME, INDEX_NAME, Key.of("v2"), ALL_COLUMNS); + assertThat(row.get().getString("Key")).isEqualTo("k2"); + } + + @Test + public void executeQueryAsync() throws Exception { + final ImmutableList keys = ImmutableList.of("k3", "k4"); + final SettableApiFuture> future = SettableApiFuture.create(); + try (AsyncResultSet rs = + client + .singleUse() + .executeQueryAsync( + Statement.newBuilder("SELECT StringValue FROM TestTable WHERE Key IN UNNEST(@keys)") + .bind("keys") + .toStringArray(keys) + .build())) { + rs.setCallback( + executor, + new ReadyCallback() { + final List values = new LinkedList<>(); + + @Override + public CallbackResponse cursorReady(AsyncResultSet resultSet) { + try { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + future.set(values); + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + values.add(resultSet.getString("StringValue")); + break; + } + } + } catch (Throwable t) { + future.setException(t); + return CallbackResponse.DONE; + } + } + }); + } + assertThat(future.get()).containsExactly("v3", "v4"); + } + + @Test + public void runAsync() throws Exception { + AsyncRunner runner = client.runAsync(); + ApiFuture insertCount = + runner.runAsync( + txn -> { + // Even though this is a shoot-and-forget asynchronous DML statement, it is + // guaranteed to be executed within the transaction before the commit is executed. 
+ return txn.executeUpdateAsync( + Statement.newBuilder( + "INSERT INTO TestTable (Key, StringValue) VALUES (@key, @value)") + .bind("key") + .to("k999") + .bind("value") + .to("v999") + .build()); + }, + executor); + assertThat(insertCount.get()).isEqualTo(1L); + if (env.getTestHelper().getOptions().getSessionPoolOptions().getUseMultiplexedSessionForRW()) { + // The runAsync() method should only be called once on the runner. + // However, due to a bug in regular sessions, it can be executed multiple times on the same + // runner. + runner = client.runAsync(); + } + ApiFuture deleteCount = + runner.runAsync( + txn -> + txn.executeUpdateAsync( + Statement.newBuilder("DELETE FROM TestTable WHERE Key=@key") + .bind("key") + .to("k999") + .build()), + executor); + assertThat(deleteCount.get()).isEqualTo(1L); + } + + @Test + public void runAsyncBatchUpdate() throws Exception { + AsyncRunner runner = client.runAsync(); + ApiFuture insertCount = + runner.runAsync( + txn -> { + // Even though this is a shoot-and-forget asynchronous DML statement, it is + // guaranteed to be executed within the transaction before the commit is executed. + return txn.batchUpdateAsync( + ImmutableList.of( + Statement.newBuilder( + "INSERT INTO TestTable (Key, StringValue) VALUES (@key, @value)") + .bind("key") + .to("k997") + .bind("value") + .to("v997") + .build(), + Statement.newBuilder( + "INSERT INTO TestTable (Key, StringValue) VALUES (@key, @value)") + .bind("key") + .to("k998") + .bind("value") + .to("v998") + .build(), + Statement.newBuilder( + "INSERT INTO TestTable (Key, StringValue) VALUES (@key, @value)") + .bind("key") + .to("k999") + .bind("value") + .to("v999") + .build())); + }, + executor); + assertThat(insertCount.get()).asList().containsExactly(1L, 1L, 1L); + if (env.getTestHelper().getOptions().getSessionPoolOptions().getUseMultiplexedSessionForRW()) { + // The runAsync() method should only be called once on the runner. 
+ // However, due to a bug in regular sessions, it can be executed multiple times on the same + // runner. + runner = client.runAsync(); + } + ApiFuture deleteCount = + runner.runAsync( + txn -> + txn.batchUpdateAsync( + ImmutableList.of( + Statement.newBuilder("DELETE FROM TestTable WHERE Key=@key") + .bind("key") + .to("k997") + .build(), + Statement.newBuilder("DELETE FROM TestTable WHERE Key=@key") + .bind("key") + .to("k998") + .build(), + Statement.newBuilder("DELETE FROM TestTable WHERE Key=@key") + .bind("key") + .to("k999") + .build())), + executor); + assertThat(deleteCount.get()).asList().containsExactly(1L, 1L, 1L); + } + + @Test + public void readOnlyTransaction() throws Exception { + ImmutableList keys1 = ImmutableList.of("k10", "k11", "k12"); + ImmutableList keys2 = ImmutableList.of("k1", "k2", "k3"); + ApiFuture> values1; + ApiFuture> values2; + try (ReadOnlyTransaction tx = client.readOnlyTransaction()) { + try (AsyncResultSet rs = + tx.executeQueryAsync( + Statement.newBuilder("SELECT * FROM TestTable WHERE Key IN UNNEST(@keys)") + .bind("keys") + .toStringArray(keys1) + .build())) { + values1 = rs.toListAsync(input -> input.getString("StringValue"), executor); + } + try (AsyncResultSet rs = + tx.executeQueryAsync( + Statement.newBuilder("SELECT * FROM TestTable WHERE Key IN UNNEST(@keys)") + .bind("keys") + .toStringArray(keys2) + .build())) { + values2 = rs.toListAsync(input -> input.getString("StringValue"), executor); + } + } + + ApiFuture>> allAsListValues = + ApiFutures.allAsList(Arrays.asList(values1, values2)); + ApiFuture> allValues = + ApiFutures.transform( + allAsListValues, + input -> + Iterables.mergeSorted( + input, Comparator.comparing(o -> Integer.valueOf(o.substring(1)))), + executor); + assertThat(allValues.get()).containsExactly("v1", "v2", "v3", "v10", "v11", "v12"); + } + + @Test + public void pauseResume() throws Exception { + Statement unevenStatement = + Statement.of( + "SELECT * FROM TestTable WHERE MOD(CAST(SUBSTR(Key, 2) 
AS INT64), 2) = 1 ORDER BY" + + " CAST(SUBSTR(Key, 2) AS INT64)"); + Statement evenStatement = + Statement.of( + "SELECT * FROM TestTable WHERE MOD(CAST(SUBSTR(Key, 2) AS INT64), 2) = 0 ORDER BY" + + " CAST(SUBSTR(Key, 2) AS INT64)"); + + final Object lock = new Object(); + final SettableApiFuture evenFinished = SettableApiFuture.create(); + final SettableApiFuture unevenFinished = SettableApiFuture.create(); + final CountDownLatch evenReturnedFirstRow = new CountDownLatch(1); + final Deque allValues = new LinkedList<>(); + try (ReadOnlyTransaction tx = client.readOnlyTransaction()) { + try (AsyncResultSet evenRs = tx.executeQueryAsync(evenStatement); + AsyncResultSet unevenRs = tx.executeQueryAsync(unevenStatement)) { + evenRs.setCallback( + executor, + resultSet -> { + try { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + evenFinished.set(true); + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + synchronized (lock) { + allValues.add(resultSet.getString("StringValue")); + } + evenReturnedFirstRow.countDown(); + return CallbackResponse.PAUSE; + } + } + } catch (Throwable t) { + evenFinished.setException(t); + return CallbackResponse.DONE; + } + }); + + unevenRs.setCallback( + executor, + resultSet -> { + try { + // Make sure the even result set has returned the first before we start the uneven + // results. 
+ evenReturnedFirstRow.await(); + while (true) { + switch (resultSet.tryNext()) { + case DONE: + unevenFinished.set(true); + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + synchronized (lock) { + allValues.add(resultSet.getString("StringValue")); + } + return CallbackResponse.PAUSE; + } + } + } catch (Throwable t) { + unevenFinished.setException(t); + return CallbackResponse.DONE; + } + }); + while (!(evenFinished.isDone() && unevenFinished.isDone())) { + synchronized (lock) { + if (allValues.peekLast() != null) { + if (Integer.parseInt(allValues.peekLast().substring(1)) % 2 == 1) { + evenRs.resume(); + } else { + unevenRs.resume(); + } + } + if (allValues.size() == 15) { + unevenRs.resume(); + evenRs.resume(); + } + } + } + } + } + assertThat(ApiFutures.allAsList(Arrays.asList(evenFinished, unevenFinished)).get()) + .containsExactly(Boolean.TRUE, Boolean.TRUE); + assertThat(allValues) + .containsExactly( + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", + "v14"); + } + + @Test + public void cancel() throws Exception { + final List values = new LinkedList<>(); + final SettableApiFuture finished = SettableApiFuture.create(); + final CountDownLatch receivedFirstRow = new CountDownLatch(1); + final CountDownLatch cancelled = new CountDownLatch(1); + try (AsyncResultSet rs = client.singleUse().readAsync(TABLE_NAME, KeySet.all(), ALL_COLUMNS)) { + rs.setCallback( + executor, + resultSet -> { + try { + while (true) { + switch (resultSet.tryNext()) { + case DONE: + finished.set(true); + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + case OK: + values.add(resultSet.getString("StringValue")); + receivedFirstRow.countDown(); + cancelled.await(); + break; + } + } + } catch (Throwable t) { + finished.setException(t); + return CallbackResponse.DONE; + } + }); + receivedFirstRow.await(); + rs.cancel(); + } + cancelled.countDown(); + try { + 
finished.get(); + fail("missing expected exception"); + } catch (ExecutionException e) { + assertThat(e.getCause()).isInstanceOf(SpannerException.class); + SpannerException se = (SpannerException) e.getCause(); + assertThat(se.getErrorCode()).isEqualTo(ErrorCode.CANCELLED); + assertThat(values).containsExactly("v0"); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITAutogeneratedAdminClientTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITAutogeneratedAdminClientTest.java new file mode 100644 index 000000000000..7489f3f9a478 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITAutogeneratedAdminClientTest.java @@ -0,0 +1,285 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static com.google.cloud.spanner.testing.ExperimentalHostHelper.isExperimentalHost; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.*; +import static org.junit.Assume.assumeFalse; + +import com.google.api.gax.rpc.PermissionDeniedException; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.InstanceId; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient; +import com.google.cloud.spanner.testing.RemoteSpannerHelper; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterators; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.DatabaseDialect; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.InstanceName; +import com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.ProjectName; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import 
org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +/** + * Integration tests for testing the auto-generated database admin {@link + * com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient} and instance admin clients {@link + * com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient} + */ +@Category(ParallelIntegrationTest.class) +@RunWith(Parameterized.class) +public class ITAutogeneratedAdminClientTest { + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static DatabaseAdminClient dbAdminClient; + + private static InstanceAdminClient instanceAdminClient; + private static RemoteSpannerHelper testHelper; + + private static List databasesToDrop; + + @Parameter public DatabaseDialect dialect; + + @Parameters(name = "Dialect = {0}") + public static List data() { + return ImmutableList.of(DatabaseDialect.GOOGLE_STANDARD_SQL, DatabaseDialect.POSTGRESQL); + } + + @BeforeClass + public static void setUp() { + assumeFalse("Experimental Host does not support database roles", isExperimentalHost()); + assumeFalse("Emulator does not support database roles", isUsingEmulator()); + testHelper = env.getTestHelper(); + dbAdminClient = testHelper.getClient().createDatabaseAdminClient(); + instanceAdminClient = testHelper.getClient().createInstanceAdminClient(); + databasesToDrop = new ArrayList<>(); + } + + @AfterClass + public static void cleanup() throws Exception { + if (databasesToDrop != null) { + for (DatabaseName databaseName : databasesToDrop) { + try { + dbAdminClient.dropDatabase(databaseName); + } catch (Exception e) { + System.err.println( + "Failed to drop database " + databaseName + ", skipping...: " + e.getMessage()); + } + } + } + } + + @Test + public void grantAndRevokeDatabaseRolePermissions() throws Exception { + // Create database with table and role permission. 
+ final String dbRoleParent = "parent"; + final String databaseId = testHelper.getUniqueDatabaseId(); + final InstanceId instanceId = testHelper.getInstanceId(); + + final String createTableT = getCreateTableStatement(); + final String createRoleParent = String.format("CREATE ROLE %s", dbRoleParent); + final String grantSelectOnTableToParent = + dialect == DatabaseDialect.POSTGRESQL + ? String.format("GRANT SELECT ON TABLE T TO %s", dbRoleParent) + : String.format("GRANT SELECT ON TABLE T TO ROLE %s", dbRoleParent); + final Database createdDatabase = + createAndUpdateDatabase( + testHelper.getOptions().getProjectId(), + instanceId, + databaseId, + ImmutableList.of(createTableT, createRoleParent, grantSelectOnTableToParent)); + + // Connect to db with dbRoleParent. + SpannerOptions options = + testHelper.getOptions().toBuilder().setDatabaseRole(dbRoleParent).build(); + + Spanner spanner = options.getService(); + DatabaseId id = DatabaseId.of(createdDatabase.getName()); + DatabaseClient dbClient = spanner.getDatabaseClient(id); + + // Test SELECT permissions to role dbRoleParent on table T. + // Query using dbRoleParent should return result. + try (ResultSet rs = + dbClient.singleUse().executeQuery(Statement.of("SELECT COUNT(*) as cnt FROM T"))) { + assertTrue(rs.next()); + assertEquals(dbClient.getDatabaseRole(), dbRoleParent); + } catch (PermissionDeniedException e) { + // This is not expected + fail("Got PermissionDeniedException when it should not have occurred."); + } + + // Revoke select Permission for dbRoleParent. + final String revokeSelectOnTableFromParent = + dialect == DatabaseDialect.POSTGRESQL + ? 
String.format("REVOKE SELECT ON TABLE T FROM %s", dbRoleParent) + : String.format("REVOKE SELECT ON TABLE T FROM ROLE %s", dbRoleParent); + + dbAdminClient + .updateDatabaseDdlAsync( + DatabaseName.of(options.getProjectId(), instanceId.getInstance(), databaseId), + ImmutableList.of(revokeSelectOnTableFromParent)) + .get(5, TimeUnit.MINUTES); + + // Test SELECT permissions to role dbRoleParent on table T. + // Query using dbRoleParent should return PermissionDeniedException. + try (ResultSet rs = + dbClient.singleUse().executeQuery(Statement.of("SELECT COUNT(*) as cnt FROM T"))) { + SpannerException e = assertThrows(SpannerException.class, () -> rs.next()); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.PERMISSION_DENIED); + assertThat(e.getMessage()).contains(dbRoleParent); + } + // Drop role and table. + final String dropTableT = "DROP TABLE T"; + final String dropRoleParent = String.format("DROP ROLE %s", dbRoleParent); + dbAdminClient + .updateDatabaseDdlAsync( + DatabaseName.of(options.getProjectId(), instanceId.getInstance(), databaseId), + ImmutableList.of(dropTableT, dropRoleParent)) + .get(5, TimeUnit.MINUTES); + databasesToDrop.add(DatabaseName.parse(createdDatabase.getName())); + } + + @Test + public void roleWithNoPermissions() throws Exception { + final String dbRoleOrphan = testHelper.getUniqueDatabaseRole(); + final String databaseId = testHelper.getUniqueDatabaseId(); + final InstanceId instanceId = testHelper.getInstanceId(); + + final String createTableT = getCreateTableStatement(); + final String createRoleOrphan = String.format("CREATE ROLE %s", dbRoleOrphan); + + final Database createdDatabase = + createAndUpdateDatabase( + testHelper.getOptions().getProjectId(), + instanceId, + databaseId, + ImmutableList.of(createTableT, createRoleOrphan)); + + // Connect to db with dbRoleOrphan + SpannerOptions options = + testHelper.getOptions().toBuilder().setDatabaseRole(dbRoleOrphan).build(); + + Spanner spanner = options.getService(); + DatabaseId id 
= DatabaseId.of(createdDatabase.getName()); + DatabaseClient dbClient = spanner.getDatabaseClient(id); + + // Test SELECT permissions to role dbRoleOrphan on table T. + // Query using dbRoleOrphan should return PermissionDeniedException. + try (ResultSet rs = + dbClient.singleUse().executeQuery(Statement.of("SELECT COUNT(*) as cnt FROM T"))) { + SpannerException e = assertThrows(SpannerException.class, () -> rs.next()); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.PERMISSION_DENIED); + assertThat(e.getMessage()).contains(dbRoleOrphan); + } + // Drop role and table. + final String dropTableT = "DROP TABLE T"; + final String dropRoleParent = String.format("DROP ROLE %s", dbRoleOrphan); + dbAdminClient + .updateDatabaseDdlAsync( + DatabaseName.of(options.getProjectId(), instanceId.getInstance(), databaseId), + ImmutableList.of(dropTableT, dropRoleParent)) + .get(5, TimeUnit.MINUTES); + + databasesToDrop.add(DatabaseName.parse(createdDatabase.getName())); + } + + @Test + public void instanceConfigOperations() { + List configs = new ArrayList<>(); + Iterators.addAll( + configs, + instanceAdminClient + .listInstanceConfigs(ProjectName.of(testHelper.getOptions().getProjectId())) + .iterateAll() + .iterator()); + assertThat(configs.isEmpty()).isFalse(); + InstanceConfig config = instanceAdminClient.getInstanceConfig(configs.get(0).getName()); + assertThat(config.getName()).isEqualTo(configs.get(0).getName()); + } + + private Database createAndUpdateDatabase( + String projectId, + final InstanceId instanceId, + final String databaseId, + final List statements) + throws Exception { + if (dialect == DatabaseDialect.POSTGRESQL) { + // DDL statements other than are not allowed in database creation request + // for PostgreSQL-enabled databases. 
+ CreateDatabaseRequest createDatabaseRequest = + CreateDatabaseRequest.newBuilder() + .setParent(InstanceName.of(projectId, instanceId.getInstance()).toString()) + .setCreateStatement(getCreateDatabaseStatement(databaseId, dialect)) + .setDatabaseDialect(dialect) + .build(); + Database database = + dbAdminClient.createDatabaseAsync(createDatabaseRequest).get(10, TimeUnit.MINUTES); + dbAdminClient.updateDatabaseDdlAsync(database.getName(), statements).get(5, TimeUnit.MINUTES); + return database; + } else { + CreateDatabaseRequest createDatabaseRequest = + CreateDatabaseRequest.newBuilder() + .setParent(InstanceName.of(projectId, instanceId.getInstance()).toString()) + .setCreateStatement(getCreateDatabaseStatement(databaseId, dialect)) + .setDatabaseDialect(dialect) + .addAllExtraStatements(statements) + .build(); + return dbAdminClient.createDatabaseAsync(createDatabaseRequest).get(10, TimeUnit.MINUTES); + } + } + + private String getCreateTableStatement() { + if (dialect == DatabaseDialect.POSTGRESQL) { + return "CREATE TABLE IF NOT EXISTS T (" + " \"K\" VARCHAR PRIMARY KEY" + ")"; + } else { + return "CREATE TABLE IF NOT EXISTS T (" + " K STRING(MAX)" + ") PRIMARY KEY (K)"; + } + } + + static String getCreateDatabaseStatement( + final String databaseName, final DatabaseDialect dialect) { + if (dialect == DatabaseDialect.GOOGLE_STANDARD_SQL) { + return "CREATE DATABASE `" + databaseName + "`"; + } else { + return "CREATE DATABASE \"" + databaseName + "\""; + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITBatchDmlTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITBatchDmlTest.java new file mode 100644 index 000000000000..2decef6158e3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITBatchDmlTest.java @@ -0,0 +1,264 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * 
you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assume.assumeFalse; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.SpannerBatchUpdateException; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TransactionRunner; +import com.google.cloud.spanner.TransactionRunner.TransactionCallable; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Integration tests for DML. 
*/ +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public final class ITBatchDmlTest { + + private static Database db; + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + + private static final String INSERT_DML = + "INSERT INTO T (k, v) VALUES ('boo1', 1), ('boo2', 2), ('boo3', 3), ('boo4', 4);"; + private static final String UPDATE_DML = "UPDATE T SET T.V = 100 WHERE T.K LIKE 'boo%';"; + private static final String DELETE_DML = "DELETE FROM T WHERE T.K like 'boo%';"; + private static DatabaseClient client; + + @BeforeClass + public static void createDatabase() { + db = env.getTestHelper().createTestDatabase(); + client = env.getTestHelper().getDatabaseClient(db); + } + + @Before + public void createTable() throws Exception { + String ddl = + "CREATE TABLE T (" + " K STRING(MAX) NOT NULL," + " V INT64," + ") PRIMARY KEY (K)"; + OperationFuture op = + db.updateDdl(Collections.singletonList(ddl), null); + op.get(); + } + + @After + public void dropTable() throws Exception { + String ddl = "DROP TABLE T"; + OperationFuture op = + db.updateDdl(Collections.singletonList(ddl), null); + op.get(); + } + + @Test + public void noStatementsInRequest() { + // TODO(sriharshach): Remove this skip once the backend supports committing empty transactions. 
+ assumeFalse( + "Skipping for multiplexed sessions since it does not allow empty transactions to commit", + isUsingMultiplexedSessionsForRW()); + final TransactionCallable callable = + transaction -> { + List stmts = new ArrayList<>(); + long[] rowCounts; + try { + rowCounts = transaction.batchUpdate(stmts); + Assert.fail("Expecting an exception."); + } catch (SpannerException e) { + assertThat(e instanceof SpannerBatchUpdateException).isFalse(); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + rowCounts = new long[0]; + } + return rowCounts; + }; + TransactionRunner runner = client.readWriteTransaction(); + long[] rowCounts = runner.run(callable); + assertThat(rowCounts.length).isEqualTo(0); + } + + @Test + public void batchDml() { + final TransactionCallable callable = + transaction -> { + List stmts = new ArrayList<>(); + stmts.add(Statement.of(INSERT_DML)); + stmts.add(Statement.of(UPDATE_DML)); + stmts.add(Statement.of(DELETE_DML)); + return transaction.batchUpdate(stmts); + }; + TransactionRunner runner = client.readWriteTransaction(); + long[] rowCounts = runner.run(callable); + assertThat(rowCounts.length).isEqualTo(3); + for (long rc : rowCounts) { + assertThat(rc).isEqualTo(4); + } + } + + @Test + public void mixedBatchDmlAndDml() { + final TransactionCallable callable = + transaction -> { + long rowCount = transaction.executeUpdate(Statement.of(INSERT_DML)); + List stmts = new ArrayList<>(); + stmts.add(Statement.of(UPDATE_DML)); + stmts.add(Statement.of(DELETE_DML)); + long[] batchRowCounts = transaction.batchUpdate(stmts); + long[] rowCounts = new long[batchRowCounts.length + 1]; + System.arraycopy(batchRowCounts, 0, rowCounts, 0, batchRowCounts.length); + rowCounts[batchRowCounts.length] = rowCount; + return rowCounts; + }; + TransactionRunner runner = client.readWriteTransaction(); + long[] rowCounts = runner.run(callable); + assertThat(rowCounts.length).isEqualTo(3); + for (long rc : rowCounts) { + 
assertThat(rc).isEqualTo(4); + } + } + + @Test + public void errorBatchDmlIllegalStatement() { + final TransactionCallable callable = + transaction -> { + List stmts = new ArrayList<>(); + stmts.add(Statement.of(INSERT_DML)); + stmts.add(Statement.of("some illegal statement")); + stmts.add(Statement.of(UPDATE_DML)); + return transaction.batchUpdate(stmts); + }; + TransactionRunner runner = client.readWriteTransaction(); + try { + runner.run(callable); + Assert.fail("Expecting an exception."); + } catch (SpannerBatchUpdateException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()).contains("is not valid DML."); + long[] rowCounts = e.getUpdateCounts(); + assertThat(rowCounts.length).isEqualTo(1); + for (long rc : rowCounts) { + assertThat(rc).isEqualTo(4); + } + } + } + + @Test + public void errorBatchDmlAlreadyExist() { + final TransactionCallable callable = + transaction -> { + List stmts = new ArrayList<>(); + stmts.add(Statement.of(INSERT_DML)); + stmts.add(Statement.of(INSERT_DML)); // should fail + stmts.add(Statement.of(UPDATE_DML)); + return transaction.batchUpdate(stmts); + }; + TransactionRunner runner = client.readWriteTransaction(); + try { + runner.run(callable); + Assert.fail("Expecting an exception."); + } catch (SpannerBatchUpdateException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.ALREADY_EXISTS); + long[] rowCounts = e.getUpdateCounts(); + assertThat(rowCounts.length).isEqualTo(1); + for (long rc : rowCounts) { + assertThat(rc).isEqualTo(4); + } + } + } + + @Test + public void largeBatchDml() { + List stmts = new LinkedList<>(); + for (int i = 0; i < 40; i++) { + stmts.add(Statement.of("INSERT INTO T (k, v) VALUES ('boo" + i + "', " + i + ");")); + } + + for (int i = 0; i < 40; i++) { + stmts.add(Statement.of("DELETE FROM T WHERE T.K = 'boo" + i + "';")); + } + long[] expectedRowCounts = new long[stmts.size()]; + Arrays.fill(expectedRowCounts, 1L); + + final TransactionCallable 
callable = transaction -> transaction.batchUpdate(stmts); + TransactionRunner runner = client.readWriteTransaction(); + long[] actualRowCounts = runner.run(callable); + assertThat(actualRowCounts.length).isEqualTo(80); + assertThat(expectedRowCounts).isEqualTo(actualRowCounts); + } + + @Test + public void largeBatchDml_withParameterisedStatements() { + List stmts = new LinkedList<>(); + String insertQuery = "INSERT INTO T(k, v) VALUES(@key, @val)"; + for (int i = 0; i < 80; i++) { + stmts.add( + Statement.newBuilder(insertQuery) + .bind("key") + .to("'boo" + i + "'") + .bind("val") + .to(i) + .build()); + } + long[] expectedRowCounts = new long[stmts.size()]; + Arrays.fill(expectedRowCounts, 1L); + + final TransactionCallable callable = transaction -> transaction.batchUpdate(stmts); + TransactionRunner runner = client.readWriteTransaction(); + long[] actualRowCounts = runner.run(callable); + + assertThat(actualRowCounts.length).isEqualTo(80); + assertThat(expectedRowCounts).isEqualTo(actualRowCounts); + } + + @Test + public void largeBatchDml_withNonParameterisedStatements() { + List stmts = new LinkedList<>(); + for (int i = 0; i < 80; i++) { + stmts.add(Statement.of("INSERT INTO T (k, v) VALUES ('boo" + i + "', " + i + ");")); + } + long[] expectedRowCounts = new long[stmts.size()]; + Arrays.fill(expectedRowCounts, 1L); + + final TransactionCallable callable = transaction -> transaction.batchUpdate(stmts); + TransactionRunner runner = client.readWriteTransaction(); + long[] actualRowCounts = runner.run(callable); + + assertThat(actualRowCounts.length).isEqualTo(80); + assertThat(expectedRowCounts).isEqualTo(actualRowCounts); + } + + boolean isUsingMultiplexedSessionsForRW() { + return env.getTestHelper().getOptions().getSessionPoolOptions().getUseMultiplexedSessionForRW(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITBatchReadTest.java 
b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITBatchReadTest.java new file mode 100644 index 000000000000..d18239cf283d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITBatchReadTest.java @@ -0,0 +1,386 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.connection.ITAbstractSpannerTest.extractConnectionUrl; +import static com.google.cloud.spanner.connection.ITAbstractSpannerTest.getKeyFile; +import static com.google.cloud.spanner.connection.ITAbstractSpannerTest.hasValidKeyFile; +import static com.google.cloud.spanner.testing.ExperimentalHostHelper.isExperimentalHost; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.ByteArray; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.BatchClient; +import com.google.cloud.spanner.BatchReadOnlyTransaction; +import com.google.cloud.spanner.BatchTransactionId; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import 
com.google.cloud.spanner.Options; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.Partition; +import com.google.cloud.spanner.PartitionOptions; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.connection.Connection; +import com.google.cloud.spanner.connection.ConnectionOptions; +import com.google.cloud.spanner.connection.PartitionedQueryResultSet; +import com.google.common.collect.ImmutableList; +import com.google.common.hash.HashFunction; +import com.google.common.hash.Hashing; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.BitSet; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +/** + * Integration test reading large amounts of data using the Batch APIs. The size of data ensures + * that multiple partitions are returned by the server. 
+ */ +@Category(ParallelIntegrationTest.class) +@RunWith(Parameterized.class) +public class ITBatchReadTest { + private static int numRows; + + private static final int WRITE_BATCH_SIZE = 1 << 20; + private static final String TABLE_NAME = "BatchTestTable"; + private static final String INDEX_NAME = "TestIndexByValue"; + private static final long STALENESS_MILLISEC = 1000; + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + + private static HashFunction hasher; + private static BatchClient googleStandardSQLBatchClient; + private static Database googleStandardDatabase; + private static BatchClient postgreSQLBatchClient; + private static Database postgreSQLDatabase; + private static final Random RANDOM = new Random(); + + private BatchReadOnlyTransaction batchTxn; + + @Parameters(name = "Dialect = {0}") + public static List data() { + List params = new ArrayList<>(); + params.add(new DialectTestParameter(Dialect.GOOGLE_STANDARD_SQL)); + params.add(new DialectTestParameter(Dialect.POSTGRESQL)); + return params; + } + + @Parameter public DialectTestParameter dialect; + + // Generate a large number of rows to allow multiple read/query partitions. 
+ private static List manyRows() { + List rows = new ArrayList<>(); + rows.addAll(Collections.nCopies(1000, 4096)); + rows.addAll(Collections.nCopies(100, 40960)); + rows.addAll(Collections.nCopies(25, 409600)); + rows.addAll(Collections.nCopies(10, 4 << 20)); + return rows; + } + + @BeforeClass + public static void setUpDatabase() throws Exception { + googleStandardDatabase = + env.getTestHelper() + .createTestDatabase( + "CREATE TABLE " + + TABLE_NAME + + " (" + + " Key INT64 NOT NULL," + + " Data BYTES(MAX)," + + " Fingerprint INT64," + + " Size INT64," + + ") PRIMARY KEY (Key)", + "CREATE INDEX " + INDEX_NAME + " ON " + TABLE_NAME + "(Fingerprint)"); + hasher = Hashing.goodFastHash(64); + googleStandardSQLBatchClient = env.getTestHelper().getBatchClient(googleStandardDatabase); + + List databaseClients = new ArrayList<>(); + databaseClients.add(env.getTestHelper().getDatabaseClient(googleStandardDatabase)); + + postgreSQLDatabase = + env.getTestHelper().createTestDatabase(Dialect.POSTGRESQL, Collections.emptyList()); + env.getTestHelper() + .getClient() + .getDatabaseAdminClient() + .updateDatabaseDdl( + env.getTestHelper().getInstanceId().getInstance(), + postgreSQLDatabase.getId().getDatabase(), + ImmutableList.of( + "CREATE TABLE " + + TABLE_NAME + + " (" + + " Key bigint not null primary key," + + " Data bytea," + + " Fingerprint bigint," + + " Size bigint" + + ")", + "CREATE INDEX " + INDEX_NAME + " ON " + TABLE_NAME + "(Fingerprint)"), + null) + .get(); + postgreSQLBatchClient = env.getTestHelper().getBatchClient(postgreSQLDatabase); + databaseClients.add(env.getTestHelper().getDatabaseClient(postgreSQLDatabase)); + + List rows = manyRows(); + numRows = rows.size(); + for (DatabaseClient dbClient : databaseClients) { + List mutations = new ArrayList<>(); + int totalSize = 0; + int i = 0; + for (int row : rows) { + byte[] data = new byte[row]; + RANDOM.nextBytes(data); + mutations.add( + Mutation.newInsertOrUpdateBuilder(TABLE_NAME) + .set("Key") + .to(i) 
+ .set("Data") + .to(ByteArray.copyFrom(data)) + .set("Fingerprint") + .to(hasher.hashBytes(data).asLong()) + .set("Size") + .to(row) + .build()); + totalSize += row; + i++; + if (totalSize >= WRITE_BATCH_SIZE) { + dbClient.write(mutations); + mutations.clear(); + totalSize = 0; + } + } + if (!mutations.isEmpty()) { + dbClient.write(mutations); + } + } + // Our read/queries are executed with some staleness. + Thread.sleep(2 * STALENESS_MILLISEC); + } + + private BatchClient getBatchClient() { + if (dialect.dialect == Dialect.POSTGRESQL) { + return postgreSQLBatchClient; + } + return googleStandardSQLBatchClient; + } + + private Database getDatabase() { + if (dialect.dialect == Dialect.POSTGRESQL) { + return postgreSQLDatabase; + } + return googleStandardDatabase; + } + + @Test + public void read() { + BitSet seenRows = new BitSet(numRows); + TimestampBound bound = getRandomBound(); + PartitionOptions partitionParams = getRandomPartitionOptions(); + batchTxn = getBatchClient().batchReadOnlyTransaction(bound); + List partitions = + batchTxn.partitionRead( + partitionParams, + TABLE_NAME, + KeySet.all(), + Arrays.asList("Key", "Data", "Fingerprint", "Size")); + BatchTransactionId txnID = batchTxn.getBatchTransactionId(); + fetchAndValidateRows(partitions, txnID, seenRows); + } + + @Test + public void readUsingIndex() { + TimestampBound bound = getRandomBound(); + PartitionOptions partitionParams = getRandomPartitionOptions(); + batchTxn = getBatchClient().batchReadOnlyTransaction(bound); + List partitions = + batchTxn.partitionReadUsingIndex( + partitionParams, + TABLE_NAME, + INDEX_NAME, + KeySet.all(), + Collections.singletonList("Fingerprint")); + BatchTransactionId txnID = batchTxn.getBatchTransactionId(); + int numRowsRead = 0; + for (Partition p : partitions) { + BatchReadOnlyTransaction batchTxnOnEachWorker = + getBatchClient().batchReadOnlyTransaction(txnID); + try (ResultSet result = batchTxnOnEachWorker.execute(p)) { + while (result.next()) { + 
numRowsRead++; + } + } + } + assertThat(numRowsRead).isEqualTo(numRows); + } + + @Test + public void dataBoostRead() { + assumeFalse("data boost is not supported on experimental host yet", isExperimentalHost()); + BitSet seenRows = new BitSet(numRows); + TimestampBound bound = getRandomBound(); + PartitionOptions partitionParams = getRandomPartitionOptions(); + batchTxn = getBatchClient().batchReadOnlyTransaction(bound); + List partitions = + batchTxn.partitionRead( + partitionParams, + TABLE_NAME, + KeySet.all(), + Arrays.asList("Key", "Data", "Fingerprint", "Size"), + Options.dataBoostEnabled(true)); + BatchTransactionId txnID = batchTxn.getBatchTransactionId(); + fetchAndValidateRows(partitions, txnID, seenRows); + } + + @After + public void tearDown() { + if (batchTxn != null) { + batchTxn.close(); + } + } + + @Test + public void query() { + BitSet seenRows = new BitSet(numRows); + TimestampBound bound = getRandomBound(); + PartitionOptions partitionParams = getRandomPartitionOptions(); + batchTxn = getBatchClient().batchReadOnlyTransaction(bound); + List partitions = + batchTxn.partitionQuery( + partitionParams, + Statement.of("SELECT Key, Data, Fingerprint, Size FROM " + TABLE_NAME)); + BatchTransactionId txnID = batchTxn.getBatchTransactionId(); + fetchAndValidateRows(partitions, txnID, seenRows); + } + + private PartitionOptions getRandomPartitionOptions() { + int desiredBytesPerBatch = 1 << 30; + int maxPartitionCount = 100; + PartitionOptions parameters = + PartitionOptions.newBuilder() + .setPartitionSizeBytes(desiredBytesPerBatch) + .setMaxPartitions(maxPartitionCount) + .build(); + if (RANDOM.nextInt(2) == 1) { + parameters = PartitionOptions.getDefaultInstance(); + } + return parameters; + } + + @Test + public void dataBoostQuery() { + assumeFalse("data boost is not supported on experimental host yet", isExperimentalHost()); + BitSet seenRows = new BitSet(numRows); + TimestampBound bound = getRandomBound(); + PartitionOptions partitionParams = 
getRandomPartitionOptions(); + batchTxn = getBatchClient().batchReadOnlyTransaction(bound); + List partitions = + batchTxn.partitionQuery( + partitionParams, + Statement.of("SELECT Key, Data, Fingerprint, Size FROM " + TABLE_NAME), + Options.dataBoostEnabled(true)); + BatchTransactionId txnID = batchTxn.getBatchTransactionId(); + fetchAndValidateRows(partitions, txnID, seenRows); + } + + @Test + public void testRunPartitionedQuery() { + StringBuilder url = extractConnectionUrl(env.getTestHelper().getOptions(), getDatabase()); + ConnectionOptions.Builder builder = ConnectionOptions.newBuilder().setUri(url.toString()); + if (hasValidKeyFile()) { + builder.setCredentialsUrl(getKeyFile()); + } + ConnectionOptions options = builder.build(); + try (Connection connection = options.getConnection()) { + // Use dynamic parallelism. + connection.setMaxPartitionedParallelism(0); + + BitSet seenRows = new BitSet(numRows); + try (PartitionedQueryResultSet resultSet = + connection.runPartitionedQuery( + Statement.of("SELECT Key, Data, Fingerprint, Size FROM " + TABLE_NAME), + getRandomPartitionOptions())) { + validate(resultSet, seenRows); + // verify all rows were read from the database. 
+ assertEquals(numRows, seenRows.nextClearBit(0)); + + assertTrue( + "Partitions: " + resultSet.getNumPartitions(), resultSet.getNumPartitions() >= 1); + assertEquals( + "Actual parallelism: " + resultSet.getParallelism(), + Math.min(resultSet.getNumPartitions(), Runtime.getRuntime().availableProcessors()), + resultSet.getParallelism()); + } + } + } + + private TimestampBound getRandomBound() { + Date date = new Date(); + switch (RANDOM.nextInt(3)) { + case 0: + return TimestampBound.strong(); + case 1: + return TimestampBound.ofExactStaleness(STALENESS_MILLISEC, TimeUnit.MILLISECONDS); + default: + return TimestampBound.ofReadTimestamp( + Timestamp.of(new Date(date.getTime() - STALENESS_MILLISEC))); + } + } + + private void fetchAndValidateRows( + List partitions, BatchTransactionId txnID, BitSet seenRows) { + for (Partition p : partitions) { + BatchReadOnlyTransaction batchTxnOnEachWorker = + getBatchClient().batchReadOnlyTransaction(txnID); + try (ResultSet result = batchTxnOnEachWorker.execute(p)) { + // validate no duplicate rows; verify all columns read. + validate(result, seenRows); + } + } + // verify all rows were read from the database. 
+ assertThat(seenRows.nextClearBit(0)).isEqualTo(numRows); + } + + private void validate(ResultSet resultSet, BitSet seenRows) { + while (resultSet.next()) { + assertThat(seenRows.get((int) resultSet.getLong(0))).isFalse(); + seenRows.set((int) resultSet.getLong(0)); + ByteArray data = resultSet.getBytes(1); + assertThat(data.length()).isEqualTo(resultSet.getLong(3)); + assertThat(resultSet.getLong(2)).isEqualTo(hasher.hashBytes(data.toByteArray()).asLong()); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITBuiltInMetricsTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITBuiltInMetricsTest.java new file mode 100644 index 000000000000..f0e1cf208613 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITBuiltInMetricsTest.java @@ -0,0 +1,136 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.testing.ExperimentalHostHelper.isExperimentalHost; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.monitoring.v3.MetricServiceClient; +import com.google.cloud.spanner.*; +import com.google.cloud.spanner.testing.EmulatorSpannerHelper; +import com.google.common.base.Stopwatch; +import com.google.monitoring.v3.ListTimeSeriesRequest; +import com.google.monitoring.v3.ListTimeSeriesResponse; +import com.google.monitoring.v3.ProjectName; +import com.google.monitoring.v3.TimeInterval; +import com.google.protobuf.util.Timestamps; +import java.io.IOException; +import java.time.Duration; +import java.time.Instant; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITBuiltInMetricsTest { + + private static Database db; + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + + private static DatabaseClient client; + + private static MetricServiceClient metricClient; + + private static java.util.List METRICS = + new java.util.ArrayList() { + { + add("operation_latencies"); + add("attempt_latencies"); + add("operation_count"); + add("attempt_count"); + add("afe_latencies"); + } + }; + + @BeforeClass + public static void setUp() throws IOException { + assumeFalse("not applicable for experimental host", isExperimentalHost()); + assumeFalse("This test requires credentials", EmulatorSpannerHelper.isUsingEmulator()); + metricClient = MetricServiceClient.create(); + // Enable BuiltinMetrics when the metrics are GA'ed + db = env.getTestHelper().createTestDatabase(); + client = 
env.getTestHelper().getDatabaseClient(db); + if (!env.getTestHelper().getOptions().isEnableDirectAccess()) { + METRICS.add("gfe_latencies"); + } + } + + @After + public void tearDown() { + if (metricClient != null) { + metricClient.close(); + } + } + + @Test + public void testBuiltinMetricsWithDefaultOTEL() throws Exception { + // This stopwatch is used to limit how long the polling loop below fetches metric data. + Stopwatch metricsPollingStopwatch = Stopwatch.createStarted(); + Instant start = Instant.now().minus(Duration.ofMinutes(2)); + Instant end = Instant.now().plus(Duration.ofMinutes(3)); + ProjectName name = ProjectName.of(env.getTestHelper().getOptions().getProjectId()); + + TimeInterval interval = + TimeInterval.newBuilder() + .setStartTime(Timestamps.fromMillis(start.toEpochMilli())) + .setEndTime(Timestamps.fromMillis(end.toEpochMilli())) + .build(); + + client + .readWriteTransaction() + .run(transaction -> transaction.executeQuery(Statement.of("Select 1"))); + + for (String metric : METRICS) { + String metricFilter = + String.format( + "metric.type=\"spanner.googleapis.com/client/%s\"" + + " AND resource.type=\"spanner_instance\"" + + " AND metric.labels.method=\"Spanner.Commit\"" + + " AND resource.labels.instance_id=\"%s\"" + + " AND metric.labels.database=\"%s\"", + metric, db.getId().getInstanceId().getInstance(), db.getId().getDatabase()); + + ListTimeSeriesRequest.Builder requestBuilder = + ListTimeSeriesRequest.newBuilder() + .setName(name.toString()) + .setFilter(metricFilter) + .setInterval(interval) + .setView(ListTimeSeriesRequest.TimeSeriesView.FULL); + + ListTimeSeriesRequest request = requestBuilder.build(); + + ListTimeSeriesResponse response = metricClient.listTimeSeriesCallable().call(request); + while (response.getTimeSeriesCount() == 0 + && metricsPollingStopwatch.elapsed(TimeUnit.MINUTES) < 3) { + // Call listTimeSeries every minute + Thread.sleep(Duration.ofMinutes(1).toMillis()); + response = 
metricClient.listTimeSeriesCallable().call(request); + } + + assertWithMessage("Metric " + metric + " didn't return any data.") + .that(response.getTimeSeriesCount()) + .isGreaterThan(0); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITCommitTimestampTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITCommitTimestampTest.java new file mode 100644 index 000000000000..d10375c4fc26 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITCommitTimestampTest.java @@ -0,0 +1,362 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.testing.ExperimentalHostHelper.isExperimentalHost; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.connection.ConnectionOptions; +import com.google.cloud.spanner.testing.RemoteSpannerHelper; +import com.google.common.collect.ImmutableList; +import java.time.Duration; +import java.time.Instant; +import java.util.Arrays; +import java.util.Collections; +import java.util.concurrent.ExecutionException; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Integration test for commit timestamp of Cloud Spanner. 
*/ +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITCommitTimestampTest { + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static Database db; + private static DatabaseClient client; + private static DatabaseAdminClient dbAdminClient; + private static RemoteSpannerHelper testHelper; + private static String instanceId; + private static String databaseId; + + @BeforeClass + public static void setUp() { + testHelper = env.getTestHelper(); + db = + testHelper.createTestDatabase( + "CREATE TABLE T (" + + "K STRING(MAX) NOT NULL," + + "T1 TIMESTAMP OPTIONS (allow_commit_timestamp = true)," + + "T2 TIMESTAMP OPTIONS (allow_commit_timestamp = true)," + + "T3 TIMESTAMP," + + ") PRIMARY KEY (K)"); + client = testHelper.getDatabaseClient(db); + dbAdminClient = testHelper.getClient().getDatabaseAdminClient(); + instanceId = testHelper.getInstanceId().getInstance(); + databaseId = db.getId().getDatabase(); + } + + @AfterClass + public static void teardown() { + ConnectionOptions.closeSpanner(); + } + + @After + public void deleteAllTestRecords() { + client.write(ImmutableList.of(Mutation.delete("T", KeySet.all()))); + } + + private Timestamp write(Mutation m) { + return client.write(Collections.singletonList(m)); + } + + private Struct readRow(DatabaseClient client, String table, Key key, String... columns) { + return client.singleUse(TimestampBound.strong()).readRow(table, key, Arrays.asList(columns)); + } + + @Test + public void writeCommitTimestamp() { + // 1. 
timestamps auto populated and returned should be the same + Timestamp commitTimestamp = + write( + Mutation.newInsertOrUpdateBuilder("T") + .set("K") + .to("a") + .set("T1") + .to(Value.COMMIT_TIMESTAMP) + .set("T2") + .to(Value.COMMIT_TIMESTAMP) + .build()); + Struct row = readRow(client, "T", Key.of("a"), "T1", "T2"); + assertThat(row.getTimestamp(0)).isEqualTo(commitTimestamp); + assertThat(row.getTimestamp(1)).isEqualTo(commitTimestamp); + + // 2. attempt to write CommitTimestamp to not enabled column should fail + // error_catalog error CommitTimestampOptionNotEnabled + try { + write( + Mutation.newInsertOrUpdateBuilder("T") + .set("K") + .to("a") + .set("T3") + .to(Value.COMMIT_TIMESTAMP) + .build()); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.FAILED_PRECONDITION); + } + } + + @Test + public void consistency() { + // 1. timestamps populated are consistent in order + write( + Mutation.newInsertOrUpdateBuilder("T") + .set("K") + .to("a") + .set("T1") + .to(Value.COMMIT_TIMESTAMP) + .build()); + write( + Mutation.newInsertOrUpdateBuilder("T") + .set("K") + .to("b") + .set("T1") + .to(Value.COMMIT_TIMESTAMP) + .build()); + Struct row1 = readRow(client, "T", Key.of("a"), "T1"); + Struct row2 = readRow(client, "T", Key.of("b"), "T1"); + assertThat(row2.getTimestamp(0)).isGreaterThan(row1.getTimestamp(0)); + } + + @Test + public void schemaChangeTimestampInFuture() throws Exception { + write( + Mutation.newInsertOrUpdateBuilder("T") + .set("K") + .to("a") + .set("T3") + .to(Timestamp.MAX_VALUE) + .build()); + + // error_catalog error CommitTimestampNotInFuture + String statement = "ALTER TABLE T ALTER COLUMN T3 SET OPTIONS (allow_commit_timestamp=true)"; + try { + dbAdminClient + .updateDatabaseDdl(instanceId, databaseId, ImmutableList.of(statement), null) + .get(); + fail("missing expected exception"); + } catch (ExecutionException e) { + 
assertThat(e.getCause()).isInstanceOf(SpannerException.class); + SpannerException se = (SpannerException) e.getCause(); + assertThat(se.getErrorCode()).isEqualTo(ErrorCode.FAILED_PRECONDITION); + } + } + + @Test + public void insertTimestampInFuture() { + // error_catalog error TimestampInFuture + try { + write( + Mutation.newInsertOrUpdateBuilder("T") + .set("K") + .to("a") + .set("T1") + .to(Timestamp.MAX_VALUE) + .build()); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.FAILED_PRECONDITION); + } + } + + @Test + public void invalidColumnOption() throws Exception { + // error_catalog error DDLStatementWithError + String statement = "ALTER TABLE T ALTER COLUMN T3 SET OPTIONS (bogus=null)"; + try { + dbAdminClient + .updateDatabaseDdl(instanceId, databaseId, ImmutableList.of(statement), null) + .get(); + fail("missing expected exception"); + } catch (ExecutionException e) { + assertThat(e.getCause()).isInstanceOf(SpannerException.class); + SpannerException se = (SpannerException) e.getCause(); + assertThat(se.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + } + } + + @Test + public void invalidColumnOptionValue() throws Exception { + // error_catalog error DDLStatementWithErrors + String statement = "ALTER TABLE T ALTER COLUMN T3 SET OPTIONS (allow_commit_timestamp=bogus)"; + try { + dbAdminClient + .updateDatabaseDdl(instanceId, databaseId, ImmutableList.of(statement), null) + .get(); + fail("missing expected exception"); + } catch (ExecutionException e) { + assertThat(e.getCause()).isInstanceOf(SpannerException.class); + SpannerException se = (SpannerException) e.getCause(); + assertThat(se.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + } + } + + @Test + public void invalidColumnType() throws Exception { + assumeFalse( + "Validation currently not available in experimental host mode - tracked via b/442339325", + isExperimentalHost()); + // error_catalog error 
OptionErrorList + String statement = "ALTER TABLE T ADD COLUMN T4 INT64 OPTIONS (allow_commit_timestamp=true)"; + try { + dbAdminClient + .updateDatabaseDdl(instanceId, databaseId, ImmutableList.of(statement), null) + .get(); + fail("missing expected exception"); + } catch (ExecutionException e) { + assertThat(e.getCause()).isInstanceOf(SpannerException.class); + SpannerException se = (SpannerException) e.getCause(); + assertThat(se.getErrorCode()).isEqualTo(ErrorCode.FAILED_PRECONDITION); + } + } + + private void alterColumnOption(String databaseId, String table, String opt) throws Exception { + String statement = + "ALTER TABLE " + + table + + " ALTER COLUMN ts" + + " SET OPTIONS (allow_commit_timestamp=" + + opt + + ")"; + dbAdminClient + .updateDatabaseDdl(instanceId, databaseId, ImmutableList.of(statement), null) + .get(); + } + + private void writeAndVerify(DatabaseClient client, Timestamp ts) { + Timestamp commitTimestamp = + client.write( + Arrays.asList( + Mutation.newInsertOrUpdateBuilder("T1").set("ts").to(ts).build(), + Mutation.newInsertOrUpdateBuilder("T2").set("ts").to(ts).build(), + Mutation.newInsertOrUpdateBuilder("T3").set("ts").to(ts).build())); + if (ts == Value.COMMIT_TIMESTAMP) { + ts = commitTimestamp; + } + assertThat(readRow(client, "T1", Key.of(ts), "ts").getTimestamp(0)).isEqualTo(ts); + assertThat(readRow(client, "T2", Key.of(ts), "ts").getTimestamp(0)).isEqualTo(ts); + assertThat(readRow(client, "T3", Key.of(ts), "ts").getTimestamp(0)).isEqualTo(ts); + } + + // 1) Write timestamps in the past + // 2) Set all interleaved tables allow_commit_timestamp=true + // 3) Use commit timestamp in all tables + // 4) Set all interleaved tables allow_commit_timestamp=null + // 5) Write timestamps in the future + @Test + public void interleavedTable() throws Exception { + Database db = + testHelper.createTestDatabase( + "CREATE TABLE T1 (ts TIMESTAMP) PRIMARY KEY (ts)", + "CREATE TABLE T2 (ts TIMESTAMP) PRIMARY KEY (ts), INTERLEAVE IN PARENT T1", + 
"CREATE TABLE T3 (ts TIMESTAMP) PRIMARY KEY (ts), INTERLEAVE IN PARENT T2"); + DatabaseClient client = testHelper.getDatabaseClient(db); + String databaseId = db.getId().getDatabase(); + + Timestamp timeNow = Timestamp.ofTimeMicroseconds(Instant.now().toEpochMilli() * 1000); + Timestamp timeFuture = + Timestamp.ofTimeMicroseconds( + Instant.now().plus(Duration.ofDays(300)).toEpochMilli() * 1000); + + writeAndVerify(client, timeNow); + + alterColumnOption(databaseId, "T1", "true"); + alterColumnOption(databaseId, "T2", "true"); + alterColumnOption(databaseId, "T3", "true"); + writeAndVerify(client, Value.COMMIT_TIMESTAMP); + + alterColumnOption(databaseId, "T1", "null"); + alterColumnOption(databaseId, "T2", "null"); + alterColumnOption(databaseId, "T3", "null"); + writeAndVerify(client, timeFuture); + } + + // In interleaved table, use of commit timestamp in child table is not allowed + // if parent tables are not allow_commit_timestamp=true + @Test + public void interleavedTableHierarchy1() { + Database db = + testHelper.createTestDatabase( + "CREATE TABLE T1 (ts TIMESTAMP) PRIMARY KEY (ts)", + "CREATE TABLE T2 (ts TIMESTAMP) PRIMARY KEY (ts), INTERLEAVE IN PARENT T1", + "CREATE TABLE T3 (ts TIMESTAMP OPTIONS (allow_commit_timestamp = true)) " + + "PRIMARY KEY (ts), INTERLEAVE IN PARENT T2"); + DatabaseClient client = testHelper.getDatabaseClient(db); + + // error_catalog error CommitTimestampOptionNotEnabled + try { + client.write( + Collections.singletonList( + Mutation.newInsertOrUpdateBuilder("T3") + .set("ts") + .to(Value.COMMIT_TIMESTAMP) + .build())); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.FAILED_PRECONDITION); + } + } + + // In interleaved table, use of commit timestamp in parent table is not + // allowed if child tables are not allow_commit_timestamp=true + @Test + public void interleavedTableHierarchy2() { + Database db = + testHelper.createTestDatabase( + "CREATE TABLE T1 
(ts TIMESTAMP OPTIONS (allow_commit_timestamp = true)) " + + "PRIMARY KEY (ts)", + "CREATE TABLE T2 (ts TIMESTAMP) PRIMARY KEY (ts), INTERLEAVE IN PARENT T1", + "CREATE TABLE T3 (ts TIMESTAMP OPTIONS (allow_commit_timestamp = true)) " + + "PRIMARY KEY (ts), INTERLEAVE IN PARENT T2"); + DatabaseClient client = testHelper.getDatabaseClient(db); + + // error_catalog error CommitTimestampOptionNotEnabled + try { + client.write( + Collections.singletonList( + Mutation.newInsertOrUpdateBuilder("T1") + .set("ts") + .to(Value.COMMIT_TIMESTAMP) + .build())); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.FAILED_PRECONDITION); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDMLTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDMLTest.java new file mode 100644 index 000000000000..3bc87577e8ad --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDMLTest.java @@ -0,0 +1,416 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.it; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeyRange; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.TransactionRunner; +import com.google.cloud.spanner.TransactionRunner.TransactionCallable; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.connection.ConnectionOptions; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** Integration tests for DML. 
*/ +@Category(ParallelIntegrationTest.class) +@RunWith(Parameterized.class) +public final class ITDMLTest { + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static DatabaseClient googleStandardSQLClient; + private static DatabaseClient postgreSQLClient; + + /** Sequence for assigning unique keys to test cases. */ + private static int seq; + + /** Id prefix per test case. */ + private static int id; + + private static final String INSERT_DML = + "INSERT INTO T (k, v) VALUES ('%d-boo1', 1), ('%d-boo2', 2), ('%d-boo3', 3), ('%d-boo4', 4);"; + private static final String UPDATE_DML = "UPDATE T SET V = 100 WHERE K LIKE '%d-boo%%';"; + private static final String DELETE_DML = "DELETE FROM T WHERE K like '%d-boo%%';"; + + private static final long DML_COUNT = 4; + + private static boolean throwAbortOnce = false; + + @BeforeClass + public static void setUpDatabase() { + Database googleStandardSQLDatabase = + env.getTestHelper() + .createTestDatabase( + "CREATE TABLE T (" + + " K STRING(MAX) NOT NULL," + + " V INT64," + + ") PRIMARY KEY (K)"); + googleStandardSQLClient = env.getTestHelper().getDatabaseClient(googleStandardSQLDatabase); + Database postgreSQLDatabase = + env.getTestHelper() + .createTestDatabase( + Dialect.POSTGRESQL, + Arrays.asList( + "CREATE TABLE T (" + " K VARCHAR PRIMARY KEY," + " V BIGINT" + ")")); + postgreSQLClient = env.getTestHelper().getDatabaseClient(postgreSQLDatabase); + } + + @AfterClass + public static void teardown() { + ConnectionOptions.closeSpanner(); + } + + @Before + public void increaseTestIdAndDeleteTestData() { + if (dialect.dialect == Dialect.GOOGLE_STANDARD_SQL) { + googleStandardSQLClient.writeAtLeastOnce( + Collections.singletonList(Mutation.delete("T", KeySet.all()))); + } else { + postgreSQLClient.writeAtLeastOnce( + Collections.singletonList(Mutation.delete("T", KeySet.all()))); + } + id++; + } + + @Parameterized.Parameters(name = "Dialect = {0}") + public static List data() { + List 
params = new ArrayList<>(); + params.add(new DialectTestParameter(Dialect.GOOGLE_STANDARD_SQL)); + params.add(new DialectTestParameter(Dialect.POSTGRESQL)); + return params; + } + + @Parameterized.Parameter(0) + public DialectTestParameter dialect; + + private static String uniqueKey() { + return "k" + seq++; + } + + private String insertDml() { + return String.format(INSERT_DML, id, id, id, id); + } + + private String updateDml() { + return String.format(UPDATE_DML, id); + } + + private String deleteDml() { + return String.format(DELETE_DML, id); + } + + private void executeUpdate(long expectedCount, final String... stmts) { + final TransactionCallable callable = + transaction -> { + long rowCount = 0; + for (String stmt : stmts) { + if (throwAbortOnce) { + throwAbortOnce = false; + throw SpannerExceptionFactory.newSpannerException(ErrorCode.ABORTED, "Abort in test"); + } + + rowCount += transaction.executeUpdate(Statement.of(stmt)); + } + return rowCount; + }; + TransactionRunner runner = getClient(dialect.dialect).readWriteTransaction(); + Long rowCount = runner.run(callable); + assertThat(rowCount).isEqualTo(expectedCount); + } + + private DatabaseClient getClient(Dialect dialect) { + if (dialect == Dialect.POSTGRESQL) { + return postgreSQLClient; + } + return googleStandardSQLClient; + } + + @Test + public void abortOnceShouldSucceedAfterRetry() { + try { + throwAbortOnce = true; + executeUpdate(DML_COUNT, insertDml()); + assertThat(throwAbortOnce).isFalse(); + } catch (AbortedException e) { + fail("Abort Exception not caught and retried"); + } + } + + @Test + public void partitionedDML() { + executeUpdate(DML_COUNT, insertDml()); + assertThat( + getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .readRow("T", Key.of(String.format("%d-boo1", id)), Collections.singletonList("V")) + .getLong(0)) + .isEqualTo(1); + + long rowCount = getClient(dialect.dialect).executePartitionedUpdate(Statement.of(updateDml())); + // Note: With PDML there is a 
possibility of network replay or partial update to occur, causing + // this assert to fail. We should remove this assert if it is a recurring failure in IT tests. + assertThat(rowCount).isEqualTo(DML_COUNT); + assertThat( + getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .readRow("T", Key.of(String.format("%d-boo1", id)), Collections.singletonList("V")) + .getLong(0)) + .isEqualTo(100); + + rowCount = getClient(dialect.dialect).executePartitionedUpdate(Statement.of(deleteDml())); + assertThat(rowCount).isEqualTo(DML_COUNT); + assertThat( + getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .readRow("T", Key.of(String.format("%d-boo1", id)), Collections.singletonList("V"))) + .isNull(); + } + + @Test + public void standardDML() { + executeUpdate(DML_COUNT, insertDml()); + assertThat( + getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .readRow("T", Key.of(String.format("%d-boo1", id)), Collections.singletonList("V")) + .getLong(0)) + .isEqualTo(1); + executeUpdate(DML_COUNT, updateDml()); + assertThat( + getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .readRow("T", Key.of(String.format("%d-boo1", id)), Collections.singletonList("V")) + .getLong(0)) + .isEqualTo(100); + executeUpdate(DML_COUNT, deleteDml()); + assertThat( + getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .readRow("T", Key.of(String.format("%d-boo1", id)), Collections.singletonList("V"))) + .isNull(); + } + + @Test + public void standardDMLWithError() { + try { + executeUpdate(0, "SELECT * FROM T;"); + fail("Expected illegal argument exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.UNKNOWN); + assertThat(e.getMessage()) + .contains("DML response missing stats possibly due to non-DML statement as input"); + assertThat(e.getCause()).isInstanceOf(IllegalArgumentException.class); + } + } + + @Test + public void standardDMLWithDuplicates() { + executeUpdate(DML_COUNT, 
insertDml()); + + executeUpdate( + 4, + String.format("UPDATE T SET v = 200 WHERE k = '%d-boo1';", id), + String.format("UPDATE T SET v = 300 WHERE k = '%d-boo1';", id), + String.format("UPDATE T SET v = 400 WHERE k = '%d-boo1';", id), + String.format("UPDATE T SET v = 500 WHERE k = '%d-boo1';", id)); + assertThat( + getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .readRow("T", Key.of(String.format("%d-boo1", id)), Collections.singletonList("V")) + .getLong(0)) + .isEqualTo(500); + + executeUpdate(DML_COUNT, deleteDml(), deleteDml()); + } + + @Test + public void standardDMLReadYourWrites() { + executeUpdate(DML_COUNT, insertDml()); + + final TransactionCallable callable = + transaction -> { + long rowCount = + transaction.executeUpdate( + Statement.of(String.format("UPDATE T SET v = v * 2 WHERE k = '%d-boo2';", id))); + assertThat(rowCount).isEqualTo(1); + assertThat( + transaction + .readRow( + "T", Key.of(String.format("%d-boo2", id)), Collections.singletonList("v")) + .getLong(0)) + .isEqualTo(2 * 2); + return null; + }; + TransactionRunner runner = getClient(dialect.dialect).readWriteTransaction(); + runner.run(callable); + + executeUpdate(DML_COUNT, deleteDml()); + } + + @Test + public void standardDMLRollback() { + class UserException extends Exception { + UserException(String message) { + super(message); + } + } + final TransactionCallable callable = + transaction -> { + long rowCount = transaction.executeUpdate(Statement.of(insertDml())); + assertThat(rowCount).isEqualTo(DML_COUNT); + throw new UserException("failing to commit"); + }; + + try { + TransactionRunner runner = getClient(dialect.dialect).readWriteTransaction(); + runner.run(callable); + fail("Expected user exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.UNKNOWN); + assertThat(e.getMessage()).contains("failing to commit"); + assertThat(e.getCause()).isInstanceOf(UserException.class); + } + + ResultSet resultSet = + 
getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .read( + "T", + KeySet.range(KeyRange.prefix(Key.of(String.format("%d-boo", id)))), + Collections.singletonList("K")); + assertThat(resultSet.next()).isFalse(); + } + + @Test + public void standardDMLAndMutations() { + final String key1 = uniqueKey(); + final String key2 = uniqueKey(); + final TransactionCallable callable = + transaction -> { + // DML + long rowCount = + transaction.executeUpdate( + Statement.of("INSERT INTO T (k, v) VALUES ('" + key1 + "', 1)")); + assertThat(rowCount).isEqualTo(1); + + // Mutations + transaction.buffer( + Mutation.newInsertOrUpdateBuilder("T").set("K").to(key2).set("V").to(2).build()); + return null; + }; + TransactionRunner runner = getClient(dialect.dialect).readWriteTransaction(); + runner.run(callable); + + KeySet.Builder keys = KeySet.newBuilder(); + keys.addKey(Key.of(key1)).addKey(Key.of(key2)); + ResultSet resultSet = + getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .read("T", keys.build(), Collections.singletonList("K")); + int rowCount = 0; + while (resultSet.next()) { + rowCount++; + } + assertThat(rowCount).isEqualTo(2); + } + + private void executeQuery(long expectedCount, final String... stmts) { + final TransactionCallable callable = + transaction -> { + long rowCount = 0; + for (final String stmt : stmts) { + ResultSet resultSet = transaction.executeQuery(Statement.of(stmt)); + assertThat(resultSet.next()).isFalse(); + assertThat(resultSet.getStats()).isNotNull(); + rowCount += resultSet.getStats().getRowCountExact(); + } + return rowCount; + }; + TransactionRunner runner = getClient(dialect.dialect).readWriteTransaction(); + Long rowCount = runner.run(callable); + assertThat(rowCount).isEqualTo(expectedCount); + } + + @Test + public void standardDMLWithExecuteSQL() { + executeQuery(DML_COUNT, insertDml()); + // checks for multi-stmts within a txn, therefore also verifying seqNo. 
+ executeQuery(DML_COUNT * 2, updateDml(), deleteDml()); + } + + @Test + public void testUntypedNullValues() { + DatabaseClient client = getClient(dialect.dialect); + String sql; + if (dialect.dialect == Dialect.POSTGRESQL) { + sql = "INSERT INTO T (K, V) VALUES ($1, $2)"; + } else { + sql = "INSERT INTO T (K, V) VALUES (@p1, @p2)"; + } + Long updateCount = + client + .readWriteTransaction() + .run( + transaction -> + transaction.executeUpdate( + Statement.newBuilder(sql) + .bind("p1") + .to("k1") + .bind("p2") + .to((Value) null) + .build())); + + assertNotNull(updateCount); + assertEquals(1L, updateCount.longValue()); + + // Read the row back and verify that the value is null. + try (ResultSet resultSet = client.singleUse().executeQuery(Statement.of("SELECT V FROM T"))) { + assertTrue(resultSet.next()); + assertTrue(resultSet.isNull(0)); + assertFalse(resultSet.next()); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDatabaseAdminDialectAwareTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDatabaseAdminDialectAwareTest.java new file mode 100644 index 000000000000..584dc1fc2472 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDatabaseAdminDialectAwareTest.java @@ -0,0 +1,120 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.testing.RemoteSpannerHelper; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import java.util.stream.StreamSupport; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +/** Dialect aware integration tests for {@link com.google.cloud.spanner.DatabaseAdminClient}. 
*/ +@Category(ParallelIntegrationTest.class) +@RunWith(Parameterized.class) +public class ITDatabaseAdminDialectAwareTest { + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static final Duration OPERATION_TIMEOUT = Duration.ofMinutes(20); + private DatabaseAdminClient client; + private RemoteSpannerHelper testHelper; + private List databases; + + @Parameter public Dialect dialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + @Before + public void setUp() { + testHelper = env.getTestHelper(); + client = testHelper.getClient().getDatabaseAdminClient(); + databases = new ArrayList<>(); + } + + @After + public void tearDown() { + if (databases != null) { + for (DatabaseId id : databases) { + try { + client.dropDatabase(id.getInstanceId().getInstance(), id.getDatabase()); + } catch (Exception e) { + System.err.println("Could not drop database " + id + ", skipping...: " + e.getMessage()); + } + } + } + } + + @Test + public void testCreateDatabaseWithDialect() throws Exception { + assumeFalse("emulator does not support different dialects", isUsingEmulator()); + + final String projectId = testHelper.getInstanceId().getProject(); + final String instanceId = testHelper.getInstanceId().getInstance(); + final String databaseId = testHelper.getUniqueDatabaseId(); + + final Database databaseToCreate = + client + .newDatabaseBuilder(DatabaseId.of(projectId, instanceId, databaseId)) + .setDialect(dialect) + .build(); + databases.add(databaseToCreate.getId()); + + // Creates the database with the dialect set + final Database createdDatabase = + client + .createDatabase(databaseToCreate, Collections.emptyList()) + .get(OPERATION_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS); + assertEquals(dialect, createdDatabase.getDialect()); + + // Test dialect in database retrieval + final Database retrievedDatabase = client.getDatabase(instanceId, databaseId); + assertEquals(dialect, 
retrievedDatabase.getDialect()); + + // Test dialect database listing + final Optional maybeListedDatabase = + StreamSupport.stream(client.listDatabases(instanceId).iterateAll().spliterator(), false) + .filter(database -> database.getId().getDatabase().equals(databaseId)) + .findFirst(); + assertTrue("Expected to find database in list", maybeListedDatabase.isPresent()); + assertEquals(dialect, maybeListedDatabase.get().getDialect()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDatabaseAdminTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDatabaseAdminTest.java new file mode 100644 index 000000000000..c986e7b8df1e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDatabaseAdminTest.java @@ -0,0 +1,320 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static com.google.cloud.spanner.testing.ExperimentalHostHelper.isExperimentalHost; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.paging.Page; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseInfo.DatabaseField; +import com.google.cloud.spanner.DatabaseRole; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.testing.RemoteSpannerHelper; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterables; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseMetadata; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.logging.Level; +import java.util.logging.Logger; +import org.junit.Assert; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Integration tests for {@link 
com.google.cloud.spanner.DatabaseAdminClient}. */ +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITDatabaseAdminTest { + private static final long TIMEOUT_MINUTES = 5; + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + + private static final Logger logger = Logger.getLogger(ITDatabaseAdminTest.class.getName()); + private DatabaseAdminClient dbAdminClient; + private RemoteSpannerHelper testHelper; + + @Before + public void setUp() { + testHelper = env.getTestHelper(); + dbAdminClient = testHelper.getClient().getDatabaseAdminClient(); + } + + @Test + public void testDatabaseOperations() throws Exception { + final String instanceId = testHelper.getInstanceId().getInstance(); + final String createTableT = "CREATE TABLE T (\n" + " K STRING(MAX),\n" + ") PRIMARY KEY(K)"; + + final Database createdDatabase = testHelper.createTestDatabase(createTableT); + final String databaseId = createdDatabase.getId().getDatabase(); + + assertEquals(databaseId, createdDatabase.getId().getDatabase()); + assertEquals(Dialect.GOOGLE_STANDARD_SQL, createdDatabase.getDialect()); + + final Database retrievedDatabase = dbAdminClient.getDatabase(instanceId, databaseId); + assertEquals(databaseId, retrievedDatabase.getId().getDatabase()); + assertEquals(Dialect.GOOGLE_STANDARD_SQL, retrievedDatabase.getDialect()); + + Optional maybeDatabaseInList = Optional.empty(); + for (Database listedDatabase : dbAdminClient.listDatabases(instanceId).iterateAll()) { + if (listedDatabase.getId().getDatabase().equals(databaseId)) { + maybeDatabaseInList = Optional.of(listedDatabase); + break; + } + } + assertTrue("Expected to find database in list", maybeDatabaseInList.isPresent()); + assertEquals(databaseId, maybeDatabaseInList.get().getId().getDatabase()); + assertEquals(Dialect.GOOGLE_STANDARD_SQL, maybeDatabaseInList.get().getDialect()); + + final String createTableT2 = + "CREATE TABLE T2 (\n" + " K2 STRING(MAX),\n" + ") PRIMARY KEY(K2)"; + 
dbAdminClient + .updateDatabaseDdl(instanceId, databaseId, ImmutableList.of(createTableT2), null) + .get(5, TimeUnit.MINUTES); + + final List databaseDdl = dbAdminClient.getDatabaseDdl(instanceId, databaseId); + assertEquals(databaseDdl, ImmutableList.of(createTableT, createTableT2)); + + dbAdminClient.dropDatabase(instanceId, databaseId); + + try { + dbAdminClient.getDatabase(instanceId, databaseId); + fail("Expected exception"); + } catch (SpannerException e) { + assertEquals(ErrorCode.NOT_FOUND, e.getErrorCode()); + } + } + + @Test + public void updateDdlRetry() throws Exception { + String instanceId = testHelper.getInstanceId().getInstance(); + String statement1 = "CREATE TABLE T (\n" + " K STRING(MAX),\n" + ") PRIMARY KEY(K)"; + Database db = testHelper.createTestDatabase(statement1); + String dbId = db.getId().getDatabase(); + + String statement2 = "CREATE TABLE T2 (\n" + " K2 STRING(MAX),\n" + ") PRIMARY KEY(K2)"; + OperationFuture op1 = + dbAdminClient.updateDatabaseDdl(instanceId, dbId, ImmutableList.of(statement2), "myop"); + OperationFuture op2 = + dbAdminClient.updateDatabaseDdl(instanceId, dbId, ImmutableList.of(statement2), "myop"); + op1.get(TIMEOUT_MINUTES, TimeUnit.MINUTES); + op2.get(TIMEOUT_MINUTES, TimeUnit.MINUTES); + + // Remove the progress list from the metadata before comparing, as there could be small + // differences between the two in the reported progress depending on exactly when each + // operation was fetched from the backend. 
+ UpdateDatabaseDdlMetadata metadata1 = + op1.getMetadata().get().toBuilder().clearProgress().build(); + UpdateDatabaseDdlMetadata metadata2 = + op2.getMetadata().get().toBuilder().clearProgress().build(); + assertThat(metadata1).isEqualTo(metadata2); + } + + @Test + public void databaseOperationsViaEntity() throws Exception { + String statement1 = "CREATE TABLE T (\n" + " K STRING(MAX),\n" + ") PRIMARY KEY(K)"; + Database db = testHelper.createTestDatabase(statement1); + String dbId = db.getId().getDatabase(); + assertThat(db.getId().getDatabase()).isEqualTo(dbId); + + db = db.reload(); + assertThat(db.getId().getDatabase()).isEqualTo(dbId); + + String statement2 = "CREATE TABLE T2 (\n" + " K2 STRING(MAX),\n" + ") PRIMARY KEY(K2)"; + OperationFuture op2 = db.updateDdl(ImmutableList.of(statement2), null); + op2.get(TIMEOUT_MINUTES, TimeUnit.MINUTES); + Iterable statementsInDb = db.getDdl(); + assertThat(statementsInDb).containsExactly(statement1, statement2); + db.drop(); + try { + db.reload(); + fail("Expected exception"); + } catch (SpannerException ex) { + assertThat(ex.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + } + } + + @Test + public void listPagination() { + String instanceId = testHelper.getInstanceId().getInstance(); + List dbIds = new ArrayList<>(3); + for (int n = 0; n < 3; n++) { + dbIds.add(testHelper.createTestDatabase().getId().getDatabase()); + } + Page page = dbAdminClient.listDatabases(instanceId, Options.pageSize(1)); + List dbIdsGot = new ArrayList<>(); + // A valid page will contain 0 or 1 elements. + while (page != null && page.getValues().iterator().hasNext()) { + Database db = Iterables.getOnlyElement(page.getValues()); + dbIdsGot.add(db.getId().getDatabase()); + // page.getNextPage() will return null if the previous call did not return a 'nextPageToken'. + // That is an indication that the server knows that there are no more results. The method may + // however also return a page with zero results. 
That happens if there was another result on + // the server when the previous call was executed (and returned a nextPageToken), but that + // result has been deleted in the meantime. + page = page.getNextPage(); + } + assertThat(dbIdsGot).containsAtLeastElementsIn(dbIds); + } + + @Test + public void createAndListDatabaseRoles() throws Exception { + assumeFalse("Experimental Host does not support database roles", isExperimentalHost()); + assumeFalse("Emulator does not support create & list database roles", isUsingEmulator()); + List dbRoles = + ImmutableList.of( + testHelper.getUniqueDatabaseRole(), + testHelper.getUniqueDatabaseRole(), + testHelper.getUniqueDatabaseRole()); + + String instanceId = testHelper.getInstanceId().getInstance(); + Database database = testHelper.createTestDatabase(); + + // Create the roles in Db. + List dbRolesCreateStatements = new ArrayList<>(); + for (String dbRole : dbRoles) { + dbRolesCreateStatements.add(String.format("CREATE ROLE %s", dbRole)); + } + dbAdminClient + .updateDatabaseDdl( + instanceId, database.getId().getDatabase(), dbRolesCreateStatements, null) + .get(); + + // List roles from Db. + Page page = + dbAdminClient.listDatabaseRoles(instanceId, database.getId().getDatabase()); + List dbRolesGot = new ArrayList<>(); + while (page != null && page.getValues().iterator().hasNext()) { + for (DatabaseRole value : page.getValues()) { + String[] split = value.getName().split("/"); + dbRolesGot.add(split[split.length - 1]); + } + page = page.getNextPage(); + } + assertThat(dbRolesGot).containsAtLeastElementsIn(dbRoles); + + // Delete the created roles. + List dbRolesDropStatements = new ArrayList<>(); + for (String dbRole : dbRoles) { + dbRolesDropStatements.add(String.format("DROP ROLE %s", dbRole)); + } + dbAdminClient + .updateDatabaseDdl(instanceId, database.getId().getDatabase(), dbRolesDropStatements, null) + .get(); + + // List roles from Db. Deleted roles should not be present in list. 
+ Page pageRemainingRoles = + dbAdminClient.listDatabaseRoles(instanceId, database.getId().getDatabase()); + List dbRolesRemaining = new ArrayList<>(); + while (pageRemainingRoles != null && pageRemainingRoles.getValues().iterator().hasNext()) { + for (DatabaseRole value : pageRemainingRoles.getValues()) { + String[] split = value.getName().split("/"); + dbRolesRemaining.add(split[split.length - 1]); + } + pageRemainingRoles = pageRemainingRoles.getNextPage(); + } + assertThat(dbRolesRemaining).containsNoneIn(dbRoles); + } + + @Test + public void updateDatabaseInvalidFieldsToUpdate() { + assumeFalse("Emulator does not drop database protection", isUsingEmulator()); + Database database = testHelper.createTestDatabase(); + logger.log(Level.INFO, "Created database: {0}", database.getId().getName()); + + Database databaseToUpdate = + dbAdminClient.newDatabaseBuilder(database.getId()).enableDropProtection().build(); + // Don't provide any fields to update. + OperationFuture op = + dbAdminClient.updateDatabase(databaseToUpdate); + + try { + op.get(5, TimeUnit.MINUTES); + Assert.fail("No exception thrown"); + } catch (ExecutionException | InterruptedException | TimeoutException e) { + Assert.assertTrue(e.getCause() instanceof SpannerException); + SpannerException exception = ((SpannerException) e.getCause()); + assertEquals(ErrorCode.INVALID_ARGUMENT, exception.getErrorCode()); + } + } + + @Test + public void dropDatabaseWithProtectionEnabled() throws Exception { + assumeFalse("Tracking the failure via b/441255724", isExperimentalHost()); + assumeFalse("Emulator does not drop database protection", isUsingEmulator()); + String instanceId = testHelper.getInstanceId().getInstance(); + Database database = testHelper.createTestDatabase(); + logger.log(Level.INFO, "Created database: {0}", database.getId().getName()); + + // Enable drop protection for the database. 
+ Database databaseToUpdate = + dbAdminClient.newDatabaseBuilder(database.getId()).enableDropProtection().build(); + OperationFuture op = + dbAdminClient.updateDatabase(databaseToUpdate, DatabaseField.DROP_PROTECTION); + Database updatedDatabase = op.get(5, TimeUnit.MINUTES); + assertEquals(updatedDatabase.getId().getName(), database.getId().getName()); + assertTrue(updatedDatabase.isDropProtectionEnabled()); + + String databaseId = database.getId().getDatabase(); + + // Assert that dropping a database with protection enabled fails due to precondition violation. + SpannerException e = + assertThrows( + SpannerException.class, () -> dbAdminClient.dropDatabase(instanceId, databaseId)); + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + + // Assert that deleting the instance also fails due to precondition violation. + e = + assertThrows( + SpannerException.class, + () -> testHelper.getClient().getInstanceAdminClient().deleteInstance(instanceId)); + assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); + + // Disable drop protection for the database. + databaseToUpdate = + dbAdminClient.newDatabaseBuilder(database.getId()).disableDropProtection().build(); + op = dbAdminClient.updateDatabase(databaseToUpdate, DatabaseField.DROP_PROTECTION); + updatedDatabase = op.get(5, TimeUnit.MINUTES); + assertEquals(updatedDatabase.getId().getName(), database.getId().getName()); + assertFalse(updatedDatabase.isDropProtectionEnabled()); + + // Dropping the database should succeed now. 
+ dbAdminClient.dropDatabase(instanceId, databaseId); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDatabaseRolePermissionTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDatabaseRolePermissionTest.java new file mode 100644 index 000000000000..4947401992b0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDatabaseRolePermissionTest.java @@ -0,0 +1,245 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static com.google.cloud.spanner.testing.ExperimentalHostHelper.isExperimentalHost; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.*; +import static org.junit.Assume.assumeFalse; + +import com.google.api.gax.rpc.PermissionDeniedException; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.InstanceId; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.testing.RemoteSpannerHelper; +import com.google.common.collect.ImmutableList; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +/** Integration tests for Role Permissions using {@link com.google.cloud.spanner.DatabaseRole}. 
*/ +@Category(ParallelIntegrationTest.class) +@RunWith(Parameterized.class) +public class ITDatabaseRolePermissionTest { + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static DatabaseAdminClient dbAdminClient; + private static RemoteSpannerHelper testHelper; + + private static List databasesToDrop; + + @Parameter public DialectTestParameter dialect; + + @Parameters(name = "Dialect = {0}") + public static List data() { + return ImmutableList.of( + new DialectTestParameter(Dialect.GOOGLE_STANDARD_SQL), + new DialectTestParameter(Dialect.POSTGRESQL)); + } + + @BeforeClass + public static void setUp() { + assumeFalse("Experimental Host does not support database roles", isExperimentalHost()); + assumeFalse("Emulator does not support database roles", isUsingEmulator()); + testHelper = env.getTestHelper(); + dbAdminClient = testHelper.getClient().getDatabaseAdminClient(); + databasesToDrop = new ArrayList<>(); + } + + @AfterClass + public static void cleanup() throws Exception { + if (databasesToDrop != null) { + for (DatabaseId id : databasesToDrop) { + try { + dbAdminClient.dropDatabase(id.getInstanceId().getInstance(), id.getDatabase()); + } catch (Exception e) { + System.err.println("Failed to drop database " + id + ", skipping...: " + e.getMessage()); + } + } + } + } + + @Test + public void grantAndRevokeDatabaseRolePermissions() throws Exception { + // Create database with table and role permission. + final String dbRoleParent = "parent"; + final String databaseId = testHelper.getUniqueDatabaseId(); + final InstanceId instanceId = testHelper.getInstanceId(); + + final String createTableT = getCreateTableStatement(); + final String createRoleParent = String.format("CREATE ROLE %s", dbRoleParent); + final String grantSelectOnTableToParent = + dialect.dialect == Dialect.POSTGRESQL + ? 
String.format("GRANT SELECT ON TABLE T TO %s", dbRoleParent) + : String.format("GRANT SELECT ON TABLE T TO ROLE %s", dbRoleParent); + final Database createdDatabase = + createAndUpdateDatabase( + instanceId, + databaseId, + ImmutableList.of(createTableT, createRoleParent, grantSelectOnTableToParent)); + + // Connect to db with dbRoleParent. + SpannerOptions options = + testHelper.getOptions().toBuilder().setDatabaseRole(dbRoleParent).build(); + + Spanner spanner = options.getService(); + DatabaseClient dbClient = spanner.getDatabaseClient(createdDatabase.getId()); + + // Test SELECT permissions to role dbRoleParent on table T. + // Query using dbRoleParent should return result. + try (ResultSet rs = + dbClient.singleUse().executeQuery(Statement.of("SELECT COUNT(*) as cnt FROM T"))) { + assertTrue(rs.next()); + assertEquals(dbClient.getDatabaseRole(), dbRoleParent); + } catch (PermissionDeniedException e) { + // This is not expected + fail("Got PermissionDeniedException when it should not have occurred."); + } + + // Revoke select Permission for dbRoleParent. + final String revokeSelectOnTableFromParent = + dialect.dialect == Dialect.POSTGRESQL + ? String.format("REVOKE SELECT ON TABLE T FROM %s", dbRoleParent) + : String.format("REVOKE SELECT ON TABLE T FROM ROLE %s", dbRoleParent); + + dbAdminClient + .updateDatabaseDdl( + instanceId.getInstance(), + databaseId, + ImmutableList.of(revokeSelectOnTableFromParent), + null) + .get(5, TimeUnit.MINUTES); + + // Test SELECT permissions to role dbRoleParent on table T. + // Query using dbRoleParent should return PermissionDeniedException. + try (ResultSet rs = + dbClient.singleUse().executeQuery(Statement.of("SELECT COUNT(*) as cnt FROM T"))) { + SpannerException e = assertThrows(SpannerException.class, () -> rs.next()); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.PERMISSION_DENIED); + assertThat(e.getMessage()).contains(dbRoleParent); + } + // Drop role and table. 
+ final String dropTableT = "DROP TABLE T"; + final String dropRoleParent = String.format("DROP ROLE %s", dbRoleParent); + dbAdminClient + .updateDatabaseDdl( + instanceId.getInstance(), + databaseId, + ImmutableList.of(dropTableT, dropRoleParent), + null) + .get(5, TimeUnit.MINUTES); + databasesToDrop.add(createdDatabase.getId()); + } + + @Test + public void roleWithNoPermissions() throws Exception { + final String dbRoleOrphan = testHelper.getUniqueDatabaseRole(); + final String databaseId = testHelper.getUniqueDatabaseId(); + final InstanceId instanceId = testHelper.getInstanceId(); + + final String createTableT = getCreateTableStatement(); + final String createRoleOrphan = String.format("CREATE ROLE %s", dbRoleOrphan); + + final Database createdDatabase = + createAndUpdateDatabase( + instanceId, databaseId, ImmutableList.of(createTableT, createRoleOrphan)); + + // Connect to db with dbRoleOrphan + SpannerOptions options = + testHelper.getOptions().toBuilder().setDatabaseRole(dbRoleOrphan).build(); + + Spanner spanner = options.getService(); + DatabaseClient dbClient = spanner.getDatabaseClient(createdDatabase.getId()); + + // Test SELECT permissions to role dbRoleOrphan on table T. + // Query using dbRoleOrphan should return PermissionDeniedException. + try (ResultSet rs = + dbClient.singleUse().executeQuery(Statement.of("SELECT COUNT(*) as cnt FROM T"))) { + SpannerException e = assertThrows(SpannerException.class, () -> rs.next()); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.PERMISSION_DENIED); + assertThat(e.getMessage()).contains(dbRoleOrphan); + } + // Drop role and table. 
+ final String dropTableT = "DROP TABLE T"; + final String dropRoleParent = String.format("DROP ROLE %s", dbRoleOrphan); + dbAdminClient + .updateDatabaseDdl( + instanceId.getInstance(), databaseId, Arrays.asList(dropTableT, dropRoleParent), null) + .get(5, TimeUnit.MINUTES); + databasesToDrop.add(createdDatabase.getId()); + } + + private Database createAndUpdateDatabase( + final InstanceId instanceId, final String databaseId, final List statements) + throws Exception { + if (dialect.dialect == Dialect.POSTGRESQL) { + // DDL statements other than are not allowed in database creation request + // for PostgreSQL-enabled databases. + final Database database = + dbAdminClient + .createDatabase( + dbAdminClient + .newDatabaseBuilder(DatabaseId.of(instanceId, databaseId)) + .setDialect(dialect.dialect) + .build(), + ImmutableList.of()) + .get(5, TimeUnit.MINUTES); + dbAdminClient + .updateDatabaseDdl(instanceId.getInstance(), databaseId, statements, null) + .get(5, TimeUnit.MINUTES); + return database; + } else { + return dbAdminClient + .createDatabase( + dbAdminClient + .newDatabaseBuilder(DatabaseId.of(instanceId, databaseId)) + .setDialect(dialect.dialect) + .build(), + statements) + .get(5, TimeUnit.MINUTES); + } + } + + private String getCreateTableStatement() { + if (dialect.dialect == Dialect.POSTGRESQL) { + return "CREATE TABLE T (" + " \"K\" VARCHAR PRIMARY KEY" + ")"; + } else { + return "CREATE TABLE T (" + " K STRING(MAX)" + ") PRIMARY KEY (K)"; + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDatabaseTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDatabaseTest.java new file mode 100644 index 000000000000..ed8f67379e45 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDatabaseTest.java @@ -0,0 +1,237 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you 
may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static com.google.cloud.spanner.testing.ExperimentalHostHelper.isExperimentalHost; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; + +import com.google.api.client.util.ExponentialBackOff; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.DatabaseNotFoundException; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.InstanceId; +import com.google.cloud.spanner.InstanceNotFoundException; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SessionNotFoundException; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerException.ResourceNotFoundException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TransactionContext; +import com.google.cloud.spanner.TransactionRunner.TransactionCallable; +import 
com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.Collections; +import javax.annotation.Nullable; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Integration tests for database admin functionality: DDL etc. */ +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITDatabaseTest { + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + + @Test + public void badDdl() { + try { + env.getTestHelper().createTestDatabase("CREATE TABLE T ( Illegal Way To Define A Table )"); + fail("Expected exception"); + } catch (SpannerException ex) { + assertThat(ex.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(ex.getMessage()).contains("Syntax error on line 1"); + } + } + + @Test + public void databaseDeletedTest() throws Exception { + // Create a test db, do a query, then delete it and verify that it returns + // DatabaseNotFoundExceptions. + Database db = env.getTestHelper().createTestDatabase(); + DatabaseClient client = env.getTestHelper().getClient().getDatabaseClient(db.getId()); + try (ResultSet rs = client.singleUse().executeQuery(Statement.of("SELECT 1"))) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(0)).isEqualTo(1L); + assertThat(rs.next()).isFalse(); + } + + // Delete the database. + db.drop(); + // We need to wait a little before Spanner actually starts sending DatabaseNotFound errors. 
+ ExponentialBackOff backoff = + new ExponentialBackOff.Builder() + .setInitialIntervalMillis(1000) + .setMaxElapsedTimeMillis(65000) + .setMaxIntervalMillis(5000) + .build(); + ResourceNotFoundException notFoundException = null; + long millis; + while ((millis = backoff.nextBackOffMillis()) != ExponentialBackOff.STOP) { + //noinspection BusyWait + Thread.sleep(millis); + // Queries to this database should eventually return DatabaseNotFoundExceptions. + try (ResultSet rs = client.singleUse().executeQuery(Statement.of("SELECT 1"))) { + rs.next(); + } catch (DatabaseNotFoundException e) { + // This is what we expect. + notFoundException = e; + break; + } + } + assertThat(notFoundException).isNotNull(); + + // Now re-create a database with the same name. + OperationFuture op = + env.getTestHelper() + .getClient() + .getDatabaseAdminClient() + .createDatabase( + db.getId().getInstanceId().getInstance(), + db.getId().getDatabase(), + Collections.emptyList()); + Database newDb = op.get(); + + // Now try to query using the old session and verify that we also now (eventually) get a + // 'Database not found' error. + backoff = + new ExponentialBackOff.Builder() + .setInitialIntervalMillis(1000) + .setMaxElapsedTimeMillis(65000) + .setMaxIntervalMillis(5000) + .build(); + + notFoundException = null; + while ((millis = backoff.nextBackOffMillis()) != ExponentialBackOff.STOP) { + //noinspection BusyWait + Thread.sleep(millis); + // Queries to this database should eventually return DatabaseNotFoundExceptions. + try (ResultSet rs = client.singleUse().executeQuery(Statement.of("SELECT 1"))) { + rs.next(); + } catch (DatabaseNotFoundException databaseNotFoundException) { + // This is what we expect. + notFoundException = databaseNotFoundException; + break; + } catch (SessionNotFoundException sessionNotFoundException) { + if (isUsingEmulator()) { + // This is expected on the emulator, as the emulator does not see a difference between two + // different databases with the same name. 
The original session from the first database is + // however not present on the newly created database, which is why we get a + // SessionNotFoundException. + notFoundException = sessionNotFoundException; + break; + } else { + throw sessionNotFoundException; + } + } + } + if (!isUsingEmulator()) { + assertThat(notFoundException).isNotNull(); + } + + // Now get a new DatabaseClient for the database. This should now result in a valid + // DatabaseClient. + DatabaseClient newClient = env.getTestHelper().getClient().getDatabaseClient(newDb.getId()); + try (ResultSet rs = newClient.singleUse().executeQuery(Statement.of("SELECT 1"))) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(0)).isEqualTo(1L); + assertThat(rs.next()).isFalse(); + } + } + + @Test + public void instanceNotFound() { + assumeFalse( + "experimental hosts only support pre-created default instance", isExperimentalHost()); + InstanceId testId = env.getTestHelper().getInstanceId(); + InstanceId nonExistingInstanceId = + InstanceId.of(testId.getProject(), testId.getInstance() + "-na"); + DatabaseClient client = + env.getTestHelper() + .getClient() + .getDatabaseClient(DatabaseId.of(nonExistingInstanceId, "some-db")); + try (ResultSet rs = client.singleUse().executeQuery(Statement.of("SELECT 1"))) { + rs.next(); + fail("missing expected exception"); + } catch (InstanceNotFoundException e) { + assertThat(e.getResourceName()).isEqualTo(nonExistingInstanceId.getName()); + } + } + + @Test + public void testNumericPrimaryKey() { + final String table = "NumericTable"; + + // Creates table with numeric primary key + Database database = + env.getTestHelper() + .createTestDatabase( + "CREATE TABLE " + table + " (" + "Id NUMERIC NOT NULL" + ") PRIMARY KEY (Id)"); + + // Writes data into the table + final DatabaseClient databaseClient = + env.getTestHelper().getClient().getDatabaseClient(database.getId()); + final ArrayList mutations = new ArrayList<>(); + for (int i = 0; i < 5; i++) { + 
mutations.add(Mutation.newInsertBuilder(table).set("Id").to(new BigDecimal(i + "")).build()); + } + databaseClient.write(mutations); + + // Reads the data to verify the writes + try (final ResultSet resultSet = + databaseClient.singleUse().read(table, KeySet.all(), Collections.singletonList("Id"))) { + for (int i = 0; resultSet.next(); i++) { + assertEquals(new BigDecimal(i + ""), resultSet.getBigDecimal("Id")); + } + } + + // Deletes data from the table, leaving only the Id = 0 row + databaseClient + .readWriteTransaction() + .run( + new TransactionCallable() { + @Nullable + @Override + public Object run(TransactionContext transaction) throws Exception { + transaction.executeUpdate(Statement.of("DELETE FROM " + table + " WHERE Id > 0")); + return null; + } + }); + + // Reads the data to verify the deletes only left a single row left + try (final ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery(Statement.of("SELECT COUNT(1) as cnt FROM " + table))) { + resultSet.next(); + assertEquals(1L, resultSet.getLong("cnt")); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDirectPathFallback.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDirectPathFallback.java new file mode 100644 index 000000000000..bf6c14509730 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDirectPathFallback.java @@ -0,0 +1,302 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static com.google.common.truth.Truth.assertWithMessage; +import static com.google.common.truth.TruthJUnit.assume; + +import com.google.api.gax.core.FixedCredentialsProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.auth.oauth2.ComputeEngineCredentials; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.testing.RemoteSpannerHelper; +import com.google.common.base.Stopwatch; +import io.grpc.ManagedChannelBuilder; +import io.grpc.alts.ComputeEngineChannelBuilder; +import io.grpc.netty.shaded.io.grpc.netty.NettyChannelBuilder; +import io.grpc.netty.shaded.io.netty.channel.ChannelDuplexHandler; +import io.grpc.netty.shaded.io.netty.channel.ChannelFactory; +import io.grpc.netty.shaded.io.netty.channel.ChannelHandlerContext; +import io.grpc.netty.shaded.io.netty.channel.ChannelPromise; +import io.grpc.netty.shaded.io.netty.channel.EventLoopGroup; +import io.grpc.netty.shaded.io.netty.channel.nio.NioEventLoopGroup; +import io.grpc.netty.shaded.io.netty.channel.socket.nio.NioSocketChannel; +import io.grpc.netty.shaded.io.netty.util.ReferenceCountUtil; +import java.io.IOException; +import java.lang.reflect.Field; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import 
java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Test DirectPath fallback behavior by injecting a ChannelHandler into the netty stack that will + * disrupt IPv6 communications. + * + *

    WARNING: this test can only be run on a GCE VM and will explicitly ignore + * GOOGLE_APPLICATION_CREDENTIALS and use the service account associated with the VM. + */ +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITDirectPathFallback { + // A threshold of completed read calls to observe to ascertain IPv6 is working. + // This was determined experimentally to account for both gRPC-LB RPCs and Bigtable api RPCs. + private static final int MIN_COMPLETE_READ_CALLS = 40; + private static final int NUM_RPCS_TO_SEND = 20; + + // IP address prefixes allocated for DirectPath backends. + private static final String DP_IPV6_PREFIX = "2001:4860:8040"; + private static final String DP_IPV4_PREFIX = "34.126"; + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + + private AtomicBoolean blackholeDpAddr = new AtomicBoolean(); + private AtomicInteger numBlocked = new AtomicInteger(); + private AtomicInteger numDpAddrRead = new AtomicInteger(); + private boolean isDpAddr; + + private ChannelFactory channelFactory; + private EventLoopGroup eventLoopGroup; + private RemoteSpannerHelper testHelper; + + private static final String TABLE_NAME = "TestTable"; + private static final List ALL_COLUMNS = Arrays.asList("Key", "StringValue"); + private static Database db; + private static DatabaseClient client; + + // TODO(mohanli): Remove this temporary endpoint once DirectPath goes to public beta. + private static final String DIRECT_PATH_ENDPOINT = "aa423245250f2bbf.sandbox.googleapis.com:443"; + private static final String ENABLE_DIRECT_ACCESS = "spanner.enable_direct_access"; + + public ITDirectPathFallback() { + // Create a transport channel provider that can intercept ipv6 packets. 
+ channelFactory = new MyChannelFactory(); + eventLoopGroup = new NioEventLoopGroup(); + } + + @Before + public void setup() { + assume() + .withMessage("DirectPath integration tests can only run against DirectPathEnv") + .that(Boolean.getBoolean(ENABLE_DIRECT_ACCESS)) + .isTrue(); + // Get default spanner options for Ingetration test + SpannerOptions.Builder builder = env.getTestHelper().getOptions().toBuilder(); + // Set instrumented transport provider + builder.setChannelProvider( + InstantiatingGrpcChannelProvider.newBuilder() + .setAttemptDirectPath(true) + .setEndpoint(DIRECT_PATH_ENDPOINT) + .setPoolSize(1) + .setChannelConfigurator( + managedChannelBuilder -> { + injectNettyChannelHandler(managedChannelBuilder); + // Fail fast when blackhole is active + managedChannelBuilder.keepAliveTime(1, TimeUnit.SECONDS); + managedChannelBuilder.keepAliveTimeout(1, TimeUnit.SECONDS); + return managedChannelBuilder; + }) + .build()); + // Forcefully ignore GOOGLE_APPLICATION_CREDENTIALS + builder.setCredentials( + FixedCredentialsProvider.create(ComputeEngineCredentials.create()).getCredentials()); + + // Create a new testHelper with the instrumented transport provider + testHelper = RemoteSpannerHelper.create(builder.build(), env.getTestHelper().getInstanceId()); + + db = + testHelper.createTestDatabase( + "CREATE TABLE TestTable (" + + " Key STRING(MAX) NOT NULL," + + " StringValue STRING(MAX)," + + ") PRIMARY KEY (Key)"); + client = testHelper.getDatabaseClient(db); + List mutations = new ArrayList<>(); + for (int i = 0; i < 3; ++i) { + mutations.add( + Mutation.newInsertOrUpdateBuilder(TABLE_NAME) + .set("Key") + .to("k" + i) + .set("StringValue") + .to("v" + i) + .build()); + } + client.write(mutations); + } + + @After + public void teardown() { + if (testHelper != null) { + testHelper.cleanUp(); + testHelper.getClient().close(); + } + if (eventLoopGroup != null) { + eventLoopGroup.shutdownGracefully(); + } + } + + @Test + public void testFallback() throws 
InterruptedException, TimeoutException { + // Precondition: wait for DirectPath to connect + assertWithMessage("Failed to observe RPCs over DirectPath").that(exerciseDirectPath()).isTrue(); + + // Enable the blackhole, which will prevent communication with grpclb and thus DirectPath. + blackholeDpAddr.set(true); + + // Send a request, which should be routed over IPv4 and CFE. + client.singleUse(TimestampBound.strong()).readRow(TABLE_NAME, Key.of("k0"), ALL_COLUMNS); + + // Verify that the above check was meaningful, by verifying that the blackhole actually dropped + // packets. + assertWithMessage("Failed to detect any IPv6 traffic in blackhole") + .that(numBlocked.get()) + .isGreaterThan(0); + + // Make sure that the client will start reading from IPv6 again by sending new requests and + // checking the injected IPv6 counter has been updated. + blackholeDpAddr.set(false); + + assertWithMessage("Failed to upgrade back to DirectPath").that(exerciseDirectPath()).isTrue(); + } + + private boolean exerciseDirectPath() throws InterruptedException { + Stopwatch stopwatch = Stopwatch.createStarted(); + numDpAddrRead.set(0); + + boolean seenEnough = false; + + while (!seenEnough && stopwatch.elapsed(TimeUnit.MINUTES) < 2) { + for (int i = 0; i < NUM_RPCS_TO_SEND; i++) { + client.singleUse(TimestampBound.strong()).readRow(TABLE_NAME, Key.of("k0"), ALL_COLUMNS); + } + Thread.sleep(100); + seenEnough = numDpAddrRead.get() >= MIN_COMPLETE_READ_CALLS; + } + return seenEnough; + } + + /** + * This is a giant hack to enable testing DirectPath CFE fallback. + * + *

    It unwraps the {@link ComputeEngineChannelBuilder} to inject a NettyChannelHandler to signal + * IPv6 packet loss. + */ + private void injectNettyChannelHandler(ManagedChannelBuilder channelBuilder) { + try { + // Extract the delegate NettyChannelBuilder using reflection + Field delegateField = ComputeEngineChannelBuilder.class.getDeclaredField("delegate"); + delegateField.setAccessible(true); + + ComputeEngineChannelBuilder gceChannelBuilder = + ((ComputeEngineChannelBuilder) channelBuilder); + Object delegateChannelBuilder = delegateField.get(gceChannelBuilder); + + NettyChannelBuilder nettyChannelBuilder = (NettyChannelBuilder) delegateChannelBuilder; + nettyChannelBuilder.channelFactory(channelFactory); + nettyChannelBuilder.eventLoopGroup(eventLoopGroup); + } catch (NoSuchFieldException | IllegalAccessException e) { + throw new RuntimeException("Failed to inject the netty ChannelHandler", e); + } + } + + /** + * @see com.google.cloud.bigtable.data.v2.it.DirectPathFallbackIT.MyChannelHandler + */ + private class MyChannelFactory implements ChannelFactory { + @Override + public NioSocketChannel newChannel() { + NioSocketChannel channel = new NioSocketChannel(); + channel.pipeline().addLast(new MyChannelHandler()); + + return channel; + } + } + + /** + * A netty {@link io.grpc.netty.shaded.io.netty.channel.ChannelHandler} that can be instructed to + * make IPv6 packets disappear + */ + private class MyChannelHandler extends ChannelDuplexHandler { + @Override + public void connect( + ChannelHandlerContext ctx, + SocketAddress remoteAddress, + SocketAddress localAddress, + ChannelPromise promise) + throws Exception { + + if (remoteAddress instanceof InetSocketAddress) { + InetAddress inetAddress = ((InetSocketAddress) remoteAddress).getAddress(); + String addr = inetAddress.getHostAddress(); + isDpAddr = addr.startsWith(DP_IPV6_PREFIX) || addr.startsWith(DP_IPV4_PREFIX); + } + + if (!(isDpAddr && blackholeDpAddr.get())) { + super.connect(ctx, remoteAddress, 
localAddress, promise); + } else { + // Fail the connection fast + promise.setFailure(new IOException("fake error")); + } + } + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { + boolean dropCall = isDpAddr && blackholeDpAddr.get(); + if (dropCall) { + // Don't notify the next handler and increment counter + numBlocked.incrementAndGet(); + ReferenceCountUtil.release(msg); + } else { + super.channelRead(ctx, msg); + } + } + + @Override + public void channelReadComplete(ChannelHandlerContext ctx) throws Exception { + boolean dropCall = isDpAddr && blackholeDpAddr.get(); + if (dropCall) { + // Don't notify the next handler and increment counter + numBlocked.incrementAndGet(); + } else { + if (isDpAddr) { + numDpAddrRead.incrementAndGet(); + } + super.channelReadComplete(ctx); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDirectedReadsTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDirectedReadsTest.java new file mode 100644 index 000000000000..217da5f4bc5e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDirectedReadsTest.java @@ -0,0 +1,185 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.MockSpannerTestUtil.SELECT1; +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.TransactionContext; +import com.google.cloud.spanner.TransactionManager; +import com.google.cloud.spanner.TransactionRunner; +import com.google.common.collect.Lists; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.DirectedReadOptions.IncludeReplicas; +import com.google.spanner.v1.DirectedReadOptions.ReplicaSelection; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITDirectedReadsTest { + + private static final DirectedReadOptions DIRECTED_READ_OPTIONS = + DirectedReadOptions.newBuilder() + .setIncludeReplicas( + IncludeReplicas.newBuilder() + .addReplicaSelections( + 
ReplicaSelection.newBuilder().setLocation("us-west1").build())) + .build(); + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static Database db; + + @BeforeClass + public static void setUp() { + db = + env.getTestHelper() + .createTestDatabase("CREATE TABLE TEST (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)"); + } + + @AfterClass + public static void tearDown() { + db.drop(); + } + + @Test + public void testReadWriteTransactionRunner_queryWithDirectedReadOptionsViaRequest_throwsError() { + // Directed Read Options set at an RPC level is not acceptable for RW transaction + + assumeFalse("Emulator does not support directed reads", isUsingEmulator()); + SpannerOptions options = env.getTestHelper().getOptions().toBuilder().build(); + try (Spanner spanner = options.getService()) { + DatabaseClient client = spanner.getDatabaseClient(db.getId()); + TransactionRunner runner = client.readWriteTransaction(); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + runner.run( + transaction -> { + try (ResultSet resultSet = + transaction.executeQuery( + SELECT1, Options.directedRead(DIRECTED_READ_OPTIONS))) { + while (resultSet.next()) {} + } + return null; + })); + + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + assertTrue( + e.getMessage() + .contains("Directed reads can only be performed in a read-only transaction.")); + } + } + + @Test + public void testReadWriteTransactionRunner_readWithDirectedReadOptionsViaRequest_throwsError() { + // Directed Read Options set at an RPC level is not acceptable for RW transaction + + assumeFalse("Emulator does not support directed reads", isUsingEmulator()); + SpannerOptions options = env.getTestHelper().getOptions().toBuilder().build(); + try (Spanner spanner = options.getService()) { + DatabaseClient client = spanner.getDatabaseClient(db.getId()); + TransactionRunner runner = client.readWriteTransaction(); + SpannerException e = + assertThrows( + 
SpannerException.class, + () -> + runner.run( + transaction -> { + try (ResultSet resultSet = + transaction.read( + "TEST", + KeySet.singleKey(Key.of(1L)), + Lists.newArrayList("NAME"), + Options.directedRead(DIRECTED_READ_OPTIONS))) { + while (resultSet.next()) {} + } + return null; + })); + + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + assertTrue( + e.getMessage() + .contains("Directed reads can only be performed in a read-only transaction.")); + } + } + + @Test + public void testReadWriteTransactionManager_readWithDirectedReadOptionsViaRequest_throwsError() { + // Directed Read Options set at an RPC level is not acceptable for RW transaction + + assumeFalse("Emulator does not support directed reads", isUsingEmulator()); + SpannerOptions options = env.getTestHelper().getOptions().toBuilder().build(); + try (Spanner spanner = options.getService()) { + DatabaseClient client = spanner.getDatabaseClient(db.getId()); + try (TransactionManager manager = client.transactionManager()) { + SpannerException e = + assertThrows( + SpannerException.class, + () -> { + TransactionContext transaction = manager.begin(); + try { + while (true) { + + ResultSet resultSet = + transaction.read( + "TEST", + KeySet.singleKey(Key.of(1L)), + Lists.newArrayList("NAME"), + Options.directedRead(DIRECTED_READ_OPTIONS)); + while (resultSet.next()) {} + + manager.commit(); + assertNotNull(manager.getCommitTimestamp()); + break; + } + } catch (AbortedException ex) { + transaction = manager.resetForRetry(); + } + }); + assertEquals(ErrorCode.INVALID_ARGUMENT, e.getErrorCode()); + assertTrue( + e.getMessage() + .contains("Directed reads can only be performed in a read-only transaction.")); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDmlReturningTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDmlReturningTest.java new file mode 100644 index 000000000000..d96e148432fa --- 
/dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITDmlReturningTest.java @@ -0,0 +1,414 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.spanner.AsyncResultSet; +import com.google.cloud.spanner.AsyncResultSet.CallbackResponse; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TransactionRunner; +import com.google.cloud.spanner.TransactionRunner.TransactionCallable; +import 
com.google.cloud.spanner.connection.ConnectionOptions; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** Integration tests for DML Returning. */ +@Category(ParallelIntegrationTest.class) +@RunWith(Parameterized.class) +public final class ITDmlReturningTest { + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static DatabaseClient googleStandardSQLClient; + private static DatabaseClient postgreSQLClient; + + /** Id prefix per test case. */ + private static int id; + + private static final long DML_COUNT = 4; + + @BeforeClass + public static void setUpDatabase() { + Database googleStandardSQLDatabase = + env.getTestHelper() + .createTestDatabase( + "CREATE TABLE T (" + + " K STRING(MAX) NOT NULL," + + " V INT64," + + ") PRIMARY KEY (K)"); + googleStandardSQLClient = env.getTestHelper().getDatabaseClient(googleStandardSQLDatabase); + Database postgreSQLDatabase = + env.getTestHelper() + .createTestDatabase( + Dialect.POSTGRESQL, + Collections.singletonList( + "CREATE TABLE T (" + + " \"K\" VARCHAR PRIMARY KEY," + + " \"V\" BIGINT" + + ")")); + postgreSQLClient = env.getTestHelper().getDatabaseClient(postgreSQLDatabase); + } + + @AfterClass + public static void teardown() { + ConnectionOptions.closeSpanner(); + } + + @Before + public void increaseTestIdAndDeleteTestData() { + if (dialect.dialect == Dialect.GOOGLE_STANDARD_SQL) { + googleStandardSQLClient.writeAtLeastOnce( + Collections.singletonList(Mutation.delete("T", KeySet.all()))); + } else { + 
postgreSQLClient.writeAtLeastOnce( + Collections.singletonList(Mutation.delete("T", KeySet.all()))); + } + id++; + } + + @Parameterized.Parameters(name = "Dialect = {0}") + public static List data() { + List params = new ArrayList<>(); + params.add(new DialectTestParameter(Dialect.GOOGLE_STANDARD_SQL)); + params.add(new DialectTestParameter(Dialect.POSTGRESQL)); + return params; + } + + @Parameterized.Parameter(0) + public DialectTestParameter dialect; + + private String getInsertDmlReturningTemplate() { + if (dialect.dialect == Dialect.POSTGRESQL) { + return "INSERT INTO T (\"K\", \"V\") VALUES ('%d-boo1', 1), ('%d-boo2', 2), ('%d-boo3', 3)," + + " ('%d-boo4', 4) RETURNING *"; + } + return "INSERT INTO T (K, V) VALUES ('%d-boo1', 1), ('%d-boo2', 2), ('%d-boo3', 3), ('%d-boo4'," + + " 4) THEN RETURN *"; + } + + private String getUpdateDmlReturningTemplate() { + if (dialect.dialect == Dialect.POSTGRESQL) { + return "UPDATE T SET \"V\" = 100 WHERE \"K\" LIKE '%d-boo%%' RETURNING *"; + } + return "UPDATE T SET V = 100 WHERE K LIKE '%d-boo%%' THEN RETURN *"; + } + + private String getDeleteDmlReturningTemplate() { + if (dialect.dialect == Dialect.POSTGRESQL) { + return "DELETE FROM T WHERE \"K\" like '%d-boo%%' RETURNING *"; + } + return "DELETE FROM T WHERE K like '%d-boo%%' THEN RETURN *"; + } + + private String getDeleteDmlTemplate() { + if (dialect.dialect == Dialect.POSTGRESQL) { + return "DELETE FROM T WHERE \"K\" like '%d-boo%%'"; + } + return "DELETE FROM T WHERE K like '%d-boo%%'"; + } + + private String insertDmlReturning() { + return String.format(getInsertDmlReturningTemplate(), id, id, id, id); + } + + private String updateDmlReturning() { + return String.format(getUpdateDmlReturningTemplate(), id); + } + + private String deleteDmlReturning() { + return String.format(getDeleteDmlReturningTemplate(), id); + } + + private String deleteDml() { + return String.format(getDeleteDmlTemplate(), id); + } + + private DatabaseClient getClient() { + if 
(dialect.dialect == Dialect.POSTGRESQL) { + return postgreSQLClient; + } + return googleStandardSQLClient; + } + + @Test + public void dmlReturningWithExecuteUpdate() { + executeUpdate(DML_COUNT, insertDmlReturning()); + // checks for multi-stmts within a txn, therefore also verifying seqNo. + executeUpdate(DML_COUNT * 2, updateDmlReturning(), deleteDmlReturning()); + } + + private void executeUpdate(long expectedCount, final String... stmts) { + final TransactionCallable callable = + transaction -> { + long rowCount = 0; + for (String stmt : stmts) { + rowCount += transaction.executeUpdate(Statement.of(stmt)); + } + return rowCount; + }; + TransactionRunner runner = getClient().readWriteTransaction(); + Long rowCount = runner.run(callable); + assertEquals((Long) expectedCount, rowCount); + } + + @Test + public void dmlReturningWithExecuteUpdateAsync() { + executeUpdateAsync(DML_COUNT, insertDmlReturning()); + // checks for multi-stmts within a txn, therefore also verifying seqNo. + executeUpdateAsync(DML_COUNT * 2, updateDmlReturning(), deleteDmlReturning()); + } + + private void executeUpdateAsync(long expectedCount, final String... 
stmts) { + final TransactionCallable callable = + transaction -> { + long rowCount = 0; + for (String stmt : stmts) { + rowCount += transaction.executeUpdateAsync(Statement.of(stmt)).get(1, TimeUnit.MINUTES); + } + return rowCount; + }; + TransactionRunner runner = getClient().readWriteTransaction(); + Long rowCount = runner.run(callable); + assertEquals((Long) expectedCount, rowCount); + } + + @Test + public void dmlReturningWithExecutePartitionedUpdate() { + assumeFalse( + "The emulator does not dis-allow THEN RETURN statements for PDML", isUsingEmulator()); + + SpannerException e = + assertThrows( + SpannerException.class, + () -> getClient().executePartitionedUpdate(Statement.of(updateDmlReturning()))); + assertEquals(ErrorCode.UNIMPLEMENTED, e.getErrorCode()); + } + + @Test + public void dmlReturningWithExecuteQuery() { + List rows = executeQuery(DML_COUNT, insertDmlReturning()); + assertEquals( + 1, + getClient() + .singleUse() + .readRow("T", Key.of(String.format("%d-boo1", id)), Collections.singletonList("V")) + .getLong(0)); + + // Check if keys(K) and V have expected values. + for (int idx = 0; idx < rows.size(); idx++) { + assertEquals(idx + 1, rows.get(idx).getLong("V")); + assertEquals(String.format("%d-boo%d", id, idx + 1), rows.get(idx).getString("K")); + } + rows = executeQuery(DML_COUNT, updateDmlReturning()); + assertEquals( + 100, + getClient() + .singleUse() + .readRow("T", Key.of(String.format("%d-boo1", id)), Collections.singletonList("V")) + .getLong(0)); + + // Check if keys(K) and V have expected values. + for (int idx = 0; idx < rows.size(); idx++) { + assertEquals(100, rows.get(idx).getLong("V")); + assertEquals(String.format("%d-boo%d", id, idx + 1), rows.get(idx).getString("K")); + } + rows = executeQuery(DML_COUNT, deleteDmlReturning()); + assertNull( + getClient() + .singleUse() + .readRow("T", Key.of(String.format("%d-boo1", id)), Collections.singletonList("V"))); + + // Check if keys(K) and V have expected values. 
+ for (int idx = 0; idx < rows.size(); idx++) { + assertEquals(100, rows.get(idx).getLong("V")); + assertEquals(String.format("%d-boo%d", id, idx + 1), rows.get(idx).getString("K")); + } + } + + private List executeQuery(long expectedCount, String stmt) { + List rows = new ArrayList<>(); + final TransactionCallable callable = + transaction -> { + // Make sure we start with an empty list if the transaction is aborted and retried. + rows.clear(); + ResultSet resultSet = transaction.executeQuery(Statement.of(stmt)); + // resultSet.next() returns false, when no more row exists. + // So, number of times resultSet.next() returns true, is the number of rows + // returned by the DML Returning statement. + while (resultSet.next()) { + rows.add(resultSet.getCurrentRowAsStruct()); + } + assertFalse(resultSet.next()); + assertNotNull(resultSet.getStats()); + assertEquals(expectedCount, resultSet.getStats().getRowCountExact()); + return null; + }; + TransactionRunner runner = getClient().readWriteTransaction(); + runner.run(callable); + rows.sort(Comparator.comparing(a -> a.getString("K"))); + return rows; + } + + @Test + public void dmlReturningWithExecuteQueryAsync() { + List rows = executeQueryAsync(DML_COUNT, insertDmlReturning()); + assertEquals( + 1, + getClient() + .singleUse() + .readRow("T", Key.of(String.format("%d-boo1", id)), Collections.singletonList("V")) + .getLong(0)); + + // Check if keys(K) and V have expected values. + for (int idx = 0; idx < rows.size(); idx++) { + assertEquals(idx + 1, rows.get(idx).getLong("V")); + assertEquals(String.format("%d-boo%d", id, idx + 1), rows.get(idx).getString("K")); + } + rows = executeQueryAsync(DML_COUNT, updateDmlReturning()); + assertEquals( + 100, + getClient() + .singleUse() + .readRow("T", Key.of(String.format("%d-boo1", id)), Collections.singletonList("V")) + .getLong(0)); + + // Check if keys(K) and V have expected values. 
+ for (int idx = 0; idx < rows.size(); idx++) { + assertEquals(100, rows.get(idx).getLong("V")); + assertEquals(String.format("%d-boo%d", id, idx + 1), rows.get(idx).getString("K")); + } + rows = executeQueryAsync(DML_COUNT, deleteDmlReturning()); + assertNull( + getClient() + .singleUse() + .readRow("T", Key.of(String.format("%d-boo1", id)), Collections.singletonList("V"))); + + // Check if keys(K) and V have expected values. + for (int idx = 0; idx < rows.size(); idx++) { + assertEquals(100, rows.get(idx).getLong("V")); + assertEquals(String.format("%d-boo%d", id, idx + 1), rows.get(idx).getString("K")); + } + } + + private List executeQueryAsync(long expectedCount, String stmt) { + List rows = new ArrayList<>(); + final TransactionCallable callable = + transaction -> { + rows.clear(); + AsyncResultSet rs = transaction.executeQueryAsync(Statement.of(stmt)); + rs.setCallback( + Executors.newSingleThreadExecutor(), + resultSet -> { + try { + while (true) { + switch (resultSet.tryNext()) { + case OK: + rows.add(resultSet.getCurrentRowAsStruct()); + break; + case DONE: + assertNotNull(resultSet.getStats()); + assertEquals(resultSet.getStats().getRowCountExact(), expectedCount); + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + default: + throw new IllegalStateException(); + } + } + } catch (SpannerException e) { + return CallbackResponse.DONE; + } + }); + return null; + }; + TransactionRunner runner = getClient().readWriteTransaction(); + runner.run(callable); + rows.sort(Comparator.comparing(a -> a.getString("K"))); + return rows; + } + + @Test + public void dmlReturningWithBatchUpdate() { + // Check if batchUpdate works well with a mix of Simple DML and DML Returning statements. 
+ long[] rowCounts = batchUpdate(insertDmlReturning(), updateDmlReturning(), deleteDml()); + assertEquals(3, rowCounts.length); + assertEquals(DML_COUNT, rowCounts[0]); + assertEquals(DML_COUNT, rowCounts[1]); + assertEquals(DML_COUNT, rowCounts[2]); + } + + private long[] batchUpdate(final String... stmts) { + final TransactionCallable callable = + transaction -> + transaction.batchUpdate( + Arrays.stream(stmts).map(Statement::of).collect(Collectors.toList())); + TransactionRunner runner = getClient().readWriteTransaction(); + return runner.run(callable); + } + + @Test + public void dmlReturningWithBatchUpdateAsync() { + // Check if batchUpdateAsync works well with a mix of Simple DML and DML Returning statements. + long[] rowCounts = batchUpdateAsync(insertDmlReturning(), updateDmlReturning(), deleteDml()); + assertEquals(3, rowCounts.length); + assertEquals(DML_COUNT, rowCounts[0]); + assertEquals(DML_COUNT, rowCounts[1]); + assertEquals(DML_COUNT, rowCounts[2]); + } + + private long[] batchUpdateAsync(final String... stmts) { + final TransactionCallable callable = + transaction -> + transaction + .batchUpdateAsync( + Arrays.stream(stmts).map(Statement::of).collect(Collectors.toList())) + .get(); + TransactionRunner runner = getClient().readWriteTransaction(); + return runner.run(callable); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITEndToEndTracingTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITEndToEndTracingTest.java new file mode 100644 index 000000000000..52d1bc94629c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITEndToEndTracingTest.java @@ -0,0 +1,161 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeTrue; + +import com.google.api.gax.core.FixedCredentialsProvider; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ResourceExhaustedException; +import com.google.api.gax.rpc.StatusCode; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.IntegrationTestEnv.TestEnvOptions; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.SpannerOptionsHelper; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.StructField; +import com.google.cloud.spanner.connection.ConnectionOptions; +import com.google.cloud.trace.v1.TraceServiceClient; +import com.google.cloud.trace.v1.TraceServiceSettings; +import com.google.common.base.Stopwatch; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.context.Scope; +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.concurrent.TimeUnit; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; 
+import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Integration tests for End to End Tracing. */ +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITEndToEndTracingTest { + public static Collection testEnvOptions = + Arrays.asList(TestEnvOptions.USE_END_TO_END_TRACING); + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(testEnvOptions); + private static DatabaseClient googleStandardSQLClient; + + static { + SpannerOptionsHelper.resetActiveTracingFramework(); + SpannerOptions.enableOpenTelemetryTraces(); + } + + private static String selectValueQuery = "SELECT @p1 + @p1"; + + @BeforeClass + public static void setUp() { + setUpDatabase(); + } + + public static void setUpDatabase() { + // Empty database. + Database googleStandardSQLDatabase = env.getTestHelper().createTestDatabase(); + googleStandardSQLClient = env.getTestHelper().getDatabaseClient(googleStandardSQLDatabase); + } + + @AfterClass + public static void teardown() { + ConnectionOptions.closeSpanner(); + } + + private void assertTrace(String traceId) throws IOException, InterruptedException { + TraceServiceSettings settings = + env.getTestHelper().getOptions().getCredentials() == null + ? 
TraceServiceSettings.newBuilder().build() + : TraceServiceSettings.newBuilder() + .setCredentialsProvider( + FixedCredentialsProvider.create( + env.getTestHelper().getOptions().getCredentials())) + .build(); + try (TraceServiceClient client = TraceServiceClient.create(settings)) { + boolean foundTrace = false; + Stopwatch metricsPollingStopwatch = Stopwatch.createStarted(); + while (!foundTrace && metricsPollingStopwatch.elapsed(TimeUnit.SECONDS) < 30) { + // Try every 5 seconds + Thread.sleep(5000); + try { + foundTrace = + client + .getTrace(env.getTestHelper().getInstanceId().getProject(), traceId) + .getSpansList() + .stream() + .anyMatch(span -> "Spanner.ExecuteStreamingSql".equals(span.getName())); + } catch (ApiException apiException) { + assumeTrue( + apiException.getStatusCode() != null + && StatusCode.Code.NOT_FOUND.equals(apiException.getStatusCode().getCode())); + System.out.println("Trace NOT_FOUND error ignored"); + } + } + assertTrue(foundTrace); + } catch (ResourceExhaustedException resourceExhaustedException) { + if (resourceExhaustedException + .getMessage() + .contains("Quota exceeded for quota metric 'Read requests (free)'")) { + // Ignore and allow the test to succeed. 
+ System.out.println("RESOURCE_EXHAUSTED error ignored"); + } else { + throw resourceExhaustedException; + } + } + } + + private Struct executeWithRowResultType(Statement statement, Type expectedRowType) { + ResultSet resultSet = statement.executeQuery(googleStandardSQLClient.singleUse()); + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getType()).isEqualTo(expectedRowType); + Struct row = resultSet.getCurrentRowAsStruct(); + assertThat(resultSet.next()).isFalse(); + return row; + } + + @Test + public void simpleSelect() throws IOException, InterruptedException { + assumeTrue("Temporarily disabling test because it is failing", false); + Tracer tracer = + env.getTestHelper() + .getOptions() + .getOpenTelemetry() + .getTracer(ITEndToEndTracingTest.class.getName()); + Span span = tracer.spanBuilder("simpleSelect").startSpan(); + Scope scope = span.makeCurrent(); + Type rowType = Type.struct(StructField.of("", Type.int64())); + Struct row = + executeWithRowResultType( + Statement.newBuilder(selectValueQuery).bind("p1").to(1234).build(), rowType); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getLong(0)).isEqualTo(2468); + scope.close(); + span.end(); + assertTrace(span.getSpanContext().getTraceId()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITFloat32Test.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITFloat32Test.java new file mode 100644 index 000000000000..6a973f1c4ae9 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITFloat32Test.java @@ -0,0 +1,400 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.connection.ConnectionOptions; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +@Category(ParallelIntegrationTest.class) +@RunWith(Parameterized.class) +public class ITFloat32Test { + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + + 
@Parameterized.Parameters(name = "Dialect = {0}") + public static List data() { + return Arrays.asList( + new DialectTestParameter(Dialect.GOOGLE_STANDARD_SQL), + new DialectTestParameter(Dialect.POSTGRESQL)); + } + + @Parameterized.Parameter() public DialectTestParameter dialect; + + private static DatabaseClient googleStandardSQLClient; + private static DatabaseClient postgreSQLClient; + + private static final String[] GOOGLE_STANDARD_SQL_SCHEMA = + new String[] { + "CREATE TABLE T (" + + " Key STRING(MAX) NOT NULL," + + " Float32Value FLOAT32," + + " Float32ArrayValue ARRAY," + + ") PRIMARY KEY (Key)" + }; + + private static final String[] POSTGRESQL_SCHEMA = + new String[] { + "CREATE TABLE T (" + + " Key VARCHAR PRIMARY KEY," + + " Float32Value REAL," + + " Float32ArrayValue REAL[]" + + ")" + }; + + private static DatabaseClient client; + + @BeforeClass + public static void setUpDatabase() + throws ExecutionException, InterruptedException, TimeoutException { + Database googleStandardSQLDatabase = + env.getTestHelper().createTestDatabase(GOOGLE_STANDARD_SQL_SCHEMA); + + googleStandardSQLClient = env.getTestHelper().getDatabaseClient(googleStandardSQLDatabase); + + Database postgreSQLDatabase = + env.getTestHelper() + .createTestDatabase(Dialect.POSTGRESQL, Arrays.asList(POSTGRESQL_SCHEMA)); + postgreSQLClient = env.getTestHelper().getDatabaseClient(postgreSQLDatabase); + } + + @Before + public void before() { + client = + dialect.dialect == Dialect.GOOGLE_STANDARD_SQL ? googleStandardSQLClient : postgreSQLClient; + } + + @AfterClass + public static void tearDown() throws Exception { + ConnectionOptions.closeSpanner(); + } + + /** Sequence used to generate unique keys. 
*/ + private static int seq; + + private static String uniqueString() { + return String.format("k%04d", seq++); + } + + private String lastKey; + + private Timestamp write(Mutation m) { + return client.write(Collections.singletonList(m)); + } + + private Mutation.WriteBuilder baseInsert() { + return Mutation.newInsertOrUpdateBuilder("T").set("Key").to(lastKey = uniqueString()); + } + + private Struct readRow(String table, String key, String... columns) { + return client + .singleUse(TimestampBound.strong()) + .readRow(table, Key.of(key), Arrays.asList(columns)); + } + + private Struct readLastRow(String... columns) { + return readRow("T", lastKey, columns); + } + + @Test + public void writeFloat32() { + write(baseInsert().set("Float32Value").to(2.0f).build()); + Struct row = readLastRow("Float32Value"); + assertFalse(row.isNull(0)); + assertEquals(2.0f, row.getFloat(0), 0.0f); + } + + @Test + public void writeFloat32NonNumbers() { + + write(baseInsert().set("Float32Value").to(Float.NEGATIVE_INFINITY).build()); + Struct row = readLastRow("Float32Value"); + assertFalse(row.isNull(0)); + assertEquals(Float.NEGATIVE_INFINITY, row.getFloat(0), 0.0f); + + write(baseInsert().set("Float32Value").to(Float.POSITIVE_INFINITY).build()); + row = readLastRow("Float32Value"); + assertFalse(row.isNull(0)); + assertEquals(Float.POSITIVE_INFINITY, row.getFloat(0), 0.0); + + write(baseInsert().set("Float32Value").to(Float.NaN).build()); + row = readLastRow("Float32Value"); + assertFalse(row.isNull(0)); + assertTrue(Float.isNaN(row.getFloat(0))); + } + + @Test + public void writeFloat32Null() { + write(baseInsert().set("Float32Value").to((Float) null).build()); + Struct row = readLastRow("Float32Value"); + assertTrue(row.isNull(0)); + } + + @Test + public void writeFloat32ArrayNull() { + write(baseInsert().set("Float32ArrayValue").toFloat32Array((float[]) null).build()); + Struct row = readLastRow("Float32ArrayValue"); + assertTrue(row.isNull(0)); + } + + @Test + public void 
writeFloat32ArrayEmpty() { + write(baseInsert().set("Float32ArrayValue").toFloat32Array(new float[] {}).build()); + Struct row = readLastRow("Float32ArrayValue"); + assertFalse(row.isNull(0)); + assertTrue(row.getFloatList(0).isEmpty()); + } + + @Test + public void writeFloat32Array() { + write( + baseInsert() + .set("Float32ArrayValue") + .toFloat32Array(Arrays.asList(null, 1.0f, 2.0f)) + .build()); + Struct row = readLastRow("Float32ArrayValue"); + assertFalse(row.isNull(0)); + assertEquals(row.getFloatList(0), Arrays.asList(null, 1.0f, 2.0f)); + assertThrows(NullPointerException.class, () -> row.getFloatArray(0)); + } + + @Test + public void writeFloat32ArrayNoNulls() { + write(baseInsert().set("Float32ArrayValue").toFloat32Array(Arrays.asList(1.0f, 2.0f)).build()); + Struct row = readLastRow("Float32ArrayValue"); + assertFalse(row.isNull(0)); + assertEquals(2, row.getFloatArray(0).length); + assertEquals(1.0f, row.getFloatArray(0)[0], 0.0f); + assertEquals(2.0f, row.getFloatArray(0)[1], 0.0f); + } + + private String getInsertStatementWithLiterals() { + String statement = "INSERT INTO T (Key, Float32Value, Float32ArrayValue) VALUES "; + + if (dialect.dialect == Dialect.POSTGRESQL) { + statement += + "('dml1', 3.14::float8, array[1.1]::float4[]), ('dml2', '3.14'::float4," + + " array[3.14::float4, 3.14::float8]::float4[]), ('dml3', 'nan'::real," + + " array['inf'::real, (3.14::float8)::float4, 1.2, '-inf']::float4[]), ('dml4'," + + " 1.175494e-38::real, array[1.175494e-38, 3.4028234e38, -3.4028234e38]::real[])," + + " ('dml5', null, null)"; + } else { + statement += + "('dml1', 3.14, [CAST(1.1 AS FLOAT32)]), ('dml2', CAST('3.14' AS FLOAT32)," + + " array[CAST(3.14 AS FLOAT32), 3.14]), ('dml3', CAST('nan' AS FLOAT32)," + + " array[CAST('inf' AS FLOAT32), CAST(CAST(3.14 AS FLOAT64) AS FLOAT32), 1.2," + + " CAST('-inf' AS FLOAT32)]), ('dml4', 1.175494e-38, [CAST(1.175494e-38 AS FLOAT32)," + + " 3.4028234e38, -3.4028234e38]), ('dml5', null, null)"; + } + return 
statement; + } + + @Test + public void float32Literals() { + client + .readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate(Statement.of(getInsertStatementWithLiterals())); + return null; + }); + + verifyContents("dml"); + } + + private String getInsertStatementWithParameters() { + String pgStatement = + "INSERT INTO T (Key, Float32Value, Float32ArrayValue) VALUES " + + "('param1', $1, $2), " + + "('param2', $3, $4), " + + "('param3', $5, $6), " + + "('param4', $7, $8), " + + "('param5', $9, $10)"; + + return (dialect.dialect == Dialect.POSTGRESQL) ? pgStatement : pgStatement.replace("$", "@p"); + } + + @Test + public void float32Parameter() { + client + .readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate( + Statement.newBuilder(getInsertStatementWithParameters()) + .bind("p1") + .to(Value.float32(3.14f)) + .bind("p2") + .to(Value.float32Array(Arrays.asList(1.1f))) + .bind("p3") + .to(Value.float32(3.14f)) + .bind("p4") + .to(Value.float32Array(new float[] {3.14f, 3.14f})) + .bind("p5") + .to(Value.float32(Float.NaN)) + .bind("p6") + .to( + Value.float32Array( + Arrays.asList( + Float.POSITIVE_INFINITY, 3.14f, 1.2f, Float.NEGATIVE_INFINITY))) + .bind("p7") + .to(Value.float32(Float.MIN_NORMAL)) + .bind("p8") + .to( + Value.float32Array( + Arrays.asList( + Float.MIN_NORMAL, Float.MAX_VALUE, -1 * Float.MAX_VALUE))) + .bind("p9") + .to(Value.float32(null)) + .bind("p10") + .to(Value.float32Array((float[]) null)) + .build()); + return null; + }); + + verifyContents("param"); + } + + private String getInsertStatementForUntypedParameters() { + if (dialect.dialect == Dialect.POSTGRESQL) { + return "INSERT INTO T (key, float32value, float32arrayvalue) VALUES " + + "('untyped1', ($1)::float4, ($2)::float4[])"; + } + return "INSERT INTO T (Key, Float32Value, Float32ArrayValue) VALUES " + + "('untyped1', CAST(@p1 AS FLOAT32), CAST(@p2 AS ARRAY))"; + } + + @Test + public void float32UntypedParameter() { + client + 
.readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate( + Statement.newBuilder(getInsertStatementForUntypedParameters()) + .bind("p1") + .to( + Value.untyped( + com.google.protobuf.Value.newBuilder() + .setNumberValue((double) 3.14f) + .build())) + .bind("p2") + .to( + Value.untyped( + com.google.protobuf.Value.newBuilder() + .setListValue( + com.google.protobuf.ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setNumberValue((double) Float.MIN_NORMAL))) + .build())) + .build()); + return null; + }); + + Struct row = readRow("T", "untyped1", "Float32Value", "Float32ArrayValue"); + // Float32Value + assertFalse(row.isNull(0)); + assertEquals(3.14f, row.getFloat(0), 0.00001f); + // Float32ArrayValue + assertFalse(row.isNull(1)); + assertEquals(1, row.getFloatList(1).size()); + assertEquals(Float.MIN_NORMAL, row.getFloatList(1).get(0), 0.00001f); + } + + private void verifyContents(String keyPrefix) { + try (ResultSet resultSet = + client + .singleUse() + .executeQuery( + Statement.of( + "SELECT Key AS key, Float32Value AS float32value, Float32ArrayValue AS float32arrayvalue FROM T WHERE Key LIKE '{keyPrefix}%' ORDER BY key" + .replace("{keyPrefix}", keyPrefix)))) { + + assertTrue(resultSet.next()); + + assertEquals(3.14f, resultSet.getFloat("float32value"), 0.00001f); + assertEquals(Value.float32(3.14f), resultSet.getValue("float32value")); + + assertArrayEquals(new float[] {1.1f}, resultSet.getFloatArray("float32arrayvalue"), 0.00001f); + + assertTrue(resultSet.next()); + + assertEquals(3.14f, resultSet.getFloat("float32value"), 0.00001f); + assertEquals(Arrays.asList(3.14f, 3.14f), resultSet.getFloatList("float32arrayvalue")); + assertEquals( + Value.float32Array(new float[] {3.14f, 3.14f}), resultSet.getValue("float32arrayvalue")); + + assertTrue(resultSet.next()); + assertTrue(Float.isNaN(resultSet.getFloat("float32value"))); + assertTrue(Float.isNaN(resultSet.getValue("float32value").getFloat32())); + 
assertEquals( + Arrays.asList(Float.POSITIVE_INFINITY, 3.14f, 1.2f, Float.NEGATIVE_INFINITY), + resultSet.getFloatList("float32arrayvalue")); + assertEquals( + Value.float32Array( + Arrays.asList(Float.POSITIVE_INFINITY, 3.14f, 1.2f, Float.NEGATIVE_INFINITY)), + resultSet.getValue("float32arrayvalue")); + + assertTrue(resultSet.next()); + assertEquals(Float.MIN_NORMAL, resultSet.getFloat("float32value"), 0.00001f); + assertEquals(Float.MIN_NORMAL, resultSet.getValue("float32value").getFloat32(), 0.00001f); + assertEquals(3, resultSet.getFloatList("float32arrayvalue").size()); + assertEquals(Float.MIN_NORMAL, resultSet.getFloatList("float32arrayvalue").get(0), 0.00001); + assertEquals(Float.MAX_VALUE, resultSet.getFloatList("float32arrayvalue").get(1), 0.00001f); + assertEquals( + -1 * Float.MAX_VALUE, resultSet.getFloatList("float32arrayvalue").get(2), 0.00001f); + assertEquals(3, resultSet.getValue("float32arrayvalue").getFloat32Array().size()); + + assertTrue(resultSet.next()); + assertTrue(resultSet.isNull("float32value")); + assertTrue(resultSet.isNull("float32arrayvalue")); + + assertFalse(resultSet.next()); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITForeignKeyDeleteCascadeTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITForeignKeyDeleteCascadeTest.java new file mode 100644 index 000000000000..3600422669c4 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITForeignKeyDeleteCascadeTest.java @@ -0,0 +1,486 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.common.collect.ImmutableList; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +@Category(ParallelIntegrationTest.class) +@RunWith(Parameterized.class) +public class ITForeignKeyDeleteCascadeTest { + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + + @Parameterized.Parameters(name = "Dialect = {0}") 
+ public static List data() { + List params = new ArrayList<>(); + params.add(new DialectTestParameter(Dialect.GOOGLE_STANDARD_SQL)); + params.add(new DialectTestParameter(Dialect.POSTGRESQL)); + return params; + } + + private static final String TABLE_NAME_SINGER = "Singer"; + private static final String TABLE_NAME_CONCERT = "Concert"; + private static final String DELETE_RULE_COLUMN_NAME = "DELETE_RULE"; + + private static Database GOOGLE_STANDARD_SQL_DATABASE; + private static Database POSTGRESQL_DATABASE; + private static final List dbs = new ArrayList<>(); + + @Parameterized.Parameter(0) + public DialectTestParameter dialect; + + @BeforeClass + public static void setUpDatabase() { + GOOGLE_STANDARD_SQL_DATABASE = + env.getTestHelper() + .createTestDatabase( + ImmutableList.of( + "CREATE TABLE Singer (\n" + + " singer_id INT64 NOT NULL,\n" + + " first_name STRING(1024),\n" + + ") PRIMARY KEY(singer_id)\n", + "CREATE TABLE Concert (\n" + + " venue_id INT64 NOT NULL,\n" + + " singer_id INT64 NOT NULL,\n" + + " CONSTRAINT Fk_Concert_Singer FOREIGN KEY (singer_id) REFERENCES Singer" + + " (singer_id) ON DELETE CASCADE\n" + + ") PRIMARY KEY(venue_id, singer_id)")); + POSTGRESQL_DATABASE = + env.getTestHelper() + .createTestDatabase( + Dialect.POSTGRESQL, + ImmutableList.of( + "CREATE TABLE Singer (\n" + + " singer_id BIGINT PRIMARY KEY,\n" + + " first_name VARCHAR\n" + + ")", + "CREATE TABLE Concert (\n" + + " venue_id BIGINT NOT NULL,\n" + + " singer_id BIGINT NOT NULL,\n" + + " PRIMARY KEY (venue_id, singer_id),\n" + + " CONSTRAINT \"Fk_Concert_Singer\" FOREIGN KEY (singer_id)" + + " REFERENCES Singer (singer_id) ON DELETE CASCADE\n" + + " )")); + + dbs.add(GOOGLE_STANDARD_SQL_DATABASE); + dbs.add(POSTGRESQL_DATABASE); + } + + @AfterClass + public static void tearDown() { + for (Database db : dbs) { + db.drop(); + } + dbs.clear(); + } + + @Test + public void testForeignKeyDeleteCascadeConstraints_withCreateDDLStatements() { + final DatabaseClient databaseClient = 
getCreatedDatabaseClient(); + try (final ResultSet rs = + databaseClient + .singleUse() + .executeQuery( + Statement.of( + "SELECT DELETE_RULE\n" + + "FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS\n" + + "WHERE CONSTRAINT_NAME ='Fk_Concert_Singer'"))) { + assertTrue(rs.next()); + // TODO: Enable for the emulator when it returns the correct value for DELETE_RULE. + if (!isUsingEmulator()) { + assertEquals("CASCADE", rs.getString(0)); + } + assertFalse(rs.next()); + } + } + + @Test + public void testForeignKeyDeleteCascadeConstraints_withAlterDDLStatements() throws Exception { + // Creating new tables within this test to ensure we don't pollute tables used by other tests in + // this class. + List createStatements; + if (dialect.dialect == Dialect.POSTGRESQL) { + createStatements = + ImmutableList.of( + "CREATE TABLE Singer (\n" + + " singer_id BIGINT PRIMARY KEY,\n" + + " first_name VARCHAR\n" + + ")", + "CREATE TABLE ConcertV2 (\n" + + " venue_id BIGINT NOT NULL,\n" + + " singer_id BIGINT NOT NULL,\n" + + " PRIMARY KEY (venue_id, singer_id)\n" + + " )", + "ALTER TABLE ConcertV2 ADD CONSTRAINT \"Fk_Concert_Singer_V2\" FOREIGN KEY(singer_id)" + + " REFERENCES Singer(singer_id) ON DELETE CASCADE"); + } else { + createStatements = + ImmutableList.of( + "CREATE TABLE Singer (\n" + + " singer_id INT64 NOT NULL,\n" + + " first_name STRING(1024),\n" + + ") PRIMARY KEY(singer_id)\n", + "CREATE TABLE ConcertV2 (\n" + + " venue_id INT64 NOT NULL,\n" + + " singer_id INT64 NOT NULL,\n" + + ") PRIMARY KEY(venue_id, singer_id)", + "ALTER TABLE ConcertV2 ADD CONSTRAINT Fk_Concert_Singer_V2 FOREIGN KEY(singer_id)" + + " REFERENCES Singer(singer_id) ON DELETE CASCADE"); + } + final Database createdDatabase = + env.getTestHelper().createTestDatabase(dialect.dialect, createStatements); + dbs.add(createdDatabase); + + final DatabaseClient databaseClient = env.getTestHelper().getDatabaseClient(createdDatabase); + + try (final ResultSet rs = + databaseClient + .singleUse() + 
.executeQuery( + Statement.of( + "SELECT DELETE_RULE\n" + + "FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS\n" + + "WHERE CONSTRAINT_NAME ='Fk_Concert_Singer_V2'"))) { + assertTrue(rs.next()); + // TODO: Enable when the emulator returns the correct value for this column. + if (!isUsingEmulator()) { + assertEquals("CASCADE", rs.getString(0)); + } + assertFalse(rs.next()); + } + + // remove the foreign key delete cascade constraint + getDatabaseAdminClient() + .updateDatabaseDdl( + env.getTestHelper().getInstanceId().getInstance(), + createdDatabase.getId().getDatabase(), + ImmutableList.of( + "ALTER TABLE ConcertV2\n" + "DROP CONSTRAINT Fk_Concert_Singer_V2", + "ALTER TABLE ConcertV2 ADD CONSTRAINT Fk_Concert_Singer_V2 FOREIGN KEY(singer_id)" + + " REFERENCES Singer(singer_id) "), + null) + .get(); + + try (final ResultSet rs = + databaseClient + .singleUse() + .executeQuery( + Statement.of( + "SELECT DELETE_RULE\n" + + "FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS\n" + + "WHERE LOWER(CONSTRAINT_NAME) ='fk_concert_singer_v2'"))) { + assertTrue(rs.next()); + assertEquals("NO ACTION", rs.getString(0)); + assertFalse(rs.next()); + } + } + + @Test + public void testForeignKeyDeleteCascadeConstraints_verifyValidInsertions() { + final DatabaseClient databaseClient = getCreatedDatabaseClient(); + final String singerInsertStatement = + "INSERT INTO Singer (singer_id, first_name) VALUES (" + generateQueryParameters(2) + ")"; + final Statement singerInsertStatementWithValues = + Statement.newBuilder(singerInsertStatement) + // Use 'p1' to bind to the parameter with index 1 etc. + .bind("p1") + .to(1L) + .bind("p2") + .to("singerName") + .build(); + + final String concertInsertStatement = + "INSERT INTO Concert (venue_id, singer_id) VALUES (" + generateQueryParameters(2) + ")"; + final Statement concertInsertStatementWithValues = + Statement.newBuilder(concertInsertStatement) + // Use 'p1' to bind to the parameter with index 1 etc. 
+ .bind("p1") + .to(1L) + .bind("p2") + .to(1L) + .build(); + + // successful inserts into referenced and referencing tables + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.batchUpdate( + ImmutableList.of( + singerInsertStatementWithValues, concertInsertStatementWithValues)); + return null; + }); + + try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery(Statement.of("SELECT * FROM " + TABLE_NAME_SINGER))) { + + resultSet.next(); + assertEquals(1, resultSet.getLong("singer_id")); + assertEquals("singerName", resultSet.getString("first_name")); + + assertFalse(resultSet.next()); + } + + try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery(Statement.of("SELECT * FROM " + TABLE_NAME_CONCERT))) { + + resultSet.next(); + assertEquals(1, resultSet.getLong("singer_id")); + assertEquals(1, resultSet.getLong("venue_id")); + + assertFalse(resultSet.next()); + } + } + + @Test + public void testForeignKeyDeleteCascadeConstraints_verifyInvalidInsertions() { + final DatabaseClient databaseClient = getCreatedDatabaseClient(); + + // unsuccessful inserts into referencing tables when foreign key is not inserted into referenced + // table + final String concertInsertStatement = + "INSERT INTO Concert (venue_id, singer_id) VALUES (" + generateQueryParameters(2) + ")"; + final Statement concertInsertStatementWithInvalidValues = + Statement.newBuilder(concertInsertStatement) + // Use 'p1' to bind to the parameter with index 1 etc. 
+ .bind("p1") + .to(2L) + .bind("p2") + .to(2L) + .build(); + + SpannerException ex = + assertThrows( + SpannerException.class, + () -> + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate(concertInsertStatementWithInvalidValues); + return null; + })); + assertEquals(ErrorCode.FAILED_PRECONDITION, ex.getErrorCode()); + assertTrue(ex.getMessage(), ex.getMessage().contains("Cannot find referenced")); + } + + @Test + public void testForeignKeyDeleteCascadeConstraints_forDeletions() { + final DatabaseClient databaseClient = getCreatedDatabaseClient(); + + final String singerInsertStatement = + "INSERT INTO Singer (singer_id, first_name) VALUES (" + generateQueryParameters(2) + ")"; + final Statement singerInsertStatementWithValues = + Statement.newBuilder(singerInsertStatement) + // Use 'p1' to bind to the parameter with index 1 etc. + .bind("p1") + .to(3L) + .bind("p2") + .to("singerName") + .build(); + + final String concertInsertStatement = + "INSERT INTO Concert (venue_id, singer_id) VALUES (" + generateQueryParameters(2) + ")"; + final Statement concertInsertStatementWithValues = + Statement.newBuilder(concertInsertStatement) + // Use 'p1' to bind to the parameter with index 1 etc. + .bind("p1") + .to(3L) + .bind("p2") + .to(3L) + .build(); + + // successful inserts into referenced and referencing tables + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.batchUpdate( + ImmutableList.of( + singerInsertStatementWithValues, concertInsertStatementWithValues)); + return null; + }); + + // execute delete + final Statement singerDeleteStatementWithValues = + Statement.newBuilder("DELETE FROM Singer WHERE singer_id = " + generateQueryParameters(1)) + // Use 'p1' to bind to the parameter with index 1 etc. 
+ .bind("p1") + .to(3L) + .build(); + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate(singerDeleteStatementWithValues); + return null; + }); + + try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery(Statement.of("SELECT * FROM " + TABLE_NAME_SINGER))) { + assertFalse(resultSet.next()); + } + + try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery(Statement.of("SELECT * FROM " + TABLE_NAME_CONCERT))) { + assertFalse(resultSet.next()); + } + } + + @Test + public void testForeignKeyDeleteCascadeConstraints_forMutations_onConflictDueToParentTable() { + final DatabaseClient databaseClient = getCreatedDatabaseClient(); + + // inserting and deleting the referenced key within the same mutation are considered + // conflicting operations, thus this results in an exception. + SpannerException ex = + assertThrows( + SpannerException.class, + () -> + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.buffer( + Arrays.asList( + Mutation.newInsertBuilder("Singer") + .set("singer_id") + .to(4L) + .set("first_name") + .to("singerName") + .build(), + Mutation.delete("Singer", Key.of(4L)))); + return null; + })); + assertEquals(ErrorCode.FAILED_PRECONDITION, ex.getErrorCode()); + } + + @Test + public void testForeignKeyDeleteCascadeConstraints_forMutations_onConflictsDueToChildTable() { + final DatabaseClient databaseClient = getCreatedDatabaseClient(); + + // referencing a foreign key in child table and deleting the referenced key in parent table + // within the same mutations are considered conflicting operations. + final String singerInsertStatement = + "INSERT INTO Singer (singer_id, first_name) VALUES (" + generateQueryParameters(2) + ")"; + final Statement singerInsertStatementWithValues = + Statement.newBuilder(singerInsertStatement) + // Use 'p1' to bind to the parameter with index 1 etc. 
+ .bind("p1") + .to(5L) + .bind("p2") + .to("singerName") + .build(); + + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate(singerInsertStatementWithValues); + return null; + }); + assertThrows( + SpannerException.class, + () -> + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.buffer( + Arrays.asList( + Mutation.newInsertBuilder("Concert") + .set("first_name") + .to(5L) + .set("singer_id") + .to(5L) + .build(), + Mutation.delete("Singer", Key.of(5L)))); + return null; + })); + } + + private DatabaseAdminClient getDatabaseAdminClient() { + return env.getTestHelper().getClient().getDatabaseAdminClient(); + } + + private DatabaseClient getCreatedDatabaseClient() { + if (dialect.dialect == Dialect.POSTGRESQL) { + return env.getTestHelper().getDatabaseClient(POSTGRESQL_DATABASE); + } + return env.getTestHelper().getDatabaseClient(GOOGLE_STANDARD_SQL_DATABASE); + } + + /** + * Returns '@p1, @p2, ..., @pNumParams' for GoogleSQL and $1, $2, ..., $NumParams' for PostgreSQL + */ + private String generateQueryParameters(final int numParams) { + final List params; + if (dialect.dialect == Dialect.POSTGRESQL) { + params = + IntStream.range(1, numParams + 1) + .mapToObj(paramIndex -> "$" + paramIndex) + .collect(Collectors.toList()); + + } else { + params = + IntStream.range(1, numParams + 1) + .mapToObj(paramIndex -> "@p" + paramIndex) + .collect(Collectors.toList()); + } + if (params.size() == 1) { + return params.get(0); + } + return String.join(",", params); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITInstanceAdminTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITInstanceAdminTest.java new file mode 100644 index 000000000000..4e6a87bebf9d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITInstanceAdminTest.java @@ -0,0 +1,201 @@ +/* + * Copyright 2017 
Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static com.google.cloud.spanner.testing.ExperimentalHostHelper.isExperimentalHost; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assume.assumeFalse; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.Instance; +import com.google.cloud.spanner.InstanceAdminClient; +import com.google.cloud.spanner.InstanceConfig; +import com.google.cloud.spanner.InstanceInfo; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.common.collect.Iterators; +import com.google.spanner.admin.instance.v1.AutoscalingConfig; +import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Integration tests for {@link com.google.cloud.spanner.InstanceAdminClient}. 
*/ +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITInstanceAdminTest { + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(true); + static InstanceAdminClient instanceClient; + + @BeforeClass + public static void setUp() { + assumeFalse( + "instance / instanceConfig operations are not supported on experimental host", + isExperimentalHost()); + instanceClient = env.getTestHelper().getClient().getInstanceAdminClient(); + } + + @Test + public void instanceConfigOperations() { + List configs = new ArrayList<>(); + Iterators.addAll(configs, instanceClient.listInstanceConfigs().iterateAll().iterator()); + assertThat(configs.isEmpty()).isFalse(); + InstanceConfig config = + instanceClient.getInstanceConfig(configs.get(0).getId().getInstanceConfig()); + assertThat(config.getId()).isEqualTo(configs.get(0).getId()); + config = config.reload(); + assertThat(config.getId()).isEqualTo(configs.get(0).getId()); + } + + @Ignore("Feature is not yet enabled in production") + @Test + public void instanceConfigLeaderOptions() { + assumeFalse("The emulator does not support leader options", isUsingEmulator()); + List configs = new ArrayList<>(); + Iterators.addAll(configs, instanceClient.listInstanceConfigs().iterateAll().iterator()); + + configs.forEach(config -> assertThat(config.getReplicas()).isNotEmpty()); + configs.forEach(config -> assertThat(config.getLeaderOptions()).isNotEmpty()); + } + + @Test + public void listInstances() { + assumeFalse("The emulator does not support filtering on instances", isUsingEmulator()); + + Instance instance = + Iterators.getOnlyElement( + instanceClient + .listInstances( + Options.filter( + "name:instances/" + env.getTestHelper().getInstanceId().getInstance())) + .iterateAll() + .iterator()); + assertThat(instance.getId()).isEqualTo(env.getTestHelper().getInstanceId()); + } + + @Test + public void updateInstance() throws Exception { + assumeFalse("The emulator does not support updating 
instances", isUsingEmulator()); + + Instance instance = + instanceClient.getInstance(env.getTestHelper().getInstanceId().getInstance()); + String rand = new Random().nextInt() + ""; + String newDisplayName = "instance test" + rand; + InstanceInfo toUpdate = + InstanceInfo.newBuilder(env.getTestHelper().getInstanceId()) + .setDisplayName(newDisplayName) + .setNodeCount(instance.getNodeCount() + 1) + .build(); + // Only update display name + OperationFuture op = + instanceClient.updateInstance(toUpdate, InstanceInfo.InstanceField.DISPLAY_NAME); + Instance newInstance = op.get(); + assertThat(newInstance.getNodeCount()).isEqualTo(instance.getNodeCount()); + assertThat(newInstance.getDisplayName()).isEqualTo(newDisplayName); + + Instance newInstanceFromGet = + instanceClient.getInstance(env.getTestHelper().getInstanceId().getInstance()); + assertThat(newInstanceFromGet).isEqualTo(newInstance); + + toUpdate = + InstanceInfo.newBuilder(instance.getId()).setDisplayName(instance.getDisplayName()).build(); + instanceClient.updateInstance(toUpdate, InstanceInfo.InstanceField.DISPLAY_NAME).get(); + } + + @Test + public void updateInstanceWithAutoscalingConfig() throws Exception { + assumeFalse( + "The emulator does not support updating instances with autoscaler", isUsingEmulator()); + + Instance instance = + instanceClient.getInstance(env.getTestHelper().getInstanceId().getInstance()); + AutoscalingConfig autoscalingConfig = + AutoscalingConfig.newBuilder() + .setAutoscalingLimits( + AutoscalingConfig.AutoscalingLimits.newBuilder() + .setMinProcessingUnits(1000) + .setMaxProcessingUnits(2000)) + .setAutoscalingTargets( + AutoscalingConfig.AutoscalingTargets.newBuilder() + .setHighPriorityCpuUtilizationPercent(65) + .setStorageUtilizationPercent(95)) + .build(); + InstanceInfo toUpdate = + InstanceInfo.newBuilder(env.getTestHelper().getInstanceId()) + .setNodeCount(0) + .setAutoscalingConfig(autoscalingConfig) + .build(); + OperationFuture op = + 
instanceClient.updateInstance(toUpdate, InstanceInfo.InstanceField.AUTOSCALING_CONFIG); + Instance newInstance = op.get(); + assertThat(newInstance.getAutoscalingConfig()).isEqualTo(autoscalingConfig); + + Instance newInstanceFromGet = + instanceClient.getInstance(env.getTestHelper().getInstanceId().getInstance()); + assertThat(newInstanceFromGet).isEqualTo(newInstance); + + // Revert back to the instance original state. + toUpdate = + InstanceInfo.newBuilder(instance.getId()) + .setAutoscalingConfig(null) + .setNodeCount(instance.getNodeCount()) + .build(); + instanceClient + .updateInstance( + toUpdate, + InstanceInfo.InstanceField.AUTOSCALING_CONFIG, + InstanceInfo.InstanceField.NODE_COUNT) + .get(); + } + + @Test + public void updateInstanceViaEntity() throws Exception { + assumeFalse("The emulator does not support updating instances", isUsingEmulator()); + + Instance instance = + instanceClient.getInstance(env.getTestHelper().getInstanceId().getInstance()); + String rand = new Random().nextInt() + ""; + String newDisplayName = "instance test" + rand; + Instance toUpdate = + instance.toBuilder() + .setDisplayName(newDisplayName) + .setNodeCount(instance.getNodeCount() + 1) + .build(); + // Only update display name + OperationFuture op = + toUpdate.update(InstanceInfo.InstanceField.DISPLAY_NAME); + Instance newInstance = op.get(); + assertThat(newInstance.getNodeCount()).isEqualTo(instance.getNodeCount()); + assertThat(newInstance.getDisplayName()).isEqualTo(newDisplayName); + + Instance newInstanceFromGet = instance.reload(); + assertThat(newInstanceFromGet).isEqualTo(newInstance); + + toUpdate = newInstance.toBuilder().setDisplayName(instance.getDisplayName()).build(); + toUpdate.update(InstanceInfo.InstanceField.DISPLAY_NAME).get(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITIntervalTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITIntervalTest.java new file mode 
100644 index 000000000000..3af1464612ec --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITIntervalTest.java @@ -0,0 +1,265 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.*; +import com.google.cloud.spanner.connection.ConnectionOptions; +import java.math.BigInteger; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +@Category(ParallelIntegrationTest.class) +@RunWith(Parameterized.class) +public class ITIntervalTest { + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + + @Parameterized.Parameters(name = "Dialect = {0}") + public static List data() { + return Arrays.asList( + new DialectTestParameter(Dialect.GOOGLE_STANDARD_SQL), + new DialectTestParameter(Dialect.POSTGRESQL)); + } + + @Parameterized.Parameter() public DialectTestParameter dialect; + + 
private static DatabaseClient googleStandardSQLClient; + private static DatabaseClient postgreSQLClient; + + private static final String[] GOOGLE_STANDARD_SQL_SCHEMA = + new String[] { + "CREATE TABLE IntervalTable (\n" + + " key STRING(MAX),\n" + + " create_time TIMESTAMP,\n" + + " expiry_time TIMESTAMP,\n" + + " expiry_within_month bool AS (expiry_time - create_time < INTERVAL 30 DAY),\n" + + " interval_array_len INT64 AS (ARRAY_LENGTH(ARRAY[INTERVAL '1-2 3 4:5:6'" + + " YEAR TO SECOND]))\n" + + ") PRIMARY KEY (key)" + }; + + private static final String[] POSTGRESQL_SCHEMA = + new String[] { + "CREATE TABLE IntervalTable (\n" + + " key text primary key,\n" + + " create_time timestamptz,\n" + + " expiry_time timestamptz,\n" + + " expiry_within_month bool GENERATED ALWAYS AS (expiry_time - create_time < INTERVAL" + + " '30' DAY) STORED,\n" + + " interval_array_len bigint GENERATED ALWAYS AS (ARRAY_LENGTH(ARRAY[INTERVAL '1-2 3" + + " 4:5:6'], 1)) STORED\n" + + ")" + }; + + private static DatabaseClient client; + + @BeforeClass + public static void setUpDatabase() + throws ExecutionException, InterruptedException, TimeoutException { + Database googleStandardSQLDatabase = + env.getTestHelper().createTestDatabase(GOOGLE_STANDARD_SQL_SCHEMA); + googleStandardSQLClient = env.getTestHelper().getDatabaseClient(googleStandardSQLDatabase); + Database postgreSQLDatabase = + env.getTestHelper() + .createTestDatabase(Dialect.POSTGRESQL, Arrays.asList(POSTGRESQL_SCHEMA)); + postgreSQLClient = env.getTestHelper().getDatabaseClient(postgreSQLDatabase); + } + + @Before + public void before() { + client = + dialect.dialect == Dialect.GOOGLE_STANDARD_SQL ? googleStandardSQLClient : postgreSQLClient; + } + + @AfterClass + public static void tearDown() throws Exception { + ConnectionOptions.closeSpanner(); + } + + /** Sequence used to generate unique keys. 
*/ + private static int seq; + + private static String uniqueString() { + return String.format("k%04d", seq++); + } + + private String lastKey; + + private Timestamp write(Mutation m) { + return client.write(Collections.singletonList(m)); + } + + private Mutation.WriteBuilder baseInsert() { + return Mutation.newInsertOrUpdateBuilder("IntervalTable") + .set("Key") + .to(lastKey = uniqueString()); + } + + @Test + public void writeToTableWithIntervalExpressions() { + write( + baseInsert() + .set("create_time") + .to(Timestamp.parseTimestamp("2004-11-30T04:53:54Z")) + .set("expiry_time") + .to(Timestamp.parseTimestamp("2004-12-15T04:53:54Z")) + .build()); + try (ResultSet resultSet = + client + .singleUse() + .executeQuery( + Statement.of( + "SELECT expiry_within_month, interval_array_len FROM IntervalTable WHERE key='" + + lastKey + + "'"))) { + assertTrue(resultSet.next()); + assertTrue(resultSet.getBoolean(0)); + assertEquals(1, resultSet.getLong(1)); + } + } + + @Test + public void queryInterval() { + try (ResultSet resultSet = + client + .singleUse() + .executeQuery(Statement.of("SELECT INTERVAL '1' DAY + INTERVAL '1' MONTH AS Col1"))) { + assertTrue(resultSet.next()); + assertEquals(resultSet.getInterval(0), Interval.fromMonthsDaysNanos(1, 1, BigInteger.ZERO)); + } + } + + @Test + public void queryWithIntervalParam() { + write( + baseInsert() + .set("create_time") + .to(Timestamp.parseTimestamp("2004-08-30T04:53:54Z")) + .set("expiry_time") + .to(Timestamp.parseTimestamp("2004-12-15T04:53:54Z")) + .build()); + + String query; + if (dialect.dialect == Dialect.POSTGRESQL) { + query = + "SELECT COUNT(*) FROM IntervalTable WHERE create_time < TIMESTAMPTZ" + + " '2004-11-30T10:23:54+0530' - $1"; + } else { + query = + "SELECT COUNT(*) FROM IntervalTable WHERE create_time <" + + " TIMESTAMP('2004-11-30T10:23:54+0530') - @p1"; + } + + try (ResultSet resultSet = + client + .singleUse() + .executeQuery( + Statement.newBuilder(query) + .bind("p1") + 
.to(Value.interval(Interval.ofDays(30))) + .build())) { + assertTrue(resultSet.next()); + assertEquals(resultSet.getLong(0), 1L); + } + } + + @Test + public void queryWithIntervalArrayParam() { + String query; + if (dialect.dialect == Dialect.POSTGRESQL) { + query = "SELECT $1"; + } else { + query = "SELECT @p1"; + } + + List intervalList = + Arrays.asList( + Interval.parseFromString("P1Y2M3DT4H5M6.789123S"), + null, + Interval.parseFromString("P-1Y-2M-3DT-4H-5M-6.789123S"), + null); + + try (ResultSet resultSet = + client + .singleUse() + .executeQuery( + Statement.newBuilder(query) + .bind("p1") + .to(Value.intervalArray(intervalList)) + .build())) { + assertTrue(resultSet.next()); + assertEquals(resultSet.getIntervalList(0), intervalList); + } + } + + @Test + public void queryWithUntypedIntervalParam() { + String query; + if (dialect.dialect == Dialect.POSTGRESQL) { + query = "SELECT (INTERVAL '1' DAY > $1) AS Col1"; + } else { + query = "SELECT (INTERVAL '1' DAY > @p1) AS Col1"; + } + + try (ResultSet resultSet = + client + .singleUse() + .executeQuery( + Statement.newBuilder(query) + .bind("p1") + .to( + Value.untyped( + com.google.protobuf.Value.newBuilder() + .setStringValue("PT1.5S") + .build())) + .build())) { + assertTrue(resultSet.next()); + assertTrue(resultSet.getBoolean(0)); + } + } + + @Test + public void queryIntervalArray() { + String query = + "SELECT ARRAY[CAST('P1Y2M3DT4H5M6.789123S' AS INTERVAL), null," + + " CAST('P-1Y-2M-3DT-4H-5M-6.789123S' AS INTERVAL)] AS Col1"; + try (ResultSet resultSet = client.singleUse().executeQuery(Statement.of(query))) { + assertTrue(resultSet.next()); + assertEquals( + Arrays.asList( + Interval.parseFromString("P1Y2M3DT4H5M6.789123S"), + null, + Interval.parseFromString("P-1Y-2M-3DT-4H-5M-6.789123S")), + resultSet.getIntervalList(0)); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITJsonWriteReadTest.java 
b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITJsonWriteReadTest.java new file mode 100644 index 000000000000..abaf4f07d272 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITJsonWriteReadTest.java @@ -0,0 +1,158 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.testing.ExperimentalHostHelper.isExperimentalHost; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.testing.RemoteSpannerHelper; +import com.google.common.io.Resources; +import java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Collections; +import 
java.util.List; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITJsonWriteReadTest { + + private static final String RESOURCES_DIR = "com/google/cloud/spanner/it/"; + private static final String VALID_JSON_DIR = "valid"; + private static final String INVALID_JSON_DIR = "invalid"; + + private static final String TABLE_NAME = "TestTable"; + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + + private static DatabaseClient databaseClient; + + @BeforeClass + public static void beforeClass() { + final RemoteSpannerHelper testHelper = env.getTestHelper(); + final Database database = + testHelper.createTestDatabase( + "CREATE TABLE " + + TABLE_NAME + + "(" + + "Id INT64 NOT NULL," + + "json JSON" + + ") PRIMARY KEY (Id)"); + databaseClient = testHelper.getDatabaseClient(database); + } + + @Test + public void testWriteValidJsonValues() throws IOException { + List resources = getJsonFilePaths(RESOURCES_DIR + File.separator + VALID_JSON_DIR); + + long id = 0L; + List mutations = new ArrayList<>(); + for (String resource : resources) { + String jsonStr = + Resources.toString( + Resources.getResource(this.getClass(), VALID_JSON_DIR + File.separator + resource), + StandardCharsets.UTF_8); + Mutation mutation = + Mutation.newInsertBuilder(TABLE_NAME) + .set("Id") + .to(id++) + .set("json") + .to(Value.json(jsonStr)) + .build(); + mutations.add(mutation); + } + databaseClient.write(mutations); + + try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery(Statement.of("SELECT COUNT(*) FROM " + TABLE_NAME))) { + resultSet.next(); + assertEquals(resultSet.getLong(0), resources.size()); + } + } + + @Test + public void 
testWriteAndReadInvalidJsonValues() throws IOException { + assumeFalse("Tracking the failure via b/441255097 for experimental host", isExperimentalHost()); + List resources = getJsonFilePaths(RESOURCES_DIR + File.separator + INVALID_JSON_DIR); + + AtomicLong id = new AtomicLong(100); + for (String resource : resources) { + String jsonStr = + Resources.toString( + Resources.getResource(this.getClass(), INVALID_JSON_DIR + File.separator + resource), + StandardCharsets.UTF_8); + + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + databaseClient.write( + Collections.singletonList( + Mutation.newInsertBuilder(TABLE_NAME) + .set("Id") + .to(id.getAndIncrement()) + .set("json") + .to(Value.json(jsonStr)) + .build()))); + + if (env.getTestHelper() + .getOptions() + .getSessionPoolOptions() + .getUseMultiplexedSessionForRW()) { + assertEquals(ErrorCode.INVALID_ARGUMENT, exception.getErrorCode()); + } else { + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } + } + } + + private List getJsonFilePaths(String folder) throws IOException { + String fixturesRoot = Resources.getResource(folder).getPath(); + final Path fixturesRootPath = Paths.get(fixturesRoot); + return Files.walk(fixturesRootPath) + .filter(Files::isRegularFile) + .map(path -> fixturesRootPath.relativize(path).toString()) + .filter(path -> path.endsWith(".json")) + .collect(Collectors.toList()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITLargeReadTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITLargeReadTest.java new file mode 100644 index 000000000000..83d505e21241 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITLargeReadTest.java @@ -0,0 +1,220 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.ByteArray; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.ConnectionOptions; +import com.google.common.collect.ImmutableList; +import com.google.common.hash.HashFunction; +import com.google.common.hash.Hashing; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Random; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** + * Integration test reading large amounts of data. The size of data ensures that multiple chunks are + * returned by the server. 
+ */ +@Category(ParallelIntegrationTest.class) +@RunWith(Parameterized.class) +public class ITLargeReadTest { + private static int numRows; + + private static final int WRITE_BATCH_SIZE = 1 << 20; + private static final String TABLE_NAME = "TestTable"; + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + + private static DatabaseClient googleStandardSQLClient; + private static DatabaseClient postgreSQLClient; + private static HashFunction hasher; + + // Generate a combination of small and large row sizes to allow multiple read/query restarts and + // to exercise chunking. + private static List rowSizes() { + List rowSizes = new ArrayList<>(); + rowSizes.addAll(Collections.nCopies(1000, 4096)); + rowSizes.addAll(Collections.nCopies(100, 40960)); + rowSizes.addAll(Collections.nCopies(25, 409600)); + rowSizes.addAll(Collections.nCopies(10, 4 << 20)); + Collections.shuffle(rowSizes); + return rowSizes; + } + + @BeforeClass + public static void setUpDatabase() { + Database googleStandardSQLDatabase = + env.getTestHelper() + .createTestDatabase( + "CREATE TABLE TestTable (" + + " Key INT64 NOT NULL," + + " Data BYTES(MAX)," + + " Fingerprint INT64," + + " Size INT64," + + ") PRIMARY KEY (Key)"); + googleStandardSQLClient = env.getTestHelper().getDatabaseClient(googleStandardSQLDatabase); + Database postgreSQLDatabase = + env.getTestHelper() + .createTestDatabase( + Dialect.POSTGRESQL, + ImmutableList.of( + "CREATE TABLE TestTable (" + + " Key BIGINT PRIMARY KEY," + + " Data BYTEA," + + " Fingerprint BIGINT," + + " Size BIGINT" + + ")")); + postgreSQLClient = env.getTestHelper().getDatabaseClient(postgreSQLDatabase); + hasher = Hashing.goodFastHash(64); + + List mutations = new ArrayList<>(); + Random rnd = new Random(); + int totalSize = 0; + int i = 0; + for (int rowSize : rowSizes()) { + numRows++; + byte[] data = new byte[rowSize]; + rnd.nextBytes(data); + mutations.add( + Mutation.newInsertOrUpdateBuilder(TABLE_NAME) + .set("Key") + 
.to(i) + .set("Data") + .to(ByteArray.copyFrom(data)) + .set("Fingerprint") + .to(hasher.hashBytes(data).asLong()) + .set("Size") + .to(rowSize) + .build()); + totalSize += rowSize; + i++; + if (totalSize >= WRITE_BATCH_SIZE) { + googleStandardSQLClient.write(mutations); + postgreSQLClient.write(mutations); + mutations.clear(); + totalSize = 0; + } + } + googleStandardSQLClient.write(mutations); + postgreSQLClient.write(mutations); + } + + @AfterClass + public static void teardown() { + ConnectionOptions.closeSpanner(); + } + + @Parameterized.Parameters(name = "Dialect = {0}") + public static List data() { + List params = new ArrayList<>(); + params.add(new DialectTestParameter(Dialect.GOOGLE_STANDARD_SQL)); + params.add(new DialectTestParameter(Dialect.POSTGRESQL)); + return params; + } + + @Parameterized.Parameter(0) + public DialectTestParameter dialect; + + private DatabaseClient getClient(Dialect dialect) { + if (dialect == Dialect.POSTGRESQL) { + return postgreSQLClient; + } + return googleStandardSQLClient; + } + + @Test + public void read() { + try (ResultSet resultSet = + getClient(dialect.dialect) + .singleUse() + .read(TABLE_NAME, KeySet.all(), Arrays.asList("Key", "Data", "Fingerprint", "Size"))) { + validate(resultSet); + } + } + + @Test + public void readWithSmallPrefetchChunks() { + try (ResultSet resultSet = + getClient(dialect.dialect) + .singleUse() + .read( + TABLE_NAME, + KeySet.all(), + Arrays.asList("Key", "Data", "Fingerprint", "Size"), + Options.prefetchChunks(1))) { + validate(resultSet); + } + } + + @Test + public void query() { + try (ResultSet resultSet = + getClient(dialect.dialect) + .singleUse() + .executeQuery( + Statement.of( + "SELECT Key, Data, Fingerprint, Size FROM " + TABLE_NAME + " ORDER BY Key"))) { + validate(resultSet); + } + } + + @Test + public void queryWithSmallPrefetchChunks() { + try (ResultSet resultSet = + getClient(dialect.dialect) + .singleUse() + .executeQuery( + Statement.of( + "SELECT Key, Data, Fingerprint, 
Size FROM " + TABLE_NAME + " ORDER BY Key"), + Options.prefetchChunks(1))) { + validate(resultSet); + } + } + + private void validate(ResultSet resultSet) { + int i = 0; + while (resultSet.next()) { + assertThat(resultSet.getLong(0)).isEqualTo(i); + ByteArray data = resultSet.getBytes(1); + assertThat(data.length()).isEqualTo(resultSet.getLong(3)); + assertThat(resultSet.getLong(2)).isEqualTo(hasher.hashBytes(data.toByteArray()).asLong()); + assertThat(++i).isAtMost(numRows); + } + assertThat(i).isEqualTo(numRows); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITMutableCredentialsTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITMutableCredentialsTest.java new file mode 100644 index 000000000000..c136305bcdd0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITMutableCredentialsTest.java @@ -0,0 +1,106 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.it; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import com.google.auth.oauth2.GoogleCredentials; +import com.google.auth.oauth2.ServiceAccountCredentials; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.MutableCredentials; +import com.google.cloud.spanner.SerialIntegrationTest; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient; +import com.google.spanner.admin.instance.v1.ProjectName; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Paths; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(SerialIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITMutableCredentialsTest { + + private static final String INVALID_CERT_PATH = + "/com/google/cloud/spanner/connection/test-key.json"; + + @ClassRule public static final IntegrationTestEnv env = new IntegrationTestEnv(); + + @Test + public void testMutableCredentialsUpdateAuthorizationForRunningClient() throws IOException { + GoogleCredentials validCredentials = null; + + // accept cert path overridden by environment variable for local testing + if (System.getenv("GOOGLE_ACCOUNT_CREDENTIALS") != null) { + try (InputStream stream = + Files.newInputStream(Paths.get(System.getenv("GOOGLE_ACCOUNT_CREDENTIALS")))) { + validCredentials = GoogleCredentials.fromStream(stream); + } + } else { + try { + validCredentials = GoogleCredentials.getApplicationDefault(); + } catch (IOException e) { + } + } + + // credentials must be ServiceAccountCredentials + assumeTrue(validCredentials instanceof ServiceAccountCredentials); + + ServiceAccountCredentials 
invalidCredentials; + try (InputStream stream = + ITMutableCredentialsTest.class.getResourceAsStream(INVALID_CERT_PATH)) { + invalidCredentials = ServiceAccountCredentials.fromStream(stream); + } + + // create MutableCredentials first with valid credentials + MutableCredentials mutableCredentials = + new MutableCredentials((ServiceAccountCredentials) validCredentials); + + SpannerOptions options = + env.getTestHelper().getOptions().toBuilder() + // this setting is required in the scenario SPANNER_EMULATOR_HOST is set otherwise + // SpannerOptions overrides credentials to NoCredentials + .setEmulatorHost(null) + .setCredentials(mutableCredentials) + .build(); + + ProjectName projectName = ProjectName.of(options.getProjectId()); + try (Spanner spanner = options.getService(); + InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) { + instanceAdminClient.listInstances(projectName); + + // update mutableCredentials now to use an invalid credentials + mutableCredentials.updateCredentials(invalidCredentials); + + try { + // this call should now fail with new invalid credentials + instanceAdminClient.listInstances(projectName); + fail("Expected UNAUTHENTICATED after switching to invalid credentials"); + } catch (Exception e) { + assertTrue(e.getMessage().contains("UNAUTHENTICATED")); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITPgJsonbTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITPgJsonbTest.java new file mode 100644 index 000000000000..275fbe6545fe --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITPgJsonbTest.java @@ -0,0 +1,536 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.testing.RemoteSpannerHelper; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.ListValue; +import com.google.protobuf.NullValue; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import 
org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITPgJsonbTest { + + private static final Duration OPERATION_TIMEOUT = Duration.ofMinutes(5); + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static RemoteSpannerHelper testHelper; + private static DatabaseAdminClient databaseAdminClient; + private static List databasesToDrop; + private static String projectId; + private static String instanceId; + private static String databaseId; + private DatabaseClient databaseClient; + private String tableName; + + @BeforeClass + public static void beforeClass() throws Exception { + testHelper = env.getTestHelper(); + databaseAdminClient = testHelper.getClient().getDatabaseAdminClient(); + databasesToDrop = new ArrayList<>(); + projectId = testHelper.getInstanceId().getProject(); + instanceId = testHelper.getInstanceId().getInstance(); + databaseId = testHelper.getUniqueDatabaseId(); + final Database database = + databaseAdminClient + .newDatabaseBuilder(DatabaseId.of(projectId, instanceId, databaseId)) + .setDialect(Dialect.POSTGRESQL) + .build(); + databaseAdminClient + .createDatabase(database, Collections.emptyList()) + .get(OPERATION_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS); + databasesToDrop.add(database.getId()); + } + + @AfterClass + public static void afterClass() { + if (databasesToDrop != null) { + for (DatabaseId id : databasesToDrop) { + try { + databaseAdminClient.dropDatabase(id.getInstanceId().getInstance(), id.getDatabase()); + } catch (Exception e) { + System.err.println("Failed to drop database " + id + ", skipping...: " + e.getMessage()); + } + } + } + } + + @Before + public void setUp() throws Exception { + databaseClient = + testHelper.getClient().getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + tableName = 
testHelper.getUniqueDatabaseId(); + databaseAdminClient + .updateDatabaseDdl( + instanceId, + databaseId, + Collections.singletonList( + "CREATE TABLE \"" + + tableName + + "\" (id BIGINT PRIMARY KEY, col1 JSONB, colarray JSONB[])"), + null) + .get(OPERATION_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS); + } + + @Test + public void testPgJsonbAsPrimaryKey() { + // JSONB is not allowed as a primary key. + ExecutionException executionException = + assertThrows( + ExecutionException.class, + () -> + databaseAdminClient + .updateDatabaseDdl( + instanceId, + databaseId, + Collections.singletonList( + "CREATE TABLE with_jsonb_pk (id jsonb primary key)"), + null) + .get(OPERATION_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS)); + SpannerException spannerException = + SpannerExceptionFactory.asSpannerException(executionException.getCause()); + assertEquals(ErrorCode.INVALID_ARGUMENT, spannerException.getErrorCode()); + assertTrue( + spannerException.getMessage(), + spannerException + .getMessage() + .contains( + "Column with_jsonb_pk.id has type PG.JSONB, but is part of the primary key.")); + } + + @Test + public void testPgJsonbInSecondaryIndex() { + // JSONB is not allowed as a key in a secondary index. 
+ ExecutionException executionException = + assertThrows( + ExecutionException.class, + () -> + databaseAdminClient + .updateDatabaseDdl( + instanceId, + databaseId, + Collections.singletonList( + "CREATE INDEX idx_jsonb on \"" + tableName + "\" (col1)"), + null) + .get(OPERATION_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS)); + SpannerException spannerException = + SpannerExceptionFactory.asSpannerException(executionException.getCause()); + assertEquals(ErrorCode.FAILED_PRECONDITION, spannerException.getErrorCode()); + if (isUsingEmulator()) { + assertTrue( + spannerException.getMessage(), + spannerException + .getMessage() + .contains("Cannot reference PG.JSONB col1 in the creation of index idx_jsonb.")); + } else { + assertTrue( + spannerException.getMessage(), + spannerException + .getMessage() + .contains("Index idx_jsonb is defined on a column of unsupported type PG.JSONB.")); + } + } + + private static final String JSON_VALUE_1 = "{\"color\":\"red\",\"value\":\"#f00\"}"; + private static final String JSON_VALUE_2 = + "[" + + " {\"color\":\"red\",\"value\":\"#f00\"}," + + " {\"color\":\"green\",\"value\":\"#0f0\"}," + + " {\"color\":\"blue\",\"value\":\"#00f\"}" + + "]"; + private static final List JSON_ARRAY1 = Arrays.asList(JSON_VALUE_1, JSON_VALUE_2); + private static final List JSON_ARRAY2 = Arrays.asList("[]", "{}"); + + @Test + public void testLiteralPgJsonb() { + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate( + Statement.of( + String.format( + "INSERT INTO %s (id, col1, colarray) VALUES " + + "(1, '%s', array[%s]::jsonb[]), " + + "(2, '%s', array[%s]::jsonb[]), " + + "(3, '{}', array['{}']::jsonb[]), " + + "(4, '[]', array['[]']::jsonb[]), " + + "(5, null, null)", + tableName, + JSON_VALUE_1, + // Convert array into string with literals separated by comma + // [a, b, c, null] -> 'a','b','c',null + JSON_ARRAY1.stream() + .map(item -> (item == null ? 
null : "'" + item + "'")) + .collect(Collectors.joining(",")), + JSON_VALUE_2, + JSON_ARRAY2.stream() + .map(item -> (item == null ? null : "'" + item + "'")) + .collect(Collectors.joining(","))))); + return null; + }); + + verifyContents(); + } + + @Test + public void testPgJsonbParameter() { + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate( + Statement.newBuilder( + "INSERT INTO " + + tableName + + " (id, col1, colarray) VALUES" + + " (1, $1, $2)" + + ", (2, $3, $4)" + + ", (3, $5, $6)" + + ", (4, $7, $8)" + + ", (5, $9, $10)") + .bind("p1") + .to(Value.pgJsonb(JSON_VALUE_1)) + .bind("p2") + .to(Value.pgJsonbArray(JSON_ARRAY1)) + .bind("p3") + .to(Value.pgJsonb(JSON_VALUE_2)) + .bind("p4") + .to(Value.pgJsonbArray(JSON_ARRAY2)) + .bind("p5") + .to(Value.pgJsonb("{}")) + .bind("p6") + .to(Value.pgJsonbArray(Collections.singletonList("{}"))) + .bind("p7") + .to(Value.pgJsonb("[]")) + .bind("p8") + .to(Value.pgJsonbArray(Collections.singletonList("[]"))) + .bind("p9") + .to(Value.pgJsonb(null)) + .bind("p10") + .to(Value.pgJsonbArray(null)) + .build()); + return null; + }); + + verifyContents(); + } + + private ListValue getJsonListValue(List jsonList) { + return ListValue.newBuilder() + .addAllValues( + jsonList.stream() + .map(json -> com.google.protobuf.Value.newBuilder().setStringValue(json).build()) + .collect(Collectors.toList())) + .build(); + } + + @Test + public void testPgJsonbUntypedParameter() { + // Verify that we can use Jsonb as an untyped parameter. This is especially important for + // PGAdapter and the JDBC driver, as these will often use untyped parameters. 
+ databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate( + Statement.newBuilder( + "INSERT INTO " + + tableName + + " (id, col1, colarray) VALUES" + + " (1, $1, $2)" + + ", (2, $3, $4)" + + ", (3, $5, $6)" + + ", (4, $7, $8)" + + ", (5, $9, $10)") + .bind("p1") + .to( + Value.untyped( + com.google.protobuf.Value.newBuilder() + .setStringValue(JSON_VALUE_1) + .build())) + .bind("p2") + .to( + Value.untyped( + com.google.protobuf.Value.newBuilder() + .setListValue(getJsonListValue(JSON_ARRAY1)) + .build())) + .bind("p3") + .to( + Value.untyped( + com.google.protobuf.Value.newBuilder() + .setStringValue(JSON_VALUE_2) + .build())) + .bind("p4") + .to( + Value.untyped( + com.google.protobuf.Value.newBuilder() + .setListValue(getJsonListValue(JSON_ARRAY2)) + .build())) + .bind("p5") + .to( + Value.untyped( + com.google.protobuf.Value.newBuilder().setStringValue("{}").build())) + .bind("p6") + .to( + Value.untyped( + com.google.protobuf.Value.newBuilder() + .setListValue(getJsonListValue(Collections.singletonList("{}"))) + .build())) + .bind("p7") + .to( + Value.untyped( + com.google.protobuf.Value.newBuilder().setStringValue("[]").build())) + .bind("p8") + .to( + Value.untyped( + com.google.protobuf.Value.newBuilder() + .setListValue(getJsonListValue(Collections.singletonList("[]"))) + .build())) + .bind("p9") + .to( + Value.untyped( + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build())) + .bind("p10") + .to( + Value.untyped( + com.google.protobuf.Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build())) + .build()); + return null; + }); + + verifyContents(); + } + + @Test + public void testMutationsWithPgJsonbAsString() { + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.buffer( + ImmutableList.of( + Mutation.newInsertBuilder(tableName) + .set("id") + .to(1) + .set("col1") + .to(JSON_VALUE_1) + .set("colarray") + 
.to(Value.pgJsonbArray(JSON_ARRAY1)) + .build(), + Mutation.newInsertBuilder(tableName) + .set("id") + .to(2) + .set("col1") + .to(JSON_VALUE_2) + .set("colarray") + .to(Value.pgJsonbArray(JSON_ARRAY2)) + .build(), + Mutation.newInsertBuilder(tableName) + .set("id") + .to(3) + .set("col1") + .to("{}") + .set("colarray") + .to(Value.pgJsonbArray(Collections.singletonList("{}"))) + .build(), + Mutation.newInsertBuilder(tableName) + .set("id") + .to(4) + .set("col1") + .to("[]") + .set("colarray") + .to(Value.pgJsonbArray(Collections.singletonList("[]"))) + .build(), + Mutation.newInsertBuilder(tableName) + .set("id") + .to(5) + .set("col1") + .to((String) null) + .set("colarray") + .to(Value.pgJsonbArray(null)) + .build())); + return null; + }); + + verifyContents(); + } + + @Test + public void testMutationsWithPgJsonbAsValue() { + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.buffer( + ImmutableList.of( + Mutation.newInsertBuilder(tableName) + .set("id") + .to(1) + .set("col1") + .to(Value.pgJsonb(JSON_VALUE_1)) + .set("colarray") + .to(Value.pgJsonbArray(JSON_ARRAY1)) + .build(), + Mutation.newInsertBuilder(tableName) + .set("id") + .to(2) + .set("col1") + .to(Value.pgJsonb(JSON_VALUE_2)) + .set("colarray") + .to(Value.pgJsonbArray(JSON_ARRAY2)) + .build(), + Mutation.newInsertBuilder(tableName) + .set("id") + .to(3) + .set("col1") + .to(Value.pgJsonb("{}")) + .set("colarray") + .to(Value.pgJsonbArray(Collections.singletonList("{}"))) + .build(), + Mutation.newInsertBuilder(tableName) + .set("id") + .to(4) + .set("col1") + .to(Value.pgJsonb("[]")) + .set("colarray") + .to(Value.pgJsonbArray(Collections.singletonList("[]"))) + .build(), + Mutation.newInsertBuilder(tableName) + .set("id") + .to(5) + .set("col1") + .to(Value.pgJsonb(null)) + .set("colarray") + .to(Value.pgJsonbArray(null)) + .build())); + return null; + }); + + verifyContents(); + } + + private void verifyContents() { + try (ResultSet resultSet = + databaseClient + 
.singleUse() + .executeQuery(Statement.of("SELECT * FROM " + tableName + " ORDER BY id"))) { + + assertTrue(resultSet.next()); + // Note: We do not use the JSON_VALUE_1 constant here, because the backend prettifies the + // value a little, which means that there is a small difference between what we insert and + // what we get back. + assertEquals("{\"color\": \"red\", \"value\": \"#f00\"}", resultSet.getPgJsonb("col1")); + assertEquals( + Value.pgJsonb("{\"color\": \"red\", \"value\": \"#f00\"}"), resultSet.getValue("col1")); + assertEquals( + Arrays.asList( + "{\"color\": \"red\", \"value\": \"#f00\"}", + "[{\"color\": \"red\", \"value\": \"#f00\"}, " + + "{\"color\": \"green\", \"value\": \"#0f0\"}, " + + "{\"color\": \"blue\", \"value\": \"#00f\"}]"), + resultSet.getPgJsonbList("colarray")); + assertEquals( + Value.pgJsonbArray( + Arrays.asList( + "{\"color\": \"red\", \"value\": \"#f00\"}", + "[{\"color\": \"red\", \"value\": \"#f00\"}, " + + "{\"color\": \"green\", \"value\": \"#0f0\"}, " + + "{\"color\": \"blue\", \"value\": \"#00f\"}]")), + resultSet.getValue("colarray")); + + assertTrue(resultSet.next()); + assertEquals( + "[" + + "{\"color\": \"red\", \"value\": \"#f00\"}, " + + "{\"color\": \"green\", \"value\": \"#0f0\"}, " + + "{\"color\": \"blue\", \"value\": \"#00f\"}" + + "]", + resultSet.getPgJsonb("col1")); + assertEquals( + Value.pgJsonb( + "[" + + "{\"color\": \"red\", \"value\": \"#f00\"}, " + + "{\"color\": \"green\", \"value\": \"#0f0\"}, " + + "{\"color\": \"blue\", \"value\": \"#00f\"}" + + "]"), + resultSet.getValue("col1")); + assertEquals(JSON_ARRAY2, resultSet.getPgJsonbList("colarray")); + assertEquals(Value.pgJsonbArray(JSON_ARRAY2), resultSet.getValue("colarray")); + + assertTrue(resultSet.next()); + assertEquals("{}", resultSet.getPgJsonb("col1")); + assertEquals(Value.pgJsonb("{}"), resultSet.getValue("col1")); + assertEquals(Collections.singletonList("{}"), resultSet.getPgJsonbList("colarray")); + assertEquals( + 
Value.pgJsonbArray(Collections.singletonList("{}")), resultSet.getValue("colarray")); + + assertTrue(resultSet.next()); + assertEquals("[]", resultSet.getPgJsonb("col1")); + assertEquals(Value.pgJsonb("[]"), resultSet.getValue("col1")); + assertEquals(Collections.singletonList("[]"), resultSet.getPgJsonbList("colarray")); + assertEquals( + Value.pgJsonbArray(Collections.singletonList("[]")), resultSet.getValue("colarray")); + + assertTrue(resultSet.next()); + assertTrue(resultSet.isNull("col1")); + assertTrue(resultSet.isNull("colarray")); + + assertFalse(resultSet.next()); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITPgNumericTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITPgNumericTest.java new file mode 100644 index 000000000000..76025b07175a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITPgNumericTest.java @@ -0,0 +1,460 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.it; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.testing.RemoteSpannerHelper; +import com.google.common.collect.ImmutableList; +import java.math.BigDecimal; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITPgNumericTest { + + private static final Duration OPERATION_TIMEOUT = Duration.ofMinutes(5); + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static RemoteSpannerHelper testHelper; + private static DatabaseAdminClient databaseAdminClient; + private static List databasesToDrop; + private static String projectId; + private static String instanceId; + private static String databaseId; + private DatabaseClient databaseClient; + private String tableName; + + @BeforeClass + public static void beforeClass() throws Exception { + testHelper = env.getTestHelper(); + databaseAdminClient = testHelper.getClient().getDatabaseAdminClient(); + databasesToDrop = new 
ArrayList<>(); + projectId = testHelper.getInstanceId().getProject(); + instanceId = testHelper.getInstanceId().getInstance(); + databaseId = testHelper.getUniqueDatabaseId(); + final Database database = + databaseAdminClient + .newDatabaseBuilder(DatabaseId.of(projectId, instanceId, databaseId)) + .setDialect(Dialect.POSTGRESQL) + .build(); + databaseAdminClient + .createDatabase(database, Collections.emptyList()) + .get(OPERATION_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS); + databasesToDrop.add(database.getId()); + } + + @AfterClass + public static void afterClass() throws Exception { + if (databasesToDrop != null) { + for (DatabaseId id : databasesToDrop) { + try { + databaseAdminClient.dropDatabase(id.getInstanceId().getInstance(), id.getDatabase()); + } catch (Exception e) { + System.err.println("Failed to drop database " + id + ", skipping...: " + e.getMessage()); + } + } + } + } + + @Before + public void setUp() throws Exception { + databaseClient = + testHelper.getClient().getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + tableName = testHelper.getUniqueDatabaseId(); + databaseAdminClient + .updateDatabaseDdl( + instanceId, + databaseId, + Collections.singletonList( + "CREATE TABLE \"" + tableName + "\" (id BIGINT PRIMARY KEY, col1 NUMERIC)"), + null) + .get(OPERATION_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS); + } + + @Test + public void testLiteralPgNumeric() { + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate( + Statement.of( + "INSERT INTO " + + tableName + + " (id, col1) VALUES" + + " (1, 1.23)" + + ", (2, 'NaN')" + + ", (3, null)")); + return null; + }); + + try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery(Statement.of("SELECT * FROM " + tableName + " ORDER BY id"))) { + + resultSet.next(); + assertEquals("1.23", resultSet.getString("col1")); + assertEquals(Value.pgNumeric("1.23"), resultSet.getValue("col1")); + + resultSet.next(); + assertEquals("NaN", 
resultSet.getString("col1")); + assertEquals(Value.pgNumeric("NaN"), resultSet.getValue("col1")); + + resultSet.next(); + assertTrue(resultSet.isNull("col1")); + } + } + + @Test + public void testParameterizedWithPgNumericAsValue() { + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate( + Statement.newBuilder( + "INSERT INTO " + + tableName + + " (id, col1) VALUES" + + " (1, $1)" + + ", (2, $2)" + + ", (3, $3)") + .bind("p1") + .to(Value.pgNumeric("1.23")) + .bind("p2") + .to(Value.pgNumeric("NaN")) + .bind("p3") + .to(Value.pgNumeric(null)) + .build()); + return null; + }); + + try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery(Statement.of("SELECT * FROM " + tableName + " ORDER BY id"))) { + + resultSet.next(); + assertEquals("1.23", resultSet.getString("col1")); + assertEquals(Value.pgNumeric("1.23"), resultSet.getValue("col1")); + + resultSet.next(); + assertEquals("NaN", resultSet.getString("col1")); + assertEquals(Value.pgNumeric("NaN"), resultSet.getValue("col1")); + + resultSet.next(); + assertTrue(resultSet.isNull("col1")); + } + } + + @Test + public void testParameterizedWithPgNumericAsDouble() { + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate( + Statement.newBuilder( + "INSERT INTO " + + tableName + + " (id, col1) VALUES" + + " (1, $1)" + + ", (2, $2)" + + ", (3, $3)") + .bind("p1") + .to(1.23D) + .bind("p2") + .to(Double.NaN) + .bind("p3") + .to((Double) null) + .build()); + return null; + }); + + try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery(Statement.of("SELECT * FROM " + tableName + " ORDER BY id"))) { + + resultSet.next(); + assertEquals("1.23", resultSet.getString("col1")); + assertEquals(Value.pgNumeric("1.23"), resultSet.getValue("col1")); + + resultSet.next(); + assertEquals("NaN", resultSet.getString("col1")); + assertEquals(Value.pgNumeric("NaN"), resultSet.getValue("col1")); + + resultSet.next(); 
+ assertTrue(resultSet.isNull("col1")); + } + } + + @Test + public void testParameterizedWithPgNumericAsInt() { + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate( + Statement.newBuilder("INSERT INTO " + tableName + " (id, col1) VALUES (1, $1)") + .bind("p1") + .to(1) + .build()); + return null; + }); + + try (ResultSet resultSet = + databaseClient.singleUse().executeQuery(Statement.of("SELECT * FROM " + tableName))) { + + resultSet.next(); + assertEquals("1", resultSet.getString("col1")); + assertEquals(Value.pgNumeric("1"), resultSet.getValue("col1")); + } + } + + @Test + public void testParameterizedWithPgNumericAsLong() { + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate( + Statement.newBuilder("INSERT INTO " + tableName + " (id, col1) VALUES (1, $1)") + .bind("p1") + .to(1L) + .build()); + return null; + }); + + try (ResultSet resultSet = + databaseClient.singleUse().executeQuery(Statement.of("SELECT * FROM " + tableName))) { + + resultSet.next(); + assertEquals("1", resultSet.getString("col1")); + assertEquals(Value.pgNumeric("1"), resultSet.getValue("col1")); + } + } + + @Test + public void testMutationsWithPgNumericAsString() { + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.buffer( + ImmutableList.of( + Mutation.newInsertBuilder(tableName) + .set("id") + .to(1) + .set("col1") + .to("1.23") + .build(), + Mutation.newInsertBuilder(tableName) + .set("id") + .to(2) + .set("col1") + .to("NaN") + .build(), + Mutation.newInsertBuilder(tableName) + .set("id") + .to(3) + .set("col1") + .to((String) null) + .build())); + return null; + }); + + try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery(Statement.of("SELECT * FROM " + tableName + " ORDER BY id"))) { + + resultSet.next(); + assertEquals("1.23", resultSet.getString("col1")); + assertEquals(Value.pgNumeric("1.23"), resultSet.getValue("col1")); + + 
resultSet.next(); + assertEquals("NaN", resultSet.getString("col1")); + assertEquals(Value.pgNumeric("NaN"), resultSet.getValue("col1")); + + resultSet.next(); + assertTrue(resultSet.isNull("col1")); + } + } + + @Test + public void testMutationsWithPgNumericAsInt() { + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.buffer( + ImmutableList.of( + Mutation.newInsertBuilder(tableName) + .set("id") + .to(1) + .set("col1") + .to(1) + .build())); + return null; + }); + + try (ResultSet resultSet = + databaseClient.singleUse().executeQuery(Statement.of("SELECT * FROM " + tableName))) { + + resultSet.next(); + assertEquals("1", resultSet.getString("col1")); + assertEquals(Value.pgNumeric("1"), resultSet.getValue("col1")); + } + } + + @Test + public void testMutationsWithPgNumericAsLong() { + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.buffer( + ImmutableList.of( + Mutation.newInsertBuilder(tableName) + .set("id") + .to(1) + .set("col1") + .to(1L) + .build())); + return null; + }); + + try (ResultSet resultSet = + databaseClient.singleUse().executeQuery(Statement.of("SELECT * FROM " + tableName))) { + + resultSet.next(); + assertEquals("1", resultSet.getString("col1")); + assertEquals(Value.pgNumeric("1"), resultSet.getValue("col1")); + } + } + + @Test + public void testMutationsWithPgNumericAsBigDecimal() { + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.buffer( + ImmutableList.of( + Mutation.newInsertBuilder(tableName) + .set("id") + .to(1) + .set("col1") + .to(new BigDecimal("1.23")) + .build(), + Mutation.newInsertBuilder(tableName) + .set("id") + .to(3) + .set("col1") + .to((BigDecimal) null) + .build())); + return null; + }); + + try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery(Statement.of("SELECT * FROM " + tableName + " ORDER BY id"))) { + + resultSet.next(); + assertEquals("1.23", resultSet.getString("col1")); + 
assertEquals(Value.pgNumeric("1.23"), resultSet.getValue("col1")); + + resultSet.next(); + assertTrue(resultSet.isNull("col1")); + } + } + + @Test + public void testMutationsWithPgNumericAsValue() { + databaseClient + .readWriteTransaction() + .run( + transaction -> { + transaction.buffer( + ImmutableList.of( + Mutation.newInsertBuilder(tableName) + .set("id") + .to(1) + .set("col1") + .to(Value.pgNumeric("1.23")) + .build(), + Mutation.newInsertBuilder(tableName) + .set("id") + .to(2) + .set("col1") + .to(Value.pgNumeric("NaN")) + .build(), + Mutation.newInsertBuilder(tableName) + .set("id") + .to(3) + .set("col1") + .to(Value.pgNumeric(null)) + .build())); + return null; + }); + + try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery(Statement.of("SELECT * FROM " + tableName + " ORDER BY id"))) { + + resultSet.next(); + assertEquals("1.23", resultSet.getString("col1")); + assertEquals(Value.pgNumeric("1.23"), resultSet.getValue("col1")); + + resultSet.next(); + assertEquals("NaN", resultSet.getString("col1")); + assertEquals(Value.pgNumeric("NaN"), resultSet.getValue("col1")); + + resultSet.next(); + assertTrue(resultSet.isNull("col1")); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITPitrCreateDatabaseTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITPitrCreateDatabaseTest.java new file mode 100644 index 000000000000..6056b857b18a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITPitrCreateDatabaseTest.java @@ -0,0 +1,144 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.DatabaseNotFoundException; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.testing.RemoteSpannerHelper; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITPitrCreateDatabaseTest { + + private static final Duration OPERATION_TIMEOUT = Duration.ofMinutes(2); + private static final String VERSION_RETENTION_PERIOD = "7d"; + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private RemoteSpannerHelper testHelper; + private DatabaseAdminClient dbAdminClient; + 
private List databasesToDrop; + + @BeforeClass + public static void doNotRunOnEmulator() { + assumeFalse("PITR-lite features are not supported by the emulator", isUsingEmulator()); + } + + @Before + public void setUp() { + testHelper = env.getTestHelper(); + dbAdminClient = testHelper.getClient().getDatabaseAdminClient(); + databasesToDrop = new ArrayList<>(); + } + + @After + public void tearDown() { + for (Database database : databasesToDrop) { + final DatabaseId id = database.getId(); + dbAdminClient.dropDatabase(id.getInstanceId().getInstance(), id.getDatabase()); + } + } + + @Test + public void returnsTheVersionRetentionPeriodSetThroughCreateDatabase() throws Exception { + final String instanceId = testHelper.getInstanceId().getInstance(); + final String databaseId = testHelper.getUniqueDatabaseId(); + final String extraStatement = + "ALTER DATABASE " + + databaseId + + " SET OPTIONS (version_retention_period = '" + + VERSION_RETENTION_PERIOD + + "')"; + + final Database database = createDatabase(instanceId, databaseId, extraStatement); + + assertThat(database.getVersionRetentionPeriod()).isEqualTo(VERSION_RETENTION_PERIOD); + assertThat(database.getEarliestVersionTime()).isNotNull(); + } + + @Test + public void returnsTheVersionRetentionPeriodSetThroughGetDatabase() throws Exception { + final String instanceId = testHelper.getInstanceId().getInstance(); + final String databaseId = testHelper.getUniqueDatabaseId(); + final String extraStatement = + "ALTER DATABASE " + + databaseId + + " SET OPTIONS (version_retention_period = '" + + VERSION_RETENTION_PERIOD + + "')"; + + createDatabase(instanceId, databaseId, extraStatement); + final Database database = dbAdminClient.getDatabase(instanceId, databaseId); + + assertThat(database.getVersionRetentionPeriod()).isEqualTo(VERSION_RETENTION_PERIOD); + assertThat(database.getEarliestVersionTime()).isNotNull(); + } + + @Test(expected = DatabaseNotFoundException.class) + public void 
returnsAnErrorWhenAnInvalidVersionRetentionPeriodIsGiven() { + final String instanceId = testHelper.getInstanceId().getInstance(); + final String databaseId = testHelper.getUniqueDatabaseId(); + final String extraStatement = + "ALTER DATABASE " + databaseId + " SET OPTIONS (version_retention_period = '0d')"; + + try { + createDatabase(instanceId, databaseId, extraStatement); + fail("Expected invalid argument error when setting invalid version retention period"); + } catch (Exception e) { + SpannerException spannerException = (SpannerException) e.getCause(); + assertThat(spannerException.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + } + + // Expects a database not found exception + dbAdminClient.getDatabase(instanceId, databaseId); + } + + private Database createDatabase( + final String instanceId, final String databaseId, final String extraStatement) + throws Exception { + final Database database = + dbAdminClient + .createDatabase(instanceId, databaseId, Collections.singletonList(extraStatement)) + .get(OPERATION_TIMEOUT.toNanos(), TimeUnit.NANOSECONDS); + databasesToDrop.add(database); + + return database; + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITPitrUpdateDatabaseTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITPitrUpdateDatabaseTest.java new file mode 100644 index 000000000000..c730a7fa36c3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITPitrUpdateDatabaseTest.java @@ -0,0 +1,207 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.paging.Page; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.testing.RemoteSpannerHelper; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import java.time.Duration; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITPitrUpdateDatabaseTest { + + private static final Duration OPERATION_TIMEOUT = Duration.ofMinutes(20); + private static final String 
VERSION_RETENTION_PERIOD = "7d"; + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static DatabaseAdminClient dbAdminClient; + private static DatabaseClient dbClient; + private static String instanceId; + private static String databaseId; + private static UpdateDatabaseDdlMetadata metadata; + + @BeforeClass + public static void setUp() throws Exception { + assumeFalse("PITR-lite features are not supported by the emulator", isUsingEmulator()); + + final RemoteSpannerHelper testHelper = env.getTestHelper(); + final String projectId = testHelper.getOptions().getProjectId(); + instanceId = testHelper.getInstanceId().getInstance(); + databaseId = testHelper.getUniqueDatabaseId(); + dbAdminClient = testHelper.getClient().getDatabaseAdminClient(); + + createDatabase(dbAdminClient, instanceId, databaseId, Collections.emptyList()); + metadata = + updateVersionRetentionPeriod( + dbAdminClient, instanceId, databaseId, VERSION_RETENTION_PERIOD); + + dbClient = + testHelper.getClient().getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + } + + @AfterClass + public static void tearDown() { + if (!isUsingEmulator()) { + dbAdminClient.dropDatabase(instanceId, databaseId); + } + } + + @Test + public void checksThatTheOperationWasNotThrottled() { + assertThat(metadata.getThrottled()).isFalse(); + } + + @Test + public void returnsTheVersionRetentionPeriodSetThroughGetDatabase() { + final Database database = dbAdminClient.getDatabase(instanceId, databaseId); + + assertThat(database.getVersionRetentionPeriod()).isEqualTo(VERSION_RETENTION_PERIOD); + assertThat(database.getEarliestVersionTime()).isNotNull(); + } + + @Test + public void returnsTheVersionRetentionPeriodSetThroughListDatabases() { + final Page page = dbAdminClient.listDatabases(instanceId); + + for (Database database : page.iterateAll()) { + if (!database.getId().getDatabase().equals(databaseId)) { + continue; + } + 
assertThat(database.getVersionRetentionPeriod()).isEqualTo(VERSION_RETENTION_PERIOD); + assertThat(database.getEarliestVersionTime()).isNotNull(); + } + } + + @Test + public void returnsTheVersionRetentionPeriodSetThroughGetDatabaseDdl() { + final List ddls = dbAdminClient.getDatabaseDdl(instanceId, databaseId); + + boolean hasVersionRetentionPeriodStatement = false; + for (String ddl : ddls) { + hasVersionRetentionPeriodStatement = + ddl.contains("version_retention_period = '" + VERSION_RETENTION_PERIOD + "'"); + if (hasVersionRetentionPeriodStatement) { + break; + } + } + assertThat(hasVersionRetentionPeriodStatement).isTrue(); + } + + @Test + public void returnsTheVersionRetentionPeriodSetThroughInformationSchema() { + try (final ResultSet rs = + dbClient + .singleUse() + .executeQuery( + Statement.of( + "SELECT OPTION_VALUE AS version_retention_period " + + "FROM INFORMATION_SCHEMA.DATABASE_OPTIONS " + + "WHERE SCHEMA_NAME = '' AND OPTION_NAME = 'version_retention_period'"))) { + + String versionRetentionPeriod = null; + while (rs.next()) { + versionRetentionPeriod = rs.getString("version_retention_period"); + } + + assertThat(versionRetentionPeriod).isEqualTo(VERSION_RETENTION_PERIOD); + } + } + + @Test + public void returnsAnErrorWhenAnInvalidRetentionPeriodIsGiven() { + try { + dbAdminClient + .updateDatabaseDdl( + instanceId, + databaseId, + Collections.singletonList( + "ALTER DATABASE " + + databaseId + + " SET OPTIONS (version_retention_period = '0d')"), + "op_invalid_retention_period_" + databaseId) + .get(OPERATION_TIMEOUT.toNanos(), TimeUnit.NANOSECONDS); + fail("Expected invalid argument error when setting invalid version retention period"); + } catch (Exception e) { + SpannerException spannerException = (SpannerException) e.getCause(); + assertThat(spannerException.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + } + + final Database database = dbAdminClient.getDatabase(instanceId, databaseId); + + 
assertThat(database.getVersionRetentionPeriod()).isEqualTo(VERSION_RETENTION_PERIOD); + assertThat(database.getEarliestVersionTime()).isNotNull(); + } + + private static Database createDatabase( + final DatabaseAdminClient dbAdminClient, + final String instanceId, + final String databaseId, + final Iterable extraStatements) + throws Exception { + return dbAdminClient + .createDatabase(instanceId, databaseId, extraStatements) + .get(OPERATION_TIMEOUT.toNanos(), TimeUnit.NANOSECONDS); + } + + private static UpdateDatabaseDdlMetadata updateVersionRetentionPeriod( + final DatabaseAdminClient dbAdminClient, + final String instanceId, + final String databaseId, + final String versionRetentionPeriod) + throws Exception { + final OperationFuture op = + dbAdminClient.updateDatabaseDdl( + instanceId, + databaseId, + Collections.singletonList( + "ALTER DATABASE " + + databaseId + + " SET OPTIONS ( version_retention_period = '" + + versionRetentionPeriod + + "' )"), + "updateddl_version_retention_period"); + op.get(OPERATION_TIMEOUT.toNanos(), TimeUnit.NANOSECONDS); + return op.getMetadata().get(OPERATION_TIMEOUT.toNanos(), TimeUnit.NANOSECONDS); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITProtoColumnTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITProtoColumnTest.java new file mode 100644 index 000000000000..a2693f6a6d51 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITProtoColumnTest.java @@ -0,0 +1,410 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.ByteArray; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SingerProto.Genre; +import com.google.cloud.spanner.SingerProto.SingerInfo; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.testing.EmulatorSpannerHelper; +import com.google.cloud.spanner.testing.RemoteSpannerHelper; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.InvalidProtocolBufferException.InvalidWireTypeException; +import com.google.protobuf.ProtocolMessageEnum; +import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; +import java.io.InputStream; +import java.util.Arrays; +import 
java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +// Integration Tests to test DDL, DML and DQL for Proto Columns and Enums +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITProtoColumnTest { + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static DatabaseId databaseID; + private static DatabaseAdminClient dbAdminClient; + private static DatabaseClient databaseClient; + + @BeforeClass + public static void setUpDatabase() throws Exception { + assumeFalse( + "Proto Column is not supported in the emulator", EmulatorSpannerHelper.isUsingEmulator()); + RemoteSpannerHelper testHelper = env.getTestHelper(); + databaseID = DatabaseId.of(testHelper.getInstanceId(), testHelper.getUniqueDatabaseId()); + dbAdminClient = testHelper.getClient().getDatabaseAdminClient(); + createDatabase(); + databaseClient = testHelper.getClient().getDatabaseClient(databaseID); + } + + public static void createDatabase() throws Exception { + InputStream in = + ITProtoColumnTest.class + .getClassLoader() + .getResourceAsStream("com/google/cloud/spanner/descriptors.pb"); + final Database databaseToCreate = + dbAdminClient.newDatabaseBuilder(databaseID).setProtoDescriptors(in).build(); + final Database createdDatabase = + dbAdminClient + .createDatabase( + databaseToCreate, + Arrays.asList( + "CREATE PROTO BUNDLE (" + + "examples.spanner.music.SingerInfo," + + "examples.spanner.music.Genre," + + ")", + "CREATE TABLE Singers (" + + " SingerId INT64 NOT NULL," + + " FirstName STRING(1024)," + + " LastName STRING(1024)," + + " SingerInfo examples.spanner.music.SingerInfo," + + " SingerGenre examples.spanner.music.Genre," + + " 
SingerNationality STRING(1024) AS (SingerInfo.nationality) STORED," + + " ) PRIMARY KEY (SingerNationality, SingerGenre)", + "CREATE TABLE Types (" + + " RowID INT64 NOT NULL," + + " Int64a INT64," + + " Bytes BYTES(MAX)," + + " Int64Array ARRAY," + + " BytesArray ARRAY," + + " ProtoMessage examples.spanner.music.SingerInfo," + + " ProtoEnum examples.spanner.music.Genre," + + " ProtoMessageArray ARRAY," + + " ProtoEnumArray ARRAY," + + " ) PRIMARY KEY (RowID)", + "CREATE INDEX SingerByNationalityAndGenre ON Singers(SingerNationality," + + " SingerGenre) STORING (SingerId, FirstName, LastName)")) + .get(5, TimeUnit.MINUTES); + + assertEquals(databaseID.getDatabase(), createdDatabase.getId().getDatabase()); + + GetDatabaseDdlResponse response = + dbAdminClient.getDatabaseDdlResponse( + databaseID.getInstanceId().getInstance(), databaseID.getDatabase()); + assertNotNull(response.getProtoDescriptors()); + in.close(); + } + + @AfterClass + public static void afterClass() throws Exception { + try { + if (!isUsingEmulator()) { + dbAdminClient.dropDatabase( + databaseID.getInstanceId().getInstance(), databaseID.getDatabase()); + } + } catch (Exception e) { + System.err.println( + "Failed to drop database " + + dbAdminClient + .getDatabase(databaseID.getInstanceId().getInstance(), databaseID.getDatabase()) + .getId() + + ", skipping...: " + + e.getMessage()); + } + } + + @After + public void after() throws Exception { + databaseClient.write(ImmutableList.of(Mutation.delete("Types", KeySet.all()))); + databaseClient.write(ImmutableList.of(Mutation.delete("Singers", KeySet.all()))); + } + + /** + * Test to check data update and read queries on Proto Messages, Proto Enums and their arrays. + * Test also checks for compatability between following types: 1. Proto Messages and Bytes 2. 
+ * Proto Enums and Int64 + */ + @Test + public void testProtoColumnsUpdateAndRead() { + assumeFalse( + "Proto Column is not supported in the emulator", EmulatorSpannerHelper.isUsingEmulator()); + SingerInfo singerInfo = + SingerInfo.newBuilder().setSingerId(1).setNationality("Country1").build(); + ByteArray singerInfoBytes = ByteArray.copyFrom(singerInfo.toByteArray()); + + Genre genre = Genre.JAZZ; + long genreConst = genre.getNumber(); + + List singerInfoList = + Arrays.asList(singerInfo, null, SingerInfo.getDefaultInstance()); + List singerInfoBytesList = + Arrays.asList( + singerInfoBytes, + null, + ByteArray.copyFrom(SingerInfo.getDefaultInstance().toByteArray())); + + List enumList = Arrays.asList(Genre.FOLK, null, Genre.ROCK); + List enumConstList = + Arrays.asList((long) Genre.FOLK_VALUE, null, (long) Genre.ROCK_VALUE); + + // Inserting two rows with same data except rowID as it's used as PK. + databaseClient.write( + ImmutableList.of( + Mutation.newInsertOrUpdateBuilder("Types") + .set("RowID") + .to(1) + .set("Int64a") + .to(genreConst) + .set("Bytes") + .to(singerInfoBytes) + .set("Int64Array") + .toInt64Array(enumConstList) + .set("BytesArray") + .toBytesArray(singerInfoBytesList) + .set("ProtoMessage") + .to(singerInfo) + .set("ProtoEnum") + .to(genre) + .set("ProtoMessageArray") + .toProtoMessageArray(singerInfoList, SingerInfo.getDescriptor()) + .set("ProtoEnumArray") + .toProtoEnumArray(enumList, Genre.getDescriptor()) + .build(), + // Inter Compatability check between ProtoMessages/Bytes and Int64/Enum. 
+ Mutation.newInsertOrUpdateBuilder("Types") + .set("RowID") + .to(2) + .set("Int64a") + .to(genre) + .set("Bytes") + .to(singerInfo) + .set("Int64Array") + .toProtoEnumArray(enumList, Genre.getDescriptor()) + .set("BytesArray") + .toProtoMessageArray(singerInfoList, SingerInfo.getDescriptor()) + .set("ProtoMessage") + .to(singerInfoBytes) + .set("ProtoEnum") + .to(genreConst) + .set("ProtoMessageArray") + .toBytesArray(singerInfoBytesList) + .set("ProtoEnumArray") + .toInt64Array(enumConstList) + .build())); + + try (ResultSet resultSet = + databaseClient.singleUse().executeQuery(Statement.of("SELECT * FROM " + "Types"))) { + + for (int i = 0; i < 2; i++) { + resultSet.next(); + assertEquals(i + 1, resultSet.getLong("RowID")); + assertEquals(genreConst, resultSet.getLong("Int64a")); + assertEquals(singerInfoBytes, resultSet.getBytes("Bytes")); + assertEquals(enumConstList, resultSet.getLongList("Int64Array")); + assertEquals(singerInfoBytesList, resultSet.getBytesList("BytesArray")); + assertEquals( + singerInfo, resultSet.getProtoMessage("ProtoMessage", SingerInfo.getDefaultInstance())); + assertEquals(genre, resultSet.getProtoEnum("ProtoEnum", Genre::forNumber)); + assertEquals( + singerInfoList, + resultSet.getProtoMessageList("ProtoMessageArray", SingerInfo.getDefaultInstance())); + assertEquals(enumList, resultSet.getProtoEnumList("ProtoEnumArray", Genre::forNumber)); + + // Check compatability between Proto Messages and Bytes + assertEquals(singerInfoBytes, resultSet.getBytes("ProtoMessage")); + assertEquals( + singerInfo, resultSet.getProtoMessage("Bytes", SingerInfo.getDefaultInstance())); + + assertEquals(singerInfoBytesList, resultSet.getBytesList("ProtoMessageArray")); + assertEquals( + singerInfoList, + resultSet.getProtoMessageList("BytesArray", SingerInfo.getDefaultInstance())); + + // Check compatability between Proto Enum and Int64 + assertEquals(genreConst, resultSet.getLong("ProtoEnum")); + assertEquals(genre, resultSet.getProtoEnum("Int64a", 
Genre::forNumber)); + + assertEquals(enumConstList, resultSet.getLongList("ProtoEnumArray")); + assertEquals(enumList, resultSet.getProtoEnumList("Int64Array", Genre::forNumber)); + } + } + } + + // Test to check Parameterized Queries, Primary Keys and Indexes. + @Test + public void testProtoColumnsDMLParameterizedQueriesPKAndIndexes() { + assumeFalse( + "Proto Column is not supported in the emulator", EmulatorSpannerHelper.isUsingEmulator()); + + SingerInfo singerInfo1 = + SingerInfo.newBuilder().setSingerId(1).setNationality("Country1").build(); + Genre genre1 = Genre.FOLK; + + SingerInfo singerInfo2 = + SingerInfo.newBuilder().setSingerId(2).setNationality("Country2").build(); + Genre genre2 = Genre.JAZZ; + + databaseClient + .readWriteTransaction() + .run( + transaction -> { + Statement statement1 = + Statement.newBuilder( + "INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo," + + " SingerGenre) VALUES (1, \"FirstName1\", \"LastName1\"," + + " @singerInfo, @singerGenre)") + .bind("singerInfo") + .to(singerInfo1) + .bind("singerGenre") + .to(genre1) + .build(); + + Statement statement2 = + Statement.newBuilder( + "INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo," + + " SingerGenre) VALUES (2, \"FirstName2\", \"LastName2\"," + + " @singerInfo, @singerGenre)") + .bind("singerInfo") + .to(singerInfo2) + .bind("singerGenre") + .to(genre2) + .build(); + + transaction.batchUpdate(Arrays.asList(statement1, statement2)); + return null; + }); + + // Read all rows based on Proto Message field and Proto Enum Primary key column values + try (ResultSet resultSet1 = + databaseClient + .singleUse() + .read( + "Singers", + KeySet.newBuilder() + .addKey(Key.of("Country1", Genre.FOLK)) + .addKey(Key.of("Country2", Genre.JAZZ)) + .build(), + Arrays.asList("SingerId", "FirstName", "LastName", "SingerInfo", "SingerGenre"))) { + resultSet1.next(); + assertEquals(1, resultSet1.getLong("SingerId")); + assertEquals("FirstName1", 
resultSet1.getString("FirstName")); + assertEquals("LastName1", resultSet1.getString("LastName")); + assertEquals( + singerInfo1, resultSet1.getProtoMessage("SingerInfo", SingerInfo.getDefaultInstance())); + assertEquals(genre1, resultSet1.getProtoEnum("SingerGenre", Genre::forNumber)); + + resultSet1.next(); + assertEquals(2, resultSet1.getLong("SingerId")); + assertEquals("FirstName2", resultSet1.getString("FirstName")); + assertEquals("LastName2", resultSet1.getString("LastName")); + assertEquals( + singerInfo2, resultSet1.getProtoMessage("SingerInfo", SingerInfo.getDefaultInstance())); + assertEquals(genre2, resultSet1.getProtoEnum("SingerGenre", Genre::forNumber)); + } + + // Read rows using Index on Proto Message field and Proto Enum column + try (ResultSet resultSet2 = + databaseClient + .singleUse() + .readUsingIndex( + "Singers", + "SingerByNationalityAndGenre", + KeySet.singleKey(Key.of("Country2", Genre.JAZZ)), + Arrays.asList("SingerId", "FirstName", "LastName"))) { + + resultSet2.next(); + assertEquals(2, resultSet2.getLong("SingerId")); + assertEquals("FirstName2", resultSet2.getString("FirstName")); + assertEquals("LastName2", resultSet2.getString("LastName")); + } + + // Filter using Parameterized DQL + try (ResultSet resultSet3 = + databaseClient + .singleUse() + .executeQuery( + Statement.newBuilder( + "SELECT SingerId, SingerInfo, SingerGenre FROM Singers WHERE" + + " SingerInfo.Nationality=@country AND SingerGenre=@genre") + .bind("country") + .to("Country2") + .bind("genre") + .to(Genre.JAZZ) + .build())) { + resultSet3.next(); + assertEquals(2, resultSet3.getLong("SingerId")); + assertEquals( + singerInfo2, resultSet3.getProtoMessage("SingerInfo", SingerInfo.getDefaultInstance())); + assertEquals(genre2, resultSet3.getProtoEnum("SingerGenre", Genre::forNumber)); + } + } + + // Test the exception in case Invalid protocol message object is provided while deserializing the + // data. 
+ @Test + public void testProtoMessageDeserializationError() { + assumeFalse( + "Proto Column is not supported in the emulator", EmulatorSpannerHelper.isUsingEmulator()); + + SingerInfo singerInfo = + SingerInfo.newBuilder().setSingerId(1).setNationality("Country1").build(); + + databaseClient.write( + ImmutableList.of( + Mutation.newInsertOrUpdateBuilder("Types") + .set("RowID") + .to(1) + .set("ProtoMessage") + .to(singerInfo) + .build())); + + try (ResultSet resultSet = + databaseClient + .singleUse() + .read("Types", KeySet.all(), Collections.singletonList("ProtoMessage"))) { + + resultSet.next(); + + SpannerException e = + assertThrows( + SpannerException.class, + () -> resultSet.getProtoMessage("ProtoMessage", Backup.getDefaultInstance())); + + // Underlying cause is InvalidWireTypeException + assertEquals(InvalidWireTypeException.class, e.getCause().getClass()); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITQueryOptionsTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITQueryOptionsTest.java new file mode 100644 index 000000000000..baa8235cd562 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITQueryOptionsTest.java @@ -0,0 +1,236 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITQueryOptionsTest { + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static Database db; + private static DatabaseClient client; + + @BeforeClass + public static void setUpDatabase() { + assumeFalse("Emulator ignores query options", isUsingEmulator()); + + // Empty database. + db = + env.getTestHelper() + .createTestDatabase("CREATE TABLE TEST (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)"); + client = env.getTestHelper().getDatabaseClient(db); + } + + @Test + public void executeQuery() { + // Version '1' should work. + // Statistics package 'custom-package' should work. 
+ try (ResultSet rs = + client + .singleUse() + .executeQuery( + Statement.newBuilder("SELECT 1") + .withQueryOptions( + QueryOptions.newBuilder() + .setOptimizerVersion("1") + .setOptimizerStatisticsPackage("custom-package") + .build()) + .build())) { + while (rs.next()) { + assertThat(rs.getLong(0)).isEqualTo(1L); + } + } + // Version 'latest' should also work. + try (ResultSet rs = + client + .singleUse() + .executeQuery( + Statement.newBuilder("SELECT 1") + .withQueryOptions( + QueryOptions.newBuilder().setOptimizerVersion("latest").build()) + .build())) { + while (rs.next()) { + assertThat(rs.getLong(0)).isEqualTo(1L); + } + } + // Version '100000' should not work. + try (ResultSet rs = + client + .singleUse() + .executeQuery( + Statement.newBuilder("SELECT 1") + .withQueryOptions( + QueryOptions.newBuilder().setOptimizerVersion("100000").build()) + .build())) { + while (rs.next()) { + fail("should not get any results"); + } + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()).contains("Query optimizer version: 100000 is not supported"); + } + } + + @Test + public void executeUpdate() { + // Optimizer version 1 should work. + // Optimizer statistics package 'custom-package' should work. + assertThat( + client + .readWriteTransaction() + .run( + transaction -> + transaction.executeUpdate( + Statement.newBuilder("INSERT INTO TEST (ID, NAME) VALUES (@id, @name)") + .bind("id") + .to(1L) + .bind("name") + .to("One") + .withQueryOptions( + QueryOptions.newBuilder() + .setOptimizerVersion("1") + .setOptimizerStatisticsPackage("custom-package") + .build()) + .build()))) + .isEqualTo(1L); + + // Version 'latest' should also work. 
+ assertThat( + client + .readWriteTransaction() + .run( + transaction -> + transaction.executeUpdate( + Statement.newBuilder("INSERT INTO TEST (ID, NAME) VALUES (@id, @name)") + .bind("id") + .to(2L) + .bind("name") + .to("Two") + .withQueryOptions( + QueryOptions.newBuilder().setOptimizerVersion("latest").build()) + .build()))) + .isEqualTo(1L); + + // Version '100000' is an invalid value and should cause an error. + try { + client + .readWriteTransaction() + .run( + transaction -> + transaction.executeUpdate( + Statement.newBuilder("INSERT INTO TEST (ID, NAME) VALUES (@id, @name)") + .bind("id") + .to(3L) + .bind("name") + .to("Three") + .withQueryOptions( + QueryOptions.newBuilder().setOptimizerVersion("100000").build()) + .build())); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()).contains("Query optimizer version: 100000 is not supported"); + } + + // Setting an optimizer version and statistics package for PDML should also be allowed. + assertThat( + client.executePartitionedUpdate( + Statement.newBuilder("UPDATE TEST SET NAME='updated' WHERE 1=1") + .withQueryOptions( + QueryOptions.newBuilder() + .setOptimizerVersion("1") + .setOptimizerStatisticsPackage("custom-package") + .build()) + .build())) + .isEqualTo(2L); + } + + @Test + public void spannerOptions() { + // Version '1' should work. + // Statistics package 'custom-package' should work. 
+ try (Spanner spanner = + env.getTestHelper().getOptions().toBuilder() + .setDefaultQueryOptions( + db.getId(), + QueryOptions.newBuilder() + .setOptimizerVersion("1") + .setOptimizerStatisticsPackage("custom-package") + .build()) + .build() + .getService()) { + DatabaseClient client = spanner.getDatabaseClient(db.getId()); + try (ResultSet rs = client.singleUse().executeQuery(Statement.of("SELECT 1"))) { + while (rs.next()) { + assertThat(rs.getLong(0)).isEqualTo(1L); + } + } + } + // Version 'latest' should also work. + try (Spanner spanner = + env.getTestHelper().getOptions().toBuilder() + .setDefaultQueryOptions( + db.getId(), QueryOptions.newBuilder().setOptimizerVersion("latest").build()) + .build() + .getService()) { + DatabaseClient client = spanner.getDatabaseClient(db.getId()); + try (ResultSet rs = client.singleUse().executeQuery(Statement.of("SELECT 1"))) { + while (rs.next()) { + assertThat(rs.getLong(0)).isEqualTo(1L); + } + } + } + // Version '100000' should not work. + try (Spanner spanner = + env.getTestHelper().getOptions().toBuilder() + .setDefaultQueryOptions( + db.getId(), QueryOptions.newBuilder().setOptimizerVersion("100000").build()) + .build() + .getService()) { + DatabaseClient client = spanner.getDatabaseClient(db.getId()); + try (ResultSet rs = client.singleUse().executeQuery(Statement.of("SELECT 1"))) { + while (rs.next()) { + fail("should not get any results"); + } + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()).contains("Query optimizer version: 100000 is not supported"); + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITQueryTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITQueryTest.java new file mode 100644 index 000000000000..eb3f1b00edda --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITQueryTest.java @@ 
-0,0 +1,1539 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static com.google.cloud.spanner.testing.ExperimentalHostHelper.isExperimentalHost; +import static com.google.common.base.Strings.isNullOrEmpty; +import static com.google.common.truth.Truth.assertThat; +import static java.util.Arrays.asList; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; +import static org.junit.Assume.assumeTrue; + +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Interval; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ReadContext.QueryAnalyzeMode; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import 
com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.StructField; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.connection.ConnectionOptions; +import com.google.cloud.spanner.testing.EmulatorSpannerHelper; +import com.google.common.base.Joiner; +import com.google.common.collect.Iterables; +import com.google.spanner.v1.ResultSetStats; +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** Integration tests for query execution. */ +@Category(ParallelIntegrationTest.class) +@RunWith(Parameterized.class) +public class ITQueryTest { + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static DatabaseClient googleStandardSQLClient; + private static DatabaseClient postgreSQLClient; + private String selectValueQuery; + + @BeforeClass + public static void setUpDatabase() { + // Empty database. 
+ Database googleStandardSQLDatabase = env.getTestHelper().createTestDatabase(); + googleStandardSQLClient = env.getTestHelper().getDatabaseClient(googleStandardSQLDatabase); + if (!isUsingEmulator()) { + Database postgreSQLDatabase = + env.getTestHelper().createTestDatabase(Dialect.POSTGRESQL, Collections.emptyList()); + postgreSQLClient = env.getTestHelper().getDatabaseClient(postgreSQLDatabase); + } + } + + @AfterClass + public static void teardown() { + ConnectionOptions.closeSpanner(); + } + + @Before + public void initSelectValueQuery() { + selectValueQuery = "SELECT @p1"; + if (dialect.dialect == Dialect.POSTGRESQL) { + selectValueQuery = "SELECT $1"; + } + } + + @Parameterized.Parameters(name = "Dialect = {0}") + public static List data() { + List params = new ArrayList<>(); + params.add(new DialectTestParameter(Dialect.GOOGLE_STANDARD_SQL)); + // "PG dialect tests are not supported by the emulator" + if (!isUsingEmulator()) { + params.add(new DialectTestParameter(Dialect.POSTGRESQL)); + } + return params; + } + + @Parameterized.Parameter(0) + public DialectTestParameter dialect; + + private DatabaseClient getClient(Dialect dialect) { + if (dialect == Dialect.POSTGRESQL) { + return postgreSQLClient; + } + return googleStandardSQLClient; + } + + @Test + public void simple() { + Struct row = execute(Statement.of("SELECT 1"), Type.int64()); + assertThat(row.getLong(0)).isEqualTo(1); + } + + @Test + public void badQuery() { + SpannerException exception = + assertThrows( + SpannerException.class, + () -> execute(Statement.of("SELECT Apples AND Oranges"), Type.int64())); + assertEquals(ErrorCode.INVALID_ARGUMENT, exception.getErrorCode()); + if (dialect.dialect == Dialect.POSTGRESQL) { + assertTrue( + exception.getMessage(), + exception.getMessage().contains("column \"apples\" does not exist")); + // See https://www.postgresql.org/docs/current/errcodes-appendix.html + // '42703' == undefined_column + assumeFalse( + "Skipping PGErrorCode check on experimental host 
due to b/473270453", + isExperimentalHost()); + assertEquals("42703", exception.getPostgreSQLErrorCode()); + } else { + assertTrue( + exception.getMessage(), exception.getMessage().contains("Unrecognized name: Apples")); + } + } + + @Test + public void arrayOfStruct() { + assumeFalse("structs are not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Type structType = + Type.struct(StructField.of("C1", Type.string()), StructField.of("C2", Type.int64())); + Struct row = + execute( + Statement.of( + "SELECT ARRAY(SELECT AS STRUCT C1, C2 " + + "FROM (SELECT 'a' AS C1, 1 AS C2 UNION ALL SELECT 'b' AS C1, 2 AS C2) " + + "ORDER BY C1 ASC)"), + Type.array(structType)); + assertThat(row.isNull(0)).isFalse(); + List value = row.getStructList(0); + assertThat(value.size()).isEqualTo(2); + assertThat(value.get(0).getType()).isEqualTo(structType); + assertThat(value.get(0).getString(0)).isEqualTo("a"); + assertThat(value.get(0).getLong(1)).isEqualTo(1); + assertThat(value.get(1).getType()).isEqualTo(structType); + assertThat(value.get(1).getString(0)).isEqualTo("b"); + assertThat(value.get(1).getLong(1)).isEqualTo(2); + + // Also confirm that an STRUCT> implements equality correctly with respect to + // a manually constructed Struct. 
+ Struct expectedRow = + Struct.newBuilder() + .set("") + .toStructArray( + Type.struct( + asList( + StructField.of("C1", Type.string()), StructField.of("C2", Type.int64()))), + asList( + Struct.newBuilder().set("C1").to("a").set("C2").to(1).build(), + Struct.newBuilder().set("C1").to("b").set("C2").to(2).build())) + .build(); + assertThat(row).isEqualTo(expectedRow); + } + + @Test + public void arrayOfStructEmpty() { + assumeFalse("structs are not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Type structType = + Type.struct(StructField.of("", Type.string()), StructField.of("", Type.int64())); + Struct row = + execute( + Statement.of("SELECT ARRAY(SELECT AS STRUCT * FROM (SELECT 'a', 1) WHERE 0 = 1)"), + Type.array(structType)); + assertThat(row.isNull(0)).isFalse(); + List value = row.getStructList(0); + assertThat(value.size()).isEqualTo(0); + } + + @Ignore // Not yet supported by the backend. + @Test + public void arrayOfStructNull() { + Type structType = + Type.struct(StructField.of("", Type.string()), StructField.of("", Type.int64())); + Struct row = + execute( + Statement.of("SELECT CAST (NULL AS ARRAY>)"), + Type.array(structType)); + assertThat(row.isNull(0)).isTrue(); + } + + @Ignore // Not yet supported by the backend. 
+ @Test + public void arrayOfStructNullElement() { + Type structType = + Type.struct(StructField.of("", Type.string()), StructField.of("", Type.int64())); + Struct row = + execute( + Statement.of( + "SELECT ARRAY(SELECT AS STRUCT 'a', 1" + + " UNION ALL SELECT CAST (NULL AS STRUCT))"), + Type.array(structType)); + assertThat(row.isNull(0)).isFalse(); + List value = row.getStructList(0); + assertThat(value.size()).isEqualTo(2); + assertThat(value.get(0).getType()).isEqualTo(structType); + assertThat(value.get(0).getString(0)).isEqualTo("a"); + assertThat(value.get(0).getLong(1)).isEqualTo(1); + assertThat(value.get(1)).isNull(); + } + + @Test + public void bindBool() { + Struct row = + execute(Statement.newBuilder(selectValueQuery).bind("p1").to(true).build(), Type.bool()); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getBoolean(0)).isEqualTo(true); + } + + @Test + public void bindBoolNull() { + Struct row = + execute(Statement.newBuilder(selectValueQuery).bind("p1").to((Boolean) null), Type.bool()); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindInt64() { + Struct row = execute(Statement.newBuilder(selectValueQuery).bind("p1").to(1234), Type.int64()); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getLong(0)).isEqualTo(1234); + } + + @Test + public void bindInt64Null() { + Struct row = + execute(Statement.newBuilder(selectValueQuery).bind("p1").to((Long) null), Type.int64()); + assertThat(row.isNull(0)).isTrue(); + } + + // TODO: Remove once FLOAT32 is supported in production. + private static boolean isUsingCloudDevel() { + String jobType = System.getenv("JOB_TYPE"); + + // Assumes that the jobType contains the string "cloud-devel" to signal that + // the environment is cloud-devel. 
+ return !isNullOrEmpty(jobType) && jobType.contains("cloud-devel"); + } + + @Test + public void bindFloat32() { + Struct row = + execute(Statement.newBuilder(selectValueQuery).bind("p1").to(2.0f), Type.float32()); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getFloat(0)).isWithin(0.0f).of(2.0f); + } + + @Test + public void bindFloat32Null() { + Struct row = + execute(Statement.newBuilder(selectValueQuery).bind("p1").to((Float) null), Type.float32()); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindPgOid() { + if (dialect.dialect == Dialect.POSTGRESQL) { + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").to(Value.pgOid(1234)), + Type.pgOid()); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getLong(0)).isEqualTo(1234); + } + } + + @Test + public void bindPgOidNull() { + if (dialect.dialect == Dialect.POSTGRESQL) { + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").to(Value.pgOid(null)), + Type.pgOid()); + assertThat(row.isNull(0)).isTrue(); + } + } + + @Test + public void bindFloat64() { + Struct row = execute(Statement.newBuilder(selectValueQuery).bind("p1").to(2.0), Type.float64()); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getDouble(0)).isWithin(0.0).of(2.0); + } + + @Test + public void bindFloat64Null() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").to((Double) null), Type.float64()); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindString() { + Struct row = + execute(Statement.newBuilder(selectValueQuery).bind("p1").to("abc"), Type.string()); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getString(0)).isEqualTo("abc"); + } + + @Test + public void bindStringNull() { + Struct row = + execute(Statement.newBuilder(selectValueQuery).bind("p1").to((String) null), Type.string()); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindJson() { + assumeFalse("JSON are not supported on 
POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Struct row = + execute( + Statement.newBuilder(selectValueQuery) + .bind("p1") + .to(Value.json("{\"rating\":9,\"open\":true}")), + Type.json()); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getJson(0)).isEqualTo("{\"open\":true,\"rating\":9}"); + } + + @Test + public void bindJsonEmpty() { + assumeFalse("JSON are not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").to(Value.json("{}")), Type.json()); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getJson(0)).isEqualTo("{}"); + } + + @Test + public void bindJsonNull() { + assumeFalse("JSON is not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").to(Value.json(null)), Type.json()); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindBytes() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").to(ByteArray.copyFrom("xyz")), + Type.bytes()); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getBytes(0)).isEqualTo(ByteArray.copyFrom("xyz")); + } + + @Test + public void bindBytesNull() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").to((ByteArray) null), Type.bytes()); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindTimestamp() { + Timestamp t = Timestamp.parseTimestamp("2016-09-18T00:00:00Z"); + Struct row = execute(Statement.newBuilder(selectValueQuery).bind("p1").to(t), Type.timestamp()); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getTimestamp(0)).isEqualTo(t); + } + + @Test + public void bindTimestampNull() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").to((Timestamp) null), + Type.timestamp()); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindDate() { + Date d = 
Date.parseDate("2016-09-18"); + Struct row = execute(Statement.newBuilder(selectValueQuery).bind("p1").to(d), Type.date()); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getDate(0)).isEqualTo(d); + } + + @Test + public void bindDateNull() { + Struct row = + execute(Statement.newBuilder(selectValueQuery).bind("p1").to((Date) null), Type.date()); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindUuid() { + UUID uuid = UUID.randomUUID(); + Struct row = execute(Statement.newBuilder(selectValueQuery).bind("p1").to(uuid), Type.uuid()); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getUuid(0)).isEqualTo(uuid); + } + + @Test + public void bindUuidNull() { + Struct row = + execute(Statement.newBuilder(selectValueQuery).bind("p1").to((UUID) null), Type.uuid()); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindInterval() { + Interval d = Interval.parseFromString("P1Y2M3DT4H5M6.789123S"); + Struct row = execute(Statement.newBuilder(selectValueQuery).bind("p1").to(d), Type.interval()); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getInterval(0)).isEqualTo(d); + } + + @Test + public void bindIntervalNull() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").to((Interval) null), Type.interval()); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindNumeric() { + BigDecimal b = new BigDecimal("1.1"); + Statement.Builder statement = Statement.newBuilder(selectValueQuery); + Type expectedType = Type.numeric(); + Object expectedValue = b; + if (dialect.dialect == Dialect.POSTGRESQL) { + expectedType = Type.pgNumeric(); + expectedValue = Value.pgNumeric(b.toString()); + statement.bind("p1").to(Value.pgNumeric(b.toString())); + } else { + statement.bind("p1").to(b); + } + Struct row = execute(statement, expectedType); + assertThat(row.isNull(0)).isFalse(); + Object got; + if (dialect.dialect == Dialect.POSTGRESQL) { + got = row.getValue(0); + } else { + got = 
row.getBigDecimal(0); + } + assertThat(got).isEqualTo(expectedValue); + } + + @Test + public void bindNumericNull() { + Statement.Builder statement = Statement.newBuilder(selectValueQuery); + Type expectedType = Type.numeric(); + if (dialect.dialect == Dialect.POSTGRESQL) { + expectedType = Type.pgNumeric(); + statement.bind("p1").to(Value.pgNumeric(null)); + } else { + statement.bind("p1").to((BigDecimal) null); + } + Struct row = execute(statement, expectedType); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindNumeric_doesNotPreservePrecision() { + BigDecimal b = new BigDecimal("1.10"); + Statement.Builder statement = Statement.newBuilder(selectValueQuery); + Type expectedType = Type.numeric(); + // Cloud Spanner does not store precision, and will therefore return 1.10 as 1.1. + Object expectedValue = b.stripTrailingZeros(); + if (dialect.dialect == Dialect.POSTGRESQL) { + expectedType = Type.pgNumeric(); + // Cloud Spanner with PG dialect store precision, and will therefore return 1.10. 
+ expectedValue = Value.pgNumeric(b.toString()); + statement.bind("p1").to(Value.pgNumeric(b.toString())); + } else { + statement.bind("p1").to(b); + } + Struct row = execute(statement, expectedType); + assertThat(row.isNull(0)).isFalse(); + Object got; + if (dialect.dialect == Dialect.POSTGRESQL) { + got = row.getValue(0); + } else { + got = row.getBigDecimal(0); + } + assertThat(got).isNotEqualTo(b); + assertThat(got).isEqualTo(expectedValue); + } + + @Test + public void bindBoolArray() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery) + .bind("p1") + .toBoolArray(asList(true, null, false)), + Type.array(Type.bool())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getBooleanList(0)).containsExactly(true, null, false).inOrder(); + } + + @Test + public void bindBoolArrayEmpty() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toBoolArray(Collections.emptyList()), + Type.array(Type.bool())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getBooleanList(0)).containsExactly(); + } + + @Test + public void bindBoolArrayNull() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toBoolArray((boolean[]) null), + Type.array(Type.bool())); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindInt64Array() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toInt64Array(asList(null, 1L, 2L)), + Type.array(Type.int64())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getLongList(0)).containsExactly(null, 1L, 2L).inOrder(); + } + + @Test + public void bindInt64ArrayEmpty() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toInt64Array(Collections.emptyList()), + Type.array(Type.int64())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getLongList(0)).containsExactly(); + } + + @Test + public void bindInt64ArrayNull() { + Struct row = + execute( + 
Statement.newBuilder(selectValueQuery).bind("p1").toInt64Array((long[]) null), + Type.array(Type.int64())); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindFloat32Array() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery) + .bind("p1") + .toFloat32Array( + asList( + null, + 1.0f, + 2.0f, + Float.NEGATIVE_INFINITY, + Float.POSITIVE_INFINITY, + Float.NaN)), + Type.array(Type.float32())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getFloatList(0)) + .containsExactly( + null, 1.0f, 2.0f, Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY, Float.NaN) + .inOrder(); + } + + @Test + public void bindFloat32ArrayEmpty() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery) + .bind("p1") + .toFloat32Array(Collections.emptyList()), + Type.array(Type.float32())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getFloatList(0)).containsExactly(); + } + + @Test + public void bindFloat32ArrayNull() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toFloat32Array((float[]) null), + Type.array(Type.float32())); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindFloat64Array() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery) + .bind("p1") + .toFloat64Array( + asList( + null, + 1.0, + 2.0, + Double.NEGATIVE_INFINITY, + Double.POSITIVE_INFINITY, + Double.NaN)), + Type.array(Type.float64())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getDoubleList(0)) + .containsExactly( + null, 1.0, 2.0, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, Double.NaN) + .inOrder(); + } + + @Test + public void bindFloat64ArrayEmpty() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery) + .bind("p1") + .toFloat64Array(Collections.emptyList()), + Type.array(Type.float64())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getDoubleList(0)).containsExactly(); + } + + @Test + public void bindFloat64ArrayNull() { + Struct row = + 
execute( + Statement.newBuilder(selectValueQuery).bind("p1").toFloat64Array((double[]) null), + Type.array(Type.float64())); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindStringArray() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toStringArray(asList("a", "b", null)), + Type.array(Type.string())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getStringList(0)).containsExactly("a", "b", null).inOrder(); + } + + @Test + public void bindStringArrayEmpty() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery) + .bind("p1") + .toStringArray(Collections.emptyList()), + Type.array(Type.string())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getStringList(0)).containsExactly(); + } + + @Test + public void bindStringArrayNull() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toStringArray(null), + Type.array(Type.string())); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindJsonArray() { + assumeFalse( + "array JSON binding is not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Struct row = + execute( + Statement.newBuilder(selectValueQuery) + .bind("p1") + .toJsonArray(asList("{}", "[]", "{\"rating\":9,\"open\":true}", null)), + Type.array(Type.json())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getJsonList(0)) + .containsExactly("{}", "[]", "{\"open\":true,\"rating\":9}", null) + .inOrder(); + } + + @Test + public void bindJsonArrayEmpty() { + assumeFalse("JSON is not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toJsonArray(Collections.emptyList()), + Type.array(Type.json())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getJsonList(0)).isEqualTo(Collections.emptyList()); + } + + @Test + public void bindJsonArrayNull() { + assumeFalse("JSON is not supported on POSTGRESQL", 
dialect.dialect == Dialect.POSTGRESQL); + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toJsonArray(null), + Type.array(Type.json())); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindBytesArray() { + ByteArray e1 = ByteArray.copyFrom("x"); + ByteArray e2 = ByteArray.copyFrom("y"); + ByteArray e3 = null; + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toBytesArray(asList(e1, e2, e3)), + Type.array(Type.bytes())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getBytesList(0)).containsExactly(e1, e2, e3).inOrder(); + } + + @Test + public void bindBytesArrayEmpty() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toBytesArray(Collections.emptyList()), + Type.array(Type.bytes())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getBytesList(0)).isEmpty(); + } + + @Test + public void bindBytesArrayNull() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toBytesArray(null), + Type.array(Type.bytes())); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindTimestampArray() { + Timestamp t1 = Timestamp.parseTimestamp("2016-09-18T00:00:00Z"); + Timestamp t2 = Timestamp.parseTimestamp("2016-09-19T00:00:00Z"); + + Struct row = + execute( + Statement.newBuilder(selectValueQuery) + .bind("p1") + .toTimestampArray(asList(t1, t2, null)), + Type.array(Type.timestamp())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getTimestampList(0)).containsExactly(t1, t2, null).inOrder(); + } + + @Test + public void bindTimestampArrayEmpty() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery) + .bind("p1") + .toTimestampArray(Collections.emptyList()), + Type.array(Type.timestamp())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getTimestampList(0)).containsExactly(); + } + + @Test + public void bindTimestampArrayNull() { + Struct row = + execute( + 
Statement.newBuilder(selectValueQuery).bind("p1").toTimestampArray(null), + Type.array(Type.timestamp())); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindDateArray() { + Date d1 = Date.parseDate("2016-09-18"); + Date d2 = Date.parseDate("2016-09-19"); + + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toDateArray(asList(d1, d2, null)), + Type.array(Type.date())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getDateList(0)).containsExactly(d1, d2, null).inOrder(); + } + + @Test + public void bindDateArrayEmpty() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toDateArray(Collections.emptyList()), + Type.array(Type.date())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getDateList(0)).containsExactly(); + } + + @Test + public void bindDateArrayNull() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toDateArray(null), + Type.array(Type.date())); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindUuidArray() { + UUID u1 = UUID.randomUUID(); + UUID u2 = UUID.randomUUID(); + + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toUuidArray(asList(u1, u2, null)), + Type.array(Type.uuid())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getUuidList(0)).containsExactly(u1, u2, null).inOrder(); + } + + @Test + public void bindUuidArrayEmpty() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toUuidArray(Collections.emptyList()), + Type.array(Type.uuid())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getUuidList(0)).containsExactly(); + } + + @Test + public void bindUuidArrayNull() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toUuidArray(null), + Type.array(Type.uuid())); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindIntervalArray() { + Interval d1 = 
Interval.parseFromString("P-1Y-2M-3DT4H5M6.789123S"); + Interval d2 = Interval.parseFromString("P1Y2M3DT-4H-5M-6.789123S"); + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toIntervalArray(asList(d1, d2, null)), + Type.array(Type.interval())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getIntervalList(0)).containsExactly(d1, d2, null).inOrder(); + } + + @Test + public void bindIntervalArrayEmpty() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery) + .bind("p1") + .toIntervalArray(Collections.emptyList()), + Type.array(Type.interval())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getIntervalList(0)).containsExactly(); + } + + @Test + public void bindIntervalArrayNull() { + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toIntervalArray(null), + Type.array(Type.interval())); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindNumericArrayGoogleStandardSQL() { + assumeTrue(dialect.dialect == Dialect.GOOGLE_STANDARD_SQL); + BigDecimal b1 = new BigDecimal("3.14"); + BigDecimal b2 = new BigDecimal("6.626"); + + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toNumericArray(asList(b1, b2, null)), + Type.array(Type.numeric())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getBigDecimalList(0)).containsExactly(b1, b2, null).inOrder(); + } + + @Test + public void bindNumericArrayPostgreSQL() { + assumeTrue(dialect.dialect == Dialect.POSTGRESQL); + Struct row = + execute( + Statement.newBuilder(selectValueQuery) + .bind("p1") + .toPgNumericArray(asList("3.14", "6.626", null)), + Type.array(Type.pgNumeric())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getStringList(0)).containsExactly("3.14", "6.626", null).inOrder(); + } + + @Test + public void bindNumericArrayEmptyGoogleStandardSQL() { + assumeTrue(dialect.dialect == Dialect.GOOGLE_STANDARD_SQL); + Struct row = + execute( + 
Statement.newBuilder(selectValueQuery) + .bind("p1") + .toNumericArray(Collections.emptyList()), + Type.array(Type.numeric())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getBigDecimalList(0)).containsExactly(); + } + + @Test + public void bindNumericArrayEmptyPostgreSQL() { + assumeTrue(dialect.dialect == Dialect.POSTGRESQL); + Struct row = + execute( + Statement.newBuilder(selectValueQuery) + .bind("p1") + .toPgNumericArray(Collections.emptyList()), + Type.array(Type.pgNumeric())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getStringList(0)).containsExactly(); + } + + @Test + public void bindNumericArrayNullGoogleStandardSQL() { + assumeTrue(dialect.dialect == Dialect.GOOGLE_STANDARD_SQL); + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toNumericArray(null), + Type.array(Type.numeric())); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindNumericArrayNullPostgreSQL() { + assumeTrue(dialect.dialect == Dialect.POSTGRESQL); + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toPgNumericArray(null), + Type.array(Type.pgNumeric())); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void bindNumericArray_doesNotPreservePrecision() { + assumeFalse( + "array numeric binding is not supported on POSTGRESQL", + dialect.dialect == Dialect.POSTGRESQL); + assumeFalse("Emulator does not yet support NUMERIC", EmulatorSpannerHelper.isUsingEmulator()); + BigDecimal b1 = new BigDecimal("3.14"); + BigDecimal b2 = new BigDecimal("6.626070"); + + Struct row = + execute( + Statement.newBuilder(selectValueQuery).bind("p1").toNumericArray(asList(b1, b2, null)), + Type.array(Type.numeric())); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getBigDecimalList(0)) + .containsExactly(b1.stripTrailingZeros(), b2.stripTrailingZeros(), null) + .inOrder(); + } + + @Test + public void unsupportedSelectStructValue() { + assumeFalse("structs are not supported on POSTGRESQL", 
dialect.dialect == Dialect.POSTGRESQL); + assumeFalse("The emulator accepts this query", isUsingEmulator()); + Struct p = structValue(); + try { + execute(Statement.newBuilder(selectValueQuery).bind("p1").to(p).build(), p.getType()); + fail("Expected exception"); + } catch (SpannerException ex) { + assertThat(ex.getErrorCode()).isEqualTo(ErrorCode.UNIMPLEMENTED); + assertThat(ex.getMessage()) + .contains( + "Unsupported query shape: A struct value cannot be returned as a column value."); + } + } + + @Test + public void unsupportedSelectArrayStructValue() { + assumeFalse("structs are not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + assumeFalse( + "Emulator evaluates this expression differently than Cloud Spanner", isUsingEmulator()); + + Struct p = structValue(); + try { + execute( + Statement.newBuilder("SELECT @p") + .bind("p") + .toStructArray(p.getType(), Collections.singletonList(p)) + .build(), + p.getType()); + fail("Expected exception"); + } catch (SpannerException ex) { + assertThat(ex.getErrorCode()).isEqualTo(ErrorCode.UNIMPLEMENTED); + assertThat(ex.getMessage()) + .contains( + "Unsupported query shape: " + + "This query can return a null-valued array of struct, " + + "which is not supported by Spanner."); + } + } + + @Test + public void invalidAmbiguousFieldAccess() { + assumeFalse("structs are not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Struct p = Struct.newBuilder().set("f1").to(20).set("f1").to("abc").build(); + try { + execute(Statement.newBuilder("SELECT @p.f1").bind("p").to(p).build(), Type.int64()); + fail("Expected exception"); + } catch (SpannerException ex) { + assertThat(ex.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(ex.getMessage()).contains("Struct field name f1 is ambiguous"); + } + } + + private Struct structValue() { + // TODO: Add test for interval once interval is supported in emulator. 
+ return Struct.newBuilder() + .set("f_int") + .to(10) + .set("f_bool") + .to(false) + .set("f_double") + .to(3.4) + .set("f_timestamp") + .to(Timestamp.ofTimeMicroseconds(20)) + .set("f_date") + .to(Date.fromYearMonthDay(1, 3, 1)) + .set("f_string") + .to("hello") + .set("f_bytes") + .to(ByteArray.copyFrom("bytes")) + .build(); + } + + @Test + public void bindStruct() { + assumeFalse("structs are not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + // TODO: Add test for interval once interval is supported in emulator. + Struct p = structValue(); + String query = + "SELECT " + + "@p.f_int," + + "@p.f_bool," + + "@p.f_double," + + "@p.f_timestamp," + + "@p.f_date," + + "@p.f_string," + + "@p.f_bytes"; + + Struct row = + executeWithRowResultType(Statement.newBuilder(query).bind("p").to(p).build(), p.getType()); + assertThat(row).isEqualTo(p); + } + + @Test + public void bindArrayOfStruct() { + assumeFalse("structs are not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Struct arrayElement = structValue(); + List p = asList(arrayElement, null); + + List rows = + resultRows( + Statement.newBuilder("SELECT * FROM UNNEST(@p)") + .bind("p") + .toStructArray(arrayElement.getType(), p) + .build(), + arrayElement.getType()); + + assertThat(rows).hasSize(p.size()); + assertThat(rows.get(0)).isEqualTo(p.get(0)); + + // Field accesses on a null struct element (because of SELECT *) return null values. 
+ Struct structElementFromNull = rows.get(1); + // assertThat(structElementFromNull.isNull()).isFalse(); + for (int i = 0; i < arrayElement.getType().getStructFields().size(); ++i) { + assertThat(structElementFromNull.isNull(i)).isTrue(); + } + } + + @Test + public void bindStructNull() { + assumeFalse("structs are not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Struct row = + execute( + Statement.newBuilder("SELECT @p IS NULL") + .bind("p") + .to( + Type.struct( + asList( + Type.StructField.of("f1", Type.string()), + Type.StructField.of("f2", Type.float64()))), + null) + .build(), + Type.bool()); + assertThat(row.getBoolean(0)).isTrue(); + } + + @Test + public void bindArrayOfStructNull() { + assumeFalse("structs are not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Type elementType = + Type.struct( + asList( + Type.StructField.of("f1", Type.string()), + Type.StructField.of("f2", Type.float64()))); + + Struct row = + execute( + Statement.newBuilder("SELECT @p IS NULL") + .bind("p") + .toStructArray(elementType, null) + .build(), + Type.bool()); + assertThat(row.getBoolean(0)).isTrue(); + } + + @Test + public void bindEmptyStruct() { + assumeFalse("structs are not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Struct p = Struct.newBuilder().build(); + Struct row = + execute(Statement.newBuilder("SELECT @p IS NULL").bind("p").to(p).build(), Type.bool()); + assertThat(row.getBoolean(0)).isFalse(); + } + + @Test + public void bindStructWithUnnamedFields() { + assumeFalse("structs are not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Struct p = Struct.newBuilder().add(Value.int64(1337)).add(Value.int64(7331)).build(); + Struct row = + executeWithRowResultType( + Statement.newBuilder("SELECT * FROM UNNEST([@p])").bind("p").to(p).build(), + p.getType()); + assertThat(row.getLong(0)).isEqualTo(1337); + assertThat(row.getLong(1)).isEqualTo(7331); + } + + @Test + public void 
bindStructWithDuplicateFieldNames() { + assumeFalse("structs are not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Struct p = + Struct.newBuilder() + .set("f1") + .to(Value.int64(1337)) + .set("f1") + .to(Value.string("1337")) + .build(); + Struct row = + executeWithRowResultType( + Statement.newBuilder("SELECT * FROM UNNEST([@p])").bind("p").to(p).build(), + p.getType()); + assertThat(row.getLong(0)).isEqualTo(1337); + assertThat(row.getString(1)).isEqualTo("1337"); + } + + @Test + public void bindEmptyArrayOfStruct() { + assumeFalse("structs are not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Type elementType = Type.struct(Collections.singletonList(StructField.of("f1", Type.date()))); + List p = Collections.emptyList(); + assertThat(p).isEmpty(); + + List rows = + resultRows( + Statement.newBuilder("SELECT * FROM UNNEST(@p)") + .bind("p") + .toStructArray(elementType, p) + .build(), + elementType); + assertThat(rows).isEmpty(); + } + + @Test + public void bindStructWithNullStructField() { + assumeFalse("structs are not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Type emptyStructType = Type.struct(new ArrayList<>()); + Struct p = Struct.newBuilder().set("f1").to(emptyStructType, null).build(); + + Struct row = + execute(Statement.newBuilder("SELECT @p.f1 IS NULL").bind("p").to(p).build(), Type.bool()); + assertThat(row.getBoolean(0)).isTrue(); + } + + @Test + public void bindStructWithBoolArrayFieldThatContainsNulls() { + assumeFalse("structs are not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Struct p = + Struct.newBuilder() + .set("boolArray") + .to(Value.boolArray(Arrays.asList(true, false, null))) + .build(); + List rows = + resultRows( + Statement.newBuilder("SELECT * FROM UNNEST(@p.boolArray) ORDER BY 1") + .bind("p") + .to(p) + .build(), + Type.struct(StructField.of("", Type.bool()))); + assertTrue(rows.get(0).isNull(0)); + assertFalse(rows.get(1).getBoolean(0)); + 
assertTrue(rows.get(2).getBoolean(0)); + } + + @Test + public void bindStructWithInt64ArrayFieldThatContainsNulls() { + assumeFalse("structs are not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Struct p = + Struct.newBuilder() + .set("int64Array") + .to(Value.int64Array(Arrays.asList(1L, 100L, null))) + .build(); + List rows = + resultRows( + Statement.newBuilder("SELECT * FROM UNNEST(@p.int64Array) ORDER BY 1") + .bind("p") + .to(p) + .build(), + Type.struct(StructField.of("", Type.int64()))); + assertTrue(rows.get(0).isNull(0)); + assertEquals(1L, rows.get(1).getLong(0)); + assertEquals(100L, rows.get(2).getLong(0)); + } + + @Test + public void bindStructWithFloat64ArrayFieldThatContainsNulls() { + assumeFalse("structs are not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Struct p = + Struct.newBuilder() + .set("float64Array") + .to(Value.float64Array(Arrays.asList(1d, 3.14d, null))) + .build(); + List rows = + resultRows( + Statement.newBuilder("SELECT * FROM UNNEST(@p.float64Array) ORDER BY 1") + .bind("p") + .to(p) + .build(), + Type.struct(StructField.of("", Type.float64()))); + assertTrue(rows.get(0).isNull(0)); + assertEquals(1d, rows.get(1).getDouble(0), 0d); + assertEquals(3.14d, rows.get(2).getDouble(0), 0d); + } + + @Test + public void bindStructWithStructField() { + assumeFalse("structs are not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Struct nestedStruct = Struct.newBuilder().set("ff1").to("abc").build(); + Struct p = Struct.newBuilder().set("f1").to(nestedStruct).build(); + + Struct row = + executeWithRowResultType( + Statement.newBuilder("SELECT @p.f1.ff1").bind("p").to(p).build(), + nestedStruct.getType()); + assertThat(row.getString(0)).isEqualTo("abc"); + } + + @Test + public void bindStructWithArrayOfStructField() { + assumeFalse("structs are not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Struct arrayElement1 = 
Struct.newBuilder().set("ff1").to("abc").build(); + Struct arrayElement2 = Struct.newBuilder().set("ff1").to("def").build(); + Struct p = + Struct.newBuilder() + .set("f1") + .toStructArray(arrayElement1.getType(), asList(arrayElement1, arrayElement2)) + .build(); + + List rows = + resultRows( + Statement.newBuilder("SELECT * FROM UNNEST(@p.f1)").bind("p").to(p).build(), + arrayElement1.getType()); + assertThat(rows.get(0).getString(0)).isEqualTo("abc"); + assertThat(rows.get(1).getString(0)).isEqualTo("def"); + } + + @Test + public void unboundParameter() { + String query = "SELECT @v"; + if (dialect.dialect == Dialect.POSTGRESQL) { + query = "SELECT $1"; + } + ResultSet resultSet = + Statement.of(query) + .executeQuery(getClient(dialect.dialect).singleUse(TimestampBound.strong())); + try { + resultSet.next(); + fail("Expected exception"); + } catch (SpannerException ex) { + assertThat(ex.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + } + } + + @Test + public void positiveInfinity() { + assumeFalse( + "function ieee_divide not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Struct row = execute(Statement.newBuilder("SELECT IEEE_DIVIDE(1, 0)"), Type.float64()); + assertThat(row.getDouble(0)).isPositiveInfinity(); + } + + @Test + public void negativeInfinity() { + assumeFalse( + "function ieee_divide not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Struct row = execute(Statement.newBuilder("SELECT IEEE_DIVIDE(-1, 0)"), Type.float64()); + assertThat(row.getDouble(0)).isNegativeInfinity(); + } + + @Test + public void notANumber() { + assumeFalse( + "function ieee_divide not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + Struct row = execute(Statement.newBuilder("SELECT IEEE_DIVIDE(0, 0)"), Type.float64()); + assertThat(row.getDouble(0)).isNaN(); + } + + @Test + public void nonNumberArray() { + assumeFalse( + "function ieee_divide not supported on POSTGRESQL", dialect.dialect == 
Dialect.POSTGRESQL); + Struct row = + execute( + Statement.newBuilder( + "SELECT [IEEE_DIVIDE(1, 0), IEEE_DIVIDE(-1, 0), IEEE_DIVIDE(0, 0)]"), + Type.array(Type.float64())); + assertThat(row.getDoubleList(0)).hasSize(3); + assertThat(row.getDoubleList(0).get(0)).isPositiveInfinity(); + assertThat(row.getDoubleList(0).get(1)).isNegativeInfinity(); + assertThat(row.getDoubleList(0).get(2)).isNaN(); + } + + @Test + public void largeErrorText() { + assumeFalse( + "regexp_contains is not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + String veryLongString = Joiner.on("").join(Iterables.limit(Iterables.cycle("x"), 8000)); + Statement statement = + Statement.newBuilder("SELECT REGEXP_CONTAINS(@value, @regexp)") + .bind("value") + .to("") + .bind("regexp") + .to("(" + veryLongString) + .build(); + ResultSet resultSet = + statement.executeQuery(getClient(dialect.dialect).singleUse(TimestampBound.strong())); + try { + resultSet.next(); + fail("Expected exception"); + } catch (SpannerException ex) { + assertThat(ex.getErrorCode()).isEqualTo(ErrorCode.OUT_OF_RANGE); + assertThat(ex.getMessage()).contains("Cannot parse regular expression"); + } + } + + @Test + public void queryRealTable() { + Database populatedDb; + if (dialect.dialect == Dialect.POSTGRESQL) { + populatedDb = + env.getTestHelper() + .createTestDatabase( + dialect.dialect, + Arrays.asList("CREATE TABLE T ( K VARCHAR PRIMARY KEY, V VARCHAR )")); + } else { + populatedDb = + env.getTestHelper() + .createTestDatabase( + "CREATE TABLE T ( K STRING(MAX) NOT NULL, V STRING(MAX) ) PRIMARY KEY (K)"); + } + DatabaseClient client = env.getTestHelper().getDatabaseClient(populatedDb); + client.writeAtLeastOnce( + asList( + Mutation.newInsertBuilder("T").set("K").to("k1").set("V").to("v1").build(), + Mutation.newInsertBuilder("T").set("K").to("k2").set("V").to("v2").build(), + Mutation.newInsertBuilder("T").set("K").to("k3").set("V").to("v3").build(), + 
Mutation.newInsertBuilder("T").set("K").to("k4").set("V").to("v4").build())); + + String query = "SELECT k, v FROM T WHERE k >= @p1 AND k < @p2 ORDER BY K ASC"; + if (dialect.dialect == Dialect.POSTGRESQL) { + query = "SELECT k, v FROM T WHERE k >= $1 AND k < $2 ORDER BY K ASC"; + } + Statement statement = + Statement.newBuilder(query).bind("p1").to("k13").bind("p2").to("k32").build(); + ResultSet resultSet = statement.executeQuery(client.singleUse(TimestampBound.strong())); + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getType()) + .isEqualTo( + Type.struct(StructField.of("k", Type.string()), StructField.of("v", Type.string()))); + assertThat(resultSet.getString(0)).isEqualTo("k2"); + assertThat(resultSet.getString(1)).isEqualTo("v2"); + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getString("k")).isEqualTo("k3"); + assertThat(resultSet.getString("v")).isEqualTo("v3"); + assertThat(resultSet.next()).isFalse(); + } + + @Test + public void analyzePlan() { + assumeFalse("Emulator does not support Analyze Plan", isUsingEmulator()); + + Statement statement = Statement.of("SELECT 1 AS data UNION ALL SELECT 2"); + ResultSet resultSet = + statement.analyzeQuery( + getClient(dialect.dialect).singleUse(TimestampBound.strong()), QueryAnalyzeMode.PLAN); + assertThat(resultSet.next()).isFalse(); + assertThat(resultSet.getType()).isEqualTo(Type.struct(StructField.of("data", Type.int64()))); + ResultSetStats receivedStats = resultSet.getStats(); + assertThat(receivedStats).isNotNull(); + assertThat(receivedStats.hasQueryPlan()).isTrue(); + assertThat(receivedStats.hasQueryStats()).isFalse(); + } + + @Test + public void analyzeProfile() { + assumeFalse("Emulator does not support Analyze Profile", isUsingEmulator()); + + String query = "SELECT 1 AS data UNION ALL SELECT 2 AS data ORDER BY data"; + if (dialect.dialect == Dialect.POSTGRESQL) { + // "Statements with set operations and ORDER BY are not supported" + query = "SELECT 1 AS data UNION ALL 
SELECT 2 AS data"; + } + Statement statement = Statement.of(query); + ResultSet resultSet = + statement.analyzeQuery( + getClient(dialect.dialect).singleUse(TimestampBound.strong()), + QueryAnalyzeMode.PROFILE); + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getType()).isEqualTo(Type.struct(StructField.of("data", Type.int64()))); + assertThat(resultSet.getLong(0)).isEqualTo(1); + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getLong(0)).isEqualTo(2); + assertThat(resultSet.next()).isFalse(); + ResultSetStats receivedStats = resultSet.getStats(); + assertThat(receivedStats).isNotNull(); + assertThat(receivedStats.hasQueryPlan()).isTrue(); + assertThat(receivedStats.hasQueryStats()).isTrue(); + } + + @Test + public void testSelectArrayOfStructs() { + assumeFalse("structs are not supported on POSTGRESQL", dialect.dialect == Dialect.POSTGRESQL); + try (ResultSet resultSet = + getClient(dialect.dialect) + .singleUse() + .executeQuery( + Statement.of( + "WITH points AS\n" + + " (SELECT [1, 5] as point\n" + + " UNION ALL SELECT [2, 8] as point\n" + + " UNION ALL SELECT [3, 7] as point\n" + + " UNION ALL SELECT [4, 1] as point\n" + + " UNION ALL SELECT [5, 7] as point)\n" + + "SELECT ARRAY(\n" + + " SELECT STRUCT(point)\n" + + " FROM points)\n" + + " AS coordinates"))) { + assertTrue(resultSet.next()); + assertEquals(resultSet.getColumnCount(), 1); + assertThat(resultSet.getStructList(0)) + .containsExactly( + Struct.newBuilder().set("point").to(Value.int64Array(new long[] {1L, 5L})).build(), + Struct.newBuilder().set("point").to(Value.int64Array(new long[] {2L, 8L})).build(), + Struct.newBuilder().set("point").to(Value.int64Array(new long[] {3L, 7L})).build(), + Struct.newBuilder().set("point").to(Value.int64Array(new long[] {4L, 1L})).build(), + Struct.newBuilder().set("point").to(Value.int64Array(new long[] {5L, 7L})).build()); + assertFalse(resultSet.next()); + } + } + + private List resultRows(Statement statement, Type expectedRowType) 
{ + ArrayList results = new ArrayList<>(); + ResultSet resultSet = + statement.executeQuery(getClient(dialect.dialect).singleUse(TimestampBound.strong())); + while (resultSet.next()) { + Struct row = resultSet.getCurrentRowAsStruct(); + results.add(row); + } + assertThat(resultSet.getType()).isEqualTo(expectedRowType); + assertThat(resultSet.next()).isFalse(); + return results; + } + + private Struct executeWithRowResultType(Statement statement, Type expectedRowType) { + ResultSet resultSet = + statement.executeQuery(getClient(dialect.dialect).singleUse(TimestampBound.strong())); + assertThat(resultSet.next()).isTrue(); + assertThat(resultSet.getType()).isEqualTo(expectedRowType); + Struct row = resultSet.getCurrentRowAsStruct(); + assertThat(resultSet.next()).isFalse(); + return row; + } + + private Struct execute(Statement statement, Type expectedColumnType) { + Type rowType = Type.struct(StructField.of("", expectedColumnType)); + if (dialect.dialect == Dialect.POSTGRESQL) { + rowType = Type.struct(StructField.of("?column?", expectedColumnType)); + } + return executeWithRowResultType(statement, rowType); + } + + private Struct execute(Statement.Builder builder, Type expectedColumnType) { + return execute(builder.build(), expectedColumnType); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITQueueTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITQueueTest.java new file mode 100644 index 000000000000..eba3fb2865b9 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITQueueTest.java @@ -0,0 +1,163 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; +import static org.junit.Assume.assumeTrue; + +import com.google.cloud.ByteArray; +import com.google.cloud.spanner.*; +import com.google.cloud.spanner.connection.ConnectionOptions; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import org.junit.*; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** Integration test for Cloud Spanner Queue. 
*/ +@Category(ParallelIntegrationTest.class) +@RunWith(Parameterized.class) +public class ITQueueTest { + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + + @Parameterized.Parameters(name = "Dialect = {0}") + public static List data() { + List params = new ArrayList<>(); + params.add(new DialectTestParameter(Dialect.GOOGLE_STANDARD_SQL)); + return params; + } + + @Parameterized.Parameter() public DialectTestParameter dialect; + + private static DatabaseClient googleStandardSQLClient; + + private static final String[] GOOGLE_STANDARD_SQL_SCHEMA = + new String[] { + "CREATE Queue Q1 (" + + " Id INT64 NOT NULL," + + " Payload BYTES(MAX) NOT NULL," + + ") PRIMARY KEY (Id), " + + "OPTIONS (receive_mode = 'PULL')", + "CREATE TABLE T1 (" + + " K1 INT64 NOT NULL," + + " K INT64 NOT NULL," + + ") PRIMARY KEY (K1)" + }; + + private static DatabaseClient client; + + private Struct readRow(String queue, Key key, String... columns) { + return client.singleUse(TimestampBound.strong()).readRow(queue, key, Arrays.asList(columns)); + } + + @BeforeClass + public static void setUpTestSuite() { + // TODO: remove once the feature is fully enabled in prod + assumeTrue("Queue tests are temporarily disabled", false); + Database googleStandardSQLDatabase = + env.getTestHelper().createTestDatabase(GOOGLE_STANDARD_SQL_SCHEMA); + googleStandardSQLClient = env.getTestHelper().getDatabaseClient(googleStandardSQLDatabase); + System.out.println("Database created"); + } + + @Before + public void setUp() { + // TODO: add postgres schema & client after feature is enabled + client = googleStandardSQLClient; + } + + @AfterClass + public static void teardown() { + ConnectionOptions.closeSpanner(); + } + + @Test + public void testSendAndAckMutation() { + client.write( + Arrays.asList( + Mutation.newSendBuilder("Q1") + .setKey(Key.of(1)) + .setPayload(Value.bytes(ByteArray.copyFrom("payload1"))) + .build(), + Mutation.newSendBuilder("Q1") + .setKey(Key.of(2)) + 
.setPayload(Value.bytes(ByteArray.copyFrom("payload2"))) + .build(), + Mutation.newSendBuilder("Q1") + .setKey(Key.of(3)) + .setPayload(Value.bytes(ByteArray.copyFrom("payload3"))) + .setDeliveryTime(Instant.now()) + .build())); + + // Verifying messages are in the queue. + Struct row = readRow("Q1", Key.of(1), "Payload"); + assertThat(row == null).isFalse(); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getBytes(0)).isEqualTo(ByteArray.copyFrom("payload1")); + + row = readRow("Q1", Key.of(2), "Payload"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getBytes(0)).isEqualTo(ByteArray.copyFrom("payload2")); + + row = readRow("Q1", Key.of(3), "Payload"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getBytes(0)).isEqualTo(ByteArray.copyFrom("payload3")); + + // Ack-ing the first two messages. + client.write( + Arrays.asList( + Mutation.newAckBuilder("Q1").setKey(Key.of(1)).build(), + Mutation.newAckBuilder("Q1").setKey(Key.of(2)).build())); + + // Verifying the first 2 messages are acked and removed from the queue + row = readRow("Q1", Key.of(1), "Payload"); + assertThat(row == null).isTrue(); + row = readRow("Q1", Key.of(2), "Payload"); + assertThat(row == null).isTrue(); + row = readRow("Q1", Key.of(3), "Payload"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getBytes(0)).isEqualTo(ByteArray.copyFrom("payload3")); + } + + @Test + public void testAckNotFound() { + // Enable IgnoreNotFound. + client.write( + Collections.singletonList( + Mutation.newAckBuilder("Q1").setKey(Key.of(1)).setIgnoreNotFound(true).build())); + Struct row = readRow("Q1", Key.of(1), "Payload"); + assertThat(row == null).isTrue(); + + // Disable IgnoreNotFound. 
+ SpannerException thrown = + assertThrows( + SpannerException.class, + () -> + client.write( + Collections.singletonList( + Mutation.newAckBuilder("Q1") + .setKey(Key.of(1)) + .setIgnoreNotFound(false) + .build()))); + assertThat(thrown).hasMessageThat().contains("NOT_FOUND: Message not found"); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITReadOnlyTxnTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITReadOnlyTxnTest.java new file mode 100644 index 000000000000..c93e53f212b8 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITReadOnlyTxnTest.java @@ -0,0 +1,357 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.it; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ReadContext; +import com.google.cloud.spanner.ReadOnlyTransaction; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TimestampBound; +import com.google.common.collect.ImmutableList; +import java.util.Collections; +import java.util.List; +import java.util.NavigableMap; +import java.util.TreeMap; +import java.util.concurrent.TimeUnit; +import javax.annotation.Nullable; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Integration tests for read-only transactions. For various reasons, it's not possible to test all + * concurrency modes with high confidence using a black-box test. For example, it's legal for + * min-read-timestamp to return the same data that exact-timestamp would, so these cases cannot be + * distinguished. Hence, these integration tests only minimally verify that read-only transactions + * work at all, and unit tests are relied on for validating that modes are encoded correctly. 
+ */ +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITReadOnlyTxnTest { + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static final String TABLE_NAME = "TestTable"; + private static DatabaseClient sharedClient; + private static List sharedHistory; + private List history; + private DatabaseClient client; + + private static class History { + private final Timestamp timestamp; + private final String value; + private final long minCommitNanoTime; + + private History(Timestamp timestamp, String value, long minCommitNanoTime) { + this.timestamp = timestamp; + this.value = value; + this.minCommitNanoTime = minCommitNanoTime; + } + } + + @BeforeClass + public static void setUpSharedDatabase() { + ImmutableList.Builder historyBuilder = ImmutableList.builder(); + sharedClient = newTestDatabase(historyBuilder); + sharedHistory = historyBuilder.build(); + } + + private static DatabaseClient newTestDatabase(ImmutableList.Builder historyBuilder) { + Database newDb = + env.getTestHelper() + .createTestDatabase( + "CREATE TABLE TestTable ( StringValue STRING(MAX) ) PRIMARY KEY ()"); + DatabaseClient newClient = env.getTestHelper().getDatabaseClient(newDb); + for (int i = 0; i < 5; ++i) { + writeNewValue(newClient, i, historyBuilder); + } + return newClient; + } + + private static void writeNewValue( + DatabaseClient client, int i, @Nullable ImmutableList.Builder historyBuilder) { + String value = "v" + i; + Mutation m = Mutation.newInsertOrUpdateBuilder(TABLE_NAME).set("StringValue").to(value).build(); + long minCommitNanoTime = System.nanoTime(); + Timestamp timestamp = client.writeAtLeastOnce(Collections.singletonList(m)); + if (historyBuilder != null) { + historyBuilder.add(new History(timestamp, value, minCommitNanoTime)); + } + } + + @Before + public void setUp() { + history = sharedHistory; + client = sharedClient; + } + + private static Struct readRow(ReadContext ctx) { + return 
ctx.readRow(TABLE_NAME, Key.of(), Collections.singletonList("StringValue")); + } + + private static Struct queryRow(ReadContext ctx) { + ResultSet resultSet = Statement.of("SELECT StringValue FROM TestTable").executeQuery(ctx); + // TODO(user): Consider a library routine to consume a single row from a query. + assertThat(resultSet.next()).isTrue(); + Struct row = resultSet.getCurrentRowAsStruct(); + assertThat(resultSet.next()).isFalse(); + return row; + } + + @Test + public void singleStrong() { + History expected = history.get(history.size() - 1); + + ReadOnlyTransaction readContext = client.singleUseReadOnlyTransaction(); + Struct row = readRow(readContext); + assertThat(row).isNotNull(); + assertThat(row.getString(0)).isEqualTo(expected.value); + assertThat(readContext.getReadTimestamp()).isAtLeast(expected.timestamp); + + row = readRow(client.singleUse()); + assertThat(row).isNotNull(); + assertThat(row.getString(0)).isEqualTo(expected.value); + } + + @Test + public void singleReadTimestamp() { + History expected = history.get(2); + + TimestampBound bound = TimestampBound.ofReadTimestamp(expected.timestamp); + ReadOnlyTransaction readContext = client.singleUseReadOnlyTransaction(bound); + Struct row = readRow(readContext); + assertThat(row).isNotNull(); + assertThat(row.getString(0)).isEqualTo(expected.value); + assertThat(readContext.getReadTimestamp()).isEqualTo(expected.timestamp); + + row = readRow(client.singleUse(bound)); + assertThat(row).isNotNull(); + assertThat(row.getString(0)).isEqualTo(expected.value); + } + + @Test + public void query() { + // We don't exhaustively test query with all modes - the read tests give us enough confidence + // that transaction options are generated appropriately. Just do one test for each type of + // context to ensure that transaction options are set at all. 
+ History expected = history.get(2); + + TimestampBound bound = TimestampBound.ofReadTimestamp(expected.timestamp); + ReadOnlyTransaction readContext = client.singleUseReadOnlyTransaction(bound); + Struct row = queryRow(readContext); + assertThat(row).isNotNull(); + assertThat(row.getString(0)).isEqualTo(expected.value); + assertThat(readContext.getReadTimestamp()).isEqualTo(expected.timestamp); + + readContext = client.readOnlyTransaction(bound); + row = queryRow(readContext); + assertThat(row).isNotNull(); + assertThat(row.getString(0)).isEqualTo(expected.value); + assertThat(readContext.getReadTimestamp()).isEqualTo(expected.timestamp); + readContext.close(); + + row = queryRow(client.singleUse(bound)); + assertThat(row).isNotNull(); + assertThat(row.getString(0)).isEqualTo(expected.value); + } + + @Test + public void singleMinReadTimestamp() { + int minimumIndex = 2; + History minimum = history.get(minimumIndex); + NavigableMap possibleValues = new TreeMap<>(); + for (History item : history.subList(minimumIndex, history.size())) { + possibleValues.put(item.timestamp, item.value); + } + + TimestampBound bound = TimestampBound.ofMinReadTimestamp(minimum.timestamp); + ReadOnlyTransaction readContext = client.singleUseReadOnlyTransaction(bound); + Struct row = readRow(readContext); + assertThat(row).isNotNull(); + assertThat(readContext.getReadTimestamp()).isAtLeast(minimum.timestamp); + assertThat(row.getString(0)) + .isEqualTo(possibleValues.floorEntry(readContext.getReadTimestamp()).getValue()); + + row = readRow(client.singleUse(bound)); + assertThat(row).isNotNull(); + assertThat(row.getString(0)).isIn(possibleValues.values()); + } + + @Test + public void singleExactStaleness() { + // TODO(user): Use a shorter deadline (when supported) and pass on the call to Cloud Spanner. 
+ long deadlineNanoTime = System.nanoTime() + TimeUnit.MINUTES.toNanos(1); + + // The only exact staleness values that can be tested reliably are before the first item or + // later than the last item: we choose the former. + // + // Pick a staleness that is "guaranteed" not to observe the first write. Note that this + // guarantee doesn't strictly hold in the absence of enforced read deadlines, but we use a + // deadline large enough to make it practically true. + long stalenessNanos = 1 + deadlineNanoTime - history.get(0).minCommitNanoTime; + TimestampBound bound = TimestampBound.ofExactStaleness(stalenessNanos, TimeUnit.NANOSECONDS); + ReadOnlyTransaction readContext = client.singleUseReadOnlyTransaction(bound); + Struct row = readRow(readContext); + assertThat(row).isNull(); + assertThat(readContext.getReadTimestamp().toSqlTimestamp()) + .isLessThan(history.get(0).timestamp.toSqlTimestamp()); + + row = readRow(client.singleUse(bound)); + assertThat(row).isNull(); + } + + @Test + public void singleMaxStaleness() { + History minimum = history.get(2); + NavigableMap possibleValues = new TreeMap<>(); + for (History item : history.subList(2, history.size())) { + possibleValues.put(item.timestamp, item.value); + } + + // Pick a staleness that cannot precede the second write (which, in practice, is the staleness + // that exceeds the minimum commit time for the subsequent write). 
+ long stalenessNanos = System.nanoTime() - history.get(3).minCommitNanoTime; + TimestampBound bound = TimestampBound.ofMaxStaleness(stalenessNanos, TimeUnit.NANOSECONDS); + ReadOnlyTransaction readContext = client.singleUseReadOnlyTransaction(bound); + + Struct row = readRow(readContext); + assertThat(row).isNotNull(); + assertThat(readContext.getReadTimestamp()).isAtLeast(minimum.timestamp); + assertThat(row.getString(0)) + .isEqualTo(possibleValues.floorEntry(readContext.getReadTimestamp()).getValue()); + + row = readRow(client.singleUse(bound)); + assertThat(row).isNotNull(); + assertThat(row.getString(0)).isIn(possibleValues.values()); + } + + private void setUpPrivateDatabase() { + ImmutableList.Builder historyBuilder = ImmutableList.builder(); + client = newTestDatabase(historyBuilder); + history = historyBuilder.build(); + } + + private void insertAndReadAgain( + ReadOnlyTransaction readContext, + Timestamp expectedTimestamp, + @Nullable String expectedValue) { + writeNewValue(client, history.size(), null); + + Struct row = readRow(readContext); + if (expectedValue == null) { + assertThat(row).isNull(); + } else { + assertThat(row).isNotNull(); + assertThat(row.getString(0)).isEqualTo(expectedValue); + } + assertThat(readContext.getReadTimestamp()).isEqualTo(expectedTimestamp); + } + + @Test + public void multiStrong() { + setUpPrivateDatabase(); + + History expected = history.get(history.size() - 1); + + try (ReadOnlyTransaction readContext = client.readOnlyTransaction()) { + Struct row = readRow(readContext); + assertThat(row).isNotNull(); + assertThat(row.getString(0)).isEqualTo(expected.value); + assertThat(readContext.getReadTimestamp()).isAtLeast(expected.timestamp); + insertAndReadAgain(readContext, readContext.getReadTimestamp(), expected.value); + } + } + + @Test + public void multiReadTimestamp() { + setUpPrivateDatabase(); + + History expected = history.get(2); + + try (ReadOnlyTransaction readContext = + 
client.readOnlyTransaction(TimestampBound.ofReadTimestamp(expected.timestamp))) { + Struct row = readRow(readContext); + assertThat(row).isNotNull(); + assertThat(row.getString(0)).isEqualTo(expected.value); + assertThat(readContext.getReadTimestamp()).isEqualTo(expected.timestamp); + + insertAndReadAgain(readContext, readContext.getReadTimestamp(), expected.value); + } + } + + @Test + public void multiMinReadTimestamp() { + // Cannot use bounded modes with multi-read transactions. + try (ReadOnlyTransaction tx = + client.readOnlyTransaction(TimestampBound.ofMinReadTimestamp(history.get(2).timestamp))) { + try (ResultSet rs = tx.executeQuery(Statement.of("SELECT 1"))) { + rs.next(); + fail("Expected exception"); + } + } catch (IllegalArgumentException ex) { + assertNotNull(ex.getMessage()); + } + } + + @Test + public void multiExactStaleness() { + setUpPrivateDatabase(); + + // See singleExactStaleness() for why we pick this timestamp. We expect to see no value. + long deadlineNanoTime = System.nanoTime() + TimeUnit.MINUTES.toNanos(1); + long stalenessNanos = 1 + deadlineNanoTime - history.get(0).minCommitNanoTime; + + try (ReadOnlyTransaction readContext = + client.readOnlyTransaction( + TimestampBound.ofExactStaleness(stalenessNanos, TimeUnit.NANOSECONDS))) { + Struct row = readRow(readContext); + assertThat(row).isNull(); + assertThat(readContext.getReadTimestamp().toSqlTimestamp()) + .isLessThan(history.get(0).timestamp.toSqlTimestamp()); + + insertAndReadAgain(readContext, readContext.getReadTimestamp(), null); + } + } + + @Test + public void multiMaxStaleness() { + // Cannot use bounded modes with multi-read transactions. 
+ try (ReadOnlyTransaction tx = + client.readOnlyTransaction(TimestampBound.ofMaxStaleness(1, TimeUnit.SECONDS))) { + try (ResultSet rs = tx.executeQuery(Statement.of("SELECT 1"))) { + rs.next(); + fail("Expected exception"); + } + } catch (IllegalArgumentException ex) { + assertNotNull(ex.getMessage()); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITReadTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITReadTest.java new file mode 100644 index 000000000000..2d888996465a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITReadTest.java @@ -0,0 +1,542 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.SpannerMatchers.isSpannerException; +import static com.google.cloud.spanner.Type.StructField; +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeyRange; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.connection.ConnectionOptions; +import com.google.cloud.spanner.testing.RemoteSpannerHelper; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.DirectedReadOptions.IncludeReplicas; +import com.google.spanner.v1.DirectedReadOptions.ReplicaSelection; +import io.grpc.Context; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import org.hamcrest.MatcherAssert; +import org.junit.AfterClass; 
+import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** + * Integration tests for read and query. + * + *

    See also {@link ITWriteTest}, which provides coverage of writing and reading back all Cloud + * Spanner types. + */ +@Category(ParallelIntegrationTest.class) +@RunWith(Parameterized.class) +public class ITReadTest { + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static final String TABLE_NAME = "TestTable"; + private static final String INDEX_NAME = "TestTableByValue"; + private static final String DESC_INDEX_NAME = "TestTableByValueDesc"; + private static final List ALL_COLUMNS = Arrays.asList("Key", "StringValue"); + private static final Type TABLE_TYPE = + Type.struct( + StructField.of("key", Type.string()), StructField.of("stringvalue", Type.string())); + private static DirectedReadOptions DIRECTED_READ_OPTIONS = + DirectedReadOptions.newBuilder() + .setIncludeReplicas( + IncludeReplicas.newBuilder() + .addReplicaSelections( + ReplicaSelection.newBuilder() + .setLocation("us-west1") + .setType(ReplicaSelection.Type.READ_ONLY) + .build()) + .setAutoFailoverDisabled(true)) + .build(); + + private static DatabaseClient googleStandardSQLClient; + private static DatabaseClient postgreSQLClient; + + @BeforeClass + public static void setUpDatabase() { + Database googleStandardSQLDatabase = + env.getTestHelper() + .createTestDatabase( + "CREATE TABLE TestTable (" + + " key STRING(MAX) NOT NULL," + + " stringvalue STRING(MAX)," + + ") PRIMARY KEY (key)", + "CREATE INDEX TestTableByValue ON TestTable(stringvalue)", + "CREATE INDEX TestTableByValueDesc ON TestTable(stringvalue DESC)"); + googleStandardSQLClient = env.getTestHelper().getDatabaseClient(googleStandardSQLDatabase); + if (!isUsingEmulator()) { + Database postgreSQLDatabase = + env.getTestHelper() + .createTestDatabase( + Dialect.POSTGRESQL, + Arrays.asList( + "CREATE TABLE TestTable (" + + " Key VARCHAR PRIMARY KEY," + + " StringValue VARCHAR" + + ")", + "CREATE INDEX TestTableByValue ON TestTable(StringValue)", + "CREATE INDEX TestTableByValueDesc ON 
TestTable(StringValue DESC)")); + postgreSQLClient = env.getTestHelper().getDatabaseClient(postgreSQLDatabase); + } + + // Includes k0..k14. Note that strings k{10,14} sort between k1 and k2. + List mutations = new ArrayList<>(); + for (int i = 0; i < 15; ++i) { + mutations.add( + Mutation.newInsertOrUpdateBuilder(TABLE_NAME) + .set("key") + .to("k" + i) + .set("stringvalue") + .to("v" + i) + .build()); + } + googleStandardSQLClient.write(mutations); + if (!isUsingEmulator()) { + postgreSQLClient.write(mutations); + } + } + + @AfterClass + public static void teardown() { + ConnectionOptions.closeSpanner(); + } + + @Parameterized.Parameters(name = "Dialect = {0}") + public static List data() { + List params = new ArrayList<>(); + params.add(new DialectTestParameter(Dialect.GOOGLE_STANDARD_SQL)); + // "PG dialect tests are not supported by the emulator" + if (!isUsingEmulator()) { + params.add(new DialectTestParameter(Dialect.POSTGRESQL)); + } + return params; + } + + @Parameterized.Parameter(0) + public DialectTestParameter dialect; + + private DatabaseClient getClient(Dialect dialect) { + if (dialect == Dialect.POSTGRESQL) { + return postgreSQLClient; + } + return googleStandardSQLClient; + } + + @Test + public void emptyRead() { + ResultSet resultSet = + getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .read( + TABLE_NAME, + KeySet.range(KeyRange.closedOpen(Key.of("k99"), Key.of("z"))), + ALL_COLUMNS); + assertThat(resultSet.next()).isFalse(); + assertThat(resultSet.getType()).isEqualTo(TABLE_TYPE); + } + + @Test + public void indexEmptyRead() { + ResultSet resultSet = + getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .readUsingIndex( + TABLE_NAME, + INDEX_NAME, + KeySet.range(KeyRange.closedOpen(Key.of("v99"), Key.of("z"))), + ALL_COLUMNS); + assertThat(resultSet.next()).isFalse(); + assertThat(resultSet.getType()).isEqualTo(TABLE_TYPE); + } + + @Test + public void pointRead() { + Struct row = + getClient(dialect.dialect) + 
.singleUse(TimestampBound.strong()) + .readRow(TABLE_NAME, Key.of("k1"), ALL_COLUMNS); + assertThat(row).isNotNull(); + assertThat(row.getString(0)).isEqualTo("k1"); + assertThat(row.getString(1)).isEqualTo("v1"); + // Ensure that the Struct implementation supports equality properly. + assertThat(row) + .isEqualTo(Struct.newBuilder().set("key").to("k1").set("stringvalue").to("v1").build()); + } + + @Test + public void indexPointRead() { + Struct row = + getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .readRowUsingIndex(TABLE_NAME, INDEX_NAME, Key.of("v1"), ALL_COLUMNS); + assertThat(row).isNotNull(); + assertThat(row.getString(0)).isEqualTo("k1"); + assertThat(row.getString(1)).isEqualTo("v1"); + } + + @Test + public void pointReadNotFound() { + Struct row = + getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .readRow(TABLE_NAME, Key.of("k999"), ALL_COLUMNS); + assertThat(row).isNull(); + } + + @Test + public void indexPointReadNotFound() { + Struct row = + getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .readRowUsingIndex(TABLE_NAME, INDEX_NAME, Key.of("v999"), ALL_COLUMNS); + assertThat(row).isNull(); + } + + @Test + public void rangeReads() { + checkRange(Source.BASE_TABLE, KeySet.singleKey(Key.of("k1")), 1); + checkRange(Source.BASE_TABLE, KeyRange.closedOpen(Key.of("k3"), Key.of("k5")), 3, 4); + checkRange(Source.BASE_TABLE, KeyRange.closedClosed(Key.of("k3"), Key.of("k5")), 3, 4, 5); + checkRange(Source.BASE_TABLE, KeyRange.openClosed(Key.of("k3"), Key.of("k5")), 4, 5); + checkRange(Source.BASE_TABLE, KeyRange.openOpen(Key.of("k3"), Key.of("k5")), 4); + + // Partial key specification. 
+ checkRange(Source.BASE_TABLE, KeyRange.closedClosed(Key.of("k7"), Key.of()), 7, 8, 9); + checkRange(Source.BASE_TABLE, KeyRange.openClosed(Key.of("k7"), Key.of()), 8, 9); + checkRange(Source.BASE_TABLE, KeyRange.closedOpen(Key.of(), Key.of("k11")), 0, 1, 10); + checkRange(Source.BASE_TABLE, KeyRange.closedClosed(Key.of(), Key.of("k11")), 0, 1, 10, 11); + + // The following produce empty ranges. + // TODO(user): Consider a multi-part key to illustrate partial key behavior. + checkRange(Source.BASE_TABLE, KeyRange.closedOpen(Key.of("k7"), Key.of())); + checkRange(Source.BASE_TABLE, KeyRange.openOpen(Key.of("k7"), Key.of())); + checkRange(Source.BASE_TABLE, KeyRange.openOpen(Key.of(), Key.of("k11"))); + checkRange(Source.BASE_TABLE, KeyRange.openClosed(Key.of(), Key.of("k11"))); + + // Prefix is component-wise, not string prefix. + checkRange(Source.BASE_TABLE, KeyRange.prefix(Key.of("k1")), 1); + checkRange( + Source.BASE_TABLE, KeyRange.closedOpen(Key.of("k1"), Key.of("k2")), 1, 10, 11, 12, 13, 14); + + checkRange(Source.BASE_TABLE, KeySet.all(), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14); + } + + @Test + public void limitRead() { + checkRangeWithLimit( + Source.BASE_TABLE, 2, KeyRange.closedClosed(Key.of("k3"), Key.of("k7")), 3, 4); + checkRangeWithLimit( + Source.BASE_TABLE, 0, KeyRange.closedClosed(Key.of("k3"), Key.of("k7")), 3, 4, 5, 6, 7); + } + + @Test + public void indexRangeReads() { + checkRange(Source.INDEX, KeySet.singleKey(Key.of("v1")), 1); + checkRange(Source.INDEX, KeyRange.closedOpen(Key.of("v3"), Key.of("v5")), 3, 4); + checkRange(Source.INDEX, KeyRange.closedClosed(Key.of("v3"), Key.of("v5")), 3, 4, 5); + checkRange(Source.INDEX, KeyRange.openClosed(Key.of("v3"), Key.of("v5")), 4, 5); + checkRange(Source.INDEX, KeyRange.openOpen(Key.of("v3"), Key.of("v5")), 4); + + // Partial key specification. 
+ checkRange(Source.INDEX, KeyRange.closedClosed(Key.of("v7"), Key.of()), 7, 8, 9); + checkRange(Source.INDEX, KeyRange.openClosed(Key.of("v7"), Key.of()), 8, 9); + checkRange(Source.INDEX, KeyRange.closedOpen(Key.of(), Key.of("v11")), 0, 1, 10); + checkRange(Source.INDEX, KeyRange.closedClosed(Key.of(), Key.of("v11")), 0, 1, 10, 11); + + // The following produce empty ranges. + checkRange(Source.INDEX, KeyRange.closedOpen(Key.of("v7"), Key.of())); + checkRange(Source.INDEX, KeyRange.openOpen(Key.of("v7"), Key.of())); + checkRange(Source.INDEX, KeyRange.openOpen(Key.of(), Key.of("v11"))); + checkRange(Source.INDEX, KeyRange.openClosed(Key.of(), Key.of("v11"))); + + // Prefix is component-wise, not string prefix. + checkRange(Source.INDEX, KeyRange.prefix(Key.of("v1")), 1); + checkRange( + Source.INDEX, KeyRange.closedOpen(Key.of("v1"), Key.of("v2")), 1, 10, 11, 12, 13, 14); + checkRange(Source.INDEX, KeySet.all(), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14); + + // Read from an index with DESC ordering. 
+ checkRange(Source.DESC_INDEX, KeySet.all(), 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + } + + @Test + public void limitReadUsingIndex() { + checkRangeWithLimit(Source.INDEX, 2, KeyRange.closedClosed(Key.of("v3"), Key.of("v7")), 3, 4); + checkRangeWithLimit( + Source.DESC_INDEX, 2, KeyRange.closedClosed(Key.of("v7"), Key.of("v3")), 7, 6); + } + + @Test + public void multiPointRead() { + KeySet keys = + KeySet.newBuilder().addKey(Key.of("k3")).addKey(Key.of("k5")).addKey(Key.of("k7")).build(); + checkRange(Source.BASE_TABLE, keys, 3, 5, 7); + } + + @Test + public void indexMultiPointRead() { + KeySet keys = + KeySet.newBuilder().addKey(Key.of("v3")).addKey(Key.of("v5")).addKey(Key.of("v7")).build(); + checkRange(Source.INDEX, keys, 3, 5, 7); + } + + @Test + public void rowsAreSnapshots() { + List rows = new ArrayList<>(); + ResultSet resultSet = + getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .read( + TABLE_NAME, + KeySet.newBuilder() + .addKey(Key.of("k2")) + .addKey(Key.of("k3")) + .addKey(Key.of("k4")) + .build(), + ALL_COLUMNS); + while (resultSet.next()) { + rows.add(resultSet.getCurrentRowAsStruct()); + } + + assertThat(rows.size()).isEqualTo(3); + assertThat(rows.get(0).getString(0)).isEqualTo("k2"); + assertThat(rows.get(0).getString(1)).isEqualTo("v2"); + assertThat(rows.get(1).getString(0)).isEqualTo("k3"); + assertThat(rows.get(1).getString(1)).isEqualTo("v3"); + assertThat(rows.get(2).getString(0)).isEqualTo("k4"); + assertThat(rows.get(2).getString(1)).isEqualTo("v4"); + } + + @Test + public void pointReadWithDirectedReadOptions() { + try (ResultSet rs = + getClient(dialect.dialect) + .singleUse() + .read( + TABLE_NAME, + KeySet.singleKey(Key.of("k1")), + ALL_COLUMNS, + Options.directedRead(DIRECTED_READ_OPTIONS))) { + assertTrue(rs.next()); + assertEquals("k1", rs.getString(0)); + assertEquals("v1", rs.getString(1)); + assertFalse(rs.next()); + } + } + + @Test + public void invalidDatabase() { + RemoteSpannerHelper helper 
= env.getTestHelper(); + DatabaseClient invalidClient = + helper.getClient().getDatabaseClient(DatabaseId.of(helper.getInstanceId(), "invalid")); + try { + invalidClient + .singleUse(TimestampBound.strong()) + .readRow(TABLE_NAME, Key.of("k99"), ALL_COLUMNS); + fail("Expected exception"); + } catch (SpannerException ex) { + assertThat(ex.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + } + } + + @Test + public void tableNotFound() { + try { + getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .readRow("BadTableName", Key.of("k1"), ALL_COLUMNS); + fail("Expected exception"); + } catch (SpannerException ex) { + assertThat(ex.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + assertThat(ex.getMessage()).contains("BadTableName"); + } + } + + @Test + public void columnNotFound() { + try { + getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .readRow(TABLE_NAME, Key.of("k1"), Arrays.asList("Key", "BadColumnName")); + fail("Expected exception"); + } catch (SpannerException ex) { + assertThat(ex.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + assertThat(ex.getMessage()).contains("BadColumnName"); + } + } + + @Test + public void cursorErrorDeferred() { + // Error should be deferred until next(). This gives consistent behavior with respect to + // non-blocking implementations (e.g., gRPC). 
+ ResultSet resultSet = + getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .read("BadTableName", KeySet.singleKey(Key.of("k1")), ALL_COLUMNS); + try { + resultSet.next(); + fail("Expected exception"); + } catch (SpannerException ex) { + assertThat(ex.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + assertThat(ex.getMessage()).contains("BadTableName"); + } + } + + @Test + public void cancellation() { + Context.CancellableContext context = Context.current().withCancellation(); + Runnable work = + context.wrap( + () -> { + getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .readRow(TABLE_NAME, Key.of("k1"), ALL_COLUMNS); + }); + context.cancel(new RuntimeException("Cancelled by test")); + + try { + work.run(); + fail("missing expected exception"); + } catch (SpannerException e) { + MatcherAssert.assertThat(e, isSpannerException(ErrorCode.CANCELLED)); + } + } + + @Test + public void deadline() { + ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); + Context.CancellableContext context = + Context.current().withDeadlineAfter(10, TimeUnit.NANOSECONDS, executor); + Runnable work = + context.wrap( + () -> { + getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .readRow(TABLE_NAME, Key.of("k1"), ALL_COLUMNS); + }); + + try { + work.run(); + fail("missing expected exception"); + } catch (SpannerException e) { + MatcherAssert.assertThat(e, isSpannerException(ErrorCode.DEADLINE_EXCEEDED)); + } finally { + executor.shutdown(); + } + } + + private enum Source { + BASE_TABLE, + INDEX, + DESC_INDEX, + } + + private void checkReadRange(Source source, KeySet keySet, long limit, int[] expectedRows) { + Map expected = new LinkedHashMap<>(); + for (int expectedRow : expectedRows) { + expected.put("k" + expectedRow, "v" + expectedRow); + } + + ResultSet resultSet; + switch (source) { + case INDEX: + resultSet = + limit != 0 + ? 
getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .readUsingIndex( + TABLE_NAME, INDEX_NAME, keySet, ALL_COLUMNS, Options.limit(limit)) + : getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .readUsingIndex(TABLE_NAME, INDEX_NAME, keySet, ALL_COLUMNS); + break; + case DESC_INDEX: + resultSet = + limit != 0 + ? getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .readUsingIndex( + TABLE_NAME, DESC_INDEX_NAME, keySet, ALL_COLUMNS, Options.limit(limit)) + : getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .readUsingIndex(TABLE_NAME, DESC_INDEX_NAME, keySet, ALL_COLUMNS); + break; + case BASE_TABLE: + resultSet = + limit != 0 + ? getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .read(TABLE_NAME, keySet, ALL_COLUMNS, Options.limit(limit)) + : getClient(dialect.dialect) + .singleUse(TimestampBound.strong()) + .read(TABLE_NAME, keySet, ALL_COLUMNS); + break; + default: + throw new IllegalArgumentException("Invalid source"); + } + Map rows = new LinkedHashMap<>(); + while (resultSet.next()) { + rows.put(resultSet.getString(0), resultSet.getString(1)); + } + + assertWithMessage("read of " + keySet).that(rows).isEqualTo(expected); + } + + private void checkRange(Source source, KeyRange range, int... expectedRows) { + checkRange(source, KeySet.range(range), expectedRows); + } + + private void checkRange(Source source, KeySet keySet, int... expectedRows) { + checkReadRange(source, keySet, 0, expectedRows); + } + + private void checkRangeWithLimit(Source source, long limit, KeyRange range, int... 
expectedRows) { + checkReadRange(source, KeySet.range(range), limit, expectedRows); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITResultSetGetValue.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITResultSetGetValue.java new file mode 100644 index 000000000000..894d46a8090f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITResultSetGetValue.java @@ -0,0 +1,795 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.it; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.StructField; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.connection.ConnectionOptions; +import com.google.cloud.spanner.testing.EmulatorSpannerHelper; +import com.google.common.primitives.Doubles; +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; +import org.junit.AfterClass; +import org.junit.Assume; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +@Category(ParallelIntegrationTest.class) +@RunWith(Parameterized.class) +public class ITResultSetGetValue { + + @Parameterized.Parameters(name = "Dialect = {0}") + public static List data() { + List params = new ArrayList<>(); + params.add(new DialectTestParameter(Dialect.GOOGLE_STANDARD_SQL)); + if (!EmulatorSpannerHelper.isUsingEmulator()) { + params.add(new 
DialectTestParameter(Dialect.POSTGRESQL)); + } + return params; + } + + @Parameterized.Parameter() public DialectTestParameter dialect; + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + + // For floats / doubles comparison + private static final double DELTA = 1e-15; + private static final String TABLE_NAME = "TestTable"; + private static DatabaseClient googleStandardSQLClient; + private static DatabaseClient postgreSQLClient; + private DatabaseClient databaseClient; + + @BeforeClass + public static void beforeClass() + throws ExecutionException, InterruptedException, TimeoutException { + Database googleStandardSqlDatabase = + env.getTestHelper() + .createTestDatabase( + "CREATE TABLE " + + TABLE_NAME + + "(" + + "Id INT64 NOT NULL," + + "bool BOOL," + + "int64 INT64," + + "float64 FLOAT64," + + "numeric NUMERIC," + + "string STRING(MAX)," + + "bytes BYTES(MAX)," + + "timestamp TIMESTAMP," + + "date DATE," + + "json JSON," + + "boolArray ARRAY," + + "int64Array ARRAY," + + "float64Array ARRAY," + + "numericArray ARRAY," + + "stringArray ARRAY," + + "bytesArray ARRAY," + + "timestampArray ARRAY," + + "dateArray ARRAY," + + "jsonArray ARRAY" + + ") PRIMARY KEY (Id)"); + googleStandardSQLClient = env.getTestHelper().getDatabaseClient(googleStandardSqlDatabase); + if (!EmulatorSpannerHelper.isUsingEmulator()) { + Database postgreSQLDatabase = + env.getTestHelper() + .createTestDatabase( + Dialect.POSTGRESQL, + Collections.singletonList( + "CREATE TABLE " + + TABLE_NAME + + "(" + + "id BIGINT PRIMARY KEY," + + "bool BOOL," + + "int64 BIGINT," + + "float64 DOUBLE PRECISION," + + "numeric NUMERIC," + + "string VARCHAR," + + "bytes BYTEA," + + "timestamp TIMESTAMPTZ," + + "date DATE," + + "boolArray BOOL[]," + + "int64Array BIGINT[]," + + "float64Array DOUBLE PRECISION[]," + + "numericArray NUMERIC[]," + + "stringArray VARCHAR[]," + + "bytesArray BYTEA[]," + + "dateArray DATE[]," + + "timestampArray TIMESTAMPTZ[]" + + ")")); + 
postgreSQLClient = env.getTestHelper().getDatabaseClient(postgreSQLDatabase); + } + } + + @Before + public void before() { + databaseClient = + dialect.dialect == Dialect.GOOGLE_STANDARD_SQL ? googleStandardSQLClient : postgreSQLClient; + } + + @AfterClass + public static void teardown() { + ConnectionOptions.closeSpanner(); + } + + @Test + public void testReadNonNullValuesGoogleStandardSQL() { + Assume.assumeTrue(dialect.dialect == Dialect.GOOGLE_STANDARD_SQL); + databaseClient.write( + Collections.singletonList( + Mutation.newInsertBuilder(TABLE_NAME) + .set("Id") + .to(1L) + .set("bool") + .to(true) + .set("int64") + .to(10L) + .set("float64") + .to(20D) + .set("numeric") + .to(new BigDecimal("30")) + .set("string") + .to("stringValue") + .set("bytes") + .to(ByteArray.copyFrom("bytesValue")) + .set("timestamp") + .to(Timestamp.ofTimeSecondsAndNanos(1, 0)) + .set("date") + .to(Date.fromYearMonthDay(2021, 1, 2)) + .set("json") + .to(Value.json("{\"key\":\"value\"}")) + .set("boolArray") + .toBoolArray(new boolean[] {false, true}) + .set("int64Array") + .toInt64Array(new long[] {100L, 200L}) + .set("float64Array") + .toFloat64Array(new double[] {1000D, 2000D}) + .set("numericArray") + .toNumericArray(Arrays.asList(new BigDecimal("10000"), new BigDecimal("20000"))) + .set("stringArray") + .toStringArray(Arrays.asList("string1", "string2")) + .set("bytesArray") + .toBytesArray( + Arrays.asList(ByteArray.copyFrom("bytes1"), ByteArray.copyFrom("bytes2"))) + .set("timestampArray") + .toTimestampArray( + Arrays.asList( + Timestamp.ofTimeSecondsAndNanos(10, 0), + Timestamp.ofTimeSecondsAndNanos(20, 0))) + .set("dateArray") + .toDateArray( + Arrays.asList( + Date.fromYearMonthDay(2021, 2, 3), Date.fromYearMonthDay(2021, 3, 4))) + .set("jsonArray") + .toJsonArray(Arrays.asList("{\"key1\":\"value1\"}", "{\"key2\":\"value2\"}")) + .build())); + try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery(Statement.of("SELECT * FROM " + TABLE_NAME + " WHERE Id = 
1"))) { + resultSet.next(); + + assertEquals(Value.int64(1L), resultSet.getValue("Id")); + assertEquals(Value.bool(true), resultSet.getValue("bool")); + assertEquals(Value.int64(10L), resultSet.getValue("int64")); + assertEquals(20D, resultSet.getValue("float64").getFloat64(), 1e-15); + assertEquals(Value.numeric(new BigDecimal("30")), resultSet.getValue("numeric")); + assertEquals(Value.string("stringValue"), resultSet.getValue("string")); + assertEquals(Value.bytes(ByteArray.copyFrom("bytesValue")), resultSet.getValue("bytes")); + assertEquals( + Value.timestamp(Timestamp.ofTimeSecondsAndNanos(1, 0)), resultSet.getValue("timestamp")); + assertEquals(Value.date(Date.fromYearMonthDay(2021, 1, 2)), resultSet.getValue("date")); + assertEquals(Value.json("{\"key\":\"value\"}"), resultSet.getValue("json")); + assertEquals(Value.boolArray(new boolean[] {false, true}), resultSet.getValue("boolArray")); + assertEquals(Value.int64Array(new long[] {100L, 200L}), resultSet.getValue("int64Array")); + assertArrayEquals( + new double[] {1000D, 2000D}, + Doubles.toArray(resultSet.getValue("float64Array").getFloat64Array()), + 1e-15); + assertEquals( + Value.numericArray(Arrays.asList(new BigDecimal("10000"), new BigDecimal("20000"))), + resultSet.getValue("numericArray")); + assertEquals( + Value.stringArray(Arrays.asList("string1", "string2")), + resultSet.getValue("stringArray")); + assertEquals( + Value.bytesArray( + Arrays.asList(ByteArray.copyFrom("bytes1"), ByteArray.copyFrom("bytes2"))), + resultSet.getValue("bytesArray")); + assertEquals( + Value.timestampArray( + Arrays.asList( + Timestamp.ofTimeSecondsAndNanos(10, 0), Timestamp.ofTimeSecondsAndNanos(20, 0))), + resultSet.getValue("timestampArray")); + assertEquals( + Value.dateArray( + Arrays.asList(Date.fromYearMonthDay(2021, 2, 3), Date.fromYearMonthDay(2021, 3, 4))), + resultSet.getValue("dateArray")); + assertEquals( + Value.jsonArray(Arrays.asList("{\"key1\":\"value1\"}", "{\"key2\":\"value2\"}")), + 
resultSet.getValue("jsonArray")); + } + } + + @Test + public void testReadNonNullValuesPostgreSQL() { + Assume.assumeTrue(dialect.dialect == Dialect.POSTGRESQL); + databaseClient.write( + Collections.singletonList( + Mutation.newInsertBuilder(TABLE_NAME) + .set("id") + .to(1L) + .set("bool") + .to(true) + .set("int64") + .to(10L) + .set("float64") + .to(20D) + .set("numeric") + .to(new BigDecimal("30")) + .set("string") + .to("stringValue") + .set("bytes") + .to(ByteArray.copyFrom("bytesValue")) + .set("date") + .to(Date.fromYearMonthDay(2021, 1, 2)) + .set("timestamp") + .to(Timestamp.ofTimeSecondsAndNanos(1, 0)) + .set("boolArray") + .toBoolArray(new boolean[] {false, true}) + .set("int64Array") + .toInt64Array(new long[] {100L, 200L}) + .set("float64Array") + .toFloat64Array(new double[] {1000D, 2000D}) + .set("numericArray") + .toNumericArray(Arrays.asList(new BigDecimal("10000"), new BigDecimal("20000"))) + .set("stringArray") + .toStringArray(Arrays.asList("string1", "string2")) + .set("bytesArray") + .toBytesArray( + Arrays.asList(ByteArray.copyFrom("bytes1"), ByteArray.copyFrom("bytes2"))) + .set("timestampArray") + .toTimestampArray( + Arrays.asList( + Timestamp.ofTimeSecondsAndNanos(10, 0), + Timestamp.ofTimeSecondsAndNanos(20, 0))) + .set("dateArray") + .toDateArray( + Arrays.asList( + Date.fromYearMonthDay(2021, 2, 3), Date.fromYearMonthDay(2021, 3, 4))) + .build())); + + try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery(Statement.of("SELECT * FROM " + TABLE_NAME + " WHERE id = 1"))) { + resultSet.next(); + assertEquals(Value.int64(1L), resultSet.getValue("id")); + assertEquals(Value.bool(true), resultSet.getValue("bool")); + assertEquals(Value.int64(10L), resultSet.getValue("int64")); + assertEquals(20D, resultSet.getValue("float64").getFloat64(), 1e-15); + assertEquals(Value.pgNumeric("30"), resultSet.getValue("numeric")); + assertEquals(Value.string("stringValue"), resultSet.getValue("string")); + 
assertEquals(Value.bytes(ByteArray.copyFrom("bytesValue")), resultSet.getValue("bytes")); + assertEquals( + Value.timestamp(Timestamp.ofTimeSecondsAndNanos(1, 0)), resultSet.getValue("timestamp")); + assertEquals(Value.date(Date.fromYearMonthDay(2021, 1, 2)), resultSet.getValue("date")); + assertEquals(Value.boolArray(new boolean[] {false, true}), resultSet.getValue("boolarray")); + assertEquals(Value.int64Array(new long[] {100L, 200L}), resultSet.getValue("int64array")); + assertArrayEquals( + new double[] {1000D, 2000D}, + Doubles.toArray(resultSet.getValue("float64array").getFloat64Array()), + 1e-15); + assertEquals( + Value.pgNumericArray(Arrays.asList("10000", "20000")), + resultSet.getValue("numericarray")); + assertEquals( + Value.stringArray(Arrays.asList("string1", "string2")), + resultSet.getValue("stringarray")); + assertEquals( + Value.bytesArray( + Arrays.asList(ByteArray.copyFrom("bytes1"), ByteArray.copyFrom("bytes2"))), + resultSet.getValue("bytesarray")); + assertEquals( + Value.timestampArray( + Arrays.asList( + Timestamp.ofTimeSecondsAndNanos(10, 0), Timestamp.ofTimeSecondsAndNanos(20, 0))), + resultSet.getValue("timestamparray")); + assertEquals( + Value.dateArray( + Arrays.asList(Date.fromYearMonthDay(2021, 2, 3), Date.fromYearMonthDay(2021, 3, 4))), + resultSet.getValue("datearray")); + } + } + + @Test + public void testReadNullValuesGoogleStandardSQL() { + Assume.assumeTrue(dialect.dialect == Dialect.GOOGLE_STANDARD_SQL); + databaseClient.write( + Collections.singletonList(Mutation.newInsertBuilder(TABLE_NAME).set("Id").to(2L).build())); + try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery(Statement.of("SELECT * FROM " + TABLE_NAME + " WHERE Id = 2"))) { + resultSet.next(); + + assertEquals(Value.int64(2L), resultSet.getValue("Id")); + assertTrue(resultSet.getValue("bool").isNull()); + assertThrows(IllegalStateException.class, () -> resultSet.getValue("bool").getBool()); + 
assertTrue(resultSet.getValue("int64").isNull()); + assertThrows(IllegalStateException.class, () -> resultSet.getValue("int64").getInt64()); + assertTrue(resultSet.getValue("float64").isNull()); + assertThrows(IllegalStateException.class, () -> resultSet.getValue("float64").getFloat64()); + assertTrue(resultSet.getValue("numeric").isNull()); + assertThrows(IllegalStateException.class, () -> resultSet.getValue("numeric").getNumeric()); + assertTrue(resultSet.getValue("string").isNull()); + assertThrows(IllegalStateException.class, () -> resultSet.getValue("string").getString()); + assertTrue(resultSet.getValue("bytes").isNull()); + assertThrows(IllegalStateException.class, () -> resultSet.getValue("bytes").getBytes()); + assertTrue(resultSet.getValue("timestamp").isNull()); + assertThrows( + IllegalStateException.class, () -> resultSet.getValue("timestamp").getTimestamp()); + assertTrue(resultSet.getValue("date").isNull()); + assertThrows(IllegalStateException.class, () -> resultSet.getValue("date").getDate()); + assertTrue(resultSet.getValue("json").isNull()); + assertThrows(IllegalStateException.class, () -> resultSet.getValue("json").getJson()); + assertTrue(resultSet.getValue("boolArray").isNull()); + assertThrows( + IllegalStateException.class, () -> resultSet.getValue("boolArray").getBoolArray()); + assertTrue(resultSet.getValue("int64Array").isNull()); + assertThrows( + IllegalStateException.class, () -> resultSet.getValue("int64Array").getInt64Array()); + assertTrue(resultSet.getValue("float64Array").isNull()); + assertThrows( + IllegalStateException.class, () -> resultSet.getValue("float64Array").getFloat64Array()); + assertTrue(resultSet.getValue("numericArray").isNull()); + assertThrows( + IllegalStateException.class, () -> resultSet.getValue("numericArray").getNumericArray()); + assertTrue(resultSet.getValue("stringArray").isNull()); + assertThrows( + IllegalStateException.class, () -> resultSet.getValue("stringArray").getStringArray()); + 
assertTrue(resultSet.getValue("bytesArray").isNull()); + assertThrows( + IllegalStateException.class, () -> resultSet.getValue("bytesArray").getBytesArray()); + assertTrue(resultSet.getValue("timestampArray").isNull()); + assertThrows( + IllegalStateException.class, + () -> resultSet.getValue("timestampArray").getTimestampArray()); + assertTrue(resultSet.getValue("dateArray").isNull()); + assertThrows( + IllegalStateException.class, () -> resultSet.getValue("dateArray").getDateArray()); + assertTrue(resultSet.getValue("jsonArray").isNull()); + assertThrows( + IllegalStateException.class, () -> resultSet.getValue("jsonArray").getJsonArray()); + } + } + + @Test + public void testReadNullValuesPostgreSQL() { + Assume.assumeTrue(dialect.dialect == Dialect.POSTGRESQL); + databaseClient.write( + Collections.singletonList(Mutation.newInsertBuilder(TABLE_NAME).set("id").to(2L).build())); + try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery(Statement.of("SELECT * FROM " + TABLE_NAME + " WHERE id = 2"))) { + resultSet.next(); + + assertEquals(Value.int64(2L), resultSet.getValue("id")); + assertTrue(resultSet.getValue("bool").isNull()); + assertThrows(IllegalStateException.class, () -> resultSet.getValue("bool").getBool()); + assertTrue(resultSet.getValue("int64").isNull()); + assertThrows(IllegalStateException.class, () -> resultSet.getValue("int64").getInt64()); + assertTrue(resultSet.getValue("float64").isNull()); + assertThrows(IllegalStateException.class, () -> resultSet.getValue("float64").getFloat64()); + assertTrue(resultSet.getValue("numeric").isNull()); + assertThrows(IllegalStateException.class, () -> resultSet.getValue("numeric").getNumeric()); + assertTrue(resultSet.getValue("string").isNull()); + assertThrows(IllegalStateException.class, () -> resultSet.getValue("string").getString()); + assertTrue(resultSet.getValue("bytes").isNull()); + assertThrows(IllegalStateException.class, () -> resultSet.getValue("bytes").getBytes()); + 
assertTrue(resultSet.getValue("timestamp").isNull()); + assertThrows( + IllegalStateException.class, () -> resultSet.getValue("timestamp").getTimestamp()); + assertTrue(resultSet.getValue("date").isNull()); + assertThrows(IllegalStateException.class, () -> resultSet.getValue("date").getDate()); + assertTrue(resultSet.getValue("boolarray").isNull()); + assertThrows( + IllegalStateException.class, () -> resultSet.getValue("boolarray").getBoolArray()); + assertTrue(resultSet.getValue("int64array").isNull()); + assertThrows( + IllegalStateException.class, () -> resultSet.getValue("int64array").getInt64Array()); + assertTrue(resultSet.getValue("float64array").isNull()); + assertThrows( + IllegalStateException.class, () -> resultSet.getValue("float64array").getFloat64Array()); + assertTrue(resultSet.getValue("numericarray").isNull()); + assertThrows( + IllegalStateException.class, () -> resultSet.getValue("numericarray").getNumericArray()); + assertTrue(resultSet.getValue("stringarray").isNull()); + assertThrows( + IllegalStateException.class, () -> resultSet.getValue("stringarray").getStringArray()); + assertTrue(resultSet.getValue("bytesarray").isNull()); + assertThrows( + IllegalStateException.class, () -> resultSet.getValue("bytesarray").getBytesArray()); + assertTrue(resultSet.getValue("timestamparray").isNull()); + assertThrows( + IllegalStateException.class, + () -> resultSet.getValue("timestamparray").getTimestampArray()); + assertTrue(resultSet.getValue("datearray").isNull()); + assertThrows( + IllegalStateException.class, () -> resultSet.getValue("datearray").getDateArray()); + } + } + + @Test + public void testReadNullValuesInArraysGoogleStandardSQL() { + Assume.assumeTrue(dialect.dialect == Dialect.GOOGLE_STANDARD_SQL); + databaseClient.write( + Collections.singletonList( + Mutation.newInsertBuilder(TABLE_NAME) + .set("Id") + .to(3L) + .set("boolArray") + .toBoolArray(Arrays.asList(true, null)) + .set("int64Array") + .toInt64Array(Arrays.asList(null, 2L)) + 
.set("float64Array") + .toFloat64Array(Arrays.asList(null, 10D)) + .set("numericArray") + .toNumericArray(Arrays.asList(new BigDecimal("10000"), null)) + .set("stringArray") + .toStringArray(Arrays.asList(null, "string2")) + .set("bytesArray") + .toBytesArray(Arrays.asList(ByteArray.copyFrom("bytes1"), null)) + .set("timestampArray") + .toTimestampArray(Arrays.asList(null, Timestamp.ofTimeSecondsAndNanos(20, 0))) + .set("dateArray") + .toDateArray(Arrays.asList(Date.fromYearMonthDay(2021, 2, 3), null)) + .set("jsonArray") + .toJsonArray(Arrays.asList("{\"key1\":\"value1\"}", null)) + .build())); + + try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery(Statement.of("SELECT * FROM " + TABLE_NAME + " WHERE Id = 3"))) { + resultSet.next(); + + assertEquals(Value.int64(3L), resultSet.getValue("Id")); + assertEquals(Value.boolArray(Arrays.asList(true, null)), resultSet.getValue("boolArray")); + assertEquals(Value.int64Array(Arrays.asList(null, 2L)), resultSet.getValue("int64Array")); + assertNull(resultSet.getValue("float64Array").getFloat64Array().get(0)); + assertEquals(10D, resultSet.getValue("float64Array").getFloat64Array().get(1), DELTA); + assertEquals( + Value.numericArray(Arrays.asList(new BigDecimal("10000"), null)), + resultSet.getValue("numericArray")); + assertEquals( + Value.stringArray(Arrays.asList(null, "string2")), resultSet.getValue("stringArray")); + assertEquals( + Value.bytesArray(Arrays.asList(ByteArray.copyFrom("bytes1"), null)), + resultSet.getValue("bytesArray")); + assertEquals( + Value.timestampArray(Arrays.asList(null, Timestamp.ofTimeSecondsAndNanos(20, 0))), + resultSet.getValue("timestampArray")); + assertEquals( + Value.dateArray(Arrays.asList(Date.fromYearMonthDay(2021, 2, 3), null)), + resultSet.getValue("dateArray")); + assertEquals( + Value.jsonArray(Arrays.asList("{\"key1\":\"value1\"}", null)), + resultSet.getValue("jsonArray")); + } + } + + @Test + public void testReadNullValuesInArraysPostgreSQL() { + 
Assume.assumeTrue(dialect.dialect == Dialect.POSTGRESQL); + databaseClient.write( + Collections.singletonList( + Mutation.newInsertBuilder(TABLE_NAME) + .set("Id") + .to(3L) + .set("boolArray") + .toBoolArray(Arrays.asList(true, null)) + .set("int64Array") + .toInt64Array(Arrays.asList(null, 2L)) + .set("float64Array") + .toFloat64Array(Arrays.asList(null, 10D)) + .set("numericArray") + .toNumericArray(Arrays.asList(new BigDecimal("10000"), null)) + .set("stringArray") + .toStringArray(Arrays.asList(null, "string2")) + .set("bytesArray") + .toBytesArray(Arrays.asList(ByteArray.copyFrom("bytes1"), null)) + .set("timestampArray") + .toTimestampArray(Arrays.asList(null, Timestamp.ofTimeSecondsAndNanos(20, 0))) + .set("dateArray") + .toDateArray(Arrays.asList(Date.fromYearMonthDay(2021, 2, 3), null)) + .build())); + + try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery(Statement.of("SELECT * FROM " + TABLE_NAME + " WHERE Id = 3"))) { + resultSet.next(); + + assertEquals(Value.int64(3L), resultSet.getValue("id")); + assertEquals(Value.boolArray(Arrays.asList(true, null)), resultSet.getValue("boolarray")); + assertEquals(Value.int64Array(Arrays.asList(null, 2L)), resultSet.getValue("int64array")); + assertNull(resultSet.getValue("float64array").getFloat64Array().get(0)); + assertEquals(10D, resultSet.getValue("float64array").getFloat64Array().get(1), DELTA); + assertEquals( + Value.pgNumericArray(Arrays.asList("10000", null)), resultSet.getValue("numericarray")); + assertEquals( + Value.stringArray(Arrays.asList(null, "string2")), resultSet.getValue("stringarray")); + assertEquals( + Value.bytesArray(Arrays.asList(ByteArray.copyFrom("bytes1"), null)), + resultSet.getValue("bytesarray")); + assertEquals( + Value.timestampArray(Arrays.asList(null, Timestamp.ofTimeSecondsAndNanos(20, 0))), + resultSet.getValue("timestamparray")); + assertEquals( + Value.dateArray(Arrays.asList(Date.fromYearMonthDay(2021, 2, 3), null)), + 
resultSet.getValue("datearray")); + } + } + + @Test + public void testReadNonFloat64LiteralsGoogleStandardSQL() { + Assume.assumeTrue(dialect.dialect == Dialect.GOOGLE_STANDARD_SQL); + try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery( + Statement.of( + "SELECT TRUE AS bool,1 AS int64,CAST('100' AS NUMERIC) AS numeric,'stringValue'" + + " AS string,CAST('bytesValue' AS BYTES) AS" + + " bytes,CAST('1970-01-01T00:00:01Z' AS TIMESTAMP) AS" + + " timestamp,CAST('2021-02-03' AS DATE) AS date,[false, true] AS" + + " boolArray,[1, 2] AS int64Array,[CAST('100' AS NUMERIC), CAST('200' AS" + + " NUMERIC)] AS numericArray,['string1', 'string2'] AS" + + " stringArray,[CAST('bytes1' AS BYTES), CAST('bytes2' AS BYTES)] AS" + + " bytesArray,[CAST('1970-01-01T00:00:01.000000002Z' AS TIMESTAMP)," + + " CAST('1970-01-01T00:00:02.000000003Z' AS TIMESTAMP)] AS" + + " timestampArray,[CAST('2020-01-02' AS DATE), CAST('2021-02-03' AS DATE)]" + + " AS dateArray,ARRAY(SELECT STRUCT( TRUE AS structBool, 1 AS" + + " structInt64, CAST('100' AS NUMERIC) AS structNumeric, 'stringValue'" + + " AS structString, CAST('bytesValue' AS BYTES) AS structBytes, " + + " CAST('1970-01-01T00:00:01Z' AS TIMESTAMP) AS structTimestamp, " + + " CAST('2020-01-02' AS DATE) AS structDate, [false, true] AS" + + " structBoolArray, [1, 2] AS structInt64Array, [CAST('100' AS NUMERIC)," + + " CAST('200' AS NUMERIC)] AS structNumericArray, ['string1', 'string2']" + + " AS structStringArray, [CAST('bytes1' AS BYTES), CAST('bytes2' AS" + + " BYTES)] AS structBytesArray, [CAST('1970-01-01T00:00:01.000000002Z' AS" + + " TIMESTAMP), CAST('1970-01-01T00:00:02.000000003Z' AS TIMESTAMP)] AS" + + " structTimestampArray, [CAST('2020-01-02' AS DATE), CAST('2021-02-03'" + + " AS DATE)] AS structDateArray)) AS structArray"))) { + resultSet.next(); + + assertEquals(Value.bool(true), resultSet.getValue("bool")); + assertEquals(Value.int64(1L), resultSet.getValue("int64")); + assertEquals(Value.numeric(new 
BigDecimal("100")), resultSet.getValue("numeric")); + assertEquals(Value.string("stringValue"), resultSet.getValue("string")); + assertEquals(Value.bytes(ByteArray.copyFrom("bytesValue")), resultSet.getValue("bytes")); + assertEquals( + Value.timestamp(Timestamp.ofTimeSecondsAndNanos(1, 0)), resultSet.getValue("timestamp")); + assertEquals(Value.date(Date.fromYearMonthDay(2021, 2, 3)), resultSet.getValue("date")); + assertEquals(Value.boolArray(new boolean[] {false, true}), resultSet.getValue("boolArray")); + assertEquals(Value.int64Array(new long[] {1L, 2L}), resultSet.getValue("int64Array")); + assertEquals( + Value.numericArray(Arrays.asList(new BigDecimal("100"), new BigDecimal("200"))), + resultSet.getValue("numericArray")); + assertEquals( + Value.stringArray(Arrays.asList("string1", "string2")), + resultSet.getValue("stringArray")); + assertEquals( + Value.bytesArray( + Arrays.asList(ByteArray.copyFrom("bytes1"), ByteArray.copyFrom("bytes2"))), + resultSet.getValue("bytesArray")); + assertEquals( + Value.timestampArray( + Arrays.asList( + Timestamp.ofTimeSecondsAndNanos(1, 2), Timestamp.ofTimeSecondsAndNanos(2, 3))), + resultSet.getValue("timestampArray")); + assertEquals( + Value.dateArray( + Arrays.asList(Date.fromYearMonthDay(2020, 1, 2), Date.fromYearMonthDay(2021, 2, 3))), + resultSet.getValue("dateArray")); + assertEquals( + Value.structArray( + Type.struct( + StructField.of("structBool", Type.bool()), + StructField.of("structInt64", Type.int64()), + StructField.of("structNumeric", Type.numeric()), + StructField.of("structString", Type.string()), + StructField.of("structBytes", Type.bytes()), + StructField.of("structTimestamp", Type.timestamp()), + StructField.of("structDate", Type.date()), + StructField.of("structBoolArray", Type.array(Type.bool())), + StructField.of("structInt64Array", Type.array(Type.int64())), + StructField.of("structNumericArray", Type.array(Type.numeric())), + StructField.of("structStringArray", Type.array(Type.string())), + 
StructField.of("structBytesArray", Type.array(Type.bytes())), + StructField.of("structTimestampArray", Type.array(Type.timestamp())), + StructField.of("structDateArray", Type.array(Type.date()))), + Collections.singletonList( + Struct.newBuilder() + .set("structBool") + .to(Value.bool(true)) + .set("structInt64") + .to(Value.int64(1L)) + .set("structNumeric") + .to(new BigDecimal("100")) + .set("structString") + .to("stringValue") + .set("structBytes") + .to(ByteArray.copyFrom("bytesValue")) + .set("structTimestamp") + .to(Timestamp.ofTimeSecondsAndNanos(1, 0)) + .set("structDate") + .to(Date.fromYearMonthDay(2020, 1, 2)) + .set("structBoolArray") + .toBoolArray(new boolean[] {false, true}) + .set("structInt64Array") + .toInt64Array(new long[] {1L, 2L}) + .set("structNumericArray") + .toNumericArray(Arrays.asList(new BigDecimal("100"), new BigDecimal("200"))) + .set("structStringArray") + .toStringArray(Arrays.asList("string1", "string2")) + .set("structBytesArray") + .toBytesArray( + Arrays.asList(ByteArray.copyFrom("bytes1"), ByteArray.copyFrom("bytes2"))) + .set("structTimestampArray") + .toTimestampArray( + Arrays.asList( + Timestamp.ofTimeSecondsAndNanos(1, 2), + Timestamp.ofTimeSecondsAndNanos(2, 3))) + .set("structDateArray") + .toDateArray( + Arrays.asList( + Date.fromYearMonthDay(2020, 1, 2), Date.fromYearMonthDay(2021, 2, 3))) + .build())), + resultSet.getValue("structArray")); + } + } + + @Test + public void testReadNonFloat64LiteralsPostgreSQL() { + Assume.assumeTrue(dialect.dialect == Dialect.POSTGRESQL); + try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery( + Statement.of( + "SELECT TRUE AS bool,1 AS int64,CAST('100' AS numeric) AS numeric,'stringValue'" + + " AS string,CAST('bytesValue' AS BYTEA) AS" + + " bytes,CAST('1970-01-01T00:00:01 UTC' AS TIMESTAMPTZ) AS" + + " timestamp,CAST('2021-02-03' AS DATE) AS date,ARRAY[false, true] AS" + + " boolArray,ARRAY[1, 2] AS int64Array,ARRAY[CAST('100' AS NUMERIC)," + + " CAST('200' AS 
NUMERIC)] AS numericArray,ARRAY['string1', 'string2'] AS" + + " stringArray,ARRAY[CAST('bytes1' AS BYTEA), CAST('bytes2' AS BYTEA)] AS" + + " bytesArray,ARRAY[CAST('1970-01-01T00:00:01 UTC' AS TIMESTAMPTZ)," + + " CAST('1970-01-01T00:00:02 UTC' AS TIMESTAMPTZ)] AS" + + " timestampArray,ARRAY[CAST('2020-01-02' AS DATE), CAST('2021-02-03' AS" + + " DATE)] AS dateArray"))) { + resultSet.next(); + + assertEquals(Value.bool(true), resultSet.getValue("bool")); + assertEquals(Value.int64(1L), resultSet.getValue("int64")); + assertEquals(Value.pgNumeric("100"), resultSet.getValue("numeric")); + assertEquals(Value.string("stringValue"), resultSet.getValue("string")); + assertEquals(Value.bytes(ByteArray.copyFrom("bytesValue")), resultSet.getValue("bytes")); + assertEquals( + Value.timestamp(Timestamp.ofTimeSecondsAndNanos(1, 0)), resultSet.getValue("timestamp")); + assertEquals(Value.date(Date.fromYearMonthDay(2021, 2, 3)), resultSet.getValue("date")); + assertEquals(Value.boolArray(new boolean[] {false, true}), resultSet.getValue("boolarray")); + assertEquals(Value.int64Array(new long[] {1L, 2L}), resultSet.getValue("int64array")); + assertEquals( + Value.pgNumericArray(Arrays.asList("100", "200")), resultSet.getValue("numericarray")); + assertEquals( + Value.stringArray(Arrays.asList("string1", "string2")), + resultSet.getValue("stringarray")); + assertEquals( + Value.bytesArray( + Arrays.asList(ByteArray.copyFrom("bytes1"), ByteArray.copyFrom("bytes2"))), + resultSet.getValue("bytesarray")); + assertEquals( + Value.timestampArray( + Arrays.asList( + Timestamp.ofTimeSecondsAndNanos(1, 0), Timestamp.ofTimeSecondsAndNanos(2, 0))), + resultSet.getValue("timestamparray")); + assertEquals( + Value.dateArray( + Arrays.asList(Date.fromYearMonthDay(2020, 1, 2), Date.fromYearMonthDay(2021, 2, 3))), + resultSet.getValue("datearray")); + } + } + + @Test + public void testReadFloat64LiteralsGoogleStandardSQL() { + Assume.assumeTrue(dialect.dialect == Dialect.GOOGLE_STANDARD_SQL); + 
try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery( + Statement.of( + "SELECT " + + "10.0 AS float64," + + "[20.0, 30.0] AS float64Array," + + "ARRAY(SELECT STRUCT(" + + " 40.0 AS structFloat64," + + " [50.0, 60.0] AS structFloat64Array" + + ")) AS structArray"))) { + resultSet.next(); + + final Struct struct = resultSet.getValue("structArray").getStructArray().get(0); + + assertEquals(10D, resultSet.getValue("float64").getFloat64(), DELTA); + assertArrayEquals( + new double[] {20D, 30D}, + Doubles.toArray(resultSet.getValue("float64Array").getFloat64Array()), + DELTA); + assertEquals(40D, struct.getDouble("structFloat64"), DELTA); + assertArrayEquals( + new double[] {50D, 60D}, struct.getDoubleArray("structFloat64Array"), DELTA); + } + } + + @Test + public void testReadFloat64LiteralsPostgreSQL() { + Assume.assumeTrue(dialect.dialect == Dialect.POSTGRESQL); + try (ResultSet resultSet = + databaseClient + .singleUse() + .executeQuery( + Statement.of("SELECT 10.0 AS float64, " + "ARRAY[20.0, 30.0] AS float64Array"))) { + resultSet.next(); + assertEquals(10D, resultSet.getValue("float64").getFloat64(), DELTA); + assertArrayEquals( + new double[] {20D, 30D}, + Doubles.toArray(resultSet.getValue("float64array").getFloat64Array()), + DELTA); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITSpannerOptionsTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITSpannerOptionsTest.java new file mode 100644 index 000000000000..399fd62a6d8b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITSpannerOptionsTest.java @@ -0,0 +1,68 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITSpannerOptionsTest { + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static Database db; + + @BeforeClass + public static void setUp() { + db = env.getTestHelper().createTestDatabase(); + } + + @AfterClass + public static void tearDown() { + db.drop(); + } + + @Test + public void testCompression() { + for (String compressorName : new String[] {"gzip", "identity", null}) { + SpannerOptions options = + env.getTestHelper().getOptions().toBuilder().setCompressorName(compressorName).build(); + try (Spanner spanner = options.getService()) { + DatabaseClient client = spanner.getDatabaseClient(db.getId()); + try (ResultSet rs = client.singleUse().executeQuery(Statement.of("SELECT 1 AS COL1"))) { + 
assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(0)).isEqualTo(1L); + assertThat(rs.next()).isFalse(); + } + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITTransactionManagerAsyncTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITTransactionManagerAsyncTest.java new file mode 100644 index 000000000000..31e338476bc7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITTransactionManagerAsyncTest.java @@ -0,0 +1,311 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; + +import com.google.api.core.ApiFutures; +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.AsyncTransactionManager; +import com.google.cloud.spanner.AsyncTransactionManager.AsyncTransactionStep; +import com.google.cloud.spanner.AsyncTransactionManager.TransactionContextFuture; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TransactionManager.TransactionState; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.MoreExecutors; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.Executors; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +@Category(ParallelIntegrationTest.class) +public class 
ITTransactionManagerAsyncTest { + + @Parameter(0) + public Executor executor; + + @Parameters(name = "executor = {0}") + public static Collection data() { + return Arrays.asList( + new Object[][] { + {MoreExecutors.directExecutor()}, + {Executors.newSingleThreadExecutor()}, + {Executors.newFixedThreadPool(4)}, + }); + } + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static Database db; + private static Spanner spanner; + private static DatabaseClient client; + + @BeforeClass + public static void setUpDatabase() { + // Empty database. + db = + env.getTestHelper() + .createTestDatabase( + "CREATE TABLE T (" + + " K STRING(MAX) NOT NULL," + + " BoolValue BOOL," + + ") PRIMARY KEY (K)"); + spanner = env.getTestHelper().getClient(); + client = spanner.getDatabaseClient(db.getId()); + } + + @Before + public void clearTable() { + client.write(ImmutableList.of(Mutation.delete("T", KeySet.all()))); + } + + @Test + public void testSimpleInsert() throws ExecutionException, InterruptedException { + try (AsyncTransactionManager manager = client.transactionManagerAsync()) { + TransactionContextFuture txn = manager.beginAsync(); + while (true) { + assertThat(manager.getState()).isEqualTo(TransactionState.STARTED); + try { + txn.then( + (transaction, ignored) -> { + transaction.buffer( + Mutation.newInsertBuilder("T") + .set("K") + .to("Key1") + .set("BoolValue") + .to(true) + .build()); + return ApiFutures.immediateFuture(null); + }, + executor) + .commitAsync() + .get(); + assertThat(manager.getState()).isEqualTo(TransactionState.COMMITTED); + Struct row = + client.singleUse().readRow("T", Key.of("Key1"), Arrays.asList("K", "BoolValue")); + assertThat(row.getString(0)).isEqualTo("Key1"); + assertThat(row.getBoolean(1)).isTrue(); + break; + } catch (AbortedException e) { + Thread.sleep(e.getRetryDelayInMillis()); + txn = manager.resetForRetryAsync(); + } + } + } + } + + @Test + public void testInvalidInsert() throws InterruptedException 
{ + try (AsyncTransactionManager manager = client.transactionManagerAsync()) { + TransactionContextFuture txn = manager.beginAsync(); + while (true) { + try { + txn.then( + (transaction, ignored) -> { + transaction.buffer( + Mutation.newInsertBuilder("InvalidTable") + .set("K") + .to("Key1") + .set("BoolValue") + .to(true) + .build()); + return ApiFutures.immediateFuture(null); + }, + executor) + .commitAsync() + .get(); + fail("Expected exception"); + } catch (AbortedException e) { + Thread.sleep(e.getRetryDelayInMillis()); + txn = manager.resetForRetryAsync(); + } catch (ExecutionException e) { + assertThat(e.getCause()).isInstanceOf(SpannerException.class); + SpannerException se = (SpannerException) e.getCause(); + if (env.getTestHelper() + .getOptions() + .getSessionPoolOptions() + .getUseMultiplexedSessionForRW()) { + // Backend currently returns INVALID_ARGUMENT, however this will be changed to NOT_FOUND + // in future. + assertThat(se.getErrorCode()).isAnyOf(ErrorCode.NOT_FOUND, ErrorCode.INVALID_ARGUMENT); + } else { + assertThat(se.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + } + // expected + break; + } + } + assertThat(manager.getState()).isEqualTo(TransactionState.COMMIT_FAILED); + // We cannot retry for non aborted errors. 
+ try { + manager.resetForRetryAsync(); + fail("Expected exception"); + } catch (IllegalStateException ex) { + assertNotNull(ex.getMessage()); + } + } + } + + @Test + public void testRollback() throws InterruptedException { + try (AsyncTransactionManager manager = client.transactionManagerAsync()) { + TransactionContextFuture txn = manager.beginAsync(); + while (true) { + txn.then( + (transaction, ignored) -> { + transaction.buffer( + Mutation.newInsertBuilder("T") + .set("K") + .to("Key2") + .set("BoolValue") + .to(true) + .build()); + return ApiFutures.immediateFuture(null); + }, + executor); + try { + manager.rollbackAsync(); + break; + } catch (AbortedException e) { + Thread.sleep(e.getRetryDelayInMillis()); + txn = manager.resetForRetryAsync(); + } + } + assertThat(manager.getState()).isEqualTo(TransactionState.ROLLED_BACK); + // Row should not have been inserted. + assertThat(client.singleUse().readRow("T", Key.of("Key2"), Arrays.asList("K", "BoolValue"))) + .isNull(); + } + } + + @Ignore( + "Cloud Spanner now seems to return CANCELLED instead of ABORTED when a transaction is" + + " invalidated by a later transaction in the same session") + @Test + public void testAbortAndRetry() throws InterruptedException, ExecutionException { + assumeFalse( + "Emulator does not support more than 1 simultaneous transaction. 
" + + "This test would therefore loop indefinitely on the emulator.", + isUsingEmulator()); + + client.write( + Collections.singletonList( + Mutation.newInsertBuilder("T").set("K").to("Key3").set("BoolValue").to(true).build())); + try (AsyncTransactionManager manager1 = client.transactionManagerAsync()) { + TransactionContextFuture txn1 = manager1.beginAsync(); + AsyncTransactionManager manager2; + TransactionContextFuture txn2; + AsyncTransactionStep txn2Step1; + while (true) { + try { + AsyncTransactionStep txn1Step1 = + txn1.then( + (transaction, ignored) -> + transaction.readRowAsync( + "T", Key.of("Key3"), Arrays.asList("K", "BoolValue")), + executor); + manager2 = client.transactionManagerAsync(); + txn2 = manager2.beginAsync(); + txn2Step1 = + txn2.then( + (transaction, ignored) -> + transaction.readRowAsync( + "T", Key.of("Key3"), Arrays.asList("K", "BoolValue")), + executor); + + AsyncTransactionStep txn1Step2 = + txn1Step1.then( + (transaction, ignored) -> { + transaction.buffer( + Mutation.newUpdateBuilder("T") + .set("K") + .to("Key3") + .set("BoolValue") + .to(false) + .build()); + return ApiFutures.immediateFuture(null); + }, + executor); + + txn2Step1.get(); + txn1Step2.commitAsync().get(); + break; + } catch (AbortedException e) { + Thread.sleep(e.getRetryDelayInMillis()); + // It is possible that it was txn2 that aborted. + // In that case we should just retry without resetting anything. + if (manager1.getState() == TransactionState.ABORTED) { + txn1 = manager1.resetForRetryAsync(); + } + } + } + + // txn2 should have been aborted. 
+ try { + txn2Step1.commitAsync().get(); + fail("Expected to abort"); + } catch (AbortedException e) { + assertThat(manager2.getState()).isEqualTo(TransactionState.ABORTED); + txn2 = manager2.resetForRetryAsync(); + } + AsyncTransactionStep txn2Step2 = + txn2.then( + (transaction, ignored) -> { + transaction.buffer( + Mutation.newUpdateBuilder("T") + .set("K") + .to("Key3") + .set("BoolValue") + .to(true) + .build()); + return ApiFutures.immediateFuture(null); + }, + executor); + txn2Step2.commitAsync().get(); + Struct row = client.singleUse().readRow("T", Key.of("Key3"), Arrays.asList("K", "BoolValue")); + assertThat(row.getString(0)).isEqualTo("Key3"); + assertThat(row.getBoolean(1)).isTrue(); + manager2.close(); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITTransactionManagerTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITTransactionManagerTest.java new file mode 100644 index 000000000000..116d6c24368e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITTransactionManagerTest.java @@ -0,0 +1,302 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TransactionContext; +import com.google.cloud.spanner.TransactionManager; +import com.google.cloud.spanner.TransactionManager.TransactionState; +import com.google.cloud.spanner.connection.ConnectionOptions; +import com.google.cloud.spanner.testing.EmulatorSpannerHelper; +import com.google.common.collect.ImmutableList; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +@Category(ParallelIntegrationTest.class) +@RunWith(Parameterized.class) +public class ITTransactionManagerTest { + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static DatabaseClient client; + private static 
DatabaseClient googleStandardSQLClient; + private static DatabaseClient postgreSQLClient; + + @Parameterized.Parameters(name = "Dialect = {0}") + public static List data() { + List params = new ArrayList<>(); + params.add(new DialectTestParameter(Dialect.GOOGLE_STANDARD_SQL)); + if (!EmulatorSpannerHelper.isUsingEmulator()) { + params.add(new DialectTestParameter(Dialect.POSTGRESQL)); + } + return params; + } + + @Parameterized.Parameter() public DialectTestParameter dialect; + + @BeforeClass + public static void setUpDatabase() + throws ExecutionException, InterruptedException, TimeoutException { + + Database googleStandardSQLDatabase = + env.getTestHelper() + .createTestDatabase( + "CREATE TABLE T (" + + " K STRING(MAX) NOT NULL," + + " BoolValue BOOL," + + ") PRIMARY KEY (K)"); + googleStandardSQLClient = env.getTestHelper().getDatabaseClient(googleStandardSQLDatabase); + if (!EmulatorSpannerHelper.isUsingEmulator()) { + Database postgreSQLDatabase = + env.getTestHelper() + .createTestDatabase( + Dialect.POSTGRESQL, + Collections.singletonList( + "CREATE TABLE T (" + + " K VARCHAR PRIMARY KEY," + + " BoolValue BOOL" + + ")")); + postgreSQLClient = env.getTestHelper().getDatabaseClient(postgreSQLDatabase); + } + } + + @Before + public void before() { + client = + dialect.dialect == Dialect.GOOGLE_STANDARD_SQL ? 
googleStandardSQLClient : postgreSQLClient; + // Delete all test data + client.write(ImmutableList.of(Mutation.delete("T", KeySet.all()))); + } + + @AfterClass + public static void teardown() { + ConnectionOptions.closeSpanner(); + } + + @SuppressWarnings("resource") + @Test + public void simpleInsert() throws InterruptedException { + try (TransactionManager manager = client.transactionManager()) { + TransactionContext txn = manager.begin(); + while (true) { + assertThat(manager.getState()).isEqualTo(TransactionState.STARTED); + txn.buffer( + Mutation.newInsertBuilder("T").set("K").to("Key1").set("BoolValue").to(true).build()); + try { + manager.commit(); + assertThat(manager.getState()).isEqualTo(TransactionState.COMMITTED); + Struct row = + client.singleUse().readRow("T", Key.of("Key1"), Arrays.asList("K", "BoolValue")); + assertThat(row.getString(0)).isEqualTo("Key1"); + assertThat(row.getBoolean(1)).isTrue(); + break; + } catch (AbortedException e) { + long retryDelayInMillis = e.getRetryDelayInMillis(); + if (retryDelayInMillis > 0) { + Thread.sleep(retryDelayInMillis); + } + txn = manager.resetForRetry(); + } + } + } + } + + @SuppressWarnings("resource") + @Test + public void invalidInsert() throws InterruptedException { + try (TransactionManager manager = client.transactionManager()) { + TransactionContext txn = manager.begin(); + while (true) { + txn.buffer( + Mutation.newInsertBuilder("InvalidTable") + .set("K") + .to("Key1") + .set("BoolValue") + .to(true) + .build()); + try { + manager.commit(); + fail("Expected exception"); + } catch (AbortedException e) { + long retryDelayInMillis = e.getRetryDelayInMillis(); + if (retryDelayInMillis > 0) { + Thread.sleep(retryDelayInMillis); + } + txn = manager.resetForRetry(); + } catch (SpannerException e) { + // expected + break; + } + } + assertThat(manager.getState()).isEqualTo(TransactionState.COMMIT_FAILED); + // We cannot retry for non aborted errors. 
+ try { + manager.resetForRetry(); + fail("Expected exception"); + } catch (IllegalStateException ex) { + assertNotNull(ex.getMessage()); + } + } + } + + @SuppressWarnings("resource") + @Test + public void rollback() throws InterruptedException { + try (TransactionManager manager = client.transactionManager()) { + TransactionContext txn = manager.begin(); + while (true) { + txn.buffer( + Mutation.newInsertBuilder("T").set("K").to("Key2").set("BoolValue").to(true).build()); + try { + manager.rollback(); + break; + } catch (AbortedException e) { + long retryDelayInMillis = e.getRetryDelayInMillis(); + if (retryDelayInMillis > 0) { + Thread.sleep(retryDelayInMillis); + } + txn = manager.resetForRetry(); + } + } + assertThat(manager.getState()).isEqualTo(TransactionState.ROLLED_BACK); + // Row should not have been inserted. + assertThat(client.singleUse().readRow("T", Key.of("Key2"), Arrays.asList("K", "BoolValue"))) + .isNull(); + } + } + + @SuppressWarnings("resource") + @Test + public void abortAndRetry() throws InterruptedException { + assumeFalse( + "Emulator does not support more than 1 simultaneous transaction. 
" + + "This test would therefore loop indefinitely on the emulator.", + isUsingEmulator()); + + client.write( + Collections.singletonList( + Mutation.newInsertBuilder("T").set("K").to("Key3").set("BoolValue").to(true).build())); + try (TransactionManager manager1 = client.transactionManager()) { + TransactionContext txn1 = manager1.begin(); + TransactionManager manager2; + TransactionContext txn2; + while (true) { + try { + txn1.readRow("T", Key.of("Key3"), Arrays.asList("K", "BoolValue")); + manager2 = client.transactionManager(); + txn2 = manager2.begin(); + txn2.readRow("T", Key.of("Key3"), Arrays.asList("K", "BoolValue")); + + txn1.buffer( + Mutation.newUpdateBuilder("T") + .set("K") + .to("Key3") + .set("BoolValue") + .to(false) + .build()); + manager1.commit(); + break; + } catch (AbortedException e) { + long retryDelayInMillis = e.getRetryDelayInMillis(); + if (retryDelayInMillis > 0) { + Thread.sleep(retryDelayInMillis); + } + // It is possible that it was txn2 that aborted. + // In that case we should just retry without resetting anything. + if (manager1.getState() == TransactionState.ABORTED) { + txn1 = manager1.resetForRetry(); + } + } + } + + // txn2 should have been aborted. 
+ try { + manager2.commit(); + fail("Expected to abort"); + } catch (AbortedException e) { + assertThat(manager2.getState()).isEqualTo(TransactionState.ABORTED); + txn2 = manager2.resetForRetry(); + } + txn2.buffer( + Mutation.newUpdateBuilder("T").set("K").to("Key3").set("BoolValue").to(true).build()); + manager2.commit(); + Struct row = client.singleUse().readRow("T", Key.of("Key3"), Arrays.asList("K", "BoolValue")); + assertThat(row.getString(0)).isEqualTo("Key3"); + assertThat(row.getBoolean(1)).isTrue(); + manager2.close(); + } + } + + @SuppressWarnings("resource") + @Test + public void testTransactionManagerReturnsCommitStats() throws InterruptedException { + assumeFalse("Emulator does not return commit statistics", isUsingEmulator()); + try (TransactionManager manager = client.transactionManager(Options.commitStats())) { + TransactionContext transaction = manager.begin(); + while (true) { + transaction.buffer( + Mutation.newInsertBuilder("T") + .set("K") + .to("KeyCommitStats") + .set("BoolValue") + .to(true) + .build()); + try { + manager.commit(); + assertNotNull(manager.getCommitResponse().getCommitStats()); + assertEquals(2L, manager.getCommitResponse().getCommitStats().getMutationCount()); + break; + } catch (AbortedException e) { + long retryDelayInMillis = e.getRetryDelayInMillis(); + if (retryDelayInMillis > 0) { + Thread.sleep(retryDelayInMillis); + } + transaction = manager.resetForRetry(); + } + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITTransactionTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITTransactionTest.java new file mode 100644 index 000000000000..da55d1d5367f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITTransactionTest.java @@ -0,0 +1,694 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in 
compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException; +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.BatchClient; +import com.google.cloud.spanner.BatchReadOnlyTransaction; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.PartitionOptions; +import com.google.cloud.spanner.ReadContext; +import com.google.cloud.spanner.ReadOnlyTransaction; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.SpannerOptions.Builder.DefaultReadWriteTransactionOptions; +import com.google.cloud.spanner.Statement; +import 
com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.TransactionRunner; +import com.google.cloud.spanner.TransactionRunner.TransactionCallable; +import com.google.cloud.spanner.testing.EmulatorSpannerHelper; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Sets; +import com.google.common.util.concurrent.SettableFuture; +import com.google.common.util.concurrent.Uninterruptibles; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Random; +import java.util.Vector; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Integration tests for read-write transactions. */ +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITTransactionTest { + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static Database db; + private static DatabaseClient client; + private static Database largeMessageDb; + private static DatabaseClient largeMessageClient; + + /** Sequence for assigning unique keys to test cases. 
*/ + private static int seq; + + @BeforeClass + public static void setUpDatabase() { + db = + env.getTestHelper() + .createTestDatabase( + "CREATE TABLE T (" + + " K STRING(MAX) NOT NULL," + + " V INT64," + + ") PRIMARY KEY (K)"); + client = env.getTestHelper().getDatabaseClient(db); + + largeMessageDb = + env.getTestHelper() + .createTestDatabase( + "CREATE TABLE T (" + + " K STRING(MAX) NOT NULL," + + " col0 BYTES(MAX)," + + " col1 BYTES(MAX)," + + " col2 BYTES(MAX)," + + " col3 BYTES(MAX)," + + " col4 BYTES(MAX)," + + " col5 BYTES(MAX)," + + " col6 BYTES(MAX)," + + " col7 BYTES(MAX)," + + " col8 BYTES(MAX)," + + " col9 BYTES(MAX)," + + ") PRIMARY KEY (K)"); + largeMessageClient = env.getTestHelper().getDatabaseClient(largeMessageDb); + } + + @Before + public void removeTestData() { + client.writeAtLeastOnce(Collections.singletonList(Mutation.delete("T", KeySet.all()))); + largeMessageClient.writeAtLeastOnce( + Collections.singletonList(Mutation.delete("T", KeySet.all()))); + } + + private static String uniqueKey() { + return "k" + seq++; + } + + private interface ReadStrategy { + Struct read(ReadContext ctx, String key); + } + + private void doBasicsTest(final ReadStrategy strategy) throws InterruptedException { + final String key = uniqueKey(); + + // Initial value. + client.write( + Collections.singletonList( + Mutation.newInsertBuilder("T").set("K").to(key).set("V").to(0).build())); + + final int numThreads = 3; + + final CountDownLatch commitBarrier = new CountDownLatch(numThreads); + final CountDownLatch complete = new CountDownLatch(numThreads); + + final TransactionCallable callable = + transaction -> { + Struct row = strategy.read(transaction, key); + long newValue = row.getLong(0) + 1; + transaction.buffer( + Mutation.newUpdateBuilder("T").set("K").to(key).set("V").to(newValue).build()); + commitBarrier.countDown(); + // Synchronize so that all threads attempt to commit at the same time. 
+ Uninterruptibles.awaitUninterruptibly(commitBarrier); + return newValue; + }; + + // We start multiple threads all attempting to update the same value concurrently. We expect + // to see at least some of the corresponding transactions abort. + final Vector results = new Vector<>(); + final Vector commitTimestamps = new Vector<>(); + class TxnThread extends Thread { + @Override + public void run() { + TransactionRunner runner = client.readWriteTransaction(); + Long result = runner.run(callable); + results.add(result); + commitTimestamps.add(runner.getCommitTimestamp()); + complete.countDown(); + } + } + for (int i = 0; i < numThreads; ++i) { + new TxnThread().start(); + } + complete.await(); + + assertThat(results).hasSize(numThreads); + List expectedResults = new ArrayList<>(); + for (int i = 0; i < numThreads; ++i) { + expectedResults.add(i + 1L); + } + assertThat(results).containsAtLeastElementsIn(expectedResults); + assertThat(Sets.newHashSet(commitTimestamps)).hasSize(numThreads); + + assertThat( + client + .singleUse(TimestampBound.strong()) + .readRow("T", Key.of(key), Collections.singletonList("V")) + .getLong(0)) + .isEqualTo((long) numThreads); + } + + @Test + public void basicsUsingRead() throws InterruptedException { + assumeFalse("Emulator does not support multiple parallel transactions", isUsingEmulator()); + + doBasicsTest( + (context, key) -> context.readRow("T", Key.of(key), Collections.singletonList("V"))); + } + + @Test + public void basicsUsingQuery() throws InterruptedException { + assumeFalse("Emulator does not support multiple parallel transactions", isUsingEmulator()); + + doBasicsTest( + (context, key) -> { + ResultSet resultSet = + context.executeQuery( + Statement.newBuilder("SELECT V FROM T WHERE K = @key") + .bind("key") + .to(key) + .build()); + assertThat(resultSet.next()).isTrue(); + Struct row = resultSet.getCurrentRowAsStruct(); + assertThat(resultSet.next()).isFalse(); + return row; + }); + } + + @Test + public void 
isolationLevelAndReadLockModeSetAtClientLevelTest() { + SpannerOptions options = + env.getTestHelper().getOptions().toBuilder() + .setDefaultTransactionOptions( + DefaultReadWriteTransactionOptions.newBuilder() + .setIsolationLevel(IsolationLevel.REPEATABLE_READ) + .setReadLockMode(ReadLockMode.OPTIMISTIC) + .build()) + .build(); + try (Spanner spanner = options.getService()) { + DatabaseClient client = spanner.getDatabaseClient(db.getId()); + Long updatedRows = + client + .readWriteTransaction() + .run( + transaction -> + transaction.executeUpdate( + Statement.of("INSERT INTO T (K, V) VALUES ('test1', 2)"))); + assertThat(updatedRows).isEqualTo(1L); + } + } + + @Test + public void isolationLevelAndReadLockModeSetAtClientAndTxnLevelTest() { + SpannerOptions options = + env.getTestHelper().getOptions().toBuilder() + .setDefaultTransactionOptions( + DefaultReadWriteTransactionOptions.newBuilder() + .setIsolationLevel(IsolationLevel.REPEATABLE_READ) + .setReadLockMode(ReadLockMode.OPTIMISTIC) + .build()) + .build(); + try (Spanner spanner = options.getService()) { + DatabaseClient client = spanner.getDatabaseClient(db.getId()); + Long updatedRows = + client + .readWriteTransaction( + Options.isolationLevel(IsolationLevel.SERIALIZABLE), + Options.readLockMode(ReadLockMode.PESSIMISTIC)) + .run( + transaction -> + transaction.executeUpdate( + Statement.of("INSERT INTO T (K, V) VALUES ('test1', 2)"))); + assertThat(updatedRows).isEqualTo(1L); + } + } + + @Test + public void userExceptionPreventsCommit() { + class UserException extends Exception { + UserException(String message) { + super(message); + } + } + + final String key = uniqueKey(); + + TransactionCallable callable = + transaction -> { + transaction.buffer(Mutation.newInsertOrUpdateBuilder("T").set("K").to(key).build()); + throw new UserException("User failure"); + }; + + try { + client.readWriteTransaction().run(callable); + fail("Expected user exception"); + } catch (SpannerException e) { + 
assertThat(e.getErrorCode()).isEqualTo(ErrorCode.UNKNOWN); + assertThat(e.getMessage()).contains("User failure"); + assertThat(e.getCause()).isInstanceOf(UserException.class); + } + + Struct row = + client + .singleUse(TimestampBound.strong()) + .readRow("T", Key.of(key), Collections.singletonList("K")); + assertThat(row).isNull(); + } + + @Test + public void userExceptionIsSpannerException() { + final String key = uniqueKey(); + + TransactionCallable callable = + transaction -> { + transaction.buffer(Mutation.newInsertOrUpdateBuilder("T").set("K").to(key).build()); + throw newSpannerException(ErrorCode.OUT_OF_RANGE, "User failure"); + }; + + try { + client.readWriteTransaction().run(callable); + fail("Expected user exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.OUT_OF_RANGE); + assertThat(e.getMessage()).contains("User failure"); + } + + Struct row = + client + .singleUse(TimestampBound.strong()) + .readRow("T", Key.of(key), Collections.singletonList("K")); + assertThat(row).isNull(); + } + + @Test + public void readAbort() throws Exception { + assumeFalse("Emulator does not support multiple parallel transactions", isUsingEmulator()); + + final String key1 = uniqueKey(); + final String key2 = uniqueKey(); + + client.write( + Arrays.asList( + Mutation.newInsertBuilder("T").set("K").to(key1).set("V").to(0).build(), + Mutation.newInsertBuilder("T").set("K").to(key2).set("V").to(1).build())); + + final CountDownLatch t1Started = new CountDownLatch(1); + final CountDownLatch t1Done = new CountDownLatch(1); + final CountDownLatch t2Running = new CountDownLatch(1); + final CountDownLatch t2Done = new CountDownLatch(1); + + final SettableFuture t1Result = SettableFuture.create(); + final SettableFuture t2Result = SettableFuture.create(); + + // Thread 1 performs a read before notifying that it has started and allowing + // thread 2 to start. This ensures that it establishes a senior lock priority relative to + // thread 2. 
It then waits for thread 2 to read, so that both threads have shared locks on + // key1, before continuing to commit; the act of committing means that thread 1's lock is + // upgraded and thread 2's transaction is aborted. When thread 1 is done, thread 2 tries a + // second read, which will abort. Both threads will mask SpannerExceptions to ensure that + // the implementation does not require TransactionCallable to propagate them. + Thread t1 = + new Thread( + () -> { + try { + client + .readWriteTransaction() + .run( + transaction -> { + try { + Struct row = + transaction.readRow( + "T", Key.of(key1), Collections.singletonList("V")); + t1Started.countDown(); + Uninterruptibles.awaitUninterruptibly(t2Running); + transaction.buffer( + Mutation.newUpdateBuilder("T") + .set("K") + .to(key1) + .set("V") + .to(row.getLong(0) + 1) + .build()); + return null; + } catch (SpannerException e) { + if (e.getErrorCode() == ErrorCode.ABORTED) { + assertThat(e).isInstanceOf(AbortedException.class); + } + throw new RuntimeException("Swallowed exception: " + e.getMessage()); + } + }); + t1Result.set(null); + } catch (Throwable t) { + t1Result.setException(t); + } finally { + t1Done.countDown(); + } + }); + Thread t2 = + new Thread( + () -> { + try { + client + .readWriteTransaction() + .run( + transaction -> { + try { + Struct r1 = + transaction.readRow( + "T", Key.of(key1), Collections.singletonList("V")); + t2Running.countDown(); + Uninterruptibles.awaitUninterruptibly(t1Done); + Struct r2 = + transaction.readRow( + "T", Key.of(key2), Collections.singletonList("V")); + transaction.buffer( + Mutation.newUpdateBuilder("T") + .set("K") + .to(key2) + .set("V") + .to(r1.getLong(0) + r2.getLong(0)) + .build()); + return null; + } catch (SpannerException e) { + if (e.getErrorCode() == ErrorCode.ABORTED) { + assertThat(e).isInstanceOf(AbortedException.class); + } + throw new RuntimeException("Swallowed exception: " + e.getMessage()); + } + }); + t2Result.set(null); + } catch (Throwable 
t) { + t2Result.setException(t); + } finally { + t2Done.countDown(); + } + }); + + t1.start(); + Uninterruptibles.awaitUninterruptibly(t1Started); + // Thread 2 will abort on the first attempt and should retry; wait for completion to confirm. + t2.start(); + assertThat(t2Done.await(1, TimeUnit.MINUTES)).isTrue(); + + // Check that both transactions effects are visible. + assertThat(t1Result.get()).isNull(); + assertThat(t2Result.get()).isNull(); + assertThat( + client + .singleUse(TimestampBound.strong()) + .readRow("T", Key.of(key1), Collections.singletonList("V")) + .getLong(0)) + .isEqualTo(1); + assertThat( + client + .singleUse(TimestampBound.strong()) + .readRow("T", Key.of(key2), Collections.singletonList("V")) + .getLong(0)) + .isEqualTo(2); + } + + private void doNestedRwTransaction() { + client + .readWriteTransaction() + .run( + transaction -> { + client.readWriteTransaction().run(transaction1 -> null); + + return null; + }); + } + + @Test + public void nestedReadWriteTxnThrows() { + try { + doNestedRwTransaction(); + fail("Expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INTERNAL); + assertThat(e.getMessage()).contains("not supported"); + } + } + + @Test + public void nestedReadOnlyTxnThrows() { + try { + client + .readWriteTransaction() + .run( + transaction -> { + try (ReadOnlyTransaction tx = client.readOnlyTransaction()) { + tx.getReadTimestamp(); + } + + return null; + }); + fail("Expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INTERNAL); + assertThat(e.getMessage()).contains("not supported"); + } + } + + @Test + public void nestedBatchTxnThrows() { + try { + client + .readWriteTransaction() + .run( + transaction -> { + BatchClient batchClient = env.getTestHelper().getBatchClient(db); + BatchReadOnlyTransaction batchTxn = + batchClient.batchReadOnlyTransaction(TimestampBound.strong()); + batchTxn.partitionReadUsingIndex( + 
PartitionOptions.getDefaultInstance(), + "Test", + "Index", + KeySet.all(), + Collections.singletonList("Fingerprint")); + + return null; + }); + fail("Expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INTERNAL); + assertThat(e.getMessage()).contains("not supported"); + } + } + + @Test + public void nestedSingleUseReadTxnThrows() { + try { + client + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet rs = + client.singleUseReadOnlyTransaction().executeQuery(Statement.of("SELECT 1"))) { + rs.next(); + } + + return null; + }); + fail("Expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INTERNAL); + assertThat(e.getMessage()).contains("not supported"); + } + } + + @Test + public void nestedTxnSucceedsWhenAllowed() { + assumeFalse("Emulator does not support multiple parallel transactions", isUsingEmulator()); + // TODO(sriharshach): Remove this skip once backend support empty transactions to commit. 
+ assumeFalse( + "Skipping for multiplexed sessions since it does not allow empty transactions to commit", + isUsingMultiplexedSessionsForRW()); + client + .readWriteTransaction() + .allowNestedTransaction() + .run( + transaction -> { + client.singleUseReadOnlyTransaction(); + + return null; + }); + } + + @Test + public void testTxWithCaughtError() { + assumeFalse( + "Emulator does not recover from an error within a transaction", + EmulatorSpannerHelper.isUsingEmulator()); + + long updateCount = + client + .readWriteTransaction() + .run( + transaction -> { + try { + transaction.executeUpdate(Statement.of("UPDATE T SET V=2 WHERE")); + fail("missing expected exception"); + } catch (SpannerException e) { + if (e.getErrorCode() == ErrorCode.ABORTED) { + // Aborted -> Let the transaction be retried + throw e; + } + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + } + return transaction.executeUpdate( + Statement.of("INSERT INTO T (K, V) VALUES ('One', 1)")); + }); + assertThat(updateCount).isEqualTo(1L); + } + + @Test + public void testTxWithConstraintError() { + assumeFalse( + "Emulator does not recover from an error within a transaction", + EmulatorSpannerHelper.isUsingEmulator()); + + // First insert a single row. + client.writeAtLeastOnce( + ImmutableList.of( + Mutation.newInsertOrUpdateBuilder("T").set("K").to("One").set("V").to(1L).build())); + + try { + client + .readWriteTransaction() + .run( + transaction -> { + try { + // Try to insert a duplicate row. This statement will fail. When the statement + // is executed against an already existing transaction (i.e. + // inlineBegin=false), the entire transaction will remain invalid and cannot + // be committed. When it is executed as the first statement of a transaction + // that also tries to start a transaction, then no transaction will be started + // and the next statement will start the transaction. This will cause the + // transaction to succeed. 
+ transaction.executeUpdate(Statement.of("INSERT INTO T (K, V) VALUES ('One', 1)")); + fail("missing expected exception"); + } catch (SpannerException e) { + if (e.getErrorCode() == ErrorCode.ABORTED) { + // Aborted -> Let the transaction be retried + throw e; + } + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.ALREADY_EXISTS); + } + return transaction.executeUpdate( + Statement.of("INSERT INTO T (K, V) VALUES ('Two', 2)")); + }); + fail("missing expected ALREADY_EXISTS error"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.ALREADY_EXISTS); + } + } + + @Test + public void testTxWithUncaughtError() { + try { + client + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(Statement.of("UPDATE T SET V=2 WHERE"))); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + } + } + + @Test + public void testTxWithLargeMessageSize() { + int bytesPerColumn = 10000000; // 10MB + String key = uniqueKey(); + Random random = new Random(); + List mutations = new ArrayList(); + Mutation.WriteBuilder builder = Mutation.newInsertOrUpdateBuilder("T").set("K").to(key); + for (int j = 0; j < 7; j++) { + byte[] data = new byte[bytesPerColumn]; + random.nextBytes(data); + builder + .set("col" + j) + .to(com.google.cloud.spanner.Value.bytes(com.google.cloud.ByteArray.copyFrom(data))); + } + mutations.add(builder.build()); + // This large message is under the 100MB limit. 
+ largeMessageClient.write(mutations); + } + + @Test + public void testTxWithUncaughtErrorAfterSuccessfulBegin() { + try { + client + .readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate(Statement.of("INSERT INTO T (K, V) VALUES ('One', 1)")); + return transaction.executeUpdate(Statement.of("UPDATE T SET V=2 WHERE")); + }); + fail("missing expected exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + } + } + + @Test + public void testTransactionRunnerReturnsCommitStats() { + assumeFalse("Emulator does not return commit statistics", isUsingEmulator()); + final String key = uniqueKey(); + TransactionRunner runner = client.readWriteTransaction(Options.commitStats()); + runner.run( + transaction -> { + transaction.buffer( + Mutation.newInsertBuilder("T").set("K").to(key).set("V").to(0).build()); + return null; + }); + assertNotNull(runner.getCommitResponse().getCommitStats()); + // MutationCount = 2 (2 columns). + assertEquals(2L, runner.getCommitResponse().getCommitStats().getMutationCount()); + } + + boolean isUsingMultiplexedSessionsForRW() { + return env.getTestHelper().getOptions().getSessionPoolOptions().getUseMultiplexedSessionForRW(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITUuidTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITUuidTest.java new file mode 100644 index 000000000000..7bec70930c7a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITUuidTest.java @@ -0,0 +1,443 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.connection.ConnectionOptions; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** + * Class for running integration tests for UUID data type. It tests read and write operations + * involving UUID as key and non-key columns. 
+ */ +@Category(ParallelIntegrationTest.class) +@RunWith(Parameterized.class) +public class ITUuidTest { + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + + @Parameterized.Parameters(name = "Dialect = {0}") + public static List data() { + return Arrays.asList( + new DialectTestParameter(Dialect.GOOGLE_STANDARD_SQL), + new DialectTestParameter(Dialect.POSTGRESQL)); + } + + @Parameterized.Parameter() public DialectTestParameter dialect; + + private static DatabaseClient googleStandardSQLClient; + private static DatabaseClient postgreSQLClient; + + private static final String[] GOOGLE_STANDARD_SQL_SCHEMA = + new String[] { + "CREATE TABLE T (" + + " Key STRING(MAX) NOT NULL," + + " UuidValue UUID," + + " UuidArrayValue ARRAY," + + ") PRIMARY KEY (Key)", + "CREATE TABLE UK (" + " Key UUID NOT NULL," + ") PRIMARY KEY (Key)", + }; + + private static final String[] POSTGRESQL_SCHEMA = + new String[] { + "CREATE TABLE T (" + + " Key VARCHAR PRIMARY KEY," + + " UuidValue UUID," + + " UuidArrayValue UUID[]" + + ")", + "CREATE TABLE UK (" + " Key UUID PRIMARY KEY" + ")", + }; + + private static DatabaseClient client; + + private UUID uuid1 = UUID.fromString("aac68fbe-6847-48b1-8373-110950aeaf3a"); + ; + private UUID uuid2 = UUID.fromString("f5868be9-7983-4cfa-adf3-2e9f13f2019d"); + + @BeforeClass + public static void setUpDatabase() + throws ExecutionException, InterruptedException, TimeoutException { + Database googleStandardSQLDatabase = + env.getTestHelper().createTestDatabase(GOOGLE_STANDARD_SQL_SCHEMA); + + googleStandardSQLClient = env.getTestHelper().getDatabaseClient(googleStandardSQLDatabase); + + Database postgreSQLDatabase = + env.getTestHelper() + .createTestDatabase(Dialect.POSTGRESQL, Arrays.asList(POSTGRESQL_SCHEMA)); + postgreSQLClient = env.getTestHelper().getDatabaseClient(postgreSQLDatabase); + } + + @Before + public void before() { + client = + dialect.dialect == Dialect.GOOGLE_STANDARD_SQL ? 
googleStandardSQLClient : postgreSQLClient; + } + + @AfterClass + public static void tearDown() throws Exception { + ConnectionOptions.closeSpanner(); + } + + /** Sequence used to generate unique keys. */ + private static int seq; + + private static String uniqueString() { + return String.format("k%04d", seq++); + } + + private String lastKey; + + private Timestamp write(Mutation m) { + return client.write(Collections.singletonList(m)); + } + + private Mutation.WriteBuilder baseInsert() { + return Mutation.newInsertOrUpdateBuilder("T").set("Key").to(lastKey = uniqueString()); + } + + private Struct readRow(String table, String key, String... columns) { + return client + .singleUse(TimestampBound.strong()) + .readRow(table, Key.of(key), Arrays.asList(columns)); + } + + private Struct readLastRow(String... columns) { + return readRow("T", lastKey, columns); + } + + private Timestamp deleteAllRows(String table) { + return write(Mutation.delete(table, KeySet.all())); + } + + @Test + public void writeUuid() { + UUID uuid = UUID.randomUUID(); + write(baseInsert().set("UuidValue").to(uuid).build()); + Struct row = readLastRow("UuidValue"); + assertFalse(row.isNull(0)); + assertEquals(uuid, row.getUuid(0)); + } + + @Test + public void writeUuidNull() { + write(baseInsert().set("UuidValue").to((UUID) null).build()); + Struct row = readLastRow("UuidValue"); + assertTrue(row.isNull(0)); + } + + @Test + public void writeUuidArrayNull() { + write(baseInsert().set("UuidArrayValue").toUuidArray(null).build()); + Struct row = readLastRow("UuidArrayValue"); + assertTrue(row.isNull(0)); + } + + @Test + public void writeUuidArrayEmpty() { + write(baseInsert().set("UuidArrayValue").toUuidArray(Collections.emptyList()).build()); + Struct row = readLastRow("UuidArrayValue"); + assertFalse(row.isNull(0)); + assertTrue(row.getUuidList(0).isEmpty()); + } + + @Test + public void writeUuidArray() { + UUID uuid1 = UUID.randomUUID(); + UUID uuid2 = UUID.randomUUID(); + + write( + 
baseInsert().set("UuidArrayValue").toUuidArray(Arrays.asList(null, uuid1, uuid2)).build()); + Struct row = readLastRow("UuidArrayValue"); + assertFalse(row.isNull(0)); + assertEquals(row.getUuidList(0), Arrays.asList(null, uuid1, uuid2)); + } + + @Test + public void writeUuidArrayNoNulls() { + UUID uuid1 = UUID.randomUUID(); + UUID uuid2 = UUID.randomUUID(); + + write(baseInsert().set("UuidArrayValue").toUuidArray(Arrays.asList(uuid1, uuid2)).build()); + Struct row = readLastRow("UuidArrayValue"); + assertFalse(row.isNull(0)); + assertEquals(2, row.getUuidList(0).size()); + assertEquals(uuid1, row.getUuidList(0).get(0)); + assertEquals(uuid2, row.getUuidList(0).get(1)); + } + + private String getInsertStatementWithLiterals() { + String statement = "INSERT INTO T (Key, UuidValue, UuidArrayValue) VALUES "; + + if (dialect.dialect == Dialect.POSTGRESQL) { + statement += + "('dml1', 'aac68fbe-6847-48b1-8373-110950aeaf3a'," + + " array['aac68fbe-6847-48b1-8373-110950aeaf3a'::uuid]), ('dml2'," + + " 'aac68fbe-6847-48b1-8373-110950aeaf3a'::uuid," + + " array['aac68fbe-6847-48b1-8373-110950aeaf3a'::uuid]),('dml3', null, null)," + + " ('dml4', 'aac68fbe-6847-48b1-8373-110950aeaf3a'::uuid," + + " array['aac68fbe-6847-48b1-8373-110950aeaf3a'::uuid," + + " 'f5868be9-7983-4cfa-adf3-2e9f13f2019d'::uuid, null])"; + } else { + statement += + "('dml1', 'aac68fbe-6847-48b1-8373-110950aeaf3a'," + + " [CAST('aac68fbe-6847-48b1-8373-110950aeaf3a' AS UUID)]), ('dml2'," + + " CAST('aac68fbe-6847-48b1-8373-110950aeaf3a' AS UUID)," + + " [CAST('aac68fbe-6847-48b1-8373-110950aeaf3a' AS UUID)]), ('dml3', null, null)," + + " ('dml4', 'aac68fbe-6847-48b1-8373-110950aeaf3a'," + + " [CAST('aac68fbe-6847-48b1-8373-110950aeaf3a' AS UUID)," + + " CAST('f5868be9-7983-4cfa-adf3-2e9f13f2019d' AS UUID), null])"; + } + return statement; + } + + @Test + public void uuidLiterals() { + client + .readWriteTransaction() + .run( + transaction -> { + 
transaction.executeUpdate(Statement.of(getInsertStatementWithLiterals())); + return null; + }); + + verifyNonKeyContents("dml"); + } + + private String getInsertStatementWithParameters() { + String statement = + "INSERT INTO T (Key, UuidValue, UuidArrayValue) VALUES " + + "('param1', $1, $2), " + + "('param2', $3, $4), " + + "('param3', $5, $6), " + + "('param4', $7, $8)"; + + return (dialect.dialect == Dialect.POSTGRESQL) ? statement : statement.replace("$", "@p"); + } + + @Test + public void uuidParameter() { + client + .readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate( + Statement.newBuilder(getInsertStatementWithParameters()) + .bind("p1") + .to(Value.uuid(uuid1)) + .bind("p2") + .to(Value.uuidArray(Collections.singletonList(uuid1))) + .bind("p3") + .to(Value.uuid(uuid1)) + .bind("p4") + .to(Value.uuidArray(Collections.singletonList(uuid1))) + .bind("p5") + .to(Value.uuid(null)) + .bind("p6") + .to(Value.uuidArray(null)) + .bind("p7") + .to(Value.uuid(uuid1)) + .bind("p8") + .to(Value.uuidArray(Arrays.asList(uuid1, uuid2, null))) + .build()); + return null; + }); + + verifyNonKeyContents("param"); + } + + private String getInsertStatementForUntypedParameters() { + if (dialect.dialect == Dialect.POSTGRESQL) { + return "INSERT INTO T (key, uuidValue, uuidArrayValue) VALUES " + + "('untyped1', ($1)::uuid, ($2)::uuid[])"; + } + return "INSERT INTO T (Key, UuidValue, UuidArrayValue) VALUES " + + "('untyped1', CAST(@p1 AS UUID), CAST(@p2 AS ARRAY))"; + } + + @Test + public void uuidUntypedParameter() { + client + .readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate( + Statement.newBuilder(getInsertStatementForUntypedParameters()) + .bind("p1") + .to( + Value.untyped( + com.google.protobuf.Value.newBuilder() + .setStringValue("aac68fbe-6847-48b1-8373-110950aeaf3a") + .build())) + .bind("p2") + .to( + Value.untyped( + com.google.protobuf.Value.newBuilder() + .setListValue( + 
com.google.protobuf.ListValue.newBuilder() + .addValues( + com.google.protobuf.Value.newBuilder() + .setStringValue( + "aac68fbe-6847-48b1-8373-110950aeaf3a"))) + .build())) + .build()); + return null; + }); + + Struct row = readRow("T", "untyped1", "UuidValue", "UuidArrayValue"); + assertEquals(UUID.fromString("aac68fbe-6847-48b1-8373-110950aeaf3a"), row.getUuid(0)); + assertEquals( + Collections.singletonList(UUID.fromString("aac68fbe-6847-48b1-8373-110950aeaf3a")), + row.getUuidList(1)); + } + + private String getInsertStatementWithKeyLiterals(UUID uuid1, UUID uuid2) { + String statement = "INSERT INTO UK (Key) VALUES "; + if (dialect.dialect == Dialect.POSTGRESQL) { + statement += "('" + uuid1.toString() + "')," + "('" + uuid2.toString() + "'::uuid)"; + } else { + statement += "('" + uuid1.toString() + "')," + "(CAST('" + uuid2.toString() + "' AS UUID))"; + } + return statement; + } + + @Test + public void uuidAsKeyLiteral() { + deleteAllRows("UK"); + + client + .readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate( + Statement.of(getInsertStatementWithKeyLiterals(uuid1, uuid2))); + return null; + }); + + verifyKeyContents(Arrays.asList(uuid1, uuid2)); + } + + private String getInsertStatementWithKeyParameters() { + String statement = "INSERT INTO UK (Key) VALUES " + "($1)," + "($2)"; + return (dialect.dialect == Dialect.POSTGRESQL) ? 
statement : statement.replace("$", "@p"); + } + + @Test + public void uuidAsKeyParameter() { + deleteAllRows("UK"); + UUID uuid1 = UUID.fromString("fb907080-48a4-4615-b2c4-c8ccb5bb66a4"); + UUID uuid2 = UUID.fromString("faee3a78-cc54-42fc-baa2-53197fb89e8a"); + + client + .readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate( + Statement.newBuilder(getInsertStatementWithKeyParameters()) + .bind("p1") + .to(Value.uuid(uuid1)) + .bind("p2") + .to(Value.uuid(uuid2)) + .build()); + return null; + }); + + verifyKeyContents(Arrays.asList(uuid2, uuid1)); + } + + private void verifyKeyContents(List uuids) { + try (ResultSet resultSet = + client.singleUse().executeQuery(Statement.of("SELECT Key AS key FROM UK ORDER BY key"))) { + + for (UUID uuid : uuids) { + assertTrue(resultSet.next()); + assertEquals(uuid, resultSet.getUuid("key")); + assertEquals(Value.uuid(uuid), resultSet.getValue("key")); + } + } + } + + private void verifyNonKeyContents(String keyPrefix) { + try (ResultSet resultSet = + client + .singleUse() + .executeQuery( + Statement.of( + "SELECT Key AS key, UuidValue AS uuidvalue, UuidArrayValue AS uuidarrayvalue FROM T WHERE Key LIKE '{keyPrefix}%' ORDER BY key" + .replace("{keyPrefix}", keyPrefix)))) { + + // Row 1 + assertTrue(resultSet.next()); + assertEquals(uuid1, resultSet.getUuid("uuidvalue")); + assertEquals(Value.uuid(uuid1), resultSet.getValue("uuidvalue")); + assertEquals(Collections.singletonList(uuid1), resultSet.getUuidList("uuidarrayvalue")); + assertEquals( + Value.uuidArray(Collections.singletonList(uuid1)), resultSet.getValue("uuidarrayvalue")); + + // Row 2 + assertTrue(resultSet.next()); + assertEquals(uuid1, resultSet.getUuid("uuidvalue")); + assertEquals(Value.uuid(uuid1), resultSet.getValue("uuidvalue")); + assertEquals(Collections.singletonList(uuid1), resultSet.getUuidList("uuidarrayvalue")); + assertEquals( + Value.uuidArray(Collections.singletonList(uuid1)), resultSet.getValue("uuidarrayvalue")); + + // Row 3 
+ assertTrue(resultSet.next()); + assertTrue(resultSet.isNull("uuidvalue")); + assertTrue(resultSet.isNull("uuidarrayvalue")); + + // Row 4 + assertTrue(resultSet.next()); + assertEquals(uuid1, resultSet.getUuid("uuidvalue")); + assertEquals(Value.uuid(uuid1), resultSet.getValue("uuidvalue")); + assertEquals(Arrays.asList(uuid1, uuid2, null), resultSet.getUuidList("uuidarrayvalue")); + assertEquals( + Value.uuidArray(Arrays.asList(uuid1, uuid2, null)), resultSet.getValue("uuidarrayvalue")); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITVPCNegativeTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITVPCNegativeTest.java new file mode 100644 index 000000000000..0a0e53a887f1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITVPCNegativeTest.java @@ -0,0 +1,362 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.testing.ExperimentalHostHelper.isExperimentalHost; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; +import static org.junit.Assume.assumeTrue; + +import com.google.api.gax.core.FixedCredentialsProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.PermissionDeniedException; +import com.google.auth.oauth2.GoogleCredentials; +import com.google.cloud.Policy; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.BackupId; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.InstanceAdminClient; +import com.google.cloud.spanner.InstanceId; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SerialIntegrationTest; +import com.google.cloud.spanner.SessionPoolOptions; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.base.Strings; +import com.google.longrunning.OperationsClient; +import com.google.longrunning.OperationsSettings; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.logging.Logger; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** 
Integration tests for VPC-SC */ +@Category(SerialIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITVPCNegativeTest { + private static final Logger logger = Logger.getLogger(ITVPCNegativeTest.class.getName()); + private static final String IN_VPCSC_TEST = System.getenv("GOOGLE_CLOUD_TESTS_IN_VPCSC"); + private static final String OUTSIDE_VPC_PROJECT = + System.getenv("GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT"); + + private Spanner spanner; + private InstanceAdminClient instanceAdminClient; + private DatabaseAdminClient databaseAdminClient; + private DatabaseClient databaseClient; + private InstanceId instanceId; + private BackupId backupId; + + @BeforeClass + public static void setUpClass() { + assumeFalse("Not applicable for experimental host", isExperimentalHost()); + assumeTrue( + "To run tests, GOOGLE_CLOUD_TESTS_IN_VPCSC environment variable needs to be set to True", + IN_VPCSC_TEST != null && IN_VPCSC_TEST.equalsIgnoreCase("true")); + assertFalse( + "GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT environment variable needs " + + "to be set to a GCP project that is outside the VPC perimeter", + Strings.isNullOrEmpty(OUTSIDE_VPC_PROJECT)); + assumeTrue(Strings.isNullOrEmpty(System.getenv("SPANNER_EMULATOR_HOST"))); + } + + @Before + public void setUp() { + instanceId = InstanceId.of(OUTSIDE_VPC_PROJECT, "nonexistent-instance"); + backupId = BackupId.of(OUTSIDE_VPC_PROJECT, "nonexistent-instance", "nonexistent-backup"); + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId(instanceId.getProject()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + // Do not eagerly create sessions since they will fail outside the VPC. 
+ .setMinSessions(0) + // Client shouldn't block if sessions cannot be created + .setFailIfPoolExhausted() + .build()) + .build(); + spanner = options.getService(); + instanceAdminClient = spanner.getInstanceAdminClient(); + databaseAdminClient = spanner.getDatabaseAdminClient(); + databaseClient = + spanner.getDatabaseClient( + DatabaseId.of(OUTSIDE_VPC_PROJECT, "nonexistent-instance", "nonexistent-database")); + } + + @After + public void tearDown() { + spanner.close(); + } + + private void checkExceptionForVPCError(SpannerException e) { + assertEquals(ErrorCode.PERMISSION_DENIED, e.getErrorCode()); + assertThat(e.getMessage()).contains("Request is prohibited by organization's policy"); + } + + @Test + public void deniedListInstanceConfigs() { + try { + instanceAdminClient.listInstanceConfigs(); + fail("Expected PERMISSION_DENIED SpannerException"); + } catch (SpannerException e) { + checkExceptionForVPCError(e); + } + } + + @Test + public void deniedGetInstanceConfig() { + try { + instanceAdminClient.getInstanceConfig("nonexistent-configs"); + fail("Expected PERMISSION_DENIED SpannerException"); + } catch (SpannerException e) { + checkExceptionForVPCError(e); + } + } + + @Test + public void deniedListInstances() { + try { + instanceAdminClient.listInstances(); + fail("Expected PERMISSION_DENIED SpannerException"); + } catch (SpannerException e) { + checkExceptionForVPCError(e); + } + } + + @Test + public void deniedGetInstance() { + try { + instanceAdminClient.getInstance("non-existent"); + fail("Expected PERMISSION_DENIED SpannerException"); + } catch (SpannerException e) { + checkExceptionForVPCError(e); + } + } + + @Test + public void deniedListDatabases() { + try { + databaseAdminClient.listDatabases("nonexistent-instance", Options.pageSize(1)); + fail("Expected PERMISSION_DENIED SpannerException"); + } catch (SpannerException e) { + checkExceptionForVPCError(e); + } + } + + @Test + public void deniedGetDatabase() { + try { + 
databaseAdminClient.getDatabase("nonexistent-instance", "nonexistent-database"); + fail("Expected PERMISSION_DENIED SpannerException"); + } catch (SpannerException e) { + checkExceptionForVPCError(e); + } + } + + @Test + public void deniedRead() { + // Getting a session and starting a read is non-blocking and will not cause an exception. Trying + // to get results from the result set will. + ResultSet rs = + databaseClient + .singleUse() + .read("nonexistent-table", KeySet.all(), Collections.singletonList("nonexistent-col")); + try { + // Tests that the initial create session request returns a permission denied. + rs.next(); + fail("Expected PERMISSION_DENIED SpannerException"); + } catch (SpannerException e) { + checkExceptionForVPCError(e); + } + } + + @Test + public void deniedCreateBackup() throws InterruptedException { + try { + databaseAdminClient + .createBackup(instanceId.getInstance(), "newbackup-id", "nonexistent-db", Timestamp.now()) + .get(); + fail("Expected PERMISSION_DENIED SpannerException"); + } catch (ExecutionException e) { + Throwable thrown = e.getCause(); + checkExceptionForVPCError((SpannerException) thrown); + } + } + + @Test + public void deniedGetBackup() { + try { + databaseAdminClient.getBackup(instanceId.getInstance(), backupId.getBackup()); + fail("Expected PERMISSION_DENIED SpannerException"); + } catch (SpannerException e) { + checkExceptionForVPCError(e); + } + } + + @Test + public void deniedUpdateBackup() { + try { + databaseAdminClient.updateBackup( + instanceId.getInstance(), backupId.getBackup(), Timestamp.now()); + fail("Expected PERMISSION_DENIED SpannerException"); + } catch (SpannerException e) { + checkExceptionForVPCError(e); + } + } + + @Test + public void deniedListBackup() { + try { + databaseAdminClient.listBackups(instanceId.getInstance()); + fail("Expected PERMISSION_DENIED SpannerException"); + } catch (SpannerException e) { + checkExceptionForVPCError(e); + } + } + + @Test + public void deniedDeleteBackup() { + 
try { + databaseAdminClient.deleteBackup(instanceId.getInstance(), backupId.getBackup()); + fail("Expected PERMISSION_DENIED SpannerException"); + } catch (SpannerException e) { + checkExceptionForVPCError(e); + } + } + + @Test + public void deniedRestoreDatabase() throws InterruptedException { + try { + databaseAdminClient + .restoreDatabase( + instanceId.getInstance(), "nonexistent-backup", instanceId.getInstance(), "newdb-id") + .get(); + fail("Expected PERMISSION_DENIED SpannerException"); + } catch (ExecutionException e) { + Throwable thrown = e.getCause(); + checkExceptionForVPCError((SpannerException) thrown); + } + } + + @Test + public void deniedListBackupOperationsInInstance() { + try { + databaseAdminClient.listBackupOperations(instanceId.getInstance()); + fail("Expected PERMISSION_DENIED SpannerException"); + } catch (SpannerException e) { + checkExceptionForVPCError(e); + } + } + + @Test + public void deniedListDatabaseOperationsInInstance() { + try { + databaseAdminClient.listDatabaseOperations(instanceId.getInstance()); + fail("Expected PERMISSION_DENIED SpannerException"); + } catch (SpannerException e) { + checkExceptionForVPCError(e); + } + } + + @Test + public void deniedGetBackupIamPolicy() { + try { + databaseAdminClient.getBackupIAMPolicy(instanceId.getInstance(), backupId.getBackup()); + fail("Expected PERMISSION_DENIED SpannerException"); + } catch (SpannerException e) { + checkExceptionForVPCError(e); + } + } + + @Test + public void deniedSetBackupIamPolicy() { + try { + Policy policy = Policy.newBuilder().build(); + databaseAdminClient.setBackupIAMPolicy( + backupId.getInstanceId().getInstance(), backupId.getBackup(), policy); + fail("Expected PERMISSION_DENIED SpannerException"); + } catch (SpannerException e) { + checkExceptionForVPCError(e); + } + } + + @Test + public void deniedTestBackupIamPermissions() { + try { + List permissions = new ArrayList<>(); + databaseAdminClient.testBackupIAMPermissions( + 
backupId.getInstanceId().getInstance(), backupId.getBackup(), permissions); + fail("Expected PERMISSION_DENIED SpannerException"); + } catch (SpannerException e) { + checkExceptionForVPCError(e); + } + } + + @Test + public void deniedCancelBackupOperation() { + try { + databaseAdminClient.cancelOperation(backupId.getName() + "/operations/nonexistentop"); + fail("Expected PERMISSION_DENIED SpannerException"); + } catch (SpannerException e) { + checkExceptionForVPCError(e); + } + } + + @Test + public void deniedGetBackupOperation() { + try { + databaseAdminClient.getOperation(backupId.getName() + "/operations/nonexistentop"); + fail("Expected PERMISSION_DENIED SpannerException"); + } catch (SpannerException e) { + checkExceptionForVPCError(e); + } + } + + @Test + public void deniedListBackupOperations() throws IOException { + try (OperationsClient client = + OperationsClient.create( + OperationsSettings.newBuilder() + .setTransportChannelProvider(InstantiatingGrpcChannelProvider.newBuilder().build()) + .setEndpoint("spanner.googleapis.com:443") + .setCredentialsProvider( + FixedCredentialsProvider.create(GoogleCredentials.getApplicationDefault())) + .build())) { + client.listOperations(backupId.getName() + "/operations", ""); + fail("Expected PermissionDeniedException"); + } catch (PermissionDeniedException e) { + assertThat(e.getMessage()).contains("Request is prohibited by organization's policy"); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITWithGrpcGcpTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITWithGrpcGcpTest.java new file mode 100644 index 000000000000..8fdf687e444b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITWithGrpcGcpTest.java @@ -0,0 +1,115 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with 
the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.testing.RemoteSpannerHelper; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Integration tests for using gRPC-GCP extension. */ +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +public class ITWithGrpcGcpTest { + + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static final String TABLE_NAME = "TestTable"; + private static final List ALL_COLUMNS = Arrays.asList("Key", "StringValue"); + + private static RemoteSpannerHelper testHelper; + private static Database db; + private static DatabaseClient client; + + @BeforeClass + public static void setUpDatabase() { + // Get default spanner options for an integration test. 
+ SpannerOptions.Builder builder = env.getTestHelper().getOptions().toBuilder(); + builder.enableGrpcGcpExtension(); + + // Create a new testHelper with the gRPC-GCP extension enabled. + testHelper = RemoteSpannerHelper.create(builder.build(), env.getTestHelper().getInstanceId()); + + db = + env.getTestHelper() + .createTestDatabase( + "CREATE TABLE " + + TABLE_NAME + + " (" + + " Key STRING(MAX) NOT NULL," + + " StringValue STRING(MAX)," + + ") PRIMARY KEY (Key)"); + client = testHelper.getDatabaseClient(db); + + List mutations = new ArrayList<>(); + for (int i = 0; i < 3; ++i) { + mutations.add( + Mutation.newInsertOrUpdateBuilder(TABLE_NAME) + .set("Key") + .to("k" + i) + .set("StringValue") + .to("v" + i) + .build()); + } + client.write(mutations); + } + + @Test + public void singleRead() { + Struct row = + client.singleUse(TimestampBound.strong()).readRow(TABLE_NAME, Key.of("k1"), ALL_COLUMNS); + assertThat(row).isNotNull(); + assertThat(row.getString(0)).isEqualTo("k1"); + assertThat(row.getString(1)).isEqualTo("v1"); + } + + @Test + public void usingTransaction() { + final Long updatedCount = + client + .readWriteTransaction() + .run( + transaction -> + transaction.executeUpdate( + Statement.of( + "UPDATE " + TABLE_NAME + " SET StringValue='v2upd' WHERE Key='k2'"))); + assertThat(updatedCount).isEqualTo(1L); + + Struct row = + client.singleUse(TimestampBound.strong()).readRow(TABLE_NAME, Key.of("k2"), ALL_COLUMNS); + assertThat(row).isNotNull(); + assertThat(row.getString(0)).isEqualTo("k2"); + assertThat(row.getString(1)).isEqualTo("v2upd"); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITWriteTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITWriteTest.java new file mode 100644 index 000000000000..5dde683bcb8a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITWriteTest.java @@ -0,0 +1,1535 @@ +/* + * Copyright 2017 Google 
LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static com.google.cloud.spanner.SpannerMatchers.isSpannerException; +import static com.google.cloud.spanner.Type.array; +import static com.google.cloud.spanner.Type.json; +import static com.google.cloud.spanner.Type.pgJsonb; +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; +import static org.junit.Assume.assumeTrue; + +import com.google.api.gax.rpc.ServerStream; +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.CommitResponse; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.MutationGroup; +import com.google.cloud.spanner.Options; 
+import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.connection.ConnectionOptions; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.NullValue; +import com.google.rpc.Code; +import com.google.rpc.Status; +import com.google.spanner.v1.BatchWriteResponse; +import io.grpc.Context; +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Base64; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.hamcrest.MatcherAssert; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** Integration test for writing data to Cloud Spanner. 
 */ +@Category(ParallelIntegrationTest.class) +@RunWith(Parameterized.class) +public class ITWriteTest { + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + + @Parameterized.Parameters(name = "Dialect = {0}") + public static List<DialectTestParameter> data() { + List<DialectTestParameter> params = new ArrayList<>(); + params.add(new DialectTestParameter(Dialect.GOOGLE_STANDARD_SQL)); + params.add(new DialectTestParameter(Dialect.POSTGRESQL)); + return params; + } + + @Parameterized.Parameter() public DialectTestParameter dialect; + + private static DatabaseClient googleStandardSQLClient; + private static DatabaseClient postgreSQLClient; + + private static final String[] GOOGLE_STANDARD_SQL_SCHEMA = + new String[] { + "CREATE TABLE T (" + + " K STRING(MAX) NOT NULL," + + " BoolValue BOOL," + + " Int64Value INT64," + + " Float64Value FLOAT64," + + " StringValue STRING(MAX)," + + " JsonValue JSON," + + " BytesValue BYTES(MAX)," + + " TimestampValue TIMESTAMP OPTIONS (allow_commit_timestamp = true)," + + " DateValue DATE," + + " NumericValue NUMERIC," + + " BoolArrayValue ARRAY<BOOL>," + + " Int64ArrayValue ARRAY<INT64>," + + " Float64ArrayValue ARRAY<FLOAT64>," + + " StringArrayValue ARRAY<STRING(MAX)>," + + " JsonArrayValue ARRAY<JSON>," + + " BytesArrayValue ARRAY<BYTES(MAX)>," + + " TimestampArrayValue ARRAY<TIMESTAMP>," + + " DateArrayValue ARRAY<DATE>," + + " NumericArrayValue ARRAY<NUMERIC>," + + ") PRIMARY KEY (K)", + "CREATE TABLE T1 (" + + " K1 STRING(MAX) NOT NULL," + + " K STRING(MAX) NOT NULL," + + " CONSTRAINT FK FOREIGN KEY (K) REFERENCES T(K)" + + ") PRIMARY KEY (K1)" + }; + + private static final String[] POSTGRESQL_SCHEMA = + new String[] { + "CREATE TABLE T (" + + " K VARCHAR PRIMARY KEY," + + " BoolValue BOOL," + + " Int64Value BIGINT," + + " Float64Value DOUBLE PRECISION," + + " StringValue VARCHAR," + + " JsonValue JSONB," + + " BytesValue BYTEA," + + " TimestampValue SPANNER.COMMIT_TIMESTAMP," + + " DateValue DATE," + + " NumericValue NUMERIC," + + " BoolArrayValue BOOL[]," + + " Int64ArrayValue BIGINT[]," + + " Float64ArrayValue DOUBLE 
PRECISION[]," + + " StringArrayValue VARCHAR[]," + + " JsonArrayValue JSONB[]," + + " BytesArrayValue BYTEA[]," + + " TimestampArrayValue TIMESTAMPTZ[]," + + " DateArrayValue DATE[]," + + " NumericArrayValue NUMERIC[]" + + ")", + "CREATE TABLE T1 (" + + " K1 VARCHAR PRIMARY KEY," + + " K VARCHAR," + + " CONSTRAINT FK FOREIGN KEY (K) REFERENCES T(K)" + + ")" + }; + + /** Sequence used to generate unique keys. */ + private static int seq; + + private static DatabaseClient client; + + @BeforeClass + public static void setUpDatabase() + throws ExecutionException, InterruptedException, TimeoutException { + Database googleStandardSQLDatabase = + env.getTestHelper().createTestDatabase(GOOGLE_STANDARD_SQL_SCHEMA); + + googleStandardSQLClient = env.getTestHelper().getDatabaseClient(googleStandardSQLDatabase); + Database postgreSQLDatabase = + env.getTestHelper() + .createTestDatabase(Dialect.POSTGRESQL, Arrays.asList(POSTGRESQL_SCHEMA)); + postgreSQLClient = env.getTestHelper().getDatabaseClient(postgreSQLDatabase); + } + + @Before + public void before() { + client = + dialect.dialect == Dialect.GOOGLE_STANDARD_SQL ? googleStandardSQLClient : postgreSQLClient; + } + + @AfterClass + public static void teardown() { + ConnectionOptions.closeSpanner(); + } + + private static String uniqueString() { + return String.format("k%04d", seq++); + } + + private String lastKey; + + private Timestamp write(Mutation m) { + return client.write(Collections.singletonList(m)); + } + + private Mutation.WriteBuilder baseInsert() { + return Mutation.newInsertOrUpdateBuilder("T").set("K").to(lastKey = uniqueString()); + } + + private Struct readLastRow(String... columns) { + return readRow("T", lastKey, columns); + } + + private Struct readRow(String table, String key, String... 
columns) { + return client + .singleUse(TimestampBound.strong()) + .readRow(table, Key.of(key), Arrays.asList(columns)); + } + + @Test + public void writeAtLeastOnce() { + client.writeAtLeastOnce( + Collections.singletonList( + Mutation.newInsertOrUpdateBuilder("T") + .set("K") + .to(lastKey = uniqueString()) + .set("StringValue") + .to("v1") + .build())); + Struct row = readLastRow("StringValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getString(0)).isEqualTo("v1"); + } + + @Test + public void batchWriteAtLeastOnce() { + assumeFalse("Emulator does not support BatchWriteAtLeastOnce", isUsingEmulator()); + final String k1 = uniqueString(), k2 = uniqueString(), k3 = uniqueString(), k4 = uniqueString(); + lastKey = k3; + final List<MutationGroup> mutationGroups = + ImmutableList.of( + MutationGroup.of( + Mutation.newInsertOrUpdateBuilder("T") + .set("K") + .to(k1) + .set("StringValue") + .to("v1") + .set("BoolValue") + .to(true) + .build(), + Mutation.newInsertOrUpdateBuilder("T") + .set("K") + .to(k2) + .set("StringValue") + .to("v2") + .build()), + MutationGroup.of( + Mutation.newInsertOrUpdateBuilder("T") + .set("K") + .to(k3) + .set("StringValue") + .to("v1") + .set("BoolValue") + .to(false) + .build(), + Mutation.newInsertOrUpdateBuilder("T1").set("K1").to(k4).set("K").to(k3).build())); + ServerStream<BatchWriteResponse> responses = client.batchWriteAtLeastOnce(mutationGroups); + Set<Integer> responseIndexes = new HashSet<>(); + Set<Integer> appliedMutationIndexes = new HashSet<>(); + for (BatchWriteResponse response : responses) { + responseIndexes.addAll(response.getIndexesList()); + if (response.getStatus().equals(Status.newBuilder().setCode(Code.OK_VALUE).build())) { + appliedMutationIndexes.addAll(response.getIndexesList()); + assertNotNull(response.getCommitTimestamp()); + } + } + assertEquals(responseIndexes, new HashSet<>(Arrays.asList(0, 1))); + + Struct row; + // assert row with key k1 + if (appliedMutationIndexes.contains(0)) { + row = readRow("T", k1, "StringValue", "BoolValue"); + 
assertEquals(row.getString(0), "v1"); + assertTrue(row.getBoolean(1)); + row = readRow("T", k2, "StringValue", "BoolValue"); + assertEquals(row.getString(0), "v2"); + assertTrue(row.isNull(1)); + } + + // assert row with key k4, and corresponding referencing table. + if (appliedMutationIndexes.contains(1)) { + row = readRow("T", k3, "StringValue", "BoolValue"); + assertEquals(row.getString(0), "v1"); + assertFalse(row.getBoolean(1)); + row = readRow("T1", k4, "K"); + assertEquals(row.getString(0), k3); + } + } + + @Test + public void testWriteWithMaxCommitDelay() { + CommitResponse response = + client.writeWithOptions( + Collections.singletonList( + Mutation.newInsertOrUpdateBuilder("T") + .set("K") + .to(lastKey = uniqueString()) + .set("StringValue") + .to("v1") + .build()), + Options.maxCommitDelay(java.time.Duration.ofMillis(100))); + assertNotNull(response); + assertNotNull(response.getCommitTimestamp()); + } + + @Test + public void testWriteReturnsCommitStats() { + assumeFalse("Emulator does not return commit statistics", isUsingEmulator()); + CommitResponse response = + client.writeWithOptions( + Collections.singletonList( + Mutation.newInsertOrUpdateBuilder("T") + .set("K") + .to(lastKey = uniqueString()) + .set("StringValue") + .to("v1") + .build()), + Options.commitStats()); + assertNotNull(response); + assertNotNull(response.getCommitTimestamp()); + assertNotNull(response.getCommitStats()); + assertEquals(2L, response.getCommitStats().getMutationCount()); + } + + @Test + public void testWriteAtLeastOnceReturnsCommitStats() { + assumeFalse("Emulator does not return commit statistics", isUsingEmulator()); + CommitResponse response = + client.writeAtLeastOnceWithOptions( + Collections.singletonList( + Mutation.newInsertOrUpdateBuilder("T") + .set("K") + .to(lastKey = uniqueString()) + .set("StringValue") + .to("v1") + .build()), + Options.commitStats()); + assertNotNull(response); + assertNotNull(response.getCommitTimestamp()); + 
assertNotNull(response.getCommitStats()); + assertEquals(2L, response.getCommitStats().getMutationCount()); + } + + @Test + public void writeAlreadyExists() { + client.write( + Collections.singletonList( + Mutation.newInsertBuilder("T") + .set("K") + .to(lastKey = "key1") + .set("StringValue") + .to("v1") + .build())); + Struct row = readLastRow("StringValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getString(0)).isEqualTo("v1"); + + try { + client.write( + Collections.singletonList( + Mutation.newInsertBuilder("T") + .set("K") + .to(lastKey) + .set("StringValue") + .to("v2") + .build())); + fail("missing expected ALREADY_EXISTS exception"); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.ALREADY_EXISTS); + } + row = readLastRow("StringValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getString(0)).isEqualTo("v1"); + } + + @Ignore // TODO(user): Fix this - backend currently accepts empty mutation. + @Test + public void emptyWrite() { + try { + client.write(Collections.emptyList()); + fail("Expected exception"); + } catch (SpannerException ex) { + assertThat(ex.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + } + } + + @Test + public void writeBool() { + write(baseInsert().set("BoolValue").to(true).build()); + Struct row = readLastRow("BoolValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getBoolean(0)).isTrue(); + } + + @Test + public void writeBoolNull() { + write(baseInsert().set("BoolValue").to((Boolean) null).build()); + Struct row = readLastRow("BoolValue"); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void writeInt64() { + write(baseInsert().set("Int64Value").to(1234).build()); + Struct row = readLastRow("Int64Value"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getLong(0)).isEqualTo(1234L); + } + + @Test + public void writeInt64Null() { + write(baseInsert().set("Int64Value").to((Long) null).build()); + Struct row = readLastRow("Int64Value"); 
+ assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void writeFloat64() { + write(baseInsert().set("Float64Value").to(2.0).build()); + Struct row = readLastRow("Float64Value"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getDouble(0)).isWithin(0.0).of(2.0); + } + + @Test + public void writeFloat64NonNumbers() { + write(baseInsert().set("Float64Value").to(Double.NEGATIVE_INFINITY).build()); + Struct row = readLastRow("Float64Value"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getDouble(0)).isNegativeInfinity(); + + write(baseInsert().set("Float64Value").to(Double.POSITIVE_INFINITY).build()); + row = readLastRow("Float64Value"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getDouble(0)).isPositiveInfinity(); + + write(baseInsert().set("Float64Value").to(Double.NaN).build()); + row = readLastRow("Float64Value"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getDouble(0)).isNaN(); + } + + @Test + public void writeFloat64Null() { + write(baseInsert().set("Float64Value").to((Double) null).build()); + Struct row = readLastRow("Float64Value"); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void writeString() { + write(baseInsert().set("StringValue").to("V1").build()); + Struct row = readLastRow("StringValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getString(0)).isEqualTo("V1"); + } + + @Test + public void writeStringNull() { + write(baseInsert().set("StringValue").to((String) null).build()); + Struct row = readLastRow("StringValue"); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void writeJson() { + write(baseInsert().set("JsonValue").to(Value.json("{\"rating\":9,\"open\":true}")).build()); + Struct row = readLastRow("JsonValue"); + assertThat(row.isNull(0)).isFalse(); + if (dialect.dialect == Dialect.POSTGRESQL) { + assertThat(row.getColumnType("jsonvalue")).isEqualTo(pgJsonb()); + assertThat(row.getPgJsonb(0)).isEqualTo("{\"open\": true, \"rating\": 9}"); + } else { + 
assertThat(row.getColumnType("JsonValue")).isEqualTo(json()); + assertThat(row.getJson(0)).isEqualTo("{\"open\":true,\"rating\":9}"); + } + } + + @Test + public void writeJsonEmpty() { + write(baseInsert().set("JsonValue").to(Value.json("{}")).build()); + Struct row = readLastRow("JsonValue"); + assertThat(row.isNull(0)).isFalse(); + if (dialect.dialect == Dialect.POSTGRESQL) { + assertThat(row.getColumnType("jsonvalue")).isEqualTo(pgJsonb()); + assertThat(row.getPgJsonb(0)).isEqualTo("{}"); + } else { + assertThat(row.getColumnType("JsonValue")).isEqualTo(json()); + assertThat(row.getJson(0)).isEqualTo("{}"); + } + } + + @Test + public void writeJsonNull() { + write(baseInsert().set("JsonValue").to(Value.json(null)).build()); + Struct row = readLastRow("JsonValue"); + assertThat(row.isNull(0)).isTrue(); + if (dialect.dialect == Dialect.POSTGRESQL) { + assertThat(row.getColumnType("jsonvalue")).isEqualTo(pgJsonb()); + } else { + assertThat(row.getColumnType("JsonValue")).isEqualTo(json()); + } + } + + @Test + public void writeBytes() { + ByteArray data = ByteArray.copyFrom("V1"); + write(baseInsert().set("BytesValue").to(data).build()); + Struct row = readLastRow("BytesValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getBytes(0)).isEqualTo(data); + } + + @Test + public void writeBytesAsString() { + Random random = new Random(); + byte[] data = new byte[256]; + random.nextBytes(data); + String base64 = Base64.getEncoder().encodeToString(data); + write(baseInsert().set("BytesValue").to(base64).build()); + Struct row = readLastRow("BytesValue"); + assertFalse(row.isNull(0)); + assertArrayEquals(data, row.getBytes(0).toByteArray()); + assertEquals(base64, row.getValue(0).getAsString()); + } + + @Test + public void writeBytesAsStringUsingDml() { + Random random = new Random(); + byte[] data = new byte[256]; + random.nextBytes(data); + String base64 = Base64.getEncoder().encodeToString(data); + Long updateCount = + client + .readWriteTransaction() + .run( 
+ transaction -> + transaction.executeUpdate( + Statement.newBuilder( + "insert into T (BytesValue, K) values (" + + queryParamString(1) + + ", " + + queryParamString(2) + + ")") + .bind("p1") + .to(Value.bytesFromBase64(base64)) + .bind("p2") + .to(lastKey = uniqueString()) + .build())); + assertNotNull(updateCount); + assertEquals(1L, updateCount.longValue()); + + Struct row = readLastRow("BytesValue"); + assertFalse(row.isNull(0)); + assertArrayEquals(data, row.getBytes(0).toByteArray()); + assertEquals(base64, row.getValue(0).getAsString()); + } + + String queryParamString(int index) { + return dialect.dialect == Dialect.GOOGLE_STANDARD_SQL ? "@p" + index : "$" + index; + } + + @Test + public void writeBytesRandom() { + // Pseudo-random test for byte encoding. We explicitly set a random seed so that multiple + // test runs cover more data, but any failing test run can be reproduced easily. + Random rnd = new Random(); + long seed = rnd.nextLong(); + rnd.setSeed(seed); + + Map expected = new HashMap<>(); + boolean pass = false; + try { + for (int length : new int[] {1, 2, 5, 11}) { + byte[] data = new byte[length]; + for (int i = 0; i < 3; ++i) { + rnd.nextBytes(data); + String key = uniqueString(); + ByteArray value = ByteArray.copyFrom(data); + expected.put(key, value); + write( + Mutation.newInsertOrUpdateBuilder("T") + .set("K") + .to(key) + .set("BytesValue") + .to(value) + .build()); + } + } + KeySet.Builder keys = KeySet.newBuilder(); + for (String key : expected.keySet()) { + keys.addKey(Key.of(key)); + } + ResultSet resultSet = + client + .singleUse(TimestampBound.strong()) + .read("T", keys.build(), Arrays.asList("K", "BytesValue")); + while (resultSet.next()) { + String key = resultSet.getString(0); + ByteArray value = resultSet.getBytes(1); + assertThat(expected).containsKey(key); + ByteArray expectedValue = expected.remove(key); + assertThat(value).isEqualTo(expectedValue); + } + assertThat(expected).isEmpty(); + pass = true; + } finally { + if 
(!pass) { + System.out.println("To reproduce failure, use seed " + seed); + } + } + } + + @Test + public void writeBytesNull() { + write(baseInsert().set("BytesValue").to((ByteArray) null).build()); + Struct row = readLastRow("BytesValue"); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void writeTimestamp() { + Timestamp timestamp = Timestamp.parseTimestamp("2016-09-15T00:00:00.111111Z"); + write(baseInsert().set("TimestampValue").to(timestamp).build()); + Struct row = readLastRow("TimestampValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getTimestamp(0)).isEqualTo(timestamp); + } + + @Test + public void writeTimestampNull() { + write(baseInsert().set("TimestampValue").to((Timestamp) null).build()); + Struct row = readLastRow("TimestampValue"); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void writeCommitTimestamp() { + Timestamp commitTimestamp = + write(baseInsert().set("TimestampValue").to(Value.COMMIT_TIMESTAMP).build()); + Struct row = readLastRow("TimestampValue"); + assertThat(row.getTimestamp(0)).isEqualTo(commitTimestamp); + } + + @Test + public void writeDate() { + Date date = Date.parseDate("2016-09-15"); + write(baseInsert().set("DateValue").to(date).build()); + Struct row = readLastRow("DateValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getDate(0)).isEqualTo(date); + } + + @Test + public void writeDateNull() { + write(baseInsert().set("DateValue").to((Date) null).build()); + Struct row = readLastRow("DateValue"); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void writeNumeric() { + write(baseInsert().set("NumericValue").to("3.141592").build()); + Struct row = readLastRow("NumericValue"); + assertThat(row.isNull(0)).isFalse(); + if (dialect.dialect == Dialect.GOOGLE_STANDARD_SQL) { + assertThat(row.getBigDecimal(0)).isEqualTo(BigDecimal.valueOf(3141592, 6)); + } else { + assertThat(row.getString(0)).isEqualTo("3.141592"); + } + } + + @Test + public void writeNumericNull() { 
+ write(baseInsert().set("NumericValue").to((String) null).build()); + Struct row = readLastRow("NumericValue"); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void writeBoolArrayNull() { + write(baseInsert().set("BoolArrayValue").toBoolArray((boolean[]) null).build()); + Struct row = readLastRow("BoolArrayValue"); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void writeBoolArrayEmpty() { + write(baseInsert().set("BoolArrayValue").toBoolArray(new boolean[] {}).build()); + Struct row = readLastRow("BoolArrayValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getBooleanList(0)).containsExactly(); + } + + @Test + public void writeBoolArray() { + write(baseInsert().set("BoolArrayValue").toBoolArray(Arrays.asList(true, null, false)).build()); + Struct row = readLastRow("BoolArrayValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getBooleanList(0)).containsExactly(true, null, false).inOrder(); + try { + row.getBooleanArray(0); + fail("Expected exception"); + } catch (NullPointerException ex) { + assertNotNull(ex.getMessage()); + } + } + + @Test + public void writeBoolArrayNoNulls() { + write(baseInsert().set("BoolArrayValue").toBoolArray(Arrays.asList(true, false)).build()); + Struct row = readLastRow("BoolArrayValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getBooleanArray(0)).isEqualTo(new boolean[] {true, false}); + } + + @Test + public void writeInt64ArrayNull() { + write(baseInsert().set("Int64ArrayValue").toInt64Array((long[]) null).build()); + Struct row = readLastRow("Int64ArrayValue"); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void writeInt64ArrayEmpty() { + write(baseInsert().set("Int64ArrayValue").toInt64Array(new long[] {}).build()); + Struct row = readLastRow("Int64ArrayValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getLongList(0)).containsExactly(); + } + + @Test + public void writeInt64Array() { + 
write(baseInsert().set("Int64ArrayValue").toInt64Array(Arrays.asList(1L, 2L, null)).build()); + Struct row = readLastRow("Int64ArrayValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getLongList(0)).containsExactly(1L, 2L, null).inOrder(); + try { + row.getLongArray(0); + fail("Expected exception"); + } catch (NullPointerException ex) { + assertNotNull(ex.getMessage()); + } + } + + @Test + public void writeInt64ArrayNoNulls() { + write(baseInsert().set("Int64ArrayValue").toInt64Array(Arrays.asList(1L, 2L)).build()); + Struct row = readLastRow("Int64ArrayValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getLongArray(0)).isEqualTo(new long[] {1, 2}); + } + + @Test + public void writeFloat64ArrayNull() { + write(baseInsert().set("Float64ArrayValue").toFloat64Array((double[]) null).build()); + Struct row = readLastRow("Float64ArrayValue"); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void writeFloat64ArrayEmpty() { + write(baseInsert().set("Float64ArrayValue").toFloat64Array(new double[] {}).build()); + Struct row = readLastRow("Float64ArrayValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getDoubleList(0)).containsExactly(); + } + + @Test + public void writeFloat64Array() { + write( + baseInsert() + .set("Float64ArrayValue") + .toFloat64Array(Arrays.asList(null, 1.0, 2.0)) + .build()); + Struct row = readLastRow("Float64ArrayValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getDoubleList(0)).containsExactly(null, 1.0, 2.0).inOrder(); + try { + row.getDoubleArray(0); + fail("Expected exception"); + } catch (NullPointerException ex) { + assertNotNull(ex.getMessage()); + } + } + + @Test + public void writeFloat64ArrayNoNulls() { + write(baseInsert().set("Float64ArrayValue").toFloat64Array(Arrays.asList(1.0, 2.0)).build()); + Struct row = readLastRow("Float64ArrayValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getDoubleArray(0).length).isEqualTo(2); + 
assertThat(row.getDoubleArray(0)[0]).isWithin(0.0).of(1.0); + assertThat(row.getDoubleArray(0)[1]).isWithin(0.0).of(2.0); + } + + @Test + public void writeStringArrayNull() { + write(baseInsert().set("StringArrayValue").toStringArray(null).build()); + Struct row = readLastRow("StringArrayValue"); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void writeStringArrayEmpty() { + write(baseInsert().set("StringArrayValue").toStringArray(Collections.emptyList()).build()); + Struct row = readLastRow("StringArrayValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getStringList(0)).containsExactly(); + } + + @Test + public void writeStringArray() { + write( + baseInsert().set("StringArrayValue").toStringArray(Arrays.asList("a", null, "b")).build()); + Struct row = readLastRow("StringArrayValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getStringList(0)).containsExactly("a", null, "b").inOrder(); + } + + @Test + public void writeJsonArrayNull() { + write(baseInsert().set("JsonArrayValue").toJsonArray(null).build()); + Struct row = readLastRow("JsonArrayValue"); + assertThat(row.isNull(0)).isTrue(); + if (dialect.dialect == Dialect.POSTGRESQL) { + assertThat(row.getColumnType("jsonarrayvalue")).isEqualTo(array(pgJsonb())); + } else { + assertThat(row.getColumnType("JsonArrayValue")).isEqualTo(array(json())); + } + } + + @Test + public void writeJsonArrayEmpty() { + write(baseInsert().set("JsonArrayValue").toJsonArray(Collections.emptyList()).build()); + Struct row = readLastRow("JsonArrayValue"); + assertThat(row.isNull(0)).isFalse(); + if (dialect.dialect == Dialect.POSTGRESQL) { + assertThat(row.getColumnType("jsonarrayvalue")).isEqualTo(array(pgJsonb())); + assertThat(row.getPgJsonbList(0)).containsExactly(); + } else { + assertThat(row.getColumnType("JsonArrayValue")).isEqualTo(array(json())); + assertThat(row.getJsonList(0)).containsExactly(); + } + } + + @Test + public void writeJsonArray() { + 
write(baseInsert().set("JsonArrayValue").toJsonArray(Arrays.asList("[]", null, "{}")).build()); + Struct row = readLastRow("JsonArrayValue"); + assertThat(row.isNull(0)).isFalse(); + if (dialect.dialect == Dialect.POSTGRESQL) { + assertThat(row.getColumnType("jsonarrayvalue")).isEqualTo(array(pgJsonb())); + assertThat(row.getPgJsonbList(0)).containsExactly("[]", null, "{}").inOrder(); + } else { + assertThat(row.getColumnType("JsonArrayValue")).isEqualTo(array(json())); + assertThat(row.getJsonList(0)).containsExactly("[]", null, "{}").inOrder(); + } + } + + @Test + public void writeJsonArrayNoNulls() { + write( + baseInsert() + .set("JsonArrayValue") + .toJsonArray(Arrays.asList("[]", "{\"color\":\"red\",\"value\":\"#f00\"}", "{}")) + .build()); + Struct row = readLastRow("JsonArrayValue"); + assertThat(row.isNull(0)).isFalse(); + if (dialect.dialect == Dialect.POSTGRESQL) { + assertThat(row.getColumnType("jsonarrayvalue")).isEqualTo(array(pgJsonb())); + assertThat(row.getPgJsonbList(0)) + .containsExactly("[]", "{\"color\": \"red\", \"value\": \"#f00\"}", "{}") + .inOrder(); + } else { + assertThat(row.getColumnType("JsonArrayValue")).isEqualTo(array(json())); + assertThat(row.getJsonList(0)) + .containsExactly("[]", "{\"color\":\"red\",\"value\":\"#f00\"}", "{}") + .inOrder(); + } + } + + @Test + public void writeBytesArrayNull() { + write(baseInsert().set("BytesArrayValue").toBytesArray(null).build()); + Struct row = readLastRow("BytesArrayValue"); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void writeBytesArrayEmpty() { + write(baseInsert().set("BytesArrayValue").toBytesArray(Collections.emptyList()).build()); + Struct row = readLastRow("BytesArrayValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getBytesList(0)).containsExactly(); + } + + @Test + public void writeBytesArray() { + List data = Arrays.asList(ByteArray.copyFrom("a"), ByteArray.copyFrom("b"), null); + 
write(baseInsert().set("BytesArrayValue").toBytesArray(data).build()); + Struct row = readLastRow("BytesArrayValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getBytesList(0)).isEqualTo(data); + } + + @Test + public void writeTimestampArrayNull() { + write(baseInsert().set("TimestampArrayValue").toTimestampArray(null).build()); + Struct row = readLastRow("TimestampArrayValue"); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void writeTimestampArrayEmpty() { + write( + baseInsert().set("TimestampArrayValue").toTimestampArray(Collections.emptyList()).build()); + Struct row = readLastRow("TimestampArrayValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getTimestampList(0)).containsExactly(); + } + + @Test + public void writeTimestampArray() { + Timestamp t1 = Timestamp.parseTimestamp("2016-09-18T00:00:00Z"); + Timestamp t2 = Timestamp.parseTimestamp("2016-09-19T00:00:00Z"); + write( + baseInsert() + .set("TimestampArrayValue") + .toTimestampArray(Arrays.asList(t1, null, t2)) + .build()); + Struct row = readLastRow("TimestampArrayValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getTimestampList(0)).containsExactly(t1, null, t2).inOrder(); + } + + @Test + public void writeDateArrayNull() { + write(baseInsert().set("DateArrayValue").toDateArray(null).build()); + Struct row = readLastRow("DateArrayValue"); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void writeDateArrayEmpty() { + write(baseInsert().set("DateArrayValue").toDateArray(Collections.emptyList()).build()); + Struct row = readLastRow("DateArrayValue"); + assertThat(row.isNull(0)).isFalse(); + assertThat(row.getDateList(0)).containsExactly(); + } + + @Test + public void writeDateArray() { + Date d1 = Date.parseDate("2016-09-18"); + Date d2 = Date.parseDate("2016-09-19"); + write(baseInsert().set("DateArrayValue").toDateArray(Arrays.asList(d1, null, d2)).build()); + Struct row = readLastRow("DateArrayValue"); + 
assertThat(row.isNull(0)).isFalse(); + assertThat(row.getDateList(0)).containsExactly(d1, null, d2).inOrder(); + } + + @Test + public void writeNumericArrayNull() { + write(baseInsert().set("NumericArrayValue").toNumericArray(null).build()); + Struct row = readLastRow("NumericArrayValue"); + assertThat(row.isNull(0)).isTrue(); + } + + @Test + public void writeNumericArrayEmpty() { + write(baseInsert().set("NumericArrayValue").toNumericArray(ImmutableList.of()).build()); + Struct row = readLastRow("NumericArrayValue"); + assertThat(row.isNull(0)).isFalse(); + if (dialect.dialect == Dialect.GOOGLE_STANDARD_SQL) { + assertThat(row.getBigDecimalList(0)).containsExactly(); + } else { + assertThat(row.getStringList(0)).containsExactly(); + } + } + + @Test + public void writeNumericArray() { + write( + baseInsert() + .set("NumericArrayValue") + .toNumericArray( + Arrays.asList(new BigDecimal("3.141592"), new BigDecimal("6.626"), null)) + .build()); + Struct row = readLastRow("NumericArrayValue"); + assertThat(row.isNull(0)).isFalse(); + if (dialect.dialect == Dialect.GOOGLE_STANDARD_SQL) { + assertThat(row.getBigDecimalList(0)) + .containsExactly(BigDecimal.valueOf(3141592, 6), BigDecimal.valueOf(6626, 3), null); + } else { + assertThat(row.getStringList(0)).containsExactly("3.141592", "6.626", null).inOrder(); + } + } + + @Test + public void writeNumericArrayNoNulls() { + write( + baseInsert() + .set("NumericArrayValue") + .toNumericArray(Arrays.asList(new BigDecimal("3.141592"), new BigDecimal("6.626"))) + .build()); + Struct row = readLastRow("NumericArrayValue"); + assertThat(row.isNull(0)).isFalse(); + if (dialect.dialect == Dialect.GOOGLE_STANDARD_SQL) { + assertThat(row.getBigDecimalList(0)) + .containsExactly(BigDecimal.valueOf(3141592, 6), BigDecimal.valueOf(6626, 3)); + } else { + assertThat(row.getStringList(0)) + .containsExactly( + BigDecimal.valueOf(3141592, 6).toString(), BigDecimal.valueOf(6626, 3).toString()) + .inOrder(); + } + } + + @Test + public void 
tableNotFound() { + // TODO(user): More precise matchers! Customer code needs to discern table not found, column + // not found, etc. + try { + write( + Mutation.newInsertBuilder("TableThatDoesNotExist") + .set("K") + .to(uniqueString()) + .set("StringValue") + .to("V1") + .build()); + fail("Expected exception"); + } catch (SpannerException ex) { + if (env.getTestHelper() + .getOptions() + .getSessionPoolOptions() + .getUseMultiplexedSessionForRW()) { + // Backend currently returns INVALID_ARGUMENT, however this will be changed to NOT_FOUND in + // future. + assertThat(ex.getErrorCode()).isAnyOf(ErrorCode.NOT_FOUND, ErrorCode.INVALID_ARGUMENT); + } else { + assertThat(ex.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + } + } + } + + @Test + public void columnNotFound() { + try { + write(baseInsert().set("ColumnThatDoesNotExist").to("V1").build()); + fail("Expected exception"); + } catch (SpannerException ex) { + if (env.getTestHelper() + .getOptions() + .getSessionPoolOptions() + .getUseMultiplexedSessionForRW()) { + // Backend currently returns INVALID_ARGUMENT, however this will be changed to NOT_FOUND in + // future.
+ assertThat(ex.getErrorCode()).isAnyOf(ErrorCode.NOT_FOUND, ErrorCode.INVALID_ARGUMENT); + } else { + assertThat(ex.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + } + } + } + + @Test + public void incorrectType() { + try { + write(baseInsert().set("StringValue").to(1.234).build()); + fail("Expected exception"); + } catch (SpannerException ex) { + if (env.getTestHelper() + .getOptions() + .getSessionPoolOptions() + .getUseMultiplexedSessionForRW()) { + assertThat(ex.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + } else { + assertThat(ex.getErrorCode()).isEqualTo(ErrorCode.FAILED_PRECONDITION); + assertThat(ex.getMessage()).contains("STRING"); + } + } + } + + @Test + public void cancellation() { + Context.CancellableContext context = Context.current().withCancellation(); + context.cancel(new RuntimeException("Cancelled by test")); + Runnable work = + context.wrap( + () -> { + write(baseInsert().set("BoolValue").to(true).build()); + }); + + try { + work.run(); + } catch (SpannerException e) { + MatcherAssert.assertThat(e, isSpannerException(ErrorCode.CANCELLED)); + } + } + + @Test + public void deadline() { + ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); + // Cloud Spanner is fast, but not this fast :-) + Context.CancellableContext context = + Context.current().withDeadlineAfter(10, TimeUnit.NANOSECONDS, executor); + Runnable work = + context.wrap( + () -> { + write(baseInsert().set("BoolValue").to(true).build()); + }); + + try { + work.run(); + } catch (SpannerException e) { + MatcherAssert.assertThat(e, isSpannerException(ErrorCode.DEADLINE_EXCEEDED)); + } finally { + executor.shutdown(); + } + } + + @Test + public void testWriteUntypedNullValuesGoogleSQL() { + assumeFalse( + "PostgreSQL uses a different parameter format", dialect.dialect == Dialect.POSTGRESQL); + Value untypedNull = + Value.untyped( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()); + assertEquals( + 
Long.valueOf(1L), + client + .readWriteTransaction() + .run( + transaction -> + transaction.executeUpdate( + Statement.newBuilder( + "insert into T (K,BoolValue,Int64Value,Float64Value,StringValue," + + "JsonValue,BytesValue,TimestampValue,DateValue,NumericValue," + + "BoolArrayValue,Int64ArrayValue,Float64ArrayValue," + + "StringArrayValue,JsonArrayValue,BytesArrayValue," + + "TimestampArrayValue,DateArrayValue,NumericArrayValue) values" + + " (@k, @bool, @int64, @float64, @string, @json, @bytes," + + " @timestamp, @date, @numeric, @boolArray, @int64Array," + + " @float64Array, @stringArray, @jsonArray, @bytesArray," + + " @timestampArray, @dateArray, @numericArray)") + .bind("k") + .to(uniqueString()) + .bind("bool") + .to(untypedNull) + .bind("int64") + .to(untypedNull) + .bind("float64") + .to(untypedNull) + .bind("string") + .to(untypedNull) + .bind("json") + .to(untypedNull) + .bind("bytes") + .to(untypedNull) + .bind("timestamp") + .to(untypedNull) + .bind("date") + .to(untypedNull) + .bind("numeric") + .to(untypedNull) + .bind("boolArray") + .to(untypedNull) + .bind("int64Array") + .to(untypedNull) + .bind("float64Array") + .to(untypedNull) + .bind("stringArray") + .to(untypedNull) + .bind("jsonArray") + .to(untypedNull) + .bind("bytesArray") + .to(untypedNull) + .bind("timestampArray") + .to(untypedNull) + .bind("dateArray") + .to(untypedNull) + .bind("numericArray") + .to(untypedNull) + .build()))); + } + + @Test + public void testTypeNamesGoogleSQL() { + assumeTrue( + "GoogleSQL uses different type names", dialect.dialect == Dialect.GOOGLE_STANDARD_SQL); + + try (ResultSet resultSet = + client + .singleUse() + .executeQuery( + Statement.newBuilder( + "select column_name, spanner_type " + + "from information_schema.columns " + + "where table_schema=@schema " + + "and table_name=@table " + + "order by ordinal_position") + .bind("schema") + .to(dialect.dialect.getDefaultSchema()) + .bind("table") + .to("T") + .build())) { + assertTrue(resultSet.next()); + 
assertEquals("K", resultSet.getString("column_name")); + assertEquals( + Type.string().getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type").replaceFirst("\\((?:\\d+|MAX)\\)", "")); + + assertTrue(resultSet.next()); + assertEquals("BoolValue", resultSet.getString("column_name")); + assertEquals( + Type.bool().getSpannerTypeName(dialect.dialect), resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("Int64Value", resultSet.getString("column_name")); + assertEquals( + Type.int64().getSpannerTypeName(dialect.dialect), resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("Float64Value", resultSet.getString("column_name")); + assertEquals( + Type.float64().getSpannerTypeName(dialect.dialect), resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("StringValue", resultSet.getString("column_name")); + assertEquals( + Type.string().getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type").replaceFirst("\\((?:\\d+|MAX)\\)", "")); + + assertTrue(resultSet.next()); + assertEquals("JsonValue", resultSet.getString("column_name")); + assertEquals( + Type.json().getSpannerTypeName(dialect.dialect), resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("BytesValue", resultSet.getString("column_name")); + assertEquals( + Type.bytes().getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type").replaceFirst("\\((?:\\d+|MAX)\\)", "")); + + assertTrue(resultSet.next()); + assertEquals("TimestampValue", resultSet.getString("column_name")); + assertEquals( + Type.timestamp().getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("DateValue", resultSet.getString("column_name")); + assertEquals( + Type.date().getSpannerTypeName(dialect.dialect), resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + 
assertEquals("NumericValue", resultSet.getString("column_name")); + assertEquals( + Type.numeric().getSpannerTypeName(dialect.dialect), resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("BoolArrayValue", resultSet.getString("column_name")); + assertEquals( + Type.array(Type.bool()).getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("Int64ArrayValue", resultSet.getString("column_name")); + assertEquals( + Type.array(Type.int64()).getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("Float64ArrayValue", resultSet.getString("column_name")); + assertEquals( + Type.array(Type.float64()).getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("StringArrayValue", resultSet.getString("column_name")); + assertEquals( + Type.array(Type.string()).getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type").replaceFirst("\\((?:\\d+|MAX)\\)", "")); + + assertTrue(resultSet.next()); + assertEquals("JsonArrayValue", resultSet.getString("column_name")); + assertEquals( + Type.array(Type.json()).getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("BytesArrayValue", resultSet.getString("column_name")); + assertEquals( + Type.array(Type.bytes()).getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type").replaceFirst("\\((?:\\d+|MAX)\\)", "")); + + assertTrue(resultSet.next()); + assertEquals("TimestampArrayValue", resultSet.getString("column_name")); + assertEquals( + Type.array(Type.timestamp()).getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("DateArrayValue", resultSet.getString("column_name")); + assertEquals( + 
Type.array(Type.date()).getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("NumericArrayValue", resultSet.getString("column_name")); + assertEquals( + Type.array(Type.numeric()).getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type")); + + assertFalse(resultSet.next()); + } + } + + @Test + public void testWriteUntypedNullValuesPostgreSQL() { + assumeTrue( + "PostgreSQL uses a different parameter format", dialect.dialect == Dialect.POSTGRESQL); + Value untypedNull = + Value.untyped( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()); + assertEquals( + Long.valueOf(1L), + client + .readWriteTransaction() + .run( + transaction -> + transaction.executeUpdate( + Statement.newBuilder( + "insert into T (" + + "K," + + "BoolValue," + + "Int64Value," + + "Float64Value," + + "StringValue," + + "JsonValue," + + "BytesValue," + + "TimestampValue," + + "DateValue," + + "NumericValue," + + "BoolArrayValue," + + "Int64ArrayValue," + + "Float64ArrayValue," + + "StringArrayValue," + + "JsonArrayValue," + + "BytesArrayValue," + + "TimestampArrayValue," + + "DateArrayValue," + + "NumericArrayValue" + + ") values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, " + + "$11, $12, $13, $14, $15, $16, $17, $18, $19)") + .bind("p1") + .to(uniqueString()) + .bind("p2") + .to(untypedNull) + .bind("p3") + .to(untypedNull) + .bind("p4") + .to(untypedNull) + .bind("p5") + .to(untypedNull) + .bind("p6") + .to(untypedNull) + .bind("p7") + .to(untypedNull) + .bind("p8") + .to(untypedNull) + .bind("p9") + .to(untypedNull) + .bind("p10") + .to(untypedNull) + .bind("p11") + .to(untypedNull) + .bind("p12") + .to(untypedNull) + .bind("p13") + .to(untypedNull) + .bind("p14") + .to(untypedNull) + .bind("p15") + .to(untypedNull) + .bind("p16") + .to(untypedNull) + .bind("p17") + .to(untypedNull) + .bind("p18") + .to(untypedNull) + .bind("p19") + .to(untypedNull) + .build()))); + } + + 
@Test + public void testTypeNamesPostgreSQL() { + assumeTrue("PostgreSQL uses different type names", dialect.dialect == Dialect.POSTGRESQL); + + try (ResultSet resultSet = + client + .singleUse() + .executeQuery( + Statement.newBuilder( + "select column_name, spanner_type " + + "from information_schema.columns " + + "where table_schema=$1 " + + "and table_name=$2 " + + "order by ordinal_position") + .bind("p1") + .to(dialect.dialect.getDefaultSchema()) + .bind("p2") + .to("t") + .build())) { + assertTrue(resultSet.next()); + assertEquals("k", resultSet.getString("column_name")); + assertEquals( + Type.string().getSpannerTypeName(dialect.dialect), resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("boolvalue", resultSet.getString("column_name")); + assertEquals( + Type.bool().getSpannerTypeName(dialect.dialect), resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("int64value", resultSet.getString("column_name")); + assertEquals( + Type.int64().getSpannerTypeName(dialect.dialect), resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("float64value", resultSet.getString("column_name")); + assertEquals( + Type.float64().getSpannerTypeName(dialect.dialect), resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("stringvalue", resultSet.getString("column_name")); + assertEquals( + Type.string().getSpannerTypeName(dialect.dialect), resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("jsonvalue", resultSet.getString("column_name")); + assertEquals( + Type.pgJsonb().getSpannerTypeName(dialect.dialect), resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("bytesvalue", resultSet.getString("column_name")); + assertEquals( + Type.bytes().getSpannerTypeName(dialect.dialect), resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + 
assertEquals("timestampvalue", resultSet.getString("column_name")); + assertEquals("spanner.commit_timestamp", resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("datevalue", resultSet.getString("column_name")); + assertEquals( + Type.date().getSpannerTypeName(dialect.dialect), resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("numericvalue", resultSet.getString("column_name")); + assertEquals( + Type.pgNumeric().getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("boolarrayvalue", resultSet.getString("column_name")); + assertEquals( + Type.array(Type.bool()).getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("int64arrayvalue", resultSet.getString("column_name")); + assertEquals( + Type.array(Type.int64()).getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("float64arrayvalue", resultSet.getString("column_name")); + assertEquals( + Type.array(Type.float64()).getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("stringarrayvalue", resultSet.getString("column_name")); + assertEquals( + Type.array(Type.string()).getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("jsonarrayvalue", resultSet.getString("column_name")); + assertEquals( + Type.array(Type.pgJsonb()).getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("bytesarrayvalue", resultSet.getString("column_name")); + assertEquals( + Type.array(Type.bytes()).getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("timestamparrayvalue", 
resultSet.getString("column_name")); + assertEquals( + Type.array(Type.timestamp()).getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("datearrayvalue", resultSet.getString("column_name")); + assertEquals( + Type.array(Type.date()).getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type")); + + assertTrue(resultSet.next()); + assertEquals("numericarrayvalue", resultSet.getString("column_name")); + assertEquals( + Type.array(Type.pgNumeric()).getSpannerTypeName(dialect.dialect), + resultSet.getString("spanner_type")); + + assertFalse(resultSet.next()); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/slow/ITBackupTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/slow/ITBackupTest.java new file mode 100644 index 000000000000..bc03637fe3c7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/slow/ITBackupTest.java @@ -0,0 +1,917 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.it.slow; + +import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; +import static com.google.cloud.spanner.testing.ExperimentalHostHelper.isExperimentalHost; +import static com.google.cloud.spanner.testing.TimestampHelper.afterDays; +import static com.google.cloud.spanner.testing.TimestampHelper.afterMinutes; +import static com.google.cloud.spanner.testing.TimestampHelper.daysAgo; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeFalse; + +import com.google.api.client.util.Lists; +import com.google.api.gax.grpc.GrpcInterceptorProvider; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.paging.Page; +import com.google.api.gax.rpc.FailedPreconditionException; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Backup; +import com.google.cloud.spanner.BackupId; +import com.google.cloud.spanner.BackupInfo.State; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Instance; +import com.google.cloud.spanner.InstanceAdminClient; +import com.google.cloud.spanner.InstanceId; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Restore; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SlowTest; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerOptions; +import 
com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.encryption.EncryptionConfigs; +import com.google.cloud.spanner.testing.RemoteSpannerHelper; +import com.google.common.base.Preconditions; +import com.google.common.base.Stopwatch; +import com.google.common.collect.Iterables; +import com.google.longrunning.Operation; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.spanner.admin.database.v1.CreateBackupMetadata; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; +import com.google.spanner.admin.database.v1.RestoreSourceType; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.ForwardingClientCall.SimpleForwardingClientCall; +import io.grpc.ForwardingClientCallListener.SimpleForwardingClientCallListener; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import io.grpc.Status; +import io.grpc.Status.Code; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.StreamSupport; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.FixMethodOrder; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.junit.runners.MethodSorters; + +/** + * Integration tests creating, reading, 
updating and deleting backups. This test class combines + * several tests into one long test to reduce the total execution time. + */ +@Category(SlowTest.class) +@RunWith(JUnit4.class) +@FixMethodOrder(MethodSorters.NAME_ASCENDING) +public class ITBackupTest { + private static final long BACKUP_TIMEOUT_MINUTES = 30L; + private static final long DATABASE_TIMEOUT_MINUTES = 5; + private static final Logger logger = Logger.getLogger(ITBackupTest.class.getName()); + private static final String EXPECTED_OP_NAME_FORMAT = "%s/backups/%s/operations/"; + private static final String KMS_KEY_NAME_PROPERTY = "spanner.testenv.kms_key.name"; + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + private static String keyName; + + private static DatabaseAdminClient dbAdminClient; + private static Instance instance; + private static RemoteSpannerHelper testHelper; + private static final List databases = new ArrayList<>(); + private static final List backups = new ArrayList<>(); + private static String projectId; + private static String instanceId; + + @BeforeClass + public static void setup() { + assumeFalse("backups are not supported on experimental host yet", isExperimentalHost()); + assumeFalse("backups are not supported on the emulator", isUsingEmulator()); + keyName = System.getProperty(KMS_KEY_NAME_PROPERTY); + Preconditions.checkNotNull( + keyName, + "Key name is null, please set a key to be used for this test. 
The necessary permissions" + + " should be grant to the spanner service account according to the CMEK user guide."); + + logger.info("Setting up tests"); + testHelper = env.getTestHelper(); + dbAdminClient = testHelper.getClient().getDatabaseAdminClient(); + InstanceAdminClient instanceAdminClient = testHelper.getClient().getInstanceAdminClient(); + instance = instanceAdminClient.getInstance(testHelper.getInstanceId().getInstance()); + projectId = testHelper.getInstanceId().getProject(); + instanceId = testHelper.getInstanceId().getInstance(); + logger.info("Finished setup"); + + // Cancel any backup operation that has been started by this integration test if it has been + // running for at least 6 hours. + logger.info("Cancelling long-running test backup operations"); + Pattern pattern = Pattern.compile(".*/backups/testbck_\\d{6}_\\d{4}_bck\\d/operations/.*"); + try { + for (Operation operation : + dbAdminClient.listBackupOperations(instance.getId().getInstance()).iterateAll()) { + Matcher matcher = pattern.matcher(operation.getName()); + if (matcher.matches()) { + if (!operation.getDone()) { + Timestamp currentTime = Timestamp.now(); + Timestamp startTime = + Timestamp.fromProto( + operation + .getMetadata() + .unpack(CreateBackupMetadata.class) + .getProgress() + .getStartTime()); + long diffSeconds = currentTime.getSeconds() - startTime.getSeconds(); + if (TimeUnit.HOURS.convert(diffSeconds, TimeUnit.SECONDS) >= 6L) { + logger.warning( + String.format( + "Cancelling test backup operation %s that was started at %s", + operation.getName(), startTime)); + dbAdminClient.cancelOperation(operation.getName()); + } + } + } + } + } catch (InvalidProtocolBufferException e) { + logger.log(Level.WARNING, "Could not list all existing backup operations.", e); + } + logger.info("Finished checking existing test backup operations"); + } + + @AfterClass + public static void tearDown() throws Exception { + logger.info("Starting test teardown"); + for (String backup : backups) { + 
logger.info(String.format("Waiting for optimize operation for backup %s to finish", backup)); + waitForDbOperations(backup); + logger.info(String.format("Deleting backup %s", backup)); + dbAdminClient.deleteBackup(testHelper.getInstanceId().getInstance(), backup); + } + backups.clear(); + for (String db : databases) { + logger.info(String.format("Dropping database %s", db)); + dbAdminClient.dropDatabase(testHelper.getInstanceId().getInstance(), db); + } + } + + private static void waitForDbOperations(String backupId) throws InterruptedException { + try { + Backup backupMetadata = + dbAdminClient.getBackup(testHelper.getInstanceId().getInstance(), backupId); + assertNotNull(backupMetadata.getProto()); + boolean allDbOpsDone = false; + while (!allDbOpsDone) { + allDbOpsDone = true; + for (String referencingDb : backupMetadata.getProto().getReferencingDatabasesList()) { + String filter = + String.format( + "name:%s/operations/ AND " + + "(" + + "(metadata.@type:type.googleapis.com/" + + "google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata) OR " + + "(metadata.@type:type.googleapis.com/" + + "google.spanner.admin.database.v1.RestoreDatabaseMetadata)" + + ")", + referencingDb); + for (Operation op : + dbAdminClient + .listDatabaseOperations( + testHelper.getInstanceId().getInstance(), Options.filter(filter)) + .iterateAll()) { + if (!op.getDone()) { + Thread.sleep(5000L); + allDbOpsDone = false; + break; + } + } + } + } + } catch (SpannerException e) { + if (e.getErrorCode() == ErrorCode.NOT_FOUND) { + return; + } + throw e; + } + } + + @Test + public void test01_Backups() throws InterruptedException, ExecutionException, TimeoutException { + final String databaseId = testHelper.getUniqueDatabaseId() + "_db1"; + final Database sourceDatabase = + dbAdminClient + .newDatabaseBuilder(DatabaseId.of(projectId, instanceId, databaseId)) + .setEncryptionConfig(EncryptionConfigs.customerManagedEncryption(keyName)) + .build(); + logger.info(String.format("Creating 
test database %s", databaseId)); + OperationFuture createDatabaseOperation = + dbAdminClient.createDatabase( + sourceDatabase, + Collections.singletonList( + "CREATE TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)")); + // Make sure the database has been created before we try to create a backup. + Database database = createDatabaseOperation.get(DATABASE_TIMEOUT_MINUTES, TimeUnit.MINUTES); + databases.add(database.getId().getDatabase()); + // Insert some data to make sure the backup will have a size>0. + DatabaseClient client = testHelper.getDatabaseClient(database); + client.writeAtLeastOnce( + Collections.singletonList( + Mutation.newInsertOrUpdateBuilder("FOO") + .set("ID") + .to(1L) + .set("NAME") + .to("TEST") + .build())); + + // Verifies that the database encryption has been properly set + testDatabaseEncryption(database, keyName); + // Verifies that the database dialect has been properly set + testDatabaseDialect(database, Dialect.GOOGLE_STANDARD_SQL); + + // Create a backup of the database. + String backupId = testHelper.getUniqueBackupId() + "_bck1"; + Timestamp expireTime = afterDays(7); + Timestamp versionTime = getCurrentTimestamp(client); + logger.info(String.format("Creating backup %s", backupId)); + // This backup has the version time specified as the server's current timestamp + // This backup is encrypted with a customer managed key + // The expiry time is 7 days in the future. + final Backup backupToCreate = + dbAdminClient + .newBackupBuilder(BackupId.of(projectId, instanceId, backupId)) + .setDatabase(database.getId()) + .setExpireTime(expireTime) + .setVersionTime(versionTime) + .setEncryptionConfig(EncryptionConfigs.customerManagedEncryption(keyName)) + .build(); + OperationFuture operation = + dbAdminClient.createBackup(backupToCreate); + backups.add(backupId); + + // Execute metadata tests as part of this integration test to reduce total execution time. 
+ testMetadata(operation, backupId, database); + + // Ensure that the backup has been created before we proceed. + logger.info("Waiting for backup operation to finish"); + Backup backup = operation.get(BACKUP_TIMEOUT_MINUTES, TimeUnit.MINUTES); + + // Verifies that backup version time is the specified one + testBackupVersionTime(backup, versionTime); + // Verifies that backup encryption has been properly set + testBackupEncryption(backup, keyName); + + // Insert some more data into the database to get a timestamp from the server. + Timestamp commitTs = + client.writeAtLeastOnce( + Collections.singletonList( + Mutation.newInsertOrUpdateBuilder("FOO") + .set("ID") + .to(2L) + .set("NAME") + .to("TEST2") + .build())); + + // Test listing operations. + // List all backups. + logger.info("Listing all backups"); + assertTrue(Iterables.contains(instance.listBackups().iterateAll(), backup)); + // List all backups whose names contain 'bck1'. + logger.info("Listing backups with name bck1"); + assertTrue( + Iterables.elementsEqual( + dbAdminClient + .listBackups( + instanceId, Options.filter(String.format("name:%s", backup.getId().getName()))) + .iterateAll(), + Collections.singleton(backup))); + logger.info("Listing ready backups"); + Iterable readyBackups = + dbAdminClient.listBackups(instanceId, Options.filter("state:READY")).iterateAll(); + assertTrue(Iterables.contains(readyBackups, backup)); + // List all backups for databases whose names contain 'db1'. + logger.info("Listing backups for database db1"); + assertTrue( + Iterables.elementsEqual( + dbAdminClient + .listBackups( + instanceId, + Options.filter(String.format("database:%s", database.getId().getName()))) + .iterateAll(), + Collections.singleton(backup))); + // List all backups that were created before a certain time. 
+ Timestamp ts = Timestamp.ofTimeSecondsAndNanos(commitTs.getSeconds(), 0); + logger.info(String.format("Listing backups created before %s", ts)); + assertTrue( + Iterables.contains( + dbAdminClient + .listBackups(instanceId, Options.filter(String.format("create_time<\"%s\"", ts))) + .iterateAll(), + backup)); + // List all backups with a size > 0. + logger.info("Listing backups with size>0"); + assertTrue( + Iterables.contains( + dbAdminClient.listBackups(instanceId, Options.filter("size_bytes>0")).iterateAll(), + backup)); + + // Test pagination. + testPagination(); + logger.info("Finished listBackup tests"); + + // Execute other tests as part of this integration test to reduce total execution time. + testGetBackup(database, backupId, expireTime); + testUpdateBackup(backup); + testCreateInvalidExpirationDate(database); + testRestore(backup, versionTime, keyName); + + testCancelBackupOperation(database); + // Finished all tests. + logger.info("Finished all backup tests"); + } + + @Test + public void test02_RetryNonIdempotentRpcsReturningLongRunningOperations() throws Exception { + assumeFalse( + "Querying long-running operations is not supported on the emulator", isUsingEmulator()); + + // RPCs that return a long-running operation such as CreateDatabase, CreateBackup and + // RestoreDatabase are non-idempotent and can normally not be automatically retried in case of a + // transient failure. The client library will however automatically query the backend to check + // whether the corresponding operation was started or not, and if it was, it will pick up the + // existing operation. If no operation is found, a new RPC call will be executed to start the + // operation. 
+ + List databases = new ArrayList<>(); + String initialDatabaseId; + Timestamp initialDbCreateTime; + + // CreateDatabase + InjectErrorInterceptorProvider createDbInterceptor = + new InjectErrorInterceptorProvider("CreateDatabase"); + SpannerOptions options = + testHelper.getOptions().toBuilder().setInterceptorProvider(createDbInterceptor).build(); + try (Spanner spanner = options.getService()) { + initialDatabaseId = testHelper.getUniqueDatabaseId(); + DatabaseAdminClient client = spanner.getDatabaseAdminClient(); + OperationFuture op = + client.createDatabase( + testHelper.getInstanceId().getInstance(), initialDatabaseId, Collections.emptyList()); + databases.add(op.get(DATABASE_TIMEOUT_MINUTES, TimeUnit.MINUTES)); + // Keep track of the original create time of this database, as we will drop this database + // later and create another one with the exact same name. That means that the ListOperations + // call will return at least two CreateDatabase operations. The retry logic should always + // pick the last one. + initialDbCreateTime = op.get(DATABASE_TIMEOUT_MINUTES, TimeUnit.MINUTES).getCreateTime(); + // Assert that the CreateDatabase RPC was called only once, and that the operation tracking + // was resumed through a GetOperation call. 
+ assertEquals(1, createDbInterceptor.methodCount.get()); + assertTrue(createDbInterceptor.getOperationCount.get() >= 1); + } + + // CreateBackup + InjectErrorInterceptorProvider createBackupInterceptor = + new InjectErrorInterceptorProvider("CreateBackup"); + options = + testHelper.getOptions().toBuilder().setInterceptorProvider(createBackupInterceptor).build(); + String backupId = String.format("test-bck-%08d", new Random().nextInt(100000000)); + try (Spanner spanner = options.getService()) { + String databaseId = databases.get(0).getId().getDatabase(); + DatabaseAdminClient client = spanner.getDatabaseAdminClient(); + OperationFuture op = + client.createBackup( + testHelper.getInstanceId().getInstance(), + backupId, + databaseId, + Timestamp.ofTimeSecondsAndNanos( + Timestamp.now().getSeconds() + TimeUnit.SECONDS.convert(7L, TimeUnit.DAYS), 0)); + Stopwatch watch = Stopwatch.createStarted(); + while (createBackupInterceptor.methodCount.get() < 1 + && createBackupInterceptor.getOperationCount.get() < 1 + && watch.elapsed(TimeUnit.SECONDS) < 120) { + //noinspection BusyWait + Thread.sleep(5000L); + } + client.cancelOperation(op.getName()); + // Assert that the CreateBackup RPC was called only once, and that the operation tracking + // was resumed through a GetOperation call. 
+ assertEquals(1, createBackupInterceptor.methodCount.get()); + assertTrue(createBackupInterceptor.getOperationCount.get() >= 1); + } + + // RestoreBackup + if (!backups.isEmpty()) { + InjectErrorInterceptorProvider restoreBackupInterceptor = + new InjectErrorInterceptorProvider("RestoreDatabase"); + options = + testHelper.getOptions().toBuilder() + .setInterceptorProvider(restoreBackupInterceptor) + .build(); + try (Spanner spanner = options.getService()) { + String restoredDbId = testHelper.getUniqueDatabaseId(); + DatabaseAdminClient client = spanner.getDatabaseAdminClient(); + OperationFuture op = + client.restoreDatabase( + testHelper.getInstanceId().getInstance(), + backups.get(0), + testHelper.getInstanceId().getInstance(), + restoredDbId); + Stopwatch watch = Stopwatch.createStarted(); + while (restoreBackupInterceptor.methodCount.get() < 1 + && restoreBackupInterceptor.getOperationCount.get() < 1 + && watch.elapsed(TimeUnit.SECONDS) < 120) { + //noinspection BusyWait + Thread.sleep(5000L); + } + try { + client.cancelOperation(op.getName()); + } catch (SpannerException | ExecutionException e) { + // Ignore, this can happen, as the restore operation sometimes fails to start if there + // is already a restore operation running on the instance. + } + // Assert that the RestoreDatabase RPC was called only once, and that the operation + // tracking was resumed through a GetOperation call. + assertEquals(1, restoreBackupInterceptor.methodCount.get()); + assertTrue(restoreBackupInterceptor.getOperationCount.get() >= 1); + } + } + + // Create another database with the exact same name as the first database. + createDbInterceptor = new InjectErrorInterceptorProvider("CreateDatabase"); + options = + testHelper.getOptions().toBuilder().setInterceptorProvider(createDbInterceptor).build(); + try (Spanner spanner = options.getService()) { + DatabaseAdminClient client = spanner.getDatabaseAdminClient(); + // First drop the initial database. 
+ client.dropDatabase(testHelper.getInstanceId().getInstance(), initialDatabaseId); + // Now re-create a database with the exact same name. + OperationFuture op = + client.createDatabase( + testHelper.getInstanceId().getInstance(), initialDatabaseId, Collections.emptyList()); + // Check that the second database was created and has a greater creation time than the + // first. + Timestamp secondCreationTime = + op.get(DATABASE_TIMEOUT_MINUTES, TimeUnit.MINUTES).getCreateTime(); + // TODO: Change this to greaterThan when the create time of a database is reported back by + // the server. + assertTrue(secondCreationTime.compareTo(initialDbCreateTime) >= 0); + // Assert that the CreateDatabase RPC was called only once, and that the operation tracking + // was resumed through a GetOperation call. + assertEquals(1, createDbInterceptor.methodCount.get()); + assertTrue(createDbInterceptor.getOperationCount.get() >= 1); + } + } + + @Test + public void test03_Delete() throws InterruptedException { + Assert.assertFalse("No backups created", backups.isEmpty()); + String backupId = backups.get(0); + waitForDbOperations(backupId); + // Get the backup. + logger.info(String.format("Fetching backup %s", backupId)); + Backup backup = instance.getBackup(backupId); + // Delete it. + logger.info(String.format("Deleting backup %s", backupId)); + backup.delete(); + // Try to get it again. This should cause a NOT_FOUND error. + logger.info(String.format("Fetching non-existent backup %s", backupId)); + SpannerException exception = + assertThrows(SpannerException.class, () -> instance.getBackup(backupId)); + assertEquals(ErrorCode.NOT_FOUND, exception.getErrorCode()); + // Try to delete the non-existent backup. This should be a no-op. 
+ logger.info(String.format("Deleting non-existent backup %s", backupId)); + backup.delete(); + logger.info("Finished delete tests"); + } + + @Test(expected = SpannerException.class) + public void test04_backupCreationWithVersionTimeTooFarInThePastFails() throws Exception { + final Database testDatabase = testHelper.createTestDatabase(); + final DatabaseId databaseId = testDatabase.getId(); + final InstanceId instanceId = databaseId.getInstanceId(); + final String backupId = testHelper.getUniqueBackupId(); + final Timestamp expireTime = afterDays(7); + final Timestamp versionTime = daysAgo(30); + final Backup backupToCreate = + dbAdminClient + .newBackupBuilder(BackupId.of(instanceId, backupId)) + .setDatabase(databaseId) + .setExpireTime(expireTime) + .setVersionTime(versionTime) + .build(); + + getOrThrow(dbAdminClient.createBackup(backupToCreate)); + } + + @Test(expected = SpannerException.class) + public void test05_backupCreationWithVersionTimeInTheFutureFails() throws Exception { + final Database testDatabase = testHelper.createTestDatabase(); + final DatabaseId databaseId = testDatabase.getId(); + final InstanceId instanceId = databaseId.getInstanceId(); + final String backupId = testHelper.getUniqueBackupId(); + final Timestamp expireTime = afterDays(7); + final Timestamp versionTime = afterDays(1); + final Backup backupToCreate = + dbAdminClient + .newBackupBuilder(BackupId.of(instanceId, backupId)) + .setDatabase(databaseId) + .setExpireTime(expireTime) + .setVersionTime(versionTime) + .build(); + + getOrThrow(dbAdminClient.createBackup(backupToCreate)); + } + + private void getOrThrow(OperationFuture operation) + throws InterruptedException, ExecutionException { + try { + operation.get(); + } catch (ExecutionException e) { + if (e.getCause() instanceof SpannerException) { + throw (SpannerException) e.getCause(); + } else { + throw e; + } + } + } + + private Timestamp getCurrentTimestamp(DatabaseClient client) { + try (ResultSet resultSet = + 
client.singleUse().executeQuery(Statement.of("SELECT CURRENT_TIMESTAMP()"))) { + resultSet.next(); + return resultSet.getTimestamp(0); + } + } + + private void testBackupVersionTime(Backup backup, Timestamp versionTime) { + logger.info("Verifying backup version time for " + backup.getId()); + assertEquals(versionTime, backup.getVersionTime()); + logger.info("Done verifying backup version time for " + backup.getId()); + } + + private void testDatabaseEncryption(Database database, String expectedKey) { + logger.info("Verifying database encryption for " + database.getId()); + assertNotNull(database.getEncryptionConfig()); + assertEquals(expectedKey, database.getEncryptionConfig().getKmsKeyName()); + logger.info("Done verifying database encryption for " + database.getId()); + } + + private void testDatabaseDialect(Database database, Dialect expectedDialect) { + logger.info("Verifying dialect for " + database.getId()); + assertNotNull(database.getDialect()); + assertEquals(expectedDialect, database.getDialect()); + logger.info("Done verifying database dialect for " + database.getId()); + } + + private void testBackupEncryption(Backup backup, String expectedKey) { + logger.info("Verifying backup encryption for " + backup.getId()); + assertNotNull(backup.getEncryptionInfo()); + assertTrue(backup.getEncryptionInfo().getKmsKeyVersion().contains(expectedKey)); + logger.info("Done verifying backup encryption for " + backup.getId()); + } + + private void testMetadata( + OperationFuture operation, String backupId, Database database) + throws InterruptedException, ExecutionException { + + logger.info("Getting operation metadata"); + CreateBackupMetadata metadata1 = operation.getMetadata().get(); + String expectedOperationName1 = + String.format(EXPECTED_OP_NAME_FORMAT, testHelper.getInstanceId().getName(), backupId); + assertTrue(operation.getName().startsWith(expectedOperationName1)); + assertEquals(database.getId().getName(), metadata1.getDatabase()); + 
assertEquals(BackupId.of(testHelper.getInstanceId(), backupId).getName(), metadata1.getName()); + logger.info("Finished metadata tests"); + } + + private void testCreateInvalidExpirationDate(Database database) { + // This is not allowed, the expiration date must be at least 6 hours in the future. + Timestamp expireTime = daysAgo(1); + String backupId = testHelper.getUniqueBackupId(); + logger.info(String.format("Creating backup %s with invalid expiration date", backupId)); + OperationFuture op = + dbAdminClient.createBackup( + instanceId, backupId, database.getId().getDatabase(), expireTime); + backups.add(backupId); + ExecutionException executionException = assertThrows(ExecutionException.class, op::get); + Throwable cause = executionException.getCause(); + assertEquals(SpannerException.class, cause.getClass()); + SpannerException spannerException = (SpannerException) cause; + assertEquals(ErrorCode.INVALID_ARGUMENT, spannerException.getErrorCode()); + } + + private void testCancelBackupOperation(Database database) + throws InterruptedException, ExecutionException { + Timestamp expireTime = afterDays(7); + String backupId = testHelper.getUniqueBackupId(); + logger.info(String.format("Starting to create backup %s", backupId)); + OperationFuture op = + dbAdminClient.createBackup( + instanceId, backupId, database.getId().getDatabase(), expireTime); + backups.add(backupId); + // Cancel the backup operation. 
+ logger.info(String.format("Cancelling the creation of backup %s", backupId)); + dbAdminClient.cancelOperation(op.getName()); + logger.info("Fetching backup operations"); + boolean operationFound = false; + for (Operation operation : + dbAdminClient + .listBackupOperations( + instanceId, Options.filter(String.format("name:%s", op.getName()))) + .iterateAll()) { + assertEquals(Code.CANCELLED.value(), operation.getError().getCode()); + operationFound = true; + } + assertTrue(operationFound); + logger.info("Finished cancel test"); + } + + private void testGetBackup(Database database, String backupId, Timestamp expireTime) { + // Get the most recent version of the backup. + logger.info(String.format("Getting backup %s", backupId)); + Backup backup = instance.getBackup(backupId); + assertEquals(State.READY, backup.getState()); + assertTrue(backup.getSize() > 0L); + assertEquals(expireTime, backup.getExpireTime()); + assertEquals(database.getId(), backup.getDatabase()); + } + + private void testUpdateBackup(Backup backup) { + // Update the expire time. + Timestamp tomorrow = afterDays(1); + backup = backup.toBuilder().setExpireTime(tomorrow).build(); + logger.info( + String.format("Updating expire time of backup %s to 1 week", backup.getId().getBackup())); + backup.updateExpireTime(); + // Re-get the backup and ensure the expire time was updated. + logger.info(String.format("Reloading backup %s", backup.getId().getBackup())); + backup = backup.reload(); + assertEquals(tomorrow, backup.getExpireTime()); + + // Try to set the expire time to 5 minutes in the future. 
+ Timestamp in5Minutes = afterMinutes(5); + final Backup backupWithNewExpireTime = backup.toBuilder().setExpireTime(in5Minutes).build(); + logger.info( + String.format( + "Updating expire time of backup %s to 5 minutes", backup.getId().getBackup())); + SpannerException spannerException = + assertThrows(SpannerException.class, backupWithNewExpireTime::updateExpireTime); + assertEquals(ErrorCode.INVALID_ARGUMENT, spannerException.getErrorCode()); + + // Re-get the backup and ensure the expire time is still in one week. + backup = backup.reload(); + assertEquals(tomorrow, backup.getExpireTime()); + } + + private void testPagination() { + logger.info("Listing backups using pagination"); + + // First get all current backups without using pagination so we can compare that list with + // the same list when pagination fails. + List initialBackups = + Lists.newArrayList(dbAdminClient.listBackups(instanceId).iterateAll()); + + int numBackups = 0; + logger.info("Fetching first page"); + Page page = dbAdminClient.listBackups(instanceId, Options.pageSize(1)); + assertEquals(1, Iterables.size(page.getValues())); + numBackups++; + assertFalse(page.hasNextPage()); + Set seenPageTokens = new HashSet<>(); + seenPageTokens.add(""); + while (page.hasNextPage()) { + logger.info( + String.format( + "Fetching page %d with page token %s", numBackups + 1, page.getNextPageToken())); + // The backend should not return the same page token twice. + if (seenPageTokens.contains(page.getNextPageToken())) { + // This should not happen, so to try to figure out why we list all the backups here to see + // if there's anything that we can figure out from the list of backups now compared with + // the initial list (for example that a new backup has been added while we were iterating). + logger.info("Pagination of backups failed. 
Initial list of backups was:"); + for (Backup backup : initialBackups) { + logger.info(backup.getId().toString()); + } + logger.info("Current list of backups is:"); + List currentBackups = + Lists.newArrayList(dbAdminClient.listBackups(instanceId).iterateAll()); + for (Backup backup : currentBackups) { + logger.info(backup.getId().toString()); + } + } + assertFalse(Iterables.contains(seenPageTokens, page.getNextPageToken())); + seenPageTokens.add(page.getNextPageToken()); + page = + dbAdminClient.listBackups( + instanceId, Options.pageToken(page.getNextPageToken()), Options.pageSize(1)); + assertEquals(1, Iterables.size(page.getValues())); + numBackups++; + } + assertTrue(numBackups >= 1); + } + + private void testRestore(Backup backup, Timestamp versionTime, String expectedKey) + throws InterruptedException, ExecutionException { + // Restore the backup to a new database. + String restoredDb = testHelper.getUniqueDatabaseId(); + String restoreOperationName; + OperationFuture restoreOperation; + int attempts = 0; + while (true) { + try { + logger.info( + String.format( + "Restoring backup %s to database %s", backup.getId().getBackup(), restoredDb)); + final Restore restore = + dbAdminClient + .newRestoreBuilder(backup.getId(), DatabaseId.of(projectId, instanceId, restoredDb)) + .setEncryptionConfig(EncryptionConfigs.customerManagedEncryption(expectedKey)) + .build(); + restoreOperation = dbAdminClient.restoreDatabase(restore); + restoreOperationName = restoreOperation.getName(); + break; + } catch (ExecutionException e) { + if (e.getCause() instanceof FailedPreconditionException + && e.getCause() + .getMessage() + .contains("Please retry the operation once the pending restores complete")) { + attempts++; + if (attempts == 10) { + logger.info( + "Restore operation failed 10 times because of other pending restores. Skipping" + + " restore test."); + return; + } + // wait and then retry. 
+ logger.info( + String.format( + "Restoring backup %s to database %s must wait because of other pending restore" + + " operation", + backup.getId().getBackup(), restoredDb)); + //noinspection BusyWait + Thread.sleep(60_000L); + } else { + throw e; + } + } + } + databases.add(restoredDb); + logger.info(String.format("Restore operation %s running", restoreOperationName)); + RestoreDatabaseMetadata metadata = restoreOperation.getMetadata().get(); + assertEquals(backup.getId().getName(), metadata.getBackupInfo().getBackup()); + assertEquals(RestoreSourceType.BACKUP, metadata.getSourceType()); + assertEquals( + DatabaseId.of(testHelper.getInstanceId(), restoredDb).getName(), metadata.getName()); + assertEquals(versionTime, Timestamp.fromProto(metadata.getBackupInfo().getVersionTime())); + + // Ensure the operations show up in the right collections. + // TODO: Re-enable when it is clear why this fails on the CI environment. + // verifyRestoreOperations(backupOp.getName(), restoreOperationName); + + // Wait until the restore operation has finished successfully. + Database database = restoreOperation.get(); + assertEquals(restoredDb, database.getId().getDatabase()); + + // Reloads the database + final Database reloadedDatabase = database.reload(); + assertNotNull(reloadedDatabase.getProto()); + assertEquals( + versionTime, + Timestamp.fromProto( + reloadedDatabase.getProto().getRestoreInfo().getBackupInfo().getVersionTime())); + testDatabaseEncryption(reloadedDatabase, expectedKey); + testDatabaseDialect(reloadedDatabase, Dialect.GOOGLE_STANDARD_SQL); + + // Restoring the backup to an existing database should fail. 
+ logger.info( + String.format( + "Restoring backup %s to existing database %s", backup.getId().getBackup(), restoredDb)); + ExecutionException executionException = + assertThrows( + ExecutionException.class, + () -> backup.restore(DatabaseId.of(testHelper.getInstanceId(), restoredDb)).get()); + assertEquals(SpannerException.class, executionException.getCause().getClass()); + SpannerException spannerException = (SpannerException) executionException.getCause(); + assertEquals(ErrorCode.ALREADY_EXISTS, spannerException.getErrorCode()); + } + + // TODO: Remove when this verification can be re-enabled. + @SuppressWarnings("unused") + private void verifyRestoreOperations( + final String backupOperationName, final String restoreOperationName) { + assertTrue( + StreamSupport.stream(instance.listBackupOperations().iterateAll().spliterator(), false) + .anyMatch(input -> input.getName().equals(backupOperationName))); + assertFalse( + StreamSupport.stream(instance.listBackupOperations().iterateAll().spliterator(), false) + .anyMatch(input -> input.getName().equals(restoreOperationName))); + assertFalse( + StreamSupport.stream(instance.listDatabaseOperations().iterateAll().spliterator(), false) + .anyMatch(input -> input.getName().equals(backupOperationName))); + assertTrue( + StreamSupport.stream(instance.listDatabaseOperations().iterateAll().spliterator(), false) + .anyMatch(input -> input.getName().equals(restoreOperationName))); + } + + private static final class InjectErrorInterceptorProvider implements GrpcInterceptorProvider { + final AtomicBoolean injectError = new AtomicBoolean(true); + final AtomicInteger getOperationCount = new AtomicInteger(); + final AtomicInteger methodCount = new AtomicInteger(); + final String methodName; + + private InjectErrorInterceptorProvider(String methodName) { + this.methodName = methodName; + } + + @Override + public List getInterceptors() { + ClientInterceptor interceptor = + new ClientInterceptor() { + @Override + public ClientCall 
interceptCall( + MethodDescriptor method, CallOptions callOptions, Channel next) { + if (method.getFullMethodName().contains("GetOperation")) { + getOperationCount.incrementAndGet(); + } + if (!method.getFullMethodName().contains(methodName)) { + return next.newCall(method, callOptions); + } + + methodCount.incrementAndGet(); + final AtomicBoolean errorInjected = new AtomicBoolean(); + final ClientCall clientCall = next.newCall(method, callOptions); + + return new SimpleForwardingClientCall(clientCall) { + @Override + public void start(Listener responseListener, Metadata headers) { + super.start( + new SimpleForwardingClientCallListener(responseListener) { + @Override + public void onMessage(RespT message) { + if (injectError.getAndSet(false)) { + errorInjected.set(true); + clientCall.cancel("Cancelling call for injected error", null); + } else { + super.onMessage(message); + } + } + + @Override + public void onClose(Status status, Metadata metadata) { + if (errorInjected.get()) { + status = Status.UNAVAILABLE.augmentDescription("INJECTED BY TEST"); + } + super.onClose(status, metadata); + } + }, + headers); + } + }; + } + }; + return Collections.singletonList(interceptor); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/ChannelFinderGoldenTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/ChannelFinderGoldenTest.java new file mode 100644 index 000000000000..525313f1ab4e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/ChannelFinderGoldenTest.java @@ -0,0 +1,203 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +import com.google.protobuf.TextFormat; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.RoutingHint; +import io.grpc.CallOptions; +import io.grpc.ClientCall; +import io.grpc.ManagedChannel; +import io.grpc.MethodDescriptor; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import spanner.cloud.location.FinderTestCase; +import spanner.cloud.location.FinderTestCases; + +@RunWith(JUnit4.class) +public class ChannelFinderGoldenTest { + + @Test + public void goldenTest() throws Exception { + FinderTestCases.Builder builder = FinderTestCases.newBuilder(); + try (InputStream inputStream = + getClass().getClassLoader().getResourceAsStream("finder_test.textproto"); + InputStreamReader reader = + new InputStreamReader(Objects.requireNonNull(inputStream), StandardCharsets.UTF_8)) { + TextFormat.merge(reader, builder); + } + + FinderTestCases testCases = builder.build(); + + for (FinderTestCase testCase : testCases.getTestCaseList()) { + FakeEndpointCache 
endpointCache = new FakeEndpointCache(); + ChannelFinder finder = new ChannelFinder(endpointCache); + finder.useDeterministicRandom(); + + for (FinderTestCase.Event event : testCase.getEventList()) { + if (event.hasCacheUpdate()) { + finder.update(event.getCacheUpdate()); + } + + if (!event.getUnhealthyServersList().isEmpty()) { + endpointCache.setUnhealthyServers(new HashSet<>(event.getUnhealthyServersList())); + } else { + endpointCache.setUnhealthyServers(Collections.emptySet()); + } + + switch (event.getRequestCase()) { + case READ: + ReadRequest.Builder readBuilder = event.getRead().toBuilder(); + ChannelEndpoint readEndpoint = finder.findServer(readBuilder); + assertHintAndServer( + testCase.getName(), event, readBuilder.getRoutingHint(), readEndpoint); + break; + case SQL: + ExecuteSqlRequest.Builder sqlBuilder = event.getSql().toBuilder(); + ChannelEndpoint sqlEndpoint = finder.findServer(sqlBuilder); + assertHintAndServer( + testCase.getName(), event, sqlBuilder.getRoutingHint(), sqlEndpoint); + break; + case REQUEST_NOT_SET: + default: + break; + } + } + } + } + + private static void assertHintAndServer( + String testCaseName, + FinderTestCase.Event event, + RoutingHint actualHint, + ChannelEndpoint endpoint) { + assertEquals( + "RoutingHint mismatch for test case: " + testCaseName, event.getHint(), actualHint); + String expectedServer = event.getServer(); + if (!expectedServer.isEmpty()) { + assertNotNull("Expected server for test case: " + testCaseName, endpoint); + assertEquals(expectedServer, endpoint.getAddress()); + } else { + assertNull("Expected no server for test case: " + testCaseName, endpoint); + } + } + + private static final class FakeEndpointCache implements ChannelEndpointCache { + private final Map endpoints = new HashMap<>(); + private final FakeEndpoint defaultEndpoint = new FakeEndpoint("default"); + private volatile Set unhealthyServers = Collections.emptySet(); + + void setUnhealthyServers(Set unhealthyServers) { + 
this.unhealthyServers = unhealthyServers; + } + + @Override + public ChannelEndpoint defaultChannel() { + return defaultEndpoint; + } + + @Override + public ChannelEndpoint get(String address) { + return endpoints.computeIfAbsent(address, FakeEndpoint::new); + } + + @Override + public void evict(String address) { + endpoints.remove(address); + } + + @Override + public void shutdown() { + endpoints.clear(); + } + + private final class FakeEndpoint implements ChannelEndpoint { + private final String address; + + private FakeEndpoint(String address) { + this.address = address; + } + + @Override + public String getAddress() { + return address; + } + + @Override + public boolean isHealthy() { + return !unhealthyServers.contains(address); + } + + @Override + public ManagedChannel getChannel() { + return new ManagedChannel() { + @Override + public ManagedChannel shutdown() { + return this; + } + + @Override + public ManagedChannel shutdownNow() { + return this; + } + + @Override + public boolean isShutdown() { + return false; + } + + @Override + public boolean isTerminated() { + return false; + } + + @Override + public boolean awaitTermination(long timeout, TimeUnit unit) { + return true; + } + + @Override + public ClientCall newCall( + MethodDescriptor methodDescriptor, CallOptions callOptions) { + throw new UnsupportedOperationException(); + } + + @Override + public String authority() { + return address; + } + }; + } + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/GapicSpannerRpcTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/GapicSpannerRpcTest.java new file mode 100644 index 000000000000..b8d57edaf402 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/GapicSpannerRpcTest.java @@ -0,0 +1,1312 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use 
this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import static com.google.common.truth.Truth.assertThat; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeTrue; + +import com.google.api.core.ApiFunction; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.HeaderProvider; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.auth.Credentials; +import com.google.auth.oauth2.AccessToken; +import com.google.auth.oauth2.OAuth2Credentials; +import com.google.cloud.NoCredentials; +import com.google.cloud.ServiceOptions; +import com.google.cloud.grpc.GcpManagedChannelOptions; +import com.google.cloud.grpc.GcpManagedChannelOptions.GcpMetricsOptions; +import com.google.cloud.grpc.fallback.GcpFallbackChannelOptions; +import com.google.cloud.grpc.fallback.GcpFallbackOpenTelemetry; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import 
com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.SpannerOptions.CallContextConfigurator; +import com.google.cloud.spanner.SpannerOptionsHelper; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TransactionRunner; +import com.google.cloud.spanner.spi.v1.GapicSpannerRpc.AdminRequestsLimitExceededRetryAlgorithm; +import com.google.cloud.spanner.spi.v1.SpannerRpc.Option; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.ListValue; +import com.google.rpc.ErrorInfo; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.GetSessionRequest; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.Session; +import com.google.spanner.v1.SpannerGrpc; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.TypeCode; +import io.grpc.Context; +import io.grpc.Contexts; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Metadata; +import io.grpc.Metadata.Key; +import io.grpc.MethodDescriptor; +import io.grpc.Server; +import io.grpc.ServerCall; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptor; +import io.grpc.Status; +import io.grpc.auth.MoreCallCredentials; +import io.grpc.netty.shaded.io.grpc.netty.NettyChannelBuilder; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import io.grpc.protobuf.lite.ProtoLiteUtils; +import io.opentelemetry.api.OpenTelemetry; +import 
io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator; +import io.opentelemetry.context.propagation.ContextPropagators; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.samplers.Sampler; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.time.Duration; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.Executor; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class GapicSpannerRpcTest { + + private static final Statement SELECT1AND2 = + Statement.of("SELECT 1 AS COL1 UNION ALL SELECT 2 AS COL1"); + private static final ResultSetMetadata SELECT1AND2_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("COL1") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.INT64) + .build()) + .build()) + .build()) + .build(); + private static final com.google.spanner.v1.ResultSet SELECT1_RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("1").build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("2").build()) + .build()) + 
.setMetadata(SELECT1AND2_METADATA) + .build(); + private static final Statement UPDATE_FOO_STATEMENT = + Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=2"); + + private static final String STATIC_OAUTH_TOKEN = "STATIC_TEST_OAUTH_TOKEN"; + private static final String VARIABLE_OAUTH_TOKEN = "VARIABLE_TEST_OAUTH_TOKEN"; + private static final OAuth2Credentials STATIC_CREDENTIALS = + OAuth2Credentials.create( + new AccessToken( + STATIC_OAUTH_TOKEN, + new java.util.Date( + System.currentTimeMillis() + TimeUnit.MILLISECONDS.convert(1L, TimeUnit.DAYS)))); + private static final OAuth2Credentials VARIABLE_CREDENTIALS = + OAuth2Credentials.create( + new AccessToken( + VARIABLE_OAUTH_TOKEN, + new java.util.Date( + System.currentTimeMillis() + TimeUnit.MILLISECONDS.convert(1L, TimeUnit.DAYS)))); + + private static MockSpannerServiceImpl mockSpanner; + private static Server server; + private static InetSocketAddress address; + private static final Map optionsMap = new HashMap<>(); + private static Metadata lastSeenHeaders; + private static String defaultUserAgent; + private static Spanner spanner; + private static boolean isRouteToLeader; + private static boolean isEndToEndTracing; + private static boolean isTraceContextPresent; + + @Parameter public Dialect dialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + @Before + public void startServer() throws Exception { + // Enable OpenTelemetry tracing. 
+ SpannerOptionsHelper.resetActiveTracingFramework(); + SpannerOptions.enableOpenTelemetryTraces(); + + assumeTrue( + "Skip tests when emulator is enabled as this test interferes with the check whether the" + + " emulator is running", + System.getenv("SPANNER_EMULATOR_HOST") == null); + + defaultUserAgent = "spanner-java/" + GaxProperties.getLibraryVersion(GapicSpannerRpc.class); + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. + mockSpanner.putStatementResult(StatementResult.query(SELECT1AND2, SELECT1_RESULTSET)); + mockSpanner.putStatementResult(StatementResult.update(UPDATE_FOO_STATEMENT, 1L)); + + address = new InetSocketAddress("localhost", 0); + server = + NettyServerBuilder.forAddress(address) + .addService(mockSpanner) + // Add a server interceptor that will check that we receive the variable OAuth token + // from the CallCredentials, and not the one set as static credentials. + .intercept( + new ServerInterceptor() { + @Override + public ServerCall.Listener interceptCall( + ServerCall call, + Metadata headers, + ServerCallHandler next) { + lastSeenHeaders = headers; + String auth = + headers.get(Key.of("authorization", Metadata.ASCII_STRING_MARSHALLER)); + assertThat(auth).isEqualTo("Bearer " + VARIABLE_OAUTH_TOKEN); + String clientLibToken = + headers.get( + Metadata.Key.of("x-goog-api-client", Metadata.ASCII_STRING_MARSHALLER)); + assertNotNull(clientLibToken); + assertTrue( + clientLibToken.contains(ServiceOptions.getGoogApiClientLibName() + "/")); + if (call.getMethodDescriptor() + .equals(SpannerGrpc.getExecuteStreamingSqlMethod()) + || call.getMethodDescriptor().equals(SpannerGrpc.getExecuteSqlMethod())) { + String traceParentHeader = + headers.get(Key.of("traceparent", Metadata.ASCII_STRING_MARSHALLER)); + isTraceContextPresent = (traceParentHeader != null); + String routeToLeaderHeader = + headers.get( + Key.of( + "x-goog-spanner-route-to-leader", + 
Metadata.ASCII_STRING_MARSHALLER)); + String endToEndTracingHeader = + headers.get( + Key.of( + "x-goog-spanner-end-to-end-tracing", + Metadata.ASCII_STRING_MARSHALLER)); + isRouteToLeader = + (routeToLeaderHeader != null && routeToLeaderHeader.equals("true")); + isEndToEndTracing = + (endToEndTracingHeader != null && endToEndTracingHeader.equals("true")); + } + return Contexts.interceptCall(Context.current(), call, headers, next); + } + }) + .build() + .start(); + optionsMap.put(Option.CHANNEL_HINT, 1L); + spanner = createSpannerOptions().getService(); + } + + @After + public void reset() throws InterruptedException { + if (mockSpanner != null) { + mockSpanner.reset(); + } + if (spanner != null) { + spanner.close(); + } + if (server != null) { + server.shutdown(); + server.awaitTermination(); + } + isRouteToLeader = false; + isEndToEndTracing = false; + isTraceContextPresent = false; + } + + @Test + public void testCallCredentialsProviderPreferenceAboveCredentials() { + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("some-project") + .setCredentials(STATIC_CREDENTIALS) + .setCallCredentialsProvider(() -> MoreCallCredentials.from(VARIABLE_CREDENTIALS)) + .build(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, false); + // GoogleAuthLibraryCallCredentials doesn't implement equals, so we can only check for the + // existence. 
+ assertNotNull( + rpc.newCallContext( + optionsMap, + "/some/resource", + GetSessionRequest.getDefaultInstance(), + SpannerGrpc.getGetSessionMethod()) + .getCallOptions() + .getCredentials()); + rpc.shutdown(); + } + + @Test + public void testCallCredentialsProviderReturnsNull() { + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("some-project") + .setCredentials(STATIC_CREDENTIALS) + .setCallCredentialsProvider(() -> null) + .build(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, false); + assertNull( + rpc.newCallContext( + optionsMap, + "/some/resource", + GetSessionRequest.getDefaultInstance(), + SpannerGrpc.getGetSessionMethod()) + .getCallOptions() + .getCredentials()); + rpc.shutdown(); + } + + @Test + public void testNoCallCredentials() { + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("some-project") + .setCredentials(STATIC_CREDENTIALS) + .build(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, false); + assertNull( + rpc.newCallContext( + optionsMap, + "/some/resource", + GetSessionRequest.getDefaultInstance(), + SpannerGrpc.getGetSessionMethod()) + .getCallOptions() + .getCredentials()); + rpc.shutdown(); + } + + @Test + public void testClientCompressorGzip() { + SpannerOptions options = + SpannerOptions.newBuilder().setProjectId("some-project").setCompressorName("gzip").build(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, false); + assertEquals( + "gzip", + rpc.newCallContext( + optionsMap, + "/some/resource", + GetSessionRequest.getDefaultInstance(), + SpannerGrpc.getGetSessionMethod()) + .getCallOptions() + .getCompressor()); + rpc.shutdown(); + } + + @Test + public void testClientCompressorIdentity() { + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("some-project") + .setCompressorName("identity") + .build(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, false); + assertEquals( + "identity", + rpc.newCallContext( + optionsMap, + "/some/resource", + 
GetSessionRequest.getDefaultInstance(), + SpannerGrpc.getGetSessionMethod()) + .getCallOptions() + .getCompressor()); + rpc.shutdown(); + } + + @Test + public void testClientCompressorDefault() { + SpannerOptions options = SpannerOptions.newBuilder().setProjectId("some-project").build(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, false); + assertNull( + rpc.newCallContext( + optionsMap, + "/some/resource", + GetSessionRequest.getDefaultInstance(), + SpannerGrpc.getGetSessionMethod()) + .getCallOptions() + .getCompressor()); + rpc.shutdown(); + } + + private static final class TimeoutHolder { + + private Duration timeout; + } + + @Test + public void testCallContextTimeout() { + // Create a CallContextConfigurator that uses a variable timeout value. + final TimeoutHolder timeoutHolder = new TimeoutHolder(); + CallContextConfigurator configurator = + new CallContextConfigurator() { + @Override + public ApiCallContext configure( + ApiCallContext context, ReqT request, MethodDescriptor method) { + // Only configure a timeout for the ExecuteSql method as this method is used for + // executing DML statements. + if (request instanceof ExecuteSqlRequest + && method.equals(SpannerGrpc.getExecuteSqlMethod())) { + ExecuteSqlRequest sqlRequest = (ExecuteSqlRequest) request; + // Sequence numbers are only assigned for DML statements, which means that + // this is an update statement. + if (sqlRequest.getSeqno() > 0L) { + return context.withTimeoutDuration(timeoutHolder.timeout); + } + } + return null; + } + }; + + mockSpanner.setExecuteSqlExecutionTime(SimulatedExecutionTime.ofMinimumAndRandomTime(10, 0)); + final DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + Context context = + Context.current().withValue(SpannerOptions.CALL_CONTEXT_CONFIGURATOR_KEY, configurator); + context.run( + () -> { + // First try with a 1ns timeout. This should always cause a DEADLINE_EXCEEDED + // exception. 
+ timeoutHolder.timeout = Duration.ofNanos(1L); + SpannerException e = + assertThrows( + SpannerException.class, + () -> + client + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(UPDATE_FOO_STATEMENT))); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); + + // Then try with a longer timeout. This should now succeed. + timeoutHolder.timeout = Duration.ofMinutes(1L); + long updateCount = + client + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(UPDATE_FOO_STATEMENT)); + assertEquals(1L, updateCount); + }); + } + + @Test + public void testNewCallContextWithNullRequestAndNullMethod() { + SpannerOptions options = SpannerOptions.newBuilder().setProjectId("some-project").build(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, false); + assertNotNull(rpc.newCallContext(optionsMap, "/some/resource", null, null)); + rpc.shutdown(); + } + + @Test + public void testNewCallContextWithRouteToLeaderHeader() { + SpannerOptions options = + SpannerOptions.newBuilder().setProjectId("some-project").enableLeaderAwareRouting().build(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, false); + GrpcCallContext callContext = + rpc.newCallContext( + optionsMap, + "/some/resource", + ExecuteSqlRequest.getDefaultInstance(), + SpannerGrpc.getExecuteSqlMethod(), + true); + assertNotNull(callContext); + assertEquals( + ImmutableList.of("true"), + callContext.getExtraHeaders().get("x-goog-spanner-route-to-leader")); + assertEquals( + ImmutableList.of("projects/some-project"), + callContext.getExtraHeaders().get(ApiClientHeaderProvider.getDefaultResourceHeaderKey())); + rpc.shutdown(); + } + + @Test + public void testNewCallContextWithoutRouteToLeaderHeader() { + SpannerOptions options = + SpannerOptions.newBuilder().enableLeaderAwareRouting().setProjectId("some-project").build(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, false); + GrpcCallContext callContext = + rpc.newCallContext( + optionsMap, + 
"/some/resource", + ExecuteSqlRequest.getDefaultInstance(), + SpannerGrpc.getExecuteSqlMethod(), + false); + assertNotNull(callContext); + assertNull(callContext.getExtraHeaders().get("x-goog-spanner-route-to-leader")); + rpc.shutdown(); + } + + @Test + public void testNewCallContextWithRouteToLeaderHeaderAndLarDisabled() { + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("some-project") + .disableLeaderAwareRouting() + .build(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, false); + GrpcCallContext callContext = + rpc.newCallContext( + optionsMap, + "/some/resource", + ExecuteSqlRequest.getDefaultInstance(), + SpannerGrpc.getExecuteSqlMethod(), + true); + assertNotNull(callContext); + assertNull(callContext.getExtraHeaders().get("x-goog-spanner-route-to-leader")); + rpc.shutdown(); + } + + @Test + public void testNewCallContextWithEndToEndTracingHeader() { + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("some-project") + .setEnableEndToEndTracing(true) + .build(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, false); + GrpcCallContext callContext = + rpc.newCallContext( + optionsMap, + "/some/resource", + ExecuteSqlRequest.getDefaultInstance(), + SpannerGrpc.getExecuteSqlMethod()); + assertNotNull(callContext); + assertEquals( + ImmutableList.of("true"), + callContext.getExtraHeaders().get("x-goog-spanner-end-to-end-tracing")); + assertEquals( + ImmutableList.of("projects/some-project"), + callContext.getExtraHeaders().get(ApiClientHeaderProvider.getDefaultResourceHeaderKey())); + rpc.shutdown(); + } + + @Test + public void testNewCallContextWithoutEndToEndTracingHeader() { + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("some-project") + .setEnableEndToEndTracing(false) + .build(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, false); + GrpcCallContext callContext = + rpc.newCallContext( + optionsMap, + "/some/resource", + ExecuteSqlRequest.getDefaultInstance(), + 
SpannerGrpc.getExecuteSqlMethod()); + assertNotNull(callContext); + assertNull(callContext.getExtraHeaders().get("x-goog-spanner-end-to-end-tracing")); + rpc.shutdown(); + } + + @Test + public void testEndToEndTracingHeaderWithEnabledTracing() { + final SpannerOptions options = + createSpannerOptions().toBuilder().setEnableEndToEndTracing(true).build(); + try (Spanner spanner = options.getService()) { + final DatabaseClient databaseClient = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + TransactionRunner runner = databaseClient.readWriteTransaction(); + runner.run( + transaction -> { + transaction.executeUpdate(UPDATE_FOO_STATEMENT); + return null; + }); + } + assertTrue(isEndToEndTracing); + } + + @Test + public void testEndToEndTracingHeaderWithDisabledTracing() { + final SpannerOptions options = + createSpannerOptions().toBuilder().setEnableEndToEndTracing(false).build(); + try (Spanner spanner = options.getService()) { + final DatabaseClient databaseClient = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + TransactionRunner runner = databaseClient.readWriteTransaction(); + runner.run( + transaction -> { + transaction.executeUpdate(UPDATE_FOO_STATEMENT); + return null; + }); + } + assertFalse(isEndToEndTracing); + } + + @Test + public void testAdminRequestsLimitExceededRetryAlgorithm() { + AdminRequestsLimitExceededRetryAlgorithm alg = + new AdminRequestsLimitExceededRetryAlgorithm<>(); + + assertThat(alg.shouldRetry(null, 1L)).isFalse(); + + ErrorInfo info = + ErrorInfo.newBuilder() + .putMetadata("quota_limit", "AdminMethodQuotaPerMinutePerProject") + .build(); + Metadata.Key key = + Metadata.Key.of( + info.getDescriptorForType().getFullName() + Metadata.BINARY_HEADER_SUFFIX, + ProtoLiteUtils.metadataMarshaller(info)); + Metadata trailers = new Metadata(); + trailers.put(key, info); + + SpannerException adminRateExceeded = + SpannerExceptionFactory.newSpannerException( + 
Status.RESOURCE_EXHAUSTED.withDescription("foo").asRuntimeException(trailers)); + assertThat(alg.shouldRetry(adminRateExceeded, null)).isTrue(); + + SpannerException numDatabasesExceeded = + SpannerExceptionFactory.newSpannerException( + Status.RESOURCE_EXHAUSTED + .withDescription("Too many databases on instance") + .asRuntimeException()); + assertThat(alg.shouldRetry(numDatabasesExceeded, null)).isFalse(); + + assertThat(alg.shouldRetry(new Exception("random exception"), null)).isFalse(); + } + + @Test + public void testDefaultUserAgent() { + final DatabaseClient databaseClient = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + + try (final ResultSet rs = databaseClient.singleUse().executeQuery(SELECT1AND2)) { + rs.next(); + } + + assertThat(lastSeenHeaders.get(Key.of("user-agent", Metadata.ASCII_STRING_MARSHALLER))) + .contains(defaultUserAgent); + } + + @Test + public void testCustomUserAgent() { + for (String headerId : new String[] {"user-agent", "User-Agent", "USER-AGENT"}) { + final HeaderProvider userAgentHeaderProvider = + () -> { + final Map headers = new HashMap<>(); + headers.put(headerId, "test-agent"); + return headers; + }; + final SpannerOptions options = + createSpannerOptions().toBuilder().setHeaderProvider(userAgentHeaderProvider).build(); + try (Spanner spanner = options.getService()) { + final DatabaseClient databaseClient = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + + try (final ResultSet rs = databaseClient.singleUse().executeQuery(SELECT1AND2)) { + rs.next(); + } + + assertThat(lastSeenHeaders.get(Key.of("user-agent", Metadata.ASCII_STRING_MARSHALLER))) + .contains("test-agent " + defaultUserAgent); + } + } + } + + @Test + public void testTraceContextHeaderWithOpenTelemetryAndEndToEndTracingEnabled() { + OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder() + .setPropagators(ContextPropagators.create(W3CTraceContextPropagator.getInstance())) + 
.setTracerProvider(SdkTracerProvider.builder().setSampler(Sampler.alwaysOn()).build()) + .build(); + + final SpannerOptions options = + createSpannerOptions().toBuilder() + .setOpenTelemetry(openTelemetry) + .setEnableEndToEndTracing(true) + .build(); + try (Spanner spanner = options.getService()) { + final DatabaseClient databaseClient = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + + try (final ResultSet rs = databaseClient.singleUse().executeQuery(SELECT1AND2)) { + rs.next(); + } + + assertTrue(isTraceContextPresent); + } + } + + @Test + public void testTraceContextHeaderWithOpenTelemetryAndEndToEndTracingDisabled() { + OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder() + .setPropagators(ContextPropagators.create(W3CTraceContextPropagator.getInstance())) + .setTracerProvider(SdkTracerProvider.builder().setSampler(Sampler.alwaysOn()).build()) + .build(); + + final SpannerOptions options = + createSpannerOptions().toBuilder() + .setOpenTelemetry(openTelemetry) + .setEnableEndToEndTracing(false) + .build(); + try (Spanner spanner = options.getService()) { + final DatabaseClient databaseClient = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + + try (final ResultSet rs = databaseClient.singleUse().executeQuery(SELECT1AND2)) { + rs.next(); + } + + assertFalse(isTraceContextPresent); + } + } + + @Test + public void testTraceContextHeaderWithoutOpenTelemetry() { + final SpannerOptions options = createSpannerOptions(); + try (Spanner spanner = options.getService()) { + final DatabaseClient databaseClient = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + + try (final ResultSet rs = databaseClient.singleUse().executeQuery(SELECT1AND2)) { + rs.next(); + } + + assertFalse(isTraceContextPresent); + } + } + + @Test + public void testRouteToLeaderHeaderForReadOnly() { + final SpannerOptions options = + 
createSpannerOptions().toBuilder().enableLeaderAwareRouting().build(); + try (Spanner spanner = options.getService()) { + final DatabaseClient databaseClient = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + + try (final ResultSet rs = databaseClient.singleUse().executeQuery(SELECT1AND2)) { + rs.next(); + } + + assertFalse(isRouteToLeader); + } + } + + @Test + public void testRouteToLeaderHeaderForReadWrite() { + final SpannerOptions options = + createSpannerOptions().toBuilder().enableLeaderAwareRouting().build(); + try (Spanner spanner = options.getService()) { + final DatabaseClient databaseClient = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + TransactionRunner runner = databaseClient.readWriteTransaction(); + runner.run( + transaction -> { + transaction.executeUpdate(UPDATE_FOO_STATEMENT); + return null; + }); + } + assertTrue(isRouteToLeader); + } + + @Test + public void testRouteToLeaderHeaderWithLeaderAwareRoutingDisabled() { + final SpannerOptions options = + createSpannerOptions().toBuilder().disableLeaderAwareRouting().build(); + try (Spanner spanner = options.getService()) { + final DatabaseClient databaseClient = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + TransactionRunner runner = databaseClient.readWriteTransaction(); + runner.run( + transaction -> { + transaction.executeUpdate(UPDATE_FOO_STATEMENT); + return null; + }); + } + assertFalse(isRouteToLeader); + } + + @Test + public void testClientLibToken() { + SpannerOptions options = createSpannerOptions(); + try (Spanner spanner = options.getService()) { + DatabaseClient databaseClient = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + TransactionRunner runner = databaseClient.readWriteTransaction(); + runner.run(transaction -> transaction.executeUpdate(UPDATE_FOO_STATEMENT)); + } + Key key = Key.of("x-goog-api-client", 
Metadata.ASCII_STRING_MARSHALLER); + assertTrue(lastSeenHeaders.containsKey(key)); + assertTrue( + lastSeenHeaders.get(key), + Objects.requireNonNull(lastSeenHeaders.get(key)) + .contains(ServiceOptions.getGoogApiClientLibName() + "/")); + // Check that the default header value is only included once in the header. + // We do this by splitting the entire header by the default header value. The resulting array + // should have 2 elements. + assertEquals( + lastSeenHeaders.get(key), + 2, + Objects.requireNonNull(lastSeenHeaders.get(key)) + .split(ServiceOptions.getGoogApiClientLibName()) + .length); + assertTrue( + lastSeenHeaders.get(key), + Objects.requireNonNull(lastSeenHeaders.get(key)).contains("gl-java/")); + } + + @Test + public void testCustomClientLibToken_alsoContainsDefaultToken() { + SpannerOptions options = + createSpannerOptions().toBuilder().setClientLibToken("pg-adapter").build(); + try (Spanner spanner = options.getService()) { + DatabaseClient databaseClient = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + TransactionRunner runner = databaseClient.readWriteTransaction(); + runner.run(transaction -> transaction.executeUpdate(UPDATE_FOO_STATEMENT)); + } + Key key = Key.of("x-goog-api-client", Metadata.ASCII_STRING_MARSHALLER); + assertTrue(lastSeenHeaders.containsKey(key)); + assertTrue( + lastSeenHeaders.get(key), + Objects.requireNonNull(lastSeenHeaders.get(key)).contains("pg-adapter")); + assertTrue( + lastSeenHeaders.get(key), + Objects.requireNonNull(lastSeenHeaders.get(key)) + .contains(ServiceOptions.getGoogApiClientLibName() + "/")); + assertTrue( + lastSeenHeaders.get(key), + Objects.requireNonNull(lastSeenHeaders.get(key)).contains("gl-java/")); + } + + @Test + public void testGetDatabaseAdminStubSettings_whenStubInitialized_assertNonNullClientSetting() { + SpannerOptions options = createSpannerOptions(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, true); + + 
assertNotNull(rpc.getDatabaseAdminStubSettings()); + + rpc.shutdown(); + } + + @Test + public void testGetInstanceAdminStubSettings_whenStubInitialized_assertNonNullClientSetting() { + SpannerOptions options = createSpannerOptions(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, true); + + assertNotNull(rpc.getInstanceAdminStubSettings()); + + rpc.shutdown(); + } + + @Test + public void testAdminStubSettings_whenStubNotInitialized_assertNullClientSetting() { + SpannerOptions options = createSpannerOptions(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, false); + + assertNull(rpc.getDatabaseAdminStubSettings()); + assertNull(rpc.getInstanceAdminStubSettings()); + + rpc.shutdown(); + } + + @Test + public void testCreateSession_assertSessionProto() { + SpannerOptions options = createSpannerOptions(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, true); + + Session session = rpc.createSession("DATABASE_NAME", null, null, null); + assertNotNull(session); + assertNotNull(session.getCreateTime()); + assertEquals(false, session.getMultiplexed()); + rpc.shutdown(); + } + + @Test + public void testCreateSession_whenMultiplexedSessionIsTrue_assertSessionProto() { + SpannerOptions options = createSpannerOptions(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, true); + + Session session = rpc.createSession("DATABASE_NAME", null, null, null, true); + assertNotNull(session); + assertNotNull(session.getCreateTime()); + assertEquals(true, session.getMultiplexed()); + rpc.shutdown(); + } + + @Test + public void testCreateSession_whenMultiplexedSessionIsFalse_assertSessionProto() { + SpannerOptions options = createSpannerOptions(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, true); + + Session session = rpc.createSession("DATABASE_NAME", null, null, null, false); + assertNotNull(session); + assertNotNull(session.getCreateTime()); + assertEquals(false, session.getMultiplexed()); + rpc.shutdown(); + } + + @Test + public void 
testChannelEndpointCacheFactoryUsedWhenLocationApiEnabled() { + AtomicBoolean factoryCalled = new AtomicBoolean(false); + ChannelEndpointCacheFactory factory = + baseProvider -> { + factoryCalled.set(true); + return new GrpcChannelEndpointCache(baseProvider); + }; + + try { + SpannerOptions.useEnvironment( + new SpannerOptions.SpannerEnvironment() { + @Override + public boolean isEnableLocationApi() { + return true; + } + }); + SpannerOptions options = + createSpannerOptions().toBuilder().setChannelEndpointCacheFactory(factory).build(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, true); + rpc.shutdown(); + assertTrue(factoryCalled.get()); + } finally { + SpannerOptions.useDefaultEnvironment(); + } + } + + @Test + public void testLocationApiDoesNotOverrideExplicitChannelProvider() { + AtomicBoolean factoryCalled = new AtomicBoolean(false); + ChannelEndpointCacheFactory factory = + baseProvider -> { + factoryCalled.set(true); + return new GrpcChannelEndpointCache(baseProvider); + }; + + AtomicBoolean providerUsed = new AtomicBoolean(false); + TransportChannelProvider channelProvider = + new RecordingTransportChannelProvider( + address.getHostString(), server.getPort(), providerUsed); + + try { + SpannerOptions.useEnvironment( + new SpannerOptions.SpannerEnvironment() { + @Override + public boolean isEnableLocationApi() { + return true; + } + }); + SpannerOptions options = + createSpannerOptions().toBuilder() + .setChannelProvider(channelProvider) + .setChannelEndpointCacheFactory(factory) + .build(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, true); + rpc.shutdown(); + assertTrue(providerUsed.get()); + assertFalse(factoryCalled.get()); + } finally { + SpannerOptions.useDefaultEnvironment(); + } + } + + @Test + public void testLocationApiDisabledInOptionsDoesNotCreateKeyAwareChannelProvider() { + AtomicBoolean factoryCalled = new AtomicBoolean(false); + ChannelEndpointCacheFactory factory = + baseProvider -> { + factoryCalled.set(true); + return new 
GrpcChannelEndpointCache(baseProvider); + }; + + try { + SpannerOptions.useEnvironment( + new SpannerOptions.SpannerEnvironment() { + @Override + public boolean isEnableLocationApi() { + return false; + } + }); + SpannerOptions options = + createSpannerOptions().toBuilder().setChannelEndpointCacheFactory(factory).build(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, true); + rpc.shutdown(); + assertFalse(factoryCalled.get()); + } finally { + SpannerOptions.useDefaultEnvironment(); + } + } + + @Test + public void testGrpcGcpExtensionPreservesChannelConfigurator() throws Exception { + InstantiatingGrpcChannelProvider.Builder channelProviderBuilder = + InstantiatingGrpcChannelProvider.newBuilder(); + AtomicBoolean baseConfiguratorCalled = new AtomicBoolean(false); + channelProviderBuilder.setChannelConfigurator( + builder -> { + baseConfiguratorCalled.set(true); + return builder; + }); + + SpannerOptions options = + SpannerOptions.newBuilder().setProjectId("[PROJECT]").enableGrpcGcpExtension().build(); + + java.lang.reflect.Method method = + GapicSpannerRpc.class.getDeclaredMethod( + "maybeEnableGrpcGcpExtension", + InstantiatingGrpcChannelProvider.Builder.class, + SpannerOptions.class); + method.setAccessible(true); + method.invoke(null, channelProviderBuilder, options); + + ApiFunction chainedConfigurator = + channelProviderBuilder.getChannelConfigurator(); + chainedConfigurator.apply(NettyChannelBuilder.forAddress("localhost", 1)); + + assertTrue(baseConfiguratorCalled.get()); + } + + @Test + public void testGrpcGcpOtelMetricsDisabledSkipsMeterInjection() throws Exception { + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + .setGrpcGcpOtelMetricsEnabled(false) + .build(); + + java.lang.reflect.Method method = + GapicSpannerRpc.class.getDeclaredMethod( + "grpcGcpOptionsWithMetricsAndDcp", SpannerOptions.class); + method.setAccessible(true); + GcpManagedChannelOptions grpcGcpOptions = + (GcpManagedChannelOptions) 
method.invoke(null, options); + GcpMetricsOptions metricsOptions = grpcGcpOptions.getMetricsOptions(); + + assertNotNull(metricsOptions); + assertNull(metricsOptions.getOpenTelemetryMeter()); + } + + private static final class RecordingTransportChannelProvider implements TransportChannelProvider { + private final String host; + private final int port; + private final AtomicBoolean used; + + private RecordingTransportChannelProvider(String host, int port, AtomicBoolean used) { + this.host = host; + this.port = port; + this.used = used; + } + + @Override + public GrpcTransportChannel getTransportChannel() throws IOException { + used.set(true); + return GrpcTransportChannel.newBuilder() + .setManagedChannel(ManagedChannelBuilder.forAddress(host, port).usePlaintext().build()) + .build(); + } + + @Override + public String getTransportName() { + return GrpcTransportChannel.getGrpcTransportName(); + } + + @Override + public boolean needsEndpoint() { + return false; + } + + @Override + public boolean needsCredentials() { + return false; + } + + @Override + public boolean needsExecutor() { + return false; + } + + @Override + public boolean needsHeaders() { + return false; + } + + @Override + public boolean shouldAutoClose() { + return true; + } + + @Override + public TransportChannelProvider withEndpoint(String endpoint) { + return this; + } + + @Override + public TransportChannelProvider withCredentials(Credentials credentials) { + return this; + } + + @Override + public TransportChannelProvider withHeaders(Map headers) { + return this; + } + + @Override + public TransportChannelProvider withPoolSize(int poolSize) { + return this; + } + + @Override + public TransportChannelProvider withExecutor(ScheduledExecutorService executor) { + return this; + } + + @Override + public TransportChannelProvider withExecutor(Executor executor) { + return this; + } + + @Override + public boolean acceptsPoolSize() { + return false; + } + } + + private SpannerOptions createSpannerOptions() { 
+ String endpoint = address.getHostString() + ":" + server.getPort(); + return SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + // Set a custom channel configurator to allow http instead of https. + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setEnableDirectAccess(false) + .setHost("http://" + endpoint) + // Set static credentials that will return the static OAuth test token. + .setCredentials(STATIC_CREDENTIALS) + // Also set a CallCredentialsProvider. These credentials should take precedence above + // the static credentials. + .setCallCredentialsProvider(() -> MoreCallCredentials.from(VARIABLE_CREDENTIALS)) + .build(); + } + + static class TestableGapicSpannerRpc extends GapicSpannerRpc { + public TestableGapicSpannerRpc(SpannerOptions options) { + super(options); + } + + @Override + GcpFallbackChannelOptions createFallbackChannelOptions( + GcpFallbackOpenTelemetry fallbackTelemetry, int minFailedCalls) { + // Override default 1-minute period to 10ms for instant testing + return GcpFallbackChannelOptions.newBuilder() + .setPrimaryChannelName("directpath") + .setFallbackChannelName("cloudpath") + .setMinFailedCalls(10) + .setPeriod(Duration.ofMillis(5)) + .setGcpFallbackOpenTelemetry(fallbackTelemetry) + .build(); + } + } + + @Test + public void testFallbackIntegration_doesNotSwitchWhenThresholdNotMet() throws Exception { + // Setup OpenTelemetry to capture metrics + InMemoryMetricReader metricReader = InMemoryMetricReader.create(); + SdkMeterProvider meterProvider = + SdkMeterProvider.builder().registerMetricReader(metricReader).build(); + OpenTelemetrySdk openTelemetry = + OpenTelemetrySdk.builder().setMeterProvider(meterProvider).build(); + + SpannerOptions.useEnvironment( + new SpannerOptions.SpannerEnvironment() { + @Override + public boolean isEnableGcpFallback() { + return true; + } + }); + try { + SpannerOptions.Builder builder = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setEnableDirectAccess(true) + 
.setHost("http://localhost:1") // Closed port + .setCredentials(NoCredentials.getInstance()) + .setOpenTelemetry(openTelemetry); + // Make sure the ExecuteBatchDml RPC fails quickly to keep the test fast. + // Note that the timeout is actually not used. It is the fact that it does not retry that + // makes it fail fast. + builder + .getSpannerStubSettingsBuilder() + .executeBatchDmlSettings() + .setSimpleTimeoutNoRetriesDuration(Duration.ofSeconds(10)); + // Setup Options with invalid host to force error + SpannerOptions options = builder.build(); + + TestableGapicSpannerRpc rpc = new TestableGapicSpannerRpc(options); + try { + // Make a call that is expected to fail + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + rpc.executeBatchDml( + com.google.spanner.v1.ExecuteBatchDmlRequest.newBuilder() + .setSession("projects/p/instances/i/databases/d/sessions/s") + .build(), + null)); + assertEquals(ErrorCode.UNAVAILABLE, exception.getErrorCode()); + + // Wait briefly for the 10ms period to trigger the fallback check + Thread.sleep(10); + + // Verify Fallback via Metrics + Collection metrics = metricReader.collectAllMetrics(); + boolean fallbackOccurred = + metrics.stream() + .anyMatch(md -> md.getName().contains("fallback_count") && hasValue(md)); + + assertFalse("Fallback metric should not be present", fallbackOccurred); + + } finally { + rpc.shutdown(); + } + } finally { + SpannerOptions.useDefaultEnvironment(); + } + } + + static class TestableGapicSpannerRpcWithLowerMinFailedCalls extends GapicSpannerRpc { + public TestableGapicSpannerRpcWithLowerMinFailedCalls(SpannerOptions options) { + super(options); + } + + @Override + GcpFallbackChannelOptions createFallbackChannelOptions( + GcpFallbackOpenTelemetry fallbackTelemetry, int minFailedCalls) { + // Override default 1-minute period to 10ms for instant testing + return GcpFallbackChannelOptions.newBuilder() + .setPrimaryChannelName("directpath") + .setFallbackChannelName("cloudpath") 
+ .setMinFailedCalls(1) + .setPeriod(Duration.ofMillis(5)) + .setGcpFallbackOpenTelemetry(fallbackTelemetry) + .build(); + } + } + + @Test + public void testFallbackIntegration_switchesToFallbackOnFailure() throws Exception { + // Setup OpenTelemetry to capture metrics + InMemoryMetricReader metricReader = InMemoryMetricReader.create(); + SdkMeterProvider meterProvider = + SdkMeterProvider.builder().registerMetricReader(metricReader).build(); + OpenTelemetrySdk openTelemetry = + OpenTelemetrySdk.builder().setMeterProvider(meterProvider).build(); + + SpannerOptions.useEnvironment( + new SpannerOptions.SpannerEnvironment() { + @Override + public boolean isEnableGcpFallback() { + return true; + } + }); + try { + SpannerOptions.Builder builder = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setEnableDirectAccess(true) + .setHost("http://localhost:1") // Closed port + .setCredentials(NoCredentials.getInstance()) + .setOpenTelemetry(openTelemetry); + // Make sure the ExecuteBatchDml RPC fails quickly to keep the test fast. + // Note that the timeout is actually not used. It is the fact that it does not retry that + // makes it fail fast. 
+ builder + .getSpannerStubSettingsBuilder() + .executeBatchDmlSettings() + .setSimpleTimeoutNoRetriesDuration(Duration.ofSeconds(10)); + // Setup Options with invalid host to force error + SpannerOptions options = builder.build(); + + TestableGapicSpannerRpcWithLowerMinFailedCalls rpc = + new TestableGapicSpannerRpcWithLowerMinFailedCalls(options); + try { + // Make a call that is expected to fail + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + rpc.executeBatchDml( + com.google.spanner.v1.ExecuteBatchDmlRequest.newBuilder() + .setSession("projects/p/instances/i/databases/d/sessions/s") + .build(), + null)); + assertEquals(ErrorCode.UNAVAILABLE, exception.getErrorCode()); + + // Wait briefly for the 10ms period to trigger the fallback check + Thread.sleep(10); + + // Verify Fallback via Metrics + Collection metrics = metricReader.collectAllMetrics(); + boolean fallbackOccurred = + metrics.stream() + .anyMatch(md -> md.getName().contains("fallback_count") && hasValue(md)); + + assertTrue( + "Fallback metric should be present, indicating GcpFallbackChannel is active", + fallbackOccurred); + + } finally { + rpc.shutdown(); + } + } finally { + SpannerOptions.useDefaultEnvironment(); + } + } + + private boolean hasValue(MetricData metricData) { + return metricData.getLongSumData().getPoints().stream().anyMatch(point -> point.getValue() > 0); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/GfeLatencyTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/GfeLatencyTest.java new file mode 100644 index 000000000000..bcded26d685f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/GfeLatencyTest.java @@ -0,0 +1,375 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import static org.junit.Assert.*; + +import com.google.auth.oauth2.AccessToken; +import com.google.auth.oauth2.OAuth2Credentials; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.common.base.MoreObjects; +import com.google.common.base.Preconditions; +import com.google.protobuf.ListValue; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.TypeCode; +import io.grpc.ForwardingServerCall; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Metadata; +import io.grpc.Server; +import io.grpc.ServerCall; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptor; +import io.grpc.auth.MoreCallCredentials; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import io.opencensus.stats.AggregationData; +import io.opencensus.stats.View; +import io.opencensus.stats.ViewData; +import io.opencensus.tags.TagKey; +import io.opencensus.tags.TagValue; +import java.net.InetSocketAddress; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.*; +import org.junit.runner.RunWith; +import 
org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class GfeLatencyTest { + + private static final String STATIC_OAUTH_TOKEN = "STATIC_TEST_OAUTH_TOKEN"; + private static final String VARIABLE_OAUTH_TOKEN = "VARIABLE_TEST_OAUTH_TOKEN"; + private static final OAuth2Credentials STATIC_CREDENTIALS = + OAuth2Credentials.create( + new AccessToken( + STATIC_OAUTH_TOKEN, + new java.util.Date( + System.currentTimeMillis() + TimeUnit.MILLISECONDS.convert(1L, TimeUnit.DAYS)))); + private static final OAuth2Credentials VARIABLE_CREDENTIALS = + OAuth2Credentials.create( + new AccessToken( + VARIABLE_OAUTH_TOKEN, + new java.util.Date( + System.currentTimeMillis() + TimeUnit.MILLISECONDS.convert(1L, TimeUnit.DAYS)))); + + private static MockSpannerServiceImpl mockSpanner; + private static Server server; + private static Spanner spanner; + private static DatabaseClient databaseClient; + + private static MockSpannerServiceImpl mockSpannerNoHeader; + private static Server serverNoHeader; + private static Spanner spannerNoHeader; + private static DatabaseClient databaseClientNoHeader; + + private static final String INSTANCE_ID = "fake-instance"; + private static final String DATABASE_ID = "fake-database"; + private static final String PROJECT_ID = "fake-project"; + + private static final int MAXIMUM_RETRIES = 50000; + + private static final AtomicInteger FAKE_SERVER_TIMING = + new AtomicInteger(new Random().nextInt(1000) + 1); + + private static final Statement SELECT1AND2 = + Statement.of("SELECT 1 AS COL1 UNION ALL SELECT 2 AS COL1"); + + private static final ResultSetMetadata SELECT1AND2_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + StructType.Field.newBuilder() + .setName("COL1") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.INT64) + .build()) + .build()) + .build()) + .build(); + private static final com.google.spanner.v1.ResultSet SELECT1_RESULTSET = + 
com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("1").build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("2").build()) + .build()) + .setMetadata(SELECT1AND2_METADATA) + .build(); + private static final Statement UPDATE_FOO_STATEMENT = + Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=2"); + + @BeforeClass + public static void startServer() throws Exception { + //noinspection deprecation + SpannerRpcViews.registerGfeLatencyAndHeaderMissingCountViews(); + + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.query(SELECT1AND2, SELECT1_RESULTSET)); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.update(UPDATE_FOO_STATEMENT, 1L)); + InetSocketAddress address = new InetSocketAddress("localhost", 0); + server = + NettyServerBuilder.forAddress(address) + .addService(mockSpanner) + .intercept( + new ServerInterceptor() { + @Override + public ServerCall.Listener interceptCall( + ServerCall serverCall, + Metadata headers, + ServerCallHandler serverCallHandler) { + return serverCallHandler.startCall( + new ForwardingServerCall.SimpleForwardingServerCall( + serverCall) { + @Override + public void sendHeaders(Metadata headers) { + headers.put( + Metadata.Key.of("server-timing", Metadata.ASCII_STRING_MARSHALLER), + String.format("gfet4t7; dur=%d", FAKE_SERVER_TIMING.get())); + super.sendHeaders(headers); + } + }, + headers); + } + }) + .build() + .start(); + spanner = createSpannerOptions(address, server).getService(); + databaseClient = spanner.getDatabaseClient(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DATABASE_ID)); + + mockSpannerNoHeader = new MockSpannerServiceImpl(); + mockSpannerNoHeader.setAbortProbability(0.0D); + 
mockSpannerNoHeader.putStatementResult( + MockSpannerServiceImpl.StatementResult.query(SELECT1AND2, SELECT1_RESULTSET)); + mockSpannerNoHeader.putStatementResult( + MockSpannerServiceImpl.StatementResult.update(UPDATE_FOO_STATEMENT, 1L)); + InetSocketAddress addressNoHeader = new InetSocketAddress("localhost", 0); + serverNoHeader = + NettyServerBuilder.forAddress(addressNoHeader) + .addService(mockSpannerNoHeader) + .build() + .start(); + spannerNoHeader = createSpannerOptions(addressNoHeader, serverNoHeader).getService(); + databaseClientNoHeader = + spannerNoHeader.getDatabaseClient(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DATABASE_ID)); + } + + @AfterClass + public static void stopServer() throws InterruptedException { + if (spanner != null) { + spanner.close(); + server.shutdown(); + server.awaitTermination(); + } + + if (spannerNoHeader != null) { + spannerNoHeader.close(); + serverNoHeader.shutdown(); + serverNoHeader.awaitTermination(); + } + } + + @After + public void reset() { + mockSpanner.reset(); + mockSpannerNoHeader.reset(); + } + + @Test + public void testGfeLatencyExecuteStreamingSql() throws InterruptedException { + try (ResultSet rs = databaseClient.singleUse().executeQuery(SELECT1AND2)) { + rs.next(); + } + + long latency = + getMetric( + SpannerRpcViews.SPANNER_GFE_LATENCY_VIEW, + "google.spanner.v1.Spanner/ExecuteStreamingSql", + false); + assertEquals(FAKE_SERVER_TIMING.get(), latency); + } + + @Test + public void testGfeLatencyExecuteSql() throws InterruptedException { + databaseClient + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(UPDATE_FOO_STATEMENT)); + + long latency = + getMetric( + SpannerRpcViews.SPANNER_GFE_LATENCY_VIEW, + "google.spanner.v1.Spanner/ExecuteSql", + false); + assertEquals(FAKE_SERVER_TIMING.get(), latency); + } + + @Test + public void testGfeMissingHeaderCountExecuteStreamingSql() throws InterruptedException { + try (ResultSet rs = databaseClient.singleUse().executeQuery(SELECT1AND2)) { + 
rs.next(); + } + long count = + getMetric( + SpannerRpcViews.SPANNER_GFE_HEADER_MISSING_COUNT_VIEW, + "google.spanner.v1.Spanner/ExecuteStreamingSql", + false); + assertEquals(0, count); + + try (ResultSet rs = databaseClientNoHeader.singleUse().executeQuery(SELECT1AND2)) { + rs.next(); + } + long count1 = + getMetric( + SpannerRpcViews.SPANNER_GFE_HEADER_MISSING_COUNT_VIEW, + "google.spanner.v1.Spanner/ExecuteStreamingSql", + true); + assertTrue(count1 >= 1); + } + + @Test + public void testGfeMissingHeaderExecuteSql() throws InterruptedException { + databaseClient + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(UPDATE_FOO_STATEMENT)); + long count = + getMetric( + SpannerRpcViews.SPANNER_GFE_HEADER_MISSING_COUNT_VIEW, + "google.spanner.v1.Spanner/Commit", + false); + assertEquals(0, count); + + databaseClientNoHeader + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(UPDATE_FOO_STATEMENT)); + long count1 = + getMetric( + SpannerRpcViews.SPANNER_GFE_HEADER_MISSING_COUNT_VIEW, + "google.spanner.v1.Spanner/Commit", + true); + assertEquals(1, count1); + } + + private static SpannerOptions createSpannerOptions(InetSocketAddress address, Server server) { + String endpoint = address.getHostString() + ":" + server.getPort(); + return SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + // Set a custom channel configurator to allow http instead of https. + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setEnableDirectAccess(false) + .setHost("http://" + endpoint) + // Set static credentials that will return the static OAuth test token. + .setCredentials(STATIC_CREDENTIALS) + // Also set a CallCredentialsProvider. These credentials should take precedence above + // the static credentials. 
+ .setCallCredentialsProvider(() -> MoreCallCredentials.from(VARIABLE_CREDENTIALS)) + .build(); + } + + private long getAggregationValueAsLong(AggregationData aggregationData) { + return MoreObjects.firstNonNull( + aggregationData.match( + new io.opencensus.common.Function() { + @Override + public Long apply(AggregationData.SumDataDouble arg) { + return (long) Preconditions.checkNotNull(arg).getSum(); + } + }, + new io.opencensus.common.Function() { + @Override + public Long apply(AggregationData.SumDataLong arg) { + return Preconditions.checkNotNull(arg).getSum(); + } + }, + new io.opencensus.common.Function() { + @Override + public Long apply(AggregationData.CountData arg) { + return Preconditions.checkNotNull(arg).getCount(); + } + }, + new io.opencensus.common.Function() { + @Override + public Long apply(AggregationData.DistributionData arg) { + return (long) Preconditions.checkNotNull(arg).getMean(); + } + }, + new io.opencensus.common.Function() { + @Override + public Long apply(AggregationData.LastValueDataDouble arg) { + return (long) Preconditions.checkNotNull(arg).getLastValue(); + } + }, + new io.opencensus.common.Function() { + @Override + public Long apply(AggregationData.LastValueDataLong arg) { + return Preconditions.checkNotNull(arg).getLastValue(); + } + }, + new io.opencensus.common.Function() { + @Override + public Long apply(AggregationData arg) { + throw new UnsupportedOperationException(); + } + }), + -1L); + } + + private long getMetric(View view, String method, boolean withOverride) { + List tagValues = new java.util.ArrayList<>(); + for (TagKey column : view.getColumns()) { + if (column == SpannerRpcViews.INSTANCE_ID) { + tagValues.add(TagValue.create(INSTANCE_ID)); + } else if (column == SpannerRpcViews.DATABASE_ID) { + tagValues.add(TagValue.create(DATABASE_ID)); + } else if (column == SpannerRpcViews.METHOD) { + tagValues.add(TagValue.create(method)); + } else if (column == SpannerRpcViews.PROJECT_ID) { + 
tagValues.add(TagValue.create(PROJECT_ID)); + } + } + for (int i = 0; i < MAXIMUM_RETRIES; i++) { + Thread.yield(); + ViewData viewData = SpannerRpcViews.viewManager.getView(view.getName()); + assertNotNull(viewData); + if (viewData.getAggregationMap() != null) { + Map, AggregationData> aggregationMap = viewData.getAggregationMap(); + AggregationData aggregationData = aggregationMap.get(tagValues); + if (aggregationData == null + || withOverride && getAggregationValueAsLong(aggregationData) == 0) { + continue; + } + return getAggregationValueAsLong(aggregationData); + } + } + return -1; + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/GrpcChannelEndpointCacheTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/GrpcChannelEndpointCacheTest.java new file mode 100644 index 000000000000..d44da87add75 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/GrpcChannelEndpointCacheTest.java @@ -0,0 +1,113 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.spi.v1; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.cloud.spanner.SpannerException; +import io.grpc.ManagedChannelBuilder; +import org.junit.Test; + +public class GrpcChannelEndpointCacheTest { + + private static InstantiatingGrpcChannelProvider createProvider(String endpoint) { + return InstantiatingGrpcChannelProvider.newBuilder() + .setEndpoint(endpoint) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .build(); + } + + @Test + public void defaultChannelIsCached() throws Exception { + GrpcChannelEndpointCache cache = new GrpcChannelEndpointCache(createProvider("localhost:1234")); + try { + ChannelEndpoint defaultChannel = cache.defaultChannel(); + ChannelEndpoint server = cache.get(defaultChannel.getAddress()); + assertThat(server).isSameInstanceAs(defaultChannel); + } finally { + cache.shutdown(); + } + } + + @Test + public void getCachesPerAddress() throws Exception { + GrpcChannelEndpointCache cache = new GrpcChannelEndpointCache(createProvider("localhost:1234")); + try { + ChannelEndpoint first = cache.get("localhost:1111"); + ChannelEndpoint second = cache.get("localhost:1111"); + ChannelEndpoint third = cache.get("localhost:2222"); + + assertThat(second).isSameInstanceAs(first); + assertThat(third).isNotSameInstanceAs(first); + } finally { + cache.shutdown(); + } + } + + @Test + public void evictRemovesNonDefaultServer() throws Exception { + GrpcChannelEndpointCache cache = new GrpcChannelEndpointCache(createProvider("localhost:1234")); + try { + ChannelEndpoint first = cache.get("localhost:1111"); + cache.evict("localhost:1111"); + ChannelEndpoint second = cache.get("localhost:1111"); + + assertThat(second).isNotSameInstanceAs(first); + } finally { + cache.shutdown(); + } + } + + @Test + public void evictIgnoresDefaultChannel() throws Exception { + 
GrpcChannelEndpointCache cache = new GrpcChannelEndpointCache(createProvider("localhost:1234")); + try { + ChannelEndpoint defaultChannel = cache.defaultChannel(); + cache.evict(defaultChannel.getAddress()); + ChannelEndpoint server = cache.get(defaultChannel.getAddress()); + + assertThat(server).isSameInstanceAs(defaultChannel); + } finally { + cache.shutdown(); + } + } + + @Test + public void shutdownPreventsNewServers() throws Exception { + GrpcChannelEndpointCache cache = new GrpcChannelEndpointCache(createProvider("localhost:1234")); + cache.shutdown(); + + assertThrows(SpannerException.class, () -> cache.get("localhost:1111")); + assertThat(cache.defaultChannel().getChannel().isShutdown()).isTrue(); + } + + @Test + public void healthReflectsChannelShutdown() throws Exception { + GrpcChannelEndpointCache cache = new GrpcChannelEndpointCache(createProvider("localhost:1234")); + try { + ChannelEndpoint server = cache.get("localhost:1111"); + assertThat(server.isHealthy()).isTrue(); + + server.getChannel().shutdownNow(); + assertThat(server.isHealthy()).isFalse(); + } finally { + cache.shutdown(); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/KeyAwareChannelTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/KeyAwareChannelTest.java new file mode 100644 index 000000000000..123ffba1d43c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/KeyAwareChannelTest.java @@ -0,0 +1,885 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.protobuf.ByteString; +import com.google.protobuf.Empty; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CacheUpdate; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.CommitResponse; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.Group; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.Range; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.RoutingHint; +import com.google.spanner.v1.SpannerGrpc; +import com.google.spanner.v1.Tablet; +import com.google.spanner.v1.Transaction; +import com.google.spanner.v1.TransactionOptions; +import com.google.spanner.v1.TransactionSelector; +import io.grpc.CallOptions; +import io.grpc.ClientCall; +import io.grpc.ManagedChannel; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import io.grpc.Status; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import javax.annotation.Nullable; +import org.junit.Test; +import org.junit.runner.RunWith; +import 
org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class KeyAwareChannelTest { + private static final String DEFAULT_ADDRESS = "default:1234"; + private static final String SESSION = + "projects/p/instances/i/databases/d/sessions/test-session-id"; + + @Test + public void cancelBeforeStartPreservesTrailersAndSkipsDelegateCreation() throws Exception { + TestHarness harness = createHarness(); + ClientCall call = + harness.channel.newCall(SpannerGrpc.getExecuteSqlMethod(), CallOptions.DEFAULT); + + Metadata causeTrailers = new Metadata(); + Metadata.Key key = Metadata.Key.of("debug", Metadata.ASCII_STRING_MARSHALLER); + causeTrailers.put(key, "timeout"); + RuntimeException cause = + Status.DEADLINE_EXCEEDED + .withDescription("server timeout") + .asRuntimeException(causeTrailers); + + call.cancel("cancelled by client", cause); + CapturingListener listener = new CapturingListener<>(); + call.start(listener, new Metadata()); + + assertThat(harness.defaultManagedChannel.callCount()).isEqualTo(0); + assertThat(listener.closeCount).isEqualTo(1); + assertThat(listener.closedStatus.getCode()).isEqualTo(Status.Code.CANCELLED); + assertThat(listener.closedStatus.getDescription()).isEqualTo("cancelled by client"); + assertThat(listener.closedTrailers.get(key)).isEqualTo("timeout"); + } + + @Test + public void cancelAfterStartBeforeSendSkipsDelegateCreation() throws Exception { + TestHarness harness = createHarness(); + ClientCall call = + harness.channel.newCall(SpannerGrpc.getExecuteSqlMethod(), CallOptions.DEFAULT); + + CapturingListener listener = new CapturingListener<>(); + call.start(listener, new Metadata()); + call.cancel("cancel", null); + call.sendMessage(ExecuteSqlRequest.newBuilder().setSession(SESSION).build()); + + assertThat(harness.defaultManagedChannel.callCount()).isEqualTo(0); + assertThat(listener.closeCount).isEqualTo(1); + assertThat(listener.closedStatus.getCode()).isEqualTo(Status.Code.CANCELLED); + } + + @Test + public void 
cancelAfterDelegateCreationDelegatesToUnderlyingCall() throws Exception { + TestHarness harness = createHarness(); + ClientCall call = + harness.channel.newCall(SpannerGrpc.getExecuteSqlMethod(), CallOptions.DEFAULT); + + CapturingListener listener = new CapturingListener<>(); + call.start(listener, new Metadata()); + call.sendMessage(ExecuteSqlRequest.newBuilder().setSession(SESSION).build()); + + @SuppressWarnings("unchecked") + RecordingClientCall delegate = + (RecordingClientCall) + harness.defaultManagedChannel.latestCall(); + + RuntimeException cause = new RuntimeException("boom"); + call.cancel("cancel now", cause); + + assertThat(delegate.cancelCalled).isTrue(); + assertThat(delegate.cancelMessage).isEqualTo("cancel now"); + assertThat(delegate.cancelCause).isSameInstanceAs(cause); + assertThat(listener.closeCount).isEqualTo(0); + } + + @Test + public void sendMessageBeforeStartThrows() throws Exception { + TestHarness harness = createHarness(); + ClientCall call = + harness.channel.newCall(SpannerGrpc.getExecuteSqlMethod(), CallOptions.DEFAULT); + + assertThrows( + IllegalStateException.class, + () -> call.sendMessage(ExecuteSqlRequest.newBuilder().setSession(SESSION).build())); + } + + @Test + public void deadlineExceededFromDelegateIsForwardedToListener() throws Exception { + TestHarness harness = createHarness(); + ClientCall call = + harness.channel.newCall(SpannerGrpc.getExecuteSqlMethod(), CallOptions.DEFAULT); + CapturingListener listener = new CapturingListener<>(); + + call.start(listener, new Metadata()); + call.sendMessage(ExecuteSqlRequest.newBuilder().setSession(SESSION).build()); + + @SuppressWarnings("unchecked") + RecordingClientCall delegate = + (RecordingClientCall) + harness.defaultManagedChannel.latestCall(); + + Metadata trailers = new Metadata(); + Metadata.Key key = Metadata.Key.of("timeout", Metadata.ASCII_STRING_MARSHALLER); + trailers.put(key, "true"); + Status status = Status.DEADLINE_EXCEEDED.withDescription("rpc timeout"); + 
delegate.emitOnClose(status, trailers); + + assertThat(listener.closeCount).isEqualTo(1); + assertThat(listener.closedStatus).isEqualTo(status); + assertThat(listener.closedTrailers.get(key)).isEqualTo("true"); + } + + @Test + public void timeoutOnCommitClearsTransactionAffinity() throws Exception { + TestHarness harness = createHarness(); + ByteString transactionId = ByteString.copyFromUtf8("tx-1"); + + ClientCall beginCall = + harness.channel.newCall(SpannerGrpc.getBeginTransactionMethod(), CallOptions.DEFAULT); + beginCall.start(new CapturingListener(), new Metadata()); + beginCall.sendMessage(BeginTransactionRequest.newBuilder().setSession(SESSION).build()); + + @SuppressWarnings("unchecked") + RecordingClientCall beginDelegate = + (RecordingClientCall) + harness.defaultManagedChannel.latestCall(); + beginDelegate.emitOnMessage(Transaction.newBuilder().setId(transactionId).build()); + beginDelegate.emitOnClose(Status.OK, new Metadata()); + + ClientCall commitCall = + harness.channel.newCall(SpannerGrpc.getCommitMethod(), CallOptions.DEFAULT); + commitCall.start(new CapturingListener(), new Metadata()); + commitCall.sendMessage( + CommitRequest.newBuilder().setSession(SESSION).setTransactionId(transactionId).build()); + + assertThat(harness.endpointCache.getCount(DEFAULT_ADDRESS)).isEqualTo(1); + + @SuppressWarnings("unchecked") + RecordingClientCall commitDelegate = + (RecordingClientCall) + harness.defaultManagedChannel.latestCall(); + commitDelegate.emitOnClose(Status.DEADLINE_EXCEEDED, new Metadata()); + + ClientCall rollbackCall = + harness.channel.newCall(SpannerGrpc.getRollbackMethod(), CallOptions.DEFAULT); + rollbackCall.start(new CapturingListener(), new Metadata()); + rollbackCall.sendMessage( + RollbackRequest.newBuilder().setSession(SESSION).setTransactionId(transactionId).build()); + + assertThat(harness.endpointCache.getCount(DEFAULT_ADDRESS)).isEqualTo(1); + } + + @Test + public void requestAfterCancelBeforeSendIsIgnored() throws Exception { + 
TestHarness harness = createHarness(); + ClientCall call = + harness.channel.newCall(SpannerGrpc.getExecuteSqlMethod(), CallOptions.DEFAULT); + + CapturingListener listener = new CapturingListener<>(); + call.start(listener, new Metadata()); + call.cancel("cancel", null); + call.request(10); + call.sendMessage(ExecuteSqlRequest.newBuilder().setSession(SESSION).build()); + + assertThat(harness.defaultManagedChannel.callCount()).isEqualTo(0); + assertThat(listener.closeCount).isEqualTo(1); + assertThat(listener.closedStatus.getCode()).isEqualTo(Status.Code.CANCELLED); + } + + @Test + public void resultSetCacheUpdateRoutesSubsequentRequest() throws Exception { + TestHarness harness = createHarness(); + ExecuteSqlRequest request = + ExecuteSqlRequest.newBuilder() + .setSession(SESSION) + .setRoutingHint(RoutingHint.newBuilder().setKey(bytes("a")).build()) + .build(); + + ClientCall firstCall = + harness.channel.newCall(SpannerGrpc.getExecuteSqlMethod(), CallOptions.DEFAULT); + firstCall.start(new CapturingListener(), new Metadata()); + firstCall.sendMessage(request); + + @SuppressWarnings("unchecked") + RecordingClientCall firstDelegate = + (RecordingClientCall) + harness.defaultManagedChannel.latestCall(); + + CacheUpdate cacheUpdate = + CacheUpdate.newBuilder() + .setDatabaseId(7L) + .addRange( + Range.newBuilder() + .setStartKey(bytes("a")) + .setLimitKey(bytes("z")) + .setGroupUid(9L) + .setSplitId(1L) + .setGeneration(bytes("1"))) + .addGroup( + Group.newBuilder() + .setGroupUid(9L) + .setGeneration(bytes("1")) + .addTablets( + Tablet.newBuilder() + .setTabletUid(3L) + .setServerAddress("routed:1234") + .setIncarnation(bytes("1")) + .setDistance(0))) + .build(); + + firstDelegate.emitOnMessage(ResultSet.newBuilder().setCacheUpdate(cacheUpdate).build()); + + ClientCall secondCall = + harness.channel.newCall(SpannerGrpc.getExecuteSqlMethod(), CallOptions.DEFAULT); + secondCall.start(new CapturingListener(), new Metadata()); + secondCall.sendMessage(request); + + 
assertThat(harness.endpointCache.callCountForAddress(DEFAULT_ADDRESS)).isEqualTo(1); + assertThat(harness.endpointCache.callCountForAddress("routed:1234")).isEqualTo(1); + } + + @Test + public void readOnlyTransactionRoutesEachReadIndependently() throws Exception { + TestHarness harness = createHarness(); + ByteString transactionId = ByteString.copyFromUtf8("ro-tx-1"); + + // 1. Begin a read-only transaction (stale read). + ClientCall beginCall = + harness.channel.newCall(SpannerGrpc.getBeginTransactionMethod(), CallOptions.DEFAULT); + CapturingListener beginListener = new CapturingListener<>(); + beginCall.start(beginListener, new Metadata()); + beginCall.sendMessage( + BeginTransactionRequest.newBuilder() + .setSession(SESSION) + .setOptions( + TransactionOptions.newBuilder() + .setReadOnly( + TransactionOptions.ReadOnly.newBuilder() + .setReturnReadTimestamp(true) + .build())) + .build()); + + // BeginTransaction goes to default channel. + assertThat(harness.defaultManagedChannel.callCount()).isEqualTo(1); + + @SuppressWarnings("unchecked") + RecordingClientCall beginDelegate = + (RecordingClientCall) + harness.defaultManagedChannel.latestCall(); + beginDelegate.emitOnMessage(Transaction.newBuilder().setId(transactionId).build()); + beginDelegate.emitOnClose(Status.OK, new Metadata()); + + // 2. Populate cache with routing data for two different key ranges. 
+ CacheUpdate cacheUpdate = + CacheUpdate.newBuilder() + .setDatabaseId(7L) + .addRange( + Range.newBuilder() + .setStartKey(bytes("a")) + .setLimitKey(bytes("m")) + .setGroupUid(1L) + .setSplitId(1L) + .setGeneration(bytes("1"))) + .addRange( + Range.newBuilder() + .setStartKey(bytes("m")) + .setLimitKey(bytes("z")) + .setGroupUid(2L) + .setSplitId(2L) + .setGeneration(bytes("1"))) + .addGroup( + Group.newBuilder() + .setGroupUid(1L) + .setGeneration(bytes("1")) + .addTablets( + Tablet.newBuilder() + .setTabletUid(1L) + .setServerAddress("server-a:1234") + .setIncarnation(bytes("1")) + .setDistance(0))) + .addGroup( + Group.newBuilder() + .setGroupUid(2L) + .setGeneration(bytes("1")) + .addTablets( + Tablet.newBuilder() + .setTabletUid(2L) + .setServerAddress("server-b:1234") + .setIncarnation(bytes("1")) + .setDistance(0))) + .build(); + + // Seed the cache via a dummy query response with cache update. + ClientCall seedCall = + harness.channel.newCall(SpannerGrpc.getExecuteSqlMethod(), CallOptions.DEFAULT); + seedCall.start(new CapturingListener(), new Metadata()); + seedCall.sendMessage( + ExecuteSqlRequest.newBuilder() + .setSession(SESSION) + .setRoutingHint(RoutingHint.newBuilder().setKey(bytes("a")).build()) + .build()); + @SuppressWarnings("unchecked") + RecordingClientCall seedDelegate = + (RecordingClientCall) + harness.defaultManagedChannel.latestCall(); + seedDelegate.emitOnMessage(ResultSet.newBuilder().setCacheUpdate(cacheUpdate).build()); + + // 3. Send a streaming read with key in range [a, m) → should go to server-a. 
+ ClientCall readCallA = + harness.channel.newCall(SpannerGrpc.getStreamingReadMethod(), CallOptions.DEFAULT); + readCallA.start(new CapturingListener(), new Metadata()); + readCallA.sendMessage( + ReadRequest.newBuilder() + .setSession(SESSION) + .setTransaction(TransactionSelector.newBuilder().setId(transactionId)) + .setRoutingHint(RoutingHint.newBuilder().setKey(bytes("b")).build()) + .build()); + + assertThat(harness.endpointCache.callCountForAddress("server-a:1234")).isEqualTo(1); + + // 4. Send an ExecuteStreamingSql with key in range [m, z) → should go to server-b. + ClientCall queryCallB = + harness.channel.newCall(SpannerGrpc.getExecuteStreamingSqlMethod(), CallOptions.DEFAULT); + queryCallB.start(new CapturingListener(), new Metadata()); + queryCallB.sendMessage( + ExecuteSqlRequest.newBuilder() + .setSession(SESSION) + .setTransaction(TransactionSelector.newBuilder().setId(transactionId)) + .setRoutingHint(RoutingHint.newBuilder().setKey(bytes("n")).build()) + .build()); + + assertThat(harness.endpointCache.callCountForAddress("server-b:1234")).isEqualTo(1); + + // Neither read was pinned to the default host (besides the initial begin + seed). + // default had: 1 begin + 1 seed = 2 calls + assertThat(harness.defaultManagedChannel.callCount()).isEqualTo(2); + } + + @Test + public void readOnlyInlinedBeginExecuteSqlRoutesSubsequentRequestsIndependently() + throws Exception { + TestHarness harness = createHarness(); + ByteString transactionId = ByteString.copyFromUtf8("ro-inline-sql"); + + seedCache(harness, createTwoRangeCacheUpdate()); + + // First query begins a read-only transaction inline and routes to server-a. 
+ ClientCall firstCall = + harness.channel.newCall(SpannerGrpc.getExecuteSqlMethod(), CallOptions.DEFAULT); + firstCall.start(new CapturingListener(), new Metadata()); + firstCall.sendMessage( + ExecuteSqlRequest.newBuilder() + .setSession(SESSION) + .setTransaction( + TransactionSelector.newBuilder() + .setBegin( + TransactionOptions.newBuilder() + .setReadOnly( + TransactionOptions.ReadOnly.newBuilder() + .setReturnReadTimestamp(true) + .build()) + .build())) + .setRoutingHint(RoutingHint.newBuilder().setKey(bytes("b")).build()) + .build()); + + assertThat(harness.endpointCache.callCountForAddress("server-a:1234")).isEqualTo(1); + + @SuppressWarnings("unchecked") + RecordingClientCall firstDelegate = + (RecordingClientCall) + harness.endpointCache.latestCallForAddress("server-a:1234"); + firstDelegate.emitOnMessage( + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setTransaction(Transaction.newBuilder().setId(transactionId))) + .build()); + + // Second query in same txn should route by key to server-b, not affinity-pin to server-a. 
+ ClientCall secondCall = + harness.channel.newCall(SpannerGrpc.getExecuteSqlMethod(), CallOptions.DEFAULT); + secondCall.start(new CapturingListener(), new Metadata()); + secondCall.sendMessage( + ExecuteSqlRequest.newBuilder() + .setSession(SESSION) + .setTransaction(TransactionSelector.newBuilder().setId(transactionId)) + .setRoutingHint(RoutingHint.newBuilder().setKey(bytes("n")).build()) + .build()); + + assertThat(harness.endpointCache.callCountForAddress("server-a:1234")).isEqualTo(1); + assertThat(harness.endpointCache.callCountForAddress("server-b:1234")).isEqualTo(1); + assertThat(harness.defaultManagedChannel.callCount()).isEqualTo(1); + } + + @Test + public void readOnlyInlinedBeginReadRoutesSubsequentRequestsIndependently() throws Exception { + TestHarness harness = createHarness(); + ByteString transactionId = ByteString.copyFromUtf8("ro-inline-read"); + + seedCache(harness, createTwoRangeCacheUpdate()); + + // First read begins a read-only transaction inline and routes to server-a. 
+ ClientCall firstCall = + harness.channel.newCall(SpannerGrpc.getStreamingReadMethod(), CallOptions.DEFAULT); + firstCall.start(new CapturingListener(), new Metadata()); + firstCall.sendMessage( + ReadRequest.newBuilder() + .setSession(SESSION) + .setTransaction( + TransactionSelector.newBuilder() + .setBegin( + TransactionOptions.newBuilder() + .setReadOnly( + TransactionOptions.ReadOnly.newBuilder() + .setReturnReadTimestamp(true) + .build()) + .build())) + .setRoutingHint(RoutingHint.newBuilder().setKey(bytes("b")).build()) + .build()); + + assertThat(harness.endpointCache.callCountForAddress("server-a:1234")).isEqualTo(1); + + @SuppressWarnings("unchecked") + RecordingClientCall firstDelegate = + (RecordingClientCall) + harness.endpointCache.latestCallForAddress("server-a:1234"); + firstDelegate.emitOnMessage( + PartialResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setTransaction(Transaction.newBuilder().setId(transactionId))) + .build()); + + // Second read in same txn should route by key to server-b, not affinity-pin to server-a. + ClientCall secondCall = + harness.channel.newCall(SpannerGrpc.getStreamingReadMethod(), CallOptions.DEFAULT); + secondCall.start(new CapturingListener(), new Metadata()); + secondCall.sendMessage( + ReadRequest.newBuilder() + .setSession(SESSION) + .setTransaction(TransactionSelector.newBuilder().setId(transactionId)) + .setRoutingHint(RoutingHint.newBuilder().setKey(bytes("n")).build()) + .build()); + + assertThat(harness.endpointCache.callCountForAddress("server-a:1234")).isEqualTo(1); + assertThat(harness.endpointCache.callCountForAddress("server-b:1234")).isEqualTo(1); + assertThat(harness.defaultManagedChannel.callCount()).isEqualTo(1); + } + + @Test + public void readOnlyTransactionDoesNotRecordAffinity() throws Exception { + TestHarness harness = createHarness(); + ByteString transactionId = ByteString.copyFromUtf8("ro-tx-2"); + + // Begin a read-only transaction. 
+ ClientCall beginCall = + harness.channel.newCall(SpannerGrpc.getBeginTransactionMethod(), CallOptions.DEFAULT); + beginCall.start(new CapturingListener(), new Metadata()); + beginCall.sendMessage( + BeginTransactionRequest.newBuilder() + .setSession(SESSION) + .setOptions( + TransactionOptions.newBuilder() + .setReadOnly( + TransactionOptions.ReadOnly.newBuilder() + .setReturnReadTimestamp(true) + .build())) + .build()); + + @SuppressWarnings("unchecked") + RecordingClientCall beginDelegate = + (RecordingClientCall) + harness.defaultManagedChannel.latestCall(); + beginDelegate.emitOnMessage(Transaction.newBuilder().setId(transactionId).build()); + beginDelegate.emitOnClose(Status.OK, new Metadata()); + + // No affinity should be recorded for the default endpoint. + // Verify by checking that the endpoint cache was never queried for affinity lookup. + // The default endpoint getCount tracks affinity lookups. + assertThat(harness.endpointCache.getCount(DEFAULT_ADDRESS)).isEqualTo(0); + + // Send a read using the transaction ID (no cache populated, so falls back to default). + ClientCall readCall = + harness.channel.newCall(SpannerGrpc.getExecuteSqlMethod(), CallOptions.DEFAULT); + readCall.start(new CapturingListener(), new Metadata()); + readCall.sendMessage( + ExecuteSqlRequest.newBuilder() + .setSession(SESSION) + .setTransaction(TransactionSelector.newBuilder().setId(transactionId)) + .build()); + + // The read goes to default (no cache data), but NOT because of affinity. + // No affinity lookup should have been performed for the read-only txn. + assertThat(harness.endpointCache.getCount(DEFAULT_ADDRESS)).isEqualTo(0); + + // Now receive a response with the transaction ID — should NOT record affinity. 
+ @SuppressWarnings("unchecked") + RecordingClientCall readDelegate = + (RecordingClientCall) + harness.defaultManagedChannel.latestCall(); + readDelegate.emitOnMessage( + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setTransaction(Transaction.newBuilder().setId(transactionId))) + .build()); + + // Still no affinity recorded. + assertThat(harness.endpointCache.getCount(DEFAULT_ADDRESS)).isEqualTo(0); + } + + @Test + public void readOnlyTransactionCleanupOnClose() throws Exception { + TestHarness harness = createHarness(); + ByteString transactionId = ByteString.copyFromUtf8("ro-tx-3"); + + // Begin a read-only transaction. + ClientCall beginCall = + harness.channel.newCall(SpannerGrpc.getBeginTransactionMethod(), CallOptions.DEFAULT); + beginCall.start(new CapturingListener(), new Metadata()); + beginCall.sendMessage( + BeginTransactionRequest.newBuilder() + .setSession(SESSION) + .setOptions( + TransactionOptions.newBuilder() + .setReadOnly( + TransactionOptions.ReadOnly.newBuilder() + .setReturnReadTimestamp(true) + .build())) + .build()); + + @SuppressWarnings("unchecked") + RecordingClientCall beginDelegate = + (RecordingClientCall) + harness.defaultManagedChannel.latestCall(); + beginDelegate.emitOnMessage(Transaction.newBuilder().setId(transactionId).build()); + beginDelegate.emitOnClose(Status.OK, new Metadata()); + + // Clear transaction affinity (simulates MultiUseReadOnlyTransaction.close()). 
+ harness.channel.clearTransactionAffinity(transactionId); + } + + private static CacheUpdate createTwoRangeCacheUpdate() { + return CacheUpdate.newBuilder() + .setDatabaseId(7L) + .addRange( + Range.newBuilder() + .setStartKey(bytes("a")) + .setLimitKey(bytes("m")) + .setGroupUid(1L) + .setSplitId(1L) + .setGeneration(bytes("1"))) + .addRange( + Range.newBuilder() + .setStartKey(bytes("m")) + .setLimitKey(bytes("z")) + .setGroupUid(2L) + .setSplitId(2L) + .setGeneration(bytes("1"))) + .addGroup( + Group.newBuilder() + .setGroupUid(1L) + .setGeneration(bytes("1")) + .addTablets( + Tablet.newBuilder() + .setTabletUid(1L) + .setServerAddress("server-a:1234") + .setIncarnation(bytes("1")) + .setDistance(0))) + .addGroup( + Group.newBuilder() + .setGroupUid(2L) + .setGeneration(bytes("1")) + .addTablets( + Tablet.newBuilder() + .setTabletUid(2L) + .setServerAddress("server-b:1234") + .setIncarnation(bytes("1")) + .setDistance(0))) + .build(); + } + + private static void seedCache(TestHarness harness, CacheUpdate cacheUpdate) { + ClientCall seedCall = + harness.channel.newCall(SpannerGrpc.getExecuteSqlMethod(), CallOptions.DEFAULT); + seedCall.start(new CapturingListener(), new Metadata()); + seedCall.sendMessage( + ExecuteSqlRequest.newBuilder() + .setSession(SESSION) + .setRoutingHint(RoutingHint.newBuilder().setKey(bytes("a")).build()) + .build()); + + @SuppressWarnings("unchecked") + RecordingClientCall seedDelegate = + (RecordingClientCall) + harness.defaultManagedChannel.latestCall(); + seedDelegate.emitOnMessage(ResultSet.newBuilder().setCacheUpdate(cacheUpdate).build()); + } + + private static TestHarness createHarness() throws IOException { + FakeEndpointCache endpointCache = new FakeEndpointCache(DEFAULT_ADDRESS); + InstantiatingGrpcChannelProvider provider = + InstantiatingGrpcChannelProvider.newBuilder().setEndpoint("localhost:9999").build(); + KeyAwareChannel channel = KeyAwareChannel.create(provider, baseProvider -> endpointCache); + return new 
TestHarness(channel, endpointCache, endpointCache.defaultManagedChannel()); + } + + private static final class TestHarness { + private final KeyAwareChannel channel; + private final FakeEndpointCache endpointCache; + private final FakeManagedChannel defaultManagedChannel; + + private TestHarness( + KeyAwareChannel channel, + FakeEndpointCache endpointCache, + FakeManagedChannel defaultManagedChannel) { + this.channel = channel; + this.endpointCache = endpointCache; + this.defaultManagedChannel = defaultManagedChannel; + } + } + + private static final class CapturingListener extends ClientCall.Listener { + private int closeCount; + @Nullable private Status closedStatus; + @Nullable private Metadata closedTrailers; + + @Override + public void onClose(Status status, Metadata trailers) { + this.closeCount++; + this.closedStatus = status; + this.closedTrailers = trailers; + } + } + + private static final class FakeEndpointCache implements ChannelEndpointCache { + private final String defaultAddress; + private final FakeEndpoint defaultEndpoint; + private final Map endpoints = new HashMap<>(); + private final Map getCount = new HashMap<>(); + + private FakeEndpointCache(String defaultAddress) { + this.defaultAddress = defaultAddress; + this.defaultEndpoint = new FakeEndpoint(defaultAddress); + } + + @Override + public ChannelEndpoint defaultChannel() { + return defaultEndpoint; + } + + @Override + public ChannelEndpoint get(String address) { + getCount.put(address, getCount.getOrDefault(address, 0) + 1); + if (defaultAddress.equals(address)) { + return defaultEndpoint; + } + return endpoints.computeIfAbsent(address, FakeEndpoint::new); + } + + @Override + public void evict(String address) { + endpoints.remove(address); + } + + @Override + public void shutdown() { + defaultEndpoint.channel.shutdown(); + for (FakeEndpoint endpoint : endpoints.values()) { + endpoint.channel.shutdown(); + } + endpoints.clear(); + } + + int getCount(String address) { + return 
getCount.getOrDefault(address, 0); + } + + FakeManagedChannel defaultManagedChannel() { + return defaultEndpoint.channel; + } + + int callCountForAddress(String address) { + if (defaultAddress.equals(address)) { + return defaultEndpoint.channel.callCount(); + } + FakeEndpoint endpoint = endpoints.get(address); + return endpoint == null ? 0 : endpoint.channel.callCount(); + } + + RecordingClientCall latestCallForAddress(String address) { + if (defaultAddress.equals(address)) { + return defaultEndpoint.channel.latestCall(); + } + FakeEndpoint endpoint = endpoints.get(address); + if (endpoint == null) { + throw new IllegalStateException("No endpoint for address: " + address); + } + return endpoint.channel.latestCall(); + } + } + + private static final class FakeEndpoint implements ChannelEndpoint { + private final String address; + private final FakeManagedChannel channel; + + private FakeEndpoint(String address) { + this.address = address; + this.channel = new FakeManagedChannel(address); + } + + @Override + public String getAddress() { + return address; + } + + @Override + public boolean isHealthy() { + return true; + } + + @Override + public ManagedChannel getChannel() { + return channel; + } + } + + private static final class FakeManagedChannel extends ManagedChannel { + private final String authority; + private final List> calls = new ArrayList<>(); + private boolean shutdown; + + private FakeManagedChannel(String authority) { + this.authority = authority; + } + + @Override + public ManagedChannel shutdown() { + shutdown = true; + return this; + } + + @Override + public ManagedChannel shutdownNow() { + shutdown = true; + return this; + } + + @Override + public boolean isShutdown() { + return shutdown; + } + + @Override + public boolean isTerminated() { + return shutdown; + } + + @Override + public boolean awaitTermination(long timeout, TimeUnit unit) { + return shutdown; + } + + @Override + public ClientCall newCall( + MethodDescriptor methodDescriptor, 
CallOptions callOptions) { + RecordingClientCall call = new RecordingClientCall<>(); + calls.add(call); + return call; + } + + @Override + public String authority() { + return authority; + } + + int callCount() { + return calls.size(); + } + + RecordingClientCall latestCall() { + return calls.get(calls.size() - 1); + } + } + + private static final class RecordingClientCall + extends ClientCall { + @Nullable private ClientCall.Listener listener; + private boolean cancelCalled; + @Nullable private String cancelMessage; + @Nullable private Throwable cancelCause; + + @Override + public void start(ClientCall.Listener responseListener, Metadata headers) { + this.listener = responseListener; + } + + @Override + public void request(int numMessages) {} + + @Override + public void cancel(@Nullable String message, @Nullable Throwable cause) { + this.cancelCalled = true; + this.cancelMessage = message; + this.cancelCause = cause; + } + + @Override + public void halfClose() {} + + @Override + public void sendMessage(RequestT message) {} + + void emitOnMessage(ResponseT response) { + if (listener != null) { + listener.onMessage(response); + } + } + + void emitOnClose(Status status, Metadata trailers) { + if (listener != null) { + listener.onClose(status, trailers); + } + } + } + + private static ByteString bytes(String value) { + return ByteString.copyFromUtf8(value); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/KeyRangeCacheGoldenTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/KeyRangeCacheGoldenTest.java new file mode 100644 index 000000000000..763a36dbfd64 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/KeyRangeCacheGoldenTest.java @@ -0,0 +1,204 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +import com.google.protobuf.TextFormat; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.RoutingHint; +import io.grpc.CallOptions; +import io.grpc.ClientCall; +import io.grpc.ManagedChannel; +import io.grpc.MethodDescriptor; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import spanner.cloud.location.RangeCacheTestCase; +import spanner.cloud.location.RangeCacheTestCases; + +@RunWith(JUnit4.class) +public class KeyRangeCacheGoldenTest { + + private static final int DEFAULT_MIN_ENTRIES_FOR_RANDOM_PICK = 1000; + + @Test + public void goldenTest() throws Exception { + RangeCacheTestCases.Builder builder = RangeCacheTestCases.newBuilder(); + try (InputStream inputStream = + getClass().getClassLoader().getResourceAsStream("range_cache_test.textproto"); + InputStreamReader reader = + new InputStreamReader(Objects.requireNonNull(inputStream), StandardCharsets.UTF_8)) { + TextFormat.merge(reader, builder); + } + + RangeCacheTestCases testCases = builder.build(); + + for (RangeCacheTestCase testCase : testCases.getTestCaseList()) { + FakeEndpointCache endpointCache = new 
FakeEndpointCache(); + KeyRangeCache cache = new KeyRangeCache(endpointCache); + cache.useDeterministicRandom(); + + for (RangeCacheTestCase.Step step : testCase.getStepList()) { + if (step.hasUpdate()) { + cache.addRanges(step.getUpdate()); + } + for (RangeCacheTestCase.Step.Test test : step.getTestList()) { + cache.setMinCacheEntriesForRandomPick(DEFAULT_MIN_ENTRIES_FOR_RANDOM_PICK); + int minEntries = test.getMinCacheEntriesForRandomPick(); + if (minEntries != 0) { + cache.setMinCacheEntriesForRandomPick(minEntries); + } + + RoutingHint.Builder hintBuilder = RoutingHint.newBuilder(); + if (!test.getKey().isEmpty()) { + hintBuilder.setKey(test.getKey()); + } + if (!test.getLimitKey().isEmpty()) { + hintBuilder.setLimitKey(test.getLimitKey()); + } + + DirectedReadOptions directedReadOptions = + test.hasDirectedReadOptions() + ? test.getDirectedReadOptions() + : DirectedReadOptions.getDefaultInstance(); + + KeyRangeCache.RangeMode rangeMode = + test.getRangeMode() == RangeCacheTestCase.Step.Test.RangeMode.PICK_RANDOM + ? 
KeyRangeCache.RangeMode.PICK_RANDOM + : KeyRangeCache.RangeMode.COVERING_SPLIT; + + ChannelEndpoint server = + cache.fillRoutingHint(test.getLeader(), rangeMode, directedReadOptions, hintBuilder); + + assertEquals( + "RoutingHint mismatch for test case: " + testCase.getName(), + test.getResult(), + hintBuilder.build()); + if (!test.getServer().isEmpty()) { + assertNotNull("Expected server for test case: " + testCase.getName(), server); + assertEquals(test.getServer(), server.getAddress()); + } else { + assertNull("Expected no server for test case: " + testCase.getName(), server); + } + } + } + + cache.clear(); + } + } + + private static final class FakeEndpointCache implements ChannelEndpointCache { + private final Map endpoints = new HashMap<>(); + private final FakeEndpoint defaultEndpoint = new FakeEndpoint("default"); + + @Override + public ChannelEndpoint defaultChannel() { + return defaultEndpoint; + } + + @Override + public ChannelEndpoint get(String address) { + return endpoints.computeIfAbsent(address, FakeEndpoint::new); + } + + @Override + public void evict(String address) { + endpoints.remove(address); + } + + @Override + public void shutdown() { + endpoints.clear(); + } + } + + private static final class FakeEndpoint implements ChannelEndpoint { + private final String address; + private final ManagedChannel channel = new FakeManagedChannel(); + + FakeEndpoint(String address) { + this.address = address; + } + + @Override + public String getAddress() { + return address; + } + + @Override + public boolean isHealthy() { + return true; + } + + @Override + public ManagedChannel getChannel() { + return channel; + } + } + + private static final class FakeManagedChannel extends ManagedChannel { + private boolean shutdown = false; + + @Override + public ManagedChannel shutdown() { + shutdown = true; + return this; + } + + @Override + public boolean isShutdown() { + return shutdown; + } + + @Override + public boolean isTerminated() { + return shutdown; + } + + 
@Override + public ManagedChannel shutdownNow() { + shutdown = true; + return this; + } + + @Override + public boolean awaitTermination(long timeout, TimeUnit unit) { + return shutdown; + } + + @Override + public ClientCall newCall( + MethodDescriptor methodDescriptor, CallOptions callOptions) { + throw new UnsupportedOperationException(); + } + + @Override + public String authority() { + return "fake"; + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/KeyRangeCacheTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/KeyRangeCacheTest.java new file mode 100644 index 000000000000..2405aa7a062b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/KeyRangeCacheTest.java @@ -0,0 +1,271 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.spi.v1; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import com.google.protobuf.ByteString; +import com.google.spanner.v1.CacheUpdate; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.Group; +import com.google.spanner.v1.Range; +import com.google.spanner.v1.RoutingHint; +import com.google.spanner.v1.Tablet; +import io.grpc.CallOptions; +import io.grpc.ClientCall; +import io.grpc.ManagedChannel; +import io.grpc.MethodDescriptor; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class KeyRangeCacheTest { + + @Test + public void skipsUnhealthyTabletAfterItIsCached() { + FakeEndpointCache endpointCache = new FakeEndpointCache(); + KeyRangeCache cache = new KeyRangeCache(endpointCache); + + cache.addRanges( + CacheUpdate.newBuilder() + .addRange( + Range.newBuilder() + .setStartKey(bytes("a")) + .setLimitKey(bytes("z")) + .setGroupUid(5) + .setSplitId(1) + .setGeneration(bytes("1"))) + .addGroup( + Group.newBuilder() + .setGroupUid(5) + .setGeneration(bytes("1")) + .setLeaderIndex(0) + .addTablets( + Tablet.newBuilder() + .setTabletUid(1) + .setServerAddress("server1") + .setIncarnation(bytes("1")) + .setDistance(0)) + .addTablets( + Tablet.newBuilder() + .setTabletUid(2) + .setServerAddress("server2") + .setIncarnation(bytes("1")) + .setDistance(0))) + .build()); + + RoutingHint.Builder initialHint = RoutingHint.newBuilder().setKey(bytes("a")); + ChannelEndpoint initialServer = + cache.fillRoutingHint( + /* preferLeader= */ false, + KeyRangeCache.RangeMode.COVERING_SPLIT, + DirectedReadOptions.getDefaultInstance(), + initialHint); + assertNotNull(initialServer); + + endpointCache.setHealthy("server1", false); + + RoutingHint.Builder hint = RoutingHint.newBuilder().setKey(bytes("a")); 
+ ChannelEndpoint server = + cache.fillRoutingHint( + /* preferLeader= */ false, + KeyRangeCache.RangeMode.COVERING_SPLIT, + DirectedReadOptions.getDefaultInstance(), + hint); + + assertNotNull(server); + assertEquals("server2", server.getAddress()); + assertEquals(1, hint.getSkippedTabletUidCount()); + assertEquals(1L, hint.getSkippedTabletUid(0).getTabletUid()); + } + + @Test + public void shrinkToEvictsRanges() { + FakeEndpointCache endpointCache = new FakeEndpointCache(); + KeyRangeCache cache = new KeyRangeCache(endpointCache); + + final int numRanges = 100; + for (int i = 0; i < numRanges; i++) { + CacheUpdate update = + CacheUpdate.newBuilder() + .addRange( + Range.newBuilder() + .setStartKey(bytes(String.format("%04d", i))) + .setLimitKey(bytes(String.format("%04d", i + 1))) + .setGroupUid(i) + .setSplitId(i) + .setGeneration(bytes("1"))) + .addGroup( + Group.newBuilder() + .setGroupUid(i) + .setGeneration(bytes("1")) + .addTablets( + Tablet.newBuilder() + .setTabletUid(i) + .setServerAddress("server" + i) + .setIncarnation(bytes("1")))) + .build(); + cache.addRanges(update); + } + + checkContents(cache, numRanges, numRanges); + + int shrinkTo = numRanges - numRanges / 4; + cache.shrinkTo(shrinkTo); + checkContents(cache, shrinkTo, 3 * numRanges / 4); + + cache.shrinkTo(numRanges / 8); + checkContents(cache, numRanges / 8, 7 * numRanges / 8); + + cache.shrinkTo(0); + checkContents(cache, 0, numRanges); + } + + private static void checkContents(KeyRangeCache cache, int expectedSize, int mustBeInCache) { + assertEquals(expectedSize, cache.size()); + int hitCount = 0; + for (int i = 0; i < 100; i++) { + RoutingHint.Builder hint = RoutingHint.newBuilder().setKey(bytes(String.format("%04d", i))); + ChannelEndpoint server = + cache.fillRoutingHint( + /* preferLeader= */ false, + KeyRangeCache.RangeMode.COVERING_SPLIT, + DirectedReadOptions.getDefaultInstance(), + hint); + if (i > mustBeInCache) { + assertNotNull(server); + } + if (server != null) { + hitCount++; 
+ assertEquals("server" + i, server.getAddress()); + } + } + assertEquals(expectedSize, hitCount); + } + + private static ByteString bytes(String value) { + return ByteString.copyFromUtf8(value); + } + + private static final class FakeEndpointCache implements ChannelEndpointCache { + private final Map endpoints = new HashMap<>(); + private final FakeEndpoint defaultEndpoint = new FakeEndpoint("default"); + + @Override + public ChannelEndpoint defaultChannel() { + return defaultEndpoint; + } + + @Override + public ChannelEndpoint get(String address) { + return endpoints.computeIfAbsent(address, FakeEndpoint::new); + } + + @Override + public void evict(String address) { + endpoints.remove(address); + } + + @Override + public void shutdown() { + endpoints.clear(); + } + + void setHealthy(String address, boolean healthy) { + FakeEndpoint endpoint = endpoints.get(address); + if (endpoint != null) { + endpoint.setHealthy(healthy); + } + } + } + + private static final class FakeEndpoint implements ChannelEndpoint { + private final String address; + private final ManagedChannel channel = new FakeManagedChannel(); + private boolean healthy = true; + + FakeEndpoint(String address) { + this.address = address; + } + + @Override + public String getAddress() { + return address; + } + + @Override + public boolean isHealthy() { + return healthy; + } + + @Override + public ManagedChannel getChannel() { + return channel; + } + + void setHealthy(boolean healthy) { + this.healthy = healthy; + } + } + + private static final class FakeManagedChannel extends ManagedChannel { + private boolean shutdown = false; + + @Override + public ManagedChannel shutdown() { + shutdown = true; + return this; + } + + @Override + public boolean isShutdown() { + return shutdown; + } + + @Override + public boolean isTerminated() { + return shutdown; + } + + @Override + public ManagedChannel shutdownNow() { + shutdown = true; + return this; + } + + @Override + public boolean awaitTermination(long timeout, 
TimeUnit unit) { + return shutdown; + } + + @Override + public ClientCall newCall( + MethodDescriptor methodDescriptor, CallOptions callOptions) { + throw new UnsupportedOperationException(); + } + + @Override + public String authority() { + return "fake"; + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/KeyRecipeCacheTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/KeyRecipeCacheTest.java new file mode 100644 index 000000000000..bcf89e529aa3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/KeyRecipeCacheTest.java @@ -0,0 +1,201 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.spi.v1; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertTrue; + +import com.google.protobuf.TextFormat; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ReadRequest; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class KeyRecipeCacheTest { + + @Test + public void fingerprintReadUsesShape() throws Exception { + ReadRequest req = + parseRead( + "table: \"T\"\n" + + "columns: \"c1\"\n" + + "columns: \"c2\"\n" + + "key_set { keys { values { string_value: \"foo\" } } }\n"); + + long fp = KeyRecipeCache.fingerprint(req); + assertNotEquals(0, fp); + assertEquals(fp, KeyRecipeCache.fingerprint(req)); + + ReadRequest diffTable = ReadRequest.newBuilder(req).setTable("U").build(); + assertNotEquals(fp, KeyRecipeCache.fingerprint(diffTable)); + + ReadRequest diffIndex = ReadRequest.newBuilder(req).setIndex("I").build(); + assertNotEquals(fp, KeyRecipeCache.fingerprint(diffIndex)); + + ReadRequest diffColumn = ReadRequest.newBuilder(req).setColumns(0, "c3").build(); + assertNotEquals(fp, KeyRecipeCache.fingerprint(diffColumn)); + + ReadRequest extraColumn = ReadRequest.newBuilder(req).addColumns("c4").build(); + assertNotEquals(fp, KeyRecipeCache.fingerprint(extraColumn)); + + ReadRequest removeColumn = ReadRequest.newBuilder(req).clearColumns().addColumns("c1").build(); + assertNotEquals(fp, KeyRecipeCache.fingerprint(removeColumn)); + + ReadRequest sameShape = + ReadRequest.newBuilder(req) + .clearKeySet() + .setKeySet(req.getKeySet().toBuilder().build()) + .build(); + assertEquals(fp, KeyRecipeCache.fingerprint(sameShape)); + + ReadRequest.Builder diffKeyValueBuilder = ReadRequest.newBuilder(req); + diffKeyValueBuilder + .getKeySetBuilder() + .getKeysBuilder(0) + .getValuesBuilder(0) + .setStringValue("bar"); + ReadRequest diffKeyValue = 
diffKeyValueBuilder.build(); + assertEquals(fp, KeyRecipeCache.fingerprint(diffKeyValue)); + } + + @Test + public void fingerprintExecuteSqlUsesParamShape() throws Exception { + ExecuteSqlRequest req = + parseExecuteSql( + "sql: \"SELECT * FROM T WHERE p1 = @p1 AND p2 = @p2\"\n" + + "params {\n" + + " fields { key: \"p1\" value { string_value: \"foo\" } }\n" + + " fields { key: \"p2\" value { string_value: \"99\" } }\n" + + "}\n" + + "param_types { key: \"p2\" value { code: INT64 } }\n" + + "query_options {\n" + + " optimizer_version: \"1\"\n" + + " optimizer_statistics_package: \"stats\"\n" + + "}\n"); + + long fp = KeyRecipeCache.fingerprint(req); + assertNotEquals(0, fp); + assertEquals(fp, KeyRecipeCache.fingerprint(req)); + + ExecuteSqlRequest diffSql = ExecuteSqlRequest.newBuilder(req).setSql("SELECT * FROM U").build(); + assertNotEquals(fp, KeyRecipeCache.fingerprint(diffSql)); + + ExecuteSqlRequest.Builder removeParamBuilder = ExecuteSqlRequest.newBuilder(req); + removeParamBuilder.getParamsBuilder().removeFields("p1"); + ExecuteSqlRequest removeParam = removeParamBuilder.build(); + assertNotEquals(fp, KeyRecipeCache.fingerprint(removeParam)); + + ExecuteSqlRequest.Builder addParamBuilder = ExecuteSqlRequest.newBuilder(req); + addParamBuilder.getParamsBuilder().putFields("p3", parseValue("string_value: \"foo\"")); + ExecuteSqlRequest addParam = addParamBuilder.build(); + assertNotEquals(fp, KeyRecipeCache.fingerprint(addParam)); + + ExecuteSqlRequest changeType = + ExecuteSqlRequest.newBuilder(req).putParamTypes("p1", parseType("code: BYTES")).build(); + assertNotEquals(fp, KeyRecipeCache.fingerprint(changeType)); + + ExecuteSqlRequest.Builder changeParamValueBuilder = ExecuteSqlRequest.newBuilder(req); + changeParamValueBuilder.getParamsBuilder().putFields("p1", parseValue("string_value: \"bar\"")); + ExecuteSqlRequest changeParamValue = changeParamValueBuilder.build(); + assertEquals(fp, KeyRecipeCache.fingerprint(changeParamValue)); + + 
ExecuteSqlRequest.Builder changeKindBuilder = ExecuteSqlRequest.newBuilder(req); + changeKindBuilder.getParamsBuilder().putFields("p1", parseValue("bool_value: true")); + ExecuteSqlRequest changeKind = changeKindBuilder.build(); + assertNotEquals(fp, KeyRecipeCache.fingerprint(changeKind)); + + ExecuteSqlRequest.Builder changeOptionsBuilder = ExecuteSqlRequest.newBuilder(req); + changeOptionsBuilder.getQueryOptionsBuilder().setOptimizerStatisticsPackage("stats_v2"); + ExecuteSqlRequest changeOptions = changeOptionsBuilder.build(); + assertNotEquals(fp, KeyRecipeCache.fingerprint(changeOptions)); + + ExecuteSqlRequest.Builder changeOptimizerBuilder = ExecuteSqlRequest.newBuilder(req); + changeOptimizerBuilder.getQueryOptionsBuilder().setOptimizerVersion("2"); + ExecuteSqlRequest changeOptimizer = changeOptimizerBuilder.build(); + assertNotEquals(fp, KeyRecipeCache.fingerprint(changeOptimizer)); + + ExecuteSqlRequest clearOptions = ExecuteSqlRequest.newBuilder(req).clearQueryOptions().build(); + assertNotEquals(fp, KeyRecipeCache.fingerprint(clearOptions)); + } + + @Test + public void computeKeysSetsRoutingHint() throws Exception { + KeyRecipeCache cache = new KeyRecipeCache(); + cache.addRecipes( + parseRecipeList( + "schema_generation: \"1\"\n" + + "recipe {\n" + + " table_name: \"T\"\n" + + " part { tag: 1 }\n" + + " part {\n" + + " order: ASCENDING\n" + + " null_order: NULLS_FIRST\n" + + " type { code: STRING }\n" + + " identifier: \"k\"\n" + + " }\n" + + "}\n")); + + ReadRequest.Builder request = + parseRead( + "table: \"T\"\n" + + "columns: \"c1\"\n" + + "key_set { keys { values { string_value: \"foo\" } } }\n") + .toBuilder(); + + cache.computeKeys(request); + assertTrue(request.getRoutingHint().getOperationUid() > 0); + assertEquals("1", request.getRoutingHint().getSchemaGeneration().toStringUtf8()); + assertTrue(request.getRoutingHint().getKey().size() > 0); + } + + private static ReadRequest parseRead(String text) throws TextFormat.ParseException { + 
ReadRequest.Builder builder = ReadRequest.newBuilder(); + TextFormat.merge(text, builder); + return builder.build(); + } + + private static ExecuteSqlRequest parseExecuteSql(String text) throws TextFormat.ParseException { + ExecuteSqlRequest.Builder builder = ExecuteSqlRequest.newBuilder(); + TextFormat.merge(text, builder); + return builder.build(); + } + + private static com.google.protobuf.Value parseValue(String text) + throws TextFormat.ParseException { + com.google.protobuf.Value.Builder builder = com.google.protobuf.Value.newBuilder(); + TextFormat.merge(text, builder); + return builder.build(); + } + + private static com.google.spanner.v1.Type parseType(String text) + throws TextFormat.ParseException { + com.google.spanner.v1.Type.Builder builder = com.google.spanner.v1.Type.newBuilder(); + TextFormat.merge(text, builder); + return builder.build(); + } + + private static com.google.spanner.v1.RecipeList parseRecipeList(String text) + throws TextFormat.ParseException { + com.google.spanner.v1.RecipeList.Builder builder = + com.google.spanner.v1.RecipeList.newBuilder(); + TextFormat.merge(text, builder); + return builder.build(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/KeyRecipeTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/KeyRecipeTest.java new file mode 100644 index 000000000000..3e946f10dc44 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/KeyRecipeTest.java @@ -0,0 +1,181 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import com.google.protobuf.ByteString; +import com.google.protobuf.Struct; +import com.google.protobuf.TextFormat; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class KeyRecipeTest { + + @Test + public void queryParamsUsesStructIdentifiers() throws Exception { + com.google.spanner.v1.KeyRecipe recipeProto = + createRecipe( + "part { tag: 1 }\n" + + "part {\n" + + " order: ASCENDING\n" + + " null_order: NULLS_FIRST\n" + + " type { code: STRING }\n" + + " identifier: \"p0\"\n" + + " struct_identifiers: 1\n" + + "}\n"); + + Struct params = + parseStruct( + "fields {\n" + + " key: \"p0\"\n" + + " value {\n" + + " list_value { values { string_value: \"a\" } values { string_value: \"b\" }" + + " }\n" + + " }\n" + + "}\n"); + + KeyRecipe recipe = KeyRecipe.create(recipeProto); + TargetRange target = recipe.queryParamsToTargetRange(params); + assertEquals(expectedKey("b"), target.start); + assertTrue(target.limit.isEmpty()); + } + + @Test + public void queryParamsUsesConstantValue() throws Exception { + com.google.spanner.v1.KeyRecipe recipeProto = + createRecipe( + "part { tag: 1 }\n" + + "part {\n" + + " order: ASCENDING\n" + + " null_order: NULLS_FIRST\n" + + " type { code: STRING }\n" + + " value { string_value: \"const\" }\n" + + "}\n"); + + KeyRecipe recipe = 
KeyRecipe.create(recipeProto); + TargetRange target = recipe.queryParamsToTargetRange(Struct.getDefaultInstance()); + assertEquals(expectedKey("const"), target.start); + assertTrue(target.limit.isEmpty()); + } + + @Test + public void queryParamsCaseInsensitiveFallback() throws Exception { + com.google.spanner.v1.KeyRecipe recipeProto = + createRecipe( + "part { tag: 1 }\n" + + "part {\n" + + " order: ASCENDING\n" + + " null_order: NULLS_FIRST\n" + + " type { code: STRING }\n" + + " identifier: \"id\"\n" + + "}\n"); + + Struct params = + parseStruct( + "fields {\n" + " key: \"Id\"\n" + " value { string_value: \"foo\" }\n" + "}\n"); + + KeyRecipe recipe = KeyRecipe.create(recipeProto); + TargetRange target = recipe.queryParamsToTargetRange(params); + assertEquals(expectedKey("foo"), target.start); + assertTrue(target.limit.isEmpty()); + } + + @Test + public void queryParamsCaseInsensitiveDuplicateUsesLastValue() throws Exception { + com.google.spanner.v1.KeyRecipe recipeProto = + createRecipe( + "part { tag: 1 }\n" + + "part {\n" + + " order: ASCENDING\n" + + " null_order: NULLS_FIRST\n" + + " type { code: STRING }\n" + + " identifier: \"ID\"\n" + + "}\n"); + + // Both "Id" and "id" normalize to "id"; the last one ("id"→"bar") wins. 
+ Struct params = + parseStruct( + "fields {\n" + + " key: \"Id\"\n" + + " value { string_value: \"foo\" }\n" + + "}\n" + + "fields {\n" + + " key: \"id\"\n" + + " value { string_value: \"bar\" }\n" + + "}\n"); + + KeyRecipe recipe = KeyRecipe.create(recipeProto); + TargetRange target = recipe.queryParamsToTargetRange(params); + assertEquals(expectedKey("bar"), target.start); + assertFalse(target.approximate); + assertTrue(target.limit.isEmpty()); + } + + @Test + public void queryParamsCaseInsensitiveSafeForTurkishDotI() throws Exception { + // Turkish upper-case İ (U+0130) lower-cases to two characters under Locale.ROOT + // (i + combining dot above), so "SİCİL".length() != "SİCİL".toLowerCase(ROOT).length() + // and "SİCİL".equalsIgnoreCase("SİCİL".toLowerCase(ROOT)) is false. + // This is still safe because both the recipe identifier (server-sent) and the user's + // bound parameter name go through the same Locale.ROOT lower-casing before the + // HashMap lookup, so they produce the same string on both sides and the match succeeds. 
+ com.google.spanner.v1.KeyRecipe recipeProto = + createRecipe( + "part { tag: 1 }\n" + + "part {\n" + + " order: ASCENDING\n" + + " null_order: NULLS_FIRST\n" + + " type { code: STRING }\n" + + " identifier: \"SİCİL\"\n" + + "}\n"); + + Struct params = + parseStruct( + "fields {\n" + " key: \"SİCİL\"\n" + " value { string_value: \"test\" }\n" + "}\n"); + + KeyRecipe recipe = KeyRecipe.create(recipeProto); + TargetRange target = recipe.queryParamsToTargetRange(params); + assertEquals(expectedKey("test"), target.start); + assertTrue(target.limit.isEmpty()); + } + + private static com.google.spanner.v1.KeyRecipe createRecipe(String text) + throws TextFormat.ParseException { + com.google.spanner.v1.KeyRecipe.Builder builder = com.google.spanner.v1.KeyRecipe.newBuilder(); + TextFormat.merge(text, builder); + return builder.build(); + } + + private static Struct parseStruct(String text) throws TextFormat.ParseException { + Struct.Builder builder = Struct.newBuilder(); + TextFormat.merge(text, builder); + return builder.build(); + } + + private static ByteString expectedKey(String value) { + UnsynchronizedByteArrayOutputStream out = new UnsynchronizedByteArrayOutputStream(); + SsFormat.appendCompositeTag(out, 1); + SsFormat.appendNotNullMarkerNullOrderedFirst(out); + SsFormat.appendStringIncreasing(out, value); + return ByteString.copyFrom(out.toByteArray()); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/RecipeGoldenTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/RecipeGoldenTest.java new file mode 100644 index 000000000000..dc1fde01f0cb --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/RecipeGoldenTest.java @@ -0,0 +1,128 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import static org.junit.Assert.assertEquals; + +import com.google.protobuf.TextFormat; +import java.io.BufferedReader; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.Objects; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import spanner.cloud.location.RecipeTestCase; +import spanner.cloud.location.RecipeTestCases; + +@RunWith(JUnit4.class) +public class RecipeGoldenTest { + + // Pattern to match unknown TypeCode enum values (e.g., TOKENLIST) and replace with + // TYPE_CODE_UNSPECIFIED. This handles cases where the textproto contains enum values + // not yet available in the public API. + private static final Pattern UNKNOWN_TYPE_CODE_PATTERN = Pattern.compile("code:\\s*TOKENLIST"); + + @Test + public void goldenTest() throws Exception { + String content; + try (InputStream inputStream = + getClass().getClassLoader().getResourceAsStream("recipe_test.textproto"); + BufferedReader reader = + new BufferedReader( + new InputStreamReader( + Objects.requireNonNull(inputStream), StandardCharsets.UTF_8))) { + content = reader.lines().collect(Collectors.joining("\n")); + } + + // Replace unknown enum values with TYPE_CODE_UNSPECIFIED so parsing succeeds. + // Test cases with unrecognized types will produce invalid recipes that get skipped. 
+ content = UNKNOWN_TYPE_CODE_PATTERN.matcher(content).replaceAll("code: TYPE_CODE_UNSPECIFIED"); + + RecipeTestCases.Builder builder = RecipeTestCases.newBuilder(); + TextFormat.merge(content, builder); + + RecipeTestCases testCases = builder.build(); + + for (RecipeTestCase testCase : testCases.getTestCaseList()) { + if (testCase.getName().contains("Random")) { + continue; + } + + if (testCase.getRecipes().getRecipeCount() == 0) { + continue; + } + + KeyRecipe recipe; + try { + recipe = KeyRecipe.create(testCase.getRecipes().getRecipe(0)); + } catch (IllegalArgumentException e) { + for (RecipeTestCase.Test test : testCase.getTestList()) { + assertEquals( + "Invalid recipe should result in approximate=true in test case: " + + testCase.getName(), + true, + test.getApproximate()); + } + continue; + } + + int testNum = 0; + for (RecipeTestCase.Test test : testCase.getTestList()) { + testNum++; + + TargetRange target; + switch (test.getOperationCase()) { + case KEY: + target = recipe.keyToTargetRange(test.getKey()); + break; + case KEY_RANGE: + target = recipe.keyRangeToTargetRange(test.getKeyRange()); + break; + case KEY_SET: + target = recipe.keySetToTargetRange(test.getKeySet()); + break; + case MUTATION: + target = recipe.mutationToTargetRange(test.getMutation()); + break; + case QUERY_PARAMS: + target = recipe.queryParamsToTargetRange(test.getQueryParams()); + break; + case OPERATION_NOT_SET: + default: + throw new UnsupportedOperationException("Unsupported operation in test case"); + } + + assertEquals( + "Start mismatch in test case: " + testCase.getName() + " test #" + testNum, + test.getStart(), + target.start); + assertEquals( + "Limit mismatch in test case: " + testCase.getName() + " test #" + testNum, + test.getLimit(), + target.limit); + assertEquals( + "Approximate mismatch in test case: " + testCase.getName() + " test #" + testNum, + test.getApproximate(), + target.approximate); + } + } + } +} diff --git 
a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/RequestIdInterceptorTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/RequestIdInterceptorTest.java new file mode 100644 index 000000000000..266a90a3aa5e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/RequestIdInterceptorTest.java @@ -0,0 +1,326 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import static com.google.cloud.spanner.XGoogSpannerRequestId.REQUEST_ID_CALL_OPTIONS_KEY; +import static com.google.cloud.spanner.XGoogSpannerRequestId.REQUEST_ID_HEADER_KEY; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.grpc.GcpManagedChannel; +import com.google.cloud.spanner.XGoogSpannerRequestId; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import io.grpc.MethodDescriptor.Marshaller; +import java.io.InputStream; +import java.util.concurrent.atomic.AtomicReference; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link RequestIdInterceptor}. 
*/ +@RunWith(JUnit4.class) +public class RequestIdInterceptorTest { + + // Pattern to parse request ID: version.randProcessId.clientId.channelId.requestId.attempt + private static final Pattern REQUEST_ID_PATTERN = + Pattern.compile("^(\\d)\\.([0-9a-z]{16})\\.(\\d+)\\.(\\d+)\\.(\\d+)\\.(\\d+)$"); + + @Test + public void testInterceptorSetsRequestIdHeader() { + RequestIdInterceptor interceptor = new RequestIdInterceptor(); + XGoogSpannerRequestId requestId = XGoogSpannerRequestId.of(1, 2, 3, 0); + CallOptions callOptions = + CallOptions.DEFAULT.withOption(REQUEST_ID_CALL_OPTIONS_KEY, requestId); + + AtomicReference capturedHeaders = new AtomicReference<>(); + + Channel fakeChannel = + new FakeChannel() { + @Override + public ClientCall newCall( + MethodDescriptor methodDescriptor, CallOptions callOptions) { + return new FakeClientCall() { + @Override + public void start(Listener responseListener, Metadata headers) { + capturedHeaders.set(headers); + } + }; + } + }; + + MethodDescriptor methodDescriptor = createMethodDescriptor(); + ClientCall call = + interceptor.interceptCall(methodDescriptor, callOptions, fakeChannel); + call.start(new NoOpListener<>(), new Metadata()); + + assertNotNull(capturedHeaders.get()); + String headerValue = capturedHeaders.get().get(REQUEST_ID_HEADER_KEY); + assertNotNull(headerValue); + + // Verify the header matches the expected pattern with attempt incremented to 1. + Matcher matcher = REQUEST_ID_PATTERN.matcher(headerValue); + assertTrue("Header value should match request ID pattern", matcher.matches()); + // Attempt should be 1 (incremented from 0). + assertTrue("Attempt should be 1", headerValue.endsWith(".1")); + } + + @Test + public void testInterceptorUpdatesChannelIdFromGrpcGcp() { + RequestIdInterceptor interceptor = new RequestIdInterceptor(); + + // Start with channel ID 0 (placeholder when DCP is enabled). 
+ XGoogSpannerRequestId requestId = XGoogSpannerRequestId.of(1, 0, 3, 0); + + // Simulate grpc-gcp setting the actual channel ID (0-based) in CallOptions. + int gcpChannelId = 5; // grpc-gcp channel IDs are 0-based. + CallOptions callOptions = + CallOptions.DEFAULT + .withOption(REQUEST_ID_CALL_OPTIONS_KEY, requestId) + .withOption(GcpManagedChannel.CHANNEL_ID_KEY, gcpChannelId); + + AtomicReference capturedHeaders = new AtomicReference<>(); + + Channel fakeChannel = + new FakeChannel() { + @Override + public ClientCall newCall( + MethodDescriptor methodDescriptor, CallOptions callOptions) { + return new FakeClientCall() { + @Override + public void start(Listener responseListener, Metadata headers) { + capturedHeaders.set(headers); + } + }; + } + }; + + MethodDescriptor methodDescriptor = createMethodDescriptor(); + ClientCall call = + interceptor.interceptCall(methodDescriptor, callOptions, fakeChannel); + call.start(new NoOpListener<>(), new Metadata()); + + assertNotNull(capturedHeaders.get()); + String headerValue = capturedHeaders.get().get(REQUEST_ID_HEADER_KEY); + assertNotNull(headerValue); + + // Parse the header and verify the channel ID was updated. + // Expected channel ID in header is gcpChannelId + 1 = 6. + Matcher matcher = REQUEST_ID_PATTERN.matcher(headerValue); + assertTrue("Header value should match request ID pattern", matcher.matches()); + String channelIdStr = matcher.group(4); + // Channel ID should be gcpChannelId + 1 = 6. + assertTrue( + "Channel ID should be " + (gcpChannelId + 1), + channelIdStr.equals(String.valueOf(gcpChannelId + 1))); + } + + @Test + public void testInterceptorDoesNotUpdateChannelIdWhenNotProvided() { + RequestIdInterceptor interceptor = new RequestIdInterceptor(); + + // Start with a specific channel ID. + long originalChannelId = 3; + XGoogSpannerRequestId requestId = XGoogSpannerRequestId.of(1, originalChannelId, 5, 0); + + // No CHANNEL_ID_KEY set in CallOptions (grpc-gcp not used or not available). 
+ CallOptions callOptions = + CallOptions.DEFAULT.withOption(REQUEST_ID_CALL_OPTIONS_KEY, requestId); + + AtomicReference capturedHeaders = new AtomicReference<>(); + + Channel fakeChannel = + new FakeChannel() { + @Override + public ClientCall newCall( + MethodDescriptor methodDescriptor, CallOptions callOptions) { + return new FakeClientCall() { + @Override + public void start(Listener responseListener, Metadata headers) { + capturedHeaders.set(headers); + } + }; + } + }; + + MethodDescriptor methodDescriptor = createMethodDescriptor(); + ClientCall call = + interceptor.interceptCall(methodDescriptor, callOptions, fakeChannel); + call.start(new NoOpListener<>(), new Metadata()); + + assertNotNull(capturedHeaders.get()); + String headerValue = capturedHeaders.get().get(REQUEST_ID_HEADER_KEY); + assertNotNull(headerValue); + + // Parse the header and verify the channel ID remained unchanged. + Matcher matcher = REQUEST_ID_PATTERN.matcher(headerValue); + assertTrue("Header value should match request ID pattern", matcher.matches()); + String channelIdStr = matcher.group(4); + // Channel ID should remain 3. + assertTrue( + "Channel ID should remain " + originalChannelId, + channelIdStr.equals(String.valueOf(originalChannelId))); + } + + @Test + public void testInterceptorOverridesChannelIdWhenGrpcGcpProvides() { + RequestIdInterceptor interceptor = new RequestIdInterceptor(); + + // Start with a non-zero channel ID. + long originalChannelId = 3; + XGoogSpannerRequestId requestId = XGoogSpannerRequestId.of(1, originalChannelId, 5, 0); + + // Simulate grpc-gcp setting a different channel ID. 
+ int gcpChannelId = 7; + CallOptions callOptions = + CallOptions.DEFAULT + .withOption(REQUEST_ID_CALL_OPTIONS_KEY, requestId) + .withOption(GcpManagedChannel.CHANNEL_ID_KEY, gcpChannelId); + + AtomicReference capturedHeaders = new AtomicReference<>(); + + Channel fakeChannel = + new FakeChannel() { + @Override + public ClientCall newCall( + MethodDescriptor methodDescriptor, CallOptions callOptions) { + return new FakeClientCall() { + @Override + public void start(Listener responseListener, Metadata headers) { + capturedHeaders.set(headers); + } + }; + } + }; + + MethodDescriptor methodDescriptor = createMethodDescriptor(); + ClientCall call = + interceptor.interceptCall(methodDescriptor, callOptions, fakeChannel); + call.start(new NoOpListener<>(), new Metadata()); + + assertNotNull(capturedHeaders.get()); + String headerValue = capturedHeaders.get().get(REQUEST_ID_HEADER_KEY); + assertNotNull(headerValue); + + // Parse the header and verify the channel ID WAS updated to grpc-gcp's value. + Matcher matcher = REQUEST_ID_PATTERN.matcher(headerValue); + assertTrue("Header value should match request ID pattern", matcher.matches()); + String channelIdStr = matcher.group(4); + // Channel ID should be gcpChannelId + 1 = 8 (grpc-gcp's channel ID overrides the original). + assertTrue( + "Channel ID should be " + (gcpChannelId + 1) + " but was " + channelIdStr, + channelIdStr.equals(String.valueOf(gcpChannelId + 1))); + } + + @Test + public void testInterceptorWithNoRequestId() { + RequestIdInterceptor interceptor = new RequestIdInterceptor(); + + // No request ID in CallOptions. 
+ CallOptions callOptions = CallOptions.DEFAULT; + + AtomicReference capturedHeaders = new AtomicReference<>(); + + Channel fakeChannel = + new FakeChannel() { + @Override + public ClientCall newCall( + MethodDescriptor methodDescriptor, CallOptions callOptions) { + return new FakeClientCall() { + @Override + public void start(Listener responseListener, Metadata headers) { + capturedHeaders.set(headers); + } + }; + } + }; + + MethodDescriptor methodDescriptor = createMethodDescriptor(); + ClientCall call = + interceptor.interceptCall(methodDescriptor, callOptions, fakeChannel); + call.start(new NoOpListener<>(), new Metadata()); + + assertNotNull(capturedHeaders.get()); + // No request ID header should be set. + assertNull(capturedHeaders.get().get(REQUEST_ID_HEADER_KEY)); + } + + private static MethodDescriptor createMethodDescriptor() { + return MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("test/method") + .setRequestMarshaller(new FakeMarshaller<>()) + .setResponseMarshaller(new FakeMarshaller<>()) + .build(); + } + + private static class FakeMarshaller implements Marshaller { + @Override + public InputStream stream(T value) { + return null; + } + + @Override + public T parse(InputStream stream) { + return null; + } + } + + private abstract static class FakeChannel extends Channel { + @Override + public String authority() { + return "fake-authority"; + } + } + + private abstract static class FakeClientCall extends ClientCall { + @Override + public void start(Listener responseListener, Metadata headers) {} + + @Override + public void request(int numMessages) {} + + @Override + public void cancel(String message, Throwable cause) {} + + @Override + public void halfClose() {} + + @Override + public void sendMessage(ReqT message) {} + } + + private static class NoOpListener extends ClientCall.Listener { + @Override + public void onMessage(T message) {} + + @Override + public void onHeaders(Metadata headers) {} + + 
@Override + public void onClose(io.grpc.Status status, Metadata trailers) {} + + @Override + public void onReady() {} + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/SpannerMetadataProviderTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/SpannerMetadataProviderTest.java new file mode 100644 index 000000000000..8073b11735e6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/SpannerMetadataProviderTest.java @@ -0,0 +1,125 @@ +/* + * Copyright 2017 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.spanner.spi.v1; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Maps; +import io.grpc.Metadata; +import io.grpc.Metadata.Key; +import java.util.List; +import java.util.Map; +import org.junit.Test; + +public class SpannerMetadataProviderTest { + @Test + public void testGetHeadersAsMetadata() { + Map headers = ImmutableMap.of("header1", "value1", "header2", "value2"); + SpannerMetadataProvider metadataProvider = SpannerMetadataProvider.create(headers, "header3"); + + Metadata metadata = metadataProvider.newMetadata(null, "stuff"); + assertEquals(headers.size() + 1, metadata.keys().size()); + assertEquals( + headers.get("header1"), metadata.get(Key.of("header1", Metadata.ASCII_STRING_MARSHALLER))); + assertEquals( + headers.get("header2"), metadata.get(Key.of("header2", Metadata.ASCII_STRING_MARSHALLER))); + assertEquals("stuff", metadata.get(Key.of("header3", Metadata.ASCII_STRING_MARSHALLER))); + } + + @Test + public void testGetResourceHeaderValue() { + SpannerMetadataProvider metadataProvider = + SpannerMetadataProvider.create(ImmutableMap.of(), "header3"); + + assertEquals("projects/p", getResourceHeaderValue(metadataProvider, "garbage")); + assertEquals("projects/p", getResourceHeaderValue(metadataProvider, "projects/p")); + assertEquals( + "projects/p/instances/i", + getResourceHeaderValue(metadataProvider, "projects/p/instances/i")); + assertEquals( + "projects/p/instances/i/databases/d", + getResourceHeaderValue(metadataProvider, "projects/p/instances/i/databases/d")); + assertEquals( + "projects/p/instances/i/databases/d", + getResourceHeaderValue(metadataProvider, "projects/p/instances/i/databases/d/sessions/s")); + assertEquals( + "projects/p/instances/i", + getResourceHeaderValue(metadataProvider, "projects/p/instances/i/operations/op")); + 
assertEquals( + "projects/p/instances/i/databases/d", + getResourceHeaderValue( + metadataProvider, "projects/p/instances/i/databases/d/operations/op")); + assertEquals( + "projects/p/instances/i", + getResourceHeaderValue(metadataProvider, "projects/p/instances/i/operations")); + assertEquals( + "projects/p/instances/i/databases/d", + getResourceHeaderValue(metadataProvider, "projects/p/instances/i/databases/d/operations")); + } + + @Test + public void testNewExtraHeaders() { + SpannerMetadataProvider metadataProvider = + SpannerMetadataProvider.create(ImmutableMap.of(), "header1"); + Map> extraHeaders = metadataProvider.newExtraHeaders(null, "value1"); + Map> expectedHeaders = + ImmutableMap.>of("header1", ImmutableList.of("value1")); + assertTrue(Maps.difference(extraHeaders, expectedHeaders).areEqual()); + } + + @Test + public void testNewRouteToLeaderHeader() { + SpannerMetadataProvider metadataProvider = + SpannerMetadataProvider.create(ImmutableMap.of(), "header1"); + Map> extraHeaders = metadataProvider.newRouteToLeaderHeader(); + Map> expectedHeaders = + ImmutableMap.>of( + "x-goog-spanner-route-to-leader", ImmutableList.of("true")); + assertTrue(Maps.difference(extraHeaders, expectedHeaders).areEqual()); + } + + @Test + public void testNewEndToEndTracingHeader() { + SpannerMetadataProvider metadataProvider = + SpannerMetadataProvider.create(ImmutableMap.of(), "header1"); + Map> extraHeaders = metadataProvider.newEndToEndTracingHeader(); + Map> expectedHeaders = + ImmutableMap.>of( + "x-goog-spanner-end-to-end-tracing", ImmutableList.of("true")); + assertTrue(Maps.difference(extraHeaders, expectedHeaders).areEqual()); + } + + @Test + public void testNewAfeServerTimingHeader() { + SpannerMetadataProvider metadataProvider = + SpannerMetadataProvider.create(ImmutableMap.of(), "header1"); + Map> extraHeaders = metadataProvider.newAfeServerTimingHeader(); + Map> expectedHeaders = + ImmutableMap.>of( + "x-goog-spanner-enable-afe-server-timing", 
ImmutableList.of("true")); + assertTrue(Maps.difference(extraHeaders, expectedHeaders).areEqual()); + } + + private String getResourceHeaderValue( + SpannerMetadataProvider headerProvider, String resourceTokenTemplate) { + Metadata metadata = headerProvider.newMetadata(resourceTokenTemplate, "projects/p"); + assertEquals(1, metadata.keys().size()); + return metadata.get(Key.of("header3", Metadata.ASCII_STRING_MARSHALLER)); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/SpannerRpcMetricsTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/SpannerRpcMetricsTest.java new file mode 100644 index 000000000000..c6095dde7d2f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/SpannerRpcMetricsTest.java @@ -0,0 +1,301 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.spi.v1; + +import static org.junit.Assert.assertEquals; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.protobuf.ListValue; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.TypeCode; +import io.grpc.ForwardingServerCall; +import io.grpc.Metadata; +import io.grpc.Server; +import io.grpc.ServerCall; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptor; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import io.opentelemetry.api.GlobalOpenTelemetry; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader; +import java.net.InetSocketAddress; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import org.junit.*; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class SpannerRpcMetricsTest { + + private static MockSpannerServiceImpl mockSpanner; + private static Server server; + private static InetSocketAddress address; + private static Spanner spannerWithOpenTelemetry; + private static DatabaseClient databaseClient; + private static final Map optionsMap = new HashMap<>(); + private static MockSpannerServiceImpl mockSpannerNoHeader; + private static Server serverNoHeader; + private static InetSocketAddress addressNoHeader; + private 
static Spanner spannerNoHeaderNoOpenTelemetry; + private static DatabaseClient databaseClientNoHeader; + private static String instanceId = "fake-instance"; + private static String databaseId = "fake-database"; + private static String noHeaderdatabaseId = "fake-database-1"; + private static String projectId = "fake-project"; + private static AtomicInteger fakeServerTiming = new AtomicInteger(new Random().nextInt(1000) + 1); + private static final Statement SELECT1AND2 = + Statement.of("SELECT 1 AS COL1 UNION ALL SELECT 2 AS COL1"); + private static final ResultSetMetadata SELECT1AND2_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + StructType.Field.newBuilder() + .setName("COL1") + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.INT64) + .build()) + .build()) + .build()) + .build(); + private static final com.google.spanner.v1.ResultSet SELECT1_RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("1").build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues(com.google.protobuf.Value.newBuilder().setStringValue("2").build()) + .build()) + .setMetadata(SELECT1AND2_METADATA) + .build(); + private static final Statement UPDATE_FOO_STATEMENT = + Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=2"); + + private static InMemoryMetricReader inMemoryMetricReader; + + private static InMemoryMetricReader inMemoryMetricReaderInjected; + + @BeforeClass + public static void startServer() throws Exception { + SpannerOptions.enableOpenTelemetryMetrics(); + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. 
+ mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.query(SELECT1AND2, SELECT1_RESULTSET)); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.update(UPDATE_FOO_STATEMENT, 1L)); + address = new InetSocketAddress("localhost", 0); + server = + NettyServerBuilder.forAddress(address) + .addService(mockSpanner) + .intercept( + new ServerInterceptor() { + @Override + public ServerCall.Listener interceptCall( + ServerCall serverCall, + Metadata headers, + ServerCallHandler serverCallHandler) { + return serverCallHandler.startCall( + new ForwardingServerCall.SimpleForwardingServerCall( + serverCall) { + @Override + public void sendHeaders(Metadata headers) { + headers.put( + Metadata.Key.of("server-timing", Metadata.ASCII_STRING_MARSHALLER), + String.format("gfet4t7; dur=%d", fakeServerTiming.get())); + super.sendHeaders(headers); + } + }, + headers); + } + }) + .build() + .start(); + optionsMap.put(SpannerRpc.Option.CHANNEL_HINT, 1L); + inMemoryMetricReader = InMemoryMetricReader.create(); + SdkMeterProvider sdkMeterProvider = + SdkMeterProvider.builder().registerMetricReader(inMemoryMetricReader).build(); + GlobalOpenTelemetry.resetForTest(); + OpenTelemetrySdk.builder().setMeterProvider(sdkMeterProvider).buildAndRegisterGlobal(); + + inMemoryMetricReaderInjected = InMemoryMetricReader.create(); + SdkMeterProvider sdkMeterProviderInjected = + SdkMeterProvider.builder().registerMetricReader(inMemoryMetricReaderInjected).build(); + + OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder().setMeterProvider(sdkMeterProviderInjected).build(); + + spannerWithOpenTelemetry = + createSpannerOptionsWithOpenTelemetry(address, server, openTelemetry).getService(); + databaseClient = + spannerWithOpenTelemetry.getDatabaseClient( + DatabaseId.of(projectId, instanceId, databaseId)); + + mockSpannerNoHeader = new MockSpannerServiceImpl(); + mockSpannerNoHeader.setAbortProbability(0.0D); + mockSpannerNoHeader.putStatementResult( + 
MockSpannerServiceImpl.StatementResult.query(SELECT1AND2, SELECT1_RESULTSET)); + mockSpannerNoHeader.putStatementResult( + MockSpannerServiceImpl.StatementResult.update(UPDATE_FOO_STATEMENT, 1L)); + addressNoHeader = new InetSocketAddress("localhost", 0); + serverNoHeader = + NettyServerBuilder.forAddress(addressNoHeader) + .addService(mockSpannerNoHeader) + .build() + .start(); + spannerNoHeaderNoOpenTelemetry = + createSpannerOptions(addressNoHeader, serverNoHeader).getService(); + databaseClientNoHeader = + spannerNoHeaderNoOpenTelemetry.getDatabaseClient( + DatabaseId.of(projectId, instanceId, noHeaderdatabaseId)); + } + + @AfterClass + public static void stopServer() throws InterruptedException { + if (spannerWithOpenTelemetry != null) { + spannerWithOpenTelemetry.close(); + server.shutdown(); + server.awaitTermination(); + } + + if (spannerNoHeaderNoOpenTelemetry != null) { + spannerNoHeaderNoOpenTelemetry.close(); + serverNoHeader.shutdown(); + serverNoHeader.awaitTermination(); + } + } + + @After + public void reset() { + mockSpanner.reset(); + mockSpannerNoHeader.reset(); + } + + @Test + public void testGfeLatencyExecuteSqlWithInjectedOpenTelemetry() throws InterruptedException { + databaseClient + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(UPDATE_FOO_STATEMENT)); + + double latency = + getGfeLatencyMetric( + getMetricData("spanner/gfe_latency", inMemoryMetricReaderInjected), + "google.spanner.v1.Spanner/ExecuteSql"); + assertEquals(fakeServerTiming.get(), latency, 0); + } + + @Test + public void testGfeMissingHeaderExecuteSqlWithGlobalOpenTelemetry() throws InterruptedException { + databaseClient + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(UPDATE_FOO_STATEMENT)); + long count = + getHeaderLatencyMetric( + getMetricData("spanner/gfe_header_missing_count", inMemoryMetricReaderInjected), + "google.spanner.v1.Spanner/Commit", + databaseId); + assertEquals(0, count); + + databaseClientNoHeader + 
.readWriteTransaction() + .run(transaction -> transaction.executeUpdate(UPDATE_FOO_STATEMENT)); + long count1 = + getHeaderLatencyMetric( + getMetricData("spanner/gfe_header_missing_count", inMemoryMetricReader), + "google.spanner.v1.Spanner/Commit", + noHeaderdatabaseId); + assertEquals(1, count1); + } + + private static SpannerOptions createSpannerOptionsWithOpenTelemetry( + InetSocketAddress address, Server server, OpenTelemetry openTelemetry) { + + String endpoint = address.getHostString() + ":" + server.getPort(); + return SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + .setChannelConfigurator( + input -> { + input.usePlaintext(); + return input; + }) + .setHost("http://" + endpoint) + .setCredentials(NoCredentials.getInstance()) + .setOpenTelemetry(openTelemetry) + .build(); + } + + private static SpannerOptions createSpannerOptions(InetSocketAddress address, Server server) { + + String endpoint = address.getHostString() + ":" + server.getPort(); + return SpannerOptions.newBuilder() + .setProjectId("[PROJECT]") + .setChannelConfigurator( + input -> { + input.usePlaintext(); + return input; + }) + .setHost("http://" + endpoint) + .setCredentials(NoCredentials.getInstance()) + .build(); + } + + private long getHeaderLatencyMetric(MetricData metricData, String methodName, String databaseId) { + return metricData.getLongSumData().getPoints().stream() + .filter( + x -> + x.getAttributes().asMap().containsValue(methodName) + && x.getAttributes().asMap().containsValue(databaseId)) + .findFirst() + .get() + .getValue(); + } + + private double getGfeLatencyMetric(MetricData metricData, String methodName) { + return metricData.getHistogramData().getPoints().stream() + .filter(x -> x.getAttributes().asMap().containsValue(methodName)) + .findFirst() + .get() + .getMax(); + } + + private MetricData getMetricData(String metricName, InMemoryMetricReader inMemoryMetricReader) { + Collection metricDataCollection = inMemoryMetricReader.collectAllMetrics(); + 
Collection metricDataFiltered = + metricDataCollection.stream() + .filter(x -> x.getName().equals(metricName)) + .collect(Collectors.toList()); + return metricDataFiltered.stream().findFirst().get(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/SsFormatTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/SsFormatTest.java new file mode 100644 index 000000000000..da8a833db43c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/SsFormatTest.java @@ -0,0 +1,902 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.protobuf.ByteString; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.List; +import java.util.TreeSet; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link SsFormat}. 
*/ +@RunWith(JUnit4.class) +public class SsFormatTest { + + private static List signedIntTestValues; + private static List unsignedIntTestValues; + private static List doubleTestValues; + + /** Comparator for unsigned lexicographic comparison of byte arrays. */ + private static final Comparator UNSIGNED_BYTE_COMPARATOR = + (a, b) -> + ByteString.unsignedLexicographicalComparator() + .compare(ByteString.copyFrom(a), ByteString.copyFrom(b)); + + @BeforeClass + public static void setUpTestData() { + signedIntTestValues = buildSignedIntTestValues(); + unsignedIntTestValues = buildUnsignedIntTestValues(); + doubleTestValues = buildDoubleTestValues(); + } + + private static List buildSignedIntTestValues() { + TreeSet values = new TreeSet<>(); + + // Range of small values + for (int i = -300; i < 300; i++) { + values.add((long) i); + } + + // Powers of 2 and boundaries + for (int i = 0; i < 63; i++) { + long powerOf2 = 1L << i; + values.add(powerOf2); + values.add(powerOf2 - 1); + values.add(powerOf2 + 1); + values.add(-powerOf2); + values.add(-powerOf2 - 1); + values.add(-powerOf2 + 1); + } + + // Edge cases + values.add(Long.MIN_VALUE); + values.add(Long.MAX_VALUE); + + return new ArrayList<>(values); + } + + private static List buildUnsignedIntTestValues() { + TreeSet values = new TreeSet<>(Long::compareUnsigned); + + // Range of small values + for (int i = 0; i < 600; i++) { + values.add((long) i); + } + + // Powers of 2 and boundaries (treating as unsigned) + for (int i = 0; i < 64; i++) { + long powerOf2 = 1L << i; + values.add(powerOf2); + if (powerOf2 > 0) { + values.add(powerOf2 - 1); + } + values.add(powerOf2 + 1); + } + + // Max unsigned value (all bits set) + values.add(-1L); // 0xFFFFFFFFFFFFFFFF as unsigned + + return new ArrayList<>(values); + } + + private static List buildDoubleTestValues() { + TreeSet values = + new TreeSet<>( + (a, b) -> { + // Handle NaN specially - put at end + if (Double.isNaN(a) && Double.isNaN(b)) return 0; + if (Double.isNaN(a)) 
return 1; + if (Double.isNaN(b)) return -1; + return Double.compare(a, b); + }); + + // Basic values + values.add(0.0); + values.add(-0.0); + values.add(Double.POSITIVE_INFINITY); + values.add(Double.NEGATIVE_INFINITY); + values.add(Double.MIN_VALUE); + values.add(Double.MAX_VALUE); + values.add(-Double.MIN_VALUE); + values.add(-Double.MAX_VALUE); + + // Powers of 10 + double value = 1.0; + for (int i = 0; i < 10; i++) { + values.add(value); + values.add(-value); + value /= 10; + } + + long[] signs = {0, 1}; + long[] exponents = { + 0, 1, 2, 100, 200, 512, 1000, 1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, + 2000, 2045, 2046, 2047 + }; + long[] fractions = { + 0, + 1, + 2, + 10, + 16, + 255, + 256, + 32767, + 32768, + 65535, + 65536, + 1000000, + 0x7ffffffeL, + 0x7fffffffL, + 0x80000000L, + 0x80000001L, + 0x80000002L, + 0x0003456789abcdefL, + 0x0007fffffffffffeL, + 0x0007ffffffffffffL, + 0x0008000000000000L, + 0x0008000000000001L, + 0x000cba9876543210L, + 0x000fffffffff0000L, + 0x000ffffffffff000L, + 0x000fffffffffff00L, + 0x000ffffffffffff0L, + 0x000ffffffffffff8L, + 0x000ffffffffffffcL, + 0x000ffffffffffffeL, + 0x000fffffffffffffL + }; + + for (long sign : signs) { + for (long exponent : exponents) { + for (long fraction : fractions) { + long bits = (sign << 63) | (exponent << 52) | fraction; + values.add(Double.longBitsToDouble(bits)); + } + } + } + + return new ArrayList<>(values); + } + + // ==================== Prefix Successor Tests ==================== + + @Test + public void makePrefixSuccessor_emptyInput_returnsEmpty() { + assertEquals(ByteString.EMPTY, SsFormat.makePrefixSuccessor(ByteString.EMPTY)); + assertEquals(ByteString.EMPTY, SsFormat.makePrefixSuccessor(null)); + } + + @Test + public void makePrefixSuccessor_singleByte_setsLsb() { + ByteString input = ByteString.copyFrom(new byte[] {0x00}); + ByteString result = SsFormat.makePrefixSuccessor(input); + + assertEquals(1, result.size()); + assertEquals(0x01, result.byteAt(0) & 0xFF); + 
} + + @Test + public void makePrefixSuccessor_multipleBytes_onlyModifiesLastByte() { + ByteString input = ByteString.copyFrom(new byte[] {0x12, 0x34, 0x00}); + ByteString result = SsFormat.makePrefixSuccessor(input); + + assertEquals(3, result.size()); + assertEquals(0x12, result.byteAt(0) & 0xFF); + assertEquals(0x34, result.byteAt(1) & 0xFF); + assertEquals(0x01, result.byteAt(2) & 0xFF); + } + + @Test + public void makePrefixSuccessor_resultIsGreaterThanOriginal() { + byte[] original = new byte[] {0x10, 0x20, 0x30}; + ByteString successor = SsFormat.makePrefixSuccessor(ByteString.copyFrom(original)); + + assertTrue( + ByteString.unsignedLexicographicalComparator() + .compare(ByteString.copyFrom(original), successor) + < 0); + } + + // ==================== Composite Tag Tests ==================== + + @Test + public void appendCompositeTag_shortTag_encodesInOneByte() { + // Tags 1-15 should fit in 1 byte + for (int tag = 1; tag <= 15; tag++) { + UnsynchronizedByteArrayOutputStream out = new UnsynchronizedByteArrayOutputStream(); + SsFormat.appendCompositeTag(out, tag); + byte[] result = out.toByteArray(); + + assertEquals("Tag " + tag + " should encode to 1 byte", 1, result.length); + assertEquals("Tag " + tag + " should encode as tag << 1", tag << 1, result[0] & 0xFF); + } + } + + @Test + public void appendCompositeTag_mediumTag_encodesInTwoBytes() { + // Tags 16-4095 should fit in 2 bytes + int[] testTags = {16, 100, 1000, 4095}; + for (int tag : testTags) { + UnsynchronizedByteArrayOutputStream out = new UnsynchronizedByteArrayOutputStream(); + SsFormat.appendCompositeTag(out, tag); + byte[] result = out.toByteArray(); + + assertEquals("Tag " + tag + " should encode to 2 bytes", 2, result.length); + } + } + + @Test + public void appendCompositeTag_largeTag_encodesInThreeBytes() { + // Tags 4096-65535 should fit in 3 bytes + int[] testTags = {4096, 10000, 65535}; + for (int tag : testTags) { + UnsynchronizedByteArrayOutputStream out = new 
UnsynchronizedByteArrayOutputStream(); + SsFormat.appendCompositeTag(out, tag); + byte[] result = out.toByteArray(); + + assertEquals("Tag " + tag + " should encode to 3 bytes", 3, result.length); + } + } + + @Test + public void appendCompositeTag_invalidTag_throws() { + UnsynchronizedByteArrayOutputStream out = new UnsynchronizedByteArrayOutputStream(); + assertThrows(IllegalArgumentException.class, () -> SsFormat.appendCompositeTag(out, 0)); + assertThrows(IllegalArgumentException.class, () -> SsFormat.appendCompositeTag(out, -1)); + assertThrows(IllegalArgumentException.class, () -> SsFormat.appendCompositeTag(out, 65536)); + } + + @Test + public void appendCompositeTag_preservesOrdering() { + // Verify smaller tags encode to lexicographically smaller byte sequences + for (int tag1 = 1; tag1 <= 100; tag1++) { + for (int tag2 = tag1 + 1; tag2 <= 101 && tag2 <= tag1 + 10; tag2++) { + UnsynchronizedByteArrayOutputStream out1 = new UnsynchronizedByteArrayOutputStream(); + UnsynchronizedByteArrayOutputStream out2 = new UnsynchronizedByteArrayOutputStream(); + + SsFormat.appendCompositeTag(out1, tag1); + SsFormat.appendCompositeTag(out2, tag2); + + assertTrue( + "Tag " + tag1 + " should encode smaller than tag " + tag2, + UNSIGNED_BYTE_COMPARATOR.compare(out1.toByteArray(), out2.toByteArray()) < 0); + } + } + } + + // ==================== Signed Integer Tests ==================== + + @Test + public void appendInt64Increasing_preservesOrdering() { + // Verify that encoded integers maintain their natural ordering + for (int i = 0; i < signedIntTestValues.size() - 1; i++) { + long v1 = signedIntTestValues.get(i); + long v2 = signedIntTestValues.get(i + 1); + + UnsynchronizedByteArrayOutputStream out1 = new UnsynchronizedByteArrayOutputStream(); + UnsynchronizedByteArrayOutputStream out2 = new UnsynchronizedByteArrayOutputStream(); + + SsFormat.appendInt64Increasing(out1, v1); + SsFormat.appendInt64Increasing(out2, v2); + + assertTrue( + "Encoded " + v1 + " should be less 
than encoded " + v2, + UNSIGNED_BYTE_COMPARATOR.compare(out1.toByteArray(), out2.toByteArray()) < 0); + } + } + + @Test + public void appendInt64Decreasing_reversesOrdering() { + // Verify that decreasing encoding reverses the ordering + for (int i = 0; i < signedIntTestValues.size() - 1; i++) { + long v1 = signedIntTestValues.get(i); + long v2 = signedIntTestValues.get(i + 1); + + UnsynchronizedByteArrayOutputStream out1 = new UnsynchronizedByteArrayOutputStream(); + UnsynchronizedByteArrayOutputStream out2 = new UnsynchronizedByteArrayOutputStream(); + + SsFormat.appendInt64Decreasing(out1, v1); + SsFormat.appendInt64Decreasing(out2, v2); + + assertTrue( + "Decreasing encoded " + v1 + " should be greater than encoded " + v2, + UNSIGNED_BYTE_COMPARATOR.compare(out1.toByteArray(), out2.toByteArray()) > 0); + } + } + + @Test + public void appendInt64Increasing_hasIsKeyBitSet() { + UnsynchronizedByteArrayOutputStream out = new UnsynchronizedByteArrayOutputStream(); + SsFormat.appendInt64Increasing(out, 42); + byte[] result = out.toByteArray(); + + assertTrue("IS_KEY bit (0x80) should be set", (result[0] & 0x80) != 0); + } + + @Test + public void appendInt64Increasing_edgeCases() { + long[] edgeCases = {Long.MIN_VALUE, -1, 0, 1, Long.MAX_VALUE}; + + for (long value : edgeCases) { + UnsynchronizedByteArrayOutputStream out = new UnsynchronizedByteArrayOutputStream(); + SsFormat.appendInt64Increasing(out, value); + byte[] result = out.toByteArray(); + + assertTrue("Result should have at least 2 bytes for value " + value, result.length >= 2); + assertTrue("IS_KEY bit should be set for value " + value, (result[0] & 0x80) != 0); + } + } + + // ==================== Boolean Tests ==================== + + @Test + public void appendBoolIncreasing_preservesOrdering() { + UnsynchronizedByteArrayOutputStream outFalse = new UnsynchronizedByteArrayOutputStream(); + UnsynchronizedByteArrayOutputStream outTrue = new UnsynchronizedByteArrayOutputStream(); + + 
SsFormat.appendBoolIncreasing(outFalse, false); + SsFormat.appendBoolIncreasing(outTrue, true); + + assertTrue( + "Encoded false should be less than encoded true", + UNSIGNED_BYTE_COMPARATOR.compare(outFalse.toByteArray(), outTrue.toByteArray()) < 0); + } + + @Test + public void appendBoolIncreasing_encodesCorrectly() { + UnsynchronizedByteArrayOutputStream outFalse = new UnsynchronizedByteArrayOutputStream(); + UnsynchronizedByteArrayOutputStream outTrue = new UnsynchronizedByteArrayOutputStream(); + + SsFormat.appendBoolIncreasing(outFalse, false); + SsFormat.appendBoolIncreasing(outTrue, true); + + // false=0: header 0x80 (IS_KEY | TYPE_UINT_1), payload 0x00 + assertArrayEquals(new byte[] {(byte) 0x80, 0x00}, outFalse.toByteArray()); + // true=1: header 0x80, payload 0x02 (1 << 1) + assertArrayEquals(new byte[] {(byte) 0x80, 0x02}, outTrue.toByteArray()); + } + + @Test + public void appendBoolDecreasing_reversesOrdering() { + UnsynchronizedByteArrayOutputStream outFalse = new UnsynchronizedByteArrayOutputStream(); + UnsynchronizedByteArrayOutputStream outTrue = new UnsynchronizedByteArrayOutputStream(); + + SsFormat.appendBoolDecreasing(outFalse, false); + SsFormat.appendBoolDecreasing(outTrue, true); + + assertTrue( + "Decreasing encoded false should be greater than encoded true", + UNSIGNED_BYTE_COMPARATOR.compare(outFalse.toByteArray(), outTrue.toByteArray()) > 0); + } + + @Test + public void appendBoolDecreasing_encodesCorrectly() { + UnsynchronizedByteArrayOutputStream outFalse = new UnsynchronizedByteArrayOutputStream(); + UnsynchronizedByteArrayOutputStream outTrue = new UnsynchronizedByteArrayOutputStream(); + + SsFormat.appendBoolDecreasing(outFalse, false); + SsFormat.appendBoolDecreasing(outTrue, true); + + // false=0 inverted: header 0xA8 (IS_KEY | TYPE_DECREASING_UINT_1), payload 0xFE (~0 & 0x7F) << + // 1 + assertArrayEquals(new byte[] {(byte) 0xA8, (byte) 0xFE}, outFalse.toByteArray()); + // true=1 inverted: header 0xA8, payload 0xFC (~1 & 0x7F) 
<< 1 + assertArrayEquals(new byte[] {(byte) 0xA8, (byte) 0xFC}, outTrue.toByteArray()); + } + + // ==================== String Tests ==================== + + @Test + public void appendStringIncreasing_preservesOrdering() { + String[] strings = {"", "a", "aa", "ab", "b", "hello", "world", "\u00ff"}; + Arrays.sort(strings); + + for (int i = 0; i < strings.length - 1; i++) { + UnsynchronizedByteArrayOutputStream out1 = new UnsynchronizedByteArrayOutputStream(); + UnsynchronizedByteArrayOutputStream out2 = new UnsynchronizedByteArrayOutputStream(); + + SsFormat.appendStringIncreasing(out1, strings[i]); + SsFormat.appendStringIncreasing(out2, strings[i + 1]); + + assertTrue( + "Encoded '" + strings[i] + "' should be less than '" + strings[i + 1] + "'", + UNSIGNED_BYTE_COMPARATOR.compare(out1.toByteArray(), out2.toByteArray()) < 0); + } + } + + @Test + public void appendStringDecreasing_reversesOrdering() { + String[] strings = {"", "a", "b", "hello"}; + + for (int i = 0; i < strings.length - 1; i++) { + UnsynchronizedByteArrayOutputStream out1 = new UnsynchronizedByteArrayOutputStream(); + UnsynchronizedByteArrayOutputStream out2 = new UnsynchronizedByteArrayOutputStream(); + + SsFormat.appendStringDecreasing(out1, strings[i]); + SsFormat.appendStringDecreasing(out2, strings[i + 1]); + + assertTrue( + "Decreasing encoded '" + strings[i] + "' should be greater than '" + strings[i + 1] + "'", + UNSIGNED_BYTE_COMPARATOR.compare(out1.toByteArray(), out2.toByteArray()) > 0); + } + } + + @Test + public void appendStringIncreasing_escapesSpecialBytes() { + // Test that 0x00 and 0xFF bytes are properly escaped + UnsynchronizedByteArrayOutputStream out = new UnsynchronizedByteArrayOutputStream(); + SsFormat.appendBytesIncreasing(out, new byte[] {0x00, (byte) 0xFF, 0x42}); + byte[] result = out.toByteArray(); + + // Result should be longer due to escaping: + // header (1) + escaped 0x00 (2) + escaped 0xFF (2) + 0x42 (1) + terminator (2) = 8 + assertTrue("Result should include 
escape sequences", result.length > 5); + } + + @Test + public void appendStringIncreasing_emptyString() { + UnsynchronizedByteArrayOutputStream out = new UnsynchronizedByteArrayOutputStream(); + SsFormat.appendStringIncreasing(out, ""); + byte[] result = out.toByteArray(); + + // Empty string should still have header + terminator + assertTrue("Empty string encoding should have at least 3 bytes", result.length >= 3); + assertTrue("IS_KEY bit should be set", (result[0] & 0x80) != 0); + } + + // ==================== Bytes Tests ==================== + + @Test + public void appendBytesIncreasing_preservesOrdering() { + byte[][] testBytes = { + new byte[] {}, + new byte[] {0x00}, + new byte[] {0x01}, + new byte[] {0x01, 0x02}, + new byte[] {(byte) 0xFF} + }; + + for (int i = 0; i < testBytes.length - 1; i++) { + UnsynchronizedByteArrayOutputStream out1 = new UnsynchronizedByteArrayOutputStream(); + UnsynchronizedByteArrayOutputStream out2 = new UnsynchronizedByteArrayOutputStream(); + + SsFormat.appendBytesIncreasing(out1, testBytes[i]); + SsFormat.appendBytesIncreasing(out2, testBytes[i + 1]); + + assertTrue( + "Encoded bytes should maintain lexicographic order", + UNSIGNED_BYTE_COMPARATOR.compare(out1.toByteArray(), out2.toByteArray()) < 0); + } + } + + @Test + public void appendBytesDecreasing_reversesOrdering() { + byte[][] testBytes = { + new byte[] {}, + new byte[] {0x00}, + new byte[] {0x01}, + new byte[] {0x01, 0x02}, + new byte[] {(byte) 0xFF} + }; + + for (int i = 0; i < testBytes.length - 1; i++) { + UnsynchronizedByteArrayOutputStream out1 = new UnsynchronizedByteArrayOutputStream(); + UnsynchronizedByteArrayOutputStream out2 = new UnsynchronizedByteArrayOutputStream(); + + SsFormat.appendBytesDecreasing(out1, testBytes[i]); + SsFormat.appendBytesDecreasing(out2, testBytes[i + 1]); + + assertTrue( + "Decreasing encoded bytes should reverse lexicographic order", + UNSIGNED_BYTE_COMPARATOR.compare(out1.toByteArray(), out2.toByteArray()) > 0); + } + } + + @Test 
+ public void appendBytesDecreasing_escapesSpecialBytes() { + // Test that 0x00 and 0xFF bytes are properly escaped in decreasing mode + UnsynchronizedByteArrayOutputStream out = new UnsynchronizedByteArrayOutputStream(); + SsFormat.appendBytesDecreasing(out, new byte[] {0x00, (byte) 0xFF, 0x42}); + byte[] result = out.toByteArray(); + + // Result should be longer due to escaping + // In decreasing mode: bytes are inverted, then escaped + // Original 0x00 -> inverted to 0xFF -> needs escape (0xFF, 0x10) + // Original 0xFF -> inverted to 0x00 -> needs escape (0x00, 0xF0) + // Original 0x42 -> inverted to 0xBD -> no escape needed + assertTrue("Result should include escape sequences", result.length > 5); + } + + @Test + public void appendBytesDecreasing_emptyArray() { + UnsynchronizedByteArrayOutputStream out = new UnsynchronizedByteArrayOutputStream(); + SsFormat.appendBytesDecreasing(out, new byte[] {}); + byte[] result = out.toByteArray(); + + // Empty bytes should still have header + terminator + assertTrue("Empty bytes encoding should have at least 3 bytes", result.length >= 3); + assertTrue("IS_KEY bit should be set", (result[0] & 0x80) != 0); + } + + @Test + public void appendBytesIncreasing_vs_Decreasing_sameInput_differentOutput() { + byte[] input = new byte[] {0x01, 0x02, 0x03}; + + UnsynchronizedByteArrayOutputStream outInc = new UnsynchronizedByteArrayOutputStream(); + UnsynchronizedByteArrayOutputStream outDec = new UnsynchronizedByteArrayOutputStream(); + + SsFormat.appendBytesIncreasing(outInc, input); + SsFormat.appendBytesDecreasing(outDec, input); + + // The outputs should be different (different header type and inverted bytes) + assertFalse( + "Increasing and decreasing encodings should differ", + Arrays.equals(outInc.toByteArray(), outDec.toByteArray())); + } + + // ==================== Double Tests ==================== + + @Test + public void appendDoubleIncreasing_preservesOrdering() { + // Filter out NaN as it has special comparison semantics + 
List sortedDoubles = new ArrayList<>(); + for (double d : doubleTestValues) { + if (!Double.isNaN(d)) { + sortedDoubles.add(d); + } + } + sortedDoubles.sort(Double::compare); + + for (int i = 0; i < sortedDoubles.size() - 1; i++) { + double v1 = sortedDoubles.get(i); + double v2 = sortedDoubles.get(i + 1); + + UnsynchronizedByteArrayOutputStream out1 = new UnsynchronizedByteArrayOutputStream(); + UnsynchronizedByteArrayOutputStream out2 = new UnsynchronizedByteArrayOutputStream(); + + SsFormat.appendDoubleIncreasing(out1, v1); + SsFormat.appendDoubleIncreasing(out2, v2); + + int cmp = UNSIGNED_BYTE_COMPARATOR.compare(out1.toByteArray(), out2.toByteArray()); + + // Note: -0.0 and 0.0 encode identically (both map to 0 internally), so allow equality + assertTrue("Encoded " + v1 + " should be <= encoded " + v2, cmp <= 0); + } + } + + @Test + public void appendDoubleDecreasing_reversesOrdering() { + double[] values = {-Double.MAX_VALUE, -1.0, 0.0, 1.0, Double.MAX_VALUE}; + + for (int i = 0; i < values.length - 1; i++) { + UnsynchronizedByteArrayOutputStream out1 = new UnsynchronizedByteArrayOutputStream(); + UnsynchronizedByteArrayOutputStream out2 = new UnsynchronizedByteArrayOutputStream(); + + SsFormat.appendDoubleDecreasing(out1, values[i]); + SsFormat.appendDoubleDecreasing(out2, values[i + 1]); + + assertTrue( + "Decreasing encoded " + values[i] + " should be greater than " + values[i + 1], + UNSIGNED_BYTE_COMPARATOR.compare(out1.toByteArray(), out2.toByteArray()) > 0); + } + } + + @Test + public void appendDoubleIncreasing_specialValues() { + // Test special double values + // Note: -0.0 is excluded because it encodes identically to 0.0 + // (both have internal representation mapping to 0) + double[] specialValues = { + Double.NEGATIVE_INFINITY, + -Double.MAX_VALUE, + -1.0, + -Double.MIN_VALUE, + 0.0, // -0.0 encodes the same as 0.0 + Double.MIN_VALUE, + 1.0, + Double.MAX_VALUE, + Double.POSITIVE_INFINITY + }; + + // Verify ordering is preserved + for (int i = 0; 
i < specialValues.length - 1; i++) { + UnsynchronizedByteArrayOutputStream out1 = new UnsynchronizedByteArrayOutputStream(); + UnsynchronizedByteArrayOutputStream out2 = new UnsynchronizedByteArrayOutputStream(); + + SsFormat.appendDoubleIncreasing(out1, specialValues[i]); + SsFormat.appendDoubleIncreasing(out2, specialValues[i + 1]); + + assertTrue( + "Special value " + specialValues[i] + " should encode less than " + specialValues[i + 1], + UNSIGNED_BYTE_COMPARATOR.compare(out1.toByteArray(), out2.toByteArray()) < 0); + } + } + + @Test + public void appendDoubleIncreasing_negativeZeroEqualsPositiveZero() { + // Verify that -0.0 and 0.0 encode identically + // This is correct behavior: both map to internal representation 0 + UnsynchronizedByteArrayOutputStream outNegZero = new UnsynchronizedByteArrayOutputStream(); + UnsynchronizedByteArrayOutputStream outPosZero = new UnsynchronizedByteArrayOutputStream(); + + SsFormat.appendDoubleIncreasing(outNegZero, -0.0); + SsFormat.appendDoubleIncreasing(outPosZero, 0.0); + + assertArrayEquals( + "-0.0 and 0.0 should encode identically", + outNegZero.toByteArray(), + outPosZero.toByteArray()); + } + + @Test + public void appendDoubleIncreasing_nan() { + UnsynchronizedByteArrayOutputStream out = new UnsynchronizedByteArrayOutputStream(); + SsFormat.appendDoubleIncreasing(out, Double.NaN); + byte[] result = out.toByteArray(); + + assertTrue("NaN encoding should have at least 2 bytes", result.length >= 2); + assertTrue("IS_KEY bit should be set for NaN", (result[0] & 0x80) != 0); + } + + // ==================== Null Marker Tests ==================== + + @Test + public void appendNullOrderedFirst_encoding() { + UnsynchronizedByteArrayOutputStream out = new UnsynchronizedByteArrayOutputStream(); + SsFormat.appendNullOrderedFirst(out); + byte[] result = out.toByteArray(); + + assertEquals("Null ordered first should encode to 2 bytes", 2, result.length); + assertTrue("IS_KEY bit should be set", (result[0] & 0x80) != 0); + } + + 
@Test + public void appendNullOrderedLast_encoding() { + UnsynchronizedByteArrayOutputStream out = new UnsynchronizedByteArrayOutputStream(); + SsFormat.appendNullOrderedLast(out); + byte[] result = out.toByteArray(); + + assertEquals("Null ordered last should encode to 2 bytes", 2, result.length); + assertTrue("IS_KEY bit should be set", (result[0] & 0x80) != 0); + } + + @Test + public void appendNotNullMarkerNullOrderedFirst_encoding() { + UnsynchronizedByteArrayOutputStream out = new UnsynchronizedByteArrayOutputStream(); + SsFormat.appendNotNullMarkerNullOrderedFirst(out); + byte[] result = out.toByteArray(); + + assertEquals("Not-null marker (nulls first) should encode to 1 byte", 1, result.length); + } + + @Test + public void appendNotNullMarkerNullOrderedLast_encoding() { + UnsynchronizedByteArrayOutputStream out = new UnsynchronizedByteArrayOutputStream(); + SsFormat.appendNotNullMarkerNullOrderedLast(out); + byte[] result = out.toByteArray(); + + assertEquals("Not-null marker (nulls last) should encode to 1 byte", 1, result.length); + } + + @Test + public void nullOrderedFirst_sortsBeforeValues() { + UnsynchronizedByteArrayOutputStream nullOut = new UnsynchronizedByteArrayOutputStream(); + UnsynchronizedByteArrayOutputStream valueOut = new UnsynchronizedByteArrayOutputStream(); + + SsFormat.appendNullOrderedFirst(nullOut); + SsFormat.appendNotNullMarkerNullOrderedFirst(valueOut); + SsFormat.appendInt64Increasing(valueOut, Long.MIN_VALUE); + + assertTrue( + "Null (ordered first) should sort before any value", + UNSIGNED_BYTE_COMPARATOR.compare(nullOut.toByteArray(), valueOut.toByteArray()) < 0); + } + + @Test + public void nullOrderedLast_sortsAfterValues() { + UnsynchronizedByteArrayOutputStream nullOut = new UnsynchronizedByteArrayOutputStream(); + UnsynchronizedByteArrayOutputStream valueOut = new UnsynchronizedByteArrayOutputStream(); + + SsFormat.appendNullOrderedLast(nullOut); + SsFormat.appendNotNullMarkerNullOrderedLast(valueOut); + 
SsFormat.appendInt64Increasing(valueOut, Long.MAX_VALUE); + + assertTrue( + "Null (ordered last) should sort after any value", + UNSIGNED_BYTE_COMPARATOR.compare(nullOut.toByteArray(), valueOut.toByteArray()) > 0); + } + + // ==================== Timestamp Tests ==================== + + @Test + public void encodeTimestamp_length() { + byte[] result = SsFormat.encodeTimestamp(0, 0); + assertEquals("Timestamp should encode to 12 bytes", 12, result.length); + } + + @Test + public void encodeTimestamp_preservesOrdering() { + long[][] timestamps = { + {0, 0}, + {0, 1}, + {0, 999999999}, + {1, 0}, + {100, 500000000}, + {Long.MAX_VALUE / 2, 0} + }; + + for (int i = 0; i < timestamps.length - 1; i++) { + byte[] t1 = SsFormat.encodeTimestamp(timestamps[i][0], (int) timestamps[i][1]); + byte[] t2 = SsFormat.encodeTimestamp(timestamps[i + 1][0], (int) timestamps[i + 1][1]); + + assertTrue( + "Earlier timestamp should encode smaller", UNSIGNED_BYTE_COMPARATOR.compare(t1, t2) < 0); + } + } + + // ==================== UUID Tests ==================== + + @Test + public void encodeUuid_length() { + byte[] result = SsFormat.encodeUuid(0, 0); + assertEquals("UUID should encode to 16 bytes", 16, result.length); + } + + @Test + public void encodeUuid_bigEndianEncoding() { + byte[] result = SsFormat.encodeUuid(0x0102030405060708L, 0x090A0B0C0D0E0F10L); + + // Verify big-endian encoding of high bits + assertEquals(0x01, result[0] & 0xFF); + assertEquals(0x02, result[1] & 0xFF); + assertEquals(0x03, result[2] & 0xFF); + assertEquals(0x04, result[3] & 0xFF); + assertEquals(0x05, result[4] & 0xFF); + assertEquals(0x06, result[5] & 0xFF); + assertEquals(0x07, result[6] & 0xFF); + assertEquals(0x08, result[7] & 0xFF); + + // Verify big-endian encoding of low bits + assertEquals(0x09, result[8] & 0xFF); + assertEquals(0x0A, result[9] & 0xFF); + assertEquals(0x0B, result[10] & 0xFF); + assertEquals(0x0C, result[11] & 0xFF); + assertEquals(0x0D, result[12] & 0xFF); + assertEquals(0x0E, 
result[13] & 0xFF); + assertEquals(0x0F, result[14] & 0xFF); + assertEquals(0x10, result[15] & 0xFF); + } + + @Test + public void encodeUuid_preservesOrdering() { + // UUIDs compared as unsigned 128-bit integers should preserve order + long[][] uuids = { + {0, 0}, + {0, 1}, + {0, Long.MAX_VALUE}, + {1, 0}, + {Long.MAX_VALUE, Long.MAX_VALUE} + }; + + for (int i = 0; i < uuids.length - 1; i++) { + byte[] u1 = SsFormat.encodeUuid(uuids[i][0], uuids[i][1]); + byte[] u2 = SsFormat.encodeUuid(uuids[i + 1][0], uuids[i + 1][1]); + + assertTrue("UUID ordering should be preserved", UNSIGNED_BYTE_COMPARATOR.compare(u1, u2) < 0); + } + } + + // ==================== Composite Key Tests ==================== + + @Test + public void compositeKey_tagPlusIntPreservesOrdering() { + int tag = 5; + long[] values = {Long.MIN_VALUE, -1, 0, 1, Long.MAX_VALUE}; + + for (int i = 0; i < values.length - 1; i++) { + UnsynchronizedByteArrayOutputStream out1 = new UnsynchronizedByteArrayOutputStream(); + UnsynchronizedByteArrayOutputStream out2 = new UnsynchronizedByteArrayOutputStream(); + + SsFormat.appendCompositeTag(out1, tag); + SsFormat.appendInt64Increasing(out1, values[i]); + + SsFormat.appendCompositeTag(out2, tag); + SsFormat.appendInt64Increasing(out2, values[i + 1]); + + assertTrue( + "Composite key with " + values[i] + " should be less than with " + values[i + 1], + UNSIGNED_BYTE_COMPARATOR.compare(out1.toByteArray(), out2.toByteArray()) < 0); + } + } + + @Test + public void compositeKey_differentTagsSortByTag() { + long value = 100; + + UnsynchronizedByteArrayOutputStream out1 = new UnsynchronizedByteArrayOutputStream(); + UnsynchronizedByteArrayOutputStream out2 = new UnsynchronizedByteArrayOutputStream(); + + SsFormat.appendCompositeTag(out1, 5); + SsFormat.appendInt64Increasing(out1, value); + + SsFormat.appendCompositeTag(out2, 10); + SsFormat.appendInt64Increasing(out2, value); + + assertTrue( + "Key with smaller tag should sort first", + 
UNSIGNED_BYTE_COMPARATOR.compare(out1.toByteArray(), out2.toByteArray()) < 0); + } + + @Test + public void compositeKey_multipleKeyParts() { + // Simulate encoding a composite key with multiple parts: tag + int + string + UnsynchronizedByteArrayOutputStream out1 = new UnsynchronizedByteArrayOutputStream(); + UnsynchronizedByteArrayOutputStream out2 = new UnsynchronizedByteArrayOutputStream(); + + SsFormat.appendCompositeTag(out1, 1); + SsFormat.appendInt64Increasing(out1, 100); + SsFormat.appendStringIncreasing(out1, "alice"); + + SsFormat.appendCompositeTag(out2, 1); + SsFormat.appendInt64Increasing(out2, 100); + SsFormat.appendStringIncreasing(out2, "bob"); + + assertTrue( + "Keys with same prefix but different strings should order by string", + UNSIGNED_BYTE_COMPARATOR.compare(out1.toByteArray(), out2.toByteArray()) < 0); + } + + // ==================== Order Preservation Summary Test ==================== + + @Test + public void orderPreservation_comprehensiveIntTest() { + // Take a sample of values to avoid O(n^2) test time + int step = Math.max(1, signedIntTestValues.size() / 100); + List sample = new ArrayList<>(); + for (int i = 0; i < signedIntTestValues.size(); i += step) { + sample.add(signedIntTestValues.get(i)); + } + + // Encode all values + List encoded = new ArrayList<>(); + for (long v : sample) { + UnsynchronizedByteArrayOutputStream out = new UnsynchronizedByteArrayOutputStream(); + SsFormat.appendInt64Increasing(out, v); + encoded.add(out.toByteArray()); + } + + // Verify the encoded values are in the same order as the original values + for (int i = 0; i < sample.size() - 1; i++) { + int comparison = UNSIGNED_BYTE_COMPARATOR.compare(encoded.get(i), encoded.get(i + 1)); + assertTrue( + "Order should be preserved: " + sample.get(i) + " < " + sample.get(i + 1), + comparison < 0); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/TargetRangeTest.java 
b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/TargetRangeTest.java new file mode 100644 index 000000000000..ac43da07f316 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/TargetRangeTest.java @@ -0,0 +1,286 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.spi.v1; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import com.google.protobuf.ByteString; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@link TargetRange}. 
*/ +@RunWith(JUnit4.class) +public class TargetRangeTest { + + private static ByteString bs(String s) { + return ByteString.copyFromUtf8(s); + } + + // ==================== isPoint Tests ==================== + + @Test + public void isPoint_emptyLimit_returnsTrue() { + TargetRange range = new TargetRange(bs("a"), ByteString.EMPTY, false); + assertTrue(range.isPoint()); + } + + @Test + public void isPoint_nonEmptyLimit_returnsFalse() { + TargetRange range = new TargetRange(bs("a"), bs("b"), false); + assertFalse(range.isPoint()); + } + + // ==================== mergeFrom Start Key Tests ==================== + + @Test + public void mergeFrom_otherStartSmaller_updatesStart() { + TargetRange target = new TargetRange(bs("c"), bs("f"), false); + TargetRange other = new TargetRange(bs("a"), bs("d"), false); + + target.mergeFrom(other); + + assertEquals(bs("a"), target.start); + } + + @Test + public void mergeFrom_otherStartEqual_keepsOriginalStart() { + TargetRange target = new TargetRange(bs("c"), bs("f"), false); + TargetRange other = new TargetRange(bs("c"), bs("e"), false); + + target.mergeFrom(other); + + assertEquals(bs("c"), target.start); + } + + @Test + public void mergeFrom_otherStartLarger_keepsOriginalStart() { + TargetRange target = new TargetRange(bs("a"), bs("f"), false); + TargetRange other = new TargetRange(bs("c"), bs("e"), false); + + target.mergeFrom(other); + + assertEquals(bs("a"), target.start); + } + + // ==================== mergeFrom Limit Key Tests (Range into Range) ==================== + + @Test + public void mergeFrom_otherLimitLarger_updatesLimit() { + TargetRange target = new TargetRange(bs("a"), bs("c"), false); + TargetRange other = new TargetRange(bs("b"), bs("e"), false); + + target.mergeFrom(other); + + assertEquals(bs("e"), target.limit); + } + + @Test + public void mergeFrom_otherLimitEqual_keepsOriginalLimit() { + TargetRange target = new TargetRange(bs("a"), bs("e"), false); + TargetRange other = new TargetRange(bs("b"), bs("e"), 
false); + + target.mergeFrom(other); + + assertEquals(bs("e"), target.limit); + } + + @Test + public void mergeFrom_otherLimitSmaller_keepsOriginalLimit() { + TargetRange target = new TargetRange(bs("a"), bs("f"), false); + TargetRange other = new TargetRange(bs("b"), bs("d"), false); + + target.mergeFrom(other); + + assertEquals(bs("f"), target.limit); + } + + // ==================== mergeFrom Point into Range Tests ==================== + + @Test + public void mergeFrom_pointBeyondLimit_extendsLimitWithPrefixSuccessor() { + TargetRange target = new TargetRange(bs("a"), bs("c"), false); + // Point at "d" which is beyond the limit "c" + TargetRange point = new TargetRange(bs("d"), ByteString.EMPTY, false); + + target.mergeFrom(point); + + // Limit should be makePrefixSuccessor("d") + assertEquals(SsFormat.makePrefixSuccessor(bs("d")), target.limit); + } + + @Test + public void mergeFrom_pointAtLimit_extendsLimitWithPrefixSuccessor() { + TargetRange target = new TargetRange(bs("a"), bs("c"), false); + // Point at "c" which equals the limit + TargetRange point = new TargetRange(bs("c"), ByteString.EMPTY, false); + + target.mergeFrom(point); + + // Limit should be makePrefixSuccessor("c") + assertEquals(SsFormat.makePrefixSuccessor(bs("c")), target.limit); + } + + @Test + public void mergeFrom_pointWithinRange_keepsOriginalLimit() { + TargetRange target = new TargetRange(bs("a"), bs("e"), false); + // Point at "c" which is within the range [a, e) + TargetRange point = new TargetRange(bs("c"), ByteString.EMPTY, false); + + target.mergeFrom(point); + + // Limit should remain unchanged since point is within range + assertEquals(bs("e"), target.limit); + } + + @Test + public void mergeFrom_pointBeforeStart_updatesStartKeepsLimit() { + TargetRange target = new TargetRange(bs("c"), bs("e"), false); + // Point at "a" which is before the start + TargetRange point = new TargetRange(bs("a"), ByteString.EMPTY, false); + + target.mergeFrom(point); + + assertEquals(bs("a"), 
target.start); + // Limit unchanged since point is before the range + assertEquals(bs("e"), target.limit); + } + + // ==================== mergeFrom Point into Point Tests ==================== + + @Test + public void mergeFrom_pointIntoPoint_smallerStart_extendsToIncludeBoth() { + TargetRange target = new TargetRange(bs("c"), ByteString.EMPTY, false); + TargetRange other = new TargetRange(bs("a"), ByteString.EMPTY, false); + + target.mergeFrom(other); + + assertEquals(bs("a"), target.start); + // Since target was a point (limit empty), and other.start < target.limit (empty), + // limit stays empty? Let's verify the logic... + // Actually: other.isPoint() && other.start >= this.limit + // If this.limit is empty, then other.start >= empty is always true (lexicographically) + // So limit becomes makePrefixSuccessor(other.start) + assertEquals(SsFormat.makePrefixSuccessor(bs("a")), target.limit); + } + + @Test + public void mergeFrom_pointIntoPoint_largerStart() { + TargetRange target = new TargetRange(bs("a"), ByteString.EMPTY, false); + TargetRange other = new TargetRange(bs("c"), ByteString.EMPTY, false); + + target.mergeFrom(other); + + assertEquals(bs("a"), target.start); + // other.isPoint() && other.start("c") >= target.limit(empty) is true + assertEquals(SsFormat.makePrefixSuccessor(bs("c")), target.limit); + } + + // ==================== mergeFrom Approximate Flag Tests ==================== + + @Test + public void mergeFrom_bothNotApproximate_resultNotApproximate() { + TargetRange target = new TargetRange(bs("a"), bs("c"), false); + TargetRange other = new TargetRange(bs("b"), bs("d"), false); + + target.mergeFrom(other); + + assertFalse(target.approximate); + } + + @Test + public void mergeFrom_targetApproximate_resultApproximate() { + TargetRange target = new TargetRange(bs("a"), bs("c"), true); + TargetRange other = new TargetRange(bs("b"), bs("d"), false); + + target.mergeFrom(other); + + assertTrue(target.approximate); + } + + @Test + public void 
mergeFrom_otherApproximate_resultApproximate() { + TargetRange target = new TargetRange(bs("a"), bs("c"), false); + TargetRange other = new TargetRange(bs("b"), bs("d"), true); + + target.mergeFrom(other); + + assertTrue(target.approximate); + } + + @Test + public void mergeFrom_bothApproximate_resultApproximate() { + TargetRange target = new TargetRange(bs("a"), bs("c"), true); + TargetRange other = new TargetRange(bs("b"), bs("d"), true); + + target.mergeFrom(other); + + assertTrue(target.approximate); + } + + // ==================== mergeFrom Combined Scenarios ==================== + + @Test + public void mergeFrom_disjointRanges_createsUnion() { + // [a, c) merged with [e, g) should give [a, g) + TargetRange target = new TargetRange(bs("a"), bs("c"), false); + TargetRange other = new TargetRange(bs("e"), bs("g"), false); + + target.mergeFrom(other); + + assertEquals(bs("a"), target.start); + assertEquals(bs("g"), target.limit); + } + + @Test + public void mergeFrom_overlappingRanges_createsUnion() { + // [a, d) merged with [c, f) should give [a, f) + TargetRange target = new TargetRange(bs("a"), bs("d"), false); + TargetRange other = new TargetRange(bs("c"), bs("f"), false); + + target.mergeFrom(other); + + assertEquals(bs("a"), target.start); + assertEquals(bs("f"), target.limit); + } + + @Test + public void mergeFrom_containedRange_keepsOuter() { + // [a, f) merged with [b, d) should give [a, f) + TargetRange target = new TargetRange(bs("a"), bs("f"), false); + TargetRange other = new TargetRange(bs("b"), bs("d"), false); + + target.mergeFrom(other); + + assertEquals(bs("a"), target.start); + assertEquals(bs("f"), target.limit); + } + + @Test + public void mergeFrom_multiplePoints_createsSpanningRange() { + // Start with point at "c", merge point at "a", then point at "e" + TargetRange target = new TargetRange(bs("c"), ByteString.EMPTY, false); + + target.mergeFrom(new TargetRange(bs("a"), ByteString.EMPTY, false)); + target.mergeFrom(new TargetRange(bs("e"), 
ByteString.EMPTY, false)); + + assertEquals(bs("a"), target.start); + assertEquals(SsFormat.makePrefixSuccessor(bs("e")), target.limit); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/MockSpanner.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/MockSpanner.java new file mode 100644 index 000000000000..e4c2ad5cd2d5 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/MockSpanner.java @@ -0,0 +1,59 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.v1; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.protobuf.AbstractMessage; +import io.grpc.ServerServiceDefinition; +import java.util.List; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockSpanner implements MockGrpcService { + private final MockSpannerImpl serviceImpl; + + public MockSpanner() { + serviceImpl = new MockSpannerImpl(); + } + + @Override + public List getRequests() { + return serviceImpl.getRequests(); + } + + @Override + public void addResponse(AbstractMessage response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addException(exception); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/MockSpannerImpl.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/MockSpannerImpl.java new file mode 100644 index 000000000000..52926e09b5f1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/MockSpannerImpl.java @@ -0,0 +1,416 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.v1; + +import com.google.api.core.BetaApi; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Empty; +import com.google.spanner.v1.BatchCreateSessionsRequest; +import com.google.spanner.v1.BatchCreateSessionsResponse; +import com.google.spanner.v1.BatchWriteRequest; +import com.google.spanner.v1.BatchWriteResponse; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.CommitResponse; +import com.google.spanner.v1.CreateSessionRequest; +import com.google.spanner.v1.DeleteSessionRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteBatchDmlResponse; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.GetSessionRequest; +import com.google.spanner.v1.ListSessionsRequest; +import com.google.spanner.v1.ListSessionsResponse; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.PartitionQueryRequest; +import com.google.spanner.v1.PartitionReadRequest; +import com.google.spanner.v1.PartitionResponse; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.Session; +import com.google.spanner.v1.SpannerGrpc.SpannerImplBase; +import com.google.spanner.v1.Transaction; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; +import javax.annotation.Generated; + +@BetaApi +@Generated("by gapic-generator-java") +public class MockSpannerImpl extends SpannerImplBase { + private List requests; + private Queue responses; + + public MockSpannerImpl() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + public List getRequests() { + return requests; + } + + public void addResponse(AbstractMessage response) { + responses.add(response); + } + 
+ public void setResponses(List responses) { + this.responses = new LinkedList(responses); + } + + public void addException(Exception exception) { + responses.add(exception); + } + + public void reset() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + @Override + public void createSession( + CreateSessionRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Session) { + requests.add(request); + responseObserver.onNext(((Session) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CreateSession, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Session.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void batchCreateSessions( + BatchCreateSessionsRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof BatchCreateSessionsResponse) { + requests.add(request); + responseObserver.onNext(((BatchCreateSessionsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method BatchCreateSessions, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + BatchCreateSessionsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getSession(GetSessionRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Session) { + requests.add(request); + responseObserver.onNext(((Session) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetSession, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Session.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listSessions( + ListSessionsRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListSessionsResponse) { + requests.add(request); + responseObserver.onNext(((ListSessionsResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListSessions, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + ListSessionsResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void deleteSession(DeleteSessionRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Empty) { + requests.add(request); + responseObserver.onNext(((Empty) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method DeleteSession, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Empty.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void executeSql(ExecuteSqlRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ResultSet) { + requests.add(request); + responseObserver.onNext(((ResultSet) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ExecuteSql, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + ResultSet.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void executeStreamingSql( + ExecuteSqlRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof PartialResultSet) { + requests.add(request); + responseObserver.onNext(((PartialResultSet) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ExecuteStreamingSql, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + PartialResultSet.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void executeBatchDml( + ExecuteBatchDmlRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ExecuteBatchDmlResponse) { + requests.add(request); + responseObserver.onNext(((ExecuteBatchDmlResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ExecuteBatchDml, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + ExecuteBatchDmlResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void read(ReadRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ResultSet) { + requests.add(request); + responseObserver.onNext(((ResultSet) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method Read, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + ResultSet.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void streamingRead( + ReadRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof PartialResultSet) { + requests.add(request); + responseObserver.onNext(((PartialResultSet) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method StreamingRead, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + PartialResultSet.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void beginTransaction( + BeginTransactionRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Transaction) { + requests.add(request); + responseObserver.onNext(((Transaction) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method BeginTransaction, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Transaction.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void commit(CommitRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof CommitResponse) { + requests.add(request); + responseObserver.onNext(((CommitResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method Commit, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + CommitResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void rollback(RollbackRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Empty) { + requests.add(request); + responseObserver.onNext(((Empty) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method Rollback, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Empty.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void partitionQuery( + PartitionQueryRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof PartitionResponse) { + requests.add(request); + responseObserver.onNext(((PartitionResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method PartitionQuery, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + PartitionResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void partitionRead( + PartitionReadRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof PartitionResponse) { + requests.add(request); + responseObserver.onNext(((PartitionResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method PartitionRead, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + PartitionResponse.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void batchWrite( + BatchWriteRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof BatchWriteResponse) { + requests.add(request); + responseObserver.onNext(((BatchWriteResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method BatchWrite, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + BatchWriteResponse.class.getName(), + Exception.class.getName()))); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientHttpJsonTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientHttpJsonTest.java new file mode 100644 index 000000000000..0d7107aa5d16 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientHttpJsonTest.java @@ -0,0 +1,1366 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.v1; + +import static com.google.cloud.spanner.v1.SpannerClient.ListSessionsPagedResponse; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.httpjson.GaxHttpJsonProperties; +import com.google.api.gax.httpjson.testing.MockHttpService; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.ApiExceptionFactory; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.testing.FakeStatusCode; +import com.google.cloud.spanner.v1.stub.HttpJsonSpannerStub; +import com.google.common.collect.Lists; +import com.google.protobuf.ByteString; +import com.google.protobuf.Empty; +import com.google.protobuf.ListValue; +import com.google.protobuf.Struct; +import com.google.protobuf.Timestamp; +import com.google.rpc.Status; +import com.google.spanner.v1.BatchCreateSessionsResponse; +import com.google.spanner.v1.CacheUpdate; +import com.google.spanner.v1.CommitResponse; +import com.google.spanner.v1.DatabaseName; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteBatchDmlResponse; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.KeySet; +import com.google.spanner.v1.ListSessionsResponse; +import com.google.spanner.v1.MultiplexedSessionPrecommitToken; +import com.google.spanner.v1.Mutation; +import com.google.spanner.v1.Partition; +import com.google.spanner.v1.PartitionOptions; +import com.google.spanner.v1.PartitionQueryRequest; +import com.google.spanner.v1.PartitionReadRequest; +import com.google.spanner.v1.PartitionResponse; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.RequestOptions; +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.ResultSetMetadata; +import 
com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.RoutingHint; +import com.google.spanner.v1.Session; +import com.google.spanner.v1.SessionName; +import com.google.spanner.v1.Transaction; +import com.google.spanner.v1.TransactionOptions; +import com.google.spanner.v1.TransactionSelector; +import com.google.spanner.v1.Type; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import javax.annotation.Generated; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@Generated("by gapic-generator-java") +public class SpannerClientHttpJsonTest { + private static MockHttpService mockService; + private static SpannerClient client; + + @BeforeClass + public static void startStaticServer() throws IOException { + mockService = + new MockHttpService( + HttpJsonSpannerStub.getMethodDescriptors(), SpannerSettings.getDefaultEndpoint()); + SpannerSettings settings = + SpannerSettings.newHttpJsonBuilder() + .setTransportChannelProvider( + SpannerSettings.defaultHttpJsonTransportProviderBuilder() + .setHttpTransport(mockService) + .build()) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = SpannerClient.create(settings); + } + + @AfterClass + public static void stopServer() { + client.close(); + } + + @Before + public void setUp() {} + + @After + public void tearDown() throws Exception { + mockService.reset(); + } + + @Test + public void createSessionTest() throws Exception { + Session expectedResponse = + Session.newBuilder() + .setName( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .putAllLabels(new HashMap()) + .setCreateTime(Timestamp.newBuilder().build()) + .setApproximateLastUseTime(Timestamp.newBuilder().build()) + .setCreatorRole("creatorRole-190742846") + .setMultiplexed(true) + .build(); + 
mockService.addResponse(expectedResponse); + + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + + Session actualResponse = client.createSession(database); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createSessionExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + client.createSession(database); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createSessionTest2() throws Exception { + Session expectedResponse = + Session.newBuilder() + .setName( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .putAllLabels(new HashMap()) + .setCreateTime(Timestamp.newBuilder().build()) + .setApproximateLastUseTime(Timestamp.newBuilder().build()) + .setCreatorRole("creatorRole-190742846") + .setMultiplexed(true) + .build(); + mockService.addResponse(expectedResponse); + + String database = "projects/project-3102/instances/instance-3102/databases/database-3102"; + + Session actualResponse = client.createSession(database); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createSessionExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String database = "projects/project-3102/instances/instance-3102/databases/database-3102"; + client.createSession(database); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void batchCreateSessionsTest() throws Exception { + BatchCreateSessionsResponse expectedResponse = + BatchCreateSessionsResponse.newBuilder().addAllSession(new ArrayList()).build(); + mockService.addResponse(expectedResponse); + + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + int sessionCount = 185691686; + + BatchCreateSessionsResponse actualResponse = client.batchCreateSessions(database, sessionCount); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void batchCreateSessionsExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + int sessionCount = 185691686; + client.batchCreateSessions(database, sessionCount); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void batchCreateSessionsTest2() throws Exception { + BatchCreateSessionsResponse expectedResponse = + BatchCreateSessionsResponse.newBuilder().addAllSession(new ArrayList()).build(); + mockService.addResponse(expectedResponse); + + String database = "projects/project-3102/instances/instance-3102/databases/database-3102"; + int sessionCount = 185691686; + + BatchCreateSessionsResponse actualResponse = client.batchCreateSessions(database, sessionCount); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void batchCreateSessionsExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String database = "projects/project-3102/instances/instance-3102/databases/database-3102"; + int sessionCount = 185691686; + client.batchCreateSessions(database, sessionCount); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getSessionTest() throws Exception { + Session expectedResponse = + Session.newBuilder() + .setName( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .putAllLabels(new HashMap()) + .setCreateTime(Timestamp.newBuilder().build()) + .setApproximateLastUseTime(Timestamp.newBuilder().build()) + .setCreatorRole("creatorRole-190742846") + .setMultiplexed(true) + .build(); + mockService.addResponse(expectedResponse); + + SessionName name = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + + Session actualResponse = client.getSession(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getSessionExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + SessionName name = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + client.getSession(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getSessionTest2() throws Exception { + Session expectedResponse = + Session.newBuilder() + .setName( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .putAllLabels(new HashMap()) + .setCreateTime(Timestamp.newBuilder().build()) + .setApproximateLastUseTime(Timestamp.newBuilder().build()) + .setCreatorRole("creatorRole-190742846") + .setMultiplexed(true) + .build(); + mockService.addResponse(expectedResponse); + + String name = + "projects/project-199/instances/instance-199/databases/database-199/sessions/session-199"; + + Session actualResponse = client.getSession(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getSessionExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = + "projects/project-199/instances/instance-199/databases/database-199/sessions/session-199"; + client.getSession(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listSessionsTest() throws Exception { + Session responsesElement = Session.newBuilder().build(); + ListSessionsResponse expectedResponse = + ListSessionsResponse.newBuilder() + .setNextPageToken("") + .addAllSessions(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + + ListSessionsPagedResponse pagedListResponse = client.listSessions(database); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getSessionsList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listSessionsExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + client.listSessions(database); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listSessionsTest2() throws Exception { + Session responsesElement = Session.newBuilder().build(); + ListSessionsResponse expectedResponse = + ListSessionsResponse.newBuilder() + .setNextPageToken("") + .addAllSessions(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + String database = "projects/project-3102/instances/instance-3102/databases/database-3102"; + + ListSessionsPagedResponse pagedListResponse = client.listSessions(database); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getSessionsList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listSessionsExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String database = "projects/project-3102/instances/instance-3102/databases/database-3102"; + client.listSessions(database); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteSessionTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + SessionName name = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + + client.deleteSession(name); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteSessionExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + SessionName name = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + client.deleteSession(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteSessionTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + String name = + "projects/project-199/instances/instance-199/databases/database-199/sessions/session-199"; + + client.deleteSession(name); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteSessionExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = + "projects/project-199/instances/instance-199/databases/database-199/sessions/session-199"; + client.deleteSession(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void executeSqlTest() throws Exception { + ResultSet expectedResponse = + ResultSet.newBuilder() + .setMetadata(ResultSetMetadata.newBuilder().build()) + .addAllRows(new ArrayList()) + .setStats(ResultSetStats.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) + .setCacheUpdate(CacheUpdate.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + ExecuteSqlRequest request = + ExecuteSqlRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setSql("sql114126") + .setParams(Struct.newBuilder().build()) + .putAllParamTypes(new HashMap()) + .setResumeToken(ByteString.EMPTY) + .setPartitionToken(ByteString.EMPTY) + .setSeqno(109325920) + .setQueryOptions(ExecuteSqlRequest.QueryOptions.newBuilder().build()) + .setRequestOptions(RequestOptions.newBuilder().build()) + .setDirectedReadOptions(DirectedReadOptions.newBuilder().build()) + .setDataBoostEnabled(true) + .setLastStatement(true) + .setRoutingHint(RoutingHint.newBuilder().build()) + .build(); + + ResultSet actualResponse = client.executeSql(request); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void executeSqlExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ExecuteSqlRequest request = + 
ExecuteSqlRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setSql("sql114126") + .setParams(Struct.newBuilder().build()) + .putAllParamTypes(new HashMap()) + .setResumeToken(ByteString.EMPTY) + .setPartitionToken(ByteString.EMPTY) + .setSeqno(109325920) + .setQueryOptions(ExecuteSqlRequest.QueryOptions.newBuilder().build()) + .setRequestOptions(RequestOptions.newBuilder().build()) + .setDirectedReadOptions(DirectedReadOptions.newBuilder().build()) + .setDataBoostEnabled(true) + .setLastStatement(true) + .setRoutingHint(RoutingHint.newBuilder().build()) + .build(); + client.executeSql(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void executeStreamingSqlTest() throws Exception {} + + @Test + public void executeStreamingSqlExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + } + + @Test + public void executeBatchDmlTest() throws Exception { + ExecuteBatchDmlResponse expectedResponse = + ExecuteBatchDmlResponse.newBuilder() + .addAllResultSets(new ArrayList()) + .setStatus(Status.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + ExecuteBatchDmlRequest request = + ExecuteBatchDmlRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .addAllStatements(new ArrayList()) + .setSeqno(109325920) + .setRequestOptions(RequestOptions.newBuilder().build()) + .setLastStatements(true) + .build(); + + ExecuteBatchDmlResponse actualResponse = 
client.executeBatchDml(request); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void executeBatchDmlExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ExecuteBatchDmlRequest request = + ExecuteBatchDmlRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .addAllStatements(new ArrayList()) + .setSeqno(109325920) + .setRequestOptions(RequestOptions.newBuilder().build()) + .setLastStatements(true) + .build(); + client.executeBatchDml(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void readTest() throws Exception { + ResultSet expectedResponse = + ResultSet.newBuilder() + .setMetadata(ResultSetMetadata.newBuilder().build()) + .addAllRows(new ArrayList()) + .setStats(ResultSetStats.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) + .setCacheUpdate(CacheUpdate.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + ReadRequest request = + ReadRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setTable("table110115790") + .setIndex("index100346066") + .addAllColumns(new ArrayList()) + .setKeySet(KeySet.newBuilder().build()) + .setLimit(102976443) + .setResumeToken(ByteString.EMPTY) + .setPartitionToken(ByteString.EMPTY) + .setRequestOptions(RequestOptions.newBuilder().build()) + .setDirectedReadOptions(DirectedReadOptions.newBuilder().build()) + .setDataBoostEnabled(true) + .setRoutingHint(RoutingHint.newBuilder().build()) + .build(); + + ResultSet actualResponse = client.read(request); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void readExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + ReadRequest request = + ReadRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", 
"[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setTable("table110115790") + .setIndex("index100346066") + .addAllColumns(new ArrayList()) + .setKeySet(KeySet.newBuilder().build()) + .setLimit(102976443) + .setResumeToken(ByteString.EMPTY) + .setPartitionToken(ByteString.EMPTY) + .setRequestOptions(RequestOptions.newBuilder().build()) + .setDirectedReadOptions(DirectedReadOptions.newBuilder().build()) + .setDataBoostEnabled(true) + .setRoutingHint(RoutingHint.newBuilder().build()) + .build(); + client.read(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void streamingReadTest() throws Exception {} + + @Test + public void streamingReadExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + } + + @Test + public void beginTransactionTest() throws Exception { + Transaction expectedResponse = + Transaction.newBuilder() + .setId(ByteString.EMPTY) + .setReadTimestamp(Timestamp.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) + .setCacheUpdate(CacheUpdate.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + SessionName session = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + TransactionOptions options = TransactionOptions.newBuilder().build(); + + Transaction actualResponse = client.beginTransaction(session, options); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + 
GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void beginTransactionExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + SessionName session = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + TransactionOptions options = TransactionOptions.newBuilder().build(); + client.beginTransaction(session, options); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void beginTransactionTest2() throws Exception { + Transaction expectedResponse = + Transaction.newBuilder() + .setId(ByteString.EMPTY) + .setReadTimestamp(Timestamp.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) + .setCacheUpdate(CacheUpdate.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + String session = + "projects/project-2078/instances/instance-2078/databases/database-2078/sessions/session-2078"; + TransactionOptions options = TransactionOptions.newBuilder().build(); + + Transaction actualResponse = client.beginTransaction(session, options); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void beginTransactionExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), 
FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String session = + "projects/project-2078/instances/instance-2078/databases/database-2078/sessions/session-2078"; + TransactionOptions options = TransactionOptions.newBuilder().build(); + client.beginTransaction(session, options); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void commitTest() throws Exception { + CommitResponse expectedResponse = + CommitResponse.newBuilder() + .setCommitTimestamp(Timestamp.newBuilder().build()) + .setCommitStats(CommitResponse.CommitStats.newBuilder().build()) + .setSnapshotTimestamp(Timestamp.newBuilder().build()) + .setCacheUpdate(CacheUpdate.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + SessionName session = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + ByteString transactionId = ByteString.EMPTY; + List mutations = new ArrayList<>(); + + CommitResponse actualResponse = client.commit(session, transactionId, mutations); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void commitExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + SessionName session = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + ByteString transactionId = ByteString.EMPTY; + List mutations = 
new ArrayList<>(); + client.commit(session, transactionId, mutations); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void commitTest2() throws Exception { + CommitResponse expectedResponse = + CommitResponse.newBuilder() + .setCommitTimestamp(Timestamp.newBuilder().build()) + .setCommitStats(CommitResponse.CommitStats.newBuilder().build()) + .setSnapshotTimestamp(Timestamp.newBuilder().build()) + .setCacheUpdate(CacheUpdate.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + SessionName session = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + TransactionOptions singleUseTransaction = TransactionOptions.newBuilder().build(); + List mutations = new ArrayList<>(); + + CommitResponse actualResponse = client.commit(session, singleUseTransaction, mutations); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void commitExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + SessionName session = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + TransactionOptions singleUseTransaction = TransactionOptions.newBuilder().build(); + List mutations = new ArrayList<>(); + client.commit(session, singleUseTransaction, mutations); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void commitTest3() throws Exception { + CommitResponse expectedResponse = + CommitResponse.newBuilder() + .setCommitTimestamp(Timestamp.newBuilder().build()) + .setCommitStats(CommitResponse.CommitStats.newBuilder().build()) + .setSnapshotTimestamp(Timestamp.newBuilder().build()) + .setCacheUpdate(CacheUpdate.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + String session = + "projects/project-2078/instances/instance-2078/databases/database-2078/sessions/session-2078"; + ByteString transactionId = ByteString.EMPTY; + List mutations = new ArrayList<>(); + + CommitResponse actualResponse = client.commit(session, transactionId, mutations); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void commitExceptionTest3() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String session = + "projects/project-2078/instances/instance-2078/databases/database-2078/sessions/session-2078"; + ByteString transactionId = ByteString.EMPTY; + List mutations = new ArrayList<>(); + client.commit(session, transactionId, mutations); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void commitTest4() throws Exception { + CommitResponse expectedResponse = + CommitResponse.newBuilder() + .setCommitTimestamp(Timestamp.newBuilder().build()) + .setCommitStats(CommitResponse.CommitStats.newBuilder().build()) + .setSnapshotTimestamp(Timestamp.newBuilder().build()) + .setCacheUpdate(CacheUpdate.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + String session = + "projects/project-2078/instances/instance-2078/databases/database-2078/sessions/session-2078"; + TransactionOptions singleUseTransaction = TransactionOptions.newBuilder().build(); + List mutations = new ArrayList<>(); + + CommitResponse actualResponse = client.commit(session, singleUseTransaction, mutations); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void commitExceptionTest4() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String session = + "projects/project-2078/instances/instance-2078/databases/database-2078/sessions/session-2078"; + TransactionOptions singleUseTransaction = TransactionOptions.newBuilder().build(); + List mutations = new ArrayList<>(); + client.commit(session, singleUseTransaction, mutations); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void rollbackTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + SessionName session = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + ByteString transactionId = ByteString.EMPTY; + + client.rollback(session, transactionId); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void rollbackExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + SessionName session = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + ByteString transactionId = ByteString.EMPTY; + client.rollback(session, transactionId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void rollbackTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + String session = + "projects/project-2078/instances/instance-2078/databases/database-2078/sessions/session-2078"; + ByteString transactionId = ByteString.EMPTY; + + client.rollback(session, transactionId); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void rollbackExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String session = + "projects/project-2078/instances/instance-2078/databases/database-2078/sessions/session-2078"; + ByteString transactionId = ByteString.EMPTY; + client.rollback(session, transactionId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void partitionQueryTest() throws Exception { + PartitionResponse expectedResponse = + PartitionResponse.newBuilder() + .addAllPartitions(new ArrayList()) + .setTransaction(Transaction.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + PartitionQueryRequest request = + PartitionQueryRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setSql("sql114126") + .setParams(Struct.newBuilder().build()) + .putAllParamTypes(new HashMap()) + .setPartitionOptions(PartitionOptions.newBuilder().build()) + .build(); + + PartitionResponse actualResponse = client.partitionQuery(request); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void partitionQueryExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + PartitionQueryRequest request = + PartitionQueryRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setSql("sql114126") + .setParams(Struct.newBuilder().build()) + .putAllParamTypes(new HashMap()) + .setPartitionOptions(PartitionOptions.newBuilder().build()) + .build(); + client.partitionQuery(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { 
+ // Expected exception. + } + } + + @Test + public void partitionReadTest() throws Exception { + PartitionResponse expectedResponse = + PartitionResponse.newBuilder() + .addAllPartitions(new ArrayList()) + .setTransaction(Transaction.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + PartitionReadRequest request = + PartitionReadRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setTable("table110115790") + .setIndex("index100346066") + .addAllColumns(new ArrayList()) + .setKeySet(KeySet.newBuilder().build()) + .setPartitionOptions(PartitionOptions.newBuilder().build()) + .build(); + + PartitionResponse actualResponse = client.partitionRead(request); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void partitionReadExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + PartitionReadRequest request = + PartitionReadRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setTable("table110115790") + .setIndex("index100346066") + .addAllColumns(new ArrayList()) + .setKeySet(KeySet.newBuilder().build()) + .setPartitionOptions(PartitionOptions.newBuilder().build()) + .build(); + 
client.partitionRead(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void batchWriteTest() throws Exception {} + + @Test + public void batchWriteExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientTest.java b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientTest.java new file mode 100644 index 000000000000..9e3b86c91cd7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientTest.java @@ -0,0 +1,1483 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.v1; + +import static com.google.cloud.spanner.v1.SpannerClient.ListSessionsPagedResponse; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.grpc.testing.MockStreamObserver; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StatusCode; +import com.google.common.collect.Lists; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ByteString; +import com.google.protobuf.Empty; +import com.google.protobuf.ListValue; +import com.google.protobuf.Struct; +import com.google.protobuf.Timestamp; +import com.google.protobuf.Value; +import com.google.rpc.Status; +import com.google.spanner.v1.BatchCreateSessionsRequest; +import com.google.spanner.v1.BatchCreateSessionsResponse; +import com.google.spanner.v1.BatchWriteRequest; +import com.google.spanner.v1.BatchWriteResponse; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CacheUpdate; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.CommitResponse; +import com.google.spanner.v1.CreateSessionRequest; +import com.google.spanner.v1.DatabaseName; +import com.google.spanner.v1.DeleteSessionRequest; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteBatchDmlResponse; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.GetSessionRequest; +import com.google.spanner.v1.KeySet; +import com.google.spanner.v1.ListSessionsRequest; +import com.google.spanner.v1.ListSessionsResponse; +import 
com.google.spanner.v1.MultiplexedSessionPrecommitToken; +import com.google.spanner.v1.Mutation; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.Partition; +import com.google.spanner.v1.PartitionOptions; +import com.google.spanner.v1.PartitionQueryRequest; +import com.google.spanner.v1.PartitionReadRequest; +import com.google.spanner.v1.PartitionResponse; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.RequestOptions; +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.RoutingHint; +import com.google.spanner.v1.Session; +import com.google.spanner.v1.SessionName; +import com.google.spanner.v1.Transaction; +import com.google.spanner.v1.TransactionOptions; +import com.google.spanner.v1.TransactionSelector; +import com.google.spanner.v1.Type; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import javax.annotation.Generated; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@Generated("by gapic-generator-java") +public class SpannerClientTest { + private static MockServiceHelper mockServiceHelper; + private static MockSpanner mockSpanner; + private LocalChannelProvider channelProvider; + private SpannerClient client; + + @BeforeClass + public static void startStaticServer() { + mockSpanner = new MockSpanner(); + mockServiceHelper = + new MockServiceHelper( + UUID.randomUUID().toString(), Arrays.asList(mockSpanner)); + mockServiceHelper.start(); + } + + @AfterClass + public static void stopServer() { + mockServiceHelper.stop(); + } + + @Before + 
public void setUp() throws IOException { + mockServiceHelper.reset(); + channelProvider = mockServiceHelper.createChannelProvider(); + SpannerSettings settings = + SpannerSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = SpannerClient.create(settings); + } + + @After + public void tearDown() throws Exception { + client.close(); + } + + @Test + public void createSessionTest() throws Exception { + Session expectedResponse = + Session.newBuilder() + .setName( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .putAllLabels(new HashMap()) + .setCreateTime(Timestamp.newBuilder().build()) + .setApproximateLastUseTime(Timestamp.newBuilder().build()) + .setCreatorRole("creatorRole-190742846") + .setMultiplexed(true) + .build(); + mockSpanner.addResponse(expectedResponse); + + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + + Session actualResponse = client.createSession(database); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateSessionRequest actualRequest = ((CreateSessionRequest) actualRequests.get(0)); + + Assert.assertEquals(database.toString(), actualRequest.getDatabase()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createSessionExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + client.createSession(database); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createSessionTest2() throws Exception { + Session expectedResponse = + Session.newBuilder() + .setName( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .putAllLabels(new HashMap()) + .setCreateTime(Timestamp.newBuilder().build()) + .setApproximateLastUseTime(Timestamp.newBuilder().build()) + .setCreatorRole("creatorRole-190742846") + .setMultiplexed(true) + .build(); + mockSpanner.addResponse(expectedResponse); + + String database = "database1789464955"; + + Session actualResponse = client.createSession(database); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateSessionRequest actualRequest = ((CreateSessionRequest) actualRequests.get(0)); + + Assert.assertEquals(database, actualRequest.getDatabase()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createSessionExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + String database = "database1789464955"; + client.createSession(database); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void batchCreateSessionsTest() throws Exception { + BatchCreateSessionsResponse expectedResponse = + BatchCreateSessionsResponse.newBuilder().addAllSession(new ArrayList()).build(); + mockSpanner.addResponse(expectedResponse); + + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + int sessionCount = 185691686; + + BatchCreateSessionsResponse actualResponse = client.batchCreateSessions(database, sessionCount); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + BatchCreateSessionsRequest actualRequest = ((BatchCreateSessionsRequest) actualRequests.get(0)); + + Assert.assertEquals(database.toString(), actualRequest.getDatabase()); + Assert.assertEquals(sessionCount, actualRequest.getSessionCount()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void batchCreateSessionsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + int sessionCount = 185691686; + client.batchCreateSessions(database, sessionCount); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void batchCreateSessionsTest2() throws Exception { + BatchCreateSessionsResponse expectedResponse = + BatchCreateSessionsResponse.newBuilder().addAllSession(new ArrayList()).build(); + mockSpanner.addResponse(expectedResponse); + + String database = "database1789464955"; + int sessionCount = 185691686; + + BatchCreateSessionsResponse actualResponse = client.batchCreateSessions(database, sessionCount); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + BatchCreateSessionsRequest actualRequest = ((BatchCreateSessionsRequest) actualRequests.get(0)); + + Assert.assertEquals(database, actualRequest.getDatabase()); + Assert.assertEquals(sessionCount, actualRequest.getSessionCount()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void batchCreateSessionsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + String database = "database1789464955"; + int sessionCount = 185691686; + client.batchCreateSessions(database, sessionCount); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getSessionTest() throws Exception { + Session expectedResponse = + Session.newBuilder() + .setName( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .putAllLabels(new HashMap()) + .setCreateTime(Timestamp.newBuilder().build()) + .setApproximateLastUseTime(Timestamp.newBuilder().build()) + .setCreatorRole("creatorRole-190742846") + .setMultiplexed(true) + .build(); + mockSpanner.addResponse(expectedResponse); + + SessionName name = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + + Session actualResponse = client.getSession(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetSessionRequest actualRequest = ((GetSessionRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getSessionExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + SessionName name = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + client.getSession(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getSessionTest2() throws Exception { + Session expectedResponse = + Session.newBuilder() + .setName( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .putAllLabels(new HashMap()) + .setCreateTime(Timestamp.newBuilder().build()) + .setApproximateLastUseTime(Timestamp.newBuilder().build()) + .setCreatorRole("creatorRole-190742846") + .setMultiplexed(true) + .build(); + mockSpanner.addResponse(expectedResponse); + + String name = "name3373707"; + + Session actualResponse = client.getSession(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetSessionRequest actualRequest = ((GetSessionRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getSessionExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + String name = "name3373707"; + client.getSession(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listSessionsTest() throws Exception { + Session responsesElement = Session.newBuilder().build(); + ListSessionsResponse expectedResponse = + ListSessionsResponse.newBuilder() + .setNextPageToken("") + .addAllSessions(Arrays.asList(responsesElement)) + .build(); + mockSpanner.addResponse(expectedResponse); + + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + + ListSessionsPagedResponse pagedListResponse = client.listSessions(database); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getSessionsList().get(0), resources.get(0)); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListSessionsRequest actualRequest = ((ListSessionsRequest) actualRequests.get(0)); + + Assert.assertEquals(database.toString(), actualRequest.getDatabase()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listSessionsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + client.listSessions(database); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listSessionsTest2() throws Exception { + Session responsesElement = Session.newBuilder().build(); + ListSessionsResponse expectedResponse = + ListSessionsResponse.newBuilder() + .setNextPageToken("") + .addAllSessions(Arrays.asList(responsesElement)) + .build(); + mockSpanner.addResponse(expectedResponse); + + String database = "database1789464955"; + + ListSessionsPagedResponse pagedListResponse = client.listSessions(database); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getSessionsList().get(0), resources.get(0)); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListSessionsRequest actualRequest = ((ListSessionsRequest) actualRequests.get(0)); + + Assert.assertEquals(database, actualRequest.getDatabase()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listSessionsExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + String database = "database1789464955"; + client.listSessions(database); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteSessionTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockSpanner.addResponse(expectedResponse); + + SessionName name = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + + client.deleteSession(name); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteSessionRequest actualRequest = ((DeleteSessionRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteSessionExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + SessionName name = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + client.deleteSession(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteSessionTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockSpanner.addResponse(expectedResponse); + + String name = "name3373707"; + + client.deleteSession(name); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteSessionRequest actualRequest = ((DeleteSessionRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteSessionExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + String name = "name3373707"; + client.deleteSession(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void executeSqlTest() throws Exception { + ResultSet expectedResponse = + ResultSet.newBuilder() + .setMetadata(ResultSetMetadata.newBuilder().build()) + .addAllRows(new ArrayList()) + .setStats(ResultSetStats.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) + .setCacheUpdate(CacheUpdate.newBuilder().build()) + .build(); + mockSpanner.addResponse(expectedResponse); + + ExecuteSqlRequest request = + ExecuteSqlRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setSql("sql114126") + .setParams(Struct.newBuilder().build()) + .putAllParamTypes(new HashMap()) + .setResumeToken(ByteString.EMPTY) + .setPartitionToken(ByteString.EMPTY) + .setSeqno(109325920) + .setQueryOptions(ExecuteSqlRequest.QueryOptions.newBuilder().build()) + .setRequestOptions(RequestOptions.newBuilder().build()) + .setDirectedReadOptions(DirectedReadOptions.newBuilder().build()) + .setDataBoostEnabled(true) + .setLastStatement(true) + .setRoutingHint(RoutingHint.newBuilder().build()) + .build(); + + ResultSet actualResponse = client.executeSql(request); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ExecuteSqlRequest actualRequest = ((ExecuteSqlRequest) actualRequests.get(0)); + + Assert.assertEquals(request.getSession(), actualRequest.getSession()); + Assert.assertEquals(request.getTransaction(), actualRequest.getTransaction()); + Assert.assertEquals(request.getSql(), actualRequest.getSql()); + Assert.assertEquals(request.getParams(), actualRequest.getParams()); + Assert.assertEquals(request.getParamTypesMap(), actualRequest.getParamTypesMap()); + Assert.assertEquals(request.getResumeToken(), actualRequest.getResumeToken()); + Assert.assertEquals(request.getQueryMode(), 
actualRequest.getQueryMode()); + Assert.assertEquals(request.getPartitionToken(), actualRequest.getPartitionToken()); + Assert.assertEquals(request.getSeqno(), actualRequest.getSeqno()); + Assert.assertEquals(request.getQueryOptions(), actualRequest.getQueryOptions()); + Assert.assertEquals(request.getRequestOptions(), actualRequest.getRequestOptions()); + Assert.assertEquals(request.getDirectedReadOptions(), actualRequest.getDirectedReadOptions()); + Assert.assertEquals(request.getDataBoostEnabled(), actualRequest.getDataBoostEnabled()); + Assert.assertEquals(request.getLastStatement(), actualRequest.getLastStatement()); + Assert.assertEquals(request.getRoutingHint(), actualRequest.getRoutingHint()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void executeSqlExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + ExecuteSqlRequest request = + ExecuteSqlRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setSql("sql114126") + .setParams(Struct.newBuilder().build()) + .putAllParamTypes(new HashMap()) + .setResumeToken(ByteString.EMPTY) + .setPartitionToken(ByteString.EMPTY) + .setSeqno(109325920) + .setQueryOptions(ExecuteSqlRequest.QueryOptions.newBuilder().build()) + .setRequestOptions(RequestOptions.newBuilder().build()) + .setDirectedReadOptions(DirectedReadOptions.newBuilder().build()) + .setDataBoostEnabled(true) + .setLastStatement(true) + .setRoutingHint(RoutingHint.newBuilder().build()) + .build(); + client.executeSql(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void executeStreamingSqlTest() throws Exception { + PartialResultSet expectedResponse = + PartialResultSet.newBuilder() + .setMetadata(ResultSetMetadata.newBuilder().build()) + .addAllValues(new ArrayList()) + .setChunkedValue(true) + .setResumeToken(ByteString.EMPTY) + .setStats(ResultSetStats.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) + .setLast(true) + .setCacheUpdate(CacheUpdate.newBuilder().build()) + .build(); + mockSpanner.addResponse(expectedResponse); + ExecuteSqlRequest request = + ExecuteSqlRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setSql("sql114126") + .setParams(Struct.newBuilder().build()) + .putAllParamTypes(new HashMap()) + .setResumeToken(ByteString.EMPTY) + .setPartitionToken(ByteString.EMPTY) + .setSeqno(109325920) + .setQueryOptions(ExecuteSqlRequest.QueryOptions.newBuilder().build()) + .setRequestOptions(RequestOptions.newBuilder().build()) + .setDirectedReadOptions(DirectedReadOptions.newBuilder().build()) + .setDataBoostEnabled(true) + .setLastStatement(true) + .setRoutingHint(RoutingHint.newBuilder().build()) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = + client.executeStreamingSqlCallable(); + callable.serverStreamingCall(request, responseObserver); + + List actualResponses = responseObserver.future().get(); + Assert.assertEquals(1, actualResponses.size()); + Assert.assertEquals(expectedResponse, actualResponses.get(0)); + } + + @Test + public void executeStreamingSqlExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + ExecuteSqlRequest request = + ExecuteSqlRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", 
"[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setSql("sql114126") + .setParams(Struct.newBuilder().build()) + .putAllParamTypes(new HashMap()) + .setResumeToken(ByteString.EMPTY) + .setPartitionToken(ByteString.EMPTY) + .setSeqno(109325920) + .setQueryOptions(ExecuteSqlRequest.QueryOptions.newBuilder().build()) + .setRequestOptions(RequestOptions.newBuilder().build()) + .setDirectedReadOptions(DirectedReadOptions.newBuilder().build()) + .setDataBoostEnabled(true) + .setLastStatement(true) + .setRoutingHint(RoutingHint.newBuilder().build()) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = + client.executeStreamingSqlCallable(); + callable.serverStreamingCall(request, responseObserver); + + try { + List actualResponses = responseObserver.future().get(); + Assert.fail("No exception thrown"); + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void executeBatchDmlTest() throws Exception { + ExecuteBatchDmlResponse expectedResponse = + ExecuteBatchDmlResponse.newBuilder() + .addAllResultSets(new ArrayList()) + .setStatus(Status.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) + .build(); + mockSpanner.addResponse(expectedResponse); + + ExecuteBatchDmlRequest request = + ExecuteBatchDmlRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .addAllStatements(new ArrayList()) + .setSeqno(109325920) + .setRequestOptions(RequestOptions.newBuilder().build()) + .setLastStatements(true) + .build(); + + 
ExecuteBatchDmlResponse actualResponse = client.executeBatchDml(request); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ExecuteBatchDmlRequest actualRequest = ((ExecuteBatchDmlRequest) actualRequests.get(0)); + + Assert.assertEquals(request.getSession(), actualRequest.getSession()); + Assert.assertEquals(request.getTransaction(), actualRequest.getTransaction()); + Assert.assertEquals(request.getStatementsList(), actualRequest.getStatementsList()); + Assert.assertEquals(request.getSeqno(), actualRequest.getSeqno()); + Assert.assertEquals(request.getRequestOptions(), actualRequest.getRequestOptions()); + Assert.assertEquals(request.getLastStatements(), actualRequest.getLastStatements()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void executeBatchDmlExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + ExecuteBatchDmlRequest request = + ExecuteBatchDmlRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .addAllStatements(new ArrayList()) + .setSeqno(109325920) + .setRequestOptions(RequestOptions.newBuilder().build()) + .setLastStatements(true) + .build(); + client.executeBatchDml(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void readTest() throws Exception { + ResultSet expectedResponse = + ResultSet.newBuilder() + .setMetadata(ResultSetMetadata.newBuilder().build()) + .addAllRows(new ArrayList()) + .setStats(ResultSetStats.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) + .setCacheUpdate(CacheUpdate.newBuilder().build()) + .build(); + mockSpanner.addResponse(expectedResponse); + + ReadRequest request = + ReadRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setTable("table110115790") + .setIndex("index100346066") + .addAllColumns(new ArrayList()) + .setKeySet(KeySet.newBuilder().build()) + .setLimit(102976443) + .setResumeToken(ByteString.EMPTY) + .setPartitionToken(ByteString.EMPTY) + .setRequestOptions(RequestOptions.newBuilder().build()) + .setDirectedReadOptions(DirectedReadOptions.newBuilder().build()) + .setDataBoostEnabled(true) + .setRoutingHint(RoutingHint.newBuilder().build()) + .build(); + + ResultSet actualResponse = client.read(request); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ReadRequest actualRequest = ((ReadRequest) actualRequests.get(0)); + + Assert.assertEquals(request.getSession(), actualRequest.getSession()); + Assert.assertEquals(request.getTransaction(), actualRequest.getTransaction()); + Assert.assertEquals(request.getTable(), actualRequest.getTable()); + Assert.assertEquals(request.getIndex(), actualRequest.getIndex()); + Assert.assertEquals(request.getColumnsList(), actualRequest.getColumnsList()); + Assert.assertEquals(request.getKeySet(), actualRequest.getKeySet()); + Assert.assertEquals(request.getLimit(), actualRequest.getLimit()); + Assert.assertEquals(request.getResumeToken(), actualRequest.getResumeToken()); + 
Assert.assertEquals(request.getPartitionToken(), actualRequest.getPartitionToken()); + Assert.assertEquals(request.getRequestOptions(), actualRequest.getRequestOptions()); + Assert.assertEquals(request.getDirectedReadOptions(), actualRequest.getDirectedReadOptions()); + Assert.assertEquals(request.getDataBoostEnabled(), actualRequest.getDataBoostEnabled()); + Assert.assertEquals(request.getOrderBy(), actualRequest.getOrderBy()); + Assert.assertEquals(request.getLockHint(), actualRequest.getLockHint()); + Assert.assertEquals(request.getRoutingHint(), actualRequest.getRoutingHint()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void readExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + ReadRequest request = + ReadRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setTable("table110115790") + .setIndex("index100346066") + .addAllColumns(new ArrayList()) + .setKeySet(KeySet.newBuilder().build()) + .setLimit(102976443) + .setResumeToken(ByteString.EMPTY) + .setPartitionToken(ByteString.EMPTY) + .setRequestOptions(RequestOptions.newBuilder().build()) + .setDirectedReadOptions(DirectedReadOptions.newBuilder().build()) + .setDataBoostEnabled(true) + .setRoutingHint(RoutingHint.newBuilder().build()) + .build(); + client.read(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void streamingReadTest() throws Exception { + PartialResultSet expectedResponse = + PartialResultSet.newBuilder() + .setMetadata(ResultSetMetadata.newBuilder().build()) + .addAllValues(new ArrayList()) + .setChunkedValue(true) + .setResumeToken(ByteString.EMPTY) + .setStats(ResultSetStats.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) + .setLast(true) + .setCacheUpdate(CacheUpdate.newBuilder().build()) + .build(); + mockSpanner.addResponse(expectedResponse); + ReadRequest request = + ReadRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setTable("table110115790") + .setIndex("index100346066") + .addAllColumns(new ArrayList()) + .setKeySet(KeySet.newBuilder().build()) + .setLimit(102976443) + .setResumeToken(ByteString.EMPTY) + .setPartitionToken(ByteString.EMPTY) + .setRequestOptions(RequestOptions.newBuilder().build()) + .setDirectedReadOptions(DirectedReadOptions.newBuilder().build()) + .setDataBoostEnabled(true) + .setRoutingHint(RoutingHint.newBuilder().build()) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = + client.streamingReadCallable(); + callable.serverStreamingCall(request, responseObserver); + + List actualResponses = responseObserver.future().get(); + Assert.assertEquals(1, actualResponses.size()); + Assert.assertEquals(expectedResponse, actualResponses.get(0)); + } + + @Test + public void streamingReadExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + ReadRequest request = + ReadRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + 
.setTransaction(TransactionSelector.newBuilder().build()) + .setTable("table110115790") + .setIndex("index100346066") + .addAllColumns(new ArrayList()) + .setKeySet(KeySet.newBuilder().build()) + .setLimit(102976443) + .setResumeToken(ByteString.EMPTY) + .setPartitionToken(ByteString.EMPTY) + .setRequestOptions(RequestOptions.newBuilder().build()) + .setDirectedReadOptions(DirectedReadOptions.newBuilder().build()) + .setDataBoostEnabled(true) + .setRoutingHint(RoutingHint.newBuilder().build()) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = + client.streamingReadCallable(); + callable.serverStreamingCall(request, responseObserver); + + try { + List actualResponses = responseObserver.future().get(); + Assert.fail("No exception thrown"); + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } + + @Test + public void beginTransactionTest() throws Exception { + Transaction expectedResponse = + Transaction.newBuilder() + .setId(ByteString.EMPTY) + .setReadTimestamp(Timestamp.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) + .setCacheUpdate(CacheUpdate.newBuilder().build()) + .build(); + mockSpanner.addResponse(expectedResponse); + + SessionName session = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + TransactionOptions options = TransactionOptions.newBuilder().build(); + + Transaction actualResponse = client.beginTransaction(session, options); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + BeginTransactionRequest actualRequest = ((BeginTransactionRequest) 
actualRequests.get(0)); + + Assert.assertEquals(session.toString(), actualRequest.getSession()); + Assert.assertEquals(options, actualRequest.getOptions()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void beginTransactionExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + SessionName session = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + TransactionOptions options = TransactionOptions.newBuilder().build(); + client.beginTransaction(session, options); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void beginTransactionTest2() throws Exception { + Transaction expectedResponse = + Transaction.newBuilder() + .setId(ByteString.EMPTY) + .setReadTimestamp(Timestamp.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) + .setCacheUpdate(CacheUpdate.newBuilder().build()) + .build(); + mockSpanner.addResponse(expectedResponse); + + String session = "session1984987798"; + TransactionOptions options = TransactionOptions.newBuilder().build(); + + Transaction actualResponse = client.beginTransaction(session, options); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + BeginTransactionRequest actualRequest = ((BeginTransactionRequest) actualRequests.get(0)); + + Assert.assertEquals(session, actualRequest.getSession()); + Assert.assertEquals(options, actualRequest.getOptions()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); 
+ } + + @Test + public void beginTransactionExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + String session = "session1984987798"; + TransactionOptions options = TransactionOptions.newBuilder().build(); + client.beginTransaction(session, options); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void commitTest() throws Exception { + CommitResponse expectedResponse = + CommitResponse.newBuilder() + .setCommitTimestamp(Timestamp.newBuilder().build()) + .setCommitStats(CommitResponse.CommitStats.newBuilder().build()) + .setSnapshotTimestamp(Timestamp.newBuilder().build()) + .setCacheUpdate(CacheUpdate.newBuilder().build()) + .build(); + mockSpanner.addResponse(expectedResponse); + + SessionName session = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + ByteString transactionId = ByteString.EMPTY; + List mutations = new ArrayList<>(); + + CommitResponse actualResponse = client.commit(session, transactionId, mutations); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CommitRequest actualRequest = ((CommitRequest) actualRequests.get(0)); + + Assert.assertEquals(session.toString(), actualRequest.getSession()); + Assert.assertEquals(transactionId, actualRequest.getTransactionId()); + Assert.assertEquals(mutations, actualRequest.getMutationsList()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void commitExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try 
{ + SessionName session = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + ByteString transactionId = ByteString.EMPTY; + List mutations = new ArrayList<>(); + client.commit(session, transactionId, mutations); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void commitTest2() throws Exception { + CommitResponse expectedResponse = + CommitResponse.newBuilder() + .setCommitTimestamp(Timestamp.newBuilder().build()) + .setCommitStats(CommitResponse.CommitStats.newBuilder().build()) + .setSnapshotTimestamp(Timestamp.newBuilder().build()) + .setCacheUpdate(CacheUpdate.newBuilder().build()) + .build(); + mockSpanner.addResponse(expectedResponse); + + SessionName session = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + TransactionOptions singleUseTransaction = TransactionOptions.newBuilder().build(); + List mutations = new ArrayList<>(); + + CommitResponse actualResponse = client.commit(session, singleUseTransaction, mutations); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CommitRequest actualRequest = ((CommitRequest) actualRequests.get(0)); + + Assert.assertEquals(session.toString(), actualRequest.getSession()); + Assert.assertEquals(singleUseTransaction, actualRequest.getSingleUseTransaction()); + Assert.assertEquals(mutations, actualRequest.getMutationsList()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void commitExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + SessionName session = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", 
"[SESSION]"); + TransactionOptions singleUseTransaction = TransactionOptions.newBuilder().build(); + List mutations = new ArrayList<>(); + client.commit(session, singleUseTransaction, mutations); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void commitTest3() throws Exception { + CommitResponse expectedResponse = + CommitResponse.newBuilder() + .setCommitTimestamp(Timestamp.newBuilder().build()) + .setCommitStats(CommitResponse.CommitStats.newBuilder().build()) + .setSnapshotTimestamp(Timestamp.newBuilder().build()) + .setCacheUpdate(CacheUpdate.newBuilder().build()) + .build(); + mockSpanner.addResponse(expectedResponse); + + String session = "session1984987798"; + ByteString transactionId = ByteString.EMPTY; + List mutations = new ArrayList<>(); + + CommitResponse actualResponse = client.commit(session, transactionId, mutations); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CommitRequest actualRequest = ((CommitRequest) actualRequests.get(0)); + + Assert.assertEquals(session, actualRequest.getSession()); + Assert.assertEquals(transactionId, actualRequest.getTransactionId()); + Assert.assertEquals(mutations, actualRequest.getMutationsList()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void commitExceptionTest3() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + String session = "session1984987798"; + ByteString transactionId = ByteString.EMPTY; + List mutations = new ArrayList<>(); + client.commit(session, transactionId, mutations); + Assert.fail("No exception raised"); + } catch 
(InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void commitTest4() throws Exception { + CommitResponse expectedResponse = + CommitResponse.newBuilder() + .setCommitTimestamp(Timestamp.newBuilder().build()) + .setCommitStats(CommitResponse.CommitStats.newBuilder().build()) + .setSnapshotTimestamp(Timestamp.newBuilder().build()) + .setCacheUpdate(CacheUpdate.newBuilder().build()) + .build(); + mockSpanner.addResponse(expectedResponse); + + String session = "session1984987798"; + TransactionOptions singleUseTransaction = TransactionOptions.newBuilder().build(); + List mutations = new ArrayList<>(); + + CommitResponse actualResponse = client.commit(session, singleUseTransaction, mutations); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CommitRequest actualRequest = ((CommitRequest) actualRequests.get(0)); + + Assert.assertEquals(session, actualRequest.getSession()); + Assert.assertEquals(singleUseTransaction, actualRequest.getSingleUseTransaction()); + Assert.assertEquals(mutations, actualRequest.getMutationsList()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void commitExceptionTest4() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + String session = "session1984987798"; + TransactionOptions singleUseTransaction = TransactionOptions.newBuilder().build(); + List mutations = new ArrayList<>(); + client.commit(session, singleUseTransaction, mutations); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void rollbackTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockSpanner.addResponse(expectedResponse); + + SessionName session = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + ByteString transactionId = ByteString.EMPTY; + + client.rollback(session, transactionId); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + RollbackRequest actualRequest = ((RollbackRequest) actualRequests.get(0)); + + Assert.assertEquals(session.toString(), actualRequest.getSession()); + Assert.assertEquals(transactionId, actualRequest.getTransactionId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void rollbackExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + SessionName session = SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]"); + ByteString transactionId = ByteString.EMPTY; + client.rollback(session, transactionId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void rollbackTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockSpanner.addResponse(expectedResponse); + + String session = "session1984987798"; + ByteString transactionId = ByteString.EMPTY; + + client.rollback(session, transactionId); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + RollbackRequest actualRequest = ((RollbackRequest) actualRequests.get(0)); + + Assert.assertEquals(session, actualRequest.getSession()); + Assert.assertEquals(transactionId, actualRequest.getTransactionId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void rollbackExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + String session = "session1984987798"; + ByteString transactionId = ByteString.EMPTY; + client.rollback(session, transactionId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void partitionQueryTest() throws Exception { + PartitionResponse expectedResponse = + PartitionResponse.newBuilder() + .addAllPartitions(new ArrayList()) + .setTransaction(Transaction.newBuilder().build()) + .build(); + mockSpanner.addResponse(expectedResponse); + + PartitionQueryRequest request = + PartitionQueryRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setSql("sql114126") + .setParams(Struct.newBuilder().build()) + .putAllParamTypes(new HashMap()) + .setPartitionOptions(PartitionOptions.newBuilder().build()) + .build(); + + PartitionResponse actualResponse = client.partitionQuery(request); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + PartitionQueryRequest actualRequest = ((PartitionQueryRequest) actualRequests.get(0)); + + Assert.assertEquals(request.getSession(), actualRequest.getSession()); + Assert.assertEquals(request.getTransaction(), actualRequest.getTransaction()); + Assert.assertEquals(request.getSql(), actualRequest.getSql()); + Assert.assertEquals(request.getParams(), actualRequest.getParams()); + Assert.assertEquals(request.getParamTypesMap(), actualRequest.getParamTypesMap()); + Assert.assertEquals(request.getPartitionOptions(), actualRequest.getPartitionOptions()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void partitionQueryExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + PartitionQueryRequest request = + PartitionQueryRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", 
"[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setSql("sql114126") + .setParams(Struct.newBuilder().build()) + .putAllParamTypes(new HashMap()) + .setPartitionOptions(PartitionOptions.newBuilder().build()) + .build(); + client.partitionQuery(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void partitionReadTest() throws Exception { + PartitionResponse expectedResponse = + PartitionResponse.newBuilder() + .addAllPartitions(new ArrayList()) + .setTransaction(Transaction.newBuilder().build()) + .build(); + mockSpanner.addResponse(expectedResponse); + + PartitionReadRequest request = + PartitionReadRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setTable("table110115790") + .setIndex("index100346066") + .addAllColumns(new ArrayList()) + .setKeySet(KeySet.newBuilder().build()) + .setPartitionOptions(PartitionOptions.newBuilder().build()) + .build(); + + PartitionResponse actualResponse = client.partitionRead(request); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockSpanner.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + PartitionReadRequest actualRequest = ((PartitionReadRequest) actualRequests.get(0)); + + Assert.assertEquals(request.getSession(), actualRequest.getSession()); + Assert.assertEquals(request.getTransaction(), actualRequest.getTransaction()); + Assert.assertEquals(request.getTable(), actualRequest.getTable()); + Assert.assertEquals(request.getIndex(), actualRequest.getIndex()); + Assert.assertEquals(request.getColumnsList(), actualRequest.getColumnsList()); + Assert.assertEquals(request.getKeySet(), actualRequest.getKeySet()); + Assert.assertEquals(request.getPartitionOptions(), 
actualRequest.getPartitionOptions()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void partitionReadExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + + try { + PartitionReadRequest request = + PartitionReadRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setTable("table110115790") + .setIndex("index100346066") + .addAllColumns(new ArrayList()) + .setKeySet(KeySet.newBuilder().build()) + .setPartitionOptions(PartitionOptions.newBuilder().build()) + .build(); + client.partitionRead(request); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void batchWriteTest() throws Exception { + BatchWriteResponse expectedResponse = + BatchWriteResponse.newBuilder() + .addAllIndexes(new ArrayList()) + .setStatus(Status.newBuilder().build()) + .setCommitTimestamp(Timestamp.newBuilder().build()) + .build(); + mockSpanner.addResponse(expectedResponse); + BatchWriteRequest request = + BatchWriteRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setRequestOptions(RequestOptions.newBuilder().build()) + .addAllMutationGroups(new ArrayList()) + .setExcludeTxnFromChangeStreams(true) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = + client.batchWriteCallable(); + callable.serverStreamingCall(request, responseObserver); + + List actualResponses = responseObserver.future().get(); + Assert.assertEquals(1, actualResponses.size()); + Assert.assertEquals(expectedResponse, actualResponses.get(0)); + } + + @Test + public void batchWriteExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockSpanner.addException(exception); + BatchWriteRequest request = + BatchWriteRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setRequestOptions(RequestOptions.newBuilder().build()) + .addAllMutationGroups(new ArrayList()) + .setExcludeTxnFromChangeStreams(true) + .build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = + client.batchWriteCallable(); + callable.serverStreamingCall(request, responseObserver); + + try { + List actualResponses = responseObserver.future().get(); + Assert.fail("No exception thrown"); + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = 
((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/proto/finder_test.proto b/java-spanner/google-cloud-spanner/src/test/proto/finder_test.proto new file mode 100644 index 000000000000..3c3f1d8d299e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/proto/finder_test.proto @@ -0,0 +1,56 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package spanner.cloud.location; + +import "google/spanner/v1/location.proto"; +import "google/spanner/v1/spanner.proto"; + +option java_multiple_files = true; + +message FinderTestCase { + string name = 1; + message Event { + // Name for the event, for diagnostic purposes. + string name = 1; + + // A cache update that should be applied to the `Finder` before calling + // `FindServer`. + google.spanner.v1.CacheUpdate cache_update = 2; + + // During `FindServer`, servers in the `unhealthy_servers` list should + // report false to `Server::IsHealthy`. + repeated string unhealthy_servers = 3; + + // The argument to pass to `FindServer` + oneof request { + google.spanner.v1.ReadRequest read = 4; + google.spanner.v1.ExecuteSqlRequest sql = 5; + } + + // The server that `FindServer` should return. If empty, `FindServer` + // should return null. 
+ string server = 6; + + // The routing hint that should be filled in by `FindServer`. + google.spanner.v1.RoutingHint hint = 7; + } + repeated Event event = 2; +} + +message FinderTestCases { + repeated FinderTestCase test_case = 2; +} diff --git a/java-spanner/google-cloud-spanner/src/test/proto/range_cache_test.proto b/java-spanner/google-cloud-spanner/src/test/proto/range_cache_test.proto new file mode 100644 index 000000000000..a5accaf557fb --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/proto/range_cache_test.proto @@ -0,0 +1,75 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package spanner.cloud.location; + +import "google/spanner/v1/location.proto"; +import "google/spanner/v1/spanner.proto"; + +option java_multiple_files = true; + +message RangeCacheTestCases { + repeated RangeCacheTestCase test_case = 1; +} + +message RangeCacheTestCase { + // Name of the test case, for diagnostic purposes. + string name = 1; + + // A single step in the test case. Each test starts with a newly constructed + // empty RangeCache, and runs one step at a time. + message Step { + // If present, the cache is updated with this CacheUpdate before running + // the tests below. + google.spanner.v1.CacheUpdate update = 1; + + // The tests then run one at a time. + message Test { + // If true, the test will be run with prefer_leader=true. Otherwise, + // prefer_leader will be false. 
+ bool leader = 1; + + // If non-empty, the test will be run with this directed read options. + google.spanner.v1.DirectedReadOptions directed_read_options = 2; + + // key and limit_key are both optional, and if present, are copied into + // the routing hint passed to FillRoutingHint. + bytes key = 3; + bytes limit_key = 4; + + // The mode for RangeCache::RangeMode. + enum RangeMode { + COVERING_SPLIT = 0; + PICK_RANDOM = 1; + } + RangeMode range_mode = 5; + + // If set, overrides the default value of the + // --spanner_cloud_location_range_cache_min_entries_for_random_pick flag. + int32 min_cache_entries_for_random_pick = 6; + + // `result` should exactly match the routing hint after FillRoutingHint + // is called. + + google.spanner.v1.RoutingHint result = 7; + // If non-empty, then FillRoutingHint should return a server with this + // address. If empty, then FillRoutingHint should return nullptr. + string server = 8; + } + repeated Test test = 2; + } + repeated Step step = 2; +} diff --git a/java-spanner/google-cloud-spanner/src/test/proto/recipe_test.proto b/java-spanner/google-cloud-spanner/src/test/proto/recipe_test.proto new file mode 100644 index 000000000000..cb6f055eeb43 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/proto/recipe_test.proto @@ -0,0 +1,54 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package spanner.cloud.location; + +import "google/protobuf/struct.proto"; +import "google/spanner/v1/keys.proto"; +import "google/spanner/v1/location.proto"; +import "google/spanner/v1/mutation.proto"; + +option java_multiple_files = true; + +// Proto definition for the textproto of tests. +message RecipeTestCases { + repeated RecipeTestCase test_case = 1; +} + +message RecipeTestCase { + // Name of the test case, for diagnostic purposes. + string name = 1; + + // A list of recipes to be used to evaluate the tests below. + google.spanner.v1.RecipeList recipes = 2; + + message Test { + // Each test encodes a single operation. + oneof operation { + google.protobuf.ListValue key = 1; + google.spanner.v1.KeyRange key_range = 2; + google.spanner.v1.KeySet key_set = 3; + google.spanner.v1.Mutation mutation = 4; + google.protobuf.Struct query_params = 5; + } + + // `start`, `limit`, and `approximate` are the expected results of encoding. + bytes start = 6; + bytes limit = 7; + bool approximate = 8; + } + repeated Test test = 3; +} diff --git a/java-spanner/google-cloud-spanner/src/test/resources/META-INF/native-image/com.google.cloud/google-cloud-spanner/native-image.properties b/java-spanner/google-cloud-spanner/src/test/resources/META-INF/native-image/com.google.cloud/google-cloud-spanner/native-image.properties new file mode 100644 index 000000000000..383f5390d637 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/META-INF/native-image/com.google.cloud/google-cloud-spanner/native-image.properties @@ -0,0 +1,3 @@ +Args=--initialize-at-build-time=org.junit.runner.RunWith \ + --initialize-at-build-time=org.junit.experimental.categories.Category \ + --initialize-at-build-time=org.junit.runners.model.FrameworkField diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/README.md b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/README.md new file mode 100644 
index 000000000000..967cb32a2989 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/README.md @@ -0,0 +1,5 @@ +#### To generate SingerProto.java and descriptors.pb file from singer.proto using `protoc` +```shell +cd google-cloud-spanner/src/test/resources/com/google/cloud/spanner +protoc --proto_path=. --include_imports --descriptor_set_out=descriptors.pb --java_out=. singer.proto +``` diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ClientSideStatementsTest.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ClientSideStatementsTest.sql new file mode 100644 index 000000000000..e1122271907b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ClientSideStatementsTest.sql @@ -0,0 +1,35397 @@ +NEW_CONNECTION; +show variable autocommit; +NEW_CONNECTION; +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +show variable autocommit; +NEW_CONNECTION; + show variable autocommit; +NEW_CONNECTION; + show variable autocommit; +NEW_CONNECTION; + + + +show variable autocommit; +NEW_CONNECTION; +show variable autocommit ; +NEW_CONNECTION; +show variable autocommit ; +NEW_CONNECTION; +show variable autocommit + +; +NEW_CONNECTION; +show variable autocommit; +NEW_CONNECTION; +show variable autocommit; +NEW_CONNECTION; +show +variable +autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable 
autocommit_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-autocommit; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-autocommit; +NEW_CONNECTION; +show variable readonly; +NEW_CONNECTION; +SHOW VARIABLE READONLY; +NEW_CONNECTION; +show variable 
readonly; +NEW_CONNECTION; + show variable readonly; +NEW_CONNECTION; + show variable readonly; +NEW_CONNECTION; + + + +show variable readonly; +NEW_CONNECTION; +show variable readonly ; +NEW_CONNECTION; +show variable readonly ; +NEW_CONNECTION; +show variable readonly + +; +NEW_CONNECTION; +show variable readonly; +NEW_CONNECTION; +show variable readonly; +NEW_CONNECTION; +show +variable +readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable readonly bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable readonly%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable readonly_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable readonly&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable readonly$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable readonly@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable readonly!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+*show variable readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable readonly*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable readonly(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable readonly); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable readonly-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable readonly+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable readonly-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable readonly/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable readonly\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable readonly?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show variable?readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable readonly-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable readonly/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable readonly/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-readonly; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +SHOW VARIABLE RETRY_ABORTS_INTERNALLY; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; + show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; + show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; + + + +show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +show variable retry_aborts_internally ; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +show variable retry_aborts_internally ; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +show variable retry_aborts_internally + +; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +show +variable +retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set 
autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable retry_aborts_internally bar; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable retry_aborts_internally%; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable retry_aborts_internally_; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable retry_aborts_internally&; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable retry_aborts_internally$; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable 
retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable retry_aborts_internally@; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable retry_aborts_internally!; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable retry_aborts_internally*; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable retry_aborts_internally(; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable retry_aborts_internally); +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +-show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable retry_aborts_internally-; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable retry_aborts_internally+; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable retry_aborts_internally-#; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable retry_aborts_internally/; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable retry_aborts_internally\; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\retry_aborts_internally; +NEW_CONNECTION; +set 
readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable retry_aborts_internally?; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable retry_aborts_internally-/; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable retry_aborts_internally/#; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable retry_aborts_internally; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable retry_aborts_internally/-; +NEW_CONNECTION; +set readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-retry_aborts_internally; +NEW_CONNECTION; +show variable autocommit_dml_mode; +NEW_CONNECTION; +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +show variable autocommit_dml_mode; +NEW_CONNECTION; + show variable autocommit_dml_mode; +NEW_CONNECTION; + show variable autocommit_dml_mode; +NEW_CONNECTION; + + + +show variable autocommit_dml_mode; +NEW_CONNECTION; +show variable autocommit_dml_mode ; 
+NEW_CONNECTION; +show variable autocommit_dml_mode ; +NEW_CONNECTION; +show variable autocommit_dml_mode + +; +NEW_CONNECTION; +show variable autocommit_dml_mode; +NEW_CONNECTION; +show variable autocommit_dml_mode; +NEW_CONNECTION; +show +variable +autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit_dml_mode bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit_dml_mode%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit_dml_mode_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit_dml_mode&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit_dml_mode$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit_dml_mode@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit_dml_mode!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show variable!autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit_dml_mode*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit_dml_mode(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit_dml_mode); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit_dml_mode-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit_dml_mode+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit_dml_mode-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit_dml_mode/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT 
EXCEPTION UNIMPLEMENTED +show variable autocommit_dml_mode\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit_dml_mode?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit_dml_mode-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit_dml_mode/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit_dml_mode/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-autocommit_dml_mode; +NEW_CONNECTION; +show variable statement_timeout; +NEW_CONNECTION; +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +show variable statement_timeout; +NEW_CONNECTION; + show variable statement_timeout; +NEW_CONNECTION; + show variable statement_timeout; +NEW_CONNECTION; + + + +show variable statement_timeout; +NEW_CONNECTION; +show variable statement_timeout ; +NEW_CONNECTION; +show variable statement_timeout ; +NEW_CONNECTION; +show variable statement_timeout + +; +NEW_CONNECTION; +show variable statement_timeout; +NEW_CONNECTION; +show variable statement_timeout; +NEW_CONNECTION; +show +variable +statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable 
statement_timeout bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout(; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable statement_timeout; +NEW_CONNECTION; 
+@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-statement_timeout; +NEW_CONNECTION; +show variable transaction_timeout; +NEW_CONNECTION; +SHOW VARIABLE TRANSACTION_TIMEOUT; +NEW_CONNECTION; +show variable transaction_timeout; +NEW_CONNECTION; + show variable transaction_timeout; +NEW_CONNECTION; + show variable transaction_timeout; +NEW_CONNECTION; + + + +show variable transaction_timeout; +NEW_CONNECTION; +show variable transaction_timeout ; +NEW_CONNECTION; +show variable transaction_timeout ; +NEW_CONNECTION; +show variable transaction_timeout + +; +NEW_CONNECTION; +show variable transaction_timeout; +NEW_CONNECTION; +show variable transaction_timeout; +NEW_CONNECTION; +show +variable +transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_timeout bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_timeout%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_timeout_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show variable_transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_timeout&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_timeout$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_timeout@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_timeout!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_timeout*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_timeout(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_timeout); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable transaction_timeout; +NEW_CONNECTION; +@EXPECT 
EXCEPTION UNIMPLEMENTED +show variable transaction_timeout-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_timeout+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_timeout-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_timeout/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_timeout\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_timeout?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_timeout-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_timeout/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#transaction_timeout; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_timeout/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-transaction_timeout; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; + show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; + show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; + + + +show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +show variable read_timestamp ; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +show variable read_timestamp ; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +show variable read_timestamp + +; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +show +variable +read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_timestamp bar; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_timestamp%; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%read_timestamp; +NEW_CONNECTION; +set 
readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_timestamp_; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_timestamp&; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_timestamp$; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_timestamp@; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_timestamp!; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; 
+SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_timestamp*; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_timestamp(; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_timestamp); +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_timestamp-; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_timestamp+; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_timestamp-#; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; 
+@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_timestamp/; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_timestamp\; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_timestamp?; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_timestamp-/; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_timestamp/#; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +/-show variable read_timestamp; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_timestamp/-; +NEW_CONNECTION; +set readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-read_timestamp; +NEW_CONNECTION; +update foo set bar=1; +show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +update foo set bar=1; +show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; + show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; + show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; + + + +show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +show variable commit_timestamp ; +NEW_CONNECTION; +update foo set bar=1; +show variable commit_timestamp ; +NEW_CONNECTION; +update foo set bar=1; +show variable commit_timestamp + +; +NEW_CONNECTION; +update foo set bar=1; +show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +show +variable +commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_timestamp bar; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_timestamp%; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_timestamp_; +NEW_CONNECTION; +update foo set 
bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_timestamp&; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_timestamp$; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_timestamp@; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_timestamp!; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_timestamp*; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_timestamp(; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable(commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_timestamp); +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_timestamp-; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_timestamp+; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_timestamp-#; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_timestamp/; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_timestamp\; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\commit_timestamp; +NEW_CONNECTION; +update foo 
set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_timestamp?; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_timestamp-/; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_timestamp/#; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_timestamp/-; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-commit_timestamp; +NEW_CONNECTION; +show variable read_only_staleness; +NEW_CONNECTION; +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +show variable read_only_staleness; +NEW_CONNECTION; + show variable read_only_staleness; +NEW_CONNECTION; + show variable read_only_staleness; +NEW_CONNECTION; + + + +show variable read_only_staleness; +NEW_CONNECTION; +show variable read_only_staleness ; +NEW_CONNECTION; +show variable read_only_staleness ; +NEW_CONNECTION; +show variable read_only_staleness + +; +NEW_CONNECTION; +show variable read_only_staleness; +NEW_CONNECTION; +show variable read_only_staleness; +NEW_CONNECTION; +show +variable +read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable 
read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_only_staleness bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_only_staleness%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_only_staleness_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_only_staleness&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_only_staleness$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_only_staleness@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_only_staleness!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_only_staleness*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +(show variable read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_only_staleness(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_only_staleness); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_only_staleness-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_only_staleness+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_only_staleness-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_only_staleness/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_only_staleness\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_only_staleness?; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_only_staleness-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_only_staleness/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_only_staleness/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-read_only_staleness; +NEW_CONNECTION; +show variable directed_read; +NEW_CONNECTION; +SHOW VARIABLE DIRECTED_READ; +NEW_CONNECTION; +show variable directed_read; +NEW_CONNECTION; + show variable directed_read; +NEW_CONNECTION; + show variable directed_read; +NEW_CONNECTION; + + + +show variable directed_read; +NEW_CONNECTION; +show variable directed_read ; +NEW_CONNECTION; +show variable directed_read ; +NEW_CONNECTION; +show variable directed_read + +; +NEW_CONNECTION; +show variable directed_read; +NEW_CONNECTION; +show variable directed_read; +NEW_CONNECTION; +show +variable +directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable directed_read bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable directed_read%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show variable directed_read_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable directed_read&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable directed_read$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable directed_read@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable directed_read!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable directed_read*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable directed_read(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable directed_read); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable 
directed_read-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable directed_read+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable directed_read-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable directed_read/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable directed_read\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable directed_read?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable directed_read-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable directed_read/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable directed_read/-; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-directed_read; +NEW_CONNECTION; +show variable optimizer_version; +NEW_CONNECTION; +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +show variable optimizer_version; +NEW_CONNECTION; + show variable optimizer_version; +NEW_CONNECTION; + show variable optimizer_version; +NEW_CONNECTION; + + + +show variable optimizer_version; +NEW_CONNECTION; +show variable optimizer_version ; +NEW_CONNECTION; +show variable optimizer_version ; +NEW_CONNECTION; +show variable optimizer_version + +; +NEW_CONNECTION; +show variable optimizer_version; +NEW_CONNECTION; +show variable optimizer_version; +NEW_CONNECTION; +show +variable +optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_version bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_version%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_version_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_version&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_version$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable 
optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_version@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_version!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_version*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_version(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_version); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_version-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_version+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_version-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#optimizer_version; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_version/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_version\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_version?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_version-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_version/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_version/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-optimizer_version; +NEW_CONNECTION; +show variable optimizer_statistics_package; +NEW_CONNECTION; +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +show variable optimizer_statistics_package; +NEW_CONNECTION; + show variable optimizer_statistics_package; +NEW_CONNECTION; + show variable optimizer_statistics_package; +NEW_CONNECTION; + + + +show variable optimizer_statistics_package; +NEW_CONNECTION; +show variable optimizer_statistics_package ; 
+NEW_CONNECTION; +show variable optimizer_statistics_package ; +NEW_CONNECTION; +show variable optimizer_statistics_package + +; +NEW_CONNECTION; +show variable optimizer_statistics_package; +NEW_CONNECTION; +show variable optimizer_statistics_package; +NEW_CONNECTION; +show +variable +optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_statistics_package bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_statistics_package%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_statistics_package_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_statistics_package&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_statistics_package$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_statistics_package@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +!show variable optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_statistics_package!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_statistics_package*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_statistics_package(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_statistics_package); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_statistics_package-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_statistics_package+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_statistics_package-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable-#optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_statistics_package/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_statistics_package\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_statistics_package?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_statistics_package-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_statistics_package/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable optimizer_statistics_package/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-optimizer_statistics_package; +NEW_CONNECTION; +show variable return_commit_stats; +NEW_CONNECTION; +SHOW VARIABLE RETURN_COMMIT_STATS; +NEW_CONNECTION; +show variable return_commit_stats; +NEW_CONNECTION; + show variable 
return_commit_stats; +NEW_CONNECTION; + show variable return_commit_stats; +NEW_CONNECTION; + + + +show variable return_commit_stats; +NEW_CONNECTION; +show variable return_commit_stats ; +NEW_CONNECTION; +show variable return_commit_stats ; +NEW_CONNECTION; +show variable return_commit_stats + +; +NEW_CONNECTION; +show variable return_commit_stats; +NEW_CONNECTION; +show variable return_commit_stats; +NEW_CONNECTION; +show +variable +return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable return_commit_stats bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable return_commit_stats%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable return_commit_stats_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable return_commit_stats&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable return_commit_stats$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable return_commit_stats@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +!show variable return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable return_commit_stats!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable return_commit_stats*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable return_commit_stats(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable return_commit_stats); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable return_commit_stats-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable return_commit_stats+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable return_commit_stats-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable return_commit_stats/; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable return_commit_stats\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable return_commit_stats?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable return_commit_stats-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable return_commit_stats/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable return_commit_stats/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-return_commit_stats; +NEW_CONNECTION; +show variable max_commit_delay; +NEW_CONNECTION; +SHOW VARIABLE MAX_COMMIT_DELAY; +NEW_CONNECTION; +show variable max_commit_delay; +NEW_CONNECTION; + show variable max_commit_delay; +NEW_CONNECTION; + show variable max_commit_delay; +NEW_CONNECTION; + + + +show variable max_commit_delay; +NEW_CONNECTION; +show variable max_commit_delay ; +NEW_CONNECTION; +show variable max_commit_delay ; +NEW_CONNECTION; +show variable max_commit_delay + +; +NEW_CONNECTION; +show variable max_commit_delay; +NEW_CONNECTION; +show variable max_commit_delay; +NEW_CONNECTION; +show 
+variable +max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_commit_delay bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_commit_delay%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_commit_delay_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_commit_delay&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_commit_delay$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_commit_delay@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_commit_delay!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_commit_delay*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*max_commit_delay; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_commit_delay(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_commit_delay); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_commit_delay-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_commit_delay+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_commit_delay-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_commit_delay/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_commit_delay\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_commit_delay?; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show variable?max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_commit_delay-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_commit_delay/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_commit_delay/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-max_commit_delay; +NEW_CONNECTION; +update foo set bar=1; +show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +update foo set bar=1; +show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; + show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; + show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; + + + +show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +show variable commit_response ; +NEW_CONNECTION; +update foo set bar=1; +show variable commit_response ; +NEW_CONNECTION; +update foo set bar=1; +show variable commit_response + +; +NEW_CONNECTION; +update foo set bar=1; +show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +show +variable +commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_response bar; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+%show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_response%; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_response_; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_response&; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_response$; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_response@; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_response!; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; 
+@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_response*; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_response(; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_response); +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_response-; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_response+; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_response-#; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_response/; 
+NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_response\; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_response?; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_response-/; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_response/#; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable commit_response/-; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-commit_response; +NEW_CONNECTION; +show variable statement_tag; +NEW_CONNECTION; +SHOW VARIABLE STATEMENT_TAG; +NEW_CONNECTION; +show variable statement_tag; +NEW_CONNECTION; + show variable statement_tag; +NEW_CONNECTION; + show variable statement_tag; +NEW_CONNECTION; + + + +show variable 
statement_tag; +NEW_CONNECTION; +show variable statement_tag ; +NEW_CONNECTION; +show variable statement_tag ; +NEW_CONNECTION; +show variable statement_tag + +; +NEW_CONNECTION; +show variable statement_tag; +NEW_CONNECTION; +show variable statement_tag; +NEW_CONNECTION; +show +variable +statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_tag bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_tag%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_tag_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_tag&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_tag$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_tag@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_tag!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+*show variable statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_tag*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_tag(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_tag); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_tag-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_tag+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_tag-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_tag/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_tag\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable statement_tag; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_tag?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_tag-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_tag/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_tag/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-statement_tag; +NEW_CONNECTION; +show variable transaction_tag; +NEW_CONNECTION; +SHOW VARIABLE TRANSACTION_TAG; +NEW_CONNECTION; +show variable transaction_tag; +NEW_CONNECTION; + show variable transaction_tag; +NEW_CONNECTION; + show variable transaction_tag; +NEW_CONNECTION; + + + +show variable transaction_tag; +NEW_CONNECTION; +show variable transaction_tag ; +NEW_CONNECTION; +show variable transaction_tag ; +NEW_CONNECTION; +show variable transaction_tag + +; +NEW_CONNECTION; +show variable transaction_tag; +NEW_CONNECTION; +show variable transaction_tag; +NEW_CONNECTION; +show +variable +transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_tag bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_tag%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show 
variable transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_tag_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_tag&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_tag$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_tag@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_tag!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_tag*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_tag(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_tag); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +-show variable transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_tag-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_tag+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_tag-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_tag/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_tag\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_tag?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_tag-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_tag/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#transaction_tag; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction_tag/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-transaction_tag; +NEW_CONNECTION; +show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +SHOW VARIABLE EXCLUDE_TXN_FROM_CHANGE_STREAMS; +NEW_CONNECTION; +show variable exclude_txn_from_change_streams; +NEW_CONNECTION; + show variable exclude_txn_from_change_streams; +NEW_CONNECTION; + show variable exclude_txn_from_change_streams; +NEW_CONNECTION; + + + +show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +show variable exclude_txn_from_change_streams ; +NEW_CONNECTION; +show variable exclude_txn_from_change_streams ; +NEW_CONNECTION; +show variable exclude_txn_from_change_streams + +; +NEW_CONNECTION; +show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +show +variable +exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable exclude_txn_from_change_streams&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show 
variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable-/exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-exclude_txn_from_change_streams; +NEW_CONNECTION; +show variable rpc_priority; +NEW_CONNECTION; +SHOW VARIABLE RPC_PRIORITY; +NEW_CONNECTION; +show variable rpc_priority; +NEW_CONNECTION; + show variable rpc_priority; +NEW_CONNECTION; + show variable rpc_priority; +NEW_CONNECTION; + + + +show variable rpc_priority; +NEW_CONNECTION; +show variable rpc_priority ; +NEW_CONNECTION; +show variable rpc_priority ; +NEW_CONNECTION; +show variable rpc_priority + +; +NEW_CONNECTION; +show variable rpc_priority; +NEW_CONNECTION; +show variable rpc_priority; +NEW_CONNECTION; +show +variable +rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable rpc_priority bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable rpc_priority%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable rpc_priority_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show variable rpc_priority&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable rpc_priority$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable rpc_priority@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable rpc_priority!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable rpc_priority*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable rpc_priority(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable rpc_priority); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable rpc_priority-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable rpc_priority+; +NEW_CONNECTION; 
+@EXPECT EXCEPTION UNIMPLEMENTED +show variable+rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable rpc_priority-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable rpc_priority/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable rpc_priority\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable rpc_priority?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable rpc_priority-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable rpc_priority/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable rpc_priority/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-rpc_priority; +NEW_CONNECTION; +show variable savepoint_support; +NEW_CONNECTION; +SHOW VARIABLE SAVEPOINT_SUPPORT; +NEW_CONNECTION; +show variable savepoint_support; +NEW_CONNECTION; + show variable savepoint_support; +NEW_CONNECTION; + show 
variable savepoint_support; +NEW_CONNECTION; + + + +show variable savepoint_support; +NEW_CONNECTION; +show variable savepoint_support ; +NEW_CONNECTION; +show variable savepoint_support ; +NEW_CONNECTION; +show variable savepoint_support + +; +NEW_CONNECTION; +show variable savepoint_support; +NEW_CONNECTION; +show variable savepoint_support; +NEW_CONNECTION; +show +variable +savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable savepoint_support bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable savepoint_support%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable savepoint_support_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable savepoint_support&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable savepoint_support$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable savepoint_support@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show variable savepoint_support!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable savepoint_support*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable savepoint_support(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable savepoint_support); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable savepoint_support-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable savepoint_support+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable savepoint_support-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable savepoint_support/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show 
variable savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable savepoint_support\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable savepoint_support?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable savepoint_support-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable savepoint_support/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable savepoint_support/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-savepoint_support; +NEW_CONNECTION; +show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +SHOW VARIABLE DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE; +NEW_CONNECTION; +show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; + show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; + show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; + + + +show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +show variable delay_transaction_start_until_first_write ; +NEW_CONNECTION; +show variable delay_transaction_start_until_first_write ; +NEW_CONNECTION; +show variable delay_transaction_start_until_first_write + +; +NEW_CONNECTION; +show variable delay_transaction_start_until_first_write; 
+NEW_CONNECTION; +show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +show +variable +delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable delay_transaction_start_until_first_write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable delay_transaction_start_until_first_write%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable delay_transaction_start_until_first_write_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable delay_transaction_start_until_first_write&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable delay_transaction_start_until_first_write$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable delay_transaction_start_until_first_write@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable@delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable delay_transaction_start_until_first_write!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable delay_transaction_start_until_first_write*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable delay_transaction_start_until_first_write(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable delay_transaction_start_until_first_write); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable delay_transaction_start_until_first_write-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable delay_transaction_start_until_first_write+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable+delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable delay_transaction_start_until_first_write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable delay_transaction_start_until_first_write/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable delay_transaction_start_until_first_write\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable delay_transaction_start_until_first_write?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable delay_transaction_start_until_first_write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable delay_transaction_start_until_first_write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable/#delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable delay_transaction_start_until_first_write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-delay_transaction_start_until_first_write; +NEW_CONNECTION; +show variable keep_transaction_alive; +NEW_CONNECTION; +SHOW VARIABLE KEEP_TRANSACTION_ALIVE; +NEW_CONNECTION; +show variable keep_transaction_alive; +NEW_CONNECTION; + show variable keep_transaction_alive; +NEW_CONNECTION; + show variable keep_transaction_alive; +NEW_CONNECTION; + + + +show variable keep_transaction_alive; +NEW_CONNECTION; +show variable keep_transaction_alive ; +NEW_CONNECTION; +show variable keep_transaction_alive ; +NEW_CONNECTION; +show variable keep_transaction_alive + +; +NEW_CONNECTION; +show variable keep_transaction_alive; +NEW_CONNECTION; +show variable keep_transaction_alive; +NEW_CONNECTION; +show +variable +keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive&; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-keep_transaction_alive; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable 
keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-keep_transaction_alive; +NEW_CONNECTION; +show variable auto_batch_dml; +NEW_CONNECTION; +SHOW VARIABLE AUTO_BATCH_DML; +NEW_CONNECTION; +show variable auto_batch_dml; +NEW_CONNECTION; + show variable auto_batch_dml; +NEW_CONNECTION; + show variable auto_batch_dml; +NEW_CONNECTION; + + + +show variable auto_batch_dml; +NEW_CONNECTION; +show variable auto_batch_dml ; +NEW_CONNECTION; +show variable auto_batch_dml ; +NEW_CONNECTION; +show variable auto_batch_dml + +; +NEW_CONNECTION; +show variable auto_batch_dml; +NEW_CONNECTION; +show variable auto_batch_dml; +NEW_CONNECTION; +show +variable +auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +@show variable auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +/show variable auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-auto_batch_dml; +NEW_CONNECTION; +show variable auto_batch_dml_update_count; +NEW_CONNECTION; +SHOW VARIABLE AUTO_BATCH_DML_UPDATE_COUNT; +NEW_CONNECTION; +show variable auto_batch_dml_update_count; +NEW_CONNECTION; + show variable auto_batch_dml_update_count; +NEW_CONNECTION; + show variable auto_batch_dml_update_count; +NEW_CONNECTION; + + + +show variable auto_batch_dml_update_count; +NEW_CONNECTION; +show variable auto_batch_dml_update_count ; +NEW_CONNECTION; +show variable auto_batch_dml_update_count ; +NEW_CONNECTION; +show variable 
auto_batch_dml_update_count + +; +NEW_CONNECTION; +show variable auto_batch_dml_update_count; +NEW_CONNECTION; +show variable auto_batch_dml_update_count; +NEW_CONNECTION; +show +variable +auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show variable auto_batch_dml_update_count!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show variable auto_batch_dml_update_count/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-auto_batch_dml_update_count; +NEW_CONNECTION; +show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +SHOW VARIABLE AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION; +NEW_CONNECTION; +show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; + show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; + show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; + + + +show 
variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +show variable auto_batch_dml_update_count_verification ; +NEW_CONNECTION; +show variable auto_batch_dml_update_count_verification ; +NEW_CONNECTION; +show variable auto_batch_dml_update_count_verification + +; +NEW_CONNECTION; +show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +show +variable +auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count_verification bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count_verification%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count_verification_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count_verification&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count_verification$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable$auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count_verification@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count_verification!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count_verification*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count_verification(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count_verification); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count_verification-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable-auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count_verification+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count_verification-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count_verification/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count_verification\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count_verification?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count_verification-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable-/auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count_verification/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_batch_dml_update_count_verification/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-auto_batch_dml_update_count_verification; +NEW_CONNECTION; +partition select col1, col2 from my_table; +NEW_CONNECTION; +PARTITION SELECT COL1, COL2 FROM MY_TABLE; +NEW_CONNECTION; +partition select col1, col2 from my_table; +NEW_CONNECTION; + partition select col1, col2 from my_table; +NEW_CONNECTION; + partition select col1, col2 from my_table; +NEW_CONNECTION; + + + +partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table ; +NEW_CONNECTION; +partition select col1, col2 from my_table ; +NEW_CONNECTION; +partition select col1, col2 from my_table + +; +NEW_CONNECTION; +partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table; +NEW_CONNECTION; +partition +select +col1, +col2 +from +my_table; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+&partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?partition select col1, col2 from my_table; +NEW_CONNECTION; +partition 
select col1, col2 from my_table?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table/-; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +RUN PARTITIONED QUERY SELECT COL1, COL2 FROM MY_TABLE; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; + run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; + run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; + + + +run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table ; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table ; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table + +; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run +partitioned +query +select +col1, +col2 +from +my_table; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table_; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +&run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from 
my_table/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table/-; +NEW_CONNECTION; +begin; +NEW_CONNECTION; +BEGIN; +NEW_CONNECTION; +begin; +NEW_CONNECTION; + begin; +NEW_CONNECTION; + begin; +NEW_CONNECTION; + + + +begin; +NEW_CONNECTION; +begin ; +NEW_CONNECTION; +begin ; +NEW_CONNECTION; +begin + +; +NEW_CONNECTION; +begin; +NEW_CONNECTION; +begin; +NEW_CONNECTION; +begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+$begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin\; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin/-; +NEW_CONNECTION; +start; +NEW_CONNECTION; +START; +NEW_CONNECTION; +start; +NEW_CONNECTION; + start; +NEW_CONNECTION; + start; +NEW_CONNECTION; + + + +start; +NEW_CONNECTION; +start ; +NEW_CONNECTION; +start ; +NEW_CONNECTION; +start + +; +NEW_CONNECTION; +start; +NEW_CONNECTION; +start; +NEW_CONNECTION; +start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+@start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start?; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start/-; +NEW_CONNECTION; +begin transaction; +NEW_CONNECTION; +BEGIN TRANSACTION; +NEW_CONNECTION; +begin transaction; +NEW_CONNECTION; + begin transaction; +NEW_CONNECTION; + begin transaction; +NEW_CONNECTION; + + + +begin transaction; +NEW_CONNECTION; +begin transaction ; +NEW_CONNECTION; +begin transaction ; +NEW_CONNECTION; +begin transaction + +; +NEW_CONNECTION; +begin transaction; +NEW_CONNECTION; +begin transaction; +NEW_CONNECTION; +begin +transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin%transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin_transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin&transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction$; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin$transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin@transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin!transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin*transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin(transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin)transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin-transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin+transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin-#transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction/; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin/transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin\transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin?transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin-/transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin/#transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin/-transaction; +NEW_CONNECTION; +start transaction; +NEW_CONNECTION; +START TRANSACTION; +NEW_CONNECTION; +start transaction; +NEW_CONNECTION; + start transaction; +NEW_CONNECTION; + start transaction; +NEW_CONNECTION; + + + +start transaction; +NEW_CONNECTION; +start transaction ; +NEW_CONNECTION; +start transaction ; +NEW_CONNECTION; +start transaction + +; +NEW_CONNECTION; +start transaction; +NEW_CONNECTION; +start transaction; +NEW_CONNECTION; +start +transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction%; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start%transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start_transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start&transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start$transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start@transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start!transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start*transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start(transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start)transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction-; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start-transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start+transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start-#transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start/transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start\transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start?transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start-/transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start/#transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start/-transaction; +NEW_CONNECTION; +begin isolation level repeatable read; +NEW_CONNECTION; +BEGIN ISOLATION LEVEL REPEATABLE READ; +NEW_CONNECTION; +begin isolation level repeatable read; 
+NEW_CONNECTION; + begin isolation level repeatable read; +NEW_CONNECTION; + begin isolation level repeatable read; +NEW_CONNECTION; + + + +begin isolation level repeatable read; +NEW_CONNECTION; +begin isolation level repeatable read ; +NEW_CONNECTION; +begin isolation level repeatable read ; +NEW_CONNECTION; +begin isolation level repeatable read + +; +NEW_CONNECTION; +begin isolation level repeatable read; +NEW_CONNECTION; +begin isolation level repeatable read; +NEW_CONNECTION; +begin +isolation +level +repeatable +read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable%read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable&read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable$read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+begin isolation level repeatable read@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable@read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable!read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable*read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable(read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable)read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable-read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable+read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level 
repeatable read-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable-#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable\read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable?read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable-/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable/#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable/-read; +NEW_CONNECTION; +begin transaction isolation level repeatable read; +NEW_CONNECTION; +BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ; +NEW_CONNECTION; +begin transaction isolation level 
repeatable read; +NEW_CONNECTION; + begin transaction isolation level repeatable read; +NEW_CONNECTION; + begin transaction isolation level repeatable read; +NEW_CONNECTION; + + + +begin transaction isolation level repeatable read; +NEW_CONNECTION; +begin transaction isolation level repeatable read ; +NEW_CONNECTION; +begin transaction isolation level repeatable read ; +NEW_CONNECTION; +begin transaction isolation level repeatable read + +; +NEW_CONNECTION; +begin transaction isolation level repeatable read; +NEW_CONNECTION; +begin transaction isolation level repeatable read; +NEW_CONNECTION; +begin +transaction +isolation +level +repeatable +read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable%read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable&read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin 
transaction isolation level repeatable read$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable$read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable@read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable!read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable*read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable(read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable)read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read-; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin transaction isolation level repeatable-read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable+read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable-#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable\read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable?read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable-/read; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable/#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable/-read; +NEW_CONNECTION; +begin isolation level serializable; +NEW_CONNECTION; +BEGIN ISOLATION LEVEL SERIALIZABLE; +NEW_CONNECTION; +begin isolation level serializable; +NEW_CONNECTION; + begin isolation level serializable; +NEW_CONNECTION; + begin isolation level serializable; +NEW_CONNECTION; + + + +begin isolation level serializable; +NEW_CONNECTION; +begin isolation level serializable ; +NEW_CONNECTION; +begin isolation level serializable ; +NEW_CONNECTION; +begin isolation level serializable + +; +NEW_CONNECTION; +begin isolation level serializable; +NEW_CONNECTION; +begin isolation level serializable; +NEW_CONNECTION; +begin +isolation +level +serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level%serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation 
level_serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level&serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level$serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level@serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level!serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level*serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level(serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level)serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin isolation 
level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level-serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level+serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level-#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level\serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level?serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level-/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin 
isolation level serializable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level/#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level/-serializable; +NEW_CONNECTION; +begin transaction isolation level serializable; +NEW_CONNECTION; +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; +NEW_CONNECTION; +begin transaction isolation level serializable; +NEW_CONNECTION; + begin transaction isolation level serializable; +NEW_CONNECTION; + begin transaction isolation level serializable; +NEW_CONNECTION; + + + +begin transaction isolation level serializable; +NEW_CONNECTION; +begin transaction isolation level serializable ; +NEW_CONNECTION; +begin transaction isolation level serializable ; +NEW_CONNECTION; +begin transaction isolation level serializable + +; +NEW_CONNECTION; +begin transaction isolation level serializable; +NEW_CONNECTION; +begin transaction isolation level serializable; +NEW_CONNECTION; +begin +transaction +isolation +level +serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level%serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin 
transaction isolation level_serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level&serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level$serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level@serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level!serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level*serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level(serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction isolation level serializable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level)serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level-serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level+serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level-#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level\serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable?; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin transaction isolation level?serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level-/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level/#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level/-serializable; +NEW_CONNECTION; +start isolation level repeatable read; +NEW_CONNECTION; +START ISOLATION LEVEL REPEATABLE READ; +NEW_CONNECTION; +start isolation level repeatable read; +NEW_CONNECTION; + start isolation level repeatable read; +NEW_CONNECTION; + start isolation level repeatable read; +NEW_CONNECTION; + + + +start isolation level repeatable read; +NEW_CONNECTION; +start isolation level repeatable read ; +NEW_CONNECTION; +start isolation level repeatable read ; +NEW_CONNECTION; +start isolation level repeatable read + +; +NEW_CONNECTION; +start isolation level repeatable read; +NEW_CONNECTION; +start isolation level repeatable read; +NEW_CONNECTION; +start +isolation +level +repeatable +read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start isolation level repeatable read; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable%read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable&read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable$read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable@read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable!read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable*read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable(read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable)read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable-read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable+read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable-#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable\read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+start isolation level repeatable read?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable?read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable-/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable/#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable/-read; +NEW_CONNECTION; +start transaction isolation level repeatable read; +NEW_CONNECTION; +START TRANSACTION ISOLATION LEVEL REPEATABLE READ; +NEW_CONNECTION; +start transaction isolation level repeatable read; +NEW_CONNECTION; + start transaction isolation level repeatable read; +NEW_CONNECTION; + start transaction isolation level repeatable read; +NEW_CONNECTION; + + + +start transaction isolation level repeatable read; +NEW_CONNECTION; +start transaction isolation level repeatable read ; +NEW_CONNECTION; +start transaction isolation level repeatable read ; +NEW_CONNECTION; +start transaction isolation level repeatable read + +; +NEW_CONNECTION; +start transaction isolation level repeatable read; +NEW_CONNECTION; +start transaction isolation level repeatable read; +NEW_CONNECTION; +start +transaction +isolation +level +repeatable +read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+start transaction isolation level repeatable read bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable%read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable&read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable$read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable@read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable!read; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +*start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable*read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable(read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable)read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable-read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable+read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable-#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction isolation level repeatable read; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable\read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable?read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable-/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable/#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable/-read; +NEW_CONNECTION; +start isolation level serializable; +NEW_CONNECTION; +START ISOLATION LEVEL SERIALIZABLE; +NEW_CONNECTION; +start isolation level serializable; +NEW_CONNECTION; + start isolation level 
serializable; +NEW_CONNECTION; + start isolation level serializable; +NEW_CONNECTION; + + + +start isolation level serializable; +NEW_CONNECTION; +start isolation level serializable ; +NEW_CONNECTION; +start isolation level serializable ; +NEW_CONNECTION; +start isolation level serializable + +; +NEW_CONNECTION; +start isolation level serializable; +NEW_CONNECTION; +start isolation level serializable; +NEW_CONNECTION; +start +isolation +level +serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level%serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level_serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level&serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level$serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation 
level@serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level!serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level*serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level(serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level)serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level-serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level+serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level-#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start isolation 
level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level\serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level?serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level-/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level/#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level/-serializable; +NEW_CONNECTION; +start transaction isolation level serializable; +NEW_CONNECTION; +START TRANSACTION ISOLATION LEVEL SERIALIZABLE; +NEW_CONNECTION; +start transaction isolation level serializable; +NEW_CONNECTION; + start transaction isolation level serializable; +NEW_CONNECTION; + start transaction isolation level serializable; +NEW_CONNECTION; + + + +start transaction isolation level serializable; +NEW_CONNECTION; +start 
transaction isolation level serializable ; +NEW_CONNECTION; +start transaction isolation level serializable ; +NEW_CONNECTION; +start transaction isolation level serializable + +; +NEW_CONNECTION; +start transaction isolation level serializable; +NEW_CONNECTION; +start transaction isolation level serializable; +NEW_CONNECTION; +start +transaction +isolation +level +serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level%serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level_serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level&serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level$serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start 
transaction isolation level serializable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level@serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level!serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level*serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level(serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level)serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level-serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level+serializable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level-#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level\serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level?serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level-/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level/#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start transaction isolation level serializable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level/-serializable; +NEW_CONNECTION; +begin transaction; +commit; +NEW_CONNECTION; +begin transaction; +COMMIT; +NEW_CONNECTION; +begin transaction; +commit; +NEW_CONNECTION; +begin transaction; + commit; +NEW_CONNECTION; +begin transaction; + commit; +NEW_CONNECTION; +begin transaction; + + + +commit; +NEW_CONNECTION; +begin transaction; +commit ; +NEW_CONNECTION; +begin transaction; +commit ; +NEW_CONNECTION; +begin transaction; +commit + +; +NEW_CONNECTION; +begin transaction; +commit; +NEW_CONNECTION; +begin transaction; +commit; +NEW_CONNECTION; +begin transaction; +commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@commit; 
+NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/commit; 
+NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit/-; +NEW_CONNECTION; +begin transaction; +commit transaction; +NEW_CONNECTION; +begin transaction; +COMMIT TRANSACTION; +NEW_CONNECTION; +begin transaction; +commit transaction; +NEW_CONNECTION; +begin transaction; + commit transaction; +NEW_CONNECTION; +begin transaction; + commit transaction; +NEW_CONNECTION; +begin transaction; + + + +commit transaction; +NEW_CONNECTION; +begin transaction; +commit transaction ; +NEW_CONNECTION; +begin transaction; +commit transaction ; +NEW_CONNECTION; +begin transaction; +commit transaction + +; +NEW_CONNECTION; +begin transaction; 
+commit transaction; +NEW_CONNECTION; +begin transaction; +commit transaction; +NEW_CONNECTION; +begin transaction; +commit +transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit%transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit_transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit&transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit$transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit@transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +commit!transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit*transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit(transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit)transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit-transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit+transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit-#transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +commit/transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit\transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit?transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit-/transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit/#transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit/-transaction; +NEW_CONNECTION; +begin transaction; +rollback; +NEW_CONNECTION; +begin transaction; +ROLLBACK; +NEW_CONNECTION; +begin transaction; +rollback; +NEW_CONNECTION; +begin transaction; + rollback; +NEW_CONNECTION; +begin transaction; + rollback; +NEW_CONNECTION; +begin transaction; + + + +rollback; +NEW_CONNECTION; +begin transaction; +rollback ; +NEW_CONNECTION; +begin transaction; +rollback ; +NEW_CONNECTION; +begin transaction; +rollback + +; +NEW_CONNECTION; +begin transaction; +rollback; +NEW_CONNECTION; +begin transaction; 
+rollback; +NEW_CONNECTION; +begin transaction; +rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +rollback*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback?; 
+NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback/-; +NEW_CONNECTION; +begin transaction; +rollback transaction; +NEW_CONNECTION; +begin transaction; +ROLLBACK TRANSACTION; +NEW_CONNECTION; +begin transaction; +rollback transaction; +NEW_CONNECTION; +begin transaction; + rollback transaction; +NEW_CONNECTION; +begin transaction; + rollback transaction; +NEW_CONNECTION; +begin transaction; + + + +rollback transaction; +NEW_CONNECTION; +begin transaction; +rollback transaction ; +NEW_CONNECTION; +begin transaction; +rollback transaction ; +NEW_CONNECTION; +begin transaction; +rollback transaction + +; +NEW_CONNECTION; +begin transaction; +rollback transaction; +NEW_CONNECTION; +begin transaction; +rollback transaction; +NEW_CONNECTION; +begin transaction; +rollback +transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction%; 
+NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback%transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback_transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback&transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback$transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback@transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback!transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback*transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +rollback transaction(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback(transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback)transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback-transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback+transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback-#transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback/transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback\transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?rollback transaction; +NEW_CONNECTION; 
+begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback?transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback-/transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback/#transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback/-transaction; +NEW_CONNECTION; +start batch ddl; +NEW_CONNECTION; +START BATCH DDL; +NEW_CONNECTION; +start batch ddl; +NEW_CONNECTION; + start batch ddl; +NEW_CONNECTION; + start batch ddl; +NEW_CONNECTION; + + + +start batch ddl; +NEW_CONNECTION; +start batch ddl ; +NEW_CONNECTION; +start batch ddl ; +NEW_CONNECTION; +start batch ddl + +; +NEW_CONNECTION; +start batch ddl; +NEW_CONNECTION; +start batch ddl; +NEW_CONNECTION; +start +batch +ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch%ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl_; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch_ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch&ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch$ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch@ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch!ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch*ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch(ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch)ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch-ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start 
batch+ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch-#ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch/ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch\ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch?ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch-/ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch/#ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch/-ddl; +NEW_CONNECTION; +start batch dml; +NEW_CONNECTION; +START BATCH DML; +NEW_CONNECTION; +start batch dml; +NEW_CONNECTION; + start batch dml; +NEW_CONNECTION; + start batch dml; +NEW_CONNECTION; + + + +start batch dml; +NEW_CONNECTION; +start batch dml ; +NEW_CONNECTION; +start batch dml ; +NEW_CONNECTION; +start batch dml + +; +NEW_CONNECTION; +start batch dml; +NEW_CONNECTION; +start batch dml; +NEW_CONNECTION; +start +batch +dml; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +foo start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch%dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch&dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch$dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch@dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch!dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch*dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch(dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml); +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +start batch)dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch-dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch+dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch-#dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch/dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch\dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch?dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch-/dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch/#dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start 
batch/-dml; +NEW_CONNECTION; +start batch ddl; +run batch; +NEW_CONNECTION; +start batch ddl; +RUN BATCH; +NEW_CONNECTION; +start batch ddl; +run batch; +NEW_CONNECTION; +start batch ddl; + run batch; +NEW_CONNECTION; +start batch ddl; + run batch; +NEW_CONNECTION; +start batch ddl; + + + +run batch; +NEW_CONNECTION; +start batch ddl; +run batch ; +NEW_CONNECTION; +start batch ddl; +run batch ; +NEW_CONNECTION; +start batch ddl; +run batch + +; +NEW_CONNECTION; +start batch ddl; +run batch; +NEW_CONNECTION; +start batch ddl; +run batch; +NEW_CONNECTION; +start batch ddl; +run +batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch bar; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +%run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch%; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run%batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +_run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch_; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run_batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +&run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch&; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run&batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +$run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch$; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run$batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +@run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch@; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +run@batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +!run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch!; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run!batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +*run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch*; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run*batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +(run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch(; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run(batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +)run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch); +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run)batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +-run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch-; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run-batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT ++run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch+; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run+batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch-#; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run-#batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +/run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch/; +NEW_CONNECTION; +start batch ddl; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +run/batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +\run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch\; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run\batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +?run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch?; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run?batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch-/; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run-/batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch/#; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run/#batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch/-; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run/-batch; +NEW_CONNECTION; +start batch ddl; +abort batch; +NEW_CONNECTION; +start batch ddl; +ABORT BATCH; +NEW_CONNECTION; +start batch ddl; +abort batch; +NEW_CONNECTION; +start batch ddl; + abort batch; +NEW_CONNECTION; +start batch ddl; + abort batch; +NEW_CONNECTION; +start batch ddl; + + + +abort batch; +NEW_CONNECTION; +start batch ddl; +abort batch ; +NEW_CONNECTION; +start batch ddl; +abort batch ; +NEW_CONNECTION; +start batch ddl; +abort batch + +; +NEW_CONNECTION; +start batch ddl; +abort batch; +NEW_CONNECTION; +start batch ddl; +abort batch; +NEW_CONNECTION; +start batch ddl; +abort +batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo abort 
batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch bar; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +%abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch%; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort%batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +_abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch_; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort_batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +&abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch&; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort&batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +$abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch$; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort$batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +@abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch@; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort@batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +!abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch!; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort!batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +*abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch*; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort*batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +(abort batch; +NEW_CONNECTION; 
+start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch(; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort(batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +)abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch); +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort)batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +-abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch-; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort-batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT ++abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch+; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort+batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch-#; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort-#batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +/abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch/; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort/batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +\abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch\; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort\batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +?abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch?; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort?batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +-/abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch-/; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort-/batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch/#; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort/#batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch/-; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort/-batch; +NEW_CONNECTION; +reset all; +NEW_CONNECTION; +RESET ALL; +NEW_CONNECTION; +reset all; +NEW_CONNECTION; + reset all; +NEW_CONNECTION; + reset all; +NEW_CONNECTION; + + + +reset all; +NEW_CONNECTION; +reset all ; +NEW_CONNECTION; +reset all ; +NEW_CONNECTION; +reset all + +; +NEW_CONNECTION; +reset all; +NEW_CONNECTION; +reset all; +NEW_CONNECTION; +reset +all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset%all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset_all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset&all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all$; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +reset$all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset@all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset!all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset*all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset(all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset)all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset-all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset+all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset-#all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset/all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+reset\all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset?all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset-/all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset/#all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset/-all; +NEW_CONNECTION; +set autocommit = true; +NEW_CONNECTION; +SET AUTOCOMMIT = TRUE; +NEW_CONNECTION; +set autocommit = true; +NEW_CONNECTION; + set autocommit = true; +NEW_CONNECTION; + set autocommit = true; +NEW_CONNECTION; + + + +set autocommit = true; +NEW_CONNECTION; +set autocommit = true ; +NEW_CONNECTION; +set autocommit = true ; +NEW_CONNECTION; +set autocommit = true + +; +NEW_CONNECTION; +set autocommit = true; +NEW_CONNECTION; +set autocommit = true; +NEW_CONNECTION; +set +autocommit += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+&set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true+; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set autocommit =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =/-true; +NEW_CONNECTION; +set autocommit = false; +NEW_CONNECTION; +SET AUTOCOMMIT = FALSE; +NEW_CONNECTION; +set autocommit = false; +NEW_CONNECTION; + set autocommit = false; +NEW_CONNECTION; + set autocommit = false; +NEW_CONNECTION; + + + +set autocommit = false; +NEW_CONNECTION; +set autocommit = false ; 
+NEW_CONNECTION; +set autocommit = false ; +NEW_CONNECTION; +set autocommit = false + +; +NEW_CONNECTION; +set autocommit = false; +NEW_CONNECTION; +set autocommit = false; +NEW_CONNECTION; +set +autocommit += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set autocommit =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set 
autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =/-false; +NEW_CONNECTION; +set readonly = true; +NEW_CONNECTION; +SET READONLY = TRUE; +NEW_CONNECTION; +set readonly = true; +NEW_CONNECTION; + set readonly = true; +NEW_CONNECTION; + set readonly = true; +NEW_CONNECTION; + + + +set readonly = true; +NEW_CONNECTION; +set readonly = true ; +NEW_CONNECTION; +set readonly = true ; +NEW_CONNECTION; +set readonly = true + +; +NEW_CONNECTION; +set readonly = true; +NEW_CONNECTION; +set readonly = true; +NEW_CONNECTION; +set +readonly += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =&true; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +$set readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set readonly =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =/-true; +NEW_CONNECTION; +set readonly = false; +NEW_CONNECTION; +SET READONLY = FALSE; +NEW_CONNECTION; +set readonly = false; +NEW_CONNECTION; + set readonly = false; +NEW_CONNECTION; + set readonly = false; +NEW_CONNECTION; + + + +set readonly = false; +NEW_CONNECTION; +set readonly = false ; +NEW_CONNECTION; +set readonly = false ; +NEW_CONNECTION; +set readonly = false + +; +NEW_CONNECTION; +set readonly = false; +NEW_CONNECTION; +set readonly = false; +NEW_CONNECTION; +set +readonly += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set readonly = false; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set readonly = false; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly 
=/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set readonly =/-false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +SET RETRY_ABORTS_INTERNALLY = TRUE; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + + + +set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set retry_aborts_internally = true ; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set retry_aborts_internally = true ; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set retry_aborts_internally = true + +; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set +retry_aborts_internally += +true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = true bar; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set retry_aborts_internally = true%; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =%true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = true_; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =_true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = true&; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =&true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = true$; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =$true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = true@; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =@true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set retry_aborts_internally = true; 
+NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = true!; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =!true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = true*; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =*true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = true(; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =(true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = true); +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =)true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = true-; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =-true; +NEW_CONNECTION; +set readonly = false; +set autocommit = 
false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = true+; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =+true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = true-#; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =-#true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = true/; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =/true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = true\; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =\true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = true?; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
retry_aborts_internally =?true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = true-/; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =-/true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = true/#; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =/#true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = true/-; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =/-true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +SET RETRY_ABORTS_INTERNALLY = FALSE; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + + + +set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set 
retry_aborts_internally = false ; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set retry_aborts_internally = false ; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set retry_aborts_internally = false + +; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set +retry_aborts_internally += +false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = false bar; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = false%; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =%false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = false_; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =_false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = false&; +NEW_CONNECTION; +set readonly = 
false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =&false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = false$; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =$false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = false@; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =@false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = false!; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =!false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = false*; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =*false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = false(; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =(false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = false); +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =)false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = false-; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =-false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = false+; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =+false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = false-#; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =-#false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set 
retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = false/; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =/false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = false\; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =\false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = false?; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =?false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = false-/; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =-/false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = false/#; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =/#false; 
+NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally = false/-; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set retry_aborts_internally =/-false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +SET LOCAL RETRY_ABORTS_INTERNALLY = TRUE; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + + + +set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local retry_aborts_internally = true ; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local retry_aborts_internally = true ; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local retry_aborts_internally = true + +; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set +local +retry_aborts_internally += +true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local 
retry_aborts_internally = true bar; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = true%; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =%true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = true_; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =_true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = true&; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =&true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = true$; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =$true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = true@; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =@true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = true!; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =!true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = true*; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =*true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = true(; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =(true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = true); +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =)true; +NEW_CONNECTION; +set readonly = false; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = true-; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =-true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = true+; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =+true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = true-#; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =-#true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = true/; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =/true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = true\; 
+NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =\true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = true?; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =?true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = true-/; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =-/true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = true/#; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =/#true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set local retry_aborts_internally = true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = true/-; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =/-true; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local retry_aborts_internally = false; 
+NEW_CONNECTION; +set readonly = false; +set autocommit = false; +SET LOCAL RETRY_ABORTS_INTERNALLY = FALSE; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + + + +set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local retry_aborts_internally = false ; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local retry_aborts_internally = false ; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local retry_aborts_internally = false + +; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set +local +retry_aborts_internally += +false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = false bar; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = false%; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =%false; +NEW_CONNECTION; +set readonly = false; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = false_; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =_false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = false&; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =&false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = false$; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =$false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = false@; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =@false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = false!; 
+NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =!false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = false*; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =*false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = false(; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =(false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = false); +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =)false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = false-; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =-false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set 
local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = false+; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =+false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = false-#; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =-#false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = false/; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =/false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = false\; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =\false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = false?; +NEW_CONNECTION; +set readonly = false; +set autocommit = 
false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =?false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = false-/; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =-/false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = false/#; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =/#false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set local retry_aborts_internally = false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally = false/-; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local retry_aborts_internally =/-false; +NEW_CONNECTION; +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set autocommit_dml_mode='partitioned_non_atomic'; +NEW_CONNECTION; + set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; + set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; + + + +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC' ; +NEW_CONNECTION; +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC' ; +NEW_CONNECTION; +set 
autocommit_dml_mode='PARTITIONED_NON_ATOMIC' + +; +NEW_CONNECTION; +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set +autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set@autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set 
autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
autocommit_dml_mode='PARTITIONED_NON_ATOMIC'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +SET AUTOCOMMIT_DML_MODE='TRANSACTIONAL'; +NEW_CONNECTION; +set autocommit_dml_mode='transactional'; +NEW_CONNECTION; + set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; + set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; + + + +set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +set autocommit_dml_mode='TRANSACTIONAL' ; +NEW_CONNECTION; +set autocommit_dml_mode='TRANSACTIONAL' ; +NEW_CONNECTION; +set autocommit_dml_mode='TRANSACTIONAL' + +; +NEW_CONNECTION; +set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +set +autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set 
autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT ++set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +/-set autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET AUTOCOMMIT_DML_MODE='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set autocommit_dml_mode='transactional_with_fallback_to_partitioned_non_atomic'; +NEW_CONNECTION; + set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; + set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; + + + +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' ; +NEW_CONNECTION; +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' ; +NEW_CONNECTION; +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' + +; +NEW_CONNECTION; +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set +autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set 
autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +-/set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set statement_timeout=null; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT=NULL; +NEW_CONNECTION; +set statement_timeout=null; +NEW_CONNECTION; + set statement_timeout=null; +NEW_CONNECTION; + set statement_timeout=null; +NEW_CONNECTION; + + + +set statement_timeout=null; +NEW_CONNECTION; +set statement_timeout=null ; +NEW_CONNECTION; +set statement_timeout=null ; +NEW_CONNECTION; +set statement_timeout=null + +; +NEW_CONNECTION; +set statement_timeout=null; +NEW_CONNECTION; +set statement_timeout=null; +NEW_CONNECTION; +set +statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=null bar; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +%set statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=null%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=null_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=null&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=null$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=null@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=null!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=null*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=null(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +)set statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=null); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=null-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=null+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=null-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=null/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=null\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=null?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=null-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +/#set statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=null/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=null/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-statement_timeout=null; +NEW_CONNECTION; +set statement_timeout = null ; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT = NULL ; +NEW_CONNECTION; +set statement_timeout = null ; +NEW_CONNECTION; + set statement_timeout = null ; +NEW_CONNECTION; + set statement_timeout = null ; +NEW_CONNECTION; + + + +set statement_timeout = null ; +NEW_CONNECTION; +set statement_timeout = null ; +NEW_CONNECTION; +set statement_timeout = null ; +NEW_CONNECTION; +set statement_timeout = null + +; +NEW_CONNECTION; +set statement_timeout = null ; +NEW_CONNECTION; +set statement_timeout = null ; +NEW_CONNECTION; +set +statement_timeout += +null +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null %; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
statement_timeout = null&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null +; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null -#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null \; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = null/-; +NEW_CONNECTION; +set statement_timeout='1s'; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT='1S'; 
+NEW_CONNECTION; +set statement_timeout='1s'; +NEW_CONNECTION; + set statement_timeout='1s'; +NEW_CONNECTION; + set statement_timeout='1s'; +NEW_CONNECTION; + + + +set statement_timeout='1s'; +NEW_CONNECTION; +set statement_timeout='1s' ; +NEW_CONNECTION; +set statement_timeout='1s' ; +NEW_CONNECTION; +set statement_timeout='1s' + +; +NEW_CONNECTION; +set statement_timeout='1s'; +NEW_CONNECTION; +set statement_timeout='1s'; +NEW_CONNECTION; +set +statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set statement_timeout='1s'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-statement_timeout='1s'; +NEW_CONNECTION; +set statement_timeout = '1s' ; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT = '1S' ; +NEW_CONNECTION; +set statement_timeout = '1s' ; +NEW_CONNECTION; + set statement_timeout = '1s' ; +NEW_CONNECTION; + set statement_timeout = '1s' ; +NEW_CONNECTION; + + + +set statement_timeout = '1s' ; +NEW_CONNECTION; +set statement_timeout = '1s' ; +NEW_CONNECTION; +set statement_timeout = '1s' ; +NEW_CONNECTION; +set statement_timeout = '1s' + +; +NEW_CONNECTION; +set statement_timeout = '1s' ; +NEW_CONNECTION; +set statement_timeout = '1s' ; +NEW_CONNECTION; +set +statement_timeout += +'1s' +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout = '1s' ; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' %; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'(; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +)set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' -#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' \; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' -/; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set statement_timeout = '1s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'/-; +NEW_CONNECTION; +set statement_timeout=100; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT=100; +NEW_CONNECTION; +set statement_timeout=100; +NEW_CONNECTION; + set statement_timeout=100; +NEW_CONNECTION; + set statement_timeout=100; +NEW_CONNECTION; + + + +set statement_timeout=100; +NEW_CONNECTION; +set statement_timeout=100 ; +NEW_CONNECTION; +set statement_timeout=100 ; +NEW_CONNECTION; +set statement_timeout=100 + +; +NEW_CONNECTION; +set statement_timeout=100; +NEW_CONNECTION; +set statement_timeout=100; +NEW_CONNECTION; +set +statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100&; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set&statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set+statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-statement_timeout=100; +NEW_CONNECTION; +set statement_timeout = 100 ; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT = 100 ; +NEW_CONNECTION; +set statement_timeout = 100 ; +NEW_CONNECTION; + set statement_timeout = 100 ; +NEW_CONNECTION; + set statement_timeout = 100 ; 
+NEW_CONNECTION; + + + +set statement_timeout = 100 ; +NEW_CONNECTION; +set statement_timeout = 100 ; +NEW_CONNECTION; +set statement_timeout = 100 ; +NEW_CONNECTION; +set statement_timeout = 100 + +; +NEW_CONNECTION; +set statement_timeout = 100 ; +NEW_CONNECTION; +set statement_timeout = 100 ; +NEW_CONNECTION; +set +statement_timeout += +100 +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 %; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 !; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set statement_timeout = 100!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 -#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 \; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100/-; +NEW_CONNECTION; +set statement_timeout='100ms'; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT='100MS'; +NEW_CONNECTION; +set statement_timeout='100ms'; +NEW_CONNECTION; + set statement_timeout='100ms'; +NEW_CONNECTION; + set statement_timeout='100ms'; +NEW_CONNECTION; + + + +set statement_timeout='100ms'; +NEW_CONNECTION; +set statement_timeout='100ms' ; +NEW_CONNECTION; +set statement_timeout='100ms' ; +NEW_CONNECTION; +set statement_timeout='100ms' + +; +NEW_CONNECTION; +set statement_timeout='100ms'; +NEW_CONNECTION; +set statement_timeout='100ms'; +NEW_CONNECTION; +set +statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout='100ms'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +)set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set-/statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-statement_timeout='100ms'; +NEW_CONNECTION; +set statement_timeout='10000us'; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT='10000US'; +NEW_CONNECTION; +set statement_timeout='10000us'; +NEW_CONNECTION; + set statement_timeout='10000us'; +NEW_CONNECTION; + set statement_timeout='10000us'; +NEW_CONNECTION; + + + +set statement_timeout='10000us'; +NEW_CONNECTION; +set statement_timeout='10000us' ; +NEW_CONNECTION; +set statement_timeout='10000us' ; +NEW_CONNECTION; +set statement_timeout='10000us' + +; +NEW_CONNECTION; +set statement_timeout='10000us'; +NEW_CONNECTION; +set statement_timeout='10000us'; +NEW_CONNECTION; +set +statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout='10000us'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-statement_timeout='10000us'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
statement_timeout='10000us'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-statement_timeout='10000us'; +NEW_CONNECTION; +set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT='9223372036854775807NS'; +NEW_CONNECTION; +set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; + set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; + set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; + + + +set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +set statement_timeout='9223372036854775807ns' ; +NEW_CONNECTION; +set statement_timeout='9223372036854775807ns' ; +NEW_CONNECTION; +set statement_timeout='9223372036854775807ns' + +; +NEW_CONNECTION; +set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +set +statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set&statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set-/statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +set transaction_timeout=null; +NEW_CONNECTION; +SET TRANSACTION_TIMEOUT=NULL; +NEW_CONNECTION; +set transaction_timeout=null; +NEW_CONNECTION; + set transaction_timeout=null; +NEW_CONNECTION; + set transaction_timeout=null; +NEW_CONNECTION; + + + +set transaction_timeout=null; +NEW_CONNECTION; +set transaction_timeout=null ; +NEW_CONNECTION; +set transaction_timeout=null ; +NEW_CONNECTION; +set transaction_timeout=null + +; +NEW_CONNECTION; +set transaction_timeout=null; +NEW_CONNECTION; +set transaction_timeout=null; +NEW_CONNECTION; +set +transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=null bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=null%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=null_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set 
transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=null&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=null$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=null@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=null!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=null*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=null(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=null); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=null-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-transaction_timeout=null; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT ++set transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=null+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=null-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=null/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=null\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=null?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=null-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=null/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set transaction_timeout=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=null/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set/-transaction_timeout=null; +NEW_CONNECTION; +set transaction_timeout = null ; +NEW_CONNECTION; +SET TRANSACTION_TIMEOUT = NULL ; +NEW_CONNECTION; +set transaction_timeout = null ; +NEW_CONNECTION; + set transaction_timeout = null ; +NEW_CONNECTION; + set transaction_timeout = null ; +NEW_CONNECTION; + + + +set transaction_timeout = null ; +NEW_CONNECTION; +set transaction_timeout = null ; +NEW_CONNECTION; +set transaction_timeout = null ; +NEW_CONNECTION; +set transaction_timeout = null + +; +NEW_CONNECTION; +set transaction_timeout = null ; +NEW_CONNECTION; +set transaction_timeout = null ; +NEW_CONNECTION; +set +transaction_timeout += +null +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set transaction_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null %; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set transaction_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set transaction_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set transaction_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set transaction_timeout = null ; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set transaction_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set transaction_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set transaction_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set transaction_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set transaction_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null -#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 
null-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null \; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set transaction_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set transaction_timeout = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = null/-; +NEW_CONNECTION; +set transaction_timeout='1s'; +NEW_CONNECTION; +SET TRANSACTION_TIMEOUT='1S'; +NEW_CONNECTION; +set transaction_timeout='1s'; +NEW_CONNECTION; + set transaction_timeout='1s'; +NEW_CONNECTION; + set transaction_timeout='1s'; +NEW_CONNECTION; + + + +set transaction_timeout='1s'; +NEW_CONNECTION; +set transaction_timeout='1s' ; +NEW_CONNECTION; +set transaction_timeout='1s' ; 
+NEW_CONNECTION; +set transaction_timeout='1s' + +; +NEW_CONNECTION; +set transaction_timeout='1s'; +NEW_CONNECTION; +set transaction_timeout='1s'; +NEW_CONNECTION; +set +transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='1s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='1s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='1s'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='1s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='1s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='1s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='1s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set transaction_timeout='1s'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='1s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='1s'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='1s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='1s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='1s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='1s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='1s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='1s'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set 
transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='1s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='1s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='1s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='1s'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-transaction_timeout='1s'; +NEW_CONNECTION; +set transaction_timeout = '1s' ; +NEW_CONNECTION; +SET TRANSACTION_TIMEOUT = '1S' ; +NEW_CONNECTION; +set transaction_timeout = '1s' ; +NEW_CONNECTION; + set transaction_timeout = '1s' ; +NEW_CONNECTION; + set transaction_timeout = '1s' ; +NEW_CONNECTION; + + + +set transaction_timeout = '1s' ; +NEW_CONNECTION; +set transaction_timeout = '1s' ; +NEW_CONNECTION; +set transaction_timeout = '1s' ; +NEW_CONNECTION; +set transaction_timeout = '1s' + +; +NEW_CONNECTION; +set transaction_timeout = '1s' ; +NEW_CONNECTION; +set transaction_timeout = '1s' ; +NEW_CONNECTION; +set +transaction_timeout += +'1s' +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s' %; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set transaction_timeout = '1s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s' _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s' &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s' $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s' @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s' !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s' *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s' (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction_timeout = '1s' ; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s' ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s' -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s' +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s' -#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s' /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s' \; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s' ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s' -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 
'1s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s' /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s' /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = '1s'/-; +NEW_CONNECTION; +set transaction_timeout=100; +NEW_CONNECTION; +SET TRANSACTION_TIMEOUT=100; +NEW_CONNECTION; +set transaction_timeout=100; +NEW_CONNECTION; + set transaction_timeout=100; +NEW_CONNECTION; + set transaction_timeout=100; +NEW_CONNECTION; + + + +set transaction_timeout=100; +NEW_CONNECTION; +set transaction_timeout=100 ; +NEW_CONNECTION; +set transaction_timeout=100 ; +NEW_CONNECTION; +set transaction_timeout=100 + +; +NEW_CONNECTION; +set transaction_timeout=100; +NEW_CONNECTION; +set transaction_timeout=100; +NEW_CONNECTION; +set +transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=100 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=100%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=100_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=100&; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set&transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=100$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=100@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=100!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=100*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=100(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=100); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=100-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=100+; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set+transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=100-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=100/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=100\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=100?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=100-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=100/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout=100/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-transaction_timeout=100; +NEW_CONNECTION; +set transaction_timeout = 100 ; +NEW_CONNECTION; +SET TRANSACTION_TIMEOUT = 100 ; +NEW_CONNECTION; +set transaction_timeout = 100 ; +NEW_CONNECTION; + set 
transaction_timeout = 100 ; +NEW_CONNECTION; + set transaction_timeout = 100 ; +NEW_CONNECTION; + + + +set transaction_timeout = 100 ; +NEW_CONNECTION; +set transaction_timeout = 100 ; +NEW_CONNECTION; +set transaction_timeout = 100 ; +NEW_CONNECTION; +set transaction_timeout = 100 + +; +NEW_CONNECTION; +set transaction_timeout = 100 ; +NEW_CONNECTION; +set transaction_timeout = 100 ; +NEW_CONNECTION; +set +transaction_timeout += +100 +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100 %; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100 _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100 &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100 $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100 @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction_timeout 
= 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100 !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100 *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100 (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100 ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100 -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100 +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100 -#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100 /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 
100/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100 \; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100 ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100 -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100 /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100 /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout = 100/-; +NEW_CONNECTION; +set transaction_timeout='100ms'; +NEW_CONNECTION; +SET TRANSACTION_TIMEOUT='100MS'; +NEW_CONNECTION; +set transaction_timeout='100ms'; +NEW_CONNECTION; + set transaction_timeout='100ms'; +NEW_CONNECTION; + set transaction_timeout='100ms'; +NEW_CONNECTION; + + + +set transaction_timeout='100ms'; +NEW_CONNECTION; +set transaction_timeout='100ms' ; +NEW_CONNECTION; +set transaction_timeout='100ms' ; +NEW_CONNECTION; +set transaction_timeout='100ms' + +; +NEW_CONNECTION; +set transaction_timeout='100ms'; +NEW_CONNECTION; +set transaction_timeout='100ms'; +NEW_CONNECTION; +set +transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo 
set transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='100ms' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='100ms'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='100ms'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='100ms'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='100ms'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='100ms'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='100ms'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='100ms'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +(set transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='100ms'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='100ms'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='100ms'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='100ms'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='100ms'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='100ms'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='100ms'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='100ms'?; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='100ms'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='100ms'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='100ms'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-transaction_timeout='100ms'; +NEW_CONNECTION; +set transaction_timeout='10000us'; +NEW_CONNECTION; +SET TRANSACTION_TIMEOUT='10000US'; +NEW_CONNECTION; +set transaction_timeout='10000us'; +NEW_CONNECTION; + set transaction_timeout='10000us'; +NEW_CONNECTION; + set transaction_timeout='10000us'; +NEW_CONNECTION; + + + +set transaction_timeout='10000us'; +NEW_CONNECTION; +set transaction_timeout='10000us' ; +NEW_CONNECTION; +set transaction_timeout='10000us' ; +NEW_CONNECTION; +set transaction_timeout='10000us' + +; +NEW_CONNECTION; +set transaction_timeout='10000us'; +NEW_CONNECTION; +set transaction_timeout='10000us'; +NEW_CONNECTION; +set +transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='10000us' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='10000us'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%transaction_timeout='10000us'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='10000us'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='10000us'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='10000us'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='10000us'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='10000us'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='10000us'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='10000us'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set transaction_timeout='10000us'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='10000us'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='10000us'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='10000us'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='10000us'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='10000us'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='10000us'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='10000us'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set-/transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='10000us'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='10000us'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-transaction_timeout='10000us'; +NEW_CONNECTION; +set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +SET TRANSACTION_TIMEOUT='9223372036854775807NS'; +NEW_CONNECTION; +set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; + set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; + set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; + + + +set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +set transaction_timeout='9223372036854775807ns' ; +NEW_CONNECTION; +set transaction_timeout='9223372036854775807ns' ; +NEW_CONNECTION; +set transaction_timeout='9223372036854775807ns' + +; +NEW_CONNECTION; +set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +set +transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='9223372036854775807ns' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='9223372036854775807ns'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+_set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='9223372036854775807ns'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='9223372036854775807ns'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='9223372036854775807ns'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='9223372036854775807ns'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='9223372036854775807ns'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='9223372036854775807ns'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
transaction_timeout='9223372036854775807ns'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='9223372036854775807ns'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='9223372036854775807ns'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='9223372036854775807ns'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='9223372036854775807ns'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='9223372036854775807ns'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='9223372036854775807ns'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set\transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='9223372036854775807ns'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='9223372036854775807ns'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='9223372036854775807ns'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_timeout='9223372036854775807ns'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +set autocommit = false; +set transaction read only; +NEW_CONNECTION; +set autocommit = false; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +set autocommit = false; +set transaction read only; +NEW_CONNECTION; +set autocommit = false; + set transaction read only; +NEW_CONNECTION; +set autocommit = false; + set transaction read only; +NEW_CONNECTION; +set autocommit = false; + + + +set transaction read only; +NEW_CONNECTION; +set autocommit = false; +set transaction read only ; +NEW_CONNECTION; +set autocommit = false; +set transaction read only ; +NEW_CONNECTION; +set autocommit = false; +set transaction read only + +; +NEW_CONNECTION; +set autocommit = false; +set 
transaction read only; +NEW_CONNECTION; +set autocommit = false; +set transaction read only; +NEW_CONNECTION; +set autocommit = false; +set +transaction +read +only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read%only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read_only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read&only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read$only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read@only; 
+NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read!only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read*only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read(only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read)only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read-only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read+only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+-#set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read-#only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read/only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read\only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read?only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read-/only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read/#only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set transaction read only; +NEW_CONNECTION; +set autocommit = 
false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read/-only; +NEW_CONNECTION; +set autocommit = false; +set transaction read write; +NEW_CONNECTION; +set autocommit = false; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +set autocommit = false; +set transaction read write; +NEW_CONNECTION; +set autocommit = false; + set transaction read write; +NEW_CONNECTION; +set autocommit = false; + set transaction read write; +NEW_CONNECTION; +set autocommit = false; + + + +set transaction read write; +NEW_CONNECTION; +set autocommit = false; +set transaction read write ; +NEW_CONNECTION; +set autocommit = false; +set transaction read write ; +NEW_CONNECTION; +set autocommit = false; +set transaction read write + +; +NEW_CONNECTION; +set autocommit = false; +set transaction read write; +NEW_CONNECTION; +set autocommit = false; +set transaction read write; +NEW_CONNECTION; +set autocommit = false; +set +transaction +read +write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read%write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read_write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+&set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read&write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read$write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read@write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read!write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read*write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read(write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction read write; +NEW_CONNECTION; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read)write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read-write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read+write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read-#write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read/write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read\write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set transaction read write?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read?write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read-/write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read/#write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read/-write; +NEW_CONNECTION; +set read_only_staleness='STRONG'; +NEW_CONNECTION; +SET READ_ONLY_STALENESS='STRONG'; +NEW_CONNECTION; +set read_only_staleness='strong'; +NEW_CONNECTION; + set read_only_staleness='STRONG'; +NEW_CONNECTION; + set read_only_staleness='STRONG'; +NEW_CONNECTION; + + + +set read_only_staleness='STRONG'; +NEW_CONNECTION; +set read_only_staleness='STRONG' ; +NEW_CONNECTION; +set read_only_staleness='STRONG' ; +NEW_CONNECTION; +set read_only_staleness='STRONG' + +; +NEW_CONNECTION; +set read_only_staleness='STRONG'; +NEW_CONNECTION; +set read_only_staleness='STRONG'; +NEW_CONNECTION; +set +read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='STRONG' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set 
read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='STRONG'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='STRONG'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='STRONG'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='STRONG'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='STRONG'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='STRONG'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='STRONG'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='STRONG'(; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set(read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='STRONG'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='STRONG'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='STRONG'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='STRONG'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='STRONG'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='STRONG'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='STRONG'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set 
read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='STRONG'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='STRONG'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='STRONG'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-read_only_staleness='STRONG'; +NEW_CONNECTION; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +set read_only_staleness='min_read_timestamp 2018-01-02t03:04:05.123-08:00'; +NEW_CONNECTION; + set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; + set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; + + + +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00' ; +NEW_CONNECTION; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00' ; +NEW_CONNECTION; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00' + +; +NEW_CONNECTION; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +set +read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set read_only_staleness='MIN_READ_TIMESTAMP 
2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP%2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP_2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP&2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP$2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'@; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP@2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP!2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP*2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP(2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP)2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'-; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP-2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP+2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP-#2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP/2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP\2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
read_only_staleness='MIN_READ_TIMESTAMP?2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP-/2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP/#2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP/-2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +set read_only_staleness='min_read_timestamp 2018-01-02t03:04:05.123z'; +NEW_CONNECTION; + set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; + set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; + + + +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z' ; +NEW_CONNECTION; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z' ; +NEW_CONNECTION; 
+set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z' + +; +NEW_CONNECTION; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +set +read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP%2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP_2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP&2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'$; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP$2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP@2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP!2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP*2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP(2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
read_only_staleness='MIN_READ_TIMESTAMP)2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP-2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP+2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP-#2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP/2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP\2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +?set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP?2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP-/2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP/#2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP/-2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +set read_only_staleness='min_read_timestamp 2018-01-02t03:04:05.123+07:45'; +NEW_CONNECTION; + set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; + set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; + + + +set 
read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45' ; +NEW_CONNECTION; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45' ; +NEW_CONNECTION; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45' + +; +NEW_CONNECTION; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +set +read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP%2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP_2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'&; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP&2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP$2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP@2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP!2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP*2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
read_only_staleness='MIN_READ_TIMESTAMP(2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP)2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP-2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP+2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP-#2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
read_only_staleness='MIN_READ_TIMESTAMP/2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP\2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP?2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP-/2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP/#2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
read_only_staleness='MIN_READ_TIMESTAMP/-2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +set read_only_staleness='read_timestamp 2018-01-02t03:04:05.54321-07:00'; +NEW_CONNECTION; + set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; + set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; + + + +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00' ; +NEW_CONNECTION; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00' ; +NEW_CONNECTION; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00' + +; +NEW_CONNECTION; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +set +read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP%2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP_2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP&2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP$2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP@2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP!2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 
2018-01-02T03:04:05.54321-07:00'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP*2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP(2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP)2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP-2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP+2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'-#; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP-#2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP/2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP\2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP?2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP-/2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
read_only_staleness='READ_TIMESTAMP/#2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP/-2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +set read_only_staleness='read_timestamp 2018-01-02t03:04:05.54321z'; +NEW_CONNECTION; + set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; + set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; + + + +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z' ; +NEW_CONNECTION; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z' ; +NEW_CONNECTION; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z' + +; +NEW_CONNECTION; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +set +read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 
2018-01-02T03:04:05.54321Z'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP%2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP_2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP&2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP$2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP@2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
read_only_staleness='READ_TIMESTAMP!2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP*2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP(2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP)2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP-2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP+2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set 
read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP-#2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP/2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP\2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP?2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP-/2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP/#2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP/-2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +set read_only_staleness='read_timestamp 2018-01-02t03:04:05.54321+05:30'; +NEW_CONNECTION; + set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; + set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; + + + +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30' ; +NEW_CONNECTION; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30' ; +NEW_CONNECTION; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30' + +; +NEW_CONNECTION; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +set +read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +%set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP%2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP_2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP&2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP$2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP@2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set read_only_staleness='READ_TIMESTAMP 
2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP!2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP*2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP(2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP)2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP-2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP+2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP-#2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP/2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP\2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP?2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 
2018-01-02T03:04:05.54321+05:30'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP-/2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP/#2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='READ_TIMESTAMP/-2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +SET READ_ONLY_STALENESS='MAX_STALENESS 12S'; +NEW_CONNECTION; +set read_only_staleness='max_staleness 12s'; +NEW_CONNECTION; + set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; + set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; + + + +set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 12s' ; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 12s' ; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 12s' + +; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +set +read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 12s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set 
read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 12s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS%12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 12s'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS_12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 12s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS&12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 12s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS$12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 12s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS@12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 12s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS!12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 12s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
read_only_staleness='MAX_STALENESS*12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 12s'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS(12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 12s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS)12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 12s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS-12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 12s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS+12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 12s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS-#12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 12s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS/12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
read_only_staleness='MAX_STALENESS 12s'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS\12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 12s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS?12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 12s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS-/12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 12s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS/#12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 12s'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS/-12s'; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READ_ONLY_STALENESS='MAX_STALENESS 100MS'; +NEW_CONNECTION; +set read_only_staleness='max_staleness 100ms'; +NEW_CONNECTION; + set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; + set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; + + + +set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 100ms' ; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 100ms' ; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 100ms' + +; +NEW_CONNECTION; +set 
read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +set +read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 100ms' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 100ms'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS%100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 100ms'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS_100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 100ms'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS&100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 100ms'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS$100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 100ms'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS@100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set 
read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 100ms'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS!100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 100ms'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS*100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 100ms'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS(100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 100ms'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS)100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 100ms'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS-100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 100ms'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS+100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 100ms'-#; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS-#100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 100ms'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS/100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 100ms'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS\100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 100ms'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS?100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 100ms'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS-/100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 100ms'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS/#100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 100ms'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS/-100ms'; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 99999us'; 
+NEW_CONNECTION; +SET READ_ONLY_STALENESS='MAX_STALENESS 99999US'; +NEW_CONNECTION; +set read_only_staleness='max_staleness 99999us'; +NEW_CONNECTION; + set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; + set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; + + + +set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 99999us' ; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 99999us' ; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 99999us' + +; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +set +read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 99999us' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 99999us'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS%99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 99999us'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS_99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 99999us'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS&99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set read_only_staleness='MAX_STALENESS 
99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 99999us'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS$99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 99999us'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS@99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 99999us'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS!99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 99999us'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS*99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 99999us'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS(99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 99999us'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS)99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 99999us'-; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS-99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 99999us'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS+99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 99999us'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS-#99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 99999us'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS/99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 99999us'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS\99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 99999us'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS?99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 99999us'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS-/99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +/#set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 99999us'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS/#99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 99999us'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS/-99999us'; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +SET READ_ONLY_STALENESS='MAX_STALENESS 10NS'; +NEW_CONNECTION; +set read_only_staleness='max_staleness 10ns'; +NEW_CONNECTION; + set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; + set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; + + + +set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 10ns' ; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 10ns' ; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 10ns' + +; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +set +read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 10ns' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 10ns'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS%10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set read_only_staleness='MAX_STALENESS 10ns'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 10ns'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS_10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 10ns'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS&10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 10ns'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS$10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 10ns'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS@10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 10ns'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS!10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 10ns'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS*10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 10ns'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
read_only_staleness='MAX_STALENESS(10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 10ns'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS)10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 10ns'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS-10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 10ns'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS+10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 10ns'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS-#10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 10ns'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS/10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 10ns'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS\10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 10ns'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS?10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 10ns'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS-/10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 10ns'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS/#10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS 10ns'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='MAX_STALENESS/-10ns'; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +SET READ_ONLY_STALENESS='EXACT_STALENESS 15S'; +NEW_CONNECTION; +set read_only_staleness='exact_staleness 15s'; +NEW_CONNECTION; + set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; + set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; + + + +set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 15s' ; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 15s' ; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 15s' + +; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +set +read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS%15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15s'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS_15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS&15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS$15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS@15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS!15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set 
read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS*15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15s'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS(15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS)15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS-15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS+15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS-#15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15s'/; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS/15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15s'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS\15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS?15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS-/15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS/#15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15s'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS/-15s'; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1500MS'; +NEW_CONNECTION; +set read_only_staleness='exact_staleness 1500ms'; +NEW_CONNECTION; + set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; + set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; + + + +set 
read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 1500ms' ; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 1500ms' ; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 1500ms' + +; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +set +read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 1500ms' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 1500ms'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS%1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 1500ms'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS_1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 1500ms'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS&1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 1500ms'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS$1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set 
read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 1500ms'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS@1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 1500ms'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS!1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 1500ms'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS*1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 1500ms'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS(1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 1500ms'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS)1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 1500ms'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS-1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
read_only_staleness='EXACT_STALENESS 1500ms'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS+1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 1500ms'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS-#1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 1500ms'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS/1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 1500ms'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS\1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 1500ms'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS?1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 1500ms'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS-/1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 1500ms'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
read_only_staleness='EXACT_STALENESS/#1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 1500ms'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS/-1500ms'; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +SET READ_ONLY_STALENESS='EXACT_STALENESS 15000000US'; +NEW_CONNECTION; +set read_only_staleness='exact_staleness 15000000us'; +NEW_CONNECTION; + set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; + set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; + + + +set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 15000000us' ; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 15000000us' ; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 15000000us' + +; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +set +read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15000000us' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15000000us'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS%15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 
15000000us'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS_15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15000000us'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS&15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15000000us'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS$15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15000000us'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS@15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15000000us'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS!15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15000000us'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS*15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15000000us'(; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS(15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15000000us'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS)15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15000000us'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS-15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15000000us'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS+15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15000000us'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS-#15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15000000us'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS/15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15000000us'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
read_only_staleness='EXACT_STALENESS\15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15000000us'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS?15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15000000us'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS-/15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15000000us'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS/#15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 15000000us'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS/-15000000us'; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +SET READ_ONLY_STALENESS='EXACT_STALENESS 9999NS'; +NEW_CONNECTION; +set read_only_staleness='exact_staleness 9999ns'; +NEW_CONNECTION; + set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; + set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; + + + +set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 9999ns' ; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 9999ns' ; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 9999ns' + +; 
+NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +set +read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 9999ns' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 9999ns'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS%9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 9999ns'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS_9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 9999ns'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS&9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 9999ns'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS$9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 9999ns'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS@9999ns'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 9999ns'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS!9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 9999ns'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS*9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 9999ns'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS(9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 9999ns'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS)9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 9999ns'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS-9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 9999ns'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS+9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set read_only_staleness='EXACT_STALENESS 9999ns'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 9999ns'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS-#9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 9999ns'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS/9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 9999ns'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS\9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 9999ns'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS?9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 9999ns'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS-/9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 9999ns'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS/#9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS 9999ns'/-; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_only_staleness='EXACT_STALENESS/-9999ns'; +NEW_CONNECTION; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; + set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; + set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; + + + +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}' ; +NEW_CONNECTION; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}' ; +NEW_CONNECTION; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}' + +; +NEW_CONNECTION; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +set +directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +set directed_read=''; +NEW_CONNECTION; + set directed_read=''; +NEW_CONNECTION; + set directed_read=''; +NEW_CONNECTION; + + + +set directed_read=''; +NEW_CONNECTION; +set directed_read='' ; +NEW_CONNECTION; +set directed_read='' ; +NEW_CONNECTION; +set directed_read='' + +; +NEW_CONNECTION; +set directed_read=''; +NEW_CONNECTION; +set directed_read=''; +NEW_CONNECTION; +set +directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read='' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read=''%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read=''_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read=''&; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set&directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read=''$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read=''@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read=''!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read=''*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read=''(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read=''); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read=''-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read=''+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set directed_read=''-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read=''/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read=''\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read=''?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read=''-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read=''/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set directed_read=''/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-directed_read=''; +NEW_CONNECTION; +set optimizer_version='1'; +NEW_CONNECTION; +SET OPTIMIZER_VERSION='1'; +NEW_CONNECTION; +set optimizer_version='1'; +NEW_CONNECTION; + set optimizer_version='1'; +NEW_CONNECTION; + set optimizer_version='1'; +NEW_CONNECTION; + + + +set optimizer_version='1'; +NEW_CONNECTION; +set optimizer_version='1' ; +NEW_CONNECTION; +set optimizer_version='1' ; +NEW_CONNECTION; +set optimizer_version='1' + +; +NEW_CONNECTION; +set optimizer_version='1'; +NEW_CONNECTION; +set 
optimizer_version='1'; +NEW_CONNECTION; +set +optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='1' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='1'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='1'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='1'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='1'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='1'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='1'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='1'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +(set optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='1'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='1'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='1'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='1'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='1'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='1'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='1'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='1'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set 
optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='1'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='1'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='1'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-optimizer_version='1'; +NEW_CONNECTION; +set optimizer_version='200'; +NEW_CONNECTION; +SET OPTIMIZER_VERSION='200'; +NEW_CONNECTION; +set optimizer_version='200'; +NEW_CONNECTION; + set optimizer_version='200'; +NEW_CONNECTION; + set optimizer_version='200'; +NEW_CONNECTION; + + + +set optimizer_version='200'; +NEW_CONNECTION; +set optimizer_version='200' ; +NEW_CONNECTION; +set optimizer_version='200' ; +NEW_CONNECTION; +set optimizer_version='200' + +; +NEW_CONNECTION; +set optimizer_version='200'; +NEW_CONNECTION; +set optimizer_version='200'; +NEW_CONNECTION; +set +optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='200' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='200'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='200'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set 
optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='200'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='200'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='200'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='200'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='200'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='200'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='200'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='200'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT ++set optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='200'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='200'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='200'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='200'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='200'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='200'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='200'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='200'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-optimizer_version='200'; 
+NEW_CONNECTION; +set optimizer_version='LATEST'; +NEW_CONNECTION; +SET OPTIMIZER_VERSION='LATEST'; +NEW_CONNECTION; +set optimizer_version='latest'; +NEW_CONNECTION; + set optimizer_version='LATEST'; +NEW_CONNECTION; + set optimizer_version='LATEST'; +NEW_CONNECTION; + + + +set optimizer_version='LATEST'; +NEW_CONNECTION; +set optimizer_version='LATEST' ; +NEW_CONNECTION; +set optimizer_version='LATEST' ; +NEW_CONNECTION; +set optimizer_version='LATEST' + +; +NEW_CONNECTION; +set optimizer_version='LATEST'; +NEW_CONNECTION; +set optimizer_version='LATEST'; +NEW_CONNECTION; +set +optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='LATEST' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='LATEST'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='LATEST'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='LATEST'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='LATEST'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
optimizer_version='LATEST'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='LATEST'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='LATEST'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='LATEST'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='LATEST'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='LATEST'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='LATEST'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='LATEST'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set 
optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='LATEST'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='LATEST'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='LATEST'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='LATEST'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='LATEST'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='LATEST'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-optimizer_version='LATEST'; +NEW_CONNECTION; +set optimizer_version=''; +NEW_CONNECTION; +SET OPTIMIZER_VERSION=''; +NEW_CONNECTION; +set optimizer_version=''; +NEW_CONNECTION; + set optimizer_version=''; +NEW_CONNECTION; + set optimizer_version=''; +NEW_CONNECTION; + + + +set optimizer_version=''; +NEW_CONNECTION; +set optimizer_version='' ; +NEW_CONNECTION; +set optimizer_version='' ; +NEW_CONNECTION; +set optimizer_version='' + +; +NEW_CONNECTION; +set optimizer_version=''; +NEW_CONNECTION; +set 
optimizer_version=''; +NEW_CONNECTION; +set +optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version='' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version=''%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version=''_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version=''&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version=''$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version=''@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version=''!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version=''*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set 
optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version=''(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version=''); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version=''-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version=''+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version=''-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version=''/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version=''\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version=''?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set optimizer_version=''-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version=''/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_version=''/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-optimizer_version=''; +NEW_CONNECTION; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +SET OPTIMIZER_STATISTICS_PACKAGE='AUTO_20191128_14_47_22UTC'; +NEW_CONNECTION; +set optimizer_statistics_package='auto_20191128_14_47_22utc'; +NEW_CONNECTION; + set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; + set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; + + + +set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +set optimizer_statistics_package='auto_20191128_14_47_22UTC' ; +NEW_CONNECTION; +set optimizer_statistics_package='auto_20191128_14_47_22UTC' ; +NEW_CONNECTION; +set optimizer_statistics_package='auto_20191128_14_47_22UTC' + +; +NEW_CONNECTION; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +set +optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package='auto_20191128_14_47_22UTC' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set optimizer_statistics_package='auto_20191128_14_47_22UTC'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package='auto_20191128_14_47_22UTC'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package='auto_20191128_14_47_22UTC'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package='auto_20191128_14_47_22UTC'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package='auto_20191128_14_47_22UTC'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package='auto_20191128_14_47_22UTC'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+*set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package='auto_20191128_14_47_22UTC'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package='auto_20191128_14_47_22UTC'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package='auto_20191128_14_47_22UTC'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package='auto_20191128_14_47_22UTC'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package='auto_20191128_14_47_22UTC'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package='auto_20191128_14_47_22UTC'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set-#optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package='auto_20191128_14_47_22UTC'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package='auto_20191128_14_47_22UTC'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package='auto_20191128_14_47_22UTC'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package='auto_20191128_14_47_22UTC'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package='auto_20191128_14_47_22UTC'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
optimizer_statistics_package='auto_20191128_14_47_22UTC'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +set optimizer_statistics_package=''; +NEW_CONNECTION; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +NEW_CONNECTION; +set optimizer_statistics_package=''; +NEW_CONNECTION; + set optimizer_statistics_package=''; +NEW_CONNECTION; + set optimizer_statistics_package=''; +NEW_CONNECTION; + + + +set optimizer_statistics_package=''; +NEW_CONNECTION; +set optimizer_statistics_package='' ; +NEW_CONNECTION; +set optimizer_statistics_package='' ; +NEW_CONNECTION; +set optimizer_statistics_package='' + +; +NEW_CONNECTION; +set optimizer_statistics_package=''; +NEW_CONNECTION; +set optimizer_statistics_package=''; +NEW_CONNECTION; +set +optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package='' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package=''%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package=''_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package=''&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set optimizer_statistics_package=''$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package=''@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package=''!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package=''*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package=''(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package=''); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package=''-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package=''+; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set+optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package=''-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package=''/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package=''\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package=''?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package=''-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package=''/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set optimizer_statistics_package=''/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set/-optimizer_statistics_package=''; +NEW_CONNECTION; +set return_commit_stats = true; +NEW_CONNECTION; +SET RETURN_COMMIT_STATS = TRUE; +NEW_CONNECTION; +set return_commit_stats = true; +NEW_CONNECTION; + set return_commit_stats = true; +NEW_CONNECTION; + set return_commit_stats = true; +NEW_CONNECTION; + + + +set return_commit_stats = true; +NEW_CONNECTION; +set return_commit_stats = true ; +NEW_CONNECTION; +set return_commit_stats = true ; +NEW_CONNECTION; +set return_commit_stats = true + +; +NEW_CONNECTION; +set return_commit_stats = true; +NEW_CONNECTION; +set return_commit_stats = true; +NEW_CONNECTION; +set +return_commit_stats += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set return_commit_stats = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +/set return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =/-true; +NEW_CONNECTION; +set return_commit_stats = false; +NEW_CONNECTION; +SET RETURN_COMMIT_STATS = FALSE; +NEW_CONNECTION; +set return_commit_stats = false; +NEW_CONNECTION; + set return_commit_stats = false; +NEW_CONNECTION; + set return_commit_stats = false; +NEW_CONNECTION; + + + +set return_commit_stats = false; +NEW_CONNECTION; +set return_commit_stats = false ; +NEW_CONNECTION; +set return_commit_stats = false ; +NEW_CONNECTION; +set return_commit_stats = 
false + +; +NEW_CONNECTION; +set return_commit_stats = false; +NEW_CONNECTION; +set return_commit_stats = false; +NEW_CONNECTION; +set +return_commit_stats += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set 
return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set return_commit_stats =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set return_commit_stats =/-false; +NEW_CONNECTION; +set max_commit_delay=null; +NEW_CONNECTION; +SET MAX_COMMIT_DELAY=NULL; +NEW_CONNECTION; +set max_commit_delay=null; +NEW_CONNECTION; + set max_commit_delay=null; +NEW_CONNECTION; + set max_commit_delay=null; +NEW_CONNECTION; + + + +set max_commit_delay=null; +NEW_CONNECTION; +set max_commit_delay=null ; +NEW_CONNECTION; +set max_commit_delay=null ; +NEW_CONNECTION; +set max_commit_delay=null + +; +NEW_CONNECTION; +set max_commit_delay=null; +NEW_CONNECTION; +set max_commit_delay=null; +NEW_CONNECTION; +set +max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=null bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
max_commit_delay=null%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=null_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=null&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=null$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=null@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=null!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=null*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=null(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=null); 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=null-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=null+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=null-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=null/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=null\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=null?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=null-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=null/#; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set/#max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=null/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-max_commit_delay=null; +NEW_CONNECTION; +set max_commit_delay = null; +NEW_CONNECTION; +SET MAX_COMMIT_DELAY = NULL; +NEW_CONNECTION; +set max_commit_delay = null; +NEW_CONNECTION; + set max_commit_delay = null; +NEW_CONNECTION; + set max_commit_delay = null; +NEW_CONNECTION; + + + +set max_commit_delay = null; +NEW_CONNECTION; +set max_commit_delay = null ; +NEW_CONNECTION; +set max_commit_delay = null ; +NEW_CONNECTION; +set max_commit_delay = null + +; +NEW_CONNECTION; +set max_commit_delay = null; +NEW_CONNECTION; +set max_commit_delay = null; +NEW_CONNECTION; +set +max_commit_delay += +null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set max_commit_delay = null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set max_commit_delay = null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =%null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set max_commit_delay = null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =_null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set max_commit_delay = null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =&null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set max_commit_delay = null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null$; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =$null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set max_commit_delay = null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =@null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set max_commit_delay = null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =!null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set max_commit_delay = null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =*null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set max_commit_delay = null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =(null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set max_commit_delay = null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =)null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set max_commit_delay = null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =-null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set max_commit_delay = null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =+null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set max_commit_delay = null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null-#; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =-#null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set max_commit_delay = null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =/null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set max_commit_delay = null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =\null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set max_commit_delay = null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =?null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set max_commit_delay = null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =-/null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set max_commit_delay = null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =/#null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set max_commit_delay = null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =/-null; +NEW_CONNECTION; +set max_commit_delay = null ; +NEW_CONNECTION; +SET MAX_COMMIT_DELAY = NULL ; +NEW_CONNECTION; +set max_commit_delay = null ; +NEW_CONNECTION; + set max_commit_delay = null ; +NEW_CONNECTION; + set max_commit_delay = null ; +NEW_CONNECTION; + + + +set max_commit_delay = null ; +NEW_CONNECTION; +set max_commit_delay = null ; +NEW_CONNECTION; +set max_commit_delay = null ; 
+NEW_CONNECTION; +set max_commit_delay = null + +; +NEW_CONNECTION; +set max_commit_delay = null ; +NEW_CONNECTION; +set max_commit_delay = null ; +NEW_CONNECTION; +set +max_commit_delay += +null +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null %; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set max_commit_delay = null ; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null -#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null \; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set 
max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = null/-; +NEW_CONNECTION; +set max_commit_delay=1000; +NEW_CONNECTION; +SET MAX_COMMIT_DELAY=1000; +NEW_CONNECTION; +set max_commit_delay=1000; +NEW_CONNECTION; + set max_commit_delay=1000; +NEW_CONNECTION; + set max_commit_delay=1000; +NEW_CONNECTION; + + + +set max_commit_delay=1000; +NEW_CONNECTION; +set max_commit_delay=1000 ; +NEW_CONNECTION; +set max_commit_delay=1000 ; +NEW_CONNECTION; +set max_commit_delay=1000 + +; +NEW_CONNECTION; +set max_commit_delay=1000; +NEW_CONNECTION; +set max_commit_delay=1000; +NEW_CONNECTION; +set +max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=1000 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=1000%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set 
max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=1000_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=1000&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=1000$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=1000@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=1000!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=1000*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=1000(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=1000); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set max_commit_delay=1000; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=1000-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=1000+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=1000-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=1000/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=1000\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=1000?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=1000-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay=1000/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set max_commit_delay=1000/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-max_commit_delay=1000; +NEW_CONNECTION; +set max_commit_delay = 1000; +NEW_CONNECTION; +SET MAX_COMMIT_DELAY = 1000; +NEW_CONNECTION; +set max_commit_delay = 1000; +NEW_CONNECTION; + set max_commit_delay = 1000; +NEW_CONNECTION; + set max_commit_delay = 1000; +NEW_CONNECTION; + + + +set max_commit_delay = 1000; +NEW_CONNECTION; +set max_commit_delay = 1000 ; +NEW_CONNECTION; +set max_commit_delay = 1000 ; +NEW_CONNECTION; +set max_commit_delay = 1000 + +; +NEW_CONNECTION; +set max_commit_delay = 1000; +NEW_CONNECTION; +set max_commit_delay = 1000; +NEW_CONNECTION; +set +max_commit_delay += +1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =%1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =_1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =&1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =$1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =@1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =!1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =*1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =(1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =)1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =-1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =+1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =-#1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set max_commit_delay = 1000; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =/1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =\1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =?1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =-/1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =/#1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =/-1000; +NEW_CONNECTION; +set max_commit_delay = 1000 ; +NEW_CONNECTION; +SET MAX_COMMIT_DELAY = 1000 ; +NEW_CONNECTION; +set max_commit_delay = 1000 ; +NEW_CONNECTION; + set max_commit_delay = 1000 ; +NEW_CONNECTION; + set max_commit_delay = 1000 ; +NEW_CONNECTION; + + + +set max_commit_delay = 1000 ; +NEW_CONNECTION; +set max_commit_delay = 1000 ; +NEW_CONNECTION; +set max_commit_delay = 1000 ; +NEW_CONNECTION; +set max_commit_delay = 1000 + +; +NEW_CONNECTION; +set max_commit_delay = 1000 ; +NEW_CONNECTION; +set max_commit_delay = 1000 ; +NEW_CONNECTION; +set 
+max_commit_delay += +1000 +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000 %; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000 _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000 &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000 $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000 @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000 !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000 *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000*; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +(set max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000 (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000 ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000 -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000 +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000 -#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000 /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000 \; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000 ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
max_commit_delay = 1000?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000 -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000 /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000 /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = 1000/-; +NEW_CONNECTION; +set max_commit_delay='1s'; +NEW_CONNECTION; +SET MAX_COMMIT_DELAY='1S'; +NEW_CONNECTION; +set max_commit_delay='1s'; +NEW_CONNECTION; + set max_commit_delay='1s'; +NEW_CONNECTION; + set max_commit_delay='1s'; +NEW_CONNECTION; + + + +set max_commit_delay='1s'; +NEW_CONNECTION; +set max_commit_delay='1s' ; +NEW_CONNECTION; +set max_commit_delay='1s' ; +NEW_CONNECTION; +set max_commit_delay='1s' + +; +NEW_CONNECTION; +set max_commit_delay='1s'; +NEW_CONNECTION; +set max_commit_delay='1s'; +NEW_CONNECTION; +set +max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='1s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='1s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='1s'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set_max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='1s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='1s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='1s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='1s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='1s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='1s'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='1s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='1s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-max_commit_delay='1s'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='1s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='1s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='1s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='1s'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='1s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='1s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='1s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='1s'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-max_commit_delay='1s'; +NEW_CONNECTION; +set 
max_commit_delay = '1s'; +NEW_CONNECTION; +SET MAX_COMMIT_DELAY = '1S'; +NEW_CONNECTION; +set max_commit_delay = '1s'; +NEW_CONNECTION; + set max_commit_delay = '1s'; +NEW_CONNECTION; + set max_commit_delay = '1s'; +NEW_CONNECTION; + + + +set max_commit_delay = '1s'; +NEW_CONNECTION; +set max_commit_delay = '1s' ; +NEW_CONNECTION; +set max_commit_delay = '1s' ; +NEW_CONNECTION; +set max_commit_delay = '1s' + +; +NEW_CONNECTION; +set max_commit_delay = '1s'; +NEW_CONNECTION; +set max_commit_delay = '1s'; +NEW_CONNECTION; +set +max_commit_delay += +'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =%'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =_'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =&'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =$'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =@'1s'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =!'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =*'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =('1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =)'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =-'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =+'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =-#'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
max_commit_delay =/'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =\'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =?'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =-/'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =/#'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay =/-'1s'; +NEW_CONNECTION; +set max_commit_delay = '1s' ; +NEW_CONNECTION; +SET MAX_COMMIT_DELAY = '1S' ; +NEW_CONNECTION; +set max_commit_delay = '1s' ; +NEW_CONNECTION; + set max_commit_delay = '1s' ; +NEW_CONNECTION; + set max_commit_delay = '1s' ; +NEW_CONNECTION; + + + +set max_commit_delay = '1s' ; +NEW_CONNECTION; +set max_commit_delay = '1s' ; +NEW_CONNECTION; +set max_commit_delay = '1s' ; +NEW_CONNECTION; +set max_commit_delay = '1s' + +; +NEW_CONNECTION; +set max_commit_delay = '1s' ; +NEW_CONNECTION; +set max_commit_delay = '1s' ; +NEW_CONNECTION; +set +max_commit_delay += +'1s' +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s' %; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s' _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s' &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s' $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s' @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s' !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s' *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay 
= '1s' (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s' ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s' -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s' +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s' -#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s' /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s' \; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s' ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set max_commit_delay = '1s' -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s' /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s' /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay = '1s'/-; +NEW_CONNECTION; +set max_commit_delay='100ms'; +NEW_CONNECTION; +SET MAX_COMMIT_DELAY='100MS'; +NEW_CONNECTION; +set max_commit_delay='100ms'; +NEW_CONNECTION; + set max_commit_delay='100ms'; +NEW_CONNECTION; + set max_commit_delay='100ms'; +NEW_CONNECTION; + + + +set max_commit_delay='100ms'; +NEW_CONNECTION; +set max_commit_delay='100ms' ; +NEW_CONNECTION; +set max_commit_delay='100ms' ; +NEW_CONNECTION; +set max_commit_delay='100ms' + +; +NEW_CONNECTION; +set max_commit_delay='100ms'; +NEW_CONNECTION; +set max_commit_delay='100ms'; +NEW_CONNECTION; +set +max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='100ms' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='100ms'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='100ms'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set 
max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='100ms'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='100ms'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='100ms'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='100ms'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='100ms'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='100ms'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='100ms'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='100ms'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT ++set max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='100ms'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='100ms'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='100ms'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='100ms'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='100ms'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='100ms'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='100ms'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='100ms'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set/-max_commit_delay='100ms'; +NEW_CONNECTION; +set max_commit_delay='10000us'; +NEW_CONNECTION; +SET MAX_COMMIT_DELAY='10000US'; +NEW_CONNECTION; +set max_commit_delay='10000us'; +NEW_CONNECTION; + set max_commit_delay='10000us'; +NEW_CONNECTION; + set max_commit_delay='10000us'; +NEW_CONNECTION; + + + +set max_commit_delay='10000us'; +NEW_CONNECTION; +set max_commit_delay='10000us' ; +NEW_CONNECTION; +set max_commit_delay='10000us' ; +NEW_CONNECTION; +set max_commit_delay='10000us' + +; +NEW_CONNECTION; +set max_commit_delay='10000us'; +NEW_CONNECTION; +set max_commit_delay='10000us'; +NEW_CONNECTION; +set +max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='10000us' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='10000us'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='10000us'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='10000us'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='10000us'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set max_commit_delay='10000us'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='10000us'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='10000us'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='10000us'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='10000us'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='10000us'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='10000us'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='10000us'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +/set max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='10000us'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='10000us'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='10000us'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='10000us'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='10000us'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set max_commit_delay='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='10000us'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-max_commit_delay='10000us'; +NEW_CONNECTION; +set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +SET MAX_COMMIT_DELAY='9223372036854775807NS'; +NEW_CONNECTION; +set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; + set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; + set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; + + + +set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +set max_commit_delay='9223372036854775807ns' ; 
+NEW_CONNECTION; +set max_commit_delay='9223372036854775807ns' ; +NEW_CONNECTION; +set max_commit_delay='9223372036854775807ns' + +; +NEW_CONNECTION; +set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +set +max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='9223372036854775807ns' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='9223372036854775807ns'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='9223372036854775807ns'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='9223372036854775807ns'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='9223372036854775807ns'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='9223372036854775807ns'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set@max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='9223372036854775807ns'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='9223372036854775807ns'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='9223372036854775807ns'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='9223372036854775807ns'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='9223372036854775807ns'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='9223372036854775807ns'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set max_commit_delay='9223372036854775807ns'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='9223372036854775807ns'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='9223372036854775807ns'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='9223372036854775807ns'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='9223372036854775807ns'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='9223372036854775807ns'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set max_commit_delay='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_commit_delay='9223372036854775807ns'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-max_commit_delay='9223372036854775807ns'; 
+NEW_CONNECTION; +set statement_tag='tag1'; +NEW_CONNECTION; +SET STATEMENT_TAG='TAG1'; +NEW_CONNECTION; +set statement_tag='tag1'; +NEW_CONNECTION; + set statement_tag='tag1'; +NEW_CONNECTION; + set statement_tag='tag1'; +NEW_CONNECTION; + + + +set statement_tag='tag1'; +NEW_CONNECTION; +set statement_tag='tag1' ; +NEW_CONNECTION; +set statement_tag='tag1' ; +NEW_CONNECTION; +set statement_tag='tag1' + +; +NEW_CONNECTION; +set statement_tag='tag1'; +NEW_CONNECTION; +set statement_tag='tag1'; +NEW_CONNECTION; +set +statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag1' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag1'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag1'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag1'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag1'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag1'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set 
statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag1'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag1'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag1'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag1'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag1'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag1'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag1'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag1'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set statement_tag='tag1'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag1'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag1'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag1'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag1'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-statement_tag='tag1'; +NEW_CONNECTION; +set statement_tag='tag2'; +NEW_CONNECTION; +SET STATEMENT_TAG='TAG2'; +NEW_CONNECTION; +set statement_tag='tag2'; +NEW_CONNECTION; + set statement_tag='tag2'; +NEW_CONNECTION; + set statement_tag='tag2'; +NEW_CONNECTION; + + + +set statement_tag='tag2'; +NEW_CONNECTION; +set statement_tag='tag2' ; +NEW_CONNECTION; +set statement_tag='tag2' ; +NEW_CONNECTION; +set statement_tag='tag2' + +; +NEW_CONNECTION; +set statement_tag='tag2'; +NEW_CONNECTION; +set statement_tag='tag2'; +NEW_CONNECTION; +set +statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag2' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag2'%; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set%statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag2'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag2'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag2'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag2'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag2'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag2'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag2'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag2'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)statement_tag='tag2'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag2'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag2'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag2'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag2'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag2'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag2'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag2'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag2'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+/-set statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='tag2'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-statement_tag='tag2'; +NEW_CONNECTION; +set statement_tag=''; +NEW_CONNECTION; +SET STATEMENT_TAG=''; +NEW_CONNECTION; +set statement_tag=''; +NEW_CONNECTION; + set statement_tag=''; +NEW_CONNECTION; + set statement_tag=''; +NEW_CONNECTION; + + + +set statement_tag=''; +NEW_CONNECTION; +set statement_tag='' ; +NEW_CONNECTION; +set statement_tag='' ; +NEW_CONNECTION; +set statement_tag='' + +; +NEW_CONNECTION; +set statement_tag=''; +NEW_CONNECTION; +set statement_tag=''; +NEW_CONNECTION; +set +statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag=''%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag=''_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag=''&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag=''$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag=''@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@statement_tag=''; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag=''!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag=''*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag=''(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag=''); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag=''-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag=''+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag=''-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag=''/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag=''\; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag=''?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag=''-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag=''/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag=''/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-statement_tag=''; +NEW_CONNECTION; +set statement_tag='test_tag'; +NEW_CONNECTION; +SET STATEMENT_TAG='TEST_TAG'; +NEW_CONNECTION; +set statement_tag='test_tag'; +NEW_CONNECTION; + set statement_tag='test_tag'; +NEW_CONNECTION; + set statement_tag='test_tag'; +NEW_CONNECTION; + + + +set statement_tag='test_tag'; +NEW_CONNECTION; +set statement_tag='test_tag' ; +NEW_CONNECTION; +set statement_tag='test_tag' ; +NEW_CONNECTION; +set statement_tag='test_tag' + +; +NEW_CONNECTION; +set statement_tag='test_tag'; +NEW_CONNECTION; +set statement_tag='test_tag'; +NEW_CONNECTION; +set +statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='test_tag' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='test_tag'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set%statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='test_tag'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='test_tag'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='test_tag'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='test_tag'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='test_tag'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='test_tag'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='test_tag'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='test_tag'); +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set)statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='test_tag'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='test_tag'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='test_tag'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='test_tag'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='test_tag'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='test_tag'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='test_tag'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
statement_tag='test_tag'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_tag='test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_tag='test_tag'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-statement_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +SET TRANSACTION_TAG='TAG1'; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; + set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; + set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; + + + +set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='tag1' ; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='tag1' ; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='tag1' + +; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +set +transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag1' bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag1'%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag1'_; 
+NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag1'&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag1'$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag1'@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag1'!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag1'*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag1'(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set(transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag1'); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag1'-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag1'+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag1'-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag1'/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag1'\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\transaction_tag='tag1'; 
+NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag1'?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag1'-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag1'/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag1'/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +SET TRANSACTION_TAG='TAG2'; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; + set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; + set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; + + + +set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='tag2' ; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='tag2' ; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='tag2' + +; +NEW_CONNECTION; +set autocommit = false; 
+set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +set +transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag2' bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag2'%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag2'_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag2'&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag2'$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag2'@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set@transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag2'!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag2'*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag2'(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag2'); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag2'-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag2'+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit 
= false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag2'-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag2'/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag2'\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag2'?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag2'-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag2'/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set 
transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='tag2'/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +SET TRANSACTION_TAG=''; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; + set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; + set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; + + + +set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='' ; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='' ; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='' + +; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +set +transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='' bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set transaction_tag=''; 
+NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''); +NEW_CONNECTION; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +SET TRANSACTION_TAG='TEST_TAG'; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; + set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; + set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; + + + +set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='test_tag' ; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='test_tag' ; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='test_tag' + +; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +set +transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set transaction_tag='test_tag' bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'!; +NEW_CONNECTION; +set autocommit = false; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set!transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'-#; +NEW_CONNECTION; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'/-; 
+NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-transaction_tag='test_tag'; +NEW_CONNECTION; +set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +SET EXCLUDE_TXN_FROM_CHANGE_STREAMS = TRUE; +NEW_CONNECTION; +set exclude_txn_from_change_streams = true; +NEW_CONNECTION; + set exclude_txn_from_change_streams = true; +NEW_CONNECTION; + set exclude_txn_from_change_streams = true; +NEW_CONNECTION; + + + +set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +set exclude_txn_from_change_streams = true ; +NEW_CONNECTION; +set exclude_txn_from_change_streams = true ; +NEW_CONNECTION; +set exclude_txn_from_change_streams = true + +; +NEW_CONNECTION; +set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +set +exclude_txn_from_change_streams += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =&true; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +$set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set exclude_txn_from_change_streams =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set exclude_txn_from_change_streams = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =/-true; +NEW_CONNECTION; +set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +SET EXCLUDE_TXN_FROM_CHANGE_STREAMS = FALSE; +NEW_CONNECTION; +set exclude_txn_from_change_streams = false; +NEW_CONNECTION; + set exclude_txn_from_change_streams = false; +NEW_CONNECTION; + set exclude_txn_from_change_streams = false; +NEW_CONNECTION; + + + +set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +set exclude_txn_from_change_streams = false ; +NEW_CONNECTION; +set exclude_txn_from_change_streams = false ; +NEW_CONNECTION; +set exclude_txn_from_change_streams = false + +; +NEW_CONNECTION; +set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +set +exclude_txn_from_change_streams += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
exclude_txn_from_change_streams =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
exclude_txn_from_change_streams = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set 
exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =/-false; +NEW_CONNECTION; +set rpc_priority='HIGH'; +NEW_CONNECTION; +SET RPC_PRIORITY='HIGH'; +NEW_CONNECTION; +set rpc_priority='high'; +NEW_CONNECTION; + set rpc_priority='HIGH'; +NEW_CONNECTION; + set rpc_priority='HIGH'; +NEW_CONNECTION; + + + +set rpc_priority='HIGH'; +NEW_CONNECTION; +set rpc_priority='HIGH' ; +NEW_CONNECTION; +set rpc_priority='HIGH' ; +NEW_CONNECTION; +set rpc_priority='HIGH' + +; +NEW_CONNECTION; +set rpc_priority='HIGH'; +NEW_CONNECTION; +set rpc_priority='HIGH'; +NEW_CONNECTION; +set +rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set_rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set 
rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-rpc_priority='HIGH'; +NEW_CONNECTION; +set rpc_priority='MEDIUM'; +NEW_CONNECTION; +SET RPC_PRIORITY='MEDIUM'; +NEW_CONNECTION; +set rpc_priority='medium'; 
+NEW_CONNECTION; + set rpc_priority='MEDIUM'; +NEW_CONNECTION; + set rpc_priority='MEDIUM'; +NEW_CONNECTION; + + + +set rpc_priority='MEDIUM'; +NEW_CONNECTION; +set rpc_priority='MEDIUM' ; +NEW_CONNECTION; +set rpc_priority='MEDIUM' ; +NEW_CONNECTION; +set rpc_priority='MEDIUM' + +; +NEW_CONNECTION; +set rpc_priority='MEDIUM'; +NEW_CONNECTION; +set rpc_priority='MEDIUM'; +NEW_CONNECTION; +set +rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'!; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set!rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set\rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-rpc_priority='MEDIUM'; +NEW_CONNECTION; +set rpc_priority='LOW'; +NEW_CONNECTION; +SET RPC_PRIORITY='LOW'; +NEW_CONNECTION; +set rpc_priority='low'; +NEW_CONNECTION; + set rpc_priority='LOW'; +NEW_CONNECTION; + set rpc_priority='LOW'; +NEW_CONNECTION; + + + +set rpc_priority='LOW'; +NEW_CONNECTION; +set rpc_priority='LOW' ; +NEW_CONNECTION; +set rpc_priority='LOW' ; +NEW_CONNECTION; +set rpc_priority='LOW' + +; +NEW_CONNECTION; +set rpc_priority='LOW'; +NEW_CONNECTION; +set rpc_priority='LOW'; +NEW_CONNECTION; +set +rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set 
rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'-; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-rpc_priority='LOW'; +NEW_CONNECTION; +set 
rpc_priority='NULL'; +NEW_CONNECTION; +SET RPC_PRIORITY='NULL'; +NEW_CONNECTION; +set rpc_priority='null'; +NEW_CONNECTION; + set rpc_priority='NULL'; +NEW_CONNECTION; + set rpc_priority='NULL'; +NEW_CONNECTION; + + + +set rpc_priority='NULL'; +NEW_CONNECTION; +set rpc_priority='NULL' ; +NEW_CONNECTION; +set rpc_priority='NULL' ; +NEW_CONNECTION; +set rpc_priority='NULL' + +; +NEW_CONNECTION; +set rpc_priority='NULL'; +NEW_CONNECTION; +set rpc_priority='NULL'; +NEW_CONNECTION; +set +rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set rpc_priority='NULL'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'\; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set\rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-rpc_priority='NULL'; +NEW_CONNECTION; +set savepoint_support='ENABLED'; +NEW_CONNECTION; +SET SAVEPOINT_SUPPORT='ENABLED'; +NEW_CONNECTION; +set savepoint_support='enabled'; +NEW_CONNECTION; + set savepoint_support='ENABLED'; +NEW_CONNECTION; + set savepoint_support='ENABLED'; +NEW_CONNECTION; + + + +set savepoint_support='ENABLED'; +NEW_CONNECTION; +set savepoint_support='ENABLED' ; +NEW_CONNECTION; +set savepoint_support='ENABLED' ; +NEW_CONNECTION; +set savepoint_support='ENABLED' + +; +NEW_CONNECTION; +set savepoint_support='ENABLED'; +NEW_CONNECTION; +set savepoint_support='ENABLED'; +NEW_CONNECTION; +set +savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'%; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set 
savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set-/savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-savepoint_support='ENABLED'; +NEW_CONNECTION; +set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +SET SAVEPOINT_SUPPORT='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +set savepoint_support='fail_after_rollback'; +NEW_CONNECTION; + set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; + set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; + + + +set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +set savepoint_support='FAIL_AFTER_ROLLBACK' ; +NEW_CONNECTION; +set savepoint_support='FAIL_AFTER_ROLLBACK' ; +NEW_CONNECTION; +set savepoint_support='FAIL_AFTER_ROLLBACK' + +; +NEW_CONNECTION; +set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +set +savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +)set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'?; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set?savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +set savepoint_support='DISABLED'; +NEW_CONNECTION; +SET SAVEPOINT_SUPPORT='DISABLED'; +NEW_CONNECTION; +set savepoint_support='disabled'; +NEW_CONNECTION; + set savepoint_support='DISABLED'; +NEW_CONNECTION; + set savepoint_support='DISABLED'; +NEW_CONNECTION; + + + +set savepoint_support='DISABLED'; +NEW_CONNECTION; +set savepoint_support='DISABLED' ; +NEW_CONNECTION; +set savepoint_support='DISABLED' ; +NEW_CONNECTION; +set savepoint_support='DISABLED' + +; +NEW_CONNECTION; +set savepoint_support='DISABLED'; +NEW_CONNECTION; +set savepoint_support='DISABLED'; +NEW_CONNECTION; +set +savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'%; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set%savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set 
savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'-/; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-savepoint_support='DISABLED'; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +SET DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE = TRUE; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; + set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; + set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; + + + +set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = true ; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = true ; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = true + +; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +set +delay_transaction_start_until_first_write += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set delay_transaction_start_until_first_write =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
delay_transaction_start_until_first_write =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
delay_transaction_start_until_first_write =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =/-true; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +SET DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE = FALSE; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; + set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; 
+ set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; + + + +set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = false ; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = false ; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = false + +; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +set +delay_transaction_start_until_first_write += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
delay_transaction_start_until_first_write = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
delay_transaction_start_until_first_write = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
delay_transaction_start_until_first_write = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =/-false; +NEW_CONNECTION; +set keep_transaction_alive = true; +NEW_CONNECTION; +SET KEEP_TRANSACTION_ALIVE = TRUE; +NEW_CONNECTION; +set keep_transaction_alive = true; +NEW_CONNECTION; + set keep_transaction_alive = true; +NEW_CONNECTION; + set keep_transaction_alive = true; +NEW_CONNECTION; + + + +set keep_transaction_alive = true; +NEW_CONNECTION; +set keep_transaction_alive = true ; +NEW_CONNECTION; +set keep_transaction_alive = true ; +NEW_CONNECTION; +set keep_transaction_alive = true + +; +NEW_CONNECTION; +set keep_transaction_alive = true; +NEW_CONNECTION; +set keep_transaction_alive = true; +NEW_CONNECTION; +set +keep_transaction_alive += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set 
keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = 
true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =-/true; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =/-true; +NEW_CONNECTION; +set keep_transaction_alive = false; +NEW_CONNECTION; +SET KEEP_TRANSACTION_ALIVE = FALSE; +NEW_CONNECTION; +set keep_transaction_alive = false; +NEW_CONNECTION; + set keep_transaction_alive = false; +NEW_CONNECTION; + set keep_transaction_alive = false; +NEW_CONNECTION; + + + +set keep_transaction_alive = false; +NEW_CONNECTION; +set keep_transaction_alive = false ; +NEW_CONNECTION; +set keep_transaction_alive = false ; +NEW_CONNECTION; +set keep_transaction_alive = false + +; +NEW_CONNECTION; +set keep_transaction_alive = false; +NEW_CONNECTION; +set keep_transaction_alive = false; +NEW_CONNECTION; +set +keep_transaction_alive += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set 
keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
keep_transaction_alive = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set keep_transaction_alive =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =/-false; +NEW_CONNECTION; +set auto_batch_dml = true; +NEW_CONNECTION; +SET AUTO_BATCH_DML = TRUE; +NEW_CONNECTION; +set auto_batch_dml = true; +NEW_CONNECTION; + set auto_batch_dml = true; +NEW_CONNECTION; + set auto_batch_dml = true; +NEW_CONNECTION; + + + +set auto_batch_dml = true; +NEW_CONNECTION; +set auto_batch_dml = true ; +NEW_CONNECTION; +set auto_batch_dml = true ; +NEW_CONNECTION; +set auto_batch_dml = true + +; +NEW_CONNECTION; +set auto_batch_dml = true; +NEW_CONNECTION; +set auto_batch_dml = true; +NEW_CONNECTION; +set +auto_batch_dml += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
auto_batch_dml =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =-#true; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =/-true; +NEW_CONNECTION; +set auto_batch_dml = false; +NEW_CONNECTION; +SET AUTO_BATCH_DML = FALSE; +NEW_CONNECTION; +set auto_batch_dml = false; +NEW_CONNECTION; + set auto_batch_dml = false; +NEW_CONNECTION; + set auto_batch_dml = false; +NEW_CONNECTION; + + + +set auto_batch_dml = false; +NEW_CONNECTION; +set auto_batch_dml = false ; +NEW_CONNECTION; +set auto_batch_dml = false ; +NEW_CONNECTION; +set auto_batch_dml = false + +; +NEW_CONNECTION; +set auto_batch_dml = false; +NEW_CONNECTION; +set auto_batch_dml = false; 
+NEW_CONNECTION; +set +auto_batch_dml += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +(set auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +-/set auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml =/-false; +NEW_CONNECTION; +set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +SET AUTO_BATCH_DML_UPDATE_COUNT = 0; +NEW_CONNECTION; +set auto_batch_dml_update_count = 0; +NEW_CONNECTION; + set auto_batch_dml_update_count = 0; +NEW_CONNECTION; + set auto_batch_dml_update_count = 0; +NEW_CONNECTION; + + + +set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +set auto_batch_dml_update_count = 0 ; +NEW_CONNECTION; +set auto_batch_dml_update_count = 0 ; +NEW_CONNECTION; +set auto_batch_dml_update_count = 0 + +; +NEW_CONNECTION; +set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +set +auto_batch_dml_update_count += +0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 0 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 0%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =%0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set auto_batch_dml_update_count = 0_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =_0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 0&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =&0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 0$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =$0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 0@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =@0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 0!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =!0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 0*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =*0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 0(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =(0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 0); +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =)0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 0-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =-0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 0+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =+0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 0-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =-#0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 0/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =/0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 0\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =\0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 0?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =?0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 0-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count 
=-/0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 0/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =/#0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 0/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =/-0; +NEW_CONNECTION; +set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +SET AUTO_BATCH_DML_UPDATE_COUNT = 100; +NEW_CONNECTION; +set auto_batch_dml_update_count = 100; +NEW_CONNECTION; + set auto_batch_dml_update_count = 100; +NEW_CONNECTION; + set auto_batch_dml_update_count = 100; +NEW_CONNECTION; + + + +set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +set auto_batch_dml_update_count = 100 ; +NEW_CONNECTION; +set auto_batch_dml_update_count = 100 ; +NEW_CONNECTION; +set auto_batch_dml_update_count = 100 + +; +NEW_CONNECTION; +set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +set +auto_batch_dml_update_count += +100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 100 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 100%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =%100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 100_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
auto_batch_dml_update_count =_100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 100&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =&100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 100$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =$100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 100@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =@100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 100!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =!100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 100*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =*100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 100(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =(100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 100); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count 
=)100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 100-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =-100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 100+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =+100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 100-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =-#100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 100/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =/100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 100\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =\100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 100?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =?100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 100-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =-/100; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +/#set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 100/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =/#100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count = 100/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count =/-100; +NEW_CONNECTION; +set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +SET AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION = TRUE; +NEW_CONNECTION; +set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; + set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; + set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; + + + +set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +set auto_batch_dml_update_count_verification = true ; +NEW_CONNECTION; +set auto_batch_dml_update_count_verification = true ; +NEW_CONNECTION; +set auto_batch_dml_update_count_verification = true + +; +NEW_CONNECTION; +set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +set +auto_batch_dml_update_count_verification += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =%true; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +_set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set 
auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set auto_batch_dml_update_count_verification = true; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =/-true; +NEW_CONNECTION; +set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +SET AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION = FALSE; +NEW_CONNECTION; +set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; + set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; + set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; + + + +set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +set 
auto_batch_dml_update_count_verification = false ; +NEW_CONNECTION; +set auto_batch_dml_update_count_verification = false ; +NEW_CONNECTION; +set auto_batch_dml_update_count_verification = false + +; +NEW_CONNECTION; +set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +set +auto_batch_dml_update_count_verification += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+@set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set 
auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set 
auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_batch_dml_update_count_verification =/-false; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +SET LOCAL BATCH_DML_UPDATE_COUNT = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + + + +set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local batch_dml_update_count = 0 ; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local batch_dml_update_count = 0 ; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local batch_dml_update_count = 0 + +; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set +local +batch_dml_update_count += +0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set local 
batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 0 bar; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 0%; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =%0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 0_; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =_0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 0&; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =&0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 0$; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =$0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set local batch_dml_update_count = 
0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 0@; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =@0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 0!; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =!0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 0*; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =*0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 0(; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =(0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 0); +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =)0; +NEW_CONNECTION; +set 
readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 0-; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =-0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 0+; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =+0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 0-#; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =-#0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 0/; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =/0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 0\; +NEW_CONNECTION; +set readonly = false; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =\0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 0?; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =?0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 0-/; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =-/0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 0/#; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =/#0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set local batch_dml_update_count = 0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 0/-; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =/-0; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +SET LOCAL BATCH_DML_UPDATE_COUNT = 
100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + + + +set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local batch_dml_update_count = 100 ; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local batch_dml_update_count = 100 ; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local batch_dml_update_count = 100 + +; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set +local +batch_dml_update_count += +100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 100 bar; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 100%; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =%100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 100_; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =_100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 100&; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =&100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 100$; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =$100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 100@; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =@100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 100!; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =!100; +NEW_CONNECTION; +set readonly = false; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 100*; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =*100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 100(; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =(100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 100); +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =)100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 100-; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =-100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 100+; +NEW_CONNECTION; +set readonly = false; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =+100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 100-#; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =-#100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 100/; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =/100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 100\; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =\100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 100?; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =?100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; 
+set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 100-/; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =-/100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 100/#; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =/#100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set local batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count = 100/-; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local batch_dml_update_count =/-100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +SET BATCH_DML_UPDATE_COUNT = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + + + +set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set batch_dml_update_count = 1 ; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set batch_dml_update_count = 1 ; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set batch_dml_update_count = 1 
+ +; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set +batch_dml_update_count += +1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 1 bar; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 1%; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =%1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 1_; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =_1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 1&; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =&1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 1$; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =$1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 1@; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =@1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 1!; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =!1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 1*; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =*1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 1(; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =(1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 1); +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =)1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 1-; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =-1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 1+; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =+1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 1-#; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =-#1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 1/; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =/1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set batch_dml_update_count = 1; +NEW_CONNECTION; +set 
readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 1\; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =\1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 1?; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =?1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 1-/; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =-/1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 1/#; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =/#1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set batch_dml_update_count = 1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 1/-; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =/-1; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set batch_dml_update_count = 100; +NEW_CONNECTION; +set 
readonly = false; +set autocommit = false; +SET BATCH_DML_UPDATE_COUNT = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; + + + +set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set batch_dml_update_count = 100 ; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set batch_dml_update_count = 100 ; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set batch_dml_update_count = 100 + +; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +set +batch_dml_update_count += +100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 100 bar; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 100%; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =%100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 100_; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =_100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 100&; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =&100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 100$; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =$100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 100@; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =@100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 100!; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =!100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set batch_dml_update_count = 100; +NEW_CONNECTION; +set 
readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 100*; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =*100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 100(; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =(100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 100); +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =)100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 100-; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =-100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 100+; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =+100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set 
batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 100-#; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =-#100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 100/; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =/100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 100\; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =\100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 100?; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =?100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 100-/; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =-/100; +NEW_CONNECTION; +set readonly = false; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 100/#; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =/#100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set batch_dml_update_count = 100; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count = 100/-; +NEW_CONNECTION; +set readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set batch_dml_update_count =/-100; +NEW_CONNECTION; +show variable read_lock_mode; +NEW_CONNECTION; +SHOW VARIABLE READ_LOCK_MODE; +NEW_CONNECTION; +show variable read_lock_mode; +NEW_CONNECTION; + show variable read_lock_mode; +NEW_CONNECTION; + show variable read_lock_mode; +NEW_CONNECTION; + + + +show variable read_lock_mode; +NEW_CONNECTION; +show variable read_lock_mode ; +NEW_CONNECTION; +show variable read_lock_mode ; +NEW_CONNECTION; +show variable read_lock_mode + +; +NEW_CONNECTION; +show variable read_lock_mode; +NEW_CONNECTION; +show variable read_lock_mode; +NEW_CONNECTION; +show +variable +read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_lock_mode bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_lock_mode%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_lock_mode_; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show variable_read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_lock_mode&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_lock_mode$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_lock_mode@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_lock_mode!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_lock_mode*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_lock_mode(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_lock_mode); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_lock_mode-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show variable-read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_lock_mode+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_lock_mode-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_lock_mode/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_lock_mode\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_lock_mode?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_lock_mode-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_lock_mode/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable read_lock_mode/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show variable/-read_lock_mode; +NEW_CONNECTION; +set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +SET READ_LOCK_MODE='OPTIMISTIC'; +NEW_CONNECTION; +set read_lock_mode='optimistic'; +NEW_CONNECTION; + set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; + set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; + + + +set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +set read_lock_mode='OPTIMISTIC' ; +NEW_CONNECTION; +set read_lock_mode='OPTIMISTIC' ; +NEW_CONNECTION; +set read_lock_mode='OPTIMISTIC' + +; +NEW_CONNECTION; +set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +set +read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='OPTIMISTIC' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='OPTIMISTIC'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='OPTIMISTIC'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='OPTIMISTIC'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='OPTIMISTIC'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='OPTIMISTIC'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='OPTIMISTIC'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='OPTIMISTIC'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='OPTIMISTIC'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='OPTIMISTIC'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='OPTIMISTIC'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='OPTIMISTIC'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='OPTIMISTIC'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set-#read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='OPTIMISTIC'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='OPTIMISTIC'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='OPTIMISTIC'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='OPTIMISTIC'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='OPTIMISTIC'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='OPTIMISTIC'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +SET READ_LOCK_MODE='PESSIMISTIC'; +NEW_CONNECTION; +set read_lock_mode='pessimistic'; +NEW_CONNECTION; + set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; + set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; + + + +set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +set read_lock_mode='PESSIMISTIC' ; 
+NEW_CONNECTION; +set read_lock_mode='PESSIMISTIC' ; +NEW_CONNECTION; +set read_lock_mode='PESSIMISTIC' + +; +NEW_CONNECTION; +set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +set +read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='PESSIMISTIC' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='PESSIMISTIC'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='PESSIMISTIC'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='PESSIMISTIC'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='PESSIMISTIC'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='PESSIMISTIC'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='PESSIMISTIC'!; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set!read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='PESSIMISTIC'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='PESSIMISTIC'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='PESSIMISTIC'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='PESSIMISTIC'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='PESSIMISTIC'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='PESSIMISTIC'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='PESSIMISTIC'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set 
read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='PESSIMISTIC'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='PESSIMISTIC'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='PESSIMISTIC'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='PESSIMISTIC'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='PESSIMISTIC'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +SET READ_LOCK_MODE='UNSPECIFIED'; +NEW_CONNECTION; +set read_lock_mode='unspecified'; +NEW_CONNECTION; + set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; + set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; + + + +set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +set read_lock_mode='UNSPECIFIED' ; +NEW_CONNECTION; +set read_lock_mode='UNSPECIFIED' ; +NEW_CONNECTION; +set read_lock_mode='UNSPECIFIED' + +; +NEW_CONNECTION; +set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +set +read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set 
read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='UNSPECIFIED' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='UNSPECIFIED'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='UNSPECIFIED'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='UNSPECIFIED'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='UNSPECIFIED'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='UNSPECIFIED'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='UNSPECIFIED'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='UNSPECIFIED'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*read_lock_mode='UNSPECIFIED'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='UNSPECIFIED'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='UNSPECIFIED'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='UNSPECIFIED'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='UNSPECIFIED'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='UNSPECIFIED'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='UNSPECIFIED'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='UNSPECIFIED'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set read_lock_mode='UNSPECIFIED'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='UNSPECIFIED'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='UNSPECIFIED'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set read_lock_mode='UNSPECIFIED'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +show variable data_boost_enabled; +NEW_CONNECTION; +SHOW VARIABLE DATA_BOOST_ENABLED; +NEW_CONNECTION; +show variable data_boost_enabled; +NEW_CONNECTION; + show variable data_boost_enabled; +NEW_CONNECTION; + show variable data_boost_enabled; +NEW_CONNECTION; + + + +show variable data_boost_enabled; +NEW_CONNECTION; +show variable data_boost_enabled ; +NEW_CONNECTION; +show variable data_boost_enabled ; +NEW_CONNECTION; +show variable data_boost_enabled + +; +NEW_CONNECTION; +show variable data_boost_enabled; +NEW_CONNECTION; +show variable data_boost_enabled; +NEW_CONNECTION; +show +variable +data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable%data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable data_boost_enabled); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+/#show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-data_boost_enabled; +NEW_CONNECTION; +set data_boost_enabled = true; +NEW_CONNECTION; +SET DATA_BOOST_ENABLED = TRUE; +NEW_CONNECTION; +set data_boost_enabled = true; +NEW_CONNECTION; + set data_boost_enabled = true; +NEW_CONNECTION; + set data_boost_enabled = true; +NEW_CONNECTION; + + + +set data_boost_enabled = true; +NEW_CONNECTION; +set data_boost_enabled = true ; +NEW_CONNECTION; +set data_boost_enabled = true ; +NEW_CONNECTION; +set data_boost_enabled = true + +; +NEW_CONNECTION; +set data_boost_enabled = true; +NEW_CONNECTION; +set data_boost_enabled = true; +NEW_CONNECTION; +set +data_boost_enabled += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
data_boost_enabled =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true+; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =/-true; +NEW_CONNECTION; +set data_boost_enabled = false; +NEW_CONNECTION; +SET DATA_BOOST_ENABLED = FALSE; 
+NEW_CONNECTION; +set data_boost_enabled = false; +NEW_CONNECTION; + set data_boost_enabled = false; +NEW_CONNECTION; + set data_boost_enabled = false; +NEW_CONNECTION; + + + +set data_boost_enabled = false; +NEW_CONNECTION; +set data_boost_enabled = false ; +NEW_CONNECTION; +set data_boost_enabled = false ; +NEW_CONNECTION; +set data_boost_enabled = false + +; +NEW_CONNECTION; +set data_boost_enabled = false; +NEW_CONNECTION; +set data_boost_enabled = false; +NEW_CONNECTION; +set +data_boost_enabled += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =@false; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false/; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =/-false; +NEW_CONNECTION; +show variable auto_partition_mode; +NEW_CONNECTION; +SHOW VARIABLE AUTO_PARTITION_MODE; +NEW_CONNECTION; +show variable auto_partition_mode; +NEW_CONNECTION; + show variable auto_partition_mode; +NEW_CONNECTION; + show variable auto_partition_mode; +NEW_CONNECTION; + + + +show variable auto_partition_mode; +NEW_CONNECTION; +show variable auto_partition_mode ; +NEW_CONNECTION; +show variable auto_partition_mode ; +NEW_CONNECTION; +show variable auto_partition_mode + +; +NEW_CONNECTION; +show variable auto_partition_mode; +NEW_CONNECTION; +show variable auto_partition_mode; 
+NEW_CONNECTION; +show +variable +auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode*; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable 
auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-auto_partition_mode; +NEW_CONNECTION; +set auto_partition_mode = true; +NEW_CONNECTION; +SET AUTO_PARTITION_MODE = TRUE; +NEW_CONNECTION; +set auto_partition_mode = true; +NEW_CONNECTION; + set auto_partition_mode = true; +NEW_CONNECTION; + set auto_partition_mode = true; +NEW_CONNECTION; + + + +set auto_partition_mode = true; +NEW_CONNECTION; +set auto_partition_mode = true ; +NEW_CONNECTION; +set auto_partition_mode = true ; +NEW_CONNECTION; +set auto_partition_mode = true + +; +NEW_CONNECTION; +set auto_partition_mode = true; +NEW_CONNECTION; +set auto_partition_mode = true; +NEW_CONNECTION; +set +auto_partition_mode += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set auto_partition_mode =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set auto_partition_mode = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +/#set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =/-true; +NEW_CONNECTION; +set auto_partition_mode = false; +NEW_CONNECTION; +SET AUTO_PARTITION_MODE = FALSE; +NEW_CONNECTION; +set auto_partition_mode = false; +NEW_CONNECTION; + set auto_partition_mode = false; +NEW_CONNECTION; + set auto_partition_mode = false; +NEW_CONNECTION; + + + +set auto_partition_mode = false; +NEW_CONNECTION; +set auto_partition_mode = false ; +NEW_CONNECTION; +set auto_partition_mode = false ; +NEW_CONNECTION; +set auto_partition_mode = false + +; +NEW_CONNECTION; +set auto_partition_mode = false; +NEW_CONNECTION; +set auto_partition_mode = false; +NEW_CONNECTION; +set +auto_partition_mode += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = 
false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set 
auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = false/-; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =/-false; +NEW_CONNECTION; +show variable max_partitions; +NEW_CONNECTION; +SHOW VARIABLE MAX_PARTITIONS; +NEW_CONNECTION; +show variable max_partitions; +NEW_CONNECTION; + show variable max_partitions; +NEW_CONNECTION; + show variable max_partitions; +NEW_CONNECTION; + + + +show variable max_partitions; +NEW_CONNECTION; +show variable max_partitions ; +NEW_CONNECTION; +show variable max_partitions ; +NEW_CONNECTION; +show variable max_partitions + +; +NEW_CONNECTION; +show variable max_partitions; +NEW_CONNECTION; +show variable max_partitions; +NEW_CONNECTION; +show +variable +max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions@; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions/; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-max_partitions; +NEW_CONNECTION; +set max_partitions = 0; +NEW_CONNECTION; +SET MAX_PARTITIONS = 0; +NEW_CONNECTION; +set max_partitions = 0; +NEW_CONNECTION; + set max_partitions = 0; +NEW_CONNECTION; + set max_partitions = 0; +NEW_CONNECTION; + + + +set max_partitions = 0; +NEW_CONNECTION; +set max_partitions = 0 ; +NEW_CONNECTION; +set max_partitions = 0 ; +NEW_CONNECTION; +set max_partitions = 0 + +; +NEW_CONNECTION; +set max_partitions = 0; +NEW_CONNECTION; +set max_partitions = 0; +NEW_CONNECTION; +set +max_partitions += +0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set max_partitions = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
max_partitions = 0 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set max_partitions = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =%0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set max_partitions = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =_0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set max_partitions = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =&0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set max_partitions = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =$0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set max_partitions = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =@0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set max_partitions = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =!0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set max_partitions = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =*0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set max_partitions = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =(0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set max_partitions = 0; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =)0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set max_partitions = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =-0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set max_partitions = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =+0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set max_partitions = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =-#0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set max_partitions = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =/0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set max_partitions = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =\0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set max_partitions = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =?0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set max_partitions = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =-/0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set max_partitions = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0/#; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =/#0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set max_partitions = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =/-0; +NEW_CONNECTION; +set max_partitions = 10; +NEW_CONNECTION; +SET MAX_PARTITIONS = 10; +NEW_CONNECTION; +set max_partitions = 10; +NEW_CONNECTION; + set max_partitions = 10; +NEW_CONNECTION; + set max_partitions = 10; +NEW_CONNECTION; + + + +set max_partitions = 10; +NEW_CONNECTION; +set max_partitions = 10 ; +NEW_CONNECTION; +set max_partitions = 10 ; +NEW_CONNECTION; +set max_partitions = 10 + +; +NEW_CONNECTION; +set max_partitions = 10; +NEW_CONNECTION; +set max_partitions = 10; +NEW_CONNECTION; +set +max_partitions += +10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 10 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 10%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =%10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 10_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =_10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 10&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =&10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 10$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =$10; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +@set max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 10@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =@10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 10!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =!10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 10*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =*10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 10(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =(10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 10); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =)10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 10-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =-10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 10+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =+10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 10-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =-#10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set max_partitions = 10/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =/10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 10\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =\10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 10?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =?10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 10-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =-/10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 10/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =/#10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 10/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =/-10; +NEW_CONNECTION; +show variable max_partitioned_parallelism; +NEW_CONNECTION; +SHOW VARIABLE MAX_PARTITIONED_PARALLELISM; +NEW_CONNECTION; +show variable max_partitioned_parallelism; +NEW_CONNECTION; + show variable max_partitioned_parallelism; +NEW_CONNECTION; + show variable max_partitioned_parallelism; +NEW_CONNECTION; + + + +show variable max_partitioned_parallelism; +NEW_CONNECTION; +show variable max_partitioned_parallelism ; +NEW_CONNECTION; +show variable max_partitioned_parallelism ; +NEW_CONNECTION; +show variable max_partitioned_parallelism + +; +NEW_CONNECTION; +show variable max_partitioned_parallelism; +NEW_CONNECTION; +show variable 
max_partitioned_parallelism; +NEW_CONNECTION; +show +variable +max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable!max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism/; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show variable/max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-max_partitioned_parallelism; +NEW_CONNECTION; +set max_partitioned_parallelism = 0; +NEW_CONNECTION; +SET MAX_PARTITIONED_PARALLELISM = 0; +NEW_CONNECTION; +set max_partitioned_parallelism = 0; +NEW_CONNECTION; + set max_partitioned_parallelism = 0; +NEW_CONNECTION; + set max_partitioned_parallelism = 0; +NEW_CONNECTION; + + + +set max_partitioned_parallelism = 0; +NEW_CONNECTION; +set max_partitioned_parallelism = 0 ; +NEW_CONNECTION; +set max_partitioned_parallelism = 0 ; +NEW_CONNECTION; +set 
max_partitioned_parallelism = 0 + +; +NEW_CONNECTION; +set max_partitioned_parallelism = 0; +NEW_CONNECTION; +set max_partitioned_parallelism = 0; +NEW_CONNECTION; +set +max_partitioned_parallelism += +0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set max_partitioned_parallelism = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set max_partitioned_parallelism = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =%0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set max_partitioned_parallelism = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =_0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set max_partitioned_parallelism = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =&0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set max_partitioned_parallelism = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =$0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set max_partitioned_parallelism = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =@0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set max_partitioned_parallelism = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0!; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set max_partitioned_parallelism =!0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set max_partitioned_parallelism = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =*0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set max_partitioned_parallelism = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =(0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set max_partitioned_parallelism = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =)0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set max_partitioned_parallelism = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =-0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set max_partitioned_parallelism = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =+0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set max_partitioned_parallelism = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =-#0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set max_partitioned_parallelism = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =/0; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +\set max_partitioned_parallelism = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =\0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set max_partitioned_parallelism = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =?0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set max_partitioned_parallelism = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =-/0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set max_partitioned_parallelism = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =/#0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set max_partitioned_parallelism = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =/-0; +NEW_CONNECTION; +set max_partitioned_parallelism = 10; +NEW_CONNECTION; +SET MAX_PARTITIONED_PARALLELISM = 10; +NEW_CONNECTION; +set max_partitioned_parallelism = 10; +NEW_CONNECTION; + set max_partitioned_parallelism = 10; +NEW_CONNECTION; + set max_partitioned_parallelism = 10; +NEW_CONNECTION; + + + +set max_partitioned_parallelism = 10; +NEW_CONNECTION; +set max_partitioned_parallelism = 10 ; +NEW_CONNECTION; +set max_partitioned_parallelism = 10 ; +NEW_CONNECTION; +set max_partitioned_parallelism = 10 + +; +NEW_CONNECTION; +set max_partitioned_parallelism = 10; +NEW_CONNECTION; +set max_partitioned_parallelism = 10; 
+NEW_CONNECTION; +set +max_partitioned_parallelism += +10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 10 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 10%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =%10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 10_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =_10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 10&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =&10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 10$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =$10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 10@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =@10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 10!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =!10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set 
max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 10*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =*10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 10(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =(10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 10); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =)10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 10-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =-10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 10+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =+10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 10-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =-#10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 10/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =/10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set max_partitioned_parallelism = 10; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 10\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =\10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 10?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =?10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 10-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =-/10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 10/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =/#10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 10/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =/-10; +NEW_CONNECTION; +set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +SET PROTO_DESCRIPTORS='PROTODESCRIPTORSBASE64'; +NEW_CONNECTION; +set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; + set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; + set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; + + + +set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +set proto_descriptors='protodescriptorsbase64' ; +NEW_CONNECTION; +set proto_descriptors='protodescriptorsbase64' ; +NEW_CONNECTION; +set proto_descriptors='protodescriptorsbase64' + +; +NEW_CONNECTION; +set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +set 
proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +set +proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors='protodescriptorsbase64' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors='protodescriptorsbase64'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors='protodescriptorsbase64'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors='protodescriptorsbase64'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors='protodescriptorsbase64'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors='protodescriptorsbase64'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set proto_descriptors='protodescriptorsbase64'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors='protodescriptorsbase64'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors='protodescriptorsbase64'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors='protodescriptorsbase64'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors='protodescriptorsbase64'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors='protodescriptorsbase64'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors='protodescriptorsbase64'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set-#proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors='protodescriptorsbase64'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors='protodescriptorsbase64'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors='protodescriptorsbase64'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors='protodescriptorsbase64'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors='protodescriptorsbase64'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors='protodescriptorsbase64'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-proto_descriptors='protodescriptorsbase64'; +NEW_CONNECTION; +set 
proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +SET PROTO_DESCRIPTORS_FILE_PATH='SRC/TEST/RESOURCES/COM/GOOGLE/CLOUD/SPANNER/DESCRIPTORS.PB'; +NEW_CONNECTION; +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; + set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; + set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; + + + +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb' ; +NEW_CONNECTION; +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb' ; +NEW_CONNECTION; +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb' + +; +NEW_CONNECTION; +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +set +proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set%proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set 
proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set\proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; +NEW_CONNECTION; +show variable proto_descriptors; +NEW_CONNECTION; +SHOW VARIABLE PROTO_DESCRIPTORS; 
+NEW_CONNECTION; +show variable proto_descriptors; +NEW_CONNECTION; + show variable proto_descriptors; +NEW_CONNECTION; + show variable proto_descriptors; +NEW_CONNECTION; + + + +show variable proto_descriptors; +NEW_CONNECTION; +show variable proto_descriptors ; +NEW_CONNECTION; +show variable proto_descriptors ; +NEW_CONNECTION; +show variable proto_descriptors + +; +NEW_CONNECTION; +show variable proto_descriptors; +NEW_CONNECTION; +show variable proto_descriptors; +NEW_CONNECTION; +show +variable +proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@proto_descriptors; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors/; +NEW_CONNECTION; 
+@EXPECT EXCEPTION UNIMPLEMENTED +show variable/proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable proto_descriptors; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-proto_descriptors; +NEW_CONNECTION; +show variable proto_descriptors_file_path; +NEW_CONNECTION; +SHOW VARIABLE PROTO_DESCRIPTORS_FILE_PATH; +NEW_CONNECTION; +show variable proto_descriptors_file_path; +NEW_CONNECTION; + show variable proto_descriptors_file_path; +NEW_CONNECTION; + show variable proto_descriptors_file_path; +NEW_CONNECTION; + + + +show variable proto_descriptors_file_path; +NEW_CONNECTION; +show variable proto_descriptors_file_path ; +NEW_CONNECTION; +show variable proto_descriptors_file_path ; +NEW_CONNECTION; +show variable proto_descriptors_file_path + +; +NEW_CONNECTION; +show variable proto_descriptors_file_path; 
+NEW_CONNECTION; +show variable proto_descriptors_file_path; +NEW_CONNECTION; +show +variable +proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path!; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show variable!proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path/; +NEW_CONNECTION; 
+@EXPECT EXCEPTION UNIMPLEMENTED +show variable/proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable proto_descriptors_file_path; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-proto_descriptors_file_path; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/CommentsTest.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/CommentsTest.sql new file mode 100644 index 000000000000..916a35d9ef18 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/CommentsTest.sql @@ -0,0 +1,302 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed 
under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +@EXPECT 'SELECT 1'; +SELECT 1; +-- This is a single line comment +SELECT 1; +# This is a single line comment +SELECT 1; +/* This is a multi line comment on one line */ +SELECT 1; +/* This + is + a + multiline + comment +*/ +SELECT 1; +/* This + * is + * a + * multiline + * comment + */ +SELECT 1; +/** This is a javadoc style comment on one line*/ +SELECT 1; +/** This + is + a + javadoc + style + comment + on + multiple + lines +*/ +SELECT 1; +/** This + * is + * a + * javadoc + * style + * comment + * on + * multiple + * lines + */ +SELECT 1; + +@EXPECT 'SELECT +1'; +-- First comment +SELECT-- second comment +1; +# First comment +SELECT# second comment +1; +-- First comment +SELECT-- second comment +1--third comment +; +# First comment +SELECT# second comment +1#Third comment +; +/* First comment */ +SELECT/* second comment */ +1; +/* First comment */ +SELECT/* second comment */ +1/* Third comment */ +; + + +@EXPECT 'SELECT +1'; +-- First comment +SELECT -- second comment +1 ; +# First comment +SELECT # second comment +1 ; +-- First comment +SELECT -- second comment +1 --third comment +; +# First comment +SELECT # second comment +1 #Third comment +; +/* First comment */ +SELECT /* second comment */ +1 ; +/* First comment */ +SELECT /* second comment */ +1 /* Third comment */ +; + +@EXPECT 'SELECT "TEST -- This is not a comment"'; +SELECT "TEST -- This is not a comment"; +-- This is a comment +SELECT "TEST -- This is 
not a comment"; +-- This is a comment +SELECT "TEST -- This is not a comment" -- This is a comment; + +@EXPECT 'SELECT "TEST # This is not a comment"'; +SELECT "TEST # This is not a comment"; +# This is a comment +SELECT "TEST # This is not a comment"; +# This is a comment +SELECT "TEST # This is not a comment" # This is a comment; + +@EXPECT 'SELECT "TEST /* This is not a comment */"'; +SELECT "TEST /* This is not a comment */"; +/* This is a comment */ +SELECT "TEST /* This is not a comment */"; +/* This is a comment */ +SELECT "TEST /* This is not a comment */" /* This is a comment */; + +@EXPECT 'SELECT 'TEST -- This is not a comment''; +SELECT 'TEST -- This is not a comment'; +-- This is a comment +SELECT 'TEST -- This is not a comment'; +-- This is a comment +SELECT 'TEST -- This is not a comment' -- This is a comment; + +@EXPECT 'SELECT 'TEST # This is not a comment''; +SELECT 'TEST # This is not a comment'; +# This is a comment +SELECT 'TEST # This is not a comment'; +# This is a comment +SELECT 'TEST # This is not a comment' # This is a comment; + +@EXPECT 'SELECT 'TEST /* This is not a comment */''; +SELECT 'TEST /* This is not a comment */'; +/* This is a comment */ +SELECT 'TEST /* This is not a comment */'; +/* This is a comment */ +SELECT 'TEST /* This is not a comment */' /* This is a comment */; + +@EXPECT 'SELECT '''TEST +-- This is not a comment +''''; +SELECT '''TEST +-- This is not a comment +'''; +-- This is a comment +SELECT '''TEST +-- This is not a comment +'''; +-- This is a comment +SELECT '''TEST +-- This is not a comment +''' -- This is a comment; + +@EXPECT 'SELECT '''TEST +# This is not a comment +''''; +SELECT '''TEST +# This is not a comment +'''; +# This is a comment +SELECT '''TEST +# This is not a comment +'''; +# This is a comment +SELECT '''TEST +# This is not a comment +''' # This is a comment; + +@EXPECT 'SELECT '''TEST +/* This is not a comment */ +''''; +SELECT '''TEST +/* This is not a comment */ +'''; +/* This is a comment 
*/ +SELECT '''TEST +/* This is not a comment */ +'''; +/* This is a comment */ +SELECT '''TEST +/* This is not a comment */ +''' /* This is a comment */; + + +@EXPECT 'SELECT """TEST +-- This is not a comment +"""'; +SELECT """TEST +-- This is not a comment +"""; +-- This is a comment +SELECT """TEST +-- This is not a comment +"""; +-- This is a comment +SELECT """TEST +-- This is not a comment +""" -- This is a comment; + +@EXPECT 'SELECT """TEST +# This is not a comment +"""'; +SELECT """TEST +# This is not a comment +"""; +# This is a comment +SELECT """TEST +# This is not a comment +"""; +# This is a comment +SELECT """TEST +# This is not a comment +""" # This is a comment; + +@EXPECT 'SELECT """TEST +/* This is not a comment */ +"""'; +SELECT """TEST +/* This is not a comment */ +"""; +/* This is a comment */ +SELECT """TEST +/* This is not a comment */ +"""; +/* This is a comment */ +SELECT """TEST +/* This is not a comment */ +""" /* This is a comment */; + + + +@EXPECT 'SELECT ```TEST +-- This is not a comment +```'; +SELECT ```TEST +-- This is not a comment +```; +-- This is a comment +SELECT ```TEST +-- This is not a comment +```; +-- This is a comment +SELECT ```TEST +-- This is not a comment +``` -- This is a comment; + +@EXPECT 'SELECT ```TEST +# This is not a comment +```'; +SELECT ```TEST +# This is not a comment +```; +# This is a comment +SELECT ```TEST +# This is not a comment +```; +# This is a comment +SELECT ```TEST +# This is not a comment +``` # This is a comment; + +@EXPECT 'SELECT ```TEST +/* This is not a comment */ +```'; +SELECT ```TEST +/* This is not a comment */ +```; +/* This is a comment */ +SELECT ```TEST +/* This is not a comment */ +```; +/* This is a comment */ +SELECT ```TEST +/* This is not a comment */ +``` /* This is a comment */; + + +@EXPECT 'SELECT 1'; +/* This is a comment /* this is still a comment */ +SELECT 1; +/** This is a javadoc style comment /* this is still a comment */ +SELECT 1; +/** This is a javadoc style 
comment /** this is still a comment */ +SELECT 1; +/** This is a javadoc style comment /** this is still a comment **/ +SELECT 1; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ConnectionImplGeneratedSqlScriptTest.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ConnectionImplGeneratedSqlScriptTest.sql new file mode 100644 index 000000000000..68c9297298f8 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ConnectionImplGeneratedSqlScriptTest.sql @@ -0,0 +1,13655 @@ +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET 
AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET READONLY=FALSE; +@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; 
+NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:17.951000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:17.951000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:17.951000000Z'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET 
READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE 
STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET 
AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 
'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET READONLY=FALSE; +@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.067000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:18.067000000Z' +SHOW 
VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.067000000Z'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET 
READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 
'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; 
+BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 
+SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; 
+ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET READONLY=FALSE; +@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.165000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:18.165000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.165000000Z'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN 
TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; 
+BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW 
VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +@EXPECT 
EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 
'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET TRANSACTION READ ONLY; 
+NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET READONLY=FALSE; +@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.265000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:18.265000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; 
+SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.265000000Z'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET 
AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET 
STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET 
TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; 
+SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READONLY=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' 
+SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.347000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:18.347000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.347000000Z'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET 
READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE 
STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET 
AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 
'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READONLY=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='STRONG'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.418000000Z'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET 
READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.418000000Z'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT 
RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 
'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +SELECT 1 AS TEST; 
+NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 
1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READONLY=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='STRONG'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.495000000Z'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.495000000Z'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 
1 AS TEST; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; 
+SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 
'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT 
EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READONLY=FALSE; +@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.566000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:18.566000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.566000000Z'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET 
READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT 
RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET 
AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET 
READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET READONLY=FALSE; +@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.639000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:18.639000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT 
EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.639000000Z'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET 
OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +COMMIT; +NEW_CONNECTION; +SET 
READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; 
+SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); 
+RUN BATCH; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION 
FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET READONLY=FALSE; +@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.697000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:18.697000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.697000000Z'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE 
TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; 
+SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 
'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; 
+@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET 
bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET 
READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET 
AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READONLY=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='STRONG'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.757000000Z'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.757000000Z'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, 
name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 
'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; 
+CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION 
FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT RESULT_SET 
'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READONLY=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='STRONG'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.811000000Z'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.811000000Z'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 
'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 
'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET 
TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; 
+COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 
'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET READONLY=FALSE; +@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.864000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:18.864000000Z' +SHOW VARIABLE 
READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.864000000Z'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 
'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 
'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT 
EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; 
+NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET 
READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READONLY=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.923000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:18.923000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.923000000Z'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' 
+SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET 
STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET 
READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET 
AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION 
+CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET READONLY=FALSE; +@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.973000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:18.973000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.973000000Z'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW 
VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE 
OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET 
READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; 
+SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN 
TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; 
+BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET READONLY=FALSE; +@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.027000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.027000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.027000000Z'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 
'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN 
TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 
'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET 
AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; 
+COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo 
SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET READONLY=FALSE; +@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.098000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.098000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.098000000Z'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET 
READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP' +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; 
+NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP' +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 
'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; 
+NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; 
+SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READONLY=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.162000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.162000000Z' +SHOW 
VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.162000000Z'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN 
TRANSACTION; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET 
AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 
AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET 
READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READONLY=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='STRONG'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.214000000Z'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.214000000Z'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 
+SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET 
AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; 
+@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET 
AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; 
+SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READONLY=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='STRONG'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.277000000Z'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.277000000Z'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; 
+SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 
'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET 
READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ WRITE; 
+NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READONLY=FALSE; +@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.335000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.335000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.335000000Z'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; 
+SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE 
STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; 
+SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET 
AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READONLY=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='STRONG'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.384000000Z'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION 
+SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.384000000Z'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT 
RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; 
+SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; 
+BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ 
ONLY; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READONLY=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.434000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.434000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.434000000Z'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET 
TRANSACTION READ ONLY; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE 
OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +COMMIT; 
+NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET 
AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET AUTOCOMMIT_DML_MODE='TRANSACTIONAL'; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','TRANSACTIONAL' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','PARTITIONED_NON_ATOMIC' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +SET AUTOCOMMIT_DML_MODE='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET 
READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET READONLY=FALSE; 
+@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.490000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.490000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.490000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:19.490000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP' +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET 
READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP' +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE 
STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; 
+NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; 
+NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=FALSE; +SET 
AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READONLY=FALSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.550000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.550000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.550000000Z'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW 
VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW 
VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; 
+SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET 
AUTOCOMMIT_DML_MODE='TRANSACTIONAL'; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','TRANSACTIONAL' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','PARTITIONED_NON_ATOMIC' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +SET AUTOCOMMIT_DML_MODE='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET 
READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET READONLY=FALSE; +@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.600000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 
2026-01-05T11:33:19.600000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.600000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:19.600000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT 
RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT 
RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION 
FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET 
AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET AUTOCOMMIT_DML_MODE='TRANSACTIONAL'; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','TRANSACTIONAL' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','PARTITIONED_NON_ATOMIC' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +SET AUTOCOMMIT_DML_MODE='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT RESULT_SET 
'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET READONLY=FALSE; +@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.656000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.656000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.656000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:19.656000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; 
+SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP' +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP' +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; 
+UPDATE foo SET bar=1; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET 
AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 
1 AS TEST; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET AUTOCOMMIT_DML_MODE='TRANSACTIONAL'; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','TRANSACTIONAL' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','PARTITIONED_NON_ATOMIC' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +SET AUTOCOMMIT_DML_MODE='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS 
TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET READONLY=FALSE; +@EXPECT RESULT_SET 
'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.715000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.715000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.715000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:19.715000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT 
RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 
'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 
+SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; 
+SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET AUTOCOMMIT_DML_MODE='TRANSACTIONAL'; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','TRANSACTIONAL' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','PARTITIONED_NON_ATOMIC' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +SET AUTOCOMMIT_DML_MODE='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +START BATCH DDL; 
+@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET READONLY=FALSE; +@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.770000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.770000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.770000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:19.770000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET 
READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET 
OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; 
+SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; 
+NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET AUTOCOMMIT_DML_MODE='TRANSACTIONAL'; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','TRANSACTIONAL' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','PARTITIONED_NON_ATOMIC' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +SET AUTOCOMMIT_DML_MODE='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DML; +@EXPECT EXCEPTION 
FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET READONLY=FALSE; +@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.824000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.824000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.824000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:19.824000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 
'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE 
STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET 
READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION 
FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET 
READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET READONLY=FALSE; +@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.873000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.873000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.873000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:19.873000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; 
+NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; 
+@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET 
READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); 
+NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET 
TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SET READONLY=FALSE; +@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.925000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.925000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.925000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:19.925000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 
1 AS TEST; +COMMIT; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; 
+BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 
'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT 
BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; 
+NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READONLY=FALSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.981000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.981000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.981000000Z'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET 
AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 
'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET 
READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; 
+@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET READONLY=FALSE; +@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 
'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:20.031000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:20.031000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:20.031000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:20.031000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 
'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 
'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; 
+SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET 
READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET READONLY=FALSE; +@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:20.085000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:20.085000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:20.085000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:20.085000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET 
READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET 
AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET 
AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION' +SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET READONLY=TRUE; +SET 
AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READONLY=FALSE; +@EXPECT RESULT_SET 'READONLY',FALSE +SHOW VARIABLE READONLY; +SET READONLY=TRUE; +@EXPECT RESULT_SET 'READONLY',TRUE +SHOW 
VARIABLE READONLY; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:20.136000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:20.136000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:20.136000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:20.136000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_RESPONSE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','2' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','latest' +SHOW VARIABLE OPTIMIZER_VERSION; +SET OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' 
+SHOW VARIABLE OPTIMIZER_VERSION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +SET OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET 
STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS' +SHOW VARIABLE READ_ONLY_STALENESS; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET READONLY=TRUE; +SET AUTOCOMMIT=TRUE; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITDdlTest.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITDdlTest.sql new file mode 100644 index 000000000000..2efc59ed36e4 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITDdlTest.sql @@ -0,0 +1,230 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +NEW_CONNECTION; +-- Create table in autocommit mode + +@EXPECT RESULT_SET 'AUTOCOMMIT',true +SHOW VARIABLE AUTOCOMMIT; +@EXPECT RESULT_SET 'READONLY',false +SHOW VARIABLE READONLY; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='VALID_DDL_AUTOCOMMIT'; + +CREATE TABLE VALID_DDL_AUTOCOMMIT (ID INT64 NOT NULL, BAR STRING(100)) PRIMARY KEY (ID); + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1 AS EXPECTED +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='VALID_DDL_AUTOCOMMIT'; + + +NEW_CONNECTION; +-- Try to create a table with an invalid SQL statement + +@EXPECT RESULT_SET 'AUTOCOMMIT',true +SHOW VARIABLE AUTOCOMMIT; +@EXPECT RESULT_SET 'READONLY',false +SHOW VARIABLE READONLY; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='INVALID_DDL_AUTOCOMMIT'; + +@EXPECT EXCEPTION INVALID_ARGUMENT +CREATE TABLE INVALID_DDL_AUTOCOMMIT (ID INT64 NOT NULL, BAZ STRING(100), MISSING_DATA_TYPE_COL) PRIMARY KEY (ID); + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='INVALID_DDL_AUTOCOMMIT'; + + +NEW_CONNECTION; +-- Try to create a new table in a DDL_BATCH + +-- Check that the table is not present +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='VALID_SINGLE_DDL_IN_DDL_BATCH'; + +-- Change to DDL batch mode +SET AUTOCOMMIT = FALSE; +START BATCH DDL; + +-- Execute the create table statement, but do 
not commit yet +CREATE TABLE VALID_SINGLE_DDL_IN_DDL_BATCH (ID INT64 NOT NULL, BAR STRING(100)) PRIMARY KEY (ID); + +NEW_CONNECTION; +-- Transaction has not been committed, so the table should not be present +-- We do this in a new transaction, as selects are not allowed in a DDL_BATCH +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='VALID_SINGLE_DDL_IN_DDL_BATCH'; + +-- Change to DDL batch mode again +SET AUTOCOMMIT = FALSE; +START BATCH DDL; + +-- Execute the create table statement and do a commit +CREATE TABLE VALID_SINGLE_DDL_IN_DDL_BATCH (ID INT64 NOT NULL, BAR STRING(100)) PRIMARY KEY (ID); +RUN BATCH; + +-- Go back to AUTOCOMMIT mode and check that the table was created +SET AUTOCOMMIT = TRUE; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1 AS EXPECTED +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='VALID_SINGLE_DDL_IN_DDL_BATCH'; + + +NEW_CONNECTION; +-- Create two tables in one batch + +-- First ensure that the tables do not exist +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='VALID_MULTIPLE_DDL_IN_DDL_BATCH_1' OR TABLE_NAME='VALID_MULTIPLE_DDL_IN_DDL_BATCH_2'; + +-- Change to DDL batch mode +SET AUTOCOMMIT = FALSE; +START BATCH DDL; + +-- Create two tables +CREATE TABLE VALID_MULTIPLE_DDL_IN_DDL_BATCH_1 (ID INT64 NOT NULL, BAR STRING(100)) PRIMARY KEY (ID); +CREATE TABLE VALID_MULTIPLE_DDL_IN_DDL_BATCH_2 (ID INT64 NOT NULL, BAR STRING(100)) PRIMARY KEY (ID); +-- Run the batch +RUN BATCH; + +-- Switch to autocommit and verify that both tables exist +SET AUTOCOMMIT = TRUE; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 2 AS EXPECTED +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='VALID_MULTIPLE_DDL_IN_DDL_BATCH_1' OR TABLE_NAME='VALID_MULTIPLE_DDL_IN_DDL_BATCH_2'; + + +NEW_CONNECTION; +/* + * Do a test that shows that a DDL batch might only execute some of the statements, + * for example if data in a table prevents a 
unique index from being created. + */ +SET AUTOCOMMIT = FALSE; +START BATCH DDL; + +CREATE TABLE TEST1 (ID INT64 NOT NULL, NAME STRING(100)) PRIMARY KEY (ID); +CREATE TABLE TEST2 (ID INT64 NOT NULL, NAME STRING(100)) PRIMARY KEY (ID); +RUN BATCH; + +SET AUTOCOMMIT = TRUE; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 2 AS EXPECTED +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='TEST1' OR TABLE_NAME='TEST2'; + +-- Fill the second table with some data that will prevent us from creating a unique index on +-- the name column. +INSERT INTO TEST2 (ID, NAME) VALUES (1, 'TEST'); +INSERT INTO TEST2 (ID, NAME) VALUES (2, 'TEST'); + +-- Ensure the indices that we are to create do not exist +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM INFORMATION_SCHEMA.INDEXES +WHERE (TABLE_NAME='TEST1' AND INDEX_NAME='IDX_TEST1') + OR (TABLE_NAME='TEST2' AND INDEX_NAME='IDX_TEST2'); + +-- Try to create two unique indices in one batch +SET AUTOCOMMIT = FALSE; +START BATCH DDL; + +CREATE UNIQUE INDEX IDX_TEST1 ON TEST1 (NAME); +CREATE UNIQUE INDEX IDX_TEST2 ON TEST2 (NAME); + +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; + +SET AUTOCOMMIT = TRUE; + +-- Ensure that IDX_TEST1 was created and IDX_TEST2 was not. +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1 AS EXPECTED +FROM INFORMATION_SCHEMA.INDEXES +WHERE TABLE_NAME='TEST1' AND INDEX_NAME='IDX_TEST1'; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM INFORMATION_SCHEMA.INDEXES +WHERE TABLE_NAME='TEST2' AND INDEX_NAME='IDX_TEST2'; + +NEW_CONNECTION; +/* Verify that empty DDL batches are accepted. */ +START BATCH DDL; +RUN BATCH; + +START BATCH DDL; +ABORT BATCH; + +NEW_CONNECTION; +-- Set proto descriptors using relative path to the descriptors.pb file. 
This gets applied for next DDL statement +SET PROTO_DESCRIPTORS_FILE_PATH = 'src/test/resources/com/google/cloud/spanner/descriptors.pb'; +-- Check if Proto descriptors is set +@EXPECT RESULT_SET 'PROTO_DESCRIPTORS_FILE_PATH' +SHOW VARIABLE PROTO_DESCRIPTORS_FILE_PATH; + +CREATE PROTO BUNDLE (examples.spanner.music.Genre); +-- Check if Proto descriptors is reset to null +@EXPECT RESULT_SET 'PROTO_DESCRIPTORS',null +SHOW VARIABLE PROTO_DESCRIPTORS; +@EXPECT RESULT_SET 'PROTO_DESCRIPTORS_FILE_PATH',null +SHOW VARIABLE PROTO_DESCRIPTORS_FILE_PATH; + +-- Set Proto Descriptor as base64 string. This gets applied to all statements in next DDL batch +SET PROTO_DESCRIPTORS = 'CvYCCgxzaW5nZXIucHJvdG8SFmV4YW1wbGVzLnNwYW5uZXIubXVzaWMi6gEKClNpbmdlckluZm8SIAoJc2luZ2VyX2lkGAEgASgDSABSCHNpbmdlcklkiAEBEiIKCmJpcnRoX2RhdGUYAiABKAlIAVIJYmlydGhEYXRliAEBEiUKC25hdGlvbmFsaXR5GAMgASgJSAJSC25hdGlvbmFsaXR5iAEBEjgKBWdlbnJlGAQgASgOMh0uZXhhbXBsZXMuc3Bhbm5lci5tdXNpYy5HZW5yZUgDUgVnZW5yZYgBAUIMCgpfc2luZ2VyX2lkQg0KC19iaXJ0aF9kYXRlQg4KDF9uYXRpb25hbGl0eUIICgZfZ2VucmUqLgoFR2VucmUSBwoDUE9QEAASCAoESkFaWhABEggKBEZPTEsQAhIICgRST0NLEANCKQoYY29tLmdvb2dsZS5jbG91ZC5zcGFubmVyQgtTaW5nZXJQcm90b1AAYgZwcm90bzM='; + +@EXPECT RESULT_SET 'PROTO_DESCRIPTORS' +SHOW VARIABLE PROTO_DESCRIPTORS; + +START BATCH DDL; +ALTER PROTO BUNDLE INSERT (examples.spanner.music.SingerInfo); +CREATE TABLE Singers ( + SingerId INT64 NOT NULL, + FirstName STRING(1024), + LastName STRING(1024), + SingerInfo examples.spanner.music.SingerInfo, + SingerGenre examples.spanner.music.Genre +) PRIMARY KEY (SingerId); +-- Run the batch +RUN BATCH; + +-- Check if Proto descriptors is reset to null +@EXPECT RESULT_SET 'PROTO_DESCRIPTORS',null +SHOW VARIABLE PROTO_DESCRIPTORS; +-- Check that the table is created +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1 AS EXPECTED +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='Singers'; diff --git 
a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITReadOnlySpannerTest.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITReadOnlySpannerTest.sql new file mode 100644 index 000000000000..6a8df6a58b9f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITReadOnlySpannerTest.sql @@ -0,0 +1,279 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +NEW_CONNECTION; + +-- Test a couple of count queries to ensure the presence of the data +@EXPECT RESULT_SET 'READONLY',true +SHOW VARIABLE READONLY; + +-- Check initial contents. +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1000 AS EXPECTED FROM NUMBERS; + +-- Check initial contents. 
+@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 168 AS EXPECTED FROM PRIME_NUMBERS; + +-- Assert that there is a read timestamp +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; + +NEW_CONNECTION; +-- Test two selects in one temporary transaction +@EXPECT RESULT_SET 'READONLY',true +SHOW VARIABLE READONLY; + +BEGIN; + +@EXPECT RESULT_SET 'NUMBER',1 +SELECT NUMBER +FROM NUMBERS +WHERE NUMBER=1; + +@PUT 'READ_TIMESTAMP1' +SHOW VARIABLE READ_TIMESTAMP; + +@EXPECT RESULT_SET 'PRIME_NUMBER',13 +SELECT PRIME_NUMBER +FROM PRIME_NUMBERS +WHERE PRIME_NUMBER=13; + +@PUT 'READ_TIMESTAMP2' +SHOW VARIABLE READ_TIMESTAMP; + +@EXPECT EQUAL 'READ_TIMESTAMP1','READ_TIMESTAMP2'; + +COMMIT; + +NEW_CONNECTION; + +/* + * ------------------------------------------------------------------------------------------------ + * | Test different read only staleness values in autocommit mode | + * ------------------------------------------------------------------------------------------------ + */ + +--TimestampBound.ofReadTimestamp(Timestamp.now()), + +@PUT 'CURRENT_TIMESTAMP' +SELECT CURRENT_TIMESTAMP(); + +SET READ_ONLY_STALENESS = 'READ_TIMESTAMP %%CURRENT_TIMESTAMP%%'; + +-- Check SELECT with READ_TIMESTAMP +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1000 AS EXPECTED FROM NUMBERS; + +@PUT 'READ_TIMESTAMP1' +SHOW VARIABLE READ_TIMESTAMP; + +-- Check SELECT with READ_TIMESTAMP +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 168 AS EXPECTED FROM PRIME_NUMBERS; + +@PUT 'READ_TIMESTAMP2' +SHOW VARIABLE READ_TIMESTAMP; + +@EXPECT EQUAL 'READ_TIMESTAMP1','READ_TIMESTAMP2'; +@EXPECT EQUAL 'READ_TIMESTAMP1','CURRENT_TIMESTAMP'; + +NEW_CONNECTION; +--TimestampBound.ofMinReadTimestamp(Timestamp.now()), + +@PUT 'CURRENT_TIMESTAMP' +SELECT CURRENT_TIMESTAMP(); + +SET READ_ONLY_STALENESS = 'MIN_READ_TIMESTAMP %%CURRENT_TIMESTAMP%%'; + +-- Check SELECT with MIN_READ_TIMESTAMP +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1000 AS EXPECTED FROM NUMBERS; + +@EXPECT RESULT_SET 
'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; + +-- Check SELECT with MIN_READ_TIMESTAMP +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 168 AS EXPECTED FROM PRIME_NUMBERS; + +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; + + +NEW_CONNECTION; +--TimestampBound.ofExactStaleness(1, TimeUnit.MILLISECONDS), + +SET READ_ONLY_STALENESS = 'EXACT_STALENESS 1ms'; + +-- Check SELECT with EXACT_STALENESS +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1000 AS EXPECTED FROM NUMBERS; + +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; + +-- Check SELECT with EXACT_STALENESS +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 168 AS EXPECTED FROM PRIME_NUMBERS; + +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; + + +NEW_CONNECTION; +--TimestampBound.ofMaxStaleness(100, TimeUnit.MILLISECONDS) + +SET READ_ONLY_STALENESS = 'MAX_STALENESS 100ms'; + +-- Check SELECT with MAX_STALENESS +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1000 AS EXPECTED FROM NUMBERS; + +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; + +-- Check SELECT with MAX_STALENESS +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 168 AS EXPECTED FROM PRIME_NUMBERS; + +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; + + +NEW_CONNECTION; +--TimestampBound.strong() + +SET READ_ONLY_STALENESS = 'STRONG'; + +-- Check SELECT with STRONG +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1000 AS EXPECTED FROM NUMBERS; + +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; + +-- Check SELECT with STRONG +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 168 AS EXPECTED FROM PRIME_NUMBERS; + +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; + + +NEW_CONNECTION; +/* + * ------------------------------------------------------------------------------------------------ + * | Test the same read only staleness values in transactional mode | + * 
------------------------------------------------------------------------------------------------ + */ + +--TimestampBound.ofReadTimestamp(Timestamp.now()), + +@PUT 'CURRENT_TIMESTAMP' +SELECT CURRENT_TIMESTAMP(); + +SET AUTOCOMMIT = FALSE; + +SET READ_ONLY_STALENESS = 'READ_TIMESTAMP %%CURRENT_TIMESTAMP%%'; + +-- Check SELECT with READ_TIMESTAMP in a transaction. +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1000 AS EXPECTED FROM NUMBERS; + +@PUT 'READ_TIMESTAMP1' +SHOW VARIABLE READ_TIMESTAMP; + +-- Check SELECT with READ_TIMESTAMP in a transaction. +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 168 AS EXPECTED FROM PRIME_NUMBERS; + +@PUT 'READ_TIMESTAMP2' +SHOW VARIABLE READ_TIMESTAMP; + +@EXPECT EQUAL 'READ_TIMESTAMP1','READ_TIMESTAMP2'; +@EXPECT EQUAL 'READ_TIMESTAMP1','CURRENT_TIMESTAMP'; + +COMMIT; + +NEW_CONNECTION; +--TimestampBound.ofMinReadTimestamp(Timestamp.now()), + +@PUT 'CURRENT_TIMESTAMP' +SELECT CURRENT_TIMESTAMP(); + +SET AUTOCOMMIT = FALSE; + +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS = 'MIN_READ_TIMESTAMP %%CURRENT_TIMESTAMP%%'; + + +NEW_CONNECTION; +--TimestampBound.ofExactStaleness(100, TimeUnit.MILLISECONDS), +SET AUTOCOMMIT = FALSE; + +SET READ_ONLY_STALENESS = 'EXACT_STALENESS 100ms'; + +-- Check SELECT with EXACT_STALENESS in a transaction. +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1000 AS EXPECTED FROM NUMBERS; + +@PUT 'READ_TIMESTAMP1' +SHOW VARIABLE READ_TIMESTAMP; + +-- Check SELECT with EXACT_STALENESS in a transaction. 
+@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 168 AS EXPECTED FROM PRIME_NUMBERS; + +@PUT 'READ_TIMESTAMP2' +SHOW VARIABLE READ_TIMESTAMP; + +@EXPECT EQUAL 'READ_TIMESTAMP1','READ_TIMESTAMP2'; + +COMMIT; + + +NEW_CONNECTION; +--TimestampBound.ofMaxStaleness(100, TimeUnit.MILLISECONDS) +SET AUTOCOMMIT = FALSE; + +@EXPECT EXCEPTION FAILED_PRECONDITION +SET READ_ONLY_STALENESS = 'MAX_STALENESS 100ms'; + + +NEW_CONNECTION; +--TimestampBound.strong() +SET AUTOCOMMIT = FALSE; + +SET READ_ONLY_STALENESS = 'STRONG'; + +-- Check SELECT with STRONG in a transaction. +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1000 AS EXPECTED FROM NUMBERS; + +@PUT 'READ_TIMESTAMP1' +SHOW VARIABLE READ_TIMESTAMP; + +-- Check SELECT with STRONG in a transaction. +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 168 AS EXPECTED FROM PRIME_NUMBERS; + +@PUT 'READ_TIMESTAMP2' +SHOW VARIABLE READ_TIMESTAMP; + +@EXPECT EQUAL 'READ_TIMESTAMP1','READ_TIMESTAMP2'; + +COMMIT; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITReadOnlySpannerTest_CreateTables.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITReadOnlySpannerTest_CreateTables.sql new file mode 100644 index 000000000000..5ba95f80d45d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITReadOnlySpannerTest_CreateTables.sql @@ -0,0 +1,24 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +NEW_CONNECTION; + +SET READONLY = FALSE; +START BATCH DDL; + +CREATE TABLE NUMBERS (NUMBER INT64 NOT NULL, NAME STRING(200) NOT NULL) PRIMARY KEY (NUMBER); +CREATE TABLE PRIME_NUMBERS (PRIME_NUMBER INT64 NOT NULL, BINARY_REPRESENTATION STRING(MAX) NOT NULL) PRIMARY KEY (PRIME_NUMBER); +RUN BATCH; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITReadWriteAutocommitSpannerTest.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITReadWriteAutocommitSpannerTest.sql new file mode 100644 index 000000000000..4d5327954f87 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITReadWriteAutocommitSpannerTest.sql @@ -0,0 +1,216 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +NEW_CONNECTION; + +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; + +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; + + +NEW_CONNECTION; +INSERT INTO TEST (ID, NAME) VALUES (1, 'test'); + +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP' +SHOW VARIABLE COMMIT_TIMESTAMP; + + +NEW_CONNECTION; + +@EXPECT RESULT_SET 'ID',1 +SELECT * +FROM TEST; + +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; + + +NEW_CONNECTION; +@EXPECT UPDATE_COUNT 1 +INSERT INTO TEST (ID, NAME) VALUES (2, 'FOO'); + +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP' +SHOW VARIABLE COMMIT_TIMESTAMP; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 2 AS EXPECTED FROM TEST; + +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; + +-- Do an update in partitioned_non_atomic mode +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; + +@EXPECT UPDATE_COUNT 1 +UPDATE TEST SET NAME = 'partitioned' WHERE ID=2; + +-- Reset dml mode to transactional +SET AUTOCOMMIT_DML_MODE='TRANSACTIONAL'; + +@EXPECT RESULT_SET 'NAME','partitioned' +SELECT NAME FROM TEST WHERE ID=2; + +-- Set a statement timeout that should never be reached +SET STATEMENT_TIMEOUT = '10000s'; + +@EXPECT RESULT_SET 'NAME','partitioned' +SELECT NAME FROM TEST WHERE ID=2; + +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; + +-- Set a statement timeout that should always be exceeded +SET STATEMENT_TIMEOUT = '1ns'; + +@EXPECT EXCEPTION DEADLINE_EXCEEDED +SELECT NAME FROM TEST WHERE ID=2; + +-- Turn off statement timeouts +SET STATEMENT_TIMEOUT = null; +-- There should be no read timestamp available +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; + +-- Set a statement timeout that should never be reached +SET STATEMENT_TIMEOUT = '10000s'; + +@EXPECT UPDATE_COUNT 1 +INSERT INTO TEST (ID, NAME) VALUES (3, 'test'); + +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP' +SHOW VARIABLE COMMIT_TIMESTAMP; + +-- Set a statement timeout that should always be exceeded +SET STATEMENT_TIMEOUT 
= '1ns'; +-- And then try to do an insert +@EXPECT EXCEPTION DEADLINE_EXCEEDED +INSERT INTO TEST (ID, NAME) VALUES (4, 'test'); + +-- Turn off statement timeouts +SET STATEMENT_TIMEOUT = null; +-- Delete record with id 4 if it exists (even though the statement timed out, +-- there is still a small chance that the statement did succeed) +DELETE FROM TEST WHERE ID=4; + +-- Verify that a timeout means there's no commit timestamp +SET STATEMENT_TIMEOUT = '1ns'; + +@EXPECT EXCEPTION DEADLINE_EXCEEDED +INSERT INTO TEST (ID, NAME) VALUES (4, 'test'); + +SET STATEMENT_TIMEOUT = null; + +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; + + +NEW_CONNECTION; +-- Execute a number of statements on one connection +DELETE FROM TEST WHERE ID=4; + +@EXPECT UPDATE_COUNT 1 +INSERT INTO TEST (ID, NAME) VALUES (4, 'test'); + +@EXPECT RESULT_SET 'NAME','test' +SELECT * FROM TEST WHERE ID=4; + +@EXPECT UPDATE_COUNT 1 +UPDATE TEST SET NAME='test18' WHERE ID=4; + +@EXPECT RESULT_SET 'NAME','test18' +SELECT * FROM TEST WHERE ID=4; + +@EXPECT UPDATE_COUNT 1 +DELETE FROM TEST WHERE ID=4; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM TEST +WHERE ID=4; + + +NEW_CONNECTION; +-- Test primary key violation + +@EXPECT UPDATE_COUNT 1 +INSERT INTO TEST (ID, NAME) VALUES (4, 'test'); + +@EXPECT EXCEPTION ALREADY_EXISTS +INSERT INTO TEST (ID, NAME) VALUES (4, 'should not be there'); + +--Check that the second insert failed +@EXPECT RESULT_SET 'NAME','test' +SELECT * FROM TEST WHERE ID=4; + + +NEW_CONNECTION; +-- Test multiple timeouts after each other on the same connection +SET STATEMENT_TIMEOUT = '1ns'; + +@EXPECT EXCEPTION DEADLINE_EXCEEDED +SELECT NAME FROM TEST WHERE ID=2; + +@EXPECT EXCEPTION DEADLINE_EXCEEDED +SELECT NAME FROM TEST WHERE ID=2; + +@EXPECT EXCEPTION DEADLINE_EXCEEDED +SELECT NAME FROM TEST WHERE ID=2; + + +NEW_CONNECTION; +-- Execute a DML batch. 
+START BATCH DML; +@EXPECT UPDATE_COUNT -1 +INSERT INTO TEST (ID, NAME) VALUES (10, 'Batched insert 1'); +@EXPECT UPDATE_COUNT -1 +INSERT INTO TEST (ID, NAME) VALUES (11, 'Batched insert 2'); +@EXPECT UPDATE_COUNT -1 +INSERT INTO TEST (ID, NAME) VALUES (12, 'Batched insert 3'); +@EXPECT RESULT_SET 'UPDATE_COUNTS',[1,1,1] +RUN BATCH; + +-- Verify that the records were inserted. +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 3 AS EXPECTED +FROM TEST +WHERE ID IN (10,11,12); + + +-- Execute a DML batch with an error. +START BATCH DML; +@EXPECT UPDATE_COUNT -1 +DELETE FROM TEST WHERE ID IN (10,11,12); +@EXPECT UPDATE_COUNT -1 +DELETE FROM TEST_NOT_FOUND WHERE ID IN (10,11,12); +-- Returns an error because of the second statement. +@EXPECT EXCEPTION INVALID_ARGUMENT +RUN BATCH; + +-- Verify that the records were not deleted. +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 3 AS EXPECTED +FROM TEST +WHERE ID IN (10,11,12); + +START BATCH DML; +@EXPECT UPDATE_COUNT -1 +DELETE FROM TEST WHERE ID=10; +DELETE FROM TEST WHERE ID=11; +DELETE FROM TEST WHERE ID=12; +@EXPECT RESULT_SET 'UPDATE_COUNTS',[1,1,1] +RUN BATCH; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlMusicScriptTest.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlMusicScriptTest.sql new file mode 100644 index 000000000000..243cdda32bd0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlMusicScriptTest.sql @@ -0,0 +1,675 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * SQL script that uses the standard Singers/Albums/Songs/Concerts data model for testing the Connection API. + */ + +SET AUTOCOMMIT = FALSE; +START BATCH DDL; + +CREATE TABLE Singers ( + SingerId INT64 NOT NULL, + FirstName STRING(1024), + LastName STRING(1024), + SingerInfo BYTES(MAX), + BirthDate DATE +) PRIMARY KEY(SingerId); + +CREATE INDEX SingersByFirstLastName ON Singers(FirstName, LastName); + +CREATE TABLE Albums ( + SingerId INT64 NOT NULL, + AlbumId INT64 NOT NULL, + AlbumTitle STRING(MAX), + MarketingBudget INT64 +) PRIMARY KEY(SingerId, AlbumId), + INTERLEAVE IN PARENT Singers ON DELETE CASCADE; + +CREATE INDEX AlbumsByAlbumTitle ON Albums(AlbumTitle); + +CREATE INDEX AlbumsByAlbumTitle2 ON Albums(AlbumTitle) STORING (MarketingBudget); + +CREATE TABLE Songs ( + SingerId INT64 NOT NULL, + AlbumId INT64 NOT NULL, + TrackId INT64 NOT NULL, + SongName STRING(MAX), + Duration INT64, + SongGenre STRING(25) +) PRIMARY KEY(SingerId, AlbumId, TrackId), + INTERLEAVE IN PARENT Albums ON DELETE CASCADE; + +CREATE INDEX SongsBySingerAlbumSongNameDesc ON Songs(SingerId, AlbumId, SongName DESC), INTERLEAVE IN Albums; + +CREATE INDEX SongsBySongName ON Songs(SongName); + +CREATE TABLE Concerts ( + VenueId INT64 NOT NULL, + SingerId INT64 NOT NULL, + ConcertDate DATE NOT NULL, + BeginTime TIMESTAMP, + EndTime TIMESTAMP, + TicketPrices ARRAY +) PRIMARY KEY(VenueId, SingerId, ConcertDate); + +RUN BATCH; + +-- Check that all tables and indices were created +SET AUTOCOMMIT = TRUE; + +@EXPECT RESULT_SET +SELECT TABLE_NAME AS ACTUAL, 'Singers' 
AS EXPECTED +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='Singers'; + +@EXPECT RESULT_SET +SELECT TABLE_NAME AS ACTUAL, 'Albums' AS EXPECTED +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='Albums'; + +@EXPECT RESULT_SET +SELECT TABLE_NAME AS ACTUAL, 'Songs' AS EXPECTED +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='Songs'; + +@EXPECT RESULT_SET +SELECT TABLE_NAME AS ACTUAL, 'Concerts' AS EXPECTED +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='Concerts'; + +@EXPECT RESULT_SET +SELECT INDEX_NAME AS ACTUAL, 'SingersByFirstLastName' AS EXPECTED +FROM INFORMATION_SCHEMA.INDEXES +WHERE INDEX_NAME='SingersByFirstLastName' AND TABLE_NAME='Singers'; + +@EXPECT RESULT_SET +SELECT INDEX_NAME AS ACTUAL, 'AlbumsByAlbumTitle' AS EXPECTED +FROM INFORMATION_SCHEMA.INDEXES +WHERE INDEX_NAME='AlbumsByAlbumTitle' AND TABLE_NAME='Albums'; + +@EXPECT RESULT_SET +SELECT INDEX_NAME AS ACTUAL, 'AlbumsByAlbumTitle2' AS EXPECTED +FROM INFORMATION_SCHEMA.INDEXES +WHERE INDEX_NAME='AlbumsByAlbumTitle2' AND TABLE_NAME='Albums'; + +@EXPECT RESULT_SET +SELECT INDEX_NAME AS ACTUAL, 'SongsBySingerAlbumSongNameDesc' AS EXPECTED +FROM INFORMATION_SCHEMA.INDEXES +WHERE INDEX_NAME='SongsBySingerAlbumSongNameDesc' AND TABLE_NAME='Songs'; + +@EXPECT RESULT_SET +SELECT INDEX_NAME AS ACTUAL, 'SongsBySongName' AS EXPECTED +FROM INFORMATION_SCHEMA.INDEXES +WHERE INDEX_NAME='SongsBySongName' AND TABLE_NAME='Songs'; + +@EXPECT RESULT_SET +SELECT PARENT_TABLE_NAME AS ACTUAL, 'Singers' AS EXPECTED +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='Albums'; + +@EXPECT RESULT_SET +SELECT PARENT_TABLE_NAME AS ACTUAL, 'Albums' AS EXPECTED +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='Songs'; + +-- Insert test data +SET AUTOCOMMIT = FALSE; +SET TRANSACTION READ WRITE; + +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (1,'First 1','Last 
1',FROM_BASE64('TIX0lfKhZyUjI+F5VXYQ9O/SXgQh4kT4Ktnp7BYPnTmAps2DA2YuulryUp9UD21JxGY1oq+UYQ/HYeN5eZ6aY31ualSCN417oWJH2yeZweByeGpxJ3XQ3tVnjbz2AYfaZ8IAap0v5EbUN/ATQT3H6NBb3qM+RzVK/VszGOxs0i8+aT6rXH3hsfXbgL36zXChrSxDNT4TjxhAjPA1YiDPqw=='),DATE '1906-04-28'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (2,'First 2','Last 2',FROM_BASE64('RtIHMSnQrvv1/aZEhBtP/JBfDe1dLjgRqGOQ/5qr3uYhdId6wthztQel0bD4Ucypl6L6/Lc56rz9PfvwKmvlBuMGr87zBvi1q3O/O74/4MTOl6Nic/3ltzxA7GEIgyKAcbKYdApPdMGMOG2Vx4p8nbPaPwMBr6hcp68A5xG/FLTreNVv2IVZx7NMSw3lqe3AV2uYdKWJp4zFB+qshsCmkA=='),DATE '1922-11-17'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (3,'First 3','Last 3',FROM_BASE64('VF6u7dM6mIY0RebnEh6E0jYajqZcfGH9b9HeotdCvUzpbOo7wJfqPMLZP3QVYnG416BFPct7Bl90EsbNE8FA/4EwUk8SU65N86PZGRoUUXEeTvaojCjVeqTByM0GQ+nROb73Kd0cW/TURRGv3ihLCMQdWIH8iGgCtjN3G+7vBavsinnnToCdSPlJxweyYKTdo+JwdqL3kVFk2O1QymuaHA=='),DATE '1935-11-08'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (4,'First 4','Last 4',FROM_BASE64('8YrvnC8FyWLGLTcv7D/fUKLxX+UqYiz6U0WOJHWyneRCDIFKsLSue3lhtbW+MnsrveL0WFmHHXRTdQ91EWzhvqdIJf4JIyk/Ndmq6mouM0n36EUeTAPQU7Wg4bxsrzggyD5FNvvuimLLpKuQBDZY1os7Xw/bksWUJ7XzZwy90pfDrgtGb4DdWZ1EJ6x71C2IMuzCnzhoV7/E15tXjiOfkg=='),DATE '1945-03-23'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (5,'First 5','Last 5',FROM_BASE64('BOItWwCAuhUAwZxfmh3F3KK6+Ne+KqShIZA/uCHi72wJOY0V3/Y/f5M8XhE+MLwz0VvLANU3Y6sHonGw8d09YkAZEo034Z2Q+YEEfFCwIhUIM+VTfwOkuRYgeU1SXBXjcZc5zsicakqYA82O3cd1RsFW+mmAO/bBcbSqraxuBR/5DYnbKrL9b5q9xqL+kQRMm2ZwoWpQP24Xke3lRlQlYg=='),DATE '1953-06-03'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (6,'First 6','Last 
6',FROM_BASE64('Tz+o/44KGH34c7DzY4R/H7v0Uk4HyLV4yzjZ9VApDkhwiNKi33JspiLxfG43UvDpk0nLfRSbwu9h7p69H5NPNs9FyGaLxmqmKlP4/vohRJffbyLPEcGl3uSFRg7tnWcrlyegS03MotT9wXQNfjiAFwDh70jsxd7LnowepMMjk+qt+R8MaZkyZyL/AuE300N5P2D5i7shkS4F3IudQihtIA=='),DATE '1956-02-07'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (7,'First 7','Last 7',FROM_BASE64('r1QqmO+/0+e/J616waBJQVciDvuaLuRahdtZaCC5Tq95VxwIXfZE0Hisj0SER3+3A53DZw0qClcMOdPVTufwrKhAwggyqbtQ54UR2Q/dwAA1rHzikSS9JCyY3ksTQgUYmvcvtlfnNo/RA222yDyJMQ9sBcpJAs5cAbac0X4v8DgWicueJtQe2ohZMh4r7L9LydHW+B8DpqAa2yZWEzHoPw=='),DATE '1969-05-26'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (8,'First 8','Last 8',FROM_BASE64('IEcPp2dPGDIGotq157CMyokgh2eh5buNTqUu6JQfmbpb4vOuNFzFNrljhnFIxqj+PWAhkjjNhcYTHCmwAM5USSjrpyObo6P8KHe1ctENtgzVZ6Ym3OiKydVLsQGltVOFSpT4l/sM2aGc3AiAPkjAncCZudrMy57MtDcIUCwz1I8giEzB2ZOhlSaR2v8Vs+62+KiZOWGFnoVnen4lWywHEw=='),DATE '1978-11-08'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (9,'First 9','Last 9',FROM_BASE64('uNTiTW4e9PZuRNhywFSLAkMtLpkS/+g3J3FWuiI7kAjyWHzIJMX/KswTyyiUKasn1lcdrIieGN1wiyyXU0+o9kbCuCeT4RfrC+tSqh4rm5pbY+JaR65DtbzfGw3TrWkwoHhfxU/ftnZ14v6H3xAMVM22P3rphR833b1jc1lz3R/mdcTN0dEYhzrgCCtpoOygZZpmN8yOUR3AG0oErN1pyw=='),DATE '1987-07-04'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (10,'First 10','Last 10',FROM_BASE64('ZLpjMKSfhG9zjm/ACvA5TwB/djEWviypBYklKeuTwodVKUYciGavmgm2QkwlslDP0D0PutUorM3trZHt2vqSkKdpzoQxPa5fBtuUa+FQ3xtCZ8RoDJoZ1TEy3rGX6oUQh4vsrflFxhzuUKevKPMmf/ZQFvslytPN0vtHbHtPA9i4iaw0R6RuWyoc8QBLcHHyopul63KzweLlTBacSeC7oQ=='),DATE '1993-07-06'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (11,'First 11','Last 
11',FROM_BASE64('OwMBMDWFt7Rni8cbwjWDeZ4BWluiA91JHDyh41zm2Vm36DuwwaLblF1kX2oGwJkICx2191DyfnFsJ7xejiPylZg170+iSwOeNkjj7s45JyihJVnCgEN2u5/D/7DFi0lmdqIGQzIJZ7VrhC/qEU8+4kx8uPfoQx8XndgOQJYibzw2YZM6LIMHhmJmd1nDvvda7Etdo5s9rYlGN6lvvBGifA=='),DATE '1895-09-15'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (12,'First 12','Last 12',FROM_BASE64('DNE+9WHnlmdsOLBBtVwEBotQmrHLw3JbKU6CzAGmV6yEtut+dmZU0OyDMK1jMY+vFH1fK0CSUJ4oM9jBkrI2lIhbL4UyxZghP3z6aWyywpOor/llA4xYoNdaT75xcQJUFYkrR1omHA5BqFG72qx+bjv2qV2izdttQYqq72+TYDLYCPLzhB6iP21zodySDD3HS0qc2/FeJHtm4Xe/HdOzlw=='),DATE '1922-01-03'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (13,'First 13','Last 13',FROM_BASE64('ltUFKoMUjmWlo0Vxwq4pd2kONfIX7mnNnmC2UmfF+M6w9X0U41iMVSVkVZBh4jpUNHx3AhZEJH4x7DnGgcuc8JaFbqRCT8GawwmFvrrJV8lBT3fPCV/QRDrP8Mx4DePxnBEcABfwPJlHMOF59WJX67eUyx1o5EnJx+KS8DTEPEh0yKWDreErIvMPft5T5JlHgGqOMrSNX3eKiEPI3sWjpw=='),DATE '1930-08-03'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (14,'First 14','Last 14',FROM_BASE64('SuJFYQf146OJUJCoOUOcN41X8M84E1lHVn/S0mHjrq7J8HPsUmfjAUNrwZ5RVJJ81vsm8I3xQkLtlQRC7lWkS5eCFSSo56B/NwtgtMugg+r7PSjoIC2HbIM0p89PA8QPW7jRMSx7fmSIm+PJBxavr5xJvv38IQboX/G4lK8wtMy0eNySrya0OE5fCKGKvke2fP9V5QpzhC0WnG9lhs/aRg=='),DATE '1940-05-12'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (15,'First 15','Last 15',FROM_BASE64('bUSUVJWZAEqNlHvePw03zNjtv6Bt45YXSnkduopD1wfJK8enFd9/9FwcWpEom6F8k7skYlTFMgsJCoxVXYXEbb9ZXYi0NoevQ3xG9uWT5NvFCKw++IeUwVztUjYIIHkH4zsYI3csH9Su3yHEqddKyg216ccUTgB0NZcosgKs1eTg1NC5BIzqCXa5Z+X38t/QismLAflh1gTbD2F7ihSDIA=='),DATE '1946-09-23'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (16,'First 16','Last 
16',FROM_BASE64('lan0QanIaA/igo2HGkKzB82cKPgAZod4JMN/sV7kQSsMLqzMijItMQFKR1H6eYJosWKm2ajiYByBG8nxP5og5B3aut/Y2n58U86jgoHxoQR4LHOmhCnzjsBLfwZ4wE8BN8iznkWuMihQbZvsAQituLo3zygYKzSZ0V0O+nJBf4OesrS9UW1fwC312k1iB39ELDnZFTuWfca+8nqiv5kolg=='),DATE '1947-09-30'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (17,'First 17','Last 17',FROM_BASE64('NCks7+4E3+XdpeWrBg/0zz2t3KzNGh1hE3uZaXcQewMjBmiujYEP4teH6Sb2awoR55zII2eJHC6hyTcrCVLrTktBm8wqSkve+yxTvY1IIJt5eULLCerZeyl8dRDXpkpIJ3itPvXlsQUToBNhxW2gQqmz+segWsJwbtQSnnGZ+Frn8JiFSz51FCRcYP/eBlogljT9vxxuWkKrL8koRz3+ew=='),DATE '1948-02-04'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (18,'First 18','Last 18',FROM_BASE64('KZyY3e9J6d5G4uuBYa4mLVWaZv8SowusNx4KHrYx5VWF3xLsy/ZuOqRczNDeAitOyXC4lh2t1M8hclCsyxHIW2cUOR9xQnnqWtYA3WrYcFycqJn4hM+ghEAX5OXRVWEQ5lr9op7cSJio2JMawTcR5z3MrNucj0VTBtLTQKxHcW7VeuPARHCBkDCOicr0gQr/pODAUs5ipqv807ZhCV0n7Q=='),DATE '1962-11-10'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (19,'First 19','Last 19',FROM_BASE64('RymWYvEhWXx4AhUQaGoruxL/XgVUnz2vjMqa8yQtFEd8awR7Vzknzl1hpl/kKdCaKSzI9TV6RoD0HZu0U3UTazIh5WjMZYKwDT0ewsh4i5S1EIRzGohMg7l0vUVRtCzWVO5uALxm1mKYp3SczU0ETmQ0t+o1ke13Bz8I1/hyIsj6PnCIvxdOBZVycfKZ64dVxwwB9vYjVoRv5jnEw4K9Kg=='),DATE '1988-10-07'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (20,'First 20','Last 20',FROM_BASE64('+OgGCnKsVaMSILWEr/0wqkPajWcdSx79nDEOqmtCtQiIP7zqZETVKOjgfVYZg3pBzQQHQ9IExYlCvqX+//GxLhegBzKFfpJ6hp6NpKiJ2p6odLfeYVkvP7GdggU5sII5da3ApMebuQDkbYS1fWk8pXdqDIpkWbFG/PTGq+p1IdHRV0tkAEe9NFW2h5y8aO6Oy+zJprq0IX5CYln6zek/gQ=='),DATE '2000-02-29'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (21,'First 21','Last 
21',FROM_BASE64('0BPlFBGYZ1w3Snpf3o6PswLXQeFgEE2ehluMDXYeqPjOO+dXN6s3CrS3JUVhejj5kARAoT510bRM4eAY7x5zRBtGQisDkeaeh0d1h/o+NESSX8MPZikMmeltT864xjxwnTd/5/m1oZTSVjVLVewzi6b1MuHcghcHrdgYMy/0f3ivz1DJMH6T4tIIFxg/y8Ueb0qKcFGvnvJlTWuNnMEpdA=='),DATE '1886-08-09'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (22,'First 22','Last 22',FROM_BASE64('K6GpZCoGGMJdF/55vnvqKCfpNyVrSJGMIpvDA8nxlwQFLBqemxUgCFrtAAZx7ERd0ndYXFJOpsRS8+You2lPcaXh/jfGWoZSy/AHLS7vpML7vRzrxKlyuocjaaDEm/wb351dEU3tT8VSOEhFkYk1VvhiFS47Hm5au5E2XXUpuQwHE/6f+FlkoD64wgmdyaOQWaJgJ0Nhg6UHWA0+MJ6AEA=='),DATE '1889-04-03'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (23,'First 23','Last 23',FROM_BASE64('etCS2Mi+5SgbO7VHnr1V+3PHp2ExR+NW0mF5mUhUxitXK4CtS1tbcUzvoSYoVEMhRlT0d1O2qlIyOPayxIB2GfEfhkJpajyPSjyBSEmh8frdKLx9qICQ+Ztwv8jK9JBOJC3VQxooS49ovUff/0W6akc0s9bNevQx8v8d3daklRCKFWYQCSIFcoYZv5+78zwZ8KZHErTXQl2ZW9/zH06Uew=='),DATE '1892-01-21'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (24,'First 24','Last 24',FROM_BASE64('RUq40F7M3XWGARLYX8ZW04hf4L+zW2wkKXf5zH4rqBxG2sacUMpZcK65mmQgj51C8XFcny+4E+ZuGfGVXIi3rsF700SnZVeFdKx1s2WMZJRKvJjbXKyHoaWytT9oUHIGqJw4qXpGh/CKUb9frNWCK5FiUk6iNsE3yMsEMVe+WnG5x+AeGXtWYwW5FtF0TnqUTnXw1lf3QzSdXg+DAvIZQA=='),DATE '1898-02-21'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (25,'First 25','Last 25',FROM_BASE64('HzmgCTWib9uD7oO0qPps+AvNx1o5b38RVsvRGXAMG9uxijG62pDK6gMVwCMt0PvUJNxEpcs0uKmPqx8eF66+V++VKjpKI8CerlxrEi7oOq344tAwRYK941HqXgBN9nQB1X33cwIufEwqq5nU85HGlGFm6O8EEGQb6+n5hXItCuMJAZWXkYiK1UW7H3zLDW4xJp+ijA35TfcHusirUaH8WQ=='),DATE '1911-12-15'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (26,'First 26','Last 
26',FROM_BASE64('/j6wAEIFHAARYGDM5RDQYihb8QSfQGvZA7O2wDL2Tag1iY+twDM6DcFtvKt3PFmaFKYlB1mNkc129CWAW7PSHIIKDIVTDeI4PSHWy6DwCrYBDfn/ARmP7tRFqtM/RzFQKVQS3BvKPTAv3dNTjIWto9C0FFv7TyguTv0aPVkboxzQpJnR2FfjNG05/Uy2J67w+ngspZiUvj3aRQI/s/k8qQ=='),DATE '1912-07-01'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (27,'First 27','Last 27',FROM_BASE64('OTmvVaLjJI/8/xL6/W2dYhmAnhnB8SlzdnU1VxV/Z/FAmYp+4rALgjjq6KrQNpZ7oF2Iw+MF6bWbdQO2I3uqiH9nwCGflrq1Tjf8YkqwfZvYJ/RAorsd9WHHK74+5XpGYB3hgcPjeZ5vZg6cBeHnJbAlxEKTiBpnGvOoQqBm5bQQ1sLKsLanvBcMEsGyT98BlEZIHYeubPOELbLmhR/SWQ=='),DATE '1939-05-17'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (28,'First 28','Last 28',FROM_BASE64('OVlneKkb0DxkcCvpsKAVCE6hTutOrOBk+lF/iNCh/YMFQbIiVdZyBWNhpo8yfDKicgL50n3jjPNvEPX+I/RdKG82uM/bF8v/SWkrpzxXX9HMN4Ng9kErouVTK7s0Gf1eBHQcF4WrIbnCuN+SxWQxJhD6LCX02xw0JxXyqqRBUEmGYEbTmr/vfKZgGfj0UWYdvxbFX8bO+6vDq9NK9wTAKA=='),DATE '1946-10-18'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (29,'First 29','Last 29',FROM_BASE64('BUClPKVhaV2/DkWtL/JwWVIlqUjF1bOhCNQGM0xz/pbgxsE1jcLYNoMNVFbVPhwmEIyGiqAGOyUzfyyVVNHfPY0Hrbw2LlHXbm7VymFvxqlRNufrDnQrA6ZzsZECwkYHNtrOVcSp0rdSToNDzKdViSDPwIEzELtCKFWcycDYHF5AZzJDU1AV7gQQZHi8h8oJfncvCP9wLnXy/YpCjnDClQ=='),DATE '1956-12-23'); +INSERT INTO Singers (SingerId, FirstName, LastName, SingerInfo, BirthDate) VALUES (30,'First 30','Last 30',FROM_BASE64('1JJ36b41BmeXie2RZ8TykjUUzyJfWV0ZoRShMybsVYebCanPaTb6uUiTFT4MDP1ise6jjN2STpZ49aHL8fOj02vzq1TsAVOznAScd2cbLDdnfSjVeABZRFKKIslpBNUSEP+4sgkoRi7D+ojXsNYVl+D+hSEB1mPn0brbV6mMQUWeryiUFf9ock9hYqa3BbhDEGFjkH0WcnRrvDHZgGAIVw=='),DATE '1988-05-29'); +COMMIT; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 30 AS EXPECTED +FROM Singers; + +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (1,1,"Album 1 1",980045); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (2,2,"Album 2 2",667788); +INSERT 
INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (3,3,"Album 3 3",908791); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (4,4,"Album 4 4",690335); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (5,5,"Album 5 5",133041); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (6,6,"Album 6 6",505292); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (7,7,"Album 7 7",91969); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (8,8,"Album 8 8",289965); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (9,9,"Album 9 9",78176); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (10,10,"Album 10 10",485664); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (11,11,"Album 11 11",972680); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (12,12,"Album 12 12",893680); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (13,13,"Album 13 13",892138); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (14,14,"Album 14 14",449562); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (15,15,"Album 15 15",150968); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (16,16,"Album 16 16",580377); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (17,17,"Album 17 17",763081); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (18,18,"Album 18 18",203427); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (19,19,"Album 19 19",995368); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (20,20,"Album 20 20",29900); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (21,21,"Album 21 21",723728); +INSERT INTO Albums 
(SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (22,22,"Album 22 22",540582); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (23,23,"Album 23 23",784245); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (24,24,"Album 24 24",614788); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (25,25,"Album 25 25",275649); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (26,26,"Album 26 26",970898); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (27,27,"Album 27 27",409289); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (28,28,"Album 28 28",766560); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (29,29,"Album 29 29",32414); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (30,30,"Album 30 30",457957); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (1,31,"Album 1 31",52546); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (2,32,"Album 2 32",412424); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (3,33,"Album 3 33",568496); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (4,34,"Album 4 34",353491); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (5,35,"Album 5 35",489951); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (6,36,"Album 6 36",75938); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (7,37,"Album 7 37",460461); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (8,38,"Album 8 38",642042); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (9,39,"Album 9 39",282872); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (10,40,"Album 10 40",521496); +INSERT INTO Albums 
(SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (11,41,"Album 11 41",98126); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (12,42,"Album 12 42",535113); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (13,43,"Album 13 43",957625); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (14,44,"Album 14 44",667630); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (15,45,"Album 15 45",236968); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (16,46,"Album 16 46",445647); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (17,47,"Album 17 47",446396); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (18,48,"Album 18 48",852859); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (19,49,"Album 19 49",404105); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (20,50,"Album 20 50",384439); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (21,51,"Album 21 51",440468); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (22,52,"Album 22 52",455384); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (23,53,"Album 23 53",210756); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (24,54,"Album 24 54",849113); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (25,55,"Album 25 55",63969); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (26,56,"Album 26 56",277122); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (27,57,"Album 27 57",350063); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (28,58,"Album 28 58",359473); +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (29,59,"Album 29 59",209825); +INSERT 
INTO Albums (SingerId, AlbumId, AlbumTitle, MarketingBudget) VALUES (30,60,"Album 30 60",84543); + +COMMIT; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 60 AS EXPECTED +FROM Albums; + +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (12,42,1,"Song 12 42 1",387,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (12,12,2,"Song 12 12 2",202,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (29,59,3,"Song 29 59 3",160,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (23,23,4,"Song 23 23 4",255,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (24,54,5,"Song 24 54 5",436,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (28,58,6,"Song 28 58 6",121,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (27,27,7,"Song 27 27 7",319,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (24,24,8,"Song 24 24 8",213,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (19,49,9,"Song 19 49 9",280,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (17,47,10,"Song 17 47 10",253,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (6,6,11,"Song 6 6 11",321,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (12,42,12,"Song 12 42 12",124,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (25,25,13,"Song 25 25 13",449,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (24,24,14,"Song 24 24 14",438,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, 
TrackId, SongName, Duration, SongGenre) VALUES (5,5,15,"Song 5 5 15",378,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (9,39,16,"Song 9 39 16",202,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (20,50,17,"Song 20 50 17",452,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (7,37,18,"Song 7 37 18",420,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (8,8,19,"Song 8 8 19",318,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (5,35,20,"Song 5 35 20",347,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (3,3,21,"Song 3 3 21",377,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (15,15,22,"Song 15 15 22",314,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (19,49,23,"Song 19 49 23",199,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (20,20,24,"Song 20 20 24",266,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (15,45,25,"Song 15 45 25",433,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (14,44,26,"Song 14 44 26",482,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (19,19,27,"Song 19 19 27",345,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (13,43,28,"Song 13 43 28",159,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (18,48,29,"Song 18 48 29",350,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (13,13,30,"Song 13 13 30",131,'Unknown'); +INSERT 
INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (9,9,31,"Song 9 9 31",183,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (13,13,32,"Song 13 13 32",193,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (24,24,33,"Song 24 24 33",378,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (30,60,34,"Song 30 60 34",270,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (13,43,35,"Song 13 43 35",375,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (27,27,36,"Song 27 27 36",219,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (20,50,37,"Song 20 50 37",314,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (18,48,38,"Song 18 48 38",416,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (21,51,39,"Song 21 51 39",330,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (1,31,40,"Song 1 31 40",376,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (5,5,41,"Song 5 5 41",398,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (15,45,42,"Song 15 45 42",466,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (24,24,43,"Song 24 24 43",384,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (19,19,44,"Song 19 19 44",472,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (15,45,45,"Song 15 45 45",246,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES 
(3,33,46,"Song 3 33 46",412,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (23,23,47,"Song 23 23 47",159,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (30,60,48,"Song 30 60 48",290,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (19,19,49,"Song 19 19 49",446,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (16,16,50,"Song 16 16 50",485,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (4,4,51,"Song 4 4 51",185,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (8,38,52,"Song 8 38 52",349,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (24,54,53,"Song 24 54 53",301,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (5,35,54,"Song 5 35 54",206,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (30,30,55,"Song 30 30 55",250,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (12,42,56,"Song 12 42 56",146,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (30,30,57,"Song 30 30 57",416,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (26,56,58,"Song 26 56 58",244,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (20,50,59,"Song 20 50 59",356,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (7,7,60,"Song 7 7 60",234,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (19,19,61,"Song 19 19 61",412,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, 
SongName, Duration, SongGenre) VALUES (13,43,62,"Song 13 43 62",161,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (5,5,63,"Song 5 5 63",300,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (1,31,64,"Song 1 31 64",307,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (4,4,65,"Song 4 4 65",197,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (24,54,66,"Song 24 54 66",180,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (3,3,67,"Song 3 3 67",156,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (14,44,68,"Song 14 44 68",184,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (21,51,69,"Song 21 51 69",486,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (19,49,70,"Song 19 49 70",212,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (9,39,71,"Song 9 39 71",452,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (23,53,72,"Song 23 53 72",425,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (11,41,73,"Song 11 41 73",316,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (8,8,74,"Song 8 8 74",395,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (9,9,75,"Song 9 9 75",189,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (2,2,76,"Song 2 2 76",354,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (23,53,77,"Song 23 53 77",137,'Unknown'); +INSERT INTO Songs 
(SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (15,15,78,"Song 15 15 78",176,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (30,60,79,"Song 30 60 79",224,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (14,44,80,"Song 14 44 80",305,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (27,27,81,"Song 27 27 81",432,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (18,18,82,"Song 18 18 82",357,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (10,10,83,"Song 10 10 83",187,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (12,42,84,"Song 12 42 84",461,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (8,8,85,"Song 8 8 85",434,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (1,31,86,"Song 1 31 86",436,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (11,41,87,"Song 11 41 87",469,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (13,13,88,"Song 13 13 88",452,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (4,34,89,"Song 4 34 89",309,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (21,21,90,"Song 21 21 90",226,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (6,36,91,"Song 6 36 91",257,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (27,27,92,"Song 27 27 92",251,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (9,39,93,"Song 9 39 
93",325,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (30,30,94,"Song 30 30 94",122,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (29,59,95,"Song 29 59 95",207,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (1,1,96,"Song 1 1 96",318,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (4,4,97,"Song 4 4 97",353,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (23,23,98,"Song 23 23 98",450,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (12,12,99,"Song 12 12 99",323,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (24,24,100,"Song 24 24 100",397,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (27,27,101,"Song 27 27 101",296,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (29,59,102,"Song 29 59 102",349,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (17,47,103,"Song 17 47 103",438,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (5,5,104,"Song 5 5 104",388,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (26,56,105,"Song 26 56 105",425,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (22,52,106,"Song 22 52 106",154,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (23,23,107,"Song 23 23 107",213,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (8,38,108,"Song 8 38 108",276,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, 
SongName, Duration, SongGenre) VALUES (9,39,109,"Song 9 39 109",417,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (9,9,110,"Song 9 9 110",299,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (22,52,111,"Song 22 52 111",476,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (21,21,112,"Song 21 21 112",225,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (23,23,113,"Song 23 23 113",303,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (7,7,114,"Song 7 7 114",291,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (8,38,115,"Song 8 38 115",276,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (14,44,116,"Song 14 44 116",238,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (27,57,117,"Song 27 57 117",188,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (28,28,118,"Song 28 28 118",372,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (15,15,119,"Song 15 15 119",258,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (21,21,120,"Song 21 21 120",308,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (29,59,121,"Song 29 59 121",319,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (28,58,122,"Song 28 58 122",453,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (7,7,123,"Song 7 7 123",198,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (4,4,124,"Song 4 4 
124",435,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (27,27,125,"Song 27 27 125",475,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (30,30,126,"Song 30 30 126",395,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (21,51,127,"Song 21 51 127",454,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (29,29,128,"Song 29 29 128",376,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (27,57,129,"Song 27 57 129",396,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (23,53,130,"Song 23 53 130",458,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (6,36,131,"Song 6 36 131",289,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (29,29,132,"Song 29 29 132",207,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (25,55,133,"Song 25 55 133",280,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (3,3,134,"Song 3 3 134",432,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (5,35,135,"",304,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (3,3,136,"",392,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (12,12,137,"",393,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (13,13,138,"",382,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (18,48,139,"",447,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES 
(17,17,140,"",182,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (23,23,141,"",266,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (21,51,142,"",383,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (3,3,143,"",439,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (25,25,144,"",454,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (12,12,145,"",179,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (19,19,146,"",422,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (24,54,147,"",478,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (8,38,148,"",233,'Unknown'); +INSERT INTO Songs (SingerId, AlbumId, TrackId, SongName, Duration, SongGenre) VALUES (6,6,149,"",245,'Unknown'); + +COMMIT; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 149 AS EXPECTED +FROM Songs; + +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (1,1,DATE '2003-06-19',TIMESTAMP '2003-06-19T12:30:05Z',TIMESTAMP '2003-06-19T18:57:15Z',[11,93,140,923]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (2,18,DATE '2004-01-25',TIMESTAMP '2004-01-25T14:58:28Z',TIMESTAMP '2004-01-26T01:10:52Z',[18,51,101,812]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (3,21,DATE '2005-03-15',TIMESTAMP '2005-03-15T18:14:50Z',TIMESTAMP '2005-03-16T02:21:28Z',[23,26,107,721]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (4,16,DATE '2009-05-09',TIMESTAMP '2009-05-09T05:22:34Z',TIMESTAMP '2009-05-09T15:28:28Z',[18,70,150,297]); +INSERT INTO Concerts 
(VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (5,11,DATE '2001-01-07',TIMESTAMP '2001-01-07T18:37:33Z',TIMESTAMP '2001-01-07T21:22:17Z',[20,55,185,672]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (6,25,DATE '2015-11-19',TIMESTAMP '2015-11-19T22:47:42Z',TIMESTAMP '2015-11-20T02:54:01Z',[12,73,150,833]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (7,26,DATE '2012-10-06',TIMESTAMP '2012-10-06T10:58:43Z',TIMESTAMP '2012-10-06T15:35:40Z',[8,83,199,625]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (8,8,DATE '2001-09-26',TIMESTAMP '2001-09-26T06:41:20Z',TIMESTAMP '2001-09-26T16:38:35Z',[19,87,192,912]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (9,27,DATE '2016-11-24',TIMESTAMP '2016-11-24T20:00:48Z',TIMESTAMP '2016-11-24T23:03:07Z',[20,84,134,885]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (10,30,DATE '2017-05-05',TIMESTAMP '2017-05-05T12:44:05Z',TIMESTAMP '2017-05-05T23:06:55Z',[17,44,177,997]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (11,7,DATE '2018-06-07',TIMESTAMP '2018-06-07T07:03:11Z',TIMESTAMP '2018-06-07T08:21:41Z',[10,73,182,287]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (12,22,DATE '2009-01-07',TIMESTAMP '2009-01-07T23:22:11Z',TIMESTAMP '2009-01-08T08:34:18Z',[22,59,150,983]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (13,16,DATE '2013-06-28',TIMESTAMP '2013-06-28T14:59:25Z',TIMESTAMP '2013-06-28T22:32:11Z',[17,41,129,433]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (14,11,DATE '2005-08-19',TIMESTAMP '2005-08-19T01:11:28Z',TIMESTAMP 
'2005-08-19T01:30:30Z',[18,49,110,590]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (15,18,DATE '2001-11-26',TIMESTAMP '2001-11-26T15:55:31Z',TIMESTAMP '2001-11-26T20:52:13Z',[18,51,132,854]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (16,26,DATE '2009-01-04',TIMESTAMP '2009-01-04T03:09:11Z',TIMESTAMP '2009-01-04T12:02:14Z',[5,37,146,344]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (17,20,DATE '2012-09-28',TIMESTAMP '2012-09-28T00:45:00Z',TIMESTAMP '2012-09-28T02:10:39Z',[15,89,185,480]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (18,24,DATE '2004-09-06',TIMESTAMP '2004-09-06T09:55:40Z',TIMESTAMP '2004-09-06T18:10:32Z',[23,51,113,244]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (19,21,DATE '2010-11-18',TIMESTAMP '2010-11-18T09:59:17Z',TIMESTAMP '2010-11-18T17:13:12Z',[14,69,164,218]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (20,29,DATE '2010-12-24',TIMESTAMP '2010-12-24T04:21:25Z',TIMESTAMP '2010-12-24T06:10:08Z',[20,34,166,573]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (21,3,DATE '2000-05-14',TIMESTAMP '2000-05-14T13:49:08Z',TIMESTAMP '2000-05-14T14:39:25Z',[21,67,136,779]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (22,18,DATE '2000-05-14',TIMESTAMP '2000-05-14T00:23:23Z',TIMESTAMP '2000-05-14T01:20:04Z',[21,91,111,749]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (23,26,DATE '2015-05-04',TIMESTAMP '2015-05-04T10:39:46Z',TIMESTAMP '2015-05-04T19:21:45Z',[24,91,128,559]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (24,16,DATE 
'2012-08-18',TIMESTAMP '2012-08-18T08:47:12Z',TIMESTAMP '2012-08-18T09:35:03Z',[19,44,136,281]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (25,4,DATE '2000-03-16',TIMESTAMP '2000-03-16T10:15:15Z',TIMESTAMP '2000-03-16T12:29:53Z',[22,28,111,948]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (26,4,DATE '2002-11-20',TIMESTAMP '2002-11-20T16:28:19Z',TIMESTAMP '2002-11-20T17:56:10Z',[7,70,141,517]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (27,23,DATE '2000-08-09',TIMESTAMP '2000-08-09T04:30:51Z',TIMESTAMP '2000-08-09T15:27:15Z',[13,98,156,230]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (28,16,DATE '2000-10-15',TIMESTAMP '2000-10-15T04:12:39Z',TIMESTAMP '2000-10-15T14:07:05Z',[8,39,160,455]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (29,22,DATE '2003-03-25',TIMESTAMP '2003-03-25T17:21:56Z',TIMESTAMP '2003-03-25T19:18:25Z',[17,70,148,681]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (30,15,DATE '2008-11-11',TIMESTAMP '2008-11-11T22:56:07Z',TIMESTAMP '2008-11-12T09:33:48Z',[24,47,175,901]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (31,7,DATE '2018-05-22',TIMESTAMP '2018-05-22T20:54:59Z',TIMESTAMP '2018-05-23T02:52:28Z',[13,34,177,804]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (32,30,DATE '2000-04-03',TIMESTAMP '2000-04-03T13:54:10Z',TIMESTAMP '2000-04-03T15:57:02Z',[16,48,137,249]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (33,23,DATE '2003-12-24',TIMESTAMP '2003-12-24T22:22:00Z',TIMESTAMP '2003-12-25T06:09:40Z',[15,36,131,922]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, 
BeginTime, EndTime, TicketPrices) VALUES (34,12,DATE '2012-06-23',TIMESTAMP '2012-06-23T18:15:30Z',TIMESTAMP '2012-06-24T03:46:17Z',[25,31,160,564]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (35,5,DATE '2017-12-15',TIMESTAMP '2017-12-15T09:43:38Z',TIMESTAMP '2017-12-15T17:18:28Z',[22,31,177,868]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (36,20,DATE '2012-12-21',TIMESTAMP '2012-12-21T08:28:14Z',TIMESTAMP '2012-12-21T11:34:59Z',[25,62,143,437]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (37,19,DATE '2014-07-07',TIMESTAMP '2014-07-07T22:01:35Z',TIMESTAMP '2014-07-08T04:39:37Z',[8,31,184,784]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (38,15,DATE '2012-07-26',TIMESTAMP '2012-07-26T09:45:35Z',TIMESTAMP '2012-07-26T13:03:53Z',[19,79,140,908]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (39,24,DATE '2014-03-19',TIMESTAMP '2014-03-19T07:52:25Z',TIMESTAMP '2014-03-19T11:47:01Z',[11,90,141,978]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (40,4,DATE '2015-08-26',TIMESTAMP '2015-08-26T20:51:25Z',TIMESTAMP '2015-08-27T07:06:46Z',[15,94,195,510]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (41,24,DATE '2016-04-11',TIMESTAMP '2016-04-11T08:59:07Z',TIMESTAMP '2016-04-11T13:23:30Z',[15,51,173,233]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (42,18,DATE '2005-03-19',TIMESTAMP '2005-03-19T15:45:04Z',TIMESTAMP '2005-03-19T16:28:42Z',[19,31,188,546]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (43,7,DATE '2001-01-04',TIMESTAMP '2001-01-04T11:02:16Z',TIMESTAMP '2001-01-04T11:32:21Z',[20,37,133,958]); +INSERT 
INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (44,5,DATE '2015-12-24',TIMESTAMP '2015-12-24T06:49:48Z',TIMESTAMP '2015-12-24T14:46:46Z',[12,61,175,233]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (45,12,DATE '2011-08-24',TIMESTAMP '2011-08-24T03:45:46Z',TIMESTAMP '2011-08-24T06:13:10Z',[18,38,169,913]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (46,16,DATE '2017-03-04',TIMESTAMP '2017-03-04T04:01:04Z',TIMESTAMP '2017-03-04T13:44:38Z',[21,79,119,839]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (47,18,DATE '2009-05-19',TIMESTAMP '2009-05-19T23:10:52Z',TIMESTAMP '2009-05-20T04:02:01Z',[25,79,151,357]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (48,22,DATE '2003-10-03',TIMESTAMP '2003-10-03T14:10:24Z',TIMESTAMP '2003-10-03T17:35:09Z',[18,60,140,450]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (49,9,DATE '2003-03-07',TIMESTAMP '2003-03-07T22:09:59Z',TIMESTAMP '2003-03-08T08:28:29Z',[22,41,122,726]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (50,9,DATE '2015-07-12',TIMESTAMP '2015-07-12T07:43:51Z',TIMESTAMP '2015-07-12T12:45:20Z',[18,67,126,474]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (51,12,DATE '2014-11-05',TIMESTAMP '2014-11-05T19:03:00Z',TIMESTAMP '2014-11-06T05:27:07Z',[19,43,125,865]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (52,6,DATE '2016-07-25',TIMESTAMP '2016-07-25T14:39:28Z',TIMESTAMP '2016-07-26T00:36:03Z',[6,74,192,344]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (53,13,DATE '2005-08-02',TIMESTAMP '2005-08-02T16:06:47Z',TIMESTAMP 
'2005-08-02T17:13:41Z',[5,52,192,977]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (54,18,DATE '2010-01-25',TIMESTAMP '2010-01-25T07:34:54Z',TIMESTAMP '2010-01-25T16:29:11Z',[24,85,181,304]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (55,14,DATE '2012-05-20',TIMESTAMP '2012-05-20T13:15:12Z',TIMESTAMP '2012-05-20T17:40:09Z',[15,43,104,665]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (56,3,DATE '2013-09-08',TIMESTAMP '2013-09-08T19:53:42Z',TIMESTAMP '2013-09-08T22:32:52Z',[14,81,129,354]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (57,27,DATE '2003-07-18',TIMESTAMP '2003-07-18T23:11:24Z',TIMESTAMP '2003-07-19T03:29:46Z',[21,85,188,854]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (58,27,DATE '2001-04-10',TIMESTAMP '2001-04-10T08:36:49Z',TIMESTAMP '2001-04-10T16:17:57Z',[17,86,161,438]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (59,2,DATE '2002-07-02',TIMESTAMP '2002-07-02T17:32:20Z',TIMESTAMP '2002-07-03T01:59:33Z',[23,59,164,357]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (60,28,DATE '2000-11-24',TIMESTAMP '2000-11-24T12:53:25Z',TIMESTAMP '2000-11-24T22:37:53Z',[22,47,161,739]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (61,12,DATE '2017-07-04',TIMESTAMP '2017-07-04T21:02:01Z',TIMESTAMP '2017-07-05T03:57:29Z',[16,88,179,478]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (62,3,DATE '2015-10-07',TIMESTAMP '2015-10-07T17:58:42Z',TIMESTAMP '2015-10-07T21:04:38Z',[21,44,155,381]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (63,23,DATE 
'2005-05-03',TIMESTAMP '2005-05-03T15:08:10Z',TIMESTAMP '2005-05-03T20:58:30Z',[20,43,111,824]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (64,24,DATE '2012-12-09',TIMESTAMP '2012-12-09T02:52:09Z',TIMESTAMP '2012-12-09T08:01:11Z',[18,87,106,997]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (65,30,DATE '2004-03-01',TIMESTAMP '2004-03-01T07:09:06Z',TIMESTAMP '2004-03-01T07:49:32Z',[14,26,195,895]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (66,24,DATE '2007-05-19',TIMESTAMP '2007-05-19T10:20:57Z',TIMESTAMP '2007-05-19T15:21:09Z',[18,54,179,238]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (67,16,DATE '2016-01-06',TIMESTAMP '2016-01-06T21:32:20Z',TIMESTAMP '2016-01-07T02:31:32Z',[20,61,120,652]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (68,2,DATE '2007-10-26',TIMESTAMP '2007-10-26T03:37:22Z',TIMESTAMP '2007-10-26T10:02:36Z',[11,65,151,537]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (69,2,DATE '2018-08-11',TIMESTAMP '2018-08-11T01:33:38Z',TIMESTAMP '2018-08-11T07:39:21Z',[10,98,105,621]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (70,23,DATE '2012-07-06',TIMESTAMP '2012-07-06T01:02:23Z',TIMESTAMP '2012-07-06T05:04:16Z',[14,44,172,953]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (71,7,DATE '2006-01-24',TIMESTAMP '2006-01-24T15:32:10Z',TIMESTAMP '2006-01-24T17:40:43Z',[9,58,150,713]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (72,8,DATE '2002-11-06',TIMESTAMP '2002-11-06T05:58:03Z',TIMESTAMP '2002-11-06T07:43:24Z',[25,36,193,213]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, 
BeginTime, EndTime, TicketPrices) VALUES (73,10,DATE '2003-11-24',TIMESTAMP '2003-11-24T17:39:10Z',TIMESTAMP '2003-11-25T03:17:36Z',[8,55,200,352]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (74,16,DATE '2007-11-03',TIMESTAMP '2007-11-03T05:49:12Z',TIMESTAMP '2007-11-03T16:34:16Z',[21,50,114,820]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (75,4,DATE '2009-05-06',TIMESTAMP '2009-05-06T18:52:07Z',TIMESTAMP '2009-05-06T21:10:02Z',[16,42,101,281]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (76,1,DATE '2012-12-03',TIMESTAMP '2012-12-03T06:01:05Z',TIMESTAMP '2012-12-03T06:45:00Z',[24,60,140,292]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (77,1,DATE '2016-11-26',TIMESTAMP '2016-11-26T01:19:27Z',TIMESTAMP '2016-11-26T07:20:17Z',[19,31,123,214]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (78,9,DATE '2018-05-21',TIMESTAMP '2018-05-21T00:14:43Z',TIMESTAMP '2018-05-21T08:43:35Z',[7,28,115,634]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (79,14,DATE '2013-11-20',TIMESTAMP '2013-11-20T08:54:47Z',TIMESTAMP '2013-11-20T10:44:54Z',[18,39,155,328]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (80,17,DATE '2015-10-11',TIMESTAMP '2015-10-11T23:41:17Z',TIMESTAMP '2015-10-12T02:42:48Z',[16,94,102,894]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (81,23,DATE '2011-08-07',TIMESTAMP '2011-08-07T19:33:01Z',TIMESTAMP '2011-08-07T21:51:53Z',[23,90,134,370]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (82,7,DATE '2010-04-10',TIMESTAMP '2010-04-10T13:22:08Z',TIMESTAMP '2010-04-10T17:59:08Z',[18,68,121,303]); +INSERT 
INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (83,27,DATE '2001-07-08',TIMESTAMP '2001-07-08T20:19:54Z',TIMESTAMP '2001-07-08T22:46:15Z',[18,86,148,746]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (84,6,DATE '2017-09-02',TIMESTAMP '2017-09-02T10:29:03Z',TIMESTAMP '2017-09-02T13:06:41Z',[12,85,138,471]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (85,1,DATE '2013-11-02',TIMESTAMP '2013-11-02T04:01:03Z',TIMESTAMP '2013-11-02T14:08:47Z',[9,65,111,583]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (86,22,DATE '2004-04-03',TIMESTAMP '2004-04-03T19:13:48Z',TIMESTAMP '2004-04-04T05:59:31Z',[19,72,105,908]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (87,2,DATE '2012-02-26',TIMESTAMP '2012-02-26T22:52:21Z',TIMESTAMP '2012-02-27T02:55:24Z',[16,75,129,740]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (88,9,DATE '2017-09-17',TIMESTAMP '2017-09-17T11:28:49Z',TIMESTAMP '2017-09-17T12:13:03Z',[24,77,182,755]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (89,11,DATE '2011-03-28',TIMESTAMP '2011-03-28T13:05:23Z',TIMESTAMP '2011-03-28T16:32:29Z',[22,96,174,731]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (90,21,DATE '2006-12-12',TIMESTAMP '2006-12-12T20:44:10Z',TIMESTAMP '2006-12-12T22:10:34Z',[15,68,166,616]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (91,27,DATE '2010-08-18',TIMESTAMP '2010-08-18T05:49:35Z',TIMESTAMP '2010-08-18T12:58:36Z',[12,84,157,369]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (92,2,DATE '2003-02-03',TIMESTAMP '2003-02-03T11:19:43Z',TIMESTAMP 
'2003-02-03T22:10:42Z',[25,59,140,939]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (93,5,DATE '2016-01-04',TIMESTAMP '2016-01-04T08:10:26Z',TIMESTAMP '2016-01-04T13:08:30Z',[5,90,163,272]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (94,3,DATE '2018-04-20',TIMESTAMP '2018-04-20T07:19:52Z',TIMESTAMP '2018-04-20T17:41:01Z',[5,59,109,854]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (95,19,DATE '2016-10-09',TIMESTAMP '2016-10-09T17:02:59Z',TIMESTAMP '2016-10-09T17:37:27Z',[6,35,176,442]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (96,9,DATE '2007-06-12',TIMESTAMP '2007-06-12T16:50:12Z',TIMESTAMP '2007-06-12T19:27:30Z',[7,49,169,729]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (97,29,DATE '2012-11-25',TIMESTAMP '2012-11-25T20:40:30Z',TIMESTAMP '2012-11-25T21:29:50Z',[12,35,128,269]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (98,11,DATE '2013-10-22',TIMESTAMP '2013-10-22T03:26:36Z',TIMESTAMP '2013-10-22T06:42:42Z',[14,49,148,726]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (99,10,DATE '2006-05-10',TIMESTAMP '2006-05-10T05:49:43Z',TIMESTAMP '2006-05-10T07:12:18Z',[5,67,131,360]); +INSERT INTO Concerts (VenueId, SingerId, ConcertDate, BeginTime, EndTime, TicketPrices) VALUES (100,18,DATE '2015-02-15',TIMESTAMP '2015-02-15T01:18:05Z',TIMESTAMP '2015-02-15T04:19:27Z',[11,38,127,909]); + +COMMIT; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 100 AS EXPECTED +FROM Concerts; + +COMMIT; + +# Switch to read-only mode +SET READONLY = TRUE; + +# Do a query that should also generate a read timestamp +@EXPECT RESULT_SET 'NUMBER_OF_SINGERS',30 +SELECT COUNT(*) AS NUMBER_OF_SINGERS +FROM Singers; + +# Check 
that the read-timestamp is there +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; + +-- End the read-only transaction and try to get a commit timestamp. +COMMIT; + +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; + +-- Try to do an update in read-only mode +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE Singers SET FirstName='FirstName' WHERE SingerId=1; + +-- Verify that it was not changed +@EXPECT RESULT_SET 'FirstName','First 1' +SELECT FirstName +FROM Singers +WHERE SingerId=1; + +COMMIT; + +-- Switch to autocommit and read/write mode +SET READONLY = FALSE; +SET AUTOCOMMIT = TRUE; + +-- Try to insert a record that already exists +@EXPECT EXCEPTION ALREADY_EXISTS +INSERT INTO Singers (SingerId, FirstName, LastName) +SELECT SingerId, FirstName, LastName +FROM Singers +WHERE SingerId=1; + +-- Ensure there was no commit timestamp +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; + +-- Delete a record that will also cascade to other records +-- First verify the actual number of records +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1 AS EXPECTED +FROM Singers +WHERE SingerId=1; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 2 AS EXPECTED +FROM Albums +WHERE SingerId=1; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 4 AS EXPECTED +FROM Songs +WHERE SingerId=1; + +-- Even though the delete cascades to several other records, the update count is returned as 1 +@EXPECT UPDATE_COUNT 1 +DELETE FROM Singers WHERE SingerId=1; + +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP' +SHOW VARIABLE COMMIT_TIMESTAMP; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM Singers +WHERE SingerId=1; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM Albums +WHERE SingerId=1; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM Songs +WHERE SingerId=1; + +-- Switch to transactional mode +SET AUTOCOMMIT = FALSE; + +-- Delete a record that will also cascade to other 
records and then rollback +-- First verify the actual number of records +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1 AS EXPECTED +FROM Singers +WHERE SingerId=2; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 2 AS EXPECTED +FROM Albums +WHERE SingerId=2; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1 AS EXPECTED +FROM Songs +WHERE SingerId=2; + +-- Even though the delete cascades to several other records, the update count is returned as 1 +@EXPECT UPDATE_COUNT 1 +DELETE FROM Singers WHERE SingerId=2; + +-- Verify that the change is visible inside the transaction +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM Singers +WHERE SingerId=2; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM Albums +WHERE SingerId=2; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM Songs +WHERE SingerId=2; + +-- Rollback and verify that no changes were persisted +ROLLBACK; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1 AS EXPECTED +FROM Singers +WHERE SingerId=2; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 2 AS EXPECTED +FROM Albums +WHERE SingerId=2; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1 AS EXPECTED +FROM Songs +WHERE SingerId=2; + +-- End transaction +COMMIT; + +-- Switch to autocommit and partitioned_non_atomic mode and redo the delete +SET AUTOCOMMIT = TRUE; +SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; + +@EXPECT UPDATE_COUNT 1 +DELETE FROM Singers WHERE SingerId=2; + +-- There should be no commit timestamp for PARTITIONED_NON_ATOMIC +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM Singers +WHERE SingerId=2; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM Albums +WHERE SingerId=2; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM Songs +WHERE SingerId=2; + +@EXPECT RESULT_SET +WITH Song2 AS (SELECT * FROM Songs WHERE SingerId=2) +SELECT 
COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM Song2; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_CreateTables.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_CreateTables.sql new file mode 100644 index 000000000000..b72560b55f8c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_CreateTables.sql @@ -0,0 +1,98 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Test script that creates a couple of test tables in one transaction. + */ + +-- Turn off autocommit (and verify) +@EXPECT NO_RESULT +SET AUTOCOMMIT = FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',false +SHOW VARIABLE AUTOCOMMIT; + +-- Turn off readonly (and verify) +@EXPECT NO_RESULT +SET READONLY = FALSE; +@EXPECT RESULT_SET 'READONLY',false +SHOW VARIABLE READONLY; + +-- Start a DDL batch to execute a number of DDL statements as one operation. 
+@EXPECT NO_RESULT +START BATCH DDL; + +-- Create a couple of test tables +@EXPECT NO_RESULT +CREATE TABLE Singers ( + SingerId INT64 NOT NULL, + FirstName STRING(1024), + LastName STRING(1024), + SingerInfo BYTES(MAX) +) PRIMARY KEY (SingerId); + +@EXPECT NO_RESULT +CREATE TABLE Albums ( + SingerId INT64 NOT NULL, + AlbumId INT64 NOT NULL, + AlbumTitle STRING(MAX) +) PRIMARY KEY (SingerId, AlbumId), +-- interleave this table in the Singers table +INTERLEAVE IN PARENT Singers ON DELETE CASCADE; + +-- Create a secondary index +@EXPECT NO_RESULT +CREATE INDEX AlbumsByAlbumTitle ON Albums(AlbumTitle); + +-- Run the DDL batch +RUN BATCH; + +-- Reset the statement timeout +SET STATEMENT_TIMEOUT=null; + +/* + * Verify that the test tables have been created + */ +@EXPECT NO_RESULT +SET AUTOCOMMIT = TRUE; +@EXPECT NO_RESULT +SET READONLY = TRUE; + +-- Check that the table has been created +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1 AS EXPECTED +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='Singers'; + +-- Check for all columns +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 4 AS EXPECTED +FROM INFORMATION_SCHEMA.COLUMNS +WHERE TABLE_NAME='Singers' +/** + * List all expected column names + */ +AND COLUMN_NAME IN ( + 'SingerId', + 'FirstName', + 'LastName', + 'SingerInfo' +); + +-- Check for index +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1 AS EXPECTED +FROM INFORMATION_SCHEMA.INDEXES +WHERE TABLE_NAME='Albums' AND INDEX_NAME='AlbumsByAlbumTitle'; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_InsertTestData.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_InsertTestData.sql new file mode 100644 index 000000000000..e1434ce35952 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_InsertTestData.sql @@ -0,0 +1,79 @@ +/* + * Copyright 2019 Google LLC + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Insert test data into test tables + */ + +@EXPECT NO_RESULT +SET AUTOCOMMIT = FALSE; +@EXPECT NO_RESULT +SET READONLY = FALSE; + +@EXPECT UPDATE_COUNT 3 +INSERT INTO Singers (SingerId, FirstName, LastName) +VALUES(1, 'First 1', 'Last 1'), + (2, 'First 2', 'Last 2'), + (3, 'First 3', 'Last 3'); + +@EXPECT UPDATE_COUNT 3 +INSERT INTO Singers (SingerId, FirstName, LastName) +SELECT 4, 'First 4', 'Last 4' +UNION ALL +SELECT 5, 'First 5', 'Last 5' +UNION ALL +SELECT 6, 'First 6', 'Last 6'; + +@EXPECT UPDATE_COUNT 1 +INSERT INTO Singers (SingerId, FirstName, LastName) +VALUES (10, 'First 10', 'Last 10'); +@EXPECT UPDATE_COUNT 1 +INSERT INTO Singers (SingerId, FirstName, LastName) +VALUES (11, 'First 11', 'Last 11'); + +@EXPECT UPDATE_COUNT 5 +INSERT INTO Albums (SingerId, AlbumId, AlbumTitle) +VALUES + (1, 1, 'Album 1 1'), + (1, 2, 'Album 1 2'), + (2, 1, 'Album 2 1'), + (2, 2, 'Album 2 2'), + (2, 3, 'Album 2 3'); + +@EXPECT NO_RESULT +COMMIT; + +-- Try to insert a record that already exists +@EXPECT EXCEPTION ALREADY_EXISTS +INSERT INTO Singers (SingerId, FirstName, LastName) +VALUES (10, 'First 10', 'Last 10'); + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1 AS EXPECTED +FROM Singers +WHERE FirstName='First 10' AND LastName='Last 10'; + +@EXPECT NO_RESULT +ROLLBACK; + +-- Verify the contents of the tables +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 5 AS EXPECTED +FROM Albums; + +@EXPECT RESULT_SET 
+SELECT COUNT(*) AS ACTUAL, 8 AS EXPECTED +FROM Singers; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestAutocommitDmlMode.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestAutocommitDmlMode.sql new file mode 100644 index 000000000000..bd0e06ab8ec6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestAutocommitDmlMode.sql @@ -0,0 +1,87 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Script that tests the different possible autocommit dml modes + */ + +SET AUTOCOMMIT = FALSE; +SET READONLY = FALSE; + +-- First verify that the mode cannot be set when not in autocommit mode +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: Cannot set autocommit DML mode while not in autocommit mode or while a transaction is active' +SET AUTOCOMMIT_DML_MODE = 'Transactional'; + +-- Turn on autocommit and set mode to transactional +SET AUTOCOMMIT = TRUE; + +@EXPECT NO_RESULT +SET AUTOCOMMIT_DML_MODE = 'Transactional'; + +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','TRANSACTIONAL' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; + +-- Verify that executing an update statement is possible +@EXPECT UPDATE_COUNT 1 +UPDATE Singers SET LastName='Some Other Last Name' /* It used to be 'Last 1' */ +WHERE SingerId=1; + +@EXPECT RESULT_SET +SELECT LastName AS ACTUAL, 'Some Other Last Name' AS EXPECTED +FROM Singers +WHERE SingerId=1; + +-- Reset to original value in partitioned mode +@EXPECT NO_RESULT +SET AUTOCOMMIT_DML_MODE = 'partitioned_non_atomic'; + +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','PARTITIONED_NON_ATOMIC' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; + +@EXPECT UPDATE_COUNT 1 +UPDATE Singers SET LastName='Last 1' +WHERE SingerId=1; + +@EXPECT RESULT_SET +SELECT LastName AS ACTUAL, 'Last 1' AS EXPECTED +FROM Singers +WHERE SingerId=1; + +-- Verify that trying to set the mode to an invalid value will throw an exception +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for AUTOCOMMIT_DML_MODE: 'None'' +SET AUTOCOMMIT_DML_MODE = 'None'; + +-- Verify that setting the mode in read-only mode will throw an exception +SET READONLY = TRUE; + +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: Cannot set autocommit DML mode for a read-only connection' +SET AUTOCOMMIT_DML_MODE = 'Transactional'; + +-- Back to read-write mode +SET READONLY = FALSE; + +-- Verify that turning off autocommit and on again will not reset the AUTOCOMMIT_DML_MODE 
value +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','PARTITIONED_NON_ATOMIC' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; +SET AUTOCOMMIT = FALSE; +SET AUTOCOMMIT = TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','PARTITIONED_NON_ATOMIC' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; + +-- Reset to default value +SET AUTOCOMMIT_DML_MODE = 'Transactional'; +@EXPECT RESULT_SET 'AUTOCOMMIT_DML_MODE','TRANSACTIONAL' +SHOW VARIABLE AUTOCOMMIT_DML_MODE; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestAutocommitReadOnly.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestAutocommitReadOnly.sql new file mode 100644 index 000000000000..d104f74b7cda --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestAutocommitReadOnly.sql @@ -0,0 +1,64 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Script that tests a connection in read-only and autocommit mode + */ + +SET AUTOCOMMIT = TRUE; +SET READONLY = TRUE; + +-- First verify that the autocommit dml mode cannot be set when in read-only mode +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: Cannot set autocommit DML mode for a read-only connection' +SET AUTOCOMMIT_DML_MODE = 'PARTITIONED_NON_ATOMIC'; + +-- Verify that executing an update statement fails +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: Update statements are not allowed in read-only mode' +UPDATE Singers SET LastName='Some Other Last Name' /* It used to be 'Last 1' */ +WHERE SingerId=1; + +@EXPECT RESULT_SET +SELECT LastName AS ACTUAL, 'Last 1' AS EXPECTED +FROM Singers +WHERE SingerId=1; + +-- Verify the same for INSERT and DELETE statements +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: Update statements are not allowed in read-only mode' +INSERT INTO Singers (SingerId, FirstName, LastName) VALUES (9999, 'First 9999', 'Last 9999'); + +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: Update statements are not allowed in read-only mode' +DELETE FROM Singers; + +-- Verify that the same error message is given even if the update statements references a non-existent table +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: Update statements are not allowed in read-only mode' +/* The referenced table does not exist */ +update Artists set LastName='Some Last Name' +where ArtistId=1; + +-- Verify that DDL statements will also cause an exception +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: DDL statements are not allowed in read-only mode' +CREATE TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID); + +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: DDL statements are not allowed in read-only mode' +/* The statement is recognized even if it is preceded + * by a multi-line comment */ +-- And a single line comment, and some spaces + + DROP TABLE 
Singers; + +-- And verify that alter table statements also fail +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: DDL statements are not allowed in read-only mode' +alter table Singers add column test string(100); diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestGetCommitTimestamp.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestGetCommitTimestamp.sql new file mode 100644 index 000000000000..2baa59741210 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestGetCommitTimestamp.sql @@ -0,0 +1,138 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Test SHOW VARIABLE COMMIT_TIMESTAMP in different modes + */ + +-- Select query in autocommit and read-only mode should not yield a commit timestamp +@EXPECT NO_RESULT +SET AUTOCOMMIT = TRUE; +@EXPECT NO_RESULT +SET READONLY = TRUE; + +@EXPECT RESULT_SET +SELECT LastName AS ACTUAL, 'Last 1' AS EXPECTED +FROM Singers +WHERE SingerId=1; + +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; + +-- Select query in autocommit and read-write mode should not yield a commit timestamp +@EXPECT NO_RESULT +SET READONLY = FALSE; + +@EXPECT RESULT_SET +SELECT LastName AS ACTUAL, 'Last 1' AS EXPECTED +FROM Singers +WHERE SingerId=1; + +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; + +-- Select query in transactional and read-only mode should not yield a commit timestamp +@EXPECT NO_RESULT +SET AUTOCOMMIT = FALSE; +@EXPECT NO_RESULT +SET READONLY = TRUE; + +@EXPECT RESULT_SET +SELECT LastName AS ACTUAL, 'Last 1' AS EXPECTED +FROM Singers +WHERE SingerId=1; + +@EXPECT NO_RESULT +COMMIT; + +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; + +-- Select query in transactional and read-write mode should yield a commit timestamp +@EXPECT NO_RESULT +SET READONLY = FALSE; + +@EXPECT RESULT_SET +SELECT LastName AS ACTUAL, 'Last 1' AS EXPECTED +FROM Singers +WHERE SingerId=1; + +@EXPECT NO_RESULT +COMMIT; + +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP' +SHOW VARIABLE COMMIT_TIMESTAMP; + +-- Update statement in transactional and read-write mode should yield a commit timestamp +@EXPECT NO_RESULT +SET AUTOCOMMIT = FALSE; + +@EXPECT UPDATE_COUNT 1 +UPDATE Singers SET LastName='New Last 1' +WHERE SingerId=1; + +@EXPECT NO_RESULT +COMMIT; + +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP' +SHOW VARIABLE COMMIT_TIMESTAMP; + +-- Reset the value to its original value +@EXPECT UPDATE_COUNT 1 +UPDATE Singers SET LastName='Last 1' +WHERE SingerId=1; + +@EXPECT NO_RESULT +COMMIT; + +-- Select query in transactional and 
read-write mode that rollbacks should not yield a commit timestamp +@EXPECT RESULT_SET +SELECT LastName AS ACTUAL, 'Last 1' AS EXPECTED +FROM Singers +WHERE SingerId=1; + +@EXPECT NO_RESULT +ROLLBACK; + +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; + +-- Update statement in transactional and read-write mode that rollbacks should not yield a commit timestamp +@EXPECT UPDATE_COUNT 1 +UPDATE Singers SET LastName='New Last 1' +WHERE SingerId=1; + +@EXPECT NO_RESULT +ROLLBACK; + +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP',null +SHOW VARIABLE COMMIT_TIMESTAMP; + +-- Invalid select query in transactional and read-write mode should yield a commit timestamp +-- The (invalid) query is sent to the server, initiating a transaction, that is committed afterwards +SET AUTOCOMMIT = FALSE; +SET READONLY = FALSE; + +@EXPECT EXCEPTION INVALID_ARGUMENT +SELECT LastName AS ACTUAL, 'Last 1' AS EXPECTED +FROM NonExistentTable +WHERE SingerId=1; + +@EXPECT NO_RESULT +COMMIT; + +@EXPECT RESULT_SET 'COMMIT_TIMESTAMP' +SHOW VARIABLE COMMIT_TIMESTAMP; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestGetReadTimestamp.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestGetReadTimestamp.sql new file mode 100644 index 000000000000..c166d48ed88d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestGetReadTimestamp.sql @@ -0,0 +1,112 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Test SHOW VARIABLE READ_TIMESTAMP in different modes + */ + +-- Select query in autocommit and read-only mode should yield a read timestamp +SET AUTOCOMMIT = TRUE; +SET READONLY = TRUE; + +@EXPECT RESULT_SET +SELECT LastName AS ACTUAL, 'Last 1' AS EXPECTED +FROM Singers +WHERE SingerId=1; + +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; + +-- Select query in autocommit and read-write mode should yield a read timestamp +SET READONLY = FALSE; + +@EXPECT RESULT_SET +SELECT LastName AS ACTUAL, 'Last 1' AS EXPECTED +FROM Singers +WHERE SingerId=1; + +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; + +-- Select query in transactional and read-only mode should yield a read timestamp +SET AUTOCOMMIT = FALSE; +SET READONLY = TRUE; + +@EXPECT RESULT_SET +SELECT LastName AS ACTUAL, 'Last 1' AS EXPECTED +FROM Singers +WHERE SingerId=1; + +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; + +COMMIT; + +-- Select query in transactional and read-write mode should NOT yield a read timestamp +SET READONLY = FALSE; + +@EXPECT RESULT_SET +SELECT LastName AS ACTUAL, 'Last 1' AS EXPECTED +FROM Singers +WHERE SingerId=1; + +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; + +COMMIT; + +-- Update statement in transactional and read-write mode should NOT yield a read timestamp +SET AUTOCOMMIT = FALSE; + +@EXPECT UPDATE_COUNT 1 +UPDATE Singers SET LastName='New Last 1' +WHERE SingerId=1; + +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; + +ROLLBACK; + +-- 
Verify that the rollback actually worked +@EXPECT RESULT_SET +SELECT LastName AS ACTUAL, 'Last 1' AS EXPECTED +FROM Singers +WHERE SingerId=1; + +COMMIT; + +-- Invalid select query in autocommit and read-only mode should not yield a read timestamp +SET AUTOCOMMIT = TRUE; +SET READONLY = TRUE; + +@EXPECT EXCEPTION INVALID_ARGUMENT +SELECT LastName AS ACTUAL, 'Last 1' AS EXPECTED +FROM NonExistentTable +WHERE SingerId=1; + +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; + +-- Invalid select query in autocommit and read-write mode should not yield a read timestamp +SET READONLY = FALSE; + +@EXPECT EXCEPTION INVALID_ARGUMENT +SELECT LastName AS ACTUAL, 'Last 1' AS EXPECTED +FROM NonExistentTable +WHERE SingerId=1; + +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestInvalidStatements.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestInvalidStatements.sql new file mode 100644 index 000000000000..551d5c07ce7b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestInvalidStatements.sql @@ -0,0 +1,32 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Script for testing invalid/unrecognized statements + */ +-- EXPLAIN statement +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown statement: EXPLAIN' +EXPLAIN SELECT * +FROM Singers; + +-- EXPLAIN ANALYZE statement +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown statement: EXPLAIN ANALYZE' +EXPLAIN ANALYZE SELECT * +FROM Singers; + +-- SET unknown property +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown statement: SET some_property' +SET some_property='value'; \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestQueryOptions.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestQueryOptions.sql new file mode 100644 index 000000000000..d1abbce7476d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestQueryOptions.sql @@ -0,0 +1,85 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Test setting and getting the following query options: + * - Optimizer version + * - Optimizer statistics package + */ + +-- Set and get valid values. 
+-- Optimizer version +@EXPECT NO_RESULT +SET OPTIMIZER_VERSION = '1'; + +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','1' +SHOW VARIABLE OPTIMIZER_VERSION; + +@EXPECT NO_RESULT +SET OPTIMIZER_VERSION = '555'; + +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','555' +SHOW VARIABLE OPTIMIZER_VERSION; + +@EXPECT NO_RESULT +SET OPTIMIZER_VERSION = 'LATEST'; + +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','LATEST' +SHOW VARIABLE OPTIMIZER_VERSION; + +@EXPECT NO_RESULT +SET OPTIMIZER_VERSION = ''; + +@EXPECT RESULT_SET 'OPTIMIZER_VERSION','' +SHOW VARIABLE OPTIMIZER_VERSION; + +-- Optimizer statistics package +@EXPECT NO_RESULT +SET OPTIMIZER_STATISTICS_PACKAGE = 'custom-package_withNumbers-1234'; + +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','custom-package_withNumbers-1234' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; + +@EXPECT NO_RESULT +SET OPTIMIZER_STATISTICS_PACKAGE = ''; + +@EXPECT RESULT_SET 'OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE OPTIMIZER_STATISTICS_PACKAGE; + +-- Try to set invalid values. 
+-- Optimizer version +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for OPTIMIZER_VERSION: 'None'' +SET OPTIMIZER_VERSION = 'None'; + +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for OPTIMIZER_VERSION: 'v1'' +SET OPTIMIZER_VERSION = 'v1'; + +-- Optimizer statistics package +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for OPTIMIZER_STATISTICS_PACKAGE: ' '' +SET OPTIMIZER_STATISTICS_PACKAGE = ' '; + +-- RPC Priority +@EXPECT NO_RESULT +SET RPC_PRIORITY = 'MEDIUM'; + +@EXPECT RESULT_SET 'RPC_PRIORITY','MEDIUM' +SHOW VARIABLE RPC_PRIORITY; + +@EXPECT EXCEPTION INVALID_ARGUMENT +SET RPC_PRIORITY = ''; + +@EXPECT RESULT_SET 'RPC_PRIORITY','MEDIUM' +SHOW VARIABLE RPC_PRIORITY; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestReadOnlyStaleness.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestReadOnlyStaleness.sql new file mode 100644 index 000000000000..6feb7894bf7d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestReadOnlyStaleness.sql @@ -0,0 +1,262 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Script that tests the different possible read-only staleness modes + */ + +-- First test in autocommit mode. 
READONLY mode is not strictly necessary +SET AUTOCOMMIT = TRUE; +SET READONLY = FALSE; + +--------------------- STRONG ---------------------------- +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; + +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='Strong'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; + +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='strong'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; + +-- Try to set STRONG with a timestamp value +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'STRONG 2018-11-15T13:09:25Z'' +SET READ_ONLY_STALENESS='STRONG 2018-11-15T13:09:25Z'; + +-- Try to set STRONG with a duration value +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'STRONG 10s'' +SET READ_ONLY_STALENESS='STRONG 10s'; + +--------------------- MIN_READ_TIMESTAMP ---------------------------- +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2018-11-15T13:09:25Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2018-11-15T13:09:25Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='Min_Read_Timestamp 2018-11-15T13:09:25-08:00'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2018-11-15T21:09:25Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='min_read_timestamp 2018-11-15T13:09:25+07:45'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2018-11-15T05:24:25Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +-- Try to set MIN_READ_TIMESTAMP without a timestamp +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'MIN_READ_TIMESTAMP'' +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP'; + +-- Try to set MIN_READ_TIMESTAMP with a duration +@EXPECT EXCEPTION 
INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'MIN_READ_TIMESTAMP 10s'' +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 10s'; + +--------------------- READ_TIMESTAMP ---------------------------- +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2018-11-15T13:09:25Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2018-11-15T13:09:25Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='Read_Timestamp 2018-11-15T13:09:25-08:00'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2018-11-15T21:09:25Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='read_timestamp 2018-11-15T13:09:25+07:45'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2018-11-15T05:24:25Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +-- Try to set READ_TIMESTAMP without a timestamp +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'READ_TIMESTAMP'' +SET READ_ONLY_STALENESS='READ_TIMESTAMP'; + +-- Try to set READ_TIMESTAMP with a duration +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'READ_TIMESTAMP 10s'' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 10s'; + +--------------------- MAX_STALENESS ---------------------------- +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE READ_ONLY_STALENESS; + +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='Max_Staleness 1000ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; + +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='max_staleness 10001ns'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 10001ns' +SHOW VARIABLE READ_ONLY_STALENESS; + +-- Try to set MAX_STALENESS without a duration +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'MAX_STALENESS'' +SET 
READ_ONLY_STALENESS='MAX_STALENESS'; + +-- Try to set MAX_STALENESS with a timestamp +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'MAX_STALENESS 2018-11-15T13:09:25+07:45'' +SET READ_ONLY_STALENESS='MAX_STALENESS 2018-11-15T13:09:25+07:45'; + +--------------------- EXACT_STALENESS ---------------------------- +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='EXACT_STALENESS 1000ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; + +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='Exact_Staleness 1001ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1001ms' +SHOW VARIABLE READ_ONLY_STALENESS; + +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='exact_staleness 1000000000ns'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; + +-- Try to set EXACT_STALENESS without a duration +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'EXACT_STALENESS'' +SET READ_ONLY_STALENESS='EXACT_STALENESS'; + +-- Try to set EXACT_STALENESS with a timestamp +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'EXACT_STALENESS 2018-11-15T13:09:25+07:45'' +SET READ_ONLY_STALENESS='EXACT_STALENESS 2018-11-15T13:09:25+07:45'; + + +------------------------------------------------------------------------------------------------------------------------------ + + +-- Then test in transactional read-only mode. 
+SET AUTOCOMMIT = FALSE; +SET READONLY = TRUE; + +--------------------- STRONG ---------------------------- +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; + +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='Strong'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; + +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='strong'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; + +-- Try to set STRONG with a timestamp value +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'STRONG 2018-11-15T13:09:25Z'' +SET READ_ONLY_STALENESS='STRONG 2018-11-15T13:09:25Z'; + +-- Try to set STRONG with a duration value +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'STRONG 10s'' +SET READ_ONLY_STALENESS='STRONG 10s'; + +--------------------- MIN_READ_TIMESTAMP ---------------------------- +-- This is not allowed in transactional mode +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: MAX_STALENESS and MIN_READ_TIMESTAMP are only allowed in autocommit mode' +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2018-11-15T13:09:25Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; + +--------------------- READ_TIMESTAMP ---------------------------- +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2018-11-15T13:09:25Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2018-11-15T13:09:25Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='Read_Timestamp 2018-11-15T13:09:25-08:00'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2018-11-15T21:09:25Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='read_timestamp 2018-11-15T13:09:25+07:45'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 
2018-11-15T05:24:25Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +-- Try to set READ_TIMESTAMP without a timestamp +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'READ_TIMESTAMP'' +SET READ_ONLY_STALENESS='READ_TIMESTAMP'; + +-- Try to set READ_TIMESTAMP with a duration +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'READ_TIMESTAMP 10s'' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 10s'; + +--------------------- MAX_STALENESS ---------------------------- +-- only allowed in autocommit mode +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: MAX_STALENESS and MIN_READ_TIMESTAMP are only allowed in autocommit mode' +SET READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2018-11-15T05:24:25Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +--------------------- EXACT_STALENESS ---------------------------- +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='EXACT_STALENESS 1000ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; + +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='Exact_Staleness 1001ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1001ms' +SHOW VARIABLE READ_ONLY_STALENESS; + +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='exact_staleness 1000000000ns'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; + +-- Try to set EXACT_STALENESS without a duration +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'EXACT_STALENESS'' +SET READ_ONLY_STALENESS='EXACT_STALENESS'; + +-- Try to set EXACT_STALENESS with a timestamp +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'EXACT_STALENESS 2018-11-15T13:09:25+07:45'' +SET READ_ONLY_STALENESS='EXACT_STALENESS 2018-11-15T13:09:25+07:45'; + + 
+---------------------------------------------------------------------------------------------------------------- + +-- Then test in transactional read-write mode. This should also work, although it has no effect on the current transaction, unless the transaction mode is explicitly set to read only +SET AUTOCOMMIT = FALSE; +SET READONLY = FALSE; + +@EXPECT NO_RESULT +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; + +-- Then test while in an active transaction. This should not be allowed. +SET TRANSACTION READ ONLY; +SELECT * +FROM Singers; + +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: Cannot set read-only staleness when a transaction has been started' +SET READ_ONLY_STALENESS='EXACT_STALENESS 1000ms'; +-- Check that the staleness mode is still 'STRONG' +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; + +COMMIT; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestSetStatements.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestSetStatements.sql new file mode 100644 index 000000000000..8502d9da136d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestSetStatements.sql @@ -0,0 +1,58 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Script for testing setting invalid values for the different connection and transaction options + */ + +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for AUTOCOMMIT: on' +set autocommit = on; +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READONLY: on' +set readonly = on; + +SET AUTOCOMMIT = TRUE; +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for AUTOCOMMIT_DML_MODE: 'non_atomic'' +set autocommit_dml_mode='non_atomic'; + +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'weak'' +set read_only_staleness='weak'; + +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'strong 2018-11-15T13:09:25Z'' +set read_only_staleness='strong 2018-11-15T13:09:25Z'; + +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'MIN_READ_TIMESTAMP'' +set read_only_staleness='MIN_READ_TIMESTAMP'; +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'MIN_READ_TIMESTAMP 10s'' +set read_only_staleness='MIN_READ_TIMESTAMP 10s'; +-- Missing timezone in timestamp +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'MIN_READ_TIMESTAMP 2018-11-15T13:09:25'' +set read_only_staleness='MIN_READ_TIMESTAMP 2018-11-15T13:09:25'; + +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'MAX_STALENESS'' +set read_only_staleness='MAX_STALENESS'; +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'MAX_STALENESS 2018-11-15T13:09:25Z'' +set read_only_staleness='MAX_STALENESS 2018-11-15T13:09:25Z'; +-- Missing time unit +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for READ_ONLY_STALENESS: 'MAX_STALENESS 10'' +set 
read_only_staleness='MAX_STALENESS 10'; + +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for STATEMENT_TIMEOUT: -1' +set statement_timeout=-1; +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for STATEMENT_TIMEOUT: '1'' +set statement_timeout='1'; + +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for TRANSACTION: readonly' +set transaction readonly; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestStatementTimeout.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestStatementTimeout.sql new file mode 100644 index 000000000000..ec4de61bcdf9 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestStatementTimeout.sql @@ -0,0 +1,261 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Test setting statement timeout and verify that statements actually do timeout + */ + +-- Ensure we know what mode we are in +SET AUTOCOMMIT = TRUE; +SET AUTOCOMMIT_DML_MODE='Transactional'; +SET READONLY = FALSE; + +-- Verify that setting a negative timeout value is not allowed +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for STATEMENT_TIMEOUT: '-1ms'' +SET STATEMENT_TIMEOUT='-1ms'; + +@EXPECT EXCEPTION INVALID_ARGUMENT 'INVALID_ARGUMENT: Unknown value for STATEMENT_TIMEOUT: '1'' +SET STATEMENT_TIMEOUT='1'; + +-- First set the statement timeout to null, which means no timeout +SET STATEMENT_TIMEOUT=null; + +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; + +-- Do a somewhat complex query that should not timeout +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM ( + SELECT * + FROM Singers + WHERE LastName IN (SELECT AlbumTitle FROM Albums) + OR LastName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + OR FirstName IN (SELECT AlbumTitle FROM Albums) + OR FirstName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + UNION ALL + SELECT * + FROM Singers + WHERE LastName IN (SELECT AlbumTitle FROM Albums) + OR LastName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + OR FirstName IN (SELECT AlbumTitle FROM Albums) + OR FirstName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + UNION ALL + SELECT * + FROM Singers + WHERE LastName IN (SELECT AlbumTitle FROM Albums) + OR LastName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + OR FirstName IN (SELECT AlbumTitle FROM Albums) + OR FirstName IN (SELECT CAST(SingerId AS STRING) FROM Singers) +) RES +; + +-- Set the statement timeout to 1 nanosecond that should cause basically any statement to timeout +SET STATEMENT_TIMEOUT='1ns'; + +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; + +-- Do a somewhat complex query that should now timeout +@EXPECT EXCEPTION DEADLINE_EXCEEDED 'DEADLINE_EXCEEDED:' 
+SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM ( + SELECT * + FROM Singers + WHERE LastName IN (SELECT AlbumTitle FROM Albums) + OR LastName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + OR FirstName IN (SELECT AlbumTitle FROM Albums) + OR FirstName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + UNION ALL + SELECT * + FROM Singers + WHERE LastName IN (SELECT AlbumTitle FROM Albums) + OR LastName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + OR FirstName IN (SELECT AlbumTitle FROM Albums) + OR FirstName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + UNION ALL + SELECT * + FROM Singers + WHERE LastName IN (SELECT AlbumTitle FROM Albums) + OR LastName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + OR FirstName IN (SELECT AlbumTitle FROM Albums) + OR FirstName IN (SELECT CAST(SingerId AS STRING) FROM Singers) +) RES +; + +-- Try to execute an update that should also timeout +@EXPECT EXCEPTION DEADLINE_EXCEEDED 'DEADLINE_EXCEEDED:' +UPDATE Singers SET LastName='Some Other Last Name' /* It used to be 'Last 1' */ +WHERE SingerId=1 +OR LastName IN ( + SELECT LastName + FROM Singers + WHERE LastName IN (SELECT AlbumTitle FROM Albums) + OR LastName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + OR FirstName IN (SELECT AlbumTitle FROM Albums) + OR FirstName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + UNION ALL + SELECT LastName + FROM Singers + WHERE LastName IN (SELECT AlbumTitle FROM Albums) + OR LastName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + OR FirstName IN (SELECT AlbumTitle FROM Albums) + OR FirstName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + UNION ALL + SELECT LastName + FROM Singers + WHERE LastName IN (SELECT AlbumTitle FROM Albums) + OR LastName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + OR FirstName IN (SELECT AlbumTitle FROM Albums) + OR FirstName IN (SELECT CAST(SingerId AS STRING) FROM Singers) +) +; + +-- Verify that the record was not updated +SET STATEMENT_TIMEOUT=null; +@EXPECT 
RESULT_SET +SELECT LastName AS ACTUAL, 'Last 1' AS EXPECTED +FROM Singers +WHERE SingerId=1; + + +----------------------------------------------------------------------------------------------------- + +-- Repeat test in transactional mode +SET AUTOCOMMIT = FALSE; +-- First set the statement timeout to null, which means no timeout +SET STATEMENT_TIMEOUT=null; + +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; + +-- Do a somewhat complex query that should not timeout +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM ( + SELECT * + FROM Singers + WHERE LastName IN (SELECT AlbumTitle FROM Albums) + OR LastName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + OR FirstName IN (SELECT AlbumTitle FROM Albums) + OR FirstName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + UNION ALL + SELECT * + FROM Singers + WHERE LastName IN (SELECT AlbumTitle FROM Albums) + OR LastName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + OR FirstName IN (SELECT AlbumTitle FROM Albums) + OR FirstName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + UNION ALL + SELECT * + FROM Singers + WHERE LastName IN (SELECT AlbumTitle FROM Albums) + OR LastName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + OR FirstName IN (SELECT AlbumTitle FROM Albums) + OR FirstName IN (SELECT CAST(SingerId AS STRING) FROM Singers) +) RES +; + +-- Set the statement timeout to 1 nanosecond that should cause basically any statement to timeout +SET STATEMENT_TIMEOUT='1ns'; + +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; + +-- Do a somewhat complex query that should now timeout +@EXPECT EXCEPTION DEADLINE_EXCEEDED 'DEADLINE_EXCEEDED:' +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM ( + SELECT * + FROM Singers + WHERE LastName IN (SELECT AlbumTitle FROM Albums) + OR LastName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + OR FirstName IN (SELECT AlbumTitle FROM Albums) + OR FirstName IN (SELECT CAST(SingerId AS STRING) 
FROM Singers) + UNION ALL + SELECT * + FROM Singers + WHERE LastName IN (SELECT AlbumTitle FROM Albums) + OR LastName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + OR FirstName IN (SELECT AlbumTitle FROM Albums) + OR FirstName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + UNION ALL + SELECT * + FROM Singers + WHERE LastName IN (SELECT AlbumTitle FROM Albums) + OR LastName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + OR FirstName IN (SELECT AlbumTitle FROM Albums) + OR FirstName IN (SELECT CAST(SingerId AS STRING) FROM Singers) +) RES +; +-- We need to rollback the transaction as it is no longer usable. +-- A timeout during a rollback is ignored, and also not rolling back +-- a transaction on the emulator will make the transaction remain the +-- current transaction. We therefore remove the timeout before the +-- rollback call. +SET STATEMENT_TIMEOUT=null; +ROLLBACK; + +SET STATEMENT_TIMEOUT='1ns'; + +-- Try to execute an update that should also timeout +@EXPECT EXCEPTION DEADLINE_EXCEEDED 'DEADLINE_EXCEEDED:' +UPDATE Singers SET LastName='Some Other Last Name' /* It used to be 'Last 1' */ +WHERE SingerId=1 +OR LastName IN ( + SELECT LastName + FROM Singers + WHERE LastName IN (SELECT AlbumTitle FROM Albums) + OR LastName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + OR FirstName IN (SELECT AlbumTitle FROM Albums) + OR FirstName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + UNION ALL + SELECT LastName + FROM Singers + WHERE LastName IN (SELECT AlbumTitle FROM Albums) + OR LastName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + OR FirstName IN (SELECT AlbumTitle FROM Albums) + OR FirstName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + UNION ALL + SELECT LastName + FROM Singers + WHERE LastName IN (SELECT AlbumTitle FROM Albums) + OR LastName IN (SELECT CAST(SingerId AS STRING) FROM Singers) + OR FirstName IN (SELECT AlbumTitle FROM Albums) + OR FirstName IN (SELECT CAST(SingerId AS STRING) FROM Singers) +) +; + +/* As we are 
in a transaction, the statement *could* continue in the background and will not + * automatically be rolled back by the connection. Whether the statement will continue to + * execute in the background depends on what the reason for the timeout was. If the timeout + * was caused because the statement took too long to execute on the server, the statement + * will continue to run server side. If the timeout was caused by a network problem that + * prevented the statement to be delivered to the server in a timely fashion, the statement + * has never reached the server and hence will not be executed in the background. + * + * It is the responsibility of the user to rollback the transaction. If the user does nothing, + * the transaction will automatically abort server side and the change will not be committed. + */ + +-- Now rollback the transaction and verify that there was no permanent change +SET STATEMENT_TIMEOUT=null; +ROLLBACK; + +@EXPECT RESULT_SET +SELECT LastName AS ACTUAL, 'Last 1' AS EXPECTED +FROM Singers +WHERE SingerId=1; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestTemporaryTransactions.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestTemporaryTransactions.sql new file mode 100644 index 000000000000..2a64a2a09f85 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestTemporaryTransactions.sql @@ -0,0 +1,67 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Test script for temporary transactions (i.e. autocommit mode with explicit BEGIN [TRANSACTION] statements) + */ + +SET AUTOCOMMIT = TRUE; +SET READONLY = FALSE; + +-- Insert a new singer in a temporary transaction and commit +BEGIN; +@EXPECT UPDATE_COUNT 1 +INSERT INTO Singers (SingerId, FirstName, LastName) +VALUES (9999, 'First 9999', 'Last 9999'); +COMMIT; + +-- Verify that the record is there +@EXPECT RESULT_SET +SELECT FirstName AS ACTUAL, 'First 9999' AS EXPECTED +FROM Singers +WHERE SingerId=9999 +UNION ALL +SELECT LastName AS ACTUAL, 'Last 9999' AS EXPECTED +FROM Singers +WHERE SingerId=9999; + +-- Insert another singer in a temporary transaction and rollback +BEGIN; +@EXPECT UPDATE_COUNT 1 +INSERT INTO Singers (SingerId, FirstName, LastName) +VALUES (9998, 'First 9998', 'Last 9998'); +ROLLBACK; + +-- Verify that the record is not there +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM Singers +WHERE SingerId=9998; + +-- Delete the initial test record in autocommit mode +@EXPECT UPDATE_COUNT 1 +DELETE FROM Singers +WHERE SingerId=9999; + +-- Verify that a rollback is not possible, as we are in autocommit mode +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: This connection has no transaction' +ROLLBACK; + +-- Verify that the record has been removed +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM Singers +WHERE SingerId=9999; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestTransactionMode.sql 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestTransactionMode.sql new file mode 100644 index 000000000000..c89dab4f8d8e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestTransactionMode.sql @@ -0,0 +1,152 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Script that tests the different possible transaction modes in read-write mode + */ + +SET AUTOCOMMIT = FALSE; +SET READONLY = FALSE; + +-- Insert a test record +@EXPECT UPDATE_COUNT 1 +INSERT INTO Singers (SingerId, FirstName, LastName) +VALUES (9999, 'First 9999', 'Last 9999'); +COMMIT; + +---------------------------------------- Test read only transactions --------------------------------------------- +SET TRANSACTION READ ONLY; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1 AS EXPECTED +FROM Singers +WHERE SingerId=9999; + +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: Update statements are not allowed for read-only transactions' +-- try to update a record in a read-only transaction +UPDATE Singers SET FirstName='New First Name' WHERE SingerId=9999; + +-- We are in a read-only transaction that has returned a query, so there should be a read-timestamp +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; + +-- Finish the transaction with a rollback. 
This removes the read timestamp +ROLLBACK; + +-- Read timestamp from the previous transaction should no longer be available as it rolled back +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; + +-- Start a new read only transaction and SHOW VARIABLE the read timestamp after a commit +SET TRANSACTION READ ONLY; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1 AS EXPECTED +FROM Singers +WHERE SingerId=9999; + +COMMIT; + +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; + +-- Try to execute DDL in a read-only transaction +SET TRANSACTION READ ONLY; + +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: DDL statements are not allowed for read-only transactions' +CREATE TABLE FOO (ID INT64 NOT NULL, NAME STRING(100)) PRIMARY KEY (ID); + +ROLLBACK; + +---------------------------------------- Test read/write transactions --------------------------------------------- +SET TRANSACTION READ WRITE; + +@EXPECT UPDATE_COUNT 1 +INSERT INTO Singers (SingerId, FirstName, LastName) +VALUES (9998, 'First 9998', 'Last 9998'); + +COMMIT; + +-- Verify the existence of the record +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1 AS EXPECTED +FROM Singers +WHERE SingerId=9998; + +COMMIT; + +-- try to delete the record, then rollback the transaction +@EXPECT UPDATE_COUNT 1 +DELETE FROM Singers WHERE SingerId=9998; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 0 AS EXPECTED +FROM Singers +WHERE SingerId=9998; + +ROLLBACK; + +-- Verify that the rollback succeeded +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1 AS EXPECTED +FROM Singers +WHERE SingerId=9998; + +-- Try to execute DDL in a read/write transaction +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: DDL-statements are not allowed inside a read/write transaction.' 
+CREATE TABLE FOO (ID INT64 NOT NULL, NAME STRING(100)) PRIMARY KEY (ID); + +ROLLBACK; + +---------------------------------------- Test DDL batches --------------------------------------------- +START BATCH DDL; + +-- Verify that queries and updates fail +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: Executing queries is not allowed for DDL batches.' +SELECT * +FROM Singers; + +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: Executing updates is not allowed for DDL batches.' +UPDATE Singers SET LastName='Foo'; + +-- Verify that DDL statements are allowed +CREATE TABLE FOO (ID INT64 NOT NULL, NAME STRING(100)) PRIMARY KEY (ID); +alter table FOO add column bar timestamp; +RUN BATCH; + +-- Verify the existence of the table and the column +SET AUTOCOMMIT = TRUE; +@EXPECT RESULT_SET +SELECT TABLE_NAME AS ACTUAL, 'FOO' AS EXPECTED +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='FOO'; + +@EXPECT RESULT_SET +SELECT COLUMN_NAME AS ACTUAL, 'bar' AS EXPECTED +FROM INFORMATION_SCHEMA.COLUMNS +WHERE TABLE_NAME='FOO' AND COLUMN_NAME='bar'; + +SET AUTOCOMMIT = FALSE; + +-- Remove the table +START BATCH DDL; +DROP TABLE FOO; +RUN BATCH; + +-- Remove the test records +@EXPECT UPDATE_COUNT 2 +DELETE FROM Singers WHERE SingerId IN (9999, 9998); +COMMIT; \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestTransactionMode_ReadOnly.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestTransactionMode_ReadOnly.sql new file mode 100644 index 000000000000..edd213ff607e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITSqlScriptTest_TestTransactionMode_ReadOnly.sql @@ -0,0 +1,80 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Script that tests the different possible transaction modes in read-only mode + */ + +SET AUTOCOMMIT = FALSE; +SET READONLY = TRUE; + +-- Verify that trying to insert a test record will fail +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: Update statements are not allowed for read-only transactions' +INSERT INTO Singers (SingerId, FirstName, LastName) +VALUES (9999, 'First 9999', 'Last 9999'); +COMMIT; + +---------------------------------------- Test read only transactions --------------------------------------------- +SET TRANSACTION READ ONLY; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1 AS EXPECTED +FROM Singers +WHERE SingerId=1; + +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: Update statements are not allowed for read-only transactions' +-- try to update a record in a read-only transaction +UPDATE Singers SET FirstName='New First Name' WHERE SingerId=9999; + +-- We are in a read-only transaction that has returned a query, so there should be a read-timestamp +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; + +-- Finish the transaction with a rollback. 
This removes the read timestamp +ROLLBACK; + +-- Read timestamp from the previous transaction should no longer be available as it rolled back +@EXPECT RESULT_SET 'READ_TIMESTAMP',null +SHOW VARIABLE READ_TIMESTAMP; + +-- Start a new read only transaction and SHOW VARIABLE the read timestamp after a commit +SET TRANSACTION READ ONLY; + +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1 AS EXPECTED +FROM Singers +WHERE SingerId=1; + +COMMIT; + +@EXPECT RESULT_SET 'READ_TIMESTAMP' +SHOW VARIABLE READ_TIMESTAMP; + +-- Try to execute DDL in a read-only transaction +SET TRANSACTION READ ONLY; + +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: DDL statements are not allowed for read-only transactions' +CREATE TABLE FOO (ID INT64 NOT NULL, NAME STRING(100)) PRIMARY KEY (ID); + +ROLLBACK; + +---------------------------------------- Test read/write transactions --------------------------------------------- +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: The transaction mode can only be READ_ONLY when the connection is in read_only mode' +SET TRANSACTION READ WRITE; + +---------------------------------------- Test DDL batches --------------------------------------------- +@EXPECT EXCEPTION FAILED_PRECONDITION 'FAILED_PRECONDITION: Cannot start a DDL batch when the connection is in read-only mode' +START BATCH DDL; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITTransactionModeTest.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITTransactionModeTest.sql new file mode 100644 index 000000000000..e2253d3cda06 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITTransactionModeTest.sql @@ -0,0 +1,116 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +NEW_CONNECTION; + +-- Test that DDL statements are allowed in DDL batches +START BATCH DDL; + +-- Try to execute a DDL statement +@EXPECT NO_RESULT +CREATE TABLE FOO (ID INT64 NOT NULL, NAME STRING(100)) PRIMARY KEY (ID); +-- Abort batch as creating a table takes quite some time +ABORT BATCH; + + +NEW_CONNECTION; + +-- Test that DDL statements are not allowed in read/write transactions +@EXPECT RESULT_SET 'AUTOCOMMIT',false +SHOW VARIABLE AUTOCOMMIT; +@EXPECT RESULT_SET 'READONLY',false +SHOW VARIABLE READONLY; +@EXPECT RESULT_SET 'C',1 +SELECT 1 AS C; + +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE FOO (ID INT64 NOT NULL, NAME STRING(100)) PRIMARY KEY (ID); + + +NEW_CONNECTION; + +-- Test that DDL statements are not allowed in read-only transactions +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'AUTOCOMMIT',false +SHOW VARIABLE AUTOCOMMIT; + +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE FOO (ID INT64 NOT NULL, NAME STRING(100)) PRIMARY KEY (ID); + + +NEW_CONNECTION; + +-- Test that DML statements are allowed in read/write transactions +@EXPECT RESULT_SET 'AUTOCOMMIT',false +SHOW VARIABLE AUTOCOMMIT; +@EXPECT RESULT_SET 'READONLY',false +SHOW VARIABLE READONLY; + +@EXPECT UPDATE_COUNT 1 +INSERT INTO TEST (ID, NAME) VALUES (1, 'TEST'); +@EXPECT UPDATE_COUNT 1 +UPDATE TEST SET NAME='TEST2' WHERE ID=1; +@EXPECT UPDATE_COUNT 1 +DELETE FROM TEST WHERE ID=1; +COMMIT; + + +NEW_CONNECTION; + +-- Test that DML statements are not allowed in read-only transactions +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'AUTOCOMMIT',false +SHOW
VARIABLE AUTOCOMMIT; + +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE FOO SET BAR=1 WHERE ID=2; + + +NEW_CONNECTION; + +-- Test that DML statements are not allowed in DDL batches +START BATCH DDL; + +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE FOO SET BAR=1 WHERE ID=2; + + +NEW_CONNECTION; + +-- Test that queries are allowed in read/write transactions +@EXPECT RESULT_SET 'AUTOCOMMIT',false +SHOW VARIABLE AUTOCOMMIT; + +SELECT * FROM TEST; + + +NEW_CONNECTION; + +-- Test that queries are allowed in read-only transactions +@EXPECT RESULT_SET 'AUTOCOMMIT',false +SHOW VARIABLE AUTOCOMMIT; +SET TRANSACTION READ ONLY; + +SELECT * FROM TEST; + + +NEW_CONNECTION; + +-- Test that queries are not allowed in DDL batches +START BATCH DDL; + +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT * FROM TEST; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/SetReadOnlyStalenessTest.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/SetReadOnlyStalenessTest.sql new file mode 100644 index 000000000000..e545753c34e5 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/SetReadOnlyStalenessTest.sql @@ -0,0 +1,575 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +-- Test valid values for strong +SET READ_ONLY_STALENESS='strong'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +SET READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +SET READ_ONLY_STALENESS='Strong'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +SET READ_ONLY_STALENESS = 'strong'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +SET READ_ONLY_STALENESS = 'strong'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +SET READ_ONLY_STALENESS += +'strong'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +SET READ_ONLY_STALENESS='strong'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; +SET READ_ONLY_STALENESS='strong'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE READ_ONLY_STALENESS; + +-- Test invalid values for strong +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='strongg'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='sstrong'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='strng'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=' strong'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='strong '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=' strong '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=' strong'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='strong '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=' strong '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=strong; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS="strong"; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=`strong`; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='''strong'''; + + 
+-- Test valid values for min_read_timestamp +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2018-12-07T13:36:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='Min_Read_Timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2018-12-07T13:36:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2018-12-07T13:36:00.01Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2018-12-07T13:36:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2018-12-07T13:36:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2018-12-07T13:36:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='min_read_timestamp 2000-02-29T13:36:00.01Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2000-02-29T13:36:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='min_read_timestamp 2004-02-29T13:36:00.01Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2004-02-29T13:36:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='min_read_timestamp 2019-01-01T00:00:00Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2019-01-01T00:00:00Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='min_read_timestamp 2019-01-01T00:00:00Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2019-01-01T00:00:00Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='min_read_timestamp 2019-01-01T00:00:00Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2019-01-01T00:00:00Z' 
+SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01+01:00'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2018-12-07T12:36:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01-01:00'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2018-12-07T14:36:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01+06:30'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2018-12-07T07:06:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01+24:00'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2018-12-06T13:36:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + + +-- Test invalid values for min_read_timestamp +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min_read_timestampp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='mmin_read_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min_red_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min read timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min-read-timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min%read%timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=' min_read_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01Z '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=' min_read_timestamp 2018-12-07T13:36:00.01Z '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=' min_read_timestamp 2018-12-07T13:36:00.01Z'; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01Z '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=' min_read_timestamp 2018-12-07T13:36:00.01Z '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=min_read_timestamp 2018-12-07T13:36:00.01Z; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS="min_read_timestamp 2018-12-07T13:36:00.01Z"; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=`min_read_timestamp 2018-12-07T13:36:00.01Z`; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='''min_read_timestamp 2018-12-07T13:36:00.01Z'''; + +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07 13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T3:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.9999999999Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-7T13:36:00.01Z'; + +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01+8'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01+08'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01+08:0'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.0108:00'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01+08:00.0'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01+08:000'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET 
READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01+100:00'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01*08:00'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01%08:00'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01 08:00'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01Z+08:00'; + + + + +-- Test valid values for read_timestamp +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2018-12-07T13:36:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='Read_Timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2018-12-07T13:36:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2018-12-07T13:36:00.01Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2018-12-07T13:36:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2018-12-07T13:36:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2018-12-07T13:36:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='read_timestamp 2000-02-29T13:36:00.01Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2000-02-29T13:36:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='read_timestamp 2004-02-29T13:36:00.01Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2004-02-29T13:36:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='read_timestamp 2019-01-01T00:00:00Z'; 
+@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2019-01-01T00:00:00Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='read_timestamp 2019-01-01T00:00:00Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2019-01-01T00:00:00Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='read_timestamp 2019-01-01T00:00:00Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2019-01-01T00:00:00Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01+01:00'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2018-12-07T12:36:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01-01:00'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2018-12-07T14:36:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01+06:30'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2018-12-07T07:06:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01+24:00'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2018-12-06T13:36:00.010000000Z' +SHOW VARIABLE READ_ONLY_STALENESS; + + +-- Test invalid values for read_timestamp +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read_timestampp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='mread_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='red_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read-timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read%timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET 
READ_ONLY_STALENESS=' read_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01Z '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=' read_timestamp 2018-12-07T13:36:00.01Z '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=' read_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01Z '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=' read_timestamp 2018-12-07T13:36:00.01Z '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=read_timestamp 2018-12-07T13:36:00.01Z; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS="read_timestamp 2018-12-07T13:36:00.01Z"; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=`read_timestamp 2018-12-07T13:36:00.01Z`; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='''read_timestamp 2018-12-07T13:36:00.01Z'''; + +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07 13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T3:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.9999999999Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read_timestamp 2018-12-7T13:36:00.01Z'; + +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01+8'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01+08'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01+08:0'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET 
READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.0108:00'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01+08:00.0'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01+08:000'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01+100:00'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01*08:00'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01%08:00'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01 08:00'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01Z+08:00'; + + +-- Test valid values for exact_staleness +SET READ_ONLY_STALENESS='exact_staleness 10s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 10s' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='Exact_Staleness 10s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 10s' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 10s' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='exact_staleness 10s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 10s' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='exact_staleness 10s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 10s' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='exact_staleness 1ns'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1ns' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='exact_staleness 1us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1us' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='exact_staleness 1ms'; +@EXPECT RESULT_SET 
'READ_ONLY_STALENESS','EXACT_STALENESS 1ms' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='exact_staleness 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='exact_staleness 9999s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 9999s' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='exact_staleness 10s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 10s' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='exact_staleness 10s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 10s' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='exact_staleness 1000ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='exact_staleness 1001ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1001ms' +SHOW VARIABLE READ_ONLY_STALENESS; + + +SET READ_ONLY_STALENESS='exact_staleness 1000us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1ms' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='exact_staleness 1001us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1001us' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='exact_staleness 1000ns'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1us' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='exact_staleness 1001ns'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','EXACT_STALENESS 1001ns' +SHOW VARIABLE READ_ONLY_STALENESS; + + +-- Test invalid values for exact_staleness +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='exact_stalenesss 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='eexact_staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='exct_staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='exact staleness 10s'; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='exact-staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='exact%staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=' exact_staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='exact_staleness 10s '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=' exact_staleness 10s '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=' exact_staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='exact_staleness 10s '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=' exact_staleness 10s '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=exact_staleness 10s; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS="exact_staleness 10s"; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=`exact_staleness 10s`; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='''exact_staleness 10s'''; + +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='exact_staleness 10'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='exact_staleness 10mus'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='exact_staleness 999999999999s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='exact_staleness not_a_number'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='exact_staleness'; + + + +-- Test valid values for max_staleness +SET READ_ONLY_STALENESS='max_staleness 10s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 10s' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='max_Staleness 10s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 10s' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='max_STALENESS 10s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 10s' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='max_staleness 10s'; +@EXPECT 
RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 10s' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='max_staleness 10s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 10s' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='max_staleness 1ns'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 1ns' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='max_staleness 1us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 1us' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='max_staleness 1ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 1ms' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='max_staleness 1s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='max_staleness 9999s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 9999s' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='max_staleness 10s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 10s' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='max_staleness 10s'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 10s' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='max_staleness 1000ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 1s' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='max_staleness 1001ms'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 1001ms' +SHOW VARIABLE READ_ONLY_STALENESS; + + +SET READ_ONLY_STALENESS='max_staleness 1000us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 1ms' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='max_staleness 1001us'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 1001us' +SHOW VARIABLE READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='max_staleness 1000ns'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 1us' +SHOW VARIABLE 
READ_ONLY_STALENESS; + +SET READ_ONLY_STALENESS='max_staleness 1001ns'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MAX_STALENESS 1001ns' +SHOW VARIABLE READ_ONLY_STALENESS; + + +-- Test invalid values for max_staleness +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='max_stalenesss 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='emax_staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='mx_staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='max staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='max-staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='max%staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=' max_staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='max_staleness 10s '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=' max_staleness 10s '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=' max_staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='max_staleness 10s '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=' max_staleness 10s '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=max_staleness 10s; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS="max_staleness 10s"; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS=`max_staleness 10s`; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='''max_staleness 10s'''; + +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='max_staleness 10'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='max_staleness 10mus'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='max_staleness 999999999999s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='max_staleness not_a_number'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET READ_ONLY_STALENESS='max_staleness'; + diff --git 
a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/SetStatementTimeoutTest.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/SetStatementTimeoutTest.sql new file mode 100644 index 000000000000..d74be83acf4b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/SetStatementTimeoutTest.sql @@ -0,0 +1,158 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +-- Test valid values +-- Null (no timeout) +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; + +-- Seconds +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT = '2s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','2s' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1S'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; + +-- Milliseconds +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1Ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1mS'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1MS'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; + +-- Microseconds +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1Us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1uS'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1US'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; + +-- Nanoseconds +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1Ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1nS'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1NS'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; + +-- Test flip to higher time unit +SET STATEMENT_TIMEOUT='1000ns'; +@EXPECT RESULT_SET 
'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1001ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1001ns' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1000us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1001us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1001us' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1000ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1001ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1001ms' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1000000ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1000000000ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1000000us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; + + +-- Invalid suffixes +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='1m'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='1mi'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='1h'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='1mus'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='1n'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='1u'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='1'; + +-- Invalid numbers +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='-1s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='a1s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='0xas'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='0x1s'; + +-- Invalid because of spaces +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='1 s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT=' 1s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='1s '; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='1m s'; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/TimeoutSqlScriptTest.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/TimeoutSqlScriptTest.sql new file mode 100644 index 000000000000..06dc96fb8693 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/TimeoutSqlScriptTest.sql @@ -0,0 +1,54 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +-- check that the default is null +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; +-- set a new value +SET STATEMENT_TIMEOUT='1000ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +-- do a simple select and verify that the timeout does not change +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +-- set a value that contains a fraction of a second +SET STATEMENT_TIMEOUT='1800ms'; +-- check that the jdbc driver reports the value that is set, although under water the JDBC connection will round it to a whole second +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1800ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +-- set a value that is just above a whole second +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +-- set a value that contains a whole second +SET STATEMENT_TIMEOUT='3s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','3s' +SHOW VARIABLE STATEMENT_TIMEOUT; +-- set a value to a higher value +SET STATEMENT_TIMEOUT='2999ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','2999ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +-- Check that setting the value to 0 is not allowed +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='0s'; +-- Set a timeout value and then reset it to null +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=null; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT',null +SHOW VARIABLE STATEMENT_TIMEOUT; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/it/Albums.txt b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/it/Albums.txt new file mode 100644 index 000000000000..0cf7eafedf85 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/it/Albums.txt @@ -0,0 +1,60 @@ +(1,1,"Album 
1",980045); +(2,2,"Album 2",667788); +(3,3,"Album 3",908791); +(4,4,"Album 4",690335); +(5,5,"Album 5",133041); +(6,6,"Album 6",505292); +(7,7,"Album 7",91969); +(8,8,"Album 8",289965); +(9,9,"Album 9",78176); +(10,10,"Album 10",485664); +(11,11,"Album 11",972680); +(12,12,"Album 12",893680); +(13,13,"Album 13",892138); +(14,14,"Album 14",449562); +(15,15,"Album 15",150968); +(16,16,"Album 16",580377); +(17,17,"Album 17",763081); +(18,18,"Album 18",203427); +(19,19,"Album 19",995368); +(20,20,"Album 20",29900); +(21,21,"Album 21",723728); +(22,22,"Album 22",540582); +(23,23,"Album 23",784245); +(24,24,"Album 24",614788); +(25,25,"Album 25",275649); +(26,26,"Album 26",970898); +(27,27,"Album 27",409289); +(28,28,"Album 28",766560); +(29,29,"Album 29",32414); +(30,30,"Album 30",457957); +(1,31,"Album 31",52546); +(2,32,"Album 32",412424); +(3,33,"Album 33",568496); +(4,34,"Album 34",353491); +(5,35,"Album 35",489951); +(6,36,"Album 36",75938); +(7,37,"Album 37",460461); +(8,38,"Album 38",642042); +(9,39,"Album 39",282872); +(10,40,"Album 40",521496); +(11,41,"Album 41",98126); +(12,42,"Album 42",535113); +(13,43,"Album 43",957625); +(14,44,"Album 44",667630); +(15,45,"Album 45",236968); +(16,46,"Album 46",445647); +(17,47,"Album 47",446396); +(18,48,"Album 48",852859); +(19,49,"Album 49",404105); +(20,50,"Album 50",384439); +(21,51,"Album 51",440468); +(22,52,"Album 52",455384); +(23,53,"Album 53",210756); +(24,54,"Album 54",849113); +(25,55,"Album 55",63969); +(26,56,"Album 56",277122); +(27,57,"Album 57",350063); +(28,58,"Album 58",359473); +(29,59,"Album 59",209825); +(30,60,"Album 60",84543); diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/it/Concerts.txt b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/it/Concerts.txt new file mode 100644 index 000000000000..2e53d92ccafc --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/it/Concerts.txt @@ -0,0 +1,100 @@ +(1,1,DATE '2003-06-19',TIMESTAMP '2003-06-19T12:30:05Z',TIMESTAMP '2003-06-19T18:57:15Z',[11,93,140,923]); +(2,18,DATE '2004-01-25',TIMESTAMP '2004-01-25T14:58:28Z',TIMESTAMP '2004-01-26T01:10:52Z',[18,51,101,812]); +(3,21,DATE '2005-03-15',TIMESTAMP '2005-03-15T18:14:50Z',TIMESTAMP '2005-03-16T02:21:28Z',[23,26,107,721]); +(4,16,DATE '2009-05-09',TIMESTAMP '2009-05-09T05:22:34Z',TIMESTAMP '2009-05-09T15:28:28Z',[18,70,150,297]); +(5,11,DATE '2001-01-07',TIMESTAMP '2001-01-07T18:37:33Z',TIMESTAMP '2001-01-07T21:22:17Z',[20,55,185,672]); +(6,25,DATE '2015-11-19',TIMESTAMP '2015-11-19T22:47:42Z',TIMESTAMP '2015-11-20T02:54:01Z',[12,73,150,833]); +(7,26,DATE '2012-10-06',TIMESTAMP '2012-10-06T10:58:43Z',TIMESTAMP '2012-10-06T15:35:40Z',[8,83,199,625]); +(8,8,DATE '2001-09-26',TIMESTAMP '2001-09-26T06:41:20Z',TIMESTAMP '2001-09-26T16:38:35Z',[19,87,192,912]); +(9,27,DATE '2016-11-24',TIMESTAMP '2016-11-24T20:00:48Z',TIMESTAMP '2016-11-24T23:03:07Z',[20,84,134,885]); +(10,30,DATE '2017-05-05',TIMESTAMP '2017-05-05T12:44:05Z',TIMESTAMP '2017-05-05T23:06:55Z',[17,44,177,997]); +(11,7,DATE '2018-06-07',TIMESTAMP '2018-06-07T07:03:11Z',TIMESTAMP '2018-06-07T08:21:41Z',[10,73,182,287]); +(12,22,DATE '2009-01-07',TIMESTAMP '2009-01-07T23:22:11Z',TIMESTAMP '2009-01-08T08:34:18Z',[22,59,150,983]); +(13,16,DATE '2013-06-28',TIMESTAMP '2013-06-28T14:59:25Z',TIMESTAMP '2013-06-28T22:32:11Z',[17,41,129,433]); +(14,11,DATE '2005-08-19',TIMESTAMP '2005-08-19T01:11:28Z',TIMESTAMP '2005-08-19T01:30:30Z',[18,49,110,590]); +(15,18,DATE '2001-11-26',TIMESTAMP '2001-11-26T15:55:31Z',TIMESTAMP '2001-11-26T20:52:13Z',[18,51,132,854]); +(16,26,DATE '2009-01-04',TIMESTAMP '2009-01-04T03:09:11Z',TIMESTAMP '2009-01-04T12:02:14Z',[5,37,146,344]); +(17,20,DATE '2012-09-28',TIMESTAMP '2012-09-28T00:45:00Z',TIMESTAMP '2012-09-28T02:10:39Z',[15,89,185,480]); +(18,24,DATE 
'2004-09-06',TIMESTAMP '2004-09-06T09:55:40Z',TIMESTAMP '2004-09-06T18:10:32Z',[23,51,113,244]); +(19,21,DATE '2010-11-18',TIMESTAMP '2010-11-18T09:59:17Z',TIMESTAMP '2010-11-18T17:13:12Z',[14,69,164,218]); +(20,29,DATE '2010-12-24',TIMESTAMP '2010-12-24T04:21:25Z',TIMESTAMP '2010-12-24T06:10:08Z',[20,34,166,573]); +(21,3,DATE '2000-05-14',TIMESTAMP '2000-05-14T13:49:08Z',TIMESTAMP '2000-05-14T14:39:25Z',[21,67,136,779]); +(22,18,DATE '2000-05-14',TIMESTAMP '2000-05-14T00:23:23Z',TIMESTAMP '2000-05-14T01:20:04Z',[21,91,111,749]); +(23,26,DATE '2015-05-04',TIMESTAMP '2015-05-04T10:39:46Z',TIMESTAMP '2015-05-04T19:21:45Z',[24,91,128,559]); +(24,16,DATE '2012-08-18',TIMESTAMP '2012-08-18T08:47:12Z',TIMESTAMP '2012-08-18T09:35:03Z',[19,44,136,281]); +(25,4,DATE '2000-03-16',TIMESTAMP '2000-03-16T10:15:15Z',TIMESTAMP '2000-03-16T12:29:53Z',[22,28,111,948]); +(26,4,DATE '2002-11-20',TIMESTAMP '2002-11-20T16:28:19Z',TIMESTAMP '2002-11-20T17:56:10Z',[7,70,141,517]); +(27,23,DATE '2000-08-09',TIMESTAMP '2000-08-09T04:30:51Z',TIMESTAMP '2000-08-09T15:27:15Z',[13,98,156,230]); +(28,16,DATE '2000-10-15',TIMESTAMP '2000-10-15T04:12:39Z',TIMESTAMP '2000-10-15T14:07:05Z',[8,39,160,455]); +(29,22,DATE '2003-03-25',TIMESTAMP '2003-03-25T17:21:56Z',TIMESTAMP '2003-03-25T19:18:25Z',[17,70,148,681]); +(30,15,DATE '2008-11-11',TIMESTAMP '2008-11-11T22:56:07Z',TIMESTAMP '2008-11-12T09:33:48Z',[24,47,175,901]); +(31,7,DATE '2018-05-22',TIMESTAMP '2018-05-22T20:54:59Z',TIMESTAMP '2018-05-23T02:52:28Z',[13,34,177,804]); +(32,30,DATE '2000-04-03',TIMESTAMP '2000-04-03T13:54:10Z',TIMESTAMP '2000-04-03T15:57:02Z',[16,48,137,249]); +(33,23,DATE '2003-12-24',TIMESTAMP '2003-12-24T22:22:00Z',TIMESTAMP '2003-12-25T06:09:40Z',[15,36,131,922]); +(34,12,DATE '2012-06-23',TIMESTAMP '2012-06-23T18:15:30Z',TIMESTAMP '2012-06-24T03:46:17Z',[25,31,160,564]); +(35,5,DATE '2017-12-15',TIMESTAMP '2017-12-15T09:43:38Z',TIMESTAMP '2017-12-15T17:18:28Z',[22,31,177,868]); +(36,20,DATE '2012-12-21',TIMESTAMP 
'2012-12-21T08:28:14Z',TIMESTAMP '2012-12-21T11:34:59Z',[25,62,143,437]); +(37,19,DATE '2014-07-07',TIMESTAMP '2014-07-07T22:01:35Z',TIMESTAMP '2014-07-08T04:39:37Z',[8,31,184,784]); +(38,15,DATE '2012-07-26',TIMESTAMP '2012-07-26T09:45:35Z',TIMESTAMP '2012-07-26T13:03:53Z',[19,79,140,908]); +(39,24,DATE '2014-03-19',TIMESTAMP '2014-03-19T07:52:25Z',TIMESTAMP '2014-03-19T11:47:01Z',[11,90,141,978]); +(40,4,DATE '2015-08-26',TIMESTAMP '2015-08-26T20:51:25Z',TIMESTAMP '2015-08-27T07:06:46Z',[15,94,195,510]); +(41,24,DATE '2016-04-11',TIMESTAMP '2016-04-11T08:59:07Z',TIMESTAMP '2016-04-11T13:23:30Z',[15,51,173,233]); +(42,18,DATE '2005-03-19',TIMESTAMP '2005-03-19T15:45:04Z',TIMESTAMP '2005-03-19T16:28:42Z',[19,31,188,546]); +(43,7,DATE '2001-01-04',TIMESTAMP '2001-01-04T11:02:16Z',TIMESTAMP '2001-01-04T11:32:21Z',[20,37,133,958]); +(44,5,DATE '2015-12-24',TIMESTAMP '2015-12-24T06:49:48Z',TIMESTAMP '2015-12-24T14:46:46Z',[12,61,175,233]); +(45,12,DATE '2011-08-24',TIMESTAMP '2011-08-24T03:45:46Z',TIMESTAMP '2011-08-24T06:13:10Z',[18,38,169,913]); +(46,16,DATE '2017-03-04',TIMESTAMP '2017-03-04T04:01:04Z',TIMESTAMP '2017-03-04T13:44:38Z',[21,79,119,839]); +(47,18,DATE '2009-05-19',TIMESTAMP '2009-05-19T23:10:52Z',TIMESTAMP '2009-05-20T04:02:01Z',[25,79,151,357]); +(48,22,DATE '2003-10-03',TIMESTAMP '2003-10-03T14:10:24Z',TIMESTAMP '2003-10-03T17:35:09Z',[18,60,140,450]); +(49,9,DATE '2003-03-07',TIMESTAMP '2003-03-07T22:09:59Z',TIMESTAMP '2003-03-08T08:28:29Z',[22,41,122,726]); +(50,9,DATE '2015-07-12',TIMESTAMP '2015-07-12T07:43:51Z',TIMESTAMP '2015-07-12T12:45:20Z',[18,67,126,474]); +(51,12,DATE '2014-11-05',TIMESTAMP '2014-11-05T19:03:00Z',TIMESTAMP '2014-11-06T05:27:07Z',[19,43,125,865]); +(52,6,DATE '2016-07-25',TIMESTAMP '2016-07-25T14:39:28Z',TIMESTAMP '2016-07-26T00:36:03Z',[6,74,192,344]); +(53,13,DATE '2005-08-02',TIMESTAMP '2005-08-02T16:06:47Z',TIMESTAMP '2005-08-02T17:13:41Z',[5,52,192,977]); +(54,18,DATE '2010-01-25',TIMESTAMP 
'2010-01-25T07:34:54Z',TIMESTAMP '2010-01-25T16:29:11Z',[24,85,181,304]); +(55,14,DATE '2012-05-20',TIMESTAMP '2012-05-20T13:15:12Z',TIMESTAMP '2012-05-20T17:40:09Z',[15,43,104,665]); +(56,3,DATE '2013-09-08',TIMESTAMP '2013-09-08T19:53:42Z',TIMESTAMP '2013-09-08T22:32:52Z',[14,81,129,354]); +(57,27,DATE '2003-07-18',TIMESTAMP '2003-07-18T23:11:24Z',TIMESTAMP '2003-07-19T03:29:46Z',[21,85,188,854]); +(58,27,DATE '2001-04-10',TIMESTAMP '2001-04-10T08:36:49Z',TIMESTAMP '2001-04-10T16:17:57Z',[17,86,161,438]); +(59,2,DATE '2002-07-02',TIMESTAMP '2002-07-02T17:32:20Z',TIMESTAMP '2002-07-03T01:59:33Z',[23,59,164,357]); +(60,28,DATE '2000-11-24',TIMESTAMP '2000-11-24T12:53:25Z',TIMESTAMP '2000-11-24T22:37:53Z',[22,47,161,739]); +(61,12,DATE '2017-07-04',TIMESTAMP '2017-07-04T21:02:01Z',TIMESTAMP '2017-07-05T03:57:29Z',[16,88,179,478]); +(62,3,DATE '2015-10-07',TIMESTAMP '2015-10-07T17:58:42Z',TIMESTAMP '2015-10-07T21:04:38Z',[21,44,155,381]); +(63,23,DATE '2005-05-03',TIMESTAMP '2005-05-03T15:08:10Z',TIMESTAMP '2005-05-03T20:58:30Z',[20,43,111,824]); +(64,24,DATE '2012-12-09',TIMESTAMP '2012-12-09T02:52:09Z',TIMESTAMP '2012-12-09T08:01:11Z',[18,87,106,997]); +(65,30,DATE '2004-03-01',TIMESTAMP '2004-03-01T07:09:06Z',TIMESTAMP '2004-03-01T07:49:32Z',[14,26,195,895]); +(66,24,DATE '2007-05-19',TIMESTAMP '2007-05-19T10:20:57Z',TIMESTAMP '2007-05-19T15:21:09Z',[18,54,179,238]); +(67,16,DATE '2016-01-06',TIMESTAMP '2016-01-06T21:32:20Z',TIMESTAMP '2016-01-07T02:31:32Z',[20,61,120,652]); +(68,2,DATE '2007-10-26',TIMESTAMP '2007-10-26T03:37:22Z',TIMESTAMP '2007-10-26T10:02:36Z',[11,65,151,537]); +(69,2,DATE '2018-08-11',TIMESTAMP '2018-08-11T01:33:38Z',TIMESTAMP '2018-08-11T07:39:21Z',[10,98,105,621]); +(70,23,DATE '2012-07-06',TIMESTAMP '2012-07-06T01:02:23Z',TIMESTAMP '2012-07-06T05:04:16Z',[14,44,172,953]); +(71,7,DATE '2006-01-24',TIMESTAMP '2006-01-24T15:32:10Z',TIMESTAMP '2006-01-24T17:40:43Z',[9,58,150,713]); +(72,8,DATE '2002-11-06',TIMESTAMP 
'2002-11-06T05:58:03Z',TIMESTAMP '2002-11-06T07:43:24Z',[25,36,193,213]); +(73,10,DATE '2003-11-24',TIMESTAMP '2003-11-24T17:39:10Z',TIMESTAMP '2003-11-25T03:17:36Z',[8,55,200,352]); +(74,16,DATE '2007-11-03',TIMESTAMP '2007-11-03T05:49:12Z',TIMESTAMP '2007-11-03T16:34:16Z',[21,50,114,820]); +(75,4,DATE '2009-05-06',TIMESTAMP '2009-05-06T18:52:07Z',TIMESTAMP '2009-05-06T21:10:02Z',[16,42,101,281]); +(76,1,DATE '2012-12-03',TIMESTAMP '2012-12-03T06:01:05Z',TIMESTAMP '2012-12-03T06:45:00Z',[24,60,140,292]); +(77,1,DATE '2016-11-26',TIMESTAMP '2016-11-26T01:19:27Z',TIMESTAMP '2016-11-26T07:20:17Z',[19,31,123,214]); +(78,9,DATE '2018-05-21',TIMESTAMP '2018-05-21T00:14:43Z',TIMESTAMP '2018-05-21T08:43:35Z',[7,28,115,634]); +(79,14,DATE '2013-11-20',TIMESTAMP '2013-11-20T08:54:47Z',TIMESTAMP '2013-11-20T10:44:54Z',[18,39,155,328]); +(80,17,DATE '2015-10-11',TIMESTAMP '2015-10-11T23:41:17Z',TIMESTAMP '2015-10-12T02:42:48Z',[16,94,102,894]); +(81,23,DATE '2011-08-07',TIMESTAMP '2011-08-07T19:33:01Z',TIMESTAMP '2011-08-07T21:51:53Z',[23,90,134,370]); +(82,7,DATE '2010-04-10',TIMESTAMP '2010-04-10T13:22:08Z',TIMESTAMP '2010-04-10T17:59:08Z',[18,68,121,303]); +(83,27,DATE '2001-07-08',TIMESTAMP '2001-07-08T20:19:54Z',TIMESTAMP '2001-07-08T22:46:15Z',[18,86,148,746]); +(84,6,DATE '2017-09-02',TIMESTAMP '2017-09-02T10:29:03Z',TIMESTAMP '2017-09-02T13:06:41Z',[12,85,138,471]); +(85,1,DATE '2013-11-02',TIMESTAMP '2013-11-02T04:01:03Z',TIMESTAMP '2013-11-02T14:08:47Z',[9,65,111,583]); +(86,22,DATE '2004-04-03',TIMESTAMP '2004-04-03T19:13:48Z',TIMESTAMP '2004-04-04T05:59:31Z',[19,72,105,908]); +(87,2,DATE '2012-02-26',TIMESTAMP '2012-02-26T22:52:21Z',TIMESTAMP '2012-02-27T02:55:24Z',[16,75,129,740]); +(88,9,DATE '2017-09-17',TIMESTAMP '2017-09-17T11:28:49Z',TIMESTAMP '2017-09-17T12:13:03Z',[24,77,182,755]); +(89,11,DATE '2011-03-28',TIMESTAMP '2011-03-28T13:05:23Z',TIMESTAMP '2011-03-28T16:32:29Z',[22,96,174,731]); +(90,21,DATE '2006-12-12',TIMESTAMP 
'2006-12-12T20:44:10Z',TIMESTAMP '2006-12-12T22:10:34Z',[15,68,166,616]); +(91,27,DATE '2010-08-18',TIMESTAMP '2010-08-18T05:49:35Z',TIMESTAMP '2010-08-18T12:58:36Z',[12,84,157,369]); +(92,2,DATE '2003-02-03',TIMESTAMP '2003-02-03T11:19:43Z',TIMESTAMP '2003-02-03T22:10:42Z',[25,59,140,939]); +(93,5,DATE '2016-01-04',TIMESTAMP '2016-01-04T08:10:26Z',TIMESTAMP '2016-01-04T13:08:30Z',[5,90,163,272]); +(94,3,DATE '2018-04-20',TIMESTAMP '2018-04-20T07:19:52Z',TIMESTAMP '2018-04-20T17:41:01Z',[5,59,109,854]); +(95,19,DATE '2016-10-09',TIMESTAMP '2016-10-09T17:02:59Z',TIMESTAMP '2016-10-09T17:37:27Z',[6,35,176,442]); +(96,9,DATE '2007-06-12',TIMESTAMP '2007-06-12T16:50:12Z',TIMESTAMP '2007-06-12T19:27:30Z',[7,49,169,729]); +(97,29,DATE '2012-11-25',TIMESTAMP '2012-11-25T20:40:30Z',TIMESTAMP '2012-11-25T21:29:50Z',[12,35,128,269]); +(98,11,DATE '2013-10-22',TIMESTAMP '2013-10-22T03:26:36Z',TIMESTAMP '2013-10-22T06:42:42Z',[14,49,148,726]); +(99,10,DATE '2006-05-10',TIMESTAMP '2006-05-10T05:49:43Z',TIMESTAMP '2006-05-10T07:12:18Z',[5,67,131,360]); +(100,18,DATE '2015-02-15',TIMESTAMP '2015-02-15T01:18:05Z',TIMESTAMP '2015-02-15T04:19:27Z',[11,38,127,909]); diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/it/CreateMusicTables.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/it/CreateMusicTables.sql new file mode 100644 index 000000000000..a0d60d480481 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/it/CreateMusicTables.sql @@ -0,0 +1,100 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +START BATCH DDL; + +CREATE TABLE Singers ( + SingerId INT64 NOT NULL, + FirstName STRING(1024), + LastName STRING(1024), + SingerInfo BYTES(MAX), + BirthDate DATE +) PRIMARY KEY(SingerId); + +CREATE INDEX SingersByFirstLastName ON Singers(FirstName, LastName); + +CREATE TABLE Albums ( + SingerId INT64 NOT NULL, + AlbumId INT64 NOT NULL, + AlbumTitle STRING(MAX), + MarketingBudget INT64 +) PRIMARY KEY(SingerId, AlbumId), + INTERLEAVE IN PARENT Singers ON DELETE CASCADE; + +CREATE INDEX AlbumsByAlbumTitle ON Albums(AlbumTitle); + +CREATE INDEX AlbumsByAlbumTitle2 ON Albums(AlbumTitle) STORING (MarketingBudget); + +CREATE TABLE Songs ( + SingerId INT64 NOT NULL, + AlbumId INT64 NOT NULL, + TrackId INT64 NOT NULL, + SongName STRING(MAX), + Duration INT64, + SongGenre STRING(25) +) PRIMARY KEY(SingerId, AlbumId, TrackId), + INTERLEAVE IN PARENT Albums ON DELETE CASCADE; + +CREATE UNIQUE INDEX SongsBySingerAlbumSongNameDesc ON Songs(SingerId, AlbumId, SongName DESC), INTERLEAVE IN Albums; + +CREATE INDEX SongsBySongName ON Songs(SongName); + +CREATE TABLE Concerts ( + VenueId INT64 NOT NULL, + SingerId INT64 NOT NULL, + ConcertDate DATE NOT NULL, + BeginTime TIMESTAMP, + EndTime TIMESTAMP, + TicketPrices ARRAY, + CONSTRAINT Fk_Concerts_Singer FOREIGN KEY (SingerId) REFERENCES Singers (SingerId) +) PRIMARY KEY(VenueId, SingerId, ConcertDate); + +CREATE TABLE TableWithAllColumnTypes ( + ColInt64 INT64 NOT NULL, + ColFloat64 FLOAT64 NOT NULL, + ColBool BOOL NOT NULL, + ColString STRING(100) NOT NULL, + ColStringMax STRING(MAX) NOT NULL, + ColBytes 
BYTES(100) NOT NULL, + ColBytesMax BYTES(MAX) NOT NULL, + ColDate DATE NOT NULL, + ColTimestamp TIMESTAMP NOT NULL, + ColCommitTS TIMESTAMP NOT NULL OPTIONS (allow_commit_timestamp=true), + + ColInt64Array ARRAY, + ColFloat64Array ARRAY, + ColBoolArray ARRAY, + ColStringArray ARRAY, + ColStringMaxArray ARRAY, + ColBytesArray ARRAY, + ColBytesMaxArray ARRAY, + ColDateArray ARRAY, + ColTimestampArray ARRAY +) PRIMARY KEY (ColInt64) +; + +CREATE TABLE TableWithRef ( + Id INT64 NOT NULL, + RefFloat FLOAT64 NOT NULL, + RefString STRING(100) NOT NULL, + RefDate DATE NOT NULL, + CONSTRAINT Fk_TableWithRef_TableWithAllColumnTypes + FOREIGN KEY (RefFloat, RefString, RefDate) + REFERENCES TableWithAllColumnTypes (ColFloat64, ColString, ColDate) +) PRIMARY KEY (Id) +; + +RUN BATCH; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/it/Singers.txt b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/it/Singers.txt new file mode 100644 index 000000000000..939873de17e6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/it/Singers.txt @@ -0,0 +1,30 @@ +(1,'First 1','A Last 1',FROM_BASE64('5KckBUgBLuj+nlmI0WXBEA0TCSeOK8x/B35kzIIHC01YrF8CNTjRT8hVQ4T0NkVGZjz96bF68aBu4QQ9VlZ/EhX22++vPADslt5YdqFzJjdxhlbGCufKIbrCVn1Po5u+j46SaV1TAHffIGfsAY0lhHJNmRS2P4p2/CGWJas4bzEo/Fn/JxuKF/et1LXgmlShOiE+LbnysvjbDt7GcbL7mA=='),DATE '1906-04-28'); +(2,'First 2','ALast 2',FROM_BASE64('1uLrsGLZS2BUfLGU0CLO9lgDau+TfX/XYK0RyEKvwgWdm3f0mbt4vbLziTn7iY/fM5OeGoeNZneQFWJoAY1XimD4aFDcQlIkkUxaXHFbOik4KNc/OiQMNaLghXtyaTP+UEaHX7o3w7iCp/wjzljEsnaYZxYTKY8Nm6JfKSwXvP1xHXi0KpaCdLf/V/8Vg102CNqqR4fgFwLy3/5QLxbGAg=='),DATE '1922-11-17'); +(3,'First 3','A Last 
3',FROM_BASE64('Mc3i5IYbAWfrpZbCa8R2IXsK2DM0zW8mCx58tMClAPvIKktEIOh/HEl3l6qnJ8FPqp17E+PYZsplE9Hxu0LV9N1inR4TO+my3h9Vq72BA6hoSHmo2mxhRSF+iUL3TrC+MalcKPZuKnmI48RRfKoIrwP0Am1iqXWhhpOMo+zHDVL40FsbDIDW7zezTjyxPmwryF9I5vF+t1j8B5NKPA8Gnw=='),DATE '1935-11-08'); +(4,'First 4','A Last 4',FROM_BASE64('FyMiT7GA1pQzfrf/SnGN4iWMizOf+M59fVauKUqP84oGBSkI4F746K+grXinm3txsfPGu23J4OYtZt9FUxQssbaVykkhGBX9+nuiV+RYESLRj6uHT6mZH5g2BOR9L6cWRclks6GBFzIwjFu7JqeofmiJt/R1BwJMFH+07rDRPgi/9oS/DK6/ipC77dpomAuW48d9IbUNofd9/tc89mJbEg=='),DATE '1945-03-23'); +(5,'First 5','A Last 5',FROM_BASE64('BfpuifEdiau23UcEnnaONRxA9V0UH2uMhv3gZG/o0tnh6IR306luK4UL12RSbDVzGgpmDa7tLNC2pZzAlDMJmO4h5F73GzvSa8mVFrJrnqeNy9ECLM1sTIH1HYaF9jZXAYcSo14PK4+xygz1tsENs3jfKfWNuNBEi463x+fL8RcVxVMclSyOEBQaTnLD0pnzji99NkwMBQiwIHN5bl6XLA=='),DATE '1953-06-03'); +(6,'First 6','A Last 6',FROM_BASE64('Hdcj53Vc1yNAuCAP7H8YGadmbdOFGg1nfSpfVuiWWgx2OGR8KQIzTTHny8FsYnmyEBmJQZQMv1m1HU0EFmq5b4id0TmBUMfPzgWF5LFAJPgziAnbprYiKhwDaiRxrmL4Q0kVozeT1vniS3T6HioC20pjzmN8aF1vxzrLBr3IC9e8zHt9+Vla52lNoG/8atlWaSPx1agj5CyPncO7QTdPuw=='),DATE '1956-02-07'); +(7,'First 7','B Last 7',FROM_BASE64('FqOl3vAVSMU6NGINNZjVbYQRgmb95stb6CHfMYEec32ngI8XCS2687kGHfZ16innPuGau5Z/JSchkE8JYMaSITQ/7+B5eh40vZI1CKuLyXKfZ4BR8VkBVqxXyAsAShboxlt+kMHZEvWMY0iXcl9jCB3V+GNbPMHlCxz46CAnjp9ArnwwojRZgUDK9PonTr9N4GBEmO04DLip30LyvCpW/g=='),DATE '1969-05-26'); +(8,'First 8','B Last 8',FROM_BASE64('PQLBMSGGZXeSI7FLot4EcX3JfPTafiu5yeisMBQvuQmDW7kQC/mU9Oh+UhAbovDIx0dZGJ5dIhoAXZEzZPxGgBMWvbPNFmTB7+Q5Hd3/1uxL81XDZs6FlVCpGCKB0KER4WxtKbVqeQgltCeEvhYsTeLRNJdka33uq06lSZFyJXKX9bQyRLCZlGQRy2VHG1+sqjAX0FfcLf6RNEG64sDwOA=='),DATE '1978-11-08'); +(9,'First 9','B Last 9',FROM_BASE64('Nk2ZqJZSCFKKY+NMSq0WGVnNDS/BDHDEyb/bbJFxC/TqRPi+8DQ1csNlqvULW0pDEE6IygIRQR2lv5kT43E4tal4PKXx/z5LTeL9xiJ+qOHwLqAJrZK3V4aQFCNXT6t95lsxZLcjaP6fzNWlGwuN++iR7hpYJLI3WQSlaPL2GuHI9dLGS9ZUPR9KhDor72IURkOHU9Dt1eWfYouPsuVnrA=='),DATE '1987-07-04'); +(10,'First 10','B Last 
10',FROM_BASE64('E0rzdg+IDhd1N9S/Nh11Yr6Za+xZlfCOQr0TrsxjFzRvO1ZnXjHGQPdWvT/LIJV+f7TXu0rYtk75fx7uPylxANNEYnLfD7v6UHB5Yful0TCweTFs9qH35BEd0jJl0ATb1ggUmsxXF0xZQpsRRnfycPbMY59w2APSX3hvgF2Xk7lvhHQaSbjnHOh4s78cqa2Atrjeds8KI/I4v+aVQIX9+Q=='),DATE '1993-07-06'); +(11,'First 11','C Last 11',FROM_BASE64('rO6MoGIcZJ+6zIPyMCt2td9ytNkSAn/LxWkrBKUGQs+Fk+Se43Ml7YsuRYhdcIeawAtySL35vZzb6Avl7bH/MyXrg4E0jdvjpEsi7KHiN4f0ky85purgyEg8tRASTi2zVsIM8c27DZenHOqRFl4KUaGRdQATOEoEIH3aHLVoACb0Y1m9JDTIDBcKST8nvTDzayQx1Ur4CO+ZqqG8ye99ww=='),DATE '1895-09-15'); +(12,'First 12','C Last 12',FROM_BASE64('ki4b0vB6EHp9qdm9lFrxplFPTnFCq9/BWwBLK1Jzz39K2q1rESpVXFIe2L2WzOKrunXo8e+p7+xlzBBKCwVIW6hQy0A+7kp7SwdZgWr2pXJqtSuo43fwfhf/A63zFaUYg3AkuNJNAtV/F3mOVudPeJ2xvfRfJ06uKY4MzsDhXAFW5Wsf1ypWMCke58U1VncvpuNOwzSb0El+hOreiQRX8g=='),DATE '1922-01-03'); +(13,'First 13','C Last 13',FROM_BASE64('KEx72v8CBibM16yet2U0Nsbt8KypF1ih18jLso5Q4AmFYUK7961GTYWj0YprWHxIxL/3qAXkm8jjtcRqEUmIXxW7kR0xC7sOcMYJsOSsy2m59YnDTlzDLlR1gynzNJVUhj+aKkUPMQfYV6Dw3UEx2Kik4NKDlGJQc9A0w3rfXjzRln8Ou3F+KYMuuoi/4jP4GEu1Pgyqy8jhQKiN0e9cjA=='),DATE '1930-08-03'); +(14,'First 14','C Last 14',FROM_BASE64('SdOTrssLh3vCqjtPNSqUvX5xrHs6/tpdlqgbp1jp9FseXhhJm+sq6FhgSMX+jm/grBUkUbGCNcWYthv3hdRMIapyZANENn/8CN0BxoVnvECGA9moThVIVghiSAiUNB+SyZg9XlmFRBaQcXpSWoZ1tIRTIuFRKpKaC2GYiOHVPLSQEUOBGc+sN4J0eCvM5aytanUGzn0o98yL73hbRXjwUw=='),DATE '1940-05-12'); +(15,'First 15','C Last 15',FROM_BASE64('NFgZNOzxoDSrAOPXtDIyUtetwm6eUN11YdjB7rIFfylx2SSTbhWZMuJoToE8xQz458BNaUJ8xPB+fJR2AQJL75eZdwJgSA7nnSiFcQgJxU7CEShlBT1ANNJvPujQ7cowRAo4joPfxoBSODuZcEc9WeXhJpEnlQtiGq7k+kzQ0oPgGaj0gLNwiC2zZwL5XCiet5qsRk4LSkUcX3OugpgeCg=='),DATE '1946-09-23'); +(16,'First 16','D Last 16',FROM_BASE64('mmXbRxEVEhiWeMGeuYOv6xGovovbq/qfevmWdMcGiqmCU1yzUrWKLvDhwzYZMbdscj7Tr5e4YyaXpIgpdMro3SApCyfhX5o/dsBlBNVmwLqg8DYbzAeApkaXeiStpAtKdQ8kZ1jezAFlhR/PioNsZjj1iAU4paFLG5F1i01spp3OOeJaD1mUTCSEO85rOAbQ8+B1N3vzz1Yc3E3mTQtxHg=='),DATE '1947-09-30'); +(17,'First 17','D Last 
17',FROM_BASE64('zXIaISyrpPC/s0Yowmfdcbcavm6bGs95oBnWOHeTlXXk2n61Ug/GdG4gn5KxcXTfkqZAyzCBEnoaVcrwcp3HzEPUQeumXQn0dt2/oc5s0qfmGDo1+eOVy3tWMTdXv3vKmc4xXQ9bTMQE+MTtZDUknVCJ73zGUAMunFgzVnERLGBfOVaoLxcwC4HBrRtvtLlMboaCHirA1U5fF8xx81dK/g=='),DATE '1948-02-04'); +(18,'First 18','D Last 18',FROM_BASE64('mYZ5eW8+N/PFaDAmnIU77XX1jOZszdOXxblHbB0gKoJ6XOLYKcsmJKG8mjmUCQDYiG1b12xRQga6wprvLsciiyIwTCca6i4JL4RoVz+GqyDLmNSxHruz4xIB7XFIQ0q5SIeL7nob7llp7n+4+T9VYeoMDlFkG903WfjZNqsMRzpFYTT1C4ef+IlHOQSD5K3f/H/uWQ61PUebhSYe9hYQNg=='),DATE '1962-11-10'); +(19,'First 19','D Last 19',FROM_BASE64('/FqFHFjzF6yL/d3bKfSS9ZS1j8xkDlmF6gLpFf/MlC8idAt9ceLvr6oNEAKIdu0xqfLN9fKh9B7wQjAGUBKFLFlVzPIC2BBt7cCiiVVqgYwH3PIKiWL1LndLi1VRcNpy+gWLdgaFn+u4FxNotLDhdx9jUJOsDDPy9aBDSYmYOBajVUgN6jyfvG6egGnIhj+RNGxRkKfZ0isLOByT82v73w=='),DATE '1988-10-07'); +(20,'First 20','D Last 20',FROM_BASE64('3YII52xQjwk1RwhDPlbEKYWje92/04jIYhSJWa7LsLEt6qDxaFt1/1viQKAJWreCzgD0iC08CSJOQDLEmpuDPW5HZmWSnA0AuO46TGaYWtdQDFeJhgVzcldsAC7dMRid+SO1+sjvr/VfGCJP0XT3kWdE8mWNTdI74KrVm0CChj6XF7fLtjekbjZETrg7ySo8pmawVbTKrrZ5FIuNlkxI/Q=='),DATE '2000-02-29'); +(21,'First 21','D Last 21',FROM_BASE64('sKVHb3YTv4OPAB+77pjXln3omYqFy20LkBT/uP7PMSWlYaH+UpRdzOO52pDUh6BrMDjS3qgXU7irLoNA2NEma1QFzvVrLaa6yArnpZCyAEOw3OzpIQf2lJ7YqN8ZjwWEn8SztpMZBiJVXeZoYyYWnhkn9a+crIBOWMYI9ZfPUWk1xtvMX7I1QgHdSqPsLpT8iSnO42tjraGd5ulqkWrIRg=='),DATE '1886-08-09'); +(22,'First 22','E Last 22',FROM_BASE64('07GVEGBPEhQC8lELkxIGFhrQNspbl2NEGIrND5VXnUJFnuFctZaSpPovPHlYKtORcpGFsHTv3rSM6UUxTYFHzIUrxQJxNJ84KXjEXrlAN4tWkQOifh4icFc9FezQzQfsjf0KDjRatIFy8Q2jcSUfnhHbeZ1gpbsLIp8Ajioc7ptZG7Lnl1JyPmqKjQwQ+9WyE4uB0BGJTHI3xwzXQLm45A=='),DATE '1889-04-03'); +(23,'First 23','E Last 23',FROM_BASE64('tyGduCzcRQhWJXhDm5c9a8Mfpyyc1sKKk/OxSJFJ2numyNWuurKglMuDZSgdC9sH122eZdJU1uid7umiWYwhYYUC0JuYaYLNpYCnRuOL2FWVSN9jrJYX6AsNNpUDUfcKlJobFL/XJ+ulAr18Z/qtoWXDr4lx5TZAk05TTlFHJwRjIrybzrFYohhZZ20O4WtL3dryRKTgTgvVSElX0SZ8Tw=='),DATE '1892-01-21'); +(24,'First 24','E Last 
24',FROM_BASE64('bXe6LecQB+BcWwzcE3b+JrH/20zrVIUXsH9AcBMduKoVIpCnrloUziiWbE1b2Te3/mD7ShKfD4RXSahJ7KgACA3CxS70yAa945NaqoX/aND7kGfFE6PEiS4pUrkJ10A1mRY2fP0J/Qn2tEyegtTF4b2BZuACQJy0qU8QyYrykaVK/+ExVI+MrvHA2LD062EDWPJrPgApCpPRMmtAV3KnTA=='),DATE '1898-02-21'); +(25,'First 25','Last 25',FROM_BASE64('82g7Ytc6/RBp/3vFxUG7JfAz9al82TRPlqBybWKBj/1pA26Pgv1UTxdDloQ24ovTmRZ3agPmaFEc/0ry820ozm8NmR340IwHRmO+jb2LQY4FGEMKFg7zDxAZpJXqITMZsFL1zO/EJ6VMnvZ90Udk2mywsnvv857PRCXJgx6vu4gn+oqUaRAQnSHq3pveu4/88FogqWoOotSzraD2RkW39Q=='),DATE '1911-12-15'); +(26,'First 26','Last 26',FROM_BASE64('KwbjBuHNQvzeWR2Ucf3v5dHAIO/b3/A0AlKxWI2qARDKuaXNuBooTCtdhIv5ZczOH5BbEKlaYkK3mr3GA5GClmwxsafbv3eE0LkV88T7KjfrKSkfatyTtcIWLIrw60B5hlMS5uxmj4X9nZfivj9boB4g3rqEdg/vgOsO9xdk/BKw6FMCuDO3PgDGEn89dOZmaB0PgadYNN3vqz8ZLgXWtg=='),DATE '1912-07-01'); +(27,'First 27','Last 27',FROM_BASE64('O3T43r6OjBwCWu925WlVnd6NLufFAken2Jk/QQBJOGQWsqc4dQsFhs/RSAC8iMZg32lfpfjMQPltRjmwqV7JleYRxL9e6co5WDj9cQk7AcL6wedgR5O/voPZIJ0aqkh5bZvijuxNIerbYhmYZEPOuzhgz9ayE7LgPkvO6WWNfhYuhnulnuDa3e2RsBNC7J1zuuf3DKHnL8SpaD0SMcZRuw=='),DATE '1939-05-17'); +(28,'First 28','Last 28',FROM_BASE64('xC71kYOpe6iZJd4DZnb11wBapa37lquOSW0JzuS15kW1xSG/Jxu0FXUIbFaBJ84hvFYQ3OSxr5HRxI0SBaFyUQhglUT3KTv/m8fEN/W+apBu4aUtlLcZPOTr1amaz30fu89J6pEoQOgmswSIr/0CtiaQ/ZHnuU2rZUXh7hTzBdygF30bAIq6yBGPpfb/MV66yagZtQO/q69sRmar70H/hg=='),DATE '1946-10-18'); +(29,'First 29','Last 29',FROM_BASE64('koHC6ZTUt89ksDORKlw5ep/zJCO0/LNo5A6yC5E8HEKOZpzX7xllDsIQuDmMQDn1HCHkpouKFmoTM24kWvfAs9B6yE7JccSFJbUU5s4Z/iLtYnnfKDzMEDDd/TyL6FxxS0McscfZ/TIc6ZFCArlJCbviqTSafPamrlD7tOJNxkCZae+dFIgnTCiTcwcjvkQeM5Ul6jDNoqIy5lrZdR6wJg=='),DATE '1956-12-23'); +(30,'First 30','Last 30',FROM_BASE64('WjdDzKHsiWCc0kXraf7NbebOU2TIv9KicHO6Og18iZpsxKH0am6wN7f1FwB1VSvZkvfJQgFkqjoqYEJ8qmgKB/YC9mbQAP14BjoJTq6fwDehF5leqSYT7NJarlhV7BX+hn4cCOBZ/gdGPCdK2aXZy8KJrnxh6RBGe0+84L3mEOaSZmZRvmXMcRjRozu17qV3xm6mo7BTq+/7tES3CAovMw=='),DATE '1988-05-29'); diff --git 
a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/it/Songs.txt b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/it/Songs.txt new file mode 100644 index 000000000000..bda929b0fe5f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/it/Songs.txt @@ -0,0 +1,149 @@ +(12,42,1,"Song 12 42 1",387,'Unknown'); +(12,12,2,"Song 12 12 2",202,'Unknown'); +(29,59,3,"Song 29 59 3",160,'Unknown'); +(23,23,4,"Song 23 23 4",255,'Unknown'); +(24,54,5,"Song 24 54 5",436,'Unknown'); +(28,58,6,"Song 28 58 6",121,'Unknown'); +(27,27,7,"Song 27 27 7",319,'Unknown'); +(24,24,8,"Song 24 24 8",213,'Unknown'); +(19,49,9,"Song 19 49 9",280,'Unknown'); +(17,47,10,"Song 17 47 10",253,'Unknown'); +(6,6,11,"Song 6 6 11",321,'Unknown'); +(12,42,12,"Song 12 42 12 12 42 12",124,'Unknown'); +(25,25,13,"Song 25 25 13",449,'Unknown'); +(24,24,14,"Song 24 24 14",438,'Unknown'); +(5,5,15,"Song 5 5 15",378,'Unknown'); +(9,39,16,"Song 9 39 16",202,'Unknown'); +(20,50,17,"Song 20 50 17",452,'Unknown'); +(7,37,18,"Song 7 37 18",420,'Unknown'); +(8,8,19,"Song 8 8 19",318,'Unknown'); +(5,35,20,"Song 5 35 20",347,'Unknown'); +(3,3,21,"Song 3 3 21",377,'Unknown'); +(15,15,22,"Song 15 15 22",314,'Unknown'); +(19,49,23,"Song 19 49 23",199,'Unknown'); +(20,20,24,"Song 20 20 24",266,'Unknown'); +(15,45,25,"Song 15 45 25",433,'Unknown'); +(14,44,26,"Song 14 44 26",482,'Unknown'); +(19,19,27,"Song 19 19 27",345,'Unknown'); +(13,43,28,"Song 13 43 28",159,'Unknown'); +(18,48,29,"Song 18 48 29",350,'Unknown'); +(13,13,30,"Song 13 13 30",131,'Unknown'); +(9,9,31,"Song 9 9 31",183,'Unknown'); +(13,13,32,"Song 13 13 32",193,'Unknown'); +(24,24,33,"Song 24 24 33",378,'Unknown'); +(30,60,34,"Song 30 60 34",270,'Unknown'); +(13,43,35,"Song 13 43 35",375,'Unknown'); +(27,27,36,"Song 27 27 36",219,'Unknown'); +(20,50,37,"Song 20 50 37",314,'Unknown'); +(18,48,38,"Song 18 48 
38",416,'Unknown'); +(21,51,39,"Song 21 51 39",330,'Unknown'); +(1,31,40,"Song 1 31 40",376,'Unknown'); +(5,5,41,"Song 5 5 41",398,'Unknown'); +(15,45,42,"Song 15 45 42",466,'Unknown'); +(24,24,43,"v 24 24 43",384,'Unknown'); +(19,19,44,"Song 19 19 44",472,'Unknown'); +(15,45,45,"Song 15 45 45",246,'Unknown'); +(3,33,46,"Song 3 33 46",412,'Unknown'); +(23,23,47,"Song 23 23 47",159,'Unknown'); +(30,60,48,"Song 30 60 48",290,'Unknown'); +(19,19,49,"Song 19 19 49",446,'Unknown'); +(16,16,50,"Song 16 16 50",485,'Unknown'); +(4,4,51,"Song 4 4 51",185,'Unknown'); +(8,38,52,"Song 8 38 52",349,'Unknown'); +(24,54,53,"Song 24 54 53",301,'Unknown'); +(5,35,54,"Song 5 35 54",206,'Unknown'); +(30,30,55,"Song 30 30 55",250,'Unknown'); +(12,42,56,"Song 12 42 56",146,'Unknown'); +(30,30,57,"Song 30 30 57",416,'Unknown'); +(26,56,58,"Song 26 56 58",244,'Unknown'); +(20,50,59,"Song 20 50 59",356,'Unknown'); +(7,7,60,"Song 7 7 60",234,'Unknown'); +(19,19,61,"Song 19 19 61",412,'Unknown'); +(13,43,62,"Song 13 43 62",161,'Unknown'); +(5,5,63,"Song 5 5 63",300,'Unknown'); +(1,31,64,"Song 1 31 64",307,'Unknown'); +(4,4,65,"Song 4 4 65",197,'Unknown'); +(24,54,66,"Song 24 54 66",180,'Unknown'); +(3,3,67,"Song 3 3 67",156,'Unknown'); +(14,44,68,"Song 14 44 68",184,'Unknown'); +(21,51,69,"Song 21 51 69",486,'Unknown'); +(19,49,70,"Song 19 49 70",212,'Unknown'); +(9,39,71,"Song 9 39 71",452,'Unknown'); +(23,53,72,"Song 23 53 72",425,'Unknown'); +(11,41,73,"Song 11 41 73",316,'Unknown'); +(8,8,74,"Song 8 8 74",395,'Unknown'); +(9,9,75,"Song 9 9 75",189,'Unknown'); +(2,2,76,"Song 2 2 76",354,'Unknown'); +(23,53,77,"Song 23 53 77",137,'Unknown'); +(15,15,78,"TSong 15 15 78",176,'Unknown'); +(30,60,79,"Song 30 60 79",224,'Unknown'); +(14,44,80,"Song 14 44 80",305,'Unknown'); +(27,27,81,"Song 27 27 81",432,'Unknown'); +(18,18,82,"Song 18 18 82",357,'Unknown'); +(10,10,83,"Song 10 10 83",187,'Unknown'); +(12,42,84,"Song 12 42 84",461,'Unknown'); +(8,8,85,"Song 8 8 85",434,'Unknown'); 
+(1,31,86,"Song 1 31 86",436,'Unknown'); +(11,41,87,"Song 11 41 87",469,'Unknown'); +(13,13,88,"Song 13 13 88",452,'Unknown'); +(4,34,89,"Song 4 34 89",309,'Unknown'); +(21,21,90,"Song 21 21 90",226,'Unknown'); +(6,36,91,"Song 6 36 91",257,'Unknown'); +(27,27,92,"Song 27 27 92",251,'Unknown'); +(9,39,93,"Song 9 39 93",325,'Unknown'); +(30,30,94,"Song 30 30 94",122,'Unknown'); +(29,59,95,"Song 29 59 95",207,'Unknown'); +(1,1,96,"Song 1 1 96",318,'Unknown'); +(4,4,97,"Song 4 4 97",353,'Unknown'); +(23,23,98,"Song 23 23 98",450,'Unknown'); +(12,12,99,"Song 12 12 99",323,'Unknown'); +(24,24,100,"Song 24 24 100",397,'Unknown'); +(27,27,101,"Song 27 27 101",296,'Unknown'); +(29,59,102,"Song 29 59 102",349,'Unknown'); +(17,47,103,"Song 17 47 103",438,'Unknown'); +(5,5,104,"Song 5 5 104",388,'Unknown'); +(26,56,105,"Song 26 56 105",425,'Unknown'); +(22,52,106,"Song 22 52 106",154,'Unknown'); +(23,23,107,"Song 23 23 107",213,'Unknown'); +(8,38,108,"Song 8 38 108",276,'Unknown'); +(9,39,109,"Song 9 39 109",417,'Unknown'); +(9,9,110,"Song 9 9 110",299,'Unknown'); +(22,52,111,"Song 22 52 111",476,'Unknown'); +(21,21,112,"Song 21 21 112",225,'Unknown'); +(23,23,113,"Song 23 23 113",303,'Unknown'); +(7,7,114,"Song 7 7 114",291,'Unknown'); +(8,38,115,"Song 8 38 115",276,'Unknown'); +(14,44,116,"Song 14 44 116",238,'Unknown'); +(27,57,117,"Song 27 57 117",188,'Unknown'); +(28,28,118,"Song 28 28 118",372,'Unknown'); +(15,15,119,"Song 15 15 119",258,'Unknown'); +(21,21,120,"Song 21 21 120",308,'Unknown'); +(29,59,121,"Song 29 59 121",319,'Unknown'); +(28,58,122,"Song 28 58 122",453,'Unknown'); +(7,7,123,"Song 7 7 123",198,'Unknown'); +(4,4,124,"Song 4 4 124",435,'Unknown'); +(27,27,125,"Song 27 27 125",475,'Unknown'); +(30,30,126,"Song 30 30 126",395,'Unknown'); +(21,51,127,"Song 21 51 127",454,'Unknown'); +(29,29,128,"Song 29 29 128",376,'Unknown'); +(27,57,129,"Song 27 57 129",396,'Unknown'); +(23,53,130,"Song 23 53 130",458,'Unknown'); +(6,36,131,"Song 6 36 131",289,'Unknown'); 
+(29,29,132,"Song 29 29 132",207,'Unknown'); +(25,55,133,"Song 25 55 133",280,'Unknown'); +(3,3,134,"Song 3 3 134",432,'Unknown'); +(5,35,135,"1 5 35 135",304,'Unknown'); +(3,3,136,"2 3 3 136",392,'Unknown'); +(12,12,137,"3 12 12 137",393,'Unknown'); +(13,13,138,"4 13 13 138",382,'Unknown'); +(18,48,139,"5 18 48 139",447,'Unknown'); +(17,17,140,"6 17 17 140",182,'Unknown'); +(23,23,141,"7 23 23 141",266,'Unknown'); +(21,51,142,"8 21 51 142",383,'Unknown'); +(3,3,143,"9 3 3 143",439,'Unknown'); +(25,25,144,"10 25 25 144",454,'Unknown'); +(12,12,145,"11 12 12 145",179,'Unknown'); +(19,19,146,"12 19 19 146",422,'Unknown'); +(24,54,147,"13 24 54 147",478,'Unknown'); +(8,38,148,"14 8 38 148",233,'Unknown'); +(6,6,149,"15 6 6 149",245,'Unknown'); diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/ClientSideStatementsTest.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/ClientSideStatementsTest.sql new file mode 100644 index 000000000000..a5a6a01ada9e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/ClientSideStatementsTest.sql @@ -0,0 +1,104458 @@ +NEW_CONNECTION; +show autocommit; +NEW_CONNECTION; +SHOW AUTOCOMMIT; +NEW_CONNECTION; +show autocommit; +NEW_CONNECTION; + show autocommit; +NEW_CONNECTION; + show autocommit; +NEW_CONNECTION; + + + +show autocommit; +NEW_CONNECTION; +show autocommit ; +NEW_CONNECTION; +show autocommit ; +NEW_CONNECTION; +show autocommit + +; +NEW_CONNECTION; +show autocommit; +NEW_CONNECTION; +show autocommit; +NEW_CONNECTION; +show +autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show autocommit bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show autocommit%; +NEW_CONNECTION; +@EXPECT 
EXCEPTION UNIMPLEMENTED +show%autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show autocommit_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show autocommit&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show autocommit$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show autocommit@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show autocommit!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show autocommit*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show autocommit(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show autocommit); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show autocommit-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show autocommit; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show autocommit+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show autocommit-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show autocommit/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show autocommit\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show autocommit?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show autocommit-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show autocommit/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show autocommit/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-autocommit; +NEW_CONNECTION; +show variable autocommit; +NEW_CONNECTION; +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +show variable autocommit; +NEW_CONNECTION; + show variable autocommit; +NEW_CONNECTION; + show variable autocommit; +NEW_CONNECTION; + + + +show variable autocommit; +NEW_CONNECTION; +show variable autocommit ; +NEW_CONNECTION; +show variable autocommit ; +NEW_CONNECTION; +show 
variable autocommit + +; +NEW_CONNECTION; +show variable autocommit; +NEW_CONNECTION; +show variable autocommit; +NEW_CONNECTION; +show +variable +autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*autocommit; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show variable autocommit-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable autocommit; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable autocommit/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-autocommit; +NEW_CONNECTION; +show spanner.readonly; +NEW_CONNECTION; +SHOW SPANNER.READONLY; +NEW_CONNECTION; +show spanner.readonly; +NEW_CONNECTION; + show spanner.readonly; +NEW_CONNECTION; + show spanner.readonly; +NEW_CONNECTION; + + + +show spanner.readonly; +NEW_CONNECTION; +show spanner.readonly ; +NEW_CONNECTION; +show spanner.readonly ; +NEW_CONNECTION; +show spanner.readonly + +; +NEW_CONNECTION; +show spanner.readonly; +NEW_CONNECTION; +show spanner.readonly; +NEW_CONNECTION; +show +spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.readonly bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.readonly%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.readonly_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.readonly&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+$show spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.readonly$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.readonly@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.readonly!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.readonly*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.readonly(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.readonly); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.readonly-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.readonly+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.readonly-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.readonly; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.readonly/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.readonly\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.readonly?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.readonly-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.readonly/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.readonly/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.readonly; +NEW_CONNECTION; +show variable spanner.readonly; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +show variable spanner.readonly; +NEW_CONNECTION; + show variable spanner.readonly; +NEW_CONNECTION; + show variable spanner.readonly; +NEW_CONNECTION; + + + +show variable spanner.readonly; +NEW_CONNECTION; +show variable spanner.readonly ; +NEW_CONNECTION; +show variable spanner.readonly ; +NEW_CONNECTION; +show variable spanner.readonly + +; +NEW_CONNECTION; +show variable spanner.readonly; +NEW_CONNECTION; +show variable spanner.readonly; +NEW_CONNECTION; +show +variable +spanner.readonly; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.readonly bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.readonly%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.readonly_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.readonly&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.readonly$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.readonly@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.readonly!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.readonly*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +(show variable spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.readonly(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.readonly); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.readonly-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.readonly+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.readonly-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.readonly/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.readonly\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.readonly?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.readonly; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.readonly-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.readonly/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.readonly; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.readonly/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.readonly; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +SHOW SPANNER.RETRY_ABORTS_INTERNALLY; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; + show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; + show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; + + + +show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +show spanner.retry_aborts_internally ; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +show spanner.retry_aborts_internally ; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +show spanner.retry_aborts_internally + +; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +show spanner.retry_aborts_internally; +NEW_CONNECTION; +set 
spanner.readonly=false; +set autocommit=false; +show +spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.retry_aborts_internally bar; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.retry_aborts_internally%; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.retry_aborts_internally_; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.retry_aborts_internally&; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.retry_aborts_internally$; +NEW_CONNECTION; +set spanner.readonly=false; 
+set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.retry_aborts_internally@; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.retry_aborts_internally!; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.retry_aborts_internally*; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.retry_aborts_internally(; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; 
+set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.retry_aborts_internally); +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.retry_aborts_internally-; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.retry_aborts_internally+; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.retry_aborts_internally-#; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.retry_aborts_internally/; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; 
+set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.retry_aborts_internally\; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.retry_aborts_internally?; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.retry_aborts_internally-/; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.retry_aborts_internally/#; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.retry_aborts_internally/-; +NEW_CONNECTION; +set 
spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +SHOW VARIABLE SPANNER.RETRY_ABORTS_INTERNALLY; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; + show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; + show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; + + + +show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +show variable spanner.retry_aborts_internally ; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +show variable spanner.retry_aborts_internally ; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +show variable spanner.retry_aborts_internally + +; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +show +variable +spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.retry_aborts_internally bar; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable 
spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.retry_aborts_internally%; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.retry_aborts_internally_; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.retry_aborts_internally&; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.retry_aborts_internally$; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable 
spanner.retry_aborts_internally@; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.retry_aborts_internally!; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.retry_aborts_internally*; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.retry_aborts_internally(; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.retry_aborts_internally); +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable)spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.retry_aborts_internally-; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.retry_aborts_internally+; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.retry_aborts_internally-#; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.retry_aborts_internally/; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable 
spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.retry_aborts_internally\; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.retry_aborts_internally?; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.retry_aborts_internally-/; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.retry_aborts_internally/#; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.retry_aborts_internally; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable 
spanner.retry_aborts_internally/-; +NEW_CONNECTION; +set spanner.readonly=false; +set autocommit=false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.retry_aborts_internally; +NEW_CONNECTION; +show spanner.autocommit_dml_mode; +NEW_CONNECTION; +SHOW SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +show spanner.autocommit_dml_mode; +NEW_CONNECTION; + show spanner.autocommit_dml_mode; +NEW_CONNECTION; + show spanner.autocommit_dml_mode; +NEW_CONNECTION; + + + +show spanner.autocommit_dml_mode; +NEW_CONNECTION; +show spanner.autocommit_dml_mode ; +NEW_CONNECTION; +show spanner.autocommit_dml_mode ; +NEW_CONNECTION; +show spanner.autocommit_dml_mode + +; +NEW_CONNECTION; +show spanner.autocommit_dml_mode; +NEW_CONNECTION; +show spanner.autocommit_dml_mode; +NEW_CONNECTION; +show +spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.autocommit_dml_mode bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.autocommit_dml_mode%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.autocommit_dml_mode_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.autocommit_dml_mode&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.autocommit_dml_mode$; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show$spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.autocommit_dml_mode@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.autocommit_dml_mode!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.autocommit_dml_mode*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.autocommit_dml_mode(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.autocommit_dml_mode); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.autocommit_dml_mode-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.autocommit_dml_mode+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show spanner.autocommit_dml_mode-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.autocommit_dml_mode/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.autocommit_dml_mode\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.autocommit_dml_mode?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.autocommit_dml_mode-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.autocommit_dml_mode/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.autocommit_dml_mode/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.autocommit_dml_mode; +NEW_CONNECTION; +show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; + show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; + show variable 
spanner.autocommit_dml_mode; +NEW_CONNECTION; + + + +show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +show variable spanner.autocommit_dml_mode ; +NEW_CONNECTION; +show variable spanner.autocommit_dml_mode ; +NEW_CONNECTION; +show variable spanner.autocommit_dml_mode + +; +NEW_CONNECTION; +show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +show +variable +spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.autocommit_dml_mode bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.autocommit_dml_mode%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.autocommit_dml_mode_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.autocommit_dml_mode&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.autocommit_dml_mode$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable 
spanner.autocommit_dml_mode@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.autocommit_dml_mode!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.autocommit_dml_mode*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.autocommit_dml_mode(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.autocommit_dml_mode); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.autocommit_dml_mode-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.autocommit_dml_mode+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show variable spanner.autocommit_dml_mode-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.autocommit_dml_mode/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.autocommit_dml_mode\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.autocommit_dml_mode?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.autocommit_dml_mode-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.autocommit_dml_mode/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.autocommit_dml_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.autocommit_dml_mode/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.autocommit_dml_mode; +NEW_CONNECTION; +show statement_timeout; +NEW_CONNECTION; +SHOW STATEMENT_TIMEOUT; +NEW_CONNECTION; +show statement_timeout; 
+NEW_CONNECTION; + show statement_timeout; +NEW_CONNECTION; + show statement_timeout; +NEW_CONNECTION; + + + +show statement_timeout; +NEW_CONNECTION; +show statement_timeout ; +NEW_CONNECTION; +show statement_timeout ; +NEW_CONNECTION; +show statement_timeout + +; +NEW_CONNECTION; +show statement_timeout; +NEW_CONNECTION; +show statement_timeout; +NEW_CONNECTION; +show +statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show statement_timeout bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show statement_timeout%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show statement_timeout_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show statement_timeout&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show statement_timeout$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show statement_timeout@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show statement_timeout!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show 
statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show statement_timeout*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show statement_timeout(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show statement_timeout); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show statement_timeout-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show statement_timeout+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show statement_timeout-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show statement_timeout/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show statement_timeout\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show statement_timeout?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show?statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show statement_timeout-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show statement_timeout/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show statement_timeout/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-statement_timeout; +NEW_CONNECTION; +show variable statement_timeout; +NEW_CONNECTION; +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +show variable statement_timeout; +NEW_CONNECTION; + show variable statement_timeout; +NEW_CONNECTION; + show variable statement_timeout; +NEW_CONNECTION; + + + +show variable statement_timeout; +NEW_CONNECTION; +show variable statement_timeout ; +NEW_CONNECTION; +show variable statement_timeout ; +NEW_CONNECTION; +show variable statement_timeout + +; +NEW_CONNECTION; +show variable statement_timeout; +NEW_CONNECTION; +show variable statement_timeout; +NEW_CONNECTION; +show +variable +statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout_; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show variable_statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable 
statement_timeout-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable 
statement_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable statement_timeout/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-statement_timeout; +NEW_CONNECTION; +show spanner.transaction_timeout; +NEW_CONNECTION; +SHOW SPANNER.TRANSACTION_TIMEOUT; +NEW_CONNECTION; +show spanner.transaction_timeout; +NEW_CONNECTION; + show spanner.transaction_timeout; +NEW_CONNECTION; + show spanner.transaction_timeout; +NEW_CONNECTION; + + + +show spanner.transaction_timeout; +NEW_CONNECTION; +show spanner.transaction_timeout ; +NEW_CONNECTION; +show spanner.transaction_timeout ; +NEW_CONNECTION; +show spanner.transaction_timeout + +; +NEW_CONNECTION; +show spanner.transaction_timeout; +NEW_CONNECTION; +show spanner.transaction_timeout; +NEW_CONNECTION; +show +spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_timeout bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_timeout%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_timeout_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_timeout&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_timeout$; +NEW_CONNECTION; +@EXPECT 
EXCEPTION UNIMPLEMENTED +show$spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_timeout@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_timeout!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_timeout*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_timeout(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_timeout); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_timeout-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_timeout+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT 
EXCEPTION UNIMPLEMENTED +show spanner.transaction_timeout-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_timeout/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_timeout\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_timeout?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_timeout-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_timeout/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_timeout/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.transaction_timeout; +NEW_CONNECTION; +show variable spanner.transaction_timeout; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.TRANSACTION_TIMEOUT; +NEW_CONNECTION; +show variable spanner.transaction_timeout; +NEW_CONNECTION; + show variable spanner.transaction_timeout; +NEW_CONNECTION; + show variable 
spanner.transaction_timeout; +NEW_CONNECTION; + + + +show variable spanner.transaction_timeout; +NEW_CONNECTION; +show variable spanner.transaction_timeout ; +NEW_CONNECTION; +show variable spanner.transaction_timeout ; +NEW_CONNECTION; +show variable spanner.transaction_timeout + +; +NEW_CONNECTION; +show variable spanner.transaction_timeout; +NEW_CONNECTION; +show variable spanner.transaction_timeout; +NEW_CONNECTION; +show +variable +spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_timeout bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_timeout%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_timeout_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_timeout&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_timeout$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable 
spanner.transaction_timeout@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_timeout!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_timeout*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_timeout(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_timeout); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_timeout-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_timeout+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show variable spanner.transaction_timeout-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_timeout/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_timeout\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_timeout?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_timeout-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_timeout/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.transaction_timeout; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_timeout/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.transaction_timeout; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +show spanner.read_timestamp; +NEW_CONNECTION; +set 
spanner.readonly = true; +SELECT 1 AS TEST; +SHOW SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; + show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; + show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; + + + +show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +show spanner.read_timestamp ; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +show spanner.read_timestamp ; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +show spanner.read_timestamp + +; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +show +spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_timestamp bar; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_timestamp%; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_timestamp_; +NEW_CONNECTION; +set 
spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_timestamp&; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_timestamp$; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_timestamp@; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_timestamp!; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_timestamp*; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS 
TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_timestamp(; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_timestamp); +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_timestamp-; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_timestamp+; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_timestamp-#; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show-#spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_timestamp/; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_timestamp\; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_timestamp?; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_timestamp-/; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_timestamp/#; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show/#spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_timestamp/-; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; + show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; + show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; + + + +show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +show variable spanner.read_timestamp ; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +show variable spanner.read_timestamp ; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +show variable spanner.read_timestamp + +; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +show +variable +spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_timestamp bar; 
+NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_timestamp%; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_timestamp_; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_timestamp&; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_timestamp$; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_timestamp@; +NEW_CONNECTION; +set spanner.readonly = 
true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_timestamp!; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_timestamp*; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_timestamp(; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_timestamp); +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show variable spanner.read_timestamp-; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_timestamp+; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_timestamp-#; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_timestamp/; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_timestamp\; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable 
spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_timestamp?; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_timestamp-/; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_timestamp/#; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.read_timestamp; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_timestamp/-; +NEW_CONNECTION; +set spanner.readonly = true; +SELECT 1 AS TEST; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.read_timestamp; +NEW_CONNECTION; +update foo set bar=1; +show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +SHOW SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +update foo set bar=1; +show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; + show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; + show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; + + + +show 
spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +show spanner.commit_timestamp ; +NEW_CONNECTION; +update foo set bar=1; +show spanner.commit_timestamp ; +NEW_CONNECTION; +update foo set bar=1; +show spanner.commit_timestamp + +; +NEW_CONNECTION; +update foo set bar=1; +show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +show +spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_timestamp bar; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_timestamp%; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_timestamp_; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_timestamp&; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_timestamp$; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.commit_timestamp; +NEW_CONNECTION; +update foo set 
bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_timestamp@; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_timestamp!; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_timestamp*; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_timestamp(; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_timestamp); +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_timestamp-; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.commit_timestamp; 
+NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_timestamp+; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_timestamp-#; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_timestamp/; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_timestamp\; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_timestamp?; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_timestamp-/; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show spanner.commit_timestamp/#; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_timestamp/-; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +update foo set bar=1; +show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; + show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; + show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; + + + +show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +show variable spanner.commit_timestamp ; +NEW_CONNECTION; +update foo set bar=1; +show variable spanner.commit_timestamp ; +NEW_CONNECTION; +update foo set bar=1; +show variable spanner.commit_timestamp + +; +NEW_CONNECTION; +update foo set bar=1; +show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +show +variable +spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_timestamp bar; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_timestamp%; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show variable%spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_timestamp_; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_timestamp&; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_timestamp$; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_timestamp@; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_timestamp!; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable 
spanner.commit_timestamp*; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_timestamp(; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_timestamp); +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_timestamp-; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_timestamp+; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_timestamp-#; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.commit_timestamp; 
+NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_timestamp/; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_timestamp\; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_timestamp?; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_timestamp-/; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_timestamp/#; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.commit_timestamp; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_timestamp/-; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.commit_timestamp; +NEW_CONNECTION; +show 
spanner.read_only_staleness; +NEW_CONNECTION; +SHOW SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +show spanner.read_only_staleness; +NEW_CONNECTION; + show spanner.read_only_staleness; +NEW_CONNECTION; + show spanner.read_only_staleness; +NEW_CONNECTION; + + + +show spanner.read_only_staleness; +NEW_CONNECTION; +show spanner.read_only_staleness ; +NEW_CONNECTION; +show spanner.read_only_staleness ; +NEW_CONNECTION; +show spanner.read_only_staleness + +; +NEW_CONNECTION; +show spanner.read_only_staleness; +NEW_CONNECTION; +show spanner.read_only_staleness; +NEW_CONNECTION; +show +spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_only_staleness bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_only_staleness%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_only_staleness_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_only_staleness&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_only_staleness$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
spanner.read_only_staleness@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_only_staleness!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_only_staleness*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_only_staleness(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_only_staleness); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_only_staleness-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_only_staleness+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_only_staleness-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show 
spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_only_staleness/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_only_staleness\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_only_staleness?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_only_staleness-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_only_staleness/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_only_staleness/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.read_only_staleness; +NEW_CONNECTION; +show variable spanner.read_only_staleness; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +show variable spanner.read_only_staleness; +NEW_CONNECTION; + show variable spanner.read_only_staleness; +NEW_CONNECTION; + show variable spanner.read_only_staleness; +NEW_CONNECTION; + + + +show variable spanner.read_only_staleness; +NEW_CONNECTION; +show variable spanner.read_only_staleness ; +NEW_CONNECTION; +show variable 
spanner.read_only_staleness ; +NEW_CONNECTION; +show variable spanner.read_only_staleness + +; +NEW_CONNECTION; +show variable spanner.read_only_staleness; +NEW_CONNECTION; +show variable spanner.read_only_staleness; +NEW_CONNECTION; +show +variable +spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_only_staleness bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_only_staleness%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_only_staleness_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_only_staleness&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_only_staleness$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_only_staleness@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable 
spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_only_staleness!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_only_staleness*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_only_staleness(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_only_staleness); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_only_staleness-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_only_staleness+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_only_staleness-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+/show variable spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_only_staleness/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_only_staleness\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_only_staleness?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_only_staleness-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_only_staleness/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.read_only_staleness; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_only_staleness/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.read_only_staleness; +NEW_CONNECTION; +show spanner.directed_read; +NEW_CONNECTION; +SHOW SPANNER.DIRECTED_READ; +NEW_CONNECTION; +show spanner.directed_read; +NEW_CONNECTION; + show spanner.directed_read; +NEW_CONNECTION; + show spanner.directed_read; +NEW_CONNECTION; + + + +show spanner.directed_read; +NEW_CONNECTION; +show 
spanner.directed_read ; +NEW_CONNECTION; +show spanner.directed_read ; +NEW_CONNECTION; +show spanner.directed_read + +; +NEW_CONNECTION; +show spanner.directed_read; +NEW_CONNECTION; +show spanner.directed_read; +NEW_CONNECTION; +show +spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.directed_read bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.directed_read%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.directed_read_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.directed_read&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.directed_read$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.directed_read@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.directed_read!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show spanner.directed_read*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.directed_read(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.directed_read); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.directed_read-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.directed_read+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.directed_read-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.directed_read/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.directed_read\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.directed_read?; +NEW_CONNECTION; 
+@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.directed_read-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.directed_read/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.directed_read/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.directed_read; +NEW_CONNECTION; +show variable spanner.directed_read; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.DIRECTED_READ; +NEW_CONNECTION; +show variable spanner.directed_read; +NEW_CONNECTION; + show variable spanner.directed_read; +NEW_CONNECTION; + show variable spanner.directed_read; +NEW_CONNECTION; + + + +show variable spanner.directed_read; +NEW_CONNECTION; +show variable spanner.directed_read ; +NEW_CONNECTION; +show variable spanner.directed_read ; +NEW_CONNECTION; +show variable spanner.directed_read + +; +NEW_CONNECTION; +show variable spanner.directed_read; +NEW_CONNECTION; +show variable spanner.directed_read; +NEW_CONNECTION; +show +variable +spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.directed_read bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.directed_read%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable 
spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.directed_read_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.directed_read&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.directed_read$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.directed_read@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.directed_read!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.directed_read*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.directed_read(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.directed_read); 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.directed_read-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.directed_read+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.directed_read-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.directed_read/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.directed_read\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.directed_read?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.directed_read-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.directed_read; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.directed_read/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.directed_read; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.directed_read/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.directed_read; +NEW_CONNECTION; +show spanner.optimizer_version; +NEW_CONNECTION; +SHOW SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +show spanner.optimizer_version; +NEW_CONNECTION; + show spanner.optimizer_version; +NEW_CONNECTION; + show spanner.optimizer_version; +NEW_CONNECTION; + + + +show spanner.optimizer_version; +NEW_CONNECTION; +show spanner.optimizer_version ; +NEW_CONNECTION; +show spanner.optimizer_version ; +NEW_CONNECTION; +show spanner.optimizer_version + +; +NEW_CONNECTION; +show spanner.optimizer_version; +NEW_CONNECTION; +show spanner.optimizer_version; +NEW_CONNECTION; +show +spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_version bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_version%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_version_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_version&; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_version$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_version@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_version!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_version*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_version(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_version); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_version-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show spanner.optimizer_version+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_version-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_version/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_version\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_version?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_version-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_version/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_version/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.optimizer_version; +NEW_CONNECTION; +show variable spanner.optimizer_version; +NEW_CONNECTION; +SHOW VARIABLE 
SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +show variable spanner.optimizer_version; +NEW_CONNECTION; + show variable spanner.optimizer_version; +NEW_CONNECTION; + show variable spanner.optimizer_version; +NEW_CONNECTION; + + + +show variable spanner.optimizer_version; +NEW_CONNECTION; +show variable spanner.optimizer_version ; +NEW_CONNECTION; +show variable spanner.optimizer_version ; +NEW_CONNECTION; +show variable spanner.optimizer_version + +; +NEW_CONNECTION; +show variable spanner.optimizer_version; +NEW_CONNECTION; +show variable spanner.optimizer_version; +NEW_CONNECTION; +show +variable +spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_version bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_version%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_version_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_version&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_version$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +@show variable spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_version@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_version!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_version*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_version(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_version); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_version-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_version+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show 
variable spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_version-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_version/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_version\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_version?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_version-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_version/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.optimizer_version; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_version/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.optimizer_version; +NEW_CONNECTION; +show spanner.optimizer_statistics_package; +NEW_CONNECTION; 
+SHOW SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +show spanner.optimizer_statistics_package; +NEW_CONNECTION; + show spanner.optimizer_statistics_package; +NEW_CONNECTION; + show spanner.optimizer_statistics_package; +NEW_CONNECTION; + + + +show spanner.optimizer_statistics_package; +NEW_CONNECTION; +show spanner.optimizer_statistics_package ; +NEW_CONNECTION; +show spanner.optimizer_statistics_package ; +NEW_CONNECTION; +show spanner.optimizer_statistics_package + +; +NEW_CONNECTION; +show spanner.optimizer_statistics_package; +NEW_CONNECTION; +show spanner.optimizer_statistics_package; +NEW_CONNECTION; +show +spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_statistics_package bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_statistics_package%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_statistics_package_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_statistics_package&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_statistics_package$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show$spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_statistics_package@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_statistics_package!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_statistics_package*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_statistics_package(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_statistics_package); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_statistics_package-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_statistics_package+; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show+spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_statistics_package-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_statistics_package/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_statistics_package\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_statistics_package?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_statistics_package-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_statistics_package/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.optimizer_statistics_package/-; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.optimizer_statistics_package; +NEW_CONNECTION; +show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; + show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; + show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; + + + +show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +show variable spanner.optimizer_statistics_package ; +NEW_CONNECTION; +show variable spanner.optimizer_statistics_package ; +NEW_CONNECTION; +show variable spanner.optimizer_statistics_package + +; +NEW_CONNECTION; +show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +show +variable +spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_statistics_package bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_statistics_package%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_statistics_package_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable 
spanner.optimizer_statistics_package&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_statistics_package$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_statistics_package@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_statistics_package!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_statistics_package*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_statistics_package(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_statistics_package); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable)spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_statistics_package-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_statistics_package+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_statistics_package-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_statistics_package/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_statistics_package\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_statistics_package?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show 
variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_statistics_package-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_statistics_package/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.optimizer_statistics_package; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.optimizer_statistics_package/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.optimizer_statistics_package; +NEW_CONNECTION; +show spanner.return_commit_stats; +NEW_CONNECTION; +SHOW SPANNER.RETURN_COMMIT_STATS; +NEW_CONNECTION; +show spanner.return_commit_stats; +NEW_CONNECTION; + show spanner.return_commit_stats; +NEW_CONNECTION; + show spanner.return_commit_stats; +NEW_CONNECTION; + + + +show spanner.return_commit_stats; +NEW_CONNECTION; +show spanner.return_commit_stats ; +NEW_CONNECTION; +show spanner.return_commit_stats ; +NEW_CONNECTION; +show spanner.return_commit_stats + +; +NEW_CONNECTION; +show spanner.return_commit_stats; +NEW_CONNECTION; +show spanner.return_commit_stats; +NEW_CONNECTION; +show +spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.return_commit_stats bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.return_commit_stats%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +_show spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.return_commit_stats_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.return_commit_stats&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.return_commit_stats$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.return_commit_stats@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.return_commit_stats!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.return_commit_stats*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.return_commit_stats(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.return_commit_stats); +NEW_CONNECTION; +@EXPECT 
EXCEPTION UNIMPLEMENTED +show)spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.return_commit_stats-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.return_commit_stats+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.return_commit_stats-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.return_commit_stats/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.return_commit_stats\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.return_commit_stats?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.return_commit_stats-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.return_commit_stats; +NEW_CONNECTION; 
+@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.return_commit_stats/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.return_commit_stats/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.return_commit_stats; +NEW_CONNECTION; +show variable spanner.return_commit_stats; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.RETURN_COMMIT_STATS; +NEW_CONNECTION; +show variable spanner.return_commit_stats; +NEW_CONNECTION; + show variable spanner.return_commit_stats; +NEW_CONNECTION; + show variable spanner.return_commit_stats; +NEW_CONNECTION; + + + +show variable spanner.return_commit_stats; +NEW_CONNECTION; +show variable spanner.return_commit_stats ; +NEW_CONNECTION; +show variable spanner.return_commit_stats ; +NEW_CONNECTION; +show variable spanner.return_commit_stats + +; +NEW_CONNECTION; +show variable spanner.return_commit_stats; +NEW_CONNECTION; +show variable spanner.return_commit_stats; +NEW_CONNECTION; +show +variable +spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.return_commit_stats bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.return_commit_stats%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.return_commit_stats_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show 
variable spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.return_commit_stats&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.return_commit_stats$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.return_commit_stats@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.return_commit_stats!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.return_commit_stats*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.return_commit_stats(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.return_commit_stats); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +-show variable spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.return_commit_stats-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.return_commit_stats+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.return_commit_stats-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.return_commit_stats/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.return_commit_stats\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.return_commit_stats?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.return_commit_stats-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.return_commit_stats; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.return_commit_stats/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.return_commit_stats; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.return_commit_stats/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.return_commit_stats; +NEW_CONNECTION; +show spanner.max_commit_delay; +NEW_CONNECTION; +SHOW SPANNER.MAX_COMMIT_DELAY; +NEW_CONNECTION; +show spanner.max_commit_delay; +NEW_CONNECTION; + show spanner.max_commit_delay; +NEW_CONNECTION; + show spanner.max_commit_delay; +NEW_CONNECTION; + + + +show spanner.max_commit_delay; +NEW_CONNECTION; +show spanner.max_commit_delay ; +NEW_CONNECTION; +show spanner.max_commit_delay ; +NEW_CONNECTION; +show spanner.max_commit_delay + +; +NEW_CONNECTION; +show spanner.max_commit_delay; +NEW_CONNECTION; +show spanner.max_commit_delay; +NEW_CONNECTION; +show +spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_commit_delay bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_commit_delay%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_commit_delay_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
spanner.max_commit_delay&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_commit_delay$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_commit_delay@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_commit_delay!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_commit_delay*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_commit_delay(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_commit_delay); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_commit_delay-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show spanner.max_commit_delay+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_commit_delay-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_commit_delay/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_commit_delay\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_commit_delay?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_commit_delay-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_commit_delay/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_commit_delay/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.max_commit_delay; +NEW_CONNECTION; +show variable spanner.max_commit_delay; +NEW_CONNECTION; +SHOW VARIABLE 
SPANNER.MAX_COMMIT_DELAY; +NEW_CONNECTION; +show variable spanner.max_commit_delay; +NEW_CONNECTION; + show variable spanner.max_commit_delay; +NEW_CONNECTION; + show variable spanner.max_commit_delay; +NEW_CONNECTION; + + + +show variable spanner.max_commit_delay; +NEW_CONNECTION; +show variable spanner.max_commit_delay ; +NEW_CONNECTION; +show variable spanner.max_commit_delay ; +NEW_CONNECTION; +show variable spanner.max_commit_delay + +; +NEW_CONNECTION; +show variable spanner.max_commit_delay; +NEW_CONNECTION; +show variable spanner.max_commit_delay; +NEW_CONNECTION; +show +variable +spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_commit_delay bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_commit_delay%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_commit_delay_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_commit_delay&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_commit_delay$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable 
spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_commit_delay@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_commit_delay!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_commit_delay*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_commit_delay(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_commit_delay); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_commit_delay-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_commit_delay+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT 
EXCEPTION UNIMPLEMENTED +show variable spanner.max_commit_delay-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_commit_delay/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_commit_delay\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_commit_delay?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_commit_delay-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_commit_delay/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.max_commit_delay; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_commit_delay/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.max_commit_delay; +NEW_CONNECTION; +update foo set bar=1; +show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +SHOW SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; 
+update foo set bar=1; +show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; + show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; + show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; + + + +show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +show spanner.commit_response ; +NEW_CONNECTION; +update foo set bar=1; +show spanner.commit_response ; +NEW_CONNECTION; +update foo set bar=1; +show spanner.commit_response + +; +NEW_CONNECTION; +update foo set bar=1; +show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +show +spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_response bar; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_response%; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_response_; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_response&; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.commit_response; 
+NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_response$; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_response@; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_response!; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_response*; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_response(; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_response); +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
spanner.commit_response-; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_response+; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_response-#; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_response/; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_response\; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_response?; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_response-/; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show-/spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_response/#; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.commit_response/-; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +update foo set bar=1; +show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; + show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; + show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; + + + +show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +show variable spanner.commit_response ; +NEW_CONNECTION; +update foo set bar=1; +show variable spanner.commit_response ; +NEW_CONNECTION; +update foo set bar=1; +show variable spanner.commit_response + +; +NEW_CONNECTION; +update foo set bar=1; +show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +show +variable +spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_response bar; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable 
spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_response%; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_response_; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_response&; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_response$; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_response@; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_response!; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.commit_response; +NEW_CONNECTION; +update 
foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_response*; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_response(; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_response); +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_response-; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_response+; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_response-#; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show variable-#spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_response/; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_response\; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_response?; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_response-/; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.commit_response/#; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.commit_response; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable 
spanner.commit_response/-; +NEW_CONNECTION; +update foo set bar=1; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.commit_response; +NEW_CONNECTION; +show spanner.statement_tag; +NEW_CONNECTION; +SHOW SPANNER.STATEMENT_TAG; +NEW_CONNECTION; +show spanner.statement_tag; +NEW_CONNECTION; + show spanner.statement_tag; +NEW_CONNECTION; + show spanner.statement_tag; +NEW_CONNECTION; + + + +show spanner.statement_tag; +NEW_CONNECTION; +show spanner.statement_tag ; +NEW_CONNECTION; +show spanner.statement_tag ; +NEW_CONNECTION; +show spanner.statement_tag + +; +NEW_CONNECTION; +show spanner.statement_tag; +NEW_CONNECTION; +show spanner.statement_tag; +NEW_CONNECTION; +show +spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.statement_tag bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.statement_tag%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.statement_tag_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.statement_tag&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.statement_tag$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
spanner.statement_tag@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.statement_tag!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.statement_tag*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.statement_tag(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.statement_tag); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.statement_tag-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.statement_tag+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.statement_tag-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.statement_tag/; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show/spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.statement_tag\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.statement_tag?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.statement_tag-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.statement_tag/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.statement_tag/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.statement_tag; +NEW_CONNECTION; +show variable spanner.statement_tag; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.STATEMENT_TAG; +NEW_CONNECTION; +show variable spanner.statement_tag; +NEW_CONNECTION; + show variable spanner.statement_tag; +NEW_CONNECTION; + show variable spanner.statement_tag; +NEW_CONNECTION; + + + +show variable spanner.statement_tag; +NEW_CONNECTION; +show variable spanner.statement_tag ; +NEW_CONNECTION; +show variable spanner.statement_tag ; +NEW_CONNECTION; +show variable spanner.statement_tag + +; +NEW_CONNECTION; +show variable spanner.statement_tag; +NEW_CONNECTION; +show variable spanner.statement_tag; +NEW_CONNECTION; +show +variable +spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo 
show variable spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.statement_tag bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.statement_tag%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.statement_tag_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.statement_tag&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.statement_tag$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.statement_tag@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.statement_tag!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.statement_tag*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable*spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.statement_tag(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.statement_tag); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.statement_tag-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.statement_tag+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.statement_tag-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.statement_tag/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.statement_tag\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable 
spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.statement_tag?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.statement_tag-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.statement_tag/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.statement_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.statement_tag/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.statement_tag; +NEW_CONNECTION; +show spanner.transaction_tag; +NEW_CONNECTION; +SHOW SPANNER.TRANSACTION_TAG; +NEW_CONNECTION; +show spanner.transaction_tag; +NEW_CONNECTION; + show spanner.transaction_tag; +NEW_CONNECTION; + show spanner.transaction_tag; +NEW_CONNECTION; + + + +show spanner.transaction_tag; +NEW_CONNECTION; +show spanner.transaction_tag ; +NEW_CONNECTION; +show spanner.transaction_tag ; +NEW_CONNECTION; +show spanner.transaction_tag + +; +NEW_CONNECTION; +show spanner.transaction_tag; +NEW_CONNECTION; +show spanner.transaction_tag; +NEW_CONNECTION; +show +spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_tag bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_tag%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show%spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_tag_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_tag&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_tag$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_tag@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_tag!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_tag*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_tag(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_tag); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show)spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_tag-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_tag+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_tag-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_tag/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_tag\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_tag?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_tag-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_tag/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show/#spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.transaction_tag/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.transaction_tag; +NEW_CONNECTION; +show variable spanner.transaction_tag; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.TRANSACTION_TAG; +NEW_CONNECTION; +show variable spanner.transaction_tag; +NEW_CONNECTION; + show variable spanner.transaction_tag; +NEW_CONNECTION; + show variable spanner.transaction_tag; +NEW_CONNECTION; + + + +show variable spanner.transaction_tag; +NEW_CONNECTION; +show variable spanner.transaction_tag ; +NEW_CONNECTION; +show variable spanner.transaction_tag ; +NEW_CONNECTION; +show variable spanner.transaction_tag + +; +NEW_CONNECTION; +show variable spanner.transaction_tag; +NEW_CONNECTION; +show variable spanner.transaction_tag; +NEW_CONNECTION; +show +variable +spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_tag bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_tag%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_tag_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_tag&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.transaction_tag; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_tag$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_tag@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_tag!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_tag*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_tag(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_tag); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_tag-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable 
spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_tag+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_tag-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_tag/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_tag\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_tag?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_tag-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.transaction_tag/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.transaction_tag; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show variable spanner.transaction_tag/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.transaction_tag; +NEW_CONNECTION; +show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +SHOW SPANNER.EXCLUDE_TXN_FROM_CHANGE_STREAMS; +NEW_CONNECTION; +show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; + show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; + show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; + + + +show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +show spanner.exclude_txn_from_change_streams ; +NEW_CONNECTION; +show spanner.exclude_txn_from_change_streams ; +NEW_CONNECTION; +show spanner.exclude_txn_from_change_streams + +; +NEW_CONNECTION; +show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +show +spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show&spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
spanner.exclude_txn_from_change_streams-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show 
spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.EXCLUDE_TXN_FROM_CHANGE_STREAMS; +NEW_CONNECTION; +show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; + show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; + show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; + + + +show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +show variable spanner.exclude_txn_from_change_streams ; +NEW_CONNECTION; +show variable spanner.exclude_txn_from_change_streams ; +NEW_CONNECTION; +show variable spanner.exclude_txn_from_change_streams + +; +NEW_CONNECTION; +show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +show +variable +spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable 
spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; 
+@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +show spanner.rpc_priority; +NEW_CONNECTION; +SHOW SPANNER.RPC_PRIORITY; +NEW_CONNECTION; +show spanner.rpc_priority; +NEW_CONNECTION; + show spanner.rpc_priority; +NEW_CONNECTION; + show spanner.rpc_priority; +NEW_CONNECTION; + + + +show spanner.rpc_priority; +NEW_CONNECTION; +show spanner.rpc_priority ; +NEW_CONNECTION; +show spanner.rpc_priority ; +NEW_CONNECTION; +show spanner.rpc_priority + +; +NEW_CONNECTION; +show spanner.rpc_priority; +NEW_CONNECTION; +show 
spanner.rpc_priority; +NEW_CONNECTION; +show +spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.rpc_priority bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.rpc_priority%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.rpc_priority_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.rpc_priority&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.rpc_priority$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.rpc_priority@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.rpc_priority!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.rpc_priority*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.rpc_priority; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.rpc_priority(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.rpc_priority); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.rpc_priority-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.rpc_priority+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.rpc_priority-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.rpc_priority/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.rpc_priority\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.rpc_priority?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.rpc_priority-/; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.rpc_priority/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.rpc_priority/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.rpc_priority; +NEW_CONNECTION; +show variable spanner.rpc_priority; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.RPC_PRIORITY; +NEW_CONNECTION; +show variable spanner.rpc_priority; +NEW_CONNECTION; + show variable spanner.rpc_priority; +NEW_CONNECTION; + show variable spanner.rpc_priority; +NEW_CONNECTION; + + + +show variable spanner.rpc_priority; +NEW_CONNECTION; +show variable spanner.rpc_priority ; +NEW_CONNECTION; +show variable spanner.rpc_priority ; +NEW_CONNECTION; +show variable spanner.rpc_priority + +; +NEW_CONNECTION; +show variable spanner.rpc_priority; +NEW_CONNECTION; +show variable spanner.rpc_priority; +NEW_CONNECTION; +show +variable +spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.rpc_priority bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.rpc_priority%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.rpc_priority_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+&show variable spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.rpc_priority&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.rpc_priority$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.rpc_priority@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.rpc_priority!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.rpc_priority*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.rpc_priority(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.rpc_priority); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.rpc_priority-; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.rpc_priority+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.rpc_priority-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.rpc_priority/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.rpc_priority\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.rpc_priority?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.rpc_priority-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.rpc_priority/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +/-show variable spanner.rpc_priority; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.rpc_priority/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.rpc_priority; +NEW_CONNECTION; +show spanner.savepoint_support; +NEW_CONNECTION; +SHOW SPANNER.SAVEPOINT_SUPPORT; +NEW_CONNECTION; +show spanner.savepoint_support; +NEW_CONNECTION; + show spanner.savepoint_support; +NEW_CONNECTION; + show spanner.savepoint_support; +NEW_CONNECTION; + + + +show spanner.savepoint_support; +NEW_CONNECTION; +show spanner.savepoint_support ; +NEW_CONNECTION; +show spanner.savepoint_support ; +NEW_CONNECTION; +show spanner.savepoint_support + +; +NEW_CONNECTION; +show spanner.savepoint_support; +NEW_CONNECTION; +show spanner.savepoint_support; +NEW_CONNECTION; +show +spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.savepoint_support bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.savepoint_support%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.savepoint_support_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.savepoint_support&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.savepoint_support$; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show$spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.savepoint_support@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.savepoint_support!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.savepoint_support*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.savepoint_support(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.savepoint_support); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.savepoint_support-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.savepoint_support+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.savepoint_support-#; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.savepoint_support/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.savepoint_support\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.savepoint_support?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.savepoint_support-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.savepoint_support/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.savepoint_support/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.savepoint_support; +NEW_CONNECTION; +show variable spanner.savepoint_support; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.SAVEPOINT_SUPPORT; +NEW_CONNECTION; +show variable spanner.savepoint_support; +NEW_CONNECTION; + show variable spanner.savepoint_support; +NEW_CONNECTION; + show variable spanner.savepoint_support; +NEW_CONNECTION; + + + +show variable spanner.savepoint_support; +NEW_CONNECTION; 
+show variable spanner.savepoint_support ; +NEW_CONNECTION; +show variable spanner.savepoint_support ; +NEW_CONNECTION; +show variable spanner.savepoint_support + +; +NEW_CONNECTION; +show variable spanner.savepoint_support; +NEW_CONNECTION; +show variable spanner.savepoint_support; +NEW_CONNECTION; +show +variable +spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.savepoint_support bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.savepoint_support%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.savepoint_support_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.savepoint_support&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.savepoint_support$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.savepoint_support@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+!show variable spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.savepoint_support!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.savepoint_support*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.savepoint_support(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.savepoint_support); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.savepoint_support-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.savepoint_support+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.savepoint_support-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable 
spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.savepoint_support/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.savepoint_support\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.savepoint_support?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.savepoint_support-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.savepoint_support/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.savepoint_support; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.savepoint_support/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.savepoint_support; +NEW_CONNECTION; +show spanner.read_lock_mode; +NEW_CONNECTION; +SHOW SPANNER.READ_LOCK_MODE; +NEW_CONNECTION; +show spanner.read_lock_mode; +NEW_CONNECTION; + show spanner.read_lock_mode; +NEW_CONNECTION; + show spanner.read_lock_mode; +NEW_CONNECTION; + + + +show spanner.read_lock_mode; +NEW_CONNECTION; +show spanner.read_lock_mode ; +NEW_CONNECTION; +show 
spanner.read_lock_mode ; +NEW_CONNECTION; +show spanner.read_lock_mode + +; +NEW_CONNECTION; +show spanner.read_lock_mode; +NEW_CONNECTION; +show spanner.read_lock_mode; +NEW_CONNECTION; +show +spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_lock_mode bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_lock_mode%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_lock_mode_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_lock_mode&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_lock_mode$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_lock_mode@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_lock_mode!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
spanner.read_lock_mode*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_lock_mode(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_lock_mode); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_lock_mode-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_lock_mode+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_lock_mode-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_lock_mode/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_lock_mode\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_lock_mode?; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_lock_mode-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_lock_mode/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.read_lock_mode/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.read_lock_mode; +NEW_CONNECTION; +show variable spanner.read_lock_mode; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.READ_LOCK_MODE; +NEW_CONNECTION; +show variable spanner.read_lock_mode; +NEW_CONNECTION; + show variable spanner.read_lock_mode; +NEW_CONNECTION; + show variable spanner.read_lock_mode; +NEW_CONNECTION; + + + +show variable spanner.read_lock_mode; +NEW_CONNECTION; +show variable spanner.read_lock_mode ; +NEW_CONNECTION; +show variable spanner.read_lock_mode ; +NEW_CONNECTION; +show variable spanner.read_lock_mode + +; +NEW_CONNECTION; +show variable spanner.read_lock_mode; +NEW_CONNECTION; +show variable spanner.read_lock_mode; +NEW_CONNECTION; +show +variable +spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_lock_mode bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_lock_mode%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +_show variable spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_lock_mode_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_lock_mode&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_lock_mode$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_lock_mode@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_lock_mode!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_lock_mode*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_lock_mode(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT 
EXCEPTION UNIMPLEMENTED +show variable spanner.read_lock_mode); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_lock_mode-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_lock_mode+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_lock_mode-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_lock_mode/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_lock_mode\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_lock_mode?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_lock_mode-/; +NEW_CONNECTION; +@EXPECT 
EXCEPTION UNIMPLEMENTED +show variable-/spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_lock_mode/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.read_lock_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.read_lock_mode/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.read_lock_mode; +NEW_CONNECTION; +show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +SHOW SPANNER.DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE; +NEW_CONNECTION; +show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; + show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; + show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; + + + +show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +show spanner.delay_transaction_start_until_first_write ; +NEW_CONNECTION; +show spanner.delay_transaction_start_until_first_write ; +NEW_CONNECTION; +show spanner.delay_transaction_start_until_first_write + +; +NEW_CONNECTION; +show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +show +spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.delay_transaction_start_until_first_write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.delay_transaction_start_until_first_write%; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show%spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.delay_transaction_start_until_first_write_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.delay_transaction_start_until_first_write&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.delay_transaction_start_until_first_write$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.delay_transaction_start_until_first_write@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.delay_transaction_start_until_first_write!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.delay_transaction_start_until_first_write*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show*spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.delay_transaction_start_until_first_write(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.delay_transaction_start_until_first_write); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.delay_transaction_start_until_first_write-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.delay_transaction_start_until_first_write+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.delay_transaction_start_until_first_write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.delay_transaction_start_until_first_write/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show/spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.delay_transaction_start_until_first_write\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.delay_transaction_start_until_first_write?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.delay_transaction_start_until_first_write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.delay_transaction_start_until_first_write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.delay_transaction_start_until_first_write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE; +NEW_CONNECTION; +show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; + show variable 
spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; + show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; + + + +show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +show variable spanner.delay_transaction_start_until_first_write ; +NEW_CONNECTION; +show variable spanner.delay_transaction_start_until_first_write ; +NEW_CONNECTION; +show variable spanner.delay_transaction_start_until_first_write + +; +NEW_CONNECTION; +show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +show +variable +spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.delay_transaction_start_until_first_write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.delay_transaction_start_until_first_write%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.delay_transaction_start_until_first_write_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.delay_transaction_start_until_first_write&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable&spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.delay_transaction_start_until_first_write$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.delay_transaction_start_until_first_write@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.delay_transaction_start_until_first_write!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.delay_transaction_start_until_first_write*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.delay_transaction_start_until_first_write(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show variable spanner.delay_transaction_start_until_first_write); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.delay_transaction_start_until_first_write-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.delay_transaction_start_until_first_write+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.delay_transaction_start_until_first_write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.delay_transaction_start_until_first_write/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.delay_transaction_start_until_first_write\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +?show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.delay_transaction_start_until_first_write?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.delay_transaction_start_until_first_write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.delay_transaction_start_until_first_write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.delay_transaction_start_until_first_write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.delay_transaction_start_until_first_write; +NEW_CONNECTION; +show spanner.keep_transaction_alive; +NEW_CONNECTION; +SHOW SPANNER.KEEP_TRANSACTION_ALIVE; +NEW_CONNECTION; +show spanner.keep_transaction_alive; +NEW_CONNECTION; + show spanner.keep_transaction_alive; +NEW_CONNECTION; + show spanner.keep_transaction_alive; +NEW_CONNECTION; + + + +show spanner.keep_transaction_alive; +NEW_CONNECTION; +show spanner.keep_transaction_alive ; +NEW_CONNECTION; +show spanner.keep_transaction_alive ; +NEW_CONNECTION; +show spanner.keep_transaction_alive + +; +NEW_CONNECTION; +show spanner.keep_transaction_alive; +NEW_CONNECTION; +show 
spanner.keep_transaction_alive; +NEW_CONNECTION; +show +spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.keep_transaction_alive; +NEW_CONNECTION; 
+@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive\; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show\spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.keep_transaction_alive; +NEW_CONNECTION; +show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.KEEP_TRANSACTION_ALIVE; +NEW_CONNECTION; +show variable spanner.keep_transaction_alive; +NEW_CONNECTION; + show variable spanner.keep_transaction_alive; +NEW_CONNECTION; + show variable spanner.keep_transaction_alive; +NEW_CONNECTION; + + + +show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +show variable spanner.keep_transaction_alive ; +NEW_CONNECTION; +show variable spanner.keep_transaction_alive ; +NEW_CONNECTION; +show variable spanner.keep_transaction_alive + +; +NEW_CONNECTION; +show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +show +variable +spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable 
spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable 
spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable/spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.keep_transaction_alive; +NEW_CONNECTION; +show spanner.auto_batch_dml; +NEW_CONNECTION; +SHOW SPANNER.AUTO_BATCH_DML; +NEW_CONNECTION; +show spanner.auto_batch_dml; +NEW_CONNECTION; + show spanner.auto_batch_dml; +NEW_CONNECTION; + show spanner.auto_batch_dml; +NEW_CONNECTION; + + + +show spanner.auto_batch_dml; +NEW_CONNECTION; +show spanner.auto_batch_dml ; +NEW_CONNECTION; +show spanner.auto_batch_dml ; +NEW_CONNECTION; +show spanner.auto_batch_dml + +; +NEW_CONNECTION; +show 
spanner.auto_batch_dml; +NEW_CONNECTION; +show spanner.auto_batch_dml; +NEW_CONNECTION; +show +spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show*spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.auto_batch_dml; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.auto_batch_dml; +NEW_CONNECTION; +show variable spanner.auto_batch_dml; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.AUTO_BATCH_DML; +NEW_CONNECTION; +show variable spanner.auto_batch_dml; +NEW_CONNECTION; + show variable spanner.auto_batch_dml; +NEW_CONNECTION; + show variable spanner.auto_batch_dml; +NEW_CONNECTION; + + + +show variable spanner.auto_batch_dml; +NEW_CONNECTION; +show variable spanner.auto_batch_dml ; +NEW_CONNECTION; +show variable spanner.auto_batch_dml ; +NEW_CONNECTION; +show variable spanner.auto_batch_dml + +; +NEW_CONNECTION; +show variable spanner.auto_batch_dml; +NEW_CONNECTION; +show variable spanner.auto_batch_dml; +NEW_CONNECTION; +show +variable +spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.auto_batch_dml; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml); 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.auto_batch_dml; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.auto_batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.auto_batch_dml; +NEW_CONNECTION; +show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +SHOW SPANNER.AUTO_BATCH_DML_UPDATE_COUNT; +NEW_CONNECTION; +show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; + show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; + show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; + + + +show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +show spanner.auto_batch_dml_update_count ; +NEW_CONNECTION; +show spanner.auto_batch_dml_update_count ; +NEW_CONNECTION; +show spanner.auto_batch_dml_update_count + +; +NEW_CONNECTION; +show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +show +spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count_; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show_spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count); +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show)spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count-/; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show-/spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.AUTO_BATCH_DML_UPDATE_COUNT; +NEW_CONNECTION; +show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; + show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; + show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; + + + +show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +show variable spanner.auto_batch_dml_update_count ; +NEW_CONNECTION; +show variable spanner.auto_batch_dml_update_count ; +NEW_CONNECTION; +show variable spanner.auto_batch_dml_update_count + +; +NEW_CONNECTION; +show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +show +variable +spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable%spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable 
spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable 
spanner.auto_batch_dml_update_count\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.auto_batch_dml_update_count; +NEW_CONNECTION; +show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +SHOW SPANNER.AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION; +NEW_CONNECTION; +show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; + show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; + show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; + + + +show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +show spanner.auto_batch_dml_update_count_verification ; +NEW_CONNECTION; +show spanner.auto_batch_dml_update_count_verification ; 
+NEW_CONNECTION; +show spanner.auto_batch_dml_update_count_verification + +; +NEW_CONNECTION; +show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +show +spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count_verification bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count_verification%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count_verification_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count_verification&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count_verification$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
spanner.auto_batch_dml_update_count_verification@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count_verification!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count_verification*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count_verification(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count_verification); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count_verification-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count_verification+; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count_verification-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count_verification/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count_verification\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count_verification?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count_verification-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count_verification/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show/#spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_batch_dml_update_count_verification/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION; +NEW_CONNECTION; +show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; + show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; + show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; + + + +show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +show variable spanner.auto_batch_dml_update_count_verification ; +NEW_CONNECTION; +show variable spanner.auto_batch_dml_update_count_verification ; +NEW_CONNECTION; +show variable spanner.auto_batch_dml_update_count_verification + +; +NEW_CONNECTION; +show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +show +variable +spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count_verification bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count_verification%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +_show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count_verification_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count_verification&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count_verification$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count_verification@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count_verification!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count_verification*; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show variable*spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count_verification(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count_verification); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count_verification-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count_verification+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count_verification-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count_verification/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count_verification\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count_verification?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count_verification-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count_verification/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_batch_dml_update_count_verification/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.auto_batch_dml_update_count_verification; +NEW_CONNECTION; +show transaction isolation 
level; +NEW_CONNECTION; +SHOW TRANSACTION ISOLATION LEVEL; +NEW_CONNECTION; +show transaction isolation level; +NEW_CONNECTION; + show transaction isolation level; +NEW_CONNECTION; + show transaction isolation level; +NEW_CONNECTION; + + + +show transaction isolation level; +NEW_CONNECTION; +show transaction isolation level ; +NEW_CONNECTION; +show transaction isolation level ; +NEW_CONNECTION; +show transaction isolation level + +; +NEW_CONNECTION; +show transaction isolation level; +NEW_CONNECTION; +show transaction isolation level; +NEW_CONNECTION; +show +transaction +isolation +level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation level bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation level%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation%level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation level_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation_level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation level&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation&level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation level$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation$level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation level@; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation@level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation level!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation!level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation level*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation*level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation level(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation(level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation level); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation)level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation level-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation-level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation level+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation+level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation level-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation-#level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show transaction isolation level; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation level/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation/level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation level\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation\level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation level?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation?level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation level-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation-/level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation level/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation/#level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation level/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show transaction isolation/-level; +NEW_CONNECTION; +show variable transaction isolation level; +NEW_CONNECTION; +SHOW VARIABLE TRANSACTION ISOLATION LEVEL; +NEW_CONNECTION; +show variable transaction isolation level; +NEW_CONNECTION; + show variable transaction isolation level; +NEW_CONNECTION; + show variable transaction isolation level; +NEW_CONNECTION; + + + +show variable transaction isolation level; +NEW_CONNECTION; +show variable transaction isolation level ; +NEW_CONNECTION; +show variable transaction isolation level ; +NEW_CONNECTION; 
+show variable transaction isolation level + +; +NEW_CONNECTION; +show variable transaction isolation level; +NEW_CONNECTION; +show variable transaction isolation level; +NEW_CONNECTION; +show +variable +transaction +isolation +level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation level bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation level%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation%level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation level_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation_level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation level&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation&level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation level$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation$level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation level@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation@level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable transaction isolation level; +NEW_CONNECTION; +@EXPECT 
EXCEPTION UNIMPLEMENTED +show variable transaction isolation level!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation!level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation level*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation*level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation level(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation(level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation level); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation)level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation level-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation-level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation level+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation+level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation level-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation-#level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable transaction isolation level; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation level/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation/level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation level\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation\level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation level?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation?level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation level-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation-/level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation level/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation/#level; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable transaction isolation level; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation level/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable transaction isolation/-level; +NEW_CONNECTION; +show default_transaction_isolation; +NEW_CONNECTION; +SHOW DEFAULT_TRANSACTION_ISOLATION; +NEW_CONNECTION; +show default_transaction_isolation; +NEW_CONNECTION; + show default_transaction_isolation; +NEW_CONNECTION; + show default_transaction_isolation; +NEW_CONNECTION; + + + +show default_transaction_isolation; +NEW_CONNECTION; +show 
default_transaction_isolation ; +NEW_CONNECTION; +show default_transaction_isolation ; +NEW_CONNECTION; +show default_transaction_isolation + +; +NEW_CONNECTION; +show default_transaction_isolation; +NEW_CONNECTION; +show default_transaction_isolation; +NEW_CONNECTION; +show +default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show default_transaction_isolation bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show default_transaction_isolation%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show default_transaction_isolation_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show default_transaction_isolation&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show default_transaction_isolation$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show default_transaction_isolation@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
default_transaction_isolation!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show default_transaction_isolation*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show default_transaction_isolation(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show default_transaction_isolation); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show default_transaction_isolation-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show default_transaction_isolation+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show default_transaction_isolation-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show default_transaction_isolation/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/default_transaction_isolation; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show default_transaction_isolation\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show default_transaction_isolation?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show default_transaction_isolation-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show default_transaction_isolation/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show default_transaction_isolation/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-default_transaction_isolation; +NEW_CONNECTION; +show variable default_transaction_isolation; +NEW_CONNECTION; +SHOW VARIABLE DEFAULT_TRANSACTION_ISOLATION; +NEW_CONNECTION; +show variable default_transaction_isolation; +NEW_CONNECTION; + show variable default_transaction_isolation; +NEW_CONNECTION; + show variable default_transaction_isolation; +NEW_CONNECTION; + + + +show variable default_transaction_isolation; +NEW_CONNECTION; +show variable default_transaction_isolation ; +NEW_CONNECTION; +show variable default_transaction_isolation ; +NEW_CONNECTION; +show variable default_transaction_isolation + +; +NEW_CONNECTION; +show variable default_transaction_isolation; +NEW_CONNECTION; 
+show variable default_transaction_isolation; +NEW_CONNECTION; +show +variable +default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable default_transaction_isolation bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable default_transaction_isolation%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable default_transaction_isolation_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable default_transaction_isolation&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable default_transaction_isolation$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable default_transaction_isolation@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable default_transaction_isolation!; +NEW_CONNECTION; 
+@EXPECT EXCEPTION UNIMPLEMENTED +show variable!default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable default_transaction_isolation*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable default_transaction_isolation(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable default_transaction_isolation); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable default_transaction_isolation-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable default_transaction_isolation+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable default_transaction_isolation-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable default_transaction_isolation/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable default_transaction_isolation\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable default_transaction_isolation?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable default_transaction_isolation-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable default_transaction_isolation/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable default_transaction_isolation; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable default_transaction_isolation/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-default_transaction_isolation; +NEW_CONNECTION; +begin; +NEW_CONNECTION; +BEGIN; +NEW_CONNECTION; +begin; +NEW_CONNECTION; + begin; +NEW_CONNECTION; + begin; +NEW_CONNECTION; + + + +begin; +NEW_CONNECTION; +begin ; +NEW_CONNECTION; +begin ; +NEW_CONNECTION; +begin + +; +NEW_CONNECTION; +begin; +NEW_CONNECTION; +begin; +NEW_CONNECTION; +begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin/-; +NEW_CONNECTION; +start; +NEW_CONNECTION; +START; +NEW_CONNECTION; +start; +NEW_CONNECTION; + start; +NEW_CONNECTION; + start; +NEW_CONNECTION; + + + +start; +NEW_CONNECTION; +start ; +NEW_CONNECTION; +start ; +NEW_CONNECTION; +start + +; +NEW_CONNECTION; +start; +NEW_CONNECTION; +start; +NEW_CONNECTION; +start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+start%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start/-; +NEW_CONNECTION; +begin transaction; +NEW_CONNECTION; +BEGIN TRANSACTION; +NEW_CONNECTION; +begin transaction; +NEW_CONNECTION; + begin transaction; +NEW_CONNECTION; + begin transaction; +NEW_CONNECTION; + + + +begin transaction; +NEW_CONNECTION; +begin transaction ; +NEW_CONNECTION; +begin transaction ; +NEW_CONNECTION; +begin transaction + +; +NEW_CONNECTION; +begin transaction; +NEW_CONNECTION; +begin transaction; +NEW_CONNECTION; +begin +transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin 
transaction%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin%transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin_transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin&transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin$transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin@transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin!transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin*transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin(transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin)transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin 
transaction-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin-transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin+transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin-#transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin/transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin\transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin?transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin-/transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin/#transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin/-transaction; +NEW_CONNECTION; +start transaction; +NEW_CONNECTION; +START TRANSACTION; +NEW_CONNECTION; +start transaction; +NEW_CONNECTION; + 
start transaction; +NEW_CONNECTION; + start transaction; +NEW_CONNECTION; + + + +start transaction; +NEW_CONNECTION; +start transaction ; +NEW_CONNECTION; +start transaction ; +NEW_CONNECTION; +start transaction + +; +NEW_CONNECTION; +start transaction; +NEW_CONNECTION; +start transaction; +NEW_CONNECTION; +start +transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start%transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start_transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start&transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start$transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start@transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start!transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction*; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start*transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start(transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start)transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start-transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start+transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start-#transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start/transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start\transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start?transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction-/; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start-/transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start/#transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start/-transaction; +NEW_CONNECTION; +begin work; +NEW_CONNECTION; +BEGIN WORK; +NEW_CONNECTION; +begin work; +NEW_CONNECTION; + begin work; +NEW_CONNECTION; + begin work; +NEW_CONNECTION; + + + +begin work; +NEW_CONNECTION; +begin work ; +NEW_CONNECTION; +begin work ; +NEW_CONNECTION; +begin work + +; +NEW_CONNECTION; +begin work; +NEW_CONNECTION; +begin work; +NEW_CONNECTION; +begin +work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin%work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin_work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin&work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin$work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+begin@work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin!work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin*work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin(work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin)work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin-work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin+work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin-#work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin/work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin\work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work?; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin?work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin-/work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin/#work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin/-work; +NEW_CONNECTION; +start work; +NEW_CONNECTION; +START WORK; +NEW_CONNECTION; +start work; +NEW_CONNECTION; + start work; +NEW_CONNECTION; + start work; +NEW_CONNECTION; + + + +start work; +NEW_CONNECTION; +start work ; +NEW_CONNECTION; +start work ; +NEW_CONNECTION; +start work + +; +NEW_CONNECTION; +start work; +NEW_CONNECTION; +start work; +NEW_CONNECTION; +start +work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start%work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start_work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start&work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start$work; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +@start work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start@work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start!work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start*work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start(work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start)work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start-work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start+work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start-#work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start/work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start\work; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +?start work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start?work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start-/work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start/#work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start/-work; +NEW_CONNECTION; +begin read only; +NEW_CONNECTION; +BEGIN READ ONLY; +NEW_CONNECTION; +begin read only; +NEW_CONNECTION; + begin read only; +NEW_CONNECTION; + begin read only; +NEW_CONNECTION; + + + +begin read only; +NEW_CONNECTION; +begin read only ; +NEW_CONNECTION; +begin read only ; +NEW_CONNECTION; +begin read only + +; +NEW_CONNECTION; +begin read only; +NEW_CONNECTION; +begin read only; +NEW_CONNECTION; +begin +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+begin read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +/begin read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read/-only; +NEW_CONNECTION; +start read only; +NEW_CONNECTION; +START READ ONLY; +NEW_CONNECTION; +start read only; +NEW_CONNECTION; + start read only; +NEW_CONNECTION; + start read only; +NEW_CONNECTION; + + + +start read only; +NEW_CONNECTION; +start read only ; +NEW_CONNECTION; +start read only ; +NEW_CONNECTION; +start read only + +; +NEW_CONNECTION; +start read only; +NEW_CONNECTION; +start read only; +NEW_CONNECTION; +start +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only%; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start 
read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read/-only; +NEW_CONNECTION; +begin transaction read only; +NEW_CONNECTION; +BEGIN TRANSACTION READ ONLY; +NEW_CONNECTION; +begin transaction read only; +NEW_CONNECTION; + begin transaction read only; +NEW_CONNECTION; + begin transaction read only; +NEW_CONNECTION; + 
+ + +begin transaction read only; +NEW_CONNECTION; +begin transaction read only ; +NEW_CONNECTION; +begin transaction read only ; +NEW_CONNECTION; +begin transaction read only + +; +NEW_CONNECTION; +begin transaction read only; +NEW_CONNECTION; +begin transaction read only; +NEW_CONNECTION; +begin +transaction +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction 
read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin 
transaction read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read/-only; +NEW_CONNECTION; +start transaction read only; +NEW_CONNECTION; +START TRANSACTION READ ONLY; +NEW_CONNECTION; +start transaction read only; +NEW_CONNECTION; + start transaction read only; +NEW_CONNECTION; + start transaction read only; +NEW_CONNECTION; + + + +start transaction read only; +NEW_CONNECTION; +start transaction read only ; +NEW_CONNECTION; +start transaction read only ; +NEW_CONNECTION; +start transaction read only + +; +NEW_CONNECTION; +start transaction read only; +NEW_CONNECTION; +start transaction read only; +NEW_CONNECTION; +start +transaction +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start transaction read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only); +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start transaction read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only/#; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read/-only; +NEW_CONNECTION; +begin work read only; +NEW_CONNECTION; +BEGIN WORK READ ONLY; +NEW_CONNECTION; +begin work read only; +NEW_CONNECTION; + begin work read only; +NEW_CONNECTION; + begin work read only; +NEW_CONNECTION; + + + +begin work read only; +NEW_CONNECTION; +begin work read only ; +NEW_CONNECTION; +begin work read only ; +NEW_CONNECTION; +begin work read only + +; +NEW_CONNECTION; +begin work read only; +NEW_CONNECTION; +begin work read only; +NEW_CONNECTION; +begin +work +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work read only; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read/only; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read/-only; +NEW_CONNECTION; +start work read only; +NEW_CONNECTION; +START WORK READ ONLY; +NEW_CONNECTION; +start work read only; +NEW_CONNECTION; + start work read only; +NEW_CONNECTION; + start work read only; +NEW_CONNECTION; + + + +start work read only; +NEW_CONNECTION; +start work read only ; +NEW_CONNECTION; +start work read only ; +NEW_CONNECTION; +start work read only + +; +NEW_CONNECTION; +start work read only; +NEW_CONNECTION; +start work read only; +NEW_CONNECTION; +start +work +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only%; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start work read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work read only; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start work read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read/-only; +NEW_CONNECTION; 
+begin read write; +NEW_CONNECTION; +BEGIN READ WRITE; +NEW_CONNECTION; +begin read write; +NEW_CONNECTION; + begin read write; +NEW_CONNECTION; + begin read write; +NEW_CONNECTION; + + + +begin read write; +NEW_CONNECTION; +begin read write ; +NEW_CONNECTION; +begin read write ; +NEW_CONNECTION; +begin read write + +; +NEW_CONNECTION; +begin read write; +NEW_CONNECTION; +begin read write; +NEW_CONNECTION; +begin +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin read write; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read 
write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read/-write; +NEW_CONNECTION; +start read write; +NEW_CONNECTION; +START READ WRITE; +NEW_CONNECTION; +start read write; +NEW_CONNECTION; + start read write; +NEW_CONNECTION; + start read write; +NEW_CONNECTION; + + + +start read write; +NEW_CONNECTION; +start read write ; +NEW_CONNECTION; +start read write ; +NEW_CONNECTION; +start read write + +; +NEW_CONNECTION; +start read write; +NEW_CONNECTION; +start read write; +NEW_CONNECTION; +start +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read$write; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read/write; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +\start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read/-write; +NEW_CONNECTION; +begin transaction read write; +NEW_CONNECTION; +BEGIN TRANSACTION READ WRITE; +NEW_CONNECTION; +begin transaction read write; +NEW_CONNECTION; + begin transaction read write; +NEW_CONNECTION; + begin transaction read write; +NEW_CONNECTION; + + + +begin transaction read write; +NEW_CONNECTION; +begin transaction read write ; +NEW_CONNECTION; +begin transaction read write ; +NEW_CONNECTION; +begin transaction read write + +; +NEW_CONNECTION; +begin transaction read write; +NEW_CONNECTION; +begin transaction read write; +NEW_CONNECTION; +begin +transaction +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction 
read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin transaction read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction read 
write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read/-write; +NEW_CONNECTION; +start transaction read write; +NEW_CONNECTION; +START TRANSACTION READ WRITE; +NEW_CONNECTION; +start transaction read write; +NEW_CONNECTION; + start transaction read write; +NEW_CONNECTION; + start transaction read write; +NEW_CONNECTION; + + + +start transaction read write; +NEW_CONNECTION; +start transaction read write ; +NEW_CONNECTION; +start transaction read write ; +NEW_CONNECTION; +start transaction read write + +; +NEW_CONNECTION; +start transaction read write; +NEW_CONNECTION; +start transaction read write; +NEW_CONNECTION; +start +transaction +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read&write; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +$start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction 
read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read/-write; +NEW_CONNECTION; +begin work read write; +NEW_CONNECTION; +BEGIN WORK READ WRITE; +NEW_CONNECTION; +begin work read write; +NEW_CONNECTION; + begin work read write; +NEW_CONNECTION; + begin work 
read write; +NEW_CONNECTION; + + + +begin work read write; +NEW_CONNECTION; +begin work read write ; +NEW_CONNECTION; +begin work read write ; +NEW_CONNECTION; +begin work read write + +; +NEW_CONNECTION; +begin work read write; +NEW_CONNECTION; +begin work read write; +NEW_CONNECTION; +begin +work +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin work read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read?write; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read/-write; +NEW_CONNECTION; +start work read write; +NEW_CONNECTION; +START WORK READ WRITE; +NEW_CONNECTION; +start work read write; +NEW_CONNECTION; + start work read write; +NEW_CONNECTION; + start work read write; +NEW_CONNECTION; + + + +start work read write; +NEW_CONNECTION; +start work read write ; +NEW_CONNECTION; +start work read write ; +NEW_CONNECTION; +start work read write + +; +NEW_CONNECTION; +start work read write; +NEW_CONNECTION; +start work read write; +NEW_CONNECTION; +start +work +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read 
write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +-#start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read/-write; +NEW_CONNECTION; +begin isolation level default; +NEW_CONNECTION; +BEGIN ISOLATION LEVEL DEFAULT; +NEW_CONNECTION; +begin isolation level default; +NEW_CONNECTION; + begin isolation level default; +NEW_CONNECTION; + begin isolation level default; +NEW_CONNECTION; + + + +begin isolation level default; +NEW_CONNECTION; +begin isolation level default ; +NEW_CONNECTION; +begin isolation level 
default ; +NEW_CONNECTION; +begin isolation level default + +; +NEW_CONNECTION; +begin isolation level default; +NEW_CONNECTION; +begin isolation level default; +NEW_CONNECTION; +begin +isolation +level +default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level%default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level_default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level&default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level$default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level@default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level!default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin 
isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level*default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level(default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level)default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level-default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level+default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level-#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level\default; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level?default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level-/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level/#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level/-default; +NEW_CONNECTION; +start isolation level default; +NEW_CONNECTION; +START ISOLATION LEVEL DEFAULT; +NEW_CONNECTION; +start isolation level default; +NEW_CONNECTION; + start isolation level default; +NEW_CONNECTION; + start isolation level default; +NEW_CONNECTION; + + + +start isolation level default; +NEW_CONNECTION; +start isolation level default ; +NEW_CONNECTION; +start isolation level default ; +NEW_CONNECTION; +start isolation level default + +; +NEW_CONNECTION; +start isolation level default; +NEW_CONNECTION; +start isolation level default; +NEW_CONNECTION; +start +isolation +level +default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default%; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level%default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level_default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level&default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level$default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level@default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level!default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level*default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level(default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start isolation level default; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start isolation level default); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level)default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level-default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level+default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level-#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level\default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level?default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level-/default; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +/#start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level/#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level/-default; +NEW_CONNECTION; +begin transaction isolation level default; +NEW_CONNECTION; +BEGIN TRANSACTION ISOLATION LEVEL DEFAULT; +NEW_CONNECTION; +begin transaction isolation level default; +NEW_CONNECTION; + begin transaction isolation level default; +NEW_CONNECTION; + begin transaction isolation level default; +NEW_CONNECTION; + + + +begin transaction isolation level default; +NEW_CONNECTION; +begin transaction isolation level default ; +NEW_CONNECTION; +begin transaction isolation level default ; +NEW_CONNECTION; +begin transaction isolation level default + +; +NEW_CONNECTION; +begin transaction isolation level default; +NEW_CONNECTION; +begin transaction isolation level default; +NEW_CONNECTION; +begin +transaction +isolation +level +default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level%default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction 
isolation level_default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level&default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level$default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level@default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level!default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level*default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level(default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default); +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level)default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level-default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level+default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level-#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level\default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level?default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin 
transaction isolation level default-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level-/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level/#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level/-default; +NEW_CONNECTION; +start transaction isolation level default; +NEW_CONNECTION; +START TRANSACTION ISOLATION LEVEL DEFAULT; +NEW_CONNECTION; +start transaction isolation level default; +NEW_CONNECTION; + start transaction isolation level default; +NEW_CONNECTION; + start transaction isolation level default; +NEW_CONNECTION; + + + +start transaction isolation level default; +NEW_CONNECTION; +start transaction isolation level default ; +NEW_CONNECTION; +start transaction isolation level default ; +NEW_CONNECTION; +start transaction isolation level default + +; +NEW_CONNECTION; +start transaction isolation level default; +NEW_CONNECTION; +start transaction isolation level default; +NEW_CONNECTION; +start +transaction +isolation +level +default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level%default; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +_start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level_default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level&default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level$default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level@default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level!default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level*default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction 
isolation level(default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level)default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level-default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level+default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level-#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level\default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default?; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level?default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level-/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level/#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level/-default; +NEW_CONNECTION; +begin work isolation level default; +NEW_CONNECTION; +BEGIN WORK ISOLATION LEVEL DEFAULT; +NEW_CONNECTION; +begin work isolation level default; +NEW_CONNECTION; + begin work isolation level default; +NEW_CONNECTION; + begin work isolation level default; +NEW_CONNECTION; + + + +begin work isolation level default; +NEW_CONNECTION; +begin work isolation level default ; +NEW_CONNECTION; +begin work isolation level default ; +NEW_CONNECTION; +begin work isolation level default + +; +NEW_CONNECTION; +begin work isolation level default; +NEW_CONNECTION; +begin work isolation level default; +NEW_CONNECTION; +begin +work +isolation +level +default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level 
default%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level%default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level_default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level&default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level$default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level@default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level!default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level*default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation 
level(default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level)default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level-default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level+default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level-#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level\default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level?default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work isolation 
level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level-/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level/#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level/-default; +NEW_CONNECTION; +start work isolation level default; +NEW_CONNECTION; +START WORK ISOLATION LEVEL DEFAULT; +NEW_CONNECTION; +start work isolation level default; +NEW_CONNECTION; + start work isolation level default; +NEW_CONNECTION; + start work isolation level default; +NEW_CONNECTION; + + + +start work isolation level default; +NEW_CONNECTION; +start work isolation level default ; +NEW_CONNECTION; +start work isolation level default ; +NEW_CONNECTION; +start work isolation level default + +; +NEW_CONNECTION; +start work isolation level default; +NEW_CONNECTION; +start work isolation level default; +NEW_CONNECTION; +start +work +isolation +level +default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level%default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work isolation level default; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start work isolation level default_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level_default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level&default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level$default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level@default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level!default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level*default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level(default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default); +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start work isolation level)default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level-default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level+default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level-#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level\default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level?default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level-/default; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +/#start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level/#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level/-default; +NEW_CONNECTION; +begin isolation level serializable; +NEW_CONNECTION; +BEGIN ISOLATION LEVEL SERIALIZABLE; +NEW_CONNECTION; +begin isolation level serializable; +NEW_CONNECTION; + begin isolation level serializable; +NEW_CONNECTION; + begin isolation level serializable; +NEW_CONNECTION; + + + +begin isolation level serializable; +NEW_CONNECTION; +begin isolation level serializable ; +NEW_CONNECTION; +begin isolation level serializable ; +NEW_CONNECTION; +begin isolation level serializable + +; +NEW_CONNECTION; +begin isolation level serializable; +NEW_CONNECTION; +begin isolation level serializable; +NEW_CONNECTION; +begin +isolation +level +serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level%serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level_serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin isolation level 
serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level&serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level$serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level@serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level!serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level*serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level(serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level)serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level 
serializable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level-serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level+serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level-#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level\serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level?serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level-/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation 
level/#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level/-serializable; +NEW_CONNECTION; +start isolation level serializable; +NEW_CONNECTION; +START ISOLATION LEVEL SERIALIZABLE; +NEW_CONNECTION; +start isolation level serializable; +NEW_CONNECTION; + start isolation level serializable; +NEW_CONNECTION; + start isolation level serializable; +NEW_CONNECTION; + + + +start isolation level serializable; +NEW_CONNECTION; +start isolation level serializable ; +NEW_CONNECTION; +start isolation level serializable ; +NEW_CONNECTION; +start isolation level serializable + +; +NEW_CONNECTION; +start isolation level serializable; +NEW_CONNECTION; +start isolation level serializable; +NEW_CONNECTION; +start +isolation +level +serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level%serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level_serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level&serializable; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +$start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level$serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level@serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level!serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level*serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level(serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level)serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level-serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start isolation level serializable; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start isolation level serializable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level+serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level-#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level\serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level?serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level-/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level/#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable/-; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level/-serializable; +NEW_CONNECTION; +begin transaction isolation level serializable; +NEW_CONNECTION; +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; +NEW_CONNECTION; +begin transaction isolation level serializable; +NEW_CONNECTION; + begin transaction isolation level serializable; +NEW_CONNECTION; + begin transaction isolation level serializable; +NEW_CONNECTION; + + + +begin transaction isolation level serializable; +NEW_CONNECTION; +begin transaction isolation level serializable ; +NEW_CONNECTION; +begin transaction isolation level serializable ; +NEW_CONNECTION; +begin transaction isolation level serializable + +; +NEW_CONNECTION; +begin transaction isolation level serializable; +NEW_CONNECTION; +begin transaction isolation level serializable; +NEW_CONNECTION; +begin +transaction +isolation +level +serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level%serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level_serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation 
level&serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level$serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level@serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level!serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level*serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level(serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level)serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level-serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level+serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level-#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level\serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level?serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin 
transaction isolation level-/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level/#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level/-serializable; +NEW_CONNECTION; +start transaction isolation level serializable; +NEW_CONNECTION; +START TRANSACTION ISOLATION LEVEL SERIALIZABLE; +NEW_CONNECTION; +start transaction isolation level serializable; +NEW_CONNECTION; + start transaction isolation level serializable; +NEW_CONNECTION; + start transaction isolation level serializable; +NEW_CONNECTION; + + + +start transaction isolation level serializable; +NEW_CONNECTION; +start transaction isolation level serializable ; +NEW_CONNECTION; +start transaction isolation level serializable ; +NEW_CONNECTION; +start transaction isolation level serializable + +; +NEW_CONNECTION; +start transaction isolation level serializable; +NEW_CONNECTION; +start transaction isolation level serializable; +NEW_CONNECTION; +start +transaction +isolation +level +serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level%serializable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level_serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level&serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level$serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level@serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level!serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level*serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start transaction isolation level serializable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level(serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level)serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level-serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level+serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level-#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation 
level\serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level?serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level-/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level/#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level/-serializable; +NEW_CONNECTION; +begin work isolation level serializable; +NEW_CONNECTION; +BEGIN WORK ISOLATION LEVEL SERIALIZABLE; +NEW_CONNECTION; +begin work isolation level serializable; +NEW_CONNECTION; + begin work isolation level serializable; +NEW_CONNECTION; + begin work isolation level serializable; +NEW_CONNECTION; + + + +begin work isolation level serializable; +NEW_CONNECTION; +begin work isolation level serializable ; +NEW_CONNECTION; +begin work isolation level serializable ; +NEW_CONNECTION; +begin work isolation level serializable + +; +NEW_CONNECTION; +begin work isolation level serializable; +NEW_CONNECTION; +begin work isolation level serializable; +NEW_CONNECTION; +begin +work +isolation +level +serializable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level%serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level_serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level&serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level$serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level@serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level!serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work isolation 
level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level*serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level(serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level)serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level-serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level+serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level-#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin 
work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level\serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level?serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level-/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level/#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level/-serializable; +NEW_CONNECTION; +start work isolation level serializable; +NEW_CONNECTION; +START WORK ISOLATION LEVEL SERIALIZABLE; +NEW_CONNECTION; +start work isolation level serializable; +NEW_CONNECTION; + start work isolation level serializable; +NEW_CONNECTION; + start work isolation level serializable; +NEW_CONNECTION; + + + +start work isolation level serializable; +NEW_CONNECTION; +start work isolation level serializable ; +NEW_CONNECTION; +start work isolation level serializable ; +NEW_CONNECTION; +start work isolation level serializable + +; +NEW_CONNECTION; +start work isolation level serializable; 
+NEW_CONNECTION; +start work isolation level serializable; +NEW_CONNECTION; +start +work +isolation +level +serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level%serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level_serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level&serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level$serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level@serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+start work isolation level!serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level*serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level(serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level)serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level-serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level+serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level-#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable/; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start work isolation level/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level\serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level?serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level-/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level/#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level/-serializable; +NEW_CONNECTION; +begin isolation level repeatable read; +NEW_CONNECTION; +BEGIN ISOLATION LEVEL REPEATABLE READ; +NEW_CONNECTION; +begin isolation level repeatable read; +NEW_CONNECTION; + begin isolation level repeatable read; +NEW_CONNECTION; + begin isolation level repeatable read; +NEW_CONNECTION; + + + +begin isolation level repeatable read; +NEW_CONNECTION; +begin isolation level repeatable read ; +NEW_CONNECTION; +begin isolation level repeatable read ; +NEW_CONNECTION; +begin 
isolation level repeatable read + +; +NEW_CONNECTION; +begin isolation level repeatable read; +NEW_CONNECTION; +begin isolation level repeatable read; +NEW_CONNECTION; +begin +isolation +level +repeatable +read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable%read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable&read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable$read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable@read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read!; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable!read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable*read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable(read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable)read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable-read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable+read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable-#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read/; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable\read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable?read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable-/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable/#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable/-read; +NEW_CONNECTION; +start isolation level repeatable read; +NEW_CONNECTION; +START ISOLATION LEVEL REPEATABLE READ; +NEW_CONNECTION; +start isolation level repeatable read; +NEW_CONNECTION; + start isolation level repeatable read; +NEW_CONNECTION; + start isolation level repeatable read; +NEW_CONNECTION; + + + +start isolation level repeatable read; +NEW_CONNECTION; +start isolation level repeatable read ; +NEW_CONNECTION; +start isolation level repeatable read ; +NEW_CONNECTION; +start isolation level repeatable 
read + +; +NEW_CONNECTION; +start isolation level repeatable read; +NEW_CONNECTION; +start isolation level repeatable read; +NEW_CONNECTION; +start +isolation +level +repeatable +read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable%read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable&read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable$read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable@read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read!; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start isolation level repeatable!read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable*read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable(read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable)read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable-read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable+read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable-#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+start isolation level repeatable/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable\read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable?read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable-/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable/#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable/-read; +NEW_CONNECTION; +begin transaction isolation level repeatable read; +NEW_CONNECTION; +BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ; +NEW_CONNECTION; +begin transaction isolation level repeatable read; +NEW_CONNECTION; + begin transaction isolation level repeatable read; +NEW_CONNECTION; + begin transaction isolation level repeatable read; +NEW_CONNECTION; + + + +begin transaction isolation level repeatable read; +NEW_CONNECTION; +begin transaction isolation level repeatable read ; +NEW_CONNECTION; +begin transaction isolation level 
repeatable read ; +NEW_CONNECTION; +begin transaction isolation level repeatable read + +; +NEW_CONNECTION; +begin transaction isolation level repeatable read; +NEW_CONNECTION; +begin transaction isolation level repeatable read; +NEW_CONNECTION; +begin +transaction +isolation +level +repeatable +read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable%read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable&read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable$read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable 
read@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable@read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable!read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable*read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable(read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable)read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable-read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level 
repeatable+read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable-#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable\read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable?read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable-/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable/#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin 
transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable/-read; +NEW_CONNECTION; +start transaction isolation level repeatable read; +NEW_CONNECTION; +START TRANSACTION ISOLATION LEVEL REPEATABLE READ; +NEW_CONNECTION; +start transaction isolation level repeatable read; +NEW_CONNECTION; + start transaction isolation level repeatable read; +NEW_CONNECTION; + start transaction isolation level repeatable read; +NEW_CONNECTION; + + + +start transaction isolation level repeatable read; +NEW_CONNECTION; +start transaction isolation level repeatable read ; +NEW_CONNECTION; +start transaction isolation level repeatable read ; +NEW_CONNECTION; +start transaction isolation level repeatable read + +; +NEW_CONNECTION; +start transaction isolation level repeatable read; +NEW_CONNECTION; +start transaction isolation level repeatable read; +NEW_CONNECTION; +start +transaction +isolation +level +repeatable +read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable%read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable_read; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +&start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable&read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable$read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable@read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable!read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable*read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable(read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable)read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable-read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable+read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable-#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable\read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read?; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable?read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable-/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable/#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable/-read; +NEW_CONNECTION; +begin work isolation level repeatable read; +NEW_CONNECTION; +BEGIN WORK ISOLATION LEVEL REPEATABLE READ; +NEW_CONNECTION; +begin work isolation level repeatable read; +NEW_CONNECTION; + begin work isolation level repeatable read; +NEW_CONNECTION; + begin work isolation level repeatable read; +NEW_CONNECTION; + + + +begin work isolation level repeatable read; +NEW_CONNECTION; +begin work isolation level repeatable read ; +NEW_CONNECTION; +begin work isolation level repeatable read ; +NEW_CONNECTION; +begin work isolation level repeatable read + +; +NEW_CONNECTION; +begin work isolation level repeatable read; +NEW_CONNECTION; +begin work isolation level repeatable read; +NEW_CONNECTION; +begin +work +isolation +level +repeatable +read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work 
isolation level repeatable read bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable%read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable&read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable$read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable@read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable!read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level 
repeatable read*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable*read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable(read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable)read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable-read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable+read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable-#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work isolation level repeatable read; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable\read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable?read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable-/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable/#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable/-read; +NEW_CONNECTION; +start work isolation level repeatable read; +NEW_CONNECTION; +START WORK ISOLATION LEVEL REPEATABLE READ; +NEW_CONNECTION; +start work isolation level repeatable read; +NEW_CONNECTION; + start work isolation level repeatable read; +NEW_CONNECTION; + start work isolation level repeatable read; +NEW_CONNECTION; + + + +start work isolation level repeatable read; +NEW_CONNECTION; +start work isolation level repeatable read ; +NEW_CONNECTION; +start work isolation level repeatable read ; +NEW_CONNECTION; +start work isolation level repeatable read + +; +NEW_CONNECTION; +start work 
isolation level repeatable read; +NEW_CONNECTION; +start work isolation level repeatable read; +NEW_CONNECTION; +start +work +isolation +level +repeatable +read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable%read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable&read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable$read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable@read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+start work isolation level repeatable read!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable!read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable*read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable(read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable)read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable-read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable+read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable-#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work isolation 
level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable\read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable?read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable-/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable/#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable/-read; +NEW_CONNECTION; +begin isolation level default read write; +NEW_CONNECTION; +BEGIN ISOLATION LEVEL DEFAULT READ WRITE; +NEW_CONNECTION; +begin isolation level default read write; +NEW_CONNECTION; + begin isolation level default read write; +NEW_CONNECTION; + begin isolation level default 
read write; +NEW_CONNECTION; + + + +begin isolation level default read write; +NEW_CONNECTION; +begin isolation level default read write ; +NEW_CONNECTION; +begin isolation level default read write ; +NEW_CONNECTION; +begin isolation level default read write + +; +NEW_CONNECTION; +begin isolation level default read write; +NEW_CONNECTION; +begin isolation level default read write; +NEW_CONNECTION; +begin +isolation +level +default +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read 
write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+begin isolation level default read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read/-write; +NEW_CONNECTION; +start isolation level default read only; +NEW_CONNECTION; +START ISOLATION LEVEL DEFAULT 
READ ONLY; +NEW_CONNECTION; +start isolation level default read only; +NEW_CONNECTION; + start isolation level default read only; +NEW_CONNECTION; + start isolation level default read only; +NEW_CONNECTION; + + + +start isolation level default read only; +NEW_CONNECTION; +start isolation level default read only ; +NEW_CONNECTION; +start isolation level default read only ; +NEW_CONNECTION; +start isolation level default read only + +; +NEW_CONNECTION; +start isolation level default read only; +NEW_CONNECTION; +start isolation level default read only; +NEW_CONNECTION; +start +isolation +level +default +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read$only; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +@start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read+only; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +-#start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default 
read/-only; +NEW_CONNECTION; +begin transaction isolation level default read only; +NEW_CONNECTION; +BEGIN TRANSACTION ISOLATION LEVEL DEFAULT READ ONLY; +NEW_CONNECTION; +begin transaction isolation level default read only; +NEW_CONNECTION; + begin transaction isolation level default read only; +NEW_CONNECTION; + begin transaction isolation level default read only; +NEW_CONNECTION; + + + +begin transaction isolation level default read only; +NEW_CONNECTION; +begin transaction isolation level default read only ; +NEW_CONNECTION; +begin transaction isolation level default read only ; +NEW_CONNECTION; +begin transaction isolation level default read only + +; +NEW_CONNECTION; +begin transaction isolation level default read only; +NEW_CONNECTION; +begin transaction isolation level default read only; +NEW_CONNECTION; +begin +transaction +isolation +level +default +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only&; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level 
default read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +-/begin transaction isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read/-only; +NEW_CONNECTION; +start transaction isolation level default read write; +NEW_CONNECTION; +START TRANSACTION ISOLATION LEVEL DEFAULT READ WRITE; +NEW_CONNECTION; +start transaction isolation level default read write; +NEW_CONNECTION; + start transaction isolation level default read write; +NEW_CONNECTION; + start transaction isolation level default read write; +NEW_CONNECTION; + + + +start transaction isolation level default read write; +NEW_CONNECTION; +start transaction isolation level default read write ; +NEW_CONNECTION; +start transaction isolation level default read write ; +NEW_CONNECTION; +start transaction isolation level default read write + +; +NEW_CONNECTION; +start transaction isolation level default read write; +NEW_CONNECTION; +start transaction isolation level default read write; +NEW_CONNECTION; +start +transaction +isolation +level +default +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start 
transaction isolation level default read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default 
read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read-#write; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +/start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read/-write; +NEW_CONNECTION; +begin work isolation level default read 
write; +NEW_CONNECTION; +BEGIN WORK ISOLATION LEVEL DEFAULT READ WRITE; +NEW_CONNECTION; +begin work isolation level default read write; +NEW_CONNECTION; + begin work isolation level default read write; +NEW_CONNECTION; + begin work isolation level default read write; +NEW_CONNECTION; + + + +begin work isolation level default read write; +NEW_CONNECTION; +begin work isolation level default read write ; +NEW_CONNECTION; +begin work isolation level default read write ; +NEW_CONNECTION; +begin work isolation level default read write + +; +NEW_CONNECTION; +begin work isolation level default read write; +NEW_CONNECTION; +begin work isolation level default read write; +NEW_CONNECTION; +begin +work +isolation +level +default +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work isolation level default read write; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default 
read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin work isolation level default read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read/-write; +NEW_CONNECTION; +start work isolation level default read only; +NEW_CONNECTION; +START WORK ISOLATION LEVEL DEFAULT READ ONLY; +NEW_CONNECTION; +start work isolation level default read only; +NEW_CONNECTION; + start work isolation level default read only; +NEW_CONNECTION; + start work isolation level default read only; +NEW_CONNECTION; + + + +start work isolation level default read only; +NEW_CONNECTION; +start work isolation level default read only ; +NEW_CONNECTION; +start work isolation level default read only ; +NEW_CONNECTION; +start work isolation level default read only + +; +NEW_CONNECTION; +start work isolation level default read only; +NEW_CONNECTION; +start work isolation level default read only; +NEW_CONNECTION; +start +work +isolation +level +default +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only_; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work isolation level default read only; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level 
default read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read/-only; +NEW_CONNECTION; +begin isolation level serializable read write; +NEW_CONNECTION; +BEGIN ISOLATION LEVEL SERIALIZABLE READ WRITE; +NEW_CONNECTION; +begin isolation level serializable read write; +NEW_CONNECTION; + begin isolation level serializable read write; +NEW_CONNECTION; + begin isolation level serializable read write; +NEW_CONNECTION; + + + +begin isolation level serializable read write; +NEW_CONNECTION; +begin isolation level serializable read write ; +NEW_CONNECTION; +begin isolation level serializable read write ; +NEW_CONNECTION; +begin isolation level serializable read write + +; +NEW_CONNECTION; +begin isolation level serializable read write; +NEW_CONNECTION; +begin isolation level serializable read write; +NEW_CONNECTION; +begin +isolation +level +serializable +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+%begin isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write*; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin isolation level 
serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read/-write; +NEW_CONNECTION; +start isolation level serializable read write; +NEW_CONNECTION; +START ISOLATION LEVEL SERIALIZABLE READ WRITE; +NEW_CONNECTION; +start isolation level serializable read write; +NEW_CONNECTION; + start isolation level serializable read write; +NEW_CONNECTION; + start isolation level serializable read write; +NEW_CONNECTION; + + + +start isolation level serializable read write; +NEW_CONNECTION; +start isolation level serializable read write ; +NEW_CONNECTION; +start isolation level serializable read write ; 
+NEW_CONNECTION; +start isolation level serializable read write + +; +NEW_CONNECTION; +start isolation level serializable read write; +NEW_CONNECTION; +start isolation level serializable read write; +NEW_CONNECTION; +start +isolation +level +serializable +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable 
read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+start isolation level serializable read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read/-write; 
+NEW_CONNECTION; +begin transaction isolation level serializable read only; +NEW_CONNECTION; +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY; +NEW_CONNECTION; +begin transaction isolation level serializable read only; +NEW_CONNECTION; + begin transaction isolation level serializable read only; +NEW_CONNECTION; + begin transaction isolation level serializable read only; +NEW_CONNECTION; + + + +begin transaction isolation level serializable read only; +NEW_CONNECTION; +begin transaction isolation level serializable read only ; +NEW_CONNECTION; +begin transaction isolation level serializable read only ; +NEW_CONNECTION; +begin transaction isolation level serializable read only + +; +NEW_CONNECTION; +begin transaction isolation level serializable read only; +NEW_CONNECTION; +begin transaction isolation level serializable read only; +NEW_CONNECTION; +begin +transaction +isolation +level +serializable +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin transaction isolation level serializable read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction isolation level serializable read only; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction 
isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read/-only; +NEW_CONNECTION; +start transaction isolation level serializable read write; +NEW_CONNECTION; +START TRANSACTION ISOLATION LEVEL SERIALIZABLE READ WRITE; +NEW_CONNECTION; +start transaction isolation level serializable read write; +NEW_CONNECTION; + start transaction isolation level serializable read write; +NEW_CONNECTION; + start transaction isolation level serializable read write; +NEW_CONNECTION; + + + +start transaction isolation level serializable read write; +NEW_CONNECTION; +start transaction isolation level serializable read write ; +NEW_CONNECTION; +start transaction isolation level serializable read write ; +NEW_CONNECTION; +start transaction isolation level serializable read write + +; 
+NEW_CONNECTION; +start transaction isolation level serializable read write; +NEW_CONNECTION; +start transaction isolation level serializable read write; +NEW_CONNECTION; +start +transaction +isolation +level +serializable +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start transaction isolation level serializable read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction isolation level serializable 
read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +/#start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read/-write; +NEW_CONNECTION; +begin work isolation level serializable read write; +NEW_CONNECTION; +BEGIN WORK ISOLATION LEVEL SERIALIZABLE READ WRITE; +NEW_CONNECTION; +begin work isolation level serializable read write; +NEW_CONNECTION; + begin work isolation level serializable read write; +NEW_CONNECTION; + begin work isolation level serializable read write; +NEW_CONNECTION; + + + +begin work isolation level serializable read write; +NEW_CONNECTION; +begin work isolation level serializable read write ; +NEW_CONNECTION; +begin work isolation level serializable read write ; +NEW_CONNECTION; +begin work isolation level serializable read write + +; +NEW_CONNECTION; +begin work isolation level serializable read write; +NEW_CONNECTION; +begin work isolation level serializable read write; +NEW_CONNECTION; +begin +work +isolation +level +serializable +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+begin work isolation level serializable read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read*write; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +(begin work isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work isolation level serializable read 
write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read/-write; +NEW_CONNECTION; +start work isolation level serializable read only; +NEW_CONNECTION; +START WORK ISOLATION LEVEL SERIALIZABLE READ ONLY; +NEW_CONNECTION; +start work isolation level serializable read only; +NEW_CONNECTION; + start work isolation level serializable read only; +NEW_CONNECTION; + start work isolation level serializable read only; +NEW_CONNECTION; + + + +start work isolation level serializable read only; +NEW_CONNECTION; +start work isolation level serializable read only 
; +NEW_CONNECTION; +start work isolation level serializable read only ; +NEW_CONNECTION; +start work isolation level serializable read only + +; +NEW_CONNECTION; +start work isolation level serializable read only; +NEW_CONNECTION; +start work isolation level serializable read only; +NEW_CONNECTION; +start +work +isolation +level +serializable +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start work isolation level serializable read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only+; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read/#only; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read/-only; +NEW_CONNECTION; +begin isolation level repeatable read read write; +NEW_CONNECTION; +BEGIN ISOLATION LEVEL REPEATABLE READ READ WRITE; +NEW_CONNECTION; +begin isolation level repeatable read read write; +NEW_CONNECTION; + begin isolation level repeatable read read write; +NEW_CONNECTION; + begin isolation level repeatable read read write; +NEW_CONNECTION; + + + +begin isolation level repeatable read read write; +NEW_CONNECTION; +begin isolation level repeatable read read write ; +NEW_CONNECTION; +begin isolation level repeatable read read write ; +NEW_CONNECTION; +begin isolation level repeatable read read write + +; +NEW_CONNECTION; +begin isolation level repeatable read read write; +NEW_CONNECTION; +begin isolation level repeatable read read write; +NEW_CONNECTION; +begin +isolation +level +repeatable +read +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read_write; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin isolation level repeatable read read write; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read write?; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read read/-write; +NEW_CONNECTION; +start isolation level repeatable read read write; +NEW_CONNECTION; +START ISOLATION LEVEL REPEATABLE READ READ WRITE; +NEW_CONNECTION; +start isolation level repeatable read read write; +NEW_CONNECTION; + start isolation level repeatable read read write; +NEW_CONNECTION; + start isolation level repeatable read read write; +NEW_CONNECTION; + + + +start isolation level repeatable read read write; +NEW_CONNECTION; +start isolation level repeatable read read write ; +NEW_CONNECTION; +start isolation level repeatable read read write ; +NEW_CONNECTION; +start isolation level repeatable read read write + +; +NEW_CONNECTION; +start isolation level repeatable read read write; +NEW_CONNECTION; +start isolation level repeatable read read write; +NEW_CONNECTION; +start +isolation +level +repeatable +read +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start isolation level repeatable read read write; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read!write; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start isolation level repeatable read read write; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read read/-write; +NEW_CONNECTION; +begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ READ ONLY; +NEW_CONNECTION; +begin transaction isolation level 
repeatable read read only; +NEW_CONNECTION; + begin transaction isolation level repeatable read read only; +NEW_CONNECTION; + begin transaction isolation level repeatable read read only; +NEW_CONNECTION; + + + +begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +begin transaction isolation level repeatable read read only ; +NEW_CONNECTION; +begin transaction isolation level repeatable read read only ; +NEW_CONNECTION; +begin transaction isolation level repeatable read read only + +; +NEW_CONNECTION; +begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +begin +transaction +isolation +level +repeatable +read +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction 
isolation level repeatable read read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read 
read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction isolation level repeatable read read only; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read read/-only; +NEW_CONNECTION; +start transaction isolation level repeatable read read write; +NEW_CONNECTION; +START TRANSACTION ISOLATION LEVEL REPEATABLE READ READ WRITE; +NEW_CONNECTION; +start transaction isolation level repeatable read read write; +NEW_CONNECTION; + start transaction isolation level repeatable read read write; +NEW_CONNECTION; + start transaction isolation level repeatable read read write; +NEW_CONNECTION; + + + +start transaction isolation level repeatable read read write; +NEW_CONNECTION; +start transaction isolation level repeatable read read write ; +NEW_CONNECTION; +start transaction isolation level repeatable read read write ; +NEW_CONNECTION; +start transaction isolation level repeatable read read write + +; 
+NEW_CONNECTION; +start transaction isolation level repeatable read read write; +NEW_CONNECTION; +start transaction isolation level repeatable read read write; +NEW_CONNECTION; +start +transaction +isolation +level +repeatable +read +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction isolation level repeatable read read 
write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read-write; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read write-/; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read read/-write; +NEW_CONNECTION; +begin work isolation level repeatable read read write; +NEW_CONNECTION; +BEGIN WORK ISOLATION LEVEL REPEATABLE READ READ WRITE; +NEW_CONNECTION; +begin work isolation level repeatable read read write; +NEW_CONNECTION; + begin work isolation level repeatable read read write; +NEW_CONNECTION; + begin work isolation level repeatable read read write; +NEW_CONNECTION; + + + +begin work isolation level repeatable read read write; +NEW_CONNECTION; +begin work isolation level repeatable read read write ; +NEW_CONNECTION; +begin work isolation level repeatable read read write ; +NEW_CONNECTION; +begin work isolation level repeatable read read write + +; +NEW_CONNECTION; +begin work isolation level repeatable read read write; +NEW_CONNECTION; +begin work isolation level repeatable read read write; +NEW_CONNECTION; +begin +work +isolation +level +repeatable +read +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+%begin work isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work isolation 
level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work isolation level repeatable read 
read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work isolation level repeatable read read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read read/-write; +NEW_CONNECTION; +start work isolation level repeatable read read only; +NEW_CONNECTION; +START WORK ISOLATION 
LEVEL REPEATABLE READ READ ONLY; +NEW_CONNECTION; +start work isolation level repeatable read read only; +NEW_CONNECTION; + start work isolation level repeatable read read only; +NEW_CONNECTION; + start work isolation level repeatable read read only; +NEW_CONNECTION; + + + +start work isolation level repeatable read read only; +NEW_CONNECTION; +start work isolation level repeatable read read only ; +NEW_CONNECTION; +start work isolation level repeatable read read only ; +NEW_CONNECTION; +start work isolation level repeatable read read only + +; +NEW_CONNECTION; +start work isolation level repeatable read read only; +NEW_CONNECTION; +start work isolation level repeatable read read only; +NEW_CONNECTION; +start +work +isolation +level +repeatable +read +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read&only; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +$start work isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start 
work isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work isolation level repeatable read 
read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work isolation level repeatable read read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read read/-only; +NEW_CONNECTION; +begin isolation level serializable, read write; +NEW_CONNECTION; +BEGIN ISOLATION LEVEL SERIALIZABLE, READ WRITE; +NEW_CONNECTION; +begin isolation level serializable, read write; +NEW_CONNECTION; + begin isolation level serializable, read write; +NEW_CONNECTION; + begin isolation level serializable, read write; +NEW_CONNECTION; + + + +begin isolation level serializable, read write; +NEW_CONNECTION; +begin isolation level serializable, read write ; +NEW_CONNECTION; +begin isolation level serializable, read write ; +NEW_CONNECTION; +begin isolation level serializable, read write + +; +NEW_CONNECTION; +begin isolation level serializable, read write; +NEW_CONNECTION; +begin isolation level serializable, read write; +NEW_CONNECTION; +begin +isolation +level +serializable, +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin isolation level 
serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write*; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin 
isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read/-write; +NEW_CONNECTION; +start isolation level serializable, read write; +NEW_CONNECTION; +START ISOLATION LEVEL SERIALIZABLE, READ WRITE; +NEW_CONNECTION; +start isolation level serializable, read write; +NEW_CONNECTION; + start isolation level serializable, read write; +NEW_CONNECTION; + start isolation level serializable, read write; +NEW_CONNECTION; + + + +start isolation level serializable, read write; +NEW_CONNECTION; +start isolation level serializable, read write ; +NEW_CONNECTION; +start isolation level 
serializable, read write ; +NEW_CONNECTION; +start isolation level serializable, read write + +; +NEW_CONNECTION; +start isolation level serializable, read write; +NEW_CONNECTION; +start isolation level serializable, read write; +NEW_CONNECTION; +start +isolation +level +serializable, +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start isolation level serializable, read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start isolation level 
serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write/-; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read/-write; +NEW_CONNECTION; +begin transaction isolation level serializable, read only; +NEW_CONNECTION; +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE, READ ONLY; +NEW_CONNECTION; +begin transaction isolation level serializable, read only; +NEW_CONNECTION; + begin transaction isolation level serializable, read only; +NEW_CONNECTION; + begin transaction isolation level serializable, read only; +NEW_CONNECTION; + + + +begin transaction isolation level serializable, read only; +NEW_CONNECTION; +begin transaction isolation level serializable, read only ; +NEW_CONNECTION; +begin transaction isolation level serializable, read only ; +NEW_CONNECTION; +begin transaction isolation level serializable, read only + +; +NEW_CONNECTION; +begin transaction isolation level serializable, read only; +NEW_CONNECTION; +begin transaction isolation level serializable, read only; +NEW_CONNECTION; +begin +transaction +isolation +level +serializable, +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +&begin transaction isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, 
read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+begin transaction isolation level serializable, read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read/-only; +NEW_CONNECTION; +start transaction isolation level serializable, read write; +NEW_CONNECTION; +START TRANSACTION ISOLATION LEVEL SERIALIZABLE, READ WRITE; +NEW_CONNECTION; +start transaction isolation level serializable, read write; +NEW_CONNECTION; + start transaction isolation level serializable, read write; +NEW_CONNECTION; + start transaction isolation level serializable, read write; +NEW_CONNECTION; + + + +start transaction isolation level serializable, read write; +NEW_CONNECTION; +start transaction isolation level serializable, read write ; 
+NEW_CONNECTION; +start transaction isolation level serializable, read write ; +NEW_CONNECTION; +start transaction isolation level serializable, read write + +; +NEW_CONNECTION; +start transaction isolation level serializable, read write; +NEW_CONNECTION; +start transaction isolation level serializable, read write; +NEW_CONNECTION; +start +transaction +isolation +level +serializable, +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level 
serializable, read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write-; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start 
transaction isolation level serializable, read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read/-write; +NEW_CONNECTION; +begin work isolation level serializable, read write; +NEW_CONNECTION; +BEGIN WORK ISOLATION LEVEL SERIALIZABLE, READ WRITE; +NEW_CONNECTION; +begin work isolation level serializable, read write; +NEW_CONNECTION; + begin work isolation level serializable, read write; +NEW_CONNECTION; + begin work isolation level serializable, read write; +NEW_CONNECTION; + + + +begin work isolation level serializable, read write; +NEW_CONNECTION; +begin work isolation level serializable, read write ; +NEW_CONNECTION; +begin work isolation level serializable, read write ; +NEW_CONNECTION; +begin work isolation level serializable, read write + +; +NEW_CONNECTION; +begin work isolation level serializable, read write; +NEW_CONNECTION; +begin work isolation level serializable, read write; +NEW_CONNECTION; +begin +work +isolation +level +serializable, +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +%begin work isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work isolation level serializable, 
read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin work isolation level serializable, read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read/-write; +NEW_CONNECTION; +start work isolation level serializable, read only; +NEW_CONNECTION; +START WORK ISOLATION LEVEL SERIALIZABLE, READ ONLY; +NEW_CONNECTION; +start work isolation level 
serializable, read only; +NEW_CONNECTION; + start work isolation level serializable, read only; +NEW_CONNECTION; + start work isolation level serializable, read only; +NEW_CONNECTION; + + + +start work isolation level serializable, read only; +NEW_CONNECTION; +start work isolation level serializable, read only ; +NEW_CONNECTION; +start work isolation level serializable, read only ; +NEW_CONNECTION; +start work isolation level serializable, read only + +; +NEW_CONNECTION; +start work isolation level serializable, read only; +NEW_CONNECTION; +start work isolation level serializable, read only; +NEW_CONNECTION; +start +work +isolation +level +serializable, +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start work isolation level serializable, read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only-; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation 
level serializable, read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read/-only; +NEW_CONNECTION; +begin isolation level repeatable read, read write; +NEW_CONNECTION; +BEGIN ISOLATION LEVEL REPEATABLE READ, READ WRITE; +NEW_CONNECTION; +begin isolation level repeatable read, read write; +NEW_CONNECTION; + begin isolation level repeatable read, read write; +NEW_CONNECTION; + begin isolation level repeatable read, read write; +NEW_CONNECTION; + + + +begin isolation level repeatable read, read write; +NEW_CONNECTION; +begin isolation level repeatable read, read write ; +NEW_CONNECTION; +begin isolation level repeatable read, read write ; +NEW_CONNECTION; +begin isolation level repeatable read, read write + +; +NEW_CONNECTION; +begin isolation level repeatable read, read write; +NEW_CONNECTION; +begin isolation level repeatable read, read write; +NEW_CONNECTION; +begin +isolation +level +repeatable +read, +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin isolation level repeatable read, read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read*write; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +(begin isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin isolation level repeatable read, read write; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level repeatable read, read/-write; +NEW_CONNECTION; +start isolation level repeatable read, read write; +NEW_CONNECTION; +START ISOLATION LEVEL REPEATABLE READ, READ WRITE; +NEW_CONNECTION; +start isolation level repeatable read, read write; +NEW_CONNECTION; + start isolation level repeatable read, read write; +NEW_CONNECTION; + start isolation level repeatable read, read write; +NEW_CONNECTION; + + + +start isolation level repeatable read, read write; +NEW_CONNECTION; +start isolation level repeatable read, read write ; +NEW_CONNECTION; 
+start isolation level repeatable read, read write ; +NEW_CONNECTION; +start isolation level repeatable read, read write + +; +NEW_CONNECTION; +start isolation level repeatable read, read write; +NEW_CONNECTION; +start isolation level repeatable read, read write; +NEW_CONNECTION; +start +isolation +level +repeatable +read, +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start 
isolation level repeatable read, read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start isolation level repeatable read, read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read/#write; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +/-start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read/-write; +NEW_CONNECTION; +begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ ONLY; +NEW_CONNECTION; +begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; + begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; + begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; + + + +begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +begin transaction isolation level repeatable read, read only ; +NEW_CONNECTION; +begin transaction isolation level repeatable read, read only ; +NEW_CONNECTION; +begin transaction isolation level repeatable read, read only + +; +NEW_CONNECTION; +begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +begin +transaction +isolation +level +repeatable +read, +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction isolation level repeatable read, read only; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read*only; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read only/; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level repeatable read, read/-only; +NEW_CONNECTION; +start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +START TRANSACTION ISOLATION LEVEL REPEATABLE READ, 
READ WRITE; +NEW_CONNECTION; +start transaction isolation level repeatable read, read write; +NEW_CONNECTION; + start transaction isolation level repeatable read, read write; +NEW_CONNECTION; + start transaction isolation level repeatable read, read write; +NEW_CONNECTION; + + + +start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +start transaction isolation level repeatable read, read write ; +NEW_CONNECTION; +start transaction isolation level repeatable read, read write ; +NEW_CONNECTION; +start transaction isolation level repeatable read, read write + +; +NEW_CONNECTION; +start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +start +transaction +isolation +level +repeatable +read, +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level 
repeatable read, read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction isolation 
level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction 
isolation level repeatable read, read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read/-write; +NEW_CONNECTION; +begin work isolation level repeatable read, read write; +NEW_CONNECTION; +BEGIN WORK ISOLATION LEVEL REPEATABLE READ, READ WRITE; +NEW_CONNECTION; +begin work isolation level repeatable read, read write; +NEW_CONNECTION; + begin work isolation level repeatable read, read write; +NEW_CONNECTION; + begin work isolation level repeatable read, read write; +NEW_CONNECTION; + + + +begin work isolation level repeatable read, read write; +NEW_CONNECTION; +begin work isolation level repeatable read, read write ; 
+NEW_CONNECTION; +begin work isolation level repeatable read, read write ; +NEW_CONNECTION; +begin work isolation level repeatable read, read write + +; +NEW_CONNECTION; +begin work isolation level repeatable read, read write; +NEW_CONNECTION; +begin work isolation level repeatable read, read write; +NEW_CONNECTION; +begin +work +isolation +level +repeatable +read, +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin 
work isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work 
isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work 
isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level repeatable read, read/-write; +NEW_CONNECTION; +start work isolation level repeatable read, read only; +NEW_CONNECTION; +START WORK ISOLATION LEVEL REPEATABLE READ, READ ONLY; +NEW_CONNECTION; +start work isolation level repeatable read, read only; +NEW_CONNECTION; + start work isolation level repeatable read, read only; +NEW_CONNECTION; + start work isolation level repeatable read, read only; +NEW_CONNECTION; + + + +start work isolation level repeatable read, read only; +NEW_CONNECTION; +start work isolation level repeatable read, read only ; +NEW_CONNECTION; +start work isolation level repeatable read, read only ; +NEW_CONNECTION; +start work isolation level repeatable read, read only + +; +NEW_CONNECTION; +start work isolation level repeatable read, read only; +NEW_CONNECTION; +start work isolation level repeatable read, read only; +NEW_CONNECTION; +start +work +isolation +level +repeatable +read, +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+start work isolation level repeatable read, read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation 
level repeatable read, read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, 
read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read/-only; +NEW_CONNECTION; +begin not deferrable; +NEW_CONNECTION; +BEGIN NOT DEFERRABLE; +NEW_CONNECTION; +begin not deferrable; +NEW_CONNECTION; + begin not deferrable; +NEW_CONNECTION; + begin not deferrable; +NEW_CONNECTION; + + + +begin not deferrable; +NEW_CONNECTION; +begin not deferrable ; +NEW_CONNECTION; +begin not deferrable ; 
+NEW_CONNECTION; +begin not deferrable + +; +NEW_CONNECTION; +begin not deferrable; +NEW_CONNECTION; +begin not deferrable; +NEW_CONNECTION; +begin +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin 
not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin 
not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not/-deferrable; +NEW_CONNECTION; +start not deferrable; +NEW_CONNECTION; +START NOT DEFERRABLE; +NEW_CONNECTION; +start not deferrable; +NEW_CONNECTION; + start not deferrable; +NEW_CONNECTION; + start not deferrable; +NEW_CONNECTION; + + + +start not deferrable; +NEW_CONNECTION; +start not deferrable ; +NEW_CONNECTION; +start not deferrable ; +NEW_CONNECTION; +start not deferrable + +; +NEW_CONNECTION; +start not deferrable; +NEW_CONNECTION; +start not deferrable; +NEW_CONNECTION; +start +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not deferrable$; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +start not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start not deferrable; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +start not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start not/-deferrable; +NEW_CONNECTION; +begin transaction not deferrable; +NEW_CONNECTION; +BEGIN TRANSACTION NOT DEFERRABLE; +NEW_CONNECTION; +begin transaction not deferrable; +NEW_CONNECTION; + begin transaction not deferrable; +NEW_CONNECTION; + begin transaction not deferrable; +NEW_CONNECTION; + + + +begin transaction not deferrable; +NEW_CONNECTION; +begin transaction not deferrable ; +NEW_CONNECTION; +begin transaction not deferrable ; +NEW_CONNECTION; +begin transaction not deferrable + +; +NEW_CONNECTION; +begin transaction not deferrable; +NEW_CONNECTION; +begin transaction not deferrable; +NEW_CONNECTION; +begin +transaction +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin 
transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not*deferrable; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin 
transaction not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not/-deferrable; +NEW_CONNECTION; +start transaction not deferrable; +NEW_CONNECTION; +START TRANSACTION NOT DEFERRABLE; +NEW_CONNECTION; +start transaction not deferrable; +NEW_CONNECTION; + start transaction not deferrable; +NEW_CONNECTION; + start transaction not deferrable; +NEW_CONNECTION; + + + +start transaction not deferrable; +NEW_CONNECTION; +start transaction not deferrable ; +NEW_CONNECTION; +start transaction not deferrable ; +NEW_CONNECTION; +start transaction not deferrable + +; +NEW_CONNECTION; +start transaction not deferrable; +NEW_CONNECTION; +start transaction not deferrable; +NEW_CONNECTION; +start +transaction +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction 
not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction not deferrable; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start transaction not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start 
transaction not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not/-deferrable; +NEW_CONNECTION; +begin work not deferrable; +NEW_CONNECTION; +BEGIN WORK NOT DEFERRABLE; +NEW_CONNECTION; +begin work not deferrable; +NEW_CONNECTION; + begin work not deferrable; +NEW_CONNECTION; + begin work not deferrable; +NEW_CONNECTION; + + + +begin work not deferrable; +NEW_CONNECTION; +begin work not deferrable ; +NEW_CONNECTION; +begin work not deferrable ; +NEW_CONNECTION; +begin work not deferrable + +; +NEW_CONNECTION; +begin work not deferrable; +NEW_CONNECTION; +begin work not deferrable; +NEW_CONNECTION; +begin +work +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin work not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work 
not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not/-deferrable; +NEW_CONNECTION; +start work not deferrable; +NEW_CONNECTION; +START WORK NOT DEFERRABLE; +NEW_CONNECTION; +start work not deferrable; +NEW_CONNECTION; + start work not deferrable; +NEW_CONNECTION; + start work not deferrable; +NEW_CONNECTION; + + + +start 
work not deferrable; +NEW_CONNECTION; +start work not deferrable ; +NEW_CONNECTION; +start work not deferrable ; +NEW_CONNECTION; +start work not deferrable + +; +NEW_CONNECTION; +start work not deferrable; +NEW_CONNECTION; +start work not deferrable; +NEW_CONNECTION; +start +work +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+*start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work not deferrable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not/-deferrable; +NEW_CONNECTION; +begin read only not deferrable; +NEW_CONNECTION; +BEGIN READ ONLY NOT DEFERRABLE; +NEW_CONNECTION; +begin read only not deferrable; +NEW_CONNECTION; + begin read only not deferrable; +NEW_CONNECTION; + begin read only not deferrable; +NEW_CONNECTION; + + + +begin read only not deferrable; +NEW_CONNECTION; +begin read only not deferrable ; +NEW_CONNECTION; +begin read only not deferrable ; +NEW_CONNECTION; +begin read only not deferrable + +; +NEW_CONNECTION; +begin read only not deferrable; +NEW_CONNECTION; +begin read only not deferrable; +NEW_CONNECTION; +begin +read +only +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not%deferrable; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +_begin read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not deferrable); +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +begin read only not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin read only not deferrable; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +begin read only not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read only not/-deferrable; +NEW_CONNECTION; +start read only not deferrable; +NEW_CONNECTION; +START READ ONLY NOT DEFERRABLE; +NEW_CONNECTION; +start read only not deferrable; +NEW_CONNECTION; + start read only not deferrable; +NEW_CONNECTION; + start read only not deferrable; +NEW_CONNECTION; + + + +start read only not deferrable; +NEW_CONNECTION; +start read only not deferrable ; +NEW_CONNECTION; +start read only not deferrable ; +NEW_CONNECTION; +start read only not deferrable + +; +NEW_CONNECTION; +start read only not deferrable; +NEW_CONNECTION; +start read only not deferrable; +NEW_CONNECTION; +start +read +only +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not&deferrable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not deferrable+; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only not/-deferrable; +NEW_CONNECTION; +begin transaction read only not deferrable; +NEW_CONNECTION; 
+BEGIN TRANSACTION READ ONLY NOT DEFERRABLE; +NEW_CONNECTION; +begin transaction read only not deferrable; +NEW_CONNECTION; + begin transaction read only not deferrable; +NEW_CONNECTION; + begin transaction read only not deferrable; +NEW_CONNECTION; + + + +begin transaction read only not deferrable; +NEW_CONNECTION; +begin transaction read only not deferrable ; +NEW_CONNECTION; +begin transaction read only not deferrable ; +NEW_CONNECTION; +begin transaction read only not deferrable + +; +NEW_CONNECTION; +begin transaction read only not deferrable; +NEW_CONNECTION; +begin transaction read only not deferrable; +NEW_CONNECTION; +begin +transaction +read +only +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not deferrable$; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +begin transaction read only not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+begin transaction read only not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin 
transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read only not/-deferrable; +NEW_CONNECTION; +start transaction read only not deferrable; +NEW_CONNECTION; +START TRANSACTION READ ONLY NOT DEFERRABLE; +NEW_CONNECTION; +start transaction read only not deferrable; +NEW_CONNECTION; + start transaction read only not deferrable; +NEW_CONNECTION; + start transaction read only not deferrable; +NEW_CONNECTION; + + + +start transaction read only not deferrable; +NEW_CONNECTION; +start transaction read only not deferrable ; +NEW_CONNECTION; +start transaction read only not deferrable ; +NEW_CONNECTION; +start transaction read only not deferrable + +; +NEW_CONNECTION; +start transaction read only not deferrable; +NEW_CONNECTION; +start transaction read only not deferrable; +NEW_CONNECTION; +start +transaction +read +only +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not deferrable&; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start transaction read only not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +/#start transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only not/-deferrable; +NEW_CONNECTION; +begin work read only not deferrable; +NEW_CONNECTION; +BEGIN WORK READ ONLY NOT DEFERRABLE; +NEW_CONNECTION; +begin work read only not deferrable; +NEW_CONNECTION; + begin work read only not deferrable; +NEW_CONNECTION; + begin work read only not deferrable; +NEW_CONNECTION; + + + +begin work read only not deferrable; +NEW_CONNECTION; +begin work read only not deferrable ; +NEW_CONNECTION; +begin work read only not deferrable ; +NEW_CONNECTION; +begin work read only not deferrable + +; +NEW_CONNECTION; +begin work read only not deferrable; +NEW_CONNECTION; +begin work read only not deferrable; +NEW_CONNECTION; +begin +work +read +only +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not_deferrable; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work read only not deferrable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work 
read only not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read only not/-deferrable; +NEW_CONNECTION; +start work read only not deferrable; +NEW_CONNECTION; +START WORK READ ONLY NOT DEFERRABLE; +NEW_CONNECTION; +start work read only not deferrable; +NEW_CONNECTION; + start work read only not deferrable; +NEW_CONNECTION; + start work read only not deferrable; +NEW_CONNECTION; + + + +start work read only not deferrable; +NEW_CONNECTION; +start work read only not deferrable ; +NEW_CONNECTION; +start work read only not deferrable ; +NEW_CONNECTION; +start work read only not deferrable + +; +NEW_CONNECTION; +start work read only not deferrable; +NEW_CONNECTION; +start work read only not deferrable; +NEW_CONNECTION; +start +work +read +only +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not 
deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start 
work read only not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not/#deferrable; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +/-start work read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only not/-deferrable; +NEW_CONNECTION; +begin read write not deferrable; +NEW_CONNECTION; +BEGIN READ WRITE NOT DEFERRABLE; +NEW_CONNECTION; +begin read write not deferrable; +NEW_CONNECTION; + begin read write not deferrable; +NEW_CONNECTION; + begin read write not deferrable; +NEW_CONNECTION; + + + +begin read write not deferrable; +NEW_CONNECTION; +begin read write not deferrable ; +NEW_CONNECTION; +begin read write not deferrable ; +NEW_CONNECTION; +begin read write not deferrable + +; +NEW_CONNECTION; +begin read write not deferrable; +NEW_CONNECTION; +begin read write not deferrable; +NEW_CONNECTION; +begin +read +write +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+begin read write not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +-#begin read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin read write not/-deferrable; +NEW_CONNECTION; +start read write not deferrable; +NEW_CONNECTION; +START READ WRITE NOT DEFERRABLE; +NEW_CONNECTION; +start read write not deferrable; +NEW_CONNECTION; + start read 
write not deferrable; +NEW_CONNECTION; + start read write not deferrable; +NEW_CONNECTION; + + + +start read write not deferrable; +NEW_CONNECTION; +start read write not deferrable ; +NEW_CONNECTION; +start read write not deferrable ; +NEW_CONNECTION; +start read write not deferrable + +; +NEW_CONNECTION; +start read write not deferrable; +NEW_CONNECTION; +start read write not deferrable; +NEW_CONNECTION; +start +read +write +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+!start read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not deferrable/; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start read write not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write not/-deferrable; +NEW_CONNECTION; +begin transaction read write not deferrable; +NEW_CONNECTION; +BEGIN TRANSACTION READ WRITE NOT DEFERRABLE; +NEW_CONNECTION; +begin transaction read write not deferrable; +NEW_CONNECTION; + begin transaction read write not deferrable; +NEW_CONNECTION; + begin transaction read write not deferrable; +NEW_CONNECTION; + + + +begin transaction read write not deferrable; +NEW_CONNECTION; +begin transaction read write not deferrable ; +NEW_CONNECTION; +begin transaction read write not deferrable ; +NEW_CONNECTION; +begin transaction read write not deferrable + +; +NEW_CONNECTION; +begin 
transaction read write not deferrable; +NEW_CONNECTION; +begin transaction read write not deferrable; +NEW_CONNECTION; +begin +transaction +read +write +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +begin transaction read write not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not-#deferrable; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +/begin transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction read write not/-deferrable; +NEW_CONNECTION; +start transaction read write not deferrable; +NEW_CONNECTION; +START TRANSACTION READ WRITE NOT DEFERRABLE; +NEW_CONNECTION; +start transaction read write not deferrable; +NEW_CONNECTION; + start 
transaction read write not deferrable; +NEW_CONNECTION; + start transaction read write not deferrable; +NEW_CONNECTION; + + + +start transaction read write not deferrable; +NEW_CONNECTION; +start transaction read write not deferrable ; +NEW_CONNECTION; +start transaction read write not deferrable ; +NEW_CONNECTION; +start transaction read write not deferrable + +; +NEW_CONNECTION; +start transaction read write not deferrable; +NEW_CONNECTION; +start transaction read write not deferrable; +NEW_CONNECTION; +start +transaction +read +write +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +@start transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start transaction read write not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction read write not deferrable; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start transaction read write not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write not/-deferrable; +NEW_CONNECTION; +begin work read write not deferrable; +NEW_CONNECTION; +BEGIN WORK READ WRITE NOT DEFERRABLE; +NEW_CONNECTION; +begin work read write not deferrable; +NEW_CONNECTION; + begin work read write not deferrable; +NEW_CONNECTION; + begin work read write not deferrable; +NEW_CONNECTION; + + + +begin work read write not deferrable; +NEW_CONNECTION; +begin work read write not deferrable ; +NEW_CONNECTION; +begin work read write not deferrable ; +NEW_CONNECTION; +begin work read write not deferrable + +; +NEW_CONNECTION; +begin work read write not deferrable; +NEW_CONNECTION; +begin work read write not deferrable; +NEW_CONNECTION; +begin +work +read +write +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work read write not deferrable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+begin work read write not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not deferrable/-; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work read write not/-deferrable; +NEW_CONNECTION; +start work read write not deferrable; +NEW_CONNECTION; +START WORK READ WRITE NOT DEFERRABLE; +NEW_CONNECTION; +start work read write not deferrable; +NEW_CONNECTION; + start work read write not deferrable; +NEW_CONNECTION; + start work read write not deferrable; +NEW_CONNECTION; + + + +start work read write not deferrable; +NEW_CONNECTION; +start work read write not deferrable ; +NEW_CONNECTION; +start work read write not deferrable ; +NEW_CONNECTION; +start work read write not deferrable + +; +NEW_CONNECTION; +start work read write not deferrable; +NEW_CONNECTION; +start work read write not deferrable; +NEW_CONNECTION; +start +work +read +write +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not deferrable$; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+start work read write not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write not/-deferrable; 
+NEW_CONNECTION; +begin isolation level default not deferrable; +NEW_CONNECTION; +BEGIN ISOLATION LEVEL DEFAULT NOT DEFERRABLE; +NEW_CONNECTION; +begin isolation level default not deferrable; +NEW_CONNECTION; + begin isolation level default not deferrable; +NEW_CONNECTION; + begin isolation level default not deferrable; +NEW_CONNECTION; + + + +begin isolation level default not deferrable; +NEW_CONNECTION; +begin isolation level default not deferrable ; +NEW_CONNECTION; +begin isolation level default not deferrable ; +NEW_CONNECTION; +begin isolation level default not deferrable + +; +NEW_CONNECTION; +begin isolation level default not deferrable; +NEW_CONNECTION; +begin isolation level default not deferrable; +NEW_CONNECTION; +begin +isolation +level +default +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin isolation level default not 
deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation 
level default not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin isolation level default not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default not/-deferrable; +NEW_CONNECTION; +start isolation level default not deferrable; +NEW_CONNECTION; +START ISOLATION LEVEL DEFAULT NOT DEFERRABLE; +NEW_CONNECTION; +start isolation level default not deferrable; +NEW_CONNECTION; + start isolation level default not deferrable; +NEW_CONNECTION; + start isolation level default not deferrable; +NEW_CONNECTION; + + + +start isolation level default not deferrable; +NEW_CONNECTION; +start isolation level default not deferrable ; +NEW_CONNECTION; +start isolation level default not deferrable ; +NEW_CONNECTION; +start isolation level default not deferrable + +; +NEW_CONNECTION; +start isolation level default not deferrable; +NEW_CONNECTION; +start isolation level default not deferrable; +NEW_CONNECTION; +start +isolation +level +default +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not deferrable_; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start isolation level default not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start isolation level default not deferrable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default 
not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default not/-deferrable; +NEW_CONNECTION; +begin transaction isolation level default not deferrable; +NEW_CONNECTION; +BEGIN TRANSACTION ISOLATION LEVEL DEFAULT NOT DEFERRABLE; +NEW_CONNECTION; +begin transaction isolation level default not deferrable; +NEW_CONNECTION; + begin transaction isolation level default not deferrable; +NEW_CONNECTION; + begin transaction isolation level default not deferrable; +NEW_CONNECTION; + + + +begin transaction isolation level default not deferrable; +NEW_CONNECTION; +begin transaction isolation level default not deferrable ; +NEW_CONNECTION; +begin transaction isolation level default not deferrable ; +NEW_CONNECTION; +begin transaction isolation level default not deferrable + +; +NEW_CONNECTION; +begin transaction isolation level default not deferrable; +NEW_CONNECTION; +begin transaction isolation level default not deferrable; +NEW_CONNECTION; +begin +transaction +isolation +level +default +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction isolation level default not deferrable; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default 
not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin 
transaction isolation level default not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction isolation level default not deferrable; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default not/-deferrable; +NEW_CONNECTION; +start transaction isolation level default not deferrable; +NEW_CONNECTION; +START TRANSACTION ISOLATION LEVEL DEFAULT NOT DEFERRABLE; +NEW_CONNECTION; +start transaction isolation level default not deferrable; +NEW_CONNECTION; + start transaction isolation level default not deferrable; +NEW_CONNECTION; + start transaction isolation level default not deferrable; +NEW_CONNECTION; + + + +start transaction isolation level default not deferrable; +NEW_CONNECTION; +start transaction isolation level default not deferrable ; +NEW_CONNECTION; +start transaction isolation level default not deferrable ; +NEW_CONNECTION; +start transaction isolation level default not deferrable + +; +NEW_CONNECTION; +start transaction isolation level default not deferrable; +NEW_CONNECTION; +start transaction isolation level default not deferrable; +NEW_CONNECTION; +start +transaction +isolation +level +default +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start 
transaction isolation level default not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not deferrable(; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not 
deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default not/-deferrable; +NEW_CONNECTION; +begin work isolation level default not deferrable; +NEW_CONNECTION; +BEGIN WORK ISOLATION LEVEL DEFAULT NOT DEFERRABLE; +NEW_CONNECTION; +begin work isolation level default not deferrable; +NEW_CONNECTION; + begin work isolation level default not deferrable; +NEW_CONNECTION; + begin work isolation level default not deferrable; +NEW_CONNECTION; + + + +begin work isolation level default not deferrable; +NEW_CONNECTION; +begin work isolation level default not deferrable ; +NEW_CONNECTION; 
+begin work isolation level default not deferrable ; +NEW_CONNECTION; +begin work isolation level default not deferrable + +; +NEW_CONNECTION; +begin work isolation level default not deferrable; +NEW_CONNECTION; +begin work isolation level default not deferrable; +NEW_CONNECTION; +begin +work +isolation +level +default +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work 
isolation level default not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+begin work isolation level default not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +/-begin work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default not/-deferrable; +NEW_CONNECTION; +start work isolation level default not deferrable; +NEW_CONNECTION; +START WORK ISOLATION LEVEL DEFAULT NOT DEFERRABLE; +NEW_CONNECTION; +start work isolation level default not deferrable; +NEW_CONNECTION; + start work isolation level default not deferrable; +NEW_CONNECTION; + start work isolation level default not deferrable; +NEW_CONNECTION; + + + +start work isolation level default not deferrable; +NEW_CONNECTION; +start work isolation level default not deferrable ; +NEW_CONNECTION; +start work isolation level default not deferrable ; +NEW_CONNECTION; +start work isolation level default not deferrable + +; +NEW_CONNECTION; +start work isolation level default not deferrable; +NEW_CONNECTION; +start work isolation level default not deferrable; +NEW_CONNECTION; +start +work +isolation +level +default +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not_deferrable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work isolation level default not 
deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level 
default not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work isolation level default not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default not/-deferrable; +NEW_CONNECTION; +begin isolation level serializable not deferrable; +NEW_CONNECTION; +BEGIN ISOLATION LEVEL SERIALIZABLE NOT DEFERRABLE; +NEW_CONNECTION; +begin isolation level serializable not deferrable; +NEW_CONNECTION; + begin isolation level serializable not deferrable; +NEW_CONNECTION; + begin isolation level serializable not deferrable; +NEW_CONNECTION; + + + +begin isolation level serializable not deferrable; +NEW_CONNECTION; +begin isolation level serializable not deferrable ; +NEW_CONNECTION; +begin isolation level serializable not deferrable ; +NEW_CONNECTION; +begin isolation level serializable not deferrable + +; +NEW_CONNECTION; +begin isolation level serializable not deferrable; +NEW_CONNECTION; +begin isolation level serializable not deferrable; +NEW_CONNECTION; +begin +isolation +level +serializable +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo 
begin isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin isolation level serializable not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not-#deferrable; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +/begin isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable not/-deferrable; +NEW_CONNECTION; +start isolation level serializable not deferrable; +NEW_CONNECTION; +START ISOLATION LEVEL 
SERIALIZABLE NOT DEFERRABLE; +NEW_CONNECTION; +start isolation level serializable not deferrable; +NEW_CONNECTION; + start isolation level serializable not deferrable; +NEW_CONNECTION; + start isolation level serializable not deferrable; +NEW_CONNECTION; + + + +start isolation level serializable not deferrable; +NEW_CONNECTION; +start isolation level serializable not deferrable ; +NEW_CONNECTION; +start isolation level serializable not deferrable ; +NEW_CONNECTION; +start isolation level serializable not deferrable + +; +NEW_CONNECTION; +start isolation level serializable not deferrable; +NEW_CONNECTION; +start isolation level serializable not deferrable; +NEW_CONNECTION; +start +isolation +level +serializable +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start isolation level serializable 
not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level 
serializable not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start 
isolation level serializable not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable not/-deferrable; +NEW_CONNECTION; +begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE NOT DEFERRABLE; +NEW_CONNECTION; +begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; + begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; + begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; + + + +begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +begin transaction isolation level serializable not deferrable ; +NEW_CONNECTION; +begin transaction isolation level serializable not deferrable ; +NEW_CONNECTION; +begin transaction isolation level serializable not deferrable + +; +NEW_CONNECTION; +begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +begin +transaction +isolation +level +serializable +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction 
isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction 
isolation level serializable not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin 
transaction isolation level serializable not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +/-begin transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable not/-deferrable; +NEW_CONNECTION; +start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +START TRANSACTION ISOLATION LEVEL SERIALIZABLE NOT DEFERRABLE; +NEW_CONNECTION; +start transaction isolation level serializable not deferrable; +NEW_CONNECTION; + start transaction isolation level serializable not deferrable; +NEW_CONNECTION; + start transaction isolation level serializable not deferrable; +NEW_CONNECTION; + + + +start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +start transaction isolation level serializable not deferrable ; +NEW_CONNECTION; +start transaction isolation level serializable not deferrable ; +NEW_CONNECTION; +start transaction isolation level serializable not deferrable + +; +NEW_CONNECTION; +start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +start +transaction +isolation +level +serializable +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction isolation level 
serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level 
serializable not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation 
level serializable not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable not/-deferrable; +NEW_CONNECTION; +begin work isolation level serializable not 
deferrable; +NEW_CONNECTION; +BEGIN WORK ISOLATION LEVEL SERIALIZABLE NOT DEFERRABLE; +NEW_CONNECTION; +begin work isolation level serializable not deferrable; +NEW_CONNECTION; + begin work isolation level serializable not deferrable; +NEW_CONNECTION; + begin work isolation level serializable not deferrable; +NEW_CONNECTION; + + + +begin work isolation level serializable not deferrable; +NEW_CONNECTION; +begin work isolation level serializable not deferrable ; +NEW_CONNECTION; +begin work isolation level serializable not deferrable ; +NEW_CONNECTION; +begin work isolation level serializable not deferrable + +; +NEW_CONNECTION; +begin work isolation level serializable not deferrable; +NEW_CONNECTION; +begin work isolation level serializable not deferrable; +NEW_CONNECTION; +begin +work +isolation +level +serializable +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin work isolation level serializable not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin work isolation level serializable not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin work isolation level serializable not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable not/-deferrable; +NEW_CONNECTION; +start work isolation level serializable not deferrable; +NEW_CONNECTION; +START WORK ISOLATION LEVEL SERIALIZABLE NOT DEFERRABLE; +NEW_CONNECTION; +start work isolation level serializable not deferrable; +NEW_CONNECTION; + start work isolation level serializable not deferrable; +NEW_CONNECTION; + start work isolation level serializable not deferrable; +NEW_CONNECTION; + + + +start work isolation level serializable not deferrable; +NEW_CONNECTION; +start work isolation level serializable not deferrable ; +NEW_CONNECTION; +start work isolation level serializable not deferrable ; +NEW_CONNECTION; +start work isolation level serializable not deferrable + +; +NEW_CONNECTION; +start work isolation level serializable not deferrable; +NEW_CONNECTION; +start work isolation level serializable not deferrable; +NEW_CONNECTION; +start +work +isolation +level +serializable +not +deferrable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work isolation level serializable not deferrable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work isolation level serializable not deferrable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work isolation level serializable not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work isolation level serializable not 
deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable not/-deferrable; +NEW_CONNECTION; +begin isolation level default read write not deferrable; +NEW_CONNECTION; +BEGIN ISOLATION LEVEL DEFAULT READ WRITE NOT DEFERRABLE; +NEW_CONNECTION; +begin isolation level default read write not deferrable; +NEW_CONNECTION; + begin isolation level default read write not deferrable; +NEW_CONNECTION; + begin isolation level default read write not deferrable; +NEW_CONNECTION; + + + +begin isolation level default read write not deferrable; +NEW_CONNECTION; +begin isolation level default read write not deferrable ; +NEW_CONNECTION; +begin isolation level default read write not deferrable ; +NEW_CONNECTION; +begin isolation level default read write not deferrable + +; +NEW_CONNECTION; +begin isolation level default read write not deferrable; +NEW_CONNECTION; +begin isolation level default read write not deferrable; +NEW_CONNECTION; +begin +isolation +level +default +read +write +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+begin isolation level default read write not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin isolation level default read write not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not deferrable\; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level default read write not/-deferrable; +NEW_CONNECTION; +start isolation level default read only not deferrable; +NEW_CONNECTION; +START ISOLATION LEVEL DEFAULT READ ONLY NOT DEFERRABLE; +NEW_CONNECTION; +start isolation level default read only not deferrable; +NEW_CONNECTION; + start isolation level default read only not deferrable; +NEW_CONNECTION; + start isolation level default read only not deferrable; +NEW_CONNECTION; + + + +start isolation level default read only not deferrable; +NEW_CONNECTION; +start isolation level default read only not deferrable ; +NEW_CONNECTION; +start 
isolation level default read only not deferrable ; +NEW_CONNECTION; +start isolation level default read only not deferrable + +; +NEW_CONNECTION; +start isolation level default read only not deferrable; +NEW_CONNECTION; +start isolation level default read only not deferrable; +NEW_CONNECTION; +start +isolation +level +default +read +only +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start isolation level default 
read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start isolation level default read 
only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start isolation level default read 
only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only not/-deferrable; +NEW_CONNECTION; +begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +BEGIN TRANSACTION ISOLATION LEVEL DEFAULT READ ONLY NOT DEFERRABLE; +NEW_CONNECTION; +begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; + begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; + begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; + + + +begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +begin transaction isolation level default read only not deferrable ; +NEW_CONNECTION; +begin transaction isolation level default read only not deferrable ; +NEW_CONNECTION; +begin transaction isolation level default read only not deferrable + +; +NEW_CONNECTION; +begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +begin +transaction +isolation +level +default +read +only +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction isolation level default read only not 
deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not deferrable!; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+-#begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation 
level default read only not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level default read only not/-deferrable; +NEW_CONNECTION; +start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +START TRANSACTION ISOLATION LEVEL DEFAULT READ WRITE NOT DEFERRABLE; +NEW_CONNECTION; +start transaction isolation level default read write not deferrable; +NEW_CONNECTION; + start transaction isolation level default read write not deferrable; +NEW_CONNECTION; + start transaction isolation level default read write not deferrable; +NEW_CONNECTION; + + + +start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +start transaction isolation level default read write not deferrable ; +NEW_CONNECTION; +start transaction isolation level default read write not deferrable ; +NEW_CONNECTION; +start transaction isolation level default read write not deferrable + +; +NEW_CONNECTION; +start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +start +transaction +isolation +level +default +read +write +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction isolation level default read write not deferrable; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start transaction isolation level default read write not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+-#start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start 
transaction isolation level default read write not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write not/-deferrable; +NEW_CONNECTION; +begin work isolation level default read write not deferrable; +NEW_CONNECTION; +BEGIN WORK ISOLATION LEVEL DEFAULT READ WRITE NOT DEFERRABLE; +NEW_CONNECTION; +begin work isolation level default read write not deferrable; +NEW_CONNECTION; + begin work isolation level default read write not deferrable; +NEW_CONNECTION; + begin work isolation level default read write not deferrable; +NEW_CONNECTION; + + + +begin work isolation level default read write not deferrable; +NEW_CONNECTION; +begin work isolation level default read write not deferrable ; +NEW_CONNECTION; +begin work isolation level default read write not deferrable ; +NEW_CONNECTION; +begin work isolation level default read write not deferrable + +; +NEW_CONNECTION; +begin work isolation level default read write not deferrable; +NEW_CONNECTION; +begin work isolation level default read write not deferrable; +NEW_CONNECTION; +begin +work +isolation +level +default +read +write +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read 
write not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work isolation level default read write not 
deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not-#deferrable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work isolation level default read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not deferrable/-; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level default read write not/-deferrable; +NEW_CONNECTION; +start work isolation level default read only not deferrable; +NEW_CONNECTION; +START WORK ISOLATION LEVEL DEFAULT READ ONLY NOT DEFERRABLE; +NEW_CONNECTION; +start work isolation level default read only not deferrable; +NEW_CONNECTION; + start work isolation level default read only not deferrable; +NEW_CONNECTION; + start work isolation level default read only not deferrable; +NEW_CONNECTION; + + + +start work isolation level default read only not deferrable; +NEW_CONNECTION; +start work isolation level default read only not deferrable ; +NEW_CONNECTION; +start work isolation level default read only not deferrable ; +NEW_CONNECTION; +start work isolation level default read only not deferrable + +; +NEW_CONNECTION; +start work isolation level default read only not deferrable; +NEW_CONNECTION; +start work isolation level default read only not deferrable; +NEW_CONNECTION; +start +work +isolation +level +default +read +only +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read 
only not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not deferrable(; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work isolation level default read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only not/-deferrable; +NEW_CONNECTION; +begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +BEGIN ISOLATION LEVEL SERIALIZABLE READ WRITE NOT DEFERRABLE; +NEW_CONNECTION; +begin isolation level serializable read write not deferrable; +NEW_CONNECTION; + begin isolation level serializable read write not deferrable; +NEW_CONNECTION; + begin isolation level serializable read write not 
deferrable; +NEW_CONNECTION; + + + +begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +begin isolation level serializable read write not deferrable ; +NEW_CONNECTION; +begin isolation level serializable read write not deferrable ; +NEW_CONNECTION; +begin isolation level serializable read write not deferrable + +; +NEW_CONNECTION; +begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +begin +isolation +level +serializable +read +write +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin isolation level serializable read write not deferrable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not)deferrable; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +-begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not deferrable?; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable read write not/-deferrable; +NEW_CONNECTION; +start isolation level serializable read write not deferrable; +NEW_CONNECTION; +START ISOLATION LEVEL SERIALIZABLE READ WRITE NOT DEFERRABLE; +NEW_CONNECTION; +start isolation level serializable read write not deferrable; +NEW_CONNECTION; + start isolation level serializable read write not deferrable; +NEW_CONNECTION; + start isolation level serializable read write not deferrable; +NEW_CONNECTION; + + + +start isolation level serializable read write not deferrable; +NEW_CONNECTION; +start isolation level serializable read write not deferrable ; +NEW_CONNECTION; +start isolation level serializable read write not deferrable ; +NEW_CONNECTION; +start isolation level serializable read write not deferrable + +; +NEW_CONNECTION; +start isolation level serializable read write not deferrable; +NEW_CONNECTION; +start isolation 
level serializable read write not deferrable; +NEW_CONNECTION; +start +isolation +level +serializable +read +write +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not 
deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start isolation level serializable read write not deferrable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not-/deferrable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write not/-deferrable; +NEW_CONNECTION; +begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY NOT DEFERRABLE; +NEW_CONNECTION; +begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; + begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; + begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; + + + +begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +begin transaction isolation level serializable read only not deferrable ; +NEW_CONNECTION; +begin transaction isolation level serializable read only not deferrable ; +NEW_CONNECTION; +begin transaction isolation level serializable read only not deferrable + +; +NEW_CONNECTION; +begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +begin +transaction +isolation +level +serializable +read +only +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin transaction isolation level serializable read only not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only 
not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin 
transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction isolation level serializable read only not deferrable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable read only not/-deferrable; +NEW_CONNECTION; +start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; +START TRANSACTION ISOLATION LEVEL SERIALIZABLE READ WRITE NOT DEFERRABLE; +NEW_CONNECTION; +start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; + start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; + start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; + + + +start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; +start transaction isolation level serializable read write not deferrable ; +NEW_CONNECTION; +start transaction isolation level serializable read write not deferrable ; +NEW_CONNECTION; +start transaction isolation level serializable read write not deferrable + +; +NEW_CONNECTION; +start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; +start transaction 
isolation level serializable read write not deferrable; +NEW_CONNECTION; +start +transaction +isolation +level +serializable +read +write +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +@start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction isolation level serializable 
read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start transaction isolation level serializable read write not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write not/-deferrable; +NEW_CONNECTION; +begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +BEGIN WORK ISOLATION LEVEL SERIALIZABLE READ WRITE NOT DEFERRABLE; +NEW_CONNECTION; +begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; + begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; + begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; + + + +begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +begin work isolation level serializable read write not deferrable ; 
+NEW_CONNECTION; +begin work isolation level serializable read write not deferrable ; +NEW_CONNECTION; +begin work isolation level serializable read write not deferrable + +; +NEW_CONNECTION; +begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +begin +work +isolation +level +serializable +read +write +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable 
read write not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not)deferrable; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +-begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation 
level serializable read write not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work isolation level serializable read write not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable read write not/-deferrable; +NEW_CONNECTION; +start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +START WORK ISOLATION LEVEL SERIALIZABLE READ ONLY NOT DEFERRABLE; +NEW_CONNECTION; +start work isolation level serializable read only not deferrable; +NEW_CONNECTION; + start work isolation level serializable read only not deferrable; +NEW_CONNECTION; + start work isolation level serializable read only not deferrable; +NEW_CONNECTION; + + + +start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +start work isolation level serializable read only not deferrable ; +NEW_CONNECTION; +start work isolation level serializable read only not deferrable ; +NEW_CONNECTION; +start work isolation level 
serializable read only not deferrable + +; +NEW_CONNECTION; +start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +start +work +isolation +level +serializable +read +only +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not$deferrable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work 
isolation level serializable read only not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not?deferrable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work isolation level serializable read only not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only not/-deferrable; +NEW_CONNECTION; +begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +BEGIN ISOLATION LEVEL SERIALIZABLE, READ WRITE, NOT DEFERRABLE; +NEW_CONNECTION; +begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; + begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; + begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; + + + +begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +begin isolation level serializable, read write, not deferrable ; +NEW_CONNECTION; +begin isolation level serializable, read write, not deferrable ; +NEW_CONNECTION; +begin isolation level serializable, read write, not deferrable + +; +NEW_CONNECTION; +begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +begin isolation level serializable, read write, not 
deferrable; +NEW_CONNECTION; +begin +isolation +level +serializable, +read +write, +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not 
deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin isolation level 
serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+begin isolation level serializable, read write, not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin isolation level serializable, read write, not/-deferrable; +NEW_CONNECTION; +start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +START ISOLATION LEVEL SERIALIZABLE, READ WRITE, NOT DEFERRABLE; +NEW_CONNECTION; +start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; + start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; + start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; + + + +start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +start isolation level serializable, read write, not deferrable ; +NEW_CONNECTION; +start isolation level serializable, read write, not deferrable ; +NEW_CONNECTION; +start isolation level serializable, read write, not deferrable + +; +NEW_CONNECTION; +start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +start +isolation +level +serializable, +read +write, +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level 
serializable, read write, not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+start isolation level serializable, read write, not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +-#start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not deferrable/#; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write, not/-deferrable; +NEW_CONNECTION; +begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE, READ ONLY, NOT DEFERRABLE; +NEW_CONNECTION; +begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; + begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; + begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; + + + +begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +begin transaction isolation level serializable, read only, not deferrable ; +NEW_CONNECTION; +begin transaction isolation level serializable, read only, not deferrable ; +NEW_CONNECTION; +begin transaction isolation level serializable, read only, not deferrable + +; +NEW_CONNECTION; +begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +begin +transaction +isolation +level +serializable, +read +only, +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction isolation level serializable, read only, not 
deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level 
serializable, read only, not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not deferrable-/; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction isolation level serializable, read only, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction isolation level serializable, read only, not/-deferrable; +NEW_CONNECTION; +start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +START TRANSACTION ISOLATION LEVEL SERIALIZABLE, READ WRITE, NOT DEFERRABLE; +NEW_CONNECTION; +start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; + start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; + start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; + + + +start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +start transaction isolation level serializable, read write, not deferrable ; +NEW_CONNECTION; +start transaction isolation level serializable, read write, not deferrable ; +NEW_CONNECTION; +start transaction isolation level serializable, read write, not deferrable + +; +NEW_CONNECTION; +start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +start +transaction 
+isolation +level +serializable, +read +write, +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction isolation level 
serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction isolation level serializable, read write, not 
deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write, not/-deferrable; +NEW_CONNECTION; +begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +BEGIN WORK ISOLATION LEVEL SERIALIZABLE, READ WRITE, NOT DEFERRABLE; +NEW_CONNECTION; +begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; + begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; + begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; + + + +begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +begin work isolation 
level serializable, read write, not deferrable ; +NEW_CONNECTION; +begin work isolation level serializable, read write, not deferrable ; +NEW_CONNECTION; +begin work isolation level serializable, read write, not deferrable + +; +NEW_CONNECTION; +begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +begin +work +isolation +level +serializable, +read +write, +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work isolation level serializable, read write, not deferrable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not deferrable); +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +?begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work isolation level serializable, read write, not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work isolation level serializable, read write, not/-deferrable; +NEW_CONNECTION; +start work isolation level serializable, read only; +NEW_CONNECTION; +START WORK ISOLATION LEVEL SERIALIZABLE, READ ONLY; +NEW_CONNECTION; +start work isolation level serializable, read only; +NEW_CONNECTION; + start work isolation level serializable, read only; +NEW_CONNECTION; + start work isolation level serializable, read only; +NEW_CONNECTION; + + + +start work isolation level serializable, read only; +NEW_CONNECTION; +start work isolation level serializable, read only ; +NEW_CONNECTION; +start work isolation 
level serializable, read only ; +NEW_CONNECTION; +start work isolation level serializable, read only + +; +NEW_CONNECTION; +start work isolation level serializable, read only; +NEW_CONNECTION; +start work isolation level serializable, read only; +NEW_CONNECTION; +start +work +isolation +level +serializable, +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work 
isolation level serializable, read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start work isolation level serializable, read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read/#only; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read/-only; +NEW_CONNECTION; +begin transaction not deferrable; +NEW_CONNECTION; +BEGIN TRANSACTION NOT DEFERRABLE; +NEW_CONNECTION; +begin transaction not deferrable; +NEW_CONNECTION; + begin transaction not deferrable; +NEW_CONNECTION; + begin transaction not deferrable; +NEW_CONNECTION; + + + +begin transaction not deferrable; +NEW_CONNECTION; +begin transaction not deferrable ; +NEW_CONNECTION; +begin transaction not deferrable ; +NEW_CONNECTION; +begin transaction not deferrable + +; +NEW_CONNECTION; +begin transaction not deferrable; +NEW_CONNECTION; +begin transaction not deferrable; +NEW_CONNECTION; +begin +transaction +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+$begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable+; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not/-deferrable; +NEW_CONNECTION; +start transaction 
not deferrable; +NEW_CONNECTION; +START TRANSACTION NOT DEFERRABLE; +NEW_CONNECTION; +start transaction not deferrable; +NEW_CONNECTION; + start transaction not deferrable; +NEW_CONNECTION; + start transaction not deferrable; +NEW_CONNECTION; + + + +start transaction not deferrable; +NEW_CONNECTION; +start transaction not deferrable ; +NEW_CONNECTION; +start transaction not deferrable ; +NEW_CONNECTION; +start transaction not deferrable + +; +NEW_CONNECTION; +start transaction not deferrable; +NEW_CONNECTION; +start transaction not deferrable; +NEW_CONNECTION; +start +transaction +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+start transaction not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not-#deferrable; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction not/-deferrable; +NEW_CONNECTION; +begin work not deferrable; +NEW_CONNECTION; +BEGIN WORK NOT DEFERRABLE; +NEW_CONNECTION; +begin work not deferrable; +NEW_CONNECTION; + begin work not deferrable; +NEW_CONNECTION; + begin work not deferrable; +NEW_CONNECTION; + + + +begin work not deferrable; +NEW_CONNECTION; +begin work not deferrable ; +NEW_CONNECTION; +begin work not deferrable ; +NEW_CONNECTION; +begin 
work not deferrable + +; +NEW_CONNECTION; +begin work not deferrable; +NEW_CONNECTION; +begin work not deferrable; +NEW_CONNECTION; +begin +work +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not_deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable*; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +begin work not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin 
work not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not/-deferrable; +NEW_CONNECTION; +start work not deferrable; +NEW_CONNECTION; +START WORK NOT DEFERRABLE; +NEW_CONNECTION; +start work not deferrable; +NEW_CONNECTION; + start work not deferrable; +NEW_CONNECTION; + start work not deferrable; +NEW_CONNECTION; + + + +start work not deferrable; +NEW_CONNECTION; +start work not deferrable ; +NEW_CONNECTION; +start work not deferrable ; +NEW_CONNECTION; +start work not deferrable + +; +NEW_CONNECTION; +start work not deferrable; +NEW_CONNECTION; +start work not deferrable; +NEW_CONNECTION; +start +work +not +deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not%deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not_deferrable; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +&start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not&deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not$deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not@deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not!deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not*deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not(deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not)deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not-deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
++start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not+deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not-#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not\deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not?deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not-/deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not/#deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work not deferrable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not deferrable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work not/-deferrable; +NEW_CONNECTION; +begin not deferrable read only; +NEW_CONNECTION; +BEGIN NOT 
DEFERRABLE READ ONLY; +NEW_CONNECTION; +begin not deferrable read only; +NEW_CONNECTION; + begin not deferrable read only; +NEW_CONNECTION; + begin not deferrable read only; +NEW_CONNECTION; + + + +begin not deferrable read only; +NEW_CONNECTION; +begin not deferrable read only ; +NEW_CONNECTION; +begin not deferrable read only ; +NEW_CONNECTION; +begin not deferrable read only + +; +NEW_CONNECTION; +begin not deferrable read only; +NEW_CONNECTION; +begin not deferrable read only; +NEW_CONNECTION; +begin +not +deferrable +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not 
deferrable read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not 
deferrable read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read/-only; +NEW_CONNECTION; +start read only; +NEW_CONNECTION; +START READ ONLY; +NEW_CONNECTION; +start read only; +NEW_CONNECTION; + start read only; +NEW_CONNECTION; + start read only; +NEW_CONNECTION; + + + +start read only; +NEW_CONNECTION; +start read only ; +NEW_CONNECTION; +start read only ; +NEW_CONNECTION; +start read only + +; +NEW_CONNECTION; +start read only; +NEW_CONNECTION; +start read only; +NEW_CONNECTION; +start +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only 
bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+-start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read/-only; +NEW_CONNECTION; +begin transaction not deferrable read only; +NEW_CONNECTION; +BEGIN TRANSACTION NOT 
DEFERRABLE READ ONLY; +NEW_CONNECTION; +begin transaction not deferrable read only; +NEW_CONNECTION; + begin transaction not deferrable read only; +NEW_CONNECTION; + begin transaction not deferrable read only; +NEW_CONNECTION; + + + +begin transaction not deferrable read only; +NEW_CONNECTION; +begin transaction not deferrable read only ; +NEW_CONNECTION; +begin transaction not deferrable read only ; +NEW_CONNECTION; +begin transaction not deferrable read only + +; +NEW_CONNECTION; +begin transaction not deferrable read only; +NEW_CONNECTION; +begin transaction not deferrable read only; +NEW_CONNECTION; +begin +transaction +not +deferrable +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin transaction not deferrable read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin 
transaction not deferrable read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction not 
deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read/-only; +NEW_CONNECTION; +start transaction read only; +NEW_CONNECTION; +START TRANSACTION READ ONLY; +NEW_CONNECTION; +start transaction read only; +NEW_CONNECTION; + start transaction read only; +NEW_CONNECTION; + start transaction read only; +NEW_CONNECTION; + + + +start transaction read only; +NEW_CONNECTION; +start transaction read only ; +NEW_CONNECTION; +start transaction read only ; +NEW_CONNECTION; +start transaction read only + +; +NEW_CONNECTION; +start transaction read only; +NEW_CONNECTION; +start transaction read only; +NEW_CONNECTION; +start +transaction +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read$only; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read-#only; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read/-only; +NEW_CONNECTION; +begin work not deferrable read only; +NEW_CONNECTION; +BEGIN WORK NOT DEFERRABLE READ ONLY; +NEW_CONNECTION; +begin work not deferrable read only; +NEW_CONNECTION; + begin work not deferrable read only; +NEW_CONNECTION; + begin work not deferrable read only; +NEW_CONNECTION; + + + +begin work not deferrable read only; +NEW_CONNECTION; +begin work not deferrable read only ; +NEW_CONNECTION; +begin work not deferrable read only ; +NEW_CONNECTION; +begin work not 
deferrable read only + +; +NEW_CONNECTION; +begin work not deferrable read only; +NEW_CONNECTION; +begin work not deferrable read only; +NEW_CONNECTION; +begin +work +not +deferrable +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+begin work not deferrable read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read/only; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +\begin work not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work not deferrable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read/-only; +NEW_CONNECTION; +start work read only; +NEW_CONNECTION; +START WORK READ ONLY; +NEW_CONNECTION; +start work read only; +NEW_CONNECTION; + start work read only; +NEW_CONNECTION; + start work read only; +NEW_CONNECTION; + + + +start work read only; +NEW_CONNECTION; +start work read only ; +NEW_CONNECTION; +start work read only ; +NEW_CONNECTION; +start work read only + +; +NEW_CONNECTION; +start work read only; +NEW_CONNECTION; +start work read only; +NEW_CONNECTION; +start +work +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work 
read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work 
read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start 
work read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read/-only; +NEW_CONNECTION; +begin not deferrable read write; +NEW_CONNECTION; +BEGIN NOT DEFERRABLE READ WRITE; +NEW_CONNECTION; +begin not deferrable read write; +NEW_CONNECTION; + begin not deferrable read write; +NEW_CONNECTION; + begin not deferrable read write; +NEW_CONNECTION; + + + +begin not deferrable read write; +NEW_CONNECTION; +begin not deferrable read write ; +NEW_CONNECTION; +begin not deferrable read write ; +NEW_CONNECTION; +begin not deferrable read write + +; +NEW_CONNECTION; +begin not deferrable read write; +NEW_CONNECTION; +begin not deferrable read write; +NEW_CONNECTION; +begin +not +deferrable +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin not deferrable read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin not deferrable read write; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable read/-write; +NEW_CONNECTION; +start read write; +NEW_CONNECTION; +START READ WRITE; +NEW_CONNECTION; +start read write; +NEW_CONNECTION; + start read write; +NEW_CONNECTION; + start read write; +NEW_CONNECTION; + + + +start read write; +NEW_CONNECTION; +start read 
write ; +NEW_CONNECTION; +start read write ; +NEW_CONNECTION; +start read write + +; +NEW_CONNECTION; +start read write; +NEW_CONNECTION; +start read write; +NEW_CONNECTION; +start +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+start read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write/#; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start read/-write; +NEW_CONNECTION; +begin transaction not deferrable read write; +NEW_CONNECTION; +BEGIN TRANSACTION NOT DEFERRABLE READ WRITE; +NEW_CONNECTION; +begin transaction not deferrable read write; +NEW_CONNECTION; + begin transaction not deferrable read write; +NEW_CONNECTION; + begin transaction not deferrable read write; +NEW_CONNECTION; + + + +begin transaction not deferrable read write; +NEW_CONNECTION; +begin transaction not deferrable read write ; +NEW_CONNECTION; +begin transaction not deferrable read write ; +NEW_CONNECTION; +begin transaction not deferrable read write + +; +NEW_CONNECTION; +begin transaction not deferrable read write; +NEW_CONNECTION; +begin transaction not deferrable read write; +NEW_CONNECTION; +begin +transaction +not +deferrable +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction not deferrable read write; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read)write; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +-begin transaction not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read write-/; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable read/-write; +NEW_CONNECTION; +start transaction read write; +NEW_CONNECTION; +START TRANSACTION READ WRITE; +NEW_CONNECTION; +start transaction read write; +NEW_CONNECTION; + start transaction read write; +NEW_CONNECTION; + start transaction read write; +NEW_CONNECTION; + + + +start transaction read write; +NEW_CONNECTION; +start transaction read write ; +NEW_CONNECTION; +start transaction read write ; +NEW_CONNECTION; +start transaction read write + +; +NEW_CONNECTION; +start transaction read write; +NEW_CONNECTION; +start transaction read write; +NEW_CONNECTION; +start +transaction +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +&start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read-write; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start transaction read/-write; +NEW_CONNECTION; +begin work not deferrable read write; +NEW_CONNECTION; +BEGIN WORK NOT DEFERRABLE READ WRITE; +NEW_CONNECTION; +begin work not deferrable read write; +NEW_CONNECTION; + begin work not deferrable read write; +NEW_CONNECTION; + begin work not deferrable read write; +NEW_CONNECTION; + + + +begin work not deferrable read write; +NEW_CONNECTION; +begin work not deferrable read write ; +NEW_CONNECTION; +begin work not deferrable read write ; +NEW_CONNECTION; +begin work not deferrable read write + +; +NEW_CONNECTION; +begin work not deferrable read write; +NEW_CONNECTION; +begin work not deferrable read write; +NEW_CONNECTION; +begin +work +not +deferrable +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin work not deferrable read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable 
read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work not deferrable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable read/-write; +NEW_CONNECTION; +start work 
read write; +NEW_CONNECTION; +START WORK READ WRITE; +NEW_CONNECTION; +start work read write; +NEW_CONNECTION; + start work read write; +NEW_CONNECTION; + start work read write; +NEW_CONNECTION; + + + +start work read write; +NEW_CONNECTION; +start work read write ; +NEW_CONNECTION; +start work read write ; +NEW_CONNECTION; +start work read write + +; +NEW_CONNECTION; +start work read write; +NEW_CONNECTION; +start work read write; +NEW_CONNECTION; +start +work +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start work read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work read write; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work read/-write; +NEW_CONNECTION; +begin not deferrable isolation level default; +NEW_CONNECTION; +BEGIN NOT DEFERRABLE ISOLATION LEVEL DEFAULT; +NEW_CONNECTION; +begin not deferrable isolation level default; +NEW_CONNECTION; + begin not deferrable isolation level default; +NEW_CONNECTION; + begin not deferrable isolation level default; +NEW_CONNECTION; + + + +begin not deferrable isolation level default; +NEW_CONNECTION; +begin not deferrable isolation level default ; +NEW_CONNECTION; +begin not deferrable isolation level default ; +NEW_CONNECTION; +begin not deferrable isolation level default + +; +NEW_CONNECTION; +begin not deferrable isolation level default; +NEW_CONNECTION; +begin not deferrable isolation level default; +NEW_CONNECTION; +begin +not +deferrable +isolation +level +default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin not deferrable isolation level default%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level%default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level_default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level&default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level$default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level@default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level!default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level*default; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +(begin not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level(default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level)default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level-default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level+default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level-#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default\; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level\default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level?default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level-/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level/#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level/-default; +NEW_CONNECTION; +start isolation level default; +NEW_CONNECTION; +START ISOLATION LEVEL DEFAULT; +NEW_CONNECTION; +start isolation level default; +NEW_CONNECTION; + start isolation level default; +NEW_CONNECTION; + start isolation level default; +NEW_CONNECTION; + + + +start isolation level default; +NEW_CONNECTION; +start isolation level default ; +NEW_CONNECTION; +start isolation level default ; +NEW_CONNECTION; +start isolation level default + +; +NEW_CONNECTION; +start isolation level default; +NEW_CONNECTION; +start isolation level default; +NEW_CONNECTION; +start +isolation +level +default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start isolation 
level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level%default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level_default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level&default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level$default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level@default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level!default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level*default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start isolation level default; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level(default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level)default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level-default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level+default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level-#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level\default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level?default; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +-/start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level-/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level/#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level/-default; +NEW_CONNECTION; +begin transaction not deferrable isolation level default; +NEW_CONNECTION; +BEGIN TRANSACTION NOT DEFERRABLE ISOLATION LEVEL DEFAULT; +NEW_CONNECTION; +begin transaction not deferrable isolation level default; +NEW_CONNECTION; + begin transaction not deferrable isolation level default; +NEW_CONNECTION; + begin transaction not deferrable isolation level default; +NEW_CONNECTION; + + + +begin transaction not deferrable isolation level default; +NEW_CONNECTION; +begin transaction not deferrable isolation level default ; +NEW_CONNECTION; +begin transaction not deferrable isolation level default ; +NEW_CONNECTION; +begin transaction not deferrable isolation level default + +; +NEW_CONNECTION; +begin transaction not deferrable isolation level default; +NEW_CONNECTION; +begin transaction not deferrable isolation level default; +NEW_CONNECTION; +begin +transaction +not +deferrable +isolation +level +default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction not 
deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level%default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level_default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level&default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level$default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level@default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level!default; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +*begin transaction not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level*default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level(default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level)default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level-default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level+default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level-#default; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level\default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level?default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level-/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level/#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not 
deferrable isolation level/-default; +NEW_CONNECTION; +start transaction isolation level default; +NEW_CONNECTION; +START TRANSACTION ISOLATION LEVEL DEFAULT; +NEW_CONNECTION; +start transaction isolation level default; +NEW_CONNECTION; + start transaction isolation level default; +NEW_CONNECTION; + start transaction isolation level default; +NEW_CONNECTION; + + + +start transaction isolation level default; +NEW_CONNECTION; +start transaction isolation level default ; +NEW_CONNECTION; +start transaction isolation level default ; +NEW_CONNECTION; +start transaction isolation level default + +; +NEW_CONNECTION; +start transaction isolation level default; +NEW_CONNECTION; +start transaction isolation level default; +NEW_CONNECTION; +start +transaction +isolation +level +default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level%default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level_default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level&default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction isolation level default; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start transaction isolation level default$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level$default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level@default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level!default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level*default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level(default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level)default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level-default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction 
isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level+default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level-#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level\default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level?default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level-/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level/#default; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level/-default; +NEW_CONNECTION; +begin work not deferrable isolation level default; +NEW_CONNECTION; +BEGIN WORK NOT DEFERRABLE ISOLATION LEVEL DEFAULT; +NEW_CONNECTION; +begin work not deferrable isolation level default; +NEW_CONNECTION; + begin work not deferrable isolation level default; +NEW_CONNECTION; + begin work not deferrable isolation level default; +NEW_CONNECTION; + + + +begin work not deferrable isolation level default; +NEW_CONNECTION; +begin work not deferrable isolation level default ; +NEW_CONNECTION; +begin work not deferrable isolation level default ; +NEW_CONNECTION; +begin work not deferrable isolation level default + +; +NEW_CONNECTION; +begin work not deferrable isolation level default; +NEW_CONNECTION; +begin work not deferrable isolation level default; +NEW_CONNECTION; +begin +work +not +deferrable +isolation +level +default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level%default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level_default; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level&default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level$default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level@default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level!default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level*default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level(default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work not deferrable isolation level 
default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level)default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level-default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level+default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level-#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level\default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable 
isolation level default?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level?default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level-/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level/#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work not deferrable isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level/-default; +NEW_CONNECTION; +start work isolation level default; +NEW_CONNECTION; +START WORK ISOLATION LEVEL DEFAULT; +NEW_CONNECTION; +start work isolation level default; +NEW_CONNECTION; + start work isolation level default; +NEW_CONNECTION; + start work isolation level default; +NEW_CONNECTION; + + + +start work isolation level default; +NEW_CONNECTION; +start work isolation level default ; +NEW_CONNECTION; +start work isolation level default ; +NEW_CONNECTION; +start work isolation level default + +; +NEW_CONNECTION; +start work isolation level default; +NEW_CONNECTION; +start work isolation level default; +NEW_CONNECTION; +start +work +isolation +level +default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+%start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level%default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level_default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level&default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level$default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level@default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level!default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level*default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+start work isolation level default(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level(default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level)default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level-default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level+default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level-#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level\default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+start work isolation level?default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level-/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level/#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level/-default; +NEW_CONNECTION; +begin not deferrable isolation level serializable; +NEW_CONNECTION; +BEGIN NOT DEFERRABLE ISOLATION LEVEL SERIALIZABLE; +NEW_CONNECTION; +begin not deferrable isolation level serializable; +NEW_CONNECTION; + begin not deferrable isolation level serializable; +NEW_CONNECTION; + begin not deferrable isolation level serializable; +NEW_CONNECTION; + + + +begin not deferrable isolation level serializable; +NEW_CONNECTION; +begin not deferrable isolation level serializable ; +NEW_CONNECTION; +begin not deferrable isolation level serializable ; +NEW_CONNECTION; +begin not deferrable isolation level serializable + +; +NEW_CONNECTION; +begin not deferrable isolation level serializable; +NEW_CONNECTION; +begin not deferrable isolation level serializable; +NEW_CONNECTION; +begin +not +deferrable +isolation +level +serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin not 
deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level%serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level_serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level&serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level$serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level@serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level!serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+begin not deferrable isolation level serializable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level*serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level(serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level)serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level-serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level+serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level-#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable/; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin not deferrable isolation level/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level\serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level?serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level-/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level/#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level/-serializable; +NEW_CONNECTION; +start isolation level serializable; +NEW_CONNECTION; +START ISOLATION LEVEL SERIALIZABLE; +NEW_CONNECTION; +start isolation level serializable; +NEW_CONNECTION; + start isolation level serializable; +NEW_CONNECTION; + start isolation level serializable; +NEW_CONNECTION; + + + +start isolation level serializable; 
+NEW_CONNECTION; +start isolation level serializable ; +NEW_CONNECTION; +start isolation level serializable ; +NEW_CONNECTION; +start isolation level serializable + +; +NEW_CONNECTION; +start isolation level serializable; +NEW_CONNECTION; +start isolation level serializable; +NEW_CONNECTION; +start +isolation +level +serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level%serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level_serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level&serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level$serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level@serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start isolation level serializable; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start isolation level serializable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level!serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level*serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level(serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level)serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level-serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level+serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level-#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable/; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start isolation level/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level\serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level?serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level-/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level/#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level/-serializable; +NEW_CONNECTION; +begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +BEGIN TRANSACTION NOT DEFERRABLE ISOLATION LEVEL SERIALIZABLE; +NEW_CONNECTION; +begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; + begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; + begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; + + + +begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +begin transaction not deferrable isolation level 
serializable ; +NEW_CONNECTION; +begin transaction not deferrable isolation level serializable ; +NEW_CONNECTION; +begin transaction not deferrable isolation level serializable + +; +NEW_CONNECTION; +begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +begin +transaction +not +deferrable +isolation +level +serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level%serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level_serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level&serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable$; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level$serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level@serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level!serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level*serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level(serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level)serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level-serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level+serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level-#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level\serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level?serializable; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level-/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level/#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level/-serializable; +NEW_CONNECTION; +start transaction isolation level serializable; +NEW_CONNECTION; +START TRANSACTION ISOLATION LEVEL SERIALIZABLE; +NEW_CONNECTION; +start transaction isolation level serializable; +NEW_CONNECTION; + start transaction isolation level serializable; +NEW_CONNECTION; + start transaction isolation level serializable; +NEW_CONNECTION; + + + +start transaction isolation level serializable; +NEW_CONNECTION; +start transaction isolation level serializable ; +NEW_CONNECTION; +start transaction isolation level serializable ; +NEW_CONNECTION; +start transaction isolation level serializable + +; +NEW_CONNECTION; +start transaction isolation level serializable; +NEW_CONNECTION; +start transaction isolation level serializable; +NEW_CONNECTION; +start +transaction +isolation +level +serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level%serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level_serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level&serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level$serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level@serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level!serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start 
transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level*serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level(serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level)serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level-serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level+serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level-#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable/; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level\serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level?serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level-/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level/#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level/-serializable; +NEW_CONNECTION; +begin work not deferrable isolation level serializable; +NEW_CONNECTION; +BEGIN WORK NOT DEFERRABLE ISOLATION LEVEL SERIALIZABLE; +NEW_CONNECTION; +begin work not deferrable isolation level serializable; +NEW_CONNECTION; + begin work not deferrable isolation level serializable; +NEW_CONNECTION; + begin work not deferrable 
isolation level serializable; +NEW_CONNECTION; + + + +begin work not deferrable isolation level serializable; +NEW_CONNECTION; +begin work not deferrable isolation level serializable ; +NEW_CONNECTION; +begin work not deferrable isolation level serializable ; +NEW_CONNECTION; +begin work not deferrable isolation level serializable + +; +NEW_CONNECTION; +begin work not deferrable isolation level serializable; +NEW_CONNECTION; +begin work not deferrable isolation level serializable; +NEW_CONNECTION; +begin +work +not +deferrable +isolation +level +serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level%serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level_serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level&serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level 
serializable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level$serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level@serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level!serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level*serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level(serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level)serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level 
serializable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level-serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level+serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level-#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level\serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level?serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level 
serializable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level-/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level/#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work not deferrable isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level/-serializable; +NEW_CONNECTION; +start work isolation level serializable; +NEW_CONNECTION; +START WORK ISOLATION LEVEL SERIALIZABLE; +NEW_CONNECTION; +start work isolation level serializable; +NEW_CONNECTION; + start work isolation level serializable; +NEW_CONNECTION; + start work isolation level serializable; +NEW_CONNECTION; + + + +start work isolation level serializable; +NEW_CONNECTION; +start work isolation level serializable ; +NEW_CONNECTION; +start work isolation level serializable ; +NEW_CONNECTION; +start work isolation level serializable + +; +NEW_CONNECTION; +start work isolation level serializable; +NEW_CONNECTION; +start work isolation level serializable; +NEW_CONNECTION; +start +work +isolation +level +serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation 
level%serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level_serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level&serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level$serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level@serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level!serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level*serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work 
isolation level(serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level)serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level-serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level+serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level-#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level\serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+start work isolation level?serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level-/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level/#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level/-serializable; +NEW_CONNECTION; +begin not deferrable isolation level default read write; +NEW_CONNECTION; +BEGIN NOT DEFERRABLE ISOLATION LEVEL DEFAULT READ WRITE; +NEW_CONNECTION; +begin not deferrable isolation level default read write; +NEW_CONNECTION; + begin not deferrable isolation level default read write; +NEW_CONNECTION; + begin not deferrable isolation level default read write; +NEW_CONNECTION; + + + +begin not deferrable isolation level default read write; +NEW_CONNECTION; +begin not deferrable isolation level default read write ; +NEW_CONNECTION; +begin not deferrable isolation level default read write ; +NEW_CONNECTION; +begin not deferrable isolation level default read write + +; +NEW_CONNECTION; +begin not deferrable isolation level default read write; +NEW_CONNECTION; +begin not deferrable isolation level default read write; +NEW_CONNECTION; +begin +not +deferrable +isolation +level +default +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin not deferrable isolation level default read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read write!; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read write-#; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level 
default read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level default read/-write; +NEW_CONNECTION; +start isolation level default read only; +NEW_CONNECTION; +START ISOLATION LEVEL DEFAULT READ ONLY; +NEW_CONNECTION; +start isolation level default read only; +NEW_CONNECTION; + start isolation level default read only; +NEW_CONNECTION; + start isolation level default read only; +NEW_CONNECTION; + + + +start isolation level default read only; +NEW_CONNECTION; +start isolation level default read only ; +NEW_CONNECTION; +start isolation level default read only ; +NEW_CONNECTION; +start isolation level default read only + +; +NEW_CONNECTION; +start isolation level default read only; +NEW_CONNECTION; +start isolation level default read only; +NEW_CONNECTION; +start +isolation +level +default +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start isolation level 
default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start 
isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +/-start isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level default read/-only; +NEW_CONNECTION; +begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +BEGIN TRANSACTION NOT DEFERRABLE ISOLATION LEVEL DEFAULT READ ONLY; +NEW_CONNECTION; +begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; + begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; + begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; + + + +begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +begin transaction not deferrable isolation level default read only ; +NEW_CONNECTION; +begin transaction not deferrable isolation level default read only ; +NEW_CONNECTION; +begin transaction not deferrable isolation level default read only + +; +NEW_CONNECTION; +begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +begin +transaction +not +deferrable +isolation +level +default +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin 
transaction not deferrable isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation 
level default read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read-#only; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction not deferrable isolation level default read only; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level default read/-only; +NEW_CONNECTION; +start transaction isolation level default read write; +NEW_CONNECTION; +START TRANSACTION ISOLATION LEVEL DEFAULT READ WRITE; +NEW_CONNECTION; +start transaction isolation level default read write; +NEW_CONNECTION; + start transaction isolation level default read write; +NEW_CONNECTION; + start transaction isolation level default read write; +NEW_CONNECTION; + + + +start transaction isolation level default read write; +NEW_CONNECTION; +start transaction isolation level default read write ; +NEW_CONNECTION; +start transaction isolation level default read write ; +NEW_CONNECTION; +start transaction isolation level default read write + +; +NEW_CONNECTION; +start transaction isolation level default read write; +NEW_CONNECTION; +start transaction isolation level default read write; +NEW_CONNECTION; +start +transaction +isolation +level +default +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read_write; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start 
transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction isolation level default read 
write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level default read/-write; +NEW_CONNECTION; +begin work not deferrable isolation level default read write; +NEW_CONNECTION; +BEGIN WORK NOT DEFERRABLE ISOLATION LEVEL DEFAULT READ WRITE; +NEW_CONNECTION; +begin work not deferrable isolation level default read write; +NEW_CONNECTION; + begin work not deferrable isolation level default read write; +NEW_CONNECTION; + begin work not deferrable isolation level default read write; +NEW_CONNECTION; + + + +begin work not deferrable isolation level default read write; +NEW_CONNECTION; +begin work not deferrable isolation level default read write ; +NEW_CONNECTION; +begin work not deferrable isolation level default read write ; +NEW_CONNECTION; +begin work not deferrable isolation level default read write + +; +NEW_CONNECTION; +begin work not deferrable isolation 
level default read write; +NEW_CONNECTION; +begin work not deferrable isolation level default read write; +NEW_CONNECTION; +begin +work +not +deferrable +isolation +level +default +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin work not deferrable isolation level default read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
++begin work not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin 
work not deferrable isolation level default read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work not deferrable isolation level default read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level default read/-write; +NEW_CONNECTION; +start work isolation level default read only; +NEW_CONNECTION; +START WORK ISOLATION LEVEL DEFAULT READ ONLY; +NEW_CONNECTION; +start work isolation level default read only; +NEW_CONNECTION; + start work isolation level default read only; +NEW_CONNECTION; + start work isolation level default read only; +NEW_CONNECTION; + + + +start work isolation level default read only; +NEW_CONNECTION; +start work isolation level default read only ; +NEW_CONNECTION; +start work isolation level default read only ; +NEW_CONNECTION; +start work isolation level default read only + +; +NEW_CONNECTION; +start work isolation level default read only; +NEW_CONNECTION; +start work isolation level default read only; +NEW_CONNECTION; +start +work +isolation +level +default +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start work isolation level default read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work isolation level default read only; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read\only; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work isolation level default read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level default read/-only; +NEW_CONNECTION; +begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +BEGIN NOT DEFERRABLE ISOLATION LEVEL SERIALIZABLE READ WRITE; +NEW_CONNECTION; +begin not deferrable isolation level serializable read write; +NEW_CONNECTION; + begin not deferrable isolation level serializable read write; +NEW_CONNECTION; + begin not deferrable isolation level serializable read write; +NEW_CONNECTION; + + + +begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +begin not deferrable isolation level serializable read write ; +NEW_CONNECTION; +begin not deferrable isolation level serializable read write ; +NEW_CONNECTION; +begin not deferrable isolation level serializable read write + +; +NEW_CONNECTION; +begin not deferrable isolation 
level serializable read write; +NEW_CONNECTION; +begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +begin +not +deferrable +isolation +level +serializable +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin not deferrable isolation level serializable read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
++begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not 
deferrable isolation level serializable read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable read/-write; +NEW_CONNECTION; +start isolation level serializable read write; +NEW_CONNECTION; +START ISOLATION LEVEL SERIALIZABLE READ WRITE; +NEW_CONNECTION; +start isolation level serializable read write; +NEW_CONNECTION; + start isolation level serializable read write; +NEW_CONNECTION; + start isolation level serializable read write; +NEW_CONNECTION; + + + +start isolation level serializable read write; +NEW_CONNECTION; +start isolation level serializable read write ; +NEW_CONNECTION; +start isolation level serializable read write ; +NEW_CONNECTION; +start isolation level serializable read write + +; +NEW_CONNECTION; +start isolation level serializable read write; +NEW_CONNECTION; +start isolation level serializable read write; +NEW_CONNECTION; +start +isolation +level +serializable +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write%; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start isolation level serializable read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start isolation level serializable read 
write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+start isolation level serializable read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable read/-write; +NEW_CONNECTION; +begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +BEGIN TRANSACTION NOT DEFERRABLE ISOLATION LEVEL SERIALIZABLE READ ONLY; +NEW_CONNECTION; +begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; + begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; + begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; + + + +begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +begin transaction not deferrable isolation level serializable read only ; +NEW_CONNECTION; +begin transaction not deferrable isolation level 
serializable read only ; +NEW_CONNECTION; +begin transaction not deferrable isolation level serializable read only + +; +NEW_CONNECTION; +begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +begin +transaction +not +deferrable +isolation +level +serializable +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin 
transaction not deferrable isolation level serializable read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read only); +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation 
level serializable read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction not deferrable isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable read/-only; +NEW_CONNECTION; +start transaction isolation level serializable read write; +NEW_CONNECTION; +START TRANSACTION ISOLATION LEVEL SERIALIZABLE READ WRITE; +NEW_CONNECTION; +start transaction isolation level serializable read write; +NEW_CONNECTION; + start transaction isolation level serializable read write; +NEW_CONNECTION; + start transaction isolation level serializable read write; +NEW_CONNECTION; + + + +start transaction 
isolation level serializable read write; +NEW_CONNECTION; +start transaction isolation level serializable read write ; +NEW_CONNECTION; +start transaction isolation level serializable read write ; +NEW_CONNECTION; +start transaction isolation level serializable read write + +; +NEW_CONNECTION; +start transaction isolation level serializable read write; +NEW_CONNECTION; +start transaction isolation level serializable read write; +NEW_CONNECTION; +start +transaction +isolation +level +serializable +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write$; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start 
transaction isolation level serializable read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction isolation level serializable read write; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable read/-write; +NEW_CONNECTION; +begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +BEGIN WORK NOT DEFERRABLE ISOLATION LEVEL SERIALIZABLE READ WRITE; +NEW_CONNECTION; +begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; + begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; + begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; + + + +begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +begin work not deferrable isolation level serializable read write ; +NEW_CONNECTION; +begin work not deferrable isolation level serializable read write ; +NEW_CONNECTION; +begin work not deferrable isolation level serializable read write + +; +NEW_CONNECTION; +begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +begin +work +not +deferrable +isolation +level +serializable +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +foo begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable 
isolation level serializable read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work not deferrable isolation level serializable read write; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin work not deferrable isolation level serializable read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work not deferrable isolation level serializable read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable read/-write; +NEW_CONNECTION; +start work isolation level serializable read only; +NEW_CONNECTION; +START WORK ISOLATION LEVEL SERIALIZABLE READ ONLY; +NEW_CONNECTION; +start work isolation level serializable read only; +NEW_CONNECTION; + start work isolation level serializable read only; +NEW_CONNECTION; + start work isolation level serializable read only; +NEW_CONNECTION; + + + +start work isolation level serializable read only; +NEW_CONNECTION; +start work isolation level serializable read only ; +NEW_CONNECTION; +start work isolation level serializable read only ; +NEW_CONNECTION; +start work isolation level serializable read only + +; +NEW_CONNECTION; +start work isolation level serializable read only; +NEW_CONNECTION; +start work isolation level serializable read only; +NEW_CONNECTION; +start +work +isolation +level +serializable +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work isolation level serializable read only; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable 
read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level 
serializable read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work isolation level serializable read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable read/-only; +NEW_CONNECTION; +begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +BEGIN NOT DEFERRABLE ISOLATION LEVEL SERIALIZABLE, READ WRITE; +NEW_CONNECTION; +begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; + begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; + begin not deferrable isolation level 
serializable, read write; +NEW_CONNECTION; + + + +begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +begin not deferrable isolation level serializable, read write ; +NEW_CONNECTION; +begin not deferrable isolation level serializable, read write ; +NEW_CONNECTION; +begin not deferrable isolation level serializable, read write + +; +NEW_CONNECTION; +begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +begin +not +deferrable +isolation +level +serializable, +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin not deferrable isolation 
level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation 
level serializable, read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable 
isolation level serializable, read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level serializable, read/-write; +NEW_CONNECTION; +start isolation level serializable, read write; +NEW_CONNECTION; +START ISOLATION LEVEL SERIALIZABLE, READ WRITE; +NEW_CONNECTION; +start isolation level serializable, read write; +NEW_CONNECTION; + start isolation level serializable, read write; +NEW_CONNECTION; + start isolation level serializable, read write; +NEW_CONNECTION; + + + +start isolation level serializable, read write; +NEW_CONNECTION; +start isolation level serializable, read write ; +NEW_CONNECTION; +start isolation level serializable, read write ; +NEW_CONNECTION; +start isolation level serializable, read write + +; +NEW_CONNECTION; +start isolation level serializable, read write; +NEW_CONNECTION; +start isolation level serializable, read write; +NEW_CONNECTION; +start 
+isolation +level +serializable, +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read 
write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +/start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level serializable, read/-write; +NEW_CONNECTION; +begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +BEGIN TRANSACTION NOT DEFERRABLE ISOLATION LEVEL SERIALIZABLE, READ 
ONLY; +NEW_CONNECTION; +begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; + begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; + begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; + + + +begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +begin transaction not deferrable isolation level serializable, read only ; +NEW_CONNECTION; +begin transaction not deferrable isolation level serializable, read only ; +NEW_CONNECTION; +begin transaction not deferrable isolation level serializable, read only + +; +NEW_CONNECTION; +begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +begin +transaction +not +deferrable +isolation +level +serializable, +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read_only; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin 
transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction not deferrable isolation level serializable, read 
only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction not deferrable isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+begin transaction not deferrable isolation level serializable, read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level serializable, read/-only; +NEW_CONNECTION; +start transaction isolation level serializable, read write; +NEW_CONNECTION; +START TRANSACTION ISOLATION LEVEL SERIALIZABLE, READ WRITE; +NEW_CONNECTION; +start transaction isolation level serializable, read write; +NEW_CONNECTION; + start transaction isolation level serializable, read write; +NEW_CONNECTION; + start transaction isolation level serializable, read write; +NEW_CONNECTION; + + + +start transaction isolation level serializable, read write; +NEW_CONNECTION; +start transaction isolation level serializable, read write ; +NEW_CONNECTION; +start transaction isolation level serializable, read write ; +NEW_CONNECTION; +start transaction isolation level serializable, read write + +; +NEW_CONNECTION; +start transaction isolation level serializable, read write; +NEW_CONNECTION; +start transaction isolation level serializable, read write; +NEW_CONNECTION; +start +transaction +isolation +level +serializable, +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write_; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction 
isolation level serializable, read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction isolation level serializable, read write; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level serializable, read/-write; +NEW_CONNECTION; +begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +BEGIN WORK NOT DEFERRABLE ISOLATION LEVEL SERIALIZABLE, READ WRITE; +NEW_CONNECTION; +begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; + begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; + begin work not 
deferrable isolation level serializable, read write; +NEW_CONNECTION; + + + +begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +begin work not deferrable isolation level serializable, read write ; +NEW_CONNECTION; +begin work not deferrable isolation level serializable, read write ; +NEW_CONNECTION; +begin work not deferrable isolation level serializable, read write + +; +NEW_CONNECTION; +begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +begin +work +not +deferrable +isolation +level +serializable, +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation 
level serializable, read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work not deferrable isolation level serializable, read write; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read write\; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work not deferrable isolation level serializable, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level serializable, read/-write; +NEW_CONNECTION; +start work isolation level serializable, read only; +NEW_CONNECTION; +START WORK ISOLATION LEVEL SERIALIZABLE, READ ONLY; +NEW_CONNECTION; +start work isolation level serializable, read only; +NEW_CONNECTION; + start work isolation level serializable, read only; +NEW_CONNECTION; + start work isolation level serializable, read only; +NEW_CONNECTION; + + + +start work isolation level serializable, read 
only; +NEW_CONNECTION; +start work isolation level serializable, read only ; +NEW_CONNECTION; +start work isolation level serializable, read only ; +NEW_CONNECTION; +start work isolation level serializable, read only + +; +NEW_CONNECTION; +start work isolation level serializable, read only; +NEW_CONNECTION; +start work isolation level serializable, read only; +NEW_CONNECTION; +start +work +isolation +level +serializable, +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start 
work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, 
read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work isolation level serializable, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level serializable, read/-only; +NEW_CONNECTION; +begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +BEGIN NOT DEFERRABLE ISOLATION LEVEL REPEATABLE READ, READ WRITE; +NEW_CONNECTION; +begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; + begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; + begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; + + + +begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +begin not deferrable isolation level repeatable read, read write ; +NEW_CONNECTION; +begin not deferrable isolation level repeatable read, read write ; +NEW_CONNECTION; +begin not deferrable isolation level repeatable read, read write + +; +NEW_CONNECTION; +begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +begin +not +deferrable +isolation +level +repeatable +read, +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin not deferrable isolation level repeatable 
read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin not deferrable isolation level 
repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin not deferrable isolation level repeatable read, read/-write; +NEW_CONNECTION; +start isolation level repeatable read, read write; +NEW_CONNECTION; +START ISOLATION LEVEL REPEATABLE READ, READ WRITE; +NEW_CONNECTION; +start isolation level repeatable read, read write; +NEW_CONNECTION; + start isolation level repeatable read, read write; +NEW_CONNECTION; + start isolation level repeatable read, read write; +NEW_CONNECTION; + + + +start isolation level repeatable read, read write; +NEW_CONNECTION; +start isolation level repeatable read, read write ; +NEW_CONNECTION; +start isolation level repeatable read, read write ; +NEW_CONNECTION; +start isolation level repeatable read, read write + +; +NEW_CONNECTION; +start isolation level repeatable read, read write; +NEW_CONNECTION; +start isolation level repeatable read, read write; +NEW_CONNECTION; +start +isolation +level +repeatable +read, +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read_write; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +&start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start isolation level repeatable read, read write; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write?; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start isolation level repeatable read, read/-write; +NEW_CONNECTION; +begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +BEGIN TRANSACTION NOT DEFERRABLE ISOLATION LEVEL REPEATABLE READ, READ ONLY; +NEW_CONNECTION; +begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; + begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; + begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; + + + +begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +begin transaction not deferrable isolation level repeatable read, read only ; +NEW_CONNECTION; +begin transaction not deferrable isolation level repeatable read, read only ; +NEW_CONNECTION; +begin transaction not deferrable isolation level repeatable read, read only + +; +NEW_CONNECTION; +begin transaction not deferrable isolation 
level repeatable read, read only; +NEW_CONNECTION; +begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +begin +transaction +not +deferrable +isolation +level +repeatable +read, +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin 
transaction not deferrable isolation level repeatable read, read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not 
deferrable isolation level repeatable read, read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin transaction not deferrable isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin transaction not deferrable isolation level repeatable read, read/-only; +NEW_CONNECTION; +start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +START TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ WRITE; +NEW_CONNECTION; +start transaction isolation level repeatable read, read write; +NEW_CONNECTION; + start transaction isolation level repeatable read, read write; +NEW_CONNECTION; + start 
transaction isolation level repeatable read, read write; +NEW_CONNECTION; + + + +start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +start transaction isolation level repeatable read, read write ; +NEW_CONNECTION; +start transaction isolation level repeatable read, read write ; +NEW_CONNECTION; +start transaction isolation level repeatable read, read write + +; +NEW_CONNECTION; +start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +start +transaction +isolation +level +repeatable +read, +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+$start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write); +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start transaction isolation level repeatable read, read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start transaction isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start transaction isolation level repeatable read, read/-write; +NEW_CONNECTION; +begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +BEGIN WORK NOT DEFERRABLE ISOLATION LEVEL REPEATABLE READ, READ WRITE; +NEW_CONNECTION; +begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; + begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; + begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; + + + +begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +begin work not deferrable isolation level repeatable read, read write ; +NEW_CONNECTION; +begin work not deferrable isolation level repeatable read, read write ; 
+NEW_CONNECTION; +begin work not deferrable isolation level repeatable read, read write + +; +NEW_CONNECTION; +begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +begin +work +not +deferrable +isolation +level +repeatable +read, +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, 
read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level 
repeatable read, read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?begin work not deferrable 
isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-begin work not deferrable isolation level repeatable read, read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +begin work not deferrable isolation level repeatable read, read/-write; +NEW_CONNECTION; +start work isolation level repeatable read, read only; +NEW_CONNECTION; +START WORK ISOLATION LEVEL REPEATABLE READ, READ ONLY; +NEW_CONNECTION; +start work isolation level repeatable read, read only; +NEW_CONNECTION; + start work isolation level repeatable read, read only; +NEW_CONNECTION; + start work isolation level repeatable read, read only; +NEW_CONNECTION; + + + +start work isolation level repeatable read, read only; +NEW_CONNECTION; +start work isolation level repeatable read, read only ; +NEW_CONNECTION; +start work isolation 
level repeatable read, read only ; +NEW_CONNECTION; +start work isolation level repeatable read, read only + +; +NEW_CONNECTION; +start work isolation level repeatable read, read only; +NEW_CONNECTION; +start work isolation level repeatable read, read only; +NEW_CONNECTION; +start +work +isolation +level +repeatable +read, +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start work isolation level repeatable read, read only; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +start work isolation level repeatable read, read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start work isolation level repeatable read, read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start work isolation level repeatable read, read/-only; +NEW_CONNECTION; +begin transaction; +commit; +NEW_CONNECTION; +begin transaction; +COMMIT; +NEW_CONNECTION; +begin transaction; +commit; +NEW_CONNECTION; +begin transaction; + commit; +NEW_CONNECTION; +begin transaction; + commit; +NEW_CONNECTION; +begin transaction; + + + +commit; +NEW_CONNECTION; +begin transaction; +commit ; +NEW_CONNECTION; +begin transaction; +commit ; +NEW_CONNECTION; +begin transaction; +commit + +; +NEW_CONNECTION; +begin transaction; +commit; +NEW_CONNECTION; +begin transaction; +commit; +NEW_CONNECTION; +begin transaction; +commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+commit&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+commit+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-commit; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit/-; +NEW_CONNECTION; +begin transaction; +commit transaction; +NEW_CONNECTION; +begin transaction; +COMMIT TRANSACTION; +NEW_CONNECTION; +begin transaction; +commit transaction; +NEW_CONNECTION; +begin transaction; + commit transaction; 
+NEW_CONNECTION; +begin transaction; + commit transaction; +NEW_CONNECTION; +begin transaction; + + + +commit transaction; +NEW_CONNECTION; +begin transaction; +commit transaction ; +NEW_CONNECTION; +begin transaction; +commit transaction ; +NEW_CONNECTION; +begin transaction; +commit transaction + +; +NEW_CONNECTION; +begin transaction; +commit transaction; +NEW_CONNECTION; +begin transaction; +commit transaction; +NEW_CONNECTION; +begin transaction; +commit +transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit%transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit_transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit&transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit$transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction@; 
+NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit@transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit!transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit*transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit(transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit)transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit-transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit+transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction-#; 
+NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit-#transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit/transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit\transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit?transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit-/transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit/#transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-commit transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit/-transaction; +NEW_CONNECTION; +begin transaction; +commit work; +NEW_CONNECTION; +begin transaction; +COMMIT WORK; +NEW_CONNECTION; +begin transaction; +commit work; +NEW_CONNECTION; +begin transaction; + 
commit work; +NEW_CONNECTION; +begin transaction; + commit work; +NEW_CONNECTION; +begin transaction; + + + +commit work; +NEW_CONNECTION; +begin transaction; +commit work ; +NEW_CONNECTION; +begin transaction; +commit work ; +NEW_CONNECTION; +begin transaction; +commit work + +; +NEW_CONNECTION; +begin transaction; +commit work; +NEW_CONNECTION; +begin transaction; +commit work; +NEW_CONNECTION; +begin transaction; +commit +work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo commit work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%commit work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit%work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_commit work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit_work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&commit work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit&work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$commit work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit$work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@commit work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit@work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +!commit work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit!work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*commit work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit*work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(commit work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit(work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)commit work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit)work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-commit work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit-work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++commit work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit+work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#commit work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit-#work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/commit work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work/; +NEW_CONNECTION; +begin 
transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit/work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\commit work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit\work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?commit work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit?work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/commit work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit-/work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#commit work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit/#work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-commit work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit/-work; +NEW_CONNECTION; +begin transaction; +commit and no chain; +NEW_CONNECTION; +begin transaction; +COMMIT AND NO CHAIN; +NEW_CONNECTION; +begin transaction; +commit and no chain; +NEW_CONNECTION; +begin transaction; + commit and no chain; +NEW_CONNECTION; +begin transaction; + commit and no chain; +NEW_CONNECTION; +begin transaction; + + + +commit and no chain; +NEW_CONNECTION; +begin transaction; +commit and no chain ; +NEW_CONNECTION; +begin transaction; +commit and no chain ; +NEW_CONNECTION; +begin transaction; +commit and no chain + +; +NEW_CONNECTION; +begin transaction; +commit and no chain; 
+NEW_CONNECTION; +begin transaction; +commit and no chain; +NEW_CONNECTION; +begin transaction; +commit +and +no +chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo commit and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no chain bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%commit and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no chain%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no%chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_commit and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no chain_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no_chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&commit and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no chain&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no&chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$commit and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no chain$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no$chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@commit and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no chain@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no@chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!commit and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no chain!; +NEW_CONNECTION; +begin transaction; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +commit and no!chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*commit and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no chain*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no*chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(commit and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no chain(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no(chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)commit and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no chain); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no)chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-commit and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no chain-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no-chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++commit and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no chain+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no+chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#commit and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no chain-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no-#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/commit and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no chain/; +NEW_CONNECTION; +begin 
transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\commit and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no chain\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no\chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?commit and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no chain?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no?chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/commit and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no chain-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no-/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#commit and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no chain/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no/#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-commit and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no chain/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit and no/-chain; +NEW_CONNECTION; +begin transaction; +commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +COMMIT TRANSACTION AND NO CHAIN; +NEW_CONNECTION; +begin transaction; +commit transaction and no chain; +NEW_CONNECTION; +begin transaction; + commit transaction and no chain; +NEW_CONNECTION; +begin transaction; + commit transaction and no chain; +NEW_CONNECTION; +begin transaction; + + + +commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +commit 
transaction and no chain ; +NEW_CONNECTION; +begin transaction; +commit transaction and no chain ; +NEW_CONNECTION; +begin transaction; +commit transaction and no chain + +; +NEW_CONNECTION; +begin transaction; +commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +commit +transaction +and +no +chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no chain bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no chain%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no%chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no chain_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no_chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no chain&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no&chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no chain$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no$chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@commit transaction 
and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no chain@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no@chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no chain!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no!chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no chain*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no*chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no chain(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no(chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no chain); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no)chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no chain-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no-chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +commit transaction and no chain+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no+chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no chain-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no-#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no chain/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no chain\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no\chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no chain?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no?chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no chain-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no-/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no 
chain/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no/#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-commit transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no chain/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit transaction and no/-chain; +NEW_CONNECTION; +begin transaction; +commit work and no chain; +NEW_CONNECTION; +begin transaction; +COMMIT WORK AND NO CHAIN; +NEW_CONNECTION; +begin transaction; +commit work and no chain; +NEW_CONNECTION; +begin transaction; + commit work and no chain; +NEW_CONNECTION; +begin transaction; + commit work and no chain; +NEW_CONNECTION; +begin transaction; + + + +commit work and no chain; +NEW_CONNECTION; +begin transaction; +commit work and no chain ; +NEW_CONNECTION; +begin transaction; +commit work and no chain ; +NEW_CONNECTION; +begin transaction; +commit work and no chain + +; +NEW_CONNECTION; +begin transaction; +commit work and no chain; +NEW_CONNECTION; +begin transaction; +commit work and no chain; +NEW_CONNECTION; +begin transaction; +commit +work +and +no +chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo commit work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no chain bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%commit work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no chain%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no%chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_commit work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no chain_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +commit work and no_chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&commit work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no chain&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no&chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$commit work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no chain$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no$chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@commit work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no chain@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no@chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!commit work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no chain!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no!chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*commit work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no chain*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no*chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(commit work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no chain(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no(chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)commit work and no chain; +NEW_CONNECTION; +begin transaction; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no chain); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no)chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-commit work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no chain-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no-chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++commit work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no chain+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no+chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#commit work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no chain-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no-#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/commit work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no chain/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\commit work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no chain\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no\chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?commit work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no chain?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no?chain; +NEW_CONNECTION; 
+begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/commit work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no chain-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no-/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#commit work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no chain/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no/#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-commit work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no chain/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +commit work and no/-chain; +NEW_CONNECTION; +begin transaction; +end; +NEW_CONNECTION; +begin transaction; +END; +NEW_CONNECTION; +begin transaction; +end; +NEW_CONNECTION; +begin transaction; + end; +NEW_CONNECTION; +begin transaction; + end; +NEW_CONNECTION; +begin transaction; + + + +end; +NEW_CONNECTION; +begin transaction; +end ; +NEW_CONNECTION; +begin transaction; +end ; +NEW_CONNECTION; +begin transaction; +end + +; +NEW_CONNECTION; +begin transaction; +end; +NEW_CONNECTION; +begin transaction; +end; +NEW_CONNECTION; +begin transaction; +end; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo end; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%end; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_end; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end_; 
+NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&end; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$end; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@end; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!end; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*end; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(end; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)end; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-end; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end-; 
+NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++end; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#end; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/end; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\end; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?end; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/end; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#end; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-end; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end/-; +NEW_CONNECTION; +begin transaction; +end transaction; 
+NEW_CONNECTION; +begin transaction; +END TRANSACTION; +NEW_CONNECTION; +begin transaction; +end transaction; +NEW_CONNECTION; +begin transaction; + end transaction; +NEW_CONNECTION; +begin transaction; + end transaction; +NEW_CONNECTION; +begin transaction; + + + +end transaction; +NEW_CONNECTION; +begin transaction; +end transaction ; +NEW_CONNECTION; +begin transaction; +end transaction ; +NEW_CONNECTION; +begin transaction; +end transaction + +; +NEW_CONNECTION; +begin transaction; +end transaction; +NEW_CONNECTION; +begin transaction; +end transaction; +NEW_CONNECTION; +begin transaction; +end +transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo end transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%end transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end%transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_end transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end_transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&end transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end&transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$end transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end$transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@end 
transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end@transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!end transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end!transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*end transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end*transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(end transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end(transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)end transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end)transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-end transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end-transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++end transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end+transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#end transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +end transaction-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end-#transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/end transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end/transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\end transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end\transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?end transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end?transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/end transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end-/transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#end transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end/#transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-end transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end/-transaction; +NEW_CONNECTION; +begin transaction; +end work; +NEW_CONNECTION; +begin transaction; +END WORK; +NEW_CONNECTION; +begin transaction; +end work; +NEW_CONNECTION; +begin transaction; + end work; +NEW_CONNECTION; 
+begin transaction; + end work; +NEW_CONNECTION; +begin transaction; + + + +end work; +NEW_CONNECTION; +begin transaction; +end work ; +NEW_CONNECTION; +begin transaction; +end work ; +NEW_CONNECTION; +begin transaction; +end work + +; +NEW_CONNECTION; +begin transaction; +end work; +NEW_CONNECTION; +begin transaction; +end work; +NEW_CONNECTION; +begin transaction; +end +work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo end work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%end work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end%work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_end work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end_work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&end work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end&work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$end work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end$work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@end work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end@work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!end work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work!; 
+NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end!work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*end work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end*work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(end work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end(work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)end work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end)work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-end work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end-work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++end work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end+work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#end work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end-#work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/end work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end/work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\end work; +NEW_CONNECTION; +begin transaction; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +end work\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end\work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?end work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end?work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/end work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end-/work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#end work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end/#work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-end work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end/-work; +NEW_CONNECTION; +begin transaction; +end and no chain; +NEW_CONNECTION; +begin transaction; +END AND NO CHAIN; +NEW_CONNECTION; +begin transaction; +end and no chain; +NEW_CONNECTION; +begin transaction; + end and no chain; +NEW_CONNECTION; +begin transaction; + end and no chain; +NEW_CONNECTION; +begin transaction; + + + +end and no chain; +NEW_CONNECTION; +begin transaction; +end and no chain ; +NEW_CONNECTION; +begin transaction; +end and no chain ; +NEW_CONNECTION; +begin transaction; +end and no chain + +; +NEW_CONNECTION; +begin transaction; +end and no chain; +NEW_CONNECTION; +begin transaction; +end and no chain; +NEW_CONNECTION; +begin transaction; +end +and +no +chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo end and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +end and no chain bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%end and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no chain%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no%chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_end and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no chain_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no_chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&end and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no chain&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no&chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$end and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no chain$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no$chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@end and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no chain@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no@chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!end and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no chain!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no!chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*end and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no chain*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no*chain; 
+NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(end and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no chain(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no(chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)end and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no chain); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no)chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-end and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no chain-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no-chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++end and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no chain+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no+chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#end and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no chain-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no-#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/end and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no chain/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\end and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no chain\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no\chain; +NEW_CONNECTION; +begin transaction; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +?end and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no chain?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no?chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/end and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no chain-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no-/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#end and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no chain/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no/#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-end and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no chain/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end and no/-chain; +NEW_CONNECTION; +begin transaction; +end transaction and no chain; +NEW_CONNECTION; +begin transaction; +END TRANSACTION AND NO CHAIN; +NEW_CONNECTION; +begin transaction; +end transaction and no chain; +NEW_CONNECTION; +begin transaction; + end transaction and no chain; +NEW_CONNECTION; +begin transaction; + end transaction and no chain; +NEW_CONNECTION; +begin transaction; + + + +end transaction and no chain; +NEW_CONNECTION; +begin transaction; +end transaction and no chain ; +NEW_CONNECTION; +begin transaction; +end transaction and no chain ; +NEW_CONNECTION; +begin transaction; +end transaction and no chain + +; +NEW_CONNECTION; +begin transaction; +end transaction and no chain; +NEW_CONNECTION; +begin transaction; +end transaction and no chain; +NEW_CONNECTION; +begin transaction; +end +transaction +and +no +chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo end 
transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no chain bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%end transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no chain%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no%chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_end transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no chain_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no_chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&end transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no chain&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no&chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$end transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no chain$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no$chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@end transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no chain@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no@chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!end transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no chain!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end 
transaction and no!chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*end transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no chain*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no*chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(end transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no chain(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no(chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)end transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no chain); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no)chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-end transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no chain-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no-chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++end transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no chain+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no+chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#end transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no chain-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no-#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/end 
transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no chain/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\end transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no chain\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no\chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?end transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no chain?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no?chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/end transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no chain-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no-/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#end transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no chain/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no/#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-end transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no chain/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end transaction and no/-chain; +NEW_CONNECTION; +begin transaction; +end work and no chain; +NEW_CONNECTION; +begin transaction; +END WORK AND NO CHAIN; +NEW_CONNECTION; +begin transaction; +end work and no chain; 
+NEW_CONNECTION; +begin transaction; + end work and no chain; +NEW_CONNECTION; +begin transaction; + end work and no chain; +NEW_CONNECTION; +begin transaction; + + + +end work and no chain; +NEW_CONNECTION; +begin transaction; +end work and no chain ; +NEW_CONNECTION; +begin transaction; +end work and no chain ; +NEW_CONNECTION; +begin transaction; +end work and no chain + +; +NEW_CONNECTION; +begin transaction; +end work and no chain; +NEW_CONNECTION; +begin transaction; +end work and no chain; +NEW_CONNECTION; +begin transaction; +end +work +and +no +chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo end work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no chain bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%end work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no chain%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no%chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_end work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no chain_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no_chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&end work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no chain&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no&chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$end work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no chain$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no$chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +@end work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no chain@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no@chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!end work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no chain!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no!chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*end work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no chain*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no*chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(end work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no chain(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no(chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)end work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no chain); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no)chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-end work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no chain-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no-chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++end work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no chain+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no+chain; 
+NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#end work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no chain-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no-#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/end work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no chain/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\end work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no chain\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no\chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?end work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no chain?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no?chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/end work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no chain-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no-/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#end work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no chain/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no/#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-end work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no chain/-; +NEW_CONNECTION; +begin 
transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +end work and no/-chain; +NEW_CONNECTION; +begin transaction; +rollback; +NEW_CONNECTION; +begin transaction; +ROLLBACK; +NEW_CONNECTION; +begin transaction; +rollback; +NEW_CONNECTION; +begin transaction; + rollback; +NEW_CONNECTION; +begin transaction; + rollback; +NEW_CONNECTION; +begin transaction; + + + +rollback; +NEW_CONNECTION; +begin transaction; +rollback ; +NEW_CONNECTION; +begin transaction; +rollback ; +NEW_CONNECTION; +begin transaction; +rollback + +; +NEW_CONNECTION; +begin transaction; +rollback; +NEW_CONNECTION; +begin transaction; +rollback; +NEW_CONNECTION; +begin transaction; +rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +rollback@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/rollback; 
+NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-rollback; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback/-; +NEW_CONNECTION; +begin transaction; +rollback transaction; +NEW_CONNECTION; +begin transaction; +ROLLBACK TRANSACTION; +NEW_CONNECTION; +begin transaction; +rollback transaction; +NEW_CONNECTION; +begin transaction; + rollback transaction; +NEW_CONNECTION; +begin transaction; + rollback transaction; +NEW_CONNECTION; +begin transaction; + + + +rollback transaction; +NEW_CONNECTION; +begin transaction; +rollback transaction ; +NEW_CONNECTION; +begin transaction; +rollback transaction ; +NEW_CONNECTION; +begin transaction; +rollback 
transaction + +; +NEW_CONNECTION; +begin transaction; +rollback transaction; +NEW_CONNECTION; +begin transaction; +rollback transaction; +NEW_CONNECTION; +begin transaction; +rollback +transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback%transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback_transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback&transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback$transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback@transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +rollback transaction!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback!transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback*transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback(transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback)transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback-transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback+transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback-#transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/rollback transaction; 
+NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback/transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback\transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback?transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback-/transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback/#transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-rollback transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback/-transaction; +NEW_CONNECTION; +begin transaction; +rollback work; +NEW_CONNECTION; +begin transaction; +ROLLBACK WORK; +NEW_CONNECTION; +begin transaction; +rollback work; +NEW_CONNECTION; +begin transaction; + rollback work; +NEW_CONNECTION; +begin transaction; + rollback work; +NEW_CONNECTION; +begin transaction; + + + +rollback work; +NEW_CONNECTION; 
+begin transaction; +rollback work ; +NEW_CONNECTION; +begin transaction; +rollback work ; +NEW_CONNECTION; +begin transaction; +rollback work + +; +NEW_CONNECTION; +begin transaction; +rollback work; +NEW_CONNECTION; +begin transaction; +rollback work; +NEW_CONNECTION; +begin transaction; +rollback +work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo rollback work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%rollback work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback%work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_rollback work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback_work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&rollback work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback&work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$rollback work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback$work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@rollback work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback@work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!rollback work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +rollback work!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback!work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*rollback work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback*work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(rollback work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback(work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)rollback work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback)work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-rollback work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback-work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++rollback work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback+work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#rollback work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback-#work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/rollback work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+rollback/work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\rollback work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback\work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?rollback work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback?work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/rollback work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback-/work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#rollback work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback/#work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-rollback work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback/-work; +NEW_CONNECTION; +begin transaction; +rollback and no chain; +NEW_CONNECTION; +begin transaction; +ROLLBACK AND NO CHAIN; +NEW_CONNECTION; +begin transaction; +rollback and no chain; +NEW_CONNECTION; +begin transaction; + rollback and no chain; +NEW_CONNECTION; +begin transaction; + rollback and no chain; +NEW_CONNECTION; +begin transaction; + + + +rollback and no chain; +NEW_CONNECTION; +begin transaction; +rollback and no chain ; +NEW_CONNECTION; +begin transaction; +rollback and no chain ; +NEW_CONNECTION; +begin transaction; +rollback and no chain + +; +NEW_CONNECTION; +begin transaction; +rollback and no chain; 
+NEW_CONNECTION; +begin transaction; +rollback and no chain; +NEW_CONNECTION; +begin transaction; +rollback +and +no +chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo rollback and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no chain bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%rollback and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no chain%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no%chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_rollback and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no chain_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no_chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&rollback and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no chain&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no&chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$rollback and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no chain$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no$chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@rollback and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no chain@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no@chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!rollback and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no chain!; 
+NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no!chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*rollback and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no chain*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no*chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(rollback and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no chain(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no(chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)rollback and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no chain); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no)chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-rollback and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no chain-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no-chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++rollback and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no chain+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no+chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#rollback and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no chain-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no-#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/rollback and no chain; +NEW_CONNECTION; +begin transaction; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no chain/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\rollback and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no chain\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no\chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?rollback and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no chain?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no?chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/rollback and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no chain-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no-/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#rollback and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no chain/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no/#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-rollback and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no chain/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback and no/-chain; +NEW_CONNECTION; +begin transaction; +rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +ROLLBACK TRANSACTION AND NO CHAIN; +NEW_CONNECTION; +begin transaction; +rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; + rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; + rollback transaction and no chain; 
+NEW_CONNECTION; +begin transaction; + + + +rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +rollback transaction and no chain ; +NEW_CONNECTION; +begin transaction; +rollback transaction and no chain ; +NEW_CONNECTION; +begin transaction; +rollback transaction and no chain + +; +NEW_CONNECTION; +begin transaction; +rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +rollback +transaction +and +no +chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no chain bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no chain%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no%chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no chain_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no_chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no chain&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no&chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no chain$; +NEW_CONNECTION; +begin transaction; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no$chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no chain@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no@chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no chain!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no!chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no chain*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no*chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no chain(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no(chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no chain); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no)chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no chain-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +rollback transaction and no-chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no chain+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no+chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no chain-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no-#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no chain/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no chain\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no\chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no chain?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no?chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no chain-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+rollback transaction and no-/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no chain/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no/#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-rollback transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no chain/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback transaction and no/-chain; +NEW_CONNECTION; +begin transaction; +rollback work and no chain; +NEW_CONNECTION; +begin transaction; +ROLLBACK WORK AND NO CHAIN; +NEW_CONNECTION; +begin transaction; +rollback work and no chain; +NEW_CONNECTION; +begin transaction; + rollback work and no chain; +NEW_CONNECTION; +begin transaction; + rollback work and no chain; +NEW_CONNECTION; +begin transaction; + + + +rollback work and no chain; +NEW_CONNECTION; +begin transaction; +rollback work and no chain ; +NEW_CONNECTION; +begin transaction; +rollback work and no chain ; +NEW_CONNECTION; +begin transaction; +rollback work and no chain + +; +NEW_CONNECTION; +begin transaction; +rollback work and no chain; +NEW_CONNECTION; +begin transaction; +rollback work and no chain; +NEW_CONNECTION; +begin transaction; +rollback +work +and +no +chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo rollback work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no chain bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%rollback work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no chain%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +rollback work and no%chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_rollback work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no chain_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no_chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&rollback work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no chain&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no&chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$rollback work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no chain$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no$chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@rollback work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no chain@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no@chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!rollback work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no chain!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no!chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*rollback work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no chain*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no*chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(rollback work and no chain; 
+NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no chain(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no(chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)rollback work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no chain); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no)chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-rollback work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no chain-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no-chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++rollback work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no chain+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no+chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#rollback work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no chain-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no-#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/rollback work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no chain/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\rollback work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no chain\; +NEW_CONNECTION; +begin transaction; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +rollback work and no\chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?rollback work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no chain?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no?chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/rollback work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no chain-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no-/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#rollback work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no chain/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no/#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-rollback work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no chain/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +rollback work and no/-chain; +NEW_CONNECTION; +begin transaction; +abort; +NEW_CONNECTION; +begin transaction; +ABORT; +NEW_CONNECTION; +begin transaction; +abort; +NEW_CONNECTION; +begin transaction; + abort; +NEW_CONNECTION; +begin transaction; + abort; +NEW_CONNECTION; +begin transaction; + + + +abort; +NEW_CONNECTION; +begin transaction; +abort ; +NEW_CONNECTION; +begin transaction; +abort ; +NEW_CONNECTION; +begin transaction; +abort + +; +NEW_CONNECTION; +begin transaction; +abort; +NEW_CONNECTION; +begin transaction; +abort; +NEW_CONNECTION; +begin transaction; +abort; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo abort; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +abort bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%abort; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_abort; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&abort; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$abort; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@abort; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!abort; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*abort; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(abort; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort(; 
+NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)abort; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-abort; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++abort; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#abort; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/abort; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\abort; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?abort; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/abort; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort-/; +NEW_CONNECTION; +begin 
transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#abort; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-abort; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort/-; +NEW_CONNECTION; +begin transaction; +abort transaction; +NEW_CONNECTION; +begin transaction; +ABORT TRANSACTION; +NEW_CONNECTION; +begin transaction; +abort transaction; +NEW_CONNECTION; +begin transaction; + abort transaction; +NEW_CONNECTION; +begin transaction; + abort transaction; +NEW_CONNECTION; +begin transaction; + + + +abort transaction; +NEW_CONNECTION; +begin transaction; +abort transaction ; +NEW_CONNECTION; +begin transaction; +abort transaction ; +NEW_CONNECTION; +begin transaction; +abort transaction + +; +NEW_CONNECTION; +begin transaction; +abort transaction; +NEW_CONNECTION; +begin transaction; +abort transaction; +NEW_CONNECTION; +begin transaction; +abort +transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo abort transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%abort transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort%transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_abort transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort_transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +&abort transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort&transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$abort transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort$transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@abort transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort@transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!abort transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort!transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*abort transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort*transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(abort transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort(transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)abort transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort)transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-abort 
transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort-transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++abort transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort+transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#abort transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort-#transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/abort transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort/transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\abort transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort\transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?abort transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort?transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/abort transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort-/transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#abort transaction; 
+NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort/#transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-abort transaction; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort/-transaction; +NEW_CONNECTION; +begin transaction; +abort work; +NEW_CONNECTION; +begin transaction; +ABORT WORK; +NEW_CONNECTION; +begin transaction; +abort work; +NEW_CONNECTION; +begin transaction; + abort work; +NEW_CONNECTION; +begin transaction; + abort work; +NEW_CONNECTION; +begin transaction; + + + +abort work; +NEW_CONNECTION; +begin transaction; +abort work ; +NEW_CONNECTION; +begin transaction; +abort work ; +NEW_CONNECTION; +begin transaction; +abort work + +; +NEW_CONNECTION; +begin transaction; +abort work; +NEW_CONNECTION; +begin transaction; +abort work; +NEW_CONNECTION; +begin transaction; +abort +work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo abort work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%abort work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort%work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_abort work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort_work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&abort work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work&; +NEW_CONNECTION; +begin transaction; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +abort&work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$abort work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort$work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@abort work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort@work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!abort work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort!work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*abort work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort*work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(abort work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort(work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)abort work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort)work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-abort work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort-work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++abort work; +NEW_CONNECTION; +begin transaction; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +abort work+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort+work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#abort work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort-#work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/abort work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort/work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\abort work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort\work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?abort work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort?work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/abort work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort-/work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#abort work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort/#work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-abort work; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort/-work; +NEW_CONNECTION; +begin 
transaction; +abort and no chain; +NEW_CONNECTION; +begin transaction; +ABORT AND NO CHAIN; +NEW_CONNECTION; +begin transaction; +abort and no chain; +NEW_CONNECTION; +begin transaction; + abort and no chain; +NEW_CONNECTION; +begin transaction; + abort and no chain; +NEW_CONNECTION; +begin transaction; + + + +abort and no chain; +NEW_CONNECTION; +begin transaction; +abort and no chain ; +NEW_CONNECTION; +begin transaction; +abort and no chain ; +NEW_CONNECTION; +begin transaction; +abort and no chain + +; +NEW_CONNECTION; +begin transaction; +abort and no chain; +NEW_CONNECTION; +begin transaction; +abort and no chain; +NEW_CONNECTION; +begin transaction; +abort +and +no +chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo abort and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no chain bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%abort and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no chain%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no%chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_abort and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no chain_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no_chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&abort and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no chain&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no&chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$abort and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no chain$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+abort and no$chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@abort and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no chain@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no@chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!abort and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no chain!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no!chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*abort and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no chain*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no*chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(abort and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no chain(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no(chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)abort and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no chain); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no)chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-abort and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no chain-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no-chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++abort and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no chain+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and 
no+chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#abort and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no chain-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no-#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/abort and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no chain/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\abort and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no chain\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no\chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?abort and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no chain?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no?chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/abort and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no chain-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no-/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#abort and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no chain/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no/#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-abort and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and no chain/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort and 
no/-chain; +NEW_CONNECTION; +begin transaction; +abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +ABORT TRANSACTION AND NO CHAIN; +NEW_CONNECTION; +begin transaction; +abort transaction and no chain; +NEW_CONNECTION; +begin transaction; + abort transaction and no chain; +NEW_CONNECTION; +begin transaction; + abort transaction and no chain; +NEW_CONNECTION; +begin transaction; + + + +abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +abort transaction and no chain ; +NEW_CONNECTION; +begin transaction; +abort transaction and no chain ; +NEW_CONNECTION; +begin transaction; +abort transaction and no chain + +; +NEW_CONNECTION; +begin transaction; +abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +abort +transaction +and +no +chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no chain bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no chain%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no%chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no chain_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no_chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no chain&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +abort transaction and no&chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no chain$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no$chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no chain@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no@chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no chain!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no!chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no chain*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no*chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +(abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no chain(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no(chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no chain); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no)chain; +NEW_CONNECTION; +begin 
transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no chain-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no-chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no chain+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no+chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no chain-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no-#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no chain/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no chain\; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no\chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no chain?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no?chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/abort transaction and no 
chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no chain-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no-/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no chain/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no/#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-abort transaction and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no chain/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort transaction and no/-chain; +NEW_CONNECTION; +begin transaction; +abort work and no chain; +NEW_CONNECTION; +begin transaction; +ABORT WORK AND NO CHAIN; +NEW_CONNECTION; +begin transaction; +abort work and no chain; +NEW_CONNECTION; +begin transaction; + abort work and no chain; +NEW_CONNECTION; +begin transaction; + abort work and no chain; +NEW_CONNECTION; +begin transaction; + + + +abort work and no chain; +NEW_CONNECTION; +begin transaction; +abort work and no chain ; +NEW_CONNECTION; +begin transaction; +abort work and no chain ; +NEW_CONNECTION; +begin transaction; +abort work and no chain + +; +NEW_CONNECTION; +begin transaction; +abort work and no chain; +NEW_CONNECTION; +begin transaction; +abort work and no chain; +NEW_CONNECTION; +begin transaction; +abort +work +and +no +chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo abort work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no chain bar; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +%abort work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +abort work and no chain%; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no%chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +_abort work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no chain_; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no_chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +&abort work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no chain&; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no&chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +$abort work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no chain$; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no$chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +@abort work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no chain@; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no@chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +!abort work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no chain!; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no!chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +*abort work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no chain*; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no*chain; +NEW_CONNECTION; +begin transaction; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +(abort work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no chain(; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no(chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +)abort work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no chain); +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no)chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-abort work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no chain-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no-chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT ++abort work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no chain+; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no+chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#abort work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no chain-#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no-#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/abort work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no chain/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +\abort work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no chain\; +NEW_CONNECTION; +begin transaction; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +abort work and no\chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +?abort work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no chain?; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no?chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/abort work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no chain-/; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no-/chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#abort work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no chain/#; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no/#chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-abort work and no chain; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no chain/-; +NEW_CONNECTION; +begin transaction; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort work and no/-chain; +NEW_CONNECTION; +start batch ddl; +NEW_CONNECTION; +START BATCH DDL; +NEW_CONNECTION; +start batch ddl; +NEW_CONNECTION; + start batch ddl; +NEW_CONNECTION; + start batch ddl; +NEW_CONNECTION; + + + +start batch ddl; +NEW_CONNECTION; +start batch ddl ; +NEW_CONNECTION; +start batch ddl ; +NEW_CONNECTION; +start batch ddl + +; +NEW_CONNECTION; +start batch ddl; +NEW_CONNECTION; +start batch ddl; +NEW_CONNECTION; +start +batch +ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl%; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch%ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch_ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch&ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch$ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch@ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch!ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch*ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch(ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch)ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start 
batch-ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch+ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch-#ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch/ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch\ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch?ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch-/ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch/#ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start batch ddl; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch ddl/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch/-ddl; +NEW_CONNECTION; +start batch dml; +NEW_CONNECTION; +START BATCH DML; +NEW_CONNECTION; +start batch dml; +NEW_CONNECTION; + start batch dml; +NEW_CONNECTION; + start batch dml; +NEW_CONNECTION; + + + +start batch dml; +NEW_CONNECTION; +start batch dml ; 
+NEW_CONNECTION; +start batch dml ; +NEW_CONNECTION; +start batch dml + +; +NEW_CONNECTION; +start batch dml; +NEW_CONNECTION; +start batch dml; +NEW_CONNECTION; +start +batch +dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch%dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch_dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch&dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch$dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch@dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch!dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch*dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml(; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +start batch(dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch)dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch-dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch+dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch-#dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch/dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch\dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch?dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch-/dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch/#dml; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-start batch dml; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch dml/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +start batch/-dml; +NEW_CONNECTION; +start batch ddl; +run batch; +NEW_CONNECTION; +start batch ddl; +RUN BATCH; +NEW_CONNECTION; +start batch ddl; +run batch; +NEW_CONNECTION; +start batch ddl; + run batch; +NEW_CONNECTION; +start batch ddl; + run batch; +NEW_CONNECTION; +start batch ddl; + + + +run batch; +NEW_CONNECTION; +start batch ddl; +run batch ; +NEW_CONNECTION; +start batch ddl; +run batch ; +NEW_CONNECTION; +start batch ddl; +run batch + +; +NEW_CONNECTION; +start batch ddl; +run batch; +NEW_CONNECTION; +start batch ddl; +run batch; +NEW_CONNECTION; +start batch ddl; +run +batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch bar; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +%run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch%; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run%batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +_run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch_; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run_batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +&run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch&; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run&batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +$run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch$; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run$batch; +NEW_CONNECTION; +start 
batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +@run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch@; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run@batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +!run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch!; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run!batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +*run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch*; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run*batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +(run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch(; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run(batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +)run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch); +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run)batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +-run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch-; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run-batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT ++run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch+; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run+batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch-#; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run-#batch; 
+NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +/run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch/; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run/batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +\run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch\; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run\batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +?run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch?; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run?batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch-/; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run-/batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch/#; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run/#batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-run batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run batch/-; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +run/-batch; +NEW_CONNECTION; +start batch ddl; +abort batch; +NEW_CONNECTION; +start batch ddl; +ABORT BATCH; +NEW_CONNECTION; +start batch ddl; +abort batch; +NEW_CONNECTION; +start batch ddl; + abort batch; +NEW_CONNECTION; +start batch ddl; + abort batch; +NEW_CONNECTION; +start batch ddl; + + + +abort batch; +NEW_CONNECTION; +start batch ddl; +abort batch ; +NEW_CONNECTION; +start batch ddl; +abort batch ; +NEW_CONNECTION; +start batch ddl; +abort batch + +; +NEW_CONNECTION; +start batch 
ddl; +abort batch; +NEW_CONNECTION; +start batch ddl; +abort batch; +NEW_CONNECTION; +start batch ddl; +abort +batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch bar; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +%abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch%; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort%batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +_abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch_; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort_batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +&abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch&; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort&batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +$abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch$; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort$batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +@abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch@; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort@batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +!abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch!; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort!batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +*abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+abort batch*; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort*batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +(abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch(; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort(batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +)abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch); +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort)batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +-abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch-; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort-batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT ++abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch+; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort+batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch-#; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort-#batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +/abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch/; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort/batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +\abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch\; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort\batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +?abort batch; 
+NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch?; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort?batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch-/; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort-/batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch/#; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort/#batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-abort batch; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort batch/-; +NEW_CONNECTION; +start batch ddl; +@EXPECT EXCEPTION INVALID_ARGUMENT +abort/-batch; +NEW_CONNECTION; +reset all; +NEW_CONNECTION; +RESET ALL; +NEW_CONNECTION; +reset all; +NEW_CONNECTION; + reset all; +NEW_CONNECTION; + reset all; +NEW_CONNECTION; + + + +reset all; +NEW_CONNECTION; +reset all ; +NEW_CONNECTION; +reset all ; +NEW_CONNECTION; +reset all + +; +NEW_CONNECTION; +reset all; +NEW_CONNECTION; +reset all; +NEW_CONNECTION; +reset +all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset%all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset_all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all&; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +reset&all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset$all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset@all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset!all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset*all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset(all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset)all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset-all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset+all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset-#all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all/; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +reset/all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset\all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset?all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset-/all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset/#all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset/-all; +NEW_CONNECTION; +set autocommit = true; +NEW_CONNECTION; +SET AUTOCOMMIT = TRUE; +NEW_CONNECTION; +set autocommit = true; +NEW_CONNECTION; + set autocommit = true; +NEW_CONNECTION; + set autocommit = true; +NEW_CONNECTION; + + + +set autocommit = true; +NEW_CONNECTION; +set autocommit = true ; +NEW_CONNECTION; +set autocommit = true ; +NEW_CONNECTION; +set autocommit = true + +; +NEW_CONNECTION; +set autocommit = true; +NEW_CONNECTION; +set autocommit = true; +NEW_CONNECTION; +set +autocommit += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set autocommit = 
true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set autocommit =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =/-true; +NEW_CONNECTION; +set autocommit = false; +NEW_CONNECTION; +SET AUTOCOMMIT = 
FALSE; +NEW_CONNECTION; +set autocommit = false; +NEW_CONNECTION; + set autocommit = false; +NEW_CONNECTION; + set autocommit = false; +NEW_CONNECTION; + + + +set autocommit = false; +NEW_CONNECTION; +set autocommit = false ; +NEW_CONNECTION; +set autocommit = false ; +NEW_CONNECTION; +set autocommit = false + +; +NEW_CONNECTION; +set autocommit = false; +NEW_CONNECTION; +set autocommit = false; +NEW_CONNECTION; +set +autocommit += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set autocommit =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set autocommit = false; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =/-false; +NEW_CONNECTION; +set autocommit to true; +NEW_CONNECTION; +SET AUTOCOMMIT TO TRUE; +NEW_CONNECTION; +set autocommit to true; +NEW_CONNECTION; + set autocommit to true; +NEW_CONNECTION; + set autocommit to true; +NEW_CONNECTION; + + + +set autocommit to true; +NEW_CONNECTION; +set autocommit to true ; +NEW_CONNECTION; +set autocommit to true ; +NEW_CONNECTION; +set autocommit to true + +; +NEW_CONNECTION; +set autocommit to true; +NEW_CONNECTION; +set autocommit to true; +NEW_CONNECTION; +set +autocommit +to +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set autocommit to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set autocommit to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set autocommit to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to true_; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set autocommit to_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set autocommit to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set autocommit to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set autocommit to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set autocommit to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set autocommit to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set autocommit to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set autocommit to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set autocommit to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set autocommit 
to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set autocommit to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set autocommit to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set autocommit to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set autocommit to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set autocommit to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set autocommit to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set autocommit to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to/-true; +NEW_CONNECTION; +set autocommit to false; +NEW_CONNECTION; +SET AUTOCOMMIT TO FALSE; +NEW_CONNECTION; +set autocommit to false; +NEW_CONNECTION; + set autocommit to 
false; +NEW_CONNECTION; + set autocommit to false; +NEW_CONNECTION; + + + +set autocommit to false; +NEW_CONNECTION; +set autocommit to false ; +NEW_CONNECTION; +set autocommit to false ; +NEW_CONNECTION; +set autocommit to false + +; +NEW_CONNECTION; +set autocommit to false; +NEW_CONNECTION; +set autocommit to false; +NEW_CONNECTION; +set +autocommit +to +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set autocommit to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set autocommit to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set autocommit to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set autocommit to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set autocommit to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set autocommit to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set autocommit to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to!false; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +*set autocommit to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set autocommit to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set autocommit to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set autocommit to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set autocommit to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set autocommit to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set autocommit to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set autocommit to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set autocommit to false; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set autocommit to false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set autocommit to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set autocommit to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set autocommit to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit to/-false; +NEW_CONNECTION; +set spanner.readonly = true; +NEW_CONNECTION; +SET SPANNER.READONLY = TRUE; +NEW_CONNECTION; +set spanner.readonly = true; +NEW_CONNECTION; + set spanner.readonly = true; +NEW_CONNECTION; + set spanner.readonly = true; +NEW_CONNECTION; + + + +set spanner.readonly = true; +NEW_CONNECTION; +set spanner.readonly = true ; +NEW_CONNECTION; +set spanner.readonly = true ; +NEW_CONNECTION; +set spanner.readonly = true + +; +NEW_CONNECTION; +set spanner.readonly = true; +NEW_CONNECTION; +set spanner.readonly = true; +NEW_CONNECTION; +set +spanner.readonly += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.readonly = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.readonly = true; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.readonly = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.readonly = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.readonly = true; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =/-true; +NEW_CONNECTION; +set spanner.readonly = false; +NEW_CONNECTION; +SET SPANNER.READONLY = FALSE; +NEW_CONNECTION; +set spanner.readonly = false; +NEW_CONNECTION; + set spanner.readonly = false; +NEW_CONNECTION; + set spanner.readonly = false; +NEW_CONNECTION; + + + +set spanner.readonly = false; +NEW_CONNECTION; +set spanner.readonly = false ; +NEW_CONNECTION; +set spanner.readonly = false ; +NEW_CONNECTION; +set spanner.readonly = false + +; +NEW_CONNECTION; +set spanner.readonly = false; +NEW_CONNECTION; +set spanner.readonly = false; +NEW_CONNECTION; +set +spanner.readonly += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +@set spanner.readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =-#false; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.readonly = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly =/-false; +NEW_CONNECTION; +set spanner.readonly to true; +NEW_CONNECTION; +SET SPANNER.READONLY TO TRUE; +NEW_CONNECTION; +set spanner.readonly to true; +NEW_CONNECTION; + set spanner.readonly to true; +NEW_CONNECTION; + set spanner.readonly to true; +NEW_CONNECTION; + + + +set spanner.readonly to true; +NEW_CONNECTION; +set spanner.readonly to true ; +NEW_CONNECTION; +set spanner.readonly to true ; +NEW_CONNECTION; +set spanner.readonly to true + +; +NEW_CONNECTION; 
+set spanner.readonly to true; +NEW_CONNECTION; +set spanner.readonly to true; +NEW_CONNECTION; +set +spanner.readonly +to +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.readonly to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.readonly to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.readonly to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.readonly to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.readonly to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.readonly to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.readonly to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.readonly to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to true*; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.readonly to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.readonly to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.readonly to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.readonly to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.readonly to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.readonly to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.readonly to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.readonly to true; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.readonly to true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.readonly to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.readonly to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.readonly to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to/-true; +NEW_CONNECTION; +set spanner.readonly to false; +NEW_CONNECTION; +SET SPANNER.READONLY TO FALSE; +NEW_CONNECTION; +set spanner.readonly to false; +NEW_CONNECTION; + set spanner.readonly to false; +NEW_CONNECTION; + set spanner.readonly to false; +NEW_CONNECTION; + + + +set spanner.readonly to false; +NEW_CONNECTION; +set spanner.readonly to false ; +NEW_CONNECTION; +set spanner.readonly to false ; +NEW_CONNECTION; +set spanner.readonly to false + +; +NEW_CONNECTION; +set spanner.readonly to false; +NEW_CONNECTION; +set spanner.readonly to false; +NEW_CONNECTION; +set +spanner.readonly +to +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.readonly to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.readonly to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+_set spanner.readonly to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.readonly to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.readonly to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.readonly to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.readonly to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.readonly to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.readonly to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.readonly to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly 
to)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.readonly to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.readonly to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.readonly to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.readonly to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.readonly to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.readonly to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.readonly to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.readonly to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to false/#; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.readonly to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.readonly to/-false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +SET SPANNER.RETRY_ABORTS_INTERNALLY = TRUE; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + + + +set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally = true ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally = true ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally = true + +; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set +spanner.retry_aborts_internally += +true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = true bar; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = true%; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =%true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = true_; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =_true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = true&; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =&true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = true$; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =$true; +NEW_CONNECTION; +set spanner.readonly 
= false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = true@; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =@true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = true!; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =!true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = true*; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =*true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = true(; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =(true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set 
spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = true); +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =)true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = true-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =-true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = true+; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =+true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = true-#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =-#true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = true/; 
+NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =/true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = true\; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =\true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = true?; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =?true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = true-/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =-/true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = true/#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.retry_aborts_internally =/#true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = true/-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =/-true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +SET SPANNER.RETRY_ABORTS_INTERNALLY = FALSE; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + + + +set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally = false ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally = false ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally = false + +; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set +spanner.retry_aborts_internally += +false; +NEW_CONNECTION; +set spanner.readonly = 
false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = false bar; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = false%; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =%false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = false_; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =_false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = false&; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =&false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = false$; 
+NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =$false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = false@; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =@false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = false!; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =!false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = false*; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =*false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = false(; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.retry_aborts_internally =(false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = false); +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =)false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = false-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =-false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = false+; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =+false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = false-#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =-#false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +/set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = false/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =/false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = false\; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =\false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = false?; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =?false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = false-/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =-/false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = false/#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =/#false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally = false/-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally =/-false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +SET SPANNER.RETRY_ABORTS_INTERNALLY TO TRUE; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + + + +set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally to true ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally to true ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally to true + +; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = 
false; +set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set +spanner.retry_aborts_internally +to +true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to true bar; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to true%; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to%true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to true_; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to_true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to true&; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to&true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +$set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to true$; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to$true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to true@; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to@true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to true!; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to!true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to true*; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to*true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = 
false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to true(; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to(true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to true); +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to)true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to true-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to-true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to true+; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to+true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to true-#; +NEW_CONNECTION; +set spanner.readonly 
= false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to-#true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to true/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to/true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to true\; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to\true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to true?; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to?true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to true-/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to-/true; 
+NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to true/#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to/#true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to true/-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to/-true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +SET SPANNER.RETRY_ABORTS_INTERNALLY TO FALSE; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + + + +set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally to false ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally to false ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set 
spanner.retry_aborts_internally to false + +; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set +spanner.retry_aborts_internally +to +false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to false bar; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to false%; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to%false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to false_; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to_false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to false&; +NEW_CONNECTION; 
+set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to&false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to false$; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to$false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to false@; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to@false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to false!; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to!false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to false*; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.retry_aborts_internally to*false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to false(; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to(false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to false); +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to)false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to false-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to-false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to false+; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to+false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to false-#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to-#false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to false/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to/false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to false\; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to\false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to false?; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to?false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set 
spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to false-/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to-/false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to false/#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to/#false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to false/-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.retry_aborts_internally to/-false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +SET LOCAL SPANNER.RETRY_ABORTS_INTERNALLY = TRUE; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + + + +set local spanner.retry_aborts_internally = true; 
+NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally = true ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally = true ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally = true + +; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set +local +spanner.retry_aborts_internally += +true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = true bar; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = true%; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =%true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = true_; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
local spanner.retry_aborts_internally =_true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = true&; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =&true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = true$; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =$true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = true@; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =@true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = true!; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =!true; +NEW_CONNECTION; +set 
spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = true*; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =*true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = true(; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =(true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = true); +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =)true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = true-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =-true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION 
INVALID_ARGUMENT ++set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = true+; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =+true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = true-#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =-#true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = true/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =/true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = true\; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =\true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set local spanner.retry_aborts_internally = true; 
+NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = true?; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =?true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = true-/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =-/true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = true/#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =/#true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set local spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = true/-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =/-true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +SET LOCAL 
SPANNER.RETRY_ABORTS_INTERNALLY = FALSE; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + + + +set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally = false ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally = false ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally = false + +; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set +local +spanner.retry_aborts_internally += +false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = false bar; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = false%; +NEW_CONNECTION; +set 
spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =%false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = false_; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =_false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = false&; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =&false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = false$; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =$false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = false@; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =@false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = false!; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =!false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = false*; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =*false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = false(; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =(false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = false); +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local 
spanner.retry_aborts_internally =)false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = false-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =-false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = false+; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =+false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = false-#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =-#false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = false/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =/false; 
+NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = false\; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =\false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = false?; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =?false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = false-/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =-/false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = false/#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =/#false; +NEW_CONNECTION; +set spanner.readonly = false; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set local spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally = false/-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally =/-false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +SET LOCAL SPANNER.RETRY_ABORTS_INTERNALLY TO TRUE; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + + + +set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally to true ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally to true ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally to true + +; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set +local +spanner.retry_aborts_internally +to +true; +NEW_CONNECTION; +set spanner.readonly = 
false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to true bar; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to true%; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to%true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to true_; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to_true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to true&; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to&true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set local spanner.retry_aborts_internally to true$; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to$true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to true@; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to@true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to true!; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to!true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to true*; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to*true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally 
to true(; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to(true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to true); +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to)true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to true-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to-true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to true+; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to+true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to true-#; +NEW_CONNECTION; +set spanner.readonly = false; 
+set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to-#true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to true/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to/true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to true\; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to\true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to true?; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to?true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to true-/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set local spanner.retry_aborts_internally to-/true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to true/#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to/#true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set local spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to true/-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to/-true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +SET LOCAL SPANNER.RETRY_ABORTS_INTERNALLY TO FALSE; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + + + +set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally to false ; +NEW_CONNECTION; +set spanner.readonly = false; +set 
autocommit = false; +set local spanner.retry_aborts_internally to false ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally to false + +; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set +local +spanner.retry_aborts_internally +to +false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to false bar; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to false%; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to%false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to false_; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to_false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +&set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to false&; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to&false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to false$; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to$false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to false@; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to@false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to false!; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to!false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set local 
spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to false*; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to*false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to false(; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to(false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to false); +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to)false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to false-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to-false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set local spanner.retry_aborts_internally to false; 
+NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to false+; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to+false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to false-#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to-#false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to false/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to/false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to false\; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to\false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = 
false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to false?; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to?false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to false-/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to-/false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to false/#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to/#false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set local spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to false/-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.retry_aborts_internally to/-false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +SET SESSION 
SPANNER.RETRY_ABORTS_INTERNALLY = TRUE; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + + + +set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally = true ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally = true ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally = true + +; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set +session +spanner.retry_aborts_internally += +true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = true bar; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = true%; +NEW_CONNECTION; +set 
spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =%true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = true_; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =_true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = true&; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =&true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = true$; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =$true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = true@; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = 
false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =@true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = true!; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =!true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = true*; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =*true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = true(; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =(true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = true); +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
session spanner.retry_aborts_internally =)true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = true-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =-true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = true+; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =+true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = true-#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =-#true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = true/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally 
=/true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = true\; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =\true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = true?; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =?true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = true-/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =-/true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = true/#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =/#true; +NEW_CONNECTION; +set 
spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set session spanner.retry_aborts_internally = true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = true/-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =/-true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +SET SESSION SPANNER.RETRY_ABORTS_INTERNALLY = FALSE; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + + + +set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally = false ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally = false ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally = false + +; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set +session +spanner.retry_aborts_internally 
+= +false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = false bar; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = false%; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =%false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = false_; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =_false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = false&; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =&false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; 
+set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = false$; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =$false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = false@; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =@false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = false!; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =!false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = false*; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =*false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = false(; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =(false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = false); +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =)false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = false-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =-false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = false+; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =+false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = false-#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =-#false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = false/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =/false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = false\; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =\false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = false?; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =?false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
session spanner.retry_aborts_internally = false-/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =-/false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = false/#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =/#false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set session spanner.retry_aborts_internally = false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally = false/-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally =/-false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +SET SESSION SPANNER.RETRY_ABORTS_INTERNALLY TO TRUE; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + + + +set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set 
spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally to true ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally to true ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally to true + +; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set +session +spanner.retry_aborts_internally +to +true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to true bar; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to true%; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to%true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to true_; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set session spanner.retry_aborts_internally to_true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to true&; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to&true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to true$; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to$true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to true@; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to@true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to true!; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session 
spanner.retry_aborts_internally to!true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to true*; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to*true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to true(; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to(true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to true); +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to)true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to true-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally 
to-true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to true+; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to+true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to true-#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to-#true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to true/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to/true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to true\; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to\true; +NEW_CONNECTION; +set 
spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to true?; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to?true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to true-/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to-/true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to true/#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to/#true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set session spanner.retry_aborts_internally to true; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to true/-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to/-true; +NEW_CONNECTION; +set spanner.readonly = false; 
+set autocommit = false; +set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +SET SESSION SPANNER.RETRY_ABORTS_INTERNALLY TO FALSE; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + + + +set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally to false ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally to false ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally to false + +; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set +session +spanner.retry_aborts_internally +to +false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to false bar; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set session spanner.retry_aborts_internally to 
false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to false%; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to%false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to false_; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to_false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to false&; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to&false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to false$; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to$false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; 
+set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to false@; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to@false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to false!; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to!false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to false*; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to*false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to false(; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to(false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = 
false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to false); +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to)false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to false-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to-false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to false+; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to+false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to false-#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to-#false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to false/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to/false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to false\; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to\false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to false?; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to?false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to false-/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to-/false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to false/#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to/#false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set session spanner.retry_aborts_internally to false; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to false/-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session spanner.retry_aborts_internally to/-false; +NEW_CONNECTION; +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode='partitioned_non_atomic'; +NEW_CONNECTION; + set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; + set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; + + + +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC' ; +NEW_CONNECTION; +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC' ; +NEW_CONNECTION; +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC' + +; +NEW_CONNECTION; +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set +spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set 
spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +*set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set-#spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'/-; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.autocommit_dml_mode='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +SET SPANNER.AUTOCOMMIT_DML_MODE='TRANSACTIONAL'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode='transactional'; +NEW_CONNECTION; + set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; + set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; + + + +set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode='TRANSACTIONAL' ; +NEW_CONNECTION; +set spanner.autocommit_dml_mode='TRANSACTIONAL' ; +NEW_CONNECTION; +set spanner.autocommit_dml_mode='TRANSACTIONAL' + +; +NEW_CONNECTION; +set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +set +spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL'&; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +-set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.autocommit_dml_mode='TRANSACTIONAL'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.autocommit_dml_mode='TRANSACTIONAL'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.AUTOCOMMIT_DML_MODE='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode='transactional_with_fallback_to_partitioned_non_atomic'; +NEW_CONNECTION; + set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; + set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; + + + +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' ; +NEW_CONNECTION; +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' ; +NEW_CONNECTION; +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' + +; +NEW_CONNECTION; +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set 
+spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'$; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set$spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.autocommit_dml_mode='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.AUTOCOMMIT_DML_MODE TO 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode to 'partitioned_non_atomic'; +NEW_CONNECTION; + set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; + set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; + + + +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC' ; +NEW_CONNECTION; +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC' ; +NEW_CONNECTION; +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC' + +; +NEW_CONNECTION; +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set +spanner.autocommit_dml_mode +to +'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to%'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 
'PARTITIONED_NON_ATOMIC'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to_'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to&'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to$'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to@'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to!'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to*'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to('PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to)'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to-'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to+'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to-#'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to/'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +\set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to\'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to?'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to-/'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to/#'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'PARTITIONED_NON_ATOMIC'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to/-'PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +SET SPANNER.AUTOCOMMIT_DML_MODE TO 'TRANSACTIONAL'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode to 'transactional'; +NEW_CONNECTION; + set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; 
+NEW_CONNECTION; + set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; + + + +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode to 'TRANSACTIONAL' ; +NEW_CONNECTION; +set spanner.autocommit_dml_mode to 'TRANSACTIONAL' ; +NEW_CONNECTION; +set spanner.autocommit_dml_mode to 'TRANSACTIONAL' + +; +NEW_CONNECTION; +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +set +spanner.autocommit_dml_mode +to +'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to%'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to_'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to&'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'$; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to$'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to@'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to!'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to*'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to('TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to)'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to-'TRANSACTIONAL'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to+'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to-#'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to/'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to\'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to?'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to-/'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set 
spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to/#'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.autocommit_dml_mode to 'TRANSACTIONAL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to/-'TRANSACTIONAL'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.AUTOCOMMIT_DML_MODE TO 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode to 'transactional_with_fallback_to_partitioned_non_atomic'; +NEW_CONNECTION; + set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; + set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; + + + +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' ; +NEW_CONNECTION; +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' ; +NEW_CONNECTION; +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' + +; +NEW_CONNECTION; +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set +spanner.autocommit_dml_mode +to +'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set 
spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to%'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to_'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to&'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to$'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to@'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to!'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to*'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to('TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 
'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to)'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to-'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to+'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to-#'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to/'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +\set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to\'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to?'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to-/'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to/#'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.autocommit_dml_mode to 'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to 
'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.autocommit_dml_mode to/-'TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +set statement_timeout=default; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT=DEFAULT; +NEW_CONNECTION; +set statement_timeout=default; +NEW_CONNECTION; + set statement_timeout=default; +NEW_CONNECTION; + set statement_timeout=default; +NEW_CONNECTION; + + + +set statement_timeout=default; +NEW_CONNECTION; +set statement_timeout=default ; +NEW_CONNECTION; +set statement_timeout=default ; +NEW_CONNECTION; +set statement_timeout=default + +; +NEW_CONNECTION; +set statement_timeout=default; +NEW_CONNECTION; +set statement_timeout=default; +NEW_CONNECTION; +set +statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=default bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=default%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=default_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=default&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=default$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set$statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=default@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=default!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=default*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=default(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=default); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=default-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=default+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
statement_timeout=default-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=default/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=default\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=default?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=default-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=default/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=default/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-statement_timeout=default; +NEW_CONNECTION; +set statement_timeout = default ; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT = DEFAULT ; +NEW_CONNECTION; +set statement_timeout = default ; +NEW_CONNECTION; + set statement_timeout = default ; +NEW_CONNECTION; + set statement_timeout = default ; +NEW_CONNECTION; + + + +set statement_timeout = default ; 
+NEW_CONNECTION; +set statement_timeout = default ; +NEW_CONNECTION; +set statement_timeout = default ; +NEW_CONNECTION; +set statement_timeout = default + +; +NEW_CONNECTION; +set statement_timeout = default ; +NEW_CONNECTION; +set statement_timeout = default ; +NEW_CONNECTION; +set +statement_timeout += +default +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default %; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 
default !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default -#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default/; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +\set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default \; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default/-; +NEW_CONNECTION; +set statement_timeout = DEFAULT ; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT = DEFAULT ; +NEW_CONNECTION; +set statement_timeout = default ; +NEW_CONNECTION; + set statement_timeout = DEFAULT ; +NEW_CONNECTION; + set statement_timeout = DEFAULT ; +NEW_CONNECTION; + + + +set statement_timeout = DEFAULT ; +NEW_CONNECTION; +set statement_timeout = DEFAULT ; +NEW_CONNECTION; +set statement_timeout = DEFAULT ; +NEW_CONNECTION; +set statement_timeout = DEFAULT + +; +NEW_CONNECTION; +set statement_timeout = DEFAULT ; +NEW_CONNECTION; +set statement_timeout = DEFAULT ; +NEW_CONNECTION; +set +statement_timeout += +DEFAULT +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo 
set statement_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT %; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT*; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT -#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT \; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set statement_timeout = DEFAULT ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = DEFAULT/-; +NEW_CONNECTION; +set statement_timeout='1s'; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT='1S'; +NEW_CONNECTION; +set statement_timeout='1s'; +NEW_CONNECTION; + set statement_timeout='1s'; +NEW_CONNECTION; + set statement_timeout='1s'; +NEW_CONNECTION; + + + +set statement_timeout='1s'; +NEW_CONNECTION; +set statement_timeout='1s' ; +NEW_CONNECTION; +set statement_timeout='1s' ; +NEW_CONNECTION; +set statement_timeout='1s' + +; +NEW_CONNECTION; +set statement_timeout='1s'; +NEW_CONNECTION; +set statement_timeout='1s'; +NEW_CONNECTION; +set +statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set 
statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set 
statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set 
statement_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='1s'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-statement_timeout='1s'; +NEW_CONNECTION; +set statement_timeout = '1s' ; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT = '1S' ; +NEW_CONNECTION; +set statement_timeout = '1s' ; +NEW_CONNECTION; + set statement_timeout = '1s' ; +NEW_CONNECTION; + set statement_timeout = '1s' ; +NEW_CONNECTION; + + + +set statement_timeout = '1s' ; +NEW_CONNECTION; +set statement_timeout = '1s' ; +NEW_CONNECTION; +set statement_timeout = '1s' ; +NEW_CONNECTION; +set statement_timeout = '1s' + +; +NEW_CONNECTION; +set statement_timeout = '1s' ; +NEW_CONNECTION; +set statement_timeout = '1s' ; +NEW_CONNECTION; +set +statement_timeout += +'1s' +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' %; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'$; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' -#; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' \; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'/-; +NEW_CONNECTION; +set statement_timeout='100ms'; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT='100MS'; +NEW_CONNECTION; +set statement_timeout='100ms'; +NEW_CONNECTION; + set statement_timeout='100ms'; +NEW_CONNECTION; + set statement_timeout='100ms'; +NEW_CONNECTION; + + + +set statement_timeout='100ms'; +NEW_CONNECTION; +set statement_timeout='100ms' ; +NEW_CONNECTION; +set 
statement_timeout='100ms' ; +NEW_CONNECTION; +set statement_timeout='100ms' + +; +NEW_CONNECTION; +set statement_timeout='100ms'; +NEW_CONNECTION; +set statement_timeout='100ms'; +NEW_CONNECTION; +set +statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +*set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set\statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='100ms'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-statement_timeout='100ms'; +NEW_CONNECTION; +set statement_timeout=100; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT=100; +NEW_CONNECTION; +set statement_timeout=100; +NEW_CONNECTION; + set statement_timeout=100; +NEW_CONNECTION; + set statement_timeout=100; +NEW_CONNECTION; + + + +set statement_timeout=100; +NEW_CONNECTION; +set statement_timeout=100 ; +NEW_CONNECTION; +set statement_timeout=100 ; +NEW_CONNECTION; +set statement_timeout=100 + +; +NEW_CONNECTION; +set statement_timeout=100; +NEW_CONNECTION; +set statement_timeout=100; +NEW_CONNECTION; +set +statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100%; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set%statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set)statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#statement_timeout=100; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout=100/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-statement_timeout=100; +NEW_CONNECTION; +set statement_timeout = 100 ; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT = 100 ; +NEW_CONNECTION; +set statement_timeout = 100 ; +NEW_CONNECTION; + set statement_timeout = 100 ; +NEW_CONNECTION; + set statement_timeout = 100 ; +NEW_CONNECTION; + + + +set statement_timeout = 100 ; +NEW_CONNECTION; +set statement_timeout = 100 ; +NEW_CONNECTION; +set statement_timeout = 100 ; +NEW_CONNECTION; +set statement_timeout = 100 + +; +NEW_CONNECTION; +set statement_timeout = 100 ; +NEW_CONNECTION; +set statement_timeout = 100 ; +NEW_CONNECTION; +set +statement_timeout += +100 +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 %; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set statement_timeout = 100$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 -#; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 \; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100/-; +NEW_CONNECTION; +set statement_timeout='10000us'; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT='10000US'; +NEW_CONNECTION; +set statement_timeout='10000us'; +NEW_CONNECTION; + set statement_timeout='10000us'; +NEW_CONNECTION; + set statement_timeout='10000us'; +NEW_CONNECTION; + + + +set statement_timeout='10000us'; +NEW_CONNECTION; +set statement_timeout='10000us' ; +NEW_CONNECTION; +set 
statement_timeout='10000us' ; +NEW_CONNECTION; +set statement_timeout='10000us' + +; +NEW_CONNECTION; +set statement_timeout='10000us'; +NEW_CONNECTION; +set statement_timeout='10000us'; +NEW_CONNECTION; +set +statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!statement_timeout='10000us'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
statement_timeout='10000us'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='10000us'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-statement_timeout='10000us'; +NEW_CONNECTION; +set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT='9223372036854775807NS'; +NEW_CONNECTION; +set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; + set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; + set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; + + + +set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +set statement_timeout='9223372036854775807ns' ; +NEW_CONNECTION; +set statement_timeout='9223372036854775807ns' ; +NEW_CONNECTION; +set statement_timeout='9223372036854775807ns' + +; +NEW_CONNECTION; +set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +set +statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +foo set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set!statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout='9223372036854775807ns'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-statement_timeout='9223372036854775807ns'; +NEW_CONNECTION; +set statement_timeout to default; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT TO DEFAULT; +NEW_CONNECTION; +set statement_timeout to default; +NEW_CONNECTION; + set statement_timeout to default; +NEW_CONNECTION; + set statement_timeout to default; +NEW_CONNECTION; 
+ + + +set statement_timeout to default; +NEW_CONNECTION; +set statement_timeout to default ; +NEW_CONNECTION; +set statement_timeout to default ; +NEW_CONNECTION; +set statement_timeout to default + +; +NEW_CONNECTION; +set statement_timeout to default; +NEW_CONNECTION; +set statement_timeout to default; +NEW_CONNECTION; +set +statement_timeout +to +default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to default bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to default%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to%default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to default_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to_default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to default&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to&default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to default$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to$default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to default@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to@default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout to default; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set statement_timeout to default!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to!default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to default*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to*default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to default(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to(default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to default); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to)default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to default-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to-default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to default+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to+default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to default-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to-#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to default/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
statement_timeout to/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to default\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to\default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to default?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to?default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to default-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to-/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to default/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to/#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to default/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to/-default; +NEW_CONNECTION; +set statement_timeout to '1s'; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT TO '1S'; +NEW_CONNECTION; +set statement_timeout to '1s'; +NEW_CONNECTION; + set statement_timeout to '1s'; +NEW_CONNECTION; + set statement_timeout to '1s'; +NEW_CONNECTION; + + + +set statement_timeout to '1s'; +NEW_CONNECTION; +set statement_timeout to '1s' ; +NEW_CONNECTION; +set statement_timeout to '1s' ; +NEW_CONNECTION; +set statement_timeout to '1s' + +; +NEW_CONNECTION; +set statement_timeout to '1s'; +NEW_CONNECTION; +set statement_timeout to '1s'; +NEW_CONNECTION; +set +statement_timeout +to +'1s'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +foo set statement_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '1s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '1s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to%'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '1s'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to_'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '1s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to&'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '1s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to$'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '1s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to@'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '1s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to!'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '1s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to*'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +(set statement_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '1s'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to('1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '1s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to)'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '1s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to-'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '1s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to+'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '1s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to-#'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '1s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to/'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '1s'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to\'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '1s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
statement_timeout to?'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '1s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to-/'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '1s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to/#'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '1s'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to/-'1s'; +NEW_CONNECTION; +set statement_timeout to '100ms'; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT TO '100MS'; +NEW_CONNECTION; +set statement_timeout to '100ms'; +NEW_CONNECTION; + set statement_timeout to '100ms'; +NEW_CONNECTION; + set statement_timeout to '100ms'; +NEW_CONNECTION; + + + +set statement_timeout to '100ms'; +NEW_CONNECTION; +set statement_timeout to '100ms' ; +NEW_CONNECTION; +set statement_timeout to '100ms' ; +NEW_CONNECTION; +set statement_timeout to '100ms' + +; +NEW_CONNECTION; +set statement_timeout to '100ms'; +NEW_CONNECTION; +set statement_timeout to '100ms'; +NEW_CONNECTION; +set +statement_timeout +to +'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '100ms' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '100ms'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to%'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout to '100ms'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '100ms'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to_'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '100ms'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to&'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '100ms'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to$'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '100ms'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to@'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '100ms'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to!'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '100ms'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to*'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '100ms'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to('100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '100ms'); +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set statement_timeout to)'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '100ms'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to-'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '100ms'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to+'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '100ms'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to-#'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '100ms'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to/'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '100ms'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to\'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '100ms'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to?'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '100ms'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to-/'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout 
to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '100ms'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to/#'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '100ms'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to/-'100ms'; +NEW_CONNECTION; +set statement_timeout to 100; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT TO 100; +NEW_CONNECTION; +set statement_timeout to 100; +NEW_CONNECTION; + set statement_timeout to 100; +NEW_CONNECTION; + set statement_timeout to 100; +NEW_CONNECTION; + + + +set statement_timeout to 100; +NEW_CONNECTION; +set statement_timeout to 100 ; +NEW_CONNECTION; +set statement_timeout to 100 ; +NEW_CONNECTION; +set statement_timeout to 100 + +; +NEW_CONNECTION; +set statement_timeout to 100; +NEW_CONNECTION; +set statement_timeout to 100; +NEW_CONNECTION; +set +statement_timeout +to +100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to 100 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to 100%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to%100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to 100_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to_100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to 100&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to&100; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to 100$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to$100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to 100@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to@100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to 100!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to!100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to 100*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to*100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to 100(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to(100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to 100); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to)100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to 100-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to-100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to 100+; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set statement_timeout to+100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to 100-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to-#100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to 100/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to/100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to 100\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to\100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to 100?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to?100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to 100-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to-/100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to 100/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to/#100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to 100/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to/-100; +NEW_CONNECTION; +set statement_timeout to '10000us'; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT TO '10000US'; +NEW_CONNECTION; +set statement_timeout to '10000us'; 
+NEW_CONNECTION; + set statement_timeout to '10000us'; +NEW_CONNECTION; + set statement_timeout to '10000us'; +NEW_CONNECTION; + + + +set statement_timeout to '10000us'; +NEW_CONNECTION; +set statement_timeout to '10000us' ; +NEW_CONNECTION; +set statement_timeout to '10000us' ; +NEW_CONNECTION; +set statement_timeout to '10000us' + +; +NEW_CONNECTION; +set statement_timeout to '10000us'; +NEW_CONNECTION; +set statement_timeout to '10000us'; +NEW_CONNECTION; +set +statement_timeout +to +'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '10000us' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '10000us'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to%'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '10000us'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to_'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '10000us'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to&'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '10000us'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to$'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '10000us'@; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set statement_timeout to@'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '10000us'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to!'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '10000us'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to*'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '10000us'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to('10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '10000us'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to)'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '10000us'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to-'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '10000us'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to+'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '10000us'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to-#'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +/set statement_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '10000us'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to/'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '10000us'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to\'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '10000us'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to?'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '10000us'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to-/'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '10000us'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to/#'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '10000us'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to/-'10000us'; +NEW_CONNECTION; +set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT TO '9223372036854775807NS'; +NEW_CONNECTION; +set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; + set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; + set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; + + + +set statement_timeout to 
'9223372036854775807ns'; +NEW_CONNECTION; +set statement_timeout to '9223372036854775807ns' ; +NEW_CONNECTION; +set statement_timeout to '9223372036854775807ns' ; +NEW_CONNECTION; +set statement_timeout to '9223372036854775807ns' + +; +NEW_CONNECTION; +set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +set +statement_timeout +to +'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '9223372036854775807ns' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '9223372036854775807ns'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to%'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '9223372036854775807ns'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to_'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '9223372036854775807ns'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to&'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '9223372036854775807ns'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to$'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout to 
'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '9223372036854775807ns'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to@'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '9223372036854775807ns'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to!'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '9223372036854775807ns'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to*'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '9223372036854775807ns'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to('9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '9223372036854775807ns'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to)'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '9223372036854775807ns'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to-'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to 
'9223372036854775807ns'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to+'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '9223372036854775807ns'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to-#'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '9223372036854775807ns'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to/'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '9223372036854775807ns'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to\'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '9223372036854775807ns'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to?'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '9223372036854775807ns'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to-/'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '9223372036854775807ns'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout 
to/#'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to '9223372036854775807ns'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout to/-'9223372036854775807ns'; +NEW_CONNECTION; +set spanner.transaction_timeout=default; +NEW_CONNECTION; +SET SPANNER.TRANSACTION_TIMEOUT=DEFAULT; +NEW_CONNECTION; +set spanner.transaction_timeout=default; +NEW_CONNECTION; + set spanner.transaction_timeout=default; +NEW_CONNECTION; + set spanner.transaction_timeout=default; +NEW_CONNECTION; + + + +set spanner.transaction_timeout=default; +NEW_CONNECTION; +set spanner.transaction_timeout=default ; +NEW_CONNECTION; +set spanner.transaction_timeout=default ; +NEW_CONNECTION; +set spanner.transaction_timeout=default + +; +NEW_CONNECTION; +set spanner.transaction_timeout=default; +NEW_CONNECTION; +set spanner.transaction_timeout=default; +NEW_CONNECTION; +set +spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=default bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=default%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=default_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.transaction_timeout=default&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=default$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=default@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=default!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=default*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=default(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=default); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.transaction_timeout=default-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=default+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=default-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=default/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=default\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=default?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=default-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_timeout=default; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=default/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_timeout=default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=default/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.transaction_timeout=default; +NEW_CONNECTION; +set spanner.transaction_timeout = default ; +NEW_CONNECTION; +SET SPANNER.TRANSACTION_TIMEOUT = DEFAULT ; +NEW_CONNECTION; +set spanner.transaction_timeout = default ; +NEW_CONNECTION; + set spanner.transaction_timeout = default ; +NEW_CONNECTION; + set spanner.transaction_timeout = default ; +NEW_CONNECTION; + + + +set spanner.transaction_timeout = default ; +NEW_CONNECTION; +set spanner.transaction_timeout = default ; +NEW_CONNECTION; +set spanner.transaction_timeout = default ; +NEW_CONNECTION; +set spanner.transaction_timeout = default + +; +NEW_CONNECTION; +set spanner.transaction_timeout = default ; +NEW_CONNECTION; +set spanner.transaction_timeout = default ; +NEW_CONNECTION; +set +spanner.transaction_timeout += +default +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default %; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.transaction_timeout = default_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.transaction_timeout = default ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default -#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default \; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set 
spanner.transaction_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = default/-; +NEW_CONNECTION; +set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +SET SPANNER.TRANSACTION_TIMEOUT = DEFAULT ; +NEW_CONNECTION; +set spanner.transaction_timeout = default ; +NEW_CONNECTION; + set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; + set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; + + + +set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +set spanner.transaction_timeout = DEFAULT + +; +NEW_CONNECTION; +set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +set +spanner.transaction_timeout += +DEFAULT +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT %; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT -#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT \; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT\; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +?set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_timeout = DEFAULT ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = DEFAULT/-; +NEW_CONNECTION; +set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +SET SPANNER.TRANSACTION_TIMEOUT='1S'; +NEW_CONNECTION; +set spanner.transaction_timeout='1s'; +NEW_CONNECTION; + set spanner.transaction_timeout='1s'; +NEW_CONNECTION; + set spanner.transaction_timeout='1s'; +NEW_CONNECTION; + + + +set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +set spanner.transaction_timeout='1s' ; +NEW_CONNECTION; +set spanner.transaction_timeout='1s' ; +NEW_CONNECTION; +set spanner.transaction_timeout='1s' + +; +NEW_CONNECTION; +set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +set +spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.transaction_timeout='1s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='1s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='1s'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='1s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='1s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='1s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='1s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='1s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.transaction_timeout='1s'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='1s'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='1s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='1s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='1s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='1s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='1s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='1s'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +?set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='1s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='1s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='1s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_timeout='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='1s'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.transaction_timeout='1s'; +NEW_CONNECTION; +set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +SET SPANNER.TRANSACTION_TIMEOUT = '1S' ; +NEW_CONNECTION; +set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; + set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; + set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; + + + +set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +set spanner.transaction_timeout = '1s' + +; +NEW_CONNECTION; +set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +set +spanner.transaction_timeout += +'1s' +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s' 
bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s' %; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s' _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s' &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s' $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s' @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s' !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s' *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.transaction_timeout = '1s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s' (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s' ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s' -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s' +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s' -#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s' /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s' \; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.transaction_timeout = '1s'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s' ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s' -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s' /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s' /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = '1s'/-; +NEW_CONNECTION; +set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +SET SPANNER.TRANSACTION_TIMEOUT='100MS'; +NEW_CONNECTION; +set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; + set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; + set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; + + + +set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +set spanner.transaction_timeout='100ms' ; +NEW_CONNECTION; +set spanner.transaction_timeout='100ms' ; +NEW_CONNECTION; +set spanner.transaction_timeout='100ms' + +; +NEW_CONNECTION; +set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +set +spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo 
set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='100ms' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='100ms'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='100ms'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='100ms'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='100ms'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='100ms'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='100ms'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.transaction_timeout='100ms'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='100ms'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='100ms'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='100ms'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='100ms'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='100ms'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='100ms'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='100ms'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='100ms'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='100ms'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='100ms'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='100ms'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.transaction_timeout='100ms'; +NEW_CONNECTION; +set spanner.transaction_timeout=100; +NEW_CONNECTION; +SET SPANNER.TRANSACTION_TIMEOUT=100; +NEW_CONNECTION; +set spanner.transaction_timeout=100; +NEW_CONNECTION; + set spanner.transaction_timeout=100; +NEW_CONNECTION; + set spanner.transaction_timeout=100; +NEW_CONNECTION; + + + +set spanner.transaction_timeout=100; +NEW_CONNECTION; +set spanner.transaction_timeout=100 ; +NEW_CONNECTION; +set spanner.transaction_timeout=100 ; +NEW_CONNECTION; +set spanner.transaction_timeout=100 + +; +NEW_CONNECTION; +set spanner.transaction_timeout=100; +NEW_CONNECTION; +set spanner.transaction_timeout=100; +NEW_CONNECTION; +set 
+spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=100 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=100%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=100_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=100&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=100$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=100@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=100!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=100*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=100(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=100); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=100-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=100+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=100-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=100/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=100\; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=100?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=100-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=100/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_timeout=100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout=100/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.transaction_timeout=100; +NEW_CONNECTION; +set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +SET SPANNER.TRANSACTION_TIMEOUT = 100 ; +NEW_CONNECTION; +set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; + set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; + set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; + + + +set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +set spanner.transaction_timeout = 100 + +; +NEW_CONNECTION; +set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +set +spanner.transaction_timeout += +100 +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set 
spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100 %; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100 _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100 &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100 $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100 @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100 !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.transaction_timeout = 100 *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100 (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100 ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100 -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100 +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100 -#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100 /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.transaction_timeout = 100 \; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100 ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100 -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100 /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100 /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout = 100/-; +NEW_CONNECTION; +set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +SET SPANNER.TRANSACTION_TIMEOUT='10000US'; +NEW_CONNECTION; +set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; + set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; + set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; + + + +set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +set spanner.transaction_timeout='10000us' ; +NEW_CONNECTION; +set spanner.transaction_timeout='10000us' ; +NEW_CONNECTION; +set spanner.transaction_timeout='10000us' + +; +NEW_CONNECTION; +set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +set 
+spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='10000us' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='10000us'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='10000us'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='10000us'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='10000us'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='10000us'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='10000us'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.transaction_timeout='10000us'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='10000us'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='10000us'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='10000us'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='10000us'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='10000us'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='10000us'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='10000us'/; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set/spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='10000us'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='10000us'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='10000us'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='10000us'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='10000us'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.transaction_timeout='10000us'; +NEW_CONNECTION; +set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +SET SPANNER.TRANSACTION_TIMEOUT='9223372036854775807NS'; +NEW_CONNECTION; +set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; + set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; + set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; + + + +set spanner.transaction_timeout='9223372036854775807ns'; 
+NEW_CONNECTION; +set spanner.transaction_timeout='9223372036854775807ns' ; +NEW_CONNECTION; +set spanner.transaction_timeout='9223372036854775807ns' ; +NEW_CONNECTION; +set spanner.transaction_timeout='9223372036854775807ns' + +; +NEW_CONNECTION; +set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +set +spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='9223372036854775807ns' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='9223372036854775807ns'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='9223372036854775807ns'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='9223372036854775807ns'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='9223372036854775807ns'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set$spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='9223372036854775807ns'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='9223372036854775807ns'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='9223372036854775807ns'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='9223372036854775807ns'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='9223372036854775807ns'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='9223372036854775807ns'-; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set-spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='9223372036854775807ns'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='9223372036854775807ns'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='9223372036854775807ns'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='9223372036854775807ns'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='9223372036854775807ns'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='9223372036854775807ns'-/; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='9223372036854775807ns'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout='9223372036854775807ns'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.transaction_timeout='9223372036854775807ns'; +NEW_CONNECTION; +set spanner.transaction_timeout to default; +NEW_CONNECTION; +SET SPANNER.TRANSACTION_TIMEOUT TO DEFAULT; +NEW_CONNECTION; +set spanner.transaction_timeout to default; +NEW_CONNECTION; + set spanner.transaction_timeout to default; +NEW_CONNECTION; + set spanner.transaction_timeout to default; +NEW_CONNECTION; + + + +set spanner.transaction_timeout to default; +NEW_CONNECTION; +set spanner.transaction_timeout to default ; +NEW_CONNECTION; +set spanner.transaction_timeout to default ; +NEW_CONNECTION; +set spanner.transaction_timeout to default + +; +NEW_CONNECTION; +set spanner.transaction_timeout to default; +NEW_CONNECTION; +set spanner.transaction_timeout to default; +NEW_CONNECTION; +set +spanner.transaction_timeout +to +default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to default bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to default%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.transaction_timeout to%default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to default_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to_default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to default&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to&default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to default$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to$default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to default@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to@default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to default!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to!default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to default*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to*default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.transaction_timeout to default(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to(default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to default); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to)default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to default-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to-default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to default+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to+default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to default-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to-#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to default/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to default\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to\default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set 
spanner.transaction_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to default?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to?default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to default-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to-/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to default/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to/#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_timeout to default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to default/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to/-default; +NEW_CONNECTION; +set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +SET SPANNER.TRANSACTION_TIMEOUT TO '1S'; +NEW_CONNECTION; +set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; + set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; + set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; + + + +set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +set spanner.transaction_timeout to '1s' ; +NEW_CONNECTION; +set spanner.transaction_timeout to '1s' ; +NEW_CONNECTION; +set spanner.transaction_timeout to '1s' + +; +NEW_CONNECTION; +set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +set +spanner.transaction_timeout +to +'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.transaction_timeout to '1s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '1s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to%'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '1s'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to_'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '1s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to&'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '1s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to$'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '1s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to@'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '1s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to!'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '1s'*; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to*'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '1s'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to('1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '1s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to)'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '1s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to-'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '1s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to+'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '1s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to-#'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '1s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to/'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '1s'\; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to\'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '1s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to?'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '1s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to-/'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '1s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to/#'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_timeout to '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '1s'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to/-'1s'; +NEW_CONNECTION; +set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +SET SPANNER.TRANSACTION_TIMEOUT TO '100MS'; +NEW_CONNECTION; +set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; + set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; + set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; + + + +set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +set spanner.transaction_timeout to '100ms' ; +NEW_CONNECTION; +set spanner.transaction_timeout to '100ms' ; +NEW_CONNECTION; +set spanner.transaction_timeout to '100ms' + +; +NEW_CONNECTION; +set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +set +spanner.transaction_timeout 
+to +'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '100ms' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '100ms'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to%'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '100ms'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to_'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '100ms'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to&'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '100ms'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to$'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '100ms'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to@'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '100ms'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to!'100ms'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '100ms'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to*'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '100ms'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to('100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '100ms'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to)'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '100ms'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to-'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '100ms'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to+'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '100ms'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to-#'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '100ms'/; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to/'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '100ms'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to\'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '100ms'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to?'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '100ms'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to-/'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '100ms'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to/#'100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_timeout to '100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '100ms'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to/-'100ms'; +NEW_CONNECTION; +set spanner.transaction_timeout to 100; +NEW_CONNECTION; +SET SPANNER.TRANSACTION_TIMEOUT TO 100; +NEW_CONNECTION; +set spanner.transaction_timeout to 100; +NEW_CONNECTION; + set spanner.transaction_timeout to 100; +NEW_CONNECTION; + set spanner.transaction_timeout to 100; +NEW_CONNECTION; + + + +set spanner.transaction_timeout to 100; +NEW_CONNECTION; +set spanner.transaction_timeout to 100 ; 
+NEW_CONNECTION; +set spanner.transaction_timeout to 100 ; +NEW_CONNECTION; +set spanner.transaction_timeout to 100 + +; +NEW_CONNECTION; +set spanner.transaction_timeout to 100; +NEW_CONNECTION; +set spanner.transaction_timeout to 100; +NEW_CONNECTION; +set +spanner.transaction_timeout +to +100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to 100 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to 100%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to%100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to 100_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to_100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to 100&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to&100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to 100$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to$100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to 100@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to@100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set 
spanner.transaction_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to 100!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to!100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to 100*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to*100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to 100(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to(100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to 100); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to)100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to 100-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to-100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to 100+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to+100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to 100-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to-#100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set 
spanner.transaction_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to 100/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to/100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to 100\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to\100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to 100?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to?100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to 100-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to-/100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to 100/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to/#100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_timeout to 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to 100/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to/-100; +NEW_CONNECTION; +set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +SET SPANNER.TRANSACTION_TIMEOUT TO '10000US'; +NEW_CONNECTION; +set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; + set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; + set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; + + + 
+set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +set spanner.transaction_timeout to '10000us' ; +NEW_CONNECTION; +set spanner.transaction_timeout to '10000us' ; +NEW_CONNECTION; +set spanner.transaction_timeout to '10000us' + +; +NEW_CONNECTION; +set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +set +spanner.transaction_timeout +to +'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '10000us' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '10000us'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to%'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '10000us'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to_'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '10000us'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to&'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '10000us'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to$'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.transaction_timeout to '10000us'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to@'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '10000us'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to!'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '10000us'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to*'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '10000us'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to('10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '10000us'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to)'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '10000us'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to-'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '10000us'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to+'10000us'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '10000us'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to-#'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '10000us'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to/'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '10000us'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to\'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '10000us'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to?'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '10000us'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to-/'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '10000us'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to/#'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_timeout to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to 
'10000us'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to/-'10000us'; +NEW_CONNECTION; +set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +SET SPANNER.TRANSACTION_TIMEOUT TO '9223372036854775807NS'; +NEW_CONNECTION; +set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; + set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; + set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; + + + +set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +set spanner.transaction_timeout to '9223372036854775807ns' ; +NEW_CONNECTION; +set spanner.transaction_timeout to '9223372036854775807ns' ; +NEW_CONNECTION; +set spanner.transaction_timeout to '9223372036854775807ns' + +; +NEW_CONNECTION; +set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +set +spanner.transaction_timeout +to +'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '9223372036854775807ns' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '9223372036854775807ns'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to%'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '9223372036854775807ns'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to_'9223372036854775807ns'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '9223372036854775807ns'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to&'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '9223372036854775807ns'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to$'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '9223372036854775807ns'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to@'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '9223372036854775807ns'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to!'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '9223372036854775807ns'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to*'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '9223372036854775807ns'(; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.transaction_timeout to('9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '9223372036854775807ns'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to)'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '9223372036854775807ns'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to-'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '9223372036854775807ns'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to+'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '9223372036854775807ns'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to-#'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '9223372036854775807ns'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to/'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.transaction_timeout to '9223372036854775807ns'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to\'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '9223372036854775807ns'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to?'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '9223372036854775807ns'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to-/'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '9223372036854775807ns'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to/#'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_timeout to '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to '9223372036854775807ns'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_timeout to/-'9223372036854775807ns'; +NEW_CONNECTION; +set autocommit = false; +set transaction read only; +NEW_CONNECTION; +set autocommit = false; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +set autocommit = false; +set transaction read only; +NEW_CONNECTION; +set autocommit = false; + set transaction read only; +NEW_CONNECTION; +set autocommit = false; + set transaction read only; +NEW_CONNECTION; +set autocommit = false; + + + +set transaction read only; 
+NEW_CONNECTION; +set autocommit = false; +set transaction read only ; +NEW_CONNECTION; +set autocommit = false; +set transaction read only ; +NEW_CONNECTION; +set autocommit = false; +set transaction read only + +; +NEW_CONNECTION; +set autocommit = false; +set transaction read only; +NEW_CONNECTION; +set autocommit = false; +set transaction read only; +NEW_CONNECTION; +set autocommit = false; +set +transaction +read +only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read%only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read_only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read&only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read$only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +@set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read@only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read!only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read*only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read(only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read)only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read-only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set transaction read only; +NEW_CONNECTION; +set autocommit 
= false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read+only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read-#only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read/only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read\only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read?only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read-/only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read 
only/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read/#only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set transaction read only; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read only/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read/-only; +NEW_CONNECTION; +set autocommit = false; +set transaction read write; +NEW_CONNECTION; +set autocommit = false; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +set autocommit = false; +set transaction read write; +NEW_CONNECTION; +set autocommit = false; + set transaction read write; +NEW_CONNECTION; +set autocommit = false; + set transaction read write; +NEW_CONNECTION; +set autocommit = false; + + + +set transaction read write; +NEW_CONNECTION; +set autocommit = false; +set transaction read write ; +NEW_CONNECTION; +set autocommit = false; +set transaction read write ; +NEW_CONNECTION; +set autocommit = false; +set transaction read write + +; +NEW_CONNECTION; +set autocommit = false; +set transaction read write; +NEW_CONNECTION; +set autocommit = false; +set transaction read write; +NEW_CONNECTION; +set autocommit = false; +set +transaction +read +write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read%write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set transaction read write; +NEW_CONNECTION; +set autocommit = 
false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read_write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read&write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read$write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read@write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read!write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read*write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction 
read write(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read(write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read)write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read-write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read+write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read-#write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read/write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write\; +NEW_CONNECTION; +set autocommit = false; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read\write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read?write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read-/write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read/#write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set transaction read write; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read write/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction read/-write; +NEW_CONNECTION; +set autocommit = false; +set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +SET TRANSACTION ISOLATION LEVEL DEFAULT; +NEW_CONNECTION; +set autocommit = false; +set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; + set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; + set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; + + + +set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +set transaction isolation level default ; +NEW_CONNECTION; +set autocommit = 
false; +set transaction isolation level default ; +NEW_CONNECTION; +set autocommit = false; +set transaction isolation level default + +; +NEW_CONNECTION; +set autocommit = false; +set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +set +transaction +isolation +level +default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level default bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level default%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level%default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level default_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level_default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level default&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level&default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level default$; +NEW_CONNECTION; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level$default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level default@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level@default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level default!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level!default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level default*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level*default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level default(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level(default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level default); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level)default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set 
transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level default-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level-default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level default+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level+default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level default-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level-#default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level default/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level/default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level default\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level\default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level default?; +NEW_CONNECTION; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level?default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level default-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level-/default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level default/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level/#default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set transaction isolation level default; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level default/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level/-default; +NEW_CONNECTION; +set autocommit = false; +set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +NEW_CONNECTION; +set autocommit = false; +set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; + set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; + set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; + + + +set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +set transaction isolation level serializable ; +NEW_CONNECTION; +set autocommit = false; +set transaction isolation level serializable ; +NEW_CONNECTION; +set autocommit = false; +set transaction isolation level 
serializable + +; +NEW_CONNECTION; +set autocommit = false; +set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +set +transaction +isolation +level +serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level serializable bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level serializable%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level%serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level serializable_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level_serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level serializable&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level&serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level serializable$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set transaction isolation level$serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level serializable@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level@serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level serializable!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level!serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level serializable*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level*serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level serializable(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level(serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level serializable); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level)serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +-set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level serializable-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level-serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level serializable+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level+serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level serializable-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level-#serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level serializable/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level/serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level serializable\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level\serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = 
false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level serializable?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level?serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level serializable-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level-/serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level serializable/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level/#serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set transaction isolation level serializable; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level serializable/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level/-serializable; +NEW_CONNECTION; +set autocommit = false; +set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +SET TRANSACTION ISOLATION LEVEL REPEATABLE READ; +NEW_CONNECTION; +set autocommit = false; +set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; + set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; + set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; + + + +set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +set transaction isolation level 
repeatable read ; +NEW_CONNECTION; +set autocommit = false; +set transaction isolation level repeatable read ; +NEW_CONNECTION; +set autocommit = false; +set transaction isolation level repeatable read + +; +NEW_CONNECTION; +set autocommit = false; +set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +set +transaction +isolation +level +repeatable +read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable read bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable read%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable%read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable read_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable_read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable read&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable&read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set 
transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable read$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable$read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable read@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable@read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable read!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable!read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable read*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable*read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable read(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable(read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction isolation level repeatable read; +NEW_CONNECTION; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable read); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable)read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable read-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable-read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable read+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable+read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable read-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable-#read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable read/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable/read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
transaction isolation level repeatable read\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable\read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable read?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable?read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable read-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable-/read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable read/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable/#read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set transaction isolation level repeatable read; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable read/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction isolation level repeatable/-read; +NEW_CONNECTION; +set session characteristics as transaction read only; +NEW_CONNECTION; +SET SESSION CHARACTERISTICS AS TRANSACTION READ ONLY; +NEW_CONNECTION; +set session characteristics as transaction read only; +NEW_CONNECTION; + set session characteristics as 
transaction read only; +NEW_CONNECTION; + set session characteristics as transaction read only; +NEW_CONNECTION; + + + +set session characteristics as transaction read only; +NEW_CONNECTION; +set session characteristics as transaction read only ; +NEW_CONNECTION; +set session characteristics as transaction read only ; +NEW_CONNECTION; +set session characteristics as transaction read only + +; +NEW_CONNECTION; +set session characteristics as transaction read only; +NEW_CONNECTION; +set session characteristics as transaction read only; +NEW_CONNECTION; +set +session +characteristics +as +transaction +read +only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set session characteristics as transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read only bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set session characteristics as transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read only%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read%only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set session characteristics as transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read only_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read_only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set session characteristics as transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read only&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read&only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set session characteristics as transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session 
characteristics as transaction read only$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read$only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set session characteristics as transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read only@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read@only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set session characteristics as transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read only!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read!only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set session characteristics as transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read only*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read*only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set session characteristics as transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read only(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read(only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set session characteristics as transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read only); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read)only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set session characteristics as transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read only-; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read-only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set session characteristics as transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read only+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read+only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set session characteristics as transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read only-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read-#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set session characteristics as transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read only/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set session characteristics as transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read only\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read\only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set session characteristics as transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read only?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read?only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set session characteristics as transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read only-/; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set session characteristics as transaction read-/only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set session characteristics as transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read only/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read/#only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set session characteristics as transaction read only; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read only/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read/-only; +NEW_CONNECTION; +set session characteristics as transaction read write; +NEW_CONNECTION; +SET SESSION CHARACTERISTICS AS TRANSACTION READ WRITE; +NEW_CONNECTION; +set session characteristics as transaction read write; +NEW_CONNECTION; + set session characteristics as transaction read write; +NEW_CONNECTION; + set session characteristics as transaction read write; +NEW_CONNECTION; + + + +set session characteristics as transaction read write; +NEW_CONNECTION; +set session characteristics as transaction read write ; +NEW_CONNECTION; +set session characteristics as transaction read write ; +NEW_CONNECTION; +set session characteristics as transaction read write + +; +NEW_CONNECTION; +set session characteristics as transaction read write; +NEW_CONNECTION; +set session characteristics as transaction read write; +NEW_CONNECTION; +set +session +characteristics +as +transaction +read +write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set session characteristics as transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read write bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set session characteristics as transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set session characteristics as transaction read write%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read%write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set session characteristics as transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read write_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read_write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set session characteristics as transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read write&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read&write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set session characteristics as transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read write$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read$write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set session characteristics as transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read write@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read@write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set session characteristics as transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read write!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read!write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set session characteristics as transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
session characteristics as transaction read write*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read*write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set session characteristics as transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read write(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read(write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set session characteristics as transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read write); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read)write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set session characteristics as transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read write-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read-write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set session characteristics as transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read write+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read+write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set session characteristics as transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read write-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read-#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set session characteristics as transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session 
characteristics as transaction read write/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set session characteristics as transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read write\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read\write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set session characteristics as transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read write?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read?write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set session characteristics as transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read write-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read-/write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set session characteristics as transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read write/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read/#write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set session characteristics as transaction read write; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read write/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction read/-write; +NEW_CONNECTION; +set session characteristics as transaction isolation level default; +NEW_CONNECTION; +SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL DEFAULT; +NEW_CONNECTION; 
+set session characteristics as transaction isolation level default; +NEW_CONNECTION; + set session characteristics as transaction isolation level default; +NEW_CONNECTION; + set session characteristics as transaction isolation level default; +NEW_CONNECTION; + + + +set session characteristics as transaction isolation level default; +NEW_CONNECTION; +set session characteristics as transaction isolation level default ; +NEW_CONNECTION; +set session characteristics as transaction isolation level default ; +NEW_CONNECTION; +set session characteristics as transaction isolation level default + +; +NEW_CONNECTION; +set session characteristics as transaction isolation level default; +NEW_CONNECTION; +set session characteristics as transaction isolation level default; +NEW_CONNECTION; +set +session +characteristics +as +transaction +isolation +level +default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set session characteristics as transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level default bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set session characteristics as transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level default%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level%default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set session characteristics as transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level default_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level_default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set session characteristics as transaction isolation level default; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level default&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level&default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set session characteristics as transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level default$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level$default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set session characteristics as transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level default@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level@default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set session characteristics as transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level default!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level!default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set session characteristics as transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level default*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level*default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set session characteristics as transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level default(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set session characteristics as transaction isolation level(default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set session characteristics as transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level default); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level)default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set session characteristics as transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level default-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level-default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set session characteristics as transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level default+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level+default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set session characteristics as transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level default-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level-#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set session characteristics as transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level default/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set session characteristics as 
transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level default\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level\default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set session characteristics as transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level default?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level?default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set session characteristics as transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level default-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level-/default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set session characteristics as transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level default/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level/#default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set session characteristics as transaction isolation level default; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level default/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level/-default; +NEW_CONNECTION; +set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL SERIALIZABLE; +NEW_CONNECTION; +set session characteristics as 
transaction isolation level serializable; +NEW_CONNECTION; + set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; + set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; + + + +set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +set session characteristics as transaction isolation level serializable ; +NEW_CONNECTION; +set session characteristics as transaction isolation level serializable ; +NEW_CONNECTION; +set session characteristics as transaction isolation level serializable + +; +NEW_CONNECTION; +set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +set +session +characteristics +as +transaction +isolation +level +serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level serializable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level serializable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level%serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level serializable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level_serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set session characteristics as 
transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level serializable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level&serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level serializable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level$serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level serializable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level@serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level serializable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level!serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level serializable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level*serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set session characteristics as transaction isolation level serializable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level(serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level serializable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level)serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level serializable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level-serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level serializable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level+serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level serializable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level-#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level 
serializable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level serializable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level\serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level serializable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level?serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level serializable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level-/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level serializable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level/#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set session characteristics as transaction isolation level serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level serializable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
session characteristics as transaction isolation level/-serializable; +NEW_CONNECTION; +set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; +SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL REPEATABLE READ; +NEW_CONNECTION; +set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; + set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; + set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; + + + +set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; +set session characteristics as transaction isolation level repeatable read ; +NEW_CONNECTION; +set session characteristics as transaction isolation level repeatable read ; +NEW_CONNECTION; +set session characteristics as transaction isolation level repeatable read + +; +NEW_CONNECTION; +set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; +set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; +set +session +characteristics +as +transaction +isolation +level +repeatable +read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable read bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable read%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable%read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set session characteristics as transaction isolation level 
repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable read_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable read&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable&read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable read$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable$read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable read@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable@read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable read!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable!read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable read*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable*read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable read(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable(read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable read); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable)read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable read-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable-read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable read+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable+read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set session characteristics as transaction isolation level repeatable read-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable-#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable read/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable read\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable\read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable read?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable?read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable read-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable-/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as 
transaction isolation level repeatable read/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable/#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set session characteristics as transaction isolation level repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable read/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set session characteristics as transaction isolation level repeatable/-read; +NEW_CONNECTION; +set default_transaction_isolation=serializable; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_ISOLATION=SERIALIZABLE; +NEW_CONNECTION; +set default_transaction_isolation=serializable; +NEW_CONNECTION; + set default_transaction_isolation=serializable; +NEW_CONNECTION; + set default_transaction_isolation=serializable; +NEW_CONNECTION; + + + +set default_transaction_isolation=serializable; +NEW_CONNECTION; +set default_transaction_isolation=serializable ; +NEW_CONNECTION; +set default_transaction_isolation=serializable ; +NEW_CONNECTION; +set default_transaction_isolation=serializable + +; +NEW_CONNECTION; +set default_transaction_isolation=serializable; +NEW_CONNECTION; +set default_transaction_isolation=serializable; +NEW_CONNECTION; +set +default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=serializable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=serializable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set 
default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=serializable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=serializable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=serializable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=serializable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=serializable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=serializable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=serializable(; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=serializable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=serializable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=serializable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=serializable-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=serializable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=serializable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +?set default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=serializable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=serializable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=serializable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_isolation=serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=serializable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-default_transaction_isolation=serializable; +NEW_CONNECTION; +set default_transaction_isolation to serializable; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_ISOLATION TO SERIALIZABLE; +NEW_CONNECTION; +set default_transaction_isolation to serializable; +NEW_CONNECTION; + set default_transaction_isolation to serializable; +NEW_CONNECTION; + set default_transaction_isolation to serializable; +NEW_CONNECTION; + + + +set default_transaction_isolation to serializable; +NEW_CONNECTION; +set default_transaction_isolation to serializable ; +NEW_CONNECTION; +set default_transaction_isolation to serializable ; +NEW_CONNECTION; +set default_transaction_isolation to serializable + +; +NEW_CONNECTION; +set default_transaction_isolation to serializable; +NEW_CONNECTION; +set default_transaction_isolation to serializable; +NEW_CONNECTION; +set 
+default_transaction_isolation +to +serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_isolation to serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to serializable bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_isolation to serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to serializable%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to%serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_isolation to serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to serializable_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to_serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_isolation to serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to serializable&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to&serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_isolation to serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to serializable$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to$serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_isolation to serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to serializable@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to@serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_isolation to serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set default_transaction_isolation to serializable!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to!serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_isolation to serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to serializable*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to*serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_isolation to serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to serializable(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to(serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_isolation to serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to serializable); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to)serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_isolation to serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to serializable-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to-serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_isolation to serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to serializable+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to+serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_isolation to serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to serializable-#; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to-#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_isolation to serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to serializable/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_isolation to serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to serializable\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to\serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_isolation to serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to serializable?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to?serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_isolation to serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to serializable-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to-/serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_isolation to serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to serializable/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to/#serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_isolation to serializable; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to serializable/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to/-serializable; 
+NEW_CONNECTION; +set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_ISOLATION TO 'SERIALIZABLE'; +NEW_CONNECTION; +set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; + set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; + set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; + + + +set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +set default_transaction_isolation to 'serializable' ; +NEW_CONNECTION; +set default_transaction_isolation to 'serializable' ; +NEW_CONNECTION; +set default_transaction_isolation to 'serializable' + +; +NEW_CONNECTION; +set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +set +default_transaction_isolation +to +'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'serializable' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'serializable'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to%'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'serializable'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to_'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'serializable'&; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set default_transaction_isolation to&'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'serializable'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to$'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'serializable'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to@'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'serializable'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to!'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'serializable'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to*'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'serializable'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to('serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'serializable'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation 
to)'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'serializable'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to-'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'serializable'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to+'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'serializable'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to-#'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'serializable'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to/'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'serializable'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to\'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'serializable'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to?'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +-/set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'serializable'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to-/'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'serializable'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to/#'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_isolation to 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'serializable'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to/-'serializable'; +NEW_CONNECTION; +set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_ISOLATION = 'SERIALIZABLE'; +NEW_CONNECTION; +set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; + set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; + set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; + + + +set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +set default_transaction_isolation = 'serializable' ; +NEW_CONNECTION; +set default_transaction_isolation = 'serializable' ; +NEW_CONNECTION; +set default_transaction_isolation = 'serializable' + +; +NEW_CONNECTION; +set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +set +default_transaction_isolation += +'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 
'serializable' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'serializable'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =%'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'serializable'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =_'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'serializable'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =&'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'serializable'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =$'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'serializable'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =@'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'serializable'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =!'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set 
default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'serializable'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =*'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'serializable'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =('serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'serializable'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =)'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'serializable'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =-'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'serializable'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =+'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'serializable'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =-#'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'serializable'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =/'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'serializable'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =\'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'serializable'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =?'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'serializable'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =-/'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'serializable'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =/#'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_isolation = 'serializable'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'serializable'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =/-'serializable'; +NEW_CONNECTION; +set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_ISOLATION = "SERIALIZABLE"; +NEW_CONNECTION; +set default_transaction_isolation = 
"serializable"; +NEW_CONNECTION; + set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; + set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; + + + +set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +set default_transaction_isolation = "SERIALIZABLE" ; +NEW_CONNECTION; +set default_transaction_isolation = "SERIALIZABLE" ; +NEW_CONNECTION; +set default_transaction_isolation = "SERIALIZABLE" + +; +NEW_CONNECTION; +set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +set +default_transaction_isolation += +"SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "SERIALIZABLE" bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "SERIALIZABLE"%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =%"SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "SERIALIZABLE"_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =_"SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "SERIALIZABLE"&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =&"SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set default_transaction_isolation = "SERIALIZABLE"$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =$"SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "SERIALIZABLE"@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =@"SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "SERIALIZABLE"!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =!"SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "SERIALIZABLE"*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =*"SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "SERIALIZABLE"(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =("SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "SERIALIZABLE"); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =)"SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "SERIALIZABLE"-; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =-"SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "SERIALIZABLE"+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =+"SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "SERIALIZABLE"-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =-#"SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "SERIALIZABLE"/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =/"SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "SERIALIZABLE"\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =\"SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "SERIALIZABLE"?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =?"SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "SERIALIZABLE"-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
default_transaction_isolation =-/"SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "SERIALIZABLE"/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =/#"SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_isolation = "SERIALIZABLE"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "SERIALIZABLE"/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =/-"SERIALIZABLE"; +NEW_CONNECTION; +set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_ISOLATION=REPEATABLE READ; +NEW_CONNECTION; +set default_transaction_isolation=repeatable read; +NEW_CONNECTION; + set default_transaction_isolation=repeatable read; +NEW_CONNECTION; + set default_transaction_isolation=repeatable read; +NEW_CONNECTION; + + + +set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +set default_transaction_isolation=repeatable read ; +NEW_CONNECTION; +set default_transaction_isolation=repeatable read ; +NEW_CONNECTION; +set default_transaction_isolation=repeatable read + +; +NEW_CONNECTION; +set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +set +default_transaction_isolation=repeatable +read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable read bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable read%; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable%read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable read_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable read&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable&read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable read$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable$read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable read@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable@read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable read!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable!read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable read*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable*read; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable read(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable(read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable read); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable)read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable read-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable-read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable read+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable+read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable read-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable-#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable read/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_isolation=repeatable read; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable read\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable\read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable read?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable?read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable read-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable-/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable read/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable/#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_isolation=repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable read/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation=repeatable/-read; +NEW_CONNECTION; +set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_ISOLATION TO REPEATABLE READ; +NEW_CONNECTION; +set default_transaction_isolation to repeatable read; +NEW_CONNECTION; + set default_transaction_isolation to repeatable read; +NEW_CONNECTION; + set default_transaction_isolation to repeatable read; +NEW_CONNECTION; + + + +set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +set default_transaction_isolation to repeatable read 
; +NEW_CONNECTION; +set default_transaction_isolation to repeatable read ; +NEW_CONNECTION; +set default_transaction_isolation to repeatable read + +; +NEW_CONNECTION; +set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +set +default_transaction_isolation +to +repeatable +read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable read bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable read%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable%read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable read_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable_read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable read&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable&read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable read$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable$read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_isolation to 
repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable read@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable@read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable read!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable!read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable read*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable*read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable read(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable(read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable read); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable)read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable read-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable-read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable read+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable+read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable read-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable-#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable read/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable read\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable\read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable read?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable?read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable read-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable-/read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
default_transaction_isolation to repeatable read/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable/#read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_isolation to repeatable read; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable read/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to repeatable/-read; +NEW_CONNECTION; +set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_ISOLATION TO 'REPEATABLE READ'; +NEW_CONNECTION; +set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; + set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; + set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; + + + +set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +set default_transaction_isolation to 'repeatable read' ; +NEW_CONNECTION; +set default_transaction_isolation to 'repeatable read' ; +NEW_CONNECTION; +set default_transaction_isolation to 'repeatable read' + +; +NEW_CONNECTION; +set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +set +default_transaction_isolation +to +'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable read' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable read'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable%read'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +_set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable read'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable_read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable read'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable&read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable read'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable$read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable read'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable@read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable read'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable!read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable read'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable*read'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +(set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable read'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable(read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable read'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable)read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable read'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable-read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable read'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable+read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable read'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable-#read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable read'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable/read'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +\set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable read'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable\read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable read'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable?read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable read'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable-/read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable read'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable/#read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_isolation to 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable read'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 'repeatable/-read'; +NEW_CONNECTION; +set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_ISOLATION = 'REPEATABLE READ'; +NEW_CONNECTION; +set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; + set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; + set default_transaction_isolation = 'repeatable read'; 
+NEW_CONNECTION; + + + +set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +set default_transaction_isolation = 'repeatable read' ; +NEW_CONNECTION; +set default_transaction_isolation = 'repeatable read' ; +NEW_CONNECTION; +set default_transaction_isolation = 'repeatable read' + +; +NEW_CONNECTION; +set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +set +default_transaction_isolation += +'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable read' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable read'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable%read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable read'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable_read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable read'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable&read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable read'$; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable$read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable read'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable@read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable read'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable!read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable read'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable*read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable read'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable(read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable read'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable)read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable read'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
default_transaction_isolation = 'repeatable-read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable read'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable+read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable read'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable-#read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable read'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable/read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable read'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable\read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable read'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable?read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable read'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
default_transaction_isolation = 'repeatable-/read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable read'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable/#read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_isolation = 'repeatable read'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable read'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = 'repeatable/-read'; +NEW_CONNECTION; +set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_ISOLATION = "REPEATABLE READ"; +NEW_CONNECTION; +set default_transaction_isolation = "repeatable read"; +NEW_CONNECTION; + set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; + set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; + + + +set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +set default_transaction_isolation = "REPEATABLE READ" ; +NEW_CONNECTION; +set default_transaction_isolation = "REPEATABLE READ" ; +NEW_CONNECTION; +set default_transaction_isolation = "REPEATABLE READ" + +; +NEW_CONNECTION; +set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +set +default_transaction_isolation += +"REPEATABLE +READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE READ" bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set default_transaction_isolation = "REPEATABLE READ"%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE%READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE READ"_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE_READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE READ"&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE&READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE READ"$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE$READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE READ"@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE@READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE READ"!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE!READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
default_transaction_isolation = "REPEATABLE READ"*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE*READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE READ"(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE(READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE READ"); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE)READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE READ"-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE-READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE READ"+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE+READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE READ"-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE-#READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
default_transaction_isolation = "REPEATABLE READ"/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE/READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE READ"\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE\READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE READ"?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE?READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE READ"-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE-/READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE READ"/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE/#READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_isolation = "REPEATABLE READ"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE READ"/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = "REPEATABLE/-READ"; +NEW_CONNECTION; +set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_ISOLATION = DEFAULT; +NEW_CONNECTION; +set default_transaction_isolation = 
default; +NEW_CONNECTION; + set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; + set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; + + + +set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +set default_transaction_isolation = DEFAULT ; +NEW_CONNECTION; +set default_transaction_isolation = DEFAULT ; +NEW_CONNECTION; +set default_transaction_isolation = DEFAULT + +; +NEW_CONNECTION; +set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +set +default_transaction_isolation += +DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = DEFAULT bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = DEFAULT%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =%DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = DEFAULT_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =_DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = DEFAULT&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =&DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = DEFAULT$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =$DEFAULT; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = DEFAULT@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =@DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = DEFAULT!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =!DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = DEFAULT*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =*DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = DEFAULT(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =(DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = DEFAULT); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =)DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = DEFAULT-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =-DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = DEFAULT+; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =+DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = DEFAULT-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =-#DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = DEFAULT/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =/DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = DEFAULT\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =\DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = DEFAULT?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =?DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = DEFAULT-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =-/DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_isolation = DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = DEFAULT/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =/#DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_isolation = 
DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation = DEFAULT/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation =/-DEFAULT; +NEW_CONNECTION; +set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_ISOLATION TO DEFAULT; +NEW_CONNECTION; +set default_transaction_isolation to default; +NEW_CONNECTION; + set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; + set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; + + + +set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +set default_transaction_isolation to DEFAULT ; +NEW_CONNECTION; +set default_transaction_isolation to DEFAULT ; +NEW_CONNECTION; +set default_transaction_isolation to DEFAULT + +; +NEW_CONNECTION; +set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +set +default_transaction_isolation +to +DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to DEFAULT bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to DEFAULT%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to%DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to DEFAULT_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to_DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to 
DEFAULT&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to&DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to DEFAULT$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to$DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to DEFAULT@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to@DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to DEFAULT!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to!DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to DEFAULT*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to*DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to DEFAULT(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to(DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to DEFAULT); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to)DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set 
default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to DEFAULT-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to-DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to DEFAULT+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to+DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to DEFAULT-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to-#DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to DEFAULT/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to/DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to DEFAULT\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to\DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to DEFAULT?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to?DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to DEFAULT-/; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set default_transaction_isolation to-/DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to DEFAULT/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to/#DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_isolation to DEFAULT; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to DEFAULT/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_isolation to/-DEFAULT; +NEW_CONNECTION; +set default_transaction_read_only = true; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_READ_ONLY = TRUE; +NEW_CONNECTION; +set default_transaction_read_only = true; +NEW_CONNECTION; + set default_transaction_read_only = true; +NEW_CONNECTION; + set default_transaction_read_only = true; +NEW_CONNECTION; + + + +set default_transaction_read_only = true; +NEW_CONNECTION; +set default_transaction_read_only = true ; +NEW_CONNECTION; +set default_transaction_read_only = true ; +NEW_CONNECTION; +set default_transaction_read_only = true + +; +NEW_CONNECTION; +set default_transaction_read_only = true; +NEW_CONNECTION; +set default_transaction_read_only = true; +NEW_CONNECTION; +set +default_transaction_read_only += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_read_only = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_read_only = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_read_only = true; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_read_only = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_read_only = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_read_only = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_read_only = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_read_only = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_read_only = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set 
default_transaction_read_only = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_read_only = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_read_only = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_read_only = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_read_only = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_read_only = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_read_only = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =?true; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +-/set default_transaction_read_only = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_read_only = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_read_only = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/-true; +NEW_CONNECTION; +set default_transaction_read_only = false; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_READ_ONLY = FALSE; +NEW_CONNECTION; +set default_transaction_read_only = false; +NEW_CONNECTION; + set default_transaction_read_only = false; +NEW_CONNECTION; + set default_transaction_read_only = false; +NEW_CONNECTION; + + + +set default_transaction_read_only = false; +NEW_CONNECTION; +set default_transaction_read_only = false ; +NEW_CONNECTION; +set default_transaction_read_only = false ; +NEW_CONNECTION; +set default_transaction_read_only = false + +; +NEW_CONNECTION; +set default_transaction_read_only = false; +NEW_CONNECTION; +set default_transaction_read_only = false; +NEW_CONNECTION; +set +default_transaction_read_only += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_read_only = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_read_only = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = false%; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_read_only = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_read_only = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_read_only = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_read_only = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_read_only = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_read_only = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_read_only = false; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set default_transaction_read_only = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_read_only = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_read_only = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_read_only = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_read_only = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_read_only = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_read_only = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set 
default_transaction_read_only = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_read_only = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_read_only = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_read_only = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/-false; +NEW_CONNECTION; +set default_transaction_read_only = t; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_READ_ONLY = T; +NEW_CONNECTION; +set default_transaction_read_only = t; +NEW_CONNECTION; + set default_transaction_read_only = t; +NEW_CONNECTION; + set default_transaction_read_only = t; +NEW_CONNECTION; + + + +set default_transaction_read_only = t; +NEW_CONNECTION; +set default_transaction_read_only = t ; +NEW_CONNECTION; +set default_transaction_read_only = t ; +NEW_CONNECTION; +set default_transaction_read_only = t + +; +NEW_CONNECTION; +set default_transaction_read_only = t; +NEW_CONNECTION; +set default_transaction_read_only = t; +NEW_CONNECTION; +set +default_transaction_read_only += +t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_read_only = t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
default_transaction_read_only = t bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_read_only = t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = t%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =%t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_read_only = t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = t_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =_t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_read_only = t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = t&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =&t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_read_only = t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = t$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =$t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_read_only = t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = t@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =@t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_read_only = t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = t!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =!t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_read_only = t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = t*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
default_transaction_read_only =*t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_read_only = t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = t(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =(t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_read_only = t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = t); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =)t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_read_only = t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = t-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_read_only = t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = t+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =+t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_read_only = t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = t-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-#t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_read_only = t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = t/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_read_only = t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = t\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only 
=\t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_read_only = t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = t?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =?t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_read_only = t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = t-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-/t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_read_only = t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = t/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/#t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_read_only = t; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = t/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/-t; +NEW_CONNECTION; +set default_transaction_read_only = f; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_READ_ONLY = F; +NEW_CONNECTION; +set default_transaction_read_only = f; +NEW_CONNECTION; + set default_transaction_read_only = f; +NEW_CONNECTION; + set default_transaction_read_only = f; +NEW_CONNECTION; + + + +set default_transaction_read_only = f; +NEW_CONNECTION; +set default_transaction_read_only = f ; +NEW_CONNECTION; +set default_transaction_read_only = f ; +NEW_CONNECTION; +set default_transaction_read_only = f + +; +NEW_CONNECTION; +set default_transaction_read_only = f; +NEW_CONNECTION; +set default_transaction_read_only = f; +NEW_CONNECTION; +set +default_transaction_read_only += +f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_read_only = f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
default_transaction_read_only = f bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_read_only = f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = f%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =%f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_read_only = f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = f_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =_f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_read_only = f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = f&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =&f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_read_only = f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = f$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =$f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_read_only = f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = f@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =@f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_read_only = f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = f!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =!f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_read_only = f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = f*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
default_transaction_read_only =*f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_read_only = f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = f(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =(f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_read_only = f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = f); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =)f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_read_only = f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = f-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_read_only = f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = f+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =+f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_read_only = f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = f-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-#f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_read_only = f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = f/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_read_only = f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = f\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only 
=\f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_read_only = f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = f?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =?f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_read_only = f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = f-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-/f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_read_only = f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = f/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/#f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_read_only = f; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = f/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/-f; +NEW_CONNECTION; +set default_transaction_read_only to 't'; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_READ_ONLY TO 'T'; +NEW_CONNECTION; +set default_transaction_read_only to 't'; +NEW_CONNECTION; + set default_transaction_read_only to 't'; +NEW_CONNECTION; + set default_transaction_read_only to 't'; +NEW_CONNECTION; + + + +set default_transaction_read_only to 't'; +NEW_CONNECTION; +set default_transaction_read_only to 't' ; +NEW_CONNECTION; +set default_transaction_read_only to 't' ; +NEW_CONNECTION; +set default_transaction_read_only to 't' + +; +NEW_CONNECTION; +set default_transaction_read_only to 't'; +NEW_CONNECTION; +set default_transaction_read_only to 't'; +NEW_CONNECTION; +set +default_transaction_read_only +to +'t'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_read_only to 't'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to 't' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_read_only to 't'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to 't'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to%'t'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_read_only to 't'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to 't'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to_'t'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_read_only to 't'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to 't'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to&'t'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_read_only to 't'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to 't'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to$'t'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_read_only to 't'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to 't'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to@'t'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_read_only to 't'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to 't'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to!'t'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_read_only to 't'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
default_transaction_read_only to 't'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to*'t'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_read_only to 't'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to 't'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to('t'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_read_only to 't'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to 't'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to)'t'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_read_only to 't'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to 't'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to-'t'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_read_only to 't'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to 't'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to+'t'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_read_only to 't'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to 't'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to-#'t'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_read_only to 't'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to 't'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to/'t'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_read_only to 't'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to 't'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to\'t'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_read_only to 't'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to 't'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to?'t'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_read_only to 't'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to 't'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to-/'t'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_read_only to 't'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to 't'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to/#'t'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_read_only to 't'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to 't'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to/-'t'; +NEW_CONNECTION; +set default_transaction_read_only to "f"; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_READ_ONLY TO "F"; +NEW_CONNECTION; +set default_transaction_read_only to "f"; +NEW_CONNECTION; + set default_transaction_read_only to "f"; +NEW_CONNECTION; + set default_transaction_read_only to "f"; +NEW_CONNECTION; + + + +set default_transaction_read_only to "f"; +NEW_CONNECTION; +set default_transaction_read_only to "f" ; +NEW_CONNECTION; +set default_transaction_read_only to "f" ; +NEW_CONNECTION; +set default_transaction_read_only to "f" + +; +NEW_CONNECTION; +set default_transaction_read_only to "f"; +NEW_CONNECTION; +set 
default_transaction_read_only to "f"; +NEW_CONNECTION; +set +default_transaction_read_only +to +"f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_read_only to "f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to "f" bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_read_only to "f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to "f"%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to%"f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_read_only to "f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to "f"_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to_"f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_read_only to "f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to "f"&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to&"f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_read_only to "f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to "f"$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to$"f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_read_only to "f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to "f"@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to@"f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_read_only to "f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to "f"!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set default_transaction_read_only to!"f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_read_only to "f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to "f"*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to*"f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_read_only to "f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to "f"(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to("f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_read_only to "f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to "f"); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to)"f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_read_only to "f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to "f"-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to-"f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_read_only to "f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to "f"+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to+"f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_read_only to "f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to "f"-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to-#"f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_read_only to "f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to "f"/; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to/"f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_read_only to "f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to "f"\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to\"f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_read_only to "f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to "f"?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to?"f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_read_only to "f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to "f"-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to-/"f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_read_only to "f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to "f"/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to/#"f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_read_only to "f"; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to "f"/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only to/-"f"; +NEW_CONNECTION; +set default_transaction_read_only = on; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_READ_ONLY = ON; +NEW_CONNECTION; +set default_transaction_read_only = on; +NEW_CONNECTION; + set default_transaction_read_only = on; +NEW_CONNECTION; + set default_transaction_read_only = on; +NEW_CONNECTION; + + + +set default_transaction_read_only = on; +NEW_CONNECTION; +set default_transaction_read_only = on ; +NEW_CONNECTION; +set 
default_transaction_read_only = on ; +NEW_CONNECTION; +set default_transaction_read_only = on + +; +NEW_CONNECTION; +set default_transaction_read_only = on; +NEW_CONNECTION; +set default_transaction_read_only = on; +NEW_CONNECTION; +set +default_transaction_read_only += +on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_read_only = on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = on bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_read_only = on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = on%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =%on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_read_only = on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = on_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =_on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_read_only = on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = on&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =&on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_read_only = on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = on$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =$on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_read_only = on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = on@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =@on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_read_only = on; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = on!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =!on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_read_only = on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = on*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =*on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_read_only = on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = on(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =(on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_read_only = on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = on); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =)on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_read_only = on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = on-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_read_only = on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = on+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =+on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_read_only = on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = on-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-#on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_read_only = on; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = on/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_read_only = on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = on\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =\on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_read_only = on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = on?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =?on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_read_only = on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = on-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-/on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_read_only = on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = on/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/#on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_read_only = on; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = on/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/-on; +NEW_CONNECTION; +set default_transaction_read_only = off; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_READ_ONLY = OFF; +NEW_CONNECTION; +set default_transaction_read_only = off; +NEW_CONNECTION; + set default_transaction_read_only = off; +NEW_CONNECTION; + set default_transaction_read_only = off; +NEW_CONNECTION; + + + +set default_transaction_read_only = off; +NEW_CONNECTION; 
+set default_transaction_read_only = off ; +NEW_CONNECTION; +set default_transaction_read_only = off ; +NEW_CONNECTION; +set default_transaction_read_only = off + +; +NEW_CONNECTION; +set default_transaction_read_only = off; +NEW_CONNECTION; +set default_transaction_read_only = off; +NEW_CONNECTION; +set +default_transaction_read_only += +off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_read_only = off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = off bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_read_only = off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = off%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =%off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_read_only = off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = off_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =_off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_read_only = off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = off&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =&off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_read_only = off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = off$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =$off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_read_only = off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = off@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =@off; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_read_only = off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = off!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =!off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_read_only = off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = off*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =*off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_read_only = off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = off(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =(off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_read_only = off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = off); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =)off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_read_only = off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = off-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_read_only = off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = off+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =+off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_read_only = off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = off-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-#off; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_read_only = off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = off/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_read_only = off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = off\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =\off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_read_only = off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = off?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =?off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_read_only = off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = off-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-/off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_read_only = off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = off/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/#off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_read_only = off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = off/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/-off; +NEW_CONNECTION; +set default_transaction_read_only = 1; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_READ_ONLY = 1; +NEW_CONNECTION; +set default_transaction_read_only = 1; +NEW_CONNECTION; + set default_transaction_read_only = 1; +NEW_CONNECTION; + set 
default_transaction_read_only = 1; +NEW_CONNECTION; + + + +set default_transaction_read_only = 1; +NEW_CONNECTION; +set default_transaction_read_only = 1 ; +NEW_CONNECTION; +set default_transaction_read_only = 1 ; +NEW_CONNECTION; +set default_transaction_read_only = 1 + +; +NEW_CONNECTION; +set default_transaction_read_only = 1; +NEW_CONNECTION; +set default_transaction_read_only = 1; +NEW_CONNECTION; +set +default_transaction_read_only += +1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_read_only = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 1 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_read_only = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 1%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =%1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_read_only = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 1_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =_1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_read_only = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 1&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =&1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_read_only = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 1$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =$1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_read_only = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 1@; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set default_transaction_read_only =@1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_read_only = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 1!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =!1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_read_only = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 1*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =*1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_read_only = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 1(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =(1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_read_only = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 1); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =)1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_read_only = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 1-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_read_only = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 1+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =+1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_read_only = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 1-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
default_transaction_read_only =-#1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_read_only = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 1/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_read_only = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 1\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =\1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_read_only = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 1?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =?1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_read_only = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 1-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-/1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_read_only = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 1/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/#1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_read_only = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 1/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/-1; +NEW_CONNECTION; +set default_transaction_read_only = 0; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_READ_ONLY = 0; +NEW_CONNECTION; +set default_transaction_read_only = 0; +NEW_CONNECTION; + set default_transaction_read_only = 0; +NEW_CONNECTION; + set 
default_transaction_read_only = 0; +NEW_CONNECTION; + + + +set default_transaction_read_only = 0; +NEW_CONNECTION; +set default_transaction_read_only = 0 ; +NEW_CONNECTION; +set default_transaction_read_only = 0 ; +NEW_CONNECTION; +set default_transaction_read_only = 0 + +; +NEW_CONNECTION; +set default_transaction_read_only = 0; +NEW_CONNECTION; +set default_transaction_read_only = 0; +NEW_CONNECTION; +set +default_transaction_read_only += +0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_read_only = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 0 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_read_only = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 0%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =%0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_read_only = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 0_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =_0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_read_only = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 0&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =&0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_read_only = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 0$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =$0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_read_only = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 0@; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set default_transaction_read_only =@0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_read_only = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 0!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =!0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_read_only = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 0*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =*0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_read_only = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 0(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =(0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_read_only = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 0); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =)0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_read_only = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 0-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_read_only = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 0+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =+0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_read_only = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 0-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
default_transaction_read_only =-#0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_read_only = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 0/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_read_only = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 0\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =\0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_read_only = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 0?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =?0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_read_only = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 0-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-/0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_read_only = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 0/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/#0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_read_only = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 0/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/-0; +NEW_CONNECTION; +set default_transaction_read_only = yes; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_READ_ONLY = YES; +NEW_CONNECTION; +set default_transaction_read_only = yes; +NEW_CONNECTION; + set default_transaction_read_only = yes; +NEW_CONNECTION; + set 
default_transaction_read_only = yes; +NEW_CONNECTION; + + + +set default_transaction_read_only = yes; +NEW_CONNECTION; +set default_transaction_read_only = yes ; +NEW_CONNECTION; +set default_transaction_read_only = yes ; +NEW_CONNECTION; +set default_transaction_read_only = yes + +; +NEW_CONNECTION; +set default_transaction_read_only = yes; +NEW_CONNECTION; +set default_transaction_read_only = yes; +NEW_CONNECTION; +set +default_transaction_read_only += +yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_read_only = yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = yes bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_read_only = yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = yes%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =%yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_read_only = yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = yes_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =_yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_read_only = yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = yes&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =&yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_read_only = yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = yes$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =$yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set default_transaction_read_only = yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = 
yes@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =@yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_read_only = yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = yes!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =!yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_read_only = yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = yes*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =*yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_read_only = yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = yes(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =(yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_read_only = yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = yes); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =)yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_read_only = yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = yes-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_read_only = yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = yes+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =+yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_read_only = yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
default_transaction_read_only = yes-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-#yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_read_only = yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = yes/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_read_only = yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = yes\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =\yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_read_only = yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = yes?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =?yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_read_only = yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = yes-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-/yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_read_only = yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = yes/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/#yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_read_only = yes; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = yes/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/-yes; +NEW_CONNECTION; +set default_transaction_read_only = no; +NEW_CONNECTION; +SET DEFAULT_TRANSACTION_READ_ONLY = NO; +NEW_CONNECTION; 
+set default_transaction_read_only = no; +NEW_CONNECTION; + set default_transaction_read_only = no; +NEW_CONNECTION; + set default_transaction_read_only = no; +NEW_CONNECTION; + + + +set default_transaction_read_only = no; +NEW_CONNECTION; +set default_transaction_read_only = no ; +NEW_CONNECTION; +set default_transaction_read_only = no ; +NEW_CONNECTION; +set default_transaction_read_only = no + +; +NEW_CONNECTION; +set default_transaction_read_only = no; +NEW_CONNECTION; +set default_transaction_read_only = no; +NEW_CONNECTION; +set +default_transaction_read_only += +no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_read_only = no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = no bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_read_only = no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = no%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =%no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_read_only = no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = no_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =_no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_read_only = no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = no&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =&no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_read_only = no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = no$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =$no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set 
default_transaction_read_only = no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = no@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =@no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_read_only = no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = no!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =!no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_read_only = no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = no*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =*no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_read_only = no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = no(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =(no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_read_only = no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = no); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =)no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_read_only = no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = no-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_read_only = no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = no+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =+no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set 
default_transaction_read_only = no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = no-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-#no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_read_only = no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = no/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_read_only = no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = no\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =\no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_read_only = no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = no?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =?no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_read_only = no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = no-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-/no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_read_only = no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = no/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/#no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_read_only = no; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = no/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/-no; +NEW_CONNECTION; +set default_transaction_read_only = y; 
+NEW_CONNECTION; +SET DEFAULT_TRANSACTION_READ_ONLY = Y; +NEW_CONNECTION; +set default_transaction_read_only = y; +NEW_CONNECTION; + set default_transaction_read_only = y; +NEW_CONNECTION; + set default_transaction_read_only = y; +NEW_CONNECTION; + + + +set default_transaction_read_only = y; +NEW_CONNECTION; +set default_transaction_read_only = y ; +NEW_CONNECTION; +set default_transaction_read_only = y ; +NEW_CONNECTION; +set default_transaction_read_only = y + +; +NEW_CONNECTION; +set default_transaction_read_only = y; +NEW_CONNECTION; +set default_transaction_read_only = y; +NEW_CONNECTION; +set +default_transaction_read_only += +y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_read_only = y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = y bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_read_only = y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = y%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =%y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_read_only = y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = y_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =_y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_read_only = y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = y&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =&y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_read_only = y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = y$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =$y; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +@set default_transaction_read_only = y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = y@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =@y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_read_only = y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = y!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =!y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_read_only = y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = y*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =*y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_read_only = y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = y(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =(y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_read_only = y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = y); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =)y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_read_only = y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = y-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_read_only = y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = y+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =+y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set 
default_transaction_read_only = y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = y-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-#y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_read_only = y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = y/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_read_only = y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = y\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =\y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_read_only = y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = y?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =?y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_read_only = y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = y-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-/y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_read_only = y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = y/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/#y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_read_only = y; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = y/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/-y; +NEW_CONNECTION; +set default_transaction_read_only = n; +NEW_CONNECTION; +SET 
DEFAULT_TRANSACTION_READ_ONLY = N; +NEW_CONNECTION; +set default_transaction_read_only = n; +NEW_CONNECTION; + set default_transaction_read_only = n; +NEW_CONNECTION; + set default_transaction_read_only = n; +NEW_CONNECTION; + + + +set default_transaction_read_only = n; +NEW_CONNECTION; +set default_transaction_read_only = n ; +NEW_CONNECTION; +set default_transaction_read_only = n ; +NEW_CONNECTION; +set default_transaction_read_only = n + +; +NEW_CONNECTION; +set default_transaction_read_only = n; +NEW_CONNECTION; +set default_transaction_read_only = n; +NEW_CONNECTION; +set +default_transaction_read_only += +n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set default_transaction_read_only = n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = n bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set default_transaction_read_only = n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = n%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =%n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set default_transaction_read_only = n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = n_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =_n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set default_transaction_read_only = n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = n&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =&n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set default_transaction_read_only = n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = n$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =$n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set 
default_transaction_read_only = n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = n@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =@n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set default_transaction_read_only = n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = n!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =!n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set default_transaction_read_only = n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = n*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =*n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set default_transaction_read_only = n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = n(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =(n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set default_transaction_read_only = n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = n); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =)n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set default_transaction_read_only = n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = n-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set default_transaction_read_only = n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = n+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =+n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set default_transaction_read_only = 
n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = n-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-#n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set default_transaction_read_only = n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = n/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set default_transaction_read_only = n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = n\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =\n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set default_transaction_read_only = n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = n?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =?n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set default_transaction_read_only = n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = n-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =-/n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set default_transaction_read_only = n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = n/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/#n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set default_transaction_read_only = n; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only = n/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set default_transaction_read_only =/-n; +NEW_CONNECTION; +set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +SET 
SPANNER.READ_ONLY_STALENESS='STRONG'; +NEW_CONNECTION; +set spanner.read_only_staleness='strong'; +NEW_CONNECTION; + set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; + set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +set spanner.read_only_staleness='STRONG' ; +NEW_CONNECTION; +set spanner.read_only_staleness='STRONG' ; +NEW_CONNECTION; +set spanner.read_only_staleness='STRONG' + +; +NEW_CONNECTION; +set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +set +spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='STRONG' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='STRONG'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='STRONG'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='STRONG'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='STRONG'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set$spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='STRONG'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='STRONG'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='STRONG'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='STRONG'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='STRONG'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='STRONG'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='STRONG'+; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='STRONG'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='STRONG'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='STRONG'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='STRONG'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='STRONG'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='STRONG'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness='STRONG'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.read_only_staleness='STRONG'; +NEW_CONNECTION; +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +set spanner.read_only_staleness='min_read_timestamp 2018-01-02t03:04:05.123-08:00'; +NEW_CONNECTION; + set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; + set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00' ; +NEW_CONNECTION; +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00' ; +NEW_CONNECTION; +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00' + +; +NEW_CONNECTION; +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +set +spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness='MIN_READ_TIMESTAMP%2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP_2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP&2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP$2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP@2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 
2018-01-02T03:04:05.123-08:00'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP!2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP*2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP(2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP)2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP-2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP+2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP-#2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP/2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP\2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP?2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set 
spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP-/2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP/#2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP/-2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +set spanner.read_only_staleness='min_read_timestamp 2018-01-02t03:04:05.123z'; +NEW_CONNECTION; + set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; + set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z' ; +NEW_CONNECTION; +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z' ; +NEW_CONNECTION; 
+set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z' + +; +NEW_CONNECTION; +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +set +spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP%2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP_2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP&2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP$2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP@2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP!2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP*2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP(2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness='MIN_READ_TIMESTAMP 
2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP)2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP-2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP+2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP-#2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP/2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set 
spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP\2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP?2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP-/2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP/#2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP/-2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +set 
spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +set spanner.read_only_staleness='min_read_timestamp 2018-01-02t03:04:05.123+07:45'; +NEW_CONNECTION; + set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; + set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45' ; +NEW_CONNECTION; +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45' ; +NEW_CONNECTION; +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45' + +; +NEW_CONNECTION; +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +set +spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP%2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set 
spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP_2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP&2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP$2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP@2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness='MIN_READ_TIMESTAMP!2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP*2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP(2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP)2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP-2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 
2018-01-02T03:04:05.123+07:45'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP+2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP-#2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP/2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP\2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP?2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP-/2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP/#2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MIN_READ_TIMESTAMP/-2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +set spanner.read_only_staleness='read_timestamp 2018-01-02t03:04:05.54321-07:00'; +NEW_CONNECTION; + set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; + set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00' ; +NEW_CONNECTION; +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00' ; +NEW_CONNECTION; +set spanner.read_only_staleness='READ_TIMESTAMP 
2018-01-02T03:04:05.54321-07:00' + +; +NEW_CONNECTION; +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +set +spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP%2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP_2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP&2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP$2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP@2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP!2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP*2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP(2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set 
spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP)2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP-2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP+2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP-#2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP/2018-01-02T03:04:05.54321-07:00'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP\2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP?2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP-/2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP/#2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness='READ_TIMESTAMP/-2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +set spanner.read_only_staleness='read_timestamp 2018-01-02t03:04:05.54321z'; +NEW_CONNECTION; + set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; + set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z' ; +NEW_CONNECTION; +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z' ; +NEW_CONNECTION; +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z' + +; +NEW_CONNECTION; +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +set +spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP%2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set 
spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP_2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP&2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP$2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP@2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP!2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set 
spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP*2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP(2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP)2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP-2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP+2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set 
spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP-#2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP/2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP\2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP?2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP-/2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set 
spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP/#2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP/-2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +set spanner.read_only_staleness='read_timestamp 2018-01-02t03:04:05.54321+05:30'; +NEW_CONNECTION; + set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; + set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30' ; +NEW_CONNECTION; +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30' ; +NEW_CONNECTION; +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30' + +; +NEW_CONNECTION; +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +set +spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +foo set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP%2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP_2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP&2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP$2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness='READ_TIMESTAMP 
2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP@2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP!2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP*2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP(2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP)2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+-set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP-2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP+2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP-#2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP/2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP\2018-01-02T03:04:05.54321+05:30'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP?2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP-/2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP/#2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='READ_TIMESTAMP/-2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 12S'; +NEW_CONNECTION; +set spanner.read_only_staleness='max_staleness 12s'; +NEW_CONNECTION; + set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; + set spanner.read_only_staleness='MAX_STALENESS 
12s'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 12s' ; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 12s' ; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 12s' + +; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +set +spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 12s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 12s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS%12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 12s'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS_12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 12s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS&12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 12s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness='MAX_STALENESS$12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 12s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS@12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 12s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS!12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 12s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS*12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 12s'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS(12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 12s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS)12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 12s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS-12s'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 12s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS+12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 12s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS-#12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 12s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS/12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 12s'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS\12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 12s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS?12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 12s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS-/12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+/#set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 12s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS/#12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness='MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 12s'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS/-12s'; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100MS'; +NEW_CONNECTION; +set spanner.read_only_staleness='max_staleness 100ms'; +NEW_CONNECTION; + set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; + set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 100ms' ; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 100ms' ; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 100ms' + +; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +set +spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 100ms' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 100ms'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness='MAX_STALENESS%100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 100ms'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS_100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 100ms'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS&100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 100ms'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS$100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 100ms'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS@100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 100ms'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS!100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 100ms'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness='MAX_STALENESS*100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 100ms'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS(100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 100ms'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS)100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 100ms'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS-100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 100ms'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS+100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 100ms'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS-#100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 100ms'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness='MAX_STALENESS/100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 100ms'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS\100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 100ms'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS?100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 100ms'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS-/100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 100ms'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS/#100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness='MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 100ms'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS/-100ms'; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 99999US'; +NEW_CONNECTION; +set spanner.read_only_staleness='max_staleness 99999us'; +NEW_CONNECTION; + set spanner.read_only_staleness='MAX_STALENESS 
99999us'; +NEW_CONNECTION; + set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 99999us' ; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 99999us' ; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 99999us' + +; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +set +spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 99999us' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 99999us'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS%99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 99999us'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS_99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 99999us'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS&99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 99999us'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS$99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 99999us'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS@99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 99999us'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS!99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 99999us'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS*99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 99999us'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS(99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 99999us'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS)99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness='MAX_STALENESS 99999us'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 99999us'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS-99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 99999us'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS+99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 99999us'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS-#99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 99999us'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS/99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 99999us'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS\99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 99999us'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS?99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set 
spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 99999us'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS-/99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 99999us'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS/#99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness='MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 99999us'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS/-99999us'; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10NS'; +NEW_CONNECTION; +set spanner.read_only_staleness='max_staleness 10ns'; +NEW_CONNECTION; + set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; + set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 10ns' ; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 10ns' ; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 10ns' + +; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +set +spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set spanner.read_only_staleness='MAX_STALENESS 10ns' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 10ns'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS%10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 10ns'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS_10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 10ns'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS&10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 10ns'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS$10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 10ns'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS@10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 10ns'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness='MAX_STALENESS!10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 10ns'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS*10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 10ns'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS(10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 10ns'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS)10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 10ns'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS-10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 10ns'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS+10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 10ns'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness='MAX_STALENESS-#10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 10ns'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS/10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 10ns'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS\10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 10ns'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS?10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 10ns'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS-/10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 10ns'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS/#10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness='MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='MAX_STALENESS 10ns'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness='MAX_STALENESS/-10ns'; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 15S'; +NEW_CONNECTION; +set spanner.read_only_staleness='exact_staleness 15s'; +NEW_CONNECTION; + set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; + set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 15s' ; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 15s' ; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 15s' + +; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +set +spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS%15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15s'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS_15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness='EXACT_STALENESS 15s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS&15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS$15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS@15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS!15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS*15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15s'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS(15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness='EXACT_STALENESS 15s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS)15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS-15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS+15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS-#15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS/15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15s'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS\15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness='EXACT_STALENESS 15s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS?15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS-/15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS/#15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15s'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS/-15s'; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1500MS'; +NEW_CONNECTION; +set spanner.read_only_staleness='exact_staleness 1500ms'; +NEW_CONNECTION; + set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; + set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 1500ms' ; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 1500ms' ; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 1500ms' + +; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 
1500ms'; +NEW_CONNECTION; +set +spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 1500ms' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 1500ms'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS%1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 1500ms'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS_1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 1500ms'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS&1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 1500ms'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS$1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 1500ms'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness='EXACT_STALENESS@1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 1500ms'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS!1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 1500ms'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS*1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 1500ms'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS(1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 1500ms'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS)1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 1500ms'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS-1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 1500ms'+; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS+1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 1500ms'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS-#1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 1500ms'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS/1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 1500ms'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS\1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 1500ms'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS?1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 1500ms'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS-/1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 
1500ms'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS/#1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 1500ms'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS/-1500ms'; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 15000000US'; +NEW_CONNECTION; +set spanner.read_only_staleness='exact_staleness 15000000us'; +NEW_CONNECTION; + set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; + set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 15000000us' ; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 15000000us' ; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 15000000us' + +; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +set +spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15000000us' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15000000us'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness='EXACT_STALENESS%15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15000000us'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS_15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15000000us'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS&15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15000000us'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS$15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15000000us'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS@15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15000000us'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS!15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness='EXACT_STALENESS 15000000us'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS*15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15000000us'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS(15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15000000us'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS)15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15000000us'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS-15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15000000us'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS+15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15000000us'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS-#15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set 
spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15000000us'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS/15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15000000us'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS\15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15000000us'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS?15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15000000us'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS-/15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15000000us'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS/#15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness='EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 15000000us'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness='EXACT_STALENESS/-15000000us'; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 9999NS'; +NEW_CONNECTION; +set spanner.read_only_staleness='exact_staleness 9999ns'; +NEW_CONNECTION; + set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; + set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 9999ns' ; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 9999ns' ; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 9999ns' + +; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +set +spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 9999ns' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS%9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS_9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness='EXACT_STALENESS 
9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS&9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS$9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS@9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS!9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS*9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS(9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set 
spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS)9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS-9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS+9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS-#9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS/9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS\9999ns'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS?9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS-/9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS/#9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness='EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS 9999ns'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness='EXACT_STALENESS/-9999ns'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS TO 'STRONG'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'strong'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'STRONG' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'STRONG' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'STRONG' + +; +NEW_CONNECTION; +set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; 
+set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; +set +spanner.read_only_staleness +to +'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'STRONG' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'STRONG'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to%'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'STRONG'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to_'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'STRONG'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to&'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'STRONG'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to$'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'STRONG'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to@'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness 
to 'STRONG'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to!'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'STRONG'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to*'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'STRONG'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to('STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'STRONG'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to)'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'STRONG'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to-'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'STRONG'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to+'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'STRONG'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to-#'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness to 
'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'STRONG'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to/'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'STRONG'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to\'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'STRONG'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to?'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'STRONG'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to-/'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'STRONG'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to/#'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness to 'STRONG'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'STRONG'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to/-'STRONG'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS TO 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'min_read_timestamp 
2018-01-02t03:04:05.123-08:00'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00' + +; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +set +spanner.read_only_staleness +to +'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP%2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'_; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP_2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP&2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP$2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP@2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP!2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP*2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP(2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP)2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP-2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP+2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP-#2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP/2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP\2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP?2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP-/2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP/#2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP/-2018-01-02T03:04:05.123-08:00'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS TO 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'min_read_timestamp 2018-01-02t03:04:05.123z'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z' + +; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +set 
spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +set +spanner.read_only_staleness +to +'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP%2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP_2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP&2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'$; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP$2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP@2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP!2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP*2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP(2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 
2018-01-02T03:04:05.123Z'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP)2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP-2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP+2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP-#2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP/2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP\2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP?2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP-/2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP/#2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP/-2018-01-02T03:04:05.123Z'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +SET 
SPANNER.READ_ONLY_STALENESS TO 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'min_read_timestamp 2018-01-02t03:04:05.123+07:45'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45' + +; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +set +spanner.read_only_staleness +to +'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP%2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 
2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP_2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP&2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP$2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP@2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 
'MIN_READ_TIMESTAMP!2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP*2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP(2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP)2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP-2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 
'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP+2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP-#2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP/2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP\2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP?2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness to 
'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP-/2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP/#2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MIN_READ_TIMESTAMP/-2018-01-02T03:04:05.123+07:45'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS TO 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'read_timestamp 2018-01-02t03:04:05.54321-07:00'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'READ_TIMESTAMP 
2018-01-02T03:04:05.54321-07:00' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00' + +; +NEW_CONNECTION; +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +set +spanner.read_only_staleness +to +'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP%2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP_2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP&2018-01-02T03:04:05.54321-07:00'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP$2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP@2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP!2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP*2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'(; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP(2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP)2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP-2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP+2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP-#2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP/2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP\2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP?2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP-/2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP/#2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness to 
'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP/-2018-01-02T03:04:05.54321-07:00'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS TO 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'read_timestamp 2018-01-02t03:04:05.54321z'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z' + +; +NEW_CONNECTION; +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +set +spanner.read_only_staleness +to +'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP%2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP_2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP&2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP$2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP@2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP!2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP*2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP(2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP)2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP-2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP+2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP-#2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP/2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP\2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP?2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness to 'READ_TIMESTAMP 
2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP-/2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP/#2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP/-2018-01-02T03:04:05.54321Z'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS TO 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'read_timestamp 2018-01-02t03:04:05.54321+05:30'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30' ; +NEW_CONNECTION; +set spanner.read_only_staleness 
to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30' + +; +NEW_CONNECTION; +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +set +spanner.read_only_staleness +to +'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP%2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP_2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP&2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness 
to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP$2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP@2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP!2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP*2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 
'READ_TIMESTAMP(2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP)2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP-2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP+2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP-#2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 
2018-01-02T03:04:05.54321+05:30'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP/2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP\2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP?2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP-/2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP/#2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'READ_TIMESTAMP/-2018-01-02T03:04:05.54321+05:30'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS TO 'MAX_STALENESS 12S'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'max_staleness 12s'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 12s' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 12s' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 12s' + +; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +set +spanner.read_only_staleness +to +'MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 12s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 12s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS%12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 12s'_; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS_12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 12s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS&12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 12s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS$12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 12s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS@12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 12s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS!12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 12s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS*12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 12s'(; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS(12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 12s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS)12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 12s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS-12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 12s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS+12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 12s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS-#12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 12s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS/12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 12s'\; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS\12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 12s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS?12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 12s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS-/12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 12s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS/#12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness to 'MAX_STALENESS 12s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 12s'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS/-12s'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS TO 'MAX_STALENESS 100MS'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'max_staleness 100ms'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 100ms' ; +NEW_CONNECTION; +set 
spanner.read_only_staleness to 'MAX_STALENESS 100ms' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 100ms' + +; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +set +spanner.read_only_staleness +to +'MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 100ms' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS%100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS_100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS&100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS$100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +@set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS@100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS!100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS*100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS(100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS)100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS-100ms'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS+100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS-#100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS/100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS\100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS?100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness to 'MAX_STALENESS-/100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS/#100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness to 'MAX_STALENESS 100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 100ms'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS/-100ms'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS TO 'MAX_STALENESS 99999US'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'max_staleness 99999us'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 99999us' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 99999us' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 99999us' + +; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +set +spanner.read_only_staleness +to +'MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 99999us' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set 
spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 99999us'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS%99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 99999us'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS_99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 99999us'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS&99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 99999us'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS$99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 99999us'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS@99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 99999us'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 
'MAX_STALENESS!99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 99999us'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS*99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 99999us'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS(99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 99999us'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS)99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 99999us'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS-99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 99999us'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS+99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 99999us'-#; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS-#99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 99999us'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS/99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 99999us'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS\99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 99999us'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS?99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 99999us'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS-/99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 99999us'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS/#99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness to 'MAX_STALENESS 99999us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness to 'MAX_STALENESS 99999us'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS/-99999us'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS TO 'MAX_STALENESS 10NS'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'max_staleness 10ns'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 10ns' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 10ns' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 10ns' + +; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +set +spanner.read_only_staleness +to +'MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 10ns' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS%10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS_10ns'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS&10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS$10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS@10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS!10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS*10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 
'MAX_STALENESS(10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS)10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS-10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS+10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS-#10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS/10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness to 'MAX_STALENESS\10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS?10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS-/10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS/#10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness to 'MAX_STALENESS 10ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS 10ns'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'MAX_STALENESS/-10ns'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS TO 'EXACT_STALENESS 15S'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'exact_staleness 15s'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 15s' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 
'EXACT_STALENESS 15s' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 15s' + +; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +set +spanner.read_only_staleness +to +'EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS%15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS_15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS&15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS$15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set 
spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS@15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS!15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS*15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS(15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS)15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS-15s'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS+15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS-#15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS/15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS\15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS?15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 
'EXACT_STALENESS-/15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS/#15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness to 'EXACT_STALENESS 15s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15s'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS/-15s'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS TO 'EXACT_STALENESS 1500MS'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'exact_staleness 1500ms'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms' + +; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +set +spanner.read_only_staleness +to +'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness to 
'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS%1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS_1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS&1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS$1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS@1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS!1500ms'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS*1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS(1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS)1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS-1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS+1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'-#; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS-#1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS/1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS\1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS?1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS-/1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS/#1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 1500ms'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS/-1500ms'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS TO 'EXACT_STALENESS 15000000US'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'exact_staleness 15000000us'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us' + +; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +set +spanner.read_only_staleness +to +'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS%15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS_15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS&15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS$15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS@15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS!15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS*15000000us'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS(15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS)15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS-15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS+15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS-#15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 
'EXACT_STALENESS 15000000us'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS/15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS\15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS?15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS-/15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS/#15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 15000000us'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS/-15000000us'; +NEW_CONNECTION; +set 
spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +SET SPANNER.READ_ONLY_STALENESS TO 'EXACT_STALENESS 9999NS'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'exact_staleness 9999ns'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; + set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; + + + +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns' ; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns' + +; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +set +spanner.read_only_staleness +to +'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS%9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS_9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS&9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS$9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS@9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS!9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS*9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS(9999ns'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +)set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS)9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS-9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS+9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS-#9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS/9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set spanner.read_only_staleness to 'EXACT_STALENESS\9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS?9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS-/9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS/#9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS 9999ns'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_only_staleness to 'EXACT_STALENESS/-9999ns'; +NEW_CONNECTION; +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; + set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; + set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; + + + +set 
spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}' ; +NEW_CONNECTION; +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}' ; +NEW_CONNECTION; +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}' + +; +NEW_CONNECTION; +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +set +spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set 
spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set/spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'/#; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +NEW_CONNECTION; +set spanner.directed_read=''; +NEW_CONNECTION; + set spanner.directed_read=''; +NEW_CONNECTION; + set spanner.directed_read=''; +NEW_CONNECTION; + + + +set spanner.directed_read=''; +NEW_CONNECTION; +set spanner.directed_read='' ; +NEW_CONNECTION; +set spanner.directed_read='' ; +NEW_CONNECTION; +set spanner.directed_read='' + +; +NEW_CONNECTION; +set spanner.directed_read=''; +NEW_CONNECTION; +set spanner.directed_read=''; +NEW_CONNECTION; +set +spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read='' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read=''%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read=''_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.directed_read=''&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read=''$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read=''@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read=''!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read=''*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read=''(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read=''); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read=''-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.directed_read=''; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read=''+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read=''-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read=''/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read=''\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read=''?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read=''-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read=''/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.directed_read=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.directed_read=''/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.directed_read=''; +NEW_CONNECTION; +set 
spanner.optimizer_version='1'; +NEW_CONNECTION; +SET SPANNER.OPTIMIZER_VERSION='1'; +NEW_CONNECTION; +set spanner.optimizer_version='1'; +NEW_CONNECTION; + set spanner.optimizer_version='1'; +NEW_CONNECTION; + set spanner.optimizer_version='1'; +NEW_CONNECTION; + + + +set spanner.optimizer_version='1'; +NEW_CONNECTION; +set spanner.optimizer_version='1' ; +NEW_CONNECTION; +set spanner.optimizer_version='1' ; +NEW_CONNECTION; +set spanner.optimizer_version='1' + +; +NEW_CONNECTION; +set spanner.optimizer_version='1'; +NEW_CONNECTION; +set spanner.optimizer_version='1'; +NEW_CONNECTION; +set +spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='1' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='1'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='1'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='1'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='1'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.optimizer_version='1'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='1'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='1'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='1'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='1'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='1'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='1'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='1'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='1'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set-#spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='1'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='1'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='1'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='1'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='1'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.optimizer_version='1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='1'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.optimizer_version='1'; +NEW_CONNECTION; +set spanner.optimizer_version='200'; +NEW_CONNECTION; +SET SPANNER.OPTIMIZER_VERSION='200'; +NEW_CONNECTION; +set spanner.optimizer_version='200'; +NEW_CONNECTION; + set spanner.optimizer_version='200'; +NEW_CONNECTION; + set spanner.optimizer_version='200'; +NEW_CONNECTION; + + + +set 
spanner.optimizer_version='200'; +NEW_CONNECTION; +set spanner.optimizer_version='200' ; +NEW_CONNECTION; +set spanner.optimizer_version='200' ; +NEW_CONNECTION; +set spanner.optimizer_version='200' + +; +NEW_CONNECTION; +set spanner.optimizer_version='200'; +NEW_CONNECTION; +set spanner.optimizer_version='200'; +NEW_CONNECTION; +set +spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='200' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='200'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='200'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='200'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='200'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='200'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set 
spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='200'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='200'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='200'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='200'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='200'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='200'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='200'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.optimizer_version='200'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='200'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='200'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='200'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='200'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.optimizer_version='200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='200'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.optimizer_version='200'; +NEW_CONNECTION; +set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +SET SPANNER.OPTIMIZER_VERSION='LATEST'; +NEW_CONNECTION; +set spanner.optimizer_version='latest'; +NEW_CONNECTION; + set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; + set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; + + + +set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +set spanner.optimizer_version='LATEST' ; +NEW_CONNECTION; +set 
spanner.optimizer_version='LATEST' ; +NEW_CONNECTION; +set spanner.optimizer_version='LATEST' + +; +NEW_CONNECTION; +set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +set +spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='LATEST' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='LATEST'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='LATEST'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='LATEST'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='LATEST'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='LATEST'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.optimizer_version='LATEST'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='LATEST'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='LATEST'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='LATEST'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='LATEST'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='LATEST'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='LATEST'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='LATEST'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.optimizer_version='LATEST'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='LATEST'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='LATEST'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='LATEST'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='LATEST'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='LATEST'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='LATEST'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.optimizer_version='LATEST'; +NEW_CONNECTION; +set spanner.optimizer_version=''; +NEW_CONNECTION; +SET SPANNER.OPTIMIZER_VERSION=''; +NEW_CONNECTION; +set spanner.optimizer_version=''; +NEW_CONNECTION; + set spanner.optimizer_version=''; +NEW_CONNECTION; + set spanner.optimizer_version=''; +NEW_CONNECTION; + + + +set spanner.optimizer_version=''; +NEW_CONNECTION; +set spanner.optimizer_version='' ; 
+NEW_CONNECTION; +set spanner.optimizer_version='' ; +NEW_CONNECTION; +set spanner.optimizer_version='' + +; +NEW_CONNECTION; +set spanner.optimizer_version=''; +NEW_CONNECTION; +set spanner.optimizer_version=''; +NEW_CONNECTION; +set +spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version='' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version=''%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version=''_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version=''&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version=''$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version=''@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version=''!; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set!spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version=''*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version=''(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version=''); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version=''-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version=''+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version=''-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version=''/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set 
spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version=''\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version=''?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version=''-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version=''/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.optimizer_version=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version=''/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.optimizer_version=''; +NEW_CONNECTION; +set spanner.optimizer_version to '1'; +NEW_CONNECTION; +SET SPANNER.OPTIMIZER_VERSION TO '1'; +NEW_CONNECTION; +set spanner.optimizer_version to '1'; +NEW_CONNECTION; + set spanner.optimizer_version to '1'; +NEW_CONNECTION; + set spanner.optimizer_version to '1'; +NEW_CONNECTION; + + + +set spanner.optimizer_version to '1'; +NEW_CONNECTION; +set spanner.optimizer_version to '1' ; +NEW_CONNECTION; +set spanner.optimizer_version to '1' ; +NEW_CONNECTION; +set spanner.optimizer_version to '1' + +; +NEW_CONNECTION; +set spanner.optimizer_version to '1'; +NEW_CONNECTION; +set spanner.optimizer_version to '1'; +NEW_CONNECTION; +set +spanner.optimizer_version +to +'1'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +foo set spanner.optimizer_version to '1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '1' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.optimizer_version to '1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '1'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to%'1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.optimizer_version to '1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '1'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to_'1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.optimizer_version to '1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '1'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to&'1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.optimizer_version to '1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '1'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to$'1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.optimizer_version to '1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '1'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to@'1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.optimizer_version to '1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '1'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to!'1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.optimizer_version to '1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.optimizer_version to '1'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to*'1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.optimizer_version to '1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '1'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to('1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.optimizer_version to '1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '1'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to)'1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.optimizer_version to '1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '1'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to-'1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.optimizer_version to '1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '1'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to+'1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.optimizer_version to '1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '1'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to-#'1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.optimizer_version to '1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '1'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to/'1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.optimizer_version to '1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '1'\; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to\'1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.optimizer_version to '1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '1'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to?'1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.optimizer_version to '1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '1'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to-/'1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.optimizer_version to '1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '1'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to/#'1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.optimizer_version to '1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '1'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to/-'1'; +NEW_CONNECTION; +set spanner.optimizer_version to '200'; +NEW_CONNECTION; +SET SPANNER.OPTIMIZER_VERSION TO '200'; +NEW_CONNECTION; +set spanner.optimizer_version to '200'; +NEW_CONNECTION; + set spanner.optimizer_version to '200'; +NEW_CONNECTION; + set spanner.optimizer_version to '200'; +NEW_CONNECTION; + + + +set spanner.optimizer_version to '200'; +NEW_CONNECTION; +set spanner.optimizer_version to '200' ; +NEW_CONNECTION; +set spanner.optimizer_version to '200' ; +NEW_CONNECTION; +set spanner.optimizer_version to '200' + +; +NEW_CONNECTION; +set spanner.optimizer_version to '200'; +NEW_CONNECTION; +set spanner.optimizer_version to '200'; +NEW_CONNECTION; +set +spanner.optimizer_version +to +'200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set 
spanner.optimizer_version to '200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '200' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.optimizer_version to '200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '200'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to%'200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.optimizer_version to '200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '200'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to_'200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.optimizer_version to '200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '200'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to&'200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.optimizer_version to '200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '200'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to$'200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.optimizer_version to '200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '200'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to@'200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.optimizer_version to '200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '200'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to!'200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.optimizer_version to '200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.optimizer_version to '200'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to*'200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.optimizer_version to '200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '200'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to('200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.optimizer_version to '200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '200'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to)'200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.optimizer_version to '200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '200'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to-'200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.optimizer_version to '200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '200'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to+'200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.optimizer_version to '200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '200'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to-#'200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.optimizer_version to '200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '200'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to/'200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.optimizer_version to '200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.optimizer_version to '200'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to\'200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.optimizer_version to '200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '200'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to?'200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.optimizer_version to '200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '200'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to-/'200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.optimizer_version to '200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '200'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to/#'200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.optimizer_version to '200'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '200'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to/-'200'; +NEW_CONNECTION; +set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +SET SPANNER.OPTIMIZER_VERSION TO 'LATEST'; +NEW_CONNECTION; +set spanner.optimizer_version to 'latest'; +NEW_CONNECTION; + set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; + set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; + + + +set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +set spanner.optimizer_version to 'LATEST' ; +NEW_CONNECTION; +set spanner.optimizer_version to 'LATEST' ; +NEW_CONNECTION; +set spanner.optimizer_version to 'LATEST' + +; +NEW_CONNECTION; +set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +set 
+spanner.optimizer_version +to +'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to 'LATEST' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to 'LATEST'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to%'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to 'LATEST'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to_'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to 'LATEST'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to&'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to 'LATEST'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to$'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to 'LATEST'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to@'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to 'LATEST'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version 
to!'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to 'LATEST'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to*'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to 'LATEST'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to('LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to 'LATEST'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to)'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to 'LATEST'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to-'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to 'LATEST'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to+'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to 'LATEST'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to-#'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to 'LATEST'/; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to/'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to 'LATEST'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to\'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to 'LATEST'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to?'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to 'LATEST'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to-/'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to 'LATEST'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to/#'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.optimizer_version to 'LATEST'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to 'LATEST'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to/-'LATEST'; +NEW_CONNECTION; +set spanner.optimizer_version to ''; +NEW_CONNECTION; +SET SPANNER.OPTIMIZER_VERSION TO ''; +NEW_CONNECTION; +set spanner.optimizer_version to ''; +NEW_CONNECTION; + set spanner.optimizer_version to ''; +NEW_CONNECTION; + set spanner.optimizer_version to ''; +NEW_CONNECTION; + + + +set spanner.optimizer_version to ''; +NEW_CONNECTION; +set spanner.optimizer_version to '' ; +NEW_CONNECTION; +set spanner.optimizer_version to '' ; 
+NEW_CONNECTION; +set spanner.optimizer_version to '' + +; +NEW_CONNECTION; +set spanner.optimizer_version to ''; +NEW_CONNECTION; +set spanner.optimizer_version to ''; +NEW_CONNECTION; +set +spanner.optimizer_version +to +''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.optimizer_version to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to '' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.optimizer_version to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to ''%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to%''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.optimizer_version to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to ''_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to_''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.optimizer_version to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to ''&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to&''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.optimizer_version to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to ''$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to$''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.optimizer_version to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to ''@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to@''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.optimizer_version to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to ''!; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to!''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.optimizer_version to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to ''*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to*''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.optimizer_version to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to ''(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to(''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.optimizer_version to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to ''); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to)''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.optimizer_version to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to ''-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to-''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.optimizer_version to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to ''+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to+''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.optimizer_version to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to ''-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to-#''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.optimizer_version to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to ''/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version 
to/''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.optimizer_version to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to ''\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to\''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.optimizer_version to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to ''?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to?''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.optimizer_version to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to ''-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to-/''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.optimizer_version to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to ''/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to/#''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.optimizer_version to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to ''/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_version to/-''; +NEW_CONNECTION; +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='AUTO_20191128_14_47_22UTC'; +NEW_CONNECTION; +set spanner.optimizer_statistics_package='auto_20191128_14_47_22utc'; +NEW_CONNECTION; + set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; + set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; + + + +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +set 
spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC' ; +NEW_CONNECTION; +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC' ; +NEW_CONNECTION; +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC' + +; +NEW_CONNECTION; +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +set +spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set 
spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set\spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.optimizer_statistics_package='auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +NEW_CONNECTION; +set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; + set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; + set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; + + + +set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +set 
spanner.optimizer_statistics_package='' ; +NEW_CONNECTION; +set spanner.optimizer_statistics_package='' ; +NEW_CONNECTION; +set spanner.optimizer_statistics_package='' + +; +NEW_CONNECTION; +set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +set +spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package='' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package=''%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package=''_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package=''&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package=''$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package=''@; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set@spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package=''!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package=''*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package=''(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package=''); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package=''-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package=''+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package=''-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package=''/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package=''\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package=''?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package=''-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package=''/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package=''/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.optimizer_statistics_package=''; +NEW_CONNECTION; +set 
spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE TO 'AUTO_20191128_14_47_22UTC'; +NEW_CONNECTION; +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22utc'; +NEW_CONNECTION; + set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; + set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; + + + +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC' ; +NEW_CONNECTION; +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC' ; +NEW_CONNECTION; +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC' + +; +NEW_CONNECTION; +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +set +spanner.optimizer_statistics_package +to +'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to%'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'_; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to_'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to&'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to$'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to@'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to!'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.optimizer_statistics_package to*'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to('auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to)'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to-'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to+'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to-#'auto_20191128_14_47_22UTC'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to/'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to\'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to?'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to-/'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to/#'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set 
spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to 'auto_20191128_14_47_22UTC'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to/-'auto_20191128_14_47_22UTC'; +NEW_CONNECTION; +set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE TO ''; +NEW_CONNECTION; +set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; + set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; + set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; + + + +set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +set spanner.optimizer_statistics_package to '' ; +NEW_CONNECTION; +set spanner.optimizer_statistics_package to '' ; +NEW_CONNECTION; +set spanner.optimizer_statistics_package to '' + +; +NEW_CONNECTION; +set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +set +spanner.optimizer_statistics_package +to +''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to '' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to ''%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to%''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to ''_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to_''; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +&set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to ''&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to&''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to ''$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to$''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to ''@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to@''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to ''!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to!''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to ''*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to*''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to ''(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to(''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.optimizer_statistics_package to ''); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to)''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to ''-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to-''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to ''+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to+''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to ''-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to-#''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to ''/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to/''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to ''\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to\''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to ''?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to?''; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to ''-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to-/''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to ''/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to/#''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.optimizer_statistics_package to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to ''/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.optimizer_statistics_package to/-''; +NEW_CONNECTION; +set spanner.return_commit_stats = true; +NEW_CONNECTION; +SET SPANNER.RETURN_COMMIT_STATS = TRUE; +NEW_CONNECTION; +set spanner.return_commit_stats = true; +NEW_CONNECTION; + set spanner.return_commit_stats = true; +NEW_CONNECTION; + set spanner.return_commit_stats = true; +NEW_CONNECTION; + + + +set spanner.return_commit_stats = true; +NEW_CONNECTION; +set spanner.return_commit_stats = true ; +NEW_CONNECTION; +set spanner.return_commit_stats = true ; +NEW_CONNECTION; +set spanner.return_commit_stats = true + +; +NEW_CONNECTION; +set spanner.return_commit_stats = true; +NEW_CONNECTION; +set spanner.return_commit_stats = true; +NEW_CONNECTION; +set +spanner.return_commit_stats += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.return_commit_stats = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.return_commit_stats = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.return_commit_stats = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.return_commit_stats = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =/-true; +NEW_CONNECTION; +set spanner.return_commit_stats = false; +NEW_CONNECTION; +SET SPANNER.RETURN_COMMIT_STATS = FALSE; +NEW_CONNECTION; +set spanner.return_commit_stats = false; +NEW_CONNECTION; + set spanner.return_commit_stats = false; +NEW_CONNECTION; + set spanner.return_commit_stats = false; +NEW_CONNECTION; + + + +set spanner.return_commit_stats = false; +NEW_CONNECTION; +set spanner.return_commit_stats = false ; +NEW_CONNECTION; +set spanner.return_commit_stats = false ; +NEW_CONNECTION; +set spanner.return_commit_stats = false + +; +NEW_CONNECTION; +set spanner.return_commit_stats = false; +NEW_CONNECTION; +set spanner.return_commit_stats = false; +NEW_CONNECTION; +set +spanner.return_commit_stats += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set 
spanner.return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +(set spanner.return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =\false; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.return_commit_stats = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats =/-false; +NEW_CONNECTION; +set spanner.return_commit_stats to true; +NEW_CONNECTION; +SET SPANNER.RETURN_COMMIT_STATS TO TRUE; +NEW_CONNECTION; +set spanner.return_commit_stats to true; +NEW_CONNECTION; + set spanner.return_commit_stats to true; +NEW_CONNECTION; + set spanner.return_commit_stats to true; +NEW_CONNECTION; + + + +set spanner.return_commit_stats to true; +NEW_CONNECTION; +set spanner.return_commit_stats to true ; +NEW_CONNECTION; +set spanner.return_commit_stats to true ; +NEW_CONNECTION; +set spanner.return_commit_stats to true + +; +NEW_CONNECTION; +set spanner.return_commit_stats to true; +NEW_CONNECTION; +set spanner.return_commit_stats to true; +NEW_CONNECTION; +set +spanner.return_commit_stats +to +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.return_commit_stats to true; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.return_commit_stats to true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.return_commit_stats to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.return_commit_stats to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.return_commit_stats to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.return_commit_stats to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.return_commit_stats to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.return_commit_stats to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.return_commit_stats to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to true*; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.return_commit_stats to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.return_commit_stats to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.return_commit_stats to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.return_commit_stats to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.return_commit_stats to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.return_commit_stats to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.return_commit_stats to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to true\; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.return_commit_stats to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.return_commit_stats to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.return_commit_stats to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.return_commit_stats to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to/-true; +NEW_CONNECTION; +set spanner.return_commit_stats to false; +NEW_CONNECTION; +SET SPANNER.RETURN_COMMIT_STATS TO FALSE; +NEW_CONNECTION; +set spanner.return_commit_stats to false; +NEW_CONNECTION; + set spanner.return_commit_stats to false; +NEW_CONNECTION; + set spanner.return_commit_stats to false; +NEW_CONNECTION; + + + +set spanner.return_commit_stats to false; +NEW_CONNECTION; +set spanner.return_commit_stats to false ; +NEW_CONNECTION; +set spanner.return_commit_stats to false ; +NEW_CONNECTION; +set spanner.return_commit_stats to false + +; +NEW_CONNECTION; +set spanner.return_commit_stats to false; +NEW_CONNECTION; +set spanner.return_commit_stats to false; +NEW_CONNECTION; +set +spanner.return_commit_stats +to +false; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.return_commit_stats to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.return_commit_stats to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.return_commit_stats to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.return_commit_stats to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.return_commit_stats to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.return_commit_stats to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.return_commit_stats to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set 
spanner.return_commit_stats to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.return_commit_stats to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.return_commit_stats to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.return_commit_stats to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.return_commit_stats to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.return_commit_stats to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.return_commit_stats to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to/false; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +\set spanner.return_commit_stats to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.return_commit_stats to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.return_commit_stats to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.return_commit_stats to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.return_commit_stats to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.return_commit_stats to/-false; +NEW_CONNECTION; +set spanner.max_commit_delay=null; +NEW_CONNECTION; +SET SPANNER.MAX_COMMIT_DELAY=NULL; +NEW_CONNECTION; +set spanner.max_commit_delay=null; +NEW_CONNECTION; + set spanner.max_commit_delay=null; +NEW_CONNECTION; + set spanner.max_commit_delay=null; +NEW_CONNECTION; + + + +set spanner.max_commit_delay=null; +NEW_CONNECTION; +set spanner.max_commit_delay=null ; +NEW_CONNECTION; +set spanner.max_commit_delay=null ; +NEW_CONNECTION; +set spanner.max_commit_delay=null + +; +NEW_CONNECTION; +set spanner.max_commit_delay=null; +NEW_CONNECTION; +set 
spanner.max_commit_delay=null; +NEW_CONNECTION; +set +spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=null bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=null%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=null_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=null&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=null$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=null@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=null!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.max_commit_delay=null; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=null*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=null(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=null); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=null-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=null+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=null-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=null/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=null\; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set\spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=null?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=null-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=null/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.max_commit_delay=null; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=null/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.max_commit_delay=null; +NEW_CONNECTION; +set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +SET SPANNER.MAX_COMMIT_DELAY = NULL; +NEW_CONNECTION; +set spanner.max_commit_delay = null; +NEW_CONNECTION; + set spanner.max_commit_delay = NULL; +NEW_CONNECTION; + set spanner.max_commit_delay = NULL; +NEW_CONNECTION; + + + +set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +set spanner.max_commit_delay = NULL ; +NEW_CONNECTION; +set spanner.max_commit_delay = NULL ; +NEW_CONNECTION; +set spanner.max_commit_delay = NULL + +; +NEW_CONNECTION; +set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +set +spanner.max_commit_delay += +NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = NULL 
bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = NULL%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =%NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = NULL_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =_NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = NULL&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =&NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = NULL$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =$NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = NULL@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =@NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = NULL!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =!NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = NULL*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =*NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set 
spanner.max_commit_delay = NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = NULL(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =(NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = NULL); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =)NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = NULL-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =-NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = NULL+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =+NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = NULL-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =-#NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = NULL/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =/NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = NULL\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =\NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.max_commit_delay = NULL?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =?NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = NULL-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =-/NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = NULL/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =/#NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.max_commit_delay = NULL; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = NULL/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =/-NULL; +NEW_CONNECTION; +set spanner.max_commit_delay = null ; +NEW_CONNECTION; +SET SPANNER.MAX_COMMIT_DELAY = NULL ; +NEW_CONNECTION; +set spanner.max_commit_delay = null ; +NEW_CONNECTION; + set spanner.max_commit_delay = null ; +NEW_CONNECTION; + set spanner.max_commit_delay = null ; +NEW_CONNECTION; + + + +set spanner.max_commit_delay = null ; +NEW_CONNECTION; +set spanner.max_commit_delay = null ; +NEW_CONNECTION; +set spanner.max_commit_delay = null ; +NEW_CONNECTION; +set spanner.max_commit_delay = null + +; +NEW_CONNECTION; +set spanner.max_commit_delay = null ; +NEW_CONNECTION; +set spanner.max_commit_delay = null ; +NEW_CONNECTION; +set +spanner.max_commit_delay += +null +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.max_commit_delay = null %; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null 
(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null -#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null \; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null ?; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.max_commit_delay = null?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.max_commit_delay = null ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = null/-; +NEW_CONNECTION; +set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +SET SPANNER.MAX_COMMIT_DELAY='1S'; +NEW_CONNECTION; +set spanner.max_commit_delay='1s'; +NEW_CONNECTION; + set spanner.max_commit_delay='1s'; +NEW_CONNECTION; + set spanner.max_commit_delay='1s'; +NEW_CONNECTION; + + + +set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +set spanner.max_commit_delay='1s' ; +NEW_CONNECTION; +set spanner.max_commit_delay='1s' ; +NEW_CONNECTION; +set spanner.max_commit_delay='1s' + +; +NEW_CONNECTION; +set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +set +spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='1s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='1s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set%spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='1s'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='1s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='1s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='1s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='1s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='1s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='1s'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set 
spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='1s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='1s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='1s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='1s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='1s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='1s'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='1s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.max_commit_delay='1s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='1s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.max_commit_delay='1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='1s'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.max_commit_delay='1s'; +NEW_CONNECTION; +set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +SET SPANNER.MAX_COMMIT_DELAY = '1S'; +NEW_CONNECTION; +set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; + set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; + set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; + + + +set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +set spanner.max_commit_delay = '1s' + +; +NEW_CONNECTION; +set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +set +spanner.max_commit_delay += +'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =%'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'_; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =_'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =&'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =$'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =@'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =!'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =*'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =('1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.max_commit_delay =)'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =-'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =+'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =-#'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =/'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =\'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =?'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =-/'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +/#set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =/#'1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.max_commit_delay = '1s'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =/-'1s'; +NEW_CONNECTION; +set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +SET SPANNER.MAX_COMMIT_DELAY = '1S' ; +NEW_CONNECTION; +set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; + set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; + set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; + + + +set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +set spanner.max_commit_delay = '1s' + +; +NEW_CONNECTION; +set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +set +spanner.max_commit_delay += +'1s' +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s' %; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s' _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'_; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +&set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s' &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s' $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s' @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s' !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s' *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s' (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s' ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.max_commit_delay = 
'1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s' -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s' +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s' -#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s' /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s' \; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s' ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s' -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.max_commit_delay = '1s' /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.max_commit_delay = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s' /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = '1s'/-; +NEW_CONNECTION; +set spanner.max_commit_delay=1000; +NEW_CONNECTION; +SET SPANNER.MAX_COMMIT_DELAY=1000; +NEW_CONNECTION; +set spanner.max_commit_delay=1000; +NEW_CONNECTION; + set spanner.max_commit_delay=1000; +NEW_CONNECTION; + set spanner.max_commit_delay=1000; +NEW_CONNECTION; + + + +set spanner.max_commit_delay=1000; +NEW_CONNECTION; +set spanner.max_commit_delay=1000 ; +NEW_CONNECTION; +set spanner.max_commit_delay=1000 ; +NEW_CONNECTION; +set spanner.max_commit_delay=1000 + +; +NEW_CONNECTION; +set spanner.max_commit_delay=1000; +NEW_CONNECTION; +set spanner.max_commit_delay=1000; +NEW_CONNECTION; +set +spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=1000 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=1000%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=1000_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=1000&; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=1000$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=1000@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=1000!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=1000*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=1000(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=1000); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=1000-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT ++set spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=1000+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=1000-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=1000/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=1000\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=1000?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=1000-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay=1000/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.max_commit_delay=1000; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.max_commit_delay=1000/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.max_commit_delay=1000; +NEW_CONNECTION; +set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +SET SPANNER.MAX_COMMIT_DELAY = 1000; +NEW_CONNECTION; +set spanner.max_commit_delay = 1000; +NEW_CONNECTION; + set spanner.max_commit_delay = 1000; +NEW_CONNECTION; + set spanner.max_commit_delay = 1000; +NEW_CONNECTION; + + + +set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +set spanner.max_commit_delay = 1000 + +; +NEW_CONNECTION; +set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +set +spanner.max_commit_delay += +1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =%1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =_1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =&1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.max_commit_delay = 1000$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =$1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =@1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =!1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =*1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =(1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =)1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =-1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000+; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.max_commit_delay =+1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =-#1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =/1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =\1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =?1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =-/1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =/#1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.max_commit_delay = 1000; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay =/-1000; 
+NEW_CONNECTION; +set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +SET SPANNER.MAX_COMMIT_DELAY = 1000 ; +NEW_CONNECTION; +set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; + set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; + set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; + + + +set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +set spanner.max_commit_delay = 1000 + +; +NEW_CONNECTION; +set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +set +spanner.max_commit_delay += +1000 +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000 %; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000 _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000 &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000 $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000$; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000 @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000 !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000 *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000 (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000 ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000 -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000 +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+-#set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000 -#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000 /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000 \; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000 ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000 -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000 /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.max_commit_delay = 1000 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000 /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay = 1000/-; +NEW_CONNECTION; +set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +SET 
SPANNER.MAX_COMMIT_DELAY='100MS'; +NEW_CONNECTION; +set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; + set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; + set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; + + + +set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +set spanner.max_commit_delay='100ms' ; +NEW_CONNECTION; +set spanner.max_commit_delay='100ms' ; +NEW_CONNECTION; +set spanner.max_commit_delay='100ms' + +; +NEW_CONNECTION; +set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +set +spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='100ms' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='100ms'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='100ms'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='100ms'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='100ms'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set 
spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='100ms'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='100ms'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='100ms'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='100ms'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='100ms'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='100ms'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='100ms'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='100ms'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='100ms'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='100ms'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='100ms'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='100ms'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='100ms'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay='100ms'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.max_commit_delay='100ms'; +NEW_CONNECTION; +set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +SET SPANNER.MAX_COMMIT_DELAY TO '10000US'; +NEW_CONNECTION; +set 
spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; + set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; + set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; + + + +set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +set spanner.max_commit_delay to '10000us' ; +NEW_CONNECTION; +set spanner.max_commit_delay to '10000us' ; +NEW_CONNECTION; +set spanner.max_commit_delay to '10000us' + +; +NEW_CONNECTION; +set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +set +spanner.max_commit_delay +to +'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to '10000us' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to '10000us'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to%'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to '10000us'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to_'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to '10000us'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to&'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to '10000us'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to$'10000us'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to '10000us'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to@'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to '10000us'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to!'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to '10000us'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to*'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to '10000us'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to('10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to '10000us'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to)'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to '10000us'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to-'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to '10000us'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.max_commit_delay to+'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to '10000us'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to-#'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to '10000us'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to/'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to '10000us'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to\'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to '10000us'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to?'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to '10000us'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to-/'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to '10000us'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to/#'10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.max_commit_delay to '10000us'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to 
'10000us'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay to/-'10000us'; +NEW_CONNECTION; +set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +SET SPANNER.MAX_COMMIT_DELAY TO '9223372036854775807NS'; +NEW_CONNECTION; +set spanner.max_commit_delay to '9223372036854775807ns'; +NEW_CONNECTION; + set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; + set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; + + + +set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +set spanner.max_commit_delay TO '9223372036854775807ns' ; +NEW_CONNECTION; +set spanner.max_commit_delay TO '9223372036854775807ns' ; +NEW_CONNECTION; +set spanner.max_commit_delay TO '9223372036854775807ns' + +; +NEW_CONNECTION; +set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +set +spanner.max_commit_delay +TO +'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO%'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO_'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set 
spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO&'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO$'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO@'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO!'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO*'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO('9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +)set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO)'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO-'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO+'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO-#'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO/'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO\'9223372036854775807ns'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO?'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO-/'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO/#'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO/-'9223372036854775807ns'; +NEW_CONNECTION; +set spanner.statement_tag='tag1'; +NEW_CONNECTION; +SET SPANNER.STATEMENT_TAG='TAG1'; +NEW_CONNECTION; +set spanner.statement_tag='tag1'; +NEW_CONNECTION; + set spanner.statement_tag='tag1'; +NEW_CONNECTION; + set spanner.statement_tag='tag1'; +NEW_CONNECTION; + + + +set spanner.statement_tag='tag1'; +NEW_CONNECTION; +set spanner.statement_tag='tag1' ; +NEW_CONNECTION; +set spanner.statement_tag='tag1' ; +NEW_CONNECTION; +set spanner.statement_tag='tag1' + +; +NEW_CONNECTION; +set spanner.statement_tag='tag1'; +NEW_CONNECTION; +set spanner.statement_tag='tag1'; +NEW_CONNECTION; +set +spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +foo set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set*spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.statement_tag='tag1'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.statement_tag='tag1'; +NEW_CONNECTION; +set spanner.statement_tag='tag2'; +NEW_CONNECTION; +SET SPANNER.STATEMENT_TAG='TAG2'; +NEW_CONNECTION; +set spanner.statement_tag='tag2'; +NEW_CONNECTION; + set spanner.statement_tag='tag2'; +NEW_CONNECTION; + set spanner.statement_tag='tag2'; +NEW_CONNECTION; + + + +set spanner.statement_tag='tag2'; +NEW_CONNECTION; +set spanner.statement_tag='tag2' ; +NEW_CONNECTION; +set spanner.statement_tag='tag2' ; +NEW_CONNECTION; +set spanner.statement_tag='tag2' + +; +NEW_CONNECTION; +set spanner.statement_tag='tag2'; +NEW_CONNECTION; +set spanner.statement_tag='tag2'; +NEW_CONNECTION; +set +spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'%; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +)set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.statement_tag='tag2'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.statement_tag='tag2'; +NEW_CONNECTION; +set spanner.statement_tag=''; +NEW_CONNECTION; +SET SPANNER.STATEMENT_TAG=''; +NEW_CONNECTION; +set spanner.statement_tag=''; +NEW_CONNECTION; + set spanner.statement_tag=''; +NEW_CONNECTION; + set spanner.statement_tag=''; +NEW_CONNECTION; + + + +set spanner.statement_tag=''; +NEW_CONNECTION; +set spanner.statement_tag='' ; +NEW_CONNECTION; +set spanner.statement_tag='' ; +NEW_CONNECTION; +set spanner.statement_tag='' + +; +NEW_CONNECTION; +set spanner.statement_tag=''; +NEW_CONNECTION; +set spanner.statement_tag=''; +NEW_CONNECTION; +set +spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set 
spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT ++set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set/-spanner.statement_tag=''; +NEW_CONNECTION; +set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +SET SPANNER.STATEMENT_TAG TO 'TAG1'; +NEW_CONNECTION; +set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; + set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; + set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; + + + +set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +set spanner.statement_tag to 'tag1' ; +NEW_CONNECTION; +set spanner.statement_tag to 'tag1' ; +NEW_CONNECTION; +set spanner.statement_tag to 'tag1' + +; +NEW_CONNECTION; +set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +set +spanner.statement_tag +to +'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to%'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to_'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to&'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to$'tag1'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to@'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to!'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to*'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to('tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to)'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to+'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set 
spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-#'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to\'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to?'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-/'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/#'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/-'tag1'; +NEW_CONNECTION; +set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +SET SPANNER.STATEMENT_TAG TO 'TAG2'; 
+NEW_CONNECTION; +set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; + set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; + set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; + + + +set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +set spanner.statement_tag to 'tag2' ; +NEW_CONNECTION; +set spanner.statement_tag to 'tag2' ; +NEW_CONNECTION; +set spanner.statement_tag to 'tag2' + +; +NEW_CONNECTION; +set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +set +spanner.statement_tag +to +'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to%'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to_'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to&'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to$'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to@'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to!'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to*'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to('tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to)'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to+'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'-#; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-#'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to\'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to?'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-/'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/#'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/-'tag2'; +NEW_CONNECTION; +set spanner.statement_tag to ''; +NEW_CONNECTION; +SET SPANNER.STATEMENT_TAG TO ''; +NEW_CONNECTION; +set spanner.statement_tag to ''; +NEW_CONNECTION; + set spanner.statement_tag to ''; +NEW_CONNECTION; + set spanner.statement_tag to ''; 
+NEW_CONNECTION; + + + +set spanner.statement_tag to ''; +NEW_CONNECTION; +set spanner.statement_tag to '' ; +NEW_CONNECTION; +set spanner.statement_tag to '' ; +NEW_CONNECTION; +set spanner.statement_tag to '' + +; +NEW_CONNECTION; +set spanner.statement_tag to ''; +NEW_CONNECTION; +set spanner.statement_tag to ''; +NEW_CONNECTION; +set +spanner.statement_tag +to +''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to '' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to%''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to_''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to&''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to$''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to@''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.statement_tag to ''!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to!''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to*''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to(''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to)''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to+''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-#''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/''; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to\''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to?''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-/''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/#''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/-''; +NEW_CONNECTION; +set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +SET SPANNER.STATEMENT_TAG TO 'TEST_TAG'; +NEW_CONNECTION; +set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; + set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; + set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; + + + +set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +set spanner.statement_tag to 'test_tag' ; +NEW_CONNECTION; +set spanner.statement_tag to 'test_tag' ; +NEW_CONNECTION; +set spanner.statement_tag to 'test_tag' + +; +NEW_CONNECTION; +set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +set 
+spanner.statement_tag +to +'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to%'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to_'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to&'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to$'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to@'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to!'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +*set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to*'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to('test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to)'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to+'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-#'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/'test_tag'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to\'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to?'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-/'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/#'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/-'test_tag'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +SET SPANNER.TRANSACTION_TAG='TAG1'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; + + + +set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag1' ; 
+NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag1' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag1' + +; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +set +spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1' bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set$spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.transaction_tag='tag1'-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set 
spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +SET SPANNER.TRANSACTION_TAG='TAG2'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; + + + +set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag2' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag2' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag2' + +; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +set +spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +foo set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2' bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set 
spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.transaction_tag='tag2'; 
+NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'/#; +NEW_CONNECTION; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +SET SPANNER.TRANSACTION_TAG=''; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; + + + +set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='' + +; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +set +spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='' bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +_set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = 
false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +SET SPANNER.TRANSACTION_TAG TO 'TAG1'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set 
autocommit = false; + set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; + + + +set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag1' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag1' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag1' + +; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +set +spanner.transaction_tag +to +'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1' bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to%'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to_'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to&'tag1'; +NEW_CONNECTION; +set autocommit = 
false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to$'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to@'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to!'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to*'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to('tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'); +NEW_CONNECTION; 
+set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to)'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to+'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-#'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to\'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_tag to 
'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to?'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-/'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/#'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/-'tag1'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +SET SPANNER.TRANSACTION_TAG TO 'TAG2'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; + + + +set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag2' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag2' ; 
+NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag2' + +; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +set +spanner.transaction_tag +to +'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2' bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to%'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to_'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to&'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag 
to$'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to@'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to!'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to*'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to('tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to)'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.transaction_tag to 'tag2'-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to+'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-#'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to\'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to?'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +-/set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-/'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/#'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/-'tag2'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +SET SPANNER.TRANSACTION_TAG TO ''; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; + + + +set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to '' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to '' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to '' + +; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +set +spanner.transaction_tag +to +''; +NEW_CONNECTION; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to '' bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to%''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to_''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to&''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to$''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to@''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +!set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to!''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to*''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to(''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to)''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag 
to+''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-#''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to\''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to?''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-/''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''/#; +NEW_CONNECTION; +set autocommit = 
false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/#''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/-''; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +SET SPANNER.TRANSACTION_TAG TO 'TEST_TAG'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; + + + +set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'test_tag' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'test_tag' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'test_tag' + +; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +set +spanner.transaction_tag +to +'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag' bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'%; +NEW_CONNECTION; +set 
autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to%'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to_'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to&'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to$'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to@'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to!'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to*'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to('test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to)'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to+'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.transaction_tag to 'test_tag'-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-#'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to\'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to?'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-/'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag 
to/#'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/-'test_tag'; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +SET SPANNER.EXCLUDE_TXN_FROM_CHANGE_STREAMS = TRUE; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; + set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; + set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; + + + +set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = true ; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = true ; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = true + +; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +set +spanner.exclude_txn_from_change_streams += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true_; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.exclude_txn_from_change_streams =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =\true; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +?set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =/-true; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +SET SPANNER.EXCLUDE_TXN_FROM_CHANGE_STREAMS = FALSE; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; + set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; + set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; + + + +set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = false ; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = false ; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = false + +; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = false; 
+NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +set +spanner.exclude_txn_from_change_streams += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =@false; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set 
spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.exclude_txn_from_change_streams = false; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =/-false; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +SET SPANNER.EXCLUDE_TXN_FROM_CHANGE_STREAMS TO TRUE; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; + set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; + set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; + + + +set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams to true ; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams to true ; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams to true + +; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +set +spanner.exclude_txn_from_change_streams +to +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to_true; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +&set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set 
spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.exclude_txn_from_change_streams to true; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.exclude_txn_from_change_streams to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to/-true; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; +SET SPANNER.EXCLUDE_TXN_FROM_CHANGE_STREAMS TO FALSE; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; + set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; + set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; + + + +set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams to false ; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams to false ; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams to false + +; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams to false; 
+NEW_CONNECTION; +set +spanner.exclude_txn_from_change_streams +to +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set 
spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.exclude_txn_from_change_streams to 
false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.exclude_txn_from_change_streams to false; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams to/-false; +NEW_CONNECTION; +set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +SET SPANNER.RPC_PRIORITY='HIGH'; +NEW_CONNECTION; +set spanner.rpc_priority='high'; +NEW_CONNECTION; + set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; + set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; + + + +set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +set spanner.rpc_priority='HIGH' ; +NEW_CONNECTION; +set spanner.rpc_priority='HIGH' ; +NEW_CONNECTION; +set spanner.rpc_priority='HIGH' + +; +NEW_CONNECTION; +set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +set +spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='HIGH' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='HIGH'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='HIGH'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='HIGH'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='HIGH'$; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set$spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='HIGH'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='HIGH'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='HIGH'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='HIGH'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='HIGH'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='HIGH'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='HIGH'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.rpc_priority='HIGH'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='HIGH'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='HIGH'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='HIGH'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='HIGH'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='HIGH'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='HIGH'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='HIGH'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.rpc_priority='HIGH'; +NEW_CONNECTION; +set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +SET SPANNER.RPC_PRIORITY='MEDIUM'; +NEW_CONNECTION; +set spanner.rpc_priority='medium'; +NEW_CONNECTION; + set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; + set 
spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; + + + +set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +set spanner.rpc_priority='MEDIUM' ; +NEW_CONNECTION; +set spanner.rpc_priority='MEDIUM' ; +NEW_CONNECTION; +set spanner.rpc_priority='MEDIUM' + +; +NEW_CONNECTION; +set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +set +spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='MEDIUM' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='MEDIUM'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='MEDIUM'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='MEDIUM'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='MEDIUM'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='MEDIUM'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+!set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='MEDIUM'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='MEDIUM'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='MEDIUM'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='MEDIUM'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='MEDIUM'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='MEDIUM'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='MEDIUM'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.rpc_priority='MEDIUM'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='MEDIUM'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='MEDIUM'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='MEDIUM'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='MEDIUM'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='MEDIUM'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.rpc_priority='MEDIUM'; +NEW_CONNECTION; +set spanner.rpc_priority='LOW'; +NEW_CONNECTION; +SET SPANNER.RPC_PRIORITY='LOW'; +NEW_CONNECTION; +set spanner.rpc_priority='low'; +NEW_CONNECTION; + set spanner.rpc_priority='LOW'; +NEW_CONNECTION; + set spanner.rpc_priority='LOW'; +NEW_CONNECTION; + + + +set spanner.rpc_priority='LOW'; +NEW_CONNECTION; +set spanner.rpc_priority='LOW' ; +NEW_CONNECTION; +set spanner.rpc_priority='LOW' ; +NEW_CONNECTION; +set spanner.rpc_priority='LOW' + +; +NEW_CONNECTION; +set spanner.rpc_priority='LOW'; 
+NEW_CONNECTION; +set spanner.rpc_priority='LOW'; +NEW_CONNECTION; +set +spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='LOW' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='LOW'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='LOW'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='LOW'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='LOW'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='LOW'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='LOW'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.rpc_priority='LOW'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='LOW'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='LOW'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='LOW'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='LOW'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='LOW'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='LOW'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='LOW'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set 
spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='LOW'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='LOW'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='LOW'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='LOW'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.rpc_priority='LOW'; +NEW_CONNECTION; +set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +SET SPANNER.RPC_PRIORITY='NULL'; +NEW_CONNECTION; +set spanner.rpc_priority='null'; +NEW_CONNECTION; + set spanner.rpc_priority='NULL'; +NEW_CONNECTION; + set spanner.rpc_priority='NULL'; +NEW_CONNECTION; + + + +set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +set spanner.rpc_priority='NULL' ; +NEW_CONNECTION; +set spanner.rpc_priority='NULL' ; +NEW_CONNECTION; +set spanner.rpc_priority='NULL' + +; +NEW_CONNECTION; +set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +set +spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='NULL' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='NULL'%; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='NULL'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='NULL'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='NULL'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='NULL'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='NULL'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='NULL'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='NULL'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.rpc_priority='NULL'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='NULL'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='NULL'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='NULL'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='NULL'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='NULL'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='NULL'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='NULL'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='NULL'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set-/spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='NULL'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority='NULL'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.rpc_priority='NULL'; +NEW_CONNECTION; +set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +SET SPANNER.RPC_PRIORITY TO 'HIGH'; +NEW_CONNECTION; +set spanner.rpc_priority to 'high'; +NEW_CONNECTION; + set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; + set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; + + + +set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +set spanner.rpc_priority to 'HIGH' ; +NEW_CONNECTION; +set spanner.rpc_priority to 'HIGH' ; +NEW_CONNECTION; +set spanner.rpc_priority to 'HIGH' + +; +NEW_CONNECTION; +set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +set +spanner.rpc_priority +to +'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'HIGH' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'HIGH'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to%'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'HIGH'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to_'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +&set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'HIGH'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to&'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'HIGH'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to$'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'HIGH'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to@'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'HIGH'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to!'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'HIGH'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to*'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'HIGH'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to('HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'HIGH'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to)'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.rpc_priority to 'HIGH'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to-'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'HIGH'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to+'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'HIGH'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to-#'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'HIGH'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to/'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'HIGH'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to\'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'HIGH'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to?'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'HIGH'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to-/'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'HIGH'/#; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to/#'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.rpc_priority to 'HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'HIGH'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to/-'HIGH'; +NEW_CONNECTION; +set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +SET SPANNER.RPC_PRIORITY TO 'MEDIUM'; +NEW_CONNECTION; +set spanner.rpc_priority to 'medium'; +NEW_CONNECTION; + set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; + set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; + + + +set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +set spanner.rpc_priority to 'MEDIUM' ; +NEW_CONNECTION; +set spanner.rpc_priority to 'MEDIUM' ; +NEW_CONNECTION; +set spanner.rpc_priority to 'MEDIUM' + +; +NEW_CONNECTION; +set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +set +spanner.rpc_priority +to +'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'MEDIUM' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'MEDIUM'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to%'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'MEDIUM'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to_'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'MEDIUM'&; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.rpc_priority to&'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'MEDIUM'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to$'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'MEDIUM'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to@'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'MEDIUM'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to!'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'MEDIUM'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to*'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'MEDIUM'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to('MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'MEDIUM'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to)'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'MEDIUM'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority 
to-'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'MEDIUM'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to+'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'MEDIUM'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to-#'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'MEDIUM'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to/'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'MEDIUM'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to\'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'MEDIUM'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to?'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'MEDIUM'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to-/'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'MEDIUM'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to/#'MEDIUM'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +/-set spanner.rpc_priority to 'MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'MEDIUM'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to/-'MEDIUM'; +NEW_CONNECTION; +set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +SET SPANNER.RPC_PRIORITY TO 'LOW'; +NEW_CONNECTION; +set spanner.rpc_priority to 'low'; +NEW_CONNECTION; + set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; + set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; + + + +set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +set spanner.rpc_priority to 'LOW' ; +NEW_CONNECTION; +set spanner.rpc_priority to 'LOW' ; +NEW_CONNECTION; +set spanner.rpc_priority to 'LOW' + +; +NEW_CONNECTION; +set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +set +spanner.rpc_priority +to +'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'LOW' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'LOW'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to%'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'LOW'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to_'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'LOW'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to&'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.rpc_priority to 'LOW'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'LOW'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to$'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'LOW'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to@'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'LOW'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to!'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'LOW'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to*'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'LOW'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to('LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'LOW'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to)'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'LOW'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to-'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'LOW'+; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to+'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'LOW'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to-#'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'LOW'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to/'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'LOW'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to\'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'LOW'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to?'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'LOW'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to-/'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'LOW'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to/#'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.rpc_priority to 'LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'LOW'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to/-'LOW'; +NEW_CONNECTION; +set 
spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +SET SPANNER.RPC_PRIORITY TO 'NULL'; +NEW_CONNECTION; +set spanner.rpc_priority to 'null'; +NEW_CONNECTION; + set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; + set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; + + + +set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +set spanner.rpc_priority to 'NULL' ; +NEW_CONNECTION; +set spanner.rpc_priority to 'NULL' ; +NEW_CONNECTION; +set spanner.rpc_priority to 'NULL' + +; +NEW_CONNECTION; +set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +set +spanner.rpc_priority +to +'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'NULL' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'NULL'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to%'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'NULL'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to_'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'NULL'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to&'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'NULL'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to$'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.rpc_priority 
to 'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'NULL'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to@'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'NULL'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to!'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'NULL'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to*'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'NULL'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to('NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'NULL'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to)'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'NULL'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to-'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'NULL'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to+'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 
'NULL'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to-#'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'NULL'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to/'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'NULL'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to\'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'NULL'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to?'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'NULL'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to-/'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'NULL'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to/#'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.rpc_priority to 'NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to 'NULL'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.rpc_priority to/-'NULL'; +NEW_CONNECTION; +set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +SET SPANNER.SAVEPOINT_SUPPORT='ENABLED'; +NEW_CONNECTION; +set spanner.savepoint_support='enabled'; +NEW_CONNECTION; + set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; + 
set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; + + + +set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +set spanner.savepoint_support='ENABLED' ; +NEW_CONNECTION; +set spanner.savepoint_support='ENABLED' ; +NEW_CONNECTION; +set spanner.savepoint_support='ENABLED' + +; +NEW_CONNECTION; +set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +set +spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='ENABLED' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='ENABLED'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='ENABLED'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='ENABLED'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='ENABLED'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.savepoint_support='ENABLED'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='ENABLED'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='ENABLED'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='ENABLED'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='ENABLED'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='ENABLED'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='ENABLED'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.savepoint_support='ENABLED'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='ENABLED'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='ENABLED'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='ENABLED'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='ENABLED'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='ENABLED'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='ENABLED'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.savepoint_support='ENABLED'; +NEW_CONNECTION; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +SET 
SPANNER.SAVEPOINT_SUPPORT='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +set spanner.savepoint_support='fail_after_rollback'; +NEW_CONNECTION; + set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; + set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; + + + +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK' ; +NEW_CONNECTION; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK' ; +NEW_CONNECTION; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK' + +; +NEW_CONNECTION; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +set +spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +$set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set 
spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +SET SPANNER.SAVEPOINT_SUPPORT='DISABLED'; +NEW_CONNECTION; +set spanner.savepoint_support='disabled'; +NEW_CONNECTION; + set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; + set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; + + + +set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +set spanner.savepoint_support='DISABLED' ; +NEW_CONNECTION; +set spanner.savepoint_support='DISABLED' ; +NEW_CONNECTION; +set spanner.savepoint_support='DISABLED' + +; +NEW_CONNECTION; +set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +set +spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='DISABLED' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='DISABLED'%; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='DISABLED'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='DISABLED'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='DISABLED'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='DISABLED'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='DISABLED'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='DISABLED'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.savepoint_support='DISABLED'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='DISABLED'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='DISABLED'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='DISABLED'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='DISABLED'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='DISABLED'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='DISABLED'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='DISABLED'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='DISABLED'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='DISABLED'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support='DISABLED'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.savepoint_support='DISABLED'; +NEW_CONNECTION; +set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +SET SPANNER.SAVEPOINT_SUPPORT TO 'ENABLED'; +NEW_CONNECTION; +set spanner.savepoint_support to 'enabled'; +NEW_CONNECTION; + set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; + set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; + + + +set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +set spanner.savepoint_support to 'ENABLED' ; +NEW_CONNECTION; +set spanner.savepoint_support to 'ENABLED' ; +NEW_CONNECTION; +set spanner.savepoint_support to 'ENABLED' + +; +NEW_CONNECTION; +set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +set +spanner.savepoint_support +to +'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.savepoint_support to 'ENABLED' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'ENABLED'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to%'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'ENABLED'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to_'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'ENABLED'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to&'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'ENABLED'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to$'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'ENABLED'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to@'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'ENABLED'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to!'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.savepoint_support to 'ENABLED'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to*'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'ENABLED'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to('ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'ENABLED'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to)'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'ENABLED'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to-'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'ENABLED'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to+'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'ENABLED'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to-#'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'ENABLED'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to/'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set 
spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'ENABLED'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to\'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'ENABLED'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to?'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'ENABLED'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to-/'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'ENABLED'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to/#'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.savepoint_support to 'ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'ENABLED'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to/-'ENABLED'; +NEW_CONNECTION; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +SET SPANNER.SAVEPOINT_SUPPORT TO 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +set spanner.savepoint_support to 'fail_after_rollback'; +NEW_CONNECTION; + set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; + set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; + + + +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK' ; +NEW_CONNECTION; +set 
spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK' ; +NEW_CONNECTION; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK' + +; +NEW_CONNECTION; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +set +spanner.savepoint_support +to +'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to%'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to_'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to&'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to$'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.savepoint_support to 
'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to@'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to!'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to*'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to('FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to)'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to-'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.savepoint_support to 
'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to+'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to-#'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to/'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to\'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to?'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to-/'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.savepoint_support to 
'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to/#'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to/-'FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +SET SPANNER.SAVEPOINT_SUPPORT TO 'DISABLED'; +NEW_CONNECTION; +set spanner.savepoint_support to 'disabled'; +NEW_CONNECTION; + set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; + set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; + + + +set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +set spanner.savepoint_support to 'DISABLED' ; +NEW_CONNECTION; +set spanner.savepoint_support to 'DISABLED' ; +NEW_CONNECTION; +set spanner.savepoint_support to 'DISABLED' + +; +NEW_CONNECTION; +set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +set +spanner.savepoint_support +to +'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'DISABLED' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'DISABLED'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to%'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.savepoint_support to 'DISABLED'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to_'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'DISABLED'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to&'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'DISABLED'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to$'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'DISABLED'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to@'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'DISABLED'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to!'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'DISABLED'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to*'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'DISABLED'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to('DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +)set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'DISABLED'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to)'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'DISABLED'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to-'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'DISABLED'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to+'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'DISABLED'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to-#'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'DISABLED'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to/'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'DISABLED'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to\'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'DISABLED'?; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.savepoint_support to?'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'DISABLED'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to-/'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'DISABLED'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to/#'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.savepoint_support to 'DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to 'DISABLED'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.savepoint_support to/-'DISABLED'; +NEW_CONNECTION; +set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +SET SPANNER.READ_LOCK_MODE='OPTIMISTIC'; +NEW_CONNECTION; +set spanner.read_lock_mode='optimistic'; +NEW_CONNECTION; + set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; + set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; + + + +set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +set spanner.read_lock_mode='OPTIMISTIC' ; +NEW_CONNECTION; +set spanner.read_lock_mode='OPTIMISTIC' ; +NEW_CONNECTION; +set spanner.read_lock_mode='OPTIMISTIC' + +; +NEW_CONNECTION; +set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +set +spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='OPTIMISTIC' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_lock_mode='OPTIMISTIC'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='OPTIMISTIC'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='OPTIMISTIC'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='OPTIMISTIC'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='OPTIMISTIC'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='OPTIMISTIC'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='OPTIMISTIC'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='OPTIMISTIC'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set 
spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='OPTIMISTIC'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='OPTIMISTIC'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='OPTIMISTIC'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='OPTIMISTIC'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='OPTIMISTIC'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='OPTIMISTIC'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='OPTIMISTIC'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +?set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='OPTIMISTIC'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='OPTIMISTIC'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='OPTIMISTIC'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='OPTIMISTIC'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.read_lock_mode='OPTIMISTIC'; +NEW_CONNECTION; +set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +SET SPANNER.READ_LOCK_MODE='PESSIMISTIC'; +NEW_CONNECTION; +set spanner.read_lock_mode='pessimistic'; +NEW_CONNECTION; + set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; + set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; + + + +set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +set spanner.read_lock_mode='PESSIMISTIC' ; +NEW_CONNECTION; +set spanner.read_lock_mode='PESSIMISTIC' ; +NEW_CONNECTION; +set spanner.read_lock_mode='PESSIMISTIC' + +; +NEW_CONNECTION; +set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +set +spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.read_lock_mode='PESSIMISTIC' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='PESSIMISTIC'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='PESSIMISTIC'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='PESSIMISTIC'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='PESSIMISTIC'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='PESSIMISTIC'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='PESSIMISTIC'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='PESSIMISTIC'*; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='PESSIMISTIC'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='PESSIMISTIC'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='PESSIMISTIC'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='PESSIMISTIC'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='PESSIMISTIC'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='PESSIMISTIC'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_lock_mode='PESSIMISTIC'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='PESSIMISTIC'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='PESSIMISTIC'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='PESSIMISTIC'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='PESSIMISTIC'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.read_lock_mode='PESSIMISTIC'; +NEW_CONNECTION; +set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +SET SPANNER.READ_LOCK_MODE='UNSPECIFIED'; +NEW_CONNECTION; +set spanner.read_lock_mode='unspecified'; +NEW_CONNECTION; + set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; + set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; + + + +set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +set spanner.read_lock_mode='UNSPECIFIED' ; +NEW_CONNECTION; +set spanner.read_lock_mode='UNSPECIFIED' ; +NEW_CONNECTION; +set spanner.read_lock_mode='UNSPECIFIED' + +; +NEW_CONNECTION; +set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +set 
+spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='UNSPECIFIED' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='UNSPECIFIED'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='UNSPECIFIED'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='UNSPECIFIED'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='UNSPECIFIED'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='UNSPECIFIED'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='UNSPECIFIED'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='UNSPECIFIED'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='UNSPECIFIED'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='UNSPECIFIED'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='UNSPECIFIED'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='UNSPECIFIED'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='UNSPECIFIED'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='UNSPECIFIED'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set/spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='UNSPECIFIED'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='UNSPECIFIED'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='UNSPECIFIED'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='UNSPECIFIED'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode='UNSPECIFIED'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.read_lock_mode='UNSPECIFIED'; +NEW_CONNECTION; +set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +SET SPANNER.READ_LOCK_MODE TO 'OPTIMISTIC'; +NEW_CONNECTION; +set spanner.read_lock_mode to 'optimistic'; +NEW_CONNECTION; + set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; + set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; + + + +set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +set spanner.read_lock_mode to 'OPTIMISTIC' ; +NEW_CONNECTION; +set spanner.read_lock_mode to 
'OPTIMISTIC' ; +NEW_CONNECTION; +set spanner.read_lock_mode to 'OPTIMISTIC' + +; +NEW_CONNECTION; +set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +set +spanner.read_lock_mode +to +'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'OPTIMISTIC' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'OPTIMISTIC'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to%'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'OPTIMISTIC'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to_'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'OPTIMISTIC'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to&'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'OPTIMISTIC'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to$'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'OPTIMISTIC'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to@'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +!set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'OPTIMISTIC'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to!'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'OPTIMISTIC'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to*'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'OPTIMISTIC'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to('OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'OPTIMISTIC'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to)'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'OPTIMISTIC'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to-'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'OPTIMISTIC'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to+'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'OPTIMISTIC'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.read_lock_mode to-#'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'OPTIMISTIC'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to/'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'OPTIMISTIC'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to\'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'OPTIMISTIC'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to?'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'OPTIMISTIC'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to-/'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'OPTIMISTIC'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to/#'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_lock_mode to 'OPTIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'OPTIMISTIC'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to/-'OPTIMISTIC'; +NEW_CONNECTION; +set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +SET SPANNER.READ_LOCK_MODE TO 'PESSIMISTIC'; +NEW_CONNECTION; +set spanner.read_lock_mode to 
'pessimistic'; +NEW_CONNECTION; + set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; + set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; + + + +set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +set spanner.read_lock_mode to 'PESSIMISTIC' ; +NEW_CONNECTION; +set spanner.read_lock_mode to 'PESSIMISTIC' ; +NEW_CONNECTION; +set spanner.read_lock_mode to 'PESSIMISTIC' + +; +NEW_CONNECTION; +set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +set +spanner.read_lock_mode +to +'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'PESSIMISTIC' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'PESSIMISTIC'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to%'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'PESSIMISTIC'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to_'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'PESSIMISTIC'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to&'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'PESSIMISTIC'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode 
to$'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'PESSIMISTIC'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to@'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'PESSIMISTIC'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to!'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'PESSIMISTIC'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to*'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'PESSIMISTIC'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to('PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'PESSIMISTIC'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to)'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'PESSIMISTIC'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to-'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 
'PESSIMISTIC'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to+'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'PESSIMISTIC'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to-#'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'PESSIMISTIC'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to/'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'PESSIMISTIC'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to\'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'PESSIMISTIC'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to?'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'PESSIMISTIC'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to-/'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'PESSIMISTIC'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to/#'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set 
spanner.read_lock_mode to 'PESSIMISTIC'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'PESSIMISTIC'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to/-'PESSIMISTIC'; +NEW_CONNECTION; +set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +SET SPANNER.READ_LOCK_MODE TO 'UNSPECIFIED'; +NEW_CONNECTION; +set spanner.read_lock_mode to 'unspecified'; +NEW_CONNECTION; + set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; + set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; + + + +set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +set spanner.read_lock_mode to 'UNSPECIFIED' ; +NEW_CONNECTION; +set spanner.read_lock_mode to 'UNSPECIFIED' ; +NEW_CONNECTION; +set spanner.read_lock_mode to 'UNSPECIFIED' + +; +NEW_CONNECTION; +set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +set +spanner.read_lock_mode +to +'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'UNSPECIFIED' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'UNSPECIFIED'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to%'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'UNSPECIFIED'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to_'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode 
to 'UNSPECIFIED'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to&'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'UNSPECIFIED'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to$'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'UNSPECIFIED'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to@'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'UNSPECIFIED'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to!'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'UNSPECIFIED'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to*'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'UNSPECIFIED'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to('UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'UNSPECIFIED'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to)'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.read_lock_mode to 
'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'UNSPECIFIED'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to-'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'UNSPECIFIED'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to+'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'UNSPECIFIED'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to-#'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'UNSPECIFIED'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to/'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'UNSPECIFIED'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to\'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'UNSPECIFIED'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to?'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'UNSPECIFIED'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode 
to-/'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'UNSPECIFIED'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to/#'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.read_lock_mode to 'UNSPECIFIED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to 'UNSPECIFIED'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.read_lock_mode to/-'UNSPECIFIED'; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +SET SPANNER.DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE = TRUE; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; + set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; + set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; + + + +set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write = true ; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write = true ; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write = true + +; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +set +spanner.delay_transaction_start_until_first_write += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+*set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.delay_transaction_start_until_first_write =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.delay_transaction_start_until_first_write = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =/-true; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +SET SPANNER.DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE = FALSE; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; + set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; + set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; + + + +set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write = false ; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write = false ; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write = false + +; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +set +spanner.delay_transaction_start_until_first_write += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = false_; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.delay_transaction_start_until_first_write = false; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write 
=/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write =/-false; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +SET SPANNER.DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE TO TRUE; +NEW_CONNECTION; +set 
spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; + set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; + set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; + + + +set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write to true ; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write to true ; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write to true + +; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +set +spanner.delay_transaction_start_until_first_write +to +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to true&; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.delay_transaction_start_until_first_write to true; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write 
to\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.delay_transaction_start_until_first_write to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to/-true; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +SET SPANNER.DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE TO FALSE; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; + set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; + set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; + + + +set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +set 
spanner.delay_transaction_start_until_first_write to false ; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write to false ; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write to false + +; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +set +spanner.delay_transaction_start_until_first_write +to +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.delay_transaction_start_until_first_write to false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to)false; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +-set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to false?; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.delay_transaction_start_until_first_write to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.delay_transaction_start_until_first_write to/-false; +NEW_CONNECTION; +set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +SET SPANNER.KEEP_TRANSACTION_ALIVE = TRUE; +NEW_CONNECTION; +set spanner.keep_transaction_alive = true; +NEW_CONNECTION; + set spanner.keep_transaction_alive = true; +NEW_CONNECTION; + set spanner.keep_transaction_alive = true; +NEW_CONNECTION; + + + +set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +set spanner.keep_transaction_alive = true ; +NEW_CONNECTION; +set spanner.keep_transaction_alive = true ; +NEW_CONNECTION; +set spanner.keep_transaction_alive = true + +; +NEW_CONNECTION; +set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +set +spanner.keep_transaction_alive += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+foo set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.keep_transaction_alive = 
true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +\set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =/-true; +NEW_CONNECTION; +set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +SET SPANNER.KEEP_TRANSACTION_ALIVE = FALSE; +NEW_CONNECTION; +set spanner.keep_transaction_alive = false; +NEW_CONNECTION; + set spanner.keep_transaction_alive = false; +NEW_CONNECTION; + set spanner.keep_transaction_alive = false; +NEW_CONNECTION; + + + +set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +set spanner.keep_transaction_alive = false ; +NEW_CONNECTION; +set spanner.keep_transaction_alive = false ; +NEW_CONNECTION; +set spanner.keep_transaction_alive = 
false + +; +NEW_CONNECTION; +set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +set +spanner.keep_transaction_alive += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.keep_transaction_alive = false; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +/set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive =/-false; +NEW_CONNECTION; +set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +SET SPANNER.KEEP_TRANSACTION_ALIVE TO TRUE; +NEW_CONNECTION; +set spanner.keep_transaction_alive to true; +NEW_CONNECTION; + set spanner.keep_transaction_alive to true; 
+NEW_CONNECTION; + set spanner.keep_transaction_alive to true; +NEW_CONNECTION; + + + +set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +set spanner.keep_transaction_alive to true ; +NEW_CONNECTION; +set spanner.keep_transaction_alive to true ; +NEW_CONNECTION; +set spanner.keep_transaction_alive to true + +; +NEW_CONNECTION; +set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +set +spanner.keep_transaction_alive +to +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.keep_transaction_alive to true; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to+true; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +-#set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.keep_transaction_alive to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.keep_transaction_alive to/-true; +NEW_CONNECTION; +set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +SET SPANNER.KEEP_TRANSACTION_ALIVE TO FALSE; +NEW_CONNECTION; +set spanner.keep_transaction_alive to false; +NEW_CONNECTION; + set spanner.keep_transaction_alive to false; +NEW_CONNECTION; + set spanner.keep_transaction_alive to false; +NEW_CONNECTION; + + + +set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +set spanner.keep_transaction_alive to false ; +NEW_CONNECTION; +set spanner.keep_transaction_alive to false ; +NEW_CONNECTION; +set spanner.keep_transaction_alive to false + +; +NEW_CONNECTION; +set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +set +spanner.keep_transaction_alive +to +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set 
spanner.keep_transaction_alive to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.keep_transaction_alive to-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.keep_transaction_alive to false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.keep_transaction_alive to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.keep_transaction_alive to/-false; +NEW_CONNECTION; +set spanner.auto_batch_dml = true; +NEW_CONNECTION; +SET SPANNER.AUTO_BATCH_DML = TRUE; +NEW_CONNECTION; +set spanner.auto_batch_dml = true; +NEW_CONNECTION; + set spanner.auto_batch_dml = true; +NEW_CONNECTION; + set spanner.auto_batch_dml = true; +NEW_CONNECTION; + + + +set spanner.auto_batch_dml = true; +NEW_CONNECTION; +set spanner.auto_batch_dml = true ; +NEW_CONNECTION; +set spanner.auto_batch_dml = true ; +NEW_CONNECTION; +set spanner.auto_batch_dml = true + +; +NEW_CONNECTION; +set spanner.auto_batch_dml = true; +NEW_CONNECTION; +set spanner.auto_batch_dml = true; +NEW_CONNECTION; +set +spanner.auto_batch_dml += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = 
true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =-true; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.auto_batch_dml = true; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =/-true; +NEW_CONNECTION; +set spanner.auto_batch_dml = false; +NEW_CONNECTION; +SET SPANNER.AUTO_BATCH_DML = FALSE; +NEW_CONNECTION; +set spanner.auto_batch_dml = false; +NEW_CONNECTION; + set spanner.auto_batch_dml = false; +NEW_CONNECTION; + set spanner.auto_batch_dml = false; +NEW_CONNECTION; + + + +set spanner.auto_batch_dml = false; +NEW_CONNECTION; +set spanner.auto_batch_dml = false ; +NEW_CONNECTION; +set spanner.auto_batch_dml = false ; +NEW_CONNECTION; +set spanner.auto_batch_dml = false + +; +NEW_CONNECTION; +set spanner.auto_batch_dml = false; +NEW_CONNECTION; +set spanner.auto_batch_dml = false; +NEW_CONNECTION; +set +spanner.auto_batch_dml += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = 
false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml 
=+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.auto_batch_dml = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml =/-false; +NEW_CONNECTION; +set spanner.auto_batch_dml to true; +NEW_CONNECTION; 
+SET SPANNER.AUTO_BATCH_DML TO TRUE; +NEW_CONNECTION; +set spanner.auto_batch_dml to true; +NEW_CONNECTION; + set spanner.auto_batch_dml to true; +NEW_CONNECTION; + set spanner.auto_batch_dml to true; +NEW_CONNECTION; + + + +set spanner.auto_batch_dml to true; +NEW_CONNECTION; +set spanner.auto_batch_dml to true ; +NEW_CONNECTION; +set spanner.auto_batch_dml to true ; +NEW_CONNECTION; +set spanner.auto_batch_dml to true + +; +NEW_CONNECTION; +set spanner.auto_batch_dml to true; +NEW_CONNECTION; +set spanner.auto_batch_dml to true; +NEW_CONNECTION; +set +spanner.auto_batch_dml +to +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.auto_batch_dml to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.auto_batch_dml to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.auto_batch_dml to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.auto_batch_dml to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.auto_batch_dml to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.auto_batch_dml to true; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.auto_batch_dml to true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.auto_batch_dml to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.auto_batch_dml to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.auto_batch_dml to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.auto_batch_dml to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.auto_batch_dml to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.auto_batch_dml to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.auto_batch_dml to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.auto_batch_dml to-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.auto_batch_dml to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.auto_batch_dml to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.auto_batch_dml to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.auto_batch_dml to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.auto_batch_dml to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.auto_batch_dml to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to/-true; +NEW_CONNECTION; +set spanner.auto_batch_dml to false; +NEW_CONNECTION; +SET SPANNER.AUTO_BATCH_DML TO FALSE; +NEW_CONNECTION; +set spanner.auto_batch_dml to false; +NEW_CONNECTION; + set spanner.auto_batch_dml to false; +NEW_CONNECTION; + set spanner.auto_batch_dml to false; +NEW_CONNECTION; + + + 
+set spanner.auto_batch_dml to false; +NEW_CONNECTION; +set spanner.auto_batch_dml to false ; +NEW_CONNECTION; +set spanner.auto_batch_dml to false ; +NEW_CONNECTION; +set spanner.auto_batch_dml to false + +; +NEW_CONNECTION; +set spanner.auto_batch_dml to false; +NEW_CONNECTION; +set spanner.auto_batch_dml to false; +NEW_CONNECTION; +set +spanner.auto_batch_dml +to +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.auto_batch_dml to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.auto_batch_dml to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.auto_batch_dml to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.auto_batch_dml to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.auto_batch_dml to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.auto_batch_dml to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+!set spanner.auto_batch_dml to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.auto_batch_dml to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.auto_batch_dml to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.auto_batch_dml to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.auto_batch_dml to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.auto_batch_dml to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.auto_batch_dml to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.auto_batch_dml to false; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.auto_batch_dml to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.auto_batch_dml to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.auto_batch_dml to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.auto_batch_dml to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.auto_batch_dml to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to/-false; +NEW_CONNECTION; +set spanner.auto_batch_dml to off; +NEW_CONNECTION; +SET SPANNER.AUTO_BATCH_DML TO OFF; +NEW_CONNECTION; +set spanner.auto_batch_dml to off; +NEW_CONNECTION; + set spanner.auto_batch_dml to off; +NEW_CONNECTION; + set spanner.auto_batch_dml to off; +NEW_CONNECTION; + + + +set spanner.auto_batch_dml to off; +NEW_CONNECTION; +set spanner.auto_batch_dml to off ; +NEW_CONNECTION; +set spanner.auto_batch_dml to off ; +NEW_CONNECTION; +set 
spanner.auto_batch_dml to off + +; +NEW_CONNECTION; +set spanner.auto_batch_dml to off; +NEW_CONNECTION; +set spanner.auto_batch_dml to off; +NEW_CONNECTION; +set +spanner.auto_batch_dml +to +off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.auto_batch_dml to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to off bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.auto_batch_dml to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to off%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to%off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.auto_batch_dml to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to off_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to_off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.auto_batch_dml to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to off&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to&off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.auto_batch_dml to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to off$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to$off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.auto_batch_dml to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to off@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to@off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.auto_batch_dml to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to off!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to!off; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.auto_batch_dml to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to off*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to*off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.auto_batch_dml to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to off(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to(off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.auto_batch_dml to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to off); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to)off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.auto_batch_dml to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to off-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to-off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.auto_batch_dml to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to off+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to+off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.auto_batch_dml to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to off-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to-#off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.auto_batch_dml to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to off/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to/off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.auto_batch_dml to off; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to off\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to\off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.auto_batch_dml to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to off?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to?off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.auto_batch_dml to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to off-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to-/off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.auto_batch_dml to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to off/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to/#off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.auto_batch_dml to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to off/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml to/-off; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +SET SPANNER.AUTO_BATCH_DML_UPDATE_COUNT = 0; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; + set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; + set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; + + + +set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count = 0 ; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count = 0 ; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count = 0 + +; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +set 
+spanner.auto_batch_dml_update_count += +0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 0 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 0%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =%0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 0_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =_0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 0&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =&0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 0$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =$0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 0@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =@0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 0!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set spanner.auto_batch_dml_update_count =!0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 0*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =*0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 0(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =(0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 0); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =)0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 0-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =-0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 0+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =+0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 0-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =-#0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set spanner.auto_batch_dml_update_count = 0/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =/0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 0\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =\0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 0?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =?0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 0-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =-/0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 0/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =/#0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.auto_batch_dml_update_count = 0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 0/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =/-0; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; +SET SPANNER.AUTO_BATCH_DML_UPDATE_COUNT = 100; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; + set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; + set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; + + + 
+set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count = 100 ; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count = 100 ; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count = 100 + +; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; +set +spanner.auto_batch_dml_update_count += +100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 100 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 100%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =%100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 100_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =_100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 100&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =&100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 100$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =$100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 100@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =@100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 100!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =!100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 100*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =*100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 100(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =(100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 100); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =)100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 100-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =-100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 100+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.auto_batch_dml_update_count =+100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 100-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =-#100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 100/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =/100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 100\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =\100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 100?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =?100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 100-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =-/100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.auto_batch_dml_update_count = 100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 100/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =/#100; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.auto_batch_dml_update_count = 100; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count = 100/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count =/-100; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +SET SPANNER.AUTO_BATCH_DML_UPDATE_COUNT TO 1; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; + set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; + set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; + + + +set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count to 1 ; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count to 1 ; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count to 1 + +; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +set +spanner.auto_batch_dml_update_count +to +1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to 1 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to 1%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to%1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to 1_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to_1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to 
1&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to&1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to 1$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to$1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to 1@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to@1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to 1!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to!1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to 1*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to*1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to 1(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to(1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to 1); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to)1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set 
spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to 1-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to-1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to 1+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to+1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to 1-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to-#1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to 1/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to/1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to 1\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to\1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to 1?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to?1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to 1-/; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to-/1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to 1/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to/#1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.auto_batch_dml_update_count to 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to 1/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count to/-1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +SET LOCAL SPANNER.BATCH_DML_UPDATE_COUNT = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + + + +set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.batch_dml_update_count = 0 ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.batch_dml_update_count = 0 ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.batch_dml_update_count = 0 + +; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local 
spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set +local +spanner.batch_dml_update_count += +0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 0 bar; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 0%; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =%0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 0_; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =_0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 0&; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =&0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+$set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 0$; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =$0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 0@; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =@0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 0!; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =!0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 0*; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =*0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = 
false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 0(; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =(0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 0); +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =)0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 0-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =-0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 0+; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =+0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 0-#; +NEW_CONNECTION; +set 
spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =-#0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 0/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =/0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 0\; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =\0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 0?; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =?0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 0-/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local 
spanner.batch_dml_update_count =-/0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 0/#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =/#0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set local spanner.batch_dml_update_count = 0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 0/-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =/-0; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +SET LOCAL SPANNER.BATCH_DML_UPDATE_COUNT = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + + + +set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.batch_dml_update_count = 100 ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.batch_dml_update_count = 100 ; +NEW_CONNECTION; +set 
spanner.readonly = false; +set autocommit = false; +set local spanner.batch_dml_update_count = 100 + +; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set +local +spanner.batch_dml_update_count += +100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 100 bar; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 100%; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =%100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 100_; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =_100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 100&; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =&100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 100$; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =$100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 100@; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =@100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 100!; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =!100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 100*; +NEW_CONNECTION; 
+set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =*100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 100(; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =(100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 100); +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =)100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 100-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =-100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 100+; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set local spanner.batch_dml_update_count =+100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 100-#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =-#100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 100/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =/100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 100\; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =\100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 100?; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =?100; +NEW_CONNECTION; +set spanner.readonly = 
false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 100-/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =-/100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 100/#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =/#100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set local spanner.batch_dml_update_count = 100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count = 100/-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count =/-100; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +SET LOCAL SPANNER.BATCH_DML_UPDATE_COUNT TO 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set local spanner.batch_dml_update_count to 
1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + + + +set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.batch_dml_update_count to 1 ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.batch_dml_update_count to 1 ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.batch_dml_update_count to 1 + +; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set +local +spanner.batch_dml_update_count +to +1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to 1 bar; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to 1%; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to%1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to 1_; +NEW_CONNECTION; +set 
spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to_1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to 1&; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to&1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to 1$; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to$1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to 1@; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to@1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to 1!; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local 
spanner.batch_dml_update_count to!1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to 1*; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to*1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to 1(; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to(1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to 1); +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to)1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to 1-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to-1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = 
false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to 1+; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to+1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to 1-#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to-#1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to 1/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to/1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to 1\; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to\1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set local spanner.batch_dml_update_count to 1; 
+NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to 1?; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to?1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to 1-/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to-/1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to 1/#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to/#1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set local spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to 1/-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set local spanner.batch_dml_update_count to/-1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +SET SPANNER.BATCH_DML_UPDATE_COUNT TO 1; +NEW_CONNECTION; +set 
spanner.readonly = false; +set autocommit = false; +set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + + + +set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.batch_dml_update_count to 1 ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.batch_dml_update_count to 1 ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.batch_dml_update_count to 1 + +; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set +spanner.batch_dml_update_count +to +1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to 1 bar; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to 1%; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to%1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +_set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to 1_; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to_1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to 1&; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to&1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to 1$; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to$1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to 1@; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to@1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set spanner.batch_dml_update_count to 1!; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to!1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to 1*; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to*1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to 1(; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to(1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to 1); +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to)1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to 1-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.batch_dml_update_count to-1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to 1+; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to+1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to 1-#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to-#1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to 1/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to/1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to 1\; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to\1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set 
spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to 1?; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to?1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to 1-/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to-/1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to 1/#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to/#1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.batch_dml_update_count to 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to 1/-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count to/-1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +SET SPANNER.BATCH_DML_UPDATE_COUNT = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set 
autocommit = false; +set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; + + + +set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.batch_dml_update_count = 1 ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.batch_dml_update_count = 1 ; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.batch_dml_update_count = 1 + +; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +set +spanner.batch_dml_update_count += +1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count = 1 bar; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count = 1%; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count =%1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set 
spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count = 1_; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count =_1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count = 1&; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count =&1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count = 1$; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count =$1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count = 1@; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count =@1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count = 1!; 
+NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count =!1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count = 1*; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count =*1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count = 1(; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count =(1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count = 1); +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count =)1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count = 1-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count =-1; +NEW_CONNECTION; +set spanner.readonly = 
false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count = 1+; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count =+1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count = 1-#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count =-#1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count = 1/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count =/1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count = 1\; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count =\1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count = 1?; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count =?1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count = 1-/; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count =-/1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count = 1/#; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count =/#1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.batch_dml_update_count = 1; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count = 1/-; +NEW_CONNECTION; +set spanner.readonly = false; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.batch_dml_update_count =/-1; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +SET SPANNER.AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION = TRUE; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; + set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; + set spanner.auto_batch_dml_update_count_verification = true; 
+NEW_CONNECTION; + + + +set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification = true ; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification = true ; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification = true + +; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +set +spanner.auto_batch_dml_update_count_verification += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set 
spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.auto_batch_dml_update_count_verification =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.auto_batch_dml_update_count_verification = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =/-true; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +SET SPANNER.AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION = FALSE; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; + set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; + set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; + + + +set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification = false ; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification = false ; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification = false + +; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification = 
false; +NEW_CONNECTION; +set +spanner.auto_batch_dml_update_count_verification += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set spanner.auto_batch_dml_update_count_verification = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set 
spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.auto_batch_dml_update_count_verification = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification =/-false; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +SET SPANNER.AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION TO TRUE; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; + set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; + set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; + + + +set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification to true ; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification to true ; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification to true + +; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +set +spanner.auto_batch_dml_update_count_verification +to +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to!true; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +*set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.auto_batch_dml_update_count_verification to true; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to/-true; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +SET SPANNER.AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION TO FALSE; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; + set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; + set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; + + + +set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification to false ; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification to false ; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification to false + +; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +set +spanner.auto_batch_dml_update_count_verification +to +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.auto_batch_dml_update_count_verification to false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+(set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to false/; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.auto_batch_dml_update_count_verification to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to/-false; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +SET 
SPANNER.AUTO_BATCH_DML_UPDATE_COUNT_VERIFICATION TO OFF; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; + set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; + set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; + + + +set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification to off ; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification to off ; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification to off + +; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +set +spanner.auto_batch_dml_update_count_verification +to +off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to off bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to off%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to%off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to off_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to_off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.auto_batch_dml_update_count_verification to off&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to&off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to off$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to$off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to off@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to@off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to off!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to!off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to off*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to*off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to off(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to(off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set 
spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to off); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to)off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to off-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to-off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to off+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to+off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to off-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to-#off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to off/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to/off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to off\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.auto_batch_dml_update_count_verification to\off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to off?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to?off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to off-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to-/off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to off/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to/#off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.auto_batch_dml_update_count_verification to off; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to off/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_batch_dml_update_count_verification to/-off; +NEW_CONNECTION; +show spanner.data_boost_enabled; +NEW_CONNECTION; +SHOW SPANNER.DATA_BOOST_ENABLED; +NEW_CONNECTION; +show spanner.data_boost_enabled; +NEW_CONNECTION; + show spanner.data_boost_enabled; +NEW_CONNECTION; + show spanner.data_boost_enabled; +NEW_CONNECTION; + + + +show spanner.data_boost_enabled; +NEW_CONNECTION; +show spanner.data_boost_enabled ; +NEW_CONNECTION; +show spanner.data_boost_enabled ; +NEW_CONNECTION; +show spanner.data_boost_enabled + +; +NEW_CONNECTION; +show spanner.data_boost_enabled; +NEW_CONNECTION; 
+show spanner.data_boost_enabled; +NEW_CONNECTION; +show +spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.data_boost_enabled bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.data_boost_enabled%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.data_boost_enabled_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.data_boost_enabled&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.data_boost_enabled$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.data_boost_enabled@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.data_boost_enabled!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.data_boost_enabled*; +NEW_CONNECTION; 
+@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.data_boost_enabled(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.data_boost_enabled); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.data_boost_enabled-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.data_boost_enabled+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.data_boost_enabled-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.data_boost_enabled/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.data_boost_enabled\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show spanner.data_boost_enabled?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.data_boost_enabled-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.data_boost_enabled/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.data_boost_enabled/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.data_boost_enabled; +NEW_CONNECTION; +show variable spanner.data_boost_enabled; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.DATA_BOOST_ENABLED; +NEW_CONNECTION; +show variable spanner.data_boost_enabled; +NEW_CONNECTION; + show variable spanner.data_boost_enabled; +NEW_CONNECTION; + show variable spanner.data_boost_enabled; +NEW_CONNECTION; + + + +show variable spanner.data_boost_enabled; +NEW_CONNECTION; +show variable spanner.data_boost_enabled ; +NEW_CONNECTION; +show variable spanner.data_boost_enabled ; +NEW_CONNECTION; +show variable spanner.data_boost_enabled + +; +NEW_CONNECTION; +show variable spanner.data_boost_enabled; +NEW_CONNECTION; +show variable spanner.data_boost_enabled; +NEW_CONNECTION; +show +variable +spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.data_boost_enabled bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable spanner.data_boost_enabled%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.data_boost_enabled_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.data_boost_enabled&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.data_boost_enabled$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.data_boost_enabled@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.data_boost_enabled!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.data_boost_enabled*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable 
spanner.data_boost_enabled(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.data_boost_enabled); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.data_boost_enabled-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.data_boost_enabled+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.data_boost_enabled-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.data_boost_enabled/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.data_boost_enabled\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable 
spanner.data_boost_enabled?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.data_boost_enabled-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.data_boost_enabled/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.data_boost_enabled/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.data_boost_enabled; +NEW_CONNECTION; +set spanner.data_boost_enabled = true; +NEW_CONNECTION; +SET SPANNER.DATA_BOOST_ENABLED = TRUE; +NEW_CONNECTION; +set spanner.data_boost_enabled = true; +NEW_CONNECTION; + set spanner.data_boost_enabled = true; +NEW_CONNECTION; + set spanner.data_boost_enabled = true; +NEW_CONNECTION; + + + +set spanner.data_boost_enabled = true; +NEW_CONNECTION; +set spanner.data_boost_enabled = true ; +NEW_CONNECTION; +set spanner.data_boost_enabled = true ; +NEW_CONNECTION; +set spanner.data_boost_enabled = true + +; +NEW_CONNECTION; +set spanner.data_boost_enabled = true; +NEW_CONNECTION; +set spanner.data_boost_enabled = true; +NEW_CONNECTION; +set +spanner.data_boost_enabled += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.data_boost_enabled = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = 
true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =/-true; +NEW_CONNECTION; +set spanner.data_boost_enabled = false; +NEW_CONNECTION; +SET SPANNER.DATA_BOOST_ENABLED = FALSE; +NEW_CONNECTION; +set spanner.data_boost_enabled = false; +NEW_CONNECTION; + set spanner.data_boost_enabled = false; +NEW_CONNECTION; + set spanner.data_boost_enabled = false; +NEW_CONNECTION; + + + +set spanner.data_boost_enabled = false; +NEW_CONNECTION; +set spanner.data_boost_enabled = false ; +NEW_CONNECTION; +set spanner.data_boost_enabled = false ; +NEW_CONNECTION; +set spanner.data_boost_enabled = false + +; +NEW_CONNECTION; +set spanner.data_boost_enabled = false; +NEW_CONNECTION; +set spanner.data_boost_enabled = false; +NEW_CONNECTION; +set +spanner.data_boost_enabled += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.data_boost_enabled = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.data_boost_enabled = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.data_boost_enabled = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled =/-false; +NEW_CONNECTION; +set spanner.data_boost_enabled to true; +NEW_CONNECTION; +SET SPANNER.DATA_BOOST_ENABLED TO TRUE; +NEW_CONNECTION; +set spanner.data_boost_enabled to true; +NEW_CONNECTION; + set spanner.data_boost_enabled to true; +NEW_CONNECTION; + set spanner.data_boost_enabled to true; +NEW_CONNECTION; + + + +set spanner.data_boost_enabled to true; +NEW_CONNECTION; +set spanner.data_boost_enabled to true ; +NEW_CONNECTION; +set spanner.data_boost_enabled to true ; +NEW_CONNECTION; +set spanner.data_boost_enabled to true + +; +NEW_CONNECTION; +set spanner.data_boost_enabled to true; +NEW_CONNECTION; +set spanner.data_boost_enabled to true; +NEW_CONNECTION; +set +spanner.data_boost_enabled +to +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.data_boost_enabled to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set 
spanner.data_boost_enabled to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.data_boost_enabled to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.data_boost_enabled to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.data_boost_enabled to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.data_boost_enabled to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.data_boost_enabled to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.data_boost_enabled to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set 
spanner.data_boost_enabled to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.data_boost_enabled to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.data_boost_enabled to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.data_boost_enabled to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.data_boost_enabled to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.data_boost_enabled to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.data_boost_enabled to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set 
spanner.data_boost_enabled to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.data_boost_enabled to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.data_boost_enabled to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.data_boost_enabled to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to/-true; +NEW_CONNECTION; +set spanner.data_boost_enabled to false; +NEW_CONNECTION; +SET SPANNER.DATA_BOOST_ENABLED TO FALSE; +NEW_CONNECTION; +set spanner.data_boost_enabled to false; +NEW_CONNECTION; + set spanner.data_boost_enabled to false; +NEW_CONNECTION; + set spanner.data_boost_enabled to false; +NEW_CONNECTION; + + + +set spanner.data_boost_enabled to false; +NEW_CONNECTION; +set spanner.data_boost_enabled to false ; +NEW_CONNECTION; +set spanner.data_boost_enabled to false ; +NEW_CONNECTION; +set spanner.data_boost_enabled to false + +; +NEW_CONNECTION; +set spanner.data_boost_enabled to false; +NEW_CONNECTION; +set spanner.data_boost_enabled to false; +NEW_CONNECTION; +set +spanner.data_boost_enabled +to +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.data_boost_enabled to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to 
false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.data_boost_enabled to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.data_boost_enabled to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.data_boost_enabled to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.data_boost_enabled to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.data_boost_enabled to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.data_boost_enabled to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.data_boost_enabled to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.data_boost_enabled to*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.data_boost_enabled to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.data_boost_enabled to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.data_boost_enabled to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.data_boost_enabled to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.data_boost_enabled to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.data_boost_enabled to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.data_boost_enabled to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to false\; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.data_boost_enabled to\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.data_boost_enabled to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.data_boost_enabled to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.data_boost_enabled to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.data_boost_enabled to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.data_boost_enabled to/-false; +NEW_CONNECTION; +show spanner.auto_partition_mode; +NEW_CONNECTION; +SHOW SPANNER.AUTO_PARTITION_MODE; +NEW_CONNECTION; +show spanner.auto_partition_mode; +NEW_CONNECTION; + show spanner.auto_partition_mode; +NEW_CONNECTION; + show spanner.auto_partition_mode; +NEW_CONNECTION; + + + +show spanner.auto_partition_mode; +NEW_CONNECTION; +show spanner.auto_partition_mode ; +NEW_CONNECTION; +show spanner.auto_partition_mode ; +NEW_CONNECTION; +show spanner.auto_partition_mode + +; +NEW_CONNECTION; +show spanner.auto_partition_mode; +NEW_CONNECTION; +show spanner.auto_partition_mode; +NEW_CONNECTION; +show +spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show spanner.auto_partition_mode bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_partition_mode%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_partition_mode_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_partition_mode&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_partition_mode$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_partition_mode@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_partition_mode!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_partition_mode*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show spanner.auto_partition_mode(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_partition_mode); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_partition_mode-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_partition_mode+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_partition_mode-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_partition_mode/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_partition_mode\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_partition_mode?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+-/show spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_partition_mode-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_partition_mode/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.auto_partition_mode/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.auto_partition_mode; +NEW_CONNECTION; +show variable spanner.auto_partition_mode; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.AUTO_PARTITION_MODE; +NEW_CONNECTION; +show variable spanner.auto_partition_mode; +NEW_CONNECTION; + show variable spanner.auto_partition_mode; +NEW_CONNECTION; + show variable spanner.auto_partition_mode; +NEW_CONNECTION; + + + +show variable spanner.auto_partition_mode; +NEW_CONNECTION; +show variable spanner.auto_partition_mode ; +NEW_CONNECTION; +show variable spanner.auto_partition_mode ; +NEW_CONNECTION; +show variable spanner.auto_partition_mode + +; +NEW_CONNECTION; +show variable spanner.auto_partition_mode; +NEW_CONNECTION; +show variable spanner.auto_partition_mode; +NEW_CONNECTION; +show +variable +spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_partition_mode bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_partition_mode%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +_show variable spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_partition_mode_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_partition_mode&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_partition_mode$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_partition_mode@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_partition_mode!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_partition_mode*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_partition_mode(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.auto_partition_mode; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_partition_mode); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_partition_mode-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_partition_mode+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_partition_mode-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_partition_mode/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_partition_mode\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_partition_mode?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.auto_partition_mode; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_partition_mode-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_partition_mode/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.auto_partition_mode/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.auto_partition_mode; +NEW_CONNECTION; +set spanner.auto_partition_mode = true; +NEW_CONNECTION; +SET SPANNER.AUTO_PARTITION_MODE = TRUE; +NEW_CONNECTION; +set spanner.auto_partition_mode = true; +NEW_CONNECTION; + set spanner.auto_partition_mode = true; +NEW_CONNECTION; + set spanner.auto_partition_mode = true; +NEW_CONNECTION; + + + +set spanner.auto_partition_mode = true; +NEW_CONNECTION; +set spanner.auto_partition_mode = true ; +NEW_CONNECTION; +set spanner.auto_partition_mode = true ; +NEW_CONNECTION; +set spanner.auto_partition_mode = true + +; +NEW_CONNECTION; +set spanner.auto_partition_mode = true; +NEW_CONNECTION; +set spanner.auto_partition_mode = true; +NEW_CONNECTION; +set +spanner.auto_partition_mode += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = true%; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.auto_partition_mode =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.auto_partition_mode =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =/-true; +NEW_CONNECTION; +set spanner.auto_partition_mode = false; +NEW_CONNECTION; +SET SPANNER.AUTO_PARTITION_MODE = FALSE; +NEW_CONNECTION; +set spanner.auto_partition_mode = false; +NEW_CONNECTION; + set spanner.auto_partition_mode = false; +NEW_CONNECTION; + set spanner.auto_partition_mode = false; +NEW_CONNECTION; + + + +set spanner.auto_partition_mode = false; +NEW_CONNECTION; +set spanner.auto_partition_mode = false ; +NEW_CONNECTION; +set spanner.auto_partition_mode = false ; +NEW_CONNECTION; +set spanner.auto_partition_mode = false + +; +NEW_CONNECTION; +set spanner.auto_partition_mode = false; +NEW_CONNECTION; +set spanner.auto_partition_mode = false; +NEW_CONNECTION; +set +spanner.auto_partition_mode += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.auto_partition_mode = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.auto_partition_mode = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.auto_partition_mode = false; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.auto_partition_mode = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode =/-false; +NEW_CONNECTION; +set spanner.auto_partition_mode to true; +NEW_CONNECTION; +SET SPANNER.AUTO_PARTITION_MODE TO TRUE; +NEW_CONNECTION; +set spanner.auto_partition_mode to true; +NEW_CONNECTION; + set spanner.auto_partition_mode to true; +NEW_CONNECTION; + set spanner.auto_partition_mode to true; +NEW_CONNECTION; + + + +set spanner.auto_partition_mode to true; +NEW_CONNECTION; +set spanner.auto_partition_mode to true ; +NEW_CONNECTION; +set spanner.auto_partition_mode to true ; +NEW_CONNECTION; +set spanner.auto_partition_mode to true + +; +NEW_CONNECTION; +set spanner.auto_partition_mode to true; +NEW_CONNECTION; +set spanner.auto_partition_mode to true; +NEW_CONNECTION; +set +spanner.auto_partition_mode +to +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.auto_partition_mode to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to true bar; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +%set spanner.auto_partition_mode to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.auto_partition_mode to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.auto_partition_mode to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.auto_partition_mode to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.auto_partition_mode to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.auto_partition_mode to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.auto_partition_mode to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to*true; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.auto_partition_mode to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.auto_partition_mode to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.auto_partition_mode to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.auto_partition_mode to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.auto_partition_mode to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.auto_partition_mode to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.auto_partition_mode to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to\true; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.auto_partition_mode to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.auto_partition_mode to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.auto_partition_mode to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.auto_partition_mode to true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to/-true; +NEW_CONNECTION; +set spanner.auto_partition_mode to false; +NEW_CONNECTION; +SET SPANNER.AUTO_PARTITION_MODE TO FALSE; +NEW_CONNECTION; +set spanner.auto_partition_mode to false; +NEW_CONNECTION; + set spanner.auto_partition_mode to false; +NEW_CONNECTION; + set spanner.auto_partition_mode to false; +NEW_CONNECTION; + + + +set spanner.auto_partition_mode to false; +NEW_CONNECTION; +set spanner.auto_partition_mode to false ; +NEW_CONNECTION; +set spanner.auto_partition_mode to false ; +NEW_CONNECTION; +set spanner.auto_partition_mode to false + +; +NEW_CONNECTION; +set spanner.auto_partition_mode to false; +NEW_CONNECTION; +set spanner.auto_partition_mode to false; +NEW_CONNECTION; +set +spanner.auto_partition_mode +to +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.auto_partition_mode to false; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.auto_partition_mode to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.auto_partition_mode to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.auto_partition_mode to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.auto_partition_mode to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.auto_partition_mode to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.auto_partition_mode to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.auto_partition_mode to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.auto_partition_mode to false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.auto_partition_mode to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.auto_partition_mode to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.auto_partition_mode to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.auto_partition_mode to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.auto_partition_mode to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.auto_partition_mode to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.auto_partition_mode to false; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.auto_partition_mode to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.auto_partition_mode to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.auto_partition_mode to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.auto_partition_mode to false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.auto_partition_mode to/-false; +NEW_CONNECTION; +show spanner.max_partitions; +NEW_CONNECTION; +SHOW SPANNER.MAX_PARTITIONS; +NEW_CONNECTION; +show spanner.max_partitions; +NEW_CONNECTION; + show spanner.max_partitions; +NEW_CONNECTION; + show spanner.max_partitions; +NEW_CONNECTION; + + + +show spanner.max_partitions; +NEW_CONNECTION; +show spanner.max_partitions ; +NEW_CONNECTION; +show spanner.max_partitions ; +NEW_CONNECTION; +show spanner.max_partitions + +; +NEW_CONNECTION; +show spanner.max_partitions; +NEW_CONNECTION; +show spanner.max_partitions; +NEW_CONNECTION; +show +spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show 
spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitions bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitions%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitions_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitions&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitions$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitions@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitions!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitions*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitions(; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitions); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitions-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitions+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitions-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitions/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitions\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitions?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitions-/; +NEW_CONNECTION; +@EXPECT 
EXCEPTION UNIMPLEMENTED +show-/spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitions/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitions/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.max_partitions; +NEW_CONNECTION; +show variable spanner.max_partitions; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.MAX_PARTITIONS; +NEW_CONNECTION; +show variable spanner.max_partitions; +NEW_CONNECTION; + show variable spanner.max_partitions; +NEW_CONNECTION; + show variable spanner.max_partitions; +NEW_CONNECTION; + + + +show variable spanner.max_partitions; +NEW_CONNECTION; +show variable spanner.max_partitions ; +NEW_CONNECTION; +show variable spanner.max_partitions ; +NEW_CONNECTION; +show variable spanner.max_partitions + +; +NEW_CONNECTION; +show variable spanner.max_partitions; +NEW_CONNECTION; +show variable spanner.max_partitions; +NEW_CONNECTION; +show +variable +spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitions bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitions%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitions_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.max_partitions; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +&show variable spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitions&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitions$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitions@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitions!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitions*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitions(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitions); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.max_partitions; +NEW_CONNECTION; +@EXPECT 
EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitions-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitions+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitions-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitions/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitions\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitions?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitions-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitions/#; +NEW_CONNECTION; 
+@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.max_partitions; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitions/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.max_partitions; +NEW_CONNECTION; +set spanner.max_partitions = 1; +NEW_CONNECTION; +SET SPANNER.MAX_PARTITIONS = 1; +NEW_CONNECTION; +set spanner.max_partitions = 1; +NEW_CONNECTION; + set spanner.max_partitions = 1; +NEW_CONNECTION; + set spanner.max_partitions = 1; +NEW_CONNECTION; + + + +set spanner.max_partitions = 1; +NEW_CONNECTION; +set spanner.max_partitions = 1 ; +NEW_CONNECTION; +set spanner.max_partitions = 1 ; +NEW_CONNECTION; +set spanner.max_partitions = 1 + +; +NEW_CONNECTION; +set spanner.max_partitions = 1; +NEW_CONNECTION; +set spanner.max_partitions = 1; +NEW_CONNECTION; +set +spanner.max_partitions += +1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.max_partitions = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 1 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.max_partitions = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 1%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =%1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.max_partitions = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 1_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =_1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.max_partitions = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 1&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =&1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set 
spanner.max_partitions = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 1$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =$1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.max_partitions = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 1@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =@1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.max_partitions = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 1!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =!1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.max_partitions = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 1*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =*1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.max_partitions = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 1(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =(1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.max_partitions = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 1); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =)1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.max_partitions = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 1-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =-1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.max_partitions = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 1+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.max_partitions =+1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.max_partitions = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 1-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =-#1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.max_partitions = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 1/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =/1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.max_partitions = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 1\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =\1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.max_partitions = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 1?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =?1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.max_partitions = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 1-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =-/1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.max_partitions = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 1/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =/#1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.max_partitions = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 1/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =/-1; +NEW_CONNECTION; +set spanner.max_partitions = 10; +NEW_CONNECTION; +SET SPANNER.MAX_PARTITIONS = 10; +NEW_CONNECTION; +set 
spanner.max_partitions = 10; +NEW_CONNECTION; + set spanner.max_partitions = 10; +NEW_CONNECTION; + set spanner.max_partitions = 10; +NEW_CONNECTION; + + + +set spanner.max_partitions = 10; +NEW_CONNECTION; +set spanner.max_partitions = 10 ; +NEW_CONNECTION; +set spanner.max_partitions = 10 ; +NEW_CONNECTION; +set spanner.max_partitions = 10 + +; +NEW_CONNECTION; +set spanner.max_partitions = 10; +NEW_CONNECTION; +set spanner.max_partitions = 10; +NEW_CONNECTION; +set +spanner.max_partitions += +10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 10 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 10%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =%10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 10_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =_10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 10&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =&10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 10$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =$10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 10@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions 
=@10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 10!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =!10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 10*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =*10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 10(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =(10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 10); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =)10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 10-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =-10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 10+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =+10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 10-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =-#10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.max_partitions = 10/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =/10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 10\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =\10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 10?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =?10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 10-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =-/10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 10/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =/#10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.max_partitions = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions = 10/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions =/-10; +NEW_CONNECTION; +set spanner.max_partitions to 5; +NEW_CONNECTION; +SET SPANNER.MAX_PARTITIONS TO 5; +NEW_CONNECTION; +set spanner.max_partitions to 5; +NEW_CONNECTION; + set spanner.max_partitions to 5; +NEW_CONNECTION; + set spanner.max_partitions to 5; +NEW_CONNECTION; + + + +set spanner.max_partitions to 5; +NEW_CONNECTION; +set spanner.max_partitions to 5 ; +NEW_CONNECTION; +set spanner.max_partitions to 5 ; +NEW_CONNECTION; +set spanner.max_partitions to 5 + +; +NEW_CONNECTION; +set spanner.max_partitions to 5; +NEW_CONNECTION; +set 
spanner.max_partitions to 5; +NEW_CONNECTION; +set +spanner.max_partitions +to +5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.max_partitions to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 5 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.max_partitions to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 5%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to%5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.max_partitions to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 5_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to_5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.max_partitions to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 5&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to&5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.max_partitions to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 5$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to$5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.max_partitions to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 5@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to@5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.max_partitions to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 5!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to!5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.max_partitions to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.max_partitions to 5*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to*5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.max_partitions to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 5(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to(5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.max_partitions to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 5); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to)5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.max_partitions to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 5-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to-5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.max_partitions to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 5+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to+5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.max_partitions to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 5-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to-#5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.max_partitions to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 5/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to/5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.max_partitions to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 5\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to\5; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +?set spanner.max_partitions to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 5?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to?5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.max_partitions to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 5-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to-/5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.max_partitions to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 5/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to/#5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.max_partitions to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 5/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to/-5; +NEW_CONNECTION; +set spanner.max_partitions to 20; +NEW_CONNECTION; +SET SPANNER.MAX_PARTITIONS TO 20; +NEW_CONNECTION; +set spanner.max_partitions to 20; +NEW_CONNECTION; + set spanner.max_partitions to 20; +NEW_CONNECTION; + set spanner.max_partitions to 20; +NEW_CONNECTION; + + + +set spanner.max_partitions to 20; +NEW_CONNECTION; +set spanner.max_partitions to 20 ; +NEW_CONNECTION; +set spanner.max_partitions to 20 ; +NEW_CONNECTION; +set spanner.max_partitions to 20 + +; +NEW_CONNECTION; +set spanner.max_partitions to 20; +NEW_CONNECTION; +set spanner.max_partitions to 20; +NEW_CONNECTION; +set +spanner.max_partitions +to +20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.max_partitions to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 20 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.max_partitions to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.max_partitions to 20%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to%20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.max_partitions to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 20_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to_20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.max_partitions to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 20&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to&20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.max_partitions to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 20$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to$20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.max_partitions to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 20@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to@20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.max_partitions to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 20!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to!20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.max_partitions to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 20*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to*20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.max_partitions to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 20(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to(20; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.max_partitions to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 20); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to)20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.max_partitions to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 20-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to-20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.max_partitions to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 20+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to+20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.max_partitions to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 20-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to-#20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.max_partitions to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 20/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to/20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.max_partitions to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 20\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to\20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.max_partitions to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 20?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to?20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.max_partitions to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.max_partitions to 20-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to-/20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.max_partitions to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 20/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to/#20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.max_partitions to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to 20/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitions to/-20; +NEW_CONNECTION; +show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +SHOW SPANNER.MAX_PARTITIONED_PARALLELISM; +NEW_CONNECTION; +show spanner.max_partitioned_parallelism; +NEW_CONNECTION; + show spanner.max_partitioned_parallelism; +NEW_CONNECTION; + show spanner.max_partitioned_parallelism; +NEW_CONNECTION; + + + +show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +show spanner.max_partitioned_parallelism ; +NEW_CONNECTION; +show spanner.max_partitioned_parallelism ; +NEW_CONNECTION; +show spanner.max_partitioned_parallelism + +; +NEW_CONNECTION; +show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +show +spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitioned_parallelism bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitioned_parallelism%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show%spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT 
EXCEPTION UNIMPLEMENTED +show spanner.max_partitioned_parallelism_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show_spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitioned_parallelism&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show&spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitioned_parallelism$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show$spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitioned_parallelism@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show@spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitioned_parallelism!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show!spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitioned_parallelism*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show*spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitioned_parallelism(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show(spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show spanner.max_partitioned_parallelism); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show)spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitioned_parallelism-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitioned_parallelism+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show+spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitioned_parallelism-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-#spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitioned_parallelism/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitioned_parallelism\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show\spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitioned_parallelism?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show?spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show spanner.max_partitioned_parallelism-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show-/spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitioned_parallelism/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/#spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.max_partitioned_parallelism/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show/-spanner.max_partitioned_parallelism; +NEW_CONNECTION; +show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.MAX_PARTITIONED_PARALLELISM; +NEW_CONNECTION; +show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; + show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; + show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; + + + +show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +show variable spanner.max_partitioned_parallelism ; +NEW_CONNECTION; +show variable spanner.max_partitioned_parallelism ; +NEW_CONNECTION; +show variable spanner.max_partitioned_parallelism + +; +NEW_CONNECTION; +show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +show +variable +spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitioned_parallelism bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitioned_parallelism%; 
+NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitioned_parallelism_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitioned_parallelism&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitioned_parallelism$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitioned_parallelism@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitioned_parallelism!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitioned_parallelism*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +(show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitioned_parallelism(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitioned_parallelism); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitioned_parallelism-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitioned_parallelism+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitioned_parallelism-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitioned_parallelism/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show variable spanner.max_partitioned_parallelism\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitioned_parallelism?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitioned_parallelism-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitioned_parallelism/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.max_partitioned_parallelism; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.max_partitioned_parallelism/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.max_partitioned_parallelism; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +SET SPANNER.MAX_PARTITIONED_PARALLELISM = 1; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; + set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; + set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; + + + +set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism = 1 ; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism = 1 ; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism = 1 + +; 
+NEW_CONNECTION; +set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +set +spanner.max_partitioned_parallelism += +1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 1 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 1%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =%1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 1_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =_1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 1&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =&1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 1$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =$1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 1@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =@1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.max_partitioned_parallelism = 1; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 1!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =!1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 1*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =*1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 1(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =(1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 1); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =)1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 1-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =-1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 1+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =+1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 1-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =-#1; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 1/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =/1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 1\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =\1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 1?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =?1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 1-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =-/1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 1/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =/#1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.max_partitioned_parallelism = 1; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 1/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =/-1; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +SET SPANNER.MAX_PARTITIONED_PARALLELISM = 10; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism = 10; 
+NEW_CONNECTION; + set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; + set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; + + + +set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism = 10 ; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism = 10 ; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism = 10 + +; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +set +spanner.max_partitioned_parallelism += +10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 10 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 10%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =%10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 10_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =_10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 10&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =&10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 10$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.max_partitioned_parallelism =$10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 10@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =@10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 10!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =!10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 10*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =*10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 10(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =(10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 10); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =)10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 10-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =-10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 10+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =+10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 10-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =-#10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 10/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =/10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 10\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =\10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 10?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =?10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 10-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =-/10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 10/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =/#10; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.max_partitioned_parallelism = 10; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism = 10/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism =/-10; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +SET SPANNER.MAX_PARTITIONED_PARALLELISM TO 5; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; + set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; + set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; + + + +set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism to 5 ; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism to 5 ; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism to 5 + +; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +set +spanner.max_partitioned_parallelism +to +5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 5 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 5%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to%5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 5_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to_5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.max_partitioned_parallelism to 
5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 5&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to&5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 5$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to$5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 5@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to@5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 5!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to!5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 5*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to*5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 5(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to(5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 5); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.max_partitioned_parallelism to)5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 5-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to-5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 5+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to+5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 5-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to-#5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 5/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to/5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 5\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to\5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 5?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to?5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 5-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to-/5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 5/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to/#5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.max_partitioned_parallelism to 5; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 5/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to/-5; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +SET SPANNER.MAX_PARTITIONED_PARALLELISM TO 20; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; + set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; + set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; + + + +set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism to 20 ; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism to 20 ; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism to 20 + +; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +set +spanner.max_partitioned_parallelism +to +20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 20 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 20%; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to%20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 20_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to_20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 20&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to&20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 20$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to$20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 20@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to@20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 20!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to!20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 20*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to*20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set 
spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 20(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to(20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 20); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to)20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 20-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to-20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 20+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to+20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 20-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to-#20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 20/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to/20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 20\; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to\20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 20?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to?20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 20-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to-/20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 20/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to/#20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.max_partitioned_parallelism to 20; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to 20/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_partitioned_parallelism to/-20; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/CommentsTest.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/CommentsTest.sql new file mode 100644 index 000000000000..51ae1e7203cb --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/CommentsTest.sql @@ -0,0 +1,187 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +@EXPECT 'SELECT 1'; +SELECT 1; +-- This is a single line comment +SELECT 1; +/* This is a multi line comment on one line */ +SELECT 1; +/* This + is + a + multiline + comment +*/ +SELECT 1; +/* This + * is + * a + * multiline + * comment + */ +SELECT 1; +/** This is a javadoc style comment on one line*/ +SELECT 1; +/** This + is + a + javadoc + style + comment + on + multiple + lines +*/ +SELECT 1; +/** This + * is + * a + * javadoc + * style + * comment + * on + * multiple + * lines + */ +SELECT 1; + +@EXPECT 'SELECT + 1'; +-- First comment +SELECT-- second comment + 1; +-- First comment +SELECT-- second comment + 1--third comment +; +@EXPECT 'SELECT + 1'; +/* First comment */ +SELECT/* second comment */ + 1; +/* First comment */ +SELECT/* second comment */ + 1/* Third comment */ +; + + +@EXPECT 'SELECT + 1'; +-- First comment +SELECT -- second comment + 1 ; +-- First comment +SELECT -- second comment + 1 --third comment +; +@EXPECT 'SELECT + 1'; +/* First comment */ +SELECT /* second comment */ + 1 ; +/* First comment */ +SELECT /* second comment */ + 1 /* Third comment */ +; + +@EXPECT 'SELECT "TEST -- This is not a comment"'; +SELECT "TEST -- This is not a comment"; +-- This is a comment +SELECT "TEST -- This is not a comment"; +-- This is a comment +SELECT "TEST -- This is not a comment" -- This is a comment; + + @EXPECT 'SELECT "TEST /* This is not a comment */"'; +SELECT "TEST /* This is not a comment */"; +/* This is a comment */ +SELECT "TEST /* This is not a comment */"; +/* This is a comment */ +SELECT "TEST /* This is not a 
comment */" /* This is a comment */; + +@EXPECT 'SELECT 'TEST -- This is not a comment''; +SELECT 'TEST -- This is not a comment'; +-- This is a comment +SELECT 'TEST -- This is not a comment'; +-- This is a comment +SELECT 'TEST -- This is not a comment' -- This is a comment; + + @EXPECT 'SELECT 'TEST /* This is not a comment */''; +SELECT 'TEST /* This is not a comment */'; +/* This is a comment */ +SELECT 'TEST /* This is not a comment */'; +/* This is a comment */ +SELECT 'TEST /* This is not a comment */' /* This is a comment */; + +@EXPECT 'SELECT $$TEST +-- This is not a comment +$$'; +SELECT $$TEST +-- This is not a comment +$$; +-- This is a comment +SELECT $$TEST +-- This is not a comment +$$; +-- This is a comment +SELECT $$TEST +-- This is not a comment +$$ -- This is a comment; + + @EXPECT 'SELECT $$TEST +/* This is not a comment */ +$$'; +SELECT $$TEST +/* This is not a comment */ +$$; +/* This is a comment */ +SELECT $$TEST +/* This is not a comment */ +$$; +/* This is a comment */ +SELECT $$TEST +/* This is not a comment */ +$$ /* This is a comment */; + +@EXPECT 'SELECT 1'; +/* This is a comment /* This is an embedded comment */ This is still a comment */ +SELECT 1; +/** This is a javadoc style comment /* This is an embedded comment */ This is still a comment */ +SELECT 1; +/** This is a javadoc style comment /** This is an embedded comment */ This is still a comment */ +SELECT 1; +/** This is a javadoc style comment /** This is an embedded comment **/ This is still a comment **/ +SELECT 1; +/* multiline comment + * with nesting: /* nested block comment */ + */ +SELECT 1; + +@EXPECT 'SELECT U&"d\0061t\+000061" FROM FOO'; +SELECT U&"d\0061t\+000061" FROM FOO; +/* This is a comment /* U&"d\0061t\+000061" */ This is still a comment */ +SELECT U&"d\0061t\+000061" FROM FOO; +SELECT U&"d\0061t\+000061" /* This is a comment /* U&"d\0061t\+000061" */ This is still a comment */FROM FOO; + +@EXPECT 'SELECT U&"d\0061t\+000061" +FROM FOO'; +SELECT 
U&"d\0061t\+000061" -- U&"d\0061t\+000061" == data */ This is still a comment */ +FROM FOO; + +@EXPECT 'SELECT 'foo' +'bar' +SELECT 'foo' -- This is allowed in PostgreSQL +'bar'; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/ConnectionImplGeneratedSqlScriptTest.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/ConnectionImplGeneratedSqlScriptTest.sql new file mode 100644 index 000000000000..72f50037384d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/ConnectionImplGeneratedSqlScriptTest.sql @@ -0,0 +1,13655 @@ +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; 
+SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; 
+SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.019000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:18.019000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.019000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 
'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; 
+@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name 
STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; 
+SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; 
+SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.115000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:18.115000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.115000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET 
SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET 
SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET 
STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT 
EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; 
+ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; 
+BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN 
TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.217000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:18.217000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.217000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET 
SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; 
+NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +COMMIT; 
+NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET 
SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; 
+COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP' +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 
+SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.309000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:18.309000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.309000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 
1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; 
+SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' 
+SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET 
AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET 
AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; 
+@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READONLY=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.382000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:18.382000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.382000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE 
SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE 
STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN 
TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; 
+SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP' +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; 
+SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READONLY=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.457000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.457000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; 
+NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE 
SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT 
RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; 
+SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP' +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; 
+SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READONLY=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.532000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.532000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +SET SPANNER.OPTIMIZER_VERSION='1'; 
+@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT 
RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT 
EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET 
AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.593000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:18.593000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET 
SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.593000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; 
+SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE 
SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; 
+COMMIT; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; 
+@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.668000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:18.668000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.668000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION 
FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE 
SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; 
+COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) 
PRIMARY KEY (id); +RUN BATCH; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY 
KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name 
STRING(100)) PRIMARY KEY (id); +RUN BATCH; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.729000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:18.729000000Z' 
+SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.729000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 
'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET 
SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE 
TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE 
foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; 
+START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY 
KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READONLY=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.785000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id 
INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.785000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 
'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW 
VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo 
(id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET 
AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; 
+SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READONLY=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.837000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.837000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET 
AUTOCOMMIT=FALSE; +START BATCH DDL; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW 
VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET 
SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 
'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 
AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP' +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 
'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.895000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:18.895000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.895000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE 
SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET 
SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET 
STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 
'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; 
+SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READONLY=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW 
VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.947000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:18.947000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.947000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; 
+@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET 
STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET 
AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET 
SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET 
AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:18.998000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:18.998000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET 
SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:18.998000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET 
SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW 
VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +BEGIN 
TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET 
SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT 
RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 
'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.059000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.059000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.059000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; 
+NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT 
RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE 
STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT 
RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; 
+NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; 
+@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; 
+BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.130000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.130000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.130000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP' +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET 
SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP' +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; 
+SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +COMMIT; +NEW_CONNECTION; 
+SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET 
SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET 
SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READONLY=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.186000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.186000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.186000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE 
SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; 
+NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE 
SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET 
SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 
+SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READONLY=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 
2026-01-05T11:33:19.247000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.247000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; 
+NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW 
VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT 
RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; 
+NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READONLY=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET 
AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.307000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.307000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; 
+SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 
'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET 
AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE 
SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.360000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.360000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.360000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE 
SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE 
SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET 
SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET 
SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION 
+START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READONLY=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.409000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.409000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 
'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE 
STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; 
+SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN 
TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET 
AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READONLY=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.459000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.459000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.459000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION 
READ ONLY; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET 
SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET 
TRANSACTION READ ONLY; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET 
SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET SPANNER.AUTOCOMMIT_DML_MODE='TRANSACTIONAL'; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE','TRANSACTIONAL' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE','PARTITIONED_NON_ATOMIC' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +SET SPANNER.AUTOCOMMIT_DML_MODE='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE','TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo 
SET bar=1; +COMMIT; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 
NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.522000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.522000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.522000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:19.522000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; 
+SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP' +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN 
TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP' +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 
'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET 
SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET 
AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READONLY=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.575000000Z'; +@EXPECT RESULT_SET 
'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.575000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.575000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN 
TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 
'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE 
TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name 
STRING(100)) PRIMARY KEY (id); +SET SPANNER.AUTOCOMMIT_DML_MODE='TRANSACTIONAL'; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE','TRANSACTIONAL' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE','PARTITIONED_NON_ATOMIC' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +SET SPANNER.AUTOCOMMIT_DML_MODE='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE','TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET 
TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 
'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.627000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.627000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.627000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:19.627000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE 
SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT 
RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET 
SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET 
bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET SPANNER.AUTOCOMMIT_DML_MODE='TRANSACTIONAL'; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE','TRANSACTIONAL' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE','PARTITIONED_NON_ATOMIC' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +SET SPANNER.AUTOCOMMIT_DML_MODE='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE','TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET 
SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET 
SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.683000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.683000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.683000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:19.683000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 
'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP' +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP' +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 
'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET 
bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +UPDATE foo SET bar=1; 
+NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET SPANNER.AUTOCOMMIT_DML_MODE='TRANSACTIONAL'; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE','TRANSACTIONAL' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE','PARTITIONED_NON_ATOMIC' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +SET SPANNER.AUTOCOMMIT_DML_MODE='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE','TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 
'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP' +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT 
RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.742000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.742000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.742000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:19.742000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET 
AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; 
+NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 
AS TEST; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET 
SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.AUTOCOMMIT_DML_MODE='TRANSACTIONAL'; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE','TRANSACTIONAL' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE','PARTITIONED_NON_ATOMIC' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +SET SPANNER.AUTOCOMMIT_DML_MODE='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE','TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET 
AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP' +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET 
AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.797000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.797000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.797000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:19.797000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 
'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW 
VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS 
TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT UPDATE_COUNT 1 +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; 
+NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.AUTOCOMMIT_DML_MODE='TRANSACTIONAL'; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE','TRANSACTIONAL' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE','PARTITIONED_NON_ATOMIC' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +SET SPANNER.AUTOCOMMIT_DML_MODE='TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC'; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE','TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DDL; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION 
FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +START BATCH DML; +@EXPECT EXCEPTION FAILED_PRECONDITION +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.849000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.849000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.849000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:19.849000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 
'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 
'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 
'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=FALSE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION 
FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET 
SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 
2026-01-05T11:33:19.898000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.898000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.898000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:19.898000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET 
SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW 
VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET 
SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET 
SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; 
+COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP' +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:19.954000000Z'; +@EXPECT 
RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:19.954000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:19.954000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:19.954000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' 
+SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; 
+SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; 
+SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET AUTOCOMMIT=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; 
+BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; 
+SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READONLY=FALSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:20.005000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:20.005000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:20.005000000Z'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; 
+BEGIN TRANSACTION; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE 
STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET 
SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT 
EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP' +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE 
+SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:20.057000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:20.057000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:20.057000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:20.057000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; 
+SELECT 1 AS TEST; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE 
STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; 
+NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET 
SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP' +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET 
SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:20.111000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:20.111000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:20.111000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:20.111000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; 
+@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' 
+SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS 
TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +ABORT BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET AUTOCOMMIT=FALSE; +@EXPECT RESULT_SET 'AUTOCOMMIT',FALSE +SHOW VARIABLE AUTOCOMMIT; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'AUTOCOMMIT',TRUE +SHOW VARIABLE AUTOCOMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; 
+SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'TEST',1 +SELECT 1 AS TEST; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'SPANNER.AUTOCOMMIT_DML_MODE' +SHOW VARIABLE SPANNER.AUTOCOMMIT_DML_MODE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ ONLY; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET TRANSACTION READ WRITE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'SPANNER.READ_TIMESTAMP',null +SHOW VARIABLE SPANNER.READ_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DDL; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +START BATCH DML; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +ROLLBACK; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READONLY=FALSE; +@EXPECT RESULT_SET 'SPANNER.READONLY',FALSE +SHOW VARIABLE SPANNER.READONLY; +SET SPANNER.READONLY=TRUE; +@EXPECT RESULT_SET 'SPANNER.READONLY',TRUE +SHOW VARIABLE SPANNER.READONLY; +NEW_CONNECTION; +SET 
SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2026-01-05T11:33:20.160000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2026-01-05T11:33:20.160000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2026-01-05T11:33:20.160000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2026-01-05T11:33:20.160000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 100ms'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 100ms' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 100us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 100us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_RESPONSE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.OPTIMIZER_VERSION='1'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','1' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='2'; +@EXPECT RESULT_SET 
'SPANNER.OPTIMIZER_VERSION','2' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION='latest'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','latest' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +SET SPANNER.OPTIMIZER_VERSION=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_VERSION','' +SHOW VARIABLE SPANNER.OPTIMIZER_VERSION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'SPANNER.COMMIT_TIMESTAMP',null +SHOW VARIABLE SPANNER.COMMIT_TIMESTAMP; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE='custom-package'; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','custom-package' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +SET SPANNER.OPTIMIZER_STATISTICS_PACKAGE=''; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE','' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET 
STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT=DEFAULT; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT='0ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +COMMIT; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT RESULT_SET 'SPANNER.OPTIMIZER_STATISTICS_PACKAGE' +SHOW VARIABLE SPANNER.OPTIMIZER_STATISTICS_PACKAGE; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +RUN BATCH; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +@EXPECT EXCEPTION FAILED_PRECONDITION +SET SPANNER.TRANSACTION_TAG = 'some-tag'; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; +BEGIN TRANSACTION; +SELECT 1 AS TEST; +@EXPECT EXCEPTION FAILED_PRECONDITION +UPDATE foo SET bar=1; +@EXPECT EXCEPTION FAILED_PRECONDITION +CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); +@EXPECT EXCEPTION FAILED_PRECONDITION +BEGIN TRANSACTION; +NEW_CONNECTION; +SET SPANNER.READONLY=TRUE; +SET AUTOCOMMIT=TRUE; diff --git 
a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/SetReadOnlyStalenessTest.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/SetReadOnlyStalenessTest.sql new file mode 100644 index 000000000000..76c014526b05 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/SetReadOnlyStalenessTest.sql @@ -0,0 +1,575 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +-- Test valid values for strong +SET SPANNER.READ_ONLY_STALENESS='strong'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +SET SPANNER.READ_ONLY_STALENESS='STRONG'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +SET SPANNER.READ_ONLY_STALENESS='Strong'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +SET SPANNER.READ_ONLY_STALENESS = 'strong'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +SET SPANNER.READ_ONLY_STALENESS = 'strong'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +SET SPANNER.READ_ONLY_STALENESS + = + 'strong'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +SET SPANNER.READ_ONLY_STALENESS='strong'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; +SET SPANNER.READ_ONLY_STALENESS='strong'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','STRONG' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +-- Test invalid values for strong +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='strongg'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='sstrong'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='strng'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=' strong'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='strong '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=' strong '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=' strong'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='strong '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=' strong '; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=strong; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS="strong"; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=`strong`; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='''strong'''; + + +-- Test valid values for min_read_timestamp +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2018-12-07T13:36:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='Min_Read_Timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2018-12-07T13:36:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2018-12-07T13:36:00.01Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2018-12-07T13:36:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2018-12-07T13:36:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2018-12-07T13:36:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2000-02-29T13:36:00.01Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2000-02-29T13:36:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2004-02-29T13:36:00.01Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2004-02-29T13:36:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 
2019-01-01T00:00:00Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2019-01-01T00:00:00Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2019-01-01T00:00:00Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2019-01-01T00:00:00Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2019-01-01T00:00:00Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2019-01-01T00:00:00Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01+01:00'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2018-12-07T12:36:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01-01:00'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2018-12-07T14:36:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01+06:30'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2018-12-07T07:06:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01+24:00'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2018-12-06T13:36:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + + +-- Test invalid values for min_read_timestamp +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min_read_timestampp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='mmin_read_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min_red_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min read 
timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min-read-timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min%read%timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=' min_read_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01Z '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=' min_read_timestamp 2018-12-07T13:36:00.01Z '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=' min_read_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01Z '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=' min_read_timestamp 2018-12-07T13:36:00.01Z '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=min_read_timestamp 2018-12-07T13:36:00.01Z; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS="min_read_timestamp 2018-12-07T13:36:00.01Z"; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=`min_read_timestamp 2018-12-07T13:36:00.01Z`; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='''min_read_timestamp 2018-12-07T13:36:00.01Z'''; + +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07 13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T3:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.9999999999Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 
2018-12-7T13:36:00.01Z'; + +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01+8'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01+08'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01+08:0'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.0108:00'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01+08:00.0'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01+08:000'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01+100:00'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01*08:00'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01%08:00'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01 08:00'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='min_read_timestamp 2018-12-07T13:36:00.01Z+08:00'; + + + + +-- Test valid values for read_timestamp +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2018-12-07T13:36:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='Read_Timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2018-12-07T13:36:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 
2018-12-07T13:36:00.01Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2018-12-07T13:36:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2018-12-07T13:36:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2018-12-07T13:36:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2000-02-29T13:36:00.01Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2000-02-29T13:36:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2004-02-29T13:36:00.01Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2004-02-29T13:36:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2019-01-01T00:00:00Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2019-01-01T00:00:00Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2019-01-01T00:00:00Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2019-01-01T00:00:00Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2019-01-01T00:00:00Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2019-01-01T00:00:00Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01+01:00'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2018-12-07T12:36:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01-01:00'; +@EXPECT RESULT_SET 
'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2018-12-07T14:36:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01+06:30'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2018-12-07T07:06:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01+24:00'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2018-12-06T13:36:00.010000000Z' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + + +-- Test invalid values for read_timestamp +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read_timestampp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='mread_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='red_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read-timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read%timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=' read_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01Z '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=' read_timestamp 2018-12-07T13:36:00.01Z '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=' read_timestamp 2018-12-07T13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01Z '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=' read_timestamp 2018-12-07T13:36:00.01Z '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET 
SPANNER.READ_ONLY_STALENESS=read_timestamp 2018-12-07T13:36:00.01Z; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS="read_timestamp 2018-12-07T13:36:00.01Z"; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=`read_timestamp 2018-12-07T13:36:00.01Z`; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='''read_timestamp 2018-12-07T13:36:00.01Z'''; + +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07 13:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T3:36:00.01Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.9999999999Z'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-7T13:36:00.01Z'; + +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01+8'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01+08'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01+08:0'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.0108:00'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01+08:00.0'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01+08:000'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01+100:00'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01*08:00'; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01%08:00'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01 08:00'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='read_timestamp 2018-12-07T13:36:00.01Z+08:00'; + + +-- Test valid values for exact_staleness +SET SPANNER.READ_ONLY_STALENESS='exact_staleness 10s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 10s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='Exact_Staleness 10s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 10s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 10s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='exact_staleness 10s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 10s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='exact_staleness 10s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 10s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='exact_staleness 1ns'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1ns' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='exact_staleness 1us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='exact_staleness 1ms'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1ms' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='exact_staleness 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET 
SPANNER.READ_ONLY_STALENESS='exact_staleness 9999s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 9999s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='exact_staleness 10s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 10s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='exact_staleness 10s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 10s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='exact_staleness 1000ms'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='exact_staleness 1001ms'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1001ms' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + + +SET SPANNER.READ_ONLY_STALENESS='exact_staleness 1000us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1ms' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='exact_staleness 1001us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1001us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='exact_staleness 1000ns'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='exact_staleness 1001ns'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','EXACT_STALENESS 1001ns' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + + +-- Test invalid values for exact_staleness +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='exact_stalenesss 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='eexact_staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='exct_staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='exact 
staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='exact-staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='exact%staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=' exact_staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='exact_staleness 10s '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=' exact_staleness 10s '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=' exact_staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='exact_staleness 10s '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=' exact_staleness 10s '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=exact_staleness 10s; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS="exact_staleness 10s"; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=`exact_staleness 10s`; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='''exact_staleness 10s'''; + +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='exact_staleness 10'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='exact_staleness 10mus'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='exact_staleness 999999999999s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='exact_staleness not_a_number'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='exact_staleness'; + + + +-- Test valid values for max_staleness +SET SPANNER.READ_ONLY_STALENESS='max_staleness 10s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 10s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='max_Staleness 10s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 10s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET 
SPANNER.READ_ONLY_STALENESS='max_STALENESS 10s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 10s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='max_staleness 10s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 10s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='max_staleness 10s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 10s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='max_staleness 1ns'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 1ns' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='max_staleness 1us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 1us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='max_staleness 1ms'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 1ms' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='max_staleness 1s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='max_staleness 9999s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 9999s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='max_staleness 10s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 10s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='max_staleness 10s'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 10s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='max_staleness 1000ms'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 1s' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='max_staleness 1001ms'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 1001ms' 
+SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + + +SET SPANNER.READ_ONLY_STALENESS='max_staleness 1000us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 1ms' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='max_staleness 1001us'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 1001us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='max_staleness 1000ns'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 1us' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + +SET SPANNER.READ_ONLY_STALENESS='max_staleness 1001ns'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MAX_STALENESS 1001ns' +SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; + + +-- Test invalid values for max_staleness +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='max_stalenesss 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='emax_staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='mx_staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='max staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='max-staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='max%staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=' max_staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='max_staleness 10s '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=' max_staleness 10s '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=' max_staleness 10s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='max_staleness 10s '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=' max_staleness 10s '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=max_staleness 10s; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS="max_staleness 10s"; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS=`max_staleness 10s`; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='''max_staleness 10s'''; + +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='max_staleness 10'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='max_staleness 10mus'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='max_staleness 999999999999s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='max_staleness not_a_number'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET SPANNER.READ_ONLY_STALENESS='max_staleness'; + diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/SetStatementTimeoutTest.sql b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/SetStatementTimeoutTest.sql new file mode 100644 index 000000000000..d0b80d94f592 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/SetStatementTimeoutTest.sql @@ -0,0 +1,170 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +-- Test valid values +-- Null (no timeout) +SET STATEMENT_TIMEOUT=default; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','0' +SHOW VARIABLE STATEMENT_TIMEOUT; + +-- Seconds +SET STATEMENT_TIMEOUT='1s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; +SET STATEMENT_TIMEOUT = '2s'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','2s' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1S'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; + +-- Milliseconds +SET STATEMENT_TIMEOUT='1ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1Ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1mS'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1MS'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; + +-- Microseconds +SET STATEMENT_TIMEOUT='1us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1Us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1uS'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1US'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; + +-- Nanoseconds +SET STATEMENT_TIMEOUT='1ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1Ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1nS'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1NS'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ns' +SHOW VARIABLE STATEMENT_TIMEOUT; + +-- Test flip to higher time unit +SET STATEMENT_TIMEOUT='1000ns'; +@EXPECT RESULT_SET 
'STATEMENT_TIMEOUT','1us' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1001ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1001ns' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1000us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1001us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1001us' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1000000ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1000ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1001ms'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1001ms' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1000000000ns'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT='1000000us'; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1s' +SHOW VARIABLE STATEMENT_TIMEOUT; + +-- No unit (milliseconds) +SET STATEMENT_TIMEOUT=1; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','1ms' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT=100; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','100ms' +SHOW VARIABLE STATEMENT_TIMEOUT; + +SET STATEMENT_TIMEOUT=10000; +@EXPECT RESULT_SET 'STATEMENT_TIMEOUT','10s' +SHOW VARIABLE STATEMENT_TIMEOUT; + +-- Invalid suffixes +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='1m'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='1mi'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='1h'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='1mus'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='1n'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='1u'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='1'; + +-- Invalid numbers +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='-1s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='a1s'; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='0xas'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='0x1s'; + +-- Invalid because of spaces +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='1 s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT=' 1s'; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='1s '; +@EXPECT EXCEPTION INVALID_ARGUMENT +SET STATEMENT_TIMEOUT='1m s'; diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/test-key-app-default.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/test-key-app-default.json new file mode 100644 index 000000000000..3e849240609d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/test-key-app-default.json @@ -0,0 +1,12 @@ +{ + "type": "service_account", + "project_id": "app-default-test-project", + "private_key_id": "some-key-id", + "private_key": "-----BEGIN PRIVATE 
KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCgFkCu/z3Sn8Py\nIHcUSSyCOjZ17AB/0F5IvhuGtTZTIJ33ltxeYmb69qK0m4QSk75HH5eX1OgF7/6E\nraqDaKlgi8ecSot1XrMRKyIY+LlTLMWeNtG54vymwjeH9wrMWQWfHJUCVUgUE8hL\nxQMEbU2Hn8t6P1JaGkfsm8cLiD0ve25iw/P2OPDdJukBlE2+47OL8RndyziVnLkW\n1y1kfujd83Cnf0sJp78fBKI7IRBWyXTS1C6/reZlkPq+D9ImCvn4YHpI6J1rsSDD\nitDbA0v/8l7KLyJoOK9vsFym4yhs4Q+Nzathv/ozvdHhob8I6JVizb5+D4QJ0GZ4\n9bMIzpV3AgMBAAECggEANbrCQvnP2h1dOqrCzMAyfBLlrHZlRIC+5FNKEdBIR2Yv\nHqY3mEYWoiydN1jq3wFPX7euvXrr1PJTzlwrWqeKEalYeZBbdi0ulJiRfSgdq1jf\nitaDVYxll+KfmLbFnQhT5q9FI99Trcll9GhPNvEokFiFhqYyEaO4eCVgjPJQ9tpC\njnKl9eLeHkhYK1pOKxJVBqzPcZI3F9jNvXWYTO1F7lFMom9I89W+NtBRAeYZnVao\nfH52GDJProB5A9BtRXkuLDzmFFjGjr8oT4E32wLuiu2UvNHgIZe7Hz+fMGGHsDHS\nc5jogukVwgt7QrfKVtoRTk8h2jyIrpRy4AQLfJZ4QQKBgQDT0X3+mp+tbEncLFBW\nXLT1k/1VWumi1TipiVsMpUcAZjWTGO8/WJF83IcNBAFqBJGyTAwplXIyUeI8CGg0\nOkBPY1Cy/dtTyiav05vRPZPkT44mhBua2tyhaE12DVo1/6ezF88yTPQkO9EjlvLs\nW32XlhqGQ8O587xjNaB/rqsjXwKBgQDBenKlO3eDBSfVb/te/xwqeVnKBgEYIAOJ\nN+FE49aH05n8DYQij1m8+Ck6sFBZywWliIQdS6d6J4NE69Ok7bfowwVqoumX8LWD\ntstMkOE2FmPCF4nbxkAFYbcuep1iItA2LVSwh3ChAA6aexA/fV6bMauhJPJTSnwX\nItnKvPMc6QKBgBrwZtTNt4cn4ZDl9eW17rHY+3wyjspN0eIF/RVzo78SQLgPkMX+\nrqoxpd9q5f8ky57gex+CyT5LGbnG2/HggrNWDzpkfNOAP0FXaVbIPRnpYEvXu2cL\ndMn2aPudoR6DAEIPwiNElDxTezrKhOS4khWIWqE+1xK8Q/ZeKKZ0gYGDAoGAcc3p\no6FgAfRFYvl0fYNHeQBaPUfc2ujxy4PQAKqXpNtlhuoYYA+79DhwX/IXwUl3L9Am\nDelTQLn/L8oberbNZ59XD0t2ZYYT7r7VxFqv7hWrZh5cW6a4P7IjgrZi3reli0iM\nuS1hpYIYFOvwObgvrs+/qZDG7REx/pXkT6lmwwkCgYEAub91mMF6YR76ETPAdFLs\nfTAP+Q06Ate+gw1Ob6Cf+biF6TXe/B3Ql1R4FzPF9xG1rS/BfI+bUi3W68/jDill\nHnDPvFxX9Q82/KNCY9LlzbuQLWjziyQk6BgXtcKWT1K1zy4tsZZy5WVvXEXdF+XV\n3MMUkEaBHE5iddCl7Kkbvfg=\n-----END PRIVATE KEY-----\n", + "client_email": "test@app-default-test-project.iam.gserviceaccount.com", + "client_id": "1234567890", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://accounts.google.com/o/oauth2/token", + "auth_provider_x509_cert_url": 
"https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test%40app-default-test-project.iam.gserviceaccount.com" +} diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/test-key-cloud-storage.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/test-key-cloud-storage.json new file mode 100644 index 000000000000..cf2157696a80 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/test-key-cloud-storage.json @@ -0,0 +1,12 @@ +{ + "type": "service_account", + "project_id": "gs-test-project", + "private_key_id": "cloud-storage-key-id", + "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCgFkCu/z3Sn8Py\nIHcUSSyCOjZ17AB/0F5IvhuGtTZTIJ33ltxeYmb69qK0m4QSk75HH5eX1OgF7/6E\nraqDaKlgi8ecSot1XrMRKyIY+LlTLMWeNtG54vymwjeH9wrMWQWfHJUCVUgUE8hL\nxQMEbU2Hn8t6P1JaGkfsm8cLiD0ve25iw/P2OPDdJukBlE2+47OL8RndyziVnLkW\n1y1kfujd83Cnf0sJp78fBKI7IRBWyXTS1C6/reZlkPq+D9ImCvn4YHpI6J1rsSDD\nitDbA0v/8l7KLyJoOK9vsFym4yhs4Q+Nzathv/ozvdHhob8I6JVizb5+D4QJ0GZ4\n9bMIzpV3AgMBAAECggEANbrCQvnP2h1dOqrCzMAyfBLlrHZlRIC+5FNKEdBIR2Yv\nHqY3mEYWoiydN1jq3wFPX7euvXrr1PJTzlwrWqeKEalYeZBbdi0ulJiRfSgdq1jf\nitaDVYxll+KfmLbFnQhT5q9FI99Trcll9GhPNvEokFiFhqYyEaO4eCVgjPJQ9tpC\njnKl9eLeHkhYK1pOKxJVBqzPcZI3F9jNvXWYTO1F7lFMom9I89W+NtBRAeYZnVao\nfH52GDJProB5A9BtRXkuLDzmFFjGjr8oT4E32wLuiu2UvNHgIZe7Hz+fMGGHsDHS\nc5jogukVwgt7QrfKVtoRTk8h2jyIrpRy4AQLfJZ4QQKBgQDT0X3+mp+tbEncLFBW\nXLT1k/1VWumi1TipiVsMpUcAZjWTGO8/WJF83IcNBAFqBJGyTAwplXIyUeI8CGg0\nOkBPY1Cy/dtTyiav05vRPZPkT44mhBua2tyhaE12DVo1/6ezF88yTPQkO9EjlvLs\nW32XlhqGQ8O587xjNaB/rqsjXwKBgQDBenKlO3eDBSfVb/te/xwqeVnKBgEYIAOJ\nN+FE49aH05n8DYQij1m8+Ck6sFBZywWliIQdS6d6J4NE69Ok7bfowwVqoumX8LWD\ntstMkOE2FmPCF4nbxkAFYbcuep1iItA2LVSwh3ChAA6aexA/fV6bMauhJPJTSnwX\nItnKvPMc6QKBgBrwZtTNt4cn4ZDl9eW17rHY+3wyjspN0eIF/RVzo78SQLgPkMX+\nrqoxpd9q5f8ky5
7gex+CyT5LGbnG2/HggrNWDzpkfNOAP0FXaVbIPRnpYEvXu2cL\ndMn2aPudoR6DAEIPwiNElDxTezrKhOS4khWIWqE+1xK8Q/ZeKKZ0gYGDAoGAcc3p\no6FgAfRFYvl0fYNHeQBaPUfc2ujxy4PQAKqXpNtlhuoYYA+79DhwX/IXwUl3L9Am\nDelTQLn/L8oberbNZ59XD0t2ZYYT7r7VxFqv7hWrZh5cW6a4P7IjgrZi3reli0iM\nuS1hpYIYFOvwObgvrs+/qZDG7REx/pXkT6lmwwkCgYEAub91mMF6YR76ETPAdFLs\nfTAP+Q06Ate+gw1Ob6Cf+biF6TXe/B3Ql1R4FzPF9xG1rS/BfI+bUi3W68/jDill\nHnDPvFxX9Q82/KNCY9LlzbuQLWjziyQk6BgXtcKWT1K1zy4tsZZy5WVvXEXdF+XV\n3MMUkEaBHE5iddCl7Kkbvfg=\n-----END PRIVATE KEY-----\n", + "client_email": "cloud-storage@gs-test-project.iam.gserviceaccount.com", + "client_id": "1234567890", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://accounts.google.com/o/oauth2/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/cloud-storage%40gs-test-project.iam.gserviceaccount.com" +} diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/test-key.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/test-key.json new file mode 100644 index 000000000000..b7f5351350a2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/test-key.json @@ -0,0 +1,12 @@ +{ + "type": "service_account", + "project_id": "test-project", + "private_key_id": "some-key-id", + "private_key": "-----BEGIN PRIVATE 
KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCgFkCu/z3Sn8Py\nIHcUSSyCOjZ17AB/0F5IvhuGtTZTIJ33ltxeYmb69qK0m4QSk75HH5eX1OgF7/6E\nraqDaKlgi8ecSot1XrMRKyIY+LlTLMWeNtG54vymwjeH9wrMWQWfHJUCVUgUE8hL\nxQMEbU2Hn8t6P1JaGkfsm8cLiD0ve25iw/P2OPDdJukBlE2+47OL8RndyziVnLkW\n1y1kfujd83Cnf0sJp78fBKI7IRBWyXTS1C6/reZlkPq+D9ImCvn4YHpI6J1rsSDD\nitDbA0v/8l7KLyJoOK9vsFym4yhs4Q+Nzathv/ozvdHhob8I6JVizb5+D4QJ0GZ4\n9bMIzpV3AgMBAAECggEANbrCQvnP2h1dOqrCzMAyfBLlrHZlRIC+5FNKEdBIR2Yv\nHqY3mEYWoiydN1jq3wFPX7euvXrr1PJTzlwrWqeKEalYeZBbdi0ulJiRfSgdq1jf\nitaDVYxll+KfmLbFnQhT5q9FI99Trcll9GhPNvEokFiFhqYyEaO4eCVgjPJQ9tpC\njnKl9eLeHkhYK1pOKxJVBqzPcZI3F9jNvXWYTO1F7lFMom9I89W+NtBRAeYZnVao\nfH52GDJProB5A9BtRXkuLDzmFFjGjr8oT4E32wLuiu2UvNHgIZe7Hz+fMGGHsDHS\nc5jogukVwgt7QrfKVtoRTk8h2jyIrpRy4AQLfJZ4QQKBgQDT0X3+mp+tbEncLFBW\nXLT1k/1VWumi1TipiVsMpUcAZjWTGO8/WJF83IcNBAFqBJGyTAwplXIyUeI8CGg0\nOkBPY1Cy/dtTyiav05vRPZPkT44mhBua2tyhaE12DVo1/6ezF88yTPQkO9EjlvLs\nW32XlhqGQ8O587xjNaB/rqsjXwKBgQDBenKlO3eDBSfVb/te/xwqeVnKBgEYIAOJ\nN+FE49aH05n8DYQij1m8+Ck6sFBZywWliIQdS6d6J4NE69Ok7bfowwVqoumX8LWD\ntstMkOE2FmPCF4nbxkAFYbcuep1iItA2LVSwh3ChAA6aexA/fV6bMauhJPJTSnwX\nItnKvPMc6QKBgBrwZtTNt4cn4ZDl9eW17rHY+3wyjspN0eIF/RVzo78SQLgPkMX+\nrqoxpd9q5f8ky57gex+CyT5LGbnG2/HggrNWDzpkfNOAP0FXaVbIPRnpYEvXu2cL\ndMn2aPudoR6DAEIPwiNElDxTezrKhOS4khWIWqE+1xK8Q/ZeKKZ0gYGDAoGAcc3p\no6FgAfRFYvl0fYNHeQBaPUfc2ujxy4PQAKqXpNtlhuoYYA+79DhwX/IXwUl3L9Am\nDelTQLn/L8oberbNZ59XD0t2ZYYT7r7VxFqv7hWrZh5cW6a4P7IjgrZi3reli0iM\nuS1hpYIYFOvwObgvrs+/qZDG7REx/pXkT6lmwwkCgYEAub91mMF6YR76ETPAdFLs\nfTAP+Q06Ate+gw1Ob6Cf+biF6TXe/B3Ql1R4FzPF9xG1rS/BfI+bUi3W68/jDill\nHnDPvFxX9Q82/KNCY9LlzbuQLWjziyQk6BgXtcKWT1K1zy4tsZZy5WVvXEXdF+XV\n3MMUkEaBHE5iddCl7Kkbvfg=\n-----END PRIVATE KEY-----\n", + "client_email": "test@test-project.iam.gserviceaccount.com", + "client_id": "1234567890", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://accounts.google.com/o/oauth2/token", + "auth_provider_x509_cert_url": 
"https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test%40test-project.iam.gserviceaccount.com" +} diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/descriptors.pb b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/descriptors.pb new file mode 100644 index 000000000000..115e5fccbb55 Binary files /dev/null and b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/descriptors.pb differ diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_1_true_without_comma.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_1_true_without_comma.json new file mode 100644 index 000000000000..c14e3f6b1e9d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_1_true_without_comma.json @@ -0,0 +1 @@ +[1 true] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_a_invalid_utf8.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_a_invalid_utf8.json new file mode 100644 index 000000000000..38a86e2e6512 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_a_invalid_utf8.json @@ -0,0 +1 @@ +[a] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_colon_instead_of_comma.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_colon_instead_of_comma.json new file mode 100644 index 000000000000..0d02ad448332 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_colon_instead_of_comma.json @@ -0,0 +1 @@ +["": 1] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_comma_after_close.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_comma_after_close.json new file mode 100644 index 000000000000..2ccba8d95033 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_comma_after_close.json @@ -0,0 +1 @@ +[""], \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_comma_and_number.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_comma_and_number.json new file mode 100755 index 000000000000..d2c84e374a2a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_comma_and_number.json @@ -0,0 +1 @@ +[,1] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_double_comma.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_double_comma.json new file mode 100755 index 000000000000..0431712bc1db --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_double_comma.json @@ -0,0 +1 @@ +[1,,2] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_double_extra_comma.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_double_extra_comma.json new file mode 100644 index 000000000000..3f01d3129238 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_double_extra_comma.json @@ -0,0 +1 @@ +["x",,] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_extra_close.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_extra_close.json new file mode 100644 index 000000000000..c12f9fae1cbe --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_extra_close.json @@ -0,0 +1 @@ +["x"]] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_extra_comma.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_extra_comma.json new file mode 100644 index 000000000000..5f8ce18e4b2a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_extra_comma.json @@ -0,0 +1 @@ +["",] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_incomplete.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_incomplete.json new file mode 100644 index 000000000000..cc65b0b512a0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_incomplete.json @@ -0,0 +1 @@ +["x" \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_incomplete_invalid_value.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_incomplete_invalid_value.json new file mode 100644 index 000000000000..c21a8f6cff9b --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_incomplete_invalid_value.json @@ -0,0 +1 @@ +[x \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_inner_array_no_comma.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_inner_array_no_comma.json new file mode 100644 index 000000000000..c70b716471b7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_inner_array_no_comma.json @@ -0,0 +1 @@ +[3[4]] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_invalid_utf8.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_invalid_utf8.json new file mode 100644 index 000000000000..6099d3441a41 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_invalid_utf8.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_items_separated_by_semicolon.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_items_separated_by_semicolon.json new file mode 100755 index 000000000000..d4bd7314ca87 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_items_separated_by_semicolon.json @@ -0,0 +1 @@ +[1:2] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_just_comma.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_just_comma.json new file mode 100755 index 000000000000..9d7077c6804a --- 
/dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_just_comma.json @@ -0,0 +1 @@ +[,] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_just_minus.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_just_minus.json new file mode 100755 index 000000000000..29501c6ca23d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_just_minus.json @@ -0,0 +1 @@ +[-] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_missing_value.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_missing_value.json new file mode 100644 index 000000000000..3a6ba86f3ab5 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_missing_value.json @@ -0,0 +1 @@ +[ , ""] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_newlines_unclosed.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_newlines_unclosed.json new file mode 100644 index 000000000000..646680065295 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_newlines_unclosed.json @@ -0,0 +1,3 @@ +["a", +4 +,1, \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_number_and_comma.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_number_and_comma.json new file mode 100755 index 000000000000..13f6f1d18a44 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_number_and_comma.json @@ -0,0 +1 @@ +[1,] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_number_and_several_commas.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_number_and_several_commas.json new file mode 100755 index 000000000000..0ac408cb8aeb --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_number_and_several_commas.json @@ -0,0 +1 @@ +[1,,] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_spaces_vertical_tab_formfeed.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_spaces_vertical_tab_formfeed.json new file mode 100755 index 000000000000..6cd7cf5855d7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_spaces_vertical_tab_formfeed.json @@ -0,0 +1 @@ +[" a"\f] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_star_inside.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_star_inside.json new file mode 100755 index 000000000000..5a5194647ade --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_star_inside.json @@ -0,0 +1 @@ +[*] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_unclosed.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_unclosed.json new file mode 100644 index 000000000000..060733059011 --- 
/dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_unclosed.json @@ -0,0 +1 @@ +["" \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_unclosed_trailing_comma.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_unclosed_trailing_comma.json new file mode 100644 index 000000000000..6604698ffcd4 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_unclosed_trailing_comma.json @@ -0,0 +1 @@ +[1, \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_unclosed_with_new_lines.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_unclosed_with_new_lines.json new file mode 100644 index 000000000000..4f61de3fb1d0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_unclosed_with_new_lines.json @@ -0,0 +1,3 @@ +[1, +1 +,1 \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_unclosed_with_object_inside.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_unclosed_with_object_inside.json new file mode 100644 index 000000000000..043a87e2db92 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_array_unclosed_with_object_inside.json @@ -0,0 +1 @@ +[{} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_incomplete_false.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_incomplete_false.json new file mode 100644 
index 000000000000..eb18c6a1437e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_incomplete_false.json @@ -0,0 +1 @@ +[fals] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_incomplete_null.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_incomplete_null.json new file mode 100644 index 000000000000..c18ef5385103 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_incomplete_null.json @@ -0,0 +1 @@ +[nul] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_incomplete_true.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_incomplete_true.json new file mode 100644 index 000000000000..f451ac6d2e7a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_incomplete_true.json @@ -0,0 +1 @@ +[tru] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_++.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_++.json new file mode 100644 index 000000000000..bdb62aaf4d90 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_++.json @@ -0,0 +1 @@ +[++1234] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_+1.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_+1.json new file mode 100755 index 000000000000..3cbe58c92b1d --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_+1.json @@ -0,0 +1 @@ +[+1] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_+Inf.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_+Inf.json new file mode 100755 index 000000000000..871ae14d535f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_+Inf.json @@ -0,0 +1 @@ +[+Inf] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_-01.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_-01.json new file mode 100755 index 000000000000..0df32bac8076 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_-01.json @@ -0,0 +1 @@ +[-01] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_-1.0..json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_-1.0..json new file mode 100755 index 000000000000..7cf55a85a739 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_-1.0..json @@ -0,0 +1 @@ +[-1.0.] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_-2..json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_-2..json new file mode 100755 index 000000000000..9be84365d096 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_-2..json @@ -0,0 +1 @@ +[-2.] 
\ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_-NaN.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_-NaN.json new file mode 100755 index 000000000000..f61615d404ea --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_-NaN.json @@ -0,0 +1 @@ +[-NaN] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_.-1.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_.-1.json new file mode 100644 index 000000000000..1c9f2dd1b71d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_.-1.json @@ -0,0 +1 @@ +[.-1] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_.2e-3.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_.2e-3.json new file mode 100755 index 000000000000..c6c976f25776 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_.2e-3.json @@ -0,0 +1 @@ +[.2e-3] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0.1.2.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0.1.2.json new file mode 100755 index 000000000000..c83a25621e31 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0.1.2.json @@ -0,0 +1 @@ +[0.1.2] \ No newline at end of file diff --git 
a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0.3e+.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0.3e+.json new file mode 100644 index 000000000000..a55a1bfefa33 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0.3e+.json @@ -0,0 +1 @@ +[0.3e+] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0.3e.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0.3e.json new file mode 100644 index 000000000000..3dd5df4b3a85 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0.3e.json @@ -0,0 +1 @@ +[0.3e] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0.e1.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0.e1.json new file mode 100644 index 000000000000..c92c71ccb23f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0.e1.json @@ -0,0 +1 @@ +[0.e1] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0_capital_E+.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0_capital_E+.json new file mode 100644 index 000000000000..3ba2c7d6d093 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0_capital_E+.json @@ -0,0 +1 @@ +[0E+] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0_capital_E.json 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0_capital_E.json new file mode 100755 index 000000000000..5301840d1c31 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0_capital_E.json @@ -0,0 +1 @@ +[0E] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0e+.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0e+.json new file mode 100644 index 000000000000..8ab0bc4b8b2c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0e+.json @@ -0,0 +1 @@ +[0e+] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0e.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0e.json new file mode 100644 index 000000000000..47ec421bb624 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_0e.json @@ -0,0 +1 @@ +[0e] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_1.0e+.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_1.0e+.json new file mode 100755 index 000000000000..cd84b9f69e3b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_1.0e+.json @@ -0,0 +1 @@ +[1.0e+] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_1.0e-.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_1.0e-.json new file mode 100755 index 
000000000000..4eb7afa0f99c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_1.0e-.json @@ -0,0 +1 @@ +[1.0e-] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_1.0e.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_1.0e.json new file mode 100755 index 000000000000..21753f4c745f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_1.0e.json @@ -0,0 +1 @@ +[1.0e] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_1_000.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_1_000.json new file mode 100755 index 000000000000..7b18b66b38ef --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_1_000.json @@ -0,0 +1 @@ +[1 000.0] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_1eE2.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_1eE2.json new file mode 100755 index 000000000000..4318a341d710 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_1eE2.json @@ -0,0 +1 @@ +[1eE2] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_2.e+3.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_2.e+3.json new file mode 100755 index 000000000000..4442f394dd73 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_2.e+3.json @@ -0,0 +1 @@ +[2.e+3] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_2.e-3.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_2.e-3.json new file mode 100755 index 000000000000..a65060edfcfd --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_2.e-3.json @@ -0,0 +1 @@ +[2.e-3] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_2.e3.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_2.e3.json new file mode 100755 index 000000000000..66f7cf701bc2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_2.e3.json @@ -0,0 +1 @@ +[2.e3] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_9.e+.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_9.e+.json new file mode 100644 index 000000000000..732a7b11ce60 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_9.e+.json @@ -0,0 +1 @@ +[9.e+] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_Inf.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_Inf.json new file mode 100755 index 000000000000..c40c734c3cde --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_Inf.json @@ -0,0 +1 @@ +[Inf] \ No newline 
at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_NaN.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_NaN.json new file mode 100755 index 000000000000..4992317909e5 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_NaN.json @@ -0,0 +1 @@ +[NaN] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_U+FF11_fullwidth_digit_one.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_U+FF11_fullwidth_digit_one.json new file mode 100644 index 000000000000..b14587e5ebbb --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_U+FF11_fullwidth_digit_one.json @@ -0,0 +1 @@ +[1] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_expression.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_expression.json new file mode 100644 index 000000000000..76fdbc8a4999 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_expression.json @@ -0,0 +1 @@ +[1+2] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_hex_1_digit.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_hex_1_digit.json new file mode 100644 index 000000000000..3b214880c630 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_hex_1_digit.json @@ -0,0 +1 @@ +[0x1] \ No newline at end of file diff --git 
a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_hex_2_digits.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_hex_2_digits.json new file mode 100644 index 000000000000..83e516ab0e34 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_hex_2_digits.json @@ -0,0 +1 @@ +[0x42] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_infinity.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_infinity.json new file mode 100755 index 000000000000..8c2baf783a8e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_infinity.json @@ -0,0 +1 @@ +[Infinity] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_invalid+-.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_invalid+-.json new file mode 100644 index 000000000000..1cce602b518f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_invalid+-.json @@ -0,0 +1 @@ +[0e+-1] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_invalid-negative-real.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_invalid-negative-real.json new file mode 100644 index 000000000000..5fc3c1efb60c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_invalid-negative-real.json @@ -0,0 +1 @@ +[-123.123foo] \ No newline at end of file diff --git 
a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_invalid-utf-8-in-bigger-int.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_invalid-utf-8-in-bigger-int.json new file mode 100644 index 000000000000..3b97e580e859 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_invalid-utf-8-in-bigger-int.json @@ -0,0 +1 @@ +[123] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_invalid-utf-8-in-exponent.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_invalid-utf-8-in-exponent.json new file mode 100644 index 000000000000..ea35d723cdee --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_invalid-utf-8-in-exponent.json @@ -0,0 +1 @@ +[1e1] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_invalid-utf-8-in-int.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_invalid-utf-8-in-int.json new file mode 100644 index 000000000000..371226e4cd93 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_invalid-utf-8-in-int.json @@ -0,0 +1 @@ +[0] diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_minus_infinity.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_minus_infinity.json new file mode 100755 index 000000000000..cf4133d22d68 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_minus_infinity.json @@ -0,0 +1 @@ +[-Infinity] \ No 
newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_minus_sign_with_trailing_garbage.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_minus_sign_with_trailing_garbage.json new file mode 100644 index 000000000000..a6d8e78e7ce3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_minus_sign_with_trailing_garbage.json @@ -0,0 +1 @@ +[-foo] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_minus_space_1.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_minus_space_1.json new file mode 100644 index 000000000000..9a5ebedf6797 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_minus_space_1.json @@ -0,0 +1 @@ +[- 1] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_neg_int_starting_with_zero.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_neg_int_starting_with_zero.json new file mode 100644 index 000000000000..67af0960af79 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_neg_int_starting_with_zero.json @@ -0,0 +1 @@ +[-012] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_neg_real_without_int_part.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_neg_real_without_int_part.json new file mode 100755 index 000000000000..1f2a43496ed9 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_neg_real_without_int_part.json @@ -0,0 +1 @@ +[-.123] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_neg_with_garbage_at_end.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_neg_with_garbage_at_end.json new file mode 100644 index 000000000000..2aa73119fbbc --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_neg_with_garbage_at_end.json @@ -0,0 +1 @@ +[-1x] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_real_garbage_after_e.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_real_garbage_after_e.json new file mode 100644 index 000000000000..9213dfca8de9 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_real_garbage_after_e.json @@ -0,0 +1 @@ +[1ea] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_real_with_invalid_utf8_after_e.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_real_with_invalid_utf8_after_e.json new file mode 100644 index 000000000000..1e52ef964cf0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_real_with_invalid_utf8_after_e.json @@ -0,0 +1 @@ +[1e] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_real_without_fractional_part.json 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_real_without_fractional_part.json new file mode 100755 index 000000000000..1de287cf892d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_real_without_fractional_part.json @@ -0,0 +1 @@ +[1.] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_starting_with_dot.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_starting_with_dot.json new file mode 100755 index 000000000000..f682dbdce039 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_starting_with_dot.json @@ -0,0 +1 @@ +[.123] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_with_alpha.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_with_alpha.json new file mode 100644 index 000000000000..1e42d81822b2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_with_alpha.json @@ -0,0 +1 @@ +[1.2a-3] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_with_alpha_char.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_with_alpha_char.json new file mode 100644 index 000000000000..b79daccb8a6c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_with_alpha_char.json @@ -0,0 +1 @@ +[1.8011670033376514H-308] \ No newline at end of file diff --git 
a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_with_leading_zero.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_with_leading_zero.json new file mode 100755 index 000000000000..7106da1f3b85 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_number_with_leading_zero.json @@ -0,0 +1 @@ +[012] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_bad_value.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_bad_value.json new file mode 100644 index 000000000000..a03a8c03b72a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_bad_value.json @@ -0,0 +1 @@ +["x", truth] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_bracket_key.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_bracket_key.json new file mode 100644 index 000000000000..cc443b483214 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_bracket_key.json @@ -0,0 +1 @@ +{[: "x"} diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_comma_instead_of_colon.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_comma_instead_of_colon.json new file mode 100644 index 000000000000..8d56377087d2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_comma_instead_of_colon.json @@ -0,0 +1 @@ +{"x", null} \ No newline at end of file diff --git 
a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_double_colon.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_double_colon.json new file mode 100644 index 000000000000..80e8c7b89a90 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_double_colon.json @@ -0,0 +1 @@ +{"x"::"b"} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_emoji.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_emoji.json new file mode 100644 index 000000000000..cb4078eaa101 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_emoji.json @@ -0,0 +1 @@ +{🇨🇭} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_garbage_at_end.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_garbage_at_end.json new file mode 100644 index 000000000000..80c42cbadcfc --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_garbage_at_end.json @@ -0,0 +1 @@ +{"a":"a" 123} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_key_with_single_quotes.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_key_with_single_quotes.json new file mode 100755 index 000000000000..77c32759962b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_key_with_single_quotes.json @@ -0,0 +1 @@ +{key: 'value'} \ No newline at end of file diff --git 
a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_lone_continuation_byte_in_key_and_trailing_comma.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_lone_continuation_byte_in_key_and_trailing_comma.json new file mode 100644 index 000000000000..aa2cb637cd36 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_lone_continuation_byte_in_key_and_trailing_comma.json @@ -0,0 +1 @@ +{"":"0",} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_missing_colon.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_missing_colon.json new file mode 100644 index 000000000000..b98eff62da4f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_missing_colon.json @@ -0,0 +1 @@ +{"a" b} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_missing_key.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_missing_key.json new file mode 100755 index 000000000000..b4fb0f528edb --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_missing_key.json @@ -0,0 +1 @@ +{:"b"} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_missing_semicolon.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_missing_semicolon.json new file mode 100755 index 000000000000..e3451384f81a --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_missing_semicolon.json @@ -0,0 +1 @@ +{"a" "b"} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_missing_value.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_missing_value.json new file mode 100644 index 000000000000..3ef538a60e69 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_missing_value.json @@ -0,0 +1 @@ +{"a": \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_no-colon.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_no-colon.json new file mode 100644 index 000000000000..f3797b357646 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_no-colon.json @@ -0,0 +1 @@ +{"a" \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_non_string_key.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_non_string_key.json new file mode 100755 index 000000000000..b9945b34b449 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_non_string_key.json @@ -0,0 +1 @@ +{1:1} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_non_string_key_but_huge_number_instead.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_non_string_key_but_huge_number_instead.json new file mode 100755 index 000000000000..b37fa86c0e02 --- /dev/null 
+++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_non_string_key_but_huge_number_instead.json @@ -0,0 +1 @@ +{9999E9999:1} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_repeated_null_null.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_repeated_null_null.json new file mode 100755 index 000000000000..f7d2959d0d48 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_repeated_null_null.json @@ -0,0 +1 @@ +{null:null,null:null} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_several_trailing_commas.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_several_trailing_commas.json new file mode 100755 index 000000000000..3c9afe8dc90d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_several_trailing_commas.json @@ -0,0 +1 @@ +{"id":0,,,,,} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_single_quote.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_single_quote.json new file mode 100644 index 000000000000..e5cdf976ad63 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_single_quote.json @@ -0,0 +1 @@ +{'a':0} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_trailing_comma.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_trailing_comma.json new file 
mode 100755 index 000000000000..a4b02509459f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_trailing_comma.json @@ -0,0 +1 @@ +{"id":0,} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_trailing_comment.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_trailing_comment.json new file mode 100644 index 000000000000..a372c6553d2e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_trailing_comment.json @@ -0,0 +1 @@ +{"a":"b"}/**/ \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_trailing_comment_open.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_trailing_comment_open.json new file mode 100644 index 000000000000..d557f41ca452 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_trailing_comment_open.json @@ -0,0 +1 @@ +{"a":"b"}/**// \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_trailing_comment_slash_open.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_trailing_comment_slash_open.json new file mode 100644 index 000000000000..e335136c0796 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_trailing_comment_slash_open.json @@ -0,0 +1 @@ +{"a":"b"}// \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_trailing_comment_slash_open_incomplete.json 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_trailing_comment_slash_open_incomplete.json new file mode 100644 index 000000000000..d892e49f1767 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_trailing_comment_slash_open_incomplete.json @@ -0,0 +1 @@ +{"a":"b"}/ \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_two_commas_in_a_row.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_two_commas_in_a_row.json new file mode 100755 index 000000000000..7c639ae64969 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_two_commas_in_a_row.json @@ -0,0 +1 @@ +{"a":"b",,"c":"d"} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_unquoted_key.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_unquoted_key.json new file mode 100644 index 000000000000..8ba137293c3a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_unquoted_key.json @@ -0,0 +1 @@ +{a: "b"} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_unterminated-value.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_unterminated-value.json new file mode 100644 index 000000000000..7fe699a6a3fe --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_unterminated-value.json @@ -0,0 +1 @@ +{"a":"a \ No newline at end of file diff --git 
a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_with_single_string.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_with_single_string.json new file mode 100644 index 000000000000..d63f7fbb7e3c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_with_single_string.json @@ -0,0 +1 @@ +{ "foo" : "bar", "a" } \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_with_trailing_garbage.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_with_trailing_garbage.json new file mode 100644 index 000000000000..787c8f0a8c03 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_object_with_trailing_garbage.json @@ -0,0 +1 @@ +{"a":"b"}# \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_single_space.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_single_space.json new file mode 100755 index 000000000000..0519ecba6ea9 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_single_space.json @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_1_surrogate_then_escape.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_1_surrogate_then_escape.json new file mode 100644 index 000000000000..acec66d8f4bf --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_1_surrogate_then_escape.json @@ -0,0 +1 @@ +["\uD800\"] \ No newline 
at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_1_surrogate_then_escape_u.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_1_surrogate_then_escape_u.json new file mode 100644 index 000000000000..e834b05e9637 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_1_surrogate_then_escape_u.json @@ -0,0 +1 @@ +["\uD800\u"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_1_surrogate_then_escape_u1.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_1_surrogate_then_escape_u1.json new file mode 100644 index 000000000000..a04cd348928d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_1_surrogate_then_escape_u1.json @@ -0,0 +1 @@ +["\uD800\u1"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_1_surrogate_then_escape_u1x.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_1_surrogate_then_escape_u1x.json new file mode 100644 index 000000000000..bfbd234098fe --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_1_surrogate_then_escape_u1x.json @@ -0,0 +1 @@ +["\uD800\u1x"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_accentuated_char_no_quotes.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_accentuated_char_no_quotes.json new file mode 100644 index 000000000000..fd6895693f52 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_accentuated_char_no_quotes.json @@ -0,0 +1 @@ +[é] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_backslash_00.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_backslash_00.json new file mode 100644 index 000000000000..b5bf267b5d4e Binary files /dev/null and b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_backslash_00.json differ diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_escape_x.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_escape_x.json new file mode 100644 index 000000000000..fae291938d9d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_escape_x.json @@ -0,0 +1 @@ +["\x00"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_escaped_backslash_bad.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_escaped_backslash_bad.json new file mode 100755 index 000000000000..016fcb47ef52 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_escaped_backslash_bad.json @@ -0,0 +1 @@ +["\\\"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_escaped_ctrl_char_tab.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_escaped_ctrl_char_tab.json new file mode 100644 index 000000000000..f35ea382bb03 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_escaped_ctrl_char_tab.json @@ -0,0 +1 @@ +["\ "] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_escaped_emoji.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_escaped_emoji.json new file mode 100644 index 000000000000..a27775421363 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_escaped_emoji.json @@ -0,0 +1 @@ +["\🌀"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_incomplete_escape.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_incomplete_escape.json new file mode 100755 index 000000000000..3415c33ca8ab --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_incomplete_escape.json @@ -0,0 +1 @@ +["\"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_incomplete_escaped_character.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_incomplete_escaped_character.json new file mode 100755 index 000000000000..0f2197ea2901 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_incomplete_escaped_character.json @@ -0,0 +1 @@ +["\u00A"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_incomplete_surrogate.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_incomplete_surrogate.json new file mode 100755 index 
000000000000..75504a656188 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_incomplete_surrogate.json @@ -0,0 +1 @@ +["\uD834\uDd"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_incomplete_surrogate_escape_invalid.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_incomplete_surrogate_escape_invalid.json new file mode 100755 index 000000000000..bd9656060df4 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_incomplete_surrogate_escape_invalid.json @@ -0,0 +1 @@ +["\uD800\uD800\x"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_invalid-utf-8-in-escape.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_invalid-utf-8-in-escape.json new file mode 100644 index 000000000000..0c43006430d3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_invalid-utf-8-in-escape.json @@ -0,0 +1 @@ +["\u"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_invalid_backslash_esc.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_invalid_backslash_esc.json new file mode 100755 index 000000000000..d1eb60921a08 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_invalid_backslash_esc.json @@ -0,0 +1 @@ +["\a"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_invalid_unicode_escape.json 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_invalid_unicode_escape.json new file mode 100644 index 000000000000..7608cb6ba896 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_invalid_unicode_escape.json @@ -0,0 +1 @@ +["\uqqqq"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_invalid_utf8_after_escape.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_invalid_utf8_after_escape.json new file mode 100644 index 000000000000..2f757a25b545 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_invalid_utf8_after_escape.json @@ -0,0 +1 @@ +["\"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_leading_uescaped_thinspace.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_leading_uescaped_thinspace.json new file mode 100755 index 000000000000..7b297c6365af --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_leading_uescaped_thinspace.json @@ -0,0 +1 @@ +[\u0020"asd"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_no_quotes_with_bad_escape.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_no_quotes_with_bad_escape.json new file mode 100644 index 000000000000..01bc70abae62 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_no_quotes_with_bad_escape.json @@ -0,0 +1 @@ +[\n] \ No newline at end of file diff --git 
a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_single_doublequote.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_single_doublequote.json new file mode 100755 index 000000000000..9d68933c44f1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_single_doublequote.json @@ -0,0 +1 @@ +" \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_single_quote.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_single_quote.json new file mode 100644 index 000000000000..caff239bfc36 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_single_quote.json @@ -0,0 +1 @@ +['single quote'] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_single_string_no_double_quotes.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_single_string_no_double_quotes.json new file mode 100755 index 000000000000..f2ba8f84ab5c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_single_string_no_double_quotes.json @@ -0,0 +1 @@ +abc \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_start_escape_unclosed.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_start_escape_unclosed.json new file mode 100644 index 000000000000..db62a46fcbc6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_start_escape_unclosed.json @@ -0,0 +1 
@@ +["\ \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_unescaped_ctrl_char.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_unescaped_ctrl_char.json new file mode 100755 index 000000000000..9f21348071d3 Binary files /dev/null and b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_unescaped_ctrl_char.json differ diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_unescaped_newline.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_unescaped_newline.json new file mode 100644 index 000000000000..700d36086914 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_unescaped_newline.json @@ -0,0 +1,2 @@ +["new +line"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_unescaped_tab.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_unescaped_tab.json new file mode 100644 index 000000000000..160264a2d995 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_unescaped_tab.json @@ -0,0 +1 @@ +[" "] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_unicode_CapitalU.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_unicode_CapitalU.json new file mode 100644 index 000000000000..17332bb174cb --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_unicode_CapitalU.json @@ -0,0 +1 @@ +"\UA66D" \ No newline at 
end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_with_trailing_garbage.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_with_trailing_garbage.json new file mode 100644 index 000000000000..efe3bd272c4e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_string_with_trailing_garbage.json @@ -0,0 +1 @@ +""x \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_100000_opening_arrays.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_100000_opening_arrays.json new file mode 100644 index 000000000000..a4823eeccebc --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_100000_opening_arrays.json @@ -0,0 +1 @@ 
+[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
[ \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_U+2060_word_joined.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_U+2060_word_joined.json new file mode 100644 index 000000000000..81156a6996ed --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_U+2060_word_joined.json @@ -0,0 +1 @@ +[⁠] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_UTF8_BOM_no_data.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_UTF8_BOM_no_data.json new file mode 100755 index 000000000000..5f282702bb03 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_UTF8_BOM_no_data.json @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_angle_bracket_..json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_angle_bracket_..json new file mode 100755 index 000000000000..a56fef0b021f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_angle_bracket_..json @@ -0,0 +1 @@ +<.> \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_angle_bracket_null.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_angle_bracket_null.json new file mode 100755 index 000000000000..617f26254934 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_angle_bracket_null.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_array_trailing_garbage.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_array_trailing_garbage.json new file mode 100644 index 000000000000..5a745e6f3c36 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_array_trailing_garbage.json @@ -0,0 +1 @@ +[1]x \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_array_with_extra_array_close.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_array_with_extra_array_close.json new file mode 100755 index 000000000000..6cfb1398d224 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_array_with_extra_array_close.json @@ -0,0 +1 @@ +[1]] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_array_with_unclosed_string.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_array_with_unclosed_string.json new file mode 100755 index 000000000000..ba6b1788b672 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_array_with_unclosed_string.json @@ -0,0 +1 @@ +["asd] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_ascii-unicode-identifier.json 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_ascii-unicode-identifier.json new file mode 100644 index 000000000000..ef2ab62fe763 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_ascii-unicode-identifier.json @@ -0,0 +1 @@ +aå \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_capitalized_True.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_capitalized_True.json new file mode 100755 index 000000000000..7cd88469ab3d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_capitalized_True.json @@ -0,0 +1 @@ +[True] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_close_unopened_array.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_close_unopened_array.json new file mode 100755 index 000000000000..d2af0c646a8f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_close_unopened_array.json @@ -0,0 +1 @@ +1] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_comma_instead_of_closing_brace.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_comma_instead_of_closing_brace.json new file mode 100644 index 000000000000..ac61b820068b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_comma_instead_of_closing_brace.json @@ -0,0 +1 @@ +{"x": true, \ No newline at end of file diff --git 
a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_double_array.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_double_array.json new file mode 100755 index 000000000000..058d1626e5af --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_double_array.json @@ -0,0 +1 @@ +[][] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_end_array.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_end_array.json new file mode 100644 index 000000000000..54caf60b1367 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_end_array.json @@ -0,0 +1 @@ +] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_incomplete_UTF8_BOM.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_incomplete_UTF8_BOM.json new file mode 100755 index 000000000000..bfcdd514f330 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_incomplete_UTF8_BOM.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_lone-invalid-utf-8.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_lone-invalid-utf-8.json new file mode 100644 index 000000000000..8b1296cad20a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_lone-invalid-utf-8.json @@ -0,0 +1 @@ + \ No newline at end of file diff --git 
a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_lone-open-bracket.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_lone-open-bracket.json new file mode 100644 index 000000000000..8e2f0bef135b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_lone-open-bracket.json @@ -0,0 +1 @@ +[ \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_no_data.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_no_data.json new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_null-byte-outside-string.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_null-byte-outside-string.json new file mode 100644 index 000000000000..326db14422a7 Binary files /dev/null and b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_null-byte-outside-string.json differ diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_number_with_trailing_garbage.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_number_with_trailing_garbage.json new file mode 100644 index 000000000000..0746539d246c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_number_with_trailing_garbage.json @@ -0,0 +1 @@ +2@ \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_object_followed_by_closing_object.json 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_object_followed_by_closing_object.json new file mode 100644 index 000000000000..aa9ebaec5709 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_object_followed_by_closing_object.json @@ -0,0 +1 @@ +{}} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_object_unclosed_no_value.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_object_unclosed_no_value.json new file mode 100644 index 000000000000..17d045147fe5 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_object_unclosed_no_value.json @@ -0,0 +1 @@ +{"": \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_object_with_comment.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_object_with_comment.json new file mode 100644 index 000000000000..ed1b569b7049 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_object_with_comment.json @@ -0,0 +1 @@ +{"a":/*comment*/"b"} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_object_with_trailing_garbage.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_object_with_trailing_garbage.json new file mode 100644 index 000000000000..9ca2336d7451 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_object_with_trailing_garbage.json @@ -0,0 +1 @@ +{"a": true} "x" \ No 
newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_array_apostrophe.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_array_apostrophe.json new file mode 100644 index 000000000000..8bebe3af09a7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_array_apostrophe.json @@ -0,0 +1 @@ +[' \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_array_comma.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_array_comma.json new file mode 100644 index 000000000000..6295fdc36db0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_array_comma.json @@ -0,0 +1 @@ +[, \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_array_object.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_array_object.json new file mode 100644 index 000000000000..e870445b2e25 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_array_object.json @@ -0,0 +1 @@ 
+[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
:[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{"":[{""
: diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_array_open_object.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_array_open_object.json new file mode 100644 index 000000000000..7a63c8c57c8a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_array_open_object.json @@ -0,0 +1 @@ +[{ \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_array_open_string.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_array_open_string.json new file mode 100644 index 000000000000..9822a6baf7e5 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_array_open_string.json @@ -0,0 +1 @@ +["a \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_array_string.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_array_string.json new file mode 100644 index 000000000000..42a619362021 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_array_string.json @@ -0,0 +1 @@ +["a" \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_object.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_object.json new file mode 100644 index 000000000000..81750b96f9d8 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_object.json 
@@ -0,0 +1 @@ +{ \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_object_close_array.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_object_close_array.json new file mode 100755 index 000000000000..eebc700a1013 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_object_close_array.json @@ -0,0 +1 @@ +{] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_object_comma.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_object_comma.json new file mode 100644 index 000000000000..47bc9106f86a --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_object_comma.json @@ -0,0 +1 @@ +{, \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_object_open_array.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_object_open_array.json new file mode 100644 index 000000000000..381ede5dea5f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_object_open_array.json @@ -0,0 +1 @@ +{[ \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_object_open_string.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_object_open_string.json new file mode 100644 index 000000000000..328c30cd782a --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_object_open_string.json @@ -0,0 +1 @@ +{"a \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_object_string_with_apostrophes.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_object_string_with_apostrophes.json new file mode 100644 index 000000000000..9dba17090cf6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_object_string_with_apostrophes.json @@ -0,0 +1 @@ +{'a' \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_open.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_open.json new file mode 100644 index 000000000000..841fd5f8640e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_open_open.json @@ -0,0 +1 @@ +["\{["\{["\{["\{ \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_single_eacute.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_single_eacute.json new file mode 100644 index 000000000000..92a39f398b80 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_single_eacute.json @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_single_star.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_single_star.json new file mode 
100755 index 000000000000..f59ec20aabf5 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_single_star.json @@ -0,0 +1 @@ +* \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_trailing_#.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_trailing_#.json new file mode 100644 index 000000000000..8986110875af --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_trailing_#.json @@ -0,0 +1 @@ +{"a":"b"}#{} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_uescaped_LF_before_string.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_uescaped_LF_before_string.json new file mode 100755 index 000000000000..df2f0f242244 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_uescaped_LF_before_string.json @@ -0,0 +1 @@ +[\u000A""] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_unclosed_array.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_unclosed_array.json new file mode 100755 index 000000000000..11209515c8dd --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_unclosed_array.json @@ -0,0 +1 @@ +[1 \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_unclosed_array_partial_null.json 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_unclosed_array_partial_null.json new file mode 100644 index 000000000000..0d591762c127 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_unclosed_array_partial_null.json @@ -0,0 +1 @@ +[ false, nul \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_unclosed_array_unfinished_false.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_unclosed_array_unfinished_false.json new file mode 100644 index 000000000000..a2ff8504a9c6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_unclosed_array_unfinished_false.json @@ -0,0 +1 @@ +[ true, fals \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_unclosed_array_unfinished_true.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_unclosed_array_unfinished_true.json new file mode 100644 index 000000000000..3149e8f5a7a5 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_unclosed_array_unfinished_true.json @@ -0,0 +1 @@ +[ false, tru \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_unclosed_object.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_unclosed_object.json new file mode 100755 index 000000000000..694d69dbd00c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_unclosed_object.json @@ -0,0 +1 @@ +{"asd":"asd" \ 
No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_unicode-identifier.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_unicode-identifier.json new file mode 100644 index 000000000000..7284aea33d3f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_unicode-identifier.json @@ -0,0 +1 @@ +å \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_whitespace_U+2060_word_joiner.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_whitespace_U+2060_word_joiner.json new file mode 100755 index 000000000000..81156a6996ed --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_whitespace_U+2060_word_joiner.json @@ -0,0 +1 @@ +[⁠] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_whitespace_formfeed.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_whitespace_formfeed.json new file mode 100755 index 000000000000..a9ea535d1bbe --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/invalid/n_structure_whitespace_formfeed.json @@ -0,0 +1 @@ +[ ] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_80_nested.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_80_nested.json new file mode 100644 index 000000000000..f32ea97ccd3b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_80_nested.json @@ 
-0,0 +1 @@ +{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{"nest":{}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_arraysWithSpaces.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_arraysWithSpaces.json new file mode 100755 index 000000000000..582290798f43 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_arraysWithSpaces.json @@ -0,0 +1 @@ +[[] ] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_empty-string.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_empty-string.json new file mode 100644 index 000000000000..93b6be2bccad --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_empty-string.json @@ -0,0 +1 @@ +[""] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_empty.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_empty.json new file mode 100755 index 000000000000..0637a088a01e --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_empty.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_ending_with_newline.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_ending_with_newline.json new file mode 100755 index 000000000000..eac5f7b46e04 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_ending_with_newline.json @@ -0,0 +1 @@ +["a"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_false.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_false.json new file mode 100644 index 000000000000..67b2f07601e4 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_false.json @@ -0,0 +1 @@ +[false] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_heterogeneous.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_heterogeneous.json new file mode 100755 index 000000000000..d3c1e264845e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_heterogeneous.json @@ -0,0 +1 @@ +[null, 1, "1", {}] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_null.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_null.json new file mode 100644 index 000000000000..500db4a86aa3 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_null.json @@ -0,0 +1 @@ +[null] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_with_1_and_newline.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_with_1_and_newline.json new file mode 100644 index 000000000000..994825500ae3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_with_1_and_newline.json @@ -0,0 +1,2 @@ +[1 +] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_with_leading_space.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_with_leading_space.json new file mode 100755 index 000000000000..18bfe6422c78 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_with_leading_space.json @@ -0,0 +1 @@ + [1] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_with_several_null.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_with_several_null.json new file mode 100755 index 000000000000..99f6c5d1d883 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_with_several_null.json @@ -0,0 +1 @@ +[1,null,null,null,2] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_with_trailing_space.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_with_trailing_space.json new file mode 100755 index 000000000000..de9e7a94492d --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_array_with_trailing_space.json @@ -0,0 +1 @@ +[2] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number.json new file mode 100644 index 000000000000..e5f5cc3340d2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number.json @@ -0,0 +1 @@ +[123e65] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_0e+1.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_0e+1.json new file mode 100755 index 000000000000..d1d3967065cf --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_0e+1.json @@ -0,0 +1 @@ +[0e+1] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_0e1.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_0e1.json new file mode 100755 index 000000000000..3283a7936514 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_0e1.json @@ -0,0 +1 @@ +[0e1] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_after_space.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_after_space.json new file mode 100644 index 000000000000..623570d96040 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_after_space.json @@ -0,0 +1 @@ +[ 4] \ No newline at end of file 
diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_double_close_to_zero.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_double_close_to_zero.json new file mode 100755 index 000000000000..96555ff78230 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_double_close_to_zero.json @@ -0,0 +1 @@ +[-0.000000000000000000000000000000000000000000000000000000000000000000000000000001] diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_int_with_exp.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_int_with_exp.json new file mode 100755 index 000000000000..a4ca9e754fb0 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_int_with_exp.json @@ -0,0 +1 @@ +[20e1] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_minus_zero.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_minus_zero.json new file mode 100755 index 000000000000..37af1312ab5d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_minus_zero.json @@ -0,0 +1 @@ +[-0] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_negative_int.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_negative_int.json new file mode 100644 index 000000000000..8e30f8bd9660 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_negative_int.json @@ -0,0 +1 @@ +[-123] \ No newline at end of file diff --git 
a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_negative_one.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_negative_one.json new file mode 100644 index 000000000000..99d21a2a0f09 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_negative_one.json @@ -0,0 +1 @@ +[-1] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_negative_zero.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_negative_zero.json new file mode 100644 index 000000000000..37af1312ab5d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_negative_zero.json @@ -0,0 +1 @@ +[-0] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_real_capital_e.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_real_capital_e.json new file mode 100644 index 000000000000..6edbdfcb180d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_real_capital_e.json @@ -0,0 +1 @@ +[1E22] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_real_capital_e_neg_exp.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_real_capital_e_neg_exp.json new file mode 100644 index 000000000000..0a01bd3ef4ca --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_real_capital_e_neg_exp.json @@ -0,0 +1 @@ +[1E-2] \ No newline at end of file diff --git 
a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_real_capital_e_pos_exp.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_real_capital_e_pos_exp.json new file mode 100644 index 000000000000..5a8fc0972425 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_real_capital_e_pos_exp.json @@ -0,0 +1 @@ +[1E+2] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_real_exponent.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_real_exponent.json new file mode 100644 index 000000000000..da2522d61a1e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_real_exponent.json @@ -0,0 +1 @@ +[123e45] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_real_fraction_exponent.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_real_fraction_exponent.json new file mode 100644 index 000000000000..3944a7a454b3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_real_fraction_exponent.json @@ -0,0 +1 @@ +[123.456e78] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_real_neg_exp.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_real_neg_exp.json new file mode 100644 index 000000000000..ca40d3c25538 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_real_neg_exp.json @@ -0,0 +1 @@ +[1e-2] \ No newline at end of file diff --git 
a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_real_pos_exponent.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_real_pos_exponent.json new file mode 100644 index 000000000000..343601d51fdd --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_real_pos_exponent.json @@ -0,0 +1 @@ +[1e+2] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_simple_int.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_simple_int.json new file mode 100644 index 000000000000..e47f69afcf45 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_simple_int.json @@ -0,0 +1 @@ +[123] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_simple_real.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_simple_real.json new file mode 100644 index 000000000000..b02878e5fc19 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_number_simple_real.json @@ -0,0 +1 @@ +[123.456789] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object.json new file mode 100755 index 000000000000..78262eda3fa9 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object.json @@ -0,0 +1 @@ +{"asd":"sdf", "dfg":"fgh"} \ No newline at end of file diff --git 
a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_basic.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_basic.json new file mode 100755 index 000000000000..646bbe7bb1f8 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_basic.json @@ -0,0 +1 @@ +{"asd":"sdf"} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_duplicated_key.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_duplicated_key.json new file mode 100755 index 000000000000..bbc2e1ce433c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_duplicated_key.json @@ -0,0 +1 @@ +{"a":"b","a":"c"} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_duplicated_key_and_value.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_duplicated_key_and_value.json new file mode 100755 index 000000000000..211581c20717 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_duplicated_key_and_value.json @@ -0,0 +1 @@ +{"a":"b","a":"b"} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_empty.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_empty.json new file mode 100644 index 000000000000..9e26dfeeb6e6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_empty.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git 
a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_empty_key.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_empty_key.json new file mode 100755 index 000000000000..c0013d3b8bd2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_empty_key.json @@ -0,0 +1 @@ +{"":0} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_escaped_null_in_key.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_escaped_null_in_key.json new file mode 100644 index 000000000000..593f0f67f9cb --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_escaped_null_in_key.json @@ -0,0 +1 @@ +{"foo\u0000bar": 42} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_extreme_numbers.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_extreme_numbers.json new file mode 100644 index 000000000000..a0d3531c32f9 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_extreme_numbers.json @@ -0,0 +1 @@ +{ "min": -1.0e+28, "max": 1.0e+28 } \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_long_strings.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_long_strings.json new file mode 100644 index 000000000000..bdc4a08719ef --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_long_strings.json @@ -0,0 +1 @@ +{"x":[{"id": 
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}], "id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_simple.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_simple.json new file mode 100644 index 000000000000..dacac917fb7b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_simple.json @@ -0,0 +1 @@ +{"a":[]} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_string_unicode.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_string_unicode.json new file mode 100644 index 000000000000..8effdb297c78 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_string_unicode.json @@ -0,0 +1 @@ +{"title":"\u041f\u043e\u043b\u0442\u043e\u0440\u0430 \u0417\u0435\u043c\u043b\u0435\u043a\u043e\u043f\u0430" } \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_with_newlines.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_with_newlines.json new file mode 100644 index 000000000000..246ec6b34d5e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_object_with_newlines.json @@ -0,0 +1,3 @@ +{ +"a": "b" +} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_1_2_3_bytes_UTF-8_sequences.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_1_2_3_bytes_UTF-8_sequences.json new file mode 100755 index 
000000000000..9967ddeb8b11 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_1_2_3_bytes_UTF-8_sequences.json @@ -0,0 +1 @@ +["\u0060\u012a\u12AB"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_accepted_surrogate_pair.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_accepted_surrogate_pair.json new file mode 100755 index 000000000000..996875cc8c96 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_accepted_surrogate_pair.json @@ -0,0 +1 @@ +["\uD801\udc37"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_accepted_surrogate_pairs.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_accepted_surrogate_pairs.json new file mode 100755 index 000000000000..3401021ecef5 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_accepted_surrogate_pairs.json @@ -0,0 +1 @@ +["\ud83d\ude39\ud83d\udc8d"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_allowed_escapes.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_allowed_escapes.json new file mode 100644 index 000000000000..7f495532fb37 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_allowed_escapes.json @@ -0,0 +1 @@ +["\"\\\/\b\f\n\r\t"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_backslash_and_u_escaped_zero.json 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_backslash_and_u_escaped_zero.json new file mode 100755 index 000000000000..d4439eda73ac --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_backslash_and_u_escaped_zero.json @@ -0,0 +1 @@ +["\\u0000"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_backslash_doublequotes.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_backslash_doublequotes.json new file mode 100644 index 000000000000..ae03243b6742 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_backslash_doublequotes.json @@ -0,0 +1 @@ +["\""] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_comments.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_comments.json new file mode 100644 index 000000000000..2260c20c2f86 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_comments.json @@ -0,0 +1 @@ +["a/*b*/c/*d//e"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_double_escape_a.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_double_escape_a.json new file mode 100644 index 000000000000..6715d6f4049f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_double_escape_a.json @@ -0,0 +1 @@ +["\\a"] \ No newline at end of file diff --git 
a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_double_escape_n.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_double_escape_n.json new file mode 100644 index 000000000000..44ca56c4d942 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_double_escape_n.json @@ -0,0 +1 @@ +["\\n"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_escaped_control_character.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_escaped_control_character.json new file mode 100644 index 000000000000..5b014a9c25b5 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_escaped_control_character.json @@ -0,0 +1 @@ +["\u0012"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_escaped_noncharacter.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_escaped_noncharacter.json new file mode 100755 index 000000000000..2ff52e2c50bc --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_escaped_noncharacter.json @@ -0,0 +1 @@ +["\uFFFF"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_in_array.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_in_array.json new file mode 100755 index 000000000000..21d7ae4cd80b --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_in_array.json @@ -0,0 +1 @@ +["asd"] \ No newline at end of file diff --git 
a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_in_array_with_leading_space.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_in_array_with_leading_space.json new file mode 100755 index 000000000000..9e1887c1e4d2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_in_array_with_leading_space.json @@ -0,0 +1 @@ +[ "asd"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_last_surrogates_1_and_2.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_last_surrogates_1_and_2.json new file mode 100644 index 000000000000..3919cef76579 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_last_surrogates_1_and_2.json @@ -0,0 +1 @@ +["\uDBFF\uDFFF"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_nbsp_uescaped.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_nbsp_uescaped.json new file mode 100644 index 000000000000..2085ab1a1c77 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_nbsp_uescaped.json @@ -0,0 +1 @@ +["new\u00A0line"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_nonCharacterInUTF-8_U+10FFFF.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_nonCharacterInUTF-8_U+10FFFF.json new file mode 100755 index 000000000000..059e4d9dd0f4 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_nonCharacterInUTF-8_U+10FFFF.json @@ -0,0 +1 @@ +["􏿿"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_nonCharacterInUTF-8_U+FFFF.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_nonCharacterInUTF-8_U+FFFF.json new file mode 100755 index 000000000000..4c913bd41a08 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_nonCharacterInUTF-8_U+FFFF.json @@ -0,0 +1 @@ +["￿"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_null_escape.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_null_escape.json new file mode 100644 index 000000000000..c1ad844043e6 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_null_escape.json @@ -0,0 +1 @@ +["\u0000"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_one-byte-utf-8.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_one-byte-utf-8.json new file mode 100644 index 000000000000..157185923ac7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_one-byte-utf-8.json @@ -0,0 +1 @@ +["\u002c"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_pi.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_pi.json new file mode 100644 index 000000000000..9df11ae88bde --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_pi.json @@ -0,0 +1 @@ +["π"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_reservedCharacterInUTF-8_U+1BFFF.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_reservedCharacterInUTF-8_U+1BFFF.json new file mode 100755 index 000000000000..10a33a1717ec --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_reservedCharacterInUTF-8_U+1BFFF.json @@ -0,0 +1 @@ +["𛿿"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_simple_ascii.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_simple_ascii.json new file mode 100644 index 000000000000..8cadf7d051df --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_simple_ascii.json @@ -0,0 +1 @@ +["asd "] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_space.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_space.json new file mode 100644 index 000000000000..efd782cc3250 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_space.json @@ -0,0 +1 @@ +" " \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_surrogates_U+1D11E_MUSICAL_SYMBOL_G_CLEF.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_surrogates_U+1D11E_MUSICAL_SYMBOL_G_CLEF.json new file mode 100755 index 000000000000..7620b66559f3 --- 
/dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_surrogates_U+1D11E_MUSICAL_SYMBOL_G_CLEF.json @@ -0,0 +1 @@ +["\uD834\uDd1e"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_three-byte-utf-8.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_three-byte-utf-8.json new file mode 100644 index 000000000000..108f1d67dffc --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_three-byte-utf-8.json @@ -0,0 +1 @@ +["\u0821"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_two-byte-utf-8.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_two-byte-utf-8.json new file mode 100644 index 000000000000..461503c31001 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_two-byte-utf-8.json @@ -0,0 +1 @@ +["\u0123"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_u+2028_line_sep.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_u+2028_line_sep.json new file mode 100755 index 000000000000..897b6021af72 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_u+2028_line_sep.json @@ -0,0 +1 @@ +["
"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_u+2029_par_sep.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_u+2029_par_sep.json new file mode 100755 index 000000000000..8cd998c89ea3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_u+2029_par_sep.json @@ -0,0 +1 @@ +["
"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_uEscape.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_uEscape.json new file mode 100755 index 000000000000..f7b41a02fa5e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_uEscape.json @@ -0,0 +1 @@ +["\u0061\u30af\u30EA\u30b9"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_uescaped_newline.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_uescaped_newline.json new file mode 100644 index 000000000000..3a5a220b69cc --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_uescaped_newline.json @@ -0,0 +1 @@ +["new\u000Aline"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unescaped_char_delete.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unescaped_char_delete.json new file mode 100755 index 000000000000..7d064f498715 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unescaped_char_delete.json @@ -0,0 +1 @@ +[""] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode.json new file mode 100644 index 000000000000..3598095b79be --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode.json @@ -0,0 +1 @@ +["\uA66D"] \ No newline at end of file 
diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicodeEscapedBackslash.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicodeEscapedBackslash.json new file mode 100755 index 000000000000..0bb3b51e7eae --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicodeEscapedBackslash.json @@ -0,0 +1 @@ +["\u005C"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_2.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_2.json new file mode 100644 index 000000000000..a7dcb97683f7 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_2.json @@ -0,0 +1 @@ +["⍂㈴⍂"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_U+10FFFE_nonchar.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_U+10FFFE_nonchar.json new file mode 100644 index 000000000000..9a8370b96a1c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_U+10FFFE_nonchar.json @@ -0,0 +1 @@ +["\uDBFF\uDFFE"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_U+1FFFE_nonchar.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_U+1FFFE_nonchar.json new file mode 100644 index 000000000000..c51f8ae45c3c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_U+1FFFE_nonchar.json @@ -0,0 +1 @@ 
+["\uD83F\uDFFE"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_U+200B_ZERO_WIDTH_SPACE.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_U+200B_ZERO_WIDTH_SPACE.json new file mode 100644 index 000000000000..626d5f81572d --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_U+200B_ZERO_WIDTH_SPACE.json @@ -0,0 +1 @@ +["\u200B"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_U+2064_invisible_plus.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_U+2064_invisible_plus.json new file mode 100644 index 000000000000..1e23972c65e3 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_U+2064_invisible_plus.json @@ -0,0 +1 @@ +["\u2064"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_U+FDD0_nonchar.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_U+FDD0_nonchar.json new file mode 100644 index 000000000000..18ef151b4f21 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_U+FDD0_nonchar.json @@ -0,0 +1 @@ +["\uFDD0"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_U+FFFE_nonchar.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_U+FFFE_nonchar.json new file mode 100644 index 000000000000..13d261fdad59 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_U+FFFE_nonchar.json @@ -0,0 +1 @@ +["\uFFFE"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_escaped_double_quote.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_escaped_double_quote.json new file mode 100755 index 000000000000..4e6257856dd2 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_unicode_escaped_double_quote.json @@ -0,0 +1 @@ +["\u0022"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_utf8.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_utf8.json new file mode 100644 index 000000000000..40878435f978 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_utf8.json @@ -0,0 +1 @@ +["€𝄞"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_with_del_character.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_with_del_character.json new file mode 100755 index 000000000000..8bd24907d9ea --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_string_with_del_character.json @@ -0,0 +1 @@ +["aa"] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_lonely_false.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_lonely_false.json new file mode 100644 index 000000000000..02e4a84d62c4 --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_lonely_false.json @@ -0,0 +1 @@ +false \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_lonely_int.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_lonely_int.json new file mode 100755 index 000000000000..f70d7bba4ae1 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_lonely_int.json @@ -0,0 +1 @@ +42 \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_lonely_negative_real.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_lonely_negative_real.json new file mode 100755 index 000000000000..b5135a207dee --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_lonely_negative_real.json @@ -0,0 +1 @@ +-0.1 \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_lonely_null.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_lonely_null.json new file mode 100644 index 000000000000..ec747fa47ddb --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_lonely_null.json @@ -0,0 +1 @@ +null \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_lonely_string.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_lonely_string.json new file mode 100755 index 000000000000..b6e982ca96aa --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_lonely_string.json @@ -0,0 +1 @@ +"asd" \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_lonely_true.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_lonely_true.json new file mode 100755 index 000000000000..f32a5804e292 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_lonely_true.json @@ -0,0 +1 @@ +true \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_string_empty.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_string_empty.json new file mode 100644 index 000000000000..3cc762b5501e --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_string_empty.json @@ -0,0 +1 @@ +"" \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_trailing_newline.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_trailing_newline.json new file mode 100644 index 000000000000..0c3426d4c287 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_trailing_newline.json @@ -0,0 +1 @@ +["a"] diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_true_in_array.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_true_in_array.json new file mode 100644 index 000000000000..de601e305f4f --- /dev/null +++ 
b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_true_in_array.json @@ -0,0 +1 @@ +[true] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_whitespace_array.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_whitespace_array.json new file mode 100644 index 000000000000..2bedf7f2de5c --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/it/valid/y_structure_whitespace_array.json @@ -0,0 +1 @@ + [] \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/jmh/jmh-baseline.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/jmh/jmh-baseline.json new file mode 100644 index 000000000000..fc05eb0016a4 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/jmh/jmh-baseline.json @@ -0,0 +1,22 @@ +{ + "benchmarkResultMap": { + "com.google.cloud.spanner.benchmarking.ReadBenchmark.queryBenchmark": { + "scorePercentiles": [ + { + "percentile": "50.0", + "baseline": "450", + "difference": "20" + } + ] + }, + "com.google.cloud.spanner.benchmarking.ReadBenchmark.readBenchmark": { + "scorePercentiles": [ + { + "percentile": "50.0", + "baseline": "450", + "difference": "20" + } + ] + } + } +} \ No newline at end of file diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/read_tests.json b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/read_tests.json new file mode 100644 index 000000000000..1d6d3b21b115 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/read_tests.json @@ -0,0 +1,242 @@ +{"tests": [ + { + "result": {"value": [[ + true, + "abc", + "{\"color\":\"red\",\"value\":\"#f00\"}", + "100", 
+ 1.1, + "3.141592", + "YWJj", + [ + "abc", + "def", + null, + "ghi" + ], + [ + ["abc"], + ["def"], + ["ghi"] + ] + ]]}, + "chunks": ["{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"BOOL\"\n }\n }, {\n \"name\": \"f2\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }, {\n \"name\": \"f3\",\n \"type\": {\n \"code\": \"JSON\"\n }\n }, {\n \"name\": \"f4\",\n \"type\": {\n \"code\": \"INT64\"\n }\n }, {\n \"name\": \"f5\",\n \"type\": {\n \"code\": \"FLOAT64\"\n }\n }, {\n \"name\": \"f6\",\n \"type\": {\n \"code\": \"NUMERIC\"\n }\n }, {\n \"name\": \"f7\",\n \"type\": {\n \"code\": \"BYTES\"\n }\n }, {\n \"name\": \"f8\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRING\"\n }\n }\n }, {\n \"name\": \"f9\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRUCT\",\n \"structType\": {\n \"fields\": [{\n \"name\": \"f81\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }]\n }\n }\n }\n }]\n }\n },\n \"values\": [true, \"abc\", \"{\\\"color\\\":\\\"red\\\",\\\"value\\\":\\\"#f00\\\"}\", \"100\", 1.1, \"3.141592\", \"YWJj\", [\"abc\", \"def\", null, \"ghi\"], [[\"abc\"], [\"def\"], [\"ghi\"]]]\n}"], + "name": "Basic Test" + }, + { + "result": {"value": [["abcdefghi"]]}, + "chunks": [ + "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }]\n }\n },\n \"values\": [\"abc\"],\n \"chunkedValue\": true\n}", + "{\n \"values\": [\"def\"],\n \"chunkedValue\": true\n}", + "{\n \"values\": [\"ghi\"]\n}" + ], + "name": "String Chunking Test" + }, + { + "result": {"value": [[[ + "abc", + "def", + "ghi", + "jkl" + ]]]}, + "chunks": [ + "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRING\"\n }\n }\n }]\n }\n },\n \"values\": [[\"abc\", \"d\"]],\n \"chunkedValue\": true\n}", + "{\n \"values\": [[\"ef\", 
\"gh\"]],\n \"chunkedValue\": true\n}", + "{\n \"values\": [[\"i\", \"jkl\"]]\n}" + ], + "name": "String Array Chunking Test" + }, + { + "result": {"value": [[[ + "abc", + "def", + null, + "ghi", + null, + "jkl" + ]]]}, + "chunks": [ + "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRING\"\n }\n }\n }]\n }\n },\n \"values\": [[\"abc\", \"def\"]],\n \"chunkedValue\": true\n}", + "{\n \"values\": [[null, \"ghi\"]],\n \"chunkedValue\": true\n}", + "{\n \"values\": [[null, \"jkl\"]]\n}" + ], + "name": "String Array Chunking Test With Nulls" + }, + { + "result": {"value": [[[ + "abc", + "def", + "ghi", + "jkl" + ]]]}, + "chunks": [ + "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRING\"\n }\n }\n }]\n }\n },\n \"values\": [[\"abc\", \"def\"]],\n \"chunkedValue\": true\n}", + "{\n \"values\": [[\"\", \"ghi\"]],\n \"chunkedValue\": true\n}", + "{\n \"values\": [[\"\", \"jkl\"]]\n}" + ], + "name": "String Array Chunking Test With Empty Strings" + }, + { + "result": {"value": [[["abcdefghi"]]]}, + "chunks": [ + "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRING\"\n }\n }\n }]\n }\n },\n \"values\": [[\"abc\"]],\n \"chunkedValue\": true\n}", + "{\n \"values\": [[\"def\"]],\n \"chunkedValue\": true\n}", + "{\n \"values\": [[\"ghi\"]]\n}" + ], + "name": "String Array Chunking Test With One Large String" + }, + { + "result": {"value": [[["{}", "{\"color\":\"red\",\"value\":\"#f00\"}"]]]}, + "chunks": [ + "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"JSON\"\n }\n }\n }]\n }\n },\n \"values\": [[\"{}\", \"{\\\"color\\\":\\\"red\\\"\"]],\n \"chunkedValue\": 
true\n}", + "{\n \"values\": [[\",\\\"value\\\":\\\"#f00\\\"}\"]],\n \"chunkedValue\": false\n}" + ], + "name": "JSON Array Chunking Test" + }, + { + "result": {"value": [[[ + "1", + "23", + "4", + null, + 5 + ]]]}, + "chunks": [ + "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"INT64\"\n }\n }\n }]\n }\n },\n \"values\": [[\"1\", \"2\"]],\n \"chunkedValue\": true\n}", + "{\n \"values\": [[\"3\", \"4\"]],\n \"chunkedValue\": true\n}", + "{\n \"values\": [[\"\", null, \"5\"]]\n}" + ], + "name": "INT64 Array Chunking Test" + }, + { + "result": {"value": [[[ + 1, + 2, + "Infinity", + "-Infinity", + "NaN", + null, + 3 + ]]]}, + "chunks": [ + "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"FLOAT64\"\n }\n }\n }]\n }\n },\n \"values\": [[1.0, 2.0]],\n \"chunkedValue\": true\n}", + "{\n \"values\": [[\"Infinity\", \"-Infinity\", \"NaN\"]],\n \"chunkedValue\": true\n}", + "{\n \"values\": [[\"\", null, 3.0]]\n}" + ], + "name": "FLOAT64 Array Chunking Test" + }, + { + "result": {"value": [[[ + "1.1", + "2.3", + "4.0", + null, + 5.5 + ]]]}, + "chunks": [ + "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"NUMERIC\"\n }\n }\n }]\n }\n },\n \"values\": [[\"1.1\", \"2.\"]],\n \"chunkedValue\": true\n}", + "{\n \"values\": [[\"3\", \"4.0\"]],\n \"chunkedValue\": true\n}", + "{\n \"values\": [[\"\", null, \"5.5\"]]\n}" + ], + "name": "NUMERIC Array Chunking Test" + }, + { + "result": {"value": [[[ + [ + "abc", + "defghi" + ], + [ + "123", + "456" + ] + ]]]}, + "chunks": [ + "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRUCT\",\n \"structType\": {\n \"fields\": [{\n 
\"name\": \"f11\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }, {\n \"name\": \"f12\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }]\n }\n }\n }\n }]\n }\n },\n \"values\": [[[\"abc\", \"def\"]]],\n \"chunkedValue\": true\n}", + "{\n \"values\": [[[\"ghi\"], [\"123\", \"456\"]]],\n \"chunkedValue\": true\n}", + "{\n \"values\": [[[\"\"]]]\n}" + ], + "name": "Struct Array Chunking Test" + }, + { + "result": {"value": [[[[[["abc"]]]]]]}, + "chunks": ["{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRUCT\",\n \"structType\": {\n \"fields\": [{\n \"name\": \"f11\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRUCT\",\n \"structType\": {\n \"fields\": [{\n \"name\": \"f12\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }]\n }\n }\n }\n }]\n }\n }\n }\n }]\n }\n },\n \"values\": [[[[[\"abc\"]]]]]\n}"], + "name": "Nested Struct Array Test" + }, + { + "result": {"value": [[[[[ + ["abc"], + ["def"] + ]]]]]}, + "chunks": [ + "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRUCT\",\n \"structType\": {\n \"fields\": [{\n \"name\": \"f11\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRUCT\",\n \"structType\": {\n \"fields\": [{\n \"name\": \"f12\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }]\n }\n }\n }\n }]\n }\n }\n }\n }]\n }\n },\n \"values\": [[[[[\"ab\"]]]]],\n \"chunkedValue\": true\n}", + "{\n \"values\": [[[[[\"c\"], [\"def\"]]]]]\n}" + ], + "name": "Nested Struct Array Chunking Test" + }, + { + "result": {"value": [ + [ + "1", + [["ab"]] + ], + [ + "2", + [["c"]] + ] + ]}, + "chunks": [ + "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }, {\n \"name\": \"f2\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n 
\"code\": \"STRUCT\",\n \"structType\": {\n \"fields\": [{\n \"name\": \"f21\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }]\n }\n }\n }\n }]\n }\n },\n \"values\": [\"1\", [[\"a\"]]],\n \"chunkedValue\": true\n}", + "{\n \"values\": [[[\"b\"]], \"2\"],\n \"chunkedValue\": true\n}", + "{\n \"values\": [\"\", [[\"c\"]]]\n}" + ], + "name": "Struct Array And String Chunking Test" + }, + { + "result": {"value": [ + [ + "abc", + "1" + ], + [ + "def", + "2" + ] + ]}, + "chunks": ["{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }, {\n \"name\": \"f2\",\n \"type\": {\n \"code\": \"INT64\"\n }\n }]\n }\n },\n \"values\": [\"abc\", \"1\", \"def\", \"2\"]\n}"], + "name": "Multiple Row Single Chunk" + }, + { + "result": {"value": [ + [ + "abc", + "1" + ], + [ + "def", + "2" + ] + ]}, + "chunks": [ + "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }, {\n \"name\": \"f2\",\n \"type\": {\n \"code\": \"INT64\"\n }\n }]\n }\n },\n \"values\": [\"ab\"],\n \"chunkedValue\": true\n}", + "{\n \"values\": [\"c\", \"1\", \"de\"],\n \"chunkedValue\": true\n}", + "{\n \"values\": [\"f\", \"2\"]\n}" + ], + "name": "Multiple Row Multiple Chunks" + }, + { + "result": {"value": [ + ["ab"], + ["c"], + ["d"], + ["ef"] + ]}, + "chunks": [ + "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }]\n }\n },\n \"values\": [\"a\"],\n \"chunkedValue\": true\n}", + "{\n \"values\": [\"b\", \"c\"]\n}", + "{\n \"values\": [\"d\", \"e\"],\n \"chunkedValue\": true\n}", + "{\n \"values\": [\"f\"]\n}" + ], + "name": "Multiple Row Chunks/Non Chunks Interleaved" + } +]} diff --git a/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/singer.proto b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/singer.proto new file mode 100644 index 
000000000000..570a3f6f7059 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/singer.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package examples.spanner.music; + +option java_package = "com.google.cloud.spanner"; +option java_outer_classname = "SingerProto"; +option java_multiple_files = false; + +message SingerInfo { + optional int64 singer_id = 1; + optional string birth_date = 2; + optional string nationality = 3; + optional Genre genre = 4; +} + +enum Genre { + POP = 0; + JAZZ = 1; + FOLK = 2; + ROCK = 3; +} diff --git a/java-spanner/google-cloud-spanner/src/test/resources/finder_test.textproto b/java-spanner/google-cloud-spanner/src/test/resources/finder_test.textproto new file mode 100644 index 000000000000..5747c2aee0ef --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/finder_test.textproto @@ -0,0 +1,10078 @@ +test_case { + name: "AllRows" + event { + name: "AllRows/0" + read { + session: "instances/default/databases/db15/sessions/Cj3BBOeD1dP3S66wHZkO00_A5Jv6pRbjR4ox3cktXTOs5bpIgRwIx6bn_QihOEOGGUqEAKU3o9rP5qcqhem-EO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + all: true + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099527356417 + key_recipes { + schema_generation: "\001\001" + recipe { + table_name: "T" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "Key" + } + } + } + } + } + event { + name: "AllRows/2" + read { + session: "instances/default/databases/db15/sessions/Cj3BBOeD1dP3S66wHZkO00_A5Jv6pRbjR4ox3cktXTOs5bpIgRwIx6bn_QihOEOGGUqEAKU3o9rP5qcqhem-EO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + all: true + } + } + hint { + operation_uid: 1 + database_id: 
1099527356417 + schema_generation: "\001\001" + key: "A\206\310\002" + limit_key: "A\206\310\003" + } + } + event { + cache_update { + database_id: 1099527356417 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 155189249 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\317\364\207l" + } + group { + group_uid: 155189249 + tablets { + tablet_uid: 155189249 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\265" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\t@\000\001" + } + key_recipes { + } + } + } + event { + name: "AllRows/4" + read { + session: "instances/default/databases/db15/sessions/Cj3BBOeD1dP3S66wHZkO00_A5Jv6pRbjR4ox3cktXTOs5bpIgRwIx6bn_QihOEOGGUqEAKU3o9rP5qcqhem-EO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + all: true + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099527356417 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 155189249 + split_id: 14079378335067013120 + tablet_uid: 155189249 + } + } + event { + cache_update { + database_id: 1099527356417 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 155189249 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\317\366E\014" + } + group { + group_uid: 155189249 + tablets { + tablet_uid: 155189249 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\265" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\t@\000\001" + } + key_recipes { + } + } + } + event { + name: "AllRows/6" + read { + session: "instances/default/databases/db15/sessions/Cj3BBOeD1dP3S66wHZkO00_A5Jv6pRbjR4ox3cktXTOs5bpIgRwIx6bn_QihOEOGGUqEAKU3o9rP5qcqhem-EO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + all: true + } + } 
+ server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099527356417 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 155189249 + split_id: 14079378335067013120 + tablet_uid: 155189249 + } + } + event { + cache_update { + database_id: 1099527356417 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 155189249 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\317\366E\014" + } + group { + group_uid: 155189249 + tablets { + tablet_uid: 155189249 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\265" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\t@\000\001" + } + key_recipes { + } + } + } +} +test_case { + name: "AllRows_Query" + event { + name: "AllRows_Query/0" + sql { + session: "instances/default/databases/db32/sessions/Cj0GojmEGR2hztX-uM5PHIWlsxPJCNXlqv0WGEwJaEEpQqxxDMvCPcsOzBlvYwUvgBO4B612WcYmbOzYYH8WEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T" + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099546230785 + key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 1 + part { + tag: 50020 + } + part { + tag: 2 + } + } + } + } + } + event { + name: "AllRows_Query/2" + sql { + session: "instances/default/databases/db32/sessions/Cj0GojmEGR2hztX-uM5PHIWlsxPJCNXlqv0WGEwJaEEpQqxxDMvCPcsOzBlvYwUvgBO4B612WcYmbOzYYH8WEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T" + } + hint { + operation_uid: 1 + database_id: 1099546230785 + schema_generation: "\001\001" + key: "A\206\310\004" + } + } + event { + cache_update { + database_id: 1099546230785 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 277872641 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\326\250#\026" + } + group { + group_uid: 277872641 + tablets { + 
tablet_uid: 277872641 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001\177" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\020\220\000\001" + } + key_recipes { + } + } + } + event { + name: "AllRows_Query/4" + sql { + session: "instances/default/databases/db32/sessions/Cj0GojmEGR2hztX-uM5PHIWlsxPJCNXlqv0WGEwJaEEpQqxxDMvCPcsOzBlvYwUvgBO4B612WcYmbOzYYH8WEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T" + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099546230785 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 277872641 + split_id: 14079378335067013120 + tablet_uid: 277872641 + } + } + event { + cache_update { + database_id: 1099546230785 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 277872641 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\326\257X\261" + } + group { + group_uid: 277872641 + tablets { + tablet_uid: 277872641 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001\177" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\020\220\000\001" + } + key_recipes { + } + } + } + event { + name: "AllRows_Query/6" + sql { + session: "instances/default/databases/db32/sessions/Cj0GojmEGR2hztX-uM5PHIWlsxPJCNXlqv0WGEwJaEEpQqxxDMvCPcsOzBlvYwUvgBO4B612WcYmbOzYYH8WEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T" + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099546230785 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 277872641 + split_id: 14079378335067013120 + tablet_uid: 277872641 + } + } +} +test_case { + name: "DropDatabase" + event { + name: "DropDatabase/0" + read { + session: 
"instances/default/databases/db7/sessions/Cj0YdZkOhi1oPNIoTSIFvWggN4HPXnkgZ4pK1NcGXTEeUw8Fr-dTnvFKAeldTUxyzt1zTToerCe3T6XkSgAJEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099517919233 + key_recipes { + schema_generation: "\001\001" + recipe { + table_name: "T" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "Key" + } + } + } + } + } + event { + name: "DropDatabase/2" + read { + session: "instances/default/databases/db7/sessions/Cj0YdZkOhi1oPNIoTSIFvWggN4HPXnkgZ4pK1NcGXTEeUw8Fr-dTnvFKAeldTUxyzt1zTToerCe3T6XkSgAJEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099517919233 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099517919233 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 95420417 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\314\260\211\000" + } + group { + group_uid: 95420417 + tablets { + tablet_uid: 95420417 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001r" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\005\260\000\001" + } + key_recipes { + } + } + } + event { + name: "DropDatabase/4" + read { + session: "instances/default/databases/db7/sessions/Cj0YdZkOhi1oPNIoTSIFvWggN4HPXnkgZ4pK1NcGXTEeUw8Fr-dTnvFKAeldTUxyzt1zTToerCe3T6XkSgAJEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + 
table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099517919233 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 95420417 + split_id: 14079378335067013120 + tablet_uid: 95420417 + } + } + event { + cache_update { + database_id: 1099517919233 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 95420417 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\314\267\020%" + } + group { + group_uid: 95420417 + tablets { + tablet_uid: 95420417 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001r" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\005\260\000\001" + } + key_recipes { + } + } + } + event { + name: "DropDatabase/6" + read { + session: "instances/default/databases/db7/sessions/Cj0YdZkOhi1oPNIoTSIFvWggN4HPXnkgZ4pK1NcGXTEeUw8Fr-dTnvFKAeldTUxyzt1zTToerCe3T6XkSgAJEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099517919233 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 95420417 + split_id: 14079378335067013120 + tablet_uid: 95420417 + } + } + event { + name: "DropDatabase/7" + read { + session: "instances/default/databases/db7/sessions/Cj0YdZkOhi1oPNIoTSIFvWggN4HPXnkgZ4pK1NcGXTEeUw8Fr-dTnvFKAeldTUxyzt1zTToerCe3T6XkSgAJEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 
+ database_id: 1099517919233 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 95420417 + split_id: 14079378335067013120 + tablet_uid: 95420417 + } + } + event { + name: "DropDatabase/8" + read { + session: "instances/default/databases/db7/sessions/Cj3TKzdKJxZRjiCmu4-t34VAf6hJX0jtVFwD_4r02LGDLemFeajxEvbL5uJRGI6n2tkzh90z28eBtGG4VMcoEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099517919233 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 95420417 + split_id: 14079378335067013120 + tablet_uid: 95420417 + } + } + event { + cache_update { + database_id: 1099518967809 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 101711873 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\316\230\264\322" + } + group { + group_uid: 101711873 + tablets { + tablet_uid: 101711873 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\201" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\006\020\000\001" + } + key_recipes { + schema_generation: "\001\001" + recipe { + table_name: "T" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "Key" + } + } + } + } + } + event { + name: "DropDatabase/10" + read { + session: "instances/default/databases/db7/sessions/Cj3TKzdKJxZRjiCmu4-t34VAf6hJX0jtVFwD_4r02LGDLemFeajxEvbL5uJRGI6n2tkzh90z28eBtGG4VMcoEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: 
"localhost:15100" + hint { + operation_uid: 2 + database_id: 1099518967809 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 101711873 + split_id: 14079378335067013120 + tablet_uid: 101711873 + } + } +} +test_case { + name: "DropDatabase_Query" + event { + name: "DropDatabase_Query/0" + sql { + session: "instances/default/databases/db24/sessions/CjzqkvN0XqNJI9149yzUSwfsCokOLsnhmDF8xt_O6U8cnWNvs1yXG4IZ_F3eDa-dooydAR1kFmsJuKR2MSQQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099536793601 + key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 1 + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "key" + } + } + } + } + } + event { + name: "DropDatabase_Query/2" + sql { + session: "instances/default/databases/db24/sessions/CjzqkvN0XqNJI9149yzUSwfsCokOLsnhmDF8xt_O6U8cnWNvs1yXG4IZ_F3eDa-dooydAR1kFmsJuKR2MSQQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099536793601 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099536793601 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 218103809 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\323T\204\035" + } + group { + group_uid: 218103809 + tablets { + tablet_uid: 218103809 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001<" + } + generation: 
"\010\377\377\377\377\377\377\377\377\000\004\r\000\000\001" + } + key_recipes { + } + } + } + event { + name: "DropDatabase_Query/4" + sql { + session: "instances/default/databases/db24/sessions/CjzqkvN0XqNJI9149yzUSwfsCokOLsnhmDF8xt_O6U8cnWNvs1yXG4IZ_F3eDa-dooydAR1kFmsJuKR2MSQQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099536793601 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 218103809 + split_id: 14079378335067013120 + tablet_uid: 218103809 + } + } + event { + cache_update { + database_id: 1099536793601 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 218103809 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\323T\204\035" + } + group { + group_uid: 218103809 + tablets { + tablet_uid: 218103809 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001<" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\r\000\000\001" + } + key_recipes { + } + } + } + event { + name: "DropDatabase_Query/6" + sql { + session: "instances/default/databases/db24/sessions/CjzqkvN0XqNJI9149yzUSwfsCokOLsnhmDF8xt_O6U8cnWNvs1yXG4IZ_F3eDa-dooydAR1kFmsJuKR2MSQQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099536793601 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 218103809 + split_id: 14079378335067013120 + tablet_uid: 218103809 + } + } + event { + name: "DropDatabase_Query/7" + sql { + session: 
"instances/default/databases/db24/sessions/CjzqkvN0XqNJI9149yzUSwfsCokOLsnhmDF8xt_O6U8cnWNvs1yXG4IZ_F3eDa-dooydAR1kFmsJuKR2MSQQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099536793601 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 218103809 + split_id: 14079378335067013120 + tablet_uid: 218103809 + } + } + event { + name: "DropDatabase_Query/8" + sql { + session: "instances/default/databases/db24/sessions/Cj1CH8Frp6VtAXaqx0Y5Fzu4KF_6PgC0lDACO5O72-XqeiRXqDXIKo2L7jDSwWFe-x0k3b7v-OtJeJ2t-j5CEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099536793601 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 218103809 + split_id: 14079378335067013120 + tablet_uid: 218103809 + } + } + event { + cache_update { + database_id: 1099537842177 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 224395265 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\3258X\350" + } + group { + group_uid: 224395265 + tablets { + tablet_uid: 224395265 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001K" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\r`\000\001" + } + key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 1 + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "key" + } + } + } + } + } + event { + name: "DropDatabase_Query/10" + 
sql { + session: "instances/default/databases/db24/sessions/Cj1CH8Frp6VtAXaqx0Y5Fzu4KF_6PgC0lDACO5O72-XqeiRXqDXIKo2L7jDSwWFe-x0k3b7v-OtJeJ2t-j5CEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 2 + database_id: 1099537842177 + schema_generation: "\001\001" + } + } + event { + cache_update { + database_id: 1099537842177 + key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 2 + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "key" + } + } + } + } + } + event { + name: "DropDatabase_Query/12" + sql { + session: "instances/default/databases/db24/sessions/Cj1CH8Frp6VtAXaqx0Y5Fzu4KF_6PgC0lDACO5O72-XqeiRXqDXIKo2L7jDSwWFe-x0k3b7v-OtJeJ2t-j5CEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 2 + database_id: 1099537842177 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 224395265 + split_id: 14079378335067013120 + tablet_uid: 224395265 + } + } +} +test_case { + name: "DropTable" + event { + name: "DropTable/0" + read { + session: "instances/default/databases/db6/sessions/Cjzdj2emR5Db0EijIRbqxYXobPqHRRjWoXCeGyYAUU45Q8eaxOgVRwqLfrWMjgE7RsVjd1MExsPuARm0k1QQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099516870657 + key_recipes { + 
schema_generation: "\001\001" + recipe { + table_name: "T" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "Key" + } + } + } + } + } + event { + name: "DropTable/2" + read { + session: "instances/default/databases/db6/sessions/Cjzdj2emR5Db0EijIRbqxYXobPqHRRjWoXCeGyYAUU45Q8eaxOgVRwqLfrWMjgE7RsVjd1MExsPuARm0k1QQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099516870657 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099516870657 + range { + start_key: "A\206\310" + limit_key: "A\206\310\002\234\2315\000x" + group_uid: 94371841 + split_id: 14079378335067013121 + generation: "\007\006C\314\314-\203[\007\006C\314\314-\217F" + } + group { + group_uid: 94371841 + tablets { + tablet_uid: 94371841 + server_address: "localhost:15000" + role: READ_WRITE + incarnation: "\001p" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\005\240\000\001" + } + key_recipes { + } + } + } + event { + name: "DropTable/4" + read { + session: "instances/default/databases/db6/sessions/Cjzdj2emR5Db0EijIRbqxYXobPqHRRjWoXCeGyYAUU45Q8eaxOgVRwqLfrWMjgE7RsVjd1MExsPuARm0k1QQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099516870657 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\310\002\234\2315\000x" + group_uid: 94371841 + split_id: 14079378335067013121 + tablet_uid: 94371841 + } + } + event { + name: "DropTable/5" + read 
{ + session: "instances/default/databases/db6/sessions/Cjzdj2emR5Db0EijIRbqxYXobPqHRRjWoXCeGyYAUU45Q8eaxOgVRwqLfrWMjgE7RsVjd1MExsPuARm0k1QQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099516870657 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\310\002\234\2315\000x" + group_uid: 94371841 + split_id: 14079378335067013121 + tablet_uid: 94371841 + } + } + event { + name: "DropTable/6" + read { + session: "instances/default/databases/db6/sessions/Cjzdj2emR5Db0EijIRbqxYXobPqHRRjWoXCeGyYAUU45Q8eaxOgVRwqLfrWMjgE7RsVjd1MExsPuARm0k1QQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099516870657 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\310\002\234\2315\000x" + group_uid: 94371841 + split_id: 14079378335067013121 + tablet_uid: 94371841 + } + } +} +test_case { + name: "DropTable_Query" + event { + name: "DropTable_Query/0" + sql { + session: "instances/default/databases/db23/sessions/CjwOqvrExRJDRJJdVo2gAssby75h0g7V10Y5ZxDHqv0YeWUWDGwTfharjGH6t73iqG5m4GXUfjpkqQRVPxsQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099535745025 + key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 1 + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: 
ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "key" + } + } + } + } + } + event { + name: "DropTable_Query/2" + sql { + session: "instances/default/databases/db23/sessions/CjwOqvrExRJDRJJdVo2gAssby75h0g7V10Y5ZxDHqv0YeWUWDGwTfharjGH6t73iqG5m4GXUfjpkqQRVPxsQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099535745025 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099535745025 + range { + start_key: "A\206\310" + limit_key: "A\206\310\002\234\2315\000x" + group_uid: 216006657 + split_id: 14079378335067013121 + generation: "\007\006C\314\322\316\342\373\007\006C\314\322\317\n\241" + } + group { + group_uid: 216006657 + tablets { + tablet_uid: 216006657 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001:" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\014\340\000\001" + } + key_recipes { + } + } + } + event { + name: "DropTable_Query/4" + sql { + session: "instances/default/databases/db23/sessions/CjwOqvrExRJDRJJdVo2gAssby75h0g7V10Y5ZxDHqv0YeWUWDGwTfharjGH6t73iqG5m4GXUfjpkqQRVPxsQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099535745025 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\310\002\234\2315\000x" + group_uid: 216006657 + split_id: 14079378335067013121 + tablet_uid: 216006657 + } + } + event { + cache_update { + database_id: 1099535745025 + range { + start_key: "A\206\310" + limit_key: "A\206\310\002\234\2315\000x" + group_uid: 
216006657 + split_id: 14079378335067013121 + generation: "\007\006C\314\322\316\342\373\007\006C\314\322\323Z\315" + } + group { + group_uid: 216006657 + tablets { + tablet_uid: 216006657 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001:" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\014\340\000\001" + } + key_recipes { + } + } + } + event { + name: "DropTable_Query/6" + sql { + session: "instances/default/databases/db23/sessions/CjwOqvrExRJDRJJdVo2gAssby75h0g7V10Y5ZxDHqv0YeWUWDGwTfharjGH6t73iqG5m4GXUfjpkqQRVPxsQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099535745025 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\310\002\234\2315\000x" + group_uid: 216006657 + split_id: 14079378335067013121 + tablet_uid: 216006657 + } + } + event { + name: "DropTable_Query/7" + sql { + session: "instances/default/databases/db23/sessions/CjwOqvrExRJDRJJdVo2gAssby75h0g7V10Y5ZxDHqv0YeWUWDGwTfharjGH6t73iqG5m4GXUfjpkqQRVPxsQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099535745025 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\310\002\234\2315\000x" + group_uid: 216006657 + split_id: 14079378335067013121 + tablet_uid: 216006657 + } + } + event { + name: "DropTable_Query/8" + sql { + session: "instances/default/databases/db23/sessions/CjwOqvrExRJDRJJdVo2gAssby75h0g7V10Y5ZxDHqv0YeWUWDGwTfharjGH6t73iqG5m4GXUfjpkqQRVPxsQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + 
sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099535745025 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\310\002\234\2315\000x" + group_uid: 216006657 + split_id: 14079378335067013121 + tablet_uid: 216006657 + } + } + event { + cache_update { + database_id: 1099535745025 + range { + start_key: "A\206\310" + limit_key: "A\206\310\002\234\2315\000x" + group_uid: 216006657 + split_id: 14079378335067013121 + generation: "\007\006C\314\322\316\342\373\007\006C\314\322\323Z\315" + } + group { + group_uid: 216006657 + tablets { + tablet_uid: 216006657 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001:" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\014\340\000\001" + } + key_recipes { + schema_generation: "\001\003" + recipe { + operation_uid: 1 + part { + tag: 50020 + } + part { + tag: 3 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: INT64 + } + identifier: "key" + } + } + } + } + } + event { + name: "DropTable_Query/10" + sql { + session: "instances/default/databases/db23/sessions/CjwOqvrExRJDRJJdVo2gAssby75h0g7V10Y5ZxDHqv0YeWUWDGwTfharjGH6t73iqG5m4GXUfjpkqQRVPxsQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099535745025 + schema_generation: "\001\003" + key: "A\206\310\006\234\221\000" + } + } + event { + cache_update { + database_id: 1099535745025 + range { + start_key: "A\206\310\002\234\2315\000x" + limit_key: "A\206\311" + group_uid: 217055233 + split_id: 14079378335067013121 + generation: "\007\006C\314\322\316\342\373\007\006C\314\322\317\n\241" + } + group { + group_uid: 217055233 + tablets 
{ + tablet_uid: 217055233 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001;" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\014\360\000\001" + } + key_recipes { + } + } + } + event { + name: "DropTable_Query/12" + sql { + session: "instances/default/databases/db23/sessions/CjwOqvrExRJDRJJdVo2gAssby75h0g7V10Y5ZxDHqv0YeWUWDGwTfharjGH6t73iqG5m4GXUfjpkqQRVPxsQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099535745025 + schema_generation: "\001\003" + key: "A\206\310\002\234\2315\000x" + limit_key: "A\206\311" + group_uid: 217055233 + split_id: 14079378335067013121 + tablet_uid: 217055233 + } + } +} +test_case { + name: "GlobalIndex" + event { + name: "GlobalIndex/0" + read { + session: "instances/default/databases/db10/sessions/Cj2SfZKLzB4nnXuwwI1VKvjUXXTrygu1niE6w4W-OXraTsmoOUubuN4wizbc455aTUR5zoNDXT1KipdeF64TEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + index: "GlobalIndex" + columns: "Key" + columns: "V0" + key_set { + keys { + values { + string_value: "0_0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099522113537 + key_recipes { + schema_generation: "\001\001" + recipe { + index_name: "GlobalIndex" + part { + tag: 50020 + } + part { + tag: 2 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "V0" + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "Key" + } + } + } + } + } + event { + name: "GlobalIndex/2" + read { + session: "instances/default/databases/db10/sessions/Cj2SfZKLzB4nnXuwwI1VKvjUXXTrygu1niE6w4W-OXraTsmoOUubuN4wizbc455aTUR5zoNDXT1KipdeF64TEO_3ts7M-ZAD" + 
transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + index: "GlobalIndex" + columns: "Key" + columns: "V0" + key_set { + keys { + values { + string_value: "0_0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099522113537 + schema_generation: "\001\001" + key: "A\206\310\004\234\2310_0\000x" + limit_key: "A\206\310\004\234\2310_0\000y" + } + } + event { + cache_update { + database_id: 1099522113537 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 121634817 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\317<\276\306" + } + group { + group_uid: 121634817 + tablets { + tablet_uid: 121634817 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\223" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\007@\000\001" + } + key_recipes { + } + } + } + event { + name: "GlobalIndex/4" + read { + session: "instances/default/databases/db10/sessions/Cj2SfZKLzB4nnXuwwI1VKvjUXXTrygu1niE6w4W-OXraTsmoOUubuN4wizbc455aTUR5zoNDXT1KipdeF64TEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + index: "GlobalIndex" + columns: "Key" + columns: "V0" + key_set { + keys { + values { + string_value: "0_0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099522113537 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 121634817 + split_id: 14079378335067013120 + tablet_uid: 121634817 + } + } + event { + cache_update { + database_id: 1099522113537 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 121634817 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\317>\330]" + } + group { + group_uid: 121634817 + tablets { + tablet_uid: 121634817 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\223" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\007@\000\001" + } + key_recipes { + } 
+ } + } + event { + name: "GlobalIndex/6" + read { + session: "instances/default/databases/db10/sessions/Cj2SfZKLzB4nnXuwwI1VKvjUXXTrygu1niE6w4W-OXraTsmoOUubuN4wizbc455aTUR5zoNDXT1KipdeF64TEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + index: "GlobalIndex" + columns: "Key" + columns: "V0" + key_set { + keys { + values { + string_value: "0_0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099522113537 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 121634817 + split_id: 14079378335067013120 + tablet_uid: 121634817 + } + } +} +test_case { + name: "GlobalIndex_Query" + event { + name: "GlobalIndex_Query/0" + sql { + session: "instances/default/databases/db27/sessions/Cj2j_zqkXN2mCtkbuUA7him868HfGm0ISHxO8OVhF7BawzQduQQE91KOAhYjolCxMK25z5Pkkss_6mBFsl6EEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T@{FORCE_INDEX=GlobalIndex} WHERE V0 = @v0" + params { + fields { + key: "v0" + value { + string_value: "0_0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099540987905 + key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 1 + part { + tag: 50020 + } + part { + tag: 2 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "v0" + } + } + } + } + } + event { + name: "GlobalIndex_Query/2" + sql { + session: "instances/default/databases/db27/sessions/Cj2j_zqkXN2mCtkbuUA7him868HfGm0ISHxO8OVhF7BawzQduQQE91KOAhYjolCxMK25z5Pkkss_6mBFsl6EEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T@{FORCE_INDEX=GlobalIndex} WHERE V0 = @v0" + params { + fields { + key: "v0" + value { + string_value: "0_0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099540987905 + schema_generation: "\001\001" + key: 
"A\206\310\004\234\2310_0\000x" + } + } + event { + cache_update { + database_id: 1099540987905 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 244318209 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\325\373\036\334" + } + group { + group_uid: 244318209 + tablets { + tablet_uid: 244318209 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001_" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\016\220\000\001" + } + key_recipes { + } + } + } + event { + name: "GlobalIndex_Query/4" + sql { + session: "instances/default/databases/db27/sessions/Cj2j_zqkXN2mCtkbuUA7him868HfGm0ISHxO8OVhF7BawzQduQQE91KOAhYjolCxMK25z5Pkkss_6mBFsl6EEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T@{FORCE_INDEX=GlobalIndex} WHERE V0 = @v0" + params { + fields { + key: "v0" + value { + string_value: "0_0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099540987905 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 244318209 + split_id: 14079378335067013120 + tablet_uid: 244318209 + } + } + event { + cache_update { + database_id: 1099540987905 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 244318209 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\325\375\"\r" + } + group { + group_uid: 244318209 + tablets { + tablet_uid: 244318209 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001_" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\016\220\000\001" + } + key_recipes { + } + } + } + event { + name: "GlobalIndex_Query/6" + sql { + session: "instances/default/databases/db27/sessions/Cj2j_zqkXN2mCtkbuUA7him868HfGm0ISHxO8OVhF7BawzQduQQE91KOAhYjolCxMK25z5Pkkss_6mBFsl6EEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM 
T@{FORCE_INDEX=GlobalIndex} WHERE V0 = @v0" + params { + fields { + key: "v0" + value { + string_value: "0_0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099540987905 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 244318209 + split_id: 14079378335067013120 + tablet_uid: 244318209 + } + } +} +test_case { + name: "GroupDeletion" + event { + name: "GroupDeletion/0" + read { + session: "instances/default/databases/db8/sessions/Cj2YTJ4-liCil_CTp2ldntWNe-Q1moGC2WmQds4wCmJl2G_XJKUi8wMd616zuX9h7ru-dDid2i4ez_62VO_FEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099520016385 + key_recipes { + schema_generation: "\001\001" + recipe { + table_name: "T" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "Key" + } + } + } + } + } + event { + name: "GroupDeletion/2" + read { + session: "instances/default/databases/db8/sessions/Cj2YTJ4-liCil_CTp2ldntWNe-Q1moGC2WmQds4wCmJl2G_XJKUi8wMd616zuX9h7ru-dDid2i4ez_62VO_FEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099520016385 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099520016385 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 108003329 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\316\247-E" + } + group { + group_uid: 108003329 + tablets { + tablet_uid: 
108003329 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\204" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\006p\000\001" + } + key_recipes { + } + } + } + event { + name: "GroupDeletion/4" + read { + session: "instances/default/databases/db8/sessions/Cj2YTJ4-liCil_CTp2ldntWNe-Q1moGC2WmQds4wCmJl2G_XJKUi8wMd616zuX9h7ru-dDid2i4ez_62VO_FEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099520016385 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 108003329 + split_id: 14079378335067013120 + tablet_uid: 108003329 + } + } + event { + cache_update { + database_id: 1099520016385 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 108003329 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\316\250\241\234" + } + group { + group_uid: 108003329 + tablets { + tablet_uid: 108003329 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\204" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\006p\000\001" + } + key_recipes { + } + } + } + event { + name: "GroupDeletion/6" + read { + session: "instances/default/databases/db8/sessions/Cj2YTJ4-liCil_CTp2ldntWNe-Q1moGC2WmQds4wCmJl2G_XJKUi8wMd616zuX9h7ru-dDid2i4ez_62VO_FEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099520016385 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 108003329 + split_id: 14079378335067013120 + tablet_uid: 
108003329 + } + } + event { + name: "GroupDeletion/7" + read { + session: "instances/default/databases/db8/sessions/Cj2YTJ4-liCil_CTp2ldntWNe-Q1moGC2WmQds4wCmJl2G_XJKUi8wMd616zuX9h7ru-dDid2i4ez_62VO_FEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099520016385 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 108003329 + split_id: 14079378335067013120 + tablet_uid: 108003329 + } + } + event { + cache_update { + database_id: 1099520016385 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 114294785 + split_id: 14079378335067013124 + generation: "\007\006C\314\316\271~\231\007\006C\314\317\017\371\343" + } + group { + group_uid: 114294785 + tablets { + tablet_uid: 114294785 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\212" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\006\320\000\001" + } + key_recipes { + } + } + } + event { + name: "GroupDeletion/9" + read { + session: "instances/default/databases/db8/sessions/Cj2YTJ4-liCil_CTp2ldntWNe-Q1moGC2WmQds4wCmJl2G_XJKUi8wMd616zuX9h7ru-dDid2i4ez_62VO_FEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099520016385 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 114294785 + split_id: 14079378335067013124 + tablet_uid: 114294785 + } + } +} +test_case { + name: "GroupDeletion_Query" + event { + name: "GroupDeletion_Query/0" + sql { + session: 
"instances/default/databases/db25/sessions/CjzwZvNAsqeeS_4WyWSRcOTXyaGjdp8zV4mkj3qDu34yD2oxUb_znww6xxRwktfRVvNjgx_H0XP2um8W4WUQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099538890753 + key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 1 + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "key" + } + } + } + } + } + event { + name: "GroupDeletion_Query/2" + sql { + session: "instances/default/databases/db25/sessions/CjzwZvNAsqeeS_4WyWSRcOTXyaGjdp8zV4mkj3qDu34yD2oxUb_znww6xxRwktfRVvNjgx_H0XP2um8W4WUQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099538890753 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099538890753 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 230686721 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\325I\301\354" + } + group { + group_uid: 230686721 + tablets { + tablet_uid: 230686721 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001S" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\r\300\000\001" + } + key_recipes { + } + } + } + event { + name: "GroupDeletion_Query/4" + sql { + session: "instances/default/databases/db25/sessions/CjzwZvNAsqeeS_4WyWSRcOTXyaGjdp8zV4mkj3qDu34yD2oxUb_znww6xxRwktfRVvNjgx_H0XP2um8W4WUQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: 
"SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099538890753 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 230686721 + split_id: 14079378335067013120 + tablet_uid: 230686721 + } + } + event { + cache_update { + database_id: 1099538890753 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 230686721 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\325L\365\363" + } + group { + group_uid: 230686721 + tablets { + tablet_uid: 230686721 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001S" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\r\300\000\001" + } + key_recipes { + } + } + } + event { + name: "GroupDeletion_Query/6" + sql { + session: "instances/default/databases/db25/sessions/CjzwZvNAsqeeS_4WyWSRcOTXyaGjdp8zV4mkj3qDu34yD2oxUb_znww6xxRwktfRVvNjgx_H0XP2um8W4WUQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099538890753 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 230686721 + split_id: 14079378335067013120 + tablet_uid: 230686721 + } + } + event { + name: "GroupDeletion_Query/7" + sql { + session: "instances/default/databases/db25/sessions/CjzwZvNAsqeeS_4WyWSRcOTXyaGjdp8zV4mkj3qDu34yD2oxUb_znww6xxRwktfRVvNjgx_H0XP2um8W4WUQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + 
database_id: 1099538890753 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 230686721 + split_id: 14079378335067013120 + tablet_uid: 230686721 + } + } + event { + cache_update { + database_id: 1099538890753 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 236978177 + split_id: 14079378335067013124 + generation: "\007\006C\314\325]\371r\007\006C\314\325\264y\234" + } + group { + group_uid: 236978177 + tablets { + tablet_uid: 236978177 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001V" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\016 \000\001" + } + key_recipes { + } + } + } + event { + name: "GroupDeletion_Query/9" + sql { + session: "instances/default/databases/db25/sessions/CjzwZvNAsqeeS_4WyWSRcOTXyaGjdp8zV4mkj3qDu34yD2oxUb_znww6xxRwktfRVvNjgx_H0XP2um8W4WUQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099538890753 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 236978177 + split_id: 14079378335067013124 + tablet_uid: 236978177 + } + } +} +test_case { + name: "Interleaved" + event { + name: "Interleaved/0" + read { + session: "instances/default/databases/db9/sessions/Cj159oUqLBjvyJBqNYV0pPwVz-a8ZuI6RNL-8C27MFNJ3faQKqSyzmgAttt98quBLgzWI-Z7RZAVzB2-RLz1EO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "Interleaved" + columns: "Key" + columns: "Key2" + columns: "V4" + key_set { + keys { + values { + string_value: "0" + } + values { + string_value: "1" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099521064961 + key_recipes { + schema_generation: "\001\001" + recipe { + table_name: 
"Interleaved" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "Key" + } + part { + tag: 5 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: INT64 + } + identifier: "Key2" + } + } + } + } + } + event { + name: "Interleaved/2" + read { + session: "instances/default/databases/db9/sessions/Cj159oUqLBjvyJBqNYV0pPwVz-a8ZuI6RNL-8C27MFNJ3faQKqSyzmgAttt98quBLgzWI-Z7RZAVzB2-RLz1EO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "Interleaved" + columns: "Key" + columns: "Key2" + columns: "V4" + key_set { + keys { + values { + string_value: "0" + } + values { + string_value: "1" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099521064961 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x\n\234\221\002" + } + } + event { + cache_update { + database_id: 1099521064961 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 115343361 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\317\"1\357" + } + group { + group_uid: 115343361 + tablets { + tablet_uid: 115343361 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\213" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\006\340\000\001" + } + key_recipes { + } + } + } + event { + name: "Interleaved/4" + read { + session: "instances/default/databases/db9/sessions/Cj159oUqLBjvyJBqNYV0pPwVz-a8ZuI6RNL-8C27MFNJ3faQKqSyzmgAttt98quBLgzWI-Z7RZAVzB2-RLz1EO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "Interleaved" + columns: "Key" + columns: "Key2" + columns: "V4" + key_set { + keys { + values { + string_value: "0" + } + values { + string_value: "1" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099521064961 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + 
group_uid: 115343361 + split_id: 14079378335067013120 + tablet_uid: 115343361 + } + } + event { + cache_update { + database_id: 1099521064961 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 115343361 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\317$\325\264" + } + group { + group_uid: 115343361 + tablets { + tablet_uid: 115343361 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\213" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\006\340\000\001" + } + key_recipes { + } + } + } + event { + name: "Interleaved/6" + read { + session: "instances/default/databases/db9/sessions/Cj159oUqLBjvyJBqNYV0pPwVz-a8ZuI6RNL-8C27MFNJ3faQKqSyzmgAttt98quBLgzWI-Z7RZAVzB2-RLz1EO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "Interleaved" + columns: "Key" + columns: "Key2" + columns: "V4" + key_set { + keys { + values { + string_value: "0" + } + values { + string_value: "1" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099521064961 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 115343361 + split_id: 14079378335067013120 + tablet_uid: 115343361 + } + } +} +test_case { + name: "Interleaved_Query" + event { + name: "Interleaved_Query/0" + sql { + session: "instances/default/databases/db26/sessions/CjwmKo7LqIFOF3-EZMw7pN1t5FHbWltIzxNurAxOo4YsPhpFVXTxgna7VKVy7lRt0OLwuunlyBtMuQwEtrkQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, Key2, V4 FROM Interleaved WHERE Key = @k AND Key2 = @k2" + params { + fields { + key: "k" + value { + string_value: "0" + } + } + fields { + key: "k2" + value { + string_value: "1" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099539939329 + key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 1 + part { + tag: 50020 + } + part 
{ + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "k" + } + } + } + } + } + event { + name: "Interleaved_Query/2" + sql { + session: "instances/default/databases/db26/sessions/CjwmKo7LqIFOF3-EZMw7pN1t5FHbWltIzxNurAxOo4YsPhpFVXTxgna7VKVy7lRt0OLwuunlyBtMuQwEtrkQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, Key2, V4 FROM Interleaved WHERE Key = @k AND Key2 = @k2" + params { + fields { + key: "k" + value { + string_value: "0" + } + } + fields { + key: "k2" + value { + string_value: "1" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099539939329 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099539939329 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 238026753 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\325\326%\352" + } + group { + group_uid: 238026753 + tablets { + tablet_uid: 238026753 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001X" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\0160\000\001" + } + key_recipes { + } + } + } + event { + name: "Interleaved_Query/4" + sql { + session: "instances/default/databases/db26/sessions/CjwmKo7LqIFOF3-EZMw7pN1t5FHbWltIzxNurAxOo4YsPhpFVXTxgna7VKVy7lRt0OLwuunlyBtMuQwEtrkQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, Key2, V4 FROM Interleaved WHERE Key = @k AND Key2 = @k2" + params { + fields { + key: "k" + value { + string_value: "0" + } + } + fields { + key: "k2" + value { + string_value: "1" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099539939329 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 238026753 + split_id: 14079378335067013120 + tablet_uid: 238026753 + } + } + event { + 
cache_update { + database_id: 1099539939329 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 238026753 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\325\335\2640" + } + group { + group_uid: 238026753 + tablets { + tablet_uid: 238026753 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001X" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\0160\000\001" + } + key_recipes { + } + } + } + event { + name: "Interleaved_Query/6" + sql { + session: "instances/default/databases/db26/sessions/CjwmKo7LqIFOF3-EZMw7pN1t5FHbWltIzxNurAxOo4YsPhpFVXTxgna7VKVy7lRt0OLwuunlyBtMuQwEtrkQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, Key2, V4 FROM Interleaved WHERE Key = @k AND Key2 = @k2" + params { + fields { + key: "k" + value { + string_value: "0" + } + } + fields { + key: "k2" + value { + string_value: "1" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099539939329 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 238026753 + split_id: 14079378335067013120 + tablet_uid: 238026753 + } + } +} +test_case { + name: "LocalIndex" + event { + name: "LocalIndex/0" + read { + session: "instances/default/databases/db11/sessions/Cj3NnaSUOiCKsnC8U3zeivYI8gzlBVS8qGYPTllITW3XwUhaR_wRK0U54FmFFaV3e3_aq2SgWEvGuKldsQaUEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "Interleaved" + index: "LocalIndex" + columns: "Key" + columns: "Key2" + columns: "V4" + key_set { + keys { + values { + string_value: "0" + } + values { + string_value: "0_1_4" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099523162113 + key_recipes { + schema_generation: "\001\001" + recipe { + index_name: "LocalIndex" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + 
type { + code: STRING + } + identifier: "Key" + } + part { + tag: 6 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "V4" + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: INT64 + } + identifier: "Key2" + } + } + } + } + } + event { + name: "LocalIndex/2" + read { + session: "instances/default/databases/db11/sessions/Cj3NnaSUOiCKsnC8U3zeivYI8gzlBVS8qGYPTllITW3XwUhaR_wRK0U54FmFFaV3e3_aq2SgWEvGuKldsQaUEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "Interleaved" + index: "LocalIndex" + columns: "Key" + columns: "Key2" + columns: "V4" + key_set { + keys { + values { + string_value: "0" + } + values { + string_value: "0_1_4" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099523162113 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x\014\234\2310_1_4\000x" + limit_key: "A\206\310\002\234\2310\000x\014\234\2310_1_4\000y" + } + } + event { + cache_update { + database_id: 1099523162113 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 127926273 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\317Q\246\337" + } + group { + group_uid: 127926273 + tablets { + tablet_uid: 127926273 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\230" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\007\240\000\001" + } + key_recipes { + } + } + } + event { + name: "LocalIndex/4" + read { + session: "instances/default/databases/db11/sessions/Cj3NnaSUOiCKsnC8U3zeivYI8gzlBVS8qGYPTllITW3XwUhaR_wRK0U54FmFFaV3e3_aq2SgWEvGuKldsQaUEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "Interleaved" + index: "LocalIndex" + columns: "Key" + columns: "Key2" + columns: "V4" + key_set { + keys { + values { + string_value: "0" + } + values { + string_value: "0_1_4" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 
+ database_id: 1099523162113 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 127926273 + split_id: 14079378335067013120 + tablet_uid: 127926273 + } + } + event { + cache_update { + database_id: 1099523162113 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 127926273 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\317W\220p" + } + group { + group_uid: 127926273 + tablets { + tablet_uid: 127926273 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\230" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\007\240\000\001" + } + key_recipes { + } + } + } + event { + name: "LocalIndex/6" + read { + session: "instances/default/databases/db11/sessions/Cj3NnaSUOiCKsnC8U3zeivYI8gzlBVS8qGYPTllITW3XwUhaR_wRK0U54FmFFaV3e3_aq2SgWEvGuKldsQaUEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "Interleaved" + index: "LocalIndex" + columns: "Key" + columns: "Key2" + columns: "V4" + key_set { + keys { + values { + string_value: "0" + } + values { + string_value: "0_1_4" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099523162113 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 127926273 + split_id: 14079378335067013120 + tablet_uid: 127926273 + } + } +} +test_case { + name: "LocalIndex_Query" + event { + name: "LocalIndex_Query/0" + sql { + session: "instances/default/databases/db28/sessions/CjzP21cJKRNtIN_GWjr6wiMTSt8dYLIfFJy3h4u1li9PDKDHfIZN3YU1zJMe5FZ7DlnXumM88ezT_abQ4cIQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, Key2, V4 FROM Interleaved@{FORCE_INDEX=LocalIndex} WHERE Key = @k AND V4 = @k2" + params { + fields { + key: "k" + value { + string_value: "0" + } + } + fields { + key: "k2" + value { + string_value: "0_1_4" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + 
cache_update { + database_id: 1099542036481 + key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 1 + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "k" + } + } + } + } + } + event { + name: "LocalIndex_Query/2" + sql { + session: "instances/default/databases/db28/sessions/CjzP21cJKRNtIN_GWjr6wiMTSt8dYLIfFJy3h4u1li9PDKDHfIZN3YU1zJMe5FZ7DlnXumM88ezT_abQ4cIQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, Key2, V4 FROM Interleaved@{FORCE_INDEX=LocalIndex} WHERE Key = @k AND V4 = @k2" + params { + fields { + key: "k" + value { + string_value: "0" + } + } + fields { + key: "k2" + value { + string_value: "0_1_4" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099542036481 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099542036481 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 250609665 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\326\020^\316" + } + group { + group_uid: 250609665 + tablets { + tablet_uid: 250609665 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001d" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\016\360\000\001" + } + key_recipes { + } + } + } + event { + name: "LocalIndex_Query/4" + sql { + session: "instances/default/databases/db28/sessions/CjzP21cJKRNtIN_GWjr6wiMTSt8dYLIfFJy3h4u1li9PDKDHfIZN3YU1zJMe5FZ7DlnXumM88ezT_abQ4cIQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, Key2, V4 FROM Interleaved@{FORCE_INDEX=LocalIndex} WHERE Key = @k AND V4 = @k2" + params { + fields { + key: "k" + value { + string_value: "0" + } + } + fields { + key: "k2" + value { + string_value: "0_1_4" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + 
database_id: 1099542036481 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 250609665 + split_id: 14079378335067013120 + tablet_uid: 250609665 + } + } + event { + cache_update { + database_id: 1099542036481 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 250609665 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\326\022\344\366" + } + group { + group_uid: 250609665 + tablets { + tablet_uid: 250609665 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001d" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\016\360\000\001" + } + key_recipes { + } + } + } + event { + name: "LocalIndex_Query/6" + sql { + session: "instances/default/databases/db28/sessions/CjzP21cJKRNtIN_GWjr6wiMTSt8dYLIfFJy3h4u1li9PDKDHfIZN3YU1zJMe5FZ7DlnXumM88ezT_abQ4cIQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, Key2, V4 FROM Interleaved@{FORCE_INDEX=LocalIndex} WHERE Key = @k AND V4 = @k2" + params { + fields { + key: "k" + value { + string_value: "0" + } + } + fields { + key: "k2" + value { + string_value: "0_1_4" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099542036481 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 250609665 + split_id: 14079378335067013120 + tablet_uid: 250609665 + } + } +} +test_case { + name: "Merge" + event { + name: "Merge/0" + read { + session: "instances/default/databases/db5/sessions/Cjz7xOXhdnzM3xT1oBtjGhhBHXBGsxj63sYo7p6OcsysOczYU7KkRu_Uv63jH_R97AUzW8qTn9BgpgC5ncMQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099515822081 + key_recipes { + 
schema_generation: "\001\001" + recipe { + table_name: "T" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "Key" + } + } + } + } + } + event { + name: "Merge/2" + read { + session: "instances/default/databases/db5/sessions/Cjz7xOXhdnzM3xT1oBtjGhhBHXBGsxj63sYo7p6OcsysOczYU7KkRu_Uv63jH_R97AUzW8qTn9BgpgC5ncMQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099515822081 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099515822081 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 78643201 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\313\356\324\233" + } + group { + group_uid: 78643201 + tablets { + tablet_uid: 78643201 + server_address: "localhost:15000" + role: READ_WRITE + incarnation: "\001b" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\004\260\000\001" + } + key_recipes { + } + } + } + event { + name: "Merge/4" + read { + session: "instances/default/databases/db5/sessions/Cjz7xOXhdnzM3xT1oBtjGhhBHXBGsxj63sYo7p6OcsysOczYU7KkRu_Uv63jH_R97AUzW8qTn9BgpgC5ncMQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099515822081 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 78643201 + split_id: 14079378335067013120 + tablet_uid: 78643201 + } + } + event { + name: "Merge/5" + read { + session: 
"instances/default/databases/db5/sessions/Cjz7xOXhdnzM3xT1oBtjGhhBHXBGsxj63sYo7p6OcsysOczYU7KkRu_Uv63jH_R97AUzW8qTn9BgpgC5ncMQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099515822081 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 78643201 + split_id: 14079378335067013120 + tablet_uid: 78643201 + } + } + event { + cache_update { + database_id: 1099515822081 + range { + start_key: "A\206\310" + limit_key: "A\206\310\002\234\2315\000x" + group_uid: 85983233 + split_id: 14079378335067013121 + generation: "\007\006C\314\313\377\336@\007\006C\314\313\377\360\270" + } + range { + start_key: "A\206\310\002\234\2315\000x" + limit_key: "A\206\311" + group_uid: 84934657 + split_id: 14079378335067013121 + generation: "\007\006C\314\313\377\336@\007\006C\314\313\377\360\270" + } + group { + group_uid: 85983233 + tablets { + tablet_uid: 85983233 + server_address: "localhost:15000" + role: READ_WRITE + incarnation: "\001i" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\005 \000\001" + } + group { + group_uid: 84934657 + tablets { + tablet_uid: 84934657 + server_address: "localhost:15000" + role: READ_WRITE + incarnation: "\001h" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\005\020\000\001" + } + key_recipes { + } + } + } + event { + name: "Merge/7" + read { + session: "instances/default/databases/db5/sessions/Cjz7xOXhdnzM3xT1oBtjGhhBHXBGsxj63sYo7p6OcsysOczYU7KkRu_Uv63jH_R97AUzW8qTn9BgpgC5ncMQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15000" + 
hint { + operation_uid: 1 + database_id: 1099515822081 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\310\002\234\2315\000x" + group_uid: 85983233 + split_id: 14079378335067013121 + tablet_uid: 85983233 + } + } + event { + name: "Merge/8" + read { + session: "instances/default/databases/db5/sessions/Cjz7xOXhdnzM3xT1oBtjGhhBHXBGsxj63sYo7p6OcsysOczYU7KkRu_Uv63jH_R97AUzW8qTn9BgpgC5ncMQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "6" + } + } + } + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099515822081 + schema_generation: "\001\001" + key: "A\206\310\002\234\2315\000x" + limit_key: "A\206\311" + group_uid: 84934657 + split_id: 14079378335067013121 + tablet_uid: 84934657 + } + } + event { + name: "Merge/9" + read { + session: "instances/default/databases/db5/sessions/Cjz7xOXhdnzM3xT1oBtjGhhBHXBGsxj63sYo7p6OcsysOczYU7KkRu_Uv63jH_R97AUzW8qTn9BgpgC5ncMQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099515822081 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\310\002\234\2315\000x" + group_uid: 85983233 + split_id: 14079378335067013121 + tablet_uid: 85983233 + } + } + event { + cache_update { + database_id: 1099515822081 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 85983233 + split_id: 14079378335067013123 + generation: "\007\006C\314\314\003\324\213\007\006C\314\314\003\334\212" + } + group { + group_uid: 85983233 + tablets { + tablet_uid: 85983233 + server_address: "localhost:15000" + role: READ_WRITE + incarnation: "\001i" + } + generation: 
"\010\377\377\377\377\377\377\377\377\000\004\005 \000\001" + } + key_recipes { + } + } + } + event { + name: "Merge/11" + read { + session: "instances/default/databases/db5/sessions/Cjz7xOXhdnzM3xT1oBtjGhhBHXBGsxj63sYo7p6OcsysOczYU7KkRu_Uv63jH_R97AUzW8qTn9BgpgC5ncMQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099515822081 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 85983233 + split_id: 14079378335067013123 + tablet_uid: 85983233 + } + } + event { + name: "Merge/12" + read { + session: "instances/default/databases/db5/sessions/Cjz7xOXhdnzM3xT1oBtjGhhBHXBGsxj63sYo7p6OcsysOczYU7KkRu_Uv63jH_R97AUzW8qTn9BgpgC5ncMQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "6" + } + } + } + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099515822081 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 85983233 + split_id: 14079378335067013123 + tablet_uid: 85983233 + } + } +} +test_case { + name: "Merge_Query" + event { + name: "Merge_Query/0" + sql { + session: "instances/default/databases/db22/sessions/Cj1iKobFlHIjFAAFmyl3O7-THtK7-TS4hiAm_v9p0ablflCYr9WEbYn0QSEVHUPKxTdHd7OxkQh5CPryVaIjEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099534696449 + key_recipes { + schema_generation: "\001\001" + recipe { + 
operation_uid: 1 + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "key" + } + } + } + } + } + event { + name: "Merge_Query/2" + sql { + session: "instances/default/databases/db22/sessions/Cj1iKobFlHIjFAAFmyl3O7-THtK7-TS4hiAm_v9p0ablflCYr9WEbYn0QSEVHUPKxTdHd7OxkQh5CPryVaIjEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099534696449 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099534696449 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 201326593 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\3229\235\n" + } + group { + group_uid: 201326593 + tablets { + tablet_uid: 201326593 + server_address: "localhost:15000" + role: READ_WRITE + incarnation: "\002\001," + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\014\000\000\001" + } + key_recipes { + } + } + } + event { + name: "Merge_Query/4" + sql { + session: "instances/default/databases/db22/sessions/Cj1iKobFlHIjFAAFmyl3O7-THtK7-TS4hiAm_v9p0ablflCYr9WEbYn0QSEVHUPKxTdHd7OxkQh5CPryVaIjEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099534696449 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 201326593 + split_id: 14079378335067013120 + tablet_uid: 201326593 + } + } + event { + name: "Merge_Query/5" + sql { + session: 
"instances/default/databases/db22/sessions/Cj1iKobFlHIjFAAFmyl3O7-THtK7-TS4hiAm_v9p0ablflCYr9WEbYn0QSEVHUPKxTdHd7OxkQh5CPryVaIjEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099534696449 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 201326593 + split_id: 14079378335067013120 + tablet_uid: 201326593 + } + } + event { + cache_update { + database_id: 1099534696449 + range { + start_key: "A\206\310" + limit_key: "A\206\310\002\234\2315\000x" + group_uid: 207618049 + split_id: 14079378335067013121 + generation: "\007\006C\314\322\234*\275\007\006C\314\322\2345\351" + } + range { + start_key: "A\206\310\002\234\2315\000x" + limit_key: "A\206\311" + group_uid: 208666625 + split_id: 14079378335067013121 + generation: "\007\006C\314\322\234*\275\007\006C\314\322\2345\351" + } + group { + group_uid: 207618049 + tablets { + tablet_uid: 207618049 + server_address: "localhost:15000" + role: READ_WRITE + incarnation: "\002\0012" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\014`\000\001" + } + group { + group_uid: 208666625 + tablets { + tablet_uid: 208666625 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\0013" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\014p\000\001" + } + key_recipes { + } + } + } + event { + name: "Merge_Query/7" + sql { + session: "instances/default/databases/db22/sessions/Cj1iKobFlHIjFAAFmyl3O7-THtK7-TS4hiAm_v9p0ablflCYr9WEbYn0QSEVHUPKxTdHd7OxkQh5CPryVaIjEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15000" + 
hint { + operation_uid: 1 + database_id: 1099534696449 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\310\002\234\2315\000x" + group_uid: 207618049 + split_id: 14079378335067013121 + tablet_uid: 207618049 + } + } + event { + name: "Merge_Query/8" + sql { + session: "instances/default/databases/db22/sessions/Cj1iKobFlHIjFAAFmyl3O7-THtK7-TS4hiAm_v9p0ablflCYr9WEbYn0QSEVHUPKxTdHd7OxkQh5CPryVaIjEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "6" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099534696449 + schema_generation: "\001\001" + key: "A\206\310\002\234\2315\000x" + limit_key: "A\206\311" + group_uid: 208666625 + split_id: 14079378335067013121 + tablet_uid: 208666625 + } + } + event { + cache_update { + database_id: 1099534696449 + range { + start_key: "A\206\310\002\234\2315\000x" + limit_key: "A\206\311" + group_uid: 208666625 + split_id: 14079378335067013121 + generation: "\007\006C\314\322\234*\275\007\006C\314\322\235\007\356" + } + group { + group_uid: 208666625 + tablets { + tablet_uid: 208666625 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\0013" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\014p\000\001" + } + key_recipes { + } + } + } + event { + name: "Merge_Query/10" + sql { + session: "instances/default/databases/db22/sessions/Cj1iKobFlHIjFAAFmyl3O7-THtK7-TS4hiAm_v9p0ablflCYr9WEbYn0QSEVHUPKxTdHd7OxkQh5CPryVaIjEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "6" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099534696449 + schema_generation: "\001\001" + key: "A\206\310\002\234\2315\000x" + 
limit_key: "A\206\311" + group_uid: 208666625 + split_id: 14079378335067013121 + tablet_uid: 208666625 + } + } + event { + name: "Merge_Query/11" + sql { + session: "instances/default/databases/db22/sessions/Cj1iKobFlHIjFAAFmyl3O7-THtK7-TS4hiAm_v9p0ablflCYr9WEbYn0QSEVHUPKxTdHd7OxkQh5CPryVaIjEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099534696449 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\310\002\234\2315\000x" + group_uid: 207618049 + split_id: 14079378335067013121 + tablet_uid: 207618049 + } + } + event { + cache_update { + database_id: 1099534696449 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 207618049 + split_id: 14079378335067013123 + generation: "\007\006C\314\322\2435a\007\006C\314\322\243C\004" + } + group { + group_uid: 207618049 + tablets { + tablet_uid: 207618049 + server_address: "localhost:15000" + role: READ_WRITE + incarnation: "\002\0012" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\014`\000\001" + } + key_recipes { + } + } + } + event { + name: "Merge_Query/13" + sql { + session: "instances/default/databases/db22/sessions/Cj1iKobFlHIjFAAFmyl3O7-THtK7-TS4hiAm_v9p0ablflCYr9WEbYn0QSEVHUPKxTdHd7OxkQh5CPryVaIjEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099534696449 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 207618049 + split_id: 14079378335067013123 + tablet_uid: 207618049 + } + } + event { + name: "Merge_Query/14" + sql { + session: 
"instances/default/databases/db22/sessions/Cj1iKobFlHIjFAAFmyl3O7-THtK7-TS4hiAm_v9p0ablflCYr9WEbYn0QSEVHUPKxTdHd7OxkQh5CPryVaIjEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "6" + } + } + } + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099534696449 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 207618049 + split_id: 14079378335067013123 + tablet_uid: 207618049 + } + } +} +test_case { + name: "MixedQueryShapes" + event { + name: "MixedQueryShapes/0" + sql { + session: "instances/default/databases/db35/sessions/Cj1r5FdH5G6JlOfKUeNpQyDVAAIHtV3iukgB0FHLjQk9hiuK_K0e4iIsIE2ujs9fpktU9gs73sMb09dyQaxzEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T WHERE Key = \'0\'" + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099549376513 + key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 1 + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + value { + string_value: "0" + } + } + } + } + } + } + event { + name: "MixedQueryShapes/2" + sql { + session: "instances/default/databases/db35/sessions/Cj1r5FdH5G6JlOfKUeNpQyDVAAIHtV3iukgB0FHLjQk9hiuK_K0e4iIsIE2ujs9fpktU9gs73sMb09dyQaxzEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T WHERE Key = \'0\'" + } + hint { + operation_uid: 1 + database_id: 1099549376513 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099549376513 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 298844161 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\327\014\177\223" 
+ } + group { + group_uid: 298844161 + tablets { + tablet_uid: 298844161 + server_address: "localhost:15000" + role: READ_WRITE + incarnation: "\002\001\222" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\021\320\000\001" + } + key_recipes { + } + } + } + event { + name: "MixedQueryShapes/4" + sql { + session: "instances/default/databases/db35/sessions/Cj1r5FdH5G6JlOfKUeNpQyDVAAIHtV3iukgB0FHLjQk9hiuK_K0e4iIsIE2ujs9fpktU9gs73sMb09dyQaxzEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T WHERE Key = \'0\'" + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099549376513 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 298844161 + split_id: 14079378335067013120 + tablet_uid: 298844161 + } + } + event { + name: "MixedQueryShapes/5" + sql { + session: "instances/default/databases/db35/sessions/Cj1r5FdH5G6JlOfKUeNpQyDVAAIHtV3iukgB0FHLjQk9hiuK_K0e4iIsIE2ujs9fpktU9gs73sMb09dyQaxzEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1 FROM T WHERE Key = \'0\'" + } + hint { + operation_uid: 2 + database_id: 1099549376513 + schema_generation: "\001\001" + } + } + event { + cache_update { + database_id: 1099549376513 + key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 2 + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + value { + string_value: "0" + } + } + } + } + } + } + event { + name: "MixedQueryShapes/7" + sql { + session: "instances/default/databases/db35/sessions/Cj1r5FdH5G6JlOfKUeNpQyDVAAIHtV3iukgB0FHLjQk9hiuK_K0e4iIsIE2ujs9fpktU9gs73sMb09dyQaxzEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1 FROM T WHERE Key = \'0\'" + } + server: "localhost:15000" + hint { + operation_uid: 2 + database_id: 1099549376513 
+ schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 298844161 + split_id: 14079378335067013120 + tablet_uid: 298844161 + } + } + event { + name: "MixedQueryShapes/8" + sql { + session: "instances/default/databases/db35/sessions/Cj1r5FdH5G6JlOfKUeNpQyDVAAIHtV3iukgB0FHLjQk9hiuK_K0e4iIsIE2ujs9fpktU9gs73sMb09dyQaxzEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T@{FORCE_INDEX=GlobalIndex} WHERE V0 = \'0_0\'" + } + hint { + operation_uid: 3 + database_id: 1099549376513 + schema_generation: "\001\001" + } + } + event { + cache_update { + database_id: 1099549376513 + key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 3 + part { + tag: 50020 + } + part { + tag: 2 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + value { + string_value: "0_0" + } + } + } + } + } + } + event { + name: "MixedQueryShapes/10" + sql { + session: "instances/default/databases/db35/sessions/Cj1r5FdH5G6JlOfKUeNpQyDVAAIHtV3iukgB0FHLjQk9hiuK_K0e4iIsIE2ujs9fpktU9gs73sMb09dyQaxzEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T@{FORCE_INDEX=GlobalIndex} WHERE V0 = \'0_0\'" + } + server: "localhost:15000" + hint { + operation_uid: 3 + database_id: 1099549376513 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 298844161 + split_id: 14079378335067013120 + tablet_uid: 298844161 + } + } + event { + name: "MixedQueryShapes/11" + sql { + session: "instances/default/databases/db35/sessions/Cj1r5FdH5G6JlOfKUeNpQyDVAAIHtV3iukgB0FHLjQk9hiuK_K0e4iIsIE2ujs9fpktU9gs73sMb09dyQaxzEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, Key2, V4 FROM Interleaved WHERE Key = \'0\' AND Key2 = 1" + } + hint { + operation_uid: 4 + database_id: 1099549376513 + schema_generation: "\001\001" + } + } + event { + 
cache_update { + database_id: 1099549376513 + key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 4 + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + value { + string_value: "0" + } + } + } + } + } + } + event { + name: "MixedQueryShapes/13" + sql { + session: "instances/default/databases/db35/sessions/Cj1r5FdH5G6JlOfKUeNpQyDVAAIHtV3iukgB0FHLjQk9hiuK_K0e4iIsIE2ujs9fpktU9gs73sMb09dyQaxzEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, Key2, V4 FROM Interleaved WHERE Key = \'0\' AND Key2 = 1" + } + server: "localhost:15000" + hint { + operation_uid: 4 + database_id: 1099549376513 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 298844161 + split_id: 14079378335067013120 + tablet_uid: 298844161 + } + } +} +test_case { + name: "MixedReadShapes" + event { + name: "MixedReadShapes/0" + read { + session: "instances/default/databases/db17/sessions/Cj11_K9Nwx32LGYbrWvyBBlWBUSPqjqsdLosEGdpzdOE2DuNonsyrDc4SAlBK3h5RDil8VD_xWE-0dpVmdBwEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099529453569 + key_recipes { + schema_generation: "\001\001" + recipe { + table_name: "T" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "Key" + } + } + } + } + } + event { + name: "MixedReadShapes/2" + read { + session: "instances/default/databases/db17/sessions/Cj11_K9Nwx32LGYbrWvyBBlWBUSPqjqsdLosEGdpzdOE2DuNonsyrDc4SAlBK3h5RDil8VD_xWE-0dpVmdBwEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + 
key_set { + keys { + values { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099529453569 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099529453569 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 167772161 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\320\030\004\243" + } + group { + group_uid: 167772161 + tablets { + tablet_uid: 167772161 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\277" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\n\000\000\001" + } + key_recipes { + } + } + } + event { + name: "MixedReadShapes/4" + read { + session: "instances/default/databases/db17/sessions/Cj11_K9Nwx32LGYbrWvyBBlWBUSPqjqsdLosEGdpzdOE2DuNonsyrDc4SAlBK3h5RDil8VD_xWE-0dpVmdBwEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099529453569 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 167772161 + split_id: 14079378335067013120 + tablet_uid: 167772161 + } + } + event { + cache_update { + database_id: 1099529453569 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 167772161 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\320\032\370j" + } + group { + group_uid: 167772161 + tablets { + tablet_uid: 167772161 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\277" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\n\000\000\001" + } + key_recipes { + } + } + } + event { + name: "MixedReadShapes/6" + read { + session: 
"instances/default/databases/db17/sessions/Cj11_K9Nwx32LGYbrWvyBBlWBUSPqjqsdLosEGdpzdOE2DuNonsyrDc4SAlBK3h5RDil8VD_xWE-0dpVmdBwEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099529453569 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 167772161 + split_id: 14079378335067013120 + tablet_uid: 167772161 + } + } + event { + name: "MixedReadShapes/7" + read { + session: "instances/default/databases/db17/sessions/Cj11_K9Nwx32LGYbrWvyBBlWBUSPqjqsdLosEGdpzdOE2DuNonsyrDc4SAlBK3h5RDil8VD_xWE-0dpVmdBwEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 2 + database_id: 1099529453569 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 167772161 + split_id: 14079378335067013120 + tablet_uid: 167772161 + } + } + event { + name: "MixedReadShapes/8" + read { + session: "instances/default/databases/db17/sessions/Cj11_K9Nwx32LGYbrWvyBBlWBUSPqjqsdLosEGdpzdOE2DuNonsyrDc4SAlBK3h5RDil8VD_xWE-0dpVmdBwEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + index: "GlobalIndex" + columns: "Key" + columns: "V0" + key_set { + keys { + values { + string_value: "0_0" + } + } + } + } + hint { + operation_uid: 3 + database_id: 1099529453569 + schema_generation: "\001\001" + } + } + event { + cache_update { + database_id: 1099529453569 + key_recipes { + schema_generation: "\001\001" + recipe { + index_name: "GlobalIndex" + part { + tag: 50020 + } + part { + tag: 2 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: 
STRING + } + identifier: "V0" + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "Key" + } + } + } + } + } + event { + name: "MixedReadShapes/10" + read { + session: "instances/default/databases/db17/sessions/Cj11_K9Nwx32LGYbrWvyBBlWBUSPqjqsdLosEGdpzdOE2DuNonsyrDc4SAlBK3h5RDil8VD_xWE-0dpVmdBwEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + index: "GlobalIndex" + columns: "Key" + columns: "V0" + key_set { + keys { + values { + string_value: "0_0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 3 + database_id: 1099529453569 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 167772161 + split_id: 14079378335067013120 + tablet_uid: 167772161 + } + } + event { + name: "MixedReadShapes/11" + read { + session: "instances/default/databases/db17/sessions/Cj11_K9Nwx32LGYbrWvyBBlWBUSPqjqsdLosEGdpzdOE2DuNonsyrDc4SAlBK3h5RDil8VD_xWE-0dpVmdBwEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "Interleaved" + columns: "Key" + columns: "Key2" + columns: "V4" + key_set { + keys { + values { + string_value: "0" + } + values { + string_value: "1" + } + } + } + } + hint { + operation_uid: 4 + database_id: 1099529453569 + schema_generation: "\001\001" + } + } + event { + cache_update { + database_id: 1099529453569 + key_recipes { + schema_generation: "\001\001" + recipe { + table_name: "Interleaved" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "Key" + } + part { + tag: 5 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: INT64 + } + identifier: "Key2" + } + } + } + } + } + event { + name: "MixedReadShapes/13" + read { + session: 
"instances/default/databases/db17/sessions/Cj11_K9Nwx32LGYbrWvyBBlWBUSPqjqsdLosEGdpzdOE2DuNonsyrDc4SAlBK3h5RDil8VD_xWE-0dpVmdBwEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "Interleaved" + columns: "Key" + columns: "Key2" + columns: "V4" + key_set { + keys { + values { + string_value: "0" + } + values { + string_value: "1" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 4 + database_id: 1099529453569 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 167772161 + split_id: 14079378335067013120 + tablet_uid: 167772161 + } + } +} +test_case { + name: "MultiPointQuery" + event { + name: "MultiPointQuery/0" + sql { + session: "instances/default/databases/db34/sessions/Cj1JEf_BWyeFh30JH_mbDYbGaUpAOVs2cu9NlDhT_gSLxCxvtNLzu5lDhZHUfV_f_IFjIiWfJp0YIuz9Rgk-EO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T WHERE Key IN (\'0\', \'1\', \'2\')" + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099548327937 + key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 1 + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + value { + string_value: "0" + } + } + } + } + } + } + event { + name: "MultiPointQuery/2" + sql { + session: "instances/default/databases/db34/sessions/Cj1JEf_BWyeFh30JH_mbDYbGaUpAOVs2cu9NlDhT_gSLxCxvtNLzu5lDhZHUfV_f_IFjIiWfJp0YIuz9Rgk-EO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T WHERE Key IN (\'0\', \'1\', \'2\')" + } + hint { + operation_uid: 1 + database_id: 1099548327937 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099548327937 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 292552705 + split_id: 
14079378335067013120 + generation: "\000\007\006C\314\326\362\241\323" + } + group { + group_uid: 292552705 + tablets { + tablet_uid: 292552705 + server_address: "localhost:15000" + role: READ_WRITE + incarnation: "\002\001\214" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\021p\000\001" + } + key_recipes { + } + } + } + event { + name: "MultiPointQuery/4" + sql { + session: "instances/default/databases/db34/sessions/Cj1JEf_BWyeFh30JH_mbDYbGaUpAOVs2cu9NlDhT_gSLxCxvtNLzu5lDhZHUfV_f_IFjIiWfJp0YIuz9Rgk-EO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T WHERE Key IN (\'0\', \'1\', \'2\')" + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099548327937 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 292552705 + split_id: 14079378335067013120 + tablet_uid: 292552705 + } + } +} +test_case { + name: "MultiPointRead" + event { + name: "MultiPointRead/0" + read { + session: "instances/default/databases/db16/sessions/Cj1lBesOACZy5M0-XNPQK3sZKjFljpivNOp5UkVksA4i-P5_UszGIn1x3kLVQTGhWk3mC-vv0bTwS-RhC6JqEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + keys { + values { + string_value: "0" + } + } + keys { + values { + string_value: "1" + } + } + keys { + values { + string_value: "2" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099528404993 + key_recipes { + schema_generation: "\001\001" + recipe { + table_name: "T" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "Key" + } + } + } + } + } + event { + name: "MultiPointRead/2" + read { + session: "instances/default/databases/db16/sessions/Cj1lBesOACZy5M0-XNPQK3sZKjFljpivNOp5UkVksA4i-P5_UszGIn1x3kLVQTGhWk3mC-vv0bTwS-RhC6JqEO_3ts7M-ZAD" + transaction 
{ + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + keys { + values { + string_value: "0" + } + } + keys { + values { + string_value: "1" + } + } + keys { + values { + string_value: "2" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099528404993 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + limit_key: "A\206\310\002\234\2312\000y" + } + } + event { + cache_update { + database_id: 1099528404993 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 161480705 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\320\0077\344" + } + group { + group_uid: 161480705 + tablets { + tablet_uid: 161480705 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\272" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\t\240\000\001" + } + key_recipes { + } + } + } + event { + name: "MultiPointRead/4" + read { + session: "instances/default/databases/db16/sessions/Cj1lBesOACZy5M0-XNPQK3sZKjFljpivNOp5UkVksA4i-P5_UszGIn1x3kLVQTGhWk3mC-vv0bTwS-RhC6JqEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + keys { + values { + string_value: "0" + } + } + keys { + values { + string_value: "1" + } + } + keys { + values { + string_value: "2" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099528404993 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 161480705 + split_id: 14079378335067013120 + tablet_uid: 161480705 + } + } + event { + cache_update { + database_id: 1099528404993 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 161480705 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\320\010?\"" + } + group { + group_uid: 161480705 + tablets { + tablet_uid: 161480705 + server_address: "localhost:15100" + role: 
READ_WRITE + incarnation: "\001\272" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\t\240\000\001" + } + key_recipes { + } + } + } + event { + name: "MultiPointRead/6" + read { + session: "instances/default/databases/db16/sessions/Cj1lBesOACZy5M0-XNPQK3sZKjFljpivNOp5UkVksA4i-P5_UszGIn1x3kLVQTGhWk3mC-vv0bTwS-RhC6JqEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + keys { + values { + string_value: "0" + } + } + keys { + values { + string_value: "1" + } + } + keys { + values { + string_value: "2" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099528404993 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 161480705 + split_id: 14079378335067013120 + tablet_uid: 161480705 + } + } + event { + cache_update { + database_id: 1099528404993 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 161480705 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\320\010?\"" + } + group { + group_uid: 161480705 + tablets { + tablet_uid: 161480705 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\272" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\t\240\000\001" + } + key_recipes { + } + } + } +} +test_case { + name: "MultiSplitRangeQuery" + event { + name: "MultiSplitRangeQuery/0" + sql { + session: "instances/default/databases/db31/sessions/Cj26cAs4RD17PnwartOnEP5weRuWeJiGsWyw0p_woAWpI-qkh_vsgPdD99_nQRZyD9Ysr_UQGP1_b5NtZb-OEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T WHERE Key >= @start AND Key < @limit" + params { + fields { + key: "limit" + value { + string_value: "5" + } + } + fields { + key: "start" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099545182209 + 
key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 1 + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "start" + } + } + } + } + } + event { + name: "MultiSplitRangeQuery/2" + sql { + session: "instances/default/databases/db31/sessions/Cj26cAs4RD17PnwartOnEP5weRuWeJiGsWyw0p_woAWpI-qkh_vsgPdD99_nQRZyD9Ysr_UQGP1_b5NtZb-OEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T WHERE Key >= @start AND Key < @limit" + params { + fields { + key: "limit" + value { + string_value: "5" + } + } + fields { + key: "start" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099545182209 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099545182209 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 269484033 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\326\207\216s" + } + group { + group_uid: 269484033 + tablets { + tablet_uid: 269484033 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001w" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\020\020\000\001" + } + key_recipes { + } + } + } + event { + name: "MultiSplitRangeQuery/4" + sql { + session: "instances/default/databases/db31/sessions/Cj26cAs4RD17PnwartOnEP5weRuWeJiGsWyw0p_woAWpI-qkh_vsgPdD99_nQRZyD9Ysr_UQGP1_b5NtZb-OEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T WHERE Key >= @start AND Key < @limit" + params { + fields { + key: "limit" + value { + string_value: "5" + } + } + fields { + key: "start" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099545182209 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: 
"A\206\311" + group_uid: 269484033 + split_id: 14079378335067013120 + tablet_uid: 269484033 + } + } + event { + cache_update { + database_id: 1099545182209 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 269484033 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\326\207\216s" + } + group { + group_uid: 269484033 + tablets { + tablet_uid: 269484033 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001w" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\020\020\000\001" + } + key_recipes { + } + } + } + event { + name: "MultiSplitRangeQuery/6" + sql { + session: "instances/default/databases/db31/sessions/Cj26cAs4RD17PnwartOnEP5weRuWeJiGsWyw0p_woAWpI-qkh_vsgPdD99_nQRZyD9Ysr_UQGP1_b5NtZb-OEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T WHERE Key >= @start AND Key < @limit" + params { + fields { + key: "limit" + value { + string_value: "5" + } + } + fields { + key: "start" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099545182209 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 269484033 + split_id: 14079378335067013120 + tablet_uid: 269484033 + } + } + event { + name: "MultiSplitRangeQuery/7" + sql { + session: "instances/default/databases/db31/sessions/Cj26cAs4RD17PnwartOnEP5weRuWeJiGsWyw0p_woAWpI-qkh_vsgPdD99_nQRZyD9Ysr_UQGP1_b5NtZb-OEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T WHERE Key >= @start AND Key < @limit" + params { + fields { + key: "limit" + value { + string_value: "5" + } + } + fields { + key: "start" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099545182209 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 269484033 
+ split_id: 14079378335067013120 + tablet_uid: 269484033 + } + } + event { + cache_update { + database_id: 1099545182209 + range { + start_key: "A\206\310" + limit_key: "A\206\310\002\234\2311\000x" + group_uid: 275775489 + split_id: 14079378335067013121 + generation: "\007\006C\314\326\232H-\007\006C\314\326\232O\315" + } + range { + start_key: "A\206\310\002\234\2311\000x" + limit_key: "A\206\311" + group_uid: 276824065 + split_id: 14079378335067013121 + generation: "\007\006C\314\326\232H-\007\006C\314\326\232O\315" + } + group { + group_uid: 275775489 + tablets { + tablet_uid: 275775489 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001}" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\020p\000\001" + } + group { + group_uid: 276824065 + tablets { + tablet_uid: 276824065 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001|" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\020\200\000\001" + } + key_recipes { + } + } + } + event { + name: "MultiSplitRangeQuery/9" + sql { + session: "instances/default/databases/db31/sessions/Cj26cAs4RD17PnwartOnEP5weRuWeJiGsWyw0p_woAWpI-qkh_vsgPdD99_nQRZyD9Ysr_UQGP1_b5NtZb-OEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T WHERE Key >= @start AND Key < @limit" + params { + fields { + key: "limit" + value { + string_value: "5" + } + } + fields { + key: "start" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099545182209 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\310\002\234\2311\000x" + group_uid: 275775489 + split_id: 14079378335067013121 + tablet_uid: 275775489 + } + } +} +test_case { + name: "MultiSplitRangeRead" + event { + name: "MultiSplitRangeRead/0" + read { + session: 
"instances/default/databases/db14/sessions/Cj0o3nR5UFrOZg2SHTIXxry6YlztuIUCKChW73CDnur1iUntENeOIUCLJi1ON9ym2fIzfYOB0G4apCR5I4YQEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + ranges { + start_closed { + values { + string_value: "0" + } + } + end_open { + values { + string_value: "5" + } + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099526307841 + key_recipes { + schema_generation: "\001\001" + recipe { + table_name: "T" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "Key" + } + } + } + } + } + event { + name: "MultiSplitRangeRead/2" + read { + session: "instances/default/databases/db14/sessions/Cj0o3nR5UFrOZg2SHTIXxry6YlztuIUCKChW73CDnur1iUntENeOIUCLJi1ON9ym2fIzfYOB0G4apCR5I4YQEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + ranges { + start_closed { + values { + string_value: "0" + } + } + end_open { + values { + string_value: "5" + } + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099526307841 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + limit_key: "A\206\310\002\234\2315\000x" + } + } + event { + cache_update { + database_id: 1099526307841 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 146800641 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\317\324\266i" + } + group { + group_uid: 146800641 + tablets { + tablet_uid: 146800641 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\252" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\010\300\000\001" + } + key_recipes { + } + } + } + event { + name: "MultiSplitRangeRead/4" + read { + session: 
"instances/default/databases/db14/sessions/Cj0o3nR5UFrOZg2SHTIXxry6YlztuIUCKChW73CDnur1iUntENeOIUCLJi1ON9ym2fIzfYOB0G4apCR5I4YQEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + ranges { + start_closed { + values { + string_value: "0" + } + } + end_open { + values { + string_value: "5" + } + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099526307841 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 146800641 + split_id: 14079378335067013120 + tablet_uid: 146800641 + } + } + event { + cache_update { + database_id: 1099526307841 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 146800641 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\317\327j\374" + } + group { + group_uid: 146800641 + tablets { + tablet_uid: 146800641 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\252" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\010\300\000\001" + } + key_recipes { + } + } + } + event { + name: "MultiSplitRangeRead/6" + read { + session: "instances/default/databases/db14/sessions/Cj0o3nR5UFrOZg2SHTIXxry6YlztuIUCKChW73CDnur1iUntENeOIUCLJi1ON9ym2fIzfYOB0G4apCR5I4YQEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + ranges { + start_closed { + values { + string_value: "0" + } + } + end_open { + values { + string_value: "5" + } + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099526307841 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 146800641 + split_id: 14079378335067013120 + tablet_uid: 146800641 + } + } + event { + name: "MultiSplitRangeRead/7" + read { + session: 
"instances/default/databases/db14/sessions/Cj0o3nR5UFrOZg2SHTIXxry6YlztuIUCKChW73CDnur1iUntENeOIUCLJi1ON9ym2fIzfYOB0G4apCR5I4YQEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + ranges { + start_closed { + values { + string_value: "0" + } + } + end_open { + values { + string_value: "5" + } + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099526307841 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 146800641 + split_id: 14079378335067013120 + tablet_uid: 146800641 + } + } + event { + cache_update { + database_id: 1099526307841 + range { + start_key: "A\206\310" + limit_key: "A\206\310\002\234\2311\000x" + group_uid: 153092097 + split_id: 14079378335067013121 + generation: "\007\006C\314\317\345k\007\007\006C\314\317\345|{" + } + range { + start_key: "A\206\310\002\234\2311\000x" + limit_key: "A\206\311" + group_uid: 154140673 + split_id: 14079378335067013121 + generation: "\007\006C\314\317\345k\007\007\006C\314\317\345|{" + } + group { + group_uid: 153092097 + tablets { + tablet_uid: 153092097 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\260" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\t \000\001" + } + group { + group_uid: 154140673 + tablets { + tablet_uid: 154140673 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\261" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\t0\000\001" + } + key_recipes { + } + } + } + event { + name: "MultiSplitRangeRead/9" + read { + session: "instances/default/databases/db14/sessions/Cj0o3nR5UFrOZg2SHTIXxry6YlztuIUCKChW73CDnur1iUntENeOIUCLJi1ON9ym2fIzfYOB0G4apCR5I4YQEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + ranges { + start_closed { + values { + string_value: "0" + } 
+ } + end_open { + values { + string_value: "5" + } + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099526307841 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + limit_key: "A\206\310\002\234\2315\000x" + } + } + event { + cache_update { + database_id: 1099526307841 + range { + start_key: "A\206\310" + limit_key: "A\206\310\002\234\2311\000x" + group_uid: 153092097 + split_id: 14079378335067013121 + generation: "\007\006C\314\317\345k\007\007\006C\314\317\345|{" + } + range { + start_key: "A\206\310\002\234\2311\000x" + limit_key: "A\206\311" + group_uid: 154140673 + split_id: 14079378335067013121 + generation: "\007\006C\314\317\345k\007\007\006C\314\317\345|{" + } + group { + group_uid: 153092097 + tablets { + tablet_uid: 153092097 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\260" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\t \000\001" + } + group { + group_uid: 154140673 + tablets { + tablet_uid: 154140673 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\261" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\t0\000\001" + } + key_recipes { + } + } + } + event { + name: "MultiSplitRangeRead/11" + read { + session: "instances/default/databases/db14/sessions/Cj0o3nR5UFrOZg2SHTIXxry6YlztuIUCKChW73CDnur1iUntENeOIUCLJi1ON9ym2fIzfYOB0G4apCR5I4YQEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + ranges { + start_closed { + values { + string_value: "0" + } + } + end_open { + values { + string_value: "5" + } + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099526307841 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + limit_key: "A\206\310\002\234\2315\000x" + } + } + event { + cache_update { + database_id: 1099526307841 + range { + start_key: "A\206\310" + limit_key: "A\206\310\002\234\2311\000x" + group_uid: 153092097 + 
split_id: 14079378335067013121 + generation: "\007\006C\314\317\345k\007\007\006C\314\317\345|{" + } + range { + start_key: "A\206\310\002\234\2311\000x" + limit_key: "A\206\311" + group_uid: 154140673 + split_id: 14079378335067013121 + generation: "\007\006C\314\317\345k\007\007\006C\314\317\345|{" + } + group { + group_uid: 153092097 + tablets { + tablet_uid: 153092097 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\260" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\t \000\001" + } + group { + group_uid: 154140673 + tablets { + tablet_uid: 154140673 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\261" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\t0\000\001" + } + key_recipes { + } + } + } + event { + name: "MultiSplitRangeRead/13" + read { + session: "instances/default/databases/db14/sessions/Cj0o3nR5UFrOZg2SHTIXxry6YlztuIUCKChW73CDnur1iUntENeOIUCLJi1ON9ym2fIzfYOB0G4apCR5I4YQEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + ranges { + start_closed { + values { + string_value: "0" + } + } + end_open { + values { + string_value: "5" + } + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099526307841 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + limit_key: "A\206\310\002\234\2315\000x" + } + } + event { + cache_update { + database_id: 1099526307841 + range { + start_key: "A\206\310" + limit_key: "A\206\310\002\234\2311\000x" + group_uid: 153092097 + split_id: 14079378335067013121 + generation: "\007\006C\314\317\345k\007\007\006C\314\317\345|{" + } + range { + start_key: "A\206\310\002\234\2311\000x" + limit_key: "A\206\311" + group_uid: 154140673 + split_id: 14079378335067013121 + generation: "\007\006C\314\317\345k\007\007\006C\314\317\345|{" + } + group { + group_uid: 153092097 + tablets { + tablet_uid: 153092097 + server_address: 
"localhost:15100" + role: READ_WRITE + incarnation: "\001\260" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\t \000\001" + } + group { + group_uid: 154140673 + tablets { + tablet_uid: 154140673 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\261" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\t0\000\001" + } + key_recipes { + } + } + } +} +test_case { + name: "PointQuery_MoveTablet" + event { + name: "PointQuery_MoveTablet/0" + sql { + session: "instances/default/databases/db20/sessions/Cj1N8sQ1mMXNR1g4jxOmUpJctw82dg0KcXzxZYKPIRwWsXqu0bhbi4E-_sLxODFwo_lYyhRy9bB8xBjMbn6MEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099532599297 + key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 1 + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "key" + } + } + } + } + } + event { + name: "PointQuery_MoveTablet/2" + sql { + session: "instances/default/databases/db20/sessions/Cj1N8sQ1mMXNR1g4jxOmUpJctw82dg0KcXzxZYKPIRwWsXqu0bhbi4E-_sLxODFwo_lYyhRy9bB8xBjMbn6MEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099532599297 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099532599297 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 188743681 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\320r\274\204" + } + group { + group_uid: 
188743681 + tablets { + tablet_uid: 188743681 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\323" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\013@\000\001" + } + key_recipes { + } + } + } + event { + name: "PointQuery_MoveTablet/4" + sql { + session: "instances/default/databases/db20/sessions/Cj1N8sQ1mMXNR1g4jxOmUpJctw82dg0KcXzxZYKPIRwWsXqu0bhbi4E-_sLxODFwo_lYyhRy9bB8xBjMbn6MEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099532599297 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 188743681 + split_id: 14079378335067013120 + tablet_uid: 188743681 + } + } + event { + cache_update { + database_id: 1099532599297 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 188743681 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\320x\255_" + } + group { + group_uid: 188743681 + tablets { + tablet_uid: 188743681 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\323" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\013@\000\001" + } + key_recipes { + } + } + } + event { + name: "PointQuery_MoveTablet/6" + sql { + session: "instances/default/databases/db20/sessions/Cj1N8sQ1mMXNR1g4jxOmUpJctw82dg0KcXzxZYKPIRwWsXqu0bhbi4E-_sLxODFwo_lYyhRy9bB8xBjMbn6MEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099532599297 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 188743681 + split_id: 
14079378335067013120 + tablet_uid: 188743681 + } + } + event { + name: "PointQuery_MoveTablet/7" + sql { + session: "instances/default/databases/db20/sessions/Cj1N8sQ1mMXNR1g4jxOmUpJctw82dg0KcXzxZYKPIRwWsXqu0bhbi4E-_sLxODFwo_lYyhRy9bB8xBjMbn6MEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099532599297 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 188743681 + split_id: 14079378335067013120 + tablet_uid: 188743681 + } + } +} +test_case { + name: "PointQuery_Split" + event { + name: "PointQuery_Split/0" + sql { + session: "instances/default/databases/db19/sessions/Cj2RvpMnHqQiSvRU_rl0CfvTHBYP-K9YWOZN0oPTqz_3o3UJkAV0kyMcVK_r3tClMWKYSJSK3tGn0llf-I56EO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099531550721 + key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 1 + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "key" + } + } + } + } + } + event { + name: "PointQuery_Split/2" + sql { + session: "instances/default/databases/db19/sessions/Cj2RvpMnHqQiSvRU_rl0CfvTHBYP-K9YWOZN0oPTqz_3o3UJkAV0kyMcVK_r3tClMWKYSJSK3tGn0llf-I56EO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099531550721 + schema_generation: 
"\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099531550721 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 180355073 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\320Op\220" + } + group { + group_uid: 180355073 + tablets { + tablet_uid: 180355073 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\315" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\n\300\000\001" + } + key_recipes { + } + } + } + event { + name: "PointQuery_Split/4" + sql { + session: "instances/default/databases/db19/sessions/Cj2RvpMnHqQiSvRU_rl0CfvTHBYP-K9YWOZN0oPTqz_3o3UJkAV0kyMcVK_r3tClMWKYSJSK3tGn0llf-I56EO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099531550721 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 180355073 + split_id: 14079378335067013120 + tablet_uid: 180355073 + } + } + event { + cache_update { + database_id: 1099531550721 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 180355073 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\320Q}h" + } + group { + group_uid: 180355073 + tablets { + tablet_uid: 180355073 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\315" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\n\300\000\001" + } + key_recipes { + } + } + } + event { + name: "PointQuery_Split/6" + sql { + session: "instances/default/databases/db19/sessions/Cj2RvpMnHqQiSvRU_rl0CfvTHBYP-K9YWOZN0oPTqz_3o3UJkAV0kyMcVK_r3tClMWKYSJSK3tGn0llf-I56EO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" 
+ params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099531550721 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 180355073 + split_id: 14079378335067013120 + tablet_uid: 180355073 + } + } + event { + name: "PointQuery_Split/7" + sql { + session: "instances/default/databases/db19/sessions/Cj2RvpMnHqQiSvRU_rl0CfvTHBYP-K9YWOZN0oPTqz_3o3UJkAV0kyMcVK_r3tClMWKYSJSK3tGn0llf-I56EO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099531550721 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 180355073 + split_id: 14079378335067013120 + tablet_uid: 180355073 + } + } + event { + cache_update { + database_id: 1099531550721 + range { + start_key: "A\206\310" + limit_key: "A\206\310\002\234\2311\000x" + group_uid: 186646529 + split_id: 14079378335067013121 + generation: "\007\006C\314\320c\021\014\007\006C\314\320c\026\345" + } + range { + start_key: "A\206\310\002\234\2311\000x" + limit_key: "A\206\311" + group_uid: 187695105 + split_id: 14079378335067013121 + generation: "\007\006C\314\320c\021\014\007\006C\314\320c\026\345" + } + group { + group_uid: 186646529 + tablets { + tablet_uid: 186646529 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\320" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\013 \000\001" + } + group { + group_uid: 187695105 + tablets { + tablet_uid: 187695105 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\321" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\0130\000\001" + } + key_recipes { + } + } + } + event { + name: "PointQuery_Split/9" + sql { + 
session: "instances/default/databases/db19/sessions/Cj2RvpMnHqQiSvRU_rl0CfvTHBYP-K9YWOZN0oPTqz_3o3UJkAV0kyMcVK_r3tClMWKYSJSK3tGn0llf-I56EO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099531550721 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\310\002\234\2311\000x" + group_uid: 186646529 + split_id: 14079378335067013121 + tablet_uid: 186646529 + } + } +} +test_case { + name: "PointQuery_StopServer" + event { + name: "PointQuery_StopServer/0" + sql { + session: "instances/default/databases/db21/sessions/Cj2mLhhl2TEML6_S76tM9oTQnNZWOUYSYTbcM0g1a235ck_wOuvrssPXtj6kS89DUYK3B9sJwmp3MAtsd0rKEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099533647873 + key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 1 + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "key" + } + } + } + } + } + event { + name: "PointQuery_StopServer/2" + sql { + session: "instances/default/databases/db21/sessions/Cj2mLhhl2TEML6_S76tM9oTQnNZWOUYSYTbcM0g1a235ck_wOuvrssPXtj6kS89DUYK3B9sJwmp3MAtsd0rKEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099533647873 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update 
{ + database_id: 1099533647873 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 195035137 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\320\223\232\214" + } + group { + group_uid: 195035137 + tablets { + tablet_uid: 195035137 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\331" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\013\240\000\001" + } + key_recipes { + } + } + } + event { + name: "PointQuery_StopServer/4" + sql { + session: "instances/default/databases/db21/sessions/Cj2mLhhl2TEML6_S76tM9oTQnNZWOUYSYTbcM0g1a235ck_wOuvrssPXtj6kS89DUYK3B9sJwmp3MAtsd0rKEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099533647873 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 195035137 + split_id: 14079378335067013120 + tablet_uid: 195035137 + } + } + event { + cache_update { + database_id: 1099533647873 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 195035137 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\320\232\312r" + } + group { + group_uid: 195035137 + tablets { + tablet_uid: 195035137 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\331" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\013\240\000\001" + } + key_recipes { + } + } + } + event { + name: "PointQuery_StopServer/6" + sql { + session: "instances/default/databases/db21/sessions/Cj2mLhhl2TEML6_S76tM9oTQnNZWOUYSYTbcM0g1a235ck_wOuvrssPXtj6kS89DUYK3B9sJwmp3MAtsd0rKEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + 
string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099533647873 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 195035137 + split_id: 14079378335067013120 + tablet_uid: 195035137 + } + } + event { + name: "PointQuery_StopServer/7" + unhealthy_servers: "localhost:15100" + sql { + session: "instances/default/databases/db21/sessions/Cj2mLhhl2TEML6_S76tM9oTQnNZWOUYSYTbcM0g1a235ck_wOuvrssPXtj6kS89DUYK3B9sJwmp3MAtsd0rKEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099533647873 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 195035137 + split_id: 14079378335067013120 + skipped_tablet_uid { + tablet_uid: 195035137 + incarnation: "\001\331" + } + skipped_tablet_uid { + tablet_uid: 195035137 + incarnation: "\001\331" + } + } + } + event { + cache_update { + database_id: 1099533647873 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 195035137 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\320\223\232\214" + } + group { + group_uid: 195035137 + tablets { + tablet_uid: 195035137 + server_address: "localhost:15000" + role: READ_WRITE + incarnation: "\002\001)" + } + generation: "\010\377\377\377\377\377\377\377\377\001\002\004\013\240\000\001" + } + key_recipes { + } + } + } + event { + name: "PointQuery_StopServer/9" + sql { + session: "instances/default/databases/db21/sessions/Cj2mLhhl2TEML6_S76tM9oTQnNZWOUYSYTbcM0g1a235ck_wOuvrssPXtj6kS89DUYK3B9sJwmp3MAtsd0rKEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + 
server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099533647873 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 195035137 + split_id: 14079378335067013120 + tablet_uid: 195035137 + } + } +} +test_case { + name: "PointRead_Basic" + event { + name: "PointRead_Basic/0" + read { + session: "instances/default/databases/db1/sessions/Cj2dgaSCS1TIcsY03_rLwNkT4bcSD0rl96ymOmEs6St0gocLKdJ13KOcTUAr0hZzE7UgcFAH_pgoeDLs-gevEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099511627777 + key_recipes { + schema_generation: "\001\001" + recipe { + table_name: "T" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "Key" + } + } + } + } + } + event { + name: "PointRead_Basic/2" + read { + session: "instances/default/databases/db1/sessions/Cj2dgaSCS1TIcsY03_rLwNkT4bcSD0rl96ymOmEs6St0gocLKdJ13KOcTUAr0hZzE7UgcFAH_pgoeDLs-gevEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099511627777 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099511627777 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 51380225 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\312\266\333\267" + } + group { + group_uid: 51380225 + tablets { + tablet_uid: 51380225 + server_address: "localhost:15000" + role: READ_WRITE + incarnation: "\0014" + } + generation: 
"\010\377\377\377\377\377\377\377\377\000\004\003\020\000\001" + } + key_recipes { + } + } + } + event { + name: "PointRead_Basic/4" + read { + session: "instances/default/databases/db1/sessions/Cj2dgaSCS1TIcsY03_rLwNkT4bcSD0rl96ymOmEs6St0gocLKdJ13KOcTUAr0hZzE7UgcFAH_pgoeDLs-gevEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099511627777 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 51380225 + split_id: 14079378335067013120 + tablet_uid: 51380225 + } + } + event { + name: "PointRead_Basic/5" + read { + session: "instances/default/databases/db1/sessions/Cj2dgaSCS1TIcsY03_rLwNkT4bcSD0rl96ymOmEs6St0gocLKdJ13KOcTUAr0hZzE7UgcFAH_pgoeDLs-gevEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "6" + } + } + } + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099511627777 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 51380225 + split_id: 14079378335067013120 + tablet_uid: 51380225 + } + } + event { + name: "PointRead_Basic/6" + read { + session: "instances/default/databases/db1/sessions/Cj2dgaSCS1TIcsY03_rLwNkT4bcSD0rl96ymOmEs6St0gocLKdJ13KOcTUAr0hZzE7UgcFAH_pgoeDLs-gevEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "99" + } + } + } + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099511627777 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" 
+ group_uid: 51380225 + split_id: 14079378335067013120 + tablet_uid: 51380225 + } + } +} +test_case { + name: "PointRead_MoveTablet" + event { + name: "PointRead_MoveTablet/0" + read { + session: "instances/default/databases/db3/sessions/Cj1AKsB4f6xVAxSo5B5BEa_WQta60iwvkBhF0_CO2ES-1CICxfRMDL5FaiFDCdlVa22_VEiq4S9xZWRU6fJuEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099513724929 + key_recipes { + schema_generation: "\001\001" + recipe { + table_name: "T" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "Key" + } + } + } + } + } + event { + name: "PointRead_MoveTablet/2" + read { + session: "instances/default/databases/db3/sessions/Cj1AKsB4f6xVAxSo5B5BEa_WQta60iwvkBhF0_CO2ES-1CICxfRMDL5FaiFDCdlVa22_VEiq4S9xZWRU6fJuEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099513724929 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099513724929 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 66060289 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\312\371S\230" + } + group { + group_uid: 66060289 + tablets { + tablet_uid: 66060289 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001B" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\003\360\000\001" + } + key_recipes { + } + } + } + event { + name: "PointRead_MoveTablet/4" + read { + session: 
"instances/default/databases/db3/sessions/Cj1AKsB4f6xVAxSo5B5BEa_WQta60iwvkBhF0_CO2ES-1CICxfRMDL5FaiFDCdlVa22_VEiq4S9xZWRU6fJuEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099513724929 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 66060289 + split_id: 14079378335067013120 + tablet_uid: 66060289 + } + } + event { + cache_update { + database_id: 1099513724929 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 66060289 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\312\377\310\024" + } + group { + group_uid: 66060289 + tablets { + tablet_uid: 66060289 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001B" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\003\360\000\001" + } + key_recipes { + } + } + } + event { + name: "PointRead_MoveTablet/6" + read { + session: "instances/default/databases/db3/sessions/Cj1AKsB4f6xVAxSo5B5BEa_WQta60iwvkBhF0_CO2ES-1CICxfRMDL5FaiFDCdlVa22_VEiq4S9xZWRU6fJuEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099513724929 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 66060289 + split_id: 14079378335067013120 + tablet_uid: 66060289 + } + } + event { + name: "PointRead_MoveTablet/7" + read { + session: "instances/default/databases/db3/sessions/Cj1AKsB4f6xVAxSo5B5BEa_WQta60iwvkBhF0_CO2ES-1CICxfRMDL5FaiFDCdlVa22_VEiq4S9xZWRU6fJuEO_3ts7M-ZAD" + transaction { + single_use { + 
read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099513724929 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 66060289 + split_id: 14079378335067013120 + tablet_uid: 66060289 + } + } +} +test_case { + name: "PointRead_Split" + event { + name: "PointRead_Split/0" + read { + session: "instances/default/databases/db2/sessions/Cj0P1CjTc1r75lf93ktX0nFIilgWPzLMlGnWTx-ZrjQo0Gf7-8YTsxgLsq7TFigkRjfOoY7s-KBDYRGUBOhtEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099512676353 + key_recipes { + schema_generation: "\001\001" + recipe { + table_name: "T" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "Key" + } + } + } + } + } + event { + name: "PointRead_Split/2" + read { + session: "instances/default/databases/db2/sessions/Cj0P1CjTc1r75lf93ktX0nFIilgWPzLMlGnWTx-ZrjQo0Gf7-8YTsxgLsq7TFigkRjfOoY7s-KBDYRGUBOhtEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099512676353 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099512676353 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 57671681 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\312\320\311\201" 
+ } + group { + group_uid: 57671681 + tablets { + tablet_uid: 57671681 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\0019" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\003p\000\001" + } + key_recipes { + } + } + } + event { + name: "PointRead_Split/4" + read { + session: "instances/default/databases/db2/sessions/Cj0P1CjTc1r75lf93ktX0nFIilgWPzLMlGnWTx-ZrjQo0Gf7-8YTsxgLsq7TFigkRjfOoY7s-KBDYRGUBOhtEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099512676353 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 57671681 + split_id: 14079378335067013120 + tablet_uid: 57671681 + } + } + event { + cache_update { + database_id: 1099512676353 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 57671681 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\312\325J\"" + } + group { + group_uid: 57671681 + tablets { + tablet_uid: 57671681 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\0019" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\003p\000\001" + } + key_recipes { + } + } + } + event { + name: "PointRead_Split/6" + read { + session: "instances/default/databases/db2/sessions/Cj0P1CjTc1r75lf93ktX0nFIilgWPzLMlGnWTx-ZrjQo0Gf7-8YTsxgLsq7TFigkRjfOoY7s-KBDYRGUBOhtEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099512676353 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 57671681 + 
split_id: 14079378335067013120 + tablet_uid: 57671681 + } + } + event { + name: "PointRead_Split/7" + read { + session: "instances/default/databases/db2/sessions/Cj0P1CjTc1r75lf93ktX0nFIilgWPzLMlGnWTx-ZrjQo0Gf7-8YTsxgLsq7TFigkRjfOoY7s-KBDYRGUBOhtEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099512676353 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 57671681 + split_id: 14079378335067013120 + tablet_uid: 57671681 + } + } + event { + cache_update { + database_id: 1099512676353 + range { + start_key: "A\206\310" + limit_key: "A\206\310\002\234\2311\000x" + group_uid: 63963137 + split_id: 14079378335067013121 + generation: "\007\006C\314\312\346,\370\007\006C\314\312\346G\266" + } + range { + start_key: "A\206\310\002\234\2311\000x" + limit_key: "A\206\311" + group_uid: 65011713 + split_id: 14079378335067013121 + generation: "\007\006C\314\312\346,\370\007\006C\314\312\346G\266" + } + group { + group_uid: 63963137 + tablets { + tablet_uid: 63963137 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001@" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\003\320\000\001" + } + group { + group_uid: 65011713 + tablets { + tablet_uid: 65011713 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001?" 
+ } + generation: "\010\377\377\377\377\377\377\377\377\000\004\003\340\000\001" + } + key_recipes { + } + } + } + event { + name: "PointRead_Split/9" + read { + session: "instances/default/databases/db2/sessions/Cj0P1CjTc1r75lf93ktX0nFIilgWPzLMlGnWTx-ZrjQo0Gf7-8YTsxgLsq7TFigkRjfOoY7s-KBDYRGUBOhtEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099512676353 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\310\002\234\2311\000x" + group_uid: 63963137 + split_id: 14079378335067013121 + tablet_uid: 63963137 + } + } +} +test_case { + name: "PointRead_StopServer" + event { + name: "PointRead_StopServer/0" + read { + session: "instances/default/databases/db4/sessions/Cj06QaQcKhM5rNrSjoYJN6w9yW8QlrvlrAYbHThdJcgxiQOcxXv_urgbse66Ol2wnM36ddl7v6GMVjh0JXdJEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099514773505 + key_recipes { + schema_generation: "\001\001" + recipe { + table_name: "T" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "Key" + } + } + } + } + } + event { + name: "PointRead_StopServer/2" + read { + session: "instances/default/databases/db4/sessions/Cj06QaQcKhM5rNrSjoYJN6w9yW8QlrvlrAYbHThdJcgxiQOcxXv_urgbse66Ol2wnM36ddl7v6GMVjh0JXdJEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + 
string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099514773505 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099514773505 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 72351745 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\313\027\337?" + } + group { + group_uid: 72351745 + tablets { + tablet_uid: 72351745 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001H" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\004P\000\001" + } + key_recipes { + } + } + } + event { + name: "PointRead_StopServer/4" + read { + session: "instances/default/databases/db4/sessions/Cj06QaQcKhM5rNrSjoYJN6w9yW8QlrvlrAYbHThdJcgxiQOcxXv_urgbse66Ol2wnM36ddl7v6GMVjh0JXdJEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099514773505 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 72351745 + split_id: 14079378335067013120 + tablet_uid: 72351745 + } + } + event { + cache_update { + database_id: 1099514773505 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 72351745 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\313\030\343J" + } + group { + group_uid: 72351745 + tablets { + tablet_uid: 72351745 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001H" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\004P\000\001" + } + key_recipes { + } + } + } + event { + name: "PointRead_StopServer/6" + read { + session: "instances/default/databases/db4/sessions/Cj06QaQcKhM5rNrSjoYJN6w9yW8QlrvlrAYbHThdJcgxiQOcxXv_urgbse66Ol2wnM36ddl7v6GMVjh0JXdJEO_3ts7M-ZAD" + 
transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099514773505 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 72351745 + split_id: 14079378335067013120 + tablet_uid: 72351745 + } + } + event { + name: "PointRead_StopServer/7" + unhealthy_servers: "localhost:15100" + read { + session: "instances/default/databases/db4/sessions/Cj06QaQcKhM5rNrSjoYJN6w9yW8QlrvlrAYbHThdJcgxiQOcxXv_urgbse66Ol2wnM36ddl7v6GMVjh0JXdJEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099514773505 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 72351745 + split_id: 14079378335067013120 + skipped_tablet_uid { + tablet_uid: 72351745 + incarnation: "\001H" + } + skipped_tablet_uid { + tablet_uid: 72351745 + incarnation: "\001H" + } + } + } + event { + cache_update { + database_id: 1099514773505 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 72351745 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\313\027\337?" 
+ } + group { + group_uid: 72351745 + tablets { + tablet_uid: 72351745 + server_address: "localhost:15000" + role: READ_WRITE + incarnation: "\001_" + } + generation: "\010\377\377\377\377\377\377\377\377\001\001\004\004P\000\001" + } + key_recipes { + } + } + } + event { + name: "PointRead_StopServer/9" + read { + session: "instances/default/databases/db4/sessions/Cj06QaQcKhM5rNrSjoYJN6w9yW8QlrvlrAYbHThdJcgxiQOcxXv_urgbse66Ol2wnM36ddl7v6GMVjh0JXdJEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099514773505 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 72351745 + split_id: 14079378335067013120 + tablet_uid: 72351745 + } + } +} +test_case { + name: "Query_Basic" + event { + name: "Query_Basic/0" + sql { + session: "instances/default/databases/db18/sessions/Cj0AhIB1FQZ0mpfUyRe4RAdNeLsy8_3N0l6G0SYbTQH9L4Mo6SAJCEW0asR2_0Y2B_x-EQH5q3LXC8IqY_UUEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099530502145 + key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 1 + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "key" + } + } + } + } + } + event { + name: "Query_Basic/2" + sql { + session: "instances/default/databases/db18/sessions/Cj0AhIB1FQZ0mpfUyRe4RAdNeLsy8_3N0l6G0SYbTQH9L4Mo6SAJCEW0asR2_0Y2B_x-EQH5q3LXC8IqY_UUEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, 
V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099530502145 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099530502145 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 174063617 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\3205D\321" + } + group { + group_uid: 174063617 + tablets { + tablet_uid: 174063617 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\304" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\n`\000\001" + } + key_recipes { + } + } + } + event { + name: "Query_Basic/4" + sql { + session: "instances/default/databases/db18/sessions/Cj0AhIB1FQZ0mpfUyRe4RAdNeLsy8_3N0l6G0SYbTQH9L4Mo6SAJCEW0asR2_0Y2B_x-EQH5q3LXC8IqY_UUEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099530502145 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 174063617 + split_id: 14079378335067013120 + tablet_uid: 174063617 + } + } + event { + cache_update { + database_id: 1099530502145 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 174063617 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\3207\336\354" + } + group { + group_uid: 174063617 + tablets { + tablet_uid: 174063617 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\304" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\n`\000\001" + } + key_recipes { + } + } + } + event { + name: "Query_Basic/6" + sql { + session: 
"instances/default/databases/db18/sessions/Cj0AhIB1FQZ0mpfUyRe4RAdNeLsy8_3N0l6G0SYbTQH9L4Mo6SAJCEW0asR2_0Y2B_x-EQH5q3LXC8IqY_UUEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099530502145 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 174063617 + split_id: 14079378335067013120 + tablet_uid: 174063617 + } + } + event { + name: "Query_Basic/7" + sql { + session: "instances/default/databases/db18/sessions/Cj0AhIB1FQZ0mpfUyRe4RAdNeLsy8_3N0l6G0SYbTQH9L4Mo6SAJCEW0asR2_0Y2B_x-EQH5q3LXC8IqY_UUEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "6" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099530502145 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 174063617 + split_id: 14079378335067013120 + tablet_uid: 174063617 + } + } + event { + name: "Query_Basic/8" + sql { + session: "instances/default/databases/db18/sessions/Cj0AhIB1FQZ0mpfUyRe4RAdNeLsy8_3N0l6G0SYbTQH9L4Mo6SAJCEW0asR2_0Y2B_x-EQH5q3LXC8IqY_UUEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "99" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099530502145 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 174063617 + split_id: 14079378335067013120 + tablet_uid: 174063617 + } + } +} +test_case { + name: "Queryroot" + event { + name: "Queryroot/0" + sql { + session: 
"instances/default/databases/db36/sessions/Cj1-3deMwlMPSU3TUZjwEiTnCEx6ntNH_PgS62Q5Uq5SIJ7I3nvRImR3DuycWYdHyu_aL5pq9cpaUDBf3csWEO_3ts7M-ZAD" + sql: "SELECT TEXT_FINGERPRINT, TEXT, FROM SPANNER_SYS.OLDEST_ACTIVE_QUERIES" + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099550425089 + key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 1 + part { + tag: 50016 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NOT_NULL + type { + code: INT64 + } + value { + string_value: "0" + } + } + } + } + } + } + event { + name: "Queryroot/2" + sql { + session: "instances/default/databases/db36/sessions/Cj1-3deMwlMPSU3TUZjwEiTnCEx6ntNH_PgS62Q5Uq5SIJ7I3nvRImR3DuycWYdHyu_aL5pq9cpaUDBf3csWEO_3ts7M-ZAD" + sql: "SELECT TEXT_FINGERPRINT, TEXT, FROM SPANNER_SYS.OLDEST_ACTIVE_QUERIES" + } + hint { + operation_uid: 1 + database_id: 1099550425089 + schema_generation: "\001\001" + key: "A\206\300\002\221\000" + } + } + event { + cache_update { + database_id: 1099550425089 + range { + start_key: "A\206\300" + limit_key: "A\206\301" + group_uid: 309329921 + split_id: 14078252435160170496 + generation: "\000\007\006C\314\3271\033z" + } + group { + group_uid: 309329921 + tablets { + tablet_uid: 309329921 + server_address: "localhost:15000" + role: READ_WRITE + incarnation: "\002\001\232" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\022p\000\001" + } + key_recipes { + } + } + } + event { + name: "Queryroot/4" + sql { + session: "instances/default/databases/db36/sessions/Cj1-3deMwlMPSU3TUZjwEiTnCEx6ntNH_PgS62Q5Uq5SIJ7I3nvRImR3DuycWYdHyu_aL5pq9cpaUDBf3csWEO_3ts7M-ZAD" + sql: "SELECT TEXT_FINGERPRINT, TEXT, FROM SPANNER_SYS.OLDEST_ACTIVE_QUERIES" + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099550425089 + schema_generation: "\001\001" + key: "A\206\300" + limit_key: "A\206\301" + group_uid: 309329921 + split_id: 14078252435160170496 + tablet_uid: 309329921 + } + } +} +test_case 
{ + name: "RandomSplitQuery" + event { + name: "RandomSplitQuery/0" + sql { + session: "instances/default/databases/db33/sessions/Cjx4AhBshfE8Wz_DweJPRclYklWrtNOgZU6SOL_5EY-RxIkB0O1k8mYR1tr1pYqNnVlVPFTrl1kJd9_Ho9sQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T@{FORCE_INDEX=_BASE_TABLE} WHERE Key IN UNNEST(@keys)" + params { + fields { + key: "keys" + value { + list_value { + values { + string_value: "0" + } + values { + string_value: "1" + } + values { + string_value: "2" + } + } + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099547279361 + key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 1 + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "keys" + } + } + } + } + } + event { + name: "RandomSplitQuery/2" + sql { + session: "instances/default/databases/db33/sessions/Cjx4AhBshfE8Wz_DweJPRclYklWrtNOgZU6SOL_5EY-RxIkB0O1k8mYR1tr1pYqNnVlVPFTrl1kJd9_Ho9sQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T@{FORCE_INDEX=_BASE_TABLE} WHERE Key IN UNNEST(@keys)" + params { + fields { + key: "keys" + value { + list_value { + values { + string_value: "0" + } + values { + string_value: "1" + } + values { + string_value: "2" + } + } + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099547279361 + schema_generation: "\001\001" + key: "A\206\310\002" + limit_key: "A\206\310\003" + } + } + event { + cache_update { + database_id: 1099547279361 + range { + start_key: "A\206\310" + limit_key: "A\206\310\002\234\2311\000x" + group_uid: 290455553 + split_id: 14079378335067013121 + generation: "\007\006C\314\326\324z5\007\006C\314\326\325\300\240" + } + range { + start_key: "A\206\310\002\234\2311\000x" + limit_key: "A\206\311" + group_uid: 291504129 + split_id: 14079378335067013121 + 
generation: "\007\006C\314\326\324z5\007\006C\314\326\325\300\240" + } + group { + group_uid: 290455553 + tablets { + tablet_uid: 290455553 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001\212" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\021P\000\001" + } + group { + group_uid: 291504129 + tablets { + tablet_uid: 291504129 + server_address: "localhost:15000" + role: READ_WRITE + incarnation: "\002\001\213" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\021`\000\001" + } + key_recipes { + } + } + } + event { + name: "RandomSplitQuery/4" + sql { + session: "instances/default/databases/db33/sessions/Cjx4AhBshfE8Wz_DweJPRclYklWrtNOgZU6SOL_5EY-RxIkB0O1k8mYR1tr1pYqNnVlVPFTrl1kJd9_Ho9sQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T@{FORCE_INDEX=_BASE_TABLE} WHERE Key IN UNNEST(@keys)" + params { + fields { + key: "keys" + value { + list_value { + values { + string_value: "0" + } + values { + string_value: "1" + } + values { + string_value: "2" + } + } + } + } + } + } + server: "localhost:15000" + hint { + operation_uid: 1 + database_id: 1099547279361 + schema_generation: "\001\001" + key: "A\206\310\002\234\2311\000x" + limit_key: "A\206\311" + group_uid: 291504129 + split_id: 14079378335067013121 + tablet_uid: 291504129 + } + } +} +test_case { + name: "RangeQuery" + event { + name: "RangeQuery/0" + sql { + session: "instances/default/databases/db30/sessions/CjynQBcGQrjQeodWrxOhJQjQgAKxjCQ-leDzPdhgZW8QORrZibtwD7HWqz-U8RNelce3k2eYamxiIMIE2SQQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T WHERE Key >= @start AND Key < @limit" + params { + fields { + key: "limit" + value { + string_value: "5" + } + } + fields { + key: "start" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099544133633 + key_recipes 
{ + schema_generation: "\001\001" + recipe { + operation_uid: 1 + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "start" + } + } + } + } + } + event { + name: "RangeQuery/2" + sql { + session: "instances/default/databases/db30/sessions/CjynQBcGQrjQeodWrxOhJQjQgAKxjCQ-leDzPdhgZW8QORrZibtwD7HWqz-U8RNelce3k2eYamxiIMIE2SQQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T WHERE Key >= @start AND Key < @limit" + params { + fields { + key: "limit" + value { + string_value: "5" + } + } + fields { + key: "start" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099544133633 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099544133633 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 263192577 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\326o/\274" + } + group { + group_uid: 263192577 + tablets { + tablet_uid: 263192577 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001q" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\017\260\000\001" + } + key_recipes { + } + } + } + event { + name: "RangeQuery/4" + sql { + session: "instances/default/databases/db30/sessions/CjynQBcGQrjQeodWrxOhJQjQgAKxjCQ-leDzPdhgZW8QORrZibtwD7HWqz-U8RNelce3k2eYamxiIMIE2SQQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T WHERE Key >= @start AND Key < @limit" + params { + fields { + key: "limit" + value { + string_value: "5" + } + } + fields { + key: "start" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099544133633 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 263192577 + 
split_id: 14079378335067013120 + tablet_uid: 263192577 + } + } + event { + cache_update { + database_id: 1099544133633 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 263192577 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\326s\202\243" + } + group { + group_uid: 263192577 + tablets { + tablet_uid: 263192577 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001q" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\017\260\000\001" + } + key_recipes { + } + } + } + event { + name: "RangeQuery/6" + sql { + session: "instances/default/databases/db30/sessions/CjynQBcGQrjQeodWrxOhJQjQgAKxjCQ-leDzPdhgZW8QORrZibtwD7HWqz-U8RNelce3k2eYamxiIMIE2SQQ7_e2zsz5kAM" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0 FROM T WHERE Key >= @start AND Key < @limit" + params { + fields { + key: "limit" + value { + string_value: "5" + } + } + fields { + key: "start" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099544133633 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 263192577 + split_id: 14079378335067013120 + tablet_uid: 263192577 + } + } +} +test_case { + name: "RangeRead" + event { + name: "RangeRead/0" + read { + session: "instances/default/databases/db13/sessions/Cj3u91rLO503wDtURxIEP9XFEOjXODpgPD-THdCp1MX-LrLsBFNM7_IhP8F8Q3XzmyG1OGNoiDoCA0--eIvGEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + ranges { + start_closed { + values { + string_value: "0" + } + } + end_open { + values { + string_value: "5" + } + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099525259265 + key_recipes { + schema_generation: "\001\001" + recipe { + table_name: "T" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + 
order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "Key" + } + } + } + } + } + event { + name: "RangeRead/2" + read { + session: "instances/default/databases/db13/sessions/Cj3u91rLO503wDtURxIEP9XFEOjXODpgPD-THdCp1MX-LrLsBFNM7_IhP8F8Q3XzmyG1OGNoiDoCA0--eIvGEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + ranges { + start_closed { + values { + string_value: "0" + } + } + end_open { + values { + string_value: "5" + } + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099525259265 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + limit_key: "A\206\310\002\234\2315\000x" + } + } + event { + cache_update { + database_id: 1099525259265 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 140509185 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\317\270\342\357" + } + group { + group_uid: 140509185 + tablets { + tablet_uid: 140509185 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\244" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\010`\000\001" + } + key_recipes { + } + } + } + event { + name: "RangeRead/4" + read { + session: "instances/default/databases/db13/sessions/Cj3u91rLO503wDtURxIEP9XFEOjXODpgPD-THdCp1MX-LrLsBFNM7_IhP8F8Q3XzmyG1OGNoiDoCA0--eIvGEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + ranges { + start_closed { + values { + string_value: "0" + } + } + end_open { + values { + string_value: "5" + } + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099525259265 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 140509185 + split_id: 14079378335067013120 + tablet_uid: 140509185 + } + } + event { + cache_update { + database_id: 1099525259265 + range { + 
start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 140509185 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\317\270\342\357" + } + group { + group_uid: 140509185 + tablets { + tablet_uid: 140509185 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\244" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\010`\000\001" + } + key_recipes { + } + } + } + event { + name: "RangeRead/6" + read { + session: "instances/default/databases/db13/sessions/Cj3u91rLO503wDtURxIEP9XFEOjXODpgPD-THdCp1MX-LrLsBFNM7_IhP8F8Q3XzmyG1OGNoiDoCA0--eIvGEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + key_set { + ranges { + start_closed { + values { + string_value: "0" + } + } + end_open { + values { + string_value: "5" + } + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099525259265 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 140509185 + split_id: 14079378335067013120 + tablet_uid: 140509185 + } + } +} +test_case { + name: "SchemaChange" + event { + name: "SchemaChange/0" + read { + session: "instances/default/databases/db12/sessions/Cj3nfoJOAt2jL0sglk3NLsu54-3zpWp6CnodAvXgomtqQPO2naJaUHeFEOSVQCkj2_dfxCCOBf7YIL6QQT2eEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099524210689 + key_recipes { + schema_generation: "\001\001" + recipe { + table_name: "T" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "Key" + } + } + } + } + } + event { + name: "SchemaChange/2" + read { + session: 
"instances/default/databases/db12/sessions/Cj3nfoJOAt2jL0sglk3NLsu54-3zpWp6CnodAvXgomtqQPO2naJaUHeFEOSVQCkj2_dfxCCOBf7YIL6QQT2eEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099524210689 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099524210689 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 134217729 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\317k\222^" + } + group { + group_uid: 134217729 + tablets { + tablet_uid: 134217729 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\001\236" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\010\000\000\001" + } + key_recipes { + } + } + } + event { + name: "SchemaChange/4" + read { + session: "instances/default/databases/db12/sessions/Cj3nfoJOAt2jL0sglk3NLsu54-3zpWp6CnodAvXgomtqQPO2naJaUHeFEOSVQCkj2_dfxCCOBf7YIL6QQT2eEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099524210689 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 134217729 + split_id: 14079378335067013120 + tablet_uid: 134217729 + } + } + event { + cache_update { + database_id: 1099524210689 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 134217729 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\317m\306\322" + } + group { + group_uid: 134217729 + tablets { + tablet_uid: 134217729 + server_address: "localhost:15100" + role: READ_WRITE + 
incarnation: "\001\236" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\010\000\000\001" + } + key_recipes { + } + } + } + event { + name: "SchemaChange/6" + read { + session: "instances/default/databases/db12/sessions/Cj3nfoJOAt2jL0sglk3NLsu54-3zpWp6CnodAvXgomtqQPO2naJaUHeFEOSVQCkj2_dfxCCOBf7YIL6QQT2eEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099524210689 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 134217729 + split_id: 14079378335067013120 + tablet_uid: 134217729 + } + } + event { + name: "SchemaChange/7" + read { + session: "instances/default/databases/db12/sessions/Cj3nfoJOAt2jL0sglk3NLsu54-3zpWp6CnodAvXgomtqQPO2naJaUHeFEOSVQCkj2_dfxCCOBf7YIL6QQT2eEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + columns: "V2" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099524210689 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 134217729 + split_id: 14079378335067013120 + tablet_uid: 134217729 + } + } + event { + name: "SchemaChange/8" + read { + session: "instances/default/databases/db12/sessions/Cj3nfoJOAt2jL0sglk3NLsu54-3zpWp6CnodAvXgomtqQPO2naJaUHeFEOSVQCkj2_dfxCCOBf7YIL6QQT2eEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + table: "T" + columns: "Key" + columns: "V0" + columns: "V1" + key_set { + keys { + values { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 2 + database_id: 1099524210689 + schema_generation: "\001\001" + key: "A\206\310" + 
limit_key: "A\206\311" + group_uid: 134217729 + split_id: 14079378335067013120 + tablet_uid: 134217729 + } + } +} +test_case { + name: "SchemaChange_Query" + event { + name: "SchemaChange_Query/0" + sql { + session: "instances/default/databases/db29/sessions/Cj3q6mix4HEACSTV7qfn-anCj6i3qJoPhAU9-97hUwZ_mBPgNQF9vRPFpH5onCRy5ZxWf8FuDJlYvcD-53bMEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + } + } + event { + cache_update { + database_id: 1099543085057 + key_recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 1 + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "key" + } + } + } + } + } + event { + name: "SchemaChange_Query/2" + sql { + session: "instances/default/databases/db29/sessions/Cj3q6mix4HEACSTV7qfn-anCj6i3qJoPhAU9-97hUwZ_mBPgNQF9vRPFpH5onCRy5ZxWf8FuDJlYvcD-53bMEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 1 + database_id: 1099543085057 + schema_generation: "\001\001" + key: "A\206\310\002\234\2310\000x" + } + } + event { + cache_update { + database_id: 1099543085057 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 256901121 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\326)\373>" + } + group { + group_uid: 256901121 + tablets { + tablet_uid: 256901121 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001k" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\017P\000\001" + } + key_recipes { + } + } + } + event { + name: "SchemaChange_Query/4" + sql { + session: 
"instances/default/databases/db29/sessions/Cj3q6mix4HEACSTV7qfn-anCj6i3qJoPhAU9-97hUwZ_mBPgNQF9vRPFpH5onCRy5ZxWf8FuDJlYvcD-53bMEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099543085057 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 256901121 + split_id: 14079378335067013120 + tablet_uid: 256901121 + } + } + event { + cache_update { + database_id: 1099543085057 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 256901121 + split_id: 14079378335067013120 + generation: "\000\007\006C\314\326+\204M" + } + group { + group_uid: 256901121 + tablets { + tablet_uid: 256901121 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001k" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\017P\000\001" + } + key_recipes { + } + } + } + event { + name: "SchemaChange_Query/6" + sql { + session: "instances/default/databases/db29/sessions/Cj3q6mix4HEACSTV7qfn-anCj6i3qJoPhAU9-97hUwZ_mBPgNQF9vRPFpH5onCRy5ZxWf8FuDJlYvcD-53bMEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099543085057 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 256901121 + split_id: 14079378335067013120 + tablet_uid: 256901121 + } + } + event { + name: "SchemaChange_Query/7" + sql { + session: "instances/default/databases/db29/sessions/Cj3q6mix4HEACSTV7qfn-anCj6i3qJoPhAU9-97hUwZ_mBPgNQF9vRPFpH5onCRy5ZxWf8FuDJlYvcD-53bMEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true 
+ } + } + } + sql: "SELECT Key, V0, V1, V2 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 1 + database_id: 1099543085057 + schema_generation: "\001\001" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 256901121 + split_id: 14079378335067013120 + tablet_uid: 256901121 + } + } + event { + name: "SchemaChange_Query/8" + sql { + session: "instances/default/databases/db29/sessions/Cj3q6mix4HEACSTV7qfn-anCj6i3qJoPhAU9-97hUwZ_mBPgNQF9vRPFpH5onCRy5ZxWf8FuDJlYvcD-53bMEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + hint { + operation_uid: 2 + database_id: 1099543085057 + schema_generation: "\001\001" + } + } + event { + cache_update { + database_id: 1099543085057 + key_recipes { + schema_generation: "\001\002" + recipe { + operation_uid: 2 + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "key" + } + } + } + } + } + event { + name: "SchemaChange_Query/10" + sql { + session: "instances/default/databases/db29/sessions/Cj3q6mix4HEACSTV7qfn-anCj6i3qJoPhAU9-97hUwZ_mBPgNQF9vRPFpH5onCRy5ZxWf8FuDJlYvcD-53bMEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 2 + database_id: 1099543085057 + schema_generation: "\001\002" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 256901121 + split_id: 14079378335067013120 + tablet_uid: 256901121 + } + } + event { + cache_update { + database_id: 1099543085057 + range { + start_key: "A\206\310" + limit_key: "A\206\311" + group_uid: 256901121 
+ split_id: 14079378335067013120 + generation: "\000\007\006C\314\326+\204M" + } + group { + group_uid: 256901121 + tablets { + tablet_uid: 256901121 + server_address: "localhost:15100" + role: READ_WRITE + incarnation: "\002\001k" + } + generation: "\010\377\377\377\377\377\377\377\377\000\004\017P\000\001" + } + key_recipes { + } + } + } + event { + name: "SchemaChange_Query/12" + sql { + session: "instances/default/databases/db29/sessions/Cj3q6mix4HEACSTV7qfn-anCj6i3qJoPhAU9-97hUwZ_mBPgNQF9vRPFpH5onCRy5ZxWf8FuDJlYvcD-53bMEO_3ts7M-ZAD" + transaction { + single_use { + read_only { + strong: true + } + } + } + sql: "SELECT Key, V0, V1 FROM T WHERE Key = @key" + params { + fields { + key: "key" + value { + string_value: "0" + } + } + } + } + server: "localhost:15100" + hint { + operation_uid: 2 + database_id: 1099543085057 + schema_generation: "\001\002" + key: "A\206\310" + limit_key: "A\206\311" + group_uid: 256901121 + split_id: 14079378335067013120 + tablet_uid: 256901121 + } + } +} diff --git a/java-spanner/google-cloud-spanner/src/test/resources/logging.properties b/java-spanner/google-cloud-spanner/src/test/resources/logging.properties new file mode 100644 index 000000000000..c817ab7acd9f --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/logging.properties @@ -0,0 +1,7 @@ +.level=INFO +.handlers=java.util.logging.ConsoleHandler +java.util.logging.ConsoleHandler.level=INFO +java.util.logging.Logger.useParentHandlers=true + +# Set log level to WARN for SpannerImpl to prevent log spamming of the Spanner configuration. 
+com.google.cloud.spanner.SpannerImpl.LEVEL=WARN diff --git a/java-spanner/google-cloud-spanner/src/test/resources/range_cache_test.textproto b/java-spanner/google-cloud-spanner/src/test/resources/range_cache_test.textproto new file mode 100644 index 000000000000..70a5e1151d55 --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/range_cache_test.textproto @@ -0,0 +1,1204 @@ +test_case { + name: "no_key" + step { + test { + result { + } + } + } +} + +test_case { + name: "empty_key" + step { + test { + key: "" + result { + key: "" + } + } + } +} + +test_case { + name: "empty_cache" + step { + update { + } + test { + key: "a" + result { + key: "a" + } + } + } +} + +test_case { + name: "basic_cache_hit" + step { + update { + range { + start_key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + generation: "22" + } + group { + group_uid: 2 + generation: "22" + tablets { + tablet_uid: 4 + server_address: "server1" + location: "us-central1" + role: READ_WRITE + incarnation: "44" + distance: 0 + skip: false + } + } + } + test { + key: "a" + result { + key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + tablet_uid: 4 + } + server: "server1" + } + test { + key: "z" + result { + key: "z" + } + } + } +} + +test_case { + name: "cache_math" + step { + # Start with [k..n)->split 3 + update { + range { + start_key: "k" + limit_key: "n" + group_uid: 2 + split_id: 3 + generation: "22" + } + group { + group_uid: 2 + tablets { + tablet_uid: 4 + server_address: "server1" + } + } + } + test { + key: "k" + result { + key: "k" + limit_key: "n" + group_uid: 2 + split_id: 3 + tablet_uid: 4 + } + server: "server1" + } + } + # Insert [m..o)->4. 
Now: + # [k..m)->3 + # [m..o)->4 + step { + update { + range { + start_key: "m" + limit_key: "o" + group_uid: 2 + split_id: 4 + generation: "23" + } + } + test { + key: "k" + result { + key: "k" + limit_key: "m" + group_uid: 2 + split_id: 3 + tablet_uid: 4 + } + server: "server1" + } + test { + key: "m" + result { + key: "m" + limit_key: "o" + group_uid: 2 + split_id: 4 + tablet_uid: 4 + } + server: "server1" + } + } + # Insert [n..p)->5. Now: + # [k..m)->3 + # [m..o)->4 + # [o..p)->5 + step { + update { + range { + start_key: "n" + limit_key: "p" + group_uid: 2 + split_id: 5 + generation: "24" + } + } + test { + key: "k" + result { + key: "k" + limit_key: "m" + group_uid: 2 + split_id: 3 + tablet_uid: 4 + } + server: "server1" + } + test { + key: "m" + result { + key: "m" + limit_key: "n" + group_uid: 2 + split_id: 4 + tablet_uid: 4 + } + server: "server1" + } + test { + key: "n" + result { + key: "n" + limit_key: "p" + group_uid: 2 + split_id: 5 + tablet_uid: 4 + } + server: "server1" + } + } + # Exact range replacement: Insert [m..n)->6 + step { + update { + range { + start_key: "m" + limit_key: "n" + group_uid: 2 + split_id: 6 + generation: "25" + } + } + test { + key: "k" + result { + key: "k" + limit_key: "m" + group_uid: 2 + split_id: 3 + tablet_uid: 4 + } + server: "server1" + } + test { + key: "m" + result { + key: "m" + limit_key: "n" + group_uid: 2 + split_id: 6 + tablet_uid: 4 + } + server: "server1" + } + test { + key: "n" + result { + key: "n" + limit_key: "p" + group_uid: 2 + split_id: 5 + tablet_uid: 4 + } + server: "server1" + } + } + # Merge ranges, insert [k..o)->7. 
Now: + # [k..o)->7 + # [o..p)->5 + step { + update { + range { + start_key: "k" + limit_key: "o" + group_uid: 2 + split_id: 7 + generation: "26" + } + } + test { + key: "k" + result { + key: "k" + limit_key: "o" + group_uid: 2 + split_id: 7 + tablet_uid: 4 + } + server: "server1" + } + test { + key: "n" + result { + key: "k" + limit_key: "o" + group_uid: 2 + split_id: 7 + tablet_uid: 4 + } + server: "server1" + } + test { + key: "o" + result { + key: "o" + limit_key: "p" + group_uid: 2 + split_id: 5 + tablet_uid: 4 + } + server: "server1" + } + } + # Inserting an old range does nothing. + step { + update { + range { + start_key: "k" + limit_key: "o" + group_uid: 2 + split_id: 8 + generation: "25" + } + } + test { + key: "k" + result { + key: "k" + limit_key: "o" + group_uid: 2 + split_id: 7 + tablet_uid: 4 + } + server: "server1" + } + } + # Old ranges that are bigger than the existing old ranges are also discarded. + step { + update { + range { + start_key: "a" + limit_key: "z" + group_uid: 2 + split_id: 8 + generation: "25" + } + } + test { + key: "k" + result { + key: "k" + limit_key: "o" + group_uid: 2 + split_id: 7 + tablet_uid: 4 + } + server: "server1" + } + } +} + +test_case { + name: "leader_selection" + step { + update { + range { + start_key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + generation: "22" + } + group { + group_uid: 2 + generation: "22" + tablets { + tablet_uid: 4 + server_address: "server1" + location: "us-central1" + role: READ_WRITE + incarnation: "44" + distance: 0 + skip: false + } + tablets { + tablet_uid: 5 + server_address: "server2" + location: "us-central1" + role: READ_WRITE + incarnation: "55" + distance: 1 + skip: false + } + tablets { + tablet_uid: 6 + server_address: "server3" + location: "us-central1" + role: READ_WRITE + incarnation: "66" + distance: 10 + skip: false + } + leader_index: 1 + } + } + test { + key: "a" + leader: true + result { + key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + tablet_uid: 5 + } 
+ server: "server2" + } + } + # Update the leader to be tablet 4. + step { + update { + group { + group_uid: 2 + leader_index: 0 + generation: "23" + tablets { + tablet_uid: 4 + server_address: "server1" + location: "us-central1" + role: READ_WRITE + incarnation: "44" + distance: 0 + skip: false + } + tablets { + tablet_uid: 5 + server_address: "server2" + location: "us-central1" + role: READ_WRITE + incarnation: "55" + distance: 1 + skip: false + } + tablets { + tablet_uid: 6 + server_address: "server3" + location: "us-central1" + role: READ_WRITE + incarnation: "66" + distance: 10 + skip: false + } + } + } + test { + key: "a" + leader: true + result { + key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + tablet_uid: 4 + } + server: "server1" + } + } + # Old generation updates are ignored. + step { + update { + group { + group_uid: 2 + leader_index: 1 + generation: "22" + tablets { + tablet_uid: 4 + server_address: "server1" + location: "us-central1" + role: READ_WRITE + incarnation: "44" + distance: 0 + skip: false + } + tablets { + tablet_uid: 5 + server_address: "server2" + location: "us-central1" + role: READ_WRITE + incarnation: "55" + distance: 1 + skip: false + } + tablets { + tablet_uid: 6 + server_address: "server3" + location: "us-central1" + role: READ_WRITE + incarnation: "66" + distance: 10 + skip: false + } + } + } + test { + key: "a" + leader: true + result { + key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + tablet_uid: 4 + } + server: "server1" + } + } +} + +# We should not use a leader that is too far away, and instead use a close +# non-leader replica. 
+test_case { + name: "far_away_leader" + step { + update { + range { + start_key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + generation: "22" + } + group { + group_uid: 2 + generation: "22" + tablets { + tablet_uid: 4 + server_address: "server1" + location: "us-central1" + role: READ_WRITE + incarnation: "44" + distance: 5 + skip: false + } + tablets { + tablet_uid: 5 + server_address: "server2" + location: "us-central1" + role: READ_WRITE + incarnation: "55" + distance: 6 + skip: false + } + leader_index: 1 + } + } + test { + key: "a" + leader: true + result { + key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + tablet_uid: 4 + } + server: "server1" + } + } +} + +# No leader - make sure we handle leader_index: -1 +test_case { + name: "no_leader" + step { + update { + range { + start_key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + generation: "22" + } + group { + group_uid: 2 + generation: "22" + tablets { + tablet_uid: 4 + server_address: "server1" + location: "us-central1" + role: READ_WRITE + incarnation: "44" + distance: 0 + skip: false + } + tablets { + tablet_uid: 5 + server_address: "server2" + location: "us-central1" + role: READ_WRITE + incarnation: "55" + distance: 1 + skip: false + } + leader_index: -1 + } + } + test { + key: "a" + leader: true + result { + key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + tablet_uid: 4 + } + server: "server1" + } + } +} + +test_case { + name: "tablet_location_updates" + step { + update { + range { + start_key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + generation: "22" + } + group { + group_uid: 2 + generation: "22" + tablets { + tablet_uid: 4 + server_address: "server1" + incarnation: "33" + } + } + } + test { + key: "a" + result { + key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + tablet_uid: 4 + } + server: "server1" + } + } + step { + update { + group { + group_uid: 2 + generation: "22" + tablets { + tablet_uid: 4 + server_address: "server2" + incarnation: "34" + } + } 
+ } + test { + key: "a" + result { + key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + tablet_uid: 4 + } + server: "server2" + } + } + # Distance updates are allowed with same incarnation. + step { + update { + group { + group_uid: 2 + generation: "22" + tablets { + tablet_uid: 4 + server_address: "server2" + incarnation: "34" + distance: 6 + } + } + } + test { + key: "a" + result { + key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + } + } + } + step { + update { + group { + group_uid: 2 + generation: "22" + tablets { + tablet_uid: 4 + server_address: "server2" + incarnation: "34" + distance: 2 + } + } + } + test { + key: "a" + result { + key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + tablet_uid: 4 + } + server: "server2" + } + } +} + +test_case { + name: "replica_reshuffling" + step { + update { + range { + start_key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + generation: "22" + } + group { + group_uid: 2 + generation: "22" + tablets { + tablet_uid: 4 + server_address: "server1" + location: "us-central1" + role: READ_WRITE + incarnation: "44" + distance: 0 + skip: false + } + tablets { + tablet_uid: 5 + server_address: "server2" + location: "us-central1" + role: READ_WRITE + incarnation: "55" + distance: 1 + skip: false + } + } + } + test { + key: "a" + leader: true + result { + key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + tablet_uid: 4 + } + server: "server1" + } + } + # Reorder the replicas. The updates should apply correctly, and the new + # first replica should be used. 
+ step { + update { + range { + start_key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + generation: "22" + } + group { + group_uid: 2 + generation: "22" + tablets { + tablet_uid: 5 + server_address: "server2" + location: "us-central1" + role: READ_WRITE + incarnation: "55" + distance: 0 + skip: false + } + tablets { + tablet_uid: 4 + server_address: "server1" + location: "us-central1" + role: READ_WRITE + incarnation: "44" + distance: 1 + skip: false + } + } + } + test { + key: "a" + leader: true + result { + key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + tablet_uid: 5 + } + server: "server2" + } + } +} + +# Directed read options: region picking +test_case { + name: "directed_read_options" + step { + update { + range { + start_key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + generation: "22" + } + group { + group_uid: 2 + generation: "22" + tablets { + tablet_uid: 4 + server_address: "server1" + location: "us-central1" + role: READ_WRITE + incarnation: "44" + distance: 0 + skip: false + } + tablets { + tablet_uid: 5 + server_address: "server2" + location: "us-central2" + role: READ_WRITE + incarnation: "55" + distance: 1 + skip: false + } + tablets { + tablet_uid: 6 + server_address: "server3" + location: "us-central3" + role: READ_ONLY + incarnation: "66" + distance: 2 + skip: false + } + } + } + # Specific location + test { + key: "a" + directed_read_options { + include_replicas { + replica_selections { + location: "us-central2" + } + } + } + result { + key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + tablet_uid: 5 + } + server: "server2" + } + # Specific replica type + test { + key: "a" + directed_read_options { + include_replicas { + replica_selections { + type: READ_ONLY + } + } + } + result { + key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + tablet_uid: 6 + } + server: "server3" + } + # Specific location and replica type, match found. 
+ test { + key: "a" + directed_read_options { + include_replicas { + replica_selections { + location: "us-central3" + type: READ_ONLY + } + } + } + result { + key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + tablet_uid: 6 + } + server: "server3" + } + # Specific location and replica type, no match found. + test { + key: "a" + directed_read_options { + include_replicas { + replica_selections { + location: "us-central2" + type: READ_ONLY + } + } + } + result { + key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + } + } + # Exclude a location + test { + key: "a" + directed_read_options { + exclude_replicas { + replica_selections { + location: "us-central1" + } + } + } + result { + key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + tablet_uid: 5 + } + server: "server2" + } + # Exclude a replica type + test { + key: "a" + directed_read_options { + exclude_replicas { + replica_selections { + type: READ_WRITE + } + } + } + result { + key: "a" + limit_key: "z" + group_uid: 2 + split_id: 3 + tablet_uid: 6 + } + server: "server3" + } + } +} + +test_case { + name: "range_calls" + step { + update { + range { + start_key: "b" + limit_key: "f" + group_uid: 2 + split_id: 3 + generation: "22" + } + group { + group_uid: 2 + generation: "22" + tablets { + tablet_uid: 4 + server_address: "server1" + incarnation: "44" + } + } + } + # Part of the range matching start. + test { + key: "b" + limit_key: "c" + result { + key: "b" + limit_key: "f" + group_uid: 2 + split_id: 3 + tablet_uid: 4 + } + server: "server1" + } + # Part of the range matching limit. + test { + key: "e" + limit_key: "f" + result { + key: "b" + limit_key: "f" + group_uid: 2 + split_id: 3 + tablet_uid: 4 + } + server: "server1" + } + # Exact range match. + test { + key: "b" + limit_key: "f" + result { + key: "b" + limit_key: "f" + group_uid: 2 + split_id: 3 + tablet_uid: 4 + } + server: "server1" + } + # Range does not overlap, start. 
+ test { + key: "a" + limit_key: "f" + result { + key: "a" + limit_key: "f" + } + } + # Range does not overlap, limit. + test { + key: "b" + limit_key: "g" + result { + key: "b" + limit_key: "g" + } + } + # Range does not overlap, both sides. + test { + key: "a" + limit_key: "g" + result { + key: "a" + limit_key: "g" + } + } + } +} + +test_case { + name: "range_calls_random" + step { + update { + range { + start_key: "a" + limit_key: "c" + group_uid: 2 + split_id: 3 + generation: "22" + } + range { + start_key: "c" + limit_key: "e" + group_uid: 2 + split_id: 4 + generation: "22" + } + range { + start_key: "e" + limit_key: "g" + group_uid: 2 + split_id: 5 + generation: "22" + } + range { + start_key: "j" + limit_key: "l" + group_uid: 2 + split_id: 6 + generation: "22" + } + range { + start_key: "o" + limit_key: "q" + group_uid: 2 + split_id: 7 + generation: "22" + } + group { + group_uid: 2 + generation: "22" + tablets { + tablet_uid: 4 + server_address: "server1" + incarnation: "44" + } + } + } + # Requested range covers multiple splits with COVERING_SPLIT. + # Should pick nothing. + test { + key: "b" + limit_key: "f" + range_mode: COVERING_SPLIT + result { + key: "b" + limit_key: "f" + } + } + # With PICK_RANDOM, should get a random split that overlaps. + test { + key: "b" + limit_key: "f" + range_mode: PICK_RANDOM + result { + key: "c" + limit_key: "e" + group_uid: 2 + split_id: 4 + tablet_uid: 4 + } + server: "server1" + } + # There is a gap in the cache for the requested ranges, so we should pick + # nothing. Test gaps at the start, in the middle, at the end, and at + # the end of the entire cache. + test { + key: "g" # Matches the limit of prior split. + limit_key: "k" # Inside the next cached split after 'g'. + range_mode: PICK_RANDOM + result { + key: "g" + limit_key: "k" + } + } + test { + key: "h" # In a gap between cached splits. + limit_key: "k" # Inside the next cached split after 'h'. 
+ range_mode: PICK_RANDOM + result { + key: "h" + limit_key: "k" + } + } + test { + key: "f" # Inside a cached split + limit_key: "k" # Inside another cached split, but a gap between f and k. + range_mode: PICK_RANDOM + result { + key: "f" + limit_key: "k" + } + } + test { + key: "k" # Inside a cached split. + limit_key: "m" # In a gap between the cached split and the next one. + range_mode: PICK_RANDOM + result { + key: "k" + limit_key: "m" + } + } + test { + key: "p" # In the last cached split in the cache. + limit_key: "z" # Should cause iteration to hit the end of the cache. + range_mode: PICK_RANDOM + result { + key: "p" + limit_key: "z" + } + } + # Gaps are okay if we have enough cached entries. + # Test the boundary condition first - we only have 5 entries in the cache. + test { + key: "a" + limit_key: "z" + range_mode: PICK_RANDOM + min_cache_entries_for_random_pick: 6 + result { + key: "a" + limit_key: "z" + } + } + test { + key: "a" + limit_key: "z" + range_mode: PICK_RANDOM + min_cache_entries_for_random_pick: 5 + result { + key: "e" + limit_key: "g" + group_uid: 2 + split_id: 5 + tablet_uid: 4 + } + server: "server1" + } + } +} + + diff --git a/java-spanner/google-cloud-spanner/src/test/resources/recipe_test.textproto b/java-spanner/google-cloud-spanner/src/test/resources/recipe_test.textproto new file mode 100644 index 000000000000..43fae04f5eca --- /dev/null +++ b/java-spanner/google-cloud-spanner/src/test/resources/recipe_test.textproto @@ -0,0 +1,3943 @@ +test_case { + name: "DataTypeTest_BOOL" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "DataTypeTest_BOOL" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: BOOL + } + identifier: "k" + } + } + } + test { + key { + values { + bool_value: false + } + } + start: "A\206\310\002\234\200\000" + } + test { + key { + values { + bool_value: true + } + } + start: "A\206\310\002\234\200\002" + } + test { + key { + 
values { + null_value: NULL_VALUE + } + } + start: "A\206\310\002\233\000" + } + test { + key { + values { + string_value: "true" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + number_value: 0 + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key_range { + start_closed { + values { + bool_value: false + } + } + end_open { + values { + bool_value: true + } + } + } + start: "A\206\310\002\234\200\000" + limit: "A\206\310\002\234\200\002" + } + test { + key_range { + start_open { + values { + bool_value: false + } + } + end_closed { + values { + bool_value: true + } + } + } + start: "A\206\310\002\234\200\001" + limit: "A\206\310\002\234\200\003" + } + test { + key_range { + start_closed { + values { + bool_value: false + } + } + end_closed { + values { + bool_value: true + } + } + } + start: "A\206\310\002\234\200\000" + limit: "A\206\310\002\234\200\003" + } + test { + key_range { + start_open { + values { + bool_value: false + } + } + end_open { + values { + bool_value: true + } + } + } + start: "A\206\310\002\234\200\001" + limit: "A\206\310\002\234\200\002" + } +} + +test_case { + name: "DataTypeTest_BOOL_Desc" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "DataTypeTest_BOOL_Desc" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: DESCENDING + null_order: NULLS_LAST + type { + code: BOOL + } + identifier: "k" + } + } + } + test { + key { + values { + bool_value: true + } + } + start: "A\206\310\002\273\250\374" + } + test { + key { + values { + bool_value: false + } + } + start: "A\206\310\002\273\250\376" + } + test { + key_range { + start_closed { + values { + bool_value: true + } + } + end_open { + values { + bool_value: false + } + } + } + start: "A\206\310\002\273\250\374" + limit: "A\206\310\002\273\250\376" + } + test { + key_range { + start_open { + values { + bool_value: true + } + } + end_closed { + values { + 
bool_value: false + } + } + } + start: "A\206\310\002\273\250\375" + limit: "A\206\310\002\273\250\377" + } + test { + key_range { + start_closed { + values { + bool_value: true + } + } + end_closed { + values { + bool_value: false + } + } + } + start: "A\206\310\002\273\250\374" + limit: "A\206\310\002\273\250\377" + } + test { + key_range { + start_open { + values { + bool_value: true + } + } + end_open { + values { + bool_value: false + } + } + } + start: "A\206\310\002\273\250\375" + limit: "A\206\310\002\273\250\376" + } +} + +test_case { + name: "DataTypeTest_ENUM" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "DataTypeTest_ENUM" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: ENUM + proto_type_fqn: "spanner.test.TestEnum" + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "1" + } + } + start: "A\206\310\002\234\221\002" + } + test { + key { + values { + string_value: "2" + } + } + start: "A\206\310\002\234\221\004" + } + test { + key { + values { + null_value: NULL_VALUE + } + } + start: "A\206\310\002\233\000" + } + test { + key { + values { + string_value: "NUMBER_ONE" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + number_value: 0 + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key_range { + start_closed { + values { + string_value: "1" + } + } + end_open { + values { + string_value: "2" + } + } + } + start: "A\206\310\002\234\221\002" + limit: "A\206\310\002\234\221\004" + } + test { + key_range { + start_open { + values { + string_value: "1" + } + } + end_closed { + values { + string_value: "2" + } + } + } + start: "A\206\310\002\234\221\003" + limit: "A\206\310\002\234\221\005" + } + test { + key_range { + start_closed { + values { + string_value: "1" + } + } + end_closed { + values { + string_value: "2" + } + } + } + start: 
"A\206\310\002\234\221\002" + limit: "A\206\310\002\234\221\005" + } + test { + key_range { + start_open { + values { + string_value: "1" + } + } + end_open { + values { + string_value: "2" + } + } + } + start: "A\206\310\002\234\221\003" + limit: "A\206\310\002\234\221\004" + } +} + +test_case { + name: "DataTypeTest_ENUM_Desc" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "DataTypeTest_ENUM_Desc" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: DESCENDING + null_order: NULLS_LAST + type { + code: ENUM + proto_type_fqn: "spanner.test.TestEnum" + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "2" + } + } + start: "A\206\310\002\273\260\372" + } + test { + key { + values { + string_value: "1" + } + } + start: "A\206\310\002\273\260\374" + } + test { + key_range { + start_closed { + values { + string_value: "2" + } + } + end_open { + values { + string_value: "1" + } + } + } + start: "A\206\310\002\273\260\372" + limit: "A\206\310\002\273\260\374" + } + test { + key_range { + start_open { + values { + string_value: "2" + } + } + end_closed { + values { + string_value: "1" + } + } + } + start: "A\206\310\002\273\260\373" + limit: "A\206\310\002\273\260\375" + } + test { + key_range { + start_closed { + values { + string_value: "2" + } + } + end_closed { + values { + string_value: "1" + } + } + } + start: "A\206\310\002\273\260\372" + limit: "A\206\310\002\273\260\375" + } + test { + key_range { + start_open { + values { + string_value: "2" + } + } + end_open { + values { + string_value: "1" + } + } + } + start: "A\206\310\002\273\260\373" + limit: "A\206\310\002\273\260\374" + } +} + +test_case { + name: "DataTypeTest_INT64" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "DataTypeTest_INT64" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: INT64 + } + identifier: "k" + } + } + } + test { + key { + values { + 
string_value: "-9223372036854775808" + } + } + start: "A\206\310\002\234\211\000\000\000\000\000\000\000\000" + } + test { + key { + values { + string_value: "9223372036854775807" + } + } + start: "A\206\310\002\234\230\377\377\377\377\377\377\377\376" + } + test { + key { + values { + null_value: NULL_VALUE + } + } + start: "A\206\310\002\233\000" + } + test { + key { + values { + string_value: "0" + } + } + start: "A\206\310\002\234\221\000" + } + test { + key { + values { + string_value: "-1" + } + } + start: "A\206\310\002\234\220\376" + } + test { + key { + values { + string_value: "1" + } + } + start: "A\206\310\002\234\221\002" + } + test { + key { + values { + number_value: 1 + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "Infinity" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key_range { + start_closed { + values { + string_value: "-9223372036854775808" + } + } + end_open { + values { + string_value: "9223372036854775807" + } + } + } + start: "A\206\310\002\234\211\000\000\000\000\000\000\000\000" + limit: "A\206\310\002\234\230\377\377\377\377\377\377\377\376" + } + test { + key_range { + start_open { + values { + string_value: "-9223372036854775808" + } + } + end_closed { + values { + string_value: "9223372036854775807" + } + } + } + start: "A\206\310\002\234\211\000\000\000\000\000\000\000\001" + limit: "A\206\310\002\234\230\377\377\377\377\377\377\377\377" + } + test { + key_range { + start_closed { + values { + string_value: "-9223372036854775808" + } + } + end_closed { + values { + string_value: "9223372036854775807" + } + } + } + start: "A\206\310\002\234\211\000\000\000\000\000\000\000\000" + limit: "A\206\310\002\234\230\377\377\377\377\377\377\377\377" + } + test { + key_range { + start_open { + values { + string_value: "-9223372036854775808" + } + } + end_open { + values { + string_value: "9223372036854775807" + } + } + 
} + start: "A\206\310\002\234\211\000\000\000\000\000\000\000\001" + limit: "A\206\310\002\234\230\377\377\377\377\377\377\377\376" + } +} + +test_case { + name: "DataTypeTest_INT64_Desc" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "DataTypeTest_INT64_Desc" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: DESCENDING + null_order: NULLS_LAST + type { + code: INT64 + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "9223372036854775807" + } + } + start: "A\206\310\002\273\251\000\000\000\000\000\000\000\000" + } + test { + key { + values { + string_value: "-9223372036854775808" + } + } + start: "A\206\310\002\273\270\377\377\377\377\377\377\377\376" + } + test { + key_range { + start_closed { + values { + string_value: "9223372036854775807" + } + } + end_open { + values { + string_value: "-9223372036854775808" + } + } + } + start: "A\206\310\002\273\251\000\000\000\000\000\000\000\000" + limit: "A\206\310\002\273\270\377\377\377\377\377\377\377\376" + } + test { + key_range { + start_open { + values { + string_value: "9223372036854775807" + } + } + end_closed { + values { + string_value: "-9223372036854775808" + } + } + } + start: "A\206\310\002\273\251\000\000\000\000\000\000\000\001" + limit: "A\206\310\002\273\270\377\377\377\377\377\377\377\377" + } + test { + key_range { + start_closed { + values { + string_value: "9223372036854775807" + } + } + end_closed { + values { + string_value: "-9223372036854775808" + } + } + } + start: "A\206\310\002\273\251\000\000\000\000\000\000\000\000" + limit: "A\206\310\002\273\270\377\377\377\377\377\377\377\377" + } + test { + key_range { + start_open { + values { + string_value: "9223372036854775807" + } + } + end_open { + values { + string_value: "-9223372036854775808" + } + } + } + start: "A\206\310\002\273\251\000\000\000\000\000\000\000\001" + limit: "A\206\310\002\273\270\377\377\377\377\377\377\377\376" + } +} + +test_case { + name: 
"DataTypeTest_FLOAT64" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "DataTypeTest_FLOAT64" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: FLOAT64 + } + identifier: "k" + } + } + } + test { + key { + values { + number_value: -1.7976931348623157e+308 + } + } + start: "A\206\310\002\234\302\000 \000\000\000\000\000\002" + } + test { + key { + values { + number_value: 1.7976931348623157e+308 + } + } + start: "A\206\310\002\234\321\377\337\377\377\377\377\377\376" + } + test { + key { + values { + null_value: NULL_VALUE + } + } + start: "A\206\310\002\233\000" + } + test { + key { + values { + number_value: 0 + } + } + start: "A\206\310\002\234\312\000" + } + test { + key { + values { + number_value: -1 + } + } + start: "A\206\310\002\234\302\200 \000\000\000\000\000\000" + } + test { + key { + values { + number_value: 1 + } + } + start: "A\206\310\002\234\321\177\340\000\000\000\000\000\000" + } + test { + key { + values { + string_value: "Infinity" + } + } + start: "A\206\310\002\234\321\377\340\000\000\000\000\000\000" + } + test { + key { + values { + string_value: "-Infinity" + } + } + start: "A\206\310\002\234\302\000 \000\000\000\000\000\000" + } + test { + key { + values { + string_value: "NaN" + } + } + start: "A\206\310\002\234\321\377\360\000\000\000\000\000\000" + } + test { + key { + values { + string_value: "UnexpectedString" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + bool_value: true + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key_range { + start_closed { + values { + number_value: -1.7976931348623157e+308 + } + } + end_open { + values { + number_value: 1.7976931348623157e+308 + } + } + } + start: "A\206\310\002\234\302\000 \000\000\000\000\000\002" + limit: "A\206\310\002\234\321\377\337\377\377\377\377\377\376" + } + test { + key_range { + 
start_open { + values { + number_value: -1.7976931348623157e+308 + } + } + end_closed { + values { + number_value: 1.7976931348623157e+308 + } + } + } + start: "A\206\310\002\234\302\000 \000\000\000\000\000\003" + limit: "A\206\310\002\234\321\377\337\377\377\377\377\377\377" + } + test { + key_range { + start_closed { + values { + number_value: -1.7976931348623157e+308 + } + } + end_closed { + values { + number_value: 1.7976931348623157e+308 + } + } + } + start: "A\206\310\002\234\302\000 \000\000\000\000\000\002" + limit: "A\206\310\002\234\321\377\337\377\377\377\377\377\377" + } + test { + key_range { + start_open { + values { + number_value: -1.7976931348623157e+308 + } + } + end_open { + values { + number_value: 1.7976931348623157e+308 + } + } + } + start: "A\206\310\002\234\302\000 \000\000\000\000\000\003" + limit: "A\206\310\002\234\321\377\337\377\377\377\377\377\376" + } +} + +test_case { + name: "DataTypeTest_FLOAT64_Desc" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "DataTypeTest_FLOAT64_Desc" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: DESCENDING + null_order: NULLS_LAST + type { + code: FLOAT64 + } + identifier: "k" + } + } + } + test { + key { + values { + number_value: 1.7976931348623157e+308 + } + } + start: "A\206\310\002\273\322\000 \000\000\000\000\000\000" + } + test { + key { + values { + number_value: -1.7976931348623157e+308 + } + } + start: "A\206\310\002\273\341\377\337\377\377\377\377\377\374" + } + test { + key_range { + start_closed { + values { + number_value: 1.7976931348623157e+308 + } + } + end_open { + values { + number_value: -1.7976931348623157e+308 + } + } + } + start: "A\206\310\002\273\322\000 \000\000\000\000\000\000" + limit: "A\206\310\002\273\341\377\337\377\377\377\377\377\374" + } + test { + key_range { + start_open { + values { + number_value: 1.7976931348623157e+308 + } + } + end_closed { + values { + number_value: -1.7976931348623157e+308 + } + } + } + start: 
"A\206\310\002\273\322\000 \000\000\000\000\000\001" + limit: "A\206\310\002\273\341\377\337\377\377\377\377\377\375" + } + test { + key_range { + start_closed { + values { + number_value: 1.7976931348623157e+308 + } + } + end_closed { + values { + number_value: -1.7976931348623157e+308 + } + } + } + start: "A\206\310\002\273\322\000 \000\000\000\000\000\000" + limit: "A\206\310\002\273\341\377\337\377\377\377\377\377\375" + } + test { + key_range { + start_open { + values { + number_value: 1.7976931348623157e+308 + } + } + end_open { + values { + number_value: -1.7976931348623157e+308 + } + } + } + start: "A\206\310\002\273\322\000 \000\000\000\000\000\001" + limit: "A\206\310\002\273\341\377\337\377\377\377\377\377\374" + } +} + +test_case { + name: "DataTypeTest_TIMESTAMP" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "DataTypeTest_TIMESTAMP" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: TIMESTAMP + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "0001-01-01T00:00:00Z" + } + } + start: "A\206\310\002\234\231\177\377\020\377\020\361\210n\t\000\360\000\360\000\360\000\360\000\360\000x" + } + test { + key { + values { + string_value: "9999-12-31T23:59:59.999999999Z" + } + } + start: "A\206\310\002\234\231\200\000\360\000\360:\377\020\364A\177;\232\311\377\020\000x" + } + test { + key { + values { + null_value: NULL_VALUE + } + } + start: "A\206\310\002\233\000" + } + test { + key { + values { + string_value: "1970-01-01T00:00:00Z" + } + } + start: "A\206\310\002\234\231\200\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000x" + } + test { + key { + values { + string_value: "2023-10-26T10:00:00Z" + } + } + start: "A\206\310\002\234\231\200\000\360\000\360\000\360e:8\240\000\360\000\360\000\360\000\360\000x" + } + test { + key { + values { + string_value: "2023-10-26T10:00:00.1234567890Z" + } + } + start: 
"A\206\310\002\234\231\200\000\360\000\360\000\360e:8\240\007[\315\025\000x" + } + test { + key { + values { + string_value: "2023-10-26T10:00:00.1234567891Z" + } + } + start: "A\206\310\002\234\231\200\000\360\000\360\000\360e:8\240\007[\315\025\000x" + } + test { + key { + values { + string_value: "2023-10-26T10:00:00.1234567899Z" + } + } + start: "A\206\310\002\234\231\200\000\360\000\360\000\360e:8\240\007[\315\025\000x" + } + test { + key { + values { + string_value: "0000-10-26T10:00:00Z" + } + } + start: "A\206\310\002\234\231\177\377\020\377\020\361\210\026A \000\360\000\360\000\360\000\360\000x" + } + test { + key { + values { + string_value: "NOT A TIMESTAMP" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + number_value: 0 + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "2023-10-26T10:00:00" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "2023-10-26T10:00:00z" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "2023-10-26T10:00:00+07:00" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "2023-13-26T10:00:00Z" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "2023-10-26T10:00:61Z" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "2023-10-26 10:00:00Z" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "10000-10-26T10:00:00Z" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key_range { + start_closed { + values { + 
string_value: "0001-01-01T00:00:00Z" + } + } + end_open { + values { + string_value: "9999-12-31T23:59:59.999999999Z" + } + } + } + start: "A\206\310\002\234\231\177\377\020\377\020\361\210n\t\000\360\000\360\000\360\000\360\000\360\000x" + limit: "A\206\310\002\234\231\200\000\360\000\360:\377\020\364A\177;\232\311\377\020\000x" + } + test { + key_range { + start_open { + values { + string_value: "0001-01-01T00:00:00Z" + } + } + end_closed { + values { + string_value: "9999-12-31T23:59:59.999999999Z" + } + } + } + start: "A\206\310\002\234\231\177\377\020\377\020\361\210n\t\000\360\000\360\000\360\000\360\000\360\000y" + limit: "A\206\310\002\234\231\200\000\360\000\360:\377\020\364A\177;\232\311\377\020\000y" + } + test { + key_range { + start_closed { + values { + string_value: "0001-01-01T00:00:00Z" + } + } + end_closed { + values { + string_value: "9999-12-31T23:59:59.999999999Z" + } + } + } + start: "A\206\310\002\234\231\177\377\020\377\020\361\210n\t\000\360\000\360\000\360\000\360\000\360\000x" + limit: "A\206\310\002\234\231\200\000\360\000\360:\377\020\364A\177;\232\311\377\020\000y" + } + test { + key_range { + start_open { + values { + string_value: "0001-01-01T00:00:00Z" + } + } + end_open { + values { + string_value: "9999-12-31T23:59:59.999999999Z" + } + } + } + start: "A\206\310\002\234\231\177\377\020\377\020\361\210n\t\000\360\000\360\000\360\000\360\000\360\000y" + limit: "A\206\310\002\234\231\200\000\360\000\360:\377\020\364A\177;\232\311\377\020\000x" + } +} + +test_case { + name: "DataTypeTest_TIMESTAMP_Desc" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "DataTypeTest_TIMESTAMP_Desc" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: DESCENDING + null_order: NULLS_LAST + type { + code: TIMESTAMP + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "9999-12-31T23:59:59.999999999Z" + } + } + start: 
"A\206\310\002\273\271\177\377\020\377\020\305\000\360\013\276\200\304e6\000\360\377x" + } + test { + key { + values { + string_value: "0001-01-01T00:00:00Z" + } + } + start: "A\206\310\002\273\271\200\000\360\000\360\016w\221\366\377\020\377\020\377\020\377\020\377\020\377x" + } + test { + key_range { + start_closed { + values { + string_value: "9999-12-31T23:59:59.999999999Z" + } + } + end_open { + values { + string_value: "0001-01-01T00:00:00Z" + } + } + } + start: "A\206\310\002\273\271\177\377\020\377\020\305\000\360\013\276\200\304e6\000\360\377x" + limit: "A\206\310\002\273\271\200\000\360\000\360\016w\221\366\377\020\377\020\377\020\377\020\377\020\377x" + } + test { + key_range { + start_open { + values { + string_value: "9999-12-31T23:59:59.999999999Z" + } + } + end_closed { + values { + string_value: "0001-01-01T00:00:00Z" + } + } + } + start: "A\206\310\002\273\271\177\377\020\377\020\305\000\360\013\276\200\304e6\000\360\377y" + limit: "A\206\310\002\273\271\200\000\360\000\360\016w\221\366\377\020\377\020\377\020\377\020\377\020\377y" + } + test { + key_range { + start_closed { + values { + string_value: "9999-12-31T23:59:59.999999999Z" + } + } + end_closed { + values { + string_value: "0001-01-01T00:00:00Z" + } + } + } + start: "A\206\310\002\273\271\177\377\020\377\020\305\000\360\013\276\200\304e6\000\360\377x" + limit: "A\206\310\002\273\271\200\000\360\000\360\016w\221\366\377\020\377\020\377\020\377\020\377\020\377y" + } + test { + key_range { + start_open { + values { + string_value: "9999-12-31T23:59:59.999999999Z" + } + } + end_open { + values { + string_value: "0001-01-01T00:00:00Z" + } + } + } + start: "A\206\310\002\273\271\177\377\020\377\020\305\000\360\013\276\200\304e6\000\360\377y" + limit: "A\206\310\002\273\271\200\000\360\000\360\016w\221\366\377\020\377\020\377\020\377\020\377\020\377x" + } +} + +test_case { + name: "DataTypeTest_DATE" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "DataTypeTest_DATE" + part 
{ + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: DATE + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "0000-01-01" + } + } + start: "A\206\310\002\234\216\352\n\260" + } + test { + key { + values { + string_value: "9999-12-31" + } + } + start: "A\206\310\002\234\223Y\201@" + } + test { + key { + values { + null_value: NULL_VALUE + } + } + start: "A\206\310\002\233\000" + } + test { + key { + values { + string_value: "1970-01-01" + } + } + start: "A\206\310\002\234\221\000" + } + test { + key { + values { + string_value: "2023-10-26" + } + } + start: "A\206\310\002\234\222\231\220" + } + test { + key { + values { + string_value: "NOT A DATE" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + number_value: 0 + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "2023-13-01" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "2023-12-32" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "10000-01-01" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "2023-1-1" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "2023-01-001" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "2023/01/01" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "2023-01-01T10:00:00Z" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key_range { + start_closed { + values { + 
string_value: "0000-01-01" + } + } + end_open { + values { + string_value: "9999-12-31" + } + } + } + start: "A\206\310\002\234\216\352\n\260" + limit: "A\206\310\002\234\223Y\201@" + } + test { + key_range { + start_open { + values { + string_value: "0000-01-01" + } + } + end_closed { + values { + string_value: "9999-12-31" + } + } + } + start: "A\206\310\002\234\216\352\n\261" + limit: "A\206\310\002\234\223Y\201A" + } + test { + key_range { + start_closed { + values { + string_value: "0000-01-01" + } + } + end_closed { + values { + string_value: "9999-12-31" + } + } + } + start: "A\206\310\002\234\216\352\n\260" + limit: "A\206\310\002\234\223Y\201A" + } + test { + key_range { + start_open { + values { + string_value: "0000-01-01" + } + } + end_open { + values { + string_value: "9999-12-31" + } + } + } + start: "A\206\310\002\234\216\352\n\261" + limit: "A\206\310\002\234\223Y\201@" + } +} + +test_case { + name: "DataTypeTest_DATE_Desc" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "DataTypeTest_DATE_Desc" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: DESCENDING + null_order: NULLS_LAST + type { + code: DATE + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "9999-12-31" + } + } + start: "A\206\310\002\273\256\246~\276" + } + test { + key { + values { + string_value: "0000-01-01" + } + } + start: "A\206\310\002\273\263\025\365N" + } + test { + key_range { + start_closed { + values { + string_value: "9999-12-31" + } + } + end_open { + values { + string_value: "0000-01-01" + } + } + } + start: "A\206\310\002\273\256\246~\276" + limit: "A\206\310\002\273\263\025\365N" + } + test { + key_range { + start_open { + values { + string_value: "9999-12-31" + } + } + end_closed { + values { + string_value: "0000-01-01" + } + } + } + start: "A\206\310\002\273\256\246~\277" + limit: "A\206\310\002\273\263\025\365O" + } + test { + key_range { + start_closed { + values { + string_value: "9999-12-31" + } + } + 
end_closed { + values { + string_value: "0000-01-01" + } + } + } + start: "A\206\310\002\273\256\246~\276" + limit: "A\206\310\002\273\263\025\365O" + } + test { + key_range { + start_open { + values { + string_value: "9999-12-31" + } + } + end_open { + values { + string_value: "0000-01-01" + } + } + } + start: "A\206\310\002\273\256\246~\277" + limit: "A\206\310\002\273\263\025\365N" + } +} + +test_case { + name: "DataTypeTest_STRING" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "DataTypeTest_STRING" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "" + } + } + start: "A\206\310\002\234\231\000x" + } + test { + key { + values { + string_value: "ZZZZZZZ" + } + } + start: "A\206\310\002\234\231ZZZZZZZ\000x" + } + test { + key { + values { + null_value: NULL_VALUE + } + } + start: "A\206\310\002\233\000" + } + test { + key_range { + start_closed { + values { + string_value: "" + } + } + end_open { + values { + string_value: "ZZZZZZZ" + } + } + } + start: "A\206\310\002\234\231\000x" + limit: "A\206\310\002\234\231ZZZZZZZ\000x" + } + test { + key_range { + start_open { + values { + string_value: "" + } + } + end_closed { + values { + string_value: "ZZZZZZZ" + } + } + } + start: "A\206\310\002\234\231\000y" + limit: "A\206\310\002\234\231ZZZZZZZ\000y" + } + test { + key_range { + start_closed { + values { + string_value: "" + } + } + end_closed { + values { + string_value: "ZZZZZZZ" + } + } + } + start: "A\206\310\002\234\231\000x" + limit: "A\206\310\002\234\231ZZZZZZZ\000y" + } + test { + key_range { + start_open { + values { + string_value: "" + } + } + end_open { + values { + string_value: "ZZZZZZZ" + } + } + } + start: "A\206\310\002\234\231\000y" + limit: "A\206\310\002\234\231ZZZZZZZ\000x" + } +} + +test_case { + name: "DataTypeTest_STRING_Desc" + recipes { + schema_generation: 
"\001\001" + recipe { + table_name: "DataTypeTest_STRING_Desc" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: DESCENDING + null_order: NULLS_LAST + type { + code: STRING + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "ZZZZZZZ" + } + } + start: "A\206\310\002\273\271\245\245\245\245\245\245\245\377x" + } + test { + key { + values { + string_value: "" + } + } + start: "A\206\310\002\273\271\377x" + } + test { + key_range { + start_closed { + values { + string_value: "ZZZZZZZ" + } + } + end_open { + values { + string_value: "" + } + } + } + start: "A\206\310\002\273\271\245\245\245\245\245\245\245\377x" + limit: "A\206\310\002\273\271\377x" + } + test { + key_range { + start_open { + values { + string_value: "ZZZZZZZ" + } + } + end_closed { + values { + string_value: "" + } + } + } + start: "A\206\310\002\273\271\245\245\245\245\245\245\245\377y" + limit: "A\206\310\002\273\271\377y" + } + test { + key_range { + start_closed { + values { + string_value: "ZZZZZZZ" + } + } + end_closed { + values { + string_value: "" + } + } + } + start: "A\206\310\002\273\271\245\245\245\245\245\245\245\377x" + limit: "A\206\310\002\273\271\377y" + } + test { + key_range { + start_open { + values { + string_value: "ZZZZZZZ" + } + } + end_open { + values { + string_value: "" + } + } + } + start: "A\206\310\002\273\271\245\245\245\245\245\245\245\377y" + limit: "A\206\310\002\273\271\377x" + } +} + +test_case { + name: "DataTypeTest_BYTES" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "DataTypeTest_BYTES" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: BYTES + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "" + } + } + start: "A\206\310\002\234\231\000x" + } + test { + key { + values { + string_value: "/////w==" + } + } + start: "A\206\310\002\234\231\377\020\377\020\377\020\377\020\000x" + } + test { + key { + values 
{ + null_value: NULL_VALUE + } + } + start: "A\206\310\002\233\000" + } + test { + key { + values { + string_value: "" + } + } + start: "A\206\310\002\234\231\000x" + } + test { + key_range { + start_closed { + values { + string_value: "" + } + } + end_open { + values { + string_value: "/////w==" + } + } + } + start: "A\206\310\002\234\231\000x" + limit: "A\206\310\002\234\231\377\020\377\020\377\020\377\020\000x" + } + test { + key_range { + start_open { + values { + string_value: "" + } + } + end_closed { + values { + string_value: "/////w==" + } + } + } + start: "A\206\310\002\234\231\000y" + limit: "A\206\310\002\234\231\377\020\377\020\377\020\377\020\000y" + } + test { + key_range { + start_closed { + values { + string_value: "" + } + } + end_closed { + values { + string_value: "/////w==" + } + } + } + start: "A\206\310\002\234\231\000x" + limit: "A\206\310\002\234\231\377\020\377\020\377\020\377\020\000y" + } + test { + key_range { + start_open { + values { + string_value: "" + } + } + end_open { + values { + string_value: "/////w==" + } + } + } + start: "A\206\310\002\234\231\000y" + limit: "A\206\310\002\234\231\377\020\377\020\377\020\377\020\000x" + } +} + +test_case { + name: "DataTypeTest_BYTES_Desc" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "DataTypeTest_BYTES_Desc" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: DESCENDING + null_order: NULLS_LAST + type { + code: BYTES + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "/////w==" + } + } + start: "A\206\310\002\273\271\000\360\000\360\000\360\000\360\377x" + } + test { + key { + values { + string_value: "" + } + } + start: "A\206\310\002\273\271\377x" + } + test { + key_range { + start_closed { + values { + string_value: "/////w==" + } + } + end_open { + values { + string_value: "" + } + } + } + start: "A\206\310\002\273\271\000\360\000\360\000\360\000\360\377x" + limit: "A\206\310\002\273\271\377x" + } + test { + key_range { + 
start_open { + values { + string_value: "/////w==" + } + } + end_closed { + values { + string_value: "" + } + } + } + start: "A\206\310\002\273\271\000\360\000\360\000\360\000\360\377y" + limit: "A\206\310\002\273\271\377y" + } + test { + key_range { + start_closed { + values { + string_value: "/////w==" + } + } + end_closed { + values { + string_value: "" + } + } + } + start: "A\206\310\002\273\271\000\360\000\360\000\360\000\360\377x" + limit: "A\206\310\002\273\271\377y" + } + test { + key_range { + start_open { + values { + string_value: "/////w==" + } + } + end_open { + values { + string_value: "" + } + } + } + start: "A\206\310\002\273\271\000\360\000\360\000\360\000\360\377y" + limit: "A\206\310\002\273\271\377x" + } +} + +test_case { + name: "NumericBasic" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "NumericBasic" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: NUMERIC + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "123" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + number_value: 123 + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } +} + +test_case { + name: "NumericMultiPart" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "NumericMultiPart" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: INT64 + } + identifier: "user_id" + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: NUMERIC + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "123" + } + values { + string_value: "456" + } + } + start: "A\206\310\002\234\221\366" + limit: "A\206\310\002\234\221\367" + approximate: true + } +} + +test_case { + name: "DataTypeTest_UUID" + recipes { + schema_generation: "\001\001" + recipe { + table_name: 
"DataTypeTest_UUID" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: UUID + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "00000000-0000-0000-0000-000000000000" + } + } + start: "A\206\310\002\234\231\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000x" + } + test { + key { + values { + string_value: "ffffffff-ffff-ffff-ffff-ffffffffffff" + } + } + start: "A\206\310\002\234\231\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\000x" + } + test { + key { + values { + null_value: NULL_VALUE + } + } + start: "A\206\310\002\233\000" + } + test { + key { + values { + string_value: "00000000-0000-0000-0000-000000000000" + } + } + start: "A\206\310\002\234\231\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000x" + } + test { + key { + values { + string_value: "ffffffff-ffff-ffff-ffff-ffffffffffff" + } + } + start: "A\206\310\002\234\231\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\000x" + } + test { + key { + values { + string_value: "12345678-1234-1234-1234-1234567890ab" + } + } + start: "A\206\310\002\234\231\0224Vx\0224\0224\0224\0224Vx\220\253\000x" + } + test { + key { + values { + string_value: "12345678-1234-1234-1234-1234567890AB" + } + } + start: "A\206\310\002\234\231\0224Vx\0224\0224\0224\0224Vx\220\253\000x" + } + test { + key { + values { + string_value: "{12345678-1234-1234-1234-1234567890ad}" + } + } + start: "A\206\310\002\234\231\0224Vx\0224\0224\0224\0224Vx\220\255\000x" + } + test { + key { + values { + string_value: "{FFFFFFFF-FFFF-FFFF-FFFF-FFFFFFFFFFFF}" + } + } + start: 
"A\206\310\002\234\231\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\000x" + } + test { + key { + values { + string_value: "NOT A UUID" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + number_value: 0 + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "12345678x1234-1234-1234-1234567890ab" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "12345678-1234-1234-1234-1234567890a" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "12345678-1234-1234-1234-1234567890abc" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "12345678-1234-1234-1234-1234567890ag" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "123456781234-1234-1234-1234567890ab" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "12345678-12341234-1234-1234567890ab" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "12345678-1234-12341234-1234567890ab" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "12345678-1234-1234-12341234567890ab" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "-12345678-1234-1234-1234-1234567890ab" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "12345678-1234-1234-1234-1234567890ab-" + } + 
} + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "12345678--1234-1234-1234-1234567890ab" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "{12345678-1234-1234-1234-1234567890ab" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "12345678-1234-1234-1234-1234567890ab}" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "{{12345678-1234-1234-1234-1234567890ab}}" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key { + values { + string_value: "12345678-{1234-1234-1234}-1234567890ab" + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } + test { + key_range { + start_closed { + values { + string_value: "00000000-0000-0000-0000-000000000000" + } + } + end_open { + values { + string_value: "ffffffff-ffff-ffff-ffff-ffffffffffff" + } + } + } + start: "A\206\310\002\234\231\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000x" + limit: "A\206\310\002\234\231\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\000x" + } + test { + key_range { + start_open { + values { + string_value: "00000000-0000-0000-0000-000000000000" + } + } + end_closed { + values { + string_value: "ffffffff-ffff-ffff-ffff-ffffffffffff" + } + } + } + start: "A\206\310\002\234\231\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000y" + limit: "A\206\310\002\234\231\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\000y" + } + test 
{ + key_range { + start_closed { + values { + string_value: "00000000-0000-0000-0000-000000000000" + } + } + end_closed { + values { + string_value: "ffffffff-ffff-ffff-ffff-ffffffffffff" + } + } + } + start: "A\206\310\002\234\231\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000x" + limit: "A\206\310\002\234\231\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\000y" + } + test { + key_range { + start_open { + values { + string_value: "00000000-0000-0000-0000-000000000000" + } + } + end_open { + values { + string_value: "ffffffff-ffff-ffff-ffff-ffffffffffff" + } + } + } + start: "A\206\310\002\234\231\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000y" + limit: "A\206\310\002\234\231\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\000x" + } +} + +test_case { + name: "DataTypeTest_UUID_Desc" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "DataTypeTest_UUID_Desc" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: DESCENDING + null_order: NULLS_LAST + type { + code: UUID + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "ffffffff-ffff-ffff-ffff-ffffffffffff" + } + } + start: "A\206\310\002\273\271\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\377x" + } + test { + key { + values { + string_value: "00000000-0000-0000-0000-000000000000" + } + } + start: "A\206\310\002\273\271\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377x" + } + test { + key_range { + start_closed { + values { + string_value: 
"ffffffff-ffff-ffff-ffff-ffffffffffff" + } + } + end_open { + values { + string_value: "00000000-0000-0000-0000-000000000000" + } + } + } + start: "A\206\310\002\273\271\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\377x" + limit: "A\206\310\002\273\271\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377x" + } + test { + key_range { + start_open { + values { + string_value: "ffffffff-ffff-ffff-ffff-ffffffffffff" + } + } + end_closed { + values { + string_value: "00000000-0000-0000-0000-000000000000" + } + } + } + start: "A\206\310\002\273\271\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\377y" + limit: "A\206\310\002\273\271\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377y" + } + test { + key_range { + start_closed { + values { + string_value: "ffffffff-ffff-ffff-ffff-ffffffffffff" + } + } + end_closed { + values { + string_value: "00000000-0000-0000-0000-000000000000" + } + } + } + start: "A\206\310\002\273\271\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\377x" + limit: "A\206\310\002\273\271\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377y" + } + test { + key_range { + start_open { + values { + string_value: "ffffffff-ffff-ffff-ffff-ffffffffffff" + } + } + end_open { + values { + string_value: "00000000-0000-0000-0000-000000000000" + } + } + } + start: "A\206\310\002\273\271\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\000\360\377y" + limit: 
"A\206\310\002\273\271\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377\020\377x" + } +} + +test_case { + name: "NotNull" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "NotNull" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NOT_NULL + type { + code: STRING + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "" + } + } + start: "A\206\310\002\231\000x" + } + test { + key { + values { + string_value: "foo" + } + } + start: "A\206\310\002\231foo\000x" + } + test { + key { + values { + null_value: NULL_VALUE + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } +} + +test_case { + name: "NullsLast" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "NullsLast" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_LAST + type { + code: STRING + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "" + } + } + start: "A\206\310\002\273\231\000x" + } + test { + key { + values { + string_value: "foo" + } + } + start: "A\206\310\002\273\231foo\000x" + } + test { + key { + values { + null_value: NULL_VALUE + } + } + start: "A\206\310\002\274\000" + } +} + +test_case { + name: "MultiPart" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "MultiPart" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "k1" + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: INT64 + } + identifier: "k2" + } + } + } + test { + key { + values { + string_value: "foo" + } + values { + string_value: "8" + } + } + start: "A\206\310\002\234\231foo\000x\234\221\020" + } + test { + key { + values { + string_value: "foo" + } + values { + null_value: NULL_VALUE + } + } + start: 
"A\206\310\002\234\231foo\000x\233\000" + } + test { + key { + values { + null_value: NULL_VALUE + } + values { + string_value: "8" + } + } + start: "A\206\310\002\233\000\234\221\020" + } + test { + key { + values { + null_value: NULL_VALUE + } + values { + null_value: NULL_VALUE + } + } + start: "A\206\310\002\233\000\233\000" + } + test { + key_range { + start_closed { + values { + string_value: "A" + } + } + end_closed { + values { + string_value: "Z" + } + } + } + start: "A\206\310\002\234\231A\000x" + limit: "A\206\310\002\234\231Z\000y" + } + test { + key_range { + start_closed { + values { + string_value: "A" + } + values { + string_value: "4" + } + } + end_closed { + values { + string_value: "A" + } + values { + string_value: "7" + } + } + } + start: "A\206\310\002\234\231A\000x\234\221\010" + limit: "A\206\310\002\234\231A\000x\234\221\017" + } +} + +test_case { + name: "Interleaved" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "C" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "k" + } + part { + tag: 2 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: INT64 + } + identifier: "k2" + } + } + } + test { + key { + values { + string_value: "foo" + } + values { + string_value: "99" + } + } + start: "A\206\310\002\234\231foo\000x\004\234\221\306" + } + test { + key { + values { + string_value: "foo" + } + values { + null_value: NULL_VALUE + } + } + start: "A\206\310\002\234\231foo\000x\004\233\000" + } + test { + key { + values { + null_value: NULL_VALUE + } + values { + string_value: "99" + } + } + start: "A\206\310\002\233\000\004\234\221\306" + } + test { + key { + values { + null_value: NULL_VALUE + } + values { + null_value: NULL_VALUE + } + } + start: "A\206\310\002\233\000\004\233\000" + } + test { + key_range { + start_closed { + values { + string_value: "A" + } + } + end_closed { + values { + string_value: 
"Z" + } + } + } + start: "A\206\310\002\234\231A\000x\004" + limit: "A\206\310\002\234\231Z\000x\005" + } + test { + key_range { + start_closed { + values { + string_value: "A" + } + values { + string_value: "4" + } + } + end_closed { + values { + string_value: "A" + } + values { + string_value: "7" + } + } + } + start: "A\206\310\002\234\231A\000x\004\234\221\010" + limit: "A\206\310\002\234\231A\000x\004\234\221\017" + } +} + +test_case { + name: "GeneratedKeyColumns" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "T" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "k" + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: INT64 + } + identifier: "k3" + } + } + } + test { + key { + values { + string_value: "foo" + } + values { + string_value: "99" + } + } + start: "A\206\310\002\234\231foo\000x\234\221\306" + } + test { + key { + values { + string_value: "foo" + } + values { + null_value: NULL_VALUE + } + } + start: "A\206\310\002\234\231foo\000x\233\000" + } + test { + key { + values { + null_value: NULL_VALUE + } + values { + string_value: "99" + } + } + start: "A\206\310\002\233\000\234\221\306" + } + test { + key { + values { + null_value: NULL_VALUE + } + values { + null_value: NULL_VALUE + } + } + start: "A\206\310\002\233\000\233\000" + } + test { + key_range { + start_closed { + values { + string_value: "A" + } + values { + string_value: "4" + } + } + end_closed { + values { + string_value: "A" + } + values { + string_value: "7" + } + } + } + start: "A\206\310\002\234\231A\000x\234\221\010" + limit: "A\206\310\002\234\231A\000x\234\221\017" + } +} + +test_case { + name: "GlobalIndex" + recipes { + schema_generation: "\001\001" + recipe { + index_name: "I" + part { + tag: 1 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: INT64 + } + identifier: "k2" + } + part { + order: 
ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "8" + } + } + start: "\002\002\234\221\020" + limit: "\002\002\234\221\021" + } + test { + key { + values { + null_value: NULL_VALUE + } + } + start: "\002\002\233\000" + limit: "\002\002\233\001" + } +} + +test_case { + name: "LocalIndex" + recipes { + schema_generation: "\001\001" + recipe { + index_name: "I" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "k" + } + part { + tag: 3 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: INT64 + } + identifier: "k3" + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: INT64 + } + identifier: "k2" + } + } + } + test { + key { + values { + string_value: "foo" + } + values { + string_value: "8" + } + } + start: "A\206\310\002\234\231foo\000x\006\234\221\020" + limit: "A\206\310\002\234\231foo\000x\006\234\221\021" + } + test { + key { + values { + string_value: "foo" + } + values { + null_value: NULL_VALUE + } + } + start: "A\206\310\002\234\231foo\000x\006\233\000" + limit: "A\206\310\002\234\231foo\000x\006\233\001" + } +} + +test_case { + name: "KeySet" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "KeySet" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: INT64 + } + identifier: "k" + } + } + } + test { + key_set { + keys { + values { + string_value: "99" + } + } + } + start: "A\206\310\002\234\221\306" + } + test { + key_set { + ranges { + start_closed { + values { + string_value: "1" + } + } + end_open { + values { + string_value: "10" + } + } + } + } + start: "A\206\310\002\234\221\002" + limit: "A\206\310\002\234\221\024" + } + test { + key_set { + keys { + values { + string_value: "99" + } + } + keys { + values { + string_value: "101" + } + } + } + 
start: "A\206\310\002\234\221\306" + limit: "A\206\310\002\234\221\313" + } + test { + key_set { + ranges { + start_closed { + values { + string_value: "1" + } + } + end_open { + values { + string_value: "10" + } + } + } + ranges { + start_closed { + values { + string_value: "20" + } + } + end_open { + values { + string_value: "30" + } + } + } + } + start: "A\206\310\002\234\221\002" + limit: "A\206\310\002\234\221<" + } + test { + key_set { + keys { + values { + string_value: "1" + } + } + ranges { + start_closed { + values { + string_value: "5" + } + } + end_open { + values { + string_value: "10" + } + } + } + } + start: "A\206\310\002\234\221\002" + limit: "A\206\310\002\234\221\024" + } + test { + key_set { + keys { + values { + string_value: "10" + } + } + ranges { + start_closed { + values { + string_value: "5" + } + } + end_open { + values { + string_value: "10" + } + } + } + } + start: "A\206\310\002\234\221\n" + limit: "A\206\310\002\234\221\025" + } +} + +test_case { + name: "KeySet_All" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "T" + part { + tag: 50020 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "k" + } + } + } + test { + key_set { + all: true + } + start: "A\206\310" + limit: "A\206\311" + } +} + +test_case { + name: "InvalidRecipe_EmptyPart" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "BadRecipe" + part { + tag: 50020 + } + part { + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "A" + } + } + start: "A\206\310" + limit: "A\206\311" + approximate: true + } +} + +test_case { + name: "InvalidRecipe_BadOrder" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "BadRecipe" + part { + tag: 50020 + } + part { + order: 99 + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "k1" + } + part { + order: ASCENDING + 
null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "A" + } + } + start: "A\206\310" + limit: "A\206\311" + approximate: true + } +} + +test_case { + name: "InvalidRecipe_BadNullOrder" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "BadRecipe" + part { + tag: 50020 + } + part { + order: ASCENDING + null_order: 99 + type { + code: STRING + } + identifier: "k1" + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "A" + } + } + start: "A\206\310" + limit: "A\206\311" + approximate: true + } +} + +test_case { + name: "InvalidRecipe_BadType" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "BadRecipe" + part { + tag: 50020 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: TOKENLIST + } + identifier: "k1" + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "k" + } + } + } + test { + key { + values { + string_value: "A" + } + } + start: "A\206\310" + limit: "A\206\311" + approximate: true + } +} + +test_case { + name: "SimpleMutations" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "SimpleMutations" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: INT64 + } + identifier: "k" + } + } + } + test { + mutation { + insert { + table: "SimpleMutations" + columns: "k" + values { + values { + string_value: "80" + } + } + } + } + start: "A\206\310\002\234\221\240" + limit: "A\206\310\002\234\221\241" + } + test { + mutation { + update { + table: "SimpleMutations" + columns: "k" + values { + values { + string_value: "80" + } + } + } + } + start: "A\206\310\002\234\221\240" + limit: "A\206\310\002\234\221\241" + } + test { + mutation { + insert_or_update { + table: "SimpleMutations" + columns: "k" + 
values { + values { + string_value: "80" + } + } + } + } + start: "A\206\310\002\234\221\240" + limit: "A\206\310\002\234\221\241" + } + test { + mutation { + replace { + table: "SimpleMutations" + columns: "k" + values { + values { + string_value: "80" + } + } + } + } + start: "A\206\310\002\234\221\240" + limit: "A\206\310\002\234\221\241" + } + test { + mutation { + delete { + table: "SimpleMutations" + key_set { + keys { + values { + string_value: "80" + } + } + } + } + } + start: "A\206\310\002\234\221\240" + limit: "A\206\310\002\234\221\241" + } + test { + mutation { + delete { + table: "SimpleMutations" + key_set { + ranges { + start_closed { + values { + string_value: "80" + } + } + end_open { + values { + string_value: "100" + } + } + } + } + } + } + start: "A\206\310\002\234\221\240" + limit: "A\206\310\002\234\221\310" + } +} + +test_case { + name: "QueueMutations" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "Q" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: INT64 + } + identifier: "k" + } + } + } + test { + mutation { + send { + queue: "Q" + key { + values { + string_value: "80" + } + } + payload { + string_value: "" + } + } + } + start: "A\206\310\002\234\221\240" + limit: "A\206\310\002\234\221\241" + } + test { + mutation { + ack { + queue: "Q" + key { + values { + string_value: "80" + } + } + } + } + start: "A\206\310\002\234\221\240" + limit: "A\206\310\002\234\221\241" + } +} + +test_case { + name: "CustomMutationCases" + recipes { + schema_generation: "\001\001" + recipe { + table_name: "T" + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "k" + } + } + } + test { + mutation { + } + start: "" + limit: "\377" + approximate: true + } + test { + mutation { + delete { + key_set { + all: true + } + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + } + 
test { + mutation { + delete { + key_set { + keys { + values { + string_value: "123" + } + } + keys { + values { + string_value: "456" + } + } + } + } + } + start: "A\206\310\002\234\231123\000x" + limit: "A\206\310\002\234\231456\000y" + } + test { + mutation { + delete { + key_set { + ranges { + start_closed { + values { + string_value: "123" + } + } + end_open { + values { + string_value: "456" + } + } + } + ranges { + start_closed { + values { + string_value: "100" + } + } + end_open { + values { + string_value: "200" + } + } + } + ranges { + start_closed { + values { + string_value: "150" + } + } + end_open { + values { + string_value: "500" + } + } + } + } + } + } + start: "A\206\310\002\234\231100\000x" + limit: "A\206\310\002\234\231500\000x" + } + test { + mutation { + delete { + key_set { + ranges { + start_closed { + values { + string_value: "123" + } + } + end_open { + values { + string_value: "456" + } + } + } + all: true + } + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + } + test { + mutation { + delete { + key_set { + keys { + values { + string_value: "123" + } + } + keys { + values { + number_value: 456 + } + } + } + } + } + start: "A\206\310\002" + limit: "A\206\310\003" + approximate: true + } +} + +test_case { + name: "QueryEncoding" + recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 6 + part { + tag: 50020 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "p1" + } + part { + order: ASCENDING + null_order: NULLS_FIRST + type { + code: STRING + } + identifier: "p0" + } + } + } + test { + query_params { + fields { + key: "p0" + value { + string_value: "foo" + } + } + fields { + key: "p1" + value { + string_value: "bar" + } + } + } + start: "A\206\310\002\234\231bar\000x\234\231foo\000x" + } + test { + query_params { + fields { + key: "p1" + value { + string_value: "bar" + } + } + } + start: "A\206\310\002\234\231bar\000x" + limit: 
"A\206\310\002\234\231bar\000y" + approximate: true + } +} + +test_case { + name: "RandomQueryroot" + recipes { + schema_generation: "\001\001" + recipe { + operation_uid: 7 + part { + tag: 50016 + } + part { + tag: 1 + } + part { + order: ASCENDING + null_order: NOT_NULL + type { + code: INT64 + } + random: true + } + } + } + test { + query_params { + } + start: "A\206\300\002\230\327\342\351\276\316\214%$" + } +} \ No newline at end of file diff --git a/java-spanner/grpc-google-cloud-spanner-admin-database-v1/clirr-ignored-differences.xml b/java-spanner/grpc-google-cloud-spanner-admin-database-v1/clirr-ignored-differences.xml new file mode 100644 index 000000000000..80e6f1d59cbd --- /dev/null +++ b/java-spanner/grpc-google-cloud-spanner-admin-database-v1/clirr-ignored-differences.xml @@ -0,0 +1,9 @@ + + + + + 7012 + com/google/spanner/admin/database/v1/* + * + + diff --git a/java-spanner/grpc-google-cloud-spanner-admin-database-v1/pom.xml b/java-spanner/grpc-google-cloud-spanner-admin-database-v1/pom.xml new file mode 100644 index 000000000000..934c443ed7fc --- /dev/null +++ b/java-spanner/grpc-google-cloud-spanner-admin-database-v1/pom.xml @@ -0,0 +1,89 @@ + + 4.0.0 + com.google.api.grpc + grpc-google-cloud-spanner-admin-database-v1 + 6.112.1-SNAPSHOT + grpc-google-cloud-spanner-admin-database-v1 + GRPC library for grpc-google-cloud-spanner-admin-database-v1 + + com.google.cloud + google-cloud-spanner-parent + 6.112.1-SNAPSHOT + + + + io.grpc + grpc-api + + + io.grpc + grpc-stub + + + io.grpc + grpc-protobuf + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-cloud-spanner-admin-database-v1 + + + com.google.guava + guava + + + com.google.auto.value + auto-value-annotations + + + com.google.api.grpc + proto-google-iam-v1 + + + com.google.api.grpc + proto-google-common-protos + + + + + + java9 + + [9,) + + + + javax.annotation + javax.annotation-api + + + + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + 
com.google.auto.value:auto-value-annotations,javax.annotation:javax.annotation-api + + + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + + \ No newline at end of file diff --git a/java-spanner/grpc-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseAdminGrpc.java b/java-spanner/grpc-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseAdminGrpc.java new file mode 100644 index 000000000000..162a0cb9c529 --- /dev/null +++ b/java-spanner/grpc-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseAdminGrpc.java @@ -0,0 +1,4546 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.spanner.admin.database.v1; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + * + * + *
    + * Cloud Spanner Database Admin API
    + * The Cloud Spanner Database Admin API can be used to:
    + *   * create, drop, and list databases
    + *   * update the schema of pre-existing databases
    + *   * create, delete, copy and list backups for a database
    + *   * restore a database from an existing backup
    + * 
    + */ +@io.grpc.stub.annotations.GrpcGenerated +public final class DatabaseAdminGrpc { + + private DatabaseAdminGrpc() {} + + public static final java.lang.String SERVICE_NAME = + "google.spanner.admin.database.v1.DatabaseAdmin"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.ListDatabasesRequest, + com.google.spanner.admin.database.v1.ListDatabasesResponse> + getListDatabasesMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListDatabases", + requestType = com.google.spanner.admin.database.v1.ListDatabasesRequest.class, + responseType = com.google.spanner.admin.database.v1.ListDatabasesResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.ListDatabasesRequest, + com.google.spanner.admin.database.v1.ListDatabasesResponse> + getListDatabasesMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.ListDatabasesRequest, + com.google.spanner.admin.database.v1.ListDatabasesResponse> + getListDatabasesMethod; + if ((getListDatabasesMethod = DatabaseAdminGrpc.getListDatabasesMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getListDatabasesMethod = DatabaseAdminGrpc.getListDatabasesMethod) == null) { + DatabaseAdminGrpc.getListDatabasesMethod = + getListDatabasesMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ListDatabases")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.ListDatabasesRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.ListDatabasesResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("ListDatabases")) + .build(); + } + } + } + return getListDatabasesMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.CreateDatabaseRequest, + com.google.longrunning.Operation> + getCreateDatabaseMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateDatabase", + requestType = com.google.spanner.admin.database.v1.CreateDatabaseRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.CreateDatabaseRequest, + com.google.longrunning.Operation> + getCreateDatabaseMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.CreateDatabaseRequest, + com.google.longrunning.Operation> + getCreateDatabaseMethod; + if ((getCreateDatabaseMethod = DatabaseAdminGrpc.getCreateDatabaseMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getCreateDatabaseMethod = DatabaseAdminGrpc.getCreateDatabaseMethod) == null) { + DatabaseAdminGrpc.getCreateDatabaseMethod = + getCreateDatabaseMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CreateDatabase")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.CreateDatabaseRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("CreateDatabase")) + .build(); + } + } + } + return getCreateDatabaseMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.GetDatabaseRequest, + com.google.spanner.admin.database.v1.Database> + getGetDatabaseMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetDatabase", + requestType = com.google.spanner.admin.database.v1.GetDatabaseRequest.class, + responseType = com.google.spanner.admin.database.v1.Database.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.GetDatabaseRequest, + com.google.spanner.admin.database.v1.Database> + getGetDatabaseMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.GetDatabaseRequest, + com.google.spanner.admin.database.v1.Database> + getGetDatabaseMethod; + if ((getGetDatabaseMethod = DatabaseAdminGrpc.getGetDatabaseMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getGetDatabaseMethod = DatabaseAdminGrpc.getGetDatabaseMethod) == null) { + DatabaseAdminGrpc.getGetDatabaseMethod = + getGetDatabaseMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetDatabase")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.GetDatabaseRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.Database.getDefaultInstance())) + .setSchemaDescriptor(new DatabaseAdminMethodDescriptorSupplier("GetDatabase")) + .build(); + } + } + } + return getGetDatabaseMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.UpdateDatabaseRequest, + com.google.longrunning.Operation> + getUpdateDatabaseMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UpdateDatabase", + requestType = com.google.spanner.admin.database.v1.UpdateDatabaseRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.UpdateDatabaseRequest, + com.google.longrunning.Operation> + getUpdateDatabaseMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.UpdateDatabaseRequest, + com.google.longrunning.Operation> + getUpdateDatabaseMethod; + if ((getUpdateDatabaseMethod = DatabaseAdminGrpc.getUpdateDatabaseMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getUpdateDatabaseMethod = DatabaseAdminGrpc.getUpdateDatabaseMethod) == null) { + DatabaseAdminGrpc.getUpdateDatabaseMethod = + getUpdateDatabaseMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "UpdateDatabase")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.UpdateDatabaseRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("UpdateDatabase")) + .build(); + } + } + } + return getUpdateDatabaseMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest, + com.google.longrunning.Operation> + getUpdateDatabaseDdlMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UpdateDatabaseDdl", + requestType = com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest, + com.google.longrunning.Operation> + getUpdateDatabaseDdlMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest, + com.google.longrunning.Operation> + getUpdateDatabaseDdlMethod; + if ((getUpdateDatabaseDdlMethod = DatabaseAdminGrpc.getUpdateDatabaseDdlMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getUpdateDatabaseDdlMethod = DatabaseAdminGrpc.getUpdateDatabaseDdlMethod) == null) { + DatabaseAdminGrpc.getUpdateDatabaseDdlMethod = + getUpdateDatabaseDdlMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "UpdateDatabaseDdl")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("UpdateDatabaseDdl")) + .build(); + } + } + } + return getUpdateDatabaseDdlMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.DropDatabaseRequest, com.google.protobuf.Empty> + getDropDatabaseMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "DropDatabase", + requestType = com.google.spanner.admin.database.v1.DropDatabaseRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.DropDatabaseRequest, com.google.protobuf.Empty> + getDropDatabaseMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.DropDatabaseRequest, com.google.protobuf.Empty> + getDropDatabaseMethod; + if ((getDropDatabaseMethod = DatabaseAdminGrpc.getDropDatabaseMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getDropDatabaseMethod = DatabaseAdminGrpc.getDropDatabaseMethod) == null) { + DatabaseAdminGrpc.getDropDatabaseMethod = + getDropDatabaseMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "DropDatabase")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.DropDatabaseRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("DropDatabase")) + .build(); + } + } + } + return getDropDatabaseMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.GetDatabaseDdlRequest, + com.google.spanner.admin.database.v1.GetDatabaseDdlResponse> + getGetDatabaseDdlMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetDatabaseDdl", + requestType = com.google.spanner.admin.database.v1.GetDatabaseDdlRequest.class, + responseType = com.google.spanner.admin.database.v1.GetDatabaseDdlResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.GetDatabaseDdlRequest, + com.google.spanner.admin.database.v1.GetDatabaseDdlResponse> + getGetDatabaseDdlMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.GetDatabaseDdlRequest, + com.google.spanner.admin.database.v1.GetDatabaseDdlResponse> + getGetDatabaseDdlMethod; + if ((getGetDatabaseDdlMethod = DatabaseAdminGrpc.getGetDatabaseDdlMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getGetDatabaseDdlMethod = DatabaseAdminGrpc.getGetDatabaseDdlMethod) == null) { + DatabaseAdminGrpc.getGetDatabaseDdlMethod = + getGetDatabaseDdlMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetDatabaseDdl")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.GetDatabaseDdlRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.GetDatabaseDdlResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("GetDatabaseDdl")) + .build(); + } + } + } + return getGetDatabaseDdlMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.iam.v1.SetIamPolicyRequest, com.google.iam.v1.Policy> + getSetIamPolicyMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "SetIamPolicy", + requestType = com.google.iam.v1.SetIamPolicyRequest.class, + responseType = com.google.iam.v1.Policy.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.iam.v1.SetIamPolicyRequest, com.google.iam.v1.Policy> + getSetIamPolicyMethod() { + io.grpc.MethodDescriptor + getSetIamPolicyMethod; + if ((getSetIamPolicyMethod = DatabaseAdminGrpc.getSetIamPolicyMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getSetIamPolicyMethod = DatabaseAdminGrpc.getSetIamPolicyMethod) == null) { + DatabaseAdminGrpc.getSetIamPolicyMethod = + getSetIamPolicyMethod = + io.grpc.MethodDescriptor + .newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "SetIamPolicy")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.SetIamPolicyRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.Policy.getDefaultInstance())) + .setSchemaDescriptor( + 
new DatabaseAdminMethodDescriptorSupplier("SetIamPolicy")) + .build(); + } + } + } + return getSetIamPolicyMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.iam.v1.GetIamPolicyRequest, com.google.iam.v1.Policy> + getGetIamPolicyMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetIamPolicy", + requestType = com.google.iam.v1.GetIamPolicyRequest.class, + responseType = com.google.iam.v1.Policy.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.iam.v1.GetIamPolicyRequest, com.google.iam.v1.Policy> + getGetIamPolicyMethod() { + io.grpc.MethodDescriptor + getGetIamPolicyMethod; + if ((getGetIamPolicyMethod = DatabaseAdminGrpc.getGetIamPolicyMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getGetIamPolicyMethod = DatabaseAdminGrpc.getGetIamPolicyMethod) == null) { + DatabaseAdminGrpc.getGetIamPolicyMethod = + getGetIamPolicyMethod = + io.grpc.MethodDescriptor + .newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetIamPolicy")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.GetIamPolicyRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.Policy.getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("GetIamPolicy")) + .build(); + } + } + } + return getGetIamPolicyMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.iam.v1.TestIamPermissionsRequest, com.google.iam.v1.TestIamPermissionsResponse> + getTestIamPermissionsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "TestIamPermissions", + requestType = com.google.iam.v1.TestIamPermissionsRequest.class, + responseType = 
com.google.iam.v1.TestIamPermissionsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.iam.v1.TestIamPermissionsRequest, com.google.iam.v1.TestIamPermissionsResponse> + getTestIamPermissionsMethod() { + io.grpc.MethodDescriptor< + com.google.iam.v1.TestIamPermissionsRequest, + com.google.iam.v1.TestIamPermissionsResponse> + getTestIamPermissionsMethod; + if ((getTestIamPermissionsMethod = DatabaseAdminGrpc.getTestIamPermissionsMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getTestIamPermissionsMethod = DatabaseAdminGrpc.getTestIamPermissionsMethod) == null) { + DatabaseAdminGrpc.getTestIamPermissionsMethod = + getTestIamPermissionsMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "TestIamPermissions")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.TestIamPermissionsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.TestIamPermissionsResponse.getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("TestIamPermissions")) + .build(); + } + } + } + return getTestIamPermissionsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.CreateBackupRequest, + com.google.longrunning.Operation> + getCreateBackupMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateBackup", + requestType = com.google.spanner.admin.database.v1.CreateBackupRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.CreateBackupRequest, + com.google.longrunning.Operation> 
+ getCreateBackupMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.CreateBackupRequest, + com.google.longrunning.Operation> + getCreateBackupMethod; + if ((getCreateBackupMethod = DatabaseAdminGrpc.getCreateBackupMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getCreateBackupMethod = DatabaseAdminGrpc.getCreateBackupMethod) == null) { + DatabaseAdminGrpc.getCreateBackupMethod = + getCreateBackupMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CreateBackup")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.CreateBackupRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("CreateBackup")) + .build(); + } + } + } + return getCreateBackupMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.CopyBackupRequest, com.google.longrunning.Operation> + getCopyBackupMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CopyBackup", + requestType = com.google.spanner.admin.database.v1.CopyBackupRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.CopyBackupRequest, com.google.longrunning.Operation> + getCopyBackupMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.CopyBackupRequest, + com.google.longrunning.Operation> + getCopyBackupMethod; + if ((getCopyBackupMethod = DatabaseAdminGrpc.getCopyBackupMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getCopyBackupMethod 
= DatabaseAdminGrpc.getCopyBackupMethod) == null) { + DatabaseAdminGrpc.getCopyBackupMethod = + getCopyBackupMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CopyBackup")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.CopyBackupRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor(new DatabaseAdminMethodDescriptorSupplier("CopyBackup")) + .build(); + } + } + } + return getCopyBackupMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.GetBackupRequest, + com.google.spanner.admin.database.v1.Backup> + getGetBackupMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetBackup", + requestType = com.google.spanner.admin.database.v1.GetBackupRequest.class, + responseType = com.google.spanner.admin.database.v1.Backup.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.GetBackupRequest, + com.google.spanner.admin.database.v1.Backup> + getGetBackupMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.GetBackupRequest, + com.google.spanner.admin.database.v1.Backup> + getGetBackupMethod; + if ((getGetBackupMethod = DatabaseAdminGrpc.getGetBackupMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getGetBackupMethod = DatabaseAdminGrpc.getGetBackupMethod) == null) { + DatabaseAdminGrpc.getGetBackupMethod = + getGetBackupMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetBackup")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.GetBackupRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.Backup.getDefaultInstance())) + .setSchemaDescriptor(new DatabaseAdminMethodDescriptorSupplier("GetBackup")) + .build(); + } + } + } + return getGetBackupMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.UpdateBackupRequest, + com.google.spanner.admin.database.v1.Backup> + getUpdateBackupMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UpdateBackup", + requestType = com.google.spanner.admin.database.v1.UpdateBackupRequest.class, + responseType = com.google.spanner.admin.database.v1.Backup.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.UpdateBackupRequest, + com.google.spanner.admin.database.v1.Backup> + getUpdateBackupMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.UpdateBackupRequest, + com.google.spanner.admin.database.v1.Backup> + getUpdateBackupMethod; + if ((getUpdateBackupMethod = DatabaseAdminGrpc.getUpdateBackupMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getUpdateBackupMethod = DatabaseAdminGrpc.getUpdateBackupMethod) == null) { + DatabaseAdminGrpc.getUpdateBackupMethod = + getUpdateBackupMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "UpdateBackup")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.UpdateBackupRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.Backup.getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("UpdateBackup")) + .build(); + } + } + } + return getUpdateBackupMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.DeleteBackupRequest, com.google.protobuf.Empty> + getDeleteBackupMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "DeleteBackup", + requestType = com.google.spanner.admin.database.v1.DeleteBackupRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.DeleteBackupRequest, com.google.protobuf.Empty> + getDeleteBackupMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.DeleteBackupRequest, com.google.protobuf.Empty> + getDeleteBackupMethod; + if ((getDeleteBackupMethod = DatabaseAdminGrpc.getDeleteBackupMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getDeleteBackupMethod = DatabaseAdminGrpc.getDeleteBackupMethod) == null) { + DatabaseAdminGrpc.getDeleteBackupMethod = + getDeleteBackupMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "DeleteBackup")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.DeleteBackupRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("DeleteBackup")) + .build(); + } + } + } + return getDeleteBackupMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.ListBackupsRequest, + com.google.spanner.admin.database.v1.ListBackupsResponse> + getListBackupsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListBackups", + requestType = com.google.spanner.admin.database.v1.ListBackupsRequest.class, + responseType = com.google.spanner.admin.database.v1.ListBackupsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.ListBackupsRequest, + com.google.spanner.admin.database.v1.ListBackupsResponse> + getListBackupsMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.ListBackupsRequest, + com.google.spanner.admin.database.v1.ListBackupsResponse> + getListBackupsMethod; + if ((getListBackupsMethod = DatabaseAdminGrpc.getListBackupsMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getListBackupsMethod = DatabaseAdminGrpc.getListBackupsMethod) == null) { + DatabaseAdminGrpc.getListBackupsMethod = + getListBackupsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ListBackups")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.ListBackupsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.ListBackupsResponse + .getDefaultInstance())) + .setSchemaDescriptor(new DatabaseAdminMethodDescriptorSupplier("ListBackups")) + .build(); + } + } + } + return getListBackupsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.RestoreDatabaseRequest, + com.google.longrunning.Operation> + getRestoreDatabaseMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "RestoreDatabase", + requestType = com.google.spanner.admin.database.v1.RestoreDatabaseRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.RestoreDatabaseRequest, + com.google.longrunning.Operation> + getRestoreDatabaseMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.RestoreDatabaseRequest, + com.google.longrunning.Operation> + getRestoreDatabaseMethod; + if ((getRestoreDatabaseMethod = DatabaseAdminGrpc.getRestoreDatabaseMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getRestoreDatabaseMethod = DatabaseAdminGrpc.getRestoreDatabaseMethod) == null) { + DatabaseAdminGrpc.getRestoreDatabaseMethod = + getRestoreDatabaseMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "RestoreDatabase")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.RestoreDatabaseRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("RestoreDatabase")) + .build(); + } + } + } + return getRestoreDatabaseMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest, + com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse> + getListDatabaseOperationsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListDatabaseOperations", + requestType = com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest.class, + responseType = com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest, + com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse> + getListDatabaseOperationsMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest, + com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse> + getListDatabaseOperationsMethod; + if ((getListDatabaseOperationsMethod = DatabaseAdminGrpc.getListDatabaseOperationsMethod) + == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getListDatabaseOperationsMethod = DatabaseAdminGrpc.getListDatabaseOperationsMethod) + == null) { + DatabaseAdminGrpc.getListDatabaseOperationsMethod = + getListDatabaseOperationsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "ListDatabaseOperations")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("ListDatabaseOperations")) + .build(); + } + } + } + return getListDatabaseOperationsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.ListBackupOperationsRequest, + com.google.spanner.admin.database.v1.ListBackupOperationsResponse> + getListBackupOperationsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListBackupOperations", + requestType = com.google.spanner.admin.database.v1.ListBackupOperationsRequest.class, + responseType = com.google.spanner.admin.database.v1.ListBackupOperationsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.ListBackupOperationsRequest, + com.google.spanner.admin.database.v1.ListBackupOperationsResponse> + getListBackupOperationsMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.ListBackupOperationsRequest, + com.google.spanner.admin.database.v1.ListBackupOperationsResponse> + getListBackupOperationsMethod; + if ((getListBackupOperationsMethod = DatabaseAdminGrpc.getListBackupOperationsMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getListBackupOperationsMethod = DatabaseAdminGrpc.getListBackupOperationsMethod) + == null) { + DatabaseAdminGrpc.getListBackupOperationsMethod = + getListBackupOperationsMethod = + 
io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "ListBackupOperations")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.ListBackupOperationsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.ListBackupOperationsResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("ListBackupOperations")) + .build(); + } + } + } + return getListBackupOperationsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.ListDatabaseRolesRequest, + com.google.spanner.admin.database.v1.ListDatabaseRolesResponse> + getListDatabaseRolesMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListDatabaseRoles", + requestType = com.google.spanner.admin.database.v1.ListDatabaseRolesRequest.class, + responseType = com.google.spanner.admin.database.v1.ListDatabaseRolesResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.ListDatabaseRolesRequest, + com.google.spanner.admin.database.v1.ListDatabaseRolesResponse> + getListDatabaseRolesMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.ListDatabaseRolesRequest, + com.google.spanner.admin.database.v1.ListDatabaseRolesResponse> + getListDatabaseRolesMethod; + if ((getListDatabaseRolesMethod = DatabaseAdminGrpc.getListDatabaseRolesMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getListDatabaseRolesMethod = DatabaseAdminGrpc.getListDatabaseRolesMethod) == null) { + DatabaseAdminGrpc.getListDatabaseRolesMethod = + getListDatabaseRolesMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ListDatabaseRoles")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.ListDatabaseRolesRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.ListDatabaseRolesResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("ListDatabaseRoles")) + .build(); + } + } + } + return getListDatabaseRolesMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.AddSplitPointsRequest, + com.google.spanner.admin.database.v1.AddSplitPointsResponse> + getAddSplitPointsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "AddSplitPoints", + requestType = com.google.spanner.admin.database.v1.AddSplitPointsRequest.class, + responseType = com.google.spanner.admin.database.v1.AddSplitPointsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.AddSplitPointsRequest, + com.google.spanner.admin.database.v1.AddSplitPointsResponse> + getAddSplitPointsMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.AddSplitPointsRequest, + com.google.spanner.admin.database.v1.AddSplitPointsResponse> + getAddSplitPointsMethod; + if ((getAddSplitPointsMethod = DatabaseAdminGrpc.getAddSplitPointsMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getAddSplitPointsMethod = DatabaseAdminGrpc.getAddSplitPointsMethod) == null) { + DatabaseAdminGrpc.getAddSplitPointsMethod = + getAddSplitPointsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "AddSplitPoints")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.AddSplitPointsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.AddSplitPointsResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("AddSplitPoints")) + .build(); + } + } + } + return getAddSplitPointsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule> + getCreateBackupScheduleMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateBackupSchedule", + requestType = com.google.spanner.admin.database.v1.CreateBackupScheduleRequest.class, + responseType = com.google.spanner.admin.database.v1.BackupSchedule.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule> + getCreateBackupScheduleMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule> + getCreateBackupScheduleMethod; + if ((getCreateBackupScheduleMethod = DatabaseAdminGrpc.getCreateBackupScheduleMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getCreateBackupScheduleMethod = DatabaseAdminGrpc.getCreateBackupScheduleMethod) + == null) { + DatabaseAdminGrpc.getCreateBackupScheduleMethod = + getCreateBackupScheduleMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "CreateBackupSchedule")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.BackupSchedule + .getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("CreateBackupSchedule")) + .build(); + } + } + } + return getCreateBackupScheduleMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.GetBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule> + getGetBackupScheduleMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetBackupSchedule", + requestType = com.google.spanner.admin.database.v1.GetBackupScheduleRequest.class, + responseType = com.google.spanner.admin.database.v1.BackupSchedule.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.GetBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule> + getGetBackupScheduleMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.GetBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule> + getGetBackupScheduleMethod; + if ((getGetBackupScheduleMethod = DatabaseAdminGrpc.getGetBackupScheduleMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getGetBackupScheduleMethod = DatabaseAdminGrpc.getGetBackupScheduleMethod) == null) { + DatabaseAdminGrpc.getGetBackupScheduleMethod = + getGetBackupScheduleMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetBackupSchedule")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.GetBackupScheduleRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.BackupSchedule + .getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("GetBackupSchedule")) + .build(); + } + } + } + return getGetBackupScheduleMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule> + getUpdateBackupScheduleMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UpdateBackupSchedule", + requestType = com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest.class, + responseType = com.google.spanner.admin.database.v1.BackupSchedule.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule> + getUpdateBackupScheduleMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule> + getUpdateBackupScheduleMethod; + if ((getUpdateBackupScheduleMethod = DatabaseAdminGrpc.getUpdateBackupScheduleMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getUpdateBackupScheduleMethod = DatabaseAdminGrpc.getUpdateBackupScheduleMethod) + == null) { + DatabaseAdminGrpc.getUpdateBackupScheduleMethod = + getUpdateBackupScheduleMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "UpdateBackupSchedule")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.BackupSchedule + .getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("UpdateBackupSchedule")) + .build(); + } + } + } + return getUpdateBackupScheduleMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest, + com.google.protobuf.Empty> + getDeleteBackupScheduleMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "DeleteBackupSchedule", + requestType = com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest, + com.google.protobuf.Empty> + getDeleteBackupScheduleMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest, + com.google.protobuf.Empty> + getDeleteBackupScheduleMethod; + if ((getDeleteBackupScheduleMethod = DatabaseAdminGrpc.getDeleteBackupScheduleMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getDeleteBackupScheduleMethod = DatabaseAdminGrpc.getDeleteBackupScheduleMethod) + == null) { + DatabaseAdminGrpc.getDeleteBackupScheduleMethod = + getDeleteBackupScheduleMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "DeleteBackupSchedule")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("DeleteBackupSchedule")) + .build(); + } + } + } + return getDeleteBackupScheduleMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest, + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse> + getListBackupSchedulesMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListBackupSchedules", + requestType = com.google.spanner.admin.database.v1.ListBackupSchedulesRequest.class, + responseType = com.google.spanner.admin.database.v1.ListBackupSchedulesResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest, + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse> + getListBackupSchedulesMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest, + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse> + getListBackupSchedulesMethod; + if ((getListBackupSchedulesMethod = DatabaseAdminGrpc.getListBackupSchedulesMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getListBackupSchedulesMethod = DatabaseAdminGrpc.getListBackupSchedulesMethod) + == null) { + DatabaseAdminGrpc.getListBackupSchedulesMethod = + getListBackupSchedulesMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "ListBackupSchedules")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("ListBackupSchedules")) + .build(); + } + } + } + return getListBackupSchedulesMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest, + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse> + getInternalUpdateGraphOperationMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "InternalUpdateGraphOperation", + requestType = com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest.class, + responseType = + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest, + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse> + getInternalUpdateGraphOperationMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest, + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse> + getInternalUpdateGraphOperationMethod; + if ((getInternalUpdateGraphOperationMethod = + DatabaseAdminGrpc.getInternalUpdateGraphOperationMethod) + == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getInternalUpdateGraphOperationMethod = + 
DatabaseAdminGrpc.getInternalUpdateGraphOperationMethod) + == null) { + DatabaseAdminGrpc.getInternalUpdateGraphOperationMethod = + getInternalUpdateGraphOperationMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "InternalUpdateGraphOperation")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1 + .InternalUpdateGraphOperationRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1 + .InternalUpdateGraphOperationResponse.getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("InternalUpdateGraphOperation")) + .build(); + } + } + } + return getInternalUpdateGraphOperationMethod; + } + + /** Creates a new async stub that supports all call types for the service */ + public static DatabaseAdminStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public DatabaseAdminStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new DatabaseAdminStub(channel, callOptions); + } + }; + return DatabaseAdminStub.newStub(factory, channel); + } + + /** Creates a new blocking-style stub that supports all types of calls on the service */ + public static DatabaseAdminBlockingV2Stub newBlockingV2Stub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public DatabaseAdminBlockingV2Stub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new DatabaseAdminBlockingV2Stub(channel, callOptions); + } + }; + return DatabaseAdminBlockingV2Stub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style 
stub that supports unary and streaming output calls on the service + */ + public static DatabaseAdminBlockingStub newBlockingStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public DatabaseAdminBlockingStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new DatabaseAdminBlockingStub(channel, callOptions); + } + }; + return DatabaseAdminBlockingStub.newStub(factory, channel); + } + + /** Creates a new ListenableFuture-style stub that supports unary calls on the service */ + public static DatabaseAdminFutureStub newFutureStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public DatabaseAdminFutureStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new DatabaseAdminFutureStub(channel, callOptions); + } + }; + return DatabaseAdminFutureStub.newStub(factory, channel); + } + + /** + * + * + *
    +   * Cloud Spanner Database Admin API
    +   * The Cloud Spanner Database Admin API can be used to:
    +   *   * create, drop, and list databases
    +   *   * update the schema of pre-existing databases
    +   *   * create, delete, copy and list backups for a database
    +   *   * restore a database from an existing backup
    +   * 
    + */ + public interface AsyncService { + + /** + * + * + *
    +     * Lists Cloud Spanner databases.
    +     * 
    + */ + default void listDatabases( + com.google.spanner.admin.database.v1.ListDatabasesRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListDatabasesMethod(), responseObserver); + } + + /** + * + * + *
    +     * Creates a new Cloud Spanner database and starts to prepare it for serving.
    +     * The returned [long-running operation][google.longrunning.Operation] will
    +     * have a name of the format `<database_name>/operations/<operation_id>` and
    +     * can be used to track preparation of the database. The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Database][google.spanner.admin.database.v1.Database], if successful.
    +     * 
    + */ + default void createDatabase( + com.google.spanner.admin.database.v1.CreateDatabaseRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCreateDatabaseMethod(), responseObserver); + } + + /** + * + * + *
    +     * Gets the state of a Cloud Spanner database.
    +     * 
    + */ + default void getDatabase( + com.google.spanner.admin.database.v1.GetDatabaseRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetDatabaseMethod(), responseObserver); + } + + /** + * + * + *
    +     * Updates a Cloud Spanner database. The returned
    +     * [long-running operation][google.longrunning.Operation] can be used to track
    +     * the progress of updating the database. If the named database does not
    +     * exist, returns `NOT_FOUND`.
    +     * While the operation is pending:
    +     *   * The database's
    +     *     [reconciling][google.spanner.admin.database.v1.Database.reconciling]
    +     *     field is set to true.
    +     *   * Cancelling the operation is best-effort. If the cancellation succeeds,
    +     *     the operation metadata's
    +     *     [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
    +     *     is set, the updates are reverted, and the operation terminates with a
    +     *     `CANCELLED` status.
    +     *   * New UpdateDatabase requests will return a `FAILED_PRECONDITION` error
    +     *     until the pending operation is done (returns successfully or with
    +     *     error).
    +     *   * Reading the database via the API continues to give the pre-request
    +     *     values.
    +     * Upon completion of the returned operation:
    +     *   * The new values are in effect and readable via the API.
    +     *   * The database's
    +     *     [reconciling][google.spanner.admin.database.v1.Database.reconciling]
    +     *     field becomes false.
    +     * The returned [long-running operation][google.longrunning.Operation] will
    +     * have a name of the format
    +     * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`
    +     * and can be used to track the database modification. The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Database][google.spanner.admin.database.v1.Database], if successful.
    +     * 
    + */ + default void updateDatabase( + com.google.spanner.admin.database.v1.UpdateDatabaseRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getUpdateDatabaseMethod(), responseObserver); + } + + /** + * + * + *
    +     * Updates the schema of a Cloud Spanner database by
    +     * creating/altering/dropping tables, columns, indexes, etc. The returned
    +     * [long-running operation][google.longrunning.Operation] will have a name of
    +     * the format `<database_name>/operations/<operation_id>` and can be used to
    +     * track execution of the schema change(s). The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
    +     * The operation has no response.
    +     * 
    + */ + default void updateDatabaseDdl( + com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getUpdateDatabaseDdlMethod(), responseObserver); + } + + /** + * + * + *
    +     * Drops (aka deletes) a Cloud Spanner database.
    +     * Completed backups for the database will be retained according to their
    +     * `expire_time`.
    +     * Note: Cloud Spanner might continue to accept requests for a few seconds
    +     * after the database has been deleted.
    +     * 
    + */ + default void dropDatabase( + com.google.spanner.admin.database.v1.DropDatabaseRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getDropDatabaseMethod(), responseObserver); + } + + /** + * + * + *
    +     * Returns the schema of a Cloud Spanner database as a list of formatted
    +     * DDL statements. This method does not show pending schema updates, those may
    +     * be queried using the [Operations][google.longrunning.Operations] API.
    +     * 
    + */ + default void getDatabaseDdl( + com.google.spanner.admin.database.v1.GetDatabaseDdlRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetDatabaseDdlMethod(), responseObserver); + } + + /** + * + * + *
    +     * Sets the access control policy on a database or backup resource.
    +     * Replaces any existing policy.
    +     * Authorization requires `spanner.databases.setIamPolicy`
    +     * permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
    +     * For backups, authorization requires `spanner.backups.setIamPolicy`
    +     * permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
    +     * 
    + */ + default void setIamPolicy( + com.google.iam.v1.SetIamPolicyRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getSetIamPolicyMethod(), responseObserver); + } + + /** + * + * + *
    +     * Gets the access control policy for a database or backup resource.
    +     * Returns an empty policy if a database or backup exists but does not have a
    +     * policy set.
    +     * Authorization requires `spanner.databases.getIamPolicy` permission on
    +     * [resource][google.iam.v1.GetIamPolicyRequest.resource].
    +     * For backups, authorization requires `spanner.backups.getIamPolicy`
    +     * permission on [resource][google.iam.v1.GetIamPolicyRequest.resource].
    +     * 
    + */ + default void getIamPolicy( + com.google.iam.v1.GetIamPolicyRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetIamPolicyMethod(), responseObserver); + } + + /** + * + * + *
    +     * Returns permissions that the caller has on the specified database or backup
    +     * resource.
    +     * Attempting this RPC on a non-existent Cloud Spanner database will
    +     * result in a NOT_FOUND error if the user has
    +     * `spanner.databases.list` permission on the containing Cloud
    +     * Spanner instance. Otherwise returns an empty set of permissions.
    +     * Calling this method on a backup that does not exist will
    +     * result in a NOT_FOUND error if the user has
    +     * `spanner.backups.list` permission on the containing instance.
    +     * 
    + */ + default void testIamPermissions( + com.google.iam.v1.TestIamPermissionsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getTestIamPermissionsMethod(), responseObserver); + } + + /** + * + * + *
    +     * Starts creating a new Cloud Spanner Backup.
    +     * The returned backup [long-running operation][google.longrunning.Operation]
    +     * will have a name of the format
    +     * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
    +     * and can be used to track creation of the backup. The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Backup][google.spanner.admin.database.v1.Backup], if successful.
    +     * Cancelling the returned operation will stop the creation and delete the
    +     * backup. There can be only one pending backup creation per database. Backup
    +     * creation of different databases can run concurrently.
    +     * 
    + */ + default void createBackup( + com.google.spanner.admin.database.v1.CreateBackupRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCreateBackupMethod(), responseObserver); + } + + /** + * + * + *
    +     * Starts copying a Cloud Spanner Backup.
    +     * The returned backup [long-running operation][google.longrunning.Operation]
    +     * will have a name of the format
    +     * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
    +     * and can be used to track copying of the backup. The operation is associated
    +     * with the destination backup.
    +     * The [metadata][google.longrunning.Operation.metadata] field type is
    +     * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Backup][google.spanner.admin.database.v1.Backup], if successful.
    +     * Cancelling the returned operation will stop the copying and delete the
    +     * destination backup. Concurrent CopyBackup requests can run on the same
    +     * source backup.
    +     * 
    + */ + default void copyBackup( + com.google.spanner.admin.database.v1.CopyBackupRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getCopyBackupMethod(), responseObserver); + } + + /** + * + * + *
    +     * Gets metadata on a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
    +     * 
    + */ + default void getBackup( + com.google.spanner.admin.database.v1.GetBackupRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getGetBackupMethod(), responseObserver); + } + + /** + * + * + *
    +     * Updates a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
    +     * 
    + */ + default void updateBackup( + com.google.spanner.admin.database.v1.UpdateBackupRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getUpdateBackupMethod(), responseObserver); + } + + /** + * + * + *
    +     * Deletes a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
    +     * 
    + */ + default void deleteBackup( + com.google.spanner.admin.database.v1.DeleteBackupRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getDeleteBackupMethod(), responseObserver); + } + + /** + * + * + *
    +     * Lists completed and pending backups.
    +     * Backups returned are ordered by `create_time` in descending order,
    +     * starting from the most recent `create_time`.
    +     * 
    + */ + default void listBackups( + com.google.spanner.admin.database.v1.ListBackupsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListBackupsMethod(), responseObserver); + } + + /** + * + * + *
    +     * Create a new database by restoring from a completed backup. The new
    +     * database must be in the same project and in an instance with the same
    +     * instance configuration as the instance containing
    +     * the backup. The returned database [long-running
    +     * operation][google.longrunning.Operation] has a name of the format
    +     * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`,
    +     * and can be used to track the progress of the operation, and to cancel it.
    +     * The [metadata][google.longrunning.Operation.metadata] field type is
    +     * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
    +     * The [response][google.longrunning.Operation.response] type
    +     * is [Database][google.spanner.admin.database.v1.Database], if
    +     * successful. Cancelling the returned operation will stop the restore and
    +     * delete the database.
    +     * There can be only one database being restored into an instance at a time.
    +     * Once the restore operation completes, a new restore operation can be
    +     * initiated, without waiting for the optimize operation associated with the
    +     * first restore to complete.
    +     * 
    + */ + default void restoreDatabase( + com.google.spanner.admin.database.v1.RestoreDatabaseRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getRestoreDatabaseMethod(), responseObserver); + } + + /** + * + * + *
    +     * Lists database [longrunning-operations][google.longrunning.Operation].
    +     * A database operation has a name of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`.
    +     * The long-running operation
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that have completed/failed/canceled within the last 7 days,
    +     * and pending operations.
    +     * 
    + */ + default void listDatabaseOperations( + com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest request, + io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListDatabaseOperationsMethod(), responseObserver); + } + + /** + * + * + *
    +     * Lists the backup [long-running operations][google.longrunning.Operation] in
    +     * the given instance. A backup operation has a name of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>`.
    +     * The long-running operation
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that have completed/failed/canceled within the last 7 days,
    +     * and pending operations. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + */ + default void listBackupOperations( + com.google.spanner.admin.database.v1.ListBackupOperationsRequest request, + io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.ListBackupOperationsResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListBackupOperationsMethod(), responseObserver); + } + + /** + * + * + *
    +     * Lists Cloud Spanner database roles.
    +     * 
    + */ + default void listDatabaseRoles( + com.google.spanner.admin.database.v1.ListDatabaseRolesRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListDatabaseRolesMethod(), responseObserver); + } + + /** + * + * + *
    +     * Adds split points to specified tables, indexes of a database.
    +     * 
    + */ + default void addSplitPoints( + com.google.spanner.admin.database.v1.AddSplitPointsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getAddSplitPointsMethod(), responseObserver); + } + + /** + * + * + *
    +     * Creates a new backup schedule.
    +     * 
    + */ + default void createBackupSchedule( + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCreateBackupScheduleMethod(), responseObserver); + } + + /** + * + * + *
    +     * Gets backup schedule for the input schedule name.
    +     * 
    + */ + default void getBackupSchedule( + com.google.spanner.admin.database.v1.GetBackupScheduleRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetBackupScheduleMethod(), responseObserver); + } + + /** + * + * + *
    +     * Updates a backup schedule.
    +     * 
    + */ + default void updateBackupSchedule( + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getUpdateBackupScheduleMethod(), responseObserver); + } + + /** + * + * + *
    +     * Deletes a backup schedule.
    +     * 
    + */ + default void deleteBackupSchedule( + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getDeleteBackupScheduleMethod(), responseObserver); + } + + /** + * + * + *
    +     * Lists all the backup schedules for the database.
    +     * 
    + */ + default void listBackupSchedules( + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest request, + io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListBackupSchedulesMethod(), responseObserver); + } + + /** + * + * + *
    +     * This is an internal API called by Spanner Graph jobs. You should never need
    +     * to call this API directly.
    +     * 
    + */ + default void internalUpdateGraphOperation( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest request, + io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getInternalUpdateGraphOperationMethod(), responseObserver); + } + } + + /** + * Base class for the server implementation of the service DatabaseAdmin. + * + *
    +   * Cloud Spanner Database Admin API
    +   * The Cloud Spanner Database Admin API can be used to:
    +   *   * create, drop, and list databases
    +   *   * update the schema of pre-existing databases
    +   *   * create, delete, copy and list backups for a database
    +   *   * restore a database from an existing backup
    +   * 
    + */ + public abstract static class DatabaseAdminImplBase + implements io.grpc.BindableService, AsyncService { + + @java.lang.Override + public final io.grpc.ServerServiceDefinition bindService() { + return DatabaseAdminGrpc.bindService(this); + } + } + + /** + * A stub to allow clients to do asynchronous rpc calls to service DatabaseAdmin. + * + *
    +   * Cloud Spanner Database Admin API
    +   * The Cloud Spanner Database Admin API can be used to:
    +   *   * create, drop, and list databases
    +   *   * update the schema of pre-existing databases
    +   *   * create, delete, copy and list backups for a database
    +   *   * restore a database from an existing backup
    +   * 
    + */ + public static final class DatabaseAdminStub + extends io.grpc.stub.AbstractAsyncStub { + private DatabaseAdminStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected DatabaseAdminStub build(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new DatabaseAdminStub(channel, callOptions); + } + + /** + * + * + *
    +     * Lists Cloud Spanner databases.
    +     * 
    + */ + public void listDatabases( + com.google.spanner.admin.database.v1.ListDatabasesRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListDatabasesMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Creates a new Cloud Spanner database and starts to prepare it for serving.
    +     * The returned [long-running operation][google.longrunning.Operation] will
    +     * have a name of the format `<database_name>/operations/<operation_id>` and
    +     * can be used to track preparation of the database. The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Database][google.spanner.admin.database.v1.Database], if successful.
    +     * 
    + */ + public void createDatabase( + com.google.spanner.admin.database.v1.CreateDatabaseRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCreateDatabaseMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Gets the state of a Cloud Spanner database.
    +     * 
    + */ + public void getDatabase( + com.google.spanner.admin.database.v1.GetDatabaseRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetDatabaseMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Updates a Cloud Spanner database. The returned
    +     * [long-running operation][google.longrunning.Operation] can be used to track
    +     * the progress of updating the database. If the named database does not
    +     * exist, returns `NOT_FOUND`.
    +     * While the operation is pending:
    +     *   * The database's
    +     *     [reconciling][google.spanner.admin.database.v1.Database.reconciling]
    +     *     field is set to true.
    +     *   * Cancelling the operation is best-effort. If the cancellation succeeds,
    +     *     the operation metadata's
    +     *     [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
    +     *     is set, the updates are reverted, and the operation terminates with a
    +     *     `CANCELLED` status.
    +     *   * New UpdateDatabase requests will return a `FAILED_PRECONDITION` error
    +     *     until the pending operation is done (returns successfully or with
    +     *     error).
    +     *   * Reading the database via the API continues to give the pre-request
    +     *     values.
    +     * Upon completion of the returned operation:
    +     *   * The new values are in effect and readable via the API.
    +     *   * The database's
    +     *     [reconciling][google.spanner.admin.database.v1.Database.reconciling]
    +     *     field becomes false.
    +     * The returned [long-running operation][google.longrunning.Operation] will
    +     * have a name of the format
    +     * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`
    +     * and can be used to track the database modification. The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Database][google.spanner.admin.database.v1.Database], if successful.
    +     * 
    + */ + public void updateDatabase( + com.google.spanner.admin.database.v1.UpdateDatabaseRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getUpdateDatabaseMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Updates the schema of a Cloud Spanner database by
    +     * creating/altering/dropping tables, columns, indexes, etc. The returned
    +     * [long-running operation][google.longrunning.Operation] will have a name of
    +     * the format `<database_name>/operations/<operation_id>` and can be used to
    +     * track execution of the schema change(s). The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
    +     * The operation has no response.
    +     * 
    + */ + public void updateDatabaseDdl( + com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getUpdateDatabaseDdlMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Drops (aka deletes) a Cloud Spanner database.
    +     * Completed backups for the database will be retained according to their
    +     * `expire_time`.
    +     * Note: Cloud Spanner might continue to accept requests for a few seconds
    +     * after the database has been deleted.
    +     * 
    + */ + public void dropDatabase( + com.google.spanner.admin.database.v1.DropDatabaseRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getDropDatabaseMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Returns the schema of a Cloud Spanner database as a list of formatted
    +     * DDL statements. This method does not show pending schema updates, those may
    +     * be queried using the [Operations][google.longrunning.Operations] API.
    +     * 
    + */ + public void getDatabaseDdl( + com.google.spanner.admin.database.v1.GetDatabaseDdlRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetDatabaseDdlMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Sets the access control policy on a database or backup resource.
    +     * Replaces any existing policy.
    +     * Authorization requires `spanner.databases.setIamPolicy`
    +     * permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
    +     * For backups, authorization requires `spanner.backups.setIamPolicy`
    +     * permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
    +     * 
    + */ + public void setIamPolicy( + com.google.iam.v1.SetIamPolicyRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getSetIamPolicyMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Gets the access control policy for a database or backup resource.
    +     * Returns an empty policy if a database or backup exists but does not have a
    +     * policy set.
    +     * Authorization requires `spanner.databases.getIamPolicy` permission on
    +     * [resource][google.iam.v1.GetIamPolicyRequest.resource].
    +     * For backups, authorization requires `spanner.backups.getIamPolicy`
    +     * permission on [resource][google.iam.v1.GetIamPolicyRequest.resource].
    +     * 
    + */ + public void getIamPolicy( + com.google.iam.v1.GetIamPolicyRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetIamPolicyMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Returns permissions that the caller has on the specified database or backup
    +     * resource.
    +     * Attempting this RPC on a non-existent Cloud Spanner database will
    +     * result in a NOT_FOUND error if the user has
    +     * `spanner.databases.list` permission on the containing Cloud
    +     * Spanner instance. Otherwise returns an empty set of permissions.
    +     * Calling this method on a backup that does not exist will
    +     * result in a NOT_FOUND error if the user has
    +     * `spanner.backups.list` permission on the containing instance.
    +     * 
    + */ + public void testIamPermissions( + com.google.iam.v1.TestIamPermissionsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getTestIamPermissionsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Starts creating a new Cloud Spanner Backup.
    +     * The returned backup [long-running operation][google.longrunning.Operation]
    +     * will have a name of the format
    +     * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
    +     * and can be used to track creation of the backup. The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Backup][google.spanner.admin.database.v1.Backup], if successful.
    +     * Cancelling the returned operation will stop the creation and delete the
    +     * backup. There can be only one pending backup creation per database. Backup
    +     * creation of different databases can run concurrently.
    +     * 
    + */ + public void createBackup( + com.google.spanner.admin.database.v1.CreateBackupRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCreateBackupMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Starts copying a Cloud Spanner Backup.
    +     * The returned backup [long-running operation][google.longrunning.Operation]
    +     * will have a name of the format
    +     * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
    +     * and can be used to track copying of the backup. The operation is associated
    +     * with the destination backup.
    +     * The [metadata][google.longrunning.Operation.metadata] field type is
    +     * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Backup][google.spanner.admin.database.v1.Backup], if successful.
    +     * Cancelling the returned operation will stop the copying and delete the
    +     * destination backup. Concurrent CopyBackup requests can run on the same
    +     * source backup.
    +     * 
    + */ + public void copyBackup( + com.google.spanner.admin.database.v1.CopyBackupRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCopyBackupMethod(), getCallOptions()), request, responseObserver); + } + + /** + * + * + *
    +     * Gets metadata on a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
    +     * 
    + */ + public void getBackup( + com.google.spanner.admin.database.v1.GetBackupRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetBackupMethod(), getCallOptions()), request, responseObserver); + } + + /** + * + * + *
    +     * Updates a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
    +     * 
    + */ + public void updateBackup( + com.google.spanner.admin.database.v1.UpdateBackupRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getUpdateBackupMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Deletes a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
    +     * 
    + */ + public void deleteBackup( + com.google.spanner.admin.database.v1.DeleteBackupRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getDeleteBackupMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Lists completed and pending backups.
    +     * Backups returned are ordered by `create_time` in descending order,
    +     * starting from the most recent `create_time`.
    +     * 
    + */ + public void listBackups( + com.google.spanner.admin.database.v1.ListBackupsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListBackupsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Create a new database by restoring from a completed backup. The new
    +     * database must be in the same project and in an instance with the same
    +     * instance configuration as the instance containing
    +     * the backup. The returned database [long-running
    +     * operation][google.longrunning.Operation] has a name of the format
    +     * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`,
    +     * and can be used to track the progress of the operation, and to cancel it.
    +     * The [metadata][google.longrunning.Operation.metadata] field type is
    +     * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
    +     * The [response][google.longrunning.Operation.response] type
    +     * is [Database][google.spanner.admin.database.v1.Database], if
    +     * successful. Cancelling the returned operation will stop the restore and
    +     * delete the database.
    +     * There can be only one database being restored into an instance at a time.
    +     * Once the restore operation completes, a new restore operation can be
    +     * initiated, without waiting for the optimize operation associated with the
    +     * first restore to complete.
    +     * 
    + */ + public void restoreDatabase( + com.google.spanner.admin.database.v1.RestoreDatabaseRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getRestoreDatabaseMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Lists database [longrunning-operations][google.longrunning.Operation].
    +     * A database operation has a name of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`.
    +     * The long-running operation
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that have completed/failed/canceled within the last 7 days,
    +     * and pending operations.
    +     * 
    + */ + public void listDatabaseOperations( + com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest request, + io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListDatabaseOperationsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Lists the backup [long-running operations][google.longrunning.Operation] in
    +     * the given instance. A backup operation has a name of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>`.
    +     * The long-running operation
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that have completed/failed/canceled within the last 7 days,
    +     * and pending operations. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + */ + public void listBackupOperations( + com.google.spanner.admin.database.v1.ListBackupOperationsRequest request, + io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.ListBackupOperationsResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListBackupOperationsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Lists Cloud Spanner database roles.
    +     * 
    + */ + public void listDatabaseRoles( + com.google.spanner.admin.database.v1.ListDatabaseRolesRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListDatabaseRolesMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Adds split points to specified tables, indexes of a database.
    +     * 
    + */ + public void addSplitPoints( + com.google.spanner.admin.database.v1.AddSplitPointsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getAddSplitPointsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Creates a new backup schedule.
    +     * 
    + */ + public void createBackupSchedule( + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCreateBackupScheduleMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Gets backup schedule for the input schedule name.
    +     * 
    + */ + public void getBackupSchedule( + com.google.spanner.admin.database.v1.GetBackupScheduleRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetBackupScheduleMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Updates a backup schedule.
    +     * 
    + */ + public void updateBackupSchedule( + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getUpdateBackupScheduleMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Deletes a backup schedule.
    +     * 
    + */ + public void deleteBackupSchedule( + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getDeleteBackupScheduleMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Lists all the backup schedules for the database.
    +     * 
    + */ + public void listBackupSchedules( + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest request, + io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListBackupSchedulesMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * This is an internal API called by Spanner Graph jobs. You should never need
    +     * to call this API directly.
    +     * 
    + */ + public void internalUpdateGraphOperation( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest request, + io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getInternalUpdateGraphOperationMethod(), getCallOptions()), + request, + responseObserver); + } + } + + /** + * A stub to allow clients to do synchronous rpc calls to service DatabaseAdmin. + * + *
    +   * Cloud Spanner Database Admin API
    +   * The Cloud Spanner Database Admin API can be used to:
    +   *   * create, drop, and list databases
    +   *   * update the schema of pre-existing databases
    +   *   * create, delete, copy and list backups for a database
    +   *   * restore a database from an existing backup
    +   * 
    + */ + public static final class DatabaseAdminBlockingV2Stub + extends io.grpc.stub.AbstractBlockingStub { + private DatabaseAdminBlockingV2Stub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected DatabaseAdminBlockingV2Stub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new DatabaseAdminBlockingV2Stub(channel, callOptions); + } + + /** + * + * + *
    +     * Lists Cloud Spanner databases.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.ListDatabasesResponse listDatabases( + com.google.spanner.admin.database.v1.ListDatabasesRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getListDatabasesMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Creates a new Cloud Spanner database and starts to prepare it for serving.
    +     * The returned [long-running operation][google.longrunning.Operation] will
    +     * have a name of the format `<database_name>/operations/<operation_id>` and
    +     * can be used to track preparation of the database. The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Database][google.spanner.admin.database.v1.Database], if successful.
    +     * 
    + */ + public com.google.longrunning.Operation createDatabase( + com.google.spanner.admin.database.v1.CreateDatabaseRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getCreateDatabaseMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Gets the state of a Cloud Spanner database.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.Database getDatabase( + com.google.spanner.admin.database.v1.GetDatabaseRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetDatabaseMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Updates a Cloud Spanner database. The returned
    +     * [long-running operation][google.longrunning.Operation] can be used to track
    +     * the progress of updating the database. If the named database does not
    +     * exist, returns `NOT_FOUND`.
    +     * While the operation is pending:
    +     *   * The database's
    +     *     [reconciling][google.spanner.admin.database.v1.Database.reconciling]
    +     *     field is set to true.
    +     *   * Cancelling the operation is best-effort. If the cancellation succeeds,
    +     *     the operation metadata's
    +     *     [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
    +     *     is set, the updates are reverted, and the operation terminates with a
    +     *     `CANCELLED` status.
    +     *   * New UpdateDatabase requests will return a `FAILED_PRECONDITION` error
    +     *     until the pending operation is done (returns successfully or with
    +     *     error).
    +     *   * Reading the database via the API continues to give the pre-request
    +     *     values.
    +     * Upon completion of the returned operation:
    +     *   * The new values are in effect and readable via the API.
    +     *   * The database's
    +     *     [reconciling][google.spanner.admin.database.v1.Database.reconciling]
    +     *     field becomes false.
    +     * The returned [long-running operation][google.longrunning.Operation] will
    +     * have a name of the format
    +     * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`
    +     * and can be used to track the database modification. The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Database][google.spanner.admin.database.v1.Database], if successful.
    +     * 
    + */ + public com.google.longrunning.Operation updateDatabase( + com.google.spanner.admin.database.v1.UpdateDatabaseRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getUpdateDatabaseMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Updates the schema of a Cloud Spanner database by
    +     * creating/altering/dropping tables, columns, indexes, etc. The returned
    +     * [long-running operation][google.longrunning.Operation] will have a name of
    +     * the format `<database_name>/operations/<operation_id>` and can be used to
    +     * track execution of the schema change(s). The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
    +     * The operation has no response.
    +     * 
    + */ + public com.google.longrunning.Operation updateDatabaseDdl( + com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getUpdateDatabaseDdlMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Drops (aka deletes) a Cloud Spanner database.
    +     * Completed backups for the database will be retained according to their
    +     * `expire_time`.
    +     * Note: Cloud Spanner might continue to accept requests for a few seconds
    +     * after the database has been deleted.
    +     * 
    + */ + public com.google.protobuf.Empty dropDatabase( + com.google.spanner.admin.database.v1.DropDatabaseRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getDropDatabaseMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Returns the schema of a Cloud Spanner database as a list of formatted
    +     * DDL statements. This method does not show pending schema updates, those may
    +     * be queried using the [Operations][google.longrunning.Operations] API.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.GetDatabaseDdlResponse getDatabaseDdl( + com.google.spanner.admin.database.v1.GetDatabaseDdlRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetDatabaseDdlMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Sets the access control policy on a database or backup resource.
    +     * Replaces any existing policy.
    +     * Authorization requires `spanner.databases.setIamPolicy`
    +     * permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
    +     * For backups, authorization requires `spanner.backups.setIamPolicy`
    +     * permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
    +     * 
    + */ + public com.google.iam.v1.Policy setIamPolicy(com.google.iam.v1.SetIamPolicyRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getSetIamPolicyMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Gets the access control policy for a database or backup resource.
    +     * Returns an empty policy if a database or backup exists but does not have a
    +     * policy set.
    +     * Authorization requires `spanner.databases.getIamPolicy` permission on
    +     * [resource][google.iam.v1.GetIamPolicyRequest.resource].
    +     * For backups, authorization requires `spanner.backups.getIamPolicy`
    +     * permission on [resource][google.iam.v1.GetIamPolicyRequest.resource].
    +     * 
    + */ + public com.google.iam.v1.Policy getIamPolicy(com.google.iam.v1.GetIamPolicyRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetIamPolicyMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Returns permissions that the caller has on the specified database or backup
    +     * resource.
    +     * Attempting this RPC on a non-existent Cloud Spanner database will
    +     * result in a NOT_FOUND error if the user has
    +     * `spanner.databases.list` permission on the containing Cloud
    +     * Spanner instance. Otherwise returns an empty set of permissions.
    +     * Calling this method on a backup that does not exist will
    +     * result in a NOT_FOUND error if the user has
    +     * `spanner.backups.list` permission on the containing instance.
    +     * 
    + */ + public com.google.iam.v1.TestIamPermissionsResponse testIamPermissions( + com.google.iam.v1.TestIamPermissionsRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getTestIamPermissionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Starts creating a new Cloud Spanner Backup.
    +     * The returned backup [long-running operation][google.longrunning.Operation]
    +     * will have a name of the format
    +     * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
    +     * and can be used to track creation of the backup. The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Backup][google.spanner.admin.database.v1.Backup], if successful.
    +     * Cancelling the returned operation will stop the creation and delete the
    +     * backup. There can be only one pending backup creation per database. Backup
    +     * creation of different databases can run concurrently.
    +     * 
    + */ + public com.google.longrunning.Operation createBackup( + com.google.spanner.admin.database.v1.CreateBackupRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getCreateBackupMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Starts copying a Cloud Spanner Backup.
    +     * The returned backup [long-running operation][google.longrunning.Operation]
    +     * will have a name of the format
    +     * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
    +     * and can be used to track copying of the backup. The operation is associated
    +     * with the destination backup.
    +     * The [metadata][google.longrunning.Operation.metadata] field type is
    +     * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Backup][google.spanner.admin.database.v1.Backup], if successful.
    +     * Cancelling the returned operation will stop the copying and delete the
    +     * destination backup. Concurrent CopyBackup requests can run on the same
    +     * source backup.
    +     * 
    + */ + public com.google.longrunning.Operation copyBackup( + com.google.spanner.admin.database.v1.CopyBackupRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getCopyBackupMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Gets metadata on a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
    +     * 
    + */ + public com.google.spanner.admin.database.v1.Backup getBackup( + com.google.spanner.admin.database.v1.GetBackupRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetBackupMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Updates a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
    +     * 
    + */ + public com.google.spanner.admin.database.v1.Backup updateBackup( + com.google.spanner.admin.database.v1.UpdateBackupRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getUpdateBackupMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Deletes a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
    +     * 
    + */ + public com.google.protobuf.Empty deleteBackup( + com.google.spanner.admin.database.v1.DeleteBackupRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getDeleteBackupMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Lists completed and pending backups.
    +     * Backups returned are ordered by `create_time` in descending order,
    +     * starting from the most recent `create_time`.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.ListBackupsResponse listBackups( + com.google.spanner.admin.database.v1.ListBackupsRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getListBackupsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Create a new database by restoring from a completed backup. The new
    +     * database must be in the same project and in an instance with the same
    +     * instance configuration as the instance containing
    +     * the backup. The returned database [long-running
    +     * operation][google.longrunning.Operation] has a name of the format
    +     * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`,
    +     * and can be used to track the progress of the operation, and to cancel it.
    +     * The [metadata][google.longrunning.Operation.metadata] field type is
    +     * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
    +     * The [response][google.longrunning.Operation.response] type
    +     * is [Database][google.spanner.admin.database.v1.Database], if
    +     * successful. Cancelling the returned operation will stop the restore and
    +     * delete the database.
    +     * There can be only one database being restored into an instance at a time.
    +     * Once the restore operation completes, a new restore operation can be
    +     * initiated, without waiting for the optimize operation associated with the
    +     * first restore to complete.
    +     * 
    + */ + public com.google.longrunning.Operation restoreDatabase( + com.google.spanner.admin.database.v1.RestoreDatabaseRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getRestoreDatabaseMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Lists database [longrunning-operations][google.longrunning.Operation].
    +     * A database operation has a name of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`.
    +     * The long-running operation
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that have completed/failed/canceled within the last 7 days,
    +     * and pending operations.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse + listDatabaseOperations( + com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getListDatabaseOperationsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Lists the backup [long-running operations][google.longrunning.Operation] in
    +     * the given instance. A backup operation has a name of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>`.
    +     * The long-running operation
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that have completed/failed/canceled within the last 7 days,
    +     * and pending operations. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.ListBackupOperationsResponse listBackupOperations( + com.google.spanner.admin.database.v1.ListBackupOperationsRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getListBackupOperationsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Lists Cloud Spanner database roles.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.ListDatabaseRolesResponse listDatabaseRoles( + com.google.spanner.admin.database.v1.ListDatabaseRolesRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getListDatabaseRolesMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Adds split points to specified tables, indexes of a database.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.AddSplitPointsResponse addSplitPoints( + com.google.spanner.admin.database.v1.AddSplitPointsRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getAddSplitPointsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Creates a new backup schedule.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.BackupSchedule createBackupSchedule( + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getCreateBackupScheduleMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Gets backup schedule for the input schedule name.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.BackupSchedule getBackupSchedule( + com.google.spanner.admin.database.v1.GetBackupScheduleRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetBackupScheduleMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Updates a backup schedule.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.BackupSchedule updateBackupSchedule( + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getUpdateBackupScheduleMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Deletes a backup schedule.
    +     * 
    + */ + public com.google.protobuf.Empty deleteBackupSchedule( + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getDeleteBackupScheduleMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Lists all the backup schedules for the database.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.ListBackupSchedulesResponse listBackupSchedules( + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getListBackupSchedulesMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * This is an internal API called by Spanner Graph jobs. You should never need
    +     * to call this API directly.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + internalUpdateGraphOperation( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getInternalUpdateGraphOperationMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do limited synchronous rpc calls to service DatabaseAdmin. + * + *
    +   * Cloud Spanner Database Admin API
    +   * The Cloud Spanner Database Admin API can be used to:
    +   *   * create, drop, and list databases
    +   *   * update the schema of pre-existing databases
    +   *   * create, delete, copy and list backups for a database
    +   *   * restore a database from an existing backup
    +   * 
    + */ + public static final class DatabaseAdminBlockingStub + extends io.grpc.stub.AbstractBlockingStub { + private DatabaseAdminBlockingStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected DatabaseAdminBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new DatabaseAdminBlockingStub(channel, callOptions); + } + + /** + * + * + *
    +     * Lists Cloud Spanner databases.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.ListDatabasesResponse listDatabases( + com.google.spanner.admin.database.v1.ListDatabasesRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListDatabasesMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Creates a new Cloud Spanner database and starts to prepare it for serving.
    +     * The returned [long-running operation][google.longrunning.Operation] will
    +     * have a name of the format `<database_name>/operations/<operation_id>` and
    +     * can be used to track preparation of the database. The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Database][google.spanner.admin.database.v1.Database], if successful.
    +     * 
    + */ + public com.google.longrunning.Operation createDatabase( + com.google.spanner.admin.database.v1.CreateDatabaseRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCreateDatabaseMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Gets the state of a Cloud Spanner database.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.Database getDatabase( + com.google.spanner.admin.database.v1.GetDatabaseRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetDatabaseMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Updates a Cloud Spanner database. The returned
    +     * [long-running operation][google.longrunning.Operation] can be used to track
    +     * the progress of updating the database. If the named database does not
    +     * exist, returns `NOT_FOUND`.
    +     * While the operation is pending:
    +     *   * The database's
    +     *     [reconciling][google.spanner.admin.database.v1.Database.reconciling]
    +     *     field is set to true.
    +     *   * Cancelling the operation is best-effort. If the cancellation succeeds,
    +     *     the operation metadata's
    +     *     [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
    +     *     is set, the updates are reverted, and the operation terminates with a
    +     *     `CANCELLED` status.
    +     *   * New UpdateDatabase requests will return a `FAILED_PRECONDITION` error
    +     *     until the pending operation is done (returns successfully or with
    +     *     error).
    +     *   * Reading the database via the API continues to give the pre-request
    +     *     values.
    +     * Upon completion of the returned operation:
    +     *   * The new values are in effect and readable via the API.
    +     *   * The database's
    +     *     [reconciling][google.spanner.admin.database.v1.Database.reconciling]
    +     *     field becomes false.
    +     * The returned [long-running operation][google.longrunning.Operation] will
    +     * have a name of the format
    +     * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`
    +     * and can be used to track the database modification. The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Database][google.spanner.admin.database.v1.Database], if successful.
    +     * 
    + */ + public com.google.longrunning.Operation updateDatabase( + com.google.spanner.admin.database.v1.UpdateDatabaseRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getUpdateDatabaseMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Updates the schema of a Cloud Spanner database by
    +     * creating/altering/dropping tables, columns, indexes, etc. The returned
    +     * [long-running operation][google.longrunning.Operation] will have a name of
    +     * the format `<database_name>/operations/<operation_id>` and can be used to
    +     * track execution of the schema change(s). The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
    +     * The operation has no response.
    +     * 
    + */ + public com.google.longrunning.Operation updateDatabaseDdl( + com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getUpdateDatabaseDdlMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Drops (aka deletes) a Cloud Spanner database.
    +     * Completed backups for the database will be retained according to their
    +     * `expire_time`.
    +     * Note: Cloud Spanner might continue to accept requests for a few seconds
    +     * after the database has been deleted.
    +     * 
    + */ + public com.google.protobuf.Empty dropDatabase( + com.google.spanner.admin.database.v1.DropDatabaseRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getDropDatabaseMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Returns the schema of a Cloud Spanner database as a list of formatted
    +     * DDL statements. This method does not show pending schema updates, those may
    +     * be queried using the [Operations][google.longrunning.Operations] API.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.GetDatabaseDdlResponse getDatabaseDdl( + com.google.spanner.admin.database.v1.GetDatabaseDdlRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetDatabaseDdlMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Sets the access control policy on a database or backup resource.
    +     * Replaces any existing policy.
    +     * Authorization requires `spanner.databases.setIamPolicy`
    +     * permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
    +     * For backups, authorization requires `spanner.backups.setIamPolicy`
    +     * permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
    +     * 
    + */ + public com.google.iam.v1.Policy setIamPolicy(com.google.iam.v1.SetIamPolicyRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getSetIamPolicyMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Gets the access control policy for a database or backup resource.
    +     * Returns an empty policy if a database or backup exists but does not have a
    +     * policy set.
    +     * Authorization requires `spanner.databases.getIamPolicy` permission on
    +     * [resource][google.iam.v1.GetIamPolicyRequest.resource].
    +     * For backups, authorization requires `spanner.backups.getIamPolicy`
    +     * permission on [resource][google.iam.v1.GetIamPolicyRequest.resource].
    +     * 
    + */ + public com.google.iam.v1.Policy getIamPolicy(com.google.iam.v1.GetIamPolicyRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetIamPolicyMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Returns permissions that the caller has on the specified database or backup
    +     * resource.
    +     * Attempting this RPC on a non-existent Cloud Spanner database will
    +     * result in a NOT_FOUND error if the user has
    +     * `spanner.databases.list` permission on the containing Cloud
    +     * Spanner instance. Otherwise returns an empty set of permissions.
    +     * Calling this method on a backup that does not exist will
    +     * result in a NOT_FOUND error if the user has
    +     * `spanner.backups.list` permission on the containing instance.
    +     * 
    + */ + public com.google.iam.v1.TestIamPermissionsResponse testIamPermissions( + com.google.iam.v1.TestIamPermissionsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getTestIamPermissionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Starts creating a new Cloud Spanner Backup.
    +     * The returned backup [long-running operation][google.longrunning.Operation]
    +     * will have a name of the format
    +     * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
    +     * and can be used to track creation of the backup. The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Backup][google.spanner.admin.database.v1.Backup], if successful.
    +     * Cancelling the returned operation will stop the creation and delete the
    +     * backup. There can be only one pending backup creation per database. Backup
    +     * creation of different databases can run concurrently.
    +     * 
    + */ + public com.google.longrunning.Operation createBackup( + com.google.spanner.admin.database.v1.CreateBackupRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCreateBackupMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Starts copying a Cloud Spanner Backup.
    +     * The returned backup [long-running operation][google.longrunning.Operation]
    +     * will have a name of the format
    +     * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
    +     * and can be used to track copying of the backup. The operation is associated
    +     * with the destination backup.
    +     * The [metadata][google.longrunning.Operation.metadata] field type is
    +     * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Backup][google.spanner.admin.database.v1.Backup], if successful.
    +     * Cancelling the returned operation will stop the copying and delete the
    +     * destination backup. Concurrent CopyBackup requests can run on the same
    +     * source backup.
    +     * 
    + */ + public com.google.longrunning.Operation copyBackup( + com.google.spanner.admin.database.v1.CopyBackupRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCopyBackupMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Gets metadata on a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
    +     * 
    + */ + public com.google.spanner.admin.database.v1.Backup getBackup( + com.google.spanner.admin.database.v1.GetBackupRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetBackupMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Updates a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
    +     * 
    + */ + public com.google.spanner.admin.database.v1.Backup updateBackup( + com.google.spanner.admin.database.v1.UpdateBackupRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getUpdateBackupMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Deletes a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
    +     * 
    + */ + public com.google.protobuf.Empty deleteBackup( + com.google.spanner.admin.database.v1.DeleteBackupRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getDeleteBackupMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Lists completed and pending backups.
    +     * Backups returned are ordered by `create_time` in descending order,
    +     * starting from the most recent `create_time`.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.ListBackupsResponse listBackups( + com.google.spanner.admin.database.v1.ListBackupsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListBackupsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Create a new database by restoring from a completed backup. The new
    +     * database must be in the same project and in an instance with the same
    +     * instance configuration as the instance containing
    +     * the backup. The returned database [long-running
    +     * operation][google.longrunning.Operation] has a name of the format
    +     * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`,
    +     * and can be used to track the progress of the operation, and to cancel it.
    +     * The [metadata][google.longrunning.Operation.metadata] field type is
    +     * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
    +     * The [response][google.longrunning.Operation.response] type
    +     * is [Database][google.spanner.admin.database.v1.Database], if
    +     * successful. Cancelling the returned operation will stop the restore and
    +     * delete the database.
    +     * There can be only one database being restored into an instance at a time.
    +     * Once the restore operation completes, a new restore operation can be
    +     * initiated, without waiting for the optimize operation associated with the
    +     * first restore to complete.
    +     * 
    + */ + public com.google.longrunning.Operation restoreDatabase( + com.google.spanner.admin.database.v1.RestoreDatabaseRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getRestoreDatabaseMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Lists database [longrunning-operations][google.longrunning.Operation].
    +     * A database operation has a name of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`.
    +     * The long-running operation
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that have completed/failed/canceled within the last 7 days,
    +     * and pending operations.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse + listDatabaseOperations( + com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListDatabaseOperationsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Lists the backup [long-running operations][google.longrunning.Operation] in
    +     * the given instance. A backup operation has a name of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>`.
    +     * The long-running operation
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that have completed/failed/canceled within the last 7 days,
    +     * and pending operations. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.ListBackupOperationsResponse listBackupOperations( + com.google.spanner.admin.database.v1.ListBackupOperationsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListBackupOperationsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Lists Cloud Spanner database roles.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.ListDatabaseRolesResponse listDatabaseRoles( + com.google.spanner.admin.database.v1.ListDatabaseRolesRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListDatabaseRolesMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Adds split points to specified tables, indexes of a database.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.AddSplitPointsResponse addSplitPoints( + com.google.spanner.admin.database.v1.AddSplitPointsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getAddSplitPointsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Creates a new backup schedule.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.BackupSchedule createBackupSchedule( + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCreateBackupScheduleMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Gets backup schedule for the input schedule name.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.BackupSchedule getBackupSchedule( + com.google.spanner.admin.database.v1.GetBackupScheduleRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetBackupScheduleMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Updates a backup schedule.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.BackupSchedule updateBackupSchedule( + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getUpdateBackupScheduleMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Deletes a backup schedule.
    +     * 
    + */ + public com.google.protobuf.Empty deleteBackupSchedule( + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getDeleteBackupScheduleMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Lists all the backup schedules for the database.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.ListBackupSchedulesResponse listBackupSchedules( + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListBackupSchedulesMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * This is an internal API called by Spanner Graph jobs. You should never need
    +     * to call this API directly.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + internalUpdateGraphOperation( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getInternalUpdateGraphOperationMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do ListenableFuture-style rpc calls to service DatabaseAdmin. + * + *
    +   * Cloud Spanner Database Admin API
    +   * The Cloud Spanner Database Admin API can be used to:
    +   *   * create, drop, and list databases
    +   *   * update the schema of pre-existing databases
    +   *   * create, delete, copy and list backups for a database
    +   *   * restore a database from an existing backup
    +   * 
    + */ + public static final class DatabaseAdminFutureStub + extends io.grpc.stub.AbstractFutureStub { + private DatabaseAdminFutureStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected DatabaseAdminFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new DatabaseAdminFutureStub(channel, callOptions); + } + + /** + * + * + *
    +     * Lists Cloud Spanner databases.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.database.v1.ListDatabasesResponse> + listDatabases(com.google.spanner.admin.database.v1.ListDatabasesRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListDatabasesMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Creates a new Cloud Spanner database and starts to prepare it for serving.
    +     * The returned [long-running operation][google.longrunning.Operation] will
    +     * have a name of the format `<database_name>/operations/<operation_id>` and
    +     * can be used to track preparation of the database. The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Database][google.spanner.admin.database.v1.Database], if successful.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + createDatabase(com.google.spanner.admin.database.v1.CreateDatabaseRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCreateDatabaseMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Gets the state of a Cloud Spanner database.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.database.v1.Database> + getDatabase(com.google.spanner.admin.database.v1.GetDatabaseRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetDatabaseMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Updates a Cloud Spanner database. The returned
    +     * [long-running operation][google.longrunning.Operation] can be used to track
    +     * the progress of updating the database. If the named database does not
    +     * exist, returns `NOT_FOUND`.
    +     * While the operation is pending:
    +     *   * The database's
    +     *     [reconciling][google.spanner.admin.database.v1.Database.reconciling]
    +     *     field is set to true.
    +     *   * Cancelling the operation is best-effort. If the cancellation succeeds,
    +     *     the operation metadata's
    +     *     [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
    +     *     is set, the updates are reverted, and the operation terminates with a
    +     *     `CANCELLED` status.
    +     *   * New UpdateDatabase requests will return a `FAILED_PRECONDITION` error
    +     *     until the pending operation is done (returns successfully or with
    +     *     error).
    +     *   * Reading the database via the API continues to give the pre-request
    +     *     values.
    +     * Upon completion of the returned operation:
    +     *   * The new values are in effect and readable via the API.
    +     *   * The database's
    +     *     [reconciling][google.spanner.admin.database.v1.Database.reconciling]
    +     *     field becomes false.
    +     * The returned [long-running operation][google.longrunning.Operation] will
    +     * have a name of the format
    +     * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`
    +     * and can be used to track the database modification. The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Database][google.spanner.admin.database.v1.Database], if successful.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + updateDatabase(com.google.spanner.admin.database.v1.UpdateDatabaseRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getUpdateDatabaseMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Updates the schema of a Cloud Spanner database by
    +     * creating/altering/dropping tables, columns, indexes, etc. The returned
    +     * [long-running operation][google.longrunning.Operation] will have a name of
    +     * the format `<database_name>/operations/<operation_id>` and can be used to
    +     * track execution of the schema change(s). The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
    +     * The operation has no response.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + updateDatabaseDdl(com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getUpdateDatabaseDdlMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Drops (aka deletes) a Cloud Spanner database.
    +     * Completed backups for the database will be retained according to their
    +     * `expire_time`.
    +     * Note: Cloud Spanner might continue to accept requests for a few seconds
    +     * after the database has been deleted.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + dropDatabase(com.google.spanner.admin.database.v1.DropDatabaseRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getDropDatabaseMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Returns the schema of a Cloud Spanner database as a list of formatted
    +     * DDL statements. This method does not show pending schema updates, those may
    +     * be queried using the [Operations][google.longrunning.Operations] API.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.database.v1.GetDatabaseDdlResponse> + getDatabaseDdl(com.google.spanner.admin.database.v1.GetDatabaseDdlRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetDatabaseDdlMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Sets the access control policy on a database or backup resource.
    +     * Replaces any existing policy.
    +     * Authorization requires `spanner.databases.setIamPolicy`
    +     * permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
    +     * For backups, authorization requires `spanner.backups.setIamPolicy`
    +     * permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + setIamPolicy(com.google.iam.v1.SetIamPolicyRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getSetIamPolicyMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Gets the access control policy for a database or backup resource.
    +     * Returns an empty policy if a database or backup exists but does not have a
    +     * policy set.
    +     * Authorization requires `spanner.databases.getIamPolicy` permission on
    +     * [resource][google.iam.v1.GetIamPolicyRequest.resource].
    +     * For backups, authorization requires `spanner.backups.getIamPolicy`
    +     * permission on [resource][google.iam.v1.GetIamPolicyRequest.resource].
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + getIamPolicy(com.google.iam.v1.GetIamPolicyRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetIamPolicyMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Returns permissions that the caller has on the specified database or backup
    +     * resource.
    +     * Attempting this RPC on a non-existent Cloud Spanner database will
    +     * result in a NOT_FOUND error if the user has
    +     * `spanner.databases.list` permission on the containing Cloud
    +     * Spanner instance. Otherwise returns an empty set of permissions.
    +     * Calling this method on a backup that does not exist will
    +     * result in a NOT_FOUND error if the user has
    +     * `spanner.backups.list` permission on the containing instance.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.iam.v1.TestIamPermissionsResponse> + testIamPermissions(com.google.iam.v1.TestIamPermissionsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getTestIamPermissionsMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Starts creating a new Cloud Spanner Backup.
    +     * The returned backup [long-running operation][google.longrunning.Operation]
    +     * will have a name of the format
    +     * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
    +     * and can be used to track creation of the backup. The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Backup][google.spanner.admin.database.v1.Backup], if successful.
    +     * Cancelling the returned operation will stop the creation and delete the
    +     * backup. There can be only one pending backup creation per database. Backup
    +     * creation of different databases can run concurrently.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + createBackup(com.google.spanner.admin.database.v1.CreateBackupRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCreateBackupMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Starts copying a Cloud Spanner Backup.
    +     * The returned backup [long-running operation][google.longrunning.Operation]
    +     * will have a name of the format
    +     * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
    +     * and can be used to track copying of the backup. The operation is associated
    +     * with the destination backup.
    +     * The [metadata][google.longrunning.Operation.metadata] field type is
    +     * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Backup][google.spanner.admin.database.v1.Backup], if successful.
    +     * Cancelling the returned operation will stop the copying and delete the
    +     * destination backup. Concurrent CopyBackup requests can run on the same
    +     * source backup.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + copyBackup(com.google.spanner.admin.database.v1.CopyBackupRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCopyBackupMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Gets metadata on a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.database.v1.Backup> + getBackup(com.google.spanner.admin.database.v1.GetBackupRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetBackupMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Updates a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.database.v1.Backup> + updateBackup(com.google.spanner.admin.database.v1.UpdateBackupRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getUpdateBackupMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Deletes a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + deleteBackup(com.google.spanner.admin.database.v1.DeleteBackupRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getDeleteBackupMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Lists completed and pending backups.
    +     * Backups returned are ordered by `create_time` in descending order,
    +     * starting from the most recent `create_time`.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.database.v1.ListBackupsResponse> + listBackups(com.google.spanner.admin.database.v1.ListBackupsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListBackupsMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Create a new database by restoring from a completed backup. The new
    +     * database must be in the same project and in an instance with the same
    +     * instance configuration as the instance containing
    +     * the backup. The returned database [long-running
    +     * operation][google.longrunning.Operation] has a name of the format
    +     * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`,
    +     * and can be used to track the progress of the operation, and to cancel it.
    +     * The [metadata][google.longrunning.Operation.metadata] field type is
    +     * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
    +     * The [response][google.longrunning.Operation.response] type
    +     * is [Database][google.spanner.admin.database.v1.Database], if
    +     * successful. Cancelling the returned operation will stop the restore and
    +     * delete the database.
    +     * There can be only one database being restored into an instance at a time.
    +     * Once the restore operation completes, a new restore operation can be
    +     * initiated, without waiting for the optimize operation associated with the
    +     * first restore to complete.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + restoreDatabase(com.google.spanner.admin.database.v1.RestoreDatabaseRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getRestoreDatabaseMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Lists database [longrunning-operations][google.longrunning.Operation].
    +     * A database operation has a name of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`.
    +     * The long-running operation
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that have completed/failed/canceled within the last 7 days,
    +     * and pending operations.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse> + listDatabaseOperations( + com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListDatabaseOperationsMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Lists the backup [long-running operations][google.longrunning.Operation] in
    +     * the given instance. A backup operation has a name of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>`.
    +     * The long-running operation
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that have completed/failed/canceled within the last 7 days,
    +     * and pending operations. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.database.v1.ListBackupOperationsResponse> + listBackupOperations( + com.google.spanner.admin.database.v1.ListBackupOperationsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListBackupOperationsMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Lists Cloud Spanner database roles.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.database.v1.ListDatabaseRolesResponse> + listDatabaseRoles(com.google.spanner.admin.database.v1.ListDatabaseRolesRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListDatabaseRolesMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Adds split points to specified tables, indexes of a database.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.database.v1.AddSplitPointsResponse> + addSplitPoints(com.google.spanner.admin.database.v1.AddSplitPointsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getAddSplitPointsMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Creates a new backup schedule.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.database.v1.BackupSchedule> + createBackupSchedule( + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCreateBackupScheduleMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Gets backup schedule for the input schedule name.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.database.v1.BackupSchedule> + getBackupSchedule(com.google.spanner.admin.database.v1.GetBackupScheduleRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetBackupScheduleMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Updates a backup schedule.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.database.v1.BackupSchedule> + updateBackupSchedule( + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getUpdateBackupScheduleMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Deletes a backup schedule.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + deleteBackupSchedule( + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getDeleteBackupScheduleMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Lists all the backup schedules for the database.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse> + listBackupSchedules( + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListBackupSchedulesMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * This is an internal API called by Spanner Graph jobs. You should never need
    +     * to call this API directly.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse> + internalUpdateGraphOperation( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getInternalUpdateGraphOperationMethod(), getCallOptions()), request); + } + } + + private static final int METHODID_LIST_DATABASES = 0; + private static final int METHODID_CREATE_DATABASE = 1; + private static final int METHODID_GET_DATABASE = 2; + private static final int METHODID_UPDATE_DATABASE = 3; + private static final int METHODID_UPDATE_DATABASE_DDL = 4; + private static final int METHODID_DROP_DATABASE = 5; + private static final int METHODID_GET_DATABASE_DDL = 6; + private static final int METHODID_SET_IAM_POLICY = 7; + private static final int METHODID_GET_IAM_POLICY = 8; + private static final int METHODID_TEST_IAM_PERMISSIONS = 9; + private static final int METHODID_CREATE_BACKUP = 10; + private static final int METHODID_COPY_BACKUP = 11; + private static final int METHODID_GET_BACKUP = 12; + private static final int METHODID_UPDATE_BACKUP = 13; + private static final int METHODID_DELETE_BACKUP = 14; + private static final int METHODID_LIST_BACKUPS = 15; + private static final int METHODID_RESTORE_DATABASE = 16; + private static final int METHODID_LIST_DATABASE_OPERATIONS = 17; + private static final int METHODID_LIST_BACKUP_OPERATIONS = 18; + private static final int METHODID_LIST_DATABASE_ROLES = 19; + private static final int METHODID_ADD_SPLIT_POINTS = 20; + private static final int METHODID_CREATE_BACKUP_SCHEDULE = 21; + private static final int METHODID_GET_BACKUP_SCHEDULE = 22; + private static final int METHODID_UPDATE_BACKUP_SCHEDULE = 23; + private static final int METHODID_DELETE_BACKUP_SCHEDULE = 24; + private static final int METHODID_LIST_BACKUP_SCHEDULES = 25; + private static final int 
METHODID_INTERNAL_UPDATE_GRAPH_OPERATION = 26; + + private static final class MethodHandlers + implements io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final AsyncService serviceImpl; + private final int methodId; + + MethodHandlers(AsyncService serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_LIST_DATABASES: + serviceImpl.listDatabases( + (com.google.spanner.admin.database.v1.ListDatabasesRequest) request, + (io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.ListDatabasesResponse>) + responseObserver); + break; + case METHODID_CREATE_DATABASE: + serviceImpl.createDatabase( + (com.google.spanner.admin.database.v1.CreateDatabaseRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_GET_DATABASE: + serviceImpl.getDatabase( + (com.google.spanner.admin.database.v1.GetDatabaseRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_UPDATE_DATABASE: + serviceImpl.updateDatabase( + (com.google.spanner.admin.database.v1.UpdateDatabaseRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_UPDATE_DATABASE_DDL: + serviceImpl.updateDatabaseDdl( + (com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_DROP_DATABASE: + serviceImpl.dropDatabase( + (com.google.spanner.admin.database.v1.DropDatabaseRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_GET_DATABASE_DDL: + serviceImpl.getDatabaseDdl( + 
(com.google.spanner.admin.database.v1.GetDatabaseDdlRequest) request, + (io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.GetDatabaseDdlResponse>) + responseObserver); + break; + case METHODID_SET_IAM_POLICY: + serviceImpl.setIamPolicy( + (com.google.iam.v1.SetIamPolicyRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_GET_IAM_POLICY: + serviceImpl.getIamPolicy( + (com.google.iam.v1.GetIamPolicyRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_TEST_IAM_PERMISSIONS: + serviceImpl.testIamPermissions( + (com.google.iam.v1.TestIamPermissionsRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_CREATE_BACKUP: + serviceImpl.createBackup( + (com.google.spanner.admin.database.v1.CreateBackupRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_COPY_BACKUP: + serviceImpl.copyBackup( + (com.google.spanner.admin.database.v1.CopyBackupRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_GET_BACKUP: + serviceImpl.getBackup( + (com.google.spanner.admin.database.v1.GetBackupRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_UPDATE_BACKUP: + serviceImpl.updateBackup( + (com.google.spanner.admin.database.v1.UpdateBackupRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_DELETE_BACKUP: + serviceImpl.deleteBackup( + (com.google.spanner.admin.database.v1.DeleteBackupRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_LIST_BACKUPS: + serviceImpl.listBackups( + (com.google.spanner.admin.database.v1.ListBackupsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.ListBackupsResponse>) + responseObserver); + break; + case METHODID_RESTORE_DATABASE: + serviceImpl.restoreDatabase( + 
(com.google.spanner.admin.database.v1.RestoreDatabaseRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_LIST_DATABASE_OPERATIONS: + serviceImpl.listDatabaseOperations( + (com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse>) + responseObserver); + break; + case METHODID_LIST_BACKUP_OPERATIONS: + serviceImpl.listBackupOperations( + (com.google.spanner.admin.database.v1.ListBackupOperationsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.ListBackupOperationsResponse>) + responseObserver); + break; + case METHODID_LIST_DATABASE_ROLES: + serviceImpl.listDatabaseRoles( + (com.google.spanner.admin.database.v1.ListDatabaseRolesRequest) request, + (io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.ListDatabaseRolesResponse>) + responseObserver); + break; + case METHODID_ADD_SPLIT_POINTS: + serviceImpl.addSplitPoints( + (com.google.spanner.admin.database.v1.AddSplitPointsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.AddSplitPointsResponse>) + responseObserver); + break; + case METHODID_CREATE_BACKUP_SCHEDULE: + serviceImpl.createBackupSchedule( + (com.google.spanner.admin.database.v1.CreateBackupScheduleRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_GET_BACKUP_SCHEDULE: + serviceImpl.getBackupSchedule( + (com.google.spanner.admin.database.v1.GetBackupScheduleRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_UPDATE_BACKUP_SCHEDULE: + serviceImpl.updateBackupSchedule( + (com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_DELETE_BACKUP_SCHEDULE: + serviceImpl.deleteBackupSchedule( + 
(com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_LIST_BACKUP_SCHEDULES: + serviceImpl.listBackupSchedules( + (com.google.spanner.admin.database.v1.ListBackupSchedulesRequest) request, + (io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse>) + responseObserver); + break; + case METHODID_INTERNAL_UPDATE_GRAPH_OPERATION: + serviceImpl.internalUpdateGraphOperation( + (com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest) request, + (io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse>) + responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + default: + throw new AssertionError(); + } + } + } + + public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getListDatabasesMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.ListDatabasesRequest, + com.google.spanner.admin.database.v1.ListDatabasesResponse>( + service, METHODID_LIST_DATABASES))) + .addMethod( + getCreateDatabaseMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.CreateDatabaseRequest, + com.google.longrunning.Operation>(service, METHODID_CREATE_DATABASE))) + .addMethod( + getGetDatabaseMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.GetDatabaseRequest, + com.google.spanner.admin.database.v1.Database>(service, METHODID_GET_DATABASE))) + .addMethod( + getUpdateDatabaseMethod(), + 
io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.UpdateDatabaseRequest, + com.google.longrunning.Operation>(service, METHODID_UPDATE_DATABASE))) + .addMethod( + getUpdateDatabaseDdlMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest, + com.google.longrunning.Operation>(service, METHODID_UPDATE_DATABASE_DDL))) + .addMethod( + getDropDatabaseMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.DropDatabaseRequest, + com.google.protobuf.Empty>(service, METHODID_DROP_DATABASE))) + .addMethod( + getGetDatabaseDdlMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.GetDatabaseDdlRequest, + com.google.spanner.admin.database.v1.GetDatabaseDdlResponse>( + service, METHODID_GET_DATABASE_DDL))) + .addMethod( + getSetIamPolicyMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers( + service, METHODID_SET_IAM_POLICY))) + .addMethod( + getGetIamPolicyMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers( + service, METHODID_GET_IAM_POLICY))) + .addMethod( + getTestIamPermissionsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.iam.v1.TestIamPermissionsRequest, + com.google.iam.v1.TestIamPermissionsResponse>( + service, METHODID_TEST_IAM_PERMISSIONS))) + .addMethod( + getCreateBackupMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.CreateBackupRequest, + com.google.longrunning.Operation>(service, METHODID_CREATE_BACKUP))) + .addMethod( + getCopyBackupMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.CopyBackupRequest, + com.google.longrunning.Operation>(service, METHODID_COPY_BACKUP))) + .addMethod( + 
getGetBackupMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.GetBackupRequest, + com.google.spanner.admin.database.v1.Backup>(service, METHODID_GET_BACKUP))) + .addMethod( + getUpdateBackupMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.UpdateBackupRequest, + com.google.spanner.admin.database.v1.Backup>(service, METHODID_UPDATE_BACKUP))) + .addMethod( + getDeleteBackupMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.DeleteBackupRequest, + com.google.protobuf.Empty>(service, METHODID_DELETE_BACKUP))) + .addMethod( + getListBackupsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.ListBackupsRequest, + com.google.spanner.admin.database.v1.ListBackupsResponse>( + service, METHODID_LIST_BACKUPS))) + .addMethod( + getRestoreDatabaseMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.RestoreDatabaseRequest, + com.google.longrunning.Operation>(service, METHODID_RESTORE_DATABASE))) + .addMethod( + getListDatabaseOperationsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest, + com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse>( + service, METHODID_LIST_DATABASE_OPERATIONS))) + .addMethod( + getListBackupOperationsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.ListBackupOperationsRequest, + com.google.spanner.admin.database.v1.ListBackupOperationsResponse>( + service, METHODID_LIST_BACKUP_OPERATIONS))) + .addMethod( + getListDatabaseRolesMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.ListDatabaseRolesRequest, + 
com.google.spanner.admin.database.v1.ListDatabaseRolesResponse>( + service, METHODID_LIST_DATABASE_ROLES))) + .addMethod( + getAddSplitPointsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.AddSplitPointsRequest, + com.google.spanner.admin.database.v1.AddSplitPointsResponse>( + service, METHODID_ADD_SPLIT_POINTS))) + .addMethod( + getCreateBackupScheduleMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule>( + service, METHODID_CREATE_BACKUP_SCHEDULE))) + .addMethod( + getGetBackupScheduleMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.GetBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule>( + service, METHODID_GET_BACKUP_SCHEDULE))) + .addMethod( + getUpdateBackupScheduleMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule>( + service, METHODID_UPDATE_BACKUP_SCHEDULE))) + .addMethod( + getDeleteBackupScheduleMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest, + com.google.protobuf.Empty>(service, METHODID_DELETE_BACKUP_SCHEDULE))) + .addMethod( + getListBackupSchedulesMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest, + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse>( + service, METHODID_LIST_BACKUP_SCHEDULES))) + .addMethod( + getInternalUpdateGraphOperationMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest, + 
com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse>( + service, METHODID_INTERNAL_UPDATE_GRAPH_OPERATION))) + .build(); + } + + private abstract static class DatabaseAdminBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, + io.grpc.protobuf.ProtoServiceDescriptorSupplier { + DatabaseAdminBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("DatabaseAdmin"); + } + } + + private static final class DatabaseAdminFileDescriptorSupplier + extends DatabaseAdminBaseDescriptorSupplier { + DatabaseAdminFileDescriptorSupplier() {} + } + + private static final class DatabaseAdminMethodDescriptorSupplier + extends DatabaseAdminBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final java.lang.String methodName; + + DatabaseAdminMethodDescriptorSupplier(java.lang.String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (DatabaseAdminGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = + result = + io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new DatabaseAdminFileDescriptorSupplier()) + .addMethod(getListDatabasesMethod()) + .addMethod(getCreateDatabaseMethod()) + 
.addMethod(getGetDatabaseMethod()) + .addMethod(getUpdateDatabaseMethod()) + .addMethod(getUpdateDatabaseDdlMethod()) + .addMethod(getDropDatabaseMethod()) + .addMethod(getGetDatabaseDdlMethod()) + .addMethod(getSetIamPolicyMethod()) + .addMethod(getGetIamPolicyMethod()) + .addMethod(getTestIamPermissionsMethod()) + .addMethod(getCreateBackupMethod()) + .addMethod(getCopyBackupMethod()) + .addMethod(getGetBackupMethod()) + .addMethod(getUpdateBackupMethod()) + .addMethod(getDeleteBackupMethod()) + .addMethod(getListBackupsMethod()) + .addMethod(getRestoreDatabaseMethod()) + .addMethod(getListDatabaseOperationsMethod()) + .addMethod(getListBackupOperationsMethod()) + .addMethod(getListDatabaseRolesMethod()) + .addMethod(getAddSplitPointsMethod()) + .addMethod(getCreateBackupScheduleMethod()) + .addMethod(getGetBackupScheduleMethod()) + .addMethod(getUpdateBackupScheduleMethod()) + .addMethod(getDeleteBackupScheduleMethod()) + .addMethod(getListBackupSchedulesMethod()) + .addMethod(getInternalUpdateGraphOperationMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/java-spanner/grpc-google-cloud-spanner-admin-instance-v1/clirr-ignored-differences.xml b/java-spanner/grpc-google-cloud-spanner-admin-instance-v1/clirr-ignored-differences.xml new file mode 100644 index 000000000000..c611281a06e6 --- /dev/null +++ b/java-spanner/grpc-google-cloud-spanner-admin-instance-v1/clirr-ignored-differences.xml @@ -0,0 +1,9 @@ + + + + + 7012 + com/google/spanner/admin/instance/v1/* + * + + diff --git a/java-spanner/grpc-google-cloud-spanner-admin-instance-v1/pom.xml b/java-spanner/grpc-google-cloud-spanner-admin-instance-v1/pom.xml new file mode 100644 index 000000000000..cb0ec16a2102 --- /dev/null +++ b/java-spanner/grpc-google-cloud-spanner-admin-instance-v1/pom.xml @@ -0,0 +1,89 @@ + + 4.0.0 + com.google.api.grpc + grpc-google-cloud-spanner-admin-instance-v1 + 6.112.1-SNAPSHOT + grpc-google-cloud-spanner-admin-instance-v1 + GRPC library for 
grpc-google-cloud-spanner-admin-instance-v1 + + com.google.cloud + google-cloud-spanner-parent + 6.112.1-SNAPSHOT + + + + io.grpc + grpc-api + + + io.grpc + grpc-stub + + + io.grpc + grpc-protobuf + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-cloud-spanner-admin-instance-v1 + + + com.google.guava + guava + + + com.google.auto.value + auto-value-annotations + + + com.google.api.grpc + proto-google-iam-v1 + + + com.google.api.grpc + proto-google-common-protos + + + + + + java9 + + [9,) + + + + javax.annotation + javax.annotation-api + + + + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + com.google.auto.value:auto-value-annotations,javax.annotation:javax.annotation-api + + + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + + \ No newline at end of file diff --git a/java-spanner/grpc-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceAdminGrpc.java b/java-spanner/grpc-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceAdminGrpc.java new file mode 100644 index 000000000000..093b6a459608 --- /dev/null +++ b/java-spanner/grpc-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceAdminGrpc.java @@ -0,0 +1,4627 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.spanner.admin.instance.v1; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + * + * + *
    + * Cloud Spanner Instance Admin API
    + * The Cloud Spanner Instance Admin API can be used to create, delete,
    + * modify and list instances. Instances are dedicated Cloud Spanner serving
    + * and storage resources to be used by Cloud Spanner databases.
    + * Each instance has a "configuration", which dictates where the
    + * serving resources for the Cloud Spanner instance are located (e.g.,
    + * US-central, Europe). Configurations are created by Google based on
    + * resource availability.
    + * Cloud Spanner billing is based on the instances that exist and their
    + * sizes. After an instance exists, there are no additional
    + * per-database or per-operation charges for use of the instance
    + * (though there may be additional network bandwidth charges).
    + * Instances offer isolation: problems with databases in one instance
    + * will not affect other instances. However, within an instance
    + * databases can affect each other. For example, if one database in an
    + * instance receives a lot of requests and consumes most of the
    + * instance resources, fewer resources are available for other
    + * databases in that instance, and their performance may suffer.
    + * 
    + */ +@io.grpc.stub.annotations.GrpcGenerated +public final class InstanceAdminGrpc { + + private InstanceAdminGrpc() {} + + public static final java.lang.String SERVICE_NAME = + "google.spanner.admin.instance.v1.InstanceAdmin"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest, + com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse> + getListInstanceConfigsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListInstanceConfigs", + requestType = com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest.class, + responseType = com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest, + com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse> + getListInstanceConfigsMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest, + com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse> + getListInstanceConfigsMethod; + if ((getListInstanceConfigsMethod = InstanceAdminGrpc.getListInstanceConfigsMethod) == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getListInstanceConfigsMethod = InstanceAdminGrpc.getListInstanceConfigsMethod) + == null) { + InstanceAdminGrpc.getListInstanceConfigsMethod = + getListInstanceConfigsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "ListInstanceConfigs")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new InstanceAdminMethodDescriptorSupplier("ListInstanceConfigs")) + .build(); + } + } + } + return getListInstanceConfigsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.GetInstanceConfigRequest, + com.google.spanner.admin.instance.v1.InstanceConfig> + getGetInstanceConfigMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetInstanceConfig", + requestType = com.google.spanner.admin.instance.v1.GetInstanceConfigRequest.class, + responseType = com.google.spanner.admin.instance.v1.InstanceConfig.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.GetInstanceConfigRequest, + com.google.spanner.admin.instance.v1.InstanceConfig> + getGetInstanceConfigMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.GetInstanceConfigRequest, + com.google.spanner.admin.instance.v1.InstanceConfig> + getGetInstanceConfigMethod; + if ((getGetInstanceConfigMethod = InstanceAdminGrpc.getGetInstanceConfigMethod) == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getGetInstanceConfigMethod = InstanceAdminGrpc.getGetInstanceConfigMethod) == null) { + InstanceAdminGrpc.getGetInstanceConfigMethod = + getGetInstanceConfigMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetInstanceConfig")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.GetInstanceConfigRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.InstanceConfig + .getDefaultInstance())) + .setSchemaDescriptor( + new InstanceAdminMethodDescriptorSupplier("GetInstanceConfig")) + .build(); + } + } + } + return getGetInstanceConfigMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest, + com.google.longrunning.Operation> + getCreateInstanceConfigMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateInstanceConfig", + requestType = com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest, + com.google.longrunning.Operation> + getCreateInstanceConfigMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest, + com.google.longrunning.Operation> + getCreateInstanceConfigMethod; + if ((getCreateInstanceConfigMethod = InstanceAdminGrpc.getCreateInstanceConfigMethod) == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getCreateInstanceConfigMethod = InstanceAdminGrpc.getCreateInstanceConfigMethod) + == null) { + InstanceAdminGrpc.getCreateInstanceConfigMethod = + getCreateInstanceConfigMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "CreateInstanceConfig")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new InstanceAdminMethodDescriptorSupplier("CreateInstanceConfig")) + .build(); + } + } + } + return getCreateInstanceConfigMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest, + com.google.longrunning.Operation> + getUpdateInstanceConfigMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UpdateInstanceConfig", + requestType = com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest, + com.google.longrunning.Operation> + getUpdateInstanceConfigMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest, + com.google.longrunning.Operation> + getUpdateInstanceConfigMethod; + if ((getUpdateInstanceConfigMethod = InstanceAdminGrpc.getUpdateInstanceConfigMethod) == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getUpdateInstanceConfigMethod = InstanceAdminGrpc.getUpdateInstanceConfigMethod) + == null) { + InstanceAdminGrpc.getUpdateInstanceConfigMethod = + getUpdateInstanceConfigMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "UpdateInstanceConfig")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new InstanceAdminMethodDescriptorSupplier("UpdateInstanceConfig")) + .build(); + } + } + } + return getUpdateInstanceConfigMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest, + com.google.protobuf.Empty> + getDeleteInstanceConfigMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "DeleteInstanceConfig", + requestType = com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest, + com.google.protobuf.Empty> + getDeleteInstanceConfigMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest, + com.google.protobuf.Empty> + getDeleteInstanceConfigMethod; + if ((getDeleteInstanceConfigMethod = InstanceAdminGrpc.getDeleteInstanceConfigMethod) == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getDeleteInstanceConfigMethod = InstanceAdminGrpc.getDeleteInstanceConfigMethod) + == null) { + InstanceAdminGrpc.getDeleteInstanceConfigMethod = + getDeleteInstanceConfigMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "DeleteInstanceConfig")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor( + new InstanceAdminMethodDescriptorSupplier("DeleteInstanceConfig")) + .build(); + } + } + } + return getDeleteInstanceConfigMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest, + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse> + getListInstanceConfigOperationsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListInstanceConfigOperations", + requestType = com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest.class, + responseType = + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest, + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse> + getListInstanceConfigOperationsMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest, + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse> + getListInstanceConfigOperationsMethod; + if ((getListInstanceConfigOperationsMethod = + InstanceAdminGrpc.getListInstanceConfigOperationsMethod) + == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getListInstanceConfigOperationsMethod = + InstanceAdminGrpc.getListInstanceConfigOperationsMethod) + == null) { + 
InstanceAdminGrpc.getListInstanceConfigOperationsMethod = + getListInstanceConfigOperationsMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "ListInstanceConfigOperations")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1 + .ListInstanceConfigOperationsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1 + .ListInstanceConfigOperationsResponse.getDefaultInstance())) + .setSchemaDescriptor( + new InstanceAdminMethodDescriptorSupplier("ListInstanceConfigOperations")) + .build(); + } + } + } + return getListInstanceConfigOperationsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.ListInstancesRequest, + com.google.spanner.admin.instance.v1.ListInstancesResponse> + getListInstancesMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListInstances", + requestType = com.google.spanner.admin.instance.v1.ListInstancesRequest.class, + responseType = com.google.spanner.admin.instance.v1.ListInstancesResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.ListInstancesRequest, + com.google.spanner.admin.instance.v1.ListInstancesResponse> + getListInstancesMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.ListInstancesRequest, + com.google.spanner.admin.instance.v1.ListInstancesResponse> + getListInstancesMethod; + if ((getListInstancesMethod = InstanceAdminGrpc.getListInstancesMethod) == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getListInstancesMethod = InstanceAdminGrpc.getListInstancesMethod) == null) { + InstanceAdminGrpc.getListInstancesMethod = + 
getListInstancesMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ListInstances")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.ListInstancesRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.ListInstancesResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new InstanceAdminMethodDescriptorSupplier("ListInstances")) + .build(); + } + } + } + return getListInstancesMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest, + com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse> + getListInstancePartitionsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListInstancePartitions", + requestType = com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest.class, + responseType = com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest, + com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse> + getListInstancePartitionsMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest, + com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse> + getListInstancePartitionsMethod; + if ((getListInstancePartitionsMethod = InstanceAdminGrpc.getListInstancePartitionsMethod) + == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getListInstancePartitionsMethod = InstanceAdminGrpc.getListInstancePartitionsMethod) + == null) { + InstanceAdminGrpc.getListInstancePartitionsMethod = 
+ getListInstancePartitionsMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "ListInstancePartitions")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new InstanceAdminMethodDescriptorSupplier("ListInstancePartitions")) + .build(); + } + } + } + return getListInstancePartitionsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.GetInstanceRequest, + com.google.spanner.admin.instance.v1.Instance> + getGetInstanceMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetInstance", + requestType = com.google.spanner.admin.instance.v1.GetInstanceRequest.class, + responseType = com.google.spanner.admin.instance.v1.Instance.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.GetInstanceRequest, + com.google.spanner.admin.instance.v1.Instance> + getGetInstanceMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.GetInstanceRequest, + com.google.spanner.admin.instance.v1.Instance> + getGetInstanceMethod; + if ((getGetInstanceMethod = InstanceAdminGrpc.getGetInstanceMethod) == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getGetInstanceMethod = InstanceAdminGrpc.getGetInstanceMethod) == null) { + InstanceAdminGrpc.getGetInstanceMethod = + getGetInstanceMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetInstance")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.GetInstanceRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.Instance.getDefaultInstance())) + .setSchemaDescriptor(new InstanceAdminMethodDescriptorSupplier("GetInstance")) + .build(); + } + } + } + return getGetInstanceMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.CreateInstanceRequest, + com.google.longrunning.Operation> + getCreateInstanceMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateInstance", + requestType = com.google.spanner.admin.instance.v1.CreateInstanceRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.CreateInstanceRequest, + com.google.longrunning.Operation> + getCreateInstanceMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.CreateInstanceRequest, + com.google.longrunning.Operation> + getCreateInstanceMethod; + if ((getCreateInstanceMethod = InstanceAdminGrpc.getCreateInstanceMethod) == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getCreateInstanceMethod = InstanceAdminGrpc.getCreateInstanceMethod) == null) { + InstanceAdminGrpc.getCreateInstanceMethod = + getCreateInstanceMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CreateInstance")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.CreateInstanceRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new InstanceAdminMethodDescriptorSupplier("CreateInstance")) + .build(); + } + } + } + return getCreateInstanceMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.UpdateInstanceRequest, + com.google.longrunning.Operation> + getUpdateInstanceMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UpdateInstance", + requestType = com.google.spanner.admin.instance.v1.UpdateInstanceRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.UpdateInstanceRequest, + com.google.longrunning.Operation> + getUpdateInstanceMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.UpdateInstanceRequest, + com.google.longrunning.Operation> + getUpdateInstanceMethod; + if ((getUpdateInstanceMethod = InstanceAdminGrpc.getUpdateInstanceMethod) == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getUpdateInstanceMethod = InstanceAdminGrpc.getUpdateInstanceMethod) == null) { + InstanceAdminGrpc.getUpdateInstanceMethod = + getUpdateInstanceMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "UpdateInstance")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.UpdateInstanceRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new InstanceAdminMethodDescriptorSupplier("UpdateInstance")) + .build(); + } + } + } + return getUpdateInstanceMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.DeleteInstanceRequest, com.google.protobuf.Empty> + getDeleteInstanceMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "DeleteInstance", + requestType = com.google.spanner.admin.instance.v1.DeleteInstanceRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.DeleteInstanceRequest, com.google.protobuf.Empty> + getDeleteInstanceMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.DeleteInstanceRequest, com.google.protobuf.Empty> + getDeleteInstanceMethod; + if ((getDeleteInstanceMethod = InstanceAdminGrpc.getDeleteInstanceMethod) == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getDeleteInstanceMethod = InstanceAdminGrpc.getDeleteInstanceMethod) == null) { + InstanceAdminGrpc.getDeleteInstanceMethod = + getDeleteInstanceMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "DeleteInstance")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.DeleteInstanceRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor( + new InstanceAdminMethodDescriptorSupplier("DeleteInstance")) + .build(); + } + } + } + return getDeleteInstanceMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.iam.v1.SetIamPolicyRequest, com.google.iam.v1.Policy> + getSetIamPolicyMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "SetIamPolicy", + requestType = com.google.iam.v1.SetIamPolicyRequest.class, + responseType = com.google.iam.v1.Policy.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.iam.v1.SetIamPolicyRequest, com.google.iam.v1.Policy> + getSetIamPolicyMethod() { + io.grpc.MethodDescriptor + getSetIamPolicyMethod; + if ((getSetIamPolicyMethod = InstanceAdminGrpc.getSetIamPolicyMethod) == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getSetIamPolicyMethod = InstanceAdminGrpc.getSetIamPolicyMethod) == null) { + InstanceAdminGrpc.getSetIamPolicyMethod = + getSetIamPolicyMethod = + io.grpc.MethodDescriptor + .newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "SetIamPolicy")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.SetIamPolicyRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.Policy.getDefaultInstance())) + .setSchemaDescriptor( + new 
InstanceAdminMethodDescriptorSupplier("SetIamPolicy")) + .build(); + } + } + } + return getSetIamPolicyMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.iam.v1.GetIamPolicyRequest, com.google.iam.v1.Policy> + getGetIamPolicyMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetIamPolicy", + requestType = com.google.iam.v1.GetIamPolicyRequest.class, + responseType = com.google.iam.v1.Policy.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.iam.v1.GetIamPolicyRequest, com.google.iam.v1.Policy> + getGetIamPolicyMethod() { + io.grpc.MethodDescriptor + getGetIamPolicyMethod; + if ((getGetIamPolicyMethod = InstanceAdminGrpc.getGetIamPolicyMethod) == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getGetIamPolicyMethod = InstanceAdminGrpc.getGetIamPolicyMethod) == null) { + InstanceAdminGrpc.getGetIamPolicyMethod = + getGetIamPolicyMethod = + io.grpc.MethodDescriptor + .newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetIamPolicy")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.GetIamPolicyRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.Policy.getDefaultInstance())) + .setSchemaDescriptor( + new InstanceAdminMethodDescriptorSupplier("GetIamPolicy")) + .build(); + } + } + } + return getGetIamPolicyMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.iam.v1.TestIamPermissionsRequest, com.google.iam.v1.TestIamPermissionsResponse> + getTestIamPermissionsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "TestIamPermissions", + requestType = com.google.iam.v1.TestIamPermissionsRequest.class, + responseType = 
com.google.iam.v1.TestIamPermissionsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.iam.v1.TestIamPermissionsRequest, com.google.iam.v1.TestIamPermissionsResponse> + getTestIamPermissionsMethod() { + io.grpc.MethodDescriptor< + com.google.iam.v1.TestIamPermissionsRequest, + com.google.iam.v1.TestIamPermissionsResponse> + getTestIamPermissionsMethod; + if ((getTestIamPermissionsMethod = InstanceAdminGrpc.getTestIamPermissionsMethod) == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getTestIamPermissionsMethod = InstanceAdminGrpc.getTestIamPermissionsMethod) == null) { + InstanceAdminGrpc.getTestIamPermissionsMethod = + getTestIamPermissionsMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "TestIamPermissions")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.TestIamPermissionsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.iam.v1.TestIamPermissionsResponse.getDefaultInstance())) + .setSchemaDescriptor( + new InstanceAdminMethodDescriptorSupplier("TestIamPermissions")) + .build(); + } + } + } + return getTestIamPermissionsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.GetInstancePartitionRequest, + com.google.spanner.admin.instance.v1.InstancePartition> + getGetInstancePartitionMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetInstancePartition", + requestType = com.google.spanner.admin.instance.v1.GetInstancePartitionRequest.class, + responseType = com.google.spanner.admin.instance.v1.InstancePartition.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + 
com.google.spanner.admin.instance.v1.GetInstancePartitionRequest, + com.google.spanner.admin.instance.v1.InstancePartition> + getGetInstancePartitionMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.GetInstancePartitionRequest, + com.google.spanner.admin.instance.v1.InstancePartition> + getGetInstancePartitionMethod; + if ((getGetInstancePartitionMethod = InstanceAdminGrpc.getGetInstancePartitionMethod) == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getGetInstancePartitionMethod = InstanceAdminGrpc.getGetInstancePartitionMethod) + == null) { + InstanceAdminGrpc.getGetInstancePartitionMethod = + getGetInstancePartitionMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "GetInstancePartition")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.GetInstancePartitionRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.InstancePartition + .getDefaultInstance())) + .setSchemaDescriptor( + new InstanceAdminMethodDescriptorSupplier("GetInstancePartition")) + .build(); + } + } + } + return getGetInstancePartitionMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest, + com.google.longrunning.Operation> + getCreateInstancePartitionMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateInstancePartition", + requestType = com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest, + 
com.google.longrunning.Operation> + getCreateInstancePartitionMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest, + com.google.longrunning.Operation> + getCreateInstancePartitionMethod; + if ((getCreateInstancePartitionMethod = InstanceAdminGrpc.getCreateInstancePartitionMethod) + == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getCreateInstancePartitionMethod = InstanceAdminGrpc.getCreateInstancePartitionMethod) + == null) { + InstanceAdminGrpc.getCreateInstancePartitionMethod = + getCreateInstancePartitionMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "CreateInstancePartition")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new InstanceAdminMethodDescriptorSupplier("CreateInstancePartition")) + .build(); + } + } + } + return getCreateInstancePartitionMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest, + com.google.protobuf.Empty> + getDeleteInstancePartitionMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "DeleteInstancePartition", + requestType = com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest, + com.google.protobuf.Empty> + getDeleteInstancePartitionMethod() { + io.grpc.MethodDescriptor< + 
com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest, + com.google.protobuf.Empty> + getDeleteInstancePartitionMethod; + if ((getDeleteInstancePartitionMethod = InstanceAdminGrpc.getDeleteInstancePartitionMethod) + == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getDeleteInstancePartitionMethod = InstanceAdminGrpc.getDeleteInstancePartitionMethod) + == null) { + InstanceAdminGrpc.getDeleteInstancePartitionMethod = + getDeleteInstancePartitionMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "DeleteInstancePartition")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor( + new InstanceAdminMethodDescriptorSupplier("DeleteInstancePartition")) + .build(); + } + } + } + return getDeleteInstancePartitionMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest, + com.google.longrunning.Operation> + getUpdateInstancePartitionMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UpdateInstancePartition", + requestType = com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest, + com.google.longrunning.Operation> + getUpdateInstancePartitionMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest, + com.google.longrunning.Operation> + 
getUpdateInstancePartitionMethod; + if ((getUpdateInstancePartitionMethod = InstanceAdminGrpc.getUpdateInstancePartitionMethod) + == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getUpdateInstancePartitionMethod = InstanceAdminGrpc.getUpdateInstancePartitionMethod) + == null) { + InstanceAdminGrpc.getUpdateInstancePartitionMethod = + getUpdateInstancePartitionMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "UpdateInstancePartition")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new InstanceAdminMethodDescriptorSupplier("UpdateInstancePartition")) + .build(); + } + } + } + return getUpdateInstancePartitionMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest, + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse> + getListInstancePartitionOperationsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListInstancePartitionOperations", + requestType = + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.class, + responseType = + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest, + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse> + getListInstancePartitionOperationsMethod() { + io.grpc.MethodDescriptor< + 
com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest, + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse> + getListInstancePartitionOperationsMethod; + if ((getListInstancePartitionOperationsMethod = + InstanceAdminGrpc.getListInstancePartitionOperationsMethod) + == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getListInstancePartitionOperationsMethod = + InstanceAdminGrpc.getListInstancePartitionOperationsMethod) + == null) { + InstanceAdminGrpc.getListInstancePartitionOperationsMethod = + getListInstancePartitionOperationsMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "ListInstancePartitionOperations")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1 + .ListInstancePartitionOperationsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1 + .ListInstancePartitionOperationsResponse.getDefaultInstance())) + .setSchemaDescriptor( + new InstanceAdminMethodDescriptorSupplier( + "ListInstancePartitionOperations")) + .build(); + } + } + } + return getListInstancePartitionOperationsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.MoveInstanceRequest, + com.google.longrunning.Operation> + getMoveInstanceMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "MoveInstance", + requestType = com.google.spanner.admin.instance.v1.MoveInstanceRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.MoveInstanceRequest, + com.google.longrunning.Operation> + getMoveInstanceMethod() { + 
io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.MoveInstanceRequest, + com.google.longrunning.Operation> + getMoveInstanceMethod; + if ((getMoveInstanceMethod = InstanceAdminGrpc.getMoveInstanceMethod) == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getMoveInstanceMethod = InstanceAdminGrpc.getMoveInstanceMethod) == null) { + InstanceAdminGrpc.getMoveInstanceMethod = + getMoveInstanceMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "MoveInstance")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.MoveInstanceRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new InstanceAdminMethodDescriptorSupplier("MoveInstance")) + .build(); + } + } + } + return getMoveInstanceMethod; + } + + /** Creates a new async stub that supports all call types for the service */ + public static InstanceAdminStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public InstanceAdminStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new InstanceAdminStub(channel, callOptions); + } + }; + return InstanceAdminStub.newStub(factory, channel); + } + + /** Creates a new blocking-style stub that supports all types of calls on the service */ + public static InstanceAdminBlockingV2Stub newBlockingV2Stub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public InstanceAdminBlockingV2Stub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new 
InstanceAdminBlockingV2Stub(channel, callOptions); + } + }; + return InstanceAdminBlockingV2Stub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static InstanceAdminBlockingStub newBlockingStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public InstanceAdminBlockingStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new InstanceAdminBlockingStub(channel, callOptions); + } + }; + return InstanceAdminBlockingStub.newStub(factory, channel); + } + + /** Creates a new ListenableFuture-style stub that supports unary calls on the service */ + public static InstanceAdminFutureStub newFutureStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public InstanceAdminFutureStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new InstanceAdminFutureStub(channel, callOptions); + } + }; + return InstanceAdminFutureStub.newStub(factory, channel); + } + + /** + * + * + *
    +   * Cloud Spanner Instance Admin API
    +   * The Cloud Spanner Instance Admin API can be used to create, delete,
    +   * modify and list instances. Instances are dedicated Cloud Spanner serving
    +   * and storage resources to be used by Cloud Spanner databases.
    +   * Each instance has a "configuration", which dictates where the
    +   * serving resources for the Cloud Spanner instance are located (e.g.,
    +   * US-central, Europe). Configurations are created by Google based on
    +   * resource availability.
    +   * Cloud Spanner billing is based on the instances that exist and their
    +   * sizes. After an instance exists, there are no additional
    +   * per-database or per-operation charges for use of the instance
    +   * (though there may be additional network bandwidth charges).
    +   * Instances offer isolation: problems with databases in one instance
    +   * will not affect other instances. However, within an instance
    +   * databases can affect each other. For example, if one database in an
    +   * instance receives a lot of requests and consumes most of the
    +   * instance resources, fewer resources are available for other
    +   * databases in that instance, and their performance may suffer.
    +   * 
    + */ + public interface AsyncService { + + /** + * + * + *
    +     * Lists the supported instance configurations for a given project.
    +     * Returns both Google-managed configurations and user-managed
    +     * configurations.
    +     * 
    + */ + default void listInstanceConfigs( + com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest request, + io.grpc.stub.StreamObserver< + com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListInstanceConfigsMethod(), responseObserver); + } + + /** + * + * + *
    +     * Gets information about a particular instance configuration.
    +     * 
    + */ + default void getInstanceConfig( + com.google.spanner.admin.instance.v1.GetInstanceConfigRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetInstanceConfigMethod(), responseObserver); + } + + /** + * + * + *
    +     * Creates an instance configuration and begins preparing it to be used. The
    +     * returned long-running operation
    +     * can be used to track the progress of preparing the new
    +     * instance configuration. The instance configuration name is assigned by the
    +     * caller. If the named instance configuration already exists,
    +     * `CreateInstanceConfig` returns `ALREADY_EXISTS`.
    +     * Immediately after the request returns:
    +     *   * The instance configuration is readable via the API, with all requested
    +     *     attributes. The instance configuration's
    +     *     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
    +     *     field is set to true. Its state is `CREATING`.
    +     * While the operation is pending:
    +     *   * Cancelling the operation renders the instance configuration immediately
    +     *     unreadable via the API.
    +     *   * Except for deleting the creating resource, all other attempts to modify
    +     *     the instance configuration are rejected.
    +     * Upon completion of the returned operation:
    +     *   * Instances can be created using the instance configuration.
    +     *   * The instance configuration's
    +     *   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
    +     *   field becomes false. Its state becomes `READY`.
    +     * The returned long-running operation will
    +     * have a name of the format
    +     * `<instance_config_name>/operations/<operation_id>` and can be used to track
    +     * creation of the instance configuration. The
    +     * metadata field type is
    +     * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
    +     * The response field type is
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if
    +     * successful.
    +     * Authorization requires `spanner.instanceConfigs.create` permission on
    +     * the resource
    +     * [parent][google.spanner.admin.instance.v1.CreateInstanceConfigRequest.parent].
    +     * 
    + */ + default void createInstanceConfig( + com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCreateInstanceConfigMethod(), responseObserver); + } + + /** + * + * + *
    +     * Updates an instance configuration. The returned
    +     * long-running operation can be used to track
    +     * the progress of updating the instance. If the named instance configuration
    +     * does not exist, returns `NOT_FOUND`.
    +     * Only user-managed configurations can be updated.
    +     * Immediately after the request returns:
    +     *   * The instance configuration's
    +     *     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
    +     *     field is set to true.
    +     * While the operation is pending:
    +     *   * Cancelling the operation sets its metadata's
    +     *     [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time].
    +     *     The operation is guaranteed to succeed at undoing all changes, after
    +     *     which point it terminates with a `CANCELLED` status.
    +     *   * All other attempts to modify the instance configuration are rejected.
    +     *   * Reading the instance configuration via the API continues to give the
    +     *     pre-request values.
    +     * Upon completion of the returned operation:
    +     *   * Creating instances using the instance configuration uses the new
    +     *     values.
    +     *   * The new values of the instance configuration are readable via the API.
    +     *   * The instance configuration's
    +     *   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
    +     *   field becomes false.
    +     * The returned long-running operation will
    +     * have a name of the format
    +     * `<instance_config_name>/operations/<operation_id>` and can be used to track
    +     * the instance configuration modification.  The
    +     * metadata field type is
    +     * [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata].
    +     * The response field type is
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if
    +     * successful.
    +     * Authorization requires `spanner.instanceConfigs.update` permission on
    +     * the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
    +     * 
    + */ + default void updateInstanceConfig( + com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getUpdateInstanceConfigMethod(), responseObserver); + } + + /** + * + * + *
    +     * Deletes the instance configuration. Deletion is only allowed when no
    +     * instances are using the configuration. If any instances are using
    +     * the configuration, returns `FAILED_PRECONDITION`.
    +     * Only user-managed configurations can be deleted.
    +     * Authorization requires `spanner.instanceConfigs.delete` permission on
    +     * the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
    +     * 
    + */ + default void deleteInstanceConfig( + com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getDeleteInstanceConfigMethod(), responseObserver); + } + + /** + * + * + *
    +     * Lists the user-managed instance configuration long-running
    +     * operations in the given project. An instance
    +     * configuration operation has a name of the form
    +     * `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`.
    +     * The long-running operation
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that have completed/failed/canceled within the last 7 days,
    +     * and pending operations. Operations returned are ordered by
    +     * `operation.metadata.value.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + */ + default void listInstanceConfigOperations( + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest request, + io.grpc.stub.StreamObserver< + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListInstanceConfigOperationsMethod(), responseObserver); + } + + /** + * + * + *
    +     * Lists all instances in the given project.
    +     * 
    + */ + default void listInstances( + com.google.spanner.admin.instance.v1.ListInstancesRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListInstancesMethod(), responseObserver); + } + + /** + * + * + *
    +     * Lists all instance partitions for the given instance.
    +     * 
    + */ + default void listInstancePartitions( + com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest request, + io.grpc.stub.StreamObserver< + com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListInstancePartitionsMethod(), responseObserver); + } + + /** + * + * + *
    +     * Gets information about a particular instance.
    +     * 
    + */ + default void getInstance( + com.google.spanner.admin.instance.v1.GetInstanceRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetInstanceMethod(), responseObserver); + } + + /** + * + * + *
    +     * Creates an instance and begins preparing it to begin serving. The
    +     * returned long-running operation
    +     * can be used to track the progress of preparing the new
    +     * instance. The instance name is assigned by the caller. If the
    +     * named instance already exists, `CreateInstance` returns
    +     * `ALREADY_EXISTS`.
    +     * Immediately upon completion of this request:
    +     *   * The instance is readable via the API, with all requested attributes
    +     *     but no allocated resources. Its state is `CREATING`.
    +     * Until completion of the returned operation:
    +     *   * Cancelling the operation renders the instance immediately unreadable
    +     *     via the API.
    +     *   * The instance can be deleted.
    +     *   * All other attempts to modify the instance are rejected.
    +     * Upon completion of the returned operation:
    +     *   * Billing for all successfully-allocated resources begins (some types
    +     *     may have lower than the requested levels).
    +     *   * Databases can be created in the instance.
    +     *   * The instance's allocated resource levels are readable via the API.
    +     *   * The instance's state becomes `READY`.
    +     * The returned long-running operation will
    +     * have a name of the format `<instance_name>/operations/<operation_id>` and
    +     * can be used to track creation of the instance.  The
    +     * metadata field type is
    +     * [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata].
    +     * The response field type is
    +     * [Instance][google.spanner.admin.instance.v1.Instance], if successful.
    +     * 
    + */ + default void createInstance( + com.google.spanner.admin.instance.v1.CreateInstanceRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCreateInstanceMethod(), responseObserver); + } + + /** + * + * + *
    +     * Updates an instance, and begins allocating or releasing resources
    +     * as requested. The returned long-running operation can be used to track the
    +     * progress of updating the instance. If the named instance does not
    +     * exist, returns `NOT_FOUND`.
    +     * Immediately upon completion of this request:
    +     *   * For resource types for which a decrease in the instance's allocation
    +     *     has been requested, billing is based on the newly-requested level.
    +     * Until completion of the returned operation:
    +     *   * Cancelling the operation sets its metadata's
    +     *     [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time],
    +     *     and begins restoring resources to their pre-request values. The
    +     *     operation is guaranteed to succeed at undoing all resource changes,
    +     *     after which point it terminates with a `CANCELLED` status.
    +     *   * All other attempts to modify the instance are rejected.
    +     *   * Reading the instance via the API continues to give the pre-request
    +     *     resource levels.
    +     * Upon completion of the returned operation:
    +     *   * Billing begins for all successfully-allocated resources (some types
    +     *     may have lower than the requested levels).
    +     *   * All newly-reserved resources are available for serving the instance's
    +     *     tables.
    +     *   * The instance's new resource levels are readable via the API.
    +     * The returned long-running operation will
    +     * have a name of the format `<instance_name>/operations/<operation_id>` and
    +     * can be used to track the instance modification.  The
    +     * metadata field type is
    +     * [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata].
    +     * The response field type is
    +     * [Instance][google.spanner.admin.instance.v1.Instance], if successful.
    +     * Authorization requires `spanner.instances.update` permission on
    +     * the resource [name][google.spanner.admin.instance.v1.Instance.name].
    +     * 
    + */ + default void updateInstance( + com.google.spanner.admin.instance.v1.UpdateInstanceRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getUpdateInstanceMethod(), responseObserver); + } + + /** + * + * + *
    +     * Deletes an instance.
    +     * Immediately upon completion of the request:
    +     *   * Billing ceases for all of the instance's reserved resources.
    +     * Soon afterward:
    +     *   * The instance and *all of its databases* immediately and
    +     *     irrevocably disappear from the API. All data in the databases
    +     *     is permanently deleted.
    +     * 
    + */ + default void deleteInstance( + com.google.spanner.admin.instance.v1.DeleteInstanceRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getDeleteInstanceMethod(), responseObserver); + } + + /** + * + * + *
    +     * Sets the access control policy on an instance resource. Replaces any
    +     * existing policy.
    +     * Authorization requires `spanner.instances.setIamPolicy` on
    +     * [resource][google.iam.v1.SetIamPolicyRequest.resource].
    +     * 
    + */ + default void setIamPolicy( + com.google.iam.v1.SetIamPolicyRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getSetIamPolicyMethod(), responseObserver); + } + + /** + * + * + *
    +     * Gets the access control policy for an instance resource. Returns an empty
    +     * policy if an instance exists but does not have a policy set.
    +     * Authorization requires `spanner.instances.getIamPolicy` on
    +     * [resource][google.iam.v1.GetIamPolicyRequest.resource].
    +     * 
    + */ + default void getIamPolicy( + com.google.iam.v1.GetIamPolicyRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetIamPolicyMethod(), responseObserver); + } + + /** + * + * + *
    +     * Returns permissions that the caller has on the specified instance resource.
    +     * Attempting this RPC on a non-existent Cloud Spanner instance resource will
    +     * result in a NOT_FOUND error if the user has `spanner.instances.list`
    +     * permission on the containing Google Cloud Project. Otherwise returns an
    +     * empty set of permissions.
    +     * 
    + */ + default void testIamPermissions( + com.google.iam.v1.TestIamPermissionsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getTestIamPermissionsMethod(), responseObserver); + } + + /** + * + * + *
    +     * Gets information about a particular instance partition.
    +     * 
    + */ + default void getInstancePartition( + com.google.spanner.admin.instance.v1.GetInstancePartitionRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetInstancePartitionMethod(), responseObserver); + } + + /** + * + * + *
    +     * Creates an instance partition and begins preparing it to be used. The
    +     * returned long-running operation
    +     * can be used to track the progress of preparing the new instance partition.
    +     * The instance partition name is assigned by the caller. If the named
    +     * instance partition already exists, `CreateInstancePartition` returns
    +     * `ALREADY_EXISTS`.
    +     * Immediately upon completion of this request:
    +     *   * The instance partition is readable via the API, with all requested
    +     *     attributes but no allocated resources. Its state is `CREATING`.
    +     * Until completion of the returned operation:
    +     *   * Cancelling the operation renders the instance partition immediately
    +     *     unreadable via the API.
    +     *   * The instance partition can be deleted.
    +     *   * All other attempts to modify the instance partition are rejected.
    +     * Upon completion of the returned operation:
    +     *   * Billing for all successfully-allocated resources begins (some types
    +     *     may have lower than the requested levels).
    +     *   * Databases can start using this instance partition.
    +     *   * The instance partition's allocated resource levels are readable via the
    +     *     API.
    +     *   * The instance partition's state becomes `READY`.
    +     * The returned long-running operation will
    +     * have a name of the format
    +     * `<instance_partition_name>/operations/<operation_id>` and can be used to
    +     * track creation of the instance partition.  The
    +     * metadata field type is
    +     * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
    +     * The response field type is
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if
    +     * successful.
    +     * 
    + */ + default void createInstancePartition( + com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCreateInstancePartitionMethod(), responseObserver); + } + + /** + * + * + *
    +     * Deletes an existing instance partition. Requires that the
    +     * instance partition is not used by any database or backup and is not the
    +     * default instance partition of an instance.
    +     * Authorization requires `spanner.instancePartitions.delete` permission on
    +     * the resource
    +     * [name][google.spanner.admin.instance.v1.InstancePartition.name].
    +     * 
    + */ + default void deleteInstancePartition( + com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getDeleteInstancePartitionMethod(), responseObserver); + } + + /** + * + * + *
    +     * Updates an instance partition, and begins allocating or releasing resources
    +     * as requested. The returned long-running operation can be used to track the
    +     * progress of updating the instance partition. If the named instance
    +     * partition does not exist, returns `NOT_FOUND`.
    +     * Immediately upon completion of this request:
    +     *   * For resource types for which a decrease in the instance partition's
    +     *   allocation has been requested, billing is based on the newly-requested
    +     *   level.
    +     * Until completion of the returned operation:
    +     *   * Cancelling the operation sets its metadata's
    +     *     [cancel_time][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.cancel_time],
    +     *     and begins restoring resources to their pre-request values. The
    +     *     operation is guaranteed to succeed at undoing all resource changes,
    +     *     after which point it terminates with a `CANCELLED` status.
    +     *   * All other attempts to modify the instance partition are rejected.
    +     *   * Reading the instance partition via the API continues to give the
    +     *     pre-request resource levels.
    +     * Upon completion of the returned operation:
    +     *   * Billing begins for all successfully-allocated resources (some types
    +     *     may have lower than the requested levels).
    +     *   * All newly-reserved resources are available for serving the instance
    +     *     partition's tables.
    +     *   * The instance partition's new resource levels are readable via the API.
    +     * The returned long-running operation will
    +     * have a name of the format
    +     * `<instance_partition_name>/operations/<operation_id>` and can be used to
    +     * track the instance partition modification. The
    +     * metadata field type is
    +     * [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata].
    +     * The response field type is
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if
    +     * successful.
    +     * Authorization requires `spanner.instancePartitions.update` permission on
    +     * the resource
    +     * [name][google.spanner.admin.instance.v1.InstancePartition.name].
    +     * 
    + */ + default void updateInstancePartition( + com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getUpdateInstancePartitionMethod(), responseObserver); + } + + /** + * + * + *
    +     * Lists instance partition long-running operations in the given instance.
    +     * An instance partition operation has a name of the form
    +     * `projects/<project>/instances/<instance>/instancePartitions/<instance_partition>/operations/<operation>`.
    +     * The long-running operation
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that have completed/failed/canceled within the last 7 days,
    +     * and pending operations. Operations returned are ordered by
    +     * `operation.metadata.value.start_time` in descending order starting from the
    +     * most recently started operation.
    +     * Authorization requires `spanner.instancePartitionOperations.list`
    +     * permission on the resource
    +     * [parent][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.parent].
    +     * 
    + */ + default void listInstancePartitionOperations( + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest request, + io.grpc.stub.StreamObserver< + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListInstancePartitionOperationsMethod(), responseObserver); + } + + /** + * + * + *
    +     * Moves an instance to the target instance configuration. You can use the
    +     * returned long-running operation to track
    +     * the progress of moving the instance.
    +     * `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of
    +     * the following criteria:
    +     *   * Is undergoing a move to a different instance configuration
    +     *   * Has backups
    +     *   * Has an ongoing update
    +     *   * Contains any CMEK-enabled databases
    +     *   * Is a free trial instance
    +     * While the operation is pending:
    +     *   * All other attempts to modify the instance, including changes to its
    +     *     compute capacity, are rejected.
    +     *   * The following database and backup admin operations are rejected:
    +     *     * `DatabaseAdmin.CreateDatabase`
    +     *     * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is
    +     *        specified in the request.)
    +     *     * `DatabaseAdmin.RestoreDatabase`
    +     *     * `DatabaseAdmin.CreateBackup`
    +     *     * `DatabaseAdmin.CopyBackup`
    +     *   * Both the source and target instance configurations are subject to
    +     *     hourly compute and storage charges.
    +     *   * The instance might experience higher read-write latencies and a higher
    +     *     transaction abort rate. However, moving an instance doesn't cause any
    +     *     downtime.
    +     * The returned long-running operation has
    +     * a name of the format
    +     * `<instance_name>/operations/<operation_id>` and can be used to track
    +     * the move instance operation. The
    +     * metadata field type is
    +     * [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata].
    +     * The response field type is
    +     * [Instance][google.spanner.admin.instance.v1.Instance],
    +     * if successful.
    +     * Cancelling the operation sets its metadata's
    +     * [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time].
    +     * Cancellation is not immediate because it involves moving any data
    +     * previously moved to the target instance configuration back to the original
    +     * instance configuration. You can use this operation to track the progress of
    +     * the cancellation. Upon successful completion of the cancellation, the
    +     * operation terminates with `CANCELLED` status.
    +     * If not cancelled, upon completion of the returned operation:
    +     *   * The instance successfully moves to the target instance
    +     *     configuration.
    +     *   * You are billed for compute and storage in target instance
    +     *   configuration.
    +     * Authorization requires the `spanner.instances.update` permission on
    +     * the resource [instance][google.spanner.admin.instance.v1.Instance].
    +     * For more details, see
    +     * [Move an instance](https://cloud.google.com/spanner/docs/move-instance).
    +     * 
    + */ + default void moveInstance( + com.google.spanner.admin.instance.v1.MoveInstanceRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getMoveInstanceMethod(), responseObserver); + } + } + + /** + * Base class for the server implementation of the service InstanceAdmin. + * + *
    +   * Cloud Spanner Instance Admin API
    +   * The Cloud Spanner Instance Admin API can be used to create, delete,
    +   * modify and list instances. Instances are dedicated Cloud Spanner serving
    +   * and storage resources to be used by Cloud Spanner databases.
    +   * Each instance has a "configuration", which dictates where the
    +   * serving resources for the Cloud Spanner instance are located (e.g.,
    +   * US-central, Europe). Configurations are created by Google based on
    +   * resource availability.
    +   * Cloud Spanner billing is based on the instances that exist and their
    +   * sizes. After an instance exists, there are no additional
    +   * per-database or per-operation charges for use of the instance
    +   * (though there may be additional network bandwidth charges).
    +   * Instances offer isolation: problems with databases in one instance
    +   * will not affect other instances. However, within an instance
    +   * databases can affect each other. For example, if one database in an
    +   * instance receives a lot of requests and consumes most of the
    +   * instance resources, fewer resources are available for other
    +   * databases in that instance, and their performance may suffer.
    +   * 
    + */ + public abstract static class InstanceAdminImplBase + implements io.grpc.BindableService, AsyncService { + + @java.lang.Override + public final io.grpc.ServerServiceDefinition bindService() { + return InstanceAdminGrpc.bindService(this); + } + } + + /** + * A stub to allow clients to do asynchronous rpc calls to service InstanceAdmin. + * + *
    +   * Cloud Spanner Instance Admin API
    +   * The Cloud Spanner Instance Admin API can be used to create, delete,
    +   * modify and list instances. Instances are dedicated Cloud Spanner serving
    +   * and storage resources to be used by Cloud Spanner databases.
    +   * Each instance has a "configuration", which dictates where the
    +   * serving resources for the Cloud Spanner instance are located (e.g.,
    +   * US-central, Europe). Configurations are created by Google based on
    +   * resource availability.
    +   * Cloud Spanner billing is based on the instances that exist and their
    +   * sizes. After an instance exists, there are no additional
    +   * per-database or per-operation charges for use of the instance
    +   * (though there may be additional network bandwidth charges).
    +   * Instances offer isolation: problems with databases in one instance
    +   * will not affect other instances. However, within an instance
    +   * databases can affect each other. For example, if one database in an
    +   * instance receives a lot of requests and consumes most of the
    +   * instance resources, fewer resources are available for other
    +   * databases in that instance, and their performance may suffer.
    +   * 
    + */ + public static final class InstanceAdminStub + extends io.grpc.stub.AbstractAsyncStub { + private InstanceAdminStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected InstanceAdminStub build(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new InstanceAdminStub(channel, callOptions); + } + + /** + * + * + *
    +     * Lists the supported instance configurations for a given project.
    +     * Returns both Google-managed configurations and user-managed
    +     * configurations.
    +     * 
    + */ + public void listInstanceConfigs( + com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest request, + io.grpc.stub.StreamObserver< + com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListInstanceConfigsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Gets information about a particular instance configuration.
    +     * 
    + */ + public void getInstanceConfig( + com.google.spanner.admin.instance.v1.GetInstanceConfigRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetInstanceConfigMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Creates an instance configuration and begins preparing it to be used. The
    +     * returned long-running operation
    +     * can be used to track the progress of preparing the new
    +     * instance configuration. The instance configuration name is assigned by the
    +     * caller. If the named instance configuration already exists,
    +     * `CreateInstanceConfig` returns `ALREADY_EXISTS`.
    +     * Immediately after the request returns:
    +     *   * The instance configuration is readable via the API, with all requested
    +     *     attributes. The instance configuration's
    +     *     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
    +     *     field is set to true. Its state is `CREATING`.
    +     * While the operation is pending:
    +     *   * Cancelling the operation renders the instance configuration immediately
    +     *     unreadable via the API.
    +     *   * Except for deleting the creating resource, all other attempts to modify
    +     *     the instance configuration are rejected.
    +     * Upon completion of the returned operation:
    +     *   * Instances can be created using the instance configuration.
    +     *   * The instance configuration's
    +     *   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
    +     *   field becomes false. Its state becomes `READY`.
    +     * The returned long-running operation will
    +     * have a name of the format
    +     * `<instance_config_name>/operations/<operation_id>` and can be used to track
    +     * creation of the instance configuration. The
    +     * metadata field type is
    +     * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
    +     * The response field type is
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if
    +     * successful.
    +     * Authorization requires `spanner.instanceConfigs.create` permission on
    +     * the resource
    +     * [parent][google.spanner.admin.instance.v1.CreateInstanceConfigRequest.parent].
    +     * 
    + */ + public void createInstanceConfig( + com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCreateInstanceConfigMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Updates an instance configuration. The returned
    +     * long-running operation can be used to track
    +     * the progress of updating the instance. If the named instance configuration
    +     * does not exist, returns `NOT_FOUND`.
    +     * Only user-managed configurations can be updated.
    +     * Immediately after the request returns:
    +     *   * The instance configuration's
    +     *     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
    +     *     field is set to true.
    +     * While the operation is pending:
    +     *   * Cancelling the operation sets its metadata's
    +     *     [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time].
    +     *     The operation is guaranteed to succeed at undoing all changes, after
    +     *     which point it terminates with a `CANCELLED` status.
    +     *   * All other attempts to modify the instance configuration are rejected.
    +     *   * Reading the instance configuration via the API continues to give the
    +     *     pre-request values.
    +     * Upon completion of the returned operation:
    +     *   * Creating instances using the instance configuration uses the new
    +     *     values.
    +     *   * The new values of the instance configuration are readable via the API.
    +     *   * The instance configuration's
    +     *   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
    +     *   field becomes false.
    +     * The returned long-running operation will
    +     * have a name of the format
    +     * `<instance_config_name>/operations/<operation_id>` and can be used to track
    +     * the instance configuration modification.  The
    +     * metadata field type is
    +     * [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata].
    +     * The response field type is
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if
    +     * successful.
    +     * Authorization requires `spanner.instanceConfigs.update` permission on
    +     * the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
    +     * 
    + */ + public void updateInstanceConfig( + com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getUpdateInstanceConfigMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Deletes the instance configuration. Deletion is only allowed when no
    +     * instances are using the configuration. If any instances are using
    +     * the configuration, returns `FAILED_PRECONDITION`.
    +     * Only user-managed configurations can be deleted.
    +     * Authorization requires `spanner.instanceConfigs.delete` permission on
    +     * the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
    +     * 
    + */ + public void deleteInstanceConfig( + com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getDeleteInstanceConfigMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Lists the user-managed instance configuration long-running
    +     * operations in the given project. An instance
    +     * configuration operation has a name of the form
    +     * `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`.
    +     * The long-running operation
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that have completed/failed/canceled within the last 7 days,
    +     * and pending operations. Operations returned are ordered by
    +     * `operation.metadata.value.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + */ + public void listInstanceConfigOperations( + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest request, + io.grpc.stub.StreamObserver< + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListInstanceConfigOperationsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Lists all instances in the given project.
    +     * 
    + */ + public void listInstances( + com.google.spanner.admin.instance.v1.ListInstancesRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListInstancesMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Lists all instance partitions for the given instance.
    +     * 
    + */ + public void listInstancePartitions( + com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest request, + io.grpc.stub.StreamObserver< + com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListInstancePartitionsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Gets information about a particular instance.
    +     * 
    + */ + public void getInstance( + com.google.spanner.admin.instance.v1.GetInstanceRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetInstanceMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Creates an instance and begins preparing it to begin serving. The
    +     * returned long-running operation
    +     * can be used to track the progress of preparing the new
    +     * instance. The instance name is assigned by the caller. If the
    +     * named instance already exists, `CreateInstance` returns
    +     * `ALREADY_EXISTS`.
    +     * Immediately upon completion of this request:
    +     *   * The instance is readable via the API, with all requested attributes
    +     *     but no allocated resources. Its state is `CREATING`.
    +     * Until completion of the returned operation:
    +     *   * Cancelling the operation renders the instance immediately unreadable
    +     *     via the API.
    +     *   * The instance can be deleted.
    +     *   * All other attempts to modify the instance are rejected.
    +     * Upon completion of the returned operation:
    +     *   * Billing for all successfully-allocated resources begins (some types
    +     *     may have lower than the requested levels).
    +     *   * Databases can be created in the instance.
    +     *   * The instance's allocated resource levels are readable via the API.
    +     *   * The instance's state becomes `READY`.
    +     * The returned long-running operation will
    +     * have a name of the format `<instance_name>/operations/<operation_id>` and
    +     * can be used to track creation of the instance.  The
    +     * metadata field type is
    +     * [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata].
    +     * The response field type is
    +     * [Instance][google.spanner.admin.instance.v1.Instance], if successful.
    +     * 
    + */ + public void createInstance( + com.google.spanner.admin.instance.v1.CreateInstanceRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCreateInstanceMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Updates an instance, and begins allocating or releasing resources
    +     * as requested. The returned long-running operation can be used to track the
    +     * progress of updating the instance. If the named instance does not
    +     * exist, returns `NOT_FOUND`.
    +     * Immediately upon completion of this request:
    +     *   * For resource types for which a decrease in the instance's allocation
    +     *     has been requested, billing is based on the newly-requested level.
    +     * Until completion of the returned operation:
    +     *   * Cancelling the operation sets its metadata's
    +     *     [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time],
    +     *     and begins restoring resources to their pre-request values. The
    +     *     operation is guaranteed to succeed at undoing all resource changes,
    +     *     after which point it terminates with a `CANCELLED` status.
    +     *   * All other attempts to modify the instance are rejected.
    +     *   * Reading the instance via the API continues to give the pre-request
    +     *     resource levels.
    +     * Upon completion of the returned operation:
    +     *   * Billing begins for all successfully-allocated resources (some types
    +     *     may have lower than the requested levels).
    +     *   * All newly-reserved resources are available for serving the instance's
    +     *     tables.
    +     *   * The instance's new resource levels are readable via the API.
    +     * The returned long-running operation will
    +     * have a name of the format `<instance_name>/operations/<operation_id>` and
    +     * can be used to track the instance modification.  The
    +     * metadata field type is
    +     * [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata].
    +     * The response field type is
    +     * [Instance][google.spanner.admin.instance.v1.Instance], if successful.
    +     * Authorization requires `spanner.instances.update` permission on
    +     * the resource [name][google.spanner.admin.instance.v1.Instance.name].
    +     * 
    + */ + public void updateInstance( + com.google.spanner.admin.instance.v1.UpdateInstanceRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getUpdateInstanceMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Deletes an instance.
    +     * Immediately upon completion of the request:
    +     *   * Billing ceases for all of the instance's reserved resources.
    +     * Soon afterward:
    +     *   * The instance and *all of its databases* immediately and
    +     *     irrevocably disappear from the API. All data in the databases
    +     *     is permanently deleted.
    +     * 
    + */ + public void deleteInstance( + com.google.spanner.admin.instance.v1.DeleteInstanceRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getDeleteInstanceMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Sets the access control policy on an instance resource. Replaces any
    +     * existing policy.
    +     * Authorization requires `spanner.instances.setIamPolicy` on
    +     * [resource][google.iam.v1.SetIamPolicyRequest.resource].
    +     * 
    + */ + public void setIamPolicy( + com.google.iam.v1.SetIamPolicyRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getSetIamPolicyMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Gets the access control policy for an instance resource. Returns an empty
    +     * policy if an instance exists but does not have a policy set.
    +     * Authorization requires `spanner.instances.getIamPolicy` on
    +     * [resource][google.iam.v1.GetIamPolicyRequest.resource].
    +     * 
    + */ + public void getIamPolicy( + com.google.iam.v1.GetIamPolicyRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetIamPolicyMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Returns permissions that the caller has on the specified instance resource.
    +     * Attempting this RPC on a non-existent Cloud Spanner instance resource will
    +     * result in a NOT_FOUND error if the user has `spanner.instances.list`
    +     * permission on the containing Google Cloud Project. Otherwise returns an
    +     * empty set of permissions.
    +     * 
    + */ + public void testIamPermissions( + com.google.iam.v1.TestIamPermissionsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getTestIamPermissionsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Gets information about a particular instance partition.
    +     * 
    + */ + public void getInstancePartition( + com.google.spanner.admin.instance.v1.GetInstancePartitionRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetInstancePartitionMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Creates an instance partition and begins preparing it to be used. The
    +     * returned long-running operation
    +     * can be used to track the progress of preparing the new instance partition.
    +     * The instance partition name is assigned by the caller. If the named
    +     * instance partition already exists, `CreateInstancePartition` returns
    +     * `ALREADY_EXISTS`.
    +     * Immediately upon completion of this request:
    +     *   * The instance partition is readable via the API, with all requested
    +     *     attributes but no allocated resources. Its state is `CREATING`.
    +     * Until completion of the returned operation:
    +     *   * Cancelling the operation renders the instance partition immediately
    +     *     unreadable via the API.
    +     *   * The instance partition can be deleted.
    +     *   * All other attempts to modify the instance partition are rejected.
    +     * Upon completion of the returned operation:
    +     *   * Billing for all successfully-allocated resources begins (some types
    +     *     may have lower than the requested levels).
    +     *   * Databases can start using this instance partition.
    +     *   * The instance partition's allocated resource levels are readable via the
    +     *     API.
    +     *   * The instance partition's state becomes `READY`.
    +     * The returned long-running operation will
    +     * have a name of the format
    +     * `<instance_partition_name>/operations/<operation_id>` and can be used to
    +     * track creation of the instance partition.  The
    +     * metadata field type is
    +     * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
    +     * The response field type is
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if
    +     * successful.
    +     * 
    + */ + public void createInstancePartition( + com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCreateInstancePartitionMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Deletes an existing instance partition. Requires that the
    +     * instance partition is not used by any database or backup and is not the
    +     * default instance partition of an instance.
    +     * Authorization requires `spanner.instancePartitions.delete` permission on
    +     * the resource
    +     * [name][google.spanner.admin.instance.v1.InstancePartition.name].
    +     * 
    + */ + public void deleteInstancePartition( + com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getDeleteInstancePartitionMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Updates an instance partition, and begins allocating or releasing resources
    +     * as requested. The returned long-running operation can be used to track the
    +     * progress of updating the instance partition. If the named instance
    +     * partition does not exist, returns `NOT_FOUND`.
    +     * Immediately upon completion of this request:
    +     *   * For resource types for which a decrease in the instance partition's
    +     *   allocation has been requested, billing is based on the newly-requested
    +     *   level.
    +     * Until completion of the returned operation:
    +     *   * Cancelling the operation sets its metadata's
    +     *     [cancel_time][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.cancel_time],
    +     *     and begins restoring resources to their pre-request values. The
    +     *     operation is guaranteed to succeed at undoing all resource changes,
    +     *     after which point it terminates with a `CANCELLED` status.
    +     *   * All other attempts to modify the instance partition are rejected.
    +     *   * Reading the instance partition via the API continues to give the
    +     *     pre-request resource levels.
    +     * Upon completion of the returned operation:
    +     *   * Billing begins for all successfully-allocated resources (some types
    +     *     may have lower than the requested levels).
    +     *   * All newly-reserved resources are available for serving the instance
    +     *     partition's tables.
    +     *   * The instance partition's new resource levels are readable via the API.
    +     * The returned long-running operation will
    +     * have a name of the format
    +     * `<instance_partition_name>/operations/<operation_id>` and can be used to
    +     * track the instance partition modification. The
    +     * metadata field type is
    +     * [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata].
    +     * The response field type is
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if
    +     * successful.
    +     * Authorization requires `spanner.instancePartitions.update` permission on
    +     * the resource
    +     * [name][google.spanner.admin.instance.v1.InstancePartition.name].
    +     * 
    + */ + public void updateInstancePartition( + com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getUpdateInstancePartitionMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Lists instance partition long-running operations in the given instance.
    +     * An instance partition operation has a name of the form
    +     * `projects/<project>/instances/<instance>/instancePartitions/<instance_partition>/operations/<operation>`.
    +     * The long-running operation
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that have completed/failed/canceled within the last 7 days,
    +     * and pending operations. Operations returned are ordered by
    +     * `operation.metadata.value.start_time` in descending order starting from the
    +     * most recently started operation.
    +     * Authorization requires `spanner.instancePartitionOperations.list`
    +     * permission on the resource
    +     * [parent][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.parent].
    +     * 
    + */ + public void listInstancePartitionOperations( + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest request, + io.grpc.stub.StreamObserver< + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListInstancePartitionOperationsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Moves an instance to the target instance configuration. You can use the
    +     * returned long-running operation to track
    +     * the progress of moving the instance.
    +     * `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of
    +     * the following criteria:
    +     *   * Is undergoing a move to a different instance configuration
    +     *   * Has backups
    +     *   * Has an ongoing update
    +     *   * Contains any CMEK-enabled databases
    +     *   * Is a free trial instance
    +     * While the operation is pending:
    +     *   * All other attempts to modify the instance, including changes to its
    +     *     compute capacity, are rejected.
    +     *   * The following database and backup admin operations are rejected:
    +     *     * `DatabaseAdmin.CreateDatabase`
    +     *     * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is
    +     *        specified in the request.)
    +     *     * `DatabaseAdmin.RestoreDatabase`
    +     *     * `DatabaseAdmin.CreateBackup`
    +     *     * `DatabaseAdmin.CopyBackup`
    +     *   * Both the source and target instance configurations are subject to
    +     *     hourly compute and storage charges.
    +     *   * The instance might experience higher read-write latencies and a higher
    +     *     transaction abort rate. However, moving an instance doesn't cause any
    +     *     downtime.
    +     * The returned long-running operation has
    +     * a name of the format
    +     * `<instance_name>/operations/<operation_id>` and can be used to track
    +     * the move instance operation. The
    +     * metadata field type is
    +     * [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata].
    +     * The response field type is
    +     * [Instance][google.spanner.admin.instance.v1.Instance],
    +     * if successful.
    +     * Cancelling the operation sets its metadata's
    +     * [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time].
    +     * Cancellation is not immediate because it involves moving any data
    +     * previously moved to the target instance configuration back to the original
    +     * instance configuration. You can use this operation to track the progress of
    +     * the cancellation. Upon successful completion of the cancellation, the
    +     * operation terminates with `CANCELLED` status.
    +     * If not cancelled, upon completion of the returned operation:
    +     *   * The instance successfully moves to the target instance
    +     *     configuration.
    +     *   * You are billed for compute and storage in target instance
    +     *   configuration.
    +     * Authorization requires the `spanner.instances.update` permission on
    +     * the resource [instance][google.spanner.admin.instance.v1.Instance].
    +     * For more details, see
    +     * [Move an instance](https://cloud.google.com/spanner/docs/move-instance).
    +     * 
    + */ + public void moveInstance( + com.google.spanner.admin.instance.v1.MoveInstanceRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getMoveInstanceMethod(), getCallOptions()), + request, + responseObserver); + } + } + + /** + * A stub to allow clients to do synchronous rpc calls to service InstanceAdmin. + * + *
    +   * Cloud Spanner Instance Admin API
    +   * The Cloud Spanner Instance Admin API can be used to create, delete,
    +   * modify and list instances. Instances are dedicated Cloud Spanner serving
    +   * and storage resources to be used by Cloud Spanner databases.
    +   * Each instance has a "configuration", which dictates where the
    +   * serving resources for the Cloud Spanner instance are located (e.g.,
    +   * US-central, Europe). Configurations are created by Google based on
    +   * resource availability.
    +   * Cloud Spanner billing is based on the instances that exist and their
    +   * sizes. After an instance exists, there are no additional
    +   * per-database or per-operation charges for use of the instance
    +   * (though there may be additional network bandwidth charges).
    +   * Instances offer isolation: problems with databases in one instance
    +   * will not affect other instances. However, within an instance
    +   * databases can affect each other. For example, if one database in an
    +   * instance receives a lot of requests and consumes most of the
    +   * instance resources, fewer resources are available for other
    +   * databases in that instance, and their performance may suffer.
    +   * 
    + */ + public static final class InstanceAdminBlockingV2Stub + extends io.grpc.stub.AbstractBlockingStub { + private InstanceAdminBlockingV2Stub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected InstanceAdminBlockingV2Stub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new InstanceAdminBlockingV2Stub(channel, callOptions); + } + + /** + * + * + *
    +     * Lists the supported instance configurations for a given project.
    +     * Returns both Google-managed configurations and user-managed
    +     * configurations.
    +     * 
    + */ + public com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse listInstanceConfigs( + com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getListInstanceConfigsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Gets information about a particular instance configuration.
    +     * 
    + */ + public com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig( + com.google.spanner.admin.instance.v1.GetInstanceConfigRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetInstanceConfigMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Creates an instance configuration and begins preparing it to be used. The
    +     * returned long-running operation
    +     * can be used to track the progress of preparing the new
    +     * instance configuration. The instance configuration name is assigned by the
    +     * caller. If the named instance configuration already exists,
    +     * `CreateInstanceConfig` returns `ALREADY_EXISTS`.
    +     * Immediately after the request returns:
    +     *   * The instance configuration is readable via the API, with all requested
    +     *     attributes. The instance configuration's
    +     *     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
    +     *     field is set to true. Its state is `CREATING`.
    +     * While the operation is pending:
    +     *   * Cancelling the operation renders the instance configuration immediately
    +     *     unreadable via the API.
    +     *   * Except for deleting the creating resource, all other attempts to modify
    +     *     the instance configuration are rejected.
    +     * Upon completion of the returned operation:
    +     *   * Instances can be created using the instance configuration.
    +     *   * The instance configuration's
    +     *   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
    +     *   field becomes false. Its state becomes `READY`.
    +     * The returned long-running operation will
    +     * have a name of the format
    +     * `<instance_config_name>/operations/<operation_id>` and can be used to track
    +     * creation of the instance configuration. The
    +     * metadata field type is
    +     * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
    +     * The response field type is
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if
    +     * successful.
    +     * Authorization requires `spanner.instanceConfigs.create` permission on
    +     * the resource
    +     * [parent][google.spanner.admin.instance.v1.CreateInstanceConfigRequest.parent].
    +     * 
    + */ + public com.google.longrunning.Operation createInstanceConfig( + com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getCreateInstanceConfigMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Updates an instance configuration. The returned
    +     * long-running operation can be used to track
    +     * the progress of updating the instance. If the named instance configuration
    +     * does not exist, returns `NOT_FOUND`.
    +     * Only user-managed configurations can be updated.
    +     * Immediately after the request returns:
    +     *   * The instance configuration's
    +     *     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
    +     *     field is set to true.
    +     * While the operation is pending:
    +     *   * Cancelling the operation sets its metadata's
    +     *     [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time].
    +     *     The operation is guaranteed to succeed at undoing all changes, after
    +     *     which point it terminates with a `CANCELLED` status.
    +     *   * All other attempts to modify the instance configuration are rejected.
    +     *   * Reading the instance configuration via the API continues to give the
    +     *     pre-request values.
    +     * Upon completion of the returned operation:
    +     *   * Creating instances using the instance configuration uses the new
    +     *     values.
    +     *   * The new values of the instance configuration are readable via the API.
    +     *   * The instance configuration's
    +     *   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
    +     *   field becomes false.
    +     * The returned long-running operation will
    +     * have a name of the format
    +     * `<instance_config_name>/operations/<operation_id>` and can be used to track
    +     * the instance configuration modification.  The
    +     * metadata field type is
    +     * [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata].
    +     * The response field type is
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if
    +     * successful.
    +     * Authorization requires `spanner.instanceConfigs.update` permission on
    +     * the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
    +     * 
    + */ + public com.google.longrunning.Operation updateInstanceConfig( + com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getUpdateInstanceConfigMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Deletes the instance configuration. Deletion is only allowed when no
    +     * instances are using the configuration. If any instances are using
    +     * the configuration, returns `FAILED_PRECONDITION`.
    +     * Only user-managed configurations can be deleted.
    +     * Authorization requires `spanner.instanceConfigs.delete` permission on
    +     * the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
    +     * 
    + */ + public com.google.protobuf.Empty deleteInstanceConfig( + com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getDeleteInstanceConfigMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Lists the user-managed instance configuration long-running
    +     * operations in the given project. An instance
    +     * configuration operation has a name of the form
    +     * `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`.
    +     * The long-running operation
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that have completed/failed/canceled within the last 7 days,
    +     * and pending operations. Operations returned are ordered by
    +     * `operation.metadata.value.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + */ + public com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse + listInstanceConfigOperations( + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getListInstanceConfigOperationsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Lists all instances in the given project.
    +     * 
    + */ + public com.google.spanner.admin.instance.v1.ListInstancesResponse listInstances( + com.google.spanner.admin.instance.v1.ListInstancesRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getListInstancesMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Lists all instance partitions for the given instance.
    +     * 
    + */ + public com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse + listInstancePartitions( + com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getListInstancePartitionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Gets information about a particular instance.
    +     * 
    + */ + public com.google.spanner.admin.instance.v1.Instance getInstance( + com.google.spanner.admin.instance.v1.GetInstanceRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetInstanceMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Creates an instance and begins preparing it to begin serving. The
    +     * returned long-running operation
    +     * can be used to track the progress of preparing the new
    +     * instance. The instance name is assigned by the caller. If the
    +     * named instance already exists, `CreateInstance` returns
    +     * `ALREADY_EXISTS`.
    +     * Immediately upon completion of this request:
    +     *   * The instance is readable via the API, with all requested attributes
    +     *     but no allocated resources. Its state is `CREATING`.
    +     * Until completion of the returned operation:
    +     *   * Cancelling the operation renders the instance immediately unreadable
    +     *     via the API.
    +     *   * The instance can be deleted.
    +     *   * All other attempts to modify the instance are rejected.
    +     * Upon completion of the returned operation:
    +     *   * Billing for all successfully-allocated resources begins (some types
    +     *     may have lower than the requested levels).
    +     *   * Databases can be created in the instance.
    +     *   * The instance's allocated resource levels are readable via the API.
    +     *   * The instance's state becomes `READY`.
    +     * The returned long-running operation will
    +     * have a name of the format `<instance_name>/operations/<operation_id>` and
    +     * can be used to track creation of the instance.  The
    +     * metadata field type is
    +     * [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata].
    +     * The response field type is
    +     * [Instance][google.spanner.admin.instance.v1.Instance], if successful.
    +     * 
    + */ + public com.google.longrunning.Operation createInstance( + com.google.spanner.admin.instance.v1.CreateInstanceRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getCreateInstanceMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Updates an instance, and begins allocating or releasing resources
    +     * as requested. The returned long-running operation can be used to track the
    +     * progress of updating the instance. If the named instance does not
    +     * exist, returns `NOT_FOUND`.
    +     * Immediately upon completion of this request:
    +     *   * For resource types for which a decrease in the instance's allocation
    +     *     has been requested, billing is based on the newly-requested level.
    +     * Until completion of the returned operation:
    +     *   * Cancelling the operation sets its metadata's
    +     *     [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time],
    +     *     and begins restoring resources to their pre-request values. The
    +     *     operation is guaranteed to succeed at undoing all resource changes,
    +     *     after which point it terminates with a `CANCELLED` status.
    +     *   * All other attempts to modify the instance are rejected.
    +     *   * Reading the instance via the API continues to give the pre-request
    +     *     resource levels.
    +     * Upon completion of the returned operation:
    +     *   * Billing begins for all successfully-allocated resources (some types
    +     *     may have lower than the requested levels).
    +     *   * All newly-reserved resources are available for serving the instance's
    +     *     tables.
    +     *   * The instance's new resource levels are readable via the API.
    +     * The returned long-running operation will
    +     * have a name of the format `<instance_name>/operations/<operation_id>` and
    +     * can be used to track the instance modification.  The
    +     * metadata field type is
    +     * [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata].
    +     * The response field type is
    +     * [Instance][google.spanner.admin.instance.v1.Instance], if successful.
    +     * Authorization requires `spanner.instances.update` permission on
    +     * the resource [name][google.spanner.admin.instance.v1.Instance.name].
    +     * 
    + */ + public com.google.longrunning.Operation updateInstance( + com.google.spanner.admin.instance.v1.UpdateInstanceRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getUpdateInstanceMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Deletes an instance.
    +     * Immediately upon completion of the request:
    +     *   * Billing ceases for all of the instance's reserved resources.
    +     * Soon afterward:
    +     *   * The instance and *all of its databases* immediately and
    +     *     irrevocably disappear from the API. All data in the databases
    +     *     is permanently deleted.
    +     * 
    + */ + public com.google.protobuf.Empty deleteInstance( + com.google.spanner.admin.instance.v1.DeleteInstanceRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getDeleteInstanceMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Sets the access control policy on an instance resource. Replaces any
    +     * existing policy.
    +     * Authorization requires `spanner.instances.setIamPolicy` on
    +     * [resource][google.iam.v1.SetIamPolicyRequest.resource].
    +     * 
    + */ + public com.google.iam.v1.Policy setIamPolicy(com.google.iam.v1.SetIamPolicyRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getSetIamPolicyMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Gets the access control policy for an instance resource. Returns an empty
    +     * policy if an instance exists but does not have a policy set.
    +     * Authorization requires `spanner.instances.getIamPolicy` on
    +     * [resource][google.iam.v1.GetIamPolicyRequest.resource].
    +     * 
    + */ + public com.google.iam.v1.Policy getIamPolicy(com.google.iam.v1.GetIamPolicyRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetIamPolicyMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Returns permissions that the caller has on the specified instance resource.
    +     * Attempting this RPC on a non-existent Cloud Spanner instance resource will
    +     * result in a NOT_FOUND error if the user has `spanner.instances.list`
    +     * permission on the containing Google Cloud Project. Otherwise returns an
    +     * empty set of permissions.
    +     * 
    + */ + public com.google.iam.v1.TestIamPermissionsResponse testIamPermissions( + com.google.iam.v1.TestIamPermissionsRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getTestIamPermissionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Gets information about a particular instance partition.
    +     * 
    + */ + public com.google.spanner.admin.instance.v1.InstancePartition getInstancePartition( + com.google.spanner.admin.instance.v1.GetInstancePartitionRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetInstancePartitionMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Creates an instance partition and begins preparing it to be used. The
    +     * returned long-running operation
    +     * can be used to track the progress of preparing the new instance partition.
    +     * The instance partition name is assigned by the caller. If the named
    +     * instance partition already exists, `CreateInstancePartition` returns
    +     * `ALREADY_EXISTS`.
    +     * Immediately upon completion of this request:
    +     *   * The instance partition is readable via the API, with all requested
    +     *     attributes but no allocated resources. Its state is `CREATING`.
    +     * Until completion of the returned operation:
    +     *   * Cancelling the operation renders the instance partition immediately
    +     *     unreadable via the API.
    +     *   * The instance partition can be deleted.
    +     *   * All other attempts to modify the instance partition are rejected.
    +     * Upon completion of the returned operation:
    +     *   * Billing for all successfully-allocated resources begins (some types
    +     *     may have lower than the requested levels).
    +     *   * Databases can start using this instance partition.
    +     *   * The instance partition's allocated resource levels are readable via the
    +     *     API.
    +     *   * The instance partition's state becomes `READY`.
    +     * The returned long-running operation will
    +     * have a name of the format
    +     * `<instance_partition_name>/operations/<operation_id>` and can be used to
    +     * track creation of the instance partition.  The
    +     * metadata field type is
    +     * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
    +     * The response field type is
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if
    +     * successful.
    +     * 
    + */ + public com.google.longrunning.Operation createInstancePartition( + com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getCreateInstancePartitionMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Deletes an existing instance partition. Requires that the
    +     * instance partition is not used by any database or backup and is not the
    +     * default instance partition of an instance.
    +     * Authorization requires `spanner.instancePartitions.delete` permission on
    +     * the resource
    +     * [name][google.spanner.admin.instance.v1.InstancePartition.name].
    +     * 
    + */ + public com.google.protobuf.Empty deleteInstancePartition( + com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getDeleteInstancePartitionMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Updates an instance partition, and begins allocating or releasing resources
    +     * as requested. The returned long-running operation can be used to track the
    +     * progress of updating the instance partition. If the named instance
    +     * partition does not exist, returns `NOT_FOUND`.
    +     * Immediately upon completion of this request:
    +     *   * For resource types for which a decrease in the instance partition's
    +     *   allocation has been requested, billing is based on the newly-requested
    +     *   level.
    +     * Until completion of the returned operation:
    +     *   * Cancelling the operation sets its metadata's
    +     *     [cancel_time][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.cancel_time],
    +     *     and begins restoring resources to their pre-request values. The
    +     *     operation is guaranteed to succeed at undoing all resource changes,
    +     *     after which point it terminates with a `CANCELLED` status.
    +     *   * All other attempts to modify the instance partition are rejected.
    +     *   * Reading the instance partition via the API continues to give the
    +     *     pre-request resource levels.
    +     * Upon completion of the returned operation:
    +     *   * Billing begins for all successfully-allocated resources (some types
    +     *     may have lower than the requested levels).
    +     *   * All newly-reserved resources are available for serving the instance
    +     *     partition's tables.
    +     *   * The instance partition's new resource levels are readable via the API.
    +     * The returned long-running operation will
    +     * have a name of the format
    +     * `<instance_partition_name>/operations/<operation_id>` and can be used to
    +     * track the instance partition modification. The
    +     * metadata field type is
    +     * [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata].
    +     * The response field type is
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if
    +     * successful.
    +     * Authorization requires `spanner.instancePartitions.update` permission on
    +     * the resource
    +     * [name][google.spanner.admin.instance.v1.InstancePartition.name].
    +     * 
    + */ + public com.google.longrunning.Operation updateInstancePartition( + com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getUpdateInstancePartitionMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Lists instance partition long-running operations in the given instance.
    +     * An instance partition operation has a name of the form
    +     * `projects/<project>/instances/<instance>/instancePartitions/<instance_partition>/operations/<operation>`.
    +     * The long-running operation
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that have completed/failed/canceled within the last 7 days,
    +     * and pending operations. Operations returned are ordered by
    +     * `operation.metadata.value.start_time` in descending order starting from the
    +     * most recently started operation.
    +     * Authorization requires `spanner.instancePartitionOperations.list`
    +     * permission on the resource
    +     * [parent][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.parent].
    +     * 
    + */ + public com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse + listInstancePartitionOperations( + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getListInstancePartitionOperationsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Moves an instance to the target instance configuration. You can use the
    +     * returned long-running operation to track
    +     * the progress of moving the instance.
    +     * `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of
    +     * the following criteria:
    +     *   * Is undergoing a move to a different instance configuration
    +     *   * Has backups
    +     *   * Has an ongoing update
    +     *   * Contains any CMEK-enabled databases
    +     *   * Is a free trial instance
    +     * While the operation is pending:
    +     *   * All other attempts to modify the instance, including changes to its
    +     *     compute capacity, are rejected.
    +     *   * The following database and backup admin operations are rejected:
    +     *     * `DatabaseAdmin.CreateDatabase`
    +     *     * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is
    +     *        specified in the request.)
    +     *     * `DatabaseAdmin.RestoreDatabase`
    +     *     * `DatabaseAdmin.CreateBackup`
    +     *     * `DatabaseAdmin.CopyBackup`
    +     *   * Both the source and target instance configurations are subject to
    +     *     hourly compute and storage charges.
    +     *   * The instance might experience higher read-write latencies and a higher
    +     *     transaction abort rate. However, moving an instance doesn't cause any
    +     *     downtime.
    +     * The returned long-running operation has
    +     * a name of the format
    +     * `<instance_name>/operations/<operation_id>` and can be used to track
    +     * the move instance operation. The
    +     * metadata field type is
    +     * [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata].
    +     * The response field type is
    +     * [Instance][google.spanner.admin.instance.v1.Instance],
    +     * if successful.
    +     * Cancelling the operation sets its metadata's
    +     * [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time].
    +     * Cancellation is not immediate because it involves moving any data
    +     * previously moved to the target instance configuration back to the original
    +     * instance configuration. You can use this operation to track the progress of
    +     * the cancellation. Upon successful completion of the cancellation, the
    +     * operation terminates with `CANCELLED` status.
    +     * If not cancelled, upon completion of the returned operation:
    +     *   * The instance successfully moves to the target instance
    +     *     configuration.
    +     *   * You are billed for compute and storage in target instance
    +     *   configuration.
    +     * Authorization requires the `spanner.instances.update` permission on
    +     * the resource [instance][google.spanner.admin.instance.v1.Instance].
    +     * For more details, see
    +     * [Move an instance](https://cloud.google.com/spanner/docs/move-instance).
    +     * 
    + */ + public com.google.longrunning.Operation moveInstance( + com.google.spanner.admin.instance.v1.MoveInstanceRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getMoveInstanceMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do limited synchronous rpc calls to service InstanceAdmin. + * + *
    +   * Cloud Spanner Instance Admin API
    +   * The Cloud Spanner Instance Admin API can be used to create, delete,
    +   * modify and list instances. Instances are dedicated Cloud Spanner serving
    +   * and storage resources to be used by Cloud Spanner databases.
    +   * Each instance has a "configuration", which dictates where the
    +   * serving resources for the Cloud Spanner instance are located (e.g.,
    +   * US-central, Europe). Configurations are created by Google based on
    +   * resource availability.
    +   * Cloud Spanner billing is based on the instances that exist and their
    +   * sizes. After an instance exists, there are no additional
    +   * per-database or per-operation charges for use of the instance
    +   * (though there may be additional network bandwidth charges).
    +   * Instances offer isolation: problems with databases in one instance
    +   * will not affect other instances. However, within an instance
    +   * databases can affect each other. For example, if one database in an
    +   * instance receives a lot of requests and consumes most of the
    +   * instance resources, fewer resources are available for other
    +   * databases in that instance, and their performance may suffer.
    +   * 
    + */ + public static final class InstanceAdminBlockingStub + extends io.grpc.stub.AbstractBlockingStub { + private InstanceAdminBlockingStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected InstanceAdminBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new InstanceAdminBlockingStub(channel, callOptions); + } + + /** + * + * + *
    +     * Lists the supported instance configurations for a given project.
    +     * Returns both Google-managed configurations and user-managed
    +     * configurations.
    +     * 
    + */ + public com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse listInstanceConfigs( + com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListInstanceConfigsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Gets information about a particular instance configuration.
    +     * 
    + */ + public com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig( + com.google.spanner.admin.instance.v1.GetInstanceConfigRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetInstanceConfigMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Creates an instance configuration and begins preparing it to be used. The
    +     * returned long-running operation
    +     * can be used to track the progress of preparing the new
    +     * instance configuration. The instance configuration name is assigned by the
    +     * caller. If the named instance configuration already exists,
    +     * `CreateInstanceConfig` returns `ALREADY_EXISTS`.
    +     * Immediately after the request returns:
    +     *   * The instance configuration is readable via the API, with all requested
    +     *     attributes. The instance configuration's
    +     *     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
    +     *     field is set to true. Its state is `CREATING`.
    +     * While the operation is pending:
    +     *   * Cancelling the operation renders the instance configuration immediately
    +     *     unreadable via the API.
    +     *   * Except for deleting the creating resource, all other attempts to modify
    +     *     the instance configuration are rejected.
    +     * Upon completion of the returned operation:
    +     *   * Instances can be created using the instance configuration.
    +     *   * The instance configuration's
    +     *   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
    +     *   field becomes false. Its state becomes `READY`.
    +     * The returned long-running operation will
    +     * have a name of the format
    +     * `<instance_config_name>/operations/<operation_id>` and can be used to track
    +     * creation of the instance configuration. The
    +     * metadata field type is
    +     * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
    +     * The response field type is
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if
    +     * successful.
    +     * Authorization requires `spanner.instanceConfigs.create` permission on
    +     * the resource
    +     * [parent][google.spanner.admin.instance.v1.CreateInstanceConfigRequest.parent].
    +     * 
    + */ + public com.google.longrunning.Operation createInstanceConfig( + com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCreateInstanceConfigMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Updates an instance configuration. The returned
    +     * long-running operation can be used to track
    +     * the progress of updating the instance. If the named instance configuration
    +     * does not exist, returns `NOT_FOUND`.
    +     * Only user-managed configurations can be updated.
    +     * Immediately after the request returns:
    +     *   * The instance configuration's
    +     *     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
    +     *     field is set to true.
    +     * While the operation is pending:
    +     *   * Cancelling the operation sets its metadata's
    +     *     [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time].
    +     *     The operation is guaranteed to succeed at undoing all changes, after
    +     *     which point it terminates with a `CANCELLED` status.
    +     *   * All other attempts to modify the instance configuration are rejected.
    +     *   * Reading the instance configuration via the API continues to give the
    +     *     pre-request values.
    +     * Upon completion of the returned operation:
    +     *   * Creating instances using the instance configuration uses the new
    +     *     values.
    +     *   * The new values of the instance configuration are readable via the API.
    +     *   * The instance configuration's
    +     *   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
    +     *   field becomes false.
    +     * The returned long-running operation will
    +     * have a name of the format
    +     * `<instance_config_name>/operations/<operation_id>` and can be used to track
    +     * the instance configuration modification.  The
    +     * metadata field type is
    +     * [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata].
    +     * The response field type is
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if
    +     * successful.
    +     * Authorization requires `spanner.instanceConfigs.update` permission on
    +     * the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
    +     * 
    + */ + public com.google.longrunning.Operation updateInstanceConfig( + com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getUpdateInstanceConfigMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Deletes the instance configuration. Deletion is only allowed when no
    +     * instances are using the configuration. If any instances are using
    +     * the configuration, returns `FAILED_PRECONDITION`.
    +     * Only user-managed configurations can be deleted.
    +     * Authorization requires `spanner.instanceConfigs.delete` permission on
    +     * the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
    +     * 
    + */ + public com.google.protobuf.Empty deleteInstanceConfig( + com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getDeleteInstanceConfigMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Lists the user-managed instance configuration long-running
    +     * operations in the given project. An instance
    +     * configuration operation has a name of the form
    +     * `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`.
    +     * The long-running operation
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that have completed/failed/canceled within the last 7 days,
    +     * and pending operations. Operations returned are ordered by
    +     * `operation.metadata.value.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + */ + public com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse + listInstanceConfigOperations( + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListInstanceConfigOperationsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Lists all instances in the given project.
    +     * 
    + */ + public com.google.spanner.admin.instance.v1.ListInstancesResponse listInstances( + com.google.spanner.admin.instance.v1.ListInstancesRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListInstancesMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Lists all instance partitions for the given instance.
    +     * 
    + */ + public com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse + listInstancePartitions( + com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListInstancePartitionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Gets information about a particular instance.
    +     * 
    + */ + public com.google.spanner.admin.instance.v1.Instance getInstance( + com.google.spanner.admin.instance.v1.GetInstanceRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetInstanceMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Creates an instance and begins preparing it to begin serving. The
    +     * returned long-running operation
    +     * can be used to track the progress of preparing the new
    +     * instance. The instance name is assigned by the caller. If the
    +     * named instance already exists, `CreateInstance` returns
    +     * `ALREADY_EXISTS`.
    +     * Immediately upon completion of this request:
    +     *   * The instance is readable via the API, with all requested attributes
    +     *     but no allocated resources. Its state is `CREATING`.
    +     * Until completion of the returned operation:
    +     *   * Cancelling the operation renders the instance immediately unreadable
    +     *     via the API.
    +     *   * The instance can be deleted.
    +     *   * All other attempts to modify the instance are rejected.
    +     * Upon completion of the returned operation:
    +     *   * Billing for all successfully-allocated resources begins (some types
    +     *     may have lower than the requested levels).
    +     *   * Databases can be created in the instance.
    +     *   * The instance's allocated resource levels are readable via the API.
    +     *   * The instance's state becomes `READY`.
    +     * The returned long-running operation will
    +     * have a name of the format `<instance_name>/operations/<operation_id>` and
    +     * can be used to track creation of the instance.  The
    +     * metadata field type is
    +     * [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata].
    +     * The response field type is
    +     * [Instance][google.spanner.admin.instance.v1.Instance], if successful.
    +     * 
    + */ + public com.google.longrunning.Operation createInstance( + com.google.spanner.admin.instance.v1.CreateInstanceRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCreateInstanceMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Updates an instance, and begins allocating or releasing resources
    +     * as requested. The returned long-running operation can be used to track the
    +     * progress of updating the instance. If the named instance does not
    +     * exist, returns `NOT_FOUND`.
    +     * Immediately upon completion of this request:
    +     *   * For resource types for which a decrease in the instance's allocation
    +     *     has been requested, billing is based on the newly-requested level.
    +     * Until completion of the returned operation:
    +     *   * Cancelling the operation sets its metadata's
    +     *     [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time],
    +     *     and begins restoring resources to their pre-request values. The
    +     *     operation is guaranteed to succeed at undoing all resource changes,
    +     *     after which point it terminates with a `CANCELLED` status.
    +     *   * All other attempts to modify the instance are rejected.
    +     *   * Reading the instance via the API continues to give the pre-request
    +     *     resource levels.
    +     * Upon completion of the returned operation:
    +     *   * Billing begins for all successfully-allocated resources (some types
    +     *     may have lower than the requested levels).
    +     *   * All newly-reserved resources are available for serving the instance's
    +     *     tables.
    +     *   * The instance's new resource levels are readable via the API.
    +     * The returned long-running operation will
    +     * have a name of the format `<instance_name>/operations/<operation_id>` and
    +     * can be used to track the instance modification.  The
    +     * metadata field type is
    +     * [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata].
    +     * The response field type is
    +     * [Instance][google.spanner.admin.instance.v1.Instance], if successful.
    +     * Authorization requires `spanner.instances.update` permission on
    +     * the resource [name][google.spanner.admin.instance.v1.Instance.name].
    +     * 
    + */ + public com.google.longrunning.Operation updateInstance( + com.google.spanner.admin.instance.v1.UpdateInstanceRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getUpdateInstanceMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Deletes an instance.
    +     * Immediately upon completion of the request:
    +     *   * Billing ceases for all of the instance's reserved resources.
    +     * Soon afterward:
    +     *   * The instance and *all of its databases* immediately and
    +     *     irrevocably disappear from the API. All data in the databases
    +     *     is permanently deleted.
    +     * 
    + */ + public com.google.protobuf.Empty deleteInstance( + com.google.spanner.admin.instance.v1.DeleteInstanceRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getDeleteInstanceMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Sets the access control policy on an instance resource. Replaces any
    +     * existing policy.
    +     * Authorization requires `spanner.instances.setIamPolicy` on
    +     * [resource][google.iam.v1.SetIamPolicyRequest.resource].
    +     * 
    + */ + public com.google.iam.v1.Policy setIamPolicy(com.google.iam.v1.SetIamPolicyRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getSetIamPolicyMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Gets the access control policy for an instance resource. Returns an empty
    +     * policy if an instance exists but does not have a policy set.
    +     * Authorization requires `spanner.instances.getIamPolicy` on
    +     * [resource][google.iam.v1.GetIamPolicyRequest.resource].
    +     * 
    + */ + public com.google.iam.v1.Policy getIamPolicy(com.google.iam.v1.GetIamPolicyRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetIamPolicyMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Returns permissions that the caller has on the specified instance resource.
    +     * Attempting this RPC on a non-existent Cloud Spanner instance resource will
    +     * result in a NOT_FOUND error if the user has `spanner.instances.list`
    +     * permission on the containing Google Cloud Project. Otherwise returns an
    +     * empty set of permissions.
    +     * 
    + */ + public com.google.iam.v1.TestIamPermissionsResponse testIamPermissions( + com.google.iam.v1.TestIamPermissionsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getTestIamPermissionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Gets information about a particular instance partition.
    +     * 
    + */ + public com.google.spanner.admin.instance.v1.InstancePartition getInstancePartition( + com.google.spanner.admin.instance.v1.GetInstancePartitionRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetInstancePartitionMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Creates an instance partition and begins preparing it to be used. The
    +     * returned long-running operation
    +     * can be used to track the progress of preparing the new instance partition.
    +     * The instance partition name is assigned by the caller. If the named
    +     * instance partition already exists, `CreateInstancePartition` returns
    +     * `ALREADY_EXISTS`.
    +     * Immediately upon completion of this request:
    +     *   * The instance partition is readable via the API, with all requested
    +     *     attributes but no allocated resources. Its state is `CREATING`.
    +     * Until completion of the returned operation:
    +     *   * Cancelling the operation renders the instance partition immediately
    +     *     unreadable via the API.
    +     *   * The instance partition can be deleted.
    +     *   * All other attempts to modify the instance partition are rejected.
    +     * Upon completion of the returned operation:
    +     *   * Billing for all successfully-allocated resources begins (some types
    +     *     may have lower than the requested levels).
    +     *   * Databases can start using this instance partition.
    +     *   * The instance partition's allocated resource levels are readable via the
    +     *     API.
    +     *   * The instance partition's state becomes `READY`.
    +     * The returned long-running operation will
    +     * have a name of the format
    +     * `<instance_partition_name>/operations/<operation_id>` and can be used to
    +     * track creation of the instance partition.  The
    +     * metadata field type is
    +     * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
    +     * The response field type is
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if
    +     * successful.
    +     * 
    + */ + public com.google.longrunning.Operation createInstancePartition( + com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCreateInstancePartitionMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Deletes an existing instance partition. Requires that the
    +     * instance partition is not used by any database or backup and is not the
    +     * default instance partition of an instance.
    +     * Authorization requires `spanner.instancePartitions.delete` permission on
    +     * the resource
    +     * [name][google.spanner.admin.instance.v1.InstancePartition.name].
    +     * 
    + */ + public com.google.protobuf.Empty deleteInstancePartition( + com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getDeleteInstancePartitionMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Updates an instance partition, and begins allocating or releasing resources
    +     * as requested. The returned long-running operation can be used to track the
    +     * progress of updating the instance partition. If the named instance
    +     * partition does not exist, returns `NOT_FOUND`.
    +     * Immediately upon completion of this request:
    +     *   * For resource types for which a decrease in the instance partition's
    +     *   allocation has been requested, billing is based on the newly-requested
    +     *   level.
    +     * Until completion of the returned operation:
    +     *   * Cancelling the operation sets its metadata's
    +     *     [cancel_time][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.cancel_time],
    +     *     and begins restoring resources to their pre-request values. The
    +     *     operation is guaranteed to succeed at undoing all resource changes,
    +     *     after which point it terminates with a `CANCELLED` status.
    +     *   * All other attempts to modify the instance partition are rejected.
    +     *   * Reading the instance partition via the API continues to give the
    +     *     pre-request resource levels.
    +     * Upon completion of the returned operation:
    +     *   * Billing begins for all successfully-allocated resources (some types
    +     *     may have lower than the requested levels).
    +     *   * All newly-reserved resources are available for serving the instance
    +     *     partition's tables.
    +     *   * The instance partition's new resource levels are readable via the API.
    +     * The returned long-running operation will
    +     * have a name of the format
    +     * `<instance_partition_name>/operations/<operation_id>` and can be used to
    +     * track the instance partition modification. The
    +     * metadata field type is
    +     * [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata].
    +     * The response field type is
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if
    +     * successful.
    +     * Authorization requires `spanner.instancePartitions.update` permission on
    +     * the resource
    +     * [name][google.spanner.admin.instance.v1.InstancePartition.name].
    +     * 
    + */ + public com.google.longrunning.Operation updateInstancePartition( + com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getUpdateInstancePartitionMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Lists instance partition long-running operations in the given instance.
    +     * An instance partition operation has a name of the form
    +     * `projects/<project>/instances/<instance>/instancePartitions/<instance_partition>/operations/<operation>`.
    +     * The long-running operation
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that have completed/failed/canceled within the last 7 days,
    +     * and pending operations. Operations returned are ordered by
    +     * `operation.metadata.value.start_time` in descending order starting from the
    +     * most recently started operation.
    +     * Authorization requires `spanner.instancePartitionOperations.list`
    +     * permission on the resource
    +     * [parent][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.parent].
    +     * 
    + */ + public com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse + listInstancePartitionOperations( + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListInstancePartitionOperationsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Moves an instance to the target instance configuration. You can use the
    +     * returned long-running operation to track
    +     * the progress of moving the instance.
    +     * `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of
    +     * the following criteria:
    +     *   * Is undergoing a move to a different instance configuration
    +     *   * Has backups
    +     *   * Has an ongoing update
    +     *   * Contains any CMEK-enabled databases
    +     *   * Is a free trial instance
    +     * While the operation is pending:
    +     *   * All other attempts to modify the instance, including changes to its
    +     *     compute capacity, are rejected.
    +     *   * The following database and backup admin operations are rejected:
    +     *     * `DatabaseAdmin.CreateDatabase`
    +     *     * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is
    +     *        specified in the request.)
    +     *     * `DatabaseAdmin.RestoreDatabase`
    +     *     * `DatabaseAdmin.CreateBackup`
    +     *     * `DatabaseAdmin.CopyBackup`
    +     *   * Both the source and target instance configurations are subject to
    +     *     hourly compute and storage charges.
    +     *   * The instance might experience higher read-write latencies and a higher
    +     *     transaction abort rate. However, moving an instance doesn't cause any
    +     *     downtime.
    +     * The returned long-running operation has
    +     * a name of the format
    +     * `<instance_name>/operations/<operation_id>` and can be used to track
    +     * the move instance operation. The
    +     * metadata field type is
    +     * [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata].
    +     * The response field type is
    +     * [Instance][google.spanner.admin.instance.v1.Instance],
    +     * if successful.
    +     * Cancelling the operation sets its metadata's
    +     * [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time].
    +     * Cancellation is not immediate because it involves moving any data
    +     * previously moved to the target instance configuration back to the original
    +     * instance configuration. You can use this operation to track the progress of
    +     * the cancellation. Upon successful completion of the cancellation, the
    +     * operation terminates with `CANCELLED` status.
    +     * If not cancelled, upon completion of the returned operation:
    +     *   * The instance successfully moves to the target instance
    +     *     configuration.
    +     *   * You are billed for compute and storage in target instance
    +     *   configuration.
    +     * Authorization requires the `spanner.instances.update` permission on
    +     * the resource [instance][google.spanner.admin.instance.v1.Instance].
    +     * For more details, see
    +     * [Move an instance](https://cloud.google.com/spanner/docs/move-instance).
    +     * 
    + */ + public com.google.longrunning.Operation moveInstance( + com.google.spanner.admin.instance.v1.MoveInstanceRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getMoveInstanceMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do ListenableFuture-style rpc calls to service InstanceAdmin. + * + *
    +   * Cloud Spanner Instance Admin API
    +   * The Cloud Spanner Instance Admin API can be used to create, delete,
    +   * modify and list instances. Instances are dedicated Cloud Spanner serving
    +   * and storage resources to be used by Cloud Spanner databases.
    +   * Each instance has a "configuration", which dictates where the
    +   * serving resources for the Cloud Spanner instance are located (e.g.,
    +   * US-central, Europe). Configurations are created by Google based on
    +   * resource availability.
    +   * Cloud Spanner billing is based on the instances that exist and their
    +   * sizes. After an instance exists, there are no additional
    +   * per-database or per-operation charges for use of the instance
    +   * (though there may be additional network bandwidth charges).
    +   * Instances offer isolation: problems with databases in one instance
    +   * will not affect other instances. However, within an instance
    +   * databases can affect each other. For example, if one database in an
    +   * instance receives a lot of requests and consumes most of the
    +   * instance resources, fewer resources are available for other
    +   * databases in that instance, and their performance may suffer.
    +   * 
    + */ + public static final class InstanceAdminFutureStub + extends io.grpc.stub.AbstractFutureStub { + private InstanceAdminFutureStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected InstanceAdminFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new InstanceAdminFutureStub(channel, callOptions); + } + + /** + * + * + *
    +     * Lists the supported instance configurations for a given project.
    +     * Returns both Google-managed configurations and user-managed
    +     * configurations.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse> + listInstanceConfigs( + com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListInstanceConfigsMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Gets information about a particular instance configuration.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.instance.v1.InstanceConfig> + getInstanceConfig(com.google.spanner.admin.instance.v1.GetInstanceConfigRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetInstanceConfigMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Creates an instance configuration and begins preparing it to be used. The
    +     * returned long-running operation
    +     * can be used to track the progress of preparing the new
    +     * instance configuration. The instance configuration name is assigned by the
    +     * caller. If the named instance configuration already exists,
    +     * `CreateInstanceConfig` returns `ALREADY_EXISTS`.
    +     * Immediately after the request returns:
    +     *   * The instance configuration is readable via the API, with all requested
    +     *     attributes. The instance configuration's
    +     *     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
    +     *     field is set to true. Its state is `CREATING`.
    +     * While the operation is pending:
    +     *   * Cancelling the operation renders the instance configuration immediately
    +     *     unreadable via the API.
    +     *   * Except for deleting the creating resource, all other attempts to modify
    +     *     the instance configuration are rejected.
    +     * Upon completion of the returned operation:
    +     *   * Instances can be created using the instance configuration.
    +     *   * The instance configuration's
    +     *   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
    +     *   field becomes false. Its state becomes `READY`.
    +     * The returned long-running operation will
    +     * have a name of the format
    +     * `<instance_config_name>/operations/<operation_id>` and can be used to track
    +     * creation of the instance configuration. The
    +     * metadata field type is
    +     * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
    +     * The response field type is
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if
    +     * successful.
    +     * Authorization requires `spanner.instanceConfigs.create` permission on
    +     * the resource
    +     * [parent][google.spanner.admin.instance.v1.CreateInstanceConfigRequest.parent].
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + createInstanceConfig( + com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCreateInstanceConfigMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Updates an instance configuration. The returned
    +     * long-running operation can be used to track
    +     * the progress of updating the instance. If the named instance configuration
    +     * does not exist, returns `NOT_FOUND`.
    +     * Only user-managed configurations can be updated.
    +     * Immediately after the request returns:
    +     *   * The instance configuration's
    +     *     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
    +     *     field is set to true.
    +     * While the operation is pending:
    +     *   * Cancelling the operation sets its metadata's
    +     *     [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time].
    +     *     The operation is guaranteed to succeed at undoing all changes, after
    +     *     which point it terminates with a `CANCELLED` status.
    +     *   * All other attempts to modify the instance configuration are rejected.
    +     *   * Reading the instance configuration via the API continues to give the
    +     *     pre-request values.
    +     * Upon completion of the returned operation:
    +     *   * Creating instances using the instance configuration uses the new
    +     *     values.
    +     *   * The new values of the instance configuration are readable via the API.
    +     *   * The instance configuration's
    +     *   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
    +     *   field becomes false.
    +     * The returned long-running operation will
    +     * have a name of the format
    +     * `<instance_config_name>/operations/<operation_id>` and can be used to track
    +     * the instance configuration modification.  The
    +     * metadata field type is
    +     * [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata].
    +     * The response field type is
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if
    +     * successful.
    +     * Authorization requires `spanner.instanceConfigs.update` permission on
    +     * the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + updateInstanceConfig( + com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getUpdateInstanceConfigMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Deletes the instance configuration. Deletion is only allowed when no
    +     * instances are using the configuration. If any instances are using
    +     * the configuration, returns `FAILED_PRECONDITION`.
    +     * Only user-managed configurations can be deleted.
    +     * Authorization requires `spanner.instanceConfigs.delete` permission on
    +     * the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + deleteInstanceConfig( + com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getDeleteInstanceConfigMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Lists the user-managed instance configuration long-running
    +     * operations in the given project. An instance
    +     * configuration operation has a name of the form
    +     * `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`.
    +     * The long-running operation
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that have completed/failed/canceled within the last 7 days,
    +     * and pending operations. Operations returned are ordered by
    +     * `operation.metadata.value.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse> + listInstanceConfigOperations( + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListInstanceConfigOperationsMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Lists all instances in the given project.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.instance.v1.ListInstancesResponse> + listInstances(com.google.spanner.admin.instance.v1.ListInstancesRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListInstancesMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Lists all instance partitions for the given instance.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse> + listInstancePartitions( + com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListInstancePartitionsMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Gets information about a particular instance.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.instance.v1.Instance> + getInstance(com.google.spanner.admin.instance.v1.GetInstanceRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetInstanceMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Creates an instance and begins preparing it to begin serving. The
    +     * returned long-running operation
    +     * can be used to track the progress of preparing the new
    +     * instance. The instance name is assigned by the caller. If the
    +     * named instance already exists, `CreateInstance` returns
    +     * `ALREADY_EXISTS`.
    +     * Immediately upon completion of this request:
    +     *   * The instance is readable via the API, with all requested attributes
    +     *     but no allocated resources. Its state is `CREATING`.
    +     * Until completion of the returned operation:
    +     *   * Cancelling the operation renders the instance immediately unreadable
    +     *     via the API.
    +     *   * The instance can be deleted.
    +     *   * All other attempts to modify the instance are rejected.
    +     * Upon completion of the returned operation:
    +     *   * Billing for all successfully-allocated resources begins (some types
    +     *     may have lower than the requested levels).
    +     *   * Databases can be created in the instance.
    +     *   * The instance's allocated resource levels are readable via the API.
    +     *   * The instance's state becomes `READY`.
    +     * The returned long-running operation will
    +     * have a name of the format `<instance_name>/operations/<operation_id>` and
    +     * can be used to track creation of the instance.  The
    +     * metadata field type is
    +     * [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata].
    +     * The response field type is
    +     * [Instance][google.spanner.admin.instance.v1.Instance], if successful.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + createInstance(com.google.spanner.admin.instance.v1.CreateInstanceRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCreateInstanceMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Updates an instance, and begins allocating or releasing resources
    +     * as requested. The returned long-running operation can be used to track the
    +     * progress of updating the instance. If the named instance does not
    +     * exist, returns `NOT_FOUND`.
    +     * Immediately upon completion of this request:
    +     *   * For resource types for which a decrease in the instance's allocation
    +     *     has been requested, billing is based on the newly-requested level.
    +     * Until completion of the returned operation:
    +     *   * Cancelling the operation sets its metadata's
    +     *     [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time],
    +     *     and begins restoring resources to their pre-request values. The
    +     *     operation is guaranteed to succeed at undoing all resource changes,
    +     *     after which point it terminates with a `CANCELLED` status.
    +     *   * All other attempts to modify the instance are rejected.
    +     *   * Reading the instance via the API continues to give the pre-request
    +     *     resource levels.
    +     * Upon completion of the returned operation:
    +     *   * Billing begins for all successfully-allocated resources (some types
    +     *     may have lower than the requested levels).
    +     *   * All newly-reserved resources are available for serving the instance's
    +     *     tables.
    +     *   * The instance's new resource levels are readable via the API.
    +     * The returned long-running operation will
    +     * have a name of the format `<instance_name>/operations/<operation_id>` and
    +     * can be used to track the instance modification.  The
    +     * metadata field type is
    +     * [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata].
    +     * The response field type is
    +     * [Instance][google.spanner.admin.instance.v1.Instance], if successful.
    +     * Authorization requires `spanner.instances.update` permission on
    +     * the resource [name][google.spanner.admin.instance.v1.Instance.name].
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + updateInstance(com.google.spanner.admin.instance.v1.UpdateInstanceRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getUpdateInstanceMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Deletes an instance.
    +     * Immediately upon completion of the request:
    +     *   * Billing ceases for all of the instance's reserved resources.
    +     * Soon afterward:
    +     *   * The instance and *all of its databases* immediately and
    +     *     irrevocably disappear from the API. All data in the databases
    +     *     is permanently deleted.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + deleteInstance(com.google.spanner.admin.instance.v1.DeleteInstanceRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getDeleteInstanceMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Sets the access control policy on an instance resource. Replaces any
    +     * existing policy.
    +     * Authorization requires `spanner.instances.setIamPolicy` on
    +     * [resource][google.iam.v1.SetIamPolicyRequest.resource].
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + setIamPolicy(com.google.iam.v1.SetIamPolicyRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getSetIamPolicyMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Gets the access control policy for an instance resource. Returns an empty
    +     * policy if an instance exists but does not have a policy set.
    +     * Authorization requires `spanner.instances.getIamPolicy` on
    +     * [resource][google.iam.v1.GetIamPolicyRequest.resource].
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + getIamPolicy(com.google.iam.v1.GetIamPolicyRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetIamPolicyMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Returns permissions that the caller has on the specified instance resource.
    +     * Attempting this RPC on a non-existent Cloud Spanner instance resource will
    +     * result in a NOT_FOUND error if the user has `spanner.instances.list`
    +     * permission on the containing Google Cloud Project. Otherwise returns an
    +     * empty set of permissions.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.iam.v1.TestIamPermissionsResponse> + testIamPermissions(com.google.iam.v1.TestIamPermissionsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getTestIamPermissionsMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Gets information about a particular instance partition.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.instance.v1.InstancePartition> + getInstancePartition( + com.google.spanner.admin.instance.v1.GetInstancePartitionRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetInstancePartitionMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Creates an instance partition and begins preparing it to be used. The
    +     * returned long-running operation
    +     * can be used to track the progress of preparing the new instance partition.
    +     * The instance partition name is assigned by the caller. If the named
    +     * instance partition already exists, `CreateInstancePartition` returns
    +     * `ALREADY_EXISTS`.
    +     * Immediately upon completion of this request:
    +     *   * The instance partition is readable via the API, with all requested
    +     *     attributes but no allocated resources. Its state is `CREATING`.
    +     * Until completion of the returned operation:
    +     *   * Cancelling the operation renders the instance partition immediately
    +     *     unreadable via the API.
    +     *   * The instance partition can be deleted.
    +     *   * All other attempts to modify the instance partition are rejected.
    +     * Upon completion of the returned operation:
    +     *   * Billing for all successfully-allocated resources begins (some types
    +     *     may have lower than the requested levels).
    +     *   * Databases can start using this instance partition.
    +     *   * The instance partition's allocated resource levels are readable via the
    +     *     API.
    +     *   * The instance partition's state becomes `READY`.
    +     * The returned long-running operation will
    +     * have a name of the format
    +     * `<instance_partition_name>/operations/<operation_id>` and can be used to
    +     * track creation of the instance partition.  The
    +     * metadata field type is
    +     * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
    +     * The response field type is
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if
    +     * successful.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + createInstancePartition( + com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCreateInstancePartitionMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Deletes an existing instance partition. Requires that the
    +     * instance partition is not used by any database or backup and is not the
    +     * default instance partition of an instance.
    +     * Authorization requires `spanner.instancePartitions.delete` permission on
    +     * the resource
    +     * [name][google.spanner.admin.instance.v1.InstancePartition.name].
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + deleteInstancePartition( + com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getDeleteInstancePartitionMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Updates an instance partition, and begins allocating or releasing resources
    +     * as requested. The returned long-running operation can be used to track the
    +     * progress of updating the instance partition. If the named instance
    +     * partition does not exist, returns `NOT_FOUND`.
    +     * Immediately upon completion of this request:
    +     *   * For resource types for which a decrease in the instance partition's
    +     *   allocation has been requested, billing is based on the newly-requested
    +     *   level.
    +     * Until completion of the returned operation:
    +     *   * Cancelling the operation sets its metadata's
    +     *     [cancel_time][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.cancel_time],
    +     *     and begins restoring resources to their pre-request values. The
    +     *     operation is guaranteed to succeed at undoing all resource changes,
    +     *     after which point it terminates with a `CANCELLED` status.
    +     *   * All other attempts to modify the instance partition are rejected.
    +     *   * Reading the instance partition via the API continues to give the
    +     *     pre-request resource levels.
    +     * Upon completion of the returned operation:
    +     *   * Billing begins for all successfully-allocated resources (some types
    +     *     may have lower than the requested levels).
    +     *   * All newly-reserved resources are available for serving the instance
    +     *     partition's tables.
    +     *   * The instance partition's new resource levels are readable via the API.
    +     * The returned long-running operation will
    +     * have a name of the format
    +     * `<instance_partition_name>/operations/<operation_id>` and can be used to
    +     * track the instance partition modification. The
    +     * metadata field type is
    +     * [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata].
    +     * The response field type is
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if
    +     * successful.
    +     * Authorization requires `spanner.instancePartitions.update` permission on
    +     * the resource
    +     * [name][google.spanner.admin.instance.v1.InstancePartition.name].
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + updateInstancePartition( + com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getUpdateInstancePartitionMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Lists instance partition long-running operations in the given instance.
    +     * An instance partition operation has a name of the form
    +     * `projects/<project>/instances/<instance>/instancePartitions/<instance_partition>/operations/<operation>`.
    +     * The long-running operation
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that have completed/failed/canceled within the last 7 days,
    +     * and pending operations. Operations returned are ordered by
    +     * `operation.metadata.value.start_time` in descending order starting from the
    +     * most recently started operation.
    +     * Authorization requires `spanner.instancePartitionOperations.list`
    +     * permission on the resource
    +     * [parent][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.parent].
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse> + listInstancePartitionOperations( + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListInstancePartitionOperationsMethod(), getCallOptions()), + request); + } + + /** + * + * + *
    +     * Moves an instance to the target instance configuration. You can use the
    +     * returned long-running operation to track
    +     * the progress of moving the instance.
    +     * `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of
    +     * the following criteria:
    +     *   * Is undergoing a move to a different instance configuration
    +     *   * Has backups
    +     *   * Has an ongoing update
    +     *   * Contains any CMEK-enabled databases
    +     *   * Is a free trial instance
    +     * While the operation is pending:
    +     *   * All other attempts to modify the instance, including changes to its
    +     *     compute capacity, are rejected.
    +     *   * The following database and backup admin operations are rejected:
    +     *     * `DatabaseAdmin.CreateDatabase`
    +     *     * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is
    +     *        specified in the request.)
    +     *     * `DatabaseAdmin.RestoreDatabase`
    +     *     * `DatabaseAdmin.CreateBackup`
    +     *     * `DatabaseAdmin.CopyBackup`
    +     *   * Both the source and target instance configurations are subject to
    +     *     hourly compute and storage charges.
    +     *   * The instance might experience higher read-write latencies and a higher
    +     *     transaction abort rate. However, moving an instance doesn't cause any
    +     *     downtime.
    +     * The returned long-running operation has
    +     * a name of the format
    +     * `<instance_name>/operations/<operation_id>` and can be used to track
    +     * the move instance operation. The
    +     * metadata field type is
    +     * [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata].
    +     * The response field type is
    +     * [Instance][google.spanner.admin.instance.v1.Instance],
    +     * if successful.
    +     * Cancelling the operation sets its metadata's
    +     * [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time].
    +     * Cancellation is not immediate because it involves moving any data
    +     * previously moved to the target instance configuration back to the original
    +     * instance configuration. You can use this operation to track the progress of
    +     * the cancellation. Upon successful completion of the cancellation, the
    +     * operation terminates with `CANCELLED` status.
    +     * If not cancelled, upon completion of the returned operation:
    +     *   * The instance successfully moves to the target instance
    +     *     configuration.
    +     *   * You are billed for compute and storage in target instance
    +     *   configuration.
    +     * Authorization requires the `spanner.instances.update` permission on
    +     * the resource [instance][google.spanner.admin.instance.v1.Instance].
    +     * For more details, see
    +     * [Move an instance](https://cloud.google.com/spanner/docs/move-instance).
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + moveInstance(com.google.spanner.admin.instance.v1.MoveInstanceRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getMoveInstanceMethod(), getCallOptions()), request); + } + } + + private static final int METHODID_LIST_INSTANCE_CONFIGS = 0; + private static final int METHODID_GET_INSTANCE_CONFIG = 1; + private static final int METHODID_CREATE_INSTANCE_CONFIG = 2; + private static final int METHODID_UPDATE_INSTANCE_CONFIG = 3; + private static final int METHODID_DELETE_INSTANCE_CONFIG = 4; + private static final int METHODID_LIST_INSTANCE_CONFIG_OPERATIONS = 5; + private static final int METHODID_LIST_INSTANCES = 6; + private static final int METHODID_LIST_INSTANCE_PARTITIONS = 7; + private static final int METHODID_GET_INSTANCE = 8; + private static final int METHODID_CREATE_INSTANCE = 9; + private static final int METHODID_UPDATE_INSTANCE = 10; + private static final int METHODID_DELETE_INSTANCE = 11; + private static final int METHODID_SET_IAM_POLICY = 12; + private static final int METHODID_GET_IAM_POLICY = 13; + private static final int METHODID_TEST_IAM_PERMISSIONS = 14; + private static final int METHODID_GET_INSTANCE_PARTITION = 15; + private static final int METHODID_CREATE_INSTANCE_PARTITION = 16; + private static final int METHODID_DELETE_INSTANCE_PARTITION = 17; + private static final int METHODID_UPDATE_INSTANCE_PARTITION = 18; + private static final int METHODID_LIST_INSTANCE_PARTITION_OPERATIONS = 19; + private static final int METHODID_MOVE_INSTANCE = 20; + + private static final class MethodHandlers + implements io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final AsyncService serviceImpl; + private final int methodId; + + MethodHandlers(AsyncService serviceImpl, int methodId) { + 
this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_LIST_INSTANCE_CONFIGS: + serviceImpl.listInstanceConfigs( + (com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse>) + responseObserver); + break; + case METHODID_GET_INSTANCE_CONFIG: + serviceImpl.getInstanceConfig( + (com.google.spanner.admin.instance.v1.GetInstanceConfigRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_CREATE_INSTANCE_CONFIG: + serviceImpl.createInstanceConfig( + (com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_UPDATE_INSTANCE_CONFIG: + serviceImpl.updateInstanceConfig( + (com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_DELETE_INSTANCE_CONFIG: + serviceImpl.deleteInstanceConfig( + (com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_LIST_INSTANCE_CONFIG_OPERATIONS: + serviceImpl.listInstanceConfigOperations( + (com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse>) + responseObserver); + break; + case METHODID_LIST_INSTANCES: + serviceImpl.listInstances( + (com.google.spanner.admin.instance.v1.ListInstancesRequest) request, + (io.grpc.stub.StreamObserver< + com.google.spanner.admin.instance.v1.ListInstancesResponse>) + responseObserver); + break; + case METHODID_LIST_INSTANCE_PARTITIONS: + 
serviceImpl.listInstancePartitions( + (com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse>) + responseObserver); + break; + case METHODID_GET_INSTANCE: + serviceImpl.getInstance( + (com.google.spanner.admin.instance.v1.GetInstanceRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_CREATE_INSTANCE: + serviceImpl.createInstance( + (com.google.spanner.admin.instance.v1.CreateInstanceRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_UPDATE_INSTANCE: + serviceImpl.updateInstance( + (com.google.spanner.admin.instance.v1.UpdateInstanceRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_DELETE_INSTANCE: + serviceImpl.deleteInstance( + (com.google.spanner.admin.instance.v1.DeleteInstanceRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_SET_IAM_POLICY: + serviceImpl.setIamPolicy( + (com.google.iam.v1.SetIamPolicyRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_GET_IAM_POLICY: + serviceImpl.getIamPolicy( + (com.google.iam.v1.GetIamPolicyRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_TEST_IAM_PERMISSIONS: + serviceImpl.testIamPermissions( + (com.google.iam.v1.TestIamPermissionsRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_GET_INSTANCE_PARTITION: + serviceImpl.getInstancePartition( + (com.google.spanner.admin.instance.v1.GetInstancePartitionRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_CREATE_INSTANCE_PARTITION: + serviceImpl.createInstancePartition( + (com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + 
case METHODID_DELETE_INSTANCE_PARTITION: + serviceImpl.deleteInstancePartition( + (com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_UPDATE_INSTANCE_PARTITION: + serviceImpl.updateInstancePartition( + (com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_LIST_INSTANCE_PARTITION_OPERATIONS: + serviceImpl.listInstancePartitionOperations( + (com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse>) + responseObserver); + break; + case METHODID_MOVE_INSTANCE: + serviceImpl.moveInstance( + (com.google.spanner.admin.instance.v1.MoveInstanceRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + default: + throw new AssertionError(); + } + } + } + + public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getListInstanceConfigsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest, + com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse>( + service, METHODID_LIST_INSTANCE_CONFIGS))) + .addMethod( + getGetInstanceConfigMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.instance.v1.GetInstanceConfigRequest, + com.google.spanner.admin.instance.v1.InstanceConfig>( + service, METHODID_GET_INSTANCE_CONFIG))) + .addMethod( + 
getCreateInstanceConfigMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest, + com.google.longrunning.Operation>(service, METHODID_CREATE_INSTANCE_CONFIG))) + .addMethod( + getUpdateInstanceConfigMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest, + com.google.longrunning.Operation>(service, METHODID_UPDATE_INSTANCE_CONFIG))) + .addMethod( + getDeleteInstanceConfigMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest, + com.google.protobuf.Empty>(service, METHODID_DELETE_INSTANCE_CONFIG))) + .addMethod( + getListInstanceConfigOperationsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest, + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse>( + service, METHODID_LIST_INSTANCE_CONFIG_OPERATIONS))) + .addMethod( + getListInstancesMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.instance.v1.ListInstancesRequest, + com.google.spanner.admin.instance.v1.ListInstancesResponse>( + service, METHODID_LIST_INSTANCES))) + .addMethod( + getListInstancePartitionsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest, + com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse>( + service, METHODID_LIST_INSTANCE_PARTITIONS))) + .addMethod( + getGetInstanceMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.instance.v1.GetInstanceRequest, + com.google.spanner.admin.instance.v1.Instance>(service, METHODID_GET_INSTANCE))) + .addMethod( + getCreateInstanceMethod(), + 
io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.instance.v1.CreateInstanceRequest, + com.google.longrunning.Operation>(service, METHODID_CREATE_INSTANCE))) + .addMethod( + getUpdateInstanceMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.instance.v1.UpdateInstanceRequest, + com.google.longrunning.Operation>(service, METHODID_UPDATE_INSTANCE))) + .addMethod( + getDeleteInstanceMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.instance.v1.DeleteInstanceRequest, + com.google.protobuf.Empty>(service, METHODID_DELETE_INSTANCE))) + .addMethod( + getSetIamPolicyMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers( + service, METHODID_SET_IAM_POLICY))) + .addMethod( + getGetIamPolicyMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers( + service, METHODID_GET_IAM_POLICY))) + .addMethod( + getTestIamPermissionsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.iam.v1.TestIamPermissionsRequest, + com.google.iam.v1.TestIamPermissionsResponse>( + service, METHODID_TEST_IAM_PERMISSIONS))) + .addMethod( + getGetInstancePartitionMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.instance.v1.GetInstancePartitionRequest, + com.google.spanner.admin.instance.v1.InstancePartition>( + service, METHODID_GET_INSTANCE_PARTITION))) + .addMethod( + getCreateInstancePartitionMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest, + com.google.longrunning.Operation>(service, METHODID_CREATE_INSTANCE_PARTITION))) + .addMethod( + getDeleteInstancePartitionMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest, + 
com.google.protobuf.Empty>(service, METHODID_DELETE_INSTANCE_PARTITION))) + .addMethod( + getUpdateInstancePartitionMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest, + com.google.longrunning.Operation>(service, METHODID_UPDATE_INSTANCE_PARTITION))) + .addMethod( + getListInstancePartitionOperationsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest, + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse>( + service, METHODID_LIST_INSTANCE_PARTITION_OPERATIONS))) + .addMethod( + getMoveInstanceMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.instance.v1.MoveInstanceRequest, + com.google.longrunning.Operation>(service, METHODID_MOVE_INSTANCE))) + .build(); + } + + private abstract static class InstanceAdminBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, + io.grpc.protobuf.ProtoServiceDescriptorSupplier { + InstanceAdminBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("InstanceAdmin"); + } + } + + private static final class InstanceAdminFileDescriptorSupplier + extends InstanceAdminBaseDescriptorSupplier { + InstanceAdminFileDescriptorSupplier() {} + } + + private static final class InstanceAdminMethodDescriptorSupplier + extends InstanceAdminBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final java.lang.String methodName; + + InstanceAdminMethodDescriptorSupplier(java.lang.String methodName) { + 
this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (InstanceAdminGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = + result = + io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new InstanceAdminFileDescriptorSupplier()) + .addMethod(getListInstanceConfigsMethod()) + .addMethod(getGetInstanceConfigMethod()) + .addMethod(getCreateInstanceConfigMethod()) + .addMethod(getUpdateInstanceConfigMethod()) + .addMethod(getDeleteInstanceConfigMethod()) + .addMethod(getListInstanceConfigOperationsMethod()) + .addMethod(getListInstancesMethod()) + .addMethod(getListInstancePartitionsMethod()) + .addMethod(getGetInstanceMethod()) + .addMethod(getCreateInstanceMethod()) + .addMethod(getUpdateInstanceMethod()) + .addMethod(getDeleteInstanceMethod()) + .addMethod(getSetIamPolicyMethod()) + .addMethod(getGetIamPolicyMethod()) + .addMethod(getTestIamPermissionsMethod()) + .addMethod(getGetInstancePartitionMethod()) + .addMethod(getCreateInstancePartitionMethod()) + .addMethod(getDeleteInstancePartitionMethod()) + .addMethod(getUpdateInstancePartitionMethod()) + .addMethod(getListInstancePartitionOperationsMethod()) + .addMethod(getMoveInstanceMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/java-spanner/grpc-google-cloud-spanner-executor-v1/pom.xml b/java-spanner/grpc-google-cloud-spanner-executor-v1/pom.xml new file mode 100644 index 000000000000..372ac050093f --- /dev/null +++ b/java-spanner/grpc-google-cloud-spanner-executor-v1/pom.xml @@ -0,0 +1,81 @@ + + 4.0.0 + com.google.api.grpc + 
grpc-google-cloud-spanner-executor-v1 + 6.112.1-SNAPSHOT + grpc-google-cloud-spanner-executor-v1 + GRPC library for google-cloud-spanner + + com.google.cloud + google-cloud-spanner-parent + 6.112.1-SNAPSHOT + + + + io.grpc + grpc-api + + + io.grpc + grpc-stub + + + io.grpc + grpc-protobuf + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-common-protos + + + com.google.api.grpc + proto-google-cloud-spanner-executor-v1 + + + com.google.guava + guava + + + + + + java9 + + [9,) + + + + javax.annotation + javax.annotation-api + + + + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + com.google.api.grpc:proto-google-common-protos,com.google.guava:guava + + + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + + \ No newline at end of file diff --git a/java-spanner/grpc-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerExecutorProxyGrpc.java b/java-spanner/grpc-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerExecutorProxyGrpc.java new file mode 100644 index 000000000000..b33fca41f1c1 --- /dev/null +++ b/java-spanner/grpc-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerExecutorProxyGrpc.java @@ -0,0 +1,420 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.spanner.executor.v1; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + * + * + *
    + * Service that executes SpannerActions asynchronously.
    + * 
    + */ +@io.grpc.stub.annotations.GrpcGenerated +public final class SpannerExecutorProxyGrpc { + + private SpannerExecutorProxyGrpc() {} + + public static final java.lang.String SERVICE_NAME = + "google.spanner.executor.v1.SpannerExecutorProxy"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.executor.v1.SpannerAsyncActionRequest, + com.google.spanner.executor.v1.SpannerAsyncActionResponse> + getExecuteActionAsyncMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ExecuteActionAsync", + requestType = com.google.spanner.executor.v1.SpannerAsyncActionRequest.class, + responseType = com.google.spanner.executor.v1.SpannerAsyncActionResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + public static io.grpc.MethodDescriptor< + com.google.spanner.executor.v1.SpannerAsyncActionRequest, + com.google.spanner.executor.v1.SpannerAsyncActionResponse> + getExecuteActionAsyncMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.executor.v1.SpannerAsyncActionRequest, + com.google.spanner.executor.v1.SpannerAsyncActionResponse> + getExecuteActionAsyncMethod; + if ((getExecuteActionAsyncMethod = SpannerExecutorProxyGrpc.getExecuteActionAsyncMethod) + == null) { + synchronized (SpannerExecutorProxyGrpc.class) { + if ((getExecuteActionAsyncMethod = SpannerExecutorProxyGrpc.getExecuteActionAsyncMethod) + == null) { + SpannerExecutorProxyGrpc.getExecuteActionAsyncMethod = + getExecuteActionAsyncMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ExecuteActionAsync")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.executor.v1.SpannerAsyncActionRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.executor.v1.SpannerAsyncActionResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new SpannerExecutorProxyMethodDescriptorSupplier("ExecuteActionAsync")) + .build(); + } + } + } + return getExecuteActionAsyncMethod; + } + + /** Creates a new async stub that supports all call types for the service */ + public static SpannerExecutorProxyStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public SpannerExecutorProxyStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new SpannerExecutorProxyStub(channel, callOptions); + } + }; + return SpannerExecutorProxyStub.newStub(factory, channel); + } + + /** Creates a new blocking-style stub that supports all types of calls on the service */ + public static SpannerExecutorProxyBlockingV2Stub newBlockingV2Stub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public SpannerExecutorProxyBlockingV2Stub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new SpannerExecutorProxyBlockingV2Stub(channel, callOptions); + } + }; + return SpannerExecutorProxyBlockingV2Stub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static SpannerExecutorProxyBlockingStub newBlockingStub(io.grpc.Channel channel) { + 
io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public SpannerExecutorProxyBlockingStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new SpannerExecutorProxyBlockingStub(channel, callOptions); + } + }; + return SpannerExecutorProxyBlockingStub.newStub(factory, channel); + } + + /** Creates a new ListenableFuture-style stub that supports unary calls on the service */ + public static SpannerExecutorProxyFutureStub newFutureStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public SpannerExecutorProxyFutureStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new SpannerExecutorProxyFutureStub(channel, callOptions); + } + }; + return SpannerExecutorProxyFutureStub.newStub(factory, channel); + } + + /** + * + * + *
    +   * Service that executes SpannerActions asynchronously.
    +   * 
    + */ + public interface AsyncService { + + /** + * + * + *
    +     * ExecuteActionAsync is a streaming call that starts executing a new Spanner
    +     * action.
    +     * For each request, the server will reply with one or more responses, but
    +     * only the last response will contain status in the outcome.
    +     * Responses can be matched to requests by action_id. It is allowed to have
    +     * multiple actions in flight--in that case, actions are executed in
    +     * parallel.
    +     * 
    + */ + default io.grpc.stub.StreamObserver + executeActionAsync( + io.grpc.stub.StreamObserver + responseObserver) { + return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall( + getExecuteActionAsyncMethod(), responseObserver); + } + } + + /** + * Base class for the server implementation of the service SpannerExecutorProxy. + * + *
    +   * Service that executes SpannerActions asynchronously.
    +   * 
    + */ + public abstract static class SpannerExecutorProxyImplBase + implements io.grpc.BindableService, AsyncService { + + @java.lang.Override + public final io.grpc.ServerServiceDefinition bindService() { + return SpannerExecutorProxyGrpc.bindService(this); + } + } + + /** + * A stub to allow clients to do asynchronous rpc calls to service SpannerExecutorProxy. + * + *
    +   * Service that executes SpannerActions asynchronously.
    +   * 
    + */ + public static final class SpannerExecutorProxyStub + extends io.grpc.stub.AbstractAsyncStub { + private SpannerExecutorProxyStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected SpannerExecutorProxyStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new SpannerExecutorProxyStub(channel, callOptions); + } + + /** + * + * + *
    +     * ExecuteActionAsync is a streaming call that starts executing a new Spanner
    +     * action.
    +     * For each request, the server will reply with one or more responses, but
    +     * only the last response will contain status in the outcome.
    +     * Responses can be matched to requests by action_id. It is allowed to have
    +     * multiple actions in flight--in that case, actions are executed in
    +     * parallel.
    +     * 
    + */ + public io.grpc.stub.StreamObserver + executeActionAsync( + io.grpc.stub.StreamObserver + responseObserver) { + return io.grpc.stub.ClientCalls.asyncBidiStreamingCall( + getChannel().newCall(getExecuteActionAsyncMethod(), getCallOptions()), responseObserver); + } + } + + /** + * A stub to allow clients to do synchronous rpc calls to service SpannerExecutorProxy. + * + *
    +   * Service that executes SpannerActions asynchronously.
    +   * 
    + */ + public static final class SpannerExecutorProxyBlockingV2Stub + extends io.grpc.stub.AbstractBlockingStub { + private SpannerExecutorProxyBlockingV2Stub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected SpannerExecutorProxyBlockingV2Stub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new SpannerExecutorProxyBlockingV2Stub(channel, callOptions); + } + + /** + * + * + *
    +     * ExecuteActionAsync is a streaming call that starts executing a new Spanner
    +     * action.
    +     * For each request, the server will reply with one or more responses, but
    +     * only the last response will contain status in the outcome.
    +     * Responses can be matched to requests by action_id. It is allowed to have
    +     * multiple actions in flight--in that case, actions are executed in
    +     * parallel.
    +     * 
    + */ + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/10918") + public io.grpc.stub.BlockingClientCall< + com.google.spanner.executor.v1.SpannerAsyncActionRequest, + com.google.spanner.executor.v1.SpannerAsyncActionResponse> + executeActionAsync() { + return io.grpc.stub.ClientCalls.blockingBidiStreamingCall( + getChannel(), getExecuteActionAsyncMethod(), getCallOptions()); + } + } + + /** + * A stub to allow clients to do limited synchronous rpc calls to service SpannerExecutorProxy. + * + *
    +   * Service that executes SpannerActions asynchronously.
    +   * 
    + */ + public static final class SpannerExecutorProxyBlockingStub + extends io.grpc.stub.AbstractBlockingStub { + private SpannerExecutorProxyBlockingStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected SpannerExecutorProxyBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new SpannerExecutorProxyBlockingStub(channel, callOptions); + } + } + + /** + * A stub to allow clients to do ListenableFuture-style rpc calls to service SpannerExecutorProxy. + * + *
    +   * Service that executes SpannerActions asynchronously.
    +   * 
    + */ + public static final class SpannerExecutorProxyFutureStub + extends io.grpc.stub.AbstractFutureStub { + private SpannerExecutorProxyFutureStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected SpannerExecutorProxyFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new SpannerExecutorProxyFutureStub(channel, callOptions); + } + } + + private static final int METHODID_EXECUTE_ACTION_ASYNC = 0; + + private static final class MethodHandlers + implements io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final AsyncService serviceImpl; + private final int methodId; + + MethodHandlers(AsyncService serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_EXECUTE_ACTION_ASYNC: + return (io.grpc.stub.StreamObserver) + serviceImpl.executeActionAsync( + (io.grpc.stub.StreamObserver< + com.google.spanner.executor.v1.SpannerAsyncActionResponse>) + responseObserver); + default: + throw new AssertionError(); + } + } + } + + public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getExecuteActionAsyncMethod(), + io.grpc.stub.ServerCalls.asyncBidiStreamingCall( + new MethodHandlers< + com.google.spanner.executor.v1.SpannerAsyncActionRequest, 
+ com.google.spanner.executor.v1.SpannerAsyncActionResponse>( + service, METHODID_EXECUTE_ACTION_ASYNC))) + .build(); + } + + private abstract static class SpannerExecutorProxyBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, + io.grpc.protobuf.ProtoServiceDescriptorSupplier { + SpannerExecutorProxyBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("SpannerExecutorProxy"); + } + } + + private static final class SpannerExecutorProxyFileDescriptorSupplier + extends SpannerExecutorProxyBaseDescriptorSupplier { + SpannerExecutorProxyFileDescriptorSupplier() {} + } + + private static final class SpannerExecutorProxyMethodDescriptorSupplier + extends SpannerExecutorProxyBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final java.lang.String methodName; + + SpannerExecutorProxyMethodDescriptorSupplier(java.lang.String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (SpannerExecutorProxyGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = + result = + io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new SpannerExecutorProxyFileDescriptorSupplier()) + .addMethod(getExecuteActionAsyncMethod()) + .build(); + } + } + } + 
return result; + } +} diff --git a/java-spanner/grpc-google-cloud-spanner-v1/pom.xml b/java-spanner/grpc-google-cloud-spanner-v1/pom.xml new file mode 100644 index 000000000000..88f5cfc97b36 --- /dev/null +++ b/java-spanner/grpc-google-cloud-spanner-v1/pom.xml @@ -0,0 +1,81 @@ + + 4.0.0 + com.google.api.grpc + grpc-google-cloud-spanner-v1 + 6.112.1-SNAPSHOT + grpc-google-cloud-spanner-v1 + GRPC library for grpc-google-cloud-spanner-v1 + + com.google.cloud + google-cloud-spanner-parent + 6.112.1-SNAPSHOT + + + + io.grpc + grpc-api + + + io.grpc + grpc-stub + + + io.grpc + grpc-protobuf + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-cloud-spanner-v1 + + + com.google.guava + guava + + + com.google.auto.value + auto-value-annotations + + + + + + java9 + + [9,) + + + + javax.annotation + javax.annotation-api + + + + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + com.google.auto.value:auto-value-annotations,javax.annotation:javax.annotation-api + + + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + + \ No newline at end of file diff --git a/java-spanner/grpc-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/SpannerGrpc.java b/java-spanner/grpc-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/SpannerGrpc.java new file mode 100644 index 000000000000..4c280f1af901 --- /dev/null +++ b/java-spanner/grpc-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/SpannerGrpc.java @@ -0,0 +1,2803 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.spanner.v1; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + * + * + *
    + * Cloud Spanner API
    + * The Cloud Spanner API can be used to manage sessions and execute
    + * transactions on data stored in Cloud Spanner databases.
    + * 
    + */ +@io.grpc.stub.annotations.GrpcGenerated +public final class SpannerGrpc { + + private SpannerGrpc() {} + + public static final java.lang.String SERVICE_NAME = "google.spanner.v1.Spanner"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.v1.CreateSessionRequest, com.google.spanner.v1.Session> + getCreateSessionMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateSession", + requestType = com.google.spanner.v1.CreateSessionRequest.class, + responseType = com.google.spanner.v1.Session.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.v1.CreateSessionRequest, com.google.spanner.v1.Session> + getCreateSessionMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.v1.CreateSessionRequest, com.google.spanner.v1.Session> + getCreateSessionMethod; + if ((getCreateSessionMethod = SpannerGrpc.getCreateSessionMethod) == null) { + synchronized (SpannerGrpc.class) { + if ((getCreateSessionMethod = SpannerGrpc.getCreateSessionMethod) == null) { + SpannerGrpc.getCreateSessionMethod = + getCreateSessionMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CreateSession")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.CreateSessionRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.Session.getDefaultInstance())) + .setSchemaDescriptor(new SpannerMethodDescriptorSupplier("CreateSession")) + .build(); + } + } + } + return getCreateSessionMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.v1.BatchCreateSessionsRequest, + com.google.spanner.v1.BatchCreateSessionsResponse> + getBatchCreateSessionsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "BatchCreateSessions", + requestType = com.google.spanner.v1.BatchCreateSessionsRequest.class, + responseType = com.google.spanner.v1.BatchCreateSessionsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.v1.BatchCreateSessionsRequest, + com.google.spanner.v1.BatchCreateSessionsResponse> + getBatchCreateSessionsMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.v1.BatchCreateSessionsRequest, + com.google.spanner.v1.BatchCreateSessionsResponse> + getBatchCreateSessionsMethod; + if ((getBatchCreateSessionsMethod = SpannerGrpc.getBatchCreateSessionsMethod) == null) { + synchronized (SpannerGrpc.class) { + if ((getBatchCreateSessionsMethod = SpannerGrpc.getBatchCreateSessionsMethod) == null) { + SpannerGrpc.getBatchCreateSessionsMethod = + getBatchCreateSessionsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "BatchCreateSessions")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.BatchCreateSessionsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.BatchCreateSessionsResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new SpannerMethodDescriptorSupplier("BatchCreateSessions")) + .build(); + } + } + } + return getBatchCreateSessionsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.v1.GetSessionRequest, com.google.spanner.v1.Session> + getGetSessionMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetSession", + requestType = com.google.spanner.v1.GetSessionRequest.class, + responseType = com.google.spanner.v1.Session.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.v1.GetSessionRequest, com.google.spanner.v1.Session> + getGetSessionMethod() { + io.grpc.MethodDescriptor + getGetSessionMethod; + if ((getGetSessionMethod = SpannerGrpc.getGetSessionMethod) == null) { + synchronized (SpannerGrpc.class) { + if ((getGetSessionMethod = SpannerGrpc.getGetSessionMethod) == null) { + SpannerGrpc.getGetSessionMethod = + getGetSessionMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetSession")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.GetSessionRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.Session.getDefaultInstance())) + .setSchemaDescriptor(new SpannerMethodDescriptorSupplier("GetSession")) + .build(); + } + } + } + return getGetSessionMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.v1.ListSessionsRequest, com.google.spanner.v1.ListSessionsResponse> + getListSessionsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListSessions", + requestType = com.google.spanner.v1.ListSessionsRequest.class, + responseType = com.google.spanner.v1.ListSessionsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.v1.ListSessionsRequest, com.google.spanner.v1.ListSessionsResponse> + getListSessionsMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.v1.ListSessionsRequest, com.google.spanner.v1.ListSessionsResponse> + getListSessionsMethod; + if ((getListSessionsMethod = SpannerGrpc.getListSessionsMethod) == null) { + synchronized (SpannerGrpc.class) { + if ((getListSessionsMethod = SpannerGrpc.getListSessionsMethod) == null) { + SpannerGrpc.getListSessionsMethod = + getListSessionsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ListSessions")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.ListSessionsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.ListSessionsResponse.getDefaultInstance())) + .setSchemaDescriptor(new SpannerMethodDescriptorSupplier("ListSessions")) + .build(); + } + } + } + return getListSessionsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.v1.DeleteSessionRequest, com.google.protobuf.Empty> + getDeleteSessionMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "DeleteSession", + requestType = com.google.spanner.v1.DeleteSessionRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.v1.DeleteSessionRequest, com.google.protobuf.Empty> + getDeleteSessionMethod() { + io.grpc.MethodDescriptor + getDeleteSessionMethod; + if ((getDeleteSessionMethod = SpannerGrpc.getDeleteSessionMethod) == null) { + synchronized (SpannerGrpc.class) { + if ((getDeleteSessionMethod = SpannerGrpc.getDeleteSessionMethod) == null) { + SpannerGrpc.getDeleteSessionMethod = + getDeleteSessionMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "DeleteSession")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.DeleteSessionRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor(new SpannerMethodDescriptorSupplier("DeleteSession")) + .build(); + } + } + } + return getDeleteSessionMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.v1.ExecuteSqlRequest, com.google.spanner.v1.ResultSet> + getExecuteSqlMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ExecuteSql", + requestType = com.google.spanner.v1.ExecuteSqlRequest.class, + responseType = com.google.spanner.v1.ResultSet.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.v1.ExecuteSqlRequest, com.google.spanner.v1.ResultSet> + getExecuteSqlMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.v1.ExecuteSqlRequest, com.google.spanner.v1.ResultSet> + getExecuteSqlMethod; + if ((getExecuteSqlMethod = SpannerGrpc.getExecuteSqlMethod) == null) { + synchronized (SpannerGrpc.class) { + if ((getExecuteSqlMethod = SpannerGrpc.getExecuteSqlMethod) == null) { + SpannerGrpc.getExecuteSqlMethod = + getExecuteSqlMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ExecuteSql")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.ExecuteSqlRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.ResultSet.getDefaultInstance())) + .setSchemaDescriptor(new SpannerMethodDescriptorSupplier("ExecuteSql")) + .build(); + } + } + } + return getExecuteSqlMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.v1.ExecuteSqlRequest, com.google.spanner.v1.PartialResultSet> + getExecuteStreamingSqlMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ExecuteStreamingSql", + requestType = com.google.spanner.v1.ExecuteSqlRequest.class, + responseType = com.google.spanner.v1.PartialResultSet.class, + methodType = io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) + public static io.grpc.MethodDescriptor< + com.google.spanner.v1.ExecuteSqlRequest, com.google.spanner.v1.PartialResultSet> + getExecuteStreamingSqlMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.v1.ExecuteSqlRequest, com.google.spanner.v1.PartialResultSet> + getExecuteStreamingSqlMethod; + if ((getExecuteStreamingSqlMethod = SpannerGrpc.getExecuteStreamingSqlMethod) == null) { + synchronized (SpannerGrpc.class) { + if ((getExecuteStreamingSqlMethod = SpannerGrpc.getExecuteStreamingSqlMethod) == null) { + SpannerGrpc.getExecuteStreamingSqlMethod = + getExecuteStreamingSqlMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "ExecuteStreamingSql")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.ExecuteSqlRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.PartialResultSet.getDefaultInstance())) + .setSchemaDescriptor( + new SpannerMethodDescriptorSupplier("ExecuteStreamingSql")) + .build(); + } + } + } + return getExecuteStreamingSqlMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.v1.ExecuteBatchDmlRequest, + com.google.spanner.v1.ExecuteBatchDmlResponse> + getExecuteBatchDmlMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ExecuteBatchDml", + requestType = com.google.spanner.v1.ExecuteBatchDmlRequest.class, + responseType = com.google.spanner.v1.ExecuteBatchDmlResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.v1.ExecuteBatchDmlRequest, + com.google.spanner.v1.ExecuteBatchDmlResponse> + getExecuteBatchDmlMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.v1.ExecuteBatchDmlRequest, + com.google.spanner.v1.ExecuteBatchDmlResponse> + getExecuteBatchDmlMethod; + if ((getExecuteBatchDmlMethod = SpannerGrpc.getExecuteBatchDmlMethod) == null) { + synchronized (SpannerGrpc.class) { + if ((getExecuteBatchDmlMethod = SpannerGrpc.getExecuteBatchDmlMethod) == null) { + SpannerGrpc.getExecuteBatchDmlMethod = + getExecuteBatchDmlMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ExecuteBatchDml")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.ExecuteBatchDmlRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.ExecuteBatchDmlResponse.getDefaultInstance())) + .setSchemaDescriptor(new SpannerMethodDescriptorSupplier("ExecuteBatchDml")) + .build(); + } + } + } + return getExecuteBatchDmlMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.v1.ReadRequest, com.google.spanner.v1.ResultSet> + getReadMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "Read", + requestType = com.google.spanner.v1.ReadRequest.class, + responseType = com.google.spanner.v1.ResultSet.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.v1.ReadRequest, com.google.spanner.v1.ResultSet> + getReadMethod() { + io.grpc.MethodDescriptor + getReadMethod; + if ((getReadMethod = SpannerGrpc.getReadMethod) == null) { + synchronized (SpannerGrpc.class) { + if ((getReadMethod = SpannerGrpc.getReadMethod) == null) { + SpannerGrpc.getReadMethod = + getReadMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "Read")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.ReadRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.ResultSet.getDefaultInstance())) + .setSchemaDescriptor(new SpannerMethodDescriptorSupplier("Read")) + .build(); + } + } + } + return getReadMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.v1.ReadRequest, com.google.spanner.v1.PartialResultSet> + getStreamingReadMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "StreamingRead", + requestType = com.google.spanner.v1.ReadRequest.class, + responseType = com.google.spanner.v1.PartialResultSet.class, + methodType = io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) + public static io.grpc.MethodDescriptor< + com.google.spanner.v1.ReadRequest, com.google.spanner.v1.PartialResultSet> + getStreamingReadMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.v1.ReadRequest, com.google.spanner.v1.PartialResultSet> + getStreamingReadMethod; + if ((getStreamingReadMethod = SpannerGrpc.getStreamingReadMethod) == null) { + synchronized (SpannerGrpc.class) { + if ((getStreamingReadMethod = SpannerGrpc.getStreamingReadMethod) == null) { + SpannerGrpc.getStreamingReadMethod = + getStreamingReadMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "StreamingRead")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.ReadRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.PartialResultSet.getDefaultInstance())) + .setSchemaDescriptor(new SpannerMethodDescriptorSupplier("StreamingRead")) + .build(); + } + } + } + return getStreamingReadMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.v1.BeginTransactionRequest, com.google.spanner.v1.Transaction> + getBeginTransactionMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "BeginTransaction", + requestType = com.google.spanner.v1.BeginTransactionRequest.class, + responseType = com.google.spanner.v1.Transaction.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.v1.BeginTransactionRequest, com.google.spanner.v1.Transaction> + getBeginTransactionMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.v1.BeginTransactionRequest, com.google.spanner.v1.Transaction> + getBeginTransactionMethod; + if ((getBeginTransactionMethod = SpannerGrpc.getBeginTransactionMethod) == null) { + synchronized (SpannerGrpc.class) { + if ((getBeginTransactionMethod = SpannerGrpc.getBeginTransactionMethod) == null) { + SpannerGrpc.getBeginTransactionMethod = + getBeginTransactionMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "BeginTransaction")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.BeginTransactionRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.Transaction.getDefaultInstance())) + .setSchemaDescriptor(new SpannerMethodDescriptorSupplier("BeginTransaction")) + .build(); + } + } + } + return getBeginTransactionMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.v1.CommitRequest, com.google.spanner.v1.CommitResponse> + getCommitMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "Commit", + requestType = com.google.spanner.v1.CommitRequest.class, + responseType = com.google.spanner.v1.CommitResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.v1.CommitRequest, com.google.spanner.v1.CommitResponse> + getCommitMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.v1.CommitRequest, com.google.spanner.v1.CommitResponse> + getCommitMethod; + if ((getCommitMethod = SpannerGrpc.getCommitMethod) == null) { + synchronized (SpannerGrpc.class) { + if ((getCommitMethod = SpannerGrpc.getCommitMethod) == null) { + SpannerGrpc.getCommitMethod = + getCommitMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "Commit")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.CommitRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.CommitResponse.getDefaultInstance())) + .setSchemaDescriptor(new SpannerMethodDescriptorSupplier("Commit")) + .build(); + } + } + } + return getCommitMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.v1.RollbackRequest, com.google.protobuf.Empty> + getRollbackMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "Rollback", + requestType = com.google.spanner.v1.RollbackRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.v1.RollbackRequest, com.google.protobuf.Empty> + getRollbackMethod() { + io.grpc.MethodDescriptor + getRollbackMethod; + if ((getRollbackMethod = SpannerGrpc.getRollbackMethod) == null) { + synchronized (SpannerGrpc.class) { + if ((getRollbackMethod = SpannerGrpc.getRollbackMethod) == null) { + SpannerGrpc.getRollbackMethod = + getRollbackMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "Rollback")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.RollbackRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor(new SpannerMethodDescriptorSupplier("Rollback")) + .build(); + } + } + } + return getRollbackMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.v1.PartitionQueryRequest, com.google.spanner.v1.PartitionResponse> + getPartitionQueryMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "PartitionQuery", + requestType = com.google.spanner.v1.PartitionQueryRequest.class, + responseType = com.google.spanner.v1.PartitionResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.v1.PartitionQueryRequest, com.google.spanner.v1.PartitionResponse> + getPartitionQueryMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.v1.PartitionQueryRequest, com.google.spanner.v1.PartitionResponse> + getPartitionQueryMethod; + if ((getPartitionQueryMethod = SpannerGrpc.getPartitionQueryMethod) == null) { + synchronized (SpannerGrpc.class) { + if ((getPartitionQueryMethod = SpannerGrpc.getPartitionQueryMethod) == null) { + SpannerGrpc.getPartitionQueryMethod = + getPartitionQueryMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "PartitionQuery")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.PartitionQueryRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.PartitionResponse.getDefaultInstance())) + .setSchemaDescriptor(new SpannerMethodDescriptorSupplier("PartitionQuery")) + .build(); + } + } + } + return getPartitionQueryMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.v1.PartitionReadRequest, com.google.spanner.v1.PartitionResponse> + getPartitionReadMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "PartitionRead", + requestType = com.google.spanner.v1.PartitionReadRequest.class, + responseType = com.google.spanner.v1.PartitionResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.v1.PartitionReadRequest, com.google.spanner.v1.PartitionResponse> + getPartitionReadMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.v1.PartitionReadRequest, com.google.spanner.v1.PartitionResponse> + getPartitionReadMethod; + if ((getPartitionReadMethod = SpannerGrpc.getPartitionReadMethod) == null) { + synchronized (SpannerGrpc.class) { + if ((getPartitionReadMethod = SpannerGrpc.getPartitionReadMethod) == null) { + SpannerGrpc.getPartitionReadMethod = + getPartitionReadMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "PartitionRead")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.PartitionReadRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.PartitionResponse.getDefaultInstance())) + .setSchemaDescriptor(new SpannerMethodDescriptorSupplier("PartitionRead")) + .build(); + } + } + } + return getPartitionReadMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.v1.BatchWriteRequest, com.google.spanner.v1.BatchWriteResponse> + getBatchWriteMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "BatchWrite", + requestType = com.google.spanner.v1.BatchWriteRequest.class, + responseType = com.google.spanner.v1.BatchWriteResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) + public static io.grpc.MethodDescriptor< + com.google.spanner.v1.BatchWriteRequest, com.google.spanner.v1.BatchWriteResponse> + getBatchWriteMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.v1.BatchWriteRequest, com.google.spanner.v1.BatchWriteResponse> + getBatchWriteMethod; + if ((getBatchWriteMethod = SpannerGrpc.getBatchWriteMethod) == null) { + synchronized (SpannerGrpc.class) { + if ((getBatchWriteMethod = SpannerGrpc.getBatchWriteMethod) == null) { + SpannerGrpc.getBatchWriteMethod = + getBatchWriteMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "BatchWrite")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.BatchWriteRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.v1.BatchWriteResponse.getDefaultInstance())) + .setSchemaDescriptor(new SpannerMethodDescriptorSupplier("BatchWrite")) + .build(); + } + } + } + return getBatchWriteMethod; + } + + /** Creates a new async stub that supports all call types for the service */ + public static SpannerStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public SpannerStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new SpannerStub(channel, callOptions); + } + }; + return SpannerStub.newStub(factory, channel); + } + + /** Creates a new blocking-style stub that supports all types of calls on the service */ + public static SpannerBlockingV2Stub newBlockingV2Stub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public SpannerBlockingV2Stub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new SpannerBlockingV2Stub(channel, callOptions); + } + }; + return SpannerBlockingV2Stub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static SpannerBlockingStub newBlockingStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public SpannerBlockingStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new 
SpannerBlockingStub(channel, callOptions); + } + }; + return SpannerBlockingStub.newStub(factory, channel); + } + + /** Creates a new ListenableFuture-style stub that supports unary calls on the service */ + public static SpannerFutureStub newFutureStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public SpannerFutureStub newStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new SpannerFutureStub(channel, callOptions); + } + }; + return SpannerFutureStub.newStub(factory, channel); + } + + /** + * + * + *
    +   * Cloud Spanner API
    +   * The Cloud Spanner API can be used to manage sessions and execute
    +   * transactions on data stored in Cloud Spanner databases.
    +   * 
    + */ + public interface AsyncService { + + /** + * + * + *
    +     * Creates a new session. A session can be used to perform
    +     * transactions that read and/or modify data in a Cloud Spanner database.
    +     * Sessions are meant to be reused for many consecutive
    +     * transactions.
    +     * Sessions can only execute one transaction at a time. To execute
    +     * multiple concurrent read-write/write-only transactions, create
    +     * multiple sessions. Note that standalone reads and queries use a
    +     * transaction internally, and count toward the one transaction
    +     * limit.
    +     * Active sessions use additional server resources, so it's a good idea to
    +     * delete idle and unneeded sessions.
    +     * Aside from explicit deletes, Cloud Spanner can delete sessions when no
    +     * operations are sent for more than an hour. If a session is deleted,
    +     * requests to it return `NOT_FOUND`.
    +     * Idle sessions can be kept alive by sending a trivial SQL query
    +     * periodically, for example, `"SELECT 1"`.
    +     * 
    + */ + default void createSession( + com.google.spanner.v1.CreateSessionRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCreateSessionMethod(), responseObserver); + } + + /** + * + * + *
    +     * Creates multiple new sessions.
    +     * This API can be used to initialize a session cache on the clients.
    +     * See https://goo.gl/TgSFN2 for best practices on session cache management.
    +     * 
    + */ + default void batchCreateSessions( + com.google.spanner.v1.BatchCreateSessionsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getBatchCreateSessionsMethod(), responseObserver); + } + + /** + * + * + *
    +     * Gets a session. Returns `NOT_FOUND` if the session doesn't exist.
    +     * This is mainly useful for determining whether a session is still
    +     * alive.
    +     * 
    + */ + default void getSession( + com.google.spanner.v1.GetSessionRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getGetSessionMethod(), responseObserver); + } + + /** + * + * + *
    +     * Lists all sessions in a given database.
    +     * 
    + */ + default void listSessions( + com.google.spanner.v1.ListSessionsRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListSessionsMethod(), responseObserver); + } + + /** + * + * + *
    +     * Ends a session, releasing server resources associated with it. This
    +     * asynchronously triggers the cancellation of any operations that are running
    +     * with this session.
    +     * 
    + */ + default void deleteSession( + com.google.spanner.v1.DeleteSessionRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getDeleteSessionMethod(), responseObserver); + } + + /** + * + * + *
    +     * Executes an SQL statement, returning all results in a single reply. This
    +     * method can't be used to return a result set larger than 10 MiB;
    +     * if the query yields more data than that, the query fails with
    +     * a `FAILED_PRECONDITION` error.
    +     * Operations inside read-write transactions might return `ABORTED`. If
    +     * this occurs, the application should restart the transaction from
    +     * the beginning. See [Transaction][google.spanner.v1.Transaction] for more
    +     * details.
    +     * Larger result sets can be fetched in streaming fashion by calling
    +     * [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]
    +     * instead.
    +     * The query string can be SQL or [Graph Query Language
    +     * (GQL)](https://cloud.google.com/spanner/docs/reference/standard-sql/graph-intro).
    +     * 
    + */ + default void executeSql( + com.google.spanner.v1.ExecuteSqlRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getExecuteSqlMethod(), responseObserver); + } + + /** + * + * + *
    +     * Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the
    +     * result set as a stream. Unlike
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on
    +     * the size of the returned result set. However, no individual row in the
    +     * result set can exceed 100 MiB, and no column value can exceed 10 MiB.
    +     * The query string can be SQL or [Graph Query Language
    +     * (GQL)](https://cloud.google.com/spanner/docs/reference/standard-sql/graph-intro).
    +     * 
    + */ + default void executeStreamingSql( + com.google.spanner.v1.ExecuteSqlRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getExecuteStreamingSqlMethod(), responseObserver); + } + + /** + * + * + *
    +     * Executes a batch of SQL DML statements. This method allows many statements
    +     * to be run with lower latency than submitting them sequentially with
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
    +     * Statements are executed in sequential order. A request can succeed even if
    +     * a statement fails. The
    +     * [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status]
    +     * field in the response provides information about the statement that failed.
    +     * Clients must inspect this field to determine whether an error occurred.
    +     * Execution stops after the first failed statement; the remaining statements
    +     * are not executed.
    +     * 
    + */ + default void executeBatchDml( + com.google.spanner.v1.ExecuteBatchDmlRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getExecuteBatchDmlMethod(), responseObserver); + } + + /** + * + * + *
    +     * Reads rows from the database using key lookups and scans, as a
    +     * simple key/value style alternative to
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method can't be
    +     * used to return a result set larger than 10 MiB; if the read matches more
    +     * data than that, the read fails with a `FAILED_PRECONDITION`
    +     * error.
    +     * Reads inside read-write transactions might return `ABORTED`. If
    +     * this occurs, the application should restart the transaction from
    +     * the beginning. See [Transaction][google.spanner.v1.Transaction] for more
    +     * details.
    +     * Larger result sets can be yielded in streaming fashion by calling
    +     * [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
    +     * 
    + */ + default void read( + com.google.spanner.v1.ReadRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getReadMethod(), responseObserver); + } + + /** + * + * + *
    +     * Like [Read][google.spanner.v1.Spanner.Read], except returns the result set
    +     * as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no
    +     * limit on the size of the returned result set. However, no individual row in
    +     * the result set can exceed 100 MiB, and no column value can exceed
    +     * 10 MiB.
    +     * 
    + */ + default void streamingRead( + com.google.spanner.v1.ReadRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getStreamingReadMethod(), responseObserver); + } + + /** + * + * + *
    +     * Begins a new transaction. This step can often be skipped:
    +     * [Read][google.spanner.v1.Spanner.Read],
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
    +     * [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
    +     * side-effect.
    +     * 
    + */ + default void beginTransaction( + com.google.spanner.v1.BeginTransactionRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getBeginTransactionMethod(), responseObserver); + } + + /** + * + * + *
    +     * Commits a transaction. The request includes the mutations to be
    +     * applied to rows in the database.
    +     * `Commit` might return an `ABORTED` error. This can occur at any time;
    +     * commonly, the cause is conflicts with concurrent
    +     * transactions. However, it can also happen for a variety of other
    +     * reasons. If `Commit` returns `ABORTED`, the caller should retry
    +     * the transaction from the beginning, reusing the same session.
    +     * On very rare occasions, `Commit` might return `UNKNOWN`. This can happen,
    +     * for example, if the client job experiences a 1+ hour networking failure.
    +     * At that point, Cloud Spanner has lost track of the transaction outcome and
    +     * we recommend that you perform another read from the database to see the
    +     * state of things as they are now.
    +     * 
    + */ + default void commit( + com.google.spanner.v1.CommitRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getCommitMethod(), responseObserver); + } + + /** + * + * + *
    +     * Rolls back a transaction, releasing any locks it holds. It's a good
    +     * idea to call this for any transaction that includes one or more
    +     * [Read][google.spanner.v1.Spanner.Read] or
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately
    +     * decides not to commit.
    +     * `Rollback` returns `OK` if it successfully aborts the transaction, the
    +     * transaction was already aborted, or the transaction isn't
    +     * found. `Rollback` never returns `ABORTED`.
    +     * 
    + */ + default void rollback( + com.google.spanner.v1.RollbackRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getRollbackMethod(), responseObserver); + } + + /** + * + * + *
    +     * Creates a set of partition tokens that can be used to execute a query
    +     * operation in parallel. Each of the returned partition tokens can be used
    +     * by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to
    +     * specify a subset of the query result to read. The same session and
    +     * read-only transaction must be used by the `PartitionQueryRequest` used to
    +     * create the partition tokens and the `ExecuteSqlRequests` that use the
    +     * partition tokens.
    +     * Partition tokens become invalid when the session used to create them
    +     * is deleted, is idle for too long, begins a new transaction, or becomes too
    +     * old. When any of these happen, it isn't possible to resume the query, and
    +     * the whole operation must be restarted from the beginning.
    +     * 
    + */ + default void partitionQuery( + com.google.spanner.v1.PartitionQueryRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getPartitionQueryMethod(), responseObserver); + } + + /** + * + * + *
    +     * Creates a set of partition tokens that can be used to execute a read
    +     * operation in parallel. Each of the returned partition tokens can be used
    +     * by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a
    +     * subset of the read result to read. The same session and read-only
    +     * transaction must be used by the `PartitionReadRequest` used to create the
    +     * partition tokens and the `ReadRequests` that use the partition tokens.
    +     * There are no ordering guarantees on rows returned among the returned
    +     * partition tokens, or even within each individual `StreamingRead` call
    +     * issued with a `partition_token`.
    +     * Partition tokens become invalid when the session used to create them
    +     * is deleted, is idle for too long, begins a new transaction, or becomes too
    +     * old. When any of these happen, it isn't possible to resume the read, and
    +     * the whole operation must be restarted from the beginning.
    +     * 
    + */ + default void partitionRead( + com.google.spanner.v1.PartitionReadRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getPartitionReadMethod(), responseObserver); + } + + /** + * + * + *
    +     * Batches the supplied mutation groups in a collection of efficient
    +     * transactions. All mutations in a group are committed atomically. However,
    +     * mutations across groups can be committed non-atomically in an unspecified
    +     * order and thus, they must be independent of each other. Partial failure is
    +     * possible, that is, some groups might have been committed successfully,
    +     * while some might have failed. The results of individual batches are
    +     * streamed into the response as the batches are applied.
    +     * `BatchWrite` requests are not replay protected, meaning that each mutation
    +     * group can be applied more than once. Replays of non-idempotent mutations
    +     * can have undesirable effects. For example, replays of an insert mutation
    +     * can produce an already exists error or if you use generated or commit
    +     * timestamp-based keys, it can result in additional rows being added to the
    +     * mutation's table. We recommend structuring your mutation groups to be
    +     * idempotent to avoid this issue.
    +     * 
    + */ + default void batchWrite( + com.google.spanner.v1.BatchWriteRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getBatchWriteMethod(), responseObserver); + } + } + + /** + * Base class for the server implementation of the service Spanner. + * + *
    +   * Cloud Spanner API
    +   * The Cloud Spanner API can be used to manage sessions and execute
    +   * transactions on data stored in Cloud Spanner databases.
    +   * 
    + */ + public abstract static class SpannerImplBase implements io.grpc.BindableService, AsyncService { + + @java.lang.Override + public final io.grpc.ServerServiceDefinition bindService() { + return SpannerGrpc.bindService(this); + } + } + + /** + * A stub to allow clients to do asynchronous rpc calls to service Spanner. + * + *
    +   * Cloud Spanner API
    +   * The Cloud Spanner API can be used to manage sessions and execute
    +   * transactions on data stored in Cloud Spanner databases.
    +   * 
    + */ + public static final class SpannerStub extends io.grpc.stub.AbstractAsyncStub { + private SpannerStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected SpannerStub build(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new SpannerStub(channel, callOptions); + } + + /** + * + * + *
    +     * Creates a new session. A session can be used to perform
    +     * transactions that read and/or modify data in a Cloud Spanner database.
    +     * Sessions are meant to be reused for many consecutive
    +     * transactions.
    +     * Sessions can only execute one transaction at a time. To execute
    +     * multiple concurrent read-write/write-only transactions, create
    +     * multiple sessions. Note that standalone reads and queries use a
    +     * transaction internally, and count toward the one transaction
    +     * limit.
    +     * Active sessions use additional server resources, so it's a good idea to
    +     * delete idle and unneeded sessions.
    +     * Aside from explicit deletes, Cloud Spanner can delete sessions when no
    +     * operations are sent for more than an hour. If a session is deleted,
    +     * requests to it return `NOT_FOUND`.
    +     * Idle sessions can be kept alive by sending a trivial SQL query
    +     * periodically, for example, `"SELECT 1"`.
    +     * 
    + */ + public void createSession( + com.google.spanner.v1.CreateSessionRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCreateSessionMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Creates multiple new sessions.
    +     * This API can be used to initialize a session cache on the clients.
    +     * See https://goo.gl/TgSFN2 for best practices on session cache management.
    +     * 
    + */ + public void batchCreateSessions( + com.google.spanner.v1.BatchCreateSessionsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getBatchCreateSessionsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Gets a session. Returns `NOT_FOUND` if the session doesn't exist.
    +     * This is mainly useful for determining whether a session is still
    +     * alive.
    +     * 
    + */ + public void getSession( + com.google.spanner.v1.GetSessionRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetSessionMethod(), getCallOptions()), request, responseObserver); + } + + /** + * + * + *
    +     * Lists all sessions in a given database.
    +     * 
    + */ + public void listSessions( + com.google.spanner.v1.ListSessionsRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListSessionsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Ends a session, releasing server resources associated with it. This
    +     * asynchronously triggers the cancellation of any operations that are running
    +     * with this session.
    +     * 
    + */ + public void deleteSession( + com.google.spanner.v1.DeleteSessionRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getDeleteSessionMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Executes an SQL statement, returning all results in a single reply. This
    +     * method can't be used to return a result set larger than 10 MiB;
    +     * if the query yields more data than that, the query fails with
    +     * a `FAILED_PRECONDITION` error.
    +     * Operations inside read-write transactions might return `ABORTED`. If
    +     * this occurs, the application should restart the transaction from
    +     * the beginning. See [Transaction][google.spanner.v1.Transaction] for more
    +     * details.
    +     * Larger result sets can be fetched in streaming fashion by calling
    +     * [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]
    +     * instead.
    +     * The query string can be SQL or [Graph Query Language
    +     * (GQL)](https://cloud.google.com/spanner/docs/reference/standard-sql/graph-intro).
    +     * 
    + */ + public void executeSql( + com.google.spanner.v1.ExecuteSqlRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getExecuteSqlMethod(), getCallOptions()), request, responseObserver); + } + + /** + * + * + *
    +     * Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the
    +     * result set as a stream. Unlike
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on
    +     * the size of the returned result set. However, no individual row in the
    +     * result set can exceed 100 MiB, and no column value can exceed 10 MiB.
    +     * The query string can be SQL or [Graph Query Language
    +     * (GQL)](https://cloud.google.com/spanner/docs/reference/standard-sql/graph-intro).
    +     * 
    + */ + public void executeStreamingSql( + com.google.spanner.v1.ExecuteSqlRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncServerStreamingCall( + getChannel().newCall(getExecuteStreamingSqlMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Executes a batch of SQL DML statements. This method allows many statements
    +     * to be run with lower latency than submitting them sequentially with
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
    +     * Statements are executed in sequential order. A request can succeed even if
    +     * a statement fails. The
    +     * [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status]
    +     * field in the response provides information about the statement that failed.
    +     * Clients must inspect this field to determine whether an error occurred.
    +     * Execution stops after the first failed statement; the remaining statements
    +     * are not executed.
    +     * 
    + */ + public void executeBatchDml( + com.google.spanner.v1.ExecuteBatchDmlRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getExecuteBatchDmlMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Reads rows from the database using key lookups and scans, as a
    +     * simple key/value style alternative to
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method can't be
    +     * used to return a result set larger than 10 MiB; if the read matches more
    +     * data than that, the read fails with a `FAILED_PRECONDITION`
    +     * error.
    +     * Reads inside read-write transactions might return `ABORTED`. If
    +     * this occurs, the application should restart the transaction from
    +     * the beginning. See [Transaction][google.spanner.v1.Transaction] for more
    +     * details.
    +     * Larger result sets can be yielded in streaming fashion by calling
    +     * [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
    +     * 
    + */ + public void read( + com.google.spanner.v1.ReadRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getReadMethod(), getCallOptions()), request, responseObserver); + } + + /** + * + * + *
    +     * Like [Read][google.spanner.v1.Spanner.Read], except returns the result set
    +     * as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no
    +     * limit on the size of the returned result set. However, no individual row in
    +     * the result set can exceed 100 MiB, and no column value can exceed
    +     * 10 MiB.
    +     * 
    + */ + public void streamingRead( + com.google.spanner.v1.ReadRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncServerStreamingCall( + getChannel().newCall(getStreamingReadMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Begins a new transaction. This step can often be skipped:
    +     * [Read][google.spanner.v1.Spanner.Read],
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
    +     * [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
    +     * side-effect.
    +     * 
    + */ + public void beginTransaction( + com.google.spanner.v1.BeginTransactionRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getBeginTransactionMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Commits a transaction. The request includes the mutations to be
    +     * applied to rows in the database.
    +     * `Commit` might return an `ABORTED` error. This can occur at any time;
    +     * commonly, the cause is conflicts with concurrent
    +     * transactions. However, it can also happen for a variety of other
    +     * reasons. If `Commit` returns `ABORTED`, the caller should retry
    +     * the transaction from the beginning, reusing the same session.
    +     * On very rare occasions, `Commit` might return `UNKNOWN`. This can happen,
    +     * for example, if the client job experiences a 1+ hour networking failure.
    +     * At that point, Cloud Spanner has lost track of the transaction outcome and
    +     * we recommend that you perform another read from the database to see the
    +     * state of things as they are now.
    +     * 
    + */ + public void commit( + com.google.spanner.v1.CommitRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCommitMethod(), getCallOptions()), request, responseObserver); + } + + /** + * + * + *
    +     * Rolls back a transaction, releasing any locks it holds. It's a good
    +     * idea to call this for any transaction that includes one or more
    +     * [Read][google.spanner.v1.Spanner.Read] or
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately
    +     * decides not to commit.
    +     * `Rollback` returns `OK` if it successfully aborts the transaction, the
    +     * transaction was already aborted, or the transaction isn't
    +     * found. `Rollback` never returns `ABORTED`.
    +     * 
    + */ + public void rollback( + com.google.spanner.v1.RollbackRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getRollbackMethod(), getCallOptions()), request, responseObserver); + } + + /** + * + * + *
    +     * Creates a set of partition tokens that can be used to execute a query
    +     * operation in parallel. Each of the returned partition tokens can be used
    +     * by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to
    +     * specify a subset of the query result to read. The same session and
    +     * read-only transaction must be used by the `PartitionQueryRequest` used to
    +     * create the partition tokens and the `ExecuteSqlRequests` that use the
    +     * partition tokens.
    +     * Partition tokens become invalid when the session used to create them
    +     * is deleted, is idle for too long, begins a new transaction, or becomes too
    +     * old. When any of these happen, it isn't possible to resume the query, and
    +     * the whole operation must be restarted from the beginning.
    +     * 
    + */ + public void partitionQuery( + com.google.spanner.v1.PartitionQueryRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getPartitionQueryMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Creates a set of partition tokens that can be used to execute a read
    +     * operation in parallel. Each of the returned partition tokens can be used
    +     * by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a
    +     * subset of the read result to read. The same session and read-only
    +     * transaction must be used by the `PartitionReadRequest` used to create the
    +     * partition tokens and the `ReadRequests` that use the partition tokens.
    +     * There are no ordering guarantees on rows returned among the returned
    +     * partition tokens, or even within each individual `StreamingRead` call
    +     * issued with a `partition_token`.
    +     * Partition tokens become invalid when the session used to create them
    +     * is deleted, is idle for too long, begins a new transaction, or becomes too
    +     * old. When any of these happen, it isn't possible to resume the read, and
    +     * the whole operation must be restarted from the beginning.
    +     * 
    + */ + public void partitionRead( + com.google.spanner.v1.PartitionReadRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getPartitionReadMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Batches the supplied mutation groups in a collection of efficient
    +     * transactions. All mutations in a group are committed atomically. However,
    +     * mutations across groups can be committed non-atomically in an unspecified
    +     * order and thus, they must be independent of each other. Partial failure is
    +     * possible, that is, some groups might have been committed successfully,
    +     * while some might have failed. The results of individual batches are
    +     * streamed into the response as the batches are applied.
    +     * `BatchWrite` requests are not replay protected, meaning that each mutation
    +     * group can be applied more than once. Replays of non-idempotent mutations
    +     * can have undesirable effects. For example, replays of an insert mutation
    +     * can produce an already exists error or if you use generated or commit
    +     * timestamp-based keys, it can result in additional rows being added to the
    +     * mutation's table. We recommend structuring your mutation groups to be
    +     * idempotent to avoid this issue.
    +     * 
    + */ + public void batchWrite( + com.google.spanner.v1.BatchWriteRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncServerStreamingCall( + getChannel().newCall(getBatchWriteMethod(), getCallOptions()), request, responseObserver); + } + } + + /** + * A stub to allow clients to do synchronous rpc calls to service Spanner. + * + *
    +   * Cloud Spanner API
    +   * The Cloud Spanner API can be used to manage sessions and execute
    +   * transactions on data stored in Cloud Spanner databases.
    +   * 
    + */ + public static final class SpannerBlockingV2Stub + extends io.grpc.stub.AbstractBlockingStub { + private SpannerBlockingV2Stub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected SpannerBlockingV2Stub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new SpannerBlockingV2Stub(channel, callOptions); + } + + /** + * + * + *
    +     * Creates a new session. A session can be used to perform
    +     * transactions that read and/or modify data in a Cloud Spanner database.
    +     * Sessions are meant to be reused for many consecutive
    +     * transactions.
    +     * Sessions can only execute one transaction at a time. To execute
    +     * multiple concurrent read-write/write-only transactions, create
    +     * multiple sessions. Note that standalone reads and queries use a
    +     * transaction internally, and count toward the one transaction
    +     * limit.
    +     * Active sessions use additional server resources, so it's a good idea to
    +     * delete idle and unneeded sessions.
    +     * Aside from explicit deletes, Cloud Spanner can delete sessions when no
    +     * operations are sent for more than an hour. If a session is deleted,
    +     * requests to it return `NOT_FOUND`.
    +     * Idle sessions can be kept alive by sending a trivial SQL query
    +     * periodically, for example, `"SELECT 1"`.
    +     * 
    + */ + public com.google.spanner.v1.Session createSession( + com.google.spanner.v1.CreateSessionRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getCreateSessionMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Creates multiple new sessions.
    +     * This API can be used to initialize a session cache on the clients.
    +     * See https://goo.gl/TgSFN2 for best practices on session cache management.
    +     * 
    + */ + public com.google.spanner.v1.BatchCreateSessionsResponse batchCreateSessions( + com.google.spanner.v1.BatchCreateSessionsRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getBatchCreateSessionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Gets a session. Returns `NOT_FOUND` if the session doesn't exist.
    +     * This is mainly useful for determining whether a session is still
    +     * alive.
    +     * 
    + */ + public com.google.spanner.v1.Session getSession(com.google.spanner.v1.GetSessionRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getGetSessionMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Lists all sessions in a given database.
    +     * 
    + */ + public com.google.spanner.v1.ListSessionsResponse listSessions( + com.google.spanner.v1.ListSessionsRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getListSessionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Ends a session, releasing server resources associated with it. This
    +     * asynchronously triggers the cancellation of any operations that are running
    +     * with this session.
    +     * 
    + */ + public com.google.protobuf.Empty deleteSession( + com.google.spanner.v1.DeleteSessionRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getDeleteSessionMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Executes an SQL statement, returning all results in a single reply. This
    +     * method can't be used to return a result set larger than 10 MiB;
    +     * if the query yields more data than that, the query fails with
    +     * a `FAILED_PRECONDITION` error.
    +     * Operations inside read-write transactions might return `ABORTED`. If
    +     * this occurs, the application should restart the transaction from
    +     * the beginning. See [Transaction][google.spanner.v1.Transaction] for more
    +     * details.
    +     * Larger result sets can be fetched in streaming fashion by calling
    +     * [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]
    +     * instead.
    +     * The query string can be SQL or [Graph Query Language
    +     * (GQL)](https://cloud.google.com/spanner/docs/reference/standard-sql/graph-intro).
    +     * 
    + */ + public com.google.spanner.v1.ResultSet executeSql( + com.google.spanner.v1.ExecuteSqlRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getExecuteSqlMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the
    +     * result set as a stream. Unlike
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on
    +     * the size of the returned result set. However, no individual row in the
    +     * result set can exceed 100 MiB, and no column value can exceed 10 MiB.
    +     * The query string can be SQL or [Graph Query Language
    +     * (GQL)](https://cloud.google.com/spanner/docs/reference/standard-sql/graph-intro).
    +     * 
    + */ + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/10918") + public io.grpc.stub.BlockingClientCall + executeStreamingSql(com.google.spanner.v1.ExecuteSqlRequest request) { + return io.grpc.stub.ClientCalls.blockingV2ServerStreamingCall( + getChannel(), getExecuteStreamingSqlMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Executes a batch of SQL DML statements. This method allows many statements
    +     * to be run with lower latency than submitting them sequentially with
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
    +     * Statements are executed in sequential order. A request can succeed even if
    +     * a statement fails. The
    +     * [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status]
    +     * field in the response provides information about the statement that failed.
    +     * Clients must inspect this field to determine whether an error occurred.
    +     * Execution stops after the first failed statement; the remaining statements
    +     * are not executed.
    +     * 
    + */ + public com.google.spanner.v1.ExecuteBatchDmlResponse executeBatchDml( + com.google.spanner.v1.ExecuteBatchDmlRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getExecuteBatchDmlMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Reads rows from the database using key lookups and scans, as a
    +     * simple key/value style alternative to
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method can't be
    +     * used to return a result set larger than 10 MiB; if the read matches more
    +     * data than that, the read fails with a `FAILED_PRECONDITION`
    +     * error.
    +     * Reads inside read-write transactions might return `ABORTED`. If
    +     * this occurs, the application should restart the transaction from
    +     * the beginning. See [Transaction][google.spanner.v1.Transaction] for more
    +     * details.
    +     * Larger result sets can be yielded in streaming fashion by calling
    +     * [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
    +     * 
    + */ + public com.google.spanner.v1.ResultSet read(com.google.spanner.v1.ReadRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getReadMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Like [Read][google.spanner.v1.Spanner.Read], except returns the result set
    +     * as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no
    +     * limit on the size of the returned result set. However, no individual row in
    +     * the result set can exceed 100 MiB, and no column value can exceed
    +     * 10 MiB.
    +     * 
    + */ + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/10918") + public io.grpc.stub.BlockingClientCall streamingRead( + com.google.spanner.v1.ReadRequest request) { + return io.grpc.stub.ClientCalls.blockingV2ServerStreamingCall( + getChannel(), getStreamingReadMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Begins a new transaction. This step can often be skipped:
    +     * [Read][google.spanner.v1.Spanner.Read],
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
    +     * [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
    +     * side-effect.
    +     * 
    + */ + public com.google.spanner.v1.Transaction beginTransaction( + com.google.spanner.v1.BeginTransactionRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getBeginTransactionMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Commits a transaction. The request includes the mutations to be
    +     * applied to rows in the database.
    +     * `Commit` might return an `ABORTED` error. This can occur at any time;
    +     * commonly, the cause is conflicts with concurrent
    +     * transactions. However, it can also happen for a variety of other
    +     * reasons. If `Commit` returns `ABORTED`, the caller should retry
    +     * the transaction from the beginning, reusing the same session.
    +     * On very rare occasions, `Commit` might return `UNKNOWN`. This can happen,
    +     * for example, if the client job experiences a 1+ hour networking failure.
    +     * At that point, Cloud Spanner has lost track of the transaction outcome and
    +     * we recommend that you perform another read from the database to see the
    +     * state of things as they are now.
    +     * 
    + */ + public com.google.spanner.v1.CommitResponse commit(com.google.spanner.v1.CommitRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getCommitMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Rolls back a transaction, releasing any locks it holds. It's a good
    +     * idea to call this for any transaction that includes one or more
    +     * [Read][google.spanner.v1.Spanner.Read] or
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately
    +     * decides not to commit.
    +     * `Rollback` returns `OK` if it successfully aborts the transaction, the
    +     * transaction was already aborted, or the transaction isn't
    +     * found. `Rollback` never returns `ABORTED`.
    +     * 
    + */ + public com.google.protobuf.Empty rollback(com.google.spanner.v1.RollbackRequest request) + throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getRollbackMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Creates a set of partition tokens that can be used to execute a query
    +     * operation in parallel. Each of the returned partition tokens can be used
    +     * by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to
    +     * specify a subset of the query result to read. The same session and
    +     * read-only transaction must be used by the `PartitionQueryRequest` used to
    +     * create the partition tokens and the `ExecuteSqlRequests` that use the
    +     * partition tokens.
    +     * Partition tokens become invalid when the session used to create them
    +     * is deleted, is idle for too long, begins a new transaction, or becomes too
    +     * old. When any of these happen, it isn't possible to resume the query, and
    +     * the whole operation must be restarted from the beginning.
    +     * 
    + */ + public com.google.spanner.v1.PartitionResponse partitionQuery( + com.google.spanner.v1.PartitionQueryRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getPartitionQueryMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Creates a set of partition tokens that can be used to execute a read
    +     * operation in parallel. Each of the returned partition tokens can be used
    +     * by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a
    +     * subset of the read result to read. The same session and read-only
    +     * transaction must be used by the `PartitionReadRequest` used to create the
    +     * partition tokens and the `ReadRequests` that use the partition tokens.
    +     * There are no ordering guarantees on rows returned among the returned
    +     * partition tokens, or even within each individual `StreamingRead` call
    +     * issued with a `partition_token`.
    +     * Partition tokens become invalid when the session used to create them
    +     * is deleted, is idle for too long, begins a new transaction, or becomes too
    +     * old. When any of these happen, it isn't possible to resume the read, and
    +     * the whole operation must be restarted from the beginning.
    +     * 
    + */ + public com.google.spanner.v1.PartitionResponse partitionRead( + com.google.spanner.v1.PartitionReadRequest request) throws io.grpc.StatusException { + return io.grpc.stub.ClientCalls.blockingV2UnaryCall( + getChannel(), getPartitionReadMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Batches the supplied mutation groups in a collection of efficient
    +     * transactions. All mutations in a group are committed atomically. However,
    +     * mutations across groups can be committed non-atomically in an unspecified
    +     * order and thus, they must be independent of each other. Partial failure is
    +     * possible, that is, some groups might have been committed successfully,
    +     * while some might have failed. The results of individual batches are
    +     * streamed into the response as the batches are applied.
    +     * `BatchWrite` requests are not replay protected, meaning that each mutation
    +     * group can be applied more than once. Replays of non-idempotent mutations
    +     * can have undesirable effects. For example, replays of an insert mutation
    +     * can produce an already exists error or if you use generated or commit
    +     * timestamp-based keys, it can result in additional rows being added to the
    +     * mutation's table. We recommend structuring your mutation groups to be
    +     * idempotent to avoid this issue.
    +     * 
    + */ + @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/10918") + public io.grpc.stub.BlockingClientCall batchWrite( + com.google.spanner.v1.BatchWriteRequest request) { + return io.grpc.stub.ClientCalls.blockingV2ServerStreamingCall( + getChannel(), getBatchWriteMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do limited synchronous rpc calls to service Spanner. + * + *
    +   * Cloud Spanner API
    +   * The Cloud Spanner API can be used to manage sessions and execute
    +   * transactions on data stored in Cloud Spanner databases.
    +   * 
    + */ + public static final class SpannerBlockingStub + extends io.grpc.stub.AbstractBlockingStub { + private SpannerBlockingStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected SpannerBlockingStub build(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new SpannerBlockingStub(channel, callOptions); + } + + /** + * + * + *
    +     * Creates a new session. A session can be used to perform
    +     * transactions that read and/or modify data in a Cloud Spanner database.
    +     * Sessions are meant to be reused for many consecutive
    +     * transactions.
    +     * Sessions can only execute one transaction at a time. To execute
    +     * multiple concurrent read-write/write-only transactions, create
    +     * multiple sessions. Note that standalone reads and queries use a
    +     * transaction internally, and count toward the one transaction
    +     * limit.
    +     * Active sessions use additional server resources, so it's a good idea to
    +     * delete idle and unneeded sessions.
    +     * Aside from explicit deletes, Cloud Spanner can delete sessions when no
    +     * operations are sent for more than an hour. If a session is deleted,
    +     * requests to it return `NOT_FOUND`.
    +     * Idle sessions can be kept alive by sending a trivial SQL query
    +     * periodically, for example, `"SELECT 1"`.
    +     * 
    + */ + public com.google.spanner.v1.Session createSession( + com.google.spanner.v1.CreateSessionRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCreateSessionMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Creates multiple new sessions.
    +     * This API can be used to initialize a session cache on the clients.
    +     * See https://goo.gl/TgSFN2 for best practices on session cache management.
    +     * 
    + */ + public com.google.spanner.v1.BatchCreateSessionsResponse batchCreateSessions( + com.google.spanner.v1.BatchCreateSessionsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getBatchCreateSessionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Gets a session. Returns `NOT_FOUND` if the session doesn't exist.
    +     * This is mainly useful for determining whether a session is still
    +     * alive.
    +     * 
    + */ + public com.google.spanner.v1.Session getSession( + com.google.spanner.v1.GetSessionRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetSessionMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Lists all sessions in a given database.
    +     * 
    + */ + public com.google.spanner.v1.ListSessionsResponse listSessions( + com.google.spanner.v1.ListSessionsRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListSessionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Ends a session, releasing server resources associated with it. This
    +     * asynchronously triggers the cancellation of any operations that are running
    +     * with this session.
    +     * 
    + */ + public com.google.protobuf.Empty deleteSession( + com.google.spanner.v1.DeleteSessionRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getDeleteSessionMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Executes an SQL statement, returning all results in a single reply. This
    +     * method can't be used to return a result set larger than 10 MiB;
    +     * if the query yields more data than that, the query fails with
    +     * a `FAILED_PRECONDITION` error.
    +     * Operations inside read-write transactions might return `ABORTED`. If
    +     * this occurs, the application should restart the transaction from
    +     * the beginning. See [Transaction][google.spanner.v1.Transaction] for more
    +     * details.
    +     * Larger result sets can be fetched in streaming fashion by calling
    +     * [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]
    +     * instead.
    +     * The query string can be SQL or [Graph Query Language
    +     * (GQL)](https://cloud.google.com/spanner/docs/reference/standard-sql/graph-intro).
    +     * 
    + */ + public com.google.spanner.v1.ResultSet executeSql( + com.google.spanner.v1.ExecuteSqlRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getExecuteSqlMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the
    +     * result set as a stream. Unlike
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on
    +     * the size of the returned result set. However, no individual row in the
    +     * result set can exceed 100 MiB, and no column value can exceed 10 MiB.
    +     * The query string can be SQL or [Graph Query Language
    +     * (GQL)](https://cloud.google.com/spanner/docs/reference/standard-sql/graph-intro).
    +     * 
    + */ + public java.util.Iterator executeStreamingSql( + com.google.spanner.v1.ExecuteSqlRequest request) { + return io.grpc.stub.ClientCalls.blockingServerStreamingCall( + getChannel(), getExecuteStreamingSqlMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Executes a batch of SQL DML statements. This method allows many statements
    +     * to be run with lower latency than submitting them sequentially with
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
    +     * Statements are executed in sequential order. A request can succeed even if
    +     * a statement fails. The
    +     * [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status]
    +     * field in the response provides information about the statement that failed.
    +     * Clients must inspect this field to determine whether an error occurred.
    +     * Execution stops after the first failed statement; the remaining statements
    +     * are not executed.
    +     * 
    + */ + public com.google.spanner.v1.ExecuteBatchDmlResponse executeBatchDml( + com.google.spanner.v1.ExecuteBatchDmlRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getExecuteBatchDmlMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Reads rows from the database using key lookups and scans, as a
    +     * simple key/value style alternative to
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method can't be
    +     * used to return a result set larger than 10 MiB; if the read matches more
    +     * data than that, the read fails with a `FAILED_PRECONDITION`
    +     * error.
    +     * Reads inside read-write transactions might return `ABORTED`. If
    +     * this occurs, the application should restart the transaction from
    +     * the beginning. See [Transaction][google.spanner.v1.Transaction] for more
    +     * details.
    +     * Larger result sets can be yielded in streaming fashion by calling
    +     * [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
    +     * 
    + */ + public com.google.spanner.v1.ResultSet read(com.google.spanner.v1.ReadRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getReadMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Like [Read][google.spanner.v1.Spanner.Read], except returns the result set
    +     * as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no
    +     * limit on the size of the returned result set. However, no individual row in
    +     * the result set can exceed 100 MiB, and no column value can exceed
    +     * 10 MiB.
    +     * 
    + */ + public java.util.Iterator streamingRead( + com.google.spanner.v1.ReadRequest request) { + return io.grpc.stub.ClientCalls.blockingServerStreamingCall( + getChannel(), getStreamingReadMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Begins a new transaction. This step can often be skipped:
    +     * [Read][google.spanner.v1.Spanner.Read],
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
    +     * [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
    +     * side-effect.
    +     * 
    + */ + public com.google.spanner.v1.Transaction beginTransaction( + com.google.spanner.v1.BeginTransactionRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getBeginTransactionMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Commits a transaction. The request includes the mutations to be
    +     * applied to rows in the database.
    +     * `Commit` might return an `ABORTED` error. This can occur at any time;
    +     * commonly, the cause is conflicts with concurrent
    +     * transactions. However, it can also happen for a variety of other
    +     * reasons. If `Commit` returns `ABORTED`, the caller should retry
    +     * the transaction from the beginning, reusing the same session.
    +     * On very rare occasions, `Commit` might return `UNKNOWN`. This can happen,
    +     * for example, if the client job experiences a 1+ hour networking failure.
    +     * At that point, Cloud Spanner has lost track of the transaction outcome and
    +     * we recommend that you perform another read from the database to see the
    +     * state of things as they are now.
    +     * 
    + */ + public com.google.spanner.v1.CommitResponse commit( + com.google.spanner.v1.CommitRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCommitMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Rolls back a transaction, releasing any locks it holds. It's a good
    +     * idea to call this for any transaction that includes one or more
    +     * [Read][google.spanner.v1.Spanner.Read] or
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately
    +     * decides not to commit.
    +     * `Rollback` returns `OK` if it successfully aborts the transaction, the
    +     * transaction was already aborted, or the transaction isn't
    +     * found. `Rollback` never returns `ABORTED`.
    +     * 
    + */ + public com.google.protobuf.Empty rollback(com.google.spanner.v1.RollbackRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getRollbackMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Creates a set of partition tokens that can be used to execute a query
    +     * operation in parallel. Each of the returned partition tokens can be used
    +     * by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to
    +     * specify a subset of the query result to read. The same session and
    +     * read-only transaction must be used by the `PartitionQueryRequest` used to
    +     * create the partition tokens and the `ExecuteSqlRequests` that use the
    +     * partition tokens.
    +     * Partition tokens become invalid when the session used to create them
    +     * is deleted, is idle for too long, begins a new transaction, or becomes too
    +     * old. When any of these happen, it isn't possible to resume the query, and
    +     * the whole operation must be restarted from the beginning.
    +     * 
    + */ + public com.google.spanner.v1.PartitionResponse partitionQuery( + com.google.spanner.v1.PartitionQueryRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getPartitionQueryMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Creates a set of partition tokens that can be used to execute a read
    +     * operation in parallel. Each of the returned partition tokens can be used
    +     * by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a
    +     * subset of the read result to read. The same session and read-only
    +     * transaction must be used by the `PartitionReadRequest` used to create the
    +     * partition tokens and the `ReadRequests` that use the partition tokens.
    +     * There are no ordering guarantees on rows returned among the returned
    +     * partition tokens, or even within each individual `StreamingRead` call
    +     * issued with a `partition_token`.
    +     * Partition tokens become invalid when the session used to create them
    +     * is deleted, is idle for too long, begins a new transaction, or becomes too
    +     * old. When any of these happen, it isn't possible to resume the read, and
    +     * the whole operation must be restarted from the beginning.
    +     * 
    + */ + public com.google.spanner.v1.PartitionResponse partitionRead( + com.google.spanner.v1.PartitionReadRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getPartitionReadMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Batches the supplied mutation groups in a collection of efficient
    +     * transactions. All mutations in a group are committed atomically. However,
    +     * mutations across groups can be committed non-atomically in an unspecified
    +     * order and thus, they must be independent of each other. Partial failure is
    +     * possible, that is, some groups might have been committed successfully,
    +     * while some might have failed. The results of individual batches are
    +     * streamed into the response as the batches are applied.
    +     * `BatchWrite` requests are not replay protected, meaning that each mutation
    +     * group can be applied more than once. Replays of non-idempotent mutations
    +     * can have undesirable effects. For example, replays of an insert mutation
    +     * can produce an already exists error or if you use generated or commit
    +     * timestamp-based keys, it can result in additional rows being added to the
    +     * mutation's table. We recommend structuring your mutation groups to be
    +     * idempotent to avoid this issue.
    +     * 
    + */ + public java.util.Iterator batchWrite( + com.google.spanner.v1.BatchWriteRequest request) { + return io.grpc.stub.ClientCalls.blockingServerStreamingCall( + getChannel(), getBatchWriteMethod(), getCallOptions(), request); + } + } + + /** + * A stub to allow clients to do ListenableFuture-style rpc calls to service Spanner. + * + *
    +   * Cloud Spanner API
    +   * The Cloud Spanner API can be used to manage sessions and execute
    +   * transactions on data stored in Cloud Spanner databases.
    +   * 
    + */ + public static final class SpannerFutureStub + extends io.grpc.stub.AbstractFutureStub { + private SpannerFutureStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected SpannerFutureStub build(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new SpannerFutureStub(channel, callOptions); + } + + /** + * + * + *
    +     * Creates a new session. A session can be used to perform
    +     * transactions that read and/or modify data in a Cloud Spanner database.
    +     * Sessions are meant to be reused for many consecutive
    +     * transactions.
    +     * Sessions can only execute one transaction at a time. To execute
    +     * multiple concurrent read-write/write-only transactions, create
    +     * multiple sessions. Note that standalone reads and queries use a
    +     * transaction internally, and count toward the one transaction
    +     * limit.
    +     * Active sessions use additional server resources, so it's a good idea to
    +     * delete idle and unneeded sessions.
    +     * Aside from explicit deletes, Cloud Spanner can delete sessions when no
    +     * operations are sent for more than an hour. If a session is deleted,
    +     * requests to it return `NOT_FOUND`.
    +     * Idle sessions can be kept alive by sending a trivial SQL query
    +     * periodically, for example, `"SELECT 1"`.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + createSession(com.google.spanner.v1.CreateSessionRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCreateSessionMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Creates multiple new sessions.
    +     * This API can be used to initialize a session cache on the clients.
    +     * See https://goo.gl/TgSFN2 for best practices on session cache management.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.v1.BatchCreateSessionsResponse> + batchCreateSessions(com.google.spanner.v1.BatchCreateSessionsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getBatchCreateSessionsMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Gets a session. Returns `NOT_FOUND` if the session doesn't exist.
    +     * This is mainly useful for determining whether a session is still
    +     * alive.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + getSession(com.google.spanner.v1.GetSessionRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetSessionMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Lists all sessions in a given database.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.v1.ListSessionsResponse> + listSessions(com.google.spanner.v1.ListSessionsRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListSessionsMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Ends a session, releasing server resources associated with it. This
    +     * asynchronously triggers the cancellation of any operations that are running
    +     * with this session.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + deleteSession(com.google.spanner.v1.DeleteSessionRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getDeleteSessionMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Executes an SQL statement, returning all results in a single reply. This
    +     * method can't be used to return a result set larger than 10 MiB;
    +     * if the query yields more data than that, the query fails with
    +     * a `FAILED_PRECONDITION` error.
    +     * Operations inside read-write transactions might return `ABORTED`. If
    +     * this occurs, the application should restart the transaction from
    +     * the beginning. See [Transaction][google.spanner.v1.Transaction] for more
    +     * details.
    +     * Larger result sets can be fetched in streaming fashion by calling
    +     * [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]
    +     * instead.
    +     * The query string can be SQL or [Graph Query Language
    +     * (GQL)](https://cloud.google.com/spanner/docs/reference/standard-sql/graph-intro).
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + executeSql(com.google.spanner.v1.ExecuteSqlRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getExecuteSqlMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Executes a batch of SQL DML statements. This method allows many statements
    +     * to be run with lower latency than submitting them sequentially with
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
    +     * Statements are executed in sequential order. A request can succeed even if
    +     * a statement fails. The
    +     * [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status]
    +     * field in the response provides information about the statement that failed.
    +     * Clients must inspect this field to determine whether an error occurred.
    +     * Execution stops after the first failed statement; the remaining statements
    +     * are not executed.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.v1.ExecuteBatchDmlResponse> + executeBatchDml(com.google.spanner.v1.ExecuteBatchDmlRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getExecuteBatchDmlMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Reads rows from the database using key lookups and scans, as a
    +     * simple key/value style alternative to
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method can't be
    +     * used to return a result set larger than 10 MiB; if the read matches more
    +     * data than that, the read fails with a `FAILED_PRECONDITION`
    +     * error.
    +     * Reads inside read-write transactions might return `ABORTED`. If
    +     * this occurs, the application should restart the transaction from
    +     * the beginning. See [Transaction][google.spanner.v1.Transaction] for more
    +     * details.
    +     * Larger result sets can be yielded in streaming fashion by calling
    +     * [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture read( + com.google.spanner.v1.ReadRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getReadMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Begins a new transaction. This step can often be skipped:
    +     * [Read][google.spanner.v1.Spanner.Read],
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
    +     * [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
    +     * side-effect.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + beginTransaction(com.google.spanner.v1.BeginTransactionRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getBeginTransactionMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Commits a transaction. The request includes the mutations to be
    +     * applied to rows in the database.
    +     * `Commit` might return an `ABORTED` error. This can occur at any time;
    +     * commonly, the cause is conflicts with concurrent
    +     * transactions. However, it can also happen for a variety of other
    +     * reasons. If `Commit` returns `ABORTED`, the caller should retry
    +     * the transaction from the beginning, reusing the same session.
    +     * On very rare occasions, `Commit` might return `UNKNOWN`. This can happen,
    +     * for example, if the client job experiences a 1+ hour networking failure.
    +     * At that point, Cloud Spanner has lost track of the transaction outcome and
    +     * we recommend that you perform another read from the database to see the
    +     * state of things as they are now.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + commit(com.google.spanner.v1.CommitRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCommitMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Rolls back a transaction, releasing any locks it holds. It's a good
    +     * idea to call this for any transaction that includes one or more
    +     * [Read][google.spanner.v1.Spanner.Read] or
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately
    +     * decides not to commit.
    +     * `Rollback` returns `OK` if it successfully aborts the transaction, the
    +     * transaction was already aborted, or the transaction isn't
    +     * found. `Rollback` never returns `ABORTED`.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture rollback( + com.google.spanner.v1.RollbackRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getRollbackMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Creates a set of partition tokens that can be used to execute a query
    +     * operation in parallel. Each of the returned partition tokens can be used
    +     * by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to
    +     * specify a subset of the query result to read. The same session and
    +     * read-only transaction must be used by the `PartitionQueryRequest` used to
    +     * create the partition tokens and the `ExecuteSqlRequests` that use the
    +     * partition tokens.
    +     * Partition tokens become invalid when the session used to create them
    +     * is deleted, is idle for too long, begins a new transaction, or becomes too
    +     * old. When any of these happen, it isn't possible to resume the query, and
    +     * the whole operation must be restarted from the beginning.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.v1.PartitionResponse> + partitionQuery(com.google.spanner.v1.PartitionQueryRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getPartitionQueryMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Creates a set of partition tokens that can be used to execute a read
    +     * operation in parallel. Each of the returned partition tokens can be used
    +     * by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a
    +     * subset of the read result to read. The same session and read-only
    +     * transaction must be used by the `PartitionReadRequest` used to create the
    +     * partition tokens and the `ReadRequests` that use the partition tokens.
    +     * There are no ordering guarantees on rows returned among the returned
    +     * partition tokens, or even within each individual `StreamingRead` call
    +     * issued with a `partition_token`.
    +     * Partition tokens become invalid when the session used to create them
    +     * is deleted, is idle for too long, begins a new transaction, or becomes too
    +     * old. When any of these happen, it isn't possible to resume the read, and
    +     * the whole operation must be restarted from the beginning.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.v1.PartitionResponse> + partitionRead(com.google.spanner.v1.PartitionReadRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getPartitionReadMethod(), getCallOptions()), request); + } + } + + private static final int METHODID_CREATE_SESSION = 0; + private static final int METHODID_BATCH_CREATE_SESSIONS = 1; + private static final int METHODID_GET_SESSION = 2; + private static final int METHODID_LIST_SESSIONS = 3; + private static final int METHODID_DELETE_SESSION = 4; + private static final int METHODID_EXECUTE_SQL = 5; + private static final int METHODID_EXECUTE_STREAMING_SQL = 6; + private static final int METHODID_EXECUTE_BATCH_DML = 7; + private static final int METHODID_READ = 8; + private static final int METHODID_STREAMING_READ = 9; + private static final int METHODID_BEGIN_TRANSACTION = 10; + private static final int METHODID_COMMIT = 11; + private static final int METHODID_ROLLBACK = 12; + private static final int METHODID_PARTITION_QUERY = 13; + private static final int METHODID_PARTITION_READ = 14; + private static final int METHODID_BATCH_WRITE = 15; + + private static final class MethodHandlers + implements io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final AsyncService serviceImpl; + private final int methodId; + + MethodHandlers(AsyncService serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_CREATE_SESSION: + serviceImpl.createSession( + (com.google.spanner.v1.CreateSessionRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + 
break; + case METHODID_BATCH_CREATE_SESSIONS: + serviceImpl.batchCreateSessions( + (com.google.spanner.v1.BatchCreateSessionsRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_GET_SESSION: + serviceImpl.getSession( + (com.google.spanner.v1.GetSessionRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_LIST_SESSIONS: + serviceImpl.listSessions( + (com.google.spanner.v1.ListSessionsRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_DELETE_SESSION: + serviceImpl.deleteSession( + (com.google.spanner.v1.DeleteSessionRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_EXECUTE_SQL: + serviceImpl.executeSql( + (com.google.spanner.v1.ExecuteSqlRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_EXECUTE_STREAMING_SQL: + serviceImpl.executeStreamingSql( + (com.google.spanner.v1.ExecuteSqlRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_EXECUTE_BATCH_DML: + serviceImpl.executeBatchDml( + (com.google.spanner.v1.ExecuteBatchDmlRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_READ: + serviceImpl.read( + (com.google.spanner.v1.ReadRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_STREAMING_READ: + serviceImpl.streamingRead( + (com.google.spanner.v1.ReadRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_BEGIN_TRANSACTION: + serviceImpl.beginTransaction( + (com.google.spanner.v1.BeginTransactionRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_COMMIT: + serviceImpl.commit( + (com.google.spanner.v1.CommitRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_ROLLBACK: + serviceImpl.rollback( + 
(com.google.spanner.v1.RollbackRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_PARTITION_QUERY: + serviceImpl.partitionQuery( + (com.google.spanner.v1.PartitionQueryRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_PARTITION_READ: + serviceImpl.partitionRead( + (com.google.spanner.v1.PartitionReadRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_BATCH_WRITE: + serviceImpl.batchWrite( + (com.google.spanner.v1.BatchWriteRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + default: + throw new AssertionError(); + } + } + } + + public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getCreateSessionMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.v1.CreateSessionRequest, com.google.spanner.v1.Session>( + service, METHODID_CREATE_SESSION))) + .addMethod( + getBatchCreateSessionsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.v1.BatchCreateSessionsRequest, + com.google.spanner.v1.BatchCreateSessionsResponse>( + service, METHODID_BATCH_CREATE_SESSIONS))) + .addMethod( + getGetSessionMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.v1.GetSessionRequest, com.google.spanner.v1.Session>( + service, METHODID_GET_SESSION))) + .addMethod( + getListSessionsMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.v1.ListSessionsRequest, + com.google.spanner.v1.ListSessionsResponse>(service, 
METHODID_LIST_SESSIONS))) + .addMethod( + getDeleteSessionMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.v1.DeleteSessionRequest, com.google.protobuf.Empty>( + service, METHODID_DELETE_SESSION))) + .addMethod( + getExecuteSqlMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.v1.ExecuteSqlRequest, com.google.spanner.v1.ResultSet>( + service, METHODID_EXECUTE_SQL))) + .addMethod( + getExecuteStreamingSqlMethod(), + io.grpc.stub.ServerCalls.asyncServerStreamingCall( + new MethodHandlers< + com.google.spanner.v1.ExecuteSqlRequest, + com.google.spanner.v1.PartialResultSet>( + service, METHODID_EXECUTE_STREAMING_SQL))) + .addMethod( + getExecuteBatchDmlMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.v1.ExecuteBatchDmlRequest, + com.google.spanner.v1.ExecuteBatchDmlResponse>( + service, METHODID_EXECUTE_BATCH_DML))) + .addMethod( + getReadMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.v1.ReadRequest, com.google.spanner.v1.ResultSet>( + service, METHODID_READ))) + .addMethod( + getStreamingReadMethod(), + io.grpc.stub.ServerCalls.asyncServerStreamingCall( + new MethodHandlers< + com.google.spanner.v1.ReadRequest, com.google.spanner.v1.PartialResultSet>( + service, METHODID_STREAMING_READ))) + .addMethod( + getBeginTransactionMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.v1.BeginTransactionRequest, + com.google.spanner.v1.Transaction>(service, METHODID_BEGIN_TRANSACTION))) + .addMethod( + getCommitMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.v1.CommitRequest, com.google.spanner.v1.CommitResponse>( + service, METHODID_COMMIT))) + .addMethod( + getRollbackMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.v1.RollbackRequest, 
com.google.protobuf.Empty>( + service, METHODID_ROLLBACK))) + .addMethod( + getPartitionQueryMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.v1.PartitionQueryRequest, + com.google.spanner.v1.PartitionResponse>(service, METHODID_PARTITION_QUERY))) + .addMethod( + getPartitionReadMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.v1.PartitionReadRequest, + com.google.spanner.v1.PartitionResponse>(service, METHODID_PARTITION_READ))) + .addMethod( + getBatchWriteMethod(), + io.grpc.stub.ServerCalls.asyncServerStreamingCall( + new MethodHandlers< + com.google.spanner.v1.BatchWriteRequest, + com.google.spanner.v1.BatchWriteResponse>(service, METHODID_BATCH_WRITE))) + .build(); + } + + private abstract static class SpannerBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, + io.grpc.protobuf.ProtoServiceDescriptorSupplier { + SpannerBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return com.google.spanner.v1.SpannerProto.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("Spanner"); + } + } + + private static final class SpannerFileDescriptorSupplier extends SpannerBaseDescriptorSupplier { + SpannerFileDescriptorSupplier() {} + } + + private static final class SpannerMethodDescriptorSupplier extends SpannerBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final java.lang.String methodName; + + SpannerMethodDescriptorSupplier(java.lang.String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile 
io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (SpannerGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = + result = + io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new SpannerFileDescriptorSupplier()) + .addMethod(getCreateSessionMethod()) + .addMethod(getBatchCreateSessionsMethod()) + .addMethod(getGetSessionMethod()) + .addMethod(getListSessionsMethod()) + .addMethod(getDeleteSessionMethod()) + .addMethod(getExecuteSqlMethod()) + .addMethod(getExecuteStreamingSqlMethod()) + .addMethod(getExecuteBatchDmlMethod()) + .addMethod(getReadMethod()) + .addMethod(getStreamingReadMethod()) + .addMethod(getBeginTransactionMethod()) + .addMethod(getCommitMethod()) + .addMethod(getRollbackMethod()) + .addMethod(getPartitionQueryMethod()) + .addMethod(getPartitionReadMethod()) + .addMethod(getBatchWriteMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/java-spanner/license_file b/java-spanner/license_file new file mode 100644 index 000000000000..eeb415312514 --- /dev/null +++ b/java-spanner/license_file @@ -0,0 +1,15 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ \ No newline at end of file diff --git a/java-spanner/owlbot.py b/java-spanner/owlbot.py new file mode 100644 index 000000000000..193523318929 --- /dev/null +++ b/java-spanner/owlbot.py @@ -0,0 +1,36 @@ +# Copyright 2026 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import synthtool as s +from synthtool.languages import java + +for library in s.get_staging_dirs(): + # put any special-case replacements here + s.move(library) +s.remove_staging_dirs() +java.common_templates( + monorepo=True, + excludes=[ + ".github/*", + ".kokoro/*", + "samples/*", + "CODE_OF_CONDUCT.md", + "CONTRIBUTING.md", + "LICENSE", + "SECURITY.md", + "java.header", + "license-checks.xml", + "renovate.json", + ".gitignore" +]) diff --git a/java-spanner/pom.xml b/java-spanner/pom.xml new file mode 100644 index 000000000000..9e0293d05e8f --- /dev/null +++ b/java-spanner/pom.xml @@ -0,0 +1,156 @@ + + + 4.0.0 + com.google.cloud + google-cloud-spanner-parent + pom + 6.112.1-SNAPSHOT + Google Cloud Spanner Parent + https://github.com/googleapis/google-cloud-java + + Java idiomatic client for Google Cloud Platform services. 
+ + + + com.google.cloud + google-cloud-jar-parent + 1.83.0-SNAPSHOT + ../google-cloud-jar-parent/pom.xml + + + + + chingor + Jeff Ching + chingor@google.com + Google + + Developer + + + + + Google LLC + + + scm:git:git@github.com:googleapis/google-cloud-java.git + scm:git:git@github.com:googleapis/google-cloud-java.git + https://github.com/googleapis/google-cloud-java + HEAD + + + https://github.com/googleapis/google-cloud-java/issues + GitHub Issues + + + + + Apache-2.0 + https://www.apache.org/licenses/LICENSE-2.0.txt + + + + + UTF-8 + UTF-8 + github + google-cloud-spanner-parent + + + + + + com.google.api.grpc + proto-google-cloud-spanner-admin-instance-v1 + 6.112.1-SNAPSHOT + + + com.google.api.grpc + proto-google-cloud-spanner-executor-v1 + 6.112.1-SNAPSHOT + + + com.google.api.grpc + grpc-google-cloud-spanner-executor-v1 + 6.112.1-SNAPSHOT + + + com.google.api.grpc + proto-google-cloud-spanner-v1 + 6.112.1-SNAPSHOT + + + com.google.api.grpc + proto-google-cloud-spanner-admin-database-v1 + 6.112.1-SNAPSHOT + + + com.google.api.grpc + grpc-google-cloud-spanner-v1 + 6.112.1-SNAPSHOT + + + com.google.api.grpc + grpc-google-cloud-spanner-admin-instance-v1 + 6.112.1-SNAPSHOT + + + com.google.api.grpc + grpc-google-cloud-spanner-admin-database-v1 + 6.112.1-SNAPSHOT + + + com.google.cloud + google-cloud-spanner + 6.112.1-SNAPSHOT + + + + com.google.truth + truth + 1.4.5 + test + + + org.checkerframework + checker-qual + + + + + + + + google-cloud-spanner + grpc-google-cloud-spanner-v1 + grpc-google-cloud-spanner-admin-instance-v1 + grpc-google-cloud-spanner-admin-database-v1 + grpc-google-cloud-spanner-executor-v1 + proto-google-cloud-spanner-admin-instance-v1 + proto-google-cloud-spanner-v1 + proto-google-cloud-spanner-admin-database-v1 + proto-google-cloud-spanner-executor-v1 + google-cloud-spanner-executor + google-cloud-spanner-bom + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.15.0 + + 1.8 + 1.8 + UTF-8 + -Xlint:unchecked + 
-Xlint:deprecation + true + + + + + + diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/clirr-ignored-differences.xml b/java-spanner/proto-google-cloud-spanner-admin-database-v1/clirr-ignored-differences.xml new file mode 100644 index 000000000000..fb64ee184704 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/clirr-ignored-differences.xml @@ -0,0 +1,137 @@ + + + + + 7012 + com/google/spanner/admin/database/v1/*OrBuilder + * get*(*) + + + 7012 + com/google/spanner/admin/database/v1/*OrBuilder + boolean contains*(*) + + + 7012 + com/google/spanner/admin/database/v1/*OrBuilder + boolean has*(*) + + + + + + 5001 + com/google/spanner/admin/database/v1/* + com/google/protobuf/GeneratedMessage + + + 5001 + com/google/spanner/admin/database/v1/*$Builder + com/google/protobuf/GeneratedMessage$Builder + + + 5001 + com/google/spanner/admin/database/v1/*$* + com/google/protobuf/GeneratedMessage + + + 5001 + com/google/spanner/admin/database/v1/*$*$Builder + com/google/protobuf/GeneratedMessage$Builder + + + 5001 + com/google/spanner/admin/database/v1/*$*$* + com/google/protobuf/GeneratedMessage + + + 5001 + com/google/spanner/admin/database/v1/*$*$*$Builder + com/google/protobuf/GeneratedMessage$Builder + + + 5001 + com/google/spanner/admin/database/v1/*Proto + com/google/protobuf/GeneratedFile + + + + 7005 + com/google/spanner/admin/database/v1/** + * newBuilderForType(*) + ** + + + + 7006 + com/google/spanner/admin/database/v1/** + * internalGetFieldAccessorTable() + ** + + + + 7014 + com/google/spanner/admin/database/v1/** + * getDescriptor() + + + 7006 + com/google/spanner/admin/database/v1/** + * getDefaultInstanceForType() + ** + + + 7006 + com/google/spanner/admin/database/v1/** + * addRepeatedField(*) + ** + + + 7006 + com/google/spanner/admin/database/v1/** + * clear() + ** + + + 7006 + com/google/spanner/admin/database/v1/** + * clearField(*) + ** + + + 7006 + com/google/spanner/admin/database/v1/** + * clearOneof(*) 
+ ** + + + 7006 + com/google/spanner/admin/database/v1/** + * clone() + ** + + + 7006 + com/google/spanner/admin/database/v1/** + * mergeUnknownFields(*) + ** + + + 7006 + com/google/spanner/admin/database/v1/** + * setField(*) + ** + + + 7006 + com/google/spanner/admin/database/v1/** + * setRepeatedField(*) + ** + + + 7006 + com/google/spanner/admin/database/v1/** + * setUnknownFields(*) + ** + + diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/pom.xml b/java-spanner/proto-google-cloud-spanner-admin-database-v1/pom.xml new file mode 100644 index 000000000000..a92af4e8a16f --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/pom.xml @@ -0,0 +1,47 @@ + + 4.0.0 + com.google.api.grpc + proto-google-cloud-spanner-admin-database-v1 + 6.112.1-SNAPSHOT + proto-google-cloud-spanner-admin-database-v1 + PROTO library for proto-google-cloud-spanner-admin-database-v1 + + com.google.cloud + google-cloud-spanner-parent + 6.112.1-SNAPSHOT + + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-common-protos + + + com.google.api + api-common + + + com.google.guava + guava + + + com.google.api.grpc + proto-google-iam-v1 + + + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + + \ No newline at end of file diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/AddSplitPointsRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/AddSplitPointsRequest.java new file mode 100644 index 000000000000..93020429848b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/AddSplitPointsRequest.java @@ -0,0 +1,1414 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.AddSplitPointsRequest} + */ +@com.google.protobuf.Generated +public final class AddSplitPointsRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.AddSplitPointsRequest) + AddSplitPointsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "AddSplitPointsRequest"); + } + + // Use AddSplitPointsRequest.newBuilder() to construct. + private AddSplitPointsRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private AddSplitPointsRequest() { + database_ = ""; + splitPoints_ = java.util.Collections.emptyList(); + initiator_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_AddSplitPointsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_AddSplitPointsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.AddSplitPointsRequest.class, + com.google.spanner.admin.database.v1.AddSplitPointsRequest.Builder.class); + } + + public static final int DATABASE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object database_ = ""; + + /** + * + * + *
    +   * Required. The database on whose tables/indexes split points are to be
    +   * added. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + @java.lang.Override + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The database on whose tables/indexes split points are to be
    +   * added. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SPLIT_POINTS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List splitPoints_; + + /** + * + * + *
    +   * Required. The split points to add.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List getSplitPointsList() { + return splitPoints_; + } + + /** + * + * + *
    +   * Required. The split points to add.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List + getSplitPointsOrBuilderList() { + return splitPoints_; + } + + /** + * + * + *
    +   * Required. The split points to add.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public int getSplitPointsCount() { + return splitPoints_.size(); + } + + /** + * + * + *
    +   * Required. The split points to add.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.SplitPoints getSplitPoints(int index) { + return splitPoints_.get(index); + } + + /** + * + * + *
    +   * Required. The split points to add.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.SplitPointsOrBuilder getSplitPointsOrBuilder( + int index) { + return splitPoints_.get(index); + } + + public static final int INITIATOR_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object initiator_ = ""; + + /** + * + * + *
    +   * Optional. A user-supplied tag associated with the split points.
    +   * For example, "intital_data_load", "special_event_1".
    +   * Defaults to "CloudAddSplitPointsAPI" if not specified.
    +   * The length of the tag must not exceed 50 characters,else will be trimmed.
    +   * Only valid UTF8 characters are allowed.
    +   * 
    + * + * string initiator = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The initiator. + */ + @java.lang.Override + public java.lang.String getInitiator() { + java.lang.Object ref = initiator_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + initiator_ = s; + return s; + } + } + + /** + * + * + *
    +   * Optional. A user-supplied tag associated with the split points.
    +   * For example, "intital_data_load", "special_event_1".
    +   * Defaults to "CloudAddSplitPointsAPI" if not specified.
    +   * The length of the tag must not exceed 50 characters,else will be trimmed.
    +   * Only valid UTF8 characters are allowed.
    +   * 
    + * + * string initiator = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for initiator. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInitiatorBytes() { + java.lang.Object ref = initiator_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + initiator_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, database_); + } + for (int i = 0; i < splitPoints_.size(); i++) { + output.writeMessage(2, splitPoints_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(initiator_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, initiator_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, database_); + } + for (int i = 0; i < splitPoints_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, splitPoints_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(initiator_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, initiator_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = 
size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.AddSplitPointsRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.AddSplitPointsRequest other = + (com.google.spanner.admin.database.v1.AddSplitPointsRequest) obj; + + if (!getDatabase().equals(other.getDatabase())) return false; + if (!getSplitPointsList().equals(other.getSplitPointsList())) return false; + if (!getInitiator().equals(other.getInitiator())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getDatabase().hashCode(); + if (getSplitPointsCount() > 0) { + hash = (37 * hash) + SPLIT_POINTS_FIELD_NUMBER; + hash = (53 * hash) + getSplitPointsList().hashCode(); + } + hash = (37 * hash) + INITIATOR_FIELD_NUMBER; + hash = (53 * hash) + getInitiator().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsRequest parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, 
extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.AddSplitPointsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.AddSplitPointsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.AddSplitPointsRequest) + com.google.spanner.admin.database.v1.AddSplitPointsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_AddSplitPointsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_AddSplitPointsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.AddSplitPointsRequest.class, + com.google.spanner.admin.database.v1.AddSplitPointsRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.AddSplitPointsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + database_ = ""; + if (splitPointsBuilder_ == null) { + splitPoints_ = java.util.Collections.emptyList(); + } else { + splitPoints_ = null; + splitPointsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + initiator_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_AddSplitPointsRequest_descriptor; + } + + @java.lang.Override + public 
com.google.spanner.admin.database.v1.AddSplitPointsRequest getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.AddSplitPointsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.AddSplitPointsRequest build() { + com.google.spanner.admin.database.v1.AddSplitPointsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.AddSplitPointsRequest buildPartial() { + com.google.spanner.admin.database.v1.AddSplitPointsRequest result = + new com.google.spanner.admin.database.v1.AddSplitPointsRequest(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.admin.database.v1.AddSplitPointsRequest result) { + if (splitPointsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + splitPoints_ = java.util.Collections.unmodifiableList(splitPoints_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.splitPoints_ = splitPoints_; + } else { + result.splitPoints_ = splitPointsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.admin.database.v1.AddSplitPointsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.database_ = database_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.initiator_ = initiator_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.AddSplitPointsRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.AddSplitPointsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.AddSplitPointsRequest 
other) { + if (other == com.google.spanner.admin.database.v1.AddSplitPointsRequest.getDefaultInstance()) + return this; + if (!other.getDatabase().isEmpty()) { + database_ = other.database_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (splitPointsBuilder_ == null) { + if (!other.splitPoints_.isEmpty()) { + if (splitPoints_.isEmpty()) { + splitPoints_ = other.splitPoints_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureSplitPointsIsMutable(); + splitPoints_.addAll(other.splitPoints_); + } + onChanged(); + } + } else { + if (!other.splitPoints_.isEmpty()) { + if (splitPointsBuilder_.isEmpty()) { + splitPointsBuilder_.dispose(); + splitPointsBuilder_ = null; + splitPoints_ = other.splitPoints_; + bitField0_ = (bitField0_ & ~0x00000002); + splitPointsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetSplitPointsFieldBuilder() + : null; + } else { + splitPointsBuilder_.addAllMessages(other.splitPoints_); + } + } + } + if (!other.getInitiator().isEmpty()) { + initiator_ = other.initiator_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + database_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.spanner.admin.database.v1.SplitPoints m = + input.readMessage( + com.google.spanner.admin.database.v1.SplitPoints.parser(), + extensionRegistry); + if (splitPointsBuilder_ == 
null) { + ensureSplitPointsIsMutable(); + splitPoints_.add(m); + } else { + splitPointsBuilder_.addMessage(m); + } + break; + } // case 18 + case 26: + { + initiator_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object database_ = ""; + + /** + * + * + *
    +     * Required. The database on whose tables/indexes split points are to be
    +     * added. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The database on whose tables/indexes split points are to be
    +     * added. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The database on whose tables/indexes split points are to be
    +     * added. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The database to set. + * @return This builder for chaining. + */ + public Builder setDatabase(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The database on whose tables/indexes split points are to be
    +     * added. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearDatabase() { + database_ = getDefaultInstance().getDatabase(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The database on whose tables/indexes split points are to be
    +     * added. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for database to set. + * @return This builder for chaining. + */ + public Builder setDatabaseBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.util.List splitPoints_ = + java.util.Collections.emptyList(); + + private void ensureSplitPointsIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + splitPoints_ = + new java.util.ArrayList(splitPoints_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.SplitPoints, + com.google.spanner.admin.database.v1.SplitPoints.Builder, + com.google.spanner.admin.database.v1.SplitPointsOrBuilder> + splitPointsBuilder_; + + /** + * + * + *
    +     * Required. The split points to add.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List getSplitPointsList() { + if (splitPointsBuilder_ == null) { + return java.util.Collections.unmodifiableList(splitPoints_); + } else { + return splitPointsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Required. The split points to add.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public int getSplitPointsCount() { + if (splitPointsBuilder_ == null) { + return splitPoints_.size(); + } else { + return splitPointsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Required. The split points to add.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.SplitPoints getSplitPoints(int index) { + if (splitPointsBuilder_ == null) { + return splitPoints_.get(index); + } else { + return splitPointsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Required. The split points to add.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setSplitPoints( + int index, com.google.spanner.admin.database.v1.SplitPoints value) { + if (splitPointsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSplitPointsIsMutable(); + splitPoints_.set(index, value); + onChanged(); + } else { + splitPointsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Required. The split points to add.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setSplitPoints( + int index, com.google.spanner.admin.database.v1.SplitPoints.Builder builderForValue) { + if (splitPointsBuilder_ == null) { + ensureSplitPointsIsMutable(); + splitPoints_.set(index, builderForValue.build()); + onChanged(); + } else { + splitPointsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Required. The split points to add.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addSplitPoints(com.google.spanner.admin.database.v1.SplitPoints value) { + if (splitPointsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSplitPointsIsMutable(); + splitPoints_.add(value); + onChanged(); + } else { + splitPointsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Required. The split points to add.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addSplitPoints( + int index, com.google.spanner.admin.database.v1.SplitPoints value) { + if (splitPointsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSplitPointsIsMutable(); + splitPoints_.add(index, value); + onChanged(); + } else { + splitPointsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Required. The split points to add.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addSplitPoints( + com.google.spanner.admin.database.v1.SplitPoints.Builder builderForValue) { + if (splitPointsBuilder_ == null) { + ensureSplitPointsIsMutable(); + splitPoints_.add(builderForValue.build()); + onChanged(); + } else { + splitPointsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Required. The split points to add.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addSplitPoints( + int index, com.google.spanner.admin.database.v1.SplitPoints.Builder builderForValue) { + if (splitPointsBuilder_ == null) { + ensureSplitPointsIsMutable(); + splitPoints_.add(index, builderForValue.build()); + onChanged(); + } else { + splitPointsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Required. The split points to add.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addAllSplitPoints( + java.lang.Iterable values) { + if (splitPointsBuilder_ == null) { + ensureSplitPointsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, splitPoints_); + onChanged(); + } else { + splitPointsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Required. The split points to add.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearSplitPoints() { + if (splitPointsBuilder_ == null) { + splitPoints_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + splitPointsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Required. The split points to add.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder removeSplitPoints(int index) { + if (splitPointsBuilder_ == null) { + ensureSplitPointsIsMutable(); + splitPoints_.remove(index); + onChanged(); + } else { + splitPointsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Required. The split points to add.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.SplitPoints.Builder getSplitPointsBuilder( + int index) { + return internalGetSplitPointsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Required. The split points to add.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.SplitPointsOrBuilder getSplitPointsOrBuilder( + int index) { + if (splitPointsBuilder_ == null) { + return splitPoints_.get(index); + } else { + return splitPointsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Required. The split points to add.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getSplitPointsOrBuilderList() { + if (splitPointsBuilder_ != null) { + return splitPointsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(splitPoints_); + } + } + + /** + * + * + *
    +     * Required. The split points to add.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.SplitPoints.Builder addSplitPointsBuilder() { + return internalGetSplitPointsFieldBuilder() + .addBuilder(com.google.spanner.admin.database.v1.SplitPoints.getDefaultInstance()); + } + + /** + * + * + *
    +     * Required. The split points to add.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.SplitPoints.Builder addSplitPointsBuilder( + int index) { + return internalGetSplitPointsFieldBuilder() + .addBuilder(index, com.google.spanner.admin.database.v1.SplitPoints.getDefaultInstance()); + } + + /** + * + * + *
    +     * Required. The split points to add.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getSplitPointsBuilderList() { + return internalGetSplitPointsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.SplitPoints, + com.google.spanner.admin.database.v1.SplitPoints.Builder, + com.google.spanner.admin.database.v1.SplitPointsOrBuilder> + internalGetSplitPointsFieldBuilder() { + if (splitPointsBuilder_ == null) { + splitPointsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.SplitPoints, + com.google.spanner.admin.database.v1.SplitPoints.Builder, + com.google.spanner.admin.database.v1.SplitPointsOrBuilder>( + splitPoints_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + splitPoints_ = null; + } + return splitPointsBuilder_; + } + + private java.lang.Object initiator_ = ""; + + /** + * + * + *
    +     * Optional. A user-supplied tag associated with the split points.
    +     * For example, "intital_data_load", "special_event_1".
    +     * Defaults to "CloudAddSplitPointsAPI" if not specified.
    +     * The length of the tag must not exceed 50 characters,else will be trimmed.
    +     * Only valid UTF8 characters are allowed.
    +     * 
    + * + * string initiator = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The initiator. + */ + public java.lang.String getInitiator() { + java.lang.Object ref = initiator_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + initiator_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Optional. A user-supplied tag associated with the split points.
    +     * For example, "intital_data_load", "special_event_1".
    +     * Defaults to "CloudAddSplitPointsAPI" if not specified.
    +     * The length of the tag must not exceed 50 characters,else will be trimmed.
    +     * Only valid UTF8 characters are allowed.
    +     * 
    + * + * string initiator = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for initiator. + */ + public com.google.protobuf.ByteString getInitiatorBytes() { + java.lang.Object ref = initiator_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + initiator_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Optional. A user-supplied tag associated with the split points.
    +     * For example, "intital_data_load", "special_event_1".
    +     * Defaults to "CloudAddSplitPointsAPI" if not specified.
    +     * The length of the tag must not exceed 50 characters,else will be trimmed.
    +     * Only valid UTF8 characters are allowed.
    +     * 
    + * + * string initiator = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The initiator to set. + * @return This builder for chaining. + */ + public Builder setInitiator(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + initiator_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A user-supplied tag associated with the split points.
    +     * For example, "intital_data_load", "special_event_1".
    +     * Defaults to "CloudAddSplitPointsAPI" if not specified.
    +     * The length of the tag must not exceed 50 characters,else will be trimmed.
    +     * Only valid UTF8 characters are allowed.
    +     * 
    + * + * string initiator = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearInitiator() { + initiator_ = getDefaultInstance().getInitiator(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A user-supplied tag associated with the split points.
    +     * For example, "intital_data_load", "special_event_1".
    +     * Defaults to "CloudAddSplitPointsAPI" if not specified.
    +     * The length of the tag must not exceed 50 characters,else will be trimmed.
    +     * Only valid UTF8 characters are allowed.
    +     * 
    + * + * string initiator = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for initiator to set. + * @return This builder for chaining. + */ + public Builder setInitiatorBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + initiator_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.AddSplitPointsRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.AddSplitPointsRequest) + private static final com.google.spanner.admin.database.v1.AddSplitPointsRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.AddSplitPointsRequest(); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AddSplitPointsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.AddSplitPointsRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/AddSplitPointsRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/AddSplitPointsRequestOrBuilder.java new file mode 100644 index 000000000000..bbdc2efc8ae3 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/AddSplitPointsRequestOrBuilder.java @@ -0,0 +1,162 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface AddSplitPointsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.AddSplitPointsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The database on whose tables/indexes split points are to be
    +   * added. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + java.lang.String getDatabase(); + + /** + * + * + *
    +   * Required. The database on whose tables/indexes split points are to be
    +   * added. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + com.google.protobuf.ByteString getDatabaseBytes(); + + /** + * + * + *
    +   * Required. The split points to add.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List getSplitPointsList(); + + /** + * + * + *
    +   * Required. The split points to add.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.database.v1.SplitPoints getSplitPoints(int index); + + /** + * + * + *
    +   * Required. The split points to add.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + int getSplitPointsCount(); + + /** + * + * + *
    +   * Required. The split points to add.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List + getSplitPointsOrBuilderList(); + + /** + * + * + *
    +   * Required. The split points to add.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.database.v1.SplitPointsOrBuilder getSplitPointsOrBuilder(int index); + + /** + * + * + *
    +   * Optional. A user-supplied tag associated with the split points.
    +   * For example, "intital_data_load", "special_event_1".
    +   * Defaults to "CloudAddSplitPointsAPI" if not specified.
    +   * The length of the tag must not exceed 50 characters,else will be trimmed.
    +   * Only valid UTF8 characters are allowed.
    +   * 
    + * + * string initiator = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The initiator. + */ + java.lang.String getInitiator(); + + /** + * + * + *
    +   * Optional. A user-supplied tag associated with the split points.
    +   * For example, "intital_data_load", "special_event_1".
    +   * Defaults to "CloudAddSplitPointsAPI" if not specified.
    +   * The length of the tag must not exceed 50 characters,else will be trimmed.
    +   * Only valid UTF8 characters are allowed.
    +   * 
    + * + * string initiator = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for initiator. + */ + com.google.protobuf.ByteString getInitiatorBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/AddSplitPointsResponse.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/AddSplitPointsResponse.java new file mode 100644 index 000000000000..30eaaf424dce --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/AddSplitPointsResponse.java @@ -0,0 +1,399 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The response for
    + * [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.AddSplitPointsResponse} + */ +@com.google.protobuf.Generated +public final class AddSplitPointsResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.AddSplitPointsResponse) + AddSplitPointsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "AddSplitPointsResponse"); + } + + // Use AddSplitPointsResponse.newBuilder() to construct. + private AddSplitPointsResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private AddSplitPointsResponse() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_AddSplitPointsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_AddSplitPointsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.AddSplitPointsResponse.class, + com.google.spanner.admin.database.v1.AddSplitPointsResponse.Builder.class); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws 
java.io.IOException { + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.AddSplitPointsResponse)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.AddSplitPointsResponse other = + (com.google.spanner.admin.database.v1.AddSplitPointsResponse) obj; + + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public 
static com.google.spanner.admin.database.v1.AddSplitPointsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.AddSplitPointsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The response for
    +   * [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.AddSplitPointsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.AddSplitPointsResponse) + com.google.spanner.admin.database.v1.AddSplitPointsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_AddSplitPointsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_AddSplitPointsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.AddSplitPointsResponse.class, + com.google.spanner.admin.database.v1.AddSplitPointsResponse.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.AddSplitPointsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_AddSplitPointsResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.AddSplitPointsResponse getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.AddSplitPointsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.AddSplitPointsResponse build() { + 
com.google.spanner.admin.database.v1.AddSplitPointsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.AddSplitPointsResponse buildPartial() { + com.google.spanner.admin.database.v1.AddSplitPointsResponse result = + new com.google.spanner.admin.database.v1.AddSplitPointsResponse(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.AddSplitPointsResponse) { + return mergeFrom((com.google.spanner.admin.database.v1.AddSplitPointsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.AddSplitPointsResponse other) { + if (other == com.google.spanner.admin.database.v1.AddSplitPointsResponse.getDefaultInstance()) + return this; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + // 
@@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.AddSplitPointsResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.AddSplitPointsResponse) + private static final com.google.spanner.admin.database.v1.AddSplitPointsResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.AddSplitPointsResponse(); + } + + public static com.google.spanner.admin.database.v1.AddSplitPointsResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AddSplitPointsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.AddSplitPointsResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/AddSplitPointsResponseOrBuilder.java 
b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/AddSplitPointsResponseOrBuilder.java new file mode 100644 index 000000000000..e94d9fb2f36b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/AddSplitPointsResponseOrBuilder.java @@ -0,0 +1,27 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface AddSplitPointsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.AddSplitPointsResponse) + com.google.protobuf.MessageOrBuilder {} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/Backup.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/Backup.java new file mode 100644 index 000000000000..ccc75d550365 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/Backup.java @@ -0,0 +1,6598 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * A backup of a Cloud Spanner database.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.Backup} + */ +@com.google.protobuf.Generated +public final class Backup extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.Backup) + BackupOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Backup"); + } + + // Use Backup.newBuilder() to construct. + private Backup(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Backup() { + database_ = ""; + name_ = ""; + state_ = 0; + referencingDatabases_ = com.google.protobuf.LazyStringArrayList.emptyList(); + encryptionInformation_ = java.util.Collections.emptyList(); + databaseDialect_ = 0; + referencingBackups_ = com.google.protobuf.LazyStringArrayList.emptyList(); + backupSchedules_ = com.google.protobuf.LazyStringArrayList.emptyList(); + incrementalBackupChainId_ = ""; + instancePartitions_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_Backup_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_Backup_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.Backup.class, + com.google.spanner.admin.database.v1.Backup.Builder.class); + } + + /** + * + * + *
    +   * Indicates the current state of the backup.
    +   * 
    + * + * Protobuf enum {@code google.spanner.admin.database.v1.Backup.State} + */ + public enum State implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * STATE_UNSPECIFIED = 0; + */ + STATE_UNSPECIFIED(0), + /** + * + * + *
    +     * The pending backup is still being created. Operations on the
    +     * backup may fail with `FAILED_PRECONDITION` in this state.
    +     * 
    + * + * CREATING = 1; + */ + CREATING(1), + /** + * + * + *
    +     * The backup is complete and ready for use.
    +     * 
    + * + * READY = 2; + */ + READY(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "State"); + } + + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * STATE_UNSPECIFIED = 0; + */ + public static final int STATE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * The pending backup is still being created. Operations on the
    +     * backup may fail with `FAILED_PRECONDITION` in this state.
    +     * 
    + * + * CREATING = 1; + */ + public static final int CREATING_VALUE = 1; + + /** + * + * + *
    +     * The backup is complete and ready for use.
    +     * 
    + * + * READY = 2; + */ + public static final int READY_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static State valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static State forNumber(int value) { + switch (value) { + case 0: + return STATE_UNSPECIFIED; + case 1: + return CREATING; + case 2: + return READY; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.admin.database.v1.Backup.getDescriptor().getEnumTypes().get(0); + } + + private static final State[] VALUES = values(); + + public static State valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if 
(desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private State(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.admin.database.v1.Backup.State) + } + + private int bitField0_; + public static final int DATABASE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object database_ = ""; + + /** + * + * + *
    +   * Required for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. Name of the database from which this backup was created. This
    +   * needs to be in the same instance as the backup. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * string database = 2 [(.google.api.resource_reference) = { ... } + * + * @return The database. + */ + @java.lang.Override + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. Name of the database from which this backup was created. This
    +   * needs to be in the same instance as the backup. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * string database = 2 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for database. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int VERSION_TIME_FIELD_NUMBER = 9; + private com.google.protobuf.Timestamp versionTime_; + + /** + * + * + *
    +   * The backup will contain an externally consistent copy of the database at
    +   * the timestamp specified by `version_time`. If `version_time` is not
    +   * specified, the system will set `version_time` to the `create_time` of the
    +   * backup.
    +   * 
    + * + * .google.protobuf.Timestamp version_time = 9; + * + * @return Whether the versionTime field is set. + */ + @java.lang.Override + public boolean hasVersionTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The backup will contain an externally consistent copy of the database at
    +   * the timestamp specified by `version_time`. If `version_time` is not
    +   * specified, the system will set `version_time` to the `create_time` of the
    +   * backup.
    +   * 
    + * + * .google.protobuf.Timestamp version_time = 9; + * + * @return The versionTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getVersionTime() { + return versionTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : versionTime_; + } + + /** + * + * + *
    +   * The backup will contain an externally consistent copy of the database at
    +   * the timestamp specified by `version_time`. If `version_time` is not
    +   * specified, the system will set `version_time` to the `create_time` of the
    +   * backup.
    +   * 
    + * + * .google.protobuf.Timestamp version_time = 9; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getVersionTimeOrBuilder() { + return versionTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : versionTime_; + } + + public static final int EXPIRE_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp expireTime_; + + /** + * + * + *
    +   * Required for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. The expiration time of the backup, with microseconds
    +   * granularity that must be at least 6 hours and at most 366 days
    +   * from the time the CreateBackup request is processed. Once the `expire_time`
    +   * has passed, the backup is eligible to be automatically deleted by Cloud
    +   * Spanner to free the resources used by the backup.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 3; + * + * @return Whether the expireTime field is set. + */ + @java.lang.Override + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Required for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. The expiration time of the backup, with microseconds
    +   * granularity that must be at least 6 hours and at most 366 days
    +   * from the time the CreateBackup request is processed. Once the `expire_time`
    +   * has passed, the backup is eligible to be automatically deleted by Cloud
    +   * Spanner to free the resources used by the backup.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 3; + * + * @return The expireTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getExpireTime() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + /** + * + * + *
    +   * Required for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. The expiration time of the backup, with microseconds
    +   * granularity that must be at least 6 hours and at most 366 days
    +   * from the time the CreateBackup request is processed. Once the `expire_time`
    +   * has passed, the backup is eligible to be automatically deleted by Cloud
    +   * Spanner to free the resources used by the backup.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 3; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Output only for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. Required for the
    +   * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
    +   * operation.
    +   *
    +   * A globally unique identifier for the backup which cannot be
    +   * changed. Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/[a-z][a-z0-9_\-]*[a-z0-9]`
    +   * The final segment of the name must be between 2 and 60 characters
    +   * in length.
    +   *
    +   * The backup is stored in the location(s) specified in the instance
    +   * configuration of the instance containing the backup, identified
    +   * by the prefix of the backup name of the form
    +   * `projects/<project>/instances/<instance>`.
    +   * 
    + * + * string name = 1; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Output only for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. Required for the
    +   * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
    +   * operation.
    +   *
    +   * A globally unique identifier for the backup which cannot be
    +   * changed. Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/[a-z][a-z0-9_\-]*[a-z0-9]`
    +   * The final segment of the name must be between 2 and 60 characters
    +   * in length.
    +   *
    +   * The backup is stored in the location(s) specified in the instance
    +   * configuration of the instance containing the backup, identified
    +   * by the prefix of the backup name of the form
    +   * `projects/<project>/instances/<instance>`.
    +   * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CREATE_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp createTime_; + + /** + * + * + *
    +   * Output only. The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request is received. If the request does not specify `version_time`, the
    +   * `version_time` of the backup will be equivalent to the `create_time`.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + @java.lang.Override + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * Output only. The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request is received. If the request does not specify `version_time`, the
    +   * `version_time` of the backup will be equivalent to the `create_time`.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + /** + * + * + *
    +   * Output only. The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request is received. If the request does not specify `version_time`, the
    +   * `version_time` of the backup will be equivalent to the `create_time`.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + public static final int SIZE_BYTES_FIELD_NUMBER = 5; + private long sizeBytes_ = 0L; + + /** + * + * + *
    +   * Output only. Size of the backup in bytes.
    +   * 
    + * + * int64 size_bytes = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The sizeBytes. + */ + @java.lang.Override + public long getSizeBytes() { + return sizeBytes_; + } + + public static final int FREEABLE_SIZE_BYTES_FIELD_NUMBER = 15; + private long freeableSizeBytes_ = 0L; + + /** + * + * + *
    +   * Output only. The number of bytes that will be freed by deleting this
    +   * backup. This value will be zero if, for example, this backup is part of an
    +   * incremental backup chain and younger backups in the chain require that we
    +   * keep its data. For backups not in an incremental backup chain, this is
    +   * always the size of the backup. This value may change if backups on the same
    +   * chain get created, deleted or expired.
    +   * 
    + * + * int64 freeable_size_bytes = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The freeableSizeBytes. + */ + @java.lang.Override + public long getFreeableSizeBytes() { + return freeableSizeBytes_; + } + + public static final int EXCLUSIVE_SIZE_BYTES_FIELD_NUMBER = 16; + private long exclusiveSizeBytes_ = 0L; + + /** + * + * + *
    +   * Output only. For a backup in an incremental backup chain, this is the
    +   * storage space needed to keep the data that has changed since the previous
    +   * backup. For all other backups, this is always the size of the backup. This
    +   * value may change if backups on the same chain get deleted or expired.
    +   *
    +   * This field can be used to calculate the total storage space used by a set
    +   * of backups. For example, the total space used by all backups of a database
    +   * can be computed by summing up this field.
    +   * 
    + * + * int64 exclusive_size_bytes = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The exclusiveSizeBytes. + */ + @java.lang.Override + public long getExclusiveSizeBytes() { + return exclusiveSizeBytes_; + } + + public static final int STATE_FIELD_NUMBER = 6; + private int state_ = 0; + + /** + * + * + *
    +   * Output only. The current state of the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Backup.State state = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + @java.lang.Override + public int getStateValue() { + return state_; + } + + /** + * + * + *
    +   * Output only. The current state of the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Backup.State state = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.Backup.State getState() { + com.google.spanner.admin.database.v1.Backup.State result = + com.google.spanner.admin.database.v1.Backup.State.forNumber(state_); + return result == null ? com.google.spanner.admin.database.v1.Backup.State.UNRECOGNIZED : result; + } + + public static final int REFERENCING_DATABASES_FIELD_NUMBER = 7; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList referencingDatabases_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * Output only. The names of the restored databases that reference the backup.
    +   * The database names are of
    +   * the form `projects/<project>/instances/<instance>/databases/<database>`.
    +   * Referencing databases may exist in different instances. The existence of
    +   * any referencing database prevents the backup from being deleted. When a
    +   * restored database from the backup enters the `READY` state, the reference
    +   * to the backup is removed.
    +   * 
    + * + * + * repeated string referencing_databases = 7 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the referencingDatabases. + */ + public com.google.protobuf.ProtocolStringList getReferencingDatabasesList() { + return referencingDatabases_; + } + + /** + * + * + *
    +   * Output only. The names of the restored databases that reference the backup.
    +   * The database names are of
    +   * the form `projects/<project>/instances/<instance>/databases/<database>`.
    +   * Referencing databases may exist in different instances. The existence of
    +   * any referencing database prevents the backup from being deleted. When a
    +   * restored database from the backup enters the `READY` state, the reference
    +   * to the backup is removed.
    +   * 
    + * + * + * repeated string referencing_databases = 7 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The count of referencingDatabases. + */ + public int getReferencingDatabasesCount() { + return referencingDatabases_.size(); + } + + /** + * + * + *
    +   * Output only. The names of the restored databases that reference the backup.
    +   * The database names are of
    +   * the form `projects/<project>/instances/<instance>/databases/<database>`.
    +   * Referencing databases may exist in different instances. The existence of
    +   * any referencing database prevents the backup from being deleted. When a
    +   * restored database from the backup enters the `READY` state, the reference
    +   * to the backup is removed.
    +   * 
    + * + * + * repeated string referencing_databases = 7 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The referencingDatabases at the given index. + */ + public java.lang.String getReferencingDatabases(int index) { + return referencingDatabases_.get(index); + } + + /** + * + * + *
    +   * Output only. The names of the restored databases that reference the backup.
    +   * The database names are of
    +   * the form `projects/<project>/instances/<instance>/databases/<database>`.
    +   * Referencing databases may exist in different instances. The existence of
    +   * any referencing database prevents the backup from being deleted. When a
    +   * restored database from the backup enters the `READY` state, the reference
    +   * to the backup is removed.
    +   * 
    + * + * + * repeated string referencing_databases = 7 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the referencingDatabases at the given index. + */ + public com.google.protobuf.ByteString getReferencingDatabasesBytes(int index) { + return referencingDatabases_.getByteString(index); + } + + public static final int ENCRYPTION_INFO_FIELD_NUMBER = 8; + private com.google.spanner.admin.database.v1.EncryptionInfo encryptionInfo_; + + /** + * + * + *
    +   * Output only. The encryption information for the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the encryptionInfo field is set. + */ + @java.lang.Override + public boolean hasEncryptionInfo() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +   * Output only. The encryption information for the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The encryptionInfo. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionInfo getEncryptionInfo() { + return encryptionInfo_ == null + ? com.google.spanner.admin.database.v1.EncryptionInfo.getDefaultInstance() + : encryptionInfo_; + } + + /** + * + * + *
    +   * Output only. The encryption information for the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder getEncryptionInfoOrBuilder() { + return encryptionInfo_ == null + ? com.google.spanner.admin.database.v1.EncryptionInfo.getDefaultInstance() + : encryptionInfo_; + } + + public static final int ENCRYPTION_INFORMATION_FIELD_NUMBER = 13; + + @SuppressWarnings("serial") + private java.util.List + encryptionInformation_; + + /** + * + * + *
    +   * Output only. The encryption information for the backup, whether it is
    +   * protected by one or more KMS keys. The information includes all Cloud
    +   * KMS key versions used to encrypt the backup. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated. At least one of the key
    +   * versions must be available for the backup to be restored. If a key version
    +   * is revoked in the middle of a restore, the restore behavior is undefined.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getEncryptionInformationList() { + return encryptionInformation_; + } + + /** + * + * + *
    +   * Output only. The encryption information for the backup, whether it is
    +   * protected by one or more KMS keys. The information includes all Cloud
    +   * KMS key versions used to encrypt the backup. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated. At least one of the key
    +   * versions must be available for the backup to be restored. If a key version
    +   * is revoked in the middle of a restore, the restore behavior is undefined.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getEncryptionInformationOrBuilderList() { + return encryptionInformation_; + } + + /** + * + * + *
    +   * Output only. The encryption information for the backup, whether it is
    +   * protected by one or more KMS keys. The information includes all Cloud
    +   * KMS key versions used to encrypt the backup. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated. At least one of the key
    +   * versions must be available for the backup to be restored. If a key version
    +   * is revoked in the middle of a restore, the restore behavior is undefined.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public int getEncryptionInformationCount() { + return encryptionInformation_.size(); + } + + /** + * + * + *
    +   * Output only. The encryption information for the backup, whether it is
    +   * protected by one or more KMS keys. The information includes all Cloud
    +   * KMS key versions used to encrypt the backup. The `encryption_status` field
    +   * inside of each `EncryptionInfo` is not populated. At least one of the key
    +   * versions must be available for the backup to be restored. If a key version
    +   * is revoked in the middle of a restore, the restore behavior is undefined.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionInfo getEncryptionInformation(int index) { + return encryptionInformation_.get(index); + } + + /** + * + * + *
    +   * Output only. The encryption information for the backup, whether it is
    +   * protected by one or more KMS keys. The information includes all Cloud
    +   * KMS key versions used to encrypt the backup. The `encryption_status` field
    +   * inside of each `EncryptionInfo` is not populated. At least one of the key
    +   * versions must be available for the backup to be restored. If a key version
    +   * is revoked in the middle of a restore, the restore behavior is undefined.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder + getEncryptionInformationOrBuilder(int index) { + return encryptionInformation_.get(index); + } + + public static final int DATABASE_DIALECT_FIELD_NUMBER = 10; + private int databaseDialect_ = 0; + + /** + * + * + *
    +   * Output only. The database dialect information for the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for databaseDialect. + */ + @java.lang.Override + public int getDatabaseDialectValue() { + return databaseDialect_; + } + + /** + * + * + *
    +   * Output only. The database dialect information for the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The databaseDialect. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.DatabaseDialect getDatabaseDialect() { + com.google.spanner.admin.database.v1.DatabaseDialect result = + com.google.spanner.admin.database.v1.DatabaseDialect.forNumber(databaseDialect_); + return result == null + ? com.google.spanner.admin.database.v1.DatabaseDialect.UNRECOGNIZED + : result; + } + + public static final int REFERENCING_BACKUPS_FIELD_NUMBER = 11; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList referencingBackups_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * Output only. The names of the destination backups being created by copying
    +   * this source backup. The backup names are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * Referencing backups may exist in different instances. The existence of
    +   * any referencing backup prevents the backup from being deleted. When the
    +   * copy operation is done (either successfully completed or cancelled or the
    +   * destination backup is deleted), the reference to the backup is removed.
    +   * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the referencingBackups. + */ + public com.google.protobuf.ProtocolStringList getReferencingBackupsList() { + return referencingBackups_; + } + + /** + * + * + *
    +   * Output only. The names of the destination backups being created by copying
    +   * this source backup. The backup names are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * Referencing backups may exist in different instances. The existence of
    +   * any referencing backup prevents the backup from being deleted. When the
    +   * copy operation is done (either successfully completed or cancelled or the
    +   * destination backup is deleted), the reference to the backup is removed.
    +   * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The count of referencingBackups. + */ + public int getReferencingBackupsCount() { + return referencingBackups_.size(); + } + + /** + * + * + *
    +   * Output only. The names of the destination backups being created by copying
    +   * this source backup. The backup names are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * Referencing backups may exist in different instances. The existence of
    +   * any referencing backup prevents the backup from being deleted. When the
    +   * copy operation is done (either successfully completed or cancelled or the
    +   * destination backup is deleted), the reference to the backup is removed.
    +   * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The referencingBackups at the given index. + */ + public java.lang.String getReferencingBackups(int index) { + return referencingBackups_.get(index); + } + + /** + * + * + *
    +   * Output only. The names of the destination backups being created by copying
    +   * this source backup. The backup names are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * Referencing backups may exist in different instances. The existence of
    +   * any referencing backup prevents the backup from being deleted. When the
    +   * copy operation is done (either successfully completed or cancelled or the
    +   * destination backup is deleted), the reference to the backup is removed.
    +   * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the referencingBackups at the given index. + */ + public com.google.protobuf.ByteString getReferencingBackupsBytes(int index) { + return referencingBackups_.getByteString(index); + } + + public static final int MAX_EXPIRE_TIME_FIELD_NUMBER = 12; + private com.google.protobuf.Timestamp maxExpireTime_; + + /** + * + * + *
    +   * Output only. The max allowed expiration time of the backup, with
    +   * microseconds granularity. A backup's expiration time can be configured in
    +   * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +   * copying an existing backup, the expiration time specified must be
    +   * less than `Backup.max_expire_time`.
    +   * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the maxExpireTime field is set. + */ + @java.lang.Override + public boolean hasMaxExpireTime() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +   * Output only. The max allowed expiration time of the backup, with
    +   * microseconds granularity. A backup's expiration time can be configured in
    +   * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +   * copying an existing backup, the expiration time specified must be
    +   * less than `Backup.max_expire_time`.
    +   * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The maxExpireTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getMaxExpireTime() { + return maxExpireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : maxExpireTime_; + } + + /** + * + * + *
    +   * Output only. The max allowed expiration time of the backup, with
    +   * microseconds granularity. A backup's expiration time can be configured in
    +   * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +   * copying an existing backup, the expiration time specified must be
    +   * less than `Backup.max_expire_time`.
    +   * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getMaxExpireTimeOrBuilder() { + return maxExpireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : maxExpireTime_; + } + + public static final int BACKUP_SCHEDULES_FIELD_NUMBER = 14; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList backupSchedules_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * Output only. List of backup schedule URIs that are associated with
    +   * creating this backup. This is only applicable for scheduled backups, and
    +   * is empty for on-demand backups.
    +   *
    +   * To optimize for storage, whenever possible, multiple schedules are
    +   * collapsed together to create one backup. In such cases, this field captures
    +   * the list of all backup schedule URIs that are associated with creating
    +   * this backup. If collapsing is not done, then this field captures the
    +   * single backup schedule URI associated with creating this backup.
    +   * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the backupSchedules. + */ + public com.google.protobuf.ProtocolStringList getBackupSchedulesList() { + return backupSchedules_; + } + + /** + * + * + *
    +   * Output only. List of backup schedule URIs that are associated with
    +   * creating this backup. This is only applicable for scheduled backups, and
    +   * is empty for on-demand backups.
    +   *
    +   * To optimize for storage, whenever possible, multiple schedules are
    +   * collapsed together to create one backup. In such cases, this field captures
    +   * the list of all backup schedule URIs that are associated with creating
    +   * this backup. If collapsing is not done, then this field captures the
    +   * single backup schedule URI associated with creating this backup.
    +   * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The count of backupSchedules. + */ + public int getBackupSchedulesCount() { + return backupSchedules_.size(); + } + + /** + * + * + *
    +   * Output only. List of backup schedule URIs that are associated with
    +   * creating this backup. This is only applicable for scheduled backups, and
    +   * is empty for on-demand backups.
    +   *
    +   * To optimize for storage, whenever possible, multiple schedules are
    +   * collapsed together to create one backup. In such cases, this field captures
    +   * the list of all backup schedule URIs that are associated with creating
    +   * this backup. If collapsing is not done, then this field captures the
    +   * single backup schedule URI associated with creating this backup.
    +   * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The backupSchedules at the given index. + */ + public java.lang.String getBackupSchedules(int index) { + return backupSchedules_.get(index); + } + + /** + * + * + *
    +   * Output only. List of backup schedule URIs that are associated with
    +   * creating this backup. This is only applicable for scheduled backups, and
    +   * is empty for on-demand backups.
    +   *
    +   * To optimize for storage, whenever possible, multiple schedules are
    +   * collapsed together to create one backup. In such cases, this field captures
    +   * the list of all backup schedule URIs that are associated with creating
    +   * this backup. If collapsing is not done, then this field captures the
    +   * single backup schedule URI associated with creating this backup.
    +   * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the backupSchedules at the given index. + */ + public com.google.protobuf.ByteString getBackupSchedulesBytes(int index) { + return backupSchedules_.getByteString(index); + } + + public static final int INCREMENTAL_BACKUP_CHAIN_ID_FIELD_NUMBER = 17; + + @SuppressWarnings("serial") + private volatile java.lang.Object incrementalBackupChainId_ = ""; + + /** + * + * + *
    +   * Output only. Populated only for backups in an incremental backup chain.
    +   * Backups share the same chain id if and only if they belong to the same
    +   * incremental backup chain. Use this field to determine which backups are
    +   * part of the same incremental backup chain. The ordering of backups in the
    +   * chain can be determined by ordering the backup `version_time`.
    +   * 
    + * + * string incremental_backup_chain_id = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The incrementalBackupChainId. + */ + @java.lang.Override + public java.lang.String getIncrementalBackupChainId() { + java.lang.Object ref = incrementalBackupChainId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + incrementalBackupChainId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Output only. Populated only for backups in an incremental backup chain.
    +   * Backups share the same chain id if and only if they belong to the same
    +   * incremental backup chain. Use this field to determine which backups are
    +   * part of the same incremental backup chain. The ordering of backups in the
    +   * chain can be determined by ordering the backup `version_time`.
    +   * 
    + * + * string incremental_backup_chain_id = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The bytes for incrementalBackupChainId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getIncrementalBackupChainIdBytes() { + java.lang.Object ref = incrementalBackupChainId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + incrementalBackupChainId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OLDEST_VERSION_TIME_FIELD_NUMBER = 18; + private com.google.protobuf.Timestamp oldestVersionTime_; + + /** + * + * + *
    +   * Output only. Data deleted at a time older than this is guaranteed not to be
    +   * retained in order to support this backup. For a backup in an incremental
    +   * backup chain, this is the version time of the oldest backup that exists or
    +   * ever existed in the chain. For all other backups, this is the version time
    +   * of the backup. This field can be used to understand what data is being
    +   * retained by the backup system.
    +   * 
    + * + * + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the oldestVersionTime field is set. + */ + @java.lang.Override + public boolean hasOldestVersionTime() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
    +   * Output only. Data deleted at a time older than this is guaranteed not to be
    +   * retained in order to support this backup. For a backup in an incremental
    +   * backup chain, this is the version time of the oldest backup that exists or
    +   * ever existed in the chain. For all other backups, this is the version time
    +   * of the backup. This field can be used to understand what data is being
    +   * retained by the backup system.
    +   * 
    + * + * + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The oldestVersionTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getOldestVersionTime() { + return oldestVersionTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : oldestVersionTime_; + } + + /** + * + * + *
    +   * Output only. Data deleted at a time older than this is guaranteed not to be
    +   * retained in order to support this backup. For a backup in an incremental
    +   * backup chain, this is the version time of the oldest backup that exists or
    +   * ever existed in the chain. For all other backups, this is the version time
    +   * of the backup. This field can be used to understand what data is being
    +   * retained by the backup system.
    +   * 
    + * + * + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getOldestVersionTimeOrBuilder() { + return oldestVersionTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : oldestVersionTime_; + } + + public static final int INSTANCE_PARTITIONS_FIELD_NUMBER = 19; + + @SuppressWarnings("serial") + private java.util.List + instancePartitions_; + + /** + * + * + *
    +   * Output only. The instance partition(s) storing the backup.
    +   *
    +   * This is the same as the list of the instance partition(s) that the database
    +   * had footprint in at the backup's `version_time`.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getInstancePartitionsList() { + return instancePartitions_; + } + + /** + * + * + *
    +   * Output only. The instance partition(s) storing the backup.
    +   *
    +   * This is the same as the list of the instance partition(s) that the database
    +   * had footprint in at the backup's `version_time`.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List< + ? extends com.google.spanner.admin.database.v1.BackupInstancePartitionOrBuilder> + getInstancePartitionsOrBuilderList() { + return instancePartitions_; + } + + /** + * + * + *
    +   * Output only. The instance partition(s) storing the backup.
    +   *
    +   * This is the same as the list of the instance partition(s) that the database
    +   * had footprint in at the backup's `version_time`.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public int getInstancePartitionsCount() { + return instancePartitions_.size(); + } + + /** + * + * + *
    +   * Output only. The instance partition(s) storing the backup.
    +   *
    +   * This is the same as the list of the instance partition(s) that the database
    +   * had footprint in at the backup's `version_time`.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupInstancePartition getInstancePartitions( + int index) { + return instancePartitions_.get(index); + } + + /** + * + * + *
    +   * Output only. The instance partition(s) storing the backup.
    +   *
    +   * This is the same as the list of the instance partition(s) that the database
    +   * had footprint in at the backup's `version_time`.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupInstancePartitionOrBuilder + getInstancePartitionsOrBuilder(int index) { + return instancePartitions_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, database_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getExpireTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(4, getCreateTime()); + } + if (sizeBytes_ != 0L) { + output.writeInt64(5, sizeBytes_); + } + if (state_ != com.google.spanner.admin.database.v1.Backup.State.STATE_UNSPECIFIED.getNumber()) { + output.writeEnum(6, state_); + } + for (int i = 0; i < referencingDatabases_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 7, referencingDatabases_.getRaw(i)); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(8, getEncryptionInfo()); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(9, getVersionTime()); + } + if (databaseDialect_ + != com.google.spanner.admin.database.v1.DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED + .getNumber()) { + output.writeEnum(10, databaseDialect_); + } + for (int i = 0; i < referencingBackups_.size(); 
i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 11, referencingBackups_.getRaw(i)); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeMessage(12, getMaxExpireTime()); + } + for (int i = 0; i < encryptionInformation_.size(); i++) { + output.writeMessage(13, encryptionInformation_.get(i)); + } + for (int i = 0; i < backupSchedules_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 14, backupSchedules_.getRaw(i)); + } + if (freeableSizeBytes_ != 0L) { + output.writeInt64(15, freeableSizeBytes_); + } + if (exclusiveSizeBytes_ != 0L) { + output.writeInt64(16, exclusiveSizeBytes_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(incrementalBackupChainId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 17, incrementalBackupChainId_); + } + if (((bitField0_ & 0x00000020) != 0)) { + output.writeMessage(18, getOldestVersionTime()); + } + for (int i = 0; i < instancePartitions_.size(); i++) { + output.writeMessage(19, instancePartitions_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, database_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getExpireTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getCreateTime()); + } + if (sizeBytes_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(5, sizeBytes_); + } + if (state_ != com.google.spanner.admin.database.v1.Backup.State.STATE_UNSPECIFIED.getNumber()) { + size += 
com.google.protobuf.CodedOutputStream.computeEnumSize(6, state_); + } + { + int dataSize = 0; + for (int i = 0; i < referencingDatabases_.size(); i++) { + dataSize += computeStringSizeNoTag(referencingDatabases_.getRaw(i)); + } + size += dataSize; + size += 1 * getReferencingDatabasesList().size(); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(8, getEncryptionInfo()); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(9, getVersionTime()); + } + if (databaseDialect_ + != com.google.spanner.admin.database.v1.DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(10, databaseDialect_); + } + { + int dataSize = 0; + for (int i = 0; i < referencingBackups_.size(); i++) { + dataSize += computeStringSizeNoTag(referencingBackups_.getRaw(i)); + } + size += dataSize; + size += 1 * getReferencingBackupsList().size(); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(12, getMaxExpireTime()); + } + for (int i = 0; i < encryptionInformation_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 13, encryptionInformation_.get(i)); + } + { + int dataSize = 0; + for (int i = 0; i < backupSchedules_.size(); i++) { + dataSize += computeStringSizeNoTag(backupSchedules_.getRaw(i)); + } + size += dataSize; + size += 1 * getBackupSchedulesList().size(); + } + if (freeableSizeBytes_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(15, freeableSizeBytes_); + } + if (exclusiveSizeBytes_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(16, exclusiveSizeBytes_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(incrementalBackupChainId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(17, incrementalBackupChainId_); + } 
+ if (((bitField0_ & 0x00000020) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(18, getOldestVersionTime()); + } + for (int i = 0; i < instancePartitions_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(19, instancePartitions_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.Backup)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.Backup other = + (com.google.spanner.admin.database.v1.Backup) obj; + + if (!getDatabase().equals(other.getDatabase())) return false; + if (hasVersionTime() != other.hasVersionTime()) return false; + if (hasVersionTime()) { + if (!getVersionTime().equals(other.getVersionTime())) return false; + } + if (hasExpireTime() != other.hasExpireTime()) return false; + if (hasExpireTime()) { + if (!getExpireTime().equals(other.getExpireTime())) return false; + } + if (!getName().equals(other.getName())) return false; + if (hasCreateTime() != other.hasCreateTime()) return false; + if (hasCreateTime()) { + if (!getCreateTime().equals(other.getCreateTime())) return false; + } + if (getSizeBytes() != other.getSizeBytes()) return false; + if (getFreeableSizeBytes() != other.getFreeableSizeBytes()) return false; + if (getExclusiveSizeBytes() != other.getExclusiveSizeBytes()) return false; + if (state_ != other.state_) return false; + if (!getReferencingDatabasesList().equals(other.getReferencingDatabasesList())) return false; + if (hasEncryptionInfo() != other.hasEncryptionInfo()) return false; + if (hasEncryptionInfo()) { + if (!getEncryptionInfo().equals(other.getEncryptionInfo())) return false; + } + if (!getEncryptionInformationList().equals(other.getEncryptionInformationList())) return false; + if (databaseDialect_ != 
other.databaseDialect_) return false; + if (!getReferencingBackupsList().equals(other.getReferencingBackupsList())) return false; + if (hasMaxExpireTime() != other.hasMaxExpireTime()) return false; + if (hasMaxExpireTime()) { + if (!getMaxExpireTime().equals(other.getMaxExpireTime())) return false; + } + if (!getBackupSchedulesList().equals(other.getBackupSchedulesList())) return false; + if (!getIncrementalBackupChainId().equals(other.getIncrementalBackupChainId())) return false; + if (hasOldestVersionTime() != other.hasOldestVersionTime()) return false; + if (hasOldestVersionTime()) { + if (!getOldestVersionTime().equals(other.getOldestVersionTime())) return false; + } + if (!getInstancePartitionsList().equals(other.getInstancePartitionsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getDatabase().hashCode(); + if (hasVersionTime()) { + hash = (37 * hash) + VERSION_TIME_FIELD_NUMBER; + hash = (53 * hash) + getVersionTime().hashCode(); + } + if (hasExpireTime()) { + hash = (37 * hash) + EXPIRE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getExpireTime().hashCode(); + } + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + } + hash = (37 * hash) + SIZE_BYTES_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getSizeBytes()); + hash = (37 * hash) + FREEABLE_SIZE_BYTES_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getFreeableSizeBytes()); + hash = (37 * hash) + EXCLUSIVE_SIZE_BYTES_FIELD_NUMBER; + hash = (53 * hash) + 
com.google.protobuf.Internal.hashLong(getExclusiveSizeBytes()); + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + state_; + if (getReferencingDatabasesCount() > 0) { + hash = (37 * hash) + REFERENCING_DATABASES_FIELD_NUMBER; + hash = (53 * hash) + getReferencingDatabasesList().hashCode(); + } + if (hasEncryptionInfo()) { + hash = (37 * hash) + ENCRYPTION_INFO_FIELD_NUMBER; + hash = (53 * hash) + getEncryptionInfo().hashCode(); + } + if (getEncryptionInformationCount() > 0) { + hash = (37 * hash) + ENCRYPTION_INFORMATION_FIELD_NUMBER; + hash = (53 * hash) + getEncryptionInformationList().hashCode(); + } + hash = (37 * hash) + DATABASE_DIALECT_FIELD_NUMBER; + hash = (53 * hash) + databaseDialect_; + if (getReferencingBackupsCount() > 0) { + hash = (37 * hash) + REFERENCING_BACKUPS_FIELD_NUMBER; + hash = (53 * hash) + getReferencingBackupsList().hashCode(); + } + if (hasMaxExpireTime()) { + hash = (37 * hash) + MAX_EXPIRE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getMaxExpireTime().hashCode(); + } + if (getBackupSchedulesCount() > 0) { + hash = (37 * hash) + BACKUP_SCHEDULES_FIELD_NUMBER; + hash = (53 * hash) + getBackupSchedulesList().hashCode(); + } + hash = (37 * hash) + INCREMENTAL_BACKUP_CHAIN_ID_FIELD_NUMBER; + hash = (53 * hash) + getIncrementalBackupChainId().hashCode(); + if (hasOldestVersionTime()) { + hash = (37 * hash) + OLDEST_VERSION_TIME_FIELD_NUMBER; + hash = (53 * hash) + getOldestVersionTime().hashCode(); + } + if (getInstancePartitionsCount() > 0) { + hash = (37 * hash) + INSTANCE_PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + getInstancePartitionsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.Backup parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.Backup parseFrom( 
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.Backup parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.Backup parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.Backup parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.Backup parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.Backup parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.Backup parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.Backup parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.Backup parseDelimitedFrom( 
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.Backup parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.Backup parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.admin.database.v1.Backup prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * A backup of a Cloud Spanner database.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.Backup} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.Backup) + com.google.spanner.admin.database.v1.BackupOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_Backup_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_Backup_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.Backup.class, + com.google.spanner.admin.database.v1.Backup.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.Backup.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetVersionTimeFieldBuilder(); + internalGetExpireTimeFieldBuilder(); + internalGetCreateTimeFieldBuilder(); + internalGetEncryptionInfoFieldBuilder(); + internalGetEncryptionInformationFieldBuilder(); + internalGetMaxExpireTimeFieldBuilder(); + internalGetOldestVersionTimeFieldBuilder(); + internalGetInstancePartitionsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + database_ = ""; + versionTime_ = null; + if (versionTimeBuilder_ != null) { + versionTimeBuilder_.dispose(); + versionTimeBuilder_ = null; + } + expireTime_ = null; + if (expireTimeBuilder_ 
!= null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + name_ = ""; + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + sizeBytes_ = 0L; + freeableSizeBytes_ = 0L; + exclusiveSizeBytes_ = 0L; + state_ = 0; + referencingDatabases_ = com.google.protobuf.LazyStringArrayList.emptyList(); + encryptionInfo_ = null; + if (encryptionInfoBuilder_ != null) { + encryptionInfoBuilder_.dispose(); + encryptionInfoBuilder_ = null; + } + if (encryptionInformationBuilder_ == null) { + encryptionInformation_ = java.util.Collections.emptyList(); + } else { + encryptionInformation_ = null; + encryptionInformationBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000800); + databaseDialect_ = 0; + referencingBackups_ = com.google.protobuf.LazyStringArrayList.emptyList(); + maxExpireTime_ = null; + if (maxExpireTimeBuilder_ != null) { + maxExpireTimeBuilder_.dispose(); + maxExpireTimeBuilder_ = null; + } + backupSchedules_ = com.google.protobuf.LazyStringArrayList.emptyList(); + incrementalBackupChainId_ = ""; + oldestVersionTime_ = null; + if (oldestVersionTimeBuilder_ != null) { + oldestVersionTimeBuilder_.dispose(); + oldestVersionTimeBuilder_ = null; + } + if (instancePartitionsBuilder_ == null) { + instancePartitions_ = java.util.Collections.emptyList(); + } else { + instancePartitions_ = null; + instancePartitionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00040000); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_Backup_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.Backup getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.Backup.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.Backup 
build() { + com.google.spanner.admin.database.v1.Backup result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.Backup buildPartial() { + com.google.spanner.admin.database.v1.Backup result = + new com.google.spanner.admin.database.v1.Backup(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.admin.database.v1.Backup result) { + if (encryptionInformationBuilder_ == null) { + if (((bitField0_ & 0x00000800) != 0)) { + encryptionInformation_ = java.util.Collections.unmodifiableList(encryptionInformation_); + bitField0_ = (bitField0_ & ~0x00000800); + } + result.encryptionInformation_ = encryptionInformation_; + } else { + result.encryptionInformation_ = encryptionInformationBuilder_.build(); + } + if (instancePartitionsBuilder_ == null) { + if (((bitField0_ & 0x00040000) != 0)) { + instancePartitions_ = java.util.Collections.unmodifiableList(instancePartitions_); + bitField0_ = (bitField0_ & ~0x00040000); + } + result.instancePartitions_ = instancePartitions_; + } else { + result.instancePartitions_ = instancePartitionsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.admin.database.v1.Backup result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.database_ = database_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.versionTime_ = + versionTimeBuilder_ == null ? versionTime_ : versionTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.expireTime_ = expireTimeBuilder_ == null ? 
expireTime_ : expireTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.createTime_ = createTimeBuilder_ == null ? createTime_ : createTimeBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.sizeBytes_ = sizeBytes_; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.freeableSizeBytes_ = freeableSizeBytes_; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.exclusiveSizeBytes_ = exclusiveSizeBytes_; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.state_ = state_; + } + if (((from_bitField0_ & 0x00000200) != 0)) { + referencingDatabases_.makeImmutable(); + result.referencingDatabases_ = referencingDatabases_; + } + if (((from_bitField0_ & 0x00000400) != 0)) { + result.encryptionInfo_ = + encryptionInfoBuilder_ == null ? encryptionInfo_ : encryptionInfoBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00001000) != 0)) { + result.databaseDialect_ = databaseDialect_; + } + if (((from_bitField0_ & 0x00002000) != 0)) { + referencingBackups_.makeImmutable(); + result.referencingBackups_ = referencingBackups_; + } + if (((from_bitField0_ & 0x00004000) != 0)) { + result.maxExpireTime_ = + maxExpireTimeBuilder_ == null ? maxExpireTime_ : maxExpireTimeBuilder_.build(); + to_bitField0_ |= 0x00000010; + } + if (((from_bitField0_ & 0x00008000) != 0)) { + backupSchedules_.makeImmutable(); + result.backupSchedules_ = backupSchedules_; + } + if (((from_bitField0_ & 0x00010000) != 0)) { + result.incrementalBackupChainId_ = incrementalBackupChainId_; + } + if (((from_bitField0_ & 0x00020000) != 0)) { + result.oldestVersionTime_ = + oldestVersionTimeBuilder_ == null + ? 
oldestVersionTime_ + : oldestVersionTimeBuilder_.build(); + to_bitField0_ |= 0x00000020; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.Backup) { + return mergeFrom((com.google.spanner.admin.database.v1.Backup) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.Backup other) { + if (other == com.google.spanner.admin.database.v1.Backup.getDefaultInstance()) return this; + if (!other.getDatabase().isEmpty()) { + database_ = other.database_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasVersionTime()) { + mergeVersionTime(other.getVersionTime()); + } + if (other.hasExpireTime()) { + mergeExpireTime(other.getExpireTime()); + } + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (other.getSizeBytes() != 0L) { + setSizeBytes(other.getSizeBytes()); + } + if (other.getFreeableSizeBytes() != 0L) { + setFreeableSizeBytes(other.getFreeableSizeBytes()); + } + if (other.getExclusiveSizeBytes() != 0L) { + setExclusiveSizeBytes(other.getExclusiveSizeBytes()); + } + if (other.state_ != 0) { + setStateValue(other.getStateValue()); + } + if (!other.referencingDatabases_.isEmpty()) { + if (referencingDatabases_.isEmpty()) { + referencingDatabases_ = other.referencingDatabases_; + bitField0_ |= 0x00000200; + } else { + ensureReferencingDatabasesIsMutable(); + referencingDatabases_.addAll(other.referencingDatabases_); + } + onChanged(); + } + if (other.hasEncryptionInfo()) { + mergeEncryptionInfo(other.getEncryptionInfo()); + } + if (encryptionInformationBuilder_ == null) { + if (!other.encryptionInformation_.isEmpty()) { + if (encryptionInformation_.isEmpty()) { + encryptionInformation_ = 
other.encryptionInformation_; + bitField0_ = (bitField0_ & ~0x00000800); + } else { + ensureEncryptionInformationIsMutable(); + encryptionInformation_.addAll(other.encryptionInformation_); + } + onChanged(); + } + } else { + if (!other.encryptionInformation_.isEmpty()) { + if (encryptionInformationBuilder_.isEmpty()) { + encryptionInformationBuilder_.dispose(); + encryptionInformationBuilder_ = null; + encryptionInformation_ = other.encryptionInformation_; + bitField0_ = (bitField0_ & ~0x00000800); + encryptionInformationBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetEncryptionInformationFieldBuilder() + : null; + } else { + encryptionInformationBuilder_.addAllMessages(other.encryptionInformation_); + } + } + } + if (other.databaseDialect_ != 0) { + setDatabaseDialectValue(other.getDatabaseDialectValue()); + } + if (!other.referencingBackups_.isEmpty()) { + if (referencingBackups_.isEmpty()) { + referencingBackups_ = other.referencingBackups_; + bitField0_ |= 0x00002000; + } else { + ensureReferencingBackupsIsMutable(); + referencingBackups_.addAll(other.referencingBackups_); + } + onChanged(); + } + if (other.hasMaxExpireTime()) { + mergeMaxExpireTime(other.getMaxExpireTime()); + } + if (!other.backupSchedules_.isEmpty()) { + if (backupSchedules_.isEmpty()) { + backupSchedules_ = other.backupSchedules_; + bitField0_ |= 0x00008000; + } else { + ensureBackupSchedulesIsMutable(); + backupSchedules_.addAll(other.backupSchedules_); + } + onChanged(); + } + if (!other.getIncrementalBackupChainId().isEmpty()) { + incrementalBackupChainId_ = other.incrementalBackupChainId_; + bitField0_ |= 0x00010000; + onChanged(); + } + if (other.hasOldestVersionTime()) { + mergeOldestVersionTime(other.getOldestVersionTime()); + } + if (instancePartitionsBuilder_ == null) { + if (!other.instancePartitions_.isEmpty()) { + if (instancePartitions_.isEmpty()) { + instancePartitions_ = other.instancePartitions_; + bitField0_ = (bitField0_ & 
~0x00040000); + } else { + ensureInstancePartitionsIsMutable(); + instancePartitions_.addAll(other.instancePartitions_); + } + onChanged(); + } + } else { + if (!other.instancePartitions_.isEmpty()) { + if (instancePartitionsBuilder_.isEmpty()) { + instancePartitionsBuilder_.dispose(); + instancePartitionsBuilder_ = null; + instancePartitions_ = other.instancePartitions_; + bitField0_ = (bitField0_ & ~0x00040000); + instancePartitionsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetInstancePartitionsFieldBuilder() + : null; + } else { + instancePartitionsBuilder_.addAllMessages(other.instancePartitions_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 10 + case 18: + { + database_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetExpireTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetCreateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 34 + case 40: + { + sizeBytes_ = input.readInt64(); + bitField0_ |= 0x00000020; + break; + } // case 40 + case 48: + { + state_ = input.readEnum(); + bitField0_ |= 0x00000100; + break; + } // case 48 + case 58: + { + 
java.lang.String s = input.readStringRequireUtf8(); + ensureReferencingDatabasesIsMutable(); + referencingDatabases_.add(s); + break; + } // case 58 + case 66: + { + input.readMessage( + internalGetEncryptionInfoFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000400; + break; + } // case 66 + case 74: + { + input.readMessage( + internalGetVersionTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 74 + case 80: + { + databaseDialect_ = input.readEnum(); + bitField0_ |= 0x00001000; + break; + } // case 80 + case 90: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureReferencingBackupsIsMutable(); + referencingBackups_.add(s); + break; + } // case 90 + case 98: + { + input.readMessage( + internalGetMaxExpireTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00004000; + break; + } // case 98 + case 106: + { + com.google.spanner.admin.database.v1.EncryptionInfo m = + input.readMessage( + com.google.spanner.admin.database.v1.EncryptionInfo.parser(), + extensionRegistry); + if (encryptionInformationBuilder_ == null) { + ensureEncryptionInformationIsMutable(); + encryptionInformation_.add(m); + } else { + encryptionInformationBuilder_.addMessage(m); + } + break; + } // case 106 + case 114: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureBackupSchedulesIsMutable(); + backupSchedules_.add(s); + break; + } // case 114 + case 120: + { + freeableSizeBytes_ = input.readInt64(); + bitField0_ |= 0x00000040; + break; + } // case 120 + case 128: + { + exclusiveSizeBytes_ = input.readInt64(); + bitField0_ |= 0x00000080; + break; + } // case 128 + case 138: + { + incrementalBackupChainId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00010000; + break; + } // case 138 + case 146: + { + input.readMessage( + internalGetOldestVersionTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00020000; + break; + } // case 146 + case 154: + { + 
com.google.spanner.admin.database.v1.BackupInstancePartition m = + input.readMessage( + com.google.spanner.admin.database.v1.BackupInstancePartition.parser(), + extensionRegistry); + if (instancePartitionsBuilder_ == null) { + ensureInstancePartitionsIsMutable(); + instancePartitions_.add(m); + } else { + instancePartitionsBuilder_.addMessage(m); + } + break; + } // case 154 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object database_ = ""; + + /** + * + * + *
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. Name of the database from which this backup was created. This
    +     * needs to be in the same instance as the backup. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * string database = 2 [(.google.api.resource_reference) = { ... } + * + * @return The database. + */ + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. Name of the database from which this backup was created. This
    +     * needs to be in the same instance as the backup. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * string database = 2 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for database. + */ + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. Name of the database from which this backup was created. This
    +     * needs to be in the same instance as the backup. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * string database = 2 [(.google.api.resource_reference) = { ... } + * + * @param value The database to set. + * @return This builder for chaining. + */ + public Builder setDatabase(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. Name of the database from which this backup was created. This
    +     * needs to be in the same instance as the backup. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * string database = 2 [(.google.api.resource_reference) = { ... } + * + * @return This builder for chaining. + */ + public Builder clearDatabase() { + database_ = getDefaultInstance().getDatabase(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. Name of the database from which this backup was created. This
    +     * needs to be in the same instance as the backup. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * string database = 2 [(.google.api.resource_reference) = { ... } + * + * @param value The bytes for database to set. + * @return This builder for chaining. + */ + public Builder setDatabaseBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp versionTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + versionTimeBuilder_; + + /** + * + * + *
    +     * The backup will contain an externally consistent copy of the database at
    +     * the timestamp specified by `version_time`. If `version_time` is not
    +     * specified, the system will set `version_time` to the `create_time` of the
    +     * backup.
    +     * 
    + * + * .google.protobuf.Timestamp version_time = 9; + * + * @return Whether the versionTime field is set. + */ + public boolean hasVersionTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * The backup will contain an externally consistent copy of the database at
    +     * the timestamp specified by `version_time`. If `version_time` is not
    +     * specified, the system will set `version_time` to the `create_time` of the
    +     * backup.
    +     * 
    + * + * .google.protobuf.Timestamp version_time = 9; + * + * @return The versionTime. + */ + public com.google.protobuf.Timestamp getVersionTime() { + if (versionTimeBuilder_ == null) { + return versionTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : versionTime_; + } else { + return versionTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The backup will contain an externally consistent copy of the database at
    +     * the timestamp specified by `version_time`. If `version_time` is not
    +     * specified, the system will set `version_time` to the `create_time` of the
    +     * backup.
    +     * 
    + * + * .google.protobuf.Timestamp version_time = 9; + */ + public Builder setVersionTime(com.google.protobuf.Timestamp value) { + if (versionTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + versionTime_ = value; + } else { + versionTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The backup will contain an externally consistent copy of the database at
    +     * the timestamp specified by `version_time`. If `version_time` is not
    +     * specified, the system will set `version_time` to the `create_time` of the
    +     * backup.
    +     * 
    + * + * .google.protobuf.Timestamp version_time = 9; + */ + public Builder setVersionTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (versionTimeBuilder_ == null) { + versionTime_ = builderForValue.build(); + } else { + versionTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The backup will contain an externally consistent copy of the database at
    +     * the timestamp specified by `version_time`. If `version_time` is not
    +     * specified, the system will set `version_time` to the `create_time` of the
    +     * backup.
    +     * 
    + * + * .google.protobuf.Timestamp version_time = 9; + */ + public Builder mergeVersionTime(com.google.protobuf.Timestamp value) { + if (versionTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && versionTime_ != null + && versionTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getVersionTimeBuilder().mergeFrom(value); + } else { + versionTime_ = value; + } + } else { + versionTimeBuilder_.mergeFrom(value); + } + if (versionTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The backup will contain an externally consistent copy of the database at
    +     * the timestamp specified by `version_time`. If `version_time` is not
    +     * specified, the system will set `version_time` to the `create_time` of the
    +     * backup.
    +     * 
    + * + * .google.protobuf.Timestamp version_time = 9; + */ + public Builder clearVersionTime() { + bitField0_ = (bitField0_ & ~0x00000002); + versionTime_ = null; + if (versionTimeBuilder_ != null) { + versionTimeBuilder_.dispose(); + versionTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The backup will contain an externally consistent copy of the database at
    +     * the timestamp specified by `version_time`. If `version_time` is not
    +     * specified, the system will set `version_time` to the `create_time` of the
    +     * backup.
    +     * 
    + * + * .google.protobuf.Timestamp version_time = 9; + */ + public com.google.protobuf.Timestamp.Builder getVersionTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetVersionTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The backup will contain an externally consistent copy of the database at
    +     * the timestamp specified by `version_time`. If `version_time` is not
    +     * specified, the system will set `version_time` to the `create_time` of the
    +     * backup.
    +     * 
    + * + * .google.protobuf.Timestamp version_time = 9; + */ + public com.google.protobuf.TimestampOrBuilder getVersionTimeOrBuilder() { + if (versionTimeBuilder_ != null) { + return versionTimeBuilder_.getMessageOrBuilder(); + } else { + return versionTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : versionTime_; + } + } + + /** + * + * + *
    +     * The backup will contain an externally consistent copy of the database at
    +     * the timestamp specified by `version_time`. If `version_time` is not
    +     * specified, the system will set `version_time` to the `create_time` of the
    +     * backup.
    +     * 
    + * + * .google.protobuf.Timestamp version_time = 9; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetVersionTimeFieldBuilder() { + if (versionTimeBuilder_ == null) { + versionTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getVersionTime(), getParentForChildren(), isClean()); + versionTime_ = null; + } + return versionTimeBuilder_; + } + + private com.google.protobuf.Timestamp expireTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + expireTimeBuilder_; + + /** + * + * + *
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. The expiration time of the backup, with microseconds
    +     * granularity that must be at least 6 hours and at most 366 days
    +     * from the time the CreateBackup request is processed. Once the `expire_time`
    +     * has passed, the backup is eligible to be automatically deleted by Cloud
    +     * Spanner to free the resources used by the backup.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 3; + * + * @return Whether the expireTime field is set. + */ + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. The expiration time of the backup, with microseconds
    +     * granularity that must be at least 6 hours and at most 366 days
    +     * from the time the CreateBackup request is processed. Once the `expire_time`
    +     * has passed, the backup is eligible to be automatically deleted by Cloud
    +     * Spanner to free the resources used by the backup.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 3; + * + * @return The expireTime. + */ + public com.google.protobuf.Timestamp getExpireTime() { + if (expireTimeBuilder_ == null) { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } else { + return expireTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. The expiration time of the backup, with microseconds
    +     * granularity that must be at least 6 hours and at most 366 days
    +     * from the time the CreateBackup request is processed. Once the `expire_time`
    +     * has passed, the backup is eligible to be automatically deleted by Cloud
    +     * Spanner to free the resources used by the backup.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 3; + */ + public Builder setExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + expireTime_ = value; + } else { + expireTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. The expiration time of the backup, with microseconds
    +     * granularity that must be at least 6 hours and at most 366 days
    +     * from the time the CreateBackup request is processed. Once the `expire_time`
    +     * has passed, the backup is eligible to be automatically deleted by Cloud
    +     * Spanner to free the resources used by the backup.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 3; + */ + public Builder setExpireTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (expireTimeBuilder_ == null) { + expireTime_ = builderForValue.build(); + } else { + expireTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. The expiration time of the backup, with microseconds
    +     * granularity that must be at least 6 hours and at most 366 days
    +     * from the time the CreateBackup request is processed. Once the `expire_time`
    +     * has passed, the backup is eligible to be automatically deleted by Cloud
    +     * Spanner to free the resources used by the backup.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 3; + */ + public Builder mergeExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && expireTime_ != null + && expireTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getExpireTimeBuilder().mergeFrom(value); + } else { + expireTime_ = value; + } + } else { + expireTimeBuilder_.mergeFrom(value); + } + if (expireTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. The expiration time of the backup, with microseconds
    +     * granularity that must be at least 6 hours and at most 366 days
    +     * from the time the CreateBackup request is processed. Once the `expire_time`
    +     * has passed, the backup is eligible to be automatically deleted by Cloud
    +     * Spanner to free the resources used by the backup.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 3; + */ + public Builder clearExpireTime() { + bitField0_ = (bitField0_ & ~0x00000004); + expireTime_ = null; + if (expireTimeBuilder_ != null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. The expiration time of the backup, with microseconds
    +     * granularity that must be at least 6 hours and at most 366 days
    +     * from the time the CreateBackup request is processed. Once the `expire_time`
    +     * has passed, the backup is eligible to be automatically deleted by Cloud
    +     * Spanner to free the resources used by the backup.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 3; + */ + public com.google.protobuf.Timestamp.Builder getExpireTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetExpireTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. The expiration time of the backup, with microseconds
    +     * granularity that must be at least 6 hours and at most 366 days
    +     * from the time the CreateBackup request is processed. Once the `expire_time`
    +     * has passed, the backup is eligible to be automatically deleted by Cloud
    +     * Spanner to free the resources used by the backup.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 3; + */ + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + if (expireTimeBuilder_ != null) { + return expireTimeBuilder_.getMessageOrBuilder(); + } else { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } + } + + /** + * + * + *
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. The expiration time of the backup, with microseconds
    +     * granularity that must be at least 6 hours and at most 366 days
    +     * from the time the CreateBackup request is processed. Once the `expire_time`
    +     * has passed, the backup is eligible to be automatically deleted by Cloud
    +     * Spanner to free the resources used by the backup.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetExpireTimeFieldBuilder() { + if (expireTimeBuilder_ == null) { + expireTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getExpireTime(), getParentForChildren(), isClean()); + expireTime_ = null; + } + return expireTimeBuilder_; + } + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Output only for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. Required for the
    +     * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
    +     * operation.
    +     *
    +     * A globally unique identifier for the backup which cannot be
    +     * changed. Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/[a-z][a-z0-9_\-]*[a-z0-9]`
    +     * The final segment of the name must be between 2 and 60 characters
    +     * in length.
    +     *
    +     * The backup is stored in the location(s) specified in the instance
    +     * configuration of the instance containing the backup, identified
    +     * by the prefix of the backup name of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * string name = 1; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Output only for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. Required for the
    +     * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
    +     * operation.
    +     *
    +     * A globally unique identifier for the backup which cannot be
    +     * changed. Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/[a-z][a-z0-9_\-]*[a-z0-9]`
    +     * The final segment of the name must be between 2 and 60 characters
    +     * in length.
    +     *
    +     * The backup is stored in the location(s) specified in the instance
    +     * configuration of the instance containing the backup, identified
    +     * by the prefix of the backup name of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Output only for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. Required for the
    +     * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
    +     * operation.
    +     *
    +     * A globally unique identifier for the backup which cannot be
    +     * changed. Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/[a-z][a-z0-9_\-]*[a-z0-9]`
    +     * The final segment of the name must be between 2 and 60 characters
    +     * in length.
    +     *
    +     * The backup is stored in the location(s) specified in the instance
    +     * configuration of the instance containing the backup, identified
    +     * by the prefix of the backup name of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * string name = 1; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. Required for the
    +     * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
    +     * operation.
    +     *
    +     * A globally unique identifier for the backup which cannot be
    +     * changed. Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/[a-z][a-z0-9_\-]*[a-z0-9]`
    +     * The final segment of the name must be between 2 and 60 characters
    +     * in length.
    +     *
    +     * The backup is stored in the location(s) specified in the instance
    +     * configuration of the instance containing the backup, identified
    +     * by the prefix of the backup name of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * string name = 1; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. Required for the
    +     * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
    +     * operation.
    +     *
    +     * A globally unique identifier for the backup which cannot be
    +     * changed. Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/[a-z][a-z0-9_\-]*[a-z0-9]`
    +     * The final segment of the name must be between 2 and 60 characters
    +     * in length.
    +     *
    +     * The backup is stored in the location(s) specified in the instance
    +     * configuration of the instance containing the backup, identified
    +     * by the prefix of the backup name of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * string name = 1; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + + /** + * + * + *
    +     * Output only. The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request is received. If the request does not specify `version_time`, the
    +     * `version_time` of the backup will be equivalent to the `create_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * Output only. The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request is received. If the request does not specify `version_time`, the
    +     * `version_time` of the backup will be equivalent to the `create_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request is received. If the request does not specify `version_time`, the
    +     * `version_time` of the backup will be equivalent to the `create_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + } else { + createTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request is received. If the request does not specify `version_time`, the
    +     * `version_time` of the backup will be equivalent to the `create_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request is received. If the request does not specify `version_time`, the
    +     * `version_time` of the backup will be equivalent to the `create_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && createTime_ != null + && createTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCreateTimeBuilder().mergeFrom(value); + } else { + createTime_ = value; + } + } else { + createTimeBuilder_.mergeFrom(value); + } + if (createTime_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request is received. If the request does not specify `version_time`, the
    +     * `version_time` of the backup will be equivalent to the `create_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000010); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request is received. If the request does not specify `version_time`, the
    +     * `version_time` of the backup will be equivalent to the `create_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetCreateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request is received. If the request does not specify `version_time`, the
    +     * `version_time` of the backup will be equivalent to the `create_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + + /** + * + * + *
    +     * Output only. The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request is received. If the request does not specify `version_time`, the
    +     * `version_time` of the backup will be equivalent to the `create_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private long sizeBytes_; + + /** + * + * + *
    +     * Output only. Size of the backup in bytes.
    +     * 
    + * + * int64 size_bytes = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The sizeBytes. + */ + @java.lang.Override + public long getSizeBytes() { + return sizeBytes_; + } + + /** + * + * + *
    +     * Output only. Size of the backup in bytes.
    +     * 
    + * + * int64 size_bytes = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The sizeBytes to set. + * @return This builder for chaining. + */ + public Builder setSizeBytes(long value) { + + sizeBytes_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Size of the backup in bytes.
    +     * 
    + * + * int64 size_bytes = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearSizeBytes() { + bitField0_ = (bitField0_ & ~0x00000020); + sizeBytes_ = 0L; + onChanged(); + return this; + } + + private long freeableSizeBytes_; + + /** + * + * + *
    +     * Output only. The number of bytes that will be freed by deleting this
    +     * backup. This value will be zero if, for example, this backup is part of an
    +     * incremental backup chain and younger backups in the chain require that we
    +     * keep its data. For backups not in an incremental backup chain, this is
    +     * always the size of the backup. This value may change if backups on the same
    +     * chain get created, deleted or expired.
    +     * 
    + * + * int64 freeable_size_bytes = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The freeableSizeBytes. + */ + @java.lang.Override + public long getFreeableSizeBytes() { + return freeableSizeBytes_; + } + + /** + * + * + *
    +     * Output only. The number of bytes that will be freed by deleting this
    +     * backup. This value will be zero if, for example, this backup is part of an
    +     * incremental backup chain and younger backups in the chain require that we
    +     * keep its data. For backups not in an incremental backup chain, this is
    +     * always the size of the backup. This value may change if backups on the same
    +     * chain get created, deleted or expired.
    +     * 
    + * + * int64 freeable_size_bytes = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The freeableSizeBytes to set. + * @return This builder for chaining. + */ + public Builder setFreeableSizeBytes(long value) { + + freeableSizeBytes_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The number of bytes that will be freed by deleting this
    +     * backup. This value will be zero if, for example, this backup is part of an
    +     * incremental backup chain and younger backups in the chain require that we
    +     * keep its data. For backups not in an incremental backup chain, this is
    +     * always the size of the backup. This value may change if backups on the same
    +     * chain get created, deleted or expired.
    +     * 
    + * + * int64 freeable_size_bytes = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearFreeableSizeBytes() { + bitField0_ = (bitField0_ & ~0x00000040); + freeableSizeBytes_ = 0L; + onChanged(); + return this; + } + + private long exclusiveSizeBytes_; + + /** + * + * + *
    +     * Output only. For a backup in an incremental backup chain, this is the
    +     * storage space needed to keep the data that has changed since the previous
    +     * backup. For all other backups, this is always the size of the backup. This
    +     * value may change if backups on the same chain get deleted or expired.
    +     *
    +     * This field can be used to calculate the total storage space used by a set
    +     * of backups. For example, the total space used by all backups of a database
    +     * can be computed by summing up this field.
    +     * 
    + * + * int64 exclusive_size_bytes = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The exclusiveSizeBytes. + */ + @java.lang.Override + public long getExclusiveSizeBytes() { + return exclusiveSizeBytes_; + } + + /** + * + * + *
    +     * Output only. For a backup in an incremental backup chain, this is the
    +     * storage space needed to keep the data that has changed since the previous
    +     * backup. For all other backups, this is always the size of the backup. This
    +     * value may change if backups on the same chain get deleted or expired.
    +     *
    +     * This field can be used to calculate the total storage space used by a set
    +     * of backups. For example, the total space used by all backups of a database
    +     * can be computed by summing up this field.
    +     * 
    + * + * int64 exclusive_size_bytes = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The exclusiveSizeBytes to set. + * @return This builder for chaining. + */ + public Builder setExclusiveSizeBytes(long value) { + + exclusiveSizeBytes_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. For a backup in an incremental backup chain, this is the
    +     * storage space needed to keep the data that has changed since the previous
    +     * backup. For all other backups, this is always the size of the backup. This
    +     * value may change if backups on the same chain get deleted or expired.
    +     *
    +     * This field can be used to calculate the total storage space used by a set
    +     * of backups. For example, the total space used by all backups of a database
    +     * can be computed by summing up this field.
    +     * 
    + * + * int64 exclusive_size_bytes = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearExclusiveSizeBytes() { + bitField0_ = (bitField0_ & ~0x00000080); + exclusiveSizeBytes_ = 0L; + onChanged(); + return this; + } + + private int state_ = 0; + + /** + * + * + *
    +     * Output only. The current state of the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup.State state = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + @java.lang.Override + public int getStateValue() { + return state_; + } + + /** + * + * + *
    +     * Output only. The current state of the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup.State state = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The enum numeric value on the wire for state to set. + * @return This builder for chaining. + */ + public Builder setStateValue(int value) { + state_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The current state of the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup.State state = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.Backup.State getState() { + com.google.spanner.admin.database.v1.Backup.State result = + com.google.spanner.admin.database.v1.Backup.State.forNumber(state_); + return result == null + ? com.google.spanner.admin.database.v1.Backup.State.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * Output only. The current state of the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup.State state = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The state to set. + * @return This builder for chaining. + */ + public Builder setState(com.google.spanner.admin.database.v1.Backup.State value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000100; + state_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The current state of the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup.State state = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000100); + state_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList referencingDatabases_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureReferencingDatabasesIsMutable() { + if (!referencingDatabases_.isModifiable()) { + referencingDatabases_ = new com.google.protobuf.LazyStringArrayList(referencingDatabases_); + } + bitField0_ |= 0x00000200; + } + + /** + * + * + *
    +     * Output only. The names of the restored databases that reference the backup.
    +     * The database names are of
    +     * the form `projects/<project>/instances/<instance>/databases/<database>`.
    +     * Referencing databases may exist in different instances. The existence of
    +     * any referencing database prevents the backup from being deleted. When a
    +     * restored database from the backup enters the `READY` state, the reference
    +     * to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_databases = 7 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the referencingDatabases. + */ + public com.google.protobuf.ProtocolStringList getReferencingDatabasesList() { + referencingDatabases_.makeImmutable(); + return referencingDatabases_; + } + + /** + * + * + *
    +     * Output only. The names of the restored databases that reference the backup.
    +     * The database names are of
    +     * the form `projects/<project>/instances/<instance>/databases/<database>`.
    +     * Referencing databases may exist in different instances. The existence of
    +     * any referencing database prevents the backup from being deleted. When a
    +     * restored database from the backup enters the `READY` state, the reference
    +     * to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_databases = 7 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The count of referencingDatabases. + */ + public int getReferencingDatabasesCount() { + return referencingDatabases_.size(); + } + + /** + * + * + *
    +     * Output only. The names of the restored databases that reference the backup.
    +     * The database names are of
    +     * the form `projects/<project>/instances/<instance>/databases/<database>`.
    +     * Referencing databases may exist in different instances. The existence of
    +     * any referencing database prevents the backup from being deleted. When a
    +     * restored database from the backup enters the `READY` state, the reference
    +     * to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_databases = 7 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The referencingDatabases at the given index. + */ + public java.lang.String getReferencingDatabases(int index) { + return referencingDatabases_.get(index); + } + + /** + * + * + *
    +     * Output only. The names of the restored databases that reference the backup.
    +     * The database names are of
    +     * the form `projects/<project>/instances/<instance>/databases/<database>`.
    +     * Referencing databases may exist in different instances. The existence of
    +     * any referencing database prevents the backup from being deleted. When a
    +     * restored database from the backup enters the `READY` state, the reference
    +     * to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_databases = 7 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the referencingDatabases at the given index. + */ + public com.google.protobuf.ByteString getReferencingDatabasesBytes(int index) { + return referencingDatabases_.getByteString(index); + } + + /** + * + * + *
    +     * Output only. The names of the restored databases that reference the backup.
    +     * The database names are of
    +     * the form `projects/<project>/instances/<instance>/databases/<database>`.
    +     * Referencing databases may exist in different instances. The existence of
    +     * any referencing database prevents the backup from being deleted. When a
    +     * restored database from the backup enters the `READY` state, the reference
    +     * to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_databases = 7 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index to set the value at. + * @param value The referencingDatabases to set. + * @return This builder for chaining. + */ + public Builder setReferencingDatabases(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureReferencingDatabasesIsMutable(); + referencingDatabases_.set(index, value); + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The names of the restored databases that reference the backup.
    +     * The database names are of
    +     * the form `projects/<project>/instances/<instance>/databases/<database>`.
    +     * Referencing databases may exist in different instances. The existence of
    +     * any referencing database prevents the backup from being deleted. When a
    +     * restored database from the backup enters the `READY` state, the reference
    +     * to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_databases = 7 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param value The referencingDatabases to add. + * @return This builder for chaining. + */ + public Builder addReferencingDatabases(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureReferencingDatabasesIsMutable(); + referencingDatabases_.add(value); + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The names of the restored databases that reference the backup.
    +     * The database names are of
    +     * the form `projects/<project>/instances/<instance>/databases/<database>`.
    +     * Referencing databases may exist in different instances. The existence of
    +     * any referencing database prevents the backup from being deleted. When a
    +     * restored database from the backup enters the `READY` state, the reference
    +     * to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_databases = 7 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param values The referencingDatabases to add. + * @return This builder for chaining. + */ + public Builder addAllReferencingDatabases(java.lang.Iterable values) { + ensureReferencingDatabasesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, referencingDatabases_); + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The names of the restored databases that reference the backup.
    +     * The database names are of
    +     * the form `projects/<project>/instances/<instance>/databases/<database>`.
    +     * Referencing databases may exist in different instances. The existence of
    +     * any referencing database prevents the backup from being deleted. When a
    +     * restored database from the backup enters the `READY` state, the reference
    +     * to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_databases = 7 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearReferencingDatabases() { + referencingDatabases_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000200); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The names of the restored databases that reference the backup.
    +     * The database names are of
    +     * the form `projects/<project>/instances/<instance>/databases/<database>`.
    +     * Referencing databases may exist in different instances. The existence of
    +     * any referencing database prevents the backup from being deleted. When a
    +     * restored database from the backup enters the `READY` state, the reference
    +     * to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_databases = 7 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes of the referencingDatabases to add. + * @return This builder for chaining. + */ + public Builder addReferencingDatabasesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureReferencingDatabasesIsMutable(); + referencingDatabases_.add(value); + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + private com.google.spanner.admin.database.v1.EncryptionInfo encryptionInfo_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionInfo, + com.google.spanner.admin.database.v1.EncryptionInfo.Builder, + com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder> + encryptionInfoBuilder_; + + /** + * + * + *
    +     * Output only. The encryption information for the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the encryptionInfo field is set. + */ + public boolean hasEncryptionInfo() { + return ((bitField0_ & 0x00000400) != 0); + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The encryptionInfo. + */ + public com.google.spanner.admin.database.v1.EncryptionInfo getEncryptionInfo() { + if (encryptionInfoBuilder_ == null) { + return encryptionInfo_ == null + ? com.google.spanner.admin.database.v1.EncryptionInfo.getDefaultInstance() + : encryptionInfo_; + } else { + return encryptionInfoBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setEncryptionInfo(com.google.spanner.admin.database.v1.EncryptionInfo value) { + if (encryptionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + encryptionInfo_ = value; + } else { + encryptionInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setEncryptionInfo( + com.google.spanner.admin.database.v1.EncryptionInfo.Builder builderForValue) { + if (encryptionInfoBuilder_ == null) { + encryptionInfo_ = builderForValue.build(); + } else { + encryptionInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeEncryptionInfo(com.google.spanner.admin.database.v1.EncryptionInfo value) { + if (encryptionInfoBuilder_ == null) { + if (((bitField0_ & 0x00000400) != 0) + && encryptionInfo_ != null + && encryptionInfo_ + != com.google.spanner.admin.database.v1.EncryptionInfo.getDefaultInstance()) { + getEncryptionInfoBuilder().mergeFrom(value); + } else { + encryptionInfo_ = value; + } + } else { + encryptionInfoBuilder_.mergeFrom(value); + } + if (encryptionInfo_ != null) { + bitField0_ |= 0x00000400; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearEncryptionInfo() { + bitField0_ = (bitField0_ & ~0x00000400); + encryptionInfo_ = null; + if (encryptionInfoBuilder_ != null) { + encryptionInfoBuilder_.dispose(); + encryptionInfoBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.EncryptionInfo.Builder getEncryptionInfoBuilder() { + bitField0_ |= 0x00000400; + onChanged(); + return internalGetEncryptionInfoFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder + getEncryptionInfoOrBuilder() { + if (encryptionInfoBuilder_ != null) { + return encryptionInfoBuilder_.getMessageOrBuilder(); + } else { + return encryptionInfo_ == null + ? com.google.spanner.admin.database.v1.EncryptionInfo.getDefaultInstance() + : encryptionInfo_; + } + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionInfo, + com.google.spanner.admin.database.v1.EncryptionInfo.Builder, + com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder> + internalGetEncryptionInfoFieldBuilder() { + if (encryptionInfoBuilder_ == null) { + encryptionInfoBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionInfo, + com.google.spanner.admin.database.v1.EncryptionInfo.Builder, + com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder>( + getEncryptionInfo(), getParentForChildren(), isClean()); + encryptionInfo_ = null; + } + return encryptionInfoBuilder_; + } + + private java.util.List + encryptionInformation_ = java.util.Collections.emptyList(); + + private void ensureEncryptionInformationIsMutable() { + if (!((bitField0_ & 0x00000800) != 0)) { + encryptionInformation_ = + new java.util.ArrayList( + encryptionInformation_); + bitField0_ |= 0x00000800; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionInfo, + com.google.spanner.admin.database.v1.EncryptionInfo.Builder, + com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder> + encryptionInformationBuilder_; + + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getEncryptionInformationList() { + if (encryptionInformationBuilder_ == null) { + return java.util.Collections.unmodifiableList(encryptionInformation_); + } else { + return encryptionInformationBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public int getEncryptionInformationCount() { + if (encryptionInformationBuilder_ == null) { + return encryptionInformation_.size(); + } else { + return encryptionInformationBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.EncryptionInfo getEncryptionInformation(int index) { + if (encryptionInformationBuilder_ == null) { + return encryptionInformation_.get(index); + } else { + return encryptionInformationBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setEncryptionInformation( + int index, com.google.spanner.admin.database.v1.EncryptionInfo value) { + if (encryptionInformationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEncryptionInformationIsMutable(); + encryptionInformation_.set(index, value); + onChanged(); + } else { + encryptionInformationBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setEncryptionInformation( + int index, com.google.spanner.admin.database.v1.EncryptionInfo.Builder builderForValue) { + if (encryptionInformationBuilder_ == null) { + ensureEncryptionInformationIsMutable(); + encryptionInformation_.set(index, builderForValue.build()); + onChanged(); + } else { + encryptionInformationBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addEncryptionInformation( + com.google.spanner.admin.database.v1.EncryptionInfo value) { + if (encryptionInformationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEncryptionInformationIsMutable(); + encryptionInformation_.add(value); + onChanged(); + } else { + encryptionInformationBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addEncryptionInformation( + int index, com.google.spanner.admin.database.v1.EncryptionInfo value) { + if (encryptionInformationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEncryptionInformationIsMutable(); + encryptionInformation_.add(index, value); + onChanged(); + } else { + encryptionInformationBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addEncryptionInformation( + com.google.spanner.admin.database.v1.EncryptionInfo.Builder builderForValue) { + if (encryptionInformationBuilder_ == null) { + ensureEncryptionInformationIsMutable(); + encryptionInformation_.add(builderForValue.build()); + onChanged(); + } else { + encryptionInformationBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addEncryptionInformation( + int index, com.google.spanner.admin.database.v1.EncryptionInfo.Builder builderForValue) { + if (encryptionInformationBuilder_ == null) { + ensureEncryptionInformationIsMutable(); + encryptionInformation_.add(index, builderForValue.build()); + onChanged(); + } else { + encryptionInformationBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addAllEncryptionInformation( + java.lang.Iterable values) { + if (encryptionInformationBuilder_ == null) { + ensureEncryptionInformationIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, encryptionInformation_); + onChanged(); + } else { + encryptionInformationBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearEncryptionInformation() { + if (encryptionInformationBuilder_ == null) { + encryptionInformation_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000800); + onChanged(); + } else { + encryptionInformationBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder removeEncryptionInformation(int index) { + if (encryptionInformationBuilder_ == null) { + ensureEncryptionInformationIsMutable(); + encryptionInformation_.remove(index); + onChanged(); + } else { + encryptionInformationBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.EncryptionInfo.Builder + getEncryptionInformationBuilder(int index) { + return internalGetEncryptionInformationFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder + getEncryptionInformationOrBuilder(int index) { + if (encryptionInformationBuilder_ == null) { + return encryptionInformation_.get(index); + } else { + return encryptionInformationBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getEncryptionInformationOrBuilderList() { + if (encryptionInformationBuilder_ != null) { + return encryptionInformationBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(encryptionInformation_); + } + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.EncryptionInfo.Builder + addEncryptionInformationBuilder() { + return internalGetEncryptionInformationFieldBuilder() + .addBuilder(com.google.spanner.admin.database.v1.EncryptionInfo.getDefaultInstance()); + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.EncryptionInfo.Builder + addEncryptionInformationBuilder(int index) { + return internalGetEncryptionInformationFieldBuilder() + .addBuilder( + index, com.google.spanner.admin.database.v1.EncryptionInfo.getDefaultInstance()); + } + + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getEncryptionInformationBuilderList() { + return internalGetEncryptionInformationFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionInfo, + com.google.spanner.admin.database.v1.EncryptionInfo.Builder, + com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder> + internalGetEncryptionInformationFieldBuilder() { + if (encryptionInformationBuilder_ == null) { + encryptionInformationBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionInfo, + com.google.spanner.admin.database.v1.EncryptionInfo.Builder, + com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder>( + encryptionInformation_, + ((bitField0_ & 0x00000800) != 0), + getParentForChildren(), + isClean()); + encryptionInformation_ = null; + } + return encryptionInformationBuilder_; + } + + private int databaseDialect_ = 0; + + /** + * + * + *
    +     * Output only. The database dialect information for the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for databaseDialect. + */ + @java.lang.Override + public int getDatabaseDialectValue() { + return databaseDialect_; + } + + /** + * + * + *
    +     * Output only. The database dialect information for the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The enum numeric value on the wire for databaseDialect to set. + * @return This builder for chaining. + */ + public Builder setDatabaseDialectValue(int value) { + databaseDialect_ = value; + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The database dialect information for the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The databaseDialect. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.DatabaseDialect getDatabaseDialect() { + com.google.spanner.admin.database.v1.DatabaseDialect result = + com.google.spanner.admin.database.v1.DatabaseDialect.forNumber(databaseDialect_); + return result == null + ? com.google.spanner.admin.database.v1.DatabaseDialect.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * Output only. The database dialect information for the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The databaseDialect to set. + * @return This builder for chaining. + */ + public Builder setDatabaseDialect(com.google.spanner.admin.database.v1.DatabaseDialect value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00001000; + databaseDialect_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The database dialect information for the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearDatabaseDialect() { + bitField0_ = (bitField0_ & ~0x00001000); + databaseDialect_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList referencingBackups_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureReferencingBackupsIsMutable() { + if (!referencingBackups_.isModifiable()) { + referencingBackups_ = new com.google.protobuf.LazyStringArrayList(referencingBackups_); + } + bitField0_ |= 0x00002000; + } + + /** + * + * + *
    +     * Output only. The names of the destination backups being created by copying
    +     * this source backup. The backup names are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * Referencing backups may exist in different instances. The existence of
    +     * any referencing backup prevents the backup from being deleted. When the
    +     * copy operation is done (either successfully completed or cancelled or the
    +     * destination backup is deleted), the reference to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the referencingBackups. + */ + public com.google.protobuf.ProtocolStringList getReferencingBackupsList() { + referencingBackups_.makeImmutable(); + return referencingBackups_; + } + + /** + * + * + *
    +     * Output only. The names of the destination backups being created by copying
    +     * this source backup. The backup names are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * Referencing backups may exist in different instances. The existence of
    +     * any referencing backup prevents the backup from being deleted. When the
    +     * copy operation is done (either successfully completed or cancelled or the
    +     * destination backup is deleted), the reference to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The count of referencingBackups. + */ + public int getReferencingBackupsCount() { + return referencingBackups_.size(); + } + + /** + * + * + *
    +     * Output only. The names of the destination backups being created by copying
    +     * this source backup. The backup names are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * Referencing backups may exist in different instances. The existence of
    +     * any referencing backup prevents the backup from being deleted. When the
    +     * copy operation is done (either successfully completed or cancelled or the
    +     * destination backup is deleted), the reference to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The referencingBackups at the given index. + */ + public java.lang.String getReferencingBackups(int index) { + return referencingBackups_.get(index); + } + + /** + * + * + *
    +     * Output only. The names of the destination backups being created by copying
    +     * this source backup. The backup names are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * Referencing backups may exist in different instances. The existence of
    +     * any referencing backup prevents the backup from being deleted. When the
    +     * copy operation is done (either successfully completed or cancelled or the
    +     * destination backup is deleted), the reference to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the referencingBackups at the given index. + */ + public com.google.protobuf.ByteString getReferencingBackupsBytes(int index) { + return referencingBackups_.getByteString(index); + } + + /** + * + * + *
    +     * Output only. The names of the destination backups being created by copying
    +     * this source backup. The backup names are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * Referencing backups may exist in different instances. The existence of
    +     * any referencing backup prevents the backup from being deleted. When the
    +     * copy operation is done (either successfully completed or cancelled or the
    +     * destination backup is deleted), the reference to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index to set the value at. + * @param value The referencingBackups to set. + * @return This builder for chaining. + */ + public Builder setReferencingBackups(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureReferencingBackupsIsMutable(); + referencingBackups_.set(index, value); + bitField0_ |= 0x00002000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The names of the destination backups being created by copying
    +     * this source backup. The backup names are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * Referencing backups may exist in different instances. The existence of
    +     * any referencing backup prevents the backup from being deleted. When the
    +     * copy operation is done (either successfully completed or cancelled or the
    +     * destination backup is deleted), the reference to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param value The referencingBackups to add. + * @return This builder for chaining. + */ + public Builder addReferencingBackups(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureReferencingBackupsIsMutable(); + referencingBackups_.add(value); + bitField0_ |= 0x00002000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The names of the destination backups being created by copying
    +     * this source backup. The backup names are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * Referencing backups may exist in different instances. The existence of
    +     * any referencing backup prevents the backup from being deleted. When the
    +     * copy operation is done (either successfully completed or cancelled or the
    +     * destination backup is deleted), the reference to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param values The referencingBackups to add. + * @return This builder for chaining. + */ + public Builder addAllReferencingBackups(java.lang.Iterable values) { + ensureReferencingBackupsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, referencingBackups_); + bitField0_ |= 0x00002000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The names of the destination backups being created by copying
    +     * this source backup. The backup names are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * Referencing backups may exist in different instances. The existence of
    +     * any referencing backup prevents the backup from being deleted. When the
    +     * copy operation is done (either successfully completed or cancelled or the
    +     * destination backup is deleted), the reference to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearReferencingBackups() { + referencingBackups_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00002000); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The names of the destination backups being created by copying
    +     * this source backup. The backup names are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * Referencing backups may exist in different instances. The existence of
    +     * any referencing backup prevents the backup from being deleted. When the
    +     * copy operation is done (either successfully completed or cancelled or the
    +     * destination backup is deleted), the reference to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes of the referencingBackups to add. + * @return This builder for chaining. + */ + public Builder addReferencingBackupsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureReferencingBackupsIsMutable(); + referencingBackups_.add(value); + bitField0_ |= 0x00002000; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp maxExpireTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + maxExpireTimeBuilder_; + + /** + * + * + *
    +     * Output only. The max allowed expiration time of the backup, with
    +     * microseconds granularity. A backup's expiration time can be configured in
    +     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +     * copying an existing backup, the expiration time specified must be
    +     * less than `Backup.max_expire_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the maxExpireTime field is set. + */ + public boolean hasMaxExpireTime() { + return ((bitField0_ & 0x00004000) != 0); + } + + /** + * + * + *
    +     * Output only. The max allowed expiration time of the backup, with
    +     * microseconds granularity. A backup's expiration time can be configured in
    +     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +     * copying an existing backup, the expiration time specified must be
    +     * less than `Backup.max_expire_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The maxExpireTime. + */ + public com.google.protobuf.Timestamp getMaxExpireTime() { + if (maxExpireTimeBuilder_ == null) { + return maxExpireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : maxExpireTime_; + } else { + return maxExpireTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. The max allowed expiration time of the backup, with
    +     * microseconds granularity. A backup's expiration time can be configured in
    +     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +     * copying an existing backup, the expiration time specified must be
    +     * less than `Backup.max_expire_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setMaxExpireTime(com.google.protobuf.Timestamp value) { + if (maxExpireTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + maxExpireTime_ = value; + } else { + maxExpireTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00004000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The max allowed expiration time of the backup, with
    +     * microseconds granularity. A backup's expiration time can be configured in
    +     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +     * copying an existing backup, the expiration time specified must be
    +     * less than `Backup.max_expire_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setMaxExpireTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (maxExpireTimeBuilder_ == null) { + maxExpireTime_ = builderForValue.build(); + } else { + maxExpireTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00004000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The max allowed expiration time of the backup, with
    +     * microseconds granularity. A backup's expiration time can be configured in
    +     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +     * copying an existing backup, the expiration time specified must be
    +     * less than `Backup.max_expire_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeMaxExpireTime(com.google.protobuf.Timestamp value) { + if (maxExpireTimeBuilder_ == null) { + if (((bitField0_ & 0x00004000) != 0) + && maxExpireTime_ != null + && maxExpireTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getMaxExpireTimeBuilder().mergeFrom(value); + } else { + maxExpireTime_ = value; + } + } else { + maxExpireTimeBuilder_.mergeFrom(value); + } + if (maxExpireTime_ != null) { + bitField0_ |= 0x00004000; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. The max allowed expiration time of the backup, with
    +     * microseconds granularity. A backup's expiration time can be configured in
    +     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +     * copying an existing backup, the expiration time specified must be
    +     * less than `Backup.max_expire_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearMaxExpireTime() { + bitField0_ = (bitField0_ & ~0x00004000); + maxExpireTime_ = null; + if (maxExpireTimeBuilder_ != null) { + maxExpireTimeBuilder_.dispose(); + maxExpireTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The max allowed expiration time of the backup, with
    +     * microseconds granularity. A backup's expiration time can be configured in
    +     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +     * copying an existing backup, the expiration time specified must be
    +     * less than `Backup.max_expire_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getMaxExpireTimeBuilder() { + bitField0_ |= 0x00004000; + onChanged(); + return internalGetMaxExpireTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. The max allowed expiration time of the backup, with
    +     * microseconds granularity. A backup's expiration time can be configured in
    +     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +     * copying an existing backup, the expiration time specified must be
    +     * less than `Backup.max_expire_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getMaxExpireTimeOrBuilder() { + if (maxExpireTimeBuilder_ != null) { + return maxExpireTimeBuilder_.getMessageOrBuilder(); + } else { + return maxExpireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : maxExpireTime_; + } + } + + /** + * + * + *
    +     * Output only. The max allowed expiration time of the backup, with
    +     * microseconds granularity. A backup's expiration time can be configured in
    +     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +     * copying an existing backup, the expiration time specified must be
    +     * less than `Backup.max_expire_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetMaxExpireTimeFieldBuilder() { + if (maxExpireTimeBuilder_ == null) { + maxExpireTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getMaxExpireTime(), getParentForChildren(), isClean()); + maxExpireTime_ = null; + } + return maxExpireTimeBuilder_; + } + + private com.google.protobuf.LazyStringArrayList backupSchedules_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureBackupSchedulesIsMutable() { + if (!backupSchedules_.isModifiable()) { + backupSchedules_ = new com.google.protobuf.LazyStringArrayList(backupSchedules_); + } + bitField0_ |= 0x00008000; + } + + /** + * + * + *
    +     * Output only. List of backup schedule URIs that are associated with
    +     * creating this backup. This is only applicable for scheduled backups, and
    +     * is empty for on-demand backups.
    +     *
    +     * To optimize for storage, whenever possible, multiple schedules are
    +     * collapsed together to create one backup. In such cases, this field captures
    +     * the list of all backup schedule URIs that are associated with creating
    +     * this backup. If collapsing is not done, then this field captures the
    +     * single backup schedule URI associated with creating this backup.
    +     * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the backupSchedules. + */ + public com.google.protobuf.ProtocolStringList getBackupSchedulesList() { + backupSchedules_.makeImmutable(); + return backupSchedules_; + } + + /** + * + * + *
    +     * Output only. List of backup schedule URIs that are associated with
    +     * creating this backup. This is only applicable for scheduled backups, and
    +     * is empty for on-demand backups.
    +     *
    +     * To optimize for storage, whenever possible, multiple schedules are
    +     * collapsed together to create one backup. In such cases, this field captures
    +     * the list of all backup schedule URIs that are associated with creating
    +     * this backup. If collapsing is not done, then this field captures the
    +     * single backup schedule URI associated with creating this backup.
    +     * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The count of backupSchedules. + */ + public int getBackupSchedulesCount() { + return backupSchedules_.size(); + } + + /** + * + * + *
    +     * Output only. List of backup schedule URIs that are associated with
    +     * creating this backup. This is only applicable for scheduled backups, and
    +     * is empty for on-demand backups.
    +     *
    +     * To optimize for storage, whenever possible, multiple schedules are
    +     * collapsed together to create one backup. In such cases, this field captures
    +     * the list of all backup schedule URIs that are associated with creating
    +     * this backup. If collapsing is not done, then this field captures the
    +     * single backup schedule URI associated with creating this backup.
    +     * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The backupSchedules at the given index. + */ + public java.lang.String getBackupSchedules(int index) { + return backupSchedules_.get(index); + } + + /** + * + * + *
    +     * Output only. List of backup schedule URIs that are associated with
    +     * creating this backup. This is only applicable for scheduled backups, and
    +     * is empty for on-demand backups.
    +     *
    +     * To optimize for storage, whenever possible, multiple schedules are
    +     * collapsed together to create one backup. In such cases, this field captures
    +     * the list of all backup schedule URIs that are associated with creating
    +     * this backup. If collapsing is not done, then this field captures the
    +     * single backup schedule URI associated with creating this backup.
    +     * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the backupSchedules at the given index. + */ + public com.google.protobuf.ByteString getBackupSchedulesBytes(int index) { + return backupSchedules_.getByteString(index); + } + + /** + * + * + *
    +     * Output only. List of backup schedule URIs that are associated with
    +     * creating this backup. This is only applicable for scheduled backups, and
    +     * is empty for on-demand backups.
    +     *
    +     * To optimize for storage, whenever possible, multiple schedules are
    +     * collapsed together to create one backup. In such cases, this field captures
    +     * the list of all backup schedule URIs that are associated with creating
    +     * this backup. If collapsing is not done, then this field captures the
    +     * single backup schedule URI associated with creating this backup.
    +     * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index to set the value at. + * @param value The backupSchedules to set. + * @return This builder for chaining. + */ + public Builder setBackupSchedules(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureBackupSchedulesIsMutable(); + backupSchedules_.set(index, value); + bitField0_ |= 0x00008000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. List of backup schedule URIs that are associated with
    +     * creating this backup. This is only applicable for scheduled backups, and
    +     * is empty for on-demand backups.
    +     *
    +     * To optimize for storage, whenever possible, multiple schedules are
    +     * collapsed together to create one backup. In such cases, this field captures
    +     * the list of all backup schedule URIs that are associated with creating
    +     * this backup. If collapsing is not done, then this field captures the
    +     * single backup schedule URI associated with creating this backup.
    +     * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param value The backupSchedules to add. + * @return This builder for chaining. + */ + public Builder addBackupSchedules(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureBackupSchedulesIsMutable(); + backupSchedules_.add(value); + bitField0_ |= 0x00008000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. List of backup schedule URIs that are associated with
    +     * creating this backup. This is only applicable for scheduled backups, and
    +     * is empty for on-demand backups.
    +     *
    +     * To optimize for storage, whenever possible, multiple schedules are
    +     * collapsed together to create one backup. In such cases, this field captures
    +     * the list of all backup schedule URIs that are associated with creating
    +     * this backup. If collapsing is not done, then this field captures the
    +     * single backup schedule URI associated with creating this backup.
    +     * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param values The backupSchedules to add. + * @return This builder for chaining. + */ + public Builder addAllBackupSchedules(java.lang.Iterable values) { + ensureBackupSchedulesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, backupSchedules_); + bitField0_ |= 0x00008000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. List of backup schedule URIs that are associated with
    +     * creating this backup. This is only applicable for scheduled backups, and
    +     * is empty for on-demand backups.
    +     *
    +     * To optimize for storage, whenever possible, multiple schedules are
    +     * collapsed together to create one backup. In such cases, this field captures
    +     * the list of all backup schedule URIs that are associated with creating
    +     * this backup. If collapsing is not done, then this field captures the
    +     * single backup schedule URI associated with creating this backup.
    +     * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearBackupSchedules() { + backupSchedules_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00008000); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. List of backup schedule URIs that are associated with
    +     * creating this backup. This is only applicable for scheduled backups, and
    +     * is empty for on-demand backups.
    +     *
    +     * To optimize for storage, whenever possible, multiple schedules are
    +     * collapsed together to create one backup. In such cases, this field captures
    +     * the list of all backup schedule URIs that are associated with creating
    +     * this backup. If collapsing is not done, then this field captures the
    +     * single backup schedule URI associated with creating this backup.
    +     * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes of the backupSchedules to add. + * @return This builder for chaining. + */ + public Builder addBackupSchedulesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureBackupSchedulesIsMutable(); + backupSchedules_.add(value); + bitField0_ |= 0x00008000; + onChanged(); + return this; + } + + private java.lang.Object incrementalBackupChainId_ = ""; + + /** + * + * + *
    +     * Output only. Populated only for backups in an incremental backup chain.
    +     * Backups share the same chain id if and only if they belong to the same
    +     * incremental backup chain. Use this field to determine which backups are
    +     * part of the same incremental backup chain. The ordering of backups in the
    +     * chain can be determined by ordering the backup `version_time`.
    +     * 
    + * + * string incremental_backup_chain_id = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The incrementalBackupChainId. + */ + public java.lang.String getIncrementalBackupChainId() { + java.lang.Object ref = incrementalBackupChainId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + incrementalBackupChainId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Output only. Populated only for backups in an incremental backup chain.
    +     * Backups share the same chain id if and only if they belong to the same
    +     * incremental backup chain. Use this field to determine which backups are
    +     * part of the same incremental backup chain. The ordering of backups in the
    +     * chain can be determined by ordering the backup `version_time`.
    +     * 
    + * + * string incremental_backup_chain_id = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The bytes for incrementalBackupChainId. + */ + public com.google.protobuf.ByteString getIncrementalBackupChainIdBytes() { + java.lang.Object ref = incrementalBackupChainId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + incrementalBackupChainId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Output only. Populated only for backups in an incremental backup chain.
    +     * Backups share the same chain id if and only if they belong to the same
    +     * incremental backup chain. Use this field to determine which backups are
    +     * part of the same incremental backup chain. The ordering of backups in the
    +     * chain can be determined by ordering the backup `version_time`.
    +     * 
    + * + * string incremental_backup_chain_id = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The incrementalBackupChainId to set. + * @return This builder for chaining. + */ + public Builder setIncrementalBackupChainId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + incrementalBackupChainId_ = value; + bitField0_ |= 0x00010000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Populated only for backups in an incremental backup chain.
    +     * Backups share the same chain id if and only if they belong to the same
    +     * incremental backup chain. Use this field to determine which backups are
    +     * part of the same incremental backup chain. The ordering of backups in the
    +     * chain can be determined by ordering the backup `version_time`.
    +     * 
    + * + * string incremental_backup_chain_id = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearIncrementalBackupChainId() { + incrementalBackupChainId_ = getDefaultInstance().getIncrementalBackupChainId(); + bitField0_ = (bitField0_ & ~0x00010000); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Populated only for backups in an incremental backup chain.
    +     * Backups share the same chain id if and only if they belong to the same
    +     * incremental backup chain. Use this field to determine which backups are
    +     * part of the same incremental backup chain. The ordering of backups in the
    +     * chain can be determined by ordering the backup `version_time`.
    +     * 
    + * + * string incremental_backup_chain_id = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The bytes for incrementalBackupChainId to set. + * @return This builder for chaining. + */ + public Builder setIncrementalBackupChainIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + incrementalBackupChainId_ = value; + bitField0_ |= 0x00010000; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp oldestVersionTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + oldestVersionTimeBuilder_; + + /** + * + * + *
    +     * Output only. Data deleted at a time older than this is guaranteed not to be
    +     * retained in order to support this backup. For a backup in an incremental
    +     * backup chain, this is the version time of the oldest backup that exists or
    +     * ever existed in the chain. For all other backups, this is the version time
    +     * of the backup. This field can be used to understand what data is being
    +     * retained by the backup system.
    +     * 
    + * + * + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the oldestVersionTime field is set. + */ + public boolean hasOldestVersionTime() { + return ((bitField0_ & 0x00020000) != 0); + } + + /** + * + * + *
    +     * Output only. Data deleted at a time older than this is guaranteed not to be
    +     * retained in order to support this backup. For a backup in an incremental
    +     * backup chain, this is the version time of the oldest backup that exists or
    +     * ever existed in the chain. For all other backups, this is the version time
    +     * of the backup. This field can be used to understand what data is being
    +     * retained by the backup system.
    +     * 
    + * + * + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The oldestVersionTime. + */ + public com.google.protobuf.Timestamp getOldestVersionTime() { + if (oldestVersionTimeBuilder_ == null) { + return oldestVersionTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : oldestVersionTime_; + } else { + return oldestVersionTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. Data deleted at a time older than this is guaranteed not to be
    +     * retained in order to support this backup. For a backup in an incremental
    +     * backup chain, this is the version time of the oldest backup that exists or
    +     * ever existed in the chain. For all other backups, this is the version time
    +     * of the backup. This field can be used to understand what data is being
    +     * retained by the backup system.
    +     * 
    + * + * + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setOldestVersionTime(com.google.protobuf.Timestamp value) { + if (oldestVersionTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + oldestVersionTime_ = value; + } else { + oldestVersionTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00020000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Data deleted at a time older than this is guaranteed not to be
    +     * retained in order to support this backup. For a backup in an incremental
    +     * backup chain, this is the version time of the oldest backup that exists or
    +     * ever existed in the chain. For all other backups, this is the version time
    +     * of the backup. This field can be used to understand what data is being
    +     * retained by the backup system.
    +     * 
    + * + * + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setOldestVersionTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (oldestVersionTimeBuilder_ == null) { + oldestVersionTime_ = builderForValue.build(); + } else { + oldestVersionTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00020000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Data deleted at a time older than this is guaranteed not to be
    +     * retained in order to support this backup. For a backup in an incremental
    +     * backup chain, this is the version time of the oldest backup that exists or
    +     * ever existed in the chain. For all other backups, this is the version time
    +     * of the backup. This field can be used to understand what data is being
    +     * retained by the backup system.
    +     * 
    + * + * + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeOldestVersionTime(com.google.protobuf.Timestamp value) { + if (oldestVersionTimeBuilder_ == null) { + if (((bitField0_ & 0x00020000) != 0) + && oldestVersionTime_ != null + && oldestVersionTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getOldestVersionTimeBuilder().mergeFrom(value); + } else { + oldestVersionTime_ = value; + } + } else { + oldestVersionTimeBuilder_.mergeFrom(value); + } + if (oldestVersionTime_ != null) { + bitField0_ |= 0x00020000; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. Data deleted at a time older than this is guaranteed not to be
    +     * retained in order to support this backup. For a backup in an incremental
    +     * backup chain, this is the version time of the oldest backup that exists or
    +     * ever existed in the chain. For all other backups, this is the version time
    +     * of the backup. This field can be used to understand what data is being
    +     * retained by the backup system.
    +     * 
    + * + * + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearOldestVersionTime() { + bitField0_ = (bitField0_ & ~0x00020000); + oldestVersionTime_ = null; + if (oldestVersionTimeBuilder_ != null) { + oldestVersionTimeBuilder_.dispose(); + oldestVersionTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Data deleted at a time older than this is guaranteed not to be
    +     * retained in order to support this backup. For a backup in an incremental
    +     * backup chain, this is the version time of the oldest backup that exists or
    +     * ever existed in the chain. For all other backups, this is the version time
    +     * of the backup. This field can be used to understand what data is being
    +     * retained by the backup system.
    +     * 
    + * + * + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getOldestVersionTimeBuilder() { + bitField0_ |= 0x00020000; + onChanged(); + return internalGetOldestVersionTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. Data deleted at a time older than this is guaranteed not to be
    +     * retained in order to support this backup. For a backup in an incremental
    +     * backup chain, this is the version time of the oldest backup that exists or
    +     * ever existed in the chain. For all other backups, this is the version time
    +     * of the backup. This field can be used to understand what data is being
    +     * retained by the backup system.
    +     * 
    + * + * + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getOldestVersionTimeOrBuilder() { + if (oldestVersionTimeBuilder_ != null) { + return oldestVersionTimeBuilder_.getMessageOrBuilder(); + } else { + return oldestVersionTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : oldestVersionTime_; + } + } + + /** + * + * + *
    +     * Output only. Data deleted at a time older than this is guaranteed not to be
    +     * retained in order to support this backup. For a backup in an incremental
    +     * backup chain, this is the version time of the oldest backup that exists or
    +     * ever existed in the chain. For all other backups, this is the version time
    +     * of the backup. This field can be used to understand what data is being
    +     * retained by the backup system.
    +     * 
    + * + * + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetOldestVersionTimeFieldBuilder() { + if (oldestVersionTimeBuilder_ == null) { + oldestVersionTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getOldestVersionTime(), getParentForChildren(), isClean()); + oldestVersionTime_ = null; + } + return oldestVersionTimeBuilder_; + } + + private java.util.List + instancePartitions_ = java.util.Collections.emptyList(); + + private void ensureInstancePartitionsIsMutable() { + if (!((bitField0_ & 0x00040000) != 0)) { + instancePartitions_ = + new java.util.ArrayList( + instancePartitions_); + bitField0_ |= 0x00040000; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.BackupInstancePartition, + com.google.spanner.admin.database.v1.BackupInstancePartition.Builder, + com.google.spanner.admin.database.v1.BackupInstancePartitionOrBuilder> + instancePartitionsBuilder_; + + /** + * + * + *
    +     * Output only. The instance partition(s) storing the backup.
    +     *
    +     * This is the same as the list of the instance partition(s) that the database
    +     * had footprint in at the backup's `version_time`.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getInstancePartitionsList() { + if (instancePartitionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(instancePartitions_); + } else { + return instancePartitionsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Output only. The instance partition(s) storing the backup.
    +     *
    +     * This is the same as the list of the instance partition(s) that the database
    +     * had footprint in at the backup's `version_time`.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public int getInstancePartitionsCount() { + if (instancePartitionsBuilder_ == null) { + return instancePartitions_.size(); + } else { + return instancePartitionsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Output only. The instance partition(s) storing the backup.
    +     *
    +     * This is the same as the list of the instance partition(s) that the database
    +     * had footprint in at the backup's `version_time`.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.BackupInstancePartition getInstancePartitions( + int index) { + if (instancePartitionsBuilder_ == null) { + return instancePartitions_.get(index); + } else { + return instancePartitionsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Output only. The instance partition(s) storing the backup.
    +     *
    +     * This is the same as the list of the instance partition(s) that the database
    +     * had footprint in at the backup's `version_time`.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setInstancePartitions( + int index, com.google.spanner.admin.database.v1.BackupInstancePartition value) { + if (instancePartitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureInstancePartitionsIsMutable(); + instancePartitions_.set(index, value); + onChanged(); + } else { + instancePartitionsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Output only. The instance partition(s) storing the backup.
    +     *
    +     * This is the same as the list of the instance partition(s) that the database
    +     * had footprint in at the backup's `version_time`.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setInstancePartitions( + int index, + com.google.spanner.admin.database.v1.BackupInstancePartition.Builder builderForValue) { + if (instancePartitionsBuilder_ == null) { + ensureInstancePartitionsIsMutable(); + instancePartitions_.set(index, builderForValue.build()); + onChanged(); + } else { + instancePartitionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Output only. The instance partition(s) storing the backup.
    +     *
    +     * This is the same as the list of the instance partition(s) that the database
    +     * had footprint in at the backup's `version_time`.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addInstancePartitions( + com.google.spanner.admin.database.v1.BackupInstancePartition value) { + if (instancePartitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureInstancePartitionsIsMutable(); + instancePartitions_.add(value); + onChanged(); + } else { + instancePartitionsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Output only. The instance partition(s) storing the backup.
    +     *
    +     * This is the same as the list of the instance partition(s) that the database
    +     * had footprint in at the backup's `version_time`.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addInstancePartitions( + int index, com.google.spanner.admin.database.v1.BackupInstancePartition value) { + if (instancePartitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureInstancePartitionsIsMutable(); + instancePartitions_.add(index, value); + onChanged(); + } else { + instancePartitionsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Output only. The instance partition(s) storing the backup.
    +     *
    +     * This is the same as the list of the instance partition(s) that the database
    +     * had footprint in at the backup's `version_time`.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addInstancePartitions( + com.google.spanner.admin.database.v1.BackupInstancePartition.Builder builderForValue) { + if (instancePartitionsBuilder_ == null) { + ensureInstancePartitionsIsMutable(); + instancePartitions_.add(builderForValue.build()); + onChanged(); + } else { + instancePartitionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Output only. The instance partition(s) storing the backup.
    +     *
    +     * This is the same as the list of the instance partition(s) that the database
    +     * had footprint in at the backup's `version_time`.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addInstancePartitions( + int index, + com.google.spanner.admin.database.v1.BackupInstancePartition.Builder builderForValue) { + if (instancePartitionsBuilder_ == null) { + ensureInstancePartitionsIsMutable(); + instancePartitions_.add(index, builderForValue.build()); + onChanged(); + } else { + instancePartitionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Output only. The instance partition(s) storing the backup.
    +     *
    +     * This is the same as the list of the instance partition(s) that the database
    +     * had footprint in at the backup's `version_time`.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addAllInstancePartitions( + java.lang.Iterable + values) { + if (instancePartitionsBuilder_ == null) { + ensureInstancePartitionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, instancePartitions_); + onChanged(); + } else { + instancePartitionsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Output only. The instance partition(s) storing the backup.
    +     *
    +     * This is the same as the list of the instance partition(s) that the database
    +     * had footprint in at the backup's `version_time`.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearInstancePartitions() { + if (instancePartitionsBuilder_ == null) { + instancePartitions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00040000); + onChanged(); + } else { + instancePartitionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Output only. The instance partition(s) storing the backup.
    +     *
    +     * This is the same as the list of the instance partition(s) that the database
    +     * had footprint in at the backup's `version_time`.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder removeInstancePartitions(int index) { + if (instancePartitionsBuilder_ == null) { + ensureInstancePartitionsIsMutable(); + instancePartitions_.remove(index); + onChanged(); + } else { + instancePartitionsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Output only. The instance partition(s) storing the backup.
    +     *
    +     * This is the same as the list of the instance partition(s) that the database
    +     * had footprint in at the backup's `version_time`.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.BackupInstancePartition.Builder + getInstancePartitionsBuilder(int index) { + return internalGetInstancePartitionsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Output only. The instance partition(s) storing the backup.
    +     *
    +     * This is the same as the list of the instance partition(s) that the database
    +     * had footprint in at the backup's `version_time`.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.BackupInstancePartitionOrBuilder + getInstancePartitionsOrBuilder(int index) { + if (instancePartitionsBuilder_ == null) { + return instancePartitions_.get(index); + } else { + return instancePartitionsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Output only. The instance partition(s) storing the backup.
    +     *
    +     * This is the same as the list of the instance partition(s) that the database
    +     * had footprint in at the backup's `version_time`.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List< + ? extends com.google.spanner.admin.database.v1.BackupInstancePartitionOrBuilder> + getInstancePartitionsOrBuilderList() { + if (instancePartitionsBuilder_ != null) { + return instancePartitionsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(instancePartitions_); + } + } + + /** + * + * + *
    +     * Output only. The instance partition(s) storing the backup.
    +     *
    +     * This is the same as the list of the instance partition(s) that the database
    +     * had footprint in at the backup's `version_time`.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.BackupInstancePartition.Builder + addInstancePartitionsBuilder() { + return internalGetInstancePartitionsFieldBuilder() + .addBuilder( + com.google.spanner.admin.database.v1.BackupInstancePartition.getDefaultInstance()); + } + + /** + * + * + *
    +     * Output only. The instance partition(s) storing the backup.
    +     *
    +     * This is the same as the list of the instance partition(s) that the database
    +     * had footprint in at the backup's `version_time`.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.BackupInstancePartition.Builder + addInstancePartitionsBuilder(int index) { + return internalGetInstancePartitionsFieldBuilder() + .addBuilder( + index, + com.google.spanner.admin.database.v1.BackupInstancePartition.getDefaultInstance()); + } + + /** + * + * + *
    +     * Output only. The instance partition(s) storing the backup.
    +     *
    +     * This is the same as the list of the instance partition(s) that the database
    +     * had footprint in at the backup's `version_time`.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getInstancePartitionsBuilderList() { + return internalGetInstancePartitionsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.BackupInstancePartition, + com.google.spanner.admin.database.v1.BackupInstancePartition.Builder, + com.google.spanner.admin.database.v1.BackupInstancePartitionOrBuilder> + internalGetInstancePartitionsFieldBuilder() { + if (instancePartitionsBuilder_ == null) { + instancePartitionsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.BackupInstancePartition, + com.google.spanner.admin.database.v1.BackupInstancePartition.Builder, + com.google.spanner.admin.database.v1.BackupInstancePartitionOrBuilder>( + instancePartitions_, + ((bitField0_ & 0x00040000) != 0), + getParentForChildren(), + isClean()); + instancePartitions_ = null; + } + return instancePartitionsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.Backup) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.Backup) + private static final com.google.spanner.admin.database.v1.Backup DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.Backup(); + } + + public static com.google.spanner.admin.database.v1.Backup getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Backup parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, 
extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.Backup getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupInfo.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupInfo.java new file mode 100644 index 000000000000..ce202a7a5a17 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupInfo.java @@ -0,0 +1,1423 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Information about a backup.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.BackupInfo} + */ +@com.google.protobuf.Generated +public final class BackupInfo extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.BackupInfo) + BackupInfoOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BackupInfo"); + } + + // Use BackupInfo.newBuilder() to construct. + private BackupInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BackupInfo() { + backup_ = ""; + sourceDatabase_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_BackupInfo_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_BackupInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.BackupInfo.class, + com.google.spanner.admin.database.v1.BackupInfo.Builder.class); + } + + private int bitField0_; + public static final int BACKUP_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object backup_ = ""; + + /** + * + * + *
    +   * Name of the backup.
    +   * 
    + * + * string backup = 1 [(.google.api.resource_reference) = { ... } + * + * @return The backup. + */ + @java.lang.Override + public java.lang.String getBackup() { + java.lang.Object ref = backup_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backup_ = s; + return s; + } + } + + /** + * + * + *
    +   * Name of the backup.
    +   * 
    + * + * string backup = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for backup. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBackupBytes() { + java.lang.Object ref = backup_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int VERSION_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp versionTime_; + + /** + * + * + *
    +   * The backup contains an externally consistent copy of `source_database` at
    +   * the timestamp specified by `version_time`. If the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request did not specify `version_time`, the `version_time` of the backup is
    +   * equivalent to the `create_time`.
    +   * 
    + * + * .google.protobuf.Timestamp version_time = 4; + * + * @return Whether the versionTime field is set. + */ + @java.lang.Override + public boolean hasVersionTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The backup contains an externally consistent copy of `source_database` at
    +   * the timestamp specified by `version_time`. If the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request did not specify `version_time`, the `version_time` of the backup is
    +   * equivalent to the `create_time`.
    +   * 
    + * + * .google.protobuf.Timestamp version_time = 4; + * + * @return The versionTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getVersionTime() { + return versionTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : versionTime_; + } + + /** + * + * + *
    +   * The backup contains an externally consistent copy of `source_database` at
    +   * the timestamp specified by `version_time`. If the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request did not specify `version_time`, the `version_time` of the backup is
    +   * equivalent to the `create_time`.
    +   * 
    + * + * .google.protobuf.Timestamp version_time = 4; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getVersionTimeOrBuilder() { + return versionTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : versionTime_; + } + + public static final int CREATE_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp createTime_; + + /** + * + * + *
    +   * The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 2; + * + * @return Whether the createTime field is set. + */ + @java.lang.Override + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 2; + * + * @return The createTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + /** + * + * + *
    +   * The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 2; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + public static final int SOURCE_DATABASE_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object sourceDatabase_ = ""; + + /** + * + * + *
    +   * Name of the database the backup was created from.
    +   * 
    + * + * string source_database = 3 [(.google.api.resource_reference) = { ... } + * + * @return The sourceDatabase. + */ + @java.lang.Override + public java.lang.String getSourceDatabase() { + java.lang.Object ref = sourceDatabase_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sourceDatabase_ = s; + return s; + } + } + + /** + * + * + *
    +   * Name of the database the backup was created from.
    +   * 
    + * + * string source_database = 3 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for sourceDatabase. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSourceDatabaseBytes() { + java.lang.Object ref = sourceDatabase_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sourceDatabase_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(backup_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, backup_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getCreateTime()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sourceDatabase_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, sourceDatabase_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(4, getVersionTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(backup_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, backup_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getCreateTime()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sourceDatabase_)) { + size += 
com.google.protobuf.GeneratedMessage.computeStringSize(3, sourceDatabase_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getVersionTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.BackupInfo)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.BackupInfo other = + (com.google.spanner.admin.database.v1.BackupInfo) obj; + + if (!getBackup().equals(other.getBackup())) return false; + if (hasVersionTime() != other.hasVersionTime()) return false; + if (hasVersionTime()) { + if (!getVersionTime().equals(other.getVersionTime())) return false; + } + if (hasCreateTime() != other.hasCreateTime()) return false; + if (hasCreateTime()) { + if (!getCreateTime().equals(other.getCreateTime())) return false; + } + if (!getSourceDatabase().equals(other.getSourceDatabase())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + BACKUP_FIELD_NUMBER; + hash = (53 * hash) + getBackup().hashCode(); + if (hasVersionTime()) { + hash = (37 * hash) + VERSION_TIME_FIELD_NUMBER; + hash = (53 * hash) + getVersionTime().hashCode(); + } + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + } + hash = (37 * hash) + SOURCE_DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getSourceDatabase().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static 
com.google.spanner.admin.database.v1.BackupInfo parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.BackupInfo parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.BackupInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.BackupInfo parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.BackupInfo parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static 
com.google.spanner.admin.database.v1.BackupInfo parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.BackupInfo parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupInfo parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.BackupInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.admin.database.v1.BackupInfo prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Information about a backup.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.BackupInfo} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.BackupInfo) + com.google.spanner.admin.database.v1.BackupInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_BackupInfo_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_BackupInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.BackupInfo.class, + com.google.spanner.admin.database.v1.BackupInfo.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.BackupInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetVersionTimeFieldBuilder(); + internalGetCreateTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + backup_ = ""; + versionTime_ = null; + if (versionTimeBuilder_ != null) { + versionTimeBuilder_.dispose(); + versionTimeBuilder_ = null; + } + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + sourceDatabase_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_BackupInfo_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupInfo getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.BackupInfo.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupInfo build() { + com.google.spanner.admin.database.v1.BackupInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupInfo buildPartial() { + com.google.spanner.admin.database.v1.BackupInfo result = + new com.google.spanner.admin.database.v1.BackupInfo(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.BackupInfo result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.backup_ = backup_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.versionTime_ = + versionTimeBuilder_ == null ? versionTime_ : versionTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.createTime_ = createTimeBuilder_ == null ? 
createTime_ : createTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.sourceDatabase_ = sourceDatabase_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.BackupInfo) { + return mergeFrom((com.google.spanner.admin.database.v1.BackupInfo) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.BackupInfo other) { + if (other == com.google.spanner.admin.database.v1.BackupInfo.getDefaultInstance()) + return this; + if (!other.getBackup().isEmpty()) { + backup_ = other.backup_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasVersionTime()) { + mergeVersionTime(other.getVersionTime()); + } + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (!other.getSourceDatabase().isEmpty()) { + sourceDatabase_ = other.sourceDatabase_; + bitField0_ |= 0x00000008; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + backup_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetCreateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 18 + case 26: + { + sourceDatabase_ = 
input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetVersionTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object backup_ = ""; + + /** + * + * + *
    +     * Name of the backup.
    +     * 
    + * + * string backup = 1 [(.google.api.resource_reference) = { ... } + * + * @return The backup. + */ + public java.lang.String getBackup() { + java.lang.Object ref = backup_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backup_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Name of the backup.
    +     * 
    + * + * string backup = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for backup. + */ + public com.google.protobuf.ByteString getBackupBytes() { + java.lang.Object ref = backup_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Name of the backup.
    +     * 
    + * + * string backup = 1 [(.google.api.resource_reference) = { ... } + * + * @param value The backup to set. + * @return This builder for chaining. + */ + public Builder setBackup(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + backup_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Name of the backup.
    +     * 
    + * + * string backup = 1 [(.google.api.resource_reference) = { ... } + * + * @return This builder for chaining. + */ + public Builder clearBackup() { + backup_ = getDefaultInstance().getBackup(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Name of the backup.
    +     * 
    + * + * string backup = 1 [(.google.api.resource_reference) = { ... } + * + * @param value The bytes for backup to set. + * @return This builder for chaining. + */ + public Builder setBackupBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + backup_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp versionTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + versionTimeBuilder_; + + /** + * + * + *
    +     * The backup contains an externally consistent copy of `source_database` at
    +     * the timestamp specified by `version_time`. If the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request did not specify `version_time`, the `version_time` of the backup is
    +     * equivalent to the `create_time`.
    +     * 
    + * + * .google.protobuf.Timestamp version_time = 4; + * + * @return Whether the versionTime field is set. + */ + public boolean hasVersionTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * The backup contains an externally consistent copy of `source_database` at
    +     * the timestamp specified by `version_time`. If the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request did not specify `version_time`, the `version_time` of the backup is
    +     * equivalent to the `create_time`.
    +     * 
    + * + * .google.protobuf.Timestamp version_time = 4; + * + * @return The versionTime. + */ + public com.google.protobuf.Timestamp getVersionTime() { + if (versionTimeBuilder_ == null) { + return versionTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : versionTime_; + } else { + return versionTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The backup contains an externally consistent copy of `source_database` at
    +     * the timestamp specified by `version_time`. If the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request did not specify `version_time`, the `version_time` of the backup is
    +     * equivalent to the `create_time`.
    +     * 
    + * + * .google.protobuf.Timestamp version_time = 4; + */ + public Builder setVersionTime(com.google.protobuf.Timestamp value) { + if (versionTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + versionTime_ = value; + } else { + versionTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The backup contains an externally consistent copy of `source_database` at
    +     * the timestamp specified by `version_time`. If the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request did not specify `version_time`, the `version_time` of the backup is
    +     * equivalent to the `create_time`.
    +     * 
    + * + * .google.protobuf.Timestamp version_time = 4; + */ + public Builder setVersionTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (versionTimeBuilder_ == null) { + versionTime_ = builderForValue.build(); + } else { + versionTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The backup contains an externally consistent copy of `source_database` at
    +     * the timestamp specified by `version_time`. If the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request did not specify `version_time`, the `version_time` of the backup is
    +     * equivalent to the `create_time`.
    +     * 
    + * + * .google.protobuf.Timestamp version_time = 4; + */ + public Builder mergeVersionTime(com.google.protobuf.Timestamp value) { + if (versionTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && versionTime_ != null + && versionTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getVersionTimeBuilder().mergeFrom(value); + } else { + versionTime_ = value; + } + } else { + versionTimeBuilder_.mergeFrom(value); + } + if (versionTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The backup contains an externally consistent copy of `source_database` at
    +     * the timestamp specified by `version_time`. If the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request did not specify `version_time`, the `version_time` of the backup is
    +     * equivalent to the `create_time`.
    +     * 
    + * + * .google.protobuf.Timestamp version_time = 4; + */ + public Builder clearVersionTime() { + bitField0_ = (bitField0_ & ~0x00000002); + versionTime_ = null; + if (versionTimeBuilder_ != null) { + versionTimeBuilder_.dispose(); + versionTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The backup contains an externally consistent copy of `source_database` at
    +     * the timestamp specified by `version_time`. If the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request did not specify `version_time`, the `version_time` of the backup is
    +     * equivalent to the `create_time`.
    +     * 
    + * + * .google.protobuf.Timestamp version_time = 4; + */ + public com.google.protobuf.Timestamp.Builder getVersionTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetVersionTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The backup contains an externally consistent copy of `source_database` at
    +     * the timestamp specified by `version_time`. If the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request did not specify `version_time`, the `version_time` of the backup is
    +     * equivalent to the `create_time`.
    +     * 
    + * + * .google.protobuf.Timestamp version_time = 4; + */ + public com.google.protobuf.TimestampOrBuilder getVersionTimeOrBuilder() { + if (versionTimeBuilder_ != null) { + return versionTimeBuilder_.getMessageOrBuilder(); + } else { + return versionTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : versionTime_; + } + } + + /** + * + * + *
    +     * The backup contains an externally consistent copy of `source_database` at
    +     * the timestamp specified by `version_time`. If the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request did not specify `version_time`, the `version_time` of the backup is
    +     * equivalent to the `create_time`.
    +     * 
    + * + * .google.protobuf.Timestamp version_time = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetVersionTimeFieldBuilder() { + if (versionTimeBuilder_ == null) { + versionTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getVersionTime(), getParentForChildren(), isClean()); + versionTime_ = null; + } + return versionTimeBuilder_; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + + /** + * + * + *
    +     * The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp create_time = 2; + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp create_time = 2; + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp create_time = 2; + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + } else { + createTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp create_time = 2; + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp create_time = 2; + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && createTime_ != null + && createTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCreateTimeBuilder().mergeFrom(value); + } else { + createTime_ = value; + } + } else { + createTimeBuilder_.mergeFrom(value); + } + if (createTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp create_time = 2; + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000004); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp create_time = 2; + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetCreateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp create_time = 2; + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + + /** + * + * + *
    +     * The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp create_time = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private java.lang.Object sourceDatabase_ = ""; + + /** + * + * + *
    +     * Name of the database the backup was created from.
    +     * 
    + * + * string source_database = 3 [(.google.api.resource_reference) = { ... } + * + * @return The sourceDatabase. + */ + public java.lang.String getSourceDatabase() { + java.lang.Object ref = sourceDatabase_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sourceDatabase_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Name of the database the backup was created from.
    +     * 
    + * + * string source_database = 3 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for sourceDatabase. + */ + public com.google.protobuf.ByteString getSourceDatabaseBytes() { + java.lang.Object ref = sourceDatabase_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sourceDatabase_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Name of the database the backup was created from.
    +     * 
    + * + * string source_database = 3 [(.google.api.resource_reference) = { ... } + * + * @param value The sourceDatabase to set. + * @return This builder for chaining. + */ + public Builder setSourceDatabase(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + sourceDatabase_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Name of the database the backup was created from.
    +     * 
    + * + * string source_database = 3 [(.google.api.resource_reference) = { ... } + * + * @return This builder for chaining. + */ + public Builder clearSourceDatabase() { + sourceDatabase_ = getDefaultInstance().getSourceDatabase(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Name of the database the backup was created from.
    +     * 
    + * + * string source_database = 3 [(.google.api.resource_reference) = { ... } + * + * @param value The bytes for sourceDatabase to set. + * @return This builder for chaining. + */ + public Builder setSourceDatabaseBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + sourceDatabase_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.BackupInfo) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.BackupInfo) + private static final com.google.spanner.admin.database.v1.BackupInfo DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.BackupInfo(); + } + + public static com.google.spanner.admin.database.v1.BackupInfo getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BackupInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + 
return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupInfo getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupInfoOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupInfoOrBuilder.java new file mode 100644 index 000000000000..02c959477ee5 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupInfoOrBuilder.java @@ -0,0 +1,172 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface BackupInfoOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.BackupInfo) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Name of the backup.
    +   * 
    + * + * string backup = 1 [(.google.api.resource_reference) = { ... } + * + * @return The backup. + */ + java.lang.String getBackup(); + + /** + * + * + *
    +   * Name of the backup.
    +   * 
    + * + * string backup = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for backup. + */ + com.google.protobuf.ByteString getBackupBytes(); + + /** + * + * + *
    +   * The backup contains an externally consistent copy of `source_database` at
    +   * the timestamp specified by `version_time`. If the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request did not specify `version_time`, the `version_time` of the backup is
    +   * equivalent to the `create_time`.
    +   * 
    + * + * .google.protobuf.Timestamp version_time = 4; + * + * @return Whether the versionTime field is set. + */ + boolean hasVersionTime(); + + /** + * + * + *
    +   * The backup contains an externally consistent copy of `source_database` at
    +   * the timestamp specified by `version_time`. If the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request did not specify `version_time`, the `version_time` of the backup is
    +   * equivalent to the `create_time`.
    +   * 
    + * + * .google.protobuf.Timestamp version_time = 4; + * + * @return The versionTime. + */ + com.google.protobuf.Timestamp getVersionTime(); + + /** + * + * + *
    +   * The backup contains an externally consistent copy of `source_database` at
    +   * the timestamp specified by `version_time`. If the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request did not specify `version_time`, the `version_time` of the backup is
    +   * equivalent to the `create_time`.
    +   * 
    + * + * .google.protobuf.Timestamp version_time = 4; + */ + com.google.protobuf.TimestampOrBuilder getVersionTimeOrBuilder(); + + /** + * + * + *
    +   * The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 2; + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + + /** + * + * + *
    +   * The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 2; + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + + /** + * + * + *
    +   * The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 2; + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
    +   * Name of the database the backup was created from.
    +   * 
    + * + * string source_database = 3 [(.google.api.resource_reference) = { ... } + * + * @return The sourceDatabase. + */ + java.lang.String getSourceDatabase(); + + /** + * + * + *
    +   * Name of the database the backup was created from.
    +   * 
    + * + * string source_database = 3 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for sourceDatabase. + */ + com.google.protobuf.ByteString getSourceDatabaseBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupInstancePartition.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupInstancePartition.java new file mode 100644 index 000000000000..c95133cddd87 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupInstancePartition.java @@ -0,0 +1,608 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Instance partition information for the backup.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.BackupInstancePartition} + */ +@com.google.protobuf.Generated +public final class BackupInstancePartition extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.BackupInstancePartition) + BackupInstancePartitionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BackupInstancePartition"); + } + + // Use BackupInstancePartition.newBuilder() to construct. + private BackupInstancePartition(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BackupInstancePartition() { + instancePartition_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_BackupInstancePartition_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_BackupInstancePartition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.BackupInstancePartition.class, + com.google.spanner.admin.database.v1.BackupInstancePartition.Builder.class); + } + + public static final int INSTANCE_PARTITION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object instancePartition_ = ""; + + /** + * + * + *
    +   * A unique identifier for the instance partition. Values are of the form
    +   * `projects/<project>/instances/<instance>/instancePartitions/<instance_partition_id>`
    +   * 
    + * + * string instance_partition = 1 [(.google.api.resource_reference) = { ... } + * + * @return The instancePartition. + */ + @java.lang.Override + public java.lang.String getInstancePartition() { + java.lang.Object ref = instancePartition_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instancePartition_ = s; + return s; + } + } + + /** + * + * + *
    +   * A unique identifier for the instance partition. Values are of the form
    +   * `projects/<project>/instances/<instance>/instancePartitions/<instance_partition_id>`
    +   * 
    + * + * string instance_partition = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for instancePartition. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstancePartitionBytes() { + java.lang.Object ref = instancePartition_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instancePartition_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instancePartition_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, instancePartition_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instancePartition_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, instancePartition_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.BackupInstancePartition)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.BackupInstancePartition other = + (com.google.spanner.admin.database.v1.BackupInstancePartition) obj; + + if (!getInstancePartition().equals(other.getInstancePartition())) return 
false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + INSTANCE_PARTITION_FIELD_NUMBER; + hash = (53 * hash) + getInstancePartition().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.BackupInstancePartition parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.BackupInstancePartition parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupInstancePartition parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.BackupInstancePartition parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupInstancePartition parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.BackupInstancePartition parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupInstancePartition parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.BackupInstancePartition parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupInstancePartition parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.BackupInstancePartition parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupInstancePartition parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.BackupInstancePartition parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + 
com.google.spanner.admin.database.v1.BackupInstancePartition prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Instance partition information for the backup.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.BackupInstancePartition} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.BackupInstancePartition) + com.google.spanner.admin.database.v1.BackupInstancePartitionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_BackupInstancePartition_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_BackupInstancePartition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.BackupInstancePartition.class, + com.google.spanner.admin.database.v1.BackupInstancePartition.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.BackupInstancePartition.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + instancePartition_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_BackupInstancePartition_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupInstancePartition + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.BackupInstancePartition.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.admin.database.v1.BackupInstancePartition build() { + com.google.spanner.admin.database.v1.BackupInstancePartition result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupInstancePartition buildPartial() { + com.google.spanner.admin.database.v1.BackupInstancePartition result = + new com.google.spanner.admin.database.v1.BackupInstancePartition(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.BackupInstancePartition result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.instancePartition_ = instancePartition_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.BackupInstancePartition) { + return mergeFrom((com.google.spanner.admin.database.v1.BackupInstancePartition) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.BackupInstancePartition other) { + if (other + == com.google.spanner.admin.database.v1.BackupInstancePartition.getDefaultInstance()) + return this; + if (!other.getInstancePartition().isEmpty()) { + instancePartition_ = other.instancePartition_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + 
boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + instancePartition_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object instancePartition_ = ""; + + /** + * + * + *
    +     * A unique identifier for the instance partition. Values are of the form
    +     * `projects/<project>/instances/<instance>/instancePartitions/<instance_partition_id>`
    +     * 
    + * + * string instance_partition = 1 [(.google.api.resource_reference) = { ... } + * + * @return The instancePartition. + */ + public java.lang.String getInstancePartition() { + java.lang.Object ref = instancePartition_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instancePartition_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * A unique identifier for the instance partition. Values are of the form
    +     * `projects/<project>/instances/<instance>/instancePartitions/<instance_partition_id>`
    +     * 
    + * + * string instance_partition = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for instancePartition. + */ + public com.google.protobuf.ByteString getInstancePartitionBytes() { + java.lang.Object ref = instancePartition_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instancePartition_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * A unique identifier for the instance partition. Values are of the form
    +     * `projects/<project>/instances/<instance>/instancePartitions/<instance_partition_id>`
    +     * 
    + * + * string instance_partition = 1 [(.google.api.resource_reference) = { ... } + * + * @param value The instancePartition to set. + * @return This builder for chaining. + */ + public Builder setInstancePartition(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instancePartition_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A unique identifier for the instance partition. Values are of the form
    +     * `projects/<project>/instances/<instance>/instancePartitions/<instance_partition_id>`
    +     * 
    + * + * string instance_partition = 1 [(.google.api.resource_reference) = { ... } + * + * @return This builder for chaining. + */ + public Builder clearInstancePartition() { + instancePartition_ = getDefaultInstance().getInstancePartition(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * A unique identifier for the instance partition. Values are of the form
    +     * `projects/<project>/instances/<instance>/instancePartitions/<instance_partition_id>`
    +     * 
    + * + * string instance_partition = 1 [(.google.api.resource_reference) = { ... } + * + * @param value The bytes for instancePartition to set. + * @return This builder for chaining. + */ + public Builder setInstancePartitionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instancePartition_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.BackupInstancePartition) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.BackupInstancePartition) + private static final com.google.spanner.admin.database.v1.BackupInstancePartition + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.BackupInstancePartition(); + } + + public static com.google.spanner.admin.database.v1.BackupInstancePartition getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BackupInstancePartition parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return 
PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupInstancePartition getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupInstancePartitionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupInstancePartitionOrBuilder.java new file mode 100644 index 000000000000..8a2856c7b80d --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupInstancePartitionOrBuilder.java @@ -0,0 +1,56 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface BackupInstancePartitionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.BackupInstancePartition) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * A unique identifier for the instance partition. Values are of the form
    +   * `projects/<project>/instances/<instance>/instancePartitions/<instance_partition_id>`
    +   * 
    + * + * string instance_partition = 1 [(.google.api.resource_reference) = { ... } + * + * @return The instancePartition. + */ + java.lang.String getInstancePartition(); + + /** + * + * + *
    +   * A unique identifier for the instance partition. Values are of the form
    +   * `projects/<project>/instances/<instance>/instancePartitions/<instance_partition_id>`
    +   * 
    + * + * string instance_partition = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for instancePartition. + */ + com.google.protobuf.ByteString getInstancePartitionBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupName.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupName.java new file mode 100644 index 000000000000..da90c9f84018 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupName.java @@ -0,0 +1,223 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.spanner.admin.database.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class BackupName implements ResourceName { + private static final PathTemplate PROJECT_INSTANCE_BACKUP = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/instances/{instance}/backups/{backup}"); + private volatile Map fieldValuesMap; + private final String project; + private final String instance; + private final String backup; + + @Deprecated + protected BackupName() { + project = null; + instance = null; + backup = null; + } + + private BackupName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + instance = Preconditions.checkNotNull(builder.getInstance()); + backup = Preconditions.checkNotNull(builder.getBackup()); + } + + public String getProject() { + return project; + } + + public String getInstance() { + return instance; + } + + public String getBackup() { + return backup; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static BackupName of(String project, String instance, String backup) { + return newBuilder().setProject(project).setInstance(instance).setBackup(backup).build(); + } + + public static String format(String project, String instance, String backup) { + return newBuilder() + .setProject(project) + .setInstance(instance) + .setBackup(backup) + .build() + .toString(); + } + + public static BackupName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_INSTANCE_BACKUP.validatedMatch( + formattedString, "BackupName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("instance"), matchMap.get("backup")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List 
toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (BackupName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_INSTANCE_BACKUP.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (instance != null) { + fieldMapBuilder.put("instance", instance); + } + if (backup != null) { + fieldMapBuilder.put("backup", backup); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_INSTANCE_BACKUP.instantiate( + "project", project, "instance", instance, "backup", backup); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + BackupName that = ((BackupName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.instance, that.instance) + && Objects.equals(this.backup, that.backup); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(instance); + h *= 1000003; + h ^= Objects.hashCode(backup); + return h; + } + + /** Builder for projects/{project}/instances/{instance}/backups/{backup}. 
*/ + public static class Builder { + private String project; + private String instance; + private String backup; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getInstance() { + return instance; + } + + public String getBackup() { + return backup; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setInstance(String instance) { + this.instance = instance; + return this; + } + + public Builder setBackup(String backup) { + this.backup = backup; + return this; + } + + private Builder(BackupName backupName) { + this.project = backupName.project; + this.instance = backupName.instance; + this.backup = backupName.backup; + } + + public BackupName build() { + return new BackupName(this); + } + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupOrBuilder.java new file mode 100644 index 000000000000..1366d6d0c053 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupOrBuilder.java @@ -0,0 +1,1013 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface BackupOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.Backup) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. Name of the database from which this backup was created. This
    +   * needs to be in the same instance as the backup. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * string database = 2 [(.google.api.resource_reference) = { ... } + * + * @return The database. + */ + java.lang.String getDatabase(); + + /** + * + * + *
    +   * Required for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. Name of the database from which this backup was created. This
    +   * needs to be in the same instance as the backup. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * string database = 2 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for database. + */ + com.google.protobuf.ByteString getDatabaseBytes(); + + /** + * + * + *
    +   * The backup will contain an externally consistent copy of the database at
    +   * the timestamp specified by `version_time`. If `version_time` is not
    +   * specified, the system will set `version_time` to the `create_time` of the
    +   * backup.
    +   * 
    + * + * .google.protobuf.Timestamp version_time = 9; + * + * @return Whether the versionTime field is set. + */ + boolean hasVersionTime(); + + /** + * + * + *
    +   * The backup will contain an externally consistent copy of the database at
    +   * the timestamp specified by `version_time`. If `version_time` is not
    +   * specified, the system will set `version_time` to the `create_time` of the
    +   * backup.
    +   * 
    + * + * .google.protobuf.Timestamp version_time = 9; + * + * @return The versionTime. + */ + com.google.protobuf.Timestamp getVersionTime(); + + /** + * + * + *
    +   * The backup will contain an externally consistent copy of the database at
    +   * the timestamp specified by `version_time`. If `version_time` is not
    +   * specified, the system will set `version_time` to the `create_time` of the
    +   * backup.
    +   * 
    + * + * .google.protobuf.Timestamp version_time = 9; + */ + com.google.protobuf.TimestampOrBuilder getVersionTimeOrBuilder(); + + /** + * + * + *
    +   * Required for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. The expiration time of the backup, with microseconds
    +   * granularity that must be at least 6 hours and at most 366 days
    +   * from the time the CreateBackup request is processed. Once the `expire_time`
    +   * has passed, the backup is eligible to be automatically deleted by Cloud
    +   * Spanner to free the resources used by the backup.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 3; + * + * @return Whether the expireTime field is set. + */ + boolean hasExpireTime(); + + /** + * + * + *
    +   * Required for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. The expiration time of the backup, with microseconds
    +   * granularity that must be at least 6 hours and at most 366 days
    +   * from the time the CreateBackup request is processed. Once the `expire_time`
    +   * has passed, the backup is eligible to be automatically deleted by Cloud
    +   * Spanner to free the resources used by the backup.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 3; + * + * @return The expireTime. + */ + com.google.protobuf.Timestamp getExpireTime(); + + /** + * + * + *
    +   * Required for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. The expiration time of the backup, with microseconds
    +   * granularity that must be at least 6 hours and at most 366 days
    +   * from the time the CreateBackup request is processed. Once the `expire_time`
    +   * has passed, the backup is eligible to be automatically deleted by Cloud
    +   * Spanner to free the resources used by the backup.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 3; + */ + com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder(); + + /** + * + * + *
    +   * Output only for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. Required for the
    +   * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
    +   * operation.
    +   *
    +   * A globally unique identifier for the backup which cannot be
    +   * changed. Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/[a-z][a-z0-9_\-]*[a-z0-9]`
    +   * The final segment of the name must be between 2 and 60 characters
    +   * in length.
    +   *
    +   * The backup is stored in the location(s) specified in the instance
    +   * configuration of the instance containing the backup, identified
    +   * by the prefix of the backup name of the form
    +   * `projects/<project>/instances/<instance>`.
    +   * 
    + * + * string name = 1; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Output only for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. Required for the
    +   * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
    +   * operation.
    +   *
    +   * A globally unique identifier for the backup which cannot be
    +   * changed. Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/[a-z][a-z0-9_\-]*[a-z0-9]`
    +   * The final segment of the name must be between 2 and 60 characters
    +   * in length.
    +   *
    +   * The backup is stored in the location(s) specified in the instance
    +   * configuration of the instance containing the backup, identified
    +   * by the prefix of the backup name of the form
    +   * `projects/<project>/instances/<instance>`.
    +   * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +   * Output only. The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request is received. If the request does not specify `version_time`, the
    +   * `version_time` of the backup will be equivalent to the `create_time`.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + + /** + * + * + *
    +   * Output only. The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request is received. If the request does not specify `version_time`, the
    +   * `version_time` of the backup will be equivalent to the `create_time`.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + + /** + * + * + *
    +   * Output only. The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request is received. If the request does not specify `version_time`, the
    +   * `version_time` of the backup will be equivalent to the `create_time`.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
    +   * Output only. Size of the backup in bytes.
    +   * 
    + * + * int64 size_bytes = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The sizeBytes. + */ + long getSizeBytes(); + + /** + * + * + *
    +   * Output only. The number of bytes that will be freed by deleting this
    +   * backup. This value will be zero if, for example, this backup is part of an
    +   * incremental backup chain and younger backups in the chain require that we
    +   * keep its data. For backups not in an incremental backup chain, this is
    +   * always the size of the backup. This value may change if backups on the same
    +   * chain get created, deleted or expired.
    +   * 
    + * + * int64 freeable_size_bytes = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The freeableSizeBytes. + */ + long getFreeableSizeBytes(); + + /** + * + * + *
    +   * Output only. For a backup in an incremental backup chain, this is the
    +   * storage space needed to keep the data that has changed since the previous
    +   * backup. For all other backups, this is always the size of the backup. This
    +   * value may change if backups on the same chain get deleted or expired.
    +   *
    +   * This field can be used to calculate the total storage space used by a set
    +   * of backups. For example, the total space used by all backups of a database
    +   * can be computed by summing up this field.
    +   * 
    + * + * int64 exclusive_size_bytes = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The exclusiveSizeBytes. + */ + long getExclusiveSizeBytes(); + + /** + * + * + *
    +   * Output only. The current state of the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Backup.State state = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + int getStateValue(); + + /** + * + * + *
    +   * Output only. The current state of the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Backup.State state = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + com.google.spanner.admin.database.v1.Backup.State getState(); + + /** + * + * + *
    +   * Output only. The names of the restored databases that reference the backup.
    +   * The database names are of
    +   * the form `projects/<project>/instances/<instance>/databases/<database>`.
    +   * Referencing databases may exist in different instances. The existence of
    +   * any referencing database prevents the backup from being deleted. When a
    +   * restored database from the backup enters the `READY` state, the reference
    +   * to the backup is removed.
    +   * 
    + * + * + * repeated string referencing_databases = 7 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the referencingDatabases. + */ + java.util.List getReferencingDatabasesList(); + + /** + * + * + *
    +   * Output only. The names of the restored databases that reference the backup.
    +   * The database names are of
    +   * the form `projects/<project>/instances/<instance>/databases/<database>`.
    +   * Referencing databases may exist in different instances. The existence of
    +   * any referencing database prevents the backup from being deleted. When a
    +   * restored database from the backup enters the `READY` state, the reference
    +   * to the backup is removed.
    +   * 
    + * + * + * repeated string referencing_databases = 7 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The count of referencingDatabases. + */ + int getReferencingDatabasesCount(); + + /** + * + * + *
    +   * Output only. The names of the restored databases that reference the backup.
    +   * The database names are of
    +   * the form `projects/<project>/instances/<instance>/databases/<database>`.
    +   * Referencing databases may exist in different instances. The existence of
    +   * any referencing database prevents the backup from being deleted. When a
    +   * restored database from the backup enters the `READY` state, the reference
    +   * to the backup is removed.
    +   * 
    + * + * + * repeated string referencing_databases = 7 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The referencingDatabases at the given index. + */ + java.lang.String getReferencingDatabases(int index); + + /** + * + * + *
    +   * Output only. The names of the restored databases that reference the backup.
    +   * The database names are of
    +   * the form `projects/<project>/instances/<instance>/databases/<database>`.
    +   * Referencing databases may exist in different instances. The existence of
    +   * any referencing database prevents the backup from being deleted. When a
    +   * restored database from the backup enters the `READY` state, the reference
    +   * to the backup is removed.
    +   * 
    + * + * + * repeated string referencing_databases = 7 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the referencingDatabases at the given index. + */ + com.google.protobuf.ByteString getReferencingDatabasesBytes(int index); + + /** + * + * + *
    +   * Output only. The encryption information for the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the encryptionInfo field is set. + */ + boolean hasEncryptionInfo(); + + /** + * + * + *
    +   * Output only. The encryption information for the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The encryptionInfo. + */ + com.google.spanner.admin.database.v1.EncryptionInfo getEncryptionInfo(); + + /** + * + * + *
    +   * Output only. The encryption information for the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder getEncryptionInfoOrBuilder(); + + /** + * + * + *
    +   * Output only. The encryption information for the backup, whether it is
    +   * protected by one or more KMS keys. The information includes all Cloud
    +   * KMS key versions used to encrypt the backup. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated. At least one of the key
    +   * versions must be available for the backup to be restored. If a key version
    +   * is revoked in the middle of a restore, the restore behavior is undefined.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List + getEncryptionInformationList(); + + /** + * + * + *
    +   * Output only. The encryption information for the backup, whether it is
    +   * protected by one or more KMS keys. The information includes all Cloud
    +   * KMS key versions used to encrypt the backup. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated. At least one of the key
    +   * versions must be available for the backup to be restored. If a key version
    +   * is revoked in the middle of a restore, the restore behavior is undefined.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.spanner.admin.database.v1.EncryptionInfo getEncryptionInformation(int index); + + /** + * + * + *
    +   * Output only. The encryption information for the backup, whether it is
    +   * protected by one or more KMS keys. The information includes all Cloud
    +   * KMS key versions used to encrypt the backup. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated. At least one of the key
    +   * versions must be available for the backup to be restored. If a key version
    +   * is revoked in the middle of a restore, the restore behavior is undefined.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + int getEncryptionInformationCount(); + + /** + * + * + *
    +   * Output only. The encryption information for the backup, whether it is
    +   * protected by one or more KMS keys. The information includes all Cloud
    +   * KMS key versions used to encrypt the backup. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated. At least one of the key
    +   * versions must be available for the backup to be restored. If a key version
    +   * is revoked in the middle of a restore, the restore behavior is undefined.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List + getEncryptionInformationOrBuilderList(); + + /** + * + * + *
    +   * Output only. The encryption information for the backup, whether it is
    +   * protected by one or more KMS keys. The information includes all Cloud
    +   * KMS key versions used to encrypt the backup. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated. At least one of the key
    +   * versions must be available for the backup to be restored. If a key version
    +   * is revoked in the middle of a restore, the restore behavior is undefined.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder getEncryptionInformationOrBuilder( + int index); + + /** + * + * + *
    +   * Output only. The database dialect information for the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for databaseDialect. + */ + int getDatabaseDialectValue(); + + /** + * + * + *
    +   * Output only. The database dialect information for the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The databaseDialect. + */ + com.google.spanner.admin.database.v1.DatabaseDialect getDatabaseDialect(); + + /** + * + * + *
    +   * Output only. The names of the destination backups being created by copying
    +   * this source backup. The backup names are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * Referencing backups may exist in different instances. The existence of
    +   * any referencing backup prevents the backup from being deleted. When the
    +   * copy operation is done (either successfully completed or cancelled or the
    +   * destination backup is deleted), the reference to the backup is removed.
    +   * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the referencingBackups. + */ + java.util.List getReferencingBackupsList(); + + /** + * + * + *
    +   * Output only. The names of the destination backups being created by copying
    +   * this source backup. The backup names are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * Referencing backups may exist in different instances. The existence of
    +   * any referencing backup prevents the backup from being deleted. When the
    +   * copy operation is done (either successfully completed or cancelled or the
    +   * destination backup is deleted), the reference to the backup is removed.
    +   * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The count of referencingBackups. + */ + int getReferencingBackupsCount(); + + /** + * + * + *
    +   * Output only. The names of the destination backups being created by copying
    +   * this source backup. The backup names are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * Referencing backups may exist in different instances. The existence of
    +   * any referencing backup prevents the backup from being deleted. When the
    +   * copy operation is done (either successfully completed or cancelled or the
    +   * destination backup is deleted), the reference to the backup is removed.
    +   * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The referencingBackups at the given index. + */ + java.lang.String getReferencingBackups(int index); + + /** + * + * + *
    +   * Output only. The names of the destination backups being created by copying
    +   * this source backup. The backup names are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * Referencing backups may exist in different instances. The existence of
    +   * any referencing backup prevents the backup from being deleted. When the
    +   * copy operation is done (either successfully completed or cancelled or the
    +   * destination backup is deleted), the reference to the backup is removed.
    +   * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the referencingBackups at the given index. + */ + com.google.protobuf.ByteString getReferencingBackupsBytes(int index); + + /** + * + * + *
    +   * Output only. The max allowed expiration time of the backup, with
    +   * microseconds granularity. A backup's expiration time can be configured in
    +   * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +   * copying an existing backup, the expiration time specified must be
    +   * less than `Backup.max_expire_time`.
    +   * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the maxExpireTime field is set. + */ + boolean hasMaxExpireTime(); + + /** + * + * + *
    +   * Output only. The max allowed expiration time of the backup, with
    +   * microseconds granularity. A backup's expiration time can be configured in
    +   * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +   * copying an existing backup, the expiration time specified must be
    +   * less than `Backup.max_expire_time`.
    +   * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The maxExpireTime. + */ + com.google.protobuf.Timestamp getMaxExpireTime(); + + /** + * + * + *
    +   * Output only. The max allowed expiration time of the backup, with
    +   * microseconds granularity. A backup's expiration time can be configured in
    +   * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +   * copying an existing backup, the expiration time specified must be
    +   * less than `Backup.max_expire_time`.
    +   * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getMaxExpireTimeOrBuilder(); + + /** + * + * + *
    +   * Output only. List of backup schedule URIs that are associated with
    +   * creating this backup. This is only applicable for scheduled backups, and
    +   * is empty for on-demand backups.
    +   *
    +   * To optimize for storage, whenever possible, multiple schedules are
    +   * collapsed together to create one backup. In such cases, this field captures
    +   * the list of all backup schedule URIs that are associated with creating
    +   * this backup. If collapsing is not done, then this field captures the
    +   * single backup schedule URI associated with creating this backup.
    +   * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the backupSchedules. + */ + java.util.List getBackupSchedulesList(); + + /** + * + * + *
    +   * Output only. List of backup schedule URIs that are associated with
    +   * creating this backup. This is only applicable for scheduled backups, and
    +   * is empty for on-demand backups.
    +   *
    +   * To optimize for storage, whenever possible, multiple schedules are
    +   * collapsed together to create one backup. In such cases, this field captures
    +   * the list of all backup schedule URIs that are associated with creating
    +   * this backup. If collapsing is not done, then this field captures the
    +   * single backup schedule URI associated with creating this backup.
    +   * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The count of backupSchedules. + */ + int getBackupSchedulesCount(); + + /** + * + * + *
    +   * Output only. List of backup schedule URIs that are associated with
    +   * creating this backup. This is only applicable for scheduled backups, and
    +   * is empty for on-demand backups.
    +   *
    +   * To optimize for storage, whenever possible, multiple schedules are
    +   * collapsed together to create one backup. In such cases, this field captures
    +   * the list of all backup schedule URIs that are associated with creating
    +   * this backup. If collapsing is not done, then this field captures the
    +   * single backup schedule URI associated with creating this backup.
    +   * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The backupSchedules at the given index. + */ + java.lang.String getBackupSchedules(int index); + + /** + * + * + *
    +   * Output only. List of backup schedule URIs that are associated with
    +   * creating this backup. This is only applicable for scheduled backups, and
    +   * is empty for on-demand backups.
    +   *
    +   * To optimize for storage, whenever possible, multiple schedules are
    +   * collapsed together to create one backup. In such cases, this field captures
    +   * the list of all backup schedule URIs that are associated with creating
    +   * this backup. If collapsing is not done, then this field captures the
    +   * single backup schedule URI associated with creating this backup.
    +   * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the backupSchedules at the given index. + */ + com.google.protobuf.ByteString getBackupSchedulesBytes(int index); + + /** + * + * + *
    +   * Output only. Populated only for backups in an incremental backup chain.
    +   * Backups share the same chain id if and only if they belong to the same
    +   * incremental backup chain. Use this field to determine which backups are
    +   * part of the same incremental backup chain. The ordering of backups in the
    +   * chain can be determined by ordering the backup `version_time`.
    +   * 
    + * + * string incremental_backup_chain_id = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The incrementalBackupChainId. + */ + java.lang.String getIncrementalBackupChainId(); + + /** + * + * + *
    +   * Output only. Populated only for backups in an incremental backup chain.
    +   * Backups share the same chain id if and only if they belong to the same
    +   * incremental backup chain. Use this field to determine which backups are
    +   * part of the same incremental backup chain. The ordering of backups in the
    +   * chain can be determined by ordering the backup `version_time`.
    +   * 
    + * + * string incremental_backup_chain_id = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The bytes for incrementalBackupChainId. + */ + com.google.protobuf.ByteString getIncrementalBackupChainIdBytes(); + + /** + * + * + *
    +   * Output only. Data deleted at a time older than this is guaranteed not to be
    +   * retained in order to support this backup. For a backup in an incremental
    +   * backup chain, this is the version time of the oldest backup that exists or
    +   * ever existed in the chain. For all other backups, this is the version time
    +   * of the backup. This field can be used to understand what data is being
    +   * retained by the backup system.
    +   * 
    + * + * + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the oldestVersionTime field is set. + */ + boolean hasOldestVersionTime(); + + /** + * + * + *
    +   * Output only. Data deleted at a time older than this is guaranteed not to be
    +   * retained in order to support this backup. For a backup in an incremental
    +   * backup chain, this is the version time of the oldest backup that exists or
    +   * ever existed in the chain. For all other backups, this is the version time
    +   * of the backup. This field can be used to understand what data is being
    +   * retained by the backup system.
    +   * 
    + * + * + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The oldestVersionTime. + */ + com.google.protobuf.Timestamp getOldestVersionTime(); + + /** + * + * + *
    +   * Output only. Data deleted at a time older than this is guaranteed not to be
    +   * retained in order to support this backup. For a backup in an incremental
    +   * backup chain, this is the version time of the oldest backup that exists or
    +   * ever existed in the chain. For all other backups, this is the version time
    +   * of the backup. This field can be used to understand what data is being
    +   * retained by the backup system.
    +   * 
    + * + * + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getOldestVersionTimeOrBuilder(); + + /** + * + * + *
    +   * Output only. The instance partition(s) storing the backup.
    +   *
    +   * This is the same as the list of the instance partition(s) that the database
    +   * had footprint in at the backup's `version_time`.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List + getInstancePartitionsList(); + + /** + * + * + *
    +   * Output only. The instance partition(s) storing the backup.
    +   *
    +   * This is the same as the list of the instance partition(s) that the database
    +   * had footprint in at the backup's `version_time`.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.spanner.admin.database.v1.BackupInstancePartition getInstancePartitions(int index); + + /** + * + * + *
    +   * Output only. The instance partition(s) storing the backup.
    +   *
    +   * This is the same as the list of the instance partition(s) that the database
    +   * had footprint in at the backup's `version_time`.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + int getInstancePartitionsCount(); + + /** + * + * + *
    +   * Output only. The instance partition(s) storing the backup.
    +   *
    +   * This is the same as the list of the instance partition(s) that the database
    +   * had footprint in at the backup's `version_time`.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List + getInstancePartitionsOrBuilderList(); + + /** + * + * + *
    +   * Output only. The instance partition(s) storing the backup.
    +   *
    +   * This is the same as the list of the instance partition(s) that the database
    +   * had footprint in at the backup's `version_time`.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.BackupInstancePartition instance_partitions = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.spanner.admin.database.v1.BackupInstancePartitionOrBuilder + getInstancePartitionsOrBuilder(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupProto.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupProto.java new file mode 100644 index 000000000000..38d648d08926 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupProto.java @@ -0,0 +1,434 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public final class BackupProto extends com.google.protobuf.GeneratedFile { + private BackupProto() {} + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BackupProto"); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_Backup_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_Backup_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_CreateBackupRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_CreateBackupRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_CreateBackupMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_CreateBackupMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_CopyBackupRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_spanner_admin_database_v1_CopyBackupRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_CopyBackupMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_CopyBackupMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_UpdateBackupRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_UpdateBackupRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_GetBackupRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_GetBackupRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_DeleteBackupRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_DeleteBackupRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_ListBackupsRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_ListBackupsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_ListBackupsResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_ListBackupsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_spanner_admin_database_v1_ListBackupOperationsRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_ListBackupOperationsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_ListBackupOperationsResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_ListBackupOperationsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_BackupInfo_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_BackupInfo_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_CreateBackupEncryptionConfig_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_CreateBackupEncryptionConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_CopyBackupEncryptionConfig_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_CopyBackupEncryptionConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_FullBackupSpec_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_FullBackupSpec_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_descriptor; + static final 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_BackupInstancePartition_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_BackupInstancePartition_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n-google/spanner/admin/database/v1/backu" + + "p.proto\022 google.spanner.admin.database.v" + + "1\032\037google/api/field_behavior.proto\032\031goog" + + "le/api/resource.proto\032#google/longrunnin" + + "g/operations.proto\032 google/protobuf/fiel" + + "d_mask.proto\032\037google/protobuf/timestamp." + + "proto\032-google/spanner/admin/database/v1/" + + "common.proto\"\355\t\n\006Backup\0226\n\010database\030\002 \001(" + + "\tB$\372A!\n\037spanner.googleapis.com/Database\022" + + "0\n\014version_time\030\t \001(\0132\032.google.protobuf." + + "Timestamp\022/\n\013expire_time\030\003 \001(\0132\032.google." 
+ + "protobuf.Timestamp\022\014\n\004name\030\001 \001(\t\0224\n\013crea" + + "te_time\030\004 \001(\0132\032.google.protobuf.Timestam" + + "pB\003\340A\003\022\027\n\nsize_bytes\030\005 \001(\003B\003\340A\003\022 \n\023freea" + + "ble_size_bytes\030\017 \001(\003B\003\340A\003\022!\n\024exclusive_s" + + "ize_bytes\030\020 \001(\003B\003\340A\003\022B\n\005state\030\006 \001(\0162..go" + + "ogle.spanner.admin.database.v1.Backup.St" + + "ateB\003\340A\003\022F\n\025referencing_databases\030\007 \003(\tB" + + "\'\340A\003\372A!\n\037spanner.googleapis.com/Database" + + "\022N\n\017encryption_info\030\010 \001(\01320.google.spann" + + "er.admin.database.v1.EncryptionInfoB\003\340A\003" + + "\022U\n\026encryption_information\030\r \003(\01320.googl" + + "e.spanner.admin.database.v1.EncryptionIn" + + "foB\003\340A\003\022P\n\020database_dialect\030\n \001(\01621.goog" + + "le.spanner.admin.database.v1.DatabaseDia" + + "lectB\003\340A\003\022B\n\023referencing_backups\030\013 \003(\tB%" + + "\340A\003\372A\037\n\035spanner.googleapis.com/Backup\0228\n" + + "\017max_expire_time\030\014 \001(\0132\032.google.protobuf" + + ".TimestampB\003\340A\003\022G\n\020backup_schedules\030\016 \003(" + + "\tB-\340A\003\372A\'\n%spanner.googleapis.com/Backup" + + "Schedule\022(\n\033incremental_backup_chain_id\030" + + "\021 \001(\tB\003\340A\003\022<\n\023oldest_version_time\030\022 \001(\0132" + + "\032.google.protobuf.TimestampB\003\340A\003\022[\n\023inst" + + "ance_partitions\030\023 \003(\01329.google.spanner.a" + + "dmin.database.v1.BackupInstancePartition" + + "B\003\340A\003\"7\n\005State\022\025\n\021STATE_UNSPECIFIED\020\000\022\014\n" + + "\010CREATING\020\001\022\t\n\005READY\020\002:\\\352AY\n\035spanner.goo" + + "gleapis.com/Backup\0228projects/{project}/i" + + "nstances/{instance}/backups/{backup}\"\205\002\n" + + "\023CreateBackupRequest\0227\n\006parent\030\001 \001(\tB\'\340A" + + "\002\372A!\n\037spanner.googleapis.com/Instance\022\026\n" + + "\tbackup_id\030\002 
\001(\tB\003\340A\002\022=\n\006backup\030\003 \001(\0132(." + + "google.spanner.admin.database.v1.BackupB" + + "\003\340A\002\022^\n\021encryption_config\030\004 \001(\0132>.google" + + ".spanner.admin.database.v1.CreateBackupE" + + "ncryptionConfigB\003\340A\001\"\370\001\n\024CreateBackupMet" + + "adata\0220\n\004name\030\001 \001(\tB\"\372A\037\n\035spanner.google" + + "apis.com/Backup\0226\n\010database\030\002 \001(\tB$\372A!\n\037" + + "spanner.googleapis.com/Database\022E\n\010progr" + + "ess\030\003 \001(\01323.google.spanner.admin.databas" + + "e.v1.OperationProgress\022/\n\013cancel_time\030\004 " + + "\001(\0132\032.google.protobuf.Timestamp\"\266\002\n\021Copy" + + "BackupRequest\0227\n\006parent\030\001 \001(\tB\'\340A\002\372A!\n\037s" + + "panner.googleapis.com/Instance\022\026\n\tbackup" + + "_id\030\002 \001(\tB\003\340A\002\022<\n\rsource_backup\030\003 \001(\tB%\340" + + "A\002\372A\037\n\035spanner.googleapis.com/Backup\0224\n\013" + + "expire_time\030\004 \001(\0132\032.google.protobuf.Time" + + "stampB\003\340A\002\022\\\n\021encryption_config\030\005 \001(\0132<." + + "google.spanner.admin.database.v1.CopyBac" + + "kupEncryptionConfigB\003\340A\001\"\371\001\n\022CopyBackupM" + + "etadata\0220\n\004name\030\001 \001(\tB\"\372A\037\n\035spanner.goog" + + "leapis.com/Backup\0229\n\rsource_backup\030\002 \001(\t" + + "B\"\372A\037\n\035spanner.googleapis.com/Backup\022E\n\010" + + "progress\030\003 \001(\01323.google.spanner.admin.da" + + "tabase.v1.OperationProgress\022/\n\013cancel_ti" + + "me\030\004 \001(\0132\032.google.protobuf.Timestamp\"\212\001\n" + + "\023UpdateBackupRequest\022=\n\006backup\030\001 \001(\0132(.g" + + "oogle.spanner.admin.database.v1.BackupB\003" + + "\340A\002\0224\n\013update_mask\030\002 \001(\0132\032.google.protob" + + "uf.FieldMaskB\003\340A\002\"G\n\020GetBackupRequest\0223\n" + + "\004name\030\001 \001(\tB%\340A\002\372A\037\n\035spanner.googleapis." 
+ + "com/Backup\"J\n\023DeleteBackupRequest\0223\n\004nam" + + "e\030\001 \001(\tB%\340A\002\372A\037\n\035spanner.googleapis.com/" + + "Backup\"\204\001\n\022ListBackupsRequest\0227\n\006parent\030" + + "\001 \001(\tB\'\340A\002\372A!\n\037spanner.googleapis.com/In" + + "stance\022\016\n\006filter\030\002 \001(\t\022\021\n\tpage_size\030\003 \001(" + + "\005\022\022\n\npage_token\030\004 \001(\t\"i\n\023ListBackupsResp" + + "onse\0229\n\007backups\030\001 \003(\0132(.google.spanner.a" + + "dmin.database.v1.Backup\022\027\n\017next_page_tok" + + "en\030\002 \001(\t\"\215\001\n\033ListBackupOperationsRequest" + + "\0227\n\006parent\030\001 \001(\tB\'\340A\002\372A!\n\037spanner.google" + + "apis.com/Instance\022\016\n\006filter\030\002 \001(\t\022\021\n\tpag" + + "e_size\030\003 \001(\005\022\022\n\npage_token\030\004 \001(\t\"j\n\034List" + + "BackupOperationsResponse\0221\n\noperations\030\001" + + " \003(\0132\035.google.longrunning.Operation\022\027\n\017n" + + "ext_page_token\030\002 \001(\t\"\342\001\n\nBackupInfo\0222\n\006b" + + "ackup\030\001 \001(\tB\"\372A\037\n\035spanner.googleapis.com" + + "/Backup\0220\n\014version_time\030\004 \001(\0132\032.google.p" + + "rotobuf.Timestamp\022/\n\013create_time\030\002 \001(\0132\032" + + ".google.protobuf.Timestamp\022=\n\017source_dat" + + "abase\030\003 \001(\tB$\372A!\n\037spanner.googleapis.com" + + "/Database\"\237\003\n\034CreateBackupEncryptionConf" + + "ig\022k\n\017encryption_type\030\001 \001(\0162M.google.spa" + + "nner.admin.database.v1.CreateBackupEncry" + + "ptionConfig.EncryptionTypeB\003\340A\002\022?\n\014kms_k" + + "ey_name\030\002 \001(\tB)\340A\001\372A#\n!cloudkms.googleap" + + "is.com/CryptoKey\022@\n\rkms_key_names\030\003 \003(\tB" + + ")\340A\001\372A#\n!cloudkms.googleapis.com/CryptoK" + + "ey\"\216\001\n\016EncryptionType\022\037\n\033ENCRYPTION_TYPE" + + "_UNSPECIFIED\020\000\022\033\n\027USE_DATABASE_ENCRYPTIO" + + 
"N\020\001\022\035\n\031GOOGLE_DEFAULT_ENCRYPTION\020\002\022\037\n\033CU" + + "STOMER_MANAGED_ENCRYPTION\020\003\"\253\003\n\032CopyBack" + + "upEncryptionConfig\022i\n\017encryption_type\030\001 " + + "\001(\0162K.google.spanner.admin.database.v1.C" + + "opyBackupEncryptionConfig.EncryptionType" + + "B\003\340A\002\022?\n\014kms_key_name\030\002 \001(\tB)\340A\001\372A#\n!clo" + + "udkms.googleapis.com/CryptoKey\022@\n\rkms_ke" + + "y_names\030\003 \003(\tB)\340A\001\372A#\n!cloudkms.googleap" + + "is.com/CryptoKey\"\236\001\n\016EncryptionType\022\037\n\033E" + + "NCRYPTION_TYPE_UNSPECIFIED\020\000\022+\n\'USE_CONF" + + "IG_DEFAULT_OR_BACKUP_ENCRYPTION\020\001\022\035\n\031GOO" + + "GLE_DEFAULT_ENCRYPTION\020\002\022\037\n\033CUSTOMER_MAN" + + "AGED_ENCRYPTION\020\003\"\020\n\016FullBackupSpec\"\027\n\025I" + + "ncrementalBackupSpec\"d\n\027BackupInstancePa" + + "rtition\022I\n\022instance_partition\030\001 \001(\tB-\372A*" + + "\n(spanner.googleapis.com/InstancePartiti" + + "onB\375\001\n$com.google.spanner.admin.database" + + ".v1B\013BackupProtoP\001ZFcloud.google.com/go/" + + "spanner/admin/database/apiv1/databasepb;" + + "databasepb\252\002&Google.Cloud.Spanner.Admin." 
+ + "Database.V1\312\002&Google\\Cloud\\Spanner\\Admin" + + "\\Database\\V1\352\002+Google::Cloud::Spanner::A" + + "dmin::Database::V1b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.longrunning.OperationsProto.getDescriptor(), + com.google.protobuf.FieldMaskProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + com.google.spanner.admin.database.v1.CommonProto.getDescriptor(), + }); + internal_static_google_spanner_admin_database_v1_Backup_descriptor = + getDescriptor().getMessageType(0); + internal_static_google_spanner_admin_database_v1_Backup_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_Backup_descriptor, + new java.lang.String[] { + "Database", + "VersionTime", + "ExpireTime", + "Name", + "CreateTime", + "SizeBytes", + "FreeableSizeBytes", + "ExclusiveSizeBytes", + "State", + "ReferencingDatabases", + "EncryptionInfo", + "EncryptionInformation", + "DatabaseDialect", + "ReferencingBackups", + "MaxExpireTime", + "BackupSchedules", + "IncrementalBackupChainId", + "OldestVersionTime", + "InstancePartitions", + }); + internal_static_google_spanner_admin_database_v1_CreateBackupRequest_descriptor = + getDescriptor().getMessageType(1); + internal_static_google_spanner_admin_database_v1_CreateBackupRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_CreateBackupRequest_descriptor, + new java.lang.String[] { + "Parent", "BackupId", "Backup", "EncryptionConfig", + }); + internal_static_google_spanner_admin_database_v1_CreateBackupMetadata_descriptor = + getDescriptor().getMessageType(2); + 
internal_static_google_spanner_admin_database_v1_CreateBackupMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_CreateBackupMetadata_descriptor, + new java.lang.String[] { + "Name", "Database", "Progress", "CancelTime", + }); + internal_static_google_spanner_admin_database_v1_CopyBackupRequest_descriptor = + getDescriptor().getMessageType(3); + internal_static_google_spanner_admin_database_v1_CopyBackupRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_CopyBackupRequest_descriptor, + new java.lang.String[] { + "Parent", "BackupId", "SourceBackup", "ExpireTime", "EncryptionConfig", + }); + internal_static_google_spanner_admin_database_v1_CopyBackupMetadata_descriptor = + getDescriptor().getMessageType(4); + internal_static_google_spanner_admin_database_v1_CopyBackupMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_CopyBackupMetadata_descriptor, + new java.lang.String[] { + "Name", "SourceBackup", "Progress", "CancelTime", + }); + internal_static_google_spanner_admin_database_v1_UpdateBackupRequest_descriptor = + getDescriptor().getMessageType(5); + internal_static_google_spanner_admin_database_v1_UpdateBackupRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_UpdateBackupRequest_descriptor, + new java.lang.String[] { + "Backup", "UpdateMask", + }); + internal_static_google_spanner_admin_database_v1_GetBackupRequest_descriptor = + getDescriptor().getMessageType(6); + internal_static_google_spanner_admin_database_v1_GetBackupRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_GetBackupRequest_descriptor, + new 
java.lang.String[] { + "Name", + }); + internal_static_google_spanner_admin_database_v1_DeleteBackupRequest_descriptor = + getDescriptor().getMessageType(7); + internal_static_google_spanner_admin_database_v1_DeleteBackupRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_DeleteBackupRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_spanner_admin_database_v1_ListBackupsRequest_descriptor = + getDescriptor().getMessageType(8); + internal_static_google_spanner_admin_database_v1_ListBackupsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_ListBackupsRequest_descriptor, + new java.lang.String[] { + "Parent", "Filter", "PageSize", "PageToken", + }); + internal_static_google_spanner_admin_database_v1_ListBackupsResponse_descriptor = + getDescriptor().getMessageType(9); + internal_static_google_spanner_admin_database_v1_ListBackupsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_ListBackupsResponse_descriptor, + new java.lang.String[] { + "Backups", "NextPageToken", + }); + internal_static_google_spanner_admin_database_v1_ListBackupOperationsRequest_descriptor = + getDescriptor().getMessageType(10); + internal_static_google_spanner_admin_database_v1_ListBackupOperationsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_ListBackupOperationsRequest_descriptor, + new java.lang.String[] { + "Parent", "Filter", "PageSize", "PageToken", + }); + internal_static_google_spanner_admin_database_v1_ListBackupOperationsResponse_descriptor = + getDescriptor().getMessageType(11); + internal_static_google_spanner_admin_database_v1_ListBackupOperationsResponse_fieldAccessorTable = + new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_ListBackupOperationsResponse_descriptor, + new java.lang.String[] { + "Operations", "NextPageToken", + }); + internal_static_google_spanner_admin_database_v1_BackupInfo_descriptor = + getDescriptor().getMessageType(12); + internal_static_google_spanner_admin_database_v1_BackupInfo_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_BackupInfo_descriptor, + new java.lang.String[] { + "Backup", "VersionTime", "CreateTime", "SourceDatabase", + }); + internal_static_google_spanner_admin_database_v1_CreateBackupEncryptionConfig_descriptor = + getDescriptor().getMessageType(13); + internal_static_google_spanner_admin_database_v1_CreateBackupEncryptionConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_CreateBackupEncryptionConfig_descriptor, + new java.lang.String[] { + "EncryptionType", "KmsKeyName", "KmsKeyNames", + }); + internal_static_google_spanner_admin_database_v1_CopyBackupEncryptionConfig_descriptor = + getDescriptor().getMessageType(14); + internal_static_google_spanner_admin_database_v1_CopyBackupEncryptionConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_CopyBackupEncryptionConfig_descriptor, + new java.lang.String[] { + "EncryptionType", "KmsKeyName", "KmsKeyNames", + }); + internal_static_google_spanner_admin_database_v1_FullBackupSpec_descriptor = + getDescriptor().getMessageType(15); + internal_static_google_spanner_admin_database_v1_FullBackupSpec_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_FullBackupSpec_descriptor, + new java.lang.String[] {}); + 
internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_descriptor = + getDescriptor().getMessageType(16); + internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_descriptor, + new java.lang.String[] {}); + internal_static_google_spanner_admin_database_v1_BackupInstancePartition_descriptor = + getDescriptor().getMessageType(17); + internal_static_google_spanner_admin_database_v1_BackupInstancePartition_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_BackupInstancePartition_descriptor, + new java.lang.String[] { + "InstancePartition", + }); + descriptor.resolveAllFeaturesImmutable(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + com.google.longrunning.OperationsProto.getDescriptor(); + com.google.protobuf.FieldMaskProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + com.google.spanner.admin.database.v1.CommonProto.getDescriptor(); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.ResourceProto.resource); + registry.add(com.google.api.ResourceProto.resourceReference); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupSchedule.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupSchedule.java new file mode 100644 index 000000000000..ef07225e99b3 --- /dev/null +++ 
b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupSchedule.java @@ -0,0 +1,2698 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup_schedule.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * BackupSchedule expresses the automated backup creation specification for a
    + * Spanner database.
    + * Next ID: 10
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.BackupSchedule} + */ +@com.google.protobuf.Generated +public final class BackupSchedule extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.BackupSchedule) + BackupScheduleOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BackupSchedule"); + } + + // Use BackupSchedule.newBuilder() to construct. + private BackupSchedule(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BackupSchedule() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_BackupSchedule_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_BackupSchedule_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.BackupSchedule.class, + com.google.spanner.admin.database.v1.BackupSchedule.Builder.class); + } + + private int bitField0_; + private int backupTypeSpecCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object backupTypeSpec_; + + public enum BackupTypeSpecCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + FULL_BACKUP_SPEC(7), + INCREMENTAL_BACKUP_SPEC(8), + BACKUPTYPESPEC_NOT_SET(0); + private final int value; + + private BackupTypeSpecCase(int value) { + this.value = value; + } + + /** + 
* @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static BackupTypeSpecCase valueOf(int value) { + return forNumber(value); + } + + public static BackupTypeSpecCase forNumber(int value) { + switch (value) { + case 7: + return FULL_BACKUP_SPEC; + case 8: + return INCREMENTAL_BACKUP_SPEC; + case 0: + return BACKUPTYPESPEC_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public BackupTypeSpecCase getBackupTypeSpecCase() { + return BackupTypeSpecCase.forNumber(backupTypeSpecCase_); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Identifier. Output only for the
    +   * [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchededule] operation.
    +   * Required for the
    +   * [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
    +   * operation. A globally unique identifier for the backup schedule which
    +   * cannot be changed. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]`
    +   * The final segment of the name must be between 2 and 60 characters in
    +   * length.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Identifier. Output only for the
    +   * [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchededule] operation.
    +   * Required for the
    +   * [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
    +   * operation. A globally unique identifier for the backup schedule which
    +   * cannot be changed. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]`
    +   * The final segment of the name must be between 2 and 60 characters in
    +   * length.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SPEC_FIELD_NUMBER = 6; + private com.google.spanner.admin.database.v1.BackupScheduleSpec spec_; + + /** + * + * + *
    +   * Optional. The schedule specification based on which the backup creations
    +   * are triggered.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the spec field is set. + */ + @java.lang.Override + public boolean hasSpec() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Optional. The schedule specification based on which the backup creations
    +   * are triggered.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The spec. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupScheduleSpec getSpec() { + return spec_ == null + ? com.google.spanner.admin.database.v1.BackupScheduleSpec.getDefaultInstance() + : spec_; + } + + /** + * + * + *
    +   * Optional. The schedule specification based on which the backup creations
    +   * are triggered.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupScheduleSpecOrBuilder getSpecOrBuilder() { + return spec_ == null + ? com.google.spanner.admin.database.v1.BackupScheduleSpec.getDefaultInstance() + : spec_; + } + + public static final int RETENTION_DURATION_FIELD_NUMBER = 3; + private com.google.protobuf.Duration retentionDuration_; + + /** + * + * + *
    +   * Optional. The retention duration of a backup that must be at least 6 hours
    +   * and at most 366 days. The backup is eligible to be automatically deleted
    +   * once the retention period has elapsed.
    +   * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the retentionDuration field is set. + */ + @java.lang.Override + public boolean hasRetentionDuration() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Optional. The retention duration of a backup that must be at least 6 hours
    +   * and at most 366 days. The backup is eligible to be automatically deleted
    +   * once the retention period has elapsed.
    +   * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The retentionDuration. + */ + @java.lang.Override + public com.google.protobuf.Duration getRetentionDuration() { + return retentionDuration_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : retentionDuration_; + } + + /** + * + * + *
    +   * Optional. The retention duration of a backup that must be at least 6 hours
    +   * and at most 366 days. The backup is eligible to be automatically deleted
    +   * once the retention period has elapsed.
    +   * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.DurationOrBuilder getRetentionDurationOrBuilder() { + return retentionDuration_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : retentionDuration_; + } + + public static final int ENCRYPTION_CONFIG_FIELD_NUMBER = 4; + private com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryptionConfig_; + + /** + * + * + *
    +   * Optional. The encryption configuration that will be used to encrypt the
    +   * backup. If this field is not specified, the backup will use the same
    +   * encryption configuration as the database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the encryptionConfig field is set. + */ + @java.lang.Override + public boolean hasEncryptionConfig() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * Optional. The encryption configuration that will be used to encrypt the
    +   * backup. If this field is not specified, the backup will use the same
    +   * encryption configuration as the database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The encryptionConfig. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig getEncryptionConfig() { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + + /** + * + * + *
    +   * Optional. The encryption configuration that will be used to encrypt the
    +   * backup. If this field is not specified, the backup will use the same
    +   * encryption configuration as the database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfigOrBuilder + getEncryptionConfigOrBuilder() { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + + public static final int FULL_BACKUP_SPEC_FIELD_NUMBER = 7; + + /** + * + * + *
    +   * The schedule creates only full backups.
    +   * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + * + * @return Whether the fullBackupSpec field is set. + */ + @java.lang.Override + public boolean hasFullBackupSpec() { + return backupTypeSpecCase_ == 7; + } + + /** + * + * + *
    +   * The schedule creates only full backups.
    +   * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + * + * @return The fullBackupSpec. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.FullBackupSpec getFullBackupSpec() { + if (backupTypeSpecCase_ == 7) { + return (com.google.spanner.admin.database.v1.FullBackupSpec) backupTypeSpec_; + } + return com.google.spanner.admin.database.v1.FullBackupSpec.getDefaultInstance(); + } + + /** + * + * + *
    +   * The schedule creates only full backups.
    +   * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.FullBackupSpecOrBuilder getFullBackupSpecOrBuilder() { + if (backupTypeSpecCase_ == 7) { + return (com.google.spanner.admin.database.v1.FullBackupSpec) backupTypeSpec_; + } + return com.google.spanner.admin.database.v1.FullBackupSpec.getDefaultInstance(); + } + + public static final int INCREMENTAL_BACKUP_SPEC_FIELD_NUMBER = 8; + + /** + * + * + *
    +   * The schedule creates incremental backup chains.
    +   * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + * + * @return Whether the incrementalBackupSpec field is set. + */ + @java.lang.Override + public boolean hasIncrementalBackupSpec() { + return backupTypeSpecCase_ == 8; + } + + /** + * + * + *
    +   * The schedule creates incremental backup chains.
    +   * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + * + * @return The incrementalBackupSpec. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.IncrementalBackupSpec getIncrementalBackupSpec() { + if (backupTypeSpecCase_ == 8) { + return (com.google.spanner.admin.database.v1.IncrementalBackupSpec) backupTypeSpec_; + } + return com.google.spanner.admin.database.v1.IncrementalBackupSpec.getDefaultInstance(); + } + + /** + * + * + *
    +   * The schedule creates incremental backup chains.
    +   * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.IncrementalBackupSpecOrBuilder + getIncrementalBackupSpecOrBuilder() { + if (backupTypeSpecCase_ == 8) { + return (com.google.spanner.admin.database.v1.IncrementalBackupSpec) backupTypeSpec_; + } + return com.google.spanner.admin.database.v1.IncrementalBackupSpec.getDefaultInstance(); + } + + public static final int UPDATE_TIME_FIELD_NUMBER = 9; + private com.google.protobuf.Timestamp updateTime_; + + /** + * + * + *
    +   * Output only. The timestamp at which the schedule was last updated.
    +   * If the schedule has never been updated, this field contains the timestamp
    +   * when the schedule was first created.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + @java.lang.Override + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +   * Output only. The timestamp at which the schedule was last updated.
    +   * If the schedule has never been updated, this field contains the timestamp
    +   * when the schedule was first created.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getUpdateTime() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + /** + * + * + *
    +   * Output only. The timestamp at which the schedule was last updated.
    +   * If the schedule has never been updated, this field contains the timestamp
    +   * when the schedule was first created.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getRetentionDuration()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(4, getEncryptionConfig()); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(6, getSpec()); + } + if (backupTypeSpecCase_ == 7) { + output.writeMessage(7, (com.google.spanner.admin.database.v1.FullBackupSpec) backupTypeSpec_); + } + if (backupTypeSpecCase_ == 8) { + output.writeMessage( + 8, (com.google.spanner.admin.database.v1.IncrementalBackupSpec) backupTypeSpec_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(9, getUpdateTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getRetentionDuration()); + } + if (((bitField0_ & 0x00000004) != 
0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getEncryptionConfig()); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, getSpec()); + } + if (backupTypeSpecCase_ == 7) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 7, (com.google.spanner.admin.database.v1.FullBackupSpec) backupTypeSpec_); + } + if (backupTypeSpecCase_ == 8) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 8, (com.google.spanner.admin.database.v1.IncrementalBackupSpec) backupTypeSpec_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(9, getUpdateTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.BackupSchedule)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.BackupSchedule other = + (com.google.spanner.admin.database.v1.BackupSchedule) obj; + + if (!getName().equals(other.getName())) return false; + if (hasSpec() != other.hasSpec()) return false; + if (hasSpec()) { + if (!getSpec().equals(other.getSpec())) return false; + } + if (hasRetentionDuration() != other.hasRetentionDuration()) return false; + if (hasRetentionDuration()) { + if (!getRetentionDuration().equals(other.getRetentionDuration())) return false; + } + if (hasEncryptionConfig() != other.hasEncryptionConfig()) return false; + if (hasEncryptionConfig()) { + if (!getEncryptionConfig().equals(other.getEncryptionConfig())) return false; + } + if (hasUpdateTime() != other.hasUpdateTime()) return false; + if (hasUpdateTime()) { + if (!getUpdateTime().equals(other.getUpdateTime())) return false; + } + if (!getBackupTypeSpecCase().equals(other.getBackupTypeSpecCase())) return false; 
+ switch (backupTypeSpecCase_) { + case 7: + if (!getFullBackupSpec().equals(other.getFullBackupSpec())) return false; + break; + case 8: + if (!getIncrementalBackupSpec().equals(other.getIncrementalBackupSpec())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (hasSpec()) { + hash = (37 * hash) + SPEC_FIELD_NUMBER; + hash = (53 * hash) + getSpec().hashCode(); + } + if (hasRetentionDuration()) { + hash = (37 * hash) + RETENTION_DURATION_FIELD_NUMBER; + hash = (53 * hash) + getRetentionDuration().hashCode(); + } + if (hasEncryptionConfig()) { + hash = (37 * hash) + ENCRYPTION_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getEncryptionConfig().hashCode(); + } + if (hasUpdateTime()) { + hash = (37 * hash) + UPDATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getUpdateTime().hashCode(); + } + switch (backupTypeSpecCase_) { + case 7: + hash = (37 * hash) + FULL_BACKUP_SPEC_FIELD_NUMBER; + hash = (53 * hash) + getFullBackupSpec().hashCode(); + break; + case 8: + hash = (37 * hash) + INCREMENTAL_BACKUP_SPEC_FIELD_NUMBER; + hash = (53 * hash) + getIncrementalBackupSpec().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseDelimitedFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.admin.database.v1.BackupSchedule prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * BackupSchedule expresses the automated backup creation specification for a
    +   * Spanner database.
    +   * Next ID: 10
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.BackupSchedule} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.BackupSchedule) + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_BackupSchedule_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_BackupSchedule_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.BackupSchedule.class, + com.google.spanner.admin.database.v1.BackupSchedule.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.BackupSchedule.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetSpecFieldBuilder(); + internalGetRetentionDurationFieldBuilder(); + internalGetEncryptionConfigFieldBuilder(); + internalGetUpdateTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + spec_ = null; + if (specBuilder_ != null) { + specBuilder_.dispose(); + specBuilder_ = null; + } + retentionDuration_ = null; + if (retentionDurationBuilder_ != null) { + retentionDurationBuilder_.dispose(); + retentionDurationBuilder_ = null; + } + encryptionConfig_ = null; + 
if (encryptionConfigBuilder_ != null) { + encryptionConfigBuilder_.dispose(); + encryptionConfigBuilder_ = null; + } + if (fullBackupSpecBuilder_ != null) { + fullBackupSpecBuilder_.clear(); + } + if (incrementalBackupSpecBuilder_ != null) { + incrementalBackupSpecBuilder_.clear(); + } + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + backupTypeSpecCase_ = 0; + backupTypeSpec_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_BackupSchedule_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupSchedule getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupSchedule build() { + com.google.spanner.admin.database.v1.BackupSchedule result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupSchedule buildPartial() { + com.google.spanner.admin.database.v1.BackupSchedule result = + new com.google.spanner.admin.database.v1.BackupSchedule(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.BackupSchedule result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.spec_ = specBuilder_ == null ? 
spec_ : specBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.retentionDuration_ = + retentionDurationBuilder_ == null + ? retentionDuration_ + : retentionDurationBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.encryptionConfig_ = + encryptionConfigBuilder_ == null ? encryptionConfig_ : encryptionConfigBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.updateTime_ = updateTimeBuilder_ == null ? updateTime_ : updateTimeBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs(com.google.spanner.admin.database.v1.BackupSchedule result) { + result.backupTypeSpecCase_ = backupTypeSpecCase_; + result.backupTypeSpec_ = this.backupTypeSpec_; + if (backupTypeSpecCase_ == 7 && fullBackupSpecBuilder_ != null) { + result.backupTypeSpec_ = fullBackupSpecBuilder_.build(); + } + if (backupTypeSpecCase_ == 8 && incrementalBackupSpecBuilder_ != null) { + result.backupTypeSpec_ = incrementalBackupSpecBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.BackupSchedule) { + return mergeFrom((com.google.spanner.admin.database.v1.BackupSchedule) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.BackupSchedule other) { + if (other == com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasSpec()) { + mergeSpec(other.getSpec()); + } + if (other.hasRetentionDuration()) { + mergeRetentionDuration(other.getRetentionDuration()); + } + if (other.hasEncryptionConfig()) { + 
mergeEncryptionConfig(other.getEncryptionConfig()); + } + if (other.hasUpdateTime()) { + mergeUpdateTime(other.getUpdateTime()); + } + switch (other.getBackupTypeSpecCase()) { + case FULL_BACKUP_SPEC: + { + mergeFullBackupSpec(other.getFullBackupSpec()); + break; + } + case INCREMENTAL_BACKUP_SPEC: + { + mergeIncrementalBackupSpec(other.getIncrementalBackupSpec()); + break; + } + case BACKUPTYPESPEC_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 26: + { + input.readMessage( + internalGetRetentionDurationFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetEncryptionConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 50: + { + input.readMessage(internalGetSpecFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 50 + case 58: + { + input.readMessage( + internalGetFullBackupSpecFieldBuilder().getBuilder(), extensionRegistry); + backupTypeSpecCase_ = 7; + break; + } // case 58 + case 66: + { + input.readMessage( + internalGetIncrementalBackupSpecFieldBuilder().getBuilder(), extensionRegistry); + backupTypeSpecCase_ = 8; + break; + } // case 66 + case 74: + { + input.readMessage( + 
internalGetUpdateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000040; + break; + } // case 74 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int backupTypeSpecCase_ = 0; + private java.lang.Object backupTypeSpec_; + + public BackupTypeSpecCase getBackupTypeSpecCase() { + return BackupTypeSpecCase.forNumber(backupTypeSpecCase_); + } + + public Builder clearBackupTypeSpec() { + backupTypeSpecCase_ = 0; + backupTypeSpec_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Identifier. Output only for the
    +     * [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchedule] operation.
    +     * Required for the
    +     * [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
    +     * operation. A globally unique identifier for the backup schedule which
    +     * cannot be changed. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]`
    +     * The final segment of the name must be between 2 and 60 characters in
    +     * length.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Identifier. Output only for the
    +     * [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchedule] operation.
    +     * Required for the
    +     * [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
    +     * operation. A globally unique identifier for the backup schedule which
    +     * cannot be changed. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]`
    +     * The final segment of the name must be between 2 and 60 characters in
    +     * length.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Identifier. Output only for the
    +     * [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchedule] operation.
    +     * Required for the
    +     * [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
    +     * operation. A globally unique identifier for the backup schedule which
    +     * cannot be changed. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]`
    +     * The final segment of the name must be between 2 and 60 characters in
    +     * length.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Identifier. Output only for the
    +     * [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchedule] operation.
    +     * Required for the
    +     * [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
    +     * operation. A globally unique identifier for the backup schedule which
    +     * cannot be changed. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]`
    +     * The final segment of the name must be between 2 and 60 characters in
    +     * length.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Identifier. Output only for the
    +     * [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchedule] operation.
    +     * Required for the
    +     * [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
    +     * operation. A globally unique identifier for the backup schedule which
    +     * cannot be changed. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]`
    +     * The final segment of the name must be between 2 and 60 characters in
    +     * length.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.spanner.admin.database.v1.BackupScheduleSpec spec_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.BackupScheduleSpec, + com.google.spanner.admin.database.v1.BackupScheduleSpec.Builder, + com.google.spanner.admin.database.v1.BackupScheduleSpecOrBuilder> + specBuilder_; + + /** + * + * + *
    +     * Optional. The schedule specification based on which the backup creations
    +     * are triggered.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the spec field is set. + */ + public boolean hasSpec() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Optional. The schedule specification based on which the backup creations
    +     * are triggered.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The spec. + */ + public com.google.spanner.admin.database.v1.BackupScheduleSpec getSpec() { + if (specBuilder_ == null) { + return spec_ == null + ? com.google.spanner.admin.database.v1.BackupScheduleSpec.getDefaultInstance() + : spec_; + } else { + return specBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. The schedule specification based on which the backup creations
    +     * are triggered.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSpec(com.google.spanner.admin.database.v1.BackupScheduleSpec value) { + if (specBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + spec_ = value; + } else { + specBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The schedule specification based on which the backup creations
    +     * are triggered.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSpec( + com.google.spanner.admin.database.v1.BackupScheduleSpec.Builder builderForValue) { + if (specBuilder_ == null) { + spec_ = builderForValue.build(); + } else { + specBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The schedule specification based on which the backup creations
    +     * are triggered.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeSpec(com.google.spanner.admin.database.v1.BackupScheduleSpec value) { + if (specBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && spec_ != null + && spec_ + != com.google.spanner.admin.database.v1.BackupScheduleSpec.getDefaultInstance()) { + getSpecBuilder().mergeFrom(value); + } else { + spec_ = value; + } + } else { + specBuilder_.mergeFrom(value); + } + if (spec_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. The schedule specification based on which the backup creations
    +     * are triggered.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearSpec() { + bitField0_ = (bitField0_ & ~0x00000002); + spec_ = null; + if (specBuilder_ != null) { + specBuilder_.dispose(); + specBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The schedule specification based on which the backup creations
    +     * are triggered.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.database.v1.BackupScheduleSpec.Builder getSpecBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetSpecFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. The schedule specification based on which the backup creations
    +     * are triggered.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.database.v1.BackupScheduleSpecOrBuilder getSpecOrBuilder() { + if (specBuilder_ != null) { + return specBuilder_.getMessageOrBuilder(); + } else { + return spec_ == null + ? com.google.spanner.admin.database.v1.BackupScheduleSpec.getDefaultInstance() + : spec_; + } + } + + /** + * + * + *
    +     * Optional. The schedule specification based on which the backup creations
    +     * are triggered.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.BackupScheduleSpec, + com.google.spanner.admin.database.v1.BackupScheduleSpec.Builder, + com.google.spanner.admin.database.v1.BackupScheduleSpecOrBuilder> + internalGetSpecFieldBuilder() { + if (specBuilder_ == null) { + specBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.BackupScheduleSpec, + com.google.spanner.admin.database.v1.BackupScheduleSpec.Builder, + com.google.spanner.admin.database.v1.BackupScheduleSpecOrBuilder>( + getSpec(), getParentForChildren(), isClean()); + spec_ = null; + } + return specBuilder_; + } + + private com.google.protobuf.Duration retentionDuration_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + retentionDurationBuilder_; + + /** + * + * + *
    +     * Optional. The retention duration of a backup that must be at least 6 hours
    +     * and at most 366 days. The backup is eligible to be automatically deleted
    +     * once the retention period has elapsed.
    +     * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the retentionDuration field is set. + */ + public boolean hasRetentionDuration() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Optional. The retention duration of a backup that must be at least 6 hours
    +     * and at most 366 days. The backup is eligible to be automatically deleted
    +     * once the retention period has elapsed.
    +     * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The retentionDuration. + */ + public com.google.protobuf.Duration getRetentionDuration() { + if (retentionDurationBuilder_ == null) { + return retentionDuration_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : retentionDuration_; + } else { + return retentionDurationBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. The retention duration of a backup that must be at least 6 hours
    +     * and at most 366 days. The backup is eligible to be automatically deleted
    +     * once the retention period has elapsed.
    +     * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRetentionDuration(com.google.protobuf.Duration value) { + if (retentionDurationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + retentionDuration_ = value; + } else { + retentionDurationBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The retention duration of a backup that must be at least 6 hours
    +     * and at most 366 days. The backup is eligible to be automatically deleted
    +     * once the retention period has elapsed.
    +     * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRetentionDuration(com.google.protobuf.Duration.Builder builderForValue) { + if (retentionDurationBuilder_ == null) { + retentionDuration_ = builderForValue.build(); + } else { + retentionDurationBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The retention duration of a backup that must be at least 6 hours
    +     * and at most 366 days. The backup is eligible to be automatically deleted
    +     * once the retention period has elapsed.
    +     * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeRetentionDuration(com.google.protobuf.Duration value) { + if (retentionDurationBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && retentionDuration_ != null + && retentionDuration_ != com.google.protobuf.Duration.getDefaultInstance()) { + getRetentionDurationBuilder().mergeFrom(value); + } else { + retentionDuration_ = value; + } + } else { + retentionDurationBuilder_.mergeFrom(value); + } + if (retentionDuration_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. The retention duration of a backup that must be at least 6 hours
    +     * and at most 366 days. The backup is eligible to be automatically deleted
    +     * once the retention period has elapsed.
    +     * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearRetentionDuration() { + bitField0_ = (bitField0_ & ~0x00000004); + retentionDuration_ = null; + if (retentionDurationBuilder_ != null) { + retentionDurationBuilder_.dispose(); + retentionDurationBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The retention duration of a backup that must be at least 6 hours
    +     * and at most 366 days. The backup is eligible to be automatically deleted
    +     * once the retention period has elapsed.
    +     * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Duration.Builder getRetentionDurationBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetRetentionDurationFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. The retention duration of a backup that must be at least 6 hours
    +     * and at most 366 days. The backup is eligible to be automatically deleted
    +     * once the retention period has elapsed.
    +     * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.DurationOrBuilder getRetentionDurationOrBuilder() { + if (retentionDurationBuilder_ != null) { + return retentionDurationBuilder_.getMessageOrBuilder(); + } else { + return retentionDuration_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : retentionDuration_; + } + } + + /** + * + * + *
    +     * Optional. The retention duration of a backup that must be at least 6 hours
    +     * and at most 366 days. The backup is eligible to be automatically deleted
    +     * once the retention period has elapsed.
    +     * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + internalGetRetentionDurationFieldBuilder() { + if (retentionDurationBuilder_ == null) { + retentionDurationBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder>( + getRetentionDuration(), getParentForChildren(), isClean()); + retentionDuration_ = null; + } + return retentionDurationBuilder_; + } + + private com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryptionConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig, + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.Builder, + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfigOrBuilder> + encryptionConfigBuilder_; + + /** + * + * + *
    +     * Optional. The encryption configuration that will be used to encrypt the
    +     * backup. If this field is not specified, the backup will use the same
    +     * encryption configuration as the database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the encryptionConfig field is set. + */ + public boolean hasEncryptionConfig() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Optional. The encryption configuration that will be used to encrypt the
    +     * backup. If this field is not specified, the backup will use the same
    +     * encryption configuration as the database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The encryptionConfig. + */ + public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig getEncryptionConfig() { + if (encryptionConfigBuilder_ == null) { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } else { + return encryptionConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. The encryption configuration that will be used to encrypt the
    +     * backup. If this field is not specified, the backup will use the same
    +     * encryption configuration as the database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setEncryptionConfig( + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig value) { + if (encryptionConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + encryptionConfig_ = value; + } else { + encryptionConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The encryption configuration that will be used to encrypt the
    +     * backup. If this field is not specified, the backup will use the same
    +     * encryption configuration as the database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setEncryptionConfig( + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.Builder builderForValue) { + if (encryptionConfigBuilder_ == null) { + encryptionConfig_ = builderForValue.build(); + } else { + encryptionConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The encryption configuration that will be used to encrypt the
    +     * backup. If this field is not specified, the backup will use the same
    +     * encryption configuration as the database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeEncryptionConfig( + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig value) { + if (encryptionConfigBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && encryptionConfig_ != null + && encryptionConfig_ + != com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig + .getDefaultInstance()) { + getEncryptionConfigBuilder().mergeFrom(value); + } else { + encryptionConfig_ = value; + } + } else { + encryptionConfigBuilder_.mergeFrom(value); + } + if (encryptionConfig_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. The encryption configuration that will be used to encrypt the
    +     * backup. If this field is not specified, the backup will use the same
    +     * encryption configuration as the database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearEncryptionConfig() { + bitField0_ = (bitField0_ & ~0x00000008); + encryptionConfig_ = null; + if (encryptionConfigBuilder_ != null) { + encryptionConfigBuilder_.dispose(); + encryptionConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The encryption configuration that will be used to encrypt the
    +     * backup. If this field is not specified, the backup will use the same
    +     * encryption configuration as the database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.Builder + getEncryptionConfigBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetEncryptionConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. The encryption configuration that will be used to encrypt the
    +     * backup. If this field is not specified, the backup will use the same
    +     * encryption configuration as the database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfigOrBuilder + getEncryptionConfigOrBuilder() { + if (encryptionConfigBuilder_ != null) { + return encryptionConfigBuilder_.getMessageOrBuilder(); + } else { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + } + + /** + * + * + *
    +     * Optional. The encryption configuration that will be used to encrypt the
    +     * backup. If this field is not specified, the backup will use the same
    +     * encryption configuration as the database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig, + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.Builder, + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfigOrBuilder> + internalGetEncryptionConfigFieldBuilder() { + if (encryptionConfigBuilder_ == null) { + encryptionConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig, + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.Builder, + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfigOrBuilder>( + getEncryptionConfig(), getParentForChildren(), isClean()); + encryptionConfig_ = null; + } + return encryptionConfigBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.FullBackupSpec, + com.google.spanner.admin.database.v1.FullBackupSpec.Builder, + com.google.spanner.admin.database.v1.FullBackupSpecOrBuilder> + fullBackupSpecBuilder_; + + /** + * + * + *
    +     * The schedule creates only full backups.
    +     * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + * + * @return Whether the fullBackupSpec field is set. + */ + @java.lang.Override + public boolean hasFullBackupSpec() { + return backupTypeSpecCase_ == 7; + } + + /** + * + * + *
    +     * The schedule creates only full backups.
    +     * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + * + * @return The fullBackupSpec. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.FullBackupSpec getFullBackupSpec() { + if (fullBackupSpecBuilder_ == null) { + if (backupTypeSpecCase_ == 7) { + return (com.google.spanner.admin.database.v1.FullBackupSpec) backupTypeSpec_; + } + return com.google.spanner.admin.database.v1.FullBackupSpec.getDefaultInstance(); + } else { + if (backupTypeSpecCase_ == 7) { + return fullBackupSpecBuilder_.getMessage(); + } + return com.google.spanner.admin.database.v1.FullBackupSpec.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * The schedule creates only full backups.
    +     * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + */ + public Builder setFullBackupSpec(com.google.spanner.admin.database.v1.FullBackupSpec value) { + if (fullBackupSpecBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + backupTypeSpec_ = value; + onChanged(); + } else { + fullBackupSpecBuilder_.setMessage(value); + } + backupTypeSpecCase_ = 7; + return this; + } + + /** + * + * + *
    +     * The schedule creates only full backups.
    +     * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + */ + public Builder setFullBackupSpec( + com.google.spanner.admin.database.v1.FullBackupSpec.Builder builderForValue) { + if (fullBackupSpecBuilder_ == null) { + backupTypeSpec_ = builderForValue.build(); + onChanged(); + } else { + fullBackupSpecBuilder_.setMessage(builderForValue.build()); + } + backupTypeSpecCase_ = 7; + return this; + } + + /** + * + * + *
    +     * The schedule creates only full backups.
    +     * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + */ + public Builder mergeFullBackupSpec(com.google.spanner.admin.database.v1.FullBackupSpec value) { + if (fullBackupSpecBuilder_ == null) { + if (backupTypeSpecCase_ == 7 + && backupTypeSpec_ + != com.google.spanner.admin.database.v1.FullBackupSpec.getDefaultInstance()) { + backupTypeSpec_ = + com.google.spanner.admin.database.v1.FullBackupSpec.newBuilder( + (com.google.spanner.admin.database.v1.FullBackupSpec) backupTypeSpec_) + .mergeFrom(value) + .buildPartial(); + } else { + backupTypeSpec_ = value; + } + onChanged(); + } else { + if (backupTypeSpecCase_ == 7) { + fullBackupSpecBuilder_.mergeFrom(value); + } else { + fullBackupSpecBuilder_.setMessage(value); + } + } + backupTypeSpecCase_ = 7; + return this; + } + + /** + * + * + *
    +     * The schedule creates only full backups.
    +     * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + */ + public Builder clearFullBackupSpec() { + if (fullBackupSpecBuilder_ == null) { + if (backupTypeSpecCase_ == 7) { + backupTypeSpecCase_ = 0; + backupTypeSpec_ = null; + onChanged(); + } + } else { + if (backupTypeSpecCase_ == 7) { + backupTypeSpecCase_ = 0; + backupTypeSpec_ = null; + } + fullBackupSpecBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * The schedule creates only full backups.
    +     * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + */ + public com.google.spanner.admin.database.v1.FullBackupSpec.Builder getFullBackupSpecBuilder() { + return internalGetFullBackupSpecFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The schedule creates only full backups.
    +     * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.FullBackupSpecOrBuilder + getFullBackupSpecOrBuilder() { + if ((backupTypeSpecCase_ == 7) && (fullBackupSpecBuilder_ != null)) { + return fullBackupSpecBuilder_.getMessageOrBuilder(); + } else { + if (backupTypeSpecCase_ == 7) { + return (com.google.spanner.admin.database.v1.FullBackupSpec) backupTypeSpec_; + } + return com.google.spanner.admin.database.v1.FullBackupSpec.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * The schedule creates only full backups.
    +     * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.FullBackupSpec, + com.google.spanner.admin.database.v1.FullBackupSpec.Builder, + com.google.spanner.admin.database.v1.FullBackupSpecOrBuilder> + internalGetFullBackupSpecFieldBuilder() { + if (fullBackupSpecBuilder_ == null) { + if (!(backupTypeSpecCase_ == 7)) { + backupTypeSpec_ = + com.google.spanner.admin.database.v1.FullBackupSpec.getDefaultInstance(); + } + fullBackupSpecBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.FullBackupSpec, + com.google.spanner.admin.database.v1.FullBackupSpec.Builder, + com.google.spanner.admin.database.v1.FullBackupSpecOrBuilder>( + (com.google.spanner.admin.database.v1.FullBackupSpec) backupTypeSpec_, + getParentForChildren(), + isClean()); + backupTypeSpec_ = null; + } + backupTypeSpecCase_ = 7; + onChanged(); + return fullBackupSpecBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.IncrementalBackupSpec, + com.google.spanner.admin.database.v1.IncrementalBackupSpec.Builder, + com.google.spanner.admin.database.v1.IncrementalBackupSpecOrBuilder> + incrementalBackupSpecBuilder_; + + /** + * + * + *
    +     * The schedule creates incremental backup chains.
    +     * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + * + * @return Whether the incrementalBackupSpec field is set. + */ + @java.lang.Override + public boolean hasIncrementalBackupSpec() { + return backupTypeSpecCase_ == 8; + } + + /** + * + * + *
    +     * The schedule creates incremental backup chains.
    +     * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + * + * @return The incrementalBackupSpec. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.IncrementalBackupSpec getIncrementalBackupSpec() { + if (incrementalBackupSpecBuilder_ == null) { + if (backupTypeSpecCase_ == 8) { + return (com.google.spanner.admin.database.v1.IncrementalBackupSpec) backupTypeSpec_; + } + return com.google.spanner.admin.database.v1.IncrementalBackupSpec.getDefaultInstance(); + } else { + if (backupTypeSpecCase_ == 8) { + return incrementalBackupSpecBuilder_.getMessage(); + } + return com.google.spanner.admin.database.v1.IncrementalBackupSpec.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * The schedule creates incremental backup chains.
    +     * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + */ + public Builder setIncrementalBackupSpec( + com.google.spanner.admin.database.v1.IncrementalBackupSpec value) { + if (incrementalBackupSpecBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + backupTypeSpec_ = value; + onChanged(); + } else { + incrementalBackupSpecBuilder_.setMessage(value); + } + backupTypeSpecCase_ = 8; + return this; + } + + /** + * + * + *
    +     * The schedule creates incremental backup chains.
    +     * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + */ + public Builder setIncrementalBackupSpec( + com.google.spanner.admin.database.v1.IncrementalBackupSpec.Builder builderForValue) { + if (incrementalBackupSpecBuilder_ == null) { + backupTypeSpec_ = builderForValue.build(); + onChanged(); + } else { + incrementalBackupSpecBuilder_.setMessage(builderForValue.build()); + } + backupTypeSpecCase_ = 8; + return this; + } + + /** + * + * + *
    +     * The schedule creates incremental backup chains.
    +     * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + */ + public Builder mergeIncrementalBackupSpec( + com.google.spanner.admin.database.v1.IncrementalBackupSpec value) { + if (incrementalBackupSpecBuilder_ == null) { + if (backupTypeSpecCase_ == 8 + && backupTypeSpec_ + != com.google.spanner.admin.database.v1.IncrementalBackupSpec + .getDefaultInstance()) { + backupTypeSpec_ = + com.google.spanner.admin.database.v1.IncrementalBackupSpec.newBuilder( + (com.google.spanner.admin.database.v1.IncrementalBackupSpec) backupTypeSpec_) + .mergeFrom(value) + .buildPartial(); + } else { + backupTypeSpec_ = value; + } + onChanged(); + } else { + if (backupTypeSpecCase_ == 8) { + incrementalBackupSpecBuilder_.mergeFrom(value); + } else { + incrementalBackupSpecBuilder_.setMessage(value); + } + } + backupTypeSpecCase_ = 8; + return this; + } + + /** + * + * + *
    +     * The schedule creates incremental backup chains.
    +     * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + */ + public Builder clearIncrementalBackupSpec() { + if (incrementalBackupSpecBuilder_ == null) { + if (backupTypeSpecCase_ == 8) { + backupTypeSpecCase_ = 0; + backupTypeSpec_ = null; + onChanged(); + } + } else { + if (backupTypeSpecCase_ == 8) { + backupTypeSpecCase_ = 0; + backupTypeSpec_ = null; + } + incrementalBackupSpecBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * The schedule creates incremental backup chains.
    +     * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + */ + public com.google.spanner.admin.database.v1.IncrementalBackupSpec.Builder + getIncrementalBackupSpecBuilder() { + return internalGetIncrementalBackupSpecFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The schedule creates incremental backup chains.
    +     * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.IncrementalBackupSpecOrBuilder + getIncrementalBackupSpecOrBuilder() { + if ((backupTypeSpecCase_ == 8) && (incrementalBackupSpecBuilder_ != null)) { + return incrementalBackupSpecBuilder_.getMessageOrBuilder(); + } else { + if (backupTypeSpecCase_ == 8) { + return (com.google.spanner.admin.database.v1.IncrementalBackupSpec) backupTypeSpec_; + } + return com.google.spanner.admin.database.v1.IncrementalBackupSpec.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * The schedule creates incremental backup chains.
    +     * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.IncrementalBackupSpec, + com.google.spanner.admin.database.v1.IncrementalBackupSpec.Builder, + com.google.spanner.admin.database.v1.IncrementalBackupSpecOrBuilder> + internalGetIncrementalBackupSpecFieldBuilder() { + if (incrementalBackupSpecBuilder_ == null) { + if (!(backupTypeSpecCase_ == 8)) { + backupTypeSpec_ = + com.google.spanner.admin.database.v1.IncrementalBackupSpec.getDefaultInstance(); + } + incrementalBackupSpecBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.IncrementalBackupSpec, + com.google.spanner.admin.database.v1.IncrementalBackupSpec.Builder, + com.google.spanner.admin.database.v1.IncrementalBackupSpecOrBuilder>( + (com.google.spanner.admin.database.v1.IncrementalBackupSpec) backupTypeSpec_, + getParentForChildren(), + isClean()); + backupTypeSpec_ = null; + } + backupTypeSpecCase_ = 8; + onChanged(); + return incrementalBackupSpecBuilder_; + } + + private com.google.protobuf.Timestamp updateTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + updateTimeBuilder_; + + /** + * + * + *
    +     * Output only. The timestamp at which the schedule was last updated.
    +     * If the schedule has never been updated, this field contains the timestamp
    +     * when the schedule was first created.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
    +     * Output only. The timestamp at which the schedule was last updated.
    +     * If the schedule has never been updated, this field contains the timestamp
    +     * when the schedule was first created.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + public com.google.protobuf.Timestamp getUpdateTime() { + if (updateTimeBuilder_ == null) { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } else { + return updateTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. The timestamp at which the schedule was last updated.
    +     * If the schedule has never been updated, this field contains the timestamp
    +     * when the schedule was first created.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateTime_ = value; + } else { + updateTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The timestamp at which the schedule was last updated.
    +     * If the schedule has never been updated, this field contains the timestamp
    +     * when the schedule was first created.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (updateTimeBuilder_ == null) { + updateTime_ = builderForValue.build(); + } else { + updateTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The timestamp at which the schedule was last updated.
    +     * If the schedule has never been updated, this field contains the timestamp
    +     * when the schedule was first created.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (((bitField0_ & 0x00000040) != 0) + && updateTime_ != null + && updateTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getUpdateTimeBuilder().mergeFrom(value); + } else { + updateTime_ = value; + } + } else { + updateTimeBuilder_.mergeFrom(value); + } + if (updateTime_ != null) { + bitField0_ |= 0x00000040; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. The timestamp at which the schedule was last updated.
    +     * If the schedule has never been updated, this field contains the timestamp
    +     * when the schedule was first created.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearUpdateTime() { + bitField0_ = (bitField0_ & ~0x00000040); + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The timestamp at which the schedule was last updated.
    +     * If the schedule has never been updated, this field contains the timestamp
    +     * when the schedule was first created.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getUpdateTimeBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return internalGetUpdateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. The timestamp at which the schedule was last updated.
    +     * If the schedule has never been updated, this field contains the timestamp
    +     * when the schedule was first created.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + if (updateTimeBuilder_ != null) { + return updateTimeBuilder_.getMessageOrBuilder(); + } else { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } + } + + /** + * + * + *
    +     * Output only. The timestamp at which the schedule was last updated.
    +     * If the schedule has never been updated, this field contains the timestamp
    +     * when the schedule was first created.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetUpdateTimeFieldBuilder() { + if (updateTimeBuilder_ == null) { + updateTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getUpdateTime(), getParentForChildren(), isClean()); + updateTime_ = null; + } + return updateTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.BackupSchedule) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.BackupSchedule) + private static final com.google.spanner.admin.database.v1.BackupSchedule DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.BackupSchedule(); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BackupSchedule parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupSchedule getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleName.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleName.java new file mode 100644 index 000000000000..0fd9b79cd7a8 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleName.java @@ -0,0 +1,261 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.spanner.admin.database.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class BackupScheduleName implements ResourceName { + private static final PathTemplate PROJECT_INSTANCE_DATABASE_SCHEDULE = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/instances/{instance}/databases/{database}/backupSchedules/{schedule}"); + private volatile Map fieldValuesMap; + private final String project; + private final String instance; + private final String database; + private final String schedule; + + @Deprecated + protected BackupScheduleName() { + project = null; + instance = null; + database = null; + schedule = null; + } + + private BackupScheduleName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + instance = Preconditions.checkNotNull(builder.getInstance()); + database = Preconditions.checkNotNull(builder.getDatabase()); + schedule = Preconditions.checkNotNull(builder.getSchedule()); + } + + public String getProject() { + return project; + } + + public String getInstance() { + return instance; + } + + public String getDatabase() { + return database; + } + + public String getSchedule() { + return schedule; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static BackupScheduleName of( + String project, String instance, String database, String schedule) { + return newBuilder() + .setProject(project) + .setInstance(instance) + .setDatabase(database) + .setSchedule(schedule) + .build(); + } + + public static String format(String project, String instance, String database, String schedule) { + return newBuilder() + .setProject(project) + .setInstance(instance) + .setDatabase(database) + .setSchedule(schedule) + .build() + .toString(); + } + + public static BackupScheduleName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_INSTANCE_DATABASE_SCHEDULE.validatedMatch( + formattedString, 
"BackupScheduleName.parse: formattedString not in valid format"); + return of( + matchMap.get("project"), + matchMap.get("instance"), + matchMap.get("database"), + matchMap.get("schedule")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (BackupScheduleName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_INSTANCE_DATABASE_SCHEDULE.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (instance != null) { + fieldMapBuilder.put("instance", instance); + } + if (database != null) { + fieldMapBuilder.put("database", database); + } + if (schedule != null) { + fieldMapBuilder.put("schedule", schedule); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_INSTANCE_DATABASE_SCHEDULE.instantiate( + "project", project, "instance", instance, "database", database, "schedule", schedule); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + BackupScheduleName that = ((BackupScheduleName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.instance, that.instance) + && 
Objects.equals(this.database, that.database) + && Objects.equals(this.schedule, that.schedule); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(instance); + h *= 1000003; + h ^= Objects.hashCode(database); + h *= 1000003; + h ^= Objects.hashCode(schedule); + return h; + } + + /** + * Builder for + * projects/{project}/instances/{instance}/databases/{database}/backupSchedules/{schedule}. + */ + public static class Builder { + private String project; + private String instance; + private String database; + private String schedule; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getInstance() { + return instance; + } + + public String getDatabase() { + return database; + } + + public String getSchedule() { + return schedule; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setInstance(String instance) { + this.instance = instance; + return this; + } + + public Builder setDatabase(String database) { + this.database = database; + return this; + } + + public Builder setSchedule(String schedule) { + this.schedule = schedule; + return this; + } + + private Builder(BackupScheduleName backupScheduleName) { + this.project = backupScheduleName.project; + this.instance = backupScheduleName.instance; + this.database = backupScheduleName.database; + this.schedule = backupScheduleName.schedule; + } + + public BackupScheduleName build() { + return new BackupScheduleName(this); + } + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleOrBuilder.java new file mode 100644 index 000000000000..4e5a79ab54ce --- /dev/null +++ 
b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleOrBuilder.java @@ -0,0 +1,341 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup_schedule.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface BackupScheduleOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.BackupSchedule) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Identifier. Output only for the
    +   * [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchededule] operation.
    +   * Required for the
    +   * [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
    +   * operation. A globally unique identifier for the backup schedule which
    +   * cannot be changed. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]`
    +   * The final segment of the name must be between 2 and 60 characters in
    +   * length.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Identifier. Output only for the
    +   * [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchededule] operation.
    +   * Required for the
    +   * [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
    +   * operation. A globally unique identifier for the backup schedule which
    +   * cannot be changed. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]`
    +   * The final segment of the name must be between 2 and 60 characters in
    +   * length.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +   * Optional. The schedule specification based on which the backup creations
    +   * are triggered.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the spec field is set. + */ + boolean hasSpec(); + + /** + * + * + *
    +   * Optional. The schedule specification based on which the backup creations
    +   * are triggered.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The spec. + */ + com.google.spanner.admin.database.v1.BackupScheduleSpec getSpec(); + + /** + * + * + *
    +   * Optional. The schedule specification based on which the backup creations
    +   * are triggered.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.admin.database.v1.BackupScheduleSpecOrBuilder getSpecOrBuilder(); + + /** + * + * + *
    +   * Optional. The retention duration of a backup that must be at least 6 hours
    +   * and at most 366 days. The backup is eligible to be automatically deleted
    +   * once the retention period has elapsed.
    +   * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the retentionDuration field is set. + */ + boolean hasRetentionDuration(); + + /** + * + * + *
    +   * Optional. The retention duration of a backup that must be at least 6 hours
    +   * and at most 366 days. The backup is eligible to be automatically deleted
    +   * once the retention period has elapsed.
    +   * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The retentionDuration. + */ + com.google.protobuf.Duration getRetentionDuration(); + + /** + * + * + *
    +   * Optional. The retention duration of a backup that must be at least 6 hours
    +   * and at most 366 days. The backup is eligible to be automatically deleted
    +   * once the retention period has elapsed.
    +   * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.DurationOrBuilder getRetentionDurationOrBuilder(); + + /** + * + * + *
    +   * Optional. The encryption configuration that will be used to encrypt the
    +   * backup. If this field is not specified, the backup will use the same
    +   * encryption configuration as the database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the encryptionConfig field is set. + */ + boolean hasEncryptionConfig(); + + /** + * + * + *
    +   * Optional. The encryption configuration that will be used to encrypt the
    +   * backup. If this field is not specified, the backup will use the same
    +   * encryption configuration as the database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The encryptionConfig. + */ + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig getEncryptionConfig(); + + /** + * + * + *
    +   * Optional. The encryption configuration that will be used to encrypt the
    +   * backup. If this field is not specified, the backup will use the same
    +   * encryption configuration as the database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfigOrBuilder + getEncryptionConfigOrBuilder(); + + /** + * + * + *
    +   * The schedule creates only full backups.
    +   * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + * + * @return Whether the fullBackupSpec field is set. + */ + boolean hasFullBackupSpec(); + + /** + * + * + *
    +   * The schedule creates only full backups.
    +   * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + * + * @return The fullBackupSpec. + */ + com.google.spanner.admin.database.v1.FullBackupSpec getFullBackupSpec(); + + /** + * + * + *
    +   * The schedule creates only full backups.
    +   * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + */ + com.google.spanner.admin.database.v1.FullBackupSpecOrBuilder getFullBackupSpecOrBuilder(); + + /** + * + * + *
    +   * The schedule creates incremental backup chains.
    +   * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + * + * @return Whether the incrementalBackupSpec field is set. + */ + boolean hasIncrementalBackupSpec(); + + /** + * + * + *
    +   * The schedule creates incremental backup chains.
    +   * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + * + * @return The incrementalBackupSpec. + */ + com.google.spanner.admin.database.v1.IncrementalBackupSpec getIncrementalBackupSpec(); + + /** + * + * + *
    +   * The schedule creates incremental backup chains.
    +   * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + */ + com.google.spanner.admin.database.v1.IncrementalBackupSpecOrBuilder + getIncrementalBackupSpecOrBuilder(); + + /** + * + * + *
    +   * Output only. The timestamp at which the schedule was last updated.
    +   * If the schedule has never been updated, this field contains the timestamp
    +   * when the schedule was first created.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + boolean hasUpdateTime(); + + /** + * + * + *
    +   * Output only. The timestamp at which the schedule was last updated.
    +   * If the schedule has never been updated, this field contains the timestamp
    +   * when the schedule was first created.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + com.google.protobuf.Timestamp getUpdateTime(); + + /** + * + * + *
    +   * Output only. The timestamp at which the schedule was last updated.
    +   * If the schedule has never been updated, this field contains the timestamp
    +   * when the schedule was first created.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder(); + + com.google.spanner.admin.database.v1.BackupSchedule.BackupTypeSpecCase getBackupTypeSpecCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleProto.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleProto.java new file mode 100644 index 000000000000..e5cdc336c620 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleProto.java @@ -0,0 +1,260 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup_schedule.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public final class BackupScheduleProto extends com.google.protobuf.GeneratedFile { + private BackupScheduleProto() {} + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BackupScheduleProto"); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_BackupScheduleSpec_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_BackupScheduleSpec_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_BackupSchedule_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_BackupSchedule_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_CrontabSpec_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_CrontabSpec_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_CreateBackupScheduleRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_spanner_admin_database_v1_CreateBackupScheduleRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_GetBackupScheduleRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_GetBackupScheduleRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_DeleteBackupScheduleRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_DeleteBackupScheduleRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_ListBackupSchedulesRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_ListBackupSchedulesRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_ListBackupSchedulesResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_ListBackupSchedulesResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_UpdateBackupScheduleRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_UpdateBackupScheduleRequest_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n" + + "6google/spanner/admin/database/v1/backup_schedule.proto\022 
google.spanner.admin.d" + + "atabase.v1\032\037google/api/field_behavior.pr" + + "oto\032\031google/api/resource.proto\032\036google/protobuf/duration.proto\032" + + " google/protobuf/field_mask.proto\032\037google/protobuf/timest" + + "amp.proto\032-google/spanner/admin/database/v1/backup.proto\"i\n" + + "\022BackupScheduleSpec\022B\n" + + "\tcron_spec\030\001" + + " \001(\0132-.google.spanner.admin.database.v1.CrontabSpecH\000B\017\n\r" + + "schedule_spec\"\244\005\n" + + "\016BackupSchedule\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\010\022G\n" + + "\004spec\030\006" + + " \001(\01324.google.spanner.admin.database.v1.BackupScheduleSpecB\003\340A\001\022:\n" + + "\022retention_duration\030\003" + + " \001(\0132\031.google.protobuf.DurationB\003\340A\001\022^\n" + + "\021encryption_config\030\004 \001(" + + "\0132>.google.spanner.admin.database.v1.CreateBackupEncryptionConfigB\003\340A\001\022L\n" + + "\020full_backup_spec\030\007" + + " \001(\01320.google.spanner.admin.database.v1.FullBackupSpecH\000\022Z\n" + + "\027incremental_backup_spec\030\010 \001(\01327.google.spanner.a" + + "dmin.database.v1.IncrementalBackupSpecH\000\0224\n" + + "\013update_time\030\t" + + " \001(\0132\032.google.protobuf.TimestampB\003\340A\003:\245\001\352A\241\001\n" + + "%spanner.googleapis.com/BackupSchedule\022Wprojects/{project}" + + "/instances/{instance}/databases/{databas" + + "e}/backupSchedules/{schedule}*\017backupSchedules2\016backupScheduleB\022\n" + + "\020backup_type_spec\"q\n" + + "\013CrontabSpec\022\021\n" + + "\004text\030\001 \001(\tB\003\340A\002\022\026\n" + + "\ttime_zone\030\002 \001(\tB\003\340A\003\0227\n" + + "\017creation_window\030\003" + + " \001(\0132\031.google.protobuf.DurationB\003\340A\003\"\307\001\n" + + "\033CreateBackupScheduleRequest\0227\n" + + "\006parent\030\001 \001(\tB\'\340A\002\372A!\n" + + "\037spanner.googleapis.com/Database\022\037\n" + + "\022backup_schedule_id\030\002 \001(\tB\003\340A\002\022N\n" + + 
"\017backup_schedule\030\003 \001(\01320.google.spanne" + + "r.admin.database.v1.BackupScheduleB\003\340A\002\"W\n" + + "\030GetBackupScheduleRequest\022;\n" + + "\004name\030\001 \001(\tB-\340A\002\372A\'\n" + + "%spanner.googleapis.com/BackupSchedule\"Z\n" + + "\033DeleteBackupScheduleRequest\022;\n" + + "\004name\030\001 \001(\tB-\340A\002\372A\'\n" + + "%spanner.googleapis.com/BackupSchedule\"\206\001\n" + + "\032ListBackupSchedulesRequest\0227\n" + + "\006parent\030\001 \001(\tB\'\340A\002\372A!\n" + + "\037spanner.googleapis.com/Database\022\026\n" + + "\tpage_size\030\002 \001(\005B\003\340A\001\022\027\n\n" + + "page_token\030\004 \001(\tB\003\340A\001\"\202\001\n" + + "\033ListBackupSchedulesResponse\022J\n" + + "\020backup_schedules\030\001" + + " \003(\01320.google.spanner.admin.database.v1.BackupSchedule\022\027\n" + + "\017next_page_token\030\002 \001(\t\"\243\001\n" + + "\033UpdateBackupScheduleRequest\022N\n" + + "\017backup_schedule\030\001 \001(\01320.google.spa" + + "nner.admin.database.v1.BackupScheduleB\003\340A\002\0224\n" + + "\013update_mask\030\002" + + " \001(\0132\032.google.protobuf.FieldMaskB\003\340A\002B\205\002\n" + + "$com.google.spanner.admin.database.v1B\023BackupScheduleProtoP\001" + + "ZFcloud.google.com/go/spanner/admin/database/apiv1/databasepb;databasepb\252\002&Googl" + + "e.Cloud.Spanner.Admin.Database.V1\312\002&Goog" + + "le\\Cloud\\Spanner\\Admin\\Database\\V1\352\002+Goo" + + "gle::Cloud::Spanner::Admin::Database::V1b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.protobuf.DurationProto.getDescriptor(), + com.google.protobuf.FieldMaskProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + 
com.google.spanner.admin.database.v1.BackupProto.getDescriptor(), + }); + internal_static_google_spanner_admin_database_v1_BackupScheduleSpec_descriptor = + getDescriptor().getMessageType(0); + internal_static_google_spanner_admin_database_v1_BackupScheduleSpec_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_BackupScheduleSpec_descriptor, + new java.lang.String[] { + "CronSpec", "ScheduleSpec", + }); + internal_static_google_spanner_admin_database_v1_BackupSchedule_descriptor = + getDescriptor().getMessageType(1); + internal_static_google_spanner_admin_database_v1_BackupSchedule_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_BackupSchedule_descriptor, + new java.lang.String[] { + "Name", + "Spec", + "RetentionDuration", + "EncryptionConfig", + "FullBackupSpec", + "IncrementalBackupSpec", + "UpdateTime", + "BackupTypeSpec", + }); + internal_static_google_spanner_admin_database_v1_CrontabSpec_descriptor = + getDescriptor().getMessageType(2); + internal_static_google_spanner_admin_database_v1_CrontabSpec_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_CrontabSpec_descriptor, + new java.lang.String[] { + "Text", "TimeZone", "CreationWindow", + }); + internal_static_google_spanner_admin_database_v1_CreateBackupScheduleRequest_descriptor = + getDescriptor().getMessageType(3); + internal_static_google_spanner_admin_database_v1_CreateBackupScheduleRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_CreateBackupScheduleRequest_descriptor, + new java.lang.String[] { + "Parent", "BackupScheduleId", "BackupSchedule", + }); + internal_static_google_spanner_admin_database_v1_GetBackupScheduleRequest_descriptor = + 
getDescriptor().getMessageType(4); + internal_static_google_spanner_admin_database_v1_GetBackupScheduleRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_GetBackupScheduleRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_spanner_admin_database_v1_DeleteBackupScheduleRequest_descriptor = + getDescriptor().getMessageType(5); + internal_static_google_spanner_admin_database_v1_DeleteBackupScheduleRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_DeleteBackupScheduleRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_spanner_admin_database_v1_ListBackupSchedulesRequest_descriptor = + getDescriptor().getMessageType(6); + internal_static_google_spanner_admin_database_v1_ListBackupSchedulesRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_ListBackupSchedulesRequest_descriptor, + new java.lang.String[] { + "Parent", "PageSize", "PageToken", + }); + internal_static_google_spanner_admin_database_v1_ListBackupSchedulesResponse_descriptor = + getDescriptor().getMessageType(7); + internal_static_google_spanner_admin_database_v1_ListBackupSchedulesResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_ListBackupSchedulesResponse_descriptor, + new java.lang.String[] { + "BackupSchedules", "NextPageToken", + }); + internal_static_google_spanner_admin_database_v1_UpdateBackupScheduleRequest_descriptor = + getDescriptor().getMessageType(8); + internal_static_google_spanner_admin_database_v1_UpdateBackupScheduleRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_google_spanner_admin_database_v1_UpdateBackupScheduleRequest_descriptor, + new java.lang.String[] { + "BackupSchedule", "UpdateMask", + }); + descriptor.resolveAllFeaturesImmutable(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + com.google.protobuf.DurationProto.getDescriptor(); + com.google.protobuf.FieldMaskProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + com.google.spanner.admin.database.v1.BackupProto.getDescriptor(); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.ResourceProto.resource); + registry.add(com.google.api.ResourceProto.resourceReference); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleSpec.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleSpec.java new file mode 100644 index 000000000000..22075bdc46ad --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleSpec.java @@ -0,0 +1,798 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup_schedule.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Defines specifications of the backup schedule.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.BackupScheduleSpec} + */ +@com.google.protobuf.Generated +public final class BackupScheduleSpec extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.BackupScheduleSpec) + BackupScheduleSpecOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BackupScheduleSpec"); + } + + // Use BackupScheduleSpec.newBuilder() to construct. + private BackupScheduleSpec(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BackupScheduleSpec() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_BackupScheduleSpec_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_BackupScheduleSpec_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.BackupScheduleSpec.class, + com.google.spanner.admin.database.v1.BackupScheduleSpec.Builder.class); + } + + private int scheduleSpecCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object scheduleSpec_; + + public enum ScheduleSpecCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + CRON_SPEC(1), + SCHEDULESPEC_NOT_SET(0); + private final int value; + + private ScheduleSpecCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to 
look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ScheduleSpecCase valueOf(int value) { + return forNumber(value); + } + + public static ScheduleSpecCase forNumber(int value) { + switch (value) { + case 1: + return CRON_SPEC; + case 0: + return SCHEDULESPEC_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public ScheduleSpecCase getScheduleSpecCase() { + return ScheduleSpecCase.forNumber(scheduleSpecCase_); + } + + public static final int CRON_SPEC_FIELD_NUMBER = 1; + + /** + * + * + *
    +   * Cron style schedule specification.
    +   * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + * + * @return Whether the cronSpec field is set. + */ + @java.lang.Override + public boolean hasCronSpec() { + return scheduleSpecCase_ == 1; + } + + /** + * + * + *
    +   * Cron style schedule specification.
    +   * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + * + * @return The cronSpec. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.CrontabSpec getCronSpec() { + if (scheduleSpecCase_ == 1) { + return (com.google.spanner.admin.database.v1.CrontabSpec) scheduleSpec_; + } + return com.google.spanner.admin.database.v1.CrontabSpec.getDefaultInstance(); + } + + /** + * + * + *
    +   * Cron style schedule specification.
    +   * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.CrontabSpecOrBuilder getCronSpecOrBuilder() { + if (scheduleSpecCase_ == 1) { + return (com.google.spanner.admin.database.v1.CrontabSpec) scheduleSpec_; + } + return com.google.spanner.admin.database.v1.CrontabSpec.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (scheduleSpecCase_ == 1) { + output.writeMessage(1, (com.google.spanner.admin.database.v1.CrontabSpec) scheduleSpec_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (scheduleSpecCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, (com.google.spanner.admin.database.v1.CrontabSpec) scheduleSpec_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.BackupScheduleSpec)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.BackupScheduleSpec other = + (com.google.spanner.admin.database.v1.BackupScheduleSpec) obj; + + if (!getScheduleSpecCase().equals(other.getScheduleSpecCase())) return false; + switch (scheduleSpecCase_) { + case 1: + if (!getCronSpec().equals(other.getCronSpec())) return false; + break; + case 0: + default: + } + if 
(!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (scheduleSpecCase_) { + case 1: + hash = (37 * hash) + CRON_SPEC_FIELD_NUMBER; + hash = (53 * hash) + getCronSpec().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + 
com.google.spanner.admin.database.v1.BackupScheduleSpec prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Defines specifications of the backup schedule.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.BackupScheduleSpec} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.BackupScheduleSpec) + com.google.spanner.admin.database.v1.BackupScheduleSpecOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_BackupScheduleSpec_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_BackupScheduleSpec_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.BackupScheduleSpec.class, + com.google.spanner.admin.database.v1.BackupScheduleSpec.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.BackupScheduleSpec.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (cronSpecBuilder_ != null) { + cronSpecBuilder_.clear(); + } + scheduleSpecCase_ = 0; + scheduleSpec_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_BackupScheduleSpec_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupScheduleSpec getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.BackupScheduleSpec.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.admin.database.v1.BackupScheduleSpec build() { + com.google.spanner.admin.database.v1.BackupScheduleSpec result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupScheduleSpec buildPartial() { + com.google.spanner.admin.database.v1.BackupScheduleSpec result = + new com.google.spanner.admin.database.v1.BackupScheduleSpec(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.BackupScheduleSpec result) { + int from_bitField0_ = bitField0_; + } + + private void buildPartialOneofs( + com.google.spanner.admin.database.v1.BackupScheduleSpec result) { + result.scheduleSpecCase_ = scheduleSpecCase_; + result.scheduleSpec_ = this.scheduleSpec_; + if (scheduleSpecCase_ == 1 && cronSpecBuilder_ != null) { + result.scheduleSpec_ = cronSpecBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.BackupScheduleSpec) { + return mergeFrom((com.google.spanner.admin.database.v1.BackupScheduleSpec) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.BackupScheduleSpec other) { + if (other == com.google.spanner.admin.database.v1.BackupScheduleSpec.getDefaultInstance()) + return this; + switch (other.getScheduleSpecCase()) { + case CRON_SPEC: + { + mergeCronSpec(other.getCronSpec()); + break; + } + case SCHEDULESPEC_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetCronSpecFieldBuilder().getBuilder(), extensionRegistry); + scheduleSpecCase_ = 1; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int scheduleSpecCase_ = 0; + private java.lang.Object scheduleSpec_; + + public ScheduleSpecCase getScheduleSpecCase() { + return ScheduleSpecCase.forNumber(scheduleSpecCase_); + } + + public Builder clearScheduleSpec() { + scheduleSpecCase_ = 0; + scheduleSpec_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.CrontabSpec, + com.google.spanner.admin.database.v1.CrontabSpec.Builder, + com.google.spanner.admin.database.v1.CrontabSpecOrBuilder> + cronSpecBuilder_; + + /** + * + * + *
    +     * Cron style schedule specification.
    +     * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + * + * @return Whether the cronSpec field is set. + */ + @java.lang.Override + public boolean hasCronSpec() { + return scheduleSpecCase_ == 1; + } + + /** + * + * + *
    +     * Cron style schedule specification.
    +     * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + * + * @return The cronSpec. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.CrontabSpec getCronSpec() { + if (cronSpecBuilder_ == null) { + if (scheduleSpecCase_ == 1) { + return (com.google.spanner.admin.database.v1.CrontabSpec) scheduleSpec_; + } + return com.google.spanner.admin.database.v1.CrontabSpec.getDefaultInstance(); + } else { + if (scheduleSpecCase_ == 1) { + return cronSpecBuilder_.getMessage(); + } + return com.google.spanner.admin.database.v1.CrontabSpec.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Cron style schedule specification.
    +     * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + */ + public Builder setCronSpec(com.google.spanner.admin.database.v1.CrontabSpec value) { + if (cronSpecBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + scheduleSpec_ = value; + onChanged(); + } else { + cronSpecBuilder_.setMessage(value); + } + scheduleSpecCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Cron style schedule specification.
    +     * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + */ + public Builder setCronSpec( + com.google.spanner.admin.database.v1.CrontabSpec.Builder builderForValue) { + if (cronSpecBuilder_ == null) { + scheduleSpec_ = builderForValue.build(); + onChanged(); + } else { + cronSpecBuilder_.setMessage(builderForValue.build()); + } + scheduleSpecCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Cron style schedule specification.
    +     * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + */ + public Builder mergeCronSpec(com.google.spanner.admin.database.v1.CrontabSpec value) { + if (cronSpecBuilder_ == null) { + if (scheduleSpecCase_ == 1 + && scheduleSpec_ + != com.google.spanner.admin.database.v1.CrontabSpec.getDefaultInstance()) { + scheduleSpec_ = + com.google.spanner.admin.database.v1.CrontabSpec.newBuilder( + (com.google.spanner.admin.database.v1.CrontabSpec) scheduleSpec_) + .mergeFrom(value) + .buildPartial(); + } else { + scheduleSpec_ = value; + } + onChanged(); + } else { + if (scheduleSpecCase_ == 1) { + cronSpecBuilder_.mergeFrom(value); + } else { + cronSpecBuilder_.setMessage(value); + } + } + scheduleSpecCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Cron style schedule specification.
    +     * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + */ + public Builder clearCronSpec() { + if (cronSpecBuilder_ == null) { + if (scheduleSpecCase_ == 1) { + scheduleSpecCase_ = 0; + scheduleSpec_ = null; + onChanged(); + } + } else { + if (scheduleSpecCase_ == 1) { + scheduleSpecCase_ = 0; + scheduleSpec_ = null; + } + cronSpecBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Cron style schedule specification.
    +     * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + */ + public com.google.spanner.admin.database.v1.CrontabSpec.Builder getCronSpecBuilder() { + return internalGetCronSpecFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Cron style schedule specification.
    +     * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.CrontabSpecOrBuilder getCronSpecOrBuilder() { + if ((scheduleSpecCase_ == 1) && (cronSpecBuilder_ != null)) { + return cronSpecBuilder_.getMessageOrBuilder(); + } else { + if (scheduleSpecCase_ == 1) { + return (com.google.spanner.admin.database.v1.CrontabSpec) scheduleSpec_; + } + return com.google.spanner.admin.database.v1.CrontabSpec.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Cron style schedule specification.
    +     * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.CrontabSpec, + com.google.spanner.admin.database.v1.CrontabSpec.Builder, + com.google.spanner.admin.database.v1.CrontabSpecOrBuilder> + internalGetCronSpecFieldBuilder() { + if (cronSpecBuilder_ == null) { + if (!(scheduleSpecCase_ == 1)) { + scheduleSpec_ = com.google.spanner.admin.database.v1.CrontabSpec.getDefaultInstance(); + } + cronSpecBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.CrontabSpec, + com.google.spanner.admin.database.v1.CrontabSpec.Builder, + com.google.spanner.admin.database.v1.CrontabSpecOrBuilder>( + (com.google.spanner.admin.database.v1.CrontabSpec) scheduleSpec_, + getParentForChildren(), + isClean()); + scheduleSpec_ = null; + } + scheduleSpecCase_ = 1; + onChanged(); + return cronSpecBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.BackupScheduleSpec) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.BackupScheduleSpec) + private static final com.google.spanner.admin.database.v1.BackupScheduleSpec DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.BackupScheduleSpec(); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BackupScheduleSpec parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw 
e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupScheduleSpec getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleSpecOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleSpecOrBuilder.java new file mode 100644 index 000000000000..0109d54758b4 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleSpecOrBuilder.java @@ -0,0 +1,67 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup_schedule.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface BackupScheduleSpecOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.BackupScheduleSpec) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cron style schedule specification.
    +   * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + * + * @return Whether the cronSpec field is set. + */ + boolean hasCronSpec(); + + /** + * + * + *
    +   * Cron style schedule specification.
    +   * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + * + * @return The cronSpec. + */ + com.google.spanner.admin.database.v1.CrontabSpec getCronSpec(); + + /** + * + * + *
    +   * Cron style schedule specification.
    +   * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + */ + com.google.spanner.admin.database.v1.CrontabSpecOrBuilder getCronSpecOrBuilder(); + + com.google.spanner.admin.database.v1.BackupScheduleSpec.ScheduleSpecCase getScheduleSpecCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CommonProto.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CommonProto.java new file mode 100644 index 000000000000..fcbb39dc095e --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CommonProto.java @@ -0,0 +1,150 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/common.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public final class CommonProto extends com.google.protobuf.GeneratedFile { + private CommonProto() {} + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CommonProto"); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_OperationProgress_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_OperationProgress_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_EncryptionConfig_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_EncryptionConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_EncryptionInfo_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_EncryptionInfo_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + 
"\n-google/spanner/admin/database/v1/commo" + + "n.proto\022 google.spanner.admin.database.v" + + "1\032\037google/api/field_behavior.proto\032\031goog" + + "le/api/resource.proto\032\037google/protobuf/t" + + "imestamp.proto\032\027google/rpc/status.proto\"" + + "\213\001\n\021OperationProgress\022\030\n\020progress_percen" + + "t\030\001 \001(\005\022.\n\nstart_time\030\002 \001(\0132\032.google.pro" + + "tobuf.Timestamp\022,\n\010end_time\030\003 \001(\0132\032.goog" + + "le.protobuf.Timestamp\"\217\001\n\020EncryptionConf" + + "ig\022<\n\014kms_key_name\030\002 \001(\tB&\372A#\n!cloudkms." + + "googleapis.com/CryptoKey\022=\n\rkms_key_name" + + "s\030\003 \003(\tB&\372A#\n!cloudkms.googleapis.com/Cr" + + "yptoKey\"\302\002\n\016EncryptionInfo\022S\n\017encryption" + + "_type\030\003 \001(\01625.google.spanner.admin.datab" + + "ase.v1.EncryptionInfo.TypeB\003\340A\003\0222\n\021encry" + + "ption_status\030\004 \001(\0132\022.google.rpc.StatusB\003" + + "\340A\003\022I\n\017kms_key_version\030\002 \001(\tB0\340A\003\372A*\n(cl" + + "oudkms.googleapis.com/CryptoKeyVersion\"\\" + + "\n\004Type\022\024\n\020TYPE_UNSPECIFIED\020\000\022\035\n\031GOOGLE_D" + + "EFAULT_ENCRYPTION\020\001\022\037\n\033CUSTOMER_MANAGED_" + + "ENCRYPTION\020\002*\\\n\017DatabaseDialect\022 \n\034DATAB" + + "ASE_DIALECT_UNSPECIFIED\020\000\022\027\n\023GOOGLE_STAN" + + "DARD_SQL\020\001\022\016\n\nPOSTGRESQL\020\002B\242\004\n$com.googl" + + "e.spanner.admin.database.v1B\013CommonProto" + + "P\001ZFcloud.google.com/go/spanner/admin/da" + + "tabase/apiv1/databasepb;databasepb\252\002&Goo" + + "gle.Cloud.Spanner.Admin.Database.V1\312\002&Go" + + "ogle\\Cloud\\Spanner\\Admin\\Database\\V1\352\002+G" + + "oogle::Cloud::Spanner::Admin::Database::" + + "V1\352Ax\n!cloudkms.googleapis.com/CryptoKey" + + "\022Sprojects/{project}/locations/{location" + + "}/keyRings/{key_ring}/cryptoKeys/{crypto" + + "_key}\352A\246\001\n(cloudkms.googleapis.com/Crypt" + + 
"oKeyVersion\022zprojects/{project}/location" + + "s/{location}/keyRings/{key_ring}/cryptoK" + + "eys/{crypto_key}/cryptoKeyVersions/{cryp" + + "to_key_version}b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + com.google.rpc.StatusProto.getDescriptor(), + }); + internal_static_google_spanner_admin_database_v1_OperationProgress_descriptor = + getDescriptor().getMessageType(0); + internal_static_google_spanner_admin_database_v1_OperationProgress_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_OperationProgress_descriptor, + new java.lang.String[] { + "ProgressPercent", "StartTime", "EndTime", + }); + internal_static_google_spanner_admin_database_v1_EncryptionConfig_descriptor = + getDescriptor().getMessageType(1); + internal_static_google_spanner_admin_database_v1_EncryptionConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_EncryptionConfig_descriptor, + new java.lang.String[] { + "KmsKeyName", "KmsKeyNames", + }); + internal_static_google_spanner_admin_database_v1_EncryptionInfo_descriptor = + getDescriptor().getMessageType(2); + internal_static_google_spanner_admin_database_v1_EncryptionInfo_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_EncryptionInfo_descriptor, + new java.lang.String[] { + "EncryptionType", "EncryptionStatus", "KmsKeyVersion", + }); + descriptor.resolveAllFeaturesImmutable(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + 
com.google.protobuf.TimestampProto.getDescriptor(); + com.google.rpc.StatusProto.getDescriptor(); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.ResourceProto.resourceDefinition); + registry.add(com.google.api.ResourceProto.resourceReference); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupEncryptionConfig.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupEncryptionConfig.java new file mode 100644 index 000000000000..a03493f9269e --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupEncryptionConfig.java @@ -0,0 +1,1552 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Encryption configuration for the copied backup.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CopyBackupEncryptionConfig} + */ +@com.google.protobuf.Generated +public final class CopyBackupEncryptionConfig extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.CopyBackupEncryptionConfig) + CopyBackupEncryptionConfigOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CopyBackupEncryptionConfig"); + } + + // Use CopyBackupEncryptionConfig.newBuilder() to construct. + private CopyBackupEncryptionConfig(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CopyBackupEncryptionConfig() { + encryptionType_ = 0; + kmsKeyName_ = ""; + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CopyBackupEncryptionConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CopyBackupEncryptionConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.class, + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.Builder.class); + } + + /** + * + * + *
    +   * Encryption types for the backup.
    +   * 
    + * + * Protobuf enum {@code + * google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType} + */ + public enum EncryptionType implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Unspecified. Do not use.
    +     * 
    + * + * ENCRYPTION_TYPE_UNSPECIFIED = 0; + */ + ENCRYPTION_TYPE_UNSPECIFIED(0), + /** + * + * + *
    +     * This is the default option for
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * when
    +     * [encryption_config][google.spanner.admin.database.v1.CopyBackupEncryptionConfig]
    +     * is not specified. For example, if the source backup is using
    +     * `Customer_Managed_Encryption`, the backup will be using the same Cloud
    +     * KMS key as the source backup.
    +     * 
    + * + * USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION = 1; + */ + USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION(1), + /** + * + * + *
    +     * Use Google default encryption.
    +     * 
    + * + * GOOGLE_DEFAULT_ENCRYPTION = 2; + */ + GOOGLE_DEFAULT_ENCRYPTION(2), + /** + * + * + *
    +     * Use customer managed encryption. If specified, either `kms_key_name` or
    +     * `kms_key_names` must contain valid Cloud KMS key(s).
    +     * 
    + * + * CUSTOMER_MANAGED_ENCRYPTION = 3; + */ + CUSTOMER_MANAGED_ENCRYPTION(3), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "EncryptionType"); + } + + /** + * + * + *
    +     * Unspecified. Do not use.
    +     * 
    + * + * ENCRYPTION_TYPE_UNSPECIFIED = 0; + */ + public static final int ENCRYPTION_TYPE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * This is the default option for
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * when
    +     * [encryption_config][google.spanner.admin.database.v1.CopyBackupEncryptionConfig]
    +     * is not specified. For example, if the source backup is using
    +     * `Customer_Managed_Encryption`, the backup will be using the same Cloud
    +     * KMS key as the source backup.
    +     * 
    + * + * USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION = 1; + */ + public static final int USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION_VALUE = 1; + + /** + * + * + *
    +     * Use Google default encryption.
    +     * 
    + * + * GOOGLE_DEFAULT_ENCRYPTION = 2; + */ + public static final int GOOGLE_DEFAULT_ENCRYPTION_VALUE = 2; + + /** + * + * + *
    +     * Use customer managed encryption. If specified, either `kms_key_name` or
    +     * `kms_key_names` must contain valid Cloud KMS key(s).
    +     * 
    + * + * CUSTOMER_MANAGED_ENCRYPTION = 3; + */ + public static final int CUSTOMER_MANAGED_ENCRYPTION_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static EncryptionType valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static EncryptionType forNumber(int value) { + switch (value) { + case 0: + return ENCRYPTION_TYPE_UNSPECIFIED; + case 1: + return USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION; + case 2: + return GOOGLE_DEFAULT_ENCRYPTION; + case 3: + return CUSTOMER_MANAGED_ENCRYPTION; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public EncryptionType findValueByNumber(int number) { + return EncryptionType.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return 
com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final EncryptionType[] VALUES = values(); + + public static EncryptionType valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private EncryptionType(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType) + } + + public static final int ENCRYPTION_TYPE_FIELD_NUMBER = 1; + private int encryptionType_ = 0; + + /** + * + * + *
    +   * Required. The encryption type of the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The enum numeric value on the wire for encryptionType. + */ + @java.lang.Override + public int getEncryptionTypeValue() { + return encryptionType_; + } + + /** + * + * + *
    +   * Required. The encryption type of the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The encryptionType. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType + getEncryptionType() { + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType result = + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType.forNumber( + encryptionType_); + return result == null + ? com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType + .UNRECOGNIZED + : result; + } + + public static final int KMS_KEY_NAME_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object kmsKeyName_ = ""; + + /** + * + * + *
    +   * Optional. The Cloud KMS key that will be used to protect the backup.
    +   * This field should be set only when
    +   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The kmsKeyName. + */ + @java.lang.Override + public java.lang.String getKmsKeyName() { + java.lang.Object ref = kmsKeyName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + kmsKeyName_ = s; + return s; + } + } + + /** + * + * + *
    +   * Optional. The Cloud KMS key that will be used to protect the backup.
    +   * This field should be set only when
    +   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for kmsKeyName. + */ + @java.lang.Override + public com.google.protobuf.ByteString getKmsKeyNameBytes() { + java.lang.Object ref = kmsKeyName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + kmsKeyName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int KMS_KEY_NAMES_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList kmsKeyNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * Kms keys specified can be in any order.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the kmsKeyNames. + */ + public com.google.protobuf.ProtocolStringList getKmsKeyNamesList() { + return kmsKeyNames_; + } + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * Kms keys specified can be in any order.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The count of kmsKeyNames. + */ + public int getKmsKeyNamesCount() { + return kmsKeyNames_.size(); + } + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * Kms keys specified can be in any order.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + public java.lang.String getKmsKeyNames(int index) { + return kmsKeyNames_.get(index); + } + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * Kms keys specified can be in any order.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + public com.google.protobuf.ByteString getKmsKeyNamesBytes(int index) { + return kmsKeyNames_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (encryptionType_ + != com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType + .ENCRYPTION_TYPE_UNSPECIFIED + .getNumber()) { + output.writeEnum(1, encryptionType_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(kmsKeyName_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, kmsKeyName_); + } + for (int i = 0; i < kmsKeyNames_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, kmsKeyNames_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (encryptionType_ + != com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType + .ENCRYPTION_TYPE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, encryptionType_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(kmsKeyName_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, kmsKeyName_); + } + { + int dataSize = 0; + for (int i = 0; i < kmsKeyNames_.size(); i++) { + dataSize += computeStringSizeNoTag(kmsKeyNames_.getRaw(i)); + } + 
size += dataSize; + size += 1 * getKmsKeyNamesList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig other = + (com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig) obj; + + if (encryptionType_ != other.encryptionType_) return false; + if (!getKmsKeyName().equals(other.getKmsKeyName())) return false; + if (!getKmsKeyNamesList().equals(other.getKmsKeyNamesList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ENCRYPTION_TYPE_FIELD_NUMBER; + hash = (53 * hash) + encryptionType_; + hash = (37 * hash) + KMS_KEY_NAME_FIELD_NUMBER; + hash = (53 * hash) + getKmsKeyName().hashCode(); + if (getKmsKeyNamesCount() > 0) { + hash = (37 * hash) + KMS_KEY_NAMES_FIELD_NUMBER; + hash = (53 * hash) + getKmsKeyNamesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public 
static com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig parseDelimitedFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Encryption configuration for the copied backup.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CopyBackupEncryptionConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.CopyBackupEncryptionConfig) + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CopyBackupEncryptionConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CopyBackupEncryptionConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.class, + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + encryptionType_ = 0; + kmsKeyName_ = ""; + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CopyBackupEncryptionConfig_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig + getDefaultInstanceForType() { + return 
com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig build() { + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig buildPartial() { + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig result = + new com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.encryptionType_ = encryptionType_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.kmsKeyName_ = kmsKeyName_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + kmsKeyNames_.makeImmutable(); + result.kmsKeyNames_ = kmsKeyNames_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig) { + return mergeFrom((com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig other) { + if (other + == com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.getDefaultInstance()) + return this; + if (other.encryptionType_ != 0) { + setEncryptionTypeValue(other.getEncryptionTypeValue()); + } + if (!other.getKmsKeyName().isEmpty()) { + kmsKeyName_ = other.kmsKeyName_; + bitField0_ |= 0x00000002; + onChanged(); + } + if 
(!other.kmsKeyNames_.isEmpty()) { + if (kmsKeyNames_.isEmpty()) { + kmsKeyNames_ = other.kmsKeyNames_; + bitField0_ |= 0x00000004; + } else { + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.addAll(other.kmsKeyNames_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + encryptionType_ = input.readEnum(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + kmsKeyName_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(s); + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int encryptionType_ = 0; + + /** + * + * + *
    +     * Required. The encryption type of the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The enum numeric value on the wire for encryptionType. + */ + @java.lang.Override + public int getEncryptionTypeValue() { + return encryptionType_; + } + + /** + * + * + *
    +     * Required. The encryption type of the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @param value The enum numeric value on the wire for encryptionType to set. + * @return This builder for chaining. + */ + public Builder setEncryptionTypeValue(int value) { + encryptionType_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The encryption type of the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The encryptionType. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType + getEncryptionType() { + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType result = + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType.forNumber( + encryptionType_); + return result == null + ? com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType + .UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * Required. The encryption type of the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @param value The encryptionType to set. + * @return This builder for chaining. + */ + public Builder setEncryptionType( + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + encryptionType_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The encryption type of the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return This builder for chaining. + */ + public Builder clearEncryptionType() { + bitField0_ = (bitField0_ & ~0x00000001); + encryptionType_ = 0; + onChanged(); + return this; + } + + private java.lang.Object kmsKeyName_ = ""; + + /** + * + * + *
    +     * Optional. The Cloud KMS key that will be used to protect the backup.
    +     * This field should be set only when
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The kmsKeyName. + */ + public java.lang.String getKmsKeyName() { + java.lang.Object ref = kmsKeyName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + kmsKeyName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Optional. The Cloud KMS key that will be used to protect the backup.
    +     * This field should be set only when
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for kmsKeyName. + */ + public com.google.protobuf.ByteString getKmsKeyNameBytes() { + java.lang.Object ref = kmsKeyName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + kmsKeyName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Optional. The Cloud KMS key that will be used to protect the backup.
    +     * This field should be set only when
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The kmsKeyName to set. + * @return This builder for chaining. + */ + public Builder setKmsKeyName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + kmsKeyName_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The Cloud KMS key that will be used to protect the backup.
    +     * This field should be set only when
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearKmsKeyName() { + kmsKeyName_ = getDefaultInstance().getKmsKeyName(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The Cloud KMS key that will be used to protect the backup.
    +     * This field should be set only when
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for kmsKeyName to set. + * @return This builder for chaining. + */ + public Builder setKmsKeyNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + kmsKeyName_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList kmsKeyNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureKmsKeyNamesIsMutable() { + if (!kmsKeyNames_.isModifiable()) { + kmsKeyNames_ = new com.google.protobuf.LazyStringArrayList(kmsKeyNames_); + } + bitField0_ |= 0x00000004; + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * Kms keys specified can be in any order.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the kmsKeyNames. + */ + public com.google.protobuf.ProtocolStringList getKmsKeyNamesList() { + kmsKeyNames_.makeImmutable(); + return kmsKeyNames_; + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * Kms keys specified can be in any order.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The count of kmsKeyNames. + */ + public int getKmsKeyNamesCount() { + return kmsKeyNames_.size(); + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * Kms keys specified can be in any order.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + public java.lang.String getKmsKeyNames(int index) { + return kmsKeyNames_.get(index); + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * Kms keys specified can be in any order.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + public com.google.protobuf.ByteString getKmsKeyNamesBytes(int index) { + return kmsKeyNames_.getByteString(index); + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * Kms keys specified can be in any order.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index to set the value at. + * @param value The kmsKeyNames to set. + * @return This builder for chaining. + */ + public Builder setKmsKeyNames(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.set(index, value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * Kms keys specified can be in any order.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addKmsKeyNames(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * Kms keys specified can be in any order.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param values The kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addAllKmsKeyNames(java.lang.Iterable values) { + ensureKmsKeyNamesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, kmsKeyNames_); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * Kms keys specified can be in any order.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearKmsKeyNames() { + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * Kms keys specified can be in any order.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes of the kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addKmsKeyNamesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.CopyBackupEncryptionConfig) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.CopyBackupEncryptionConfig) + private static final com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig(); + } + + public static com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CopyBackupEncryptionConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return 
builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupEncryptionConfigOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupEncryptionConfigOrBuilder.java new file mode 100644 index 000000000000..ef10e01d28ec --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupEncryptionConfigOrBuilder.java @@ -0,0 +1,219 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface CopyBackupEncryptionConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.CopyBackupEncryptionConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The encryption type of the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The enum numeric value on the wire for encryptionType. + */ + int getEncryptionTypeValue(); + + /** + * + * + *
    +   * Required. The encryption type of the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The encryptionType. + */ + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType + getEncryptionType(); + + /** + * + * + *
    +   * Optional. The Cloud KMS key that will be used to protect the backup.
    +   * This field should be set only when
    +   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The kmsKeyName. + */ + java.lang.String getKmsKeyName(); + + /** + * + * + *
    +   * Optional. The Cloud KMS key that will be used to protect the backup.
    +   * This field should be set only when
    +   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for kmsKeyName. + */ + com.google.protobuf.ByteString getKmsKeyNameBytes(); + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * Kms keys specified can be in any order.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the kmsKeyNames. + */ + java.util.List getKmsKeyNamesList(); + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * Kms keys specified can be in any order.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The count of kmsKeyNames. + */ + int getKmsKeyNamesCount(); + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * Kms keys specified can be in any order.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + java.lang.String getKmsKeyNames(int index); + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * Kms keys specified can be in any order.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + com.google.protobuf.ByteString getKmsKeyNamesBytes(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupMetadata.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupMetadata.java new file mode 100644 index 000000000000..7151a3520b0b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupMetadata.java @@ -0,0 +1,1543 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Metadata type for the operation returned by
    + * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CopyBackupMetadata} + */ +@com.google.protobuf.Generated +public final class CopyBackupMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.CopyBackupMetadata) + CopyBackupMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CopyBackupMetadata"); + } + + // Use CopyBackupMetadata.newBuilder() to construct. + private CopyBackupMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CopyBackupMetadata() { + name_ = ""; + sourceBackup_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CopyBackupMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CopyBackupMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CopyBackupMetadata.class, + com.google.spanner.admin.database.v1.CopyBackupMetadata.Builder.class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * The name of the backup being created through the copy operation.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * The name of the backup being created through the copy operation.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SOURCE_BACKUP_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object sourceBackup_ = ""; + + /** + * + * + *
    +   * The name of the source backup that is being copied.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * string source_backup = 2 [(.google.api.resource_reference) = { ... } + * + * @return The sourceBackup. + */ + @java.lang.Override + public java.lang.String getSourceBackup() { + java.lang.Object ref = sourceBackup_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sourceBackup_ = s; + return s; + } + } + + /** + * + * + *
    +   * The name of the source backup that is being copied.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * string source_backup = 2 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for sourceBackup. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSourceBackupBytes() { + java.lang.Object ref = sourceBackup_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sourceBackup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROGRESS_FIELD_NUMBER = 3; + private com.google.spanner.admin.database.v1.OperationProgress progress_; + + /** + * + * + *
    +   * The progress of the
    +   * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + * + * @return Whether the progress field is set. + */ + @java.lang.Override + public boolean hasProgress() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The progress of the
    +   * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + * + * @return The progress. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.OperationProgress getProgress() { + return progress_ == null + ? com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance() + : progress_; + } + + /** + * + * + *
    +   * The progress of the
    +   * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgressOrBuilder() { + return progress_ == null + ? com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance() + : progress_; + } + + public static final int CANCEL_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp cancelTime_; + + /** + * + * + *
    +   * The time at which cancellation of CopyBackup operation was received.
    +   * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +   * starts asynchronous cancellation on a long-running operation. The server
    +   * makes a best effort to cancel the operation, but success is not guaranteed.
    +   * Clients can use
    +   * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +   * other methods to check whether the cancellation succeeded or whether the
    +   * operation completed despite cancellation. On successful cancellation,
    +   * the operation is not deleted; instead, it becomes an operation with
    +   * an [Operation.error][google.longrunning.Operation.error] value with a
    +   * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +   * corresponding to `Code.CANCELLED`.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + * + * @return Whether the cancelTime field is set. + */ + @java.lang.Override + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * The time at which cancellation of CopyBackup operation was received.
    +   * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +   * starts asynchronous cancellation on a long-running operation. The server
    +   * makes a best effort to cancel the operation, but success is not guaranteed.
    +   * Clients can use
    +   * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +   * other methods to check whether the cancellation succeeded or whether the
    +   * operation completed despite cancellation. On successful cancellation,
    +   * the operation is not deleted; instead, it becomes an operation with
    +   * an [Operation.error][google.longrunning.Operation.error] value with a
    +   * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +   * corresponding to `Code.CANCELLED`.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + * + * @return The cancelTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCancelTime() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + /** + * + * + *
    +   * The time at which cancellation of CopyBackup operation was received.
    +   * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +   * starts asynchronous cancellation on a long-running operation. The server
    +   * makes a best effort to cancel the operation, but success is not guaranteed.
    +   * Clients can use
    +   * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +   * other methods to check whether the cancellation succeeded or whether the
    +   * operation completed despite cancellation. On successful cancellation,
    +   * the operation is not deleted; instead, it becomes an operation with
    +   * an [Operation.error][google.longrunning.Operation.error] value with a
    +   * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +   * corresponding to `Code.CANCELLED`.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sourceBackup_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, sourceBackup_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getProgress()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(4, getCancelTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sourceBackup_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, sourceBackup_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getProgress()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getCancelTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + 
@java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.CopyBackupMetadata)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.CopyBackupMetadata other = + (com.google.spanner.admin.database.v1.CopyBackupMetadata) obj; + + if (!getName().equals(other.getName())) return false; + if (!getSourceBackup().equals(other.getSourceBackup())) return false; + if (hasProgress() != other.hasProgress()) return false; + if (hasProgress()) { + if (!getProgress().equals(other.getProgress())) return false; + } + if (hasCancelTime() != other.hasCancelTime()) return false; + if (hasCancelTime()) { + if (!getCancelTime().equals(other.getCancelTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + SOURCE_BACKUP_FIELD_NUMBER; + hash = (53 * hash) + getSourceBackup().hashCode(); + if (hasProgress()) { + hash = (37 * hash) + PROGRESS_FIELD_NUMBER; + hash = (53 * hash) + getProgress().hashCode(); + } + if (hasCancelTime()) { + hash = (37 * hash) + CANCEL_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCancelTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.CopyBackupMetadata parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CopyBackupMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CopyBackupMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CopyBackupMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CopyBackupMetadata parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CopyBackupMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CopyBackupMetadata parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CopyBackupMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CopyBackupMetadata parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CopyBackupMetadata 
parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CopyBackupMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CopyBackupMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.CopyBackupMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Metadata type for the operation returned by
    +   * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CopyBackupMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.CopyBackupMetadata) + com.google.spanner.admin.database.v1.CopyBackupMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CopyBackupMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CopyBackupMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CopyBackupMetadata.class, + com.google.spanner.admin.database.v1.CopyBackupMetadata.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.CopyBackupMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetProgressFieldBuilder(); + internalGetCancelTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + sourceBackup_ = ""; + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CopyBackupMetadata_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CopyBackupMetadata getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.CopyBackupMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CopyBackupMetadata build() { + com.google.spanner.admin.database.v1.CopyBackupMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CopyBackupMetadata buildPartial() { + com.google.spanner.admin.database.v1.CopyBackupMetadata result = + new com.google.spanner.admin.database.v1.CopyBackupMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.CopyBackupMetadata result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.sourceBackup_ = sourceBackup_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.progress_ = progressBuilder_ == null ? progress_ : progressBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.cancelTime_ = cancelTimeBuilder_ == null ? 
cancelTime_ : cancelTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.CopyBackupMetadata) { + return mergeFrom((com.google.spanner.admin.database.v1.CopyBackupMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.CopyBackupMetadata other) { + if (other == com.google.spanner.admin.database.v1.CopyBackupMetadata.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getSourceBackup().isEmpty()) { + sourceBackup_ = other.sourceBackup_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasProgress()) { + mergeProgress(other.getProgress()); + } + if (other.hasCancelTime()) { + mergeCancelTime(other.getCancelTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + sourceBackup_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetProgressFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + 
input.readMessage( + internalGetCancelTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * The name of the backup being created through the copy operation.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The name of the backup being created through the copy operation.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The name of the backup being created through the copy operation.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The name of the backup being created through the copy operation.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The name of the backup being created through the copy operation.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object sourceBackup_ = ""; + + /** + * + * + *
    +     * The name of the source backup that is being copied.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * string source_backup = 2 [(.google.api.resource_reference) = { ... } + * + * @return The sourceBackup. + */ + public java.lang.String getSourceBackup() { + java.lang.Object ref = sourceBackup_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sourceBackup_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The name of the source backup that is being copied.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * string source_backup = 2 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for sourceBackup. + */ + public com.google.protobuf.ByteString getSourceBackupBytes() { + java.lang.Object ref = sourceBackup_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sourceBackup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The name of the source backup that is being copied.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * string source_backup = 2 [(.google.api.resource_reference) = { ... } + * + * @param value The sourceBackup to set. + * @return This builder for chaining. + */ + public Builder setSourceBackup(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + sourceBackup_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The name of the source backup that is being copied.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * string source_backup = 2 [(.google.api.resource_reference) = { ... } + * + * @return This builder for chaining. + */ + public Builder clearSourceBackup() { + sourceBackup_ = getDefaultInstance().getSourceBackup(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The name of the source backup that is being copied.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * string source_backup = 2 [(.google.api.resource_reference) = { ... } + * + * @param value The bytes for sourceBackup to set. + * @return This builder for chaining. + */ + public Builder setSourceBackupBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + sourceBackup_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.spanner.admin.database.v1.OperationProgress progress_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.OperationProgress, + com.google.spanner.admin.database.v1.OperationProgress.Builder, + com.google.spanner.admin.database.v1.OperationProgressOrBuilder> + progressBuilder_; + + /** + * + * + *
    +     * The progress of the
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + * + * @return Whether the progress field is set. + */ + public boolean hasProgress() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * The progress of the
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + * + * @return The progress. + */ + public com.google.spanner.admin.database.v1.OperationProgress getProgress() { + if (progressBuilder_ == null) { + return progress_ == null + ? com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance() + : progress_; + } else { + return progressBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The progress of the
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + */ + public Builder setProgress(com.google.spanner.admin.database.v1.OperationProgress value) { + if (progressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + progress_ = value; + } else { + progressBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + */ + public Builder setProgress( + com.google.spanner.admin.database.v1.OperationProgress.Builder builderForValue) { + if (progressBuilder_ == null) { + progress_ = builderForValue.build(); + } else { + progressBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + */ + public Builder mergeProgress(com.google.spanner.admin.database.v1.OperationProgress value) { + if (progressBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && progress_ != null + && progress_ + != com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance()) { + getProgressBuilder().mergeFrom(value); + } else { + progress_ = value; + } + } else { + progressBuilder_.mergeFrom(value); + } + if (progress_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + */ + public Builder clearProgress() { + bitField0_ = (bitField0_ & ~0x00000004); + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + */ + public com.google.spanner.admin.database.v1.OperationProgress.Builder getProgressBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetProgressFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The progress of the
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + */ + public com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgressOrBuilder() { + if (progressBuilder_ != null) { + return progressBuilder_.getMessageOrBuilder(); + } else { + return progress_ == null + ? com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance() + : progress_; + } + } + + /** + * + * + *
    +     * The progress of the
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.OperationProgress, + com.google.spanner.admin.database.v1.OperationProgress.Builder, + com.google.spanner.admin.database.v1.OperationProgressOrBuilder> + internalGetProgressFieldBuilder() { + if (progressBuilder_ == null) { + progressBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.OperationProgress, + com.google.spanner.admin.database.v1.OperationProgress.Builder, + com.google.spanner.admin.database.v1.OperationProgressOrBuilder>( + getProgress(), getParentForChildren(), isClean()); + progress_ = null; + } + return progressBuilder_; + } + + private com.google.protobuf.Timestamp cancelTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + cancelTimeBuilder_; + + /** + * + * + *
    +     * The time at which cancellation of CopyBackup operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +     * corresponding to `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + * + * @return Whether the cancelTime field is set. + */ + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * The time at which cancellation of CopyBackup operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +     * corresponding to `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + * + * @return The cancelTime. + */ + public com.google.protobuf.Timestamp getCancelTime() { + if (cancelTimeBuilder_ == null) { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } else { + return cancelTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The time at which cancellation of CopyBackup operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +     * corresponding to `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cancelTime_ = value; + } else { + cancelTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which cancellation of CopyBackup operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +     * corresponding to `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (cancelTimeBuilder_ == null) { + cancelTime_ = builderForValue.build(); + } else { + cancelTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which cancellation of CopyBackup operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +     * corresponding to `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + */ + public Builder mergeCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && cancelTime_ != null + && cancelTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCancelTimeBuilder().mergeFrom(value); + } else { + cancelTime_ = value; + } + } else { + cancelTimeBuilder_.mergeFrom(value); + } + if (cancelTime_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The time at which cancellation of CopyBackup operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +     * corresponding to `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + */ + public Builder clearCancelTime() { + bitField0_ = (bitField0_ & ~0x00000008); + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which cancellation of CopyBackup operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +     * corresponding to `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + */ + public com.google.protobuf.Timestamp.Builder getCancelTimeBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetCancelTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The time at which cancellation of CopyBackup operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +     * corresponding to `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + */ + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + if (cancelTimeBuilder_ != null) { + return cancelTimeBuilder_.getMessageOrBuilder(); + } else { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } + } + + /** + * + * + *
    +     * The time at which cancellation of CopyBackup operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +     * corresponding to `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCancelTimeFieldBuilder() { + if (cancelTimeBuilder_ == null) { + cancelTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCancelTime(), getParentForChildren(), isClean()); + cancelTime_ = null; + } + return cancelTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.CopyBackupMetadata) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.CopyBackupMetadata) + private static final com.google.spanner.admin.database.v1.CopyBackupMetadata DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.CopyBackupMetadata(); + } + + public static com.google.spanner.admin.database.v1.CopyBackupMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CopyBackupMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + 
return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CopyBackupMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupMetadataOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupMetadataOrBuilder.java new file mode 100644 index 000000000000..81ce31fb4e7a --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupMetadataOrBuilder.java @@ -0,0 +1,201 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface CopyBackupMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.CopyBackupMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The name of the backup being created through the copy operation.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * The name of the backup being created through the copy operation.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +   * The name of the source backup that is being copied.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * string source_backup = 2 [(.google.api.resource_reference) = { ... } + * + * @return The sourceBackup. + */ + java.lang.String getSourceBackup(); + + /** + * + * + *
    +   * The name of the source backup that is being copied.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * string source_backup = 2 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for sourceBackup. + */ + com.google.protobuf.ByteString getSourceBackupBytes(); + + /** + * + * + *
    +   * The progress of the
    +   * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + * + * @return Whether the progress field is set. + */ + boolean hasProgress(); + + /** + * + * + *
    +   * The progress of the
    +   * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + * + * @return The progress. + */ + com.google.spanner.admin.database.v1.OperationProgress getProgress(); + + /** + * + * + *
    +   * The progress of the
    +   * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + */ + com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgressOrBuilder(); + + /** + * + * + *
    +   * The time at which cancellation of CopyBackup operation was received.
    +   * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +   * starts asynchronous cancellation on a long-running operation. The server
    +   * makes a best effort to cancel the operation, but success is not guaranteed.
    +   * Clients can use
    +   * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +   * other methods to check whether the cancellation succeeded or whether the
    +   * operation completed despite cancellation. On successful cancellation,
    +   * the operation is not deleted; instead, it becomes an operation with
    +   * an [Operation.error][google.longrunning.Operation.error] value with a
    +   * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +   * corresponding to `Code.CANCELLED`.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + * + * @return Whether the cancelTime field is set. + */ + boolean hasCancelTime(); + + /** + * + * + *
    +   * The time at which cancellation of CopyBackup operation was received.
    +   * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +   * starts asynchronous cancellation on a long-running operation. The server
    +   * makes a best effort to cancel the operation, but success is not guaranteed.
    +   * Clients can use
    +   * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +   * other methods to check whether the cancellation succeeded or whether the
    +   * operation completed despite cancellation. On successful cancellation,
    +   * the operation is not deleted; instead, it becomes an operation with
    +   * an [Operation.error][google.longrunning.Operation.error] value with a
    +   * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +   * corresponding to `Code.CANCELLED`.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + * + * @return The cancelTime. + */ + com.google.protobuf.Timestamp getCancelTime(); + + /** + * + * + *
    +   * The time at which cancellation of CopyBackup operation was received.
    +   * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +   * starts asynchronous cancellation on a long-running operation. The server
    +   * makes a best effort to cancel the operation, but success is not guaranteed.
    +   * Clients can use
    +   * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +   * other methods to check whether the cancellation succeeded or whether the
    +   * operation completed despite cancellation. On successful cancellation,
    +   * the operation is not deleted; instead, it becomes an operation with
    +   * an [Operation.error][google.longrunning.Operation.error] value with a
    +   * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +   * corresponding to `Code.CANCELLED`.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + */ + com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupRequest.java new file mode 100644 index 000000000000..981f13a37f71 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupRequest.java @@ -0,0 +1,1771 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CopyBackupRequest} + */ +@com.google.protobuf.Generated +public final class CopyBackupRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.CopyBackupRequest) + CopyBackupRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CopyBackupRequest"); + } + + // Use CopyBackupRequest.newBuilder() to construct. + private CopyBackupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CopyBackupRequest() { + parent_ = ""; + backupId_ = ""; + sourceBackup_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CopyBackupRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CopyBackupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CopyBackupRequest.class, + com.google.spanner.admin.database.v1.CopyBackupRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
    +   * Required. The name of the destination instance that will contain the backup
    +   * copy. Values are of the form: `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the destination instance that will contain the backup
    +   * copy. Values are of the form: `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BACKUP_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object backupId_ = ""; + + /** + * + * + *
    +   * Required. The id of the backup copy.
    +   * The `backup_id` appended to `parent` forms the full backup_uri of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * string backup_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The backupId. + */ + @java.lang.Override + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The id of the backup copy.
    +   * The `backup_id` appended to `parent` forms the full backup_uri of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * string backup_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for backupId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SOURCE_BACKUP_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object sourceBackup_ = ""; + + /** + * + * + *
    +   * Required. The source backup to be copied.
    +   * The source backup needs to be in READY state for it to be copied.
    +   * Once CopyBackup is in progress, the source backup cannot be deleted or
    +   * cleaned up on expiration until CopyBackup is finished.
    +   * Values are of the form:
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * + * string source_backup = 3 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The sourceBackup. + */ + @java.lang.Override + public java.lang.String getSourceBackup() { + java.lang.Object ref = sourceBackup_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sourceBackup_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The source backup to be copied.
    +   * The source backup needs to be in READY state for it to be copied.
    +   * Once CopyBackup is in progress, the source backup cannot be deleted or
    +   * cleaned up on expiration until CopyBackup is finished.
    +   * Values are of the form:
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * + * string source_backup = 3 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for sourceBackup. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSourceBackupBytes() { + java.lang.Object ref = sourceBackup_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sourceBackup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int EXPIRE_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp expireTime_; + + /** + * + * + *
    +   * Required. The expiration time of the backup in microsecond granularity.
    +   * The expiration time must be at least 6 hours and at most 366 days
    +   * from the `create_time` of the source backup. Once the `expire_time` has
    +   * passed, the backup is eligible to be automatically deleted by Cloud Spanner
    +   * to free the resources used by the backup.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the expireTime field is set. + */ + @java.lang.Override + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Required. The expiration time of the backup in microsecond granularity.
    +   * The expiration time must be at least 6 hours and at most 366 days
    +   * from the `create_time` of the source backup. Once the `expire_time` has
    +   * passed, the backup is eligible to be automatically deleted by Cloud Spanner
    +   * to free the resources used by the backup.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The expireTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getExpireTime() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + /** + * + * + *
    +   * Required. The expiration time of the backup in microsecond granularity.
    +   * The expiration time must be at least 6 hours and at most 366 days
    +   * from the `create_time` of the source backup. Once the `expire_time` has
    +   * passed, the backup is eligible to be automatically deleted by Cloud Spanner
    +   * to free the resources used by the backup.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + public static final int ENCRYPTION_CONFIG_FIELD_NUMBER = 5; + private com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig encryptionConfig_; + + /** + * + * + *
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the source backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the encryptionConfig field is set. + */ + @java.lang.Override + public boolean hasEncryptionConfig() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the source backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The encryptionConfig. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig getEncryptionConfig() { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + + /** + * + * + *
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the source backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.CopyBackupEncryptionConfigOrBuilder + getEncryptionConfigOrBuilder() { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(backupId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, backupId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sourceBackup_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, sourceBackup_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(4, getExpireTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(5, getEncryptionConfig()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(backupId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, backupId_); + } + if 
(!com.google.protobuf.GeneratedMessage.isStringEmpty(sourceBackup_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, sourceBackup_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getExpireTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getEncryptionConfig()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.CopyBackupRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.CopyBackupRequest other = + (com.google.spanner.admin.database.v1.CopyBackupRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getBackupId().equals(other.getBackupId())) return false; + if (!getSourceBackup().equals(other.getSourceBackup())) return false; + if (hasExpireTime() != other.hasExpireTime()) return false; + if (hasExpireTime()) { + if (!getExpireTime().equals(other.getExpireTime())) return false; + } + if (hasEncryptionConfig() != other.hasEncryptionConfig()) return false; + if (hasEncryptionConfig()) { + if (!getEncryptionConfig().equals(other.getEncryptionConfig())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); + hash = (37 * hash) + SOURCE_BACKUP_FIELD_NUMBER; + hash = (53 * hash) + getSourceBackup().hashCode(); + 
if (hasExpireTime()) { + hash = (37 * hash) + EXPIRE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getExpireTime().hashCode(); + } + if (hasEncryptionConfig()) { + hash = (37 * hash) + ENCRYPTION_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getEncryptionConfig().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.CopyBackupRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CopyBackupRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CopyBackupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CopyBackupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CopyBackupRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CopyBackupRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CopyBackupRequest parseFrom( + java.io.InputStream input) throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CopyBackupRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CopyBackupRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CopyBackupRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CopyBackupRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CopyBackupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.CopyBackupRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == 
DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CopyBackupRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.CopyBackupRequest) + com.google.spanner.admin.database.v1.CopyBackupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CopyBackupRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CopyBackupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CopyBackupRequest.class, + com.google.spanner.admin.database.v1.CopyBackupRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.CopyBackupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetExpireTimeFieldBuilder(); + internalGetEncryptionConfigFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + backupId_ = ""; + sourceBackup_ = ""; + expireTime_ = null; + if (expireTimeBuilder_ != null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + encryptionConfig_ = null; + if (encryptionConfigBuilder_ != null) { + encryptionConfigBuilder_.dispose(); + encryptionConfigBuilder_ = null; + } + return this; + } + + 
@java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CopyBackupRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CopyBackupRequest getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.CopyBackupRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CopyBackupRequest build() { + com.google.spanner.admin.database.v1.CopyBackupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CopyBackupRequest buildPartial() { + com.google.spanner.admin.database.v1.CopyBackupRequest result = + new com.google.spanner.admin.database.v1.CopyBackupRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.CopyBackupRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.backupId_ = backupId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.sourceBackup_ = sourceBackup_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.expireTime_ = expireTimeBuilder_ == null ? expireTime_ : expireTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.encryptionConfig_ = + encryptionConfigBuilder_ == null ? 
encryptionConfig_ : encryptionConfigBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.CopyBackupRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.CopyBackupRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.CopyBackupRequest other) { + if (other == com.google.spanner.admin.database.v1.CopyBackupRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getBackupId().isEmpty()) { + backupId_ = other.backupId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getSourceBackup().isEmpty()) { + sourceBackup_ = other.sourceBackup_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasExpireTime()) { + mergeExpireTime(other.getExpireTime()); + } + if (other.hasEncryptionConfig()) { + mergeEncryptionConfig(other.getEncryptionConfig()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + backupId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + 
sourceBackup_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetExpireTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + input.readMessage( + internalGetEncryptionConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
    +     * Required. The name of the destination instance that will contain the backup
    +     * copy. Values are of the form: `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the destination instance that will contain the backup
    +     * copy. Values are of the form: `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the destination instance that will contain the backup
    +     * copy. Values are of the form: `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the destination instance that will contain the backup
    +     * copy. Values are of the form: `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the destination instance that will contain the backup
    +     * copy. Values are of the form: `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object backupId_ = ""; + + /** + * + * + *
    +     * Required. The id of the backup copy.
    +     * The `backup_id` appended to `parent` forms the full backup_uri of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * string backup_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The backupId. + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The id of the backup copy.
    +     * The `backup_id` appended to `parent` forms the full backup_uri of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * string backup_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for backupId. + */ + public com.google.protobuf.ByteString getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The id of the backup copy.
    +     * The `backup_id` appended to `parent` forms the full backup_uri of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * string backup_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The backupId to set. + * @return This builder for chaining. + */ + public Builder setBackupId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + backupId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The id of the backup copy.
    +     * The `backup_id` appended to `parent` forms the full backup_uri of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * string backup_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearBackupId() { + backupId_ = getDefaultInstance().getBackupId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The id of the backup copy.
    +     * The `backup_id` appended to `parent` forms the full backup_uri of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * string backup_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for backupId to set. + * @return This builder for chaining. + */ + public Builder setBackupIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + backupId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object sourceBackup_ = ""; + + /** + * + * + *
    +     * Required. The source backup to be copied.
    +     * The source backup needs to be in READY state for it to be copied.
    +     * Once CopyBackup is in progress, the source backup cannot be deleted or
    +     * cleaned up on expiration until CopyBackup is finished.
    +     * Values are of the form:
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * + * string source_backup = 3 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The sourceBackup. + */ + public java.lang.String getSourceBackup() { + java.lang.Object ref = sourceBackup_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sourceBackup_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The source backup to be copied.
    +     * The source backup needs to be in READY state for it to be copied.
    +     * Once CopyBackup is in progress, the source backup cannot be deleted or
    +     * cleaned up on expiration until CopyBackup is finished.
    +     * Values are of the form:
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * + * string source_backup = 3 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for sourceBackup. + */ + public com.google.protobuf.ByteString getSourceBackupBytes() { + java.lang.Object ref = sourceBackup_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sourceBackup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The source backup to be copied.
    +     * The source backup needs to be in READY state for it to be copied.
    +     * Once CopyBackup is in progress, the source backup cannot be deleted or
    +     * cleaned up on expiration until CopyBackup is finished.
    +     * Values are of the form:
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * + * string source_backup = 3 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The sourceBackup to set. + * @return This builder for chaining. + */ + public Builder setSourceBackup(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + sourceBackup_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The source backup to be copied.
    +     * The source backup needs to be in READY state for it to be copied.
    +     * Once CopyBackup is in progress, the source backup cannot be deleted or
    +     * cleaned up on expiration until CopyBackup is finished.
    +     * Values are of the form:
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * + * string source_backup = 3 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearSourceBackup() { + sourceBackup_ = getDefaultInstance().getSourceBackup(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The source backup to be copied.
    +     * The source backup needs to be in READY state for it to be copied.
    +     * Once CopyBackup is in progress, the source backup cannot be deleted or
    +     * cleaned up on expiration until CopyBackup is finished.
    +     * Values are of the form:
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * + * string source_backup = 3 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for sourceBackup to set. + * @return This builder for chaining. + */ + public Builder setSourceBackupBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + sourceBackup_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp expireTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + expireTimeBuilder_; + + /** + * + * + *
    +     * Required. The expiration time of the backup in microsecond granularity.
    +     * The expiration time must be at least 6 hours and at most 366 days
    +     * from the `create_time` of the source backup. Once the `expire_time` has
    +     * passed, the backup is eligible to be automatically deleted by Cloud Spanner
    +     * to free the resources used by the backup.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the expireTime field is set. + */ + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Required. The expiration time of the backup in microsecond granularity.
    +     * The expiration time must be at least 6 hours and at most 366 days
    +     * from the `create_time` of the source backup. Once the `expire_time` has
    +     * passed, the backup is eligible to be automatically deleted by Cloud Spanner
    +     * to free the resources used by the backup.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The expireTime. + */ + public com.google.protobuf.Timestamp getExpireTime() { + if (expireTimeBuilder_ == null) { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } else { + return expireTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. The expiration time of the backup in microsecond granularity.
    +     * The expiration time must be at least 6 hours and at most 366 days
    +     * from the `create_time` of the source backup. Once the `expire_time` has
    +     * passed, the backup is eligible to be automatically deleted by Cloud Spanner
    +     * to free the resources used by the backup.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + expireTime_ = value; + } else { + expireTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The expiration time of the backup in microsecond granularity.
    +     * The expiration time must be at least 6 hours and at most 366 days
    +     * from the `create_time` of the source backup. Once the `expire_time` has
    +     * passed, the backup is eligible to be automatically deleted by Cloud Spanner
    +     * to free the resources used by the backup.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setExpireTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (expireTimeBuilder_ == null) { + expireTime_ = builderForValue.build(); + } else { + expireTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The expiration time of the backup in microsecond granularity.
    +     * The expiration time must be at least 6 hours and at most 366 days
    +     * from the `create_time` of the source backup. Once the `expire_time` has
    +     * passed, the backup is eligible to be automatically deleted by Cloud Spanner
    +     * to free the resources used by the backup.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && expireTime_ != null + && expireTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getExpireTimeBuilder().mergeFrom(value); + } else { + expireTime_ = value; + } + } else { + expireTimeBuilder_.mergeFrom(value); + } + if (expireTime_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. The expiration time of the backup in microsecond granularity.
    +     * The expiration time must be at least 6 hours and at most 366 days
    +     * from the `create_time` of the source backup. Once the `expire_time` has
    +     * passed, the backup is eligible to be automatically deleted by Cloud Spanner
    +     * to free the resources used by the backup.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearExpireTime() { + bitField0_ = (bitField0_ & ~0x00000008); + expireTime_ = null; + if (expireTimeBuilder_ != null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The expiration time of the backup in microsecond granularity.
    +     * The expiration time must be at least 6 hours and at most 366 days
    +     * from the `create_time` of the source backup. Once the `expire_time` has
    +     * passed, the backup is eligible to be automatically deleted by Cloud Spanner
    +     * to free the resources used by the backup.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.Timestamp.Builder getExpireTimeBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetExpireTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. The expiration time of the backup in microsecond granularity.
    +     * The expiration time must be at least 6 hours and at most 366 days
    +     * from the `create_time` of the source backup. Once the `expire_time` has
    +     * passed, the backup is eligible to be automatically deleted by Cloud Spanner
    +     * to free the resources used by the backup.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + if (expireTimeBuilder_ != null) { + return expireTimeBuilder_.getMessageOrBuilder(); + } else { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } + } + + /** + * + * + *
    +     * Required. The expiration time of the backup in microsecond granularity.
    +     * The expiration time must be at least 6 hours and at most 366 days
    +     * from the `create_time` of the source backup. Once the `expire_time` has
    +     * passed, the backup is eligible to be automatically deleted by Cloud Spanner
    +     * to free the resources used by the backup.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetExpireTimeFieldBuilder() { + if (expireTimeBuilder_ == null) { + expireTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getExpireTime(), getParentForChildren(), isClean()); + expireTime_ = null; + } + return expireTimeBuilder_; + } + + private com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig encryptionConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig, + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.Builder, + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfigOrBuilder> + encryptionConfigBuilder_; + + /** + * + * + *
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the source backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the encryptionConfig field is set. + */ + public boolean hasEncryptionConfig() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the source backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The encryptionConfig. + */ + public com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig getEncryptionConfig() { + if (encryptionConfigBuilder_ == null) { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } else { + return encryptionConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the source backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setEncryptionConfig( + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig value) { + if (encryptionConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + encryptionConfig_ = value; + } else { + encryptionConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the source backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setEncryptionConfig( + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.Builder builderForValue) { + if (encryptionConfigBuilder_ == null) { + encryptionConfig_ = builderForValue.build(); + } else { + encryptionConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the source backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeEncryptionConfig( + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig value) { + if (encryptionConfigBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && encryptionConfig_ != null + && encryptionConfig_ + != com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig + .getDefaultInstance()) { + getEncryptionConfigBuilder().mergeFrom(value); + } else { + encryptionConfig_ = value; + } + } else { + encryptionConfigBuilder_.mergeFrom(value); + } + if (encryptionConfig_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the source backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearEncryptionConfig() { + bitField0_ = (bitField0_ & ~0x00000010); + encryptionConfig_ = null; + if (encryptionConfigBuilder_ != null) { + encryptionConfigBuilder_.dispose(); + encryptionConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the source backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.Builder + getEncryptionConfigBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetEncryptionConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the source backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.database.v1.CopyBackupEncryptionConfigOrBuilder + getEncryptionConfigOrBuilder() { + if (encryptionConfigBuilder_ != null) { + return encryptionConfigBuilder_.getMessageOrBuilder(); + } else { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + } + + /** + * + * + *
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the source backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig, + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.Builder, + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfigOrBuilder> + internalGetEncryptionConfigFieldBuilder() { + if (encryptionConfigBuilder_ == null) { + encryptionConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig, + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.Builder, + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfigOrBuilder>( + getEncryptionConfig(), getParentForChildren(), isClean()); + encryptionConfig_ = null; + } + return encryptionConfigBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.CopyBackupRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.CopyBackupRequest) + private static final com.google.spanner.admin.database.v1.CopyBackupRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.CopyBackupRequest(); + } + + public static com.google.spanner.admin.database.v1.CopyBackupRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CopyBackupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw 
e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CopyBackupRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupRequestOrBuilder.java new file mode 100644 index 000000000000..3eae68b598f1 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupRequestOrBuilder.java @@ -0,0 +1,238 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface CopyBackupRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.CopyBackupRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the destination instance that will contain the backup
    +   * copy. Values are of the form: `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
    +   * Required. The name of the destination instance that will contain the backup
    +   * copy. Values are of the form: `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
    +   * Required. The id of the backup copy.
    +   * The `backup_id` appended to `parent` forms the full backup_uri of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * string backup_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The backupId. + */ + java.lang.String getBackupId(); + + /** + * + * + *
    +   * Required. The id of the backup copy.
    +   * The `backup_id` appended to `parent` forms the full backup_uri of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * string backup_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for backupId. + */ + com.google.protobuf.ByteString getBackupIdBytes(); + + /** + * + * + *
    +   * Required. The source backup to be copied.
    +   * The source backup needs to be in READY state for it to be copied.
    +   * Once CopyBackup is in progress, the source backup cannot be deleted or
    +   * cleaned up on expiration until CopyBackup is finished.
    +   * Values are of the form:
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * + * string source_backup = 3 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The sourceBackup. + */ + java.lang.String getSourceBackup(); + + /** + * + * + *
    +   * Required. The source backup to be copied.
    +   * The source backup needs to be in READY state for it to be copied.
    +   * Once CopyBackup is in progress, the source backup cannot be deleted or
    +   * cleaned up on expiration until CopyBackup is finished.
    +   * Values are of the form:
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * + * string source_backup = 3 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for sourceBackup. + */ + com.google.protobuf.ByteString getSourceBackupBytes(); + + /** + * + * + *
    +   * Required. The expiration time of the backup in microsecond granularity.
    +   * The expiration time must be at least 6 hours and at most 366 days
    +   * from the `create_time` of the source backup. Once the `expire_time` has
    +   * passed, the backup is eligible to be automatically deleted by Cloud Spanner
    +   * to free the resources used by the backup.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the expireTime field is set. + */ + boolean hasExpireTime(); + + /** + * + * + *
    +   * Required. The expiration time of the backup in microsecond granularity.
    +   * The expiration time must be at least 6 hours and at most 366 days
    +   * from the `create_time` of the source backup. Once the `expire_time` has
    +   * passed, the backup is eligible to be automatically deleted by Cloud Spanner
    +   * to free the resources used by the backup.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The expireTime. + */ + com.google.protobuf.Timestamp getExpireTime(); + + /** + * + * + *
    +   * Required. The expiration time of the backup in microsecond granularity.
    +   * The expiration time must be at least 6 hours and at most 366 days
    +   * from the `create_time` of the source backup. Once the `expire_time` has
    +   * passed, the backup is eligible to be automatically deleted by Cloud Spanner
    +   * to free the resources used by the backup.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder(); + + /** + * + * + *
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the source backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the encryptionConfig field is set. + */ + boolean hasEncryptionConfig(); + + /** + * + * + *
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the source backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The encryptionConfig. + */ + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig getEncryptionConfig(); + + /** + * + * + *
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the source backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CopyBackupEncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.admin.database.v1.CopyBackupEncryptionConfigOrBuilder + getEncryptionConfigOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupEncryptionConfig.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupEncryptionConfig.java new file mode 100644 index 000000000000..1540a3abc7c1 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupEncryptionConfig.java @@ -0,0 +1,1539 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Encryption configuration for the backup to create.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CreateBackupEncryptionConfig} + */ +@com.google.protobuf.Generated +public final class CreateBackupEncryptionConfig extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.CreateBackupEncryptionConfig) + CreateBackupEncryptionConfigOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateBackupEncryptionConfig"); + } + + // Use CreateBackupEncryptionConfig.newBuilder() to construct. + private CreateBackupEncryptionConfig(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateBackupEncryptionConfig() { + encryptionType_ = 0; + kmsKeyName_ = ""; + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CreateBackupEncryptionConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CreateBackupEncryptionConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.class, + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.Builder.class); + } + + /** + * + * + *
    +   * Encryption types for the backup.
    +   * 
    + * + * Protobuf enum {@code + * google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType} + */ + public enum EncryptionType implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Unspecified. Do not use.
    +     * 
    + * + * ENCRYPTION_TYPE_UNSPECIFIED = 0; + */ + ENCRYPTION_TYPE_UNSPECIFIED(0), + /** + * + * + *
    +     * Use the same encryption configuration as the database. This is the
    +     * default option when
    +     * [encryption_config][google.spanner.admin.database.v1.CreateBackupEncryptionConfig]
    +     * is empty. For example, if the database is using
    +     * `Customer_Managed_Encryption`, the backup will be using the same Cloud
    +     * KMS key as the database.
    +     * 
    + * + * USE_DATABASE_ENCRYPTION = 1; + */ + USE_DATABASE_ENCRYPTION(1), + /** + * + * + *
    +     * Use Google default encryption.
    +     * 
    + * + * GOOGLE_DEFAULT_ENCRYPTION = 2; + */ + GOOGLE_DEFAULT_ENCRYPTION(2), + /** + * + * + *
    +     * Use customer managed encryption. If specified, `kms_key_name`
    +     * must contain a valid Cloud KMS key.
    +     * 
    + * + * CUSTOMER_MANAGED_ENCRYPTION = 3; + */ + CUSTOMER_MANAGED_ENCRYPTION(3), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "EncryptionType"); + } + + /** + * + * + *
    +     * Unspecified. Do not use.
    +     * 
    + * + * ENCRYPTION_TYPE_UNSPECIFIED = 0; + */ + public static final int ENCRYPTION_TYPE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * Use the same encryption configuration as the database. This is the
    +     * default option when
    +     * [encryption_config][google.spanner.admin.database.v1.CreateBackupEncryptionConfig]
    +     * is empty. For example, if the database is using
    +     * `Customer_Managed_Encryption`, the backup will be using the same Cloud
    +     * KMS key as the database.
    +     * 
    + * + * USE_DATABASE_ENCRYPTION = 1; + */ + public static final int USE_DATABASE_ENCRYPTION_VALUE = 1; + + /** + * + * + *
    +     * Use Google default encryption.
    +     * 
    + * + * GOOGLE_DEFAULT_ENCRYPTION = 2; + */ + public static final int GOOGLE_DEFAULT_ENCRYPTION_VALUE = 2; + + /** + * + * + *
    +     * Use customer managed encryption. If specified, `kms_key_name`
    +     * must contain a valid Cloud KMS key.
    +     * 
    + * + * CUSTOMER_MANAGED_ENCRYPTION = 3; + */ + public static final int CUSTOMER_MANAGED_ENCRYPTION_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static EncryptionType valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static EncryptionType forNumber(int value) { + switch (value) { + case 0: + return ENCRYPTION_TYPE_UNSPECIFIED; + case 1: + return USE_DATABASE_ENCRYPTION; + case 2: + return GOOGLE_DEFAULT_ENCRYPTION; + case 3: + return CUSTOMER_MANAGED_ENCRYPTION; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public EncryptionType findValueByNumber(int number) { + return EncryptionType.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return 
com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final EncryptionType[] VALUES = values(); + + public static EncryptionType valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private EncryptionType(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType) + } + + public static final int ENCRYPTION_TYPE_FIELD_NUMBER = 1; + private int encryptionType_ = 0; + + /** + * + * + *
    +   * Required. The encryption type of the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The enum numeric value on the wire for encryptionType. + */ + @java.lang.Override + public int getEncryptionTypeValue() { + return encryptionType_; + } + + /** + * + * + *
    +   * Required. The encryption type of the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The encryptionType. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType + getEncryptionType() { + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType result = + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType.forNumber( + encryptionType_); + return result == null + ? com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType + .UNRECOGNIZED + : result; + } + + public static final int KMS_KEY_NAME_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object kmsKeyName_ = ""; + + /** + * + * + *
    +   * Optional. The Cloud KMS key that will be used to protect the backup.
    +   * This field should be set only when
    +   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The kmsKeyName. + */ + @java.lang.Override + public java.lang.String getKmsKeyName() { + java.lang.Object ref = kmsKeyName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + kmsKeyName_ = s; + return s; + } + } + + /** + * + * + *
    +   * Optional. The Cloud KMS key that will be used to protect the backup.
    +   * This field should be set only when
    +   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for kmsKeyName. + */ + @java.lang.Override + public com.google.protobuf.ByteString getKmsKeyNameBytes() { + java.lang.Object ref = kmsKeyName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + kmsKeyName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int KMS_KEY_NAMES_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList kmsKeyNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the kmsKeyNames. + */ + public com.google.protobuf.ProtocolStringList getKmsKeyNamesList() { + return kmsKeyNames_; + } + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The count of kmsKeyNames. + */ + public int getKmsKeyNamesCount() { + return kmsKeyNames_.size(); + } + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + public java.lang.String getKmsKeyNames(int index) { + return kmsKeyNames_.get(index); + } + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + public com.google.protobuf.ByteString getKmsKeyNamesBytes(int index) { + return kmsKeyNames_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (encryptionType_ + != com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType + .ENCRYPTION_TYPE_UNSPECIFIED + .getNumber()) { + output.writeEnum(1, encryptionType_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(kmsKeyName_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, kmsKeyName_); + } + for (int i = 0; i < kmsKeyNames_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, kmsKeyNames_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (encryptionType_ + != com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType + .ENCRYPTION_TYPE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, encryptionType_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(kmsKeyName_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, kmsKeyName_); + } + { + int dataSize = 0; + for (int i = 0; i < kmsKeyNames_.size(); i++) { + dataSize += computeStringSizeNoTag(kmsKeyNames_.getRaw(i)); + 
} + size += dataSize; + size += 1 * getKmsKeyNamesList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig other = + (com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig) obj; + + if (encryptionType_ != other.encryptionType_) return false; + if (!getKmsKeyName().equals(other.getKmsKeyName())) return false; + if (!getKmsKeyNamesList().equals(other.getKmsKeyNamesList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ENCRYPTION_TYPE_FIELD_NUMBER; + hash = (53 * hash) + encryptionType_; + hash = (37 * hash) + KMS_KEY_NAME_FIELD_NUMBER; + hash = (53 * hash) + getKmsKeyName().hashCode(); + if (getKmsKeyNamesCount() > 0) { + hash = (37 * hash) + KMS_KEY_NAMES_FIELD_NUMBER; + hash = (53 * hash) + getKmsKeyNamesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } 
+ + public static com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig + parseDelimitedFrom( + java.io.InputStream 
input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Encryption configuration for the backup to create.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CreateBackupEncryptionConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.CreateBackupEncryptionConfig) + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CreateBackupEncryptionConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CreateBackupEncryptionConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.class, + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.Builder.class); + } + + // Construct using + // com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + encryptionType_ = 0; + kmsKeyName_ = ""; + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CreateBackupEncryptionConfig_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig + getDefaultInstanceForType() { + return 
com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig build() { + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig buildPartial() { + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig result = + new com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.encryptionType_ = encryptionType_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.kmsKeyName_ = kmsKeyName_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + kmsKeyNames_.makeImmutable(); + result.kmsKeyNames_ = kmsKeyNames_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig) { + return mergeFrom((com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig other) { + if (other + == com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.getDefaultInstance()) + return this; + if (other.encryptionType_ != 0) { + setEncryptionTypeValue(other.getEncryptionTypeValue()); + } + if (!other.getKmsKeyName().isEmpty()) { + kmsKeyName_ = other.kmsKeyName_; + bitField0_ |= 0x00000002; + onChanged(); + } 
+ if (!other.kmsKeyNames_.isEmpty()) { + if (kmsKeyNames_.isEmpty()) { + kmsKeyNames_ = other.kmsKeyNames_; + bitField0_ |= 0x00000004; + } else { + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.addAll(other.kmsKeyNames_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + encryptionType_ = input.readEnum(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + kmsKeyName_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(s); + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int encryptionType_ = 0; + + /** + * + * + *
    +     * Required. The encryption type of the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The enum numeric value on the wire for encryptionType. + */ + @java.lang.Override + public int getEncryptionTypeValue() { + return encryptionType_; + } + + /** + * + * + *
    +     * Required. The encryption type of the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @param value The enum numeric value on the wire for encryptionType to set. + * @return This builder for chaining. + */ + public Builder setEncryptionTypeValue(int value) { + encryptionType_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The encryption type of the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The encryptionType. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType + getEncryptionType() { + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType result = + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType + .forNumber(encryptionType_); + return result == null + ? com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType + .UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * Required. The encryption type of the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @param value The encryptionType to set. + * @return This builder for chaining. + */ + public Builder setEncryptionType( + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + encryptionType_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The encryption type of the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return This builder for chaining. + */ + public Builder clearEncryptionType() { + bitField0_ = (bitField0_ & ~0x00000001); + encryptionType_ = 0; + onChanged(); + return this; + } + + private java.lang.Object kmsKeyName_ = ""; + + /** + * + * + *
    +     * Optional. The Cloud KMS key that will be used to protect the backup.
    +     * This field should be set only when
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The kmsKeyName. + */ + public java.lang.String getKmsKeyName() { + java.lang.Object ref = kmsKeyName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + kmsKeyName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Optional. The Cloud KMS key that will be used to protect the backup.
    +     * This field should be set only when
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for kmsKeyName. + */ + public com.google.protobuf.ByteString getKmsKeyNameBytes() { + java.lang.Object ref = kmsKeyName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + kmsKeyName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Optional. The Cloud KMS key that will be used to protect the backup.
    +     * This field should be set only when
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The kmsKeyName to set. + * @return This builder for chaining. + */ + public Builder setKmsKeyName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + kmsKeyName_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The Cloud KMS key that will be used to protect the backup.
    +     * This field should be set only when
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearKmsKeyName() { + kmsKeyName_ = getDefaultInstance().getKmsKeyName(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The Cloud KMS key that will be used to protect the backup.
    +     * This field should be set only when
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for kmsKeyName to set. + * @return This builder for chaining. + */ + public Builder setKmsKeyNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + kmsKeyName_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList kmsKeyNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureKmsKeyNamesIsMutable() { + if (!kmsKeyNames_.isModifiable()) { + kmsKeyNames_ = new com.google.protobuf.LazyStringArrayList(kmsKeyNames_); + } + bitField0_ |= 0x00000004; + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the kmsKeyNames. + */ + public com.google.protobuf.ProtocolStringList getKmsKeyNamesList() { + kmsKeyNames_.makeImmutable(); + return kmsKeyNames_; + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The count of kmsKeyNames. + */ + public int getKmsKeyNamesCount() { + return kmsKeyNames_.size(); + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + public java.lang.String getKmsKeyNames(int index) { + return kmsKeyNames_.get(index); + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + public com.google.protobuf.ByteString getKmsKeyNamesBytes(int index) { + return kmsKeyNames_.getByteString(index); + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index to set the value at. + * @param value The kmsKeyNames to set. + * @return This builder for chaining. + */ + public Builder setKmsKeyNames(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.set(index, value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addKmsKeyNames(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param values The kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addAllKmsKeyNames(java.lang.Iterable values) { + ensureKmsKeyNamesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, kmsKeyNames_); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearKmsKeyNames() { + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes of the kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addKmsKeyNamesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.CreateBackupEncryptionConfig) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.CreateBackupEncryptionConfig) + private static final com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig(); + } + + public static com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateBackupEncryptionConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + 
return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupEncryptionConfigOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupEncryptionConfigOrBuilder.java new file mode 100644 index 000000000000..abd7c65b69b3 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupEncryptionConfigOrBuilder.java @@ -0,0 +1,215 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface CreateBackupEncryptionConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.CreateBackupEncryptionConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The encryption type of the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The enum numeric value on the wire for encryptionType. + */ + int getEncryptionTypeValue(); + + /** + * + * + *
    +   * Required. The encryption type of the backup.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The encryptionType. + */ + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType + getEncryptionType(); + + /** + * + * + *
    +   * Optional. The Cloud KMS key that will be used to protect the backup.
    +   * This field should be set only when
    +   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The kmsKeyName. + */ + java.lang.String getKmsKeyName(); + + /** + * + * + *
    +   * Optional. The Cloud KMS key that will be used to protect the backup.
    +   * This field should be set only when
    +   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for kmsKeyName. + */ + com.google.protobuf.ByteString getKmsKeyNameBytes(); + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the kmsKeyNames. + */ + java.util.List getKmsKeyNamesList(); + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The count of kmsKeyNames. + */ + int getKmsKeyNamesCount(); + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + java.lang.String getKmsKeyNames(int index); + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + com.google.protobuf.ByteString getKmsKeyNamesBytes(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupMetadata.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupMetadata.java new file mode 100644 index 000000000000..1bab9d15b762 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupMetadata.java @@ -0,0 +1,1515 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Metadata type for the operation returned by
    + * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CreateBackupMetadata} + */ +@com.google.protobuf.Generated +public final class CreateBackupMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.CreateBackupMetadata) + CreateBackupMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateBackupMetadata"); + } + + // Use CreateBackupMetadata.newBuilder() to construct. + private CreateBackupMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateBackupMetadata() { + name_ = ""; + database_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CreateBackupMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CreateBackupMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CreateBackupMetadata.class, + com.google.spanner.admin.database.v1.CreateBackupMetadata.Builder.class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * The name of the backup being created.
    +   * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * The name of the backup being created.
    +   * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DATABASE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object database_ = ""; + + /** + * + * + *
    +   * The name of the database the backup is created from.
    +   * 
    + * + * string database = 2 [(.google.api.resource_reference) = { ... } + * + * @return The database. + */ + @java.lang.Override + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } + } + + /** + * + * + *
    +   * The name of the database the backup is created from.
    +   * 
    + * + * string database = 2 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for database. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROGRESS_FIELD_NUMBER = 3; + private com.google.spanner.admin.database.v1.OperationProgress progress_; + + /** + * + * + *
    +   * The progress of the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + * + * @return Whether the progress field is set. + */ + @java.lang.Override + public boolean hasProgress() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The progress of the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + * + * @return The progress. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.OperationProgress getProgress() { + return progress_ == null + ? com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance() + : progress_; + } + + /** + * + * + *
    +   * The progress of the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgressOrBuilder() { + return progress_ == null + ? com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance() + : progress_; + } + + public static final int CANCEL_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp cancelTime_; + + /** + * + * + *
    +   * The time at which cancellation of this operation was received.
    +   * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +   * starts asynchronous cancellation on a long-running operation. The server
    +   * makes a best effort to cancel the operation, but success is not guaranteed.
    +   * Clients can use
    +   * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +   * other methods to check whether the cancellation succeeded or whether the
    +   * operation completed despite cancellation. On successful cancellation,
    +   * the operation is not deleted; instead, it becomes an operation with
    +   * an [Operation.error][google.longrunning.Operation.error] value with a
    +   * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +   * corresponding to `Code.CANCELLED`.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + * + * @return Whether the cancelTime field is set. + */ + @java.lang.Override + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * The time at which cancellation of this operation was received.
    +   * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +   * starts asynchronous cancellation on a long-running operation. The server
    +   * makes a best effort to cancel the operation, but success is not guaranteed.
    +   * Clients can use
    +   * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +   * other methods to check whether the cancellation succeeded or whether the
    +   * operation completed despite cancellation. On successful cancellation,
    +   * the operation is not deleted; instead, it becomes an operation with
    +   * an [Operation.error][google.longrunning.Operation.error] value with a
    +   * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +   * corresponding to `Code.CANCELLED`.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + * + * @return The cancelTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCancelTime() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + /** + * + * + *
    +   * The time at which cancellation of this operation was received.
    +   * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +   * starts asynchronous cancellation on a long-running operation. The server
    +   * makes a best effort to cancel the operation, but success is not guaranteed.
    +   * Clients can use
    +   * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +   * other methods to check whether the cancellation succeeded or whether the
    +   * operation completed despite cancellation. On successful cancellation,
    +   * the operation is not deleted; instead, it becomes an operation with
    +   * an [Operation.error][google.longrunning.Operation.error] value with a
    +   * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +   * corresponding to `Code.CANCELLED`.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, database_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getProgress()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(4, getCancelTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, database_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getProgress()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getCancelTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + 
public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.CreateBackupMetadata)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.CreateBackupMetadata other = + (com.google.spanner.admin.database.v1.CreateBackupMetadata) obj; + + if (!getName().equals(other.getName())) return false; + if (!getDatabase().equals(other.getDatabase())) return false; + if (hasProgress() != other.hasProgress()) return false; + if (hasProgress()) { + if (!getProgress().equals(other.getProgress())) return false; + } + if (hasCancelTime() != other.hasCancelTime()) return false; + if (hasCancelTime()) { + if (!getCancelTime().equals(other.getCancelTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getDatabase().hashCode(); + if (hasProgress()) { + hash = (37 * hash) + PROGRESS_FIELD_NUMBER; + hash = (53 * hash) + getProgress().hashCode(); + } + if (hasCancelTime()) { + hash = (37 * hash) + CANCEL_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCancelTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.CreateBackupMetadata parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CreateBackupMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CreateBackupMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupMetadata parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CreateBackupMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupMetadata parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CreateBackupMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupMetadata parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CreateBackupMetadata 
parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CreateBackupMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.CreateBackupMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Metadata type for the operation returned by
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CreateBackupMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.CreateBackupMetadata) + com.google.spanner.admin.database.v1.CreateBackupMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CreateBackupMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CreateBackupMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CreateBackupMetadata.class, + com.google.spanner.admin.database.v1.CreateBackupMetadata.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.CreateBackupMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetProgressFieldBuilder(); + internalGetCancelTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + database_ = ""; + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CreateBackupMetadata_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupMetadata getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.CreateBackupMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupMetadata build() { + com.google.spanner.admin.database.v1.CreateBackupMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupMetadata buildPartial() { + com.google.spanner.admin.database.v1.CreateBackupMetadata result = + new com.google.spanner.admin.database.v1.CreateBackupMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.CreateBackupMetadata result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.database_ = database_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.progress_ = progressBuilder_ == null ? progress_ : progressBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.cancelTime_ = cancelTimeBuilder_ == null ? 
cancelTime_ : cancelTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.CreateBackupMetadata) { + return mergeFrom((com.google.spanner.admin.database.v1.CreateBackupMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.CreateBackupMetadata other) { + if (other == com.google.spanner.admin.database.v1.CreateBackupMetadata.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getDatabase().isEmpty()) { + database_ = other.database_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasProgress()) { + mergeProgress(other.getProgress()); + } + if (other.hasCancelTime()) { + mergeCancelTime(other.getCancelTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + database_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetProgressFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + 
input.readMessage( + internalGetCancelTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * The name of the backup being created.
    +     * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The name of the backup being created.
    +     * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The name of the backup being created.
    +     * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The name of the backup being created.
    +     * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The name of the backup being created.
    +     * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object database_ = ""; + + /** + * + * + *
    +     * The name of the database the backup is created from.
    +     * 
    + * + * string database = 2 [(.google.api.resource_reference) = { ... } + * + * @return The database. + */ + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The name of the database the backup is created from.
    +     * 
    + * + * string database = 2 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for database. + */ + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The name of the database the backup is created from.
    +     * 
    + * + * string database = 2 [(.google.api.resource_reference) = { ... } + * + * @param value The database to set. + * @return This builder for chaining. + */ + public Builder setDatabase(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + database_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The name of the database the backup is created from.
    +     * 
    + * + * string database = 2 [(.google.api.resource_reference) = { ... } + * + * @return This builder for chaining. + */ + public Builder clearDatabase() { + database_ = getDefaultInstance().getDatabase(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The name of the database the backup is created from.
    +     * 
    + * + * string database = 2 [(.google.api.resource_reference) = { ... } + * + * @param value The bytes for database to set. + * @return This builder for chaining. + */ + public Builder setDatabaseBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + database_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.spanner.admin.database.v1.OperationProgress progress_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.OperationProgress, + com.google.spanner.admin.database.v1.OperationProgress.Builder, + com.google.spanner.admin.database.v1.OperationProgressOrBuilder> + progressBuilder_; + + /** + * + * + *
    +     * The progress of the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + * + * @return Whether the progress field is set. + */ + public boolean hasProgress() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * The progress of the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + * + * @return The progress. + */ + public com.google.spanner.admin.database.v1.OperationProgress getProgress() { + if (progressBuilder_ == null) { + return progress_ == null + ? com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance() + : progress_; + } else { + return progressBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The progress of the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + */ + public Builder setProgress(com.google.spanner.admin.database.v1.OperationProgress value) { + if (progressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + progress_ = value; + } else { + progressBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + */ + public Builder setProgress( + com.google.spanner.admin.database.v1.OperationProgress.Builder builderForValue) { + if (progressBuilder_ == null) { + progress_ = builderForValue.build(); + } else { + progressBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + */ + public Builder mergeProgress(com.google.spanner.admin.database.v1.OperationProgress value) { + if (progressBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && progress_ != null + && progress_ + != com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance()) { + getProgressBuilder().mergeFrom(value); + } else { + progress_ = value; + } + } else { + progressBuilder_.mergeFrom(value); + } + if (progress_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + */ + public Builder clearProgress() { + bitField0_ = (bitField0_ & ~0x00000004); + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + */ + public com.google.spanner.admin.database.v1.OperationProgress.Builder getProgressBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetProgressFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The progress of the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + */ + public com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgressOrBuilder() { + if (progressBuilder_ != null) { + return progressBuilder_.getMessageOrBuilder(); + } else { + return progress_ == null + ? com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance() + : progress_; + } + } + + /** + * + * + *
    +     * The progress of the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.OperationProgress, + com.google.spanner.admin.database.v1.OperationProgress.Builder, + com.google.spanner.admin.database.v1.OperationProgressOrBuilder> + internalGetProgressFieldBuilder() { + if (progressBuilder_ == null) { + progressBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.OperationProgress, + com.google.spanner.admin.database.v1.OperationProgress.Builder, + com.google.spanner.admin.database.v1.OperationProgressOrBuilder>( + getProgress(), getParentForChildren(), isClean()); + progress_ = null; + } + return progressBuilder_; + } + + private com.google.protobuf.Timestamp cancelTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + cancelTimeBuilder_; + + /** + * + * + *
    +     * The time at which cancellation of this operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +     * corresponding to `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + * + * @return Whether the cancelTime field is set. + */ + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * The time at which cancellation of this operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +     * corresponding to `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + * + * @return The cancelTime. + */ + public com.google.protobuf.Timestamp getCancelTime() { + if (cancelTimeBuilder_ == null) { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } else { + return cancelTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The time at which cancellation of this operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +     * corresponding to `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cancelTime_ = value; + } else { + cancelTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which cancellation of this operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +     * corresponding to `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (cancelTimeBuilder_ == null) { + cancelTime_ = builderForValue.build(); + } else { + cancelTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which cancellation of this operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +     * corresponding to `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + */ + public Builder mergeCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && cancelTime_ != null + && cancelTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCancelTimeBuilder().mergeFrom(value); + } else { + cancelTime_ = value; + } + } else { + cancelTimeBuilder_.mergeFrom(value); + } + if (cancelTime_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The time at which cancellation of this operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +     * corresponding to `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + */ + public Builder clearCancelTime() { + bitField0_ = (bitField0_ & ~0x00000008); + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which cancellation of this operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +     * corresponding to `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + */ + public com.google.protobuf.Timestamp.Builder getCancelTimeBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetCancelTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The time at which cancellation of this operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +     * corresponding to `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + */ + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + if (cancelTimeBuilder_ != null) { + return cancelTimeBuilder_.getMessageOrBuilder(); + } else { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } + } + + /** + * + * + *
    +     * The time at which cancellation of this operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +     * corresponding to `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCancelTimeFieldBuilder() { + if (cancelTimeBuilder_ == null) { + cancelTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCancelTime(), getParentForChildren(), isClean()); + cancelTime_ = null; + } + return cancelTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.CreateBackupMetadata) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.CreateBackupMetadata) + private static final com.google.spanner.admin.database.v1.CreateBackupMetadata DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.CreateBackupMetadata(); + } + + public static com.google.spanner.admin.database.v1.CreateBackupMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateBackupMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupMetadataOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupMetadataOrBuilder.java new file mode 100644 index 000000000000..61ebc10cc352 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupMetadataOrBuilder.java @@ -0,0 +1,193 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface CreateBackupMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.CreateBackupMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The name of the backup being created.
    +   * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * The name of the backup being created.
    +   * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +   * The name of the database the backup is created from.
    +   * 
    + * + * string database = 2 [(.google.api.resource_reference) = { ... } + * + * @return The database. + */ + java.lang.String getDatabase(); + + /** + * + * + *
    +   * The name of the database the backup is created from.
    +   * 
    + * + * string database = 2 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for database. + */ + com.google.protobuf.ByteString getDatabaseBytes(); + + /** + * + * + *
    +   * The progress of the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + * + * @return Whether the progress field is set. + */ + boolean hasProgress(); + + /** + * + * + *
    +   * The progress of the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + * + * @return The progress. + */ + com.google.spanner.admin.database.v1.OperationProgress getProgress(); + + /** + * + * + *
    +   * The progress of the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 3; + */ + com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgressOrBuilder(); + + /** + * + * + *
    +   * The time at which cancellation of this operation was received.
    +   * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +   * starts asynchronous cancellation on a long-running operation. The server
    +   * makes a best effort to cancel the operation, but success is not guaranteed.
    +   * Clients can use
    +   * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +   * other methods to check whether the cancellation succeeded or whether the
    +   * operation completed despite cancellation. On successful cancellation,
    +   * the operation is not deleted; instead, it becomes an operation with
    +   * an [Operation.error][google.longrunning.Operation.error] value with a
    +   * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +   * corresponding to `Code.CANCELLED`.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + * + * @return Whether the cancelTime field is set. + */ + boolean hasCancelTime(); + + /** + * + * + *
    +   * The time at which cancellation of this operation was received.
    +   * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +   * starts asynchronous cancellation on a long-running operation. The server
    +   * makes a best effort to cancel the operation, but success is not guaranteed.
    +   * Clients can use
    +   * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +   * other methods to check whether the cancellation succeeded or whether the
    +   * operation completed despite cancellation. On successful cancellation,
    +   * the operation is not deleted; instead, it becomes an operation with
    +   * an [Operation.error][google.longrunning.Operation.error] value with a
    +   * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +   * corresponding to `Code.CANCELLED`.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + * + * @return The cancelTime. + */ + com.google.protobuf.Timestamp getCancelTime(); + + /** + * + * + *
    +   * The time at which cancellation of this operation was received.
    +   * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +   * starts asynchronous cancellation on a long-running operation. The server
    +   * makes a best effort to cancel the operation, but success is not guaranteed.
    +   * Clients can use
    +   * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +   * other methods to check whether the cancellation succeeded or whether the
    +   * operation completed despite cancellation. On successful cancellation,
    +   * the operation is not deleted; instead, it becomes an operation with
    +   * an [Operation.error][google.longrunning.Operation.error] value with a
    +   * [google.rpc.Status.code][google.rpc.Status.code] of 1,
    +   * corresponding to `Code.CANCELLED`.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 4; + */ + com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupRequest.java new file mode 100644 index 000000000000..5b825bf82f76 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupRequest.java @@ -0,0 +1,1528 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CreateBackupRequest} + */ +@com.google.protobuf.Generated +public final class CreateBackupRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.CreateBackupRequest) + CreateBackupRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateBackupRequest"); + } + + // Use CreateBackupRequest.newBuilder() to construct. + private CreateBackupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateBackupRequest() { + parent_ = ""; + backupId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CreateBackupRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CreateBackupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CreateBackupRequest.class, + com.google.spanner.admin.database.v1.CreateBackupRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
    +   * Required. The name of the instance in which the backup will be
    +   * created. This must be the same instance that contains the database the
    +   * backup will be created from. The backup will be stored in the
    +   * location(s) specified in the instance configuration of this
    +   * instance. Values are of the form
    +   * `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the instance in which the backup will be
    +   * created. This must be the same instance that contains the database the
    +   * backup will be created from. The backup will be stored in the
    +   * location(s) specified in the instance configuration of this
    +   * instance. Values are of the form
    +   * `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BACKUP_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object backupId_ = ""; + + /** + * + * + *
    +   * Required. The id of the backup to be created. The `backup_id` appended to
    +   * `parent` forms the full backup name of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup_id>`.
    +   * 
    + * + * string backup_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The backupId. + */ + @java.lang.Override + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The id of the backup to be created. The `backup_id` appended to
    +   * `parent` forms the full backup name of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup_id>`.
    +   * 
    + * + * string backup_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for backupId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BACKUP_FIELD_NUMBER = 3; + private com.google.spanner.admin.database.v1.Backup backup_; + + /** + * + * + *
    +   * Required. The backup to create.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the backup field is set. + */ + @java.lang.Override + public boolean hasBackup() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Required. The backup to create.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The backup. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.Backup getBackup() { + return backup_ == null + ? com.google.spanner.admin.database.v1.Backup.getDefaultInstance() + : backup_; + } + + /** + * + * + *
    +   * Required. The backup to create.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupOrBuilder getBackupOrBuilder() { + return backup_ == null + ? com.google.spanner.admin.database.v1.Backup.getDefaultInstance() + : backup_; + } + + public static final int ENCRYPTION_CONFIG_FIELD_NUMBER = 4; + private com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryptionConfig_; + + /** + * + * + *
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the database by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +   * = `USE_DATABASE_ENCRYPTION`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the encryptionConfig field is set. + */ + @java.lang.Override + public boolean hasEncryptionConfig() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the database by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +   * = `USE_DATABASE_ENCRYPTION`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The encryptionConfig. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig getEncryptionConfig() { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + + /** + * + * + *
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the database by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +   * = `USE_DATABASE_ENCRYPTION`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfigOrBuilder + getEncryptionConfigOrBuilder() { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(backupId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, backupId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getBackup()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(4, getEncryptionConfig()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(backupId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, backupId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getBackup()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += 
com.google.protobuf.CodedOutputStream.computeMessageSize(4, getEncryptionConfig()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.CreateBackupRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.CreateBackupRequest other = + (com.google.spanner.admin.database.v1.CreateBackupRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getBackupId().equals(other.getBackupId())) return false; + if (hasBackup() != other.hasBackup()) return false; + if (hasBackup()) { + if (!getBackup().equals(other.getBackup())) return false; + } + if (hasEncryptionConfig() != other.hasEncryptionConfig()) return false; + if (hasEncryptionConfig()) { + if (!getEncryptionConfig().equals(other.getEncryptionConfig())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); + if (hasBackup()) { + hash = (37 * hash) + BACKUP_FIELD_NUMBER; + hash = (53 * hash) + getBackup().hashCode(); + } + if (hasEncryptionConfig()) { + hash = (37 * hash) + ENCRYPTION_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getEncryptionConfig().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.CreateBackupRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CreateBackupRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CreateBackupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CreateBackupRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CreateBackupRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupRequest parseDelimitedFrom( + java.io.InputStream 
input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CreateBackupRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CreateBackupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.CreateBackupRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CreateBackupRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.CreateBackupRequest) + com.google.spanner.admin.database.v1.CreateBackupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CreateBackupRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CreateBackupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CreateBackupRequest.class, + com.google.spanner.admin.database.v1.CreateBackupRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.CreateBackupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetBackupFieldBuilder(); + internalGetEncryptionConfigFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + backupId_ = ""; + backup_ = null; + if (backupBuilder_ != null) { + backupBuilder_.dispose(); + backupBuilder_ = null; + } + encryptionConfig_ = null; + if (encryptionConfigBuilder_ != null) { + encryptionConfigBuilder_.dispose(); + encryptionConfigBuilder_ = null; + } + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_CreateBackupRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupRequest getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.CreateBackupRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupRequest build() { + com.google.spanner.admin.database.v1.CreateBackupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupRequest buildPartial() { + com.google.spanner.admin.database.v1.CreateBackupRequest result = + new com.google.spanner.admin.database.v1.CreateBackupRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.CreateBackupRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.backupId_ = backupId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.backup_ = backupBuilder_ == null ? backup_ : backupBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.encryptionConfig_ = + encryptionConfigBuilder_ == null ? 
encryptionConfig_ : encryptionConfigBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.CreateBackupRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.CreateBackupRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.CreateBackupRequest other) { + if (other == com.google.spanner.admin.database.v1.CreateBackupRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getBackupId().isEmpty()) { + backupId_ = other.backupId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasBackup()) { + mergeBackup(other.getBackup()); + } + if (other.hasEncryptionConfig()) { + mergeEncryptionConfig(other.getEncryptionConfig()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + backupId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage(internalGetBackupFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + 
case 34: + { + input.readMessage( + internalGetEncryptionConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
    +     * Required. The name of the instance in which the backup will be
    +     * created. This must be the same instance that contains the database the
    +     * backup will be created from. The backup will be stored in the
    +     * location(s) specified in the instance configuration of this
    +     * instance. Values are of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the instance in which the backup will be
    +     * created. This must be the same instance that contains the database the
    +     * backup will be created from. The backup will be stored in the
    +     * location(s) specified in the instance configuration of this
    +     * instance. Values are of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the instance in which the backup will be
    +     * created. This must be the same instance that contains the database the
    +     * backup will be created from. The backup will be stored in the
    +     * location(s) specified in the instance configuration of this
    +     * instance. Values are of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the instance in which the backup will be
    +     * created. This must be the same instance that contains the database the
    +     * backup will be created from. The backup will be stored in the
    +     * location(s) specified in the instance configuration of this
    +     * instance. Values are of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the instance in which the backup will be
    +     * created. This must be the same instance that contains the database the
    +     * backup will be created from. The backup will be stored in the
    +     * location(s) specified in the instance configuration of this
    +     * instance. Values are of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object backupId_ = ""; + + /** + * + * + *
    +     * Required. The id of the backup to be created. The `backup_id` appended to
    +     * `parent` forms the full backup name of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup_id>`.
    +     * 
    + * + * string backup_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The backupId. + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The id of the backup to be created. The `backup_id` appended to
    +     * `parent` forms the full backup name of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup_id>`.
    +     * 
    + * + * string backup_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for backupId. + */ + public com.google.protobuf.ByteString getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The id of the backup to be created. The `backup_id` appended to
    +     * `parent` forms the full backup name of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup_id>`.
    +     * 
    + * + * string backup_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The backupId to set. + * @return This builder for chaining. + */ + public Builder setBackupId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + backupId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The id of the backup to be created. The `backup_id` appended to
    +     * `parent` forms the full backup name of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup_id>`.
    +     * 
    + * + * string backup_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearBackupId() { + backupId_ = getDefaultInstance().getBackupId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The id of the backup to be created. The `backup_id` appended to
    +     * `parent` forms the full backup name of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup_id>`.
    +     * 
    + * + * string backup_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for backupId to set. + * @return This builder for chaining. + */ + public Builder setBackupIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + backupId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.spanner.admin.database.v1.Backup backup_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.Backup, + com.google.spanner.admin.database.v1.Backup.Builder, + com.google.spanner.admin.database.v1.BackupOrBuilder> + backupBuilder_; + + /** + * + * + *
    +     * Required. The backup to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the backup field is set. + */ + public boolean hasBackup() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Required. The backup to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The backup. + */ + public com.google.spanner.admin.database.v1.Backup getBackup() { + if (backupBuilder_ == null) { + return backup_ == null + ? com.google.spanner.admin.database.v1.Backup.getDefaultInstance() + : backup_; + } else { + return backupBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. The backup to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setBackup(com.google.spanner.admin.database.v1.Backup value) { + if (backupBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + backup_ = value; + } else { + backupBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The backup to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setBackup(com.google.spanner.admin.database.v1.Backup.Builder builderForValue) { + if (backupBuilder_ == null) { + backup_ = builderForValue.build(); + } else { + backupBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The backup to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeBackup(com.google.spanner.admin.database.v1.Backup value) { + if (backupBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && backup_ != null + && backup_ != com.google.spanner.admin.database.v1.Backup.getDefaultInstance()) { + getBackupBuilder().mergeFrom(value); + } else { + backup_ = value; + } + } else { + backupBuilder_.mergeFrom(value); + } + if (backup_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. The backup to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearBackup() { + bitField0_ = (bitField0_ & ~0x00000004); + backup_ = null; + if (backupBuilder_ != null) { + backupBuilder_.dispose(); + backupBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The backup to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.Backup.Builder getBackupBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetBackupFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. The backup to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.BackupOrBuilder getBackupOrBuilder() { + if (backupBuilder_ != null) { + return backupBuilder_.getMessageOrBuilder(); + } else { + return backup_ == null + ? com.google.spanner.admin.database.v1.Backup.getDefaultInstance() + : backup_; + } + } + + /** + * + * + *
    +     * Required. The backup to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.Backup, + com.google.spanner.admin.database.v1.Backup.Builder, + com.google.spanner.admin.database.v1.BackupOrBuilder> + internalGetBackupFieldBuilder() { + if (backupBuilder_ == null) { + backupBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.Backup, + com.google.spanner.admin.database.v1.Backup.Builder, + com.google.spanner.admin.database.v1.BackupOrBuilder>( + getBackup(), getParentForChildren(), isClean()); + backup_ = null; + } + return backupBuilder_; + } + + private com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryptionConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig, + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.Builder, + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfigOrBuilder> + encryptionConfigBuilder_; + + /** + * + * + *
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the database by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * = `USE_DATABASE_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the encryptionConfig field is set. + */ + public boolean hasEncryptionConfig() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the database by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * = `USE_DATABASE_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The encryptionConfig. + */ + public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig getEncryptionConfig() { + if (encryptionConfigBuilder_ == null) { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } else { + return encryptionConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the database by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * = `USE_DATABASE_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setEncryptionConfig( + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig value) { + if (encryptionConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + encryptionConfig_ = value; + } else { + encryptionConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the database by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * = `USE_DATABASE_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setEncryptionConfig( + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.Builder builderForValue) { + if (encryptionConfigBuilder_ == null) { + encryptionConfig_ = builderForValue.build(); + } else { + encryptionConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the database by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * = `USE_DATABASE_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeEncryptionConfig( + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig value) { + if (encryptionConfigBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && encryptionConfig_ != null + && encryptionConfig_ + != com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig + .getDefaultInstance()) { + getEncryptionConfigBuilder().mergeFrom(value); + } else { + encryptionConfig_ = value; + } + } else { + encryptionConfigBuilder_.mergeFrom(value); + } + if (encryptionConfig_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the database by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * = `USE_DATABASE_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearEncryptionConfig() { + bitField0_ = (bitField0_ & ~0x00000008); + encryptionConfig_ = null; + if (encryptionConfigBuilder_ != null) { + encryptionConfigBuilder_.dispose(); + encryptionConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the database by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * = `USE_DATABASE_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.Builder + getEncryptionConfigBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetEncryptionConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the database by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * = `USE_DATABASE_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfigOrBuilder + getEncryptionConfigOrBuilder() { + if (encryptionConfigBuilder_ != null) { + return encryptionConfigBuilder_.getMessageOrBuilder(); + } else { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + } + + /** + * + * + *
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the database by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * = `USE_DATABASE_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig, + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.Builder, + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfigOrBuilder> + internalGetEncryptionConfigFieldBuilder() { + if (encryptionConfigBuilder_ == null) { + encryptionConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig, + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.Builder, + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfigOrBuilder>( + getEncryptionConfig(), getParentForChildren(), isClean()); + encryptionConfig_ = null; + } + return encryptionConfigBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.CreateBackupRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.CreateBackupRequest) + private static final com.google.spanner.admin.database.v1.CreateBackupRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.CreateBackupRequest(); + } + + public static com.google.spanner.admin.database.v1.CreateBackupRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateBackupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw 
e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupRequestOrBuilder.java new file mode 100644 index 000000000000..99882444f66d --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupRequestOrBuilder.java @@ -0,0 +1,197 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface CreateBackupRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.CreateBackupRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the instance in which the backup will be
    +   * created. This must be the same instance that contains the database the
    +   * backup will be created from. The backup will be stored in the
    +   * location(s) specified in the instance configuration of this
    +   * instance. Values are of the form
    +   * `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
    +   * Required. The name of the instance in which the backup will be
    +   * created. This must be the same instance that contains the database the
    +   * backup will be created from. The backup will be stored in the
    +   * location(s) specified in the instance configuration of this
    +   * instance. Values are of the form
    +   * `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
    +   * Required. The id of the backup to be created. The `backup_id` appended to
    +   * `parent` forms the full backup name of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup_id>`.
    +   * 
    + * + * string backup_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The backupId. + */ + java.lang.String getBackupId(); + + /** + * + * + *
    +   * Required. The id of the backup to be created. The `backup_id` appended to
    +   * `parent` forms the full backup name of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup_id>`.
    +   * 
    + * + * string backup_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for backupId. + */ + com.google.protobuf.ByteString getBackupIdBytes(); + + /** + * + * + *
    +   * Required. The backup to create.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the backup field is set. + */ + boolean hasBackup(); + + /** + * + * + *
    +   * Required. The backup to create.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The backup. + */ + com.google.spanner.admin.database.v1.Backup getBackup(); + + /** + * + * + *
    +   * Required. The backup to create.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.database.v1.BackupOrBuilder getBackupOrBuilder(); + + /** + * + * + *
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the database by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +   * = `USE_DATABASE_ENCRYPTION`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the encryptionConfig field is set. + */ + boolean hasEncryptionConfig(); + + /** + * + * + *
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the database by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +   * = `USE_DATABASE_ENCRYPTION`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The encryptionConfig. + */ + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig getEncryptionConfig(); + + /** + * + * + *
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the database by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +   * = `USE_DATABASE_ENCRYPTION`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfigOrBuilder + getEncryptionConfigOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupScheduleRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupScheduleRequest.java new file mode 100644 index 000000000000..6b0726c36e11 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupScheduleRequest.java @@ -0,0 +1,1143 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup_schedule.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CreateBackupScheduleRequest} + */ +@com.google.protobuf.Generated +public final class CreateBackupScheduleRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.CreateBackupScheduleRequest) + CreateBackupScheduleRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateBackupScheduleRequest"); + } + + // Use CreateBackupScheduleRequest.newBuilder() to construct. + private CreateBackupScheduleRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateBackupScheduleRequest() { + parent_ = ""; + backupScheduleId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_CreateBackupScheduleRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_CreateBackupScheduleRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest.class, + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
    +   * Required. The name of the database that this backup schedule applies to.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the database that this backup schedule applies to.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BACKUP_SCHEDULE_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object backupScheduleId_ = ""; + + /** + * + * + *
    +   * Required. The Id to use for the backup schedule. The `backup_schedule_id`
    +   * appended to `parent` forms the full backup schedule name of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * string backup_schedule_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The backupScheduleId. + */ + @java.lang.Override + public java.lang.String getBackupScheduleId() { + java.lang.Object ref = backupScheduleId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupScheduleId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The Id to use for the backup schedule. The `backup_schedule_id`
    +   * appended to `parent` forms the full backup schedule name of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * string backup_schedule_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for backupScheduleId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBackupScheduleIdBytes() { + java.lang.Object ref = backupScheduleId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupScheduleId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BACKUP_SCHEDULE_FIELD_NUMBER = 3; + private com.google.spanner.admin.database.v1.BackupSchedule backupSchedule_; + + /** + * + * + *
    +   * Required. The backup schedule to create.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the backupSchedule field is set. + */ + @java.lang.Override + public boolean hasBackupSchedule() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Required. The backup schedule to create.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The backupSchedule. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupSchedule getBackupSchedule() { + return backupSchedule_ == null + ? com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance() + : backupSchedule_; + } + + /** + * + * + *
    +   * Required. The backup schedule to create.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupScheduleOrBuilder getBackupScheduleOrBuilder() { + return backupSchedule_ == null + ? com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance() + : backupSchedule_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(backupScheduleId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, backupScheduleId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getBackupSchedule()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(backupScheduleId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, backupScheduleId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getBackupSchedule()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final 
java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.CreateBackupScheduleRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest other = + (com.google.spanner.admin.database.v1.CreateBackupScheduleRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getBackupScheduleId().equals(other.getBackupScheduleId())) return false; + if (hasBackupSchedule() != other.hasBackupSchedule()) return false; + if (hasBackupSchedule()) { + if (!getBackupSchedule().equals(other.getBackupSchedule())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + BACKUP_SCHEDULE_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupScheduleId().hashCode(); + if (hasBackupSchedule()) { + hash = (37 * hash) + BACKUP_SCHEDULE_FIELD_NUMBER; + hash = (53 * hash) + getBackupSchedule().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseFrom( + 
com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CreateBackupScheduleRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.CreateBackupScheduleRequest) + com.google.spanner.admin.database.v1.CreateBackupScheduleRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_CreateBackupScheduleRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_CreateBackupScheduleRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest.class, + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.CreateBackupScheduleRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetBackupScheduleFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + backupScheduleId_ = ""; + backupSchedule_ = null; + if (backupScheduleBuilder_ != null) { + backupScheduleBuilder_.dispose(); + backupScheduleBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_CreateBackupScheduleRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupScheduleRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.CreateBackupScheduleRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupScheduleRequest build() { + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupScheduleRequest buildPartial() { + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest result = + new com.google.spanner.admin.database.v1.CreateBackupScheduleRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.backupScheduleId_ = backupScheduleId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.backupSchedule_ = + backupScheduleBuilder_ == null ? 
backupSchedule_ : backupScheduleBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.CreateBackupScheduleRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.CreateBackupScheduleRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest other) { + if (other + == com.google.spanner.admin.database.v1.CreateBackupScheduleRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getBackupScheduleId().isEmpty()) { + backupScheduleId_ = other.backupScheduleId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasBackupSchedule()) { + mergeBackupSchedule(other.getBackupSchedule()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + backupScheduleId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetBackupScheduleFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 
+ default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
    +     * Required. The name of the database that this backup schedule applies to.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the database that this backup schedule applies to.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the database that this backup schedule applies to.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the database that this backup schedule applies to.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the database that this backup schedule applies to.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object backupScheduleId_ = ""; + + /** + * + * + *
    +     * Required. The Id to use for the backup schedule. The `backup_schedule_id`
    +     * appended to `parent` forms the full backup schedule name of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * string backup_schedule_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The backupScheduleId. + */ + public java.lang.String getBackupScheduleId() { + java.lang.Object ref = backupScheduleId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupScheduleId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The Id to use for the backup schedule. The `backup_schedule_id`
    +     * appended to `parent` forms the full backup schedule name of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * string backup_schedule_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for backupScheduleId. + */ + public com.google.protobuf.ByteString getBackupScheduleIdBytes() { + java.lang.Object ref = backupScheduleId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupScheduleId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The Id to use for the backup schedule. The `backup_schedule_id`
    +     * appended to `parent` forms the full backup schedule name of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * string backup_schedule_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The backupScheduleId to set. + * @return This builder for chaining. + */ + public Builder setBackupScheduleId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + backupScheduleId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The Id to use for the backup schedule. The `backup_schedule_id`
    +     * appended to `parent` forms the full backup schedule name of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * string backup_schedule_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearBackupScheduleId() { + backupScheduleId_ = getDefaultInstance().getBackupScheduleId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The Id to use for the backup schedule. The `backup_schedule_id`
    +     * appended to `parent` forms the full backup schedule name of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * string backup_schedule_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for backupScheduleId to set. + * @return This builder for chaining. + */ + public Builder setBackupScheduleIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + backupScheduleId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.spanner.admin.database.v1.BackupSchedule backupSchedule_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.BackupSchedule, + com.google.spanner.admin.database.v1.BackupSchedule.Builder, + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder> + backupScheduleBuilder_; + + /** + * + * + *
    +     * Required. The backup schedule to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the backupSchedule field is set. + */ + public boolean hasBackupSchedule() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Required. The backup schedule to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The backupSchedule. + */ + public com.google.spanner.admin.database.v1.BackupSchedule getBackupSchedule() { + if (backupScheduleBuilder_ == null) { + return backupSchedule_ == null + ? com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance() + : backupSchedule_; + } else { + return backupScheduleBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. The backup schedule to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setBackupSchedule(com.google.spanner.admin.database.v1.BackupSchedule value) { + if (backupScheduleBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + backupSchedule_ = value; + } else { + backupScheduleBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The backup schedule to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setBackupSchedule( + com.google.spanner.admin.database.v1.BackupSchedule.Builder builderForValue) { + if (backupScheduleBuilder_ == null) { + backupSchedule_ = builderForValue.build(); + } else { + backupScheduleBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The backup schedule to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeBackupSchedule(com.google.spanner.admin.database.v1.BackupSchedule value) { + if (backupScheduleBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && backupSchedule_ != null + && backupSchedule_ + != com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance()) { + getBackupScheduleBuilder().mergeFrom(value); + } else { + backupSchedule_ = value; + } + } else { + backupScheduleBuilder_.mergeFrom(value); + } + if (backupSchedule_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. The backup schedule to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearBackupSchedule() { + bitField0_ = (bitField0_ & ~0x00000004); + backupSchedule_ = null; + if (backupScheduleBuilder_ != null) { + backupScheduleBuilder_.dispose(); + backupScheduleBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The backup schedule to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.BackupSchedule.Builder getBackupScheduleBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetBackupScheduleFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. The backup schedule to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.BackupScheduleOrBuilder + getBackupScheduleOrBuilder() { + if (backupScheduleBuilder_ != null) { + return backupScheduleBuilder_.getMessageOrBuilder(); + } else { + return backupSchedule_ == null + ? com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance() + : backupSchedule_; + } + } + + /** + * + * + *
    +     * Required. The backup schedule to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.BackupSchedule, + com.google.spanner.admin.database.v1.BackupSchedule.Builder, + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder> + internalGetBackupScheduleFieldBuilder() { + if (backupScheduleBuilder_ == null) { + backupScheduleBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.BackupSchedule, + com.google.spanner.admin.database.v1.BackupSchedule.Builder, + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder>( + getBackupSchedule(), getParentForChildren(), isClean()); + backupSchedule_ = null; + } + return backupScheduleBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.CreateBackupScheduleRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.CreateBackupScheduleRequest) + private static final com.google.spanner.admin.database.v1.CreateBackupScheduleRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.CreateBackupScheduleRequest(); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateBackupScheduleRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch 
(com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupScheduleRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupScheduleRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupScheduleRequestOrBuilder.java new file mode 100644 index 000000000000..b72bf7410d30 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupScheduleRequestOrBuilder.java @@ -0,0 +1,131 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup_schedule.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface CreateBackupScheduleRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.CreateBackupScheduleRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the database that this backup schedule applies to.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
    +   * Required. The name of the database that this backup schedule applies to.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
    +   * Required. The Id to use for the backup schedule. The `backup_schedule_id`
    +   * appended to `parent` forms the full backup schedule name of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * string backup_schedule_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The backupScheduleId. + */ + java.lang.String getBackupScheduleId(); + + /** + * + * + *
    +   * Required. The Id to use for the backup schedule. The `backup_schedule_id`
    +   * appended to `parent` forms the full backup schedule name of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * string backup_schedule_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for backupScheduleId. + */ + com.google.protobuf.ByteString getBackupScheduleIdBytes(); + + /** + * + * + *
    +   * Required. The backup schedule to create.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the backupSchedule field is set. + */ + boolean hasBackupSchedule(); + + /** + * + * + *
    +   * Required. The backup schedule to create.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The backupSchedule. + */ + com.google.spanner.admin.database.v1.BackupSchedule getBackupSchedule(); + + /** + * + * + *
    +   * Required. The backup schedule to create.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder getBackupScheduleOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseMetadata.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseMetadata.java new file mode 100644 index 000000000000..106765ec363a --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseMetadata.java @@ -0,0 +1,599 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Metadata type for the operation returned by
    + * [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CreateDatabaseMetadata} + */ +@com.google.protobuf.Generated +public final class CreateDatabaseMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.CreateDatabaseMetadata) + CreateDatabaseMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateDatabaseMetadata"); + } + + // Use CreateDatabaseMetadata.newBuilder() to construct. + private CreateDatabaseMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateDatabaseMetadata() { + database_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_CreateDatabaseMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_CreateDatabaseMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CreateDatabaseMetadata.class, + com.google.spanner.admin.database.v1.CreateDatabaseMetadata.Builder.class); + } + + public static final int DATABASE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object database_ = ""; + + /** + * + * + *
    +   * The database being created.
    +   * 
    + * + * string database = 1 [(.google.api.resource_reference) = { ... } + * + * @return The database. + */ + @java.lang.Override + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } + } + + /** + * + * + *
    +   * The database being created.
    +   * 
    + * + * string database = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for database. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, database_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, database_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.CreateDatabaseMetadata)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.CreateDatabaseMetadata other = + (com.google.spanner.admin.database.v1.CreateDatabaseMetadata) obj; + + if (!getDatabase().equals(other.getDatabase())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + 
@java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getDatabase().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseMetadata parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseMetadata parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseMetadata parseFrom( + 
java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseMetadata parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseMetadata parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.CreateDatabaseMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + 
@java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Metadata type for the operation returned by
    +   * [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CreateDatabaseMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.CreateDatabaseMetadata) + com.google.spanner.admin.database.v1.CreateDatabaseMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_CreateDatabaseMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_CreateDatabaseMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CreateDatabaseMetadata.class, + com.google.spanner.admin.database.v1.CreateDatabaseMetadata.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.CreateDatabaseMetadata.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + database_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_CreateDatabaseMetadata_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateDatabaseMetadata getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.CreateDatabaseMetadata.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.admin.database.v1.CreateDatabaseMetadata build() { + com.google.spanner.admin.database.v1.CreateDatabaseMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateDatabaseMetadata buildPartial() { + com.google.spanner.admin.database.v1.CreateDatabaseMetadata result = + new com.google.spanner.admin.database.v1.CreateDatabaseMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.CreateDatabaseMetadata result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.database_ = database_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.CreateDatabaseMetadata) { + return mergeFrom((com.google.spanner.admin.database.v1.CreateDatabaseMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.CreateDatabaseMetadata other) { + if (other == com.google.spanner.admin.database.v1.CreateDatabaseMetadata.getDefaultInstance()) + return this; + if (!other.getDatabase().isEmpty()) { + database_ = other.database_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = 
input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + database_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object database_ = ""; + + /** + * + * + *
    +     * The database being created.
    +     * 
    + * + * string database = 1 [(.google.api.resource_reference) = { ... } + * + * @return The database. + */ + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The database being created.
    +     * 
    + * + * string database = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for database. + */ + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The database being created.
    +     * 
    + * + * string database = 1 [(.google.api.resource_reference) = { ... } + * + * @param value The database to set. + * @return This builder for chaining. + */ + public Builder setDatabase(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The database being created.
    +     * 
    + * + * string database = 1 [(.google.api.resource_reference) = { ... } + * + * @return This builder for chaining. + */ + public Builder clearDatabase() { + database_ = getDefaultInstance().getDatabase(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The database being created.
    +     * 
    + * + * string database = 1 [(.google.api.resource_reference) = { ... } + * + * @param value The bytes for database to set. + * @return This builder for chaining. + */ + public Builder setDatabaseBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.CreateDatabaseMetadata) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.CreateDatabaseMetadata) + private static final com.google.spanner.admin.database.v1.CreateDatabaseMetadata DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.CreateDatabaseMetadata(); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateDatabaseMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateDatabaseMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseMetadataOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseMetadataOrBuilder.java new file mode 100644 index 000000000000..05e67b268581 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseMetadataOrBuilder.java @@ -0,0 +1,54 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface CreateDatabaseMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.CreateDatabaseMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The database being created.
    +   * 
    + * + * string database = 1 [(.google.api.resource_reference) = { ... } + * + * @return The database. + */ + java.lang.String getDatabase(); + + /** + * + * + *
    +   * The database being created.
    +   * 
    + * + * string database = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for database. + */ + com.google.protobuf.ByteString getDatabaseBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseRequest.java new file mode 100644 index 000000000000..37ff9fa76c1a --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseRequest.java @@ -0,0 +1,1847 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CreateDatabaseRequest} + */ +@com.google.protobuf.Generated +public final class CreateDatabaseRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.CreateDatabaseRequest) + CreateDatabaseRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateDatabaseRequest"); + } + + // Use CreateDatabaseRequest.newBuilder() to construct. + private CreateDatabaseRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateDatabaseRequest() { + parent_ = ""; + createStatement_ = ""; + extraStatements_ = com.google.protobuf.LazyStringArrayList.emptyList(); + databaseDialect_ = 0; + protoDescriptors_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_CreateDatabaseRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_CreateDatabaseRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CreateDatabaseRequest.class, + com.google.spanner.admin.database.v1.CreateDatabaseRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
    +   * Required. The name of the instance that will serve the new database.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the instance that will serve the new database.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CREATE_STATEMENT_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object createStatement_ = ""; + + /** + * + * + *
    +   * Required. A `CREATE DATABASE` statement, which specifies the ID of the
    +   * new database.  The database ID must conform to the regular expression
    +   * `[a-z][a-z0-9_\-]*[a-z0-9]` and be between 2 and 30 characters in length.
    +   * If the database ID is a reserved word or if it contains a hyphen, the
    +   * database ID must be enclosed in backticks (`` ` ``).
    +   * 
    + * + * string create_statement = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The createStatement. + */ + @java.lang.Override + public java.lang.String getCreateStatement() { + java.lang.Object ref = createStatement_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + createStatement_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. A `CREATE DATABASE` statement, which specifies the ID of the
    +   * new database.  The database ID must conform to the regular expression
    +   * `[a-z][a-z0-9_\-]*[a-z0-9]` and be between 2 and 30 characters in length.
    +   * If the database ID is a reserved word or if it contains a hyphen, the
    +   * database ID must be enclosed in backticks (`` ` ``).
    +   * 
    + * + * string create_statement = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for createStatement. + */ + @java.lang.Override + public com.google.protobuf.ByteString getCreateStatementBytes() { + java.lang.Object ref = createStatement_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + createStatement_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int EXTRA_STATEMENTS_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList extraStatements_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * Optional. A list of DDL statements to run inside the newly created
    +   * database. Statements can create tables, indexes, etc. These
    +   * statements execute atomically with the creation of the database:
    +   * if there is an error in any statement, the database is not created.
    +   * 
    + * + * repeated string extra_statements = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the extraStatements. + */ + public com.google.protobuf.ProtocolStringList getExtraStatementsList() { + return extraStatements_; + } + + /** + * + * + *
    +   * Optional. A list of DDL statements to run inside the newly created
    +   * database. Statements can create tables, indexes, etc. These
    +   * statements execute atomically with the creation of the database:
    +   * if there is an error in any statement, the database is not created.
    +   * 
    + * + * repeated string extra_statements = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of extraStatements. + */ + public int getExtraStatementsCount() { + return extraStatements_.size(); + } + + /** + * + * + *
    +   * Optional. A list of DDL statements to run inside the newly created
    +   * database. Statements can create tables, indexes, etc. These
    +   * statements execute atomically with the creation of the database:
    +   * if there is an error in any statement, the database is not created.
    +   * 
    + * + * repeated string extra_statements = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The extraStatements at the given index. + */ + public java.lang.String getExtraStatements(int index) { + return extraStatements_.get(index); + } + + /** + * + * + *
    +   * Optional. A list of DDL statements to run inside the newly created
    +   * database. Statements can create tables, indexes, etc. These
    +   * statements execute atomically with the creation of the database:
    +   * if there is an error in any statement, the database is not created.
    +   * 
    + * + * repeated string extra_statements = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the extraStatements at the given index. + */ + public com.google.protobuf.ByteString getExtraStatementsBytes(int index) { + return extraStatements_.getByteString(index); + } + + public static final int ENCRYPTION_CONFIG_FIELD_NUMBER = 4; + private com.google.spanner.admin.database.v1.EncryptionConfig encryptionConfig_; + + /** + * + * + *
    +   * Optional. The encryption configuration for the database. If this field is
    +   * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +   * Google default encryption.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the encryptionConfig field is set. + */ + @java.lang.Override + public boolean hasEncryptionConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Optional. The encryption configuration for the database. If this field is
    +   * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +   * Google default encryption.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The encryptionConfig. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionConfig getEncryptionConfig() { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + + /** + * + * + *
    +   * Optional. The encryption configuration for the database. If this field is
    +   * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +   * Google default encryption.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder + getEncryptionConfigOrBuilder() { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + + public static final int DATABASE_DIALECT_FIELD_NUMBER = 5; + private int databaseDialect_ = 0; + + /** + * + * + *
    +   * Optional. The dialect of the Cloud Spanner Database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for databaseDialect. + */ + @java.lang.Override + public int getDatabaseDialectValue() { + return databaseDialect_; + } + + /** + * + * + *
    +   * Optional. The dialect of the Cloud Spanner Database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The databaseDialect. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.DatabaseDialect getDatabaseDialect() { + com.google.spanner.admin.database.v1.DatabaseDialect result = + com.google.spanner.admin.database.v1.DatabaseDialect.forNumber(databaseDialect_); + return result == null + ? com.google.spanner.admin.database.v1.DatabaseDialect.UNRECOGNIZED + : result; + } + + public static final int PROTO_DESCRIPTORS_FIELD_NUMBER = 6; + private com.google.protobuf.ByteString protoDescriptors_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements in
    +   * 'extra_statements' above.
    +   * Contains a protobuf-serialized
    +   * [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
    +   * To generate it, [install](https://grpc.io/docs/protoc-installation/) and
    +   * run `protoc` with --include_imports and --descriptor_set_out. For example,
    +   * to generate for moon/shot/app.proto, run
    +   * ```
    +   * $protoc  --proto_path=/app_path --proto_path=/lib_path \
    +   * --include_imports \
    +   * --descriptor_set_out=descriptors.data \
    +   * moon/shot/app.proto
    +   * ```
    +   * For more details, see protobuffer [self
    +   * description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
    +   * 
    + * + * bytes proto_descriptors = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The protoDescriptors. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProtoDescriptors() { + return protoDescriptors_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(createStatement_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, createStatement_); + } + for (int i = 0; i < extraStatements_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, extraStatements_.getRaw(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(4, getEncryptionConfig()); + } + if (databaseDialect_ + != com.google.spanner.admin.database.v1.DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED + .getNumber()) { + output.writeEnum(5, databaseDialect_); + } + if (!protoDescriptors_.isEmpty()) { + output.writeBytes(6, protoDescriptors_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(createStatement_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, createStatement_); + } + { + int dataSize = 0; + for (int i = 0; 
i < extraStatements_.size(); i++) { + dataSize += computeStringSizeNoTag(extraStatements_.getRaw(i)); + } + size += dataSize; + size += 1 * getExtraStatementsList().size(); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getEncryptionConfig()); + } + if (databaseDialect_ + != com.google.spanner.admin.database.v1.DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(5, databaseDialect_); + } + if (!protoDescriptors_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(6, protoDescriptors_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.CreateDatabaseRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.CreateDatabaseRequest other = + (com.google.spanner.admin.database.v1.CreateDatabaseRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getCreateStatement().equals(other.getCreateStatement())) return false; + if (!getExtraStatementsList().equals(other.getExtraStatementsList())) return false; + if (hasEncryptionConfig() != other.hasEncryptionConfig()) return false; + if (hasEncryptionConfig()) { + if (!getEncryptionConfig().equals(other.getEncryptionConfig())) return false; + } + if (databaseDialect_ != other.databaseDialect_) return false; + if (!getProtoDescriptors().equals(other.getProtoDescriptors())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + 
PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + CREATE_STATEMENT_FIELD_NUMBER; + hash = (53 * hash) + getCreateStatement().hashCode(); + if (getExtraStatementsCount() > 0) { + hash = (37 * hash) + EXTRA_STATEMENTS_FIELD_NUMBER; + hash = (53 * hash) + getExtraStatementsList().hashCode(); + } + if (hasEncryptionConfig()) { + hash = (37 * hash) + ENCRYPTION_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getEncryptionConfig().hashCode(); + } + hash = (37 * hash) + DATABASE_DIALECT_FIELD_NUMBER; + hash = (53 * hash) + databaseDialect_; + hash = (37 * hash) + PROTO_DESCRIPTORS_FIELD_NUMBER; + hash = (53 * hash) + getProtoDescriptors().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.CreateDatabaseRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CreateDatabaseRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.CreateDatabaseRequest) + com.google.spanner.admin.database.v1.CreateDatabaseRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_CreateDatabaseRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_CreateDatabaseRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CreateDatabaseRequest.class, + com.google.spanner.admin.database.v1.CreateDatabaseRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.CreateDatabaseRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetEncryptionConfigFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + createStatement_ = ""; + extraStatements_ = com.google.protobuf.LazyStringArrayList.emptyList(); + encryptionConfig_ = null; + if (encryptionConfigBuilder_ != null) { + encryptionConfigBuilder_.dispose(); + encryptionConfigBuilder_ = null; + } + databaseDialect_ = 0; + protoDescriptors_ = com.google.protobuf.ByteString.EMPTY; 
+ return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_CreateDatabaseRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateDatabaseRequest getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.CreateDatabaseRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateDatabaseRequest build() { + com.google.spanner.admin.database.v1.CreateDatabaseRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateDatabaseRequest buildPartial() { + com.google.spanner.admin.database.v1.CreateDatabaseRequest result = + new com.google.spanner.admin.database.v1.CreateDatabaseRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.CreateDatabaseRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.createStatement_ = createStatement_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + extraStatements_.makeImmutable(); + result.extraStatements_ = extraStatements_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.encryptionConfig_ = + encryptionConfigBuilder_ == null ? 
encryptionConfig_ : encryptionConfigBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.databaseDialect_ = databaseDialect_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.protoDescriptors_ = protoDescriptors_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.CreateDatabaseRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.CreateDatabaseRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.CreateDatabaseRequest other) { + if (other == com.google.spanner.admin.database.v1.CreateDatabaseRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getCreateStatement().isEmpty()) { + createStatement_ = other.createStatement_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.extraStatements_.isEmpty()) { + if (extraStatements_.isEmpty()) { + extraStatements_ = other.extraStatements_; + bitField0_ |= 0x00000004; + } else { + ensureExtraStatementsIsMutable(); + extraStatements_.addAll(other.extraStatements_); + } + onChanged(); + } + if (other.hasEncryptionConfig()) { + mergeEncryptionConfig(other.getEncryptionConfig()); + } + if (other.databaseDialect_ != 0) { + setDatabaseDialectValue(other.getDatabaseDialectValue()); + } + if (!other.getProtoDescriptors().isEmpty()) { + setProtoDescriptors(other.getProtoDescriptors()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + createStatement_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureExtraStatementsIsMutable(); + extraStatements_.add(s); + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetEncryptionConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 40: + { + databaseDialect_ = input.readEnum(); + bitField0_ |= 0x00000010; + break; + } // case 40 + case 50: + { + protoDescriptors_ = input.readBytes(); + bitField0_ |= 0x00000020; + break; + } // case 50 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
    +     * Required. The name of the instance that will serve the new database.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the instance that will serve the new database.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the instance that will serve the new database.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the instance that will serve the new database.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the instance that will serve the new database.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object createStatement_ = ""; + + /** + * + * + *
    +     * Required. A `CREATE DATABASE` statement, which specifies the ID of the
    +     * new database.  The database ID must conform to the regular expression
    +     * `[a-z][a-z0-9_\-]*[a-z0-9]` and be between 2 and 30 characters in length.
    +     * If the database ID is a reserved word or if it contains a hyphen, the
    +     * database ID must be enclosed in backticks (`` ` ``).
    +     * 
    + * + * string create_statement = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The createStatement. + */ + public java.lang.String getCreateStatement() { + java.lang.Object ref = createStatement_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + createStatement_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. A `CREATE DATABASE` statement, which specifies the ID of the
    +     * new database.  The database ID must conform to the regular expression
    +     * `[a-z][a-z0-9_\-]*[a-z0-9]` and be between 2 and 30 characters in length.
    +     * If the database ID is a reserved word or if it contains a hyphen, the
    +     * database ID must be enclosed in backticks (`` ` ``).
    +     * 
    + * + * string create_statement = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for createStatement. + */ + public com.google.protobuf.ByteString getCreateStatementBytes() { + java.lang.Object ref = createStatement_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + createStatement_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. A `CREATE DATABASE` statement, which specifies the ID of the
    +     * new database.  The database ID must conform to the regular expression
    +     * `[a-z][a-z0-9_\-]*[a-z0-9]` and be between 2 and 30 characters in length.
    +     * If the database ID is a reserved word or if it contains a hyphen, the
    +     * database ID must be enclosed in backticks (`` ` ``).
    +     * 
    + * + * string create_statement = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The createStatement to set. + * @return This builder for chaining. + */ + public Builder setCreateStatement(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + createStatement_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A `CREATE DATABASE` statement, which specifies the ID of the
    +     * new database.  The database ID must conform to the regular expression
    +     * `[a-z][a-z0-9_\-]*[a-z0-9]` and be between 2 and 30 characters in length.
    +     * If the database ID is a reserved word or if it contains a hyphen, the
    +     * database ID must be enclosed in backticks (`` ` ``).
    +     * 
    + * + * string create_statement = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearCreateStatement() { + createStatement_ = getDefaultInstance().getCreateStatement(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A `CREATE DATABASE` statement, which specifies the ID of the
    +     * new database.  The database ID must conform to the regular expression
    +     * `[a-z][a-z0-9_\-]*[a-z0-9]` and be between 2 and 30 characters in length.
    +     * If the database ID is a reserved word or if it contains a hyphen, the
    +     * database ID must be enclosed in backticks (`` ` ``).
    +     * 
    + * + * string create_statement = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for createStatement to set. + * @return This builder for chaining. + */ + public Builder setCreateStatementBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + createStatement_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList extraStatements_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureExtraStatementsIsMutable() { + if (!extraStatements_.isModifiable()) { + extraStatements_ = new com.google.protobuf.LazyStringArrayList(extraStatements_); + } + bitField0_ |= 0x00000004; + } + + /** + * + * + *
    +     * Optional. A list of DDL statements to run inside the newly created
    +     * database. Statements can create tables, indexes, etc. These
    +     * statements execute atomically with the creation of the database:
    +     * if there is an error in any statement, the database is not created.
    +     * 
    + * + * repeated string extra_statements = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the extraStatements. + */ + public com.google.protobuf.ProtocolStringList getExtraStatementsList() { + extraStatements_.makeImmutable(); + return extraStatements_; + } + + /** + * + * + *
    +     * Optional. A list of DDL statements to run inside the newly created
    +     * database. Statements can create tables, indexes, etc. These
    +     * statements execute atomically with the creation of the database:
    +     * if there is an error in any statement, the database is not created.
    +     * 
    + * + * repeated string extra_statements = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of extraStatements. + */ + public int getExtraStatementsCount() { + return extraStatements_.size(); + } + + /** + * + * + *
    +     * Optional. A list of DDL statements to run inside the newly created
    +     * database. Statements can create tables, indexes, etc. These
    +     * statements execute atomically with the creation of the database:
    +     * if there is an error in any statement, the database is not created.
    +     * 
    + * + * repeated string extra_statements = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The extraStatements at the given index. + */ + public java.lang.String getExtraStatements(int index) { + return extraStatements_.get(index); + } + + /** + * + * + *
    +     * Optional. A list of DDL statements to run inside the newly created
    +     * database. Statements can create tables, indexes, etc. These
    +     * statements execute atomically with the creation of the database:
    +     * if there is an error in any statement, the database is not created.
    +     * 
    + * + * repeated string extra_statements = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the extraStatements at the given index. + */ + public com.google.protobuf.ByteString getExtraStatementsBytes(int index) { + return extraStatements_.getByteString(index); + } + + /** + * + * + *
    +     * Optional. A list of DDL statements to run inside the newly created
    +     * database. Statements can create tables, indexes, etc. These
    +     * statements execute atomically with the creation of the database:
    +     * if there is an error in any statement, the database is not created.
    +     * 
    + * + * repeated string extra_statements = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The extraStatements to set. + * @return This builder for chaining. + */ + public Builder setExtraStatements(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureExtraStatementsIsMutable(); + extraStatements_.set(index, value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A list of DDL statements to run inside the newly created
    +     * database. Statements can create tables, indexes, etc. These
    +     * statements execute atomically with the creation of the database:
    +     * if there is an error in any statement, the database is not created.
    +     * 
    + * + * repeated string extra_statements = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The extraStatements to add. + * @return This builder for chaining. + */ + public Builder addExtraStatements(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureExtraStatementsIsMutable(); + extraStatements_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A list of DDL statements to run inside the newly created
    +     * database. Statements can create tables, indexes, etc. These
    +     * statements execute atomically with the creation of the database:
    +     * if there is an error in any statement, the database is not created.
    +     * 
    + * + * repeated string extra_statements = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The extraStatements to add. + * @return This builder for chaining. + */ + public Builder addAllExtraStatements(java.lang.Iterable values) { + ensureExtraStatementsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, extraStatements_); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A list of DDL statements to run inside the newly created
    +     * database. Statements can create tables, indexes, etc. These
    +     * statements execute atomically with the creation of the database:
    +     * if there is an error in any statement, the database is not created.
    +     * 
    + * + * repeated string extra_statements = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearExtraStatements() { + extraStatements_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A list of DDL statements to run inside the newly created
    +     * database. Statements can create tables, indexes, etc. These
    +     * statements execute atomically with the creation of the database:
    +     * if there is an error in any statement, the database is not created.
    +     * 
    + * + * repeated string extra_statements = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the extraStatements to add. + * @return This builder for chaining. + */ + public Builder addExtraStatementsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureExtraStatementsIsMutable(); + extraStatements_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.spanner.admin.database.v1.EncryptionConfig encryptionConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionConfig, + com.google.spanner.admin.database.v1.EncryptionConfig.Builder, + com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder> + encryptionConfigBuilder_; + + /** + * + * + *
    +     * Optional. The encryption configuration for the database. If this field is
    +     * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +     * Google default encryption.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the encryptionConfig field is set. + */ + public boolean hasEncryptionConfig() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Optional. The encryption configuration for the database. If this field is
    +     * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +     * Google default encryption.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The encryptionConfig. + */ + public com.google.spanner.admin.database.v1.EncryptionConfig getEncryptionConfig() { + if (encryptionConfigBuilder_ == null) { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } else { + return encryptionConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. The encryption configuration for the database. If this field is
    +     * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +     * Google default encryption.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setEncryptionConfig( + com.google.spanner.admin.database.v1.EncryptionConfig value) { + if (encryptionConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + encryptionConfig_ = value; + } else { + encryptionConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The encryption configuration for the database. If this field is
    +     * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +     * Google default encryption.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setEncryptionConfig( + com.google.spanner.admin.database.v1.EncryptionConfig.Builder builderForValue) { + if (encryptionConfigBuilder_ == null) { + encryptionConfig_ = builderForValue.build(); + } else { + encryptionConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The encryption configuration for the database. If this field is
    +     * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +     * Google default encryption.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeEncryptionConfig( + com.google.spanner.admin.database.v1.EncryptionConfig value) { + if (encryptionConfigBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && encryptionConfig_ != null + && encryptionConfig_ + != com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance()) { + getEncryptionConfigBuilder().mergeFrom(value); + } else { + encryptionConfig_ = value; + } + } else { + encryptionConfigBuilder_.mergeFrom(value); + } + if (encryptionConfig_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. The encryption configuration for the database. If this field is
    +     * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +     * Google default encryption.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearEncryptionConfig() { + bitField0_ = (bitField0_ & ~0x00000008); + encryptionConfig_ = null; + if (encryptionConfigBuilder_ != null) { + encryptionConfigBuilder_.dispose(); + encryptionConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The encryption configuration for the database. If this field is
    +     * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +     * Google default encryption.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.database.v1.EncryptionConfig.Builder + getEncryptionConfigBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetEncryptionConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. The encryption configuration for the database. If this field is
    +     * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +     * Google default encryption.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder + getEncryptionConfigOrBuilder() { + if (encryptionConfigBuilder_ != null) { + return encryptionConfigBuilder_.getMessageOrBuilder(); + } else { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + } + + /** + * + * + *
    +     * Optional. The encryption configuration for the database. If this field is
    +     * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +     * Google default encryption.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionConfig, + com.google.spanner.admin.database.v1.EncryptionConfig.Builder, + com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder> + internalGetEncryptionConfigFieldBuilder() { + if (encryptionConfigBuilder_ == null) { + encryptionConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionConfig, + com.google.spanner.admin.database.v1.EncryptionConfig.Builder, + com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder>( + getEncryptionConfig(), getParentForChildren(), isClean()); + encryptionConfig_ = null; + } + return encryptionConfigBuilder_; + } + + private int databaseDialect_ = 0; + + /** + * + * + *
    +     * Optional. The dialect of the Cloud Spanner Database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for databaseDialect. + */ + @java.lang.Override + public int getDatabaseDialectValue() { + return databaseDialect_; + } + + /** + * + * + *
    +     * Optional. The dialect of the Cloud Spanner Database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The enum numeric value on the wire for databaseDialect to set. + * @return This builder for chaining. + */ + public Builder setDatabaseDialectValue(int value) { + databaseDialect_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The dialect of the Cloud Spanner Database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The databaseDialect. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.DatabaseDialect getDatabaseDialect() { + com.google.spanner.admin.database.v1.DatabaseDialect result = + com.google.spanner.admin.database.v1.DatabaseDialect.forNumber(databaseDialect_); + return result == null + ? com.google.spanner.admin.database.v1.DatabaseDialect.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * Optional. The dialect of the Cloud Spanner Database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The databaseDialect to set. + * @return This builder for chaining. + */ + public Builder setDatabaseDialect(com.google.spanner.admin.database.v1.DatabaseDialect value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + databaseDialect_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The dialect of the Cloud Spanner Database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearDatabaseDialect() { + bitField0_ = (bitField0_ & ~0x00000010); + databaseDialect_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString protoDescriptors_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements in
    +     * 'extra_statements' above.
    +     * Contains a protobuf-serialized
    +     * [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
    +     * To generate it, [install](https://grpc.io/docs/protoc-installation/) and
    +     * run `protoc` with --include_imports and --descriptor_set_out. For example,
    +     * to generate for moon/shot/app.proto, run
    +     * ```
    +     * $protoc  --proto_path=/app_path --proto_path=/lib_path \
    +     * --include_imports \
    +     * --descriptor_set_out=descriptors.data \
    +     * moon/shot/app.proto
    +     * ```
    +     * For more details, see protobuffer [self
    +     * description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
    +     * 
    + * + * bytes proto_descriptors = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The protoDescriptors. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProtoDescriptors() { + return protoDescriptors_; + } + + /** + * + * + *
    +     * Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements in
    +     * 'extra_statements' above.
    +     * Contains a protobuf-serialized
    +     * [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
    +     * To generate it, [install](https://grpc.io/docs/protoc-installation/) and
    +     * run `protoc` with --include_imports and --descriptor_set_out. For example,
    +     * to generate for moon/shot/app.proto, run
    +     * ```
    +     * $protoc  --proto_path=/app_path --proto_path=/lib_path \
    +     * --include_imports \
    +     * --descriptor_set_out=descriptors.data \
    +     * moon/shot/app.proto
    +     * ```
    +     * For more details, see protobuffer [self
    +     * description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
    +     * 
    + * + * bytes proto_descriptors = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The protoDescriptors to set. + * @return This builder for chaining. + */ + public Builder setProtoDescriptors(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + protoDescriptors_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements in
    +     * 'extra_statements' above.
    +     * Contains a protobuf-serialized
    +     * [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
    +     * To generate it, [install](https://grpc.io/docs/protoc-installation/) and
    +     * run `protoc` with --include_imports and --descriptor_set_out. For example,
    +     * to generate for moon/shot/app.proto, run
    +     * ```
    +     * $protoc  --proto_path=/app_path --proto_path=/lib_path \
    +     * --include_imports \
    +     * --descriptor_set_out=descriptors.data \
    +     * moon/shot/app.proto
    +     * ```
    +     * For more details, see protobuffer [self
    +     * description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
    +     * 
    + * + * bytes proto_descriptors = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearProtoDescriptors() { + bitField0_ = (bitField0_ & ~0x00000020); + protoDescriptors_ = getDefaultInstance().getProtoDescriptors(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.CreateDatabaseRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.CreateDatabaseRequest) + private static final com.google.spanner.admin.database.v1.CreateDatabaseRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.CreateDatabaseRequest(); + } + + public static com.google.spanner.admin.database.v1.CreateDatabaseRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateDatabaseRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.spanner.admin.database.v1.CreateDatabaseRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseRequestOrBuilder.java new file mode 100644 index 000000000000..4e20351bf434 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseRequestOrBuilder.java @@ -0,0 +1,266 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface CreateDatabaseRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.CreateDatabaseRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the instance that will serve the new database.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
    +   * Required. The name of the instance that will serve the new database.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
    +   * Required. A `CREATE DATABASE` statement, which specifies the ID of the
    +   * new database.  The database ID must conform to the regular expression
    +   * `[a-z][a-z0-9_\-]*[a-z0-9]` and be between 2 and 30 characters in length.
    +   * If the database ID is a reserved word or if it contains a hyphen, the
    +   * database ID must be enclosed in backticks (`` ` ``).
    +   * 
    + * + * string create_statement = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The createStatement. + */ + java.lang.String getCreateStatement(); + + /** + * + * + *
    +   * Required. A `CREATE DATABASE` statement, which specifies the ID of the
    +   * new database.  The database ID must conform to the regular expression
    +   * `[a-z][a-z0-9_\-]*[a-z0-9]` and be between 2 and 30 characters in length.
    +   * If the database ID is a reserved word or if it contains a hyphen, the
    +   * database ID must be enclosed in backticks (`` ` ``).
    +   * 
    + * + * string create_statement = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for createStatement. + */ + com.google.protobuf.ByteString getCreateStatementBytes(); + + /** + * + * + *
    +   * Optional. A list of DDL statements to run inside the newly created
    +   * database. Statements can create tables, indexes, etc. These
    +   * statements execute atomically with the creation of the database:
    +   * if there is an error in any statement, the database is not created.
    +   * 
    + * + * repeated string extra_statements = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the extraStatements. + */ + java.util.List getExtraStatementsList(); + + /** + * + * + *
    +   * Optional. A list of DDL statements to run inside the newly created
    +   * database. Statements can create tables, indexes, etc. These
    +   * statements execute atomically with the creation of the database:
    +   * if there is an error in any statement, the database is not created.
    +   * 
    + * + * repeated string extra_statements = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of extraStatements. + */ + int getExtraStatementsCount(); + + /** + * + * + *
    +   * Optional. A list of DDL statements to run inside the newly created
    +   * database. Statements can create tables, indexes, etc. These
    +   * statements execute atomically with the creation of the database:
    +   * if there is an error in any statement, the database is not created.
    +   * 
    + * + * repeated string extra_statements = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The extraStatements at the given index. + */ + java.lang.String getExtraStatements(int index); + + /** + * + * + *
    +   * Optional. A list of DDL statements to run inside the newly created
    +   * database. Statements can create tables, indexes, etc. These
    +   * statements execute atomically with the creation of the database:
    +   * if there is an error in any statement, the database is not created.
    +   * 
    + * + * repeated string extra_statements = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the extraStatements at the given index. + */ + com.google.protobuf.ByteString getExtraStatementsBytes(int index); + + /** + * + * + *
    +   * Optional. The encryption configuration for the database. If this field is
    +   * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +   * Google default encryption.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the encryptionConfig field is set. + */ + boolean hasEncryptionConfig(); + + /** + * + * + *
    +   * Optional. The encryption configuration for the database. If this field is
    +   * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +   * Google default encryption.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The encryptionConfig. + */ + com.google.spanner.admin.database.v1.EncryptionConfig getEncryptionConfig(); + + /** + * + * + *
    +   * Optional. The encryption configuration for the database. If this field is
    +   * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +   * Google default encryption.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder getEncryptionConfigOrBuilder(); + + /** + * + * + *
    +   * Optional. The dialect of the Cloud Spanner Database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for databaseDialect. + */ + int getDatabaseDialectValue(); + + /** + * + * + *
    +   * Optional. The dialect of the Cloud Spanner Database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The databaseDialect. + */ + com.google.spanner.admin.database.v1.DatabaseDialect getDatabaseDialect(); + + /** + * + * + *
    +   * Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements in
    +   * 'extra_statements' above.
    +   * Contains a protobuf-serialized
    +   * [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
    +   * To generate it, [install](https://grpc.io/docs/protoc-installation/) and
    +   * run `protoc` with --include_imports and --descriptor_set_out. For example,
    +   * to generate for moon/shot/app.proto, run
    +   * ```
    +   * $protoc  --proto_path=/app_path --proto_path=/lib_path \
    +   * --include_imports \
    +   * --descriptor_set_out=descriptors.data \
    +   * moon/shot/app.proto
    +   * ```
    +   * For more details, see protobuffer [self
    +   * description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
    +   * 
    + * + * bytes proto_descriptors = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The protoDescriptors. + */ + com.google.protobuf.ByteString getProtoDescriptors(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CrontabSpec.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CrontabSpec.java new file mode 100644 index 000000000000..fea9434cd1e2 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CrontabSpec.java @@ -0,0 +1,1260 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup_schedule.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * CrontabSpec can be used to specify the version time and frequency at
    + * which the backup should be created.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CrontabSpec} + */ +@com.google.protobuf.Generated +public final class CrontabSpec extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.CrontabSpec) + CrontabSpecOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CrontabSpec"); + } + + // Use CrontabSpec.newBuilder() to construct. + private CrontabSpec(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CrontabSpec() { + text_ = ""; + timeZone_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_CrontabSpec_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_CrontabSpec_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CrontabSpec.class, + com.google.spanner.admin.database.v1.CrontabSpec.Builder.class); + } + + private int bitField0_; + public static final int TEXT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object text_ = ""; + + /** + * + * + *
    +   * Required. Textual representation of the crontab. User can customize the
    +   * backup frequency and the backup version time using the cron
    +   * expression. The version time must be in UTC timezone.
    +   *
    +   * The backup will contain an externally consistent copy of the
    +   * database at the version time. Allowed frequencies are 12 hour, 1 day,
    +   * 1 week and 1 month. Examples of valid cron specifications:
    +   * * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC.
    +   * * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC.
    +   * * `0 2 * * * `    : once a day at 2 past midnight in UTC.
    +   * * `0 2 * * 0 `    : once a week every Sunday at 2 past midnight in UTC.
    +   * * `0 2 8 * * `    : once a month on 8th day at 2 past midnight in UTC.
    +   * 
    + * + * string text = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The text. + */ + @java.lang.Override + public java.lang.String getText() { + java.lang.Object ref = text_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + text_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. Textual representation of the crontab. User can customize the
    +   * backup frequency and the backup version time using the cron
    +   * expression. The version time must be in UTC timezone.
    +   *
    +   * The backup will contain an externally consistent copy of the
    +   * database at the version time. Allowed frequencies are 12 hour, 1 day,
    +   * 1 week and 1 month. Examples of valid cron specifications:
    +   * * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC.
    +   * * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC.
    +   * * `0 2 * * * `    : once a day at 2 past midnight in UTC.
    +   * * `0 2 * * 0 `    : once a week every Sunday at 2 past midnight in UTC.
    +   * * `0 2 8 * * `    : once a month on 8th day at 2 past midnight in UTC.
    +   * 
    + * + * string text = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for text. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTextBytes() { + java.lang.Object ref = text_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + text_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TIME_ZONE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object timeZone_ = ""; + + /** + * + * + *
    +   * Output only. The time zone of the times in `CrontabSpec.text`. Currently
    +   * only UTC is supported.
    +   * 
    + * + * string time_zone = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The timeZone. + */ + @java.lang.Override + public java.lang.String getTimeZone() { + java.lang.Object ref = timeZone_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + timeZone_ = s; + return s; + } + } + + /** + * + * + *
    +   * Output only. The time zone of the times in `CrontabSpec.text`. Currently
    +   * only UTC is supported.
    +   * 
    + * + * string time_zone = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for timeZone. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTimeZoneBytes() { + java.lang.Object ref = timeZone_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + timeZone_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CREATION_WINDOW_FIELD_NUMBER = 3; + private com.google.protobuf.Duration creationWindow_; + + /** + * + * + *
    +   * Output only. Schedule backups will contain an externally consistent copy
    +   * of the database at the version time specified in
    +   * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +   * of the scheduled backups at that version time. Spanner will initiate
    +   * the creation of scheduled backups within the time window bounded by the
    +   * version_time specified in `schedule_spec.cron_spec` and version_time +
    +   * `creation_window`.
    +   * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the creationWindow field is set. + */ + @java.lang.Override + public boolean hasCreationWindow() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Output only. Schedule backups will contain an externally consistent copy
    +   * of the database at the version time specified in
    +   * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +   * of the scheduled backups at that version time. Spanner will initiate
    +   * the creation of scheduled backups within the time window bounded by the
    +   * version_time specified in `schedule_spec.cron_spec` and version_time +
    +   * `creation_window`.
    +   * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The creationWindow. + */ + @java.lang.Override + public com.google.protobuf.Duration getCreationWindow() { + return creationWindow_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : creationWindow_; + } + + /** + * + * + *
    +   * Output only. Schedule backups will contain an externally consistent copy
    +   * of the database at the version time specified in
    +   * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +   * of the scheduled backups at that version time. Spanner will initiate
    +   * the creation of scheduled backups within the time window bounded by the
    +   * version_time specified in `schedule_spec.cron_spec` and version_time +
    +   * `creation_window`.
    +   * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.DurationOrBuilder getCreationWindowOrBuilder() { + return creationWindow_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : creationWindow_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(text_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, text_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(timeZone_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, timeZone_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getCreationWindow()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(text_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, text_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(timeZone_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, timeZone_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getCreationWindow()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
com.google.spanner.admin.database.v1.CrontabSpec)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.CrontabSpec other = + (com.google.spanner.admin.database.v1.CrontabSpec) obj; + + if (!getText().equals(other.getText())) return false; + if (!getTimeZone().equals(other.getTimeZone())) return false; + if (hasCreationWindow() != other.hasCreationWindow()) return false; + if (hasCreationWindow()) { + if (!getCreationWindow().equals(other.getCreationWindow())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TEXT_FIELD_NUMBER; + hash = (53 * hash) + getText().hashCode(); + hash = (37 * hash) + TIME_ZONE_FIELD_NUMBER; + hash = (53 * hash) + getTimeZone().hashCode(); + if (hasCreationWindow()) { + hash = (37 * hash) + CREATION_WINDOW_FIELD_NUMBER; + hash = (53 * hash) + getCreationWindow().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.admin.database.v1.CrontabSpec prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * CrontabSpec can be used to specify the version time and frequency at
    +   * which the backup should be created.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CrontabSpec} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.CrontabSpec) + com.google.spanner.admin.database.v1.CrontabSpecOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_CrontabSpec_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_CrontabSpec_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CrontabSpec.class, + com.google.spanner.admin.database.v1.CrontabSpec.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.CrontabSpec.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCreationWindowFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + text_ = ""; + timeZone_ = ""; + creationWindow_ = null; + if (creationWindowBuilder_ != null) { + creationWindowBuilder_.dispose(); + creationWindowBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_CrontabSpec_descriptor; + } + 
+ @java.lang.Override + public com.google.spanner.admin.database.v1.CrontabSpec getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.CrontabSpec.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CrontabSpec build() { + com.google.spanner.admin.database.v1.CrontabSpec result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CrontabSpec buildPartial() { + com.google.spanner.admin.database.v1.CrontabSpec result = + new com.google.spanner.admin.database.v1.CrontabSpec(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.CrontabSpec result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.text_ = text_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.timeZone_ = timeZone_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.creationWindow_ = + creationWindowBuilder_ == null ? 
creationWindow_ : creationWindowBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.CrontabSpec) { + return mergeFrom((com.google.spanner.admin.database.v1.CrontabSpec) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.CrontabSpec other) { + if (other == com.google.spanner.admin.database.v1.CrontabSpec.getDefaultInstance()) + return this; + if (!other.getText().isEmpty()) { + text_ = other.text_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getTimeZone().isEmpty()) { + timeZone_ = other.timeZone_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasCreationWindow()) { + mergeCreationWindow(other.getCreationWindow()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + text_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + timeZone_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetCreationWindowFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an 
endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object text_ = ""; + + /** + * + * + *
    +     * Required. Textual representation of the crontab. User can customize the
    +     * backup frequency and the backup version time using the cron
    +     * expression. The version time must be in UTC timezone.
    +     *
    +     * The backup will contain an externally consistent copy of the
    +     * database at the version time. Allowed frequencies are 12 hour, 1 day,
    +     * 1 week and 1 month. Examples of valid cron specifications:
    +     * * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC.
    +     * * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC.
    +     * * `0 2 * * * `    : once a day at 2 past midnight in UTC.
    +     * * `0 2 * * 0 `    : once a week every Sunday at 2 past midnight in UTC.
    +     * * `0 2 8 * * `    : once a month on 8th day at 2 past midnight in UTC.
    +     * 
    + * + * string text = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The text. + */ + public java.lang.String getText() { + java.lang.Object ref = text_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + text_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. Textual representation of the crontab. User can customize the
    +     * backup frequency and the backup version time using the cron
    +     * expression. The version time must be in UTC timezone.
    +     *
    +     * The backup will contain an externally consistent copy of the
    +     * database at the version time. Allowed frequencies are 12 hour, 1 day,
    +     * 1 week and 1 month. Examples of valid cron specifications:
    +     * * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC.
    +     * * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC.
    +     * * `0 2 * * * `    : once a day at 2 past midnight in UTC.
    +     * * `0 2 * * 0 `    : once a week every Sunday at 2 past midnight in UTC.
    +     * * `0 2 8 * * `    : once a month on 8th day at 2 past midnight in UTC.
    +     * 
    + * + * string text = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for text. + */ + public com.google.protobuf.ByteString getTextBytes() { + java.lang.Object ref = text_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + text_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. Textual representation of the crontab. User can customize the
    +     * backup frequency and the backup version time using the cron
    +     * expression. The version time must be in UTC timezone.
    +     *
    +     * The backup will contain an externally consistent copy of the
    +     * database at the version time. Allowed frequencies are 12 hour, 1 day,
    +     * 1 week and 1 month. Examples of valid cron specifications:
    +     * * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC.
    +     * * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC.
    +     * * `0 2 * * * `    : once a day at 2 past midnight in UTC.
    +     * * `0 2 * * 0 `    : once a week every Sunday at 2 past midnight in UTC.
    +     * * `0 2 8 * * `    : once a month on 8th day at 2 past midnight in UTC.
    +     * 
    + * + * string text = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The text to set. + * @return This builder for chaining. + */ + public Builder setText(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + text_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. Textual representation of the crontab. User can customize the
    +     * backup frequency and the backup version time using the cron
    +     * expression. The version time must be in UTC timezone.
    +     *
    +     * The backup will contain an externally consistent copy of the
    +     * database at the version time. Allowed frequencies are 12 hour, 1 day,
    +     * 1 week and 1 month. Examples of valid cron specifications:
    +     * * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC.
    +     * * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC.
    +     * * `0 2 * * * `    : once a day at 2 past midnight in UTC.
    +     * * `0 2 * * 0 `    : once a week every Sunday at 2 past midnight in UTC.
    +     * * `0 2 8 * * `    : once a month on 8th day at 2 past midnight in UTC.
    +     * 
    + * + * string text = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearText() { + text_ = getDefaultInstance().getText(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. Textual representation of the crontab. User can customize the
    +     * backup frequency and the backup version time using the cron
    +     * expression. The version time must be in UTC timezone.
    +     *
    +     * The backup will contain an externally consistent copy of the
    +     * database at the version time. Allowed frequencies are 12 hour, 1 day,
    +     * 1 week and 1 month. Examples of valid cron specifications:
    +     * * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC.
    +     * * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC.
    +     * * `0 2 * * * `    : once a day at 2 past midnight in UTC.
    +     * * `0 2 * * 0 `    : once a week every Sunday at 2 past midnight in UTC.
    +     * * `0 2 8 * * `    : once a month on 8th day at 2 past midnight in UTC.
    +     * 
    + * + * string text = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for text to set. + * @return This builder for chaining. + */ + public Builder setTextBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + text_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object timeZone_ = ""; + + /** + * + * + *
    +     * Output only. The time zone of the times in `CrontabSpec.text`. Currently
    +     * only UTC is supported.
    +     * 
    + * + * string time_zone = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The timeZone. + */ + public java.lang.String getTimeZone() { + java.lang.Object ref = timeZone_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + timeZone_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Output only. The time zone of the times in `CrontabSpec.text`. Currently
    +     * only UTC is supported.
    +     * 
    + * + * string time_zone = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for timeZone. + */ + public com.google.protobuf.ByteString getTimeZoneBytes() { + java.lang.Object ref = timeZone_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + timeZone_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Output only. The time zone of the times in `CrontabSpec.text`. Currently
    +     * only UTC is supported.
    +     * 
    + * + * string time_zone = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The timeZone to set. + * @return This builder for chaining. + */ + public Builder setTimeZone(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + timeZone_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The time zone of the times in `CrontabSpec.text`. Currently
    +     * only UTC is supported.
    +     * 
    + * + * string time_zone = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearTimeZone() { + timeZone_ = getDefaultInstance().getTimeZone(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The time zone of the times in `CrontabSpec.text`. Currently
    +     * only UTC is supported.
    +     * 
    + * + * string time_zone = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for timeZone to set. + * @return This builder for chaining. + */ + public Builder setTimeZoneBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + timeZone_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.Duration creationWindow_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + creationWindowBuilder_; + + /** + * + * + *
    +     * Output only. Schedule backups will contain an externally consistent copy
    +     * of the database at the version time specified in
    +     * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +     * of the scheduled backups at that version time. Spanner will initiate
    +     * the creation of scheduled backups within the time window bounded by the
    +     * version_time specified in `schedule_spec.cron_spec` and version_time +
    +     * `creation_window`.
    +     * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the creationWindow field is set. + */ + public boolean hasCreationWindow() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Output only. Schedule backups will contain an externally consistent copy
    +     * of the database at the version time specified in
    +     * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +     * of the scheduled backups at that version time. Spanner will initiate
    +     * the creation of scheduled backups within the time window bounded by the
    +     * version_time specified in `schedule_spec.cron_spec` and version_time +
    +     * `creation_window`.
    +     * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The creationWindow. + */ + public com.google.protobuf.Duration getCreationWindow() { + if (creationWindowBuilder_ == null) { + return creationWindow_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : creationWindow_; + } else { + return creationWindowBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. Schedule backups will contain an externally consistent copy
    +     * of the database at the version time specified in
    +     * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +     * of the scheduled backups at that version time. Spanner will initiate
    +     * the creation of scheduled backups within the time window bounded by the
    +     * version_time specified in `schedule_spec.cron_spec` and version_time +
    +     * `creation_window`.
    +     * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreationWindow(com.google.protobuf.Duration value) { + if (creationWindowBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + creationWindow_ = value; + } else { + creationWindowBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Schedule backups will contain an externally consistent copy
    +     * of the database at the version time specified in
    +     * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +     * of the scheduled backups at that version time. Spanner will initiate
    +     * the creation of scheduled backups within the time window bounded by the
    +     * version_time specified in `schedule_spec.cron_spec` and version_time +
    +     * `creation_window`.
    +     * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreationWindow(com.google.protobuf.Duration.Builder builderForValue) { + if (creationWindowBuilder_ == null) { + creationWindow_ = builderForValue.build(); + } else { + creationWindowBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Schedule backups will contain an externally consistent copy
    +     * of the database at the version time specified in
    +     * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +     * of the scheduled backups at that version time. Spanner will initiate
    +     * the creation of scheduled backups within the time window bounded by the
    +     * version_time specified in `schedule_spec.cron_spec` and version_time +
    +     * `creation_window`.
    +     * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreationWindow(com.google.protobuf.Duration value) { + if (creationWindowBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && creationWindow_ != null + && creationWindow_ != com.google.protobuf.Duration.getDefaultInstance()) { + getCreationWindowBuilder().mergeFrom(value); + } else { + creationWindow_ = value; + } + } else { + creationWindowBuilder_.mergeFrom(value); + } + if (creationWindow_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. Schedule backups will contain an externally consistent copy
    +     * of the database at the version time specified in
    +     * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +     * of the scheduled backups at that version time. Spanner will initiate
    +     * the creation of scheduled backups within the time window bounded by the
    +     * version_time specified in `schedule_spec.cron_spec` and version_time +
    +     * `creation_window`.
    +     * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreationWindow() { + bitField0_ = (bitField0_ & ~0x00000004); + creationWindow_ = null; + if (creationWindowBuilder_ != null) { + creationWindowBuilder_.dispose(); + creationWindowBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Schedule backups will contain an externally consistent copy
    +     * of the database at the version time specified in
    +     * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +     * of the scheduled backups at that version time. Spanner will initiate
    +     * the creation of scheduled backups within the time window bounded by the
    +     * version_time specified in `schedule_spec.cron_spec` and version_time +
    +     * `creation_window`.
    +     * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Duration.Builder getCreationWindowBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetCreationWindowFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. Schedule backups will contain an externally consistent copy
    +     * of the database at the version time specified in
    +     * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +     * of the scheduled backups at that version time. Spanner will initiate
    +     * the creation of scheduled backups within the time window bounded by the
    +     * version_time specified in `schedule_spec.cron_spec` and version_time +
    +     * `creation_window`.
    +     * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.DurationOrBuilder getCreationWindowOrBuilder() { + if (creationWindowBuilder_ != null) { + return creationWindowBuilder_.getMessageOrBuilder(); + } else { + return creationWindow_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : creationWindow_; + } + } + + /** + * + * + *
    +     * Output only. Schedule backups will contain an externally consistent copy
    +     * of the database at the version time specified in
    +     * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +     * of the scheduled backups at that version time. Spanner will initiate
    +     * the creation of scheduled backups within the time window bounded by the
    +     * version_time specified in `schedule_spec.cron_spec` and version_time +
    +     * `creation_window`.
    +     * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + internalGetCreationWindowFieldBuilder() { + if (creationWindowBuilder_ == null) { + creationWindowBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder>( + getCreationWindow(), getParentForChildren(), isClean()); + creationWindow_ = null; + } + return creationWindowBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.CrontabSpec) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.CrontabSpec) + private static final com.google.spanner.admin.database.v1.CrontabSpec DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.CrontabSpec(); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CrontabSpec parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CrontabSpec getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CrontabSpecOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CrontabSpecOrBuilder.java new file mode 100644 index 000000000000..03c7ce8580d6 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CrontabSpecOrBuilder.java @@ -0,0 +1,165 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup_schedule.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface CrontabSpecOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.CrontabSpec) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. Textual representation of the crontab. User can customize the
    +   * backup frequency and the backup version time using the cron
    +   * expression. The version time must be in UTC timezone.
    +   *
    +   * The backup will contain an externally consistent copy of the
    +   * database at the version time. Allowed frequencies are 12 hour, 1 day,
    +   * 1 week and 1 month. Examples of valid cron specifications:
    +   * * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC.
    +   * * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC.
    +   * * `0 2 * * * `    : once a day at 2 past midnight in UTC.
    +   * * `0 2 * * 0 `    : once a week every Sunday at 2 past midnight in UTC.
    +   * * `0 2 8 * * `    : once a month on 8th day at 2 past midnight in UTC.
    +   * 
    + * + * string text = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The text. + */ + java.lang.String getText(); + + /** + * + * + *
    +   * Required. Textual representation of the crontab. User can customize the
    +   * backup frequency and the backup version time using the cron
    +   * expression. The version time must be in UTC timezone.
    +   *
    +   * The backup will contain an externally consistent copy of the
    +   * database at the version time. Allowed frequencies are 12 hour, 1 day,
    +   * 1 week and 1 month. Examples of valid cron specifications:
    +   * * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC.
    +   * * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC.
    +   * * `0 2 * * * `    : once a day at 2 past midnight in UTC.
    +   * * `0 2 * * 0 `    : once a week every Sunday at 2 past midnight in UTC.
    +   * * `0 2 8 * * `    : once a month on 8th day at 2 past midnight in UTC.
    +   * 
    + * + * string text = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for text. + */ + com.google.protobuf.ByteString getTextBytes(); + + /** + * + * + *
    +   * Output only. The time zone of the times in `CrontabSpec.text`. Currently
    +   * only UTC is supported.
    +   * 
    + * + * string time_zone = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The timeZone. + */ + java.lang.String getTimeZone(); + + /** + * + * + *
    +   * Output only. The time zone of the times in `CrontabSpec.text`. Currently
    +   * only UTC is supported.
    +   * 
    + * + * string time_zone = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for timeZone. + */ + com.google.protobuf.ByteString getTimeZoneBytes(); + + /** + * + * + *
    +   * Output only. Schedule backups will contain an externally consistent copy
    +   * of the database at the version time specified in
    +   * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +   * of the scheduled backups at that version time. Spanner will initiate
    +   * the creation of scheduled backups within the time window bounded by the
    +   * version_time specified in `schedule_spec.cron_spec` and version_time +
    +   * `creation_window`.
    +   * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the creationWindow field is set. + */ + boolean hasCreationWindow(); + + /** + * + * + *
    +   * Output only. Schedule backups will contain an externally consistent copy
    +   * of the database at the version time specified in
    +   * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +   * of the scheduled backups at that version time. Spanner will initiate
    +   * the creation of scheduled backups within the time window bounded by the
    +   * version_time specified in `schedule_spec.cron_spec` and version_time +
    +   * `creation_window`.
    +   * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The creationWindow. + */ + com.google.protobuf.Duration getCreationWindow(); + + /** + * + * + *
    +   * Output only. Schedule backups will contain an externally consistent copy
    +   * of the database at the version time specified in
    +   * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +   * of the scheduled backups at that version time. Spanner will initiate
    +   * the creation of scheduled backups within the time window bounded by the
    +   * version_time specified in `schedule_spec.cron_spec` and version_time +
    +   * `creation_window`.
    +   * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.DurationOrBuilder getCreationWindowOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CryptoKeyName.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CryptoKeyName.java new file mode 100644 index 000000000000..7bc4b6ba8ab0 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CryptoKeyName.java @@ -0,0 +1,261 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.spanner.admin.database.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class CryptoKeyName implements ResourceName { + private static final PathTemplate PROJECT_LOCATION_KEY_RING_CRYPTO_KEY = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}"); + private volatile Map fieldValuesMap; + private final String project; + private final String location; + private final String keyRing; + private final String cryptoKey; + + @Deprecated + protected CryptoKeyName() { + project = null; + location = null; + keyRing = null; + cryptoKey = null; + } + + private CryptoKeyName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + location = Preconditions.checkNotNull(builder.getLocation()); + keyRing = Preconditions.checkNotNull(builder.getKeyRing()); + cryptoKey = Preconditions.checkNotNull(builder.getCryptoKey()); + } + + public String getProject() { + return project; + } + + public String getLocation() { + return location; + } + + public String getKeyRing() { + return keyRing; + } + + public String getCryptoKey() { + return cryptoKey; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static CryptoKeyName of( + String project, String location, String keyRing, String cryptoKey) { + return newBuilder() + .setProject(project) + .setLocation(location) + .setKeyRing(keyRing) + .setCryptoKey(cryptoKey) + .build(); + } + + public static String format(String project, String location, String keyRing, String cryptoKey) { + return newBuilder() + .setProject(project) + .setLocation(location) + .setKeyRing(keyRing) + .setCryptoKey(cryptoKey) + .build() + .toString(); + } + + public static CryptoKeyName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_LOCATION_KEY_RING_CRYPTO_KEY.validatedMatch( + formattedString, "CryptoKeyName.parse: formattedString 
not in valid format"); + return of( + matchMap.get("project"), + matchMap.get("location"), + matchMap.get("key_ring"), + matchMap.get("crypto_key")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (CryptoKeyName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_LOCATION_KEY_RING_CRYPTO_KEY.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (location != null) { + fieldMapBuilder.put("location", location); + } + if (keyRing != null) { + fieldMapBuilder.put("key_ring", keyRing); + } + if (cryptoKey != null) { + fieldMapBuilder.put("crypto_key", cryptoKey); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_LOCATION_KEY_RING_CRYPTO_KEY.instantiate( + "project", project, "location", location, "key_ring", keyRing, "crypto_key", cryptoKey); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null || getClass() == o.getClass()) { + CryptoKeyName that = ((CryptoKeyName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.location, that.location) + && Objects.equals(this.keyRing, that.keyRing) + && 
Objects.equals(this.cryptoKey, that.cryptoKey); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(location); + h *= 1000003; + h ^= Objects.hashCode(keyRing); + h *= 1000003; + h ^= Objects.hashCode(cryptoKey); + return h; + } + + /** + * Builder for + * projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}. + */ + public static class Builder { + private String project; + private String location; + private String keyRing; + private String cryptoKey; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getLocation() { + return location; + } + + public String getKeyRing() { + return keyRing; + } + + public String getCryptoKey() { + return cryptoKey; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setLocation(String location) { + this.location = location; + return this; + } + + public Builder setKeyRing(String keyRing) { + this.keyRing = keyRing; + return this; + } + + public Builder setCryptoKey(String cryptoKey) { + this.cryptoKey = cryptoKey; + return this; + } + + private Builder(CryptoKeyName cryptoKeyName) { + this.project = cryptoKeyName.project; + this.location = cryptoKeyName.location; + this.keyRing = cryptoKeyName.keyRing; + this.cryptoKey = cryptoKeyName.cryptoKey; + } + + public CryptoKeyName build() { + return new CryptoKeyName(this); + } + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CryptoKeyVersionName.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CryptoKeyVersionName.java new file mode 100644 index 000000000000..f1f20754f69f --- /dev/null +++ 
b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CryptoKeyVersionName.java @@ -0,0 +1,298 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.spanner.admin.database.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class CryptoKeyVersionName implements ResourceName { + private static final PathTemplate PROJECT_LOCATION_KEY_RING_CRYPTO_KEY_CRYPTO_KEY_VERSION = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}"); + private volatile Map fieldValuesMap; + private final String project; + private final String location; + private final String keyRing; + private final String cryptoKey; + private final String cryptoKeyVersion; + + @Deprecated + protected CryptoKeyVersionName() { + project = null; + location = null; + keyRing = null; + cryptoKey = null; + cryptoKeyVersion = null; + } + + private CryptoKeyVersionName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + location = Preconditions.checkNotNull(builder.getLocation()); + keyRing = Preconditions.checkNotNull(builder.getKeyRing()); + cryptoKey = Preconditions.checkNotNull(builder.getCryptoKey()); + cryptoKeyVersion = Preconditions.checkNotNull(builder.getCryptoKeyVersion()); + } + + public String getProject() { + return project; + } + + public String getLocation() { + return location; + } + + public String getKeyRing() { + return keyRing; + } + + public String getCryptoKey() { + return cryptoKey; + } + + public String getCryptoKeyVersion() { + return cryptoKeyVersion; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static CryptoKeyVersionName of( + String project, String location, String keyRing, String cryptoKey, String cryptoKeyVersion) { + return newBuilder() + .setProject(project) + .setLocation(location) + .setKeyRing(keyRing) + .setCryptoKey(cryptoKey) + .setCryptoKeyVersion(cryptoKeyVersion) + .build(); + } + + public static String format( + String project, String location, String keyRing, String cryptoKey, String cryptoKeyVersion) 
{ + return newBuilder() + .setProject(project) + .setLocation(location) + .setKeyRing(keyRing) + .setCryptoKey(cryptoKey) + .setCryptoKeyVersion(cryptoKeyVersion) + .build() + .toString(); + } + + public static CryptoKeyVersionName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_LOCATION_KEY_RING_CRYPTO_KEY_CRYPTO_KEY_VERSION.validatedMatch( + formattedString, "CryptoKeyVersionName.parse: formattedString not in valid format"); + return of( + matchMap.get("project"), + matchMap.get("location"), + matchMap.get("key_ring"), + matchMap.get("crypto_key"), + matchMap.get("crypto_key_version")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (CryptoKeyVersionName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_LOCATION_KEY_RING_CRYPTO_KEY_CRYPTO_KEY_VERSION.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (location != null) { + fieldMapBuilder.put("location", location); + } + if (keyRing != null) { + fieldMapBuilder.put("key_ring", keyRing); + } + if (cryptoKey != null) { + fieldMapBuilder.put("crypto_key", cryptoKey); + } + if (cryptoKeyVersion != null) { + fieldMapBuilder.put("crypto_key_version", cryptoKeyVersion); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public 
String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_LOCATION_KEY_RING_CRYPTO_KEY_CRYPTO_KEY_VERSION.instantiate( + "project", + project, + "location", + location, + "key_ring", + keyRing, + "crypto_key", + cryptoKey, + "crypto_key_version", + cryptoKeyVersion); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null || getClass() == o.getClass()) { + CryptoKeyVersionName that = ((CryptoKeyVersionName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.location, that.location) + && Objects.equals(this.keyRing, that.keyRing) + && Objects.equals(this.cryptoKey, that.cryptoKey) + && Objects.equals(this.cryptoKeyVersion, that.cryptoKeyVersion); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(location); + h *= 1000003; + h ^= Objects.hashCode(keyRing); + h *= 1000003; + h ^= Objects.hashCode(cryptoKey); + h *= 1000003; + h ^= Objects.hashCode(cryptoKeyVersion); + return h; + } + + /** + * Builder for + * projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}. 
+ */ + public static class Builder { + private String project; + private String location; + private String keyRing; + private String cryptoKey; + private String cryptoKeyVersion; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getLocation() { + return location; + } + + public String getKeyRing() { + return keyRing; + } + + public String getCryptoKey() { + return cryptoKey; + } + + public String getCryptoKeyVersion() { + return cryptoKeyVersion; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setLocation(String location) { + this.location = location; + return this; + } + + public Builder setKeyRing(String keyRing) { + this.keyRing = keyRing; + return this; + } + + public Builder setCryptoKey(String cryptoKey) { + this.cryptoKey = cryptoKey; + return this; + } + + public Builder setCryptoKeyVersion(String cryptoKeyVersion) { + this.cryptoKeyVersion = cryptoKeyVersion; + return this; + } + + private Builder(CryptoKeyVersionName cryptoKeyVersionName) { + this.project = cryptoKeyVersionName.project; + this.location = cryptoKeyVersionName.location; + this.keyRing = cryptoKeyVersionName.keyRing; + this.cryptoKey = cryptoKeyVersionName.cryptoKey; + this.cryptoKeyVersion = cryptoKeyVersionName.cryptoKeyVersion; + } + + public CryptoKeyVersionName build() { + return new CryptoKeyVersionName(this); + } + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/Database.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/Database.java new file mode 100644 index 000000000000..6afbcbbf51ea --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/Database.java @@ -0,0 +1,3957 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, 
Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * A Cloud Spanner database.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.Database} + */ +@com.google.protobuf.Generated +public final class Database extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.Database) + DatabaseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Database"); + } + + // Use Database.newBuilder() to construct. + private Database(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Database() { + name_ = ""; + state_ = 0; + encryptionInfo_ = java.util.Collections.emptyList(); + versionRetentionPeriod_ = ""; + defaultLeader_ = ""; + databaseDialect_ = 0; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_Database_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_Database_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.Database.class, + com.google.spanner.admin.database.v1.Database.Builder.class); + } + + /** + * + * + *
    +   * Indicates the current state of the database.
    +   * 
    + * + * Protobuf enum {@code google.spanner.admin.database.v1.Database.State} + */ + public enum State implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * STATE_UNSPECIFIED = 0; + */ + STATE_UNSPECIFIED(0), + /** + * + * + *
    +     * The database is still being created. Operations on the database may fail
    +     * with `FAILED_PRECONDITION` in this state.
    +     * 
    + * + * CREATING = 1; + */ + CREATING(1), + /** + * + * + *
    +     * The database is fully created and ready for use.
    +     * 
    + * + * READY = 2; + */ + READY(2), + /** + * + * + *
    +     * The database is fully created and ready for use, but is still
    +     * being optimized for performance and cannot handle full load.
    +     *
    +     * In this state, the database still references the backup
    +     * it was restored from, preventing the backup
    +     * from being deleted. When optimizations are complete, the full performance
    +     * of the database will be restored, and the database will transition to
    +     * `READY` state.
    +     * 
    + * + * READY_OPTIMIZING = 3; + */ + READY_OPTIMIZING(3), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "State"); + } + + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * STATE_UNSPECIFIED = 0; + */ + public static final int STATE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * The database is still being created. Operations on the database may fail
    +     * with `FAILED_PRECONDITION` in this state.
    +     * 
    + * + * CREATING = 1; + */ + public static final int CREATING_VALUE = 1; + + /** + * + * + *
    +     * The database is fully created and ready for use.
    +     * 
    + * + * READY = 2; + */ + public static final int READY_VALUE = 2; + + /** + * + * + *
    +     * The database is fully created and ready for use, but is still
    +     * being optimized for performance and cannot handle full load.
    +     *
    +     * In this state, the database still references the backup
    +     * it was restored from, preventing the backup
    +     * from being deleted. When optimizations are complete, the full performance
    +     * of the database will be restored, and the database will transition to
    +     * `READY` state.
    +     * 
    + * + * READY_OPTIMIZING = 3; + */ + public static final int READY_OPTIMIZING_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static State valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static State forNumber(int value) { + switch (value) { + case 0: + return STATE_UNSPECIFIED; + case 1: + return CREATING; + case 2: + return READY; + case 3: + return READY_OPTIMIZING; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.admin.database.v1.Database.getDescriptor().getEnumTypes().get(0); + } + + private static final State[] VALUES = values(); + + public static State 
valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private State(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.admin.database.v1.Database.State) + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Required. The name of the database. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`,
    +   * where `<database>` is as specified in the `CREATE DATABASE`
    +   * statement. This name can be passed to other API methods to
    +   * identify the database.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the database. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`,
    +   * where `<database>` is as specified in the `CREATE DATABASE`
    +   * statement. This name can be passed to other API methods to
    +   * identify the database.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int STATE_FIELD_NUMBER = 2; + private int state_ = 0; + + /** + * + * + *
    +   * Output only. The current database state.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Database.State state = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + @java.lang.Override + public int getStateValue() { + return state_; + } + + /** + * + * + *
    +   * Output only. The current database state.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Database.State state = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.Database.State getState() { + com.google.spanner.admin.database.v1.Database.State result = + com.google.spanner.admin.database.v1.Database.State.forNumber(state_); + return result == null + ? com.google.spanner.admin.database.v1.Database.State.UNRECOGNIZED + : result; + } + + public static final int CREATE_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp createTime_; + + /** + * + * + *
    +   * Output only. If exists, the time at which the database creation started.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + @java.lang.Override + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Output only. If exists, the time at which the database creation started.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + /** + * + * + *
    +   * Output only. If exists, the time at which the database creation started.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + public static final int RESTORE_INFO_FIELD_NUMBER = 4; + private com.google.spanner.admin.database.v1.RestoreInfo restoreInfo_; + + /** + * + * + *
    +   * Output only. Applicable only for restored databases. Contains information
    +   * about the restore source.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.RestoreInfo restore_info = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the restoreInfo field is set. + */ + @java.lang.Override + public boolean hasRestoreInfo() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Output only. Applicable only for restored databases. Contains information
    +   * about the restore source.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.RestoreInfo restore_info = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The restoreInfo. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreInfo getRestoreInfo() { + return restoreInfo_ == null + ? com.google.spanner.admin.database.v1.RestoreInfo.getDefaultInstance() + : restoreInfo_; + } + + /** + * + * + *
    +   * Output only. Applicable only for restored databases. Contains information
    +   * about the restore source.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.RestoreInfo restore_info = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreInfoOrBuilder getRestoreInfoOrBuilder() { + return restoreInfo_ == null + ? com.google.spanner.admin.database.v1.RestoreInfo.getDefaultInstance() + : restoreInfo_; + } + + public static final int ENCRYPTION_CONFIG_FIELD_NUMBER = 5; + private com.google.spanner.admin.database.v1.EncryptionConfig encryptionConfig_; + + /** + * + * + *
    +   * Output only. For databases that are using customer managed encryption, this
    +   * field contains the encryption configuration for the database.
    +   * For databases that are using Google default or other types of encryption,
    +   * this field is empty.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the encryptionConfig field is set. + */ + @java.lang.Override + public boolean hasEncryptionConfig() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * Output only. For databases that are using customer managed encryption, this
    +   * field contains the encryption configuration for the database.
    +   * For databases that are using Google default or other types of encryption,
    +   * this field is empty.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The encryptionConfig. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionConfig getEncryptionConfig() { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + + /** + * + * + *
    +   * Output only. For databases that are using customer managed encryption, this
    +   * field contains the encryption configuration for the database.
    +   * For databases that are using Google default or other types of encryption,
    +   * this field is empty.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder + getEncryptionConfigOrBuilder() { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + + public static final int ENCRYPTION_INFO_FIELD_NUMBER = 8; + + @SuppressWarnings("serial") + private java.util.List encryptionInfo_; + + /** + * + * + *
    +   * Output only. For databases that are using customer managed encryption, this
    +   * field contains the encryption information for the database, such as
    +   * all Cloud KMS key versions that are in use. The `encryption_status` field
    +   * inside of each `EncryptionInfo` is not populated.
    +   *
    +   * For databases that are using Google default or other types of encryption,
    +   * this field is empty.
    +   *
    +   * This field is propagated lazily from the backend. There might be a delay
    +   * from when a key version is being used and when it appears in this field.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getEncryptionInfoList() { + return encryptionInfo_; + } + + /** + * + * + *
    +   * Output only. For databases that are using customer managed encryption, this
    +   * field contains the encryption information for the database, such as
    +   * all Cloud KMS key versions that are in use. The `encryption_status` field
    +   * inside of each `EncryptionInfo` is not populated.
    +   *
    +   * For databases that are using Google default or other types of encryption,
    +   * this field is empty.
    +   *
    +   * This field is propagated lazily from the backend. There might be a delay
    +   * from when a key version is being used and when it appears in this field.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getEncryptionInfoOrBuilderList() { + return encryptionInfo_; + } + + /** + * + * + *
    +   * Output only. For databases that are using customer managed encryption, this
    +   * field contains the encryption information for the database, such as
    +   * all Cloud KMS key versions that are in use. The `encryption_status` field
    +   * inside of each `EncryptionInfo` is not populated.
    +   *
    +   * For databases that are using Google default or other types of encryption,
    +   * this field is empty.
    +   *
    +   * This field is propagated lazily from the backend. There might be a delay
    +   * from when a key version is being used and when it appears in this field.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public int getEncryptionInfoCount() { + return encryptionInfo_.size(); + } + + /** + * + * + *
    +   * Output only. For databases that are using customer managed encryption, this
    +   * field contains the encryption information for the database, such as
    +   * all Cloud KMS key versions that are in use. The `encryption_status` field
    +   * inside of each `EncryptionInfo` is not populated.
    +   *
    +   * For databases that are using Google default or other types of encryption,
    +   * this field is empty.
    +   *
    +   * This field is propagated lazily from the backend. There might be a delay
    +   * from when a key version is being used and when it appears in this field.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionInfo getEncryptionInfo(int index) { + return encryptionInfo_.get(index); + } + + /** + * + * + *
    +   * Output only. For databases that are using customer managed encryption, this
    +   * field contains the encryption information for the database, such as
    +   * all Cloud KMS key versions that are in use. The `encryption_status` field
    +   * inside of each `EncryptionInfo` is not populated.
    +   *
    +   * For databases that are using Google default or other types of encryption,
    +   * this field is empty.
    +   *
    +   * This field is propagated lazily from the backend. There might be a delay
    +   * from when a key version is being used and when it appears in this field.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder getEncryptionInfoOrBuilder( + int index) { + return encryptionInfo_.get(index); + } + + public static final int VERSION_RETENTION_PERIOD_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object versionRetentionPeriod_ = ""; + + /** + * + * + *
    +   * Output only. The period in which Cloud Spanner retains all versions of data
    +   * for the database. This is the same as the value of version_retention_period
    +   * database option set using
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
    +   * Defaults to 1 hour, if not set.
    +   * 
    + * + * string version_retention_period = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The versionRetentionPeriod. + */ + @java.lang.Override + public java.lang.String getVersionRetentionPeriod() { + java.lang.Object ref = versionRetentionPeriod_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + versionRetentionPeriod_ = s; + return s; + } + } + + /** + * + * + *
    +   * Output only. The period in which Cloud Spanner retains all versions of data
    +   * for the database. This is the same as the value of version_retention_period
    +   * database option set using
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
    +   * Defaults to 1 hour, if not set.
    +   * 
    + * + * string version_retention_period = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for versionRetentionPeriod. + */ + @java.lang.Override + public com.google.protobuf.ByteString getVersionRetentionPeriodBytes() { + java.lang.Object ref = versionRetentionPeriod_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + versionRetentionPeriod_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int EARLIEST_VERSION_TIME_FIELD_NUMBER = 7; + private com.google.protobuf.Timestamp earliestVersionTime_; + + /** + * + * + *
    +   * Output only. Earliest timestamp at which older versions of the data can be
    +   * read. This value is continuously updated by Cloud Spanner and becomes stale
    +   * the moment it is queried. If you are using this value to recover data, make
    +   * sure to account for the time from the moment when the value is queried to
    +   * the moment when you initiate the recovery.
    +   * 
    + * + * + * .google.protobuf.Timestamp earliest_version_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the earliestVersionTime field is set. + */ + @java.lang.Override + public boolean hasEarliestVersionTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +   * Output only. Earliest timestamp at which older versions of the data can be
    +   * read. This value is continuously updated by Cloud Spanner and becomes stale
    +   * the moment it is queried. If you are using this value to recover data, make
    +   * sure to account for the time from the moment when the value is queried to
    +   * the moment when you initiate the recovery.
    +   * 
    + * + * + * .google.protobuf.Timestamp earliest_version_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The earliestVersionTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getEarliestVersionTime() { + return earliestVersionTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : earliestVersionTime_; + } + + /** + * + * + *
    +   * Output only. Earliest timestamp at which older versions of the data can be
    +   * read. This value is continuously updated by Cloud Spanner and becomes stale
    +   * the moment it is queried. If you are using this value to recover data, make
    +   * sure to account for the time from the moment when the value is queried to
    +   * the moment when you initiate the recovery.
    +   * 
    + * + * + * .google.protobuf.Timestamp earliest_version_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getEarliestVersionTimeOrBuilder() { + return earliestVersionTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : earliestVersionTime_; + } + + public static final int DEFAULT_LEADER_FIELD_NUMBER = 9; + + @SuppressWarnings("serial") + private volatile java.lang.Object defaultLeader_ = ""; + + /** + * + * + *
    +   * Output only. The read-write region which contains the database's leader
    +   * replicas.
    +   *
    +   * This is the same as the value of default_leader
    +   * database option set using DatabaseAdmin.CreateDatabase or
    +   * DatabaseAdmin.UpdateDatabaseDdl. If not explicitly set, this is empty.
    +   * 
    + * + * string default_leader = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The defaultLeader. + */ + @java.lang.Override + public java.lang.String getDefaultLeader() { + java.lang.Object ref = defaultLeader_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + defaultLeader_ = s; + return s; + } + } + + /** + * + * + *
    +   * Output only. The read-write region which contains the database's leader
    +   * replicas.
    +   *
    +   * This is the same as the value of default_leader
    +   * database option set using DatabaseAdmin.CreateDatabase or
    +   * DatabaseAdmin.UpdateDatabaseDdl. If not explicitly set, this is empty.
    +   * 
    + * + * string default_leader = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for defaultLeader. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDefaultLeaderBytes() { + java.lang.Object ref = defaultLeader_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + defaultLeader_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DATABASE_DIALECT_FIELD_NUMBER = 10; + private int databaseDialect_ = 0; + + /** + * + * + *
    +   * Output only. The dialect of the Cloud Spanner Database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for databaseDialect. + */ + @java.lang.Override + public int getDatabaseDialectValue() { + return databaseDialect_; + } + + /** + * + * + *
    +   * Output only. The dialect of the Cloud Spanner Database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The databaseDialect. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.DatabaseDialect getDatabaseDialect() { + com.google.spanner.admin.database.v1.DatabaseDialect result = + com.google.spanner.admin.database.v1.DatabaseDialect.forNumber(databaseDialect_); + return result == null + ? com.google.spanner.admin.database.v1.DatabaseDialect.UNRECOGNIZED + : result; + } + + public static final int ENABLE_DROP_PROTECTION_FIELD_NUMBER = 11; + private boolean enableDropProtection_ = false; + + /** + * + * + *
    +   * Whether drop protection is enabled for this database. Defaults to false,
    +   * if not set. For more details, please see how to [prevent accidental
    +   * database
    +   * deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion).
    +   * 
    + * + * bool enable_drop_protection = 11; + * + * @return The enableDropProtection. + */ + @java.lang.Override + public boolean getEnableDropProtection() { + return enableDropProtection_; + } + + public static final int RECONCILING_FIELD_NUMBER = 12; + private boolean reconciling_ = false; + + /** + * + * + *
    +   * Output only. If true, the database is being updated. If false, there are no
    +   * ongoing update operations for the database.
    +   * 
    + * + * bool reconciling = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The reconciling. + */ + @java.lang.Override + public boolean getReconciling() { + return reconciling_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (state_ + != com.google.spanner.admin.database.v1.Database.State.STATE_UNSPECIFIED.getNumber()) { + output.writeEnum(2, state_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getCreateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(4, getRestoreInfo()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(5, getEncryptionConfig()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(versionRetentionPeriod_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 6, versionRetentionPeriod_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(7, getEarliestVersionTime()); + } + for (int i = 0; i < encryptionInfo_.size(); i++) { + output.writeMessage(8, encryptionInfo_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(defaultLeader_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 9, defaultLeader_); + } + if (databaseDialect_ + != com.google.spanner.admin.database.v1.DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED + .getNumber()) { + output.writeEnum(10, databaseDialect_); + } + if (enableDropProtection_ != false) { + output.writeBool(11, enableDropProtection_); + } + if (reconciling_ != false) 
{ + output.writeBool(12, reconciling_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (state_ + != com.google.spanner.admin.database.v1.Database.State.STATE_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, state_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getCreateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getRestoreInfo()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getEncryptionConfig()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(versionRetentionPeriod_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(6, versionRetentionPeriod_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getEarliestVersionTime()); + } + for (int i = 0; i < encryptionInfo_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(8, encryptionInfo_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(defaultLeader_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(9, defaultLeader_); + } + if (databaseDialect_ + != com.google.spanner.admin.database.v1.DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(10, databaseDialect_); + } + if (enableDropProtection_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(11, enableDropProtection_); + } + if (reconciling_ != false) { + size += 
com.google.protobuf.CodedOutputStream.computeBoolSize(12, reconciling_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.Database)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.Database other = + (com.google.spanner.admin.database.v1.Database) obj; + + if (!getName().equals(other.getName())) return false; + if (state_ != other.state_) return false; + if (hasCreateTime() != other.hasCreateTime()) return false; + if (hasCreateTime()) { + if (!getCreateTime().equals(other.getCreateTime())) return false; + } + if (hasRestoreInfo() != other.hasRestoreInfo()) return false; + if (hasRestoreInfo()) { + if (!getRestoreInfo().equals(other.getRestoreInfo())) return false; + } + if (hasEncryptionConfig() != other.hasEncryptionConfig()) return false; + if (hasEncryptionConfig()) { + if (!getEncryptionConfig().equals(other.getEncryptionConfig())) return false; + } + if (!getEncryptionInfoList().equals(other.getEncryptionInfoList())) return false; + if (!getVersionRetentionPeriod().equals(other.getVersionRetentionPeriod())) return false; + if (hasEarliestVersionTime() != other.hasEarliestVersionTime()) return false; + if (hasEarliestVersionTime()) { + if (!getEarliestVersionTime().equals(other.getEarliestVersionTime())) return false; + } + if (!getDefaultLeader().equals(other.getDefaultLeader())) return false; + if (databaseDialect_ != other.databaseDialect_) return false; + if (getEnableDropProtection() != other.getEnableDropProtection()) return false; + if (getReconciling() != other.getReconciling()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 
41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + state_; + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + } + if (hasRestoreInfo()) { + hash = (37 * hash) + RESTORE_INFO_FIELD_NUMBER; + hash = (53 * hash) + getRestoreInfo().hashCode(); + } + if (hasEncryptionConfig()) { + hash = (37 * hash) + ENCRYPTION_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getEncryptionConfig().hashCode(); + } + if (getEncryptionInfoCount() > 0) { + hash = (37 * hash) + ENCRYPTION_INFO_FIELD_NUMBER; + hash = (53 * hash) + getEncryptionInfoList().hashCode(); + } + hash = (37 * hash) + VERSION_RETENTION_PERIOD_FIELD_NUMBER; + hash = (53 * hash) + getVersionRetentionPeriod().hashCode(); + if (hasEarliestVersionTime()) { + hash = (37 * hash) + EARLIEST_VERSION_TIME_FIELD_NUMBER; + hash = (53 * hash) + getEarliestVersionTime().hashCode(); + } + hash = (37 * hash) + DEFAULT_LEADER_FIELD_NUMBER; + hash = (53 * hash) + getDefaultLeader().hashCode(); + hash = (37 * hash) + DATABASE_DIALECT_FIELD_NUMBER; + hash = (53 * hash) + databaseDialect_; + hash = (37 * hash) + ENABLE_DROP_PROTECTION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableDropProtection()); + hash = (37 * hash) + RECONCILING_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getReconciling()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.Database parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.Database parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.Database parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.Database parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.Database parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.Database parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.Database parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.Database parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.Database parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.Database parseDelimitedFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.Database parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.Database parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.admin.database.v1.Database prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * A Cloud Spanner database.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.Database} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.Database) + com.google.spanner.admin.database.v1.DatabaseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_Database_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_Database_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.Database.class, + com.google.spanner.admin.database.v1.Database.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.Database.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCreateTimeFieldBuilder(); + internalGetRestoreInfoFieldBuilder(); + internalGetEncryptionConfigFieldBuilder(); + internalGetEncryptionInfoFieldBuilder(); + internalGetEarliestVersionTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + state_ = 0; + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + restoreInfo_ = null; + if (restoreInfoBuilder_ != null) { + restoreInfoBuilder_.dispose(); + restoreInfoBuilder_ = null; + } + 
encryptionConfig_ = null; + if (encryptionConfigBuilder_ != null) { + encryptionConfigBuilder_.dispose(); + encryptionConfigBuilder_ = null; + } + if (encryptionInfoBuilder_ == null) { + encryptionInfo_ = java.util.Collections.emptyList(); + } else { + encryptionInfo_ = null; + encryptionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + versionRetentionPeriod_ = ""; + earliestVersionTime_ = null; + if (earliestVersionTimeBuilder_ != null) { + earliestVersionTimeBuilder_.dispose(); + earliestVersionTimeBuilder_ = null; + } + defaultLeader_ = ""; + databaseDialect_ = 0; + enableDropProtection_ = false; + reconciling_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_Database_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.Database getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.Database.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.Database build() { + com.google.spanner.admin.database.v1.Database result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.Database buildPartial() { + com.google.spanner.admin.database.v1.Database result = + new com.google.spanner.admin.database.v1.Database(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.admin.database.v1.Database result) { + if (encryptionInfoBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0)) { + encryptionInfo_ = java.util.Collections.unmodifiableList(encryptionInfo_); + bitField0_ = 
(bitField0_ & ~0x00000020); + } + result.encryptionInfo_ = encryptionInfo_; + } else { + result.encryptionInfo_ = encryptionInfoBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.admin.database.v1.Database result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.state_ = state_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.createTime_ = createTimeBuilder_ == null ? createTime_ : createTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.restoreInfo_ = + restoreInfoBuilder_ == null ? restoreInfo_ : restoreInfoBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.encryptionConfig_ = + encryptionConfigBuilder_ == null ? encryptionConfig_ : encryptionConfigBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.versionRetentionPeriod_ = versionRetentionPeriod_; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.earliestVersionTime_ = + earliestVersionTimeBuilder_ == null + ? 
earliestVersionTime_ + : earliestVersionTimeBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.defaultLeader_ = defaultLeader_; + } + if (((from_bitField0_ & 0x00000200) != 0)) { + result.databaseDialect_ = databaseDialect_; + } + if (((from_bitField0_ & 0x00000400) != 0)) { + result.enableDropProtection_ = enableDropProtection_; + } + if (((from_bitField0_ & 0x00000800) != 0)) { + result.reconciling_ = reconciling_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.Database) { + return mergeFrom((com.google.spanner.admin.database.v1.Database) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.Database other) { + if (other == com.google.spanner.admin.database.v1.Database.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.state_ != 0) { + setStateValue(other.getStateValue()); + } + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (other.hasRestoreInfo()) { + mergeRestoreInfo(other.getRestoreInfo()); + } + if (other.hasEncryptionConfig()) { + mergeEncryptionConfig(other.getEncryptionConfig()); + } + if (encryptionInfoBuilder_ == null) { + if (!other.encryptionInfo_.isEmpty()) { + if (encryptionInfo_.isEmpty()) { + encryptionInfo_ = other.encryptionInfo_; + bitField0_ = (bitField0_ & ~0x00000020); + } else { + ensureEncryptionInfoIsMutable(); + encryptionInfo_.addAll(other.encryptionInfo_); + } + onChanged(); + } + } else { + if (!other.encryptionInfo_.isEmpty()) { + if (encryptionInfoBuilder_.isEmpty()) { + encryptionInfoBuilder_.dispose(); + encryptionInfoBuilder_ = null; + encryptionInfo_ = other.encryptionInfo_; + bitField0_ = (bitField0_ & ~0x00000020); 
+ encryptionInfoBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetEncryptionInfoFieldBuilder() + : null; + } else { + encryptionInfoBuilder_.addAllMessages(other.encryptionInfo_); + } + } + } + if (!other.getVersionRetentionPeriod().isEmpty()) { + versionRetentionPeriod_ = other.versionRetentionPeriod_; + bitField0_ |= 0x00000040; + onChanged(); + } + if (other.hasEarliestVersionTime()) { + mergeEarliestVersionTime(other.getEarliestVersionTime()); + } + if (!other.getDefaultLeader().isEmpty()) { + defaultLeader_ = other.defaultLeader_; + bitField0_ |= 0x00000100; + onChanged(); + } + if (other.databaseDialect_ != 0) { + setDatabaseDialectValue(other.getDatabaseDialectValue()); + } + if (other.getEnableDropProtection() != false) { + setEnableDropProtection(other.getEnableDropProtection()); + } + if (other.getReconciling() != false) { + setReconciling(other.getReconciling()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + state_ = input.readEnum(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + input.readMessage( + internalGetCreateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetRestoreInfoFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 
0x00000008; + break; + } // case 34 + case 42: + { + input.readMessage( + internalGetEncryptionConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 50: + { + versionRetentionPeriod_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000040; + break; + } // case 50 + case 58: + { + input.readMessage( + internalGetEarliestVersionTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000080; + break; + } // case 58 + case 66: + { + com.google.spanner.admin.database.v1.EncryptionInfo m = + input.readMessage( + com.google.spanner.admin.database.v1.EncryptionInfo.parser(), + extensionRegistry); + if (encryptionInfoBuilder_ == null) { + ensureEncryptionInfoIsMutable(); + encryptionInfo_.add(m); + } else { + encryptionInfoBuilder_.addMessage(m); + } + break; + } // case 66 + case 74: + { + defaultLeader_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000100; + break; + } // case 74 + case 80: + { + databaseDialect_ = input.readEnum(); + bitField0_ |= 0x00000200; + break; + } // case 80 + case 88: + { + enableDropProtection_ = input.readBool(); + bitField0_ |= 0x00000400; + break; + } // case 88 + case 96: + { + reconciling_ = input.readBool(); + bitField0_ |= 0x00000800; + break; + } // case 96 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Required. The name of the database. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`,
    +     * where `<database>` is as specified in the `CREATE DATABASE`
    +     * statement. This name can be passed to other API methods to
    +     * identify the database.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the database. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`,
    +     * where `<database>` is as specified in the `CREATE DATABASE`
    +     * statement. This name can be passed to other API methods to
    +     * identify the database.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the database. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`,
    +     * where `<database>` is as specified in the `CREATE DATABASE`
    +     * statement. This name can be passed to other API methods to
    +     * identify the database.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the database. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`,
    +     * where `<database>` is as specified in the `CREATE DATABASE`
    +     * statement. This name can be passed to other API methods to
    +     * identify the database.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the database. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`,
    +     * where `<database>` is as specified in the `CREATE DATABASE`
    +     * statement. This name can be passed to other API methods to
    +     * identify the database.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int state_ = 0; + + /** + * + * + *
    +     * Output only. The current database state.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Database.State state = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + @java.lang.Override + public int getStateValue() { + return state_; + } + + /** + * + * + *
    +     * Output only. The current database state.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Database.State state = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The enum numeric value on the wire for state to set. + * @return This builder for chaining. + */ + public Builder setStateValue(int value) { + state_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The current database state.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Database.State state = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.Database.State getState() { + com.google.spanner.admin.database.v1.Database.State result = + com.google.spanner.admin.database.v1.Database.State.forNumber(state_); + return result == null + ? com.google.spanner.admin.database.v1.Database.State.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * Output only. The current database state.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Database.State state = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The state to set. + * @return This builder for chaining. + */ + public Builder setState(com.google.spanner.admin.database.v1.Database.State value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + state_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The current database state.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Database.State state = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000002); + state_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + + /** + * + * + *
    +     * Output only. If exists, the time at which the database creation started.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Output only. If exists, the time at which the database creation started.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. If exists, the time at which the database creation started.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + } else { + createTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. If exists, the time at which the database creation started.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. If exists, the time at which the database creation started.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && createTime_ != null + && createTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCreateTimeBuilder().mergeFrom(value); + } else { + createTime_ = value; + } + } else { + createTimeBuilder_.mergeFrom(value); + } + if (createTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. If exists, the time at which the database creation started.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000004); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. If exists, the time at which the database creation started.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetCreateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. If exists, the time at which the database creation started.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + + /** + * + * + *
    +     * Output only. If exists, the time at which the database creation started.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private com.google.spanner.admin.database.v1.RestoreInfo restoreInfo_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.RestoreInfo, + com.google.spanner.admin.database.v1.RestoreInfo.Builder, + com.google.spanner.admin.database.v1.RestoreInfoOrBuilder> + restoreInfoBuilder_; + + /** + * + * + *
    +     * Output only. Applicable only for restored databases. Contains information
    +     * about the restore source.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreInfo restore_info = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the restoreInfo field is set. + */ + public boolean hasRestoreInfo() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Output only. Applicable only for restored databases. Contains information
    +     * about the restore source.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreInfo restore_info = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The restoreInfo. + */ + public com.google.spanner.admin.database.v1.RestoreInfo getRestoreInfo() { + if (restoreInfoBuilder_ == null) { + return restoreInfo_ == null + ? com.google.spanner.admin.database.v1.RestoreInfo.getDefaultInstance() + : restoreInfo_; + } else { + return restoreInfoBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. Applicable only for restored databases. Contains information
    +     * about the restore source.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreInfo restore_info = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setRestoreInfo(com.google.spanner.admin.database.v1.RestoreInfo value) { + if (restoreInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + restoreInfo_ = value; + } else { + restoreInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Applicable only for restored databases. Contains information
    +     * about the restore source.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreInfo restore_info = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setRestoreInfo( + com.google.spanner.admin.database.v1.RestoreInfo.Builder builderForValue) { + if (restoreInfoBuilder_ == null) { + restoreInfo_ = builderForValue.build(); + } else { + restoreInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Applicable only for restored databases. Contains information
    +     * about the restore source.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreInfo restore_info = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeRestoreInfo(com.google.spanner.admin.database.v1.RestoreInfo value) { + if (restoreInfoBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && restoreInfo_ != null + && restoreInfo_ + != com.google.spanner.admin.database.v1.RestoreInfo.getDefaultInstance()) { + getRestoreInfoBuilder().mergeFrom(value); + } else { + restoreInfo_ = value; + } + } else { + restoreInfoBuilder_.mergeFrom(value); + } + if (restoreInfo_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. Applicable only for restored databases. Contains information
    +     * about the restore source.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreInfo restore_info = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearRestoreInfo() { + bitField0_ = (bitField0_ & ~0x00000008); + restoreInfo_ = null; + if (restoreInfoBuilder_ != null) { + restoreInfoBuilder_.dispose(); + restoreInfoBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Applicable only for restored databases. Contains information
    +     * about the restore source.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreInfo restore_info = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.RestoreInfo.Builder getRestoreInfoBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetRestoreInfoFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. Applicable only for restored databases. Contains information
    +     * about the restore source.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreInfo restore_info = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.RestoreInfoOrBuilder getRestoreInfoOrBuilder() { + if (restoreInfoBuilder_ != null) { + return restoreInfoBuilder_.getMessageOrBuilder(); + } else { + return restoreInfo_ == null + ? com.google.spanner.admin.database.v1.RestoreInfo.getDefaultInstance() + : restoreInfo_; + } + } + + /** + * + * + *
    +     * Output only. Applicable only for restored databases. Contains information
    +     * about the restore source.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreInfo restore_info = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.RestoreInfo, + com.google.spanner.admin.database.v1.RestoreInfo.Builder, + com.google.spanner.admin.database.v1.RestoreInfoOrBuilder> + internalGetRestoreInfoFieldBuilder() { + if (restoreInfoBuilder_ == null) { + restoreInfoBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.RestoreInfo, + com.google.spanner.admin.database.v1.RestoreInfo.Builder, + com.google.spanner.admin.database.v1.RestoreInfoOrBuilder>( + getRestoreInfo(), getParentForChildren(), isClean()); + restoreInfo_ = null; + } + return restoreInfoBuilder_; + } + + private com.google.spanner.admin.database.v1.EncryptionConfig encryptionConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionConfig, + com.google.spanner.admin.database.v1.EncryptionConfig.Builder, + com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder> + encryptionConfigBuilder_; + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption configuration for the database.
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the encryptionConfig field is set. + */ + public boolean hasEncryptionConfig() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption configuration for the database.
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The encryptionConfig. + */ + public com.google.spanner.admin.database.v1.EncryptionConfig getEncryptionConfig() { + if (encryptionConfigBuilder_ == null) { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } else { + return encryptionConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption configuration for the database.
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setEncryptionConfig( + com.google.spanner.admin.database.v1.EncryptionConfig value) { + if (encryptionConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + encryptionConfig_ = value; + } else { + encryptionConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption configuration for the database.
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setEncryptionConfig( + com.google.spanner.admin.database.v1.EncryptionConfig.Builder builderForValue) { + if (encryptionConfigBuilder_ == null) { + encryptionConfig_ = builderForValue.build(); + } else { + encryptionConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption configuration for the database.
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeEncryptionConfig( + com.google.spanner.admin.database.v1.EncryptionConfig value) { + if (encryptionConfigBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && encryptionConfig_ != null + && encryptionConfig_ + != com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance()) { + getEncryptionConfigBuilder().mergeFrom(value); + } else { + encryptionConfig_ = value; + } + } else { + encryptionConfigBuilder_.mergeFrom(value); + } + if (encryptionConfig_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption configuration for the database.
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearEncryptionConfig() { + bitField0_ = (bitField0_ & ~0x00000010); + encryptionConfig_ = null; + if (encryptionConfigBuilder_ != null) { + encryptionConfigBuilder_.dispose(); + encryptionConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption configuration for the database.
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.EncryptionConfig.Builder + getEncryptionConfigBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetEncryptionConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption configuration for the database.
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder + getEncryptionConfigOrBuilder() { + if (encryptionConfigBuilder_ != null) { + return encryptionConfigBuilder_.getMessageOrBuilder(); + } else { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption configuration for the database.
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionConfig, + com.google.spanner.admin.database.v1.EncryptionConfig.Builder, + com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder> + internalGetEncryptionConfigFieldBuilder() { + if (encryptionConfigBuilder_ == null) { + encryptionConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionConfig, + com.google.spanner.admin.database.v1.EncryptionConfig.Builder, + com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder>( + getEncryptionConfig(), getParentForChildren(), isClean()); + encryptionConfig_ = null; + } + return encryptionConfigBuilder_; + } + + private java.util.List encryptionInfo_ = + java.util.Collections.emptyList(); + + private void ensureEncryptionInfoIsMutable() { + if (!((bitField0_ & 0x00000020) != 0)) { + encryptionInfo_ = + new java.util.ArrayList( + encryptionInfo_); + bitField0_ |= 0x00000020; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionInfo, + com.google.spanner.admin.database.v1.EncryptionInfo.Builder, + com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder> + encryptionInfoBuilder_; + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption information for the database, such as
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
    +     *
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     *
    +     * This field is propagated lazily from the backend. There might be a delay
    +     * from when a key version is being used and when it appears in this field.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getEncryptionInfoList() { + if (encryptionInfoBuilder_ == null) { + return java.util.Collections.unmodifiableList(encryptionInfo_); + } else { + return encryptionInfoBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption information for the database, such as
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
    +     *
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     *
    +     * This field is propagated lazily from the backend. There might be a delay
    +     * from when a key version is being used and when it appears in this field.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public int getEncryptionInfoCount() { + if (encryptionInfoBuilder_ == null) { + return encryptionInfo_.size(); + } else { + return encryptionInfoBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption information for the database, such as
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
    +     *
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     *
    +     * This field is propagated lazily from the backend. There might be a delay
    +     * from when a key version is being used and when it appears in this field.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.EncryptionInfo getEncryptionInfo(int index) { + if (encryptionInfoBuilder_ == null) { + return encryptionInfo_.get(index); + } else { + return encryptionInfoBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption information for the database, such as
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
    +     *
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     *
    +     * This field is propagated lazily from the backend. There might be a delay
    +     * from when a key version is being used and when it appears in this field.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setEncryptionInfo( + int index, com.google.spanner.admin.database.v1.EncryptionInfo value) { + if (encryptionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEncryptionInfoIsMutable(); + encryptionInfo_.set(index, value); + onChanged(); + } else { + encryptionInfoBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption information for the database, such as
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
    +     *
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     *
    +     * This field is propagated lazily from the backend. There might be a delay
    +     * from when a key version is being used and when it appears in this field.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setEncryptionInfo( + int index, com.google.spanner.admin.database.v1.EncryptionInfo.Builder builderForValue) { + if (encryptionInfoBuilder_ == null) { + ensureEncryptionInfoIsMutable(); + encryptionInfo_.set(index, builderForValue.build()); + onChanged(); + } else { + encryptionInfoBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption information for the database, such as
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
    +     *
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     *
    +     * This field is propagated lazily from the backend. There might be a delay
    +     * from when a key version is being used and when it appears in this field.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addEncryptionInfo(com.google.spanner.admin.database.v1.EncryptionInfo value) { + if (encryptionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEncryptionInfoIsMutable(); + encryptionInfo_.add(value); + onChanged(); + } else { + encryptionInfoBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption information for the database, such as
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
    +     *
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     *
    +     * This field is propagated lazily from the backend. There might be a delay
    +     * from when a key version is being used and when it appears in this field.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addEncryptionInfo( + int index, com.google.spanner.admin.database.v1.EncryptionInfo value) { + if (encryptionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEncryptionInfoIsMutable(); + encryptionInfo_.add(index, value); + onChanged(); + } else { + encryptionInfoBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption information for the database, such as
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
    +     *
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     *
    +     * This field is propagated lazily from the backend. There might be a delay
    +     * from when a key version is being used and when it appears in this field.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addEncryptionInfo( + com.google.spanner.admin.database.v1.EncryptionInfo.Builder builderForValue) { + if (encryptionInfoBuilder_ == null) { + ensureEncryptionInfoIsMutable(); + encryptionInfo_.add(builderForValue.build()); + onChanged(); + } else { + encryptionInfoBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption information for the database, such as
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
    +     *
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     *
    +     * This field is propagated lazily from the backend. There might be a delay
    +     * from when a key version is being used and when it appears in this field.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addEncryptionInfo( + int index, com.google.spanner.admin.database.v1.EncryptionInfo.Builder builderForValue) { + if (encryptionInfoBuilder_ == null) { + ensureEncryptionInfoIsMutable(); + encryptionInfo_.add(index, builderForValue.build()); + onChanged(); + } else { + encryptionInfoBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption information for the database, such as
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
    +     *
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     *
    +     * This field is propagated lazily from the backend. There might be a delay
    +     * from when a key version is being used and when it appears in this field.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addAllEncryptionInfo( + java.lang.Iterable values) { + if (encryptionInfoBuilder_ == null) { + ensureEncryptionInfoIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, encryptionInfo_); + onChanged(); + } else { + encryptionInfoBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption information for the database, such as
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
    +     *
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     *
    +     * This field is propagated lazily from the backend. There might be a delay
    +     * from when a key version is being used and when it appears in this field.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearEncryptionInfo() { + if (encryptionInfoBuilder_ == null) { + encryptionInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + } else { + encryptionInfoBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption information for the database, such as
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
    +     *
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     *
    +     * This field is propagated lazily from the backend. There might be a delay
    +     * from when a key version is being used and when it appears in this field.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder removeEncryptionInfo(int index) { + if (encryptionInfoBuilder_ == null) { + ensureEncryptionInfoIsMutable(); + encryptionInfo_.remove(index); + onChanged(); + } else { + encryptionInfoBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption information for the database, such as
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
    +     *
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     *
    +     * This field is propagated lazily from the backend. There might be a delay
    +     * from when a key version is being used and when it appears in this field.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.EncryptionInfo.Builder getEncryptionInfoBuilder( + int index) { + return internalGetEncryptionInfoFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption information for the database, such as
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
    +     *
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     *
    +     * This field is propagated lazily from the backend. There might be a delay
    +     * from when a key version is being used and when it appears in this field.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder getEncryptionInfoOrBuilder( + int index) { + if (encryptionInfoBuilder_ == null) { + return encryptionInfo_.get(index); + } else { + return encryptionInfoBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption information for the database, such as
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
    +     *
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     *
    +     * This field is propagated lazily from the backend. There might be a delay
    +     * from when a key version is being used and when it appears in this field.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getEncryptionInfoOrBuilderList() { + if (encryptionInfoBuilder_ != null) { + return encryptionInfoBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(encryptionInfo_); + } + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption information for the database, such as
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
    +     *
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     *
    +     * This field is propagated lazily from the backend. There might be a delay
    +     * from when a key version is being used and when it appears in this field.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.EncryptionInfo.Builder addEncryptionInfoBuilder() { + return internalGetEncryptionInfoFieldBuilder() + .addBuilder(com.google.spanner.admin.database.v1.EncryptionInfo.getDefaultInstance()); + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption information for the database, such as
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
    +     *
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     *
    +     * This field is propagated lazily from the backend. There might be a delay
    +     * from when a key version is being used and when it appears in this field.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.EncryptionInfo.Builder addEncryptionInfoBuilder( + int index) { + return internalGetEncryptionInfoFieldBuilder() + .addBuilder( + index, com.google.spanner.admin.database.v1.EncryptionInfo.getDefaultInstance()); + } + + /** + * + * + *
    +     * Output only. For databases that are using customer managed encryption, this
    +     * field contains the encryption information for the database, such as
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
    +     *
    +     * For databases that are using Google default or other types of encryption,
    +     * this field is empty.
    +     *
    +     * This field is propagated lazily from the backend. There might be a delay
    +     * from when a key version is being used and when it appears in this field.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getEncryptionInfoBuilderList() { + return internalGetEncryptionInfoFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionInfo, + com.google.spanner.admin.database.v1.EncryptionInfo.Builder, + com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder> + internalGetEncryptionInfoFieldBuilder() { + if (encryptionInfoBuilder_ == null) { + encryptionInfoBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionInfo, + com.google.spanner.admin.database.v1.EncryptionInfo.Builder, + com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder>( + encryptionInfo_, + ((bitField0_ & 0x00000020) != 0), + getParentForChildren(), + isClean()); + encryptionInfo_ = null; + } + return encryptionInfoBuilder_; + } + + private java.lang.Object versionRetentionPeriod_ = ""; + + /** + * + * + *
    +     * Output only. The period in which Cloud Spanner retains all versions of data
    +     * for the database. This is the same as the value of version_retention_period
    +     * database option set using
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
    +     * Defaults to 1 hour, if not set.
    +     * 
    + * + * string version_retention_period = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The versionRetentionPeriod. + */ + public java.lang.String getVersionRetentionPeriod() { + java.lang.Object ref = versionRetentionPeriod_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + versionRetentionPeriod_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Output only. The period in which Cloud Spanner retains all versions of data
    +     * for the database. This is the same as the value of version_retention_period
    +     * database option set using
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
    +     * Defaults to 1 hour, if not set.
    +     * 
    + * + * string version_retention_period = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The bytes for versionRetentionPeriod. + */ + public com.google.protobuf.ByteString getVersionRetentionPeriodBytes() { + java.lang.Object ref = versionRetentionPeriod_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + versionRetentionPeriod_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Output only. The period in which Cloud Spanner retains all versions of data
    +     * for the database. This is the same as the value of version_retention_period
    +     * database option set using
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
    +     * Defaults to 1 hour, if not set.
    +     * 
    + * + * string version_retention_period = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The versionRetentionPeriod to set. + * @return This builder for chaining. + */ + public Builder setVersionRetentionPeriod(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + versionRetentionPeriod_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The period in which Cloud Spanner retains all versions of data
    +     * for the database. This is the same as the value of version_retention_period
    +     * database option set using
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
    +     * Defaults to 1 hour, if not set.
    +     * 
    + * + * string version_retention_period = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearVersionRetentionPeriod() { + versionRetentionPeriod_ = getDefaultInstance().getVersionRetentionPeriod(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The period in which Cloud Spanner retains all versions of data
    +     * for the database. This is the same as the value of version_retention_period
    +     * database option set using
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
    +     * Defaults to 1 hour, if not set.
    +     * 
    + * + * string version_retention_period = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The bytes for versionRetentionPeriod to set. + * @return This builder for chaining. + */ + public Builder setVersionRetentionPeriodBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + versionRetentionPeriod_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp earliestVersionTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + earliestVersionTimeBuilder_; + + /** + * + * + *
    +     * Output only. Earliest timestamp at which older versions of the data can be
    +     * read. This value is continuously updated by Cloud Spanner and becomes stale
    +     * the moment it is queried. If you are using this value to recover data, make
    +     * sure to account for the time from the moment when the value is queried to
    +     * the moment when you initiate the recovery.
    +     * 
    + * + * + * .google.protobuf.Timestamp earliest_version_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the earliestVersionTime field is set. + */ + public boolean hasEarliestVersionTime() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
    +     * Output only. Earliest timestamp at which older versions of the data can be
    +     * read. This value is continuously updated by Cloud Spanner and becomes stale
    +     * the moment it is queried. If you are using this value to recover data, make
    +     * sure to account for the time from the moment when the value is queried to
    +     * the moment when you initiate the recovery.
    +     * 
    + * + * + * .google.protobuf.Timestamp earliest_version_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The earliestVersionTime. + */ + public com.google.protobuf.Timestamp getEarliestVersionTime() { + if (earliestVersionTimeBuilder_ == null) { + return earliestVersionTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : earliestVersionTime_; + } else { + return earliestVersionTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. Earliest timestamp at which older versions of the data can be
    +     * read. This value is continuously updated by Cloud Spanner and becomes stale
    +     * the moment it is queried. If you are using this value to recover data, make
    +     * sure to account for the time from the moment when the value is queried to
    +     * the moment when you initiate the recovery.
    +     * 
    + * + * + * .google.protobuf.Timestamp earliest_version_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setEarliestVersionTime(com.google.protobuf.Timestamp value) { + if (earliestVersionTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + earliestVersionTime_ = value; + } else { + earliestVersionTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Earliest timestamp at which older versions of the data can be
    +     * read. This value is continuously updated by Cloud Spanner and becomes stale
    +     * the moment it is queried. If you are using this value to recover data, make
    +     * sure to account for the time from the moment when the value is queried to
    +     * the moment when you initiate the recovery.
    +     * 
    + * + * + * .google.protobuf.Timestamp earliest_version_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setEarliestVersionTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (earliestVersionTimeBuilder_ == null) { + earliestVersionTime_ = builderForValue.build(); + } else { + earliestVersionTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Earliest timestamp at which older versions of the data can be
    +     * read. This value is continuously updated by Cloud Spanner and becomes stale
    +     * the moment it is queried. If you are using this value to recover data, make
    +     * sure to account for the time from the moment when the value is queried to
    +     * the moment when you initiate the recovery.
    +     * 
    + * + * + * .google.protobuf.Timestamp earliest_version_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeEarliestVersionTime(com.google.protobuf.Timestamp value) { + if (earliestVersionTimeBuilder_ == null) { + if (((bitField0_ & 0x00000080) != 0) + && earliestVersionTime_ != null + && earliestVersionTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getEarliestVersionTimeBuilder().mergeFrom(value); + } else { + earliestVersionTime_ = value; + } + } else { + earliestVersionTimeBuilder_.mergeFrom(value); + } + if (earliestVersionTime_ != null) { + bitField0_ |= 0x00000080; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. Earliest timestamp at which older versions of the data can be
    +     * read. This value is continuously updated by Cloud Spanner and becomes stale
    +     * the moment it is queried. If you are using this value to recover data, make
    +     * sure to account for the time from the moment when the value is queried to
    +     * the moment when you initiate the recovery.
    +     * 
    + * + * + * .google.protobuf.Timestamp earliest_version_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearEarliestVersionTime() { + bitField0_ = (bitField0_ & ~0x00000080); + earliestVersionTime_ = null; + if (earliestVersionTimeBuilder_ != null) { + earliestVersionTimeBuilder_.dispose(); + earliestVersionTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Earliest timestamp at which older versions of the data can be
    +     * read. This value is continuously updated by Cloud Spanner and becomes stale
    +     * the moment it is queried. If you are using this value to recover data, make
    +     * sure to account for the time from the moment when the value is queried to
    +     * the moment when you initiate the recovery.
    +     * 
    + * + * + * .google.protobuf.Timestamp earliest_version_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getEarliestVersionTimeBuilder() { + bitField0_ |= 0x00000080; + onChanged(); + return internalGetEarliestVersionTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. Earliest timestamp at which older versions of the data can be
    +     * read. This value is continuously updated by Cloud Spanner and becomes stale
    +     * the moment it is queried. If you are using this value to recover data, make
    +     * sure to account for the time from the moment when the value is queried to
    +     * the moment when you initiate the recovery.
    +     * 
    + * + * + * .google.protobuf.Timestamp earliest_version_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getEarliestVersionTimeOrBuilder() { + if (earliestVersionTimeBuilder_ != null) { + return earliestVersionTimeBuilder_.getMessageOrBuilder(); + } else { + return earliestVersionTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : earliestVersionTime_; + } + } + + /** + * + * + *
    +     * Output only. Earliest timestamp at which older versions of the data can be
    +     * read. This value is continuously updated by Cloud Spanner and becomes stale
    +     * the moment it is queried. If you are using this value to recover data, make
    +     * sure to account for the time from the moment when the value is queried to
    +     * the moment when you initiate the recovery.
    +     * 
    + * + * + * .google.protobuf.Timestamp earliest_version_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetEarliestVersionTimeFieldBuilder() { + if (earliestVersionTimeBuilder_ == null) { + earliestVersionTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getEarliestVersionTime(), getParentForChildren(), isClean()); + earliestVersionTime_ = null; + } + return earliestVersionTimeBuilder_; + } + + private java.lang.Object defaultLeader_ = ""; + + /** + * + * + *
    +     * Output only. The read-write region which contains the database's leader
    +     * replicas.
    +     *
    +     * This is the same as the value of default_leader
    +     * database option set using DatabaseAdmin.CreateDatabase or
    +     * DatabaseAdmin.UpdateDatabaseDdl. If not explicitly set, this is empty.
    +     * 
    + * + * string default_leader = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The defaultLeader. + */ + public java.lang.String getDefaultLeader() { + java.lang.Object ref = defaultLeader_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + defaultLeader_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Output only. The read-write region which contains the database's leader
    +     * replicas.
    +     *
    +     * This is the same as the value of default_leader
    +     * database option set using DatabaseAdmin.CreateDatabase or
    +     * DatabaseAdmin.UpdateDatabaseDdl. If not explicitly set, this is empty.
    +     * 
    + * + * string default_leader = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for defaultLeader. + */ + public com.google.protobuf.ByteString getDefaultLeaderBytes() { + java.lang.Object ref = defaultLeader_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + defaultLeader_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Output only. The read-write region which contains the database's leader
    +     * replicas.
    +     *
    +     * This is the same as the value of default_leader
    +     * database option set using DatabaseAdmin.CreateDatabase or
    +     * DatabaseAdmin.UpdateDatabaseDdl. If not explicitly set, this is empty.
    +     * 
    + * + * string default_leader = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The defaultLeader to set. + * @return This builder for chaining. + */ + public Builder setDefaultLeader(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + defaultLeader_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The read-write region which contains the database's leader
    +     * replicas.
    +     *
    +     * This is the same as the value of default_leader
    +     * database option set using DatabaseAdmin.CreateDatabase or
    +     * DatabaseAdmin.UpdateDatabaseDdl. If not explicitly set, this is empty.
    +     * 
    + * + * string default_leader = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearDefaultLeader() { + defaultLeader_ = getDefaultInstance().getDefaultLeader(); + bitField0_ = (bitField0_ & ~0x00000100); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The read-write region which contains the database's leader
    +     * replicas.
    +     *
    +     * This is the same as the value of default_leader
    +     * database option set using DatabaseAdmin.CreateDatabase or
    +     * DatabaseAdmin.UpdateDatabaseDdl. If not explicitly set, this is empty.
    +     * 
    + * + * string default_leader = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for defaultLeader to set. + * @return This builder for chaining. + */ + public Builder setDefaultLeaderBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + defaultLeader_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + private int databaseDialect_ = 0; + + /** + * + * + *
    +     * Output only. The dialect of the Cloud Spanner Database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for databaseDialect. + */ + @java.lang.Override + public int getDatabaseDialectValue() { + return databaseDialect_; + } + + /** + * + * + *
    +     * Output only. The dialect of the Cloud Spanner Database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The enum numeric value on the wire for databaseDialect to set. + * @return This builder for chaining. + */ + public Builder setDatabaseDialectValue(int value) { + databaseDialect_ = value; + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The dialect of the Cloud Spanner Database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The databaseDialect. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.DatabaseDialect getDatabaseDialect() { + com.google.spanner.admin.database.v1.DatabaseDialect result = + com.google.spanner.admin.database.v1.DatabaseDialect.forNumber(databaseDialect_); + return result == null + ? com.google.spanner.admin.database.v1.DatabaseDialect.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * Output only. The dialect of the Cloud Spanner Database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The databaseDialect to set. + * @return This builder for chaining. + */ + public Builder setDatabaseDialect(com.google.spanner.admin.database.v1.DatabaseDialect value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000200; + databaseDialect_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The dialect of the Cloud Spanner Database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearDatabaseDialect() { + bitField0_ = (bitField0_ & ~0x00000200); + databaseDialect_ = 0; + onChanged(); + return this; + } + + private boolean enableDropProtection_; + + /** + * + * + *
    +     * Whether drop protection is enabled for this database. Defaults to false,
    +     * if not set. For more details, please see how to [prevent accidental
    +     * database
    +     * deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion).
    +     * 
    + * + * bool enable_drop_protection = 11; + * + * @return The enableDropProtection. + */ + @java.lang.Override + public boolean getEnableDropProtection() { + return enableDropProtection_; + } + + /** + * + * + *
    +     * Whether drop protection is enabled for this database. Defaults to false,
    +     * if not set. For more details, please see how to [prevent accidental
    +     * database
    +     * deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion).
    +     * 
    + * + * bool enable_drop_protection = 11; + * + * @param value The enableDropProtection to set. + * @return This builder for chaining. + */ + public Builder setEnableDropProtection(boolean value) { + + enableDropProtection_ = value; + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Whether drop protection is enabled for this database. Defaults to false,
    +     * if not set. For more details, please see how to [prevent accidental
    +     * database
    +     * deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion).
    +     * 
    + * + * bool enable_drop_protection = 11; + * + * @return This builder for chaining. + */ + public Builder clearEnableDropProtection() { + bitField0_ = (bitField0_ & ~0x00000400); + enableDropProtection_ = false; + onChanged(); + return this; + } + + private boolean reconciling_; + + /** + * + * + *
    +     * Output only. If true, the database is being updated. If false, there are no
    +     * ongoing update operations for the database.
    +     * 
    + * + * bool reconciling = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The reconciling. + */ + @java.lang.Override + public boolean getReconciling() { + return reconciling_; + } + + /** + * + * + *
    +     * Output only. If true, the database is being updated. If false, there are no
    +     * ongoing update operations for the database.
    +     * 
    + * + * bool reconciling = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The reconciling to set. + * @return This builder for chaining. + */ + public Builder setReconciling(boolean value) { + + reconciling_ = value; + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. If true, the database is being updated. If false, there are no
    +     * ongoing update operations for the database.
    +     * 
    + * + * bool reconciling = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearReconciling() { + bitField0_ = (bitField0_ & ~0x00000800); + reconciling_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.Database) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.Database) + private static final com.google.spanner.admin.database.v1.Database DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.Database(); + } + + public static com.google.spanner.admin.database.v1.Database getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Database parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.Database getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseDialect.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseDialect.java new file mode 100644 index 000000000000..aa4a8200db74 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseDialect.java @@ -0,0 +1,193 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/common.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Indicates the dialect type of a database.
    + * 
    + * + * Protobuf enum {@code google.spanner.admin.database.v1.DatabaseDialect} + */ +@com.google.protobuf.Generated +public enum DatabaseDialect implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +   * Default value. This value will create a database with the
    +   * GOOGLE_STANDARD_SQL dialect.
    +   * 
    + * + * DATABASE_DIALECT_UNSPECIFIED = 0; + */ + DATABASE_DIALECT_UNSPECIFIED(0), + /** + * + * + *
    +   * GoogleSQL supported SQL.
    +   * 
    + * + * GOOGLE_STANDARD_SQL = 1; + */ + GOOGLE_STANDARD_SQL(1), + /** + * + * + *
    +   * PostgreSQL supported SQL.
    +   * 
    + * + * POSTGRESQL = 2; + */ + POSTGRESQL(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DatabaseDialect"); + } + + /** + * + * + *
    +   * Default value. This value will create a database with the
    +   * GOOGLE_STANDARD_SQL dialect.
    +   * 
    + * + * DATABASE_DIALECT_UNSPECIFIED = 0; + */ + public static final int DATABASE_DIALECT_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +   * GoogleSQL supported SQL.
    +   * 
    + * + * GOOGLE_STANDARD_SQL = 1; + */ + public static final int GOOGLE_STANDARD_SQL_VALUE = 1; + + /** + * + * + *
    +   * PostgreSQL supported SQL.
    +   * 
    + * + * POSTGRESQL = 2; + */ + public static final int POSTGRESQL_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static DatabaseDialect valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static DatabaseDialect forNumber(int value) { + switch (value) { + case 0: + return DATABASE_DIALECT_UNSPECIFIED; + case 1: + return GOOGLE_STANDARD_SQL; + case 2: + return POSTGRESQL; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public DatabaseDialect findValueByNumber(int number) { + return DatabaseDialect.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.admin.database.v1.CommonProto.getDescriptor().getEnumTypes().get(0); + } + + private static final DatabaseDialect[] VALUES = values(); + + public static 
DatabaseDialect valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private DatabaseDialect(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.admin.database.v1.DatabaseDialect) +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseName.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseName.java new file mode 100644 index 000000000000..55973a0c023b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseName.java @@ -0,0 +1,223 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.spanner.admin.database.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +@Generated("by gapic-generator-java") +public class DatabaseName implements ResourceName { + private static final PathTemplate PROJECT_INSTANCE_DATABASE = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/instances/{instance}/databases/{database}"); + private volatile Map fieldValuesMap; + private final String project; + private final String instance; + private final String database; + + @Deprecated + protected DatabaseName() { + project = null; + instance = null; + database = null; + } + + private DatabaseName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + instance = Preconditions.checkNotNull(builder.getInstance()); + database = Preconditions.checkNotNull(builder.getDatabase()); + } + + public String getProject() { + return project; + } + + public String getInstance() { + return instance; + } + + public String getDatabase() { + return database; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static DatabaseName of(String project, String instance, String database) { + return newBuilder().setProject(project).setInstance(instance).setDatabase(database).build(); + } + + public static String format(String project, String instance, String database) { + return newBuilder() + .setProject(project) + .setInstance(instance) + .setDatabase(database) + .build() + .toString(); + } + + public static DatabaseName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map 
matchMap = + PROJECT_INSTANCE_DATABASE.validatedMatch( + formattedString, "DatabaseName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("instance"), matchMap.get("database")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (DatabaseName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_INSTANCE_DATABASE.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (instance != null) { + fieldMapBuilder.put("instance", instance); + } + if (database != null) { + fieldMapBuilder.put("database", database); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_INSTANCE_DATABASE.instantiate( + "project", project, "instance", instance, "database", database); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + DatabaseName that = ((DatabaseName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.instance, that.instance) + && Objects.equals(this.database, that.database); + } + return false; + } + + @Override + public int hashCode() { + 
int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(instance); + h *= 1000003; + h ^= Objects.hashCode(database); + return h; + } + + /** Builder for projects/{project}/instances/{instance}/databases/{database}. */ + public static class Builder { + private String project; + private String instance; + private String database; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getInstance() { + return instance; + } + + public String getDatabase() { + return database; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setInstance(String instance) { + this.instance = instance; + return this; + } + + public Builder setDatabase(String database) { + this.database = database; + return this; + } + + private Builder(DatabaseName databaseName) { + this.project = databaseName.project; + this.instance = databaseName.instance; + this.database = databaseName.database; + } + + public DatabaseName build() { + return new DatabaseName(this); + } + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseOrBuilder.java new file mode 100644 index 000000000000..530c25d0ff56 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseOrBuilder.java @@ -0,0 +1,527 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface DatabaseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.Database) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the database. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`,
    +   * where `<database>` is as specified in the `CREATE DATABASE`
    +   * statement. This name can be passed to other API methods to
    +   * identify the database.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Required. The name of the database. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`,
    +   * where `<database>` is as specified in the `CREATE DATABASE`
    +   * statement. This name can be passed to other API methods to
    +   * identify the database.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +   * Output only. The current database state.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Database.State state = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + int getStateValue(); + + /** + * + * + *
    +   * Output only. The current database state.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Database.State state = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + com.google.spanner.admin.database.v1.Database.State getState(); + + /** + * + * + *
    +   * Output only. If exists, the time at which the database creation started.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + + /** + * + * + *
    +   * Output only. If exists, the time at which the database creation started.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + + /** + * + * + *
    +   * Output only. If exists, the time at which the database creation started.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
    +   * Output only. Applicable only for restored databases. Contains information
    +   * about the restore source.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.RestoreInfo restore_info = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the restoreInfo field is set. + */ + boolean hasRestoreInfo(); + + /** + * + * + *
    +   * Output only. Applicable only for restored databases. Contains information
    +   * about the restore source.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.RestoreInfo restore_info = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The restoreInfo. + */ + com.google.spanner.admin.database.v1.RestoreInfo getRestoreInfo(); + + /** + * + * + *
    +   * Output only. Applicable only for restored databases. Contains information
    +   * about the restore source.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.RestoreInfo restore_info = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.spanner.admin.database.v1.RestoreInfoOrBuilder getRestoreInfoOrBuilder(); + + /** + * + * + *
    +   * Output only. For databases that are using customer managed encryption, this
    +   * field contains the encryption configuration for the database.
    +   * For databases that are using Google default or other types of encryption,
    +   * this field is empty.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the encryptionConfig field is set. + */ + boolean hasEncryptionConfig(); + + /** + * + * + *
    +   * Output only. For databases that are using customer managed encryption, this
    +   * field contains the encryption configuration for the database.
    +   * For databases that are using Google default or other types of encryption,
    +   * this field is empty.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The encryptionConfig. + */ + com.google.spanner.admin.database.v1.EncryptionConfig getEncryptionConfig(); + + /** + * + * + *
    +   * Output only. For databases that are using customer managed encryption, this
    +   * field contains the encryption configuration for the database.
    +   * For databases that are using Google default or other types of encryption,
    +   * this field is empty.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder getEncryptionConfigOrBuilder(); + + /** + * + * + *
    +   * Output only. For databases that are using customer managed encryption, this
    +   * field contains the encryption information for the database, such as
    +   * all Cloud KMS key versions that are in use. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated.
    +   *
    +   * For databases that are using Google default or other types of encryption,
    +   * this field is empty.
    +   *
    +   * This field is propagated lazily from the backend. There might be a delay
    +   * from when a key version is being used and when it appears in this field.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List getEncryptionInfoList(); + + /** + * + * + *
    +   * Output only. For databases that are using customer managed encryption, this
    +   * field contains the encryption information for the database, such as
    +   * all Cloud KMS key versions that are in use. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated.
    +   *
    +   * For databases that are using Google default or other types of encryption,
    +   * this field is empty.
    +   *
    +   * This field is propagated lazily from the backend. There might be a delay
    +   * from when a key version is being used and when it appears in this field.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.spanner.admin.database.v1.EncryptionInfo getEncryptionInfo(int index); + + /** + * + * + *
    +   * Output only. For databases that are using customer managed encryption, this
    +   * field contains the encryption information for the database, such as
    +   * all Cloud KMS key versions that are in use. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated.
    +   *
    +   * For databases that are using Google default or other types of encryption,
    +   * this field is empty.
    +   *
    +   * This field is propagated lazily from the backend. There might be a delay
    +   * from when a key version is being used and when it appears in this field.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + int getEncryptionInfoCount(); + + /** + * + * + *
    +   * Output only. For databases that are using customer managed encryption, this
    +   * field contains the encryption information for the database, such as
    +   * all Cloud KMS key versions that are in use. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated.
    +   *
    +   * For databases that are using Google default or other types of encryption,
    +   * this field is empty.
    +   *
    +   * This field is propagated lazily from the backend. There might be a delay
    +   * from when a key version is being used and when it appears in this field.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List + getEncryptionInfoOrBuilderList(); + + /** + * + * + *
    +   * Output only. For databases that are using customer managed encryption, this
    +   * field contains the encryption information for the database, such as
    +   * all Cloud KMS key versions that are in use. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated.
    +   *
    +   * For databases that are using Google default or other types of encryption,
    +   * this field is empty.
    +   *
    +   * This field is propagated lazily from the backend. There might be a delay
    +   * from when a key version is being used and when it appears in this field.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_info = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder getEncryptionInfoOrBuilder( + int index); + + /** + * + * + *
    +   * Output only. The period in which Cloud Spanner retains all versions of data
    +   * for the database. This is the same as the value of version_retention_period
    +   * database option set using
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
    +   * Defaults to 1 hour, if not set.
    +   * 
    + * + * string version_retention_period = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The versionRetentionPeriod. + */ + java.lang.String getVersionRetentionPeriod(); + + /** + * + * + *
    +   * Output only. The period in which Cloud Spanner retains all versions of data
    +   * for the database. This is the same as the value of version_retention_period
    +   * database option set using
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
    +   * Defaults to 1 hour, if not set.
    +   * 
    + * + * string version_retention_period = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for versionRetentionPeriod. + */ + com.google.protobuf.ByteString getVersionRetentionPeriodBytes(); + + /** + * + * + *
    +   * Output only. Earliest timestamp at which older versions of the data can be
    +   * read. This value is continuously updated by Cloud Spanner and becomes stale
    +   * the moment it is queried. If you are using this value to recover data, make
    +   * sure to account for the time from the moment when the value is queried to
    +   * the moment when you initiate the recovery.
    +   * 
    + * + * + * .google.protobuf.Timestamp earliest_version_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the earliestVersionTime field is set. + */ + boolean hasEarliestVersionTime(); + + /** + * + * + *
    +   * Output only. Earliest timestamp at which older versions of the data can be
    +   * read. This value is continuously updated by Cloud Spanner and becomes stale
    +   * the moment it is queried. If you are using this value to recover data, make
    +   * sure to account for the time from the moment when the value is queried to
    +   * the moment when you initiate the recovery.
    +   * 
    + * + * + * .google.protobuf.Timestamp earliest_version_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The earliestVersionTime. + */ + com.google.protobuf.Timestamp getEarliestVersionTime(); + + /** + * + * + *
    +   * Output only. Earliest timestamp at which older versions of the data can be
    +   * read. This value is continuously updated by Cloud Spanner and becomes stale
    +   * the moment it is queried. If you are using this value to recover data, make
    +   * sure to account for the time from the moment when the value is queried to
    +   * the moment when you initiate the recovery.
    +   * 
    + * + * + * .google.protobuf.Timestamp earliest_version_time = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getEarliestVersionTimeOrBuilder(); + + /** + * + * + *
    +   * Output only. The read-write region which contains the database's leader
    +   * replicas.
    +   *
    +   * This is the same as the value of default_leader
    +   * database option set using DatabaseAdmin.CreateDatabase or
    +   * DatabaseAdmin.UpdateDatabaseDdl. If not explicitly set, this is empty.
    +   * 
    + * + * string default_leader = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The defaultLeader. + */ + java.lang.String getDefaultLeader(); + + /** + * + * + *
    +   * Output only. The read-write region which contains the database's leader
    +   * replicas.
    +   *
    +   * This is the same as the value of default_leader
    +   * database option set using DatabaseAdmin.CreateDatabase or
    +   * DatabaseAdmin.UpdateDatabaseDdl. If not explicitly set, this is empty.
    +   * 
    + * + * string default_leader = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for defaultLeader. + */ + com.google.protobuf.ByteString getDefaultLeaderBytes(); + + /** + * + * + *
    +   * Output only. The dialect of the Cloud Spanner Database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for databaseDialect. + */ + int getDatabaseDialectValue(); + + /** + * + * + *
    +   * Output only. The dialect of the Cloud Spanner Database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The databaseDialect. + */ + com.google.spanner.admin.database.v1.DatabaseDialect getDatabaseDialect(); + + /** + * + * + *
    +   * Whether drop protection is enabled for this database. Defaults to false,
    +   * if not set. For more details, please see how to [prevent accidental
    +   * database
    +   * deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion).
    +   * 
    + * + * bool enable_drop_protection = 11; + * + * @return The enableDropProtection. + */ + boolean getEnableDropProtection(); + + /** + * + * + *
    +   * Output only. If true, the database is being updated. If false, there are no
    +   * ongoing update operations for the database.
    +   * 
    + * + * bool reconciling = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The reconciling. + */ + boolean getReconciling(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseRole.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseRole.java new file mode 100644 index 000000000000..b31034c9ab63 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseRole.java @@ -0,0 +1,610 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * A Cloud Spanner database role.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.DatabaseRole} + */ +@com.google.protobuf.Generated +public final class DatabaseRole extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.DatabaseRole) + DatabaseRoleOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DatabaseRole"); + } + + // Use DatabaseRole.newBuilder() to construct. + private DatabaseRole(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DatabaseRole() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_DatabaseRole_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_DatabaseRole_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.DatabaseRole.class, + com.google.spanner.admin.database.v1.DatabaseRole.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Required. The name of the database role. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/<role>`
    +   * where `<role>` is as specified in the `CREATE ROLE` DDL statement.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the database role. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/<role>`
    +   * where `<role>` is as specified in the `CREATE ROLE` DDL statement.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.DatabaseRole)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.DatabaseRole other = + (com.google.spanner.admin.database.v1.DatabaseRole) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) 
{ + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.DatabaseRole parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.DatabaseRole parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DatabaseRole parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.DatabaseRole parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DatabaseRole parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.DatabaseRole parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DatabaseRole parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.spanner.admin.database.v1.DatabaseRole parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DatabaseRole parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.DatabaseRole parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DatabaseRole parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.DatabaseRole parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.admin.database.v1.DatabaseRole prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * A Cloud Spanner database role.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.DatabaseRole} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.DatabaseRole) + com.google.spanner.admin.database.v1.DatabaseRoleOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_DatabaseRole_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_DatabaseRole_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.DatabaseRole.class, + com.google.spanner.admin.database.v1.DatabaseRole.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.DatabaseRole.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_DatabaseRole_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.DatabaseRole getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.DatabaseRole.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.DatabaseRole build() { + com.google.spanner.admin.database.v1.DatabaseRole result = buildPartial(); + if 
(!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.DatabaseRole buildPartial() { + com.google.spanner.admin.database.v1.DatabaseRole result = + new com.google.spanner.admin.database.v1.DatabaseRole(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.DatabaseRole result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.DatabaseRole) { + return mergeFrom((com.google.spanner.admin.database.v1.DatabaseRole) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.DatabaseRole other) { + if (other == com.google.spanner.admin.database.v1.DatabaseRole.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = 
true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Required. The name of the database role. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/<role>`
    +     * where `<role>` is as specified in the `CREATE ROLE` DDL statement.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the database role. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/<role>`
    +     * where `<role>` is as specified in the `CREATE ROLE` DDL statement.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the database role. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/<role>`
    +     * where `<role>` is as specified in the `CREATE ROLE` DDL statement.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the database role. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/<role>`
    +     * where `<role>` is as specified in the `CREATE ROLE` DDL statement.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the database role. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/<role>`
    +     * where `<role>` is as specified in the `CREATE ROLE` DDL statement.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.DatabaseRole) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.DatabaseRole) + private static final com.google.spanner.admin.database.v1.DatabaseRole DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.DatabaseRole(); + } + + public static com.google.spanner.admin.database.v1.DatabaseRole getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DatabaseRole parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + 
@java.lang.Override + public com.google.spanner.admin.database.v1.DatabaseRole getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseRoleOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseRoleOrBuilder.java new file mode 100644 index 000000000000..d7a8f2303e1a --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseRoleOrBuilder.java @@ -0,0 +1,58 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface DatabaseRoleOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.DatabaseRole) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the database role. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/<role>`
    +   * where `<role>` is as specified in the `CREATE ROLE` DDL statement.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Required. The name of the database role. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/<role>`
    +   * where `<role>` is as specified in the `CREATE ROLE` DDL statement.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DdlStatementActionInfo.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DdlStatementActionInfo.java new file mode 100644 index 000000000000..ab70fe88307e --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DdlStatementActionInfo.java @@ -0,0 +1,1153 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Action information extracted from a DDL statement. This proto is used to
    + * display the brief info of the DDL statement for the operation
    + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.DdlStatementActionInfo} + */ +@com.google.protobuf.Generated +public final class DdlStatementActionInfo extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.DdlStatementActionInfo) + DdlStatementActionInfoOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DdlStatementActionInfo"); + } + + // Use DdlStatementActionInfo.newBuilder() to construct. + private DdlStatementActionInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DdlStatementActionInfo() { + action_ = ""; + entityType_ = ""; + entityNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_DdlStatementActionInfo_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_DdlStatementActionInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.DdlStatementActionInfo.class, + com.google.spanner.admin.database.v1.DdlStatementActionInfo.Builder.class); + } + + public static final int ACTION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object action_ = ""; + + /** + * + * + *
    +   * The action for the DDL statement, e.g. CREATE, ALTER, DROP, GRANT, etc.
    +   * This field is a non-empty string.
    +   * 
    + * + * string action = 1; + * + * @return The action. + */ + @java.lang.Override + public java.lang.String getAction() { + java.lang.Object ref = action_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + action_ = s; + return s; + } + } + + /** + * + * + *
    +   * The action for the DDL statement, e.g. CREATE, ALTER, DROP, GRANT, etc.
    +   * This field is a non-empty string.
    +   * 
    + * + * string action = 1; + * + * @return The bytes for action. + */ + @java.lang.Override + public com.google.protobuf.ByteString getActionBytes() { + java.lang.Object ref = action_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + action_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ENTITY_TYPE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object entityType_ = ""; + + /** + * + * + *
    +   * The entity type for the DDL statement, e.g. TABLE, INDEX, VIEW, etc.
    +   * This field can be empty string for some DDL statement,
    +   * e.g. for statement "ANALYZE", `entity_type` = "".
    +   * 
    + * + * string entity_type = 2; + * + * @return The entityType. + */ + @java.lang.Override + public java.lang.String getEntityType() { + java.lang.Object ref = entityType_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entityType_ = s; + return s; + } + } + + /** + * + * + *
    +   * The entity type for the DDL statement, e.g. TABLE, INDEX, VIEW, etc.
    +   * This field can be empty string for some DDL statement,
    +   * e.g. for statement "ANALYZE", `entity_type` = "".
    +   * 
    + * + * string entity_type = 2; + * + * @return The bytes for entityType. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEntityTypeBytes() { + java.lang.Object ref = entityType_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entityType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ENTITY_NAMES_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList entityNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * The entity name(s) being operated on the DDL statement.
    +   * E.g.
    +   * 1. For statement "CREATE TABLE t1(...)", `entity_names` = ["t1"].
    +   * 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = ["r1", "r2"].
    +   * 3. For statement "ANALYZE", `entity_names` = [].
    +   * 
    + * + * repeated string entity_names = 3; + * + * @return A list containing the entityNames. + */ + public com.google.protobuf.ProtocolStringList getEntityNamesList() { + return entityNames_; + } + + /** + * + * + *
    +   * The entity name(s) being operated on the DDL statement.
    +   * E.g.
    +   * 1. For statement "CREATE TABLE t1(...)", `entity_names` = ["t1"].
    +   * 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = ["r1", "r2"].
    +   * 3. For statement "ANALYZE", `entity_names` = [].
    +   * 
    + * + * repeated string entity_names = 3; + * + * @return The count of entityNames. + */ + public int getEntityNamesCount() { + return entityNames_.size(); + } + + /** + * + * + *
    +   * The entity name(s) being operated on the DDL statement.
    +   * E.g.
    +   * 1. For statement "CREATE TABLE t1(...)", `entity_names` = ["t1"].
    +   * 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = ["r1", "r2"].
    +   * 3. For statement "ANALYZE", `entity_names` = [].
    +   * 
    + * + * repeated string entity_names = 3; + * + * @param index The index of the element to return. + * @return The entityNames at the given index. + */ + public java.lang.String getEntityNames(int index) { + return entityNames_.get(index); + } + + /** + * + * + *
    +   * The entity name(s) being operated on the DDL statement.
    +   * E.g.
    +   * 1. For statement "CREATE TABLE t1(...)", `entity_names` = ["t1"].
    +   * 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = ["r1", "r2"].
    +   * 3. For statement "ANALYZE", `entity_names` = [].
    +   * 
    + * + * repeated string entity_names = 3; + * + * @param index The index of the value to return. + * @return The bytes of the entityNames at the given index. + */ + public com.google.protobuf.ByteString getEntityNamesBytes(int index) { + return entityNames_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(action_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, action_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(entityType_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, entityType_); + } + for (int i = 0; i < entityNames_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, entityNames_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(action_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, action_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(entityType_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, entityType_); + } + { + int dataSize = 0; + for (int i = 0; i < entityNames_.size(); i++) { + dataSize += computeStringSizeNoTag(entityNames_.getRaw(i)); + } + size += dataSize; + size += 1 * getEntityNamesList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + 
if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.DdlStatementActionInfo)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.DdlStatementActionInfo other = + (com.google.spanner.admin.database.v1.DdlStatementActionInfo) obj; + + if (!getAction().equals(other.getAction())) return false; + if (!getEntityType().equals(other.getEntityType())) return false; + if (!getEntityNamesList().equals(other.getEntityNamesList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ACTION_FIELD_NUMBER; + hash = (53 * hash) + getAction().hashCode(); + hash = (37 * hash) + ENTITY_TYPE_FIELD_NUMBER; + hash = (53 * hash) + getEntityType().hashCode(); + if (getEntityNamesCount() > 0) { + hash = (37 * hash) + ENTITY_NAMES_FIELD_NUMBER; + hash = (53 * hash) + getEntityNamesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.DdlStatementActionInfo parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.DdlStatementActionInfo parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DdlStatementActionInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.admin.database.v1.DdlStatementActionInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DdlStatementActionInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.DdlStatementActionInfo parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DdlStatementActionInfo parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.DdlStatementActionInfo parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DdlStatementActionInfo parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.DdlStatementActionInfo parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DdlStatementActionInfo 
parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.DdlStatementActionInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.DdlStatementActionInfo prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action information extracted from a DDL statement. This proto is used to
    +   * display the brief info of the DDL statement for the operation
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.DdlStatementActionInfo} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.DdlStatementActionInfo) + com.google.spanner.admin.database.v1.DdlStatementActionInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_DdlStatementActionInfo_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_DdlStatementActionInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.DdlStatementActionInfo.class, + com.google.spanner.admin.database.v1.DdlStatementActionInfo.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.DdlStatementActionInfo.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + action_ = ""; + entityType_ = ""; + entityNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_DdlStatementActionInfo_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.DdlStatementActionInfo getDefaultInstanceForType() { + return 
com.google.spanner.admin.database.v1.DdlStatementActionInfo.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.DdlStatementActionInfo build() { + com.google.spanner.admin.database.v1.DdlStatementActionInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.DdlStatementActionInfo buildPartial() { + com.google.spanner.admin.database.v1.DdlStatementActionInfo result = + new com.google.spanner.admin.database.v1.DdlStatementActionInfo(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.DdlStatementActionInfo result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.action_ = action_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.entityType_ = entityType_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + entityNames_.makeImmutable(); + result.entityNames_ = entityNames_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.DdlStatementActionInfo) { + return mergeFrom((com.google.spanner.admin.database.v1.DdlStatementActionInfo) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.DdlStatementActionInfo other) { + if (other == com.google.spanner.admin.database.v1.DdlStatementActionInfo.getDefaultInstance()) + return this; + if (!other.getAction().isEmpty()) { + action_ = other.action_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getEntityType().isEmpty()) { + entityType_ = other.entityType_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.entityNames_.isEmpty()) { + if (entityNames_.isEmpty()) { + 
entityNames_ = other.entityNames_; + bitField0_ |= 0x00000004; + } else { + ensureEntityNamesIsMutable(); + entityNames_.addAll(other.entityNames_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + action_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + entityType_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureEntityNamesIsMutable(); + entityNames_.add(s); + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object action_ = ""; + + /** + * + * + *
    +     * The action for the DDL statement, e.g. CREATE, ALTER, DROP, GRANT, etc.
    +     * This field is a non-empty string.
    +     * 
    + * + * string action = 1; + * + * @return The action. + */ + public java.lang.String getAction() { + java.lang.Object ref = action_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + action_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The action for the DDL statement, e.g. CREATE, ALTER, DROP, GRANT, etc.
    +     * This field is a non-empty string.
    +     * 
    + * + * string action = 1; + * + * @return The bytes for action. + */ + public com.google.protobuf.ByteString getActionBytes() { + java.lang.Object ref = action_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + action_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The action for the DDL statement, e.g. CREATE, ALTER, DROP, GRANT, etc.
    +     * This field is a non-empty string.
    +     * 
    + * + * string action = 1; + * + * @param value The action to set. + * @return This builder for chaining. + */ + public Builder setAction(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The action for the DDL statement, e.g. CREATE, ALTER, DROP, GRANT, etc.
    +     * This field is a non-empty string.
    +     * 
    + * + * string action = 1; + * + * @return This builder for chaining. + */ + public Builder clearAction() { + action_ = getDefaultInstance().getAction(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The action for the DDL statement, e.g. CREATE, ALTER, DROP, GRANT, etc.
    +     * This field is a non-empty string.
    +     * 
    + * + * string action = 1; + * + * @param value The bytes for action to set. + * @return This builder for chaining. + */ + public Builder setActionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + action_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object entityType_ = ""; + + /** + * + * + *
    +     * The entity type for the DDL statement, e.g. TABLE, INDEX, VIEW, etc.
    +     * This field can be empty string for some DDL statement,
    +     * e.g. for statement "ANALYZE", `entity_type` = "".
    +     * 
    + * + * string entity_type = 2; + * + * @return The entityType. + */ + public java.lang.String getEntityType() { + java.lang.Object ref = entityType_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + entityType_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The entity type for the DDL statement, e.g. TABLE, INDEX, VIEW, etc.
    +     * This field can be empty string for some DDL statement,
    +     * e.g. for statement "ANALYZE", `entity_type` = "".
    +     * 
    + * + * string entity_type = 2; + * + * @return The bytes for entityType. + */ + public com.google.protobuf.ByteString getEntityTypeBytes() { + java.lang.Object ref = entityType_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + entityType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The entity type for the DDL statement, e.g. TABLE, INDEX, VIEW, etc.
    +     * This field can be empty string for some DDL statement,
    +     * e.g. for statement "ANALYZE", `entity_type` = "".
    +     * 
    + * + * string entity_type = 2; + * + * @param value The entityType to set. + * @return This builder for chaining. + */ + public Builder setEntityType(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + entityType_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The entity type for the DDL statement, e.g. TABLE, INDEX, VIEW, etc.
    +     * This field can be empty string for some DDL statement,
    +     * e.g. for statement "ANALYZE", `entity_type` = "".
    +     * 
    + * + * string entity_type = 2; + * + * @return This builder for chaining. + */ + public Builder clearEntityType() { + entityType_ = getDefaultInstance().getEntityType(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The entity type for the DDL statement, e.g. TABLE, INDEX, VIEW, etc.
    +     * This field can be empty string for some DDL statement,
    +     * e.g. for statement "ANALYZE", `entity_type` = "".
    +     * 
    + * + * string entity_type = 2; + * + * @param value The bytes for entityType to set. + * @return This builder for chaining. + */ + public Builder setEntityTypeBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + entityType_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList entityNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureEntityNamesIsMutable() { + if (!entityNames_.isModifiable()) { + entityNames_ = new com.google.protobuf.LazyStringArrayList(entityNames_); + } + bitField0_ |= 0x00000004; + } + + /** + * + * + *
    +     * The entity name(s) being operated on the DDL statement.
    +     * E.g.
    +     * 1. For statement "CREATE TABLE t1(...)", `entity_names` = ["t1"].
    +     * 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = ["r1", "r2"].
    +     * 3. For statement "ANALYZE", `entity_names` = [].
    +     * 
    + * + * repeated string entity_names = 3; + * + * @return A list containing the entityNames. + */ + public com.google.protobuf.ProtocolStringList getEntityNamesList() { + entityNames_.makeImmutable(); + return entityNames_; + } + + /** + * + * + *
    +     * The entity name(s) being operated on the DDL statement.
    +     * E.g.
    +     * 1. For statement "CREATE TABLE t1(...)", `entity_names` = ["t1"].
    +     * 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = ["r1", "r2"].
    +     * 3. For statement "ANALYZE", `entity_names` = [].
    +     * 
    + * + * repeated string entity_names = 3; + * + * @return The count of entityNames. + */ + public int getEntityNamesCount() { + return entityNames_.size(); + } + + /** + * + * + *
    +     * The entity name(s) being operated on the DDL statement.
    +     * E.g.
    +     * 1. For statement "CREATE TABLE t1(...)", `entity_names` = ["t1"].
    +     * 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = ["r1", "r2"].
    +     * 3. For statement "ANALYZE", `entity_names` = [].
    +     * 
    + * + * repeated string entity_names = 3; + * + * @param index The index of the element to return. + * @return The entityNames at the given index. + */ + public java.lang.String getEntityNames(int index) { + return entityNames_.get(index); + } + + /** + * + * + *
    +     * The entity name(s) being operated on the DDL statement.
    +     * E.g.
    +     * 1. For statement "CREATE TABLE t1(...)", `entity_names` = ["t1"].
    +     * 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = ["r1", "r2"].
    +     * 3. For statement "ANALYZE", `entity_names` = [].
    +     * 
    + * + * repeated string entity_names = 3; + * + * @param index The index of the value to return. + * @return The bytes of the entityNames at the given index. + */ + public com.google.protobuf.ByteString getEntityNamesBytes(int index) { + return entityNames_.getByteString(index); + } + + /** + * + * + *
    +     * The entity name(s) being operated on the DDL statement.
    +     * E.g.
    +     * 1. For statement "CREATE TABLE t1(...)", `entity_names` = ["t1"].
    +     * 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = ["r1", "r2"].
    +     * 3. For statement "ANALYZE", `entity_names` = [].
    +     * 
    + * + * repeated string entity_names = 3; + * + * @param index The index to set the value at. + * @param value The entityNames to set. + * @return This builder for chaining. + */ + public Builder setEntityNames(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntityNamesIsMutable(); + entityNames_.set(index, value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The entity name(s) being operated on the DDL statement.
    +     * E.g.
    +     * 1. For statement "CREATE TABLE t1(...)", `entity_names` = ["t1"].
    +     * 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = ["r1", "r2"].
    +     * 3. For statement "ANALYZE", `entity_names` = [].
    +     * 
    + * + * repeated string entity_names = 3; + * + * @param value The entityNames to add. + * @return This builder for chaining. + */ + public Builder addEntityNames(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureEntityNamesIsMutable(); + entityNames_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The entity name(s) being operated on the DDL statement.
    +     * E.g.
    +     * 1. For statement "CREATE TABLE t1(...)", `entity_names` = ["t1"].
    +     * 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = ["r1", "r2"].
    +     * 3. For statement "ANALYZE", `entity_names` = [].
    +     * 
    + * + * repeated string entity_names = 3; + * + * @param values The entityNames to add. + * @return This builder for chaining. + */ + public Builder addAllEntityNames(java.lang.Iterable values) { + ensureEntityNamesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, entityNames_); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The entity name(s) being operated on the DDL statement.
    +     * E.g.
    +     * 1. For statement "CREATE TABLE t1(...)", `entity_names` = ["t1"].
    +     * 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = ["r1", "r2"].
    +     * 3. For statement "ANALYZE", `entity_names` = [].
    +     * 
    + * + * repeated string entity_names = 3; + * + * @return This builder for chaining. + */ + public Builder clearEntityNames() { + entityNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The entity name(s) being operated on the DDL statement.
    +     * E.g.
    +     * 1. For statement "CREATE TABLE t1(...)", `entity_names` = ["t1"].
    +     * 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = ["r1", "r2"].
    +     * 3. For statement "ANALYZE", `entity_names` = [].
    +     * 
    + * + * repeated string entity_names = 3; + * + * @param value The bytes of the entityNames to add. + * @return This builder for chaining. + */ + public Builder addEntityNamesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureEntityNamesIsMutable(); + entityNames_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.DdlStatementActionInfo) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.DdlStatementActionInfo) + private static final com.google.spanner.admin.database.v1.DdlStatementActionInfo DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.DdlStatementActionInfo(); + } + + public static com.google.spanner.admin.database.v1.DdlStatementActionInfo getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DdlStatementActionInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.DdlStatementActionInfo getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DdlStatementActionInfoOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DdlStatementActionInfoOrBuilder.java new file mode 100644 index 000000000000..b70586cbe471 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DdlStatementActionInfoOrBuilder.java @@ -0,0 +1,156 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface DdlStatementActionInfoOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.DdlStatementActionInfo) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The action for the DDL statement, e.g. CREATE, ALTER, DROP, GRANT, etc.
    +   * This field is a non-empty string.
    +   * 
    + * + * string action = 1; + * + * @return The action. + */ + java.lang.String getAction(); + + /** + * + * + *
    +   * The action for the DDL statement, e.g. CREATE, ALTER, DROP, GRANT, etc.
    +   * This field is a non-empty string.
    +   * 
    + * + * string action = 1; + * + * @return The bytes for action. + */ + com.google.protobuf.ByteString getActionBytes(); + + /** + * + * + *
    +   * The entity type for the DDL statement, e.g. TABLE, INDEX, VIEW, etc.
    +   * This field can be empty string for some DDL statement,
    +   * e.g. for statement "ANALYZE", `entity_type` = "".
    +   * 
    + * + * string entity_type = 2; + * + * @return The entityType. + */ + java.lang.String getEntityType(); + + /** + * + * + *
    +   * The entity type for the DDL statement, e.g. TABLE, INDEX, VIEW, etc.
    +   * This field can be empty string for some DDL statement,
    +   * e.g. for statement "ANALYZE", `entity_type` = "".
    +   * 
    + * + * string entity_type = 2; + * + * @return The bytes for entityType. + */ + com.google.protobuf.ByteString getEntityTypeBytes(); + + /** + * + * + *
    +   * The entity name(s) being operated on the DDL statement.
    +   * E.g.
    +   * 1. For statement "CREATE TABLE t1(...)", `entity_names` = ["t1"].
    +   * 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = ["r1", "r2"].
    +   * 3. For statement "ANALYZE", `entity_names` = [].
    +   * 
    + * + * repeated string entity_names = 3; + * + * @return A list containing the entityNames. + */ + java.util.List getEntityNamesList(); + + /** + * + * + *
    +   * The entity name(s) being operated on the DDL statement.
    +   * E.g.
    +   * 1. For statement "CREATE TABLE t1(...)", `entity_names` = ["t1"].
    +   * 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = ["r1", "r2"].
    +   * 3. For statement "ANALYZE", `entity_names` = [].
    +   * 
    + * + * repeated string entity_names = 3; + * + * @return The count of entityNames. + */ + int getEntityNamesCount(); + + /** + * + * + *
    +   * The entity name(s) being operated on the DDL statement.
    +   * E.g.
    +   * 1. For statement "CREATE TABLE t1(...)", `entity_names` = ["t1"].
    +   * 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = ["r1", "r2"].
    +   * 3. For statement "ANALYZE", `entity_names` = [].
    +   * 
    + * + * repeated string entity_names = 3; + * + * @param index The index of the element to return. + * @return The entityNames at the given index. + */ + java.lang.String getEntityNames(int index); + + /** + * + * + *
    +   * The entity name(s) being operated on the DDL statement.
    +   * E.g.
    +   * 1. For statement "CREATE TABLE t1(...)", `entity_names` = ["t1"].
    +   * 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = ["r1", "r2"].
    +   * 3. For statement "ANALYZE", `entity_names` = [].
    +   * 
    + * + * repeated string entity_names = 3; + * + * @param index The index of the value to return. + * @return The bytes of the entityNames at the given index. + */ + com.google.protobuf.ByteString getEntityNamesBytes(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupRequest.java new file mode 100644 index 000000000000..0d51c9bcb18f --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupRequest.java @@ -0,0 +1,627 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.DeleteBackupRequest} + */ +@com.google.protobuf.Generated +public final class DeleteBackupRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.DeleteBackupRequest) + DeleteBackupRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DeleteBackupRequest"); + } + + // Use DeleteBackupRequest.newBuilder() to construct. + private DeleteBackupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DeleteBackupRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_DeleteBackupRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_DeleteBackupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.DeleteBackupRequest.class, + com.google.spanner.admin.database.v1.DeleteBackupRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Required. Name of the backup to delete.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. Name of the backup to delete.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.DeleteBackupRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.DeleteBackupRequest other = + (com.google.spanner.admin.database.v1.DeleteBackupRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + 
@java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.DeleteBackupRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupRequest parseFrom( + java.io.InputStream input) throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.DeleteBackupRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + 
return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.DeleteBackupRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.DeleteBackupRequest) + com.google.spanner.admin.database.v1.DeleteBackupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_DeleteBackupRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_DeleteBackupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.DeleteBackupRequest.class, + com.google.spanner.admin.database.v1.DeleteBackupRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.DeleteBackupRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_DeleteBackupRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.DeleteBackupRequest getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.DeleteBackupRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.DeleteBackupRequest build() { + 
com.google.spanner.admin.database.v1.DeleteBackupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.DeleteBackupRequest buildPartial() { + com.google.spanner.admin.database.v1.DeleteBackupRequest result = + new com.google.spanner.admin.database.v1.DeleteBackupRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.DeleteBackupRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.DeleteBackupRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.DeleteBackupRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.DeleteBackupRequest other) { + if (other == com.google.spanner.admin.database.v1.DeleteBackupRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + 
bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Required. Name of the backup to delete.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. Name of the backup to delete.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. Name of the backup to delete.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. Name of the backup to delete.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. Name of the backup to delete.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.DeleteBackupRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.DeleteBackupRequest) + private static final com.google.spanner.admin.database.v1.DeleteBackupRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.DeleteBackupRequest(); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteBackupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.DeleteBackupRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupRequestOrBuilder.java new file mode 100644 index 000000000000..07a259a9d34a --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupRequestOrBuilder.java @@ -0,0 +1,62 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface DeleteBackupRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.DeleteBackupRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. Name of the backup to delete.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Required. Name of the backup to delete.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupScheduleRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupScheduleRequest.java new file mode 100644 index 000000000000..41fec9e9f7d6 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupScheduleRequest.java @@ -0,0 +1,634 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup_schedule.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.DeleteBackupScheduleRequest} + */ +@com.google.protobuf.Generated +public final class DeleteBackupScheduleRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.DeleteBackupScheduleRequest) + DeleteBackupScheduleRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DeleteBackupScheduleRequest"); + } + + // Use DeleteBackupScheduleRequest.newBuilder() to construct. + private DeleteBackupScheduleRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DeleteBackupScheduleRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_DeleteBackupScheduleRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_DeleteBackupScheduleRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest.class, + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Required. The name of the schedule to delete.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the schedule to delete.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest other = + (com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + 
return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + 
com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.DeleteBackupScheduleRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.DeleteBackupScheduleRequest) + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_DeleteBackupScheduleRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_DeleteBackupScheduleRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest.class, + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_DeleteBackupScheduleRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest build() { + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest buildPartial() { + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest result = + new com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest other) { + if (other + == com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; 
+ while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Required. The name of the schedule to delete.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the schedule to delete.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the schedule to delete.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the schedule to delete.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the schedule to delete.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.DeleteBackupScheduleRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.DeleteBackupScheduleRequest) + private static final com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest(); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteBackupScheduleRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser 
parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupScheduleRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupScheduleRequestOrBuilder.java new file mode 100644 index 000000000000..277431696b73 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupScheduleRequestOrBuilder.java @@ -0,0 +1,62 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup_schedule.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface DeleteBackupScheduleRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.DeleteBackupScheduleRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the schedule to delete.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Required. The name of the schedule to delete.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DropDatabaseRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DropDatabaseRequest.java new file mode 100644 index 000000000000..eee9562f4652 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DropDatabaseRequest.java @@ -0,0 +1,613 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.DropDatabaseRequest} + */ +@com.google.protobuf.Generated +public final class DropDatabaseRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.DropDatabaseRequest) + DropDatabaseRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DropDatabaseRequest"); + } + + // Use DropDatabaseRequest.newBuilder() to construct. + private DropDatabaseRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DropDatabaseRequest() { + database_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_DropDatabaseRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_DropDatabaseRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.DropDatabaseRequest.class, + com.google.spanner.admin.database.v1.DropDatabaseRequest.Builder.class); + } + + public static final int DATABASE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object database_ = ""; + + /** + * + * + *
    +   * Required. The database to be dropped.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + @java.lang.Override + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The database to be dropped.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, database_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, database_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.DropDatabaseRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.DropDatabaseRequest other = + (com.google.spanner.admin.database.v1.DropDatabaseRequest) obj; + + if (!getDatabase().equals(other.getDatabase())) return false; + if 
(!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getDatabase().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.DropDatabaseRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.DropDatabaseRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DropDatabaseRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.DropDatabaseRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DropDatabaseRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.DropDatabaseRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.admin.database.v1.DropDatabaseRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.DropDatabaseRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DropDatabaseRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.DropDatabaseRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DropDatabaseRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.DropDatabaseRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.DropDatabaseRequest prototype) { + return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.DropDatabaseRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.DropDatabaseRequest) + com.google.spanner.admin.database.v1.DropDatabaseRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_DropDatabaseRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_DropDatabaseRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.DropDatabaseRequest.class, + com.google.spanner.admin.database.v1.DropDatabaseRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.DropDatabaseRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + database_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_DropDatabaseRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.DropDatabaseRequest getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.DropDatabaseRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.DropDatabaseRequest build() { + 
com.google.spanner.admin.database.v1.DropDatabaseRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.DropDatabaseRequest buildPartial() { + com.google.spanner.admin.database.v1.DropDatabaseRequest result = + new com.google.spanner.admin.database.v1.DropDatabaseRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.DropDatabaseRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.database_ = database_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.DropDatabaseRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.DropDatabaseRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.DropDatabaseRequest other) { + if (other == com.google.spanner.admin.database.v1.DropDatabaseRequest.getDefaultInstance()) + return this; + if (!other.getDatabase().isEmpty()) { + database_ = other.database_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + database_ = 
input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object database_ = ""; + + /** + * + * + *
    +     * Required. The database to be dropped.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The database to be dropped.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The database to be dropped.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The database to set. + * @return This builder for chaining. + */ + public Builder setDatabase(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The database to be dropped.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearDatabase() { + database_ = getDefaultInstance().getDatabase(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The database to be dropped.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for database to set. + * @return This builder for chaining. + */ + public Builder setDatabaseBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.DropDatabaseRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.DropDatabaseRequest) + private static final com.google.spanner.admin.database.v1.DropDatabaseRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.DropDatabaseRequest(); + } + + public static com.google.spanner.admin.database.v1.DropDatabaseRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DropDatabaseRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.DropDatabaseRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DropDatabaseRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DropDatabaseRequestOrBuilder.java new file mode 100644 index 000000000000..ac33421ba89e --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DropDatabaseRequestOrBuilder.java @@ -0,0 +1,58 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface DropDatabaseRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.DropDatabaseRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The database to be dropped.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + java.lang.String getDatabase(); + + /** + * + * + *
    +   * Required. The database to be dropped.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + com.google.protobuf.ByteString getDatabaseBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionConfig.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionConfig.java new file mode 100644 index 000000000000..67e2abb32d84 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionConfig.java @@ -0,0 +1,1083 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/common.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Encryption configuration for a Cloud Spanner database.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.EncryptionConfig} + */ +@com.google.protobuf.Generated +public final class EncryptionConfig extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.EncryptionConfig) + EncryptionConfigOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "EncryptionConfig"); + } + + // Use EncryptionConfig.newBuilder() to construct. + private EncryptionConfig(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private EncryptionConfig() { + kmsKeyName_ = ""; + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.CommonProto + .internal_static_google_spanner_admin_database_v1_EncryptionConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.CommonProto + .internal_static_google_spanner_admin_database_v1_EncryptionConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.EncryptionConfig.class, + com.google.spanner.admin.database.v1.EncryptionConfig.Builder.class); + } + + public static final int KMS_KEY_NAME_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object kmsKeyName_ = ""; + + /** + * + * + *
    +   * The Cloud KMS key to be used for encrypting and decrypting
    +   * the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * 
    + * + * string kms_key_name = 2 [(.google.api.resource_reference) = { ... } + * + * @return The kmsKeyName. + */ + @java.lang.Override + public java.lang.String getKmsKeyName() { + java.lang.Object ref = kmsKeyName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + kmsKeyName_ = s; + return s; + } + } + + /** + * + * + *
    +   * The Cloud KMS key to be used for encrypting and decrypting
    +   * the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * 
    + * + * string kms_key_name = 2 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for kmsKeyName. + */ + @java.lang.Override + public com.google.protobuf.ByteString getKmsKeyNameBytes() { + java.lang.Object ref = kmsKeyName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + kmsKeyName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int KMS_KEY_NAMES_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList kmsKeyNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * Specifies the KMS configuration for the one or more keys used to encrypt
    +   * the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @return A list containing the kmsKeyNames. + */ + public com.google.protobuf.ProtocolStringList getKmsKeyNamesList() { + return kmsKeyNames_; + } + + /** + * + * + *
    +   * Specifies the KMS configuration for the one or more keys used to encrypt
    +   * the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @return The count of kmsKeyNames. + */ + public int getKmsKeyNamesCount() { + return kmsKeyNames_.size(); + } + + /** + * + * + *
    +   * Specifies the KMS configuration for the one or more keys used to encrypt
    +   * the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + public java.lang.String getKmsKeyNames(int index) { + return kmsKeyNames_.get(index); + } + + /** + * + * + *
    +   * Specifies the KMS configuration for the one or more keys used to encrypt
    +   * the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + public com.google.protobuf.ByteString getKmsKeyNamesBytes(int index) { + return kmsKeyNames_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(kmsKeyName_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, kmsKeyName_); + } + for (int i = 0; i < kmsKeyNames_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, kmsKeyNames_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(kmsKeyName_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, kmsKeyName_); + } + { + int dataSize = 0; + for (int i = 0; i < kmsKeyNames_.size(); i++) { + dataSize += computeStringSizeNoTag(kmsKeyNames_.getRaw(i)); + } + size += dataSize; + size += 1 * getKmsKeyNamesList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.EncryptionConfig)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.EncryptionConfig other = + 
(com.google.spanner.admin.database.v1.EncryptionConfig) obj; + + if (!getKmsKeyName().equals(other.getKmsKeyName())) return false; + if (!getKmsKeyNamesList().equals(other.getKmsKeyNamesList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + KMS_KEY_NAME_FIELD_NUMBER; + hash = (53 * hash) + getKmsKeyName().hashCode(); + if (getKmsKeyNamesCount() > 0) { + hash = (37 * hash) + KMS_KEY_NAMES_FIELD_NUMBER; + hash = (53 * hash) + getKmsKeyNamesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.EncryptionConfig parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.EncryptionConfig parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.EncryptionConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.EncryptionConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.EncryptionConfig parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.EncryptionConfig parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.EncryptionConfig parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.EncryptionConfig parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.EncryptionConfig parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.EncryptionConfig parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.EncryptionConfig parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.EncryptionConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.EncryptionConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Encryption configuration for a Cloud Spanner database.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.EncryptionConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.EncryptionConfig) + com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.CommonProto + .internal_static_google_spanner_admin_database_v1_EncryptionConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.CommonProto + .internal_static_google_spanner_admin_database_v1_EncryptionConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.EncryptionConfig.class, + com.google.spanner.admin.database.v1.EncryptionConfig.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.EncryptionConfig.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + kmsKeyName_ = ""; + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.CommonProto + .internal_static_google_spanner_admin_database_v1_EncryptionConfig_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionConfig getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionConfig build() { + 
com.google.spanner.admin.database.v1.EncryptionConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionConfig buildPartial() { + com.google.spanner.admin.database.v1.EncryptionConfig result = + new com.google.spanner.admin.database.v1.EncryptionConfig(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.EncryptionConfig result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.kmsKeyName_ = kmsKeyName_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + kmsKeyNames_.makeImmutable(); + result.kmsKeyNames_ = kmsKeyNames_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.EncryptionConfig) { + return mergeFrom((com.google.spanner.admin.database.v1.EncryptionConfig) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.EncryptionConfig other) { + if (other == com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance()) + return this; + if (!other.getKmsKeyName().isEmpty()) { + kmsKeyName_ = other.kmsKeyName_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.kmsKeyNames_.isEmpty()) { + if (kmsKeyNames_.isEmpty()) { + kmsKeyNames_ = other.kmsKeyNames_; + bitField0_ |= 0x00000002; + } else { + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.addAll(other.kmsKeyNames_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream 
input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 18: + { + kmsKeyName_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 18 + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(s); + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object kmsKeyName_ = ""; + + /** + * + * + *
    +     * The Cloud KMS key to be used for encrypting and decrypting
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * 
    + * + * string kms_key_name = 2 [(.google.api.resource_reference) = { ... } + * + * @return The kmsKeyName. + */ + public java.lang.String getKmsKeyName() { + java.lang.Object ref = kmsKeyName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + kmsKeyName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The Cloud KMS key to be used for encrypting and decrypting
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * 
    + * + * string kms_key_name = 2 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for kmsKeyName. + */ + public com.google.protobuf.ByteString getKmsKeyNameBytes() { + java.lang.Object ref = kmsKeyName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + kmsKeyName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The Cloud KMS key to be used for encrypting and decrypting
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * 
    + * + * string kms_key_name = 2 [(.google.api.resource_reference) = { ... } + * + * @param value The kmsKeyName to set. + * @return This builder for chaining. + */ + public Builder setKmsKeyName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + kmsKeyName_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The Cloud KMS key to be used for encrypting and decrypting
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * 
    + * + * string kms_key_name = 2 [(.google.api.resource_reference) = { ... } + * + * @return This builder for chaining. + */ + public Builder clearKmsKeyName() { + kmsKeyName_ = getDefaultInstance().getKmsKeyName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The Cloud KMS key to be used for encrypting and decrypting
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * 
    + * + * string kms_key_name = 2 [(.google.api.resource_reference) = { ... } + * + * @param value The bytes for kmsKeyName to set. + * @return This builder for chaining. + */ + public Builder setKmsKeyNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + kmsKeyName_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList kmsKeyNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureKmsKeyNamesIsMutable() { + if (!kmsKeyNames_.isModifiable()) { + kmsKeyNames_ = new com.google.protobuf.LazyStringArrayList(kmsKeyNames_); + } + bitField0_ |= 0x00000002; + } + + /** + * + * + *
    +     * Specifies the KMS configuration for the one or more keys used to encrypt
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @return A list containing the kmsKeyNames. + */ + public com.google.protobuf.ProtocolStringList getKmsKeyNamesList() { + kmsKeyNames_.makeImmutable(); + return kmsKeyNames_; + } + + /** + * + * + *
    +     * Specifies the KMS configuration for the one or more keys used to encrypt
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @return The count of kmsKeyNames. + */ + public int getKmsKeyNamesCount() { + return kmsKeyNames_.size(); + } + + /** + * + * + *
    +     * Specifies the KMS configuration for the one or more keys used to encrypt
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + public java.lang.String getKmsKeyNames(int index) { + return kmsKeyNames_.get(index); + } + + /** + * + * + *
    +     * Specifies the KMS configuration for the one or more keys used to encrypt
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + public com.google.protobuf.ByteString getKmsKeyNamesBytes(int index) { + return kmsKeyNames_.getByteString(index); + } + + /** + * + * + *
    +     * Specifies the KMS configuration for the one or more keys used to encrypt
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @param index The index to set the value at. + * @param value The kmsKeyNames to set. + * @return This builder for chaining. + */ + public Builder setKmsKeyNames(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.set(index, value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Specifies the KMS configuration for the one or more keys used to encrypt
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @param value The kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addKmsKeyNames(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Specifies the KMS configuration for the one or more keys used to encrypt
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @param values The kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addAllKmsKeyNames(java.lang.Iterable values) { + ensureKmsKeyNamesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, kmsKeyNames_); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Specifies the KMS configuration for the one or more keys used to encrypt
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @return This builder for chaining. + */ + public Builder clearKmsKeyNames() { + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Specifies the KMS configuration for the one or more keys used to encrypt
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @param value The bytes of the kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addKmsKeyNamesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.EncryptionConfig) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.EncryptionConfig) + private static final com.google.spanner.admin.database.v1.EncryptionConfig DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.EncryptionConfig(); + } + + public static com.google.spanner.admin.database.v1.EncryptionConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public EncryptionConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionConfigOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionConfigOrBuilder.java new file mode 100644 index 000000000000..7f197529ce78 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionConfigOrBuilder.java @@ -0,0 +1,168 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/common.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface EncryptionConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.EncryptionConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The Cloud KMS key to be used for encrypting and decrypting
    +   * the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * 
    + * + * string kms_key_name = 2 [(.google.api.resource_reference) = { ... } + * + * @return The kmsKeyName. + */ + java.lang.String getKmsKeyName(); + + /** + * + * + *
    +   * The Cloud KMS key to be used for encrypting and decrypting
    +   * the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * 
    + * + * string kms_key_name = 2 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for kmsKeyName. + */ + com.google.protobuf.ByteString getKmsKeyNameBytes(); + + /** + * + * + *
    +   * Specifies the KMS configuration for the one or more keys used to encrypt
    +   * the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @return A list containing the kmsKeyNames. + */ + java.util.List getKmsKeyNamesList(); + + /** + * + * + *
    +   * Specifies the KMS configuration for the one or more keys used to encrypt
    +   * the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @return The count of kmsKeyNames. + */ + int getKmsKeyNamesCount(); + + /** + * + * + *
    +   * Specifies the KMS configuration for the one or more keys used to encrypt
    +   * the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + java.lang.String getKmsKeyNames(int index); + + /** + * + * + *
    +   * Specifies the KMS configuration for the one or more keys used to encrypt
    +   * the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + com.google.protobuf.ByteString getKmsKeyNamesBytes(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionInfo.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionInfo.java new file mode 100644 index 000000000000..384fafb9cfdb --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionInfo.java @@ -0,0 +1,1297 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/common.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Encryption information for a Cloud Spanner database or backup.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.EncryptionInfo} + */ +@com.google.protobuf.Generated +public final class EncryptionInfo extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.EncryptionInfo) + EncryptionInfoOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "EncryptionInfo"); + } + + // Use EncryptionInfo.newBuilder() to construct. + private EncryptionInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private EncryptionInfo() { + encryptionType_ = 0; + kmsKeyVersion_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.CommonProto + .internal_static_google_spanner_admin_database_v1_EncryptionInfo_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.CommonProto + .internal_static_google_spanner_admin_database_v1_EncryptionInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.EncryptionInfo.class, + com.google.spanner.admin.database.v1.EncryptionInfo.Builder.class); + } + + /** + * + * + *
    +   * Possible encryption types.
    +   * 
    + * + * Protobuf enum {@code google.spanner.admin.database.v1.EncryptionInfo.Type} + */ + public enum Type implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Encryption type was not specified, though data at rest remains encrypted.
    +     * 
    + * + * TYPE_UNSPECIFIED = 0; + */ + TYPE_UNSPECIFIED(0), + /** + * + * + *
    +     * The data is encrypted at rest with a key that is
    +     * fully managed by Google. No key version or status will be populated.
    +     * This is the default state.
    +     * 
    + * + * GOOGLE_DEFAULT_ENCRYPTION = 1; + */ + GOOGLE_DEFAULT_ENCRYPTION(1), + /** + * + * + *
    +     * The data is encrypted at rest with a key that is
    +     * managed by the customer. The active version of the key. `kms_key_version`
    +     * will be populated, and `encryption_status` may be populated.
    +     * 
    + * + * CUSTOMER_MANAGED_ENCRYPTION = 2; + */ + CUSTOMER_MANAGED_ENCRYPTION(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Type"); + } + + /** + * + * + *
    +     * Encryption type was not specified, though data at rest remains encrypted.
    +     * 
    + * + * TYPE_UNSPECIFIED = 0; + */ + public static final int TYPE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * The data is encrypted at rest with a key that is
    +     * fully managed by Google. No key version or status will be populated.
    +     * This is the default state.
    +     * 
    + * + * GOOGLE_DEFAULT_ENCRYPTION = 1; + */ + public static final int GOOGLE_DEFAULT_ENCRYPTION_VALUE = 1; + + /** + * + * + *
    +     * The data is encrypted at rest with a key that is
    +     * managed by the customer. The active version of the key. `kms_key_version`
    +     * will be populated, and `encryption_status` may be populated.
    +     * 
    + * + * CUSTOMER_MANAGED_ENCRYPTION = 2; + */ + public static final int CUSTOMER_MANAGED_ENCRYPTION_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Type valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Type forNumber(int value) { + switch (value) { + case 0: + return TYPE_UNSPECIFIED; + case 1: + return GOOGLE_DEFAULT_ENCRYPTION; + case 2: + return CUSTOMER_MANAGED_ENCRYPTION; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Type findValueByNumber(int number) { + return Type.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.admin.database.v1.EncryptionInfo.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final Type[] VALUES = values(); + + public static 
Type valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Type(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.admin.database.v1.EncryptionInfo.Type) + } + + private int bitField0_; + public static final int ENCRYPTION_TYPE_FIELD_NUMBER = 3; + private int encryptionType_ = 0; + + /** + * + * + *
    +   * Output only. The type of encryption.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo.Type encryption_type = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for encryptionType. + */ + @java.lang.Override + public int getEncryptionTypeValue() { + return encryptionType_; + } + + /** + * + * + *
    +   * Output only. The type of encryption.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo.Type encryption_type = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The encryptionType. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionInfo.Type getEncryptionType() { + com.google.spanner.admin.database.v1.EncryptionInfo.Type result = + com.google.spanner.admin.database.v1.EncryptionInfo.Type.forNumber(encryptionType_); + return result == null + ? com.google.spanner.admin.database.v1.EncryptionInfo.Type.UNRECOGNIZED + : result; + } + + public static final int ENCRYPTION_STATUS_FIELD_NUMBER = 4; + private com.google.rpc.Status encryptionStatus_; + + /** + * + * + *
    +   * Output only. If present, the status of a recent encrypt/decrypt call on
    +   * underlying data for this database or backup. Regardless of status, data is
    +   * always encrypted at rest.
    +   * 
    + * + * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the encryptionStatus field is set. + */ + @java.lang.Override + public boolean hasEncryptionStatus() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Output only. If present, the status of a recent encrypt/decrypt call on
    +   * underlying data for this database or backup. Regardless of status, data is
    +   * always encrypted at rest.
    +   * 
    + * + * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The encryptionStatus. + */ + @java.lang.Override + public com.google.rpc.Status getEncryptionStatus() { + return encryptionStatus_ == null + ? com.google.rpc.Status.getDefaultInstance() + : encryptionStatus_; + } + + /** + * + * + *
    +   * Output only. If present, the status of a recent encrypt/decrypt call on
    +   * underlying data for this database or backup. Regardless of status, data is
    +   * always encrypted at rest.
    +   * 
    + * + * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.rpc.StatusOrBuilder getEncryptionStatusOrBuilder() { + return encryptionStatus_ == null + ? com.google.rpc.Status.getDefaultInstance() + : encryptionStatus_; + } + + public static final int KMS_KEY_VERSION_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object kmsKeyVersion_ = ""; + + /** + * + * + *
    +   * Output only. A Cloud KMS key version that is being used to protect the
    +   * database or backup.
    +   * 
    + * + * + * string kms_key_version = 2 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The kmsKeyVersion. + */ + @java.lang.Override + public java.lang.String getKmsKeyVersion() { + java.lang.Object ref = kmsKeyVersion_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + kmsKeyVersion_ = s; + return s; + } + } + + /** + * + * + *
    +   * Output only. A Cloud KMS key version that is being used to protect the
    +   * database or backup.
    +   * 
    + * + * + * string kms_key_version = 2 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for kmsKeyVersion. + */ + @java.lang.Override + public com.google.protobuf.ByteString getKmsKeyVersionBytes() { + java.lang.Object ref = kmsKeyVersion_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + kmsKeyVersion_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(kmsKeyVersion_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, kmsKeyVersion_); + } + if (encryptionType_ + != com.google.spanner.admin.database.v1.EncryptionInfo.Type.TYPE_UNSPECIFIED.getNumber()) { + output.writeEnum(3, encryptionType_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(4, getEncryptionStatus()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(kmsKeyVersion_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, kmsKeyVersion_); + } + if (encryptionType_ + != com.google.spanner.admin.database.v1.EncryptionInfo.Type.TYPE_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(3, encryptionType_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += 
com.google.protobuf.CodedOutputStream.computeMessageSize(4, getEncryptionStatus()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.EncryptionInfo)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.EncryptionInfo other = + (com.google.spanner.admin.database.v1.EncryptionInfo) obj; + + if (encryptionType_ != other.encryptionType_) return false; + if (hasEncryptionStatus() != other.hasEncryptionStatus()) return false; + if (hasEncryptionStatus()) { + if (!getEncryptionStatus().equals(other.getEncryptionStatus())) return false; + } + if (!getKmsKeyVersion().equals(other.getKmsKeyVersion())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ENCRYPTION_TYPE_FIELD_NUMBER; + hash = (53 * hash) + encryptionType_; + if (hasEncryptionStatus()) { + hash = (37 * hash) + ENCRYPTION_STATUS_FIELD_NUMBER; + hash = (53 * hash) + getEncryptionStatus().hashCode(); + } + hash = (37 * hash) + KMS_KEY_VERSION_FIELD_NUMBER; + hash = (53 * hash) + getKmsKeyVersion().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.EncryptionInfo parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.EncryptionInfo parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.EncryptionInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.EncryptionInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.EncryptionInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.EncryptionInfo parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.EncryptionInfo parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.EncryptionInfo parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.EncryptionInfo parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.EncryptionInfo parseDelimitedFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.EncryptionInfo parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.EncryptionInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.admin.database.v1.EncryptionInfo prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Encryption information for a Cloud Spanner database or backup.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.EncryptionInfo} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.EncryptionInfo) + com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.CommonProto + .internal_static_google_spanner_admin_database_v1_EncryptionInfo_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.CommonProto + .internal_static_google_spanner_admin_database_v1_EncryptionInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.EncryptionInfo.class, + com.google.spanner.admin.database.v1.EncryptionInfo.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.EncryptionInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetEncryptionStatusFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + encryptionType_ = 0; + encryptionStatus_ = null; + if (encryptionStatusBuilder_ != null) { + encryptionStatusBuilder_.dispose(); + encryptionStatusBuilder_ = null; + } + kmsKeyVersion_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.CommonProto + 
.internal_static_google_spanner_admin_database_v1_EncryptionInfo_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionInfo getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.EncryptionInfo.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionInfo build() { + com.google.spanner.admin.database.v1.EncryptionInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionInfo buildPartial() { + com.google.spanner.admin.database.v1.EncryptionInfo result = + new com.google.spanner.admin.database.v1.EncryptionInfo(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.EncryptionInfo result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.encryptionType_ = encryptionType_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.encryptionStatus_ = + encryptionStatusBuilder_ == null ? 
encryptionStatus_ : encryptionStatusBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.kmsKeyVersion_ = kmsKeyVersion_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.EncryptionInfo) { + return mergeFrom((com.google.spanner.admin.database.v1.EncryptionInfo) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.EncryptionInfo other) { + if (other == com.google.spanner.admin.database.v1.EncryptionInfo.getDefaultInstance()) + return this; + if (other.encryptionType_ != 0) { + setEncryptionTypeValue(other.getEncryptionTypeValue()); + } + if (other.hasEncryptionStatus()) { + mergeEncryptionStatus(other.getEncryptionStatus()); + } + if (!other.getKmsKeyVersion().isEmpty()) { + kmsKeyVersion_ = other.kmsKeyVersion_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 18: + { + kmsKeyVersion_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 18 + case 24: + { + encryptionType_ = input.readEnum(); + bitField0_ |= 0x00000001; + break; + } // case 24 + case 34: + { + input.readMessage( + internalGetEncryptionStatusFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + 
break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int encryptionType_ = 0; + + /** + * + * + *
    +     * Output only. The type of encryption.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo.Type encryption_type = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for encryptionType. + */ + @java.lang.Override + public int getEncryptionTypeValue() { + return encryptionType_; + } + + /** + * + * + *
    +     * Output only. The type of encryption.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo.Type encryption_type = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The enum numeric value on the wire for encryptionType to set. + * @return This builder for chaining. + */ + public Builder setEncryptionTypeValue(int value) { + encryptionType_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The type of encryption.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo.Type encryption_type = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The encryptionType. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionInfo.Type getEncryptionType() { + com.google.spanner.admin.database.v1.EncryptionInfo.Type result = + com.google.spanner.admin.database.v1.EncryptionInfo.Type.forNumber(encryptionType_); + return result == null + ? com.google.spanner.admin.database.v1.EncryptionInfo.Type.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * Output only. The type of encryption.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo.Type encryption_type = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The encryptionType to set. + * @return This builder for chaining. + */ + public Builder setEncryptionType( + com.google.spanner.admin.database.v1.EncryptionInfo.Type value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + encryptionType_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The type of encryption.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo.Type encryption_type = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearEncryptionType() { + bitField0_ = (bitField0_ & ~0x00000001); + encryptionType_ = 0; + onChanged(); + return this; + } + + private com.google.rpc.Status encryptionStatus_; + private com.google.protobuf.SingleFieldBuilder< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + encryptionStatusBuilder_; + + /** + * + * + *
    +     * Output only. If present, the status of a recent encrypt/decrypt call on
    +     * underlying data for this database or backup. Regardless of status, data is
    +     * always encrypted at rest.
    +     * 
    + * + * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the encryptionStatus field is set. + */ + public boolean hasEncryptionStatus() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Output only. If present, the status of a recent encrypt/decrypt call on
    +     * underlying data for this database or backup. Regardless of status, data is
    +     * always encrypted at rest.
    +     * 
    + * + * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The encryptionStatus. + */ + public com.google.rpc.Status getEncryptionStatus() { + if (encryptionStatusBuilder_ == null) { + return encryptionStatus_ == null + ? com.google.rpc.Status.getDefaultInstance() + : encryptionStatus_; + } else { + return encryptionStatusBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. If present, the status of a recent encrypt/decrypt call on
    +     * underlying data for this database or backup. Regardless of status, data is
    +     * always encrypted at rest.
    +     * 
    + * + * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setEncryptionStatus(com.google.rpc.Status value) { + if (encryptionStatusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + encryptionStatus_ = value; + } else { + encryptionStatusBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. If present, the status of a recent encrypt/decrypt call on
    +     * underlying data for this database or backup. Regardless of status, data is
    +     * always encrypted at rest.
    +     * 
    + * + * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setEncryptionStatus(com.google.rpc.Status.Builder builderForValue) { + if (encryptionStatusBuilder_ == null) { + encryptionStatus_ = builderForValue.build(); + } else { + encryptionStatusBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. If present, the status of a recent encrypt/decrypt call on
    +     * underlying data for this database or backup. Regardless of status, data is
    +     * always encrypted at rest.
    +     * 
    + * + * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeEncryptionStatus(com.google.rpc.Status value) { + if (encryptionStatusBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && encryptionStatus_ != null + && encryptionStatus_ != com.google.rpc.Status.getDefaultInstance()) { + getEncryptionStatusBuilder().mergeFrom(value); + } else { + encryptionStatus_ = value; + } + } else { + encryptionStatusBuilder_.mergeFrom(value); + } + if (encryptionStatus_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. If present, the status of a recent encrypt/decrypt call on
    +     * underlying data for this database or backup. Regardless of status, data is
    +     * always encrypted at rest.
    +     * 
    + * + * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearEncryptionStatus() { + bitField0_ = (bitField0_ & ~0x00000002); + encryptionStatus_ = null; + if (encryptionStatusBuilder_ != null) { + encryptionStatusBuilder_.dispose(); + encryptionStatusBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. If present, the status of a recent encrypt/decrypt call on
    +     * underlying data for this database or backup. Regardless of status, data is
    +     * always encrypted at rest.
    +     * 
    + * + * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.rpc.Status.Builder getEncryptionStatusBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetEncryptionStatusFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. If present, the status of a recent encrypt/decrypt call on
    +     * underlying data for this database or backup. Regardless of status, data is
    +     * always encrypted at rest.
    +     * 
    + * + * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.rpc.StatusOrBuilder getEncryptionStatusOrBuilder() { + if (encryptionStatusBuilder_ != null) { + return encryptionStatusBuilder_.getMessageOrBuilder(); + } else { + return encryptionStatus_ == null + ? com.google.rpc.Status.getDefaultInstance() + : encryptionStatus_; + } + } + + /** + * + * + *
    +     * Output only. If present, the status of a recent encrypt/decrypt call on
    +     * underlying data for this database or backup. Regardless of status, data is
    +     * always encrypted at rest.
    +     * 
    + * + * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + internalGetEncryptionStatusFieldBuilder() { + if (encryptionStatusBuilder_ == null) { + encryptionStatusBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.rpc.Status, + com.google.rpc.Status.Builder, + com.google.rpc.StatusOrBuilder>( + getEncryptionStatus(), getParentForChildren(), isClean()); + encryptionStatus_ = null; + } + return encryptionStatusBuilder_; + } + + private java.lang.Object kmsKeyVersion_ = ""; + + /** + * + * + *
    +     * Output only. A Cloud KMS key version that is being used to protect the
    +     * database or backup.
    +     * 
    + * + * + * string kms_key_version = 2 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The kmsKeyVersion. + */ + public java.lang.String getKmsKeyVersion() { + java.lang.Object ref = kmsKeyVersion_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + kmsKeyVersion_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Output only. A Cloud KMS key version that is being used to protect the
    +     * database or backup.
    +     * 
    + * + * + * string kms_key_version = 2 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for kmsKeyVersion. + */ + public com.google.protobuf.ByteString getKmsKeyVersionBytes() { + java.lang.Object ref = kmsKeyVersion_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + kmsKeyVersion_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Output only. A Cloud KMS key version that is being used to protect the
    +     * database or backup.
    +     * 
    + * + * + * string kms_key_version = 2 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param value The kmsKeyVersion to set. + * @return This builder for chaining. + */ + public Builder setKmsKeyVersion(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + kmsKeyVersion_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. A Cloud KMS key version that is being used to protect the
    +     * database or backup.
    +     * 
    + * + * + * string kms_key_version = 2 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearKmsKeyVersion() { + kmsKeyVersion_ = getDefaultInstance().getKmsKeyVersion(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. A Cloud KMS key version that is being used to protect the
    +     * database or backup.
    +     * 
    + * + * + * string kms_key_version = 2 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for kmsKeyVersion to set. + * @return This builder for chaining. + */ + public Builder setKmsKeyVersionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + kmsKeyVersion_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.EncryptionInfo) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.EncryptionInfo) + private static final com.google.spanner.admin.database.v1.EncryptionInfo DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.EncryptionInfo(); + } + + public static com.google.spanner.admin.database.v1.EncryptionInfo getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public EncryptionInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionInfo getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionInfoOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionInfoOrBuilder.java new file mode 100644 index 000000000000..be33d9db1d8d --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionInfoOrBuilder.java @@ -0,0 +1,136 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/common.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface EncryptionInfoOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.EncryptionInfo) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Output only. The type of encryption.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo.Type encryption_type = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for encryptionType. + */ + int getEncryptionTypeValue(); + + /** + * + * + *
    +   * Output only. The type of encryption.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.EncryptionInfo.Type encryption_type = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The encryptionType. + */ + com.google.spanner.admin.database.v1.EncryptionInfo.Type getEncryptionType(); + + /** + * + * + *
    +   * Output only. If present, the status of a recent encrypt/decrypt call on
    +   * underlying data for this database or backup. Regardless of status, data is
    +   * always encrypted at rest.
    +   * 
    + * + * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the encryptionStatus field is set. + */ + boolean hasEncryptionStatus(); + + /** + * + * + *
    +   * Output only. If present, the status of a recent encrypt/decrypt call on
    +   * underlying data for this database or backup. Regardless of status, data is
    +   * always encrypted at rest.
    +   * 
    + * + * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The encryptionStatus. + */ + com.google.rpc.Status getEncryptionStatus(); + + /** + * + * + *
    +   * Output only. If present, the status of a recent encrypt/decrypt call on
    +   * underlying data for this database or backup. Regardless of status, data is
    +   * always encrypted at rest.
    +   * 
    + * + * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.rpc.StatusOrBuilder getEncryptionStatusOrBuilder(); + + /** + * + * + *
    +   * Output only. A Cloud KMS key version that is being used to protect the
    +   * database or backup.
    +   * 
    + * + * + * string kms_key_version = 2 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The kmsKeyVersion. + */ + java.lang.String getKmsKeyVersion(); + + /** + * + * + *
    +   * Output only. A Cloud KMS key version that is being used to protect the
    +   * database or backup.
    +   * 
    + * + * + * string kms_key_version = 2 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for kmsKeyVersion. + */ + com.google.protobuf.ByteString getKmsKeyVersionBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/FullBackupSpec.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/FullBackupSpec.java new file mode 100644 index 000000000000..4846177aa30c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/FullBackupSpec.java @@ -0,0 +1,400 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The specification for full backups.
    + * A full backup stores the entire contents of the database at a given
    + * version time.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.FullBackupSpec} + */ +@com.google.protobuf.Generated +public final class FullBackupSpec extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.FullBackupSpec) + FullBackupSpecOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "FullBackupSpec"); + } + + // Use FullBackupSpec.newBuilder() to construct. + private FullBackupSpec(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private FullBackupSpec() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_FullBackupSpec_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_FullBackupSpec_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.FullBackupSpec.class, + com.google.spanner.admin.database.v1.FullBackupSpec.Builder.class); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int 
size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.FullBackupSpec)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.FullBackupSpec other = + (com.google.spanner.admin.database.v1.FullBackupSpec) obj; + + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseFrom(byte[] data) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.admin.database.v1.FullBackupSpec prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The specification for full backups.
    +   * A full backup stores the entire contents of the database at a given
    +   * version time.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.FullBackupSpec} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.FullBackupSpec) + com.google.spanner.admin.database.v1.FullBackupSpecOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_FullBackupSpec_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_FullBackupSpec_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.FullBackupSpec.class, + com.google.spanner.admin.database.v1.FullBackupSpec.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.FullBackupSpec.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_FullBackupSpec_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.FullBackupSpec getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.FullBackupSpec.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.FullBackupSpec build() { + com.google.spanner.admin.database.v1.FullBackupSpec result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.FullBackupSpec buildPartial() { + com.google.spanner.admin.database.v1.FullBackupSpec result = + new com.google.spanner.admin.database.v1.FullBackupSpec(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.FullBackupSpec) { + return mergeFrom((com.google.spanner.admin.database.v1.FullBackupSpec) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.FullBackupSpec other) { + if (other == com.google.spanner.admin.database.v1.FullBackupSpec.getDefaultInstance()) + return this; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.FullBackupSpec) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.FullBackupSpec) + private static final 
com.google.spanner.admin.database.v1.FullBackupSpec DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.FullBackupSpec(); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FullBackupSpec parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.FullBackupSpec getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/FullBackupSpecOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/FullBackupSpecOrBuilder.java new file mode 100644 index 000000000000..bdcd4d1c743c --- /dev/null +++ 
b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/FullBackupSpecOrBuilder.java @@ -0,0 +1,27 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface FullBackupSpecOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.FullBackupSpec) + com.google.protobuf.MessageOrBuilder {} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupRequest.java new file mode 100644 index 000000000000..0542dcaa64e0 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupRequest.java @@ -0,0 +1,627 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.GetBackupRequest} + */ +@com.google.protobuf.Generated +public final class GetBackupRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.GetBackupRequest) + GetBackupRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetBackupRequest"); + } + + // Use GetBackupRequest.newBuilder() to construct. + private GetBackupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetBackupRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_GetBackupRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_GetBackupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.GetBackupRequest.class, + com.google.spanner.admin.database.v1.GetBackupRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Required. Name of the backup.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. Name of the backup.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.GetBackupRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.GetBackupRequest other = + (com.google.spanner.admin.database.v1.GetBackupRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + 
@java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.GetBackupRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.GetBackupRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetBackupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.GetBackupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetBackupRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.GetBackupRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetBackupRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + 
return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.GetBackupRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetBackupRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.GetBackupRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetBackupRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.GetBackupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.GetBackupRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.GetBackupRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.GetBackupRequest) + com.google.spanner.admin.database.v1.GetBackupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_GetBackupRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_GetBackupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.GetBackupRequest.class, + com.google.spanner.admin.database.v1.GetBackupRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.GetBackupRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_GetBackupRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.GetBackupRequest getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.GetBackupRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.GetBackupRequest build() { + com.google.spanner.admin.database.v1.GetBackupRequest result = buildPartial(); + if 
(!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.GetBackupRequest buildPartial() { + com.google.spanner.admin.database.v1.GetBackupRequest result = + new com.google.spanner.admin.database.v1.GetBackupRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.GetBackupRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.GetBackupRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.GetBackupRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.GetBackupRequest other) { + if (other == com.google.spanner.admin.database.v1.GetBackupRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, 
extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Required. Name of the backup.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. Name of the backup.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. Name of the backup.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. Name of the backup.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. Name of the backup.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.GetBackupRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.GetBackupRequest) + private static final com.google.spanner.admin.database.v1.GetBackupRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.GetBackupRequest(); + } + + public static com.google.spanner.admin.database.v1.GetBackupRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetBackupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.GetBackupRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupRequestOrBuilder.java new file mode 100644 index 000000000000..0bc8d59dc0a2 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupRequestOrBuilder.java @@ -0,0 +1,62 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface GetBackupRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.GetBackupRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. Name of the backup.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Required. Name of the backup.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupScheduleRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupScheduleRequest.java new file mode 100644 index 000000000000..43130fde2cbb --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupScheduleRequest.java @@ -0,0 +1,631 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup_schedule.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.GetBackupScheduleRequest} + */ +@com.google.protobuf.Generated +public final class GetBackupScheduleRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.GetBackupScheduleRequest) + GetBackupScheduleRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetBackupScheduleRequest"); + } + + // Use GetBackupScheduleRequest.newBuilder() to construct. + private GetBackupScheduleRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetBackupScheduleRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_GetBackupScheduleRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_GetBackupScheduleRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.GetBackupScheduleRequest.class, + com.google.spanner.admin.database.v1.GetBackupScheduleRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Required. The name of the schedule to retrieve.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the schedule to retrieve.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.GetBackupScheduleRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.GetBackupScheduleRequest other = + (com.google.spanner.admin.database.v1.GetBackupScheduleRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return 
true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest 
parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.GetBackupScheduleRequest prototype) { + return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.GetBackupScheduleRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.GetBackupScheduleRequest) + com.google.spanner.admin.database.v1.GetBackupScheduleRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_GetBackupScheduleRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_GetBackupScheduleRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.GetBackupScheduleRequest.class, + com.google.spanner.admin.database.v1.GetBackupScheduleRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.GetBackupScheduleRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_GetBackupScheduleRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.GetBackupScheduleRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.GetBackupScheduleRequest.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.admin.database.v1.GetBackupScheduleRequest build() { + com.google.spanner.admin.database.v1.GetBackupScheduleRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.GetBackupScheduleRequest buildPartial() { + com.google.spanner.admin.database.v1.GetBackupScheduleRequest result = + new com.google.spanner.admin.database.v1.GetBackupScheduleRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.GetBackupScheduleRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.GetBackupScheduleRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.GetBackupScheduleRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.GetBackupScheduleRequest other) { + if (other + == com.google.spanner.admin.database.v1.GetBackupScheduleRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = 
input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Required. The name of the schedule to retrieve.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the schedule to retrieve.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the schedule to retrieve.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the schedule to retrieve.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the schedule to retrieve.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.GetBackupScheduleRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.GetBackupScheduleRequest) + private static final com.google.spanner.admin.database.v1.GetBackupScheduleRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.GetBackupScheduleRequest(); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetBackupScheduleRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return 
PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.GetBackupScheduleRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupScheduleRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupScheduleRequestOrBuilder.java new file mode 100644 index 000000000000..1eaf5055ae40 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupScheduleRequestOrBuilder.java @@ -0,0 +1,62 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup_schedule.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface GetBackupScheduleRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.GetBackupScheduleRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the schedule to retrieve.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Required. The name of the schedule to retrieve.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlRequest.java new file mode 100644 index 000000000000..a65174c9b146 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlRequest.java @@ -0,0 +1,627 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.GetDatabaseDdlRequest} + */ +@com.google.protobuf.Generated +public final class GetDatabaseDdlRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.GetDatabaseDdlRequest) + GetDatabaseDdlRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetDatabaseDdlRequest"); + } + + // Use GetDatabaseDdlRequest.newBuilder() to construct. + private GetDatabaseDdlRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetDatabaseDdlRequest() { + database_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_GetDatabaseDdlRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_GetDatabaseDdlRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.GetDatabaseDdlRequest.class, + com.google.spanner.admin.database.v1.GetDatabaseDdlRequest.Builder.class); + } + + public static final int DATABASE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object database_ = ""; + + /** + * + * + *
    +   * Required. The database whose schema we wish to get.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + @java.lang.Override + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The database whose schema we wish to get.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, database_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, database_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.GetDatabaseDdlRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.GetDatabaseDdlRequest other = + (com.google.spanner.admin.database.v1.GetDatabaseDdlRequest) obj; + + if (!getDatabase().equals(other.getDatabase())) return false; + if 
(!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getDatabase().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public 
static com.google.spanner.admin.database.v1.GetDatabaseDdlRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.GetDatabaseDdlRequest prototype) { 
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.GetDatabaseDdlRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.GetDatabaseDdlRequest) + com.google.spanner.admin.database.v1.GetDatabaseDdlRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_GetDatabaseDdlRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_GetDatabaseDdlRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.GetDatabaseDdlRequest.class, + com.google.spanner.admin.database.v1.GetDatabaseDdlRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.GetDatabaseDdlRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + database_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_GetDatabaseDdlRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.GetDatabaseDdlRequest getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.GetDatabaseDdlRequest.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.admin.database.v1.GetDatabaseDdlRequest build() { + com.google.spanner.admin.database.v1.GetDatabaseDdlRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.GetDatabaseDdlRequest buildPartial() { + com.google.spanner.admin.database.v1.GetDatabaseDdlRequest result = + new com.google.spanner.admin.database.v1.GetDatabaseDdlRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.GetDatabaseDdlRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.database_ = database_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.GetDatabaseDdlRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.GetDatabaseDdlRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.GetDatabaseDdlRequest other) { + if (other == com.google.spanner.admin.database.v1.GetDatabaseDdlRequest.getDefaultInstance()) + return this; + if (!other.getDatabase().isEmpty()) { + database_ = other.database_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); 
+ switch (tag) { + case 0: + done = true; + break; + case 10: + { + database_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object database_ = ""; + + /** + * + * + *
    +     * Required. The database whose schema we wish to get.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The database whose schema we wish to get.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The database whose schema we wish to get.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The database to set. + * @return This builder for chaining. + */ + public Builder setDatabase(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The database whose schema we wish to get.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearDatabase() { + database_ = getDefaultInstance().getDatabase(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The database whose schema we wish to get.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for database to set. + * @return This builder for chaining. + */ + public Builder setDatabaseBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.GetDatabaseDdlRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.GetDatabaseDdlRequest) + private static final com.google.spanner.admin.database.v1.GetDatabaseDdlRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.GetDatabaseDdlRequest(); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetDatabaseDdlRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return 
PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.GetDatabaseDdlRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlRequestOrBuilder.java new file mode 100644 index 000000000000..6056b044d99f --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlRequestOrBuilder.java @@ -0,0 +1,62 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface GetDatabaseDdlRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.GetDatabaseDdlRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The database whose schema we wish to get.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + java.lang.String getDatabase(); + + /** + * + * + *
    +   * Required. The database whose schema we wish to get.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + com.google.protobuf.ByteString getDatabaseBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlResponse.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlResponse.java new file mode 100644 index 000000000000..9215a815e3cf --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlResponse.java @@ -0,0 +1,829 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The response for
    + * [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.GetDatabaseDdlResponse} + */ +@com.google.protobuf.Generated +public final class GetDatabaseDdlResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.GetDatabaseDdlResponse) + GetDatabaseDdlResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetDatabaseDdlResponse"); + } + + // Use GetDatabaseDdlResponse.newBuilder() to construct. + private GetDatabaseDdlResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetDatabaseDdlResponse() { + statements_ = com.google.protobuf.LazyStringArrayList.emptyList(); + protoDescriptors_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_GetDatabaseDdlResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_GetDatabaseDdlResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.GetDatabaseDdlResponse.class, + com.google.spanner.admin.database.v1.GetDatabaseDdlResponse.Builder.class); + } + + public static final int STATEMENTS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList statements_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * A list of formatted DDL statements defining the schema of the database
    +   * specified in the request.
    +   * 
    + * + * repeated string statements = 1; + * + * @return A list containing the statements. + */ + public com.google.protobuf.ProtocolStringList getStatementsList() { + return statements_; + } + + /** + * + * + *
    +   * A list of formatted DDL statements defining the schema of the database
    +   * specified in the request.
    +   * 
    + * + * repeated string statements = 1; + * + * @return The count of statements. + */ + public int getStatementsCount() { + return statements_.size(); + } + + /** + * + * + *
    +   * A list of formatted DDL statements defining the schema of the database
    +   * specified in the request.
    +   * 
    + * + * repeated string statements = 1; + * + * @param index The index of the element to return. + * @return The statements at the given index. + */ + public java.lang.String getStatements(int index) { + return statements_.get(index); + } + + /** + * + * + *
    +   * A list of formatted DDL statements defining the schema of the database
    +   * specified in the request.
    +   * 
    + * + * repeated string statements = 1; + * + * @param index The index of the value to return. + * @return The bytes of the statements at the given index. + */ + public com.google.protobuf.ByteString getStatementsBytes(int index) { + return statements_.getByteString(index); + } + + public static final int PROTO_DESCRIPTORS_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString protoDescriptors_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * Proto descriptors stored in the database.
    +   * Contains a protobuf-serialized
    +   * [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
    +   * For more details, see protobuffer [self
    +   * description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
    +   * 
    + * + * bytes proto_descriptors = 2; + * + * @return The protoDescriptors. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProtoDescriptors() { + return protoDescriptors_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < statements_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, statements_.getRaw(i)); + } + if (!protoDescriptors_.isEmpty()) { + output.writeBytes(2, protoDescriptors_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < statements_.size(); i++) { + dataSize += computeStringSizeNoTag(statements_.getRaw(i)); + } + size += dataSize; + size += 1 * getStatementsList().size(); + } + if (!protoDescriptors_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(2, protoDescriptors_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.GetDatabaseDdlResponse)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.GetDatabaseDdlResponse other = + (com.google.spanner.admin.database.v1.GetDatabaseDdlResponse) obj; + + if (!getStatementsList().equals(other.getStatementsList())) return false; + if (!getProtoDescriptors().equals(other.getProtoDescriptors())) return false; + if 
(!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getStatementsCount() > 0) { + hash = (37 * hash) + STATEMENTS_FIELD_NUMBER; + hash = (53 * hash) + getStatementsList().hashCode(); + } + hash = (37 * hash) + PROTO_DESCRIPTORS_FIELD_NUMBER; + hash = (53 * hash) + getProtoDescriptors().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlResponse parseFrom( + byte[] data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } 
+ + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.GetDatabaseDdlResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The response for
    +   * [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.GetDatabaseDdlResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.GetDatabaseDdlResponse) + com.google.spanner.admin.database.v1.GetDatabaseDdlResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_GetDatabaseDdlResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_GetDatabaseDdlResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.GetDatabaseDdlResponse.class, + com.google.spanner.admin.database.v1.GetDatabaseDdlResponse.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.GetDatabaseDdlResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + statements_ = com.google.protobuf.LazyStringArrayList.emptyList(); + protoDescriptors_ = com.google.protobuf.ByteString.EMPTY; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_GetDatabaseDdlResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.GetDatabaseDdlResponse getDefaultInstanceForType() { + return 
com.google.spanner.admin.database.v1.GetDatabaseDdlResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.GetDatabaseDdlResponse build() { + com.google.spanner.admin.database.v1.GetDatabaseDdlResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.GetDatabaseDdlResponse buildPartial() { + com.google.spanner.admin.database.v1.GetDatabaseDdlResponse result = + new com.google.spanner.admin.database.v1.GetDatabaseDdlResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.GetDatabaseDdlResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + statements_.makeImmutable(); + result.statements_ = statements_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.protoDescriptors_ = protoDescriptors_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.GetDatabaseDdlResponse) { + return mergeFrom((com.google.spanner.admin.database.v1.GetDatabaseDdlResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.GetDatabaseDdlResponse other) { + if (other == com.google.spanner.admin.database.v1.GetDatabaseDdlResponse.getDefaultInstance()) + return this; + if (!other.statements_.isEmpty()) { + if (statements_.isEmpty()) { + statements_ = other.statements_; + bitField0_ |= 0x00000001; + } else { + ensureStatementsIsMutable(); + statements_.addAll(other.statements_); + } + onChanged(); + } + if (!other.getProtoDescriptors().isEmpty()) { + setProtoDescriptors(other.getProtoDescriptors()); + } + 
this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureStatementsIsMutable(); + statements_.add(s); + break; + } // case 10 + case 18: + { + protoDescriptors_ = input.readBytes(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList statements_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureStatementsIsMutable() { + if (!statements_.isModifiable()) { + statements_ = new com.google.protobuf.LazyStringArrayList(statements_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
    +     * A list of formatted DDL statements defining the schema of the database
    +     * specified in the request.
    +     * 
    + * + * repeated string statements = 1; + * + * @return A list containing the statements. + */ + public com.google.protobuf.ProtocolStringList getStatementsList() { + statements_.makeImmutable(); + return statements_; + } + + /** + * + * + *
    +     * A list of formatted DDL statements defining the schema of the database
    +     * specified in the request.
    +     * 
    + * + * repeated string statements = 1; + * + * @return The count of statements. + */ + public int getStatementsCount() { + return statements_.size(); + } + + /** + * + * + *
    +     * A list of formatted DDL statements defining the schema of the database
    +     * specified in the request.
    +     * 
    + * + * repeated string statements = 1; + * + * @param index The index of the element to return. + * @return The statements at the given index. + */ + public java.lang.String getStatements(int index) { + return statements_.get(index); + } + + /** + * + * + *
    +     * A list of formatted DDL statements defining the schema of the database
    +     * specified in the request.
    +     * 
    + * + * repeated string statements = 1; + * + * @param index The index of the value to return. + * @return The bytes of the statements at the given index. + */ + public com.google.protobuf.ByteString getStatementsBytes(int index) { + return statements_.getByteString(index); + } + + /** + * + * + *
    +     * A list of formatted DDL statements defining the schema of the database
    +     * specified in the request.
    +     * 
    + * + * repeated string statements = 1; + * + * @param index The index to set the value at. + * @param value The statements to set. + * @return This builder for chaining. + */ + public Builder setStatements(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatementsIsMutable(); + statements_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A list of formatted DDL statements defining the schema of the database
    +     * specified in the request.
    +     * 
    + * + * repeated string statements = 1; + * + * @param value The statements to add. + * @return This builder for chaining. + */ + public Builder addStatements(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatementsIsMutable(); + statements_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A list of formatted DDL statements defining the schema of the database
    +     * specified in the request.
    +     * 
    + * + * repeated string statements = 1; + * + * @param values The statements to add. + * @return This builder for chaining. + */ + public Builder addAllStatements(java.lang.Iterable values) { + ensureStatementsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, statements_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A list of formatted DDL statements defining the schema of the database
    +     * specified in the request.
    +     * 
    + * + * repeated string statements = 1; + * + * @return This builder for chaining. + */ + public Builder clearStatements() { + statements_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A list of formatted DDL statements defining the schema of the database
    +     * specified in the request.
    +     * 
    + * + * repeated string statements = 1; + * + * @param value The bytes of the statements to add. + * @return This builder for chaining. + */ + public Builder addStatementsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureStatementsIsMutable(); + statements_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString protoDescriptors_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * Proto descriptors stored in the database.
    +     * Contains a protobuf-serialized
    +     * [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
    +     * For more details, see protobuffer [self
    +     * description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
    +     * 
    + * + * bytes proto_descriptors = 2; + * + * @return The protoDescriptors. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProtoDescriptors() { + return protoDescriptors_; + } + + /** + * + * + *
    +     * Proto descriptors stored in the database.
    +     * Contains a protobuf-serialized
    +     * [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
    +     * For more details, see protobuffer [self
    +     * description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
    +     * 
    + * + * bytes proto_descriptors = 2; + * + * @param value The protoDescriptors to set. + * @return This builder for chaining. + */ + public Builder setProtoDescriptors(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + protoDescriptors_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Proto descriptors stored in the database.
    +     * Contains a protobuf-serialized
    +     * [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
    +     * For more details, see protobuffer [self
    +     * description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
    +     * 
    + * + * bytes proto_descriptors = 2; + * + * @return This builder for chaining. + */ + public Builder clearProtoDescriptors() { + bitField0_ = (bitField0_ & ~0x00000002); + protoDescriptors_ = getDefaultInstance().getProtoDescriptors(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.GetDatabaseDdlResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.GetDatabaseDdlResponse) + private static final com.google.spanner.admin.database.v1.GetDatabaseDdlResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.GetDatabaseDdlResponse(); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseDdlResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetDatabaseDdlResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.spanner.admin.database.v1.GetDatabaseDdlResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlResponseOrBuilder.java new file mode 100644 index 000000000000..8404df27654e --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlResponseOrBuilder.java @@ -0,0 +1,103 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface GetDatabaseDdlResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.GetDatabaseDdlResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * A list of formatted DDL statements defining the schema of the database
    +   * specified in the request.
    +   * 
    + * + * repeated string statements = 1; + * + * @return A list containing the statements. + */ + java.util.List getStatementsList(); + + /** + * + * + *
    +   * A list of formatted DDL statements defining the schema of the database
    +   * specified in the request.
    +   * 
    + * + * repeated string statements = 1; + * + * @return The count of statements. + */ + int getStatementsCount(); + + /** + * + * + *
    +   * A list of formatted DDL statements defining the schema of the database
    +   * specified in the request.
    +   * 
    + * + * repeated string statements = 1; + * + * @param index The index of the element to return. + * @return The statements at the given index. + */ + java.lang.String getStatements(int index); + + /** + * + * + *
    +   * A list of formatted DDL statements defining the schema of the database
    +   * specified in the request.
    +   * 
    + * + * repeated string statements = 1; + * + * @param index The index of the value to return. + * @return The bytes of the statements at the given index. + */ + com.google.protobuf.ByteString getStatementsBytes(int index); + + /** + * + * + *
    +   * Proto descriptors stored in the database.
    +   * Contains a protobuf-serialized
    +   * [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
    +   * For more details, see protobuffer [self
    +   * description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
    +   * 
    + * + * bytes proto_descriptors = 2; + * + * @return The protoDescriptors. + */ + com.google.protobuf.ByteString getProtoDescriptors(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseRequest.java new file mode 100644 index 000000000000..66ba99f8662d --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseRequest.java @@ -0,0 +1,620 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.GetDatabaseRequest} + */ +@com.google.protobuf.Generated +public final class GetDatabaseRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.GetDatabaseRequest) + GetDatabaseRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetDatabaseRequest"); + } + + // Use GetDatabaseRequest.newBuilder() to construct. + private GetDatabaseRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetDatabaseRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_GetDatabaseRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_GetDatabaseRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.GetDatabaseRequest.class, + com.google.spanner.admin.database.v1.GetDatabaseRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Required. The name of the requested database. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the requested database. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.GetDatabaseRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.GetDatabaseRequest other = + (com.google.spanner.admin.database.v1.GetDatabaseRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + 
@java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.GetDatabaseRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseRequest parseFrom( + java.io.InputStream input) throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.GetDatabaseRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return 
this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.GetDatabaseRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.GetDatabaseRequest) + com.google.spanner.admin.database.v1.GetDatabaseRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_GetDatabaseRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_GetDatabaseRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.GetDatabaseRequest.class, + com.google.spanner.admin.database.v1.GetDatabaseRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.GetDatabaseRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_GetDatabaseRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.GetDatabaseRequest getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.GetDatabaseRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.GetDatabaseRequest build() { + 
com.google.spanner.admin.database.v1.GetDatabaseRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.GetDatabaseRequest buildPartial() { + com.google.spanner.admin.database.v1.GetDatabaseRequest result = + new com.google.spanner.admin.database.v1.GetDatabaseRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.GetDatabaseRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.GetDatabaseRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.GetDatabaseRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.GetDatabaseRequest other) { + if (other == com.google.spanner.admin.database.v1.GetDatabaseRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + 
bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Required. The name of the requested database. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the requested database. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the requested database. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the requested database. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the requested database. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.GetDatabaseRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.GetDatabaseRequest) + private static final com.google.spanner.admin.database.v1.GetDatabaseRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.GetDatabaseRequest(); + } + + public static com.google.spanner.admin.database.v1.GetDatabaseRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetDatabaseRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + 
public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.GetDatabaseRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseRequestOrBuilder.java new file mode 100644 index 000000000000..75afb8431e31 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseRequestOrBuilder.java @@ -0,0 +1,60 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface GetDatabaseRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.GetDatabaseRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the requested database. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Required. The name of the requested database. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/IncrementalBackupSpec.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/IncrementalBackupSpec.java new file mode 100644 index 000000000000..159e18a5b719 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/IncrementalBackupSpec.java @@ -0,0 +1,407 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The specification for incremental backup chains.
    + * An incremental backup stores the delta of changes between a previous
    + * backup and the database contents at a given version time. An
    + * incremental backup chain consists of a full backup and zero or more
    + * successive incremental backups. The first backup created for an
    + * incremental backup chain is always a full backup.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.IncrementalBackupSpec} + */ +@com.google.protobuf.Generated +public final class IncrementalBackupSpec extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.IncrementalBackupSpec) + IncrementalBackupSpecOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "IncrementalBackupSpec"); + } + + // Use IncrementalBackupSpec.newBuilder() to construct. + private IncrementalBackupSpec(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private IncrementalBackupSpec() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.IncrementalBackupSpec.class, + com.google.spanner.admin.database.v1.IncrementalBackupSpec.Builder.class); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + 
getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.IncrementalBackupSpec)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.IncrementalBackupSpec other = + (com.google.spanner.admin.database.v1.IncrementalBackupSpec) obj; + + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec 
parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.IncrementalBackupSpec prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The specification for incremental backup chains.
    +   * An incremental backup stores the delta of changes between a previous
    +   * backup and the database contents at a given version time. An
    +   * incremental backup chain consists of a full backup and zero or more
    +   * successive incremental backups. The first backup created for an
    +   * incremental backup chain is always a full backup.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.IncrementalBackupSpec} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.IncrementalBackupSpec) + com.google.spanner.admin.database.v1.IncrementalBackupSpecOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.IncrementalBackupSpec.class, + com.google.spanner.admin.database.v1.IncrementalBackupSpec.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.IncrementalBackupSpec.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.IncrementalBackupSpec getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.IncrementalBackupSpec.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.IncrementalBackupSpec build() { + 
com.google.spanner.admin.database.v1.IncrementalBackupSpec result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.IncrementalBackupSpec buildPartial() { + com.google.spanner.admin.database.v1.IncrementalBackupSpec result = + new com.google.spanner.admin.database.v1.IncrementalBackupSpec(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.IncrementalBackupSpec) { + return mergeFrom((com.google.spanner.admin.database.v1.IncrementalBackupSpec) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.IncrementalBackupSpec other) { + if (other == com.google.spanner.admin.database.v1.IncrementalBackupSpec.getDefaultInstance()) + return this; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + // 
@@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.IncrementalBackupSpec) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.IncrementalBackupSpec) + private static final com.google.spanner.admin.database.v1.IncrementalBackupSpec DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.IncrementalBackupSpec(); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public IncrementalBackupSpec parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.IncrementalBackupSpec getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/IncrementalBackupSpecOrBuilder.java 
b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/IncrementalBackupSpecOrBuilder.java new file mode 100644 index 000000000000..46ed7d327976 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/IncrementalBackupSpecOrBuilder.java @@ -0,0 +1,27 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface IncrementalBackupSpecOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.IncrementalBackupSpec) + com.google.protobuf.MessageOrBuilder {} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InstanceName.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InstanceName.java new file mode 100644 index 000000000000..0c1510da67d8 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InstanceName.java @@ -0,0 +1,192 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.spanner.admin.database.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class InstanceName implements ResourceName { + private static final PathTemplate PROJECT_INSTANCE = + PathTemplate.createWithoutUrlEncoding("projects/{project}/instances/{instance}"); + private volatile Map fieldValuesMap; + private final String project; + private final String instance; + + @Deprecated + protected InstanceName() { + project = null; + instance = null; + } + + private InstanceName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + instance = Preconditions.checkNotNull(builder.getInstance()); + } + + public String getProject() { + return project; + } + + public String getInstance() { + return instance; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static InstanceName of(String project, String instance) { + return newBuilder().setProject(project).setInstance(instance).build(); + } + + public static String format(String project, String instance) { + return newBuilder().setProject(project).setInstance(instance).build().toString(); + } + + public static InstanceName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_INSTANCE.validatedMatch( + formattedString, "InstanceName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("instance")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (InstanceName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + 
return PROJECT_INSTANCE.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (instance != null) { + fieldMapBuilder.put("instance", instance); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_INSTANCE.instantiate("project", project, "instance", instance); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + InstanceName that = ((InstanceName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.instance, that.instance); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(instance); + return h; + } + + /** Builder for projects/{project}/instances/{instance}. 
*/ + public static class Builder { + private String project; + private String instance; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getInstance() { + return instance; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setInstance(String instance) { + this.instance = instance; + return this; + } + + private Builder(InstanceName instanceName) { + this.project = instanceName.project; + this.instance = instanceName.instance; + } + + public InstanceName build() { + return new InstanceName(this); + } + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationRequest.java new file mode 100644 index 000000000000..bafd0df94a63 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationRequest.java @@ -0,0 +1,1385 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Internal request proto, do not use directly.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest} + */ +@com.google.protobuf.Generated +public final class InternalUpdateGraphOperationRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest) + InternalUpdateGraphOperationRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "InternalUpdateGraphOperationRequest"); + } + + // Use InternalUpdateGraphOperationRequest.newBuilder() to construct. + private InternalUpdateGraphOperationRequest( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private InternalUpdateGraphOperationRequest() { + database_ = ""; + operationId_ = ""; + vmIdentityToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest.class, + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest.Builder.class); + } + + private int bitField0_; + public static final int DATABASE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object database_ = 
""; + + /** + * + * + *
    +   * Internal field, do not use directly.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + @java.lang.Override + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } + } + + /** + * + * + *
    +   * Internal field, do not use directly.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OPERATION_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object operationId_ = ""; + + /** + * + * + *
    +   * Internal field, do not use directly.
    +   * 
    + * + * string operation_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The operationId. + */ + @java.lang.Override + public java.lang.String getOperationId() { + java.lang.Object ref = operationId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + operationId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Internal field, do not use directly.
    +   * 
    + * + * string operation_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for operationId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getOperationIdBytes() { + java.lang.Object ref = operationId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + operationId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int VM_IDENTITY_TOKEN_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object vmIdentityToken_ = ""; + + /** + * + * + *
    +   * Internal field, do not use directly.
    +   * 
    + * + * string vm_identity_token = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The vmIdentityToken. + */ + @java.lang.Override + public java.lang.String getVmIdentityToken() { + java.lang.Object ref = vmIdentityToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + vmIdentityToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * Internal field, do not use directly.
    +   * 
    + * + * string vm_identity_token = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for vmIdentityToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getVmIdentityTokenBytes() { + java.lang.Object ref = vmIdentityToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + vmIdentityToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROGRESS_FIELD_NUMBER = 3; + private double progress_ = 0D; + + /** + * + * + *
    +   * Internal field, do not use directly.
    +   * 
    + * + * double progress = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The progress. + */ + @java.lang.Override + public double getProgress() { + return progress_; + } + + public static final int STATUS_FIELD_NUMBER = 6; + private com.google.rpc.Status status_; + + /** + * + * + *
    +   * Internal field, do not use directly.
    +   * 
    + * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the status field is set. + */ + @java.lang.Override + public boolean hasStatus() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Internal field, do not use directly.
    +   * 
    + * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The status. + */ + @java.lang.Override + public com.google.rpc.Status getStatus() { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } + + /** + * + * + *
    +   * Internal field, do not use directly.
    +   * 
    + * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public com.google.rpc.StatusOrBuilder getStatusOrBuilder() { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, database_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(operationId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, operationId_); + } + if (java.lang.Double.doubleToRawLongBits(progress_) != 0) { + output.writeDouble(3, progress_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(vmIdentityToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, vmIdentityToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(6, getStatus()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, database_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(operationId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, operationId_); + } + if (java.lang.Double.doubleToRawLongBits(progress_) != 0) { + size += com.google.protobuf.CodedOutputStream.computeDoubleSize(3, progress_); + } + if 
(!com.google.protobuf.GeneratedMessage.isStringEmpty(vmIdentityToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, vmIdentityToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, getStatus()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest other = + (com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest) obj; + + if (!getDatabase().equals(other.getDatabase())) return false; + if (!getOperationId().equals(other.getOperationId())) return false; + if (!getVmIdentityToken().equals(other.getVmIdentityToken())) return false; + if (java.lang.Double.doubleToLongBits(getProgress()) + != java.lang.Double.doubleToLongBits(other.getProgress())) return false; + if (hasStatus() != other.hasStatus()) return false; + if (hasStatus()) { + if (!getStatus().equals(other.getStatus())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getDatabase().hashCode(); + hash = (37 * hash) + OPERATION_ID_FIELD_NUMBER; + hash = (53 * hash) + getOperationId().hashCode(); + hash = (37 * hash) + VM_IDENTITY_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getVmIdentityToken().hashCode(); + hash = (37 * hash) + PROGRESS_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + 
java.lang.Double.doubleToLongBits(getProgress())); + if (hasStatus()) { + hash = (37 * hash) + STATUS_FIELD_NUMBER; + hash = (53 * hash) + getStatus().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest parseFrom( + java.io.InputStream 
input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest prototype) { + return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Internal request proto, do not use directly.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest) + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest.class, + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest.Builder + .class); + } + + // Construct using + // com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetStatusFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + database_ = ""; + operationId_ = ""; + vmIdentityToken_ = ""; + progress_ = 0D; + status_ = null; + if (statusBuilder_ != null) { + statusBuilder_.dispose(); + statusBuilder_ = null; + } + return this; + } + + @java.lang.Override + 
public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest build() { + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest buildPartial() { + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest result = + new com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.database_ = database_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.operationId_ = operationId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.vmIdentityToken_ = vmIdentityToken_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.progress_ = progress_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000010) != 0)) { + result.status_ = statusBuilder_ == null ? 
status_ : statusBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest) { + return mergeFrom( + (com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest other) { + if (other + == com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest + .getDefaultInstance()) return this; + if (!other.getDatabase().isEmpty()) { + database_ = other.database_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getOperationId().isEmpty()) { + operationId_ = other.operationId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getVmIdentityToken().isEmpty()) { + vmIdentityToken_ = other.vmIdentityToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (java.lang.Double.doubleToRawLongBits(other.getProgress()) != 0) { + setProgress(other.getProgress()); + } + if (other.hasStatus()) { + mergeStatus(other.getStatus()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + database_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + operationId_ = 
input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 25: + { + progress_ = input.readDouble(); + bitField0_ |= 0x00000008; + break; + } // case 25 + case 42: + { + vmIdentityToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 42 + case 50: + { + input.readMessage(internalGetStatusFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 50 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object database_ = ""; + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The database to set. + * @return This builder for chaining. + */ + public Builder setDatabase(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearDatabase() { + database_ = getDefaultInstance().getDatabase(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for database to set. + * @return This builder for chaining. + */ + public Builder setDatabaseBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object operationId_ = ""; + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * string operation_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The operationId. + */ + public java.lang.String getOperationId() { + java.lang.Object ref = operationId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + operationId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * string operation_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for operationId. + */ + public com.google.protobuf.ByteString getOperationIdBytes() { + java.lang.Object ref = operationId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + operationId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * string operation_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The operationId to set. + * @return This builder for chaining. + */ + public Builder setOperationId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + operationId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * string operation_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearOperationId() { + operationId_ = getDefaultInstance().getOperationId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * string operation_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for operationId to set. + * @return This builder for chaining. + */ + public Builder setOperationIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + operationId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object vmIdentityToken_ = ""; + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * string vm_identity_token = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The vmIdentityToken. + */ + public java.lang.String getVmIdentityToken() { + java.lang.Object ref = vmIdentityToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + vmIdentityToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * string vm_identity_token = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for vmIdentityToken. + */ + public com.google.protobuf.ByteString getVmIdentityTokenBytes() { + java.lang.Object ref = vmIdentityToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + vmIdentityToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * string vm_identity_token = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The vmIdentityToken to set. + * @return This builder for chaining. + */ + public Builder setVmIdentityToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + vmIdentityToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * string vm_identity_token = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearVmIdentityToken() { + vmIdentityToken_ = getDefaultInstance().getVmIdentityToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * string vm_identity_token = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for vmIdentityToken to set. + * @return This builder for chaining. + */ + public Builder setVmIdentityTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + vmIdentityToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private double progress_; + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * double progress = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The progress. + */ + @java.lang.Override + public double getProgress() { + return progress_; + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * double progress = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The progress to set. + * @return This builder for chaining. + */ + public Builder setProgress(double value) { + + progress_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * double progress = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearProgress() { + bitField0_ = (bitField0_ & ~0x00000008); + progress_ = 0D; + onChanged(); + return this; + } + + private com.google.rpc.Status status_; + private com.google.protobuf.SingleFieldBuilder< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + statusBuilder_; + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the status field is set. + */ + public boolean hasStatus() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The status. + */ + public com.google.rpc.Status getStatus() { + if (statusBuilder_ == null) { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } else { + return statusBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder setStatus(com.google.rpc.Status value) { + if (statusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + status_ = value; + } else { + statusBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder setStatus(com.google.rpc.Status.Builder builderForValue) { + if (statusBuilder_ == null) { + status_ = builderForValue.build(); + } else { + statusBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder mergeStatus(com.google.rpc.Status value) { + if (statusBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && status_ != null + && status_ != com.google.rpc.Status.getDefaultInstance()) { + getStatusBuilder().mergeFrom(value); + } else { + status_ = value; + } + } else { + statusBuilder_.mergeFrom(value); + } + if (status_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder clearStatus() { + bitField0_ = (bitField0_ & ~0x00000010); + status_ = null; + if (statusBuilder_ != null) { + statusBuilder_.dispose(); + statusBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + */ + public com.google.rpc.Status.Builder getStatusBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetStatusFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + */ + public com.google.rpc.StatusOrBuilder getStatusOrBuilder() { + if (statusBuilder_ != null) { + return statusBuilder_.getMessageOrBuilder(); + } else { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } + } + + /** + * + * + *
    +     * Internal field, do not use directly.
    +     * 
    + * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + internalGetStatusFieldBuilder() { + if (statusBuilder_ == null) { + statusBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.rpc.Status, + com.google.rpc.Status.Builder, + com.google.rpc.StatusOrBuilder>(getStatus(), getParentForChildren(), isClean()); + status_ = null; + } + return statusBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest) + private static final com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest(); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public InternalUpdateGraphOperationRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationRequestOrBuilder.java new file mode 100644 index 000000000000..025fc44530e1 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationRequestOrBuilder.java @@ -0,0 +1,160 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface InternalUpdateGraphOperationRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Internal field, do not use directly.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + java.lang.String getDatabase(); + + /** + * + * + *
    +   * Internal field, do not use directly.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + com.google.protobuf.ByteString getDatabaseBytes(); + + /** + * + * + *
    +   * Internal field, do not use directly.
    +   * 
    + * + * string operation_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The operationId. + */ + java.lang.String getOperationId(); + + /** + * + * + *
    +   * Internal field, do not use directly.
    +   * 
    + * + * string operation_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for operationId. + */ + com.google.protobuf.ByteString getOperationIdBytes(); + + /** + * + * + *
    +   * Internal field, do not use directly.
    +   * 
    + * + * string vm_identity_token = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The vmIdentityToken. + */ + java.lang.String getVmIdentityToken(); + + /** + * + * + *
    +   * Internal field, do not use directly.
    +   * 
    + * + * string vm_identity_token = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for vmIdentityToken. + */ + com.google.protobuf.ByteString getVmIdentityTokenBytes(); + + /** + * + * + *
    +   * Internal field, do not use directly.
    +   * 
    + * + * double progress = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The progress. + */ + double getProgress(); + + /** + * + * + *
    +   * Internal field, do not use directly.
    +   * 
    + * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the status field is set. + */ + boolean hasStatus(); + + /** + * + * + *
    +   * Internal field, do not use directly.
    +   * 
    + * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The status. + */ + com.google.rpc.Status getStatus(); + + /** + * + * + *
    +   * Internal field, do not use directly.
    +   * 
    + * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + */ + com.google.rpc.StatusOrBuilder getStatusOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationResponse.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationResponse.java new file mode 100644 index 000000000000..e4c6f7589c16 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationResponse.java @@ -0,0 +1,415 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Internal response proto, do not use directly.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse} + */ +@com.google.protobuf.Generated +public final class InternalUpdateGraphOperationResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse) + InternalUpdateGraphOperationResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "InternalUpdateGraphOperationResponse"); + } + + // Use InternalUpdateGraphOperationResponse.newBuilder() to construct. + private InternalUpdateGraphOperationResponse( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private InternalUpdateGraphOperationResponse() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse.class, + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse.Builder + .class); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized 
== 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse other = + (com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse) obj; + + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Internal response proto, do not use directly.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse) + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse.class, + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse.Builder + .class); + } + + // Construct using + // com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + getDefaultInstanceForType() { + return 
com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse build() { + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + buildPartial() { + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse result = + new com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse) { + return mergeFrom( + (com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse other) { + if (other + == com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + .getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: + { + if (!super.parseUnknownField(input, 
extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse) + private static final com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse(); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public InternalUpdateGraphOperationResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser 
getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationResponseOrBuilder.java new file mode 100644 index 000000000000..17538af31a42 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationResponseOrBuilder.java @@ -0,0 +1,27 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface InternalUpdateGraphOperationResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse) + com.google.protobuf.MessageOrBuilder {} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsRequest.java new file mode 100644 index 000000000000..2371ce6326fd --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsRequest.java @@ -0,0 +1,1617 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListBackupOperationsRequest} + */ +@com.google.protobuf.Generated +public final class ListBackupOperationsRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.ListBackupOperationsRequest) + ListBackupOperationsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListBackupOperationsRequest"); + } + + // Use ListBackupOperationsRequest.newBuilder() to construct. + private ListBackupOperationsRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListBackupOperationsRequest() { + parent_ = ""; + filter_ = ""; + pageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_ListBackupOperationsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_ListBackupOperationsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListBackupOperationsRequest.class, + com.google.spanner.admin.database.v1.ListBackupOperationsRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
    +   * Required. The instance of the backup operations. Values are of
    +   * the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The instance of the backup operations. Values are of
    +   * the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FILTER_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object filter_ = ""; + + /** + * + * + *
    +   * An expression that filters the list of returned backup operations.
    +   *
    +   * A filter expression consists of a field name, a
    +   * comparison operator, and a value for filtering.
    +   * The value must be a string, a number, or a boolean. The comparison operator
    +   * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +   * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +   *
    +   * The following fields in the [operation][google.longrunning.Operation]
    +   * are eligible for filtering:
    +   *
    +   * * `name` - The name of the long-running operation
    +   * * `done` - False if the operation is in progress, else true.
    +   * * `metadata.@type` - the type of metadata. For example, the type string
    +   * for
    +   * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
    +   * is
    +   * `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`.
    +   * * `metadata.<field_name>` - any field in metadata.value.
    +   * `metadata.@type` must be specified first if filtering on metadata
    +   * fields.
    +   * * `error` - Error associated with the long-running operation.
    +   * * `response.@type` - the type of response.
    +   * * `response.<field_name>` - any field in response.value.
    +   *
    +   * You can combine multiple expressions by enclosing each expression in
    +   * parentheses. By default, expressions are combined with AND logic, but
    +   * you can specify AND, OR, and NOT logic explicitly.
    +   *
    +   * Here are a few examples:
    +   *
    +   * * `done:true` - The operation is complete.
    +   * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +   * `metadata.database:prod` - Returns operations where:
    +   * * The operation's metadata type is
    +   * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +   * * The source database name of backup contains the string "prod".
    +   * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +   * `(metadata.name:howl) AND` \
    +   * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
    +   * `(error:*)` - Returns operations where:
    +   * * The operation's metadata type is
    +   * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +   * * The backup name contains the string "howl".
    +   * * The operation started before 2018-03-28T14:50:00Z.
    +   * * The operation resulted in an error.
    +   * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata) AND` \
    +   * `(metadata.source_backup:test) AND` \
    +   * `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \
    +   * `(error:*)` - Returns operations where:
    +   * * The operation's metadata type is
    +   * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
    +   * * The source backup name contains the string "test".
    +   * * The operation started before 2022-01-18T14:50:00Z.
    +   * * The operation resulted in an error.
    +   * * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +   * `(metadata.database:test_db)) OR` \
    +   * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata)
    +   * AND` \
    +   * `(metadata.source_backup:test_bkp)) AND` \
    +   * `(error:*)` - Returns operations where:
    +   * * The operation's metadata matches either of criteria:
    +   * * The operation's metadata type is
    +   * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
    +   * AND the source database name of the backup contains the string
    +   * "test_db"
    +   * * The operation's metadata type is
    +   * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]
    +   * AND the source backup name contains the string "test_bkp"
    +   * * The operation resulted in an error.
    +   * 
    + * + * string filter = 2; + * + * @return The filter. + */ + @java.lang.Override + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } + } + + /** + * + * + *
    +   * An expression that filters the list of returned backup operations.
    +   *
    +   * A filter expression consists of a field name, a
    +   * comparison operator, and a value for filtering.
    +   * The value must be a string, a number, or a boolean. The comparison operator
    +   * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +   * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +   *
    +   * The following fields in the [operation][google.longrunning.Operation]
    +   * are eligible for filtering:
    +   *
    +   * * `name` - The name of the long-running operation
    +   * * `done` - False if the operation is in progress, else true.
    +   * * `metadata.@type` - the type of metadata. For example, the type string
    +   * for
    +   * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
    +   * is
    +   * `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`.
    +   * * `metadata.<field_name>` - any field in metadata.value.
    +   * `metadata.@type` must be specified first if filtering on metadata
    +   * fields.
    +   * * `error` - Error associated with the long-running operation.
    +   * * `response.@type` - the type of response.
    +   * * `response.<field_name>` - any field in response.value.
    +   *
    +   * You can combine multiple expressions by enclosing each expression in
    +   * parentheses. By default, expressions are combined with AND logic, but
    +   * you can specify AND, OR, and NOT logic explicitly.
    +   *
    +   * Here are a few examples:
    +   *
    +   * * `done:true` - The operation is complete.
    +   * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +   * `metadata.database:prod` - Returns operations where:
    +   * * The operation's metadata type is
    +   * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +   * * The source database name of backup contains the string "prod".
    +   * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +   * `(metadata.name:howl) AND` \
    +   * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
    +   * `(error:*)` - Returns operations where:
    +   * * The operation's metadata type is
    +   * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +   * * The backup name contains the string "howl".
    +   * * The operation started before 2018-03-28T14:50:00Z.
    +   * * The operation resulted in an error.
    +   * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata) AND` \
    +   * `(metadata.source_backup:test) AND` \
    +   * `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \
    +   * `(error:*)` - Returns operations where:
    +   * * The operation's metadata type is
    +   * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
    +   * * The source backup name contains the string "test".
    +   * * The operation started before 2022-01-18T14:50:00Z.
    +   * * The operation resulted in an error.
    +   * * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +   * `(metadata.database:test_db)) OR` \
    +   * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata)
    +   * AND` \
    +   * `(metadata.source_backup:test_bkp)) AND` \
    +   * `(error:*)` - Returns operations where:
    +   * * The operation's metadata matches either of criteria:
    +   * * The operation's metadata type is
    +   * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
    +   * AND the source database name of the backup contains the string
    +   * "test_db"
    +   * * The operation's metadata type is
    +   * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]
    +   * AND the source backup name contains the string "test_bkp"
    +   * * The operation resulted in an error.
    +   * 
    + * + * string filter = 2; + * + * @return The bytes for filter. + */ + @java.lang.Override + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 3; + private int pageSize_ = 0; + + /** + * + * + *
    +   * Number of operations to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 3; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
    +   * from a previous
    +   * [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
    +   * 
    + * + * string page_token = 4; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
    +   * from a previous
    +   * [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
    +   * 
    + * + * string page_token = 4; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, filter_); + } + if (pageSize_ != 0) { + output.writeInt32(3, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, pageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, filter_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(3, pageSize_); + } + if 
(!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, pageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.ListBackupOperationsRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.ListBackupOperationsRequest other = + (com.google.spanner.admin.database.v1.ListBackupOperationsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getFilter().equals(other.getFilter())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + FILTER_FIELD_NUMBER; + hash = (53 * hash) + getFilter().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsRequest parseFrom( + java.nio.ByteBuffer data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.ListBackupOperationsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListBackupOperationsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.ListBackupOperationsRequest) + com.google.spanner.admin.database.v1.ListBackupOperationsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_ListBackupOperationsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_ListBackupOperationsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListBackupOperationsRequest.class, + com.google.spanner.admin.database.v1.ListBackupOperationsRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.ListBackupOperationsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + filter_ = ""; + pageSize_ = 0; + pageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_ListBackupOperationsRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupOperationsRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.ListBackupOperationsRequest.getDefaultInstance(); + } + + 
@java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupOperationsRequest build() { + com.google.spanner.admin.database.v1.ListBackupOperationsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupOperationsRequest buildPartial() { + com.google.spanner.admin.database.v1.ListBackupOperationsRequest result = + new com.google.spanner.admin.database.v1.ListBackupOperationsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.ListBackupOperationsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.filter_ = filter_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.pageToken_ = pageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.ListBackupOperationsRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.ListBackupOperationsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.database.v1.ListBackupOperationsRequest other) { + if (other + == com.google.spanner.admin.database.v1.ListBackupOperationsRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getFilter().isEmpty()) { + filter_ = other.filter_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if 
(!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000008; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + filter_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
    +     * Required. The instance of the backup operations. Values are of
    +     * the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The instance of the backup operations. Values are of
    +     * the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The instance of the backup operations. Values are of
    +     * the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance of the backup operations. Values are of
    +     * the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance of the backup operations. Values are of
    +     * the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object filter_ = ""; + + /** + * + * + *
    +     * An expression that filters the list of returned backup operations.
    +     *
    +     * A filter expression consists of a field name, a
    +     * comparison operator, and a value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the [operation][google.longrunning.Operation]
    +     * are eligible for filtering:
    +     *
    +     * * `name` - The name of the long-running operation
    +     * * `done` - False if the operation is in progress, else true.
    +     * * `metadata.@type` - the type of metadata. For example, the type string
    +     * for
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
    +     * is
    +     * `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`.
    +     * * `metadata.<field_name>` - any field in metadata.value.
    +     * `metadata.@type` must be specified first if filtering on metadata
    +     * fields.
    +     * * `error` - Error associated with the long-running operation.
    +     * * `response.@type` - the type of response.
    +     * * `response.<field_name>` - any field in response.value.
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic, but
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `done:true` - The operation is complete.
    +     * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +     * `metadata.database:prod` - Returns operations where:
    +     * * The operation's metadata type is
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +     * * The source database name of backup contains the string "prod".
    +     * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +     * `(metadata.name:howl) AND` \
    +     * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
    +     * `(error:*)` - Returns operations where:
    +     * * The operation's metadata type is
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +     * * The backup name contains the string "howl".
    +     * * The operation started before 2018-03-28T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata) AND` \
    +     * `(metadata.source_backup:test) AND` \
    +     * `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \
    +     * `(error:*)` - Returns operations where:
    +     * * The operation's metadata type is
    +     * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
    +     * * The source backup name contains the string "test".
    +     * * The operation started before 2022-01-18T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +     * `(metadata.database:test_db)) OR` \
    +     * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata)
    +     * AND` \
    +     * `(metadata.source_backup:test_bkp)) AND` \
    +     * `(error:*)` - Returns operations where:
    +     * * The operation's metadata matches either of criteria:
    +     * * The operation's metadata type is
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
    +     * AND the source database name of the backup contains the string
    +     * "test_db"
    +     * * The operation's metadata type is
    +     * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]
    +     * AND the source backup name contains the string "test_bkp"
    +     * * The operation resulted in an error.
    +     * 
    + * + * string filter = 2; + * + * @return The filter. + */ + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * An expression that filters the list of returned backup operations.
    +     *
    +     * A filter expression consists of a field name, a
    +     * comparison operator, and a value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the [operation][google.longrunning.Operation]
    +     * are eligible for filtering:
    +     *
    +     * * `name` - The name of the long-running operation
    +     * * `done` - False if the operation is in progress, else true.
    +     * * `metadata.@type` - the type of metadata. For example, the type string
    +     * for
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
    +     * is
    +     * `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`.
    +     * * `metadata.<field_name>` - any field in metadata.value.
    +     * `metadata.@type` must be specified first if filtering on metadata
    +     * fields.
    +     * * `error` - Error associated with the long-running operation.
    +     * * `response.@type` - the type of response.
    +     * * `response.<field_name>` - any field in response.value.
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic, but
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `done:true` - The operation is complete.
    +     * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +     * `metadata.database:prod` - Returns operations where:
    +     * * The operation's metadata type is
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +     * * The source database name of backup contains the string "prod".
    +     * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +     * `(metadata.name:howl) AND` \
    +     * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
    +     * `(error:*)` - Returns operations where:
    +     * * The operation's metadata type is
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +     * * The backup name contains the string "howl".
    +     * * The operation started before 2018-03-28T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata) AND` \
    +     * `(metadata.source_backup:test) AND` \
    +     * `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \
    +     * `(error:*)` - Returns operations where:
    +     * * The operation's metadata type is
    +     * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
    +     * * The source backup name contains the string "test".
    +     * * The operation started before 2022-01-18T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +     * `(metadata.database:test_db)) OR` \
    +     * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata)
    +     * AND` \
    +     * `(metadata.source_backup:test_bkp)) AND` \
    +     * `(error:*)` - Returns operations where:
    +     * * The operation's metadata matches either of criteria:
    +     * * The operation's metadata type is
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
    +     * AND the source database name of the backup contains the string
    +     * "test_db"
    +     * * The operation's metadata type is
    +     * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]
    +     * AND the source backup name contains the string "test_bkp"
    +     * * The operation resulted in an error.
    +     * 
    + * + * string filter = 2; + * + * @return The bytes for filter. + */ + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * An expression that filters the list of returned backup operations.
    +     *
    +     * A filter expression consists of a field name, a
    +     * comparison operator, and a value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the [operation][google.longrunning.Operation]
    +     * are eligible for filtering:
    +     *
    +     * * `name` - The name of the long-running operation
    +     * * `done` - False if the operation is in progress, else true.
    +     * * `metadata.@type` - the type of metadata. For example, the type string
    +     * for
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
    +     * is
    +     * `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`.
    +     * * `metadata.<field_name>` - any field in metadata.value.
    +     * `metadata.@type` must be specified first if filtering on metadata
    +     * fields.
    +     * * `error` - Error associated with the long-running operation.
    +     * * `response.@type` - the type of response.
    +     * * `response.<field_name>` - any field in response.value.
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic, but
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `done:true` - The operation is complete.
    +     * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +     * `metadata.database:prod` - Returns operations where:
    +     * * The operation's metadata type is
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +     * * The source database name of backup contains the string "prod".
    +     * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +     * `(metadata.name:howl) AND` \
    +     * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
    +     * `(error:*)` - Returns operations where:
    +     * * The operation's metadata type is
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +     * * The backup name contains the string "howl".
    +     * * The operation started before 2018-03-28T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata) AND` \
    +     * `(metadata.source_backup:test) AND` \
    +     * `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \
    +     * `(error:*)` - Returns operations where:
    +     * * The operation's metadata type is
    +     * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
    +     * * The source backup name contains the string "test".
    +     * * The operation started before 2022-01-18T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +     * `(metadata.database:test_db)) OR` \
    +     * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata)
    +     * AND` \
    +     * `(metadata.source_backup:test_bkp)) AND` \
    +     * `(error:*)` - Returns operations where:
    +     * * The operation's metadata matches either of criteria:
    +     * * The operation's metadata type is
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
    +     * AND the source database name of the backup contains the string
    +     * "test_db"
    +     * * The operation's metadata type is
    +     * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]
    +     * AND the source backup name contains the string "test_bkp"
    +     * * The operation resulted in an error.
    +     * 
    + * + * string filter = 2; + * + * @param value The filter to set. + * @return This builder for chaining. + */ + public Builder setFilter(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + filter_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * An expression that filters the list of returned backup operations.
    +     *
    +     * A filter expression consists of a field name, a
    +     * comparison operator, and a value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the [operation][google.longrunning.Operation]
    +     * are eligible for filtering:
    +     *
    +     * * `name` - The name of the long-running operation
    +     * * `done` - False if the operation is in progress, else true.
    +     * * `metadata.@type` - the type of metadata. For example, the type string
    +     * for
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
    +     * is
    +     * `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`.
    +     * * `metadata.<field_name>` - any field in metadata.value.
    +     * `metadata.@type` must be specified first if filtering on metadata
    +     * fields.
    +     * * `error` - Error associated with the long-running operation.
    +     * * `response.@type` - the type of response.
    +     * * `response.<field_name>` - any field in response.value.
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic, but
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `done:true` - The operation is complete.
    +     * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +     * `metadata.database:prod` - Returns operations where:
    +     * * The operation's metadata type is
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +     * * The source database name of backup contains the string "prod".
    +     * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +     * `(metadata.name:howl) AND` \
    +     * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
    +     * `(error:*)` - Returns operations where:
    +     * * The operation's metadata type is
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +     * * The backup name contains the string "howl".
    +     * * The operation started before 2018-03-28T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata) AND` \
    +     * `(metadata.source_backup:test) AND` \
    +     * `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \
    +     * `(error:*)` - Returns operations where:
    +     * * The operation's metadata type is
    +     * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
    +     * * The source backup name contains the string "test".
    +     * * The operation started before 2022-01-18T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +     * `(metadata.database:test_db)) OR` \
    +     * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata)
    +     * AND` \
    +     * `(metadata.source_backup:test_bkp)) AND` \
    +     * `(error:*)` - Returns operations where:
    +     * * The operation's metadata matches either of criteria:
    +     * * The operation's metadata type is
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
    +     * AND the source database name of the backup contains the string
    +     * "test_db"
    +     * * The operation's metadata type is
    +     * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]
    +     * AND the source backup name contains the string "test_bkp"
    +     * * The operation resulted in an error.
    +     * 
    + * + * string filter = 2; + * + * @return This builder for chaining. + */ + public Builder clearFilter() { + filter_ = getDefaultInstance().getFilter(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * An expression that filters the list of returned backup operations.
    +     *
    +     * A filter expression consists of a field name, a
    +     * comparison operator, and a value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the [operation][google.longrunning.Operation]
    +     * are eligible for filtering:
    +     *
    +     * * `name` - The name of the long-running operation
    +     * * `done` - False if the operation is in progress, else true.
    +     * * `metadata.@type` - the type of metadata. For example, the type string
    +     * for
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
    +     * is
    +     * `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`.
    +     * * `metadata.<field_name>` - any field in metadata.value.
    +     * `metadata.@type` must be specified first if filtering on metadata
    +     * fields.
    +     * * `error` - Error associated with the long-running operation.
    +     * * `response.@type` - the type of response.
    +     * * `response.<field_name>` - any field in response.value.
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic, but
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `done:true` - The operation is complete.
    +     * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +     * `metadata.database:prod` - Returns operations where:
    +     * * The operation's metadata type is
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +     * * The source database name of backup contains the string "prod".
    +     * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +     * `(metadata.name:howl) AND` \
    +     * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
    +     * `(error:*)` - Returns operations where:
    +     * * The operation's metadata type is
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +     * * The backup name contains the string "howl".
    +     * * The operation started before 2018-03-28T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata) AND` \
    +     * `(metadata.source_backup:test) AND` \
    +     * `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \
    +     * `(error:*)` - Returns operations where:
    +     * * The operation's metadata type is
    +     * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
    +     * * The source backup name contains the string "test".
    +     * * The operation started before 2022-01-18T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +     * `(metadata.database:test_db)) OR` \
    +     * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata)
    +     * AND` \
    +     * `(metadata.source_backup:test_bkp)) AND` \
    +     * `(error:*)` - Returns operations where:
    +     * * The operation's metadata matches either of criteria:
    +     * * The operation's metadata type is
    +     * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
    +     * AND the source database name of the backup contains the string
    +     * "test_db"
    +     * * The operation's metadata type is
    +     * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]
    +     * AND the source backup name contains the string "test_bkp"
    +     * * The operation resulted in an error.
    +     * 
    + * + * string filter = 2; + * + * @param value The bytes for filter to set. + * @return This builder for chaining. + */ + public Builder setFilterBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + filter_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
    +     * Number of operations to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 3; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
    +     * Number of operations to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 3; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Number of operations to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 3; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000004); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.ListBackupOperationsRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListBackupOperationsRequest) + private static final com.google.spanner.admin.database.v1.ListBackupOperationsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.ListBackupOperationsRequest(); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListBackupOperationsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupOperationsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsRequestOrBuilder.java new file mode 100644 index 000000000000..8d952b07ec45 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsRequestOrBuilder.java @@ -0,0 +1,272 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface ListBackupOperationsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.ListBackupOperationsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The instance of the backup operations. Values are of
    +   * the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
    +   * Required. The instance of the backup operations. Values are of
    +   * the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
    +   * An expression that filters the list of returned backup operations.
    +   *
    +   * A filter expression consists of a field name, a
    +   * comparison operator, and a value for filtering.
    +   * The value must be a string, a number, or a boolean. The comparison operator
    +   * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +   * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +   *
    +   * The following fields in the [operation][google.longrunning.Operation]
    +   * are eligible for filtering:
    +   *
    +   * * `name` - The name of the long-running operation
    +   * * `done` - False if the operation is in progress, else true.
    +   * * `metadata.@type` - the type of metadata. For example, the type string
    +   * for
    +   * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
    +   * is
    +   * `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`.
    +   * * `metadata.<field_name>` - any field in metadata.value.
    +   * `metadata.@type` must be specified first if filtering on metadata
    +   * fields.
    +   * * `error` - Error associated with the long-running operation.
    +   * * `response.@type` - the type of response.
    +   * * `response.<field_name>` - any field in response.value.
    +   *
    +   * You can combine multiple expressions by enclosing each expression in
    +   * parentheses. By default, expressions are combined with AND logic, but
    +   * you can specify AND, OR, and NOT logic explicitly.
    +   *
    +   * Here are a few examples:
    +   *
    +   * * `done:true` - The operation is complete.
    +   * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +   * `metadata.database:prod` - Returns operations where:
    +   * * The operation's metadata type is
    +   * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +   * * The source database name of backup contains the string "prod".
    +   * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +   * `(metadata.name:howl) AND` \
    +   * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
    +   * `(error:*)` - Returns operations where:
    +   * * The operation's metadata type is
    +   * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +   * * The backup name contains the string "howl".
    +   * * The operation started before 2018-03-28T14:50:00Z.
    +   * * The operation resulted in an error.
    +   * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata) AND` \
    +   * `(metadata.source_backup:test) AND` \
    +   * `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \
    +   * `(error:*)` - Returns operations where:
    +   * * The operation's metadata type is
    +   * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
    +   * * The source backup name contains the string "test".
    +   * * The operation started before 2022-01-18T14:50:00Z.
    +   * * The operation resulted in an error.
    +   * * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +   * `(metadata.database:test_db)) OR` \
    +   * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata)
    +   * AND` \
    +   * `(metadata.source_backup:test_bkp)) AND` \
    +   * `(error:*)` - Returns operations where:
    +   * * The operation's metadata matches either of criteria:
    +   * * The operation's metadata type is
    +   * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
    +   * AND the source database name of the backup contains the string
    +   * "test_db"
    +   * * The operation's metadata type is
    +   * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]
    +   * AND the source backup name contains the string "test_bkp"
    +   * * The operation resulted in an error.
    +   * 
    + * + * string filter = 2; + * + * @return The filter. + */ + java.lang.String getFilter(); + + /** + * + * + *
    +   * An expression that filters the list of returned backup operations.
    +   *
    +   * A filter expression consists of a field name, a
    +   * comparison operator, and a value for filtering.
    +   * The value must be a string, a number, or a boolean. The comparison operator
    +   * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +   * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +   *
    +   * The following fields in the [operation][google.longrunning.Operation]
    +   * are eligible for filtering:
    +   *
    +   * * `name` - The name of the long-running operation
    +   * * `done` - False if the operation is in progress, else true.
    +   * * `metadata.@type` - the type of metadata. For example, the type string
    +   * for
    +   * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
    +   * is
    +   * `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`.
    +   * * `metadata.<field_name>` - any field in metadata.value.
    +   * `metadata.@type` must be specified first if filtering on metadata
    +   * fields.
    +   * * `error` - Error associated with the long-running operation.
    +   * * `response.@type` - the type of response.
    +   * * `response.<field_name>` - any field in response.value.
    +   *
    +   * You can combine multiple expressions by enclosing each expression in
    +   * parentheses. By default, expressions are combined with AND logic, but
    +   * you can specify AND, OR, and NOT logic explicitly.
    +   *
    +   * Here are a few examples:
    +   *
    +   * * `done:true` - The operation is complete.
    +   * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +   * `metadata.database:prod` - Returns operations where:
    +   * * The operation's metadata type is
    +   * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +   * * The source database name of backup contains the string "prod".
    +   * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +   * `(metadata.name:howl) AND` \
    +   * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
    +   * `(error:*)` - Returns operations where:
    +   * * The operation's metadata type is
    +   * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
    +   * * The backup name contains the string "howl".
    +   * * The operation started before 2018-03-28T14:50:00Z.
    +   * * The operation resulted in an error.
    +   * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata) AND` \
    +   * `(metadata.source_backup:test) AND` \
    +   * `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \
    +   * `(error:*)` - Returns operations where:
    +   * * The operation's metadata type is
    +   * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
    +   * * The source backup name contains the string "test".
    +   * * The operation started before 2022-01-18T14:50:00Z.
    +   * * The operation resulted in an error.
    +   * * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
    +   * `(metadata.database:test_db)) OR` \
    +   * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata)
    +   * AND` \
    +   * `(metadata.source_backup:test_bkp)) AND` \
    +   * `(error:*)` - Returns operations where:
    +   * * The operation's metadata matches either of criteria:
    +   * * The operation's metadata type is
    +   * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
    +   * AND the source database name of the backup contains the string
    +   * "test_db"
    +   * * The operation's metadata type is
    +   * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]
    +   * AND the source backup name contains the string "test_bkp"
    +   * * The operation resulted in an error.
    +   * 
    + * + * string filter = 2; + * + * @return The bytes for filter. + */ + com.google.protobuf.ByteString getFilterBytes(); + + /** + * + * + *
    +   * Number of operations to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 3; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
    +   * from a previous
    +   * [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
    +   * 
    + * + * string page_token = 4; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
    +   * from a previous
    +   * [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
    +   * 
    + * + * string page_token = 4; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsResponse.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsResponse.java new file mode 100644 index 000000000000..78cb6b163474 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsResponse.java @@ -0,0 +1,1319 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The response for
    + * [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListBackupOperationsResponse} + */ +@com.google.protobuf.Generated +public final class ListBackupOperationsResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.ListBackupOperationsResponse) + ListBackupOperationsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListBackupOperationsResponse"); + } + + // Use ListBackupOperationsResponse.newBuilder() to construct. + private ListBackupOperationsResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListBackupOperationsResponse() { + operations_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_ListBackupOperationsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_ListBackupOperationsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListBackupOperationsResponse.class, + com.google.spanner.admin.database.v1.ListBackupOperationsResponse.Builder.class); + } + + public static final int OPERATIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List operations_; + + /** + * + * + *
    +   * The list of matching backup [long-running
    +   * operations][google.longrunning.Operation]. Each operation's name will be
    +   * prefixed by the backup's name. The operation's
    +   * [metadata][google.longrunning.Operation.metadata] field type
    +   * `metadata.type_url` describes the type of the metadata. Operations returned
    +   * include those that are pending or have completed/failed/canceled within the
    +   * last 7 days. Operations returned are ordered by
    +   * `operation.metadata.value.progress.start_time` in descending order starting
    +   * from the most recently started operation.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + @java.lang.Override + public java.util.List getOperationsList() { + return operations_; + } + + /** + * + * + *
    +   * The list of matching backup [long-running
    +   * operations][google.longrunning.Operation]. Each operation's name will be
    +   * prefixed by the backup's name. The operation's
    +   * [metadata][google.longrunning.Operation.metadata] field type
    +   * `metadata.type_url` describes the type of the metadata. Operations returned
    +   * include those that are pending or have completed/failed/canceled within the
    +   * last 7 days. Operations returned are ordered by
    +   * `operation.metadata.value.progress.start_time` in descending order starting
    +   * from the most recently started operation.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + @java.lang.Override + public java.util.List + getOperationsOrBuilderList() { + return operations_; + } + + /** + * + * + *
    +   * The list of matching backup [long-running
    +   * operations][google.longrunning.Operation]. Each operation's name will be
    +   * prefixed by the backup's name. The operation's
    +   * [metadata][google.longrunning.Operation.metadata] field type
    +   * `metadata.type_url` describes the type of the metadata. Operations returned
    +   * include those that are pending or have completed/failed/canceled within the
    +   * last 7 days. Operations returned are ordered by
    +   * `operation.metadata.value.progress.start_time` in descending order starting
    +   * from the most recently started operation.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + @java.lang.Override + public int getOperationsCount() { + return operations_.size(); + } + + /** + * + * + *
    +   * The list of matching backup [long-running
    +   * operations][google.longrunning.Operation]. Each operation's name will be
    +   * prefixed by the backup's name. The operation's
    +   * [metadata][google.longrunning.Operation.metadata] field type
    +   * `metadata.type_url` describes the type of the metadata. Operations returned
    +   * include those that are pending or have completed/failed/canceled within the
    +   * last 7 days. Operations returned are ordered by
    +   * `operation.metadata.value.progress.start_time` in descending order starting
    +   * from the most recently started operation.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + @java.lang.Override + public com.google.longrunning.Operation getOperations(int index) { + return operations_.get(index); + } + + /** + * + * + *
    +   * The list of matching backup [long-running
    +   * operations][google.longrunning.Operation]. Each operation's name will be
    +   * prefixed by the backup's name. The operation's
    +   * [metadata][google.longrunning.Operation.metadata] field type
    +   * `metadata.type_url` describes the type of the metadata. Operations returned
    +   * include those that are pending or have completed/failed/canceled within the
    +   * last 7 days. Operations returned are ordered by
    +   * `operation.metadata.value.progress.start_time` in descending order starting
    +   * from the most recently started operation.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + @java.lang.Override + public com.google.longrunning.OperationOrBuilder getOperationsOrBuilder(int index) { + return operations_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]
    +   * call to fetch more of the matching metadata.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]
    +   * call to fetch more of the matching metadata.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < operations_.size(); i++) { + output.writeMessage(1, operations_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < operations_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, operations_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.ListBackupOperationsResponse)) { + return super.equals(obj); + } + 
com.google.spanner.admin.database.v1.ListBackupOperationsResponse other = + (com.google.spanner.admin.database.v1.ListBackupOperationsResponse) obj; + + if (!getOperationsList().equals(other.getOperationsList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getOperationsCount() > 0) { + hash = (37 * hash) + OPERATIONS_FIELD_NUMBER; + hash = (53 * hash) + getOperationsList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + 
public static com.google.spanner.admin.database.v1.ListBackupOperationsResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsResponse 
parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.ListBackupOperationsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The response for
    +   * [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListBackupOperationsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.ListBackupOperationsResponse) + com.google.spanner.admin.database.v1.ListBackupOperationsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_ListBackupOperationsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_ListBackupOperationsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListBackupOperationsResponse.class, + com.google.spanner.admin.database.v1.ListBackupOperationsResponse.Builder.class); + } + + // Construct using + // com.google.spanner.admin.database.v1.ListBackupOperationsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (operationsBuilder_ == null) { + operations_ = java.util.Collections.emptyList(); + } else { + operations_ = null; + operationsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_ListBackupOperationsResponse_descriptor; + } + + @java.lang.Override + public 
com.google.spanner.admin.database.v1.ListBackupOperationsResponse + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.ListBackupOperationsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupOperationsResponse build() { + com.google.spanner.admin.database.v1.ListBackupOperationsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupOperationsResponse buildPartial() { + com.google.spanner.admin.database.v1.ListBackupOperationsResponse result = + new com.google.spanner.admin.database.v1.ListBackupOperationsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.admin.database.v1.ListBackupOperationsResponse result) { + if (operationsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + operations_ = java.util.Collections.unmodifiableList(operations_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.operations_ = operations_; + } else { + result.operations_ = operationsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.ListBackupOperationsResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.ListBackupOperationsResponse) { + return mergeFrom((com.google.spanner.admin.database.v1.ListBackupOperationsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + 
com.google.spanner.admin.database.v1.ListBackupOperationsResponse other) { + if (other + == com.google.spanner.admin.database.v1.ListBackupOperationsResponse.getDefaultInstance()) + return this; + if (operationsBuilder_ == null) { + if (!other.operations_.isEmpty()) { + if (operations_.isEmpty()) { + operations_ = other.operations_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureOperationsIsMutable(); + operations_.addAll(other.operations_); + } + onChanged(); + } + } else { + if (!other.operations_.isEmpty()) { + if (operationsBuilder_.isEmpty()) { + operationsBuilder_.dispose(); + operationsBuilder_ = null; + operations_ = other.operations_; + bitField0_ = (bitField0_ & ~0x00000001); + operationsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetOperationsFieldBuilder() + : null; + } else { + operationsBuilder_.addAllMessages(other.operations_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.longrunning.Operation m = + input.readMessage(com.google.longrunning.Operation.parser(), extensionRegistry); + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + operations_.add(m); + } else { + operationsBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = 
input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List operations_ = + java.util.Collections.emptyList(); + + private void ensureOperationsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + operations_ = new java.util.ArrayList(operations_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder> + operationsBuilder_; + + /** + * + * + *
    +     * The list of matching backup [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the backup's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that are pending or have completed/failed/canceled within the
    +     * last 7 days. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public java.util.List getOperationsList() { + if (operationsBuilder_ == null) { + return java.util.Collections.unmodifiableList(operations_); + } else { + return operationsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * The list of matching backup [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the backup's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that are pending or have completed/failed/canceled within the
    +     * last 7 days. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public int getOperationsCount() { + if (operationsBuilder_ == null) { + return operations_.size(); + } else { + return operationsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * The list of matching backup [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the backup's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that are pending or have completed/failed/canceled within the
    +     * last 7 days. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public com.google.longrunning.Operation getOperations(int index) { + if (operationsBuilder_ == null) { + return operations_.get(index); + } else { + return operationsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * The list of matching backup [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the backup's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that are pending or have completed/failed/canceled within the
    +     * last 7 days. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder setOperations(int index, com.google.longrunning.Operation value) { + if (operationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOperationsIsMutable(); + operations_.set(index, value); + onChanged(); + } else { + operationsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of matching backup [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the backup's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that are pending or have completed/failed/canceled within the
    +     * last 7 days. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder setOperations( + int index, com.google.longrunning.Operation.Builder builderForValue) { + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + operations_.set(index, builderForValue.build()); + onChanged(); + } else { + operationsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of matching backup [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the backup's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that are pending or have completed/failed/canceled within the
    +     * last 7 days. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder addOperations(com.google.longrunning.Operation value) { + if (operationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOperationsIsMutable(); + operations_.add(value); + onChanged(); + } else { + operationsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * The list of matching backup [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the backup's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that are pending or have completed/failed/canceled within the
    +     * last 7 days. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder addOperations(int index, com.google.longrunning.Operation value) { + if (operationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOperationsIsMutable(); + operations_.add(index, value); + onChanged(); + } else { + operationsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of matching backup [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the backup's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that are pending or have completed/failed/canceled within the
    +     * last 7 days. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder addOperations(com.google.longrunning.Operation.Builder builderForValue) { + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + operations_.add(builderForValue.build()); + onChanged(); + } else { + operationsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of matching backup [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the backup's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that are pending or have completed/failed/canceled within the
    +     * last 7 days. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder addOperations( + int index, com.google.longrunning.Operation.Builder builderForValue) { + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + operations_.add(index, builderForValue.build()); + onChanged(); + } else { + operationsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of matching backup [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the backup's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that are pending or have completed/failed/canceled within the
    +     * last 7 days. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder addAllOperations( + java.lang.Iterable values) { + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, operations_); + onChanged(); + } else { + operationsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * The list of matching backup [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the backup's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that are pending or have completed/failed/canceled within the
    +     * last 7 days. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder clearOperations() { + if (operationsBuilder_ == null) { + operations_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + operationsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * The list of matching backup [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the backup's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that are pending or have completed/failed/canceled within the
    +     * last 7 days. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder removeOperations(int index) { + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + operations_.remove(index); + onChanged(); + } else { + operationsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * The list of matching backup [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the backup's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that are pending or have completed/failed/canceled within the
    +     * last 7 days. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public com.google.longrunning.Operation.Builder getOperationsBuilder(int index) { + return internalGetOperationsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * The list of matching backup [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the backup's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that are pending or have completed/failed/canceled within the
    +     * last 7 days. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public com.google.longrunning.OperationOrBuilder getOperationsOrBuilder(int index) { + if (operationsBuilder_ == null) { + return operations_.get(index); + } else { + return operationsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * The list of matching backup [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the backup's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that are pending or have completed/failed/canceled within the
    +     * last 7 days. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public java.util.List + getOperationsOrBuilderList() { + if (operationsBuilder_ != null) { + return operationsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(operations_); + } + } + + /** + * + * + *
    +     * The list of matching backup [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the backup's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that are pending or have completed/failed/canceled within the
    +     * last 7 days. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public com.google.longrunning.Operation.Builder addOperationsBuilder() { + return internalGetOperationsFieldBuilder() + .addBuilder(com.google.longrunning.Operation.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of matching backup [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the backup's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that are pending or have completed/failed/canceled within the
    +     * last 7 days. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public com.google.longrunning.Operation.Builder addOperationsBuilder(int index) { + return internalGetOperationsFieldBuilder() + .addBuilder(index, com.google.longrunning.Operation.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of matching backup [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the backup's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata. Operations returned
    +     * include those that are pending or have completed/failed/canceled within the
    +     * last 7 days. Operations returned are ordered by
    +     * `operation.metadata.value.progress.start_time` in descending order starting
    +     * from the most recently started operation.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public java.util.List getOperationsBuilderList() { + return internalGetOperationsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder> + internalGetOperationsFieldBuilder() { + if (operationsBuilder_ == null) { + operationsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder>( + operations_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + operations_ = null; + } + return operationsBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]
    +     * call to fetch more of the matching metadata.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]
    +     * call to fetch more of the matching metadata.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]
    +     * call to fetch more of the matching metadata.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]
    +     * call to fetch more of the matching metadata.
    +     * 
    + * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]
    +     * call to fetch more of the matching metadata.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.ListBackupOperationsResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListBackupOperationsResponse) + private static final com.google.spanner.admin.database.v1.ListBackupOperationsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.ListBackupOperationsResponse(); + } + + public static com.google.spanner.admin.database.v1.ListBackupOperationsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListBackupOperationsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupOperationsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsResponseOrBuilder.java new file mode 100644 index 000000000000..6bc4c85b6f84 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsResponseOrBuilder.java @@ -0,0 +1,153 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface ListBackupOperationsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.ListBackupOperationsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The list of matching backup [long-running
    +   * operations][google.longrunning.Operation]. Each operation's name will be
    +   * prefixed by the backup's name. The operation's
    +   * [metadata][google.longrunning.Operation.metadata] field type
    +   * `metadata.type_url` describes the type of the metadata. Operations returned
    +   * include those that are pending or have completed/failed/canceled within the
    +   * last 7 days. Operations returned are ordered by
    +   * `operation.metadata.value.progress.start_time` in descending order starting
    +   * from the most recently started operation.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + java.util.List getOperationsList(); + + /** + * + * + *
    +   * The list of matching backup [long-running
    +   * operations][google.longrunning.Operation]. Each operation's name will be
    +   * prefixed by the backup's name. The operation's
    +   * [metadata][google.longrunning.Operation.metadata] field type
    +   * `metadata.type_url` describes the type of the metadata. Operations returned
    +   * include those that are pending or have completed/failed/canceled within the
    +   * last 7 days. Operations returned are ordered by
    +   * `operation.metadata.value.progress.start_time` in descending order starting
    +   * from the most recently started operation.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + com.google.longrunning.Operation getOperations(int index); + + /** + * + * + *
    +   * The list of matching backup [long-running
    +   * operations][google.longrunning.Operation]. Each operation's name will be
    +   * prefixed by the backup's name. The operation's
    +   * [metadata][google.longrunning.Operation.metadata] field type
    +   * `metadata.type_url` describes the type of the metadata. Operations returned
    +   * include those that are pending or have completed/failed/canceled within the
    +   * last 7 days. Operations returned are ordered by
    +   * `operation.metadata.value.progress.start_time` in descending order starting
    +   * from the most recently started operation.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + int getOperationsCount(); + + /** + * + * + *
    +   * The list of matching backup [long-running
    +   * operations][google.longrunning.Operation]. Each operation's name will be
    +   * prefixed by the backup's name. The operation's
    +   * [metadata][google.longrunning.Operation.metadata] field type
    +   * `metadata.type_url` describes the type of the metadata. Operations returned
    +   * include those that are pending or have completed/failed/canceled within the
    +   * last 7 days. Operations returned are ordered by
    +   * `operation.metadata.value.progress.start_time` in descending order starting
    +   * from the most recently started operation.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + java.util.List getOperationsOrBuilderList(); + + /** + * + * + *
    +   * The list of matching backup [long-running
    +   * operations][google.longrunning.Operation]. Each operation's name will be
    +   * prefixed by the backup's name. The operation's
    +   * [metadata][google.longrunning.Operation.metadata] field type
    +   * `metadata.type_url` describes the type of the metadata. Operations returned
    +   * include those that are pending or have completed/failed/canceled within the
    +   * last 7 days. Operations returned are ordered by
    +   * `operation.metadata.value.progress.start_time` in descending order starting
    +   * from the most recently started operation.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + com.google.longrunning.OperationOrBuilder getOperationsOrBuilder(int index); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]
    +   * call to fetch more of the matching metadata.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]
    +   * call to fetch more of the matching metadata.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesRequest.java new file mode 100644 index 000000000000..a142c9ba9254 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesRequest.java @@ -0,0 +1,952 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup_schedule.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListBackupSchedulesRequest} + */ +@com.google.protobuf.Generated +public final class ListBackupSchedulesRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.ListBackupSchedulesRequest) + ListBackupSchedulesRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListBackupSchedulesRequest"); + } + + // Use ListBackupSchedulesRequest.newBuilder() to construct. + private ListBackupSchedulesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListBackupSchedulesRequest() { + parent_ = ""; + pageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_ListBackupSchedulesRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_ListBackupSchedulesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest.class, + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
    +   * Required. Database is the parent resource whose backup schedules should be
    +   * listed. Values are of the form
    +   * projects/<project>/instances/<instance>/databases/<database>
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. Database is the parent resource whose backup schedules should be
    +   * listed. Values are of the form
    +   * projects/<project>/instances/<instance>/databases/<database>
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_ = 0; + + /** + * + * + *
    +   * Optional. Number of backup schedules to be returned in the response. If 0
    +   * or less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +   * Optional. If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
    +   * from a previous
    +   * [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
    +   * to the same `parent`.
    +   * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * Optional. If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
    +   * from a previous
    +   * [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
    +   * to the same `parent`.
    +   * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (pageSize_ != 0) { + output.writeInt32(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, pageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, pageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final 
java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.ListBackupSchedulesRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest other = + (com.google.spanner.admin.database.v1.ListBackupSchedulesRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest 
parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseFrom( + 
com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListBackupSchedulesRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.ListBackupSchedulesRequest) + com.google.spanner.admin.database.v1.ListBackupSchedulesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_ListBackupSchedulesRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_ListBackupSchedulesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest.class, + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.ListBackupSchedulesRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + pageSize_ = 0; + pageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_ListBackupSchedulesRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupSchedulesRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.ListBackupSchedulesRequest.getDefaultInstance(); + } + + 
@java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupSchedulesRequest build() { + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupSchedulesRequest buildPartial() { + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest result = + new com.google.spanner.admin.database.v1.ListBackupSchedulesRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageToken_ = pageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.ListBackupSchedulesRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.ListBackupSchedulesRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest other) { + if (other + == com.google.spanner.admin.database.v1.ListBackupSchedulesRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } 
+ + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 34: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
    +     * Required. Database is the parent resource whose backup schedules should be
    +     * listed. Values are of the form
    +     * projects/<project>/instances/<instance>/databases/<database>
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. Database is the parent resource whose backup schedules should be
    +     * listed. Values are of the form
    +     * projects/<project>/instances/<instance>/databases/<database>
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. Database is the parent resource whose backup schedules should be
    +     * listed. Values are of the form
    +     * projects/<project>/instances/<instance>/databases/<database>
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. Database is the parent resource whose backup schedules should be
    +     * listed. Values are of the form
    +     * projects/<project>/instances/<instance>/databases/<database>
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. Database is the parent resource whose backup schedules should be
    +     * listed. Values are of the form
    +     * projects/<project>/instances/<instance>/databases/<database>
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
    +     * Optional. Number of backup schedules to be returned in the response. If 0
    +     * or less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
    +     * Optional. Number of backup schedules to be returned in the response. If 0
    +     * or less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Number of backup schedules to be returned in the response. If 0
    +     * or less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000002); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +     * Optional. If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
    +     * to the same `parent`.
    +     * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Optional. If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
    +     * to the same `parent`.
    +     * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Optional. If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
    +     * to the same `parent`.
    +     * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
    +     * to the same `parent`.
    +     * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
    +     * to the same `parent`.
    +     * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.ListBackupSchedulesRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListBackupSchedulesRequest) + private static final com.google.spanner.admin.database.v1.ListBackupSchedulesRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.ListBackupSchedulesRequest(); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListBackupSchedulesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupSchedulesRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesRequestOrBuilder.java new file mode 100644 index 000000000000..87cd5b69bce6 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesRequestOrBuilder.java @@ -0,0 +1,110 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup_schedule.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface ListBackupSchedulesRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.ListBackupSchedulesRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. Database is the parent resource whose backup schedules should be
    +   * listed. Values are of the form
    +   * projects/<project>/instances/<instance>/databases/<database>
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
    +   * Required. Database is the parent resource whose backup schedules should be
    +   * listed. Values are of the form
    +   * projects/<project>/instances/<instance>/databases/<database>
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
    +   * Optional. Number of backup schedules to be returned in the response. If 0
    +   * or less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
    +   * Optional. If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
    +   * from a previous
    +   * [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
    +   * to the same `parent`.
    +   * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
    +   * Optional. If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
    +   * from a previous
    +   * [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
    +   * to the same `parent`.
    +   * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesResponse.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesResponse.java new file mode 100644 index 000000000000..8f7675d63b6b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesResponse.java @@ -0,0 +1,1151 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup_schedule.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The response for
    + * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListBackupSchedulesResponse} + */ +@com.google.protobuf.Generated +public final class ListBackupSchedulesResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.ListBackupSchedulesResponse) + ListBackupSchedulesResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListBackupSchedulesResponse"); + } + + // Use ListBackupSchedulesResponse.newBuilder() to construct. + private ListBackupSchedulesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListBackupSchedulesResponse() { + backupSchedules_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_ListBackupSchedulesResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_ListBackupSchedulesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse.class, + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse.Builder.class); + } + + public static final int BACKUP_SCHEDULES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List backupSchedules_; + + /** + * + * + *
    +   * The list of backup schedules for a database.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + @java.lang.Override + public java.util.List + getBackupSchedulesList() { + return backupSchedules_; + } + + /** + * + * + *
    +   * The list of backup schedules for a database.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + @java.lang.Override + public java.util.List + getBackupSchedulesOrBuilderList() { + return backupSchedules_; + } + + /** + * + * + *
    +   * The list of backup schedules for a database.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + @java.lang.Override + public int getBackupSchedulesCount() { + return backupSchedules_.size(); + } + + /** + * + * + *
    +   * The list of backup schedules for a database.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupSchedule getBackupSchedules(int index) { + return backupSchedules_.get(index); + } + + /** + * + * + *
    +   * The list of backup schedules for a database.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupScheduleOrBuilder getBackupSchedulesOrBuilder( + int index) { + return backupSchedules_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
    +   * call to fetch more of the schedules.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
    +   * call to fetch more of the schedules.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < backupSchedules_.size(); i++) { + output.writeMessage(1, backupSchedules_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < backupSchedules_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, backupSchedules_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.ListBackupSchedulesResponse)) { + return super.equals(obj); + } + 
com.google.spanner.admin.database.v1.ListBackupSchedulesResponse other = + (com.google.spanner.admin.database.v1.ListBackupSchedulesResponse) obj; + + if (!getBackupSchedulesList().equals(other.getBackupSchedulesList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getBackupSchedulesCount() > 0) { + hash = (37 * hash) + BACKUP_SCHEDULES_FIELD_NUMBER; + hash = (53 * hash) + getBackupSchedulesList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The response for
    +   * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListBackupSchedulesResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.ListBackupSchedulesResponse) + com.google.spanner.admin.database.v1.ListBackupSchedulesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_ListBackupSchedulesResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_ListBackupSchedulesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse.class, + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.ListBackupSchedulesResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (backupSchedulesBuilder_ == null) { + backupSchedules_ = java.util.Collections.emptyList(); + } else { + backupSchedules_ = null; + backupSchedulesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_ListBackupSchedulesResponse_descriptor; + } + + @java.lang.Override + public 
com.google.spanner.admin.database.v1.ListBackupSchedulesResponse + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.ListBackupSchedulesResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupSchedulesResponse build() { + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupSchedulesResponse buildPartial() { + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse result = + new com.google.spanner.admin.database.v1.ListBackupSchedulesResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse result) { + if (backupSchedulesBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + backupSchedules_ = java.util.Collections.unmodifiableList(backupSchedules_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.backupSchedules_ = backupSchedules_; + } else { + result.backupSchedules_ = backupSchedulesBuilder_.build(); + } + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.ListBackupSchedulesResponse) { + return mergeFrom((com.google.spanner.admin.database.v1.ListBackupSchedulesResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + 
com.google.spanner.admin.database.v1.ListBackupSchedulesResponse other) { + if (other + == com.google.spanner.admin.database.v1.ListBackupSchedulesResponse.getDefaultInstance()) + return this; + if (backupSchedulesBuilder_ == null) { + if (!other.backupSchedules_.isEmpty()) { + if (backupSchedules_.isEmpty()) { + backupSchedules_ = other.backupSchedules_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureBackupSchedulesIsMutable(); + backupSchedules_.addAll(other.backupSchedules_); + } + onChanged(); + } + } else { + if (!other.backupSchedules_.isEmpty()) { + if (backupSchedulesBuilder_.isEmpty()) { + backupSchedulesBuilder_.dispose(); + backupSchedulesBuilder_ = null; + backupSchedules_ = other.backupSchedules_; + bitField0_ = (bitField0_ & ~0x00000001); + backupSchedulesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetBackupSchedulesFieldBuilder() + : null; + } else { + backupSchedulesBuilder_.addAllMessages(other.backupSchedules_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.admin.database.v1.BackupSchedule m = + input.readMessage( + com.google.spanner.admin.database.v1.BackupSchedule.parser(), + extensionRegistry); + if (backupSchedulesBuilder_ == null) { + ensureBackupSchedulesIsMutable(); + 
backupSchedules_.add(m); + } else { + backupSchedulesBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List backupSchedules_ = + java.util.Collections.emptyList(); + + private void ensureBackupSchedulesIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + backupSchedules_ = + new java.util.ArrayList( + backupSchedules_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.BackupSchedule, + com.google.spanner.admin.database.v1.BackupSchedule.Builder, + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder> + backupSchedulesBuilder_; + + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public java.util.List + getBackupSchedulesList() { + if (backupSchedulesBuilder_ == null) { + return java.util.Collections.unmodifiableList(backupSchedules_); + } else { + return backupSchedulesBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public int getBackupSchedulesCount() { + if (backupSchedulesBuilder_ == null) { + return backupSchedules_.size(); + } else { + return backupSchedulesBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public com.google.spanner.admin.database.v1.BackupSchedule getBackupSchedules(int index) { + if (backupSchedulesBuilder_ == null) { + return backupSchedules_.get(index); + } else { + return backupSchedulesBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public Builder setBackupSchedules( + int index, com.google.spanner.admin.database.v1.BackupSchedule value) { + if (backupSchedulesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBackupSchedulesIsMutable(); + backupSchedules_.set(index, value); + onChanged(); + } else { + backupSchedulesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public Builder setBackupSchedules( + int index, com.google.spanner.admin.database.v1.BackupSchedule.Builder builderForValue) { + if (backupSchedulesBuilder_ == null) { + ensureBackupSchedulesIsMutable(); + backupSchedules_.set(index, builderForValue.build()); + onChanged(); + } else { + backupSchedulesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public Builder addBackupSchedules(com.google.spanner.admin.database.v1.BackupSchedule value) { + if (backupSchedulesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBackupSchedulesIsMutable(); + backupSchedules_.add(value); + onChanged(); + } else { + backupSchedulesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public Builder addBackupSchedules( + int index, com.google.spanner.admin.database.v1.BackupSchedule value) { + if (backupSchedulesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBackupSchedulesIsMutable(); + backupSchedules_.add(index, value); + onChanged(); + } else { + backupSchedulesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public Builder addBackupSchedules( + com.google.spanner.admin.database.v1.BackupSchedule.Builder builderForValue) { + if (backupSchedulesBuilder_ == null) { + ensureBackupSchedulesIsMutable(); + backupSchedules_.add(builderForValue.build()); + onChanged(); + } else { + backupSchedulesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public Builder addBackupSchedules( + int index, com.google.spanner.admin.database.v1.BackupSchedule.Builder builderForValue) { + if (backupSchedulesBuilder_ == null) { + ensureBackupSchedulesIsMutable(); + backupSchedules_.add(index, builderForValue.build()); + onChanged(); + } else { + backupSchedulesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public Builder addAllBackupSchedules( + java.lang.Iterable values) { + if (backupSchedulesBuilder_ == null) { + ensureBackupSchedulesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, backupSchedules_); + onChanged(); + } else { + backupSchedulesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public Builder clearBackupSchedules() { + if (backupSchedulesBuilder_ == null) { + backupSchedules_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + backupSchedulesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public Builder removeBackupSchedules(int index) { + if (backupSchedulesBuilder_ == null) { + ensureBackupSchedulesIsMutable(); + backupSchedules_.remove(index); + onChanged(); + } else { + backupSchedulesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public com.google.spanner.admin.database.v1.BackupSchedule.Builder getBackupSchedulesBuilder( + int index) { + return internalGetBackupSchedulesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public com.google.spanner.admin.database.v1.BackupScheduleOrBuilder getBackupSchedulesOrBuilder( + int index) { + if (backupSchedulesBuilder_ == null) { + return backupSchedules_.get(index); + } else { + return backupSchedulesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public java.util.List + getBackupSchedulesOrBuilderList() { + if (backupSchedulesBuilder_ != null) { + return backupSchedulesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(backupSchedules_); + } + } + + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public com.google.spanner.admin.database.v1.BackupSchedule.Builder addBackupSchedulesBuilder() { + return internalGetBackupSchedulesFieldBuilder() + .addBuilder(com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public com.google.spanner.admin.database.v1.BackupSchedule.Builder addBackupSchedulesBuilder( + int index) { + return internalGetBackupSchedulesFieldBuilder() + .addBuilder( + index, com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public java.util.List + getBackupSchedulesBuilderList() { + return internalGetBackupSchedulesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.BackupSchedule, + com.google.spanner.admin.database.v1.BackupSchedule.Builder, + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder> + internalGetBackupSchedulesFieldBuilder() { + if (backupSchedulesBuilder_ == null) { + backupSchedulesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.BackupSchedule, + com.google.spanner.admin.database.v1.BackupSchedule.Builder, + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder>( + backupSchedules_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + backupSchedules_ = null; + } + return backupSchedulesBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
    +     * call to fetch more of the schedules.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
    +     * call to fetch more of the schedules.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
    +     * call to fetch more of the schedules.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
    +     * call to fetch more of the schedules.
    +     * 
    + * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
    +     * call to fetch more of the schedules.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.ListBackupSchedulesResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListBackupSchedulesResponse) + private static final com.google.spanner.admin.database.v1.ListBackupSchedulesResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.ListBackupSchedulesResponse(); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListBackupSchedulesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override 
+ public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupSchedulesResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesResponseOrBuilder.java new file mode 100644 index 000000000000..dbf7578a1616 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesResponseOrBuilder.java @@ -0,0 +1,115 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup_schedule.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface ListBackupSchedulesResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.ListBackupSchedulesResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The list of backup schedules for a database.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + java.util.List getBackupSchedulesList(); + + /** + * + * + *
    +   * The list of backup schedules for a database.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + com.google.spanner.admin.database.v1.BackupSchedule getBackupSchedules(int index); + + /** + * + * + *
    +   * The list of backup schedules for a database.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + int getBackupSchedulesCount(); + + /** + * + * + *
    +   * The list of backup schedules for a database.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + java.util.List + getBackupSchedulesOrBuilderList(); + + /** + * + * + *
    +   * The list of backup schedules for a database.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder getBackupSchedulesOrBuilder( + int index); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
    +   * call to fetch more of the schedules.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
    +   * call to fetch more of the schedules.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsRequest.java new file mode 100644 index 000000000000..127d17c86e8d --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsRequest.java @@ -0,0 +1,1400 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListBackupsRequest} + */ +@com.google.protobuf.Generated +public final class ListBackupsRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.ListBackupsRequest) + ListBackupsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListBackupsRequest"); + } + + // Use ListBackupsRequest.newBuilder() to construct. + private ListBackupsRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListBackupsRequest() { + parent_ = ""; + filter_ = ""; + pageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_ListBackupsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_ListBackupsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListBackupsRequest.class, + com.google.spanner.admin.database.v1.ListBackupsRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
    +   * Required. The instance to list backups from.  Values are of the
    +   * form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The instance to list backups from.  Values are of the
    +   * form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FILTER_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object filter_ = ""; + + /** + * + * + *
    +   * An expression that filters the list of returned backups.
    +   *
    +   * A filter expression consists of a field name, a comparison operator, and a
    +   * value for filtering.
    +   * The value must be a string, a number, or a boolean. The comparison operator
    +   * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +   * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +   *
    +   * The following fields in the
    +   * [Backup][google.spanner.admin.database.v1.Backup] are eligible for
    +   * filtering:
    +   *
    +   * * `name`
    +   * * `database`
    +   * * `state`
    +   * * `create_time`  (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +   * * `expire_time`  (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +   * * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +   * * `size_bytes`
    +   * * `backup_schedules`
    +   *
    +   * You can combine multiple expressions by enclosing each expression in
    +   * parentheses. By default, expressions are combined with AND logic, but
    +   * you can specify AND, OR, and NOT logic explicitly.
    +   *
    +   * Here are a few examples:
    +   *
    +   * * `name:Howl` - The backup's name contains the string "howl".
    +   * * `database:prod`
    +   * - The database's name contains the string "prod".
    +   * * `state:CREATING` - The backup is pending creation.
    +   * * `state:READY` - The backup is fully created and ready for use.
    +   * * `(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")`
    +   * - The backup name contains the string "howl" and `create_time`
    +   * of the backup is before 2018-03-28T14:50:00Z.
    +   * * `expire_time < \"2018-03-28T14:50:00Z\"`
    +   * - The backup `expire_time` is before 2018-03-28T14:50:00Z.
    +   * * `size_bytes > 10000000000` - The backup's size is greater than 10GB
    +   * * `backup_schedules:daily`
    +   * - The backup is created from a schedule with "daily" in its name.
    +   * 
    + * + * string filter = 2; + * + * @return The filter. + */ + @java.lang.Override + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } + } + + /** + * + * + *
    +   * An expression that filters the list of returned backups.
    +   *
    +   * A filter expression consists of a field name, a comparison operator, and a
    +   * value for filtering.
    +   * The value must be a string, a number, or a boolean. The comparison operator
    +   * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +   * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +   *
    +   * The following fields in the
    +   * [Backup][google.spanner.admin.database.v1.Backup] are eligible for
    +   * filtering:
    +   *
    +   * * `name`
    +   * * `database`
    +   * * `state`
    +   * * `create_time`  (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +   * * `expire_time`  (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +   * * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +   * * `size_bytes`
    +   * * `backup_schedules`
    +   *
    +   * You can combine multiple expressions by enclosing each expression in
    +   * parentheses. By default, expressions are combined with AND logic, but
    +   * you can specify AND, OR, and NOT logic explicitly.
    +   *
    +   * Here are a few examples:
    +   *
    +   * * `name:Howl` - The backup's name contains the string "howl".
    +   * * `database:prod`
    +   * - The database's name contains the string "prod".
    +   * * `state:CREATING` - The backup is pending creation.
    +   * * `state:READY` - The backup is fully created and ready for use.
    +   * * `(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")`
    +   * - The backup name contains the string "howl" and `create_time`
    +   * of the backup is before 2018-03-28T14:50:00Z.
    +   * * `expire_time < \"2018-03-28T14:50:00Z\"`
    +   * - The backup `expire_time` is before 2018-03-28T14:50:00Z.
    +   * * `size_bytes > 10000000000` - The backup's size is greater than 10GB
    +   * * `backup_schedules:daily`
    +   * - The backup is created from a schedule with "daily" in its name.
    +   * 
    + * + * string filter = 2; + * + * @return The bytes for filter. + */ + @java.lang.Override + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 3; + private int pageSize_ = 0; + + /** + * + * + *
    +   * Number of backups to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 3; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
    +   * from a previous
    +   * [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
    +   * to the same `parent` and with the same `filter`.
    +   * 
    + * + * string page_token = 4; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
    +   * from a previous
    +   * [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
    +   * to the same `parent` and with the same `filter`.
    +   * 
    + * + * string page_token = 4; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, filter_); + } + if (pageSize_ != 0) { + output.writeInt32(3, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, pageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, filter_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(3, pageSize_); + } + if 
(!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, pageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.ListBackupsRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.ListBackupsRequest other = + (com.google.spanner.admin.database.v1.ListBackupsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getFilter().equals(other.getFilter())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + FILTER_FIELD_NUMBER; + hash = (53 * hash) + getFilter().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.ListBackupsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupsRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupsRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupsRequest parseDelimitedFrom( 
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.ListBackupsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListBackupsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.ListBackupsRequest) + com.google.spanner.admin.database.v1.ListBackupsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_ListBackupsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_ListBackupsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListBackupsRequest.class, + com.google.spanner.admin.database.v1.ListBackupsRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.ListBackupsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + filter_ = ""; + pageSize_ = 0; + pageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_ListBackupsRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupsRequest getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.ListBackupsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupsRequest build() { + 
com.google.spanner.admin.database.v1.ListBackupsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupsRequest buildPartial() { + com.google.spanner.admin.database.v1.ListBackupsRequest result = + new com.google.spanner.admin.database.v1.ListBackupsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.ListBackupsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.filter_ = filter_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.pageToken_ = pageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.ListBackupsRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.ListBackupsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.ListBackupsRequest other) { + if (other == com.google.spanner.admin.database.v1.ListBackupsRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getFilter().isEmpty()) { + filter_ = other.filter_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000008; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return 
this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + filter_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
    +     * Required. The instance to list backups from.  Values are of the
    +     * form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The instance to list backups from.  Values are of the
    +     * form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The instance to list backups from.  Values are of the
    +     * form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance to list backups from.  Values are of the
    +     * form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance to list backups from.  Values are of the
    +     * form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object filter_ = ""; + + /** + * + * + *
    +     * An expression that filters the list of returned backups.
    +     *
    +     * A filter expression consists of a field name, a comparison operator, and a
    +     * value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the
    +     * [Backup][google.spanner.admin.database.v1.Backup] are eligible for
    +     * filtering:
    +     *
    +     * * `name`
    +     * * `database`
    +     * * `state`
    +     * * `create_time`  (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +     * * `expire_time`  (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +     * * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +     * * `size_bytes`
    +     * * `backup_schedules`
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic, but
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `name:Howl` - The backup's name contains the string "howl".
    +     * * `database:prod`
    +     * - The database's name contains the string "prod".
    +     * * `state:CREATING` - The backup is pending creation.
    +     * * `state:READY` - The backup is fully created and ready for use.
    +     * * `(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")`
    +     * - The backup name contains the string "howl" and `create_time`
    +     * of the backup is before 2018-03-28T14:50:00Z.
    +     * * `expire_time < \"2018-03-28T14:50:00Z\"`
    +     * - The backup `expire_time` is before 2018-03-28T14:50:00Z.
    +     * * `size_bytes > 10000000000` - The backup's size is greater than 10GB
    +     * * `backup_schedules:daily`
    +     * - The backup is created from a schedule with "daily" in its name.
    +     * 
    + * + * string filter = 2; + * + * @return The filter. + */ + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * An expression that filters the list of returned backups.
    +     *
    +     * A filter expression consists of a field name, a comparison operator, and a
    +     * value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the
    +     * [Backup][google.spanner.admin.database.v1.Backup] are eligible for
    +     * filtering:
    +     *
    +     * * `name`
    +     * * `database`
    +     * * `state`
    +     * * `create_time`  (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +     * * `expire_time`  (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +     * * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +     * * `size_bytes`
    +     * * `backup_schedules`
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic, but
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `name:Howl` - The backup's name contains the string "howl".
    +     * * `database:prod`
    +     * - The database's name contains the string "prod".
    +     * * `state:CREATING` - The backup is pending creation.
    +     * * `state:READY` - The backup is fully created and ready for use.
    +     * * `(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")`
    +     * - The backup name contains the string "howl" and `create_time`
    +     * of the backup is before 2018-03-28T14:50:00Z.
    +     * * `expire_time < \"2018-03-28T14:50:00Z\"`
    +     * - The backup `expire_time` is before 2018-03-28T14:50:00Z.
    +     * * `size_bytes > 10000000000` - The backup's size is greater than 10GB
    +     * * `backup_schedules:daily`
    +     * - The backup is created from a schedule with "daily" in its name.
    +     * 
    + * + * string filter = 2; + * + * @return The bytes for filter. + */ + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * An expression that filters the list of returned backups.
    +     *
    +     * A filter expression consists of a field name, a comparison operator, and a
    +     * value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the
    +     * [Backup][google.spanner.admin.database.v1.Backup] are eligible for
    +     * filtering:
    +     *
    +     * * `name`
    +     * * `database`
    +     * * `state`
    +     * * `create_time`  (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +     * * `expire_time`  (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +     * * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +     * * `size_bytes`
    +     * * `backup_schedules`
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic, but
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `name:Howl` - The backup's name contains the string "howl".
    +     * * `database:prod`
    +     * - The database's name contains the string "prod".
    +     * * `state:CREATING` - The backup is pending creation.
    +     * * `state:READY` - The backup is fully created and ready for use.
    +     * * `(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")`
    +     * - The backup name contains the string "howl" and `create_time`
    +     * of the backup is before 2018-03-28T14:50:00Z.
    +     * * `expire_time < \"2018-03-28T14:50:00Z\"`
    +     * - The backup `expire_time` is before 2018-03-28T14:50:00Z.
    +     * * `size_bytes > 10000000000` - The backup's size is greater than 10GB
    +     * * `backup_schedules:daily`
    +     * - The backup is created from a schedule with "daily" in its name.
    +     * 
    + * + * string filter = 2; + * + * @param value The filter to set. + * @return This builder for chaining. + */ + public Builder setFilter(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + filter_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * An expression that filters the list of returned backups.
    +     *
    +     * A filter expression consists of a field name, a comparison operator, and a
    +     * value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the
    +     * [Backup][google.spanner.admin.database.v1.Backup] are eligible for
    +     * filtering:
    +     *
    +     * * `name`
    +     * * `database`
    +     * * `state`
    +     * * `create_time`  (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +     * * `expire_time`  (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +     * * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +     * * `size_bytes`
    +     * * `backup_schedules`
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic, but
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `name:Howl` - The backup's name contains the string "howl".
    +     * * `database:prod`
    +     * - The database's name contains the string "prod".
    +     * * `state:CREATING` - The backup is pending creation.
    +     * * `state:READY` - The backup is fully created and ready for use.
    +     * * `(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")`
    +     * - The backup name contains the string "howl" and `create_time`
    +     * of the backup is before 2018-03-28T14:50:00Z.
    +     * * `expire_time < \"2018-03-28T14:50:00Z\"`
    +     * - The backup `expire_time` is before 2018-03-28T14:50:00Z.
    +     * * `size_bytes > 10000000000` - The backup's size is greater than 10GB
    +     * * `backup_schedules:daily`
    +     * - The backup is created from a schedule with "daily" in its name.
    +     * 
    + * + * string filter = 2; + * + * @return This builder for chaining. + */ + public Builder clearFilter() { + filter_ = getDefaultInstance().getFilter(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * An expression that filters the list of returned backups.
    +     *
    +     * A filter expression consists of a field name, a comparison operator, and a
    +     * value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the
    +     * [Backup][google.spanner.admin.database.v1.Backup] are eligible for
    +     * filtering:
    +     *
    +     * * `name`
    +     * * `database`
    +     * * `state`
    +     * * `create_time`  (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +     * * `expire_time`  (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +     * * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +     * * `size_bytes`
    +     * * `backup_schedules`
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic, but
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `name:Howl` - The backup's name contains the string "howl".
    +     * * `database:prod`
    +     * - The database's name contains the string "prod".
    +     * * `state:CREATING` - The backup is pending creation.
    +     * * `state:READY` - The backup is fully created and ready for use.
    +     * * `(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")`
    +     * - The backup name contains the string "howl" and `create_time`
    +     * of the backup is before 2018-03-28T14:50:00Z.
    +     * * `expire_time < \"2018-03-28T14:50:00Z\"`
    +     * - The backup `expire_time` is before 2018-03-28T14:50:00Z.
    +     * * `size_bytes > 10000000000` - The backup's size is greater than 10GB
    +     * * `backup_schedules:daily`
    +     * - The backup is created from a schedule with "daily" in its name.
    +     * 
    + * + * string filter = 2; + * + * @param value The bytes for filter to set. + * @return This builder for chaining. + */ + public Builder setFilterBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + filter_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
    +     * Number of backups to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 3; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
    +     * Number of backups to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 3; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Number of backups to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 3; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000004); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.ListBackupsRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListBackupsRequest) + private static final com.google.spanner.admin.database.v1.ListBackupsRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.ListBackupsRequest(); + } + + public static com.google.spanner.admin.database.v1.ListBackupsRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListBackupsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + 
} + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupsRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsRequestOrBuilder.java new file mode 100644 index 000000000000..91a9f9692020 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsRequestOrBuilder.java @@ -0,0 +1,212 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface ListBackupsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.ListBackupsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The instance to list backups from.  Values are of the
    +   * form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
    +   * Required. The instance to list backups from.  Values are of the
    +   * form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
    +   * An expression that filters the list of returned backups.
    +   *
    +   * A filter expression consists of a field name, a comparison operator, and a
    +   * value for filtering.
    +   * The value must be a string, a number, or a boolean. The comparison operator
    +   * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +   * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +   *
    +   * The following fields in the
    +   * [Backup][google.spanner.admin.database.v1.Backup] are eligible for
    +   * filtering:
    +   *
    +   * * `name`
    +   * * `database`
    +   * * `state`
    +   * * `create_time`  (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +   * * `expire_time`  (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +   * * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +   * * `size_bytes`
    +   * * `backup_schedules`
    +   *
    +   * You can combine multiple expressions by enclosing each expression in
    +   * parentheses. By default, expressions are combined with AND logic, but
    +   * you can specify AND, OR, and NOT logic explicitly.
    +   *
    +   * Here are a few examples:
    +   *
    +   * * `name:Howl` - The backup's name contains the string "howl".
    +   * * `database:prod`
    +   * - The database's name contains the string "prod".
    +   * * `state:CREATING` - The backup is pending creation.
    +   * * `state:READY` - The backup is fully created and ready for use.
    +   * * `(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")`
    +   * - The backup name contains the string "howl" and `create_time`
    +   * of the backup is before 2018-03-28T14:50:00Z.
    +   * * `expire_time < \"2018-03-28T14:50:00Z\"`
    +   * - The backup `expire_time` is before 2018-03-28T14:50:00Z.
    +   * * `size_bytes > 10000000000` - The backup's size is greater than 10GB
    +   * * `backup_schedules:daily`
    +   * - The backup is created from a schedule with "daily" in its name.
    +   * 
    + * + * string filter = 2; + * + * @return The filter. + */ + java.lang.String getFilter(); + + /** + * + * + *
    +   * An expression that filters the list of returned backups.
    +   *
    +   * A filter expression consists of a field name, a comparison operator, and a
    +   * value for filtering.
    +   * The value must be a string, a number, or a boolean. The comparison operator
    +   * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +   * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +   *
    +   * The following fields in the
    +   * [Backup][google.spanner.admin.database.v1.Backup] are eligible for
    +   * filtering:
    +   *
    +   * * `name`
    +   * * `database`
    +   * * `state`
    +   * * `create_time`  (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +   * * `expire_time`  (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +   * * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
    +   * * `size_bytes`
    +   * * `backup_schedules`
    +   *
    +   * You can combine multiple expressions by enclosing each expression in
    +   * parentheses. By default, expressions are combined with AND logic, but
    +   * you can specify AND, OR, and NOT logic explicitly.
    +   *
    +   * Here are a few examples:
    +   *
    +   * * `name:Howl` - The backup's name contains the string "howl".
    +   * * `database:prod`
    +   * - The database's name contains the string "prod".
    +   * * `state:CREATING` - The backup is pending creation.
    +   * * `state:READY` - The backup is fully created and ready for use.
    +   * * `(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")`
    +   * - The backup name contains the string "howl" and `create_time`
    +   * of the backup is before 2018-03-28T14:50:00Z.
    +   * * `expire_time < \"2018-03-28T14:50:00Z\"`
    +   * - The backup `expire_time` is before 2018-03-28T14:50:00Z.
    +   * * `size_bytes > 10000000000` - The backup's size is greater than 10GB
    +   * * `backup_schedules:daily`
    +   * - The backup is created from a schedule with "daily" in its name.
    +   * 
    + * + * string filter = 2; + * + * @return The bytes for filter. + */ + com.google.protobuf.ByteString getFilterBytes(); + + /** + * + * + *
    +   * Number of backups to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 3; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
    +   * from a previous
    +   * [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
    +   * to the same `parent` and with the same `filter`.
    +   * 
    + * + * string page_token = 4; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
    +   * from a previous
    +   * [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
    +   * to the same `parent` and with the same `filter`.
    +   * 
    + * + * string page_token = 4; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsResponse.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsResponse.java new file mode 100644 index 000000000000..4def48cc8a98 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsResponse.java @@ -0,0 +1,1151 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The response for
    + * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListBackupsResponse} + */ +@com.google.protobuf.Generated +public final class ListBackupsResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.ListBackupsResponse) + ListBackupsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListBackupsResponse"); + } + + // Use ListBackupsResponse.newBuilder() to construct. + private ListBackupsResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListBackupsResponse() { + backups_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_ListBackupsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_ListBackupsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListBackupsResponse.class, + com.google.spanner.admin.database.v1.ListBackupsResponse.Builder.class); + } + + public static final int BACKUPS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List backups_; + + /** + * + * + *
    +   * The list of matching backups. Backups returned are ordered by `create_time`
    +   * in descending order, starting from the most recent `create_time`.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + @java.lang.Override + public java.util.List getBackupsList() { + return backups_; + } + + /** + * + * + *
    +   * The list of matching backups. Backups returned are ordered by `create_time`
    +   * in descending order, starting from the most recent `create_time`.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + @java.lang.Override + public java.util.List + getBackupsOrBuilderList() { + return backups_; + } + + /** + * + * + *
    +   * The list of matching backups. Backups returned are ordered by `create_time`
    +   * in descending order, starting from the most recent `create_time`.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + @java.lang.Override + public int getBackupsCount() { + return backups_.size(); + } + + /** + * + * + *
    +   * The list of matching backups. Backups returned are ordered by `create_time`
    +   * in descending order, starting from the most recent `create_time`.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.Backup getBackups(int index) { + return backups_.get(index); + } + + /** + * + * + *
    +   * The list of matching backups. Backups returned are ordered by `create_time`
    +   * in descending order, starting from the most recent `create_time`.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupOrBuilder getBackupsOrBuilder(int index) { + return backups_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
    +   * call to fetch more of the matching backups.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
    +   * call to fetch more of the matching backups.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < backups_.size(); i++) { + output.writeMessage(1, backups_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < backups_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, backups_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.ListBackupsResponse)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.ListBackupsResponse other = + 
(com.google.spanner.admin.database.v1.ListBackupsResponse) obj; + + if (!getBackupsList().equals(other.getBackupsList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getBackupsCount() > 0) { + hash = (37 * hash) + BACKUPS_FIELD_NUMBER; + hash = (53 * hash) + getBackupsList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.ListBackupsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupsResponse parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupsResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupsResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.ListBackupsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The response for
    +   * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListBackupsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.ListBackupsResponse) + com.google.spanner.admin.database.v1.ListBackupsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_ListBackupsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_ListBackupsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListBackupsResponse.class, + com.google.spanner.admin.database.v1.ListBackupsResponse.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.ListBackupsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (backupsBuilder_ == null) { + backups_ = java.util.Collections.emptyList(); + } else { + backups_ = null; + backupsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_ListBackupsResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupsResponse getDefaultInstanceForType() { + return 
com.google.spanner.admin.database.v1.ListBackupsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupsResponse build() { + com.google.spanner.admin.database.v1.ListBackupsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupsResponse buildPartial() { + com.google.spanner.admin.database.v1.ListBackupsResponse result = + new com.google.spanner.admin.database.v1.ListBackupsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.admin.database.v1.ListBackupsResponse result) { + if (backupsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + backups_ = java.util.Collections.unmodifiableList(backups_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.backups_ = backups_; + } else { + result.backups_ = backupsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.admin.database.v1.ListBackupsResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.ListBackupsResponse) { + return mergeFrom((com.google.spanner.admin.database.v1.ListBackupsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.ListBackupsResponse other) { + if (other == com.google.spanner.admin.database.v1.ListBackupsResponse.getDefaultInstance()) + return this; + if (backupsBuilder_ == null) { + if (!other.backups_.isEmpty()) { + if (backups_.isEmpty()) { + backups_ = 
other.backups_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureBackupsIsMutable(); + backups_.addAll(other.backups_); + } + onChanged(); + } + } else { + if (!other.backups_.isEmpty()) { + if (backupsBuilder_.isEmpty()) { + backupsBuilder_.dispose(); + backupsBuilder_ = null; + backups_ = other.backups_; + bitField0_ = (bitField0_ & ~0x00000001); + backupsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetBackupsFieldBuilder() + : null; + } else { + backupsBuilder_.addAllMessages(other.backups_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.admin.database.v1.Backup m = + input.readMessage( + com.google.spanner.admin.database.v1.Backup.parser(), extensionRegistry); + if (backupsBuilder_ == null) { + ensureBackupsIsMutable(); + backups_.add(m); + } else { + backupsBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } 
finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List backups_ = + java.util.Collections.emptyList(); + + private void ensureBackupsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + backups_ = new java.util.ArrayList(backups_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.Backup, + com.google.spanner.admin.database.v1.Backup.Builder, + com.google.spanner.admin.database.v1.BackupOrBuilder> + backupsBuilder_; + + /** + * + * + *
    +     * The list of matching backups. Backups returned are ordered by `create_time`
    +     * in descending order, starting from the most recent `create_time`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + public java.util.List getBackupsList() { + if (backupsBuilder_ == null) { + return java.util.Collections.unmodifiableList(backups_); + } else { + return backupsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * The list of matching backups. Backups returned are ordered by `create_time`
    +     * in descending order, starting from the most recent `create_time`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + public int getBackupsCount() { + if (backupsBuilder_ == null) { + return backups_.size(); + } else { + return backupsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * The list of matching backups. Backups returned are ordered by `create_time`
    +     * in descending order, starting from the most recent `create_time`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + public com.google.spanner.admin.database.v1.Backup getBackups(int index) { + if (backupsBuilder_ == null) { + return backups_.get(index); + } else { + return backupsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * The list of matching backups. Backups returned are ordered by `create_time`
    +     * in descending order, starting from the most recent `create_time`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + public Builder setBackups(int index, com.google.spanner.admin.database.v1.Backup value) { + if (backupsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBackupsIsMutable(); + backups_.set(index, value); + onChanged(); + } else { + backupsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of matching backups. Backups returned are ordered by `create_time`
    +     * in descending order, starting from the most recent `create_time`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + public Builder setBackups( + int index, com.google.spanner.admin.database.v1.Backup.Builder builderForValue) { + if (backupsBuilder_ == null) { + ensureBackupsIsMutable(); + backups_.set(index, builderForValue.build()); + onChanged(); + } else { + backupsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of matching backups. Backups returned are ordered by `create_time`
    +     * in descending order, starting from the most recent `create_time`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + public Builder addBackups(com.google.spanner.admin.database.v1.Backup value) { + if (backupsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBackupsIsMutable(); + backups_.add(value); + onChanged(); + } else { + backupsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * The list of matching backups. Backups returned are ordered by `create_time`
    +     * in descending order, starting from the most recent `create_time`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + public Builder addBackups(int index, com.google.spanner.admin.database.v1.Backup value) { + if (backupsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBackupsIsMutable(); + backups_.add(index, value); + onChanged(); + } else { + backupsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of matching backups. Backups returned are ordered by `create_time`
    +     * in descending order, starting from the most recent `create_time`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + public Builder addBackups(com.google.spanner.admin.database.v1.Backup.Builder builderForValue) { + if (backupsBuilder_ == null) { + ensureBackupsIsMutable(); + backups_.add(builderForValue.build()); + onChanged(); + } else { + backupsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of matching backups. Backups returned are ordered by `create_time`
    +     * in descending order, starting from the most recent `create_time`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + public Builder addBackups( + int index, com.google.spanner.admin.database.v1.Backup.Builder builderForValue) { + if (backupsBuilder_ == null) { + ensureBackupsIsMutable(); + backups_.add(index, builderForValue.build()); + onChanged(); + } else { + backupsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of matching backups. Backups returned are ordered by `create_time`
    +     * in descending order, starting from the most recent `create_time`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + public Builder addAllBackups( + java.lang.Iterable values) { + if (backupsBuilder_ == null) { + ensureBackupsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, backups_); + onChanged(); + } else { + backupsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * The list of matching backups. Backups returned are ordered by `create_time`
    +     * in descending order, starting from the most recent `create_time`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + public Builder clearBackups() { + if (backupsBuilder_ == null) { + backups_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + backupsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * The list of matching backups. Backups returned are ordered by `create_time`
    +     * in descending order, starting from the most recent `create_time`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + public Builder removeBackups(int index) { + if (backupsBuilder_ == null) { + ensureBackupsIsMutable(); + backups_.remove(index); + onChanged(); + } else { + backupsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * The list of matching backups. Backups returned are ordered by `create_time`
    +     * in descending order, starting from the most recent `create_time`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + public com.google.spanner.admin.database.v1.Backup.Builder getBackupsBuilder(int index) { + return internalGetBackupsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * The list of matching backups. Backups returned are ordered by `create_time`
    +     * in descending order, starting from the most recent `create_time`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + public com.google.spanner.admin.database.v1.BackupOrBuilder getBackupsOrBuilder(int index) { + if (backupsBuilder_ == null) { + return backups_.get(index); + } else { + return backupsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * The list of matching backups. Backups returned are ordered by `create_time`
    +     * in descending order, starting from the most recent `create_time`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + public java.util.List + getBackupsOrBuilderList() { + if (backupsBuilder_ != null) { + return backupsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(backups_); + } + } + + /** + * + * + *
    +     * The list of matching backups. Backups returned are ordered by `create_time`
    +     * in descending order, starting from the most recent `create_time`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + public com.google.spanner.admin.database.v1.Backup.Builder addBackupsBuilder() { + return internalGetBackupsFieldBuilder() + .addBuilder(com.google.spanner.admin.database.v1.Backup.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of matching backups. Backups returned are ordered by `create_time`
    +     * in descending order, starting from the most recent `create_time`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + public com.google.spanner.admin.database.v1.Backup.Builder addBackupsBuilder(int index) { + return internalGetBackupsFieldBuilder() + .addBuilder(index, com.google.spanner.admin.database.v1.Backup.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of matching backups. Backups returned are ordered by `create_time`
    +     * in descending order, starting from the most recent `create_time`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + public java.util.List + getBackupsBuilderList() { + return internalGetBackupsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.Backup, + com.google.spanner.admin.database.v1.Backup.Builder, + com.google.spanner.admin.database.v1.BackupOrBuilder> + internalGetBackupsFieldBuilder() { + if (backupsBuilder_ == null) { + backupsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.Backup, + com.google.spanner.admin.database.v1.Backup.Builder, + com.google.spanner.admin.database.v1.BackupOrBuilder>( + backups_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + backups_ = null; + } + return backupsBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
    +     * call to fetch more of the matching backups.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
    +     * call to fetch more of the matching backups.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
    +     * call to fetch more of the matching backups.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
    +     * call to fetch more of the matching backups.
    +     * 
    + * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
    +     * call to fetch more of the matching backups.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.ListBackupsResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListBackupsResponse) + private static final com.google.spanner.admin.database.v1.ListBackupsResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.ListBackupsResponse(); + } + + public static com.google.spanner.admin.database.v1.ListBackupsResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListBackupsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser 
getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupsResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsResponseOrBuilder.java new file mode 100644 index 000000000000..c829ea065071 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsResponseOrBuilder.java @@ -0,0 +1,119 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface ListBackupsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.ListBackupsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The list of matching backups. Backups returned are ordered by `create_time`
    +   * in descending order, starting from the most recent `create_time`.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + java.util.List getBackupsList(); + + /** + * + * + *
    +   * The list of matching backups. Backups returned are ordered by `create_time`
    +   * in descending order, starting from the most recent `create_time`.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + com.google.spanner.admin.database.v1.Backup getBackups(int index); + + /** + * + * + *
    +   * The list of matching backups. Backups returned are ordered by `create_time`
    +   * in descending order, starting from the most recent `create_time`.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + int getBackupsCount(); + + /** + * + * + *
    +   * The list of matching backups. Backups returned are ordered by `create_time`
    +   * in descending order, starting from the most recent `create_time`.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + java.util.List + getBackupsOrBuilderList(); + + /** + * + * + *
    +   * The list of matching backups. Backups returned are ordered by `create_time`
    +   * in descending order, starting from the most recent `create_time`.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Backup backups = 1; + */ + com.google.spanner.admin.database.v1.BackupOrBuilder getBackupsOrBuilder(int index); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
    +   * call to fetch more of the matching backups.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
    +   * call to fetch more of the matching backups.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsRequest.java new file mode 100644 index 000000000000..7b2ab5ab931e --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsRequest.java @@ -0,0 +1,1446 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListDatabaseOperationsRequest} + */ +@com.google.protobuf.Generated +public final class ListDatabaseOperationsRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.ListDatabaseOperationsRequest) + ListDatabaseOperationsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListDatabaseOperationsRequest"); + } + + // Use ListDatabaseOperationsRequest.newBuilder() to construct. + private ListDatabaseOperationsRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListDatabaseOperationsRequest() { + parent_ = ""; + filter_ = ""; + pageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabaseOperationsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabaseOperationsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest.class, + com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
    +   * Required. The instance of the database operations.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The instance of the database operations.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FILTER_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object filter_ = ""; + + /** + * + * + *
    +   * An expression that filters the list of returned operations.
    +   *
    +   * A filter expression consists of a field name, a
    +   * comparison operator, and a value for filtering.
    +   * The value must be a string, a number, or a boolean. The comparison operator
    +   * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +   * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +   *
    +   * The following fields in the [Operation][google.longrunning.Operation]
    +   * are eligible for filtering:
    +   *
    +   * * `name` - The name of the long-running operation
    +   * * `done` - False if the operation is in progress, else true.
    +   * * `metadata.@type` - the type of metadata. For example, the type string
    +   * for
    +   * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]
    +   * is
    +   * `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`.
    +   * * `metadata.<field_name>` - any field in metadata.value.
    +   * `metadata.@type` must be specified first, if filtering on metadata
    +   * fields.
    +   * * `error` - Error associated with the long-running operation.
    +   * * `response.@type` - the type of response.
    +   * * `response.<field_name>` - any field in response.value.
    +   *
    +   * You can combine multiple expressions by enclosing each expression in
    +   * parentheses. By default, expressions are combined with AND logic. However,
    +   * you can specify AND, OR, and NOT logic explicitly.
    +   *
    +   * Here are a few examples:
    +   *
    +   * * `done:true` - The operation is complete.
    +   * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \
    +   * `(metadata.source_type:BACKUP) AND` \
    +   * `(metadata.backup_info.backup:backup_howl) AND` \
    +   * `(metadata.name:restored_howl) AND` \
    +   * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
    +   * `(error:*)` - Return operations where:
    +   * * The operation's metadata type is
    +   * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
    +   * * The database is restored from a backup.
    +   * * The backup name contains "backup_howl".
    +   * * The restored database's name contains "restored_howl".
    +   * * The operation started before 2018-03-28T14:50:00Z.
    +   * * The operation resulted in an error.
    +   * 
    + * + * string filter = 2; + * + * @return The filter. + */ + @java.lang.Override + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } + } + + /** + * + * + *
    +   * An expression that filters the list of returned operations.
    +   *
    +   * A filter expression consists of a field name, a
    +   * comparison operator, and a value for filtering.
    +   * The value must be a string, a number, or a boolean. The comparison operator
    +   * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +   * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +   *
    +   * The following fields in the [Operation][google.longrunning.Operation]
    +   * are eligible for filtering:
    +   *
    +   * * `name` - The name of the long-running operation
    +   * * `done` - False if the operation is in progress, else true.
    +   * * `metadata.@type` - the type of metadata. For example, the type string
    +   * for
    +   * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]
    +   * is
    +   * `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`.
    +   * * `metadata.<field_name>` - any field in metadata.value.
    +   * `metadata.@type` must be specified first, if filtering on metadata
    +   * fields.
    +   * * `error` - Error associated with the long-running operation.
    +   * * `response.@type` - the type of response.
    +   * * `response.<field_name>` - any field in response.value.
    +   *
    +   * You can combine multiple expressions by enclosing each expression in
    +   * parentheses. By default, expressions are combined with AND logic. However,
    +   * you can specify AND, OR, and NOT logic explicitly.
    +   *
    +   * Here are a few examples:
    +   *
    +   * * `done:true` - The operation is complete.
    +   * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \
    +   * `(metadata.source_type:BACKUP) AND` \
    +   * `(metadata.backup_info.backup:backup_howl) AND` \
    +   * `(metadata.name:restored_howl) AND` \
    +   * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
    +   * `(error:*)` - Return operations where:
    +   * * The operation's metadata type is
    +   * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
    +   * * The database is restored from a backup.
    +   * * The backup name contains "backup_howl".
    +   * * The restored database's name contains "restored_howl".
    +   * * The operation started before 2018-03-28T14:50:00Z.
    +   * * The operation resulted in an error.
    +   * 
    + * + * string filter = 2; + * + * @return The bytes for filter. + */ + @java.lang.Override + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 3; + private int pageSize_ = 0; + + /** + * + * + *
    +   * Number of operations to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 3; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
    +   * from a previous
    +   * [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
    +   * 
    + * + * string page_token = 4; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
    +   * from a previous
    +   * [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
    +   * 
    + * + * string page_token = 4; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, filter_); + } + if (pageSize_ != 0) { + output.writeInt32(3, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, pageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, filter_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(3, pageSize_); + } + if 
(!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, pageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest other = + (com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getFilter().equals(other.getFilter())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + FILTER_FIELD_NUMBER; + hash = (53 * hash) + getFilter().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest parseFrom( + java.nio.ByteBuffer data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListDatabaseOperationsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.ListDatabaseOperationsRequest) + com.google.spanner.admin.database.v1.ListDatabaseOperationsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabaseOperationsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabaseOperationsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest.class, + com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest.Builder.class); + } + + // Construct using + // com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + filter_ = ""; + pageSize_ = 0; + pageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabaseOperationsRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest + getDefaultInstanceForType() { + return 
com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest build() { + com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest buildPartial() { + com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest result = + new com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.filter_ = filter_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.pageToken_ = pageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest) { + return mergeFrom( + (com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest other) { + if (other + == com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getFilter().isEmpty()) { + filter_ = other.filter_; + 
bitField0_ |= 0x00000002; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000008; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + filter_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
    +     * Required. The instance of the database operations.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The instance of the database operations.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The instance of the database operations.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance of the database operations.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance of the database operations.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object filter_ = ""; + + /** + * + * + *
    +     * An expression that filters the list of returned operations.
    +     *
    +     * A filter expression consists of a field name, a
    +     * comparison operator, and a value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the [Operation][google.longrunning.Operation]
    +     * are eligible for filtering:
    +     *
    +     * * `name` - The name of the long-running operation
    +     * * `done` - False if the operation is in progress, else true.
    +     * * `metadata.@type` - the type of metadata. For example, the type string
    +     * for
    +     * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]
    +     * is
    +     * `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`.
    +     * * `metadata.<field_name>` - any field in metadata.value.
    +     * `metadata.@type` must be specified first, if filtering on metadata
    +     * fields.
    +     * * `error` - Error associated with the long-running operation.
    +     * * `response.@type` - the type of response.
    +     * * `response.<field_name>` - any field in response.value.
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic. However,
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `done:true` - The operation is complete.
    +     * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \
    +     * `(metadata.source_type:BACKUP) AND` \
    +     * `(metadata.backup_info.backup:backup_howl) AND` \
    +     * `(metadata.name:restored_howl) AND` \
    +     * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
    +     * `(error:*)` - Return operations where:
    +     * * The operation's metadata type is
    +     * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
    +     * * The database is restored from a backup.
    +     * * The backup name contains "backup_howl".
    +     * * The restored database's name contains "restored_howl".
    +     * * The operation started before 2018-03-28T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * 
    + * + * string filter = 2; + * + * @return The filter. + */ + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * An expression that filters the list of returned operations.
    +     *
    +     * A filter expression consists of a field name, a
    +     * comparison operator, and a value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the [Operation][google.longrunning.Operation]
    +     * are eligible for filtering:
    +     *
    +     * * `name` - The name of the long-running operation
    +     * * `done` - False if the operation is in progress, else true.
    +     * * `metadata.@type` - the type of metadata. For example, the type string
    +     * for
    +     * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]
    +     * is
    +     * `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`.
    +     * * `metadata.<field_name>` - any field in metadata.value.
    +     * `metadata.@type` must be specified first, if filtering on metadata
    +     * fields.
    +     * * `error` - Error associated with the long-running operation.
    +     * * `response.@type` - the type of response.
    +     * * `response.<field_name>` - any field in response.value.
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic. However,
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `done:true` - The operation is complete.
    +     * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \
    +     * `(metadata.source_type:BACKUP) AND` \
    +     * `(metadata.backup_info.backup:backup_howl) AND` \
    +     * `(metadata.name:restored_howl) AND` \
    +     * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
    +     * `(error:*)` - Return operations where:
    +     * * The operation's metadata type is
    +     * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
    +     * * The database is restored from a backup.
    +     * * The backup name contains "backup_howl".
    +     * * The restored database's name contains "restored_howl".
    +     * * The operation started before 2018-03-28T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * 
    + * + * string filter = 2; + * + * @return The bytes for filter. + */ + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * An expression that filters the list of returned operations.
    +     *
    +     * A filter expression consists of a field name, a
    +     * comparison operator, and a value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the [Operation][google.longrunning.Operation]
    +     * are eligible for filtering:
    +     *
    +     * * `name` - The name of the long-running operation
    +     * * `done` - False if the operation is in progress, else true.
    +     * * `metadata.@type` - the type of metadata. For example, the type string
    +     * for
    +     * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]
    +     * is
    +     * `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`.
    +     * * `metadata.<field_name>` - any field in metadata.value.
    +     * `metadata.@type` must be specified first, if filtering on metadata
    +     * fields.
    +     * * `error` - Error associated with the long-running operation.
    +     * * `response.@type` - the type of response.
    +     * * `response.<field_name>` - any field in response.value.
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic. However,
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `done:true` - The operation is complete.
    +     * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \
    +     * `(metadata.source_type:BACKUP) AND` \
    +     * `(metadata.backup_info.backup:backup_howl) AND` \
    +     * `(metadata.name:restored_howl) AND` \
    +     * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
    +     * `(error:*)` - Return operations where:
    +     * * The operation's metadata type is
    +     * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
    +     * * The database is restored from a backup.
    +     * * The backup name contains "backup_howl".
    +     * * The restored database's name contains "restored_howl".
    +     * * The operation started before 2018-03-28T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * 
    + * + * string filter = 2; + * + * @param value The filter to set. + * @return This builder for chaining. + */ + public Builder setFilter(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + filter_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * An expression that filters the list of returned operations.
    +     *
    +     * A filter expression consists of a field name, a
    +     * comparison operator, and a value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the [Operation][google.longrunning.Operation]
    +     * are eligible for filtering:
    +     *
    +     * * `name` - The name of the long-running operation
    +     * * `done` - False if the operation is in progress, else true.
    +     * * `metadata.@type` - the type of metadata. For example, the type string
    +     * for
    +     * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]
    +     * is
    +     * `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`.
    +     * * `metadata.<field_name>` - any field in metadata.value.
    +     * `metadata.@type` must be specified first, if filtering on metadata
    +     * fields.
    +     * * `error` - Error associated with the long-running operation.
    +     * * `response.@type` - the type of response.
    +     * * `response.<field_name>` - any field in response.value.
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic. However,
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `done:true` - The operation is complete.
    +     * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \
    +     * `(metadata.source_type:BACKUP) AND` \
    +     * `(metadata.backup_info.backup:backup_howl) AND` \
    +     * `(metadata.name:restored_howl) AND` \
    +     * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
    +     * `(error:*)` - Return operations where:
    +     * * The operation's metadata type is
    +     * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
    +     * * The database is restored from a backup.
    +     * * The backup name contains "backup_howl".
    +     * * The restored database's name contains "restored_howl".
    +     * * The operation started before 2018-03-28T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * 
    + * + * string filter = 2; + * + * @return This builder for chaining. + */ + public Builder clearFilter() { + filter_ = getDefaultInstance().getFilter(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * An expression that filters the list of returned operations.
    +     *
    +     * A filter expression consists of a field name, a
    +     * comparison operator, and a value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the [Operation][google.longrunning.Operation]
    +     * are eligible for filtering:
    +     *
    +     * * `name` - The name of the long-running operation
    +     * * `done` - False if the operation is in progress, else true.
    +     * * `metadata.@type` - the type of metadata. For example, the type string
    +     * for
    +     * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]
    +     * is
    +     * `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`.
    +     * * `metadata.<field_name>` - any field in metadata.value.
    +     * `metadata.@type` must be specified first, if filtering on metadata
    +     * fields.
    +     * * `error` - Error associated with the long-running operation.
    +     * * `response.@type` - the type of response.
    +     * * `response.<field_name>` - any field in response.value.
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic. However,
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `done:true` - The operation is complete.
    +     * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \
    +     * `(metadata.source_type:BACKUP) AND` \
    +     * `(metadata.backup_info.backup:backup_howl) AND` \
    +     * `(metadata.name:restored_howl) AND` \
    +     * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
    +     * `(error:*)` - Return operations where:
    +     * * The operation's metadata type is
    +     * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
    +     * * The database is restored from a backup.
    +     * * The backup name contains "backup_howl".
    +     * * The restored database's name contains "restored_howl".
    +     * * The operation started before 2018-03-28T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * 
    + * + * string filter = 2; + * + * @param value The bytes for filter to set. + * @return This builder for chaining. + */ + public Builder setFilterBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + filter_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
    +     * Number of operations to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 3; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
    +     * Number of operations to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 3; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Number of operations to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 3; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000004); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.ListDatabaseOperationsRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListDatabaseOperationsRequest) + private static final com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest(); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListDatabaseOperationsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + 
public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsRequestOrBuilder.java new file mode 100644 index 000000000000..d38a285db644 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsRequestOrBuilder.java @@ -0,0 +1,222 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface ListDatabaseOperationsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.ListDatabaseOperationsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The instance of the database operations.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
    +   * Required. The instance of the database operations.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
    +   * An expression that filters the list of returned operations.
    +   *
    +   * A filter expression consists of a field name, a
    +   * comparison operator, and a value for filtering.
    +   * The value must be a string, a number, or a boolean. The comparison operator
    +   * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +   * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +   *
    +   * The following fields in the [Operation][google.longrunning.Operation]
    +   * are eligible for filtering:
    +   *
    +   * * `name` - The name of the long-running operation
    +   * * `done` - False if the operation is in progress, else true.
    +   * * `metadata.@type` - the type of metadata. For example, the type string
    +   * for
    +   * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]
    +   * is
    +   * `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`.
    +   * * `metadata.<field_name>` - any field in metadata.value.
    +   * `metadata.@type` must be specified first, if filtering on metadata
    +   * fields.
    +   * * `error` - Error associated with the long-running operation.
    +   * * `response.@type` - the type of response.
    +   * * `response.<field_name>` - any field in response.value.
    +   *
    +   * You can combine multiple expressions by enclosing each expression in
    +   * parentheses. By default, expressions are combined with AND logic. However,
    +   * you can specify AND, OR, and NOT logic explicitly.
    +   *
    +   * Here are a few examples:
    +   *
    +   * * `done:true` - The operation is complete.
    +   * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \
    +   * `(metadata.source_type:BACKUP) AND` \
    +   * `(metadata.backup_info.backup:backup_howl) AND` \
    +   * `(metadata.name:restored_howl) AND` \
    +   * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
    +   * `(error:*)` - Return operations where:
    +   * * The operation's metadata type is
    +   * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
    +   * * The database is restored from a backup.
    +   * * The backup name contains "backup_howl".
    +   * * The restored database's name contains "restored_howl".
    +   * * The operation started before 2018-03-28T14:50:00Z.
    +   * * The operation resulted in an error.
    +   * 
    + * + * string filter = 2; + * + * @return The filter. + */ + java.lang.String getFilter(); + + /** + * + * + *
    +   * An expression that filters the list of returned operations.
    +   *
    +   * A filter expression consists of a field name, a
    +   * comparison operator, and a value for filtering.
    +   * The value must be a string, a number, or a boolean. The comparison operator
    +   * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +   * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +   *
    +   * The following fields in the [Operation][google.longrunning.Operation]
    +   * are eligible for filtering:
    +   *
    +   * * `name` - The name of the long-running operation
    +   * * `done` - False if the operation is in progress, else true.
    +   * * `metadata.@type` - the type of metadata. For example, the type string
    +   * for
    +   * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]
    +   * is
    +   * `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`.
    +   * * `metadata.<field_name>` - any field in metadata.value.
    +   * `metadata.@type` must be specified first, if filtering on metadata
    +   * fields.
    +   * * `error` - Error associated with the long-running operation.
    +   * * `response.@type` - the type of response.
    +   * * `response.<field_name>` - any field in response.value.
    +   *
    +   * You can combine multiple expressions by enclosing each expression in
    +   * parentheses. By default, expressions are combined with AND logic. However,
    +   * you can specify AND, OR, and NOT logic explicitly.
    +   *
    +   * Here are a few examples:
    +   *
    +   * * `done:true` - The operation is complete.
    +   * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \
    +   * `(metadata.source_type:BACKUP) AND` \
    +   * `(metadata.backup_info.backup:backup_howl) AND` \
    +   * `(metadata.name:restored_howl) AND` \
    +   * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
    +   * `(error:*)` - Return operations where:
    +   * * The operation's metadata type is
    +   * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
    +   * * The database is restored from a backup.
    +   * * The backup name contains "backup_howl".
    +   * * The restored database's name contains "restored_howl".
    +   * * The operation started before 2018-03-28T14:50:00Z.
    +   * * The operation resulted in an error.
    +   * 
    + * + * string filter = 2; + * + * @return The bytes for filter. + */ + com.google.protobuf.ByteString getFilterBytes(); + + /** + * + * + *
    +   * Number of operations to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 3; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
    +   * from a previous
    +   * [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
    +   * 
    + * + * string page_token = 4; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
    +   * from a previous
    +   * [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
    +   * 
    + * + * string page_token = 4; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsResponse.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsResponse.java new file mode 100644 index 000000000000..3b977a7c6e7a --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsResponse.java @@ -0,0 +1,1229 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The response for
    + * [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListDatabaseOperationsResponse} + */ +@com.google.protobuf.Generated +public final class ListDatabaseOperationsResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.ListDatabaseOperationsResponse) + ListDatabaseOperationsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListDatabaseOperationsResponse"); + } + + // Use ListDatabaseOperationsResponse.newBuilder() to construct. + private ListDatabaseOperationsResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListDatabaseOperationsResponse() { + operations_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabaseOperationsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabaseOperationsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse.class, + com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse.Builder.class); + } + + public static final int OPERATIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List operations_; + + /** + * + * + *
    +   * The list of matching database [long-running
    +   * operations][google.longrunning.Operation]. Each operation's name will be
    +   * prefixed by the database's name. The operation's
    +   * [metadata][google.longrunning.Operation.metadata] field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + @java.lang.Override + public java.util.List getOperationsList() { + return operations_; + } + + /** + * + * + *
    +   * The list of matching database [long-running
    +   * operations][google.longrunning.Operation]. Each operation's name will be
    +   * prefixed by the database's name. The operation's
    +   * [metadata][google.longrunning.Operation.metadata] field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + @java.lang.Override + public java.util.List + getOperationsOrBuilderList() { + return operations_; + } + + /** + * + * + *
    +   * The list of matching database [long-running
    +   * operations][google.longrunning.Operation]. Each operation's name will be
    +   * prefixed by the database's name. The operation's
    +   * [metadata][google.longrunning.Operation.metadata] field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + @java.lang.Override + public int getOperationsCount() { + return operations_.size(); + } + + /** + * + * + *
    +   * The list of matching database [long-running
    +   * operations][google.longrunning.Operation]. Each operation's name will be
    +   * prefixed by the database's name. The operation's
    +   * [metadata][google.longrunning.Operation.metadata] field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + @java.lang.Override + public com.google.longrunning.Operation getOperations(int index) { + return operations_.get(index); + } + + /** + * + * + *
    +   * The list of matching database [long-running
    +   * operations][google.longrunning.Operation]. Each operation's name will be
    +   * prefixed by the database's name. The operation's
    +   * [metadata][google.longrunning.Operation.metadata] field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + @java.lang.Override + public com.google.longrunning.OperationOrBuilder getOperationsOrBuilder(int index) { + return operations_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]
    +   * call to fetch more of the matching metadata.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]
    +   * call to fetch more of the matching metadata.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < operations_.size(); i++) { + output.writeMessage(1, operations_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < operations_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, operations_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse)) { + return super.equals(obj); + } + 
com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse other = + (com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse) obj; + + if (!getOperationsList().equals(other.getOperationsList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getOperationsCount() > 0) { + hash = (37 * hash) + OPERATIONS_FIELD_NUMBER; + hash = (53 * hash) + getOperationsList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); 
+ } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The response for
    +   * [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListDatabaseOperationsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.ListDatabaseOperationsResponse) + com.google.spanner.admin.database.v1.ListDatabaseOperationsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabaseOperationsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabaseOperationsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse.class, + com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse.Builder.class); + } + + // Construct using + // com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (operationsBuilder_ == null) { + operations_ = java.util.Collections.emptyList(); + } else { + operations_ = null; + operationsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabaseOperationsResponse_descriptor; + } + + 
@java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse build() { + com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse buildPartial() { + com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse result = + new com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse result) { + if (operationsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + operations_ = java.util.Collections.unmodifiableList(operations_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.operations_ = operations_; + } else { + result.operations_ = operationsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse) { + return mergeFrom( + (com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + 
com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse other) { + if (other + == com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse + .getDefaultInstance()) return this; + if (operationsBuilder_ == null) { + if (!other.operations_.isEmpty()) { + if (operations_.isEmpty()) { + operations_ = other.operations_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureOperationsIsMutable(); + operations_.addAll(other.operations_); + } + onChanged(); + } + } else { + if (!other.operations_.isEmpty()) { + if (operationsBuilder_.isEmpty()) { + operationsBuilder_.dispose(); + operationsBuilder_ = null; + operations_ = other.operations_; + bitField0_ = (bitField0_ & ~0x00000001); + operationsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetOperationsFieldBuilder() + : null; + } else { + operationsBuilder_.addAllMessages(other.operations_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.longrunning.Operation m = + input.readMessage(com.google.longrunning.Operation.parser(), extensionRegistry); + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + operations_.add(m); + } else { + operationsBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = 
input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List operations_ = + java.util.Collections.emptyList(); + + private void ensureOperationsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + operations_ = new java.util.ArrayList(operations_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder> + operationsBuilder_; + + /** + * + * + *
    +     * The list of matching database [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the database's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public java.util.List getOperationsList() { + if (operationsBuilder_ == null) { + return java.util.Collections.unmodifiableList(operations_); + } else { + return operationsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * The list of matching database [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the database's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public int getOperationsCount() { + if (operationsBuilder_ == null) { + return operations_.size(); + } else { + return operationsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * The list of matching database [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the database's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public com.google.longrunning.Operation getOperations(int index) { + if (operationsBuilder_ == null) { + return operations_.get(index); + } else { + return operationsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * The list of matching database [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the database's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder setOperations(int index, com.google.longrunning.Operation value) { + if (operationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOperationsIsMutable(); + operations_.set(index, value); + onChanged(); + } else { + operationsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of matching database [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the database's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder setOperations( + int index, com.google.longrunning.Operation.Builder builderForValue) { + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + operations_.set(index, builderForValue.build()); + onChanged(); + } else { + operationsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of matching database [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the database's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder addOperations(com.google.longrunning.Operation value) { + if (operationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOperationsIsMutable(); + operations_.add(value); + onChanged(); + } else { + operationsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * The list of matching database [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the database's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder addOperations(int index, com.google.longrunning.Operation value) { + if (operationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOperationsIsMutable(); + operations_.add(index, value); + onChanged(); + } else { + operationsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of matching database [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the database's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder addOperations(com.google.longrunning.Operation.Builder builderForValue) { + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + operations_.add(builderForValue.build()); + onChanged(); + } else { + operationsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of matching database [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the database's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder addOperations( + int index, com.google.longrunning.Operation.Builder builderForValue) { + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + operations_.add(index, builderForValue.build()); + onChanged(); + } else { + operationsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of matching database [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the database's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder addAllOperations( + java.lang.Iterable values) { + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, operations_); + onChanged(); + } else { + operationsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * The list of matching database [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the database's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder clearOperations() { + if (operationsBuilder_ == null) { + operations_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + operationsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * The list of matching database [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the database's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder removeOperations(int index) { + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + operations_.remove(index); + onChanged(); + } else { + operationsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * The list of matching database [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the database's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public com.google.longrunning.Operation.Builder getOperationsBuilder(int index) { + return internalGetOperationsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * The list of matching database [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the database's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public com.google.longrunning.OperationOrBuilder getOperationsOrBuilder(int index) { + if (operationsBuilder_ == null) { + return operations_.get(index); + } else { + return operationsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * The list of matching database [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the database's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public java.util.List + getOperationsOrBuilderList() { + if (operationsBuilder_ != null) { + return operationsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(operations_); + } + } + + /** + * + * + *
    +     * The list of matching database [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the database's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public com.google.longrunning.Operation.Builder addOperationsBuilder() { + return internalGetOperationsFieldBuilder() + .addBuilder(com.google.longrunning.Operation.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of matching database [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the database's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public com.google.longrunning.Operation.Builder addOperationsBuilder(int index) { + return internalGetOperationsFieldBuilder() + .addBuilder(index, com.google.longrunning.Operation.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of matching database [long-running
    +     * operations][google.longrunning.Operation]. Each operation's name will be
    +     * prefixed by the database's name. The operation's
    +     * [metadata][google.longrunning.Operation.metadata] field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public java.util.List getOperationsBuilderList() { + return internalGetOperationsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder> + internalGetOperationsFieldBuilder() { + if (operationsBuilder_ == null) { + operationsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder>( + operations_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + operations_ = null; + } + return operationsBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]
    +     * call to fetch more of the matching metadata.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]
    +     * call to fetch more of the matching metadata.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]
    +     * call to fetch more of the matching metadata.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]
    +     * call to fetch more of the matching metadata.
    +     * 
    + * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]
    +     * call to fetch more of the matching metadata.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.ListDatabaseOperationsResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListDatabaseOperationsResponse) + private static final com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse(); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListDatabaseOperationsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsResponseOrBuilder.java new file mode 100644 index 000000000000..1b4b32f72f82 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsResponseOrBuilder.java @@ -0,0 +1,133 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface ListDatabaseOperationsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.ListDatabaseOperationsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The list of matching database [long-running
    +   * operations][google.longrunning.Operation]. Each operation's name will be
    +   * prefixed by the database's name. The operation's
    +   * [metadata][google.longrunning.Operation.metadata] field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + java.util.List getOperationsList(); + + /** + * + * + *
    +   * The list of matching database [long-running
    +   * operations][google.longrunning.Operation]. Each operation's name will be
    +   * prefixed by the database's name. The operation's
    +   * [metadata][google.longrunning.Operation.metadata] field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + com.google.longrunning.Operation getOperations(int index); + + /** + * + * + *
    +   * The list of matching database [long-running
    +   * operations][google.longrunning.Operation]. Each operation's name will be
    +   * prefixed by the database's name. The operation's
    +   * [metadata][google.longrunning.Operation.metadata] field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + int getOperationsCount(); + + /** + * + * + *
    +   * The list of matching database [long-running
    +   * operations][google.longrunning.Operation]. Each operation's name will be
    +   * prefixed by the database's name. The operation's
    +   * [metadata][google.longrunning.Operation.metadata] field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + java.util.List getOperationsOrBuilderList(); + + /** + * + * + *
    +   * The list of matching database [long-running
    +   * operations][google.longrunning.Operation]. Each operation's name will be
    +   * prefixed by the database's name. The operation's
    +   * [metadata][google.longrunning.Operation.metadata] field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + com.google.longrunning.OperationOrBuilder getOperationsOrBuilder(int index); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]
    +   * call to fetch more of the matching metadata.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]
    +   * call to fetch more of the matching metadata.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesRequest.java new file mode 100644 index 000000000000..bea2b96c518f --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesRequest.java @@ -0,0 +1,942 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListDatabaseRolesRequest} + */ +@com.google.protobuf.Generated +public final class ListDatabaseRolesRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.ListDatabaseRolesRequest) + ListDatabaseRolesRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListDatabaseRolesRequest"); + } + + // Use ListDatabaseRolesRequest.newBuilder() to construct. + private ListDatabaseRolesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListDatabaseRolesRequest() { + parent_ = ""; + pageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabaseRolesRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabaseRolesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListDatabaseRolesRequest.class, + com.google.spanner.admin.database.v1.ListDatabaseRolesRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
    +   * Required. The database whose roles should be listed.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The database whose roles should be listed.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_ = 0; + + /** + * + * + *
    +   * Number of database roles to be returned in the response. If 0 or less,
    +   * defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 2; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
    +   * from a previous
    +   * [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
    +   * 
    + * + * string page_token = 3; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
    +   * from a previous
    +   * [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
    +   * 
    + * + * string page_token = 3; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (pageSize_ != 0) { + output.writeInt32(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, pageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, pageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + 
return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.ListDatabaseRolesRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.ListDatabaseRolesRequest other = + (com.google.spanner.admin.database.v1.ListDatabaseRolesRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesRequest parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.ListDatabaseRolesRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListDatabaseRolesRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.ListDatabaseRolesRequest) + com.google.spanner.admin.database.v1.ListDatabaseRolesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabaseRolesRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabaseRolesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListDatabaseRolesRequest.class, + com.google.spanner.admin.database.v1.ListDatabaseRolesRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.ListDatabaseRolesRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + pageSize_ = 0; + pageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabaseRolesRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabaseRolesRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.ListDatabaseRolesRequest.getDefaultInstance(); + } + + 
@java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabaseRolesRequest build() { + com.google.spanner.admin.database.v1.ListDatabaseRolesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabaseRolesRequest buildPartial() { + com.google.spanner.admin.database.v1.ListDatabaseRolesRequest result = + new com.google.spanner.admin.database.v1.ListDatabaseRolesRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.ListDatabaseRolesRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageToken_ = pageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.ListDatabaseRolesRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.ListDatabaseRolesRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.ListDatabaseRolesRequest other) { + if (other + == com.google.spanner.admin.database.v1.ListDatabaseRolesRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override 
+ public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
    +     * Required. The database whose roles should be listed.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The database whose roles should be listed.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The database whose roles should be listed.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The database whose roles should be listed.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The database whose roles should be listed.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
    +     * Number of database roles to be returned in the response. If 0 or less,
    +     * defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 2; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
    +     * Number of database roles to be returned in the response. If 0 or less,
    +     * defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 2; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Number of database roles to be returned in the response. If 0 or less,
    +     * defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 2; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000002); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
    +     * 
    + * + * string page_token = 3; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
    +     * 
    + * + * string page_token = 3; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
    +     * 
    + * + * string page_token = 3; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
    +     * 
    + * + * string page_token = 3; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
    +     * 
    + * + * string page_token = 3; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.ListDatabaseRolesRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListDatabaseRolesRequest) + private static final com.google.spanner.admin.database.v1.ListDatabaseRolesRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.ListDatabaseRolesRequest(); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListDatabaseRolesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser 
getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabaseRolesRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesRequestOrBuilder.java new file mode 100644 index 000000000000..4b3d05560cb1 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesRequestOrBuilder.java @@ -0,0 +1,108 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface ListDatabaseRolesRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.ListDatabaseRolesRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The database whose roles should be listed.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
    +   * Required. The database whose roles should be listed.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
    +   * Number of database roles to be returned in the response. If 0 or less,
    +   * defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 2; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
    +   * from a previous
    +   * [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
    +   * 
    + * + * string page_token = 3; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
    +   * from a previous
    +   * [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
    +   * 
    + * + * string page_token = 3; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesResponse.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesResponse.java new file mode 100644 index 000000000000..b855c53ba836 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesResponse.java @@ -0,0 +1,1149 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The response for
    + * [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListDatabaseRolesResponse} + */ +@com.google.protobuf.Generated +public final class ListDatabaseRolesResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.ListDatabaseRolesResponse) + ListDatabaseRolesResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListDatabaseRolesResponse"); + } + + // Use ListDatabaseRolesResponse.newBuilder() to construct. + private ListDatabaseRolesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListDatabaseRolesResponse() { + databaseRoles_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabaseRolesResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabaseRolesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListDatabaseRolesResponse.class, + com.google.spanner.admin.database.v1.ListDatabaseRolesResponse.Builder.class); + } + + public static final int DATABASE_ROLES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List databaseRoles_; + + /** + * + * + *
    +   * Database roles that matched the request.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + @java.lang.Override + public java.util.List getDatabaseRolesList() { + return databaseRoles_; + } + + /** + * + * + *
    +   * Database roles that matched the request.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + @java.lang.Override + public java.util.List + getDatabaseRolesOrBuilderList() { + return databaseRoles_; + } + + /** + * + * + *
    +   * Database roles that matched the request.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + @java.lang.Override + public int getDatabaseRolesCount() { + return databaseRoles_.size(); + } + + /** + * + * + *
    +   * Database roles that matched the request.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.DatabaseRole getDatabaseRoles(int index) { + return databaseRoles_.get(index); + } + + /** + * + * + *
    +   * Database roles that matched the request.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.DatabaseRoleOrBuilder getDatabaseRolesOrBuilder( + int index) { + return databaseRoles_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]
    +   * call to fetch more of the matching roles.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]
    +   * call to fetch more of the matching roles.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < databaseRoles_.size(); i++) { + output.writeMessage(1, databaseRoles_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < databaseRoles_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, databaseRoles_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.ListDatabaseRolesResponse)) { + return super.equals(obj); + } + 
com.google.spanner.admin.database.v1.ListDatabaseRolesResponse other = + (com.google.spanner.admin.database.v1.ListDatabaseRolesResponse) obj; + + if (!getDatabaseRolesList().equals(other.getDatabaseRolesList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getDatabaseRolesCount() > 0) { + hash = (37 * hash) + DATABASE_ROLES_FIELD_NUMBER; + hash = (53 * hash) + getDatabaseRolesList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + 
public static com.google.spanner.admin.database.v1.ListDatabaseRolesResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesResponse parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.ListDatabaseRolesResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The response for
    +   * [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListDatabaseRolesResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.ListDatabaseRolesResponse) + com.google.spanner.admin.database.v1.ListDatabaseRolesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabaseRolesResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabaseRolesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListDatabaseRolesResponse.class, + com.google.spanner.admin.database.v1.ListDatabaseRolesResponse.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.ListDatabaseRolesResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (databaseRolesBuilder_ == null) { + databaseRoles_ = java.util.Collections.emptyList(); + } else { + databaseRoles_ = null; + databaseRolesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabaseRolesResponse_descriptor; + } + + @java.lang.Override + public 
com.google.spanner.admin.database.v1.ListDatabaseRolesResponse + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.ListDatabaseRolesResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabaseRolesResponse build() { + com.google.spanner.admin.database.v1.ListDatabaseRolesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabaseRolesResponse buildPartial() { + com.google.spanner.admin.database.v1.ListDatabaseRolesResponse result = + new com.google.spanner.admin.database.v1.ListDatabaseRolesResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.admin.database.v1.ListDatabaseRolesResponse result) { + if (databaseRolesBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + databaseRoles_ = java.util.Collections.unmodifiableList(databaseRoles_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.databaseRoles_ = databaseRoles_; + } else { + result.databaseRoles_ = databaseRolesBuilder_.build(); + } + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.ListDatabaseRolesResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.ListDatabaseRolesResponse) { + return mergeFrom((com.google.spanner.admin.database.v1.ListDatabaseRolesResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.ListDatabaseRolesResponse other) { + 
if (other + == com.google.spanner.admin.database.v1.ListDatabaseRolesResponse.getDefaultInstance()) + return this; + if (databaseRolesBuilder_ == null) { + if (!other.databaseRoles_.isEmpty()) { + if (databaseRoles_.isEmpty()) { + databaseRoles_ = other.databaseRoles_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureDatabaseRolesIsMutable(); + databaseRoles_.addAll(other.databaseRoles_); + } + onChanged(); + } + } else { + if (!other.databaseRoles_.isEmpty()) { + if (databaseRolesBuilder_.isEmpty()) { + databaseRolesBuilder_.dispose(); + databaseRolesBuilder_ = null; + databaseRoles_ = other.databaseRoles_; + bitField0_ = (bitField0_ & ~0x00000001); + databaseRolesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetDatabaseRolesFieldBuilder() + : null; + } else { + databaseRolesBuilder_.addAllMessages(other.databaseRoles_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.admin.database.v1.DatabaseRole m = + input.readMessage( + com.google.spanner.admin.database.v1.DatabaseRole.parser(), + extensionRegistry); + if (databaseRolesBuilder_ == null) { + ensureDatabaseRolesIsMutable(); + databaseRoles_.add(m); + } else { + databaseRolesBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = 
input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List databaseRoles_ = + java.util.Collections.emptyList(); + + private void ensureDatabaseRolesIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + databaseRoles_ = + new java.util.ArrayList( + databaseRoles_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.DatabaseRole, + com.google.spanner.admin.database.v1.DatabaseRole.Builder, + com.google.spanner.admin.database.v1.DatabaseRoleOrBuilder> + databaseRolesBuilder_; + + /** + * + * + *
    +     * Database roles that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + public java.util.List + getDatabaseRolesList() { + if (databaseRolesBuilder_ == null) { + return java.util.Collections.unmodifiableList(databaseRoles_); + } else { + return databaseRolesBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Database roles that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + public int getDatabaseRolesCount() { + if (databaseRolesBuilder_ == null) { + return databaseRoles_.size(); + } else { + return databaseRolesBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Database roles that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + public com.google.spanner.admin.database.v1.DatabaseRole getDatabaseRoles(int index) { + if (databaseRolesBuilder_ == null) { + return databaseRoles_.get(index); + } else { + return databaseRolesBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Database roles that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + public Builder setDatabaseRoles( + int index, com.google.spanner.admin.database.v1.DatabaseRole value) { + if (databaseRolesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDatabaseRolesIsMutable(); + databaseRoles_.set(index, value); + onChanged(); + } else { + databaseRolesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Database roles that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + public Builder setDatabaseRoles( + int index, com.google.spanner.admin.database.v1.DatabaseRole.Builder builderForValue) { + if (databaseRolesBuilder_ == null) { + ensureDatabaseRolesIsMutable(); + databaseRoles_.set(index, builderForValue.build()); + onChanged(); + } else { + databaseRolesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Database roles that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + public Builder addDatabaseRoles(com.google.spanner.admin.database.v1.DatabaseRole value) { + if (databaseRolesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDatabaseRolesIsMutable(); + databaseRoles_.add(value); + onChanged(); + } else { + databaseRolesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Database roles that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + public Builder addDatabaseRoles( + int index, com.google.spanner.admin.database.v1.DatabaseRole value) { + if (databaseRolesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDatabaseRolesIsMutable(); + databaseRoles_.add(index, value); + onChanged(); + } else { + databaseRolesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Database roles that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + public Builder addDatabaseRoles( + com.google.spanner.admin.database.v1.DatabaseRole.Builder builderForValue) { + if (databaseRolesBuilder_ == null) { + ensureDatabaseRolesIsMutable(); + databaseRoles_.add(builderForValue.build()); + onChanged(); + } else { + databaseRolesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Database roles that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + public Builder addDatabaseRoles( + int index, com.google.spanner.admin.database.v1.DatabaseRole.Builder builderForValue) { + if (databaseRolesBuilder_ == null) { + ensureDatabaseRolesIsMutable(); + databaseRoles_.add(index, builderForValue.build()); + onChanged(); + } else { + databaseRolesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Database roles that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + public Builder addAllDatabaseRoles( + java.lang.Iterable values) { + if (databaseRolesBuilder_ == null) { + ensureDatabaseRolesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, databaseRoles_); + onChanged(); + } else { + databaseRolesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Database roles that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + public Builder clearDatabaseRoles() { + if (databaseRolesBuilder_ == null) { + databaseRoles_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + databaseRolesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Database roles that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + public Builder removeDatabaseRoles(int index) { + if (databaseRolesBuilder_ == null) { + ensureDatabaseRolesIsMutable(); + databaseRoles_.remove(index); + onChanged(); + } else { + databaseRolesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Database roles that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + public com.google.spanner.admin.database.v1.DatabaseRole.Builder getDatabaseRolesBuilder( + int index) { + return internalGetDatabaseRolesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Database roles that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + public com.google.spanner.admin.database.v1.DatabaseRoleOrBuilder getDatabaseRolesOrBuilder( + int index) { + if (databaseRolesBuilder_ == null) { + return databaseRoles_.get(index); + } else { + return databaseRolesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Database roles that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + public java.util.List + getDatabaseRolesOrBuilderList() { + if (databaseRolesBuilder_ != null) { + return databaseRolesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(databaseRoles_); + } + } + + /** + * + * + *
    +     * Database roles that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + public com.google.spanner.admin.database.v1.DatabaseRole.Builder addDatabaseRolesBuilder() { + return internalGetDatabaseRolesFieldBuilder() + .addBuilder(com.google.spanner.admin.database.v1.DatabaseRole.getDefaultInstance()); + } + + /** + * + * + *
    +     * Database roles that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + public com.google.spanner.admin.database.v1.DatabaseRole.Builder addDatabaseRolesBuilder( + int index) { + return internalGetDatabaseRolesFieldBuilder() + .addBuilder( + index, com.google.spanner.admin.database.v1.DatabaseRole.getDefaultInstance()); + } + + /** + * + * + *
    +     * Database roles that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + public java.util.List + getDatabaseRolesBuilderList() { + return internalGetDatabaseRolesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.DatabaseRole, + com.google.spanner.admin.database.v1.DatabaseRole.Builder, + com.google.spanner.admin.database.v1.DatabaseRoleOrBuilder> + internalGetDatabaseRolesFieldBuilder() { + if (databaseRolesBuilder_ == null) { + databaseRolesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.DatabaseRole, + com.google.spanner.admin.database.v1.DatabaseRole.Builder, + com.google.spanner.admin.database.v1.DatabaseRoleOrBuilder>( + databaseRoles_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + databaseRoles_ = null; + } + return databaseRolesBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]
    +     * call to fetch more of the matching roles.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]
    +     * call to fetch more of the matching roles.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]
    +     * call to fetch more of the matching roles.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]
    +     * call to fetch more of the matching roles.
    +     * 
    + * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]
    +     * call to fetch more of the matching roles.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.ListDatabaseRolesResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListDatabaseRolesResponse) + private static final com.google.spanner.admin.database.v1.ListDatabaseRolesResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.ListDatabaseRolesResponse(); + } + + public static com.google.spanner.admin.database.v1.ListDatabaseRolesResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListDatabaseRolesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabaseRolesResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesResponseOrBuilder.java new file mode 100644 index 000000000000..f94b22d4bea3 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesResponseOrBuilder.java @@ -0,0 +1,114 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface ListDatabaseRolesResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.ListDatabaseRolesResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Database roles that matched the request.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + java.util.List getDatabaseRolesList(); + + /** + * + * + *
    +   * Database roles that matched the request.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + com.google.spanner.admin.database.v1.DatabaseRole getDatabaseRoles(int index); + + /** + * + * + *
    +   * Database roles that matched the request.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + int getDatabaseRolesCount(); + + /** + * + * + *
    +   * Database roles that matched the request.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + java.util.List + getDatabaseRolesOrBuilderList(); + + /** + * + * + *
    +   * Database roles that matched the request.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.DatabaseRole database_roles = 1; + */ + com.google.spanner.admin.database.v1.DatabaseRoleOrBuilder getDatabaseRolesOrBuilder(int index); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]
    +   * call to fetch more of the matching roles.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]
    +   * call to fetch more of the matching roles.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesRequest.java new file mode 100644 index 000000000000..8334a0103ac9 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesRequest.java @@ -0,0 +1,931 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListDatabasesRequest} + */ +@com.google.protobuf.Generated +public final class ListDatabasesRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.ListDatabasesRequest) + ListDatabasesRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListDatabasesRequest"); + } + + // Use ListDatabasesRequest.newBuilder() to construct. + private ListDatabasesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListDatabasesRequest() { + parent_ = ""; + pageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabasesRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabasesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListDatabasesRequest.class, + com.google.spanner.admin.database.v1.ListDatabasesRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
    +   * Required. The instance whose databases should be listed.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The instance whose databases should be listed.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 3; + private int pageSize_ = 0; + + /** + * + * + *
    +   * Number of databases to be returned in the response. If 0 or less,
    +   * defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 3; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
    +   * from a previous
    +   * [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
    +   * 
    + * + * string page_token = 4; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
    +   * from a previous
    +   * [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
    +   * 
    + * + * string page_token = 4; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (pageSize_ != 0) { + output.writeInt32(3, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, pageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(3, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, pageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + 
return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.ListDatabasesRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.ListDatabasesRequest other = + (com.google.spanner.admin.database.v1.ListDatabasesRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.ListDatabasesRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesRequest parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.ListDatabasesRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListDatabasesRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.ListDatabasesRequest) + com.google.spanner.admin.database.v1.ListDatabasesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabasesRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabasesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListDatabasesRequest.class, + com.google.spanner.admin.database.v1.ListDatabasesRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.ListDatabasesRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + pageSize_ = 0; + pageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabasesRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabasesRequest getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.ListDatabasesRequest.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.admin.database.v1.ListDatabasesRequest build() { + com.google.spanner.admin.database.v1.ListDatabasesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabasesRequest buildPartial() { + com.google.spanner.admin.database.v1.ListDatabasesRequest result = + new com.google.spanner.admin.database.v1.ListDatabasesRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.ListDatabasesRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageToken_ = pageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.ListDatabasesRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.ListDatabasesRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.ListDatabasesRequest other) { + if (other == com.google.spanner.admin.database.v1.ListDatabasesRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + 
@java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 24: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 24 + case 34: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
    +     * Required. The instance whose databases should be listed.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The instance whose databases should be listed.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The instance whose databases should be listed.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance whose databases should be listed.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance whose databases should be listed.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
    +     * Number of databases to be returned in the response. If 0 or less,
    +     * defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 3; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
    +     * Number of databases to be returned in the response. If 0 or less,
    +     * defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 3; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Number of databases to be returned in the response. If 0 or less,
    +     * defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 3; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000002); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
    +     * 
    + * + * string page_token = 4; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
    +     * 
    + * + * string page_token = 4; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
    +     * 
    + * + * string page_token = 4; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
    +     * 
    + * + * string page_token = 4; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
    +     * 
    + * + * string page_token = 4; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.ListDatabasesRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListDatabasesRequest) + private static final com.google.spanner.admin.database.v1.ListDatabasesRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.ListDatabasesRequest(); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListDatabasesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + 
return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabasesRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesRequestOrBuilder.java new file mode 100644 index 000000000000..c259ec9d3fd4 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesRequestOrBuilder.java @@ -0,0 +1,106 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface ListDatabasesRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.ListDatabasesRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The instance whose databases should be listed.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
    +   * Required. The instance whose databases should be listed.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
    +   * Number of databases to be returned in the response. If 0 or less,
    +   * defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 3; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
    +   * from a previous
    +   * [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
    +   * 
    + * + * string page_token = 4; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
    +   * from a previous
    +   * [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
    +   * 
    + * + * string page_token = 4; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesResponse.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesResponse.java new file mode 100644 index 000000000000..eb569c1ffddb --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesResponse.java @@ -0,0 +1,1130 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The response for
    + * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListDatabasesResponse} + */ +@com.google.protobuf.Generated +public final class ListDatabasesResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.ListDatabasesResponse) + ListDatabasesResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListDatabasesResponse"); + } + + // Use ListDatabasesResponse.newBuilder() to construct. + private ListDatabasesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListDatabasesResponse() { + databases_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabasesResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabasesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListDatabasesResponse.class, + com.google.spanner.admin.database.v1.ListDatabasesResponse.Builder.class); + } + + public static final int DATABASES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List databases_; + + /** + * + * + *
    +   * Databases that matched the request.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + @java.lang.Override + public java.util.List getDatabasesList() { + return databases_; + } + + /** + * + * + *
    +   * Databases that matched the request.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + @java.lang.Override + public java.util.List + getDatabasesOrBuilderList() { + return databases_; + } + + /** + * + * + *
    +   * Databases that matched the request.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + @java.lang.Override + public int getDatabasesCount() { + return databases_.size(); + } + + /** + * + * + *
    +   * Databases that matched the request.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.Database getDatabases(int index) { + return databases_.get(index); + } + + /** + * + * + *
    +   * Databases that matched the request.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.DatabaseOrBuilder getDatabasesOrBuilder(int index) { + return databases_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
    +   * call to fetch more of the matching databases.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
    +   * call to fetch more of the matching databases.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < databases_.size(); i++) { + output.writeMessage(1, databases_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < databases_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, databases_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.ListDatabasesResponse)) { + return super.equals(obj); + } + 
com.google.spanner.admin.database.v1.ListDatabasesResponse other = + (com.google.spanner.admin.database.v1.ListDatabasesResponse) obj; + + if (!getDatabasesList().equals(other.getDatabasesList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getDatabasesCount() > 0) { + hash = (37 * hash) + DATABASES_FIELD_NUMBER; + hash = (53 * hash) + getDatabasesList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.ListDatabasesResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.admin.database.v1.ListDatabasesResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.ListDatabasesResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The response for
    +   * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListDatabasesResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.ListDatabasesResponse) + com.google.spanner.admin.database.v1.ListDatabasesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabasesResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabasesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListDatabasesResponse.class, + com.google.spanner.admin.database.v1.ListDatabasesResponse.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.ListDatabasesResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (databasesBuilder_ == null) { + databases_ = java.util.Collections.emptyList(); + } else { + databases_ = null; + databasesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_ListDatabasesResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabasesResponse 
getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.ListDatabasesResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabasesResponse build() { + com.google.spanner.admin.database.v1.ListDatabasesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabasesResponse buildPartial() { + com.google.spanner.admin.database.v1.ListDatabasesResponse result = + new com.google.spanner.admin.database.v1.ListDatabasesResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.admin.database.v1.ListDatabasesResponse result) { + if (databasesBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + databases_ = java.util.Collections.unmodifiableList(databases_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.databases_ = databases_; + } else { + result.databases_ = databasesBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.admin.database.v1.ListDatabasesResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.ListDatabasesResponse) { + return mergeFrom((com.google.spanner.admin.database.v1.ListDatabasesResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.ListDatabasesResponse other) { + if (other == com.google.spanner.admin.database.v1.ListDatabasesResponse.getDefaultInstance()) + return this; + if (databasesBuilder_ == 
null) { + if (!other.databases_.isEmpty()) { + if (databases_.isEmpty()) { + databases_ = other.databases_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureDatabasesIsMutable(); + databases_.addAll(other.databases_); + } + onChanged(); + } + } else { + if (!other.databases_.isEmpty()) { + if (databasesBuilder_.isEmpty()) { + databasesBuilder_.dispose(); + databasesBuilder_ = null; + databases_ = other.databases_; + bitField0_ = (bitField0_ & ~0x00000001); + databasesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetDatabasesFieldBuilder() + : null; + } else { + databasesBuilder_.addAllMessages(other.databases_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.admin.database.v1.Database m = + input.readMessage( + com.google.spanner.admin.database.v1.Database.parser(), extensionRegistry); + if (databasesBuilder_ == null) { + ensureDatabasesIsMutable(); + databases_.add(m); + } else { + databasesBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch 
(tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List databases_ = + java.util.Collections.emptyList(); + + private void ensureDatabasesIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + databases_ = + new java.util.ArrayList(databases_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.Database, + com.google.spanner.admin.database.v1.Database.Builder, + com.google.spanner.admin.database.v1.DatabaseOrBuilder> + databasesBuilder_; + + /** + * + * + *
    +     * Databases that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + public java.util.List getDatabasesList() { + if (databasesBuilder_ == null) { + return java.util.Collections.unmodifiableList(databases_); + } else { + return databasesBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Databases that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + public int getDatabasesCount() { + if (databasesBuilder_ == null) { + return databases_.size(); + } else { + return databasesBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Databases that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + public com.google.spanner.admin.database.v1.Database getDatabases(int index) { + if (databasesBuilder_ == null) { + return databases_.get(index); + } else { + return databasesBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Databases that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + public Builder setDatabases(int index, com.google.spanner.admin.database.v1.Database value) { + if (databasesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDatabasesIsMutable(); + databases_.set(index, value); + onChanged(); + } else { + databasesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Databases that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + public Builder setDatabases( + int index, com.google.spanner.admin.database.v1.Database.Builder builderForValue) { + if (databasesBuilder_ == null) { + ensureDatabasesIsMutable(); + databases_.set(index, builderForValue.build()); + onChanged(); + } else { + databasesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Databases that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + public Builder addDatabases(com.google.spanner.admin.database.v1.Database value) { + if (databasesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDatabasesIsMutable(); + databases_.add(value); + onChanged(); + } else { + databasesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Databases that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + public Builder addDatabases(int index, com.google.spanner.admin.database.v1.Database value) { + if (databasesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDatabasesIsMutable(); + databases_.add(index, value); + onChanged(); + } else { + databasesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Databases that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + public Builder addDatabases( + com.google.spanner.admin.database.v1.Database.Builder builderForValue) { + if (databasesBuilder_ == null) { + ensureDatabasesIsMutable(); + databases_.add(builderForValue.build()); + onChanged(); + } else { + databasesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Databases that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + public Builder addDatabases( + int index, com.google.spanner.admin.database.v1.Database.Builder builderForValue) { + if (databasesBuilder_ == null) { + ensureDatabasesIsMutable(); + databases_.add(index, builderForValue.build()); + onChanged(); + } else { + databasesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Databases that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + public Builder addAllDatabases( + java.lang.Iterable values) { + if (databasesBuilder_ == null) { + ensureDatabasesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, databases_); + onChanged(); + } else { + databasesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Databases that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + public Builder clearDatabases() { + if (databasesBuilder_ == null) { + databases_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + databasesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Databases that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + public Builder removeDatabases(int index) { + if (databasesBuilder_ == null) { + ensureDatabasesIsMutable(); + databases_.remove(index); + onChanged(); + } else { + databasesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Databases that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + public com.google.spanner.admin.database.v1.Database.Builder getDatabasesBuilder(int index) { + return internalGetDatabasesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Databases that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + public com.google.spanner.admin.database.v1.DatabaseOrBuilder getDatabasesOrBuilder(int index) { + if (databasesBuilder_ == null) { + return databases_.get(index); + } else { + return databasesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Databases that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + public java.util.List + getDatabasesOrBuilderList() { + if (databasesBuilder_ != null) { + return databasesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(databases_); + } + } + + /** + * + * + *
    +     * Databases that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + public com.google.spanner.admin.database.v1.Database.Builder addDatabasesBuilder() { + return internalGetDatabasesFieldBuilder() + .addBuilder(com.google.spanner.admin.database.v1.Database.getDefaultInstance()); + } + + /** + * + * + *
    +     * Databases that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + public com.google.spanner.admin.database.v1.Database.Builder addDatabasesBuilder(int index) { + return internalGetDatabasesFieldBuilder() + .addBuilder(index, com.google.spanner.admin.database.v1.Database.getDefaultInstance()); + } + + /** + * + * + *
    +     * Databases that matched the request.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + public java.util.List + getDatabasesBuilderList() { + return internalGetDatabasesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.Database, + com.google.spanner.admin.database.v1.Database.Builder, + com.google.spanner.admin.database.v1.DatabaseOrBuilder> + internalGetDatabasesFieldBuilder() { + if (databasesBuilder_ == null) { + databasesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.Database, + com.google.spanner.admin.database.v1.Database.Builder, + com.google.spanner.admin.database.v1.DatabaseOrBuilder>( + databases_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + databases_ = null; + } + return databasesBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
    +     * call to fetch more of the matching databases.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
    +     * call to fetch more of the matching databases.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
    +     * call to fetch more of the matching databases.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
    +     * call to fetch more of the matching databases.
    +     * 
    + * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
    +     * call to fetch more of the matching databases.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.ListDatabasesResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListDatabasesResponse) + private static final com.google.spanner.admin.database.v1.ListDatabasesResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.ListDatabasesResponse(); + } + + public static com.google.spanner.admin.database.v1.ListDatabasesResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListDatabasesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser 
getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListDatabasesResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesResponseOrBuilder.java new file mode 100644 index 000000000000..337dbc2c7f5c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesResponseOrBuilder.java @@ -0,0 +1,114 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface ListDatabasesResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.ListDatabasesResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Databases that matched the request.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + java.util.List getDatabasesList(); + + /** + * + * + *
    +   * Databases that matched the request.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + com.google.spanner.admin.database.v1.Database getDatabases(int index); + + /** + * + * + *
    +   * Databases that matched the request.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + int getDatabasesCount(); + + /** + * + * + *
    +   * Databases that matched the request.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + java.util.List + getDatabasesOrBuilderList(); + + /** + * + * + *
    +   * Databases that matched the request.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Database databases = 1; + */ + com.google.spanner.admin.database.v1.DatabaseOrBuilder getDatabasesOrBuilder(int index); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
    +   * call to fetch more of the matching databases.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
    +   * call to fetch more of the matching databases.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OperationProgress.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OperationProgress.java new file mode 100644 index 000000000000..fb3dd65b0485 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OperationProgress.java @@ -0,0 +1,1078 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/common.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Encapsulates progress related information for a Cloud Spanner long
    + * running operation.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.OperationProgress} + */ +@com.google.protobuf.Generated +public final class OperationProgress extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.OperationProgress) + OperationProgressOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "OperationProgress"); + } + + // Use OperationProgress.newBuilder() to construct. + private OperationProgress(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private OperationProgress() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.CommonProto + .internal_static_google_spanner_admin_database_v1_OperationProgress_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.CommonProto + .internal_static_google_spanner_admin_database_v1_OperationProgress_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.OperationProgress.class, + com.google.spanner.admin.database.v1.OperationProgress.Builder.class); + } + + private int bitField0_; + public static final int PROGRESS_PERCENT_FIELD_NUMBER = 1; + private int progressPercent_ = 0; + + /** + * + * + *
    +   * Percent completion of the operation.
    +   * Values are between 0 and 100 inclusive.
    +   * 
    + * + * int32 progress_percent = 1; + * + * @return The progressPercent. + */ + @java.lang.Override + public int getProgressPercent() { + return progressPercent_; + } + + public static final int START_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp startTime_; + + /** + * + * + *
    +   * Time the request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return Whether the startTime field is set. + */ + @java.lang.Override + public boolean hasStartTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Time the request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return The startTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getStartTime() { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + + /** + * + * + *
    +   * Time the request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + + public static final int END_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp endTime_; + + /** + * + * + *
    +   * If set, the time at which this operation failed or was completed
    +   * successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 3; + * + * @return Whether the endTime field is set. + */ + @java.lang.Override + public boolean hasEndTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * If set, the time at which this operation failed or was completed
    +   * successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 3; + * + * @return The endTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getEndTime() { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + + /** + * + * + *
    +   * If set, the time at which this operation failed or was completed
    +   * successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 3; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder() { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (progressPercent_ != 0) { + output.writeInt32(1, progressPercent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getStartTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getEndTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (progressPercent_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(1, progressPercent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getStartTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getEndTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.OperationProgress)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.OperationProgress other = + (com.google.spanner.admin.database.v1.OperationProgress) obj; + + if (getProgressPercent() != other.getProgressPercent()) return false; + if 
(hasStartTime() != other.hasStartTime()) return false; + if (hasStartTime()) { + if (!getStartTime().equals(other.getStartTime())) return false; + } + if (hasEndTime() != other.hasEndTime()) return false; + if (hasEndTime()) { + if (!getEndTime().equals(other.getEndTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROGRESS_PERCENT_FIELD_NUMBER; + hash = (53 * hash) + getProgressPercent(); + if (hasStartTime()) { + hash = (37 * hash) + START_TIME_FIELD_NUMBER; + hash = (53 * hash) + getStartTime().hashCode(); + } + if (hasEndTime()) { + hash = (37 * hash) + END_TIME_FIELD_NUMBER; + hash = (53 * hash) + getEndTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.OperationProgress parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.OperationProgress parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.OperationProgress parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.OperationProgress parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.OperationProgress parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.OperationProgress parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.OperationProgress parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.OperationProgress parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.OperationProgress parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.OperationProgress parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.OperationProgress parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.OperationProgress parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.OperationProgress prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Encapsulates progress related information for a Cloud Spanner long
    +   * running operation.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.OperationProgress} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.OperationProgress) + com.google.spanner.admin.database.v1.OperationProgressOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.CommonProto + .internal_static_google_spanner_admin_database_v1_OperationProgress_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.CommonProto + .internal_static_google_spanner_admin_database_v1_OperationProgress_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.OperationProgress.class, + com.google.spanner.admin.database.v1.OperationProgress.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.OperationProgress.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetStartTimeFieldBuilder(); + internalGetEndTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + progressPercent_ = 0; + startTime_ = null; + if (startTimeBuilder_ != null) { + startTimeBuilder_.dispose(); + startTimeBuilder_ = null; + } + endTime_ = null; + if (endTimeBuilder_ != null) { + endTimeBuilder_.dispose(); + endTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + 
return com.google.spanner.admin.database.v1.CommonProto + .internal_static_google_spanner_admin_database_v1_OperationProgress_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.OperationProgress getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.OperationProgress build() { + com.google.spanner.admin.database.v1.OperationProgress result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.OperationProgress buildPartial() { + com.google.spanner.admin.database.v1.OperationProgress result = + new com.google.spanner.admin.database.v1.OperationProgress(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.OperationProgress result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.progressPercent_ = progressPercent_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.startTime_ = startTimeBuilder_ == null ? startTime_ : startTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.endTime_ = endTimeBuilder_ == null ? 
endTime_ : endTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.OperationProgress) { + return mergeFrom((com.google.spanner.admin.database.v1.OperationProgress) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.OperationProgress other) { + if (other == com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance()) + return this; + if (other.getProgressPercent() != 0) { + setProgressPercent(other.getProgressPercent()); + } + if (other.hasStartTime()) { + mergeStartTime(other.getStartTime()); + } + if (other.hasEndTime()) { + mergeEndTime(other.getEndTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + progressPercent_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + input.readMessage( + internalGetStartTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage(internalGetEndTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // 
default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int progressPercent_; + + /** + * + * + *
    +     * Percent completion of the operation.
    +     * Values are between 0 and 100 inclusive.
    +     * 
    + * + * int32 progress_percent = 1; + * + * @return The progressPercent. + */ + @java.lang.Override + public int getProgressPercent() { + return progressPercent_; + } + + /** + * + * + *
    +     * Percent completion of the operation.
    +     * Values are between 0 and 100 inclusive.
    +     * 
    + * + * int32 progress_percent = 1; + * + * @param value The progressPercent to set. + * @return This builder for chaining. + */ + public Builder setProgressPercent(int value) { + + progressPercent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Percent completion of the operation.
    +     * Values are between 0 and 100 inclusive.
    +     * 
    + * + * int32 progress_percent = 1; + * + * @return This builder for chaining. + */ + public Builder clearProgressPercent() { + bitField0_ = (bitField0_ & ~0x00000001); + progressPercent_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp startTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + startTimeBuilder_; + + /** + * + * + *
    +     * Time the request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return Whether the startTime field is set. + */ + public boolean hasStartTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Time the request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return The startTime. + */ + public com.google.protobuf.Timestamp getStartTime() { + if (startTimeBuilder_ == null) { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } else { + return startTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Time the request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder setStartTime(com.google.protobuf.Timestamp value) { + if (startTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + startTime_ = value; + } else { + startTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Time the request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder setStartTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (startTimeBuilder_ == null) { + startTime_ = builderForValue.build(); + } else { + startTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Time the request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder mergeStartTime(com.google.protobuf.Timestamp value) { + if (startTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && startTime_ != null + && startTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getStartTimeBuilder().mergeFrom(value); + } else { + startTime_ = value; + } + } else { + startTimeBuilder_.mergeFrom(value); + } + if (startTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Time the request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder clearStartTime() { + bitField0_ = (bitField0_ & ~0x00000002); + startTime_ = null; + if (startTimeBuilder_ != null) { + startTimeBuilder_.dispose(); + startTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Time the request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public com.google.protobuf.Timestamp.Builder getStartTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetStartTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Time the request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { + if (startTimeBuilder_ != null) { + return startTimeBuilder_.getMessageOrBuilder(); + } else { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + } + + /** + * + * + *
    +     * Time the request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetStartTimeFieldBuilder() { + if (startTimeBuilder_ == null) { + startTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getStartTime(), getParentForChildren(), isClean()); + startTime_ = null; + } + return startTimeBuilder_; + } + + private com.google.protobuf.Timestamp endTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + endTimeBuilder_; + + /** + * + * + *
    +     * If set, the time at which this operation failed or was completed
    +     * successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 3; + * + * @return Whether the endTime field is set. + */ + public boolean hasEndTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * If set, the time at which this operation failed or was completed
    +     * successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 3; + * + * @return The endTime. + */ + public com.google.protobuf.Timestamp getEndTime() { + if (endTimeBuilder_ == null) { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } else { + return endTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * If set, the time at which this operation failed or was completed
    +     * successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 3; + */ + public Builder setEndTime(com.google.protobuf.Timestamp value) { + if (endTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + endTime_ = value; + } else { + endTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If set, the time at which this operation failed or was completed
    +     * successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 3; + */ + public Builder setEndTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (endTimeBuilder_ == null) { + endTime_ = builderForValue.build(); + } else { + endTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If set, the time at which this operation failed or was completed
    +     * successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 3; + */ + public Builder mergeEndTime(com.google.protobuf.Timestamp value) { + if (endTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && endTime_ != null + && endTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getEndTimeBuilder().mergeFrom(value); + } else { + endTime_ = value; + } + } else { + endTimeBuilder_.mergeFrom(value); + } + if (endTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * If set, the time at which this operation failed or was completed
    +     * successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 3; + */ + public Builder clearEndTime() { + bitField0_ = (bitField0_ & ~0x00000004); + endTime_ = null; + if (endTimeBuilder_ != null) { + endTimeBuilder_.dispose(); + endTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * If set, the time at which this operation failed or was completed
    +     * successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 3; + */ + public com.google.protobuf.Timestamp.Builder getEndTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetEndTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * If set, the time at which this operation failed or was completed
    +     * successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 3; + */ + public com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder() { + if (endTimeBuilder_ != null) { + return endTimeBuilder_.getMessageOrBuilder(); + } else { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + } + + /** + * + * + *
    +     * If set, the time at which this operation failed or was completed
    +     * successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetEndTimeFieldBuilder() { + if (endTimeBuilder_ == null) { + endTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getEndTime(), getParentForChildren(), isClean()); + endTime_ = null; + } + return endTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.OperationProgress) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.OperationProgress) + private static final com.google.spanner.admin.database.v1.OperationProgress DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.OperationProgress(); + } + + public static com.google.spanner.admin.database.v1.OperationProgress getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public OperationProgress parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return 
builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.OperationProgress getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OperationProgressOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OperationProgressOrBuilder.java new file mode 100644 index 000000000000..80a3ca67c21f --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OperationProgressOrBuilder.java @@ -0,0 +1,119 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/common.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface OperationProgressOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.OperationProgress) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Percent completion of the operation.
    +   * Values are between 0 and 100 inclusive.
    +   * 
    + * + * int32 progress_percent = 1; + * + * @return The progressPercent. + */ + int getProgressPercent(); + + /** + * + * + *
    +   * Time the request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return Whether the startTime field is set. + */ + boolean hasStartTime(); + + /** + * + * + *
    +   * Time the request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return The startTime. + */ + com.google.protobuf.Timestamp getStartTime(); + + /** + * + * + *
    +   * Time the request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder(); + + /** + * + * + *
    +   * If set, the time at which this operation failed or was completed
    +   * successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 3; + * + * @return Whether the endTime field is set. + */ + boolean hasEndTime(); + + /** + * + * + *
    +   * If set, the time at which this operation failed or was completed
    +   * successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 3; + * + * @return The endTime. + */ + com.google.protobuf.Timestamp getEndTime(); + + /** + * + * + *
    +   * If set, the time at which this operation failed or was completed
    +   * successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 3; + */ + com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OptimizeRestoredDatabaseMetadata.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OptimizeRestoredDatabaseMetadata.java new file mode 100644 index 000000000000..954c6b25dfce --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OptimizeRestoredDatabaseMetadata.java @@ -0,0 +1,909 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Metadata type for the long-running operation used to track the progress
    + * of optimizations performed on a newly restored database. This long-running
    + * operation is automatically created by the system after the successful
    + * completion of a database restore, and cannot be cancelled.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata} + */ +@com.google.protobuf.Generated +public final class OptimizeRestoredDatabaseMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata) + OptimizeRestoredDatabaseMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "OptimizeRestoredDatabaseMetadata"); + } + + // Use OptimizeRestoredDatabaseMetadata.newBuilder() to construct. + private OptimizeRestoredDatabaseMetadata( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private OptimizeRestoredDatabaseMetadata() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_OptimizeRestoredDatabaseMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_OptimizeRestoredDatabaseMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata.class, + com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata.Builder.class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Name of the restored database being optimized.
    +   * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Name of the restored database being optimized.
    +   * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROGRESS_FIELD_NUMBER = 2; + private com.google.spanner.admin.database.v1.OperationProgress progress_; + + /** + * + * + *
    +   * The progress of the post-restore optimizations.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + * + * @return Whether the progress field is set. + */ + @java.lang.Override + public boolean hasProgress() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The progress of the post-restore optimizations.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + * + * @return The progress. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.OperationProgress getProgress() { + return progress_ == null + ? com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance() + : progress_; + } + + /** + * + * + *
    +   * The progress of the post-restore optimizations.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgressOrBuilder() { + return progress_ == null + ? com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance() + : progress_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getProgress()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getProgress()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata other = + (com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata) obj; + + if (!getName().equals(other.getName())) return false; + if (hasProgress() != 
other.hasProgress()) return false; + if (hasProgress()) { + if (!getProgress().equals(other.getProgress())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (hasProgress()) { + hash = (37 * hash) + PROGRESS_FIELD_NUMBER; + hash = (53 * hash) + getProgress().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Metadata type for the long-running operation used to track the progress
    +   * of optimizations performed on a newly restored database. This long-running
    +   * operation is automatically created by the system after the successful
    +   * completion of a database restore, and cannot be cancelled.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata) + com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_OptimizeRestoredDatabaseMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_OptimizeRestoredDatabaseMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata.class, + com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata.Builder.class); + } + + // Construct using + // com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetProgressFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_OptimizeRestoredDatabaseMetadata_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata build() { + com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata buildPartial() { + com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata result = + new com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.progress_ = progressBuilder_ == null ? 
progress_ : progressBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata) { + return mergeFrom( + (com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata other) { + if (other + == com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata + .getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasProgress()) { + mergeProgress(other.getProgress()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetProgressFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw 
e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Name of the restored database being optimized.
    +     * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Name of the restored database being optimized.
    +     * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Name of the restored database being optimized.
    +     * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Name of the restored database being optimized.
    +     * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Name of the restored database being optimized.
    +     * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.spanner.admin.database.v1.OperationProgress progress_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.OperationProgress, + com.google.spanner.admin.database.v1.OperationProgress.Builder, + com.google.spanner.admin.database.v1.OperationProgressOrBuilder> + progressBuilder_; + + /** + * + * + *
    +     * The progress of the post-restore optimizations.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + * + * @return Whether the progress field is set. + */ + public boolean hasProgress() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * The progress of the post-restore optimizations.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + * + * @return The progress. + */ + public com.google.spanner.admin.database.v1.OperationProgress getProgress() { + if (progressBuilder_ == null) { + return progress_ == null + ? com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance() + : progress_; + } else { + return progressBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The progress of the post-restore optimizations.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + */ + public Builder setProgress(com.google.spanner.admin.database.v1.OperationProgress value) { + if (progressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + progress_ = value; + } else { + progressBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the post-restore optimizations.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + */ + public Builder setProgress( + com.google.spanner.admin.database.v1.OperationProgress.Builder builderForValue) { + if (progressBuilder_ == null) { + progress_ = builderForValue.build(); + } else { + progressBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the post-restore optimizations.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + */ + public Builder mergeProgress(com.google.spanner.admin.database.v1.OperationProgress value) { + if (progressBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && progress_ != null + && progress_ + != com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance()) { + getProgressBuilder().mergeFrom(value); + } else { + progress_ = value; + } + } else { + progressBuilder_.mergeFrom(value); + } + if (progress_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The progress of the post-restore optimizations.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + */ + public Builder clearProgress() { + bitField0_ = (bitField0_ & ~0x00000002); + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the post-restore optimizations.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + */ + public com.google.spanner.admin.database.v1.OperationProgress.Builder getProgressBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetProgressFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The progress of the post-restore optimizations.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + */ + public com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgressOrBuilder() { + if (progressBuilder_ != null) { + return progressBuilder_.getMessageOrBuilder(); + } else { + return progress_ == null + ? com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance() + : progress_; + } + } + + /** + * + * + *
    +     * The progress of the post-restore optimizations.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.OperationProgress, + com.google.spanner.admin.database.v1.OperationProgress.Builder, + com.google.spanner.admin.database.v1.OperationProgressOrBuilder> + internalGetProgressFieldBuilder() { + if (progressBuilder_ == null) { + progressBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.OperationProgress, + com.google.spanner.admin.database.v1.OperationProgress.Builder, + com.google.spanner.admin.database.v1.OperationProgressOrBuilder>( + getProgress(), getParentForChildren(), isClean()); + progress_ = null; + } + return progressBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata) + private static final com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata(); + } + + public static com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public OptimizeRestoredDatabaseMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { 
+ throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OptimizeRestoredDatabaseMetadataOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OptimizeRestoredDatabaseMetadataOrBuilder.java new file mode 100644 index 000000000000..b25ba1e0297c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OptimizeRestoredDatabaseMetadataOrBuilder.java @@ -0,0 +1,91 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface OptimizeRestoredDatabaseMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Name of the restored database being optimized.
    +   * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Name of the restored database being optimized.
    +   * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +   * The progress of the post-restore optimizations.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + * + * @return Whether the progress field is set. + */ + boolean hasProgress(); + + /** + * + * + *
    +   * The progress of the post-restore optimizations.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + * + * @return The progress. + */ + com.google.spanner.admin.database.v1.OperationProgress getProgress(); + + /** + * + * + *
    +   * The progress of the post-restore optimizations.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + */ + com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgressOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseEncryptionConfig.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseEncryptionConfig.java new file mode 100644 index 000000000000..438b24bf8633 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseEncryptionConfig.java @@ -0,0 +1,1535 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Encryption configuration for the restored database.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig} + */ +@com.google.protobuf.Generated +public final class RestoreDatabaseEncryptionConfig extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig) + RestoreDatabaseEncryptionConfigOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "RestoreDatabaseEncryptionConfig"); + } + + // Use RestoreDatabaseEncryptionConfig.newBuilder() to construct. + private RestoreDatabaseEncryptionConfig(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private RestoreDatabaseEncryptionConfig() { + encryptionType_ = 0; + kmsKeyName_ = ""; + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_RestoreDatabaseEncryptionConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_RestoreDatabaseEncryptionConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.class, + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.Builder.class); + } + + /** + * + * + *
    +   * Encryption types for the database to be restored.
    +   * 
    + * + * Protobuf enum {@code + * google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType} + */ + public enum EncryptionType implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Unspecified. Do not use.
    +     * 
    + * + * ENCRYPTION_TYPE_UNSPECIFIED = 0; + */ + ENCRYPTION_TYPE_UNSPECIFIED(0), + /** + * + * + *
    +     * This is the default option when
    +     * [encryption_config][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig]
    +     * is not specified.
    +     * 
    + * + * USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION = 1; + */ + USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION(1), + /** + * + * + *
    +     * Use Google default encryption.
    +     * 
    + * + * GOOGLE_DEFAULT_ENCRYPTION = 2; + */ + GOOGLE_DEFAULT_ENCRYPTION(2), + /** + * + * + *
    +     * Use customer managed encryption. If specified, `kms_key_name`
    +     * must contain a valid Cloud KMS key.
    +     * 
    + * + * CUSTOMER_MANAGED_ENCRYPTION = 3; + */ + CUSTOMER_MANAGED_ENCRYPTION(3), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "EncryptionType"); + } + + /** + * + * + *
    +     * Unspecified. Do not use.
    +     * 
    + * + * ENCRYPTION_TYPE_UNSPECIFIED = 0; + */ + public static final int ENCRYPTION_TYPE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * This is the default option when
    +     * [encryption_config][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig]
    +     * is not specified.
    +     * 
    + * + * USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION = 1; + */ + public static final int USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION_VALUE = 1; + + /** + * + * + *
    +     * Use Google default encryption.
    +     * 
    + * + * GOOGLE_DEFAULT_ENCRYPTION = 2; + */ + public static final int GOOGLE_DEFAULT_ENCRYPTION_VALUE = 2; + + /** + * + * + *
    +     * Use customer managed encryption. If specified, `kms_key_name` must
    +     * must contain a valid Cloud KMS key.
    +     * 
    + * + * CUSTOMER_MANAGED_ENCRYPTION = 3; + */ + public static final int CUSTOMER_MANAGED_ENCRYPTION_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static EncryptionType valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static EncryptionType forNumber(int value) { + switch (value) { + case 0: + return ENCRYPTION_TYPE_UNSPECIFIED; + case 1: + return USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION; + case 2: + return GOOGLE_DEFAULT_ENCRYPTION; + case 3: + return CUSTOMER_MANAGED_ENCRYPTION; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public EncryptionType findValueByNumber(int number) { + return EncryptionType.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return 
com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final EncryptionType[] VALUES = values(); + + public static EncryptionType valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private EncryptionType(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType) + } + + public static final int ENCRYPTION_TYPE_FIELD_NUMBER = 1; + private int encryptionType_ = 0; + + /** + * + * + *
    +   * Required. The encryption type of the restored database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The enum numeric value on the wire for encryptionType. + */ + @java.lang.Override + public int getEncryptionTypeValue() { + return encryptionType_; + } + + /** + * + * + *
    +   * Required. The encryption type of the restored database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The encryptionType. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType + getEncryptionType() { + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType result = + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType + .forNumber(encryptionType_); + return result == null + ? com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType + .UNRECOGNIZED + : result; + } + + public static final int KMS_KEY_NAME_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object kmsKeyName_ = ""; + + /** + * + * + *
    +   * Optional. The Cloud KMS key that will be used to encrypt/decrypt the
    +   * restored database. This field should be set only when
    +   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The kmsKeyName. + */ + @java.lang.Override + public java.lang.String getKmsKeyName() { + java.lang.Object ref = kmsKeyName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + kmsKeyName_ = s; + return s; + } + } + + /** + * + * + *
    +   * Optional. The Cloud KMS key that will be used to encrypt/decrypt the
    +   * restored database. This field should be set only when
    +   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for kmsKeyName. + */ + @java.lang.Override + public com.google.protobuf.ByteString getKmsKeyNameBytes() { + java.lang.Object ref = kmsKeyName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + kmsKeyName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int KMS_KEY_NAMES_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList kmsKeyNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * encrypt the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the kmsKeyNames. + */ + public com.google.protobuf.ProtocolStringList getKmsKeyNamesList() { + return kmsKeyNames_; + } + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * encrypt the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The count of kmsKeyNames. + */ + public int getKmsKeyNamesCount() { + return kmsKeyNames_.size(); + } + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * encrypt the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + public java.lang.String getKmsKeyNames(int index) { + return kmsKeyNames_.get(index); + } + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * encrypt the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + public com.google.protobuf.ByteString getKmsKeyNamesBytes(int index) { + return kmsKeyNames_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (encryptionType_ + != com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType + .ENCRYPTION_TYPE_UNSPECIFIED + .getNumber()) { + output.writeEnum(1, encryptionType_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(kmsKeyName_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, kmsKeyName_); + } + for (int i = 0; i < kmsKeyNames_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, kmsKeyNames_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (encryptionType_ + != com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType + .ENCRYPTION_TYPE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, encryptionType_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(kmsKeyName_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, kmsKeyName_); + } + { + int dataSize = 0; + for (int i = 0; i < kmsKeyNames_.size(); i++) { + dataSize += 
computeStringSizeNoTag(kmsKeyNames_.getRaw(i)); + } + size += dataSize; + size += 1 * getKmsKeyNamesList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig other = + (com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig) obj; + + if (encryptionType_ != other.encryptionType_) return false; + if (!getKmsKeyName().equals(other.getKmsKeyName())) return false; + if (!getKmsKeyNamesList().equals(other.getKmsKeyNamesList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ENCRYPTION_TYPE_FIELD_NUMBER; + hash = (53 * hash) + encryptionType_; + hash = (37 * hash) + KMS_KEY_NAME_FIELD_NUMBER; + hash = (53 * hash) + getKmsKeyName().hashCode(); + if (getKmsKeyNamesCount() > 0) { + hash = (37 * hash) + KMS_KEY_NAMES_FIELD_NUMBER; + hash = (53 * hash) + getKmsKeyNamesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + 
public static com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Encryption configuration for the restored database.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig) + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_RestoreDatabaseEncryptionConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_RestoreDatabaseEncryptionConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.class, + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.Builder.class); + } + + // Construct using + // com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + encryptionType_ = 0; + kmsKeyName_ = ""; + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_RestoreDatabaseEncryptionConfig_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig + 
getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig build() { + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig buildPartial() { + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig result = + new com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.encryptionType_ = encryptionType_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.kmsKeyName_ = kmsKeyName_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + kmsKeyNames_.makeImmutable(); + result.kmsKeyNames_ = kmsKeyNames_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig) { + return mergeFrom( + (com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig other) { + if (other + == com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig + .getDefaultInstance()) return this; + if (other.encryptionType_ != 0) { + setEncryptionTypeValue(other.getEncryptionTypeValue()); + } + if (!other.getKmsKeyName().isEmpty()) { + 
kmsKeyName_ = other.kmsKeyName_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.kmsKeyNames_.isEmpty()) { + if (kmsKeyNames_.isEmpty()) { + kmsKeyNames_ = other.kmsKeyNames_; + bitField0_ |= 0x00000004; + } else { + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.addAll(other.kmsKeyNames_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + encryptionType_ = input.readEnum(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + kmsKeyName_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(s); + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int encryptionType_ = 0; + + /** + * + * + *
    +     * Required. The encryption type of the restored database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The enum numeric value on the wire for encryptionType. + */ + @java.lang.Override + public int getEncryptionTypeValue() { + return encryptionType_; + } + + /** + * + * + *
    +     * Required. The encryption type of the restored database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @param value The enum numeric value on the wire for encryptionType to set. + * @return This builder for chaining. + */ + public Builder setEncryptionTypeValue(int value) { + encryptionType_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The encryption type of the restored database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The encryptionType. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType + getEncryptionType() { + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType result = + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType + .forNumber(encryptionType_); + return result == null + ? com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType + .UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * Required. The encryption type of the restored database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @param value The encryptionType to set. + * @return This builder for chaining. + */ + public Builder setEncryptionType( + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + encryptionType_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The encryption type of the restored database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return This builder for chaining. + */ + public Builder clearEncryptionType() { + bitField0_ = (bitField0_ & ~0x00000001); + encryptionType_ = 0; + onChanged(); + return this; + } + + private java.lang.Object kmsKeyName_ = ""; + + /** + * + * + *
    +     * Optional. The Cloud KMS key that will be used to encrypt/decrypt the
    +     * restored database. This field should be set only when
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The kmsKeyName. + */ + public java.lang.String getKmsKeyName() { + java.lang.Object ref = kmsKeyName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + kmsKeyName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Optional. The Cloud KMS key that will be used to encrypt/decrypt the
    +     * restored database. This field should be set only when
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for kmsKeyName. + */ + public com.google.protobuf.ByteString getKmsKeyNameBytes() { + java.lang.Object ref = kmsKeyName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + kmsKeyName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Optional. The Cloud KMS key that will be used to encrypt/decrypt the
    +     * restored database. This field should be set only when
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The kmsKeyName to set. + * @return This builder for chaining. + */ + public Builder setKmsKeyName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + kmsKeyName_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The Cloud KMS key that will be used to encrypt/decrypt the
    +     * restored database. This field should be set only when
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearKmsKeyName() { + kmsKeyName_ = getDefaultInstance().getKmsKeyName(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The Cloud KMS key that will be used to encrypt/decrypt the
    +     * restored database. This field should be set only when
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for kmsKeyName to set. + * @return This builder for chaining. + */ + public Builder setKmsKeyNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + kmsKeyName_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList kmsKeyNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureKmsKeyNamesIsMutable() { + if (!kmsKeyNames_.isModifiable()) { + kmsKeyNames_ = new com.google.protobuf.LazyStringArrayList(kmsKeyNames_); + } + bitField0_ |= 0x00000004; + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * encrypt the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the kmsKeyNames. + */ + public com.google.protobuf.ProtocolStringList getKmsKeyNamesList() { + kmsKeyNames_.makeImmutable(); + return kmsKeyNames_; + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * encrypt the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The count of kmsKeyNames. + */ + public int getKmsKeyNamesCount() { + return kmsKeyNames_.size(); + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * encrypt the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + public java.lang.String getKmsKeyNames(int index) { + return kmsKeyNames_.get(index); + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * encrypt the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + public com.google.protobuf.ByteString getKmsKeyNamesBytes(int index) { + return kmsKeyNames_.getByteString(index); + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * encrypt the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index to set the value at. + * @param value The kmsKeyNames to set. + * @return This builder for chaining. + */ + public Builder setKmsKeyNames(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.set(index, value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * encrypt the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addKmsKeyNames(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * encrypt the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param values The kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addAllKmsKeyNames(java.lang.Iterable values) { + ensureKmsKeyNamesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, kmsKeyNames_); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * encrypt the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearKmsKeyNames() { + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * encrypt the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes of the kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addKmsKeyNamesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig) + private static final com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig(); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RestoreDatabaseEncryptionConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseEncryptionConfigOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseEncryptionConfigOrBuilder.java new file mode 100644 index 000000000000..ee21bfaa6039 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseEncryptionConfigOrBuilder.java @@ -0,0 +1,215 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface RestoreDatabaseEncryptionConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The encryption type of the restored database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The enum numeric value on the wire for encryptionType. + */ + int getEncryptionTypeValue(); + + /** + * + * + *
    +   * Required. The encryption type of the restored database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType encryption_type = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The encryptionType. + */ + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType + getEncryptionType(); + + /** + * + * + *
    +   * Optional. The Cloud KMS key that will be used to encrypt/decrypt the
    +   * restored database. This field should be set only when
    +   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The kmsKeyName. + */ + java.lang.String getKmsKeyName(); + + /** + * + * + *
    +   * Optional. The Cloud KMS key that will be used to encrypt/decrypt the
    +   * restored database. This field should be set only when
    +   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * 
    + * + * + * string kms_key_name = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for kmsKeyName. + */ + com.google.protobuf.ByteString getKmsKeyNameBytes(); + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * encrypt the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the kmsKeyNames. + */ + java.util.List getKmsKeyNamesList(); + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * encrypt the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The count of kmsKeyNames. + */ + int getKmsKeyNamesCount(); + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * encrypt the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + java.lang.String getKmsKeyNames(int index); + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * encrypt the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + com.google.protobuf.ByteString getKmsKeyNamesBytes(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseMetadata.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseMetadata.java new file mode 100644 index 000000000000..8235d5560423 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseMetadata.java @@ -0,0 +1,2143 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Metadata type for the long-running operation returned by
    + * [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.RestoreDatabaseMetadata} + */ +@com.google.protobuf.Generated +public final class RestoreDatabaseMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.RestoreDatabaseMetadata) + RestoreDatabaseMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "RestoreDatabaseMetadata"); + } + + // Use RestoreDatabaseMetadata.newBuilder() to construct. + private RestoreDatabaseMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private RestoreDatabaseMetadata() { + name_ = ""; + sourceType_ = 0; + optimizeDatabaseOperationName_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_RestoreDatabaseMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_RestoreDatabaseMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.RestoreDatabaseMetadata.class, + com.google.spanner.admin.database.v1.RestoreDatabaseMetadata.Builder.class); + } + + private int bitField0_; + private int sourceInfoCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object sourceInfo_; + + public enum SourceInfoCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + BACKUP_INFO(3), + 
SOURCEINFO_NOT_SET(0); + private final int value; + + private SourceInfoCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static SourceInfoCase valueOf(int value) { + return forNumber(value); + } + + public static SourceInfoCase forNumber(int value) { + switch (value) { + case 3: + return BACKUP_INFO; + case 0: + return SOURCEINFO_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public SourceInfoCase getSourceInfoCase() { + return SourceInfoCase.forNumber(sourceInfoCase_); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Name of the database being created and restored to.
    +   * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Name of the database being created and restored to.
    +   * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SOURCE_TYPE_FIELD_NUMBER = 2; + private int sourceType_ = 0; + + /** + * + * + *
    +   * The type of the restore source.
    +   * 
    + * + * .google.spanner.admin.database.v1.RestoreSourceType source_type = 2; + * + * @return The enum numeric value on the wire for sourceType. + */ + @java.lang.Override + public int getSourceTypeValue() { + return sourceType_; + } + + /** + * + * + *
    +   * The type of the restore source.
    +   * 
    + * + * .google.spanner.admin.database.v1.RestoreSourceType source_type = 2; + * + * @return The sourceType. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreSourceType getSourceType() { + com.google.spanner.admin.database.v1.RestoreSourceType result = + com.google.spanner.admin.database.v1.RestoreSourceType.forNumber(sourceType_); + return result == null + ? com.google.spanner.admin.database.v1.RestoreSourceType.UNRECOGNIZED + : result; + } + + public static final int BACKUP_INFO_FIELD_NUMBER = 3; + + /** + * + * + *
    +   * Information about the backup used to restore the database.
    +   * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 3; + * + * @return Whether the backupInfo field is set. + */ + @java.lang.Override + public boolean hasBackupInfo() { + return sourceInfoCase_ == 3; + } + + /** + * + * + *
    +   * Information about the backup used to restore the database.
    +   * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 3; + * + * @return The backupInfo. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupInfo getBackupInfo() { + if (sourceInfoCase_ == 3) { + return (com.google.spanner.admin.database.v1.BackupInfo) sourceInfo_; + } + return com.google.spanner.admin.database.v1.BackupInfo.getDefaultInstance(); + } + + /** + * + * + *
    +   * Information about the backup used to restore the database.
    +   * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 3; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupInfoOrBuilder getBackupInfoOrBuilder() { + if (sourceInfoCase_ == 3) { + return (com.google.spanner.admin.database.v1.BackupInfo) sourceInfo_; + } + return com.google.spanner.admin.database.v1.BackupInfo.getDefaultInstance(); + } + + public static final int PROGRESS_FIELD_NUMBER = 4; + private com.google.spanner.admin.database.v1.OperationProgress progress_; + + /** + * + * + *
    +   * The progress of the
    +   * [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 4; + * + * @return Whether the progress field is set. + */ + @java.lang.Override + public boolean hasProgress() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The progress of the
    +   * [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 4; + * + * @return The progress. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.OperationProgress getProgress() { + return progress_ == null + ? com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance() + : progress_; + } + + /** + * + * + *
    +   * The progress of the
    +   * [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 4; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgressOrBuilder() { + return progress_ == null + ? com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance() + : progress_; + } + + public static final int CANCEL_TIME_FIELD_NUMBER = 5; + private com.google.protobuf.Timestamp cancelTime_; + + /** + * + * + *
    +   * The time at which cancellation of this operation was received.
    +   * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +   * starts asynchronous cancellation on a long-running operation. The server
    +   * makes a best effort to cancel the operation, but success is not guaranteed.
    +   * Clients can use
    +   * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +   * other methods to check whether the cancellation succeeded or whether the
    +   * operation completed despite cancellation. On successful cancellation,
    +   * the operation is not deleted; instead, it becomes an operation with
    +   * an [Operation.error][google.longrunning.Operation.error] value with a
    +   * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
    +   * `Code.CANCELLED`.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 5; + * + * @return Whether the cancelTime field is set. + */ + @java.lang.Override + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * The time at which cancellation of this operation was received.
    +   * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +   * starts asynchronous cancellation on a long-running operation. The server
    +   * makes a best effort to cancel the operation, but success is not guaranteed.
    +   * Clients can use
    +   * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +   * other methods to check whether the cancellation succeeded or whether the
    +   * operation completed despite cancellation. On successful cancellation,
    +   * the operation is not deleted; instead, it becomes an operation with
    +   * an [Operation.error][google.longrunning.Operation.error] value with a
    +   * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
    +   * `Code.CANCELLED`.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 5; + * + * @return The cancelTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCancelTime() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + /** + * + * + *
    +   * The time at which cancellation of this operation was received.
    +   * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +   * starts asynchronous cancellation on a long-running operation. The server
    +   * makes a best effort to cancel the operation, but success is not guaranteed.
    +   * Clients can use
    +   * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +   * other methods to check whether the cancellation succeeded or whether the
    +   * operation completed despite cancellation. On successful cancellation,
    +   * the operation is not deleted; instead, it becomes an operation with
    +   * an [Operation.error][google.longrunning.Operation.error] value with a
    +   * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
    +   * `Code.CANCELLED`.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 5; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + public static final int OPTIMIZE_DATABASE_OPERATION_NAME_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object optimizeDatabaseOperationName_ = ""; + + /** + * + * + *
    +   * If exists, the name of the long-running operation that will be used to
    +   * track the post-restore optimization process to optimize the performance of
    +   * the restored database, and remove the dependency on the restore source.
    +   * The name is of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`
    +   * where the <database> is the name of database being created and restored to.
    +   * The metadata type of the  long-running operation is
    +   * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata].
    +   * This long-running operation will be automatically created by the system
    +   * after the RestoreDatabase long-running operation completes successfully.
    +   * This operation will not be created if the restore was not successful.
    +   * 
    + * + * string optimize_database_operation_name = 6; + * + * @return The optimizeDatabaseOperationName. + */ + @java.lang.Override + public java.lang.String getOptimizeDatabaseOperationName() { + java.lang.Object ref = optimizeDatabaseOperationName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + optimizeDatabaseOperationName_ = s; + return s; + } + } + + /** + * + * + *
    +   * If exists, the name of the long-running operation that will be used to
    +   * track the post-restore optimization process to optimize the performance of
    +   * the restored database, and remove the dependency on the restore source.
    +   * The name is of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`
    +   * where the <database> is the name of database being created and restored to.
    +   * The metadata type of the  long-running operation is
    +   * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata].
    +   * This long-running operation will be automatically created by the system
    +   * after the RestoreDatabase long-running operation completes successfully.
    +   * This operation will not be created if the restore was not successful.
    +   * 
    + * + * string optimize_database_operation_name = 6; + * + * @return The bytes for optimizeDatabaseOperationName. + */ + @java.lang.Override + public com.google.protobuf.ByteString getOptimizeDatabaseOperationNameBytes() { + java.lang.Object ref = optimizeDatabaseOperationName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + optimizeDatabaseOperationName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (sourceType_ + != com.google.spanner.admin.database.v1.RestoreSourceType.TYPE_UNSPECIFIED.getNumber()) { + output.writeEnum(2, sourceType_); + } + if (sourceInfoCase_ == 3) { + output.writeMessage(3, (com.google.spanner.admin.database.v1.BackupInfo) sourceInfo_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(4, getProgress()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(5, getCancelTime()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(optimizeDatabaseOperationName_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 6, optimizeDatabaseOperationName_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += 
com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (sourceType_ + != com.google.spanner.admin.database.v1.RestoreSourceType.TYPE_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, sourceType_); + } + if (sourceInfoCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, (com.google.spanner.admin.database.v1.BackupInfo) sourceInfo_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getProgress()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getCancelTime()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(optimizeDatabaseOperationName_)) { + size += + com.google.protobuf.GeneratedMessage.computeStringSize(6, optimizeDatabaseOperationName_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.RestoreDatabaseMetadata)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.RestoreDatabaseMetadata other = + (com.google.spanner.admin.database.v1.RestoreDatabaseMetadata) obj; + + if (!getName().equals(other.getName())) return false; + if (sourceType_ != other.sourceType_) return false; + if (hasProgress() != other.hasProgress()) return false; + if (hasProgress()) { + if (!getProgress().equals(other.getProgress())) return false; + } + if (hasCancelTime() != other.hasCancelTime()) return false; + if (hasCancelTime()) { + if (!getCancelTime().equals(other.getCancelTime())) return false; + } + if (!getOptimizeDatabaseOperationName().equals(other.getOptimizeDatabaseOperationName())) + return false; + if (!getSourceInfoCase().equals(other.getSourceInfoCase())) return false; + switch 
(sourceInfoCase_) { + case 3: + if (!getBackupInfo().equals(other.getBackupInfo())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + SOURCE_TYPE_FIELD_NUMBER; + hash = (53 * hash) + sourceType_; + if (hasProgress()) { + hash = (37 * hash) + PROGRESS_FIELD_NUMBER; + hash = (53 * hash) + getProgress().hashCode(); + } + if (hasCancelTime()) { + hash = (37 * hash) + CANCEL_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCancelTime().hashCode(); + } + hash = (37 * hash) + OPTIMIZE_DATABASE_OPERATION_NAME_FIELD_NUMBER; + hash = (53 * hash) + getOptimizeDatabaseOperationName().hashCode(); + switch (sourceInfoCase_) { + case 3: + hash = (37 * hash) + BACKUP_INFO_FIELD_NUMBER; + hash = (53 * hash) + getBackupInfo().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseMetadata parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.admin.database.v1.RestoreDatabaseMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseMetadata parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseMetadata parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseMetadata parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseMetadata parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static 
com.google.spanner.admin.database.v1.RestoreDatabaseMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.RestoreDatabaseMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Metadata type for the long-running operation returned by
    +   * [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.RestoreDatabaseMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.RestoreDatabaseMetadata) + com.google.spanner.admin.database.v1.RestoreDatabaseMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_RestoreDatabaseMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_RestoreDatabaseMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.RestoreDatabaseMetadata.class, + com.google.spanner.admin.database.v1.RestoreDatabaseMetadata.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.RestoreDatabaseMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetProgressFieldBuilder(); + internalGetCancelTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + sourceType_ = 0; + if (backupInfoBuilder_ != null) { + backupInfoBuilder_.clear(); + } + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + 
cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + optimizeDatabaseOperationName_ = ""; + sourceInfoCase_ = 0; + sourceInfo_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_RestoreDatabaseMetadata_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreDatabaseMetadata + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.RestoreDatabaseMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreDatabaseMetadata build() { + com.google.spanner.admin.database.v1.RestoreDatabaseMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreDatabaseMetadata buildPartial() { + com.google.spanner.admin.database.v1.RestoreDatabaseMetadata result = + new com.google.spanner.admin.database.v1.RestoreDatabaseMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.RestoreDatabaseMetadata result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.sourceType_ = sourceType_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.progress_ = progressBuilder_ == null ? progress_ : progressBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.cancelTime_ = cancelTimeBuilder_ == null ? 
cancelTime_ : cancelTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.optimizeDatabaseOperationName_ = optimizeDatabaseOperationName_; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs( + com.google.spanner.admin.database.v1.RestoreDatabaseMetadata result) { + result.sourceInfoCase_ = sourceInfoCase_; + result.sourceInfo_ = this.sourceInfo_; + if (sourceInfoCase_ == 3 && backupInfoBuilder_ != null) { + result.sourceInfo_ = backupInfoBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.RestoreDatabaseMetadata) { + return mergeFrom((com.google.spanner.admin.database.v1.RestoreDatabaseMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.RestoreDatabaseMetadata other) { + if (other + == com.google.spanner.admin.database.v1.RestoreDatabaseMetadata.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.sourceType_ != 0) { + setSourceTypeValue(other.getSourceTypeValue()); + } + if (other.hasProgress()) { + mergeProgress(other.getProgress()); + } + if (other.hasCancelTime()) { + mergeCancelTime(other.getCancelTime()); + } + if (!other.getOptimizeDatabaseOperationName().isEmpty()) { + optimizeDatabaseOperationName_ = other.optimizeDatabaseOperationName_; + bitField0_ |= 0x00000020; + onChanged(); + } + switch (other.getSourceInfoCase()) { + case BACKUP_INFO: + { + mergeBackupInfo(other.getBackupInfo()); + break; + } + case SOURCEINFO_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder 
mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + sourceType_ = input.readEnum(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + input.readMessage( + internalGetBackupInfoFieldBuilder().getBuilder(), extensionRegistry); + sourceInfoCase_ = 3; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetProgressFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + input.readMessage( + internalGetCancelTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 50: + { + optimizeDatabaseOperationName_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000020; + break; + } // case 50 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int sourceInfoCase_ = 0; + private java.lang.Object sourceInfo_; + + public SourceInfoCase getSourceInfoCase() { + return SourceInfoCase.forNumber(sourceInfoCase_); + } + + public Builder clearSourceInfo() { + sourceInfoCase_ = 0; + sourceInfo_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Name of the database being created and restored to.
    +     * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Name of the database being created and restored to.
    +     * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Name of the database being created and restored to.
    +     * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Name of the database being created and restored to.
    +     * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Name of the database being created and restored to.
    +     * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int sourceType_ = 0; + + /** + * + * + *
    +     * The type of the restore source.
    +     * 
    + * + * .google.spanner.admin.database.v1.RestoreSourceType source_type = 2; + * + * @return The enum numeric value on the wire for sourceType. + */ + @java.lang.Override + public int getSourceTypeValue() { + return sourceType_; + } + + /** + * + * + *
    +     * The type of the restore source.
    +     * 
    + * + * .google.spanner.admin.database.v1.RestoreSourceType source_type = 2; + * + * @param value The enum numeric value on the wire for sourceType to set. + * @return This builder for chaining. + */ + public Builder setSourceTypeValue(int value) { + sourceType_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The type of the restore source.
    +     * 
    + * + * .google.spanner.admin.database.v1.RestoreSourceType source_type = 2; + * + * @return The sourceType. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreSourceType getSourceType() { + com.google.spanner.admin.database.v1.RestoreSourceType result = + com.google.spanner.admin.database.v1.RestoreSourceType.forNumber(sourceType_); + return result == null + ? com.google.spanner.admin.database.v1.RestoreSourceType.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * The type of the restore source.
    +     * 
    + * + * .google.spanner.admin.database.v1.RestoreSourceType source_type = 2; + * + * @param value The sourceType to set. + * @return This builder for chaining. + */ + public Builder setSourceType(com.google.spanner.admin.database.v1.RestoreSourceType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + sourceType_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The type of the restore source.
    +     * 
    + * + * .google.spanner.admin.database.v1.RestoreSourceType source_type = 2; + * + * @return This builder for chaining. + */ + public Builder clearSourceType() { + bitField0_ = (bitField0_ & ~0x00000002); + sourceType_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.BackupInfo, + com.google.spanner.admin.database.v1.BackupInfo.Builder, + com.google.spanner.admin.database.v1.BackupInfoOrBuilder> + backupInfoBuilder_; + + /** + * + * + *
    +     * Information about the backup used to restore the database.
    +     * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 3; + * + * @return Whether the backupInfo field is set. + */ + @java.lang.Override + public boolean hasBackupInfo() { + return sourceInfoCase_ == 3; + } + + /** + * + * + *
    +     * Information about the backup used to restore the database.
    +     * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 3; + * + * @return The backupInfo. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupInfo getBackupInfo() { + if (backupInfoBuilder_ == null) { + if (sourceInfoCase_ == 3) { + return (com.google.spanner.admin.database.v1.BackupInfo) sourceInfo_; + } + return com.google.spanner.admin.database.v1.BackupInfo.getDefaultInstance(); + } else { + if (sourceInfoCase_ == 3) { + return backupInfoBuilder_.getMessage(); + } + return com.google.spanner.admin.database.v1.BackupInfo.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Information about the backup used to restore the database.
    +     * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 3; + */ + public Builder setBackupInfo(com.google.spanner.admin.database.v1.BackupInfo value) { + if (backupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + sourceInfo_ = value; + onChanged(); + } else { + backupInfoBuilder_.setMessage(value); + } + sourceInfoCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Information about the backup used to restore the database.
    +     * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 3; + */ + public Builder setBackupInfo( + com.google.spanner.admin.database.v1.BackupInfo.Builder builderForValue) { + if (backupInfoBuilder_ == null) { + sourceInfo_ = builderForValue.build(); + onChanged(); + } else { + backupInfoBuilder_.setMessage(builderForValue.build()); + } + sourceInfoCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Information about the backup used to restore the database.
    +     * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 3; + */ + public Builder mergeBackupInfo(com.google.spanner.admin.database.v1.BackupInfo value) { + if (backupInfoBuilder_ == null) { + if (sourceInfoCase_ == 3 + && sourceInfo_ + != com.google.spanner.admin.database.v1.BackupInfo.getDefaultInstance()) { + sourceInfo_ = + com.google.spanner.admin.database.v1.BackupInfo.newBuilder( + (com.google.spanner.admin.database.v1.BackupInfo) sourceInfo_) + .mergeFrom(value) + .buildPartial(); + } else { + sourceInfo_ = value; + } + onChanged(); + } else { + if (sourceInfoCase_ == 3) { + backupInfoBuilder_.mergeFrom(value); + } else { + backupInfoBuilder_.setMessage(value); + } + } + sourceInfoCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Information about the backup used to restore the database.
    +     * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 3; + */ + public Builder clearBackupInfo() { + if (backupInfoBuilder_ == null) { + if (sourceInfoCase_ == 3) { + sourceInfoCase_ = 0; + sourceInfo_ = null; + onChanged(); + } + } else { + if (sourceInfoCase_ == 3) { + sourceInfoCase_ = 0; + sourceInfo_ = null; + } + backupInfoBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Information about the backup used to restore the database.
    +     * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 3; + */ + public com.google.spanner.admin.database.v1.BackupInfo.Builder getBackupInfoBuilder() { + return internalGetBackupInfoFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Information about the backup used to restore the database.
    +     * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 3; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupInfoOrBuilder getBackupInfoOrBuilder() { + if ((sourceInfoCase_ == 3) && (backupInfoBuilder_ != null)) { + return backupInfoBuilder_.getMessageOrBuilder(); + } else { + if (sourceInfoCase_ == 3) { + return (com.google.spanner.admin.database.v1.BackupInfo) sourceInfo_; + } + return com.google.spanner.admin.database.v1.BackupInfo.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Information about the backup used to restore the database.
    +     * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.BackupInfo, + com.google.spanner.admin.database.v1.BackupInfo.Builder, + com.google.spanner.admin.database.v1.BackupInfoOrBuilder> + internalGetBackupInfoFieldBuilder() { + if (backupInfoBuilder_ == null) { + if (!(sourceInfoCase_ == 3)) { + sourceInfo_ = com.google.spanner.admin.database.v1.BackupInfo.getDefaultInstance(); + } + backupInfoBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.BackupInfo, + com.google.spanner.admin.database.v1.BackupInfo.Builder, + com.google.spanner.admin.database.v1.BackupInfoOrBuilder>( + (com.google.spanner.admin.database.v1.BackupInfo) sourceInfo_, + getParentForChildren(), + isClean()); + sourceInfo_ = null; + } + sourceInfoCase_ = 3; + onChanged(); + return backupInfoBuilder_; + } + + private com.google.spanner.admin.database.v1.OperationProgress progress_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.OperationProgress, + com.google.spanner.admin.database.v1.OperationProgress.Builder, + com.google.spanner.admin.database.v1.OperationProgressOrBuilder> + progressBuilder_; + + /** + * + * + *
    +     * The progress of the
    +     * [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 4; + * + * @return Whether the progress field is set. + */ + public boolean hasProgress() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * The progress of the
    +     * [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 4; + * + * @return The progress. + */ + public com.google.spanner.admin.database.v1.OperationProgress getProgress() { + if (progressBuilder_ == null) { + return progress_ == null + ? com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance() + : progress_; + } else { + return progressBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The progress of the
    +     * [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 4; + */ + public Builder setProgress(com.google.spanner.admin.database.v1.OperationProgress value) { + if (progressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + progress_ = value; + } else { + progressBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 4; + */ + public Builder setProgress( + com.google.spanner.admin.database.v1.OperationProgress.Builder builderForValue) { + if (progressBuilder_ == null) { + progress_ = builderForValue.build(); + } else { + progressBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 4; + */ + public Builder mergeProgress(com.google.spanner.admin.database.v1.OperationProgress value) { + if (progressBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && progress_ != null + && progress_ + != com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance()) { + getProgressBuilder().mergeFrom(value); + } else { + progress_ = value; + } + } else { + progressBuilder_.mergeFrom(value); + } + if (progress_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 4; + */ + public Builder clearProgress() { + bitField0_ = (bitField0_ & ~0x00000008); + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 4; + */ + public com.google.spanner.admin.database.v1.OperationProgress.Builder getProgressBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetProgressFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The progress of the
    +     * [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 4; + */ + public com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgressOrBuilder() { + if (progressBuilder_ != null) { + return progressBuilder_.getMessageOrBuilder(); + } else { + return progress_ == null + ? com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance() + : progress_; + } + } + + /** + * + * + *
    +     * The progress of the
    +     * [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.OperationProgress, + com.google.spanner.admin.database.v1.OperationProgress.Builder, + com.google.spanner.admin.database.v1.OperationProgressOrBuilder> + internalGetProgressFieldBuilder() { + if (progressBuilder_ == null) { + progressBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.OperationProgress, + com.google.spanner.admin.database.v1.OperationProgress.Builder, + com.google.spanner.admin.database.v1.OperationProgressOrBuilder>( + getProgress(), getParentForChildren(), isClean()); + progress_ = null; + } + return progressBuilder_; + } + + private com.google.protobuf.Timestamp cancelTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + cancelTimeBuilder_; + + /** + * + * + *
    +     * The time at which cancellation of this operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
    +     * `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 5; + * + * @return Whether the cancelTime field is set. + */ + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * The time at which cancellation of this operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
    +     * `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 5; + * + * @return The cancelTime. + */ + public com.google.protobuf.Timestamp getCancelTime() { + if (cancelTimeBuilder_ == null) { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } else { + return cancelTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The time at which cancellation of this operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
    +     * `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 5; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cancelTime_ = value; + } else { + cancelTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which cancellation of this operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
    +     * `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 5; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (cancelTimeBuilder_ == null) { + cancelTime_ = builderForValue.build(); + } else { + cancelTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which cancellation of this operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
    +     * `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 5; + */ + public Builder mergeCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && cancelTime_ != null + && cancelTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCancelTimeBuilder().mergeFrom(value); + } else { + cancelTime_ = value; + } + } else { + cancelTimeBuilder_.mergeFrom(value); + } + if (cancelTime_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The time at which cancellation of this operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
    +     * `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 5; + */ + public Builder clearCancelTime() { + bitField0_ = (bitField0_ & ~0x00000010); + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which cancellation of this operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
    +     * `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 5; + */ + public com.google.protobuf.Timestamp.Builder getCancelTimeBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetCancelTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The time at which cancellation of this operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
    +     * `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 5; + */ + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + if (cancelTimeBuilder_ != null) { + return cancelTimeBuilder_.getMessageOrBuilder(); + } else { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } + } + + /** + * + * + *
    +     * The time at which cancellation of this operation was received.
    +     * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +     * starts asynchronous cancellation on a long-running operation. The server
    +     * makes a best effort to cancel the operation, but success is not guaranteed.
    +     * Clients can use
    +     * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +     * other methods to check whether the cancellation succeeded or whether the
    +     * operation completed despite cancellation. On successful cancellation,
    +     * the operation is not deleted; instead, it becomes an operation with
    +     * an [Operation.error][google.longrunning.Operation.error] value with a
    +     * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
    +     * `Code.CANCELLED`.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCancelTimeFieldBuilder() { + if (cancelTimeBuilder_ == null) { + cancelTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCancelTime(), getParentForChildren(), isClean()); + cancelTime_ = null; + } + return cancelTimeBuilder_; + } + + private java.lang.Object optimizeDatabaseOperationName_ = ""; + + /** + * + * + *
    +     * If exists, the name of the long-running operation that will be used to
    +     * track the post-restore optimization process to optimize the performance of
    +     * the restored database, and remove the dependency on the restore source.
    +     * The name is of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`
    +     * where the <database> is the name of database being created and restored to.
    +     * The metadata type of the  long-running operation is
    +     * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata].
    +     * This long-running operation will be automatically created by the system
    +     * after the RestoreDatabase long-running operation completes successfully.
    +     * This operation will not be created if the restore was not successful.
    +     * 
    + * + * string optimize_database_operation_name = 6; + * + * @return The optimizeDatabaseOperationName. + */ + public java.lang.String getOptimizeDatabaseOperationName() { + java.lang.Object ref = optimizeDatabaseOperationName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + optimizeDatabaseOperationName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If exists, the name of the long-running operation that will be used to
    +     * track the post-restore optimization process to optimize the performance of
    +     * the restored database, and remove the dependency on the restore source.
    +     * The name is of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`
    +     * where the <database> is the name of database being created and restored to.
    +     * The metadata type of the  long-running operation is
    +     * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata].
    +     * This long-running operation will be automatically created by the system
    +     * after the RestoreDatabase long-running operation completes successfully.
    +     * This operation will not be created if the restore was not successful.
    +     * 
    + * + * string optimize_database_operation_name = 6; + * + * @return The bytes for optimizeDatabaseOperationName. + */ + public com.google.protobuf.ByteString getOptimizeDatabaseOperationNameBytes() { + java.lang.Object ref = optimizeDatabaseOperationName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + optimizeDatabaseOperationName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If exists, the name of the long-running operation that will be used to
    +     * track the post-restore optimization process to optimize the performance of
    +     * the restored database, and remove the dependency on the restore source.
    +     * The name is of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`
    +     * where the <database> is the name of database being created and restored to.
    +     * The metadata type of the  long-running operation is
    +     * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata].
    +     * This long-running operation will be automatically created by the system
    +     * after the RestoreDatabase long-running operation completes successfully.
    +     * This operation will not be created if the restore was not successful.
    +     * 
    + * + * string optimize_database_operation_name = 6; + * + * @param value The optimizeDatabaseOperationName to set. + * @return This builder for chaining. + */ + public Builder setOptimizeDatabaseOperationName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + optimizeDatabaseOperationName_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If exists, the name of the long-running operation that will be used to
    +     * track the post-restore optimization process to optimize the performance of
    +     * the restored database, and remove the dependency on the restore source.
    +     * The name is of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`
    +     * where the <database> is the name of database being created and restored to.
    +     * The metadata type of the  long-running operation is
    +     * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata].
    +     * This long-running operation will be automatically created by the system
    +     * after the RestoreDatabase long-running operation completes successfully.
    +     * This operation will not be created if the restore was not successful.
    +     * 
    + * + * string optimize_database_operation_name = 6; + * + * @return This builder for chaining. + */ + public Builder clearOptimizeDatabaseOperationName() { + optimizeDatabaseOperationName_ = getDefaultInstance().getOptimizeDatabaseOperationName(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If exists, the name of the long-running operation that will be used to
    +     * track the post-restore optimization process to optimize the performance of
    +     * the restored database, and remove the dependency on the restore source.
    +     * The name is of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`
    +     * where the <database> is the name of database being created and restored to.
    +     * The metadata type of the  long-running operation is
    +     * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata].
    +     * This long-running operation will be automatically created by the system
    +     * after the RestoreDatabase long-running operation completes successfully.
    +     * This operation will not be created if the restore was not successful.
    +     * 
    + * + * string optimize_database_operation_name = 6; + * + * @param value The bytes for optimizeDatabaseOperationName to set. + * @return This builder for chaining. + */ + public Builder setOptimizeDatabaseOperationNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + optimizeDatabaseOperationName_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.RestoreDatabaseMetadata) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.RestoreDatabaseMetadata) + private static final com.google.spanner.admin.database.v1.RestoreDatabaseMetadata + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.RestoreDatabaseMetadata(); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RestoreDatabaseMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + 
return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreDatabaseMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseMetadataOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseMetadataOrBuilder.java new file mode 100644 index 000000000000..2aa59a8775c6 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseMetadataOrBuilder.java @@ -0,0 +1,278 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface RestoreDatabaseMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.RestoreDatabaseMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Name of the database being created and restored to.
    +   * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Name of the database being created and restored to.
    +   * 
    + * + * string name = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +   * The type of the restore source.
    +   * 
    + * + * .google.spanner.admin.database.v1.RestoreSourceType source_type = 2; + * + * @return The enum numeric value on the wire for sourceType. + */ + int getSourceTypeValue(); + + /** + * + * + *
    +   * The type of the restore source.
    +   * 
    + * + * .google.spanner.admin.database.v1.RestoreSourceType source_type = 2; + * + * @return The sourceType. + */ + com.google.spanner.admin.database.v1.RestoreSourceType getSourceType(); + + /** + * + * + *
    +   * Information about the backup used to restore the database.
    +   * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 3; + * + * @return Whether the backupInfo field is set. + */ + boolean hasBackupInfo(); + + /** + * + * + *
    +   * Information about the backup used to restore the database.
    +   * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 3; + * + * @return The backupInfo. + */ + com.google.spanner.admin.database.v1.BackupInfo getBackupInfo(); + + /** + * + * + *
    +   * Information about the backup used to restore the database.
    +   * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 3; + */ + com.google.spanner.admin.database.v1.BackupInfoOrBuilder getBackupInfoOrBuilder(); + + /** + * + * + *
    +   * The progress of the
    +   * [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 4; + * + * @return Whether the progress field is set. + */ + boolean hasProgress(); + + /** + * + * + *
    +   * The progress of the
    +   * [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 4; + * + * @return The progress. + */ + com.google.spanner.admin.database.v1.OperationProgress getProgress(); + + /** + * + * + *
    +   * The progress of the
    +   * [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 4; + */ + com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgressOrBuilder(); + + /** + * + * + *
    +   * The time at which cancellation of this operation was received.
    +   * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +   * starts asynchronous cancellation on a long-running operation. The server
    +   * makes a best effort to cancel the operation, but success is not guaranteed.
    +   * Clients can use
    +   * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +   * other methods to check whether the cancellation succeeded or whether the
    +   * operation completed despite cancellation. On successful cancellation,
    +   * the operation is not deleted; instead, it becomes an operation with
    +   * an [Operation.error][google.longrunning.Operation.error] value with a
    +   * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
    +   * `Code.CANCELLED`.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 5; + * + * @return Whether the cancelTime field is set. + */ + boolean hasCancelTime(); + + /** + * + * + *
    +   * The time at which cancellation of this operation was received.
    +   * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +   * starts asynchronous cancellation on a long-running operation. The server
    +   * makes a best effort to cancel the operation, but success is not guaranteed.
    +   * Clients can use
    +   * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +   * other methods to check whether the cancellation succeeded or whether the
    +   * operation completed despite cancellation. On successful cancellation,
    +   * the operation is not deleted; instead, it becomes an operation with
    +   * an [Operation.error][google.longrunning.Operation.error] value with a
    +   * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
    +   * `Code.CANCELLED`.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 5; + * + * @return The cancelTime. + */ + com.google.protobuf.Timestamp getCancelTime(); + + /** + * + * + *
    +   * The time at which cancellation of this operation was received.
    +   * [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
    +   * starts asynchronous cancellation on a long-running operation. The server
    +   * makes a best effort to cancel the operation, but success is not guaranteed.
    +   * Clients can use
    +   * [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
    +   * other methods to check whether the cancellation succeeded or whether the
    +   * operation completed despite cancellation. On successful cancellation,
    +   * the operation is not deleted; instead, it becomes an operation with
    +   * an [Operation.error][google.longrunning.Operation.error] value with a
    +   * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
    +   * `Code.CANCELLED`.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 5; + */ + com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder(); + + /** + * + * + *
    +   * If exists, the name of the long-running operation that will be used to
    +   * track the post-restore optimization process to optimize the performance of
    +   * the restored database, and remove the dependency on the restore source.
    +   * The name is of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`
    +   * where the <database> is the name of database being created and restored to.
    +   * The metadata type of the  long-running operation is
    +   * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata].
    +   * This long-running operation will be automatically created by the system
    +   * after the RestoreDatabase long-running operation completes successfully.
    +   * This operation will not be created if the restore was not successful.
    +   * 
    + * + * string optimize_database_operation_name = 6; + * + * @return The optimizeDatabaseOperationName. + */ + java.lang.String getOptimizeDatabaseOperationName(); + + /** + * + * + *
    +   * If exists, the name of the long-running operation that will be used to
    +   * track the post-restore optimization process to optimize the performance of
    +   * the restored database, and remove the dependency on the restore source.
    +   * The name is of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`
    +   * where the <database> is the name of database being created and restored to.
    +   * The metadata type of the  long-running operation is
    +   * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata].
    +   * This long-running operation will be automatically created by the system
    +   * after the RestoreDatabase long-running operation completes successfully.
    +   * This operation will not be created if the restore was not successful.
    +   * 
    + * + * string optimize_database_operation_name = 6; + * + * @return The bytes for optimizeDatabaseOperationName. + */ + com.google.protobuf.ByteString getOptimizeDatabaseOperationNameBytes(); + + com.google.spanner.admin.database.v1.RestoreDatabaseMetadata.SourceInfoCase getSourceInfoCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseRequest.java new file mode 100644 index 000000000000..310613bb1ac9 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseRequest.java @@ -0,0 +1,1574 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.RestoreDatabaseRequest} + */ +@com.google.protobuf.Generated +public final class RestoreDatabaseRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.RestoreDatabaseRequest) + RestoreDatabaseRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "RestoreDatabaseRequest"); + } + + // Use RestoreDatabaseRequest.newBuilder() to construct. + private RestoreDatabaseRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private RestoreDatabaseRequest() { + parent_ = ""; + databaseId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_RestoreDatabaseRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_RestoreDatabaseRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.RestoreDatabaseRequest.class, + com.google.spanner.admin.database.v1.RestoreDatabaseRequest.Builder.class); + } + + private int bitField0_; + private int sourceCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object source_; + + public enum SourceCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + BACKUP(3), + SOURCE_NOT_SET(0); + private final int value; + + private 
SourceCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static SourceCase valueOf(int value) { + return forNumber(value); + } + + public static SourceCase forNumber(int value) { + switch (value) { + case 3: + return BACKUP; + case 0: + return SOURCE_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public SourceCase getSourceCase() { + return SourceCase.forNumber(sourceCase_); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
    +   * Required. The name of the instance in which to create the
    +   * restored database. This instance must be in the same project and
    +   * have the same instance configuration as the instance containing
    +   * the source backup. Values are of the form
    +   * `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the instance in which to create the
    +   * restored database. This instance must be in the same project and
    +   * have the same instance configuration as the instance containing
    +   * the source backup. Values are of the form
    +   * `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DATABASE_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object databaseId_ = ""; + + /** + * + * + *
    +   * Required. The id of the database to create and restore to. This
    +   * database must not already exist. The `database_id` appended to
    +   * `parent` forms the full database name of the form
    +   * `projects/<project>/instances/<instance>/databases/<database_id>`.
    +   * 
    + * + * string database_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The databaseId. + */ + @java.lang.Override + public java.lang.String getDatabaseId() { + java.lang.Object ref = databaseId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The id of the database to create and restore to. This
    +   * database must not already exist. The `database_id` appended to
    +   * `parent` forms the full database name of the form
    +   * `projects/<project>/instances/<instance>/databases/<database_id>`.
    +   * 
    + * + * string database_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for databaseId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseIdBytes() { + java.lang.Object ref = databaseId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BACKUP_FIELD_NUMBER = 3; + + /** + * + * + *
    +   * Name of the backup from which to restore.  Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * string backup = 3 [(.google.api.resource_reference) = { ... } + * + * @return Whether the backup field is set. + */ + public boolean hasBackup() { + return sourceCase_ == 3; + } + + /** + * + * + *
    +   * Name of the backup from which to restore.  Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * string backup = 3 [(.google.api.resource_reference) = { ... } + * + * @return The backup. + */ + public java.lang.String getBackup() { + java.lang.Object ref = ""; + if (sourceCase_ == 3) { + ref = source_; + } + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (sourceCase_ == 3) { + source_ = s; + } + return s; + } + } + + /** + * + * + *
    +   * Name of the backup from which to restore.  Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * string backup = 3 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for backup. + */ + public com.google.protobuf.ByteString getBackupBytes() { + java.lang.Object ref = ""; + if (sourceCase_ == 3) { + ref = source_; + } + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + if (sourceCase_ == 3) { + source_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ENCRYPTION_CONFIG_FIELD_NUMBER = 4; + private com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig encryptionConfig_; + + /** + * + * + *
    +   * Optional. An encryption configuration describing the encryption type and
    +   * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +   * to. If this field is not specified, the restored database will use the same
    +   * encryption configuration as the backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the encryptionConfig field is set. + */ + @java.lang.Override + public boolean hasEncryptionConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Optional. An encryption configuration describing the encryption type and
    +   * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +   * to. If this field is not specified, the restored database will use the same
    +   * encryption configuration as the backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The encryptionConfig. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig + getEncryptionConfig() { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + + /** + * + * + *
    +   * Optional. An encryption configuration describing the encryption type and
    +   * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +   * to. If this field is not specified, the restored database will use the same
    +   * encryption configuration as the backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfigOrBuilder + getEncryptionConfigOrBuilder() { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, databaseId_); + } + if (sourceCase_ == 3) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, source_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(4, getEncryptionConfig()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, databaseId_); + } + if (sourceCase_ == 3) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, source_); + } + if (((bitField0_ & 0x00000001) != 0)) { + 
size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getEncryptionConfig()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.RestoreDatabaseRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.RestoreDatabaseRequest other = + (com.google.spanner.admin.database.v1.RestoreDatabaseRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getDatabaseId().equals(other.getDatabaseId())) return false; + if (hasEncryptionConfig() != other.hasEncryptionConfig()) return false; + if (hasEncryptionConfig()) { + if (!getEncryptionConfig().equals(other.getEncryptionConfig())) return false; + } + if (!getSourceCase().equals(other.getSourceCase())) return false; + switch (sourceCase_) { + case 3: + if (!getBackup().equals(other.getBackup())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + DATABASE_ID_FIELD_NUMBER; + hash = (53 * hash) + getDatabaseId().hashCode(); + if (hasEncryptionConfig()) { + hash = (37 * hash) + ENCRYPTION_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getEncryptionConfig().hashCode(); + } + switch (sourceCase_) { + case 3: + hash = (37 * hash) + BACKUP_FIELD_NUMBER; + hash = (53 * hash) + getBackup().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static 
com.google.spanner.admin.database.v1.RestoreDatabaseRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.RestoreDatabaseRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.RestoreDatabaseRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.RestoreDatabaseRequest) + com.google.spanner.admin.database.v1.RestoreDatabaseRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_RestoreDatabaseRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_RestoreDatabaseRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.RestoreDatabaseRequest.class, + com.google.spanner.admin.database.v1.RestoreDatabaseRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.RestoreDatabaseRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetEncryptionConfigFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + databaseId_ = ""; + encryptionConfig_ = null; + if (encryptionConfigBuilder_ != null) { + encryptionConfigBuilder_.dispose(); + encryptionConfigBuilder_ = null; + } + sourceCase_ = 0; + source_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() 
{ + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_RestoreDatabaseRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreDatabaseRequest getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.RestoreDatabaseRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreDatabaseRequest build() { + com.google.spanner.admin.database.v1.RestoreDatabaseRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreDatabaseRequest buildPartial() { + com.google.spanner.admin.database.v1.RestoreDatabaseRequest result = + new com.google.spanner.admin.database.v1.RestoreDatabaseRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.RestoreDatabaseRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.databaseId_ = databaseId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.encryptionConfig_ = + encryptionConfigBuilder_ == null ? 
encryptionConfig_ : encryptionConfigBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs( + com.google.spanner.admin.database.v1.RestoreDatabaseRequest result) { + result.sourceCase_ = sourceCase_; + result.source_ = this.source_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.RestoreDatabaseRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.RestoreDatabaseRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.RestoreDatabaseRequest other) { + if (other == com.google.spanner.admin.database.v1.RestoreDatabaseRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getDatabaseId().isEmpty()) { + databaseId_ = other.databaseId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasEncryptionConfig()) { + mergeEncryptionConfig(other.getEncryptionConfig()); + } + switch (other.getSourceCase()) { + case BACKUP: + { + sourceCase_ = 3; + source_ = other.source_; + onChanged(); + break; + } + case SOURCE_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ 
|= 0x00000001; + break; + } // case 10 + case 18: + { + databaseId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + sourceCase_ = 3; + source_ = s; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetEncryptionConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int sourceCase_ = 0; + private java.lang.Object source_; + + public SourceCase getSourceCase() { + return SourceCase.forNumber(sourceCase_); + } + + public Builder clearSource() { + sourceCase_ = 0; + source_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
    +     * Required. The name of the instance in which to create the
    +     * restored database. This instance must be in the same project and
    +     * have the same instance configuration as the instance containing
    +     * the source backup. Values are of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the instance in which to create the
    +     * restored database. This instance must be in the same project and
    +     * have the same instance configuration as the instance containing
    +     * the source backup. Values are of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the instance in which to create the
    +     * restored database. This instance must be in the same project and
    +     * have the same instance configuration as the instance containing
    +     * the source backup. Values are of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the instance in which to create the
    +     * restored database. This instance must be in the same project and
    +     * have the same instance configuration as the instance containing
    +     * the source backup. Values are of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the instance in which to create the
    +     * restored database. This instance must be in the same project and
    +     * have the same instance configuration as the instance containing
    +     * the source backup. Values are of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object databaseId_ = ""; + + /** + * + * + *
    +     * Required. The id of the database to create and restore to. This
    +     * database must not already exist. The `database_id` appended to
    +     * `parent` forms the full database name of the form
    +     * `projects/<project>/instances/<instance>/databases/<database_id>`.
    +     * 
    + * + * string database_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The databaseId. + */ + public java.lang.String getDatabaseId() { + java.lang.Object ref = databaseId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The id of the database to create and restore to. This
    +     * database must not already exist. The `database_id` appended to
    +     * `parent` forms the full database name of the form
    +     * `projects/<project>/instances/<instance>/databases/<database_id>`.
    +     * 
    + * + * string database_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for databaseId. + */ + public com.google.protobuf.ByteString getDatabaseIdBytes() { + java.lang.Object ref = databaseId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The id of the database to create and restore to. This
    +     * database must not already exist. The `database_id` appended to
    +     * `parent` forms the full database name of the form
    +     * `projects/<project>/instances/<instance>/databases/<database_id>`.
    +     * 
    + * + * string database_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The databaseId to set. + * @return This builder for chaining. + */ + public Builder setDatabaseId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + databaseId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The id of the database to create and restore to. This
    +     * database must not already exist. The `database_id` appended to
    +     * `parent` forms the full database name of the form
    +     * `projects/<project>/instances/<instance>/databases/<database_id>`.
    +     * 
    + * + * string database_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearDatabaseId() { + databaseId_ = getDefaultInstance().getDatabaseId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The id of the database to create and restore to. This
    +     * database must not already exist. The `database_id` appended to
    +     * `parent` forms the full database name of the form
    +     * `projects/<project>/instances/<instance>/databases/<database_id>`.
    +     * 
    + * + * string database_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for databaseId to set. + * @return This builder for chaining. + */ + public Builder setDatabaseIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + databaseId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Name of the backup from which to restore.  Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * string backup = 3 [(.google.api.resource_reference) = { ... } + * + * @return Whether the backup field is set. + */ + @java.lang.Override + public boolean hasBackup() { + return sourceCase_ == 3; + } + + /** + * + * + *
    +     * Name of the backup from which to restore.  Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * string backup = 3 [(.google.api.resource_reference) = { ... } + * + * @return The backup. + */ + @java.lang.Override + public java.lang.String getBackup() { + java.lang.Object ref = ""; + if (sourceCase_ == 3) { + ref = source_; + } + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (sourceCase_ == 3) { + source_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Name of the backup from which to restore.  Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * string backup = 3 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for backup. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBackupBytes() { + java.lang.Object ref = ""; + if (sourceCase_ == 3) { + ref = source_; + } + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + if (sourceCase_ == 3) { + source_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Name of the backup from which to restore.  Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * string backup = 3 [(.google.api.resource_reference) = { ... } + * + * @param value The backup to set. + * @return This builder for chaining. + */ + public Builder setBackup(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + sourceCase_ = 3; + source_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Name of the backup from which to restore.  Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * string backup = 3 [(.google.api.resource_reference) = { ... } + * + * @return This builder for chaining. + */ + public Builder clearBackup() { + if (sourceCase_ == 3) { + sourceCase_ = 0; + source_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Name of the backup from which to restore.  Values are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * 
    + * + * string backup = 3 [(.google.api.resource_reference) = { ... } + * + * @param value The bytes for backup to set. + * @return This builder for chaining. + */ + public Builder setBackupBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + sourceCase_ = 3; + source_ = value; + onChanged(); + return this; + } + + private com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig encryptionConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig, + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.Builder, + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfigOrBuilder> + encryptionConfigBuilder_; + + /** + * + * + *
    +     * Optional. An encryption configuration describing the encryption type and
    +     * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +     * to. If this field is not specified, the restored database will use the same
    +     * encryption configuration as the backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the encryptionConfig field is set. + */ + public boolean hasEncryptionConfig() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Optional. An encryption configuration describing the encryption type and
    +     * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +     * to. If this field is not specified, the restored database will use the same
    +     * encryption configuration as the backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The encryptionConfig. + */ + public com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig + getEncryptionConfig() { + if (encryptionConfigBuilder_ == null) { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig + .getDefaultInstance() + : encryptionConfig_; + } else { + return encryptionConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. An encryption configuration describing the encryption type and
    +     * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +     * to. If this field is not specified, the restored database will use the same
    +     * encryption configuration as the backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setEncryptionConfig( + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig value) { + if (encryptionConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + encryptionConfig_ = value; + } else { + encryptionConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. An encryption configuration describing the encryption type and
    +     * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +     * to. If this field is not specified, the restored database will use the same
    +     * encryption configuration as the backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setEncryptionConfig( + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.Builder + builderForValue) { + if (encryptionConfigBuilder_ == null) { + encryptionConfig_ = builderForValue.build(); + } else { + encryptionConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. An encryption configuration describing the encryption type and
    +     * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +     * to. If this field is not specified, the restored database will use the same
    +     * encryption configuration as the backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeEncryptionConfig( + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig value) { + if (encryptionConfigBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && encryptionConfig_ != null + && encryptionConfig_ + != com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig + .getDefaultInstance()) { + getEncryptionConfigBuilder().mergeFrom(value); + } else { + encryptionConfig_ = value; + } + } else { + encryptionConfigBuilder_.mergeFrom(value); + } + if (encryptionConfig_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. An encryption configuration describing the encryption type and
    +     * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +     * to. If this field is not specified, the restored database will use the same
    +     * encryption configuration as the backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearEncryptionConfig() { + bitField0_ = (bitField0_ & ~0x00000008); + encryptionConfig_ = null; + if (encryptionConfigBuilder_ != null) { + encryptionConfigBuilder_.dispose(); + encryptionConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. An encryption configuration describing the encryption type and
    +     * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +     * to. If this field is not specified, the restored database will use the same
    +     * encryption configuration as the backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.Builder + getEncryptionConfigBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetEncryptionConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. An encryption configuration describing the encryption type and
    +     * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +     * to. If this field is not specified, the restored database will use the same
    +     * encryption configuration as the backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfigOrBuilder + getEncryptionConfigOrBuilder() { + if (encryptionConfigBuilder_ != null) { + return encryptionConfigBuilder_.getMessageOrBuilder(); + } else { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig + .getDefaultInstance() + : encryptionConfig_; + } + } + + /** + * + * + *
    +     * Optional. An encryption configuration describing the encryption type and
    +     * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +     * to. If this field is not specified, the restored database will use the same
    +     * encryption configuration as the backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig, + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.Builder, + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfigOrBuilder> + internalGetEncryptionConfigFieldBuilder() { + if (encryptionConfigBuilder_ == null) { + encryptionConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig, + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.Builder, + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfigOrBuilder>( + getEncryptionConfig(), getParentForChildren(), isClean()); + encryptionConfig_ = null; + } + return encryptionConfigBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.RestoreDatabaseRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.RestoreDatabaseRequest) + private static final com.google.spanner.admin.database.v1.RestoreDatabaseRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.RestoreDatabaseRequest(); + } + + public static com.google.spanner.admin.database.v1.RestoreDatabaseRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RestoreDatabaseRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreDatabaseRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseRequestOrBuilder.java new file mode 100644 index 000000000000..b36a76b8a2af --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseRequestOrBuilder.java @@ -0,0 +1,201 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface RestoreDatabaseRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.RestoreDatabaseRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the instance in which to create the
    +   * restored database. This instance must be in the same project and
    +   * have the same instance configuration as the instance containing
    +   * the source backup. Values are of the form
    +   * `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
    +   * Required. The name of the instance in which to create the
    +   * restored database. This instance must be in the same project and
    +   * have the same instance configuration as the instance containing
    +   * the source backup. Values are of the form
    +   * `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
    +   * Required. The id of the database to create and restore to. This
    +   * database must not already exist. The `database_id` appended to
    +   * `parent` forms the full database name of the form
    +   * `projects/<project>/instances/<instance>/databases/<database_id>`.
    +   * 
    + * + * string database_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The databaseId. + */ + java.lang.String getDatabaseId(); + + /** + * + * + *
    +   * Required. The id of the database to create and restore to. This
    +   * database must not already exist. The `database_id` appended to
    +   * `parent` forms the full database name of the form
    +   * `projects/<project>/instances/<instance>/databases/<database_id>`.
    +   * 
    + * + * string database_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for databaseId. + */ + com.google.protobuf.ByteString getDatabaseIdBytes(); + + /** + * + * + *
    +   * Name of the backup from which to restore.  Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * string backup = 3 [(.google.api.resource_reference) = { ... } + * + * @return Whether the backup field is set. + */ + boolean hasBackup(); + + /** + * + * + *
    +   * Name of the backup from which to restore.  Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * string backup = 3 [(.google.api.resource_reference) = { ... } + * + * @return The backup. + */ + java.lang.String getBackup(); + + /** + * + * + *
    +   * Name of the backup from which to restore.  Values are of the form
    +   * `projects/<project>/instances/<instance>/backups/<backup>`.
    +   * 
    + * + * string backup = 3 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for backup. + */ + com.google.protobuf.ByteString getBackupBytes(); + + /** + * + * + *
    +   * Optional. An encryption configuration describing the encryption type and
    +   * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +   * to. If this field is not specified, the restored database will use the same
    +   * encryption configuration as the backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the encryptionConfig field is set. + */ + boolean hasEncryptionConfig(); + + /** + * + * + *
    +   * Optional. An encryption configuration describing the encryption type and
    +   * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +   * to. If this field is not specified, the restored database will use the same
    +   * encryption configuration as the backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The encryptionConfig. + */ + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig getEncryptionConfig(); + + /** + * + * + *
    +   * Optional. An encryption configuration describing the encryption type and
    +   * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +   * to. If this field is not specified, the restored database will use the same
    +   * encryption configuration as the backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfigOrBuilder + getEncryptionConfigOrBuilder(); + + com.google.spanner.admin.database.v1.RestoreDatabaseRequest.SourceCase getSourceCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreInfo.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreInfo.java new file mode 100644 index 000000000000..a06362fab8a4 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreInfo.java @@ -0,0 +1,970 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Information about the database restore.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.RestoreInfo} + */ +@com.google.protobuf.Generated +public final class RestoreInfo extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.RestoreInfo) + RestoreInfoOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "RestoreInfo"); + } + + // Use RestoreInfo.newBuilder() to construct. + private RestoreInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private RestoreInfo() { + sourceType_ = 0; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_RestoreInfo_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_RestoreInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.RestoreInfo.class, + com.google.spanner.admin.database.v1.RestoreInfo.Builder.class); + } + + private int sourceInfoCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object sourceInfo_; + + public enum SourceInfoCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + BACKUP_INFO(2), + SOURCEINFO_NOT_SET(0); + private final int value; + + private SourceInfoCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. 
+ * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static SourceInfoCase valueOf(int value) { + return forNumber(value); + } + + public static SourceInfoCase forNumber(int value) { + switch (value) { + case 2: + return BACKUP_INFO; + case 0: + return SOURCEINFO_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public SourceInfoCase getSourceInfoCase() { + return SourceInfoCase.forNumber(sourceInfoCase_); + } + + public static final int SOURCE_TYPE_FIELD_NUMBER = 1; + private int sourceType_ = 0; + + /** + * + * + *
    +   * The type of the restore source.
    +   * 
    + * + * .google.spanner.admin.database.v1.RestoreSourceType source_type = 1; + * + * @return The enum numeric value on the wire for sourceType. + */ + @java.lang.Override + public int getSourceTypeValue() { + return sourceType_; + } + + /** + * + * + *
    +   * The type of the restore source.
    +   * 
    + * + * .google.spanner.admin.database.v1.RestoreSourceType source_type = 1; + * + * @return The sourceType. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreSourceType getSourceType() { + com.google.spanner.admin.database.v1.RestoreSourceType result = + com.google.spanner.admin.database.v1.RestoreSourceType.forNumber(sourceType_); + return result == null + ? com.google.spanner.admin.database.v1.RestoreSourceType.UNRECOGNIZED + : result; + } + + public static final int BACKUP_INFO_FIELD_NUMBER = 2; + + /** + * + * + *
    +   * Information about the backup used to restore the database. The backup
    +   * may no longer exist.
    +   * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 2; + * + * @return Whether the backupInfo field is set. + */ + @java.lang.Override + public boolean hasBackupInfo() { + return sourceInfoCase_ == 2; + } + + /** + * + * + *
    +   * Information about the backup used to restore the database. The backup
    +   * may no longer exist.
    +   * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 2; + * + * @return The backupInfo. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupInfo getBackupInfo() { + if (sourceInfoCase_ == 2) { + return (com.google.spanner.admin.database.v1.BackupInfo) sourceInfo_; + } + return com.google.spanner.admin.database.v1.BackupInfo.getDefaultInstance(); + } + + /** + * + * + *
    +   * Information about the backup used to restore the database. The backup
    +   * may no longer exist.
    +   * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 2; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupInfoOrBuilder getBackupInfoOrBuilder() { + if (sourceInfoCase_ == 2) { + return (com.google.spanner.admin.database.v1.BackupInfo) sourceInfo_; + } + return com.google.spanner.admin.database.v1.BackupInfo.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (sourceType_ + != com.google.spanner.admin.database.v1.RestoreSourceType.TYPE_UNSPECIFIED.getNumber()) { + output.writeEnum(1, sourceType_); + } + if (sourceInfoCase_ == 2) { + output.writeMessage(2, (com.google.spanner.admin.database.v1.BackupInfo) sourceInfo_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (sourceType_ + != com.google.spanner.admin.database.v1.RestoreSourceType.TYPE_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, sourceType_); + } + if (sourceInfoCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, (com.google.spanner.admin.database.v1.BackupInfo) sourceInfo_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.RestoreInfo)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.RestoreInfo other = + 
(com.google.spanner.admin.database.v1.RestoreInfo) obj; + + if (sourceType_ != other.sourceType_) return false; + if (!getSourceInfoCase().equals(other.getSourceInfoCase())) return false; + switch (sourceInfoCase_) { + case 2: + if (!getBackupInfo().equals(other.getBackupInfo())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SOURCE_TYPE_FIELD_NUMBER; + hash = (53 * hash) + sourceType_; + switch (sourceInfoCase_) { + case 2: + hash = (37 * hash) + BACKUP_INFO_FIELD_NUMBER; + hash = (53 * hash) + getBackupInfo().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.RestoreInfo parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.RestoreInfo parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.RestoreInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.RestoreInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.admin.database.v1.RestoreInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.RestoreInfo parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.RestoreInfo parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.RestoreInfo parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.RestoreInfo parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.RestoreInfo parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.RestoreInfo parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.RestoreInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { 
+ return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.admin.database.v1.RestoreInfo prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Information about the database restore.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.RestoreInfo} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.RestoreInfo) + com.google.spanner.admin.database.v1.RestoreInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_RestoreInfo_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_RestoreInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.RestoreInfo.class, + com.google.spanner.admin.database.v1.RestoreInfo.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.RestoreInfo.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + sourceType_ = 0; + if (backupInfoBuilder_ != null) { + backupInfoBuilder_.clear(); + } + sourceInfoCase_ = 0; + sourceInfo_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_RestoreInfo_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreInfo getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.RestoreInfo.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.admin.database.v1.RestoreInfo build() { + com.google.spanner.admin.database.v1.RestoreInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreInfo buildPartial() { + com.google.spanner.admin.database.v1.RestoreInfo result = + new com.google.spanner.admin.database.v1.RestoreInfo(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.RestoreInfo result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.sourceType_ = sourceType_; + } + } + + private void buildPartialOneofs(com.google.spanner.admin.database.v1.RestoreInfo result) { + result.sourceInfoCase_ = sourceInfoCase_; + result.sourceInfo_ = this.sourceInfo_; + if (sourceInfoCase_ == 2 && backupInfoBuilder_ != null) { + result.sourceInfo_ = backupInfoBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.RestoreInfo) { + return mergeFrom((com.google.spanner.admin.database.v1.RestoreInfo) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.RestoreInfo other) { + if (other == com.google.spanner.admin.database.v1.RestoreInfo.getDefaultInstance()) + return this; + if (other.sourceType_ != 0) { + setSourceTypeValue(other.getSourceTypeValue()); + } + switch (other.getSourceInfoCase()) { + case BACKUP_INFO: + { + mergeBackupInfo(other.getBackupInfo()); + break; + } + case SOURCEINFO_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return 
true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + sourceType_ = input.readEnum(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + input.readMessage( + internalGetBackupInfoFieldBuilder().getBuilder(), extensionRegistry); + sourceInfoCase_ = 2; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int sourceInfoCase_ = 0; + private java.lang.Object sourceInfo_; + + public SourceInfoCase getSourceInfoCase() { + return SourceInfoCase.forNumber(sourceInfoCase_); + } + + public Builder clearSourceInfo() { + sourceInfoCase_ = 0; + sourceInfo_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private int sourceType_ = 0; + + /** + * + * + *
    +     * The type of the restore source.
    +     * 
    + * + * .google.spanner.admin.database.v1.RestoreSourceType source_type = 1; + * + * @return The enum numeric value on the wire for sourceType. + */ + @java.lang.Override + public int getSourceTypeValue() { + return sourceType_; + } + + /** + * + * + *
    +     * The type of the restore source.
    +     * 
    + * + * .google.spanner.admin.database.v1.RestoreSourceType source_type = 1; + * + * @param value The enum numeric value on the wire for sourceType to set. + * @return This builder for chaining. + */ + public Builder setSourceTypeValue(int value) { + sourceType_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The type of the restore source.
    +     * 
    + * + * .google.spanner.admin.database.v1.RestoreSourceType source_type = 1; + * + * @return The sourceType. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreSourceType getSourceType() { + com.google.spanner.admin.database.v1.RestoreSourceType result = + com.google.spanner.admin.database.v1.RestoreSourceType.forNumber(sourceType_); + return result == null + ? com.google.spanner.admin.database.v1.RestoreSourceType.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * The type of the restore source.
    +     * 
    + * + * .google.spanner.admin.database.v1.RestoreSourceType source_type = 1; + * + * @param value The sourceType to set. + * @return This builder for chaining. + */ + public Builder setSourceType(com.google.spanner.admin.database.v1.RestoreSourceType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + sourceType_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The type of the restore source.
    +     * 
    + * + * .google.spanner.admin.database.v1.RestoreSourceType source_type = 1; + * + * @return This builder for chaining. + */ + public Builder clearSourceType() { + bitField0_ = (bitField0_ & ~0x00000001); + sourceType_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.BackupInfo, + com.google.spanner.admin.database.v1.BackupInfo.Builder, + com.google.spanner.admin.database.v1.BackupInfoOrBuilder> + backupInfoBuilder_; + + /** + * + * + *
    +     * Information about the backup used to restore the database. The backup
    +     * may no longer exist.
    +     * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 2; + * + * @return Whether the backupInfo field is set. + */ + @java.lang.Override + public boolean hasBackupInfo() { + return sourceInfoCase_ == 2; + } + + /** + * + * + *
    +     * Information about the backup used to restore the database. The backup
    +     * may no longer exist.
    +     * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 2; + * + * @return The backupInfo. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupInfo getBackupInfo() { + if (backupInfoBuilder_ == null) { + if (sourceInfoCase_ == 2) { + return (com.google.spanner.admin.database.v1.BackupInfo) sourceInfo_; + } + return com.google.spanner.admin.database.v1.BackupInfo.getDefaultInstance(); + } else { + if (sourceInfoCase_ == 2) { + return backupInfoBuilder_.getMessage(); + } + return com.google.spanner.admin.database.v1.BackupInfo.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Information about the backup used to restore the database. The backup
    +     * may no longer exist.
    +     * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 2; + */ + public Builder setBackupInfo(com.google.spanner.admin.database.v1.BackupInfo value) { + if (backupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + sourceInfo_ = value; + onChanged(); + } else { + backupInfoBuilder_.setMessage(value); + } + sourceInfoCase_ = 2; + return this; + } + + /** + * + * + *
    +     * Information about the backup used to restore the database. The backup
    +     * may no longer exist.
    +     * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 2; + */ + public Builder setBackupInfo( + com.google.spanner.admin.database.v1.BackupInfo.Builder builderForValue) { + if (backupInfoBuilder_ == null) { + sourceInfo_ = builderForValue.build(); + onChanged(); + } else { + backupInfoBuilder_.setMessage(builderForValue.build()); + } + sourceInfoCase_ = 2; + return this; + } + + /** + * + * + *
    +     * Information about the backup used to restore the database. The backup
    +     * may no longer exist.
    +     * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 2; + */ + public Builder mergeBackupInfo(com.google.spanner.admin.database.v1.BackupInfo value) { + if (backupInfoBuilder_ == null) { + if (sourceInfoCase_ == 2 + && sourceInfo_ + != com.google.spanner.admin.database.v1.BackupInfo.getDefaultInstance()) { + sourceInfo_ = + com.google.spanner.admin.database.v1.BackupInfo.newBuilder( + (com.google.spanner.admin.database.v1.BackupInfo) sourceInfo_) + .mergeFrom(value) + .buildPartial(); + } else { + sourceInfo_ = value; + } + onChanged(); + } else { + if (sourceInfoCase_ == 2) { + backupInfoBuilder_.mergeFrom(value); + } else { + backupInfoBuilder_.setMessage(value); + } + } + sourceInfoCase_ = 2; + return this; + } + + /** + * + * + *
    +     * Information about the backup used to restore the database. The backup
    +     * may no longer exist.
    +     * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 2; + */ + public Builder clearBackupInfo() { + if (backupInfoBuilder_ == null) { + if (sourceInfoCase_ == 2) { + sourceInfoCase_ = 0; + sourceInfo_ = null; + onChanged(); + } + } else { + if (sourceInfoCase_ == 2) { + sourceInfoCase_ = 0; + sourceInfo_ = null; + } + backupInfoBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Information about the backup used to restore the database. The backup
    +     * may no longer exist.
    +     * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 2; + */ + public com.google.spanner.admin.database.v1.BackupInfo.Builder getBackupInfoBuilder() { + return internalGetBackupInfoFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Information about the backup used to restore the database. The backup
    +     * may no longer exist.
    +     * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 2; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupInfoOrBuilder getBackupInfoOrBuilder() { + if ((sourceInfoCase_ == 2) && (backupInfoBuilder_ != null)) { + return backupInfoBuilder_.getMessageOrBuilder(); + } else { + if (sourceInfoCase_ == 2) { + return (com.google.spanner.admin.database.v1.BackupInfo) sourceInfo_; + } + return com.google.spanner.admin.database.v1.BackupInfo.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Information about the backup used to restore the database. The backup
    +     * may no longer exist.
    +     * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.BackupInfo, + com.google.spanner.admin.database.v1.BackupInfo.Builder, + com.google.spanner.admin.database.v1.BackupInfoOrBuilder> + internalGetBackupInfoFieldBuilder() { + if (backupInfoBuilder_ == null) { + if (!(sourceInfoCase_ == 2)) { + sourceInfo_ = com.google.spanner.admin.database.v1.BackupInfo.getDefaultInstance(); + } + backupInfoBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.BackupInfo, + com.google.spanner.admin.database.v1.BackupInfo.Builder, + com.google.spanner.admin.database.v1.BackupInfoOrBuilder>( + (com.google.spanner.admin.database.v1.BackupInfo) sourceInfo_, + getParentForChildren(), + isClean()); + sourceInfo_ = null; + } + sourceInfoCase_ = 2; + onChanged(); + return backupInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.RestoreInfo) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.RestoreInfo) + private static final com.google.spanner.admin.database.v1.RestoreInfo DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.RestoreInfo(); + } + + public static com.google.spanner.admin.database.v1.RestoreInfo getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RestoreInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch 
(com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.RestoreInfo getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreInfoOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreInfoOrBuilder.java new file mode 100644 index 000000000000..1f7c2d94acea --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreInfoOrBuilder.java @@ -0,0 +1,96 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface RestoreInfoOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.RestoreInfo) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The type of the restore source.
    +   * 
    + * + * .google.spanner.admin.database.v1.RestoreSourceType source_type = 1; + * + * @return The enum numeric value on the wire for sourceType. + */ + int getSourceTypeValue(); + + /** + * + * + *
    +   * The type of the restore source.
    +   * 
    + * + * .google.spanner.admin.database.v1.RestoreSourceType source_type = 1; + * + * @return The sourceType. + */ + com.google.spanner.admin.database.v1.RestoreSourceType getSourceType(); + + /** + * + * + *
    +   * Information about the backup used to restore the database. The backup
    +   * may no longer exist.
    +   * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 2; + * + * @return Whether the backupInfo field is set. + */ + boolean hasBackupInfo(); + + /** + * + * + *
    +   * Information about the backup used to restore the database. The backup
    +   * may no longer exist.
    +   * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 2; + * + * @return The backupInfo. + */ + com.google.spanner.admin.database.v1.BackupInfo getBackupInfo(); + + /** + * + * + *
    +   * Information about the backup used to restore the database. The backup
    +   * may no longer exist.
    +   * 
    + * + * .google.spanner.admin.database.v1.BackupInfo backup_info = 2; + */ + com.google.spanner.admin.database.v1.BackupInfoOrBuilder getBackupInfoOrBuilder(); + + com.google.spanner.admin.database.v1.RestoreInfo.SourceInfoCase getSourceInfoCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreSourceType.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreSourceType.java new file mode 100644 index 000000000000..369e284b7fc7 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreSourceType.java @@ -0,0 +1,172 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Indicates the type of the restore source.
    + * 
    + * + * Protobuf enum {@code google.spanner.admin.database.v1.RestoreSourceType} + */ +@com.google.protobuf.Generated +public enum RestoreSourceType implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +   * No restore associated.
    +   * 
    + * + * TYPE_UNSPECIFIED = 0; + */ + TYPE_UNSPECIFIED(0), + /** + * + * + *
    +   * A backup was used as the source of the restore.
    +   * 
    + * + * BACKUP = 1; + */ + BACKUP(1), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "RestoreSourceType"); + } + + /** + * + * + *
    +   * No restore associated.
    +   * 
    + * + * TYPE_UNSPECIFIED = 0; + */ + public static final int TYPE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +   * A backup was used as the source of the restore.
    +   * 
    + * + * BACKUP = 1; + */ + public static final int BACKUP_VALUE = 1; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static RestoreSourceType valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static RestoreSourceType forNumber(int value) { + switch (value) { + case 0: + return TYPE_UNSPECIFIED; + case 1: + return BACKUP; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public RestoreSourceType findValueByNumber(int number) { + return RestoreSourceType.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final RestoreSourceType[] VALUES = values(); + + public static RestoreSourceType valueOf( + 
com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private RestoreSourceType(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.admin.database.v1.RestoreSourceType) +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/SpannerDatabaseAdminProto.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/SpannerDatabaseAdminProto.java new file mode 100644 index 000000000000..b66802a5d796 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/SpannerDatabaseAdminProto.java @@ -0,0 +1,814 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public final class SpannerDatabaseAdminProto extends com.google.protobuf.GeneratedFile { + private SpannerDatabaseAdminProto() {} + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "SpannerDatabaseAdminProto"); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_RestoreInfo_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_RestoreInfo_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_Database_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_Database_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_ListDatabasesRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_ListDatabasesRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_ListDatabasesResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_spanner_admin_database_v1_ListDatabasesResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_CreateDatabaseRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_CreateDatabaseRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_CreateDatabaseMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_CreateDatabaseMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_GetDatabaseRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_GetDatabaseRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_UpdateDatabaseRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_UpdateDatabaseRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_UpdateDatabaseMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_UpdateDatabaseMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_UpdateDatabaseDdlRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_UpdateDatabaseDdlRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_spanner_admin_database_v1_DdlStatementActionInfo_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_DdlStatementActionInfo_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_UpdateDatabaseDdlMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_UpdateDatabaseDdlMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_DropDatabaseRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_DropDatabaseRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_GetDatabaseDdlRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_GetDatabaseDdlRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_GetDatabaseDdlResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_GetDatabaseDdlResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_ListDatabaseOperationsRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_ListDatabaseOperationsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_ListDatabaseOperationsResponse_descriptor; + static final 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_ListDatabaseOperationsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_RestoreDatabaseRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_RestoreDatabaseRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_RestoreDatabaseEncryptionConfig_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_RestoreDatabaseEncryptionConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_RestoreDatabaseMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_RestoreDatabaseMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_OptimizeRestoredDatabaseMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_OptimizeRestoredDatabaseMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_DatabaseRole_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_DatabaseRole_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_ListDatabaseRolesRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_spanner_admin_database_v1_ListDatabaseRolesRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_ListDatabaseRolesResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_ListDatabaseRolesResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_AddSplitPointsRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_AddSplitPointsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_AddSplitPointsResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_AddSplitPointsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_SplitPoints_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_SplitPoints_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_SplitPoints_Key_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_SplitPoints_Key_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor 
+ internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationResponse_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n" + + "=google/spanner/admin/database/v1/spanner_database_admin.proto\022 google.spanner." + + "admin.database.v1\032\034google/api/annotation" + + "s.proto\032\027google/api/client.proto\032\037google" + + "/api/field_behavior.proto\032\031google/api/re" + + "source.proto\032\036google/iam/v1/iam_policy.p" + + "roto\032\032google/iam/v1/policy.proto\032#google" + + "/longrunning/operations.proto\032\033google/protobuf/empty.proto\032" + + " google/protobuf/field_mask.proto\032\034google/protobuf/struct.pro" + + "to\032\037google/protobuf/timestamp.proto\032\027goo" + + "gle/rpc/status.proto\032-google/spanner/admin/database/v1/backup.proto\0326google/span" + + "ner/admin/database/v1/backup_schedule.pr" + + "oto\032-google/spanner/admin/database/v1/common.proto\"\253\001\n" + + "\013RestoreInfo\022H\n" + + "\013source_type\030\001" + + " \001(\01623.google.spanner.admin.database.v1.RestoreSourceType\022C\n" + + "\013backup_info\030\002 \001(" + + "\0132,.google.spanner.admin.database.v1.BackupInfoH\000B\r\n" + + "\013source_info\"\312\006\n" + + "\010Database\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\002\022D\n" + + "\005state\030\002 \001(\01620.google" + + ".spanner.admin.database.v1.Database.StateB\003\340A\003\0224\n" + + "\013create_time\030\003 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022H\n" + + "\014restore_info\030\004 \001" + + "(\0132-.google.spanner.admin.database.v1.RestoreInfoB\003\340A\003\022R\n" + + 
"\021encryption_config\030\005 \001(" + + "\01322.google.spanner.admin.database.v1.EncryptionConfigB\003\340A\003\022N\n" + + "\017encryption_info\030\010 " + + "\003(\01320.google.spanner.admin.database.v1.EncryptionInfoB\003\340A\003\022%\n" + + "\030version_retention_period\030\006 \001(\tB\003\340A\003\022>\n" + + "\025earliest_version_time\030\007" + + " \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022\033\n" + + "\016default_leader\030\t \001(\tB\003\340A\003\022P\n" + + "\020database_dialect\030\n" + + " \001(\01621.google.spanner.admin.database.v1.DatabaseDialectB\003\340A\003\022\036\n" + + "\026enable_drop_protection\030\013 \001(\010\022\030\n" + + "\013reconciling\030\014 \001(\010B\003\340A\003\"M\n" + + "\005State\022\025\n" + + "\021STATE_UNSPECIFIED\020\000\022\014\n" + + "\010CREATING\020\001\022\t\n" + + "\005READY\020\002\022\024\n" + + "\020READY_OPTIMIZING\020\003:b\352A_\n" + + "\037spanner.googleapis.com/D" + + "atabase\022\332A\006parent\202" + + "\323\344\223\002/\022-/v1/{parent=projects/*/instances/*}/databases\022\244\002\n" + + "\016CreateDatabase\0227.google.spanner.admin.database.v1.CreateDatabas" + + "eRequest\032\035.google.longrunning.Operation\"\271\001\312Ad\n" + + ")google.spanner.admin.database.v1.Database\0227google.spanner.admin.database." + + "v1.CreateDatabaseMetadata\332A\027parent,creat" + + "e_statement\202\323\344\223\0022\"-/v1/{parent=projects/*/instances/*}/databases:\001*\022\255\001\n" + + "\013GetDatabase\0224.google.spanner.admin.database.v1.G" + + "etDatabaseRequest\032*.google.spanner.admin" + + ".database.v1.Database\"<\332A\004name\202\323\344\223\002/\022-/v" + + "1/{name=projects/*/instances/*/databases/*}\022\357\001\n" + + "\016UpdateDatabase\0227.google.spanner." 
+ + "admin.database.v1.UpdateDatabaseRequest\032\035.google.longrunning.Operation\"\204\001\312A\"\n" + + "\010Database\022\026UpdateDatabaseMetadata\332A\024databas" + + "e,update_mask\202\323\344\223\002B26/v1/{database.name=" + + "projects/*/instances/*/databases/*}:\010database\022\235\002\n" + + "\021UpdateDatabaseDdl\022:.google.spanner.admin.database.v1.UpdateDatabaseDdl" + + "Request\032\035.google.longrunning.Operation\"\254\001\312AS\n" + + "\025google.protobuf.Empty\022:google.spanner.admin.database.v1.UpdateDatabaseDdlM" + + "etadata\332A\023database,statements\202\323\344\223\002:25/v1" + + "/{database=projects/*/instances/*/databases/*}/ddl:\001*\022\243\001\n" + + "\014DropDatabase\0225.google.spanner.admin.database.v1.DropDatabaseRe" + + "quest\032\026.google.protobuf.Empty\"D\332A\010databa" + + "se\202\323\344\223\0023*1/v1/{database=projects/*/instances/*/databases/*}\022\315\001\n" + + "\016GetDatabaseDdl\0227.google.spanner.admin.database.v1.GetDat" + + "abaseDdlRequest\0328.google.spanner.admin.d" + + "atabase.v1.GetDatabaseDdlResponse\"H\332A\010da" + + "tabase\202\323\344\223\0027\0225/v1/{database=projects/*/instances/*/databases/*}/ddl\022\302\002\n" + + "\014SetIamPolicy\022\".google.iam.v1.SetIamPolicyRequest" + + "\032\025.google.iam.v1.Policy\"\366\001\332A\017resource,po" + + "licy\202\323\344\223\002\335\001\">/v1/{resource=projects/*/in" + + "stances/*/databases/*}:setIamPolicy:\001*ZA\"/v1/{resource=projects/*/inst" + + "ances/*/databases/*}:getIamPolicy:\001*ZA\".google.spanner.admin.database.v1.ListBacku" + + "pOperationsResponse\"E\332A\006parent\202\323\344\223\0026\0224/v" + + "1/{parent=projects/*/instances/*}/backupOperations\022\334\001\n" + + "\021ListDatabaseRoles\022:.google.spanner.admin.database.v1.ListDatabase" + + "RolesRequest\032;.google.spanner.admin.data" + + "base.v1.ListDatabaseRolesResponse\"N\332A\006pa" + + 
"rent\202\323\344\223\002?\022=/v1/{parent=projects/*/instances/*/databases/*}/databaseRoles\022\350\001\n" + + "\016AddSplitPoints\0227.google.spanner.admin.data" + + "base.v1.AddSplitPointsRequest\0328.google.spanner.admin.database.v1.AddSplitPointsR" + + "esponse\"c\332A\025database,split_points\202\323\344\223\002E\"" + + "@/v1/{database=projects/*/instances/*/databases/*}:addSplitPoints:\001*\022\216\002\n" + + "\024CreateBackupSchedule\022=.google.spanner.admin.dat" + + "abase.v1.CreateBackupScheduleRequest\0320.google.spanner.admin.database.v1.BackupSc" + + "hedule\"\204\001\332A)parent,backup_schedule,backu" + + "p_schedule_id\202\323\344\223\002R\"?/v1/{parent=project" + + "s/*/instances/*/databases/*}/backupSchedules:\017backup_schedule\022\321\001\n" + + "\021GetBackupSchedule\022:.google.spanner.admin.database.v1.G" + + "etBackupScheduleRequest\0320.google.spanner" + + ".admin.database.v1.BackupSchedule\"N\332A\004na" + + "me\202\323\344\223\002A\022?/v1/{name=projects/*/instances/*/databases/*/backupSchedules/*}\022\220\002\n" + + "\024UpdateBackupSchedule\022=.google.spanner.admi" + + "n.database.v1.UpdateBackupScheduleRequest\0320.google.spanner.admin.database.v1.Bac" + + "kupSchedule\"\206\001\332A\033backup_schedule,update_" + + "mask\202\323\344\223\002b2O/v1/{backup_schedule.name=pr" + + "ojects/*/instances/*/databases/*/backupSchedules/*}:\017backup_schedule\022\275\001\n" + + "\024DeleteBackupSchedule\022=.google.spanner.admin.dat" + + "abase.v1.DeleteBackupScheduleRequest\032\026.g" + + "oogle.protobuf.Empty\"N\332A\004name\202\323\344\223\002A*?/v1" + + "/{name=projects/*/instances/*/databases/*/backupSchedules/*}\022\344\001\n" + + "\023ListBackupSchedules\022<.google.spanner.admin.database.v1." 
+ + "ListBackupSchedulesRequest\032=.google.spanner.admin.database.v1.ListBackupSchedule" + + "sResponse\"P\332A\006parent\202\323\344\223\002A\022?/v1/{parent=" + + "projects/*/instances/*/databases/*}/backupSchedules\022\307\001\n" + + "\034InternalUpdateGraphOperation\022E.google.spanner.admin.database.v1." + + "InternalUpdateGraphOperationRequest\032F.google.spanner.admin.database.v1.InternalU" + + "pdateGraphOperationResponse\"\030\332A\025database" + + ",operation_id\032x\312A\026spanner.googleapis.com" + + "\322A\\https://www.googleapis.com/auth/cloud" + + "-platform,https://www.googleapis.com/auth/spanner.adminB\326\003\n" + + "$com.google.spanner.admin.database.v1B\031SpannerDatabaseAdminPr" + + "otoP\001ZFcloud.google.com/go/spanner/admin" + + "/database/apiv1/databasepb;databasepb\252\002&" + + "Google.Cloud.Spanner.Admin.Database.V1\312\002" + + "&Google\\Cloud\\Spanner\\Admin\\Database\\V1\352" + + "\002+Google::Cloud::Spanner::Admin::Database::V1\352AJ\n" + + "\037spanner.googleapis.com/Instanc" + + "e\022\'projects/{project}/instances/{instance}\352A{\n" + + "(spanner.googleapis.com/InstancePartition\022Oprojects/{project}/instances/{i" + + "nstance}/instancePartitions/{instance_partition}b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.AnnotationsProto.getDescriptor(), + com.google.api.ClientProto.getDescriptor(), + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.iam.v1.IamPolicyProto.getDescriptor(), + com.google.iam.v1.PolicyProto.getDescriptor(), + com.google.longrunning.OperationsProto.getDescriptor(), + com.google.protobuf.EmptyProto.getDescriptor(), + com.google.protobuf.FieldMaskProto.getDescriptor(), + com.google.protobuf.StructProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + 
com.google.rpc.StatusProto.getDescriptor(), + com.google.spanner.admin.database.v1.BackupProto.getDescriptor(), + com.google.spanner.admin.database.v1.BackupScheduleProto.getDescriptor(), + com.google.spanner.admin.database.v1.CommonProto.getDescriptor(), + }); + internal_static_google_spanner_admin_database_v1_RestoreInfo_descriptor = + getDescriptor().getMessageType(0); + internal_static_google_spanner_admin_database_v1_RestoreInfo_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_RestoreInfo_descriptor, + new java.lang.String[] { + "SourceType", "BackupInfo", "SourceInfo", + }); + internal_static_google_spanner_admin_database_v1_Database_descriptor = + getDescriptor().getMessageType(1); + internal_static_google_spanner_admin_database_v1_Database_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_Database_descriptor, + new java.lang.String[] { + "Name", + "State", + "CreateTime", + "RestoreInfo", + "EncryptionConfig", + "EncryptionInfo", + "VersionRetentionPeriod", + "EarliestVersionTime", + "DefaultLeader", + "DatabaseDialect", + "EnableDropProtection", + "Reconciling", + }); + internal_static_google_spanner_admin_database_v1_ListDatabasesRequest_descriptor = + getDescriptor().getMessageType(2); + internal_static_google_spanner_admin_database_v1_ListDatabasesRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_ListDatabasesRequest_descriptor, + new java.lang.String[] { + "Parent", "PageSize", "PageToken", + }); + internal_static_google_spanner_admin_database_v1_ListDatabasesResponse_descriptor = + getDescriptor().getMessageType(3); + internal_static_google_spanner_admin_database_v1_ListDatabasesResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_google_spanner_admin_database_v1_ListDatabasesResponse_descriptor, + new java.lang.String[] { + "Databases", "NextPageToken", + }); + internal_static_google_spanner_admin_database_v1_CreateDatabaseRequest_descriptor = + getDescriptor().getMessageType(4); + internal_static_google_spanner_admin_database_v1_CreateDatabaseRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_CreateDatabaseRequest_descriptor, + new java.lang.String[] { + "Parent", + "CreateStatement", + "ExtraStatements", + "EncryptionConfig", + "DatabaseDialect", + "ProtoDescriptors", + }); + internal_static_google_spanner_admin_database_v1_CreateDatabaseMetadata_descriptor = + getDescriptor().getMessageType(5); + internal_static_google_spanner_admin_database_v1_CreateDatabaseMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_CreateDatabaseMetadata_descriptor, + new java.lang.String[] { + "Database", + }); + internal_static_google_spanner_admin_database_v1_GetDatabaseRequest_descriptor = + getDescriptor().getMessageType(6); + internal_static_google_spanner_admin_database_v1_GetDatabaseRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_GetDatabaseRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_spanner_admin_database_v1_UpdateDatabaseRequest_descriptor = + getDescriptor().getMessageType(7); + internal_static_google_spanner_admin_database_v1_UpdateDatabaseRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_UpdateDatabaseRequest_descriptor, + new java.lang.String[] { + "Database", "UpdateMask", + }); + internal_static_google_spanner_admin_database_v1_UpdateDatabaseMetadata_descriptor = + 
getDescriptor().getMessageType(8); + internal_static_google_spanner_admin_database_v1_UpdateDatabaseMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_UpdateDatabaseMetadata_descriptor, + new java.lang.String[] { + "Request", "Progress", "CancelTime", + }); + internal_static_google_spanner_admin_database_v1_UpdateDatabaseDdlRequest_descriptor = + getDescriptor().getMessageType(9); + internal_static_google_spanner_admin_database_v1_UpdateDatabaseDdlRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_UpdateDatabaseDdlRequest_descriptor, + new java.lang.String[] { + "Database", "Statements", "OperationId", "ProtoDescriptors", "ThroughputMode", + }); + internal_static_google_spanner_admin_database_v1_DdlStatementActionInfo_descriptor = + getDescriptor().getMessageType(10); + internal_static_google_spanner_admin_database_v1_DdlStatementActionInfo_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_DdlStatementActionInfo_descriptor, + new java.lang.String[] { + "Action", "EntityType", "EntityNames", + }); + internal_static_google_spanner_admin_database_v1_UpdateDatabaseDdlMetadata_descriptor = + getDescriptor().getMessageType(11); + internal_static_google_spanner_admin_database_v1_UpdateDatabaseDdlMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_UpdateDatabaseDdlMetadata_descriptor, + new java.lang.String[] { + "Database", "Statements", "CommitTimestamps", "Throttled", "Progress", "Actions", + }); + internal_static_google_spanner_admin_database_v1_DropDatabaseRequest_descriptor = + getDescriptor().getMessageType(12); + internal_static_google_spanner_admin_database_v1_DropDatabaseRequest_fieldAccessorTable = + new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_DropDatabaseRequest_descriptor, + new java.lang.String[] { + "Database", + }); + internal_static_google_spanner_admin_database_v1_GetDatabaseDdlRequest_descriptor = + getDescriptor().getMessageType(13); + internal_static_google_spanner_admin_database_v1_GetDatabaseDdlRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_GetDatabaseDdlRequest_descriptor, + new java.lang.String[] { + "Database", + }); + internal_static_google_spanner_admin_database_v1_GetDatabaseDdlResponse_descriptor = + getDescriptor().getMessageType(14); + internal_static_google_spanner_admin_database_v1_GetDatabaseDdlResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_GetDatabaseDdlResponse_descriptor, + new java.lang.String[] { + "Statements", "ProtoDescriptors", + }); + internal_static_google_spanner_admin_database_v1_ListDatabaseOperationsRequest_descriptor = + getDescriptor().getMessageType(15); + internal_static_google_spanner_admin_database_v1_ListDatabaseOperationsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_ListDatabaseOperationsRequest_descriptor, + new java.lang.String[] { + "Parent", "Filter", "PageSize", "PageToken", + }); + internal_static_google_spanner_admin_database_v1_ListDatabaseOperationsResponse_descriptor = + getDescriptor().getMessageType(16); + internal_static_google_spanner_admin_database_v1_ListDatabaseOperationsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_ListDatabaseOperationsResponse_descriptor, + new java.lang.String[] { + "Operations", "NextPageToken", + }); + 
internal_static_google_spanner_admin_database_v1_RestoreDatabaseRequest_descriptor = + getDescriptor().getMessageType(17); + internal_static_google_spanner_admin_database_v1_RestoreDatabaseRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_RestoreDatabaseRequest_descriptor, + new java.lang.String[] { + "Parent", "DatabaseId", "Backup", "EncryptionConfig", "Source", + }); + internal_static_google_spanner_admin_database_v1_RestoreDatabaseEncryptionConfig_descriptor = + getDescriptor().getMessageType(18); + internal_static_google_spanner_admin_database_v1_RestoreDatabaseEncryptionConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_RestoreDatabaseEncryptionConfig_descriptor, + new java.lang.String[] { + "EncryptionType", "KmsKeyName", "KmsKeyNames", + }); + internal_static_google_spanner_admin_database_v1_RestoreDatabaseMetadata_descriptor = + getDescriptor().getMessageType(19); + internal_static_google_spanner_admin_database_v1_RestoreDatabaseMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_RestoreDatabaseMetadata_descriptor, + new java.lang.String[] { + "Name", + "SourceType", + "BackupInfo", + "Progress", + "CancelTime", + "OptimizeDatabaseOperationName", + "SourceInfo", + }); + internal_static_google_spanner_admin_database_v1_OptimizeRestoredDatabaseMetadata_descriptor = + getDescriptor().getMessageType(20); + internal_static_google_spanner_admin_database_v1_OptimizeRestoredDatabaseMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_OptimizeRestoredDatabaseMetadata_descriptor, + new java.lang.String[] { + "Name", "Progress", + }); + 
internal_static_google_spanner_admin_database_v1_DatabaseRole_descriptor = + getDescriptor().getMessageType(21); + internal_static_google_spanner_admin_database_v1_DatabaseRole_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_DatabaseRole_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_spanner_admin_database_v1_ListDatabaseRolesRequest_descriptor = + getDescriptor().getMessageType(22); + internal_static_google_spanner_admin_database_v1_ListDatabaseRolesRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_ListDatabaseRolesRequest_descriptor, + new java.lang.String[] { + "Parent", "PageSize", "PageToken", + }); + internal_static_google_spanner_admin_database_v1_ListDatabaseRolesResponse_descriptor = + getDescriptor().getMessageType(23); + internal_static_google_spanner_admin_database_v1_ListDatabaseRolesResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_ListDatabaseRolesResponse_descriptor, + new java.lang.String[] { + "DatabaseRoles", "NextPageToken", + }); + internal_static_google_spanner_admin_database_v1_AddSplitPointsRequest_descriptor = + getDescriptor().getMessageType(24); + internal_static_google_spanner_admin_database_v1_AddSplitPointsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_AddSplitPointsRequest_descriptor, + new java.lang.String[] { + "Database", "SplitPoints", "Initiator", + }); + internal_static_google_spanner_admin_database_v1_AddSplitPointsResponse_descriptor = + getDescriptor().getMessageType(25); + internal_static_google_spanner_admin_database_v1_AddSplitPointsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_google_spanner_admin_database_v1_AddSplitPointsResponse_descriptor, + new java.lang.String[] {}); + internal_static_google_spanner_admin_database_v1_SplitPoints_descriptor = + getDescriptor().getMessageType(26); + internal_static_google_spanner_admin_database_v1_SplitPoints_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_SplitPoints_descriptor, + new java.lang.String[] { + "Table", "Index", "Keys", "ExpireTime", + }); + internal_static_google_spanner_admin_database_v1_SplitPoints_Key_descriptor = + internal_static_google_spanner_admin_database_v1_SplitPoints_descriptor.getNestedType(0); + internal_static_google_spanner_admin_database_v1_SplitPoints_Key_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_SplitPoints_Key_descriptor, + new java.lang.String[] { + "KeyParts", + }); + internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationRequest_descriptor = + getDescriptor().getMessageType(27); + internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationRequest_descriptor, + new java.lang.String[] { + "Database", "OperationId", "VmIdentityToken", "Progress", "Status", + }); + internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationResponse_descriptor = + getDescriptor().getMessageType(28); + internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationResponse_descriptor, + new java.lang.String[] {}); + descriptor.resolveAllFeaturesImmutable(); + 
com.google.api.AnnotationsProto.getDescriptor(); + com.google.api.ClientProto.getDescriptor(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + com.google.iam.v1.IamPolicyProto.getDescriptor(); + com.google.iam.v1.PolicyProto.getDescriptor(); + com.google.longrunning.OperationsProto.getDescriptor(); + com.google.protobuf.EmptyProto.getDescriptor(); + com.google.protobuf.FieldMaskProto.getDescriptor(); + com.google.protobuf.StructProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + com.google.rpc.StatusProto.getDescriptor(); + com.google.spanner.admin.database.v1.BackupProto.getDescriptor(); + com.google.spanner.admin.database.v1.BackupScheduleProto.getDescriptor(); + com.google.spanner.admin.database.v1.CommonProto.getDescriptor(); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.ClientProto.defaultHost); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.AnnotationsProto.http); + registry.add(com.google.api.ClientProto.methodSignature); + registry.add(com.google.api.ClientProto.oauthScopes); + registry.add(com.google.api.ResourceProto.resource); + registry.add(com.google.api.ResourceProto.resourceDefinition); + registry.add(com.google.api.ResourceProto.resourceReference); + registry.add(com.google.longrunning.OperationsProto.operationInfo); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/SplitPoints.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/SplitPoints.java new file mode 100644 index 000000000000..33b5dd6f2c61 --- /dev/null +++ 
b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/SplitPoints.java @@ -0,0 +1,2423 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The split points of a table/index.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.SplitPoints} + */ +@com.google.protobuf.Generated +public final class SplitPoints extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.SplitPoints) + SplitPointsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "SplitPoints"); + } + + // Use SplitPoints.newBuilder() to construct. + private SplitPoints(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private SplitPoints() { + table_ = ""; + index_ = ""; + keys_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_SplitPoints_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_SplitPoints_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.SplitPoints.class, + com.google.spanner.admin.database.v1.SplitPoints.Builder.class); + } + + public interface KeyOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.SplitPoints.Key) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * Required. The column values making up the split key.
    +     * 
    + * + * .google.protobuf.ListValue key_parts = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the keyParts field is set. + */ + boolean hasKeyParts(); + + /** + * + * + *
    +     * Required. The column values making up the split key.
    +     * 
    + * + * .google.protobuf.ListValue key_parts = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The keyParts. + */ + com.google.protobuf.ListValue getKeyParts(); + + /** + * + * + *
    +     * Required. The column values making up the split key.
    +     * 
    + * + * .google.protobuf.ListValue key_parts = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.protobuf.ListValueOrBuilder getKeyPartsOrBuilder(); + } + + /** + * + * + *
    +   * A split key.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.SplitPoints.Key} + */ + public static final class Key extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.SplitPoints.Key) + KeyOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Key"); + } + + // Use Key.newBuilder() to construct. + private Key(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Key() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_SplitPoints_Key_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_SplitPoints_Key_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.SplitPoints.Key.class, + com.google.spanner.admin.database.v1.SplitPoints.Key.Builder.class); + } + + private int bitField0_; + public static final int KEY_PARTS_FIELD_NUMBER = 1; + private com.google.protobuf.ListValue keyParts_; + + /** + * + * + *
    +     * Required. The column values making up the split key.
    +     * 
    + * + * .google.protobuf.ListValue key_parts = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the keyParts field is set. + */ + @java.lang.Override + public boolean hasKeyParts() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Required. The column values making up the split key.
    +     * 
    + * + * .google.protobuf.ListValue key_parts = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The keyParts. + */ + @java.lang.Override + public com.google.protobuf.ListValue getKeyParts() { + return keyParts_ == null ? com.google.protobuf.ListValue.getDefaultInstance() : keyParts_; + } + + /** + * + * + *
    +     * Required. The column values making up the split key.
    +     * 
    + * + * .google.protobuf.ListValue key_parts = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.protobuf.ListValueOrBuilder getKeyPartsOrBuilder() { + return keyParts_ == null ? com.google.protobuf.ListValue.getDefaultInstance() : keyParts_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getKeyParts()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getKeyParts()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.SplitPoints.Key)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.SplitPoints.Key other = + (com.google.spanner.admin.database.v1.SplitPoints.Key) obj; + + if (hasKeyParts() != other.hasKeyParts()) return false; + if (hasKeyParts()) { + if (!getKeyParts().equals(other.getKeyParts())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasKeyParts()) { + hash = 
(37 * hash) + KEY_PARTS_FIELD_NUMBER; + hash = (53 * hash) + getKeyParts().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.SplitPoints.Key parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.SplitPoints.Key parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.SplitPoints.Key parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.SplitPoints.Key parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.SplitPoints.Key parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.SplitPoints.Key parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.SplitPoints.Key parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.SplitPoints.Key parseFrom( + 
java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.SplitPoints.Key parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.SplitPoints.Key parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.SplitPoints.Key parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.SplitPoints.Key parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.SplitPoints.Key prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * A split key.
    +     * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.SplitPoints.Key} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.SplitPoints.Key) + com.google.spanner.admin.database.v1.SplitPoints.KeyOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_SplitPoints_Key_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_SplitPoints_Key_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.SplitPoints.Key.class, + com.google.spanner.admin.database.v1.SplitPoints.Key.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.SplitPoints.Key.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetKeyPartsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + keyParts_ = null; + if (keyPartsBuilder_ != null) { + keyPartsBuilder_.dispose(); + keyPartsBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_SplitPoints_Key_descriptor; + } + + 
@java.lang.Override + public com.google.spanner.admin.database.v1.SplitPoints.Key getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.SplitPoints.Key.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.SplitPoints.Key build() { + com.google.spanner.admin.database.v1.SplitPoints.Key result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.SplitPoints.Key buildPartial() { + com.google.spanner.admin.database.v1.SplitPoints.Key result = + new com.google.spanner.admin.database.v1.SplitPoints.Key(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.SplitPoints.Key result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.keyParts_ = keyPartsBuilder_ == null ? 
keyParts_ : keyPartsBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.SplitPoints.Key) { + return mergeFrom((com.google.spanner.admin.database.v1.SplitPoints.Key) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.SplitPoints.Key other) { + if (other == com.google.spanner.admin.database.v1.SplitPoints.Key.getDefaultInstance()) + return this; + if (other.hasKeyParts()) { + mergeKeyParts(other.getKeyParts()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetKeyPartsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.ListValue keyParts_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + 
com.google.protobuf.ListValueOrBuilder> + keyPartsBuilder_; + + /** + * + * + *
    +       * Required. The column values making up the split key.
    +       * 
    + * + * .google.protobuf.ListValue key_parts = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the keyParts field is set. + */ + public boolean hasKeyParts() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +       * Required. The column values making up the split key.
    +       * 
    + * + * .google.protobuf.ListValue key_parts = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The keyParts. + */ + public com.google.protobuf.ListValue getKeyParts() { + if (keyPartsBuilder_ == null) { + return keyParts_ == null ? com.google.protobuf.ListValue.getDefaultInstance() : keyParts_; + } else { + return keyPartsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * Required. The column values making up the split key.
    +       * 
    + * + * .google.protobuf.ListValue key_parts = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setKeyParts(com.google.protobuf.ListValue value) { + if (keyPartsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + keyParts_ = value; + } else { + keyPartsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The column values making up the split key.
    +       * 
    + * + * .google.protobuf.ListValue key_parts = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setKeyParts(com.google.protobuf.ListValue.Builder builderForValue) { + if (keyPartsBuilder_ == null) { + keyParts_ = builderForValue.build(); + } else { + keyPartsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The column values making up the split key.
    +       * 
    + * + * .google.protobuf.ListValue key_parts = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeKeyParts(com.google.protobuf.ListValue value) { + if (keyPartsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && keyParts_ != null + && keyParts_ != com.google.protobuf.ListValue.getDefaultInstance()) { + getKeyPartsBuilder().mergeFrom(value); + } else { + keyParts_ = value; + } + } else { + keyPartsBuilder_.mergeFrom(value); + } + if (keyParts_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * Required. The column values making up the split key.
    +       * 
    + * + * .google.protobuf.ListValue key_parts = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearKeyParts() { + bitField0_ = (bitField0_ & ~0x00000001); + keyParts_ = null; + if (keyPartsBuilder_ != null) { + keyPartsBuilder_.dispose(); + keyPartsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The column values making up the split key.
    +       * 
    + * + * .google.protobuf.ListValue key_parts = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.ListValue.Builder getKeyPartsBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetKeyPartsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Required. The column values making up the split key.
    +       * 
    + * + * .google.protobuf.ListValue key_parts = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.ListValueOrBuilder getKeyPartsOrBuilder() { + if (keyPartsBuilder_ != null) { + return keyPartsBuilder_.getMessageOrBuilder(); + } else { + return keyParts_ == null ? com.google.protobuf.ListValue.getDefaultInstance() : keyParts_; + } + } + + /** + * + * + *
    +       * Required. The column values making up the split key.
    +       * 
    + * + * .google.protobuf.ListValue key_parts = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder> + internalGetKeyPartsFieldBuilder() { + if (keyPartsBuilder_ == null) { + keyPartsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder>( + getKeyParts(), getParentForChildren(), isClean()); + keyParts_ = null; + } + return keyPartsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.SplitPoints.Key) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.SplitPoints.Key) + private static final com.google.spanner.admin.database.v1.SplitPoints.Key DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.SplitPoints.Key(); + } + + public static com.google.spanner.admin.database.v1.SplitPoints.Key getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Key parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); 
+ } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.SplitPoints.Key getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int TABLE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object table_ = ""; + + /** + * + * + *
    +   * The table to split.
    +   * 
    + * + * string table = 1; + * + * @return The table. + */ + @java.lang.Override + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } + } + + /** + * + * + *
    +   * The table to split.
    +   * 
    + * + * string table = 1; + * + * @return The bytes for table. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INDEX_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object index_ = ""; + + /** + * + * + *
    +   * The index to split.
    +   * If specified, the `table` field must refer to the index's base table.
    +   * 
    + * + * string index = 2; + * + * @return The index. + */ + @java.lang.Override + public java.lang.String getIndex() { + java.lang.Object ref = index_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + index_ = s; + return s; + } + } + + /** + * + * + *
    +   * The index to split.
    +   * If specified, the `table` field must refer to the index's base table.
    +   * 
    + * + * string index = 2; + * + * @return The bytes for index. + */ + @java.lang.Override + public com.google.protobuf.ByteString getIndexBytes() { + java.lang.Object ref = index_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + index_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int KEYS_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private java.util.List keys_; + + /** + * + * + *
    +   * Required. The list of split keys, i.e., the split boundaries.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List getKeysList() { + return keys_; + } + + /** + * + * + *
    +   * Required. The list of split keys, i.e., the split boundaries.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List + getKeysOrBuilderList() { + return keys_; + } + + /** + * + * + *
    +   * Required. The list of split keys, i.e., the split boundaries.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public int getKeysCount() { + return keys_.size(); + } + + /** + * + * + *
    +   * Required. The list of split keys, i.e., the split boundaries.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.SplitPoints.Key getKeys(int index) { + return keys_.get(index); + } + + /** + * + * + *
    +   * Required. The list of split keys, i.e., the split boundaries.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.SplitPoints.KeyOrBuilder getKeysOrBuilder(int index) { + return keys_.get(index); + } + + public static final int EXPIRE_TIME_FIELD_NUMBER = 5; + private com.google.protobuf.Timestamp expireTime_; + + /** + * + * + *
    +   * Optional. The expiration timestamp of the split points.
    +   * A timestamp in the past means immediate expiration.
    +   * The maximum value can be 30 days in the future.
    +   * Defaults to 10 days in the future if not specified.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the expireTime field is set. + */ + @java.lang.Override + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Optional. The expiration timestamp of the split points.
    +   * A timestamp in the past means immediate expiration.
    +   * The maximum value can be 30 days in the future.
    +   * Defaults to 10 days in the future if not specified.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The expireTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getExpireTime() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + /** + * + * + *
    +   * Optional. The expiration timestamp of the split points.
    +   * A timestamp in the past means immediate expiration.
    +   * The maximum value can be 30 days in the future.
    +   * Defaults to 10 days in the future if not specified.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(table_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, table_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(index_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, index_); + } + for (int i = 0; i < keys_.size(); i++) { + output.writeMessage(3, keys_.get(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(5, getExpireTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(table_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, table_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(index_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, index_); + } + for (int i = 0; i < keys_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, keys_.get(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getExpireTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return 
size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.SplitPoints)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.SplitPoints other = + (com.google.spanner.admin.database.v1.SplitPoints) obj; + + if (!getTable().equals(other.getTable())) return false; + if (!getIndex().equals(other.getIndex())) return false; + if (!getKeysList().equals(other.getKeysList())) return false; + if (hasExpireTime() != other.hasExpireTime()) return false; + if (hasExpireTime()) { + if (!getExpireTime().equals(other.getExpireTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + hash = (37 * hash) + INDEX_FIELD_NUMBER; + hash = (53 * hash) + getIndex().hashCode(); + if (getKeysCount() > 0) { + hash = (37 * hash) + KEYS_FIELD_NUMBER; + hash = (53 * hash) + getKeysList().hashCode(); + } + if (hasExpireTime()) { + hash = (37 * hash) + EXPIRE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getExpireTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.SplitPoints parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.SplitPoints parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public 
static com.google.spanner.admin.database.v1.SplitPoints parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.SplitPoints parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.SplitPoints parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.SplitPoints parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.SplitPoints parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.SplitPoints parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.SplitPoints parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.SplitPoints parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.SplitPoints parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.SplitPoints parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.admin.database.v1.SplitPoints prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The split points of a table/index.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.SplitPoints} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.SplitPoints) + com.google.spanner.admin.database.v1.SplitPointsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_SplitPoints_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_SplitPoints_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.SplitPoints.class, + com.google.spanner.admin.database.v1.SplitPoints.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.SplitPoints.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetKeysFieldBuilder(); + internalGetExpireTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + table_ = ""; + index_ = ""; + if (keysBuilder_ == null) { + keys_ = java.util.Collections.emptyList(); + } else { + keys_ = null; + keysBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + expireTime_ = null; + if (expireTimeBuilder_ != null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_SplitPoints_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.SplitPoints getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.SplitPoints.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.SplitPoints build() { + com.google.spanner.admin.database.v1.SplitPoints result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.SplitPoints buildPartial() { + com.google.spanner.admin.database.v1.SplitPoints result = + new com.google.spanner.admin.database.v1.SplitPoints(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.admin.database.v1.SplitPoints result) { + if (keysBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0)) { + keys_ = java.util.Collections.unmodifiableList(keys_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.keys_ = keys_; + } else { + result.keys_ = keysBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.admin.database.v1.SplitPoints result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.table_ = table_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.index_ = index_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.expireTime_ = expireTimeBuilder_ == null ? 
expireTime_ : expireTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.SplitPoints) { + return mergeFrom((com.google.spanner.admin.database.v1.SplitPoints) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.SplitPoints other) { + if (other == com.google.spanner.admin.database.v1.SplitPoints.getDefaultInstance()) + return this; + if (!other.getTable().isEmpty()) { + table_ = other.table_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getIndex().isEmpty()) { + index_ = other.index_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (keysBuilder_ == null) { + if (!other.keys_.isEmpty()) { + if (keys_.isEmpty()) { + keys_ = other.keys_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureKeysIsMutable(); + keys_.addAll(other.keys_); + } + onChanged(); + } + } else { + if (!other.keys_.isEmpty()) { + if (keysBuilder_.isEmpty()) { + keysBuilder_.dispose(); + keysBuilder_ = null; + keys_ = other.keys_; + bitField0_ = (bitField0_ & ~0x00000004); + keysBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetKeysFieldBuilder() + : null; + } else { + keysBuilder_.addAllMessages(other.keys_); + } + } + } + if (other.hasExpireTime()) { + mergeExpireTime(other.getExpireTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + table_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + index_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + com.google.spanner.admin.database.v1.SplitPoints.Key m = + input.readMessage( + com.google.spanner.admin.database.v1.SplitPoints.Key.parser(), + extensionRegistry); + if (keysBuilder_ == null) { + ensureKeysIsMutable(); + keys_.add(m); + } else { + keysBuilder_.addMessage(m); + } + break; + } // case 26 + case 42: + { + input.readMessage( + internalGetExpireTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object table_ = ""; + + /** + * + * + *
    +     * The table to split.
    +     * 
    + * + * string table = 1; + * + * @return The table. + */ + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The table to split.
    +     * 
    + * + * string table = 1; + * + * @return The bytes for table. + */ + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The table to split.
    +     * 
    + * + * string table = 1; + * + * @param value The table to set. + * @return This builder for chaining. + */ + public Builder setTable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The table to split.
    +     * 
    + * + * string table = 1; + * + * @return This builder for chaining. + */ + public Builder clearTable() { + table_ = getDefaultInstance().getTable(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The table to split.
    +     * 
    + * + * string table = 1; + * + * @param value The bytes for table to set. + * @return This builder for chaining. + */ + public Builder setTableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + table_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object index_ = ""; + + /** + * + * + *
    +     * The index to split.
    +     * If specified, the `table` field must refer to the index's base table.
    +     * 
    + * + * string index = 2; + * + * @return The index. + */ + public java.lang.String getIndex() { + java.lang.Object ref = index_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + index_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The index to split.
    +     * If specified, the `table` field must refer to the index's base table.
    +     * 
    + * + * string index = 2; + * + * @return The bytes for index. + */ + public com.google.protobuf.ByteString getIndexBytes() { + java.lang.Object ref = index_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + index_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The index to split.
    +     * If specified, the `table` field must refer to the index's base table.
    +     * 
    + * + * string index = 2; + * + * @param value The index to set. + * @return This builder for chaining. + */ + public Builder setIndex(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + index_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The index to split.
    +     * If specified, the `table` field must refer to the index's base table.
    +     * 
    + * + * string index = 2; + * + * @return This builder for chaining. + */ + public Builder clearIndex() { + index_ = getDefaultInstance().getIndex(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The index to split.
    +     * If specified, the `table` field must refer to the index's base table.
    +     * 
    + * + * string index = 2; + * + * @param value The bytes for index to set. + * @return This builder for chaining. + */ + public Builder setIndexBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + index_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.util.List keys_ = + java.util.Collections.emptyList(); + + private void ensureKeysIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + keys_ = + new java.util.ArrayList(keys_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.SplitPoints.Key, + com.google.spanner.admin.database.v1.SplitPoints.Key.Builder, + com.google.spanner.admin.database.v1.SplitPoints.KeyOrBuilder> + keysBuilder_; + + /** + * + * + *
    +     * Required. The list of split keys, i.e., the split boundaries.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List getKeysList() { + if (keysBuilder_ == null) { + return java.util.Collections.unmodifiableList(keys_); + } else { + return keysBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Required. The list of split keys, i.e., the split boundaries.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public int getKeysCount() { + if (keysBuilder_ == null) { + return keys_.size(); + } else { + return keysBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Required. The list of split keys, i.e., the split boundaries.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.SplitPoints.Key getKeys(int index) { + if (keysBuilder_ == null) { + return keys_.get(index); + } else { + return keysBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Required. The list of split keys, i.e., the split boundaries.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setKeys(int index, com.google.spanner.admin.database.v1.SplitPoints.Key value) { + if (keysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureKeysIsMutable(); + keys_.set(index, value); + onChanged(); + } else { + keysBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Required. The list of split keys, i.e., the split boundaries.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setKeys( + int index, com.google.spanner.admin.database.v1.SplitPoints.Key.Builder builderForValue) { + if (keysBuilder_ == null) { + ensureKeysIsMutable(); + keys_.set(index, builderForValue.build()); + onChanged(); + } else { + keysBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Required. The list of split keys, i.e., the split boundaries.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addKeys(com.google.spanner.admin.database.v1.SplitPoints.Key value) { + if (keysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureKeysIsMutable(); + keys_.add(value); + onChanged(); + } else { + keysBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Required. The list of split keys, i.e., the split boundaries.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addKeys(int index, com.google.spanner.admin.database.v1.SplitPoints.Key value) { + if (keysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureKeysIsMutable(); + keys_.add(index, value); + onChanged(); + } else { + keysBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Required. The list of split keys, i.e., the split boundaries.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addKeys( + com.google.spanner.admin.database.v1.SplitPoints.Key.Builder builderForValue) { + if (keysBuilder_ == null) { + ensureKeysIsMutable(); + keys_.add(builderForValue.build()); + onChanged(); + } else { + keysBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Required. The list of split keys, i.e., the split boundaries.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addKeys( + int index, com.google.spanner.admin.database.v1.SplitPoints.Key.Builder builderForValue) { + if (keysBuilder_ == null) { + ensureKeysIsMutable(); + keys_.add(index, builderForValue.build()); + onChanged(); + } else { + keysBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Required. The list of split keys, i.e., the split boundaries.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addAllKeys( + java.lang.Iterable values) { + if (keysBuilder_ == null) { + ensureKeysIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, keys_); + onChanged(); + } else { + keysBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Required. The list of split keys, i.e., the split boundaries.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearKeys() { + if (keysBuilder_ == null) { + keys_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + keysBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Required. The list of split keys, i.e., the split boundaries.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder removeKeys(int index) { + if (keysBuilder_ == null) { + ensureKeysIsMutable(); + keys_.remove(index); + onChanged(); + } else { + keysBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Required. The list of split keys, i.e., the split boundaries.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.SplitPoints.Key.Builder getKeysBuilder(int index) { + return internalGetKeysFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Required. The list of split keys, i.e., the split boundaries.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.SplitPoints.KeyOrBuilder getKeysOrBuilder( + int index) { + if (keysBuilder_ == null) { + return keys_.get(index); + } else { + return keysBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Required. The list of split keys, i.e., the split boundaries.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getKeysOrBuilderList() { + if (keysBuilder_ != null) { + return keysBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(keys_); + } + } + + /** + * + * + *
    +     * Required. The list of split keys, i.e., the split boundaries.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.SplitPoints.Key.Builder addKeysBuilder() { + return internalGetKeysFieldBuilder() + .addBuilder(com.google.spanner.admin.database.v1.SplitPoints.Key.getDefaultInstance()); + } + + /** + * + * + *
    +     * Required. The list of split keys, i.e., the split boundaries.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.SplitPoints.Key.Builder addKeysBuilder(int index) { + return internalGetKeysFieldBuilder() + .addBuilder( + index, com.google.spanner.admin.database.v1.SplitPoints.Key.getDefaultInstance()); + } + + /** + * + * + *
    +     * Required. The list of split keys, i.e., the split boundaries.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getKeysBuilderList() { + return internalGetKeysFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.SplitPoints.Key, + com.google.spanner.admin.database.v1.SplitPoints.Key.Builder, + com.google.spanner.admin.database.v1.SplitPoints.KeyOrBuilder> + internalGetKeysFieldBuilder() { + if (keysBuilder_ == null) { + keysBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.SplitPoints.Key, + com.google.spanner.admin.database.v1.SplitPoints.Key.Builder, + com.google.spanner.admin.database.v1.SplitPoints.KeyOrBuilder>( + keys_, ((bitField0_ & 0x00000004) != 0), getParentForChildren(), isClean()); + keys_ = null; + } + return keysBuilder_; + } + + private com.google.protobuf.Timestamp expireTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + expireTimeBuilder_; + + /** + * + * + *
    +     * Optional. The expiration timestamp of the split points.
    +     * A timestamp in the past means immediate expiration.
    +     * The maximum value can be 30 days in the future.
    +     * Defaults to 10 days in the future if not specified.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the expireTime field is set. + */ + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Optional. The expiration timestamp of the split points.
    +     * A timestamp in the past means immediate expiration.
    +     * The maximum value can be 30 days in the future.
    +     * Defaults to 10 days in the future if not specified.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The expireTime. + */ + public com.google.protobuf.Timestamp getExpireTime() { + if (expireTimeBuilder_ == null) { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } else { + return expireTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. The expiration timestamp of the split points.
    +     * A timestamp in the past means immediate expiration.
    +     * The maximum value can be 30 days in the future.
    +     * Defaults to 10 days in the future if not specified.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + expireTime_ = value; + } else { + expireTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The expiration timestamp of the split points.
    +     * A timestamp in the past means immediate expiration.
    +     * The maximum value can be 30 days in the future.
    +     * Defaults to 10 days in the future if not specified.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setExpireTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (expireTimeBuilder_ == null) { + expireTime_ = builderForValue.build(); + } else { + expireTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The expiration timestamp of the split points.
    +     * A timestamp in the past means immediate expiration.
    +     * The maximum value can be 30 days in the future.
    +     * Defaults to 10 days in the future if not specified.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && expireTime_ != null + && expireTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getExpireTimeBuilder().mergeFrom(value); + } else { + expireTime_ = value; + } + } else { + expireTimeBuilder_.mergeFrom(value); + } + if (expireTime_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. The expiration timestamp of the split points.
    +     * A timestamp in the past means immediate expiration.
    +     * The maximum value can be 30 days in the future.
    +     * Defaults to 10 days in the future if not specified.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearExpireTime() { + bitField0_ = (bitField0_ & ~0x00000008); + expireTime_ = null; + if (expireTimeBuilder_ != null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The expiration timestamp of the split points.
    +     * A timestamp in the past means immediate expiration.
    +     * The maximum value can be 30 days in the future.
    +     * Defaults to 10 days in the future if not specified.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Timestamp.Builder getExpireTimeBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetExpireTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. The expiration timestamp of the split points.
    +     * A timestamp in the past means immediate expiration.
    +     * The maximum value can be 30 days in the future.
    +     * Defaults to 10 days in the future if not specified.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + if (expireTimeBuilder_ != null) { + return expireTimeBuilder_.getMessageOrBuilder(); + } else { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } + } + + /** + * + * + *
    +     * Optional. The expiration timestamp of the split points.
    +     * A timestamp in the past means immediate expiration.
    +     * The maximum value can be 30 days in the future.
    +     * Defaults to 10 days in the future if not specified.
    +     * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetExpireTimeFieldBuilder() { + if (expireTimeBuilder_ == null) { + expireTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getExpireTime(), getParentForChildren(), isClean()); + expireTime_ = null; + } + return expireTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.SplitPoints) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.SplitPoints) + private static final com.google.spanner.admin.database.v1.SplitPoints DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.SplitPoints(); + } + + public static com.google.spanner.admin.database.v1.SplitPoints getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SplitPoints parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + 
} + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.SplitPoints getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/SplitPointsOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/SplitPointsOrBuilder.java new file mode 100644 index 000000000000..88720dd4b4ba --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/SplitPointsOrBuilder.java @@ -0,0 +1,197 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface SplitPointsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.SplitPoints) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The table to split.
    +   * 
    + * + * string table = 1; + * + * @return The table. + */ + java.lang.String getTable(); + + /** + * + * + *
    +   * The table to split.
    +   * 
    + * + * string table = 1; + * + * @return The bytes for table. + */ + com.google.protobuf.ByteString getTableBytes(); + + /** + * + * + *
    +   * The index to split.
    +   * If specified, the `table` field must refer to the index's base table.
    +   * 
    + * + * string index = 2; + * + * @return The index. + */ + java.lang.String getIndex(); + + /** + * + * + *
    +   * The index to split.
    +   * If specified, the `table` field must refer to the index's base table.
    +   * 
    + * + * string index = 2; + * + * @return The bytes for index. + */ + com.google.protobuf.ByteString getIndexBytes(); + + /** + * + * + *
    +   * Required. The list of split keys, i.e., the split boundaries.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List getKeysList(); + + /** + * + * + *
    +   * Required. The list of split keys, i.e., the split boundaries.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.database.v1.SplitPoints.Key getKeys(int index); + + /** + * + * + *
    +   * Required. The list of split keys, i.e., the split boundaries.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + int getKeysCount(); + + /** + * + * + *
    +   * Required. The list of split keys, i.e., the split boundaries.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List + getKeysOrBuilderList(); + + /** + * + * + *
    +   * Required. The list of split keys, i.e., the split boundaries.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.SplitPoints.Key keys = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.database.v1.SplitPoints.KeyOrBuilder getKeysOrBuilder(int index); + + /** + * + * + *
    +   * Optional. The expiration timestamp of the split points.
    +   * A timestamp in the past means immediate expiration.
    +   * The maximum value can be 30 days in the future.
    +   * Defaults to 10 days in the future if not specified.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the expireTime field is set. + */ + boolean hasExpireTime(); + + /** + * + * + *
    +   * Optional. The expiration timestamp of the split points.
    +   * A timestamp in the past means immediate expiration.
    +   * The maximum value can be 30 days in the future.
    +   * Defaults to 10 days in the future if not specified.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The expireTime. + */ + com.google.protobuf.Timestamp getExpireTime(); + + /** + * + * + *
    +   * Optional. The expiration timestamp of the split points.
    +   * A timestamp in the past means immediate expiration.
    +   * The maximum value can be 30 days in the future.
    +   * Defaults to 10 days in the future if not specified.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupRequest.java new file mode 100644 index 000000000000..ddc07036320c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupRequest.java @@ -0,0 +1,1097 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.UpdateBackupRequest} + */ +@com.google.protobuf.Generated +public final class UpdateBackupRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.UpdateBackupRequest) + UpdateBackupRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateBackupRequest"); + } + + // Use UpdateBackupRequest.newBuilder() to construct. + private UpdateBackupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateBackupRequest() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_UpdateBackupRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_UpdateBackupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.UpdateBackupRequest.class, + com.google.spanner.admin.database.v1.UpdateBackupRequest.Builder.class); + } + + private int bitField0_; + public static final int BACKUP_FIELD_NUMBER = 1; + private com.google.spanner.admin.database.v1.Backup backup_; + + /** + * + * + *
    +   * Required. The backup to update. `backup.name`, and the fields to be updated
    +   * as specified by `update_mask` are required. Other fields are ignored.
    +   * Update is only supported for the following fields:
    +   * * `backup.expire_time`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the backup field is set. + */ + @java.lang.Override + public boolean hasBackup() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Required. The backup to update. `backup.name`, and the fields to be updated
    +   * as specified by `update_mask` are required. Other fields are ignored.
    +   * Update is only supported for the following fields:
    +   * * `backup.expire_time`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The backup. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.Backup getBackup() { + return backup_ == null + ? com.google.spanner.admin.database.v1.Backup.getDefaultInstance() + : backup_; + } + + /** + * + * + *
    +   * Required. The backup to update. `backup.name`, and the fields to be updated
    +   * as specified by `update_mask` are required. Other fields are ignored.
    +   * Update is only supported for the following fields:
    +   * * `backup.expire_time`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupOrBuilder getBackupOrBuilder() { + return backup_ == null + ? com.google.spanner.admin.database.v1.Backup.getDefaultInstance() + : backup_; + } + + public static final int UPDATE_MASK_FIELD_NUMBER = 2; + private com.google.protobuf.FieldMask updateMask_; + + /** + * + * + *
    +   * Required. A mask specifying which fields (e.g. `expire_time`) in the
    +   * Backup resource should be updated. This mask is relative to the Backup
    +   * resource, not to the request message. The field mask must always be
    +   * specified; this prevents any future fields from being erased accidentally
    +   * by clients that do not know about them.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + @java.lang.Override + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Required. A mask specifying which fields (e.g. `expire_time`) in the
    +   * Backup resource should be updated. This mask is relative to the Backup
    +   * resource, not to the request message. The field mask must always be
    +   * specified; this prevents any future fields from being erased accidentally
    +   * by clients that do not know about them.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getUpdateMask() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + /** + * + * + *
    +   * Required. A mask specifying which fields (e.g. `expire_time`) in the
    +   * Backup resource should be updated. This mask is relative to the Backup
    +   * resource, not to the request message. The field mask must always be
    +   * specified; this prevents any future fields from being erased accidentally
    +   * by clients that do not know about them.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getBackup()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getUpdateMask()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getBackup()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getUpdateMask()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.UpdateBackupRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.UpdateBackupRequest other = + (com.google.spanner.admin.database.v1.UpdateBackupRequest) obj; + + if (hasBackup() != other.hasBackup()) return false; + if (hasBackup()) { + if (!getBackup().equals(other.getBackup())) return false; + } + if (hasUpdateMask() != other.hasUpdateMask()) return false; + if 
(hasUpdateMask()) { + if (!getUpdateMask().equals(other.getUpdateMask())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasBackup()) { + hash = (37 * hash) + BACKUP_FIELD_NUMBER; + hash = (53 * hash) + getBackup().hashCode(); + } + if (hasUpdateMask()) { + hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; + hash = (53 * hash) + getUpdateMask().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.UpdateBackupRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.admin.database.v1.UpdateBackupRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override 
+ public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.UpdateBackupRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.UpdateBackupRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.UpdateBackupRequest) + com.google.spanner.admin.database.v1.UpdateBackupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_UpdateBackupRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_UpdateBackupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.UpdateBackupRequest.class, + com.google.spanner.admin.database.v1.UpdateBackupRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.UpdateBackupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetBackupFieldBuilder(); + internalGetUpdateMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + backup_ = null; + if (backupBuilder_ != null) { + backupBuilder_.dispose(); + backupBuilder_ = null; + } + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_UpdateBackupRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateBackupRequest getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.UpdateBackupRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateBackupRequest build() { + com.google.spanner.admin.database.v1.UpdateBackupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateBackupRequest buildPartial() { + com.google.spanner.admin.database.v1.UpdateBackupRequest result = + new com.google.spanner.admin.database.v1.UpdateBackupRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.UpdateBackupRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.backup_ = backupBuilder_ == null ? backup_ : backupBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.updateMask_ = updateMaskBuilder_ == null ? 
updateMask_ : updateMaskBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.UpdateBackupRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.UpdateBackupRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.UpdateBackupRequest other) { + if (other == com.google.spanner.admin.database.v1.UpdateBackupRequest.getDefaultInstance()) + return this; + if (other.hasBackup()) { + mergeBackup(other.getBackup()); + } + if (other.hasUpdateMask()) { + mergeUpdateMask(other.getUpdateMask()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(internalGetBackupFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetUpdateMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // 
finally + return this; + } + + private int bitField0_; + + private com.google.spanner.admin.database.v1.Backup backup_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.Backup, + com.google.spanner.admin.database.v1.Backup.Builder, + com.google.spanner.admin.database.v1.BackupOrBuilder> + backupBuilder_; + + /** + * + * + *
    +     * Required. The backup to update. `backup.name`, and the fields to be updated
    +     * as specified by `update_mask` are required. Other fields are ignored.
    +     * Update is only supported for the following fields:
    +     * * `backup.expire_time`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the backup field is set. + */ + public boolean hasBackup() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Required. The backup to update. `backup.name`, and the fields to be updated
    +     * as specified by `update_mask` are required. Other fields are ignored.
    +     * Update is only supported for the following fields:
    +     * * `backup.expire_time`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The backup. + */ + public com.google.spanner.admin.database.v1.Backup getBackup() { + if (backupBuilder_ == null) { + return backup_ == null + ? com.google.spanner.admin.database.v1.Backup.getDefaultInstance() + : backup_; + } else { + return backupBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. The backup to update. `backup.name`, and the fields to be updated
    +     * as specified by `update_mask` are required. Other fields are ignored.
    +     * Update is only supported for the following fields:
    +     * * `backup.expire_time`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setBackup(com.google.spanner.admin.database.v1.Backup value) { + if (backupBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + backup_ = value; + } else { + backupBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The backup to update. `backup.name`, and the fields to be updated
    +     * as specified by `update_mask` are required. Other fields are ignored.
    +     * Update is only supported for the following fields:
    +     * * `backup.expire_time`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setBackup(com.google.spanner.admin.database.v1.Backup.Builder builderForValue) { + if (backupBuilder_ == null) { + backup_ = builderForValue.build(); + } else { + backupBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The backup to update. `backup.name`, and the fields to be updated
    +     * as specified by `update_mask` are required. Other fields are ignored.
    +     * Update is only supported for the following fields:
    +     * * `backup.expire_time`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeBackup(com.google.spanner.admin.database.v1.Backup value) { + if (backupBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && backup_ != null + && backup_ != com.google.spanner.admin.database.v1.Backup.getDefaultInstance()) { + getBackupBuilder().mergeFrom(value); + } else { + backup_ = value; + } + } else { + backupBuilder_.mergeFrom(value); + } + if (backup_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. The backup to update. `backup.name`, and the fields to be updated
    +     * as specified by `update_mask` are required. Other fields are ignored.
    +     * Update is only supported for the following fields:
    +     * * `backup.expire_time`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearBackup() { + bitField0_ = (bitField0_ & ~0x00000001); + backup_ = null; + if (backupBuilder_ != null) { + backupBuilder_.dispose(); + backupBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The backup to update. `backup.name`, and the fields to be updated
    +     * as specified by `update_mask` are required. Other fields are ignored.
    +     * Update is only supported for the following fields:
    +     * * `backup.expire_time`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.Backup.Builder getBackupBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetBackupFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. The backup to update. `backup.name`, and the fields to be updated
    +     * as specified by `update_mask` are required. Other fields are ignored.
    +     * Update is only supported for the following fields:
    +     * * `backup.expire_time`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.BackupOrBuilder getBackupOrBuilder() { + if (backupBuilder_ != null) { + return backupBuilder_.getMessageOrBuilder(); + } else { + return backup_ == null + ? com.google.spanner.admin.database.v1.Backup.getDefaultInstance() + : backup_; + } + } + + /** + * + * + *
    +     * Required. The backup to update. `backup.name`, and the fields to be updated
    +     * as specified by `update_mask` are required. Other fields are ignored.
    +     * Update is only supported for the following fields:
    +     * * `backup.expire_time`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.Backup, + com.google.spanner.admin.database.v1.Backup.Builder, + com.google.spanner.admin.database.v1.BackupOrBuilder> + internalGetBackupFieldBuilder() { + if (backupBuilder_ == null) { + backupBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.Backup, + com.google.spanner.admin.database.v1.Backup.Builder, + com.google.spanner.admin.database.v1.BackupOrBuilder>( + getBackup(), getParentForChildren(), isClean()); + backup_ = null; + } + return backupBuilder_; + } + + private com.google.protobuf.FieldMask updateMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + updateMaskBuilder_; + + /** + * + * + *
    +     * Required. A mask specifying which fields (e.g. `expire_time`) in the
    +     * Backup resource should be updated. This mask is relative to the Backup
    +     * resource, not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased accidentally
    +     * by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Required. A mask specifying which fields (e.g. `expire_time`) in the
    +     * Backup resource should be updated. This mask is relative to the Backup
    +     * resource, not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased accidentally
    +     * by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + public com.google.protobuf.FieldMask getUpdateMask() { + if (updateMaskBuilder_ == null) { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } else { + return updateMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. A mask specifying which fields (e.g. `expire_time`) in the
    +     * Backup resource should be updated. This mask is relative to the Backup
    +     * resource, not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased accidentally
    +     * by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateMask_ = value; + } else { + updateMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A mask specifying which fields (e.g. `expire_time`) in the
    +     * Backup resource should be updated. This mask is relative to the Backup
    +     * resource, not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased accidentally
    +     * by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (updateMaskBuilder_ == null) { + updateMask_ = builderForValue.build(); + } else { + updateMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A mask specifying which fields (e.g. `expire_time`) in the
    +     * Backup resource should be updated. This mask is relative to the Backup
    +     * resource, not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased accidentally
    +     * by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && updateMask_ != null + && updateMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getUpdateMaskBuilder().mergeFrom(value); + } else { + updateMask_ = value; + } + } else { + updateMaskBuilder_.mergeFrom(value); + } + if (updateMask_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. A mask specifying which fields (e.g. `expire_time`) in the
    +     * Backup resource should be updated. This mask is relative to the Backup
    +     * resource, not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased accidentally
    +     * by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearUpdateMask() { + bitField0_ = (bitField0_ & ~0x00000002); + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A mask specifying which fields (e.g. `expire_time`) in the
    +     * Backup resource should be updated. This mask is relative to the Backup
    +     * resource, not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased accidentally
    +     * by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetUpdateMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. A mask specifying which fields (e.g. `expire_time`) in the
    +     * Backup resource should be updated. This mask is relative to the Backup
    +     * resource, not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased accidentally
    +     * by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + if (updateMaskBuilder_ != null) { + return updateMaskBuilder_.getMessageOrBuilder(); + } else { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } + } + + /** + * + * + *
    +     * Required. A mask specifying which fields (e.g. `expire_time`) in the
    +     * Backup resource should be updated. This mask is relative to the Backup
    +     * resource, not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased accidentally
    +     * by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetUpdateMaskFieldBuilder() { + if (updateMaskBuilder_ == null) { + updateMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getUpdateMask(), getParentForChildren(), isClean()); + updateMask_ = null; + } + return updateMaskBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.UpdateBackupRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.UpdateBackupRequest) + private static final com.google.spanner.admin.database.v1.UpdateBackupRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.UpdateBackupRequest(); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateBackupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateBackupRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupRequestOrBuilder.java new file mode 100644 index 000000000000..a2da879952b3 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupRequestOrBuilder.java @@ -0,0 +1,132 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface UpdateBackupRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.UpdateBackupRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The backup to update. `backup.name`, and the fields to be updated
    +   * as specified by `update_mask` are required. Other fields are ignored.
    +   * Update is only supported for the following fields:
    +   * * `backup.expire_time`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the backup field is set. + */ + boolean hasBackup(); + + /** + * + * + *
    +   * Required. The backup to update. `backup.name`, and the fields to be updated
    +   * as specified by `update_mask` are required. Other fields are ignored.
    +   * Update is only supported for the following fields:
    +   * * `backup.expire_time`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The backup. + */ + com.google.spanner.admin.database.v1.Backup getBackup(); + + /** + * + * + *
    +   * Required. The backup to update. `backup.name`, and the fields to be updated
    +   * as specified by `update_mask` are required. Other fields are ignored.
    +   * Update is only supported for the following fields:
    +   * * `backup.expire_time`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Backup backup = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.database.v1.BackupOrBuilder getBackupOrBuilder(); + + /** + * + * + *
    +   * Required. A mask specifying which fields (e.g. `expire_time`) in the
    +   * Backup resource should be updated. This mask is relative to the Backup
    +   * resource, not to the request message. The field mask must always be
    +   * specified; this prevents any future fields from being erased accidentally
    +   * by clients that do not know about them.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + boolean hasUpdateMask(); + + /** + * + * + *
    +   * Required. A mask specifying which fields (e.g. `expire_time`) in the
    +   * Backup resource should be updated. This mask is relative to the Backup
    +   * resource, not to the request message. The field mask must always be
    +   * specified; this prevents any future fields from being erased accidentally
    +   * by clients that do not know about them.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + com.google.protobuf.FieldMask getUpdateMask(); + + /** + * + * + *
    +   * Required. A mask specifying which fields (e.g. `expire_time`) in the
    +   * Backup resource should be updated. This mask is relative to the Backup
    +   * resource, not to the request message. The field mask must always be
    +   * specified; this prevents any future fields from being erased accidentally
    +   * by clients that do not know about them.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupScheduleRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupScheduleRequest.java new file mode 100644 index 000000000000..3bf7251dba3c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupScheduleRequest.java @@ -0,0 +1,1097 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup_schedule.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.UpdateBackupScheduleRequest} + */ +@com.google.protobuf.Generated +public final class UpdateBackupScheduleRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.UpdateBackupScheduleRequest) + UpdateBackupScheduleRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateBackupScheduleRequest"); + } + + // Use UpdateBackupScheduleRequest.newBuilder() to construct. + private UpdateBackupScheduleRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateBackupScheduleRequest() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_UpdateBackupScheduleRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_UpdateBackupScheduleRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest.class, + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest.Builder.class); + } + + private int bitField0_; + public static final int BACKUP_SCHEDULE_FIELD_NUMBER = 1; + private com.google.spanner.admin.database.v1.BackupSchedule backupSchedule_; + + /** + * + * + *
    +   * Required. The backup schedule to update. `backup_schedule.name`, and the
    +   * fields to be updated as specified by `update_mask` are required. Other
    +   * fields are ignored.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the backupSchedule field is set. + */ + @java.lang.Override + public boolean hasBackupSchedule() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Required. The backup schedule to update. `backup_schedule.name`, and the
    +   * fields to be updated as specified by `update_mask` are required. Other
    +   * fields are ignored.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The backupSchedule. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupSchedule getBackupSchedule() { + return backupSchedule_ == null + ? com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance() + : backupSchedule_; + } + + /** + * + * + *
    +   * Required. The backup schedule to update. `backup_schedule.name`, and the
    +   * fields to be updated as specified by `update_mask` are required. Other
    +   * fields are ignored.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupScheduleOrBuilder getBackupScheduleOrBuilder() { + return backupSchedule_ == null + ? com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance() + : backupSchedule_; + } + + public static final int UPDATE_MASK_FIELD_NUMBER = 2; + private com.google.protobuf.FieldMask updateMask_; + + /** + * + * + *
    +   * Required. A mask specifying which fields in the BackupSchedule resource
    +   * should be updated. This mask is relative to the BackupSchedule resource,
    +   * not to the request message. The field mask must always be
    +   * specified; this prevents any future fields from being erased
    +   * accidentally.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + @java.lang.Override + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Required. A mask specifying which fields in the BackupSchedule resource
    +   * should be updated. This mask is relative to the BackupSchedule resource,
    +   * not to the request message. The field mask must always be
    +   * specified; this prevents any future fields from being erased
    +   * accidentally.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getUpdateMask() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + /** + * + * + *
    +   * Required. A mask specifying which fields in the BackupSchedule resource
    +   * should be updated. This mask is relative to the BackupSchedule resource,
    +   * not to the request message. The field mask must always be
    +   * specified; this prevents any future fields from being erased
    +   * accidentally.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getBackupSchedule()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getUpdateMask()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getBackupSchedule()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getUpdateMask()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest other = + (com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest) obj; + + if (hasBackupSchedule() != other.hasBackupSchedule()) return false; + if (hasBackupSchedule()) { + if (!getBackupSchedule().equals(other.getBackupSchedule())) return false; + 
} + if (hasUpdateMask() != other.hasUpdateMask()) return false; + if (hasUpdateMask()) { + if (!getUpdateMask().equals(other.getUpdateMask())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasBackupSchedule()) { + hash = (37 * hash) + BACKUP_SCHEDULE_FIELD_NUMBER; + hash = (53 * hash) + getBackupSchedule().hashCode(); + } + if (hasUpdateMask()) { + hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; + hash = (53 * hash) + getUpdateMask().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseFrom( + byte[] data) throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) 
+ throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.UpdateBackupScheduleRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.UpdateBackupScheduleRequest) + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_UpdateBackupScheduleRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_UpdateBackupScheduleRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest.class, + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetBackupScheduleFieldBuilder(); + internalGetUpdateMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + backupSchedule_ = null; + if (backupScheduleBuilder_ != null) { + backupScheduleBuilder_.dispose(); + backupScheduleBuilder_ = null; + } + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + return 
this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_UpdateBackupScheduleRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest build() { + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest buildPartial() { + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest result = + new com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.backupSchedule_ = + backupScheduleBuilder_ == null ? backupSchedule_ : backupScheduleBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.updateMask_ = updateMaskBuilder_ == null ? 
updateMask_ : updateMaskBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest other) { + if (other + == com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest.getDefaultInstance()) + return this; + if (other.hasBackupSchedule()) { + mergeBackupSchedule(other.getBackupSchedule()); + } + if (other.hasUpdateMask()) { + mergeUpdateMask(other.getUpdateMask()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetBackupScheduleFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetUpdateMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { 
+ throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.admin.database.v1.BackupSchedule backupSchedule_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.BackupSchedule, + com.google.spanner.admin.database.v1.BackupSchedule.Builder, + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder> + backupScheduleBuilder_; + + /** + * + * + *
    +     * Required. The backup schedule to update. `backup_schedule.name`, and the
    +     * fields to be updated as specified by `update_mask` are required. Other
    +     * fields are ignored.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the backupSchedule field is set. + */ + public boolean hasBackupSchedule() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Required. The backup schedule to update. `backup_schedule.name`, and the
    +     * fields to be updated as specified by `update_mask` are required. Other
    +     * fields are ignored.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The backupSchedule. + */ + public com.google.spanner.admin.database.v1.BackupSchedule getBackupSchedule() { + if (backupScheduleBuilder_ == null) { + return backupSchedule_ == null + ? com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance() + : backupSchedule_; + } else { + return backupScheduleBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. The backup schedule to update. `backup_schedule.name`, and the
    +     * fields to be updated as specified by `update_mask` are required. Other
    +     * fields are ignored.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setBackupSchedule(com.google.spanner.admin.database.v1.BackupSchedule value) { + if (backupScheduleBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + backupSchedule_ = value; + } else { + backupScheduleBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The backup schedule to update. `backup_schedule.name`, and the
    +     * fields to be updated as specified by `update_mask` are required. Other
    +     * fields are ignored.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setBackupSchedule( + com.google.spanner.admin.database.v1.BackupSchedule.Builder builderForValue) { + if (backupScheduleBuilder_ == null) { + backupSchedule_ = builderForValue.build(); + } else { + backupScheduleBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The backup schedule to update. `backup_schedule.name`, and the
    +     * fields to be updated as specified by `update_mask` are required. Other
    +     * fields are ignored.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeBackupSchedule(com.google.spanner.admin.database.v1.BackupSchedule value) { + if (backupScheduleBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && backupSchedule_ != null + && backupSchedule_ + != com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance()) { + getBackupScheduleBuilder().mergeFrom(value); + } else { + backupSchedule_ = value; + } + } else { + backupScheduleBuilder_.mergeFrom(value); + } + if (backupSchedule_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. The backup schedule to update. `backup_schedule.name`, and the
    +     * fields to be updated as specified by `update_mask` are required. Other
    +     * fields are ignored.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearBackupSchedule() { + bitField0_ = (bitField0_ & ~0x00000001); + backupSchedule_ = null; + if (backupScheduleBuilder_ != null) { + backupScheduleBuilder_.dispose(); + backupScheduleBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The backup schedule to update. `backup_schedule.name`, and the
    +     * fields to be updated as specified by `update_mask` are required. Other
    +     * fields are ignored.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.BackupSchedule.Builder getBackupScheduleBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetBackupScheduleFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. The backup schedule to update. `backup_schedule.name`, and the
    +     * fields to be updated as specified by `update_mask` are required. Other
    +     * fields are ignored.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.BackupScheduleOrBuilder + getBackupScheduleOrBuilder() { + if (backupScheduleBuilder_ != null) { + return backupScheduleBuilder_.getMessageOrBuilder(); + } else { + return backupSchedule_ == null + ? com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance() + : backupSchedule_; + } + } + + /** + * + * + *
    +     * Required. The backup schedule to update. `backup_schedule.name`, and the
    +     * fields to be updated as specified by `update_mask` are required. Other
    +     * fields are ignored.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.BackupSchedule, + com.google.spanner.admin.database.v1.BackupSchedule.Builder, + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder> + internalGetBackupScheduleFieldBuilder() { + if (backupScheduleBuilder_ == null) { + backupScheduleBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.BackupSchedule, + com.google.spanner.admin.database.v1.BackupSchedule.Builder, + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder>( + getBackupSchedule(), getParentForChildren(), isClean()); + backupSchedule_ = null; + } + return backupScheduleBuilder_; + } + + private com.google.protobuf.FieldMask updateMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + updateMaskBuilder_; + + /** + * + * + *
    +     * Required. A mask specifying which fields in the BackupSchedule resource
    +     * should be updated. This mask is relative to the BackupSchedule resource,
    +     * not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased
    +     * accidentally.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in the BackupSchedule resource
    +     * should be updated. This mask is relative to the BackupSchedule resource,
    +     * not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased
    +     * accidentally.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + public com.google.protobuf.FieldMask getUpdateMask() { + if (updateMaskBuilder_ == null) { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } else { + return updateMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in the BackupSchedule resource
    +     * should be updated. This mask is relative to the BackupSchedule resource,
    +     * not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased
    +     * accidentally.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateMask_ = value; + } else { + updateMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in the BackupSchedule resource
    +     * should be updated. This mask is relative to the BackupSchedule resource,
    +     * not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased
    +     * accidentally.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (updateMaskBuilder_ == null) { + updateMask_ = builderForValue.build(); + } else { + updateMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in the BackupSchedule resource
    +     * should be updated. This mask is relative to the BackupSchedule resource,
    +     * not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased
    +     * accidentally.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && updateMask_ != null + && updateMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getUpdateMaskBuilder().mergeFrom(value); + } else { + updateMask_ = value; + } + } else { + updateMaskBuilder_.mergeFrom(value); + } + if (updateMask_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in the BackupSchedule resource
    +     * should be updated. This mask is relative to the BackupSchedule resource,
    +     * not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased
    +     * accidentally.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearUpdateMask() { + bitField0_ = (bitField0_ & ~0x00000002); + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in the BackupSchedule resource
    +     * should be updated. This mask is relative to the BackupSchedule resource,
    +     * not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased
    +     * accidentally.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetUpdateMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in the BackupSchedule resource
    +     * should be updated. This mask is relative to the BackupSchedule resource,
    +     * not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased
    +     * accidentally.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + if (updateMaskBuilder_ != null) { + return updateMaskBuilder_.getMessageOrBuilder(); + } else { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in the BackupSchedule resource
    +     * should be updated. This mask is relative to the BackupSchedule resource,
    +     * not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased
    +     * accidentally.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetUpdateMaskFieldBuilder() { + if (updateMaskBuilder_ == null) { + updateMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getUpdateMask(), getParentForChildren(), isClean()); + updateMask_ = null; + } + return updateMaskBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.UpdateBackupScheduleRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.UpdateBackupScheduleRequest) + private static final com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest(); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateBackupScheduleRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupScheduleRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupScheduleRequestOrBuilder.java new file mode 100644 index 000000000000..5c5a21cfb35d --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupScheduleRequestOrBuilder.java @@ -0,0 +1,129 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/backup_schedule.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface UpdateBackupScheduleRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.UpdateBackupScheduleRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The backup schedule to update. `backup_schedule.name`, and the
    +   * fields to be updated as specified by `update_mask` are required. Other
    +   * fields are ignored.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the backupSchedule field is set. + */ + boolean hasBackupSchedule(); + + /** + * + * + *
    +   * Required. The backup schedule to update. `backup_schedule.name`, and the
    +   * fields to be updated as specified by `update_mask` are required. Other
    +   * fields are ignored.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The backupSchedule. + */ + com.google.spanner.admin.database.v1.BackupSchedule getBackupSchedule(); + + /** + * + * + *
    +   * Required. The backup schedule to update. `backup_schedule.name`, and the
    +   * fields to be updated as specified by `update_mask` are required. Other
    +   * fields are ignored.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder getBackupScheduleOrBuilder(); + + /** + * + * + *
    +   * Required. A mask specifying which fields in the BackupSchedule resource
    +   * should be updated. This mask is relative to the BackupSchedule resource,
    +   * not to the request message. The field mask must always be
    +   * specified; this prevents any future fields from being erased
    +   * accidentally.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + boolean hasUpdateMask(); + + /** + * + * + *
    +   * Required. A mask specifying which fields in the BackupSchedule resource
    +   * should be updated. This mask is relative to the BackupSchedule resource,
    +   * not to the request message. The field mask must always be
    +   * specified; this prevents any future fields from being erased
    +   * accidentally.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + com.google.protobuf.FieldMask getUpdateMask(); + + /** + * + * + *
    +   * Required. A mask specifying which fields in the BackupSchedule resource
    +   * should be updated. This mask is relative to the BackupSchedule resource,
    +   * not to the request message. The field mask must always be
    +   * specified; this prevents any future fields from being erased
    +   * accidentally.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlMetadata.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlMetadata.java new file mode 100644 index 000000000000..03acaaf5a753 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlMetadata.java @@ -0,0 +1,2790 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Metadata type for the operation returned by
    + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata} + */ +@com.google.protobuf.Generated +public final class UpdateDatabaseDdlMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata) + UpdateDatabaseDdlMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateDatabaseDdlMetadata"); + } + + // Use UpdateDatabaseDdlMetadata.newBuilder() to construct. + private UpdateDatabaseDdlMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateDatabaseDdlMetadata() { + database_ = ""; + statements_ = com.google.protobuf.LazyStringArrayList.emptyList(); + commitTimestamps_ = java.util.Collections.emptyList(); + progress_ = java.util.Collections.emptyList(); + actions_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_UpdateDatabaseDdlMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_UpdateDatabaseDdlMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata.class, + com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata.Builder.class); + } + + public static final int DATABASE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile 
java.lang.Object database_ = ""; + + /** + * + * + *
    +   * The database being modified.
    +   * 
    + * + * string database = 1 [(.google.api.resource_reference) = { ... } + * + * @return The database. + */ + @java.lang.Override + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } + } + + /** + * + * + *
    +   * The database being modified.
    +   * 
    + * + * string database = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for database. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int STATEMENTS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList statements_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * For an update this list contains all the statements. For an
    +   * individual statement, this list contains only that statement.
    +   * 
    + * + * repeated string statements = 2; + * + * @return A list containing the statements. + */ + public com.google.protobuf.ProtocolStringList getStatementsList() { + return statements_; + } + + /** + * + * + *
    +   * For an update this list contains all the statements. For an
    +   * individual statement, this list contains only that statement.
    +   * 
    + * + * repeated string statements = 2; + * + * @return The count of statements. + */ + public int getStatementsCount() { + return statements_.size(); + } + + /** + * + * + *
    +   * For an update this list contains all the statements. For an
    +   * individual statement, this list contains only that statement.
    +   * 
    + * + * repeated string statements = 2; + * + * @param index The index of the element to return. + * @return The statements at the given index. + */ + public java.lang.String getStatements(int index) { + return statements_.get(index); + } + + /** + * + * + *
    +   * For an update this list contains all the statements. For an
    +   * individual statement, this list contains only that statement.
    +   * 
    + * + * repeated string statements = 2; + * + * @param index The index of the value to return. + * @return The bytes of the statements at the given index. + */ + public com.google.protobuf.ByteString getStatementsBytes(int index) { + return statements_.getByteString(index); + } + + public static final int COMMIT_TIMESTAMPS_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private java.util.List commitTimestamps_; + + /** + * + * + *
    +   * Reports the commit timestamps of all statements that have
    +   * succeeded so far, where `commit_timestamps[i]` is the commit
    +   * timestamp for the statement `statements[i]`.
    +   * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + @java.lang.Override + public java.util.List getCommitTimestampsList() { + return commitTimestamps_; + } + + /** + * + * + *
    +   * Reports the commit timestamps of all statements that have
    +   * succeeded so far, where `commit_timestamps[i]` is the commit
    +   * timestamp for the statement `statements[i]`.
    +   * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + @java.lang.Override + public java.util.List + getCommitTimestampsOrBuilderList() { + return commitTimestamps_; + } + + /** + * + * + *
    +   * Reports the commit timestamps of all statements that have
    +   * succeeded so far, where `commit_timestamps[i]` is the commit
    +   * timestamp for the statement `statements[i]`.
    +   * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + @java.lang.Override + public int getCommitTimestampsCount() { + return commitTimestamps_.size(); + } + + /** + * + * + *
    +   * Reports the commit timestamps of all statements that have
    +   * succeeded so far, where `commit_timestamps[i]` is the commit
    +   * timestamp for the statement `statements[i]`.
    +   * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCommitTimestamps(int index) { + return commitTimestamps_.get(index); + } + + /** + * + * + *
    +   * Reports the commit timestamps of all statements that have
    +   * succeeded so far, where `commit_timestamps[i]` is the commit
    +   * timestamp for the statement `statements[i]`.
    +   * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCommitTimestampsOrBuilder(int index) { + return commitTimestamps_.get(index); + } + + public static final int THROTTLED_FIELD_NUMBER = 4; + private boolean throttled_ = false; + + /** + * + * + *
    +   * Output only. When true, indicates that the operation is throttled e.g.
    +   * due to resource constraints. When resources become available the operation
    +   * will resume and this field will be false again.
    +   * 
    + * + * bool throttled = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The throttled. + */ + @java.lang.Override + public boolean getThrottled() { + return throttled_; + } + + public static final int PROGRESS_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private java.util.List progress_; + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +   * operations. All DDL statements will have continuously updating progress,
    +   * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +   * `progress[i]` will have start time and end time populated with commit
    +   * timestamp of operation, as well as a progress of 100% once the operation
    +   * has completed.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + @java.lang.Override + public java.util.List getProgressList() { + return progress_; + } + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +   * operations. All DDL statements will have continuously updating progress,
    +   * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +   * `progress[i]` will have start time and end time populated with commit
    +   * timestamp of operation, as well as a progress of 100% once the operation
    +   * has completed.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + @java.lang.Override + public java.util.List + getProgressOrBuilderList() { + return progress_; + } + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +   * operations. All DDL statements will have continuously updating progress,
    +   * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +   * `progress[i]` will have start time and end time populated with commit
    +   * timestamp of operation, as well as a progress of 100% once the operation
    +   * has completed.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + @java.lang.Override + public int getProgressCount() { + return progress_.size(); + } + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +   * operations. All DDL statements will have continuously updating progress,
    +   * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +   * `progress[i]` will have start time and end time populated with commit
    +   * timestamp of operation, as well as a progress of 100% once the operation
    +   * has completed.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.OperationProgress getProgress(int index) { + return progress_.get(index); + } + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +   * operations. All DDL statements will have continuously updating progress,
    +   * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +   * `progress[i]` will have start time and end time populated with commit
    +   * timestamp of operation, as well as a progress of 100% once the operation
    +   * has completed.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgressOrBuilder( + int index) { + return progress_.get(index); + } + + public static final int ACTIONS_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private java.util.List actions_; + + /** + * + * + *
    +   * The brief action info for the DDL statements.
    +   * `actions[i]` is the brief info for `statements[i]`.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + @java.lang.Override + public java.util.List + getActionsList() { + return actions_; + } + + /** + * + * + *
    +   * The brief action info for the DDL statements.
    +   * `actions[i]` is the brief info for `statements[i]`.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + @java.lang.Override + public java.util.List< + ? extends com.google.spanner.admin.database.v1.DdlStatementActionInfoOrBuilder> + getActionsOrBuilderList() { + return actions_; + } + + /** + * + * + *
    +   * The brief action info for the DDL statements.
    +   * `actions[i]` is the brief info for `statements[i]`.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + @java.lang.Override + public int getActionsCount() { + return actions_.size(); + } + + /** + * + * + *
    +   * The brief action info for the DDL statements.
    +   * `actions[i]` is the brief info for `statements[i]`.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.DdlStatementActionInfo getActions(int index) { + return actions_.get(index); + } + + /** + * + * + *
    +   * The brief action info for the DDL statements.
    +   * `actions[i]` is the brief info for `statements[i]`.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.DdlStatementActionInfoOrBuilder getActionsOrBuilder( + int index) { + return actions_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, database_); + } + for (int i = 0; i < statements_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, statements_.getRaw(i)); + } + for (int i = 0; i < commitTimestamps_.size(); i++) { + output.writeMessage(3, commitTimestamps_.get(i)); + } + if (throttled_ != false) { + output.writeBool(4, throttled_); + } + for (int i = 0; i < progress_.size(); i++) { + output.writeMessage(5, progress_.get(i)); + } + for (int i = 0; i < actions_.size(); i++) { + output.writeMessage(6, actions_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, database_); + } + { + int dataSize = 0; + for (int i = 0; i < statements_.size(); i++) { + dataSize += computeStringSizeNoTag(statements_.getRaw(i)); + } + size += dataSize; + size += 1 * getStatementsList().size(); + } + for (int i = 0; i < commitTimestamps_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, 
commitTimestamps_.get(i)); + } + if (throttled_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(4, throttled_); + } + for (int i = 0; i < progress_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, progress_.get(i)); + } + for (int i = 0; i < actions_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, actions_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata other = + (com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata) obj; + + if (!getDatabase().equals(other.getDatabase())) return false; + if (!getStatementsList().equals(other.getStatementsList())) return false; + if (!getCommitTimestampsList().equals(other.getCommitTimestampsList())) return false; + if (getThrottled() != other.getThrottled()) return false; + if (!getProgressList().equals(other.getProgressList())) return false; + if (!getActionsList().equals(other.getActionsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getDatabase().hashCode(); + if (getStatementsCount() > 0) { + hash = (37 * hash) + STATEMENTS_FIELD_NUMBER; + hash = (53 * hash) + getStatementsList().hashCode(); + } + if (getCommitTimestampsCount() > 0) { + hash = (37 * hash) + COMMIT_TIMESTAMPS_FIELD_NUMBER; + hash = (53 * hash) + getCommitTimestampsList().hashCode(); + } 
+ hash = (37 * hash) + THROTTLED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getThrottled()); + if (getProgressCount() > 0) { + hash = (37 * hash) + PROGRESS_FIELD_NUMBER; + hash = (53 * hash) + getProgressList().hashCode(); + } + if (getActionsCount() > 0) { + hash = (37 * hash) + ACTIONS_FIELD_NUMBER; + hash = (53 * hash) + getActionsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder 
newBuilder( + com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Metadata type for the operation returned by
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata) + com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_UpdateDatabaseDdlMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_UpdateDatabaseDdlMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata.class, + com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + database_ = ""; + statements_ = com.google.protobuf.LazyStringArrayList.emptyList(); + if (commitTimestampsBuilder_ == null) { + commitTimestamps_ = java.util.Collections.emptyList(); + } else { + commitTimestamps_ = null; + commitTimestampsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + throttled_ = false; + if (progressBuilder_ == null) { + progress_ = java.util.Collections.emptyList(); + } else { + progress_ = null; + progressBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + if (actionsBuilder_ == null) { + actions_ = 
java.util.Collections.emptyList(); + } else { + actions_ = null; + actionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_UpdateDatabaseDdlMetadata_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata build() { + com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata buildPartial() { + com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata result = + new com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata result) { + if (commitTimestampsBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0)) { + commitTimestamps_ = java.util.Collections.unmodifiableList(commitTimestamps_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.commitTimestamps_ = commitTimestamps_; + } else { + result.commitTimestamps_ = commitTimestampsBuilder_.build(); + } + if (progressBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0)) { + progress_ = java.util.Collections.unmodifiableList(progress_); + bitField0_ = (bitField0_ & ~0x00000010); + } + 
result.progress_ = progress_; + } else { + result.progress_ = progressBuilder_.build(); + } + if (actionsBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0)) { + actions_ = java.util.Collections.unmodifiableList(actions_); + bitField0_ = (bitField0_ & ~0x00000020); + } + result.actions_ = actions_; + } else { + result.actions_ = actionsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.database_ = database_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + statements_.makeImmutable(); + result.statements_ = statements_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.throttled_ = throttled_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata) { + return mergeFrom((com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata other) { + if (other + == com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata.getDefaultInstance()) + return this; + if (!other.getDatabase().isEmpty()) { + database_ = other.database_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.statements_.isEmpty()) { + if (statements_.isEmpty()) { + statements_ = other.statements_; + bitField0_ |= 0x00000002; + } else { + ensureStatementsIsMutable(); + statements_.addAll(other.statements_); + } + onChanged(); + } + if (commitTimestampsBuilder_ == null) { + if (!other.commitTimestamps_.isEmpty()) { + if (commitTimestamps_.isEmpty()) { + commitTimestamps_ = other.commitTimestamps_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureCommitTimestampsIsMutable(); + 
commitTimestamps_.addAll(other.commitTimestamps_); + } + onChanged(); + } + } else { + if (!other.commitTimestamps_.isEmpty()) { + if (commitTimestampsBuilder_.isEmpty()) { + commitTimestampsBuilder_.dispose(); + commitTimestampsBuilder_ = null; + commitTimestamps_ = other.commitTimestamps_; + bitField0_ = (bitField0_ & ~0x00000004); + commitTimestampsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetCommitTimestampsFieldBuilder() + : null; + } else { + commitTimestampsBuilder_.addAllMessages(other.commitTimestamps_); + } + } + } + if (other.getThrottled() != false) { + setThrottled(other.getThrottled()); + } + if (progressBuilder_ == null) { + if (!other.progress_.isEmpty()) { + if (progress_.isEmpty()) { + progress_ = other.progress_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureProgressIsMutable(); + progress_.addAll(other.progress_); + } + onChanged(); + } + } else { + if (!other.progress_.isEmpty()) { + if (progressBuilder_.isEmpty()) { + progressBuilder_.dispose(); + progressBuilder_ = null; + progress_ = other.progress_; + bitField0_ = (bitField0_ & ~0x00000010); + progressBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetProgressFieldBuilder() + : null; + } else { + progressBuilder_.addAllMessages(other.progress_); + } + } + } + if (actionsBuilder_ == null) { + if (!other.actions_.isEmpty()) { + if (actions_.isEmpty()) { + actions_ = other.actions_; + bitField0_ = (bitField0_ & ~0x00000020); + } else { + ensureActionsIsMutable(); + actions_.addAll(other.actions_); + } + onChanged(); + } + } else { + if (!other.actions_.isEmpty()) { + if (actionsBuilder_.isEmpty()) { + actionsBuilder_.dispose(); + actionsBuilder_ = null; + actions_ = other.actions_; + bitField0_ = (bitField0_ & ~0x00000020); + actionsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetActionsFieldBuilder() + : null; + } else { + actionsBuilder_.addAllMessages(other.actions_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + database_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureStatementsIsMutable(); + statements_.add(s); + break; + } // case 18 + case 26: + { + com.google.protobuf.Timestamp m = + input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (commitTimestampsBuilder_ == null) { + ensureCommitTimestampsIsMutable(); + commitTimestamps_.add(m); + } else { + commitTimestampsBuilder_.addMessage(m); + } + break; + } // case 26 + case 32: + { + throttled_ = input.readBool(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 42: + { + com.google.spanner.admin.database.v1.OperationProgress m = + input.readMessage( + com.google.spanner.admin.database.v1.OperationProgress.parser(), + extensionRegistry); + if (progressBuilder_ == null) { + ensureProgressIsMutable(); + progress_.add(m); + } else { + progressBuilder_.addMessage(m); + } + break; + } // case 42 + case 50: + { + com.google.spanner.admin.database.v1.DdlStatementActionInfo m = + input.readMessage( + com.google.spanner.admin.database.v1.DdlStatementActionInfo.parser(), + extensionRegistry); + if (actionsBuilder_ == null) { + ensureActionsIsMutable(); + actions_.add(m); + } else { 
+ actionsBuilder_.addMessage(m); + } + break; + } // case 50 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object database_ = ""; + + /** + * + * + *
    +     * The database being modified.
    +     * 
    + * + * string database = 1 [(.google.api.resource_reference) = { ... } + * + * @return The database. + */ + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The database being modified.
    +     * 
    + * + * string database = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for database. + */ + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The database being modified.
    +     * 
    + * + * string database = 1 [(.google.api.resource_reference) = { ... } + * + * @param value The database to set. + * @return This builder for chaining. + */ + public Builder setDatabase(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The database being modified.
    +     * 
    + * + * string database = 1 [(.google.api.resource_reference) = { ... } + * + * @return This builder for chaining. + */ + public Builder clearDatabase() { + database_ = getDefaultInstance().getDatabase(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The database being modified.
    +     * 
    + * + * string database = 1 [(.google.api.resource_reference) = { ... } + * + * @param value The bytes for database to set. + * @return This builder for chaining. + */ + public Builder setDatabaseBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList statements_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureStatementsIsMutable() { + if (!statements_.isModifiable()) { + statements_ = new com.google.protobuf.LazyStringArrayList(statements_); + } + bitField0_ |= 0x00000002; + } + + /** + * + * + *
    +     * For an update this list contains all the statements. For an
    +     * individual statement, this list contains only that statement.
    +     * 
    + * + * repeated string statements = 2; + * + * @return A list containing the statements. + */ + public com.google.protobuf.ProtocolStringList getStatementsList() { + statements_.makeImmutable(); + return statements_; + } + + /** + * + * + *
    +     * For an update this list contains all the statements. For an
    +     * individual statement, this list contains only that statement.
    +     * 
    + * + * repeated string statements = 2; + * + * @return The count of statements. + */ + public int getStatementsCount() { + return statements_.size(); + } + + /** + * + * + *
    +     * For an update this list contains all the statements. For an
    +     * individual statement, this list contains only that statement.
    +     * 
    + * + * repeated string statements = 2; + * + * @param index The index of the element to return. + * @return The statements at the given index. + */ + public java.lang.String getStatements(int index) { + return statements_.get(index); + } + + /** + * + * + *
    +     * For an update this list contains all the statements. For an
    +     * individual statement, this list contains only that statement.
    +     * 
    + * + * repeated string statements = 2; + * + * @param index The index of the value to return. + * @return The bytes of the statements at the given index. + */ + public com.google.protobuf.ByteString getStatementsBytes(int index) { + return statements_.getByteString(index); + } + + /** + * + * + *
    +     * For an update this list contains all the statements. For an
    +     * individual statement, this list contains only that statement.
    +     * 
    + * + * repeated string statements = 2; + * + * @param index The index to set the value at. + * @param value The statements to set. + * @return This builder for chaining. + */ + public Builder setStatements(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatementsIsMutable(); + statements_.set(index, value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * For an update this list contains all the statements. For an
    +     * individual statement, this list contains only that statement.
    +     * 
    + * + * repeated string statements = 2; + * + * @param value The statements to add. + * @return This builder for chaining. + */ + public Builder addStatements(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatementsIsMutable(); + statements_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * For an update this list contains all the statements. For an
    +     * individual statement, this list contains only that statement.
    +     * 
    + * + * repeated string statements = 2; + * + * @param values The statements to add. + * @return This builder for chaining. + */ + public Builder addAllStatements(java.lang.Iterable values) { + ensureStatementsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, statements_); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * For an update this list contains all the statements. For an
    +     * individual statement, this list contains only that statement.
    +     * 
    + * + * repeated string statements = 2; + * + * @return This builder for chaining. + */ + public Builder clearStatements() { + statements_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * For an update this list contains all the statements. For an
    +     * individual statement, this list contains only that statement.
    +     * 
    + * + * repeated string statements = 2; + * + * @param value The bytes of the statements to add. + * @return This builder for chaining. + */ + public Builder addStatementsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureStatementsIsMutable(); + statements_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.util.List commitTimestamps_ = + java.util.Collections.emptyList(); + + private void ensureCommitTimestampsIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + commitTimestamps_ = + new java.util.ArrayList(commitTimestamps_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + commitTimestampsBuilder_; + + /** + * + * + *
    +     * Reports the commit timestamps of all statements that have
    +     * succeeded so far, where `commit_timestamps[i]` is the commit
    +     * timestamp for the statement `statements[i]`.
    +     * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + public java.util.List getCommitTimestampsList() { + if (commitTimestampsBuilder_ == null) { + return java.util.Collections.unmodifiableList(commitTimestamps_); + } else { + return commitTimestampsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Reports the commit timestamps of all statements that have
    +     * succeeded so far, where `commit_timestamps[i]` is the commit
    +     * timestamp for the statement `statements[i]`.
    +     * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + public int getCommitTimestampsCount() { + if (commitTimestampsBuilder_ == null) { + return commitTimestamps_.size(); + } else { + return commitTimestampsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Reports the commit timestamps of all statements that have
    +     * succeeded so far, where `commit_timestamps[i]` is the commit
    +     * timestamp for the statement `statements[i]`.
    +     * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + public com.google.protobuf.Timestamp getCommitTimestamps(int index) { + if (commitTimestampsBuilder_ == null) { + return commitTimestamps_.get(index); + } else { + return commitTimestampsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Reports the commit timestamps of all statements that have
    +     * succeeded so far, where `commit_timestamps[i]` is the commit
    +     * timestamp for the statement `statements[i]`.
    +     * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + public Builder setCommitTimestamps(int index, com.google.protobuf.Timestamp value) { + if (commitTimestampsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCommitTimestampsIsMutable(); + commitTimestamps_.set(index, value); + onChanged(); + } else { + commitTimestampsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Reports the commit timestamps of all statements that have
    +     * succeeded so far, where `commit_timestamps[i]` is the commit
    +     * timestamp for the statement `statements[i]`.
    +     * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + public Builder setCommitTimestamps( + int index, com.google.protobuf.Timestamp.Builder builderForValue) { + if (commitTimestampsBuilder_ == null) { + ensureCommitTimestampsIsMutable(); + commitTimestamps_.set(index, builderForValue.build()); + onChanged(); + } else { + commitTimestampsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Reports the commit timestamps of all statements that have
    +     * succeeded so far, where `commit_timestamps[i]` is the commit
    +     * timestamp for the statement `statements[i]`.
    +     * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + public Builder addCommitTimestamps(com.google.protobuf.Timestamp value) { + if (commitTimestampsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCommitTimestampsIsMutable(); + commitTimestamps_.add(value); + onChanged(); + } else { + commitTimestampsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Reports the commit timestamps of all statements that have
    +     * succeeded so far, where `commit_timestamps[i]` is the commit
    +     * timestamp for the statement `statements[i]`.
    +     * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + public Builder addCommitTimestamps(int index, com.google.protobuf.Timestamp value) { + if (commitTimestampsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCommitTimestampsIsMutable(); + commitTimestamps_.add(index, value); + onChanged(); + } else { + commitTimestampsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Reports the commit timestamps of all statements that have
    +     * succeeded so far, where `commit_timestamps[i]` is the commit
    +     * timestamp for the statement `statements[i]`.
    +     * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + public Builder addCommitTimestamps(com.google.protobuf.Timestamp.Builder builderForValue) { + if (commitTimestampsBuilder_ == null) { + ensureCommitTimestampsIsMutable(); + commitTimestamps_.add(builderForValue.build()); + onChanged(); + } else { + commitTimestampsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Reports the commit timestamps of all statements that have
    +     * succeeded so far, where `commit_timestamps[i]` is the commit
    +     * timestamp for the statement `statements[i]`.
    +     * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + public Builder addCommitTimestamps( + int index, com.google.protobuf.Timestamp.Builder builderForValue) { + if (commitTimestampsBuilder_ == null) { + ensureCommitTimestampsIsMutable(); + commitTimestamps_.add(index, builderForValue.build()); + onChanged(); + } else { + commitTimestampsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Reports the commit timestamps of all statements that have
    +     * succeeded so far, where `commit_timestamps[i]` is the commit
    +     * timestamp for the statement `statements[i]`.
    +     * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + public Builder addAllCommitTimestamps( + java.lang.Iterable values) { + if (commitTimestampsBuilder_ == null) { + ensureCommitTimestampsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, commitTimestamps_); + onChanged(); + } else { + commitTimestampsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Reports the commit timestamps of all statements that have
    +     * succeeded so far, where `commit_timestamps[i]` is the commit
    +     * timestamp for the statement `statements[i]`.
    +     * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + public Builder clearCommitTimestamps() { + if (commitTimestampsBuilder_ == null) { + commitTimestamps_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + commitTimestampsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Reports the commit timestamps of all statements that have
    +     * succeeded so far, where `commit_timestamps[i]` is the commit
    +     * timestamp for the statement `statements[i]`.
    +     * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + public Builder removeCommitTimestamps(int index) { + if (commitTimestampsBuilder_ == null) { + ensureCommitTimestampsIsMutable(); + commitTimestamps_.remove(index); + onChanged(); + } else { + commitTimestampsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Reports the commit timestamps of all statements that have
    +     * succeeded so far, where `commit_timestamps[i]` is the commit
    +     * timestamp for the statement `statements[i]`.
    +     * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + public com.google.protobuf.Timestamp.Builder getCommitTimestampsBuilder(int index) { + return internalGetCommitTimestampsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Reports the commit timestamps of all statements that have
    +     * succeeded so far, where `commit_timestamps[i]` is the commit
    +     * timestamp for the statement `statements[i]`.
    +     * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + public com.google.protobuf.TimestampOrBuilder getCommitTimestampsOrBuilder(int index) { + if (commitTimestampsBuilder_ == null) { + return commitTimestamps_.get(index); + } else { + return commitTimestampsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Reports the commit timestamps of all statements that have
    +     * succeeded so far, where `commit_timestamps[i]` is the commit
    +     * timestamp for the statement `statements[i]`.
    +     * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + public java.util.List + getCommitTimestampsOrBuilderList() { + if (commitTimestampsBuilder_ != null) { + return commitTimestampsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(commitTimestamps_); + } + } + + /** + * + * + *
    +     * Reports the commit timestamps of all statements that have
    +     * succeeded so far, where `commit_timestamps[i]` is the commit
    +     * timestamp for the statement `statements[i]`.
    +     * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + public com.google.protobuf.Timestamp.Builder addCommitTimestampsBuilder() { + return internalGetCommitTimestampsFieldBuilder() + .addBuilder(com.google.protobuf.Timestamp.getDefaultInstance()); + } + + /** + * + * + *
    +     * Reports the commit timestamps of all statements that have
    +     * succeeded so far, where `commit_timestamps[i]` is the commit
    +     * timestamp for the statement `statements[i]`.
    +     * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + public com.google.protobuf.Timestamp.Builder addCommitTimestampsBuilder(int index) { + return internalGetCommitTimestampsFieldBuilder() + .addBuilder(index, com.google.protobuf.Timestamp.getDefaultInstance()); + } + + /** + * + * + *
    +     * Reports the commit timestamps of all statements that have
    +     * succeeded so far, where `commit_timestamps[i]` is the commit
    +     * timestamp for the statement `statements[i]`.
    +     * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + public java.util.List getCommitTimestampsBuilderList() { + return internalGetCommitTimestampsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCommitTimestampsFieldBuilder() { + if (commitTimestampsBuilder_ == null) { + commitTimestampsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + commitTimestamps_, + ((bitField0_ & 0x00000004) != 0), + getParentForChildren(), + isClean()); + commitTimestamps_ = null; + } + return commitTimestampsBuilder_; + } + + private boolean throttled_; + + /** + * + * + *
    +     * Output only. When true, indicates that the operation is throttled e.g.
    +     * due to resource constraints. When resources become available the operation
    +     * will resume and this field will be false again.
    +     * 
    + * + * bool throttled = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The throttled. + */ + @java.lang.Override + public boolean getThrottled() { + return throttled_; + } + + /** + * + * + *
    +     * Output only. When true, indicates that the operation is throttled e.g.
    +     * due to resource constraints. When resources become available the operation
    +     * will resume and this field will be false again.
    +     * 
    + * + * bool throttled = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The throttled to set. + * @return This builder for chaining. + */ + public Builder setThrottled(boolean value) { + + throttled_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. When true, indicates that the operation is throttled e.g.
    +     * due to resource constraints. When resources become available the operation
    +     * will resume and this field will be false again.
    +     * 
    + * + * bool throttled = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearThrottled() { + bitField0_ = (bitField0_ & ~0x00000008); + throttled_ = false; + onChanged(); + return this; + } + + private java.util.List progress_ = + java.util.Collections.emptyList(); + + private void ensureProgressIsMutable() { + if (!((bitField0_ & 0x00000010) != 0)) { + progress_ = + new java.util.ArrayList( + progress_); + bitField0_ |= 0x00000010; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.OperationProgress, + com.google.spanner.admin.database.v1.OperationProgress.Builder, + com.google.spanner.admin.database.v1.OperationProgressOrBuilder> + progressBuilder_; + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * operations. All DDL statements will have continuously updating progress,
    +     * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +     * `progress[i]` will have start time and end time populated with commit
    +     * timestamp of operation, as well as a progress of 100% once the operation
    +     * has completed.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + public java.util.List + getProgressList() { + if (progressBuilder_ == null) { + return java.util.Collections.unmodifiableList(progress_); + } else { + return progressBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * operations. All DDL statements will have continuously updating progress,
    +     * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +     * `progress[i]` will have start time and end time populated with commit
    +     * timestamp of operation, as well as a progress of 100% once the operation
    +     * has completed.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + public int getProgressCount() { + if (progressBuilder_ == null) { + return progress_.size(); + } else { + return progressBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * operations. All DDL statements will have continuously updating progress,
    +     * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +     * `progress[i]` will have start time and end time populated with commit
    +     * timestamp of operation, as well as a progress of 100% once the operation
    +     * has completed.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + public com.google.spanner.admin.database.v1.OperationProgress getProgress(int index) { + if (progressBuilder_ == null) { + return progress_.get(index); + } else { + return progressBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * operations. All DDL statements will have continuously updating progress,
    +     * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +     * `progress[i]` will have start time and end time populated with commit
    +     * timestamp of operation, as well as a progress of 100% once the operation
    +     * has completed.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + public Builder setProgress( + int index, com.google.spanner.admin.database.v1.OperationProgress value) { + if (progressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureProgressIsMutable(); + progress_.set(index, value); + onChanged(); + } else { + progressBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * operations. All DDL statements will have continuously updating progress,
    +     * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +     * `progress[i]` will have start time and end time populated with commit
    +     * timestamp of operation, as well as a progress of 100% once the operation
    +     * has completed.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + public Builder setProgress( + int index, com.google.spanner.admin.database.v1.OperationProgress.Builder builderForValue) { + if (progressBuilder_ == null) { + ensureProgressIsMutable(); + progress_.set(index, builderForValue.build()); + onChanged(); + } else { + progressBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * operations. All DDL statements will have continuously updating progress,
    +     * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +     * `progress[i]` will have start time and end time populated with commit
    +     * timestamp of operation, as well as a progress of 100% once the operation
    +     * has completed.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + public Builder addProgress(com.google.spanner.admin.database.v1.OperationProgress value) { + if (progressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureProgressIsMutable(); + progress_.add(value); + onChanged(); + } else { + progressBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * operations. All DDL statements will have continuously updating progress,
    +     * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +     * `progress[i]` will have start time and end time populated with commit
    +     * timestamp of operation, as well as a progress of 100% once the operation
    +     * has completed.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + public Builder addProgress( + int index, com.google.spanner.admin.database.v1.OperationProgress value) { + if (progressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureProgressIsMutable(); + progress_.add(index, value); + onChanged(); + } else { + progressBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * operations. All DDL statements will have continuously updating progress,
    +     * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +     * `progress[i]` will have start time and end time populated with commit
    +     * timestamp of operation, as well as a progress of 100% once the operation
    +     * has completed.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + public Builder addProgress( + com.google.spanner.admin.database.v1.OperationProgress.Builder builderForValue) { + if (progressBuilder_ == null) { + ensureProgressIsMutable(); + progress_.add(builderForValue.build()); + onChanged(); + } else { + progressBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * operations. All DDL statements will have continuously updating progress,
    +     * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +     * `progress[i]` will have start time and end time populated with commit
    +     * timestamp of operation, as well as a progress of 100% once the operation
    +     * has completed.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + public Builder addProgress( + int index, com.google.spanner.admin.database.v1.OperationProgress.Builder builderForValue) { + if (progressBuilder_ == null) { + ensureProgressIsMutable(); + progress_.add(index, builderForValue.build()); + onChanged(); + } else { + progressBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * operations. All DDL statements will have continuously updating progress,
    +     * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +     * `progress[i]` will have start time and end time populated with commit
    +     * timestamp of operation, as well as a progress of 100% once the operation
    +     * has completed.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + public Builder addAllProgress( + java.lang.Iterable + values) { + if (progressBuilder_ == null) { + ensureProgressIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, progress_); + onChanged(); + } else { + progressBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * operations. All DDL statements will have continuously updating progress,
    +     * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +     * `progress[i]` will have start time and end time populated with commit
    +     * timestamp of operation, as well as a progress of 100% once the operation
    +     * has completed.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + public Builder clearProgress() { + if (progressBuilder_ == null) { + progress_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + progressBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * operations. All DDL statements will have continuously updating progress,
    +     * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +     * `progress[i]` will have start time and end time populated with commit
    +     * timestamp of operation, as well as a progress of 100% once the operation
    +     * has completed.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + public Builder removeProgress(int index) { + if (progressBuilder_ == null) { + ensureProgressIsMutable(); + progress_.remove(index); + onChanged(); + } else { + progressBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * operations. All DDL statements will have continuously updating progress,
    +     * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +     * `progress[i]` will have start time and end time populated with commit
    +     * timestamp of operation, as well as a progress of 100% once the operation
    +     * has completed.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + public com.google.spanner.admin.database.v1.OperationProgress.Builder getProgressBuilder( + int index) { + return internalGetProgressFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * operations. All DDL statements will have continuously updating progress,
    +     * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +     * `progress[i]` will have start time and end time populated with commit
    +     * timestamp of operation, as well as a progress of 100% once the operation
    +     * has completed.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + public com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgressOrBuilder( + int index) { + if (progressBuilder_ == null) { + return progress_.get(index); + } else { + return progressBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * operations. All DDL statements will have continuously updating progress,
    +     * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +     * `progress[i]` will have start time and end time populated with commit
    +     * timestamp of operation, as well as a progress of 100% once the operation
    +     * has completed.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + public java.util.List + getProgressOrBuilderList() { + if (progressBuilder_ != null) { + return progressBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(progress_); + } + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * operations. All DDL statements will have continuously updating progress,
    +     * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +     * `progress[i]` will have start time and end time populated with commit
    +     * timestamp of operation, as well as a progress of 100% once the operation
    +     * has completed.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + public com.google.spanner.admin.database.v1.OperationProgress.Builder addProgressBuilder() { + return internalGetProgressFieldBuilder() + .addBuilder(com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance()); + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * operations. All DDL statements will have continuously updating progress,
    +     * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +     * `progress[i]` will have start time and end time populated with commit
    +     * timestamp of operation, as well as a progress of 100% once the operation
    +     * has completed.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + public com.google.spanner.admin.database.v1.OperationProgress.Builder addProgressBuilder( + int index) { + return internalGetProgressFieldBuilder() + .addBuilder( + index, com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance()); + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * operations. All DDL statements will have continuously updating progress,
    +     * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +     * `progress[i]` will have start time and end time populated with commit
    +     * timestamp of operation, as well as a progress of 100% once the operation
    +     * has completed.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + public java.util.List + getProgressBuilderList() { + return internalGetProgressFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.OperationProgress, + com.google.spanner.admin.database.v1.OperationProgress.Builder, + com.google.spanner.admin.database.v1.OperationProgressOrBuilder> + internalGetProgressFieldBuilder() { + if (progressBuilder_ == null) { + progressBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.OperationProgress, + com.google.spanner.admin.database.v1.OperationProgress.Builder, + com.google.spanner.admin.database.v1.OperationProgressOrBuilder>( + progress_, ((bitField0_ & 0x00000010) != 0), getParentForChildren(), isClean()); + progress_ = null; + } + return progressBuilder_; + } + + private java.util.List actions_ = + java.util.Collections.emptyList(); + + private void ensureActionsIsMutable() { + if (!((bitField0_ & 0x00000020) != 0)) { + actions_ = + new java.util.ArrayList( + actions_); + bitField0_ |= 0x00000020; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.DdlStatementActionInfo, + com.google.spanner.admin.database.v1.DdlStatementActionInfo.Builder, + com.google.spanner.admin.database.v1.DdlStatementActionInfoOrBuilder> + actionsBuilder_; + + /** + * + * + *
    +     * The brief action info for the DDL statements.
    +     * `actions[i]` is the brief info for `statements[i]`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + public java.util.List + getActionsList() { + if (actionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(actions_); + } else { + return actionsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * The brief action info for the DDL statements.
    +     * `actions[i]` is the brief info for `statements[i]`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + public int getActionsCount() { + if (actionsBuilder_ == null) { + return actions_.size(); + } else { + return actionsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * The brief action info for the DDL statements.
    +     * `actions[i]` is the brief info for `statements[i]`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + public com.google.spanner.admin.database.v1.DdlStatementActionInfo getActions(int index) { + if (actionsBuilder_ == null) { + return actions_.get(index); + } else { + return actionsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * The brief action info for the DDL statements.
    +     * `actions[i]` is the brief info for `statements[i]`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + public Builder setActions( + int index, com.google.spanner.admin.database.v1.DdlStatementActionInfo value) { + if (actionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureActionsIsMutable(); + actions_.set(index, value); + onChanged(); + } else { + actionsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The brief action info for the DDL statements.
    +     * `actions[i]` is the brief info for `statements[i]`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + public Builder setActions( + int index, + com.google.spanner.admin.database.v1.DdlStatementActionInfo.Builder builderForValue) { + if (actionsBuilder_ == null) { + ensureActionsIsMutable(); + actions_.set(index, builderForValue.build()); + onChanged(); + } else { + actionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The brief action info for the DDL statements.
    +     * `actions[i]` is the brief info for `statements[i]`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + public Builder addActions(com.google.spanner.admin.database.v1.DdlStatementActionInfo value) { + if (actionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureActionsIsMutable(); + actions_.add(value); + onChanged(); + } else { + actionsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * The brief action info for the DDL statements.
    +     * `actions[i]` is the brief info for `statements[i]`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + public Builder addActions( + int index, com.google.spanner.admin.database.v1.DdlStatementActionInfo value) { + if (actionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureActionsIsMutable(); + actions_.add(index, value); + onChanged(); + } else { + actionsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The brief action info for the DDL statements.
    +     * `actions[i]` is the brief info for `statements[i]`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + public Builder addActions( + com.google.spanner.admin.database.v1.DdlStatementActionInfo.Builder builderForValue) { + if (actionsBuilder_ == null) { + ensureActionsIsMutable(); + actions_.add(builderForValue.build()); + onChanged(); + } else { + actionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The brief action info for the DDL statements.
    +     * `actions[i]` is the brief info for `statements[i]`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + public Builder addActions( + int index, + com.google.spanner.admin.database.v1.DdlStatementActionInfo.Builder builderForValue) { + if (actionsBuilder_ == null) { + ensureActionsIsMutable(); + actions_.add(index, builderForValue.build()); + onChanged(); + } else { + actionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The brief action info for the DDL statements.
    +     * `actions[i]` is the brief info for `statements[i]`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + public Builder addAllActions( + java.lang.Iterable + values) { + if (actionsBuilder_ == null) { + ensureActionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, actions_); + onChanged(); + } else { + actionsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * The brief action info for the DDL statements.
    +     * `actions[i]` is the brief info for `statements[i]`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + public Builder clearActions() { + if (actionsBuilder_ == null) { + actions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + } else { + actionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * The brief action info for the DDL statements.
    +     * `actions[i]` is the brief info for `statements[i]`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + public Builder removeActions(int index) { + if (actionsBuilder_ == null) { + ensureActionsIsMutable(); + actions_.remove(index); + onChanged(); + } else { + actionsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * The brief action info for the DDL statements.
    +     * `actions[i]` is the brief info for `statements[i]`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + public com.google.spanner.admin.database.v1.DdlStatementActionInfo.Builder getActionsBuilder( + int index) { + return internalGetActionsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * The brief action info for the DDL statements.
    +     * `actions[i]` is the brief info for `statements[i]`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + public com.google.spanner.admin.database.v1.DdlStatementActionInfoOrBuilder getActionsOrBuilder( + int index) { + if (actionsBuilder_ == null) { + return actions_.get(index); + } else { + return actionsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * The brief action info for the DDL statements.
    +     * `actions[i]` is the brief info for `statements[i]`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + public java.util.List< + ? extends com.google.spanner.admin.database.v1.DdlStatementActionInfoOrBuilder> + getActionsOrBuilderList() { + if (actionsBuilder_ != null) { + return actionsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(actions_); + } + } + + /** + * + * + *
    +     * The brief action info for the DDL statements.
    +     * `actions[i]` is the brief info for `statements[i]`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + public com.google.spanner.admin.database.v1.DdlStatementActionInfo.Builder addActionsBuilder() { + return internalGetActionsFieldBuilder() + .addBuilder( + com.google.spanner.admin.database.v1.DdlStatementActionInfo.getDefaultInstance()); + } + + /** + * + * + *
    +     * The brief action info for the DDL statements.
    +     * `actions[i]` is the brief info for `statements[i]`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + public com.google.spanner.admin.database.v1.DdlStatementActionInfo.Builder addActionsBuilder( + int index) { + return internalGetActionsFieldBuilder() + .addBuilder( + index, + com.google.spanner.admin.database.v1.DdlStatementActionInfo.getDefaultInstance()); + } + + /** + * + * + *
    +     * The brief action info for the DDL statements.
    +     * `actions[i]` is the brief info for `statements[i]`.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + public java.util.List + getActionsBuilderList() { + return internalGetActionsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.DdlStatementActionInfo, + com.google.spanner.admin.database.v1.DdlStatementActionInfo.Builder, + com.google.spanner.admin.database.v1.DdlStatementActionInfoOrBuilder> + internalGetActionsFieldBuilder() { + if (actionsBuilder_ == null) { + actionsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.DdlStatementActionInfo, + com.google.spanner.admin.database.v1.DdlStatementActionInfo.Builder, + com.google.spanner.admin.database.v1.DdlStatementActionInfoOrBuilder>( + actions_, ((bitField0_ & 0x00000020) != 0), getParentForChildren(), isClean()); + actions_ = null; + } + return actionsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata) + private static final com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata(); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateDatabaseDdlMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlMetadataOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlMetadataOrBuilder.java new file mode 100644 index 000000000000..fa89da04418a --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlMetadataOrBuilder.java @@ -0,0 +1,341 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface UpdateDatabaseDdlMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The database being modified.
    +   * 
    + * + * string database = 1 [(.google.api.resource_reference) = { ... } + * + * @return The database. + */ + java.lang.String getDatabase(); + + /** + * + * + *
    +   * The database being modified.
    +   * 
    + * + * string database = 1 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for database. + */ + com.google.protobuf.ByteString getDatabaseBytes(); + + /** + * + * + *
    +   * For an update this list contains all the statements. For an
    +   * individual statement, this list contains only that statement.
    +   * 
    + * + * repeated string statements = 2; + * + * @return A list containing the statements. + */ + java.util.List getStatementsList(); + + /** + * + * + *
    +   * For an update this list contains all the statements. For an
    +   * individual statement, this list contains only that statement.
    +   * 
    + * + * repeated string statements = 2; + * + * @return The count of statements. + */ + int getStatementsCount(); + + /** + * + * + *
    +   * For an update this list contains all the statements. For an
    +   * individual statement, this list contains only that statement.
    +   * 
    + * + * repeated string statements = 2; + * + * @param index The index of the element to return. + * @return The statements at the given index. + */ + java.lang.String getStatements(int index); + + /** + * + * + *
    +   * For an update this list contains all the statements. For an
    +   * individual statement, this list contains only that statement.
    +   * 
    + * + * repeated string statements = 2; + * + * @param index The index of the value to return. + * @return The bytes of the statements at the given index. + */ + com.google.protobuf.ByteString getStatementsBytes(int index); + + /** + * + * + *
    +   * Reports the commit timestamps of all statements that have
    +   * succeeded so far, where `commit_timestamps[i]` is the commit
    +   * timestamp for the statement `statements[i]`.
    +   * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + java.util.List getCommitTimestampsList(); + + /** + * + * + *
    +   * Reports the commit timestamps of all statements that have
    +   * succeeded so far, where `commit_timestamps[i]` is the commit
    +   * timestamp for the statement `statements[i]`.
    +   * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + com.google.protobuf.Timestamp getCommitTimestamps(int index); + + /** + * + * + *
    +   * Reports the commit timestamps of all statements that have
    +   * succeeded so far, where `commit_timestamps[i]` is the commit
    +   * timestamp for the statement `statements[i]`.
    +   * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + int getCommitTimestampsCount(); + + /** + * + * + *
    +   * Reports the commit timestamps of all statements that have
    +   * succeeded so far, where `commit_timestamps[i]` is the commit
    +   * timestamp for the statement `statements[i]`.
    +   * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + java.util.List + getCommitTimestampsOrBuilderList(); + + /** + * + * + *
    +   * Reports the commit timestamps of all statements that have
    +   * succeeded so far, where `commit_timestamps[i]` is the commit
    +   * timestamp for the statement `statements[i]`.
    +   * 
    + * + * repeated .google.protobuf.Timestamp commit_timestamps = 3; + */ + com.google.protobuf.TimestampOrBuilder getCommitTimestampsOrBuilder(int index); + + /** + * + * + *
    +   * Output only. When true, indicates that the operation is throttled e.g.
    +   * due to resource constraints. When resources become available the operation
    +   * will resume and this field will be false again.
    +   * 
    + * + * bool throttled = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The throttled. + */ + boolean getThrottled(); + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +   * operations. All DDL statements will have continuously updating progress,
    +   * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +   * `progress[i]` will have start time and end time populated with commit
    +   * timestamp of operation, as well as a progress of 100% once the operation
    +   * has completed.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + java.util.List getProgressList(); + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +   * operations. All DDL statements will have continuously updating progress,
    +   * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +   * `progress[i]` will have start time and end time populated with commit
    +   * timestamp of operation, as well as a progress of 100% once the operation
    +   * has completed.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + com.google.spanner.admin.database.v1.OperationProgress getProgress(int index); + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +   * operations. All DDL statements will have continuously updating progress,
    +   * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +   * `progress[i]` will have start time and end time populated with commit
    +   * timestamp of operation, as well as a progress of 100% once the operation
    +   * has completed.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + int getProgressCount(); + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +   * operations. All DDL statements will have continuously updating progress,
    +   * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +   * `progress[i]` will have start time and end time populated with commit
    +   * timestamp of operation, as well as a progress of 100% once the operation
    +   * has completed.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + java.util.List + getProgressOrBuilderList(); + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +   * operations. All DDL statements will have continuously updating progress,
    +   * and `progress[i]` is the operation progress for `statements[i]`. Also,
    +   * `progress[i]` will have start time and end time populated with commit
    +   * timestamp of operation, as well as a progress of 100% once the operation
    +   * has completed.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.OperationProgress progress = 5; + */ + com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgressOrBuilder(int index); + + /** + * + * + *
    +   * The brief action info for the DDL statements.
    +   * `actions[i]` is the brief info for `statements[i]`.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + java.util.List getActionsList(); + + /** + * + * + *
    +   * The brief action info for the DDL statements.
    +   * `actions[i]` is the brief info for `statements[i]`.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + com.google.spanner.admin.database.v1.DdlStatementActionInfo getActions(int index); + + /** + * + * + *
    +   * The brief action info for the DDL statements.
    +   * `actions[i]` is the brief info for `statements[i]`.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + int getActionsCount(); + + /** + * + * + *
    +   * The brief action info for the DDL statements.
    +   * `actions[i]` is the brief info for `statements[i]`.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + java.util.List + getActionsOrBuilderList(); + + /** + * + * + *
    +   * The brief action info for the DDL statements.
    +   * `actions[i]` is the brief info for `statements[i]`.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.DdlStatementActionInfo actions = 6; + */ + com.google.spanner.admin.database.v1.DdlStatementActionInfoOrBuilder getActionsOrBuilder( + int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlRequest.java new file mode 100644 index 000000000000..f93eacb8c0fd --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlRequest.java @@ -0,0 +1,1521 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Enqueues the given DDL statements to be applied, in order but not
    + * necessarily all at once, to the database schema at some point (or
    + * points) in the future. The server checks that the statements
    + * are executable (syntactically valid, name tables that exist, etc.)
    + * before enqueueing them, but they may still fail upon
    + * later execution (e.g., if a statement from another batch of
    + * statements is applied first and it conflicts in some way, or if
    + * there is some data-related problem like a `NULL` value in a column to
    + * which `NOT NULL` would be added). If a statement fails, all
    + * subsequent statements in the batch are automatically cancelled.
    + *
    + * Each batch of statements is assigned a name which can be used with
    + * the [Operations][google.longrunning.Operations] API to monitor
    + * progress. See the
    + * [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id]
    + * field for more details.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.UpdateDatabaseDdlRequest} + */ +@com.google.protobuf.Generated +public final class UpdateDatabaseDdlRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.UpdateDatabaseDdlRequest) + UpdateDatabaseDdlRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateDatabaseDdlRequest"); + } + + // Use UpdateDatabaseDdlRequest.newBuilder() to construct. + private UpdateDatabaseDdlRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateDatabaseDdlRequest() { + database_ = ""; + statements_ = com.google.protobuf.LazyStringArrayList.emptyList(); + operationId_ = ""; + protoDescriptors_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_UpdateDatabaseDdlRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_UpdateDatabaseDdlRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.class, + com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.Builder.class); + } + + public static final int DATABASE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object database_ = ""; + + /** + * + * + *
    +   * Required. The database to update.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + @java.lang.Override + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The database to update.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int STATEMENTS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList statements_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * Required. DDL statements to be applied to the database.
    +   * 
    + * + * repeated string statements = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the statements. + */ + public com.google.protobuf.ProtocolStringList getStatementsList() { + return statements_; + } + + /** + * + * + *
    +   * Required. DDL statements to be applied to the database.
    +   * 
    + * + * repeated string statements = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of statements. + */ + public int getStatementsCount() { + return statements_.size(); + } + + /** + * + * + *
    +   * Required. DDL statements to be applied to the database.
    +   * 
    + * + * repeated string statements = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The statements at the given index. + */ + public java.lang.String getStatements(int index) { + return statements_.get(index); + } + + /** + * + * + *
    +   * Required. DDL statements to be applied to the database.
    +   * 
    + * + * repeated string statements = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the statements at the given index. + */ + public com.google.protobuf.ByteString getStatementsBytes(int index) { + return statements_.getByteString(index); + } + + public static final int OPERATION_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object operationId_ = ""; + + /** + * + * + *
    +   * If empty, the new update request is assigned an
    +   * automatically-generated operation ID. Otherwise, `operation_id`
    +   * is used to construct the name of the resulting
    +   * [Operation][google.longrunning.Operation].
    +   *
    +   * Specifying an explicit operation ID simplifies determining
    +   * whether the statements were executed in the event that the
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +   * call is replayed, or the return value is otherwise lost: the
    +   * [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database]
    +   * and `operation_id` fields can be combined to form the
    +   * [name][google.longrunning.Operation.name] of the resulting
    +   * [longrunning.Operation][google.longrunning.Operation]:
    +   * `<database>/operations/<operation_id>`.
    +   *
    +   * `operation_id` should be unique within the database, and must be
    +   * a valid identifier: `[a-z][a-z0-9_]*`. Note that
    +   * automatically-generated operation IDs always begin with an
    +   * underscore. If the named operation already exists,
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +   * returns `ALREADY_EXISTS`.
    +   * 
    + * + * string operation_id = 3; + * + * @return The operationId. + */ + @java.lang.Override + public java.lang.String getOperationId() { + java.lang.Object ref = operationId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + operationId_ = s; + return s; + } + } + + /** + * + * + *
    +   * If empty, the new update request is assigned an
    +   * automatically-generated operation ID. Otherwise, `operation_id`
    +   * is used to construct the name of the resulting
    +   * [Operation][google.longrunning.Operation].
    +   *
    +   * Specifying an explicit operation ID simplifies determining
    +   * whether the statements were executed in the event that the
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +   * call is replayed, or the return value is otherwise lost: the
    +   * [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database]
    +   * and `operation_id` fields can be combined to form the
    +   * [name][google.longrunning.Operation.name] of the resulting
    +   * [longrunning.Operation][google.longrunning.Operation]:
    +   * `<database>/operations/<operation_id>`.
    +   *
    +   * `operation_id` should be unique within the database, and must be
    +   * a valid identifier: `[a-z][a-z0-9_]*`. Note that
    +   * automatically-generated operation IDs always begin with an
    +   * underscore. If the named operation already exists,
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +   * returns `ALREADY_EXISTS`.
    +   * 
    + * + * string operation_id = 3; + * + * @return The bytes for operationId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getOperationIdBytes() { + java.lang.Object ref = operationId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + operationId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROTO_DESCRIPTORS_FIELD_NUMBER = 4; + private com.google.protobuf.ByteString protoDescriptors_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements.
    +   * Contains a protobuf-serialized
    +   * [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
    +   * To generate it, [install](https://grpc.io/docs/protoc-installation/) and
    +   * run `protoc` with --include_imports and --descriptor_set_out. For example,
    +   * to generate for moon/shot/app.proto, run
    +   * ```
    +   * $protoc  --proto_path=/app_path --proto_path=/lib_path \
    +   * --include_imports \
    +   * --descriptor_set_out=descriptors.data \
    +   * moon/shot/app.proto
    +   * ```
    +   * For more details, see protobuffer [self
    +   * description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
    +   * 
    + * + * bytes proto_descriptors = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The protoDescriptors. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProtoDescriptors() { + return protoDescriptors_; + } + + public static final int THROUGHPUT_MODE_FIELD_NUMBER = 5; + private boolean throughputMode_ = false; + + /** + * + * + *
    +   * Optional. This field is exposed to be used by the Spanner Migration Tool.
    +   * For more details, see
    +   * [SMT](https://github.com/GoogleCloudPlatform/spanner-migration-tool).
    +   * 
    + * + * bool throughput_mode = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The throughputMode. + */ + @java.lang.Override + public boolean getThroughputMode() { + return throughputMode_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, database_); + } + for (int i = 0; i < statements_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, statements_.getRaw(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(operationId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, operationId_); + } + if (!protoDescriptors_.isEmpty()) { + output.writeBytes(4, protoDescriptors_); + } + if (throughputMode_ != false) { + output.writeBool(5, throughputMode_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, database_); + } + { + int dataSize = 0; + for (int i = 0; i < statements_.size(); i++) { + dataSize += computeStringSizeNoTag(statements_.getRaw(i)); + } + size += dataSize; + size += 1 * getStatementsList().size(); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(operationId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, operationId_); + } + if (!protoDescriptors_.isEmpty()) { + size += 
com.google.protobuf.CodedOutputStream.computeBytesSize(4, protoDescriptors_); + } + if (throughputMode_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(5, throughputMode_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest other = + (com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest) obj; + + if (!getDatabase().equals(other.getDatabase())) return false; + if (!getStatementsList().equals(other.getStatementsList())) return false; + if (!getOperationId().equals(other.getOperationId())) return false; + if (!getProtoDescriptors().equals(other.getProtoDescriptors())) return false; + if (getThroughputMode() != other.getThroughputMode()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getDatabase().hashCode(); + if (getStatementsCount() > 0) { + hash = (37 * hash) + STATEMENTS_FIELD_NUMBER; + hash = (53 * hash) + getStatementsList().hashCode(); + } + hash = (37 * hash) + OPERATION_ID_FIELD_NUMBER; + hash = (53 * hash) + getOperationId().hashCode(); + hash = (37 * hash) + PROTO_DESCRIPTORS_FIELD_NUMBER; + hash = (53 * hash) + getProtoDescriptors().hashCode(); + hash = (37 * hash) + THROUGHPUT_MODE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getThroughputMode()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; 
+ } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + 
return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Enqueues the given DDL statements to be applied, in order but not
    +   * necessarily all at once, to the database schema at some point (or
    +   * points) in the future. The server checks that the statements
    +   * are executable (syntactically valid, name tables that exist, etc.)
    +   * before enqueueing them, but they may still fail upon
    +   * later execution (e.g., if a statement from another batch of
    +   * statements is applied first and it conflicts in some way, or if
    +   * there is some data-related problem like a `NULL` value in a column to
    +   * which `NOT NULL` would be added). If a statement fails, all
    +   * subsequent statements in the batch are automatically cancelled.
    +   *
    +   * Each batch of statements is assigned a name which can be used with
    +   * the [Operations][google.longrunning.Operations] API to monitor
    +   * progress. See the
    +   * [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id]
    +   * field for more details.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.UpdateDatabaseDdlRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.UpdateDatabaseDdlRequest) + com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_UpdateDatabaseDdlRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_UpdateDatabaseDdlRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.class, + com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + database_ = ""; + statements_ = com.google.protobuf.LazyStringArrayList.emptyList(); + operationId_ = ""; + protoDescriptors_ = com.google.protobuf.ByteString.EMPTY; + throughputMode_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_UpdateDatabaseDdlRequest_descriptor; + } + + @java.lang.Override + public 
com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest build() { + com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest buildPartial() { + com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest result = + new com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.database_ = database_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + statements_.makeImmutable(); + result.statements_ = statements_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.operationId_ = operationId_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.protoDescriptors_ = protoDescriptors_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.throughputMode_ = throughputMode_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest other) { + if (other + == com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.getDefaultInstance()) 
+ return this; + if (!other.getDatabase().isEmpty()) { + database_ = other.database_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.statements_.isEmpty()) { + if (statements_.isEmpty()) { + statements_ = other.statements_; + bitField0_ |= 0x00000002; + } else { + ensureStatementsIsMutable(); + statements_.addAll(other.statements_); + } + onChanged(); + } + if (!other.getOperationId().isEmpty()) { + operationId_ = other.operationId_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getProtoDescriptors().isEmpty()) { + setProtoDescriptors(other.getProtoDescriptors()); + } + if (other.getThroughputMode() != false) { + setThroughputMode(other.getThroughputMode()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + database_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureStatementsIsMutable(); + statements_.add(s); + break; + } // case 18 + case 26: + { + operationId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + protoDescriptors_ = input.readBytes(); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 40: + { + throughputMode_ = input.readBool(); + bitField0_ |= 0x00000010; + break; + } // case 40 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // 
default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object database_ = ""; + + /** + * + * + *
    +     * Required. The database to update.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The database to update.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The database to update.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The database to set. + * @return This builder for chaining. + */ + public Builder setDatabase(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The database to update.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearDatabase() { + database_ = getDefaultInstance().getDatabase(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The database to update.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for database to set. + * @return This builder for chaining. + */ + public Builder setDatabaseBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList statements_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureStatementsIsMutable() { + if (!statements_.isModifiable()) { + statements_ = new com.google.protobuf.LazyStringArrayList(statements_); + } + bitField0_ |= 0x00000002; + } + + /** + * + * + *
    +     * Required. DDL statements to be applied to the database.
    +     * 
    + * + * repeated string statements = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the statements. + */ + public com.google.protobuf.ProtocolStringList getStatementsList() { + statements_.makeImmutable(); + return statements_; + } + + /** + * + * + *
    +     * Required. DDL statements to be applied to the database.
    +     * 
    + * + * repeated string statements = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of statements. + */ + public int getStatementsCount() { + return statements_.size(); + } + + /** + * + * + *
    +     * Required. DDL statements to be applied to the database.
    +     * 
    + * + * repeated string statements = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The statements at the given index. + */ + public java.lang.String getStatements(int index) { + return statements_.get(index); + } + + /** + * + * + *
    +     * Required. DDL statements to be applied to the database.
    +     * 
    + * + * repeated string statements = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the statements at the given index. + */ + public com.google.protobuf.ByteString getStatementsBytes(int index) { + return statements_.getByteString(index); + } + + /** + * + * + *
    +     * Required. DDL statements to be applied to the database.
    +     * 
    + * + * repeated string statements = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index to set the value at. + * @param value The statements to set. + * @return This builder for chaining. + */ + public Builder setStatements(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatementsIsMutable(); + statements_.set(index, value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. DDL statements to be applied to the database.
    +     * 
    + * + * repeated string statements = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The statements to add. + * @return This builder for chaining. + */ + public Builder addStatements(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatementsIsMutable(); + statements_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. DDL statements to be applied to the database.
    +     * 
    + * + * repeated string statements = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param values The statements to add. + * @return This builder for chaining. + */ + public Builder addAllStatements(java.lang.Iterable values) { + ensureStatementsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, statements_); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. DDL statements to be applied to the database.
    +     * 
    + * + * repeated string statements = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearStatements() { + statements_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. DDL statements to be applied to the database.
    +     * 
    + * + * repeated string statements = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes of the statements to add. + * @return This builder for chaining. + */ + public Builder addStatementsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureStatementsIsMutable(); + statements_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object operationId_ = ""; + + /** + * + * + *
    +     * If empty, the new update request is assigned an
    +     * automatically-generated operation ID. Otherwise, `operation_id`
    +     * is used to construct the name of the resulting
    +     * [Operation][google.longrunning.Operation].
    +     *
    +     * Specifying an explicit operation ID simplifies determining
    +     * whether the statements were executed in the event that the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * call is replayed, or the return value is otherwise lost: the
    +     * [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database]
    +     * and `operation_id` fields can be combined to form the
    +     * [name][google.longrunning.Operation.name] of the resulting
    +     * [longrunning.Operation][google.longrunning.Operation]:
    +     * `<database>/operations/<operation_id>`.
    +     *
    +     * `operation_id` should be unique within the database, and must be
    +     * a valid identifier: `[a-z][a-z0-9_]*`. Note that
    +     * automatically-generated operation IDs always begin with an
    +     * underscore. If the named operation already exists,
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * returns `ALREADY_EXISTS`.
    +     * 
    + * + * string operation_id = 3; + * + * @return The operationId. + */ + public java.lang.String getOperationId() { + java.lang.Object ref = operationId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + operationId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If empty, the new update request is assigned an
    +     * automatically-generated operation ID. Otherwise, `operation_id`
    +     * is used to construct the name of the resulting
    +     * [Operation][google.longrunning.Operation].
    +     *
    +     * Specifying an explicit operation ID simplifies determining
    +     * whether the statements were executed in the event that the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * call is replayed, or the return value is otherwise lost: the
    +     * [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database]
    +     * and `operation_id` fields can be combined to form the
    +     * [name][google.longrunning.Operation.name] of the resulting
    +     * [longrunning.Operation][google.longrunning.Operation]:
    +     * `<database>/operations/<operation_id>`.
    +     *
    +     * `operation_id` should be unique within the database, and must be
    +     * a valid identifier: `[a-z][a-z0-9_]*`. Note that
    +     * automatically-generated operation IDs always begin with an
    +     * underscore. If the named operation already exists,
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * returns `ALREADY_EXISTS`.
    +     * 
    + * + * string operation_id = 3; + * + * @return The bytes for operationId. + */ + public com.google.protobuf.ByteString getOperationIdBytes() { + java.lang.Object ref = operationId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + operationId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If empty, the new update request is assigned an
    +     * automatically-generated operation ID. Otherwise, `operation_id`
    +     * is used to construct the name of the resulting
    +     * [Operation][google.longrunning.Operation].
    +     *
    +     * Specifying an explicit operation ID simplifies determining
    +     * whether the statements were executed in the event that the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * call is replayed, or the return value is otherwise lost: the
    +     * [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database]
    +     * and `operation_id` fields can be combined to form the
    +     * [name][google.longrunning.Operation.name] of the resulting
    +     * [longrunning.Operation][google.longrunning.Operation]:
    +     * `<database>/operations/<operation_id>`.
    +     *
    +     * `operation_id` should be unique within the database, and must be
    +     * a valid identifier: `[a-z][a-z0-9_]*`. Note that
    +     * automatically-generated operation IDs always begin with an
    +     * underscore. If the named operation already exists,
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * returns `ALREADY_EXISTS`.
    +     * 
    + * + * string operation_id = 3; + * + * @param value The operationId to set. + * @return This builder for chaining. + */ + public Builder setOperationId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + operationId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If empty, the new update request is assigned an
    +     * automatically-generated operation ID. Otherwise, `operation_id`
    +     * is used to construct the name of the resulting
    +     * [Operation][google.longrunning.Operation].
    +     *
    +     * Specifying an explicit operation ID simplifies determining
    +     * whether the statements were executed in the event that the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * call is replayed, or the return value is otherwise lost: the
    +     * [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database]
    +     * and `operation_id` fields can be combined to form the
    +     * [name][google.longrunning.Operation.name] of the resulting
    +     * [longrunning.Operation][google.longrunning.Operation]:
    +     * `<database>/operations/<operation_id>`.
    +     *
    +     * `operation_id` should be unique within the database, and must be
    +     * a valid identifier: `[a-z][a-z0-9_]*`. Note that
    +     * automatically-generated operation IDs always begin with an
    +     * underscore. If the named operation already exists,
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * returns `ALREADY_EXISTS`.
    +     * 
    + * + * string operation_id = 3; + * + * @return This builder for chaining. + */ + public Builder clearOperationId() { + operationId_ = getDefaultInstance().getOperationId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If empty, the new update request is assigned an
    +     * automatically-generated operation ID. Otherwise, `operation_id`
    +     * is used to construct the name of the resulting
    +     * [Operation][google.longrunning.Operation].
    +     *
    +     * Specifying an explicit operation ID simplifies determining
    +     * whether the statements were executed in the event that the
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * call is replayed, or the return value is otherwise lost: the
    +     * [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database]
    +     * and `operation_id` fields can be combined to form the
    +     * [name][google.longrunning.Operation.name] of the resulting
    +     * [longrunning.Operation][google.longrunning.Operation]:
    +     * `<database>/operations/<operation_id>`.
    +     *
    +     * `operation_id` should be unique within the database, and must be
    +     * a valid identifier: `[a-z][a-z0-9_]*`. Note that
    +     * automatically-generated operation IDs always begin with an
    +     * underscore. If the named operation already exists,
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +     * returns `ALREADY_EXISTS`.
    +     * 
    + * + * string operation_id = 3; + * + * @param value The bytes for operationId to set. + * @return This builder for chaining. + */ + public Builder setOperationIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + operationId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString protoDescriptors_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements.
    +     * Contains a protobuf-serialized
    +     * [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
    +     * To generate it, [install](https://grpc.io/docs/protoc-installation/) and
    +     * run `protoc` with --include_imports and --descriptor_set_out. For example,
    +     * to generate for moon/shot/app.proto, run
    +     * ```
    +     * $protoc  --proto_path=/app_path --proto_path=/lib_path \
    +     * --include_imports \
    +     * --descriptor_set_out=descriptors.data \
    +     * moon/shot/app.proto
    +     * ```
    +     * For more details, see protobuffer [self
    +     * description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
    +     * 
    + * + * bytes proto_descriptors = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The protoDescriptors. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProtoDescriptors() { + return protoDescriptors_; + } + + /** + * + * + *
    +     * Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements.
    +     * Contains a protobuf-serialized
    +     * [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
    +     * To generate it, [install](https://grpc.io/docs/protoc-installation/) and
    +     * run `protoc` with --include_imports and --descriptor_set_out. For example,
    +     * to generate for moon/shot/app.proto, run
    +     * ```
    +     * $protoc  --proto_path=/app_path --proto_path=/lib_path \
    +     * --include_imports \
    +     * --descriptor_set_out=descriptors.data \
    +     * moon/shot/app.proto
    +     * ```
    +     * For more details, see protobuffer [self
    +     * description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
    +     * 
    + * + * bytes proto_descriptors = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The protoDescriptors to set. + * @return This builder for chaining. + */ + public Builder setProtoDescriptors(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + protoDescriptors_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements.
    +     * Contains a protobuf-serialized
    +     * [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
    +     * To generate it, [install](https://grpc.io/docs/protoc-installation/) and
    +     * run `protoc` with --include_imports and --descriptor_set_out. For example,
    +     * to generate for moon/shot/app.proto, run
    +     * ```
    +     * $protoc  --proto_path=/app_path --proto_path=/lib_path \
    +     * --include_imports \
    +     * --descriptor_set_out=descriptors.data \
    +     * moon/shot/app.proto
    +     * ```
    +     * For more details, see protobuffer [self
    +     * description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
    +     * 
    + * + * bytes proto_descriptors = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearProtoDescriptors() { + bitField0_ = (bitField0_ & ~0x00000008); + protoDescriptors_ = getDefaultInstance().getProtoDescriptors(); + onChanged(); + return this; + } + + private boolean throughputMode_; + + /** + * + * + *
    +     * Optional. This field is exposed to be used by the Spanner Migration Tool.
    +     * For more details, see
    +     * [SMT](https://github.com/GoogleCloudPlatform/spanner-migration-tool).
    +     * 
    + * + * bool throughput_mode = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The throughputMode. + */ + @java.lang.Override + public boolean getThroughputMode() { + return throughputMode_; + } + + /** + * + * + *
    +     * Optional. This field is exposed to be used by the Spanner Migration Tool.
    +     * For more details, see
    +     * [SMT](https://github.com/GoogleCloudPlatform/spanner-migration-tool).
    +     * 
    + * + * bool throughput_mode = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The throughputMode to set. + * @return This builder for chaining. + */ + public Builder setThroughputMode(boolean value) { + + throughputMode_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. This field is exposed to be used by the Spanner Migration Tool.
    +     * For more details, see
    +     * [SMT](https://github.com/GoogleCloudPlatform/spanner-migration-tool).
    +     * 
    + * + * bool throughput_mode = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearThroughputMode() { + bitField0_ = (bitField0_ & ~0x00000010); + throughputMode_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.UpdateDatabaseDdlRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.UpdateDatabaseDdlRequest) + private static final com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest(); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateDatabaseDdlRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlRequestOrBuilder.java new file mode 100644 index 000000000000..c73ee6b7d805 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlRequestOrBuilder.java @@ -0,0 +1,219 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface UpdateDatabaseDdlRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.UpdateDatabaseDdlRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The database to update.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + java.lang.String getDatabase(); + + /** + * + * + *
    +   * Required. The database to update.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + com.google.protobuf.ByteString getDatabaseBytes(); + + /** + * + * + *
    +   * Required. DDL statements to be applied to the database.
    +   * 
    + * + * repeated string statements = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the statements. + */ + java.util.List getStatementsList(); + + /** + * + * + *
    +   * Required. DDL statements to be applied to the database.
    +   * 
    + * + * repeated string statements = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of statements. + */ + int getStatementsCount(); + + /** + * + * + *
    +   * Required. DDL statements to be applied to the database.
    +   * 
    + * + * repeated string statements = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The statements at the given index. + */ + java.lang.String getStatements(int index); + + /** + * + * + *
    +   * Required. DDL statements to be applied to the database.
    +   * 
    + * + * repeated string statements = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the statements at the given index. + */ + com.google.protobuf.ByteString getStatementsBytes(int index); + + /** + * + * + *
    +   * If empty, the new update request is assigned an
    +   * automatically-generated operation ID. Otherwise, `operation_id`
    +   * is used to construct the name of the resulting
    +   * [Operation][google.longrunning.Operation].
    +   *
    +   * Specifying an explicit operation ID simplifies determining
    +   * whether the statements were executed in the event that the
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +   * call is replayed, or the return value is otherwise lost: the
    +   * [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database]
    +   * and `operation_id` fields can be combined to form the
    +   * [name][google.longrunning.Operation.name] of the resulting
    +   * [longrunning.Operation][google.longrunning.Operation]:
    +   * `<database>/operations/<operation_id>`.
    +   *
    +   * `operation_id` should be unique within the database, and must be
    +   * a valid identifier: `[a-z][a-z0-9_]*`. Note that
    +   * automatically-generated operation IDs always begin with an
    +   * underscore. If the named operation already exists,
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +   * returns `ALREADY_EXISTS`.
    +   * 
    + * + * string operation_id = 3; + * + * @return The operationId. + */ + java.lang.String getOperationId(); + + /** + * + * + *
    +   * If empty, the new update request is assigned an
    +   * automatically-generated operation ID. Otherwise, `operation_id`
    +   * is used to construct the name of the resulting
    +   * [Operation][google.longrunning.Operation].
    +   *
    +   * Specifying an explicit operation ID simplifies determining
    +   * whether the statements were executed in the event that the
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +   * call is replayed, or the return value is otherwise lost: the
    +   * [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database]
    +   * and `operation_id` fields can be combined to form the
    +   * [name][google.longrunning.Operation.name] of the resulting
    +   * [longrunning.Operation][google.longrunning.Operation]:
    +   * `<database>/operations/<operation_id>`.
    +   *
    +   * `operation_id` should be unique within the database, and must be
    +   * a valid identifier: `[a-z][a-z0-9_]*`. Note that
    +   * automatically-generated operation IDs always begin with an
    +   * underscore. If the named operation already exists,
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
    +   * returns `ALREADY_EXISTS`.
    +   * 
    + * + * string operation_id = 3; + * + * @return The bytes for operationId. + */ + com.google.protobuf.ByteString getOperationIdBytes(); + + /** + * + * + *
    +   * Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements.
    +   * Contains a protobuf-serialized
    +   * [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
    +   * To generate it, [install](https://grpc.io/docs/protoc-installation/) and
    +   * run `protoc` with --include_imports and --descriptor_set_out. For example,
    +   * to generate for moon/shot/app.proto, run
    +   * ```
    +   * $protoc  --proto_path=/app_path --proto_path=/lib_path \
    +   * --include_imports \
    +   * --descriptor_set_out=descriptors.data \
    +   * moon/shot/app.proto
    +   * ```
    +   * For more details, see protobuffer [self
    +   * description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
    +   * 
    + * + * bytes proto_descriptors = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The protoDescriptors. + */ + com.google.protobuf.ByteString getProtoDescriptors(); + + /** + * + * + *
    +   * Optional. This field is exposed to be used by the Spanner Migration Tool.
    +   * For more details, see
    +   * [SMT](https://github.com/GoogleCloudPlatform/spanner-migration-tool).
    +   * 
    + * + * bool throughput_mode = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The throughputMode. + */ + boolean getThroughputMode(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseMetadata.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseMetadata.java new file mode 100644 index 000000000000..a9b17bd93f71 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseMetadata.java @@ -0,0 +1,1312 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Metadata type for the operation returned by
    + * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.UpdateDatabaseMetadata} + */ +@com.google.protobuf.Generated +public final class UpdateDatabaseMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.UpdateDatabaseMetadata) + UpdateDatabaseMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateDatabaseMetadata"); + } + + // Use UpdateDatabaseMetadata.newBuilder() to construct. + private UpdateDatabaseMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateDatabaseMetadata() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_UpdateDatabaseMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_UpdateDatabaseMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.UpdateDatabaseMetadata.class, + com.google.spanner.admin.database.v1.UpdateDatabaseMetadata.Builder.class); + } + + private int bitField0_; + public static final int REQUEST_FIELD_NUMBER = 1; + private com.google.spanner.admin.database.v1.UpdateDatabaseRequest request_; + + /** + * + * + *
    +   * The request for
    +   * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
    +   * 
    + * + * .google.spanner.admin.database.v1.UpdateDatabaseRequest request = 1; + * + * @return Whether the request field is set. + */ + @java.lang.Override + public boolean hasRequest() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The request for
    +   * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
    +   * 
    + * + * .google.spanner.admin.database.v1.UpdateDatabaseRequest request = 1; + * + * @return The request. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateDatabaseRequest getRequest() { + return request_ == null + ? com.google.spanner.admin.database.v1.UpdateDatabaseRequest.getDefaultInstance() + : request_; + } + + /** + * + * + *
    +   * The request for
    +   * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
    +   * 
    + * + * .google.spanner.admin.database.v1.UpdateDatabaseRequest request = 1; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateDatabaseRequestOrBuilder getRequestOrBuilder() { + return request_ == null + ? com.google.spanner.admin.database.v1.UpdateDatabaseRequest.getDefaultInstance() + : request_; + } + + public static final int PROGRESS_FIELD_NUMBER = 2; + private com.google.spanner.admin.database.v1.OperationProgress progress_; + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + * + * @return Whether the progress field is set. + */ + @java.lang.Override + public boolean hasProgress() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + * + * @return The progress. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.OperationProgress getProgress() { + return progress_ == null + ? com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance() + : progress_; + } + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgressOrBuilder() { + return progress_ == null + ? com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance() + : progress_; + } + + public static final int CANCEL_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp cancelTime_; + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is best-effort).
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + @java.lang.Override + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is best-effort).
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCancelTime() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is best-effort).
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getRequest()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getProgress()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(3, getCancelTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getRequest()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getProgress()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getCancelTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.UpdateDatabaseMetadata)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.UpdateDatabaseMetadata other = + (com.google.spanner.admin.database.v1.UpdateDatabaseMetadata) obj; + + if (hasRequest() != 
other.hasRequest()) return false; + if (hasRequest()) { + if (!getRequest().equals(other.getRequest())) return false; + } + if (hasProgress() != other.hasProgress()) return false; + if (hasProgress()) { + if (!getProgress().equals(other.getProgress())) return false; + } + if (hasCancelTime() != other.hasCancelTime()) return false; + if (hasCancelTime()) { + if (!getCancelTime().equals(other.getCancelTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasRequest()) { + hash = (37 * hash) + REQUEST_FIELD_NUMBER; + hash = (53 * hash) + getRequest().hashCode(); + } + if (hasProgress()) { + hash = (37 * hash) + PROGRESS_FIELD_NUMBER; + hash = (53 * hash) + getProgress().hashCode(); + } + if (hasCancelTime()) { + hash = (37 * hash) + CANCEL_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCancelTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseMetadata parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseMetadata parseFrom( 
+ com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseMetadata parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseMetadata parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseMetadata parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseMetadata parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.UpdateDatabaseMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Metadata type for the operation returned by
    +   * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.UpdateDatabaseMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.UpdateDatabaseMetadata) + com.google.spanner.admin.database.v1.UpdateDatabaseMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_UpdateDatabaseMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_UpdateDatabaseMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.UpdateDatabaseMetadata.class, + com.google.spanner.admin.database.v1.UpdateDatabaseMetadata.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.UpdateDatabaseMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetRequestFieldBuilder(); + internalGetProgressFieldBuilder(); + internalGetCancelTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + request_ = null; + if (requestBuilder_ != null) { + requestBuilder_.dispose(); + requestBuilder_ = null; + } + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + cancelTime_ = null; + if 
(cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_UpdateDatabaseMetadata_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateDatabaseMetadata getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.UpdateDatabaseMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateDatabaseMetadata build() { + com.google.spanner.admin.database.v1.UpdateDatabaseMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateDatabaseMetadata buildPartial() { + com.google.spanner.admin.database.v1.UpdateDatabaseMetadata result = + new com.google.spanner.admin.database.v1.UpdateDatabaseMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.UpdateDatabaseMetadata result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.request_ = requestBuilder_ == null ? request_ : requestBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.progress_ = progressBuilder_ == null ? progress_ : progressBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.cancelTime_ = cancelTimeBuilder_ == null ? 
cancelTime_ : cancelTimeBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.UpdateDatabaseMetadata) { + return mergeFrom((com.google.spanner.admin.database.v1.UpdateDatabaseMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.UpdateDatabaseMetadata other) { + if (other == com.google.spanner.admin.database.v1.UpdateDatabaseMetadata.getDefaultInstance()) + return this; + if (other.hasRequest()) { + mergeRequest(other.getRequest()); + } + if (other.hasProgress()) { + mergeProgress(other.getProgress()); + } + if (other.hasCancelTime()) { + mergeCancelTime(other.getCancelTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(internalGetRequestFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetProgressFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetCancelTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + 
done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.admin.database.v1.UpdateDatabaseRequest request_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.UpdateDatabaseRequest, + com.google.spanner.admin.database.v1.UpdateDatabaseRequest.Builder, + com.google.spanner.admin.database.v1.UpdateDatabaseRequestOrBuilder> + requestBuilder_; + + /** + * + * + *
    +     * The request for
    +     * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
    +     * 
    + * + * .google.spanner.admin.database.v1.UpdateDatabaseRequest request = 1; + * + * @return Whether the request field is set. + */ + public boolean hasRequest() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * The request for
    +     * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
    +     * 
    + * + * .google.spanner.admin.database.v1.UpdateDatabaseRequest request = 1; + * + * @return The request. + */ + public com.google.spanner.admin.database.v1.UpdateDatabaseRequest getRequest() { + if (requestBuilder_ == null) { + return request_ == null + ? com.google.spanner.admin.database.v1.UpdateDatabaseRequest.getDefaultInstance() + : request_; + } else { + return requestBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The request for
    +     * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
    +     * 
    + * + * .google.spanner.admin.database.v1.UpdateDatabaseRequest request = 1; + */ + public Builder setRequest(com.google.spanner.admin.database.v1.UpdateDatabaseRequest value) { + if (requestBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + request_ = value; + } else { + requestBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The request for
    +     * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
    +     * 
    + * + * .google.spanner.admin.database.v1.UpdateDatabaseRequest request = 1; + */ + public Builder setRequest( + com.google.spanner.admin.database.v1.UpdateDatabaseRequest.Builder builderForValue) { + if (requestBuilder_ == null) { + request_ = builderForValue.build(); + } else { + requestBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The request for
    +     * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
    +     * 
    + * + * .google.spanner.admin.database.v1.UpdateDatabaseRequest request = 1; + */ + public Builder mergeRequest(com.google.spanner.admin.database.v1.UpdateDatabaseRequest value) { + if (requestBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && request_ != null + && request_ + != com.google.spanner.admin.database.v1.UpdateDatabaseRequest + .getDefaultInstance()) { + getRequestBuilder().mergeFrom(value); + } else { + request_ = value; + } + } else { + requestBuilder_.mergeFrom(value); + } + if (request_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The request for
    +     * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
    +     * 
    + * + * .google.spanner.admin.database.v1.UpdateDatabaseRequest request = 1; + */ + public Builder clearRequest() { + bitField0_ = (bitField0_ & ~0x00000001); + request_ = null; + if (requestBuilder_ != null) { + requestBuilder_.dispose(); + requestBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The request for
    +     * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
    +     * 
    + * + * .google.spanner.admin.database.v1.UpdateDatabaseRequest request = 1; + */ + public com.google.spanner.admin.database.v1.UpdateDatabaseRequest.Builder getRequestBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetRequestFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The request for
    +     * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
    +     * 
    + * + * .google.spanner.admin.database.v1.UpdateDatabaseRequest request = 1; + */ + public com.google.spanner.admin.database.v1.UpdateDatabaseRequestOrBuilder + getRequestOrBuilder() { + if (requestBuilder_ != null) { + return requestBuilder_.getMessageOrBuilder(); + } else { + return request_ == null + ? com.google.spanner.admin.database.v1.UpdateDatabaseRequest.getDefaultInstance() + : request_; + } + } + + /** + * + * + *
    +     * The request for
    +     * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
    +     * 
    + * + * .google.spanner.admin.database.v1.UpdateDatabaseRequest request = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.UpdateDatabaseRequest, + com.google.spanner.admin.database.v1.UpdateDatabaseRequest.Builder, + com.google.spanner.admin.database.v1.UpdateDatabaseRequestOrBuilder> + internalGetRequestFieldBuilder() { + if (requestBuilder_ == null) { + requestBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.UpdateDatabaseRequest, + com.google.spanner.admin.database.v1.UpdateDatabaseRequest.Builder, + com.google.spanner.admin.database.v1.UpdateDatabaseRequestOrBuilder>( + getRequest(), getParentForChildren(), isClean()); + request_ = null; + } + return requestBuilder_; + } + + private com.google.spanner.admin.database.v1.OperationProgress progress_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.OperationProgress, + com.google.spanner.admin.database.v1.OperationProgress.Builder, + com.google.spanner.admin.database.v1.OperationProgressOrBuilder> + progressBuilder_; + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + * + * @return Whether the progress field is set. + */ + public boolean hasProgress() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + * + * @return The progress. + */ + public com.google.spanner.admin.database.v1.OperationProgress getProgress() { + if (progressBuilder_ == null) { + return progress_ == null + ? com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance() + : progress_; + } else { + return progressBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + */ + public Builder setProgress(com.google.spanner.admin.database.v1.OperationProgress value) { + if (progressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + progress_ = value; + } else { + progressBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + */ + public Builder setProgress( + com.google.spanner.admin.database.v1.OperationProgress.Builder builderForValue) { + if (progressBuilder_ == null) { + progress_ = builderForValue.build(); + } else { + progressBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + */ + public Builder mergeProgress(com.google.spanner.admin.database.v1.OperationProgress value) { + if (progressBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && progress_ != null + && progress_ + != com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance()) { + getProgressBuilder().mergeFrom(value); + } else { + progress_ = value; + } + } else { + progressBuilder_.mergeFrom(value); + } + if (progress_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + */ + public Builder clearProgress() { + bitField0_ = (bitField0_ & ~0x00000002); + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + */ + public com.google.spanner.admin.database.v1.OperationProgress.Builder getProgressBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetProgressFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + */ + public com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgressOrBuilder() { + if (progressBuilder_ != null) { + return progressBuilder_.getMessageOrBuilder(); + } else { + return progress_ == null + ? com.google.spanner.admin.database.v1.OperationProgress.getDefaultInstance() + : progress_; + } + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.OperationProgress, + com.google.spanner.admin.database.v1.OperationProgress.Builder, + com.google.spanner.admin.database.v1.OperationProgressOrBuilder> + internalGetProgressFieldBuilder() { + if (progressBuilder_ == null) { + progressBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.OperationProgress, + com.google.spanner.admin.database.v1.OperationProgress.Builder, + com.google.spanner.admin.database.v1.OperationProgressOrBuilder>( + getProgress(), getParentForChildren(), isClean()); + progress_ = null; + } + return progressBuilder_; + } + + private com.google.protobuf.Timestamp cancelTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + cancelTimeBuilder_; + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is best-effort).
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is best-effort).
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + public com.google.protobuf.Timestamp getCancelTime() { + if (cancelTimeBuilder_ == null) { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } else { + return cancelTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is best-effort).
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cancelTime_ = value; + } else { + cancelTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is best-effort).
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (cancelTimeBuilder_ == null) { + cancelTime_ = builderForValue.build(); + } else { + cancelTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is best-effort).
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder mergeCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && cancelTime_ != null + && cancelTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCancelTimeBuilder().mergeFrom(value); + } else { + cancelTime_ = value; + } + } else { + cancelTimeBuilder_.mergeFrom(value); + } + if (cancelTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is best-effort).
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder clearCancelTime() { + bitField0_ = (bitField0_ & ~0x00000004); + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is best-effort).
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public com.google.protobuf.Timestamp.Builder getCancelTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetCancelTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is best-effort).
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + if (cancelTimeBuilder_ != null) { + return cancelTimeBuilder_.getMessageOrBuilder(); + } else { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is best-effort).
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCancelTimeFieldBuilder() { + if (cancelTimeBuilder_ == null) { + cancelTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCancelTime(), getParentForChildren(), isClean()); + cancelTime_ = null; + } + return cancelTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.UpdateDatabaseMetadata) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.UpdateDatabaseMetadata) + private static final com.google.spanner.admin.database.v1.UpdateDatabaseMetadata DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.UpdateDatabaseMetadata(); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateDatabaseMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateDatabaseMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseMetadataOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseMetadataOrBuilder.java new file mode 100644 index 000000000000..671e4cf94f5c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseMetadataOrBuilder.java @@ -0,0 +1,151 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface UpdateDatabaseMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.UpdateDatabaseMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The request for
    +   * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
    +   * 
    + * + * .google.spanner.admin.database.v1.UpdateDatabaseRequest request = 1; + * + * @return Whether the request field is set. + */ + boolean hasRequest(); + + /** + * + * + *
    +   * The request for
    +   * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
    +   * 
    + * + * .google.spanner.admin.database.v1.UpdateDatabaseRequest request = 1; + * + * @return The request. + */ + com.google.spanner.admin.database.v1.UpdateDatabaseRequest getRequest(); + + /** + * + * + *
    +   * The request for
    +   * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
    +   * 
    + * + * .google.spanner.admin.database.v1.UpdateDatabaseRequest request = 1; + */ + com.google.spanner.admin.database.v1.UpdateDatabaseRequestOrBuilder getRequestOrBuilder(); + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + * + * @return Whether the progress field is set. + */ + boolean hasProgress(); + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + * + * @return The progress. + */ + com.google.spanner.admin.database.v1.OperationProgress getProgress(); + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.database.v1.OperationProgress progress = 2; + */ + com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgressOrBuilder(); + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is best-effort).
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + boolean hasCancelTime(); + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is best-effort).
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + com.google.protobuf.Timestamp getCancelTime(); + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is best-effort).
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseRequest.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseRequest.java new file mode 100644 index 000000000000..a0d0ea155f59 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseRequest.java @@ -0,0 +1,1051 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.UpdateDatabaseRequest} + */ +@com.google.protobuf.Generated +public final class UpdateDatabaseRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.UpdateDatabaseRequest) + UpdateDatabaseRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateDatabaseRequest"); + } + + // Use UpdateDatabaseRequest.newBuilder() to construct. + private UpdateDatabaseRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateDatabaseRequest() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_UpdateDatabaseRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_UpdateDatabaseRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.UpdateDatabaseRequest.class, + com.google.spanner.admin.database.v1.UpdateDatabaseRequest.Builder.class); + } + + private int bitField0_; + public static final int DATABASE_FIELD_NUMBER = 1; + private com.google.spanner.admin.database.v1.Database database_; + + /** + * + * + *
    +   * Required. The database to update.
    +   * The `name` field of the database is of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Database database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the database field is set. + */ + @java.lang.Override + public boolean hasDatabase() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Required. The database to update.
    +   * The `name` field of the database is of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Database database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The database. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.Database getDatabase() { + return database_ == null + ? com.google.spanner.admin.database.v1.Database.getDefaultInstance() + : database_; + } + + /** + * + * + *
    +   * Required. The database to update.
    +   * The `name` field of the database is of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Database database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.DatabaseOrBuilder getDatabaseOrBuilder() { + return database_ == null + ? com.google.spanner.admin.database.v1.Database.getDefaultInstance() + : database_; + } + + public static final int UPDATE_MASK_FIELD_NUMBER = 2; + private com.google.protobuf.FieldMask updateMask_; + + /** + * + * + *
    +   * Required. The list of fields to update. Currently, only
    +   * `enable_drop_protection` field can be updated.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + @java.lang.Override + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Required. The list of fields to update. Currently, only
    +   * `enable_drop_protection` field can be updated.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getUpdateMask() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + /** + * + * + *
    +   * Required. The list of fields to update. Currently, only
    +   * `enable_drop_protection` field can be updated.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getDatabase()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getUpdateMask()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getDatabase()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getUpdateMask()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.UpdateDatabaseRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.UpdateDatabaseRequest other = + (com.google.spanner.admin.database.v1.UpdateDatabaseRequest) obj; + + if (hasDatabase() != other.hasDatabase()) return false; + if (hasDatabase()) { + if (!getDatabase().equals(other.getDatabase())) return false; + } + if (hasUpdateMask() != other.hasUpdateMask()) return 
false; + if (hasUpdateMask()) { + if (!getUpdateMask().equals(other.getUpdateMask())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasDatabase()) { + hash = (37 * hash) + DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getDatabase().hashCode(); + } + if (hasUpdateMask()) { + hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; + hash = (53 * hash) + getUpdateMask().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.admin.database.v1.UpdateDatabaseRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + 
@java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.UpdateDatabaseRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.UpdateDatabaseRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.UpdateDatabaseRequest) + com.google.spanner.admin.database.v1.UpdateDatabaseRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_UpdateDatabaseRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_UpdateDatabaseRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.UpdateDatabaseRequest.class, + com.google.spanner.admin.database.v1.UpdateDatabaseRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.UpdateDatabaseRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetDatabaseFieldBuilder(); + internalGetUpdateMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + database_ = null; + if (databaseBuilder_ != null) { + databaseBuilder_.dispose(); + databaseBuilder_ = null; + } + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_UpdateDatabaseRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateDatabaseRequest getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.UpdateDatabaseRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateDatabaseRequest build() { + com.google.spanner.admin.database.v1.UpdateDatabaseRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateDatabaseRequest buildPartial() { + com.google.spanner.admin.database.v1.UpdateDatabaseRequest result = + new com.google.spanner.admin.database.v1.UpdateDatabaseRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.UpdateDatabaseRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.database_ = databaseBuilder_ == null ? database_ : databaseBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.updateMask_ = updateMaskBuilder_ == null ? 
updateMask_ : updateMaskBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.UpdateDatabaseRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.UpdateDatabaseRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.UpdateDatabaseRequest other) { + if (other == com.google.spanner.admin.database.v1.UpdateDatabaseRequest.getDefaultInstance()) + return this; + if (other.hasDatabase()) { + mergeDatabase(other.getDatabase()); + } + if (other.hasUpdateMask()) { + mergeUpdateMask(other.getUpdateMask()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetDatabaseFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetUpdateMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + 
onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.admin.database.v1.Database database_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.Database, + com.google.spanner.admin.database.v1.Database.Builder, + com.google.spanner.admin.database.v1.DatabaseOrBuilder> + databaseBuilder_; + + /** + * + * + *
    +     * Required. The database to update.
    +     * The `name` field of the database is of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Database database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the database field is set. + */ + public boolean hasDatabase() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Required. The database to update.
    +     * The `name` field of the database is of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Database database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The database. + */ + public com.google.spanner.admin.database.v1.Database getDatabase() { + if (databaseBuilder_ == null) { + return database_ == null + ? com.google.spanner.admin.database.v1.Database.getDefaultInstance() + : database_; + } else { + return databaseBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. The database to update.
    +     * The `name` field of the database is of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Database database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setDatabase(com.google.spanner.admin.database.v1.Database value) { + if (databaseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + database_ = value; + } else { + databaseBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The database to update.
    +     * The `name` field of the database is of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Database database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setDatabase( + com.google.spanner.admin.database.v1.Database.Builder builderForValue) { + if (databaseBuilder_ == null) { + database_ = builderForValue.build(); + } else { + databaseBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The database to update.
    +     * The `name` field of the database is of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Database database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeDatabase(com.google.spanner.admin.database.v1.Database value) { + if (databaseBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && database_ != null + && database_ != com.google.spanner.admin.database.v1.Database.getDefaultInstance()) { + getDatabaseBuilder().mergeFrom(value); + } else { + database_ = value; + } + } else { + databaseBuilder_.mergeFrom(value); + } + if (database_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. The database to update.
    +     * The `name` field of the database is of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Database database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearDatabase() { + bitField0_ = (bitField0_ & ~0x00000001); + database_ = null; + if (databaseBuilder_ != null) { + databaseBuilder_.dispose(); + databaseBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The database to update.
    +     * The `name` field of the database is of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Database database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.Database.Builder getDatabaseBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetDatabaseFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. The database to update.
    +     * The `name` field of the database is of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Database database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.DatabaseOrBuilder getDatabaseOrBuilder() { + if (databaseBuilder_ != null) { + return databaseBuilder_.getMessageOrBuilder(); + } else { + return database_ == null + ? com.google.spanner.admin.database.v1.Database.getDefaultInstance() + : database_; + } + } + + /** + * + * + *
    +     * Required. The database to update.
    +     * The `name` field of the database is of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.Database database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.Database, + com.google.spanner.admin.database.v1.Database.Builder, + com.google.spanner.admin.database.v1.DatabaseOrBuilder> + internalGetDatabaseFieldBuilder() { + if (databaseBuilder_ == null) { + databaseBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.Database, + com.google.spanner.admin.database.v1.Database.Builder, + com.google.spanner.admin.database.v1.DatabaseOrBuilder>( + getDatabase(), getParentForChildren(), isClean()); + database_ = null; + } + return databaseBuilder_; + } + + private com.google.protobuf.FieldMask updateMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + updateMaskBuilder_; + + /** + * + * + *
    +     * Required. The list of fields to update. Currently, only
    +     * `enable_drop_protection` field can be updated.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Required. The list of fields to update. Currently, only
    +     * `enable_drop_protection` field can be updated.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + public com.google.protobuf.FieldMask getUpdateMask() { + if (updateMaskBuilder_ == null) { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } else { + return updateMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. The list of fields to update. Currently, only
    +     * `enable_drop_protection` field can be updated.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateMask_ = value; + } else { + updateMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The list of fields to update. Currently, only
    +     * `enable_drop_protection` field can be updated.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (updateMaskBuilder_ == null) { + updateMask_ = builderForValue.build(); + } else { + updateMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The list of fields to update. Currently, only
    +     * `enable_drop_protection` field can be updated.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && updateMask_ != null + && updateMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getUpdateMaskBuilder().mergeFrom(value); + } else { + updateMask_ = value; + } + } else { + updateMaskBuilder_.mergeFrom(value); + } + if (updateMask_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. The list of fields to update. Currently, only
    +     * `enable_drop_protection` field can be updated.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearUpdateMask() { + bitField0_ = (bitField0_ & ~0x00000002); + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The list of fields to update. Currently, only
    +     * `enable_drop_protection` field can be updated.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetUpdateMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. The list of fields to update. Currently, only
    +     * `enable_drop_protection` field can be updated.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + if (updateMaskBuilder_ != null) { + return updateMaskBuilder_.getMessageOrBuilder(); + } else { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } + } + + /** + * + * + *
    +     * Required. The list of fields to update. Currently, only
    +     * `enable_drop_protection` field can be updated.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetUpdateMaskFieldBuilder() { + if (updateMaskBuilder_ == null) { + updateMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getUpdateMask(), getParentForChildren(), isClean()); + updateMask_ = null; + } + return updateMaskBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.UpdateDatabaseRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.UpdateDatabaseRequest) + private static final com.google.spanner.admin.database.v1.UpdateDatabaseRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.UpdateDatabaseRequest(); + } + + public static com.google.spanner.admin.database.v1.UpdateDatabaseRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateDatabaseRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateDatabaseRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseRequestOrBuilder.java new file mode 100644 index 000000000000..1fe594d8c054 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseRequestOrBuilder.java @@ -0,0 +1,120 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/database/v1/spanner_database_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.database.v1; + +@com.google.protobuf.Generated +public interface UpdateDatabaseRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.UpdateDatabaseRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The database to update.
    +   * The `name` field of the database is of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Database database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the database field is set. + */ + boolean hasDatabase(); + + /** + * + * + *
    +   * Required. The database to update.
    +   * The `name` field of the database is of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Database database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The database. + */ + com.google.spanner.admin.database.v1.Database getDatabase(); + + /** + * + * + *
    +   * Required. The database to update.
    +   * The `name` field of the database is of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.Database database = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.database.v1.DatabaseOrBuilder getDatabaseOrBuilder(); + + /** + * + * + *
    +   * Required. The list of fields to update. Currently, only
    +   * `enable_drop_protection` field can be updated.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + boolean hasUpdateMask(); + + /** + * + * + *
    +   * Required. The list of fields to update. Currently, only
    +   * `enable_drop_protection` field can be updated.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + com.google.protobuf.FieldMask getUpdateMask(); + + /** + * + * + *
    +   * Required. The list of fields to update. Currently, only
    +   * `enable_drop_protection` field can be updated.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/backup.proto b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/backup.proto new file mode 100644 index 000000000000..6898814c4217 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/backup.proto @@ -0,0 +1,773 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.spanner.admin.database.v1; + +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; +import "google/spanner/admin/database/v1/common.proto"; + +option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1"; +option go_package = "cloud.google.com/go/spanner/admin/database/apiv1/databasepb;databasepb"; +option java_multiple_files = true; +option java_outer_classname = "BackupProto"; +option java_package = "com.google.spanner.admin.database.v1"; +option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Database\\V1"; +option ruby_package = "Google::Cloud::Spanner::Admin::Database::V1"; + +// A backup of a Cloud Spanner database. +message Backup { + option (google.api.resource) = { + type: "spanner.googleapis.com/Backup" + pattern: "projects/{project}/instances/{instance}/backups/{backup}" + }; + + // Indicates the current state of the backup. + enum State { + // Not specified. + STATE_UNSPECIFIED = 0; + + // The pending backup is still being created. Operations on the + // backup may fail with `FAILED_PRECONDITION` in this state. + CREATING = 1; + + // The backup is complete and ready for use. + READY = 2; + } + + // Required for the + // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + // operation. Name of the database from which this backup was created. This + // needs to be in the same instance as the backup. Values are of the form + // `projects//instances//databases/`. + string database = 2 [(google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + }]; + + // The backup will contain an externally consistent copy of the database at + // the timestamp specified by `version_time`. If `version_time` is not + // specified, the system will set `version_time` to the `create_time` of the + // backup. 
+ google.protobuf.Timestamp version_time = 9; + + // Required for the + // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + // operation. The expiration time of the backup, with microseconds + // granularity that must be at least 6 hours and at most 366 days + // from the time the CreateBackup request is processed. Once the `expire_time` + // has passed, the backup is eligible to be automatically deleted by Cloud + // Spanner to free the resources used by the backup. + google.protobuf.Timestamp expire_time = 3; + + // Output only for the + // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + // operation. Required for the + // [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup] + // operation. + // + // A globally unique identifier for the backup which cannot be + // changed. Values are of the form + // `projects//instances//backups/[a-z][a-z0-9_\-]*[a-z0-9]` + // The final segment of the name must be between 2 and 60 characters + // in length. + // + // The backup is stored in the location(s) specified in the instance + // configuration of the instance containing the backup, identified + // by the prefix of the backup name of the form + // `projects//instances/`. + string name = 1; + + // Output only. The time the + // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + // request is received. If the request does not specify `version_time`, the + // `version_time` of the backup will be equivalent to the `create_time`. + google.protobuf.Timestamp create_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Size of the backup in bytes. + int64 size_bytes = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The number of bytes that will be freed by deleting this + // backup. 
This value will be zero if, for example, this backup is part of an + // incremental backup chain and younger backups in the chain require that we + // keep its data. For backups not in an incremental backup chain, this is + // always the size of the backup. This value may change if backups on the same + // chain get created, deleted or expired. + int64 freeable_size_bytes = 15 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. For a backup in an incremental backup chain, this is the + // storage space needed to keep the data that has changed since the previous + // backup. For all other backups, this is always the size of the backup. This + // value may change if backups on the same chain get deleted or expired. + // + // This field can be used to calculate the total storage space used by a set + // of backups. For example, the total space used by all backups of a database + // can be computed by summing up this field. + int64 exclusive_size_bytes = 16 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The current state of the backup. + State state = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The names of the restored databases that reference the backup. + // The database names are of + // the form `projects//instances//databases/`. + // Referencing databases may exist in different instances. The existence of + // any referencing database prevents the backup from being deleted. When a + // restored database from the backup enters the `READY` state, the reference + // to the backup is removed. + repeated string referencing_databases = 7 [ + (google.api.field_behavior) = OUTPUT_ONLY, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + } + ]; + + // Output only. The encryption information for the backup. + EncryptionInfo encryption_info = 8 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. 
The encryption information for the backup, whether it is + // protected by one or more KMS keys. The information includes all Cloud + // KMS key versions used to encrypt the backup. The `encryption_status' field + // inside of each `EncryptionInfo` is not populated. At least one of the key + // versions must be available for the backup to be restored. If a key version + // is revoked in the middle of a restore, the restore behavior is undefined. + repeated EncryptionInfo encryption_information = 13 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The database dialect information for the backup. + DatabaseDialect database_dialect = 10 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The names of the destination backups being created by copying + // this source backup. The backup names are of the form + // `projects//instances//backups/`. + // Referencing backups may exist in different instances. The existence of + // any referencing backup prevents the backup from being deleted. When the + // copy operation is done (either successfully completed or cancelled or the + // destination backup is deleted), the reference to the backup is removed. + repeated string referencing_backups = 11 [ + (google.api.field_behavior) = OUTPUT_ONLY, + (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" } + ]; + + // Output only. The max allowed expiration time of the backup, with + // microseconds granularity. A backup's expiration time can be configured in + // multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or + // copying an existing backup, the expiration time specified must be + // less than `Backup.max_expire_time`. + google.protobuf.Timestamp max_expire_time = 12 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. List of backup schedule URIs that are associated with + // creating this backup. This is only applicable for scheduled backups, and + // is empty for on-demand backups. 
+ // + // To optimize for storage, whenever possible, multiple schedules are + // collapsed together to create one backup. In such cases, this field captures + // the list of all backup schedule URIs that are associated with creating + // this backup. If collapsing is not done, then this field captures the + // single backup schedule URI associated with creating this backup. + repeated string backup_schedules = 14 [ + (google.api.field_behavior) = OUTPUT_ONLY, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/BackupSchedule" + } + ]; + + // Output only. Populated only for backups in an incremental backup chain. + // Backups share the same chain id if and only if they belong to the same + // incremental backup chain. Use this field to determine which backups are + // part of the same incremental backup chain. The ordering of backups in the + // chain can be determined by ordering the backup `version_time`. + string incremental_backup_chain_id = 17 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Data deleted at a time older than this is guaranteed not to be + // retained in order to support this backup. For a backup in an incremental + // backup chain, this is the version time of the oldest backup that exists or + // ever existed in the chain. For all other backups, this is the version time + // of the backup. This field can be used to understand what data is being + // retained by the backup system. + google.protobuf.Timestamp oldest_version_time = 18 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The instance partition(s) storing the backup. + // + // This is the same as the list of the instance partition(s) that the database + // had footprint in at the backup's `version_time`. + repeated BackupInstancePartition instance_partitions = 19 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// The request for +// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]. 
+message CreateBackupRequest { + // Required. The name of the instance in which the backup will be + // created. This must be the same instance that contains the database the + // backup will be created from. The backup will be stored in the + // location(s) specified in the instance configuration of this + // instance. Values are of the form + // `projects//instances/`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Instance" + } + ]; + + // Required. The id of the backup to be created. The `backup_id` appended to + // `parent` forms the full backup name of the form + // `projects//instances//backups/`. + string backup_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The backup to create. + Backup backup = 3 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The encryption configuration used to encrypt the backup. If this + // field is not specified, the backup will use the same encryption + // configuration as the database by default, namely + // [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] + // = `USE_DATABASE_ENCRYPTION`. + CreateBackupEncryptionConfig encryption_config = 4 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Metadata type for the operation returned by +// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]. +message CreateBackupMetadata { + // The name of the backup being created. + string name = 1 [ + (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" } + ]; + + // The name of the database the backup is created from. + string database = 2 [(google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + }]; + + // The progress of the + // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + // operation. 
+ OperationProgress progress = 3; + + // The time at which cancellation of this operation was received. + // [Operations.CancelOperation][google.longrunning.Operations.CancelOperation] + // starts asynchronous cancellation on a long-running operation. The server + // makes a best effort to cancel the operation, but success is not guaranteed. + // Clients can use + // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or + // other methods to check whether the cancellation succeeded or whether the + // operation completed despite cancellation. On successful cancellation, + // the operation is not deleted; instead, it becomes an operation with + // an [Operation.error][google.longrunning.Operation.error] value with a + // [google.rpc.Status.code][google.rpc.Status.code] of 1, + // corresponding to `Code.CANCELLED`. + google.protobuf.Timestamp cancel_time = 4; +} + +// The request for +// [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]. +message CopyBackupRequest { + // Required. The name of the destination instance that will contain the backup + // copy. Values are of the form: `projects//instances/`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Instance" + } + ]; + + // Required. The id of the backup copy. + // The `backup_id` appended to `parent` forms the full backup_uri of the form + // `projects//instances//backups/`. + string backup_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The source backup to be copied. + // The source backup needs to be in READY state for it to be copied. + // Once CopyBackup is in progress, the source backup cannot be deleted or + // cleaned up on expiration until CopyBackup is finished. + // Values are of the form: + // `projects//instances//backups/`. 
+ string source_backup = 3 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" } + ]; + + // Required. The expiration time of the backup in microsecond granularity. + // The expiration time must be at least 6 hours and at most 366 days + // from the `create_time` of the source backup. Once the `expire_time` has + // passed, the backup is eligible to be automatically deleted by Cloud Spanner + // to free the resources used by the backup. + google.protobuf.Timestamp expire_time = 4 + [(google.api.field_behavior) = REQUIRED]; + + // Optional. The encryption configuration used to encrypt the backup. If this + // field is not specified, the backup will use the same encryption + // configuration as the source backup by default, namely + // [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] + // = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`. + CopyBackupEncryptionConfig encryption_config = 5 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Metadata type for the operation returned by +// [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]. +message CopyBackupMetadata { + // The name of the backup being created through the copy operation. + // Values are of the form + // `projects//instances//backups/`. + string name = 1 [ + (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" } + ]; + + // The name of the source backup that is being copied. + // Values are of the form + // `projects//instances//backups/`. + string source_backup = 2 [ + (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" } + ]; + + // The progress of the + // [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] + // operation. + OperationProgress progress = 3; + + // The time at which cancellation of CopyBackup operation was received. 
+ // [Operations.CancelOperation][google.longrunning.Operations.CancelOperation] + // starts asynchronous cancellation on a long-running operation. The server + // makes a best effort to cancel the operation, but success is not guaranteed. + // Clients can use + // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or + // other methods to check whether the cancellation succeeded or whether the + // operation completed despite cancellation. On successful cancellation, + // the operation is not deleted; instead, it becomes an operation with + // an [Operation.error][google.longrunning.Operation.error] value with a + // [google.rpc.Status.code][google.rpc.Status.code] of 1, + // corresponding to `Code.CANCELLED`. + google.protobuf.Timestamp cancel_time = 4; +} + +// The request for +// [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]. +message UpdateBackupRequest { + // Required. The backup to update. `backup.name`, and the fields to be updated + // as specified by `update_mask` are required. Other fields are ignored. + // Update is only supported for the following fields: + // * `backup.expire_time`. + Backup backup = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. A mask specifying which fields (e.g. `expire_time`) in the + // Backup resource should be updated. This mask is relative to the Backup + // resource, not to the request message. The field mask must always be + // specified; this prevents any future fields from being erased accidentally + // by clients that do not know about them. + google.protobuf.FieldMask update_mask = 2 + [(google.api.field_behavior) = REQUIRED]; +} + +// The request for +// [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup]. +message GetBackupRequest { + // Required. Name of the backup. + // Values are of the form + // `projects//instances//backups/`. 
+ string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" } + ]; +} + +// The request for +// [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup]. +message DeleteBackupRequest { + // Required. Name of the backup to delete. + // Values are of the form + // `projects//instances//backups/`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" } + ]; +} + +// The request for +// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]. +message ListBackupsRequest { + // Required. The instance to list backups from. Values are of the + // form `projects//instances/`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Instance" + } + ]; + + // An expression that filters the list of returned backups. + // + // A filter expression consists of a field name, a comparison operator, and a + // value for filtering. + // The value must be a string, a number, or a boolean. The comparison operator + // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. + // Colon `:` is the contains operator. Filter rules are not case sensitive. + // + // The following fields in the + // [Backup][google.spanner.admin.database.v1.Backup] are eligible for + // filtering: + // + // * `name` + // * `database` + // * `state` + // * `create_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + // * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + // * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + // * `size_bytes` + // * `backup_schedules` + // + // You can combine multiple expressions by enclosing each expression in + // parentheses. By default, expressions are combined with AND logic, but + // you can specify AND, OR, and NOT logic explicitly. 
+ // + // Here are a few examples: + // + // * `name:Howl` - The backup's name contains the string "howl". + // * `database:prod` + // - The database's name contains the string "prod". + // * `state:CREATING` - The backup is pending creation. + // * `state:READY` - The backup is fully created and ready for use. + // * `(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")` + // - The backup name contains the string "howl" and `create_time` + // of the backup is before 2018-03-28T14:50:00Z. + // * `expire_time < \"2018-03-28T14:50:00Z\"` + // - The backup `expire_time` is before 2018-03-28T14:50:00Z. + // * `size_bytes > 10000000000` - The backup's size is greater than 10GB + // * `backup_schedules:daily` + // - The backup is created from a schedule with "daily" in its name. + string filter = 2; + + // Number of backups to be returned in the response. If 0 or + // less, defaults to the server's maximum allowed page size. + int32 page_size = 3; + + // If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token] + // from a previous + // [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse] + // to the same `parent` and with the same `filter`. + string page_token = 4; +} + +// The response for +// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]. +message ListBackupsResponse { + // The list of matching backups. Backups returned are ordered by `create_time` + // in descending order, starting from the most recent `create_time`. + repeated Backup backups = 1; + + // `next_page_token` can be sent in a subsequent + // [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups] + // call to fetch more of the matching backups. + string next_page_token = 2; +} + +// The request for +// [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]. +message ListBackupOperationsRequest { + // Required. 
The instance of the backup operations. Values are of + // the form `projects//instances/`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Instance" + } + ]; + + // An expression that filters the list of returned backup operations. + // + // A filter expression consists of a field name, a + // comparison operator, and a value for filtering. + // The value must be a string, a number, or a boolean. The comparison operator + // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. + // Colon `:` is the contains operator. Filter rules are not case sensitive. + // + // The following fields in the [operation][google.longrunning.Operation] + // are eligible for filtering: + // + // * `name` - The name of the long-running operation + // * `done` - False if the operation is in progress, else true. + // * `metadata.@type` - the type of metadata. For example, the type string + // for + // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + // is + // `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`. + // * `metadata.` - any field in metadata.value. + // `metadata.@type` must be specified first if filtering on metadata + // fields. + // * `error` - Error associated with the long-running operation. + // * `response.@type` - the type of response. + // * `response.` - any field in response.value. + // + // You can combine multiple expressions by enclosing each expression in + // parentheses. By default, expressions are combined with AND logic, but + // you can specify AND, OR, and NOT logic explicitly. + // + // Here are a few examples: + // + // * `done:true` - The operation is complete. 
+ // * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ + // `metadata.database:prod` - Returns operations where: + // * The operation's metadata type is + // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + // * The source database name of backup contains the string "prod". + // * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ + // `(metadata.name:howl) AND` \ + // `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ + // `(error:*)` - Returns operations where: + // * The operation's metadata type is + // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + // * The backup name contains the string "howl". + // * The operation started before 2018-03-28T14:50:00Z. + // * The operation resulted in an error. + // * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata) AND` \ + // `(metadata.source_backup:test) AND` \ + // `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \ + // `(error:*)` - Returns operations where: + // * The operation's metadata type is + // [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. + // * The source backup name contains the string "test". + // * The operation started before 2022-01-18T14:50:00Z. + // * The operation resulted in an error. 
+ // * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ + // `(metadata.database:test_db)) OR` \ + // `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata) + // AND` \ + // `(metadata.source_backup:test_bkp)) AND` \ + // `(error:*)` - Returns operations where: + // * The operation's metadata matches either of criteria: + // * The operation's metadata type is + // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + // AND the source database name of the backup contains the string + // "test_db" + // * The operation's metadata type is + // [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] + // AND the source backup name contains the string "test_bkp" + // * The operation resulted in an error. + string filter = 2; + + // Number of operations to be returned in the response. If 0 or + // less, defaults to the server's maximum allowed page size. + int32 page_size = 3; + + // If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token] + // from a previous + // [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse] + // to the same `parent` and with the same `filter`. + string page_token = 4; +} + +// The response for +// [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]. +message ListBackupOperationsResponse { + // The list of matching backup [long-running + // operations][google.longrunning.Operation]. Each operation's name will be + // prefixed by the backup's name. The operation's + // [metadata][google.longrunning.Operation.metadata] field type + // `metadata.type_url` describes the type of the metadata. Operations returned + // include those that are pending or have completed/failed/canceled within the + // last 7 days. 
Operations returned are ordered by + // `operation.metadata.value.progress.start_time` in descending order starting + // from the most recently started operation. + repeated google.longrunning.Operation operations = 1; + + // `next_page_token` can be sent in a subsequent + // [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations] + // call to fetch more of the matching metadata. + string next_page_token = 2; +} + +// Information about a backup. +message BackupInfo { + // Name of the backup. + string backup = 1 [ + (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" } + ]; + + // The backup contains an externally consistent copy of `source_database` at + // the timestamp specified by `version_time`. If the + // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + // request did not specify `version_time`, the `version_time` of the backup is + // equivalent to the `create_time`. + google.protobuf.Timestamp version_time = 4; + + // The time the + // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + // request was received. + google.protobuf.Timestamp create_time = 2; + + // Name of the database the backup was created from. + string source_database = 3 [(google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + }]; +} + +// Encryption configuration for the backup to create. +message CreateBackupEncryptionConfig { + // Encryption types for the backup. + enum EncryptionType { + // Unspecified. Do not use. + ENCRYPTION_TYPE_UNSPECIFIED = 0; + + // Use the same encryption configuration as the database. This is the + // default option when + // [encryption_config][google.spanner.admin.database.v1.CreateBackupEncryptionConfig] + // is empty. For example, if the database is using + // `Customer_Managed_Encryption`, the backup will be using the same Cloud + // KMS key as the database. 
+ USE_DATABASE_ENCRYPTION = 1; + + // Use Google default encryption. + GOOGLE_DEFAULT_ENCRYPTION = 2; + + // Use customer managed encryption. If specified, `kms_key_name` + // must contain a valid Cloud KMS key. + CUSTOMER_MANAGED_ENCRYPTION = 3; + } + + // Required. The encryption type of the backup. + EncryptionType encryption_type = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The Cloud KMS key that will be used to protect the backup. + // This field should be set only when + // [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] + // is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form + // `projects//locations//keyRings//cryptoKeys/`. + string kms_key_name = 2 [ + (google.api.field_behavior) = OPTIONAL, + (google.api.resource_reference) = { + type: "cloudkms.googleapis.com/CryptoKey" + } + ]; + + // Optional. Specifies the KMS configuration for the one or more keys used to + // protect the backup. Values are of the form + // `projects//locations//keyRings//cryptoKeys/`. + // + // The keys referenced by kms_key_names must fully cover all + // regions of the backup's instance configuration. Some examples: + // * For single region instance configs, specify a single regional + // location KMS key. + // * For multi-regional instance configs of type GOOGLE_MANAGED, + // either specify a multi-regional location KMS key or multiple regional + // location KMS keys that cover all regions in the instance config. + // * For an instance config of type USER_MANAGED, please specify only + // regional location KMS keys to cover each region in the instance config. + // Multi-regional location KMS keys are not supported for USER_MANAGED + // instance configs. + repeated string kms_key_names = 3 [ + (google.api.field_behavior) = OPTIONAL, + (google.api.resource_reference) = { + type: "cloudkms.googleapis.com/CryptoKey" + } + ]; +} + +// Encryption configuration for the copied backup. 
+message CopyBackupEncryptionConfig { + // Encryption types for the backup. + enum EncryptionType { + // Unspecified. Do not use. + ENCRYPTION_TYPE_UNSPECIFIED = 0; + + // This is the default option for + // [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] + // when + // [encryption_config][google.spanner.admin.database.v1.CopyBackupEncryptionConfig] + // is not specified. For example, if the source backup is using + // `Customer_Managed_Encryption`, the backup will be using the same Cloud + // KMS key as the source backup. + USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION = 1; + + // Use Google default encryption. + GOOGLE_DEFAULT_ENCRYPTION = 2; + + // Use customer managed encryption. If specified, either `kms_key_name` or + // `kms_key_names` must contain valid Cloud KMS key(s). + CUSTOMER_MANAGED_ENCRYPTION = 3; + } + + // Required. The encryption type of the backup. + EncryptionType encryption_type = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The Cloud KMS key that will be used to protect the backup. + // This field should be set only when + // [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] + // is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form + // `projects//locations//keyRings//cryptoKeys/`. + string kms_key_name = 2 [ + (google.api.field_behavior) = OPTIONAL, + (google.api.resource_reference) = { + type: "cloudkms.googleapis.com/CryptoKey" + } + ]; + + // Optional. Specifies the KMS configuration for the one or more keys used to + // protect the backup. Values are of the form + // `projects//locations//keyRings//cryptoKeys/`. + // Kms keys specified can be in any order. + // + // The keys referenced by kms_key_names must fully cover all + // regions of the backup's instance configuration. Some examples: + // * For single region instance configs, specify a single regional + // location KMS key. 
+ // * For multi-regional instance configs of type GOOGLE_MANAGED, + // either specify a multi-regional location KMS key or multiple regional + // location KMS keys that cover all regions in the instance config. + // * For an instance config of type USER_MANAGED, please specify only + // regional location KMS keys to cover each region in the instance config. + // Multi-regional location KMS keys are not supported for USER_MANAGED + // instance configs. + repeated string kms_key_names = 3 [ + (google.api.field_behavior) = OPTIONAL, + (google.api.resource_reference) = { + type: "cloudkms.googleapis.com/CryptoKey" + } + ]; +} + +// The specification for full backups. +// A full backup stores the entire contents of the database at a given +// version time. +message FullBackupSpec {} + +// The specification for incremental backup chains. +// An incremental backup stores the delta of changes between a previous +// backup and the database contents at a given version time. An +// incremental backup chain consists of a full backup and zero or more +// successive incremental backups. The first backup created for an +// incremental backup chain is always a full backup. +message IncrementalBackupSpec {} + +// Instance partition information for the backup. +message BackupInstancePartition { + // A unique identifier for the instance partition. 
Values are of the form + // `projects//instances//instancePartitions/` + string instance_partition = 1 [(google.api.resource_reference) = { + type: "spanner.googleapis.com/InstancePartition" + }]; +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/backup_schedule.proto b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/backup_schedule.proto new file mode 100644 index 000000000000..c273516ae093 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/backup_schedule.proto @@ -0,0 +1,230 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.spanner.admin.database.v1; + +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; +import "google/spanner/admin/database/v1/backup.proto"; + +option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1"; +option go_package = "cloud.google.com/go/spanner/admin/database/apiv1/databasepb;databasepb"; +option java_multiple_files = true; +option java_outer_classname = "BackupScheduleProto"; +option java_package = "com.google.spanner.admin.database.v1"; +option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Database\\V1"; +option ruby_package = "Google::Cloud::Spanner::Admin::Database::V1"; + +// Defines specifications of the backup schedule. +message BackupScheduleSpec { + // Required. + oneof schedule_spec { + // Cron style schedule specification. + CrontabSpec cron_spec = 1; + } +} + +// BackupSchedule expresses the automated backup creation specification for a +// Spanner database. +// Next ID: 10 +message BackupSchedule { + option (google.api.resource) = { + type: "spanner.googleapis.com/BackupSchedule" + pattern: "projects/{project}/instances/{instance}/databases/{database}/backupSchedules/{schedule}" + plural: "backupSchedules" + singular: "backupSchedule" + }; + + // Identifier. Output only for the + // [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchededule] operation. + // Required for the + // [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule] + // operation. A globally unique identifier for the backup schedule which + // cannot be changed. Values are of the form + // `projects//instances//databases//backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]` + // The final segment of the name must be between 2 and 60 characters in + // length. + string name = 1 [(google.api.field_behavior) = IDENTIFIER]; + + // Optional. 
The schedule specification based on which the backup creations + // are triggered. + BackupScheduleSpec spec = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The retention duration of a backup that must be at least 6 hours + // and at most 366 days. The backup is eligible to be automatically deleted + // once the retention period has elapsed. + google.protobuf.Duration retention_duration = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The encryption configuration that will be used to encrypt the + // backup. If this field is not specified, the backup will use the same + // encryption configuration as the database. + CreateBackupEncryptionConfig encryption_config = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Required. Backup type spec determines the type of backup that is created by + // the backup schedule. Currently, only full backups are supported. + oneof backup_type_spec { + // The schedule creates only full backups. + FullBackupSpec full_backup_spec = 7; + + // The schedule creates incremental backup chains. + IncrementalBackupSpec incremental_backup_spec = 8; + } + + // Output only. The timestamp at which the schedule was last updated. + // If the schedule has never been updated, this field contains the timestamp + // when the schedule was first created. + google.protobuf.Timestamp update_time = 9 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// CrontabSpec can be used to specify the version time and frequency at +// which the backup should be created. +message CrontabSpec { + // Required. Textual representation of the crontab. User can customize the + // backup frequency and the backup version time using the cron + // expression. The version time must be in UTC timezone. + // + // The backup will contain an externally consistent copy of the + // database at the version time. Allowed frequencies are 12 hour, 1 day, + // 1 week and 1 month. 
Examples of valid cron specifications: + // * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC. + // * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC. + // * `0 2 * * * ` : once a day at 2 past midnight in UTC. + // * `0 2 * * 0 ` : once a week every Sunday at 2 past midnight in UTC. + // * `0 2 8 * * ` : once a month on 8th day at 2 past midnight in UTC. + string text = 1 [(google.api.field_behavior) = REQUIRED]; + + // Output only. The time zone of the times in `CrontabSpec.text`. Currently + // only UTC is supported. + string time_zone = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Schedule backups will contain an externally consistent copy + // of the database at the version time specified in + // `schedule_spec.cron_spec`. However, Spanner may not initiate the creation + // of the scheduled backups at that version time. Spanner will initiate + // the creation of scheduled backups within the time window bounded by the + // version_time specified in `schedule_spec.cron_spec` and version_time + + // `creation_window`. + google.protobuf.Duration creation_window = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// The request for +// [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule]. +message CreateBackupScheduleRequest { + // Required. The name of the database that this backup schedule applies to. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + } + ]; + + // Required. The Id to use for the backup schedule. The `backup_schedule_id` + // appended to `parent` forms the full backup schedule name of the form + // `projects//instances//databases//backupSchedules/`. + string backup_schedule_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The backup schedule to create. 
+ BackupSchedule backup_schedule = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// The request for +// [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule]. +message GetBackupScheduleRequest { + // Required. The name of the schedule to retrieve. + // Values are of the form + // `projects//instances//databases//backupSchedules/`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/BackupSchedule" + } + ]; +} + +// The request for +// [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule]. +message DeleteBackupScheduleRequest { + // Required. The name of the schedule to delete. + // Values are of the form + // `projects//instances//databases//backupSchedules/`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/BackupSchedule" + } + ]; +} + +// The request for +// [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]. +message ListBackupSchedulesRequest { + // Required. Database is the parent resource whose backup schedules should be + // listed. Values are of the form + // projects//instances//databases/ + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + } + ]; + + // Optional. Number of backup schedules to be returned in the response. If 0 + // or less, defaults to the server's maximum allowed page size. + int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token] + // from a previous + // [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse] + // to the same `parent`. 
+ string page_token = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// The response for +// [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]. +message ListBackupSchedulesResponse { + // The list of backup schedules for a database. + repeated BackupSchedule backup_schedules = 1; + + // `next_page_token` can be sent in a subsequent + // [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules] + // call to fetch more of the schedules. + string next_page_token = 2; +} + +// The request for +// [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]. +message UpdateBackupScheduleRequest { + // Required. The backup schedule to update. `backup_schedule.name`, and the + // fields to be updated as specified by `update_mask` are required. Other + // fields are ignored. + BackupSchedule backup_schedule = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. A mask specifying which fields in the BackupSchedule resource + // should be updated. This mask is relative to the BackupSchedule resource, + // not to the request message. The field mask must always be + // specified; this prevents any future fields from being erased + // accidentally. 
+ google.protobuf.FieldMask update_mask = 2 + [(google.api.field_behavior) = REQUIRED]; +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/common.proto b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/common.proto new file mode 100644 index 000000000000..c494b8cf7808 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/common.proto @@ -0,0 +1,132 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.spanner.admin.database.v1; + +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; + +option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1"; +option go_package = "cloud.google.com/go/spanner/admin/database/apiv1/databasepb;databasepb"; +option java_multiple_files = true; +option java_outer_classname = "CommonProto"; +option java_package = "com.google.spanner.admin.database.v1"; +option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Database\\V1"; +option ruby_package = "Google::Cloud::Spanner::Admin::Database::V1"; +option (google.api.resource_definition) = { + type: "cloudkms.googleapis.com/CryptoKey" + pattern: "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}" +}; +option (google.api.resource_definition) = { + type: "cloudkms.googleapis.com/CryptoKeyVersion" + pattern: "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}" +}; + +// Encapsulates progress related information for a Cloud Spanner long +// running operation. +message OperationProgress { + // Percent completion of the operation. + // Values are between 0 and 100 inclusive. + int32 progress_percent = 1; + + // Time the request was received. + google.protobuf.Timestamp start_time = 2; + + // If set, the time at which this operation failed or was completed + // successfully. + google.protobuf.Timestamp end_time = 3; +} + +// Encryption configuration for a Cloud Spanner database. +message EncryptionConfig { + // The Cloud KMS key to be used for encrypting and decrypting + // the database. Values are of the form + // `projects//locations//keyRings//cryptoKeys/`. 
+ string kms_key_name = 2 [(google.api.resource_reference) = { + type: "cloudkms.googleapis.com/CryptoKey" + }]; + + // Specifies the KMS configuration for the one or more keys used to encrypt + // the database. Values are of the form + // `projects//locations//keyRings//cryptoKeys/`. + // + // The keys referenced by kms_key_names must fully cover all + // regions of the database instance configuration. Some examples: + // * For single region database instance configs, specify a single regional + // location KMS key. + // * For multi-regional database instance configs of type GOOGLE_MANAGED, + // either specify a multi-regional location KMS key or multiple regional + // location KMS keys that cover all regions in the instance config. + // * For a database instance config of type USER_MANAGED, please specify only + // regional location KMS keys to cover each region in the instance config. + // Multi-regional location KMS keys are not supported for USER_MANAGED + // instance configs. + repeated string kms_key_names = 3 [(google.api.resource_reference) = { + type: "cloudkms.googleapis.com/CryptoKey" + }]; +} + +// Encryption information for a Cloud Spanner database or backup. +message EncryptionInfo { + // Possible encryption types. + enum Type { + // Encryption type was not specified, though data at rest remains encrypted. + TYPE_UNSPECIFIED = 0; + + // The data is encrypted at rest with a key that is + // fully managed by Google. No key version or status will be populated. + // This is the default state. + GOOGLE_DEFAULT_ENCRYPTION = 1; + + // The data is encrypted at rest with a key that is + // managed by the customer. The active version of the key. `kms_key_version` + // will be populated, and `encryption_status` may be populated. + CUSTOMER_MANAGED_ENCRYPTION = 2; + } + + // Output only. The type of encryption. + Type encryption_type = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. 
If present, the status of a recent encrypt/decrypt call on + // underlying data for this database or backup. Regardless of status, data is + // always encrypted at rest. + google.rpc.Status encryption_status = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A Cloud KMS key version that is being used to protect the + // database or backup. + string kms_key_version = 2 [ + (google.api.field_behavior) = OUTPUT_ONLY, + (google.api.resource_reference) = { + type: "cloudkms.googleapis.com/CryptoKeyVersion" + } + ]; +} + +// Indicates the dialect type of a database. +enum DatabaseDialect { + // Default value. This value will create a database with the + // GOOGLE_STANDARD_SQL dialect. + DATABASE_DIALECT_UNSPECIFIED = 0; + + // GoogleSQL supported SQL. + GOOGLE_STANDARD_SQL = 1; + + // PostgreSQL supported SQL. + POSTGRESQL = 2; +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/spanner_database_admin.proto b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/spanner_database_admin.proto new file mode 100644 index 000000000000..d41a4114c205 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/spanner_database_admin.proto @@ -0,0 +1,1314 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.spanner.admin.database.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/iam/v1/iam_policy.proto"; +import "google/iam/v1/policy.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; +import "google/spanner/admin/database/v1/backup.proto"; +import "google/spanner/admin/database/v1/backup_schedule.proto"; +import "google/spanner/admin/database/v1/common.proto"; + +option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1"; +option go_package = "cloud.google.com/go/spanner/admin/database/apiv1/databasepb;databasepb"; +option java_multiple_files = true; +option java_outer_classname = "SpannerDatabaseAdminProto"; +option java_package = "com.google.spanner.admin.database.v1"; +option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Database\\V1"; +option ruby_package = "Google::Cloud::Spanner::Admin::Database::V1"; +option (google.api.resource_definition) = { + type: "spanner.googleapis.com/Instance" + pattern: "projects/{project}/instances/{instance}" +}; +option (google.api.resource_definition) = { + type: "spanner.googleapis.com/InstancePartition" + pattern: "projects/{project}/instances/{instance}/instancePartitions/{instance_partition}" +}; + +// Cloud Spanner Database Admin API +// +// The Cloud Spanner Database Admin API can be used to: +// * create, drop, and list databases +// * update the schema of pre-existing databases +// * create, delete, copy and list backups for a database +// * restore a database from an existing backup +service DatabaseAdmin { + option (google.api.default_host) = "spanner.googleapis.com"; + option (google.api.oauth_scopes) = + 
"https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/spanner.admin"; + + // Lists Cloud Spanner databases. + rpc ListDatabases(ListDatabasesRequest) returns (ListDatabasesResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/instances/*}/databases" + }; + option (google.api.method_signature) = "parent"; + } + + // Creates a new Cloud Spanner database and starts to prepare it for serving. + // The returned [long-running operation][google.longrunning.Operation] will + // have a name of the format `/operations/` and + // can be used to track preparation of the database. The + // [metadata][google.longrunning.Operation.metadata] field type is + // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. + // The [response][google.longrunning.Operation.response] field type is + // [Database][google.spanner.admin.database.v1.Database], if successful. + rpc CreateDatabase(CreateDatabaseRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/instances/*}/databases" + body: "*" + }; + option (google.api.method_signature) = "parent,create_statement"; + option (google.longrunning.operation_info) = { + response_type: "google.spanner.admin.database.v1.Database" + metadata_type: "google.spanner.admin.database.v1.CreateDatabaseMetadata" + }; + } + + // Gets the state of a Cloud Spanner database. + rpc GetDatabase(GetDatabaseRequest) returns (Database) { + option (google.api.http) = { + get: "/v1/{name=projects/*/instances/*/databases/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Updates a Cloud Spanner database. The returned + // [long-running operation][google.longrunning.Operation] can be used to track + // the progress of updating the database. If the named database does not + // exist, returns `NOT_FOUND`. 
+ // + // While the operation is pending: + // + // * The database's + // [reconciling][google.spanner.admin.database.v1.Database.reconciling] + // field is set to true. + // * Cancelling the operation is best-effort. If the cancellation succeeds, + // the operation metadata's + // [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time] + // is set, the updates are reverted, and the operation terminates with a + // `CANCELLED` status. + // * New UpdateDatabase requests will return a `FAILED_PRECONDITION` error + // until the pending operation is done (returns successfully or with + // error). + // * Reading the database via the API continues to give the pre-request + // values. + // + // Upon completion of the returned operation: + // + // * The new values are in effect and readable via the API. + // * The database's + // [reconciling][google.spanner.admin.database.v1.Database.reconciling] + // field becomes false. + // + // The returned [long-running operation][google.longrunning.Operation] will + // have a name of the format + // `projects//instances//databases//operations/` + // and can be used to track the database modification. The + // [metadata][google.longrunning.Operation.metadata] field type is + // [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata]. + // The [response][google.longrunning.Operation.response] field type is + // [Database][google.spanner.admin.database.v1.Database], if successful. 
+ rpc UpdateDatabase(UpdateDatabaseRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + patch: "/v1/{database.name=projects/*/instances/*/databases/*}" + body: "database" + }; + option (google.api.method_signature) = "database,update_mask"; + option (google.longrunning.operation_info) = { + response_type: "Database" + metadata_type: "UpdateDatabaseMetadata" + }; + } + + // Updates the schema of a Cloud Spanner database by + // creating/altering/dropping tables, columns, indexes, etc. The returned + // [long-running operation][google.longrunning.Operation] will have a name of + // the format `/operations/` and can be used to + // track execution of the schema change(s). The + // [metadata][google.longrunning.Operation.metadata] field type is + // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. + // The operation has no response. + rpc UpdateDatabaseDdl(UpdateDatabaseDdlRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + patch: "/v1/{database=projects/*/instances/*/databases/*}/ddl" + body: "*" + }; + option (google.api.method_signature) = "database,statements"; + option (google.longrunning.operation_info) = { + response_type: "google.protobuf.Empty" + metadata_type: "google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata" + }; + } + + // Drops (aka deletes) a Cloud Spanner database. + // Completed backups for the database will be retained according to their + // `expire_time`. + // Note: Cloud Spanner might continue to accept requests for a few seconds + // after the database has been deleted. + rpc DropDatabase(DropDatabaseRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/{database=projects/*/instances/*/databases/*}" + }; + option (google.api.method_signature) = "database"; + } + + // Returns the schema of a Cloud Spanner database as a list of formatted + // DDL statements. 
This method does not show pending schema updates, those may + // be queried using the [Operations][google.longrunning.Operations] API. + rpc GetDatabaseDdl(GetDatabaseDdlRequest) returns (GetDatabaseDdlResponse) { + option (google.api.http) = { + get: "/v1/{database=projects/*/instances/*/databases/*}/ddl" + }; + option (google.api.method_signature) = "database"; + } + + // Sets the access control policy on a database or backup resource. + // Replaces any existing policy. + // + // Authorization requires `spanner.databases.setIamPolicy` + // permission on [resource][google.iam.v1.SetIamPolicyRequest.resource]. + // For backups, authorization requires `spanner.backups.setIamPolicy` + // permission on [resource][google.iam.v1.SetIamPolicyRequest.resource]. + rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) + returns (google.iam.v1.Policy) { + option (google.api.http) = { + post: "/v1/{resource=projects/*/instances/*/databases/*}:setIamPolicy" + body: "*" + additional_bindings { + post: "/v1/{resource=projects/*/instances/*/backups/*}:setIamPolicy" + body: "*" + } + additional_bindings { + post: "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:setIamPolicy" + body: "*" + } + }; + option (google.api.method_signature) = "resource,policy"; + } + + // Gets the access control policy for a database or backup resource. + // Returns an empty policy if a database or backup exists but does not have a + // policy set. + // + // Authorization requires `spanner.databases.getIamPolicy` permission on + // [resource][google.iam.v1.GetIamPolicyRequest.resource]. + // For backups, authorization requires `spanner.backups.getIamPolicy` + // permission on [resource][google.iam.v1.GetIamPolicyRequest.resource]. 
+ rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) + returns (google.iam.v1.Policy) { + option (google.api.http) = { + post: "/v1/{resource=projects/*/instances/*/databases/*}:getIamPolicy" + body: "*" + additional_bindings { + post: "/v1/{resource=projects/*/instances/*/backups/*}:getIamPolicy" + body: "*" + } + additional_bindings { + post: "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:getIamPolicy" + body: "*" + } + }; + option (google.api.method_signature) = "resource"; + } + + // Returns permissions that the caller has on the specified database or backup + // resource. + // + // Attempting this RPC on a non-existent Cloud Spanner database will + // result in a NOT_FOUND error if the user has + // `spanner.databases.list` permission on the containing Cloud + // Spanner instance. Otherwise returns an empty set of permissions. + // Calling this method on a backup that does not exist will + // result in a NOT_FOUND error if the user has + // `spanner.backups.list` permission on the containing instance. + rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) + returns (google.iam.v1.TestIamPermissionsResponse) { + option (google.api.http) = { + post: "/v1/{resource=projects/*/instances/*/databases/*}:testIamPermissions" + body: "*" + additional_bindings { + post: "/v1/{resource=projects/*/instances/*/backups/*}:testIamPermissions" + body: "*" + } + additional_bindings { + post: "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:testIamPermissions" + body: "*" + } + additional_bindings { + post: "/v1/{resource=projects/*/instances/*/databases/*/databaseRoles/*}:testIamPermissions" + body: "*" + } + }; + option (google.api.method_signature) = "resource,permissions"; + } + + // Starts creating a new Cloud Spanner Backup. 
+ // The returned backup [long-running operation][google.longrunning.Operation] + // will have a name of the format + // `projects//instances//backups//operations/` + // and can be used to track creation of the backup. The + // [metadata][google.longrunning.Operation.metadata] field type is + // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + // The [response][google.longrunning.Operation.response] field type is + // [Backup][google.spanner.admin.database.v1.Backup], if successful. + // Cancelling the returned operation will stop the creation and delete the + // backup. There can be only one pending backup creation per database. Backup + // creation of different databases can run concurrently. + rpc CreateBackup(CreateBackupRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/instances/*}/backups" + body: "backup" + }; + option (google.api.method_signature) = "parent,backup,backup_id"; + option (google.longrunning.operation_info) = { + response_type: "google.spanner.admin.database.v1.Backup" + metadata_type: "google.spanner.admin.database.v1.CreateBackupMetadata" + }; + } + + // Starts copying a Cloud Spanner Backup. + // The returned backup [long-running operation][google.longrunning.Operation] + // will have a name of the format + // `projects//instances//backups//operations/` + // and can be used to track copying of the backup. The operation is associated + // with the destination backup. + // The [metadata][google.longrunning.Operation.metadata] field type is + // [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. + // The [response][google.longrunning.Operation.response] field type is + // [Backup][google.spanner.admin.database.v1.Backup], if successful. + // Cancelling the returned operation will stop the copying and delete the + // destination backup. Concurrent CopyBackup requests can run on the same + // source backup. 
+ rpc CopyBackup(CopyBackupRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/instances/*}/backups:copy" + body: "*" + }; + option (google.api.method_signature) = + "parent,backup_id,source_backup,expire_time"; + option (google.longrunning.operation_info) = { + response_type: "google.spanner.admin.database.v1.Backup" + metadata_type: "google.spanner.admin.database.v1.CopyBackupMetadata" + }; + } + + // Gets metadata on a pending or completed + // [Backup][google.spanner.admin.database.v1.Backup]. + rpc GetBackup(GetBackupRequest) returns (Backup) { + option (google.api.http) = { + get: "/v1/{name=projects/*/instances/*/backups/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Updates a pending or completed + // [Backup][google.spanner.admin.database.v1.Backup]. + rpc UpdateBackup(UpdateBackupRequest) returns (Backup) { + option (google.api.http) = { + patch: "/v1/{backup.name=projects/*/instances/*/backups/*}" + body: "backup" + }; + option (google.api.method_signature) = "backup,update_mask"; + } + + // Deletes a pending or completed + // [Backup][google.spanner.admin.database.v1.Backup]. + rpc DeleteBackup(DeleteBackupRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/{name=projects/*/instances/*/backups/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Lists completed and pending backups. + // Backups returned are ordered by `create_time` in descending order, + // starting from the most recent `create_time`. + rpc ListBackups(ListBackupsRequest) returns (ListBackupsResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/instances/*}/backups" + }; + option (google.api.method_signature) = "parent"; + } + + // Create a new database by restoring from a completed backup. 
The new + // database must be in the same project and in an instance with the same + // instance configuration as the instance containing + // the backup. The returned database [long-running + // operation][google.longrunning.Operation] has a name of the format + // `projects//instances//databases//operations/`, + // and can be used to track the progress of the operation, and to cancel it. + // The [metadata][google.longrunning.Operation.metadata] field type is + // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. + // The [response][google.longrunning.Operation.response] type + // is [Database][google.spanner.admin.database.v1.Database], if + // successful. Cancelling the returned operation will stop the restore and + // delete the database. + // There can be only one database being restored into an instance at a time. + // Once the restore operation completes, a new restore operation can be + // initiated, without waiting for the optimize operation associated with the + // first restore to complete. + rpc RestoreDatabase(RestoreDatabaseRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/instances/*}/databases:restore" + body: "*" + }; + option (google.api.method_signature) = "parent,database_id,backup"; + option (google.longrunning.operation_info) = { + response_type: "google.spanner.admin.database.v1.Database" + metadata_type: "google.spanner.admin.database.v1.RestoreDatabaseMetadata" + }; + } + + // Lists database [longrunning-operations][google.longrunning.Operation]. + // A database operation has a name of the form + // `projects//instances//databases//operations/`. + // The long-running operation + // [metadata][google.longrunning.Operation.metadata] field type + // `metadata.type_url` describes the type of the metadata. Operations returned + // include those that have completed/failed/canceled within the last 7 days, + // and pending operations. 
+ rpc ListDatabaseOperations(ListDatabaseOperationsRequest) + returns (ListDatabaseOperationsResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/instances/*}/databaseOperations" + }; + option (google.api.method_signature) = "parent"; + } + + // Lists the backup [long-running operations][google.longrunning.Operation] in + // the given instance. A backup operation has a name of the form + // `projects//instances//backups//operations/`. + // The long-running operation + // [metadata][google.longrunning.Operation.metadata] field type + // `metadata.type_url` describes the type of the metadata. Operations returned + // include those that have completed/failed/canceled within the last 7 days, + // and pending operations. Operations returned are ordered by + // `operation.metadata.value.progress.start_time` in descending order starting + // from the most recently started operation. + rpc ListBackupOperations(ListBackupOperationsRequest) + returns (ListBackupOperationsResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/instances/*}/backupOperations" + }; + option (google.api.method_signature) = "parent"; + } + + // Lists Cloud Spanner database roles. + rpc ListDatabaseRoles(ListDatabaseRolesRequest) + returns (ListDatabaseRolesResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/instances/*/databases/*}/databaseRoles" + }; + option (google.api.method_signature) = "parent"; + } + + // Adds split points to specified tables, indexes of a database. + rpc AddSplitPoints(AddSplitPointsRequest) returns (AddSplitPointsResponse) { + option (google.api.http) = { + post: "/v1/{database=projects/*/instances/*/databases/*}:addSplitPoints" + body: "*" + }; + option (google.api.method_signature) = "database,split_points"; + } + + // Creates a new backup schedule. 
+ rpc CreateBackupSchedule(CreateBackupScheduleRequest) + returns (BackupSchedule) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/instances/*/databases/*}/backupSchedules" + body: "backup_schedule" + }; + option (google.api.method_signature) = + "parent,backup_schedule,backup_schedule_id"; + } + + // Gets backup schedule for the input schedule name. + rpc GetBackupSchedule(GetBackupScheduleRequest) returns (BackupSchedule) { + option (google.api.http) = { + get: "/v1/{name=projects/*/instances/*/databases/*/backupSchedules/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Updates a backup schedule. + rpc UpdateBackupSchedule(UpdateBackupScheduleRequest) + returns (BackupSchedule) { + option (google.api.http) = { + patch: "/v1/{backup_schedule.name=projects/*/instances/*/databases/*/backupSchedules/*}" + body: "backup_schedule" + }; + option (google.api.method_signature) = "backup_schedule,update_mask"; + } + + // Deletes a backup schedule. + rpc DeleteBackupSchedule(DeleteBackupScheduleRequest) + returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/{name=projects/*/instances/*/databases/*/backupSchedules/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Lists all the backup schedules for the database. + rpc ListBackupSchedules(ListBackupSchedulesRequest) + returns (ListBackupSchedulesResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/instances/*/databases/*}/backupSchedules" + }; + option (google.api.method_signature) = "parent"; + } + + // This is an internal API called by Spanner Graph jobs. You should never need + // to call this API directly. + rpc InternalUpdateGraphOperation(InternalUpdateGraphOperationRequest) + returns (InternalUpdateGraphOperationResponse) { + option (google.api.method_signature) = "database,operation_id"; + } +} + +// Information about the database restore. +message RestoreInfo { + // The type of the restore source. 
+  RestoreSourceType source_type = 1;
+
+  // Information about the source used to restore the database.
+  oneof source_info {
+    // Information about the backup used to restore the database. The backup
+    // may no longer exist.
+    BackupInfo backup_info = 2;
+  }
+}
+
+// A Cloud Spanner database.
+message Database {
+  option (google.api.resource) = {
+    type: "spanner.googleapis.com/Database"
+    pattern: "projects/{project}/instances/{instance}/databases/{database}"
+  };
+
+  // Indicates the current state of the database.
+  enum State {
+    // Not specified.
+    STATE_UNSPECIFIED = 0;
+
+    // The database is still being created. Operations on the database may fail
+    // with `FAILED_PRECONDITION` in this state.
+    CREATING = 1;
+
+    // The database is fully created and ready for use.
+    READY = 2;
+
+    // The database is fully created and ready for use, but is still
+    // being optimized for performance and cannot handle full load.
+    //
+    // In this state, the database still references the backup
+    // it was restored from, preventing the backup
+    // from being deleted. When optimizations are complete, the full performance
+    // of the database will be restored, and the database will transition to
+    // `READY` state.
+    READY_OPTIMIZING = 3;
+  }
+
+  // Required. The name of the database. Values are of the form
+  // `projects/<project>/instances/<instance>/databases/<database>`,
+  // where `<database>` is as specified in the `CREATE DATABASE`
+  // statement. This name can be passed to other API methods to
+  // identify the database.
+  string name = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Output only. The current database state.
+  State state = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. If exists, the time at which the database creation started.
+  google.protobuf.Timestamp create_time = 3
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Applicable only for restored databases. Contains information
+  // about the restore source.
+  RestoreInfo restore_info = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. For databases that are using customer managed encryption, this
+  // field contains the encryption configuration for the database.
+  // For databases that are using Google default or other types of encryption,
+  // this field is empty.
+  EncryptionConfig encryption_config = 5
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. For databases that are using customer managed encryption, this
+  // field contains the encryption information for the database, such as
+  // all Cloud KMS key versions that are in use. The `encryption_status` field
+  // inside of each `EncryptionInfo` is not populated.
+  //
+  // For databases that are using Google default or other types of encryption,
+  // this field is empty.
+  //
+  // This field is propagated lazily from the backend. There might be a delay
+  // from when a key version is being used and when it appears in this field.
+  repeated EncryptionInfo encryption_info = 8
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The period in which Cloud Spanner retains all versions of data
+  // for the database. This is the same as the value of version_retention_period
+  // database option set using
+  // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
+  // Defaults to 1 hour, if not set.
+  string version_retention_period = 6
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Earliest timestamp at which older versions of the data can be
+  // read. This value is continuously updated by Cloud Spanner and becomes stale
+  // the moment it is queried. If you are using this value to recover data, make
+  // sure to account for the time from the moment when the value is queried to
+  // the moment when you initiate the recovery.
+  google.protobuf.Timestamp earliest_version_time = 7
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only.
The read-write region which contains the database's leader + // replicas. + // + // This is the same as the value of default_leader + // database option set using DatabaseAdmin.CreateDatabase or + // DatabaseAdmin.UpdateDatabaseDdl. If not explicitly set, this is empty. + string default_leader = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The dialect of the Cloud Spanner Database. + DatabaseDialect database_dialect = 10 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Whether drop protection is enabled for this database. Defaults to false, + // if not set. For more details, please see how to [prevent accidental + // database + // deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion). + bool enable_drop_protection = 11; + + // Output only. If true, the database is being updated. If false, there are no + // ongoing update operations for the database. + bool reconciling = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// The request for +// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. +message ListDatabasesRequest { + // Required. The instance whose databases should be listed. + // Values are of the form `projects//instances/`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Instance" + } + ]; + + // Number of databases to be returned in the response. If 0 or less, + // defaults to the server's maximum allowed page size. + int32 page_size = 3; + + // If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] + // from a previous + // [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse]. + string page_token = 4; +} + +// The response for +// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. +message ListDatabasesResponse { + // Databases that matched the request. 
+ repeated Database databases = 1; + + // `next_page_token` can be sent in a subsequent + // [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] + // call to fetch more of the matching databases. + string next_page_token = 2; +} + +// The request for +// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. +message CreateDatabaseRequest { + // Required. The name of the instance that will serve the new database. + // Values are of the form `projects//instances/`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Instance" + } + ]; + + // Required. A `CREATE DATABASE` statement, which specifies the ID of the + // new database. The database ID must conform to the regular expression + // `[a-z][a-z0-9_\-]*[a-z0-9]` and be between 2 and 30 characters in length. + // If the database ID is a reserved word or if it contains a hyphen, the + // database ID must be enclosed in backticks (`` ` ``). + string create_statement = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. A list of DDL statements to run inside the newly created + // database. Statements can create tables, indexes, etc. These + // statements execute atomically with the creation of the database: + // if there is an error in any statement, the database is not created. + repeated string extra_statements = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The encryption configuration for the database. If this field is + // not specified, Cloud Spanner will encrypt/decrypt all data at rest using + // Google default encryption. + EncryptionConfig encryption_config = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The dialect of the Cloud Spanner Database. + DatabaseDialect database_dialect = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. 
Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements in + // 'extra_statements' above. + // Contains a protobuf-serialized + // [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto). + // To generate it, [install](https://grpc.io/docs/protoc-installation/) and + // run `protoc` with --include_imports and --descriptor_set_out. For example, + // to generate for moon/shot/app.proto, run + // ``` + // $protoc --proto_path=/app_path --proto_path=/lib_path \ + // --include_imports \ + // --descriptor_set_out=descriptors.data \ + // moon/shot/app.proto + // ``` + // For more details, see protobuffer [self + // description](https://developers.google.com/protocol-buffers/docs/techniques#self-description). + bytes proto_descriptors = 6 [(google.api.field_behavior) = OPTIONAL]; +} + +// Metadata type for the operation returned by +// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. +message CreateDatabaseMetadata { + // The database being created. + string database = 1 [(google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + }]; +} + +// The request for +// [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase]. +message GetDatabaseRequest { + // Required. The name of the requested database. Values are of the form + // `projects//instances//databases/`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + } + ]; +} + +// The request for +// [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]. +message UpdateDatabaseRequest { + // Required. The database to update. + // The `name` field of the database is of the form + // `projects//instances//databases/`. + Database database = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The list of fields to update. 
Currently, only + // `enable_drop_protection` field can be updated. + google.protobuf.FieldMask update_mask = 2 + [(google.api.field_behavior) = REQUIRED]; +} + +// Metadata type for the operation returned by +// [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]. +message UpdateDatabaseMetadata { + // The request for + // [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]. + UpdateDatabaseRequest request = 1; + + // The progress of the + // [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase] + // operation. + OperationProgress progress = 2; + + // The time at which this operation was cancelled. If set, this operation is + // in the process of undoing itself (which is best-effort). + google.protobuf.Timestamp cancel_time = 3; +} + +// Enqueues the given DDL statements to be applied, in order but not +// necessarily all at once, to the database schema at some point (or +// points) in the future. The server checks that the statements +// are executable (syntactically valid, name tables that exist, etc.) +// before enqueueing them, but they may still fail upon +// later execution (e.g., if a statement from another batch of +// statements is applied first and it conflicts in some way, or if +// there is some data-related problem like a `NULL` value in a column to +// which `NOT NULL` would be added). If a statement fails, all +// subsequent statements in the batch are automatically cancelled. +// +// Each batch of statements is assigned a name which can be used with +// the [Operations][google.longrunning.Operations] API to monitor +// progress. See the +// [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] +// field for more details. +message UpdateDatabaseDdlRequest { + // Required. The database to update. 
+ string database = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + } + ]; + + // Required. DDL statements to be applied to the database. + repeated string statements = 2 [(google.api.field_behavior) = REQUIRED]; + + // If empty, the new update request is assigned an + // automatically-generated operation ID. Otherwise, `operation_id` + // is used to construct the name of the resulting + // [Operation][google.longrunning.Operation]. + // + // Specifying an explicit operation ID simplifies determining + // whether the statements were executed in the event that the + // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + // call is replayed, or the return value is otherwise lost: the + // [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] + // and `operation_id` fields can be combined to form the + // [name][google.longrunning.Operation.name] of the resulting + // [longrunning.Operation][google.longrunning.Operation]: + // `/operations/`. + // + // `operation_id` should be unique within the database, and must be + // a valid identifier: `[a-z][a-z0-9_]*`. Note that + // automatically-generated operation IDs always begin with an + // underscore. If the named operation already exists, + // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + // returns `ALREADY_EXISTS`. + string operation_id = 3; + + // Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements. + // Contains a protobuf-serialized + // [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto). + // To generate it, [install](https://grpc.io/docs/protoc-installation/) and + // run `protoc` with --include_imports and --descriptor_set_out. 
For example, + // to generate for moon/shot/app.proto, run + // ``` + // $protoc --proto_path=/app_path --proto_path=/lib_path \ + // --include_imports \ + // --descriptor_set_out=descriptors.data \ + // moon/shot/app.proto + // ``` + // For more details, see protobuffer [self + // description](https://developers.google.com/protocol-buffers/docs/techniques#self-description). + bytes proto_descriptors = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. This field is exposed to be used by the Spanner Migration Tool. + // For more details, see + // [SMT](https://github.com/GoogleCloudPlatform/spanner-migration-tool). + bool throughput_mode = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// Action information extracted from a DDL statement. This proto is used to +// display the brief info of the DDL statement for the operation +// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. +message DdlStatementActionInfo { + // The action for the DDL statement, e.g. CREATE, ALTER, DROP, GRANT, etc. + // This field is a non-empty string. + string action = 1; + + // The entity type for the DDL statement, e.g. TABLE, INDEX, VIEW, etc. + // This field can be empty string for some DDL statement, + // e.g. for statement "ANALYZE", `entity_type` = "". + string entity_type = 2; + + // The entity name(s) being operated on the DDL statement. + // E.g. + // 1. For statement "CREATE TABLE t1(...)", `entity_names` = ["t1"]. + // 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = ["r1", "r2"]. + // 3. For statement "ANALYZE", `entity_names` = []. + repeated string entity_names = 3; +} + +// Metadata type for the operation returned by +// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. +message UpdateDatabaseDdlMetadata { + // The database being modified. 
+ string database = 1 [(google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + }]; + + // For an update this list contains all the statements. For an + // individual statement, this list contains only that statement. + repeated string statements = 2; + + // Reports the commit timestamps of all statements that have + // succeeded so far, where `commit_timestamps[i]` is the commit + // timestamp for the statement `statements[i]`. + repeated google.protobuf.Timestamp commit_timestamps = 3; + + // Output only. When true, indicates that the operation is throttled e.g. + // due to resource constraints. When resources become available the operation + // will resume and this field will be false again. + bool throttled = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // The progress of the + // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + // operations. All DDL statements will have continuously updating progress, + // and `progress[i]` is the operation progress for `statements[i]`. Also, + // `progress[i]` will have start time and end time populated with commit + // timestamp of operation, as well as a progress of 100% once the operation + // has completed. + repeated OperationProgress progress = 5; + + // The brief action info for the DDL statements. + // `actions[i]` is the brief info for `statements[i]`. + repeated DdlStatementActionInfo actions = 6; +} + +// The request for +// [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase]. +message DropDatabaseRequest { + // Required. The database to be dropped. + string database = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + } + ]; +} + +// The request for +// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. +message GetDatabaseDdlRequest { + // Required. The database whose schema we wish to get. 
+ // Values are of the form + // `projects//instances//databases/` + string database = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + } + ]; +} + +// The response for +// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. +message GetDatabaseDdlResponse { + // A list of formatted DDL statements defining the schema of the database + // specified in the request. + repeated string statements = 1; + + // Proto descriptors stored in the database. + // Contains a protobuf-serialized + // [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto). + // For more details, see protobuffer [self + // description](https://developers.google.com/protocol-buffers/docs/techniques#self-description). + bytes proto_descriptors = 2; +} + +// The request for +// [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]. +message ListDatabaseOperationsRequest { + // Required. The instance of the database operations. + // Values are of the form `projects//instances/`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Instance" + } + ]; + + // An expression that filters the list of returned operations. + // + // A filter expression consists of a field name, a + // comparison operator, and a value for filtering. + // The value must be a string, a number, or a boolean. The comparison operator + // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. + // Colon `:` is the contains operator. Filter rules are not case sensitive. + // + // The following fields in the [Operation][google.longrunning.Operation] + // are eligible for filtering: + // + // * `name` - The name of the long-running operation + // * `done` - False if the operation is in progress, else true. 
+ // * `metadata.@type` - the type of metadata. For example, the type string + // for + // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] + // is + // `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`. + // * `metadata.` - any field in metadata.value. + // `metadata.@type` must be specified first, if filtering on metadata + // fields. + // * `error` - Error associated with the long-running operation. + // * `response.@type` - the type of response. + // * `response.` - any field in response.value. + // + // You can combine multiple expressions by enclosing each expression in + // parentheses. By default, expressions are combined with AND logic. However, + // you can specify AND, OR, and NOT logic explicitly. + // + // Here are a few examples: + // + // * `done:true` - The operation is complete. + // * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \ + // `(metadata.source_type:BACKUP) AND` \ + // `(metadata.backup_info.backup:backup_howl) AND` \ + // `(metadata.name:restored_howl) AND` \ + // `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ + // `(error:*)` - Return operations where: + // * The operation's metadata type is + // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. + // * The database is restored from a backup. + // * The backup name contains "backup_howl". + // * The restored database's name contains "restored_howl". + // * The operation started before 2018-03-28T14:50:00Z. + // * The operation resulted in an error. + string filter = 2; + + // Number of operations to be returned in the response. If 0 or + // less, defaults to the server's maximum allowed page size. 
+ int32 page_size = 3; + + // If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token] + // from a previous + // [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse] + // to the same `parent` and with the same `filter`. + string page_token = 4; +} + +// The response for +// [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]. +message ListDatabaseOperationsResponse { + // The list of matching database [long-running + // operations][google.longrunning.Operation]. Each operation's name will be + // prefixed by the database's name. The operation's + // [metadata][google.longrunning.Operation.metadata] field type + // `metadata.type_url` describes the type of the metadata. + repeated google.longrunning.Operation operations = 1; + + // `next_page_token` can be sent in a subsequent + // [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations] + // call to fetch more of the matching metadata. + string next_page_token = 2; +} + +// The request for +// [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]. +message RestoreDatabaseRequest { + // Required. The name of the instance in which to create the + // restored database. This instance must be in the same project and + // have the same instance configuration as the instance containing + // the source backup. Values are of the form + // `projects//instances/`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Instance" + } + ]; + + // Required. The id of the database to create and restore to. This + // database must not already exist. The `database_id` appended to + // `parent` forms the full database name of the form + // `projects//instances//databases/`. 
+  string database_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The source from which to restore.
+  oneof source {
+    // Name of the backup from which to restore. Values are of the form
+    // `projects/<project>/instances/<instance>/backups/<backup>`.
+    string backup = 3 [(google.api.resource_reference) = {
+      type: "spanner.googleapis.com/Backup"
+    }];
+  }
+
+  // Optional. An encryption configuration describing the encryption type and
+  // key resources in Cloud KMS used to encrypt/decrypt the database to restore
+  // to. If this field is not specified, the restored database will use the same
+  // encryption configuration as the backup by default, namely
+  // [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
+  // = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
+  RestoreDatabaseEncryptionConfig encryption_config = 4
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Encryption configuration for the restored database.
+message RestoreDatabaseEncryptionConfig {
+  // Encryption types for the database to be restored.
+  enum EncryptionType {
+    // Unspecified. Do not use.
+    ENCRYPTION_TYPE_UNSPECIFIED = 0;
+
+    // This is the default option when
+    // [encryption_config][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig]
+    // is not specified.
+    USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION = 1;
+
+    // Use Google default encryption.
+    GOOGLE_DEFAULT_ENCRYPTION = 2;
+
+    // Use customer managed encryption. If specified, `kms_key_name`
+    // must contain a valid Cloud KMS key.
+    CUSTOMER_MANAGED_ENCRYPTION = 3;
+  }
+
+  // Required. The encryption type of the restored database.
+  EncryptionType encryption_type = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. The Cloud KMS key that will be used to encrypt/decrypt the
+  // restored database.
This field should be set only when + // [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] + // is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form + // `projects//locations//keyRings//cryptoKeys/`. + string kms_key_name = 2 [ + (google.api.field_behavior) = OPTIONAL, + (google.api.resource_reference) = { + type: "cloudkms.googleapis.com/CryptoKey" + } + ]; + + // Optional. Specifies the KMS configuration for the one or more keys used to + // encrypt the database. Values are of the form + // `projects//locations//keyRings//cryptoKeys/`. + // + // The keys referenced by kms_key_names must fully cover all + // regions of the database instance configuration. Some examples: + // * For single region database instance configs, specify a single regional + // location KMS key. + // * For multi-regional database instance configs of type GOOGLE_MANAGED, + // either specify a multi-regional location KMS key or multiple regional + // location KMS keys that cover all regions in the instance config. + // * For a database instance config of type USER_MANAGED, please specify only + // regional location KMS keys to cover each region in the instance config. + // Multi-regional location KMS keys are not supported for USER_MANAGED + // instance configs. + repeated string kms_key_names = 3 [ + (google.api.field_behavior) = OPTIONAL, + (google.api.resource_reference) = { + type: "cloudkms.googleapis.com/CryptoKey" + } + ]; +} + +// Metadata type for the long-running operation returned by +// [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]. +message RestoreDatabaseMetadata { + // Name of the database being created and restored to. + string name = 1 [(google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + }]; + + // The type of the restore source. 
+ RestoreSourceType source_type = 2; + + // Information about the source used to restore the database, as specified by + // `source` in + // [RestoreDatabaseRequest][google.spanner.admin.database.v1.RestoreDatabaseRequest]. + oneof source_info { + // Information about the backup used to restore the database. + BackupInfo backup_info = 3; + } + + // The progress of the + // [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase] + // operation. + OperationProgress progress = 4; + + // The time at which cancellation of this operation was received. + // [Operations.CancelOperation][google.longrunning.Operations.CancelOperation] + // starts asynchronous cancellation on a long-running operation. The server + // makes a best effort to cancel the operation, but success is not guaranteed. + // Clients can use + // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or + // other methods to check whether the cancellation succeeded or whether the + // operation completed despite cancellation. On successful cancellation, + // the operation is not deleted; instead, it becomes an operation with + // an [Operation.error][google.longrunning.Operation.error] value with a + // [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + // `Code.CANCELLED`. + google.protobuf.Timestamp cancel_time = 5; + + // If exists, the name of the long-running operation that will be used to + // track the post-restore optimization process to optimize the performance of + // the restored database, and remove the dependency on the restore source. + // The name is of the form + // `projects//instances//databases//operations/` + // where the is the name of database being created and restored to. + // The metadata type of the long-running operation is + // [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. 
+ // This long-running operation will be automatically created by the system + // after the RestoreDatabase long-running operation completes successfully. + // This operation will not be created if the restore was not successful. + string optimize_database_operation_name = 6; +} + +// Metadata type for the long-running operation used to track the progress +// of optimizations performed on a newly restored database. This long-running +// operation is automatically created by the system after the successful +// completion of a database restore, and cannot be cancelled. +message OptimizeRestoredDatabaseMetadata { + // Name of the restored database being optimized. + string name = 1 [(google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + }]; + + // The progress of the post-restore optimizations. + OperationProgress progress = 2; +} + +// Indicates the type of the restore source. +enum RestoreSourceType { + // No restore associated. + TYPE_UNSPECIFIED = 0; + + // A backup was used as the source of the restore. + BACKUP = 1; +} + +// A Cloud Spanner database role. +message DatabaseRole { + option (google.api.resource) = { + type: "spanner.googleapis.com/DatabaseRole" + pattern: "projects/{project}/instances/{instance}/databases/{database}/databaseRoles/{role}" + }; + + // Required. The name of the database role. Values are of the form + // `projects//instances//databases//databaseRoles/` + // where `` is as specified in the `CREATE ROLE` DDL statement. + string name = 1 [(google.api.field_behavior) = REQUIRED]; +} + +// The request for +// [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]. +message ListDatabaseRolesRequest { + // Required. The database whose roles should be listed. + // Values are of the form + // `projects//instances//databases/`. 
+ string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + } + ]; + + // Number of database roles to be returned in the response. If 0 or less, + // defaults to the server's maximum allowed page size. + int32 page_size = 2; + + // If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token] + // from a previous + // [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse]. + string page_token = 3; +} + +// The response for +// [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]. +message ListDatabaseRolesResponse { + // Database roles that matched the request. + repeated DatabaseRole database_roles = 1; + + // `next_page_token` can be sent in a subsequent + // [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles] + // call to fetch more of the matching roles. + string next_page_token = 2; +} + +// The request for +// [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints]. +message AddSplitPointsRequest { + // Required. The database on whose tables/indexes split points are to be + // added. Values are of the form + // `projects//instances//databases/`. + string database = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + } + ]; + + // Required. The split points to add. + repeated SplitPoints split_points = 2 + [(google.api.field_behavior) = REQUIRED]; + + // Optional. A user-supplied tag associated with the split points. + // For example, "intital_data_load", "special_event_1". + // Defaults to "CloudAddSplitPointsAPI" if not specified. + // The length of the tag must not exceed 50 characters,else will be trimmed. + // Only valid UTF8 characters are allowed. 
+ string initiator = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// The response for +// [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints]. +message AddSplitPointsResponse {} + +// The split points of a table/index. +message SplitPoints { + // A split key. + message Key { + // Required. The column values making up the split key. + google.protobuf.ListValue key_parts = 1 + [(google.api.field_behavior) = REQUIRED]; + } + + // The table to split. + string table = 1; + + // The index to split. + // If specified, the `table` field must refer to the index's base table. + string index = 2; + + // Required. The list of split keys, i.e., the split boundaries. + repeated Key keys = 3 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The expiration timestamp of the split points. + // A timestamp in the past means immediate expiration. + // The maximum value can be 30 days in the future. + // Defaults to 10 days in the future if not specified. + google.protobuf.Timestamp expire_time = 5 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Internal request proto, do not use directly. +message InternalUpdateGraphOperationRequest { + // Internal field, do not use directly. + string database = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + } + ]; + // Internal field, do not use directly. + string operation_id = 2 [(google.api.field_behavior) = REQUIRED]; + // Internal field, do not use directly. + string vm_identity_token = 5 [(google.api.field_behavior) = REQUIRED]; + // Internal field, do not use directly. + double progress = 3 [(google.api.field_behavior) = OPTIONAL]; + // Internal field, do not use directly. + google.rpc.Status status = 6 [(google.api.field_behavior) = OPTIONAL]; +} + +// Internal response proto, do not use directly. 
+message InternalUpdateGraphOperationResponse {} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/clirr-ignored-differences.xml b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/clirr-ignored-differences.xml new file mode 100644 index 000000000000..7236471b3716 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/clirr-ignored-differences.xml @@ -0,0 +1,137 @@ + + + + + 7012 + com/google/spanner/admin/instance/v1/*OrBuilder + * get*(*) + + + 7012 + com/google/spanner/admin/instance/v1/*OrBuilder + boolean contains*(*) + + + 7012 + com/google/spanner/admin/instance/v1/*OrBuilder + boolean has*(*) + + + + + + 5001 + com/google/spanner/admin/instance/v1/* + com/google/protobuf/GeneratedMessage + + + 5001 + com/google/spanner/admin/instance/v1/*$Builder + com/google/protobuf/GeneratedMessage$Builder + + + 5001 + com/google/spanner/admin/instance/v1/*$* + com/google/protobuf/GeneratedMessage + + + 5001 + com/google/spanner/admin/instance/v1/*$*$Builder + com/google/protobuf/GeneratedMessage$Builder + + + 5001 + com/google/spanner/admin/instance/v1/*$*$* + com/google/protobuf/GeneratedMessage + + + 5001 + com/google/spanner/admin/instance/v1/*$*$*$Builder + com/google/protobuf/GeneratedMessage$Builder + + + 5001 + com/google/spanner/admin/instance/v1/*Proto + com/google/protobuf/GeneratedFile + + + + 7005 + com/google/spanner/admin/instance/v1/** + * newBuilderForType(*) + ** + + + + 7006 + com/google/spanner/admin/instance/v1/** + * internalGetFieldAccessorTable() + ** + + + + 7014 + com/google/spanner/admin/instance/v1/** + * getDescriptor() + + + 7006 + com/google/spanner/admin/instance/v1/** + * getDefaultInstanceForType() + ** + + + 7006 + com/google/spanner/admin/instance/v1/** + * addRepeatedField(*) + ** + + + 7006 + com/google/spanner/admin/instance/v1/** + * clear() + ** + + + 7006 + com/google/spanner/admin/instance/v1/** + * clearField(*) + ** + + + 7006 + com/google/spanner/admin/instance/v1/** + * 
clearOneof(*) + ** + + + 7006 + com/google/spanner/admin/instance/v1/** + * clone() + ** + + + 7006 + com/google/spanner/admin/instance/v1/** + * mergeUnknownFields(*) + ** + + + 7006 + com/google/spanner/admin/instance/v1/** + * setField(*) + ** + + + 7006 + com/google/spanner/admin/instance/v1/** + * setRepeatedField(*) + ** + + + 7006 + com/google/spanner/admin/instance/v1/** + * setUnknownFields(*) + ** + + diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/pom.xml b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/pom.xml new file mode 100644 index 000000000000..cb7a2ca271af --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/pom.xml @@ -0,0 +1,47 @@ + + 4.0.0 + com.google.api.grpc + proto-google-cloud-spanner-admin-instance-v1 + 6.112.1-SNAPSHOT + proto-google-cloud-spanner-admin-instance-v1 + PROTO library for proto-google-cloud-spanner-admin-instance-v1 + + com.google.cloud + google-cloud-spanner-parent + 6.112.1-SNAPSHOT + + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-common-protos + + + com.google.api + api-common + + + com.google.guava + guava + + + com.google.api.grpc + proto-google-iam-v1 + + + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + + \ No newline at end of file diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/AutoscalingConfig.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/AutoscalingConfig.java new file mode 100644 index 000000000000..01e307104c77 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/AutoscalingConfig.java @@ -0,0 +1,6817 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * Autoscaling configuration for an instance.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.AutoscalingConfig} + */ +@com.google.protobuf.Generated +public final class AutoscalingConfig extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.AutoscalingConfig) + AutoscalingConfigOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "AutoscalingConfig"); + } + + // Use AutoscalingConfig.newBuilder() to construct. + private AutoscalingConfig(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private AutoscalingConfig() { + asymmetricAutoscalingOptions_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.AutoscalingConfig.class, + com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder.class); + } + + public interface AutoscalingLimitsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * Minimum number of nodes allocated to the instance. If set, this number
    +     * should be greater than or equal to 1.
    +     * 
    + * + * int32 min_nodes = 1; + * + * @return Whether the minNodes field is set. + */ + boolean hasMinNodes(); + + /** + * + * + *
    +     * Minimum number of nodes allocated to the instance. If set, this number
    +     * should be greater than or equal to 1.
    +     * 
    + * + * int32 min_nodes = 1; + * + * @return The minNodes. + */ + int getMinNodes(); + + /** + * + * + *
    +     * Minimum number of processing units allocated to the instance. If set,
    +     * this number should be multiples of 1000.
    +     * 
    + * + * int32 min_processing_units = 2; + * + * @return Whether the minProcessingUnits field is set. + */ + boolean hasMinProcessingUnits(); + + /** + * + * + *
    +     * Minimum number of processing units allocated to the instance. If set,
    +     * this number should be multiples of 1000.
    +     * 
    + * + * int32 min_processing_units = 2; + * + * @return The minProcessingUnits. + */ + int getMinProcessingUnits(); + + /** + * + * + *
    +     * Maximum number of nodes allocated to the instance. If set, this number
    +     * should be greater than or equal to min_nodes.
    +     * 
    + * + * int32 max_nodes = 3; + * + * @return Whether the maxNodes field is set. + */ + boolean hasMaxNodes(); + + /** + * + * + *
    +     * Maximum number of nodes allocated to the instance. If set, this number
    +     * should be greater than or equal to min_nodes.
    +     * 
    + * + * int32 max_nodes = 3; + * + * @return The maxNodes. + */ + int getMaxNodes(); + + /** + * + * + *
    +     * Maximum number of processing units allocated to the instance. If set,
    +     * this number should be multiples of 1000 and be greater than or equal to
    +     * min_processing_units.
    +     * 
    + * + * int32 max_processing_units = 4; + * + * @return Whether the maxProcessingUnits field is set. + */ + boolean hasMaxProcessingUnits(); + + /** + * + * + *
    +     * Maximum number of processing units allocated to the instance. If set,
    +     * this number should be multiples of 1000 and be greater than or equal to
    +     * min_processing_units.
    +     * 
    + * + * int32 max_processing_units = 4; + * + * @return The maxProcessingUnits. + */ + int getMaxProcessingUnits(); + + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits.MinLimitCase + getMinLimitCase(); + + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits.MaxLimitCase + getMaxLimitCase(); + } + + /** + * + * + *
    +   * The autoscaling limits for the instance. Users can define the minimum and
    +   * maximum compute capacity allocated to the instance, and the autoscaler will
    +   * only scale within that range. Users can either use nodes or processing
    +   * units to specify the limits, but should use the same unit to set both the
    +   * min_limit and max_limit.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits} + */ + public static final class AutoscalingLimits extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits) + AutoscalingLimitsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "AutoscalingLimits"); + } + + // Use AutoscalingLimits.newBuilder() to construct. + private AutoscalingLimits(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private AutoscalingLimits() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingLimits_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingLimits_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits.class, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits.Builder + .class); + } + + private int minLimitCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object minLimit_; + + public enum MinLimitCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + MIN_NODES(1), + MIN_PROCESSING_UNITS(2), + MINLIMIT_NOT_SET(0); + private final int value; + + private 
MinLimitCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static MinLimitCase valueOf(int value) { + return forNumber(value); + } + + public static MinLimitCase forNumber(int value) { + switch (value) { + case 1: + return MIN_NODES; + case 2: + return MIN_PROCESSING_UNITS; + case 0: + return MINLIMIT_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public MinLimitCase getMinLimitCase() { + return MinLimitCase.forNumber(minLimitCase_); + } + + private int maxLimitCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object maxLimit_; + + public enum MaxLimitCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + MAX_NODES(3), + MAX_PROCESSING_UNITS(4), + MAXLIMIT_NOT_SET(0); + private final int value; + + private MaxLimitCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static MaxLimitCase valueOf(int value) { + return forNumber(value); + } + + public static MaxLimitCase forNumber(int value) { + switch (value) { + case 3: + return MAX_NODES; + case 4: + return MAX_PROCESSING_UNITS; + case 0: + return MAXLIMIT_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public MaxLimitCase getMaxLimitCase() { + return MaxLimitCase.forNumber(maxLimitCase_); + } + + public static final int MIN_NODES_FIELD_NUMBER = 1; + + /** + * + * + *
    +     * Minimum number of nodes allocated to the instance. If set, this number
    +     * should be greater than or equal to 1.
    +     * 
    + * + * int32 min_nodes = 1; + * + * @return Whether the minNodes field is set. + */ + @java.lang.Override + public boolean hasMinNodes() { + return minLimitCase_ == 1; + } + + /** + * + * + *
    +     * Minimum number of nodes allocated to the instance. If set, this number
    +     * should be greater than or equal to 1.
    +     * 
    + * + * int32 min_nodes = 1; + * + * @return The minNodes. + */ + @java.lang.Override + public int getMinNodes() { + if (minLimitCase_ == 1) { + return (java.lang.Integer) minLimit_; + } + return 0; + } + + public static final int MIN_PROCESSING_UNITS_FIELD_NUMBER = 2; + + /** + * + * + *
    +     * Minimum number of processing units allocated to the instance. If set,
    +     * this number should be multiples of 1000.
    +     * 
    + * + * int32 min_processing_units = 2; + * + * @return Whether the minProcessingUnits field is set. + */ + @java.lang.Override + public boolean hasMinProcessingUnits() { + return minLimitCase_ == 2; + } + + /** + * + * + *
    +     * Minimum number of processing units allocated to the instance. If set,
    +     * this number should be multiples of 1000.
    +     * 
    + * + * int32 min_processing_units = 2; + * + * @return The minProcessingUnits. + */ + @java.lang.Override + public int getMinProcessingUnits() { + if (minLimitCase_ == 2) { + return (java.lang.Integer) minLimit_; + } + return 0; + } + + public static final int MAX_NODES_FIELD_NUMBER = 3; + + /** + * + * + *
    +     * Maximum number of nodes allocated to the instance. If set, this number
    +     * should be greater than or equal to min_nodes.
    +     * 
    + * + * int32 max_nodes = 3; + * + * @return Whether the maxNodes field is set. + */ + @java.lang.Override + public boolean hasMaxNodes() { + return maxLimitCase_ == 3; + } + + /** + * + * + *
    +     * Maximum number of nodes allocated to the instance. If set, this number
    +     * should be greater than or equal to min_nodes.
    +     * 
    + * + * int32 max_nodes = 3; + * + * @return The maxNodes. + */ + @java.lang.Override + public int getMaxNodes() { + if (maxLimitCase_ == 3) { + return (java.lang.Integer) maxLimit_; + } + return 0; + } + + public static final int MAX_PROCESSING_UNITS_FIELD_NUMBER = 4; + + /** + * + * + *
    +     * Maximum number of processing units allocated to the instance. If set,
    +     * this number should be multiples of 1000 and be greater than or equal to
    +     * min_processing_units.
    +     * 
    + * + * int32 max_processing_units = 4; + * + * @return Whether the maxProcessingUnits field is set. + */ + @java.lang.Override + public boolean hasMaxProcessingUnits() { + return maxLimitCase_ == 4; + } + + /** + * + * + *
    +     * Maximum number of processing units allocated to the instance. If set,
    +     * this number should be multiples of 1000 and be greater than or equal to
    +     * min_processing_units.
    +     * 
    + * + * int32 max_processing_units = 4; + * + * @return The maxProcessingUnits. + */ + @java.lang.Override + public int getMaxProcessingUnits() { + if (maxLimitCase_ == 4) { + return (java.lang.Integer) maxLimit_; + } + return 0; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (minLimitCase_ == 1) { + output.writeInt32(1, (int) ((java.lang.Integer) minLimit_)); + } + if (minLimitCase_ == 2) { + output.writeInt32(2, (int) ((java.lang.Integer) minLimit_)); + } + if (maxLimitCase_ == 3) { + output.writeInt32(3, (int) ((java.lang.Integer) maxLimit_)); + } + if (maxLimitCase_ == 4) { + output.writeInt32(4, (int) ((java.lang.Integer) maxLimit_)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (minLimitCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeInt32Size( + 1, (int) ((java.lang.Integer) minLimit_)); + } + if (minLimitCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeInt32Size( + 2, (int) ((java.lang.Integer) minLimit_)); + } + if (maxLimitCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeInt32Size( + 3, (int) ((java.lang.Integer) maxLimit_)); + } + if (maxLimitCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeInt32Size( + 4, (int) ((java.lang.Integer) maxLimit_)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj 
+ instanceof com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits other = + (com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits) obj; + + if (!getMinLimitCase().equals(other.getMinLimitCase())) return false; + switch (minLimitCase_) { + case 1: + if (getMinNodes() != other.getMinNodes()) return false; + break; + case 2: + if (getMinProcessingUnits() != other.getMinProcessingUnits()) return false; + break; + case 0: + default: + } + if (!getMaxLimitCase().equals(other.getMaxLimitCase())) return false; + switch (maxLimitCase_) { + case 3: + if (getMaxNodes() != other.getMaxNodes()) return false; + break; + case 4: + if (getMaxProcessingUnits() != other.getMaxProcessingUnits()) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (minLimitCase_) { + case 1: + hash = (37 * hash) + MIN_NODES_FIELD_NUMBER; + hash = (53 * hash) + getMinNodes(); + break; + case 2: + hash = (37 * hash) + MIN_PROCESSING_UNITS_FIELD_NUMBER; + hash = (53 * hash) + getMinProcessingUnits(); + break; + case 0: + default: + } + switch (maxLimitCase_) { + case 3: + hash = (37 * hash) + MAX_NODES_FIELD_NUMBER; + hash = (53 * hash) + getMaxNodes(); + break; + case 4: + hash = (37 * hash) + MAX_PROCESSING_UNITS_FIELD_NUMBER; + hash = (53 * hash) + getMaxProcessingUnits(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + parseFrom(java.nio.ByteBuffer data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * The autoscaling limits for the instance. Users can define the minimum and
    +     * maximum compute capacity allocated to the instance, and the autoscaler will
    +     * only scale within that range. Users can either use nodes or processing
    +     * units to specify the limits, but should use the same unit to set both the
    +     * min_limit and max_limit.
    +     * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits) + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimitsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingLimits_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingLimits_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits.class, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits.Builder + .class); + } + + // Construct using + // com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + minLimitCase_ = 0; + minLimit_ = null; + maxLimitCase_ = 0; + maxLimit_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingLimits_descriptor; + } + + @java.lang.Override + public 
com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits build() { + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + buildPartial() { + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits result = + new com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits result) { + int from_bitField0_ = bitField0_; + } + + private void buildPartialOneofs( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits result) { + result.minLimitCase_ = minLimitCase_; + result.minLimit_ = this.minLimit_; + result.maxLimitCase_ = maxLimitCase_; + result.maxLimit_ = this.maxLimit_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits) { + return mergeFrom( + (com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits other) { + if (other + == com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + .getDefaultInstance()) return this; + switch 
(other.getMinLimitCase()) { + case MIN_NODES: + { + setMinNodes(other.getMinNodes()); + break; + } + case MIN_PROCESSING_UNITS: + { + setMinProcessingUnits(other.getMinProcessingUnits()); + break; + } + case MINLIMIT_NOT_SET: + { + break; + } + } + switch (other.getMaxLimitCase()) { + case MAX_NODES: + { + setMaxNodes(other.getMaxNodes()); + break; + } + case MAX_PROCESSING_UNITS: + { + setMaxProcessingUnits(other.getMaxProcessingUnits()); + break; + } + case MAXLIMIT_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + minLimit_ = input.readInt32(); + minLimitCase_ = 1; + break; + } // case 8 + case 16: + { + minLimit_ = input.readInt32(); + minLimitCase_ = 2; + break; + } // case 16 + case 24: + { + maxLimit_ = input.readInt32(); + maxLimitCase_ = 3; + break; + } // case 24 + case 32: + { + maxLimit_ = input.readInt32(); + maxLimitCase_ = 4; + break; + } // case 32 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int minLimitCase_ = 0; + private java.lang.Object minLimit_; + + public MinLimitCase getMinLimitCase() { + return MinLimitCase.forNumber(minLimitCase_); + } + + public Builder 
clearMinLimit() { + minLimitCase_ = 0; + minLimit_ = null; + onChanged(); + return this; + } + + private int maxLimitCase_ = 0; + private java.lang.Object maxLimit_; + + public MaxLimitCase getMaxLimitCase() { + return MaxLimitCase.forNumber(maxLimitCase_); + } + + public Builder clearMaxLimit() { + maxLimitCase_ = 0; + maxLimit_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + /** + * + * + *
    +       * Minimum number of nodes allocated to the instance. If set, this number
    +       * should be greater than or equal to 1.
    +       * 
    + * + * int32 min_nodes = 1; + * + * @return Whether the minNodes field is set. + */ + public boolean hasMinNodes() { + return minLimitCase_ == 1; + } + + /** + * + * + *
    +       * Minimum number of nodes allocated to the instance. If set, this number
    +       * should be greater than or equal to 1.
    +       * 
    + * + * int32 min_nodes = 1; + * + * @return The minNodes. + */ + public int getMinNodes() { + if (minLimitCase_ == 1) { + return (java.lang.Integer) minLimit_; + } + return 0; + } + + /** + * + * + *
    +       * Minimum number of nodes allocated to the instance. If set, this number
    +       * should be greater than or equal to 1.
    +       * 
    + * + * int32 min_nodes = 1; + * + * @param value The minNodes to set. + * @return This builder for chaining. + */ + public Builder setMinNodes(int value) { + + minLimitCase_ = 1; + minLimit_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Minimum number of nodes allocated to the instance. If set, this number
    +       * should be greater than or equal to 1.
    +       * 
    + * + * int32 min_nodes = 1; + * + * @return This builder for chaining. + */ + public Builder clearMinNodes() { + if (minLimitCase_ == 1) { + minLimitCase_ = 0; + minLimit_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * Minimum number of processing units allocated to the instance. If set,
    +       * this number should be multiples of 1000.
    +       * 
    + * + * int32 min_processing_units = 2; + * + * @return Whether the minProcessingUnits field is set. + */ + public boolean hasMinProcessingUnits() { + return minLimitCase_ == 2; + } + + /** + * + * + *
    +       * Minimum number of processing units allocated to the instance. If set,
    +       * this number should be multiples of 1000.
    +       * 
    + * + * int32 min_processing_units = 2; + * + * @return The minProcessingUnits. + */ + public int getMinProcessingUnits() { + if (minLimitCase_ == 2) { + return (java.lang.Integer) minLimit_; + } + return 0; + } + + /** + * + * + *
    +       * Minimum number of processing units allocated to the instance. If set,
    +       * this number should be multiples of 1000.
    +       * 
    + * + * int32 min_processing_units = 2; + * + * @param value The minProcessingUnits to set. + * @return This builder for chaining. + */ + public Builder setMinProcessingUnits(int value) { + + minLimitCase_ = 2; + minLimit_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Minimum number of processing units allocated to the instance. If set,
    +       * this number should be multiples of 1000.
    +       * 
    + * + * int32 min_processing_units = 2; + * + * @return This builder for chaining. + */ + public Builder clearMinProcessingUnits() { + if (minLimitCase_ == 2) { + minLimitCase_ = 0; + minLimit_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * Maximum number of nodes allocated to the instance. If set, this number
    +       * should be greater than or equal to min_nodes.
    +       * 
    + * + * int32 max_nodes = 3; + * + * @return Whether the maxNodes field is set. + */ + public boolean hasMaxNodes() { + return maxLimitCase_ == 3; + } + + /** + * + * + *
    +       * Maximum number of nodes allocated to the instance. If set, this number
    +       * should be greater than or equal to min_nodes.
    +       * 
    + * + * int32 max_nodes = 3; + * + * @return The maxNodes. + */ + public int getMaxNodes() { + if (maxLimitCase_ == 3) { + return (java.lang.Integer) maxLimit_; + } + return 0; + } + + /** + * + * + *
    +       * Maximum number of nodes allocated to the instance. If set, this number
    +       * should be greater than or equal to min_nodes.
    +       * 
    + * + * int32 max_nodes = 3; + * + * @param value The maxNodes to set. + * @return This builder for chaining. + */ + public Builder setMaxNodes(int value) { + + maxLimitCase_ = 3; + maxLimit_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Maximum number of nodes allocated to the instance. If set, this number
    +       * should be greater than or equal to min_nodes.
    +       * 
    + * + * int32 max_nodes = 3; + * + * @return This builder for chaining. + */ + public Builder clearMaxNodes() { + if (maxLimitCase_ == 3) { + maxLimitCase_ = 0; + maxLimit_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * Maximum number of processing units allocated to the instance. If set,
    +       * this number should be multiples of 1000 and be greater than or equal to
    +       * min_processing_units.
    +       * 
    + * + * int32 max_processing_units = 4; + * + * @return Whether the maxProcessingUnits field is set. + */ + public boolean hasMaxProcessingUnits() { + return maxLimitCase_ == 4; + } + + /** + * + * + *
    +       * Maximum number of processing units allocated to the instance. If set,
    +       * this number should be multiples of 1000 and be greater than or equal to
    +       * min_processing_units.
    +       * 
    + * + * int32 max_processing_units = 4; + * + * @return The maxProcessingUnits. + */ + public int getMaxProcessingUnits() { + if (maxLimitCase_ == 4) { + return (java.lang.Integer) maxLimit_; + } + return 0; + } + + /** + * + * + *
    +       * Maximum number of processing units allocated to the instance. If set,
    +       * this number should be multiples of 1000 and be greater than or equal to
    +       * min_processing_units.
    +       * 
    + * + * int32 max_processing_units = 4; + * + * @param value The maxProcessingUnits to set. + * @return This builder for chaining. + */ + public Builder setMaxProcessingUnits(int value) { + + maxLimitCase_ = 4; + maxLimit_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Maximum number of processing units allocated to the instance. If set,
    +       * this number should be multiples of 1000 and be greater than or equal to
    +       * min_processing_units.
    +       * 
    + * + * int32 max_processing_units = 4; + * + * @return This builder for chaining. + */ + public Builder clearMaxProcessingUnits() { + if (maxLimitCase_ == 4) { + maxLimitCase_ = 0; + maxLimit_ = null; + onChanged(); + } + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits) + private static final com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits(); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AutoscalingLimits parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface AutoscalingTargetsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * Optional. The target high priority cpu utilization percentage that the
    +     * autoscaler should be trying to achieve for the instance. This number is
    +     * on a scale from 0 (no utilization) to 100 (full utilization). The valid
    +     * range is [10, 90] inclusive. If not specified or set to 0, the autoscaler
    +     * skips scaling based on high priority CPU utilization.
    +     * 
    + * + * + * int32 high_priority_cpu_utilization_percent = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The highPriorityCpuUtilizationPercent. + */ + int getHighPriorityCpuUtilizationPercent(); + + /** + * + * + *
    +     * Optional. The target total CPU utilization percentage that the autoscaler
    +     * should be trying to achieve for the instance. This number is on a scale
    +     * from 0 (no utilization) to 100 (full utilization). The valid range is
    +     * [10, 90] inclusive. If not specified or set to 0, the autoscaler skips
    +     * scaling based on total CPU utilization. If both
    +     * `high_priority_cpu_utilization_percent` and
    +     * `total_cpu_utilization_percent` are specified, the autoscaler provisions
    +     * the larger of the two required compute capacities to satisfy both
    +     * targets.
    +     * 
    + * + * int32 total_cpu_utilization_percent = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The totalCpuUtilizationPercent. + */ + int getTotalCpuUtilizationPercent(); + + /** + * + * + *
    +     * Required. The target storage utilization percentage that the autoscaler
    +     * should be trying to achieve for the instance. This number is on a scale
    +     * from 0 (no utilization) to 100 (full utilization). The valid range is
    +     * [10, 99] inclusive.
    +     * 
    + * + * int32 storage_utilization_percent = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The storageUtilizationPercent. + */ + int getStorageUtilizationPercent(); + } + + /** + * + * + *
    +   * The autoscaling targets for an instance.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets} + */ + public static final class AutoscalingTargets extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets) + AutoscalingTargetsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "AutoscalingTargets"); + } + + // Use AutoscalingTargets.newBuilder() to construct. + private AutoscalingTargets(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private AutoscalingTargets() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingTargets_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingTargets_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.class, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.Builder + .class); + } + + public static final int HIGH_PRIORITY_CPU_UTILIZATION_PERCENT_FIELD_NUMBER = 1; + private int highPriorityCpuUtilizationPercent_ = 0; + + /** + * + * + *
    +     * Optional. The target high priority cpu utilization percentage that the
    +     * autoscaler should be trying to achieve for the instance. This number is
    +     * on a scale from 0 (no utilization) to 100 (full utilization). The valid
    +     * range is [10, 90] inclusive. If not specified or set to 0, the autoscaler
    +     * skips scaling based on high priority CPU utilization.
    +     * 
    + * + * + * int32 high_priority_cpu_utilization_percent = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The highPriorityCpuUtilizationPercent. + */ + @java.lang.Override + public int getHighPriorityCpuUtilizationPercent() { + return highPriorityCpuUtilizationPercent_; + } + + public static final int TOTAL_CPU_UTILIZATION_PERCENT_FIELD_NUMBER = 4; + private int totalCpuUtilizationPercent_ = 0; + + /** + * + * + *
    +     * Optional. The target total CPU utilization percentage that the autoscaler
    +     * should be trying to achieve for the instance. This number is on a scale
    +     * from 0 (no utilization) to 100 (full utilization). The valid range is
    +     * [10, 90] inclusive. If not specified or set to 0, the autoscaler skips
    +     * scaling based on total CPU utilization. If both
    +     * `high_priority_cpu_utilization_percent` and
    +     * `total_cpu_utilization_percent` are specified, the autoscaler provisions
    +     * the larger of the two required compute capacities to satisfy both
    +     * targets.
    +     * 
    + * + * int32 total_cpu_utilization_percent = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The totalCpuUtilizationPercent. + */ + @java.lang.Override + public int getTotalCpuUtilizationPercent() { + return totalCpuUtilizationPercent_; + } + + public static final int STORAGE_UTILIZATION_PERCENT_FIELD_NUMBER = 2; + private int storageUtilizationPercent_ = 0; + + /** + * + * + *
    +     * Required. The target storage utilization percentage that the autoscaler
    +     * should be trying to achieve for the instance. This number is on a scale
    +     * from 0 (no utilization) to 100 (full utilization). The valid range is
    +     * [10, 99] inclusive.
    +     * 
    + * + * int32 storage_utilization_percent = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The storageUtilizationPercent. + */ + @java.lang.Override + public int getStorageUtilizationPercent() { + return storageUtilizationPercent_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (highPriorityCpuUtilizationPercent_ != 0) { + output.writeInt32(1, highPriorityCpuUtilizationPercent_); + } + if (storageUtilizationPercent_ != 0) { + output.writeInt32(2, storageUtilizationPercent_); + } + if (totalCpuUtilizationPercent_ != 0) { + output.writeInt32(4, totalCpuUtilizationPercent_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (highPriorityCpuUtilizationPercent_ != 0) { + size += + com.google.protobuf.CodedOutputStream.computeInt32Size( + 1, highPriorityCpuUtilizationPercent_); + } + if (storageUtilizationPercent_ != 0) { + size += + com.google.protobuf.CodedOutputStream.computeInt32Size(2, storageUtilizationPercent_); + } + if (totalCpuUtilizationPercent_ != 0) { + size += + com.google.protobuf.CodedOutputStream.computeInt32Size(4, totalCpuUtilizationPercent_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets)) { + return super.equals(obj); + } + 
com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets other = + (com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets) obj; + + if (getHighPriorityCpuUtilizationPercent() != other.getHighPriorityCpuUtilizationPercent()) + return false; + if (getTotalCpuUtilizationPercent() != other.getTotalCpuUtilizationPercent()) return false; + if (getStorageUtilizationPercent() != other.getStorageUtilizationPercent()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + HIGH_PRIORITY_CPU_UTILIZATION_PERCENT_FIELD_NUMBER; + hash = (53 * hash) + getHighPriorityCpuUtilizationPercent(); + hash = (37 * hash) + TOTAL_CPU_UTILIZATION_PERCENT_FIELD_NUMBER; + hash = (53 * hash) + getTotalCpuUtilizationPercent(); + hash = (37 * hash) + STORAGE_UTILIZATION_PERCENT_FIELD_NUMBER; + hash = (53 * hash) + getStorageUtilizationPercent(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + 
+ public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + 
PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * The autoscaling targets for an instance.
    +     * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets) + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargetsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingTargets_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingTargets_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.class, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.Builder + .class); + } + + // Construct using + // com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + highPriorityCpuUtilizationPercent_ = 0; + totalCpuUtilizationPercent_ = 0; + storageUtilizationPercent_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingTargets_descriptor; + } + + @java.lang.Override + public 
com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets build() { + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + buildPartial() { + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets result = + new com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.highPriorityCpuUtilizationPercent_ = highPriorityCpuUtilizationPercent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.totalCpuUtilizationPercent_ = totalCpuUtilizationPercent_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.storageUtilizationPercent_ = storageUtilizationPercent_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets) { + return mergeFrom( + (com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets other) { + if (other + == 
com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + .getDefaultInstance()) return this; + if (other.getHighPriorityCpuUtilizationPercent() != 0) { + setHighPriorityCpuUtilizationPercent(other.getHighPriorityCpuUtilizationPercent()); + } + if (other.getTotalCpuUtilizationPercent() != 0) { + setTotalCpuUtilizationPercent(other.getTotalCpuUtilizationPercent()); + } + if (other.getStorageUtilizationPercent() != 0) { + setStorageUtilizationPercent(other.getStorageUtilizationPercent()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + highPriorityCpuUtilizationPercent_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 16: + { + storageUtilizationPercent_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 16 + case 32: + { + totalCpuUtilizationPercent_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 32 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int highPriorityCpuUtilizationPercent_; + + /** + * + * + *
    +       * Optional. The target high priority cpu utilization percentage that the
    +       * autoscaler should be trying to achieve for the instance. This number is
    +       * on a scale from 0 (no utilization) to 100 (full utilization). The valid
    +       * range is [10, 90] inclusive. If not specified or set to 0, the autoscaler
    +       * skips scaling based on high priority CPU utilization.
    +       * 
    + * + * + * int32 high_priority_cpu_utilization_percent = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The highPriorityCpuUtilizationPercent. + */ + @java.lang.Override + public int getHighPriorityCpuUtilizationPercent() { + return highPriorityCpuUtilizationPercent_; + } + + /** + * + * + *
    +       * Optional. The target high priority cpu utilization percentage that the
    +       * autoscaler should be trying to achieve for the instance. This number is
    +       * on a scale from 0 (no utilization) to 100 (full utilization). The valid
    +       * range is [10, 90] inclusive. If not specified or set to 0, the autoscaler
    +       * skips scaling based on high priority CPU utilization.
    +       * 
    + * + * + * int32 high_priority_cpu_utilization_percent = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The highPriorityCpuUtilizationPercent to set. + * @return This builder for chaining. + */ + public Builder setHighPriorityCpuUtilizationPercent(int value) { + + highPriorityCpuUtilizationPercent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Optional. The target high priority cpu utilization percentage that the
    +       * autoscaler should be trying to achieve for the instance. This number is
    +       * on a scale from 0 (no utilization) to 100 (full utilization). The valid
    +       * range is [10, 90] inclusive. If not specified or set to 0, the autoscaler
    +       * skips scaling based on high priority CPU utilization.
    +       * 
    + * + * + * int32 high_priority_cpu_utilization_percent = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearHighPriorityCpuUtilizationPercent() { + bitField0_ = (bitField0_ & ~0x00000001); + highPriorityCpuUtilizationPercent_ = 0; + onChanged(); + return this; + } + + private int totalCpuUtilizationPercent_; + + /** + * + * + *
    +       * Optional. The target total CPU utilization percentage that the autoscaler
    +       * should be trying to achieve for the instance. This number is on a scale
    +       * from 0 (no utilization) to 100 (full utilization). The valid range is
    +       * [10, 90] inclusive. If not specified or set to 0, the autoscaler skips
    +       * scaling based on total CPU utilization. If both
    +       * `high_priority_cpu_utilization_percent` and
    +       * `total_cpu_utilization_percent` are specified, the autoscaler provisions
    +       * the larger of the two required compute capacities to satisfy both
    +       * targets.
    +       * 
    + * + * int32 total_cpu_utilization_percent = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The totalCpuUtilizationPercent. + */ + @java.lang.Override + public int getTotalCpuUtilizationPercent() { + return totalCpuUtilizationPercent_; + } + + /** + * + * + *
    +       * Optional. The target total CPU utilization percentage that the autoscaler
    +       * should be trying to achieve for the instance. This number is on a scale
    +       * from 0 (no utilization) to 100 (full utilization). The valid range is
    +       * [10, 90] inclusive. If not specified or set to 0, the autoscaler skips
    +       * scaling based on total CPU utilization. If both
    +       * `high_priority_cpu_utilization_percent` and
    +       * `total_cpu_utilization_percent` are specified, the autoscaler provisions
    +       * the larger of the two required compute capacities to satisfy both
    +       * targets.
    +       * 
    + * + * int32 total_cpu_utilization_percent = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The totalCpuUtilizationPercent to set. + * @return This builder for chaining. + */ + public Builder setTotalCpuUtilizationPercent(int value) { + + totalCpuUtilizationPercent_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Optional. The target total CPU utilization percentage that the autoscaler
    +       * should be trying to achieve for the instance. This number is on a scale
    +       * from 0 (no utilization) to 100 (full utilization). The valid range is
    +       * [10, 90] inclusive. If not specified or set to 0, the autoscaler skips
    +       * scaling based on total CPU utilization. If both
    +       * `high_priority_cpu_utilization_percent` and
    +       * `total_cpu_utilization_percent` are specified, the autoscaler provisions
    +       * the larger of the two required compute capacities to satisfy both
    +       * targets.
    +       * 
    + * + * int32 total_cpu_utilization_percent = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearTotalCpuUtilizationPercent() { + bitField0_ = (bitField0_ & ~0x00000002); + totalCpuUtilizationPercent_ = 0; + onChanged(); + return this; + } + + private int storageUtilizationPercent_; + + /** + * + * + *
    +       * Required. The target storage utilization percentage that the autoscaler
    +       * should be trying to achieve for the instance. This number is on a scale
    +       * from 0 (no utilization) to 100 (full utilization). The valid range is
    +       * [10, 99] inclusive.
    +       * 
    + * + * int32 storage_utilization_percent = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The storageUtilizationPercent. + */ + @java.lang.Override + public int getStorageUtilizationPercent() { + return storageUtilizationPercent_; + } + + /** + * + * + *
    +       * Required. The target storage utilization percentage that the autoscaler
    +       * should be trying to achieve for the instance. This number is on a scale
    +       * from 0 (no utilization) to 100 (full utilization). The valid range is
    +       * [10, 99] inclusive.
    +       * 
    + * + * int32 storage_utilization_percent = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @param value The storageUtilizationPercent to set. + * @return This builder for chaining. + */ + public Builder setStorageUtilizationPercent(int value) { + + storageUtilizationPercent_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The target storage utilization percentage that the autoscaler
    +       * should be trying to achieve for the instance. This number is on a scale
    +       * from 0 (no utilization) to 100 (full utilization). The valid range is
    +       * [10, 99] inclusive.
    +       * 
    + * + * int32 storage_utilization_percent = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return This builder for chaining. + */ + public Builder clearStorageUtilizationPercent() { + bitField0_ = (bitField0_ & ~0x00000004); + storageUtilizationPercent_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets) + private static final com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets(); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AutoscalingTargets parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() 
{ + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface AsymmetricAutoscalingOptionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +     * applies. Only read-only replicas are supported.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the replicaSelection field is set. + */ + boolean hasReplicaSelection(); + + /** + * + * + *
    +     * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +     * applies. Only read-only replicas are supported.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The replicaSelection. + */ + com.google.spanner.admin.instance.v1.ReplicaSelection getReplicaSelection(); + + /** + * + * + *
    +     * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +     * applies. Only read-only replicas are supported.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder getReplicaSelectionOrBuilder(); + + /** + * + * + *
    +     * Optional. Overrides applied to the top-level autoscaling configuration
    +     * for the selected replicas.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the overrides field is set. + */ + boolean hasOverrides(); + + /** + * + * + *
    +     * Optional. Overrides applied to the top-level autoscaling configuration
    +     * for the selected replicas.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The overrides. + */ + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + getOverrides(); + + /** + * + * + *
    +     * Optional. Overrides applied to the top-level autoscaling configuration
    +     * for the selected replicas.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverridesOrBuilder + getOverridesOrBuilder(); + } + + /** + * + * + *
    +   * AsymmetricAutoscalingOption specifies the scaling of replicas identified by
    +   * the given selection.
    +   * 
    + * + * Protobuf type {@code + * google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption} + */ + public static final class AsymmetricAutoscalingOption extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption) + AsymmetricAutoscalingOptionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "AsymmetricAutoscalingOption"); + } + + // Use AsymmetricAutoscalingOption.newBuilder() to construct. + private AsymmetricAutoscalingOption(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private AsymmetricAutoscalingOption() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .class, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .Builder.class); + } + + public interface AutoscalingConfigOverridesOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides) + 
com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +       * Optional. If specified, overrides the min/max limit in the top-level
    +       * autoscaling configuration for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the autoscalingLimits field is set. + */ + boolean hasAutoscalingLimits(); + + /** + * + * + *
    +       * Optional. If specified, overrides the min/max limit in the top-level
    +       * autoscaling configuration for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoscalingLimits. + */ + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + getAutoscalingLimits(); + + /** + * + * + *
    +       * Optional. If specified, overrides the min/max limit in the top-level
    +       * autoscaling configuration for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimitsOrBuilder + getAutoscalingLimitsOrBuilder(); + + /** + * + * + *
    +       * Optional. If specified, overrides the autoscaling target
    +       * high_priority_cpu_utilization_percent in the top-level autoscaling
    +       * configuration for the selected replicas.
    +       * 
    + * + * + * int32 autoscaling_target_high_priority_cpu_utilization_percent = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoscalingTargetHighPriorityCpuUtilizationPercent. + */ + int getAutoscalingTargetHighPriorityCpuUtilizationPercent(); + + /** + * + * + *
    +       * Optional. If specified, overrides the
    +       * autoscaling target `total_cpu_utilization_percent`
    +       * in the top-level autoscaling configuration for the selected replicas.
    +       * 
    + * + * + * int32 autoscaling_target_total_cpu_utilization_percent = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoscalingTargetTotalCpuUtilizationPercent. + */ + int getAutoscalingTargetTotalCpuUtilizationPercent(); + + /** + * + * + *
    +       * Optional. If true, disables high priority CPU autoscaling for the
    +       * selected replicas and ignores
    +       * [high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.high_priority_cpu_utilization_percent]
    +       * in the top-level autoscaling configuration.
    +       *
    +       * When setting this field to true, setting
    +       * [autoscaling_target_high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_high_priority_cpu_utilization_percent]
    +       * field to a non-zero value for the same replica is not supported.
    +       *
    +       * If false, the
    +       * [autoscaling_target_high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_high_priority_cpu_utilization_percent]
    +       * field in the replica will be used if set to a non-zero value.
    +       * Otherwise, the
    +       * [high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.high_priority_cpu_utilization_percent]
    +       * field in the top-level autoscaling configuration will be used.
    +       *
    +       * Setting both
    +       * [disable_high_priority_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_high_priority_cpu_autoscaling]
    +       * and
    +       * [disable_total_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_total_cpu_autoscaling]
    +       * to true for the same replica is not supported.
    +       * 
    + * + * + * bool disable_high_priority_cpu_autoscaling = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The disableHighPriorityCpuAutoscaling. + */ + boolean getDisableHighPriorityCpuAutoscaling(); + + /** + * + * + *
    +       * Optional. If true, disables total CPU autoscaling for the selected
    +       * replicas and ignores
    +       * [total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.total_cpu_utilization_percent]
    +       * in the top-level autoscaling configuration.
    +       *
    +       * When setting this field to true, setting
    +       * [autoscaling_target_total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_total_cpu_utilization_percent]
    +       * field to a non-zero value for the same replica is not supported.
    +       *
    +       * If false, the
    +       * [autoscaling_target_total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_total_cpu_utilization_percent]
    +       * field in the replica will be used if set to a non-zero value.
    +       * Otherwise, the
    +       * [total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.total_cpu_utilization_percent]
    +       * field in the top-level autoscaling configuration will be used.
    +       *
    +       * Setting both
    +       * [disable_high_priority_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_high_priority_cpu_autoscaling]
    +       * and
    +       * [disable_total_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_total_cpu_autoscaling]
    +       * to true for the same replica is not supported.
    +       * 
    + * + * bool disable_total_cpu_autoscaling = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The disableTotalCpuAutoscaling. + */ + boolean getDisableTotalCpuAutoscaling(); + } + + /** + * + * + *
    +     * Overrides the top-level autoscaling configuration for the replicas
    +     * identified by `replica_selection`. All fields in this message are
    +     * optional. Any unspecified fields will use the corresponding values from
    +     * the top-level autoscaling configuration.
    +     * 
    + * + * Protobuf type {@code + * google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides} + */ + public static final class AutoscalingConfigOverrides + extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides) + AutoscalingConfigOverridesOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "AutoscalingConfigOverrides"); + } + + // Use AutoscalingConfigOverrides.newBuilder() to construct. + private AutoscalingConfigOverrides(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private AutoscalingConfigOverrides() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_AutoscalingConfigOverrides_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_AutoscalingConfigOverrides_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.class, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.Builder.class); + } + + private int bitField0_; + public static final int 
AUTOSCALING_LIMITS_FIELD_NUMBER = 1; + private com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + autoscalingLimits_; + + /** + * + * + *
    +       * Optional. If specified, overrides the min/max limit in the top-level
    +       * autoscaling configuration for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the autoscalingLimits field is set. + */ + @java.lang.Override + public boolean hasAutoscalingLimits() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +       * Optional. If specified, overrides the min/max limit in the top-level
    +       * autoscaling configuration for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoscalingLimits. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + getAutoscalingLimits() { + return autoscalingLimits_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + .getDefaultInstance() + : autoscalingLimits_; + } + + /** + * + * + *
    +       * Optional. If specified, overrides the min/max limit in the top-level
    +       * autoscaling configuration for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimitsOrBuilder + getAutoscalingLimitsOrBuilder() { + return autoscalingLimits_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + .getDefaultInstance() + : autoscalingLimits_; + } + + public static final int + AUTOSCALING_TARGET_HIGH_PRIORITY_CPU_UTILIZATION_PERCENT_FIELD_NUMBER = 2; + private int autoscalingTargetHighPriorityCpuUtilizationPercent_ = 0; + + /** + * + * + *
    +       * Optional. If specified, overrides the autoscaling target
    +       * high_priority_cpu_utilization_percent in the top-level autoscaling
    +       * configuration for the selected replicas.
    +       * 
    + * + * + * int32 autoscaling_target_high_priority_cpu_utilization_percent = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoscalingTargetHighPriorityCpuUtilizationPercent. + */ + @java.lang.Override + public int getAutoscalingTargetHighPriorityCpuUtilizationPercent() { + return autoscalingTargetHighPriorityCpuUtilizationPercent_; + } + + public static final int AUTOSCALING_TARGET_TOTAL_CPU_UTILIZATION_PERCENT_FIELD_NUMBER = 4; + private int autoscalingTargetTotalCpuUtilizationPercent_ = 0; + + /** + * + * + *
    +       * Optional. If specified, overrides the
    +       * autoscaling target `total_cpu_utilization_percent`
    +       * in the top-level autoscaling configuration for the selected replicas.
    +       * 
    + * + * + * int32 autoscaling_target_total_cpu_utilization_percent = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoscalingTargetTotalCpuUtilizationPercent. + */ + @java.lang.Override + public int getAutoscalingTargetTotalCpuUtilizationPercent() { + return autoscalingTargetTotalCpuUtilizationPercent_; + } + + public static final int DISABLE_HIGH_PRIORITY_CPU_AUTOSCALING_FIELD_NUMBER = 5; + private boolean disableHighPriorityCpuAutoscaling_ = false; + + /** + * + * + *
    +       * Optional. If true, disables high priority CPU autoscaling for the
    +       * selected replicas and ignores
    +       * [high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.high_priority_cpu_utilization_percent]
    +       * in the top-level autoscaling configuration.
    +       *
    +       * When setting this field to true, setting
    +       * [autoscaling_target_high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_high_priority_cpu_utilization_percent]
    +       * field to a non-zero value for the same replica is not supported.
    +       *
    +       * If false, the
    +       * [autoscaling_target_high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_high_priority_cpu_utilization_percent]
    +       * field in the replica will be used if set to a non-zero value.
    +       * Otherwise, the
    +       * [high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.high_priority_cpu_utilization_percent]
    +       * field in the top-level autoscaling configuration will be used.
    +       *
    +       * Setting both
    +       * [disable_high_priority_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_high_priority_cpu_autoscaling]
    +       * and
    +       * [disable_total_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_total_cpu_autoscaling]
    +       * to true for the same replica is not supported.
    +       * 
    + * + * + * bool disable_high_priority_cpu_autoscaling = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The disableHighPriorityCpuAutoscaling. + */ + @java.lang.Override + public boolean getDisableHighPriorityCpuAutoscaling() { + return disableHighPriorityCpuAutoscaling_; + } + + public static final int DISABLE_TOTAL_CPU_AUTOSCALING_FIELD_NUMBER = 6; + private boolean disableTotalCpuAutoscaling_ = false; + + /** + * + * + *
    +       * Optional. If true, disables total CPU autoscaling for the selected
    +       * replicas and ignores
    +       * [total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.total_cpu_utilization_percent]
    +       * in the top-level autoscaling configuration.
    +       *
    +       * When setting this field to true, setting
    +       * [autoscaling_target_total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_total_cpu_utilization_percent]
    +       * field to a non-zero value for the same replica is not supported.
    +       *
    +       * If false, the
    +       * [autoscaling_target_total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_total_cpu_utilization_percent]
    +       * field in the replica will be used if set to a non-zero value.
    +       * Otherwise, the
    +       * [total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.total_cpu_utilization_percent]
    +       * field in the top-level autoscaling configuration will be used.
    +       *
    +       * Setting both
    +       * [disable_high_priority_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_high_priority_cpu_autoscaling]
    +       * and
    +       * [disable_total_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_total_cpu_autoscaling]
    +       * to true for the same replica is not supported.
    +       * 
    + * + * bool disable_total_cpu_autoscaling = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The disableTotalCpuAutoscaling. + */ + @java.lang.Override + public boolean getDisableTotalCpuAutoscaling() { + return disableTotalCpuAutoscaling_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getAutoscalingLimits()); + } + if (autoscalingTargetHighPriorityCpuUtilizationPercent_ != 0) { + output.writeInt32(2, autoscalingTargetHighPriorityCpuUtilizationPercent_); + } + if (autoscalingTargetTotalCpuUtilizationPercent_ != 0) { + output.writeInt32(4, autoscalingTargetTotalCpuUtilizationPercent_); + } + if (disableHighPriorityCpuAutoscaling_ != false) { + output.writeBool(5, disableHighPriorityCpuAutoscaling_); + } + if (disableTotalCpuAutoscaling_ != false) { + output.writeBool(6, disableTotalCpuAutoscaling_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(1, getAutoscalingLimits()); + } + if (autoscalingTargetHighPriorityCpuUtilizationPercent_ != 0) { + size += + com.google.protobuf.CodedOutputStream.computeInt32Size( + 2, autoscalingTargetHighPriorityCpuUtilizationPercent_); + } + if (autoscalingTargetTotalCpuUtilizationPercent_ != 0) { + size += + com.google.protobuf.CodedOutputStream.computeInt32Size( + 4, autoscalingTargetTotalCpuUtilizationPercent_); + } + if 
(disableHighPriorityCpuAutoscaling_ != false) { + size += + com.google.protobuf.CodedOutputStream.computeBoolSize( + 5, disableHighPriorityCpuAutoscaling_); + } + if (disableTotalCpuAutoscaling_ != false) { + size += + com.google.protobuf.CodedOutputStream.computeBoolSize(6, disableTotalCpuAutoscaling_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + other = + (com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides) + obj; + + if (hasAutoscalingLimits() != other.hasAutoscalingLimits()) return false; + if (hasAutoscalingLimits()) { + if (!getAutoscalingLimits().equals(other.getAutoscalingLimits())) return false; + } + if (getAutoscalingTargetHighPriorityCpuUtilizationPercent() + != other.getAutoscalingTargetHighPriorityCpuUtilizationPercent()) return false; + if (getAutoscalingTargetTotalCpuUtilizationPercent() + != other.getAutoscalingTargetTotalCpuUtilizationPercent()) return false; + if (getDisableHighPriorityCpuAutoscaling() != other.getDisableHighPriorityCpuAutoscaling()) + return false; + if (getDisableTotalCpuAutoscaling() != other.getDisableTotalCpuAutoscaling()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasAutoscalingLimits()) { + hash = (37 * hash) + AUTOSCALING_LIMITS_FIELD_NUMBER; + hash = (53 * 
hash) + getAutoscalingLimits().hashCode(); + } + hash = (37 * hash) + AUTOSCALING_TARGET_HIGH_PRIORITY_CPU_UTILIZATION_PERCENT_FIELD_NUMBER; + hash = (53 * hash) + getAutoscalingTargetHighPriorityCpuUtilizationPercent(); + hash = (37 * hash) + AUTOSCALING_TARGET_TOTAL_CPU_UTILIZATION_PERCENT_FIELD_NUMBER; + hash = (53 * hash) + getAutoscalingTargetTotalCpuUtilizationPercent(); + hash = (37 * hash) + DISABLE_HIGH_PRIORITY_CPU_AUTOSCALING_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashBoolean(getDisableHighPriorityCpuAutoscaling()); + hash = (37 * hash) + DISABLE_TOTAL_CPU_AUTOSCALING_FIELD_NUMBER; + hash = + (53 * hash) + com.google.protobuf.Internal.hashBoolean(getDisableTotalCpuAutoscaling()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +       * Overrides the top-level autoscaling configuration for the replicas
    +       * identified by `replica_selection`. All fields in this message are
    +       * optional. Any unspecified fields will use the corresponding values from
    +       * the top-level autoscaling configuration.
    +       * 
    + * + * Protobuf type {@code + * google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides) + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverridesOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_AutoscalingConfigOverrides_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_AutoscalingConfigOverrides_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.class, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.Builder.class); + } + + // Construct using + // com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetAutoscalingLimitsFieldBuilder(); + } + } + 
+ @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + autoscalingLimits_ = null; + if (autoscalingLimitsBuilder_ != null) { + autoscalingLimitsBuilder_.dispose(); + autoscalingLimitsBuilder_ = null; + } + autoscalingTargetHighPriorityCpuUtilizationPercent_ = 0; + autoscalingTargetTotalCpuUtilizationPercent_ = 0; + disableHighPriorityCpuAutoscaling_ = false; + disableTotalCpuAutoscaling_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_AutoscalingConfigOverrides_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + build() { + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + buildPartial() { + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + result = + new com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; 
+ } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.autoscalingLimits_ = + autoscalingLimitsBuilder_ == null + ? autoscalingLimits_ + : autoscalingLimitsBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.autoscalingTargetHighPriorityCpuUtilizationPercent_ = + autoscalingTargetHighPriorityCpuUtilizationPercent_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.autoscalingTargetTotalCpuUtilizationPercent_ = + autoscalingTargetTotalCpuUtilizationPercent_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.disableHighPriorityCpuAutoscaling_ = disableHighPriorityCpuAutoscaling_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.disableTotalCpuAutoscaling_ = disableTotalCpuAutoscaling_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides) { + return mergeFrom( + (com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + other) { + if (other + == com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.getDefaultInstance()) return this; + if (other.hasAutoscalingLimits()) { + mergeAutoscalingLimits(other.getAutoscalingLimits()); + } + if (other.getAutoscalingTargetHighPriorityCpuUtilizationPercent() != 0) { + 
setAutoscalingTargetHighPriorityCpuUtilizationPercent( + other.getAutoscalingTargetHighPriorityCpuUtilizationPercent()); + } + if (other.getAutoscalingTargetTotalCpuUtilizationPercent() != 0) { + setAutoscalingTargetTotalCpuUtilizationPercent( + other.getAutoscalingTargetTotalCpuUtilizationPercent()); + } + if (other.getDisableHighPriorityCpuAutoscaling() != false) { + setDisableHighPriorityCpuAutoscaling(other.getDisableHighPriorityCpuAutoscaling()); + } + if (other.getDisableTotalCpuAutoscaling() != false) { + setDisableTotalCpuAutoscaling(other.getDisableTotalCpuAutoscaling()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetAutoscalingLimitsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + autoscalingTargetHighPriorityCpuUtilizationPercent_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 32: + { + autoscalingTargetTotalCpuUtilizationPercent_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 32 + case 40: + { + disableHighPriorityCpuAutoscaling_ = input.readBool(); + bitField0_ |= 0x00000008; + break; + } // case 40 + case 48: + { + disableTotalCpuAutoscaling_ = input.readBool(); + bitField0_ |= 0x00000010; + break; + } // case 48 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // 
default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + autoscalingLimits_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimitsOrBuilder> + autoscalingLimitsBuilder_; + + /** + * + * + *
    +         * Optional. If specified, overrides the min/max limit in the top-level
    +         * autoscaling configuration for the selected replicas.
    +         * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the autoscalingLimits field is set. + */ + public boolean hasAutoscalingLimits() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +         * Optional. If specified, overrides the min/max limit in the top-level
    +         * autoscaling configuration for the selected replicas.
    +         * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoscalingLimits. + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + getAutoscalingLimits() { + if (autoscalingLimitsBuilder_ == null) { + return autoscalingLimits_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + .getDefaultInstance() + : autoscalingLimits_; + } else { + return autoscalingLimitsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +         * Optional. If specified, overrides the min/max limit in the top-level
    +         * autoscaling configuration for the selected replicas.
    +         * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAutoscalingLimits( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits value) { + if (autoscalingLimitsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + autoscalingLimits_ = value; + } else { + autoscalingLimitsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +         * Optional. If specified, overrides the min/max limit in the top-level
    +         * autoscaling configuration for the selected replicas.
    +         * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAutoscalingLimits( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits.Builder + builderForValue) { + if (autoscalingLimitsBuilder_ == null) { + autoscalingLimits_ = builderForValue.build(); + } else { + autoscalingLimitsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +         * Optional. If specified, overrides the min/max limit in the top-level
    +         * autoscaling configuration for the selected replicas.
    +         * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeAutoscalingLimits( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits value) { + if (autoscalingLimitsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && autoscalingLimits_ != null + && autoscalingLimits_ + != com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + .getDefaultInstance()) { + getAutoscalingLimitsBuilder().mergeFrom(value); + } else { + autoscalingLimits_ = value; + } + } else { + autoscalingLimitsBuilder_.mergeFrom(value); + } + if (autoscalingLimits_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +         * Optional. If specified, overrides the min/max limit in the top-level
    +         * autoscaling configuration for the selected replicas.
    +         * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearAutoscalingLimits() { + bitField0_ = (bitField0_ & ~0x00000001); + autoscalingLimits_ = null; + if (autoscalingLimitsBuilder_ != null) { + autoscalingLimitsBuilder_.dispose(); + autoscalingLimitsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +         * Optional. If specified, overrides the min/max limit in the top-level
    +         * autoscaling configuration for the selected replicas.
    +         * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits.Builder + getAutoscalingLimitsBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetAutoscalingLimitsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +         * Optional. If specified, overrides the min/max limit in the top-level
    +         * autoscaling configuration for the selected replicas.
    +         * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimitsOrBuilder + getAutoscalingLimitsOrBuilder() { + if (autoscalingLimitsBuilder_ != null) { + return autoscalingLimitsBuilder_.getMessageOrBuilder(); + } else { + return autoscalingLimits_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + .getDefaultInstance() + : autoscalingLimits_; + } + } + + /** + * + * + *
    +         * Optional. If specified, overrides the min/max limit in the top-level
    +         * autoscaling configuration for the selected replicas.
    +         * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimitsOrBuilder> + internalGetAutoscalingLimitsFieldBuilder() { + if (autoscalingLimitsBuilder_ == null) { + autoscalingLimitsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + .Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig + .AutoscalingLimitsOrBuilder>( + getAutoscalingLimits(), getParentForChildren(), isClean()); + autoscalingLimits_ = null; + } + return autoscalingLimitsBuilder_; + } + + private int autoscalingTargetHighPriorityCpuUtilizationPercent_; + + /** + * + * + *
    +         * Optional. If specified, overrides the autoscaling target
    +         * high_priority_cpu_utilization_percent in the top-level autoscaling
    +         * configuration for the selected replicas.
    +         * 
    + * + * + * int32 autoscaling_target_high_priority_cpu_utilization_percent = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoscalingTargetHighPriorityCpuUtilizationPercent. + */ + @java.lang.Override + public int getAutoscalingTargetHighPriorityCpuUtilizationPercent() { + return autoscalingTargetHighPriorityCpuUtilizationPercent_; + } + + /** + * + * + *
    +         * Optional. If specified, overrides the autoscaling target
    +         * high_priority_cpu_utilization_percent in the top-level autoscaling
    +         * configuration for the selected replicas.
    +         * 
    + * + * + * int32 autoscaling_target_high_priority_cpu_utilization_percent = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The autoscalingTargetHighPriorityCpuUtilizationPercent to set. + * @return This builder for chaining. + */ + public Builder setAutoscalingTargetHighPriorityCpuUtilizationPercent(int value) { + + autoscalingTargetHighPriorityCpuUtilizationPercent_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +         * Optional. If specified, overrides the autoscaling target
    +         * high_priority_cpu_utilization_percent in the top-level autoscaling
    +         * configuration for the selected replicas.
    +         * 
    + * + * + * int32 autoscaling_target_high_priority_cpu_utilization_percent = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearAutoscalingTargetHighPriorityCpuUtilizationPercent() { + bitField0_ = (bitField0_ & ~0x00000002); + autoscalingTargetHighPriorityCpuUtilizationPercent_ = 0; + onChanged(); + return this; + } + + private int autoscalingTargetTotalCpuUtilizationPercent_; + + /** + * + * + *
    +         * Optional. If specified, overrides the
    +         * autoscaling target `total_cpu_utilization_percent`
    +         * in the top-level autoscaling configuration for the selected replicas.
    +         * 
    + * + * + * int32 autoscaling_target_total_cpu_utilization_percent = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoscalingTargetTotalCpuUtilizationPercent. + */ + @java.lang.Override + public int getAutoscalingTargetTotalCpuUtilizationPercent() { + return autoscalingTargetTotalCpuUtilizationPercent_; + } + + /** + * + * + *
    +         * Optional. If specified, overrides the
    +         * autoscaling target `total_cpu_utilization_percent`
    +         * in the top-level autoscaling configuration for the selected replicas.
    +         * 
    + * + * + * int32 autoscaling_target_total_cpu_utilization_percent = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The autoscalingTargetTotalCpuUtilizationPercent to set. + * @return This builder for chaining. + */ + public Builder setAutoscalingTargetTotalCpuUtilizationPercent(int value) { + + autoscalingTargetTotalCpuUtilizationPercent_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +         * Optional. If specified, overrides the
    +         * autoscaling target `total_cpu_utilization_percent`
    +         * in the top-level autoscaling configuration for the selected replicas.
    +         * 
    + * + * + * int32 autoscaling_target_total_cpu_utilization_percent = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearAutoscalingTargetTotalCpuUtilizationPercent() { + bitField0_ = (bitField0_ & ~0x00000004); + autoscalingTargetTotalCpuUtilizationPercent_ = 0; + onChanged(); + return this; + } + + private boolean disableHighPriorityCpuAutoscaling_; + + /** + * + * + *
    +         * Optional. If true, disables high priority CPU autoscaling for the
    +         * selected replicas and ignores
    +         * [high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.high_priority_cpu_utilization_percent]
    +         * in the top-level autoscaling configuration.
    +         *
    +         * When setting this field to true, setting
    +         * [autoscaling_target_high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_high_priority_cpu_utilization_percent]
    +         * field to a non-zero value for the same replica is not supported.
    +         *
    +         * If false, the
    +         * [autoscaling_target_high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_high_priority_cpu_utilization_percent]
    +         * field in the replica will be used if set to a non-zero value.
    +         * Otherwise, the
    +         * [high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.high_priority_cpu_utilization_percent]
    +         * field in the top-level autoscaling configuration will be used.
    +         *
    +         * Setting both
    +         * [disable_high_priority_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_high_priority_cpu_autoscaling]
    +         * and
    +         * [disable_total_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_total_cpu_autoscaling]
    +         * to true for the same replica is not supported.
    +         * 
    + * + * + * bool disable_high_priority_cpu_autoscaling = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The disableHighPriorityCpuAutoscaling. + */ + @java.lang.Override + public boolean getDisableHighPriorityCpuAutoscaling() { + return disableHighPriorityCpuAutoscaling_; + } + + /** + * + * + *
    +         * Optional. If true, disables high priority CPU autoscaling for the
    +         * selected replicas and ignores
    +         * [high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.high_priority_cpu_utilization_percent]
    +         * in the top-level autoscaling configuration.
    +         *
    +         * When setting this field to true, setting
    +         * [autoscaling_target_high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_high_priority_cpu_utilization_percent]
    +         * field to a non-zero value for the same replica is not supported.
    +         *
    +         * If false, the
    +         * [autoscaling_target_high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_high_priority_cpu_utilization_percent]
    +         * field in the replica will be used if set to a non-zero value.
    +         * Otherwise, the
    +         * [high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.high_priority_cpu_utilization_percent]
    +         * field in the top-level autoscaling configuration will be used.
    +         *
    +         * Setting both
    +         * [disable_high_priority_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_high_priority_cpu_autoscaling]
    +         * and
    +         * [disable_total_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_total_cpu_autoscaling]
    +         * to true for the same replica is not supported.
    +         * 
    + * + * + * bool disable_high_priority_cpu_autoscaling = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The disableHighPriorityCpuAutoscaling to set. + * @return This builder for chaining. + */ + public Builder setDisableHighPriorityCpuAutoscaling(boolean value) { + + disableHighPriorityCpuAutoscaling_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +         * Optional. If true, disables high priority CPU autoscaling for the
    +         * selected replicas and ignores
    +         * [high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.high_priority_cpu_utilization_percent]
    +         * in the top-level autoscaling configuration.
    +         *
    +         * When setting this field to true, setting
    +         * [autoscaling_target_high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_high_priority_cpu_utilization_percent]
    +         * field to a non-zero value for the same replica is not supported.
    +         *
    +         * If false, the
    +         * [autoscaling_target_high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_high_priority_cpu_utilization_percent]
    +         * field in the replica will be used if set to a non-zero value.
    +         * Otherwise, the
    +         * [high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.high_priority_cpu_utilization_percent]
    +         * field in the top-level autoscaling configuration will be used.
    +         *
    +         * Setting both
    +         * [disable_high_priority_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_high_priority_cpu_autoscaling]
    +         * and
    +         * [disable_total_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_total_cpu_autoscaling]
    +         * to true for the same replica is not supported.
    +         * 
    + * + * + * bool disable_high_priority_cpu_autoscaling = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearDisableHighPriorityCpuAutoscaling() { + bitField0_ = (bitField0_ & ~0x00000008); + disableHighPriorityCpuAutoscaling_ = false; + onChanged(); + return this; + } + + private boolean disableTotalCpuAutoscaling_; + + /** + * + * + *
    +         * Optional. If true, disables total CPU autoscaling for the selected
    +         * replicas and ignores
    +         * [total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.total_cpu_utilization_percent]
    +         * in the top-level autoscaling configuration.
    +         *
    +         * When setting this field to true, setting
    +         * [autoscaling_target_total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_total_cpu_utilization_percent]
    +         * field to a non-zero value for the same replica is not supported.
    +         *
    +         * If false, the
    +         * [autoscaling_target_total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_total_cpu_utilization_percent]
    +         * field in the replica will be used if set to a non-zero value.
    +         * Otherwise, the
    +         * [total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.total_cpu_utilization_percent]
    +         * field in the top-level autoscaling configuration will be used.
    +         *
    +         * Setting both
    +         * [disable_high_priority_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_high_priority_cpu_autoscaling]
    +         * and
    +         * [disable_total_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_total_cpu_autoscaling]
    +         * to true for the same replica is not supported.
    +         * 
    + * + * bool disable_total_cpu_autoscaling = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The disableTotalCpuAutoscaling. + */ + @java.lang.Override + public boolean getDisableTotalCpuAutoscaling() { + return disableTotalCpuAutoscaling_; + } + + /** + * + * + *
    +         * Optional. If true, disables total CPU autoscaling for the selected
    +         * replicas and ignores
    +         * [total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.total_cpu_utilization_percent]
    +         * in the top-level autoscaling configuration.
    +         *
    +         * When setting this field to true, setting
    +         * [autoscaling_target_total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_total_cpu_utilization_percent]
    +         * field to a non-zero value for the same replica is not supported.
    +         *
    +         * If false, the
    +         * [autoscaling_target_total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_total_cpu_utilization_percent]
    +         * field in the replica will be used if set to a non-zero value.
    +         * Otherwise, the
    +         * [total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.total_cpu_utilization_percent]
    +         * field in the top-level autoscaling configuration will be used.
    +         *
    +         * Setting both
    +         * [disable_high_priority_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_high_priority_cpu_autoscaling]
    +         * and
    +         * [disable_total_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_total_cpu_autoscaling]
    +         * to true for the same replica is not supported.
    +         * 
    + * + * bool disable_total_cpu_autoscaling = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The disableTotalCpuAutoscaling to set. + * @return This builder for chaining. + */ + public Builder setDisableTotalCpuAutoscaling(boolean value) { + + disableTotalCpuAutoscaling_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +         * Optional. If true, disables total CPU autoscaling for the selected
    +         * replicas and ignores
    +         * [total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.total_cpu_utilization_percent]
    +         * in the top-level autoscaling configuration.
    +         *
    +         * When setting this field to true, setting
    +         * [autoscaling_target_total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_total_cpu_utilization_percent]
    +         * field to a non-zero value for the same replica is not supported.
    +         *
    +         * If false, the
    +         * [autoscaling_target_total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_total_cpu_utilization_percent]
    +         * field in the replica will be used if set to a non-zero value.
    +         * Otherwise, the
    +         * [total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.total_cpu_utilization_percent]
    +         * field in the top-level autoscaling configuration will be used.
    +         *
    +         * Setting both
    +         * [disable_high_priority_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_high_priority_cpu_autoscaling]
    +         * and
    +         * [disable_total_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_total_cpu_autoscaling]
    +         * to true for the same replica is not supported.
    +         * 
    + * + * bool disable_total_cpu_autoscaling = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearDisableTotalCpuAutoscaling() { + bitField0_ = (bitField0_ & ~0x00000010); + disableTotalCpuAutoscaling_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides) + private static final com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides(); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AutoscalingConfigOverrides parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int REPLICA_SELECTION_FIELD_NUMBER = 1; + private com.google.spanner.admin.instance.v1.ReplicaSelection replicaSelection_; + + /** + * + * + *
    +     * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +     * applies. Only read-only replicas are supported.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the replicaSelection field is set. + */ + @java.lang.Override + public boolean hasReplicaSelection() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +     * applies. Only read-only replicas are supported.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The replicaSelection. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaSelection getReplicaSelection() { + return replicaSelection_ == null + ? com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance() + : replicaSelection_; + } + + /** + * + * + *
    +     * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +     * applies. Only read-only replicas are supported.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder + getReplicaSelectionOrBuilder() { + return replicaSelection_ == null + ? com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance() + : replicaSelection_; + } + + public static final int OVERRIDES_FIELD_NUMBER = 2; + private com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + overrides_; + + /** + * + * + *
    +     * Optional. Overrides applied to the top-level autoscaling configuration
    +     * for the selected replicas.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the overrides field is set. + */ + @java.lang.Override + public boolean hasOverrides() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Optional. Overrides applied to the top-level autoscaling configuration
    +     * for the selected replicas.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The overrides. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + getOverrides() { + return overrides_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.getDefaultInstance() + : overrides_; + } + + /** + * + * + *
    +     * Optional. Overrides applied to the top-level autoscaling configuration
    +     * for the selected replicas.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverridesOrBuilder + getOverridesOrBuilder() { + return overrides_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.getDefaultInstance() + : overrides_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getReplicaSelection()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getOverrides()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getReplicaSelection()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getOverrides()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption)) { + return super.equals(obj); + } + 
com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption other = + (com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption) obj; + + if (hasReplicaSelection() != other.hasReplicaSelection()) return false; + if (hasReplicaSelection()) { + if (!getReplicaSelection().equals(other.getReplicaSelection())) return false; + } + if (hasOverrides() != other.hasOverrides()) return false; + if (hasOverrides()) { + if (!getOverrides().equals(other.getOverrides())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasReplicaSelection()) { + hash = (37 * hash) + REPLICA_SELECTION_FIELD_NUMBER; + hash = (53 * hash) + getReplicaSelection().hashCode(); + } + if (hasOverrides()) { + hash = (37 * hash) + OVERRIDES_FIELD_NUMBER; + hash = (53 * hash) + getOverrides().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * AsymmetricAutoscalingOption specifies the scaling of replicas identified by
    +     * the given selection.
    +     * 
    + * + * Protobuf type {@code + * google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption) + com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOptionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .class, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .Builder.class); + } + + // Construct using + // com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetReplicaSelectionFieldBuilder(); + internalGetOverridesFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + replicaSelection_ = null; + if (replicaSelectionBuilder_ != null) { + 
replicaSelectionBuilder_.dispose(); + replicaSelectionBuilder_ = null; + } + overrides_ = null; + if (overridesBuilder_ != null) { + overridesBuilder_.dispose(); + overridesBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + build() { + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + buildPartial() { + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption result = + new com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption( + this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.replicaSelection_ = + replicaSelectionBuilder_ == null + ? 
replicaSelection_ + : replicaSelectionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.overrides_ = overridesBuilder_ == null ? overrides_ : overridesBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption) { + return mergeFrom( + (com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + other) { + if (other + == com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .getDefaultInstance()) return this; + if (other.hasReplicaSelection()) { + mergeReplicaSelection(other.getReplicaSelection()); + } + if (other.hasOverrides()) { + mergeOverrides(other.getOverrides()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetReplicaSelectionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetOverridesFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + 
break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.admin.instance.v1.ReplicaSelection replicaSelection_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.ReplicaSelection, + com.google.spanner.admin.instance.v1.ReplicaSelection.Builder, + com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder> + replicaSelectionBuilder_; + + /** + * + * + *
    +       * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +       * applies. Only read-only replicas are supported.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the replicaSelection field is set. + */ + public boolean hasReplicaSelection() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +       * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +       * applies. Only read-only replicas are supported.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The replicaSelection. + */ + public com.google.spanner.admin.instance.v1.ReplicaSelection getReplicaSelection() { + if (replicaSelectionBuilder_ == null) { + return replicaSelection_ == null + ? com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance() + : replicaSelection_; + } else { + return replicaSelectionBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +       * applies. Only read-only replicas are supported.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setReplicaSelection( + com.google.spanner.admin.instance.v1.ReplicaSelection value) { + if (replicaSelectionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + replicaSelection_ = value; + } else { + replicaSelectionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +       * applies. Only read-only replicas are supported.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setReplicaSelection( + com.google.spanner.admin.instance.v1.ReplicaSelection.Builder builderForValue) { + if (replicaSelectionBuilder_ == null) { + replicaSelection_ = builderForValue.build(); + } else { + replicaSelectionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +       * applies. Only read-only replicas are supported.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeReplicaSelection( + com.google.spanner.admin.instance.v1.ReplicaSelection value) { + if (replicaSelectionBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && replicaSelection_ != null + && replicaSelection_ + != com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance()) { + getReplicaSelectionBuilder().mergeFrom(value); + } else { + replicaSelection_ = value; + } + } else { + replicaSelectionBuilder_.mergeFrom(value); + } + if (replicaSelection_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +       * applies. Only read-only replicas are supported.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearReplicaSelection() { + bitField0_ = (bitField0_ & ~0x00000001); + replicaSelection_ = null; + if (replicaSelectionBuilder_ != null) { + replicaSelectionBuilder_.dispose(); + replicaSelectionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +       * applies. Only read-only replicas are supported.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.ReplicaSelection.Builder + getReplicaSelectionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetReplicaSelectionFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +       * applies. Only read-only replicas are supported.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder + getReplicaSelectionOrBuilder() { + if (replicaSelectionBuilder_ != null) { + return replicaSelectionBuilder_.getMessageOrBuilder(); + } else { + return replicaSelection_ == null + ? com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance() + : replicaSelection_; + } + } + + /** + * + * + *
    +       * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +       * applies. Only read-only replicas are supported.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.ReplicaSelection, + com.google.spanner.admin.instance.v1.ReplicaSelection.Builder, + com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder> + internalGetReplicaSelectionFieldBuilder() { + if (replicaSelectionBuilder_ == null) { + replicaSelectionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.ReplicaSelection, + com.google.spanner.admin.instance.v1.ReplicaSelection.Builder, + com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder>( + getReplicaSelection(), getParentForChildren(), isClean()); + replicaSelection_ = null; + } + return replicaSelectionBuilder_; + } + + private com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + overrides_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverridesOrBuilder> + overridesBuilder_; + + /** + * + * + *
    +       * Optional. Overrides applied to the top-level autoscaling configuration
    +       * for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the overrides field is set. + */ + public boolean hasOverrides() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +       * Optional. Overrides applied to the top-level autoscaling configuration
    +       * for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The overrides. + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + getOverrides() { + if (overridesBuilder_ == null) { + return overrides_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.getDefaultInstance() + : overrides_; + } else { + return overridesBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * Optional. Overrides applied to the top-level autoscaling configuration
    +       * for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setOverrides( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + value) { + if (overridesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + overrides_ = value; + } else { + overridesBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Optional. Overrides applied to the top-level autoscaling configuration
    +       * for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setOverrides( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.Builder + builderForValue) { + if (overridesBuilder_ == null) { + overrides_ = builderForValue.build(); + } else { + overridesBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Optional. Overrides applied to the top-level autoscaling configuration
    +       * for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeOverrides( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + value) { + if (overridesBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && overrides_ != null + && overrides_ + != com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + .getDefaultInstance()) { + getOverridesBuilder().mergeFrom(value); + } else { + overrides_ = value; + } + } else { + overridesBuilder_.mergeFrom(value); + } + if (overrides_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * Optional. Overrides applied to the top-level autoscaling configuration
    +       * for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearOverrides() { + bitField0_ = (bitField0_ & ~0x00000002); + overrides_ = null; + if (overridesBuilder_ != null) { + overridesBuilder_.dispose(); + overridesBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * Optional. Overrides applied to the top-level autoscaling configuration
    +       * for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.Builder + getOverridesBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetOverridesFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Optional. Overrides applied to the top-level autoscaling configuration
    +       * for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverridesOrBuilder + getOverridesOrBuilder() { + if (overridesBuilder_ != null) { + return overridesBuilder_.getMessageOrBuilder(); + } else { + return overrides_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.getDefaultInstance() + : overrides_; + } + } + + /** + * + * + *
    +       * Optional. Overrides applied to the top-level autoscaling configuration
    +       * for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverridesOrBuilder> + internalGetOverridesFieldBuilder() { + if (overridesBuilder_ == null) { + overridesBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverridesOrBuilder>( + getOverrides(), getParentForChildren(), isClean()); + overrides_ = null; + } + return overridesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption) + private static final com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption(); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { 
+ @java.lang.Override + public AsymmetricAutoscalingOption parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int AUTOSCALING_LIMITS_FIELD_NUMBER = 1; + private com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + autoscalingLimits_; + + /** + * + * + *
    +   * Required. Autoscaling limits for an instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the autoscalingLimits field is set. + */ + @java.lang.Override + public boolean hasAutoscalingLimits() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Required. Autoscaling limits for an instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The autoscalingLimits. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + getAutoscalingLimits() { + return autoscalingLimits_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + .getDefaultInstance() + : autoscalingLimits_; + } + + /** + * + * + *
    +   * Required. Autoscaling limits for an instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimitsOrBuilder + getAutoscalingLimitsOrBuilder() { + return autoscalingLimits_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + .getDefaultInstance() + : autoscalingLimits_; + } + + public static final int AUTOSCALING_TARGETS_FIELD_NUMBER = 2; + private com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + autoscalingTargets_; + + /** + * + * + *
    +   * Required. The autoscaling targets for an instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets autoscaling_targets = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the autoscalingTargets field is set. + */ + @java.lang.Override + public boolean hasAutoscalingTargets() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Required. The autoscaling targets for an instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets autoscaling_targets = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The autoscalingTargets. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + getAutoscalingTargets() { + return autoscalingTargets_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + .getDefaultInstance() + : autoscalingTargets_; + } + + /** + * + * + *
    +   * Required. The autoscaling targets for an instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets autoscaling_targets = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargetsOrBuilder + getAutoscalingTargetsOrBuilder() { + return autoscalingTargets_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + .getDefaultInstance() + : autoscalingTargets_; + } + + public static final int ASYMMETRIC_AUTOSCALING_OPTIONS_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private java.util.List< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption> + asymmetricAutoscalingOptions_; + + /** + * + * + *
    +   * Optional. Optional asymmetric autoscaling options.
    +   * Replicas matching the replica selection criteria will be autoscaled
    +   * independently from other replicas. The autoscaler will scale the replicas
    +   * based on the utilization of replicas identified by the replica selection.
    +   * Replica selections should not overlap with each other.
    +   *
    +   * Other replicas (those do not match any replica selection) will be
    +   * autoscaled together and will have the same compute capacity allocated to
    +   * them.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption> + getAsymmetricAutoscalingOptionsList() { + return asymmetricAutoscalingOptions_; + } + + /** + * + * + *
    +   * Optional. Optional asymmetric autoscaling options.
    +   * Replicas matching the replica selection criteria will be autoscaled
    +   * independently from other replicas. The autoscaler will scale the replicas
    +   * based on the utilization of replicas identified by the replica selection.
    +   * Replica selections should not overlap with each other.
    +   *
    +   * Other replicas (those do not match any replica selection) will be
    +   * autoscaled together and will have the same compute capacity allocated to
    +   * them.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List< + ? extends + com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOptionOrBuilder> + getAsymmetricAutoscalingOptionsOrBuilderList() { + return asymmetricAutoscalingOptions_; + } + + /** + * + * + *
    +   * Optional. Optional asymmetric autoscaling options.
    +   * Replicas matching the replica selection criteria will be autoscaled
    +   * independently from other replicas. The autoscaler will scale the replicas
    +   * based on the utilization of replicas identified by the replica selection.
    +   * Replica selections should not overlap with each other.
    +   *
    +   * Other replicas (those do not match any replica selection) will be
    +   * autoscaled together and will have the same compute capacity allocated to
    +   * them.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public int getAsymmetricAutoscalingOptionsCount() { + return asymmetricAutoscalingOptions_.size(); + } + + /** + * + * + *
    +   * Optional. Optional asymmetric autoscaling options.
    +   * Replicas matching the replica selection criteria will be autoscaled
    +   * independently from other replicas. The autoscaler will scale the replicas
    +   * based on the utilization of replicas identified by the replica selection.
    +   * Replica selections should not overlap with each other.
    +   *
    +   * Other replicas (those do not match any replica selection) will be
    +   * autoscaled together and will have the same compute capacity allocated to
    +   * them.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + getAsymmetricAutoscalingOptions(int index) { + return asymmetricAutoscalingOptions_.get(index); + } + + /** + * + * + *
    +   * Optional. Optional asymmetric autoscaling options.
    +   * Replicas matching the replica selection criteria will be autoscaled
    +   * independently from other replicas. The autoscaler will scale the replicas
    +   * based on the utilization of replicas identified by the replica selection.
    +   * Replica selections should not overlap with each other.
    +   *
    +   * Other replicas (those do not match any replica selection) will be
    +   * autoscaled together and will have the same compute capacity allocated to
    +   * them.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOptionOrBuilder + getAsymmetricAutoscalingOptionsOrBuilder(int index) { + return asymmetricAutoscalingOptions_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getAutoscalingLimits()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getAutoscalingTargets()); + } + for (int i = 0; i < asymmetricAutoscalingOptions_.size(); i++) { + output.writeMessage(3, asymmetricAutoscalingOptions_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getAutoscalingLimits()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getAutoscalingTargets()); + } + for (int i = 0; i < asymmetricAutoscalingOptions_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, asymmetricAutoscalingOptions_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + 
return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.AutoscalingConfig)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.AutoscalingConfig other = + (com.google.spanner.admin.instance.v1.AutoscalingConfig) obj; + + if (hasAutoscalingLimits() != other.hasAutoscalingLimits()) return false; + if (hasAutoscalingLimits()) { + if (!getAutoscalingLimits().equals(other.getAutoscalingLimits())) return false; + } + if (hasAutoscalingTargets() != other.hasAutoscalingTargets()) return false; + if (hasAutoscalingTargets()) { + if (!getAutoscalingTargets().equals(other.getAutoscalingTargets())) return false; + } + if (!getAsymmetricAutoscalingOptionsList().equals(other.getAsymmetricAutoscalingOptionsList())) + return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasAutoscalingLimits()) { + hash = (37 * hash) + AUTOSCALING_LIMITS_FIELD_NUMBER; + hash = (53 * hash) + getAutoscalingLimits().hashCode(); + } + if (hasAutoscalingTargets()) { + hash = (37 * hash) + AUTOSCALING_TARGETS_FIELD_NUMBER; + hash = (53 * hash) + getAutoscalingTargets().hashCode(); + } + if (getAsymmetricAutoscalingOptionsCount() > 0) { + hash = (37 * hash) + ASYMMETRIC_AUTOSCALING_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getAsymmetricAutoscalingOptionsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig parseFrom( + java.nio.ByteBuffer data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static 
com.google.spanner.admin.instance.v1.AutoscalingConfig parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.AutoscalingConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Autoscaling configuration for an instance.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.AutoscalingConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.AutoscalingConfig) + com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.AutoscalingConfig.class, + com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.AutoscalingConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetAutoscalingLimitsFieldBuilder(); + internalGetAutoscalingTargetsFieldBuilder(); + internalGetAsymmetricAutoscalingOptionsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + autoscalingLimits_ = null; + if (autoscalingLimitsBuilder_ != null) { + autoscalingLimitsBuilder_.dispose(); + autoscalingLimitsBuilder_ = null; + } + autoscalingTargets_ = null; + if (autoscalingTargetsBuilder_ != null) { + autoscalingTargetsBuilder_.dispose(); + 
autoscalingTargetsBuilder_ = null; + } + if (asymmetricAutoscalingOptionsBuilder_ == null) { + asymmetricAutoscalingOptions_ = java.util.Collections.emptyList(); + } else { + asymmetricAutoscalingOptions_ = null; + asymmetricAutoscalingOptionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig build() { + com.google.spanner.admin.instance.v1.AutoscalingConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig buildPartial() { + com.google.spanner.admin.instance.v1.AutoscalingConfig result = + new com.google.spanner.admin.instance.v1.AutoscalingConfig(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.admin.instance.v1.AutoscalingConfig result) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0)) { + asymmetricAutoscalingOptions_ = + java.util.Collections.unmodifiableList(asymmetricAutoscalingOptions_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.asymmetricAutoscalingOptions_ = asymmetricAutoscalingOptions_; + } else { + result.asymmetricAutoscalingOptions_ = asymmetricAutoscalingOptionsBuilder_.build(); + } + } + + private 
void buildPartial0(com.google.spanner.admin.instance.v1.AutoscalingConfig result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.autoscalingLimits_ = + autoscalingLimitsBuilder_ == null + ? autoscalingLimits_ + : autoscalingLimitsBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.autoscalingTargets_ = + autoscalingTargetsBuilder_ == null + ? autoscalingTargets_ + : autoscalingTargetsBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.AutoscalingConfig) { + return mergeFrom((com.google.spanner.admin.instance.v1.AutoscalingConfig) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.AutoscalingConfig other) { + if (other == com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance()) + return this; + if (other.hasAutoscalingLimits()) { + mergeAutoscalingLimits(other.getAutoscalingLimits()); + } + if (other.hasAutoscalingTargets()) { + mergeAutoscalingTargets(other.getAutoscalingTargets()); + } + if (asymmetricAutoscalingOptionsBuilder_ == null) { + if (!other.asymmetricAutoscalingOptions_.isEmpty()) { + if (asymmetricAutoscalingOptions_.isEmpty()) { + asymmetricAutoscalingOptions_ = other.asymmetricAutoscalingOptions_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureAsymmetricAutoscalingOptionsIsMutable(); + asymmetricAutoscalingOptions_.addAll(other.asymmetricAutoscalingOptions_); + } + onChanged(); + } + } else { + if (!other.asymmetricAutoscalingOptions_.isEmpty()) { + if (asymmetricAutoscalingOptionsBuilder_.isEmpty()) { + asymmetricAutoscalingOptionsBuilder_.dispose(); + asymmetricAutoscalingOptionsBuilder_ = null; + 
asymmetricAutoscalingOptions_ = other.asymmetricAutoscalingOptions_; + bitField0_ = (bitField0_ & ~0x00000004); + asymmetricAutoscalingOptionsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetAsymmetricAutoscalingOptionsFieldBuilder() + : null; + } else { + asymmetricAutoscalingOptionsBuilder_.addAllMessages( + other.asymmetricAutoscalingOptions_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetAutoscalingLimitsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetAutoscalingTargetsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + m = + input.readMessage( + com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.parser(), + extensionRegistry); + if (asymmetricAutoscalingOptionsBuilder_ == null) { + ensureAsymmetricAutoscalingOptionsIsMutable(); + asymmetricAutoscalingOptions_.add(m); + } else { + asymmetricAutoscalingOptionsBuilder_.addMessage(m); + } + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + autoscalingLimits_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimitsOrBuilder> + autoscalingLimitsBuilder_; + + /** + * + * + *
    +     * Required. Autoscaling limits for an instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the autoscalingLimits field is set. + */ + public boolean hasAutoscalingLimits() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Required. Autoscaling limits for an instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The autoscalingLimits. + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + getAutoscalingLimits() { + if (autoscalingLimitsBuilder_ == null) { + return autoscalingLimits_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + .getDefaultInstance() + : autoscalingLimits_; + } else { + return autoscalingLimitsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. Autoscaling limits for an instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setAutoscalingLimits( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits value) { + if (autoscalingLimitsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + autoscalingLimits_ = value; + } else { + autoscalingLimitsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. Autoscaling limits for an instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setAutoscalingLimits( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits.Builder + builderForValue) { + if (autoscalingLimitsBuilder_ == null) { + autoscalingLimits_ = builderForValue.build(); + } else { + autoscalingLimitsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. Autoscaling limits for an instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeAutoscalingLimits( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits value) { + if (autoscalingLimitsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && autoscalingLimits_ != null + && autoscalingLimits_ + != com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + .getDefaultInstance()) { + getAutoscalingLimitsBuilder().mergeFrom(value); + } else { + autoscalingLimits_ = value; + } + } else { + autoscalingLimitsBuilder_.mergeFrom(value); + } + if (autoscalingLimits_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. Autoscaling limits for an instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearAutoscalingLimits() { + bitField0_ = (bitField0_ & ~0x00000001); + autoscalingLimits_ = null; + if (autoscalingLimitsBuilder_ != null) { + autoscalingLimitsBuilder_.dispose(); + autoscalingLimitsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. Autoscaling limits for an instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits.Builder + getAutoscalingLimitsBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetAutoscalingLimitsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. Autoscaling limits for an instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimitsOrBuilder + getAutoscalingLimitsOrBuilder() { + if (autoscalingLimitsBuilder_ != null) { + return autoscalingLimitsBuilder_.getMessageOrBuilder(); + } else { + return autoscalingLimits_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + .getDefaultInstance() + : autoscalingLimits_; + } + } + + /** + * + * + *
    +     * Required. Autoscaling limits for an instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimitsOrBuilder> + internalGetAutoscalingLimitsFieldBuilder() { + if (autoscalingLimitsBuilder_ == null) { + autoscalingLimitsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimitsOrBuilder>( + getAutoscalingLimits(), getParentForChildren(), isClean()); + autoscalingLimits_ = null; + } + return autoscalingLimitsBuilder_; + } + + private com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + autoscalingTargets_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargetsOrBuilder> + autoscalingTargetsBuilder_; + + /** + * + * + *
    +     * Required. The autoscaling targets for an instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets autoscaling_targets = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the autoscalingTargets field is set. + */ + public boolean hasAutoscalingTargets() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Required. The autoscaling targets for an instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets autoscaling_targets = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The autoscalingTargets. + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + getAutoscalingTargets() { + if (autoscalingTargetsBuilder_ == null) { + return autoscalingTargets_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + .getDefaultInstance() + : autoscalingTargets_; + } else { + return autoscalingTargetsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. The autoscaling targets for an instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets autoscaling_targets = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setAutoscalingTargets( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets value) { + if (autoscalingTargetsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + autoscalingTargets_ = value; + } else { + autoscalingTargetsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The autoscaling targets for an instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets autoscaling_targets = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setAutoscalingTargets( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.Builder + builderForValue) { + if (autoscalingTargetsBuilder_ == null) { + autoscalingTargets_ = builderForValue.build(); + } else { + autoscalingTargetsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The autoscaling targets for an instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets autoscaling_targets = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeAutoscalingTargets( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets value) { + if (autoscalingTargetsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && autoscalingTargets_ != null + && autoscalingTargets_ + != com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + .getDefaultInstance()) { + getAutoscalingTargetsBuilder().mergeFrom(value); + } else { + autoscalingTargets_ = value; + } + } else { + autoscalingTargetsBuilder_.mergeFrom(value); + } + if (autoscalingTargets_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. The autoscaling targets for an instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets autoscaling_targets = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearAutoscalingTargets() { + bitField0_ = (bitField0_ & ~0x00000002); + autoscalingTargets_ = null; + if (autoscalingTargetsBuilder_ != null) { + autoscalingTargetsBuilder_.dispose(); + autoscalingTargetsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The autoscaling targets for an instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets autoscaling_targets = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.Builder + getAutoscalingTargetsBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetAutoscalingTargetsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. The autoscaling targets for an instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets autoscaling_targets = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargetsOrBuilder + getAutoscalingTargetsOrBuilder() { + if (autoscalingTargetsBuilder_ != null) { + return autoscalingTargetsBuilder_.getMessageOrBuilder(); + } else { + return autoscalingTargets_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + .getDefaultInstance() + : autoscalingTargets_; + } + } + + /** + * + * + *
    +     * Required. The autoscaling targets for an instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets autoscaling_targets = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargetsOrBuilder> + internalGetAutoscalingTargetsFieldBuilder() { + if (autoscalingTargetsBuilder_ == null) { + autoscalingTargetsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargetsOrBuilder>( + getAutoscalingTargets(), getParentForChildren(), isClean()); + autoscalingTargets_ = null; + } + return autoscalingTargetsBuilder_; + } + + private java.util.List< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption> + asymmetricAutoscalingOptions_ = java.util.Collections.emptyList(); + + private void ensureAsymmetricAutoscalingOptionsIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + asymmetricAutoscalingOptions_ = + new java.util.ArrayList< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption>( + asymmetricAutoscalingOptions_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOptionOrBuilder> + asymmetricAutoscalingOptionsBuilder_; + + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption> + getAsymmetricAutoscalingOptionsList() { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(asymmetricAutoscalingOptions_); + } else { + return asymmetricAutoscalingOptionsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getAsymmetricAutoscalingOptionsCount() { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + return asymmetricAutoscalingOptions_.size(); + } else { + return asymmetricAutoscalingOptionsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + getAsymmetricAutoscalingOptions(int index) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + return asymmetricAutoscalingOptions_.get(index); + } else { + return asymmetricAutoscalingOptionsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAsymmetricAutoscalingOptions( + int index, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption value) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAsymmetricAutoscalingOptionsIsMutable(); + asymmetricAutoscalingOptions_.set(index, value); + onChanged(); + } else { + asymmetricAutoscalingOptionsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAsymmetricAutoscalingOptions( + int index, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.Builder + builderForValue) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + ensureAsymmetricAutoscalingOptionsIsMutable(); + asymmetricAutoscalingOptions_.set(index, builderForValue.build()); + onChanged(); + } else { + asymmetricAutoscalingOptionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAsymmetricAutoscalingOptions( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption value) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAsymmetricAutoscalingOptionsIsMutable(); + asymmetricAutoscalingOptions_.add(value); + onChanged(); + } else { + asymmetricAutoscalingOptionsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAsymmetricAutoscalingOptions( + int index, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption value) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAsymmetricAutoscalingOptionsIsMutable(); + asymmetricAutoscalingOptions_.add(index, value); + onChanged(); + } else { + asymmetricAutoscalingOptionsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAsymmetricAutoscalingOptions( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.Builder + builderForValue) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + ensureAsymmetricAutoscalingOptionsIsMutable(); + asymmetricAutoscalingOptions_.add(builderForValue.build()); + onChanged(); + } else { + asymmetricAutoscalingOptionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAsymmetricAutoscalingOptions( + int index, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.Builder + builderForValue) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + ensureAsymmetricAutoscalingOptionsIsMutable(); + asymmetricAutoscalingOptions_.add(index, builderForValue.build()); + onChanged(); + } else { + asymmetricAutoscalingOptionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAllAsymmetricAutoscalingOptions( + java.lang.Iterable< + ? extends + com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption> + values) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + ensureAsymmetricAutoscalingOptionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, asymmetricAutoscalingOptions_); + onChanged(); + } else { + asymmetricAutoscalingOptionsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearAsymmetricAutoscalingOptions() { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + asymmetricAutoscalingOptions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + asymmetricAutoscalingOptionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeAsymmetricAutoscalingOptions(int index) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + ensureAsymmetricAutoscalingOptionsIsMutable(); + asymmetricAutoscalingOptions_.remove(index); + onChanged(); + } else { + asymmetricAutoscalingOptionsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .Builder + getAsymmetricAutoscalingOptionsBuilder(int index) { + return internalGetAsymmetricAutoscalingOptionsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOptionOrBuilder + getAsymmetricAutoscalingOptionsOrBuilder(int index) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + return asymmetricAutoscalingOptions_.get(index); + } else { + return asymmetricAutoscalingOptionsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List< + ? extends + com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOptionOrBuilder> + getAsymmetricAutoscalingOptionsOrBuilderList() { + if (asymmetricAutoscalingOptionsBuilder_ != null) { + return asymmetricAutoscalingOptionsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(asymmetricAutoscalingOptions_); + } + } + + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .Builder + addAsymmetricAutoscalingOptionsBuilder() { + return internalGetAsymmetricAutoscalingOptionsFieldBuilder() + .addBuilder( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .getDefaultInstance()); + } + + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .Builder + addAsymmetricAutoscalingOptionsBuilder(int index) { + return internalGetAsymmetricAutoscalingOptionsFieldBuilder() + .addBuilder( + index, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .getDefaultInstance()); + } + + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .Builder> + getAsymmetricAutoscalingOptionsBuilderList() { + return internalGetAsymmetricAutoscalingOptionsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOptionOrBuilder> + internalGetAsymmetricAutoscalingOptionsFieldBuilder() { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + asymmetricAutoscalingOptionsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOptionOrBuilder>( + asymmetricAutoscalingOptions_, + ((bitField0_ & 0x00000004) != 0), + getParentForChildren(), + isClean()); + asymmetricAutoscalingOptions_ = null; + } + return asymmetricAutoscalingOptionsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.AutoscalingConfig) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.AutoscalingConfig) + private static final com.google.spanner.admin.instance.v1.AutoscalingConfig DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.AutoscalingConfig(); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + 
private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AutoscalingConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/AutoscalingConfigOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/AutoscalingConfigOrBuilder.java new file mode 100644 index 000000000000..64c4f60dc523 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/AutoscalingConfigOrBuilder.java @@ -0,0 +1,228 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface AutoscalingConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.AutoscalingConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. Autoscaling limits for an instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the autoscalingLimits field is set. + */ + boolean hasAutoscalingLimits(); + + /** + * + * + *
    +   * Required. Autoscaling limits for an instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The autoscalingLimits. + */ + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits getAutoscalingLimits(); + + /** + * + * + *
    +   * Required. Autoscaling limits for an instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimitsOrBuilder + getAutoscalingLimitsOrBuilder(); + + /** + * + * + *
    +   * Required. The autoscaling targets for an instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets autoscaling_targets = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the autoscalingTargets field is set. + */ + boolean hasAutoscalingTargets(); + + /** + * + * + *
    +   * Required. The autoscaling targets for an instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets autoscaling_targets = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The autoscalingTargets. + */ + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets getAutoscalingTargets(); + + /** + * + * + *
    +   * Required. The autoscaling targets for an instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets autoscaling_targets = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargetsOrBuilder + getAutoscalingTargetsOrBuilder(); + + /** + * + * + *
    +   * Optional. Optional asymmetric autoscaling options.
    +   * Replicas matching the replica selection criteria will be autoscaled
    +   * independently from other replicas. The autoscaler will scale the replicas
    +   * based on the utilization of replicas identified by the replica selection.
    +   * Replica selections should not overlap with each other.
    +   *
    +   * Other replicas (those do not match any replica selection) will be
    +   * autoscaled together and will have the same compute capacity allocated to
    +   * them.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getAsymmetricAutoscalingOptionsList(); + + /** + * + * + *
    +   * Optional. Optional asymmetric autoscaling options.
    +   * Replicas matching the replica selection criteria will be autoscaled
    +   * independently from other replicas. The autoscaler will scale the replicas
    +   * based on the utilization of replicas identified by the replica selection.
    +   * Replica selections should not overlap with each other.
    +   *
    +   * Other replicas (those do not match any replica selection) will be
    +   * autoscaled together and will have the same compute capacity allocated to
    +   * them.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + getAsymmetricAutoscalingOptions(int index); + + /** + * + * + *
    +   * Optional. Optional asymmetric autoscaling options.
    +   * Replicas matching the replica selection criteria will be autoscaled
    +   * independently from other replicas. The autoscaler will scale the replicas
    +   * based on the utilization of replicas identified by the replica selection.
    +   * Replica selections should not overlap with each other.
    +   *
    +   * Other replicas (those do not match any replica selection) will be
    +   * autoscaled together and will have the same compute capacity allocated to
    +   * them.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getAsymmetricAutoscalingOptionsCount(); + + /** + * + * + *
    +   * Optional. Optional asymmetric autoscaling options.
    +   * Replicas matching the replica selection criteria will be autoscaled
    +   * independently from other replicas. The autoscaler will scale the replicas
    +   * based on the utilization of replicas identified by the replica selection.
    +   * Replica selections should not overlap with each other.
    +   *
    +   * Other replicas (those do not match any replica selection) will be
    +   * autoscaled together and will have the same compute capacity allocated to
    +   * them.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List< + ? extends + com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOptionOrBuilder> + getAsymmetricAutoscalingOptionsOrBuilderList(); + + /** + * + * + *
    +   * Optional. Optional asymmetric autoscaling options.
    +   * Replicas matching the replica selection criteria will be autoscaled
    +   * independently from other replicas. The autoscaler will scale the replicas
    +   * based on the utilization of replicas identified by the replica selection.
    +   * Replica selections should not overlap with each other.
    +   *
    +   * Other replicas (those do not match any replica selection) will be
    +   * autoscaled together and will have the same compute capacity allocated to
    +   * them.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOptionOrBuilder + getAsymmetricAutoscalingOptionsOrBuilder(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CommonProto.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CommonProto.java new file mode 100644 index 000000000000..69f300942ee1 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CommonProto.java @@ -0,0 +1,116 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/common.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public final class CommonProto extends com.google.protobuf.GeneratedFile { + private CommonProto() {} + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CommonProto"); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_OperationProgress_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_OperationProgress_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_ReplicaSelection_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_ReplicaSelection_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n-google/spanner/admin/instance/v1/commo" + + "n.proto\022 google.spanner.admin.instance.v" + + "1\032\037google/api/field_behavior.proto\032\031goog" + + "le/api/resource.proto\032\037google/protobuf/t" + + "imestamp.proto\"\213\001\n\021OperationProgress\022\030\n\020" + + "progress_percent\030\001 
\001(\005\022.\n\nstart_time\030\002 \001" + + "(\0132\032.google.protobuf.Timestamp\022,\n\010end_ti" + + "me\030\003 \001(\0132\032.google.protobuf.Timestamp\")\n\020" + + "ReplicaSelection\022\025\n\010location\030\001 \001(\tB\003\340A\002*" + + "w\n\021FulfillmentPeriod\022\"\n\036FULFILLMENT_PERI" + + "OD_UNSPECIFIED\020\000\022\035\n\031FULFILLMENT_PERIOD_N" + + "ORMAL\020\001\022\037\n\033FULFILLMENT_PERIOD_EXTENDED\020\002" + + "B\375\001\n$com.google.spanner.admin.instance.v" + + "1B\013CommonProtoP\001ZFcloud.google.com/go/sp" + + "anner/admin/instance/apiv1/instancepb;in" + + "stancepb\252\002&Google.Cloud.Spanner.Admin.In" + + "stance.V1\312\002&Google\\Cloud\\Spanner\\Admin\\I" + + "nstance\\V1\352\002+Google::Cloud::Spanner::Adm" + + "in::Instance::V1b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + }); + internal_static_google_spanner_admin_instance_v1_OperationProgress_descriptor = + getDescriptor().getMessageType(0); + internal_static_google_spanner_admin_instance_v1_OperationProgress_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_OperationProgress_descriptor, + new java.lang.String[] { + "ProgressPercent", "StartTime", "EndTime", + }); + internal_static_google_spanner_admin_instance_v1_ReplicaSelection_descriptor = + getDescriptor().getMessageType(1); + internal_static_google_spanner_admin_instance_v1_ReplicaSelection_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_ReplicaSelection_descriptor, + new java.lang.String[] { + "Location", + }); + descriptor.resolveAllFeaturesImmutable(); + 
com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigMetadata.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigMetadata.java new file mode 100644 index 000000000000..c5be9e5a4120 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigMetadata.java @@ -0,0 +1,1298 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * Metadata type for the operation returned by
    + * [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.CreateInstanceConfigMetadata} + */ +@com.google.protobuf.Generated +public final class CreateInstanceConfigMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.CreateInstanceConfigMetadata) + CreateInstanceConfigMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateInstanceConfigMetadata"); + } + + // Use CreateInstanceConfigMetadata.newBuilder() to construct. + private CreateInstanceConfigMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateInstanceConfigMetadata() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata.class, + com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata.Builder.class); + } + + private int bitField0_; + public static final int INSTANCE_CONFIG_FIELD_NUMBER = 1; + private com.google.spanner.admin.instance.v1.InstanceConfig instanceConfig_; + + /** + * + * + *
    +   * The target instance configuration end state.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + * + * @return Whether the instanceConfig field is set. + */ + @java.lang.Override + public boolean hasInstanceConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The target instance configuration end state.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + * + * @return The instanceConfig. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig() { + return instanceConfig_ == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance() + : instanceConfig_; + } + + /** + * + * + *
    +   * The target instance configuration end state.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder getInstanceConfigOrBuilder() { + return instanceConfig_ == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance() + : instanceConfig_; + } + + public static final int PROGRESS_FIELD_NUMBER = 2; + private com.google.spanner.admin.instance.v1.OperationProgress progress_; + + /** + * + * + *
    +   * The progress of the
    +   * [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return Whether the progress field is set. + */ + @java.lang.Override + public boolean hasProgress() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * The progress of the
    +   * [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return The progress. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.OperationProgress getProgress() { + return progress_ == null + ? com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance() + : progress_; + } + + /** + * + * + *
    +   * The progress of the
    +   * [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.OperationProgressOrBuilder getProgressOrBuilder() { + return progress_ == null + ? com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance() + : progress_; + } + + public static final int CANCEL_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp cancelTime_; + + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + @java.lang.Override + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCancelTime() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getInstanceConfig()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getProgress()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(3, getCancelTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getInstanceConfig()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getProgress()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getCancelTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata other = + (com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata) obj; + 
+ if (hasInstanceConfig() != other.hasInstanceConfig()) return false; + if (hasInstanceConfig()) { + if (!getInstanceConfig().equals(other.getInstanceConfig())) return false; + } + if (hasProgress() != other.hasProgress()) return false; + if (hasProgress()) { + if (!getProgress().equals(other.getProgress())) return false; + } + if (hasCancelTime() != other.hasCancelTime()) return false; + if (hasCancelTime()) { + if (!getCancelTime().equals(other.getCancelTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasInstanceConfig()) { + hash = (37 * hash) + INSTANCE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getInstanceConfig().hashCode(); + } + if (hasProgress()) { + hash = (37 * hash) + PROGRESS_FIELD_NUMBER; + hash = (53 * hash) + getProgress().hashCode(); + } + if (hasCancelTime()) { + hash = (37 * hash) + CANCEL_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCancelTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, 
extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Metadata type for the operation returned by
    +   * [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.CreateInstanceConfigMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.CreateInstanceConfigMetadata) + com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata.class, + com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata.Builder.class); + } + + // Construct using + // com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetInstanceConfigFieldBuilder(); + internalGetProgressFieldBuilder(); + internalGetCancelTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + instanceConfig_ = null; + if (instanceConfigBuilder_ != null) { + instanceConfigBuilder_.dispose(); + instanceConfigBuilder_ = null; + } + progress_ = null; + if (progressBuilder_ != null) { + 
progressBuilder_.dispose(); + progressBuilder_ = null; + } + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigMetadata_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata build() { + com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata buildPartial() { + com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata result = + new com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.instanceConfig_ = + instanceConfigBuilder_ == null ? instanceConfig_ : instanceConfigBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.progress_ = progressBuilder_ == null ? 
progress_ : progressBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.cancelTime_ = cancelTimeBuilder_ == null ? cancelTime_ : cancelTimeBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata) { + return mergeFrom((com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata other) { + if (other + == com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata.getDefaultInstance()) + return this; + if (other.hasInstanceConfig()) { + mergeInstanceConfig(other.getInstanceConfig()); + } + if (other.hasProgress()) { + mergeProgress(other.getProgress()); + } + if (other.hasCancelTime()) { + mergeCancelTime(other.getCancelTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetInstanceConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetProgressFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 
26: + { + input.readMessage( + internalGetCancelTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.admin.instance.v1.InstanceConfig instanceConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstanceConfig, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder, + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder> + instanceConfigBuilder_; + + /** + * + * + *
    +     * The target instance configuration end state.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + * + * @return Whether the instanceConfig field is set. + */ + public boolean hasInstanceConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * The target instance configuration end state.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + * + * @return The instanceConfig. + */ + public com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig() { + if (instanceConfigBuilder_ == null) { + return instanceConfig_ == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance() + : instanceConfig_; + } else { + return instanceConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The target instance configuration end state.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + */ + public Builder setInstanceConfig(com.google.spanner.admin.instance.v1.InstanceConfig value) { + if (instanceConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + instanceConfig_ = value; + } else { + instanceConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The target instance configuration end state.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + */ + public Builder setInstanceConfig( + com.google.spanner.admin.instance.v1.InstanceConfig.Builder builderForValue) { + if (instanceConfigBuilder_ == null) { + instanceConfig_ = builderForValue.build(); + } else { + instanceConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The target instance configuration end state.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + */ + public Builder mergeInstanceConfig(com.google.spanner.admin.instance.v1.InstanceConfig value) { + if (instanceConfigBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && instanceConfig_ != null + && instanceConfig_ + != com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance()) { + getInstanceConfigBuilder().mergeFrom(value); + } else { + instanceConfig_ = value; + } + } else { + instanceConfigBuilder_.mergeFrom(value); + } + if (instanceConfig_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The target instance configuration end state.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + */ + public Builder clearInstanceConfig() { + bitField0_ = (bitField0_ & ~0x00000001); + instanceConfig_ = null; + if (instanceConfigBuilder_ != null) { + instanceConfigBuilder_.dispose(); + instanceConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The target instance configuration end state.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + */ + public com.google.spanner.admin.instance.v1.InstanceConfig.Builder getInstanceConfigBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetInstanceConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The target instance configuration end state.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + */ + public com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder + getInstanceConfigOrBuilder() { + if (instanceConfigBuilder_ != null) { + return instanceConfigBuilder_.getMessageOrBuilder(); + } else { + return instanceConfig_ == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance() + : instanceConfig_; + } + } + + /** + * + * + *
    +     * The target instance configuration end state.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstanceConfig, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder, + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder> + internalGetInstanceConfigFieldBuilder() { + if (instanceConfigBuilder_ == null) { + instanceConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstanceConfig, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder, + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder>( + getInstanceConfig(), getParentForChildren(), isClean()); + instanceConfig_ = null; + } + return instanceConfigBuilder_; + } + + private com.google.spanner.admin.instance.v1.OperationProgress progress_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.OperationProgress, + com.google.spanner.admin.instance.v1.OperationProgress.Builder, + com.google.spanner.admin.instance.v1.OperationProgressOrBuilder> + progressBuilder_; + + /** + * + * + *
    +     * The progress of the
    +     * [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return Whether the progress field is set. + */ + public boolean hasProgress() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * The progress of the
    +     * [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return The progress. + */ + public com.google.spanner.admin.instance.v1.OperationProgress getProgress() { + if (progressBuilder_ == null) { + return progress_ == null + ? com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance() + : progress_; + } else { + return progressBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The progress of the
    +     * [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public Builder setProgress(com.google.spanner.admin.instance.v1.OperationProgress value) { + if (progressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + progress_ = value; + } else { + progressBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public Builder setProgress( + com.google.spanner.admin.instance.v1.OperationProgress.Builder builderForValue) { + if (progressBuilder_ == null) { + progress_ = builderForValue.build(); + } else { + progressBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public Builder mergeProgress(com.google.spanner.admin.instance.v1.OperationProgress value) { + if (progressBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && progress_ != null + && progress_ + != com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance()) { + getProgressBuilder().mergeFrom(value); + } else { + progress_ = value; + } + } else { + progressBuilder_.mergeFrom(value); + } + if (progress_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public Builder clearProgress() { + bitField0_ = (bitField0_ & ~0x00000002); + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public com.google.spanner.admin.instance.v1.OperationProgress.Builder getProgressBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetProgressFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The progress of the
    +     * [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public com.google.spanner.admin.instance.v1.OperationProgressOrBuilder getProgressOrBuilder() { + if (progressBuilder_ != null) { + return progressBuilder_.getMessageOrBuilder(); + } else { + return progress_ == null + ? com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance() + : progress_; + } + } + + /** + * + * + *
    +     * The progress of the
    +     * [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.OperationProgress, + com.google.spanner.admin.instance.v1.OperationProgress.Builder, + com.google.spanner.admin.instance.v1.OperationProgressOrBuilder> + internalGetProgressFieldBuilder() { + if (progressBuilder_ == null) { + progressBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.OperationProgress, + com.google.spanner.admin.instance.v1.OperationProgress.Builder, + com.google.spanner.admin.instance.v1.OperationProgressOrBuilder>( + getProgress(), getParentForChildren(), isClean()); + progress_ = null; + } + return progressBuilder_; + } + + private com.google.protobuf.Timestamp cancelTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + cancelTimeBuilder_; + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + public com.google.protobuf.Timestamp getCancelTime() { + if (cancelTimeBuilder_ == null) { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } else { + return cancelTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cancelTime_ = value; + } else { + cancelTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (cancelTimeBuilder_ == null) { + cancelTime_ = builderForValue.build(); + } else { + cancelTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder mergeCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && cancelTime_ != null + && cancelTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCancelTimeBuilder().mergeFrom(value); + } else { + cancelTime_ = value; + } + } else { + cancelTimeBuilder_.mergeFrom(value); + } + if (cancelTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder clearCancelTime() { + bitField0_ = (bitField0_ & ~0x00000004); + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public com.google.protobuf.Timestamp.Builder getCancelTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetCancelTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + if (cancelTimeBuilder_ != null) { + return cancelTimeBuilder_.getMessageOrBuilder(); + } else { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCancelTimeFieldBuilder() { + if (cancelTimeBuilder_ == null) { + cancelTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCancelTime(), getParentForChildren(), isClean()); + cancelTime_ = null; + } + return cancelTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.CreateInstanceConfigMetadata) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.CreateInstanceConfigMetadata) + private static final com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata(); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateInstanceConfigMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigMetadataOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigMetadataOrBuilder.java new file mode 100644 index 000000000000..fb321d38dceb --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigMetadataOrBuilder.java @@ -0,0 +1,145 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface CreateInstanceConfigMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.CreateInstanceConfigMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The target instance configuration end state.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + * + * @return Whether the instanceConfig field is set. + */ + boolean hasInstanceConfig(); + + /** + * + * + *
    +   * The target instance configuration end state.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + * + * @return The instanceConfig. + */ + com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig(); + + /** + * + * + *
    +   * The target instance configuration end state.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + */ + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder getInstanceConfigOrBuilder(); + + /** + * + * + *
    +   * The progress of the
    +   * [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return Whether the progress field is set. + */ + boolean hasProgress(); + + /** + * + * + *
    +   * The progress of the
    +   * [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return The progress. + */ + com.google.spanner.admin.instance.v1.OperationProgress getProgress(); + + /** + * + * + *
    +   * The progress of the
    +   * [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + com.google.spanner.admin.instance.v1.OperationProgressOrBuilder getProgressOrBuilder(); + + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + boolean hasCancelTime(); + + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + com.google.protobuf.Timestamp getCancelTime(); + + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigRequest.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigRequest.java new file mode 100644 index 000000000000..c72ee75224b4 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigRequest.java @@ -0,0 +1,1306 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The request for
    + * [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.CreateInstanceConfigRequest} + */ +@com.google.protobuf.Generated +public final class CreateInstanceConfigRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.CreateInstanceConfigRequest) + CreateInstanceConfigRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateInstanceConfigRequest"); + } + + // Use CreateInstanceConfigRequest.newBuilder() to construct. + private CreateInstanceConfigRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateInstanceConfigRequest() { + parent_ = ""; + instanceConfigId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest.class, + com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
    +   * Required. The name of the project in which to create the instance
    +   * configuration. Values are of the form `projects/<project>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the project in which to create the instance
    +   * configuration. Values are of the form `projects/<project>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_CONFIG_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceConfigId_ = ""; + + /** + * + * + *
    +   * Required. The ID of the instance configuration to create. Valid identifiers
    +   * are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +   * characters in length. The `custom-` prefix is required to avoid name
    +   * conflicts with Google-managed configurations.
    +   * 
    + * + * string instance_config_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The instanceConfigId. + */ + @java.lang.Override + public java.lang.String getInstanceConfigId() { + java.lang.Object ref = instanceConfigId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceConfigId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The ID of the instance configuration to create. Valid identifiers
    +   * are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +   * characters in length. The `custom-` prefix is required to avoid name
    +   * conflicts with Google-managed configurations.
    +   * 
    + * + * string instance_config_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for instanceConfigId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceConfigIdBytes() { + java.lang.Object ref = instanceConfigId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceConfigId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_CONFIG_FIELD_NUMBER = 3; + private com.google.spanner.admin.instance.v1.InstanceConfig instanceConfig_; + + /** + * + * + *
    +   * Required. The `InstanceConfig` proto of the configuration to create.
    +   * `instance_config.name` must be
    +   * `<parent>/instanceConfigs/<instance_config_id>`.
    +   * `instance_config.base_config` must be a Google-managed configuration name,
    +   * e.g. <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the instanceConfig field is set. + */ + @java.lang.Override + public boolean hasInstanceConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Required. The `InstanceConfig` proto of the configuration to create.
    +   * `instance_config.name` must be
    +   * `<parent>/instanceConfigs/<instance_config_id>`.
    +   * `instance_config.base_config` must be a Google-managed configuration name,
    +   * e.g. <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The instanceConfig. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig() { + return instanceConfig_ == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance() + : instanceConfig_; + } + + /** + * + * + *
    +   * Required. The `InstanceConfig` proto of the configuration to create.
    +   * `instance_config.name` must be
    +   * `<parent>/instanceConfigs/<instance_config_id>`.
    +   * `instance_config.base_config` must be a Google-managed configuration name,
    +   * e.g. <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder getInstanceConfigOrBuilder() { + return instanceConfig_ == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance() + : instanceConfig_; + } + + public static final int VALIDATE_ONLY_FIELD_NUMBER = 4; + private boolean validateOnly_ = false; + + /** + * + * + *
    +   * An option to validate, but not actually execute, a request,
    +   * and provide the same response.
    +   * 
    + * + * bool validate_only = 4; + * + * @return The validateOnly. + */ + @java.lang.Override + public boolean getValidateOnly() { + return validateOnly_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceConfigId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, instanceConfigId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getInstanceConfig()); + } + if (validateOnly_ != false) { + output.writeBool(4, validateOnly_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceConfigId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, instanceConfigId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getInstanceConfig()); + } + if (validateOnly_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(4, validateOnly_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + 
return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest other = + (com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getInstanceConfigId().equals(other.getInstanceConfigId())) return false; + if (hasInstanceConfig() != other.hasInstanceConfig()) return false; + if (hasInstanceConfig()) { + if (!getInstanceConfig().equals(other.getInstanceConfig())) return false; + } + if (getValidateOnly() != other.getValidateOnly()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + INSTANCE_CONFIG_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceConfigId().hashCode(); + if (hasInstanceConfig()) { + hash = (37 * hash) + INSTANCE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getInstanceConfig().hashCode(); + } + hash = (37 * hash) + VALIDATE_ONLY_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getValidateOnly()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest 
parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.CreateInstanceConfigRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.CreateInstanceConfigRequest) + com.google.spanner.admin.instance.v1.CreateInstanceConfigRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest.class, + com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetInstanceConfigFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + instanceConfigId_ = ""; + instanceConfig_ = null; + if (instanceConfigBuilder_ != null) { + instanceConfigBuilder_.dispose(); + instanceConfigBuilder_ = null; + } + validateOnly_ = false; + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest build() { + com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest buildPartial() { + com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest result = + new com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.instanceConfigId_ = instanceConfigId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.instanceConfig_ = + instanceConfigBuilder_ == null ? 
instanceConfig_ : instanceConfigBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.validateOnly_ = validateOnly_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest) { + return mergeFrom((com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest other) { + if (other + == com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getInstanceConfigId().isEmpty()) { + instanceConfigId_ = other.instanceConfigId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasInstanceConfig()) { + mergeInstanceConfig(other.getInstanceConfig()); + } + if (other.getValidateOnly() != false) { + setValidateOnly(other.getValidateOnly()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + instanceConfigId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + 
break; + } // case 18 + case 26: + { + input.readMessage( + internalGetInstanceConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 32: + { + validateOnly_ = input.readBool(); + bitField0_ |= 0x00000008; + break; + } // case 32 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
    +     * Required. The name of the project in which to create the instance
    +     * configuration. Values are of the form `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the project in which to create the instance
    +     * configuration. Values are of the form `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the project in which to create the instance
    +     * configuration. Values are of the form `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the project in which to create the instance
    +     * configuration. Values are of the form `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the project in which to create the instance
    +     * configuration. Values are of the form `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object instanceConfigId_ = ""; + + /** + * + * + *
    +     * Required. The ID of the instance configuration to create. Valid identifiers
    +     * are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +     * characters in length. The `custom-` prefix is required to avoid name
    +     * conflicts with Google-managed configurations.
    +     * 
    + * + * string instance_config_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The instanceConfigId. + */ + public java.lang.String getInstanceConfigId() { + java.lang.Object ref = instanceConfigId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceConfigId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The ID of the instance configuration to create. Valid identifiers
    +     * are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +     * characters in length. The `custom-` prefix is required to avoid name
    +     * conflicts with Google-managed configurations.
    +     * 
    + * + * string instance_config_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for instanceConfigId. + */ + public com.google.protobuf.ByteString getInstanceConfigIdBytes() { + java.lang.Object ref = instanceConfigId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceConfigId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The ID of the instance configuration to create. Valid identifiers
    +     * are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +     * characters in length. The `custom-` prefix is required to avoid name
    +     * conflicts with Google-managed configurations.
    +     * 
    + * + * string instance_config_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The instanceConfigId to set. + * @return This builder for chaining. + */ + public Builder setInstanceConfigId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceConfigId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The ID of the instance configuration to create. Valid identifiers
    +     * are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +     * characters in length. The `custom-` prefix is required to avoid name
    +     * conflicts with Google-managed configurations.
    +     * 
    + * + * string instance_config_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearInstanceConfigId() { + instanceConfigId_ = getDefaultInstance().getInstanceConfigId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The ID of the instance configuration to create. Valid identifiers
    +     * are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +     * characters in length. The `custom-` prefix is required to avoid name
    +     * conflicts with Google-managed configurations.
    +     * 
    + * + * string instance_config_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for instanceConfigId to set. + * @return This builder for chaining. + */ + public Builder setInstanceConfigIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceConfigId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.spanner.admin.instance.v1.InstanceConfig instanceConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstanceConfig, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder, + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder> + instanceConfigBuilder_; + + /** + * + * + *
    +     * Required. The `InstanceConfig` proto of the configuration to create.
    +     * `instance_config.name` must be
    +     * `<parent>/instanceConfigs/<instance_config_id>`.
    +     * `instance_config.base_config` must be a Google-managed configuration name,
    +     * e.g. <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the instanceConfig field is set. + */ + public boolean hasInstanceConfig() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Required. The `InstanceConfig` proto of the configuration to create.
    +     * `instance_config.name` must be
    +     * `<parent>/instanceConfigs/<instance_config_id>`.
    +     * `instance_config.base_config` must be a Google-managed configuration name,
    +     * e.g. <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The instanceConfig. + */ + public com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig() { + if (instanceConfigBuilder_ == null) { + return instanceConfig_ == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance() + : instanceConfig_; + } else { + return instanceConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. The `InstanceConfig` proto of the configuration to create.
    +     * `instance_config.name` must be
    +     * `<parent>/instanceConfigs/<instance_config_id>`.
    +     * `instance_config.base_config` must be a Google-managed configuration name,
    +     * e.g. <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setInstanceConfig(com.google.spanner.admin.instance.v1.InstanceConfig value) { + if (instanceConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + instanceConfig_ = value; + } else { + instanceConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The `InstanceConfig` proto of the configuration to create.
    +     * `instance_config.name` must be
    +     * `<parent>/instanceConfigs/<instance_config_id>`.
    +     * `instance_config.base_config` must be a Google-managed configuration name,
    +     * e.g. <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setInstanceConfig( + com.google.spanner.admin.instance.v1.InstanceConfig.Builder builderForValue) { + if (instanceConfigBuilder_ == null) { + instanceConfig_ = builderForValue.build(); + } else { + instanceConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The `InstanceConfig` proto of the configuration to create.
    +     * `instance_config.name` must be
    +     * `<parent>/instanceConfigs/<instance_config_id>`.
    +     * `instance_config.base_config` must be a Google-managed configuration name,
    +     * e.g. <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeInstanceConfig(com.google.spanner.admin.instance.v1.InstanceConfig value) { + if (instanceConfigBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && instanceConfig_ != null + && instanceConfig_ + != com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance()) { + getInstanceConfigBuilder().mergeFrom(value); + } else { + instanceConfig_ = value; + } + } else { + instanceConfigBuilder_.mergeFrom(value); + } + if (instanceConfig_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. The `InstanceConfig` proto of the configuration to create.
    +     * `instance_config.name` must be
    +     * `<parent>/instanceConfigs/<instance_config_id>`.
    +     * `instance_config.base_config` must be a Google-managed configuration name,
    +     * e.g. <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearInstanceConfig() { + bitField0_ = (bitField0_ & ~0x00000004); + instanceConfig_ = null; + if (instanceConfigBuilder_ != null) { + instanceConfigBuilder_.dispose(); + instanceConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The `InstanceConfig` proto of the configuration to create.
    +     * `instance_config.name` must be
    +     * `<parent>/instanceConfigs/<instance_config_id>`.
    +     * `instance_config.base_config` must be a Google-managed configuration name,
    +     * e.g. <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.InstanceConfig.Builder getInstanceConfigBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetInstanceConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. The `InstanceConfig` proto of the configuration to create.
    +     * `instance_config.name` must be
    +     * `<parent>/instanceConfigs/<instance_config_id>`.
    +     * `instance_config.base_config` must be a Google-managed configuration name,
    +     * e.g. <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder + getInstanceConfigOrBuilder() { + if (instanceConfigBuilder_ != null) { + return instanceConfigBuilder_.getMessageOrBuilder(); + } else { + return instanceConfig_ == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance() + : instanceConfig_; + } + } + + /** + * + * + *
    +     * Required. The `InstanceConfig` proto of the configuration to create.
    +     * `instance_config.name` must be
    +     * `<parent>/instanceConfigs/<instance_config_id>`.
    +     * `instance_config.base_config` must be a Google-managed configuration name,
    +     * e.g. <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstanceConfig, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder, + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder> + internalGetInstanceConfigFieldBuilder() { + if (instanceConfigBuilder_ == null) { + instanceConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstanceConfig, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder, + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder>( + getInstanceConfig(), getParentForChildren(), isClean()); + instanceConfig_ = null; + } + return instanceConfigBuilder_; + } + + private boolean validateOnly_; + + /** + * + * + *
    +     * An option to validate, but not actually execute, a request,
    +     * and provide the same response.
    +     * 
    + * + * bool validate_only = 4; + * + * @return The validateOnly. + */ + @java.lang.Override + public boolean getValidateOnly() { + return validateOnly_; + } + + /** + * + * + *
    +     * An option to validate, but not actually execute, a request,
    +     * and provide the same response.
    +     * 
    + * + * bool validate_only = 4; + * + * @param value The validateOnly to set. + * @return This builder for chaining. + */ + public Builder setValidateOnly(boolean value) { + + validateOnly_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * An option to validate, but not actually execute, a request,
    +     * and provide the same response.
    +     * 
    + * + * bool validate_only = 4; + * + * @return This builder for chaining. + */ + public Builder clearValidateOnly() { + bitField0_ = (bitField0_ & ~0x00000008); + validateOnly_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.CreateInstanceConfigRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.CreateInstanceConfigRequest) + private static final com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest(); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateInstanceConfigRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest + 
getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigRequestOrBuilder.java new file mode 100644 index 000000000000..d1d76367eb61 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigRequestOrBuilder.java @@ -0,0 +1,161 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface CreateInstanceConfigRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.CreateInstanceConfigRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the project in which to create the instance
    +   * configuration. Values are of the form `projects/<project>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
    +   * Required. The name of the project in which to create the instance
    +   * configuration. Values are of the form `projects/<project>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
    +   * Required. The ID of the instance configuration to create. Valid identifiers
    +   * are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +   * characters in length. The `custom-` prefix is required to avoid name
    +   * conflicts with Google-managed configurations.
    +   * 
    + * + * string instance_config_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The instanceConfigId. + */ + java.lang.String getInstanceConfigId(); + + /** + * + * + *
    +   * Required. The ID of the instance configuration to create. Valid identifiers
    +   * are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +   * characters in length. The `custom-` prefix is required to avoid name
    +   * conflicts with Google-managed configurations.
    +   * 
    + * + * string instance_config_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for instanceConfigId. + */ + com.google.protobuf.ByteString getInstanceConfigIdBytes(); + + /** + * + * + *
    +   * Required. The `InstanceConfig` proto of the configuration to create.
    +   * `instance_config.name` must be
    +   * `<parent>/instanceConfigs/<instance_config_id>`.
    +   * `instance_config.base_config` must be a Google-managed configuration name,
    +   * e.g. <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the instanceConfig field is set. + */ + boolean hasInstanceConfig(); + + /** + * + * + *
    +   * Required. The `InstanceConfig` proto of the configuration to create.
    +   * `instance_config.name` must be
    +   * `<parent>/instanceConfigs/<instance_config_id>`.
    +   * `instance_config.base_config` must be a Google-managed configuration name,
    +   * e.g. <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The instanceConfig. + */ + com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig(); + + /** + * + * + *
    +   * Required. The `InstanceConfig` proto of the configuration to create.
    +   * `instance_config.name` must be
    +   * `<parent>/instanceConfigs/<instance_config_id>`.
    +   * `instance_config.base_config` must be a Google-managed configuration name,
    +   * e.g. <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder getInstanceConfigOrBuilder(); + + /** + * + * + *
    +   * An option to validate, but not actually execute, a request,
    +   * and provide the same response.
    +   * 
    + * + * bool validate_only = 4; + * + * @return The validateOnly. + */ + boolean getValidateOnly(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceMetadata.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceMetadata.java new file mode 100644 index 000000000000..e19a002881ab --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceMetadata.java @@ -0,0 +1,1746 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * Metadata type for the operation returned by
    + * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.CreateInstanceMetadata} + */ +@com.google.protobuf.Generated +public final class CreateInstanceMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.CreateInstanceMetadata) + CreateInstanceMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateInstanceMetadata"); + } + + // Use CreateInstanceMetadata.newBuilder() to construct. + private CreateInstanceMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateInstanceMetadata() { + expectedFulfillmentPeriod_ = 0; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstanceMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstanceMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.CreateInstanceMetadata.class, + com.google.spanner.admin.instance.v1.CreateInstanceMetadata.Builder.class); + } + + private int bitField0_; + public static final int INSTANCE_FIELD_NUMBER = 1; + private com.google.spanner.admin.instance.v1.Instance instance_; + + /** + * + * + *
    +   * The instance being created.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + * + * @return Whether the instance field is set. + */ + @java.lang.Override + public boolean hasInstance() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The instance being created.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + * + * @return The instance. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance getInstance() { + return instance_ == null + ? com.google.spanner.admin.instance.v1.Instance.getDefaultInstance() + : instance_; + } + + /** + * + * + *
    +   * The instance being created.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceOrBuilder getInstanceOrBuilder() { + return instance_ == null + ? com.google.spanner.admin.instance.v1.Instance.getDefaultInstance() + : instance_; + } + + public static final int START_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp startTime_; + + /** + * + * + *
    +   * The time at which the
    +   * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return Whether the startTime field is set. + */ + @java.lang.Override + public boolean hasStartTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * The time at which the
    +   * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return The startTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getStartTime() { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + + /** + * + * + *
    +   * The time at which the
    +   * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + + public static final int CANCEL_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp cancelTime_; + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + @java.lang.Override + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCancelTime() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + public static final int END_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp endTime_; + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return Whether the endTime field is set. + */ + @java.lang.Override + public boolean hasEndTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return The endTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getEndTime() { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder() { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + + public static final int EXPECTED_FULFILLMENT_PERIOD_FIELD_NUMBER = 5; + private int expectedFulfillmentPeriod_ = 0; + + /** + * + * + *
    +   * The expected fulfillment period of this create operation.
    +   * 
    + * + * .google.spanner.admin.instance.v1.FulfillmentPeriod expected_fulfillment_period = 5; + * + * + * @return The enum numeric value on the wire for expectedFulfillmentPeriod. + */ + @java.lang.Override + public int getExpectedFulfillmentPeriodValue() { + return expectedFulfillmentPeriod_; + } + + /** + * + * + *
    +   * The expected fulfillment period of this create operation.
    +   * 
    + * + * .google.spanner.admin.instance.v1.FulfillmentPeriod expected_fulfillment_period = 5; + * + * + * @return The expectedFulfillmentPeriod. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.FulfillmentPeriod getExpectedFulfillmentPeriod() { + com.google.spanner.admin.instance.v1.FulfillmentPeriod result = + com.google.spanner.admin.instance.v1.FulfillmentPeriod.forNumber( + expectedFulfillmentPeriod_); + return result == null + ? com.google.spanner.admin.instance.v1.FulfillmentPeriod.UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getInstance()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getStartTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(3, getCancelTime()); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(4, getEndTime()); + } + if (expectedFulfillmentPeriod_ + != com.google.spanner.admin.instance.v1.FulfillmentPeriod.FULFILLMENT_PERIOD_UNSPECIFIED + .getNumber()) { + output.writeEnum(5, expectedFulfillmentPeriod_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getInstance()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getStartTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += 
com.google.protobuf.CodedOutputStream.computeMessageSize(3, getCancelTime()); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getEndTime()); + } + if (expectedFulfillmentPeriod_ + != com.google.spanner.admin.instance.v1.FulfillmentPeriod.FULFILLMENT_PERIOD_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(5, expectedFulfillmentPeriod_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.CreateInstanceMetadata)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.CreateInstanceMetadata other = + (com.google.spanner.admin.instance.v1.CreateInstanceMetadata) obj; + + if (hasInstance() != other.hasInstance()) return false; + if (hasInstance()) { + if (!getInstance().equals(other.getInstance())) return false; + } + if (hasStartTime() != other.hasStartTime()) return false; + if (hasStartTime()) { + if (!getStartTime().equals(other.getStartTime())) return false; + } + if (hasCancelTime() != other.hasCancelTime()) return false; + if (hasCancelTime()) { + if (!getCancelTime().equals(other.getCancelTime())) return false; + } + if (hasEndTime() != other.hasEndTime()) return false; + if (hasEndTime()) { + if (!getEndTime().equals(other.getEndTime())) return false; + } + if (expectedFulfillmentPeriod_ != other.expectedFulfillmentPeriod_) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasInstance()) { + hash = (37 * hash) + INSTANCE_FIELD_NUMBER; + hash = (53 * hash) + 
getInstance().hashCode(); + } + if (hasStartTime()) { + hash = (37 * hash) + START_TIME_FIELD_NUMBER; + hash = (53 * hash) + getStartTime().hashCode(); + } + if (hasCancelTime()) { + hash = (37 * hash) + CANCEL_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCancelTime().hashCode(); + } + if (hasEndTime()) { + hash = (37 * hash) + END_TIME_FIELD_NUMBER; + hash = (53 * hash) + getEndTime().hashCode(); + } + hash = (37 * hash) + EXPECTED_FULFILLMENT_PERIOD_FIELD_NUMBER; + hash = (53 * hash) + expectedFulfillmentPeriod_; + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceMetadata parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceMetadata parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceMetadata parseFrom( + byte[] data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceMetadata parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceMetadata parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceMetadata parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } 
+ + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.CreateInstanceMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Metadata type for the operation returned by
    +   * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.CreateInstanceMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.CreateInstanceMetadata) + com.google.spanner.admin.instance.v1.CreateInstanceMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstanceMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstanceMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.CreateInstanceMetadata.class, + com.google.spanner.admin.instance.v1.CreateInstanceMetadata.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.CreateInstanceMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetInstanceFieldBuilder(); + internalGetStartTimeFieldBuilder(); + internalGetCancelTimeFieldBuilder(); + internalGetEndTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + instance_ = null; + if (instanceBuilder_ != null) { + instanceBuilder_.dispose(); + instanceBuilder_ = null; + } + startTime_ = null; + if (startTimeBuilder_ != null) { + startTimeBuilder_.dispose(); + startTimeBuilder_ = null; + 
} + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + endTime_ = null; + if (endTimeBuilder_ != null) { + endTimeBuilder_.dispose(); + endTimeBuilder_ = null; + } + expectedFulfillmentPeriod_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstanceMetadata_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstanceMetadata getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.CreateInstanceMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstanceMetadata build() { + com.google.spanner.admin.instance.v1.CreateInstanceMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstanceMetadata buildPartial() { + com.google.spanner.admin.instance.v1.CreateInstanceMetadata result = + new com.google.spanner.admin.instance.v1.CreateInstanceMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.CreateInstanceMetadata result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.instance_ = instanceBuilder_ == null ? instance_ : instanceBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.startTime_ = startTimeBuilder_ == null ? 
startTime_ : startTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.cancelTime_ = cancelTimeBuilder_ == null ? cancelTime_ : cancelTimeBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.endTime_ = endTimeBuilder_ == null ? endTime_ : endTimeBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.expectedFulfillmentPeriod_ = expectedFulfillmentPeriod_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.CreateInstanceMetadata) { + return mergeFrom((com.google.spanner.admin.instance.v1.CreateInstanceMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.CreateInstanceMetadata other) { + if (other == com.google.spanner.admin.instance.v1.CreateInstanceMetadata.getDefaultInstance()) + return this; + if (other.hasInstance()) { + mergeInstance(other.getInstance()); + } + if (other.hasStartTime()) { + mergeStartTime(other.getStartTime()); + } + if (other.hasCancelTime()) { + mergeCancelTime(other.getCancelTime()); + } + if (other.hasEndTime()) { + mergeEndTime(other.getEndTime()); + } + if (other.expectedFulfillmentPeriod_ != 0) { + setExpectedFulfillmentPeriodValue(other.getExpectedFulfillmentPeriodValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done 
= false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetInstanceFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetStartTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetCancelTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage(internalGetEndTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 40: + { + expectedFulfillmentPeriod_ = input.readEnum(); + bitField0_ |= 0x00000010; + break; + } // case 40 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.admin.instance.v1.Instance instance_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.Instance, + com.google.spanner.admin.instance.v1.Instance.Builder, + com.google.spanner.admin.instance.v1.InstanceOrBuilder> + instanceBuilder_; + + /** + * + * + *
    +     * The instance being created.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + * + * @return Whether the instance field is set. + */ + public boolean hasInstance() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * The instance being created.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + * + * @return The instance. + */ + public com.google.spanner.admin.instance.v1.Instance getInstance() { + if (instanceBuilder_ == null) { + return instance_ == null + ? com.google.spanner.admin.instance.v1.Instance.getDefaultInstance() + : instance_; + } else { + return instanceBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The instance being created.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + */ + public Builder setInstance(com.google.spanner.admin.instance.v1.Instance value) { + if (instanceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + instance_ = value; + } else { + instanceBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The instance being created.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + */ + public Builder setInstance( + com.google.spanner.admin.instance.v1.Instance.Builder builderForValue) { + if (instanceBuilder_ == null) { + instance_ = builderForValue.build(); + } else { + instanceBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The instance being created.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + */ + public Builder mergeInstance(com.google.spanner.admin.instance.v1.Instance value) { + if (instanceBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && instance_ != null + && instance_ != com.google.spanner.admin.instance.v1.Instance.getDefaultInstance()) { + getInstanceBuilder().mergeFrom(value); + } else { + instance_ = value; + } + } else { + instanceBuilder_.mergeFrom(value); + } + if (instance_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The instance being created.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + */ + public Builder clearInstance() { + bitField0_ = (bitField0_ & ~0x00000001); + instance_ = null; + if (instanceBuilder_ != null) { + instanceBuilder_.dispose(); + instanceBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The instance being created.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + */ + public com.google.spanner.admin.instance.v1.Instance.Builder getInstanceBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetInstanceFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The instance being created.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + */ + public com.google.spanner.admin.instance.v1.InstanceOrBuilder getInstanceOrBuilder() { + if (instanceBuilder_ != null) { + return instanceBuilder_.getMessageOrBuilder(); + } else { + return instance_ == null + ? com.google.spanner.admin.instance.v1.Instance.getDefaultInstance() + : instance_; + } + } + + /** + * + * + *
    +     * The instance being created.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.Instance, + com.google.spanner.admin.instance.v1.Instance.Builder, + com.google.spanner.admin.instance.v1.InstanceOrBuilder> + internalGetInstanceFieldBuilder() { + if (instanceBuilder_ == null) { + instanceBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.Instance, + com.google.spanner.admin.instance.v1.Instance.Builder, + com.google.spanner.admin.instance.v1.InstanceOrBuilder>( + getInstance(), getParentForChildren(), isClean()); + instance_ = null; + } + return instanceBuilder_; + } + + private com.google.protobuf.Timestamp startTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + startTimeBuilder_; + + /** + * + * + *
    +     * The time at which the
    +     * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return Whether the startTime field is set. + */ + public boolean hasStartTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * The time at which the
    +     * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return The startTime. + */ + public com.google.protobuf.Timestamp getStartTime() { + if (startTimeBuilder_ == null) { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } else { + return startTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The time at which the
    +     * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder setStartTime(com.google.protobuf.Timestamp value) { + if (startTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + startTime_ = value; + } else { + startTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which the
    +     * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder setStartTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (startTimeBuilder_ == null) { + startTime_ = builderForValue.build(); + } else { + startTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which the
    +     * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder mergeStartTime(com.google.protobuf.Timestamp value) { + if (startTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && startTime_ != null + && startTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getStartTimeBuilder().mergeFrom(value); + } else { + startTime_ = value; + } + } else { + startTimeBuilder_.mergeFrom(value); + } + if (startTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The time at which the
    +     * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder clearStartTime() { + bitField0_ = (bitField0_ & ~0x00000002); + startTime_ = null; + if (startTimeBuilder_ != null) { + startTimeBuilder_.dispose(); + startTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which the
    +     * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public com.google.protobuf.Timestamp.Builder getStartTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetStartTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The time at which the
    +     * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { + if (startTimeBuilder_ != null) { + return startTimeBuilder_.getMessageOrBuilder(); + } else { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + } + + /** + * + * + *
    +     * The time at which the
    +     * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetStartTimeFieldBuilder() { + if (startTimeBuilder_ == null) { + startTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getStartTime(), getParentForChildren(), isClean()); + startTime_ = null; + } + return startTimeBuilder_; + } + + private com.google.protobuf.Timestamp cancelTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + cancelTimeBuilder_; + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + public com.google.protobuf.Timestamp getCancelTime() { + if (cancelTimeBuilder_ == null) { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } else { + return cancelTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cancelTime_ = value; + } else { + cancelTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (cancelTimeBuilder_ == null) { + cancelTime_ = builderForValue.build(); + } else { + cancelTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder mergeCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && cancelTime_ != null + && cancelTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCancelTimeBuilder().mergeFrom(value); + } else { + cancelTime_ = value; + } + } else { + cancelTimeBuilder_.mergeFrom(value); + } + if (cancelTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder clearCancelTime() { + bitField0_ = (bitField0_ & ~0x00000004); + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public com.google.protobuf.Timestamp.Builder getCancelTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetCancelTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + if (cancelTimeBuilder_ != null) { + return cancelTimeBuilder_.getMessageOrBuilder(); + } else { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCancelTimeFieldBuilder() { + if (cancelTimeBuilder_ == null) { + cancelTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCancelTime(), getParentForChildren(), isClean()); + cancelTime_ = null; + } + return cancelTimeBuilder_; + } + + private com.google.protobuf.Timestamp endTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + endTimeBuilder_; + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return Whether the endTime field is set. + */ + public boolean hasEndTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return The endTime. + */ + public com.google.protobuf.Timestamp getEndTime() { + if (endTimeBuilder_ == null) { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } else { + return endTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public Builder setEndTime(com.google.protobuf.Timestamp value) { + if (endTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + endTime_ = value; + } else { + endTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public Builder setEndTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (endTimeBuilder_ == null) { + endTime_ = builderForValue.build(); + } else { + endTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public Builder mergeEndTime(com.google.protobuf.Timestamp value) { + if (endTimeBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && endTime_ != null + && endTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getEndTimeBuilder().mergeFrom(value); + } else { + endTime_ = value; + } + } else { + endTimeBuilder_.mergeFrom(value); + } + if (endTime_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public Builder clearEndTime() { + bitField0_ = (bitField0_ & ~0x00000008); + endTime_ = null; + if (endTimeBuilder_ != null) { + endTimeBuilder_.dispose(); + endTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public com.google.protobuf.Timestamp.Builder getEndTimeBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetEndTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder() { + if (endTimeBuilder_ != null) { + return endTimeBuilder_.getMessageOrBuilder(); + } else { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetEndTimeFieldBuilder() { + if (endTimeBuilder_ == null) { + endTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getEndTime(), getParentForChildren(), isClean()); + endTime_ = null; + } + return endTimeBuilder_; + } + + private int expectedFulfillmentPeriod_ = 0; + + /** + * + * + *
    +     * The expected fulfillment period of this create operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.FulfillmentPeriod expected_fulfillment_period = 5; + * + * + * @return The enum numeric value on the wire for expectedFulfillmentPeriod. + */ + @java.lang.Override + public int getExpectedFulfillmentPeriodValue() { + return expectedFulfillmentPeriod_; + } + + /** + * + * + *
    +     * The expected fulfillment period of this create operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.FulfillmentPeriod expected_fulfillment_period = 5; + * + * + * @param value The enum numeric value on the wire for expectedFulfillmentPeriod to set. + * @return This builder for chaining. + */ + public Builder setExpectedFulfillmentPeriodValue(int value) { + expectedFulfillmentPeriod_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The expected fulfillment period of this create operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.FulfillmentPeriod expected_fulfillment_period = 5; + * + * + * @return The expectedFulfillmentPeriod. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.FulfillmentPeriod getExpectedFulfillmentPeriod() { + com.google.spanner.admin.instance.v1.FulfillmentPeriod result = + com.google.spanner.admin.instance.v1.FulfillmentPeriod.forNumber( + expectedFulfillmentPeriod_); + return result == null + ? com.google.spanner.admin.instance.v1.FulfillmentPeriod.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * The expected fulfillment period of this create operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.FulfillmentPeriod expected_fulfillment_period = 5; + * + * + * @param value The expectedFulfillmentPeriod to set. + * @return This builder for chaining. + */ + public Builder setExpectedFulfillmentPeriod( + com.google.spanner.admin.instance.v1.FulfillmentPeriod value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + expectedFulfillmentPeriod_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The expected fulfillment period of this create operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.FulfillmentPeriod expected_fulfillment_period = 5; + * + * + * @return This builder for chaining. + */ + public Builder clearExpectedFulfillmentPeriod() { + bitField0_ = (bitField0_ & ~0x00000010); + expectedFulfillmentPeriod_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.CreateInstanceMetadata) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.CreateInstanceMetadata) + private static final com.google.spanner.admin.instance.v1.CreateInstanceMetadata DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.CreateInstanceMetadata(); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateInstanceMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.spanner.admin.instance.v1.CreateInstanceMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceMetadataOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceMetadataOrBuilder.java new file mode 100644 index 000000000000..75f88e14f58e --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceMetadataOrBuilder.java @@ -0,0 +1,216 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface CreateInstanceMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.CreateInstanceMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The instance being created.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + * + * @return Whether the instance field is set. + */ + boolean hasInstance(); + + /** + * + * + *
    +   * The instance being created.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + * + * @return The instance. + */ + com.google.spanner.admin.instance.v1.Instance getInstance(); + + /** + * + * + *
    +   * The instance being created.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + */ + com.google.spanner.admin.instance.v1.InstanceOrBuilder getInstanceOrBuilder(); + + /** + * + * + *
    +   * The time at which the
    +   * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return Whether the startTime field is set. + */ + boolean hasStartTime(); + + /** + * + * + *
    +   * The time at which the
    +   * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return The startTime. + */ + com.google.protobuf.Timestamp getStartTime(); + + /** + * + * + *
    +   * The time at which the
    +   * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder(); + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + boolean hasCancelTime(); + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + com.google.protobuf.Timestamp getCancelTime(); + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder(); + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return Whether the endTime field is set. + */ + boolean hasEndTime(); + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return The endTime. + */ + com.google.protobuf.Timestamp getEndTime(); + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder(); + + /** + * + * + *
    +   * The expected fulfillment period of this create operation.
    +   * 
    + * + * .google.spanner.admin.instance.v1.FulfillmentPeriod expected_fulfillment_period = 5; + * + * + * @return The enum numeric value on the wire for expectedFulfillmentPeriod. + */ + int getExpectedFulfillmentPeriodValue(); + + /** + * + * + *
    +   * The expected fulfillment period of this create operation.
    +   * 
    + * + * .google.spanner.admin.instance.v1.FulfillmentPeriod expected_fulfillment_period = 5; + * + * + * @return The expectedFulfillmentPeriod. + */ + com.google.spanner.admin.instance.v1.FulfillmentPeriod getExpectedFulfillmentPeriod(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionMetadata.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionMetadata.java new file mode 100644 index 000000000000..bca09ef89394 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionMetadata.java @@ -0,0 +1,1592 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * Metadata type for the operation returned by
    + * [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.CreateInstancePartitionMetadata} + */ +@com.google.protobuf.Generated +public final class CreateInstancePartitionMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.CreateInstancePartitionMetadata) + CreateInstancePartitionMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateInstancePartitionMetadata"); + } + + // Use CreateInstancePartitionMetadata.newBuilder() to construct. + private CreateInstancePartitionMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateInstancePartitionMetadata() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata.class, + com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata.Builder.class); + } + + private int bitField0_; + public static final int INSTANCE_PARTITION_FIELD_NUMBER = 1; + private com.google.spanner.admin.instance.v1.InstancePartition instancePartition_; + + /** + * + * + *
    +   * The instance partition being created.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + * + * @return Whether the instancePartition field is set. + */ + @java.lang.Override + public boolean hasInstancePartition() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The instance partition being created.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + * + * @return The instancePartition. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstancePartition getInstancePartition() { + return instancePartition_ == null + ? com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance() + : instancePartition_; + } + + /** + * + * + *
    +   * The instance partition being created.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder + getInstancePartitionOrBuilder() { + return instancePartition_ == null + ? com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance() + : instancePartition_; + } + + public static final int START_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp startTime_; + + /** + * + * + *
    +   * The time at which the
    +   * [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return Whether the startTime field is set. + */ + @java.lang.Override + public boolean hasStartTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * The time at which the
    +   * [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return The startTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getStartTime() { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + + /** + * + * + *
    +   * The time at which the
    +   * [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + + public static final int CANCEL_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp cancelTime_; + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + @java.lang.Override + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCancelTime() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + public static final int END_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp endTime_; + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return Whether the endTime field is set. + */ + @java.lang.Override + public boolean hasEndTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return The endTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getEndTime() { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder() { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getInstancePartition()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getStartTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(3, getCancelTime()); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(4, getEndTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getInstancePartition()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getStartTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getCancelTime()); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getEndTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata other = + (com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata) obj; + + if (hasInstancePartition() != other.hasInstancePartition()) return false; + if (hasInstancePartition()) { + if (!getInstancePartition().equals(other.getInstancePartition())) return false; + } + if (hasStartTime() != other.hasStartTime()) return false; + if (hasStartTime()) { + if (!getStartTime().equals(other.getStartTime())) return false; + } + if (hasCancelTime() != other.hasCancelTime()) return false; + if (hasCancelTime()) { + if (!getCancelTime().equals(other.getCancelTime())) return false; + } + if (hasEndTime() != other.hasEndTime()) return false; + if (hasEndTime()) { + if (!getEndTime().equals(other.getEndTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasInstancePartition()) { + hash = (37 * hash) + INSTANCE_PARTITION_FIELD_NUMBER; + hash = (53 * hash) + getInstancePartition().hashCode(); + } + if (hasStartTime()) { + hash = (37 * hash) + START_TIME_FIELD_NUMBER; + hash = (53 * hash) + getStartTime().hashCode(); + } + if (hasCancelTime()) { + hash = (37 * hash) + CANCEL_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCancelTime().hashCode(); + } + if (hasEndTime()) { + hash = (37 * hash) + END_TIME_FIELD_NUMBER; + hash = (53 * hash) + getEndTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { 
+ return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static 
com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Metadata type for the operation returned by
    +   * [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.CreateInstancePartitionMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.CreateInstancePartitionMetadata) + com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata.class, + com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata.Builder.class); + } + + // Construct using + // com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetInstancePartitionFieldBuilder(); + internalGetStartTimeFieldBuilder(); + internalGetCancelTimeFieldBuilder(); + internalGetEndTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + instancePartition_ = null; + if (instancePartitionBuilder_ != null) { + instancePartitionBuilder_.dispose(); + instancePartitionBuilder_ = null; 
+ } + startTime_ = null; + if (startTimeBuilder_ != null) { + startTimeBuilder_.dispose(); + startTimeBuilder_ = null; + } + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + endTime_ = null; + if (endTimeBuilder_ != null) { + endTimeBuilder_.dispose(); + endTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionMetadata_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata build() { + com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata buildPartial() { + com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata result = + new com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.instancePartition_ = + instancePartitionBuilder_ == null + ? 
instancePartition_ + : instancePartitionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.startTime_ = startTimeBuilder_ == null ? startTime_ : startTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.cancelTime_ = cancelTimeBuilder_ == null ? cancelTime_ : cancelTimeBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.endTime_ = endTimeBuilder_ == null ? endTime_ : endTimeBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata) { + return mergeFrom( + (com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata other) { + if (other + == com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata + .getDefaultInstance()) return this; + if (other.hasInstancePartition()) { + mergeInstancePartition(other.getInstancePartition()); + } + if (other.hasStartTime()) { + mergeStartTime(other.getStartTime()); + } + if (other.hasCancelTime()) { + mergeCancelTime(other.getCancelTime()); + } + if (other.hasEndTime()) { + mergeEndTime(other.getEndTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + 
boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetInstancePartitionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetStartTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetCancelTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage(internalGetEndTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.admin.instance.v1.InstancePartition instancePartition_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstancePartition, + com.google.spanner.admin.instance.v1.InstancePartition.Builder, + com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder> + instancePartitionBuilder_; + + /** + * + * + *
    +     * The instance partition being created.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + * + * @return Whether the instancePartition field is set. + */ + public boolean hasInstancePartition() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * The instance partition being created.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + * + * @return The instancePartition. + */ + public com.google.spanner.admin.instance.v1.InstancePartition getInstancePartition() { + if (instancePartitionBuilder_ == null) { + return instancePartition_ == null + ? com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance() + : instancePartition_; + } else { + return instancePartitionBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The instance partition being created.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + */ + public Builder setInstancePartition( + com.google.spanner.admin.instance.v1.InstancePartition value) { + if (instancePartitionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + instancePartition_ = value; + } else { + instancePartitionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The instance partition being created.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + */ + public Builder setInstancePartition( + com.google.spanner.admin.instance.v1.InstancePartition.Builder builderForValue) { + if (instancePartitionBuilder_ == null) { + instancePartition_ = builderForValue.build(); + } else { + instancePartitionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The instance partition being created.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + */ + public Builder mergeInstancePartition( + com.google.spanner.admin.instance.v1.InstancePartition value) { + if (instancePartitionBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && instancePartition_ != null + && instancePartition_ + != com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance()) { + getInstancePartitionBuilder().mergeFrom(value); + } else { + instancePartition_ = value; + } + } else { + instancePartitionBuilder_.mergeFrom(value); + } + if (instancePartition_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The instance partition being created.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + */ + public Builder clearInstancePartition() { + bitField0_ = (bitField0_ & ~0x00000001); + instancePartition_ = null; + if (instancePartitionBuilder_ != null) { + instancePartitionBuilder_.dispose(); + instancePartitionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The instance partition being created.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + */ + public com.google.spanner.admin.instance.v1.InstancePartition.Builder + getInstancePartitionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetInstancePartitionFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The instance partition being created.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + */ + public com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder + getInstancePartitionOrBuilder() { + if (instancePartitionBuilder_ != null) { + return instancePartitionBuilder_.getMessageOrBuilder(); + } else { + return instancePartition_ == null + ? com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance() + : instancePartition_; + } + } + + /** + * + * + *
    +     * The instance partition being created.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstancePartition, + com.google.spanner.admin.instance.v1.InstancePartition.Builder, + com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder> + internalGetInstancePartitionFieldBuilder() { + if (instancePartitionBuilder_ == null) { + instancePartitionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstancePartition, + com.google.spanner.admin.instance.v1.InstancePartition.Builder, + com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder>( + getInstancePartition(), getParentForChildren(), isClean()); + instancePartition_ = null; + } + return instancePartitionBuilder_; + } + + private com.google.protobuf.Timestamp startTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + startTimeBuilder_; + + /** + * + * + *
    +     * The time at which the
    +     * [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return Whether the startTime field is set. + */ + public boolean hasStartTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * The time at which the
    +     * [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return The startTime. + */ + public com.google.protobuf.Timestamp getStartTime() { + if (startTimeBuilder_ == null) { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } else { + return startTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The time at which the
    +     * [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder setStartTime(com.google.protobuf.Timestamp value) { + if (startTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + startTime_ = value; + } else { + startTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which the
    +     * [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder setStartTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (startTimeBuilder_ == null) { + startTime_ = builderForValue.build(); + } else { + startTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which the
    +     * [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder mergeStartTime(com.google.protobuf.Timestamp value) { + if (startTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && startTime_ != null + && startTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getStartTimeBuilder().mergeFrom(value); + } else { + startTime_ = value; + } + } else { + startTimeBuilder_.mergeFrom(value); + } + if (startTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The time at which the
    +     * [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder clearStartTime() { + bitField0_ = (bitField0_ & ~0x00000002); + startTime_ = null; + if (startTimeBuilder_ != null) { + startTimeBuilder_.dispose(); + startTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which the
    +     * [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public com.google.protobuf.Timestamp.Builder getStartTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetStartTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The time at which the
    +     * [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { + if (startTimeBuilder_ != null) { + return startTimeBuilder_.getMessageOrBuilder(); + } else { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + } + + /** + * + * + *
    +     * The time at which the
    +     * [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetStartTimeFieldBuilder() { + if (startTimeBuilder_ == null) { + startTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getStartTime(), getParentForChildren(), isClean()); + startTime_ = null; + } + return startTimeBuilder_; + } + + private com.google.protobuf.Timestamp cancelTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + cancelTimeBuilder_; + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + public com.google.protobuf.Timestamp getCancelTime() { + if (cancelTimeBuilder_ == null) { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } else { + return cancelTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cancelTime_ = value; + } else { + cancelTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (cancelTimeBuilder_ == null) { + cancelTime_ = builderForValue.build(); + } else { + cancelTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder mergeCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && cancelTime_ != null + && cancelTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCancelTimeBuilder().mergeFrom(value); + } else { + cancelTime_ = value; + } + } else { + cancelTimeBuilder_.mergeFrom(value); + } + if (cancelTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder clearCancelTime() { + bitField0_ = (bitField0_ & ~0x00000004); + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public com.google.protobuf.Timestamp.Builder getCancelTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetCancelTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + if (cancelTimeBuilder_ != null) { + return cancelTimeBuilder_.getMessageOrBuilder(); + } else { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCancelTimeFieldBuilder() { + if (cancelTimeBuilder_ == null) { + cancelTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCancelTime(), getParentForChildren(), isClean()); + cancelTime_ = null; + } + return cancelTimeBuilder_; + } + + private com.google.protobuf.Timestamp endTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + endTimeBuilder_; + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return Whether the endTime field is set. + */ + public boolean hasEndTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return The endTime. + */ + public com.google.protobuf.Timestamp getEndTime() { + if (endTimeBuilder_ == null) { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } else { + return endTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public Builder setEndTime(com.google.protobuf.Timestamp value) { + if (endTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + endTime_ = value; + } else { + endTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public Builder setEndTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (endTimeBuilder_ == null) { + endTime_ = builderForValue.build(); + } else { + endTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public Builder mergeEndTime(com.google.protobuf.Timestamp value) { + if (endTimeBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && endTime_ != null + && endTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getEndTimeBuilder().mergeFrom(value); + } else { + endTime_ = value; + } + } else { + endTimeBuilder_.mergeFrom(value); + } + if (endTime_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public Builder clearEndTime() { + bitField0_ = (bitField0_ & ~0x00000008); + endTime_ = null; + if (endTimeBuilder_ != null) { + endTimeBuilder_.dispose(); + endTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public com.google.protobuf.Timestamp.Builder getEndTimeBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetEndTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder() { + if (endTimeBuilder_ != null) { + return endTimeBuilder_.getMessageOrBuilder(); + } else { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetEndTimeFieldBuilder() { + if (endTimeBuilder_ == null) { + endTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getEndTime(), getParentForChildren(), isClean()); + endTime_ = null; + } + return endTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.CreateInstancePartitionMetadata) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.CreateInstancePartitionMetadata) + private static final com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata(); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateInstancePartitionMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstancePartitionMetadata + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionMetadataOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionMetadataOrBuilder.java new file mode 100644 index 000000000000..b609606ba479 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionMetadataOrBuilder.java @@ -0,0 +1,188 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface CreateInstancePartitionMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.CreateInstancePartitionMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The instance partition being created.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + * + * @return Whether the instancePartition field is set. + */ + boolean hasInstancePartition(); + + /** + * + * + *
    +   * The instance partition being created.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + * + * @return The instancePartition. + */ + com.google.spanner.admin.instance.v1.InstancePartition getInstancePartition(); + + /** + * + * + *
    +   * The instance partition being created.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + */ + com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder getInstancePartitionOrBuilder(); + + /** + * + * + *
    +   * The time at which the
    +   * [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return Whether the startTime field is set. + */ + boolean hasStartTime(); + + /** + * + * + *
    +   * The time at which the
    +   * [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return The startTime. + */ + com.google.protobuf.Timestamp getStartTime(); + + /** + * + * + *
    +   * The time at which the
    +   * [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder(); + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + boolean hasCancelTime(); + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + com.google.protobuf.Timestamp getCancelTime(); + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder(); + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return Whether the endTime field is set. + */ + boolean hasEndTime(); + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return The endTime. + */ + com.google.protobuf.Timestamp getEndTime(); + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionRequest.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionRequest.java new file mode 100644 index 000000000000..a08a1f2c9b38 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionRequest.java @@ -0,0 +1,1191 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The request for
    + * [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.CreateInstancePartitionRequest} + */ +@com.google.protobuf.Generated +public final class CreateInstancePartitionRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.CreateInstancePartitionRequest) + CreateInstancePartitionRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateInstancePartitionRequest"); + } + + // Use CreateInstancePartitionRequest.newBuilder() to construct. + private CreateInstancePartitionRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateInstancePartitionRequest() { + parent_ = ""; + instancePartitionId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest.class, + com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
    +   * Required. The name of the instance in which to create the instance
    +   * partition. Values are of the form
    +   * `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the instance in which to create the instance
    +   * partition. Values are of the form
    +   * `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_PARTITION_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object instancePartitionId_ = ""; + + /** + * + * + *
    +   * Required. The ID of the instance partition to create. Valid identifiers are
    +   * of the form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +   * characters in length.
    +   * 
    + * + * string instance_partition_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The instancePartitionId. + */ + @java.lang.Override + public java.lang.String getInstancePartitionId() { + java.lang.Object ref = instancePartitionId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instancePartitionId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The ID of the instance partition to create. Valid identifiers are
    +   * of the form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +   * characters in length.
    +   * 
    + * + * string instance_partition_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for instancePartitionId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstancePartitionIdBytes() { + java.lang.Object ref = instancePartitionId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instancePartitionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_PARTITION_FIELD_NUMBER = 3; + private com.google.spanner.admin.instance.v1.InstancePartition instancePartition_; + + /** + * + * + *
    +   * Required. The instance partition to create. The instance_partition.name may
    +   * be omitted, but if specified must be
    +   * `<parent>/instancePartitions/<instance_partition_id>`.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the instancePartition field is set. + */ + @java.lang.Override + public boolean hasInstancePartition() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Required. The instance partition to create. The instance_partition.name may
    +   * be omitted, but if specified must be
    +   * `<parent>/instancePartitions/<instance_partition_id>`.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The instancePartition. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstancePartition getInstancePartition() { + return instancePartition_ == null + ? com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance() + : instancePartition_; + } + + /** + * + * + *
    +   * Required. The instance partition to create. The instance_partition.name may
    +   * be omitted, but if specified must be
    +   * `<parent>/instancePartitions/<instance_partition_id>`.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder + getInstancePartitionOrBuilder() { + return instancePartition_ == null + ? com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance() + : instancePartition_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instancePartitionId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, instancePartitionId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getInstancePartition()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instancePartitionId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, instancePartitionId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getInstancePartition()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + 
public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest other = + (com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getInstancePartitionId().equals(other.getInstancePartitionId())) return false; + if (hasInstancePartition() != other.hasInstancePartition()) return false; + if (hasInstancePartition()) { + if (!getInstancePartition().equals(other.getInstancePartition())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + INSTANCE_PARTITION_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstancePartitionId().hashCode(); + if (hasInstancePartition()) { + hash = (37 * hash) + INSTANCE_PARTITION_FIELD_NUMBER; + hash = (53 * hash) + getInstancePartition().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest + parseDelimitedFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.CreateInstancePartitionRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.CreateInstancePartitionRequest) + com.google.spanner.admin.instance.v1.CreateInstancePartitionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest.class, + com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest.Builder.class); + } + + // Construct using + // com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetInstancePartitionFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + instancePartitionId_ = ""; + instancePartition_ = null; + if (instancePartitionBuilder_ != null) { + instancePartitionBuilder_.dispose(); + instancePartitionBuilder_ = null; + } + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest build() { + com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest buildPartial() { + com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest result = + new com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.instancePartitionId_ = instancePartitionId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.instancePartition_ = + instancePartitionBuilder_ == null + ? 
instancePartition_ + : instancePartitionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest) { + return mergeFrom( + (com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest other) { + if (other + == com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getInstancePartitionId().isEmpty()) { + instancePartitionId_ = other.instancePartitionId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasInstancePartition()) { + mergeInstancePartition(other.getInstancePartition()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + instancePartitionId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetInstancePartitionFieldBuilder().getBuilder(), extensionRegistry); + 
bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
    +     * Required. The name of the instance in which to create the instance
    +     * partition. Values are of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the instance in which to create the instance
    +     * partition. Values are of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the instance in which to create the instance
    +     * partition. Values are of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the instance in which to create the instance
    +     * partition. Values are of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the instance in which to create the instance
    +     * partition. Values are of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object instancePartitionId_ = ""; + + /** + * + * + *
    +     * Required. The ID of the instance partition to create. Valid identifiers are
    +     * of the form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +     * characters in length.
    +     * 
    + * + * string instance_partition_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The instancePartitionId. + */ + public java.lang.String getInstancePartitionId() { + java.lang.Object ref = instancePartitionId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instancePartitionId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The ID of the instance partition to create. Valid identifiers are
    +     * of the form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +     * characters in length.
    +     * 
    + * + * string instance_partition_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for instancePartitionId. + */ + public com.google.protobuf.ByteString getInstancePartitionIdBytes() { + java.lang.Object ref = instancePartitionId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instancePartitionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The ID of the instance partition to create. Valid identifiers are
    +     * of the form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +     * characters in length.
    +     * 
    + * + * string instance_partition_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The instancePartitionId to set. + * @return This builder for chaining. + */ + public Builder setInstancePartitionId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instancePartitionId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The ID of the instance partition to create. Valid identifiers are
    +     * of the form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +     * characters in length.
    +     * 
    + * + * string instance_partition_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearInstancePartitionId() { + instancePartitionId_ = getDefaultInstance().getInstancePartitionId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The ID of the instance partition to create. Valid identifiers are
    +     * of the form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +     * characters in length.
    +     * 
    + * + * string instance_partition_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for instancePartitionId to set. + * @return This builder for chaining. + */ + public Builder setInstancePartitionIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instancePartitionId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.spanner.admin.instance.v1.InstancePartition instancePartition_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstancePartition, + com.google.spanner.admin.instance.v1.InstancePartition.Builder, + com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder> + instancePartitionBuilder_; + + /** + * + * + *
    +     * Required. The instance partition to create. The instance_partition.name may
    +     * be omitted, but if specified must be
    +     * `<parent>/instancePartitions/<instance_partition_id>`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the instancePartition field is set. + */ + public boolean hasInstancePartition() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Required. The instance partition to create. The instance_partition.name may
    +     * be omitted, but if specified must be
    +     * `<parent>/instancePartitions/<instance_partition_id>`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The instancePartition. + */ + public com.google.spanner.admin.instance.v1.InstancePartition getInstancePartition() { + if (instancePartitionBuilder_ == null) { + return instancePartition_ == null + ? com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance() + : instancePartition_; + } else { + return instancePartitionBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. The instance partition to create. The instance_partition.name may
    +     * be omitted, but if specified must be
    +     * `<parent>/instancePartitions/<instance_partition_id>`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setInstancePartition( + com.google.spanner.admin.instance.v1.InstancePartition value) { + if (instancePartitionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + instancePartition_ = value; + } else { + instancePartitionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance partition to create. The instance_partition.name may
    +     * be omitted, but if specified must be
    +     * `<parent>/instancePartitions/<instance_partition_id>`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setInstancePartition( + com.google.spanner.admin.instance.v1.InstancePartition.Builder builderForValue) { + if (instancePartitionBuilder_ == null) { + instancePartition_ = builderForValue.build(); + } else { + instancePartitionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance partition to create. The instance_partition.name may
    +     * be omitted, but if specified must be
    +     * `<parent>/instancePartitions/<instance_partition_id>`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeInstancePartition( + com.google.spanner.admin.instance.v1.InstancePartition value) { + if (instancePartitionBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && instancePartition_ != null + && instancePartition_ + != com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance()) { + getInstancePartitionBuilder().mergeFrom(value); + } else { + instancePartition_ = value; + } + } else { + instancePartitionBuilder_.mergeFrom(value); + } + if (instancePartition_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. The instance partition to create. The instance_partition.name may
    +     * be omitted, but if specified must be
    +     * `<parent>/instancePartitions/<instance_partition_id>`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearInstancePartition() { + bitField0_ = (bitField0_ & ~0x00000004); + instancePartition_ = null; + if (instancePartitionBuilder_ != null) { + instancePartitionBuilder_.dispose(); + instancePartitionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance partition to create. The instance_partition.name may
    +     * be omitted, but if specified must be
    +     * `<parent>/instancePartitions/<instance_partition_id>`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.InstancePartition.Builder + getInstancePartitionBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetInstancePartitionFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. The instance partition to create. The instance_partition.name may
    +     * be omitted, but if specified must be
    +     * `<parent>/instancePartitions/<instance_partition_id>`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder + getInstancePartitionOrBuilder() { + if (instancePartitionBuilder_ != null) { + return instancePartitionBuilder_.getMessageOrBuilder(); + } else { + return instancePartition_ == null + ? com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance() + : instancePartition_; + } + } + + /** + * + * + *
    +     * Required. The instance partition to create. The instance_partition.name may
    +     * be omitted, but if specified must be
    +     * `<parent>/instancePartitions/<instance_partition_id>`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstancePartition, + com.google.spanner.admin.instance.v1.InstancePartition.Builder, + com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder> + internalGetInstancePartitionFieldBuilder() { + if (instancePartitionBuilder_ == null) { + instancePartitionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstancePartition, + com.google.spanner.admin.instance.v1.InstancePartition.Builder, + com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder>( + getInstancePartition(), getParentForChildren(), isClean()); + instancePartition_ = null; + } + return instancePartitionBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.CreateInstancePartitionRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.CreateInstancePartitionRequest) + private static final com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest(); + } + + public static com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateInstancePartitionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw 
e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionRequestOrBuilder.java new file mode 100644 index 000000000000..f844e3d5ce93 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionRequestOrBuilder.java @@ -0,0 +1,141 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface CreateInstancePartitionRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.CreateInstancePartitionRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the instance in which to create the instance
    +   * partition. Values are of the form
    +   * `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
    +   * Required. The name of the instance in which to create the instance
    +   * partition. Values are of the form
    +   * `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
    +   * Required. The ID of the instance partition to create. Valid identifiers are
    +   * of the form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +   * characters in length.
    +   * 
    + * + * string instance_partition_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The instancePartitionId. + */ + java.lang.String getInstancePartitionId(); + + /** + * + * + *
    +   * Required. The ID of the instance partition to create. Valid identifiers are
    +   * of the form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +   * characters in length.
    +   * 
    + * + * string instance_partition_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for instancePartitionId. + */ + com.google.protobuf.ByteString getInstancePartitionIdBytes(); + + /** + * + * + *
    +   * Required. The instance partition to create. The instance_partition.name may
    +   * be omitted, but if specified must be
    +   * `<parent>/instancePartitions/<instance_partition_id>`.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the instancePartition field is set. + */ + boolean hasInstancePartition(); + + /** + * + * + *
    +   * Required. The instance partition to create. The instance_partition.name may
    +   * be omitted, but if specified must be
    +   * `<parent>/instancePartitions/<instance_partition_id>`.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The instancePartition. + */ + com.google.spanner.admin.instance.v1.InstancePartition getInstancePartition(); + + /** + * + * + *
    +   * Required. The instance partition to create. The instance_partition.name may
    +   * be omitted, but if specified must be
    +   * `<parent>/instancePartitions/<instance_partition_id>`.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder getInstancePartitionOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceRequest.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceRequest.java new file mode 100644 index 000000000000..6d6c09abb714 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceRequest.java @@ -0,0 +1,1152 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The request for
    + * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.CreateInstanceRequest} + */ +@com.google.protobuf.Generated +public final class CreateInstanceRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.CreateInstanceRequest) + CreateInstanceRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateInstanceRequest"); + } + + // Use CreateInstanceRequest.newBuilder() to construct. + private CreateInstanceRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateInstanceRequest() { + parent_ = ""; + instanceId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstanceRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstanceRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.CreateInstanceRequest.class, + com.google.spanner.admin.instance.v1.CreateInstanceRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
    +   * Required. The name of the project in which to create the instance. Values
    +   * are of the form `projects/<project>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the project in which to create the instance. Values
    +   * are of the form `projects/<project>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +   * Required. The ID of the instance to create.  Valid identifiers are of the
    +   * form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64 characters in
    +   * length.
    +   * 
    + * + * string instance_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The instanceId. + */ + @java.lang.Override + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The ID of the instance to create.  Valid identifiers are of the
    +   * form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64 characters in
    +   * length.
    +   * 
    + * + * string instance_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for instanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_FIELD_NUMBER = 3; + private com.google.spanner.admin.instance.v1.Instance instance_; + + /** + * + * + *
    +   * Required. The instance to create.  The name may be omitted, but if
    +   * specified must be `<parent>/instances/<instance_id>`.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the instance field is set. + */ + @java.lang.Override + public boolean hasInstance() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Required. The instance to create.  The name may be omitted, but if
    +   * specified must be `<parent>/instances/<instance_id>`.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The instance. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance getInstance() { + return instance_ == null + ? com.google.spanner.admin.instance.v1.Instance.getDefaultInstance() + : instance_; + } + + /** + * + * + *
    +   * Required. The instance to create.  The name may be omitted, but if
    +   * specified must be `<parent>/instances/<instance_id>`.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceOrBuilder getInstanceOrBuilder() { + return instance_ == null + ? com.google.spanner.admin.instance.v1.Instance.getDefaultInstance() + : instance_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, instanceId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getInstance()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, instanceId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getInstance()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
com.google.spanner.admin.instance.v1.CreateInstanceRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.CreateInstanceRequest other = + (com.google.spanner.admin.instance.v1.CreateInstanceRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getInstanceId().equals(other.getInstanceId())) return false; + if (hasInstance() != other.hasInstance()) return false; + if (hasInstance()) { + if (!getInstance().equals(other.getInstance())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + if (hasInstance()) { + hash = (37 * hash) + INSTANCE_FIELD_NUMBER; + hash = (53 * hash) + getInstance().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.admin.instance.v1.CreateInstanceRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceRequest parseFrom( + 
com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.CreateInstanceRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.CreateInstanceRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.CreateInstanceRequest) + com.google.spanner.admin.instance.v1.CreateInstanceRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstanceRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_CreateInstanceRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.CreateInstanceRequest.class, + com.google.spanner.admin.instance.v1.CreateInstanceRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.CreateInstanceRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetInstanceFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + instanceId_ = ""; + instance_ = null; + if (instanceBuilder_ != null) { + instanceBuilder_.dispose(); + instanceBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + 
.internal_static_google_spanner_admin_instance_v1_CreateInstanceRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstanceRequest getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.CreateInstanceRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstanceRequest build() { + com.google.spanner.admin.instance.v1.CreateInstanceRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstanceRequest buildPartial() { + com.google.spanner.admin.instance.v1.CreateInstanceRequest result = + new com.google.spanner.admin.instance.v1.CreateInstanceRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.CreateInstanceRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.instanceId_ = instanceId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.instance_ = instanceBuilder_ == null ? 
instance_ : instanceBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.CreateInstanceRequest) { + return mergeFrom((com.google.spanner.admin.instance.v1.CreateInstanceRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.CreateInstanceRequest other) { + if (other == com.google.spanner.admin.instance.v1.CreateInstanceRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasInstance()) { + mergeInstance(other.getInstance()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + instanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetInstanceFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = 
true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
    +     * Required. The name of the project in which to create the instance. Values
    +     * are of the form `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the project in which to create the instance. Values
    +     * are of the form `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the project in which to create the instance. Values
    +     * are of the form `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the project in which to create the instance. Values
    +     * are of the form `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the project in which to create the instance. Values
    +     * are of the form `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +     * Required. The ID of the instance to create.  Valid identifiers are of the
    +     * form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64 characters in
    +     * length.
    +     * 
    + * + * string instance_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The instanceId. + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The ID of the instance to create.  Valid identifiers are of the
    +     * form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64 characters in
    +     * length.
    +     * 
    + * + * string instance_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for instanceId. + */ + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The ID of the instance to create.  Valid identifiers are of the
    +     * form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64 characters in
    +     * length.
    +     * 
    + * + * string instance_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The ID of the instance to create.  Valid identifiers are of the
    +     * form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64 characters in
    +     * length.
    +     * 
    + * + * string instance_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearInstanceId() { + instanceId_ = getDefaultInstance().getInstanceId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The ID of the instance to create.  Valid identifiers are of the
    +     * form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64 characters in
    +     * length.
    +     * 
    + * + * string instance_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.spanner.admin.instance.v1.Instance instance_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.Instance, + com.google.spanner.admin.instance.v1.Instance.Builder, + com.google.spanner.admin.instance.v1.InstanceOrBuilder> + instanceBuilder_; + + /** + * + * + *
    +     * Required. The instance to create.  The name may be omitted, but if
    +     * specified must be `<parent>/instances/<instance_id>`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the instance field is set. + */ + public boolean hasInstance() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Required. The instance to create.  The name may be omitted, but if
    +     * specified must be `<parent>/instances/<instance_id>`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The instance. + */ + public com.google.spanner.admin.instance.v1.Instance getInstance() { + if (instanceBuilder_ == null) { + return instance_ == null + ? com.google.spanner.admin.instance.v1.Instance.getDefaultInstance() + : instance_; + } else { + return instanceBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. The instance to create.  The name may be omitted, but if
    +     * specified must be `<parent>/instances/<instance_id>`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setInstance(com.google.spanner.admin.instance.v1.Instance value) { + if (instanceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + instance_ = value; + } else { + instanceBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance to create.  The name may be omitted, but if
    +     * specified must be `<parent>/instances/<instance_id>`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setInstance( + com.google.spanner.admin.instance.v1.Instance.Builder builderForValue) { + if (instanceBuilder_ == null) { + instance_ = builderForValue.build(); + } else { + instanceBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance to create.  The name may be omitted, but if
    +     * specified must be `<parent>/instances/<instance_id>`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeInstance(com.google.spanner.admin.instance.v1.Instance value) { + if (instanceBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && instance_ != null + && instance_ != com.google.spanner.admin.instance.v1.Instance.getDefaultInstance()) { + getInstanceBuilder().mergeFrom(value); + } else { + instance_ = value; + } + } else { + instanceBuilder_.mergeFrom(value); + } + if (instance_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. The instance to create.  The name may be omitted, but if
    +     * specified must be `<parent>/instances/<instance_id>`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearInstance() { + bitField0_ = (bitField0_ & ~0x00000004); + instance_ = null; + if (instanceBuilder_ != null) { + instanceBuilder_.dispose(); + instanceBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance to create.  The name may be omitted, but if
    +     * specified must be `<parent>/instances/<instance_id>`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.Instance.Builder getInstanceBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetInstanceFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. The instance to create.  The name may be omitted, but if
    +     * specified must be `<parent>/instances/<instance_id>`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.InstanceOrBuilder getInstanceOrBuilder() { + if (instanceBuilder_ != null) { + return instanceBuilder_.getMessageOrBuilder(); + } else { + return instance_ == null + ? com.google.spanner.admin.instance.v1.Instance.getDefaultInstance() + : instance_; + } + } + + /** + * + * + *
    +     * Required. The instance to create.  The name may be omitted, but if
    +     * specified must be `<parent>/instances/<instance_id>`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.Instance, + com.google.spanner.admin.instance.v1.Instance.Builder, + com.google.spanner.admin.instance.v1.InstanceOrBuilder> + internalGetInstanceFieldBuilder() { + if (instanceBuilder_ == null) { + instanceBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.Instance, + com.google.spanner.admin.instance.v1.Instance.Builder, + com.google.spanner.admin.instance.v1.InstanceOrBuilder>( + getInstance(), getParentForChildren(), isClean()); + instance_ = null; + } + return instanceBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.CreateInstanceRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.CreateInstanceRequest) + private static final com.google.spanner.admin.instance.v1.CreateInstanceRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.CreateInstanceRequest(); + } + + public static com.google.spanner.admin.instance.v1.CreateInstanceRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateInstanceRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.CreateInstanceRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceRequestOrBuilder.java new file mode 100644 index 000000000000..d07f7acedf36 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceRequestOrBuilder.java @@ -0,0 +1,136 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface CreateInstanceRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.CreateInstanceRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the project in which to create the instance. Values
    +   * are of the form `projects/<project>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
    +   * Required. The name of the project in which to create the instance. Values
    +   * are of the form `projects/<project>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
    +   * Required. The ID of the instance to create.  Valid identifiers are of the
    +   * form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64 characters in
    +   * length.
    +   * 
    + * + * string instance_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The instanceId. + */ + java.lang.String getInstanceId(); + + /** + * + * + *
    +   * Required. The ID of the instance to create.  Valid identifiers are of the
    +   * form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64 characters in
    +   * length.
    +   * 
    + * + * string instance_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for instanceId. + */ + com.google.protobuf.ByteString getInstanceIdBytes(); + + /** + * + * + *
    +   * Required. The instance to create.  The name may be omitted, but if
    +   * specified must be `<parent>/instances/<instance_id>`.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the instance field is set. + */ + boolean hasInstance(); + + /** + * + * + *
    +   * Required. The instance to create.  The name may be omitted, but if
    +   * specified must be `<parent>/instances/<instance_id>`.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The instance. + */ + com.google.spanner.admin.instance.v1.Instance getInstance(); + + /** + * + * + *
    +   * Required. The instance to create.  The name may be omitted, but if
    +   * specified must be `<parent>/instances/<instance_id>`.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.instance.v1.InstanceOrBuilder getInstanceOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceConfigRequest.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceConfigRequest.java new file mode 100644 index 000000000000..91b46b0587a8 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceConfigRequest.java @@ -0,0 +1,966 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The request for
    + * [DeleteInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstanceConfig].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.DeleteInstanceConfigRequest} + */ +@com.google.protobuf.Generated +public final class DeleteInstanceConfigRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.DeleteInstanceConfigRequest) + DeleteInstanceConfigRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DeleteInstanceConfigRequest"); + } + + // Use DeleteInstanceConfigRequest.newBuilder() to construct. + private DeleteInstanceConfigRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DeleteInstanceConfigRequest() { + name_ = ""; + etag_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_DeleteInstanceConfigRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_DeleteInstanceConfigRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest.class, + com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Required. The name of the instance configuration to be deleted.
    +   * Values are of the form
    +   * `projects/<project>/instanceConfigs/<instance_config>`
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the instance configuration to be deleted.
    +   * Values are of the form
    +   * `projects/<project>/instanceConfigs/<instance_config>`
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ETAG_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object etag_ = ""; + + /** + * + * + *
    +   * Used for optimistic concurrency control as a way to help prevent
    +   * simultaneous deletes of an instance configuration from overwriting each
    +   * other. If not empty, the API
    +   * only deletes the instance configuration when the etag provided matches the
    +   * current status of the requested instance configuration. Otherwise, deletes
    +   * the instance configuration without checking the current status of the
    +   * requested instance configuration.
    +   * 
    + * + * string etag = 2; + * + * @return The etag. + */ + @java.lang.Override + public java.lang.String getEtag() { + java.lang.Object ref = etag_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + etag_ = s; + return s; + } + } + + /** + * + * + *
    +   * Used for optimistic concurrency control as a way to help prevent
    +   * simultaneous deletes of an instance configuration from overwriting each
    +   * other. If not empty, the API
    +   * only deletes the instance configuration when the etag provided matches the
    +   * current status of the requested instance configuration. Otherwise, deletes
    +   * the instance configuration without checking the current status of the
    +   * requested instance configuration.
    +   * 
    + * + * string etag = 2; + * + * @return The bytes for etag. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEtagBytes() { + java.lang.Object ref = etag_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + etag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int VALIDATE_ONLY_FIELD_NUMBER = 3; + private boolean validateOnly_ = false; + + /** + * + * + *
    +   * An option to validate, but not actually execute, a request,
    +   * and provide the same response.
    +   * 
    + * + * bool validate_only = 3; + * + * @return The validateOnly. + */ + @java.lang.Override + public boolean getValidateOnly() { + return validateOnly_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(etag_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, etag_); + } + if (validateOnly_ != false) { + output.writeBool(3, validateOnly_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(etag_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, etag_); + } + if (validateOnly_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, validateOnly_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest other = + (com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest) obj; + 
+ if (!getName().equals(other.getName())) return false; + if (!getEtag().equals(other.getEtag())) return false; + if (getValidateOnly() != other.getValidateOnly()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + ETAG_FIELD_NUMBER; + hash = (53 * hash) + getEtag().hashCode(); + hash = (37 * hash) + VALIDATE_ONLY_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getValidateOnly()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [DeleteInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstanceConfig].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.DeleteInstanceConfigRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.DeleteInstanceConfigRequest) + com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_DeleteInstanceConfigRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_DeleteInstanceConfigRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest.class, + com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + etag_ = ""; + validateOnly_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_DeleteInstanceConfigRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest + getDefaultInstanceForType() { + return 
com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest build() { + com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest buildPartial() { + com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest result = + new com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.etag_ = etag_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.validateOnly_ = validateOnly_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest) { + return mergeFrom((com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest other) { + if (other + == com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getEtag().isEmpty()) { + etag_ = other.etag_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.getValidateOnly() != false) { + 
setValidateOnly(other.getValidateOnly()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + etag_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + validateOnly_ = input.readBool(); + bitField0_ |= 0x00000004; + break; + } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Required. The name of the instance configuration to be deleted.
    +     * Values are of the form
    +     * `projects/<project>/instanceConfigs/<instance_config>`
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the instance configuration to be deleted.
    +     * Values are of the form
    +     * `projects/<project>/instanceConfigs/<instance_config>`
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the instance configuration to be deleted.
    +     * Values are of the form
    +     * `projects/<project>/instanceConfigs/<instance_config>`
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the instance configuration to be deleted.
    +     * Values are of the form
    +     * `projects/<project>/instanceConfigs/<instance_config>`
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the instance configuration to be deleted.
    +     * Values are of the form
    +     * `projects/<project>/instanceConfigs/<instance_config>`
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object etag_ = ""; + + /** + * + * + *
    +     * Used for optimistic concurrency control as a way to help prevent
    +     * simultaneous deletes of an instance configuration from overwriting each
    +     * other. If not empty, the API
    +     * only deletes the instance configuration when the etag provided matches the
    +     * current status of the requested instance configuration. Otherwise, deletes
    +     * the instance configuration without checking the current status of the
    +     * requested instance configuration.
    +     * 
    + * + * string etag = 2; + * + * @return The etag. + */ + public java.lang.String getEtag() { + java.lang.Object ref = etag_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + etag_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Used for optimistic concurrency control as a way to help prevent
    +     * simultaneous deletes of an instance configuration from overwriting each
    +     * other. If not empty, the API
    +     * only deletes the instance configuration when the etag provided matches the
    +     * current status of the requested instance configuration. Otherwise, deletes
    +     * the instance configuration without checking the current status of the
    +     * requested instance configuration.
    +     * 
    + * + * string etag = 2; + * + * @return The bytes for etag. + */ + public com.google.protobuf.ByteString getEtagBytes() { + java.lang.Object ref = etag_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + etag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Used for optimistic concurrency control as a way to help prevent
    +     * simultaneous deletes of an instance configuration from overwriting each
    +     * other. If not empty, the API
    +     * only deletes the instance configuration when the etag provided matches the
    +     * current status of the requested instance configuration. Otherwise, deletes
    +     * the instance configuration without checking the current status of the
    +     * requested instance configuration.
    +     * 
    + * + * string etag = 2; + * + * @param value The etag to set. + * @return This builder for chaining. + */ + public Builder setEtag(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + etag_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Used for optimistic concurrency control as a way to help prevent
    +     * simultaneous deletes of an instance configuration from overwriting each
    +     * other. If not empty, the API
    +     * only deletes the instance configuration when the etag provided matches the
    +     * current status of the requested instance configuration. Otherwise, deletes
    +     * the instance configuration without checking the current status of the
    +     * requested instance configuration.
    +     * 
    + * + * string etag = 2; + * + * @return This builder for chaining. + */ + public Builder clearEtag() { + etag_ = getDefaultInstance().getEtag(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Used for optimistic concurrency control as a way to help prevent
    +     * simultaneous deletes of an instance configuration from overwriting each
    +     * other. If not empty, the API
    +     * only deletes the instance configuration when the etag provided matches the
    +     * current status of the requested instance configuration. Otherwise, deletes
    +     * the instance configuration without checking the current status of the
    +     * requested instance configuration.
    +     * 
    + * + * string etag = 2; + * + * @param value The bytes for etag to set. + * @return This builder for chaining. + */ + public Builder setEtagBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + etag_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private boolean validateOnly_; + + /** + * + * + *
    +     * An option to validate, but not actually execute, a request,
    +     * and provide the same response.
    +     * 
    + * + * bool validate_only = 3; + * + * @return The validateOnly. + */ + @java.lang.Override + public boolean getValidateOnly() { + return validateOnly_; + } + + /** + * + * + *
    +     * An option to validate, but not actually execute, a request,
    +     * and provide the same response.
    +     * 
    + * + * bool validate_only = 3; + * + * @param value The validateOnly to set. + * @return This builder for chaining. + */ + public Builder setValidateOnly(boolean value) { + + validateOnly_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * An option to validate, but not actually execute, a request,
    +     * and provide the same response.
    +     * 
    + * + * bool validate_only = 3; + * + * @return This builder for chaining. + */ + public Builder clearValidateOnly() { + bitField0_ = (bitField0_ & ~0x00000004); + validateOnly_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.DeleteInstanceConfigRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.DeleteInstanceConfigRequest) + private static final com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest(); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteInstanceConfigRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest + 
getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceConfigRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceConfigRequestOrBuilder.java new file mode 100644 index 000000000000..5f8c8788855d --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceConfigRequestOrBuilder.java @@ -0,0 +1,114 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface DeleteInstanceConfigRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.DeleteInstanceConfigRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the instance configuration to be deleted.
    +   * Values are of the form
    +   * `projects/<project>/instanceConfigs/<instance_config>`
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Required. The name of the instance configuration to be deleted.
    +   * Values are of the form
    +   * `projects/<project>/instanceConfigs/<instance_config>`
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +   * Used for optimistic concurrency control as a way to help prevent
    +   * simultaneous deletes of an instance configuration from overwriting each
    +   * other. If not empty, the API
    +   * only deletes the instance configuration when the etag provided matches the
    +   * current status of the requested instance configuration. Otherwise, deletes
    +   * the instance configuration without checking the current status of the
    +   * requested instance configuration.
    +   * 
    + * + * string etag = 2; + * + * @return The etag. + */ + java.lang.String getEtag(); + + /** + * + * + *
    +   * Used for optimistic concurrency control as a way to help prevent
    +   * simultaneous deletes of an instance configuration from overwriting each
    +   * other. If not empty, the API
    +   * only deletes the instance configuration when the etag provided matches the
    +   * current status of the requested instance configuration. Otherwise, deletes
    +   * the instance configuration without checking the current status of the
    +   * requested instance configuration.
    +   * 
    + * + * string etag = 2; + * + * @return The bytes for etag. + */ + com.google.protobuf.ByteString getEtagBytes(); + + /** + * + * + *
    +   * An option to validate, but not actually execute, a request,
    +   * and provide the same response.
    +   * 
    + * + * bool validate_only = 3; + * + * @return The validateOnly. + */ + boolean getValidateOnly(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstancePartitionRequest.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstancePartitionRequest.java new file mode 100644 index 000000000000..379d0e2ecf46 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstancePartitionRequest.java @@ -0,0 +1,848 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The request for
    + * [DeleteInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstancePartition].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.DeleteInstancePartitionRequest} + */ +@com.google.protobuf.Generated +public final class DeleteInstancePartitionRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.DeleteInstancePartitionRequest) + DeleteInstancePartitionRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DeleteInstancePartitionRequest"); + } + + // Use DeleteInstancePartitionRequest.newBuilder() to construct. + private DeleteInstancePartitionRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DeleteInstancePartitionRequest() { + name_ = ""; + etag_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_DeleteInstancePartitionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_DeleteInstancePartitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest.class, + com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Required. The name of the instance partition to be deleted.
    +   * Values are of the form
    +   * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the instance partition to be deleted.
    +   * Values are of the form
    +   * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ETAG_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object etag_ = ""; + + /** + * + * + *
    +   * Optional. If not empty, the API only deletes the instance partition when
    +   * the etag provided matches the current status of the requested instance
    +   * partition. Otherwise, deletes the instance partition without checking the
    +   * current status of the requested instance partition.
    +   * 
    + * + * string etag = 2; + * + * @return The etag. + */ + @java.lang.Override + public java.lang.String getEtag() { + java.lang.Object ref = etag_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + etag_ = s; + return s; + } + } + + /** + * + * + *
    +   * Optional. If not empty, the API only deletes the instance partition when
    +   * the etag provided matches the current status of the requested instance
    +   * partition. Otherwise, deletes the instance partition without checking the
    +   * current status of the requested instance partition.
    +   * 
    + * + * string etag = 2; + * + * @return The bytes for etag. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEtagBytes() { + java.lang.Object ref = etag_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + etag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(etag_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, etag_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(etag_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, etag_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest other = + 
(com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getEtag().equals(other.getEtag())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + ETAG_FIELD_NUMBER; + hash = (53 * hash) + getEtag().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [DeleteInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstancePartition].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.DeleteInstancePartitionRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.DeleteInstancePartitionRequest) + com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_DeleteInstancePartitionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_DeleteInstancePartitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest.class, + com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest.Builder.class); + } + + // Construct using + // com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + etag_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_DeleteInstancePartitionRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest + getDefaultInstanceForType() { + return 
com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest build() { + com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest buildPartial() { + com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest result = + new com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.etag_ = etag_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest) { + return mergeFrom( + (com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest other) { + if (other + == com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest + .getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getEtag().isEmpty()) { + etag_ = other.etag_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean 
isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + etag_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Required. The name of the instance partition to be deleted.
    +     * Values are of the form
    +     * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the instance partition to be deleted.
    +     * Values are of the form
    +     * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the instance partition to be deleted.
    +     * Values are of the form
    +     * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the instance partition to be deleted.
    +     * Values are of the form
    +     * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the instance partition to be deleted.
    +     * Values are of the form
    +     * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object etag_ = ""; + + /** + * + * + *
    +     * Optional. If not empty, the API only deletes the instance partition when
    +     * the etag provided matches the current status of the requested instance
    +     * partition. Otherwise, deletes the instance partition without checking the
    +     * current status of the requested instance partition.
    +     * 
    + * + * string etag = 2; + * + * @return The etag. + */ + public java.lang.String getEtag() { + java.lang.Object ref = etag_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + etag_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Optional. If not empty, the API only deletes the instance partition when
    +     * the etag provided matches the current status of the requested instance
    +     * partition. Otherwise, deletes the instance partition without checking the
    +     * current status of the requested instance partition.
    +     * 
    + * + * string etag = 2; + * + * @return The bytes for etag. + */ + public com.google.protobuf.ByteString getEtagBytes() { + java.lang.Object ref = etag_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + etag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Optional. If not empty, the API only deletes the instance partition when
    +     * the etag provided matches the current status of the requested instance
    +     * partition. Otherwise, deletes the instance partition without checking the
    +     * current status of the requested instance partition.
    +     * 
    + * + * string etag = 2; + * + * @param value The etag to set. + * @return This builder for chaining. + */ + public Builder setEtag(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + etag_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. If not empty, the API only deletes the instance partition when
    +     * the etag provided matches the current status of the requested instance
    +     * partition. Otherwise, deletes the instance partition without checking the
    +     * current status of the requested instance partition.
    +     * 
    + * + * string etag = 2; + * + * @return This builder for chaining. + */ + public Builder clearEtag() { + etag_ = getDefaultInstance().getEtag(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. If not empty, the API only deletes the instance partition when
    +     * the etag provided matches the current status of the requested instance
    +     * partition. Otherwise, deletes the instance partition without checking the
    +     * current status of the requested instance partition.
    +     * 
    + * + * string etag = 2; + * + * @param value The bytes for etag to set. + * @return This builder for chaining. + */ + public Builder setEtagBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + etag_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.DeleteInstancePartitionRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.DeleteInstancePartitionRequest) + private static final com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest(); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteInstancePartitionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.DeleteInstancePartitionRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstancePartitionRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstancePartitionRequestOrBuilder.java new file mode 100644 index 000000000000..50859d8b6fa5 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstancePartitionRequestOrBuilder.java @@ -0,0 +1,94 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface DeleteInstancePartitionRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.DeleteInstancePartitionRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the instance partition to be deleted.
    +   * Values are of the form
    +   * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Required. The name of the instance partition to be deleted.
    +   * Values are of the form
    +   * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +   * Optional. If not empty, the API only deletes the instance partition when
    +   * the etag provided matches the current status of the requested instance
    +   * partition. Otherwise, deletes the instance partition without checking the
    +   * current status of the requested instance partition.
    +   * 
    + * + * string etag = 2; + * + * @return The etag. + */ + java.lang.String getEtag(); + + /** + * + * + *
    +   * Optional. If not empty, the API only deletes the instance partition when
    +   * the etag provided matches the current status of the requested instance
    +   * partition. Otherwise, deletes the instance partition without checking the
    +   * current status of the requested instance partition.
    +   * 
    + * + * string etag = 2; + * + * @return The bytes for etag. + */ + com.google.protobuf.ByteString getEtagBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceRequest.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceRequest.java new file mode 100644 index 000000000000..1306dadae305 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceRequest.java @@ -0,0 +1,620 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The request for
    + * [DeleteInstance][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.DeleteInstanceRequest} + */ +@com.google.protobuf.Generated +public final class DeleteInstanceRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.DeleteInstanceRequest) + DeleteInstanceRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DeleteInstanceRequest"); + } + + // Use DeleteInstanceRequest.newBuilder() to construct. + private DeleteInstanceRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DeleteInstanceRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_DeleteInstanceRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_DeleteInstanceRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.DeleteInstanceRequest.class, + com.google.spanner.admin.instance.v1.DeleteInstanceRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Required. The name of the instance to be deleted. Values are of the form
    +   * `projects/<project>/instances/<instance>`
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the instance to be deleted. Values are of the form
    +   * `projects/<project>/instances/<instance>`
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.DeleteInstanceRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.DeleteInstanceRequest other = + (com.google.spanner.admin.instance.v1.DeleteInstanceRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + 
+ @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceRequest parseFrom( + java.io.InputStream 
input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.DeleteInstanceRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public 
Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [DeleteInstance][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.DeleteInstanceRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.DeleteInstanceRequest) + com.google.spanner.admin.instance.v1.DeleteInstanceRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_DeleteInstanceRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_DeleteInstanceRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.DeleteInstanceRequest.class, + com.google.spanner.admin.instance.v1.DeleteInstanceRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.DeleteInstanceRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_DeleteInstanceRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.DeleteInstanceRequest getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.DeleteInstanceRequest.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.admin.instance.v1.DeleteInstanceRequest build() { + com.google.spanner.admin.instance.v1.DeleteInstanceRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.DeleteInstanceRequest buildPartial() { + com.google.spanner.admin.instance.v1.DeleteInstanceRequest result = + new com.google.spanner.admin.instance.v1.DeleteInstanceRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.DeleteInstanceRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.DeleteInstanceRequest) { + return mergeFrom((com.google.spanner.admin.instance.v1.DeleteInstanceRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.DeleteInstanceRequest other) { + if (other == com.google.spanner.admin.instance.v1.DeleteInstanceRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + 
case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Required. The name of the instance to be deleted. Values are of the form
    +     * `projects/<project>/instances/<instance>`
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the instance to be deleted. Values are of the form
    +     * `projects/<project>/instances/<instance>`
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the instance to be deleted. Values are of the form
    +     * `projects/<project>/instances/<instance>`
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the instance to be deleted. Values are of the form
    +     * `projects/<project>/instances/<instance>`
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the instance to be deleted. Values are of the form
    +     * `projects/<project>/instances/<instance>`
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.DeleteInstanceRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.DeleteInstanceRequest) + private static final com.google.spanner.admin.instance.v1.DeleteInstanceRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.DeleteInstanceRequest(); + } + + public static com.google.spanner.admin.instance.v1.DeleteInstanceRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteInstanceRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.DeleteInstanceRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceRequestOrBuilder.java new file mode 100644 index 000000000000..7e881394ea22 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceRequestOrBuilder.java @@ -0,0 +1,60 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface DeleteInstanceRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.DeleteInstanceRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the instance to be deleted. Values are of the form
    +   * `projects/<project>/instances/<instance>`
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Required. The name of the instance to be deleted. Values are of the form
    +   * `projects/<project>/instances/<instance>`
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/FreeInstanceMetadata.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/FreeInstanceMetadata.java new file mode 100644 index 000000000000..6ea31b814367 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/FreeInstanceMetadata.java @@ -0,0 +1,1439 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * Free instance specific metadata that is kept even after an instance has been
    + * upgraded for tracking purposes.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.FreeInstanceMetadata} + */ +@com.google.protobuf.Generated +public final class FreeInstanceMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.FreeInstanceMetadata) + FreeInstanceMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "FreeInstanceMetadata"); + } + + // Use FreeInstanceMetadata.newBuilder() to construct. + private FreeInstanceMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private FreeInstanceMetadata() { + expireBehavior_ = 0; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_FreeInstanceMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_FreeInstanceMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.FreeInstanceMetadata.class, + com.google.spanner.admin.instance.v1.FreeInstanceMetadata.Builder.class); + } + + /** + * + * + *
    +   * Allows users to change behavior when a free instance expires.
    +   * 
    + * + * Protobuf enum {@code google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior} + */ + public enum ExpireBehavior implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * EXPIRE_BEHAVIOR_UNSPECIFIED = 0; + */ + EXPIRE_BEHAVIOR_UNSPECIFIED(0), + /** + * + * + *
    +     * When the free instance expires, upgrade the instance to a provisioned
    +     * instance.
    +     * 
    + * + * FREE_TO_PROVISIONED = 1; + */ + FREE_TO_PROVISIONED(1), + /** + * + * + *
    +     * When the free instance expires, disable the instance, and delete it
    +     * after the grace period passes if it has not been upgraded.
    +     * 
    + * + * REMOVE_AFTER_GRACE_PERIOD = 2; + */ + REMOVE_AFTER_GRACE_PERIOD(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ExpireBehavior"); + } + + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * EXPIRE_BEHAVIOR_UNSPECIFIED = 0; + */ + public static final int EXPIRE_BEHAVIOR_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * When the free instance expires, upgrade the instance to a provisioned
    +     * instance.
    +     * 
    + * + * FREE_TO_PROVISIONED = 1; + */ + public static final int FREE_TO_PROVISIONED_VALUE = 1; + + /** + * + * + *
    +     * When the free instance expires, disable the instance, and delete it
    +     * after the grace period passes if it has not been upgraded.
    +     * 
    + * + * REMOVE_AFTER_GRACE_PERIOD = 2; + */ + public static final int REMOVE_AFTER_GRACE_PERIOD_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ExpireBehavior valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static ExpireBehavior forNumber(int value) { + switch (value) { + case 0: + return EXPIRE_BEHAVIOR_UNSPECIFIED; + case 1: + return FREE_TO_PROVISIONED; + case 2: + return REMOVE_AFTER_GRACE_PERIOD; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public ExpireBehavior findValueByNumber(int number) { + return ExpireBehavior.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.FreeInstanceMetadata.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final 
ExpireBehavior[] VALUES = values(); + + public static ExpireBehavior valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private ExpireBehavior(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior) + } + + private int bitField0_; + public static final int EXPIRE_TIME_FIELD_NUMBER = 1; + private com.google.protobuf.Timestamp expireTime_; + + /** + * + * + *
    +   * Output only. Timestamp after which the instance will either be upgraded or
    +   * scheduled for deletion after a grace period. ExpireBehavior is used to
    +   * choose between upgrading or scheduling the free instance for deletion. This
    +   * timestamp is set during the creation of a free instance.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + @java.lang.Override + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Output only. Timestamp after which the instance will either be upgraded or
    +   * scheduled for deletion after a grace period. ExpireBehavior is used to
    +   * choose between upgrading or scheduling the free instance for deletion. This
    +   * timestamp is set during the creation of a free instance.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getExpireTime() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + /** + * + * + *
    +   * Output only. Timestamp after which the instance will either be upgraded or
    +   * scheduled for deletion after a grace period. ExpireBehavior is used to
    +   * choose between upgrading or scheduling the free instance for deletion. This
    +   * timestamp is set during the creation of a free instance.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + public static final int UPGRADE_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp upgradeTime_; + + /** + * + * + *
    +   * Output only. If present, the timestamp at which the free instance was
    +   * upgraded to a provisioned instance.
    +   * 
    + * + * .google.protobuf.Timestamp upgrade_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the upgradeTime field is set. + */ + @java.lang.Override + public boolean hasUpgradeTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Output only. If present, the timestamp at which the free instance was
    +   * upgraded to a provisioned instance.
    +   * 
    + * + * .google.protobuf.Timestamp upgrade_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The upgradeTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getUpgradeTime() { + return upgradeTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : upgradeTime_; + } + + /** + * + * + *
    +   * Output only. If present, the timestamp at which the free instance was
    +   * upgraded to a provisioned instance.
    +   * 
    + * + * .google.protobuf.Timestamp upgrade_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getUpgradeTimeOrBuilder() { + return upgradeTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : upgradeTime_; + } + + public static final int EXPIRE_BEHAVIOR_FIELD_NUMBER = 3; + private int expireBehavior_ = 0; + + /** + * + * + *
    +   * Specifies the expiration behavior of a free instance. The default of
    +   * ExpireBehavior is `REMOVE_AFTER_GRACE_PERIOD`. This can be modified during
    +   * or after creation, and before expiration.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior expire_behavior = 3; + * + * + * @return The enum numeric value on the wire for expireBehavior. + */ + @java.lang.Override + public int getExpireBehaviorValue() { + return expireBehavior_; + } + + /** + * + * + *
    +   * Specifies the expiration behavior of a free instance. The default of
    +   * ExpireBehavior is `REMOVE_AFTER_GRACE_PERIOD`. This can be modified during
    +   * or after creation, and before expiration.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior expire_behavior = 3; + * + * + * @return The expireBehavior. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior + getExpireBehavior() { + com.google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior result = + com.google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior.forNumber( + expireBehavior_); + return result == null + ? com.google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior.UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getExpireTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getUpgradeTime()); + } + if (expireBehavior_ + != com.google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior + .EXPIRE_BEHAVIOR_UNSPECIFIED + .getNumber()) { + output.writeEnum(3, expireBehavior_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getExpireTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getUpgradeTime()); + } + if (expireBehavior_ + != com.google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior + .EXPIRE_BEHAVIOR_UNSPECIFIED + .getNumber()) { + size += 
com.google.protobuf.CodedOutputStream.computeEnumSize(3, expireBehavior_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.FreeInstanceMetadata)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.FreeInstanceMetadata other = + (com.google.spanner.admin.instance.v1.FreeInstanceMetadata) obj; + + if (hasExpireTime() != other.hasExpireTime()) return false; + if (hasExpireTime()) { + if (!getExpireTime().equals(other.getExpireTime())) return false; + } + if (hasUpgradeTime() != other.hasUpgradeTime()) return false; + if (hasUpgradeTime()) { + if (!getUpgradeTime().equals(other.getUpgradeTime())) return false; + } + if (expireBehavior_ != other.expireBehavior_) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasExpireTime()) { + hash = (37 * hash) + EXPIRE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getExpireTime().hashCode(); + } + if (hasUpgradeTime()) { + hash = (37 * hash) + UPGRADE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getUpgradeTime().hashCode(); + } + hash = (37 * hash) + EXPIRE_BEHAVIOR_FIELD_NUMBER; + hash = (53 * hash) + expireBehavior_; + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.FreeInstanceMetadata parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.FreeInstanceMetadata parseFrom( + java.nio.ByteBuffer data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.FreeInstanceMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.FreeInstanceMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.FreeInstanceMetadata parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.FreeInstanceMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.FreeInstanceMetadata parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.FreeInstanceMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.FreeInstanceMetadata parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public 
static com.google.spanner.admin.instance.v1.FreeInstanceMetadata parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.FreeInstanceMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.FreeInstanceMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.FreeInstanceMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Free instance specific metadata that is kept even after an instance has been
    +   * upgraded for tracking purposes.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.FreeInstanceMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.FreeInstanceMetadata) + com.google.spanner.admin.instance.v1.FreeInstanceMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_FreeInstanceMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_FreeInstanceMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.FreeInstanceMetadata.class, + com.google.spanner.admin.instance.v1.FreeInstanceMetadata.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.FreeInstanceMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetExpireTimeFieldBuilder(); + internalGetUpgradeTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + expireTime_ = null; + if (expireTimeBuilder_ != null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + upgradeTime_ = null; + if (upgradeTimeBuilder_ != null) { + upgradeTimeBuilder_.dispose(); + upgradeTimeBuilder_ = null; + } + expireBehavior_ = 0; + return this; + } + + @java.lang.Override + 
public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_FreeInstanceMetadata_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.FreeInstanceMetadata getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.FreeInstanceMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.FreeInstanceMetadata build() { + com.google.spanner.admin.instance.v1.FreeInstanceMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.FreeInstanceMetadata buildPartial() { + com.google.spanner.admin.instance.v1.FreeInstanceMetadata result = + new com.google.spanner.admin.instance.v1.FreeInstanceMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.FreeInstanceMetadata result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.expireTime_ = expireTimeBuilder_ == null ? expireTime_ : expireTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.upgradeTime_ = + upgradeTimeBuilder_ == null ? 
upgradeTime_ : upgradeTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.expireBehavior_ = expireBehavior_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.FreeInstanceMetadata) { + return mergeFrom((com.google.spanner.admin.instance.v1.FreeInstanceMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.FreeInstanceMetadata other) { + if (other == com.google.spanner.admin.instance.v1.FreeInstanceMetadata.getDefaultInstance()) + return this; + if (other.hasExpireTime()) { + mergeExpireTime(other.getExpireTime()); + } + if (other.hasUpgradeTime()) { + mergeUpgradeTime(other.getUpgradeTime()); + } + if (other.expireBehavior_ != 0) { + setExpireBehaviorValue(other.getExpireBehaviorValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetExpireTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetUpgradeTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + expireBehavior_ = input.readEnum(); + bitField0_ |= 0x00000004; + break; + } // case 24 
+ default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Timestamp expireTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + expireTimeBuilder_; + + /** + * + * + *
    +     * Output only. Timestamp after which the instance will either be upgraded or
    +     * scheduled for deletion after a grace period. ExpireBehavior is used to
    +     * choose between upgrading or scheduling the free instance for deletion. This
    +     * timestamp is set during the creation of a free instance.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Output only. Timestamp after which the instance will either be upgraded or
    +     * scheduled for deletion after a grace period. ExpireBehavior is used to
    +     * choose between upgrading or scheduling the free instance for deletion. This
    +     * timestamp is set during the creation of a free instance.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + public com.google.protobuf.Timestamp getExpireTime() { + if (expireTimeBuilder_ == null) { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } else { + return expireTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. Timestamp after which the instance will either be upgraded or
    +     * scheduled for deletion after a grace period. ExpireBehavior is used to
    +     * choose between upgrading or scheduling the free instance for deletion. This
    +     * timestamp is set during the creation of a free instance.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + expireTime_ = value; + } else { + expireTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Timestamp after which the instance will either be upgraded or
    +     * scheduled for deletion after a grace period. ExpireBehavior is used to
    +     * choose between upgrading or scheduling the free instance for deletion. This
    +     * timestamp is set during the creation of a free instance.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setExpireTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (expireTimeBuilder_ == null) { + expireTime_ = builderForValue.build(); + } else { + expireTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Timestamp after which the instance will either be upgraded or
    +     * scheduled for deletion after a grace period. ExpireBehavior is used to
    +     * choose between upgrading or scheduling the free instance for deletion. This
    +     * timestamp is set during the creation of a free instance.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && expireTime_ != null + && expireTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getExpireTimeBuilder().mergeFrom(value); + } else { + expireTime_ = value; + } + } else { + expireTimeBuilder_.mergeFrom(value); + } + if (expireTime_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. Timestamp after which the instance will either be upgraded or
    +     * scheduled for deletion after a grace period. ExpireBehavior is used to
    +     * choose between upgrading or scheduling the free instance for deletion. This
    +     * timestamp is set during the creation of a free instance.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearExpireTime() { + bitField0_ = (bitField0_ & ~0x00000001); + expireTime_ = null; + if (expireTimeBuilder_ != null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Timestamp after which the instance will either be upgraded or
    +     * scheduled for deletion after a grace period. ExpireBehavior is used to
    +     * choose between upgrading or scheduling the free instance for deletion. This
    +     * timestamp is set during the creation of a free instance.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getExpireTimeBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetExpireTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. Timestamp after which the instance will either be upgraded or
    +     * scheduled for deletion after a grace period. ExpireBehavior is used to
    +     * choose between upgrading or scheduling the free instance for deletion. This
    +     * timestamp is set during the creation of a free instance.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + if (expireTimeBuilder_ != null) { + return expireTimeBuilder_.getMessageOrBuilder(); + } else { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } + } + + /** + * + * + *
    +     * Output only. Timestamp after which the instance will either be upgraded or
    +     * scheduled for deletion after a grace period. ExpireBehavior is used to
    +     * choose between upgrading or scheduling the free instance for deletion. This
    +     * timestamp is set during the creation of a free instance.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetExpireTimeFieldBuilder() { + if (expireTimeBuilder_ == null) { + expireTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getExpireTime(), getParentForChildren(), isClean()); + expireTime_ = null; + } + return expireTimeBuilder_; + } + + private com.google.protobuf.Timestamp upgradeTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + upgradeTimeBuilder_; + + /** + * + * + *
    +     * Output only. If present, the timestamp at which the free instance was
    +     * upgraded to a provisioned instance.
    +     * 
    + * + * + * .google.protobuf.Timestamp upgrade_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the upgradeTime field is set. + */ + public boolean hasUpgradeTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Output only. If present, the timestamp at which the free instance was
    +     * upgraded to a provisioned instance.
    +     * 
    + * + * + * .google.protobuf.Timestamp upgrade_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The upgradeTime. + */ + public com.google.protobuf.Timestamp getUpgradeTime() { + if (upgradeTimeBuilder_ == null) { + return upgradeTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : upgradeTime_; + } else { + return upgradeTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. If present, the timestamp at which the free instance was
    +     * upgraded to a provisioned instance.
    +     * 
    + * + * + * .google.protobuf.Timestamp upgrade_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpgradeTime(com.google.protobuf.Timestamp value) { + if (upgradeTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + upgradeTime_ = value; + } else { + upgradeTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. If present, the timestamp at which the free instance was
    +     * upgraded to a provisioned instance.
    +     * 
    + * + * + * .google.protobuf.Timestamp upgrade_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpgradeTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (upgradeTimeBuilder_ == null) { + upgradeTime_ = builderForValue.build(); + } else { + upgradeTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. If present, the timestamp at which the free instance was
    +     * upgraded to a provisioned instance.
    +     * 
    + * + * + * .google.protobuf.Timestamp upgrade_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeUpgradeTime(com.google.protobuf.Timestamp value) { + if (upgradeTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && upgradeTime_ != null + && upgradeTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getUpgradeTimeBuilder().mergeFrom(value); + } else { + upgradeTime_ = value; + } + } else { + upgradeTimeBuilder_.mergeFrom(value); + } + if (upgradeTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. If present, the timestamp at which the free instance was
    +     * upgraded to a provisioned instance.
    +     * 
    + * + * + * .google.protobuf.Timestamp upgrade_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearUpgradeTime() { + bitField0_ = (bitField0_ & ~0x00000002); + upgradeTime_ = null; + if (upgradeTimeBuilder_ != null) { + upgradeTimeBuilder_.dispose(); + upgradeTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. If present, the timestamp at which the free instance was
    +     * upgraded to a provisioned instance.
    +     * 
    + * + * + * .google.protobuf.Timestamp upgrade_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getUpgradeTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetUpgradeTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. If present, the timestamp at which the free instance was
    +     * upgraded to a provisioned instance.
    +     * 
    + * + * + * .google.protobuf.Timestamp upgrade_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getUpgradeTimeOrBuilder() { + if (upgradeTimeBuilder_ != null) { + return upgradeTimeBuilder_.getMessageOrBuilder(); + } else { + return upgradeTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : upgradeTime_; + } + } + + /** + * + * + *
    +     * Output only. If present, the timestamp at which the free instance was
    +     * upgraded to a provisioned instance.
    +     * 
    + * + * + * .google.protobuf.Timestamp upgrade_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetUpgradeTimeFieldBuilder() { + if (upgradeTimeBuilder_ == null) { + upgradeTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getUpgradeTime(), getParentForChildren(), isClean()); + upgradeTime_ = null; + } + return upgradeTimeBuilder_; + } + + private int expireBehavior_ = 0; + + /** + * + * + *
    +     * Specifies the expiration behavior of a free instance. The default of
    +     * ExpireBehavior is `REMOVE_AFTER_GRACE_PERIOD`. This can be modified during
    +     * or after creation, and before expiration.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior expire_behavior = 3; + * + * + * @return The enum numeric value on the wire for expireBehavior. + */ + @java.lang.Override + public int getExpireBehaviorValue() { + return expireBehavior_; + } + + /** + * + * + *
    +     * Specifies the expiration behavior of a free instance. The default of
    +     * ExpireBehavior is `REMOVE_AFTER_GRACE_PERIOD`. This can be modified during
    +     * or after creation, and before expiration.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior expire_behavior = 3; + * + * + * @param value The enum numeric value on the wire for expireBehavior to set. + * @return This builder for chaining. + */ + public Builder setExpireBehaviorValue(int value) { + expireBehavior_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Specifies the expiration behavior of a free instance. The default of
    +     * ExpireBehavior is `REMOVE_AFTER_GRACE_PERIOD`. This can be modified during
    +     * or after creation, and before expiration.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior expire_behavior = 3; + * + * + * @return The expireBehavior. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior + getExpireBehavior() { + com.google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior result = + com.google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior.forNumber( + expireBehavior_); + return result == null + ? com.google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * Specifies the expiration behavior of a free instance. The default of
    +     * ExpireBehavior is `REMOVE_AFTER_GRACE_PERIOD`. This can be modified during
    +     * or after creation, and before expiration.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior expire_behavior = 3; + * + * + * @param value The expireBehavior to set. + * @return This builder for chaining. + */ + public Builder setExpireBehavior( + com.google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + expireBehavior_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Specifies the expiration behavior of a free instance. The default of
    +     * ExpireBehavior is `REMOVE_AFTER_GRACE_PERIOD`. This can be modified during
    +     * or after creation, and before expiration.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior expire_behavior = 3; + * + * + * @return This builder for chaining. + */ + public Builder clearExpireBehavior() { + bitField0_ = (bitField0_ & ~0x00000004); + expireBehavior_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.FreeInstanceMetadata) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.FreeInstanceMetadata) + private static final com.google.spanner.admin.instance.v1.FreeInstanceMetadata DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.FreeInstanceMetadata(); + } + + public static com.google.spanner.admin.instance.v1.FreeInstanceMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FreeInstanceMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.spanner.admin.instance.v1.FreeInstanceMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/FreeInstanceMetadataOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/FreeInstanceMetadataOrBuilder.java new file mode 100644 index 000000000000..28f41451e7f9 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/FreeInstanceMetadataOrBuilder.java @@ -0,0 +1,154 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface FreeInstanceMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.FreeInstanceMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Output only. Timestamp after which the instance will either be upgraded or
    +   * scheduled for deletion after a grace period. ExpireBehavior is used to
    +   * choose between upgrading or scheduling the free instance for deletion. This
    +   * timestamp is set during the creation of a free instance.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + boolean hasExpireTime(); + + /** + * + * + *
    +   * Output only. Timestamp after which the instance will either be upgraded or
    +   * scheduled for deletion after a grace period. ExpireBehavior is used to
    +   * choose between upgrading or scheduling the free instance for deletion. This
    +   * timestamp is set during the creation of a free instance.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + com.google.protobuf.Timestamp getExpireTime(); + + /** + * + * + *
    +   * Output only. Timestamp after which the instance will either be upgraded or
    +   * scheduled for deletion after a grace period. ExpireBehavior is used to
    +   * choose between upgrading or scheduling the free instance for deletion. This
    +   * timestamp is set during the creation of a free instance.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder(); + + /** + * + * + *
    +   * Output only. If present, the timestamp at which the free instance was
    +   * upgraded to a provisioned instance.
    +   * 
    + * + * .google.protobuf.Timestamp upgrade_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the upgradeTime field is set. + */ + boolean hasUpgradeTime(); + + /** + * + * + *
    +   * Output only. If present, the timestamp at which the free instance was
    +   * upgraded to a provisioned instance.
    +   * 
    + * + * .google.protobuf.Timestamp upgrade_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The upgradeTime. + */ + com.google.protobuf.Timestamp getUpgradeTime(); + + /** + * + * + *
    +   * Output only. If present, the timestamp at which the free instance was
    +   * upgraded to a provisioned instance.
    +   * 
    + * + * .google.protobuf.Timestamp upgrade_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getUpgradeTimeOrBuilder(); + + /** + * + * + *
    +   * Specifies the expiration behavior of a free instance. The default of
    +   * ExpireBehavior is `REMOVE_AFTER_GRACE_PERIOD`. This can be modified during
    +   * or after creation, and before expiration.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior expire_behavior = 3; + * + * + * @return The enum numeric value on the wire for expireBehavior. + */ + int getExpireBehaviorValue(); + + /** + * + * + *
    +   * Specifies the expiration behavior of a free instance. The default of
    +   * ExpireBehavior is `REMOVE_AFTER_GRACE_PERIOD`. This can be modified during
    +   * or after creation, and before expiration.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior expire_behavior = 3; + * + * + * @return The expireBehavior. + */ + com.google.spanner.admin.instance.v1.FreeInstanceMetadata.ExpireBehavior getExpireBehavior(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/FulfillmentPeriod.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/FulfillmentPeriod.java new file mode 100644 index 000000000000..4c4a59f191c1 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/FulfillmentPeriod.java @@ -0,0 +1,197 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/common.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * Indicates the expected fulfillment period of an operation.
    + * 
    + * + * Protobuf enum {@code google.spanner.admin.instance.v1.FulfillmentPeriod} + */ +@com.google.protobuf.Generated +public enum FulfillmentPeriod implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +   * Not specified.
    +   * 
    + * + * FULFILLMENT_PERIOD_UNSPECIFIED = 0; + */ + FULFILLMENT_PERIOD_UNSPECIFIED(0), + /** + * + * + *
    +   * Normal fulfillment period. The operation is expected to complete within
    +   * minutes.
    +   * 
    + * + * FULFILLMENT_PERIOD_NORMAL = 1; + */ + FULFILLMENT_PERIOD_NORMAL(1), + /** + * + * + *
    +   * Extended fulfillment period. It can take up to an hour for the operation
    +   * to complete.
    +   * 
    + * + * FULFILLMENT_PERIOD_EXTENDED = 2; + */ + FULFILLMENT_PERIOD_EXTENDED(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "FulfillmentPeriod"); + } + + /** + * + * + *
    +   * Not specified.
    +   * 
    + * + * FULFILLMENT_PERIOD_UNSPECIFIED = 0; + */ + public static final int FULFILLMENT_PERIOD_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +   * Normal fulfillment period. The operation is expected to complete within
    +   * minutes.
    +   * 
    + * + * FULFILLMENT_PERIOD_NORMAL = 1; + */ + public static final int FULFILLMENT_PERIOD_NORMAL_VALUE = 1; + + /** + * + * + *
    +   * Extended fulfillment period. It can take up to an hour for the operation
    +   * to complete.
    +   * 
    + * + * FULFILLMENT_PERIOD_EXTENDED = 2; + */ + public static final int FULFILLMENT_PERIOD_EXTENDED_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static FulfillmentPeriod valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static FulfillmentPeriod forNumber(int value) { + switch (value) { + case 0: + return FULFILLMENT_PERIOD_UNSPECIFIED; + case 1: + return FULFILLMENT_PERIOD_NORMAL; + case 2: + return FULFILLMENT_PERIOD_EXTENDED; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public FulfillmentPeriod findValueByNumber(int number) { + return FulfillmentPeriod.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.CommonProto.getDescriptor().getEnumTypes().get(0); + } + + private 
static final FulfillmentPeriod[] VALUES = values(); + + public static FulfillmentPeriod valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private FulfillmentPeriod(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.admin.instance.v1.FulfillmentPeriod) +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceConfigRequest.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceConfigRequest.java new file mode 100644 index 000000000000..b9228b059edf --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceConfigRequest.java @@ -0,0 +1,624 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The request for
    + * [GetInstanceConfigRequest][google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.GetInstanceConfigRequest} + */ +@com.google.protobuf.Generated +public final class GetInstanceConfigRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.GetInstanceConfigRequest) + GetInstanceConfigRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetInstanceConfigRequest"); + } + + // Use GetInstanceConfigRequest.newBuilder() to construct. + private GetInstanceConfigRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetInstanceConfigRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_GetInstanceConfigRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_GetInstanceConfigRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.GetInstanceConfigRequest.class, + com.google.spanner.admin.instance.v1.GetInstanceConfigRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Required. The name of the requested instance configuration. Values are of
    +   * the form `projects/<project>/instanceConfigs/<config>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the requested instance configuration. Values are of
    +   * the form `projects/<project>/instanceConfigs/<config>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.GetInstanceConfigRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.GetInstanceConfigRequest other = + (com.google.spanner.admin.instance.v1.GetInstanceConfigRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return 
true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.GetInstanceConfigRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceConfigRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceConfigRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceConfigRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceConfigRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceConfigRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceConfigRequest 
parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceConfigRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceConfigRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceConfigRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceConfigRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceConfigRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.GetInstanceConfigRequest prototype) { + return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [GetInstanceConfigRequest][google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.GetInstanceConfigRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.GetInstanceConfigRequest) + com.google.spanner.admin.instance.v1.GetInstanceConfigRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_GetInstanceConfigRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_GetInstanceConfigRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.GetInstanceConfigRequest.class, + com.google.spanner.admin.instance.v1.GetInstanceConfigRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.GetInstanceConfigRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_GetInstanceConfigRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.GetInstanceConfigRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.GetInstanceConfigRequest.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.admin.instance.v1.GetInstanceConfigRequest build() { + com.google.spanner.admin.instance.v1.GetInstanceConfigRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.GetInstanceConfigRequest buildPartial() { + com.google.spanner.admin.instance.v1.GetInstanceConfigRequest result = + new com.google.spanner.admin.instance.v1.GetInstanceConfigRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.GetInstanceConfigRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.GetInstanceConfigRequest) { + return mergeFrom((com.google.spanner.admin.instance.v1.GetInstanceConfigRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.GetInstanceConfigRequest other) { + if (other + == com.google.spanner.admin.instance.v1.GetInstanceConfigRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = 
input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Required. The name of the requested instance configuration. Values are of
    +     * the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the requested instance configuration. Values are of
    +     * the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the requested instance configuration. Values are of
    +     * the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the requested instance configuration. Values are of
    +     * the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the requested instance configuration. Values are of
    +     * the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.GetInstanceConfigRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.GetInstanceConfigRequest) + private static final com.google.spanner.admin.instance.v1.GetInstanceConfigRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.GetInstanceConfigRequest(); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceConfigRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetInstanceConfigRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return 
PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.GetInstanceConfigRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceConfigRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceConfigRequestOrBuilder.java new file mode 100644 index 000000000000..758326023ab1 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceConfigRequestOrBuilder.java @@ -0,0 +1,60 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface GetInstanceConfigRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.GetInstanceConfigRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the requested instance configuration. Values are of
    +   * the form `projects/<project>/instanceConfigs/<config>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Required. The name of the requested instance configuration. Values are of
    +   * the form `projects/<project>/instanceConfigs/<config>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstancePartitionRequest.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstancePartitionRequest.java new file mode 100644 index 000000000000..157905197dfb --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstancePartitionRequest.java @@ -0,0 +1,634 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The request for
    + * [GetInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.GetInstancePartition].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.GetInstancePartitionRequest} + */ +@com.google.protobuf.Generated +public final class GetInstancePartitionRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.GetInstancePartitionRequest) + GetInstancePartitionRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetInstancePartitionRequest"); + } + + // Use GetInstancePartitionRequest.newBuilder() to construct. + private GetInstancePartitionRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetInstancePartitionRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_GetInstancePartitionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_GetInstancePartitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.GetInstancePartitionRequest.class, + com.google.spanner.admin.instance.v1.GetInstancePartitionRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Required. The name of the requested instance partition. Values are of
    +   * the form
    +   * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the requested instance partition. Values are of
    +   * the form
    +   * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.GetInstancePartitionRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.GetInstancePartitionRequest other = + (com.google.spanner.admin.instance.v1.GetInstancePartitionRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + 
return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.GetInstancePartitionRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.GetInstancePartitionRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.GetInstancePartitionRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.GetInstancePartitionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.GetInstancePartitionRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.GetInstancePartitionRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.admin.instance.v1.GetInstancePartitionRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.GetInstancePartitionRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.GetInstancePartitionRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.GetInstancePartitionRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.GetInstancePartitionRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.GetInstancePartitionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + 
com.google.spanner.admin.instance.v1.GetInstancePartitionRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [GetInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.GetInstancePartition].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.GetInstancePartitionRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.GetInstancePartitionRequest) + com.google.spanner.admin.instance.v1.GetInstancePartitionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_GetInstancePartitionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_GetInstancePartitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.GetInstancePartitionRequest.class, + com.google.spanner.admin.instance.v1.GetInstancePartitionRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.GetInstancePartitionRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_GetInstancePartitionRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.GetInstancePartitionRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.GetInstancePartitionRequest.getDefaultInstance(); + } + + 
@java.lang.Override + public com.google.spanner.admin.instance.v1.GetInstancePartitionRequest build() { + com.google.spanner.admin.instance.v1.GetInstancePartitionRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.GetInstancePartitionRequest buildPartial() { + com.google.spanner.admin.instance.v1.GetInstancePartitionRequest result = + new com.google.spanner.admin.instance.v1.GetInstancePartitionRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.GetInstancePartitionRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.GetInstancePartitionRequest) { + return mergeFrom((com.google.spanner.admin.instance.v1.GetInstancePartitionRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.GetInstancePartitionRequest other) { + if (other + == com.google.spanner.admin.instance.v1.GetInstancePartitionRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + 
try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Required. The name of the requested instance partition. Values are of
    +     * the form
    +     * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the requested instance partition. Values are of
    +     * the form
    +     * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the requested instance partition. Values are of
    +     * the form
    +     * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the requested instance partition. Values are of
    +     * the form
    +     * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the requested instance partition. Values are of
    +     * the form
    +     * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.GetInstancePartitionRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.GetInstancePartitionRequest) + private static final com.google.spanner.admin.instance.v1.GetInstancePartitionRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.GetInstancePartitionRequest(); + } + + public static com.google.spanner.admin.instance.v1.GetInstancePartitionRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetInstancePartitionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser 
parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.GetInstancePartitionRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstancePartitionRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstancePartitionRequestOrBuilder.java new file mode 100644 index 000000000000..f7b1aa011e7e --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstancePartitionRequestOrBuilder.java @@ -0,0 +1,62 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface GetInstancePartitionRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.GetInstancePartitionRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the requested instance partition. Values are of
    +   * the form
    +   * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Required. The name of the requested instance partition. Values are of
    +   * the form
    +   * `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceRequest.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceRequest.java new file mode 100644 index 000000000000..57e6368a0b32 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceRequest.java @@ -0,0 +1,940 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The request for
    + * [GetInstance][google.spanner.admin.instance.v1.InstanceAdmin.GetInstance].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.GetInstanceRequest} + */ +@com.google.protobuf.Generated +public final class GetInstanceRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.GetInstanceRequest) + GetInstanceRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetInstanceRequest"); + } + + // Use GetInstanceRequest.newBuilder() to construct. + private GetInstanceRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetInstanceRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_GetInstanceRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_GetInstanceRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.GetInstanceRequest.class, + com.google.spanner.admin.instance.v1.GetInstanceRequest.Builder.class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Required. The name of the requested instance. Values are of the form
    +   * `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the requested instance. Values are of the form
    +   * `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FIELD_MASK_FIELD_NUMBER = 2; + private com.google.protobuf.FieldMask fieldMask_; + + /** + * + * + *
    +   * If field_mask is present, specifies the subset of
    +   * [Instance][google.spanner.admin.instance.v1.Instance] fields that should be
    +   * returned. If absent, all
    +   * [Instance][google.spanner.admin.instance.v1.Instance] fields are returned.
    +   * 
    + * + * .google.protobuf.FieldMask field_mask = 2; + * + * @return Whether the fieldMask field is set. + */ + @java.lang.Override + public boolean hasFieldMask() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * If field_mask is present, specifies the subset of
    +   * [Instance][google.spanner.admin.instance.v1.Instance] fields that should be
    +   * returned. If absent, all
    +   * [Instance][google.spanner.admin.instance.v1.Instance] fields are returned.
    +   * 
    + * + * .google.protobuf.FieldMask field_mask = 2; + * + * @return The fieldMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getFieldMask() { + return fieldMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : fieldMask_; + } + + /** + * + * + *
    +   * If field_mask is present, specifies the subset of
    +   * [Instance][google.spanner.admin.instance.v1.Instance] fields that should be
    +   * returned. If absent, all
    +   * [Instance][google.spanner.admin.instance.v1.Instance] fields are returned.
    +   * 
    + * + * .google.protobuf.FieldMask field_mask = 2; + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getFieldMaskOrBuilder() { + return fieldMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : fieldMask_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getFieldMask()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getFieldMask()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.GetInstanceRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.GetInstanceRequest other = + (com.google.spanner.admin.instance.v1.GetInstanceRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (hasFieldMask() != other.hasFieldMask()) return false; + if (hasFieldMask()) { + if (!getFieldMask().equals(other.getFieldMask())) 
return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (hasFieldMask()) { + hash = (37 * hash) + FIELD_MASK_FIELD_NUMBER; + hash = (53 * hash) + getFieldMask().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.GetInstanceRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + 
public static Builder newBuilder( + com.google.spanner.admin.instance.v1.GetInstanceRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [GetInstance][google.spanner.admin.instance.v1.InstanceAdmin.GetInstance].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.GetInstanceRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.GetInstanceRequest) + com.google.spanner.admin.instance.v1.GetInstanceRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_GetInstanceRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_GetInstanceRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.GetInstanceRequest.class, + com.google.spanner.admin.instance.v1.GetInstanceRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.GetInstanceRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetFieldMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + fieldMask_ = null; + if (fieldMaskBuilder_ != null) { + fieldMaskBuilder_.dispose(); + fieldMaskBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + 
.internal_static_google_spanner_admin_instance_v1_GetInstanceRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.GetInstanceRequest getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.GetInstanceRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.GetInstanceRequest build() { + com.google.spanner.admin.instance.v1.GetInstanceRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.GetInstanceRequest buildPartial() { + com.google.spanner.admin.instance.v1.GetInstanceRequest result = + new com.google.spanner.admin.instance.v1.GetInstanceRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.GetInstanceRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.fieldMask_ = fieldMaskBuilder_ == null ? 
fieldMask_ : fieldMaskBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.GetInstanceRequest) { + return mergeFrom((com.google.spanner.admin.instance.v1.GetInstanceRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.GetInstanceRequest other) { + if (other == com.google.spanner.admin.instance.v1.GetInstanceRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasFieldMask()) { + mergeFieldMask(other.getFieldMask()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetFieldMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return 
this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Required. The name of the requested instance. Values are of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the requested instance. Values are of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the requested instance. Values are of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the requested instance. Values are of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the requested instance. Values are of the form
    +     * `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.FieldMask fieldMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + fieldMaskBuilder_; + + /** + * + * + *
    +     * If field_mask is present, specifies the subset of
    +     * [Instance][google.spanner.admin.instance.v1.Instance] fields that should be
    +     * returned. If absent, all
    +     * [Instance][google.spanner.admin.instance.v1.Instance] fields are returned.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2; + * + * @return Whether the fieldMask field is set. + */ + public boolean hasFieldMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * If field_mask is present, specifies the subset of
    +     * [Instance][google.spanner.admin.instance.v1.Instance] fields that should be
    +     * returned. If absent, all
    +     * [Instance][google.spanner.admin.instance.v1.Instance] fields are returned.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2; + * + * @return The fieldMask. + */ + public com.google.protobuf.FieldMask getFieldMask() { + if (fieldMaskBuilder_ == null) { + return fieldMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : fieldMask_; + } else { + return fieldMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * If field_mask is present, specifies the subset of
    +     * [Instance][google.spanner.admin.instance.v1.Instance] fields that should be
    +     * returned. If absent, all
    +     * [Instance][google.spanner.admin.instance.v1.Instance] fields are returned.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2; + */ + public Builder setFieldMask(com.google.protobuf.FieldMask value) { + if (fieldMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + fieldMask_ = value; + } else { + fieldMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If field_mask is present, specifies the subset of
    +     * [Instance][google.spanner.admin.instance.v1.Instance] fields that should be
    +     * returned. If absent, all
    +     * [Instance][google.spanner.admin.instance.v1.Instance] fields are returned.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2; + */ + public Builder setFieldMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (fieldMaskBuilder_ == null) { + fieldMask_ = builderForValue.build(); + } else { + fieldMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If field_mask is present, specifies the subset of
    +     * [Instance][google.spanner.admin.instance.v1.Instance] fields that should be
    +     * returned. If absent, all
    +     * [Instance][google.spanner.admin.instance.v1.Instance] fields are returned.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2; + */ + public Builder mergeFieldMask(com.google.protobuf.FieldMask value) { + if (fieldMaskBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && fieldMask_ != null + && fieldMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getFieldMaskBuilder().mergeFrom(value); + } else { + fieldMask_ = value; + } + } else { + fieldMaskBuilder_.mergeFrom(value); + } + if (fieldMask_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * If field_mask is present, specifies the subset of
    +     * [Instance][google.spanner.admin.instance.v1.Instance] fields that should be
    +     * returned. If absent, all
    +     * [Instance][google.spanner.admin.instance.v1.Instance] fields are returned.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2; + */ + public Builder clearFieldMask() { + bitField0_ = (bitField0_ & ~0x00000002); + fieldMask_ = null; + if (fieldMaskBuilder_ != null) { + fieldMaskBuilder_.dispose(); + fieldMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * If field_mask is present, specifies the subset of
    +     * [Instance][google.spanner.admin.instance.v1.Instance] fields that should be
    +     * returned. If absent, all
    +     * [Instance][google.spanner.admin.instance.v1.Instance] fields are returned.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2; + */ + public com.google.protobuf.FieldMask.Builder getFieldMaskBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetFieldMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * If field_mask is present, specifies the subset of
    +     * [Instance][google.spanner.admin.instance.v1.Instance] fields that should be
    +     * returned. If absent, all
    +     * [Instance][google.spanner.admin.instance.v1.Instance] fields are returned.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2; + */ + public com.google.protobuf.FieldMaskOrBuilder getFieldMaskOrBuilder() { + if (fieldMaskBuilder_ != null) { + return fieldMaskBuilder_.getMessageOrBuilder(); + } else { + return fieldMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : fieldMask_; + } + } + + /** + * + * + *
    +     * If field_mask is present, specifies the subset of
    +     * [Instance][google.spanner.admin.instance.v1.Instance] fields that should be
    +     * returned. If absent, all
    +     * [Instance][google.spanner.admin.instance.v1.Instance] fields are returned.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetFieldMaskFieldBuilder() { + if (fieldMaskBuilder_ == null) { + fieldMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getFieldMask(), getParentForChildren(), isClean()); + fieldMask_ = null; + } + return fieldMaskBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.GetInstanceRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.GetInstanceRequest) + private static final com.google.spanner.admin.instance.v1.GetInstanceRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.GetInstanceRequest(); + } + + public static com.google.spanner.admin.instance.v1.GetInstanceRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetInstanceRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return 
builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.GetInstanceRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceRequestOrBuilder.java new file mode 100644 index 000000000000..e8f705bf06a2 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceRequestOrBuilder.java @@ -0,0 +1,106 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface GetInstanceRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.GetInstanceRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the requested instance. Values are of the form
    +   * `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Required. The name of the requested instance. Values are of the form
    +   * `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +   * If field_mask is present, specifies the subset of
    +   * [Instance][google.spanner.admin.instance.v1.Instance] fields that should be
    +   * returned. If absent, all
    +   * [Instance][google.spanner.admin.instance.v1.Instance] fields are returned.
    +   * 
    + * + * .google.protobuf.FieldMask field_mask = 2; + * + * @return Whether the fieldMask field is set. + */ + boolean hasFieldMask(); + + /** + * + * + *
    +   * If field_mask is present, specifies the subset of
    +   * [Instance][google.spanner.admin.instance.v1.Instance] fields that should be
    +   * returned. If absent, all
    +   * [Instance][google.spanner.admin.instance.v1.Instance] fields are returned.
    +   * 
    + * + * .google.protobuf.FieldMask field_mask = 2; + * + * @return The fieldMask. + */ + com.google.protobuf.FieldMask getFieldMask(); + + /** + * + * + *
    +   * If field_mask is present, specifies the subset of
    +   * [Instance][google.spanner.admin.instance.v1.Instance] fields that should be
    +   * returned. If absent, all
    +   * [Instance][google.spanner.admin.instance.v1.Instance] fields are returned.
    +   * 
    + * + * .google.protobuf.FieldMask field_mask = 2; + */ + com.google.protobuf.FieldMaskOrBuilder getFieldMaskOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/Instance.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/Instance.java new file mode 100644 index 000000000000..a199934b0ef4 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/Instance.java @@ -0,0 +1,5631 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * An isolated set of Cloud Spanner resources on which databases can be hosted.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.Instance} + */ +@com.google.protobuf.Generated +public final class Instance extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.Instance) + InstanceOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Instance"); + } + + // Use Instance.newBuilder() to construct. + private Instance(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Instance() { + name_ = ""; + config_ = ""; + displayName_ = ""; + replicaComputeCapacity_ = java.util.Collections.emptyList(); + state_ = 0; + instanceType_ = 0; + endpointUris_ = com.google.protobuf.LazyStringArrayList.emptyList(); + edition_ = 0; + defaultBackupScheduleType_ = 0; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_Instance_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 7: + return internalGetLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_Instance_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.Instance.class, + 
com.google.spanner.admin.instance.v1.Instance.Builder.class); + } + + /** + * + * + *
    +   * Indicates the current state of the instance.
    +   * 
    + * + * Protobuf enum {@code google.spanner.admin.instance.v1.Instance.State} + */ + public enum State implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * STATE_UNSPECIFIED = 0; + */ + STATE_UNSPECIFIED(0), + /** + * + * + *
    +     * The instance is still being created. Resources may not be
    +     * available yet, and operations such as database creation may not
    +     * work.
    +     * 
    + * + * CREATING = 1; + */ + CREATING(1), + /** + * + * + *
    +     * The instance is fully created and ready to do work such as
    +     * creating databases.
    +     * 
    + * + * READY = 2; + */ + READY(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "State"); + } + + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * STATE_UNSPECIFIED = 0; + */ + public static final int STATE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * The instance is still being created. Resources may not be
    +     * available yet, and operations such as database creation may not
    +     * work.
    +     * 
    + * + * CREATING = 1; + */ + public static final int CREATING_VALUE = 1; + + /** + * + * + *
    +     * The instance is fully created and ready to do work such as
    +     * creating databases.
    +     * 
    + * + * READY = 2; + */ + public static final int READY_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static State valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static State forNumber(int value) { + switch (value) { + case 0: + return STATE_UNSPECIFIED; + case 1: + return CREATING; + case 2: + return READY; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.Instance.getDescriptor().getEnumTypes().get(0); + } + + private static final State[] VALUES = values(); + + public static State valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if 
(desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private State(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.admin.instance.v1.Instance.State) + } + + /** + * + * + *
    +   * The type of this instance. The type can be used to distinguish product
    +   * variants, that can affect aspects like: usage restrictions, quotas and
    +   * billing. Currently this is used to distinguish FREE_INSTANCE vs PROVISIONED
    +   * instances.
    +   * 
    + * + * Protobuf enum {@code google.spanner.admin.instance.v1.Instance.InstanceType} + */ + public enum InstanceType implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * INSTANCE_TYPE_UNSPECIFIED = 0; + */ + INSTANCE_TYPE_UNSPECIFIED(0), + /** + * + * + *
    +     * Provisioned instances have dedicated resources, standard usage limits and
    +     * support.
    +     * 
    + * + * PROVISIONED = 1; + */ + PROVISIONED(1), + /** + * + * + *
    +     * Free instances provide no guarantee for dedicated resources,
    +     * [node_count, processing_units] should be 0. They come
    +     * with stricter usage limits and limited support.
    +     * 
    + * + * FREE_INSTANCE = 2; + */ + FREE_INSTANCE(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "InstanceType"); + } + + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * INSTANCE_TYPE_UNSPECIFIED = 0; + */ + public static final int INSTANCE_TYPE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * Provisioned instances have dedicated resources, standard usage limits and
    +     * support.
    +     * 
    + * + * PROVISIONED = 1; + */ + public static final int PROVISIONED_VALUE = 1; + + /** + * + * + *
    +     * Free instances provide no guarantee for dedicated resources,
    +     * [node_count, processing_units] should be 0. They come
    +     * with stricter usage limits and limited support.
    +     * 
    + * + * FREE_INSTANCE = 2; + */ + public static final int FREE_INSTANCE_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static InstanceType valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static InstanceType forNumber(int value) { + switch (value) { + case 0: + return INSTANCE_TYPE_UNSPECIFIED; + case 1: + return PROVISIONED; + case 2: + return FREE_INSTANCE; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public InstanceType findValueByNumber(int number) { + return InstanceType.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.Instance.getDescriptor().getEnumTypes().get(1); + } + + private static final InstanceType[] VALUES = values(); + + public static InstanceType 
valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private InstanceType(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.admin.instance.v1.Instance.InstanceType) + } + + /** + * + * + *
    +   * The edition selected for this instance. Different editions provide
    +   * different capabilities at different price points.
    +   * 
    + * + * Protobuf enum {@code google.spanner.admin.instance.v1.Instance.Edition} + */ + public enum Edition implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Edition not specified.
    +     * 
    + * + * EDITION_UNSPECIFIED = 0; + */ + EDITION_UNSPECIFIED(0), + /** + * + * + *
    +     * Standard edition.
    +     * 
    + * + * STANDARD = 1; + */ + STANDARD(1), + /** + * + * + *
    +     * Enterprise edition.
    +     * 
    + * + * ENTERPRISE = 2; + */ + ENTERPRISE(2), + /** + * + * + *
    +     * Enterprise Plus edition.
    +     * 
    + * + * ENTERPRISE_PLUS = 3; + */ + ENTERPRISE_PLUS(3), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Edition"); + } + + /** + * + * + *
    +     * Edition not specified.
    +     * 
    + * + * EDITION_UNSPECIFIED = 0; + */ + public static final int EDITION_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * Standard edition.
    +     * 
    + * + * STANDARD = 1; + */ + public static final int STANDARD_VALUE = 1; + + /** + * + * + *
    +     * Enterprise edition.
    +     * 
    + * + * ENTERPRISE = 2; + */ + public static final int ENTERPRISE_VALUE = 2; + + /** + * + * + *
    +     * Enterprise Plus edition.
    +     * 
    + * + * ENTERPRISE_PLUS = 3; + */ + public static final int ENTERPRISE_PLUS_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Edition valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Edition forNumber(int value) { + switch (value) { + case 0: + return EDITION_UNSPECIFIED; + case 1: + return STANDARD; + case 2: + return ENTERPRISE; + case 3: + return ENTERPRISE_PLUS; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Edition findValueByNumber(int number) { + return Edition.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.Instance.getDescriptor().getEnumTypes().get(2); + } + + private static final Edition[] VALUES = values(); + + public static Edition 
valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Edition(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.admin.instance.v1.Instance.Edition) + } + + /** + * + * + *
    +   * Indicates the
    +   * [default backup
    +   * schedule](https://cloud.google.com/spanner/docs/backup#default-backup-schedules)
    +   * behavior for new databases within the instance.
    +   * 
    + * + * Protobuf enum {@code google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType} + */ + public enum DefaultBackupScheduleType implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * DEFAULT_BACKUP_SCHEDULE_TYPE_UNSPECIFIED = 0; + */ + DEFAULT_BACKUP_SCHEDULE_TYPE_UNSPECIFIED(0), + /** + * + * + *
    +     * A default backup schedule isn't created automatically when a new database
    +     * is created in the instance.
    +     * 
    + * + * NONE = 1; + */ + NONE(1), + /** + * + * + *
    +     * A default backup schedule is created automatically when a new database
    +     * is created in the instance. The default backup schedule creates a full
    +     * backup every 24 hours. These full backups are retained for 7 days.
    +     * You can edit or delete the default backup schedule once it's created.
    +     * 
    + * + * AUTOMATIC = 2; + */ + AUTOMATIC(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DefaultBackupScheduleType"); + } + + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * DEFAULT_BACKUP_SCHEDULE_TYPE_UNSPECIFIED = 0; + */ + public static final int DEFAULT_BACKUP_SCHEDULE_TYPE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * A default backup schedule isn't created automatically when a new database
    +     * is created in the instance.
    +     * 
    + * + * NONE = 1; + */ + public static final int NONE_VALUE = 1; + + /** + * + * + *
    +     * A default backup schedule is created automatically when a new database
    +     * is created in the instance. The default backup schedule creates a full
    +     * backup every 24 hours. These full backups are retained for 7 days.
    +     * You can edit or delete the default backup schedule once it's created.
    +     * 
    + * + * AUTOMATIC = 2; + */ + public static final int AUTOMATIC_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static DefaultBackupScheduleType valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static DefaultBackupScheduleType forNumber(int value) { + switch (value) { + case 0: + return DEFAULT_BACKUP_SCHEDULE_TYPE_UNSPECIFIED; + case 1: + return NONE; + case 2: + return AUTOMATIC; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public DefaultBackupScheduleType findValueByNumber(int number) { + return DefaultBackupScheduleType.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.Instance.getDescriptor().getEnumTypes().get(3); + } + + private static final 
DefaultBackupScheduleType[] VALUES = values(); + + public static DefaultBackupScheduleType valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private DefaultBackupScheduleType(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType) + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Required. A unique identifier for the instance, which cannot be changed
    +   * after the instance is created. Values are of the form
    +   * `projects/<project>/instances/[a-z][-a-z0-9]*[a-z0-9]`. The final
    +   * segment of the name must be between 2 and 64 characters in length.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. A unique identifier for the instance, which cannot be changed
    +   * after the instance is created. Values are of the form
    +   * `projects/<project>/instances/[a-z][-a-z0-9]*[a-z0-9]`. The final
    +   * segment of the name must be between 2 and 64 characters in length.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CONFIG_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object config_ = ""; + + /** + * + * + *
    +   * Required. The name of the instance's configuration. Values are of the form
    +   * `projects/<project>/instanceConfigs/<configuration>`. See
    +   * also [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
    +   * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    +   * 
    + * + * + * string config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The config. + */ + @java.lang.Override + public java.lang.String getConfig() { + java.lang.Object ref = config_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + config_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the instance's configuration. Values are of the form
    +   * `projects/<project>/instanceConfigs/<configuration>`. See
    +   * also [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
    +   * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    +   * 
    + * + * + * string config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for config. + */ + @java.lang.Override + public com.google.protobuf.ByteString getConfigBytes() { + java.lang.Object ref = config_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + config_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DISPLAY_NAME_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object displayName_ = ""; + + /** + * + * + *
    +   * Required. The descriptive name for this instance as it appears in UIs.
    +   * Must be unique per project and between 4 and 30 characters in length.
    +   * 
    + * + * string display_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The displayName. + */ + @java.lang.Override + public java.lang.String getDisplayName() { + java.lang.Object ref = displayName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + displayName_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The descriptive name for this instance as it appears in UIs.
    +   * Must be unique per project and between 4 and 30 characters in length.
    +   * 
    + * + * string display_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for displayName. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDisplayNameBytes() { + java.lang.Object ref = displayName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + displayName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int NODE_COUNT_FIELD_NUMBER = 5; + private int nodeCount_ = 0; + + /** + * + * + *
    +   * The number of nodes allocated to this instance. At most, one of either
    +   * `node_count` or `processing_units` should be present in the message.
    +   *
    +   * Users can set the `node_count` field to specify the target number of nodes
    +   * allocated to the instance.
    +   *
    +   * If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY`
    +   * field and reflects the current number of nodes allocated to the instance.
    +   *
    +   * This might be zero in API responses for instances that are not yet in the
    +   * `READY` state.
    +   *
    +   *
    +   * For more information, see
    +   * [Compute capacity, nodes, and processing
    +   * units](https://cloud.google.com/spanner/docs/compute-capacity).
    +   * 
    + * + * int32 node_count = 5; + * + * @return The nodeCount. + */ + @java.lang.Override + public int getNodeCount() { + return nodeCount_; + } + + public static final int PROCESSING_UNITS_FIELD_NUMBER = 9; + private int processingUnits_ = 0; + + /** + * + * + *
    +   * The number of processing units allocated to this instance. At most, one of
    +   * either `processing_units` or `node_count` should be present in the message.
    +   *
    +   * Users can set the `processing_units` field to specify the target number of
    +   * processing units allocated to the instance.
    +   *
    +   * If autoscaling is enabled, `processing_units` is treated as an
    +   * `OUTPUT_ONLY` field and reflects the current number of processing units
    +   * allocated to the instance.
    +   *
    +   * This might be zero in API responses for instances that are not yet in the
    +   * `READY` state.
    +   *
    +   *
    +   * For more information, see
    +   * [Compute capacity, nodes and processing
    +   * units](https://cloud.google.com/spanner/docs/compute-capacity).
    +   * 
    + * + * int32 processing_units = 9; + * + * @return The processingUnits. + */ + @java.lang.Override + public int getProcessingUnits() { + return processingUnits_; + } + + public static final int REPLICA_COMPUTE_CAPACITY_FIELD_NUMBER = 19; + + @SuppressWarnings("serial") + private java.util.List + replicaComputeCapacity_; + + /** + * + * + *
    +   * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +   * selection identifies a set of replicas with common properties. Replicas
    +   * identified by a ReplicaSelection are scaled with the same compute capacity.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getReplicaComputeCapacityList() { + return replicaComputeCapacity_; + } + + /** + * + * + *
    +   * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +   * selection identifies a set of replicas with common properties. Replicas
    +   * identified by a ReplicaSelection are scaled with the same compute capacity.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List< + ? extends com.google.spanner.admin.instance.v1.ReplicaComputeCapacityOrBuilder> + getReplicaComputeCapacityOrBuilderList() { + return replicaComputeCapacity_; + } + + /** + * + * + *
    +   * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +   * selection identifies a set of replicas with common properties. Replicas
    +   * identified by a ReplicaSelection are scaled with the same compute capacity.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public int getReplicaComputeCapacityCount() { + return replicaComputeCapacity_.size(); + } + + /** + * + * + *
    +   * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +   * selection identifies a set of replicas with common properties. Replicas
    +   * identified by a ReplicaSelection are scaled with the same compute capacity.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacity getReplicaComputeCapacity( + int index) { + return replicaComputeCapacity_.get(index); + } + + /** + * + * + *
    +   * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +   * selection identifies a set of replicas with common properties. Replicas
    +   * identified by a ReplicaSelection are scaled with the same compute capacity.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacityOrBuilder + getReplicaComputeCapacityOrBuilder(int index) { + return replicaComputeCapacity_.get(index); + } + + public static final int AUTOSCALING_CONFIG_FIELD_NUMBER = 17; + private com.google.spanner.admin.instance.v1.AutoscalingConfig autoscalingConfig_; + + /** + * + * + *
    +   * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +   * field is set. When autoscaling is enabled, node_count and processing_units
    +   * are treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +   * allocated to the instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the autoscalingConfig field is set. + */ + @java.lang.Override + public boolean hasAutoscalingConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +   * field is set. When autoscaling is enabled, node_count and processing_units
    +   * are treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +   * allocated to the instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoscalingConfig. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig getAutoscalingConfig() { + return autoscalingConfig_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance() + : autoscalingConfig_; + } + + /** + * + * + *
    +   * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +   * field is set. When autoscaling is enabled, node_count and processing_units
    +   * are treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +   * allocated to the instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder + getAutoscalingConfigOrBuilder() { + return autoscalingConfig_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance() + : autoscalingConfig_; + } + + public static final int STATE_FIELD_NUMBER = 6; + private int state_ = 0; + + /** + * + * + *
    +   * Output only. The current instance state. For
    +   * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance],
    +   * the state must be either omitted or set to `CREATING`. For
    +   * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance],
    +   * the state must be either omitted or set to `READY`.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.State state = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + @java.lang.Override + public int getStateValue() { + return state_; + } + + /** + * + * + *
    +   * Output only. The current instance state. For
    +   * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance],
    +   * the state must be either omitted or set to `CREATING`. For
    +   * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance],
    +   * the state must be either omitted or set to `READY`.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.State state = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance.State getState() { + com.google.spanner.admin.instance.v1.Instance.State result = + com.google.spanner.admin.instance.v1.Instance.State.forNumber(state_); + return result == null + ? com.google.spanner.admin.instance.v1.Instance.State.UNRECOGNIZED + : result; + } + + public static final int LABELS_FIELD_NUMBER = 7; + + private static final class LabelsDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_Instance_LabelsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField labels_; + + private com.google.protobuf.MapField internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField(LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + + /** + * + * + *
    +   * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +   * resources into groups that reflect a customer's organizational needs and
    +   * deployment strategies. Cloud Labels can be used to filter collections of
    +   * resources. They can be used to control how resource metrics are aggregated.
    +   * And they can be used as arguments to policy management rules (e.g. route,
    +   * firewall, load balancing, etc.).
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `[a-z0-9_-]{0,63}`.
    +   * * No more than 64 labels can be associated with a given resource.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   *
    +   * If you plan to use labels in your own code, please note that additional
    +   * characters may be allowed in the future. And so you are advised to use an
    +   * internal label representation, such as JSON, which doesn't rely upon
    +   * specific characters being disallowed.  For example, representing labels
    +   * as the string:  name + "_" + value  would prove problematic if we were to
    +   * allow "_" in a future release.
    +   * 
    + * + * map<string, string> labels = 7; + */ + @java.lang.Override + public boolean containsLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetLabels().getMap().containsKey(key); + } + + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + + /** + * + * + *
    +   * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +   * resources into groups that reflect a customer's organizational needs and
    +   * deployment strategies. Cloud Labels can be used to filter collections of
    +   * resources. They can be used to control how resource metrics are aggregated.
    +   * And they can be used as arguments to policy management rules (e.g. route,
    +   * firewall, load balancing, etc.).
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `[a-z0-9_-]{0,63}`.
    +   * * No more than 64 labels can be associated with a given resource.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   *
    +   * If you plan to use labels in your own code, please note that additional
    +   * characters may be allowed in the future. And so you are advised to use an
    +   * internal label representation, such as JSON, which doesn't rely upon
    +   * specific characters being disallowed.  For example, representing labels
    +   * as the string:  name + "_" + value  would prove problematic if we were to
    +   * allow "_" in a future release.
    +   * 
    + * + * map<string, string> labels = 7; + */ + @java.lang.Override + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + + /** + * + * + *
    +   * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +   * resources into groups that reflect a customer's organizational needs and
    +   * deployment strategies. Cloud Labels can be used to filter collections of
    +   * resources. They can be used to control how resource metrics are aggregated.
    +   * And they can be used as arguments to policy management rules (e.g. route,
    +   * firewall, load balancing, etc.).
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `[a-z0-9_-]{0,63}`.
    +   * * No more than 64 labels can be associated with a given resource.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   *
    +   * If you plan to use labels in your own code, please note that additional
    +   * characters may be allowed in the future. And so you are advised to use an
    +   * internal label representation, such as JSON, which doesn't rely upon
    +   * specific characters being disallowed.  For example, representing labels
    +   * as the string:  name + "_" + value  would prove problematic if we were to
    +   * allow "_" in a future release.
    +   * 
    + * + * map<string, string> labels = 7; + */ + @java.lang.Override + public /* nullable */ java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
    +   * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +   * resources into groups that reflect a customer's organizational needs and
    +   * deployment strategies. Cloud Labels can be used to filter collections of
    +   * resources. They can be used to control how resource metrics are aggregated.
    +   * And they can be used as arguments to policy management rules (e.g. route,
    +   * firewall, load balancing, etc.).
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `[a-z0-9_-]{0,63}`.
    +   * * No more than 64 labels can be associated with a given resource.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   *
    +   * If you plan to use labels in your own code, please note that additional
    +   * characters may be allowed in the future. And so you are advised to use an
    +   * internal label representation, such as JSON, which doesn't rely upon
    +   * specific characters being disallowed.  For example, representing labels
    +   * as the string:  name + "_" + value  would prove problematic if we were to
    +   * allow "_" in a future release.
    +   * 
    + * + * map<string, string> labels = 7; + */ + @java.lang.Override + public java.lang.String getLabelsOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int INSTANCE_TYPE_FIELD_NUMBER = 10; + private int instanceType_ = 0; + + /** + * + * + *
    +   * The `InstanceType` of the current instance.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance.InstanceType instance_type = 10; + * + * @return The enum numeric value on the wire for instanceType. + */ + @java.lang.Override + public int getInstanceTypeValue() { + return instanceType_; + } + + /** + * + * + *
    +   * The `InstanceType` of the current instance.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance.InstanceType instance_type = 10; + * + * @return The instanceType. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance.InstanceType getInstanceType() { + com.google.spanner.admin.instance.v1.Instance.InstanceType result = + com.google.spanner.admin.instance.v1.Instance.InstanceType.forNumber(instanceType_); + return result == null + ? com.google.spanner.admin.instance.v1.Instance.InstanceType.UNRECOGNIZED + : result; + } + + public static final int ENDPOINT_URIS_FIELD_NUMBER = 8; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList endpointUris_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * Deprecated. This field is not populated.
    +   * 
    + * + * repeated string endpoint_uris = 8; + * + * @return A list containing the endpointUris. + */ + public com.google.protobuf.ProtocolStringList getEndpointUrisList() { + return endpointUris_; + } + + /** + * + * + *
    +   * Deprecated. This field is not populated.
    +   * 
    + * + * repeated string endpoint_uris = 8; + * + * @return The count of endpointUris. + */ + public int getEndpointUrisCount() { + return endpointUris_.size(); + } + + /** + * + * + *
    +   * Deprecated. This field is not populated.
    +   * 
    + * + * repeated string endpoint_uris = 8; + * + * @param index The index of the element to return. + * @return The endpointUris at the given index. + */ + public java.lang.String getEndpointUris(int index) { + return endpointUris_.get(index); + } + + /** + * + * + *
    +   * Deprecated. This field is not populated.
    +   * 
    + * + * repeated string endpoint_uris = 8; + * + * @param index The index of the value to return. + * @return The bytes of the endpointUris at the given index. + */ + public com.google.protobuf.ByteString getEndpointUrisBytes(int index) { + return endpointUris_.getByteString(index); + } + + public static final int CREATE_TIME_FIELD_NUMBER = 11; + private com.google.protobuf.Timestamp createTime_; + + /** + * + * + *
    +   * Output only. The time at which the instance was created.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + @java.lang.Override + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Output only. The time at which the instance was created.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + /** + * + * + *
    +   * Output only. The time at which the instance was created.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + public static final int UPDATE_TIME_FIELD_NUMBER = 12; + private com.google.protobuf.Timestamp updateTime_; + + /** + * + * + *
    +   * Output only. The time at which the instance was most recently updated.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + @java.lang.Override + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * Output only. The time at which the instance was most recently updated.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getUpdateTime() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + /** + * + * + *
    +   * Output only. The time at which the instance was most recently updated.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + public static final int FREE_INSTANCE_METADATA_FIELD_NUMBER = 13; + private com.google.spanner.admin.instance.v1.FreeInstanceMetadata freeInstanceMetadata_; + + /** + * + * + *
    +   * Free instance metadata. Only populated for free instances.
    +   * 
    + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata free_instance_metadata = 13; + * + * + * @return Whether the freeInstanceMetadata field is set. + */ + @java.lang.Override + public boolean hasFreeInstanceMetadata() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +   * Free instance metadata. Only populated for free instances.
    +   * 
    + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata free_instance_metadata = 13; + * + * + * @return The freeInstanceMetadata. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.FreeInstanceMetadata getFreeInstanceMetadata() { + return freeInstanceMetadata_ == null + ? com.google.spanner.admin.instance.v1.FreeInstanceMetadata.getDefaultInstance() + : freeInstanceMetadata_; + } + + /** + * + * + *
    +   * Free instance metadata. Only populated for free instances.
    +   * 
    + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata free_instance_metadata = 13; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.FreeInstanceMetadataOrBuilder + getFreeInstanceMetadataOrBuilder() { + return freeInstanceMetadata_ == null + ? com.google.spanner.admin.instance.v1.FreeInstanceMetadata.getDefaultInstance() + : freeInstanceMetadata_; + } + + public static final int EDITION_FIELD_NUMBER = 20; + private int edition_ = 0; + + /** + * + * + *
    +   * Optional. The `Edition` of the current instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for edition. + */ + @java.lang.Override + public int getEditionValue() { + return edition_; + } + + /** + * + * + *
    +   * Optional. The `Edition` of the current instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The edition. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance.Edition getEdition() { + com.google.spanner.admin.instance.v1.Instance.Edition result = + com.google.spanner.admin.instance.v1.Instance.Edition.forNumber(edition_); + return result == null + ? com.google.spanner.admin.instance.v1.Instance.Edition.UNRECOGNIZED + : result; + } + + public static final int DEFAULT_BACKUP_SCHEDULE_TYPE_FIELD_NUMBER = 23; + private int defaultBackupScheduleType_ = 0; + + /** + * + * + *
    +   * Optional. Controls the default backup schedule behavior for new databases
    +   * within the instance. By default, a backup schedule is created automatically
    +   * when a new database is created in a new instance.
    +   *
    +   * Note that the `AUTOMATIC` value isn't permitted for free instances,
    +   * as backups and backup schedules aren't supported for free instances.
    +   *
    +   * In the `GetInstance` or `ListInstances` response, if the value of
    +   * `default_backup_schedule_type` isn't set, or set to `NONE`, Spanner doesn't
    +   * create a default backup schedule for new databases in the instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType default_backup_schedule_type = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for defaultBackupScheduleType. + */ + @java.lang.Override + public int getDefaultBackupScheduleTypeValue() { + return defaultBackupScheduleType_; + } + + /** + * + * + *
    +   * Optional. Controls the default backup schedule behavior for new databases
    +   * within the instance. By default, a backup schedule is created automatically
    +   * when a new database is created in a new instance.
    +   *
    +   * Note that the `AUTOMATIC` value isn't permitted for free instances,
    +   * as backups and backup schedules aren't supported for free instances.
    +   *
    +   * In the `GetInstance` or `ListInstances` response, if the value of
    +   * `default_backup_schedule_type` isn't set, or set to `NONE`, Spanner doesn't
    +   * create a default backup schedule for new databases in the instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType default_backup_schedule_type = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The defaultBackupScheduleType. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType + getDefaultBackupScheduleType() { + com.google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType result = + com.google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType.forNumber( + defaultBackupScheduleType_); + return result == null + ? com.google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType.UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(config_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, config_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(displayName_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, displayName_); + } + if (nodeCount_ != 0) { + output.writeInt32(5, nodeCount_); + } + if (state_ + != com.google.spanner.admin.instance.v1.Instance.State.STATE_UNSPECIFIED.getNumber()) { + output.writeEnum(6, state_); + } + com.google.protobuf.GeneratedMessage.serializeStringMapTo( + output, internalGetLabels(), LabelsDefaultEntryHolder.defaultEntry, 7); + for (int i = 0; i < endpointUris_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 8, 
endpointUris_.getRaw(i)); + } + if (processingUnits_ != 0) { + output.writeInt32(9, processingUnits_); + } + if (instanceType_ + != com.google.spanner.admin.instance.v1.Instance.InstanceType.INSTANCE_TYPE_UNSPECIFIED + .getNumber()) { + output.writeEnum(10, instanceType_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(11, getCreateTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(12, getUpdateTime()); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(13, getFreeInstanceMetadata()); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(17, getAutoscalingConfig()); + } + for (int i = 0; i < replicaComputeCapacity_.size(); i++) { + output.writeMessage(19, replicaComputeCapacity_.get(i)); + } + if (edition_ + != com.google.spanner.admin.instance.v1.Instance.Edition.EDITION_UNSPECIFIED.getNumber()) { + output.writeEnum(20, edition_); + } + if (defaultBackupScheduleType_ + != com.google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType + .DEFAULT_BACKUP_SCHEDULE_TYPE_UNSPECIFIED + .getNumber()) { + output.writeEnum(23, defaultBackupScheduleType_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(config_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, config_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(displayName_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, displayName_); + } + if (nodeCount_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(5, nodeCount_); + } + if (state_ + != com.google.spanner.admin.instance.v1.Instance.State.STATE_UNSPECIFIED.getNumber()) 
{ + size += com.google.protobuf.CodedOutputStream.computeEnumSize(6, state_); + } + for (java.util.Map.Entry entry : + internalGetLabels().getMap().entrySet()) { + com.google.protobuf.MapEntry labels__ = + LabelsDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, labels__); + } + { + int dataSize = 0; + for (int i = 0; i < endpointUris_.size(); i++) { + dataSize += computeStringSizeNoTag(endpointUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getEndpointUrisList().size(); + } + if (processingUnits_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(9, processingUnits_); + } + if (instanceType_ + != com.google.spanner.admin.instance.v1.Instance.InstanceType.INSTANCE_TYPE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(10, instanceType_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(11, getCreateTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(12, getUpdateTime()); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(13, getFreeInstanceMetadata()); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(17, getAutoscalingConfig()); + } + for (int i = 0; i < replicaComputeCapacity_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 19, replicaComputeCapacity_.get(i)); + } + if (edition_ + != com.google.spanner.admin.instance.v1.Instance.Edition.EDITION_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(20, edition_); + } + if (defaultBackupScheduleType_ + != 
com.google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType + .DEFAULT_BACKUP_SCHEDULE_TYPE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(23, defaultBackupScheduleType_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.Instance)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.Instance other = + (com.google.spanner.admin.instance.v1.Instance) obj; + + if (!getName().equals(other.getName())) return false; + if (!getConfig().equals(other.getConfig())) return false; + if (!getDisplayName().equals(other.getDisplayName())) return false; + if (getNodeCount() != other.getNodeCount()) return false; + if (getProcessingUnits() != other.getProcessingUnits()) return false; + if (!getReplicaComputeCapacityList().equals(other.getReplicaComputeCapacityList())) + return false; + if (hasAutoscalingConfig() != other.hasAutoscalingConfig()) return false; + if (hasAutoscalingConfig()) { + if (!getAutoscalingConfig().equals(other.getAutoscalingConfig())) return false; + } + if (state_ != other.state_) return false; + if (!internalGetLabels().equals(other.internalGetLabels())) return false; + if (instanceType_ != other.instanceType_) return false; + if (!getEndpointUrisList().equals(other.getEndpointUrisList())) return false; + if (hasCreateTime() != other.hasCreateTime()) return false; + if (hasCreateTime()) { + if (!getCreateTime().equals(other.getCreateTime())) return false; + } + if (hasUpdateTime() != other.hasUpdateTime()) return false; + if (hasUpdateTime()) { + if (!getUpdateTime().equals(other.getUpdateTime())) return false; + } + if (hasFreeInstanceMetadata() != other.hasFreeInstanceMetadata()) return false; + if (hasFreeInstanceMetadata()) { + if 
(!getFreeInstanceMetadata().equals(other.getFreeInstanceMetadata())) return false; + } + if (edition_ != other.edition_) return false; + if (defaultBackupScheduleType_ != other.defaultBackupScheduleType_) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getConfig().hashCode(); + hash = (37 * hash) + DISPLAY_NAME_FIELD_NUMBER; + hash = (53 * hash) + getDisplayName().hashCode(); + hash = (37 * hash) + NODE_COUNT_FIELD_NUMBER; + hash = (53 * hash) + getNodeCount(); + hash = (37 * hash) + PROCESSING_UNITS_FIELD_NUMBER; + hash = (53 * hash) + getProcessingUnits(); + if (getReplicaComputeCapacityCount() > 0) { + hash = (37 * hash) + REPLICA_COMPUTE_CAPACITY_FIELD_NUMBER; + hash = (53 * hash) + getReplicaComputeCapacityList().hashCode(); + } + if (hasAutoscalingConfig()) { + hash = (37 * hash) + AUTOSCALING_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getAutoscalingConfig().hashCode(); + } + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + state_; + if (!internalGetLabels().getMap().isEmpty()) { + hash = (37 * hash) + LABELS_FIELD_NUMBER; + hash = (53 * hash) + internalGetLabels().hashCode(); + } + hash = (37 * hash) + INSTANCE_TYPE_FIELD_NUMBER; + hash = (53 * hash) + instanceType_; + if (getEndpointUrisCount() > 0) { + hash = (37 * hash) + ENDPOINT_URIS_FIELD_NUMBER; + hash = (53 * hash) + getEndpointUrisList().hashCode(); + } + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + } + if (hasUpdateTime()) { + hash = (37 * hash) + UPDATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + 
getUpdateTime().hashCode(); + } + if (hasFreeInstanceMetadata()) { + hash = (37 * hash) + FREE_INSTANCE_METADATA_FIELD_NUMBER; + hash = (53 * hash) + getFreeInstanceMetadata().hashCode(); + } + hash = (37 * hash) + EDITION_FIELD_NUMBER; + hash = (53 * hash) + edition_; + hash = (37 * hash) + DEFAULT_BACKUP_SCHEDULE_TYPE_FIELD_NUMBER; + hash = (53 * hash) + defaultBackupScheduleType_; + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.Instance parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.Instance parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.Instance parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.Instance parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.Instance parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.Instance parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.Instance 
parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.Instance parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.Instance parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.Instance parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.Instance parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.Instance parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.admin.instance.v1.Instance prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == 
DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * An isolated set of Cloud Spanner resources on which databases can be hosted.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.Instance} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.Instance) + com.google.spanner.admin.instance.v1.InstanceOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_Instance_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 7: + return internalGetLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 7: + return internalGetMutableLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_Instance_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.Instance.class, + com.google.spanner.admin.instance.v1.Instance.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.Instance.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if 
(com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetReplicaComputeCapacityFieldBuilder(); + internalGetAutoscalingConfigFieldBuilder(); + internalGetCreateTimeFieldBuilder(); + internalGetUpdateTimeFieldBuilder(); + internalGetFreeInstanceMetadataFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + config_ = ""; + displayName_ = ""; + nodeCount_ = 0; + processingUnits_ = 0; + if (replicaComputeCapacityBuilder_ == null) { + replicaComputeCapacity_ = java.util.Collections.emptyList(); + } else { + replicaComputeCapacity_ = null; + replicaComputeCapacityBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + autoscalingConfig_ = null; + if (autoscalingConfigBuilder_ != null) { + autoscalingConfigBuilder_.dispose(); + autoscalingConfigBuilder_ = null; + } + state_ = 0; + internalGetMutableLabels().clear(); + instanceType_ = 0; + endpointUris_ = com.google.protobuf.LazyStringArrayList.emptyList(); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + freeInstanceMetadata_ = null; + if (freeInstanceMetadataBuilder_ != null) { + freeInstanceMetadataBuilder_.dispose(); + freeInstanceMetadataBuilder_ = null; + } + edition_ = 0; + defaultBackupScheduleType_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_Instance_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.Instance.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.admin.instance.v1.Instance build() { + com.google.spanner.admin.instance.v1.Instance result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance buildPartial() { + com.google.spanner.admin.instance.v1.Instance result = + new com.google.spanner.admin.instance.v1.Instance(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.admin.instance.v1.Instance result) { + if (replicaComputeCapacityBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0)) { + replicaComputeCapacity_ = java.util.Collections.unmodifiableList(replicaComputeCapacity_); + bitField0_ = (bitField0_ & ~0x00000020); + } + result.replicaComputeCapacity_ = replicaComputeCapacity_; + } else { + result.replicaComputeCapacity_ = replicaComputeCapacityBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.Instance result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.config_ = config_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.displayName_ = displayName_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.nodeCount_ = nodeCount_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.processingUnits_ = processingUnits_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000040) != 0)) { + result.autoscalingConfig_ = + autoscalingConfigBuilder_ == null + ? 
autoscalingConfig_ + : autoscalingConfigBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.state_ = state_; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.labels_ = internalGetLabels(); + result.labels_.makeImmutable(); + } + if (((from_bitField0_ & 0x00000200) != 0)) { + result.instanceType_ = instanceType_; + } + if (((from_bitField0_ & 0x00000400) != 0)) { + endpointUris_.makeImmutable(); + result.endpointUris_ = endpointUris_; + } + if (((from_bitField0_ & 0x00000800) != 0)) { + result.createTime_ = createTimeBuilder_ == null ? createTime_ : createTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00001000) != 0)) { + result.updateTime_ = updateTimeBuilder_ == null ? updateTime_ : updateTimeBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00002000) != 0)) { + result.freeInstanceMetadata_ = + freeInstanceMetadataBuilder_ == null + ? freeInstanceMetadata_ + : freeInstanceMetadataBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00004000) != 0)) { + result.edition_ = edition_; + } + if (((from_bitField0_ & 0x00008000) != 0)) { + result.defaultBackupScheduleType_ = defaultBackupScheduleType_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.Instance) { + return mergeFrom((com.google.spanner.admin.instance.v1.Instance) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.Instance other) { + if (other == com.google.spanner.admin.instance.v1.Instance.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getConfig().isEmpty()) { + config_ = other.config_; + bitField0_ |= 0x00000002; + 
onChanged(); + } + if (!other.getDisplayName().isEmpty()) { + displayName_ = other.displayName_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.getNodeCount() != 0) { + setNodeCount(other.getNodeCount()); + } + if (other.getProcessingUnits() != 0) { + setProcessingUnits(other.getProcessingUnits()); + } + if (replicaComputeCapacityBuilder_ == null) { + if (!other.replicaComputeCapacity_.isEmpty()) { + if (replicaComputeCapacity_.isEmpty()) { + replicaComputeCapacity_ = other.replicaComputeCapacity_; + bitField0_ = (bitField0_ & ~0x00000020); + } else { + ensureReplicaComputeCapacityIsMutable(); + replicaComputeCapacity_.addAll(other.replicaComputeCapacity_); + } + onChanged(); + } + } else { + if (!other.replicaComputeCapacity_.isEmpty()) { + if (replicaComputeCapacityBuilder_.isEmpty()) { + replicaComputeCapacityBuilder_.dispose(); + replicaComputeCapacityBuilder_ = null; + replicaComputeCapacity_ = other.replicaComputeCapacity_; + bitField0_ = (bitField0_ & ~0x00000020); + replicaComputeCapacityBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetReplicaComputeCapacityFieldBuilder() + : null; + } else { + replicaComputeCapacityBuilder_.addAllMessages(other.replicaComputeCapacity_); + } + } + } + if (other.hasAutoscalingConfig()) { + mergeAutoscalingConfig(other.getAutoscalingConfig()); + } + if (other.state_ != 0) { + setStateValue(other.getStateValue()); + } + internalGetMutableLabels().mergeFrom(other.internalGetLabels()); + bitField0_ |= 0x00000100; + if (other.instanceType_ != 0) { + setInstanceTypeValue(other.getInstanceTypeValue()); + } + if (!other.endpointUris_.isEmpty()) { + if (endpointUris_.isEmpty()) { + endpointUris_ = other.endpointUris_; + bitField0_ |= 0x00000400; + } else { + ensureEndpointUrisIsMutable(); + endpointUris_.addAll(other.endpointUris_); + } + onChanged(); + } + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (other.hasUpdateTime()) { + mergeUpdateTime(other.getUpdateTime()); + } + if (other.hasFreeInstanceMetadata()) { + mergeFreeInstanceMetadata(other.getFreeInstanceMetadata()); + } + if (other.edition_ != 0) { + setEditionValue(other.getEditionValue()); + } + if (other.defaultBackupScheduleType_ != 0) { + setDefaultBackupScheduleTypeValue(other.getDefaultBackupScheduleTypeValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + config_ = input.readStringRequireUtf8(); + bitField0_ |= 
0x00000002; + break; + } // case 18 + case 26: + { + displayName_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 40: + { + nodeCount_ = input.readInt32(); + bitField0_ |= 0x00000008; + break; + } // case 40 + case 48: + { + state_ = input.readEnum(); + bitField0_ |= 0x00000080; + break; + } // case 48 + case 58: + { + com.google.protobuf.MapEntry labels__ = + input.readMessage( + LabelsDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableLabels() + .getMutableMap() + .put(labels__.getKey(), labels__.getValue()); + bitField0_ |= 0x00000100; + break; + } // case 58 + case 66: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureEndpointUrisIsMutable(); + endpointUris_.add(s); + break; + } // case 66 + case 72: + { + processingUnits_ = input.readInt32(); + bitField0_ |= 0x00000010; + break; + } // case 72 + case 80: + { + instanceType_ = input.readEnum(); + bitField0_ |= 0x00000200; + break; + } // case 80 + case 90: + { + input.readMessage( + internalGetCreateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000800; + break; + } // case 90 + case 98: + { + input.readMessage( + internalGetUpdateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00001000; + break; + } // case 98 + case 106: + { + input.readMessage( + internalGetFreeInstanceMetadataFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00002000; + break; + } // case 106 + case 138: + { + input.readMessage( + internalGetAutoscalingConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000040; + break; + } // case 138 + case 154: + { + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity m = + input.readMessage( + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.parser(), + extensionRegistry); + if (replicaComputeCapacityBuilder_ == null) { + ensureReplicaComputeCapacityIsMutable(); + replicaComputeCapacity_.add(m); 
+ } else { + replicaComputeCapacityBuilder_.addMessage(m); + } + break; + } // case 154 + case 160: + { + edition_ = input.readEnum(); + bitField0_ |= 0x00004000; + break; + } // case 160 + case 184: + { + defaultBackupScheduleType_ = input.readEnum(); + bitField0_ |= 0x00008000; + break; + } // case 184 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Required. A unique identifier for the instance, which cannot be changed
    +     * after the instance is created. Values are of the form
    +     * `projects/<project>/instances/[a-z][-a-z0-9]*[a-z0-9]`. The final
    +     * segment of the name must be between 2 and 64 characters in length.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. A unique identifier for the instance, which cannot be changed
    +     * after the instance is created. Values are of the form
    +     * `projects/<project>/instances/[a-z][-a-z0-9]*[a-z0-9]`. The final
    +     * segment of the name must be between 2 and 64 characters in length.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. A unique identifier for the instance, which cannot be changed
    +     * after the instance is created. Values are of the form
    +     * `projects/<project>/instances/[a-z][-a-z0-9]*[a-z0-9]`. The final
    +     * segment of the name must be between 2 and 64 characters in length.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A unique identifier for the instance, which cannot be changed
    +     * after the instance is created. Values are of the form
    +     * `projects/<project>/instances/[a-z][-a-z0-9]*[a-z0-9]`. The final
    +     * segment of the name must be between 2 and 64 characters in length.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A unique identifier for the instance, which cannot be changed
    +     * after the instance is created. Values are of the form
    +     * `projects/<project>/instances/[a-z][-a-z0-9]*[a-z0-9]`. The final
    +     * segment of the name must be between 2 and 64 characters in length.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object config_ = ""; + + /** + * + * + *
    +     * Required. The name of the instance's configuration. Values are of the form
    +     * `projects/<project>/instanceConfigs/<configuration>`. See
    +     * also [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
    +     * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    +     * 
    + * + * + * string config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The config. + */ + public java.lang.String getConfig() { + java.lang.Object ref = config_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + config_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the instance's configuration. Values are of the form
    +     * `projects/<project>/instanceConfigs/<configuration>`. See
    +     * also [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
    +     * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    +     * 
    + * + * + * string config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for config. + */ + public com.google.protobuf.ByteString getConfigBytes() { + java.lang.Object ref = config_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + config_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the instance's configuration. Values are of the form
    +     * `projects/<project>/instanceConfigs/<configuration>`. See
    +     * also [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
    +     * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    +     * 
    + * + * + * string config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The config to set. + * @return This builder for chaining. + */ + public Builder setConfig(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + config_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the instance's configuration. Values are of the form
    +     * `projects/<project>/instanceConfigs/<configuration>`. See
    +     * also [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
    +     * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    +     * 
    + * + * + * string config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearConfig() { + config_ = getDefaultInstance().getConfig(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the instance's configuration. Values are of the form
    +     * `projects/<project>/instanceConfigs/<configuration>`. See
    +     * also [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
    +     * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    +     * 
    + * + * + * string config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for config to set. + * @return This builder for chaining. + */ + public Builder setConfigBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + config_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object displayName_ = ""; + + /** + * + * + *
    +     * Required. The descriptive name for this instance as it appears in UIs.
    +     * Must be unique per project and between 4 and 30 characters in length.
    +     * 
    + * + * string display_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The displayName. + */ + public java.lang.String getDisplayName() { + java.lang.Object ref = displayName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + displayName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The descriptive name for this instance as it appears in UIs.
    +     * Must be unique per project and between 4 and 30 characters in length.
    +     * 
    + * + * string display_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for displayName. + */ + public com.google.protobuf.ByteString getDisplayNameBytes() { + java.lang.Object ref = displayName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + displayName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The descriptive name for this instance as it appears in UIs.
    +     * Must be unique per project and between 4 and 30 characters in length.
    +     * 
    + * + * string display_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The displayName to set. + * @return This builder for chaining. + */ + public Builder setDisplayName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + displayName_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The descriptive name for this instance as it appears in UIs.
    +     * Must be unique per project and between 4 and 30 characters in length.
    +     * 
    + * + * string display_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearDisplayName() { + displayName_ = getDefaultInstance().getDisplayName(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The descriptive name for this instance as it appears in UIs.
    +     * Must be unique per project and between 4 and 30 characters in length.
    +     * 
    + * + * string display_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for displayName to set. + * @return This builder for chaining. + */ + public Builder setDisplayNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + displayName_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private int nodeCount_; + + /** + * + * + *
    +     * The number of nodes allocated to this instance. At most, one of either
    +     * `node_count` or `processing_units` should be present in the message.
    +     *
    +     * Users can set the `node_count` field to specify the target number of nodes
    +     * allocated to the instance.
    +     *
    +     * If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY`
    +     * field and reflects the current number of nodes allocated to the instance.
    +     *
    +     * This might be zero in API responses for instances that are not yet in the
    +     * `READY` state.
    +     *
    +     *
    +     * For more information, see
    +     * [Compute capacity, nodes, and processing
    +     * units](https://cloud.google.com/spanner/docs/compute-capacity).
    +     * 
    + * + * int32 node_count = 5; + * + * @return The nodeCount. + */ + @java.lang.Override + public int getNodeCount() { + return nodeCount_; + } + + /** + * + * + *
    +     * The number of nodes allocated to this instance. At most, one of either
    +     * `node_count` or `processing_units` should be present in the message.
    +     *
    +     * Users can set the `node_count` field to specify the target number of nodes
    +     * allocated to the instance.
    +     *
    +     * If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY`
    +     * field and reflects the current number of nodes allocated to the instance.
    +     *
    +     * This might be zero in API responses for instances that are not yet in the
    +     * `READY` state.
    +     *
    +     *
    +     * For more information, see
    +     * [Compute capacity, nodes, and processing
    +     * units](https://cloud.google.com/spanner/docs/compute-capacity).
    +     * 
    + * + * int32 node_count = 5; + * + * @param value The nodeCount to set. + * @return This builder for chaining. + */ + public Builder setNodeCount(int value) { + + nodeCount_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The number of nodes allocated to this instance. At most, one of either
    +     * `node_count` or `processing_units` should be present in the message.
    +     *
    +     * Users can set the `node_count` field to specify the target number of nodes
    +     * allocated to the instance.
    +     *
    +     * If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY`
    +     * field and reflects the current number of nodes allocated to the instance.
    +     *
    +     * This might be zero in API responses for instances that are not yet in the
    +     * `READY` state.
    +     *
    +     *
    +     * For more information, see
    +     * [Compute capacity, nodes, and processing
    +     * units](https://cloud.google.com/spanner/docs/compute-capacity).
    +     * 
    + * + * int32 node_count = 5; + * + * @return This builder for chaining. + */ + public Builder clearNodeCount() { + bitField0_ = (bitField0_ & ~0x00000008); + nodeCount_ = 0; + onChanged(); + return this; + } + + private int processingUnits_; + + /** + * + * + *
    +     * The number of processing units allocated to this instance. At most, one of
    +     * either `processing_units` or `node_count` should be present in the message.
    +     *
    +     * Users can set the `processing_units` field to specify the target number of
    +     * processing units allocated to the instance.
    +     *
    +     * If autoscaling is enabled, `processing_units` is treated as an
    +     * `OUTPUT_ONLY` field and reflects the current number of processing units
    +     * allocated to the instance.
    +     *
    +     * This might be zero in API responses for instances that are not yet in the
    +     * `READY` state.
    +     *
    +     *
    +     * For more information, see
    +     * [Compute capacity, nodes and processing
    +     * units](https://cloud.google.com/spanner/docs/compute-capacity).
    +     * 
    + * + * int32 processing_units = 9; + * + * @return The processingUnits. + */ + @java.lang.Override + public int getProcessingUnits() { + return processingUnits_; + } + + /** + * + * + *
    +     * The number of processing units allocated to this instance. At most, one of
    +     * either `processing_units` or `node_count` should be present in the message.
    +     *
    +     * Users can set the `processing_units` field to specify the target number of
    +     * processing units allocated to the instance.
    +     *
    +     * If autoscaling is enabled, `processing_units` is treated as an
    +     * `OUTPUT_ONLY` field and reflects the current number of processing units
    +     * allocated to the instance.
    +     *
    +     * This might be zero in API responses for instances that are not yet in the
    +     * `READY` state.
    +     *
    +     *
    +     * For more information, see
    +     * [Compute capacity, nodes and processing
    +     * units](https://cloud.google.com/spanner/docs/compute-capacity).
    +     * 
    + * + * int32 processing_units = 9; + * + * @param value The processingUnits to set. + * @return This builder for chaining. + */ + public Builder setProcessingUnits(int value) { + + processingUnits_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The number of processing units allocated to this instance. At most, one of
    +     * either `processing_units` or `node_count` should be present in the message.
    +     *
    +     * Users can set the `processing_units` field to specify the target number of
    +     * processing units allocated to the instance.
    +     *
    +     * If autoscaling is enabled, `processing_units` is treated as an
    +     * `OUTPUT_ONLY` field and reflects the current number of processing units
    +     * allocated to the instance.
    +     *
    +     * This might be zero in API responses for instances that are not yet in the
    +     * `READY` state.
    +     *
    +     *
    +     * For more information, see
    +     * [Compute capacity, nodes and processing
    +     * units](https://cloud.google.com/spanner/docs/compute-capacity).
    +     * 
    + * + * int32 processing_units = 9; + * + * @return This builder for chaining. + */ + public Builder clearProcessingUnits() { + bitField0_ = (bitField0_ & ~0x00000010); + processingUnits_ = 0; + onChanged(); + return this; + } + + private java.util.List + replicaComputeCapacity_ = java.util.Collections.emptyList(); + + private void ensureReplicaComputeCapacityIsMutable() { + if (!((bitField0_ & 0x00000020) != 0)) { + replicaComputeCapacity_ = + new java.util.ArrayList( + replicaComputeCapacity_); + bitField0_ |= 0x00000020; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacityOrBuilder> + replicaComputeCapacityBuilder_; + + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getReplicaComputeCapacityList() { + if (replicaComputeCapacityBuilder_ == null) { + return java.util.Collections.unmodifiableList(replicaComputeCapacity_); + } else { + return replicaComputeCapacityBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public int getReplicaComputeCapacityCount() { + if (replicaComputeCapacityBuilder_ == null) { + return replicaComputeCapacity_.size(); + } else { + return replicaComputeCapacityBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacity getReplicaComputeCapacity( + int index) { + if (replicaComputeCapacityBuilder_ == null) { + return replicaComputeCapacity_.get(index); + } else { + return replicaComputeCapacityBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setReplicaComputeCapacity( + int index, com.google.spanner.admin.instance.v1.ReplicaComputeCapacity value) { + if (replicaComputeCapacityBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplicaComputeCapacityIsMutable(); + replicaComputeCapacity_.set(index, value); + onChanged(); + } else { + replicaComputeCapacityBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setReplicaComputeCapacity( + int index, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder builderForValue) { + if (replicaComputeCapacityBuilder_ == null) { + ensureReplicaComputeCapacityIsMutable(); + replicaComputeCapacity_.set(index, builderForValue.build()); + onChanged(); + } else { + replicaComputeCapacityBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addReplicaComputeCapacity( + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity value) { + if (replicaComputeCapacityBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplicaComputeCapacityIsMutable(); + replicaComputeCapacity_.add(value); + onChanged(); + } else { + replicaComputeCapacityBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addReplicaComputeCapacity( + int index, com.google.spanner.admin.instance.v1.ReplicaComputeCapacity value) { + if (replicaComputeCapacityBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplicaComputeCapacityIsMutable(); + replicaComputeCapacity_.add(index, value); + onChanged(); + } else { + replicaComputeCapacityBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addReplicaComputeCapacity( + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder builderForValue) { + if (replicaComputeCapacityBuilder_ == null) { + ensureReplicaComputeCapacityIsMutable(); + replicaComputeCapacity_.add(builderForValue.build()); + onChanged(); + } else { + replicaComputeCapacityBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addReplicaComputeCapacity( + int index, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder builderForValue) { + if (replicaComputeCapacityBuilder_ == null) { + ensureReplicaComputeCapacityIsMutable(); + replicaComputeCapacity_.add(index, builderForValue.build()); + onChanged(); + } else { + replicaComputeCapacityBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addAllReplicaComputeCapacity( + java.lang.Iterable + values) { + if (replicaComputeCapacityBuilder_ == null) { + ensureReplicaComputeCapacityIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, replicaComputeCapacity_); + onChanged(); + } else { + replicaComputeCapacityBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearReplicaComputeCapacity() { + if (replicaComputeCapacityBuilder_ == null) { + replicaComputeCapacity_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + } else { + replicaComputeCapacityBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder removeReplicaComputeCapacity(int index) { + if (replicaComputeCapacityBuilder_ == null) { + ensureReplicaComputeCapacityIsMutable(); + replicaComputeCapacity_.remove(index); + onChanged(); + } else { + replicaComputeCapacityBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder + getReplicaComputeCapacityBuilder(int index) { + return internalGetReplicaComputeCapacityFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacityOrBuilder + getReplicaComputeCapacityOrBuilder(int index) { + if (replicaComputeCapacityBuilder_ == null) { + return replicaComputeCapacity_.get(index); + } else { + return replicaComputeCapacityBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List< + ? extends com.google.spanner.admin.instance.v1.ReplicaComputeCapacityOrBuilder> + getReplicaComputeCapacityOrBuilderList() { + if (replicaComputeCapacityBuilder_ != null) { + return replicaComputeCapacityBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(replicaComputeCapacity_); + } + } + + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder + addReplicaComputeCapacityBuilder() { + return internalGetReplicaComputeCapacityFieldBuilder() + .addBuilder( + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.getDefaultInstance()); + } + + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder + addReplicaComputeCapacityBuilder(int index) { + return internalGetReplicaComputeCapacityFieldBuilder() + .addBuilder( + index, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.getDefaultInstance()); + } + + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getReplicaComputeCapacityBuilderList() { + return internalGetReplicaComputeCapacityFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacityOrBuilder> + internalGetReplicaComputeCapacityFieldBuilder() { + if (replicaComputeCapacityBuilder_ == null) { + replicaComputeCapacityBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacityOrBuilder>( + replicaComputeCapacity_, + ((bitField0_ & 0x00000020) != 0), + getParentForChildren(), + isClean()); + replicaComputeCapacity_ = null; + } + return replicaComputeCapacityBuilder_; + } + + private com.google.spanner.admin.instance.v1.AutoscalingConfig autoscalingConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig, + com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder> + autoscalingConfigBuilder_; + + /** + * + * + *
    +     * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +     * field is set. When autoscaling is enabled, node_count and processing_units
    +     * are treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +     * allocated to the instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the autoscalingConfig field is set. + */ + public boolean hasAutoscalingConfig() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
    +     * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +     * field is set. When autoscaling is enabled, node_count and processing_units
    +     * are treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +     * allocated to the instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoscalingConfig. + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig getAutoscalingConfig() { + if (autoscalingConfigBuilder_ == null) { + return autoscalingConfig_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance() + : autoscalingConfig_; + } else { + return autoscalingConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +     * field is set. When autoscaling is enabled, node_count and processing_units
    +     * are treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +     * allocated to the instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAutoscalingConfig( + com.google.spanner.admin.instance.v1.AutoscalingConfig value) { + if (autoscalingConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + autoscalingConfig_ = value; + } else { + autoscalingConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +     * field is set. When autoscaling is enabled, node_count and processing_units
    +     * are treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +     * allocated to the instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAutoscalingConfig( + com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder builderForValue) { + if (autoscalingConfigBuilder_ == null) { + autoscalingConfig_ = builderForValue.build(); + } else { + autoscalingConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +     * field is set. When autoscaling is enabled, node_count and processing_units
    +     * are treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +     * allocated to the instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeAutoscalingConfig( + com.google.spanner.admin.instance.v1.AutoscalingConfig value) { + if (autoscalingConfigBuilder_ == null) { + if (((bitField0_ & 0x00000040) != 0) + && autoscalingConfig_ != null + && autoscalingConfig_ + != com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance()) { + getAutoscalingConfigBuilder().mergeFrom(value); + } else { + autoscalingConfig_ = value; + } + } else { + autoscalingConfigBuilder_.mergeFrom(value); + } + if (autoscalingConfig_ != null) { + bitField0_ |= 0x00000040; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +     * field is set. When autoscaling is enabled, node_count and processing_units
    +     * are treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +     * allocated to the instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearAutoscalingConfig() { + bitField0_ = (bitField0_ & ~0x00000040); + autoscalingConfig_ = null; + if (autoscalingConfigBuilder_ != null) { + autoscalingConfigBuilder_.dispose(); + autoscalingConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +     * field is set. When autoscaling is enabled, node_count and processing_units
    +     * are treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +     * allocated to the instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder + getAutoscalingConfigBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return internalGetAutoscalingConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +     * field is set. When autoscaling is enabled, node_count and processing_units
    +     * are treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +     * allocated to the instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder + getAutoscalingConfigOrBuilder() { + if (autoscalingConfigBuilder_ != null) { + return autoscalingConfigBuilder_.getMessageOrBuilder(); + } else { + return autoscalingConfig_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance() + : autoscalingConfig_; + } + } + + /** + * + * + *
    +     * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +     * field is set. When autoscaling is enabled, node_count and processing_units
    +     * are treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +     * allocated to the instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig, + com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder> + internalGetAutoscalingConfigFieldBuilder() { + if (autoscalingConfigBuilder_ == null) { + autoscalingConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig, + com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder>( + getAutoscalingConfig(), getParentForChildren(), isClean()); + autoscalingConfig_ = null; + } + return autoscalingConfigBuilder_; + } + + private int state_ = 0; + + /** + * + * + *
    +     * Output only. The current instance state. For
    +     * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance],
    +     * the state must be either omitted or set to `CREATING`. For
    +     * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance],
    +     * the state must be either omitted or set to `READY`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.State state = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + @java.lang.Override + public int getStateValue() { + return state_; + } + + /** + * + * + *
    +     * Output only. The current instance state. For
    +     * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance],
    +     * the state must be either omitted or set to `CREATING`. For
    +     * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance],
    +     * the state must be either omitted or set to `READY`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.State state = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The enum numeric value on the wire for state to set. + * @return This builder for chaining. + */ + public Builder setStateValue(int value) { + state_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The current instance state. For
    +     * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance],
    +     * the state must be either omitted or set to `CREATING`. For
    +     * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance],
    +     * the state must be either omitted or set to `READY`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.State state = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance.State getState() { + com.google.spanner.admin.instance.v1.Instance.State result = + com.google.spanner.admin.instance.v1.Instance.State.forNumber(state_); + return result == null + ? com.google.spanner.admin.instance.v1.Instance.State.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * Output only. The current instance state. For
    +     * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance],
    +     * the state must be either omitted or set to `CREATING`. For
    +     * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance],
    +     * the state must be either omitted or set to `READY`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.State state = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The state to set. + * @return This builder for chaining. + */ + public Builder setState(com.google.spanner.admin.instance.v1.Instance.State value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000080; + state_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The current instance state. For
    +     * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance],
    +     * the state must be either omitted or set to `CREATING`. For
    +     * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance],
    +     * the state must be either omitted or set to `READY`.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.State state = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000080); + state_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.MapField labels_; + + private com.google.protobuf.MapField internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField(LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + private com.google.protobuf.MapField + internalGetMutableLabels() { + if (labels_ == null) { + labels_ = com.google.protobuf.MapField.newMapField(LabelsDefaultEntryHolder.defaultEntry); + } + if (!labels_.isMutable()) { + labels_ = labels_.copy(); + } + bitField0_ |= 0x00000100; + onChanged(); + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + + /** + * + * + *
    +     * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +     * resources into groups that reflect a customer's organizational needs and
    +     * deployment strategies. Cloud Labels can be used to filter collections of
    +     * resources. They can be used to control how resource metrics are aggregated.
    +     * And they can be used as arguments to policy management rules (e.g. route,
    +     * firewall, load balancing, etc.).
    +     *
    +     * * Label keys must be between 1 and 63 characters long and must conform to
    +     * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +     * * Label values must be between 0 and 63 characters long and must conform
    +     * to the regular expression `[a-z0-9_-]{0,63}`.
    +     * * No more than 64 labels can be associated with a given resource.
    +     *
    +     * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +     *
    +     * If you plan to use labels in your own code, please note that additional
    +     * characters may be allowed in the future. And so you are advised to use an
    +     * internal label representation, such as JSON, which doesn't rely upon
    +     * specific characters being disallowed.  For example, representing labels
    +     * as the string:  name + "_" + value  would prove problematic if we were to
    +     * allow "_" in a future release.
    +     * 
    + * + * map<string, string> labels = 7; + */ + @java.lang.Override + public boolean containsLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetLabels().getMap().containsKey(key); + } + + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + + /** + * + * + *
    +     * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +     * resources into groups that reflect a customer's organizational needs and
    +     * deployment strategies. Cloud Labels can be used to filter collections of
    +     * resources. They can be used to control how resource metrics are aggregated.
    +     * And they can be used as arguments to policy management rules (e.g. route,
    +     * firewall, load balancing, etc.).
    +     *
    +     * * Label keys must be between 1 and 63 characters long and must conform to
    +     * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +     * * Label values must be between 0 and 63 characters long and must conform
    +     * to the regular expression `[a-z0-9_-]{0,63}`.
    +     * * No more than 64 labels can be associated with a given resource.
    +     *
    +     * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +     *
    +     * If you plan to use labels in your own code, please note that additional
    +     * characters may be allowed in the future. And so you are advised to use an
    +     * internal label representation, such as JSON, which doesn't rely upon
    +     * specific characters being disallowed.  For example, representing labels
    +     * as the string:  name + "_" + value  would prove problematic if we were to
    +     * allow "_" in a future release.
    +     * 
    + * + * map<string, string> labels = 7; + */ + @java.lang.Override + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + + /** + * + * + *
    +     * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +     * resources into groups that reflect a customer's organizational needs and
    +     * deployment strategies. Cloud Labels can be used to filter collections of
    +     * resources. They can be used to control how resource metrics are aggregated.
    +     * And they can be used as arguments to policy management rules (e.g. route,
    +     * firewall, load balancing, etc.).
    +     *
    +     * * Label keys must be between 1 and 63 characters long and must conform to
    +     * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +     * * Label values must be between 0 and 63 characters long and must conform
    +     * to the regular expression `[a-z0-9_-]{0,63}`.
    +     * * No more than 64 labels can be associated with a given resource.
    +     *
    +     * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +     *
    +     * If you plan to use labels in your own code, please note that additional
    +     * characters may be allowed in the future. And so you are advised to use an
    +     * internal label representation, such as JSON, which doesn't rely upon
    +     * specific characters being disallowed.  For example, representing labels
    +     * as the string:  name + "_" + value  would prove problematic if we were to
    +     * allow "_" in a future release.
    +     * 
    + * + * map<string, string> labels = 7; + */ + @java.lang.Override + public /* nullable */ java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
    +     * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +     * resources into groups that reflect a customer's organizational needs and
    +     * deployment strategies. Cloud Labels can be used to filter collections of
    +     * resources. They can be used to control how resource metrics are aggregated.
    +     * And they can be used as arguments to policy management rules (e.g. route,
    +     * firewall, load balancing, etc.).
    +     *
    +     * * Label keys must be between 1 and 63 characters long and must conform to
    +     * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +     * * Label values must be between 0 and 63 characters long and must conform
    +     * to the regular expression `[a-z0-9_-]{0,63}`.
    +     * * No more than 64 labels can be associated with a given resource.
    +     *
    +     * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +     *
    +     * If you plan to use labels in your own code, please note that additional
    +     * characters may be allowed in the future. And so you are advised to use an
    +     * internal label representation, such as JSON, which doesn't rely upon
    +     * specific characters being disallowed.  For example, representing labels
    +     * as the string:  name + "_" + value  would prove problematic if we were to
    +     * allow "_" in a future release.
    +     * 
    + * + * map<string, string> labels = 7; + */ + @java.lang.Override + public java.lang.String getLabelsOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearLabels() { + bitField0_ = (bitField0_ & ~0x00000100); + internalGetMutableLabels().getMutableMap().clear(); + return this; + } + + /** + * + * + *
    +     * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +     * resources into groups that reflect a customer's organizational needs and
    +     * deployment strategies. Cloud Labels can be used to filter collections of
    +     * resources. They can be used to control how resource metrics are aggregated.
    +     * And they can be used as arguments to policy management rules (e.g. route,
    +     * firewall, load balancing, etc.).
    +     *
    +     * * Label keys must be between 1 and 63 characters long and must conform to
    +     * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +     * * Label values must be between 0 and 63 characters long and must conform
    +     * to the regular expression `[a-z0-9_-]{0,63}`.
    +     * * No more than 64 labels can be associated with a given resource.
    +     *
    +     * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +     *
    +     * If you plan to use labels in your own code, please note that additional
    +     * characters may be allowed in the future. And so you are advised to use an
    +     * internal label representation, such as JSON, which doesn't rely upon
    +     * specific characters being disallowed.  For example, representing labels
    +     * as the string:  name + "_" + value  would prove problematic if we were to
    +     * allow "_" in a future release.
    +     * 
    + * + * map<string, string> labels = 7; + */ + public Builder removeLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableLabels().getMutableMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableLabels() { + bitField0_ |= 0x00000100; + return internalGetMutableLabels().getMutableMap(); + } + + /** + * + * + *
    +     * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +     * resources into groups that reflect a customer's organizational needs and
    +     * deployment strategies. Cloud Labels can be used to filter collections of
    +     * resources. They can be used to control how resource metrics are aggregated.
    +     * And they can be used as arguments to policy management rules (e.g. route,
    +     * firewall, load balancing, etc.).
    +     *
    +     * * Label keys must be between 1 and 63 characters long and must conform to
    +     * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +     * * Label values must be between 0 and 63 characters long and must conform
    +     * to the regular expression `[a-z0-9_-]{0,63}`.
    +     * * No more than 64 labels can be associated with a given resource.
    +     *
    +     * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +     *
    +     * If you plan to use labels in your own code, please note that additional
    +     * characters may be allowed in the future. And so you are advised to use an
    +     * internal label representation, such as JSON, which doesn't rely upon
    +     * specific characters being disallowed.  For example, representing labels
    +     * as the string:  name + "_" + value  would prove problematic if we were to
    +     * allow "_" in a future release.
    +     * 
    + * + * map<string, string> labels = 7; + */ + public Builder putLabels(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableLabels().getMutableMap().put(key, value); + bitField0_ |= 0x00000100; + return this; + } + + /** + * + * + *
    +     * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +     * resources into groups that reflect a customer's organizational needs and
    +     * deployment strategies. Cloud Labels can be used to filter collections of
    +     * resources. They can be used to control how resource metrics are aggregated.
    +     * And they can be used as arguments to policy management rules (e.g. route,
    +     * firewall, load balancing, etc.).
    +     *
    +     * * Label keys must be between 1 and 63 characters long and must conform to
    +     * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +     * * Label values must be between 0 and 63 characters long and must conform
    +     * to the regular expression `[a-z0-9_-]{0,63}`.
    +     * * No more than 64 labels can be associated with a given resource.
    +     *
    +     * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +     *
    +     * If you plan to use labels in your own code, please note that additional
    +     * characters may be allowed in the future. And so you are advised to use an
    +     * internal label representation, such as JSON, which doesn't rely upon
    +     * specific characters being disallowed.  For example, representing labels
    +     * as the string:  name + "_" + value  would prove problematic if we were to
    +     * allow "_" in a future release.
    +     * 
    + * + * map<string, string> labels = 7; + */ + public Builder putAllLabels(java.util.Map values) { + internalGetMutableLabels().getMutableMap().putAll(values); + bitField0_ |= 0x00000100; + return this; + } + + private int instanceType_ = 0; + + /** + * + * + *
    +     * The `InstanceType` of the current instance.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance.InstanceType instance_type = 10; + * + * @return The enum numeric value on the wire for instanceType. + */ + @java.lang.Override + public int getInstanceTypeValue() { + return instanceType_; + } + + /** + * + * + *
    +     * The `InstanceType` of the current instance.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance.InstanceType instance_type = 10; + * + * @param value The enum numeric value on the wire for instanceType to set. + * @return This builder for chaining. + */ + public Builder setInstanceTypeValue(int value) { + instanceType_ = value; + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The `InstanceType` of the current instance.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance.InstanceType instance_type = 10; + * + * @return The instanceType. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance.InstanceType getInstanceType() { + com.google.spanner.admin.instance.v1.Instance.InstanceType result = + com.google.spanner.admin.instance.v1.Instance.InstanceType.forNumber(instanceType_); + return result == null + ? com.google.spanner.admin.instance.v1.Instance.InstanceType.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * The `InstanceType` of the current instance.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance.InstanceType instance_type = 10; + * + * @param value The instanceType to set. + * @return This builder for chaining. + */ + public Builder setInstanceType( + com.google.spanner.admin.instance.v1.Instance.InstanceType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000200; + instanceType_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The `InstanceType` of the current instance.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance.InstanceType instance_type = 10; + * + * @return This builder for chaining. + */ + public Builder clearInstanceType() { + bitField0_ = (bitField0_ & ~0x00000200); + instanceType_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList endpointUris_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureEndpointUrisIsMutable() { + if (!endpointUris_.isModifiable()) { + endpointUris_ = new com.google.protobuf.LazyStringArrayList(endpointUris_); + } + bitField0_ |= 0x00000400; + } + + /** + * + * + *
    +     * Deprecated. This field is not populated.
    +     * 
    + * + * repeated string endpoint_uris = 8; + * + * @return A list containing the endpointUris. + */ + public com.google.protobuf.ProtocolStringList getEndpointUrisList() { + endpointUris_.makeImmutable(); + return endpointUris_; + } + + /** + * + * + *
    +     * Deprecated. This field is not populated.
    +     * 
    + * + * repeated string endpoint_uris = 8; + * + * @return The count of endpointUris. + */ + public int getEndpointUrisCount() { + return endpointUris_.size(); + } + + /** + * + * + *
    +     * Deprecated. This field is not populated.
    +     * 
    + * + * repeated string endpoint_uris = 8; + * + * @param index The index of the element to return. + * @return The endpointUris at the given index. + */ + public java.lang.String getEndpointUris(int index) { + return endpointUris_.get(index); + } + + /** + * + * + *
    +     * Deprecated. This field is not populated.
    +     * 
    + * + * repeated string endpoint_uris = 8; + * + * @param index The index of the value to return. + * @return The bytes of the endpointUris at the given index. + */ + public com.google.protobuf.ByteString getEndpointUrisBytes(int index) { + return endpointUris_.getByteString(index); + } + + /** + * + * + *
    +     * Deprecated. This field is not populated.
    +     * 
    + * + * repeated string endpoint_uris = 8; + * + * @param index The index to set the value at. + * @param value The endpointUris to set. + * @return This builder for chaining. + */ + public Builder setEndpointUris(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureEndpointUrisIsMutable(); + endpointUris_.set(index, value); + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Deprecated. This field is not populated.
    +     * 
    + * + * repeated string endpoint_uris = 8; + * + * @param value The endpointUris to add. + * @return This builder for chaining. + */ + public Builder addEndpointUris(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureEndpointUrisIsMutable(); + endpointUris_.add(value); + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Deprecated. This field is not populated.
    +     * 
    + * + * repeated string endpoint_uris = 8; + * + * @param values The endpointUris to add. + * @return This builder for chaining. + */ + public Builder addAllEndpointUris(java.lang.Iterable values) { + ensureEndpointUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, endpointUris_); + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Deprecated. This field is not populated.
    +     * 
    + * + * repeated string endpoint_uris = 8; + * + * @return This builder for chaining. + */ + public Builder clearEndpointUris() { + endpointUris_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000400); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Deprecated. This field is not populated.
    +     * 
    + * + * repeated string endpoint_uris = 8; + * + * @param value The bytes of the endpointUris to add. + * @return This builder for chaining. + */ + public Builder addEndpointUrisBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureEndpointUrisIsMutable(); + endpointUris_.add(value); + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + + /** + * + * + *
    +     * Output only. The time at which the instance was created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000800) != 0); + } + + /** + * + * + *
    +     * Output only. The time at which the instance was created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. The time at which the instance was created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + } else { + createTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The time at which the instance was created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The time at which the instance was created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (((bitField0_ & 0x00000800) != 0) + && createTime_ != null + && createTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCreateTimeBuilder().mergeFrom(value); + } else { + createTime_ = value; + } + } else { + createTimeBuilder_.mergeFrom(value); + } + if (createTime_ != null) { + bitField0_ |= 0x00000800; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. The time at which the instance was created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000800); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The time at which the instance was created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + bitField0_ |= 0x00000800; + onChanged(); + return internalGetCreateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. The time at which the instance was created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + + /** + * + * + *
    +     * Output only. The time at which the instance was created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private com.google.protobuf.Timestamp updateTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + updateTimeBuilder_; + + /** + * + * + *
    +     * Output only. The time at which the instance was most recently updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00001000) != 0); + } + + /** + * + * + *
    +     * Output only. The time at which the instance was most recently updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + public com.google.protobuf.Timestamp getUpdateTime() { + if (updateTimeBuilder_ == null) { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } else { + return updateTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. The time at which the instance was most recently updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateTime_ = value; + } else { + updateTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The time at which the instance was most recently updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (updateTimeBuilder_ == null) { + updateTime_ = builderForValue.build(); + } else { + updateTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The time at which the instance was most recently updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (((bitField0_ & 0x00001000) != 0) + && updateTime_ != null + && updateTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getUpdateTimeBuilder().mergeFrom(value); + } else { + updateTime_ = value; + } + } else { + updateTimeBuilder_.mergeFrom(value); + } + if (updateTime_ != null) { + bitField0_ |= 0x00001000; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. The time at which the instance was most recently updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearUpdateTime() { + bitField0_ = (bitField0_ & ~0x00001000); + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The time at which the instance was most recently updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getUpdateTimeBuilder() { + bitField0_ |= 0x00001000; + onChanged(); + return internalGetUpdateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. The time at which the instance was most recently updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + if (updateTimeBuilder_ != null) { + return updateTimeBuilder_.getMessageOrBuilder(); + } else { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } + } + + /** + * + * + *
    +     * Output only. The time at which the instance was most recently updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetUpdateTimeFieldBuilder() { + if (updateTimeBuilder_ == null) { + updateTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getUpdateTime(), getParentForChildren(), isClean()); + updateTime_ = null; + } + return updateTimeBuilder_; + } + + private com.google.spanner.admin.instance.v1.FreeInstanceMetadata freeInstanceMetadata_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.FreeInstanceMetadata, + com.google.spanner.admin.instance.v1.FreeInstanceMetadata.Builder, + com.google.spanner.admin.instance.v1.FreeInstanceMetadataOrBuilder> + freeInstanceMetadataBuilder_; + + /** + * + * + *
    +     * Free instance metadata. Only populated for free instances.
    +     * 
    + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata free_instance_metadata = 13; + * + * + * @return Whether the freeInstanceMetadata field is set. + */ + public boolean hasFreeInstanceMetadata() { + return ((bitField0_ & 0x00002000) != 0); + } + + /** + * + * + *
    +     * Free instance metadata. Only populated for free instances.
    +     * 
    + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata free_instance_metadata = 13; + * + * + * @return The freeInstanceMetadata. + */ + public com.google.spanner.admin.instance.v1.FreeInstanceMetadata getFreeInstanceMetadata() { + if (freeInstanceMetadataBuilder_ == null) { + return freeInstanceMetadata_ == null + ? com.google.spanner.admin.instance.v1.FreeInstanceMetadata.getDefaultInstance() + : freeInstanceMetadata_; + } else { + return freeInstanceMetadataBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Free instance metadata. Only populated for free instances.
    +     * 
    + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata free_instance_metadata = 13; + * + */ + public Builder setFreeInstanceMetadata( + com.google.spanner.admin.instance.v1.FreeInstanceMetadata value) { + if (freeInstanceMetadataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + freeInstanceMetadata_ = value; + } else { + freeInstanceMetadataBuilder_.setMessage(value); + } + bitField0_ |= 0x00002000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Free instance metadata. Only populated for free instances.
    +     * 
    + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata free_instance_metadata = 13; + * + */ + public Builder setFreeInstanceMetadata( + com.google.spanner.admin.instance.v1.FreeInstanceMetadata.Builder builderForValue) { + if (freeInstanceMetadataBuilder_ == null) { + freeInstanceMetadata_ = builderForValue.build(); + } else { + freeInstanceMetadataBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00002000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Free instance metadata. Only populated for free instances.
    +     * 
    + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata free_instance_metadata = 13; + * + */ + public Builder mergeFreeInstanceMetadata( + com.google.spanner.admin.instance.v1.FreeInstanceMetadata value) { + if (freeInstanceMetadataBuilder_ == null) { + if (((bitField0_ & 0x00002000) != 0) + && freeInstanceMetadata_ != null + && freeInstanceMetadata_ + != com.google.spanner.admin.instance.v1.FreeInstanceMetadata.getDefaultInstance()) { + getFreeInstanceMetadataBuilder().mergeFrom(value); + } else { + freeInstanceMetadata_ = value; + } + } else { + freeInstanceMetadataBuilder_.mergeFrom(value); + } + if (freeInstanceMetadata_ != null) { + bitField0_ |= 0x00002000; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Free instance metadata. Only populated for free instances.
    +     * 
    + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata free_instance_metadata = 13; + * + */ + public Builder clearFreeInstanceMetadata() { + bitField0_ = (bitField0_ & ~0x00002000); + freeInstanceMetadata_ = null; + if (freeInstanceMetadataBuilder_ != null) { + freeInstanceMetadataBuilder_.dispose(); + freeInstanceMetadataBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Free instance metadata. Only populated for free instances.
    +     * 
    + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata free_instance_metadata = 13; + * + */ + public com.google.spanner.admin.instance.v1.FreeInstanceMetadata.Builder + getFreeInstanceMetadataBuilder() { + bitField0_ |= 0x00002000; + onChanged(); + return internalGetFreeInstanceMetadataFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Free instance metadata. Only populated for free instances.
    +     * 
    + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata free_instance_metadata = 13; + * + */ + public com.google.spanner.admin.instance.v1.FreeInstanceMetadataOrBuilder + getFreeInstanceMetadataOrBuilder() { + if (freeInstanceMetadataBuilder_ != null) { + return freeInstanceMetadataBuilder_.getMessageOrBuilder(); + } else { + return freeInstanceMetadata_ == null + ? com.google.spanner.admin.instance.v1.FreeInstanceMetadata.getDefaultInstance() + : freeInstanceMetadata_; + } + } + + /** + * + * + *
    +     * Free instance metadata. Only populated for free instances.
    +     * 
    + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata free_instance_metadata = 13; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.FreeInstanceMetadata, + com.google.spanner.admin.instance.v1.FreeInstanceMetadata.Builder, + com.google.spanner.admin.instance.v1.FreeInstanceMetadataOrBuilder> + internalGetFreeInstanceMetadataFieldBuilder() { + if (freeInstanceMetadataBuilder_ == null) { + freeInstanceMetadataBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.FreeInstanceMetadata, + com.google.spanner.admin.instance.v1.FreeInstanceMetadata.Builder, + com.google.spanner.admin.instance.v1.FreeInstanceMetadataOrBuilder>( + getFreeInstanceMetadata(), getParentForChildren(), isClean()); + freeInstanceMetadata_ = null; + } + return freeInstanceMetadataBuilder_; + } + + private int edition_ = 0; + + /** + * + * + *
    +     * Optional. The `Edition` of the current instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for edition. + */ + @java.lang.Override + public int getEditionValue() { + return edition_; + } + + /** + * + * + *
    +     * Optional. The `Edition` of the current instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The enum numeric value on the wire for edition to set. + * @return This builder for chaining. + */ + public Builder setEditionValue(int value) { + edition_ = value; + bitField0_ |= 0x00004000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The `Edition` of the current instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The edition. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance.Edition getEdition() { + com.google.spanner.admin.instance.v1.Instance.Edition result = + com.google.spanner.admin.instance.v1.Instance.Edition.forNumber(edition_); + return result == null + ? com.google.spanner.admin.instance.v1.Instance.Edition.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * Optional. The `Edition` of the current instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The edition to set. + * @return This builder for chaining. + */ + public Builder setEdition(com.google.spanner.admin.instance.v1.Instance.Edition value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00004000; + edition_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The `Edition` of the current instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearEdition() { + bitField0_ = (bitField0_ & ~0x00004000); + edition_ = 0; + onChanged(); + return this; + } + + private int defaultBackupScheduleType_ = 0; + + /** + * + * + *
    +     * Optional. Controls the default backup schedule behavior for new databases
    +     * within the instance. By default, a backup schedule is created automatically
    +     * when a new database is created in a new instance.
    +     *
    +     * Note that the `AUTOMATIC` value isn't permitted for free instances,
    +     * as backups and backup schedules aren't supported for free instances.
    +     *
    +     * In the `GetInstance` or `ListInstances` response, if the value of
    +     * `default_backup_schedule_type` isn't set, or set to `NONE`, Spanner doesn't
    +     * create a default backup schedule for new databases in the instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType default_backup_schedule_type = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for defaultBackupScheduleType. + */ + @java.lang.Override + public int getDefaultBackupScheduleTypeValue() { + return defaultBackupScheduleType_; + } + + /** + * + * + *
    +     * Optional. Controls the default backup schedule behavior for new databases
    +     * within the instance. By default, a backup schedule is created automatically
    +     * when a new database is created in a new instance.
    +     *
    +     * Note that the `AUTOMATIC` value isn't permitted for free instances,
    +     * as backups and backup schedules aren't supported for free instances.
    +     *
    +     * In the `GetInstance` or `ListInstances` response, if the value of
    +     * `default_backup_schedule_type` isn't set, or set to `NONE`, Spanner doesn't
    +     * create a default backup schedule for new databases in the instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType default_backup_schedule_type = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The enum numeric value on the wire for defaultBackupScheduleType to set. + * @return This builder for chaining. + */ + public Builder setDefaultBackupScheduleTypeValue(int value) { + defaultBackupScheduleType_ = value; + bitField0_ |= 0x00008000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Controls the default backup schedule behavior for new databases
    +     * within the instance. By default, a backup schedule is created automatically
    +     * when a new database is created in a new instance.
    +     *
    +     * Note that the `AUTOMATIC` value isn't permitted for free instances,
    +     * as backups and backup schedules aren't supported for free instances.
    +     *
    +     * In the `GetInstance` or `ListInstances` response, if the value of
    +     * `default_backup_schedule_type` isn't set, or set to `NONE`, Spanner doesn't
    +     * create a default backup schedule for new databases in the instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType default_backup_schedule_type = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The defaultBackupScheduleType. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType + getDefaultBackupScheduleType() { + com.google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType result = + com.google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType.forNumber( + defaultBackupScheduleType_); + return result == null + ? com.google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * Optional. Controls the default backup schedule behavior for new databases
    +     * within the instance. By default, a backup schedule is created automatically
    +     * when a new database is created in a new instance.
    +     *
    +     * Note that the `AUTOMATIC` value isn't permitted for free instances,
    +     * as backups and backup schedules aren't supported for free instances.
    +     *
    +     * In the `GetInstance` or `ListInstances` response, if the value of
    +     * `default_backup_schedule_type` isn't set, or set to `NONE`, Spanner doesn't
    +     * create a default backup schedule for new databases in the instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType default_backup_schedule_type = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The defaultBackupScheduleType to set. + * @return This builder for chaining. + */ + public Builder setDefaultBackupScheduleType( + com.google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00008000; + defaultBackupScheduleType_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Controls the default backup schedule behavior for new databases
    +     * within the instance. By default, a backup schedule is created automatically
    +     * when a new database is created in a new instance.
    +     *
    +     * Note that the `AUTOMATIC` value isn't permitted for free instances,
    +     * as backups and backup schedules aren't supported for free instances.
    +     *
    +     * In the `GetInstance` or `ListInstances` response, if the value of
    +     * `default_backup_schedule_type` isn't set, or set to `NONE`, Spanner doesn't
    +     * create a default backup schedule for new databases in the instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType default_backup_schedule_type = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearDefaultBackupScheduleType() { + bitField0_ = (bitField0_ & ~0x00008000); + defaultBackupScheduleType_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.Instance) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.Instance) + private static final com.google.spanner.admin.instance.v1.Instance DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.Instance(); + } + + public static com.google.spanner.admin.instance.v1.Instance getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Instance parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.spanner.admin.instance.v1.Instance getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceConfig.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceConfig.java new file mode 100644 index 000000000000..ce4845c4b0d3 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceConfig.java @@ -0,0 +1,5141 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * A possible configuration for a Cloud Spanner instance. Configurations
    + * define the geographic placement of nodes and their replication.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.InstanceConfig} + */ +@com.google.protobuf.Generated +public final class InstanceConfig extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.InstanceConfig) + InstanceConfigOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "InstanceConfig"); + } + + // Use InstanceConfig.newBuilder() to construct. + private InstanceConfig(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private InstanceConfig() { + name_ = ""; + displayName_ = ""; + configType_ = 0; + replicas_ = java.util.Collections.emptyList(); + optionalReplicas_ = java.util.Collections.emptyList(); + baseConfig_ = ""; + etag_ = ""; + leaderOptions_ = com.google.protobuf.LazyStringArrayList.emptyList(); + state_ = 0; + freeInstanceAvailability_ = 0; + quorumType_ = 0; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_InstanceConfig_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 8: + return internalGetLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_InstanceConfig_fieldAccessorTable + 
.ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.InstanceConfig.class, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder.class); + } + + /** + * + * + *
    +   * The type of this configuration.
    +   * 
    + * + * Protobuf enum {@code google.spanner.admin.instance.v1.InstanceConfig.Type} + */ + public enum Type implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Unspecified.
    +     * 
    + * + * TYPE_UNSPECIFIED = 0; + */ + TYPE_UNSPECIFIED(0), + /** + * + * + *
    +     * Google-managed configuration.
    +     * 
    + * + * GOOGLE_MANAGED = 1; + */ + GOOGLE_MANAGED(1), + /** + * + * + *
    +     * User-managed configuration.
    +     * 
    + * + * USER_MANAGED = 2; + */ + USER_MANAGED(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Type"); + } + + /** + * + * + *
    +     * Unspecified.
    +     * 
    + * + * TYPE_UNSPECIFIED = 0; + */ + public static final int TYPE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * Google-managed configuration.
    +     * 
    + * + * GOOGLE_MANAGED = 1; + */ + public static final int GOOGLE_MANAGED_VALUE = 1; + + /** + * + * + *
    +     * User-managed configuration.
    +     * 
    + * + * USER_MANAGED = 2; + */ + public static final int USER_MANAGED_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Type valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Type forNumber(int value) { + switch (value) { + case 0: + return TYPE_UNSPECIFIED; + case 1: + return GOOGLE_MANAGED; + case 2: + return USER_MANAGED; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Type findValueByNumber(int number) { + return Type.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.InstanceConfig.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final Type[] VALUES = values(); + + public static Type 
valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Type(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.admin.instance.v1.InstanceConfig.Type) + } + + /** + * + * + *
    +   * Indicates the current state of the instance configuration.
    +   * 
    + * + * Protobuf enum {@code google.spanner.admin.instance.v1.InstanceConfig.State} + */ + public enum State implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * STATE_UNSPECIFIED = 0; + */ + STATE_UNSPECIFIED(0), + /** + * + * + *
    +     * The instance configuration is still being created.
    +     * 
    + * + * CREATING = 1; + */ + CREATING(1), + /** + * + * + *
    +     * The instance configuration is fully created and ready to be used to
    +     * create instances.
    +     * 
    + * + * READY = 2; + */ + READY(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "State"); + } + + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * STATE_UNSPECIFIED = 0; + */ + public static final int STATE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * The instance configuration is still being created.
    +     * 
    + * + * CREATING = 1; + */ + public static final int CREATING_VALUE = 1; + + /** + * + * + *
    +     * The instance configuration is fully created and ready to be used to
    +     * create instances.
    +     * 
    + * + * READY = 2; + */ + public static final int READY_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static State valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static State forNumber(int value) { + switch (value) { + case 0: + return STATE_UNSPECIFIED; + case 1: + return CREATING; + case 2: + return READY; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.InstanceConfig.getDescriptor() + .getEnumTypes() + .get(1); + } + + private static final State[] VALUES = values(); + + public static State valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if 
(desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private State(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.admin.instance.v1.InstanceConfig.State) + } + + /** + * + * + *
    +   * Describes the availability for free instances to be created in an instance
    +   * configuration.
    +   * 
    + * + * Protobuf enum {@code google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability} + */ + public enum FreeInstanceAvailability implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * FREE_INSTANCE_AVAILABILITY_UNSPECIFIED = 0; + */ + FREE_INSTANCE_AVAILABILITY_UNSPECIFIED(0), + /** + * + * + *
    +     * Indicates that free instances are available to be created in this
    +     * instance configuration.
    +     * 
    + * + * AVAILABLE = 1; + */ + AVAILABLE(1), + /** + * + * + *
    +     * Indicates that free instances are not supported in this instance
    +     * configuration.
    +     * 
    + * + * UNSUPPORTED = 2; + */ + UNSUPPORTED(2), + /** + * + * + *
    +     * Indicates that free instances are currently not available to be created
    +     * in this instance configuration.
    +     * 
    + * + * DISABLED = 3; + */ + DISABLED(3), + /** + * + * + *
    +     * Indicates that additional free instances cannot be created in this
    +     * instance configuration because the project has reached its limit of free
    +     * instances.
    +     * 
    + * + * QUOTA_EXCEEDED = 4; + */ + QUOTA_EXCEEDED(4), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "FreeInstanceAvailability"); + } + + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * FREE_INSTANCE_AVAILABILITY_UNSPECIFIED = 0; + */ + public static final int FREE_INSTANCE_AVAILABILITY_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * Indicates that free instances are available to be created in this
    +     * instance configuration.
    +     * 
    + * + * AVAILABLE = 1; + */ + public static final int AVAILABLE_VALUE = 1; + + /** + * + * + *
    +     * Indicates that free instances are not supported in this instance
    +     * configuration.
    +     * 
    + * + * UNSUPPORTED = 2; + */ + public static final int UNSUPPORTED_VALUE = 2; + + /** + * + * + *
    +     * Indicates that free instances are currently not available to be created
    +     * in this instance configuration.
    +     * 
    + * + * DISABLED = 3; + */ + public static final int DISABLED_VALUE = 3; + + /** + * + * + *
    +     * Indicates that additional free instances cannot be created in this
    +     * instance configuration because the project has reached its limit of free
    +     * instances.
    +     * 
    + * + * QUOTA_EXCEEDED = 4; + */ + public static final int QUOTA_EXCEEDED_VALUE = 4; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static FreeInstanceAvailability valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static FreeInstanceAvailability forNumber(int value) { + switch (value) { + case 0: + return FREE_INSTANCE_AVAILABILITY_UNSPECIFIED; + case 1: + return AVAILABLE; + case 2: + return UNSUPPORTED; + case 3: + return DISABLED; + case 4: + return QUOTA_EXCEEDED; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public FreeInstanceAvailability findValueByNumber(int number) { + return FreeInstanceAvailability.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return 
com.google.spanner.admin.instance.v1.InstanceConfig.getDescriptor() + .getEnumTypes() + .get(2); + } + + private static final FreeInstanceAvailability[] VALUES = values(); + + public static FreeInstanceAvailability valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private FreeInstanceAvailability(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability) + } + + /** + * + * + *
    +   * Indicates the quorum type of this instance configuration.
    +   * 
    + * + * Protobuf enum {@code google.spanner.admin.instance.v1.InstanceConfig.QuorumType} + */ + public enum QuorumType implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Quorum type not specified.
    +     * 
    + * + * QUORUM_TYPE_UNSPECIFIED = 0; + */ + QUORUM_TYPE_UNSPECIFIED(0), + /** + * + * + *
    +     * An instance configuration tagged with `REGION` quorum type forms a write
    +     * quorum in a single region.
    +     * 
    + * + * REGION = 1; + */ + REGION(1), + /** + * + * + *
    +     * An instance configuration tagged with the `DUAL_REGION` quorum type forms
    +     * a write quorum with exactly two read-write regions in a multi-region
    +     * configuration.
    +     *
    +     * This instance configuration requires failover in the event of
    +     * regional failures.
    +     * 
    + * + * DUAL_REGION = 2; + */ + DUAL_REGION(2), + /** + * + * + *
    +     * An instance configuration tagged with the `MULTI_REGION` quorum type
    +     * forms a write quorum from replicas that are spread across more than one
    +     * region in a multi-region configuration.
    +     * 
    + * + * MULTI_REGION = 3; + */ + MULTI_REGION(3), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "QuorumType"); + } + + /** + * + * + *
    +     * Quorum type not specified.
    +     * 
    + * + * QUORUM_TYPE_UNSPECIFIED = 0; + */ + public static final int QUORUM_TYPE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * An instance configuration tagged with `REGION` quorum type forms a write
    +     * quorum in a single region.
    +     * 
    + * + * REGION = 1; + */ + public static final int REGION_VALUE = 1; + + /** + * + * + *
    +     * An instance configuration tagged with the `DUAL_REGION` quorum type forms
    +     * a write quorum with exactly two read-write regions in a multi-region
    +     * configuration.
    +     *
    +     * This instance configuration requires failover in the event of
    +     * regional failures.
    +     * 
    + * + * DUAL_REGION = 2; + */ + public static final int DUAL_REGION_VALUE = 2; + + /** + * + * + *
    +     * An instance configuration tagged with the `MULTI_REGION` quorum type
    +     * forms a write quorum from replicas that are spread across more than one
    +     * region in a multi-region configuration.
    +     * 
    + * + * MULTI_REGION = 3; + */ + public static final int MULTI_REGION_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static QuorumType valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static QuorumType forNumber(int value) { + switch (value) { + case 0: + return QUORUM_TYPE_UNSPECIFIED; + case 1: + return REGION; + case 2: + return DUAL_REGION; + case 3: + return MULTI_REGION; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public QuorumType findValueByNumber(int number) { + return QuorumType.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.InstanceConfig.getDescriptor() + .getEnumTypes() + .get(3); + } + + private static final QuorumType[] VALUES = values(); + + public 
static QuorumType valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private QuorumType(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.admin.instance.v1.InstanceConfig.QuorumType) + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * A unique identifier for the instance configuration.  Values
    +   * are of the form
    +   * `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`.
    +   *
    +   * User instance configuration must start with `custom-`.
    +   * 
    + * + * string name = 1; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * A unique identifier for the instance configuration.  Values
    +   * are of the form
    +   * `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`.
    +   *
    +   * User instance configuration must start with `custom-`.
    +   * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DISPLAY_NAME_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object displayName_ = ""; + + /** + * + * + *
    +   * The name of this instance configuration as it appears in UIs.
    +   * 
    + * + * string display_name = 2; + * + * @return The displayName. + */ + @java.lang.Override + public java.lang.String getDisplayName() { + java.lang.Object ref = displayName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + displayName_ = s; + return s; + } + } + + /** + * + * + *
    +   * The name of this instance configuration as it appears in UIs.
    +   * 
    + * + * string display_name = 2; + * + * @return The bytes for displayName. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDisplayNameBytes() { + java.lang.Object ref = displayName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + displayName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CONFIG_TYPE_FIELD_NUMBER = 5; + private int configType_ = 0; + + /** + * + * + *
    +   * Output only. Whether this instance configuration is a Google-managed or
    +   * user-managed configuration.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.Type config_type = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for configType. + */ + @java.lang.Override + public int getConfigTypeValue() { + return configType_; + } + + /** + * + * + *
    +   * Output only. Whether this instance configuration is a Google-managed or
    +   * user-managed configuration.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.Type config_type = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The configType. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfig.Type getConfigType() { + com.google.spanner.admin.instance.v1.InstanceConfig.Type result = + com.google.spanner.admin.instance.v1.InstanceConfig.Type.forNumber(configType_); + return result == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.Type.UNRECOGNIZED + : result; + } + + public static final int REPLICAS_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private java.util.List replicas_; + + /** + * + * + *
    +   * The geographic placement of nodes in this instance configuration and their
    +   * replication properties.
    +   *
    +   * To create user-managed configurations, input
    +   * `replicas` must include all replicas in `replicas` of the `base_config`
    +   * and include one or more replicas in the `optional_replicas` of the
    +   * `base_config`.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + @java.lang.Override + public java.util.List getReplicasList() { + return replicas_; + } + + /** + * + * + *
    +   * The geographic placement of nodes in this instance configuration and their
    +   * replication properties.
    +   *
    +   * To create user-managed configurations, input
    +   * `replicas` must include all replicas in `replicas` of the `base_config`
    +   * and include one or more replicas in the `optional_replicas` of the
    +   * `base_config`.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + @java.lang.Override + public java.util.List + getReplicasOrBuilderList() { + return replicas_; + } + + /** + * + * + *
    +   * The geographic placement of nodes in this instance configuration and their
    +   * replication properties.
    +   *
    +   * To create user-managed configurations, input
    +   * `replicas` must include all replicas in `replicas` of the `base_config`
    +   * and include one or more replicas in the `optional_replicas` of the
    +   * `base_config`.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + @java.lang.Override + public int getReplicasCount() { + return replicas_.size(); + } + + /** + * + * + *
    +   * The geographic placement of nodes in this instance configuration and their
    +   * replication properties.
    +   *
    +   * To create user-managed configurations, input
    +   * `replicas` must include all replicas in `replicas` of the `base_config`
    +   * and include one or more replicas in the `optional_replicas` of the
    +   * `base_config`.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaInfo getReplicas(int index) { + return replicas_.get(index); + } + + /** + * + * + *
    +   * The geographic placement of nodes in this instance configuration and their
    +   * replication properties.
    +   *
    +   * To create user-managed configurations, input
    +   * `replicas` must include all replicas in `replicas` of the `base_config`
    +   * and include one or more replicas in the `optional_replicas` of the
    +   * `base_config`.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaInfoOrBuilder getReplicasOrBuilder(int index) { + return replicas_.get(index); + } + + public static final int OPTIONAL_REPLICAS_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private java.util.List optionalReplicas_; + + /** + * + * + *
    +   * Output only. The available optional replicas to choose from for
    +   * user-managed configurations. Populated for Google-managed configurations.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getOptionalReplicasList() { + return optionalReplicas_; + } + + /** + * + * + *
    +   * Output only. The available optional replicas to choose from for
    +   * user-managed configurations. Populated for Google-managed configurations.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getOptionalReplicasOrBuilderList() { + return optionalReplicas_; + } + + /** + * + * + *
    +   * Output only. The available optional replicas to choose from for
    +   * user-managed configurations. Populated for Google-managed configurations.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public int getOptionalReplicasCount() { + return optionalReplicas_.size(); + } + + /** + * + * + *
    +   * Output only. The available optional replicas to choose from for
    +   * user-managed configurations. Populated for Google-managed configurations.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaInfo getOptionalReplicas(int index) { + return optionalReplicas_.get(index); + } + + /** + * + * + *
    +   * Output only. The available optional replicas to choose from for
    +   * user-managed configurations. Populated for Google-managed configurations.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaInfoOrBuilder getOptionalReplicasOrBuilder( + int index) { + return optionalReplicas_.get(index); + } + + public static final int BASE_CONFIG_FIELD_NUMBER = 7; + + @SuppressWarnings("serial") + private volatile java.lang.Object baseConfig_ = ""; + + /** + * + * + *
    +   * Base configuration name, e.g. projects/<project_name>/instanceConfigs/nam3,
    +   * based on which this configuration is created. Only set for user-managed
    +   * configurations. `base_config` must refer to a configuration of type
    +   * `GOOGLE_MANAGED` in the same project as this configuration.
    +   * 
    + * + * string base_config = 7 [(.google.api.resource_reference) = { ... } + * + * @return The baseConfig. + */ + @java.lang.Override + public java.lang.String getBaseConfig() { + java.lang.Object ref = baseConfig_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + baseConfig_ = s; + return s; + } + } + + /** + * + * + *
    +   * Base configuration name, e.g. projects/<project_name>/instanceConfigs/nam3,
    +   * based on which this configuration is created. Only set for user-managed
    +   * configurations. `base_config` must refer to a configuration of type
    +   * `GOOGLE_MANAGED` in the same project as this configuration.
    +   * 
    + * + * string base_config = 7 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for baseConfig. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBaseConfigBytes() { + java.lang.Object ref = baseConfig_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + baseConfig_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int LABELS_FIELD_NUMBER = 8; + + private static final class LabelsDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_InstanceConfig_LabelsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField labels_; + + private com.google.protobuf.MapField internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField(LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + + /** + * + * + *
    +   * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +   * resources into groups that reflect a customer's organizational needs and
    +   * deployment strategies. Cloud Labels can be used to filter collections of
    +   * resources. They can be used to control how resource metrics are aggregated.
    +   * And they can be used as arguments to policy management rules (e.g. route,
    +   * firewall, load balancing, etc.).
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `[a-z0-9_-]{0,63}`.
    +   * * No more than 64 labels can be associated with a given resource.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   *
    +   * If you plan to use labels in your own code, please note that additional
    +   * characters may be allowed in the future. Therefore, you are advised to use
    +   * an internal label representation, such as JSON, which doesn't rely upon
    +   * specific characters being disallowed.  For example, representing labels
    +   * as the string:  name + "_" + value  would prove problematic if we were to
    +   * allow "_" in a future release.
    +   * 
    + * + * map<string, string> labels = 8; + */ + @java.lang.Override + public boolean containsLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetLabels().getMap().containsKey(key); + } + + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + + /** + * + * + *
    +   * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +   * resources into groups that reflect a customer's organizational needs and
    +   * deployment strategies. Cloud Labels can be used to filter collections of
    +   * resources. They can be used to control how resource metrics are aggregated.
    +   * And they can be used as arguments to policy management rules (e.g. route,
    +   * firewall, load balancing, etc.).
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `[a-z0-9_-]{0,63}`.
    +   * * No more than 64 labels can be associated with a given resource.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   *
    +   * If you plan to use labels in your own code, please note that additional
    +   * characters may be allowed in the future. Therefore, you are advised to use
    +   * an internal label representation, such as JSON, which doesn't rely upon
    +   * specific characters being disallowed.  For example, representing labels
    +   * as the string:  name + "_" + value  would prove problematic if we were to
    +   * allow "_" in a future release.
    +   * 
    + * + * map<string, string> labels = 8; + */ + @java.lang.Override + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + + /** + * + * + *
    +   * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +   * resources into groups that reflect a customer's organizational needs and
    +   * deployment strategies. Cloud Labels can be used to filter collections of
    +   * resources. They can be used to control how resource metrics are aggregated.
    +   * And they can be used as arguments to policy management rules (e.g. route,
    +   * firewall, load balancing, etc.).
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `[a-z0-9_-]{0,63}`.
    +   * * No more than 64 labels can be associated with a given resource.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   *
    +   * If you plan to use labels in your own code, please note that additional
    +   * characters may be allowed in the future. Therefore, you are advised to use
    +   * an internal label representation, such as JSON, which doesn't rely upon
    +   * specific characters being disallowed.  For example, representing labels
    +   * as the string:  name + "_" + value  would prove problematic if we were to
    +   * allow "_" in a future release.
    +   * 
    + * + * map<string, string> labels = 8; + */ + @java.lang.Override + public /* nullable */ java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
    +   * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +   * resources into groups that reflect a customer's organizational needs and
    +   * deployment strategies. Cloud Labels can be used to filter collections of
    +   * resources. They can be used to control how resource metrics are aggregated.
    +   * And they can be used as arguments to policy management rules (e.g. route,
    +   * firewall, load balancing, etc.).
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `[a-z0-9_-]{0,63}`.
    +   * * No more than 64 labels can be associated with a given resource.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   *
    +   * If you plan to use labels in your own code, please note that additional
    +   * characters may be allowed in the future. Therefore, you are advised to use
    +   * an internal label representation, such as JSON, which doesn't rely upon
    +   * specific characters being disallowed.  For example, representing labels
    +   * as the string:  name + "_" + value  would prove problematic if we were to
    +   * allow "_" in a future release.
    +   * 
    + * + * map<string, string> labels = 8; + */ + @java.lang.Override + public java.lang.String getLabelsOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int ETAG_FIELD_NUMBER = 9; + + @SuppressWarnings("serial") + private volatile java.lang.Object etag_ = ""; + + /** + * + * + *
    +   * etag is used for optimistic concurrency control as a way
    +   * to help prevent simultaneous updates of a instance configuration from
    +   * overwriting each other. It is strongly suggested that systems make use of
    +   * the etag in the read-modify-write cycle to perform instance configuration
    +   * updates in order to avoid race conditions: An etag is returned in the
    +   * response which contains instance configurations, and systems are expected
    +   * to put that etag in the request to update instance configuration to ensure
    +   * that their change is applied to the same version of the instance
    +   * configuration. If no etag is provided in the call to update the instance
    +   * configuration, then the existing instance configuration is overwritten
    +   * blindly.
    +   * 
    + * + * string etag = 9; + * + * @return The etag. + */ + @java.lang.Override + public java.lang.String getEtag() { + java.lang.Object ref = etag_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + etag_ = s; + return s; + } + } + + /** + * + * + *
    +   * etag is used for optimistic concurrency control as a way
    +   * to help prevent simultaneous updates of a instance configuration from
    +   * overwriting each other. It is strongly suggested that systems make use of
    +   * the etag in the read-modify-write cycle to perform instance configuration
    +   * updates in order to avoid race conditions: An etag is returned in the
    +   * response which contains instance configurations, and systems are expected
    +   * to put that etag in the request to update instance configuration to ensure
    +   * that their change is applied to the same version of the instance
    +   * configuration. If no etag is provided in the call to update the instance
    +   * configuration, then the existing instance configuration is overwritten
    +   * blindly.
    +   * 
    + * + * string etag = 9; + * + * @return The bytes for etag. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEtagBytes() { + java.lang.Object ref = etag_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + etag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int LEADER_OPTIONS_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList leaderOptions_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * Allowed values of the "default_leader" schema option for databases in
    +   * instances that use this instance configuration.
    +   * 
    + * + * repeated string leader_options = 4; + * + * @return A list containing the leaderOptions. + */ + public com.google.protobuf.ProtocolStringList getLeaderOptionsList() { + return leaderOptions_; + } + + /** + * + * + *
    +   * Allowed values of the "default_leader" schema option for databases in
    +   * instances that use this instance configuration.
    +   * 
    + * + * repeated string leader_options = 4; + * + * @return The count of leaderOptions. + */ + public int getLeaderOptionsCount() { + return leaderOptions_.size(); + } + + /** + * + * + *
    +   * Allowed values of the "default_leader" schema option for databases in
    +   * instances that use this instance configuration.
    +   * 
    + * + * repeated string leader_options = 4; + * + * @param index The index of the element to return. + * @return The leaderOptions at the given index. + */ + public java.lang.String getLeaderOptions(int index) { + return leaderOptions_.get(index); + } + + /** + * + * + *
    +   * Allowed values of the "default_leader" schema option for databases in
    +   * instances that use this instance configuration.
    +   * 
    + * + * repeated string leader_options = 4; + * + * @param index The index of the value to return. + * @return The bytes of the leaderOptions at the given index. + */ + public com.google.protobuf.ByteString getLeaderOptionsBytes(int index) { + return leaderOptions_.getByteString(index); + } + + public static final int RECONCILING_FIELD_NUMBER = 10; + private boolean reconciling_ = false; + + /** + * + * + *
    +   * Output only. If true, the instance configuration is being created or
    +   * updated. If false, there are no ongoing operations for the instance
    +   * configuration.
    +   * 
    + * + * bool reconciling = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The reconciling. + */ + @java.lang.Override + public boolean getReconciling() { + return reconciling_; + } + + public static final int STATE_FIELD_NUMBER = 11; + private int state_ = 0; + + /** + * + * + *
    +   * Output only. The current instance configuration state. Applicable only for
    +   * `USER_MANAGED` configurations.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.State state = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + @java.lang.Override + public int getStateValue() { + return state_; + } + + /** + * + * + *
    +   * Output only. The current instance configuration state. Applicable only for
    +   * `USER_MANAGED` configurations.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.State state = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfig.State getState() { + com.google.spanner.admin.instance.v1.InstanceConfig.State result = + com.google.spanner.admin.instance.v1.InstanceConfig.State.forNumber(state_); + return result == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.State.UNRECOGNIZED + : result; + } + + public static final int FREE_INSTANCE_AVAILABILITY_FIELD_NUMBER = 12; + private int freeInstanceAvailability_ = 0; + + /** + * + * + *
    +   * Output only. Describes whether free instances are available to be created
    +   * in this instance configuration.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability free_instance_availability = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for freeInstanceAvailability. + */ + @java.lang.Override + public int getFreeInstanceAvailabilityValue() { + return freeInstanceAvailability_; + } + + /** + * + * + *
    +   * Output only. Describes whether free instances are available to be created
    +   * in this instance configuration.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability free_instance_availability = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The freeInstanceAvailability. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability + getFreeInstanceAvailability() { + com.google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability result = + com.google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability.forNumber( + freeInstanceAvailability_); + return result == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability.UNRECOGNIZED + : result; + } + + public static final int QUORUM_TYPE_FIELD_NUMBER = 18; + private int quorumType_ = 0; + + /** + * + * + *
    +   * Output only. The `QuorumType` of the instance configuration.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.QuorumType quorum_type = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for quorumType. + */ + @java.lang.Override + public int getQuorumTypeValue() { + return quorumType_; + } + + /** + * + * + *
    +   * Output only. The `QuorumType` of the instance configuration.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.QuorumType quorum_type = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The quorumType. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfig.QuorumType getQuorumType() { + com.google.spanner.admin.instance.v1.InstanceConfig.QuorumType result = + com.google.spanner.admin.instance.v1.InstanceConfig.QuorumType.forNumber(quorumType_); + return result == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.QuorumType.UNRECOGNIZED + : result; + } + + public static final int STORAGE_LIMIT_PER_PROCESSING_UNIT_FIELD_NUMBER = 19; + private long storageLimitPerProcessingUnit_ = 0L; + + /** + * + * + *
    +   * Output only. The storage limit in bytes per processing unit.
    +   * 
    + * + * + * int64 storage_limit_per_processing_unit = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The storageLimitPerProcessingUnit. + */ + @java.lang.Override + public long getStorageLimitPerProcessingUnit() { + return storageLimitPerProcessingUnit_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(displayName_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, displayName_); + } + for (int i = 0; i < replicas_.size(); i++) { + output.writeMessage(3, replicas_.get(i)); + } + for (int i = 0; i < leaderOptions_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, leaderOptions_.getRaw(i)); + } + if (configType_ + != com.google.spanner.admin.instance.v1.InstanceConfig.Type.TYPE_UNSPECIFIED.getNumber()) { + output.writeEnum(5, configType_); + } + for (int i = 0; i < optionalReplicas_.size(); i++) { + output.writeMessage(6, optionalReplicas_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(baseConfig_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 7, baseConfig_); + } + com.google.protobuf.GeneratedMessage.serializeStringMapTo( + output, internalGetLabels(), LabelsDefaultEntryHolder.defaultEntry, 8); + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(etag_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 9, etag_); + } + if (reconciling_ != false) { + output.writeBool(10, reconciling_); + } + 
if (state_ + != com.google.spanner.admin.instance.v1.InstanceConfig.State.STATE_UNSPECIFIED + .getNumber()) { + output.writeEnum(11, state_); + } + if (freeInstanceAvailability_ + != com.google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability + .FREE_INSTANCE_AVAILABILITY_UNSPECIFIED + .getNumber()) { + output.writeEnum(12, freeInstanceAvailability_); + } + if (quorumType_ + != com.google.spanner.admin.instance.v1.InstanceConfig.QuorumType.QUORUM_TYPE_UNSPECIFIED + .getNumber()) { + output.writeEnum(18, quorumType_); + } + if (storageLimitPerProcessingUnit_ != 0L) { + output.writeInt64(19, storageLimitPerProcessingUnit_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(displayName_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, displayName_); + } + for (int i = 0; i < replicas_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, replicas_.get(i)); + } + { + int dataSize = 0; + for (int i = 0; i < leaderOptions_.size(); i++) { + dataSize += computeStringSizeNoTag(leaderOptions_.getRaw(i)); + } + size += dataSize; + size += 1 * getLeaderOptionsList().size(); + } + if (configType_ + != com.google.spanner.admin.instance.v1.InstanceConfig.Type.TYPE_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(5, configType_); + } + for (int i = 0; i < optionalReplicas_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, optionalReplicas_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(baseConfig_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(7, baseConfig_); + } 
+ for (java.util.Map.Entry entry : + internalGetLabels().getMap().entrySet()) { + com.google.protobuf.MapEntry labels__ = + LabelsDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(8, labels__); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(etag_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(9, etag_); + } + if (reconciling_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(10, reconciling_); + } + if (state_ + != com.google.spanner.admin.instance.v1.InstanceConfig.State.STATE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(11, state_); + } + if (freeInstanceAvailability_ + != com.google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability + .FREE_INSTANCE_AVAILABILITY_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(12, freeInstanceAvailability_); + } + if (quorumType_ + != com.google.spanner.admin.instance.v1.InstanceConfig.QuorumType.QUORUM_TYPE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(18, quorumType_); + } + if (storageLimitPerProcessingUnit_ != 0L) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size( + 19, storageLimitPerProcessingUnit_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.InstanceConfig)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.InstanceConfig other = + (com.google.spanner.admin.instance.v1.InstanceConfig) obj; + + if (!getName().equals(other.getName())) return false; + if (!getDisplayName().equals(other.getDisplayName())) 
return false; + if (configType_ != other.configType_) return false; + if (!getReplicasList().equals(other.getReplicasList())) return false; + if (!getOptionalReplicasList().equals(other.getOptionalReplicasList())) return false; + if (!getBaseConfig().equals(other.getBaseConfig())) return false; + if (!internalGetLabels().equals(other.internalGetLabels())) return false; + if (!getEtag().equals(other.getEtag())) return false; + if (!getLeaderOptionsList().equals(other.getLeaderOptionsList())) return false; + if (getReconciling() != other.getReconciling()) return false; + if (state_ != other.state_) return false; + if (freeInstanceAvailability_ != other.freeInstanceAvailability_) return false; + if (quorumType_ != other.quorumType_) return false; + if (getStorageLimitPerProcessingUnit() != other.getStorageLimitPerProcessingUnit()) + return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + DISPLAY_NAME_FIELD_NUMBER; + hash = (53 * hash) + getDisplayName().hashCode(); + hash = (37 * hash) + CONFIG_TYPE_FIELD_NUMBER; + hash = (53 * hash) + configType_; + if (getReplicasCount() > 0) { + hash = (37 * hash) + REPLICAS_FIELD_NUMBER; + hash = (53 * hash) + getReplicasList().hashCode(); + } + if (getOptionalReplicasCount() > 0) { + hash = (37 * hash) + OPTIONAL_REPLICAS_FIELD_NUMBER; + hash = (53 * hash) + getOptionalReplicasList().hashCode(); + } + hash = (37 * hash) + BASE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getBaseConfig().hashCode(); + if (!internalGetLabels().getMap().isEmpty()) { + hash = (37 * hash) + LABELS_FIELD_NUMBER; + hash = (53 * hash) + internalGetLabels().hashCode(); + } + hash = (37 * hash) + ETAG_FIELD_NUMBER; + hash 
= (53 * hash) + getEtag().hashCode(); + if (getLeaderOptionsCount() > 0) { + hash = (37 * hash) + LEADER_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getLeaderOptionsList().hashCode(); + } + hash = (37 * hash) + RECONCILING_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getReconciling()); + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + state_; + hash = (37 * hash) + FREE_INSTANCE_AVAILABILITY_FIELD_NUMBER; + hash = (53 * hash) + freeInstanceAvailability_; + hash = (37 * hash) + QUORUM_TYPE_FIELD_NUMBER; + hash = (53 * hash) + quorumType_; + hash = (37 * hash) + STORAGE_LIMIT_PER_PROCESSING_UNIT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getStorageLimitPerProcessingUnit()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.InstanceConfig parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.InstanceConfig parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.InstanceConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.InstanceConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.InstanceConfig parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.InstanceConfig parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.InstanceConfig parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.InstanceConfig parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.InstanceConfig parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.InstanceConfig parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.InstanceConfig parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.InstanceConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.admin.instance.v1.InstanceConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * A possible configuration for a Cloud Spanner instance. Configurations
    +   * define the geographic placement of nodes and their replication.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.InstanceConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.InstanceConfig) + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_InstanceConfig_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 8: + return internalGetLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 8: + return internalGetMutableLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_InstanceConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.InstanceConfig.class, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.InstanceConfig.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + displayName_ = ""; + configType_ = 0; + if 
(replicasBuilder_ == null) { + replicas_ = java.util.Collections.emptyList(); + } else { + replicas_ = null; + replicasBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + if (optionalReplicasBuilder_ == null) { + optionalReplicas_ = java.util.Collections.emptyList(); + } else { + optionalReplicas_ = null; + optionalReplicasBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + baseConfig_ = ""; + internalGetMutableLabels().clear(); + etag_ = ""; + leaderOptions_ = com.google.protobuf.LazyStringArrayList.emptyList(); + reconciling_ = false; + state_ = 0; + freeInstanceAvailability_ = 0; + quorumType_ = 0; + storageLimitPerProcessingUnit_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_InstanceConfig_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfig getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfig build() { + com.google.spanner.admin.instance.v1.InstanceConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfig buildPartial() { + com.google.spanner.admin.instance.v1.InstanceConfig result = + new com.google.spanner.admin.instance.v1.InstanceConfig(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.admin.instance.v1.InstanceConfig result) { + if (replicasBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0)) { + replicas_ = 
java.util.Collections.unmodifiableList(replicas_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.replicas_ = replicas_; + } else { + result.replicas_ = replicasBuilder_.build(); + } + if (optionalReplicasBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0)) { + optionalReplicas_ = java.util.Collections.unmodifiableList(optionalReplicas_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.optionalReplicas_ = optionalReplicas_; + } else { + result.optionalReplicas_ = optionalReplicasBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.InstanceConfig result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.displayName_ = displayName_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.configType_ = configType_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.baseConfig_ = baseConfig_; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.labels_ = internalGetLabels(); + result.labels_.makeImmutable(); + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.etag_ = etag_; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + leaderOptions_.makeImmutable(); + result.leaderOptions_ = leaderOptions_; + } + if (((from_bitField0_ & 0x00000200) != 0)) { + result.reconciling_ = reconciling_; + } + if (((from_bitField0_ & 0x00000400) != 0)) { + result.state_ = state_; + } + if (((from_bitField0_ & 0x00000800) != 0)) { + result.freeInstanceAvailability_ = freeInstanceAvailability_; + } + if (((from_bitField0_ & 0x00001000) != 0)) { + result.quorumType_ = quorumType_; + } + if (((from_bitField0_ & 0x00002000) != 0)) { + result.storageLimitPerProcessingUnit_ = storageLimitPerProcessingUnit_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.InstanceConfig) { + return 
mergeFrom((com.google.spanner.admin.instance.v1.InstanceConfig) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.InstanceConfig other) { + if (other == com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getDisplayName().isEmpty()) { + displayName_ = other.displayName_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.configType_ != 0) { + setConfigTypeValue(other.getConfigTypeValue()); + } + if (replicasBuilder_ == null) { + if (!other.replicas_.isEmpty()) { + if (replicas_.isEmpty()) { + replicas_ = other.replicas_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureReplicasIsMutable(); + replicas_.addAll(other.replicas_); + } + onChanged(); + } + } else { + if (!other.replicas_.isEmpty()) { + if (replicasBuilder_.isEmpty()) { + replicasBuilder_.dispose(); + replicasBuilder_ = null; + replicas_ = other.replicas_; + bitField0_ = (bitField0_ & ~0x00000008); + replicasBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetReplicasFieldBuilder() + : null; + } else { + replicasBuilder_.addAllMessages(other.replicas_); + } + } + } + if (optionalReplicasBuilder_ == null) { + if (!other.optionalReplicas_.isEmpty()) { + if (optionalReplicas_.isEmpty()) { + optionalReplicas_ = other.optionalReplicas_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureOptionalReplicasIsMutable(); + optionalReplicas_.addAll(other.optionalReplicas_); + } + onChanged(); + } + } else { + if (!other.optionalReplicas_.isEmpty()) { + if (optionalReplicasBuilder_.isEmpty()) { + optionalReplicasBuilder_.dispose(); + optionalReplicasBuilder_ = null; + optionalReplicas_ = other.optionalReplicas_; + bitField0_ = (bitField0_ & ~0x00000010); + optionalReplicasBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetOptionalReplicasFieldBuilder() + : null; + } else { + optionalReplicasBuilder_.addAllMessages(other.optionalReplicas_); + } + } + } + if (!other.getBaseConfig().isEmpty()) { + baseConfig_ = other.baseConfig_; + bitField0_ |= 0x00000020; + onChanged(); + } + internalGetMutableLabels().mergeFrom(other.internalGetLabels()); + bitField0_ |= 0x00000040; + if (!other.getEtag().isEmpty()) { + etag_ = other.etag_; + bitField0_ |= 0x00000080; + onChanged(); + } + if (!other.leaderOptions_.isEmpty()) { + if (leaderOptions_.isEmpty()) { + leaderOptions_ = other.leaderOptions_; + bitField0_ |= 0x00000100; + } else { + ensureLeaderOptionsIsMutable(); + leaderOptions_.addAll(other.leaderOptions_); + } + onChanged(); + } + if (other.getReconciling() != false) { + setReconciling(other.getReconciling()); + } + if (other.state_ != 0) { + setStateValue(other.getStateValue()); + } + if (other.freeInstanceAvailability_ != 0) { + setFreeInstanceAvailabilityValue(other.getFreeInstanceAvailabilityValue()); + } + if (other.quorumType_ != 0) { + setQuorumTypeValue(other.getQuorumTypeValue()); + } + if (other.getStorageLimitPerProcessingUnit() != 0L) { + 
setStorageLimitPerProcessingUnit(other.getStorageLimitPerProcessingUnit()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + displayName_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + com.google.spanner.admin.instance.v1.ReplicaInfo m = + input.readMessage( + com.google.spanner.admin.instance.v1.ReplicaInfo.parser(), + extensionRegistry); + if (replicasBuilder_ == null) { + ensureReplicasIsMutable(); + replicas_.add(m); + } else { + replicasBuilder_.addMessage(m); + } + break; + } // case 26 + case 34: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureLeaderOptionsIsMutable(); + leaderOptions_.add(s); + break; + } // case 34 + case 40: + { + configType_ = input.readEnum(); + bitField0_ |= 0x00000004; + break; + } // case 40 + case 50: + { + com.google.spanner.admin.instance.v1.ReplicaInfo m = + input.readMessage( + com.google.spanner.admin.instance.v1.ReplicaInfo.parser(), + extensionRegistry); + if (optionalReplicasBuilder_ == null) { + ensureOptionalReplicasIsMutable(); + optionalReplicas_.add(m); + } else { + optionalReplicasBuilder_.addMessage(m); + } + break; + } // case 50 + case 58: + { + baseConfig_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000020; + break; + } // case 58 + case 66: + { + com.google.protobuf.MapEntry labels__ = + 
input.readMessage( + LabelsDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableLabels() + .getMutableMap() + .put(labels__.getKey(), labels__.getValue()); + bitField0_ |= 0x00000040; + break; + } // case 66 + case 74: + { + etag_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000080; + break; + } // case 74 + case 80: + { + reconciling_ = input.readBool(); + bitField0_ |= 0x00000200; + break; + } // case 80 + case 88: + { + state_ = input.readEnum(); + bitField0_ |= 0x00000400; + break; + } // case 88 + case 96: + { + freeInstanceAvailability_ = input.readEnum(); + bitField0_ |= 0x00000800; + break; + } // case 96 + case 144: + { + quorumType_ = input.readEnum(); + bitField0_ |= 0x00001000; + break; + } // case 144 + case 152: + { + storageLimitPerProcessingUnit_ = input.readInt64(); + bitField0_ |= 0x00002000; + break; + } // case 152 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * A unique identifier for the instance configuration.  Values
    +     * are of the form
    +     * `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`.
    +     *
    +     * User instance configuration must start with `custom-`.
    +     * 
    + * + * string name = 1; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * A unique identifier for the instance configuration.  Values
    +     * are of the form
    +     * `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`.
    +     *
    +     * User instance configuration must start with `custom-`.
    +     * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * A unique identifier for the instance configuration.  Values
    +     * are of the form
    +     * `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`.
    +     *
    +     * User instance configuration must start with `custom-`.
    +     * 
    + * + * string name = 1; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A unique identifier for the instance configuration.  Values
    +     * are of the form
    +     * `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`.
    +     *
    +     * User instance configuration must start with `custom-`.
    +     * 
    + * + * string name = 1; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * A unique identifier for the instance configuration.  Values
    +     * are of the form
    +     * `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`.
    +     *
    +     * User instance configuration must start with `custom-`.
    +     * 
    + * + * string name = 1; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object displayName_ = ""; + + /** + * + * + *
    +     * The name of this instance configuration as it appears in UIs.
    +     * 
    + * + * string display_name = 2; + * + * @return The displayName. + */ + public java.lang.String getDisplayName() { + java.lang.Object ref = displayName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + displayName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The name of this instance configuration as it appears in UIs.
    +     * 
    + * + * string display_name = 2; + * + * @return The bytes for displayName. + */ + public com.google.protobuf.ByteString getDisplayNameBytes() { + java.lang.Object ref = displayName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + displayName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The name of this instance configuration as it appears in UIs.
    +     * 
    + * + * string display_name = 2; + * + * @param value The displayName to set. + * @return This builder for chaining. + */ + public Builder setDisplayName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + displayName_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The name of this instance configuration as it appears in UIs.
    +     * 
    + * + * string display_name = 2; + * + * @return This builder for chaining. + */ + public Builder clearDisplayName() { + displayName_ = getDefaultInstance().getDisplayName(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The name of this instance configuration as it appears in UIs.
    +     * 
    + * + * string display_name = 2; + * + * @param value The bytes for displayName to set. + * @return This builder for chaining. + */ + public Builder setDisplayNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + displayName_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private int configType_ = 0; + + /** + * + * + *
    +     * Output only. Whether this instance configuration is a Google-managed or
    +     * user-managed configuration.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.Type config_type = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for configType. + */ + @java.lang.Override + public int getConfigTypeValue() { + return configType_; + } + + /** + * + * + *
    +     * Output only. Whether this instance configuration is a Google-managed or
    +     * user-managed configuration.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.Type config_type = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The enum numeric value on the wire for configType to set. + * @return This builder for chaining. + */ + public Builder setConfigTypeValue(int value) { + configType_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Whether this instance configuration is a Google-managed or
    +     * user-managed configuration.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.Type config_type = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The configType. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfig.Type getConfigType() { + com.google.spanner.admin.instance.v1.InstanceConfig.Type result = + com.google.spanner.admin.instance.v1.InstanceConfig.Type.forNumber(configType_); + return result == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.Type.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * Output only. Whether this instance configuration is a Google-managed or
    +     * user-managed configuration.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.Type config_type = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The configType to set. + * @return This builder for chaining. + */ + public Builder setConfigType(com.google.spanner.admin.instance.v1.InstanceConfig.Type value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + configType_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Whether this instance configuration is a Google-managed or
    +     * user-managed configuration.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.Type config_type = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearConfigType() { + bitField0_ = (bitField0_ & ~0x00000004); + configType_ = 0; + onChanged(); + return this; + } + + private java.util.List replicas_ = + java.util.Collections.emptyList(); + + private void ensureReplicasIsMutable() { + if (!((bitField0_ & 0x00000008) != 0)) { + replicas_ = + new java.util.ArrayList(replicas_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.ReplicaInfo, + com.google.spanner.admin.instance.v1.ReplicaInfo.Builder, + com.google.spanner.admin.instance.v1.ReplicaInfoOrBuilder> + replicasBuilder_; + + /** + * + * + *
    +     * The geographic placement of nodes in this instance configuration and their
    +     * replication properties.
    +     *
    +     * To create user-managed configurations, input
    +     * `replicas` must include all replicas in `replicas` of the `base_config`
    +     * and include one or more replicas in the `optional_replicas` of the
    +     * `base_config`.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + public java.util.List getReplicasList() { + if (replicasBuilder_ == null) { + return java.util.Collections.unmodifiableList(replicas_); + } else { + return replicasBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * The geographic placement of nodes in this instance configuration and their
    +     * replication properties.
    +     *
    +     * To create user-managed configurations, input
    +     * `replicas` must include all replicas in `replicas` of the `base_config`
    +     * and include one or more replicas in the `optional_replicas` of the
    +     * `base_config`.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + public int getReplicasCount() { + if (replicasBuilder_ == null) { + return replicas_.size(); + } else { + return replicasBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * The geographic placement of nodes in this instance configuration and their
    +     * replication properties.
    +     *
    +     * To create user-managed configurations, input
    +     * `replicas` must include all replicas in `replicas` of the `base_config`
    +     * and include one or more replicas in the `optional_replicas` of the
    +     * `base_config`.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + public com.google.spanner.admin.instance.v1.ReplicaInfo getReplicas(int index) { + if (replicasBuilder_ == null) { + return replicas_.get(index); + } else { + return replicasBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * The geographic placement of nodes in this instance configuration and their
    +     * replication properties.
    +     *
    +     * To create user-managed configurations, input
    +     * `replicas` must include all replicas in `replicas` of the `base_config`
    +     * and include one or more replicas in the `optional_replicas` of the
    +     * `base_config`.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + public Builder setReplicas(int index, com.google.spanner.admin.instance.v1.ReplicaInfo value) { + if (replicasBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplicasIsMutable(); + replicas_.set(index, value); + onChanged(); + } else { + replicasBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The geographic placement of nodes in this instance configuration and their
    +     * replication properties.
    +     *
    +     * To create user-managed configurations, input
    +     * `replicas` must include all replicas in `replicas` of the `base_config`
    +     * and include one or more replicas in the `optional_replicas` of the
    +     * `base_config`.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + public Builder setReplicas( + int index, com.google.spanner.admin.instance.v1.ReplicaInfo.Builder builderForValue) { + if (replicasBuilder_ == null) { + ensureReplicasIsMutable(); + replicas_.set(index, builderForValue.build()); + onChanged(); + } else { + replicasBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The geographic placement of nodes in this instance configuration and their
    +     * replication properties.
    +     *
    +     * To create user-managed configurations, input
    +     * `replicas` must include all replicas in `replicas` of the `base_config`
    +     * and include one or more replicas in the `optional_replicas` of the
    +     * `base_config`.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + public Builder addReplicas(com.google.spanner.admin.instance.v1.ReplicaInfo value) { + if (replicasBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplicasIsMutable(); + replicas_.add(value); + onChanged(); + } else { + replicasBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * The geographic placement of nodes in this instance configuration and their
    +     * replication properties.
    +     *
    +     * To create user-managed configurations, input
    +     * `replicas` must include all replicas in `replicas` of the `base_config`
    +     * and include one or more replicas in the `optional_replicas` of the
    +     * `base_config`.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + public Builder addReplicas(int index, com.google.spanner.admin.instance.v1.ReplicaInfo value) { + if (replicasBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplicasIsMutable(); + replicas_.add(index, value); + onChanged(); + } else { + replicasBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The geographic placement of nodes in this instance configuration and their
    +     * replication properties.
    +     *
    +     * To create user-managed configurations, input
    +     * `replicas` must include all replicas in `replicas` of the `base_config`
    +     * and include one or more replicas in the `optional_replicas` of the
    +     * `base_config`.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + public Builder addReplicas( + com.google.spanner.admin.instance.v1.ReplicaInfo.Builder builderForValue) { + if (replicasBuilder_ == null) { + ensureReplicasIsMutable(); + replicas_.add(builderForValue.build()); + onChanged(); + } else { + replicasBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The geographic placement of nodes in this instance configuration and their
    +     * replication properties.
    +     *
    +     * To create user-managed configurations, input
    +     * `replicas` must include all replicas in `replicas` of the `base_config`
    +     * and include one or more replicas in the `optional_replicas` of the
    +     * `base_config`.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + public Builder addReplicas( + int index, com.google.spanner.admin.instance.v1.ReplicaInfo.Builder builderForValue) { + if (replicasBuilder_ == null) { + ensureReplicasIsMutable(); + replicas_.add(index, builderForValue.build()); + onChanged(); + } else { + replicasBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The geographic placement of nodes in this instance configuration and their
    +     * replication properties.
    +     *
    +     * To create user-managed configurations, input
    +     * `replicas` must include all replicas in `replicas` of the `base_config`
    +     * and include one or more replicas in the `optional_replicas` of the
    +     * `base_config`.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + public Builder addAllReplicas( + java.lang.Iterable values) { + if (replicasBuilder_ == null) { + ensureReplicasIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, replicas_); + onChanged(); + } else { + replicasBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * The geographic placement of nodes in this instance configuration and their
    +     * replication properties.
    +     *
    +     * To create user-managed configurations, input
    +     * `replicas` must include all replicas in `replicas` of the `base_config`
    +     * and include one or more replicas in the `optional_replicas` of the
    +     * `base_config`.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + public Builder clearReplicas() { + if (replicasBuilder_ == null) { + replicas_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + replicasBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * The geographic placement of nodes in this instance configuration and their
    +     * replication properties.
    +     *
    +     * To create user-managed configurations, input
    +     * `replicas` must include all replicas in `replicas` of the `base_config`
    +     * and include one or more replicas in the `optional_replicas` of the
    +     * `base_config`.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + public Builder removeReplicas(int index) { + if (replicasBuilder_ == null) { + ensureReplicasIsMutable(); + replicas_.remove(index); + onChanged(); + } else { + replicasBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * The geographic placement of nodes in this instance configuration and their
    +     * replication properties.
    +     *
    +     * To create user-managed configurations, input
    +     * `replicas` must include all replicas in `replicas` of the `base_config`
    +     * and include one or more replicas in the `optional_replicas` of the
    +     * `base_config`.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + public com.google.spanner.admin.instance.v1.ReplicaInfo.Builder getReplicasBuilder(int index) { + return internalGetReplicasFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * The geographic placement of nodes in this instance configuration and their
    +     * replication properties.
    +     *
    +     * To create user-managed configurations, input
    +     * `replicas` must include all replicas in `replicas` of the `base_config`
    +     * and include one or more replicas in the `optional_replicas` of the
    +     * `base_config`.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + public com.google.spanner.admin.instance.v1.ReplicaInfoOrBuilder getReplicasOrBuilder( + int index) { + if (replicasBuilder_ == null) { + return replicas_.get(index); + } else { + return replicasBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * The geographic placement of nodes in this instance configuration and their
    +     * replication properties.
    +     *
    +     * To create user-managed configurations, input
    +     * `replicas` must include all replicas in `replicas` of the `base_config`
    +     * and include one or more replicas in the `optional_replicas` of the
    +     * `base_config`.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + public java.util.List + getReplicasOrBuilderList() { + if (replicasBuilder_ != null) { + return replicasBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(replicas_); + } + } + + /** + * + * + *
    +     * The geographic placement of nodes in this instance configuration and their
    +     * replication properties.
    +     *
    +     * To create user-managed configurations, input
    +     * `replicas` must include all replicas in `replicas` of the `base_config`
    +     * and include one or more replicas in the `optional_replicas` of the
    +     * `base_config`.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + public com.google.spanner.admin.instance.v1.ReplicaInfo.Builder addReplicasBuilder() { + return internalGetReplicasFieldBuilder() + .addBuilder(com.google.spanner.admin.instance.v1.ReplicaInfo.getDefaultInstance()); + } + + /** + * + * + *
    +     * The geographic placement of nodes in this instance configuration and their
    +     * replication properties.
    +     *
    +     * To create user-managed configurations, input
    +     * `replicas` must include all replicas in `replicas` of the `base_config`
    +     * and include one or more replicas in the `optional_replicas` of the
    +     * `base_config`.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + public com.google.spanner.admin.instance.v1.ReplicaInfo.Builder addReplicasBuilder(int index) { + return internalGetReplicasFieldBuilder() + .addBuilder(index, com.google.spanner.admin.instance.v1.ReplicaInfo.getDefaultInstance()); + } + + /** + * + * + *
    +     * The geographic placement of nodes in this instance configuration and their
    +     * replication properties.
    +     *
    +     * To create user-managed configurations, input
    +     * `replicas` must include all replicas in `replicas` of the `base_config`
    +     * and include one or more replicas in the `optional_replicas` of the
    +     * `base_config`.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + public java.util.List + getReplicasBuilderList() { + return internalGetReplicasFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.ReplicaInfo, + com.google.spanner.admin.instance.v1.ReplicaInfo.Builder, + com.google.spanner.admin.instance.v1.ReplicaInfoOrBuilder> + internalGetReplicasFieldBuilder() { + if (replicasBuilder_ == null) { + replicasBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.ReplicaInfo, + com.google.spanner.admin.instance.v1.ReplicaInfo.Builder, + com.google.spanner.admin.instance.v1.ReplicaInfoOrBuilder>( + replicas_, ((bitField0_ & 0x00000008) != 0), getParentForChildren(), isClean()); + replicas_ = null; + } + return replicasBuilder_; + } + + private java.util.List optionalReplicas_ = + java.util.Collections.emptyList(); + + private void ensureOptionalReplicasIsMutable() { + if (!((bitField0_ & 0x00000010) != 0)) { + optionalReplicas_ = + new java.util.ArrayList( + optionalReplicas_); + bitField0_ |= 0x00000010; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.ReplicaInfo, + com.google.spanner.admin.instance.v1.ReplicaInfo.Builder, + com.google.spanner.admin.instance.v1.ReplicaInfoOrBuilder> + optionalReplicasBuilder_; + + /** + * + * + *
    +     * Output only. The available optional replicas to choose from for
    +     * user-managed configurations. Populated for Google-managed configurations.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getOptionalReplicasList() { + if (optionalReplicasBuilder_ == null) { + return java.util.Collections.unmodifiableList(optionalReplicas_); + } else { + return optionalReplicasBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Output only. The available optional replicas to choose from for
    +     * user-managed configurations. Populated for Google-managed configurations.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public int getOptionalReplicasCount() { + if (optionalReplicasBuilder_ == null) { + return optionalReplicas_.size(); + } else { + return optionalReplicasBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Output only. The available optional replicas to choose from for
    +     * user-managed configurations. Populated for Google-managed configurations.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.instance.v1.ReplicaInfo getOptionalReplicas(int index) { + if (optionalReplicasBuilder_ == null) { + return optionalReplicas_.get(index); + } else { + return optionalReplicasBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Output only. The available optional replicas to choose from for
    +     * user-managed configurations. Populated for Google-managed configurations.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setOptionalReplicas( + int index, com.google.spanner.admin.instance.v1.ReplicaInfo value) { + if (optionalReplicasBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOptionalReplicasIsMutable(); + optionalReplicas_.set(index, value); + onChanged(); + } else { + optionalReplicasBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Output only. The available optional replicas to choose from for
    +     * user-managed configurations. Populated for Google-managed configurations.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setOptionalReplicas( + int index, com.google.spanner.admin.instance.v1.ReplicaInfo.Builder builderForValue) { + if (optionalReplicasBuilder_ == null) { + ensureOptionalReplicasIsMutable(); + optionalReplicas_.set(index, builderForValue.build()); + onChanged(); + } else { + optionalReplicasBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Output only. The available optional replicas to choose from for
    +     * user-managed configurations. Populated for Google-managed configurations.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addOptionalReplicas(com.google.spanner.admin.instance.v1.ReplicaInfo value) { + if (optionalReplicasBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOptionalReplicasIsMutable(); + optionalReplicas_.add(value); + onChanged(); + } else { + optionalReplicasBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Output only. The available optional replicas to choose from for
    +     * user-managed configurations. Populated for Google-managed configurations.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addOptionalReplicas( + int index, com.google.spanner.admin.instance.v1.ReplicaInfo value) { + if (optionalReplicasBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOptionalReplicasIsMutable(); + optionalReplicas_.add(index, value); + onChanged(); + } else { + optionalReplicasBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Output only. The available optional replicas to choose from for
    +     * user-managed configurations. Populated for Google-managed configurations.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addOptionalReplicas( + com.google.spanner.admin.instance.v1.ReplicaInfo.Builder builderForValue) { + if (optionalReplicasBuilder_ == null) { + ensureOptionalReplicasIsMutable(); + optionalReplicas_.add(builderForValue.build()); + onChanged(); + } else { + optionalReplicasBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Output only. The available optional replicas to choose from for
    +     * user-managed configurations. Populated for Google-managed configurations.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addOptionalReplicas( + int index, com.google.spanner.admin.instance.v1.ReplicaInfo.Builder builderForValue) { + if (optionalReplicasBuilder_ == null) { + ensureOptionalReplicasIsMutable(); + optionalReplicas_.add(index, builderForValue.build()); + onChanged(); + } else { + optionalReplicasBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Output only. The available optional replicas to choose from for
    +     * user-managed configurations. Populated for Google-managed configurations.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addAllOptionalReplicas( + java.lang.Iterable values) { + if (optionalReplicasBuilder_ == null) { + ensureOptionalReplicasIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, optionalReplicas_); + onChanged(); + } else { + optionalReplicasBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Output only. The available optional replicas to choose from for
    +     * user-managed configurations. Populated for Google-managed configurations.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearOptionalReplicas() { + if (optionalReplicasBuilder_ == null) { + optionalReplicas_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + optionalReplicasBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Output only. The available optional replicas to choose from for
    +     * user-managed configurations. Populated for Google-managed configurations.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder removeOptionalReplicas(int index) { + if (optionalReplicasBuilder_ == null) { + ensureOptionalReplicasIsMutable(); + optionalReplicas_.remove(index); + onChanged(); + } else { + optionalReplicasBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Output only. The available optional replicas to choose from for
    +     * user-managed configurations. Populated for Google-managed configurations.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.instance.v1.ReplicaInfo.Builder getOptionalReplicasBuilder( + int index) { + return internalGetOptionalReplicasFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Output only. The available optional replicas to choose from for
    +     * user-managed configurations. Populated for Google-managed configurations.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.instance.v1.ReplicaInfoOrBuilder getOptionalReplicasOrBuilder( + int index) { + if (optionalReplicasBuilder_ == null) { + return optionalReplicas_.get(index); + } else { + return optionalReplicasBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Output only. The available optional replicas to choose from for
    +     * user-managed configurations. Populated for Google-managed configurations.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getOptionalReplicasOrBuilderList() { + if (optionalReplicasBuilder_ != null) { + return optionalReplicasBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(optionalReplicas_); + } + } + + /** + * + * + *
    +     * Output only. The available optional replicas to choose from for
    +     * user-managed configurations. Populated for Google-managed configurations.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.instance.v1.ReplicaInfo.Builder addOptionalReplicasBuilder() { + return internalGetOptionalReplicasFieldBuilder() + .addBuilder(com.google.spanner.admin.instance.v1.ReplicaInfo.getDefaultInstance()); + } + + /** + * + * + *
    +     * Output only. The available optional replicas to choose from for
    +     * user-managed configurations. Populated for Google-managed configurations.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.instance.v1.ReplicaInfo.Builder addOptionalReplicasBuilder( + int index) { + return internalGetOptionalReplicasFieldBuilder() + .addBuilder(index, com.google.spanner.admin.instance.v1.ReplicaInfo.getDefaultInstance()); + } + + /** + * + * + *
    +     * Output only. The available optional replicas to choose from for
    +     * user-managed configurations. Populated for Google-managed configurations.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getOptionalReplicasBuilderList() { + return internalGetOptionalReplicasFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.ReplicaInfo, + com.google.spanner.admin.instance.v1.ReplicaInfo.Builder, + com.google.spanner.admin.instance.v1.ReplicaInfoOrBuilder> + internalGetOptionalReplicasFieldBuilder() { + if (optionalReplicasBuilder_ == null) { + optionalReplicasBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.ReplicaInfo, + com.google.spanner.admin.instance.v1.ReplicaInfo.Builder, + com.google.spanner.admin.instance.v1.ReplicaInfoOrBuilder>( + optionalReplicas_, + ((bitField0_ & 0x00000010) != 0), + getParentForChildren(), + isClean()); + optionalReplicas_ = null; + } + return optionalReplicasBuilder_; + } + + private java.lang.Object baseConfig_ = ""; + + /** + * + * + *
    +     * Base configuration name, e.g. projects/<project_name>/instanceConfigs/nam3,
    +     * based on which this configuration is created. Only set for user-managed
    +     * configurations. `base_config` must refer to a configuration of type
    +     * `GOOGLE_MANAGED` in the same project as this configuration.
    +     * 
    + * + * string base_config = 7 [(.google.api.resource_reference) = { ... } + * + * @return The baseConfig. + */ + public java.lang.String getBaseConfig() { + java.lang.Object ref = baseConfig_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + baseConfig_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Base configuration name, e.g. projects/<project_name>/instanceConfigs/nam3,
    +     * based on which this configuration is created. Only set for user-managed
    +     * configurations. `base_config` must refer to a configuration of type
    +     * `GOOGLE_MANAGED` in the same project as this configuration.
    +     * 
    + * + * string base_config = 7 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for baseConfig. + */ + public com.google.protobuf.ByteString getBaseConfigBytes() { + java.lang.Object ref = baseConfig_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + baseConfig_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Base configuration name, e.g. projects/<project_name>/instanceConfigs/nam3,
    +     * based on which this configuration is created. Only set for user-managed
    +     * configurations. `base_config` must refer to a configuration of type
    +     * `GOOGLE_MANAGED` in the same project as this configuration.
    +     * 
    + * + * string base_config = 7 [(.google.api.resource_reference) = { ... } + * + * @param value The baseConfig to set. + * @return This builder for chaining. + */ + public Builder setBaseConfig(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + baseConfig_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Base configuration name, e.g. projects/<project_name>/instanceConfigs/nam3,
    +     * based on which this configuration is created. Only set for user-managed
    +     * configurations. `base_config` must refer to a configuration of type
    +     * `GOOGLE_MANAGED` in the same project as this configuration.
    +     * 
    + * + * string base_config = 7 [(.google.api.resource_reference) = { ... } + * + * @return This builder for chaining. + */ + public Builder clearBaseConfig() { + baseConfig_ = getDefaultInstance().getBaseConfig(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Base configuration name, e.g. projects/<project_name>/instanceConfigs/nam3,
    +     * based on which this configuration is created. Only set for user-managed
    +     * configurations. `base_config` must refer to a configuration of type
    +     * `GOOGLE_MANAGED` in the same project as this configuration.
    +     * 
    + * + * string base_config = 7 [(.google.api.resource_reference) = { ... } + * + * @param value The bytes for baseConfig to set. + * @return This builder for chaining. + */ + public Builder setBaseConfigBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + baseConfig_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + private com.google.protobuf.MapField labels_; + + private com.google.protobuf.MapField internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField(LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + private com.google.protobuf.MapField + internalGetMutableLabels() { + if (labels_ == null) { + labels_ = com.google.protobuf.MapField.newMapField(LabelsDefaultEntryHolder.defaultEntry); + } + if (!labels_.isMutable()) { + labels_ = labels_.copy(); + } + bitField0_ |= 0x00000040; + onChanged(); + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + + /** + * + * + *
    +     * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +     * resources into groups that reflect a customer's organizational needs and
    +     * deployment strategies. Cloud Labels can be used to filter collections of
    +     * resources. They can be used to control how resource metrics are aggregated.
    +     * And they can be used as arguments to policy management rules (e.g. route,
    +     * firewall, load balancing, etc.).
    +     *
    +     * * Label keys must be between 1 and 63 characters long and must conform to
    +     * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +     * * Label values must be between 0 and 63 characters long and must conform
    +     * to the regular expression `[a-z0-9_-]{0,63}`.
    +     * * No more than 64 labels can be associated with a given resource.
    +     *
    +     * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +     *
    +     * If you plan to use labels in your own code, please note that additional
    +     * characters may be allowed in the future. Therefore, you are advised to use
    +     * an internal label representation, such as JSON, which doesn't rely upon
    +     * specific characters being disallowed.  For example, representing labels
    +     * as the string:  name + "_" + value  would prove problematic if we were to
    +     * allow "_" in a future release.
    +     * 
    + * + * map<string, string> labels = 8; + */ + @java.lang.Override + public boolean containsLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetLabels().getMap().containsKey(key); + } + + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + + /** + * + * + *
    +     * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +     * resources into groups that reflect a customer's organizational needs and
    +     * deployment strategies. Cloud Labels can be used to filter collections of
    +     * resources. They can be used to control how resource metrics are aggregated.
    +     * And they can be used as arguments to policy management rules (e.g. route,
    +     * firewall, load balancing, etc.).
    +     *
    +     * * Label keys must be between 1 and 63 characters long and must conform to
    +     * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +     * * Label values must be between 0 and 63 characters long and must conform
    +     * to the regular expression `[a-z0-9_-]{0,63}`.
    +     * * No more than 64 labels can be associated with a given resource.
    +     *
    +     * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +     *
    +     * If you plan to use labels in your own code, please note that additional
    +     * characters may be allowed in the future. Therefore, you are advised to use
    +     * an internal label representation, such as JSON, which doesn't rely upon
    +     * specific characters being disallowed.  For example, representing labels
    +     * as the string:  name + "_" + value  would prove problematic if we were to
    +     * allow "_" in a future release.
    +     * 
    + * + * map<string, string> labels = 8; + */ + @java.lang.Override + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + + /** + * + * + *
    +     * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +     * resources into groups that reflect a customer's organizational needs and
    +     * deployment strategies. Cloud Labels can be used to filter collections of
    +     * resources. They can be used to control how resource metrics are aggregated.
    +     * And they can be used as arguments to policy management rules (e.g. route,
    +     * firewall, load balancing, etc.).
    +     *
    +     * * Label keys must be between 1 and 63 characters long and must conform to
    +     * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +     * * Label values must be between 0 and 63 characters long and must conform
    +     * to the regular expression `[a-z0-9_-]{0,63}`.
    +     * * No more than 64 labels can be associated with a given resource.
    +     *
    +     * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +     *
    +     * If you plan to use labels in your own code, please note that additional
    +     * characters may be allowed in the future. Therefore, you are advised to use
    +     * an internal label representation, such as JSON, which doesn't rely upon
    +     * specific characters being disallowed.  For example, representing labels
    +     * as the string:  name + "_" + value  would prove problematic if we were to
    +     * allow "_" in a future release.
    +     * 
    + * + * map<string, string> labels = 8; + */ + @java.lang.Override + public /* nullable */ java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
    +     * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +     * resources into groups that reflect a customer's organizational needs and
    +     * deployment strategies. Cloud Labels can be used to filter collections of
    +     * resources. They can be used to control how resource metrics are aggregated.
    +     * And they can be used as arguments to policy management rules (e.g. route,
    +     * firewall, load balancing, etc.).
    +     *
    +     * * Label keys must be between 1 and 63 characters long and must conform to
    +     * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +     * * Label values must be between 0 and 63 characters long and must conform
    +     * to the regular expression `[a-z0-9_-]{0,63}`.
    +     * * No more than 64 labels can be associated with a given resource.
    +     *
    +     * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +     *
    +     * If you plan to use labels in your own code, please note that additional
    +     * characters may be allowed in the future. Therefore, you are advised to use
    +     * an internal label representation, such as JSON, which doesn't rely upon
    +     * specific characters being disallowed.  For example, representing labels
    +     * as the string:  name + "_" + value  would prove problematic if we were to
    +     * allow "_" in a future release.
    +     * 
    + * + * map<string, string> labels = 8; + */ + @java.lang.Override + public java.lang.String getLabelsOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearLabels() { + bitField0_ = (bitField0_ & ~0x00000040); + internalGetMutableLabels().getMutableMap().clear(); + return this; + } + + /** + * + * + *
    +     * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +     * resources into groups that reflect a customer's organizational needs and
    +     * deployment strategies. Cloud Labels can be used to filter collections of
    +     * resources. They can be used to control how resource metrics are aggregated.
    +     * And they can be used as arguments to policy management rules (e.g. route,
    +     * firewall, load balancing, etc.).
    +     *
    +     * * Label keys must be between 1 and 63 characters long and must conform to
    +     * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +     * * Label values must be between 0 and 63 characters long and must conform
    +     * to the regular expression `[a-z0-9_-]{0,63}`.
    +     * * No more than 64 labels can be associated with a given resource.
    +     *
    +     * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +     *
    +     * If you plan to use labels in your own code, please note that additional
    +     * characters may be allowed in the future. Therefore, you are advised to use
    +     * an internal label representation, such as JSON, which doesn't rely upon
    +     * specific characters being disallowed.  For example, representing labels
    +     * as the string:  name + "_" + value  would prove problematic if we were to
    +     * allow "_" in a future release.
    +     * 
    + * + * map<string, string> labels = 8; + */ + public Builder removeLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableLabels().getMutableMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableLabels() { + bitField0_ |= 0x00000040; + return internalGetMutableLabels().getMutableMap(); + } + + /** + * + * + *
    +     * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +     * resources into groups that reflect a customer's organizational needs and
    +     * deployment strategies. Cloud Labels can be used to filter collections of
    +     * resources. They can be used to control how resource metrics are aggregated.
    +     * And they can be used as arguments to policy management rules (e.g. route,
    +     * firewall, load balancing, etc.).
    +     *
    +     * * Label keys must be between 1 and 63 characters long and must conform to
    +     * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +     * * Label values must be between 0 and 63 characters long and must conform
    +     * to the regular expression `[a-z0-9_-]{0,63}`.
    +     * * No more than 64 labels can be associated with a given resource.
    +     *
    +     * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +     *
    +     * If you plan to use labels in your own code, please note that additional
    +     * characters may be allowed in the future. Therefore, you are advised to use
    +     * an internal label representation, such as JSON, which doesn't rely upon
    +     * specific characters being disallowed.  For example, representing labels
    +     * as the string:  name + "_" + value  would prove problematic if we were to
    +     * allow "_" in a future release.
    +     * 
    + * + * map<string, string> labels = 8; + */ + public Builder putLabels(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableLabels().getMutableMap().put(key, value); + bitField0_ |= 0x00000040; + return this; + } + + /** + * + * + *
    +     * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +     * resources into groups that reflect a customer's organizational needs and
    +     * deployment strategies. Cloud Labels can be used to filter collections of
    +     * resources. They can be used to control how resource metrics are aggregated.
    +     * And they can be used as arguments to policy management rules (e.g. route,
    +     * firewall, load balancing, etc.).
    +     *
    +     * * Label keys must be between 1 and 63 characters long and must conform to
    +     * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +     * * Label values must be between 0 and 63 characters long and must conform
    +     * to the regular expression `[a-z0-9_-]{0,63}`.
    +     * * No more than 64 labels can be associated with a given resource.
    +     *
    +     * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +     *
    +     * If you plan to use labels in your own code, please note that additional
    +     * characters may be allowed in the future. Therefore, you are advised to use
    +     * an internal label representation, such as JSON, which doesn't rely upon
    +     * specific characters being disallowed.  For example, representing labels
    +     * as the string:  name + "_" + value  would prove problematic if we were to
    +     * allow "_" in a future release.
    +     * 
    + * + * map<string, string> labels = 8; + */ + public Builder putAllLabels(java.util.Map values) { + internalGetMutableLabels().getMutableMap().putAll(values); + bitField0_ |= 0x00000040; + return this; + } + + private java.lang.Object etag_ = ""; + + /** + * + * + *
    +     * etag is used for optimistic concurrency control as a way
    +     * to help prevent simultaneous updates of a instance configuration from
    +     * overwriting each other. It is strongly suggested that systems make use of
    +     * the etag in the read-modify-write cycle to perform instance configuration
    +     * updates in order to avoid race conditions: An etag is returned in the
    +     * response which contains instance configurations, and systems are expected
    +     * to put that etag in the request to update instance configuration to ensure
    +     * that their change is applied to the same version of the instance
    +     * configuration. If no etag is provided in the call to update the instance
    +     * configuration, then the existing instance configuration is overwritten
    +     * blindly.
    +     * 
    + * + * string etag = 9; + * + * @return The etag. + */ + public java.lang.String getEtag() { + java.lang.Object ref = etag_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + etag_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * etag is used for optimistic concurrency control as a way
    +     * to help prevent simultaneous updates of a instance configuration from
    +     * overwriting each other. It is strongly suggested that systems make use of
    +     * the etag in the read-modify-write cycle to perform instance configuration
    +     * updates in order to avoid race conditions: An etag is returned in the
    +     * response which contains instance configurations, and systems are expected
    +     * to put that etag in the request to update instance configuration to ensure
    +     * that their change is applied to the same version of the instance
    +     * configuration. If no etag is provided in the call to update the instance
    +     * configuration, then the existing instance configuration is overwritten
    +     * blindly.
    +     * 
    + * + * string etag = 9; + * + * @return The bytes for etag. + */ + public com.google.protobuf.ByteString getEtagBytes() { + java.lang.Object ref = etag_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + etag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * etag is used for optimistic concurrency control as a way
    +     * to help prevent simultaneous updates of a instance configuration from
    +     * overwriting each other. It is strongly suggested that systems make use of
    +     * the etag in the read-modify-write cycle to perform instance configuration
    +     * updates in order to avoid race conditions: An etag is returned in the
    +     * response which contains instance configurations, and systems are expected
    +     * to put that etag in the request to update instance configuration to ensure
    +     * that their change is applied to the same version of the instance
    +     * configuration. If no etag is provided in the call to update the instance
    +     * configuration, then the existing instance configuration is overwritten
    +     * blindly.
    +     * 
    + * + * string etag = 9; + * + * @param value The etag to set. + * @return This builder for chaining. + */ + public Builder setEtag(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + etag_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +     * etag is used for optimistic concurrency control as a way
    +     * to help prevent simultaneous updates of a instance configuration from
    +     * overwriting each other. It is strongly suggested that systems make use of
    +     * the etag in the read-modify-write cycle to perform instance configuration
    +     * updates in order to avoid race conditions: An etag is returned in the
    +     * response which contains instance configurations, and systems are expected
    +     * to put that etag in the request to update instance configuration to ensure
    +     * that their change is applied to the same version of the instance
    +     * configuration. If no etag is provided in the call to update the instance
    +     * configuration, then the existing instance configuration is overwritten
    +     * blindly.
    +     * 
    + * + * string etag = 9; + * + * @return This builder for chaining. + */ + public Builder clearEtag() { + etag_ = getDefaultInstance().getEtag(); + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + return this; + } + + /** + * + * + *
    +     * etag is used for optimistic concurrency control as a way
    +     * to help prevent simultaneous updates of a instance configuration from
    +     * overwriting each other. It is strongly suggested that systems make use of
    +     * the etag in the read-modify-write cycle to perform instance configuration
    +     * updates in order to avoid race conditions: An etag is returned in the
    +     * response which contains instance configurations, and systems are expected
    +     * to put that etag in the request to update instance configuration to ensure
    +     * that their change is applied to the same version of the instance
    +     * configuration. If no etag is provided in the call to update the instance
    +     * configuration, then the existing instance configuration is overwritten
    +     * blindly.
    +     * 
    + * + * string etag = 9; + * + * @param value The bytes for etag to set. + * @return This builder for chaining. + */ + public Builder setEtagBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + etag_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList leaderOptions_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureLeaderOptionsIsMutable() { + if (!leaderOptions_.isModifiable()) { + leaderOptions_ = new com.google.protobuf.LazyStringArrayList(leaderOptions_); + } + bitField0_ |= 0x00000100; + } + + /** + * + * + *
    +     * Allowed values of the "default_leader" schema option for databases in
    +     * instances that use this instance configuration.
    +     * 
    + * + * repeated string leader_options = 4; + * + * @return A list containing the leaderOptions. + */ + public com.google.protobuf.ProtocolStringList getLeaderOptionsList() { + leaderOptions_.makeImmutable(); + return leaderOptions_; + } + + /** + * + * + *
    +     * Allowed values of the "default_leader" schema option for databases in
    +     * instances that use this instance configuration.
    +     * 
    + * + * repeated string leader_options = 4; + * + * @return The count of leaderOptions. + */ + public int getLeaderOptionsCount() { + return leaderOptions_.size(); + } + + /** + * + * + *
    +     * Allowed values of the "default_leader" schema option for databases in
    +     * instances that use this instance configuration.
    +     * 
    + * + * repeated string leader_options = 4; + * + * @param index The index of the element to return. + * @return The leaderOptions at the given index. + */ + public java.lang.String getLeaderOptions(int index) { + return leaderOptions_.get(index); + } + + /** + * + * + *
    +     * Allowed values of the "default_leader" schema option for databases in
    +     * instances that use this instance configuration.
    +     * 
    + * + * repeated string leader_options = 4; + * + * @param index The index of the value to return. + * @return The bytes of the leaderOptions at the given index. + */ + public com.google.protobuf.ByteString getLeaderOptionsBytes(int index) { + return leaderOptions_.getByteString(index); + } + + /** + * + * + *
    +     * Allowed values of the "default_leader" schema option for databases in
    +     * instances that use this instance configuration.
    +     * 
    + * + * repeated string leader_options = 4; + * + * @param index The index to set the value at. + * @param value The leaderOptions to set. + * @return This builder for chaining. + */ + public Builder setLeaderOptions(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureLeaderOptionsIsMutable(); + leaderOptions_.set(index, value); + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Allowed values of the "default_leader" schema option for databases in
    +     * instances that use this instance configuration.
    +     * 
    + * + * repeated string leader_options = 4; + * + * @param value The leaderOptions to add. + * @return This builder for chaining. + */ + public Builder addLeaderOptions(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureLeaderOptionsIsMutable(); + leaderOptions_.add(value); + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Allowed values of the "default_leader" schema option for databases in
    +     * instances that use this instance configuration.
    +     * 
    + * + * repeated string leader_options = 4; + * + * @param values The leaderOptions to add. + * @return This builder for chaining. + */ + public Builder addAllLeaderOptions(java.lang.Iterable values) { + ensureLeaderOptionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, leaderOptions_); + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Allowed values of the "default_leader" schema option for databases in
    +     * instances that use this instance configuration.
    +     * 
    + * + * repeated string leader_options = 4; + * + * @return This builder for chaining. + */ + public Builder clearLeaderOptions() { + leaderOptions_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000100); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Allowed values of the "default_leader" schema option for databases in
    +     * instances that use this instance configuration.
    +     * 
    + * + * repeated string leader_options = 4; + * + * @param value The bytes of the leaderOptions to add. + * @return This builder for chaining. + */ + public Builder addLeaderOptionsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureLeaderOptionsIsMutable(); + leaderOptions_.add(value); + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + private boolean reconciling_; + + /** + * + * + *
    +     * Output only. If true, the instance configuration is being created or
    +     * updated. If false, there are no ongoing operations for the instance
    +     * configuration.
    +     * 
    + * + * bool reconciling = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The reconciling. + */ + @java.lang.Override + public boolean getReconciling() { + return reconciling_; + } + + /** + * + * + *
    +     * Output only. If true, the instance configuration is being created or
    +     * updated. If false, there are no ongoing operations for the instance
    +     * configuration.
    +     * 
    + * + * bool reconciling = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The reconciling to set. + * @return This builder for chaining. + */ + public Builder setReconciling(boolean value) { + + reconciling_ = value; + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. If true, the instance configuration is being created or
    +     * updated. If false, there are no ongoing operations for the instance
    +     * configuration.
    +     * 
    + * + * bool reconciling = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearReconciling() { + bitField0_ = (bitField0_ & ~0x00000200); + reconciling_ = false; + onChanged(); + return this; + } + + private int state_ = 0; + + /** + * + * + *
    +     * Output only. The current instance configuration state. Applicable only for
    +     * `USER_MANAGED` configurations.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.State state = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + @java.lang.Override + public int getStateValue() { + return state_; + } + + /** + * + * + *
    +     * Output only. The current instance configuration state. Applicable only for
    +     * `USER_MANAGED` configurations.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.State state = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The enum numeric value on the wire for state to set. + * @return This builder for chaining. + */ + public Builder setStateValue(int value) { + state_ = value; + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The current instance configuration state. Applicable only for
    +     * `USER_MANAGED` configurations.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.State state = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfig.State getState() { + com.google.spanner.admin.instance.v1.InstanceConfig.State result = + com.google.spanner.admin.instance.v1.InstanceConfig.State.forNumber(state_); + return result == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.State.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * Output only. The current instance configuration state. Applicable only for
    +     * `USER_MANAGED` configurations.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.State state = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The state to set. + * @return This builder for chaining. + */ + public Builder setState(com.google.spanner.admin.instance.v1.InstanceConfig.State value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000400; + state_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The current instance configuration state. Applicable only for
    +     * `USER_MANAGED` configurations.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.State state = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000400); + state_ = 0; + onChanged(); + return this; + } + + private int freeInstanceAvailability_ = 0; + + /** + * + * + *
    +     * Output only. Describes whether free instances are available to be created
    +     * in this instance configuration.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability free_instance_availability = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for freeInstanceAvailability. + */ + @java.lang.Override + public int getFreeInstanceAvailabilityValue() { + return freeInstanceAvailability_; + } + + /** + * + * + *
    +     * Output only. Describes whether free instances are available to be created
    +     * in this instance configuration.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability free_instance_availability = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The enum numeric value on the wire for freeInstanceAvailability to set. + * @return This builder for chaining. + */ + public Builder setFreeInstanceAvailabilityValue(int value) { + freeInstanceAvailability_ = value; + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Describes whether free instances are available to be created
    +     * in this instance configuration.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability free_instance_availability = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The freeInstanceAvailability. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability + getFreeInstanceAvailability() { + com.google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability result = + com.google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability.forNumber( + freeInstanceAvailability_); + return result == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability + .UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * Output only. Describes whether free instances are available to be created
    +     * in this instance configuration.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability free_instance_availability = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The freeInstanceAvailability to set. + * @return This builder for chaining. + */ + public Builder setFreeInstanceAvailability( + com.google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000800; + freeInstanceAvailability_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Describes whether free instances are available to be created
    +     * in this instance configuration.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability free_instance_availability = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearFreeInstanceAvailability() { + bitField0_ = (bitField0_ & ~0x00000800); + freeInstanceAvailability_ = 0; + onChanged(); + return this; + } + + private int quorumType_ = 0; + + /** + * + * + *
    +     * Output only. The `QuorumType` of the instance configuration.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.QuorumType quorum_type = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for quorumType. + */ + @java.lang.Override + public int getQuorumTypeValue() { + return quorumType_; + } + + /** + * + * + *
    +     * Output only. The `QuorumType` of the instance configuration.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.QuorumType quorum_type = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The enum numeric value on the wire for quorumType to set. + * @return This builder for chaining. + */ + public Builder setQuorumTypeValue(int value) { + quorumType_ = value; + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The `QuorumType` of the instance configuration.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.QuorumType quorum_type = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The quorumType. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfig.QuorumType getQuorumType() { + com.google.spanner.admin.instance.v1.InstanceConfig.QuorumType result = + com.google.spanner.admin.instance.v1.InstanceConfig.QuorumType.forNumber(quorumType_); + return result == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.QuorumType.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * Output only. The `QuorumType` of the instance configuration.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.QuorumType quorum_type = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The quorumType to set. + * @return This builder for chaining. + */ + public Builder setQuorumType( + com.google.spanner.admin.instance.v1.InstanceConfig.QuorumType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00001000; + quorumType_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The `QuorumType` of the instance configuration.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.QuorumType quorum_type = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearQuorumType() { + bitField0_ = (bitField0_ & ~0x00001000); + quorumType_ = 0; + onChanged(); + return this; + } + + private long storageLimitPerProcessingUnit_; + + /** + * + * + *
    +     * Output only. The storage limit in bytes per processing unit.
    +     * 
    + * + * + * int64 storage_limit_per_processing_unit = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The storageLimitPerProcessingUnit. + */ + @java.lang.Override + public long getStorageLimitPerProcessingUnit() { + return storageLimitPerProcessingUnit_; + } + + /** + * + * + *
    +     * Output only. The storage limit in bytes per processing unit.
    +     * 
    + * + * + * int64 storage_limit_per_processing_unit = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The storageLimitPerProcessingUnit to set. + * @return This builder for chaining. + */ + public Builder setStorageLimitPerProcessingUnit(long value) { + + storageLimitPerProcessingUnit_ = value; + bitField0_ |= 0x00002000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The storage limit in bytes per processing unit.
    +     * 
    + * + * + * int64 storage_limit_per_processing_unit = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearStorageLimitPerProcessingUnit() { + bitField0_ = (bitField0_ & ~0x00002000); + storageLimitPerProcessingUnit_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.InstanceConfig) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.InstanceConfig) + private static final com.google.spanner.admin.instance.v1.InstanceConfig DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.InstanceConfig(); + } + + public static com.google.spanner.admin.instance.v1.InstanceConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public InstanceConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.spanner.admin.instance.v1.InstanceConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceConfigName.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceConfigName.java new file mode 100644 index 000000000000..4ddad6fa8095 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceConfigName.java @@ -0,0 +1,193 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.spanner.admin.instance.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class InstanceConfigName implements ResourceName { + private static final PathTemplate PROJECT_INSTANCE_CONFIG = + PathTemplate.createWithoutUrlEncoding("projects/{project}/instanceConfigs/{instance_config}"); + private volatile Map fieldValuesMap; + private final String project; + private final String instanceConfig; + + @Deprecated + protected InstanceConfigName() { + project = null; + instanceConfig = null; + } + + private InstanceConfigName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + instanceConfig = Preconditions.checkNotNull(builder.getInstanceConfig()); + } + + public String getProject() { + return project; + } + + public String getInstanceConfig() { + return instanceConfig; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static InstanceConfigName of(String project, String instanceConfig) { + return newBuilder().setProject(project).setInstanceConfig(instanceConfig).build(); + } + + public static String format(String project, String instanceConfig) { + return newBuilder().setProject(project).setInstanceConfig(instanceConfig).build().toString(); + } + + public static InstanceConfigName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_INSTANCE_CONFIG.validatedMatch( + formattedString, "InstanceConfigName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("instance_config")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (InstanceConfigName value : values) { + if (value == null) { + 
list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_INSTANCE_CONFIG.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (instanceConfig != null) { + fieldMapBuilder.put("instance_config", instanceConfig); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_INSTANCE_CONFIG.instantiate( + "project", project, "instance_config", instanceConfig); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + InstanceConfigName that = ((InstanceConfigName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.instanceConfig, that.instanceConfig); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(instanceConfig); + return h; + } + + /** Builder for projects/{project}/instanceConfigs/{instance_config}. 
*/ + public static class Builder { + private String project; + private String instanceConfig; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getInstanceConfig() { + return instanceConfig; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setInstanceConfig(String instanceConfig) { + this.instanceConfig = instanceConfig; + return this; + } + + private Builder(InstanceConfigName instanceConfigName) { + this.project = instanceConfigName.project; + this.instanceConfig = instanceConfigName.instanceConfig; + } + + public InstanceConfigName build() { + return new InstanceConfigName(this); + } + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceConfigOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceConfigOrBuilder.java new file mode 100644 index 000000000000..a6613ef534e8 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceConfigOrBuilder.java @@ -0,0 +1,701 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface InstanceConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.InstanceConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * A unique identifier for the instance configuration.  Values
    +   * are of the form
    +   * `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`.
    +   *
    +   * User instance configuration must start with `custom-`.
    +   * 
    + * + * string name = 1; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * A unique identifier for the instance configuration.  Values
    +   * are of the form
    +   * `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`.
    +   *
    +   * User instance configuration must start with `custom-`.
    +   * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +   * The name of this instance configuration as it appears in UIs.
    +   * 
    + * + * string display_name = 2; + * + * @return The displayName. + */ + java.lang.String getDisplayName(); + + /** + * + * + *
    +   * The name of this instance configuration as it appears in UIs.
    +   * 
    + * + * string display_name = 2; + * + * @return The bytes for displayName. + */ + com.google.protobuf.ByteString getDisplayNameBytes(); + + /** + * + * + *
    +   * Output only. Whether this instance configuration is a Google-managed or
    +   * user-managed configuration.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.Type config_type = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for configType. + */ + int getConfigTypeValue(); + + /** + * + * + *
    +   * Output only. Whether this instance configuration is a Google-managed or
    +   * user-managed configuration.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.Type config_type = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The configType. + */ + com.google.spanner.admin.instance.v1.InstanceConfig.Type getConfigType(); + + /** + * + * + *
    +   * The geographic placement of nodes in this instance configuration and their
    +   * replication properties.
    +   *
    +   * To create user-managed configurations, input
    +   * `replicas` must include all replicas in `replicas` of the `base_config`
    +   * and include one or more replicas in the `optional_replicas` of the
    +   * `base_config`.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + java.util.List getReplicasList(); + + /** + * + * + *
    +   * The geographic placement of nodes in this instance configuration and their
    +   * replication properties.
    +   *
    +   * To create user-managed configurations, input
    +   * `replicas` must include all replicas in `replicas` of the `base_config`
    +   * and include one or more replicas in the `optional_replicas` of the
    +   * `base_config`.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + com.google.spanner.admin.instance.v1.ReplicaInfo getReplicas(int index); + + /** + * + * + *
    +   * The geographic placement of nodes in this instance configuration and their
    +   * replication properties.
    +   *
    +   * To create user-managed configurations, input
    +   * `replicas` must include all replicas in `replicas` of the `base_config`
    +   * and include one or more replicas in the `optional_replicas` of the
    +   * `base_config`.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + int getReplicasCount(); + + /** + * + * + *
    +   * The geographic placement of nodes in this instance configuration and their
    +   * replication properties.
    +   *
    +   * To create user-managed configurations, input
    +   * `replicas` must include all replicas in `replicas` of the `base_config`
    +   * and include one or more replicas in the `optional_replicas` of the
    +   * `base_config`.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + java.util.List + getReplicasOrBuilderList(); + + /** + * + * + *
    +   * The geographic placement of nodes in this instance configuration and their
    +   * replication properties.
    +   *
    +   * To create user-managed configurations, input
    +   * `replicas` must include all replicas in `replicas` of the `base_config`
    +   * and include one or more replicas in the `optional_replicas` of the
    +   * `base_config`.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 3; + */ + com.google.spanner.admin.instance.v1.ReplicaInfoOrBuilder getReplicasOrBuilder(int index); + + /** + * + * + *
    +   * Output only. The available optional replicas to choose from for
    +   * user-managed configurations. Populated for Google-managed configurations.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List getOptionalReplicasList(); + + /** + * + * + *
    +   * Output only. The available optional replicas to choose from for
    +   * user-managed configurations. Populated for Google-managed configurations.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.spanner.admin.instance.v1.ReplicaInfo getOptionalReplicas(int index); + + /** + * + * + *
    +   * Output only. The available optional replicas to choose from for
    +   * user-managed configurations. Populated for Google-managed configurations.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + int getOptionalReplicasCount(); + + /** + * + * + *
    +   * Output only. The available optional replicas to choose from for
    +   * user-managed configurations. Populated for Google-managed configurations.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List + getOptionalReplicasOrBuilderList(); + + /** + * + * + *
    +   * Output only. The available optional replicas to choose from for
    +   * user-managed configurations. Populated for Google-managed configurations.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo optional_replicas = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.spanner.admin.instance.v1.ReplicaInfoOrBuilder getOptionalReplicasOrBuilder(int index); + + /** + * + * + *
    +   * Base configuration name, e.g. projects/<project_name>/instanceConfigs/nam3,
    +   * based on which this configuration is created. Only set for user-managed
    +   * configurations. `base_config` must refer to a configuration of type
    +   * `GOOGLE_MANAGED` in the same project as this configuration.
    +   * 
    + * + * string base_config = 7 [(.google.api.resource_reference) = { ... } + * + * @return The baseConfig. + */ + java.lang.String getBaseConfig(); + + /** + * + * + *
    +   * Base configuration name, e.g. projects/<project_name>/instanceConfigs/nam3,
    +   * based on which this configuration is created. Only set for user-managed
    +   * configurations. `base_config` must refer to a configuration of type
    +   * `GOOGLE_MANAGED` in the same project as this configuration.
    +   * 
    + * + * string base_config = 7 [(.google.api.resource_reference) = { ... } + * + * @return The bytes for baseConfig. + */ + com.google.protobuf.ByteString getBaseConfigBytes(); + + /** + * + * + *
    +   * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +   * resources into groups that reflect a customer's organizational needs and
    +   * deployment strategies. Cloud Labels can be used to filter collections of
    +   * resources. They can be used to control how resource metrics are aggregated.
    +   * And they can be used as arguments to policy management rules (e.g. route,
    +   * firewall, load balancing, etc.).
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `[a-z0-9_-]{0,63}`.
    +   * * No more than 64 labels can be associated with a given resource.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   *
    +   * If you plan to use labels in your own code, please note that additional
    +   * characters may be allowed in the future. Therefore, you are advised to use
    +   * an internal label representation, such as JSON, which doesn't rely upon
    +   * specific characters being disallowed.  For example, representing labels
    +   * as the string:  name + "_" + value  would prove problematic if we were to
    +   * allow "_" in a future release.
    +   * 
    + * + * map<string, string> labels = 8; + */ + int getLabelsCount(); + + /** + * + * + *
    +   * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +   * resources into groups that reflect a customer's organizational needs and
    +   * deployment strategies. Cloud Labels can be used to filter collections of
    +   * resources. They can be used to control how resource metrics are aggregated.
    +   * And they can be used as arguments to policy management rules (e.g. route,
    +   * firewall, load balancing, etc.).
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `[a-z0-9_-]{0,63}`.
    +   * * No more than 64 labels can be associated with a given resource.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   *
    +   * If you plan to use labels in your own code, please note that additional
    +   * characters may be allowed in the future. Therefore, you are advised to use
    +   * an internal label representation, such as JSON, which doesn't rely upon
    +   * specific characters being disallowed.  For example, representing labels
    +   * as the string:  name + "_" + value  would prove problematic if we were to
    +   * allow "_" in a future release.
    +   * 
    + * + * map<string, string> labels = 8; + */ + boolean containsLabels(java.lang.String key); + + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Deprecated + java.util.Map getLabels(); + + /** + * + * + *
    +   * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +   * resources into groups that reflect a customer's organizational needs and
    +   * deployment strategies. Cloud Labels can be used to filter collections of
    +   * resources. They can be used to control how resource metrics are aggregated.
    +   * And they can be used as arguments to policy management rules (e.g. route,
    +   * firewall, load balancing, etc.).
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `[a-z0-9_-]{0,63}`.
    +   * * No more than 64 labels can be associated with a given resource.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   *
    +   * If you plan to use labels in your own code, please note that additional
    +   * characters may be allowed in the future. Therefore, you are advised to use
    +   * an internal label representation, such as JSON, which doesn't rely upon
    +   * specific characters being disallowed.  For example, representing labels
    +   * as the string:  name + "_" + value  would prove problematic if we were to
    +   * allow "_" in a future release.
    +   * 
    + * + * map<string, string> labels = 8; + */ + java.util.Map getLabelsMap(); + + /** + * + * + *
    +   * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +   * resources into groups that reflect a customer's organizational needs and
    +   * deployment strategies. Cloud Labels can be used to filter collections of
    +   * resources. They can be used to control how resource metrics are aggregated.
    +   * And they can be used as arguments to policy management rules (e.g. route,
    +   * firewall, load balancing, etc.).
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `[a-z0-9_-]{0,63}`.
    +   * * No more than 64 labels can be associated with a given resource.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   *
    +   * If you plan to use labels in your own code, please note that additional
    +   * characters may be allowed in the future. Therefore, you are advised to use
    +   * an internal label representation, such as JSON, which doesn't rely upon
    +   * specific characters being disallowed.  For example, representing labels
    +   * as the string:  name + "_" + value  would prove problematic if we were to
    +   * allow "_" in a future release.
    +   * 
    + * + * map<string, string> labels = 8; + */ + /* nullable */ + java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue); + + /** + * + * + *
    +   * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +   * resources into groups that reflect a customer's organizational needs and
    +   * deployment strategies. Cloud Labels can be used to filter collections of
    +   * resources. They can be used to control how resource metrics are aggregated.
    +   * And they can be used as arguments to policy management rules (e.g. route,
    +   * firewall, load balancing, etc.).
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `[a-z0-9_-]{0,63}`.
    +   * * No more than 64 labels can be associated with a given resource.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   *
    +   * If you plan to use labels in your own code, please note that additional
    +   * characters may be allowed in the future. Therefore, you are advised to use
    +   * an internal label representation, such as JSON, which doesn't rely upon
    +   * specific characters being disallowed.  For example, representing labels
    +   * as the string:  name + "_" + value  would prove problematic if we were to
    +   * allow "_" in a future release.
    +   * 
    + * + * map<string, string> labels = 8; + */ + java.lang.String getLabelsOrThrow(java.lang.String key); + + /** + * + * + *
    +   * etag is used for optimistic concurrency control as a way
    +   * to help prevent simultaneous updates of a instance configuration from
    +   * overwriting each other. It is strongly suggested that systems make use of
    +   * the etag in the read-modify-write cycle to perform instance configuration
    +   * updates in order to avoid race conditions: An etag is returned in the
    +   * response which contains instance configurations, and systems are expected
    +   * to put that etag in the request to update instance configuration to ensure
    +   * that their change is applied to the same version of the instance
    +   * configuration. If no etag is provided in the call to update the instance
    +   * configuration, then the existing instance configuration is overwritten
    +   * blindly.
    +   * 
    + * + * string etag = 9; + * + * @return The etag. + */ + java.lang.String getEtag(); + + /** + * + * + *
    +   * etag is used for optimistic concurrency control as a way
    +   * to help prevent simultaneous updates of a instance configuration from
    +   * overwriting each other. It is strongly suggested that systems make use of
    +   * the etag in the read-modify-write cycle to perform instance configuration
    +   * updates in order to avoid race conditions: An etag is returned in the
    +   * response which contains instance configurations, and systems are expected
    +   * to put that etag in the request to update instance configuration to ensure
    +   * that their change is applied to the same version of the instance
    +   * configuration. If no etag is provided in the call to update the instance
    +   * configuration, then the existing instance configuration is overwritten
    +   * blindly.
    +   * 
    + * + * string etag = 9; + * + * @return The bytes for etag. + */ + com.google.protobuf.ByteString getEtagBytes(); + + /** + * + * + *
    +   * Allowed values of the "default_leader" schema option for databases in
    +   * instances that use this instance configuration.
    +   * 
    + * + * repeated string leader_options = 4; + * + * @return A list containing the leaderOptions. + */ + java.util.List getLeaderOptionsList(); + + /** + * + * + *
    +   * Allowed values of the "default_leader" schema option for databases in
    +   * instances that use this instance configuration.
    +   * 
    + * + * repeated string leader_options = 4; + * + * @return The count of leaderOptions. + */ + int getLeaderOptionsCount(); + + /** + * + * + *
    +   * Allowed values of the "default_leader" schema option for databases in
    +   * instances that use this instance configuration.
    +   * 
    + * + * repeated string leader_options = 4; + * + * @param index The index of the element to return. + * @return The leaderOptions at the given index. + */ + java.lang.String getLeaderOptions(int index); + + /** + * + * + *
    +   * Allowed values of the "default_leader" schema option for databases in
    +   * instances that use this instance configuration.
    +   * 
    + * + * repeated string leader_options = 4; + * + * @param index The index of the value to return. + * @return The bytes of the leaderOptions at the given index. + */ + com.google.protobuf.ByteString getLeaderOptionsBytes(int index); + + /** + * + * + *
    +   * Output only. If true, the instance configuration is being created or
    +   * updated. If false, there are no ongoing operations for the instance
    +   * configuration.
    +   * 
    + * + * bool reconciling = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The reconciling. + */ + boolean getReconciling(); + + /** + * + * + *
    +   * Output only. The current instance configuration state. Applicable only for
    +   * `USER_MANAGED` configurations.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.State state = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + int getStateValue(); + + /** + * + * + *
    +   * Output only. The current instance configuration state. Applicable only for
    +   * `USER_MANAGED` configurations.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.State state = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + com.google.spanner.admin.instance.v1.InstanceConfig.State getState(); + + /** + * + * + *
    +   * Output only. Describes whether free instances are available to be created
    +   * in this instance configuration.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability free_instance_availability = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for freeInstanceAvailability. + */ + int getFreeInstanceAvailabilityValue(); + + /** + * + * + *
    +   * Output only. Describes whether free instances are available to be created
    +   * in this instance configuration.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability free_instance_availability = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The freeInstanceAvailability. + */ + com.google.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailability + getFreeInstanceAvailability(); + + /** + * + * + *
    +   * Output only. The `QuorumType` of the instance configuration.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.QuorumType quorum_type = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for quorumType. + */ + int getQuorumTypeValue(); + + /** + * + * + *
    +   * Output only. The `QuorumType` of the instance configuration.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig.QuorumType quorum_type = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The quorumType. + */ + com.google.spanner.admin.instance.v1.InstanceConfig.QuorumType getQuorumType(); + + /** + * + * + *
    +   * Output only. The storage limit in bytes per processing unit.
    +   * 
    + * + * + * int64 storage_limit_per_processing_unit = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The storageLimitPerProcessingUnit. + */ + long getStorageLimitPerProcessingUnit(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceName.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceName.java new file mode 100644 index 000000000000..c6f2744d7c7b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceName.java @@ -0,0 +1,192 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.spanner.admin.instance.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class InstanceName implements ResourceName { + private static final PathTemplate PROJECT_INSTANCE = + PathTemplate.createWithoutUrlEncoding("projects/{project}/instances/{instance}"); + private volatile Map fieldValuesMap; + private final String project; + private final String instance; + + @Deprecated + protected InstanceName() { + project = null; + instance = null; + } + + private InstanceName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + instance = Preconditions.checkNotNull(builder.getInstance()); + } + + public String getProject() { + return project; + } + + public String getInstance() { + return instance; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static InstanceName of(String project, String instance) { + return newBuilder().setProject(project).setInstance(instance).build(); + } + + public static String format(String project, String instance) { + return newBuilder().setProject(project).setInstance(instance).build().toString(); + } + + public static InstanceName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_INSTANCE.validatedMatch( + formattedString, "InstanceName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("instance")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (InstanceName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + 
return PROJECT_INSTANCE.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (instance != null) { + fieldMapBuilder.put("instance", instance); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_INSTANCE.instantiate("project", project, "instance", instance); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + InstanceName that = ((InstanceName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.instance, that.instance); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(instance); + return h; + } + + /** Builder for projects/{project}/instances/{instance}. 
*/ + public static class Builder { + private String project; + private String instance; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getInstance() { + return instance; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setInstance(String instance) { + this.instance = instance; + return this; + } + + private Builder(InstanceName instanceName) { + this.project = instanceName.project; + this.instance = instanceName.instance; + } + + public InstanceName build() { + return new InstanceName(this); + } + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceOrBuilder.java new file mode 100644 index 000000000000..f94850acab0a --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceOrBuilder.java @@ -0,0 +1,792 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface InstanceOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.Instance) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. A unique identifier for the instance, which cannot be changed
    +   * after the instance is created. Values are of the form
    +   * `projects/<project>/instances/[a-z][-a-z0-9]*[a-z0-9]`. The final
    +   * segment of the name must be between 2 and 64 characters in length.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Required. A unique identifier for the instance, which cannot be changed
    +   * after the instance is created. Values are of the form
    +   * `projects/<project>/instances/[a-z][-a-z0-9]*[a-z0-9]`. The final
    +   * segment of the name must be between 2 and 64 characters in length.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +   * Required. The name of the instance's configuration. Values are of the form
    +   * `projects/<project>/instanceConfigs/<configuration>`. See
    +   * also [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
    +   * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    +   * 
    + * + * + * string config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The config. + */ + java.lang.String getConfig(); + + /** + * + * + *
    +   * Required. The name of the instance's configuration. Values are of the form
    +   * `projects/<project>/instanceConfigs/<configuration>`. See
    +   * also [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
    +   * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    +   * 
    + * + * + * string config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for config. + */ + com.google.protobuf.ByteString getConfigBytes(); + + /** + * + * + *
    +   * Required. The descriptive name for this instance as it appears in UIs.
    +   * Must be unique per project and between 4 and 30 characters in length.
    +   * 
    + * + * string display_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The displayName. + */ + java.lang.String getDisplayName(); + + /** + * + * + *
    +   * Required. The descriptive name for this instance as it appears in UIs.
    +   * Must be unique per project and between 4 and 30 characters in length.
    +   * 
    + * + * string display_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for displayName. + */ + com.google.protobuf.ByteString getDisplayNameBytes(); + + /** + * + * + *
    +   * The number of nodes allocated to this instance. At most, one of either
    +   * `node_count` or `processing_units` should be present in the message.
    +   *
    +   * Users can set the `node_count` field to specify the target number of nodes
    +   * allocated to the instance.
    +   *
    +   * If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY`
    +   * field and reflects the current number of nodes allocated to the instance.
    +   *
    +   * This might be zero in API responses for instances that are not yet in the
    +   * `READY` state.
    +   *
    +   *
    +   * For more information, see
    +   * [Compute capacity, nodes, and processing
    +   * units](https://cloud.google.com/spanner/docs/compute-capacity).
    +   * 
    + * + * int32 node_count = 5; + * + * @return The nodeCount. + */ + int getNodeCount(); + + /** + * + * + *
    +   * The number of processing units allocated to this instance. At most, one of
    +   * either `processing_units` or `node_count` should be present in the message.
    +   *
    +   * Users can set the `processing_units` field to specify the target number of
    +   * processing units allocated to the instance.
    +   *
    +   * If autoscaling is enabled, `processing_units` is treated as an
    +   * `OUTPUT_ONLY` field and reflects the current number of processing units
    +   * allocated to the instance.
    +   *
    +   * This might be zero in API responses for instances that are not yet in the
    +   * `READY` state.
    +   *
    +   *
    +   * For more information, see
    +   * [Compute capacity, nodes and processing
    +   * units](https://cloud.google.com/spanner/docs/compute-capacity).
    +   * 
    + * + * int32 processing_units = 9; + * + * @return The processingUnits. + */ + int getProcessingUnits(); + + /** + * + * + *
    +   * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +   * selection identifies a set of replicas with common properties. Replicas
    +   * identified by a ReplicaSelection are scaled with the same compute capacity.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List + getReplicaComputeCapacityList(); + + /** + * + * + *
    +   * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +   * selection identifies a set of replicas with common properties. Replicas
    +   * identified by a ReplicaSelection are scaled with the same compute capacity.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity getReplicaComputeCapacity(int index); + + /** + * + * + *
    +   * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +   * selection identifies a set of replicas with common properties. Replicas
    +   * identified by a ReplicaSelection are scaled with the same compute capacity.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + int getReplicaComputeCapacityCount(); + + /** + * + * + *
    +   * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +   * selection identifies a set of replicas with common properties. Replicas
    +   * identified by a ReplicaSelection are scaled with the same compute capacity.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List + getReplicaComputeCapacityOrBuilderList(); + + /** + * + * + *
    +   * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +   * selection identifies a set of replicas with common properties. Replicas
    +   * identified by a ReplicaSelection are scaled with the same compute capacity.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.spanner.admin.instance.v1.ReplicaComputeCapacityOrBuilder + getReplicaComputeCapacityOrBuilder(int index); + + /** + * + * + *
    +   * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +   * field is set. When autoscaling is enabled, node_count and processing_units
    +   * are treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +   * allocated to the instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the autoscalingConfig field is set. + */ + boolean hasAutoscalingConfig(); + + /** + * + * + *
    +   * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +   * field is set. When autoscaling is enabled, node_count and processing_units
    +   * are treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +   * allocated to the instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoscalingConfig. + */ + com.google.spanner.admin.instance.v1.AutoscalingConfig getAutoscalingConfig(); + + /** + * + * + *
    +   * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +   * field is set. When autoscaling is enabled, node_count and processing_units
    +   * are treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +   * allocated to the instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder getAutoscalingConfigOrBuilder(); + + /** + * + * + *
    +   * Output only. The current instance state. For
    +   * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance],
    +   * the state must be either omitted or set to `CREATING`. For
    +   * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance],
    +   * the state must be either omitted or set to `READY`.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.State state = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + int getStateValue(); + + /** + * + * + *
    +   * Output only. The current instance state. For
    +   * [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance],
    +   * the state must be either omitted or set to `CREATING`. For
    +   * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance],
    +   * the state must be either omitted or set to `READY`.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.State state = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + com.google.spanner.admin.instance.v1.Instance.State getState(); + + /** + * + * + *
    +   * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +   * resources into groups that reflect a customer's organizational needs and
    +   * deployment strategies. Cloud Labels can be used to filter collections of
    +   * resources. They can be used to control how resource metrics are aggregated.
    +   * And they can be used as arguments to policy management rules (e.g. route,
    +   * firewall, load balancing, etc.).
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `[a-z0-9_-]{0,63}`.
    +   * * No more than 64 labels can be associated with a given resource.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   *
    +   * If you plan to use labels in your own code, please note that additional
    +   * characters may be allowed in the future. And so you are advised to use an
    +   * internal label representation, such as JSON, which doesn't rely upon
    +   * specific characters being disallowed.  For example, representing labels
    +   * as the string:  name + "_" + value  would prove problematic if we were to
    +   * allow "_" in a future release.
    +   * 
    + * + * map<string, string> labels = 7; + */ + int getLabelsCount(); + + /** + * + * + *
    +   * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +   * resources into groups that reflect a customer's organizational needs and
    +   * deployment strategies. Cloud Labels can be used to filter collections of
    +   * resources. They can be used to control how resource metrics are aggregated.
    +   * And they can be used as arguments to policy management rules (e.g. route,
    +   * firewall, load balancing, etc.).
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `[a-z0-9_-]{0,63}`.
    +   * * No more than 64 labels can be associated with a given resource.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   *
    +   * If you plan to use labels in your own code, please note that additional
    +   * characters may be allowed in the future. And so you are advised to use an
    +   * internal label representation, such as JSON, which doesn't rely upon
    +   * specific characters being disallowed.  For example, representing labels
    +   * as the string:  name + "_" + value  would prove problematic if we were to
    +   * allow "_" in a future release.
    +   * 
    + * + * map<string, string> labels = 7; + */ + boolean containsLabels(java.lang.String key); + + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Deprecated + java.util.Map getLabels(); + + /** + * + * + *
    +   * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +   * resources into groups that reflect a customer's organizational needs and
    +   * deployment strategies. Cloud Labels can be used to filter collections of
    +   * resources. They can be used to control how resource metrics are aggregated.
    +   * And they can be used as arguments to policy management rules (e.g. route,
    +   * firewall, load balancing, etc.).
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `[a-z0-9_-]{0,63}`.
    +   * * No more than 64 labels can be associated with a given resource.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   *
    +   * If you plan to use labels in your own code, please note that additional
    +   * characters may be allowed in the future. And so you are advised to use an
    +   * internal label representation, such as JSON, which doesn't rely upon
    +   * specific characters being disallowed.  For example, representing labels
    +   * as the string:  name + "_" + value  would prove problematic if we were to
    +   * allow "_" in a future release.
    +   * 
    + * + * map<string, string> labels = 7; + */ + java.util.Map getLabelsMap(); + + /** + * + * + *
    +   * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +   * resources into groups that reflect a customer's organizational needs and
    +   * deployment strategies. Cloud Labels can be used to filter collections of
    +   * resources. They can be used to control how resource metrics are aggregated.
    +   * And they can be used as arguments to policy management rules (e.g. route,
    +   * firewall, load balancing, etc.).
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `[a-z0-9_-]{0,63}`.
    +   * * No more than 64 labels can be associated with a given resource.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   *
    +   * If you plan to use labels in your own code, please note that additional
    +   * characters may be allowed in the future. And so you are advised to use an
    +   * internal label representation, such as JSON, which doesn't rely upon
    +   * specific characters being disallowed.  For example, representing labels
    +   * as the string:  name + "_" + value  would prove problematic if we were to
    +   * allow "_" in a future release.
    +   * 
    + * + * map<string, string> labels = 7; + */ + /* nullable */ + java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue); + + /** + * + * + *
    +   * Cloud Labels are a flexible and lightweight mechanism for organizing cloud
    +   * resources into groups that reflect a customer's organizational needs and
    +   * deployment strategies. Cloud Labels can be used to filter collections of
    +   * resources. They can be used to control how resource metrics are aggregated.
    +   * And they can be used as arguments to policy management rules (e.g. route,
    +   * firewall, load balancing, etc.).
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `[a-z0-9_-]{0,63}`.
    +   * * No more than 64 labels can be associated with a given resource.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   *
    +   * If you plan to use labels in your own code, please note that additional
    +   * characters may be allowed in the future. And so you are advised to use an
    +   * internal label representation, such as JSON, which doesn't rely upon
    +   * specific characters being disallowed.  For example, representing labels
    +   * as the string:  name + "_" + value  would prove problematic if we were to
    +   * allow "_" in a future release.
    +   * 
    + * + * map<string, string> labels = 7; + */ + java.lang.String getLabelsOrThrow(java.lang.String key); + + /** + * + * + *
    +   * The `InstanceType` of the current instance.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance.InstanceType instance_type = 10; + * + * @return The enum numeric value on the wire for instanceType. + */ + int getInstanceTypeValue(); + + /** + * + * + *
    +   * The `InstanceType` of the current instance.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance.InstanceType instance_type = 10; + * + * @return The instanceType. + */ + com.google.spanner.admin.instance.v1.Instance.InstanceType getInstanceType(); + + /** + * + * + *
    +   * Deprecated. This field is not populated.
    +   * 
    + * + * repeated string endpoint_uris = 8; + * + * @return A list containing the endpointUris. + */ + java.util.List getEndpointUrisList(); + + /** + * + * + *
    +   * Deprecated. This field is not populated.
    +   * 
    + * + * repeated string endpoint_uris = 8; + * + * @return The count of endpointUris. + */ + int getEndpointUrisCount(); + + /** + * + * + *
    +   * Deprecated. This field is not populated.
    +   * 
    + * + * repeated string endpoint_uris = 8; + * + * @param index The index of the element to return. + * @return The endpointUris at the given index. + */ + java.lang.String getEndpointUris(int index); + + /** + * + * + *
    +   * Deprecated. This field is not populated.
    +   * 
    + * + * repeated string endpoint_uris = 8; + * + * @param index The index of the value to return. + * @return The bytes of the endpointUris at the given index. + */ + com.google.protobuf.ByteString getEndpointUrisBytes(int index); + + /** + * + * + *
    +   * Output only. The time at which the instance was created.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + + /** + * + * + *
    +   * Output only. The time at which the instance was created.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + + /** + * + * + *
    +   * Output only. The time at which the instance was created.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 11 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
    +   * Output only. The time at which the instance was most recently updated.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + boolean hasUpdateTime(); + + /** + * + * + *
    +   * Output only. The time at which the instance was most recently updated.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + com.google.protobuf.Timestamp getUpdateTime(); + + /** + * + * + *
    +   * Output only. The time at which the instance was most recently updated.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder(); + + /** + * + * + *
    +   * Free instance metadata. Only populated for free instances.
    +   * 
    + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata free_instance_metadata = 13; + * + * + * @return Whether the freeInstanceMetadata field is set. + */ + boolean hasFreeInstanceMetadata(); + + /** + * + * + *
    +   * Free instance metadata. Only populated for free instances.
    +   * 
    + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata free_instance_metadata = 13; + * + * + * @return The freeInstanceMetadata. + */ + com.google.spanner.admin.instance.v1.FreeInstanceMetadata getFreeInstanceMetadata(); + + /** + * + * + *
    +   * Free instance metadata. Only populated for free instances.
    +   * 
    + * + * .google.spanner.admin.instance.v1.FreeInstanceMetadata free_instance_metadata = 13; + * + */ + com.google.spanner.admin.instance.v1.FreeInstanceMetadataOrBuilder + getFreeInstanceMetadataOrBuilder(); + + /** + * + * + *
    +   * Optional. The `Edition` of the current instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for edition. + */ + int getEditionValue(); + + /** + * + * + *
    +   * Optional. The `Edition` of the current instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The edition. + */ + com.google.spanner.admin.instance.v1.Instance.Edition getEdition(); + + /** + * + * + *
    +   * Optional. Controls the default backup schedule behavior for new databases
    +   * within the instance. By default, a backup schedule is created automatically
    +   * when a new database is created in a new instance.
    +   *
    +   * Note that the `AUTOMATIC` value isn't permitted for free instances,
    +   * as backups and backup schedules aren't supported for free instances.
    +   *
    +   * In the `GetInstance` or `ListInstances` response, if the value of
    +   * `default_backup_schedule_type` isn't set, or set to `NONE`, Spanner doesn't
    +   * create a default backup schedule for new databases in the instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType default_backup_schedule_type = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for defaultBackupScheduleType. + */ + int getDefaultBackupScheduleTypeValue(); + + /** + * + * + *
    +   * Optional. Controls the default backup schedule behavior for new databases
    +   * within the instance. By default, a backup schedule is created automatically
    +   * when a new database is created in a new instance.
    +   *
    +   * Note that the `AUTOMATIC` value isn't permitted for free instances,
    +   * as backups and backup schedules aren't supported for free instances.
    +   *
    +   * In the `GetInstance` or `ListInstances` response, if the value of
    +   * `default_backup_schedule_type` isn't set, or set to `NONE`, Spanner doesn't
    +   * create a default backup schedule for new databases in the instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType default_backup_schedule_type = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The defaultBackupScheduleType. + */ + com.google.spanner.admin.instance.v1.Instance.DefaultBackupScheduleType + getDefaultBackupScheduleType(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstancePartition.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstancePartition.java new file mode 100644 index 000000000000..afb6ca38d571 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstancePartition.java @@ -0,0 +1,3819 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * An isolated set of Cloud Spanner resources that databases can define
    + * placements on.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.InstancePartition} + */ +@com.google.protobuf.Generated +public final class InstancePartition extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.InstancePartition) + InstancePartitionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "InstancePartition"); + } + + // Use InstancePartition.newBuilder() to construct. + private InstancePartition(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private InstancePartition() { + name_ = ""; + config_ = ""; + displayName_ = ""; + state_ = 0; + referencingDatabases_ = com.google.protobuf.LazyStringArrayList.emptyList(); + referencingBackups_ = com.google.protobuf.LazyStringArrayList.emptyList(); + etag_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_InstancePartition_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_InstancePartition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.InstancePartition.class, + com.google.spanner.admin.instance.v1.InstancePartition.Builder.class); + } + + /** + * + * + *
    +   * Indicates the current state of the instance partition.
    +   * 
    + * + * Protobuf enum {@code google.spanner.admin.instance.v1.InstancePartition.State} + */ + public enum State implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * STATE_UNSPECIFIED = 0; + */ + STATE_UNSPECIFIED(0), + /** + * + * + *
    +     * The instance partition is still being created. Resources may not be
    +     * available yet, and operations such as creating placements using this
    +     * instance partition may not work.
    +     * 
    + * + * CREATING = 1; + */ + CREATING(1), + /** + * + * + *
    +     * The instance partition is fully created and ready to do work such as
    +     * creating placements and using in databases.
    +     * 
    + * + * READY = 2; + */ + READY(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "State"); + } + + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * STATE_UNSPECIFIED = 0; + */ + public static final int STATE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * The instance partition is still being created. Resources may not be
    +     * available yet, and operations such as creating placements using this
    +     * instance partition may not work.
    +     * 
    + * + * CREATING = 1; + */ + public static final int CREATING_VALUE = 1; + + /** + * + * + *
    +     * The instance partition is fully created and ready to do work such as
    +     * creating placements and using in databases.
    +     * 
    + * + * READY = 2; + */ + public static final int READY_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static State valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static State forNumber(int value) { + switch (value) { + case 0: + return STATE_UNSPECIFIED; + case 1: + return CREATING; + case 2: + return READY; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.InstancePartition.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final State[] VALUES = values(); + + public static State valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + 
if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private State(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.admin.instance.v1.InstancePartition.State) + } + + private int bitField0_; + private int computeCapacityCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object computeCapacity_; + + public enum ComputeCapacityCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + NODE_COUNT(5), + PROCESSING_UNITS(6), + COMPUTECAPACITY_NOT_SET(0); + private final int value; + + private ComputeCapacityCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ComputeCapacityCase valueOf(int value) { + return forNumber(value); + } + + public static ComputeCapacityCase forNumber(int value) { + switch (value) { + case 5: + return NODE_COUNT; + case 6: + return PROCESSING_UNITS; + case 0: + return COMPUTECAPACITY_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public ComputeCapacityCase getComputeCapacityCase() { + return ComputeCapacityCase.forNumber(computeCapacityCase_); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Required. A unique identifier for the instance partition. Values are of the
    +   * form
    +   * `projects/<project>/instances/<instance>/instancePartitions/[a-z][-a-z0-9]*[a-z0-9]`.
    +   * The final segment of the name must be between 2 and 64 characters in
    +   * length. An instance partition's name cannot be changed after the instance
    +   * partition is created.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. A unique identifier for the instance partition. Values are of the
    +   * form
    +   * `projects/<project>/instances/<instance>/instancePartitions/[a-z][-a-z0-9]*[a-z0-9]`.
    +   * The final segment of the name must be between 2 and 64 characters in
    +   * length. An instance partition's name cannot be changed after the instance
    +   * partition is created.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CONFIG_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object config_ = ""; + + /** + * + * + *
    +   * Required. The name of the instance partition's configuration. Values are of
    +   * the form `projects/<project>/instanceConfigs/<configuration>`. See also
    +   * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
    +   * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    +   * 
    + * + * + * string config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The config. + */ + @java.lang.Override + public java.lang.String getConfig() { + java.lang.Object ref = config_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + config_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the instance partition's configuration. Values are of
    +   * the form `projects/<project>/instanceConfigs/<configuration>`. See also
    +   * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
    +   * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    +   * 
    + * + * + * string config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for config. + */ + @java.lang.Override + public com.google.protobuf.ByteString getConfigBytes() { + java.lang.Object ref = config_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + config_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DISPLAY_NAME_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object displayName_ = ""; + + /** + * + * + *
    +   * Required. The descriptive name for this instance partition as it appears in
    +   * UIs. Must be unique per project and between 4 and 30 characters in length.
    +   * 
    + * + * string display_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The displayName. + */ + @java.lang.Override + public java.lang.String getDisplayName() { + java.lang.Object ref = displayName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + displayName_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The descriptive name for this instance partition as it appears in
    +   * UIs. Must be unique per project and between 4 and 30 characters in length.
    +   * 
    + * + * string display_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for displayName. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDisplayNameBytes() { + java.lang.Object ref = displayName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + displayName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int NODE_COUNT_FIELD_NUMBER = 5; + + /** + * + * + *
    +   * The number of nodes allocated to this instance partition.
    +   *
    +   * Users can set the `node_count` field to specify the target number of
    +   * nodes allocated to the instance partition.
    +   *
    +   * This may be zero in API responses for instance partitions that are not
    +   * yet in state `READY`.
    +   * 
    + * + * int32 node_count = 5; + * + * @return Whether the nodeCount field is set. + */ + @java.lang.Override + public boolean hasNodeCount() { + return computeCapacityCase_ == 5; + } + + /** + * + * + *
    +   * The number of nodes allocated to this instance partition.
    +   *
    +   * Users can set the `node_count` field to specify the target number of
    +   * nodes allocated to the instance partition.
    +   *
    +   * This may be zero in API responses for instance partitions that are not
    +   * yet in state `READY`.
    +   * 
    + * + * int32 node_count = 5; + * + * @return The nodeCount. + */ + @java.lang.Override + public int getNodeCount() { + if (computeCapacityCase_ == 5) { + return (java.lang.Integer) computeCapacity_; + } + return 0; + } + + public static final int PROCESSING_UNITS_FIELD_NUMBER = 6; + + /** + * + * + *
    +   * The number of processing units allocated to this instance partition.
    +   *
    +   * Users can set the `processing_units` field to specify the target number
    +   * of processing units allocated to the instance partition.
    +   *
    +   * This might be zero in API responses for instance partitions that are not
    +   * yet in the `READY` state.
    +   * 
    + * + * int32 processing_units = 6; + * + * @return Whether the processingUnits field is set. + */ + @java.lang.Override + public boolean hasProcessingUnits() { + return computeCapacityCase_ == 6; + } + + /** + * + * + *
    +   * The number of processing units allocated to this instance partition.
    +   *
    +   * Users can set the `processing_units` field to specify the target number
    +   * of processing units allocated to the instance partition.
    +   *
    +   * This might be zero in API responses for instance partitions that are not
    +   * yet in the `READY` state.
    +   * 
    + * + * int32 processing_units = 6; + * + * @return The processingUnits. + */ + @java.lang.Override + public int getProcessingUnits() { + if (computeCapacityCase_ == 6) { + return (java.lang.Integer) computeCapacity_; + } + return 0; + } + + public static final int AUTOSCALING_CONFIG_FIELD_NUMBER = 13; + private com.google.spanner.admin.instance.v1.AutoscalingConfig autoscalingConfig_; + + /** + * + * + *
    +   * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +   * field is set. When autoscaling is enabled, fields in compute_capacity are
    +   * treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +   * allocated to the instance partition.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the autoscalingConfig field is set. + */ + @java.lang.Override + public boolean hasAutoscalingConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +   * field is set. When autoscaling is enabled, fields in compute_capacity are
    +   * treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +   * allocated to the instance partition.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoscalingConfig. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig getAutoscalingConfig() { + return autoscalingConfig_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance() + : autoscalingConfig_; + } + + /** + * + * + *
    +   * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +   * field is set. When autoscaling is enabled, fields in compute_capacity are
    +   * treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +   * allocated to the instance partition.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder + getAutoscalingConfigOrBuilder() { + return autoscalingConfig_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance() + : autoscalingConfig_; + } + + public static final int STATE_FIELD_NUMBER = 7; + private int state_ = 0; + + /** + * + * + *
    +   * Output only. The current instance partition state.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition.State state = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + @java.lang.Override + public int getStateValue() { + return state_; + } + + /** + * + * + *
    +   * Output only. The current instance partition state.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition.State state = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstancePartition.State getState() { + com.google.spanner.admin.instance.v1.InstancePartition.State result = + com.google.spanner.admin.instance.v1.InstancePartition.State.forNumber(state_); + return result == null + ? com.google.spanner.admin.instance.v1.InstancePartition.State.UNRECOGNIZED + : result; + } + + public static final int CREATE_TIME_FIELD_NUMBER = 8; + private com.google.protobuf.Timestamp createTime_; + + /** + * + * + *
    +   * Output only. The time at which the instance partition was created.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + @java.lang.Override + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Output only. The time at which the instance partition was created.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + /** + * + * + *
    +   * Output only. The time at which the instance partition was created.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + public static final int UPDATE_TIME_FIELD_NUMBER = 9; + private com.google.protobuf.Timestamp updateTime_; + + /** + * + * + *
    +   * Output only. The time at which the instance partition was most recently
    +   * updated.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + @java.lang.Override + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * Output only. The time at which the instance partition was most recently
    +   * updated.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getUpdateTime() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + /** + * + * + *
    +   * Output only. The time at which the instance partition was most recently
    +   * updated.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + public static final int REFERENCING_DATABASES_FIELD_NUMBER = 10; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList referencingDatabases_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * Output only. The names of the databases that reference this
    +   * instance partition. Referencing databases should share the parent instance.
    +   * The existence of any referencing database prevents the instance partition
    +   * from being deleted.
    +   * 
    + * + * repeated string referencing_databases = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return A list containing the referencingDatabases. + */ + public com.google.protobuf.ProtocolStringList getReferencingDatabasesList() { + return referencingDatabases_; + } + + /** + * + * + *
    +   * Output only. The names of the databases that reference this
    +   * instance partition. Referencing databases should share the parent instance.
    +   * The existence of any referencing database prevents the instance partition
    +   * from being deleted.
    +   * 
    + * + * repeated string referencing_databases = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The count of referencingDatabases. + */ + public int getReferencingDatabasesCount() { + return referencingDatabases_.size(); + } + + /** + * + * + *
    +   * Output only. The names of the databases that reference this
    +   * instance partition. Referencing databases should share the parent instance.
    +   * The existence of any referencing database prevents the instance partition
    +   * from being deleted.
    +   * 
    + * + * repeated string referencing_databases = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param index The index of the element to return. + * @return The referencingDatabases at the given index. + */ + public java.lang.String getReferencingDatabases(int index) { + return referencingDatabases_.get(index); + } + + /** + * + * + *
    +   * Output only. The names of the databases that reference this
    +   * instance partition. Referencing databases should share the parent instance.
    +   * The existence of any referencing database prevents the instance partition
    +   * from being deleted.
    +   * 
    + * + * repeated string referencing_databases = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param index The index of the value to return. + * @return The bytes of the referencingDatabases at the given index. + */ + public com.google.protobuf.ByteString getReferencingDatabasesBytes(int index) { + return referencingDatabases_.getByteString(index); + } + + public static final int REFERENCING_BACKUPS_FIELD_NUMBER = 11; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList referencingBackups_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * Output only. Deprecated: This field is not populated.
    +   * Output only. The names of the backups that reference this instance
    +   * partition. Referencing backups should share the parent instance. The
    +   * existence of any referencing backup prevents the instance partition from
    +   * being deleted.
    +   * 
    + * + * + * repeated string referencing_backups = 11 [deprecated = true, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @deprecated google.spanner.admin.instance.v1.InstancePartition.referencing_backups is + * deprecated. See google/spanner/admin/instance/v1/spanner_instance_admin.proto;l=1855 + * @return A list containing the referencingBackups. + */ + @java.lang.Deprecated + public com.google.protobuf.ProtocolStringList getReferencingBackupsList() { + return referencingBackups_; + } + + /** + * + * + *
    +   * Output only. Deprecated: This field is not populated.
    +   * Output only. The names of the backups that reference this instance
    +   * partition. Referencing backups should share the parent instance. The
    +   * existence of any referencing backup prevents the instance partition from
    +   * being deleted.
    +   * 
    + * + * + * repeated string referencing_backups = 11 [deprecated = true, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @deprecated google.spanner.admin.instance.v1.InstancePartition.referencing_backups is + * deprecated. See google/spanner/admin/instance/v1/spanner_instance_admin.proto;l=1855 + * @return The count of referencingBackups. + */ + @java.lang.Deprecated + public int getReferencingBackupsCount() { + return referencingBackups_.size(); + } + + /** + * + * + *
    +   * Output only. Deprecated: This field is not populated.
    +   * Output only. The names of the backups that reference this instance
    +   * partition. Referencing backups should share the parent instance. The
    +   * existence of any referencing backup prevents the instance partition from
    +   * being deleted.
    +   * 
    + * + * + * repeated string referencing_backups = 11 [deprecated = true, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @deprecated google.spanner.admin.instance.v1.InstancePartition.referencing_backups is + * deprecated. See google/spanner/admin/instance/v1/spanner_instance_admin.proto;l=1855 + * @param index The index of the element to return. + * @return The referencingBackups at the given index. + */ + @java.lang.Deprecated + public java.lang.String getReferencingBackups(int index) { + return referencingBackups_.get(index); + } + + /** + * + * + *
    +   * Output only. Deprecated: This field is not populated.
    +   * Output only. The names of the backups that reference this instance
    +   * partition. Referencing backups should share the parent instance. The
    +   * existence of any referencing backup prevents the instance partition from
    +   * being deleted.
    +   * 
    + * + * + * repeated string referencing_backups = 11 [deprecated = true, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @deprecated google.spanner.admin.instance.v1.InstancePartition.referencing_backups is + * deprecated. See google/spanner/admin/instance/v1/spanner_instance_admin.proto;l=1855 + * @param index The index of the value to return. + * @return The bytes of the referencingBackups at the given index. + */ + @java.lang.Deprecated + public com.google.protobuf.ByteString getReferencingBackupsBytes(int index) { + return referencingBackups_.getByteString(index); + } + + public static final int ETAG_FIELD_NUMBER = 12; + + @SuppressWarnings("serial") + private volatile java.lang.Object etag_ = ""; + + /** + * + * + *
    +   * Used for optimistic concurrency control as a way
+   * to help prevent simultaneous updates of an instance partition from
    +   * overwriting each other. It is strongly suggested that systems make use of
    +   * the etag in the read-modify-write cycle to perform instance partition
    +   * updates in order to avoid race conditions: An etag is returned in the
    +   * response which contains instance partitions, and systems are expected to
    +   * put that etag in the request to update instance partitions to ensure that
    +   * their change will be applied to the same version of the instance partition.
    +   * If no etag is provided in the call to update instance partition, then the
    +   * existing instance partition is overwritten blindly.
    +   * 
    + * + * string etag = 12; + * + * @return The etag. + */ + @java.lang.Override + public java.lang.String getEtag() { + java.lang.Object ref = etag_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + etag_ = s; + return s; + } + } + + /** + * + * + *
    +   * Used for optimistic concurrency control as a way
+   * to help prevent simultaneous updates of an instance partition from
    +   * overwriting each other. It is strongly suggested that systems make use of
    +   * the etag in the read-modify-write cycle to perform instance partition
    +   * updates in order to avoid race conditions: An etag is returned in the
    +   * response which contains instance partitions, and systems are expected to
    +   * put that etag in the request to update instance partitions to ensure that
    +   * their change will be applied to the same version of the instance partition.
    +   * If no etag is provided in the call to update instance partition, then the
    +   * existing instance partition is overwritten blindly.
    +   * 
    + * + * string etag = 12; + * + * @return The bytes for etag. + */ + @java.lang.Override + public com.google.protobuf.ByteString getEtagBytes() { + java.lang.Object ref = etag_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + etag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(config_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, config_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(displayName_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, displayName_); + } + if (computeCapacityCase_ == 5) { + output.writeInt32(5, (int) ((java.lang.Integer) computeCapacity_)); + } + if (computeCapacityCase_ == 6) { + output.writeInt32(6, (int) ((java.lang.Integer) computeCapacity_)); + } + if (state_ + != com.google.spanner.admin.instance.v1.InstancePartition.State.STATE_UNSPECIFIED + .getNumber()) { + output.writeEnum(7, state_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(8, getCreateTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(9, getUpdateTime()); + } + for (int i = 0; i < referencingDatabases_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 10, referencingDatabases_.getRaw(i)); + } + for (int i = 0; i < 
referencingBackups_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 11, referencingBackups_.getRaw(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(etag_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 12, etag_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(13, getAutoscalingConfig()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(config_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, config_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(displayName_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, displayName_); + } + if (computeCapacityCase_ == 5) { + size += + com.google.protobuf.CodedOutputStream.computeInt32Size( + 5, (int) ((java.lang.Integer) computeCapacity_)); + } + if (computeCapacityCase_ == 6) { + size += + com.google.protobuf.CodedOutputStream.computeInt32Size( + 6, (int) ((java.lang.Integer) computeCapacity_)); + } + if (state_ + != com.google.spanner.admin.instance.v1.InstancePartition.State.STATE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(7, state_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(8, getCreateTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(9, getUpdateTime()); + } + { + int dataSize = 0; + for (int i = 0; i < referencingDatabases_.size(); i++) { + dataSize += computeStringSizeNoTag(referencingDatabases_.getRaw(i)); + } + size += dataSize; + size += 1 * 
getReferencingDatabasesList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < referencingBackups_.size(); i++) { + dataSize += computeStringSizeNoTag(referencingBackups_.getRaw(i)); + } + size += dataSize; + size += 1 * getReferencingBackupsList().size(); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(etag_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(12, etag_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(13, getAutoscalingConfig()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.InstancePartition)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.InstancePartition other = + (com.google.spanner.admin.instance.v1.InstancePartition) obj; + + if (!getName().equals(other.getName())) return false; + if (!getConfig().equals(other.getConfig())) return false; + if (!getDisplayName().equals(other.getDisplayName())) return false; + if (hasAutoscalingConfig() != other.hasAutoscalingConfig()) return false; + if (hasAutoscalingConfig()) { + if (!getAutoscalingConfig().equals(other.getAutoscalingConfig())) return false; + } + if (state_ != other.state_) return false; + if (hasCreateTime() != other.hasCreateTime()) return false; + if (hasCreateTime()) { + if (!getCreateTime().equals(other.getCreateTime())) return false; + } + if (hasUpdateTime() != other.hasUpdateTime()) return false; + if (hasUpdateTime()) { + if (!getUpdateTime().equals(other.getUpdateTime())) return false; + } + if (!getReferencingDatabasesList().equals(other.getReferencingDatabasesList())) return false; + if (!getReferencingBackupsList().equals(other.getReferencingBackupsList())) return false; + if (!getEtag().equals(other.getEtag())) return 
false; + if (!getComputeCapacityCase().equals(other.getComputeCapacityCase())) return false; + switch (computeCapacityCase_) { + case 5: + if (getNodeCount() != other.getNodeCount()) return false; + break; + case 6: + if (getProcessingUnits() != other.getProcessingUnits()) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getConfig().hashCode(); + hash = (37 * hash) + DISPLAY_NAME_FIELD_NUMBER; + hash = (53 * hash) + getDisplayName().hashCode(); + if (hasAutoscalingConfig()) { + hash = (37 * hash) + AUTOSCALING_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getAutoscalingConfig().hashCode(); + } + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + state_; + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + } + if (hasUpdateTime()) { + hash = (37 * hash) + UPDATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getUpdateTime().hashCode(); + } + if (getReferencingDatabasesCount() > 0) { + hash = (37 * hash) + REFERENCING_DATABASES_FIELD_NUMBER; + hash = (53 * hash) + getReferencingDatabasesList().hashCode(); + } + if (getReferencingBackupsCount() > 0) { + hash = (37 * hash) + REFERENCING_BACKUPS_FIELD_NUMBER; + hash = (53 * hash) + getReferencingBackupsList().hashCode(); + } + hash = (37 * hash) + ETAG_FIELD_NUMBER; + hash = (53 * hash) + getEtag().hashCode(); + switch (computeCapacityCase_) { + case 5: + hash = (37 * hash) + NODE_COUNT_FIELD_NUMBER; + hash = (53 * hash) + getNodeCount(); + break; + case 6: + hash = (37 * hash) + PROCESSING_UNITS_FIELD_NUMBER; + hash = (53 
* hash) + getProcessingUnits(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.InstancePartition parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.InstancePartition parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.InstancePartition parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.InstancePartition parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.InstancePartition parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.InstancePartition parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.InstancePartition parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.InstancePartition parseFrom( + java.io.InputStream 
input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.InstancePartition parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.InstancePartition parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.InstancePartition parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.InstancePartition parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.InstancePartition prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * An isolated set of Cloud Spanner resources that databases can define
    +   * placements on.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.InstancePartition} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.InstancePartition) + com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_InstancePartition_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_InstancePartition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.InstancePartition.class, + com.google.spanner.admin.instance.v1.InstancePartition.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.InstancePartition.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetAutoscalingConfigFieldBuilder(); + internalGetCreateTimeFieldBuilder(); + internalGetUpdateTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + config_ = ""; + displayName_ = ""; + autoscalingConfig_ = null; + if (autoscalingConfigBuilder_ != null) { + autoscalingConfigBuilder_.dispose(); + autoscalingConfigBuilder_ = null; + } + state_ = 0; + createTime_ = null; + if (createTimeBuilder_ != null) { + 
createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + referencingDatabases_ = com.google.protobuf.LazyStringArrayList.emptyList(); + referencingBackups_ = com.google.protobuf.LazyStringArrayList.emptyList(); + etag_ = ""; + computeCapacityCase_ = 0; + computeCapacity_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_InstancePartition_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstancePartition getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstancePartition build() { + com.google.spanner.admin.instance.v1.InstancePartition result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstancePartition buildPartial() { + com.google.spanner.admin.instance.v1.InstancePartition result = + new com.google.spanner.admin.instance.v1.InstancePartition(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.InstancePartition result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.config_ = config_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.displayName_ = displayName_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000020) != 0)) { + 
result.autoscalingConfig_ = + autoscalingConfigBuilder_ == null + ? autoscalingConfig_ + : autoscalingConfigBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.state_ = state_; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.createTime_ = createTimeBuilder_ == null ? createTime_ : createTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.updateTime_ = updateTimeBuilder_ == null ? updateTime_ : updateTimeBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000200) != 0)) { + referencingDatabases_.makeImmutable(); + result.referencingDatabases_ = referencingDatabases_; + } + if (((from_bitField0_ & 0x00000400) != 0)) { + referencingBackups_.makeImmutable(); + result.referencingBackups_ = referencingBackups_; + } + if (((from_bitField0_ & 0x00000800) != 0)) { + result.etag_ = etag_; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs(com.google.spanner.admin.instance.v1.InstancePartition result) { + result.computeCapacityCase_ = computeCapacityCase_; + result.computeCapacity_ = this.computeCapacity_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.InstancePartition) { + return mergeFrom((com.google.spanner.admin.instance.v1.InstancePartition) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.InstancePartition other) { + if (other == com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getConfig().isEmpty()) { + config_ = other.config_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getDisplayName().isEmpty()) { + displayName_ = 
other.displayName_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasAutoscalingConfig()) { + mergeAutoscalingConfig(other.getAutoscalingConfig()); + } + if (other.state_ != 0) { + setStateValue(other.getStateValue()); + } + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (other.hasUpdateTime()) { + mergeUpdateTime(other.getUpdateTime()); + } + if (!other.referencingDatabases_.isEmpty()) { + if (referencingDatabases_.isEmpty()) { + referencingDatabases_ = other.referencingDatabases_; + bitField0_ |= 0x00000200; + } else { + ensureReferencingDatabasesIsMutable(); + referencingDatabases_.addAll(other.referencingDatabases_); + } + onChanged(); + } + if (!other.referencingBackups_.isEmpty()) { + if (referencingBackups_.isEmpty()) { + referencingBackups_ = other.referencingBackups_; + bitField0_ |= 0x00000400; + } else { + ensureReferencingBackupsIsMutable(); + referencingBackups_.addAll(other.referencingBackups_); + } + onChanged(); + } + if (!other.getEtag().isEmpty()) { + etag_ = other.etag_; + bitField0_ |= 0x00000800; + onChanged(); + } + switch (other.getComputeCapacityCase()) { + case NODE_COUNT: + { + setNodeCount(other.getNodeCount()); + break; + } + case PROCESSING_UNITS: + { + setProcessingUnits(other.getProcessingUnits()); + break; + } + case COMPUTECAPACITY_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + 
bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + config_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + displayName_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 40: + { + computeCapacity_ = input.readInt32(); + computeCapacityCase_ = 5; + break; + } // case 40 + case 48: + { + computeCapacity_ = input.readInt32(); + computeCapacityCase_ = 6; + break; + } // case 48 + case 56: + { + state_ = input.readEnum(); + bitField0_ |= 0x00000040; + break; + } // case 56 + case 66: + { + input.readMessage( + internalGetCreateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000080; + break; + } // case 66 + case 74: + { + input.readMessage( + internalGetUpdateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000100; + break; + } // case 74 + case 82: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureReferencingDatabasesIsMutable(); + referencingDatabases_.add(s); + break; + } // case 82 + case 90: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureReferencingBackupsIsMutable(); + referencingBackups_.add(s); + break; + } // case 90 + case 98: + { + etag_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000800; + break; + } // case 98 + case 106: + { + input.readMessage( + internalGetAutoscalingConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000020; + break; + } // case 106 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int computeCapacityCase_ = 0; + private java.lang.Object computeCapacity_; + + public ComputeCapacityCase 
getComputeCapacityCase() { + return ComputeCapacityCase.forNumber(computeCapacityCase_); + } + + public Builder clearComputeCapacity() { + computeCapacityCase_ = 0; + computeCapacity_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Required. A unique identifier for the instance partition. Values are of the
    +     * form
    +     * `projects/<project>/instances/<instance>/instancePartitions/[a-z][-a-z0-9]*[a-z0-9]`.
    +     * The final segment of the name must be between 2 and 64 characters in
    +     * length. An instance partition's name cannot be changed after the instance
    +     * partition is created.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. A unique identifier for the instance partition. Values are of the
    +     * form
    +     * `projects/<project>/instances/<instance>/instancePartitions/[a-z][-a-z0-9]*[a-z0-9]`.
    +     * The final segment of the name must be between 2 and 64 characters in
    +     * length. An instance partition's name cannot be changed after the instance
    +     * partition is created.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. A unique identifier for the instance partition. Values are of the
    +     * form
    +     * `projects/<project>/instances/<instance>/instancePartitions/[a-z][-a-z0-9]*[a-z0-9]`.
    +     * The final segment of the name must be between 2 and 64 characters in
    +     * length. An instance partition's name cannot be changed after the instance
    +     * partition is created.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A unique identifier for the instance partition. Values are of the
    +     * form
    +     * `projects/<project>/instances/<instance>/instancePartitions/[a-z][-a-z0-9]*[a-z0-9]`.
    +     * The final segment of the name must be between 2 and 64 characters in
    +     * length. An instance partition's name cannot be changed after the instance
    +     * partition is created.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A unique identifier for the instance partition. Values are of the
    +     * form
    +     * `projects/<project>/instances/<instance>/instancePartitions/[a-z][-a-z0-9]*[a-z0-9]`.
    +     * The final segment of the name must be between 2 and 64 characters in
    +     * length. An instance partition's name cannot be changed after the instance
    +     * partition is created.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object config_ = ""; + + /** + * + * + *
    +     * Required. The name of the instance partition's configuration. Values are of
    +     * the form `projects/<project>/instanceConfigs/<configuration>`. See also
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
    +     * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    +     * 
    + * + * + * string config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The config. + */ + public java.lang.String getConfig() { + java.lang.Object ref = config_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + config_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the instance partition's configuration. Values are of
    +     * the form `projects/<project>/instanceConfigs/<configuration>`. See also
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
    +     * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    +     * 
    + * + * + * string config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for config. + */ + public com.google.protobuf.ByteString getConfigBytes() { + java.lang.Object ref = config_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + config_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the instance partition's configuration. Values are of
    +     * the form `projects/<project>/instanceConfigs/<configuration>`. See also
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
    +     * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    +     * 
    + * + * + * string config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The config to set. + * @return This builder for chaining. + */ + public Builder setConfig(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + config_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the instance partition's configuration. Values are of
    +     * the form `projects/<project>/instanceConfigs/<configuration>`. See also
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
    +     * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    +     * 
    + * + * + * string config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearConfig() { + config_ = getDefaultInstance().getConfig(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the instance partition's configuration. Values are of
    +     * the form `projects/<project>/instanceConfigs/<configuration>`. See also
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
    +     * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    +     * 
    + * + * + * string config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for config to set. + * @return This builder for chaining. + */ + public Builder setConfigBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + config_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object displayName_ = ""; + + /** + * + * + *
    +     * Required. The descriptive name for this instance partition as it appears in
    +     * UIs. Must be unique per project and between 4 and 30 characters in length.
    +     * 
    + * + * string display_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The displayName. + */ + public java.lang.String getDisplayName() { + java.lang.Object ref = displayName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + displayName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The descriptive name for this instance partition as it appears in
    +     * UIs. Must be unique per project and between 4 and 30 characters in length.
    +     * 
    + * + * string display_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for displayName. + */ + public com.google.protobuf.ByteString getDisplayNameBytes() { + java.lang.Object ref = displayName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + displayName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The descriptive name for this instance partition as it appears in
    +     * UIs. Must be unique per project and between 4 and 30 characters in length.
    +     * 
    + * + * string display_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The displayName to set. + * @return This builder for chaining. + */ + public Builder setDisplayName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + displayName_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The descriptive name for this instance partition as it appears in
    +     * UIs. Must be unique per project and between 4 and 30 characters in length.
    +     * 
    + * + * string display_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearDisplayName() { + displayName_ = getDefaultInstance().getDisplayName(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The descriptive name for this instance partition as it appears in
    +     * UIs. Must be unique per project and between 4 and 30 characters in length.
    +     * 
    + * + * string display_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for displayName to set. + * @return This builder for chaining. + */ + public Builder setDisplayNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + displayName_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The number of nodes allocated to this instance partition.
    +     *
    +     * Users can set the `node_count` field to specify the target number of
    +     * nodes allocated to the instance partition.
    +     *
    +     * This may be zero in API responses for instance partitions that are not
    +     * yet in state `READY`.
    +     * 
    + * + * int32 node_count = 5; + * + * @return Whether the nodeCount field is set. + */ + public boolean hasNodeCount() { + return computeCapacityCase_ == 5; + } + + /** + * + * + *
    +     * The number of nodes allocated to this instance partition.
    +     *
    +     * Users can set the `node_count` field to specify the target number of
    +     * nodes allocated to the instance partition.
    +     *
    +     * This may be zero in API responses for instance partitions that are not
    +     * yet in state `READY`.
    +     * 
    + * + * int32 node_count = 5; + * + * @return The nodeCount. + */ + public int getNodeCount() { + if (computeCapacityCase_ == 5) { + return (java.lang.Integer) computeCapacity_; + } + return 0; + } + + /** + * + * + *
    +     * The number of nodes allocated to this instance partition.
    +     *
    +     * Users can set the `node_count` field to specify the target number of
    +     * nodes allocated to the instance partition.
    +     *
    +     * This may be zero in API responses for instance partitions that are not
    +     * yet in state `READY`.
    +     * 
    + * + * int32 node_count = 5; + * + * @param value The nodeCount to set. + * @return This builder for chaining. + */ + public Builder setNodeCount(int value) { + + computeCapacityCase_ = 5; + computeCapacity_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The number of nodes allocated to this instance partition.
    +     *
    +     * Users can set the `node_count` field to specify the target number of
    +     * nodes allocated to the instance partition.
    +     *
    +     * This may be zero in API responses for instance partitions that are not
    +     * yet in state `READY`.
    +     * 
    + * + * int32 node_count = 5; + * + * @return This builder for chaining. + */ + public Builder clearNodeCount() { + if (computeCapacityCase_ == 5) { + computeCapacityCase_ = 0; + computeCapacity_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The number of processing units allocated to this instance partition.
    +     *
    +     * Users can set the `processing_units` field to specify the target number
    +     * of processing units allocated to the instance partition.
    +     *
    +     * This might be zero in API responses for instance partitions that are not
    +     * yet in the `READY` state.
    +     * 
    + * + * int32 processing_units = 6; + * + * @return Whether the processingUnits field is set. + */ + public boolean hasProcessingUnits() { + return computeCapacityCase_ == 6; + } + + /** + * + * + *
    +     * The number of processing units allocated to this instance partition.
    +     *
    +     * Users can set the `processing_units` field to specify the target number
    +     * of processing units allocated to the instance partition.
    +     *
    +     * This might be zero in API responses for instance partitions that are not
    +     * yet in the `READY` state.
    +     * 
    + * + * int32 processing_units = 6; + * + * @return The processingUnits. + */ + public int getProcessingUnits() { + if (computeCapacityCase_ == 6) { + return (java.lang.Integer) computeCapacity_; + } + return 0; + } + + /** + * + * + *
    +     * The number of processing units allocated to this instance partition.
    +     *
    +     * Users can set the `processing_units` field to specify the target number
    +     * of processing units allocated to the instance partition.
    +     *
    +     * This might be zero in API responses for instance partitions that are not
    +     * yet in the `READY` state.
    +     * 
    + * + * int32 processing_units = 6; + * + * @param value The processingUnits to set. + * @return This builder for chaining. + */ + public Builder setProcessingUnits(int value) { + + computeCapacityCase_ = 6; + computeCapacity_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The number of processing units allocated to this instance partition.
    +     *
    +     * Users can set the `processing_units` field to specify the target number
    +     * of processing units allocated to the instance partition.
    +     *
    +     * This might be zero in API responses for instance partitions that are not
    +     * yet in the `READY` state.
    +     * 
    + * + * int32 processing_units = 6; + * + * @return This builder for chaining. + */ + public Builder clearProcessingUnits() { + if (computeCapacityCase_ == 6) { + computeCapacityCase_ = 0; + computeCapacity_ = null; + onChanged(); + } + return this; + } + + private com.google.spanner.admin.instance.v1.AutoscalingConfig autoscalingConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig, + com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder> + autoscalingConfigBuilder_; + + /** + * + * + *
    +     * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +     * field is set. When autoscaling is enabled, fields in compute_capacity are
    +     * treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +     * allocated to the instance partition.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the autoscalingConfig field is set. + */ + public boolean hasAutoscalingConfig() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
    +     * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +     * field is set. When autoscaling is enabled, fields in compute_capacity are
    +     * treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +     * allocated to the instance partition.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoscalingConfig. + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig getAutoscalingConfig() { + if (autoscalingConfigBuilder_ == null) { + return autoscalingConfig_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance() + : autoscalingConfig_; + } else { + return autoscalingConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +     * field is set. When autoscaling is enabled, fields in compute_capacity are
    +     * treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +     * allocated to the instance partition.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAutoscalingConfig( + com.google.spanner.admin.instance.v1.AutoscalingConfig value) { + if (autoscalingConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + autoscalingConfig_ = value; + } else { + autoscalingConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +     * field is set. When autoscaling is enabled, fields in compute_capacity are
    +     * treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +     * allocated to the instance partition.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAutoscalingConfig( + com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder builderForValue) { + if (autoscalingConfigBuilder_ == null) { + autoscalingConfig_ = builderForValue.build(); + } else { + autoscalingConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +     * field is set. When autoscaling is enabled, fields in compute_capacity are
    +     * treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +     * allocated to the instance partition.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeAutoscalingConfig( + com.google.spanner.admin.instance.v1.AutoscalingConfig value) { + if (autoscalingConfigBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0) + && autoscalingConfig_ != null + && autoscalingConfig_ + != com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance()) { + getAutoscalingConfigBuilder().mergeFrom(value); + } else { + autoscalingConfig_ = value; + } + } else { + autoscalingConfigBuilder_.mergeFrom(value); + } + if (autoscalingConfig_ != null) { + bitField0_ |= 0x00000020; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +     * field is set. When autoscaling is enabled, fields in compute_capacity are
    +     * treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +     * allocated to the instance partition.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearAutoscalingConfig() { + bitField0_ = (bitField0_ & ~0x00000020); + autoscalingConfig_ = null; + if (autoscalingConfigBuilder_ != null) { + autoscalingConfigBuilder_.dispose(); + autoscalingConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +     * field is set. When autoscaling is enabled, fields in compute_capacity are
    +     * treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +     * allocated to the instance partition.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder + getAutoscalingConfigBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return internalGetAutoscalingConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +     * field is set. When autoscaling is enabled, fields in compute_capacity are
    +     * treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +     * allocated to the instance partition.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder + getAutoscalingConfigOrBuilder() { + if (autoscalingConfigBuilder_ != null) { + return autoscalingConfigBuilder_.getMessageOrBuilder(); + } else { + return autoscalingConfig_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance() + : autoscalingConfig_; + } + } + + /** + * + * + *
    +     * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +     * field is set. When autoscaling is enabled, fields in compute_capacity are
    +     * treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +     * allocated to the instance partition.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig, + com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder> + internalGetAutoscalingConfigFieldBuilder() { + if (autoscalingConfigBuilder_ == null) { + autoscalingConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig, + com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder>( + getAutoscalingConfig(), getParentForChildren(), isClean()); + autoscalingConfig_ = null; + } + return autoscalingConfigBuilder_; + } + + private int state_ = 0; + + /** + * + * + *
    +     * Output only. The current instance partition state.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition.State state = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + @java.lang.Override + public int getStateValue() { + return state_; + } + + /** + * + * + *
    +     * Output only. The current instance partition state.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition.State state = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The enum numeric value on the wire for state to set. + * @return This builder for chaining. + */ + public Builder setStateValue(int value) { + state_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The current instance partition state.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition.State state = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstancePartition.State getState() { + com.google.spanner.admin.instance.v1.InstancePartition.State result = + com.google.spanner.admin.instance.v1.InstancePartition.State.forNumber(state_); + return result == null + ? com.google.spanner.admin.instance.v1.InstancePartition.State.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * Output only. The current instance partition state.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition.State state = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The state to set. + * @return This builder for chaining. + */ + public Builder setState(com.google.spanner.admin.instance.v1.InstancePartition.State value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000040; + state_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The current instance partition state.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition.State state = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000040); + state_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + + /** + * + * + *
    +     * Output only. The time at which the instance partition was created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
    +     * Output only. The time at which the instance partition was created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. The time at which the instance partition was created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + } else { + createTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The time at which the instance partition was created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The time at which the instance partition was created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (((bitField0_ & 0x00000080) != 0) + && createTime_ != null + && createTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCreateTimeBuilder().mergeFrom(value); + } else { + createTime_ = value; + } + } else { + createTimeBuilder_.mergeFrom(value); + } + if (createTime_ != null) { + bitField0_ |= 0x00000080; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. The time at which the instance partition was created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000080); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The time at which the instance partition was created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + bitField0_ |= 0x00000080; + onChanged(); + return internalGetCreateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. The time at which the instance partition was created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + + /** + * + * + *
    +     * Output only. The time at which the instance partition was created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private com.google.protobuf.Timestamp updateTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + updateTimeBuilder_; + + /** + * + * + *
    +     * Output only. The time at which the instance partition was most recently
    +     * updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000100) != 0); + } + + /** + * + * + *
    +     * Output only. The time at which the instance partition was most recently
    +     * updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + public com.google.protobuf.Timestamp getUpdateTime() { + if (updateTimeBuilder_ == null) { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } else { + return updateTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. The time at which the instance partition was most recently
    +     * updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateTime_ = value; + } else { + updateTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The time at which the instance partition was most recently
    +     * updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (updateTimeBuilder_ == null) { + updateTime_ = builderForValue.build(); + } else { + updateTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The time at which the instance partition was most recently
    +     * updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (((bitField0_ & 0x00000100) != 0) + && updateTime_ != null + && updateTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getUpdateTimeBuilder().mergeFrom(value); + } else { + updateTime_ = value; + } + } else { + updateTimeBuilder_.mergeFrom(value); + } + if (updateTime_ != null) { + bitField0_ |= 0x00000100; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. The time at which the instance partition was most recently
    +     * updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearUpdateTime() { + bitField0_ = (bitField0_ & ~0x00000100); + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The time at which the instance partition was most recently
    +     * updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getUpdateTimeBuilder() { + bitField0_ |= 0x00000100; + onChanged(); + return internalGetUpdateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. The time at which the instance partition was most recently
    +     * updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + if (updateTimeBuilder_ != null) { + return updateTimeBuilder_.getMessageOrBuilder(); + } else { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } + } + + /** + * + * + *
    +     * Output only. The time at which the instance partition was most recently
    +     * updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetUpdateTimeFieldBuilder() { + if (updateTimeBuilder_ == null) { + updateTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getUpdateTime(), getParentForChildren(), isClean()); + updateTime_ = null; + } + return updateTimeBuilder_; + } + + private com.google.protobuf.LazyStringArrayList referencingDatabases_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureReferencingDatabasesIsMutable() { + if (!referencingDatabases_.isModifiable()) { + referencingDatabases_ = new com.google.protobuf.LazyStringArrayList(referencingDatabases_); + } + bitField0_ |= 0x00000200; + } + + /** + * + * + *
    +     * Output only. The names of the databases that reference this
    +     * instance partition. Referencing databases should share the parent instance.
    +     * The existence of any referencing database prevents the instance partition
    +     * from being deleted.
    +     * 
    + * + * + * repeated string referencing_databases = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return A list containing the referencingDatabases. + */ + public com.google.protobuf.ProtocolStringList getReferencingDatabasesList() { + referencingDatabases_.makeImmutable(); + return referencingDatabases_; + } + + /** + * + * + *
    +     * Output only. The names of the databases that reference this
    +     * instance partition. Referencing databases should share the parent instance.
    +     * The existence of any referencing database prevents the instance partition
    +     * from being deleted.
    +     * 
    + * + * + * repeated string referencing_databases = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The count of referencingDatabases. + */ + public int getReferencingDatabasesCount() { + return referencingDatabases_.size(); + } + + /** + * + * + *
    +     * Output only. The names of the databases that reference this
    +     * instance partition. Referencing databases should share the parent instance.
    +     * The existence of any referencing database prevents the instance partition
    +     * from being deleted.
    +     * 
    + * + * + * repeated string referencing_databases = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param index The index of the element to return. + * @return The referencingDatabases at the given index. + */ + public java.lang.String getReferencingDatabases(int index) { + return referencingDatabases_.get(index); + } + + /** + * + * + *
    +     * Output only. The names of the databases that reference this
    +     * instance partition. Referencing databases should share the parent instance.
    +     * The existence of any referencing database prevents the instance partition
    +     * from being deleted.
    +     * 
    + * + * + * repeated string referencing_databases = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param index The index of the value to return. + * @return The bytes of the referencingDatabases at the given index. + */ + public com.google.protobuf.ByteString getReferencingDatabasesBytes(int index) { + return referencingDatabases_.getByteString(index); + } + + /** + * + * + *
    +     * Output only. The names of the databases that reference this
    +     * instance partition. Referencing databases should share the parent instance.
    +     * The existence of any referencing database prevents the instance partition
    +     * from being deleted.
    +     * 
    + * + * + * repeated string referencing_databases = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param index The index to set the value at. + * @param value The referencingDatabases to set. + * @return This builder for chaining. + */ + public Builder setReferencingDatabases(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureReferencingDatabasesIsMutable(); + referencingDatabases_.set(index, value); + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The names of the databases that reference this
    +     * instance partition. Referencing databases should share the parent instance.
    +     * The existence of any referencing database prevents the instance partition
    +     * from being deleted.
    +     * 
    + * + * + * repeated string referencing_databases = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The referencingDatabases to add. + * @return This builder for chaining. + */ + public Builder addReferencingDatabases(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureReferencingDatabasesIsMutable(); + referencingDatabases_.add(value); + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The names of the databases that reference this
    +     * instance partition. Referencing databases should share the parent instance.
    +     * The existence of any referencing database prevents the instance partition
    +     * from being deleted.
    +     * 
    + * + * + * repeated string referencing_databases = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param values The referencingDatabases to add. + * @return This builder for chaining. + */ + public Builder addAllReferencingDatabases(java.lang.Iterable values) { + ensureReferencingDatabasesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, referencingDatabases_); + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The names of the databases that reference this
    +     * instance partition. Referencing databases should share the parent instance.
    +     * The existence of any referencing database prevents the instance partition
    +     * from being deleted.
    +     * 
    + * + * + * repeated string referencing_databases = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearReferencingDatabases() { + referencingDatabases_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000200); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The names of the databases that reference this
    +     * instance partition. Referencing databases should share the parent instance.
    +     * The existence of any referencing database prevents the instance partition
    +     * from being deleted.
    +     * 
    + * + * + * repeated string referencing_databases = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The bytes of the referencingDatabases to add. + * @return This builder for chaining. + */ + public Builder addReferencingDatabasesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureReferencingDatabasesIsMutable(); + referencingDatabases_.add(value); + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList referencingBackups_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureReferencingBackupsIsMutable() { + if (!referencingBackups_.isModifiable()) { + referencingBackups_ = new com.google.protobuf.LazyStringArrayList(referencingBackups_); + } + bitField0_ |= 0x00000400; + } + + /** + * + * + *
    +     * Output only. Deprecated: This field is not populated.
    +     * Output only. The names of the backups that reference this instance
    +     * partition. Referencing backups should share the parent instance. The
    +     * existence of any referencing backup prevents the instance partition from
    +     * being deleted.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [deprecated = true, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @deprecated google.spanner.admin.instance.v1.InstancePartition.referencing_backups is + * deprecated. See google/spanner/admin/instance/v1/spanner_instance_admin.proto;l=1855 + * @return A list containing the referencingBackups. + */ + @java.lang.Deprecated + public com.google.protobuf.ProtocolStringList getReferencingBackupsList() { + referencingBackups_.makeImmutable(); + return referencingBackups_; + } + + /** + * + * + *
    +     * Output only. Deprecated: This field is not populated.
    +     * Output only. The names of the backups that reference this instance
    +     * partition. Referencing backups should share the parent instance. The
    +     * existence of any referencing backup prevents the instance partition from
    +     * being deleted.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [deprecated = true, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @deprecated google.spanner.admin.instance.v1.InstancePartition.referencing_backups is + * deprecated. See google/spanner/admin/instance/v1/spanner_instance_admin.proto;l=1855 + * @return The count of referencingBackups. + */ + @java.lang.Deprecated + public int getReferencingBackupsCount() { + return referencingBackups_.size(); + } + + /** + * + * + *
    +     * Output only. Deprecated: This field is not populated.
    +     * Output only. The names of the backups that reference this instance
    +     * partition. Referencing backups should share the parent instance. The
    +     * existence of any referencing backup prevents the instance partition from
    +     * being deleted.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [deprecated = true, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @deprecated google.spanner.admin.instance.v1.InstancePartition.referencing_backups is + * deprecated. See google/spanner/admin/instance/v1/spanner_instance_admin.proto;l=1855 + * @param index The index of the element to return. + * @return The referencingBackups at the given index. + */ + @java.lang.Deprecated + public java.lang.String getReferencingBackups(int index) { + return referencingBackups_.get(index); + } + + /** + * + * + *
    +     * Output only. Deprecated: This field is not populated.
    +     * Output only. The names of the backups that reference this instance
    +     * partition. Referencing backups should share the parent instance. The
    +     * existence of any referencing backup prevents the instance partition from
    +     * being deleted.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [deprecated = true, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @deprecated google.spanner.admin.instance.v1.InstancePartition.referencing_backups is + * deprecated. See google/spanner/admin/instance/v1/spanner_instance_admin.proto;l=1855 + * @param index The index of the value to return. + * @return The bytes of the referencingBackups at the given index. + */ + @java.lang.Deprecated + public com.google.protobuf.ByteString getReferencingBackupsBytes(int index) { + return referencingBackups_.getByteString(index); + } + + /** + * + * + *
    +     * Output only. Deprecated: This field is not populated.
    +     * Output only. The names of the backups that reference this instance
    +     * partition. Referencing backups should share the parent instance. The
    +     * existence of any referencing backup prevents the instance partition from
    +     * being deleted.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [deprecated = true, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @deprecated google.spanner.admin.instance.v1.InstancePartition.referencing_backups is + * deprecated. See google/spanner/admin/instance/v1/spanner_instance_admin.proto;l=1855 + * @param index The index to set the value at. + * @param value The referencingBackups to set. + * @return This builder for chaining. + */ + @java.lang.Deprecated + public Builder setReferencingBackups(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureReferencingBackupsIsMutable(); + referencingBackups_.set(index, value); + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Deprecated: This field is not populated.
    +     * Output only. The names of the backups that reference this instance
    +     * partition. Referencing backups should share the parent instance. The
    +     * existence of any referencing backup prevents the instance partition from
    +     * being deleted.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [deprecated = true, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @deprecated google.spanner.admin.instance.v1.InstancePartition.referencing_backups is + * deprecated. See google/spanner/admin/instance/v1/spanner_instance_admin.proto;l=1855 + * @param value The referencingBackups to add. + * @return This builder for chaining. + */ + @java.lang.Deprecated + public Builder addReferencingBackups(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureReferencingBackupsIsMutable(); + referencingBackups_.add(value); + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Deprecated: This field is not populated.
    +     * Output only. The names of the backups that reference this instance
    +     * partition. Referencing backups should share the parent instance. The
    +     * existence of any referencing backup prevents the instance partition from
    +     * being deleted.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [deprecated = true, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @deprecated google.spanner.admin.instance.v1.InstancePartition.referencing_backups is + * deprecated. See google/spanner/admin/instance/v1/spanner_instance_admin.proto;l=1855 + * @param values The referencingBackups to add. + * @return This builder for chaining. + */ + @java.lang.Deprecated + public Builder addAllReferencingBackups(java.lang.Iterable values) { + ensureReferencingBackupsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, referencingBackups_); + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Deprecated: This field is not populated.
    +     * Output only. The names of the backups that reference this instance
    +     * partition. Referencing backups should share the parent instance. The
    +     * existence of any referencing backup prevents the instance partition from
    +     * being deleted.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [deprecated = true, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @deprecated google.spanner.admin.instance.v1.InstancePartition.referencing_backups is + * deprecated. See google/spanner/admin/instance/v1/spanner_instance_admin.proto;l=1855 + * @return This builder for chaining. + */ + @java.lang.Deprecated + public Builder clearReferencingBackups() { + referencingBackups_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000400); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Deprecated: This field is not populated.
    +     * Output only. The names of the backups that reference this instance
    +     * partition. Referencing backups should share the parent instance. The
    +     * existence of any referencing backup prevents the instance partition from
    +     * being deleted.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [deprecated = true, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @deprecated google.spanner.admin.instance.v1.InstancePartition.referencing_backups is + * deprecated. See google/spanner/admin/instance/v1/spanner_instance_admin.proto;l=1855 + * @param value The bytes of the referencingBackups to add. + * @return This builder for chaining. + */ + @java.lang.Deprecated + public Builder addReferencingBackupsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureReferencingBackupsIsMutable(); + referencingBackups_.add(value); + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + private java.lang.Object etag_ = ""; + + /** + * + * + *
    +     * Used for optimistic concurrency control as a way
    +     * to help prevent simultaneous updates of a instance partition from
    +     * overwriting each other. It is strongly suggested that systems make use of
    +     * the etag in the read-modify-write cycle to perform instance partition
    +     * updates in order to avoid race conditions: An etag is returned in the
    +     * response which contains instance partitions, and systems are expected to
    +     * put that etag in the request to update instance partitions to ensure that
    +     * their change will be applied to the same version of the instance partition.
    +     * If no etag is provided in the call to update instance partition, then the
    +     * existing instance partition is overwritten blindly.
    +     * 
    + * + * string etag = 12; + * + * @return The etag. + */ + public java.lang.String getEtag() { + java.lang.Object ref = etag_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + etag_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Used for optimistic concurrency control as a way
    +     * to help prevent simultaneous updates of a instance partition from
    +     * overwriting each other. It is strongly suggested that systems make use of
    +     * the etag in the read-modify-write cycle to perform instance partition
    +     * updates in order to avoid race conditions: An etag is returned in the
    +     * response which contains instance partitions, and systems are expected to
    +     * put that etag in the request to update instance partitions to ensure that
    +     * their change will be applied to the same version of the instance partition.
    +     * If no etag is provided in the call to update instance partition, then the
    +     * existing instance partition is overwritten blindly.
    +     * 
    + * + * string etag = 12; + * + * @return The bytes for etag. + */ + public com.google.protobuf.ByteString getEtagBytes() { + java.lang.Object ref = etag_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + etag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Used for optimistic concurrency control as a way
    +     * to help prevent simultaneous updates of a instance partition from
    +     * overwriting each other. It is strongly suggested that systems make use of
    +     * the etag in the read-modify-write cycle to perform instance partition
    +     * updates in order to avoid race conditions: An etag is returned in the
    +     * response which contains instance partitions, and systems are expected to
    +     * put that etag in the request to update instance partitions to ensure that
    +     * their change will be applied to the same version of the instance partition.
    +     * If no etag is provided in the call to update instance partition, then the
    +     * existing instance partition is overwritten blindly.
    +     * 
    + * + * string etag = 12; + * + * @param value The etag to set. + * @return This builder for chaining. + */ + public Builder setEtag(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + etag_ = value; + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Used for optimistic concurrency control as a way
    +     * to help prevent simultaneous updates of a instance partition from
    +     * overwriting each other. It is strongly suggested that systems make use of
    +     * the etag in the read-modify-write cycle to perform instance partition
    +     * updates in order to avoid race conditions: An etag is returned in the
    +     * response which contains instance partitions, and systems are expected to
    +     * put that etag in the request to update instance partitions to ensure that
    +     * their change will be applied to the same version of the instance partition.
    +     * If no etag is provided in the call to update instance partition, then the
    +     * existing instance partition is overwritten blindly.
    +     * 
    + * + * string etag = 12; + * + * @return This builder for chaining. + */ + public Builder clearEtag() { + etag_ = getDefaultInstance().getEtag(); + bitField0_ = (bitField0_ & ~0x00000800); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Used for optimistic concurrency control as a way
    +     * to help prevent simultaneous updates of a instance partition from
    +     * overwriting each other. It is strongly suggested that systems make use of
    +     * the etag in the read-modify-write cycle to perform instance partition
    +     * updates in order to avoid race conditions: An etag is returned in the
    +     * response which contains instance partitions, and systems are expected to
    +     * put that etag in the request to update instance partitions to ensure that
    +     * their change will be applied to the same version of the instance partition.
    +     * If no etag is provided in the call to update instance partition, then the
    +     * existing instance partition is overwritten blindly.
    +     * 
    + * + * string etag = 12; + * + * @param value The bytes for etag to set. + * @return This builder for chaining. + */ + public Builder setEtagBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + etag_ = value; + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.InstancePartition) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.InstancePartition) + private static final com.google.spanner.admin.instance.v1.InstancePartition DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.InstancePartition(); + } + + public static com.google.spanner.admin.instance.v1.InstancePartition getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public InstancePartition parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override 
+ public com.google.spanner.admin.instance.v1.InstancePartition getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstancePartitionName.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstancePartitionName.java new file mode 100644 index 000000000000..1812ebe25e13 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstancePartitionName.java @@ -0,0 +1,231 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.spanner.admin.instance.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class InstancePartitionName implements ResourceName { + private static final PathTemplate PROJECT_INSTANCE_INSTANCE_PARTITION = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/instances/{instance}/instancePartitions/{instance_partition}"); + private volatile Map fieldValuesMap; + private final String project; + private final String instance; + private final String instancePartition; + + @Deprecated + protected InstancePartitionName() { + project = null; + instance = null; + instancePartition = null; + } + + private InstancePartitionName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + instance = Preconditions.checkNotNull(builder.getInstance()); + instancePartition = Preconditions.checkNotNull(builder.getInstancePartition()); + } + + public String getProject() { + return project; + } + + public String getInstance() { + return instance; + } + + public String getInstancePartition() { + return instancePartition; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static InstancePartitionName of( + String project, String instance, String instancePartition) { + return newBuilder() + .setProject(project) + .setInstance(instance) + .setInstancePartition(instancePartition) + .build(); + } + + public static String format(String project, String instance, String instancePartition) { + return newBuilder() + .setProject(project) + .setInstance(instance) + .setInstancePartition(instancePartition) + .build() + .toString(); + } + + public static InstancePartitionName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_INSTANCE_INSTANCE_PARTITION.validatedMatch( + formattedString, "InstancePartitionName.parse: formattedString not in valid format"); + return of( + matchMap.get("project"), matchMap.get("instance"), 
matchMap.get("instance_partition")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (InstancePartitionName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_INSTANCE_INSTANCE_PARTITION.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (instance != null) { + fieldMapBuilder.put("instance", instance); + } + if (instancePartition != null) { + fieldMapBuilder.put("instance_partition", instancePartition); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_INSTANCE_INSTANCE_PARTITION.instantiate( + "project", project, "instance", instance, "instance_partition", instancePartition); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + InstancePartitionName that = ((InstancePartitionName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.instance, that.instance) + && Objects.equals(this.instancePartition, that.instancePartition); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; 
+ h ^= Objects.hashCode(instance); + h *= 1000003; + h ^= Objects.hashCode(instancePartition); + return h; + } + + /** + * Builder for projects/{project}/instances/{instance}/instancePartitions/{instance_partition}. + */ + public static class Builder { + private String project; + private String instance; + private String instancePartition; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getInstance() { + return instance; + } + + public String getInstancePartition() { + return instancePartition; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setInstance(String instance) { + this.instance = instance; + return this; + } + + public Builder setInstancePartition(String instancePartition) { + this.instancePartition = instancePartition; + return this; + } + + private Builder(InstancePartitionName instancePartitionName) { + this.project = instancePartitionName.project; + this.instance = instancePartitionName.instance; + this.instancePartition = instancePartitionName.instancePartition; + } + + public InstancePartitionName build() { + return new InstancePartitionName(this); + } + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstancePartitionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstancePartitionOrBuilder.java new file mode 100644 index 000000000000..e60669462db9 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstancePartitionOrBuilder.java @@ -0,0 +1,576 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface InstancePartitionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.InstancePartition) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. A unique identifier for the instance partition. Values are of the
    +   * form
    +   * `projects/<project>/instances/<instance>/instancePartitions/[a-z][-a-z0-9]*[a-z0-9]`.
    +   * The final segment of the name must be between 2 and 64 characters in
    +   * length. An instance partition's name cannot be changed after the instance
    +   * partition is created.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Required. A unique identifier for the instance partition. Values are of the
    +   * form
    +   * `projects/<project>/instances/<instance>/instancePartitions/[a-z][-a-z0-9]*[a-z0-9]`.
    +   * The final segment of the name must be between 2 and 64 characters in
    +   * length. An instance partition's name cannot be changed after the instance
    +   * partition is created.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +   * Required. The name of the instance partition's configuration. Values are of
    +   * the form `projects/<project>/instanceConfigs/<configuration>`. See also
    +   * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
    +   * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    +   * 
    + * + * + * string config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The config. + */ + java.lang.String getConfig(); + + /** + * + * + *
    +   * Required. The name of the instance partition's configuration. Values are of
    +   * the form `projects/<project>/instanceConfigs/<configuration>`. See also
    +   * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
    +   * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    +   * 
    + * + * + * string config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for config. + */ + com.google.protobuf.ByteString getConfigBytes(); + + /** + * + * + *
    +   * Required. The descriptive name for this instance partition as it appears in
    +   * UIs. Must be unique per project and between 4 and 30 characters in length.
    +   * 
    + * + * string display_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The displayName. + */ + java.lang.String getDisplayName(); + + /** + * + * + *
    +   * Required. The descriptive name for this instance partition as it appears in
    +   * UIs. Must be unique per project and between 4 and 30 characters in length.
    +   * 
    + * + * string display_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for displayName. + */ + com.google.protobuf.ByteString getDisplayNameBytes(); + + /** + * + * + *
    +   * The number of nodes allocated to this instance partition.
    +   *
    +   * Users can set the `node_count` field to specify the target number of
    +   * nodes allocated to the instance partition.
    +   *
    +   * This may be zero in API responses for instance partitions that are not
    +   * yet in state `READY`.
    +   * 
    + * + * int32 node_count = 5; + * + * @return Whether the nodeCount field is set. + */ + boolean hasNodeCount(); + + /** + * + * + *
    +   * The number of nodes allocated to this instance partition.
    +   *
    +   * Users can set the `node_count` field to specify the target number of
    +   * nodes allocated to the instance partition.
    +   *
    +   * This may be zero in API responses for instance partitions that are not
    +   * yet in state `READY`.
    +   * 
    + * + * int32 node_count = 5; + * + * @return The nodeCount. + */ + int getNodeCount(); + + /** + * + * + *
    +   * The number of processing units allocated to this instance partition.
    +   *
    +   * Users can set the `processing_units` field to specify the target number
    +   * of processing units allocated to the instance partition.
    +   *
    +   * This might be zero in API responses for instance partitions that are not
    +   * yet in the `READY` state.
    +   * 
    + * + * int32 processing_units = 6; + * + * @return Whether the processingUnits field is set. + */ + boolean hasProcessingUnits(); + + /** + * + * + *
    +   * The number of processing units allocated to this instance partition.
    +   *
    +   * Users can set the `processing_units` field to specify the target number
    +   * of processing units allocated to the instance partition.
    +   *
    +   * This might be zero in API responses for instance partitions that are not
    +   * yet in the `READY` state.
    +   * 
    + * + * int32 processing_units = 6; + * + * @return The processingUnits. + */ + int getProcessingUnits(); + + /** + * + * + *
    +   * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +   * field is set. When autoscaling is enabled, fields in compute_capacity are
    +   * treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +   * allocated to the instance partition.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the autoscalingConfig field is set. + */ + boolean hasAutoscalingConfig(); + + /** + * + * + *
    +   * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +   * field is set. When autoscaling is enabled, fields in compute_capacity are
    +   * treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +   * allocated to the instance partition.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoscalingConfig. + */ + com.google.spanner.admin.instance.v1.AutoscalingConfig getAutoscalingConfig(); + + /** + * + * + *
    +   * Optional. The autoscaling configuration. Autoscaling is enabled if this
    +   * field is set. When autoscaling is enabled, fields in compute_capacity are
    +   * treated as OUTPUT_ONLY fields and reflect the current compute capacity
    +   * allocated to the instance partition.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 13 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder getAutoscalingConfigOrBuilder(); + + /** + * + * + *
    +   * Output only. The current instance partition state.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition.State state = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for state. + */ + int getStateValue(); + + /** + * + * + *
    +   * Output only. The current instance partition state.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition.State state = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The state. + */ + com.google.spanner.admin.instance.v1.InstancePartition.State getState(); + + /** + * + * + *
    +   * Output only. The time at which the instance partition was created.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + + /** + * + * + *
    +   * Output only. The time at which the instance partition was created.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + + /** + * + * + *
    +   * Output only. The time at which the instance partition was created.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
    +   * Output only. The time at which the instance partition was most recently
    +   * updated.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + boolean hasUpdateTime(); + + /** + * + * + *
    +   * Output only. The time at which the instance partition was most recently
    +   * updated.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + com.google.protobuf.Timestamp getUpdateTime(); + + /** + * + * + *
    +   * Output only. The time at which the instance partition was most recently
    +   * updated.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder(); + + /** + * + * + *
    +   * Output only. The names of the databases that reference this
    +   * instance partition. Referencing databases should share the parent instance.
    +   * The existence of any referencing database prevents the instance partition
    +   * from being deleted.
    +   * 
    + * + * repeated string referencing_databases = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return A list containing the referencingDatabases. + */ + java.util.List getReferencingDatabasesList(); + + /** + * + * + *
    +   * Output only. The names of the databases that reference this
    +   * instance partition. Referencing databases should share the parent instance.
    +   * The existence of any referencing database prevents the instance partition
    +   * from being deleted.
    +   * 
    + * + * repeated string referencing_databases = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The count of referencingDatabases. + */ + int getReferencingDatabasesCount(); + + /** + * + * + *
    +   * Output only. The names of the databases that reference this
    +   * instance partition. Referencing databases should share the parent instance.
    +   * The existence of any referencing database prevents the instance partition
    +   * from being deleted.
    +   * 
    + * + * repeated string referencing_databases = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param index The index of the element to return. + * @return The referencingDatabases at the given index. + */ + java.lang.String getReferencingDatabases(int index); + + /** + * + * + *
    +   * Output only. The names of the databases that reference this
    +   * instance partition. Referencing databases should share the parent instance.
    +   * The existence of any referencing database prevents the instance partition
    +   * from being deleted.
    +   * 
    + * + * repeated string referencing_databases = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param index The index of the value to return. + * @return The bytes of the referencingDatabases at the given index. + */ + com.google.protobuf.ByteString getReferencingDatabasesBytes(int index); + + /** + * + * + *
    +   * Output only. Deprecated: This field is not populated.
    +   * Output only. The names of the backups that reference this instance
    +   * partition. Referencing backups should share the parent instance. The
    +   * existence of any referencing backup prevents the instance partition from
    +   * being deleted.
    +   * 
    + * + * + * repeated string referencing_backups = 11 [deprecated = true, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @deprecated google.spanner.admin.instance.v1.InstancePartition.referencing_backups is + * deprecated. See google/spanner/admin/instance/v1/spanner_instance_admin.proto;l=1855 + * @return A list containing the referencingBackups. + */ + @java.lang.Deprecated + java.util.List getReferencingBackupsList(); + + /** + * + * + *
    +   * Output only. Deprecated: This field is not populated.
    +   * Output only. The names of the backups that reference this instance
    +   * partition. Referencing backups should share the parent instance. The
    +   * existence of any referencing backup prevents the instance partition from
    +   * being deleted.
    +   * 
    + * + * + * repeated string referencing_backups = 11 [deprecated = true, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @deprecated google.spanner.admin.instance.v1.InstancePartition.referencing_backups is + * deprecated. See google/spanner/admin/instance/v1/spanner_instance_admin.proto;l=1855 + * @return The count of referencingBackups. + */ + @java.lang.Deprecated + int getReferencingBackupsCount(); + + /** + * + * + *
    +   * Output only. Deprecated: This field is not populated.
    +   * Output only. The names of the backups that reference this instance
    +   * partition. Referencing backups should share the parent instance. The
    +   * existence of any referencing backup prevents the instance partition from
    +   * being deleted.
    +   * 
    + * + * + * repeated string referencing_backups = 11 [deprecated = true, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @deprecated google.spanner.admin.instance.v1.InstancePartition.referencing_backups is + * deprecated. See google/spanner/admin/instance/v1/spanner_instance_admin.proto;l=1855 + * @param index The index of the element to return. + * @return The referencingBackups at the given index. + */ + @java.lang.Deprecated + java.lang.String getReferencingBackups(int index); + + /** + * + * + *
    +   * Output only. Deprecated: This field is not populated.
    +   * Output only. The names of the backups that reference this instance
    +   * partition. Referencing backups should share the parent instance. The
    +   * existence of any referencing backup prevents the instance partition from
    +   * being deleted.
    +   * 
    + * + * + * repeated string referencing_backups = 11 [deprecated = true, (.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @deprecated google.spanner.admin.instance.v1.InstancePartition.referencing_backups is + * deprecated. See google/spanner/admin/instance/v1/spanner_instance_admin.proto;l=1855 + * @param index The index of the value to return. + * @return The bytes of the referencingBackups at the given index. + */ + @java.lang.Deprecated + com.google.protobuf.ByteString getReferencingBackupsBytes(int index); + + /** + * + * + *
    +   * Used for optimistic concurrency control as a way
    +   * to help prevent simultaneous updates of a instance partition from
    +   * overwriting each other. It is strongly suggested that systems make use of
    +   * the etag in the read-modify-write cycle to perform instance partition
    +   * updates in order to avoid race conditions: An etag is returned in the
    +   * response which contains instance partitions, and systems are expected to
    +   * put that etag in the request to update instance partitions to ensure that
    +   * their change will be applied to the same version of the instance partition.
    +   * If no etag is provided in the call to update instance partition, then the
    +   * existing instance partition is overwritten blindly.
    +   * 
    + * + * string etag = 12; + * + * @return The etag. + */ + java.lang.String getEtag(); + + /** + * + * + *
    +   * Used for optimistic concurrency control as a way
    +   * to help prevent simultaneous updates of a instance partition from
    +   * overwriting each other. It is strongly suggested that systems make use of
    +   * the etag in the read-modify-write cycle to perform instance partition
    +   * updates in order to avoid race conditions: An etag is returned in the
    +   * response which contains instance partitions, and systems are expected to
    +   * put that etag in the request to update instance partitions to ensure that
    +   * their change will be applied to the same version of the instance partition.
    +   * If no etag is provided in the call to update instance partition, then the
    +   * existing instance partition is overwritten blindly.
    +   * 
    + * + * string etag = 12; + * + * @return The bytes for etag. + */ + com.google.protobuf.ByteString getEtagBytes(); + + com.google.spanner.admin.instance.v1.InstancePartition.ComputeCapacityCase + getComputeCapacityCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsRequest.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsRequest.java new file mode 100644 index 000000000000..f1bc991ca78a --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsRequest.java @@ -0,0 +1,1431 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The request for
    + * [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest} + */ +@com.google.protobuf.Generated +public final class ListInstanceConfigOperationsRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest) + ListInstanceConfigOperationsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListInstanceConfigOperationsRequest"); + } + + // Use ListInstanceConfigOperationsRequest.newBuilder() to construct. + private ListInstanceConfigOperationsRequest( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListInstanceConfigOperationsRequest() { + parent_ = ""; + filter_ = ""; + pageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest.class, + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
    +   * Required. The project of the instance configuration operations.
    +   * Values are of the form `projects/<project>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The project of the instance configuration operations.
    +   * Values are of the form `projects/<project>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FILTER_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object filter_ = ""; + + /** + * + * + *
    +   * An expression that filters the list of returned operations.
    +   *
    +   * A filter expression consists of a field name, a
    +   * comparison operator, and a value for filtering.
    +   * The value must be a string, a number, or a boolean. The comparison operator
    +   * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +   * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +   *
    +   * The following fields in the Operation are eligible for filtering:
    +   *
    +   * * `name` - The name of the long-running operation
    +   * * `done` - False if the operation is in progress, else true.
    +   * * `metadata.@type` - the type of metadata. For example, the type string
    +   * for
    +   * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]
    +   * is
    +   * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata`.
    +   * * `metadata.<field_name>` - any field in metadata.value.
    +   * `metadata.@type` must be specified first, if filtering on metadata
    +   * fields.
    +   * * `error` - Error associated with the long-running operation.
    +   * * `response.@type` - the type of response.
    +   * * `response.<field_name>` - any field in response.value.
    +   *
    +   * You can combine multiple expressions by enclosing each expression in
    +   * parentheses. By default, expressions are combined with AND logic. However,
    +   * you can specify AND, OR, and NOT logic explicitly.
    +   *
    +   * Here are a few examples:
    +   *
    +   * * `done:true` - The operation is complete.
    +   * * `(metadata.@type=` \
    +   * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata)
    +   * AND` \
    +   * `(metadata.instance_config.name:custom-config) AND` \
    +   * `(metadata.progress.start_time < \"2021-03-28T14:50:00Z\") AND` \
    +   * `(error:*)` - Return operations where:
    +   * * The operation's metadata type is
    +   * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
    +   * * The instance configuration name contains "custom-config".
    +   * * The operation started before 2021-03-28T14:50:00Z.
    +   * * The operation resulted in an error.
    +   * 
    + * + * string filter = 2; + * + * @return The filter. + */ + @java.lang.Override + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } + } + + /** + * + * + *
    +   * An expression that filters the list of returned operations.
    +   *
    +   * A filter expression consists of a field name, a
    +   * comparison operator, and a value for filtering.
    +   * The value must be a string, a number, or a boolean. The comparison operator
    +   * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +   * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +   *
    +   * The following fields in the Operation are eligible for filtering:
    +   *
    +   * * `name` - The name of the long-running operation
    +   * * `done` - False if the operation is in progress, else true.
    +   * * `metadata.@type` - the type of metadata. For example, the type string
    +   * for
    +   * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]
    +   * is
    +   * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata`.
    +   * * `metadata.<field_name>` - any field in metadata.value.
    +   * `metadata.@type` must be specified first, if filtering on metadata
    +   * fields.
    +   * * `error` - Error associated with the long-running operation.
    +   * * `response.@type` - the type of response.
    +   * * `response.<field_name>` - any field in response.value.
    +   *
    +   * You can combine multiple expressions by enclosing each expression in
    +   * parentheses. By default, expressions are combined with AND logic. However,
    +   * you can specify AND, OR, and NOT logic explicitly.
    +   *
    +   * Here are a few examples:
    +   *
    +   * * `done:true` - The operation is complete.
    +   * * `(metadata.@type=` \
    +   * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata)
    +   * AND` \
    +   * `(metadata.instance_config.name:custom-config) AND` \
    +   * `(metadata.progress.start_time < \"2021-03-28T14:50:00Z\") AND` \
    +   * `(error:*)` - Return operations where:
    +   * * The operation's metadata type is
    +   * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
    +   * * The instance configuration name contains "custom-config".
    +   * * The operation started before 2021-03-28T14:50:00Z.
    +   * * The operation resulted in an error.
    +   * 
    + * + * string filter = 2; + * + * @return The bytes for filter. + */ + @java.lang.Override + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 3; + private int pageSize_ = 0; + + /** + * + * + *
    +   * Number of operations to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 3; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse.next_page_token]
    +   * from a previous
    +   * [ListInstanceConfigOperationsResponse][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
    +   * 
    + * + * string page_token = 4; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse.next_page_token]
    +   * from a previous
    +   * [ListInstanceConfigOperationsResponse][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
    +   * 
    + * + * string page_token = 4; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, filter_); + } + if (pageSize_ != 0) { + output.writeInt32(3, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, pageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, filter_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(3, pageSize_); + } + if 
(!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, pageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest other = + (com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getFilter().equals(other.getFilter())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + FILTER_FIELD_NUMBER; + hash = (53 * hash) + getFilter().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest parseFrom( + java.nio.ByteBuffer 
data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest + parseDelimitedFrom(java.io.InputStream input) throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest) + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest.class, + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest.Builder + .class); + } + + // Construct using + // com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + filter_ = ""; + pageSize_ = 0; + pageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest + 
getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest build() { + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest buildPartial() { + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest result = + new com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.filter_ = filter_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.pageToken_ = pageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest) { + return mergeFrom( + (com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest other) { + if (other + == com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + 
bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getFilter().isEmpty()) { + filter_ = other.filter_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000008; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + filter_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
    +     * Required. The project of the instance configuration operations.
    +     * Values are of the form `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The project of the instance configuration operations.
    +     * Values are of the form `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The project of the instance configuration operations.
    +     * Values are of the form `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The project of the instance configuration operations.
    +     * Values are of the form `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The project of the instance configuration operations.
    +     * Values are of the form `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object filter_ = ""; + + /** + * + * + *
    +     * An expression that filters the list of returned operations.
    +     *
    +     * A filter expression consists of a field name, a
    +     * comparison operator, and a value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the Operation are eligible for filtering:
    +     *
    +     * * `name` - The name of the long-running operation
    +     * * `done` - False if the operation is in progress, else true.
    +     * * `metadata.@type` - the type of metadata. For example, the type string
    +     * for
    +     * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]
    +     * is
    +     * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata`.
    +     * * `metadata.<field_name>` - any field in metadata.value.
    +     * `metadata.@type` must be specified first, if filtering on metadata
    +     * fields.
    +     * * `error` - Error associated with the long-running operation.
    +     * * `response.@type` - the type of response.
    +     * * `response.<field_name>` - any field in response.value.
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic. However,
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `done:true` - The operation is complete.
    +     * * `(metadata.@type=` \
    +     * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata)
    +     * AND` \
    +     * `(metadata.instance_config.name:custom-config) AND` \
    +     * `(metadata.progress.start_time < \"2021-03-28T14:50:00Z\") AND` \
    +     * `(error:*)` - Return operations where:
    +     * * The operation's metadata type is
    +     * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
    +     * * The instance configuration name contains "custom-config".
    +     * * The operation started before 2021-03-28T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * 
    + * + * string filter = 2; + * + * @return The filter. + */ + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * An expression that filters the list of returned operations.
    +     *
    +     * A filter expression consists of a field name, a
    +     * comparison operator, and a value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the Operation are eligible for filtering:
    +     *
    +     * * `name` - The name of the long-running operation
    +     * * `done` - False if the operation is in progress, else true.
    +     * * `metadata.@type` - the type of metadata. For example, the type string
    +     * for
    +     * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]
    +     * is
    +     * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata`.
    +     * * `metadata.<field_name>` - any field in metadata.value.
    +     * `metadata.@type` must be specified first, if filtering on metadata
    +     * fields.
    +     * * `error` - Error associated with the long-running operation.
    +     * * `response.@type` - the type of response.
    +     * * `response.<field_name>` - any field in response.value.
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic. However,
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `done:true` - The operation is complete.
    +     * * `(metadata.@type=` \
    +     * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata)
    +     * AND` \
    +     * `(metadata.instance_config.name:custom-config) AND` \
    +     * `(metadata.progress.start_time < \"2021-03-28T14:50:00Z\") AND` \
    +     * `(error:*)` - Return operations where:
    +     * * The operation's metadata type is
    +     * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
    +     * * The instance configuration name contains "custom-config".
    +     * * The operation started before 2021-03-28T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * 
    + * + * string filter = 2; + * + * @return The bytes for filter. + */ + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * An expression that filters the list of returned operations.
    +     *
    +     * A filter expression consists of a field name, a
    +     * comparison operator, and a value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the Operation are eligible for filtering:
    +     *
    +     * * `name` - The name of the long-running operation
    +     * * `done` - False if the operation is in progress, else true.
    +     * * `metadata.@type` - the type of metadata. For example, the type string
    +     * for
    +     * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]
    +     * is
    +     * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata`.
    +     * * `metadata.<field_name>` - any field in metadata.value.
    +     * `metadata.@type` must be specified first, if filtering on metadata
    +     * fields.
    +     * * `error` - Error associated with the long-running operation.
    +     * * `response.@type` - the type of response.
    +     * * `response.<field_name>` - any field in response.value.
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic. However,
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `done:true` - The operation is complete.
    +     * * `(metadata.@type=` \
    +     * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata)
    +     * AND` \
    +     * `(metadata.instance_config.name:custom-config) AND` \
    +     * `(metadata.progress.start_time < \"2021-03-28T14:50:00Z\") AND` \
    +     * `(error:*)` - Return operations where:
    +     * * The operation's metadata type is
    +     * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
    +     * * The instance configuration name contains "custom-config".
    +     * * The operation started before 2021-03-28T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * 
    + * + * string filter = 2; + * + * @param value The filter to set. + * @return This builder for chaining. + */ + public Builder setFilter(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + filter_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * An expression that filters the list of returned operations.
    +     *
    +     * A filter expression consists of a field name, a
    +     * comparison operator, and a value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the Operation are eligible for filtering:
    +     *
    +     * * `name` - The name of the long-running operation
    +     * * `done` - False if the operation is in progress, else true.
    +     * * `metadata.@type` - the type of metadata. For example, the type string
    +     * for
    +     * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]
    +     * is
    +     * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata`.
    +     * * `metadata.<field_name>` - any field in metadata.value.
    +     * `metadata.@type` must be specified first, if filtering on metadata
    +     * fields.
    +     * * `error` - Error associated with the long-running operation.
    +     * * `response.@type` - the type of response.
    +     * * `response.<field_name>` - any field in response.value.
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic. However,
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `done:true` - The operation is complete.
    +     * * `(metadata.@type=` \
    +     * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata)
    +     * AND` \
    +     * `(metadata.instance_config.name:custom-config) AND` \
    +     * `(metadata.progress.start_time < \"2021-03-28T14:50:00Z\") AND` \
    +     * `(error:*)` - Return operations where:
    +     * * The operation's metadata type is
    +     * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
    +     * * The instance configuration name contains "custom-config".
    +     * * The operation started before 2021-03-28T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * 
    + * + * string filter = 2; + * + * @return This builder for chaining. + */ + public Builder clearFilter() { + filter_ = getDefaultInstance().getFilter(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * An expression that filters the list of returned operations.
    +     *
    +     * A filter expression consists of a field name, a
    +     * comparison operator, and a value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the Operation are eligible for filtering:
    +     *
    +     * * `name` - The name of the long-running operation
    +     * * `done` - False if the operation is in progress, else true.
    +     * * `metadata.@type` - the type of metadata. For example, the type string
    +     * for
    +     * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]
    +     * is
    +     * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata`.
    +     * * `metadata.<field_name>` - any field in metadata.value.
    +     * `metadata.@type` must be specified first, if filtering on metadata
    +     * fields.
    +     * * `error` - Error associated with the long-running operation.
    +     * * `response.@type` - the type of response.
    +     * * `response.<field_name>` - any field in response.value.
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic. However,
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `done:true` - The operation is complete.
    +     * * `(metadata.@type=` \
    +     * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata)
    +     * AND` \
    +     * `(metadata.instance_config.name:custom-config) AND` \
    +     * `(metadata.progress.start_time < \"2021-03-28T14:50:00Z\") AND` \
    +     * `(error:*)` - Return operations where:
    +     * * The operation's metadata type is
    +     * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
    +     * * The instance configuration name contains "custom-config".
    +     * * The operation started before 2021-03-28T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * 
    + * + * string filter = 2; + * + * @param value The bytes for filter to set. + * @return This builder for chaining. + */ + public Builder setFilterBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + filter_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
    +     * Number of operations to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 3; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
    +     * Number of operations to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 3; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Number of operations to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 3; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000004); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse.next_page_token]
    +     * from a previous
    +     * [ListInstanceConfigOperationsResponse][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse.next_page_token]
    +     * from a previous
    +     * [ListInstanceConfigOperationsResponse][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse.next_page_token]
    +     * from a previous
    +     * [ListInstanceConfigOperationsResponse][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse.next_page_token]
    +     * from a previous
    +     * [ListInstanceConfigOperationsResponse][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse.next_page_token]
    +     * from a previous
    +     * [ListInstanceConfigOperationsResponse][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest) + private static final com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest(); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListInstanceConfigOperationsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return 
PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsRequestOrBuilder.java new file mode 100644 index 000000000000..af8d21bbeca0 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsRequestOrBuilder.java @@ -0,0 +1,216 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface ListInstanceConfigOperationsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The project of the instance configuration operations.
    +   * Values are of the form `projects/<project>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
    +   * Required. The project of the instance configuration operations.
    +   * Values are of the form `projects/<project>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
    +   * An expression that filters the list of returned operations.
    +   *
    +   * A filter expression consists of a field name, a
    +   * comparison operator, and a value for filtering.
    +   * The value must be a string, a number, or a boolean. The comparison operator
    +   * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +   * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +   *
    +   * The following fields in the Operation are eligible for filtering:
    +   *
    +   * * `name` - The name of the long-running operation
    +   * * `done` - False if the operation is in progress, else true.
    +   * * `metadata.@type` - the type of metadata. For example, the type string
    +   * for
    +   * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]
    +   * is
    +   * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata`.
    +   * * `metadata.<field_name>` - any field in metadata.value.
    +   * `metadata.@type` must be specified first, if filtering on metadata
    +   * fields.
    +   * * `error` - Error associated with the long-running operation.
    +   * * `response.@type` - the type of response.
    +   * * `response.<field_name>` - any field in response.value.
    +   *
    +   * You can combine multiple expressions by enclosing each expression in
    +   * parentheses. By default, expressions are combined with AND logic. However,
    +   * you can specify AND, OR, and NOT logic explicitly.
    +   *
    +   * Here are a few examples:
    +   *
    +   * * `done:true` - The operation is complete.
    +   * * `(metadata.@type=` \
    +   * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata)
    +   * AND` \
    +   * `(metadata.instance_config.name:custom-config) AND` \
    +   * `(metadata.progress.start_time < \"2021-03-28T14:50:00Z\") AND` \
    +   * `(error:*)` - Return operations where:
    +   * * The operation's metadata type is
    +   * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
    +   * * The instance configuration name contains "custom-config".
    +   * * The operation started before 2021-03-28T14:50:00Z.
    +   * * The operation resulted in an error.
    +   * 
    + * + * string filter = 2; + * + * @return The filter. + */ + java.lang.String getFilter(); + + /** + * + * + *
    +   * An expression that filters the list of returned operations.
    +   *
    +   * A filter expression consists of a field name, a
    +   * comparison operator, and a value for filtering.
    +   * The value must be a string, a number, or a boolean. The comparison operator
    +   * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +   * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +   *
    +   * The following fields in the Operation are eligible for filtering:
    +   *
    +   * * `name` - The name of the long-running operation
    +   * * `done` - False if the operation is in progress, else true.
    +   * * `metadata.@type` - the type of metadata. For example, the type string
    +   * for
    +   * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]
    +   * is
    +   * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata`.
    +   * * `metadata.<field_name>` - any field in metadata.value.
    +   * `metadata.@type` must be specified first, if filtering on metadata
    +   * fields.
    +   * * `error` - Error associated with the long-running operation.
    +   * * `response.@type` - the type of response.
    +   * * `response.<field_name>` - any field in response.value.
    +   *
    +   * You can combine multiple expressions by enclosing each expression in
    +   * parentheses. By default, expressions are combined with AND logic. However,
    +   * you can specify AND, OR, and NOT logic explicitly.
    +   *
    +   * Here are a few examples:
    +   *
    +   * * `done:true` - The operation is complete.
    +   * * `(metadata.@type=` \
    +   * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata)
    +   * AND` \
    +   * `(metadata.instance_config.name:custom-config) AND` \
    +   * `(metadata.progress.start_time < \"2021-03-28T14:50:00Z\") AND` \
    +   * `(error:*)` - Return operations where:
    +   * * The operation's metadata type is
    +   * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
    +   * * The instance configuration name contains "custom-config".
    +   * * The operation started before 2021-03-28T14:50:00Z.
    +   * * The operation resulted in an error.
    +   * 
    + * + * string filter = 2; + * + * @return The bytes for filter. + */ + com.google.protobuf.ByteString getFilterBytes(); + + /** + * + * + *
    +   * Number of operations to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 3; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse.next_page_token]
    +   * from a previous
    +   * [ListInstanceConfigOperationsResponse][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
    +   * 
    + * + * string page_token = 4; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse.next_page_token]
    +   * from a previous
    +   * [ListInstanceConfigOperationsResponse][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
    +   * 
    + * + * string page_token = 4; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsResponse.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsResponse.java new file mode 100644 index 000000000000..082dc8d36bca --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsResponse.java @@ -0,0 +1,1237 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The response for
    + * [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse} + */ +@com.google.protobuf.Generated +public final class ListInstanceConfigOperationsResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse) + ListInstanceConfigOperationsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListInstanceConfigOperationsResponse"); + } + + // Use ListInstanceConfigOperationsResponse.newBuilder() to construct. + private ListInstanceConfigOperationsResponse( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListInstanceConfigOperationsResponse() { + operations_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse.class, + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse.Builder + .class); + } + + public static final int OPERATIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List operations_; + + /** + 
* + * + *
    +   * The list of matching instance configuration long-running operations. Each
    +   * operation's name will be
    +   * prefixed by the name of the instance configuration. The operation's
    +   * metadata field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + @java.lang.Override + public java.util.List getOperationsList() { + return operations_; + } + + /** + * + * + *
    +   * The list of matching instance configuration long-running operations. Each
    +   * operation's name will be
    +   * prefixed by the name of the instance configuration. The operation's
    +   * metadata field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + @java.lang.Override + public java.util.List + getOperationsOrBuilderList() { + return operations_; + } + + /** + * + * + *
    +   * The list of matching instance configuration long-running operations. Each
    +   * operation's name will be
    +   * prefixed by the name of the instance configuration. The operation's
    +   * metadata field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + @java.lang.Override + public int getOperationsCount() { + return operations_.size(); + } + + /** + * + * + *
    +   * The list of matching instance configuration long-running operations. Each
    +   * operation's name will be
    +   * prefixed by the name of the instance configuration. The operation's
    +   * metadata field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + @java.lang.Override + public com.google.longrunning.Operation getOperations(int index) { + return operations_.get(index); + } + + /** + * + * + *
    +   * The list of matching instance configuration long-running operations. Each
    +   * operation's name will be
    +   * prefixed by the name of the instance configuration. The operation's
    +   * metadata field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + @java.lang.Override + public com.google.longrunning.OperationOrBuilder getOperationsOrBuilder(int index) { + return operations_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations]
    +   * call to fetch more of the matching metadata.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations]
    +   * call to fetch more of the matching metadata.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < operations_.size(); i++) { + output.writeMessage(1, operations_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < operations_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, operations_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse)) { + return super.equals(obj); + } + 
com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse other = + (com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse) obj; + + if (!getOperationsList().equals(other.getOperationsList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getOperationsCount() > 0) { + hash = (37 * hash) + OPERATIONS_FIELD_NUMBER; + hash = (53 * hash) + getOperationsList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The response for
    +   * [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse) + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse.class, + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse.Builder + .class); + } + + // Construct using + // com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (operationsBuilder_ == null) { + operations_ = java.util.Collections.emptyList(); + } else { + operations_ = null; + operationsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + 
.internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse build() { + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse + buildPartial() { + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse result = + new com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse result) { + if (operationsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + operations_ = java.util.Collections.unmodifiableList(operations_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.operations_ = operations_; + } else { + result.operations_ = operationsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse) { + return mergeFrom( + 
(com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse other) { + if (other + == com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse + .getDefaultInstance()) return this; + if (operationsBuilder_ == null) { + if (!other.operations_.isEmpty()) { + if (operations_.isEmpty()) { + operations_ = other.operations_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureOperationsIsMutable(); + operations_.addAll(other.operations_); + } + onChanged(); + } + } else { + if (!other.operations_.isEmpty()) { + if (operationsBuilder_.isEmpty()) { + operationsBuilder_.dispose(); + operationsBuilder_ = null; + operations_ = other.operations_; + bitField0_ = (bitField0_ & ~0x00000001); + operationsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetOperationsFieldBuilder() + : null; + } else { + operationsBuilder_.addAllMessages(other.operations_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.longrunning.Operation m = + input.readMessage(com.google.longrunning.Operation.parser(), extensionRegistry); + if (operationsBuilder_ == null) 
{ + ensureOperationsIsMutable(); + operations_.add(m); + } else { + operationsBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List operations_ = + java.util.Collections.emptyList(); + + private void ensureOperationsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + operations_ = new java.util.ArrayList(operations_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder> + operationsBuilder_; + + /** + * + * + *
    +     * The list of matching instance configuration long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the name of the instance configuration. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public java.util.List getOperationsList() { + if (operationsBuilder_ == null) { + return java.util.Collections.unmodifiableList(operations_); + } else { + return operationsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * The list of matching instance configuration long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the name of the instance configuration. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public int getOperationsCount() { + if (operationsBuilder_ == null) { + return operations_.size(); + } else { + return operationsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * The list of matching instance configuration long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the name of the instance configuration. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public com.google.longrunning.Operation getOperations(int index) { + if (operationsBuilder_ == null) { + return operations_.get(index); + } else { + return operationsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * The list of matching instance configuration long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the name of the instance configuration. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder setOperations(int index, com.google.longrunning.Operation value) { + if (operationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOperationsIsMutable(); + operations_.set(index, value); + onChanged(); + } else { + operationsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of matching instance configuration long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the name of the instance configuration. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder setOperations( + int index, com.google.longrunning.Operation.Builder builderForValue) { + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + operations_.set(index, builderForValue.build()); + onChanged(); + } else { + operationsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of matching instance configuration long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the name of the instance configuration. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder addOperations(com.google.longrunning.Operation value) { + if (operationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOperationsIsMutable(); + operations_.add(value); + onChanged(); + } else { + operationsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * The list of matching instance configuration long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the name of the instance configuration. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder addOperations(int index, com.google.longrunning.Operation value) { + if (operationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOperationsIsMutable(); + operations_.add(index, value); + onChanged(); + } else { + operationsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of matching instance configuration long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the name of the instance configuration. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder addOperations(com.google.longrunning.Operation.Builder builderForValue) { + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + operations_.add(builderForValue.build()); + onChanged(); + } else { + operationsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of matching instance configuration long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the name of the instance configuration. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder addOperations( + int index, com.google.longrunning.Operation.Builder builderForValue) { + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + operations_.add(index, builderForValue.build()); + onChanged(); + } else { + operationsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of matching instance configuration long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the name of the instance configuration. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder addAllOperations( + java.lang.Iterable values) { + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, operations_); + onChanged(); + } else { + operationsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * The list of matching instance configuration long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the name of the instance configuration. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder clearOperations() { + if (operationsBuilder_ == null) { + operations_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + operationsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * The list of matching instance configuration long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the name of the instance configuration. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder removeOperations(int index) { + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + operations_.remove(index); + onChanged(); + } else { + operationsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * The list of matching instance configuration long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the name of the instance configuration. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public com.google.longrunning.Operation.Builder getOperationsBuilder(int index) { + return internalGetOperationsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * The list of matching instance configuration long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the name of the instance configuration. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public com.google.longrunning.OperationOrBuilder getOperationsOrBuilder(int index) { + if (operationsBuilder_ == null) { + return operations_.get(index); + } else { + return operationsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * The list of matching instance configuration long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the name of the instance configuration. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public java.util.List + getOperationsOrBuilderList() { + if (operationsBuilder_ != null) { + return operationsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(operations_); + } + } + + /** + * + * + *
    +     * The list of matching instance configuration long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the name of the instance configuration. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public com.google.longrunning.Operation.Builder addOperationsBuilder() { + return internalGetOperationsFieldBuilder() + .addBuilder(com.google.longrunning.Operation.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of matching instance configuration long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the name of the instance configuration. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public com.google.longrunning.Operation.Builder addOperationsBuilder(int index) { + return internalGetOperationsFieldBuilder() + .addBuilder(index, com.google.longrunning.Operation.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of matching instance configuration long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the name of the instance configuration. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public java.util.List getOperationsBuilderList() { + return internalGetOperationsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder> + internalGetOperationsFieldBuilder() { + if (operationsBuilder_ == null) { + operationsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder>( + operations_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + operations_ = null; + } + return operationsBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations]
    +     * call to fetch more of the matching metadata.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations]
    +     * call to fetch more of the matching metadata.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations]
    +     * call to fetch more of the matching metadata.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations]
    +     * call to fetch more of the matching metadata.
    +     * 
    + * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations]
    +     * call to fetch more of the matching metadata.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse) + private static final com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse(); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListInstanceConfigOperationsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser 
parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsResponseOrBuilder.java new file mode 100644 index 000000000000..4ca7d4055cde --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsResponseOrBuilder.java @@ -0,0 +1,133 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface ListInstanceConfigOperationsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The list of matching instance configuration long-running operations. Each
    +   * operation's name will be
    +   * prefixed by the name of the instance configuration. The operation's
    +   * metadata field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + java.util.List getOperationsList(); + + /** + * + * + *
    +   * The list of matching instance configuration long-running operations. Each
    +   * operation's name will be
    +   * prefixed by the name of the instance configuration. The operation's
    +   * metadata field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + com.google.longrunning.Operation getOperations(int index); + + /** + * + * + *
    +   * The list of matching instance configuration long-running operations. Each
    +   * operation's name will be
    +   * prefixed by the name of the instance configuration. The operation's
    +   * metadata field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + int getOperationsCount(); + + /** + * + * + *
    +   * The list of matching instance configuration long-running operations. Each
    +   * operation's name will be
    +   * prefixed by the name of the instance configuration. The operation's
    +   * metadata field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + java.util.List getOperationsOrBuilderList(); + + /** + * + * + *
    +   * The list of matching instance configuration long-running operations. Each
    +   * operation's name will be
    +   * prefixed by the name of the instance configuration. The operation's
    +   * metadata field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + com.google.longrunning.OperationOrBuilder getOperationsOrBuilder(int index); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations]
    +   * call to fetch more of the matching metadata.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations]
    +   * call to fetch more of the matching metadata.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsRequest.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsRequest.java new file mode 100644 index 000000000000..14e719398e39 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsRequest.java @@ -0,0 +1,945 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The request for
    + * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ListInstanceConfigsRequest} + */ +@com.google.protobuf.Generated +public final class ListInstanceConfigsRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.ListInstanceConfigsRequest) + ListInstanceConfigsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListInstanceConfigsRequest"); + } + + // Use ListInstanceConfigsRequest.newBuilder() to construct. + private ListInstanceConfigsRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListInstanceConfigsRequest() { + parent_ = ""; + pageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest.class, + com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
    +   * Required. The name of the project for which a list of supported instance
    +   * configurations is requested. Values are of the form
    +   * `projects/<project>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the project for which a list of supported instance
    +   * configurations is requested. Values are of the form
    +   * `projects/<project>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_ = 0; + + /** + * + * + *
    +   * Number of instance configurations to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 2; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token]
    +   * from a previous
    +   * [ListInstanceConfigsResponse][google.spanner.admin.instance.v1.ListInstanceConfigsResponse].
    +   * 
    + * + * string page_token = 3; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token]
    +   * from a previous
    +   * [ListInstanceConfigsResponse][google.spanner.admin.instance.v1.ListInstanceConfigsResponse].
    +   * 
    + * + * string page_token = 3; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (pageSize_ != 0) { + output.writeInt32(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, pageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, pageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + 
return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest other = + (com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest parseFrom( + com.google.protobuf.ByteString 
data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ListInstanceConfigsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.ListInstanceConfigsRequest) + com.google.spanner.admin.instance.v1.ListInstanceConfigsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest.class, + com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + pageSize_ = 0; + pageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest + getDefaultInstanceForType() { + return 
com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest build() { + com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest buildPartial() { + com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest result = + new com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageToken_ = pageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest) { + return mergeFrom((com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest other) { + if (other + == com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000004; + 
onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
    +     * Required. The name of the project for which a list of supported instance
    +     * configurations is requested. Values are of the form
    +     * `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the project for which a list of supported instance
    +     * configurations is requested. Values are of the form
    +     * `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the project for which a list of supported instance
    +     * configurations is requested. Values are of the form
    +     * `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the project for which a list of supported instance
    +     * configurations is requested. Values are of the form
    +     * `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the project for which a list of supported instance
    +     * configurations is requested. Values are of the form
    +     * `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
    +     * Number of instance configurations to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 2; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
    +     * Number of instance configurations to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 2; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Number of instance configurations to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 2; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000002); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token]
    +     * from a previous
    +     * [ListInstanceConfigsResponse][google.spanner.admin.instance.v1.ListInstanceConfigsResponse].
    +     * 
    + * + * string page_token = 3; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token]
    +     * from a previous
    +     * [ListInstanceConfigsResponse][google.spanner.admin.instance.v1.ListInstanceConfigsResponse].
    +     * 
    + * + * string page_token = 3; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token]
    +     * from a previous
    +     * [ListInstanceConfigsResponse][google.spanner.admin.instance.v1.ListInstanceConfigsResponse].
    +     * 
    + * + * string page_token = 3; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token]
    +     * from a previous
    +     * [ListInstanceConfigsResponse][google.spanner.admin.instance.v1.ListInstanceConfigsResponse].
    +     * 
    + * + * string page_token = 3; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token]
    +     * from a previous
    +     * [ListInstanceConfigsResponse][google.spanner.admin.instance.v1.ListInstanceConfigsResponse].
    +     * 
    + * + * string page_token = 3; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.ListInstanceConfigsRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ListInstanceConfigsRequest) + private static final com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest(); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListInstanceConfigsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstanceConfigsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsRequestOrBuilder.java new file mode 100644 index 000000000000..0d3a7e8620a4 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsRequestOrBuilder.java @@ -0,0 +1,108 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface ListInstanceConfigsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.ListInstanceConfigsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the project for which a list of supported instance
    +   * configurations is requested. Values are of the form
    +   * `projects/<project>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
    +   * Required. The name of the project for which a list of supported instance
    +   * configurations is requested. Values are of the form
    +   * `projects/<project>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
    +   * Number of instance configurations to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 2; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token]
    +   * from a previous
    +   * [ListInstanceConfigsResponse][google.spanner.admin.instance.v1.ListInstanceConfigsResponse].
    +   * 
    + * + * string page_token = 3; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token]
    +   * from a previous
    +   * [ListInstanceConfigsResponse][google.spanner.admin.instance.v1.ListInstanceConfigsResponse].
    +   * 
    + * + * string page_token = 3; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsResponse.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsResponse.java new file mode 100644 index 000000000000..da4710b4cf1d --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsResponse.java @@ -0,0 +1,1151 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The response for
    + * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ListInstanceConfigsResponse} + */ +@com.google.protobuf.Generated +public final class ListInstanceConfigsResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.ListInstanceConfigsResponse) + ListInstanceConfigsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListInstanceConfigsResponse"); + } + + // Use ListInstanceConfigsResponse.newBuilder() to construct. + private ListInstanceConfigsResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListInstanceConfigsResponse() { + instanceConfigs_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse.class, + com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse.Builder.class); + } + + public static final int INSTANCE_CONFIGS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List instanceConfigs_; + + /** + * + * + *
    +   * The list of requested instance configurations.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + @java.lang.Override + public java.util.List + getInstanceConfigsList() { + return instanceConfigs_; + } + + /** + * + * + *
    +   * The list of requested instance configurations.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + @java.lang.Override + public java.util.List + getInstanceConfigsOrBuilderList() { + return instanceConfigs_; + } + + /** + * + * + *
    +   * The list of requested instance configurations.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + @java.lang.Override + public int getInstanceConfigsCount() { + return instanceConfigs_.size(); + } + + /** + * + * + *
    +   * The list of requested instance configurations.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfigs(int index) { + return instanceConfigs_.get(index); + } + + /** + * + * + *
    +   * The list of requested instance configurations.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder getInstanceConfigsOrBuilder( + int index) { + return instanceConfigs_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]
    +   * call to fetch more of the matching instance configurations.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]
    +   * call to fetch more of the matching instance configurations.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < instanceConfigs_.size(); i++) { + output.writeMessage(1, instanceConfigs_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < instanceConfigs_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, instanceConfigs_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse)) { + return super.equals(obj); + } + 
com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse other = + (com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse) obj; + + if (!getInstanceConfigsList().equals(other.getInstanceConfigsList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getInstanceConfigsCount() > 0) { + hash = (37 * hash) + INSTANCE_CONFIGS_FIELD_NUMBER; + hash = (53 * hash) + getInstanceConfigsList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The response for
    +   * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ListInstanceConfigsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.ListInstanceConfigsResponse) + com.google.spanner.admin.instance.v1.ListInstanceConfigsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse.class, + com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (instanceConfigsBuilder_ == null) { + instanceConfigs_ = java.util.Collections.emptyList(); + } else { + instanceConfigs_ = null; + instanceConfigsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsResponse_descriptor; + } + + 
@java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse build() { + com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse buildPartial() { + com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse result = + new com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse result) { + if (instanceConfigsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + instanceConfigs_ = java.util.Collections.unmodifiableList(instanceConfigs_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.instanceConfigs_ = instanceConfigs_; + } else { + result.instanceConfigs_ = instanceConfigsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse) { + return mergeFrom((com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + 
com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse other) { + if (other + == com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse.getDefaultInstance()) + return this; + if (instanceConfigsBuilder_ == null) { + if (!other.instanceConfigs_.isEmpty()) { + if (instanceConfigs_.isEmpty()) { + instanceConfigs_ = other.instanceConfigs_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureInstanceConfigsIsMutable(); + instanceConfigs_.addAll(other.instanceConfigs_); + } + onChanged(); + } + } else { + if (!other.instanceConfigs_.isEmpty()) { + if (instanceConfigsBuilder_.isEmpty()) { + instanceConfigsBuilder_.dispose(); + instanceConfigsBuilder_ = null; + instanceConfigs_ = other.instanceConfigs_; + bitField0_ = (bitField0_ & ~0x00000001); + instanceConfigsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetInstanceConfigsFieldBuilder() + : null; + } else { + instanceConfigsBuilder_.addAllMessages(other.instanceConfigs_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.admin.instance.v1.InstanceConfig m = + input.readMessage( + com.google.spanner.admin.instance.v1.InstanceConfig.parser(), + extensionRegistry); + if (instanceConfigsBuilder_ == null) { + ensureInstanceConfigsIsMutable(); + 
instanceConfigs_.add(m); + } else { + instanceConfigsBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List instanceConfigs_ = + java.util.Collections.emptyList(); + + private void ensureInstanceConfigsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + instanceConfigs_ = + new java.util.ArrayList( + instanceConfigs_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.InstanceConfig, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder, + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder> + instanceConfigsBuilder_; + + /** + * + * + *
    +     * The list of requested instance configurations.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + public java.util.List + getInstanceConfigsList() { + if (instanceConfigsBuilder_ == null) { + return java.util.Collections.unmodifiableList(instanceConfigs_); + } else { + return instanceConfigsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * The list of requested instance configurations.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + public int getInstanceConfigsCount() { + if (instanceConfigsBuilder_ == null) { + return instanceConfigs_.size(); + } else { + return instanceConfigsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * The list of requested instance configurations.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + public com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfigs(int index) { + if (instanceConfigsBuilder_ == null) { + return instanceConfigs_.get(index); + } else { + return instanceConfigsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * The list of requested instance configurations.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + public Builder setInstanceConfigs( + int index, com.google.spanner.admin.instance.v1.InstanceConfig value) { + if (instanceConfigsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureInstanceConfigsIsMutable(); + instanceConfigs_.set(index, value); + onChanged(); + } else { + instanceConfigsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instance configurations.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + public Builder setInstanceConfigs( + int index, com.google.spanner.admin.instance.v1.InstanceConfig.Builder builderForValue) { + if (instanceConfigsBuilder_ == null) { + ensureInstanceConfigsIsMutable(); + instanceConfigs_.set(index, builderForValue.build()); + onChanged(); + } else { + instanceConfigsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instance configurations.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + public Builder addInstanceConfigs(com.google.spanner.admin.instance.v1.InstanceConfig value) { + if (instanceConfigsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureInstanceConfigsIsMutable(); + instanceConfigs_.add(value); + onChanged(); + } else { + instanceConfigsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instance configurations.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + public Builder addInstanceConfigs( + int index, com.google.spanner.admin.instance.v1.InstanceConfig value) { + if (instanceConfigsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureInstanceConfigsIsMutable(); + instanceConfigs_.add(index, value); + onChanged(); + } else { + instanceConfigsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instance configurations.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + public Builder addInstanceConfigs( + com.google.spanner.admin.instance.v1.InstanceConfig.Builder builderForValue) { + if (instanceConfigsBuilder_ == null) { + ensureInstanceConfigsIsMutable(); + instanceConfigs_.add(builderForValue.build()); + onChanged(); + } else { + instanceConfigsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instance configurations.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + public Builder addInstanceConfigs( + int index, com.google.spanner.admin.instance.v1.InstanceConfig.Builder builderForValue) { + if (instanceConfigsBuilder_ == null) { + ensureInstanceConfigsIsMutable(); + instanceConfigs_.add(index, builderForValue.build()); + onChanged(); + } else { + instanceConfigsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instance configurations.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + public Builder addAllInstanceConfigs( + java.lang.Iterable values) { + if (instanceConfigsBuilder_ == null) { + ensureInstanceConfigsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, instanceConfigs_); + onChanged(); + } else { + instanceConfigsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instance configurations.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + public Builder clearInstanceConfigs() { + if (instanceConfigsBuilder_ == null) { + instanceConfigs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + instanceConfigsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instance configurations.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + public Builder removeInstanceConfigs(int index) { + if (instanceConfigsBuilder_ == null) { + ensureInstanceConfigsIsMutable(); + instanceConfigs_.remove(index); + onChanged(); + } else { + instanceConfigsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instance configurations.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + public com.google.spanner.admin.instance.v1.InstanceConfig.Builder getInstanceConfigsBuilder( + int index) { + return internalGetInstanceConfigsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * The list of requested instance configurations.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + public com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder getInstanceConfigsOrBuilder( + int index) { + if (instanceConfigsBuilder_ == null) { + return instanceConfigs_.get(index); + } else { + return instanceConfigsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * The list of requested instance configurations.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + public java.util.List + getInstanceConfigsOrBuilderList() { + if (instanceConfigsBuilder_ != null) { + return instanceConfigsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(instanceConfigs_); + } + } + + /** + * + * + *
    +     * The list of requested instance configurations.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + public com.google.spanner.admin.instance.v1.InstanceConfig.Builder addInstanceConfigsBuilder() { + return internalGetInstanceConfigsFieldBuilder() + .addBuilder(com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of requested instance configurations.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + public com.google.spanner.admin.instance.v1.InstanceConfig.Builder addInstanceConfigsBuilder( + int index) { + return internalGetInstanceConfigsFieldBuilder() + .addBuilder( + index, com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of requested instance configurations.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + public java.util.List + getInstanceConfigsBuilderList() { + return internalGetInstanceConfigsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.InstanceConfig, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder, + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder> + internalGetInstanceConfigsFieldBuilder() { + if (instanceConfigsBuilder_ == null) { + instanceConfigsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.InstanceConfig, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder, + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder>( + instanceConfigs_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + instanceConfigs_ = null; + } + return instanceConfigsBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]
    +     * call to fetch more of the matching instance configurations.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]
    +     * call to fetch more of the matching instance configurations.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]
    +     * call to fetch more of the matching instance configurations.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]
    +     * call to fetch more of the matching instance configurations.
    +     * 
    + * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]
    +     * call to fetch more of the matching instance configurations.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.ListInstanceConfigsResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ListInstanceConfigsResponse) + private static final com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse(); + } + + public static com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListInstanceConfigsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override 
+ public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstanceConfigsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsResponseOrBuilder.java new file mode 100644 index 000000000000..760d3e75574f --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsResponseOrBuilder.java @@ -0,0 +1,115 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface ListInstanceConfigsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.ListInstanceConfigsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The list of requested instance configurations.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + java.util.List getInstanceConfigsList(); + + /** + * + * + *
    +   * The list of requested instance configurations.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfigs(int index); + + /** + * + * + *
    +   * The list of requested instance configurations.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + int getInstanceConfigsCount(); + + /** + * + * + *
    +   * The list of requested instance configurations.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + java.util.List + getInstanceConfigsOrBuilderList(); + + /** + * + * + *
    +   * The list of requested instance configurations.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig instance_configs = 1; + */ + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder getInstanceConfigsOrBuilder( + int index); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]
    +   * call to fetch more of the matching instance configurations.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]
    +   * call to fetch more of the matching instance configurations.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsRequest.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsRequest.java new file mode 100644 index 000000000000..57234d4bf92b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsRequest.java @@ -0,0 +1,1823 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The request for
    + * [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest} + */ +@com.google.protobuf.Generated +public final class ListInstancePartitionOperationsRequest + extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest) + ListInstancePartitionOperationsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListInstancePartitionOperationsRequest"); + } + + // Use ListInstancePartitionOperationsRequest.newBuilder() to construct. + private ListInstancePartitionOperationsRequest( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListInstancePartitionOperationsRequest() { + parent_ = ""; + filter_ = ""; + pageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.class, + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.Builder + .class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile 
java.lang.Object parent_ = ""; + + /** + * + * + *
    +   * Required. The parent instance of the instance partition operations.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The parent instance of the instance partition operations.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FILTER_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object filter_ = ""; + + /** + * + * + *
    +   * Optional. An expression that filters the list of returned operations.
    +   *
    +   * A filter expression consists of a field name, a
    +   * comparison operator, and a value for filtering.
    +   * The value must be a string, a number, or a boolean. The comparison operator
    +   * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +   * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +   *
    +   * The following fields in the Operation are eligible for filtering:
    +   *
    +   * * `name` - The name of the long-running operation
    +   * * `done` - False if the operation is in progress, else true.
    +   * * `metadata.@type` - the type of metadata. For example, the type string
    +   * for
    +   * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]
    +   * is
    +   * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata`.
    +   * * `metadata.<field_name>` - any field in metadata.value.
    +   * `metadata.@type` must be specified first, if filtering on metadata
    +   * fields.
    +   * * `error` - Error associated with the long-running operation.
    +   * * `response.@type` - the type of response.
    +   * * `response.<field_name>` - any field in response.value.
    +   *
    +   * You can combine multiple expressions by enclosing each expression in
    +   * parentheses. By default, expressions are combined with AND logic. However,
    +   * you can specify AND, OR, and NOT logic explicitly.
    +   *
    +   * Here are a few examples:
    +   *
    +   * * `done:true` - The operation is complete.
    +   * * `(metadata.@type=` \
    +   * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata)
    +   * AND` \
    +   * `(metadata.instance_partition.name:custom-instance-partition) AND` \
    +   * `(metadata.start_time < \"2021-03-28T14:50:00Z\") AND` \
    +   * `(error:*)` - Return operations where:
    +   * * The operation's metadata type is
    +   * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
    +   * * The instance partition name contains "custom-instance-partition".
    +   * * The operation started before 2021-03-28T14:50:00Z.
    +   * * The operation resulted in an error.
    +   * 
    + * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The filter. + */ + @java.lang.Override + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } + } + + /** + * + * + *
    +   * Optional. An expression that filters the list of returned operations.
    +   *
    +   * A filter expression consists of a field name, a
    +   * comparison operator, and a value for filtering.
    +   * The value must be a string, a number, or a boolean. The comparison operator
    +   * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +   * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +   *
    +   * The following fields in the Operation are eligible for filtering:
    +   *
    +   * * `name` - The name of the long-running operation
    +   * * `done` - False if the operation is in progress, else true.
    +   * * `metadata.@type` - the type of metadata. For example, the type string
    +   * for
    +   * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]
    +   * is
    +   * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata`.
    +   * * `metadata.<field_name>` - any field in metadata.value.
    +   * `metadata.@type` must be specified first, if filtering on metadata
    +   * fields.
    +   * * `error` - Error associated with the long-running operation.
    +   * * `response.@type` - the type of response.
    +   * * `response.<field_name>` - any field in response.value.
    +   *
    +   * You can combine multiple expressions by enclosing each expression in
    +   * parentheses. By default, expressions are combined with AND logic. However,
    +   * you can specify AND, OR, and NOT logic explicitly.
    +   *
    +   * Here are a few examples:
    +   *
    +   * * `done:true` - The operation is complete.
    +   * * `(metadata.@type=` \
    +   * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata)
    +   * AND` \
    +   * `(metadata.instance_partition.name:custom-instance-partition) AND` \
    +   * `(metadata.start_time < \"2021-03-28T14:50:00Z\") AND` \
    +   * `(error:*)` - Return operations where:
    +   * * The operation's metadata type is
    +   * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
    +   * * The instance partition name contains "custom-instance-partition".
    +   * * The operation started before 2021-03-28T14:50:00Z.
    +   * * The operation resulted in an error.
    +   * 
    + * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for filter. + */ + @java.lang.Override + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 3; + private int pageSize_ = 0; + + /** + * + * + *
    +   * Optional. Number of operations to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +   * Optional. If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.next_page_token]
    +   * from a previous
    +   * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
    +   * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * Optional. If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.next_page_token]
    +   * from a previous
    +   * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
    +   * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_PARTITION_DEADLINE_FIELD_NUMBER = 5; + private com.google.protobuf.Timestamp instancePartitionDeadline_; + + /** + * + * + *
    +   * Optional. Deadline used while retrieving metadata for instance partition
    +   * operations. Instance partitions whose operation metadata cannot be
    +   * retrieved within this deadline will be added to
    +   * [unreachable_instance_partitions][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.unreachable_instance_partitions]
    +   * in
    +   * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse].
    +   * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the instancePartitionDeadline field is set. + */ + @java.lang.Override + public boolean hasInstancePartitionDeadline() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Optional. Deadline used while retrieving metadata for instance partition
    +   * operations. Instance partitions whose operation metadata cannot be
    +   * retrieved within this deadline will be added to
    +   * [unreachable_instance_partitions][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.unreachable_instance_partitions]
    +   * in
    +   * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse].
    +   * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The instancePartitionDeadline. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getInstancePartitionDeadline() { + return instancePartitionDeadline_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : instancePartitionDeadline_; + } + + /** + * + * + *
    +   * Optional. Deadline used while retrieving metadata for instance partition
    +   * operations. Instance partitions whose operation metadata cannot be
    +   * retrieved within this deadline will be added to
    +   * [unreachable_instance_partitions][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.unreachable_instance_partitions]
    +   * in
    +   * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse].
    +   * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getInstancePartitionDeadlineOrBuilder() { + return instancePartitionDeadline_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : instancePartitionDeadline_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, filter_); + } + if (pageSize_ != 0) { + output.writeInt32(3, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, pageToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(5, getInstancePartitionDeadline()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, filter_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(3, pageSize_); + } + if 
(!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, pageToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 5, getInstancePartitionDeadline()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest other = + (com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getFilter().equals(other.getFilter())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (hasInstancePartitionDeadline() != other.hasInstancePartitionDeadline()) return false; + if (hasInstancePartitionDeadline()) { + if (!getInstancePartitionDeadline().equals(other.getInstancePartitionDeadline())) + return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + FILTER_FIELD_NUMBER; + hash = (53 * hash) + getFilter().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + if (hasInstancePartitionDeadline()) { + hash = (37 * 
hash) + INSTANCE_PARTITION_DEADLINE_FIELD_NUMBER; + hash = (53 * hash) + getInstancePartitionDeadline().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest + parseFrom(java.io.InputStream input) throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest prototype) { + 
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest) + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.class, + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.Builder + .class); + } + + // Construct using + // com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetInstancePartitionDeadlineFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + filter_ = ""; + pageSize_ = 0; + pageToken_ = ""; + instancePartitionDeadline_ = null; + if (instancePartitionDeadlineBuilder_ != null) { + 
instancePartitionDeadlineBuilder_.dispose(); + instancePartitionDeadlineBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest build() { + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest + buildPartial() { + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest result = + new com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.filter_ = filter_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.pageToken_ = pageToken_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000010) != 0)) { + result.instancePartitionDeadline_ = + instancePartitionDeadlineBuilder_ 
== null + ? instancePartitionDeadline_ + : instancePartitionDeadlineBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest) { + return mergeFrom( + (com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest other) { + if (other + == com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getFilter().isEmpty()) { + filter_ = other.filter_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (other.hasInstancePartitionDeadline()) { + mergeInstancePartitionDeadline(other.getInstancePartitionDeadline()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 
+ case 18: + { + filter_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + input.readMessage( + internalGetInstancePartitionDeadlineFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
    +     * Required. The parent instance of the instance partition operations.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The parent instance of the instance partition operations.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The parent instance of the instance partition operations.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The parent instance of the instance partition operations.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The parent instance of the instance partition operations.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object filter_ = ""; + + /** + * + * + *
    +     * Optional. An expression that filters the list of returned operations.
    +     *
    +     * A filter expression consists of a field name, a
    +     * comparison operator, and a value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the Operation are eligible for filtering:
    +     *
    +     * * `name` - The name of the long-running operation
    +     * * `done` - False if the operation is in progress, else true.
    +     * * `metadata.@type` - the type of metadata. For example, the type string
    +     * for
    +     * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]
    +     * is
    +     * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata`.
    +     * * `metadata.<field_name>` - any field in metadata.value.
    +     * `metadata.@type` must be specified first, if filtering on metadata
    +     * fields.
    +     * * `error` - Error associated with the long-running operation.
    +     * * `response.@type` - the type of response.
    +     * * `response.<field_name>` - any field in response.value.
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic. However,
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `done:true` - The operation is complete.
    +     * * `(metadata.@type=` \
    +     * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata)
    +     * AND` \
    +     * `(metadata.instance_partition.name:custom-instance-partition) AND` \
    +     * `(metadata.start_time < \"2021-03-28T14:50:00Z\") AND` \
    +     * `(error:*)` - Return operations where:
    +     * * The operation's metadata type is
    +     * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
    +     * * The instance partition name contains "custom-instance-partition".
    +     * * The operation started before 2021-03-28T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * 
    + * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The filter. + */ + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Optional. An expression that filters the list of returned operations.
    +     *
    +     * A filter expression consists of a field name, a
    +     * comparison operator, and a value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the Operation are eligible for filtering:
    +     *
    +     * * `name` - The name of the long-running operation
    +     * * `done` - False if the operation is in progress, else true.
    +     * * `metadata.@type` - the type of metadata. For example, the type string
    +     * for
    +     * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]
    +     * is
    +     * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata`.
    +     * * `metadata.<field_name>` - any field in metadata.value.
    +     * `metadata.@type` must be specified first, if filtering on metadata
    +     * fields.
    +     * * `error` - Error associated with the long-running operation.
    +     * * `response.@type` - the type of response.
    +     * * `response.<field_name>` - any field in response.value.
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic. However,
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `done:true` - The operation is complete.
    +     * * `(metadata.@type=` \
    +     * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata)
    +     * AND` \
    +     * `(metadata.instance_partition.name:custom-instance-partition) AND` \
    +     * `(metadata.start_time < \"2021-03-28T14:50:00Z\") AND` \
    +     * `(error:*)` - Return operations where:
    +     * * The operation's metadata type is
    +     * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
    +     * * The instance partition name contains "custom-instance-partition".
    +     * * The operation started before 2021-03-28T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * 
    + * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for filter. + */ + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Optional. An expression that filters the list of returned operations.
    +     *
    +     * A filter expression consists of a field name, a
    +     * comparison operator, and a value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the Operation are eligible for filtering:
    +     *
    +     * * `name` - The name of the long-running operation
    +     * * `done` - False if the operation is in progress, else true.
    +     * * `metadata.@type` - the type of metadata. For example, the type string
    +     * for
    +     * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]
    +     * is
    +     * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata`.
    +     * * `metadata.<field_name>` - any field in metadata.value.
    +     * `metadata.@type` must be specified first, if filtering on metadata
    +     * fields.
    +     * * `error` - Error associated with the long-running operation.
    +     * * `response.@type` - the type of response.
    +     * * `response.<field_name>` - any field in response.value.
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic. However,
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `done:true` - The operation is complete.
    +     * * `(metadata.@type=` \
    +     * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata)
    +     * AND` \
    +     * `(metadata.instance_partition.name:custom-instance-partition) AND` \
    +     * `(metadata.start_time < \"2021-03-28T14:50:00Z\") AND` \
    +     * `(error:*)` - Return operations where:
    +     * * The operation's metadata type is
    +     * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
    +     * * The instance partition name contains "custom-instance-partition".
    +     * * The operation started before 2021-03-28T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * 
    + * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The filter to set. + * @return This builder for chaining. + */ + public Builder setFilter(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + filter_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. An expression that filters the list of returned operations.
    +     *
    +     * A filter expression consists of a field name, a
    +     * comparison operator, and a value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the Operation are eligible for filtering:
    +     *
    +     * * `name` - The name of the long-running operation
    +     * * `done` - False if the operation is in progress, else true.
    +     * * `metadata.@type` - the type of metadata. For example, the type string
    +     * for
    +     * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]
    +     * is
    +     * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata`.
    +     * * `metadata.<field_name>` - any field in metadata.value.
    +     * `metadata.@type` must be specified first, if filtering on metadata
    +     * fields.
    +     * * `error` - Error associated with the long-running operation.
    +     * * `response.@type` - the type of response.
    +     * * `response.<field_name>` - any field in response.value.
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic. However,
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `done:true` - The operation is complete.
    +     * * `(metadata.@type=` \
    +     * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata)
    +     * AND` \
    +     * `(metadata.instance_partition.name:custom-instance-partition) AND` \
    +     * `(metadata.start_time < \"2021-03-28T14:50:00Z\") AND` \
    +     * `(error:*)` - Return operations where:
    +     * * The operation's metadata type is
    +     * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
    +     * * The instance partition name contains "custom-instance-partition".
    +     * * The operation started before 2021-03-28T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * 
    + * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearFilter() { + filter_ = getDefaultInstance().getFilter(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. An expression that filters the list of returned operations.
    +     *
    +     * A filter expression consists of a field name, a
    +     * comparison operator, and a value for filtering.
    +     * The value must be a string, a number, or a boolean. The comparison operator
    +     * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +     * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +     *
    +     * The following fields in the Operation are eligible for filtering:
    +     *
    +     * * `name` - The name of the long-running operation
    +     * * `done` - False if the operation is in progress, else true.
    +     * * `metadata.@type` - the type of metadata. For example, the type string
    +     * for
    +     * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]
    +     * is
    +     * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata`.
    +     * * `metadata.<field_name>` - any field in metadata.value.
    +     * `metadata.@type` must be specified first, if filtering on metadata
    +     * fields.
    +     * * `error` - Error associated with the long-running operation.
    +     * * `response.@type` - the type of response.
    +     * * `response.<field_name>` - any field in response.value.
    +     *
    +     * You can combine multiple expressions by enclosing each expression in
    +     * parentheses. By default, expressions are combined with AND logic. However,
    +     * you can specify AND, OR, and NOT logic explicitly.
    +     *
    +     * Here are a few examples:
    +     *
    +     * * `done:true` - The operation is complete.
    +     * * `(metadata.@type=` \
    +     * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata)
    +     * AND` \
    +     * `(metadata.instance_partition.name:custom-instance-partition) AND` \
    +     * `(metadata.start_time < \"2021-03-28T14:50:00Z\") AND` \
    +     * `(error:*)` - Return operations where:
    +     * * The operation's metadata type is
    +     * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
    +     * * The instance partition name contains "custom-instance-partition".
    +     * * The operation started before 2021-03-28T14:50:00Z.
    +     * * The operation resulted in an error.
    +     * 
    + * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for filter to set. + * @return This builder for chaining. + */ + public Builder setFilterBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + filter_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
    +     * Optional. Number of operations to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
    +     * Optional. Number of operations to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Number of operations to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000004); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +     * Optional. If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.next_page_token]
    +     * from a previous
    +     * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Optional. If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.next_page_token]
    +     * from a previous
    +     * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Optional. If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.next_page_token]
    +     * from a previous
    +     * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.next_page_token]
    +     * from a previous
    +     * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.next_page_token]
    +     * from a previous
    +     * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
    +     * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp instancePartitionDeadline_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + instancePartitionDeadlineBuilder_; + + /** + * + * + *
    +     * Optional. Deadline used while retrieving metadata for instance partition
    +     * operations. Instance partitions whose operation metadata cannot be
    +     * retrieved within this deadline will be added to
    +     * [unreachable_instance_partitions][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.unreachable_instance_partitions]
    +     * in
    +     * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse].
    +     * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the instancePartitionDeadline field is set. + */ + public boolean hasInstancePartitionDeadline() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * Optional. Deadline used while retrieving metadata for instance partition
    +     * operations. Instance partitions whose operation metadata cannot be
    +     * retrieved within this deadline will be added to
    +     * [unreachable_instance_partitions][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.unreachable_instance_partitions]
    +     * in
    +     * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse].
    +     * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The instancePartitionDeadline. + */ + public com.google.protobuf.Timestamp getInstancePartitionDeadline() { + if (instancePartitionDeadlineBuilder_ == null) { + return instancePartitionDeadline_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : instancePartitionDeadline_; + } else { + return instancePartitionDeadlineBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. Deadline used while retrieving metadata for instance partition
    +     * operations. Instance partitions whose operation metadata cannot be
    +     * retrieved within this deadline will be added to
    +     * [unreachable_instance_partitions][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.unreachable_instance_partitions]
    +     * in
    +     * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse].
    +     * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setInstancePartitionDeadline(com.google.protobuf.Timestamp value) { + if (instancePartitionDeadlineBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + instancePartitionDeadline_ = value; + } else { + instancePartitionDeadlineBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Deadline used while retrieving metadata for instance partition
    +     * operations. Instance partitions whose operation metadata cannot be
    +     * retrieved within this deadline will be added to
    +     * [unreachable_instance_partitions][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.unreachable_instance_partitions]
    +     * in
    +     * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse].
    +     * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setInstancePartitionDeadline( + com.google.protobuf.Timestamp.Builder builderForValue) { + if (instancePartitionDeadlineBuilder_ == null) { + instancePartitionDeadline_ = builderForValue.build(); + } else { + instancePartitionDeadlineBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Deadline used while retrieving metadata for instance partition
    +     * operations. Instance partitions whose operation metadata cannot be
    +     * retrieved within this deadline will be added to
    +     * [unreachable_instance_partitions][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.unreachable_instance_partitions]
    +     * in
    +     * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse].
    +     * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeInstancePartitionDeadline(com.google.protobuf.Timestamp value) { + if (instancePartitionDeadlineBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && instancePartitionDeadline_ != null + && instancePartitionDeadline_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getInstancePartitionDeadlineBuilder().mergeFrom(value); + } else { + instancePartitionDeadline_ = value; + } + } else { + instancePartitionDeadlineBuilder_.mergeFrom(value); + } + if (instancePartitionDeadline_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. Deadline used while retrieving metadata for instance partition
    +     * operations. Instance partitions whose operation metadata cannot be
    +     * retrieved within this deadline will be added to
    +     * [unreachable_instance_partitions][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.unreachable_instance_partitions]
    +     * in
    +     * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse].
    +     * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearInstancePartitionDeadline() { + bitField0_ = (bitField0_ & ~0x00000010); + instancePartitionDeadline_ = null; + if (instancePartitionDeadlineBuilder_ != null) { + instancePartitionDeadlineBuilder_.dispose(); + instancePartitionDeadlineBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Deadline used while retrieving metadata for instance partition
    +     * operations. Instance partitions whose operation metadata cannot be
    +     * retrieved within this deadline will be added to
    +     * [unreachable_instance_partitions][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.unreachable_instance_partitions]
    +     * in
    +     * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse].
    +     * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Timestamp.Builder getInstancePartitionDeadlineBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetInstancePartitionDeadlineFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. Deadline used while retrieving metadata for instance partition
    +     * operations. Instance partitions whose operation metadata cannot be
    +     * retrieved within this deadline will be added to
    +     * [unreachable_instance_partitions][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.unreachable_instance_partitions]
    +     * in
    +     * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse].
    +     * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.TimestampOrBuilder getInstancePartitionDeadlineOrBuilder() { + if (instancePartitionDeadlineBuilder_ != null) { + return instancePartitionDeadlineBuilder_.getMessageOrBuilder(); + } else { + return instancePartitionDeadline_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : instancePartitionDeadline_; + } + } + + /** + * + * + *
    +     * Optional. Deadline used while retrieving metadata for instance partition
    +     * operations. Instance partitions whose operation metadata cannot be
    +     * retrieved within this deadline will be added to
    +     * [unreachable_instance_partitions][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.unreachable_instance_partitions]
    +     * in
    +     * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse].
    +     * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetInstancePartitionDeadlineFieldBuilder() { + if (instancePartitionDeadlineBuilder_ == null) { + instancePartitionDeadlineBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getInstancePartitionDeadline(), getParentForChildren(), isClean()); + instancePartitionDeadline_ = null; + } + return instancePartitionDeadlineBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest) + private static final com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest(); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListInstancePartitionOperationsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch 
(com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsRequestOrBuilder.java new file mode 100644 index 000000000000..aac74b47c7ee --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsRequestOrBuilder.java @@ -0,0 +1,274 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface ListInstancePartitionOperationsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The parent instance of the instance partition operations.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
    +   * Required. The parent instance of the instance partition operations.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
    +   * Optional. An expression that filters the list of returned operations.
    +   *
    +   * A filter expression consists of a field name, a
    +   * comparison operator, and a value for filtering.
    +   * The value must be a string, a number, or a boolean. The comparison operator
    +   * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +   * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +   *
    +   * The following fields in the Operation are eligible for filtering:
    +   *
    +   * * `name` - The name of the long-running operation
    +   * * `done` - False if the operation is in progress, else true.
    +   * * `metadata.@type` - the type of metadata. For example, the type string
    +   * for
    +   * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]
    +   * is
    +   * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata`.
    +   * * `metadata.<field_name>` - any field in metadata.value.
    +   * `metadata.@type` must be specified first, if filtering on metadata
    +   * fields.
    +   * * `error` - Error associated with the long-running operation.
    +   * * `response.@type` - the type of response.
    +   * * `response.<field_name>` - any field in response.value.
    +   *
    +   * You can combine multiple expressions by enclosing each expression in
    +   * parentheses. By default, expressions are combined with AND logic. However,
    +   * you can specify AND, OR, and NOT logic explicitly.
    +   *
    +   * Here are a few examples:
    +   *
    +   * * `done:true` - The operation is complete.
    +   * * `(metadata.@type=` \
    +   * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata)
    +   * AND` \
    +   * `(metadata.instance_partition.name:custom-instance-partition) AND` \
    +   * `(metadata.start_time < \"2021-03-28T14:50:00Z\") AND` \
    +   * `(error:*)` - Return operations where:
    +   * * The operation's metadata type is
    +   * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
    +   * * The instance partition name contains "custom-instance-partition".
    +   * * The operation started before 2021-03-28T14:50:00Z.
    +   * * The operation resulted in an error.
    +   * 
    + * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The filter. + */ + java.lang.String getFilter(); + + /** + * + * + *
    +   * Optional. An expression that filters the list of returned operations.
    +   *
    +   * A filter expression consists of a field name, a
    +   * comparison operator, and a value for filtering.
    +   * The value must be a string, a number, or a boolean. The comparison operator
    +   * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
    +   * Colon `:` is the contains operator. Filter rules are not case sensitive.
    +   *
    +   * The following fields in the Operation are eligible for filtering:
    +   *
    +   * * `name` - The name of the long-running operation
    +   * * `done` - False if the operation is in progress, else true.
    +   * * `metadata.@type` - the type of metadata. For example, the type string
    +   * for
    +   * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]
    +   * is
    +   * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata`.
    +   * * `metadata.<field_name>` - any field in metadata.value.
    +   * `metadata.@type` must be specified first, if filtering on metadata
    +   * fields.
    +   * * `error` - Error associated with the long-running operation.
    +   * * `response.@type` - the type of response.
    +   * * `response.<field_name>` - any field in response.value.
    +   *
    +   * You can combine multiple expressions by enclosing each expression in
    +   * parentheses. By default, expressions are combined with AND logic. However,
    +   * you can specify AND, OR, and NOT logic explicitly.
    +   *
    +   * Here are a few examples:
    +   *
    +   * * `done:true` - The operation is complete.
    +   * * `(metadata.@type=` \
    +   * `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata)
    +   * AND` \
    +   * `(metadata.instance_partition.name:custom-instance-partition) AND` \
    +   * `(metadata.start_time < \"2021-03-28T14:50:00Z\") AND` \
    +   * `(error:*)` - Return operations where:
    +   * * The operation's metadata type is
    +   * [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
    +   * * The instance partition name contains "custom-instance-partition".
    +   * * The operation started before 2021-03-28T14:50:00Z.
    +   * * The operation resulted in an error.
    +   * 
    + * + * string filter = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for filter. + */ + com.google.protobuf.ByteString getFilterBytes(); + + /** + * + * + *
    +   * Optional. Number of operations to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
    +   * Optional. If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.next_page_token]
    +   * from a previous
    +   * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
    +   * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
    +   * Optional. If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.next_page_token]
    +   * from a previous
    +   * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
    +   * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); + + /** + * + * + *
    +   * Optional. Deadline used while retrieving metadata for instance partition
    +   * operations. Instance partitions whose operation metadata cannot be
    +   * retrieved within this deadline will be added to
    +   * [unreachable_instance_partitions][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.unreachable_instance_partitions]
    +   * in
    +   * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse].
    +   * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the instancePartitionDeadline field is set. + */ + boolean hasInstancePartitionDeadline(); + + /** + * + * + *
    +   * Optional. Deadline used while retrieving metadata for instance partition
    +   * operations. Instance partitions whose operation metadata cannot be
    +   * retrieved within this deadline will be added to
    +   * [unreachable_instance_partitions][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.unreachable_instance_partitions]
    +   * in
    +   * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse].
    +   * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The instancePartitionDeadline. + */ + com.google.protobuf.Timestamp getInstancePartitionDeadline(); + + /** + * + * + *
    +   * Optional. Deadline used while retrieving metadata for instance partition
    +   * operations. Instance partitions whose operation metadata cannot be
    +   * retrieved within this deadline will be added to
    +   * [unreachable_instance_partitions][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.unreachable_instance_partitions]
    +   * in
    +   * [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse].
    +   * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.TimestampOrBuilder getInstancePartitionDeadlineOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsResponse.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsResponse.java new file mode 100644 index 000000000000..5f5ade9bd075 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsResponse.java @@ -0,0 +1,1577 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The response for
    + * [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse} + */ +@com.google.protobuf.Generated +public final class ListInstancePartitionOperationsResponse + extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse) + ListInstancePartitionOperationsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListInstancePartitionOperationsResponse"); + } + + // Use ListInstancePartitionOperationsResponse.newBuilder() to construct. + private ListInstancePartitionOperationsResponse( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListInstancePartitionOperationsResponse() { + operations_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + unreachableInstancePartitions_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.class, + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.Builder + .class); + } + + public 
static final int OPERATIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List operations_; + + /** + * + * + *
    +   * The list of matching instance partition long-running operations. Each
    +   * operation's name will be
    +   * prefixed by the instance partition's name. The operation's
    +   * metadata field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + @java.lang.Override + public java.util.List getOperationsList() { + return operations_; + } + + /** + * + * + *
    +   * The list of matching instance partition long-running operations. Each
    +   * operation's name will be
    +   * prefixed by the instance partition's name. The operation's
    +   * metadata field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + @java.lang.Override + public java.util.List + getOperationsOrBuilderList() { + return operations_; + } + + /** + * + * + *
    +   * The list of matching instance partition long-running operations. Each
    +   * operation's name will be
    +   * prefixed by the instance partition's name. The operation's
    +   * metadata field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + @java.lang.Override + public int getOperationsCount() { + return operations_.size(); + } + + /** + * + * + *
    +   * The list of matching instance partition long-running operations. Each
    +   * operation's name will be
    +   * prefixed by the instance partition's name. The operation's
    +   * metadata field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + @java.lang.Override + public com.google.longrunning.Operation getOperations(int index) { + return operations_.get(index); + } + + /** + * + * + *
    +   * The list of matching instance partition long-running operations. Each
    +   * operation's name will be
    +   * prefixed by the instance partition's name. The operation's
    +   * metadata field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + @java.lang.Override + public com.google.longrunning.OperationOrBuilder getOperationsOrBuilder(int index) { + return operations_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations]
    +   * call to fetch more of the matching metadata.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations]
    +   * call to fetch more of the matching metadata.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int UNREACHABLE_INSTANCE_PARTITIONS_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList unreachableInstancePartitions_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * The list of unreachable instance partitions.
    +   * It includes the names of instance partitions whose operation metadata could
    +   * not be retrieved within
    +   * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline].
    +   * 
    + * + * repeated string unreachable_instance_partitions = 3; + * + * @return A list containing the unreachableInstancePartitions. + */ + public com.google.protobuf.ProtocolStringList getUnreachableInstancePartitionsList() { + return unreachableInstancePartitions_; + } + + /** + * + * + *
    +   * The list of unreachable instance partitions.
    +   * It includes the names of instance partitions whose operation metadata could
    +   * not be retrieved within
    +   * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline].
    +   * 
    + * + * repeated string unreachable_instance_partitions = 3; + * + * @return The count of unreachableInstancePartitions. + */ + public int getUnreachableInstancePartitionsCount() { + return unreachableInstancePartitions_.size(); + } + + /** + * + * + *
    +   * The list of unreachable instance partitions.
    +   * It includes the names of instance partitions whose operation metadata could
    +   * not be retrieved within
    +   * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline].
    +   * 
    + * + * repeated string unreachable_instance_partitions = 3; + * + * @param index The index of the element to return. + * @return The unreachableInstancePartitions at the given index. + */ + public java.lang.String getUnreachableInstancePartitions(int index) { + return unreachableInstancePartitions_.get(index); + } + + /** + * + * + *
    +   * The list of unreachable instance partitions.
    +   * It includes the names of instance partitions whose operation metadata could
    +   * not be retrieved within
    +   * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline].
    +   * 
    + * + * repeated string unreachable_instance_partitions = 3; + * + * @param index The index of the value to return. + * @return The bytes of the unreachableInstancePartitions at the given index. + */ + public com.google.protobuf.ByteString getUnreachableInstancePartitionsBytes(int index) { + return unreachableInstancePartitions_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < operations_.size(); i++) { + output.writeMessage(1, operations_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + for (int i = 0; i < unreachableInstancePartitions_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString( + output, 3, unreachableInstancePartitions_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < operations_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, operations_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + { + int dataSize = 0; + for (int i = 0; i < unreachableInstancePartitions_.size(); i++) { + dataSize += computeStringSizeNoTag(unreachableInstancePartitions_.getRaw(i)); + } + size += dataSize; + size += 1 * getUnreachableInstancePartitionsList().size(); + } + size += getUnknownFields().getSerializedSize(); 
+ memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse other = + (com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse) obj; + + if (!getOperationsList().equals(other.getOperationsList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnreachableInstancePartitionsList() + .equals(other.getUnreachableInstancePartitionsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getOperationsCount() > 0) { + hash = (37 * hash) + OPERATIONS_FIELD_NUMBER; + hash = (53 * hash) + getOperationsList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + if (getUnreachableInstancePartitionsCount() > 0) { + hash = (37 * hash) + UNREACHABLE_INSTANCE_PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + getUnreachableInstancePartitionsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The response for
    +   * [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse) + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.class, + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.Builder + .class); + } + + // Construct using + // com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (operationsBuilder_ == null) { + operations_ = java.util.Collections.emptyList(); + } else { + operations_ = null; + operationsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + unreachableInstancePartitions_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse build() { + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse + buildPartial() { + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse result = + new com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse result) { + if (operationsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + operations_ = java.util.Collections.unmodifiableList(operations_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.operations_ = operations_; + } else { + result.operations_ = operationsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + unreachableInstancePartitions_.makeImmutable(); + 
result.unreachableInstancePartitions_ = unreachableInstancePartitions_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse) { + return mergeFrom( + (com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse other) { + if (other + == com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse + .getDefaultInstance()) return this; + if (operationsBuilder_ == null) { + if (!other.operations_.isEmpty()) { + if (operations_.isEmpty()) { + operations_ = other.operations_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureOperationsIsMutable(); + operations_.addAll(other.operations_); + } + onChanged(); + } + } else { + if (!other.operations_.isEmpty()) { + if (operationsBuilder_.isEmpty()) { + operationsBuilder_.dispose(); + operationsBuilder_ = null; + operations_ = other.operations_; + bitField0_ = (bitField0_ & ~0x00000001); + operationsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetOperationsFieldBuilder() + : null; + } else { + operationsBuilder_.addAllMessages(other.operations_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.unreachableInstancePartitions_.isEmpty()) { + if (unreachableInstancePartitions_.isEmpty()) { + unreachableInstancePartitions_ = other.unreachableInstancePartitions_; + bitField0_ |= 0x00000004; + } else { + ensureUnreachableInstancePartitionsIsMutable(); + unreachableInstancePartitions_.addAll(other.unreachableInstancePartitions_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.longrunning.Operation m = + input.readMessage(com.google.longrunning.Operation.parser(), extensionRegistry); + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + operations_.add(m); + } else { + operationsBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureUnreachableInstancePartitionsIsMutable(); + unreachableInstancePartitions_.add(s); + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + 
} catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List operations_ = + java.util.Collections.emptyList(); + + private void ensureOperationsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + operations_ = new java.util.ArrayList(operations_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder> + operationsBuilder_; + + /** + * + * + *
    +     * The list of matching instance partition long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the instance partition's name. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public java.util.List getOperationsList() { + if (operationsBuilder_ == null) { + return java.util.Collections.unmodifiableList(operations_); + } else { + return operationsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * The list of matching instance partition long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the instance partition's name. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public int getOperationsCount() { + if (operationsBuilder_ == null) { + return operations_.size(); + } else { + return operationsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * The list of matching instance partition long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the instance partition's name. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public com.google.longrunning.Operation getOperations(int index) { + if (operationsBuilder_ == null) { + return operations_.get(index); + } else { + return operationsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * The list of matching instance partition long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the instance partition's name. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder setOperations(int index, com.google.longrunning.Operation value) { + if (operationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOperationsIsMutable(); + operations_.set(index, value); + onChanged(); + } else { + operationsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of matching instance partition long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the instance partition's name. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder setOperations( + int index, com.google.longrunning.Operation.Builder builderForValue) { + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + operations_.set(index, builderForValue.build()); + onChanged(); + } else { + operationsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of matching instance partition long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the instance partition's name. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder addOperations(com.google.longrunning.Operation value) { + if (operationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOperationsIsMutable(); + operations_.add(value); + onChanged(); + } else { + operationsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * The list of matching instance partition long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the instance partition's name. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder addOperations(int index, com.google.longrunning.Operation value) { + if (operationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOperationsIsMutable(); + operations_.add(index, value); + onChanged(); + } else { + operationsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of matching instance partition long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the instance partition's name. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder addOperations(com.google.longrunning.Operation.Builder builderForValue) { + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + operations_.add(builderForValue.build()); + onChanged(); + } else { + operationsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of matching instance partition long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the instance partition's name. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder addOperations( + int index, com.google.longrunning.Operation.Builder builderForValue) { + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + operations_.add(index, builderForValue.build()); + onChanged(); + } else { + operationsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of matching instance partition long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the instance partition's name. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder addAllOperations( + java.lang.Iterable values) { + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, operations_); + onChanged(); + } else { + operationsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * The list of matching instance partition long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the instance partition's name. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder clearOperations() { + if (operationsBuilder_ == null) { + operations_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + operationsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * The list of matching instance partition long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the instance partition's name. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public Builder removeOperations(int index) { + if (operationsBuilder_ == null) { + ensureOperationsIsMutable(); + operations_.remove(index); + onChanged(); + } else { + operationsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * The list of matching instance partition long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the instance partition's name. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public com.google.longrunning.Operation.Builder getOperationsBuilder(int index) { + return internalGetOperationsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * The list of matching instance partition long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the instance partition's name. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public com.google.longrunning.OperationOrBuilder getOperationsOrBuilder(int index) { + if (operationsBuilder_ == null) { + return operations_.get(index); + } else { + return operationsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * The list of matching instance partition long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the instance partition's name. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public java.util.List + getOperationsOrBuilderList() { + if (operationsBuilder_ != null) { + return operationsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(operations_); + } + } + + /** + * + * + *
    +     * The list of matching instance partition long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the instance partition's name. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public com.google.longrunning.Operation.Builder addOperationsBuilder() { + return internalGetOperationsFieldBuilder() + .addBuilder(com.google.longrunning.Operation.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of matching instance partition long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the instance partition's name. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public com.google.longrunning.Operation.Builder addOperationsBuilder(int index) { + return internalGetOperationsFieldBuilder() + .addBuilder(index, com.google.longrunning.Operation.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of matching instance partition long-running operations. Each
    +     * operation's name will be
    +     * prefixed by the instance partition's name. The operation's
    +     * metadata field type
    +     * `metadata.type_url` describes the type of the metadata.
    +     * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + public java.util.List getOperationsBuilderList() { + return internalGetOperationsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder> + internalGetOperationsFieldBuilder() { + if (operationsBuilder_ == null) { + operationsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder>( + operations_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + operations_ = null; + } + return operationsBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations]
    +     * call to fetch more of the matching metadata.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations]
    +     * call to fetch more of the matching metadata.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations]
    +     * call to fetch more of the matching metadata.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations]
    +     * call to fetch more of the matching metadata.
    +     * 
    + * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations]
    +     * call to fetch more of the matching metadata.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList unreachableInstancePartitions_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureUnreachableInstancePartitionsIsMutable() { + if (!unreachableInstancePartitions_.isModifiable()) { + unreachableInstancePartitions_ = + new com.google.protobuf.LazyStringArrayList(unreachableInstancePartitions_); + } + bitField0_ |= 0x00000004; + } + + /** + * + * + *
    +     * The list of unreachable instance partitions.
    +     * It includes the names of instance partitions whose operation metadata could
    +     * not be retrieved within
    +     * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline].
    +     * 
    + * + * repeated string unreachable_instance_partitions = 3; + * + * @return A list containing the unreachableInstancePartitions. + */ + public com.google.protobuf.ProtocolStringList getUnreachableInstancePartitionsList() { + unreachableInstancePartitions_.makeImmutable(); + return unreachableInstancePartitions_; + } + + /** + * + * + *
    +     * The list of unreachable instance partitions.
    +     * It includes the names of instance partitions whose operation metadata could
    +     * not be retrieved within
    +     * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline].
    +     * 
    + * + * repeated string unreachable_instance_partitions = 3; + * + * @return The count of unreachableInstancePartitions. + */ + public int getUnreachableInstancePartitionsCount() { + return unreachableInstancePartitions_.size(); + } + + /** + * + * + *
    +     * The list of unreachable instance partitions.
    +     * It includes the names of instance partitions whose operation metadata could
    +     * not be retrieved within
    +     * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline].
    +     * 
    + * + * repeated string unreachable_instance_partitions = 3; + * + * @param index The index of the element to return. + * @return The unreachableInstancePartitions at the given index. + */ + public java.lang.String getUnreachableInstancePartitions(int index) { + return unreachableInstancePartitions_.get(index); + } + + /** + * + * + *
    +     * The list of unreachable instance partitions.
    +     * It includes the names of instance partitions whose operation metadata could
    +     * not be retrieved within
    +     * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline].
    +     * 
    + * + * repeated string unreachable_instance_partitions = 3; + * + * @param index The index of the value to return. + * @return The bytes of the unreachableInstancePartitions at the given index. + */ + public com.google.protobuf.ByteString getUnreachableInstancePartitionsBytes(int index) { + return unreachableInstancePartitions_.getByteString(index); + } + + /** + * + * + *
    +     * The list of unreachable instance partitions.
    +     * It includes the names of instance partitions whose operation metadata could
    +     * not be retrieved within
    +     * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline].
    +     * 
    + * + * repeated string unreachable_instance_partitions = 3; + * + * @param index The index to set the value at. + * @param value The unreachableInstancePartitions to set. + * @return This builder for chaining. + */ + public Builder setUnreachableInstancePartitions(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureUnreachableInstancePartitionsIsMutable(); + unreachableInstancePartitions_.set(index, value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The list of unreachable instance partitions.
    +     * It includes the names of instance partitions whose operation metadata could
    +     * not be retrieved within
    +     * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline].
    +     * 
    + * + * repeated string unreachable_instance_partitions = 3; + * + * @param value The unreachableInstancePartitions to add. + * @return This builder for chaining. + */ + public Builder addUnreachableInstancePartitions(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureUnreachableInstancePartitionsIsMutable(); + unreachableInstancePartitions_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The list of unreachable instance partitions.
    +     * It includes the names of instance partitions whose operation metadata could
    +     * not be retrieved within
    +     * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline].
    +     * 
    + * + * repeated string unreachable_instance_partitions = 3; + * + * @param values The unreachableInstancePartitions to add. + * @return This builder for chaining. + */ + public Builder addAllUnreachableInstancePartitions( + java.lang.Iterable values) { + ensureUnreachableInstancePartitionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, unreachableInstancePartitions_); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The list of unreachable instance partitions.
    +     * It includes the names of instance partitions whose operation metadata could
    +     * not be retrieved within
    +     * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline].
    +     * 
    + * + * repeated string unreachable_instance_partitions = 3; + * + * @return This builder for chaining. + */ + public Builder clearUnreachableInstancePartitions() { + unreachableInstancePartitions_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The list of unreachable instance partitions.
    +     * It includes the names of instance partitions whose operation metadata could
    +     * not be retrieved within
    +     * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline].
    +     * 
    + * + * repeated string unreachable_instance_partitions = 3; + * + * @param value The bytes of the unreachableInstancePartitions to add. + * @return This builder for chaining. + */ + public Builder addUnreachableInstancePartitionsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureUnreachableInstancePartitionsIsMutable(); + unreachableInstancePartitions_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse) + private static final com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse(); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListInstancePartitionOperationsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsResponseOrBuilder.java new file mode 100644 index 000000000000..1650987228a8 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsResponseOrBuilder.java @@ -0,0 +1,199 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface ListInstancePartitionOperationsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The list of matching instance partition long-running operations. Each
    +   * operation's name will be
    +   * prefixed by the instance partition's name. The operation's
    +   * metadata field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + java.util.List getOperationsList(); + + /** + * + * + *
    +   * The list of matching instance partition long-running operations. Each
    +   * operation's name will be
    +   * prefixed by the instance partition's name. The operation's
    +   * metadata field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + com.google.longrunning.Operation getOperations(int index); + + /** + * + * + *
    +   * The list of matching instance partition long-running operations. Each
    +   * operation's name will be
    +   * prefixed by the instance partition's name. The operation's
    +   * metadata field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + int getOperationsCount(); + + /** + * + * + *
    +   * The list of matching instance partition long-running operations. Each
    +   * operation's name will be
    +   * prefixed by the instance partition's name. The operation's
    +   * metadata field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + java.util.List getOperationsOrBuilderList(); + + /** + * + * + *
    +   * The list of matching instance partition long-running operations. Each
    +   * operation's name will be
    +   * prefixed by the instance partition's name. The operation's
    +   * metadata field type
    +   * `metadata.type_url` describes the type of the metadata.
    +   * 
    + * + * repeated .google.longrunning.Operation operations = 1; + */ + com.google.longrunning.OperationOrBuilder getOperationsOrBuilder(int index); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations]
    +   * call to fetch more of the matching metadata.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations]
    +   * call to fetch more of the matching metadata.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); + + /** + * + * + *
    +   * The list of unreachable instance partitions.
    +   * It includes the names of instance partitions whose operation metadata could
    +   * not be retrieved within
    +   * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline].
    +   * 
    + * + * repeated string unreachable_instance_partitions = 3; + * + * @return A list containing the unreachableInstancePartitions. + */ + java.util.List getUnreachableInstancePartitionsList(); + + /** + * + * + *
    +   * The list of unreachable instance partitions.
    +   * It includes the names of instance partitions whose operation metadata could
    +   * not be retrieved within
    +   * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline].
    +   * 
    + * + * repeated string unreachable_instance_partitions = 3; + * + * @return The count of unreachableInstancePartitions. + */ + int getUnreachableInstancePartitionsCount(); + + /** + * + * + *
    +   * The list of unreachable instance partitions.
    +   * It includes the names of instance partitions whose operation metadata could
    +   * not be retrieved within
    +   * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline].
    +   * 
    + * + * repeated string unreachable_instance_partitions = 3; + * + * @param index The index of the element to return. + * @return The unreachableInstancePartitions at the given index. + */ + java.lang.String getUnreachableInstancePartitions(int index); + + /** + * + * + *
    +   * The list of unreachable instance partitions.
    +   * It includes the names of instance partitions whose operation metadata could
    +   * not be retrieved within
    +   * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline].
    +   * 
    + * + * repeated string unreachable_instance_partitions = 3; + * + * @param index The index of the value to return. + * @return The bytes of the unreachableInstancePartitions at the given index. + */ + com.google.protobuf.ByteString getUnreachableInstancePartitionsBytes(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsRequest.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsRequest.java new file mode 100644 index 000000000000..aa50a42a678b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsRequest.java @@ -0,0 +1,1340 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The request for
    + * [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ListInstancePartitionsRequest} + */ +@com.google.protobuf.Generated +public final class ListInstancePartitionsRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.ListInstancePartitionsRequest) + ListInstancePartitionsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListInstancePartitionsRequest"); + } + + // Use ListInstancePartitionsRequest.newBuilder() to construct. + private ListInstancePartitionsRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListInstancePartitionsRequest() { + parent_ = ""; + pageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest.class, + com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
    +   * Required. The instance whose instance partitions should be listed. Values
    +   * are of the form `projects/<project>/instances/<instance>`. Use `{instance}
    +   * = '-'` to list instance partitions for all Instances in a project, e.g.,
    +   * `projects/myproject/instances/-`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The instance whose instance partitions should be listed. Values
    +   * are of the form `projects/<project>/instances/<instance>`. Use `{instance}
    +   * = '-'` to list instance partitions for all Instances in a project, e.g.,
    +   * `projects/myproject/instances/-`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_ = 0; + + /** + * + * + *
    +   * Number of instance partitions to be returned in the response. If 0 or less,
    +   * defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 2; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.next_page_token]
    +   * from a previous
    +   * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +   * 
    + * + * string page_token = 3; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.next_page_token]
    +   * from a previous
    +   * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +   * 
    + * + * string page_token = 3; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_PARTITION_DEADLINE_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp instancePartitionDeadline_; + + /** + * + * + *
    +   * Optional. Deadline used while retrieving metadata for instance partitions.
    +   * Instance partitions whose metadata cannot be retrieved within this deadline
    +   * will be added to
    +   * [unreachable][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.unreachable]
    +   * in
    +   * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +   * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the instancePartitionDeadline field is set. + */ + @java.lang.Override + public boolean hasInstancePartitionDeadline() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Optional. Deadline used while retrieving metadata for instance partitions.
    +   * Instance partitions whose metadata cannot be retrieved within this deadline
    +   * will be added to
    +   * [unreachable][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.unreachable]
    +   * in
    +   * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +   * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The instancePartitionDeadline. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getInstancePartitionDeadline() { + return instancePartitionDeadline_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : instancePartitionDeadline_; + } + + /** + * + * + *
    +   * Optional. Deadline used while retrieving metadata for instance partitions.
    +   * Instance partitions whose metadata cannot be retrieved within this deadline
    +   * will be added to
    +   * [unreachable][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.unreachable]
    +   * in
    +   * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +   * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getInstancePartitionDeadlineOrBuilder() { + return instancePartitionDeadline_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : instancePartitionDeadline_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (pageSize_ != 0) { + output.writeInt32(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, pageToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(4, getInstancePartitionDeadline()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, pageToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, getInstancePartitionDeadline()); + } + size += 
getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest other = + (com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (hasInstancePartitionDeadline() != other.hasInstancePartitionDeadline()) return false; + if (hasInstancePartitionDeadline()) { + if (!getInstancePartitionDeadline().equals(other.getInstancePartitionDeadline())) + return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + if (hasInstancePartitionDeadline()) { + hash = (37 * hash) + INSTANCE_PARTITION_DEADLINE_FIELD_NUMBER; + hash = (53 * hash) + getInstancePartitionDeadline().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest + 
parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ListInstancePartitionsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.ListInstancePartitionsRequest) + com.google.spanner.admin.instance.v1.ListInstancePartitionsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest.class, + com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest.Builder.class); + } + + // Construct using + // com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetInstancePartitionDeadlineFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + pageSize_ = 0; + pageToken_ = ""; + instancePartitionDeadline_ = null; + if (instancePartitionDeadlineBuilder_ != null) { + instancePartitionDeadlineBuilder_.dispose(); + instancePartitionDeadlineBuilder_ = null; + } + return this; + } + + 
@java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest build() { + com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest buildPartial() { + com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest result = + new com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageToken_ = pageToken_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.instancePartitionDeadline_ = + instancePartitionDeadlineBuilder_ == null + ? 
instancePartitionDeadline_ + : instancePartitionDeadlineBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest) { + return mergeFrom( + (com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest other) { + if (other + == com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasInstancePartitionDeadline()) { + mergeInstancePartitionDeadline(other.getInstancePartitionDeadline()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + pageToken_ = input.readStringRequireUtf8(); 
+ bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetInstancePartitionDeadlineFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
    +     * Required. The instance whose instance partitions should be listed. Values
    +     * are of the form `projects/<project>/instances/<instance>`. Use `{instance}
    +     * = '-'` to list instance partitions for all Instances in a project, e.g.,
    +     * `projects/myproject/instances/-`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The instance whose instance partitions should be listed. Values
    +     * are of the form `projects/<project>/instances/<instance>`. Use `{instance}
    +     * = '-'` to list instance partitions for all Instances in a project, e.g.,
    +     * `projects/myproject/instances/-`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The instance whose instance partitions should be listed. Values
    +     * are of the form `projects/<project>/instances/<instance>`. Use `{instance}
    +     * = '-'` to list instance partitions for all Instances in a project, e.g.,
    +     * `projects/myproject/instances/-`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance whose instance partitions should be listed. Values
    +     * are of the form `projects/<project>/instances/<instance>`. Use `{instance}
    +     * = '-'` to list instance partitions for all Instances in a project, e.g.,
    +     * `projects/myproject/instances/-`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance whose instance partitions should be listed. Values
    +     * are of the form `projects/<project>/instances/<instance>`. Use `{instance}
    +     * = '-'` to list instance partitions for all Instances in a project, e.g.,
    +     * `projects/myproject/instances/-`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
    +     * Number of instance partitions to be returned in the response. If 0 or less,
    +     * defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 2; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
    +     * Number of instance partitions to be returned in the response. If 0 or less,
    +     * defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 2; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Number of instance partitions to be returned in the response. If 0 or less,
    +     * defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 2; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000002); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.next_page_token]
    +     * from a previous
    +     * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +     * 
    + * + * string page_token = 3; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.next_page_token]
    +     * from a previous
    +     * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +     * 
    + * + * string page_token = 3; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.next_page_token]
    +     * from a previous
    +     * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +     * 
    + * + * string page_token = 3; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.next_page_token]
    +     * from a previous
    +     * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +     * 
    + * + * string page_token = 3; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.next_page_token]
    +     * from a previous
    +     * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +     * 
    + * + * string page_token = 3; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp instancePartitionDeadline_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + instancePartitionDeadlineBuilder_; + + /** + * + * + *
    +     * Optional. Deadline used while retrieving metadata for instance partitions.
    +     * Instance partitions whose metadata cannot be retrieved within this deadline
    +     * will be added to
    +     * [unreachable][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.unreachable]
    +     * in
    +     * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +     * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the instancePartitionDeadline field is set. + */ + public boolean hasInstancePartitionDeadline() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Optional. Deadline used while retrieving metadata for instance partitions.
    +     * Instance partitions whose metadata cannot be retrieved within this deadline
    +     * will be added to
    +     * [unreachable][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.unreachable]
    +     * in
    +     * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +     * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The instancePartitionDeadline. + */ + public com.google.protobuf.Timestamp getInstancePartitionDeadline() { + if (instancePartitionDeadlineBuilder_ == null) { + return instancePartitionDeadline_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : instancePartitionDeadline_; + } else { + return instancePartitionDeadlineBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. Deadline used while retrieving metadata for instance partitions.
    +     * Instance partitions whose metadata cannot be retrieved within this deadline
    +     * will be added to
    +     * [unreachable][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.unreachable]
    +     * in
    +     * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +     * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setInstancePartitionDeadline(com.google.protobuf.Timestamp value) { + if (instancePartitionDeadlineBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + instancePartitionDeadline_ = value; + } else { + instancePartitionDeadlineBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Deadline used while retrieving metadata for instance partitions.
    +     * Instance partitions whose metadata cannot be retrieved within this deadline
    +     * will be added to
    +     * [unreachable][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.unreachable]
    +     * in
    +     * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +     * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setInstancePartitionDeadline( + com.google.protobuf.Timestamp.Builder builderForValue) { + if (instancePartitionDeadlineBuilder_ == null) { + instancePartitionDeadline_ = builderForValue.build(); + } else { + instancePartitionDeadlineBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Deadline used while retrieving metadata for instance partitions.
    +     * Instance partitions whose metadata cannot be retrieved within this deadline
    +     * will be added to
    +     * [unreachable][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.unreachable]
    +     * in
    +     * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +     * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeInstancePartitionDeadline(com.google.protobuf.Timestamp value) { + if (instancePartitionDeadlineBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && instancePartitionDeadline_ != null + && instancePartitionDeadline_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getInstancePartitionDeadlineBuilder().mergeFrom(value); + } else { + instancePartitionDeadline_ = value; + } + } else { + instancePartitionDeadlineBuilder_.mergeFrom(value); + } + if (instancePartitionDeadline_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. Deadline used while retrieving metadata for instance partitions.
    +     * Instance partitions whose metadata cannot be retrieved within this deadline
    +     * will be added to
    +     * [unreachable][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.unreachable]
    +     * in
    +     * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +     * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearInstancePartitionDeadline() { + bitField0_ = (bitField0_ & ~0x00000008); + instancePartitionDeadline_ = null; + if (instancePartitionDeadlineBuilder_ != null) { + instancePartitionDeadlineBuilder_.dispose(); + instancePartitionDeadlineBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Deadline used while retrieving metadata for instance partitions.
    +     * Instance partitions whose metadata cannot be retrieved within this deadline
    +     * will be added to
    +     * [unreachable][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.unreachable]
    +     * in
    +     * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +     * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Timestamp.Builder getInstancePartitionDeadlineBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetInstancePartitionDeadlineFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. Deadline used while retrieving metadata for instance partitions.
    +     * Instance partitions whose metadata cannot be retrieved within this deadline
    +     * will be added to
    +     * [unreachable][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.unreachable]
    +     * in
    +     * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +     * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.TimestampOrBuilder getInstancePartitionDeadlineOrBuilder() { + if (instancePartitionDeadlineBuilder_ != null) { + return instancePartitionDeadlineBuilder_.getMessageOrBuilder(); + } else { + return instancePartitionDeadline_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : instancePartitionDeadline_; + } + } + + /** + * + * + *
    +     * Optional. Deadline used while retrieving metadata for instance partitions.
    +     * Instance partitions whose metadata cannot be retrieved within this deadline
    +     * will be added to
    +     * [unreachable][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.unreachable]
    +     * in
    +     * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +     * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetInstancePartitionDeadlineFieldBuilder() { + if (instancePartitionDeadlineBuilder_ == null) { + instancePartitionDeadlineBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getInstancePartitionDeadline(), getParentForChildren(), isClean()); + instancePartitionDeadline_ = null; + } + return instancePartitionDeadlineBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.ListInstancePartitionsRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ListInstancePartitionsRequest) + private static final com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest(); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListInstancePartitionsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancePartitionsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsRequestOrBuilder.java new file mode 100644 index 000000000000..11e1a0909641 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsRequestOrBuilder.java @@ -0,0 +1,168 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface ListInstancePartitionsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.ListInstancePartitionsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The instance whose instance partitions should be listed. Values
    +   * are of the form `projects/<project>/instances/<instance>`. Use `{instance}
    +   * = '-'` to list instance partitions for all Instances in a project, e.g.,
    +   * `projects/myproject/instances/-`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
    +   * Required. The instance whose instance partitions should be listed. Values
    +   * are of the form `projects/<project>/instances/<instance>`. Use `{instance}
    +   * = '-'` to list instance partitions for all Instances in a project, e.g.,
    +   * `projects/myproject/instances/-`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
    +   * Number of instance partitions to be returned in the response. If 0 or less,
    +   * defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 2; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.next_page_token]
    +   * from a previous
    +   * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +   * 
    + * + * string page_token = 3; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.next_page_token]
    +   * from a previous
    +   * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +   * 
    + * + * string page_token = 3; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); + + /** + * + * + *
    +   * Optional. Deadline used while retrieving metadata for instance partitions.
    +   * Instance partitions whose metadata cannot be retrieved within this deadline
    +   * will be added to
    +   * [unreachable][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.unreachable]
    +   * in
    +   * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +   * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the instancePartitionDeadline field is set. + */ + boolean hasInstancePartitionDeadline(); + + /** + * + * + *
    +   * Optional. Deadline used while retrieving metadata for instance partitions.
    +   * Instance partitions whose metadata cannot be retrieved within this deadline
    +   * will be added to
    +   * [unreachable][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.unreachable]
    +   * in
    +   * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +   * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The instancePartitionDeadline. + */ + com.google.protobuf.Timestamp getInstancePartitionDeadline(); + + /** + * + * + *
    +   * Optional. Deadline used while retrieving metadata for instance partitions.
    +   * Instance partitions whose metadata cannot be retrieved within this deadline
    +   * will be added to
    +   * [unreachable][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.unreachable]
    +   * in
    +   * [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
    +   * 
    + * + * + * .google.protobuf.Timestamp instance_partition_deadline = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.TimestampOrBuilder getInstancePartitionDeadlineOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsResponse.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsResponse.java new file mode 100644 index 000000000000..1c81a90e270b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsResponse.java @@ -0,0 +1,1512 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The response for
    + * [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ListInstancePartitionsResponse} + */ +@com.google.protobuf.Generated +public final class ListInstancePartitionsResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.ListInstancePartitionsResponse) + ListInstancePartitionsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListInstancePartitionsResponse"); + } + + // Use ListInstancePartitionsResponse.newBuilder() to construct. + private ListInstancePartitionsResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListInstancePartitionsResponse() { + instancePartitions_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + unreachable_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse.class, + com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse.Builder.class); + } + + public static final int INSTANCE_PARTITIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List + 
instancePartitions_; + + /** + * + * + *
    +   * The list of requested instancePartitions.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + @java.lang.Override + public java.util.List + getInstancePartitionsList() { + return instancePartitions_; + } + + /** + * + * + *
    +   * The list of requested instancePartitions.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + @java.lang.Override + public java.util.List + getInstancePartitionsOrBuilderList() { + return instancePartitions_; + } + + /** + * + * + *
    +   * The list of requested instancePartitions.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + @java.lang.Override + public int getInstancePartitionsCount() { + return instancePartitions_.size(); + } + + /** + * + * + *
    +   * The list of requested instancePartitions.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstancePartition getInstancePartitions(int index) { + return instancePartitions_.get(index); + } + + /** + * + * + *
    +   * The list of requested instancePartitions.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder + getInstancePartitionsOrBuilder(int index) { + return instancePartitions_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions]
    +   * call to fetch more of the matching instance partitions.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions]
    +   * call to fetch more of the matching instance partitions.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int UNREACHABLE_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList unreachable_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * The list of unreachable instances or instance partitions.
    +   * It includes the names of instances or instance partitions whose metadata
    +   * could not be retrieved within
    +   * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline].
    +   * 
    + * + * repeated string unreachable = 3; + * + * @return A list containing the unreachable. + */ + public com.google.protobuf.ProtocolStringList getUnreachableList() { + return unreachable_; + } + + /** + * + * + *
    +   * The list of unreachable instances or instance partitions.
    +   * It includes the names of instances or instance partitions whose metadata
    +   * could not be retrieved within
    +   * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline].
    +   * 
    + * + * repeated string unreachable = 3; + * + * @return The count of unreachable. + */ + public int getUnreachableCount() { + return unreachable_.size(); + } + + /** + * + * + *
    +   * The list of unreachable instances or instance partitions.
    +   * It includes the names of instances or instance partitions whose metadata
    +   * could not be retrieved within
    +   * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline].
    +   * 
    + * + * repeated string unreachable = 3; + * + * @param index The index of the element to return. + * @return The unreachable at the given index. + */ + public java.lang.String getUnreachable(int index) { + return unreachable_.get(index); + } + + /** + * + * + *
    +   * The list of unreachable instances or instance partitions.
    +   * It includes the names of instances or instance partitions whose metadata
    +   * could not be retrieved within
    +   * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline].
    +   * 
    + * + * repeated string unreachable = 3; + * + * @param index The index of the value to return. + * @return The bytes of the unreachable at the given index. + */ + public com.google.protobuf.ByteString getUnreachableBytes(int index) { + return unreachable_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < instancePartitions_.size(); i++) { + output.writeMessage(1, instancePartitions_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + for (int i = 0; i < unreachable_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, unreachable_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < instancePartitions_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(1, instancePartitions_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + { + int dataSize = 0; + for (int i = 0; i < unreachable_.size(); i++) { + dataSize += computeStringSizeNoTag(unreachable_.getRaw(i)); + } + size += dataSize; + size += 1 * getUnreachableList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == 
this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse other = + (com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse) obj; + + if (!getInstancePartitionsList().equals(other.getInstancePartitionsList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnreachableList().equals(other.getUnreachableList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getInstancePartitionsCount() > 0) { + hash = (37 * hash) + INSTANCE_PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + getInstancePartitionsList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + if (getUnreachableCount() > 0) { + hash = (37 * hash) + UNREACHABLE_FIELD_NUMBER; + hash = (53 * hash) + getUnreachableList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse parseFrom( + 
com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The response for
    +   * [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ListInstancePartitionsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.ListInstancePartitionsResponse) + com.google.spanner.admin.instance.v1.ListInstancePartitionsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse.class, + com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse.Builder.class); + } + + // Construct using + // com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (instancePartitionsBuilder_ == null) { + instancePartitions_ = java.util.Collections.emptyList(); + } else { + instancePartitions_ = null; + instancePartitionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + unreachable_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + 
.internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse build() { + com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse buildPartial() { + com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse result = + new com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse result) { + if (instancePartitionsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + instancePartitions_ = java.util.Collections.unmodifiableList(instancePartitions_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.instancePartitions_ = instancePartitions_; + } else { + result.instancePartitions_ = instancePartitionsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + unreachable_.makeImmutable(); + result.unreachable_ = unreachable_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if 
(other instanceof com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse) { + return mergeFrom( + (com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse other) { + if (other + == com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse + .getDefaultInstance()) return this; + if (instancePartitionsBuilder_ == null) { + if (!other.instancePartitions_.isEmpty()) { + if (instancePartitions_.isEmpty()) { + instancePartitions_ = other.instancePartitions_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureInstancePartitionsIsMutable(); + instancePartitions_.addAll(other.instancePartitions_); + } + onChanged(); + } + } else { + if (!other.instancePartitions_.isEmpty()) { + if (instancePartitionsBuilder_.isEmpty()) { + instancePartitionsBuilder_.dispose(); + instancePartitionsBuilder_ = null; + instancePartitions_ = other.instancePartitions_; + bitField0_ = (bitField0_ & ~0x00000001); + instancePartitionsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetInstancePartitionsFieldBuilder() + : null; + } else { + instancePartitionsBuilder_.addAllMessages(other.instancePartitions_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.unreachable_.isEmpty()) { + if (unreachable_.isEmpty()) { + unreachable_ = other.unreachable_; + bitField0_ |= 0x00000004; + } else { + ensureUnreachableIsMutable(); + unreachable_.addAll(other.unreachable_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.admin.instance.v1.InstancePartition m = + input.readMessage( + com.google.spanner.admin.instance.v1.InstancePartition.parser(), + extensionRegistry); + if (instancePartitionsBuilder_ == null) { + ensureInstancePartitionsIsMutable(); + instancePartitions_.add(m); + } else { + instancePartitionsBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureUnreachableIsMutable(); + unreachable_.add(s); + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List + instancePartitions_ = java.util.Collections.emptyList(); + + private void ensureInstancePartitionsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + instancePartitions_ = + new java.util.ArrayList( + instancePartitions_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.InstancePartition, + com.google.spanner.admin.instance.v1.InstancePartition.Builder, + com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder> + instancePartitionsBuilder_; + + /** + * + * + *
    +     * The list of requested instancePartitions.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + public java.util.List + getInstancePartitionsList() { + if (instancePartitionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(instancePartitions_); + } else { + return instancePartitionsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * The list of requested instancePartitions.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + public int getInstancePartitionsCount() { + if (instancePartitionsBuilder_ == null) { + return instancePartitions_.size(); + } else { + return instancePartitionsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * The list of requested instancePartitions.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + public com.google.spanner.admin.instance.v1.InstancePartition getInstancePartitions(int index) { + if (instancePartitionsBuilder_ == null) { + return instancePartitions_.get(index); + } else { + return instancePartitionsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * The list of requested instancePartitions.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + public Builder setInstancePartitions( + int index, com.google.spanner.admin.instance.v1.InstancePartition value) { + if (instancePartitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureInstancePartitionsIsMutable(); + instancePartitions_.set(index, value); + onChanged(); + } else { + instancePartitionsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instancePartitions.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + public Builder setInstancePartitions( + int index, com.google.spanner.admin.instance.v1.InstancePartition.Builder builderForValue) { + if (instancePartitionsBuilder_ == null) { + ensureInstancePartitionsIsMutable(); + instancePartitions_.set(index, builderForValue.build()); + onChanged(); + } else { + instancePartitionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instancePartitions.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + public Builder addInstancePartitions( + com.google.spanner.admin.instance.v1.InstancePartition value) { + if (instancePartitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureInstancePartitionsIsMutable(); + instancePartitions_.add(value); + onChanged(); + } else { + instancePartitionsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instancePartitions.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + public Builder addInstancePartitions( + int index, com.google.spanner.admin.instance.v1.InstancePartition value) { + if (instancePartitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureInstancePartitionsIsMutable(); + instancePartitions_.add(index, value); + onChanged(); + } else { + instancePartitionsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instancePartitions.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + public Builder addInstancePartitions( + com.google.spanner.admin.instance.v1.InstancePartition.Builder builderForValue) { + if (instancePartitionsBuilder_ == null) { + ensureInstancePartitionsIsMutable(); + instancePartitions_.add(builderForValue.build()); + onChanged(); + } else { + instancePartitionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instancePartitions.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + public Builder addInstancePartitions( + int index, com.google.spanner.admin.instance.v1.InstancePartition.Builder builderForValue) { + if (instancePartitionsBuilder_ == null) { + ensureInstancePartitionsIsMutable(); + instancePartitions_.add(index, builderForValue.build()); + onChanged(); + } else { + instancePartitionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instancePartitions.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + public Builder addAllInstancePartitions( + java.lang.Iterable + values) { + if (instancePartitionsBuilder_ == null) { + ensureInstancePartitionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, instancePartitions_); + onChanged(); + } else { + instancePartitionsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instancePartitions.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + public Builder clearInstancePartitions() { + if (instancePartitionsBuilder_ == null) { + instancePartitions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + instancePartitionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instancePartitions.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + public Builder removeInstancePartitions(int index) { + if (instancePartitionsBuilder_ == null) { + ensureInstancePartitionsIsMutable(); + instancePartitions_.remove(index); + onChanged(); + } else { + instancePartitionsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instancePartitions.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + public com.google.spanner.admin.instance.v1.InstancePartition.Builder + getInstancePartitionsBuilder(int index) { + return internalGetInstancePartitionsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * The list of requested instancePartitions.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + public com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder + getInstancePartitionsOrBuilder(int index) { + if (instancePartitionsBuilder_ == null) { + return instancePartitions_.get(index); + } else { + return instancePartitionsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * The list of requested instancePartitions.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + public java.util.List + getInstancePartitionsOrBuilderList() { + if (instancePartitionsBuilder_ != null) { + return instancePartitionsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(instancePartitions_); + } + } + + /** + * + * + *
    +     * The list of requested instancePartitions.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + public com.google.spanner.admin.instance.v1.InstancePartition.Builder + addInstancePartitionsBuilder() { + return internalGetInstancePartitionsFieldBuilder() + .addBuilder(com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of requested instancePartitions.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + public com.google.spanner.admin.instance.v1.InstancePartition.Builder + addInstancePartitionsBuilder(int index) { + return internalGetInstancePartitionsFieldBuilder() + .addBuilder( + index, com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of requested instancePartitions.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + public java.util.List + getInstancePartitionsBuilderList() { + return internalGetInstancePartitionsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.InstancePartition, + com.google.spanner.admin.instance.v1.InstancePartition.Builder, + com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder> + internalGetInstancePartitionsFieldBuilder() { + if (instancePartitionsBuilder_ == null) { + instancePartitionsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.InstancePartition, + com.google.spanner.admin.instance.v1.InstancePartition.Builder, + com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder>( + instancePartitions_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + instancePartitions_ = null; + } + return instancePartitionsBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions]
    +     * call to fetch more of the matching instance partitions.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions]
    +     * call to fetch more of the matching instance partitions.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions]
    +     * call to fetch more of the matching instance partitions.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions]
    +     * call to fetch more of the matching instance partitions.
    +     * 
    + * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions]
    +     * call to fetch more of the matching instance partitions.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList unreachable_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureUnreachableIsMutable() { + if (!unreachable_.isModifiable()) { + unreachable_ = new com.google.protobuf.LazyStringArrayList(unreachable_); + } + bitField0_ |= 0x00000004; + } + + /** + * + * + *
    +     * The list of unreachable instances or instance partitions.
    +     * It includes the names of instances or instance partitions whose metadata
    +     * could not be retrieved within
    +     * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline].
    +     * 
    + * + * repeated string unreachable = 3; + * + * @return A list containing the unreachable. + */ + public com.google.protobuf.ProtocolStringList getUnreachableList() { + unreachable_.makeImmutable(); + return unreachable_; + } + + /** + * + * + *
    +     * The list of unreachable instances or instance partitions.
    +     * It includes the names of instances or instance partitions whose metadata
    +     * could not be retrieved within
    +     * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline].
    +     * 
    + * + * repeated string unreachable = 3; + * + * @return The count of unreachable. + */ + public int getUnreachableCount() { + return unreachable_.size(); + } + + /** + * + * + *
    +     * The list of unreachable instances or instance partitions.
    +     * It includes the names of instances or instance partitions whose metadata
    +     * could not be retrieved within
    +     * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline].
    +     * 
    + * + * repeated string unreachable = 3; + * + * @param index The index of the element to return. + * @return The unreachable at the given index. + */ + public java.lang.String getUnreachable(int index) { + return unreachable_.get(index); + } + + /** + * + * + *
    +     * The list of unreachable instances or instance partitions.
    +     * It includes the names of instances or instance partitions whose metadata
    +     * could not be retrieved within
    +     * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline].
    +     * 
    + * + * repeated string unreachable = 3; + * + * @param index The index of the value to return. + * @return The bytes of the unreachable at the given index. + */ + public com.google.protobuf.ByteString getUnreachableBytes(int index) { + return unreachable_.getByteString(index); + } + + /** + * + * + *
    +     * The list of unreachable instances or instance partitions.
    +     * It includes the names of instances or instance partitions whose metadata
    +     * could not be retrieved within
    +     * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline].
    +     * 
    + * + * repeated string unreachable = 3; + * + * @param index The index to set the value at. + * @param value The unreachable to set. + * @return This builder for chaining. + */ + public Builder setUnreachable(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureUnreachableIsMutable(); + unreachable_.set(index, value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The list of unreachable instances or instance partitions.
    +     * It includes the names of instances or instance partitions whose metadata
    +     * could not be retrieved within
    +     * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline].
    +     * 
    + * + * repeated string unreachable = 3; + * + * @param value The unreachable to add. + * @return This builder for chaining. + */ + public Builder addUnreachable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureUnreachableIsMutable(); + unreachable_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The list of unreachable instances or instance partitions.
    +     * It includes the names of instances or instance partitions whose metadata
    +     * could not be retrieved within
    +     * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline].
    +     * 
    + * + * repeated string unreachable = 3; + * + * @param values The unreachable to add. + * @return This builder for chaining. + */ + public Builder addAllUnreachable(java.lang.Iterable values) { + ensureUnreachableIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, unreachable_); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The list of unreachable instances or instance partitions.
    +     * It includes the names of instances or instance partitions whose metadata
    +     * could not be retrieved within
    +     * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline].
    +     * 
    + * + * repeated string unreachable = 3; + * + * @return This builder for chaining. + */ + public Builder clearUnreachable() { + unreachable_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The list of unreachable instances or instance partitions.
    +     * It includes the names of instances or instance partitions whose metadata
    +     * could not be retrieved within
    +     * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline].
    +     * 
    + * + * repeated string unreachable = 3; + * + * @param value The bytes of the unreachable to add. + * @return This builder for chaining. + */ + public Builder addUnreachableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureUnreachableIsMutable(); + unreachable_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.ListInstancePartitionsResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ListInstancePartitionsResponse) + private static final com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse(); + } + + public static com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListInstancePartitionsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser 
parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsResponseOrBuilder.java new file mode 100644 index 000000000000..e4acb763854b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsResponseOrBuilder.java @@ -0,0 +1,187 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface ListInstancePartitionsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.ListInstancePartitionsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The list of requested instancePartitions.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + java.util.List + getInstancePartitionsList(); + + /** + * + * + *
    +   * The list of requested instancePartitions.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + com.google.spanner.admin.instance.v1.InstancePartition getInstancePartitions(int index); + + /** + * + * + *
    +   * The list of requested instancePartitions.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + int getInstancePartitionsCount(); + + /** + * + * + *
    +   * The list of requested instancePartitions.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + java.util.List + getInstancePartitionsOrBuilderList(); + + /** + * + * + *
    +   * The list of requested instancePartitions.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstancePartition instance_partitions = 1; + * + */ + com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder getInstancePartitionsOrBuilder( + int index); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions]
    +   * call to fetch more of the matching instance partitions.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions]
    +   * call to fetch more of the matching instance partitions.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); + + /** + * + * + *
    +   * The list of unreachable instances or instance partitions.
    +   * It includes the names of instances or instance partitions whose metadata
    +   * could not be retrieved within
    +   * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline].
    +   * 
    + * + * repeated string unreachable = 3; + * + * @return A list containing the unreachable. + */ + java.util.List getUnreachableList(); + + /** + * + * + *
    +   * The list of unreachable instances or instance partitions.
    +   * It includes the names of instances or instance partitions whose metadata
    +   * could not be retrieved within
    +   * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline].
    +   * 
    + * + * repeated string unreachable = 3; + * + * @return The count of unreachable. + */ + int getUnreachableCount(); + + /** + * + * + *
    +   * The list of unreachable instances or instance partitions.
    +   * It includes the names of instances or instance partitions whose metadata
    +   * could not be retrieved within
    +   * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline].
    +   * 
    + * + * repeated string unreachable = 3; + * + * @param index The index of the element to return. + * @return The unreachable at the given index. + */ + java.lang.String getUnreachable(int index); + + /** + * + * + *
    +   * The list of unreachable instances or instance partitions.
    +   * It includes the names of instances or instance partitions whose metadata
    +   * could not be retrieved within
    +   * [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline].
    +   * 
    + * + * repeated string unreachable = 3; + * + * @param index The index of the value to return. + * @return The bytes of the unreachable at the given index. + */ + com.google.protobuf.ByteString getUnreachableBytes(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesRequest.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesRequest.java new file mode 100644 index 000000000000..f9eda7206341 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesRequest.java @@ -0,0 +1,1599 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The request for
    + * [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ListInstancesRequest} + */ +@com.google.protobuf.Generated +public final class ListInstancesRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.ListInstancesRequest) + ListInstancesRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListInstancesRequest"); + } + + // Use ListInstancesRequest.newBuilder() to construct. + private ListInstancesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListInstancesRequest() { + parent_ = ""; + pageToken_ = ""; + filter_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancesRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ListInstancesRequest.class, + com.google.spanner.admin.instance.v1.ListInstancesRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + + /** + * + * + *
    +   * Required. The name of the project for which a list of instances is
    +   * requested. Values are of the form `projects/<project>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the project for which a list of instances is
    +   * requested. Values are of the form `projects/<project>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_ = 0; + + /** + * + * + *
    +   * Number of instances to be returned in the response. If 0 or less, defaults
    +   * to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 2; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token]
    +   * from a previous
    +   * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +   * 
    + * + * string page_token = 3; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token]
    +   * from a previous
    +   * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +   * 
    + * + * string page_token = 3; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FILTER_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object filter_ = ""; + + /** + * + * + *
    +   * An expression for filtering the results of the request. Filter rules are
    +   * case insensitive. The fields eligible for filtering are:
    +   *
    +   * * `name`
    +   * * `display_name`
    +   * * `labels.key` where key is the name of a label
    +   *
    +   * Some examples of using filters are:
    +   *
    +   * * `name:*` --> The instance has a name.
    +   * * `name:Howl` --> The instance's name contains the string "howl".
    +   * * `name:HOWL` --> Equivalent to above.
    +   * * `NAME:howl` --> Equivalent to above.
    +   * * `labels.env:*` --> The instance has the label "env".
    +   * * `labels.env:dev` --> The instance has the label "env" and the value of
    +   * the label contains the string "dev".
    +   * * `name:howl labels.env:dev` --> The instance's name contains "howl" and
    +   * it has the label "env" with its value
    +   * containing "dev".
    +   * 
    + * + * string filter = 4; + * + * @return The filter. + */ + @java.lang.Override + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } + } + + /** + * + * + *
    +   * An expression for filtering the results of the request. Filter rules are
    +   * case insensitive. The fields eligible for filtering are:
    +   *
    +   * * `name`
    +   * * `display_name`
    +   * * `labels.key` where key is the name of a label
    +   *
    +   * Some examples of using filters are:
    +   *
    +   * * `name:*` --> The instance has a name.
    +   * * `name:Howl` --> The instance's name contains the string "howl".
    +   * * `name:HOWL` --> Equivalent to above.
    +   * * `NAME:howl` --> Equivalent to above.
    +   * * `labels.env:*` --> The instance has the label "env".
    +   * * `labels.env:dev` --> The instance has the label "env" and the value of
    +   * the label contains the string "dev".
    +   * * `name:howl labels.env:dev` --> The instance's name contains "howl" and
    +   * it has the label "env" with its value
    +   * containing "dev".
    +   * 
    + * + * string filter = 4; + * + * @return The bytes for filter. + */ + @java.lang.Override + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_DEADLINE_FIELD_NUMBER = 5; + private com.google.protobuf.Timestamp instanceDeadline_; + + /** + * + * + *
    +   * Deadline used while retrieving metadata for instances.
    +   * Instances whose metadata cannot be retrieved within this deadline will be
    +   * added to
    +   * [unreachable][google.spanner.admin.instance.v1.ListInstancesResponse.unreachable]
    +   * in
    +   * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +   * 
    + * + * .google.protobuf.Timestamp instance_deadline = 5; + * + * @return Whether the instanceDeadline field is set. + */ + @java.lang.Override + public boolean hasInstanceDeadline() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Deadline used while retrieving metadata for instances.
    +   * Instances whose metadata cannot be retrieved within this deadline will be
    +   * added to
    +   * [unreachable][google.spanner.admin.instance.v1.ListInstancesResponse.unreachable]
    +   * in
    +   * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +   * 
    + * + * .google.protobuf.Timestamp instance_deadline = 5; + * + * @return The instanceDeadline. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getInstanceDeadline() { + return instanceDeadline_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : instanceDeadline_; + } + + /** + * + * + *
    +   * Deadline used while retrieving metadata for instances.
    +   * Instances whose metadata cannot be retrieved within this deadline will be
    +   * added to
    +   * [unreachable][google.spanner.admin.instance.v1.ListInstancesResponse.unreachable]
    +   * in
    +   * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +   * 
    + * + * .google.protobuf.Timestamp instance_deadline = 5; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getInstanceDeadlineOrBuilder() { + return instanceDeadline_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : instanceDeadline_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, parent_); + } + if (pageSize_ != 0) { + output.writeInt32(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, pageToken_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, filter_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(5, getInstanceDeadline()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, parent_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, pageToken_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + size += 
com.google.protobuf.GeneratedMessage.computeStringSize(4, filter_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getInstanceDeadline()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.ListInstancesRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.ListInstancesRequest other = + (com.google.spanner.admin.instance.v1.ListInstancesRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getFilter().equals(other.getFilter())) return false; + if (hasInstanceDeadline() != other.hasInstanceDeadline()) return false; + if (hasInstanceDeadline()) { + if (!getInstanceDeadline().equals(other.getInstanceDeadline())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (37 * hash) + FILTER_FIELD_NUMBER; + hash = (53 * hash) + getFilter().hashCode(); + if (hasInstanceDeadline()) { + hash = (37 * hash) + INSTANCE_DEADLINE_FIELD_NUMBER; + hash = (53 * hash) + getInstanceDeadline().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + 
public static com.google.spanner.admin.instance.v1.ListInstancesRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.ListInstancesRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ListInstancesRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.ListInstancesRequest) + com.google.spanner.admin.instance.v1.ListInstancesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancesRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ListInstancesRequest.class, + com.google.spanner.admin.instance.v1.ListInstancesRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.ListInstancesRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetInstanceDeadlineFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + pageSize_ = 0; + pageToken_ = ""; + filter_ = ""; + instanceDeadline_ = null; + if (instanceDeadlineBuilder_ != null) { + instanceDeadlineBuilder_.dispose(); + instanceDeadlineBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancesRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancesRequest getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.ListInstancesRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancesRequest build() { + com.google.spanner.admin.instance.v1.ListInstancesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancesRequest buildPartial() { + com.google.spanner.admin.instance.v1.ListInstancesRequest result = + new com.google.spanner.admin.instance.v1.ListInstancesRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.ListInstancesRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageToken_ = pageToken_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.filter_ = filter_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000010) != 0)) { + result.instanceDeadline_ = + instanceDeadlineBuilder_ == null ? 
instanceDeadline_ : instanceDeadlineBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.ListInstancesRequest) { + return mergeFrom((com.google.spanner.admin.instance.v1.ListInstancesRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.ListInstancesRequest other) { + if (other == com.google.spanner.admin.instance.v1.ListInstancesRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getFilter().isEmpty()) { + filter_ = other.filter_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (other.hasInstanceDeadline()) { + mergeInstanceDeadline(other.getInstanceDeadline()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + pageToken_ = 
input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + filter_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + input.readMessage( + internalGetInstanceDeadlineFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + + /** + * + * + *
    +     * Required. The name of the project for which a list of instances is
    +     * requested. Values are of the form `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the project for which a list of instances is
    +     * requested. Values are of the form `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the project for which a list of instances is
    +     * requested. Values are of the form `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the project for which a list of instances is
    +     * requested. Values are of the form `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the project for which a list of instances is
    +     * requested. Values are of the form `projects/<project>`.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
    +     * Number of instances to be returned in the response. If 0 or less, defaults
    +     * to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 2; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
    +     * Number of instances to be returned in the response. If 0 or less, defaults
    +     * to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 2; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Number of instances to be returned in the response. If 0 or less, defaults
    +     * to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 2; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000002); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token]
    +     * from a previous
    +     * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +     * 
    + * + * string page_token = 3; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token]
    +     * from a previous
    +     * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +     * 
    + * + * string page_token = 3; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token]
    +     * from a previous
    +     * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +     * 
    + * + * string page_token = 3; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token]
    +     * from a previous
    +     * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +     * 
    + * + * string page_token = 3; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token]
    +     * from a previous
    +     * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +     * 
    + * + * string page_token = 3; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.lang.Object filter_ = ""; + + /** + * + * + *
    +     * An expression for filtering the results of the request. Filter rules are
    +     * case insensitive. The fields eligible for filtering are:
    +     *
    +     * * `name`
    +     * * `display_name`
    +     * * `labels.key` where key is the name of a label
    +     *
    +     * Some examples of using filters are:
    +     *
    +     * * `name:*` --> The instance has a name.
    +     * * `name:Howl` --> The instance's name contains the string "howl".
    +     * * `name:HOWL` --> Equivalent to above.
    +     * * `NAME:howl` --> Equivalent to above.
    +     * * `labels.env:*` --> The instance has the label "env".
    +     * * `labels.env:dev` --> The instance has the label "env" and the value of
    +     * the label contains the string "dev".
    +     * * `name:howl labels.env:dev` --> The instance's name contains "howl" and
    +     * it has the label "env" with its value
    +     * containing "dev".
    +     * 
    + * + * string filter = 4; + * + * @return The filter. + */ + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * An expression for filtering the results of the request. Filter rules are
    +     * case insensitive. The fields eligible for filtering are:
    +     *
    +     * * `name`
    +     * * `display_name`
    +     * * `labels.key` where key is the name of a label
    +     *
    +     * Some examples of using filters are:
    +     *
    +     * * `name:*` --> The instance has a name.
    +     * * `name:Howl` --> The instance's name contains the string "howl".
    +     * * `name:HOWL` --> Equivalent to above.
    +     * * `NAME:howl` --> Equivalent to above.
    +     * * `labels.env:*` --> The instance has the label "env".
    +     * * `labels.env:dev` --> The instance has the label "env" and the value of
    +     * the label contains the string "dev".
    +     * * `name:howl labels.env:dev` --> The instance's name contains "howl" and
    +     * it has the label "env" with its value
    +     * containing "dev".
    +     * 
    + * + * string filter = 4; + * + * @return The bytes for filter. + */ + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * An expression for filtering the results of the request. Filter rules are
    +     * case insensitive. The fields eligible for filtering are:
    +     *
    +     * * `name`
    +     * * `display_name`
    +     * * `labels.key` where key is the name of a label
    +     *
    +     * Some examples of using filters are:
    +     *
    +     * * `name:*` --> The instance has a name.
    +     * * `name:Howl` --> The instance's name contains the string "howl".
    +     * * `name:HOWL` --> Equivalent to above.
    +     * * `NAME:howl` --> Equivalent to above.
    +     * * `labels.env:*` --> The instance has the label "env".
    +     * * `labels.env:dev` --> The instance has the label "env" and the value of
    +     * the label contains the string "dev".
    +     * * `name:howl labels.env:dev` --> The instance's name contains "howl" and
    +     * it has the label "env" with its value
    +     * containing "dev".
    +     * 
    + * + * string filter = 4; + * + * @param value The filter to set. + * @return This builder for chaining. + */ + public Builder setFilter(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + filter_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * An expression for filtering the results of the request. Filter rules are
    +     * case insensitive. The fields eligible for filtering are:
    +     *
    +     * * `name`
    +     * * `display_name`
    +     * * `labels.key` where key is the name of a label
    +     *
    +     * Some examples of using filters are:
    +     *
    +     * * `name:*` --> The instance has a name.
    +     * * `name:Howl` --> The instance's name contains the string "howl".
    +     * * `name:HOWL` --> Equivalent to above.
    +     * * `NAME:howl` --> Equivalent to above.
    +     * * `labels.env:*` --> The instance has the label "env".
    +     * * `labels.env:dev` --> The instance has the label "env" and the value of
    +     * the label contains the string "dev".
    +     * * `name:howl labels.env:dev` --> The instance's name contains "howl" and
    +     * it has the label "env" with its value
    +     * containing "dev".
    +     * 
    + * + * string filter = 4; + * + * @return This builder for chaining. + */ + public Builder clearFilter() { + filter_ = getDefaultInstance().getFilter(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
    +     * An expression for filtering the results of the request. Filter rules are
    +     * case insensitive. The fields eligible for filtering are:
    +     *
    +     * * `name`
    +     * * `display_name`
    +     * * `labels.key` where key is the name of a label
    +     *
    +     * Some examples of using filters are:
    +     *
    +     * * `name:*` --> The instance has a name.
    +     * * `name:Howl` --> The instance's name contains the string "howl".
    +     * * `name:HOWL` --> Equivalent to above.
    +     * * `NAME:howl` --> Equivalent to above.
    +     * * `labels.env:*` --> The instance has the label "env".
    +     * * `labels.env:dev` --> The instance has the label "env" and the value of
    +     * the label contains the string "dev".
    +     * * `name:howl labels.env:dev` --> The instance's name contains "howl" and
    +     * it has the label "env" with its value
    +     * containing "dev".
    +     * 
    + * + * string filter = 4; + * + * @param value The bytes for filter to set. + * @return This builder for chaining. + */ + public Builder setFilterBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + filter_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp instanceDeadline_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + instanceDeadlineBuilder_; + + /** + * + * + *
    +     * Deadline used while retrieving metadata for instances.
    +     * Instances whose metadata cannot be retrieved within this deadline will be
    +     * added to
    +     * [unreachable][google.spanner.admin.instance.v1.ListInstancesResponse.unreachable]
    +     * in
    +     * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +     * 
    + * + * .google.protobuf.Timestamp instance_deadline = 5; + * + * @return Whether the instanceDeadline field is set. + */ + public boolean hasInstanceDeadline() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * Deadline used while retrieving metadata for instances.
    +     * Instances whose metadata cannot be retrieved within this deadline will be
    +     * added to
    +     * [unreachable][google.spanner.admin.instance.v1.ListInstancesResponse.unreachable]
    +     * in
    +     * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +     * 
    + * + * .google.protobuf.Timestamp instance_deadline = 5; + * + * @return The instanceDeadline. + */ + public com.google.protobuf.Timestamp getInstanceDeadline() { + if (instanceDeadlineBuilder_ == null) { + return instanceDeadline_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : instanceDeadline_; + } else { + return instanceDeadlineBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Deadline used while retrieving metadata for instances.
    +     * Instances whose metadata cannot be retrieved within this deadline will be
    +     * added to
    +     * [unreachable][google.spanner.admin.instance.v1.ListInstancesResponse.unreachable]
    +     * in
    +     * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +     * 
    + * + * .google.protobuf.Timestamp instance_deadline = 5; + */ + public Builder setInstanceDeadline(com.google.protobuf.Timestamp value) { + if (instanceDeadlineBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + instanceDeadline_ = value; + } else { + instanceDeadlineBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Deadline used while retrieving metadata for instances.
    +     * Instances whose metadata cannot be retrieved within this deadline will be
    +     * added to
    +     * [unreachable][google.spanner.admin.instance.v1.ListInstancesResponse.unreachable]
    +     * in
    +     * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +     * 
    + * + * .google.protobuf.Timestamp instance_deadline = 5; + */ + public Builder setInstanceDeadline(com.google.protobuf.Timestamp.Builder builderForValue) { + if (instanceDeadlineBuilder_ == null) { + instanceDeadline_ = builderForValue.build(); + } else { + instanceDeadlineBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Deadline used while retrieving metadata for instances.
    +     * Instances whose metadata cannot be retrieved within this deadline will be
    +     * added to
    +     * [unreachable][google.spanner.admin.instance.v1.ListInstancesResponse.unreachable]
    +     * in
    +     * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +     * 
    + * + * .google.protobuf.Timestamp instance_deadline = 5; + */ + public Builder mergeInstanceDeadline(com.google.protobuf.Timestamp value) { + if (instanceDeadlineBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && instanceDeadline_ != null + && instanceDeadline_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getInstanceDeadlineBuilder().mergeFrom(value); + } else { + instanceDeadline_ = value; + } + } else { + instanceDeadlineBuilder_.mergeFrom(value); + } + if (instanceDeadline_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Deadline used while retrieving metadata for instances.
    +     * Instances whose metadata cannot be retrieved within this deadline will be
    +     * added to
    +     * [unreachable][google.spanner.admin.instance.v1.ListInstancesResponse.unreachable]
    +     * in
    +     * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +     * 
    + * + * .google.protobuf.Timestamp instance_deadline = 5; + */ + public Builder clearInstanceDeadline() { + bitField0_ = (bitField0_ & ~0x00000010); + instanceDeadline_ = null; + if (instanceDeadlineBuilder_ != null) { + instanceDeadlineBuilder_.dispose(); + instanceDeadlineBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Deadline used while retrieving metadata for instances.
    +     * Instances whose metadata cannot be retrieved within this deadline will be
    +     * added to
    +     * [unreachable][google.spanner.admin.instance.v1.ListInstancesResponse.unreachable]
    +     * in
    +     * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +     * 
    + * + * .google.protobuf.Timestamp instance_deadline = 5; + */ + public com.google.protobuf.Timestamp.Builder getInstanceDeadlineBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetInstanceDeadlineFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Deadline used while retrieving metadata for instances.
    +     * Instances whose metadata cannot be retrieved within this deadline will be
    +     * added to
    +     * [unreachable][google.spanner.admin.instance.v1.ListInstancesResponse.unreachable]
    +     * in
    +     * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +     * 
    + * + * .google.protobuf.Timestamp instance_deadline = 5; + */ + public com.google.protobuf.TimestampOrBuilder getInstanceDeadlineOrBuilder() { + if (instanceDeadlineBuilder_ != null) { + return instanceDeadlineBuilder_.getMessageOrBuilder(); + } else { + return instanceDeadline_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : instanceDeadline_; + } + } + + /** + * + * + *
    +     * Deadline used while retrieving metadata for instances.
    +     * Instances whose metadata cannot be retrieved within this deadline will be
    +     * added to
    +     * [unreachable][google.spanner.admin.instance.v1.ListInstancesResponse.unreachable]
    +     * in
    +     * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +     * 
    + * + * .google.protobuf.Timestamp instance_deadline = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetInstanceDeadlineFieldBuilder() { + if (instanceDeadlineBuilder_ == null) { + instanceDeadlineBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getInstanceDeadline(), getParentForChildren(), isClean()); + instanceDeadline_ = null; + } + return instanceDeadlineBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.ListInstancesRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ListInstancesRequest) + private static final com.google.spanner.admin.instance.v1.ListInstancesRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.ListInstancesRequest(); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListInstancesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) 
+ .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancesRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesRequestOrBuilder.java new file mode 100644 index 000000000000..6f3b8901076e --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesRequestOrBuilder.java @@ -0,0 +1,220 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface ListInstancesRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.ListInstancesRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the project for which a list of instances is
    +   * requested. Values are of the form `projects/<project>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + + /** + * + * + *
    +   * Required. The name of the project for which a list of instances is
    +   * requested. Values are of the form `projects/<project>`.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
    +   * Number of instances to be returned in the response. If 0 or less, defaults
    +   * to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 2; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token]
    +   * from a previous
    +   * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +   * 
    + * + * string page_token = 3; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token]
    +   * from a previous
    +   * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +   * 
    + * + * string page_token = 3; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); + + /** + * + * + *
    +   * An expression for filtering the results of the request. Filter rules are
    +   * case insensitive. The fields eligible for filtering are:
    +   *
    +   * * `name`
    +   * * `display_name`
    +   * * `labels.key` where key is the name of a label
    +   *
    +   * Some examples of using filters are:
    +   *
    +   * * `name:*` --> The instance has a name.
    +   * * `name:Howl` --> The instance's name contains the string "howl".
    +   * * `name:HOWL` --> Equivalent to above.
    +   * * `NAME:howl` --> Equivalent to above.
    +   * * `labels.env:*` --> The instance has the label "env".
    +   * * `labels.env:dev` --> The instance has the label "env" and the value of
    +   * the label contains the string "dev".
    +   * * `name:howl labels.env:dev` --> The instance's name contains "howl" and
    +   * it has the label "env" with its value
    +   * containing "dev".
    +   * 
    + * + * string filter = 4; + * + * @return The filter. + */ + java.lang.String getFilter(); + + /** + * + * + *
    +   * An expression for filtering the results of the request. Filter rules are
    +   * case insensitive. The fields eligible for filtering are:
    +   *
    +   * * `name`
    +   * * `display_name`
    +   * * `labels.key` where key is the name of a label
    +   *
    +   * Some examples of using filters are:
    +   *
    +   * * `name:*` --> The instance has a name.
    +   * * `name:Howl` --> The instance's name contains the string "howl".
    +   * * `name:HOWL` --> Equivalent to above.
    +   * * `NAME:howl` --> Equivalent to above.
    +   * * `labels.env:*` --> The instance has the label "env".
    +   * * `labels.env:dev` --> The instance has the label "env" and the value of
    +   * the label contains the string "dev".
    +   * * `name:howl labels.env:dev` --> The instance's name contains "howl" and
    +   * it has the label "env" with its value
    +   * containing "dev".
    +   * 
    + * + * string filter = 4; + * + * @return The bytes for filter. + */ + com.google.protobuf.ByteString getFilterBytes(); + + /** + * + * + *
    +   * Deadline used while retrieving metadata for instances.
    +   * Instances whose metadata cannot be retrieved within this deadline will be
    +   * added to
    +   * [unreachable][google.spanner.admin.instance.v1.ListInstancesResponse.unreachable]
    +   * in
    +   * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +   * 
    + * + * .google.protobuf.Timestamp instance_deadline = 5; + * + * @return Whether the instanceDeadline field is set. + */ + boolean hasInstanceDeadline(); + + /** + * + * + *
    +   * Deadline used while retrieving metadata for instances.
    +   * Instances whose metadata cannot be retrieved within this deadline will be
    +   * added to
    +   * [unreachable][google.spanner.admin.instance.v1.ListInstancesResponse.unreachable]
    +   * in
    +   * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +   * 
    + * + * .google.protobuf.Timestamp instance_deadline = 5; + * + * @return The instanceDeadline. + */ + com.google.protobuf.Timestamp getInstanceDeadline(); + + /** + * + * + *
    +   * Deadline used while retrieving metadata for instances.
    +   * Instances whose metadata cannot be retrieved within this deadline will be
    +   * added to
    +   * [unreachable][google.spanner.admin.instance.v1.ListInstancesResponse.unreachable]
    +   * in
    +   * [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
    +   * 
    + * + * .google.protobuf.Timestamp instance_deadline = 5; + */ + com.google.protobuf.TimestampOrBuilder getInstanceDeadlineOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesResponse.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesResponse.java new file mode 100644 index 000000000000..22870d6b6d54 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesResponse.java @@ -0,0 +1,1459 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The response for
    + * [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ListInstancesResponse} + */ +@com.google.protobuf.Generated +public final class ListInstancesResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.ListInstancesResponse) + ListInstancesResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListInstancesResponse"); + } + + // Use ListInstancesResponse.newBuilder() to construct. + private ListInstancesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListInstancesResponse() { + instances_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + unreachable_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancesResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ListInstancesResponse.class, + com.google.spanner.admin.instance.v1.ListInstancesResponse.Builder.class); + } + + public static final int INSTANCES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List instances_; + + /** + * + * + *
    +   * The list of requested instances.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + @java.lang.Override + public java.util.List getInstancesList() { + return instances_; + } + + /** + * + * + *
    +   * The list of requested instances.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + @java.lang.Override + public java.util.List + getInstancesOrBuilderList() { + return instances_; + } + + /** + * + * + *
    +   * The list of requested instances.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + @java.lang.Override + public int getInstancesCount() { + return instances_.size(); + } + + /** + * + * + *
    +   * The list of requested instances.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance getInstances(int index) { + return instances_.get(index); + } + + /** + * + * + *
    +   * The list of requested instances.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceOrBuilder getInstancesOrBuilder(int index) { + return instances_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]
    +   * call to fetch more of the matching instances.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]
    +   * call to fetch more of the matching instances.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int UNREACHABLE_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList unreachable_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * The list of unreachable instances.
    +   * It includes the names of instances whose metadata could not be retrieved
    +   * within
    +   * [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline].
    +   * 
    + * + * repeated string unreachable = 3; + * + * @return A list containing the unreachable. + */ + public com.google.protobuf.ProtocolStringList getUnreachableList() { + return unreachable_; + } + + /** + * + * + *
    +   * The list of unreachable instances.
    +   * It includes the names of instances whose metadata could not be retrieved
    +   * within
    +   * [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline].
    +   * 
    + * + * repeated string unreachable = 3; + * + * @return The count of unreachable. + */ + public int getUnreachableCount() { + return unreachable_.size(); + } + + /** + * + * + *
    +   * The list of unreachable instances.
    +   * It includes the names of instances whose metadata could not be retrieved
    +   * within
    +   * [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline].
    +   * 
    + * + * repeated string unreachable = 3; + * + * @param index The index of the element to return. + * @return The unreachable at the given index. + */ + public java.lang.String getUnreachable(int index) { + return unreachable_.get(index); + } + + /** + * + * + *
    +   * The list of unreachable instances.
    +   * It includes the names of instances whose metadata could not be retrieved
    +   * within
    +   * [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline].
    +   * 
    + * + * repeated string unreachable = 3; + * + * @param index The index of the value to return. + * @return The bytes of the unreachable at the given index. + */ + public com.google.protobuf.ByteString getUnreachableBytes(int index) { + return unreachable_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < instances_.size(); i++) { + output.writeMessage(1, instances_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + for (int i = 0; i < unreachable_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, unreachable_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < instances_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, instances_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + { + int dataSize = 0; + for (int i = 0; i < unreachable_.size(); i++) { + dataSize += computeStringSizeNoTag(unreachable_.getRaw(i)); + } + size += dataSize; + size += 1 * getUnreachableList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if 
(!(obj instanceof com.google.spanner.admin.instance.v1.ListInstancesResponse)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.ListInstancesResponse other = + (com.google.spanner.admin.instance.v1.ListInstancesResponse) obj; + + if (!getInstancesList().equals(other.getInstancesList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnreachableList().equals(other.getUnreachableList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getInstancesCount() > 0) { + hash = (37 * hash) + INSTANCES_FIELD_NUMBER; + hash = (53 * hash) + getInstancesList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + if (getUnreachableCount() > 0) { + hash = (37 * hash) + UNREACHABLE_FIELD_NUMBER; + hash = (53 * hash) + getUnreachableList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.ListInstancesResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.admin.instance.v1.ListInstancesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesResponse parseFrom( + 
com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.ListInstancesResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The response for
    +   * [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ListInstancesResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.ListInstancesResponse) + com.google.spanner.admin.instance.v1.ListInstancesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancesResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ListInstancesResponse.class, + com.google.spanner.admin.instance.v1.ListInstancesResponse.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.ListInstancesResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (instancesBuilder_ == null) { + instances_ = java.util.Collections.emptyList(); + } else { + instances_ = null; + instancesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + unreachable_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ListInstancesResponse_descriptor; + } + + @java.lang.Override + 
public com.google.spanner.admin.instance.v1.ListInstancesResponse getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.ListInstancesResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancesResponse build() { + com.google.spanner.admin.instance.v1.ListInstancesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancesResponse buildPartial() { + com.google.spanner.admin.instance.v1.ListInstancesResponse result = + new com.google.spanner.admin.instance.v1.ListInstancesResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.admin.instance.v1.ListInstancesResponse result) { + if (instancesBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + instances_ = java.util.Collections.unmodifiableList(instances_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.instances_ = instances_; + } else { + result.instances_ = instancesBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.ListInstancesResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + unreachable_.makeImmutable(); + result.unreachable_ = unreachable_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.ListInstancesResponse) { + return mergeFrom((com.google.spanner.admin.instance.v1.ListInstancesResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(com.google.spanner.admin.instance.v1.ListInstancesResponse other) { + if (other == com.google.spanner.admin.instance.v1.ListInstancesResponse.getDefaultInstance()) + return this; + if (instancesBuilder_ == null) { + if (!other.instances_.isEmpty()) { + if (instances_.isEmpty()) { + instances_ = other.instances_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureInstancesIsMutable(); + instances_.addAll(other.instances_); + } + onChanged(); + } + } else { + if (!other.instances_.isEmpty()) { + if (instancesBuilder_.isEmpty()) { + instancesBuilder_.dispose(); + instancesBuilder_ = null; + instances_ = other.instances_; + bitField0_ = (bitField0_ & ~0x00000001); + instancesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetInstancesFieldBuilder() + : null; + } else { + instancesBuilder_.addAllMessages(other.instances_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.unreachable_.isEmpty()) { + if (unreachable_.isEmpty()) { + unreachable_ = other.unreachable_; + bitField0_ |= 0x00000004; + } else { + ensureUnreachableIsMutable(); + unreachable_.addAll(other.unreachable_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.admin.instance.v1.Instance m = + input.readMessage( + 
com.google.spanner.admin.instance.v1.Instance.parser(), extensionRegistry); + if (instancesBuilder_ == null) { + ensureInstancesIsMutable(); + instances_.add(m); + } else { + instancesBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureUnreachableIsMutable(); + unreachable_.add(s); + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List instances_ = + java.util.Collections.emptyList(); + + private void ensureInstancesIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + instances_ = + new java.util.ArrayList(instances_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.Instance, + com.google.spanner.admin.instance.v1.Instance.Builder, + com.google.spanner.admin.instance.v1.InstanceOrBuilder> + instancesBuilder_; + + /** + * + * + *
    +     * The list of requested instances.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + public java.util.List getInstancesList() { + if (instancesBuilder_ == null) { + return java.util.Collections.unmodifiableList(instances_); + } else { + return instancesBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * The list of requested instances.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + public int getInstancesCount() { + if (instancesBuilder_ == null) { + return instances_.size(); + } else { + return instancesBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * The list of requested instances.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + public com.google.spanner.admin.instance.v1.Instance getInstances(int index) { + if (instancesBuilder_ == null) { + return instances_.get(index); + } else { + return instancesBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * The list of requested instances.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + public Builder setInstances(int index, com.google.spanner.admin.instance.v1.Instance value) { + if (instancesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureInstancesIsMutable(); + instances_.set(index, value); + onChanged(); + } else { + instancesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instances.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + public Builder setInstances( + int index, com.google.spanner.admin.instance.v1.Instance.Builder builderForValue) { + if (instancesBuilder_ == null) { + ensureInstancesIsMutable(); + instances_.set(index, builderForValue.build()); + onChanged(); + } else { + instancesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instances.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + public Builder addInstances(com.google.spanner.admin.instance.v1.Instance value) { + if (instancesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureInstancesIsMutable(); + instances_.add(value); + onChanged(); + } else { + instancesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instances.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + public Builder addInstances(int index, com.google.spanner.admin.instance.v1.Instance value) { + if (instancesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureInstancesIsMutable(); + instances_.add(index, value); + onChanged(); + } else { + instancesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instances.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + public Builder addInstances( + com.google.spanner.admin.instance.v1.Instance.Builder builderForValue) { + if (instancesBuilder_ == null) { + ensureInstancesIsMutable(); + instances_.add(builderForValue.build()); + onChanged(); + } else { + instancesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instances.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + public Builder addInstances( + int index, com.google.spanner.admin.instance.v1.Instance.Builder builderForValue) { + if (instancesBuilder_ == null) { + ensureInstancesIsMutable(); + instances_.add(index, builderForValue.build()); + onChanged(); + } else { + instancesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instances.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + public Builder addAllInstances( + java.lang.Iterable values) { + if (instancesBuilder_ == null) { + ensureInstancesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, instances_); + onChanged(); + } else { + instancesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instances.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + public Builder clearInstances() { + if (instancesBuilder_ == null) { + instances_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + instancesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instances.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + public Builder removeInstances(int index) { + if (instancesBuilder_ == null) { + ensureInstancesIsMutable(); + instances_.remove(index); + onChanged(); + } else { + instancesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * The list of requested instances.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + public com.google.spanner.admin.instance.v1.Instance.Builder getInstancesBuilder(int index) { + return internalGetInstancesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * The list of requested instances.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + public com.google.spanner.admin.instance.v1.InstanceOrBuilder getInstancesOrBuilder(int index) { + if (instancesBuilder_ == null) { + return instances_.get(index); + } else { + return instancesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * The list of requested instances.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + public java.util.List + getInstancesOrBuilderList() { + if (instancesBuilder_ != null) { + return instancesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(instances_); + } + } + + /** + * + * + *
    +     * The list of requested instances.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + public com.google.spanner.admin.instance.v1.Instance.Builder addInstancesBuilder() { + return internalGetInstancesFieldBuilder() + .addBuilder(com.google.spanner.admin.instance.v1.Instance.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of requested instances.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + public com.google.spanner.admin.instance.v1.Instance.Builder addInstancesBuilder(int index) { + return internalGetInstancesFieldBuilder() + .addBuilder(index, com.google.spanner.admin.instance.v1.Instance.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of requested instances.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + public java.util.List + getInstancesBuilderList() { + return internalGetInstancesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.Instance, + com.google.spanner.admin.instance.v1.Instance.Builder, + com.google.spanner.admin.instance.v1.InstanceOrBuilder> + internalGetInstancesFieldBuilder() { + if (instancesBuilder_ == null) { + instancesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.Instance, + com.google.spanner.admin.instance.v1.Instance.Builder, + com.google.spanner.admin.instance.v1.InstanceOrBuilder>( + instances_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + instances_ = null; + } + return instancesBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]
    +     * call to fetch more of the matching instances.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]
    +     * call to fetch more of the matching instances.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]
    +     * call to fetch more of the matching instances.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]
    +     * call to fetch more of the matching instances.
    +     * 
    + * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]
    +     * call to fetch more of the matching instances.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList unreachable_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureUnreachableIsMutable() { + if (!unreachable_.isModifiable()) { + unreachable_ = new com.google.protobuf.LazyStringArrayList(unreachable_); + } + bitField0_ |= 0x00000004; + } + + /** + * + * + *
    +     * The list of unreachable instances.
    +     * It includes the names of instances whose metadata could not be retrieved
    +     * within
    +     * [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline].
    +     * 
    + * + * repeated string unreachable = 3; + * + * @return A list containing the unreachable. + */ + public com.google.protobuf.ProtocolStringList getUnreachableList() { + unreachable_.makeImmutable(); + return unreachable_; + } + + /** + * + * + *
    +     * The list of unreachable instances.
    +     * It includes the names of instances whose metadata could not be retrieved
    +     * within
    +     * [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline].
    +     * 
    + * + * repeated string unreachable = 3; + * + * @return The count of unreachable. + */ + public int getUnreachableCount() { + return unreachable_.size(); + } + + /** + * + * + *
    +     * The list of unreachable instances.
    +     * It includes the names of instances whose metadata could not be retrieved
    +     * within
    +     * [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline].
    +     * 
    + * + * repeated string unreachable = 3; + * + * @param index The index of the element to return. + * @return The unreachable at the given index. + */ + public java.lang.String getUnreachable(int index) { + return unreachable_.get(index); + } + + /** + * + * + *
    +     * The list of unreachable instances.
    +     * It includes the names of instances whose metadata could not be retrieved
    +     * within
    +     * [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline].
    +     * 
    + * + * repeated string unreachable = 3; + * + * @param index The index of the value to return. + * @return The bytes of the unreachable at the given index. + */ + public com.google.protobuf.ByteString getUnreachableBytes(int index) { + return unreachable_.getByteString(index); + } + + /** + * + * + *
    +     * The list of unreachable instances.
    +     * It includes the names of instances whose metadata could not be retrieved
    +     * within
    +     * [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline].
    +     * 
    + * + * repeated string unreachable = 3; + * + * @param index The index to set the value at. + * @param value The unreachable to set. + * @return This builder for chaining. + */ + public Builder setUnreachable(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureUnreachableIsMutable(); + unreachable_.set(index, value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The list of unreachable instances.
    +     * It includes the names of instances whose metadata could not be retrieved
    +     * within
    +     * [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline].
    +     * 
    + * + * repeated string unreachable = 3; + * + * @param value The unreachable to add. + * @return This builder for chaining. + */ + public Builder addUnreachable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureUnreachableIsMutable(); + unreachable_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The list of unreachable instances.
    +     * It includes the names of instances whose metadata could not be retrieved
    +     * within
    +     * [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline].
    +     * 
    + * + * repeated string unreachable = 3; + * + * @param values The unreachable to add. + * @return This builder for chaining. + */ + public Builder addAllUnreachable(java.lang.Iterable values) { + ensureUnreachableIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, unreachable_); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The list of unreachable instances.
    +     * It includes the names of instances whose metadata could not be retrieved
    +     * within
    +     * [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline].
    +     * 
    + * + * repeated string unreachable = 3; + * + * @return This builder for chaining. + */ + public Builder clearUnreachable() { + unreachable_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The list of unreachable instances.
    +     * It includes the names of instances whose metadata could not be retrieved
    +     * within
    +     * [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline].
    +     * 
    + * + * repeated string unreachable = 3; + * + * @param value The bytes of the unreachable to add. + * @return This builder for chaining. + */ + public Builder addUnreachableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureUnreachableIsMutable(); + unreachable_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.ListInstancesResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ListInstancesResponse) + private static final com.google.spanner.admin.instance.v1.ListInstancesResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.ListInstancesResponse(); + } + + public static com.google.spanner.admin.instance.v1.ListInstancesResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListInstancesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + 
public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ListInstancesResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesResponseOrBuilder.java new file mode 100644 index 000000000000..cbaca0c044e8 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesResponseOrBuilder.java @@ -0,0 +1,180 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface ListInstancesResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.ListInstancesResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The list of requested instances.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + java.util.List getInstancesList(); + + /** + * + * + *
    +   * The list of requested instances.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + com.google.spanner.admin.instance.v1.Instance getInstances(int index); + + /** + * + * + *
    +   * The list of requested instances.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + int getInstancesCount(); + + /** + * + * + *
    +   * The list of requested instances.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + java.util.List + getInstancesOrBuilderList(); + + /** + * + * + *
    +   * The list of requested instances.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance instances = 1; + */ + com.google.spanner.admin.instance.v1.InstanceOrBuilder getInstancesOrBuilder(int index); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]
    +   * call to fetch more of the matching instances.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]
    +   * call to fetch more of the matching instances.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); + + /** + * + * + *
    +   * The list of unreachable instances.
    +   * It includes the names of instances whose metadata could not be retrieved
    +   * within
    +   * [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline].
    +   * 
    + * + * repeated string unreachable = 3; + * + * @return A list containing the unreachable. + */ + java.util.List getUnreachableList(); + + /** + * + * + *
    +   * The list of unreachable instances.
    +   * It includes the names of instances whose metadata could not be retrieved
    +   * within
    +   * [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline].
    +   * 
    + * + * repeated string unreachable = 3; + * + * @return The count of unreachable. + */ + int getUnreachableCount(); + + /** + * + * + *
    +   * The list of unreachable instances.
    +   * It includes the names of instances whose metadata could not be retrieved
    +   * within
    +   * [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline].
    +   * 
    + * + * repeated string unreachable = 3; + * + * @param index The index of the element to return. + * @return The unreachable at the given index. + */ + java.lang.String getUnreachable(int index); + + /** + * + * + *
    +   * The list of unreachable instances.
    +   * It includes the names of instances whose metadata could not be retrieved
    +   * within
    +   * [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline].
    +   * 
    + * + * repeated string unreachable = 3; + * + * @param index The index of the value to return. + * @return The bytes of the unreachable at the given index. + */ + com.google.protobuf.ByteString getUnreachableBytes(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceMetadata.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceMetadata.java new file mode 100644 index 000000000000..0678ce39e3ac --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceMetadata.java @@ -0,0 +1,1225 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * Metadata type for the operation returned by
    + * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.MoveInstanceMetadata} + */ +@com.google.protobuf.Generated +public final class MoveInstanceMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.MoveInstanceMetadata) + MoveInstanceMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "MoveInstanceMetadata"); + } + + // Use MoveInstanceMetadata.newBuilder() to construct. + private MoveInstanceMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private MoveInstanceMetadata() { + targetConfig_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.MoveInstanceMetadata.class, + com.google.spanner.admin.instance.v1.MoveInstanceMetadata.Builder.class); + } + + private int bitField0_; + public static final int TARGET_CONFIG_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object targetConfig_ = ""; + + /** + * + * + *
    +   * The target instance configuration where to move the instance.
    +   * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +   * 
    + * + * string target_config = 1; + * + * @return The targetConfig. + */ + @java.lang.Override + public java.lang.String getTargetConfig() { + java.lang.Object ref = targetConfig_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + targetConfig_ = s; + return s; + } + } + + /** + * + * + *
    +   * The target instance configuration where to move the instance.
    +   * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +   * 
    + * + * string target_config = 1; + * + * @return The bytes for targetConfig. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTargetConfigBytes() { + java.lang.Object ref = targetConfig_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + targetConfig_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROGRESS_FIELD_NUMBER = 2; + private com.google.spanner.admin.instance.v1.OperationProgress progress_; + + /** + * + * + *
    +   * The progress of the
    +   * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +   * operation.
    +   * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +   * is reset when cancellation is requested.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return Whether the progress field is set. + */ + @java.lang.Override + public boolean hasProgress() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The progress of the
    +   * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +   * operation.
    +   * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +   * is reset when cancellation is requested.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return The progress. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.OperationProgress getProgress() { + return progress_ == null + ? com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance() + : progress_; + } + + /** + * + * + *
    +   * The progress of the
    +   * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +   * operation.
    +   * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +   * is reset when cancellation is requested.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.OperationProgressOrBuilder getProgressOrBuilder() { + return progress_ == null + ? com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance() + : progress_; + } + + public static final int CANCEL_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp cancelTime_; + + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + @java.lang.Override + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCancelTime() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(targetConfig_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, targetConfig_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getProgress()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getCancelTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(targetConfig_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, targetConfig_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getProgress()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getCancelTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.MoveInstanceMetadata)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.MoveInstanceMetadata other = + 
(com.google.spanner.admin.instance.v1.MoveInstanceMetadata) obj; + + if (!getTargetConfig().equals(other.getTargetConfig())) return false; + if (hasProgress() != other.hasProgress()) return false; + if (hasProgress()) { + if (!getProgress().equals(other.getProgress())) return false; + } + if (hasCancelTime() != other.hasCancelTime()) return false; + if (hasCancelTime()) { + if (!getCancelTime().equals(other.getCancelTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TARGET_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getTargetConfig().hashCode(); + if (hasProgress()) { + hash = (37 * hash) + PROGRESS_FIELD_NUMBER; + hash = (53 * hash) + getProgress().hashCode(); + } + if (hasCancelTime()) { + hash = (37 * hash) + CANCEL_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCancelTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + 
return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.MoveInstanceMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Metadata type for the operation returned by
    +   * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.MoveInstanceMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.MoveInstanceMetadata) + com.google.spanner.admin.instance.v1.MoveInstanceMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.MoveInstanceMetadata.class, + com.google.spanner.admin.instance.v1.MoveInstanceMetadata.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.MoveInstanceMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetProgressFieldBuilder(); + internalGetCancelTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + targetConfig_ = ""; + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceMetadata_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceMetadata getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.MoveInstanceMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceMetadata build() { + com.google.spanner.admin.instance.v1.MoveInstanceMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceMetadata buildPartial() { + com.google.spanner.admin.instance.v1.MoveInstanceMetadata result = + new com.google.spanner.admin.instance.v1.MoveInstanceMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.MoveInstanceMetadata result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.targetConfig_ = targetConfig_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.progress_ = progressBuilder_ == null ? progress_ : progressBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.cancelTime_ = cancelTimeBuilder_ == null ? 
cancelTime_ : cancelTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.MoveInstanceMetadata) { + return mergeFrom((com.google.spanner.admin.instance.v1.MoveInstanceMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.MoveInstanceMetadata other) { + if (other == com.google.spanner.admin.instance.v1.MoveInstanceMetadata.getDefaultInstance()) + return this; + if (!other.getTargetConfig().isEmpty()) { + targetConfig_ = other.targetConfig_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasProgress()) { + mergeProgress(other.getProgress()); + } + if (other.hasCancelTime()) { + mergeCancelTime(other.getCancelTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + targetConfig_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetProgressFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetCancelTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, 
extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object targetConfig_ = ""; + + /** + * + * + *
    +     * The target instance configuration where to move the instance.
    +     * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * string target_config = 1; + * + * @return The targetConfig. + */ + public java.lang.String getTargetConfig() { + java.lang.Object ref = targetConfig_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + targetConfig_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The target instance configuration where to move the instance.
    +     * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * string target_config = 1; + * + * @return The bytes for targetConfig. + */ + public com.google.protobuf.ByteString getTargetConfigBytes() { + java.lang.Object ref = targetConfig_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + targetConfig_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The target instance configuration where to move the instance.
    +     * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * string target_config = 1; + * + * @param value The targetConfig to set. + * @return This builder for chaining. + */ + public Builder setTargetConfig(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + targetConfig_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The target instance configuration where to move the instance.
    +     * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * string target_config = 1; + * + * @return This builder for chaining. + */ + public Builder clearTargetConfig() { + targetConfig_ = getDefaultInstance().getTargetConfig(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The target instance configuration where to move the instance.
    +     * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * string target_config = 1; + * + * @param value The bytes for targetConfig to set. + * @return This builder for chaining. + */ + public Builder setTargetConfigBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + targetConfig_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.spanner.admin.instance.v1.OperationProgress progress_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.OperationProgress, + com.google.spanner.admin.instance.v1.OperationProgress.Builder, + com.google.spanner.admin.instance.v1.OperationProgressOrBuilder> + progressBuilder_; + + /** + * + * + *
    +     * The progress of the
    +     * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +     * operation.
    +     * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +     * is reset when cancellation is requested.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return Whether the progress field is set. + */ + public boolean hasProgress() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * The progress of the
    +     * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +     * operation.
    +     * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +     * is reset when cancellation is requested.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return The progress. + */ + public com.google.spanner.admin.instance.v1.OperationProgress getProgress() { + if (progressBuilder_ == null) { + return progress_ == null + ? com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance() + : progress_; + } else { + return progressBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The progress of the
    +     * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +     * operation.
    +     * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +     * is reset when cancellation is requested.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public Builder setProgress(com.google.spanner.admin.instance.v1.OperationProgress value) { + if (progressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + progress_ = value; + } else { + progressBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +     * operation.
    +     * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +     * is reset when cancellation is requested.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public Builder setProgress( + com.google.spanner.admin.instance.v1.OperationProgress.Builder builderForValue) { + if (progressBuilder_ == null) { + progress_ = builderForValue.build(); + } else { + progressBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +     * operation.
    +     * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +     * is reset when cancellation is requested.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public Builder mergeProgress(com.google.spanner.admin.instance.v1.OperationProgress value) { + if (progressBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && progress_ != null + && progress_ + != com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance()) { + getProgressBuilder().mergeFrom(value); + } else { + progress_ = value; + } + } else { + progressBuilder_.mergeFrom(value); + } + if (progress_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +     * operation.
    +     * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +     * is reset when cancellation is requested.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public Builder clearProgress() { + bitField0_ = (bitField0_ & ~0x00000002); + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +     * operation.
    +     * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +     * is reset when cancellation is requested.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public com.google.spanner.admin.instance.v1.OperationProgress.Builder getProgressBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetProgressFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The progress of the
    +     * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +     * operation.
    +     * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +     * is reset when cancellation is requested.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public com.google.spanner.admin.instance.v1.OperationProgressOrBuilder getProgressOrBuilder() { + if (progressBuilder_ != null) { + return progressBuilder_.getMessageOrBuilder(); + } else { + return progress_ == null + ? com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance() + : progress_; + } + } + + /** + * + * + *
    +     * The progress of the
    +     * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +     * operation.
    +     * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +     * is reset when cancellation is requested.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.OperationProgress, + com.google.spanner.admin.instance.v1.OperationProgress.Builder, + com.google.spanner.admin.instance.v1.OperationProgressOrBuilder> + internalGetProgressFieldBuilder() { + if (progressBuilder_ == null) { + progressBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.OperationProgress, + com.google.spanner.admin.instance.v1.OperationProgress.Builder, + com.google.spanner.admin.instance.v1.OperationProgressOrBuilder>( + getProgress(), getParentForChildren(), isClean()); + progress_ = null; + } + return progressBuilder_; + } + + private com.google.protobuf.Timestamp cancelTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + cancelTimeBuilder_; + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + public com.google.protobuf.Timestamp getCancelTime() { + if (cancelTimeBuilder_ == null) { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } else { + return cancelTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cancelTime_ = value; + } else { + cancelTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (cancelTimeBuilder_ == null) { + cancelTime_ = builderForValue.build(); + } else { + cancelTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder mergeCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && cancelTime_ != null + && cancelTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCancelTimeBuilder().mergeFrom(value); + } else { + cancelTime_ = value; + } + } else { + cancelTimeBuilder_.mergeFrom(value); + } + if (cancelTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder clearCancelTime() { + bitField0_ = (bitField0_ & ~0x00000004); + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public com.google.protobuf.Timestamp.Builder getCancelTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetCancelTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + if (cancelTimeBuilder_ != null) { + return cancelTimeBuilder_.getMessageOrBuilder(); + } else { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCancelTimeFieldBuilder() { + if (cancelTimeBuilder_ == null) { + cancelTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCancelTime(), getParentForChildren(), isClean()); + cancelTime_ = null; + } + return cancelTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.MoveInstanceMetadata) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.MoveInstanceMetadata) + private static final com.google.spanner.admin.instance.v1.MoveInstanceMetadata DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.MoveInstanceMetadata(); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public MoveInstanceMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceMetadataOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceMetadataOrBuilder.java new file mode 100644 index 000000000000..4c7f27f843ad --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceMetadataOrBuilder.java @@ -0,0 +1,142 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface MoveInstanceMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.MoveInstanceMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The target instance configuration where to move the instance.
    +   * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +   * 
    + * + * string target_config = 1; + * + * @return The targetConfig. + */ + java.lang.String getTargetConfig(); + + /** + * + * + *
    +   * The target instance configuration where to move the instance.
    +   * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +   * 
    + * + * string target_config = 1; + * + * @return The bytes for targetConfig. + */ + com.google.protobuf.ByteString getTargetConfigBytes(); + + /** + * + * + *
    +   * The progress of the
    +   * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +   * operation.
    +   * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +   * is reset when cancellation is requested.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return Whether the progress field is set. + */ + boolean hasProgress(); + + /** + * + * + *
    +   * The progress of the
    +   * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +   * operation.
    +   * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +   * is reset when cancellation is requested.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return The progress. + */ + com.google.spanner.admin.instance.v1.OperationProgress getProgress(); + + /** + * + * + *
    +   * The progress of the
    +   * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +   * operation.
    +   * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +   * is reset when cancellation is requested.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + com.google.spanner.admin.instance.v1.OperationProgressOrBuilder getProgressOrBuilder(); + + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + boolean hasCancelTime(); + + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + com.google.protobuf.Timestamp getCancelTime(); + + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceRequest.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceRequest.java new file mode 100644 index 000000000000..479dc2c903c1 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceRequest.java @@ -0,0 +1,830 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The request for
    + * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.MoveInstanceRequest} + */ +@com.google.protobuf.Generated +public final class MoveInstanceRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.MoveInstanceRequest) + MoveInstanceRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "MoveInstanceRequest"); + } + + // Use MoveInstanceRequest.newBuilder() to construct. + private MoveInstanceRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private MoveInstanceRequest() { + name_ = ""; + targetConfig_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.MoveInstanceRequest.class, + com.google.spanner.admin.instance.v1.MoveInstanceRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Required. The instance to move.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The instance to move.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TARGET_CONFIG_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object targetConfig_ = ""; + + /** + * + * + *
    +   * Required. The target instance configuration where to move the instance.
    +   * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +   * 
    + * + * + * string target_config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The targetConfig. + */ + @java.lang.Override + public java.lang.String getTargetConfig() { + java.lang.Object ref = targetConfig_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + targetConfig_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The target instance configuration where to move the instance.
    +   * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +   * 
    + * + * + * string target_config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for targetConfig. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTargetConfigBytes() { + java.lang.Object ref = targetConfig_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + targetConfig_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(targetConfig_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, targetConfig_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(targetConfig_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, targetConfig_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
com.google.spanner.admin.instance.v1.MoveInstanceRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.MoveInstanceRequest other = + (com.google.spanner.admin.instance.v1.MoveInstanceRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getTargetConfig().equals(other.getTargetConfig())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + TARGET_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getTargetConfig().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.admin.instance.v1.MoveInstanceRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.MoveInstanceRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.MoveInstanceRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.MoveInstanceRequest) + com.google.spanner.admin.instance.v1.MoveInstanceRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.MoveInstanceRequest.class, + com.google.spanner.admin.instance.v1.MoveInstanceRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.MoveInstanceRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + targetConfig_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceRequest getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.MoveInstanceRequest.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.admin.instance.v1.MoveInstanceRequest build() { + com.google.spanner.admin.instance.v1.MoveInstanceRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceRequest buildPartial() { + com.google.spanner.admin.instance.v1.MoveInstanceRequest result = + new com.google.spanner.admin.instance.v1.MoveInstanceRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.MoveInstanceRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.targetConfig_ = targetConfig_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.MoveInstanceRequest) { + return mergeFrom((com.google.spanner.admin.instance.v1.MoveInstanceRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.MoveInstanceRequest other) { + if (other == com.google.spanner.admin.instance.v1.MoveInstanceRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getTargetConfig().isEmpty()) { + targetConfig_ = other.targetConfig_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + targetConfig_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Required. The instance to move.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The instance to move.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The instance to move.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance to move.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance to move.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object targetConfig_ = ""; + + /** + * + * + *
    +     * Required. The target instance configuration where to move the instance.
    +     * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * + * string target_config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The targetConfig. + */ + public java.lang.String getTargetConfig() { + java.lang.Object ref = targetConfig_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + targetConfig_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The target instance configuration where to move the instance.
    +     * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * + * string target_config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for targetConfig. + */ + public com.google.protobuf.ByteString getTargetConfigBytes() { + java.lang.Object ref = targetConfig_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + targetConfig_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The target instance configuration where to move the instance.
    +     * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * + * string target_config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The targetConfig to set. + * @return This builder for chaining. + */ + public Builder setTargetConfig(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + targetConfig_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The target instance configuration where to move the instance.
    +     * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * + * string target_config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearTargetConfig() { + targetConfig_ = getDefaultInstance().getTargetConfig(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The target instance configuration where to move the instance.
    +     * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * + * string target_config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for targetConfig to set. + * @return This builder for chaining. + */ + public Builder setTargetConfigBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + targetConfig_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.MoveInstanceRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.MoveInstanceRequest) + private static final com.google.spanner.admin.instance.v1.MoveInstanceRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.MoveInstanceRequest(); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public MoveInstanceRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return 
PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceRequestOrBuilder.java new file mode 100644 index 000000000000..9af48e1a6036 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceRequestOrBuilder.java @@ -0,0 +1,92 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface MoveInstanceRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.MoveInstanceRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The instance to move.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Required. The instance to move.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +   * Required. The target instance configuration where to move the instance.
    +   * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +   * 
    + * + * + * string target_config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The targetConfig. + */ + java.lang.String getTargetConfig(); + + /** + * + * + *
    +   * Required. The target instance configuration where to move the instance.
    +   * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +   * 
    + * + * + * string target_config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for targetConfig. + */ + com.google.protobuf.ByteString getTargetConfigBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceResponse.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceResponse.java new file mode 100644 index 000000000000..f0ecd442f86f --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceResponse.java @@ -0,0 +1,399 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The response for
    + * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.MoveInstanceResponse} + */ +@com.google.protobuf.Generated +public final class MoveInstanceResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.MoveInstanceResponse) + MoveInstanceResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "MoveInstanceResponse"); + } + + // Use MoveInstanceResponse.newBuilder() to construct. + private MoveInstanceResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private MoveInstanceResponse() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.MoveInstanceResponse.class, + com.google.spanner.admin.instance.v1.MoveInstanceResponse.Builder.class); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + 
getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.MoveInstanceResponse)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.MoveInstanceResponse other = + (com.google.spanner.admin.instance.v1.MoveInstanceResponse) obj; + + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.MoveInstanceResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The response for
    +   * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.MoveInstanceResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.MoveInstanceResponse) + com.google.spanner.admin.instance.v1.MoveInstanceResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.MoveInstanceResponse.class, + com.google.spanner.admin.instance.v1.MoveInstanceResponse.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.MoveInstanceResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceResponse getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.MoveInstanceResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceResponse build() { + 
com.google.spanner.admin.instance.v1.MoveInstanceResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceResponse buildPartial() { + com.google.spanner.admin.instance.v1.MoveInstanceResponse result = + new com.google.spanner.admin.instance.v1.MoveInstanceResponse(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.MoveInstanceResponse) { + return mergeFrom((com.google.spanner.admin.instance.v1.MoveInstanceResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.MoveInstanceResponse other) { + if (other == com.google.spanner.admin.instance.v1.MoveInstanceResponse.getDefaultInstance()) + return this; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + // 
@@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.MoveInstanceResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.MoveInstanceResponse) + private static final com.google.spanner.admin.instance.v1.MoveInstanceResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.MoveInstanceResponse(); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public MoveInstanceResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceResponseOrBuilder.java 
b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceResponseOrBuilder.java new file mode 100644 index 000000000000..2c1187f7334d --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceResponseOrBuilder.java @@ -0,0 +1,27 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface MoveInstanceResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.MoveInstanceResponse) + com.google.protobuf.MessageOrBuilder {} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/OperationProgress.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/OperationProgress.java new file mode 100644 index 000000000000..af031df189c7 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/OperationProgress.java @@ -0,0 +1,1078 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/common.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * Encapsulates progress related information for a Cloud Spanner long
    + * running instance operations.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.OperationProgress} + */ +@com.google.protobuf.Generated +public final class OperationProgress extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.OperationProgress) + OperationProgressOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "OperationProgress"); + } + + // Use OperationProgress.newBuilder() to construct. + private OperationProgress(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private OperationProgress() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.CommonProto + .internal_static_google_spanner_admin_instance_v1_OperationProgress_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.CommonProto + .internal_static_google_spanner_admin_instance_v1_OperationProgress_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.OperationProgress.class, + com.google.spanner.admin.instance.v1.OperationProgress.Builder.class); + } + + private int bitField0_; + public static final int PROGRESS_PERCENT_FIELD_NUMBER = 1; + private int progressPercent_ = 0; + + /** + * + * + *
    +   * Percent completion of the operation.
    +   * Values are between 0 and 100 inclusive.
    +   * 
    + * + * int32 progress_percent = 1; + * + * @return The progressPercent. + */ + @java.lang.Override + public int getProgressPercent() { + return progressPercent_; + } + + public static final int START_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp startTime_; + + /** + * + * + *
    +   * Time the request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return Whether the startTime field is set. + */ + @java.lang.Override + public boolean hasStartTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Time the request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return The startTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getStartTime() { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + + /** + * + * + *
    +   * Time the request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + + public static final int END_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp endTime_; + + /** + * + * + *
    +   * If set, the time at which this operation failed or was completed
    +   * successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 3; + * + * @return Whether the endTime field is set. + */ + @java.lang.Override + public boolean hasEndTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * If set, the time at which this operation failed or was completed
    +   * successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 3; + * + * @return The endTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getEndTime() { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + + /** + * + * + *
    +   * If set, the time at which this operation failed or was completed
    +   * successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 3; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder() { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (progressPercent_ != 0) { + output.writeInt32(1, progressPercent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getStartTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getEndTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (progressPercent_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(1, progressPercent_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getStartTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getEndTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.OperationProgress)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.OperationProgress other = + (com.google.spanner.admin.instance.v1.OperationProgress) obj; + + if (getProgressPercent() != other.getProgressPercent()) return false; + if 
(hasStartTime() != other.hasStartTime()) return false; + if (hasStartTime()) { + if (!getStartTime().equals(other.getStartTime())) return false; + } + if (hasEndTime() != other.hasEndTime()) return false; + if (hasEndTime()) { + if (!getEndTime().equals(other.getEndTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROGRESS_PERCENT_FIELD_NUMBER; + hash = (53 * hash) + getProgressPercent(); + if (hasStartTime()) { + hash = (37 * hash) + START_TIME_FIELD_NUMBER; + hash = (53 * hash) + getStartTime().hashCode(); + } + if (hasEndTime()) { + hash = (37 * hash) + END_TIME_FIELD_NUMBER; + hash = (53 * hash) + getEndTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.OperationProgress parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.OperationProgress parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.OperationProgress parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.OperationProgress parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.OperationProgress parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.OperationProgress parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.OperationProgress parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.OperationProgress parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.OperationProgress parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.OperationProgress parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.OperationProgress parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.OperationProgress parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.OperationProgress prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Encapsulates progress related information for a Cloud Spanner long
    +   * running instance operations.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.OperationProgress} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.OperationProgress) + com.google.spanner.admin.instance.v1.OperationProgressOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.CommonProto + .internal_static_google_spanner_admin_instance_v1_OperationProgress_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.CommonProto + .internal_static_google_spanner_admin_instance_v1_OperationProgress_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.OperationProgress.class, + com.google.spanner.admin.instance.v1.OperationProgress.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.OperationProgress.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetStartTimeFieldBuilder(); + internalGetEndTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + progressPercent_ = 0; + startTime_ = null; + if (startTimeBuilder_ != null) { + startTimeBuilder_.dispose(); + startTimeBuilder_ = null; + } + endTime_ = null; + if (endTimeBuilder_ != null) { + endTimeBuilder_.dispose(); + endTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + 
return com.google.spanner.admin.instance.v1.CommonProto + .internal_static_google_spanner_admin_instance_v1_OperationProgress_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.OperationProgress getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.OperationProgress build() { + com.google.spanner.admin.instance.v1.OperationProgress result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.OperationProgress buildPartial() { + com.google.spanner.admin.instance.v1.OperationProgress result = + new com.google.spanner.admin.instance.v1.OperationProgress(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.OperationProgress result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.progressPercent_ = progressPercent_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.startTime_ = startTimeBuilder_ == null ? startTime_ : startTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.endTime_ = endTimeBuilder_ == null ? 
endTime_ : endTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.OperationProgress) { + return mergeFrom((com.google.spanner.admin.instance.v1.OperationProgress) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.OperationProgress other) { + if (other == com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance()) + return this; + if (other.getProgressPercent() != 0) { + setProgressPercent(other.getProgressPercent()); + } + if (other.hasStartTime()) { + mergeStartTime(other.getStartTime()); + } + if (other.hasEndTime()) { + mergeEndTime(other.getEndTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + progressPercent_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + input.readMessage( + internalGetStartTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage(internalGetEndTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // 
default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int progressPercent_; + + /** + * + * + *
    +     * Percent completion of the operation.
    +     * Values are between 0 and 100 inclusive.
    +     * 
    + * + * int32 progress_percent = 1; + * + * @return The progressPercent. + */ + @java.lang.Override + public int getProgressPercent() { + return progressPercent_; + } + + /** + * + * + *
    +     * Percent completion of the operation.
    +     * Values are between 0 and 100 inclusive.
    +     * 
    + * + * int32 progress_percent = 1; + * + * @param value The progressPercent to set. + * @return This builder for chaining. + */ + public Builder setProgressPercent(int value) { + + progressPercent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Percent completion of the operation.
    +     * Values are between 0 and 100 inclusive.
    +     * 
    + * + * int32 progress_percent = 1; + * + * @return This builder for chaining. + */ + public Builder clearProgressPercent() { + bitField0_ = (bitField0_ & ~0x00000001); + progressPercent_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp startTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + startTimeBuilder_; + + /** + * + * + *
    +     * Time the request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return Whether the startTime field is set. + */ + public boolean hasStartTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Time the request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return The startTime. + */ + public com.google.protobuf.Timestamp getStartTime() { + if (startTimeBuilder_ == null) { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } else { + return startTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Time the request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder setStartTime(com.google.protobuf.Timestamp value) { + if (startTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + startTime_ = value; + } else { + startTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Time the request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder setStartTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (startTimeBuilder_ == null) { + startTime_ = builderForValue.build(); + } else { + startTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Time the request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder mergeStartTime(com.google.protobuf.Timestamp value) { + if (startTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && startTime_ != null + && startTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getStartTimeBuilder().mergeFrom(value); + } else { + startTime_ = value; + } + } else { + startTimeBuilder_.mergeFrom(value); + } + if (startTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Time the request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder clearStartTime() { + bitField0_ = (bitField0_ & ~0x00000002); + startTime_ = null; + if (startTimeBuilder_ != null) { + startTimeBuilder_.dispose(); + startTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Time the request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public com.google.protobuf.Timestamp.Builder getStartTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetStartTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Time the request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { + if (startTimeBuilder_ != null) { + return startTimeBuilder_.getMessageOrBuilder(); + } else { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + } + + /** + * + * + *
    +     * Time the request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetStartTimeFieldBuilder() { + if (startTimeBuilder_ == null) { + startTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getStartTime(), getParentForChildren(), isClean()); + startTime_ = null; + } + return startTimeBuilder_; + } + + private com.google.protobuf.Timestamp endTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + endTimeBuilder_; + + /** + * + * + *
    +     * If set, the time at which this operation failed or was completed
    +     * successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 3; + * + * @return Whether the endTime field is set. + */ + public boolean hasEndTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * If set, the time at which this operation failed or was completed
    +     * successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 3; + * + * @return The endTime. + */ + public com.google.protobuf.Timestamp getEndTime() { + if (endTimeBuilder_ == null) { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } else { + return endTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * If set, the time at which this operation failed or was completed
    +     * successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 3; + */ + public Builder setEndTime(com.google.protobuf.Timestamp value) { + if (endTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + endTime_ = value; + } else { + endTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If set, the time at which this operation failed or was completed
    +     * successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 3; + */ + public Builder setEndTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (endTimeBuilder_ == null) { + endTime_ = builderForValue.build(); + } else { + endTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If set, the time at which this operation failed or was completed
    +     * successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 3; + */ + public Builder mergeEndTime(com.google.protobuf.Timestamp value) { + if (endTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && endTime_ != null + && endTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getEndTimeBuilder().mergeFrom(value); + } else { + endTime_ = value; + } + } else { + endTimeBuilder_.mergeFrom(value); + } + if (endTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * If set, the time at which this operation failed or was completed
    +     * successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 3; + */ + public Builder clearEndTime() { + bitField0_ = (bitField0_ & ~0x00000004); + endTime_ = null; + if (endTimeBuilder_ != null) { + endTimeBuilder_.dispose(); + endTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * If set, the time at which this operation failed or was completed
    +     * successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 3; + */ + public com.google.protobuf.Timestamp.Builder getEndTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetEndTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * If set, the time at which this operation failed or was completed
    +     * successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 3; + */ + public com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder() { + if (endTimeBuilder_ != null) { + return endTimeBuilder_.getMessageOrBuilder(); + } else { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + } + + /** + * + * + *
    +     * If set, the time at which this operation failed or was completed
    +     * successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetEndTimeFieldBuilder() { + if (endTimeBuilder_ == null) { + endTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getEndTime(), getParentForChildren(), isClean()); + endTime_ = null; + } + return endTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.OperationProgress) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.OperationProgress) + private static final com.google.spanner.admin.instance.v1.OperationProgress DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.OperationProgress(); + } + + public static com.google.spanner.admin.instance.v1.OperationProgress getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public OperationProgress parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return 
builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.OperationProgress getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/OperationProgressOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/OperationProgressOrBuilder.java new file mode 100644 index 000000000000..07b0fb9f8bce --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/OperationProgressOrBuilder.java @@ -0,0 +1,119 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/common.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface OperationProgressOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.OperationProgress) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Percent completion of the operation.
    +   * Values are between 0 and 100 inclusive.
    +   * 
    + * + * int32 progress_percent = 1; + * + * @return The progressPercent. + */ + int getProgressPercent(); + + /** + * + * + *
    +   * Time the request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return Whether the startTime field is set. + */ + boolean hasStartTime(); + + /** + * + * + *
    +   * Time the request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return The startTime. + */ + com.google.protobuf.Timestamp getStartTime(); + + /** + * + * + *
    +   * Time the request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder(); + + /** + * + * + *
    +   * If set, the time at which this operation failed or was completed
    +   * successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 3; + * + * @return Whether the endTime field is set. + */ + boolean hasEndTime(); + + /** + * + * + *
    +   * If set, the time at which this operation failed or was completed
    +   * successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 3; + * + * @return The endTime. + */ + com.google.protobuf.Timestamp getEndTime(); + + /** + * + * + *
    +   * If set, the time at which this operation failed or was completed
    +   * successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 3; + */ + com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ProjectName.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ProjectName.java new file mode 100644 index 000000000000..e23ddfeff71c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ProjectName.java @@ -0,0 +1,168 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.spanner.admin.instance.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class ProjectName implements ResourceName { + private static final PathTemplate PROJECT = + PathTemplate.createWithoutUrlEncoding("projects/{project}"); + private volatile Map fieldValuesMap; + private final String project; + + @Deprecated + protected ProjectName() { + project = null; + } + + private ProjectName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + } + + public String getProject() { + return project; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static ProjectName of(String project) { + return newBuilder().setProject(project).build(); + } + + public static String format(String project) { + return newBuilder().setProject(project).build().toString(); + } + + public static ProjectName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT.validatedMatch( + formattedString, "ProjectName.parse: formattedString not in valid format"); + return of(matchMap.get("project")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (ProjectName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + 
fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT.instantiate("project", project); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + ProjectName that = ((ProjectName) o); + return Objects.equals(this.project, that.project); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + return h; + } + + /** Builder for projects/{project}. */ + public static class Builder { + private String project; + + protected Builder() {} + + public String getProject() { + return project; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + private Builder(ProjectName projectName) { + this.project = projectName.project; + } + + public ProjectName build() { + return new ProjectName(this); + } + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaComputeCapacity.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaComputeCapacity.java new file mode 100644 index 000000000000..1830b4e06ee0 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaComputeCapacity.java @@ -0,0 +1,1141 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * ReplicaComputeCapacity describes the amount of server resources that are
    + * allocated to each replica identified by the replica selection.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ReplicaComputeCapacity} + */ +@com.google.protobuf.Generated +public final class ReplicaComputeCapacity extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.ReplicaComputeCapacity) + ReplicaComputeCapacityOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ReplicaComputeCapacity"); + } + + // Use ReplicaComputeCapacity.newBuilder() to construct. + private ReplicaComputeCapacity(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ReplicaComputeCapacity() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ReplicaComputeCapacity_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ReplicaComputeCapacity_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.class, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder.class); + } + + private int bitField0_; + private int computeCapacityCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object computeCapacity_; + + public enum ComputeCapacityCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + NODE_COUNT(2), + PROCESSING_UNITS(3), + COMPUTECAPACITY_NOT_SET(0); + private final int 
value; + + private ComputeCapacityCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ComputeCapacityCase valueOf(int value) { + return forNumber(value); + } + + public static ComputeCapacityCase forNumber(int value) { + switch (value) { + case 2: + return NODE_COUNT; + case 3: + return PROCESSING_UNITS; + case 0: + return COMPUTECAPACITY_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public ComputeCapacityCase getComputeCapacityCase() { + return ComputeCapacityCase.forNumber(computeCapacityCase_); + } + + public static final int REPLICA_SELECTION_FIELD_NUMBER = 1; + private com.google.spanner.admin.instance.v1.ReplicaSelection replicaSelection_; + + /** + * + * + *
    +   * Required. Identifies replicas by specified properties.
    +   * All replicas in the selection have the same amount of compute capacity.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the replicaSelection field is set. + */ + @java.lang.Override + public boolean hasReplicaSelection() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Required. Identifies replicas by specified properties.
    +   * All replicas in the selection have the same amount of compute capacity.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The replicaSelection. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaSelection getReplicaSelection() { + return replicaSelection_ == null + ? com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance() + : replicaSelection_; + } + + /** + * + * + *
    +   * Required. Identifies replicas by specified properties.
    +   * All replicas in the selection have the same amount of compute capacity.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder + getReplicaSelectionOrBuilder() { + return replicaSelection_ == null + ? com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance() + : replicaSelection_; + } + + public static final int NODE_COUNT_FIELD_NUMBER = 2; + + /** + * + * + *
    +   * The number of nodes allocated to each replica.
    +   *
    +   * This may be zero in API responses for instances that are not yet in
    +   * state `READY`.
    +   * 
    + * + * int32 node_count = 2; + * + * @return Whether the nodeCount field is set. + */ + @java.lang.Override + public boolean hasNodeCount() { + return computeCapacityCase_ == 2; + } + + /** + * + * + *
    +   * The number of nodes allocated to each replica.
    +   *
    +   * This may be zero in API responses for instances that are not yet in
    +   * state `READY`.
    +   * 
    + * + * int32 node_count = 2; + * + * @return The nodeCount. + */ + @java.lang.Override + public int getNodeCount() { + if (computeCapacityCase_ == 2) { + return (java.lang.Integer) computeCapacity_; + } + return 0; + } + + public static final int PROCESSING_UNITS_FIELD_NUMBER = 3; + + /** + * + * + *
    +   * The number of processing units allocated to each replica.
    +   *
    +   * This may be zero in API responses for instances that are not yet in
    +   * state `READY`.
    +   * 
    + * + * int32 processing_units = 3; + * + * @return Whether the processingUnits field is set. + */ + @java.lang.Override + public boolean hasProcessingUnits() { + return computeCapacityCase_ == 3; + } + + /** + * + * + *
    +   * The number of processing units allocated to each replica.
    +   *
    +   * This may be zero in API responses for instances that are not yet in
    +   * state `READY`.
    +   * 
    + * + * int32 processing_units = 3; + * + * @return The processingUnits. + */ + @java.lang.Override + public int getProcessingUnits() { + if (computeCapacityCase_ == 3) { + return (java.lang.Integer) computeCapacity_; + } + return 0; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getReplicaSelection()); + } + if (computeCapacityCase_ == 2) { + output.writeInt32(2, (int) ((java.lang.Integer) computeCapacity_)); + } + if (computeCapacityCase_ == 3) { + output.writeInt32(3, (int) ((java.lang.Integer) computeCapacity_)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getReplicaSelection()); + } + if (computeCapacityCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeInt32Size( + 2, (int) ((java.lang.Integer) computeCapacity_)); + } + if (computeCapacityCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeInt32Size( + 3, (int) ((java.lang.Integer) computeCapacity_)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.ReplicaComputeCapacity)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity other = + 
(com.google.spanner.admin.instance.v1.ReplicaComputeCapacity) obj; + + if (hasReplicaSelection() != other.hasReplicaSelection()) return false; + if (hasReplicaSelection()) { + if (!getReplicaSelection().equals(other.getReplicaSelection())) return false; + } + if (!getComputeCapacityCase().equals(other.getComputeCapacityCase())) return false; + switch (computeCapacityCase_) { + case 2: + if (getNodeCount() != other.getNodeCount()) return false; + break; + case 3: + if (getProcessingUnits() != other.getProcessingUnits()) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasReplicaSelection()) { + hash = (37 * hash) + REPLICA_SELECTION_FIELD_NUMBER; + hash = (53 * hash) + getReplicaSelection().hashCode(); + } + switch (computeCapacityCase_) { + case 2: + hash = (37 * hash) + NODE_COUNT_FIELD_NUMBER; + hash = (53 * hash) + getNodeCount(); + break; + case 3: + hash = (37 * hash) + PROCESSING_UNITS_FIELD_NUMBER; + hash = (53 * hash) + getProcessingUnits(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseFrom( + 
com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * ReplicaComputeCapacity describes the amount of server resources that are
    +   * allocated to each replica identified by the replica selection.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ReplicaComputeCapacity} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.ReplicaComputeCapacity) + com.google.spanner.admin.instance.v1.ReplicaComputeCapacityOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ReplicaComputeCapacity_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ReplicaComputeCapacity_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.class, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetReplicaSelectionFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + replicaSelection_ = null; + if (replicaSelectionBuilder_ != null) { + replicaSelectionBuilder_.dispose(); + replicaSelectionBuilder_ = null; + } + computeCapacityCase_ = 0; + computeCapacity_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ReplicaComputeCapacity_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacity getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacity build() { + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacity buildPartial() { + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity result = + new com.google.spanner.admin.instance.v1.ReplicaComputeCapacity(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.ReplicaComputeCapacity result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.replicaSelection_ = + replicaSelectionBuilder_ == null ? 
replicaSelection_ : replicaSelectionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs( + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity result) { + result.computeCapacityCase_ = computeCapacityCase_; + result.computeCapacity_ = this.computeCapacity_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.ReplicaComputeCapacity) { + return mergeFrom((com.google.spanner.admin.instance.v1.ReplicaComputeCapacity) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.ReplicaComputeCapacity other) { + if (other == com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.getDefaultInstance()) + return this; + if (other.hasReplicaSelection()) { + mergeReplicaSelection(other.getReplicaSelection()); + } + switch (other.getComputeCapacityCase()) { + case NODE_COUNT: + { + setNodeCount(other.getNodeCount()); + break; + } + case PROCESSING_UNITS: + { + setProcessingUnits(other.getProcessingUnits()); + break; + } + case COMPUTECAPACITY_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetReplicaSelectionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { 
+ computeCapacity_ = input.readInt32(); + computeCapacityCase_ = 2; + break; + } // case 16 + case 24: + { + computeCapacity_ = input.readInt32(); + computeCapacityCase_ = 3; + break; + } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int computeCapacityCase_ = 0; + private java.lang.Object computeCapacity_; + + public ComputeCapacityCase getComputeCapacityCase() { + return ComputeCapacityCase.forNumber(computeCapacityCase_); + } + + public Builder clearComputeCapacity() { + computeCapacityCase_ = 0; + computeCapacity_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.spanner.admin.instance.v1.ReplicaSelection replicaSelection_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.ReplicaSelection, + com.google.spanner.admin.instance.v1.ReplicaSelection.Builder, + com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder> + replicaSelectionBuilder_; + + /** + * + * + *
    +     * Required. Identifies replicas by specified properties.
    +     * All replicas in the selection have the same amount of compute capacity.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the replicaSelection field is set. + */ + public boolean hasReplicaSelection() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Required. Identifies replicas by specified properties.
    +     * All replicas in the selection have the same amount of compute capacity.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The replicaSelection. + */ + public com.google.spanner.admin.instance.v1.ReplicaSelection getReplicaSelection() { + if (replicaSelectionBuilder_ == null) { + return replicaSelection_ == null + ? com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance() + : replicaSelection_; + } else { + return replicaSelectionBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. Identifies replicas by specified properties.
    +     * All replicas in the selection have the same amount of compute capacity.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setReplicaSelection( + com.google.spanner.admin.instance.v1.ReplicaSelection value) { + if (replicaSelectionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + replicaSelection_ = value; + } else { + replicaSelectionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. Identifies replicas by specified properties.
    +     * All replicas in the selection have the same amount of compute capacity.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setReplicaSelection( + com.google.spanner.admin.instance.v1.ReplicaSelection.Builder builderForValue) { + if (replicaSelectionBuilder_ == null) { + replicaSelection_ = builderForValue.build(); + } else { + replicaSelectionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. Identifies replicas by specified properties.
    +     * All replicas in the selection have the same amount of compute capacity.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeReplicaSelection( + com.google.spanner.admin.instance.v1.ReplicaSelection value) { + if (replicaSelectionBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && replicaSelection_ != null + && replicaSelection_ + != com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance()) { + getReplicaSelectionBuilder().mergeFrom(value); + } else { + replicaSelection_ = value; + } + } else { + replicaSelectionBuilder_.mergeFrom(value); + } + if (replicaSelection_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. Identifies replicas by specified properties.
    +     * All replicas in the selection have the same amount of compute capacity.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearReplicaSelection() { + bitField0_ = (bitField0_ & ~0x00000001); + replicaSelection_ = null; + if (replicaSelectionBuilder_ != null) { + replicaSelectionBuilder_.dispose(); + replicaSelectionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. Identifies replicas by specified properties.
    +     * All replicas in the selection have the same amount of compute capacity.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.ReplicaSelection.Builder + getReplicaSelectionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetReplicaSelectionFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. Identifies replicas by specified properties.
    +     * All replicas in the selection have the same amount of compute capacity.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder + getReplicaSelectionOrBuilder() { + if (replicaSelectionBuilder_ != null) { + return replicaSelectionBuilder_.getMessageOrBuilder(); + } else { + return replicaSelection_ == null + ? com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance() + : replicaSelection_; + } + } + + /** + * + * + *
    +     * Required. Identifies replicas by specified properties.
    +     * All replicas in the selection have the same amount of compute capacity.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.ReplicaSelection, + com.google.spanner.admin.instance.v1.ReplicaSelection.Builder, + com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder> + internalGetReplicaSelectionFieldBuilder() { + if (replicaSelectionBuilder_ == null) { + replicaSelectionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.ReplicaSelection, + com.google.spanner.admin.instance.v1.ReplicaSelection.Builder, + com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder>( + getReplicaSelection(), getParentForChildren(), isClean()); + replicaSelection_ = null; + } + return replicaSelectionBuilder_; + } + + /** + * + * + *
    +     * The number of nodes allocated to each replica.
    +     *
    +     * This may be zero in API responses for instances that are not yet in
    +     * state `READY`.
    +     * 
    + * + * int32 node_count = 2; + * + * @return Whether the nodeCount field is set. + */ + public boolean hasNodeCount() { + return computeCapacityCase_ == 2; + } + + /** + * + * + *
    +     * The number of nodes allocated to each replica.
    +     *
    +     * This may be zero in API responses for instances that are not yet in
    +     * state `READY`.
    +     * 
    + * + * int32 node_count = 2; + * + * @return The nodeCount. + */ + public int getNodeCount() { + if (computeCapacityCase_ == 2) { + return (java.lang.Integer) computeCapacity_; + } + return 0; + } + + /** + * + * + *
    +     * The number of nodes allocated to each replica.
    +     *
    +     * This may be zero in API responses for instances that are not yet in
    +     * state `READY`.
    +     * 
    + * + * int32 node_count = 2; + * + * @param value The nodeCount to set. + * @return This builder for chaining. + */ + public Builder setNodeCount(int value) { + + computeCapacityCase_ = 2; + computeCapacity_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The number of nodes allocated to each replica.
    +     *
    +     * This may be zero in API responses for instances that are not yet in
    +     * state `READY`.
    +     * 
    + * + * int32 node_count = 2; + * + * @return This builder for chaining. + */ + public Builder clearNodeCount() { + if (computeCapacityCase_ == 2) { + computeCapacityCase_ = 0; + computeCapacity_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The number of processing units allocated to each replica.
    +     *
    +     * This may be zero in API responses for instances that are not yet in
    +     * state `READY`.
    +     * 
    + * + * int32 processing_units = 3; + * + * @return Whether the processingUnits field is set. + */ + public boolean hasProcessingUnits() { + return computeCapacityCase_ == 3; + } + + /** + * + * + *
    +     * The number of processing units allocated to each replica.
    +     *
    +     * This may be zero in API responses for instances that are not yet in
    +     * state `READY`.
    +     * 
    + * + * int32 processing_units = 3; + * + * @return The processingUnits. + */ + public int getProcessingUnits() { + if (computeCapacityCase_ == 3) { + return (java.lang.Integer) computeCapacity_; + } + return 0; + } + + /** + * + * + *
    +     * The number of processing units allocated to each replica.
    +     *
    +     * This may be zero in API responses for instances that are not yet in
    +     * state `READY`.
    +     * 
    + * + * int32 processing_units = 3; + * + * @param value The processingUnits to set. + * @return This builder for chaining. + */ + public Builder setProcessingUnits(int value) { + + computeCapacityCase_ = 3; + computeCapacity_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The number of processing units allocated to each replica.
    +     *
    +     * This may be zero in API responses for instances that are not yet in
    +     * state `READY`.
    +     * 
    + * + * int32 processing_units = 3; + * + * @return This builder for chaining. + */ + public Builder clearProcessingUnits() { + if (computeCapacityCase_ == 3) { + computeCapacityCase_ = 0; + computeCapacity_ = null; + onChanged(); + } + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.ReplicaComputeCapacity) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ReplicaComputeCapacity) + private static final com.google.spanner.admin.instance.v1.ReplicaComputeCapacity DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.ReplicaComputeCapacity(); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReplicaComputeCapacity parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacity 
getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaComputeCapacityOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaComputeCapacityOrBuilder.java new file mode 100644 index 000000000000..d9f5c6e879ab --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaComputeCapacityOrBuilder.java @@ -0,0 +1,141 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface ReplicaComputeCapacityOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.ReplicaComputeCapacity) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. Identifies replicas by specified properties.
    +   * All replicas in the selection have the same amount of compute capacity.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the replicaSelection field is set. + */ + boolean hasReplicaSelection(); + + /** + * + * + *
    +   * Required. Identifies replicas by specified properties.
    +   * All replicas in the selection have the same amount of compute capacity.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The replicaSelection. + */ + com.google.spanner.admin.instance.v1.ReplicaSelection getReplicaSelection(); + + /** + * + * + *
    +   * Required. Identifies replicas by specified properties.
    +   * All replicas in the selection have the same amount of compute capacity.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder getReplicaSelectionOrBuilder(); + + /** + * + * + *
    +   * The number of nodes allocated to each replica.
    +   *
    +   * This may be zero in API responses for instances that are not yet in
    +   * state `READY`.
    +   * 
    + * + * int32 node_count = 2; + * + * @return Whether the nodeCount field is set. + */ + boolean hasNodeCount(); + + /** + * + * + *
    +   * The number of nodes allocated to each replica.
    +   *
    +   * This may be zero in API responses for instances that are not yet in
    +   * state `READY`.
    +   * 
    + * + * int32 node_count = 2; + * + * @return The nodeCount. + */ + int getNodeCount(); + + /** + * + * + *
    +   * The number of processing units allocated to each replica.
    +   *
    +   * This may be zero in API responses for instances that are not yet in
    +   * state `READY`.
    +   * 
    + * + * int32 processing_units = 3; + * + * @return Whether the processingUnits field is set. + */ + boolean hasProcessingUnits(); + + /** + * + * + *
    +   * The number of processing units allocated to each replica.
    +   *
    +   * This may be zero in API responses for instances that are not yet in
    +   * state `READY`.
    +   * 
    + * + * int32 processing_units = 3; + * + * @return The processingUnits. + */ + int getProcessingUnits(); + + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.ComputeCapacityCase + getComputeCapacityCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaInfo.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaInfo.java new file mode 100644 index 000000000000..d4146416acc2 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaInfo.java @@ -0,0 +1,1080 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** Protobuf type {@code google.spanner.admin.instance.v1.ReplicaInfo} */ +@com.google.protobuf.Generated +public final class ReplicaInfo extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.ReplicaInfo) + ReplicaInfoOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ReplicaInfo"); + } + + // Use ReplicaInfo.newBuilder() to construct. + private ReplicaInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ReplicaInfo() { + location_ = ""; + type_ = 0; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ReplicaInfo_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ReplicaInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ReplicaInfo.class, + com.google.spanner.admin.instance.v1.ReplicaInfo.Builder.class); + } + + /** + * + * + *
    +   * Indicates the type of replica.  See the [replica types
    +   * documentation](https://cloud.google.com/spanner/docs/replication#replica_types)
    +   * for more details.
    +   * 
    + * + * Protobuf enum {@code google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType} + */ + public enum ReplicaType implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * TYPE_UNSPECIFIED = 0; + */ + TYPE_UNSPECIFIED(0), + /** + * + * + *
    +     * Read-write replicas support both reads and writes. These replicas:
    +     *
    +     * * Maintain a full copy of your data.
    +     * * Serve reads.
    +     * * Can vote whether to commit a write.
    +     * * Participate in leadership election.
    +     * * Are eligible to become a leader.
    +     * 
    + * + * READ_WRITE = 1; + */ + READ_WRITE(1), + /** + * + * + *
    +     * Read-only replicas only support reads (not writes). Read-only replicas:
    +     *
    +     * * Maintain a full copy of your data.
    +     * * Serve reads.
    +     * * Do not participate in voting to commit writes.
    +     * * Are not eligible to become a leader.
    +     * 
    + * + * READ_ONLY = 2; + */ + READ_ONLY(2), + /** + * + * + *
    +     * Witness replicas don't support reads but do participate in voting to
    +     * commit writes. Witness replicas:
    +     *
    +     * * Do not maintain a full copy of data.
    +     * * Do not serve reads.
    +     * * Vote whether to commit writes.
    +     * * Participate in leader election but are not eligible to become leader.
    +     * 
    + * + * WITNESS = 3; + */ + WITNESS(3), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ReplicaType"); + } + + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * TYPE_UNSPECIFIED = 0; + */ + public static final int TYPE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * Read-write replicas support both reads and writes. These replicas:
    +     *
    +     * * Maintain a full copy of your data.
    +     * * Serve reads.
    +     * * Can vote whether to commit a write.
    +     * * Participate in leadership election.
    +     * * Are eligible to become a leader.
    +     * 
    + * + * READ_WRITE = 1; + */ + public static final int READ_WRITE_VALUE = 1; + + /** + * + * + *
    +     * Read-only replicas only support reads (not writes). Read-only replicas:
    +     *
    +     * * Maintain a full copy of your data.
    +     * * Serve reads.
    +     * * Do not participate in voting to commit writes.
    +     * * Are not eligible to become a leader.
    +     * 
    + * + * READ_ONLY = 2; + */ + public static final int READ_ONLY_VALUE = 2; + + /** + * + * + *
    +     * Witness replicas don't support reads but do participate in voting to
    +     * commit writes. Witness replicas:
    +     *
    +     * * Do not maintain a full copy of data.
    +     * * Do not serve reads.
    +     * * Vote whether to commit writes.
    +     * * Participate in leader election but are not eligible to become leader.
    +     * 
    + * + * WITNESS = 3; + */ + public static final int WITNESS_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ReplicaType valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static ReplicaType forNumber(int value) { + switch (value) { + case 0: + return TYPE_UNSPECIFIED; + case 1: + return READ_WRITE; + case 2: + return READ_ONLY; + case 3: + return WITNESS; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public ReplicaType findValueByNumber(int number) { + return ReplicaType.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.ReplicaInfo.getDescriptor().getEnumTypes().get(0); + } + + private static final ReplicaType[] VALUES = values(); + + public static ReplicaType 
valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private ReplicaType(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType) + } + + public static final int LOCATION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object location_ = ""; + + /** + * + * + *
    +   * The location of the serving resources, e.g., "us-central1".
    +   * 
    + * + * string location = 1; + * + * @return The location. + */ + @java.lang.Override + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + location_ = s; + return s; + } + } + + /** + * + * + *
    +   * The location of the serving resources, e.g., "us-central1".
    +   * 
    + * + * string location = 1; + * + * @return The bytes for location. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TYPE_FIELD_NUMBER = 2; + private int type_ = 0; + + /** + * + * + *
    +   * The type of replica.
    +   * 
    + * + * .google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType type = 2; + * + * @return The enum numeric value on the wire for type. + */ + @java.lang.Override + public int getTypeValue() { + return type_; + } + + /** + * + * + *
    +   * The type of replica.
    +   * 
    + * + * .google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType type = 2; + * + * @return The type. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType getType() { + com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType result = + com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType.forNumber(type_); + return result == null + ? com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType.UNRECOGNIZED + : result; + } + + public static final int DEFAULT_LEADER_LOCATION_FIELD_NUMBER = 3; + private boolean defaultLeaderLocation_ = false; + + /** + * + * + *
    +   * If true, this location is designated as the default leader location where
    +   * leader replicas are placed. See the [region types
    +   * documentation](https://cloud.google.com/spanner/docs/instances#region_types)
    +   * for more details.
    +   * 
    + * + * bool default_leader_location = 3; + * + * @return The defaultLeaderLocation. + */ + @java.lang.Override + public boolean getDefaultLeaderLocation() { + return defaultLeaderLocation_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(location_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, location_); + } + if (type_ + != com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType.TYPE_UNSPECIFIED + .getNumber()) { + output.writeEnum(2, type_); + } + if (defaultLeaderLocation_ != false) { + output.writeBool(3, defaultLeaderLocation_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(location_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, location_); + } + if (type_ + != com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType.TYPE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, type_); + } + if (defaultLeaderLocation_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, defaultLeaderLocation_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.ReplicaInfo)) { + return super.equals(obj); + } + 
com.google.spanner.admin.instance.v1.ReplicaInfo other = + (com.google.spanner.admin.instance.v1.ReplicaInfo) obj; + + if (!getLocation().equals(other.getLocation())) return false; + if (type_ != other.type_) return false; + if (getDefaultLeaderLocation() != other.getDefaultLeaderLocation()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + LOCATION_FIELD_NUMBER; + hash = (53 * hash) + getLocation().hashCode(); + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + type_; + hash = (37 * hash) + DEFAULT_LEADER_LOCATION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getDefaultLeaderLocation()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.ReplicaInfo parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ReplicaInfo parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ReplicaInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + 
} + + public static com.google.spanner.admin.instance.v1.ReplicaInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ReplicaInfo parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaInfo parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ReplicaInfo parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaInfo parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ReplicaInfo parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaInfo parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ReplicaInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.admin.instance.v1.ReplicaInfo prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** Protobuf type {@code google.spanner.admin.instance.v1.ReplicaInfo} */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.ReplicaInfo) + com.google.spanner.admin.instance.v1.ReplicaInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ReplicaInfo_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ReplicaInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ReplicaInfo.class, + com.google.spanner.admin.instance.v1.ReplicaInfo.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.ReplicaInfo.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent 
parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + location_ = ""; + type_ = 0; + defaultLeaderLocation_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ReplicaInfo_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaInfo getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.ReplicaInfo.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaInfo build() { + com.google.spanner.admin.instance.v1.ReplicaInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaInfo buildPartial() { + com.google.spanner.admin.instance.v1.ReplicaInfo result = + new com.google.spanner.admin.instance.v1.ReplicaInfo(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.ReplicaInfo result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.location_ = location_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.type_ = type_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.defaultLeaderLocation_ = defaultLeaderLocation_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.ReplicaInfo) { + return mergeFrom((com.google.spanner.admin.instance.v1.ReplicaInfo) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(com.google.spanner.admin.instance.v1.ReplicaInfo other) { + if (other == com.google.spanner.admin.instance.v1.ReplicaInfo.getDefaultInstance()) + return this; + if (!other.getLocation().isEmpty()) { + location_ = other.location_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.type_ != 0) { + setTypeValue(other.getTypeValue()); + } + if (other.getDefaultLeaderLocation() != false) { + setDefaultLeaderLocation(other.getDefaultLeaderLocation()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + location_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + type_ = input.readEnum(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 24: + { + defaultLeaderLocation_ = input.readBool(); + bitField0_ |= 0x00000004; + break; + } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object location_ = ""; + + /** + * + * + *
    +     * The location of the serving resources, e.g., "us-central1".
    +     * 
    + * + * string location = 1; + * + * @return The location. + */ + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + location_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The location of the serving resources, e.g., "us-central1".
    +     * 
    + * + * string location = 1; + * + * @return The bytes for location. + */ + public com.google.protobuf.ByteString getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The location of the serving resources, e.g., "us-central1".
    +     * 
    + * + * string location = 1; + * + * @param value The location to set. + * @return This builder for chaining. + */ + public Builder setLocation(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + location_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The location of the serving resources, e.g., "us-central1".
    +     * 
    + * + * string location = 1; + * + * @return This builder for chaining. + */ + public Builder clearLocation() { + location_ = getDefaultInstance().getLocation(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The location of the serving resources, e.g., "us-central1".
    +     * 
    + * + * string location = 1; + * + * @param value The bytes for location to set. + * @return This builder for chaining. + */ + public Builder setLocationBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + location_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int type_ = 0; + + /** + * + * + *
    +     * The type of replica.
    +     * 
    + * + * .google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType type = 2; + * + * @return The enum numeric value on the wire for type. + */ + @java.lang.Override + public int getTypeValue() { + return type_; + } + + /** + * + * + *
    +     * The type of replica.
    +     * 
    + * + * .google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType type = 2; + * + * @param value The enum numeric value on the wire for type to set. + * @return This builder for chaining. + */ + public Builder setTypeValue(int value) { + type_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The type of replica.
    +     * 
    + * + * .google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType type = 2; + * + * @return The type. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType getType() { + com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType result = + com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType.forNumber(type_); + return result == null + ? com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * The type of replica.
    +     * 
    + * + * .google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType type = 2; + * + * @param value The type to set. + * @return This builder for chaining. + */ + public Builder setType(com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + type_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The type of replica.
    +     * 
    + * + * .google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType type = 2; + * + * @return This builder for chaining. + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000002); + type_ = 0; + onChanged(); + return this; + } + + private boolean defaultLeaderLocation_; + + /** + * + * + *
    +     * If true, this location is designated as the default leader location where
    +     * leader replicas are placed. See the [region types
    +     * documentation](https://cloud.google.com/spanner/docs/instances#region_types)
    +     * for more details.
    +     * 
    + * + * bool default_leader_location = 3; + * + * @return The defaultLeaderLocation. + */ + @java.lang.Override + public boolean getDefaultLeaderLocation() { + return defaultLeaderLocation_; + } + + /** + * + * + *
    +     * If true, this location is designated as the default leader location where
    +     * leader replicas are placed. See the [region types
    +     * documentation](https://cloud.google.com/spanner/docs/instances#region_types)
    +     * for more details.
    +     * 
    + * + * bool default_leader_location = 3; + * + * @param value The defaultLeaderLocation to set. + * @return This builder for chaining. + */ + public Builder setDefaultLeaderLocation(boolean value) { + + defaultLeaderLocation_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If true, this location is designated as the default leader location where
    +     * leader replicas are placed. See the [region types
    +     * documentation](https://cloud.google.com/spanner/docs/instances#region_types)
    +     * for more details.
    +     * 
    + * + * bool default_leader_location = 3; + * + * @return This builder for chaining. + */ + public Builder clearDefaultLeaderLocation() { + bitField0_ = (bitField0_ & ~0x00000004); + defaultLeaderLocation_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.ReplicaInfo) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ReplicaInfo) + private static final com.google.spanner.admin.instance.v1.ReplicaInfo DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.ReplicaInfo(); + } + + public static com.google.spanner.admin.instance.v1.ReplicaInfo getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReplicaInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaInfo getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaInfoOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaInfoOrBuilder.java new file mode 100644 index 000000000000..e0ea15d0a67c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaInfoOrBuilder.java @@ -0,0 +1,96 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface ReplicaInfoOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.ReplicaInfo) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The location of the serving resources, e.g., "us-central1".
    +   * 
    + * + * string location = 1; + * + * @return The location. + */ + java.lang.String getLocation(); + + /** + * + * + *
    +   * The location of the serving resources, e.g., "us-central1".
    +   * 
    + * + * string location = 1; + * + * @return The bytes for location. + */ + com.google.protobuf.ByteString getLocationBytes(); + + /** + * + * + *
    +   * The type of replica.
    +   * 
    + * + * .google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType type = 2; + * + * @return The enum numeric value on the wire for type. + */ + int getTypeValue(); + + /** + * + * + *
    +   * The type of replica.
    +   * 
    + * + * .google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType type = 2; + * + * @return The type. + */ + com.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType getType(); + + /** + * + * + *
    +   * If true, this location is designated as the default leader location where
    +   * leader replicas are placed. See the [region types
    +   * documentation](https://cloud.google.com/spanner/docs/instances#region_types)
    +   * for more details.
    +   * 
    + * + * bool default_leader_location = 3; + * + * @return The defaultLeaderLocation. + */ + boolean getDefaultLeaderLocation(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaSelection.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaSelection.java new file mode 100644 index 000000000000..1f9568d55830 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaSelection.java @@ -0,0 +1,597 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/common.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * ReplicaSelection identifies replicas with common properties.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ReplicaSelection} + */ +@com.google.protobuf.Generated +public final class ReplicaSelection extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.ReplicaSelection) + ReplicaSelectionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ReplicaSelection"); + } + + // Use ReplicaSelection.newBuilder() to construct. + private ReplicaSelection(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ReplicaSelection() { + location_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.CommonProto + .internal_static_google_spanner_admin_instance_v1_ReplicaSelection_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.CommonProto + .internal_static_google_spanner_admin_instance_v1_ReplicaSelection_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ReplicaSelection.class, + com.google.spanner.admin.instance.v1.ReplicaSelection.Builder.class); + } + + public static final int LOCATION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object location_ = ""; + + /** + * + * + *
    +   * Required. Name of the location of the replicas (e.g., "us-central1").
    +   * 
    + * + * string location = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The location. + */ + @java.lang.Override + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + location_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. Name of the location of the replicas (e.g., "us-central1").
    +   * 
    + * + * string location = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for location. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(location_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, location_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(location_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, location_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.ReplicaSelection)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.ReplicaSelection other = + (com.google.spanner.admin.instance.v1.ReplicaSelection) obj; + + if (!getLocation().equals(other.getLocation())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + 
@java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + LOCATION_FIELD_NUMBER; + hash = (53 * hash) + getLocation().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseFrom( + java.io.InputStream input) throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.ReplicaSelection prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == 
DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * ReplicaSelection identifies replicas with common properties.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ReplicaSelection} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.ReplicaSelection) + com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.CommonProto + .internal_static_google_spanner_admin_instance_v1_ReplicaSelection_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.CommonProto + .internal_static_google_spanner_admin_instance_v1_ReplicaSelection_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ReplicaSelection.class, + com.google.spanner.admin.instance.v1.ReplicaSelection.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.ReplicaSelection.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + location_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.CommonProto + .internal_static_google_spanner_admin_instance_v1_ReplicaSelection_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaSelection getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaSelection build() { + com.google.spanner.admin.instance.v1.ReplicaSelection result = buildPartial(); 
+ if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaSelection buildPartial() { + com.google.spanner.admin.instance.v1.ReplicaSelection result = + new com.google.spanner.admin.instance.v1.ReplicaSelection(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.ReplicaSelection result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.location_ = location_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.ReplicaSelection) { + return mergeFrom((com.google.spanner.admin.instance.v1.ReplicaSelection) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.ReplicaSelection other) { + if (other == com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance()) + return this; + if (!other.getLocation().isEmpty()) { + location_ = other.location_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + location_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if 
(!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object location_ = ""; + + /** + * + * + *
    +     * Required. Name of the location of the replicas (e.g., "us-central1").
    +     * 
    + * + * string location = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The location. + */ + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + location_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. Name of the location of the replicas (e.g., "us-central1").
    +     * 
    + * + * string location = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for location. + */ + public com.google.protobuf.ByteString getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. Name of the location of the replicas (e.g., "us-central1").
    +     * 
    + * + * string location = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The location to set. + * @return This builder for chaining. + */ + public Builder setLocation(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + location_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. Name of the location of the replicas (e.g., "us-central1").
    +     * 
    + * + * string location = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearLocation() { + location_ = getDefaultInstance().getLocation(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. Name of the location of the replicas (e.g., "us-central1").
    +     * 
    + * + * string location = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for location to set. + * @return This builder for chaining. + */ + public Builder setLocationBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + location_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.ReplicaSelection) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ReplicaSelection) + private static final com.google.spanner.admin.instance.v1.ReplicaSelection DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.ReplicaSelection(); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReplicaSelection parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser 
getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaSelection getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaSelectionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaSelectionOrBuilder.java new file mode 100644 index 000000000000..35ed0f8e243e --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaSelectionOrBuilder.java @@ -0,0 +1,54 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/common.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface ReplicaSelectionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.ReplicaSelection) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. Name of the location of the replicas (e.g., "us-central1").
    +   * 
    + * + * string location = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The location. + */ + java.lang.String getLocation(); + + /** + * + * + *
    +   * Required. Name of the location of the replicas (e.g., "us-central1").
    +   * 
    + * + * string location = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for location. + */ + com.google.protobuf.ByteString getLocationBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/SpannerInstanceAdminProto.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/SpannerInstanceAdminProto.java new file mode 100644 index 000000000000..fcf08cfa3f98 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/SpannerInstanceAdminProto.java @@ -0,0 +1,1126 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public final class SpannerInstanceAdminProto extends com.google.protobuf.GeneratedFile { + private SpannerInstanceAdminProto() {} + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "SpannerInstanceAdminProto"); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_ReplicaInfo_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_ReplicaInfo_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_InstanceConfig_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_InstanceConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_InstanceConfig_LabelsEntry_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_InstanceConfig_LabelsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_ReplicaComputeCapacity_descriptor; + static final 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_ReplicaComputeCapacity_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingLimits_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingLimits_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingTargets_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingTargets_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_AutoscalingConfigOverrides_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_AutoscalingConfigOverrides_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_spanner_admin_instance_v1_Instance_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_Instance_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_Instance_LabelsEntry_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_Instance_LabelsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_GetInstanceConfigRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_GetInstanceConfigRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigRequest_descriptor; + static final 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_DeleteInstanceConfigRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_DeleteInstanceConfigRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_GetInstanceRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_GetInstanceRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_CreateInstanceRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_CreateInstanceRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_ListInstancesRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_spanner_admin_instance_v1_ListInstancesRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_ListInstancesResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_ListInstancesResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_UpdateInstanceRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_UpdateInstanceRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_DeleteInstanceRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_DeleteInstanceRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_CreateInstanceMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_CreateInstanceMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_UpdateInstanceMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_UpdateInstanceMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_FreeInstanceMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_FreeInstanceMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_InstancePartition_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_InstancePartition_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_DeleteInstancePartitionRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_DeleteInstancePartitionRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_spanner_admin_instance_v1_GetInstancePartitionRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_GetInstancePartitionRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_MoveInstanceRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_MoveInstanceRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_MoveInstanceResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_MoveInstanceResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_MoveInstanceMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_MoveInstanceMetadata_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n" + + "=google/spanner/admin/instance/v1/spanner_instance_admin.proto\022 google.spanner." + + "admin.instance.v1\032\034google/api/annotation" + + "s.proto\032\027google/api/client.proto\032\037google" + + "/api/field_behavior.proto\032\031google/api/re" + + "source.proto\032\036google/iam/v1/iam_policy.p" + + "roto\032\032google/iam/v1/policy.proto\032#google" + + "/longrunning/operations.proto\032\033google/protobuf/empty.proto\032" + + " google/protobuf/field_mask.proto\032\037google/protobuf/timestamp." 
+ + "proto\032-google/spanner/admin/instance/v1/common.proto\"\332\001\n" + + "\013ReplicaInfo\022\020\n" + + "\010location\030\001 \001(\t\022G\n" + + "\004type\030\002 \001(\01629.google.spanner.ad" + + "min.instance.v1.ReplicaInfo.ReplicaType\022\037\n" + + "\027default_leader_location\030\003 \001(\010\"O\n" + + "\013ReplicaType\022\024\n" + + "\020TYPE_UNSPECIFIED\020\000\022\016\n\n" + + "READ_WRITE\020\001\022\r\n" + + "\tREAD_ONLY\020\002\022\013\n" + + "\007WITNESS\020\003\"\300\n\n" + + "\016InstanceConfig\022\014\n" + + "\004name\030\001 \001(\t\022\024\n" + + "\014display_name\030\002 \001(\t\022O\n" + + "\013config_type\030\005 \001(\01625.google.s" + + "panner.admin.instance.v1.InstanceConfig.TypeB\003\340A\003\022?\n" + + "\010replicas\030\003 \003(\0132-.google.spanner.admin.instance.v1.ReplicaInfo\022M\n" + + "\021optional_replicas\030\006" + + " \003(\0132-.google.spanner.admin.instance.v1.ReplicaInfoB\003\340A\003\022?\n" + + "\013base_config\030\007 \001(\tB*\372A\'\n" + + "%spanner.googleapis.com/InstanceConfig\022L\n" + + "\006labels\030\010 \003(\0132<.goo" + + "gle.spanner.admin.instance.v1.InstanceConfig.LabelsEntry\022\014\n" + + "\004etag\030\t \001(\t\022\026\n" + + "\016leader_options\030\004 \003(\t\022\030\n" + + "\013reconciling\030\n" + + " \001(\010B\003\340A\003\022J\n" + + "\005state\030\013" + + " \001(\01626.google.spanner.admin.instance.v1.InstanceConfig.StateB\003\340A\003\022r\n" + + "\032free_instance_availability\030\014 \001(\0162I.googl" + + "e.spanner.admin.instance.v1.InstanceConfig.FreeInstanceAvailabilityB\003\340A\003\022U\n" + + "\013quorum_type\030\022 \001(\0162;.google.spanner.admin.ins" + + "tance.v1.InstanceConfig.QuorumTypeB\003\340A\003\022.\n" + + "!storage_limit_per_processing_unit\030\023 \001(\003B\003\340A\003\032-\n" + + "\013LabelsEntry\022\013\n" + + "\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001\"B\n" + + "\004Type\022\024\n" + + "\020TYPE_UNSPECIFIED\020\000\022\022\n" + 
+ "\016GOOGLE_MANAGED\020\001\022\020\n" + + "\014USER_MANAGED\020\002\"7\n" + + "\005State\022\025\n" + + "\021STATE_UNSPECIFIED\020\000\022\014\n" + + "\010CREATING\020\001\022\t\n" + + "\005READY\020\002\"\210\001\n" + + "\030FreeInstanceAvailability\022*\n" + + "&FREE_INSTANCE_AVAILABILITY_UNSPECIFIED\020\000\022\r\n" + + "\tAVAILABLE\020\001\022\017\n" + + "\013UNSUPPORTED\020\002\022\014\n" + + "\010DISABLED\020\003\022\022\n" + + "\016QUOTA_EXCEEDED\020\004\"X\n\n" + + "QuorumType\022\033\n" + + "\027QUORUM_TYPE_UNSPECIFIED\020\000\022\n\n" + + "\006REGION\020\001\022\017\n" + + "\013DUAL_REGION\020\002\022\020\n" + + "\014MULTI_REGION\020\003:\201\001\352A~\n" + + "%spanner.googleapis.com/InstanceConfig\0224projects/{project}/instan" + + "ceConfigs/{instance_config}*\017instanceConfigs2\016instanceConfig\"\262\001\n" + + "\026ReplicaComputeCapacity\022R\n" + + "\021replica_selection\030\001 \001(\01322.goo" + + "gle.spanner.admin.instance.v1.ReplicaSelectionB\003\340A\002\022\024\n\n" + + "node_count\030\002 \001(\005H\000\022\032\n" + + "\020processing_units\030\003 \001(\005H\000B\022\n" + + "\020compute_capacity\"\204\n\n" + + "\021AutoscalingConfig\022f\n" + + "\022autoscaling_limits\030\001 \001(\0132E.google.spanner.admin.insta" + + "nce.v1.AutoscalingConfig.AutoscalingLimitsB\003\340A\002\022h\n" + + "\023autoscaling_targets\030\002 \001(\0132F.g" + + "oogle.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargetsB\003\340A\002\022|\n" + + "\036asymmetric_autoscaling_options\030\003 \003(\0132O.goog" + + "le.spanner.admin.instance.v1.Autoscaling" + + "Config.AsymmetricAutoscalingOptionB\003\340A\001\032\227\001\n" + + "\021AutoscalingLimits\022\023\n" + + "\tmin_nodes\030\001 \001(\005H\000\022\036\n" + + "\024min_processing_units\030\002 \001(\005H\000\022\023\n" + + "\tmax_nodes\030\003 \001(\005H\001\022\036\n" + + "\024max_processing_units\030\004 \001(\005H\001B\013\n" + + "\tmin_limitB\013\n" + + "\tmax_limit\032\236\001\n" + + 
"\022AutoscalingTargets\0222\n" + + "%high_priority_cpu_utilization_percent\030\001 \001(\005B\003\340A\001\022*\n" + + "\035total_cpu_utilization_percent\030\004 \001(\005B\003\340A\001\022(\n" + + "\033storage_utilization_percent\030\002 \001(\005B\003\340A\002\032\343\004\n" + + "\033AsymmetricAutoscalingOption\022R\n" + + "\021replica_selection\030\001" + + " \001(\01322.google.spanner.admin.instance.v1.ReplicaSelectionB\003\340A\002\022\202\001\n" + + "\toverrides\030\002 \001(\0132j.google.spanner.admin.instan" + + "ce.v1.AutoscalingConfig.AsymmetricAutosc" + + "alingOption.AutoscalingConfigOverridesB\003\340A\001\032\352\002\n" + + "\032AutoscalingConfigOverrides\022f\n" + + "\022autoscaling_limits\030\001 \001(\0132E.google.spanner." + + "admin.instance.v1.AutoscalingConfig.AutoscalingLimitsB\003\340A\001\022E\n" + + "8autoscaling_target_high_priority_cpu_utilization_percent\030\002" + + " \001(\005B\003\340A\001\022=\n" + + "0autoscaling_target_total_cpu_utilization_percent\030\004" + + " \001(\005B\003\340A\001\0222\n" + + "%disable_high_priority_cpu_autoscaling\030\005 \001(\010B\003\340A\001\022*\n" + + "\035disable_total_cpu_autoscaling\030\006 \001(\010B\003\340A\001\"\252\013\n" + + "\010Instance\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\002\022=\n" + + "\006config\030\002 \001(\tB-\340A\002\372A\'\n" + + "%spanner.googleapis.com/InstanceConfig\022\031\n" + + "\014display_name\030\003 \001(\tB\003\340A\002\022\022\n\n" + + "node_count\030\005 \001(\005\022\030\n" + + "\020processing_units\030\t \001(\005\022_\n" + + "\030replica_compute_capacity\030\023" + + " \003(\01328.google.spanner.admin.instance.v1.ReplicaComputeCapacityB\003\340A\003\022T\n" + + "\022autoscaling_config\030\021 \001(\01323.google.spanner.a" + + "dmin.instance.v1.AutoscalingConfigB\003\340A\001\022D\n" + + "\005state\030\006" + + " \001(\01620.google.spanner.admin.instance.v1.Instance.StateB\003\340A\003\022F\n" + + "\006labels\030\007" + + " 
\003(\01326.google.spanner.admin.instance.v1.Instance.LabelsEntry\022N\n\r" + + "instance_type\030\n" + + " \001(\01627.google.spanner.admin.instance.v1.Instance.InstanceType\022\025\n\r" + + "endpoint_uris\030\010 \003(\t\0224\n" + + "\013create_time\030\013 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\0224\n" + + "\013update_time\030\014 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022V\n" + + "\026free_instance_metadata\030\r" + + " \001(\01326.google.spanner.admin.instance.v1.FreeInstanceMetadata\022H\n" + + "\007edition\030\024" + + " \001(\01622.google.spanner.admin.instance.v1.Instance.EditionB\003\340A\001\022o\n" + + "\034default_backup_schedule_type\030\027 \001(\0162D.googl" + + "e.spanner.admin.instance.v1.Instance.DefaultBackupScheduleTypeB\003\340A\001\032-\n" + + "\013LabelsEntry\022\013\n" + + "\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001\"7\n" + + "\005State\022\025\n" + + "\021STATE_UNSPECIFIED\020\000\022\014\n" + + "\010CREATING\020\001\022\t\n" + + "\005READY\020\002\"Q\n" + + "\014InstanceType\022\035\n" + + "\031INSTANCE_TYPE_UNSPECIFIED\020\000\022\017\n" + + "\013PROVISIONED\020\001\022\021\n\r" + + "FREE_INSTANCE\020\002\"U\n" + + "\007Edition\022\027\n" + + "\023EDITION_UNSPECIFIED\020\000\022\014\n" + + "\010STANDARD\020\001\022\016\n\n" + + "ENTERPRISE\020\002\022\023\n" + + "\017ENTERPRISE_PLUS\020\003\"b\n" + + "\031DefaultBackupScheduleType\022,\n" + + "(DEFAULT_BACKUP_SCHEDULE_TYPE_UNSPECIFIED\020\000\022\010\n" + + "\004NONE\020\001\022\r\n" + + "\tAUTOMATIC\020\002:b\352A_\n" + + "\037spanner.googleapis.com/Instance\022\'projects/{project}/instances/{instance}*" + + "\tinstances2\010instance\"\210\001\n" + + "\032ListInstanceConfigsRequest\022C\n" + + "\006parent\030\001 \001(\tB3\340A\002\372A-\n" + + "+cloudresourcemanager.googleapis.com/Project\022\021\n" + + "\tpage_size\030\002 \001(\005\022\022\n\n" + + "page_token\030\003 \001(\t\"\202\001\n" + + 
"\033ListInstanceConfigsResponse\022J\n" + + "\020instance_configs\030\001" + + " \003(\01320.google.spanner.admin.instance.v1.InstanceConfig\022\027\n" + + "\017next_page_token\030\002 \001(\t\"W\n" + + "\030GetInstanceConfigRequest\022;\n" + + "\004name\030\001 \001(\tB-\340A\002\372A\'\n" + + "%spanner.googleapis.com/InstanceConfig\"\352\001\n" + + "\033CreateInstanceConfigRequest\022C\n" + + "\006parent\030\001 \001(\tB3\340A\002\372A-\n" + + "+cloudresourcemanager.googleapis.com/Project\022\037\n" + + "\022instance_config_id\030\002 \001(\tB\003\340A\002\022N\n" + + "\017instance_config\030\003" + + " \001(\01320.google.spanner.admin.instance.v1.InstanceConfigB\003\340A\002\022\025\n\r" + + "validate_only\030\004 \001(\010\"\272\001\n" + + "\033UpdateInstanceConfigRequest\022N\n" + + "\017instance_config\030\001 \001(\01320.g" + + "oogle.spanner.admin.instance.v1.InstanceConfigB\003\340A\002\0224\n" + + "\013update_mask\030\002" + + " \001(\0132\032.google.protobuf.FieldMaskB\003\340A\002\022\025\n\r" + + "validate_only\030\003 \001(\010\"\177\n" + + "\033DeleteInstanceConfigRequest\022;\n" + + "\004name\030\001 \001(\tB-\340A\002\372A\'\n" + + "%spanner.googleapis.com/InstanceConfig\022\014\n" + + "\004etag\030\002 \001(\t\022\025\n\r" + + "validate_only\030\003 \001(\010\"\241\001\n" + + "#ListInstanceConfigOperationsRequest\022C\n" + + "\006parent\030\001 \001(\tB3\340A\002\372A-\n" + + "+cloudresourcemanager.googleapis.com/Project\022\016\n" + + "\006filter\030\002 \001(\t\022\021\n" + + "\tpage_size\030\003 \001(\005\022\022\n\n" + + "page_token\030\004 \001(\t\"r\n" + + "$ListInstanceConfigOperationsResponse\0221\n\n" + + "operations\030\001 \003(\0132\035.google.longrunning.Operation\022\027\n" + + "\017next_page_token\030\002 \001(\t\"{\n" + + "\022GetInstanceRequest\0225\n" + + "\004name\030\001 \001(\tB\'\340A\002\372A!\n" + + "\037spanner.googleapis.com/Instance\022.\n\n" + + "field_mask\030\002 \001(\0132\032.google.protobuf.FieldMask\"\271\001\n" 
+ + "\025CreateInstanceRequest\022C\n" + + "\006parent\030\001 \001(\tB3\340A\002\372A-\n" + + "+cloudresourcemanager.googleapis.com/Project\022\030\n" + + "\013instance_id\030\002 \001(\tB\003\340A\002\022A\n" + + "\010instance\030\003 \001(" + + "\0132*.google.spanner.admin.instance.v1.InstanceB\003\340A\002\"\311\001\n" + + "\024ListInstancesRequest\022C\n" + + "\006parent\030\001 \001(\tB3\340A\002\372A-\n" + + "+cloudresourcemanager.googleapis.com/Project\022\021\n" + + "\tpage_size\030\002 \001(\005\022\022\n\n" + + "page_token\030\003 \001(\t\022\016\n" + + "\006filter\030\004 \001(\t\0225\n" + + "\021instance_deadline\030\005 \001(\0132\032.google.protobuf.Timestamp\"\204\001\n" + + "\025ListInstancesResponse\022=\n" + + "\tinstances\030\001 \003(\0132*.google.spanner.admin.instance.v1.Instance\022\027\n" + + "\017next_page_token\030\002 \001(\t\022\023\n" + + "\013unreachable\030\003 \003(\t\"\217\001\n" + + "\025UpdateInstanceRequest\022A\n" + + "\010instance\030\001 \001(\0132*.goog" + + "le.spanner.admin.instance.v1.InstanceB\003\340A\002\0223\n\n" + + "field_mask\030\002 \001(\0132\032.google.protobuf.FieldMaskB\003\340A\002\"N\n" + + "\025DeleteInstanceRequest\0225\n" + + "\004name\030\001 \001(\tB\'\340A\002\372A!\n" + + "\037spanner.googleapis.com/Instance\"\277\002\n" + + "\026CreateInstanceMetadata\022<\n" + + "\010instance\030\001 \001(\0132*.google.spanner.admin.instance.v1.Instance\022.\n\n" + + "start_time\030\002 \001(\0132\032.google.protobuf.Timestamp\022/\n" + + "\013cancel_time\030\003 \001(\0132\032.google.protobuf.Timestamp\022,\n" + + "\010end_time\030\004 \001(\0132\032.google.protobuf.Timestamp\022X\n" + + "\033expected_fulfillment_period\030\005" + + " \001(\01623.google.spanner.admin.instance.v1.FulfillmentPeriod\"\277\002\n" + + "\026UpdateInstanceMetadata\022<\n" + + "\010instance\030\001 \001(\0132*.google.spanner.admin.instance.v1.Instance\022.\n\n" + + "start_time\030\002 \001(\0132\032.google.protobuf.Timestamp\022/\n" + + 
"\013cancel_time\030\003 \001(\0132\032.google.protobuf.Timestamp\022,\n" + + "\010end_time\030\004 \001(\0132\032.google.protobuf.Timestamp\022X\n" + + "\033expected_fulfillment_period\030\005" + + " \001(\01623.google.spanner.admin.instance.v1.FulfillmentPeriod\"\316\002\n" + + "\024FreeInstanceMetadata\0224\n" + + "\013expire_time\030\001 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\0225\n" + + "\014upgrade_time\030\002 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022^\n" + + "\017expire_behavior\030\003 \001(\0162E.google.spanner.admi" + + "n.instance.v1.FreeInstanceMetadata.ExpireBehavior\"i\n" + + "\016ExpireBehavior\022\037\n" + + "\033EXPIRE_BEHAVIOR_UNSPECIFIED\020\000\022\027\n" + + "\023FREE_TO_PROVISIONED\020\001\022\035\n" + + "\031REMOVE_AFTER_GRACE_PERIOD\020\002\"\341\001\n" + + "\034CreateInstanceConfigMetadata\022I\n" + + "\017instance_config\030\001" + + " \001(\01320.google.spanner.admin.instance.v1.InstanceConfig\022E\n" + + "\010progress\030\002 \001" + + "(\01323.google.spanner.admin.instance.v1.OperationProgress\022/\n" + + "\013cancel_time\030\003 \001(\0132\032.google.protobuf.Timestamp\"\341\001\n" + + "\034UpdateInstanceConfigMetadata\022I\n" + + "\017instance_config\030\001 \001" + + "(\01320.google.spanner.admin.instance.v1.InstanceConfig\022E\n" + + "\010progress\030\002 \001(\01323.google." 
+ + "spanner.admin.instance.v1.OperationProgress\022/\n" + + "\013cancel_time\030\003 \001(\0132\032.google.protobuf.Timestamp\"\217\006\n" + + "\021InstancePartition\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\002\022=\n" + + "\006config\030\002 \001(\tB-\340A\002\372A\'\n" + + "%spanner.googleapis.com/InstanceConfig\022\031\n" + + "\014display_name\030\003 \001(\tB\003\340A\002\022\024\n\n" + + "node_count\030\005 \001(\005H\000\022\032\n" + + "\020processing_units\030\006 \001(\005H\000\022T\n" + + "\022autoscaling_config\030\r" + + " \001(\01323.google.spanner.admin.instance.v1.AutoscalingConfigB\003\340A\001\022M\n" + + "\005state\030\007 \001(\01629.google.spanner.admin.in" + + "stance.v1.InstancePartition.StateB\003\340A\003\0224\n" + + "\013create_time\030\010 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\0224\n" + + "\013update_time\030\t \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022\"\n" + + "\025referencing_databases\030\n" + + " \003(\tB\003\340A\003\022\"\n" + + "\023referencing_backups\030\013 \003(\tB\005\030\001\340A\003\022\014\n" + + "\004etag\030\014 \001(\t\"7\n" + + "\005State\022\025\n" + + "\021STATE_UNSPECIFIED\020\000\022\014\n" + + "\010CREATING\020\001\022\t\n" + + "\005READY\020\002:\246\001\352A\242\001\n" + + "(spanner.googleapis.com/InstancePartition\022Oprojects/{project}/i" + + "nstances/{instance}/instancePartitions/{" + + "instance_partition}*\022instancePartitions2\021instancePartitionB\022\n" + + "\020compute_capacity\"\201\002\n" + + "\037CreateInstancePartitionMetadata\022O\n" + + "\022instance_partition\030\001" + + " \001(\01323.google.spanner.admin.instance.v1.InstancePartition\022.\n\n" + + "start_time\030\002 \001(\0132\032.google.protobuf.Timestamp\022/\n" + + "\013cancel_time\030\003 \001(\0132\032.google.protobuf.Timestamp\022,\n" + + "\010end_time\030\004 \001(\0132\032.google.protobuf.Timestamp\"\323\001\n" + + "\036CreateInstancePartitionRequest\0227\n" + + "\006parent\030\001 
\001(\tB\'\340A\002\372A!\n" + + "\037spanner.googleapis.com/Instance\022\"\n" + + "\025instance_partition_id\030\002 \001(\tB\003\340A\002\022T\n" + + "\022instance_partition\030\003" + + " \001(\01323.google.spanner.admin.instance.v1.InstancePartitionB\003\340A\002\"n\n" + + "\036DeleteInstancePartitionRequest\022>\n" + + "\004name\030\001 \001(\tB0\340A\002\372A*\n" + + "(spanner.googleapis.com/InstancePartition\022\014\n" + + "\004etag\030\002 \001(\t\"]\n" + + "\033GetInstancePartitionRequest\022>\n" + + "\004name\030\001 \001(\tB0\340A\002\372A*\n" + + "(spanner.googleapis.com/InstancePartition\"\253\001\n" + + "\036UpdateInstancePartitionRequest\022T\n" + + "\022instance_partition\030\001 \001(\01323.google.spanner." + + "admin.instance.v1.InstancePartitionB\003\340A\002\0223\n\n" + + "field_mask\030\002 \001(\0132\032.google.protobuf.FieldMaskB\003\340A\002\"\201\002\n" + + "\037UpdateInstancePartitionMetadata\022O\n" + + "\022instance_partition\030\001 \001(\01323." + + "google.spanner.admin.instance.v1.InstancePartition\022.\n\n" + + "start_time\030\002 \001(\0132\032.google.protobuf.Timestamp\022/\n" + + "\013cancel_time\030\003 \001(\0132\032.google.protobuf.Timestamp\022,\n" + + "\010end_time\030\004 \001(\0132\032.google.protobuf.Timestamp\"\305\001\n" + + "\035ListInstancePartitionsRequest\0227\n" + + "\006parent\030\001 \001(\tB\'\340A\002\372A!\n" + + "\037spanner.googleapis.com/Instance\022\021\n" + + "\tpage_size\030\002 \001(\005\022\022\n\n" + + "page_token\030\003 \001(\t\022D\n" + + "\033instance_partition_deadline\030\004" + + " \001(\0132\032.google.protobuf.TimestampB\003\340A\001\"\240\001\n" + + "\036ListInstancePartitionsResponse\022P\n" + + "\023instance_partitions\030\001" + + " \003(\01323.google.spanner.admin.instance.v1.InstancePartition\022\027\n" + + "\017next_page_token\030\002 \001(\t\022\023\n" + + "\013unreachable\030\003 \003(\t\"\355\001\n" + + "&ListInstancePartitionOperationsRequest\0227\n" + + "\006parent\030\001 
\001(\tB\'\340A\002\372A!\n" + + "\037spanner.googleapis.com/Instance\022\023\n" + + "\006filter\030\002 \001(\tB\003\340A\001\022\026\n" + + "\tpage_size\030\003 \001(\005B\003\340A\001\022\027\n\n" + + "page_token\030\004 \001(\tB\003\340A\001\022D\n" + + "\033instance_partition_deadline\030\005" + + " \001(\0132\032.google.protobuf.TimestampB\003\340A\001\"\236\001\n" + + "\'ListInstancePartitionOperationsResponse\0221\n\n" + + "operations\030\001 \003(\0132\035.google.longrunning.Operation\022\027\n" + + "\017next_page_token\030\002 \001(\t\022\'\n" + + "\037unreachable_instance_partitions\030\003 \003(\t\"\222\001\n" + + "\023MoveInstanceRequest\0225\n" + + "\004name\030\001 \001(\tB\'\340A\002\372A!\n" + + "\037spanner.googleapis.com/Instance\022D\n\r" + + "target_config\030\002 \001(\tB-\340A\002\372A\'\n" + + "%spanner.googleapis.com/InstanceConfig\"\026\n" + + "\024MoveInstanceResponse\"\245\001\n" + + "\024MoveInstanceMetadata\022\025\n\r" + + "target_config\030\001 \001(\t\022E\n" + + "\010progress\030\002 \001(\01323.g" + + "oogle.spanner.admin.instance.v1.OperationProgress\022/\n" + + "\013cancel_time\030\003 \001(\0132\032.google.protobuf.Timestamp2\332\'\n\r" + + "InstanceAdmin\022\314\001\n" + + "\023ListInstanceConfigs\022<.google.spanner.admin.instance.v1.ListInstanceConfigsReque" + + "st\032=.google.spanner.admin.instance.v1.Li" + + "stInstanceConfigsResponse\"8\332A\006parent\202\323\344\223" + + "\002)\022\'/v1/{parent=projects/*}/instanceConfigs\022\271\001\n" + + "\021GetInstanceConfig\022:.google.spanner.admin.instance.v1.GetInstanceConfigRe" + + "quest\0320.google.spanner.admin.instance.v1" + + ".InstanceConfig\"6\332A\004name\202\323\344\223\002)\022\'/v1/{name=projects/*/instanceConfigs/*}\022\310\002\n" + + "\024CreateInstanceConfig\022=.google.spanner.admin." 
+ + "instance.v1.CreateInstanceConfigRequest\032\035.google.longrunning.Operation\"\321\001\312Ap\n" + + "/google.spanner.admin.instance.v1.InstanceConfig\022=google.spanner.admin.instance.v1." + + "CreateInstanceConfigMetadata\332A)parent,in" + + "stance_config,instance_config_id\202\323\344\223\002,\"\'" + + "/v1/{parent=projects/*}/instanceConfigs:\001*\022\312\002\n" + + "\024UpdateInstanceConfig\022=.google.spanner.admin.instance.v1.UpdateInstanceCon" + + "figRequest\032\035.google.longrunning.Operation\"\323\001\312Ap\n" + + "/google.spanner.admin.instance.v1.InstanceConfig\022=google.spanner.admin.i" + + "nstance.v1.UpdateInstanceConfigMetadata\332" + + "A\033instance_config,update_mask\202\323\344\223\002<27/v1" + + "/{instance_config.name=projects/*/instanceConfigs/*}:\001*\022\245\001\n" + + "\024DeleteInstanceConfig\022=.google.spanner.admin.instance.v1.Dele" + + "teInstanceConfigRequest\032\026.google.protobu" + + "f.Empty\"6\332A\004name\202\323\344\223\002)*\'/v1/{name=projects/*/instanceConfigs/*}\022\360\001\n" + + "\034ListInstanceConfigOperations\022E.google.spanner.admin." + + "instance.v1.ListInstanceConfigOperationsRequest\032F.google.spanner.admin.instance." + + "v1.ListInstanceConfigOperationsResponse\"" + + "A\332A\006parent\202\323\344\223\0022\0220/v1/{parent=projects/*}/instanceConfigOperations\022\264\001\n\r" + + "ListInstances\0226.google.spanner.admin.instance.v1." 
+ + "ListInstancesRequest\0327.google.spanner.ad" + + "min.instance.v1.ListInstancesResponse\"2\332" + + "A\006parent\202\323\344\223\002#\022!/v1/{parent=projects/*}/instances\022\344\001\n" + + "\026ListInstancePartitions\022?.google.spanner.admin.instance.v1.ListInst" + + "ancePartitionsRequest\032@.google.spanner.admin.instance.v1.ListInstancePartitionsR" + + "esponse\"G\332A\006parent\202\323\344\223\0028\0226/v1/{parent=pr" + + "ojects/*/instances/*}/instancePartitions\022\241\001\n" + + "\013GetInstance\0224.google.spanner.admin.instance.v1.GetInstanceRequest\032*.google." + + "spanner.admin.instance.v1.Instance\"0\332A\004n" + + "ame\202\323\344\223\002#\022!/v1/{name=projects/*/instances/*}\022\234\002\n" + + "\016CreateInstance\0227.google.spanner" + + ".admin.instance.v1.CreateInstanceRequest\032\035.google.longrunning.Operation\"\261\001\312Ad\n" + + ")google.spanner.admin.instance.v1.Instance\0227google.spanner.admin.instance.v1.Creat" + + "eInstanceMetadata\332A\033parent,instance_id,i" + + "nstance\202\323\344\223\002&\"!/v1/{parent=projects/*}/instances:\001*\022\235\002\n" + + "\016UpdateInstance\0227.google.spanner.admin.instance.v1.UpdateInstance" + + "Request\032\035.google.longrunning.Operation\"\262\001\312Ad\n" + + ")google.spanner.admin.instance.v1.Instance\0227google.spanner.admin.instance.v" + + "1.UpdateInstanceMetadata\332A\023instance,fiel" + + "d_mask\202\323\344\223\002/2*/v1/{instance.name=projects/*/instances/*}:\001*\022\223\001\n" + + "\016DeleteInstance\0227.google.spanner.admin.instance.v1.Delete" + + "InstanceRequest\032\026.google.protobuf.Empty\"" + + "0\332A\004name\202\323\344\223\002#*!/v1/{name=projects/*/instances/*}\022\232\001\n" + + "\014SetIamPolicy\022\".google.iam.v1.SetIamPolicyRequest\032\025.google.iam.v1.P" + + "olicy\"O\332A\017resource,policy\202\323\344\223\0027\"2/v1/{re" + + "source=projects/*/instances/*}:setIamPolicy:\001*\022\223\001\n" + + 
"\014GetIamPolicy\022\".google.iam.v1.GetIamPolicyRequest\032\025.google.iam.v1.Poli" + + "cy\"H\332A\010resource\202\323\344\223\0027\"2/v1/{resource=projects/*/instances/*}:getIamPolicy:\001*\022\305\001\n" + + "\022TestIamPermissions\022(.google.iam.v1.Test" + + "IamPermissionsRequest\032).google.iam.v1.Te" + + "stIamPermissionsResponse\"Z\332A\024resource,pe" + + "rmissions\202\323\344\223\002=\"8/v1/{resource=projects/*/instances/*}:testIamPermissions:\001*\022\321\001\n" + + "\024GetInstancePartition\022=.google.spanner.admin.instance.v1.GetInstancePartitionReq" + + "uest\0323.google.spanner.admin.instance.v1." + + "InstancePartition\"E\332A\004name\202\323\344\223\0028\0226/v1/{n" + + "ame=projects/*/instances/*/instancePartitions/*}\022\351\002\n" + + "\027CreateInstancePartition\022@.google.spanner.admin.instance.v1.CreateIn" + + "stancePartitionRequest\032\035.google.longrunning.Operation\"\354\001\312Av\n" + + "2google.spanner.admin.instance.v1.InstancePartition\022@google." + + "spanner.admin.instance.v1.CreateInstancePartitionMetadata\332A/parent,instance_part" + + "ition,instance_partition_id\202\323\344\223\002;\"6/v1/{" + + "parent=projects/*/instances/*}/instancePartitions:\001*\022\272\001\n" + + "\027DeleteInstancePartition\022@.google.spanner.admin.instance.v1.Dele" + + "teInstancePartitionRequest\032\026.google.prot" + + "obuf.Empty\"E\332A\004name\202\323\344\223\0028*6/v1/{name=pro" + + "jects/*/instances/*/instancePartitions/*}\022\352\002\n" + + "\027UpdateInstancePartition\022@.google.spanner.admin.instance.v1.UpdateInstanceP" + + "artitionRequest\032\035.google.longrunning.Operation\"\355\001\312Av\n" + + "2google.spanner.admin.instance.v1.InstancePartition\022@google.spanner" + + ".admin.instance.v1.UpdateInstancePartitionMetadata\332A\035instance_partition,field_ma" + + "sk\202\323\344\223\002N2I/v1/{instance_partition.name=p" + + "rojects/*/instances/*/instancePartitions/*}:\001*\022\210\002\n" + + 
"\037ListInstancePartitionOperations\022H.google.spanner.admin.instance.v1.Li" + + "stInstancePartitionOperationsRequest\032I.g", + "oogle.spanner.admin.instance.v1.ListInst" + + "ancePartitionOperationsResponse\"P\332A\006pare" + + "nt\202\323\344\223\002A\022?/v1/{parent=projects/*/instanc" + + "es/*}/instancePartitionOperations\022\211\002\n\014Mo" + + "veInstance\0225.google.spanner.admin.instan" + + "ce.v1.MoveInstanceRequest\032\035.google.longr" + + "unning.Operation\"\242\001\312An\n5google.spanner.a" + + "dmin.instance.v1.MoveInstanceResponse\0225g" + + "oogle.spanner.admin.instance.v1.MoveInst" + + "anceMetadata\202\323\344\223\002+\"&/v1/{name=projects/*" + + "/instances/*}:move:\001*\032x\312A\026spanner.google" + + "apis.com\322A\\https://www.googleapis.com/au" + + "th/cloud-platform,https://www.googleapis" + + ".com/auth/spanner.adminB\213\002\n$com.google.s" + + "panner.admin.instance.v1B\031SpannerInstanc" + + "eAdminProtoP\001ZFcloud.google.com/go/spann" + + "er/admin/instance/apiv1/instancepb;insta" + + "ncepb\252\002&Google.Cloud.Spanner.Admin.Insta" + + "nce.V1\312\002&Google\\Cloud\\Spanner\\Admin\\Inst" + + "ance\\V1\352\002+Google::Cloud::Spanner::Admin:" + + ":Instance::V1b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.AnnotationsProto.getDescriptor(), + com.google.api.ClientProto.getDescriptor(), + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.iam.v1.IamPolicyProto.getDescriptor(), + com.google.iam.v1.PolicyProto.getDescriptor(), + com.google.longrunning.OperationsProto.getDescriptor(), + com.google.protobuf.EmptyProto.getDescriptor(), + com.google.protobuf.FieldMaskProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + com.google.spanner.admin.instance.v1.CommonProto.getDescriptor(), + }); + 
internal_static_google_spanner_admin_instance_v1_ReplicaInfo_descriptor = + getDescriptor().getMessageType(0); + internal_static_google_spanner_admin_instance_v1_ReplicaInfo_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_ReplicaInfo_descriptor, + new java.lang.String[] { + "Location", "Type", "DefaultLeaderLocation", + }); + internal_static_google_spanner_admin_instance_v1_InstanceConfig_descriptor = + getDescriptor().getMessageType(1); + internal_static_google_spanner_admin_instance_v1_InstanceConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_InstanceConfig_descriptor, + new java.lang.String[] { + "Name", + "DisplayName", + "ConfigType", + "Replicas", + "OptionalReplicas", + "BaseConfig", + "Labels", + "Etag", + "LeaderOptions", + "Reconciling", + "State", + "FreeInstanceAvailability", + "QuorumType", + "StorageLimitPerProcessingUnit", + }); + internal_static_google_spanner_admin_instance_v1_InstanceConfig_LabelsEntry_descriptor = + internal_static_google_spanner_admin_instance_v1_InstanceConfig_descriptor.getNestedType(0); + internal_static_google_spanner_admin_instance_v1_InstanceConfig_LabelsEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_InstanceConfig_LabelsEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_spanner_admin_instance_v1_ReplicaComputeCapacity_descriptor = + getDescriptor().getMessageType(2); + internal_static_google_spanner_admin_instance_v1_ReplicaComputeCapacity_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_ReplicaComputeCapacity_descriptor, + new java.lang.String[] { + "ReplicaSelection", "NodeCount", "ProcessingUnits", "ComputeCapacity", + }); + 
internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_descriptor = + getDescriptor().getMessageType(3); + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_descriptor, + new java.lang.String[] { + "AutoscalingLimits", "AutoscalingTargets", "AsymmetricAutoscalingOptions", + }); + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingLimits_descriptor = + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_descriptor.getNestedType( + 0); + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingLimits_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingLimits_descriptor, + new java.lang.String[] { + "MinNodes", + "MinProcessingUnits", + "MaxNodes", + "MaxProcessingUnits", + "MinLimit", + "MaxLimit", + }); + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingTargets_descriptor = + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_descriptor.getNestedType( + 1); + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingTargets_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingTargets_descriptor, + new java.lang.String[] { + "HighPriorityCpuUtilizationPercent", + "TotalCpuUtilizationPercent", + "StorageUtilizationPercent", + }); + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_descriptor = + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_descriptor.getNestedType( + 2); + 
internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_descriptor, + new java.lang.String[] { + "ReplicaSelection", "Overrides", + }); + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_AutoscalingConfigOverrides_descriptor = + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_descriptor + .getNestedType(0); + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_AutoscalingConfigOverrides_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_AutoscalingConfigOverrides_descriptor, + new java.lang.String[] { + "AutoscalingLimits", + "AutoscalingTargetHighPriorityCpuUtilizationPercent", + "AutoscalingTargetTotalCpuUtilizationPercent", + "DisableHighPriorityCpuAutoscaling", + "DisableTotalCpuAutoscaling", + }); + internal_static_google_spanner_admin_instance_v1_Instance_descriptor = + getDescriptor().getMessageType(4); + internal_static_google_spanner_admin_instance_v1_Instance_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_Instance_descriptor, + new java.lang.String[] { + "Name", + "Config", + "DisplayName", + "NodeCount", + "ProcessingUnits", + "ReplicaComputeCapacity", + "AutoscalingConfig", + "State", + "Labels", + "InstanceType", + "EndpointUris", + "CreateTime", + "UpdateTime", + "FreeInstanceMetadata", + "Edition", + "DefaultBackupScheduleType", + }); + internal_static_google_spanner_admin_instance_v1_Instance_LabelsEntry_descriptor = + 
internal_static_google_spanner_admin_instance_v1_Instance_descriptor.getNestedType(0); + internal_static_google_spanner_admin_instance_v1_Instance_LabelsEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_Instance_LabelsEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsRequest_descriptor = + getDescriptor().getMessageType(5); + internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsRequest_descriptor, + new java.lang.String[] { + "Parent", "PageSize", "PageToken", + }); + internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsResponse_descriptor = + getDescriptor().getMessageType(6); + internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsResponse_descriptor, + new java.lang.String[] { + "InstanceConfigs", "NextPageToken", + }); + internal_static_google_spanner_admin_instance_v1_GetInstanceConfigRequest_descriptor = + getDescriptor().getMessageType(7); + internal_static_google_spanner_admin_instance_v1_GetInstanceConfigRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_GetInstanceConfigRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigRequest_descriptor = + getDescriptor().getMessageType(8); + internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigRequest_descriptor, + new java.lang.String[] { + "Parent", "InstanceConfigId", "InstanceConfig", "ValidateOnly", + }); + internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigRequest_descriptor = + getDescriptor().getMessageType(9); + internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigRequest_descriptor, + new java.lang.String[] { + "InstanceConfig", "UpdateMask", "ValidateOnly", + }); + internal_static_google_spanner_admin_instance_v1_DeleteInstanceConfigRequest_descriptor = + getDescriptor().getMessageType(10); + internal_static_google_spanner_admin_instance_v1_DeleteInstanceConfigRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_DeleteInstanceConfigRequest_descriptor, + new java.lang.String[] { + "Name", "Etag", "ValidateOnly", + }); + internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsRequest_descriptor = + getDescriptor().getMessageType(11); + internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsRequest_descriptor, + new java.lang.String[] { + "Parent", "Filter", "PageSize", "PageToken", + }); + internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsResponse_descriptor = + getDescriptor().getMessageType(12); + internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsResponse_descriptor, + new java.lang.String[] { + "Operations", "NextPageToken", + }); + internal_static_google_spanner_admin_instance_v1_GetInstanceRequest_descriptor = + getDescriptor().getMessageType(13); + internal_static_google_spanner_admin_instance_v1_GetInstanceRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_GetInstanceRequest_descriptor, + new java.lang.String[] { + "Name", "FieldMask", + }); + internal_static_google_spanner_admin_instance_v1_CreateInstanceRequest_descriptor = + getDescriptor().getMessageType(14); + internal_static_google_spanner_admin_instance_v1_CreateInstanceRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_CreateInstanceRequest_descriptor, + new java.lang.String[] { + "Parent", "InstanceId", "Instance", + }); + internal_static_google_spanner_admin_instance_v1_ListInstancesRequest_descriptor = + getDescriptor().getMessageType(15); + internal_static_google_spanner_admin_instance_v1_ListInstancesRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_ListInstancesRequest_descriptor, + new java.lang.String[] { + "Parent", "PageSize", "PageToken", "Filter", "InstanceDeadline", + }); + internal_static_google_spanner_admin_instance_v1_ListInstancesResponse_descriptor = + getDescriptor().getMessageType(16); + internal_static_google_spanner_admin_instance_v1_ListInstancesResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_ListInstancesResponse_descriptor, + new java.lang.String[] { + "Instances", "NextPageToken", "Unreachable", + }); + 
internal_static_google_spanner_admin_instance_v1_UpdateInstanceRequest_descriptor = + getDescriptor().getMessageType(17); + internal_static_google_spanner_admin_instance_v1_UpdateInstanceRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_UpdateInstanceRequest_descriptor, + new java.lang.String[] { + "Instance", "FieldMask", + }); + internal_static_google_spanner_admin_instance_v1_DeleteInstanceRequest_descriptor = + getDescriptor().getMessageType(18); + internal_static_google_spanner_admin_instance_v1_DeleteInstanceRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_DeleteInstanceRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_spanner_admin_instance_v1_CreateInstanceMetadata_descriptor = + getDescriptor().getMessageType(19); + internal_static_google_spanner_admin_instance_v1_CreateInstanceMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_CreateInstanceMetadata_descriptor, + new java.lang.String[] { + "Instance", "StartTime", "CancelTime", "EndTime", "ExpectedFulfillmentPeriod", + }); + internal_static_google_spanner_admin_instance_v1_UpdateInstanceMetadata_descriptor = + getDescriptor().getMessageType(20); + internal_static_google_spanner_admin_instance_v1_UpdateInstanceMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_UpdateInstanceMetadata_descriptor, + new java.lang.String[] { + "Instance", "StartTime", "CancelTime", "EndTime", "ExpectedFulfillmentPeriod", + }); + internal_static_google_spanner_admin_instance_v1_FreeInstanceMetadata_descriptor = + getDescriptor().getMessageType(21); + 
internal_static_google_spanner_admin_instance_v1_FreeInstanceMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_FreeInstanceMetadata_descriptor, + new java.lang.String[] { + "ExpireTime", "UpgradeTime", "ExpireBehavior", + }); + internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigMetadata_descriptor = + getDescriptor().getMessageType(22); + internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigMetadata_descriptor, + new java.lang.String[] { + "InstanceConfig", "Progress", "CancelTime", + }); + internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigMetadata_descriptor = + getDescriptor().getMessageType(23); + internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigMetadata_descriptor, + new java.lang.String[] { + "InstanceConfig", "Progress", "CancelTime", + }); + internal_static_google_spanner_admin_instance_v1_InstancePartition_descriptor = + getDescriptor().getMessageType(24); + internal_static_google_spanner_admin_instance_v1_InstancePartition_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_InstancePartition_descriptor, + new java.lang.String[] { + "Name", + "Config", + "DisplayName", + "NodeCount", + "ProcessingUnits", + "AutoscalingConfig", + "State", + "CreateTime", + "UpdateTime", + "ReferencingDatabases", + "ReferencingBackups", + "Etag", + "ComputeCapacity", + }); + internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionMetadata_descriptor = + getDescriptor().getMessageType(25); + 
internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionMetadata_descriptor, + new java.lang.String[] { + "InstancePartition", "StartTime", "CancelTime", "EndTime", + }); + internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionRequest_descriptor = + getDescriptor().getMessageType(26); + internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionRequest_descriptor, + new java.lang.String[] { + "Parent", "InstancePartitionId", "InstancePartition", + }); + internal_static_google_spanner_admin_instance_v1_DeleteInstancePartitionRequest_descriptor = + getDescriptor().getMessageType(27); + internal_static_google_spanner_admin_instance_v1_DeleteInstancePartitionRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_DeleteInstancePartitionRequest_descriptor, + new java.lang.String[] { + "Name", "Etag", + }); + internal_static_google_spanner_admin_instance_v1_GetInstancePartitionRequest_descriptor = + getDescriptor().getMessageType(28); + internal_static_google_spanner_admin_instance_v1_GetInstancePartitionRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_GetInstancePartitionRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionRequest_descriptor = + getDescriptor().getMessageType(29); + internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionRequest_fieldAccessorTable = + new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionRequest_descriptor, + new java.lang.String[] { + "InstancePartition", "FieldMask", + }); + internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionMetadata_descriptor = + getDescriptor().getMessageType(30); + internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionMetadata_descriptor, + new java.lang.String[] { + "InstancePartition", "StartTime", "CancelTime", "EndTime", + }); + internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsRequest_descriptor = + getDescriptor().getMessageType(31); + internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsRequest_descriptor, + new java.lang.String[] { + "Parent", "PageSize", "PageToken", "InstancePartitionDeadline", + }); + internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsResponse_descriptor = + getDescriptor().getMessageType(32); + internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsResponse_descriptor, + new java.lang.String[] { + "InstancePartitions", "NextPageToken", "Unreachable", + }); + internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsRequest_descriptor = + getDescriptor().getMessageType(33); + internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsRequest_descriptor, + new java.lang.String[] { + "Parent", "Filter", "PageSize", "PageToken", "InstancePartitionDeadline", + }); + internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsResponse_descriptor = + getDescriptor().getMessageType(34); + internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsResponse_descriptor, + new java.lang.String[] { + "Operations", "NextPageToken", "UnreachableInstancePartitions", + }); + internal_static_google_spanner_admin_instance_v1_MoveInstanceRequest_descriptor = + getDescriptor().getMessageType(35); + internal_static_google_spanner_admin_instance_v1_MoveInstanceRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_MoveInstanceRequest_descriptor, + new java.lang.String[] { + "Name", "TargetConfig", + }); + internal_static_google_spanner_admin_instance_v1_MoveInstanceResponse_descriptor = + getDescriptor().getMessageType(36); + internal_static_google_spanner_admin_instance_v1_MoveInstanceResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_MoveInstanceResponse_descriptor, + new java.lang.String[] {}); + internal_static_google_spanner_admin_instance_v1_MoveInstanceMetadata_descriptor = + getDescriptor().getMessageType(37); + internal_static_google_spanner_admin_instance_v1_MoveInstanceMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_MoveInstanceMetadata_descriptor, + new java.lang.String[] { + "TargetConfig", "Progress", "CancelTime", + }); + 
descriptor.resolveAllFeaturesImmutable(); + com.google.api.AnnotationsProto.getDescriptor(); + com.google.api.ClientProto.getDescriptor(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + com.google.iam.v1.IamPolicyProto.getDescriptor(); + com.google.iam.v1.PolicyProto.getDescriptor(); + com.google.longrunning.OperationsProto.getDescriptor(); + com.google.protobuf.EmptyProto.getDescriptor(); + com.google.protobuf.FieldMaskProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + com.google.spanner.admin.instance.v1.CommonProto.getDescriptor(); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.ClientProto.defaultHost); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.AnnotationsProto.http); + registry.add(com.google.api.ClientProto.methodSignature); + registry.add(com.google.api.ClientProto.oauthScopes); + registry.add(com.google.api.ResourceProto.resource); + registry.add(com.google.api.ResourceProto.resourceReference); + registry.add(com.google.longrunning.OperationsProto.operationInfo); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigMetadata.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigMetadata.java new file mode 100644 index 000000000000..f090b3a21262 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigMetadata.java @@ -0,0 +1,1298 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 
(the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * Metadata type for the operation returned by
    + * [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata} + */ +@com.google.protobuf.Generated +public final class UpdateInstanceConfigMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata) + UpdateInstanceConfigMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateInstanceConfigMetadata"); + } + + // Use UpdateInstanceConfigMetadata.newBuilder() to construct. + private UpdateInstanceConfigMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateInstanceConfigMetadata() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.class, + com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.Builder.class); + } + + private int bitField0_; + public static final int INSTANCE_CONFIG_FIELD_NUMBER = 1; + private com.google.spanner.admin.instance.v1.InstanceConfig instanceConfig_; + + /** + * + * + *
    +   * The desired instance configuration after updating.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + * + * @return Whether the instanceConfig field is set. + */ + @java.lang.Override + public boolean hasInstanceConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The desired instance configuration after updating.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + * + * @return The instanceConfig. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig() { + return instanceConfig_ == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance() + : instanceConfig_; + } + + /** + * + * + *
    +   * The desired instance configuration after updating.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder getInstanceConfigOrBuilder() { + return instanceConfig_ == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance() + : instanceConfig_; + } + + public static final int PROGRESS_FIELD_NUMBER = 2; + private com.google.spanner.admin.instance.v1.OperationProgress progress_; + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return Whether the progress field is set. + */ + @java.lang.Override + public boolean hasProgress() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return The progress. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.OperationProgress getProgress() { + return progress_ == null + ? com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance() + : progress_; + } + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.OperationProgressOrBuilder getProgressOrBuilder() { + return progress_ == null + ? com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance() + : progress_; + } + + public static final int CANCEL_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp cancelTime_; + + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + @java.lang.Override + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCancelTime() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getInstanceConfig()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getProgress()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(3, getCancelTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getInstanceConfig()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getProgress()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getCancelTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata other = + (com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata) obj; + 
+ if (hasInstanceConfig() != other.hasInstanceConfig()) return false; + if (hasInstanceConfig()) { + if (!getInstanceConfig().equals(other.getInstanceConfig())) return false; + } + if (hasProgress() != other.hasProgress()) return false; + if (hasProgress()) { + if (!getProgress().equals(other.getProgress())) return false; + } + if (hasCancelTime() != other.hasCancelTime()) return false; + if (hasCancelTime()) { + if (!getCancelTime().equals(other.getCancelTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasInstanceConfig()) { + hash = (37 * hash) + INSTANCE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getInstanceConfig().hashCode(); + } + if (hasProgress()) { + hash = (37 * hash) + PROGRESS_FIELD_NUMBER; + hash = (53 * hash) + getProgress().hashCode(); + } + if (hasCancelTime()) { + hash = (37 * hash) + CANCEL_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCancelTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, 
extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Metadata type for the operation returned by
    +   * [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata) + com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.class, + com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.Builder.class); + } + + // Construct using + // com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetInstanceConfigFieldBuilder(); + internalGetProgressFieldBuilder(); + internalGetCancelTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + instanceConfig_ = null; + if (instanceConfigBuilder_ != null) { + instanceConfigBuilder_.dispose(); + instanceConfigBuilder_ = null; + } + progress_ = null; + if (progressBuilder_ != null) { + 
progressBuilder_.dispose(); + progressBuilder_ = null; + } + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigMetadata_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata build() { + com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata buildPartial() { + com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata result = + new com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.instanceConfig_ = + instanceConfigBuilder_ == null ? instanceConfig_ : instanceConfigBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.progress_ = progressBuilder_ == null ? 
progress_ : progressBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.cancelTime_ = cancelTimeBuilder_ == null ? cancelTime_ : cancelTimeBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata) { + return mergeFrom((com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata other) { + if (other + == com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.getDefaultInstance()) + return this; + if (other.hasInstanceConfig()) { + mergeInstanceConfig(other.getInstanceConfig()); + } + if (other.hasProgress()) { + mergeProgress(other.getProgress()); + } + if (other.hasCancelTime()) { + mergeCancelTime(other.getCancelTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetInstanceConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetProgressFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 
26: + { + input.readMessage( + internalGetCancelTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.admin.instance.v1.InstanceConfig instanceConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstanceConfig, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder, + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder> + instanceConfigBuilder_; + + /** + * + * + *
    +     * The desired instance configuration after updating.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + * + * @return Whether the instanceConfig field is set. + */ + public boolean hasInstanceConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * The desired instance configuration after updating.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + * + * @return The instanceConfig. + */ + public com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig() { + if (instanceConfigBuilder_ == null) { + return instanceConfig_ == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance() + : instanceConfig_; + } else { + return instanceConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The desired instance configuration after updating.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + */ + public Builder setInstanceConfig(com.google.spanner.admin.instance.v1.InstanceConfig value) { + if (instanceConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + instanceConfig_ = value; + } else { + instanceConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The desired instance configuration after updating.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + */ + public Builder setInstanceConfig( + com.google.spanner.admin.instance.v1.InstanceConfig.Builder builderForValue) { + if (instanceConfigBuilder_ == null) { + instanceConfig_ = builderForValue.build(); + } else { + instanceConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The desired instance configuration after updating.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + */ + public Builder mergeInstanceConfig(com.google.spanner.admin.instance.v1.InstanceConfig value) { + if (instanceConfigBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && instanceConfig_ != null + && instanceConfig_ + != com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance()) { + getInstanceConfigBuilder().mergeFrom(value); + } else { + instanceConfig_ = value; + } + } else { + instanceConfigBuilder_.mergeFrom(value); + } + if (instanceConfig_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The desired instance configuration after updating.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + */ + public Builder clearInstanceConfig() { + bitField0_ = (bitField0_ & ~0x00000001); + instanceConfig_ = null; + if (instanceConfigBuilder_ != null) { + instanceConfigBuilder_.dispose(); + instanceConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The desired instance configuration after updating.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + */ + public com.google.spanner.admin.instance.v1.InstanceConfig.Builder getInstanceConfigBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetInstanceConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The desired instance configuration after updating.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + */ + public com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder + getInstanceConfigOrBuilder() { + if (instanceConfigBuilder_ != null) { + return instanceConfigBuilder_.getMessageOrBuilder(); + } else { + return instanceConfig_ == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance() + : instanceConfig_; + } + } + + /** + * + * + *
    +     * The desired instance configuration after updating.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstanceConfig, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder, + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder> + internalGetInstanceConfigFieldBuilder() { + if (instanceConfigBuilder_ == null) { + instanceConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstanceConfig, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder, + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder>( + getInstanceConfig(), getParentForChildren(), isClean()); + instanceConfig_ = null; + } + return instanceConfigBuilder_; + } + + private com.google.spanner.admin.instance.v1.OperationProgress progress_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.OperationProgress, + com.google.spanner.admin.instance.v1.OperationProgress.Builder, + com.google.spanner.admin.instance.v1.OperationProgressOrBuilder> + progressBuilder_; + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return Whether the progress field is set. + */ + public boolean hasProgress() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return The progress. + */ + public com.google.spanner.admin.instance.v1.OperationProgress getProgress() { + if (progressBuilder_ == null) { + return progress_ == null + ? com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance() + : progress_; + } else { + return progressBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public Builder setProgress(com.google.spanner.admin.instance.v1.OperationProgress value) { + if (progressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + progress_ = value; + } else { + progressBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public Builder setProgress( + com.google.spanner.admin.instance.v1.OperationProgress.Builder builderForValue) { + if (progressBuilder_ == null) { + progress_ = builderForValue.build(); + } else { + progressBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public Builder mergeProgress(com.google.spanner.admin.instance.v1.OperationProgress value) { + if (progressBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && progress_ != null + && progress_ + != com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance()) { + getProgressBuilder().mergeFrom(value); + } else { + progress_ = value; + } + } else { + progressBuilder_.mergeFrom(value); + } + if (progress_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public Builder clearProgress() { + bitField0_ = (bitField0_ & ~0x00000002); + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public com.google.spanner.admin.instance.v1.OperationProgress.Builder getProgressBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetProgressFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public com.google.spanner.admin.instance.v1.OperationProgressOrBuilder getProgressOrBuilder() { + if (progressBuilder_ != null) { + return progressBuilder_.getMessageOrBuilder(); + } else { + return progress_ == null + ? com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance() + : progress_; + } + } + + /** + * + * + *
    +     * The progress of the
    +     * [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]
    +     * operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.OperationProgress, + com.google.spanner.admin.instance.v1.OperationProgress.Builder, + com.google.spanner.admin.instance.v1.OperationProgressOrBuilder> + internalGetProgressFieldBuilder() { + if (progressBuilder_ == null) { + progressBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.OperationProgress, + com.google.spanner.admin.instance.v1.OperationProgress.Builder, + com.google.spanner.admin.instance.v1.OperationProgressOrBuilder>( + getProgress(), getParentForChildren(), isClean()); + progress_ = null; + } + return progressBuilder_; + } + + private com.google.protobuf.Timestamp cancelTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + cancelTimeBuilder_; + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + public com.google.protobuf.Timestamp getCancelTime() { + if (cancelTimeBuilder_ == null) { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } else { + return cancelTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cancelTime_ = value; + } else { + cancelTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (cancelTimeBuilder_ == null) { + cancelTime_ = builderForValue.build(); + } else { + cancelTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder mergeCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && cancelTime_ != null + && cancelTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCancelTimeBuilder().mergeFrom(value); + } else { + cancelTime_ = value; + } + } else { + cancelTimeBuilder_.mergeFrom(value); + } + if (cancelTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder clearCancelTime() { + bitField0_ = (bitField0_ & ~0x00000004); + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public com.google.protobuf.Timestamp.Builder getCancelTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetCancelTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + if (cancelTimeBuilder_ != null) { + return cancelTimeBuilder_.getMessageOrBuilder(); + } else { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } + } + + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCancelTimeFieldBuilder() { + if (cancelTimeBuilder_ == null) { + cancelTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCancelTime(), getParentForChildren(), isClean()); + cancelTime_ = null; + } + return cancelTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata) + private static final com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata(); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateInstanceConfigMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigMetadataOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigMetadataOrBuilder.java new file mode 100644 index 000000000000..ad420c9ebc39 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigMetadataOrBuilder.java @@ -0,0 +1,145 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface UpdateInstanceConfigMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The desired instance configuration after updating.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + * + * @return Whether the instanceConfig field is set. + */ + boolean hasInstanceConfig(); + + /** + * + * + *
    +   * The desired instance configuration after updating.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + * + * @return The instanceConfig. + */ + com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig(); + + /** + * + * + *
    +   * The desired instance configuration after updating.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; + */ + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder getInstanceConfigOrBuilder(); + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return Whether the progress field is set. + */ + boolean hasProgress(); + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return The progress. + */ + com.google.spanner.admin.instance.v1.OperationProgress getProgress(); + + /** + * + * + *
    +   * The progress of the
    +   * [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]
    +   * operation.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + com.google.spanner.admin.instance.v1.OperationProgressOrBuilder getProgressOrBuilder(); + + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + boolean hasCancelTime(); + + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + com.google.protobuf.Timestamp getCancelTime(); + + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigRequest.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigRequest.java new file mode 100644 index 000000000000..cab42fc5ea62 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigRequest.java @@ -0,0 +1,1258 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The request for
    + * [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.UpdateInstanceConfigRequest} + */ +@com.google.protobuf.Generated +public final class UpdateInstanceConfigRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.UpdateInstanceConfigRequest) + UpdateInstanceConfigRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateInstanceConfigRequest"); + } + + // Use UpdateInstanceConfigRequest.newBuilder() to construct. + private UpdateInstanceConfigRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateInstanceConfigRequest() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.class, + com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.Builder.class); + } + + private int bitField0_; + public static final int INSTANCE_CONFIG_FIELD_NUMBER = 1; + private com.google.spanner.admin.instance.v1.InstanceConfig instanceConfig_; + + /** + * + * + *
    +   * Required. The user instance configuration to update, which must always
    +   * include the instance configuration name. Otherwise, only fields mentioned
    +   * in
    +   * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
    +   * need be included. To prevent conflicts of concurrent updates,
    +   * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    +   * be used.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the instanceConfig field is set. + */ + @java.lang.Override + public boolean hasInstanceConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Required. The user instance configuration to update, which must always
    +   * include the instance configuration name. Otherwise, only fields mentioned
    +   * in
    +   * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
    +   * need be included. To prevent conflicts of concurrent updates,
    +   * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    +   * be used.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The instanceConfig. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig() { + return instanceConfig_ == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance() + : instanceConfig_; + } + + /** + * + * + *
    +   * Required. The user instance configuration to update, which must always
    +   * include the instance configuration name. Otherwise, only fields mentioned
    +   * in
    +   * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
    +   * need be included. To prevent conflicts of concurrent updates,
    +   * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    +   * be used.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder getInstanceConfigOrBuilder() { + return instanceConfig_ == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance() + : instanceConfig_; + } + + public static final int UPDATE_MASK_FIELD_NUMBER = 2; + private com.google.protobuf.FieldMask updateMask_; + + /** + * + * + *
    +   * Required. A mask specifying which fields in
    +   * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] should be
    +   * updated. The field mask must always be specified; this prevents any future
    +   * fields in [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig]
    +   * from being erased accidentally by clients that do not know about them. Only
    +   * display_name and labels can be updated.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + @java.lang.Override + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Required. A mask specifying which fields in
    +   * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] should be
    +   * updated. The field mask must always be specified; this prevents any future
    +   * fields in [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig]
    +   * from being erased accidentally by clients that do not know about them. Only
    +   * display_name and labels can be updated.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getUpdateMask() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + /** + * + * + *
    +   * Required. A mask specifying which fields in
    +   * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] should be
    +   * updated. The field mask must always be specified; this prevents any future
    +   * fields in [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig]
    +   * from being erased accidentally by clients that do not know about them. Only
    +   * display_name and labels can be updated.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + public static final int VALIDATE_ONLY_FIELD_NUMBER = 3; + private boolean validateOnly_ = false; + + /** + * + * + *
    +   * An option to validate, but not actually execute, a request,
    +   * and provide the same response.
    +   * 
    + * + * bool validate_only = 3; + * + * @return The validateOnly. + */ + @java.lang.Override + public boolean getValidateOnly() { + return validateOnly_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getInstanceConfig()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getUpdateMask()); + } + if (validateOnly_ != false) { + output.writeBool(3, validateOnly_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getInstanceConfig()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getUpdateMask()); + } + if (validateOnly_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, validateOnly_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest other = + (com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest) obj; + + if (hasInstanceConfig() != other.hasInstanceConfig()) return false; + if (hasInstanceConfig()) { + if 
(!getInstanceConfig().equals(other.getInstanceConfig())) return false; + } + if (hasUpdateMask() != other.hasUpdateMask()) return false; + if (hasUpdateMask()) { + if (!getUpdateMask().equals(other.getUpdateMask())) return false; + } + if (getValidateOnly() != other.getValidateOnly()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasInstanceConfig()) { + hash = (37 * hash) + INSTANCE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getInstanceConfig().hashCode(); + } + if (hasUpdateMask()) { + hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; + hash = (53 * hash) + getUpdateMask().hashCode(); + } + hash = (37 * hash) + VALIDATE_ONLY_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getValidateOnly()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.UpdateInstanceConfigRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.UpdateInstanceConfigRequest) + com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.class, + com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetInstanceConfigFieldBuilder(); + internalGetUpdateMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + instanceConfig_ = null; + if (instanceConfigBuilder_ != null) { + instanceConfigBuilder_.dispose(); + instanceConfigBuilder_ = null; + } + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + 
} + validateOnly_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest build() { + com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest buildPartial() { + com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest result = + new com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.instanceConfig_ = + instanceConfigBuilder_ == null ? instanceConfig_ : instanceConfigBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.updateMask_ = updateMaskBuilder_ == null ? 
updateMask_ : updateMaskBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.validateOnly_ = validateOnly_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest) { + return mergeFrom((com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest other) { + if (other + == com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.getDefaultInstance()) + return this; + if (other.hasInstanceConfig()) { + mergeInstanceConfig(other.getInstanceConfig()); + } + if (other.hasUpdateMask()) { + mergeUpdateMask(other.getUpdateMask()); + } + if (other.getValidateOnly() != false) { + setValidateOnly(other.getValidateOnly()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetInstanceConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetUpdateMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + validateOnly_ = input.readBool(); + bitField0_ |= 
0x00000004; + break; + } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.admin.instance.v1.InstanceConfig instanceConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstanceConfig, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder, + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder> + instanceConfigBuilder_; + + /** + * + * + *
    +     * Required. The user instance configuration to update, which must always
    +     * include the instance configuration name. Otherwise, only fields mentioned
    +     * in
    +     * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
    +     * need be included. To prevent conflicts of concurrent updates,
    +     * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    +     * be used.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the instanceConfig field is set. + */ + public boolean hasInstanceConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Required. The user instance configuration to update, which must always
    +     * include the instance configuration name. Otherwise, only fields mentioned
    +     * in
    +     * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
    +     * need be included. To prevent conflicts of concurrent updates,
    +     * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    +     * be used.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The instanceConfig. + */ + public com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig() { + if (instanceConfigBuilder_ == null) { + return instanceConfig_ == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance() + : instanceConfig_; + } else { + return instanceConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. The user instance configuration to update, which must always
    +     * include the instance configuration name. Otherwise, only fields mentioned
    +     * in
    +     * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
    +     * need be included. To prevent conflicts of concurrent updates,
    +     * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    +     * be used.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setInstanceConfig(com.google.spanner.admin.instance.v1.InstanceConfig value) { + if (instanceConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + instanceConfig_ = value; + } else { + instanceConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The user instance configuration to update, which must always
    +     * include the instance configuration name. Otherwise, only fields mentioned
    +     * in
    +     * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
    +     * need be included. To prevent conflicts of concurrent updates,
    +     * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    +     * be used.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setInstanceConfig( + com.google.spanner.admin.instance.v1.InstanceConfig.Builder builderForValue) { + if (instanceConfigBuilder_ == null) { + instanceConfig_ = builderForValue.build(); + } else { + instanceConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The user instance configuration to update, which must always
    +     * include the instance configuration name. Otherwise, only fields mentioned
    +     * in
    +     * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
    +     * need be included. To prevent conflicts of concurrent updates,
    +     * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    +     * be used.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeInstanceConfig(com.google.spanner.admin.instance.v1.InstanceConfig value) { + if (instanceConfigBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && instanceConfig_ != null + && instanceConfig_ + != com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance()) { + getInstanceConfigBuilder().mergeFrom(value); + } else { + instanceConfig_ = value; + } + } else { + instanceConfigBuilder_.mergeFrom(value); + } + if (instanceConfig_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. The user instance configuration to update, which must always
    +     * include the instance configuration name. Otherwise, only fields mentioned
    +     * in
    +     * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
    +     * need be included. To prevent conflicts of concurrent updates,
    +     * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    +     * be used.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearInstanceConfig() { + bitField0_ = (bitField0_ & ~0x00000001); + instanceConfig_ = null; + if (instanceConfigBuilder_ != null) { + instanceConfigBuilder_.dispose(); + instanceConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The user instance configuration to update, which must always
    +     * include the instance configuration name. Otherwise, only fields mentioned
    +     * in
    +     * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
    +     * need be included. To prevent conflicts of concurrent updates,
    +     * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    +     * be used.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.InstanceConfig.Builder getInstanceConfigBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetInstanceConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. The user instance configuration to update, which must always
    +     * include the instance configuration name. Otherwise, only fields mentioned
    +     * in
    +     * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
    +     * need be included. To prevent conflicts of concurrent updates,
    +     * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    +     * be used.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder + getInstanceConfigOrBuilder() { + if (instanceConfigBuilder_ != null) { + return instanceConfigBuilder_.getMessageOrBuilder(); + } else { + return instanceConfig_ == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance() + : instanceConfig_; + } + } + + /** + * + * + *
    +     * Required. The user instance configuration to update, which must always
    +     * include the instance configuration name. Otherwise, only fields mentioned
    +     * in
    +     * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
    +     * need be included. To prevent conflicts of concurrent updates,
    +     * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    +     * be used.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstanceConfig, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder, + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder> + internalGetInstanceConfigFieldBuilder() { + if (instanceConfigBuilder_ == null) { + instanceConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstanceConfig, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder, + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder>( + getInstanceConfig(), getParentForChildren(), isClean()); + instanceConfig_ = null; + } + return instanceConfigBuilder_; + } + + private com.google.protobuf.FieldMask updateMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + updateMaskBuilder_; + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] should be
    +     * updated. The field mask must always be specified; this prevents any future
    +     * fields in [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig]
    +     * from being erased accidentally by clients that do not know about them. Only
    +     * display_name and labels can be updated.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] should be
    +     * updated. The field mask must always be specified; this prevents any future
    +     * fields in [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig]
    +     * from being erased accidentally by clients that do not know about them. Only
    +     * display_name and labels can be updated.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + public com.google.protobuf.FieldMask getUpdateMask() { + if (updateMaskBuilder_ == null) { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } else { + return updateMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] should be
    +     * updated. The field mask must always be specified; this prevents any future
    +     * fields in [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig]
    +     * from being erased accidentally by clients that do not know about them. Only
    +     * display_name and labels can be updated.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateMask_ = value; + } else { + updateMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] should be
    +     * updated. The field mask must always be specified; this prevents any future
    +     * fields in [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig]
    +     * from being erased accidentally by clients that do not know about them. Only
    +     * display_name and labels can be updated.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (updateMaskBuilder_ == null) { + updateMask_ = builderForValue.build(); + } else { + updateMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] should be
    +     * updated. The field mask must always be specified; this prevents any future
    +     * fields in [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig]
    +     * from being erased accidentally by clients that do not know about them. Only
    +     * display_name and labels can be updated.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && updateMask_ != null + && updateMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getUpdateMaskBuilder().mergeFrom(value); + } else { + updateMask_ = value; + } + } else { + updateMaskBuilder_.mergeFrom(value); + } + if (updateMask_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] should be
    +     * updated. The field mask must always be specified; this prevents any future
    +     * fields in [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig]
    +     * from being erased accidentally by clients that do not know about them. Only
    +     * display_name and labels can be updated.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearUpdateMask() { + bitField0_ = (bitField0_ & ~0x00000002); + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] should be
    +     * updated. The field mask must always be specified; this prevents any future
    +     * fields in [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig]
    +     * from being erased accidentally by clients that do not know about them. Only
    +     * display_name and labels can be updated.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetUpdateMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] should be
    +     * updated. The field mask must always be specified; this prevents any future
    +     * fields in [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig]
    +     * from being erased accidentally by clients that do not know about them. Only
    +     * display_name and labels can be updated.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + if (updateMaskBuilder_ != null) { + return updateMaskBuilder_.getMessageOrBuilder(); + } else { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] should be
    +     * updated. The field mask must always be specified; this prevents any future
    +     * fields in [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig]
    +     * from being erased accidentally by clients that do not know about them. Only
    +     * display_name and labels can be updated.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetUpdateMaskFieldBuilder() { + if (updateMaskBuilder_ == null) { + updateMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getUpdateMask(), getParentForChildren(), isClean()); + updateMask_ = null; + } + return updateMaskBuilder_; + } + + private boolean validateOnly_; + + /** + * + * + *
    +     * An option to validate, but not actually execute, a request,
    +     * and provide the same response.
    +     * 
    + * + * bool validate_only = 3; + * + * @return The validateOnly. + */ + @java.lang.Override + public boolean getValidateOnly() { + return validateOnly_; + } + + /** + * + * + *
    +     * An option to validate, but not actually execute, a request,
    +     * and provide the same response.
    +     * 
    + * + * bool validate_only = 3; + * + * @param value The validateOnly to set. + * @return This builder for chaining. + */ + public Builder setValidateOnly(boolean value) { + + validateOnly_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * An option to validate, but not actually execute, a request,
    +     * and provide the same response.
    +     * 
    + * + * bool validate_only = 3; + * + * @return This builder for chaining. + */ + public Builder clearValidateOnly() { + bitField0_ = (bitField0_ & ~0x00000004); + validateOnly_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.UpdateInstanceConfigRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.UpdateInstanceConfigRequest) + private static final com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest(); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateInstanceConfigRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest + 
getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigRequestOrBuilder.java new file mode 100644 index 000000000000..bb8bc46355b3 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigRequestOrBuilder.java @@ -0,0 +1,158 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface UpdateInstanceConfigRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.UpdateInstanceConfigRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The user instance configuration to update, which must always
    +   * include the instance configuration name. Otherwise, only fields mentioned
    +   * in
    +   * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
    +   * need be included. To prevent conflicts of concurrent updates,
    +   * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    +   * be used.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the instanceConfig field is set. + */ + boolean hasInstanceConfig(); + + /** + * + * + *
    +   * Required. The user instance configuration to update, which must always
    +   * include the instance configuration name. Otherwise, only fields mentioned
    +   * in
    +   * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
    +   * need be included. To prevent conflicts of concurrent updates,
    +   * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    +   * be used.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The instanceConfig. + */ + com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig(); + + /** + * + * + *
    +   * Required. The user instance configuration to update, which must always
    +   * include the instance configuration name. Otherwise, only fields mentioned
    +   * in
    +   * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
    +   * need be included. To prevent conflicts of concurrent updates,
    +   * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    +   * be used.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder getInstanceConfigOrBuilder(); + + /** + * + * + *
    +   * Required. A mask specifying which fields in
    +   * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] should be
    +   * updated. The field mask must always be specified; this prevents any future
    +   * fields in [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig]
    +   * from being erased accidentally by clients that do not know about them. Only
    +   * display_name and labels can be updated.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + boolean hasUpdateMask(); + + /** + * + * + *
    +   * Required. A mask specifying which fields in
    +   * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] should be
    +   * updated. The field mask must always be specified; this prevents any future
    +   * fields in [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig]
    +   * from being erased accidentally by clients that do not know about them. Only
    +   * display_name and labels can be updated.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + com.google.protobuf.FieldMask getUpdateMask(); + + /** + * + * + *
    +   * Required. A mask specifying which fields in
    +   * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] should be
    +   * updated. The field mask must always be specified; this prevents any future
    +   * fields in [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig]
    +   * from being erased accidentally by clients that do not know about them. Only
    +   * display_name and labels can be updated.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder(); + + /** + * + * + *
    +   * An option to validate, but not actually execute, a request,
    +   * and provide the same response.
    +   * 
    + * + * bool validate_only = 3; + * + * @return The validateOnly. + */ + boolean getValidateOnly(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceMetadata.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceMetadata.java new file mode 100644 index 000000000000..6457f0fece49 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceMetadata.java @@ -0,0 +1,1746 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * Metadata type for the operation returned by
    + * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.UpdateInstanceMetadata} + */ +@com.google.protobuf.Generated +public final class UpdateInstanceMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.UpdateInstanceMetadata) + UpdateInstanceMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateInstanceMetadata"); + } + + // Use UpdateInstanceMetadata.newBuilder() to construct. + private UpdateInstanceMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateInstanceMetadata() { + expectedFulfillmentPeriod_ = 0; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstanceMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstanceMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.UpdateInstanceMetadata.class, + com.google.spanner.admin.instance.v1.UpdateInstanceMetadata.Builder.class); + } + + private int bitField0_; + public static final int INSTANCE_FIELD_NUMBER = 1; + private com.google.spanner.admin.instance.v1.Instance instance_; + + /** + * + * + *
    +   * The desired end state of the update.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + * + * @return Whether the instance field is set. + */ + @java.lang.Override + public boolean hasInstance() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The desired end state of the update.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + * + * @return The instance. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance getInstance() { + return instance_ == null + ? com.google.spanner.admin.instance.v1.Instance.getDefaultInstance() + : instance_; + } + + /** + * + * + *
    +   * The desired end state of the update.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceOrBuilder getInstanceOrBuilder() { + return instance_ == null + ? com.google.spanner.admin.instance.v1.Instance.getDefaultInstance() + : instance_; + } + + public static final int START_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp startTime_; + + /** + * + * + *
    +   * The time at which
    +   * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return Whether the startTime field is set. + */ + @java.lang.Override + public boolean hasStartTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * The time at which
    +   * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return The startTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getStartTime() { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + + /** + * + * + *
    +   * The time at which
    +   * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + + public static final int CANCEL_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp cancelTime_; + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + @java.lang.Override + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCancelTime() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + public static final int END_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp endTime_; + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return Whether the endTime field is set. + */ + @java.lang.Override + public boolean hasEndTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return The endTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getEndTime() { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder() { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + + public static final int EXPECTED_FULFILLMENT_PERIOD_FIELD_NUMBER = 5; + private int expectedFulfillmentPeriod_ = 0; + + /** + * + * + *
    +   * The expected fulfillment period of this update operation.
    +   * 
    + * + * .google.spanner.admin.instance.v1.FulfillmentPeriod expected_fulfillment_period = 5; + * + * + * @return The enum numeric value on the wire for expectedFulfillmentPeriod. + */ + @java.lang.Override + public int getExpectedFulfillmentPeriodValue() { + return expectedFulfillmentPeriod_; + } + + /** + * + * + *
    +   * The expected fulfillment period of this update operation.
    +   * 
    + * + * .google.spanner.admin.instance.v1.FulfillmentPeriod expected_fulfillment_period = 5; + * + * + * @return The expectedFulfillmentPeriod. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.FulfillmentPeriod getExpectedFulfillmentPeriod() { + com.google.spanner.admin.instance.v1.FulfillmentPeriod result = + com.google.spanner.admin.instance.v1.FulfillmentPeriod.forNumber( + expectedFulfillmentPeriod_); + return result == null + ? com.google.spanner.admin.instance.v1.FulfillmentPeriod.UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getInstance()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getStartTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(3, getCancelTime()); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(4, getEndTime()); + } + if (expectedFulfillmentPeriod_ + != com.google.spanner.admin.instance.v1.FulfillmentPeriod.FULFILLMENT_PERIOD_UNSPECIFIED + .getNumber()) { + output.writeEnum(5, expectedFulfillmentPeriod_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getInstance()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getStartTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += 
com.google.protobuf.CodedOutputStream.computeMessageSize(3, getCancelTime()); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getEndTime()); + } + if (expectedFulfillmentPeriod_ + != com.google.spanner.admin.instance.v1.FulfillmentPeriod.FULFILLMENT_PERIOD_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(5, expectedFulfillmentPeriod_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.UpdateInstanceMetadata)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.UpdateInstanceMetadata other = + (com.google.spanner.admin.instance.v1.UpdateInstanceMetadata) obj; + + if (hasInstance() != other.hasInstance()) return false; + if (hasInstance()) { + if (!getInstance().equals(other.getInstance())) return false; + } + if (hasStartTime() != other.hasStartTime()) return false; + if (hasStartTime()) { + if (!getStartTime().equals(other.getStartTime())) return false; + } + if (hasCancelTime() != other.hasCancelTime()) return false; + if (hasCancelTime()) { + if (!getCancelTime().equals(other.getCancelTime())) return false; + } + if (hasEndTime() != other.hasEndTime()) return false; + if (hasEndTime()) { + if (!getEndTime().equals(other.getEndTime())) return false; + } + if (expectedFulfillmentPeriod_ != other.expectedFulfillmentPeriod_) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasInstance()) { + hash = (37 * hash) + INSTANCE_FIELD_NUMBER; + hash = (53 * hash) + 
getInstance().hashCode(); + } + if (hasStartTime()) { + hash = (37 * hash) + START_TIME_FIELD_NUMBER; + hash = (53 * hash) + getStartTime().hashCode(); + } + if (hasCancelTime()) { + hash = (37 * hash) + CANCEL_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCancelTime().hashCode(); + } + if (hasEndTime()) { + hash = (37 * hash) + END_TIME_FIELD_NUMBER; + hash = (53 * hash) + getEndTime().hashCode(); + } + hash = (37 * hash) + EXPECTED_FULFILLMENT_PERIOD_FIELD_NUMBER; + hash = (53 * hash) + expectedFulfillmentPeriod_; + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceMetadata parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceMetadata parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceMetadata parseFrom( + byte[] data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceMetadata parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceMetadata parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceMetadata parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } 
+ + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.UpdateInstanceMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Metadata type for the operation returned by
    +   * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.UpdateInstanceMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.UpdateInstanceMetadata) + com.google.spanner.admin.instance.v1.UpdateInstanceMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstanceMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstanceMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.UpdateInstanceMetadata.class, + com.google.spanner.admin.instance.v1.UpdateInstanceMetadata.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.UpdateInstanceMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetInstanceFieldBuilder(); + internalGetStartTimeFieldBuilder(); + internalGetCancelTimeFieldBuilder(); + internalGetEndTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + instance_ = null; + if (instanceBuilder_ != null) { + instanceBuilder_.dispose(); + instanceBuilder_ = null; + } + startTime_ = null; + if (startTimeBuilder_ != null) { + startTimeBuilder_.dispose(); + startTimeBuilder_ = null; + 
} + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + endTime_ = null; + if (endTimeBuilder_ != null) { + endTimeBuilder_.dispose(); + endTimeBuilder_ = null; + } + expectedFulfillmentPeriod_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstanceMetadata_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstanceMetadata getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.UpdateInstanceMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstanceMetadata build() { + com.google.spanner.admin.instance.v1.UpdateInstanceMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstanceMetadata buildPartial() { + com.google.spanner.admin.instance.v1.UpdateInstanceMetadata result = + new com.google.spanner.admin.instance.v1.UpdateInstanceMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.UpdateInstanceMetadata result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.instance_ = instanceBuilder_ == null ? instance_ : instanceBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.startTime_ = startTimeBuilder_ == null ? 
startTime_ : startTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.cancelTime_ = cancelTimeBuilder_ == null ? cancelTime_ : cancelTimeBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.endTime_ = endTimeBuilder_ == null ? endTime_ : endTimeBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.expectedFulfillmentPeriod_ = expectedFulfillmentPeriod_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.UpdateInstanceMetadata) { + return mergeFrom((com.google.spanner.admin.instance.v1.UpdateInstanceMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.UpdateInstanceMetadata other) { + if (other == com.google.spanner.admin.instance.v1.UpdateInstanceMetadata.getDefaultInstance()) + return this; + if (other.hasInstance()) { + mergeInstance(other.getInstance()); + } + if (other.hasStartTime()) { + mergeStartTime(other.getStartTime()); + } + if (other.hasCancelTime()) { + mergeCancelTime(other.getCancelTime()); + } + if (other.hasEndTime()) { + mergeEndTime(other.getEndTime()); + } + if (other.expectedFulfillmentPeriod_ != 0) { + setExpectedFulfillmentPeriodValue(other.getExpectedFulfillmentPeriodValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done 
= false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetInstanceFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetStartTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetCancelTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage(internalGetEndTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 40: + { + expectedFulfillmentPeriod_ = input.readEnum(); + bitField0_ |= 0x00000010; + break; + } // case 40 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.admin.instance.v1.Instance instance_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.Instance, + com.google.spanner.admin.instance.v1.Instance.Builder, + com.google.spanner.admin.instance.v1.InstanceOrBuilder> + instanceBuilder_; + + /** + * + * + *
    +     * The desired end state of the update.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + * + * @return Whether the instance field is set. + */ + public boolean hasInstance() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * The desired end state of the update.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + * + * @return The instance. + */ + public com.google.spanner.admin.instance.v1.Instance getInstance() { + if (instanceBuilder_ == null) { + return instance_ == null + ? com.google.spanner.admin.instance.v1.Instance.getDefaultInstance() + : instance_; + } else { + return instanceBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The desired end state of the update.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + */ + public Builder setInstance(com.google.spanner.admin.instance.v1.Instance value) { + if (instanceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + instance_ = value; + } else { + instanceBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The desired end state of the update.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + */ + public Builder setInstance( + com.google.spanner.admin.instance.v1.Instance.Builder builderForValue) { + if (instanceBuilder_ == null) { + instance_ = builderForValue.build(); + } else { + instanceBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The desired end state of the update.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + */ + public Builder mergeInstance(com.google.spanner.admin.instance.v1.Instance value) { + if (instanceBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && instance_ != null + && instance_ != com.google.spanner.admin.instance.v1.Instance.getDefaultInstance()) { + getInstanceBuilder().mergeFrom(value); + } else { + instance_ = value; + } + } else { + instanceBuilder_.mergeFrom(value); + } + if (instance_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The desired end state of the update.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + */ + public Builder clearInstance() { + bitField0_ = (bitField0_ & ~0x00000001); + instance_ = null; + if (instanceBuilder_ != null) { + instanceBuilder_.dispose(); + instanceBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The desired end state of the update.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + */ + public com.google.spanner.admin.instance.v1.Instance.Builder getInstanceBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetInstanceFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The desired end state of the update.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + */ + public com.google.spanner.admin.instance.v1.InstanceOrBuilder getInstanceOrBuilder() { + if (instanceBuilder_ != null) { + return instanceBuilder_.getMessageOrBuilder(); + } else { + return instance_ == null + ? com.google.spanner.admin.instance.v1.Instance.getDefaultInstance() + : instance_; + } + } + + /** + * + * + *
    +     * The desired end state of the update.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.Instance, + com.google.spanner.admin.instance.v1.Instance.Builder, + com.google.spanner.admin.instance.v1.InstanceOrBuilder> + internalGetInstanceFieldBuilder() { + if (instanceBuilder_ == null) { + instanceBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.Instance, + com.google.spanner.admin.instance.v1.Instance.Builder, + com.google.spanner.admin.instance.v1.InstanceOrBuilder>( + getInstance(), getParentForChildren(), isClean()); + instance_ = null; + } + return instanceBuilder_; + } + + private com.google.protobuf.Timestamp startTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + startTimeBuilder_; + + /** + * + * + *
    +     * The time at which
    +     * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return Whether the startTime field is set. + */ + public boolean hasStartTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * The time at which
    +     * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return The startTime. + */ + public com.google.protobuf.Timestamp getStartTime() { + if (startTimeBuilder_ == null) { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } else { + return startTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The time at which
    +     * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder setStartTime(com.google.protobuf.Timestamp value) { + if (startTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + startTime_ = value; + } else { + startTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which
    +     * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder setStartTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (startTimeBuilder_ == null) { + startTime_ = builderForValue.build(); + } else { + startTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which
    +     * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder mergeStartTime(com.google.protobuf.Timestamp value) { + if (startTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && startTime_ != null + && startTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getStartTimeBuilder().mergeFrom(value); + } else { + startTime_ = value; + } + } else { + startTimeBuilder_.mergeFrom(value); + } + if (startTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The time at which
    +     * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder clearStartTime() { + bitField0_ = (bitField0_ & ~0x00000002); + startTime_ = null; + if (startTimeBuilder_ != null) { + startTimeBuilder_.dispose(); + startTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which
    +     * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public com.google.protobuf.Timestamp.Builder getStartTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetStartTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The time at which
    +     * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { + if (startTimeBuilder_ != null) { + return startTimeBuilder_.getMessageOrBuilder(); + } else { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + } + + /** + * + * + *
    +     * The time at which
    +     * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetStartTimeFieldBuilder() { + if (startTimeBuilder_ == null) { + startTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getStartTime(), getParentForChildren(), isClean()); + startTime_ = null; + } + return startTimeBuilder_; + } + + private com.google.protobuf.Timestamp cancelTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + cancelTimeBuilder_; + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + public com.google.protobuf.Timestamp getCancelTime() { + if (cancelTimeBuilder_ == null) { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } else { + return cancelTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cancelTime_ = value; + } else { + cancelTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (cancelTimeBuilder_ == null) { + cancelTime_ = builderForValue.build(); + } else { + cancelTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder mergeCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && cancelTime_ != null + && cancelTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCancelTimeBuilder().mergeFrom(value); + } else { + cancelTime_ = value; + } + } else { + cancelTimeBuilder_.mergeFrom(value); + } + if (cancelTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder clearCancelTime() { + bitField0_ = (bitField0_ & ~0x00000004); + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public com.google.protobuf.Timestamp.Builder getCancelTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetCancelTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + if (cancelTimeBuilder_ != null) { + return cancelTimeBuilder_.getMessageOrBuilder(); + } else { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCancelTimeFieldBuilder() { + if (cancelTimeBuilder_ == null) { + cancelTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCancelTime(), getParentForChildren(), isClean()); + cancelTime_ = null; + } + return cancelTimeBuilder_; + } + + private com.google.protobuf.Timestamp endTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + endTimeBuilder_; + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return Whether the endTime field is set. + */ + public boolean hasEndTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return The endTime. + */ + public com.google.protobuf.Timestamp getEndTime() { + if (endTimeBuilder_ == null) { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } else { + return endTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public Builder setEndTime(com.google.protobuf.Timestamp value) { + if (endTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + endTime_ = value; + } else { + endTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public Builder setEndTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (endTimeBuilder_ == null) { + endTime_ = builderForValue.build(); + } else { + endTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public Builder mergeEndTime(com.google.protobuf.Timestamp value) { + if (endTimeBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && endTime_ != null + && endTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getEndTimeBuilder().mergeFrom(value); + } else { + endTime_ = value; + } + } else { + endTimeBuilder_.mergeFrom(value); + } + if (endTime_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public Builder clearEndTime() { + bitField0_ = (bitField0_ & ~0x00000008); + endTime_ = null; + if (endTimeBuilder_ != null) { + endTimeBuilder_.dispose(); + endTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public com.google.protobuf.Timestamp.Builder getEndTimeBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetEndTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder() { + if (endTimeBuilder_ != null) { + return endTimeBuilder_.getMessageOrBuilder(); + } else { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetEndTimeFieldBuilder() { + if (endTimeBuilder_ == null) { + endTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getEndTime(), getParentForChildren(), isClean()); + endTime_ = null; + } + return endTimeBuilder_; + } + + private int expectedFulfillmentPeriod_ = 0; + + /** + * + * + *
    +     * The expected fulfillment period of this update operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.FulfillmentPeriod expected_fulfillment_period = 5; + * + * + * @return The enum numeric value on the wire for expectedFulfillmentPeriod. + */ + @java.lang.Override + public int getExpectedFulfillmentPeriodValue() { + return expectedFulfillmentPeriod_; + } + + /** + * + * + *
    +     * The expected fulfillment period of this update operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.FulfillmentPeriod expected_fulfillment_period = 5; + * + * + * @param value The enum numeric value on the wire for expectedFulfillmentPeriod to set. + * @return This builder for chaining. + */ + public Builder setExpectedFulfillmentPeriodValue(int value) { + expectedFulfillmentPeriod_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The expected fulfillment period of this update operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.FulfillmentPeriod expected_fulfillment_period = 5; + * + * + * @return The expectedFulfillmentPeriod. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.FulfillmentPeriod getExpectedFulfillmentPeriod() { + com.google.spanner.admin.instance.v1.FulfillmentPeriod result = + com.google.spanner.admin.instance.v1.FulfillmentPeriod.forNumber( + expectedFulfillmentPeriod_); + return result == null + ? com.google.spanner.admin.instance.v1.FulfillmentPeriod.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * The expected fulfillment period of this update operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.FulfillmentPeriod expected_fulfillment_period = 5; + * + * + * @param value The expectedFulfillmentPeriod to set. + * @return This builder for chaining. + */ + public Builder setExpectedFulfillmentPeriod( + com.google.spanner.admin.instance.v1.FulfillmentPeriod value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + expectedFulfillmentPeriod_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The expected fulfillment period of this update operation.
    +     * 
    + * + * .google.spanner.admin.instance.v1.FulfillmentPeriod expected_fulfillment_period = 5; + * + * + * @return This builder for chaining. + */ + public Builder clearExpectedFulfillmentPeriod() { + bitField0_ = (bitField0_ & ~0x00000010); + expectedFulfillmentPeriod_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.UpdateInstanceMetadata) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.UpdateInstanceMetadata) + private static final com.google.spanner.admin.instance.v1.UpdateInstanceMetadata DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.UpdateInstanceMetadata(); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateInstanceMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.spanner.admin.instance.v1.UpdateInstanceMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceMetadataOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceMetadataOrBuilder.java new file mode 100644 index 000000000000..db3de1e8ce1d --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceMetadataOrBuilder.java @@ -0,0 +1,216 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface UpdateInstanceMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.UpdateInstanceMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The desired end state of the update.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + * + * @return Whether the instance field is set. + */ + boolean hasInstance(); + + /** + * + * + *
    +   * The desired end state of the update.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + * + * @return The instance. + */ + com.google.spanner.admin.instance.v1.Instance getInstance(); + + /** + * + * + *
    +   * The desired end state of the update.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 1; + */ + com.google.spanner.admin.instance.v1.InstanceOrBuilder getInstanceOrBuilder(); + + /** + * + * + *
    +   * The time at which
    +   * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return Whether the startTime field is set. + */ + boolean hasStartTime(); + + /** + * + * + *
    +   * The time at which
    +   * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return The startTime. + */ + com.google.protobuf.Timestamp getStartTime(); + + /** + * + * + *
    +   * The time at which
    +   * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder(); + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + boolean hasCancelTime(); + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + com.google.protobuf.Timestamp getCancelTime(); + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder(); + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return Whether the endTime field is set. + */ + boolean hasEndTime(); + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return The endTime. + */ + com.google.protobuf.Timestamp getEndTime(); + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder(); + + /** + * + * + *
    +   * The expected fulfillment period of this update operation.
    +   * 
    + * + * .google.spanner.admin.instance.v1.FulfillmentPeriod expected_fulfillment_period = 5; + * + * + * @return The enum numeric value on the wire for expectedFulfillmentPeriod. + */ + int getExpectedFulfillmentPeriodValue(); + + /** + * + * + *
    +   * The expected fulfillment period of this update operation.
    +   * 
    + * + * .google.spanner.admin.instance.v1.FulfillmentPeriod expected_fulfillment_period = 5; + * + * + * @return The expectedFulfillmentPeriod. + */ + com.google.spanner.admin.instance.v1.FulfillmentPeriod getExpectedFulfillmentPeriod(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionMetadata.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionMetadata.java new file mode 100644 index 000000000000..3d0583b1877c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionMetadata.java @@ -0,0 +1,1592 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * Metadata type for the operation returned by
    + * [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata} + */ +@com.google.protobuf.Generated +public final class UpdateInstancePartitionMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata) + UpdateInstancePartitionMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateInstancePartitionMetadata"); + } + + // Use UpdateInstancePartitionMetadata.newBuilder() to construct. + private UpdateInstancePartitionMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateInstancePartitionMetadata() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.class, + com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.Builder.class); + } + + private int bitField0_; + public static final int INSTANCE_PARTITION_FIELD_NUMBER = 1; + private com.google.spanner.admin.instance.v1.InstancePartition instancePartition_; + + /** + * + * + *
    +   * The desired end state of the update.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + * + * @return Whether the instancePartition field is set. + */ + @java.lang.Override + public boolean hasInstancePartition() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The desired end state of the update.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + * + * @return The instancePartition. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstancePartition getInstancePartition() { + return instancePartition_ == null + ? com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance() + : instancePartition_; + } + + /** + * + * + *
    +   * The desired end state of the update.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder + getInstancePartitionOrBuilder() { + return instancePartition_ == null + ? com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance() + : instancePartition_; + } + + public static final int START_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp startTime_; + + /** + * + * + *
    +   * The time at which
    +   * [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return Whether the startTime field is set. + */ + @java.lang.Override + public boolean hasStartTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * The time at which
    +   * [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return The startTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getStartTime() { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + + /** + * + * + *
    +   * The time at which
    +   * [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + + public static final int CANCEL_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp cancelTime_; + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + @java.lang.Override + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCancelTime() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + public static final int END_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp endTime_; + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return Whether the endTime field is set. + */ + @java.lang.Override + public boolean hasEndTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return The endTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getEndTime() { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder() { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getInstancePartition()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getStartTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(3, getCancelTime()); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(4, getEndTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getInstancePartition()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getStartTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getCancelTime()); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getEndTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata other = + (com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata) obj; + + if (hasInstancePartition() != other.hasInstancePartition()) return false; + if (hasInstancePartition()) { + if (!getInstancePartition().equals(other.getInstancePartition())) return false; + } + if (hasStartTime() != other.hasStartTime()) return false; + if (hasStartTime()) { + if (!getStartTime().equals(other.getStartTime())) return false; + } + if (hasCancelTime() != other.hasCancelTime()) return false; + if (hasCancelTime()) { + if (!getCancelTime().equals(other.getCancelTime())) return false; + } + if (hasEndTime() != other.hasEndTime()) return false; + if (hasEndTime()) { + if (!getEndTime().equals(other.getEndTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasInstancePartition()) { + hash = (37 * hash) + INSTANCE_PARTITION_FIELD_NUMBER; + hash = (53 * hash) + getInstancePartition().hashCode(); + } + if (hasStartTime()) { + hash = (37 * hash) + START_TIME_FIELD_NUMBER; + hash = (53 * hash) + getStartTime().hashCode(); + } + if (hasCancelTime()) { + hash = (37 * hash) + CANCEL_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCancelTime().hashCode(); + } + if (hasEndTime()) { + hash = (37 * hash) + END_TIME_FIELD_NUMBER; + hash = (53 * hash) + getEndTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { 
+ return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static 
com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Metadata type for the operation returned by
    +   * [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata) + com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.class, + com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.Builder.class); + } + + // Construct using + // com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetInstancePartitionFieldBuilder(); + internalGetStartTimeFieldBuilder(); + internalGetCancelTimeFieldBuilder(); + internalGetEndTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + instancePartition_ = null; + if (instancePartitionBuilder_ != null) { + instancePartitionBuilder_.dispose(); + instancePartitionBuilder_ = null; 
+ } + startTime_ = null; + if (startTimeBuilder_ != null) { + startTimeBuilder_.dispose(); + startTimeBuilder_ = null; + } + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + endTime_ = null; + if (endTimeBuilder_ != null) { + endTimeBuilder_.dispose(); + endTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionMetadata_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata build() { + com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata buildPartial() { + com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata result = + new com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.instancePartition_ = + instancePartitionBuilder_ == null + ? 
instancePartition_ + : instancePartitionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.startTime_ = startTimeBuilder_ == null ? startTime_ : startTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.cancelTime_ = cancelTimeBuilder_ == null ? cancelTime_ : cancelTimeBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.endTime_ = endTimeBuilder_ == null ? endTime_ : endTimeBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata) { + return mergeFrom( + (com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata other) { + if (other + == com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata + .getDefaultInstance()) return this; + if (other.hasInstancePartition()) { + mergeInstancePartition(other.getInstancePartition()); + } + if (other.hasStartTime()) { + mergeStartTime(other.getStartTime()); + } + if (other.hasCancelTime()) { + mergeCancelTime(other.getCancelTime()); + } + if (other.hasEndTime()) { + mergeEndTime(other.getEndTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + 
boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetInstancePartitionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetStartTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetCancelTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage(internalGetEndTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.admin.instance.v1.InstancePartition instancePartition_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstancePartition, + com.google.spanner.admin.instance.v1.InstancePartition.Builder, + com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder> + instancePartitionBuilder_; + + /** + * + * + *
    +     * The desired end state of the update.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + * + * @return Whether the instancePartition field is set. + */ + public boolean hasInstancePartition() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * The desired end state of the update.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + * + * @return The instancePartition. + */ + public com.google.spanner.admin.instance.v1.InstancePartition getInstancePartition() { + if (instancePartitionBuilder_ == null) { + return instancePartition_ == null + ? com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance() + : instancePartition_; + } else { + return instancePartitionBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The desired end state of the update.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + */ + public Builder setInstancePartition( + com.google.spanner.admin.instance.v1.InstancePartition value) { + if (instancePartitionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + instancePartition_ = value; + } else { + instancePartitionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The desired end state of the update.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + */ + public Builder setInstancePartition( + com.google.spanner.admin.instance.v1.InstancePartition.Builder builderForValue) { + if (instancePartitionBuilder_ == null) { + instancePartition_ = builderForValue.build(); + } else { + instancePartitionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The desired end state of the update.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + */ + public Builder mergeInstancePartition( + com.google.spanner.admin.instance.v1.InstancePartition value) { + if (instancePartitionBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && instancePartition_ != null + && instancePartition_ + != com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance()) { + getInstancePartitionBuilder().mergeFrom(value); + } else { + instancePartition_ = value; + } + } else { + instancePartitionBuilder_.mergeFrom(value); + } + if (instancePartition_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The desired end state of the update.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + */ + public Builder clearInstancePartition() { + bitField0_ = (bitField0_ & ~0x00000001); + instancePartition_ = null; + if (instancePartitionBuilder_ != null) { + instancePartitionBuilder_.dispose(); + instancePartitionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The desired end state of the update.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + */ + public com.google.spanner.admin.instance.v1.InstancePartition.Builder + getInstancePartitionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetInstancePartitionFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The desired end state of the update.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + */ + public com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder + getInstancePartitionOrBuilder() { + if (instancePartitionBuilder_ != null) { + return instancePartitionBuilder_.getMessageOrBuilder(); + } else { + return instancePartition_ == null + ? com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance() + : instancePartition_; + } + } + + /** + * + * + *
    +     * The desired end state of the update.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstancePartition, + com.google.spanner.admin.instance.v1.InstancePartition.Builder, + com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder> + internalGetInstancePartitionFieldBuilder() { + if (instancePartitionBuilder_ == null) { + instancePartitionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstancePartition, + com.google.spanner.admin.instance.v1.InstancePartition.Builder, + com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder>( + getInstancePartition(), getParentForChildren(), isClean()); + instancePartition_ = null; + } + return instancePartitionBuilder_; + } + + private com.google.protobuf.Timestamp startTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + startTimeBuilder_; + + /** + * + * + *
    +     * The time at which
    +     * [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return Whether the startTime field is set. + */ + public boolean hasStartTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * The time at which
    +     * [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return The startTime. + */ + public com.google.protobuf.Timestamp getStartTime() { + if (startTimeBuilder_ == null) { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } else { + return startTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The time at which
    +     * [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder setStartTime(com.google.protobuf.Timestamp value) { + if (startTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + startTime_ = value; + } else { + startTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which
    +     * [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder setStartTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (startTimeBuilder_ == null) { + startTime_ = builderForValue.build(); + } else { + startTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which
    +     * [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder mergeStartTime(com.google.protobuf.Timestamp value) { + if (startTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && startTime_ != null + && startTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getStartTimeBuilder().mergeFrom(value); + } else { + startTime_ = value; + } + } else { + startTimeBuilder_.mergeFrom(value); + } + if (startTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The time at which
    +     * [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder clearStartTime() { + bitField0_ = (bitField0_ & ~0x00000002); + startTime_ = null; + if (startTimeBuilder_ != null) { + startTimeBuilder_.dispose(); + startTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which
    +     * [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public com.google.protobuf.Timestamp.Builder getStartTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetStartTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The time at which
    +     * [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { + if (startTimeBuilder_ != null) { + return startTimeBuilder_.getMessageOrBuilder(); + } else { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + } + + /** + * + * + *
    +     * The time at which
    +     * [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]
    +     * request was received.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetStartTimeFieldBuilder() { + if (startTimeBuilder_ == null) { + startTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getStartTime(), getParentForChildren(), isClean()); + startTime_ = null; + } + return startTimeBuilder_; + } + + private com.google.protobuf.Timestamp cancelTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + cancelTimeBuilder_; + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + public com.google.protobuf.Timestamp getCancelTime() { + if (cancelTimeBuilder_ == null) { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } else { + return cancelTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cancelTime_ = value; + } else { + cancelTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (cancelTimeBuilder_ == null) { + cancelTime_ = builderForValue.build(); + } else { + cancelTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder mergeCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && cancelTime_ != null + && cancelTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCancelTimeBuilder().mergeFrom(value); + } else { + cancelTime_ = value; + } + } else { + cancelTimeBuilder_.mergeFrom(value); + } + if (cancelTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder clearCancelTime() { + bitField0_ = (bitField0_ & ~0x00000004); + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public com.google.protobuf.Timestamp.Builder getCancelTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetCancelTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + if (cancelTimeBuilder_ != null) { + return cancelTimeBuilder_.getMessageOrBuilder(); + } else { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } + } + + /** + * + * + *
    +     * The time at which this operation was cancelled. If set, this operation is
    +     * in the process of undoing itself (which is guaranteed to succeed) and
    +     * cannot be cancelled again.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCancelTimeFieldBuilder() { + if (cancelTimeBuilder_ == null) { + cancelTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCancelTime(), getParentForChildren(), isClean()); + cancelTime_ = null; + } + return cancelTimeBuilder_; + } + + private com.google.protobuf.Timestamp endTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + endTimeBuilder_; + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return Whether the endTime field is set. + */ + public boolean hasEndTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return The endTime. + */ + public com.google.protobuf.Timestamp getEndTime() { + if (endTimeBuilder_ == null) { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } else { + return endTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public Builder setEndTime(com.google.protobuf.Timestamp value) { + if (endTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + endTime_ = value; + } else { + endTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public Builder setEndTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (endTimeBuilder_ == null) { + endTime_ = builderForValue.build(); + } else { + endTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public Builder mergeEndTime(com.google.protobuf.Timestamp value) { + if (endTimeBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && endTime_ != null + && endTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getEndTimeBuilder().mergeFrom(value); + } else { + endTime_ = value; + } + } else { + endTimeBuilder_.mergeFrom(value); + } + if (endTime_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public Builder clearEndTime() { + bitField0_ = (bitField0_ & ~0x00000008); + endTime_ = null; + if (endTimeBuilder_ != null) { + endTimeBuilder_.dispose(); + endTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public com.google.protobuf.Timestamp.Builder getEndTimeBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetEndTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + public com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder() { + if (endTimeBuilder_ != null) { + return endTimeBuilder_.getMessageOrBuilder(); + } else { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + } + + /** + * + * + *
    +     * The time at which this operation failed or was completed successfully.
    +     * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetEndTimeFieldBuilder() { + if (endTimeBuilder_ == null) { + endTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getEndTime(), getParentForChildren(), isClean()); + endTime_ = null; + } + return endTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata) + private static final com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata(); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateInstancePartitionMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionMetadataOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionMetadataOrBuilder.java new file mode 100644 index 000000000000..161a8874989c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionMetadataOrBuilder.java @@ -0,0 +1,188 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface UpdateInstancePartitionMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The desired end state of the update.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + * + * @return Whether the instancePartition field is set. + */ + boolean hasInstancePartition(); + + /** + * + * + *
    +   * The desired end state of the update.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + * + * @return The instancePartition. + */ + com.google.spanner.admin.instance.v1.InstancePartition getInstancePartition(); + + /** + * + * + *
    +   * The desired end state of the update.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1; + */ + com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder getInstancePartitionOrBuilder(); + + /** + * + * + *
    +   * The time at which
    +   * [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return Whether the startTime field is set. + */ + boolean hasStartTime(); + + /** + * + * + *
    +   * The time at which
    +   * [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return The startTime. + */ + com.google.protobuf.Timestamp getStartTime(); + + /** + * + * + *
    +   * The time at which
    +   * [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]
    +   * request was received.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder(); + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + boolean hasCancelTime(); + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + com.google.protobuf.Timestamp getCancelTime(); + + /** + * + * + *
    +   * The time at which this operation was cancelled. If set, this operation is
    +   * in the process of undoing itself (which is guaranteed to succeed) and
    +   * cannot be cancelled again.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder(); + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return Whether the endTime field is set. + */ + boolean hasEndTime(); + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + * + * @return The endTime. + */ + com.google.protobuf.Timestamp getEndTime(); + + /** + * + * + *
    +   * The time at which this operation failed or was completed successfully.
    +   * 
    + * + * .google.protobuf.Timestamp end_time = 4; + */ + com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionRequest.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionRequest.java new file mode 100644 index 000000000000..3555a22092bd --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionRequest.java @@ -0,0 +1,1127 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The request for
    + * [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.UpdateInstancePartitionRequest} + */ +@com.google.protobuf.Generated +public final class UpdateInstancePartitionRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.UpdateInstancePartitionRequest) + UpdateInstancePartitionRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateInstancePartitionRequest"); + } + + // Use UpdateInstancePartitionRequest.newBuilder() to construct. + private UpdateInstancePartitionRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateInstancePartitionRequest() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.class, + com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.Builder.class); + } + + private int bitField0_; + public static final int INSTANCE_PARTITION_FIELD_NUMBER = 1; + private com.google.spanner.admin.instance.v1.InstancePartition instancePartition_; + + /** + * + * + *
    +   * Required. The instance partition to update, which must always include the
    +   * instance partition name. Otherwise, only fields mentioned in
    +   * [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask]
    +   * need be included.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the instancePartition field is set. + */ + @java.lang.Override + public boolean hasInstancePartition() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Required. The instance partition to update, which must always include the
    +   * instance partition name. Otherwise, only fields mentioned in
    +   * [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask]
    +   * need be included.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The instancePartition. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstancePartition getInstancePartition() { + return instancePartition_ == null + ? com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance() + : instancePartition_; + } + + /** + * + * + *
    +   * Required. The instance partition to update, which must always include the
    +   * instance partition name. Otherwise, only fields mentioned in
    +   * [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask]
    +   * need be included.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder + getInstancePartitionOrBuilder() { + return instancePartition_ == null + ? com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance() + : instancePartition_; + } + + public static final int FIELD_MASK_FIELD_NUMBER = 2; + private com.google.protobuf.FieldMask fieldMask_; + + /** + * + * + *
    +   * Required. A mask specifying which fields in
    +   * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +   * should be updated. The field mask must always be specified; this prevents
    +   * any future fields in
    +   * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +   * from being erased accidentally by clients that do not know about them.
    +   * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the fieldMask field is set. + */ + @java.lang.Override + public boolean hasFieldMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Required. A mask specifying which fields in
    +   * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +   * should be updated. The field mask must always be specified; this prevents
    +   * any future fields in
    +   * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +   * from being erased accidentally by clients that do not know about them.
    +   * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The fieldMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getFieldMask() { + return fieldMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : fieldMask_; + } + + /** + * + * + *
    +   * Required. A mask specifying which fields in
    +   * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +   * should be updated. The field mask must always be specified; this prevents
    +   * any future fields in
    +   * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +   * from being erased accidentally by clients that do not know about them.
    +   * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getFieldMaskOrBuilder() { + return fieldMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : fieldMask_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getInstancePartition()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getFieldMask()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getInstancePartition()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getFieldMask()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest other = + (com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest) obj; + + if (hasInstancePartition() != other.hasInstancePartition()) return false; + if (hasInstancePartition()) { + if 
(!getInstancePartition().equals(other.getInstancePartition())) return false; + } + if (hasFieldMask() != other.hasFieldMask()) return false; + if (hasFieldMask()) { + if (!getFieldMask().equals(other.getFieldMask())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasInstancePartition()) { + hash = (37 * hash) + INSTANCE_PARTITION_FIELD_NUMBER; + hash = (53 * hash) + getInstancePartition().hashCode(); + } + if (hasFieldMask()) { + hash = (37 * hash) + FIELD_MASK_FIELD_NUMBER; + hash = (53 * hash) + getFieldMask().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.UpdateInstancePartitionRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.UpdateInstancePartitionRequest) + com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.class, + com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.Builder.class); + } + + // Construct using + // com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetInstancePartitionFieldBuilder(); + internalGetFieldMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + instancePartition_ = null; + if (instancePartitionBuilder_ != null) { + instancePartitionBuilder_.dispose(); + instancePartitionBuilder_ = null; + } + fieldMask_ = null; + if (fieldMaskBuilder_ != null) { + 
fieldMaskBuilder_.dispose(); + fieldMaskBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest build() { + com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest buildPartial() { + com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest result = + new com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.instancePartition_ = + instancePartitionBuilder_ == null + ? instancePartition_ + : instancePartitionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.fieldMask_ = fieldMaskBuilder_ == null ? 
fieldMask_ : fieldMaskBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest) { + return mergeFrom( + (com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest other) { + if (other + == com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest + .getDefaultInstance()) return this; + if (other.hasInstancePartition()) { + mergeInstancePartition(other.getInstancePartition()); + } + if (other.hasFieldMask()) { + mergeFieldMask(other.getFieldMask()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetInstancePartitionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetFieldMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.admin.instance.v1.InstancePartition instancePartition_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstancePartition, + com.google.spanner.admin.instance.v1.InstancePartition.Builder, + com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder> + instancePartitionBuilder_; + + /** + * + * + *
    +     * Required. The instance partition to update, which must always include the
    +     * instance partition name. Otherwise, only fields mentioned in
    +     * [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask]
    +     * need be included.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the instancePartition field is set. + */ + public boolean hasInstancePartition() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Required. The instance partition to update, which must always include the
    +     * instance partition name. Otherwise, only fields mentioned in
    +     * [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask]
    +     * need be included.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The instancePartition. + */ + public com.google.spanner.admin.instance.v1.InstancePartition getInstancePartition() { + if (instancePartitionBuilder_ == null) { + return instancePartition_ == null + ? com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance() + : instancePartition_; + } else { + return instancePartitionBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. The instance partition to update, which must always include the
    +     * instance partition name. Otherwise, only fields mentioned in
    +     * [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask]
    +     * need be included.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setInstancePartition( + com.google.spanner.admin.instance.v1.InstancePartition value) { + if (instancePartitionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + instancePartition_ = value; + } else { + instancePartitionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance partition to update, which must always include the
    +     * instance partition name. Otherwise, only fields mentioned in
    +     * [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask]
    +     * need be included.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setInstancePartition( + com.google.spanner.admin.instance.v1.InstancePartition.Builder builderForValue) { + if (instancePartitionBuilder_ == null) { + instancePartition_ = builderForValue.build(); + } else { + instancePartitionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance partition to update, which must always include the
    +     * instance partition name. Otherwise, only fields mentioned in
    +     * [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask]
    +     * need be included.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeInstancePartition( + com.google.spanner.admin.instance.v1.InstancePartition value) { + if (instancePartitionBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && instancePartition_ != null + && instancePartition_ + != com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance()) { + getInstancePartitionBuilder().mergeFrom(value); + } else { + instancePartition_ = value; + } + } else { + instancePartitionBuilder_.mergeFrom(value); + } + if (instancePartition_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. The instance partition to update, which must always include the
    +     * instance partition name. Otherwise, only fields mentioned in
    +     * [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask]
    +     * need be included.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearInstancePartition() { + bitField0_ = (bitField0_ & ~0x00000001); + instancePartition_ = null; + if (instancePartitionBuilder_ != null) { + instancePartitionBuilder_.dispose(); + instancePartitionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance partition to update, which must always include the
    +     * instance partition name. Otherwise, only fields mentioned in
    +     * [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask]
    +     * need be included.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.InstancePartition.Builder + getInstancePartitionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetInstancePartitionFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. The instance partition to update, which must always include the
    +     * instance partition name. Otherwise, only fields mentioned in
    +     * [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask]
    +     * need be included.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder + getInstancePartitionOrBuilder() { + if (instancePartitionBuilder_ != null) { + return instancePartitionBuilder_.getMessageOrBuilder(); + } else { + return instancePartition_ == null + ? com.google.spanner.admin.instance.v1.InstancePartition.getDefaultInstance() + : instancePartition_; + } + } + + /** + * + * + *
    +     * Required. The instance partition to update, which must always include the
    +     * instance partition name. Otherwise, only fields mentioned in
    +     * [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask]
    +     * need be included.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstancePartition, + com.google.spanner.admin.instance.v1.InstancePartition.Builder, + com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder> + internalGetInstancePartitionFieldBuilder() { + if (instancePartitionBuilder_ == null) { + instancePartitionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstancePartition, + com.google.spanner.admin.instance.v1.InstancePartition.Builder, + com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder>( + getInstancePartition(), getParentForChildren(), isClean()); + instancePartition_ = null; + } + return instancePartitionBuilder_; + } + + private com.google.protobuf.FieldMask fieldMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + fieldMaskBuilder_; + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +     * should be updated. The field mask must always be specified; this prevents
    +     * any future fields in
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +     * from being erased accidentally by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the fieldMask field is set. + */ + public boolean hasFieldMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +     * should be updated. The field mask must always be specified; this prevents
    +     * any future fields in
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +     * from being erased accidentally by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The fieldMask. + */ + public com.google.protobuf.FieldMask getFieldMask() { + if (fieldMaskBuilder_ == null) { + return fieldMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : fieldMask_; + } else { + return fieldMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +     * should be updated. The field mask must always be specified; this prevents
    +     * any future fields in
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +     * from being erased accidentally by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setFieldMask(com.google.protobuf.FieldMask value) { + if (fieldMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + fieldMask_ = value; + } else { + fieldMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +     * should be updated. The field mask must always be specified; this prevents
    +     * any future fields in
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +     * from being erased accidentally by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setFieldMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (fieldMaskBuilder_ == null) { + fieldMask_ = builderForValue.build(); + } else { + fieldMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +     * should be updated. The field mask must always be specified; this prevents
    +     * any future fields in
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +     * from being erased accidentally by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeFieldMask(com.google.protobuf.FieldMask value) { + if (fieldMaskBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && fieldMask_ != null + && fieldMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getFieldMaskBuilder().mergeFrom(value); + } else { + fieldMask_ = value; + } + } else { + fieldMaskBuilder_.mergeFrom(value); + } + if (fieldMask_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +     * should be updated. The field mask must always be specified; this prevents
    +     * any future fields in
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +     * from being erased accidentally by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearFieldMask() { + bitField0_ = (bitField0_ & ~0x00000002); + fieldMask_ = null; + if (fieldMaskBuilder_ != null) { + fieldMaskBuilder_.dispose(); + fieldMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +     * should be updated. The field mask must always be specified; this prevents
    +     * any future fields in
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +     * from being erased accidentally by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMask.Builder getFieldMaskBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetFieldMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +     * should be updated. The field mask must always be specified; this prevents
    +     * any future fields in
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +     * from being erased accidentally by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMaskOrBuilder getFieldMaskOrBuilder() { + if (fieldMaskBuilder_ != null) { + return fieldMaskBuilder_.getMessageOrBuilder(); + } else { + return fieldMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : fieldMask_; + } + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +     * should be updated. The field mask must always be specified; this prevents
    +     * any future fields in
    +     * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +     * from being erased accidentally by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetFieldMaskFieldBuilder() { + if (fieldMaskBuilder_ == null) { + fieldMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getFieldMask(), getParentForChildren(), isClean()); + fieldMask_ = null; + } + return fieldMaskBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.UpdateInstancePartitionRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.UpdateInstancePartitionRequest) + private static final com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest(); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateInstancePartitionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + 
throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionRequestOrBuilder.java new file mode 100644 index 000000000000..a6b59e9c8ee3 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionRequestOrBuilder.java @@ -0,0 +1,135 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface UpdateInstancePartitionRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.UpdateInstancePartitionRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The instance partition to update, which must always include the
    +   * instance partition name. Otherwise, only fields mentioned in
    +   * [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask]
    +   * need be included.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the instancePartition field is set. + */ + boolean hasInstancePartition(); + + /** + * + * + *
    +   * Required. The instance partition to update, which must always include the
    +   * instance partition name. Otherwise, only fields mentioned in
    +   * [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask]
    +   * need be included.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The instancePartition. + */ + com.google.spanner.admin.instance.v1.InstancePartition getInstancePartition(); + + /** + * + * + *
    +   * Required. The instance partition to update, which must always include the
    +   * instance partition name. Otherwise, only fields mentioned in
    +   * [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask]
    +   * need be included.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.InstancePartition instance_partition = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.instance.v1.InstancePartitionOrBuilder getInstancePartitionOrBuilder(); + + /** + * + * + *
    +   * Required. A mask specifying which fields in
    +   * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +   * should be updated. The field mask must always be specified; this prevents
    +   * any future fields in
    +   * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +   * from being erased accidentally by clients that do not know about them.
    +   * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the fieldMask field is set. + */ + boolean hasFieldMask(); + + /** + * + * + *
    +   * Required. A mask specifying which fields in
    +   * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +   * should be updated. The field mask must always be specified; this prevents
    +   * any future fields in
    +   * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +   * from being erased accidentally by clients that do not know about them.
    +   * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The fieldMask. + */ + com.google.protobuf.FieldMask getFieldMask(); + + /** + * + * + *
    +   * Required. A mask specifying which fields in
    +   * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +   * should be updated. The field mask must always be specified; this prevents
    +   * any future fields in
    +   * [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
    +   * from being erased accidentally by clients that do not know about them.
    +   * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.protobuf.FieldMaskOrBuilder getFieldMaskOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceRequest.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceRequest.java new file mode 100644 index 000000000000..45abb0f03130 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceRequest.java @@ -0,0 +1,1095 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The request for
    + * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.UpdateInstanceRequest} + */ +@com.google.protobuf.Generated +public final class UpdateInstanceRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.UpdateInstanceRequest) + UpdateInstanceRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateInstanceRequest"); + } + + // Use UpdateInstanceRequest.newBuilder() to construct. + private UpdateInstanceRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateInstanceRequest() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstanceRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstanceRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.UpdateInstanceRequest.class, + com.google.spanner.admin.instance.v1.UpdateInstanceRequest.Builder.class); + } + + private int bitField0_; + public static final int INSTANCE_FIELD_NUMBER = 1; + private com.google.spanner.admin.instance.v1.Instance instance_; + + /** + * + * + *
    +   * Required. The instance to update, which must always include the instance
    +   * name.  Otherwise, only fields mentioned in
    +   * [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
    +   * need be included.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the instance field is set. + */ + @java.lang.Override + public boolean hasInstance() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Required. The instance to update, which must always include the instance
    +   * name.  Otherwise, only fields mentioned in
    +   * [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
    +   * need be included.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The instance. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance getInstance() { + return instance_ == null + ? com.google.spanner.admin.instance.v1.Instance.getDefaultInstance() + : instance_; + } + + /** + * + * + *
    +   * Required. The instance to update, which must always include the instance
    +   * name.  Otherwise, only fields mentioned in
    +   * [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
    +   * need be included.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceOrBuilder getInstanceOrBuilder() { + return instance_ == null + ? com.google.spanner.admin.instance.v1.Instance.getDefaultInstance() + : instance_; + } + + public static final int FIELD_MASK_FIELD_NUMBER = 2; + private com.google.protobuf.FieldMask fieldMask_; + + /** + * + * + *
    +   * Required. A mask specifying which fields in
    +   * [Instance][google.spanner.admin.instance.v1.Instance] should be updated.
    +   * The field mask must always be specified; this prevents any future fields in
    +   * [Instance][google.spanner.admin.instance.v1.Instance] from being erased
    +   * accidentally by clients that do not know about them.
    +   * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the fieldMask field is set. + */ + @java.lang.Override + public boolean hasFieldMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Required. A mask specifying which fields in
    +   * [Instance][google.spanner.admin.instance.v1.Instance] should be updated.
    +   * The field mask must always be specified; this prevents any future fields in
    +   * [Instance][google.spanner.admin.instance.v1.Instance] from being erased
    +   * accidentally by clients that do not know about them.
    +   * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The fieldMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getFieldMask() { + return fieldMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : fieldMask_; + } + + /** + * + * + *
    +   * Required. A mask specifying which fields in
    +   * [Instance][google.spanner.admin.instance.v1.Instance] should be updated.
    +   * The field mask must always be specified; this prevents any future fields in
    +   * [Instance][google.spanner.admin.instance.v1.Instance] from being erased
    +   * accidentally by clients that do not know about them.
    +   * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getFieldMaskOrBuilder() { + return fieldMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : fieldMask_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getInstance()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getFieldMask()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getInstance()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getFieldMask()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.UpdateInstanceRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.UpdateInstanceRequest other = + (com.google.spanner.admin.instance.v1.UpdateInstanceRequest) obj; + + if (hasInstance() != other.hasInstance()) return false; + if (hasInstance()) { + if (!getInstance().equals(other.getInstance())) return false; + } + if (hasFieldMask() != other.hasFieldMask()) return false; + 
if (hasFieldMask()) { + if (!getFieldMask().equals(other.getFieldMask())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasInstance()) { + hash = (37 * hash) + INSTANCE_FIELD_NUMBER; + hash = (53 * hash) + getInstance().hashCode(); + } + if (hasFieldMask()) { + hash = (37 * hash) + FIELD_MASK_FIELD_NUMBER; + hash = (53 * hash) + getFieldMask().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.admin.instance.v1.UpdateInstanceRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + 
@java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.UpdateInstanceRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.UpdateInstanceRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.UpdateInstanceRequest) + com.google.spanner.admin.instance.v1.UpdateInstanceRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstanceRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstanceRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.UpdateInstanceRequest.class, + com.google.spanner.admin.instance.v1.UpdateInstanceRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.UpdateInstanceRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetInstanceFieldBuilder(); + internalGetFieldMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + instance_ = null; + if (instanceBuilder_ != null) { + instanceBuilder_.dispose(); + instanceBuilder_ = null; + } + fieldMask_ = null; + if (fieldMaskBuilder_ != null) { + fieldMaskBuilder_.dispose(); + fieldMaskBuilder_ = null; + } + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_UpdateInstanceRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstanceRequest getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.UpdateInstanceRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstanceRequest build() { + com.google.spanner.admin.instance.v1.UpdateInstanceRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstanceRequest buildPartial() { + com.google.spanner.admin.instance.v1.UpdateInstanceRequest result = + new com.google.spanner.admin.instance.v1.UpdateInstanceRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.UpdateInstanceRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.instance_ = instanceBuilder_ == null ? instance_ : instanceBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.fieldMask_ = fieldMaskBuilder_ == null ? 
fieldMask_ : fieldMaskBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.UpdateInstanceRequest) { + return mergeFrom((com.google.spanner.admin.instance.v1.UpdateInstanceRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.UpdateInstanceRequest other) { + if (other == com.google.spanner.admin.instance.v1.UpdateInstanceRequest.getDefaultInstance()) + return this; + if (other.hasInstance()) { + mergeInstance(other.getInstance()); + } + if (other.hasFieldMask()) { + mergeFieldMask(other.getFieldMask()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetInstanceFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetFieldMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + 
onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.admin.instance.v1.Instance instance_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.Instance, + com.google.spanner.admin.instance.v1.Instance.Builder, + com.google.spanner.admin.instance.v1.InstanceOrBuilder> + instanceBuilder_; + + /** + * + * + *
    +     * Required. The instance to update, which must always include the instance
    +     * name.  Otherwise, only fields mentioned in
    +     * [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
    +     * need be included.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the instance field is set. + */ + public boolean hasInstance() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Required. The instance to update, which must always include the instance
    +     * name.  Otherwise, only fields mentioned in
    +     * [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
    +     * need be included.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The instance. + */ + public com.google.spanner.admin.instance.v1.Instance getInstance() { + if (instanceBuilder_ == null) { + return instance_ == null + ? com.google.spanner.admin.instance.v1.Instance.getDefaultInstance() + : instance_; + } else { + return instanceBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. The instance to update, which must always include the instance
    +     * name.  Otherwise, only fields mentioned in
    +     * [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
    +     * need be included.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setInstance(com.google.spanner.admin.instance.v1.Instance value) { + if (instanceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + instance_ = value; + } else { + instanceBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance to update, which must always include the instance
    +     * name.  Otherwise, only fields mentioned in
    +     * [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
    +     * need be included.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setInstance( + com.google.spanner.admin.instance.v1.Instance.Builder builderForValue) { + if (instanceBuilder_ == null) { + instance_ = builderForValue.build(); + } else { + instanceBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance to update, which must always include the instance
    +     * name.  Otherwise, only fields mentioned in
    +     * [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
    +     * need be included.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeInstance(com.google.spanner.admin.instance.v1.Instance value) { + if (instanceBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && instance_ != null + && instance_ != com.google.spanner.admin.instance.v1.Instance.getDefaultInstance()) { + getInstanceBuilder().mergeFrom(value); + } else { + instance_ = value; + } + } else { + instanceBuilder_.mergeFrom(value); + } + if (instance_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. The instance to update, which must always include the instance
    +     * name.  Otherwise, only fields mentioned in
    +     * [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
    +     * need be included.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearInstance() { + bitField0_ = (bitField0_ & ~0x00000001); + instance_ = null; + if (instanceBuilder_ != null) { + instanceBuilder_.dispose(); + instanceBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The instance to update, which must always include the instance
    +     * name.  Otherwise, only fields mentioned in
    +     * [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
    +     * need be included.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.Instance.Builder getInstanceBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetInstanceFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. The instance to update, which must always include the instance
    +     * name.  Otherwise, only fields mentioned in
    +     * [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
    +     * need be included.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.InstanceOrBuilder getInstanceOrBuilder() { + if (instanceBuilder_ != null) { + return instanceBuilder_.getMessageOrBuilder(); + } else { + return instance_ == null + ? com.google.spanner.admin.instance.v1.Instance.getDefaultInstance() + : instance_; + } + } + + /** + * + * + *
    +     * Required. The instance to update, which must always include the instance
    +     * name.  Otherwise, only fields mentioned in
    +     * [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
    +     * need be included.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.Instance, + com.google.spanner.admin.instance.v1.Instance.Builder, + com.google.spanner.admin.instance.v1.InstanceOrBuilder> + internalGetInstanceFieldBuilder() { + if (instanceBuilder_ == null) { + instanceBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.Instance, + com.google.spanner.admin.instance.v1.Instance.Builder, + com.google.spanner.admin.instance.v1.InstanceOrBuilder>( + getInstance(), getParentForChildren(), isClean()); + instance_ = null; + } + return instanceBuilder_; + } + + private com.google.protobuf.FieldMask fieldMask_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + fieldMaskBuilder_; + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [Instance][google.spanner.admin.instance.v1.Instance] should be updated.
    +     * The field mask must always be specified; this prevents any future fields in
    +     * [Instance][google.spanner.admin.instance.v1.Instance] from being erased
    +     * accidentally by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the fieldMask field is set. + */ + public boolean hasFieldMask() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [Instance][google.spanner.admin.instance.v1.Instance] should be updated.
    +     * The field mask must always be specified; this prevents any future fields in
    +     * [Instance][google.spanner.admin.instance.v1.Instance] from being erased
    +     * accidentally by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The fieldMask. + */ + public com.google.protobuf.FieldMask getFieldMask() { + if (fieldMaskBuilder_ == null) { + return fieldMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : fieldMask_; + } else { + return fieldMaskBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [Instance][google.spanner.admin.instance.v1.Instance] should be updated.
    +     * The field mask must always be specified; this prevents any future fields in
    +     * [Instance][google.spanner.admin.instance.v1.Instance] from being erased
    +     * accidentally by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setFieldMask(com.google.protobuf.FieldMask value) { + if (fieldMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + fieldMask_ = value; + } else { + fieldMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [Instance][google.spanner.admin.instance.v1.Instance] should be updated.
    +     * The field mask must always be specified; this prevents any future fields in
    +     * [Instance][google.spanner.admin.instance.v1.Instance] from being erased
    +     * accidentally by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setFieldMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (fieldMaskBuilder_ == null) { + fieldMask_ = builderForValue.build(); + } else { + fieldMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [Instance][google.spanner.admin.instance.v1.Instance] should be updated.
    +     * The field mask must always be specified; this prevents any future fields in
    +     * [Instance][google.spanner.admin.instance.v1.Instance] from being erased
    +     * accidentally by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeFieldMask(com.google.protobuf.FieldMask value) { + if (fieldMaskBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && fieldMask_ != null + && fieldMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getFieldMaskBuilder().mergeFrom(value); + } else { + fieldMask_ = value; + } + } else { + fieldMaskBuilder_.mergeFrom(value); + } + if (fieldMask_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [Instance][google.spanner.admin.instance.v1.Instance] should be updated.
    +     * The field mask must always be specified; this prevents any future fields in
    +     * [Instance][google.spanner.admin.instance.v1.Instance] from being erased
    +     * accidentally by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearFieldMask() { + bitField0_ = (bitField0_ & ~0x00000002); + fieldMask_ = null; + if (fieldMaskBuilder_ != null) { + fieldMaskBuilder_.dispose(); + fieldMaskBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [Instance][google.spanner.admin.instance.v1.Instance] should be updated.
    +     * The field mask must always be specified; this prevents any future fields in
    +     * [Instance][google.spanner.admin.instance.v1.Instance] from being erased
    +     * accidentally by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMask.Builder getFieldMaskBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetFieldMaskFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [Instance][google.spanner.admin.instance.v1.Instance] should be updated.
    +     * The field mask must always be specified; this prevents any future fields in
    +     * [Instance][google.spanner.admin.instance.v1.Instance] from being erased
    +     * accidentally by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMaskOrBuilder getFieldMaskOrBuilder() { + if (fieldMaskBuilder_ != null) { + return fieldMaskBuilder_.getMessageOrBuilder(); + } else { + return fieldMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : fieldMask_; + } + } + + /** + * + * + *
    +     * Required. A mask specifying which fields in
    +     * [Instance][google.spanner.admin.instance.v1.Instance] should be updated.
    +     * The field mask must always be specified; this prevents any future fields in
    +     * [Instance][google.spanner.admin.instance.v1.Instance] from being erased
    +     * accidentally by clients that do not know about them.
    +     * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + internalGetFieldMaskFieldBuilder() { + if (fieldMaskBuilder_ == null) { + fieldMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getFieldMask(), getParentForChildren(), isClean()); + fieldMask_ = null; + } + return fieldMaskBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.UpdateInstanceRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.UpdateInstanceRequest) + private static final com.google.spanner.admin.instance.v1.UpdateInstanceRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.UpdateInstanceRequest(); + } + + public static com.google.spanner.admin.instance.v1.UpdateInstanceRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateInstanceRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.UpdateInstanceRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceRequestOrBuilder.java new file mode 100644 index 000000000000..7afab12c38bb --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceRequestOrBuilder.java @@ -0,0 +1,132 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.admin.instance.v1; + +@com.google.protobuf.Generated +public interface UpdateInstanceRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.UpdateInstanceRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The instance to update, which must always include the instance
    +   * name.  Otherwise, only fields mentioned in
    +   * [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
    +   * need be included.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the instance field is set. + */ + boolean hasInstance(); + + /** + * + * + *
    +   * Required. The instance to update, which must always include the instance
    +   * name.  Otherwise, only fields mentioned in
    +   * [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
    +   * need be included.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The instance. + */ + com.google.spanner.admin.instance.v1.Instance getInstance(); + + /** + * + * + *
    +   * Required. The instance to update, which must always include the instance
    +   * name.  Otherwise, only fields mentioned in
    +   * [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
    +   * need be included.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance instance = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.instance.v1.InstanceOrBuilder getInstanceOrBuilder(); + + /** + * + * + *
    +   * Required. A mask specifying which fields in
    +   * [Instance][google.spanner.admin.instance.v1.Instance] should be updated.
    +   * The field mask must always be specified; this prevents any future fields in
    +   * [Instance][google.spanner.admin.instance.v1.Instance] from being erased
    +   * accidentally by clients that do not know about them.
    +   * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the fieldMask field is set. + */ + boolean hasFieldMask(); + + /** + * + * + *
    +   * Required. A mask specifying which fields in
    +   * [Instance][google.spanner.admin.instance.v1.Instance] should be updated.
    +   * The field mask must always be specified; this prevents any future fields in
    +   * [Instance][google.spanner.admin.instance.v1.Instance] from being erased
    +   * accidentally by clients that do not know about them.
    +   * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The fieldMask. + */ + com.google.protobuf.FieldMask getFieldMask(); + + /** + * + * + *
    +   * Required. A mask specifying which fields in
    +   * [Instance][google.spanner.admin.instance.v1.Instance] should be updated.
    +   * The field mask must always be specified; this prevents any future fields in
    +   * [Instance][google.spanner.admin.instance.v1.Instance] from being erased
    +   * accidentally by clients that do not know about them.
    +   * 
    + * + * .google.protobuf.FieldMask field_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.protobuf.FieldMaskOrBuilder getFieldMaskOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/proto/google/spanner/admin/instance/v1/common.proto b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/proto/google/spanner/admin/instance/v1/common.proto new file mode 100644 index 000000000000..0b5282c7d874 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/proto/google/spanner/admin/instance/v1/common.proto @@ -0,0 +1,64 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.spanner.admin.instance.v1; + +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/protobuf/timestamp.proto"; + +option csharp_namespace = "Google.Cloud.Spanner.Admin.Instance.V1"; +option go_package = "cloud.google.com/go/spanner/admin/instance/apiv1/instancepb;instancepb"; +option java_multiple_files = true; +option java_outer_classname = "CommonProto"; +option java_package = "com.google.spanner.admin.instance.v1"; +option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Instance\\V1"; +option ruby_package = "Google::Cloud::Spanner::Admin::Instance::V1"; + +// Encapsulates progress related information for a Cloud Spanner long +// running instance operations. 
+message OperationProgress { + // Percent completion of the operation. + // Values are between 0 and 100 inclusive. + int32 progress_percent = 1; + + // Time the request was received. + google.protobuf.Timestamp start_time = 2; + + // If set, the time at which this operation failed or was completed + // successfully. + google.protobuf.Timestamp end_time = 3; +} + +// Indicates the expected fulfillment period of an operation. +enum FulfillmentPeriod { + // Not specified. + FULFILLMENT_PERIOD_UNSPECIFIED = 0; + + // Normal fulfillment period. The operation is expected to complete within + // minutes. + FULFILLMENT_PERIOD_NORMAL = 1; + + // Extended fulfillment period. It can take up to an hour for the operation + // to complete. + FULFILLMENT_PERIOD_EXTENDED = 2; +} + +// ReplicaSelection identifies replicas with common properties. +message ReplicaSelection { + // Required. Name of the location of the replicas (e.g., "us-central1"). + string location = 1 [(google.api.field_behavior) = REQUIRED]; +} diff --git a/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/proto/google/spanner/admin/instance/v1/spanner_instance_admin.proto b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/proto/google/spanner/admin/instance/v1/spanner_instance_admin.proto new file mode 100644 index 000000000000..d16ab2ca5835 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-admin-instance-v1/src/main/proto/google/spanner/admin/instance/v1/spanner_instance_admin.proto @@ -0,0 +1,2184 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.spanner.admin.instance.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/iam/v1/iam_policy.proto"; +import "google/iam/v1/policy.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; +import "google/spanner/admin/instance/v1/common.proto"; + +option csharp_namespace = "Google.Cloud.Spanner.Admin.Instance.V1"; +option go_package = "cloud.google.com/go/spanner/admin/instance/apiv1/instancepb;instancepb"; +option java_multiple_files = true; +option java_outer_classname = "SpannerInstanceAdminProto"; +option java_package = "com.google.spanner.admin.instance.v1"; +option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Instance\\V1"; +option ruby_package = "Google::Cloud::Spanner::Admin::Instance::V1"; + +// Cloud Spanner Instance Admin API +// +// The Cloud Spanner Instance Admin API can be used to create, delete, +// modify and list instances. Instances are dedicated Cloud Spanner serving +// and storage resources to be used by Cloud Spanner databases. +// +// Each instance has a "configuration", which dictates where the +// serving resources for the Cloud Spanner instance are located (e.g., +// US-central, Europe). Configurations are created by Google based on +// resource availability. 
+// +// Cloud Spanner billing is based on the instances that exist and their +// sizes. After an instance exists, there are no additional +// per-database or per-operation charges for use of the instance +// (though there may be additional network bandwidth charges). +// Instances offer isolation: problems with databases in one instance +// will not affect other instances. However, within an instance +// databases can affect each other. For example, if one database in an +// instance receives a lot of requests and consumes most of the +// instance resources, fewer resources are available for other +// databases in that instance, and their performance may suffer. +service InstanceAdmin { + option (google.api.default_host) = "spanner.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/spanner.admin"; + + // Lists the supported instance configurations for a given project. + // + // Returns both Google-managed configurations and user-managed + // configurations. + rpc ListInstanceConfigs(ListInstanceConfigsRequest) + returns (ListInstanceConfigsResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*}/instanceConfigs" + }; + option (google.api.method_signature) = "parent"; + } + + // Gets information about a particular instance configuration. + rpc GetInstanceConfig(GetInstanceConfigRequest) returns (InstanceConfig) { + option (google.api.http) = { + get: "/v1/{name=projects/*/instanceConfigs/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Creates an instance configuration and begins preparing it to be used. The + // returned long-running operation + // can be used to track the progress of preparing the new + // instance configuration. The instance configuration name is assigned by the + // caller. If the named instance configuration already exists, + // `CreateInstanceConfig` returns `ALREADY_EXISTS`. 
+ // + // Immediately after the request returns: + // + // * The instance configuration is readable via the API, with all requested + // attributes. The instance configuration's + // [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + // field is set to true. Its state is `CREATING`. + // + // While the operation is pending: + // + // * Cancelling the operation renders the instance configuration immediately + // unreadable via the API. + // * Except for deleting the creating resource, all other attempts to modify + // the instance configuration are rejected. + // + // Upon completion of the returned operation: + // + // * Instances can be created using the instance configuration. + // * The instance configuration's + // [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + // field becomes false. Its state becomes `READY`. + // + // The returned long-running operation will + // have a name of the format + // `/operations/` and can be used to track + // creation of the instance configuration. The + // metadata field type is + // [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. + // The response field type is + // [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if + // successful. + // + // Authorization requires `spanner.instanceConfigs.create` permission on + // the resource + // [parent][google.spanner.admin.instance.v1.CreateInstanceConfigRequest.parent]. 
+ rpc CreateInstanceConfig(CreateInstanceConfigRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/{parent=projects/*}/instanceConfigs" + body: "*" + }; + option (google.api.method_signature) = + "parent,instance_config,instance_config_id"; + option (google.longrunning.operation_info) = { + response_type: "google.spanner.admin.instance.v1.InstanceConfig" + metadata_type: "google.spanner.admin.instance.v1.CreateInstanceConfigMetadata" + }; + } + + // Updates an instance configuration. The returned + // long-running operation can be used to track + // the progress of updating the instance. If the named instance configuration + // does not exist, returns `NOT_FOUND`. + // + // Only user-managed configurations can be updated. + // + // Immediately after the request returns: + // + // * The instance configuration's + // [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + // field is set to true. + // + // While the operation is pending: + // + // * Cancelling the operation sets its metadata's + // [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time]. + // The operation is guaranteed to succeed at undoing all changes, after + // which point it terminates with a `CANCELLED` status. + // * All other attempts to modify the instance configuration are rejected. + // * Reading the instance configuration via the API continues to give the + // pre-request values. + // + // Upon completion of the returned operation: + // + // * Creating instances using the instance configuration uses the new + // values. + // * The new values of the instance configuration are readable via the API. + // * The instance configuration's + // [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + // field becomes false. 
+ // + // The returned long-running operation will + // have a name of the format + // `/operations/` and can be used to track + // the instance configuration modification. The + // metadata field type is + // [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. + // The response field type is + // [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if + // successful. + // + // Authorization requires `spanner.instanceConfigs.update` permission on + // the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name]. + rpc UpdateInstanceConfig(UpdateInstanceConfigRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + patch: "/v1/{instance_config.name=projects/*/instanceConfigs/*}" + body: "*" + }; + option (google.api.method_signature) = "instance_config,update_mask"; + option (google.longrunning.operation_info) = { + response_type: "google.spanner.admin.instance.v1.InstanceConfig" + metadata_type: "google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata" + }; + } + + // Deletes the instance configuration. Deletion is only allowed when no + // instances are using the configuration. If any instances are using + // the configuration, returns `FAILED_PRECONDITION`. + // + // Only user-managed configurations can be deleted. + // + // Authorization requires `spanner.instanceConfigs.delete` permission on + // the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name]. + rpc DeleteInstanceConfig(DeleteInstanceConfigRequest) + returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/{name=projects/*/instanceConfigs/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Lists the user-managed instance configuration long-running + // operations in the given project. An instance + // configuration operation has a name of the form + // `projects//instanceConfigs//operations/`. 
+ // The long-running operation + // metadata field type + // `metadata.type_url` describes the type of the metadata. Operations returned + // include those that have completed/failed/canceled within the last 7 days, + // and pending operations. Operations returned are ordered by + // `operation.metadata.value.start_time` in descending order starting + // from the most recently started operation. + rpc ListInstanceConfigOperations(ListInstanceConfigOperationsRequest) + returns (ListInstanceConfigOperationsResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*}/instanceConfigOperations" + }; + option (google.api.method_signature) = "parent"; + } + + // Lists all instances in the given project. + rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*}/instances" + }; + option (google.api.method_signature) = "parent"; + } + + // Lists all instance partitions for the given instance. + rpc ListInstancePartitions(ListInstancePartitionsRequest) + returns (ListInstancePartitionsResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/instances/*}/instancePartitions" + }; + option (google.api.method_signature) = "parent"; + } + + // Gets information about a particular instance. + rpc GetInstance(GetInstanceRequest) returns (Instance) { + option (google.api.http) = { + get: "/v1/{name=projects/*/instances/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Creates an instance and begins preparing it to begin serving. The + // returned long-running operation + // can be used to track the progress of preparing the new + // instance. The instance name is assigned by the caller. If the + // named instance already exists, `CreateInstance` returns + // `ALREADY_EXISTS`. + // + // Immediately upon completion of this request: + // + // * The instance is readable via the API, with all requested attributes + // but no allocated resources. 
Its state is `CREATING`. + // + // Until completion of the returned operation: + // + // * Cancelling the operation renders the instance immediately unreadable + // via the API. + // * The instance can be deleted. + // * All other attempts to modify the instance are rejected. + // + // Upon completion of the returned operation: + // + // * Billing for all successfully-allocated resources begins (some types + // may have lower than the requested levels). + // * Databases can be created in the instance. + // * The instance's allocated resource levels are readable via the API. + // * The instance's state becomes `READY`. + // + // The returned long-running operation will + // have a name of the format `/operations/` and + // can be used to track creation of the instance. The + // metadata field type is + // [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. + // The response field type is + // [Instance][google.spanner.admin.instance.v1.Instance], if successful. + rpc CreateInstance(CreateInstanceRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/{parent=projects/*}/instances" + body: "*" + }; + option (google.api.method_signature) = "parent,instance_id,instance"; + option (google.longrunning.operation_info) = { + response_type: "google.spanner.admin.instance.v1.Instance" + metadata_type: "google.spanner.admin.instance.v1.CreateInstanceMetadata" + }; + } + + // Updates an instance, and begins allocating or releasing resources + // as requested. The returned long-running operation can be used to track the + // progress of updating the instance. If the named instance does not + // exist, returns `NOT_FOUND`. + // + // Immediately upon completion of this request: + // + // * For resource types for which a decrease in the instance's allocation + // has been requested, billing is based on the newly-requested level. 
+ // + // Until completion of the returned operation: + // + // * Cancelling the operation sets its metadata's + // [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], + // and begins restoring resources to their pre-request values. The + // operation is guaranteed to succeed at undoing all resource changes, + // after which point it terminates with a `CANCELLED` status. + // * All other attempts to modify the instance are rejected. + // * Reading the instance via the API continues to give the pre-request + // resource levels. + // + // Upon completion of the returned operation: + // + // * Billing begins for all successfully-allocated resources (some types + // may have lower than the requested levels). + // * All newly-reserved resources are available for serving the instance's + // tables. + // * The instance's new resource levels are readable via the API. + // + // The returned long-running operation will + // have a name of the format `/operations/` and + // can be used to track the instance modification. The + // metadata field type is + // [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. + // The response field type is + // [Instance][google.spanner.admin.instance.v1.Instance], if successful. + // + // Authorization requires `spanner.instances.update` permission on + // the resource [name][google.spanner.admin.instance.v1.Instance.name]. + rpc UpdateInstance(UpdateInstanceRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + patch: "/v1/{instance.name=projects/*/instances/*}" + body: "*" + }; + option (google.api.method_signature) = "instance,field_mask"; + option (google.longrunning.operation_info) = { + response_type: "google.spanner.admin.instance.v1.Instance" + metadata_type: "google.spanner.admin.instance.v1.UpdateInstanceMetadata" + }; + } + + // Deletes an instance. 
+ // + // Immediately upon completion of the request: + // + // * Billing ceases for all of the instance's reserved resources. + // + // Soon afterward: + // + // * The instance and *all of its databases* immediately and + // irrevocably disappear from the API. All data in the databases + // is permanently deleted. + rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/{name=projects/*/instances/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Sets the access control policy on an instance resource. Replaces any + // existing policy. + // + // Authorization requires `spanner.instances.setIamPolicy` on + // [resource][google.iam.v1.SetIamPolicyRequest.resource]. + rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) + returns (google.iam.v1.Policy) { + option (google.api.http) = { + post: "/v1/{resource=projects/*/instances/*}:setIamPolicy" + body: "*" + }; + option (google.api.method_signature) = "resource,policy"; + } + + // Gets the access control policy for an instance resource. Returns an empty + // policy if an instance exists but does not have a policy set. + // + // Authorization requires `spanner.instances.getIamPolicy` on + // [resource][google.iam.v1.GetIamPolicyRequest.resource]. + rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) + returns (google.iam.v1.Policy) { + option (google.api.http) = { + post: "/v1/{resource=projects/*/instances/*}:getIamPolicy" + body: "*" + }; + option (google.api.method_signature) = "resource"; + } + + // Returns permissions that the caller has on the specified instance resource. + // + // Attempting this RPC on a non-existent Cloud Spanner instance resource will + // result in a NOT_FOUND error if the user has `spanner.instances.list` + // permission on the containing Google Cloud Project. Otherwise returns an + // empty set of permissions. 
+ rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) + returns (google.iam.v1.TestIamPermissionsResponse) { + option (google.api.http) = { + post: "/v1/{resource=projects/*/instances/*}:testIamPermissions" + body: "*" + }; + option (google.api.method_signature) = "resource,permissions"; + } + + // Gets information about a particular instance partition. + rpc GetInstancePartition(GetInstancePartitionRequest) + returns (InstancePartition) { + option (google.api.http) = { + get: "/v1/{name=projects/*/instances/*/instancePartitions/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Creates an instance partition and begins preparing it to be used. The + // returned long-running operation + // can be used to track the progress of preparing the new instance partition. + // The instance partition name is assigned by the caller. If the named + // instance partition already exists, `CreateInstancePartition` returns + // `ALREADY_EXISTS`. + // + // Immediately upon completion of this request: + // + // * The instance partition is readable via the API, with all requested + // attributes but no allocated resources. Its state is `CREATING`. + // + // Until completion of the returned operation: + // + // * Cancelling the operation renders the instance partition immediately + // unreadable via the API. + // * The instance partition can be deleted. + // * All other attempts to modify the instance partition are rejected. + // + // Upon completion of the returned operation: + // + // * Billing for all successfully-allocated resources begins (some types + // may have lower than the requested levels). + // * Databases can start using this instance partition. + // * The instance partition's allocated resource levels are readable via the + // API. + // * The instance partition's state becomes `READY`. 
+ // + // The returned long-running operation will + // have a name of the format + // `/operations/` and can be used to + // track creation of the instance partition. The + // metadata field type is + // [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]. + // The response field type is + // [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if + // successful. + rpc CreateInstancePartition(CreateInstancePartitionRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/instances/*}/instancePartitions" + body: "*" + }; + option (google.api.method_signature) = + "parent,instance_partition,instance_partition_id"; + option (google.longrunning.operation_info) = { + response_type: "google.spanner.admin.instance.v1.InstancePartition" + metadata_type: "google.spanner.admin.instance.v1.CreateInstancePartitionMetadata" + }; + } + + // Deletes an existing instance partition. Requires that the + // instance partition is not used by any database or backup and is not the + // default instance partition of an instance. + // + // Authorization requires `spanner.instancePartitions.delete` permission on + // the resource + // [name][google.spanner.admin.instance.v1.InstancePartition.name]. + rpc DeleteInstancePartition(DeleteInstancePartitionRequest) + returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/{name=projects/*/instances/*/instancePartitions/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Updates an instance partition, and begins allocating or releasing resources + // as requested. The returned long-running operation can be used to track the + // progress of updating the instance partition. If the named instance + // partition does not exist, returns `NOT_FOUND`. 
+ // + // Immediately upon completion of this request: + // + // * For resource types for which a decrease in the instance partition's + // allocation has been requested, billing is based on the newly-requested + // level. + // + // Until completion of the returned operation: + // + // * Cancelling the operation sets its metadata's + // [cancel_time][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.cancel_time], + // and begins restoring resources to their pre-request values. The + // operation is guaranteed to succeed at undoing all resource changes, + // after which point it terminates with a `CANCELLED` status. + // * All other attempts to modify the instance partition are rejected. + // * Reading the instance partition via the API continues to give the + // pre-request resource levels. + // + // Upon completion of the returned operation: + // + // * Billing begins for all successfully-allocated resources (some types + // may have lower than the requested levels). + // * All newly-reserved resources are available for serving the instance + // partition's tables. + // * The instance partition's new resource levels are readable via the API. + // + // The returned long-running operation will + // have a name of the format + // `/operations/` and can be used to + // track the instance partition modification. The + // metadata field type is + // [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata]. + // The response field type is + // [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if + // successful. + // + // Authorization requires `spanner.instancePartitions.update` permission on + // the resource + // [name][google.spanner.admin.instance.v1.InstancePartition.name]. 
+ rpc UpdateInstancePartition(UpdateInstancePartitionRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + patch: "/v1/{instance_partition.name=projects/*/instances/*/instancePartitions/*}" + body: "*" + }; + option (google.api.method_signature) = "instance_partition,field_mask"; + option (google.longrunning.operation_info) = { + response_type: "google.spanner.admin.instance.v1.InstancePartition" + metadata_type: "google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata" + }; + } + + // Lists instance partition long-running operations in the given instance. + // An instance partition operation has a name of the form + // `projects//instances//instancePartitions//operations/`. + // The long-running operation + // metadata field type + // `metadata.type_url` describes the type of the metadata. Operations returned + // include those that have completed/failed/canceled within the last 7 days, + // and pending operations. Operations returned are ordered by + // `operation.metadata.value.start_time` in descending order starting from the + // most recently started operation. + // + // Authorization requires `spanner.instancePartitionOperations.list` + // permission on the resource + // [parent][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.parent]. + rpc ListInstancePartitionOperations(ListInstancePartitionOperationsRequest) + returns (ListInstancePartitionOperationsResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/instances/*}/instancePartitionOperations" + }; + option (google.api.method_signature) = "parent"; + } + + // Moves an instance to the target instance configuration. You can use the + // returned long-running operation to track + // the progress of moving the instance. 
+ // + // `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of + // the following criteria: + // + // * Is undergoing a move to a different instance configuration + // * Has backups + // * Has an ongoing update + // * Contains any CMEK-enabled databases + // * Is a free trial instance + // + // While the operation is pending: + // + // * All other attempts to modify the instance, including changes to its + // compute capacity, are rejected. + // * The following database and backup admin operations are rejected: + // + // * `DatabaseAdmin.CreateDatabase` + // * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is + // specified in the request.) + // * `DatabaseAdmin.RestoreDatabase` + // * `DatabaseAdmin.CreateBackup` + // * `DatabaseAdmin.CopyBackup` + // + // * Both the source and target instance configurations are subject to + // hourly compute and storage charges. + // * The instance might experience higher read-write latencies and a higher + // transaction abort rate. However, moving an instance doesn't cause any + // downtime. + // + // The returned long-running operation has + // a name of the format + // `/operations/` and can be used to track + // the move instance operation. The + // metadata field type is + // [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata]. + // The response field type is + // [Instance][google.spanner.admin.instance.v1.Instance], + // if successful. + // Cancelling the operation sets its metadata's + // [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time]. + // Cancellation is not immediate because it involves moving any data + // previously moved to the target instance configuration back to the original + // instance configuration. You can use this operation to track the progress of + // the cancellation. Upon successful completion of the cancellation, the + // operation terminates with `CANCELLED` status. 
+ // + // If not cancelled, upon completion of the returned operation: + // + // * The instance successfully moves to the target instance + // configuration. + // * You are billed for compute and storage in target instance + // configuration. + // + // Authorization requires the `spanner.instances.update` permission on + // the resource [instance][google.spanner.admin.instance.v1.Instance]. + // + // For more details, see + // [Move an instance](https://cloud.google.com/spanner/docs/move-instance). + rpc MoveInstance(MoveInstanceRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/{name=projects/*/instances/*}:move" + body: "*" + }; + option (google.longrunning.operation_info) = { + response_type: "google.spanner.admin.instance.v1.MoveInstanceResponse" + metadata_type: "google.spanner.admin.instance.v1.MoveInstanceMetadata" + }; + } +} + +message ReplicaInfo { + // Indicates the type of replica. See the [replica types + // documentation](https://cloud.google.com/spanner/docs/replication#replica_types) + // for more details. + enum ReplicaType { + // Not specified. + TYPE_UNSPECIFIED = 0; + + // Read-write replicas support both reads and writes. These replicas: + // + // * Maintain a full copy of your data. + // * Serve reads. + // * Can vote whether to commit a write. + // * Participate in leadership election. + // * Are eligible to become a leader. + READ_WRITE = 1; + + // Read-only replicas only support reads (not writes). Read-only replicas: + // + // * Maintain a full copy of your data. + // * Serve reads. + // * Do not participate in voting to commit writes. + // * Are not eligible to become a leader. + READ_ONLY = 2; + + // Witness replicas don't support reads but do participate in voting to + // commit writes. Witness replicas: + // + // * Do not maintain a full copy of data. + // * Do not serve reads. + // * Vote whether to commit writes. + // * Participate in leader election but are not eligible to become leader. 
+ WITNESS = 3; + } + + // The location of the serving resources, e.g., "us-central1". + string location = 1; + + // The type of replica. + ReplicaType type = 2; + + // If true, this location is designated as the default leader location where + // leader replicas are placed. See the [region types + // documentation](https://cloud.google.com/spanner/docs/instances#region_types) + // for more details. + bool default_leader_location = 3; +} + +// A possible configuration for a Cloud Spanner instance. Configurations +// define the geographic placement of nodes and their replication. +message InstanceConfig { + option (google.api.resource) = { + type: "spanner.googleapis.com/InstanceConfig" + pattern: "projects/{project}/instanceConfigs/{instance_config}" + plural: "instanceConfigs" + singular: "instanceConfig" + }; + + // The type of this configuration. + enum Type { + // Unspecified. + TYPE_UNSPECIFIED = 0; + + // Google-managed configuration. + GOOGLE_MANAGED = 1; + + // User-managed configuration. + USER_MANAGED = 2; + } + + // Indicates the current state of the instance configuration. + enum State { + // Not specified. + STATE_UNSPECIFIED = 0; + + // The instance configuration is still being created. + CREATING = 1; + + // The instance configuration is fully created and ready to be used to + // create instances. + READY = 2; + } + + // Describes the availability for free instances to be created in an instance + // configuration. + enum FreeInstanceAvailability { + // Not specified. + FREE_INSTANCE_AVAILABILITY_UNSPECIFIED = 0; + + // Indicates that free instances are available to be created in this + // instance configuration. + AVAILABLE = 1; + + // Indicates that free instances are not supported in this instance + // configuration. + UNSUPPORTED = 2; + + // Indicates that free instances are currently not available to be created + // in this instance configuration. 
+ DISABLED = 3; + + // Indicates that additional free instances cannot be created in this + // instance configuration because the project has reached its limit of free + // instances. + QUOTA_EXCEEDED = 4; + } + + // Indicates the quorum type of this instance configuration. + enum QuorumType { + // Quorum type not specified. + QUORUM_TYPE_UNSPECIFIED = 0; + + // An instance configuration tagged with `REGION` quorum type forms a write + // quorum in a single region. + REGION = 1; + + // An instance configuration tagged with the `DUAL_REGION` quorum type forms + // a write quorum with exactly two read-write regions in a multi-region + // configuration. + // + // This instance configuration requires failover in the event of + // regional failures. + DUAL_REGION = 2; + + // An instance configuration tagged with the `MULTI_REGION` quorum type + // forms a write quorum from replicas that are spread across more than one + // region in a multi-region configuration. + MULTI_REGION = 3; + } + + // A unique identifier for the instance configuration. Values + // are of the form + // `projects//instanceConfigs/[a-z][-a-z0-9]*`. + // + // User instance configuration must start with `custom-`. + string name = 1; + + // The name of this instance configuration as it appears in UIs. + string display_name = 2; + + // Output only. Whether this instance configuration is a Google-managed or + // user-managed configuration. + Type config_type = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // The geographic placement of nodes in this instance configuration and their + // replication properties. + // + // To create user-managed configurations, input + // `replicas` must include all replicas in `replicas` of the `base_config` + // and include one or more replicas in the `optional_replicas` of the + // `base_config`. + repeated ReplicaInfo replicas = 3; + + // Output only. The available optional replicas to choose from for + // user-managed configurations. 
Populated for Google-managed configurations. + repeated ReplicaInfo optional_replicas = 6 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Base configuration name, e.g. projects//instanceConfigs/nam3, + // based on which this configuration is created. Only set for user-managed + // configurations. `base_config` must refer to a configuration of type + // `GOOGLE_MANAGED` in the same project as this configuration. + string base_config = 7 [(google.api.resource_reference) = { + type: "spanner.googleapis.com/InstanceConfig" + }]; + + // Cloud Labels are a flexible and lightweight mechanism for organizing cloud + // resources into groups that reflect a customer's organizational needs and + // deployment strategies. Cloud Labels can be used to filter collections of + // resources. They can be used to control how resource metrics are aggregated. + // And they can be used as arguments to policy management rules (e.g. route, + // firewall, load balancing, etc.). + // + // * Label keys must be between 1 and 63 characters long and must conform to + // the following regular expression: `[a-z][a-z0-9_-]{0,62}`. + // * Label values must be between 0 and 63 characters long and must conform + // to the regular expression `[a-z0-9_-]{0,63}`. + // * No more than 64 labels can be associated with a given resource. + // + // See https://goo.gl/xmQnxf for more information on and examples of labels. + // + // If you plan to use labels in your own code, please note that additional + // characters may be allowed in the future. Therefore, you are advised to use + // an internal label representation, such as JSON, which doesn't rely upon + // specific characters being disallowed. For example, representing labels + // as the string: name + "_" + value would prove problematic if we were to + // allow "_" in a future release. 
+  map<string, string> labels = 8;
+
+  // etag is used for optimistic concurrency control as a way
+  // to help prevent simultaneous updates of an instance configuration from
+  // overwriting each other. It is strongly suggested that systems make use of
+  // the etag in the read-modify-write cycle to perform instance configuration
+  // updates in order to avoid race conditions: An etag is returned in the
+  // response which contains instance configurations, and systems are expected
+  // to put that etag in the request to update instance configuration to ensure
+  // that their change is applied to the same version of the instance
+  // configuration. If no etag is provided in the call to update the instance
+  // configuration, then the existing instance configuration is overwritten
+  // blindly.
+  string etag = 9;
+
+  // Allowed values of the "default_leader" schema option for databases in
+  // instances that use this instance configuration.
+  repeated string leader_options = 4;
+
+  // Output only. If true, the instance configuration is being created or
+  // updated. If false, there are no ongoing operations for the instance
+  // configuration.
+  bool reconciling = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The current instance configuration state. Applicable only for
+  // `USER_MANAGED` configurations.
+  State state = 11 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Describes whether free instances are available to be created
+  // in this instance configuration.
+  FreeInstanceAvailability free_instance_availability = 12
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The `QuorumType` of the instance configuration.
+  QuorumType quorum_type = 18 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The storage limit in bytes per processing unit.
+ int64 storage_limit_per_processing_unit = 19 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// ReplicaComputeCapacity describes the amount of server resources that are +// allocated to each replica identified by the replica selection. +message ReplicaComputeCapacity { + // Required. Identifies replicas by specified properties. + // All replicas in the selection have the same amount of compute capacity. + ReplicaSelection replica_selection = 1 + [(google.api.field_behavior) = REQUIRED]; + + // Compute capacity allocated to each replica identified by the specified + // selection. + // The unit is selected based on the unit used to specify the instance size + // for non-autoscaling instances, or the unit used in autoscaling limit for + // autoscaling instances. + oneof compute_capacity { + // The number of nodes allocated to each replica. + // + // This may be zero in API responses for instances that are not yet in + // state `READY`. + int32 node_count = 2; + + // The number of processing units allocated to each replica. + // + // This may be zero in API responses for instances that are not yet in + // state `READY`. + int32 processing_units = 3; + } +} + +// Autoscaling configuration for an instance. +message AutoscalingConfig { + // The autoscaling limits for the instance. Users can define the minimum and + // maximum compute capacity allocated to the instance, and the autoscaler will + // only scale within that range. Users can either use nodes or processing + // units to specify the limits, but should use the same unit to set both the + // min_limit and max_limit. + message AutoscalingLimits { + // The minimum compute capacity for the instance. + oneof min_limit { + // Minimum number of nodes allocated to the instance. If set, this number + // should be greater than or equal to 1. + int32 min_nodes = 1; + + // Minimum number of processing units allocated to the instance. If set, + // this number should be multiples of 1000. 
+ int32 min_processing_units = 2; + } + + // The maximum compute capacity for the instance. The maximum compute + // capacity should be less than or equal to 10X the minimum compute + // capacity. + oneof max_limit { + // Maximum number of nodes allocated to the instance. If set, this number + // should be greater than or equal to min_nodes. + int32 max_nodes = 3; + + // Maximum number of processing units allocated to the instance. If set, + // this number should be multiples of 1000 and be greater than or equal to + // min_processing_units. + int32 max_processing_units = 4; + } + } + + // The autoscaling targets for an instance. + message AutoscalingTargets { + // Optional. The target high priority cpu utilization percentage that the + // autoscaler should be trying to achieve for the instance. This number is + // on a scale from 0 (no utilization) to 100 (full utilization). The valid + // range is [10, 90] inclusive. If not specified or set to 0, the autoscaler + // skips scaling based on high priority CPU utilization. + int32 high_priority_cpu_utilization_percent = 1 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The target total CPU utilization percentage that the autoscaler + // should be trying to achieve for the instance. This number is on a scale + // from 0 (no utilization) to 100 (full utilization). The valid range is + // [10, 90] inclusive. If not specified or set to 0, the autoscaler skips + // scaling based on total CPU utilization. If both + // `high_priority_cpu_utilization_percent` and + // `total_cpu_utilization_percent` are specified, the autoscaler provisions + // the larger of the two required compute capacities to satisfy both + // targets. + int32 total_cpu_utilization_percent = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Required. The target storage utilization percentage that the autoscaler + // should be trying to achieve for the instance. 
This number is on a scale + // from 0 (no utilization) to 100 (full utilization). The valid range is + // [10, 99] inclusive. + int32 storage_utilization_percent = 2 + [(google.api.field_behavior) = REQUIRED]; + } + + // AsymmetricAutoscalingOption specifies the scaling of replicas identified by + // the given selection. + message AsymmetricAutoscalingOption { + // Overrides the top-level autoscaling configuration for the replicas + // identified by `replica_selection`. All fields in this message are + // optional. Any unspecified fields will use the corresponding values from + // the top-level autoscaling configuration. + message AutoscalingConfigOverrides { + // Optional. If specified, overrides the min/max limit in the top-level + // autoscaling configuration for the selected replicas. + AutoscalingLimits autoscaling_limits = 1 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If specified, overrides the autoscaling target + // high_priority_cpu_utilization_percent in the top-level autoscaling + // configuration for the selected replicas. + int32 autoscaling_target_high_priority_cpu_utilization_percent = 2 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If specified, overrides the + // autoscaling target `total_cpu_utilization_percent` + // in the top-level autoscaling configuration for the selected replicas. + int32 autoscaling_target_total_cpu_utilization_percent = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If true, disables high priority CPU autoscaling for the + // selected replicas and ignores + // [high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.high_priority_cpu_utilization_percent] + // in the top-level autoscaling configuration. 
+ // + // When setting this field to true, setting + // [autoscaling_target_high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_high_priority_cpu_utilization_percent] + // field to a non-zero value for the same replica is not supported. + // + // If false, the + // [autoscaling_target_high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_high_priority_cpu_utilization_percent] + // field in the replica will be used if set to a non-zero value. + // Otherwise, the + // [high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.high_priority_cpu_utilization_percent] + // field in the top-level autoscaling configuration will be used. + // + // Setting both + // [disable_high_priority_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_high_priority_cpu_autoscaling] + // and + // [disable_total_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_total_cpu_autoscaling] + // to true for the same replica is not supported. + bool disable_high_priority_cpu_autoscaling = 5 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If true, disables total CPU autoscaling for the selected + // replicas and ignores + // [total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.total_cpu_utilization_percent] + // in the top-level autoscaling configuration. 
+ // + // When setting this field to true, setting + // [autoscaling_target_total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_total_cpu_utilization_percent] + // field to a non-zero value for the same replica is not supported. + // + // If false, the + // [autoscaling_target_total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_total_cpu_utilization_percent] + // field in the replica will be used if set to a non-zero value. + // Otherwise, the + // [total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.total_cpu_utilization_percent] + // field in the top-level autoscaling configuration will be used. + // + // Setting both + // [disable_high_priority_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_high_priority_cpu_autoscaling] + // and + // [disable_total_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_total_cpu_autoscaling] + // to true for the same replica is not supported. + bool disable_total_cpu_autoscaling = 6 + [(google.api.field_behavior) = OPTIONAL]; + } + + // Required. Selects the replicas to which this AsymmetricAutoscalingOption + // applies. Only read-only replicas are supported. + ReplicaSelection replica_selection = 1 + [(google.api.field_behavior) = REQUIRED]; + + // Optional. Overrides applied to the top-level autoscaling configuration + // for the selected replicas. + AutoscalingConfigOverrides overrides = 2 + [(google.api.field_behavior) = OPTIONAL]; + } + + // Required. Autoscaling limits for an instance. + AutoscalingLimits autoscaling_limits = 1 + [(google.api.field_behavior) = REQUIRED]; + + // Required. 
The autoscaling targets for an instance. + AutoscalingTargets autoscaling_targets = 2 + [(google.api.field_behavior) = REQUIRED]; + + // Optional. Optional asymmetric autoscaling options. + // Replicas matching the replica selection criteria will be autoscaled + // independently from other replicas. The autoscaler will scale the replicas + // based on the utilization of replicas identified by the replica selection. + // Replica selections should not overlap with each other. + // + // Other replicas (those do not match any replica selection) will be + // autoscaled together and will have the same compute capacity allocated to + // them. + repeated AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 + [(google.api.field_behavior) = OPTIONAL]; +} + +// An isolated set of Cloud Spanner resources on which databases can be hosted. +message Instance { + option (google.api.resource) = { + type: "spanner.googleapis.com/Instance" + pattern: "projects/{project}/instances/{instance}" + plural: "instances" + singular: "instance" + }; + + // Indicates the current state of the instance. + enum State { + // Not specified. + STATE_UNSPECIFIED = 0; + + // The instance is still being created. Resources may not be + // available yet, and operations such as database creation may not + // work. + CREATING = 1; + + // The instance is fully created and ready to do work such as + // creating databases. + READY = 2; + } + + // The type of this instance. The type can be used to distinguish product + // variants, that can affect aspects like: usage restrictions, quotas and + // billing. Currently this is used to distinguish FREE_INSTANCE vs PROVISIONED + // instances. + enum InstanceType { + // Not specified. + INSTANCE_TYPE_UNSPECIFIED = 0; + + // Provisioned instances have dedicated resources, standard usage limits and + // support. + PROVISIONED = 1; + + // Free instances provide no guarantee for dedicated resources, + // [node_count, processing_units] should be 0. 
They come + // with stricter usage limits and limited support. + FREE_INSTANCE = 2; + } + + // The edition selected for this instance. Different editions provide + // different capabilities at different price points. + enum Edition { + // Edition not specified. + EDITION_UNSPECIFIED = 0; + + // Standard edition. + STANDARD = 1; + + // Enterprise edition. + ENTERPRISE = 2; + + // Enterprise Plus edition. + ENTERPRISE_PLUS = 3; + } + + // Indicates the + // [default backup + // schedule](https://cloud.google.com/spanner/docs/backup#default-backup-schedules) + // behavior for new databases within the instance. + enum DefaultBackupScheduleType { + // Not specified. + DEFAULT_BACKUP_SCHEDULE_TYPE_UNSPECIFIED = 0; + + // A default backup schedule isn't created automatically when a new database + // is created in the instance. + NONE = 1; + + // A default backup schedule is created automatically when a new database + // is created in the instance. The default backup schedule creates a full + // backup every 24 hours. These full backups are retained for 7 days. + // You can edit or delete the default backup schedule once it's created. + AUTOMATIC = 2; + } + + // Required. A unique identifier for the instance, which cannot be changed + // after the instance is created. Values are of the form + // `projects//instances/[a-z][-a-z0-9]*[a-z0-9]`. The final + // segment of the name must be between 2 and 64 characters in length. + string name = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The name of the instance's configuration. Values are of the form + // `projects//instanceConfigs/`. See + // also [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and + // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. + string config = 2 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/InstanceConfig" + } + ]; + + // Required. 
The descriptive name for this instance as it appears in UIs. + // Must be unique per project and between 4 and 30 characters in length. + string display_name = 3 [(google.api.field_behavior) = REQUIRED]; + + // The number of nodes allocated to this instance. At most, one of either + // `node_count` or `processing_units` should be present in the message. + // + // Users can set the `node_count` field to specify the target number of nodes + // allocated to the instance. + // + // If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY` + // field and reflects the current number of nodes allocated to the instance. + // + // This might be zero in API responses for instances that are not yet in the + // `READY` state. + // + // + // For more information, see + // [Compute capacity, nodes, and processing + // units](https://cloud.google.com/spanner/docs/compute-capacity). + int32 node_count = 5; + + // The number of processing units allocated to this instance. At most, one of + // either `processing_units` or `node_count` should be present in the message. + // + // Users can set the `processing_units` field to specify the target number of + // processing units allocated to the instance. + // + // If autoscaling is enabled, `processing_units` is treated as an + // `OUTPUT_ONLY` field and reflects the current number of processing units + // allocated to the instance. + // + // This might be zero in API responses for instances that are not yet in the + // `READY` state. + // + // + // For more information, see + // [Compute capacity, nodes and processing + // units](https://cloud.google.com/spanner/docs/compute-capacity). + int32 processing_units = 9; + + // Output only. Lists the compute capacity per ReplicaSelection. A replica + // selection identifies a set of replicas with common properties. Replicas + // identified by a ReplicaSelection are scaled with the same compute capacity. 
+ repeated ReplicaComputeCapacity replica_compute_capacity = 19 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. The autoscaling configuration. Autoscaling is enabled if this + // field is set. When autoscaling is enabled, node_count and processing_units + // are treated as OUTPUT_ONLY fields and reflect the current compute capacity + // allocated to the instance. + AutoscalingConfig autoscaling_config = 17 + [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The current instance state. For + // [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance], + // the state must be either omitted or set to `CREATING`. For + // [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance], + // the state must be either omitted or set to `READY`. + State state = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Cloud Labels are a flexible and lightweight mechanism for organizing cloud + // resources into groups that reflect a customer's organizational needs and + // deployment strategies. Cloud Labels can be used to filter collections of + // resources. They can be used to control how resource metrics are aggregated. + // And they can be used as arguments to policy management rules (e.g. route, + // firewall, load balancing, etc.). + // + // * Label keys must be between 1 and 63 characters long and must conform to + // the following regular expression: `[a-z][a-z0-9_-]{0,62}`. + // * Label values must be between 0 and 63 characters long and must conform + // to the regular expression `[a-z0-9_-]{0,63}`. + // * No more than 64 labels can be associated with a given resource. + // + // See https://goo.gl/xmQnxf for more information on and examples of labels. + // + // If you plan to use labels in your own code, please note that additional + // characters may be allowed in the future. 
And so you are advised to use an
+  // internal label representation, such as JSON, which doesn't rely upon
+  // specific characters being disallowed. For example, representing labels
+  // as the string: name + "_" + value would prove problematic if we were to
+  // allow "_" in a future release.
+  map<string, string> labels = 7;
+
+  // The `InstanceType` of the current instance.
+  InstanceType instance_type = 10;
+
+  // Deprecated. This field is not populated.
+  repeated string endpoint_uris = 8;
+
+  // Output only. The time at which the instance was created.
+  google.protobuf.Timestamp create_time = 11
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The time at which the instance was most recently updated.
+  google.protobuf.Timestamp update_time = 12
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Free instance metadata. Only populated for free instances.
+  FreeInstanceMetadata free_instance_metadata = 13;
+
+  // Optional. The `Edition` of the current instance.
+  Edition edition = 20 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Controls the default backup schedule behavior for new databases
+  // within the instance. By default, a backup schedule is created automatically
+  // when a new database is created in a new instance.
+  //
+  // Note that the `AUTOMATIC` value isn't permitted for free instances,
+  // as backups and backup schedules aren't supported for free instances.
+  //
+  // In the `GetInstance` or `ListInstances` response, if the value of
+  // `default_backup_schedule_type` isn't set, or set to `NONE`, Spanner doesn't
+  // create a default backup schedule for new databases in the instance.
+  DefaultBackupScheduleType default_backup_schedule_type = 23
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The request for
+// [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
+message ListInstanceConfigsRequest {
+  // Required.
The name of the project for which a list of supported instance + // configurations is requested. Values are of the form + // `projects/`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "cloudresourcemanager.googleapis.com/Project" + } + ]; + + // Number of instance configurations to be returned in the response. If 0 or + // less, defaults to the server's maximum allowed page size. + int32 page_size = 2; + + // If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token] + // from a previous + // [ListInstanceConfigsResponse][google.spanner.admin.instance.v1.ListInstanceConfigsResponse]. + string page_token = 3; +} + +// The response for +// [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. +message ListInstanceConfigsResponse { + // The list of requested instance configurations. + repeated InstanceConfig instance_configs = 1; + + // `next_page_token` can be sent in a subsequent + // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs] + // call to fetch more of the matching instance configurations. + string next_page_token = 2; +} + +// The request for +// [GetInstanceConfigRequest][google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig]. +message GetInstanceConfigRequest { + // Required. The name of the requested instance configuration. Values are of + // the form `projects//instanceConfigs/`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/InstanceConfig" + } + ]; +} + +// The request for +// [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]. +message CreateInstanceConfigRequest { + // Required. The name of the project in which to create the instance + // configuration. Values are of the form `projects/`. 
+ string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "cloudresourcemanager.googleapis.com/Project" + } + ]; + + // Required. The ID of the instance configuration to create. Valid identifiers + // are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64 + // characters in length. The `custom-` prefix is required to avoid name + // conflicts with Google-managed configurations. + string instance_config_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The `InstanceConfig` proto of the configuration to create. + // `instance_config.name` must be + // `/instanceConfigs/`. + // `instance_config.base_config` must be a Google-managed configuration name, + // e.g. /instanceConfigs/us-east1, /instanceConfigs/nam3. + InstanceConfig instance_config = 3 [(google.api.field_behavior) = REQUIRED]; + + // An option to validate, but not actually execute, a request, + // and provide the same response. + bool validate_only = 4; +} + +// The request for +// [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]. +message UpdateInstanceConfigRequest { + // Required. The user instance configuration to update, which must always + // include the instance configuration name. Otherwise, only fields mentioned + // in + // [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask] + // need be included. To prevent conflicts of concurrent updates, + // [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can + // be used. + InstanceConfig instance_config = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. A mask specifying which fields in + // [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] should be + // updated. 
The field mask must always be specified; this prevents any future + // fields in [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] + // from being erased accidentally by clients that do not know about them. Only + // display_name and labels can be updated. + google.protobuf.FieldMask update_mask = 2 + [(google.api.field_behavior) = REQUIRED]; + + // An option to validate, but not actually execute, a request, + // and provide the same response. + bool validate_only = 3; +} + +// The request for +// [DeleteInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstanceConfig]. +message DeleteInstanceConfigRequest { + // Required. The name of the instance configuration to be deleted. + // Values are of the form + // `projects//instanceConfigs/` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/InstanceConfig" + } + ]; + + // Used for optimistic concurrency control as a way to help prevent + // simultaneous deletes of an instance configuration from overwriting each + // other. If not empty, the API + // only deletes the instance configuration when the etag provided matches the + // current status of the requested instance configuration. Otherwise, deletes + // the instance configuration without checking the current status of the + // requested instance configuration. + string etag = 2; + + // An option to validate, but not actually execute, a request, + // and provide the same response. + bool validate_only = 3; +} + +// The request for +// [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations]. +message ListInstanceConfigOperationsRequest { + // Required. The project of the instance configuration operations. + // Values are of the form `projects/`. 
+ string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "cloudresourcemanager.googleapis.com/Project" + } + ]; + + // An expression that filters the list of returned operations. + // + // A filter expression consists of a field name, a + // comparison operator, and a value for filtering. + // The value must be a string, a number, or a boolean. The comparison operator + // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. + // Colon `:` is the contains operator. Filter rules are not case sensitive. + // + // The following fields in the Operation are eligible for filtering: + // + // * `name` - The name of the long-running operation + // * `done` - False if the operation is in progress, else true. + // * `metadata.@type` - the type of metadata. For example, the type string + // for + // [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata] + // is + // `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata`. + // * `metadata.` - any field in metadata.value. + // `metadata.@type` must be specified first, if filtering on metadata + // fields. + // * `error` - Error associated with the long-running operation. + // * `response.@type` - the type of response. + // * `response.` - any field in response.value. + // + // You can combine multiple expressions by enclosing each expression in + // parentheses. By default, expressions are combined with AND logic. However, + // you can specify AND, OR, and NOT logic explicitly. + // + // Here are a few examples: + // + // * `done:true` - The operation is complete. 
+ // * `(metadata.@type=` \ + // `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata) + // AND` \ + // `(metadata.instance_config.name:custom-config) AND` \ + // `(metadata.progress.start_time < \"2021-03-28T14:50:00Z\") AND` \ + // `(error:*)` - Return operations where: + // * The operation's metadata type is + // [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. + // * The instance configuration name contains "custom-config". + // * The operation started before 2021-03-28T14:50:00Z. + // * The operation resulted in an error. + string filter = 2; + + // Number of operations to be returned in the response. If 0 or + // less, defaults to the server's maximum allowed page size. + int32 page_size = 3; + + // If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse.next_page_token] + // from a previous + // [ListInstanceConfigOperationsResponse][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse] + // to the same `parent` and with the same `filter`. + string page_token = 4; +} + +// The response for +// [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations]. +message ListInstanceConfigOperationsResponse { + // The list of matching instance configuration long-running operations. Each + // operation's name will be + // prefixed by the name of the instance configuration. The operation's + // metadata field type + // `metadata.type_url` describes the type of the metadata. + repeated google.longrunning.Operation operations = 1; + + // `next_page_token` can be sent in a subsequent + // [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations] + // call to fetch more of the matching metadata. 
+ string next_page_token = 2; +} + +// The request for +// [GetInstance][google.spanner.admin.instance.v1.InstanceAdmin.GetInstance]. +message GetInstanceRequest { + // Required. The name of the requested instance. Values are of the form + // `projects//instances/`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Instance" + } + ]; + + // If field_mask is present, specifies the subset of + // [Instance][google.spanner.admin.instance.v1.Instance] fields that should be + // returned. If absent, all + // [Instance][google.spanner.admin.instance.v1.Instance] fields are returned. + google.protobuf.FieldMask field_mask = 2; +} + +// The request for +// [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]. +message CreateInstanceRequest { + // Required. The name of the project in which to create the instance. Values + // are of the form `projects/`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "cloudresourcemanager.googleapis.com/Project" + } + ]; + + // Required. The ID of the instance to create. Valid identifiers are of the + // form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64 characters in + // length. + string instance_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The instance to create. The name may be omitted, but if + // specified must be `/instances/`. + Instance instance = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// The request for +// [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]. +message ListInstancesRequest { + // Required. The name of the project for which a list of instances is + // requested. Values are of the form `projects/`. 
+ string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "cloudresourcemanager.googleapis.com/Project" + } + ]; + + // Number of instances to be returned in the response. If 0 or less, defaults + // to the server's maximum allowed page size. + int32 page_size = 2; + + // If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token] + // from a previous + // [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse]. + string page_token = 3; + + // An expression for filtering the results of the request. Filter rules are + // case insensitive. The fields eligible for filtering are: + // + // * `name` + // * `display_name` + // * `labels.key` where key is the name of a label + // + // Some examples of using filters are: + // + // * `name:*` --> The instance has a name. + // * `name:Howl` --> The instance's name contains the string "howl". + // * `name:HOWL` --> Equivalent to above. + // * `NAME:howl` --> Equivalent to above. + // * `labels.env:*` --> The instance has the label "env". + // * `labels.env:dev` --> The instance has the label "env" and the value of + // the label contains the string "dev". + // * `name:howl labels.env:dev` --> The instance's name contains "howl" and + // it has the label "env" with its value + // containing "dev". + string filter = 4; + + // Deadline used while retrieving metadata for instances. + // Instances whose metadata cannot be retrieved within this deadline will be + // added to + // [unreachable][google.spanner.admin.instance.v1.ListInstancesResponse.unreachable] + // in + // [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse]. + google.protobuf.Timestamp instance_deadline = 5; +} + +// The response for +// [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]. +message ListInstancesResponse { + // The list of requested instances. 
+ repeated Instance instances = 1; + + // `next_page_token` can be sent in a subsequent + // [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances] + // call to fetch more of the matching instances. + string next_page_token = 2; + + // The list of unreachable instances. + // It includes the names of instances whose metadata could not be retrieved + // within + // [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline]. + repeated string unreachable = 3; +} + +// The request for +// [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]. +message UpdateInstanceRequest { + // Required. The instance to update, which must always include the instance + // name. Otherwise, only fields mentioned in + // [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask] + // need be included. + Instance instance = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. A mask specifying which fields in + // [Instance][google.spanner.admin.instance.v1.Instance] should be updated. + // The field mask must always be specified; this prevents any future fields in + // [Instance][google.spanner.admin.instance.v1.Instance] from being erased + // accidentally by clients that do not know about them. + google.protobuf.FieldMask field_mask = 2 + [(google.api.field_behavior) = REQUIRED]; +} + +// The request for +// [DeleteInstance][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance]. +message DeleteInstanceRequest { + // Required. The name of the instance to be deleted. Values are of the form + // `projects//instances/` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Instance" + } + ]; +} + +// Metadata type for the operation returned by +// [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]. +message CreateInstanceMetadata { + // The instance being created. 
+ Instance instance = 1; + + // The time at which the + // [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance] + // request was received. + google.protobuf.Timestamp start_time = 2; + + // The time at which this operation was cancelled. If set, this operation is + // in the process of undoing itself (which is guaranteed to succeed) and + // cannot be cancelled again. + google.protobuf.Timestamp cancel_time = 3; + + // The time at which this operation failed or was completed successfully. + google.protobuf.Timestamp end_time = 4; + + // The expected fulfillment period of this create operation. + FulfillmentPeriod expected_fulfillment_period = 5; +} + +// Metadata type for the operation returned by +// [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]. +message UpdateInstanceMetadata { + // The desired end state of the update. + Instance instance = 1; + + // The time at which + // [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance] + // request was received. + google.protobuf.Timestamp start_time = 2; + + // The time at which this operation was cancelled. If set, this operation is + // in the process of undoing itself (which is guaranteed to succeed) and + // cannot be cancelled again. + google.protobuf.Timestamp cancel_time = 3; + + // The time at which this operation failed or was completed successfully. + google.protobuf.Timestamp end_time = 4; + + // The expected fulfillment period of this update operation. + FulfillmentPeriod expected_fulfillment_period = 5; +} + +// Free instance specific metadata that is kept even after an instance has been +// upgraded for tracking purposes. +message FreeInstanceMetadata { + // Allows users to change behavior when a free instance expires. + enum ExpireBehavior { + // Not specified. + EXPIRE_BEHAVIOR_UNSPECIFIED = 0; + + // When the free instance expires, upgrade the instance to a provisioned + // instance. 
+ FREE_TO_PROVISIONED = 1; + + // When the free instance expires, disable the instance, and delete it + // after the grace period passes if it has not been upgraded. + REMOVE_AFTER_GRACE_PERIOD = 2; + } + + // Output only. Timestamp after which the instance will either be upgraded or + // scheduled for deletion after a grace period. ExpireBehavior is used to + // choose between upgrading or scheduling the free instance for deletion. This + // timestamp is set during the creation of a free instance. + google.protobuf.Timestamp expire_time = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. If present, the timestamp at which the free instance was + // upgraded to a provisioned instance. + google.protobuf.Timestamp upgrade_time = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Specifies the expiration behavior of a free instance. The default of + // ExpireBehavior is `REMOVE_AFTER_GRACE_PERIOD`. This can be modified during + // or after creation, and before expiration. + ExpireBehavior expire_behavior = 3; +} + +// Metadata type for the operation returned by +// [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]. +message CreateInstanceConfigMetadata { + // The target instance configuration end state. + InstanceConfig instance_config = 1; + + // The progress of the + // [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig] + // operation. + OperationProgress progress = 2; + + // The time at which this operation was cancelled. + google.protobuf.Timestamp cancel_time = 3; +} + +// Metadata type for the operation returned by +// [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]. +message UpdateInstanceConfigMetadata { + // The desired instance configuration after updating. 
+ InstanceConfig instance_config = 1; + + // The progress of the + // [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig] + // operation. + OperationProgress progress = 2; + + // The time at which this operation was cancelled. + google.protobuf.Timestamp cancel_time = 3; +} + +// An isolated set of Cloud Spanner resources that databases can define +// placements on. +message InstancePartition { + option (google.api.resource) = { + type: "spanner.googleapis.com/InstancePartition" + pattern: "projects/{project}/instances/{instance}/instancePartitions/{instance_partition}" + plural: "instancePartitions" + singular: "instancePartition" + }; + + // Indicates the current state of the instance partition. + enum State { + // Not specified. + STATE_UNSPECIFIED = 0; + + // The instance partition is still being created. Resources may not be + // available yet, and operations such as creating placements using this + // instance partition may not work. + CREATING = 1; + + // The instance partition is fully created and ready to do work such as + // creating placements and using in databases. + READY = 2; + } + + // Required. A unique identifier for the instance partition. Values are of the + // form + // `projects//instances//instancePartitions/[a-z][-a-z0-9]*[a-z0-9]`. + // The final segment of the name must be between 2 and 64 characters in + // length. An instance partition's name cannot be changed after the instance + // partition is created. + string name = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The name of the instance partition's configuration. Values are of + // the form `projects//instanceConfigs/`. See also + // [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and + // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. 
+ string config = 2 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/InstanceConfig" + } + ]; + + // Required. The descriptive name for this instance partition as it appears in + // UIs. Must be unique per project and between 4 and 30 characters in length. + string display_name = 3 [(google.api.field_behavior) = REQUIRED]; + + // Compute capacity defines amount of server and storage resources that are + // available to the databases in an instance partition. At most, one of either + // `node_count` or` processing_units` should be present in the message. For + // more information, see + // [Compute capacity, nodes, and processing + // units](https://cloud.google.com/spanner/docs/compute-capacity). + oneof compute_capacity { + // The number of nodes allocated to this instance partition. + // + // Users can set the `node_count` field to specify the target number of + // nodes allocated to the instance partition. + // + // This may be zero in API responses for instance partitions that are not + // yet in state `READY`. + int32 node_count = 5; + + // The number of processing units allocated to this instance partition. + // + // Users can set the `processing_units` field to specify the target number + // of processing units allocated to the instance partition. + // + // This might be zero in API responses for instance partitions that are not + // yet in the `READY` state. + int32 processing_units = 6; + } + + // Optional. The autoscaling configuration. Autoscaling is enabled if this + // field is set. When autoscaling is enabled, fields in compute_capacity are + // treated as OUTPUT_ONLY fields and reflect the current compute capacity + // allocated to the instance partition. + AutoscalingConfig autoscaling_config = 13 + [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The current instance partition state. + State state = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. 
The time at which the instance partition was created. + google.protobuf.Timestamp create_time = 8 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The time at which the instance partition was most recently + // updated. + google.protobuf.Timestamp update_time = 9 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The names of the databases that reference this + // instance partition. Referencing databases should share the parent instance. + // The existence of any referencing database prevents the instance partition + // from being deleted. + repeated string referencing_databases = 10 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Deprecated: This field is not populated. + // Output only. The names of the backups that reference this instance + // partition. Referencing backups should share the parent instance. The + // existence of any referencing backup prevents the instance partition from + // being deleted. + repeated string referencing_backups = 11 + [deprecated = true, (google.api.field_behavior) = OUTPUT_ONLY]; + + // Used for optimistic concurrency control as a way + // to help prevent simultaneous updates of a instance partition from + // overwriting each other. It is strongly suggested that systems make use of + // the etag in the read-modify-write cycle to perform instance partition + // updates in order to avoid race conditions: An etag is returned in the + // response which contains instance partitions, and systems are expected to + // put that etag in the request to update instance partitions to ensure that + // their change will be applied to the same version of the instance partition. + // If no etag is provided in the call to update instance partition, then the + // existing instance partition is overwritten blindly. + string etag = 12; +} + +// Metadata type for the operation returned by +// [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]. 
+message CreateInstancePartitionMetadata { + // The instance partition being created. + InstancePartition instance_partition = 1; + + // The time at which the + // [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition] + // request was received. + google.protobuf.Timestamp start_time = 2; + + // The time at which this operation was cancelled. If set, this operation is + // in the process of undoing itself (which is guaranteed to succeed) and + // cannot be cancelled again. + google.protobuf.Timestamp cancel_time = 3; + + // The time at which this operation failed or was completed successfully. + google.protobuf.Timestamp end_time = 4; +} + +// The request for +// [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]. +message CreateInstancePartitionRequest { + // Required. The name of the instance in which to create the instance + // partition. Values are of the form + // `projects//instances/`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Instance" + } + ]; + + // Required. The ID of the instance partition to create. Valid identifiers are + // of the form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64 + // characters in length. + string instance_partition_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The instance partition to create. The instance_partition.name may + // be omitted, but if specified must be + // `/instancePartitions/`. + InstancePartition instance_partition = 3 + [(google.api.field_behavior) = REQUIRED]; +} + +// The request for +// [DeleteInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstancePartition]. +message DeleteInstancePartitionRequest { + // Required. The name of the instance partition to be deleted. 
+ // Values are of the form + // `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/InstancePartition" + } + ]; + + // Optional. If not empty, the API only deletes the instance partition when + // the etag provided matches the current status of the requested instance + // partition. Otherwise, deletes the instance partition without checking the + // current status of the requested instance partition. + string etag = 2; +} + +// The request for +// [GetInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.GetInstancePartition]. +message GetInstancePartitionRequest { + // Required. The name of the requested instance partition. Values are of + // the form + // `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/InstancePartition" + } + ]; +} + +// The request for +// [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]. +message UpdateInstancePartitionRequest { + // Required. The instance partition to update, which must always include the + // instance partition name. Otherwise, only fields mentioned in + // [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask] + // need be included. + InstancePartition instance_partition = 1 + [(google.api.field_behavior) = REQUIRED]; + + // Required. A mask specifying which fields in + // [InstancePartition][google.spanner.admin.instance.v1.InstancePartition] + // should be updated. The field mask must always be specified; this prevents + // any future fields in + // [InstancePartition][google.spanner.admin.instance.v1.InstancePartition] + // from being erased accidentally by clients that do not know about them. 
+ google.protobuf.FieldMask field_mask = 2 + [(google.api.field_behavior) = REQUIRED]; +} + +// Metadata type for the operation returned by +// [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]. +message UpdateInstancePartitionMetadata { + // The desired end state of the update. + InstancePartition instance_partition = 1; + + // The time at which + // [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition] + // request was received. + google.protobuf.Timestamp start_time = 2; + + // The time at which this operation was cancelled. If set, this operation is + // in the process of undoing itself (which is guaranteed to succeed) and + // cannot be cancelled again. + google.protobuf.Timestamp cancel_time = 3; + + // The time at which this operation failed or was completed successfully. + google.protobuf.Timestamp end_time = 4; +} + +// The request for +// [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions]. +message ListInstancePartitionsRequest { + // Required. The instance whose instance partitions should be listed. Values + // are of the form `projects//instances/`. Use `{instance} + // = '-'` to list instance partitions for all Instances in a project, e.g., + // `projects/myproject/instances/-`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Instance" + } + ]; + + // Number of instance partitions to be returned in the response. If 0 or less, + // defaults to the server's maximum allowed page size. + int32 page_size = 2; + + // If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.next_page_token] + // from a previous + // [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse]. + string page_token = 3; + + // Optional. 
Deadline used while retrieving metadata for instance partitions. + // Instance partitions whose metadata cannot be retrieved within this deadline + // will be added to + // [unreachable][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.unreachable] + // in + // [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse]. + google.protobuf.Timestamp instance_partition_deadline = 4 + [(google.api.field_behavior) = OPTIONAL]; +} + +// The response for +// [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions]. +message ListInstancePartitionsResponse { + // The list of requested instancePartitions. + repeated InstancePartition instance_partitions = 1; + + // `next_page_token` can be sent in a subsequent + // [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions] + // call to fetch more of the matching instance partitions. + string next_page_token = 2; + + // The list of unreachable instances or instance partitions. + // It includes the names of instances or instance partitions whose metadata + // could not be retrieved within + // [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline]. + repeated string unreachable = 3; +} + +// The request for +// [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations]. +message ListInstancePartitionOperationsRequest { + // Required. The parent instance of the instance partition operations. + // Values are of the form `projects//instances/`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Instance" + } + ]; + + // Optional. An expression that filters the list of returned operations. + // + // A filter expression consists of a field name, a + // comparison operator, and a value for filtering. 
+ // The value must be a string, a number, or a boolean. The comparison operator + // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. + // Colon `:` is the contains operator. Filter rules are not case sensitive. + // + // The following fields in the Operation are eligible for filtering: + // + // * `name` - The name of the long-running operation + // * `done` - False if the operation is in progress, else true. + // * `metadata.@type` - the type of metadata. For example, the type string + // for + // [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata] + // is + // `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata`. + // * `metadata.` - any field in metadata.value. + // `metadata.@type` must be specified first, if filtering on metadata + // fields. + // * `error` - Error associated with the long-running operation. + // * `response.@type` - the type of response. + // * `response.` - any field in response.value. + // + // You can combine multiple expressions by enclosing each expression in + // parentheses. By default, expressions are combined with AND logic. However, + // you can specify AND, OR, and NOT logic explicitly. + // + // Here are a few examples: + // + // * `done:true` - The operation is complete. + // * `(metadata.@type=` \ + // `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata) + // AND` \ + // `(metadata.instance_partition.name:custom-instance-partition) AND` \ + // `(metadata.start_time < \"2021-03-28T14:50:00Z\") AND` \ + // `(error:*)` - Return operations where: + // * The operation's metadata type is + // [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]. + // * The instance partition name contains "custom-instance-partition". + // * The operation started before 2021-03-28T14:50:00Z. + // * The operation resulted in an error. 
+ string filter = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Number of operations to be returned in the response. If 0 or + // less, defaults to the server's maximum allowed page size. + int32 page_size = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.next_page_token] + // from a previous + // [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse] + // to the same `parent` and with the same `filter`. + string page_token = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Deadline used while retrieving metadata for instance partition + // operations. Instance partitions whose operation metadata cannot be + // retrieved within this deadline will be added to + // [unreachable_instance_partitions][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.unreachable_instance_partitions] + // in + // [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse]. + google.protobuf.Timestamp instance_partition_deadline = 5 + [(google.api.field_behavior) = OPTIONAL]; +} + +// The response for +// [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations]. +message ListInstancePartitionOperationsResponse { + // The list of matching instance partition long-running operations. Each + // operation's name will be + // prefixed by the instance partition's name. The operation's + // metadata field type + // `metadata.type_url` describes the type of the metadata. 
+ repeated google.longrunning.Operation operations = 1; + + // `next_page_token` can be sent in a subsequent + // [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations] + // call to fetch more of the matching metadata. + string next_page_token = 2; + + // The list of unreachable instance partitions. + // It includes the names of instance partitions whose operation metadata could + // not be retrieved within + // [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline]. + repeated string unreachable_instance_partitions = 3; +} + +// The request for +// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]. +message MoveInstanceRequest { + // Required. The instance to move. + // Values are of the form `projects//instances/`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Instance" + } + ]; + + // Required. The target instance configuration where to move the instance. + // Values are of the form `projects//instanceConfigs/`. + string target_config = 2 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/InstanceConfig" + } + ]; +} + +// The response for +// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]. +message MoveInstanceResponse {} + +// Metadata type for the operation returned by +// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]. +message MoveInstanceMetadata { + // The target instance configuration where to move the instance. + // Values are of the form `projects//instanceConfigs/`. + string target_config = 1; + + // The progress of the + // [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance] + // operation. 
+ // [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent] + // is reset when cancellation is requested. + OperationProgress progress = 2; + + // The time at which this operation was cancelled. + google.protobuf.Timestamp cancel_time = 3; +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/clirr-ignored-differences.xml b/java-spanner/proto-google-cloud-spanner-executor-v1/clirr-ignored-differences.xml new file mode 100644 index 000000000000..50ed2b0eec57 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/clirr-ignored-differences.xml @@ -0,0 +1,157 @@ + + + + + 7012 + com/google/spanner/executor/v1/*OrBuilder + * get*(*) + + + 7012 + com/google/spanner/executor/v1/*OrBuilder + boolean contains*(*) + + + 7012 + com/google/spanner/executor/v1/*OrBuilder + boolean has*(*) + + + 8001 + com/google/spanner/executor/v1/SpannerExecutorProxyGrpc + + + 8001 + com/google/spanner/executor/v1/SpannerExecutorProxyGrpc$SpannerExecutorProxyBlockingStub + + + 8001 + com/google/spanner/executor/v1/SpannerExecutorProxyGrpc$SpannerExecutorProxyFutureStub + + + 8001 + com/google/spanner/executor/v1/SpannerExecutorProxyGrpc$SpannerExecutorProxyImplBase + + + 8001 + com/google/spanner/executor/v1/SpannerExecutorProxyGrpc$SpannerExecutorProxyStub + + + + + + 5001 + com/google/spanner/executor/v1/* + com/google/protobuf/GeneratedMessage + + + 5001 + com/google/spanner/executor/v1/*$Builder + com/google/protobuf/GeneratedMessage$Builder + + + 5001 + com/google/spanner/executor/v1/*$* + com/google/protobuf/GeneratedMessage + + + 5001 + com/google/spanner/executor/v1/*$*$Builder + com/google/protobuf/GeneratedMessage$Builder + + + 5001 + com/google/spanner/executor/v1/*$*$* + com/google/protobuf/GeneratedMessage + + + 5001 + com/google/spanner/executor/v1/*$*$*$Builder + com/google/protobuf/GeneratedMessage$Builder + + + 5001 + com/google/spanner/executor/v1/*Proto + com/google/protobuf/GeneratedFile + + + + 7005 + 
com/google/spanner/executor/v1/** + * newBuilderForType(*) + ** + + + + 7006 + com/google/spanner/executor/v1/** + * internalGetFieldAccessorTable() + ** + + + + 7014 + com/google/spanner/executor/v1/** + * getDescriptor() + + + 7006 + com/google/spanner/executor/v1/** + * getDefaultInstanceForType() + ** + + + 7006 + com/google/spanner/executor/v1/** + * addRepeatedField(*) + ** + + + 7006 + com/google/spanner/executor/v1/** + * clear() + ** + + + 7006 + com/google/spanner/executor/v1/** + * clearField(*) + ** + + + 7006 + com/google/spanner/executor/v1/** + * clearOneof(*) + ** + + + 7006 + com/google/spanner/executor/v1/** + * clone() + ** + + + 7006 + com/google/spanner/executor/v1/** + * mergeUnknownFields(*) + ** + + + 7006 + com/google/spanner/executor/v1/** + * setField(*) + ** + + + 7006 + com/google/spanner/executor/v1/** + * setRepeatedField(*) + ** + + + 7006 + com/google/spanner/executor/v1/** + * setUnknownFields(*) + ** + + diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/pom.xml b/java-spanner/proto-google-cloud-spanner-executor-v1/pom.xml new file mode 100644 index 000000000000..674f86aca8cf --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/pom.xml @@ -0,0 +1,78 @@ + + 4.0.0 + com.google.api.grpc + proto-google-cloud-spanner-executor-v1 + 6.112.1-SNAPSHOT + proto-google-cloud-spanner-executor-v1 + Proto library for google-cloud-spanner + + com.google.cloud + google-cloud-spanner-parent + 6.112.1-SNAPSHOT + + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-common-protos + + + com.google.api.grpc + proto-google-iam-v1 + + + com.google.api + api-common + + + com.google.guava + guava + + + com.google.api.grpc + proto-google-cloud-spanner-v1 + + + com.google.api.grpc + proto-google-cloud-spanner-admin-instance-v1 + + + com.google.api.grpc + proto-google-cloud-spanner-admin-database-v1 + + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + 
com.google.api.grpc:proto-google-iam-v1,com.google.api:api-common,com.google.guava:guava + + + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + org.codehaus.mojo + clirr-maven-plugin + + + com/google/spanner/executor/v1/** + + + + + diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdaptMessageAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdaptMessageAction.java new file mode 100644 index 000000000000..f00a4bdb6790 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdaptMessageAction.java @@ -0,0 +1,1532 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * A single Adapt message request.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.AdaptMessageAction} + */ +@com.google.protobuf.Generated +public final class AdaptMessageAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.AdaptMessageAction) + AdaptMessageActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "AdaptMessageAction"); + } + + // Use AdaptMessageAction.newBuilder() to construct. + private AdaptMessageAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private AdaptMessageAction() { + databaseUri_ = ""; + protocol_ = ""; + payload_ = com.google.protobuf.ByteString.EMPTY; + query_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_AdaptMessageAction_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 4: + return internalGetAttachments(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_AdaptMessageAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.AdaptMessageAction.class, + com.google.spanner.executor.v1.AdaptMessageAction.Builder.class); + } + + public static final int DATABASE_URI_FIELD_NUMBER = 1; + + 
@SuppressWarnings("serial") + private volatile java.lang.Object databaseUri_ = ""; + + /** + * + * + *
    +   * The fully qualified uri of the database to send AdaptMessage to.
    +   * 
    + * + * string database_uri = 1; + * + * @return The databaseUri. + */ + @java.lang.Override + public java.lang.String getDatabaseUri() { + java.lang.Object ref = databaseUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseUri_ = s; + return s; + } + } + + /** + * + * + *
    +   * The fully qualified uri of the database to send AdaptMessage to.
    +   * 
    + * + * string database_uri = 1; + * + * @return The bytes for databaseUri. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseUriBytes() { + java.lang.Object ref = databaseUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROTOCOL_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object protocol_ = ""; + + /** + * + * + *
    +   * The protocol to use for the request.
    +   * 
    + * + * string protocol = 2; + * + * @return The protocol. + */ + @java.lang.Override + public java.lang.String getProtocol() { + java.lang.Object ref = protocol_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + protocol_ = s; + return s; + } + } + + /** + * + * + *
    +   * The protocol to use for the request.
    +   * 
    + * + * string protocol = 2; + * + * @return The bytes for protocol. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProtocolBytes() { + java.lang.Object ref = protocol_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + protocol_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAYLOAD_FIELD_NUMBER = 3; + private com.google.protobuf.ByteString payload_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * The payload of the request.
    +   * 
    + * + * bytes payload = 3; + * + * @return The payload. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPayload() { + return payload_; + } + + public static final int ATTACHMENTS_FIELD_NUMBER = 4; + + private static final class AttachmentsDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_AdaptMessageAction_AttachmentsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField attachments_; + + private com.google.protobuf.MapField + internalGetAttachments() { + if (attachments_ == null) { + return com.google.protobuf.MapField.emptyMapField(AttachmentsDefaultEntryHolder.defaultEntry); + } + return attachments_; + } + + public int getAttachmentsCount() { + return internalGetAttachments().getMap().size(); + } + + /** + * + * + *
    +   * Attachments to be sent with the request.
    +   * 
    + * + * map<string, string> attachments = 4; + */ + @java.lang.Override + public boolean containsAttachments(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetAttachments().getMap().containsKey(key); + } + + /** Use {@link #getAttachmentsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getAttachments() { + return getAttachmentsMap(); + } + + /** + * + * + *
    +   * Attachments to be sent with the request.
    +   * 
    + * + * map<string, string> attachments = 4; + */ + @java.lang.Override + public java.util.Map getAttachmentsMap() { + return internalGetAttachments().getMap(); + } + + /** + * + * + *
    +   * Attachments to be sent with the request.
    +   * 
    + * + * map<string, string> attachments = 4; + */ + @java.lang.Override + public /* nullable */ java.lang.String getAttachmentsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetAttachments().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
    +   * Attachments to be sent with the request.
    +   * 
    + * + * map<string, string> attachments = 4; + */ + @java.lang.Override + public java.lang.String getAttachmentsOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetAttachments().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int QUERY_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object query_ = ""; + + /** + * + * + *
    +   * The query to be sent with the request.
    +   * 
    + * + * string query = 5; + * + * @return The query. + */ + @java.lang.Override + public java.lang.String getQuery() { + java.lang.Object ref = query_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + query_ = s; + return s; + } + } + + /** + * + * + *
    +   * The query to be sent with the request.
    +   * 
    + * + * string query = 5; + * + * @return The bytes for query. + */ + @java.lang.Override + public com.google.protobuf.ByteString getQueryBytes() { + java.lang.Object ref = query_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + query_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PREPARE_THEN_EXECUTE_FIELD_NUMBER = 6; + private boolean prepareThenExecute_ = false; + + /** + * + * + *
    +   * If true, the action will send a Prepare request first and then an
    +   * Execute request right after to execute the query. This is only supported
    +   * for Cloud Client path.
    +   * 
    + * + * bool prepare_then_execute = 6; + * + * @return The prepareThenExecute. + */ + @java.lang.Override + public boolean getPrepareThenExecute() { + return prepareThenExecute_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseUri_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, databaseUri_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(protocol_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, protocol_); + } + if (!payload_.isEmpty()) { + output.writeBytes(3, payload_); + } + com.google.protobuf.GeneratedMessage.serializeStringMapTo( + output, internalGetAttachments(), AttachmentsDefaultEntryHolder.defaultEntry, 4); + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(query_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, query_); + } + if (prepareThenExecute_ != false) { + output.writeBool(6, prepareThenExecute_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseUri_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, databaseUri_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(protocol_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, protocol_); + } + if (!payload_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(3, payload_); + } + for (java.util.Map.Entry entry : + 
internalGetAttachments().getMap().entrySet()) { + com.google.protobuf.MapEntry attachments__ = + AttachmentsDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, attachments__); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(query_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, query_); + } + if (prepareThenExecute_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(6, prepareThenExecute_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.AdaptMessageAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.AdaptMessageAction other = + (com.google.spanner.executor.v1.AdaptMessageAction) obj; + + if (!getDatabaseUri().equals(other.getDatabaseUri())) return false; + if (!getProtocol().equals(other.getProtocol())) return false; + if (!getPayload().equals(other.getPayload())) return false; + if (!internalGetAttachments().equals(other.internalGetAttachments())) return false; + if (!getQuery().equals(other.getQuery())) return false; + if (getPrepareThenExecute() != other.getPrepareThenExecute()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DATABASE_URI_FIELD_NUMBER; + hash = (53 * hash) + getDatabaseUri().hashCode(); + hash = (37 * hash) + PROTOCOL_FIELD_NUMBER; + hash = (53 * hash) + getProtocol().hashCode(); + hash = (37 * hash) + PAYLOAD_FIELD_NUMBER; + hash = (53 * hash) + 
getPayload().hashCode(); + if (!internalGetAttachments().getMap().isEmpty()) { + hash = (37 * hash) + ATTACHMENTS_FIELD_NUMBER; + hash = (53 * hash) + internalGetAttachments().hashCode(); + } + hash = (37 * hash) + QUERY_FIELD_NUMBER; + hash = (53 * hash) + getQuery().hashCode(); + hash = (37 * hash) + PREPARE_THEN_EXECUTE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getPrepareThenExecute()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.AdaptMessageAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.AdaptMessageAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.AdaptMessageAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.AdaptMessageAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.AdaptMessageAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.AdaptMessageAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + 
+ public static com.google.spanner.executor.v1.AdaptMessageAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.AdaptMessageAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.AdaptMessageAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.AdaptMessageAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.AdaptMessageAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.AdaptMessageAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.AdaptMessageAction prototype) { + return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * A single Adapt message request.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.AdaptMessageAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.AdaptMessageAction) + com.google.spanner.executor.v1.AdaptMessageActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_AdaptMessageAction_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 4: + return internalGetAttachments(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 4: + return internalGetMutableAttachments(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_AdaptMessageAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.AdaptMessageAction.class, + com.google.spanner.executor.v1.AdaptMessageAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.AdaptMessageAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + databaseUri_ = ""; + protocol_ = ""; + payload_ = com.google.protobuf.ByteString.EMPTY; + 
internalGetMutableAttachments().clear(); + query_ = ""; + prepareThenExecute_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_AdaptMessageAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.AdaptMessageAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.AdaptMessageAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.AdaptMessageAction build() { + com.google.spanner.executor.v1.AdaptMessageAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.AdaptMessageAction buildPartial() { + com.google.spanner.executor.v1.AdaptMessageAction result = + new com.google.spanner.executor.v1.AdaptMessageAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.AdaptMessageAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.databaseUri_ = databaseUri_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.protocol_ = protocol_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.payload_ = payload_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.attachments_ = internalGetAttachments(); + result.attachments_.makeImmutable(); + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.query_ = query_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.prepareThenExecute_ = prepareThenExecute_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof 
com.google.spanner.executor.v1.AdaptMessageAction) { + return mergeFrom((com.google.spanner.executor.v1.AdaptMessageAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.AdaptMessageAction other) { + if (other == com.google.spanner.executor.v1.AdaptMessageAction.getDefaultInstance()) + return this; + if (!other.getDatabaseUri().isEmpty()) { + databaseUri_ = other.databaseUri_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getProtocol().isEmpty()) { + protocol_ = other.protocol_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getPayload().isEmpty()) { + setPayload(other.getPayload()); + } + internalGetMutableAttachments().mergeFrom(other.internalGetAttachments()); + bitField0_ |= 0x00000008; + if (!other.getQuery().isEmpty()) { + query_ = other.query_; + bitField0_ |= 0x00000010; + onChanged(); + } + if (other.getPrepareThenExecute() != false) { + setPrepareThenExecute(other.getPrepareThenExecute()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + databaseUri_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + protocol_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + payload_ = input.readBytes(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + com.google.protobuf.MapEntry attachments__ = + 
input.readMessage( + AttachmentsDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableAttachments() + .getMutableMap() + .put(attachments__.getKey(), attachments__.getValue()); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + query_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 48: + { + prepareThenExecute_ = input.readBool(); + bitField0_ |= 0x00000020; + break; + } // case 48 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object databaseUri_ = ""; + + /** + * + * + *
    +     * The fully qualified uri of the database to send AdaptMessage to.
    +     * 
    + * + * string database_uri = 1; + * + * @return The databaseUri. + */ + public java.lang.String getDatabaseUri() { + java.lang.Object ref = databaseUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The fully qualified uri of the database to send AdaptMessage to.
    +     * 
    + * + * string database_uri = 1; + * + * @return The bytes for databaseUri. + */ + public com.google.protobuf.ByteString getDatabaseUriBytes() { + java.lang.Object ref = databaseUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The fully qualified uri of the database to send AdaptMessage to.
    +     * 
    + * + * string database_uri = 1; + * + * @param value The databaseUri to set. + * @return This builder for chaining. + */ + public Builder setDatabaseUri(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + databaseUri_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The fully qualified uri of the database to send AdaptMessage to.
    +     * 
    + * + * string database_uri = 1; + * + * @return This builder for chaining. + */ + public Builder clearDatabaseUri() { + databaseUri_ = getDefaultInstance().getDatabaseUri(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The fully qualified uri of the database to send AdaptMessage to.
    +     * 
    + * + * string database_uri = 1; + * + * @param value The bytes for databaseUri to set. + * @return This builder for chaining. + */ + public Builder setDatabaseUriBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + databaseUri_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object protocol_ = ""; + + /** + * + * + *
    +     * The protocol to use for the request.
    +     * 
    + * + * string protocol = 2; + * + * @return The protocol. + */ + public java.lang.String getProtocol() { + java.lang.Object ref = protocol_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + protocol_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The protocol to use for the request.
    +     * 
    + * + * string protocol = 2; + * + * @return The bytes for protocol. + */ + public com.google.protobuf.ByteString getProtocolBytes() { + java.lang.Object ref = protocol_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + protocol_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The protocol to use for the request.
    +     * 
    + * + * string protocol = 2; + * + * @param value The protocol to set. + * @return This builder for chaining. + */ + public Builder setProtocol(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + protocol_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The protocol to use for the request.
    +     * 
    + * + * string protocol = 2; + * + * @return This builder for chaining. + */ + public Builder clearProtocol() { + protocol_ = getDefaultInstance().getProtocol(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The protocol to use for the request.
    +     * 
    + * + * string protocol = 2; + * + * @param value The bytes for protocol to set. + * @return This builder for chaining. + */ + public Builder setProtocolBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + protocol_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString payload_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * The payload of the request.
    +     * 
    + * + * bytes payload = 3; + * + * @return The payload. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPayload() { + return payload_; + } + + /** + * + * + *
    +     * The payload of the request.
    +     * 
    + * + * bytes payload = 3; + * + * @param value The payload to set. + * @return This builder for chaining. + */ + public Builder setPayload(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + payload_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The payload of the request.
    +     * 
    + * + * bytes payload = 3; + * + * @return This builder for chaining. + */ + public Builder clearPayload() { + bitField0_ = (bitField0_ & ~0x00000004); + payload_ = getDefaultInstance().getPayload(); + onChanged(); + return this; + } + + private com.google.protobuf.MapField attachments_; + + private com.google.protobuf.MapField + internalGetAttachments() { + if (attachments_ == null) { + return com.google.protobuf.MapField.emptyMapField( + AttachmentsDefaultEntryHolder.defaultEntry); + } + return attachments_; + } + + private com.google.protobuf.MapField + internalGetMutableAttachments() { + if (attachments_ == null) { + attachments_ = + com.google.protobuf.MapField.newMapField(AttachmentsDefaultEntryHolder.defaultEntry); + } + if (!attachments_.isMutable()) { + attachments_ = attachments_.copy(); + } + bitField0_ |= 0x00000008; + onChanged(); + return attachments_; + } + + public int getAttachmentsCount() { + return internalGetAttachments().getMap().size(); + } + + /** + * + * + *
    +     * Attachments to be sent with the request.
    +     * 
    + * + * map<string, string> attachments = 4; + */ + @java.lang.Override + public boolean containsAttachments(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetAttachments().getMap().containsKey(key); + } + + /** Use {@link #getAttachmentsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getAttachments() { + return getAttachmentsMap(); + } + + /** + * + * + *
    +     * Attachments to be sent with the request.
    +     * 
    + * + * map<string, string> attachments = 4; + */ + @java.lang.Override + public java.util.Map getAttachmentsMap() { + return internalGetAttachments().getMap(); + } + + /** + * + * + *
    +     * Attachments to be sent with the request.
    +     * 
    + * + * map<string, string> attachments = 4; + */ + @java.lang.Override + public /* nullable */ java.lang.String getAttachmentsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetAttachments().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
    +     * Attachments to be sent with the request.
    +     * 
    + * + * map<string, string> attachments = 4; + */ + @java.lang.Override + public java.lang.String getAttachmentsOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetAttachments().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearAttachments() { + bitField0_ = (bitField0_ & ~0x00000008); + internalGetMutableAttachments().getMutableMap().clear(); + return this; + } + + /** + * + * + *
    +     * Attachments to be sent with the request.
    +     * 
    + * + * map<string, string> attachments = 4; + */ + public Builder removeAttachments(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableAttachments().getMutableMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableAttachments() { + bitField0_ |= 0x00000008; + return internalGetMutableAttachments().getMutableMap(); + } + + /** + * + * + *
    +     * Attachments to be sent with the request.
    +     * 
    + * + * map<string, string> attachments = 4; + */ + public Builder putAttachments(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableAttachments().getMutableMap().put(key, value); + bitField0_ |= 0x00000008; + return this; + } + + /** + * + * + *
    +     * Attachments to be sent with the request.
    +     * 
    + * + * map<string, string> attachments = 4; + */ + public Builder putAllAttachments(java.util.Map values) { + internalGetMutableAttachments().getMutableMap().putAll(values); + bitField0_ |= 0x00000008; + return this; + } + + private java.lang.Object query_ = ""; + + /** + * + * + *
    +     * The query to be sent with the request.
    +     * 
    + * + * string query = 5; + * + * @return The query. + */ + public java.lang.String getQuery() { + java.lang.Object ref = query_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + query_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The query to be sent with the request.
    +     * 
    + * + * string query = 5; + * + * @return The bytes for query. + */ + public com.google.protobuf.ByteString getQueryBytes() { + java.lang.Object ref = query_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + query_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The query to be sent with the request.
    +     * 
    + * + * string query = 5; + * + * @param value The query to set. + * @return This builder for chaining. + */ + public Builder setQuery(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + query_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The query to be sent with the request.
    +     * 
    + * + * string query = 5; + * + * @return This builder for chaining. + */ + public Builder clearQuery() { + query_ = getDefaultInstance().getQuery(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The query to be sent with the request.
    +     * 
    + * + * string query = 5; + * + * @param value The bytes for query to set. + * @return This builder for chaining. + */ + public Builder setQueryBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + query_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + private boolean prepareThenExecute_; + + /** + * + * + *
    +     * If true, the action will send a Prepare request first and then an
    +     * Execute request right after to execute the query. This is only supported
    +     * for Cloud Client path.
    +     * 
    + * + * bool prepare_then_execute = 6; + * + * @return The prepareThenExecute. + */ + @java.lang.Override + public boolean getPrepareThenExecute() { + return prepareThenExecute_; + } + + /** + * + * + *
    +     * If true, the action will send a Prepare request first and then an
    +     * Execute request right after to execute the query. This is only supported
    +     * for Cloud Client path.
    +     * 
    + * + * bool prepare_then_execute = 6; + * + * @param value The prepareThenExecute to set. + * @return This builder for chaining. + */ + public Builder setPrepareThenExecute(boolean value) { + + prepareThenExecute_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If true, the action will send a Prepare request first and then an
    +     * Execute request right after to execute the query. This is only supported
    +     * for Cloud Client path.
    +     * 
    + * + * bool prepare_then_execute = 6; + * + * @return This builder for chaining. + */ + public Builder clearPrepareThenExecute() { + bitField0_ = (bitField0_ & ~0x00000020); + prepareThenExecute_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.AdaptMessageAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.AdaptMessageAction) + private static final com.google.spanner.executor.v1.AdaptMessageAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.AdaptMessageAction(); + } + + public static com.google.spanner.executor.v1.AdaptMessageAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AdaptMessageAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.AdaptMessageAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdaptMessageActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdaptMessageActionOrBuilder.java new file mode 100644 index 000000000000..b398ea3de822 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdaptMessageActionOrBuilder.java @@ -0,0 +1,197 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface AdaptMessageActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.AdaptMessageAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The fully qualified uri of the database to send AdaptMessage to.
    +   * 
    + * + * string database_uri = 1; + * + * @return The databaseUri. + */ + java.lang.String getDatabaseUri(); + + /** + * + * + *
    +   * The fully qualified uri of the database to send AdaptMessage to.
    +   * 
    + * + * string database_uri = 1; + * + * @return The bytes for databaseUri. + */ + com.google.protobuf.ByteString getDatabaseUriBytes(); + + /** + * + * + *
    +   * The protocol to use for the request.
    +   * 
    + * + * string protocol = 2; + * + * @return The protocol. + */ + java.lang.String getProtocol(); + + /** + * + * + *
    +   * The protocol to use for the request.
    +   * 
    + * + * string protocol = 2; + * + * @return The bytes for protocol. + */ + com.google.protobuf.ByteString getProtocolBytes(); + + /** + * + * + *
    +   * The payload of the request.
    +   * 
    + * + * bytes payload = 3; + * + * @return The payload. + */ + com.google.protobuf.ByteString getPayload(); + + /** + * + * + *
    +   * Attachments to be sent with the request.
    +   * 
    + * + * map<string, string> attachments = 4; + */ + int getAttachmentsCount(); + + /** + * + * + *
    +   * Attachments to be sent with the request.
    +   * 
    + * + * map<string, string> attachments = 4; + */ + boolean containsAttachments(java.lang.String key); + + /** Use {@link #getAttachmentsMap()} instead. */ + @java.lang.Deprecated + java.util.Map getAttachments(); + + /** + * + * + *
    +   * Attachments to be sent with the request.
    +   * 
    + * + * map<string, string> attachments = 4; + */ + java.util.Map getAttachmentsMap(); + + /** + * + * + *
    +   * Attachments to be sent with the request.
    +   * 
    + * + * map<string, string> attachments = 4; + */ + /* nullable */ + java.lang.String getAttachmentsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue); + + /** + * + * + *
    +   * Attachments to be sent with the request.
    +   * 
    + * + * map<string, string> attachments = 4; + */ + java.lang.String getAttachmentsOrThrow(java.lang.String key); + + /** + * + * + *
    +   * The query to be sent with the request.
    +   * 
    + * + * string query = 5; + * + * @return The query. + */ + java.lang.String getQuery(); + + /** + * + * + *
    +   * The query to be sent with the request.
    +   * 
    + * + * string query = 5; + * + * @return The bytes for query. + */ + com.google.protobuf.ByteString getQueryBytes(); + + /** + * + * + *
    +   * If true, the action will send a Prepare request first and then an
    +   * Execute request right after to execute the query. This is only supported
    +   * for Cloud Client path.
    +   * 
    + * + * bool prepare_then_execute = 6; + * + * @return The prepareThenExecute. + */ + boolean getPrepareThenExecute(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AddSplitPointsAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AddSplitPointsAction.java new file mode 100644 index 000000000000..b9c8edfdda68 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AddSplitPointsAction.java @@ -0,0 +1,1498 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that adds a split point to a Cloud Spanner database.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.AddSplitPointsAction} + */ +@com.google.protobuf.Generated +public final class AddSplitPointsAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.AddSplitPointsAction) + AddSplitPointsActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "AddSplitPointsAction"); + } + + // Use AddSplitPointsAction.newBuilder() to construct. + private AddSplitPointsAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private AddSplitPointsAction() { + projectId_ = ""; + instanceId_ = ""; + databaseId_ = ""; + splitPoints_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_AddSplitPointsAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_AddSplitPointsAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.AddSplitPointsAction.class, + com.google.spanner.executor.v1.AddSplitPointsAction.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + @java.lang.Override + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DATABASE_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object databaseId_ = ""; + + /** + * + * + *
    +   * Cloud database ID (not full path), e.g. "db0".
    +   * 
    + * + * string database_id = 3; + * + * @return The databaseId. + */ + @java.lang.Override + public java.lang.String getDatabaseId() { + java.lang.Object ref = databaseId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud database ID (not full path), e.g. "db0".
    +   * 
    + * + * string database_id = 3; + * + * @return The bytes for databaseId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseIdBytes() { + java.lang.Object ref = databaseId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SPLIT_POINTS_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private java.util.List splitPoints_; + + /** + * + * + *
    +   * The split points to add.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + @java.lang.Override + public java.util.List getSplitPointsList() { + return splitPoints_; + } + + /** + * + * + *
    +   * The split points to add.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + @java.lang.Override + public java.util.List + getSplitPointsOrBuilderList() { + return splitPoints_; + } + + /** + * + * + *
    +   * The split points to add.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + @java.lang.Override + public int getSplitPointsCount() { + return splitPoints_.size(); + } + + /** + * + * + *
    +   * The split points to add.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.SplitPoints getSplitPoints(int index) { + return splitPoints_.get(index); + } + + /** + * + * + *
    +   * The split points to add.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.SplitPointsOrBuilder getSplitPointsOrBuilder( + int index) { + return splitPoints_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, databaseId_); + } + for (int i = 0; i < splitPoints_.size(); i++) { + output.writeMessage(4, splitPoints_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, databaseId_); + } + for (int i = 0; i < splitPoints_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, 
splitPoints_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.AddSplitPointsAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.AddSplitPointsAction other = + (com.google.spanner.executor.v1.AddSplitPointsAction) obj; + + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getInstanceId().equals(other.getInstanceId())) return false; + if (!getDatabaseId().equals(other.getDatabaseId())) return false; + if (!getSplitPointsList().equals(other.getSplitPointsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + hash = (37 * hash) + DATABASE_ID_FIELD_NUMBER; + hash = (53 * hash) + getDatabaseId().hashCode(); + if (getSplitPointsCount() > 0) { + hash = (37 * hash) + SPLIT_POINTS_FIELD_NUMBER; + hash = (53 * hash) + getSplitPointsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.AddSplitPointsAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.AddSplitPointsAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.AddSplitPointsAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.AddSplitPointsAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.AddSplitPointsAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.AddSplitPointsAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.AddSplitPointsAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.AddSplitPointsAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.AddSplitPointsAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.AddSplitPointsAction parseDelimitedFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.AddSplitPointsAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.AddSplitPointsAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.AddSplitPointsAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that adds a split point to a Cloud Spanner database.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.AddSplitPointsAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.AddSplitPointsAction) + com.google.spanner.executor.v1.AddSplitPointsActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_AddSplitPointsAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_AddSplitPointsAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.AddSplitPointsAction.class, + com.google.spanner.executor.v1.AddSplitPointsAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.AddSplitPointsAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + projectId_ = ""; + instanceId_ = ""; + databaseId_ = ""; + if (splitPointsBuilder_ == null) { + splitPoints_ = java.util.Collections.emptyList(); + } else { + splitPoints_ = null; + splitPointsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_AddSplitPointsAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.AddSplitPointsAction getDefaultInstanceForType() { + return 
com.google.spanner.executor.v1.AddSplitPointsAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.AddSplitPointsAction build() { + com.google.spanner.executor.v1.AddSplitPointsAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.AddSplitPointsAction buildPartial() { + com.google.spanner.executor.v1.AddSplitPointsAction result = + new com.google.spanner.executor.v1.AddSplitPointsAction(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.executor.v1.AddSplitPointsAction result) { + if (splitPointsBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0)) { + splitPoints_ = java.util.Collections.unmodifiableList(splitPoints_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.splitPoints_ = splitPoints_; + } else { + result.splitPoints_ = splitPointsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.executor.v1.AddSplitPointsAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.projectId_ = projectId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.instanceId_ = instanceId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.databaseId_ = databaseId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.AddSplitPointsAction) { + return mergeFrom((com.google.spanner.executor.v1.AddSplitPointsAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.AddSplitPointsAction other) { + if (other == 
com.google.spanner.executor.v1.AddSplitPointsAction.getDefaultInstance()) + return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getDatabaseId().isEmpty()) { + databaseId_ = other.databaseId_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (splitPointsBuilder_ == null) { + if (!other.splitPoints_.isEmpty()) { + if (splitPoints_.isEmpty()) { + splitPoints_ = other.splitPoints_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureSplitPointsIsMutable(); + splitPoints_.addAll(other.splitPoints_); + } + onChanged(); + } + } else { + if (!other.splitPoints_.isEmpty()) { + if (splitPointsBuilder_.isEmpty()) { + splitPointsBuilder_.dispose(); + splitPointsBuilder_ = null; + splitPoints_ = other.splitPoints_; + bitField0_ = (bitField0_ & ~0x00000008); + splitPointsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetSplitPointsFieldBuilder() + : null; + } else { + splitPointsBuilder_.addAllMessages(other.splitPoints_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + instanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + databaseId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + com.google.spanner.admin.database.v1.SplitPoints m = + input.readMessage( + com.google.spanner.admin.database.v1.SplitPoints.parser(), + extensionRegistry); + if (splitPointsBuilder_ == null) { + ensureSplitPointsIsMutable(); + splitPoints_.add(m); + } else { + splitPointsBuilder_.addMessage(m); + } + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearInstanceId() { + instanceId_ = getDefaultInstance().getInstanceId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The bytes for instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object databaseId_ = ""; + + /** + * + * + *
    +     * Cloud database ID (not full path), e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @return The databaseId. + */ + public java.lang.String getDatabaseId() { + java.lang.Object ref = databaseId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud database ID (not full path), e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @return The bytes for databaseId. + */ + public com.google.protobuf.ByteString getDatabaseIdBytes() { + java.lang.Object ref = databaseId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud database ID (not full path), e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @param value The databaseId to set. + * @return This builder for chaining. + */ + public Builder setDatabaseId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + databaseId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud database ID (not full path), e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @return This builder for chaining. + */ + public Builder clearDatabaseId() { + databaseId_ = getDefaultInstance().getDatabaseId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud database ID (not full path), e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @param value The bytes for databaseId to set. + * @return This builder for chaining. + */ + public Builder setDatabaseIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + databaseId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.util.List splitPoints_ = + java.util.Collections.emptyList(); + + private void ensureSplitPointsIsMutable() { + if (!((bitField0_ & 0x00000008) != 0)) { + splitPoints_ = + new java.util.ArrayList(splitPoints_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.SplitPoints, + com.google.spanner.admin.database.v1.SplitPoints.Builder, + com.google.spanner.admin.database.v1.SplitPointsOrBuilder> + splitPointsBuilder_; + + /** + * + * + *
    +     * The split points to add.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + public java.util.List getSplitPointsList() { + if (splitPointsBuilder_ == null) { + return java.util.Collections.unmodifiableList(splitPoints_); + } else { + return splitPointsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * The split points to add.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + public int getSplitPointsCount() { + if (splitPointsBuilder_ == null) { + return splitPoints_.size(); + } else { + return splitPointsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * The split points to add.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + public com.google.spanner.admin.database.v1.SplitPoints getSplitPoints(int index) { + if (splitPointsBuilder_ == null) { + return splitPoints_.get(index); + } else { + return splitPointsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * The split points to add.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + public Builder setSplitPoints( + int index, com.google.spanner.admin.database.v1.SplitPoints value) { + if (splitPointsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSplitPointsIsMutable(); + splitPoints_.set(index, value); + onChanged(); + } else { + splitPointsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The split points to add.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + public Builder setSplitPoints( + int index, com.google.spanner.admin.database.v1.SplitPoints.Builder builderForValue) { + if (splitPointsBuilder_ == null) { + ensureSplitPointsIsMutable(); + splitPoints_.set(index, builderForValue.build()); + onChanged(); + } else { + splitPointsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The split points to add.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + public Builder addSplitPoints(com.google.spanner.admin.database.v1.SplitPoints value) { + if (splitPointsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSplitPointsIsMutable(); + splitPoints_.add(value); + onChanged(); + } else { + splitPointsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * The split points to add.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + public Builder addSplitPoints( + int index, com.google.spanner.admin.database.v1.SplitPoints value) { + if (splitPointsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSplitPointsIsMutable(); + splitPoints_.add(index, value); + onChanged(); + } else { + splitPointsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The split points to add.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + public Builder addSplitPoints( + com.google.spanner.admin.database.v1.SplitPoints.Builder builderForValue) { + if (splitPointsBuilder_ == null) { + ensureSplitPointsIsMutable(); + splitPoints_.add(builderForValue.build()); + onChanged(); + } else { + splitPointsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The split points to add.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + public Builder addSplitPoints( + int index, com.google.spanner.admin.database.v1.SplitPoints.Builder builderForValue) { + if (splitPointsBuilder_ == null) { + ensureSplitPointsIsMutable(); + splitPoints_.add(index, builderForValue.build()); + onChanged(); + } else { + splitPointsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The split points to add.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + public Builder addAllSplitPoints( + java.lang.Iterable values) { + if (splitPointsBuilder_ == null) { + ensureSplitPointsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, splitPoints_); + onChanged(); + } else { + splitPointsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * The split points to add.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + public Builder clearSplitPoints() { + if (splitPointsBuilder_ == null) { + splitPoints_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + splitPointsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * The split points to add.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + public Builder removeSplitPoints(int index) { + if (splitPointsBuilder_ == null) { + ensureSplitPointsIsMutable(); + splitPoints_.remove(index); + onChanged(); + } else { + splitPointsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * The split points to add.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + public com.google.spanner.admin.database.v1.SplitPoints.Builder getSplitPointsBuilder( + int index) { + return internalGetSplitPointsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * The split points to add.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + public com.google.spanner.admin.database.v1.SplitPointsOrBuilder getSplitPointsOrBuilder( + int index) { + if (splitPointsBuilder_ == null) { + return splitPoints_.get(index); + } else { + return splitPointsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * The split points to add.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + public java.util.List + getSplitPointsOrBuilderList() { + if (splitPointsBuilder_ != null) { + return splitPointsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(splitPoints_); + } + } + + /** + * + * + *
    +     * The split points to add.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + public com.google.spanner.admin.database.v1.SplitPoints.Builder addSplitPointsBuilder() { + return internalGetSplitPointsFieldBuilder() + .addBuilder(com.google.spanner.admin.database.v1.SplitPoints.getDefaultInstance()); + } + + /** + * + * + *
    +     * The split points to add.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + public com.google.spanner.admin.database.v1.SplitPoints.Builder addSplitPointsBuilder( + int index) { + return internalGetSplitPointsFieldBuilder() + .addBuilder(index, com.google.spanner.admin.database.v1.SplitPoints.getDefaultInstance()); + } + + /** + * + * + *
    +     * The split points to add.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + public java.util.List + getSplitPointsBuilderList() { + return internalGetSplitPointsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.SplitPoints, + com.google.spanner.admin.database.v1.SplitPoints.Builder, + com.google.spanner.admin.database.v1.SplitPointsOrBuilder> + internalGetSplitPointsFieldBuilder() { + if (splitPointsBuilder_ == null) { + splitPointsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.SplitPoints, + com.google.spanner.admin.database.v1.SplitPoints.Builder, + com.google.spanner.admin.database.v1.SplitPointsOrBuilder>( + splitPoints_, ((bitField0_ & 0x00000008) != 0), getParentForChildren(), isClean()); + splitPoints_ = null; + } + return splitPointsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.AddSplitPointsAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.AddSplitPointsAction) + private static final com.google.spanner.executor.v1.AddSplitPointsAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.AddSplitPointsAction(); + } + + public static com.google.spanner.executor.v1.AddSplitPointsAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AddSplitPointsAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch 
(com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.AddSplitPointsAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AddSplitPointsActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AddSplitPointsActionOrBuilder.java new file mode 100644 index 000000000000..08060154f81b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AddSplitPointsActionOrBuilder.java @@ -0,0 +1,162 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface AddSplitPointsActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.AddSplitPointsAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + java.lang.String getInstanceId(); + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + com.google.protobuf.ByteString getInstanceIdBytes(); + + /** + * + * + *
    +   * Cloud database ID (not full path), e.g. "db0".
    +   * 
    + * + * string database_id = 3; + * + * @return The databaseId. + */ + java.lang.String getDatabaseId(); + + /** + * + * + *
    +   * Cloud database ID (not full path), e.g. "db0".
    +   * 
    + * + * string database_id = 3; + * + * @return The bytes for databaseId. + */ + com.google.protobuf.ByteString getDatabaseIdBytes(); + + /** + * + * + *
    +   * The split points to add.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + java.util.List getSplitPointsList(); + + /** + * + * + *
    +   * The split points to add.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + com.google.spanner.admin.database.v1.SplitPoints getSplitPoints(int index); + + /** + * + * + *
    +   * The split points to add.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + int getSplitPointsCount(); + + /** + * + * + *
    +   * The split points to add.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + java.util.List + getSplitPointsOrBuilderList(); + + /** + * + * + *
    +   * The split points to add.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.SplitPoints split_points = 4; + */ + com.google.spanner.admin.database.v1.SplitPointsOrBuilder getSplitPointsOrBuilder(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminAction.java new file mode 100644 index 000000000000..8aa0c64cbcc3 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminAction.java @@ -0,0 +1,9852 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * AdminAction defines all the cloud spanner admin actions, including
    + * instance/database admin ops, backup ops and operation actions.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.AdminAction} + */ +@com.google.protobuf.Generated +public final class AdminAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.AdminAction) + AdminActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "AdminAction"); + } + + // Use AdminAction.newBuilder() to construct. + private AdminAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private AdminAction() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_AdminAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_AdminAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.AdminAction.class, + com.google.spanner.executor.v1.AdminAction.Builder.class); + } + + private int actionCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object action_; + + public enum ActionCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + CREATE_USER_INSTANCE_CONFIG(1), + UPDATE_USER_INSTANCE_CONFIG(2), + DELETE_USER_INSTANCE_CONFIG(3), + GET_CLOUD_INSTANCE_CONFIG(4), + LIST_INSTANCE_CONFIGS(5), + CREATE_CLOUD_INSTANCE(6), + UPDATE_CLOUD_INSTANCE(7), + DELETE_CLOUD_INSTANCE(8), + LIST_CLOUD_INSTANCES(9), + GET_CLOUD_INSTANCE(10), + CREATE_CLOUD_DATABASE(11), + 
UPDATE_CLOUD_DATABASE_DDL(12), + UPDATE_CLOUD_DATABASE(27), + DROP_CLOUD_DATABASE(13), + LIST_CLOUD_DATABASES(14), + LIST_CLOUD_DATABASE_OPERATIONS(15), + RESTORE_CLOUD_DATABASE(16), + GET_CLOUD_DATABASE(17), + CREATE_CLOUD_BACKUP(18), + COPY_CLOUD_BACKUP(19), + GET_CLOUD_BACKUP(20), + UPDATE_CLOUD_BACKUP(21), + DELETE_CLOUD_BACKUP(22), + LIST_CLOUD_BACKUPS(23), + LIST_CLOUD_BACKUP_OPERATIONS(24), + GET_OPERATION(25), + CANCEL_OPERATION(26), + CHANGE_QUORUM_CLOUD_DATABASE(28), + ADD_SPLIT_POINTS(29), + ACTION_NOT_SET(0); + private final int value; + + private ActionCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ActionCase valueOf(int value) { + return forNumber(value); + } + + public static ActionCase forNumber(int value) { + switch (value) { + case 1: + return CREATE_USER_INSTANCE_CONFIG; + case 2: + return UPDATE_USER_INSTANCE_CONFIG; + case 3: + return DELETE_USER_INSTANCE_CONFIG; + case 4: + return GET_CLOUD_INSTANCE_CONFIG; + case 5: + return LIST_INSTANCE_CONFIGS; + case 6: + return CREATE_CLOUD_INSTANCE; + case 7: + return UPDATE_CLOUD_INSTANCE; + case 8: + return DELETE_CLOUD_INSTANCE; + case 9: + return LIST_CLOUD_INSTANCES; + case 10: + return GET_CLOUD_INSTANCE; + case 11: + return CREATE_CLOUD_DATABASE; + case 12: + return UPDATE_CLOUD_DATABASE_DDL; + case 27: + return UPDATE_CLOUD_DATABASE; + case 13: + return DROP_CLOUD_DATABASE; + case 14: + return LIST_CLOUD_DATABASES; + case 15: + return LIST_CLOUD_DATABASE_OPERATIONS; + case 16: + return RESTORE_CLOUD_DATABASE; + case 17: + return GET_CLOUD_DATABASE; + case 18: + return CREATE_CLOUD_BACKUP; + case 19: + return COPY_CLOUD_BACKUP; + case 20: + return GET_CLOUD_BACKUP; + case 21: + return UPDATE_CLOUD_BACKUP; + case 22: + return DELETE_CLOUD_BACKUP; + case 23: + return LIST_CLOUD_BACKUPS; + 
case 24: + return LIST_CLOUD_BACKUP_OPERATIONS; + case 25: + return GET_OPERATION; + case 26: + return CANCEL_OPERATION; + case 28: + return CHANGE_QUORUM_CLOUD_DATABASE; + case 29: + return ADD_SPLIT_POINTS; + case 0: + return ACTION_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public ActionCase getActionCase() { + return ActionCase.forNumber(actionCase_); + } + + public static final int CREATE_USER_INSTANCE_CONFIG_FIELD_NUMBER = 1; + + /** + * + * + *
    +   * Action that creates a user instance config.
    +   * 
    + * + * + * .google.spanner.executor.v1.CreateUserInstanceConfigAction create_user_instance_config = 1; + * + * + * @return Whether the createUserInstanceConfig field is set. + */ + @java.lang.Override + public boolean hasCreateUserInstanceConfig() { + return actionCase_ == 1; + } + + /** + * + * + *
    +   * Action that creates a user instance config.
    +   * 
    + * + * + * .google.spanner.executor.v1.CreateUserInstanceConfigAction create_user_instance_config = 1; + * + * + * @return The createUserInstanceConfig. + */ + @java.lang.Override + public com.google.spanner.executor.v1.CreateUserInstanceConfigAction + getCreateUserInstanceConfig() { + if (actionCase_ == 1) { + return (com.google.spanner.executor.v1.CreateUserInstanceConfigAction) action_; + } + return com.google.spanner.executor.v1.CreateUserInstanceConfigAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that creates a user instance config.
    +   * 
    + * + * + * .google.spanner.executor.v1.CreateUserInstanceConfigAction create_user_instance_config = 1; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.CreateUserInstanceConfigActionOrBuilder + getCreateUserInstanceConfigOrBuilder() { + if (actionCase_ == 1) { + return (com.google.spanner.executor.v1.CreateUserInstanceConfigAction) action_; + } + return com.google.spanner.executor.v1.CreateUserInstanceConfigAction.getDefaultInstance(); + } + + public static final int UPDATE_USER_INSTANCE_CONFIG_FIELD_NUMBER = 2; + + /** + * + * + *
    +   * Action that updates a user instance config.
    +   * 
    + * + * + * .google.spanner.executor.v1.UpdateUserInstanceConfigAction update_user_instance_config = 2; + * + * + * @return Whether the updateUserInstanceConfig field is set. + */ + @java.lang.Override + public boolean hasUpdateUserInstanceConfig() { + return actionCase_ == 2; + } + + /** + * + * + *
    +   * Action that updates a user instance config.
    +   * 
    + * + * + * .google.spanner.executor.v1.UpdateUserInstanceConfigAction update_user_instance_config = 2; + * + * + * @return The updateUserInstanceConfig. + */ + @java.lang.Override + public com.google.spanner.executor.v1.UpdateUserInstanceConfigAction + getUpdateUserInstanceConfig() { + if (actionCase_ == 2) { + return (com.google.spanner.executor.v1.UpdateUserInstanceConfigAction) action_; + } + return com.google.spanner.executor.v1.UpdateUserInstanceConfigAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that updates a user instance config.
    +   * 
    + * + * + * .google.spanner.executor.v1.UpdateUserInstanceConfigAction update_user_instance_config = 2; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.UpdateUserInstanceConfigActionOrBuilder + getUpdateUserInstanceConfigOrBuilder() { + if (actionCase_ == 2) { + return (com.google.spanner.executor.v1.UpdateUserInstanceConfigAction) action_; + } + return com.google.spanner.executor.v1.UpdateUserInstanceConfigAction.getDefaultInstance(); + } + + public static final int DELETE_USER_INSTANCE_CONFIG_FIELD_NUMBER = 3; + + /** + * + * + *
    +   * Action that deletes a user instance config.
    +   * 
    + * + * + * .google.spanner.executor.v1.DeleteUserInstanceConfigAction delete_user_instance_config = 3; + * + * + * @return Whether the deleteUserInstanceConfig field is set. + */ + @java.lang.Override + public boolean hasDeleteUserInstanceConfig() { + return actionCase_ == 3; + } + + /** + * + * + *
    +   * Action that deletes a user instance config.
    +   * 
    + * + * + * .google.spanner.executor.v1.DeleteUserInstanceConfigAction delete_user_instance_config = 3; + * + * + * @return The deleteUserInstanceConfig. + */ + @java.lang.Override + public com.google.spanner.executor.v1.DeleteUserInstanceConfigAction + getDeleteUserInstanceConfig() { + if (actionCase_ == 3) { + return (com.google.spanner.executor.v1.DeleteUserInstanceConfigAction) action_; + } + return com.google.spanner.executor.v1.DeleteUserInstanceConfigAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that deletes a user instance config.
    +   * 
    + * + * + * .google.spanner.executor.v1.DeleteUserInstanceConfigAction delete_user_instance_config = 3; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.DeleteUserInstanceConfigActionOrBuilder + getDeleteUserInstanceConfigOrBuilder() { + if (actionCase_ == 3) { + return (com.google.spanner.executor.v1.DeleteUserInstanceConfigAction) action_; + } + return com.google.spanner.executor.v1.DeleteUserInstanceConfigAction.getDefaultInstance(); + } + + public static final int GET_CLOUD_INSTANCE_CONFIG_FIELD_NUMBER = 4; + + /** + * + * + *
    +   * Action that gets a user instance config.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceConfigAction get_cloud_instance_config = 4; + * + * + * @return Whether the getCloudInstanceConfig field is set. + */ + @java.lang.Override + public boolean hasGetCloudInstanceConfig() { + return actionCase_ == 4; + } + + /** + * + * + *
    +   * Action that gets a user instance config.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceConfigAction get_cloud_instance_config = 4; + * + * + * @return The getCloudInstanceConfig. + */ + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudInstanceConfigAction getGetCloudInstanceConfig() { + if (actionCase_ == 4) { + return (com.google.spanner.executor.v1.GetCloudInstanceConfigAction) action_; + } + return com.google.spanner.executor.v1.GetCloudInstanceConfigAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that gets a user instance config.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceConfigAction get_cloud_instance_config = 4; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudInstanceConfigActionOrBuilder + getGetCloudInstanceConfigOrBuilder() { + if (actionCase_ == 4) { + return (com.google.spanner.executor.v1.GetCloudInstanceConfigAction) action_; + } + return com.google.spanner.executor.v1.GetCloudInstanceConfigAction.getDefaultInstance(); + } + + public static final int LIST_INSTANCE_CONFIGS_FIELD_NUMBER = 5; + + /** + * + * + *
    +   * Action that lists user instance configs.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudInstanceConfigsAction list_instance_configs = 5; + * + * + * @return Whether the listInstanceConfigs field is set. + */ + @java.lang.Override + public boolean hasListInstanceConfigs() { + return actionCase_ == 5; + } + + /** + * + * + *
    +   * Action that lists user instance configs.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudInstanceConfigsAction list_instance_configs = 5; + * + * + * @return The listInstanceConfigs. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudInstanceConfigsAction getListInstanceConfigs() { + if (actionCase_ == 5) { + return (com.google.spanner.executor.v1.ListCloudInstanceConfigsAction) action_; + } + return com.google.spanner.executor.v1.ListCloudInstanceConfigsAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that lists user instance configs.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudInstanceConfigsAction list_instance_configs = 5; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudInstanceConfigsActionOrBuilder + getListInstanceConfigsOrBuilder() { + if (actionCase_ == 5) { + return (com.google.spanner.executor.v1.ListCloudInstanceConfigsAction) action_; + } + return com.google.spanner.executor.v1.ListCloudInstanceConfigsAction.getDefaultInstance(); + } + + public static final int CREATE_CLOUD_INSTANCE_FIELD_NUMBER = 6; + + /** + * + * + *
    +   * Action that creates a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.CreateCloudInstanceAction create_cloud_instance = 6; + * + * @return Whether the createCloudInstance field is set. + */ + @java.lang.Override + public boolean hasCreateCloudInstance() { + return actionCase_ == 6; + } + + /** + * + * + *
    +   * Action that creates a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.CreateCloudInstanceAction create_cloud_instance = 6; + * + * @return The createCloudInstance. + */ + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudInstanceAction getCreateCloudInstance() { + if (actionCase_ == 6) { + return (com.google.spanner.executor.v1.CreateCloudInstanceAction) action_; + } + return com.google.spanner.executor.v1.CreateCloudInstanceAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that creates a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.CreateCloudInstanceAction create_cloud_instance = 6; + */ + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudInstanceActionOrBuilder + getCreateCloudInstanceOrBuilder() { + if (actionCase_ == 6) { + return (com.google.spanner.executor.v1.CreateCloudInstanceAction) action_; + } + return com.google.spanner.executor.v1.CreateCloudInstanceAction.getDefaultInstance(); + } + + public static final int UPDATE_CLOUD_INSTANCE_FIELD_NUMBER = 7; + + /** + * + * + *
    +   * Action that updates a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudInstanceAction update_cloud_instance = 7; + * + * @return Whether the updateCloudInstance field is set. + */ + @java.lang.Override + public boolean hasUpdateCloudInstance() { + return actionCase_ == 7; + } + + /** + * + * + *
    +   * Action that updates a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudInstanceAction update_cloud_instance = 7; + * + * @return The updateCloudInstance. + */ + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudInstanceAction getUpdateCloudInstance() { + if (actionCase_ == 7) { + return (com.google.spanner.executor.v1.UpdateCloudInstanceAction) action_; + } + return com.google.spanner.executor.v1.UpdateCloudInstanceAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that updates a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudInstanceAction update_cloud_instance = 7; + */ + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudInstanceActionOrBuilder + getUpdateCloudInstanceOrBuilder() { + if (actionCase_ == 7) { + return (com.google.spanner.executor.v1.UpdateCloudInstanceAction) action_; + } + return com.google.spanner.executor.v1.UpdateCloudInstanceAction.getDefaultInstance(); + } + + public static final int DELETE_CLOUD_INSTANCE_FIELD_NUMBER = 8; + + /** + * + * + *
    +   * Action that deletes a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.DeleteCloudInstanceAction delete_cloud_instance = 8; + * + * @return Whether the deleteCloudInstance field is set. + */ + @java.lang.Override + public boolean hasDeleteCloudInstance() { + return actionCase_ == 8; + } + + /** + * + * + *
    +   * Action that deletes a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.DeleteCloudInstanceAction delete_cloud_instance = 8; + * + * @return The deleteCloudInstance. + */ + @java.lang.Override + public com.google.spanner.executor.v1.DeleteCloudInstanceAction getDeleteCloudInstance() { + if (actionCase_ == 8) { + return (com.google.spanner.executor.v1.DeleteCloudInstanceAction) action_; + } + return com.google.spanner.executor.v1.DeleteCloudInstanceAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that deletes a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.DeleteCloudInstanceAction delete_cloud_instance = 8; + */ + @java.lang.Override + public com.google.spanner.executor.v1.DeleteCloudInstanceActionOrBuilder + getDeleteCloudInstanceOrBuilder() { + if (actionCase_ == 8) { + return (com.google.spanner.executor.v1.DeleteCloudInstanceAction) action_; + } + return com.google.spanner.executor.v1.DeleteCloudInstanceAction.getDefaultInstance(); + } + + public static final int LIST_CLOUD_INSTANCES_FIELD_NUMBER = 9; + + /** + * + * + *
    +   * Action that lists Cloud Spanner instances.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudInstancesAction list_cloud_instances = 9; + * + * @return Whether the listCloudInstances field is set. + */ + @java.lang.Override + public boolean hasListCloudInstances() { + return actionCase_ == 9; + } + + /** + * + * + *
    +   * Action that lists Cloud Spanner instances.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudInstancesAction list_cloud_instances = 9; + * + * @return The listCloudInstances. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudInstancesAction getListCloudInstances() { + if (actionCase_ == 9) { + return (com.google.spanner.executor.v1.ListCloudInstancesAction) action_; + } + return com.google.spanner.executor.v1.ListCloudInstancesAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that lists Cloud Spanner instances.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudInstancesAction list_cloud_instances = 9; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudInstancesActionOrBuilder + getListCloudInstancesOrBuilder() { + if (actionCase_ == 9) { + return (com.google.spanner.executor.v1.ListCloudInstancesAction) action_; + } + return com.google.spanner.executor.v1.ListCloudInstancesAction.getDefaultInstance(); + } + + public static final int GET_CLOUD_INSTANCE_FIELD_NUMBER = 10; + + /** + * + * + *
    +   * Action that retrieves a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceAction get_cloud_instance = 10; + * + * @return Whether the getCloudInstance field is set. + */ + @java.lang.Override + public boolean hasGetCloudInstance() { + return actionCase_ == 10; + } + + /** + * + * + *
    +   * Action that retrieves a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceAction get_cloud_instance = 10; + * + * @return The getCloudInstance. + */ + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudInstanceAction getGetCloudInstance() { + if (actionCase_ == 10) { + return (com.google.spanner.executor.v1.GetCloudInstanceAction) action_; + } + return com.google.spanner.executor.v1.GetCloudInstanceAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that retrieves a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceAction get_cloud_instance = 10; + */ + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudInstanceActionOrBuilder + getGetCloudInstanceOrBuilder() { + if (actionCase_ == 10) { + return (com.google.spanner.executor.v1.GetCloudInstanceAction) action_; + } + return com.google.spanner.executor.v1.GetCloudInstanceAction.getDefaultInstance(); + } + + public static final int CREATE_CLOUD_DATABASE_FIELD_NUMBER = 11; + + /** + * + * + *
    +   * Action that creates a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.CreateCloudDatabaseAction create_cloud_database = 11; + * + * @return Whether the createCloudDatabase field is set. + */ + @java.lang.Override + public boolean hasCreateCloudDatabase() { + return actionCase_ == 11; + } + + /** + * + * + *
    +   * Action that creates a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.CreateCloudDatabaseAction create_cloud_database = 11; + * + * @return The createCloudDatabase. + */ + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudDatabaseAction getCreateCloudDatabase() { + if (actionCase_ == 11) { + return (com.google.spanner.executor.v1.CreateCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.CreateCloudDatabaseAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that creates a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.CreateCloudDatabaseAction create_cloud_database = 11; + */ + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudDatabaseActionOrBuilder + getCreateCloudDatabaseOrBuilder() { + if (actionCase_ == 11) { + return (com.google.spanner.executor.v1.CreateCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.CreateCloudDatabaseAction.getDefaultInstance(); + } + + public static final int UPDATE_CLOUD_DATABASE_DDL_FIELD_NUMBER = 12; + + /** + * + * + *
    +   * Action that updates the schema of a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudDatabaseDdlAction update_cloud_database_ddl = 12; + * + * + * @return Whether the updateCloudDatabaseDdl field is set. + */ + @java.lang.Override + public boolean hasUpdateCloudDatabaseDdl() { + return actionCase_ == 12; + } + + /** + * + * + *
    +   * Action that updates the schema of a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudDatabaseDdlAction update_cloud_database_ddl = 12; + * + * + * @return The updateCloudDatabaseDdl. + */ + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction getUpdateCloudDatabaseDdl() { + if (actionCase_ == 12) { + return (com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction) action_; + } + return com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that updates the schema of a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudDatabaseDdlAction update_cloud_database_ddl = 12; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudDatabaseDdlActionOrBuilder + getUpdateCloudDatabaseDdlOrBuilder() { + if (actionCase_ == 12) { + return (com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction) action_; + } + return com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction.getDefaultInstance(); + } + + public static final int UPDATE_CLOUD_DATABASE_FIELD_NUMBER = 27; + + /** + * + * + *
    +   * Action that updates the schema of a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudDatabaseAction update_cloud_database = 27; + * + * @return Whether the updateCloudDatabase field is set. + */ + @java.lang.Override + public boolean hasUpdateCloudDatabase() { + return actionCase_ == 27; + } + + /** + * + * + *
    +   * Action that updates the schema of a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudDatabaseAction update_cloud_database = 27; + * + * @return The updateCloudDatabase. + */ + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudDatabaseAction getUpdateCloudDatabase() { + if (actionCase_ == 27) { + return (com.google.spanner.executor.v1.UpdateCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.UpdateCloudDatabaseAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that updates the schema of a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudDatabaseAction update_cloud_database = 27; + */ + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudDatabaseActionOrBuilder + getUpdateCloudDatabaseOrBuilder() { + if (actionCase_ == 27) { + return (com.google.spanner.executor.v1.UpdateCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.UpdateCloudDatabaseAction.getDefaultInstance(); + } + + public static final int DROP_CLOUD_DATABASE_FIELD_NUMBER = 13; + + /** + * + * + *
    +   * Action that drops a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.DropCloudDatabaseAction drop_cloud_database = 13; + * + * @return Whether the dropCloudDatabase field is set. + */ + @java.lang.Override + public boolean hasDropCloudDatabase() { + return actionCase_ == 13; + } + + /** + * + * + *
    +   * Action that drops a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.DropCloudDatabaseAction drop_cloud_database = 13; + * + * @return The dropCloudDatabase. + */ + @java.lang.Override + public com.google.spanner.executor.v1.DropCloudDatabaseAction getDropCloudDatabase() { + if (actionCase_ == 13) { + return (com.google.spanner.executor.v1.DropCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.DropCloudDatabaseAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that drops a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.DropCloudDatabaseAction drop_cloud_database = 13; + */ + @java.lang.Override + public com.google.spanner.executor.v1.DropCloudDatabaseActionOrBuilder + getDropCloudDatabaseOrBuilder() { + if (actionCase_ == 13) { + return (com.google.spanner.executor.v1.DropCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.DropCloudDatabaseAction.getDefaultInstance(); + } + + public static final int LIST_CLOUD_DATABASES_FIELD_NUMBER = 14; + + /** + * + * + *
    +   * Action that lists Cloud Spanner databases.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudDatabasesAction list_cloud_databases = 14; + * + * @return Whether the listCloudDatabases field is set. + */ + @java.lang.Override + public boolean hasListCloudDatabases() { + return actionCase_ == 14; + } + + /** + * + * + *
    +   * Action that lists Cloud Spanner databases.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudDatabasesAction list_cloud_databases = 14; + * + * @return The listCloudDatabases. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudDatabasesAction getListCloudDatabases() { + if (actionCase_ == 14) { + return (com.google.spanner.executor.v1.ListCloudDatabasesAction) action_; + } + return com.google.spanner.executor.v1.ListCloudDatabasesAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that lists Cloud Spanner databases.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudDatabasesAction list_cloud_databases = 14; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudDatabasesActionOrBuilder + getListCloudDatabasesOrBuilder() { + if (actionCase_ == 14) { + return (com.google.spanner.executor.v1.ListCloudDatabasesAction) action_; + } + return com.google.spanner.executor.v1.ListCloudDatabasesAction.getDefaultInstance(); + } + + public static final int LIST_CLOUD_DATABASE_OPERATIONS_FIELD_NUMBER = 15; + + /** + * + * + *
    +   * Action that lists Cloud Spanner database operations.
    +   * 
    + * + * + * .google.spanner.executor.v1.ListCloudDatabaseOperationsAction list_cloud_database_operations = 15; + * + * + * @return Whether the listCloudDatabaseOperations field is set. + */ + @java.lang.Override + public boolean hasListCloudDatabaseOperations() { + return actionCase_ == 15; + } + + /** + * + * + *
    +   * Action that lists Cloud Spanner database operations.
    +   * 
    + * + * + * .google.spanner.executor.v1.ListCloudDatabaseOperationsAction list_cloud_database_operations = 15; + * + * + * @return The listCloudDatabaseOperations. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction + getListCloudDatabaseOperations() { + if (actionCase_ == 15) { + return (com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction) action_; + } + return com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that lists Cloud Spanner database operations.
    +   * 
    + * + * + * .google.spanner.executor.v1.ListCloudDatabaseOperationsAction list_cloud_database_operations = 15; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudDatabaseOperationsActionOrBuilder + getListCloudDatabaseOperationsOrBuilder() { + if (actionCase_ == 15) { + return (com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction) action_; + } + return com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction.getDefaultInstance(); + } + + public static final int RESTORE_CLOUD_DATABASE_FIELD_NUMBER = 16; + + /** + * + * + *
    +   * Action that restores a Cloud Spanner database from a backup.
    +   * 
    + * + * .google.spanner.executor.v1.RestoreCloudDatabaseAction restore_cloud_database = 16; + * + * + * @return Whether the restoreCloudDatabase field is set. + */ + @java.lang.Override + public boolean hasRestoreCloudDatabase() { + return actionCase_ == 16; + } + + /** + * + * + *
    +   * Action that restores a Cloud Spanner database from a backup.
    +   * 
    + * + * .google.spanner.executor.v1.RestoreCloudDatabaseAction restore_cloud_database = 16; + * + * + * @return The restoreCloudDatabase. + */ + @java.lang.Override + public com.google.spanner.executor.v1.RestoreCloudDatabaseAction getRestoreCloudDatabase() { + if (actionCase_ == 16) { + return (com.google.spanner.executor.v1.RestoreCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.RestoreCloudDatabaseAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that restores a Cloud Spanner database from a backup.
    +   * 
    + * + * .google.spanner.executor.v1.RestoreCloudDatabaseAction restore_cloud_database = 16; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.RestoreCloudDatabaseActionOrBuilder + getRestoreCloudDatabaseOrBuilder() { + if (actionCase_ == 16) { + return (com.google.spanner.executor.v1.RestoreCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.RestoreCloudDatabaseAction.getDefaultInstance(); + } + + public static final int GET_CLOUD_DATABASE_FIELD_NUMBER = 17; + + /** + * + * + *
    +   * Action that gets a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudDatabaseAction get_cloud_database = 17; + * + * @return Whether the getCloudDatabase field is set. + */ + @java.lang.Override + public boolean hasGetCloudDatabase() { + return actionCase_ == 17; + } + + /** + * + * + *
    +   * Action that gets a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudDatabaseAction get_cloud_database = 17; + * + * @return The getCloudDatabase. + */ + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudDatabaseAction getGetCloudDatabase() { + if (actionCase_ == 17) { + return (com.google.spanner.executor.v1.GetCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.GetCloudDatabaseAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that gets a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudDatabaseAction get_cloud_database = 17; + */ + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudDatabaseActionOrBuilder + getGetCloudDatabaseOrBuilder() { + if (actionCase_ == 17) { + return (com.google.spanner.executor.v1.GetCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.GetCloudDatabaseAction.getDefaultInstance(); + } + + public static final int CREATE_CLOUD_BACKUP_FIELD_NUMBER = 18; + + /** + * + * + *
    +   * Action that creates a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.CreateCloudBackupAction create_cloud_backup = 18; + * + * @return Whether the createCloudBackup field is set. + */ + @java.lang.Override + public boolean hasCreateCloudBackup() { + return actionCase_ == 18; + } + + /** + * + * + *
    +   * Action that creates a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.CreateCloudBackupAction create_cloud_backup = 18; + * + * @return The createCloudBackup. + */ + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudBackupAction getCreateCloudBackup() { + if (actionCase_ == 18) { + return (com.google.spanner.executor.v1.CreateCloudBackupAction) action_; + } + return com.google.spanner.executor.v1.CreateCloudBackupAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that creates a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.CreateCloudBackupAction create_cloud_backup = 18; + */ + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudBackupActionOrBuilder + getCreateCloudBackupOrBuilder() { + if (actionCase_ == 18) { + return (com.google.spanner.executor.v1.CreateCloudBackupAction) action_; + } + return com.google.spanner.executor.v1.CreateCloudBackupAction.getDefaultInstance(); + } + + public static final int COPY_CLOUD_BACKUP_FIELD_NUMBER = 19; + + /** + * + * + *
    +   * Action that copies a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.CopyCloudBackupAction copy_cloud_backup = 19; + * + * @return Whether the copyCloudBackup field is set. + */ + @java.lang.Override + public boolean hasCopyCloudBackup() { + return actionCase_ == 19; + } + + /** + * + * + *
    +   * Action that copies a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.CopyCloudBackupAction copy_cloud_backup = 19; + * + * @return The copyCloudBackup. + */ + @java.lang.Override + public com.google.spanner.executor.v1.CopyCloudBackupAction getCopyCloudBackup() { + if (actionCase_ == 19) { + return (com.google.spanner.executor.v1.CopyCloudBackupAction) action_; + } + return com.google.spanner.executor.v1.CopyCloudBackupAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that copies a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.CopyCloudBackupAction copy_cloud_backup = 19; + */ + @java.lang.Override + public com.google.spanner.executor.v1.CopyCloudBackupActionOrBuilder + getCopyCloudBackupOrBuilder() { + if (actionCase_ == 19) { + return (com.google.spanner.executor.v1.CopyCloudBackupAction) action_; + } + return com.google.spanner.executor.v1.CopyCloudBackupAction.getDefaultInstance(); + } + + public static final int GET_CLOUD_BACKUP_FIELD_NUMBER = 20; + + /** + * + * + *
    +   * Action that gets a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudBackupAction get_cloud_backup = 20; + * + * @return Whether the getCloudBackup field is set. + */ + @java.lang.Override + public boolean hasGetCloudBackup() { + return actionCase_ == 20; + } + + /** + * + * + *
    +   * Action that gets a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudBackupAction get_cloud_backup = 20; + * + * @return The getCloudBackup. + */ + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudBackupAction getGetCloudBackup() { + if (actionCase_ == 20) { + return (com.google.spanner.executor.v1.GetCloudBackupAction) action_; + } + return com.google.spanner.executor.v1.GetCloudBackupAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that gets a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudBackupAction get_cloud_backup = 20; + */ + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudBackupActionOrBuilder getGetCloudBackupOrBuilder() { + if (actionCase_ == 20) { + return (com.google.spanner.executor.v1.GetCloudBackupAction) action_; + } + return com.google.spanner.executor.v1.GetCloudBackupAction.getDefaultInstance(); + } + + public static final int UPDATE_CLOUD_BACKUP_FIELD_NUMBER = 21; + + /** + * + * + *
    +   * Action that updates a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudBackupAction update_cloud_backup = 21; + * + * @return Whether the updateCloudBackup field is set. + */ + @java.lang.Override + public boolean hasUpdateCloudBackup() { + return actionCase_ == 21; + } + + /** + * + * + *
    +   * Action that updates a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudBackupAction update_cloud_backup = 21; + * + * @return The updateCloudBackup. + */ + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudBackupAction getUpdateCloudBackup() { + if (actionCase_ == 21) { + return (com.google.spanner.executor.v1.UpdateCloudBackupAction) action_; + } + return com.google.spanner.executor.v1.UpdateCloudBackupAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that updates a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudBackupAction update_cloud_backup = 21; + */ + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudBackupActionOrBuilder + getUpdateCloudBackupOrBuilder() { + if (actionCase_ == 21) { + return (com.google.spanner.executor.v1.UpdateCloudBackupAction) action_; + } + return com.google.spanner.executor.v1.UpdateCloudBackupAction.getDefaultInstance(); + } + + public static final int DELETE_CLOUD_BACKUP_FIELD_NUMBER = 22; + + /** + * + * + *
    +   * Action that deletes a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.DeleteCloudBackupAction delete_cloud_backup = 22; + * + * @return Whether the deleteCloudBackup field is set. + */ + @java.lang.Override + public boolean hasDeleteCloudBackup() { + return actionCase_ == 22; + } + + /** + * + * + *
    +   * Action that deletes a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.DeleteCloudBackupAction delete_cloud_backup = 22; + * + * @return The deleteCloudBackup. + */ + @java.lang.Override + public com.google.spanner.executor.v1.DeleteCloudBackupAction getDeleteCloudBackup() { + if (actionCase_ == 22) { + return (com.google.spanner.executor.v1.DeleteCloudBackupAction) action_; + } + return com.google.spanner.executor.v1.DeleteCloudBackupAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that deletes a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.DeleteCloudBackupAction delete_cloud_backup = 22; + */ + @java.lang.Override + public com.google.spanner.executor.v1.DeleteCloudBackupActionOrBuilder + getDeleteCloudBackupOrBuilder() { + if (actionCase_ == 22) { + return (com.google.spanner.executor.v1.DeleteCloudBackupAction) action_; + } + return com.google.spanner.executor.v1.DeleteCloudBackupAction.getDefaultInstance(); + } + + public static final int LIST_CLOUD_BACKUPS_FIELD_NUMBER = 23; + + /** + * + * + *
    +   * Action that lists Cloud Spanner database backups.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudBackupsAction list_cloud_backups = 23; + * + * @return Whether the listCloudBackups field is set. + */ + @java.lang.Override + public boolean hasListCloudBackups() { + return actionCase_ == 23; + } + + /** + * + * + *
    +   * Action that lists Cloud Spanner database backups.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudBackupsAction list_cloud_backups = 23; + * + * @return The listCloudBackups. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudBackupsAction getListCloudBackups() { + if (actionCase_ == 23) { + return (com.google.spanner.executor.v1.ListCloudBackupsAction) action_; + } + return com.google.spanner.executor.v1.ListCloudBackupsAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that lists Cloud Spanner database backups.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudBackupsAction list_cloud_backups = 23; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudBackupsActionOrBuilder + getListCloudBackupsOrBuilder() { + if (actionCase_ == 23) { + return (com.google.spanner.executor.v1.ListCloudBackupsAction) action_; + } + return com.google.spanner.executor.v1.ListCloudBackupsAction.getDefaultInstance(); + } + + public static final int LIST_CLOUD_BACKUP_OPERATIONS_FIELD_NUMBER = 24; + + /** + * + * + *
    +   * Action that lists Cloud Spanner database backup operations.
    +   * 
    + * + * + * .google.spanner.executor.v1.ListCloudBackupOperationsAction list_cloud_backup_operations = 24; + * + * + * @return Whether the listCloudBackupOperations field is set. + */ + @java.lang.Override + public boolean hasListCloudBackupOperations() { + return actionCase_ == 24; + } + + /** + * + * + *
    +   * Action that lists Cloud Spanner database backup operations.
    +   * 
    + * + * + * .google.spanner.executor.v1.ListCloudBackupOperationsAction list_cloud_backup_operations = 24; + * + * + * @return The listCloudBackupOperations. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudBackupOperationsAction + getListCloudBackupOperations() { + if (actionCase_ == 24) { + return (com.google.spanner.executor.v1.ListCloudBackupOperationsAction) action_; + } + return com.google.spanner.executor.v1.ListCloudBackupOperationsAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that lists Cloud Spanner database backup operations.
    +   * 
    + * + * + * .google.spanner.executor.v1.ListCloudBackupOperationsAction list_cloud_backup_operations = 24; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudBackupOperationsActionOrBuilder + getListCloudBackupOperationsOrBuilder() { + if (actionCase_ == 24) { + return (com.google.spanner.executor.v1.ListCloudBackupOperationsAction) action_; + } + return com.google.spanner.executor.v1.ListCloudBackupOperationsAction.getDefaultInstance(); + } + + public static final int GET_OPERATION_FIELD_NUMBER = 25; + + /** + * + * + *
    +   * Action that gets an operation.
    +   * 
    + * + * .google.spanner.executor.v1.GetOperationAction get_operation = 25; + * + * @return Whether the getOperation field is set. + */ + @java.lang.Override + public boolean hasGetOperation() { + return actionCase_ == 25; + } + + /** + * + * + *
    +   * Action that gets an operation.
    +   * 
    + * + * .google.spanner.executor.v1.GetOperationAction get_operation = 25; + * + * @return The getOperation. + */ + @java.lang.Override + public com.google.spanner.executor.v1.GetOperationAction getGetOperation() { + if (actionCase_ == 25) { + return (com.google.spanner.executor.v1.GetOperationAction) action_; + } + return com.google.spanner.executor.v1.GetOperationAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that gets an operation.
    +   * 
    + * + * .google.spanner.executor.v1.GetOperationAction get_operation = 25; + */ + @java.lang.Override + public com.google.spanner.executor.v1.GetOperationActionOrBuilder getGetOperationOrBuilder() { + if (actionCase_ == 25) { + return (com.google.spanner.executor.v1.GetOperationAction) action_; + } + return com.google.spanner.executor.v1.GetOperationAction.getDefaultInstance(); + } + + public static final int CANCEL_OPERATION_FIELD_NUMBER = 26; + + /** + * + * + *
    +   * Action that cancels an operation.
    +   * 
    + * + * .google.spanner.executor.v1.CancelOperationAction cancel_operation = 26; + * + * @return Whether the cancelOperation field is set. + */ + @java.lang.Override + public boolean hasCancelOperation() { + return actionCase_ == 26; + } + + /** + * + * + *
    +   * Action that cancels an operation.
    +   * 
    + * + * .google.spanner.executor.v1.CancelOperationAction cancel_operation = 26; + * + * @return The cancelOperation. + */ + @java.lang.Override + public com.google.spanner.executor.v1.CancelOperationAction getCancelOperation() { + if (actionCase_ == 26) { + return (com.google.spanner.executor.v1.CancelOperationAction) action_; + } + return com.google.spanner.executor.v1.CancelOperationAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that cancels an operation.
    +   * 
    + * + * .google.spanner.executor.v1.CancelOperationAction cancel_operation = 26; + */ + @java.lang.Override + public com.google.spanner.executor.v1.CancelOperationActionOrBuilder + getCancelOperationOrBuilder() { + if (actionCase_ == 26) { + return (com.google.spanner.executor.v1.CancelOperationAction) action_; + } + return com.google.spanner.executor.v1.CancelOperationAction.getDefaultInstance(); + } + + public static final int CHANGE_QUORUM_CLOUD_DATABASE_FIELD_NUMBER = 28; + + /** + * + * + *
    +   * Action that changes quorum of a Cloud Spanner database.
    +   * 
    + * + * + * .google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction change_quorum_cloud_database = 28; + * + * + * @return Whether the changeQuorumCloudDatabase field is set. + */ + @java.lang.Override + public boolean hasChangeQuorumCloudDatabase() { + return actionCase_ == 28; + } + + /** + * + * + *
    +   * Action that changes quorum of a Cloud Spanner database.
    +   * 
    + * + * + * .google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction change_quorum_cloud_database = 28; + * + * + * @return The changeQuorumCloudDatabase. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction + getChangeQuorumCloudDatabase() { + if (actionCase_ == 28) { + return (com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that changes quorum of a Cloud Spanner database.
    +   * 
    + * + * + * .google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction change_quorum_cloud_database = 28; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseActionOrBuilder + getChangeQuorumCloudDatabaseOrBuilder() { + if (actionCase_ == 28) { + return (com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction.getDefaultInstance(); + } + + public static final int ADD_SPLIT_POINTS_FIELD_NUMBER = 29; + + /** + * + * + *
    +   * Action that adds splits to a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.AddSplitPointsAction add_split_points = 29; + * + * @return Whether the addSplitPoints field is set. + */ + @java.lang.Override + public boolean hasAddSplitPoints() { + return actionCase_ == 29; + } + + /** + * + * + *
    +   * Action that adds splits to a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.AddSplitPointsAction add_split_points = 29; + * + * @return The addSplitPoints. + */ + @java.lang.Override + public com.google.spanner.executor.v1.AddSplitPointsAction getAddSplitPoints() { + if (actionCase_ == 29) { + return (com.google.spanner.executor.v1.AddSplitPointsAction) action_; + } + return com.google.spanner.executor.v1.AddSplitPointsAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that adds splits to a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.AddSplitPointsAction add_split_points = 29; + */ + @java.lang.Override + public com.google.spanner.executor.v1.AddSplitPointsActionOrBuilder getAddSplitPointsOrBuilder() { + if (actionCase_ == 29) { + return (com.google.spanner.executor.v1.AddSplitPointsAction) action_; + } + return com.google.spanner.executor.v1.AddSplitPointsAction.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (actionCase_ == 1) { + output.writeMessage( + 1, (com.google.spanner.executor.v1.CreateUserInstanceConfigAction) action_); + } + if (actionCase_ == 2) { + output.writeMessage( + 2, (com.google.spanner.executor.v1.UpdateUserInstanceConfigAction) action_); + } + if (actionCase_ == 3) { + output.writeMessage( + 3, (com.google.spanner.executor.v1.DeleteUserInstanceConfigAction) action_); + } + if (actionCase_ == 4) { + output.writeMessage(4, (com.google.spanner.executor.v1.GetCloudInstanceConfigAction) action_); + } + if (actionCase_ == 5) { + output.writeMessage( + 5, (com.google.spanner.executor.v1.ListCloudInstanceConfigsAction) action_); + } + if (actionCase_ == 6) { + output.writeMessage(6, (com.google.spanner.executor.v1.CreateCloudInstanceAction) action_); + } + if (actionCase_ == 7) { + output.writeMessage(7, (com.google.spanner.executor.v1.UpdateCloudInstanceAction) action_); + } + if (actionCase_ == 8) { + output.writeMessage(8, (com.google.spanner.executor.v1.DeleteCloudInstanceAction) action_); + } + if (actionCase_ == 9) { + output.writeMessage(9, (com.google.spanner.executor.v1.ListCloudInstancesAction) action_); + } + if (actionCase_ == 10) { + 
output.writeMessage(10, (com.google.spanner.executor.v1.GetCloudInstanceAction) action_); + } + if (actionCase_ == 11) { + output.writeMessage(11, (com.google.spanner.executor.v1.CreateCloudDatabaseAction) action_); + } + if (actionCase_ == 12) { + output.writeMessage( + 12, (com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction) action_); + } + if (actionCase_ == 13) { + output.writeMessage(13, (com.google.spanner.executor.v1.DropCloudDatabaseAction) action_); + } + if (actionCase_ == 14) { + output.writeMessage(14, (com.google.spanner.executor.v1.ListCloudDatabasesAction) action_); + } + if (actionCase_ == 15) { + output.writeMessage( + 15, (com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction) action_); + } + if (actionCase_ == 16) { + output.writeMessage(16, (com.google.spanner.executor.v1.RestoreCloudDatabaseAction) action_); + } + if (actionCase_ == 17) { + output.writeMessage(17, (com.google.spanner.executor.v1.GetCloudDatabaseAction) action_); + } + if (actionCase_ == 18) { + output.writeMessage(18, (com.google.spanner.executor.v1.CreateCloudBackupAction) action_); + } + if (actionCase_ == 19) { + output.writeMessage(19, (com.google.spanner.executor.v1.CopyCloudBackupAction) action_); + } + if (actionCase_ == 20) { + output.writeMessage(20, (com.google.spanner.executor.v1.GetCloudBackupAction) action_); + } + if (actionCase_ == 21) { + output.writeMessage(21, (com.google.spanner.executor.v1.UpdateCloudBackupAction) action_); + } + if (actionCase_ == 22) { + output.writeMessage(22, (com.google.spanner.executor.v1.DeleteCloudBackupAction) action_); + } + if (actionCase_ == 23) { + output.writeMessage(23, (com.google.spanner.executor.v1.ListCloudBackupsAction) action_); + } + if (actionCase_ == 24) { + output.writeMessage( + 24, (com.google.spanner.executor.v1.ListCloudBackupOperationsAction) action_); + } + if (actionCase_ == 25) { + output.writeMessage(25, (com.google.spanner.executor.v1.GetOperationAction) action_); + } + if (actionCase_ 
== 26) { + output.writeMessage(26, (com.google.spanner.executor.v1.CancelOperationAction) action_); + } + if (actionCase_ == 27) { + output.writeMessage(27, (com.google.spanner.executor.v1.UpdateCloudDatabaseAction) action_); + } + if (actionCase_ == 28) { + output.writeMessage( + 28, (com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction) action_); + } + if (actionCase_ == 29) { + output.writeMessage(29, (com.google.spanner.executor.v1.AddSplitPointsAction) action_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (actionCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, (com.google.spanner.executor.v1.CreateUserInstanceConfigAction) action_); + } + if (actionCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, (com.google.spanner.executor.v1.UpdateUserInstanceConfigAction) action_); + } + if (actionCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, (com.google.spanner.executor.v1.DeleteUserInstanceConfigAction) action_); + } + if (actionCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.spanner.executor.v1.GetCloudInstanceConfigAction) action_); + } + if (actionCase_ == 5) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 5, (com.google.spanner.executor.v1.ListCloudInstanceConfigsAction) action_); + } + if (actionCase_ == 6) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 6, (com.google.spanner.executor.v1.CreateCloudInstanceAction) action_); + } + if (actionCase_ == 7) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 7, (com.google.spanner.executor.v1.UpdateCloudInstanceAction) action_); + } + if (actionCase_ == 8) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 
8, (com.google.spanner.executor.v1.DeleteCloudInstanceAction) action_); + } + if (actionCase_ == 9) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 9, (com.google.spanner.executor.v1.ListCloudInstancesAction) action_); + } + if (actionCase_ == 10) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 10, (com.google.spanner.executor.v1.GetCloudInstanceAction) action_); + } + if (actionCase_ == 11) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 11, (com.google.spanner.executor.v1.CreateCloudDatabaseAction) action_); + } + if (actionCase_ == 12) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 12, (com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction) action_); + } + if (actionCase_ == 13) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 13, (com.google.spanner.executor.v1.DropCloudDatabaseAction) action_); + } + if (actionCase_ == 14) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 14, (com.google.spanner.executor.v1.ListCloudDatabasesAction) action_); + } + if (actionCase_ == 15) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 15, (com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction) action_); + } + if (actionCase_ == 16) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 16, (com.google.spanner.executor.v1.RestoreCloudDatabaseAction) action_); + } + if (actionCase_ == 17) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 17, (com.google.spanner.executor.v1.GetCloudDatabaseAction) action_); + } + if (actionCase_ == 18) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 18, (com.google.spanner.executor.v1.CreateCloudBackupAction) action_); + } + if (actionCase_ == 19) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 19, (com.google.spanner.executor.v1.CopyCloudBackupAction) 
action_); + } + if (actionCase_ == 20) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 20, (com.google.spanner.executor.v1.GetCloudBackupAction) action_); + } + if (actionCase_ == 21) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 21, (com.google.spanner.executor.v1.UpdateCloudBackupAction) action_); + } + if (actionCase_ == 22) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 22, (com.google.spanner.executor.v1.DeleteCloudBackupAction) action_); + } + if (actionCase_ == 23) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 23, (com.google.spanner.executor.v1.ListCloudBackupsAction) action_); + } + if (actionCase_ == 24) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 24, (com.google.spanner.executor.v1.ListCloudBackupOperationsAction) action_); + } + if (actionCase_ == 25) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 25, (com.google.spanner.executor.v1.GetOperationAction) action_); + } + if (actionCase_ == 26) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 26, (com.google.spanner.executor.v1.CancelOperationAction) action_); + } + if (actionCase_ == 27) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 27, (com.google.spanner.executor.v1.UpdateCloudDatabaseAction) action_); + } + if (actionCase_ == 28) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 28, (com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction) action_); + } + if (actionCase_ == 29) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 29, (com.google.spanner.executor.v1.AddSplitPointsAction) action_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
com.google.spanner.executor.v1.AdminAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.AdminAction other = + (com.google.spanner.executor.v1.AdminAction) obj; + + if (!getActionCase().equals(other.getActionCase())) return false; + switch (actionCase_) { + case 1: + if (!getCreateUserInstanceConfig().equals(other.getCreateUserInstanceConfig())) + return false; + break; + case 2: + if (!getUpdateUserInstanceConfig().equals(other.getUpdateUserInstanceConfig())) + return false; + break; + case 3: + if (!getDeleteUserInstanceConfig().equals(other.getDeleteUserInstanceConfig())) + return false; + break; + case 4: + if (!getGetCloudInstanceConfig().equals(other.getGetCloudInstanceConfig())) return false; + break; + case 5: + if (!getListInstanceConfigs().equals(other.getListInstanceConfigs())) return false; + break; + case 6: + if (!getCreateCloudInstance().equals(other.getCreateCloudInstance())) return false; + break; + case 7: + if (!getUpdateCloudInstance().equals(other.getUpdateCloudInstance())) return false; + break; + case 8: + if (!getDeleteCloudInstance().equals(other.getDeleteCloudInstance())) return false; + break; + case 9: + if (!getListCloudInstances().equals(other.getListCloudInstances())) return false; + break; + case 10: + if (!getGetCloudInstance().equals(other.getGetCloudInstance())) return false; + break; + case 11: + if (!getCreateCloudDatabase().equals(other.getCreateCloudDatabase())) return false; + break; + case 12: + if (!getUpdateCloudDatabaseDdl().equals(other.getUpdateCloudDatabaseDdl())) return false; + break; + case 27: + if (!getUpdateCloudDatabase().equals(other.getUpdateCloudDatabase())) return false; + break; + case 13: + if (!getDropCloudDatabase().equals(other.getDropCloudDatabase())) return false; + break; + case 14: + if (!getListCloudDatabases().equals(other.getListCloudDatabases())) return false; + break; + case 15: + if (!getListCloudDatabaseOperations().equals(other.getListCloudDatabaseOperations())) + 
return false; + break; + case 16: + if (!getRestoreCloudDatabase().equals(other.getRestoreCloudDatabase())) return false; + break; + case 17: + if (!getGetCloudDatabase().equals(other.getGetCloudDatabase())) return false; + break; + case 18: + if (!getCreateCloudBackup().equals(other.getCreateCloudBackup())) return false; + break; + case 19: + if (!getCopyCloudBackup().equals(other.getCopyCloudBackup())) return false; + break; + case 20: + if (!getGetCloudBackup().equals(other.getGetCloudBackup())) return false; + break; + case 21: + if (!getUpdateCloudBackup().equals(other.getUpdateCloudBackup())) return false; + break; + case 22: + if (!getDeleteCloudBackup().equals(other.getDeleteCloudBackup())) return false; + break; + case 23: + if (!getListCloudBackups().equals(other.getListCloudBackups())) return false; + break; + case 24: + if (!getListCloudBackupOperations().equals(other.getListCloudBackupOperations())) + return false; + break; + case 25: + if (!getGetOperation().equals(other.getGetOperation())) return false; + break; + case 26: + if (!getCancelOperation().equals(other.getCancelOperation())) return false; + break; + case 28: + if (!getChangeQuorumCloudDatabase().equals(other.getChangeQuorumCloudDatabase())) + return false; + break; + case 29: + if (!getAddSplitPoints().equals(other.getAddSplitPoints())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (actionCase_) { + case 1: + hash = (37 * hash) + CREATE_USER_INSTANCE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getCreateUserInstanceConfig().hashCode(); + break; + case 2: + hash = (37 * hash) + UPDATE_USER_INSTANCE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getUpdateUserInstanceConfig().hashCode(); + break; + case 3: + hash = 
(37 * hash) + DELETE_USER_INSTANCE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getDeleteUserInstanceConfig().hashCode(); + break; + case 4: + hash = (37 * hash) + GET_CLOUD_INSTANCE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getGetCloudInstanceConfig().hashCode(); + break; + case 5: + hash = (37 * hash) + LIST_INSTANCE_CONFIGS_FIELD_NUMBER; + hash = (53 * hash) + getListInstanceConfigs().hashCode(); + break; + case 6: + hash = (37 * hash) + CREATE_CLOUD_INSTANCE_FIELD_NUMBER; + hash = (53 * hash) + getCreateCloudInstance().hashCode(); + break; + case 7: + hash = (37 * hash) + UPDATE_CLOUD_INSTANCE_FIELD_NUMBER; + hash = (53 * hash) + getUpdateCloudInstance().hashCode(); + break; + case 8: + hash = (37 * hash) + DELETE_CLOUD_INSTANCE_FIELD_NUMBER; + hash = (53 * hash) + getDeleteCloudInstance().hashCode(); + break; + case 9: + hash = (37 * hash) + LIST_CLOUD_INSTANCES_FIELD_NUMBER; + hash = (53 * hash) + getListCloudInstances().hashCode(); + break; + case 10: + hash = (37 * hash) + GET_CLOUD_INSTANCE_FIELD_NUMBER; + hash = (53 * hash) + getGetCloudInstance().hashCode(); + break; + case 11: + hash = (37 * hash) + CREATE_CLOUD_DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getCreateCloudDatabase().hashCode(); + break; + case 12: + hash = (37 * hash) + UPDATE_CLOUD_DATABASE_DDL_FIELD_NUMBER; + hash = (53 * hash) + getUpdateCloudDatabaseDdl().hashCode(); + break; + case 27: + hash = (37 * hash) + UPDATE_CLOUD_DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getUpdateCloudDatabase().hashCode(); + break; + case 13: + hash = (37 * hash) + DROP_CLOUD_DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getDropCloudDatabase().hashCode(); + break; + case 14: + hash = (37 * hash) + LIST_CLOUD_DATABASES_FIELD_NUMBER; + hash = (53 * hash) + getListCloudDatabases().hashCode(); + break; + case 15: + hash = (37 * hash) + LIST_CLOUD_DATABASE_OPERATIONS_FIELD_NUMBER; + hash = (53 * hash) + getListCloudDatabaseOperations().hashCode(); + break; + case 16: + hash = (37 * hash) + 
RESTORE_CLOUD_DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getRestoreCloudDatabase().hashCode(); + break; + case 17: + hash = (37 * hash) + GET_CLOUD_DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getGetCloudDatabase().hashCode(); + break; + case 18: + hash = (37 * hash) + CREATE_CLOUD_BACKUP_FIELD_NUMBER; + hash = (53 * hash) + getCreateCloudBackup().hashCode(); + break; + case 19: + hash = (37 * hash) + COPY_CLOUD_BACKUP_FIELD_NUMBER; + hash = (53 * hash) + getCopyCloudBackup().hashCode(); + break; + case 20: + hash = (37 * hash) + GET_CLOUD_BACKUP_FIELD_NUMBER; + hash = (53 * hash) + getGetCloudBackup().hashCode(); + break; + case 21: + hash = (37 * hash) + UPDATE_CLOUD_BACKUP_FIELD_NUMBER; + hash = (53 * hash) + getUpdateCloudBackup().hashCode(); + break; + case 22: + hash = (37 * hash) + DELETE_CLOUD_BACKUP_FIELD_NUMBER; + hash = (53 * hash) + getDeleteCloudBackup().hashCode(); + break; + case 23: + hash = (37 * hash) + LIST_CLOUD_BACKUPS_FIELD_NUMBER; + hash = (53 * hash) + getListCloudBackups().hashCode(); + break; + case 24: + hash = (37 * hash) + LIST_CLOUD_BACKUP_OPERATIONS_FIELD_NUMBER; + hash = (53 * hash) + getListCloudBackupOperations().hashCode(); + break; + case 25: + hash = (37 * hash) + GET_OPERATION_FIELD_NUMBER; + hash = (53 * hash) + getGetOperation().hashCode(); + break; + case 26: + hash = (37 * hash) + CANCEL_OPERATION_FIELD_NUMBER; + hash = (53 * hash) + getCancelOperation().hashCode(); + break; + case 28: + hash = (37 * hash) + CHANGE_QUORUM_CLOUD_DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getChangeQuorumCloudDatabase().hashCode(); + break; + case 29: + hash = (37 * hash) + ADD_SPLIT_POINTS_FIELD_NUMBER; + hash = (53 * hash) + getAddSplitPoints().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.AdminAction parseFrom(java.nio.ByteBuffer data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.AdminAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.AdminAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.AdminAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.AdminAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.AdminAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.AdminAction parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.AdminAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.AdminAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.AdminAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.AdminAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.AdminAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.AdminAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * AdminAction defines all the cloud spanner admin actions, including
    +   * instance/database admin ops, backup ops and operation actions.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.AdminAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.AdminAction) + com.google.spanner.executor.v1.AdminActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_AdminAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_AdminAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.AdminAction.class, + com.google.spanner.executor.v1.AdminAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.AdminAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (createUserInstanceConfigBuilder_ != null) { + createUserInstanceConfigBuilder_.clear(); + } + if (updateUserInstanceConfigBuilder_ != null) { + updateUserInstanceConfigBuilder_.clear(); + } + if (deleteUserInstanceConfigBuilder_ != null) { + deleteUserInstanceConfigBuilder_.clear(); + } + if (getCloudInstanceConfigBuilder_ != null) { + getCloudInstanceConfigBuilder_.clear(); + } + if (listInstanceConfigsBuilder_ != null) { + listInstanceConfigsBuilder_.clear(); + } + if (createCloudInstanceBuilder_ != null) { + createCloudInstanceBuilder_.clear(); + } + if (updateCloudInstanceBuilder_ != null) { + updateCloudInstanceBuilder_.clear(); + } + if (deleteCloudInstanceBuilder_ != null) { + deleteCloudInstanceBuilder_.clear(); + } + if 
(listCloudInstancesBuilder_ != null) { + listCloudInstancesBuilder_.clear(); + } + if (getCloudInstanceBuilder_ != null) { + getCloudInstanceBuilder_.clear(); + } + if (createCloudDatabaseBuilder_ != null) { + createCloudDatabaseBuilder_.clear(); + } + if (updateCloudDatabaseDdlBuilder_ != null) { + updateCloudDatabaseDdlBuilder_.clear(); + } + if (updateCloudDatabaseBuilder_ != null) { + updateCloudDatabaseBuilder_.clear(); + } + if (dropCloudDatabaseBuilder_ != null) { + dropCloudDatabaseBuilder_.clear(); + } + if (listCloudDatabasesBuilder_ != null) { + listCloudDatabasesBuilder_.clear(); + } + if (listCloudDatabaseOperationsBuilder_ != null) { + listCloudDatabaseOperationsBuilder_.clear(); + } + if (restoreCloudDatabaseBuilder_ != null) { + restoreCloudDatabaseBuilder_.clear(); + } + if (getCloudDatabaseBuilder_ != null) { + getCloudDatabaseBuilder_.clear(); + } + if (createCloudBackupBuilder_ != null) { + createCloudBackupBuilder_.clear(); + } + if (copyCloudBackupBuilder_ != null) { + copyCloudBackupBuilder_.clear(); + } + if (getCloudBackupBuilder_ != null) { + getCloudBackupBuilder_.clear(); + } + if (updateCloudBackupBuilder_ != null) { + updateCloudBackupBuilder_.clear(); + } + if (deleteCloudBackupBuilder_ != null) { + deleteCloudBackupBuilder_.clear(); + } + if (listCloudBackupsBuilder_ != null) { + listCloudBackupsBuilder_.clear(); + } + if (listCloudBackupOperationsBuilder_ != null) { + listCloudBackupOperationsBuilder_.clear(); + } + if (getOperationBuilder_ != null) { + getOperationBuilder_.clear(); + } + if (cancelOperationBuilder_ != null) { + cancelOperationBuilder_.clear(); + } + if (changeQuorumCloudDatabaseBuilder_ != null) { + changeQuorumCloudDatabaseBuilder_.clear(); + } + if (addSplitPointsBuilder_ != null) { + addSplitPointsBuilder_.clear(); + } + actionCase_ = 0; + action_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_AdminAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.AdminAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.AdminAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.AdminAction build() { + com.google.spanner.executor.v1.AdminAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.AdminAction buildPartial() { + com.google.spanner.executor.v1.AdminAction result = + new com.google.spanner.executor.v1.AdminAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.AdminAction result) { + int from_bitField0_ = bitField0_; + } + + private void buildPartialOneofs(com.google.spanner.executor.v1.AdminAction result) { + result.actionCase_ = actionCase_; + result.action_ = this.action_; + if (actionCase_ == 1 && createUserInstanceConfigBuilder_ != null) { + result.action_ = createUserInstanceConfigBuilder_.build(); + } + if (actionCase_ == 2 && updateUserInstanceConfigBuilder_ != null) { + result.action_ = updateUserInstanceConfigBuilder_.build(); + } + if (actionCase_ == 3 && deleteUserInstanceConfigBuilder_ != null) { + result.action_ = deleteUserInstanceConfigBuilder_.build(); + } + if (actionCase_ == 4 && getCloudInstanceConfigBuilder_ != null) { + result.action_ = getCloudInstanceConfigBuilder_.build(); + } + if (actionCase_ == 5 && listInstanceConfigsBuilder_ != null) { + result.action_ = listInstanceConfigsBuilder_.build(); + } + if (actionCase_ == 6 && createCloudInstanceBuilder_ != null) { + result.action_ = createCloudInstanceBuilder_.build(); + } + if (actionCase_ == 7 && 
updateCloudInstanceBuilder_ != null) { + result.action_ = updateCloudInstanceBuilder_.build(); + } + if (actionCase_ == 8 && deleteCloudInstanceBuilder_ != null) { + result.action_ = deleteCloudInstanceBuilder_.build(); + } + if (actionCase_ == 9 && listCloudInstancesBuilder_ != null) { + result.action_ = listCloudInstancesBuilder_.build(); + } + if (actionCase_ == 10 && getCloudInstanceBuilder_ != null) { + result.action_ = getCloudInstanceBuilder_.build(); + } + if (actionCase_ == 11 && createCloudDatabaseBuilder_ != null) { + result.action_ = createCloudDatabaseBuilder_.build(); + } + if (actionCase_ == 12 && updateCloudDatabaseDdlBuilder_ != null) { + result.action_ = updateCloudDatabaseDdlBuilder_.build(); + } + if (actionCase_ == 27 && updateCloudDatabaseBuilder_ != null) { + result.action_ = updateCloudDatabaseBuilder_.build(); + } + if (actionCase_ == 13 && dropCloudDatabaseBuilder_ != null) { + result.action_ = dropCloudDatabaseBuilder_.build(); + } + if (actionCase_ == 14 && listCloudDatabasesBuilder_ != null) { + result.action_ = listCloudDatabasesBuilder_.build(); + } + if (actionCase_ == 15 && listCloudDatabaseOperationsBuilder_ != null) { + result.action_ = listCloudDatabaseOperationsBuilder_.build(); + } + if (actionCase_ == 16 && restoreCloudDatabaseBuilder_ != null) { + result.action_ = restoreCloudDatabaseBuilder_.build(); + } + if (actionCase_ == 17 && getCloudDatabaseBuilder_ != null) { + result.action_ = getCloudDatabaseBuilder_.build(); + } + if (actionCase_ == 18 && createCloudBackupBuilder_ != null) { + result.action_ = createCloudBackupBuilder_.build(); + } + if (actionCase_ == 19 && copyCloudBackupBuilder_ != null) { + result.action_ = copyCloudBackupBuilder_.build(); + } + if (actionCase_ == 20 && getCloudBackupBuilder_ != null) { + result.action_ = getCloudBackupBuilder_.build(); + } + if (actionCase_ == 21 && updateCloudBackupBuilder_ != null) { + result.action_ = updateCloudBackupBuilder_.build(); + } + if (actionCase_ == 22 && 
deleteCloudBackupBuilder_ != null) { + result.action_ = deleteCloudBackupBuilder_.build(); + } + if (actionCase_ == 23 && listCloudBackupsBuilder_ != null) { + result.action_ = listCloudBackupsBuilder_.build(); + } + if (actionCase_ == 24 && listCloudBackupOperationsBuilder_ != null) { + result.action_ = listCloudBackupOperationsBuilder_.build(); + } + if (actionCase_ == 25 && getOperationBuilder_ != null) { + result.action_ = getOperationBuilder_.build(); + } + if (actionCase_ == 26 && cancelOperationBuilder_ != null) { + result.action_ = cancelOperationBuilder_.build(); + } + if (actionCase_ == 28 && changeQuorumCloudDatabaseBuilder_ != null) { + result.action_ = changeQuorumCloudDatabaseBuilder_.build(); + } + if (actionCase_ == 29 && addSplitPointsBuilder_ != null) { + result.action_ = addSplitPointsBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.AdminAction) { + return mergeFrom((com.google.spanner.executor.v1.AdminAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.AdminAction other) { + if (other == com.google.spanner.executor.v1.AdminAction.getDefaultInstance()) return this; + switch (other.getActionCase()) { + case CREATE_USER_INSTANCE_CONFIG: + { + mergeCreateUserInstanceConfig(other.getCreateUserInstanceConfig()); + break; + } + case UPDATE_USER_INSTANCE_CONFIG: + { + mergeUpdateUserInstanceConfig(other.getUpdateUserInstanceConfig()); + break; + } + case DELETE_USER_INSTANCE_CONFIG: + { + mergeDeleteUserInstanceConfig(other.getDeleteUserInstanceConfig()); + break; + } + case GET_CLOUD_INSTANCE_CONFIG: + { + mergeGetCloudInstanceConfig(other.getGetCloudInstanceConfig()); + break; + } + case LIST_INSTANCE_CONFIGS: + { + mergeListInstanceConfigs(other.getListInstanceConfigs()); + break; + } + case CREATE_CLOUD_INSTANCE: + { + 
mergeCreateCloudInstance(other.getCreateCloudInstance()); + break; + } + case UPDATE_CLOUD_INSTANCE: + { + mergeUpdateCloudInstance(other.getUpdateCloudInstance()); + break; + } + case DELETE_CLOUD_INSTANCE: + { + mergeDeleteCloudInstance(other.getDeleteCloudInstance()); + break; + } + case LIST_CLOUD_INSTANCES: + { + mergeListCloudInstances(other.getListCloudInstances()); + break; + } + case GET_CLOUD_INSTANCE: + { + mergeGetCloudInstance(other.getGetCloudInstance()); + break; + } + case CREATE_CLOUD_DATABASE: + { + mergeCreateCloudDatabase(other.getCreateCloudDatabase()); + break; + } + case UPDATE_CLOUD_DATABASE_DDL: + { + mergeUpdateCloudDatabaseDdl(other.getUpdateCloudDatabaseDdl()); + break; + } + case UPDATE_CLOUD_DATABASE: + { + mergeUpdateCloudDatabase(other.getUpdateCloudDatabase()); + break; + } + case DROP_CLOUD_DATABASE: + { + mergeDropCloudDatabase(other.getDropCloudDatabase()); + break; + } + case LIST_CLOUD_DATABASES: + { + mergeListCloudDatabases(other.getListCloudDatabases()); + break; + } + case LIST_CLOUD_DATABASE_OPERATIONS: + { + mergeListCloudDatabaseOperations(other.getListCloudDatabaseOperations()); + break; + } + case RESTORE_CLOUD_DATABASE: + { + mergeRestoreCloudDatabase(other.getRestoreCloudDatabase()); + break; + } + case GET_CLOUD_DATABASE: + { + mergeGetCloudDatabase(other.getGetCloudDatabase()); + break; + } + case CREATE_CLOUD_BACKUP: + { + mergeCreateCloudBackup(other.getCreateCloudBackup()); + break; + } + case COPY_CLOUD_BACKUP: + { + mergeCopyCloudBackup(other.getCopyCloudBackup()); + break; + } + case GET_CLOUD_BACKUP: + { + mergeGetCloudBackup(other.getGetCloudBackup()); + break; + } + case UPDATE_CLOUD_BACKUP: + { + mergeUpdateCloudBackup(other.getUpdateCloudBackup()); + break; + } + case DELETE_CLOUD_BACKUP: + { + mergeDeleteCloudBackup(other.getDeleteCloudBackup()); + break; + } + case LIST_CLOUD_BACKUPS: + { + mergeListCloudBackups(other.getListCloudBackups()); + break; + } + case LIST_CLOUD_BACKUP_OPERATIONS: + { + 
mergeListCloudBackupOperations(other.getListCloudBackupOperations()); + break; + } + case GET_OPERATION: + { + mergeGetOperation(other.getGetOperation()); + break; + } + case CANCEL_OPERATION: + { + mergeCancelOperation(other.getCancelOperation()); + break; + } + case CHANGE_QUORUM_CLOUD_DATABASE: + { + mergeChangeQuorumCloudDatabase(other.getChangeQuorumCloudDatabase()); + break; + } + case ADD_SPLIT_POINTS: + { + mergeAddSplitPoints(other.getAddSplitPoints()); + break; + } + case ACTION_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetCreateUserInstanceConfigFieldBuilder().getBuilder(), + extensionRegistry); + actionCase_ = 1; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetUpdateUserInstanceConfigFieldBuilder().getBuilder(), + extensionRegistry); + actionCase_ = 2; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetDeleteUserInstanceConfigFieldBuilder().getBuilder(), + extensionRegistry); + actionCase_ = 3; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetGetCloudInstanceConfigFieldBuilder().getBuilder(), + extensionRegistry); + actionCase_ = 4; + break; + } // case 34 + case 42: + { + input.readMessage( + internalGetListInstanceConfigsFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 5; + break; + } // case 42 + case 50: + { + input.readMessage( + 
internalGetCreateCloudInstanceFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 6; + break; + } // case 50 + case 58: + { + input.readMessage( + internalGetUpdateCloudInstanceFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 7; + break; + } // case 58 + case 66: + { + input.readMessage( + internalGetDeleteCloudInstanceFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 8; + break; + } // case 66 + case 74: + { + input.readMessage( + internalGetListCloudInstancesFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 9; + break; + } // case 74 + case 82: + { + input.readMessage( + internalGetGetCloudInstanceFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 10; + break; + } // case 82 + case 90: + { + input.readMessage( + internalGetCreateCloudDatabaseFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 11; + break; + } // case 90 + case 98: + { + input.readMessage( + internalGetUpdateCloudDatabaseDdlFieldBuilder().getBuilder(), + extensionRegistry); + actionCase_ = 12; + break; + } // case 98 + case 106: + { + input.readMessage( + internalGetDropCloudDatabaseFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 13; + break; + } // case 106 + case 114: + { + input.readMessage( + internalGetListCloudDatabasesFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 14; + break; + } // case 114 + case 122: + { + input.readMessage( + internalGetListCloudDatabaseOperationsFieldBuilder().getBuilder(), + extensionRegistry); + actionCase_ = 15; + break; + } // case 122 + case 130: + { + input.readMessage( + internalGetRestoreCloudDatabaseFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 16; + break; + } // case 130 + case 138: + { + input.readMessage( + internalGetGetCloudDatabaseFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 17; + break; + } // case 138 + case 146: + { + input.readMessage( + 
internalGetCreateCloudBackupFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 18; + break; + } // case 146 + case 154: + { + input.readMessage( + internalGetCopyCloudBackupFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 19; + break; + } // case 154 + case 162: + { + input.readMessage( + internalGetGetCloudBackupFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 20; + break; + } // case 162 + case 170: + { + input.readMessage( + internalGetUpdateCloudBackupFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 21; + break; + } // case 170 + case 178: + { + input.readMessage( + internalGetDeleteCloudBackupFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 22; + break; + } // case 178 + case 186: + { + input.readMessage( + internalGetListCloudBackupsFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 23; + break; + } // case 186 + case 194: + { + input.readMessage( + internalGetListCloudBackupOperationsFieldBuilder().getBuilder(), + extensionRegistry); + actionCase_ = 24; + break; + } // case 194 + case 202: + { + input.readMessage( + internalGetGetOperationFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 25; + break; + } // case 202 + case 210: + { + input.readMessage( + internalGetCancelOperationFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 26; + break; + } // case 210 + case 218: + { + input.readMessage( + internalGetUpdateCloudDatabaseFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 27; + break; + } // case 218 + case 226: + { + input.readMessage( + internalGetChangeQuorumCloudDatabaseFieldBuilder().getBuilder(), + extensionRegistry); + actionCase_ = 28; + break; + } // case 226 + case 234: + { + input.readMessage( + internalGetAddSplitPointsFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 29; + break; + } // case 234 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // 
was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int actionCase_ = 0; + private java.lang.Object action_; + + public ActionCase getActionCase() { + return ActionCase.forNumber(actionCase_); + } + + public Builder clearAction() { + actionCase_ = 0; + action_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CreateUserInstanceConfigAction, + com.google.spanner.executor.v1.CreateUserInstanceConfigAction.Builder, + com.google.spanner.executor.v1.CreateUserInstanceConfigActionOrBuilder> + createUserInstanceConfigBuilder_; + + /** + * + * + *
    +     * Action that creates a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.CreateUserInstanceConfigAction create_user_instance_config = 1; + * + * + * @return Whether the createUserInstanceConfig field is set. + */ + @java.lang.Override + public boolean hasCreateUserInstanceConfig() { + return actionCase_ == 1; + } + + /** + * + * + *
    +     * Action that creates a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.CreateUserInstanceConfigAction create_user_instance_config = 1; + * + * + * @return The createUserInstanceConfig. + */ + @java.lang.Override + public com.google.spanner.executor.v1.CreateUserInstanceConfigAction + getCreateUserInstanceConfig() { + if (createUserInstanceConfigBuilder_ == null) { + if (actionCase_ == 1) { + return (com.google.spanner.executor.v1.CreateUserInstanceConfigAction) action_; + } + return com.google.spanner.executor.v1.CreateUserInstanceConfigAction.getDefaultInstance(); + } else { + if (actionCase_ == 1) { + return createUserInstanceConfigBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.CreateUserInstanceConfigAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that creates a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.CreateUserInstanceConfigAction create_user_instance_config = 1; + * + */ + public Builder setCreateUserInstanceConfig( + com.google.spanner.executor.v1.CreateUserInstanceConfigAction value) { + if (createUserInstanceConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + createUserInstanceConfigBuilder_.setMessage(value); + } + actionCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Action that creates a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.CreateUserInstanceConfigAction create_user_instance_config = 1; + * + */ + public Builder setCreateUserInstanceConfig( + com.google.spanner.executor.v1.CreateUserInstanceConfigAction.Builder builderForValue) { + if (createUserInstanceConfigBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + createUserInstanceConfigBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Action that creates a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.CreateUserInstanceConfigAction create_user_instance_config = 1; + * + */ + public Builder mergeCreateUserInstanceConfig( + com.google.spanner.executor.v1.CreateUserInstanceConfigAction value) { + if (createUserInstanceConfigBuilder_ == null) { + if (actionCase_ == 1 + && action_ + != com.google.spanner.executor.v1.CreateUserInstanceConfigAction + .getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.CreateUserInstanceConfigAction.newBuilder( + (com.google.spanner.executor.v1.CreateUserInstanceConfigAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 1) { + createUserInstanceConfigBuilder_.mergeFrom(value); + } else { + createUserInstanceConfigBuilder_.setMessage(value); + } + } + actionCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Action that creates a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.CreateUserInstanceConfigAction create_user_instance_config = 1; + * + */ + public Builder clearCreateUserInstanceConfig() { + if (createUserInstanceConfigBuilder_ == null) { + if (actionCase_ == 1) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 1) { + actionCase_ = 0; + action_ = null; + } + createUserInstanceConfigBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that creates a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.CreateUserInstanceConfigAction create_user_instance_config = 1; + * + */ + public com.google.spanner.executor.v1.CreateUserInstanceConfigAction.Builder + getCreateUserInstanceConfigBuilder() { + return internalGetCreateUserInstanceConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that creates a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.CreateUserInstanceConfigAction create_user_instance_config = 1; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.CreateUserInstanceConfigActionOrBuilder + getCreateUserInstanceConfigOrBuilder() { + if ((actionCase_ == 1) && (createUserInstanceConfigBuilder_ != null)) { + return createUserInstanceConfigBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 1) { + return (com.google.spanner.executor.v1.CreateUserInstanceConfigAction) action_; + } + return com.google.spanner.executor.v1.CreateUserInstanceConfigAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that creates a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.CreateUserInstanceConfigAction create_user_instance_config = 1; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CreateUserInstanceConfigAction, + com.google.spanner.executor.v1.CreateUserInstanceConfigAction.Builder, + com.google.spanner.executor.v1.CreateUserInstanceConfigActionOrBuilder> + internalGetCreateUserInstanceConfigFieldBuilder() { + if (createUserInstanceConfigBuilder_ == null) { + if (!(actionCase_ == 1)) { + action_ = + com.google.spanner.executor.v1.CreateUserInstanceConfigAction.getDefaultInstance(); + } + createUserInstanceConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CreateUserInstanceConfigAction, + com.google.spanner.executor.v1.CreateUserInstanceConfigAction.Builder, + com.google.spanner.executor.v1.CreateUserInstanceConfigActionOrBuilder>( + (com.google.spanner.executor.v1.CreateUserInstanceConfigAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 1; + onChanged(); + return createUserInstanceConfigBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.UpdateUserInstanceConfigAction, + com.google.spanner.executor.v1.UpdateUserInstanceConfigAction.Builder, + com.google.spanner.executor.v1.UpdateUserInstanceConfigActionOrBuilder> + updateUserInstanceConfigBuilder_; + + /** + * + * + *
    +     * Action that updates a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.UpdateUserInstanceConfigAction update_user_instance_config = 2; + * + * + * @return Whether the updateUserInstanceConfig field is set. + */ + @java.lang.Override + public boolean hasUpdateUserInstanceConfig() { + return actionCase_ == 2; + } + + /** + * + * + *
    +     * Action that updates a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.UpdateUserInstanceConfigAction update_user_instance_config = 2; + * + * + * @return The updateUserInstanceConfig. + */ + @java.lang.Override + public com.google.spanner.executor.v1.UpdateUserInstanceConfigAction + getUpdateUserInstanceConfig() { + if (updateUserInstanceConfigBuilder_ == null) { + if (actionCase_ == 2) { + return (com.google.spanner.executor.v1.UpdateUserInstanceConfigAction) action_; + } + return com.google.spanner.executor.v1.UpdateUserInstanceConfigAction.getDefaultInstance(); + } else { + if (actionCase_ == 2) { + return updateUserInstanceConfigBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.UpdateUserInstanceConfigAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that updates a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.UpdateUserInstanceConfigAction update_user_instance_config = 2; + * + */ + public Builder setUpdateUserInstanceConfig( + com.google.spanner.executor.v1.UpdateUserInstanceConfigAction value) { + if (updateUserInstanceConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + updateUserInstanceConfigBuilder_.setMessage(value); + } + actionCase_ = 2; + return this; + } + + /** + * + * + *
    +     * Action that updates a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.UpdateUserInstanceConfigAction update_user_instance_config = 2; + * + */ + public Builder setUpdateUserInstanceConfig( + com.google.spanner.executor.v1.UpdateUserInstanceConfigAction.Builder builderForValue) { + if (updateUserInstanceConfigBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + updateUserInstanceConfigBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 2; + return this; + } + + /** + * + * + *
    +     * Action that updates a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.UpdateUserInstanceConfigAction update_user_instance_config = 2; + * + */ + public Builder mergeUpdateUserInstanceConfig( + com.google.spanner.executor.v1.UpdateUserInstanceConfigAction value) { + if (updateUserInstanceConfigBuilder_ == null) { + if (actionCase_ == 2 + && action_ + != com.google.spanner.executor.v1.UpdateUserInstanceConfigAction + .getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.UpdateUserInstanceConfigAction.newBuilder( + (com.google.spanner.executor.v1.UpdateUserInstanceConfigAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 2) { + updateUserInstanceConfigBuilder_.mergeFrom(value); + } else { + updateUserInstanceConfigBuilder_.setMessage(value); + } + } + actionCase_ = 2; + return this; + } + + /** + * + * + *
    +     * Action that updates a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.UpdateUserInstanceConfigAction update_user_instance_config = 2; + * + */ + public Builder clearUpdateUserInstanceConfig() { + if (updateUserInstanceConfigBuilder_ == null) { + if (actionCase_ == 2) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 2) { + actionCase_ = 0; + action_ = null; + } + updateUserInstanceConfigBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that updates a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.UpdateUserInstanceConfigAction update_user_instance_config = 2; + * + */ + public com.google.spanner.executor.v1.UpdateUserInstanceConfigAction.Builder + getUpdateUserInstanceConfigBuilder() { + return internalGetUpdateUserInstanceConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that updates a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.UpdateUserInstanceConfigAction update_user_instance_config = 2; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.UpdateUserInstanceConfigActionOrBuilder + getUpdateUserInstanceConfigOrBuilder() { + if ((actionCase_ == 2) && (updateUserInstanceConfigBuilder_ != null)) { + return updateUserInstanceConfigBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 2) { + return (com.google.spanner.executor.v1.UpdateUserInstanceConfigAction) action_; + } + return com.google.spanner.executor.v1.UpdateUserInstanceConfigAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that updates a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.UpdateUserInstanceConfigAction update_user_instance_config = 2; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.UpdateUserInstanceConfigAction, + com.google.spanner.executor.v1.UpdateUserInstanceConfigAction.Builder, + com.google.spanner.executor.v1.UpdateUserInstanceConfigActionOrBuilder> + internalGetUpdateUserInstanceConfigFieldBuilder() { + if (updateUserInstanceConfigBuilder_ == null) { + if (!(actionCase_ == 2)) { + action_ = + com.google.spanner.executor.v1.UpdateUserInstanceConfigAction.getDefaultInstance(); + } + updateUserInstanceConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.UpdateUserInstanceConfigAction, + com.google.spanner.executor.v1.UpdateUserInstanceConfigAction.Builder, + com.google.spanner.executor.v1.UpdateUserInstanceConfigActionOrBuilder>( + (com.google.spanner.executor.v1.UpdateUserInstanceConfigAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 2; + onChanged(); + return updateUserInstanceConfigBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.DeleteUserInstanceConfigAction, + com.google.spanner.executor.v1.DeleteUserInstanceConfigAction.Builder, + com.google.spanner.executor.v1.DeleteUserInstanceConfigActionOrBuilder> + deleteUserInstanceConfigBuilder_; + + /** + * + * + *
    +     * Action that deletes a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.DeleteUserInstanceConfigAction delete_user_instance_config = 3; + * + * + * @return Whether the deleteUserInstanceConfig field is set. + */ + @java.lang.Override + public boolean hasDeleteUserInstanceConfig() { + return actionCase_ == 3; + } + + /** + * + * + *
    +     * Action that deletes a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.DeleteUserInstanceConfigAction delete_user_instance_config = 3; + * + * + * @return The deleteUserInstanceConfig. + */ + @java.lang.Override + public com.google.spanner.executor.v1.DeleteUserInstanceConfigAction + getDeleteUserInstanceConfig() { + if (deleteUserInstanceConfigBuilder_ == null) { + if (actionCase_ == 3) { + return (com.google.spanner.executor.v1.DeleteUserInstanceConfigAction) action_; + } + return com.google.spanner.executor.v1.DeleteUserInstanceConfigAction.getDefaultInstance(); + } else { + if (actionCase_ == 3) { + return deleteUserInstanceConfigBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.DeleteUserInstanceConfigAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that deletes a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.DeleteUserInstanceConfigAction delete_user_instance_config = 3; + * + */ + public Builder setDeleteUserInstanceConfig( + com.google.spanner.executor.v1.DeleteUserInstanceConfigAction value) { + if (deleteUserInstanceConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + deleteUserInstanceConfigBuilder_.setMessage(value); + } + actionCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Action that deletes a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.DeleteUserInstanceConfigAction delete_user_instance_config = 3; + * + */ + public Builder setDeleteUserInstanceConfig( + com.google.spanner.executor.v1.DeleteUserInstanceConfigAction.Builder builderForValue) { + if (deleteUserInstanceConfigBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + deleteUserInstanceConfigBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Action that deletes a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.DeleteUserInstanceConfigAction delete_user_instance_config = 3; + * + */ + public Builder mergeDeleteUserInstanceConfig( + com.google.spanner.executor.v1.DeleteUserInstanceConfigAction value) { + if (deleteUserInstanceConfigBuilder_ == null) { + if (actionCase_ == 3 + && action_ + != com.google.spanner.executor.v1.DeleteUserInstanceConfigAction + .getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.DeleteUserInstanceConfigAction.newBuilder( + (com.google.spanner.executor.v1.DeleteUserInstanceConfigAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 3) { + deleteUserInstanceConfigBuilder_.mergeFrom(value); + } else { + deleteUserInstanceConfigBuilder_.setMessage(value); + } + } + actionCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Action that deletes a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.DeleteUserInstanceConfigAction delete_user_instance_config = 3; + * + */ + public Builder clearDeleteUserInstanceConfig() { + if (deleteUserInstanceConfigBuilder_ == null) { + if (actionCase_ == 3) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 3) { + actionCase_ = 0; + action_ = null; + } + deleteUserInstanceConfigBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that deletes a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.DeleteUserInstanceConfigAction delete_user_instance_config = 3; + * + */ + public com.google.spanner.executor.v1.DeleteUserInstanceConfigAction.Builder + getDeleteUserInstanceConfigBuilder() { + return internalGetDeleteUserInstanceConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that deletes a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.DeleteUserInstanceConfigAction delete_user_instance_config = 3; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.DeleteUserInstanceConfigActionOrBuilder + getDeleteUserInstanceConfigOrBuilder() { + if ((actionCase_ == 3) && (deleteUserInstanceConfigBuilder_ != null)) { + return deleteUserInstanceConfigBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 3) { + return (com.google.spanner.executor.v1.DeleteUserInstanceConfigAction) action_; + } + return com.google.spanner.executor.v1.DeleteUserInstanceConfigAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that deletes a user instance config.
    +     * 
    + * + * + * .google.spanner.executor.v1.DeleteUserInstanceConfigAction delete_user_instance_config = 3; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.DeleteUserInstanceConfigAction, + com.google.spanner.executor.v1.DeleteUserInstanceConfigAction.Builder, + com.google.spanner.executor.v1.DeleteUserInstanceConfigActionOrBuilder> + internalGetDeleteUserInstanceConfigFieldBuilder() { + if (deleteUserInstanceConfigBuilder_ == null) { + if (!(actionCase_ == 3)) { + action_ = + com.google.spanner.executor.v1.DeleteUserInstanceConfigAction.getDefaultInstance(); + } + deleteUserInstanceConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.DeleteUserInstanceConfigAction, + com.google.spanner.executor.v1.DeleteUserInstanceConfigAction.Builder, + com.google.spanner.executor.v1.DeleteUserInstanceConfigActionOrBuilder>( + (com.google.spanner.executor.v1.DeleteUserInstanceConfigAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 3; + onChanged(); + return deleteUserInstanceConfigBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.GetCloudInstanceConfigAction, + com.google.spanner.executor.v1.GetCloudInstanceConfigAction.Builder, + com.google.spanner.executor.v1.GetCloudInstanceConfigActionOrBuilder> + getCloudInstanceConfigBuilder_; + + /** + * + * + *
    +     * Action that gets a user instance config.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceConfigAction get_cloud_instance_config = 4; + * + * + * @return Whether the getCloudInstanceConfig field is set. + */ + @java.lang.Override + public boolean hasGetCloudInstanceConfig() { + return actionCase_ == 4; + } + + /** + * + * + *
    +     * Action that gets a user instance config.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceConfigAction get_cloud_instance_config = 4; + * + * + * @return The getCloudInstanceConfig. + */ + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudInstanceConfigAction getGetCloudInstanceConfig() { + if (getCloudInstanceConfigBuilder_ == null) { + if (actionCase_ == 4) { + return (com.google.spanner.executor.v1.GetCloudInstanceConfigAction) action_; + } + return com.google.spanner.executor.v1.GetCloudInstanceConfigAction.getDefaultInstance(); + } else { + if (actionCase_ == 4) { + return getCloudInstanceConfigBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.GetCloudInstanceConfigAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that gets a user instance config.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceConfigAction get_cloud_instance_config = 4; + * + */ + public Builder setGetCloudInstanceConfig( + com.google.spanner.executor.v1.GetCloudInstanceConfigAction value) { + if (getCloudInstanceConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + getCloudInstanceConfigBuilder_.setMessage(value); + } + actionCase_ = 4; + return this; + } + + /** + * + * + *
    +     * Action that gets a user instance config.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceConfigAction get_cloud_instance_config = 4; + * + */ + public Builder setGetCloudInstanceConfig( + com.google.spanner.executor.v1.GetCloudInstanceConfigAction.Builder builderForValue) { + if (getCloudInstanceConfigBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + getCloudInstanceConfigBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 4; + return this; + } + + /** + * + * + *
    +     * Action that gets a user instance config.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceConfigAction get_cloud_instance_config = 4; + * + */ + public Builder mergeGetCloudInstanceConfig( + com.google.spanner.executor.v1.GetCloudInstanceConfigAction value) { + if (getCloudInstanceConfigBuilder_ == null) { + if (actionCase_ == 4 + && action_ + != com.google.spanner.executor.v1.GetCloudInstanceConfigAction + .getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.GetCloudInstanceConfigAction.newBuilder( + (com.google.spanner.executor.v1.GetCloudInstanceConfigAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 4) { + getCloudInstanceConfigBuilder_.mergeFrom(value); + } else { + getCloudInstanceConfigBuilder_.setMessage(value); + } + } + actionCase_ = 4; + return this; + } + + /** + * + * + *
    +     * Action that gets a user instance config.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceConfigAction get_cloud_instance_config = 4; + * + */ + public Builder clearGetCloudInstanceConfig() { + if (getCloudInstanceConfigBuilder_ == null) { + if (actionCase_ == 4) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 4) { + actionCase_ = 0; + action_ = null; + } + getCloudInstanceConfigBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that gets a user instance config.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceConfigAction get_cloud_instance_config = 4; + * + */ + public com.google.spanner.executor.v1.GetCloudInstanceConfigAction.Builder + getGetCloudInstanceConfigBuilder() { + return internalGetGetCloudInstanceConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that gets a user instance config.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceConfigAction get_cloud_instance_config = 4; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudInstanceConfigActionOrBuilder + getGetCloudInstanceConfigOrBuilder() { + if ((actionCase_ == 4) && (getCloudInstanceConfigBuilder_ != null)) { + return getCloudInstanceConfigBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 4) { + return (com.google.spanner.executor.v1.GetCloudInstanceConfigAction) action_; + } + return com.google.spanner.executor.v1.GetCloudInstanceConfigAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that gets a user instance config.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceConfigAction get_cloud_instance_config = 4; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.GetCloudInstanceConfigAction, + com.google.spanner.executor.v1.GetCloudInstanceConfigAction.Builder, + com.google.spanner.executor.v1.GetCloudInstanceConfigActionOrBuilder> + internalGetGetCloudInstanceConfigFieldBuilder() { + if (getCloudInstanceConfigBuilder_ == null) { + if (!(actionCase_ == 4)) { + action_ = + com.google.spanner.executor.v1.GetCloudInstanceConfigAction.getDefaultInstance(); + } + getCloudInstanceConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.GetCloudInstanceConfigAction, + com.google.spanner.executor.v1.GetCloudInstanceConfigAction.Builder, + com.google.spanner.executor.v1.GetCloudInstanceConfigActionOrBuilder>( + (com.google.spanner.executor.v1.GetCloudInstanceConfigAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 4; + onChanged(); + return getCloudInstanceConfigBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ListCloudInstanceConfigsAction, + com.google.spanner.executor.v1.ListCloudInstanceConfigsAction.Builder, + com.google.spanner.executor.v1.ListCloudInstanceConfigsActionOrBuilder> + listInstanceConfigsBuilder_; + + /** + * + * + *
    +     * Action that lists user instance configs.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudInstanceConfigsAction list_instance_configs = 5; + * + * + * @return Whether the listInstanceConfigs field is set. + */ + @java.lang.Override + public boolean hasListInstanceConfigs() { + return actionCase_ == 5; + } + + /** + * + * + *
    +     * Action that lists user instance configs.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudInstanceConfigsAction list_instance_configs = 5; + * + * + * @return The listInstanceConfigs. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudInstanceConfigsAction getListInstanceConfigs() { + if (listInstanceConfigsBuilder_ == null) { + if (actionCase_ == 5) { + return (com.google.spanner.executor.v1.ListCloudInstanceConfigsAction) action_; + } + return com.google.spanner.executor.v1.ListCloudInstanceConfigsAction.getDefaultInstance(); + } else { + if (actionCase_ == 5) { + return listInstanceConfigsBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.ListCloudInstanceConfigsAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that lists user instance configs.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudInstanceConfigsAction list_instance_configs = 5; + * + */ + public Builder setListInstanceConfigs( + com.google.spanner.executor.v1.ListCloudInstanceConfigsAction value) { + if (listInstanceConfigsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + listInstanceConfigsBuilder_.setMessage(value); + } + actionCase_ = 5; + return this; + } + + /** + * + * + *
    +     * Action that lists user instance configs.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudInstanceConfigsAction list_instance_configs = 5; + * + */ + public Builder setListInstanceConfigs( + com.google.spanner.executor.v1.ListCloudInstanceConfigsAction.Builder builderForValue) { + if (listInstanceConfigsBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + listInstanceConfigsBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 5; + return this; + } + + /** + * + * + *
    +     * Action that lists user instance configs.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudInstanceConfigsAction list_instance_configs = 5; + * + */ + public Builder mergeListInstanceConfigs( + com.google.spanner.executor.v1.ListCloudInstanceConfigsAction value) { + if (listInstanceConfigsBuilder_ == null) { + if (actionCase_ == 5 + && action_ + != com.google.spanner.executor.v1.ListCloudInstanceConfigsAction + .getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.ListCloudInstanceConfigsAction.newBuilder( + (com.google.spanner.executor.v1.ListCloudInstanceConfigsAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 5) { + listInstanceConfigsBuilder_.mergeFrom(value); + } else { + listInstanceConfigsBuilder_.setMessage(value); + } + } + actionCase_ = 5; + return this; + } + + /** + * + * + *
    +     * Action that lists user instance configs.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudInstanceConfigsAction list_instance_configs = 5; + * + */ + public Builder clearListInstanceConfigs() { + if (listInstanceConfigsBuilder_ == null) { + if (actionCase_ == 5) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 5) { + actionCase_ = 0; + action_ = null; + } + listInstanceConfigsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that lists user instance configs.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudInstanceConfigsAction list_instance_configs = 5; + * + */ + public com.google.spanner.executor.v1.ListCloudInstanceConfigsAction.Builder + getListInstanceConfigsBuilder() { + return internalGetListInstanceConfigsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that lists user instance configs.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudInstanceConfigsAction list_instance_configs = 5; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudInstanceConfigsActionOrBuilder + getListInstanceConfigsOrBuilder() { + if ((actionCase_ == 5) && (listInstanceConfigsBuilder_ != null)) { + return listInstanceConfigsBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 5) { + return (com.google.spanner.executor.v1.ListCloudInstanceConfigsAction) action_; + } + return com.google.spanner.executor.v1.ListCloudInstanceConfigsAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that lists user instance configs.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudInstanceConfigsAction list_instance_configs = 5; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ListCloudInstanceConfigsAction, + com.google.spanner.executor.v1.ListCloudInstanceConfigsAction.Builder, + com.google.spanner.executor.v1.ListCloudInstanceConfigsActionOrBuilder> + internalGetListInstanceConfigsFieldBuilder() { + if (listInstanceConfigsBuilder_ == null) { + if (!(actionCase_ == 5)) { + action_ = + com.google.spanner.executor.v1.ListCloudInstanceConfigsAction.getDefaultInstance(); + } + listInstanceConfigsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ListCloudInstanceConfigsAction, + com.google.spanner.executor.v1.ListCloudInstanceConfigsAction.Builder, + com.google.spanner.executor.v1.ListCloudInstanceConfigsActionOrBuilder>( + (com.google.spanner.executor.v1.ListCloudInstanceConfigsAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 5; + onChanged(); + return listInstanceConfigsBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CreateCloudInstanceAction, + com.google.spanner.executor.v1.CreateCloudInstanceAction.Builder, + com.google.spanner.executor.v1.CreateCloudInstanceActionOrBuilder> + createCloudInstanceBuilder_; + + /** + * + * + *
    +     * Action that creates a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudInstanceAction create_cloud_instance = 6; + * + * @return Whether the createCloudInstance field is set. + */ + @java.lang.Override + public boolean hasCreateCloudInstance() { + return actionCase_ == 6; + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudInstanceAction create_cloud_instance = 6; + * + * @return The createCloudInstance. + */ + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudInstanceAction getCreateCloudInstance() { + if (createCloudInstanceBuilder_ == null) { + if (actionCase_ == 6) { + return (com.google.spanner.executor.v1.CreateCloudInstanceAction) action_; + } + return com.google.spanner.executor.v1.CreateCloudInstanceAction.getDefaultInstance(); + } else { + if (actionCase_ == 6) { + return createCloudInstanceBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.CreateCloudInstanceAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudInstanceAction create_cloud_instance = 6; + */ + public Builder setCreateCloudInstance( + com.google.spanner.executor.v1.CreateCloudInstanceAction value) { + if (createCloudInstanceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + createCloudInstanceBuilder_.setMessage(value); + } + actionCase_ = 6; + return this; + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudInstanceAction create_cloud_instance = 6; + */ + public Builder setCreateCloudInstance( + com.google.spanner.executor.v1.CreateCloudInstanceAction.Builder builderForValue) { + if (createCloudInstanceBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + createCloudInstanceBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 6; + return this; + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudInstanceAction create_cloud_instance = 6; + */ + public Builder mergeCreateCloudInstance( + com.google.spanner.executor.v1.CreateCloudInstanceAction value) { + if (createCloudInstanceBuilder_ == null) { + if (actionCase_ == 6 + && action_ + != com.google.spanner.executor.v1.CreateCloudInstanceAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.CreateCloudInstanceAction.newBuilder( + (com.google.spanner.executor.v1.CreateCloudInstanceAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 6) { + createCloudInstanceBuilder_.mergeFrom(value); + } else { + createCloudInstanceBuilder_.setMessage(value); + } + } + actionCase_ = 6; + return this; + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudInstanceAction create_cloud_instance = 6; + */ + public Builder clearCreateCloudInstance() { + if (createCloudInstanceBuilder_ == null) { + if (actionCase_ == 6) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 6) { + actionCase_ = 0; + action_ = null; + } + createCloudInstanceBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudInstanceAction create_cloud_instance = 6; + */ + public com.google.spanner.executor.v1.CreateCloudInstanceAction.Builder + getCreateCloudInstanceBuilder() { + return internalGetCreateCloudInstanceFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudInstanceAction create_cloud_instance = 6; + */ + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudInstanceActionOrBuilder + getCreateCloudInstanceOrBuilder() { + if ((actionCase_ == 6) && (createCloudInstanceBuilder_ != null)) { + return createCloudInstanceBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 6) { + return (com.google.spanner.executor.v1.CreateCloudInstanceAction) action_; + } + return com.google.spanner.executor.v1.CreateCloudInstanceAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudInstanceAction create_cloud_instance = 6; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CreateCloudInstanceAction, + com.google.spanner.executor.v1.CreateCloudInstanceAction.Builder, + com.google.spanner.executor.v1.CreateCloudInstanceActionOrBuilder> + internalGetCreateCloudInstanceFieldBuilder() { + if (createCloudInstanceBuilder_ == null) { + if (!(actionCase_ == 6)) { + action_ = com.google.spanner.executor.v1.CreateCloudInstanceAction.getDefaultInstance(); + } + createCloudInstanceBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CreateCloudInstanceAction, + com.google.spanner.executor.v1.CreateCloudInstanceAction.Builder, + com.google.spanner.executor.v1.CreateCloudInstanceActionOrBuilder>( + (com.google.spanner.executor.v1.CreateCloudInstanceAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 6; + onChanged(); + return createCloudInstanceBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.UpdateCloudInstanceAction, + com.google.spanner.executor.v1.UpdateCloudInstanceAction.Builder, + com.google.spanner.executor.v1.UpdateCloudInstanceActionOrBuilder> + updateCloudInstanceBuilder_; + + /** + * + * + *
    +     * Action that updates a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudInstanceAction update_cloud_instance = 7; + * + * @return Whether the updateCloudInstance field is set. + */ + @java.lang.Override + public boolean hasUpdateCloudInstance() { + return actionCase_ == 7; + } + + /** + * + * + *
    +     * Action that updates a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudInstanceAction update_cloud_instance = 7; + * + * @return The updateCloudInstance. + */ + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudInstanceAction getUpdateCloudInstance() { + if (updateCloudInstanceBuilder_ == null) { + if (actionCase_ == 7) { + return (com.google.spanner.executor.v1.UpdateCloudInstanceAction) action_; + } + return com.google.spanner.executor.v1.UpdateCloudInstanceAction.getDefaultInstance(); + } else { + if (actionCase_ == 7) { + return updateCloudInstanceBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.UpdateCloudInstanceAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that updates a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudInstanceAction update_cloud_instance = 7; + */ + public Builder setUpdateCloudInstance( + com.google.spanner.executor.v1.UpdateCloudInstanceAction value) { + if (updateCloudInstanceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + updateCloudInstanceBuilder_.setMessage(value); + } + actionCase_ = 7; + return this; + } + + /** + * + * + *
    +     * Action that updates a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudInstanceAction update_cloud_instance = 7; + */ + public Builder setUpdateCloudInstance( + com.google.spanner.executor.v1.UpdateCloudInstanceAction.Builder builderForValue) { + if (updateCloudInstanceBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + updateCloudInstanceBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 7; + return this; + } + + /** + * + * + *
    +     * Action that updates a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudInstanceAction update_cloud_instance = 7; + */ + public Builder mergeUpdateCloudInstance( + com.google.spanner.executor.v1.UpdateCloudInstanceAction value) { + if (updateCloudInstanceBuilder_ == null) { + if (actionCase_ == 7 + && action_ + != com.google.spanner.executor.v1.UpdateCloudInstanceAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.UpdateCloudInstanceAction.newBuilder( + (com.google.spanner.executor.v1.UpdateCloudInstanceAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 7) { + updateCloudInstanceBuilder_.mergeFrom(value); + } else { + updateCloudInstanceBuilder_.setMessage(value); + } + } + actionCase_ = 7; + return this; + } + + /** + * + * + *
    +     * Action that updates a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudInstanceAction update_cloud_instance = 7; + */ + public Builder clearUpdateCloudInstance() { + if (updateCloudInstanceBuilder_ == null) { + if (actionCase_ == 7) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 7) { + actionCase_ = 0; + action_ = null; + } + updateCloudInstanceBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that updates a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudInstanceAction update_cloud_instance = 7; + */ + public com.google.spanner.executor.v1.UpdateCloudInstanceAction.Builder + getUpdateCloudInstanceBuilder() { + return internalGetUpdateCloudInstanceFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that updates a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudInstanceAction update_cloud_instance = 7; + */ + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudInstanceActionOrBuilder + getUpdateCloudInstanceOrBuilder() { + if ((actionCase_ == 7) && (updateCloudInstanceBuilder_ != null)) { + return updateCloudInstanceBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 7) { + return (com.google.spanner.executor.v1.UpdateCloudInstanceAction) action_; + } + return com.google.spanner.executor.v1.UpdateCloudInstanceAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that updates a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudInstanceAction update_cloud_instance = 7; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.UpdateCloudInstanceAction, + com.google.spanner.executor.v1.UpdateCloudInstanceAction.Builder, + com.google.spanner.executor.v1.UpdateCloudInstanceActionOrBuilder> + internalGetUpdateCloudInstanceFieldBuilder() { + if (updateCloudInstanceBuilder_ == null) { + if (!(actionCase_ == 7)) { + action_ = com.google.spanner.executor.v1.UpdateCloudInstanceAction.getDefaultInstance(); + } + updateCloudInstanceBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.UpdateCloudInstanceAction, + com.google.spanner.executor.v1.UpdateCloudInstanceAction.Builder, + com.google.spanner.executor.v1.UpdateCloudInstanceActionOrBuilder>( + (com.google.spanner.executor.v1.UpdateCloudInstanceAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 7; + onChanged(); + return updateCloudInstanceBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.DeleteCloudInstanceAction, + com.google.spanner.executor.v1.DeleteCloudInstanceAction.Builder, + com.google.spanner.executor.v1.DeleteCloudInstanceActionOrBuilder> + deleteCloudInstanceBuilder_; + + /** + * + * + *
    +     * Action that deletes a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.DeleteCloudInstanceAction delete_cloud_instance = 8; + * + * @return Whether the deleteCloudInstance field is set. + */ + @java.lang.Override + public boolean hasDeleteCloudInstance() { + return actionCase_ == 8; + } + + /** + * + * + *
    +     * Action that deletes a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.DeleteCloudInstanceAction delete_cloud_instance = 8; + * + * @return The deleteCloudInstance. + */ + @java.lang.Override + public com.google.spanner.executor.v1.DeleteCloudInstanceAction getDeleteCloudInstance() { + if (deleteCloudInstanceBuilder_ == null) { + if (actionCase_ == 8) { + return (com.google.spanner.executor.v1.DeleteCloudInstanceAction) action_; + } + return com.google.spanner.executor.v1.DeleteCloudInstanceAction.getDefaultInstance(); + } else { + if (actionCase_ == 8) { + return deleteCloudInstanceBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.DeleteCloudInstanceAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that deletes a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.DeleteCloudInstanceAction delete_cloud_instance = 8; + */ + public Builder setDeleteCloudInstance( + com.google.spanner.executor.v1.DeleteCloudInstanceAction value) { + if (deleteCloudInstanceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + deleteCloudInstanceBuilder_.setMessage(value); + } + actionCase_ = 8; + return this; + } + + /** + * + * + *
    +     * Action that deletes a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.DeleteCloudInstanceAction delete_cloud_instance = 8; + */ + public Builder setDeleteCloudInstance( + com.google.spanner.executor.v1.DeleteCloudInstanceAction.Builder builderForValue) { + if (deleteCloudInstanceBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + deleteCloudInstanceBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 8; + return this; + } + + /** + * + * + *
    +     * Action that deletes a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.DeleteCloudInstanceAction delete_cloud_instance = 8; + */ + public Builder mergeDeleteCloudInstance( + com.google.spanner.executor.v1.DeleteCloudInstanceAction value) { + if (deleteCloudInstanceBuilder_ == null) { + if (actionCase_ == 8 + && action_ + != com.google.spanner.executor.v1.DeleteCloudInstanceAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.DeleteCloudInstanceAction.newBuilder( + (com.google.spanner.executor.v1.DeleteCloudInstanceAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 8) { + deleteCloudInstanceBuilder_.mergeFrom(value); + } else { + deleteCloudInstanceBuilder_.setMessage(value); + } + } + actionCase_ = 8; + return this; + } + + /** + * + * + *
    +     * Action that deletes a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.DeleteCloudInstanceAction delete_cloud_instance = 8; + */ + public Builder clearDeleteCloudInstance() { + if (deleteCloudInstanceBuilder_ == null) { + if (actionCase_ == 8) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 8) { + actionCase_ = 0; + action_ = null; + } + deleteCloudInstanceBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that deletes a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.DeleteCloudInstanceAction delete_cloud_instance = 8; + */ + public com.google.spanner.executor.v1.DeleteCloudInstanceAction.Builder + getDeleteCloudInstanceBuilder() { + return internalGetDeleteCloudInstanceFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that deletes a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.DeleteCloudInstanceAction delete_cloud_instance = 8; + */ + @java.lang.Override + public com.google.spanner.executor.v1.DeleteCloudInstanceActionOrBuilder + getDeleteCloudInstanceOrBuilder() { + if ((actionCase_ == 8) && (deleteCloudInstanceBuilder_ != null)) { + return deleteCloudInstanceBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 8) { + return (com.google.spanner.executor.v1.DeleteCloudInstanceAction) action_; + } + return com.google.spanner.executor.v1.DeleteCloudInstanceAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that deletes a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.DeleteCloudInstanceAction delete_cloud_instance = 8; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.DeleteCloudInstanceAction, + com.google.spanner.executor.v1.DeleteCloudInstanceAction.Builder, + com.google.spanner.executor.v1.DeleteCloudInstanceActionOrBuilder> + internalGetDeleteCloudInstanceFieldBuilder() { + if (deleteCloudInstanceBuilder_ == null) { + if (!(actionCase_ == 8)) { + action_ = com.google.spanner.executor.v1.DeleteCloudInstanceAction.getDefaultInstance(); + } + deleteCloudInstanceBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.DeleteCloudInstanceAction, + com.google.spanner.executor.v1.DeleteCloudInstanceAction.Builder, + com.google.spanner.executor.v1.DeleteCloudInstanceActionOrBuilder>( + (com.google.spanner.executor.v1.DeleteCloudInstanceAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 8; + onChanged(); + return deleteCloudInstanceBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ListCloudInstancesAction, + com.google.spanner.executor.v1.ListCloudInstancesAction.Builder, + com.google.spanner.executor.v1.ListCloudInstancesActionOrBuilder> + listCloudInstancesBuilder_; + + /** + * + * + *
    +     * Action that lists Cloud Spanner instances.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudInstancesAction list_cloud_instances = 9; + * + * @return Whether the listCloudInstances field is set. + */ + @java.lang.Override + public boolean hasListCloudInstances() { + return actionCase_ == 9; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner instances.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudInstancesAction list_cloud_instances = 9; + * + * @return The listCloudInstances. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudInstancesAction getListCloudInstances() { + if (listCloudInstancesBuilder_ == null) { + if (actionCase_ == 9) { + return (com.google.spanner.executor.v1.ListCloudInstancesAction) action_; + } + return com.google.spanner.executor.v1.ListCloudInstancesAction.getDefaultInstance(); + } else { + if (actionCase_ == 9) { + return listCloudInstancesBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.ListCloudInstancesAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner instances.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudInstancesAction list_cloud_instances = 9; + */ + public Builder setListCloudInstances( + com.google.spanner.executor.v1.ListCloudInstancesAction value) { + if (listCloudInstancesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + listCloudInstancesBuilder_.setMessage(value); + } + actionCase_ = 9; + return this; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner instances.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudInstancesAction list_cloud_instances = 9; + */ + public Builder setListCloudInstances( + com.google.spanner.executor.v1.ListCloudInstancesAction.Builder builderForValue) { + if (listCloudInstancesBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + listCloudInstancesBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 9; + return this; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner instances.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudInstancesAction list_cloud_instances = 9; + */ + public Builder mergeListCloudInstances( + com.google.spanner.executor.v1.ListCloudInstancesAction value) { + if (listCloudInstancesBuilder_ == null) { + if (actionCase_ == 9 + && action_ + != com.google.spanner.executor.v1.ListCloudInstancesAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.ListCloudInstancesAction.newBuilder( + (com.google.spanner.executor.v1.ListCloudInstancesAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 9) { + listCloudInstancesBuilder_.mergeFrom(value); + } else { + listCloudInstancesBuilder_.setMessage(value); + } + } + actionCase_ = 9; + return this; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner instances.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudInstancesAction list_cloud_instances = 9; + */ + public Builder clearListCloudInstances() { + if (listCloudInstancesBuilder_ == null) { + if (actionCase_ == 9) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 9) { + actionCase_ = 0; + action_ = null; + } + listCloudInstancesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner instances.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudInstancesAction list_cloud_instances = 9; + */ + public com.google.spanner.executor.v1.ListCloudInstancesAction.Builder + getListCloudInstancesBuilder() { + return internalGetListCloudInstancesFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner instances.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudInstancesAction list_cloud_instances = 9; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudInstancesActionOrBuilder + getListCloudInstancesOrBuilder() { + if ((actionCase_ == 9) && (listCloudInstancesBuilder_ != null)) { + return listCloudInstancesBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 9) { + return (com.google.spanner.executor.v1.ListCloudInstancesAction) action_; + } + return com.google.spanner.executor.v1.ListCloudInstancesAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner instances.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudInstancesAction list_cloud_instances = 9; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ListCloudInstancesAction, + com.google.spanner.executor.v1.ListCloudInstancesAction.Builder, + com.google.spanner.executor.v1.ListCloudInstancesActionOrBuilder> + internalGetListCloudInstancesFieldBuilder() { + if (listCloudInstancesBuilder_ == null) { + if (!(actionCase_ == 9)) { + action_ = com.google.spanner.executor.v1.ListCloudInstancesAction.getDefaultInstance(); + } + listCloudInstancesBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ListCloudInstancesAction, + com.google.spanner.executor.v1.ListCloudInstancesAction.Builder, + com.google.spanner.executor.v1.ListCloudInstancesActionOrBuilder>( + (com.google.spanner.executor.v1.ListCloudInstancesAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 9; + onChanged(); + return listCloudInstancesBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.GetCloudInstanceAction, + com.google.spanner.executor.v1.GetCloudInstanceAction.Builder, + com.google.spanner.executor.v1.GetCloudInstanceActionOrBuilder> + getCloudInstanceBuilder_; + + /** + * + * + *
    +     * Action that retrieves a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceAction get_cloud_instance = 10; + * + * @return Whether the getCloudInstance field is set. + */ + @java.lang.Override + public boolean hasGetCloudInstance() { + return actionCase_ == 10; + } + + /** + * + * + *
    +     * Action that retrieves a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceAction get_cloud_instance = 10; + * + * @return The getCloudInstance. + */ + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudInstanceAction getGetCloudInstance() { + if (getCloudInstanceBuilder_ == null) { + if (actionCase_ == 10) { + return (com.google.spanner.executor.v1.GetCloudInstanceAction) action_; + } + return com.google.spanner.executor.v1.GetCloudInstanceAction.getDefaultInstance(); + } else { + if (actionCase_ == 10) { + return getCloudInstanceBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.GetCloudInstanceAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that retrieves a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceAction get_cloud_instance = 10; + */ + public Builder setGetCloudInstance( + com.google.spanner.executor.v1.GetCloudInstanceAction value) { + if (getCloudInstanceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + getCloudInstanceBuilder_.setMessage(value); + } + actionCase_ = 10; + return this; + } + + /** + * + * + *
    +     * Action that retrieves a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceAction get_cloud_instance = 10; + */ + public Builder setGetCloudInstance( + com.google.spanner.executor.v1.GetCloudInstanceAction.Builder builderForValue) { + if (getCloudInstanceBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + getCloudInstanceBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 10; + return this; + } + + /** + * + * + *
    +     * Action that retrieves a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceAction get_cloud_instance = 10; + */ + public Builder mergeGetCloudInstance( + com.google.spanner.executor.v1.GetCloudInstanceAction value) { + if (getCloudInstanceBuilder_ == null) { + if (actionCase_ == 10 + && action_ + != com.google.spanner.executor.v1.GetCloudInstanceAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.GetCloudInstanceAction.newBuilder( + (com.google.spanner.executor.v1.GetCloudInstanceAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 10) { + getCloudInstanceBuilder_.mergeFrom(value); + } else { + getCloudInstanceBuilder_.setMessage(value); + } + } + actionCase_ = 10; + return this; + } + + /** + * + * + *
    +     * Action that retrieves a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceAction get_cloud_instance = 10; + */ + public Builder clearGetCloudInstance() { + if (getCloudInstanceBuilder_ == null) { + if (actionCase_ == 10) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 10) { + actionCase_ = 0; + action_ = null; + } + getCloudInstanceBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that retrieves a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceAction get_cloud_instance = 10; + */ + public com.google.spanner.executor.v1.GetCloudInstanceAction.Builder + getGetCloudInstanceBuilder() { + return internalGetGetCloudInstanceFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that retrieves a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceAction get_cloud_instance = 10; + */ + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudInstanceActionOrBuilder + getGetCloudInstanceOrBuilder() { + if ((actionCase_ == 10) && (getCloudInstanceBuilder_ != null)) { + return getCloudInstanceBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 10) { + return (com.google.spanner.executor.v1.GetCloudInstanceAction) action_; + } + return com.google.spanner.executor.v1.GetCloudInstanceAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that retrieves a Cloud Spanner instance.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceAction get_cloud_instance = 10; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.GetCloudInstanceAction, + com.google.spanner.executor.v1.GetCloudInstanceAction.Builder, + com.google.spanner.executor.v1.GetCloudInstanceActionOrBuilder> + internalGetGetCloudInstanceFieldBuilder() { + if (getCloudInstanceBuilder_ == null) { + if (!(actionCase_ == 10)) { + action_ = com.google.spanner.executor.v1.GetCloudInstanceAction.getDefaultInstance(); + } + getCloudInstanceBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.GetCloudInstanceAction, + com.google.spanner.executor.v1.GetCloudInstanceAction.Builder, + com.google.spanner.executor.v1.GetCloudInstanceActionOrBuilder>( + (com.google.spanner.executor.v1.GetCloudInstanceAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 10; + onChanged(); + return getCloudInstanceBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CreateCloudDatabaseAction, + com.google.spanner.executor.v1.CreateCloudDatabaseAction.Builder, + com.google.spanner.executor.v1.CreateCloudDatabaseActionOrBuilder> + createCloudDatabaseBuilder_; + + /** + * + * + *
    +     * Action that creates a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudDatabaseAction create_cloud_database = 11; + * + * + * @return Whether the createCloudDatabase field is set. + */ + @java.lang.Override + public boolean hasCreateCloudDatabase() { + return actionCase_ == 11; + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudDatabaseAction create_cloud_database = 11; + * + * + * @return The createCloudDatabase. + */ + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudDatabaseAction getCreateCloudDatabase() { + if (createCloudDatabaseBuilder_ == null) { + if (actionCase_ == 11) { + return (com.google.spanner.executor.v1.CreateCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.CreateCloudDatabaseAction.getDefaultInstance(); + } else { + if (actionCase_ == 11) { + return createCloudDatabaseBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.CreateCloudDatabaseAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudDatabaseAction create_cloud_database = 11; + * + */ + public Builder setCreateCloudDatabase( + com.google.spanner.executor.v1.CreateCloudDatabaseAction value) { + if (createCloudDatabaseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + createCloudDatabaseBuilder_.setMessage(value); + } + actionCase_ = 11; + return this; + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudDatabaseAction create_cloud_database = 11; + * + */ + public Builder setCreateCloudDatabase( + com.google.spanner.executor.v1.CreateCloudDatabaseAction.Builder builderForValue) { + if (createCloudDatabaseBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + createCloudDatabaseBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 11; + return this; + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudDatabaseAction create_cloud_database = 11; + * + */ + public Builder mergeCreateCloudDatabase( + com.google.spanner.executor.v1.CreateCloudDatabaseAction value) { + if (createCloudDatabaseBuilder_ == null) { + if (actionCase_ == 11 + && action_ + != com.google.spanner.executor.v1.CreateCloudDatabaseAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.CreateCloudDatabaseAction.newBuilder( + (com.google.spanner.executor.v1.CreateCloudDatabaseAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 11) { + createCloudDatabaseBuilder_.mergeFrom(value); + } else { + createCloudDatabaseBuilder_.setMessage(value); + } + } + actionCase_ = 11; + return this; + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudDatabaseAction create_cloud_database = 11; + * + */ + public Builder clearCreateCloudDatabase() { + if (createCloudDatabaseBuilder_ == null) { + if (actionCase_ == 11) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 11) { + actionCase_ = 0; + action_ = null; + } + createCloudDatabaseBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudDatabaseAction create_cloud_database = 11; + * + */ + public com.google.spanner.executor.v1.CreateCloudDatabaseAction.Builder + getCreateCloudDatabaseBuilder() { + return internalGetCreateCloudDatabaseFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudDatabaseAction create_cloud_database = 11; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudDatabaseActionOrBuilder + getCreateCloudDatabaseOrBuilder() { + if ((actionCase_ == 11) && (createCloudDatabaseBuilder_ != null)) { + return createCloudDatabaseBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 11) { + return (com.google.spanner.executor.v1.CreateCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.CreateCloudDatabaseAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudDatabaseAction create_cloud_database = 11; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CreateCloudDatabaseAction, + com.google.spanner.executor.v1.CreateCloudDatabaseAction.Builder, + com.google.spanner.executor.v1.CreateCloudDatabaseActionOrBuilder> + internalGetCreateCloudDatabaseFieldBuilder() { + if (createCloudDatabaseBuilder_ == null) { + if (!(actionCase_ == 11)) { + action_ = com.google.spanner.executor.v1.CreateCloudDatabaseAction.getDefaultInstance(); + } + createCloudDatabaseBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CreateCloudDatabaseAction, + com.google.spanner.executor.v1.CreateCloudDatabaseAction.Builder, + com.google.spanner.executor.v1.CreateCloudDatabaseActionOrBuilder>( + (com.google.spanner.executor.v1.CreateCloudDatabaseAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 11; + onChanged(); + return createCloudDatabaseBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction, + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction.Builder, + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlActionOrBuilder> + updateCloudDatabaseDdlBuilder_; + + /** + * + * + *
    +     * Action that updates the schema of a Cloud Spanner database.
    +     * 
    + * + * + * .google.spanner.executor.v1.UpdateCloudDatabaseDdlAction update_cloud_database_ddl = 12; + * + * + * @return Whether the updateCloudDatabaseDdl field is set. + */ + @java.lang.Override + public boolean hasUpdateCloudDatabaseDdl() { + return actionCase_ == 12; + } + + /** + * + * + *
    +     * Action that updates the schema of a Cloud Spanner database.
    +     * 
    + * + * + * .google.spanner.executor.v1.UpdateCloudDatabaseDdlAction update_cloud_database_ddl = 12; + * + * + * @return The updateCloudDatabaseDdl. + */ + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction getUpdateCloudDatabaseDdl() { + if (updateCloudDatabaseDdlBuilder_ == null) { + if (actionCase_ == 12) { + return (com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction) action_; + } + return com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction.getDefaultInstance(); + } else { + if (actionCase_ == 12) { + return updateCloudDatabaseDdlBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that updates the schema of a Cloud Spanner database.
    +     * 
    + * + * + * .google.spanner.executor.v1.UpdateCloudDatabaseDdlAction update_cloud_database_ddl = 12; + * + */ + public Builder setUpdateCloudDatabaseDdl( + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction value) { + if (updateCloudDatabaseDdlBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + updateCloudDatabaseDdlBuilder_.setMessage(value); + } + actionCase_ = 12; + return this; + } + + /** + * + * + *
    +     * Action that updates the schema of a Cloud Spanner database.
    +     * 
    + * + * + * .google.spanner.executor.v1.UpdateCloudDatabaseDdlAction update_cloud_database_ddl = 12; + * + */ + public Builder setUpdateCloudDatabaseDdl( + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction.Builder builderForValue) { + if (updateCloudDatabaseDdlBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + updateCloudDatabaseDdlBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 12; + return this; + } + + /** + * + * + *
    +     * Action that updates the schema of a Cloud Spanner database.
    +     * 
    + * + * + * .google.spanner.executor.v1.UpdateCloudDatabaseDdlAction update_cloud_database_ddl = 12; + * + */ + public Builder mergeUpdateCloudDatabaseDdl( + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction value) { + if (updateCloudDatabaseDdlBuilder_ == null) { + if (actionCase_ == 12 + && action_ + != com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction + .getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction.newBuilder( + (com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 12) { + updateCloudDatabaseDdlBuilder_.mergeFrom(value); + } else { + updateCloudDatabaseDdlBuilder_.setMessage(value); + } + } + actionCase_ = 12; + return this; + } + + /** + * + * + *
    +     * Action that updates the schema of a Cloud Spanner database.
    +     * 
    + * + * + * .google.spanner.executor.v1.UpdateCloudDatabaseDdlAction update_cloud_database_ddl = 12; + * + */ + public Builder clearUpdateCloudDatabaseDdl() { + if (updateCloudDatabaseDdlBuilder_ == null) { + if (actionCase_ == 12) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 12) { + actionCase_ = 0; + action_ = null; + } + updateCloudDatabaseDdlBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that updates the schema of a Cloud Spanner database.
    +     * 
    + * + * + * .google.spanner.executor.v1.UpdateCloudDatabaseDdlAction update_cloud_database_ddl = 12; + * + */ + public com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction.Builder + getUpdateCloudDatabaseDdlBuilder() { + return internalGetUpdateCloudDatabaseDdlFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that updates the schema of a Cloud Spanner database.
    +     * 
    + * + * + * .google.spanner.executor.v1.UpdateCloudDatabaseDdlAction update_cloud_database_ddl = 12; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudDatabaseDdlActionOrBuilder + getUpdateCloudDatabaseDdlOrBuilder() { + if ((actionCase_ == 12) && (updateCloudDatabaseDdlBuilder_ != null)) { + return updateCloudDatabaseDdlBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 12) { + return (com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction) action_; + } + return com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that updates the schema of a Cloud Spanner database.
    +     * 
    + * + * + * .google.spanner.executor.v1.UpdateCloudDatabaseDdlAction update_cloud_database_ddl = 12; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction, + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction.Builder, + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlActionOrBuilder> + internalGetUpdateCloudDatabaseDdlFieldBuilder() { + if (updateCloudDatabaseDdlBuilder_ == null) { + if (!(actionCase_ == 12)) { + action_ = + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction.getDefaultInstance(); + } + updateCloudDatabaseDdlBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction, + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction.Builder, + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlActionOrBuilder>( + (com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 12; + onChanged(); + return updateCloudDatabaseDdlBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.UpdateCloudDatabaseAction, + com.google.spanner.executor.v1.UpdateCloudDatabaseAction.Builder, + com.google.spanner.executor.v1.UpdateCloudDatabaseActionOrBuilder> + updateCloudDatabaseBuilder_; + + /** + * + * + *
    +     * Action that updates the schema of a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudDatabaseAction update_cloud_database = 27; + * + * + * @return Whether the updateCloudDatabase field is set. + */ + @java.lang.Override + public boolean hasUpdateCloudDatabase() { + return actionCase_ == 27; + } + + /** + * + * + *
    +     * Action that updates the schema of a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudDatabaseAction update_cloud_database = 27; + * + * + * @return The updateCloudDatabase. + */ + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudDatabaseAction getUpdateCloudDatabase() { + if (updateCloudDatabaseBuilder_ == null) { + if (actionCase_ == 27) { + return (com.google.spanner.executor.v1.UpdateCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.UpdateCloudDatabaseAction.getDefaultInstance(); + } else { + if (actionCase_ == 27) { + return updateCloudDatabaseBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.UpdateCloudDatabaseAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that updates the schema of a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudDatabaseAction update_cloud_database = 27; + * + */ + public Builder setUpdateCloudDatabase( + com.google.spanner.executor.v1.UpdateCloudDatabaseAction value) { + if (updateCloudDatabaseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + updateCloudDatabaseBuilder_.setMessage(value); + } + actionCase_ = 27; + return this; + } + + /** + * + * + *
    +     * Action that updates the schema of a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudDatabaseAction update_cloud_database = 27; + * + */ + public Builder setUpdateCloudDatabase( + com.google.spanner.executor.v1.UpdateCloudDatabaseAction.Builder builderForValue) { + if (updateCloudDatabaseBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + updateCloudDatabaseBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 27; + return this; + } + + /** + * + * + *
    +     * Action that updates the schema of a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudDatabaseAction update_cloud_database = 27; + * + */ + public Builder mergeUpdateCloudDatabase( + com.google.spanner.executor.v1.UpdateCloudDatabaseAction value) { + if (updateCloudDatabaseBuilder_ == null) { + if (actionCase_ == 27 + && action_ + != com.google.spanner.executor.v1.UpdateCloudDatabaseAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.UpdateCloudDatabaseAction.newBuilder( + (com.google.spanner.executor.v1.UpdateCloudDatabaseAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 27) { + updateCloudDatabaseBuilder_.mergeFrom(value); + } else { + updateCloudDatabaseBuilder_.setMessage(value); + } + } + actionCase_ = 27; + return this; + } + + /** + * + * + *
    +     * Action that updates the schema of a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudDatabaseAction update_cloud_database = 27; + * + */ + public Builder clearUpdateCloudDatabase() { + if (updateCloudDatabaseBuilder_ == null) { + if (actionCase_ == 27) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 27) { + actionCase_ = 0; + action_ = null; + } + updateCloudDatabaseBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that updates the schema of a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudDatabaseAction update_cloud_database = 27; + * + */ + public com.google.spanner.executor.v1.UpdateCloudDatabaseAction.Builder + getUpdateCloudDatabaseBuilder() { + return internalGetUpdateCloudDatabaseFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that updates the schema of a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudDatabaseAction update_cloud_database = 27; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudDatabaseActionOrBuilder + getUpdateCloudDatabaseOrBuilder() { + if ((actionCase_ == 27) && (updateCloudDatabaseBuilder_ != null)) { + return updateCloudDatabaseBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 27) { + return (com.google.spanner.executor.v1.UpdateCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.UpdateCloudDatabaseAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that updates the schema of a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudDatabaseAction update_cloud_database = 27; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.UpdateCloudDatabaseAction, + com.google.spanner.executor.v1.UpdateCloudDatabaseAction.Builder, + com.google.spanner.executor.v1.UpdateCloudDatabaseActionOrBuilder> + internalGetUpdateCloudDatabaseFieldBuilder() { + if (updateCloudDatabaseBuilder_ == null) { + if (!(actionCase_ == 27)) { + action_ = com.google.spanner.executor.v1.UpdateCloudDatabaseAction.getDefaultInstance(); + } + updateCloudDatabaseBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.UpdateCloudDatabaseAction, + com.google.spanner.executor.v1.UpdateCloudDatabaseAction.Builder, + com.google.spanner.executor.v1.UpdateCloudDatabaseActionOrBuilder>( + (com.google.spanner.executor.v1.UpdateCloudDatabaseAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 27; + onChanged(); + return updateCloudDatabaseBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.DropCloudDatabaseAction, + com.google.spanner.executor.v1.DropCloudDatabaseAction.Builder, + com.google.spanner.executor.v1.DropCloudDatabaseActionOrBuilder> + dropCloudDatabaseBuilder_; + + /** + * + * + *
    +     * Action that drops a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.DropCloudDatabaseAction drop_cloud_database = 13; + * + * @return Whether the dropCloudDatabase field is set. + */ + @java.lang.Override + public boolean hasDropCloudDatabase() { + return actionCase_ == 13; + } + + /** + * + * + *
    +     * Action that drops a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.DropCloudDatabaseAction drop_cloud_database = 13; + * + * @return The dropCloudDatabase. + */ + @java.lang.Override + public com.google.spanner.executor.v1.DropCloudDatabaseAction getDropCloudDatabase() { + if (dropCloudDatabaseBuilder_ == null) { + if (actionCase_ == 13) { + return (com.google.spanner.executor.v1.DropCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.DropCloudDatabaseAction.getDefaultInstance(); + } else { + if (actionCase_ == 13) { + return dropCloudDatabaseBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.DropCloudDatabaseAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that drops a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.DropCloudDatabaseAction drop_cloud_database = 13; + */ + public Builder setDropCloudDatabase( + com.google.spanner.executor.v1.DropCloudDatabaseAction value) { + if (dropCloudDatabaseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + dropCloudDatabaseBuilder_.setMessage(value); + } + actionCase_ = 13; + return this; + } + + /** + * + * + *
    +     * Action that drops a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.DropCloudDatabaseAction drop_cloud_database = 13; + */ + public Builder setDropCloudDatabase( + com.google.spanner.executor.v1.DropCloudDatabaseAction.Builder builderForValue) { + if (dropCloudDatabaseBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + dropCloudDatabaseBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 13; + return this; + } + + /** + * + * + *
    +     * Action that drops a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.DropCloudDatabaseAction drop_cloud_database = 13; + */ + public Builder mergeDropCloudDatabase( + com.google.spanner.executor.v1.DropCloudDatabaseAction value) { + if (dropCloudDatabaseBuilder_ == null) { + if (actionCase_ == 13 + && action_ + != com.google.spanner.executor.v1.DropCloudDatabaseAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.DropCloudDatabaseAction.newBuilder( + (com.google.spanner.executor.v1.DropCloudDatabaseAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 13) { + dropCloudDatabaseBuilder_.mergeFrom(value); + } else { + dropCloudDatabaseBuilder_.setMessage(value); + } + } + actionCase_ = 13; + return this; + } + + /** + * + * + *
    +     * Action that drops a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.DropCloudDatabaseAction drop_cloud_database = 13; + */ + public Builder clearDropCloudDatabase() { + if (dropCloudDatabaseBuilder_ == null) { + if (actionCase_ == 13) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 13) { + actionCase_ = 0; + action_ = null; + } + dropCloudDatabaseBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that drops a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.DropCloudDatabaseAction drop_cloud_database = 13; + */ + public com.google.spanner.executor.v1.DropCloudDatabaseAction.Builder + getDropCloudDatabaseBuilder() { + return internalGetDropCloudDatabaseFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that drops a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.DropCloudDatabaseAction drop_cloud_database = 13; + */ + @java.lang.Override + public com.google.spanner.executor.v1.DropCloudDatabaseActionOrBuilder + getDropCloudDatabaseOrBuilder() { + if ((actionCase_ == 13) && (dropCloudDatabaseBuilder_ != null)) { + return dropCloudDatabaseBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 13) { + return (com.google.spanner.executor.v1.DropCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.DropCloudDatabaseAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that drops a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.DropCloudDatabaseAction drop_cloud_database = 13; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.DropCloudDatabaseAction, + com.google.spanner.executor.v1.DropCloudDatabaseAction.Builder, + com.google.spanner.executor.v1.DropCloudDatabaseActionOrBuilder> + internalGetDropCloudDatabaseFieldBuilder() { + if (dropCloudDatabaseBuilder_ == null) { + if (!(actionCase_ == 13)) { + action_ = com.google.spanner.executor.v1.DropCloudDatabaseAction.getDefaultInstance(); + } + dropCloudDatabaseBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.DropCloudDatabaseAction, + com.google.spanner.executor.v1.DropCloudDatabaseAction.Builder, + com.google.spanner.executor.v1.DropCloudDatabaseActionOrBuilder>( + (com.google.spanner.executor.v1.DropCloudDatabaseAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 13; + onChanged(); + return dropCloudDatabaseBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ListCloudDatabasesAction, + com.google.spanner.executor.v1.ListCloudDatabasesAction.Builder, + com.google.spanner.executor.v1.ListCloudDatabasesActionOrBuilder> + listCloudDatabasesBuilder_; + + /** + * + * + *
    +     * Action that lists Cloud Spanner databases.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudDatabasesAction list_cloud_databases = 14; + * + * @return Whether the listCloudDatabases field is set. + */ + @java.lang.Override + public boolean hasListCloudDatabases() { + return actionCase_ == 14; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner databases.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudDatabasesAction list_cloud_databases = 14; + * + * @return The listCloudDatabases. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudDatabasesAction getListCloudDatabases() { + if (listCloudDatabasesBuilder_ == null) { + if (actionCase_ == 14) { + return (com.google.spanner.executor.v1.ListCloudDatabasesAction) action_; + } + return com.google.spanner.executor.v1.ListCloudDatabasesAction.getDefaultInstance(); + } else { + if (actionCase_ == 14) { + return listCloudDatabasesBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.ListCloudDatabasesAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner databases.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudDatabasesAction list_cloud_databases = 14; + */ + public Builder setListCloudDatabases( + com.google.spanner.executor.v1.ListCloudDatabasesAction value) { + if (listCloudDatabasesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + listCloudDatabasesBuilder_.setMessage(value); + } + actionCase_ = 14; + return this; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner databases.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudDatabasesAction list_cloud_databases = 14; + */ + public Builder setListCloudDatabases( + com.google.spanner.executor.v1.ListCloudDatabasesAction.Builder builderForValue) { + if (listCloudDatabasesBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + listCloudDatabasesBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 14; + return this; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner databases.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudDatabasesAction list_cloud_databases = 14; + */ + public Builder mergeListCloudDatabases( + com.google.spanner.executor.v1.ListCloudDatabasesAction value) { + if (listCloudDatabasesBuilder_ == null) { + if (actionCase_ == 14 + && action_ + != com.google.spanner.executor.v1.ListCloudDatabasesAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.ListCloudDatabasesAction.newBuilder( + (com.google.spanner.executor.v1.ListCloudDatabasesAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 14) { + listCloudDatabasesBuilder_.mergeFrom(value); + } else { + listCloudDatabasesBuilder_.setMessage(value); + } + } + actionCase_ = 14; + return this; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner databases.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudDatabasesAction list_cloud_databases = 14; + */ + public Builder clearListCloudDatabases() { + if (listCloudDatabasesBuilder_ == null) { + if (actionCase_ == 14) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 14) { + actionCase_ = 0; + action_ = null; + } + listCloudDatabasesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner databases.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudDatabasesAction list_cloud_databases = 14; + */ + public com.google.spanner.executor.v1.ListCloudDatabasesAction.Builder + getListCloudDatabasesBuilder() { + return internalGetListCloudDatabasesFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner databases.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudDatabasesAction list_cloud_databases = 14; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudDatabasesActionOrBuilder + getListCloudDatabasesOrBuilder() { + if ((actionCase_ == 14) && (listCloudDatabasesBuilder_ != null)) { + return listCloudDatabasesBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 14) { + return (com.google.spanner.executor.v1.ListCloudDatabasesAction) action_; + } + return com.google.spanner.executor.v1.ListCloudDatabasesAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner databases.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudDatabasesAction list_cloud_databases = 14; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ListCloudDatabasesAction, + com.google.spanner.executor.v1.ListCloudDatabasesAction.Builder, + com.google.spanner.executor.v1.ListCloudDatabasesActionOrBuilder> + internalGetListCloudDatabasesFieldBuilder() { + if (listCloudDatabasesBuilder_ == null) { + if (!(actionCase_ == 14)) { + action_ = com.google.spanner.executor.v1.ListCloudDatabasesAction.getDefaultInstance(); + } + listCloudDatabasesBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ListCloudDatabasesAction, + com.google.spanner.executor.v1.ListCloudDatabasesAction.Builder, + com.google.spanner.executor.v1.ListCloudDatabasesActionOrBuilder>( + (com.google.spanner.executor.v1.ListCloudDatabasesAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 14; + onChanged(); + return listCloudDatabasesBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction, + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction.Builder, + com.google.spanner.executor.v1.ListCloudDatabaseOperationsActionOrBuilder> + listCloudDatabaseOperationsBuilder_; + + /** + * + * + *
    +     * Action that lists Cloud Spanner database operations.
    +     * 
    + * + * + * .google.spanner.executor.v1.ListCloudDatabaseOperationsAction list_cloud_database_operations = 15; + * + * + * @return Whether the listCloudDatabaseOperations field is set. + */ + @java.lang.Override + public boolean hasListCloudDatabaseOperations() { + return actionCase_ == 15; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database operations.
    +     * 
    + * + * + * .google.spanner.executor.v1.ListCloudDatabaseOperationsAction list_cloud_database_operations = 15; + * + * + * @return The listCloudDatabaseOperations. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction + getListCloudDatabaseOperations() { + if (listCloudDatabaseOperationsBuilder_ == null) { + if (actionCase_ == 15) { + return (com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction) action_; + } + return com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction + .getDefaultInstance(); + } else { + if (actionCase_ == 15) { + return listCloudDatabaseOperationsBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction + .getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database operations.
    +     * 
    + * + * + * .google.spanner.executor.v1.ListCloudDatabaseOperationsAction list_cloud_database_operations = 15; + * + */ + public Builder setListCloudDatabaseOperations( + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction value) { + if (listCloudDatabaseOperationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + listCloudDatabaseOperationsBuilder_.setMessage(value); + } + actionCase_ = 15; + return this; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database operations.
    +     * 
    + * + * + * .google.spanner.executor.v1.ListCloudDatabaseOperationsAction list_cloud_database_operations = 15; + * + */ + public Builder setListCloudDatabaseOperations( + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction.Builder builderForValue) { + if (listCloudDatabaseOperationsBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + listCloudDatabaseOperationsBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 15; + return this; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database operations.
    +     * 
    + * + * + * .google.spanner.executor.v1.ListCloudDatabaseOperationsAction list_cloud_database_operations = 15; + * + */ + public Builder mergeListCloudDatabaseOperations( + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction value) { + if (listCloudDatabaseOperationsBuilder_ == null) { + if (actionCase_ == 15 + && action_ + != com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction + .getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction.newBuilder( + (com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 15) { + listCloudDatabaseOperationsBuilder_.mergeFrom(value); + } else { + listCloudDatabaseOperationsBuilder_.setMessage(value); + } + } + actionCase_ = 15; + return this; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database operations.
    +     * 
    + * + * + * .google.spanner.executor.v1.ListCloudDatabaseOperationsAction list_cloud_database_operations = 15; + * + */ + public Builder clearListCloudDatabaseOperations() { + if (listCloudDatabaseOperationsBuilder_ == null) { + if (actionCase_ == 15) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 15) { + actionCase_ = 0; + action_ = null; + } + listCloudDatabaseOperationsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database operations.
    +     * 
    + * + * + * .google.spanner.executor.v1.ListCloudDatabaseOperationsAction list_cloud_database_operations = 15; + * + */ + public com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction.Builder + getListCloudDatabaseOperationsBuilder() { + return internalGetListCloudDatabaseOperationsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database operations.
    +     * 
    + * + * + * .google.spanner.executor.v1.ListCloudDatabaseOperationsAction list_cloud_database_operations = 15; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudDatabaseOperationsActionOrBuilder + getListCloudDatabaseOperationsOrBuilder() { + if ((actionCase_ == 15) && (listCloudDatabaseOperationsBuilder_ != null)) { + return listCloudDatabaseOperationsBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 15) { + return (com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction) action_; + } + return com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction + .getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database operations.
    +     * 
    + * + * + * .google.spanner.executor.v1.ListCloudDatabaseOperationsAction list_cloud_database_operations = 15; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction, + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction.Builder, + com.google.spanner.executor.v1.ListCloudDatabaseOperationsActionOrBuilder> + internalGetListCloudDatabaseOperationsFieldBuilder() { + if (listCloudDatabaseOperationsBuilder_ == null) { + if (!(actionCase_ == 15)) { + action_ = + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction.getDefaultInstance(); + } + listCloudDatabaseOperationsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction, + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction.Builder, + com.google.spanner.executor.v1.ListCloudDatabaseOperationsActionOrBuilder>( + (com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 15; + onChanged(); + return listCloudDatabaseOperationsBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.RestoreCloudDatabaseAction, + com.google.spanner.executor.v1.RestoreCloudDatabaseAction.Builder, + com.google.spanner.executor.v1.RestoreCloudDatabaseActionOrBuilder> + restoreCloudDatabaseBuilder_; + + /** + * + * + *
    +     * Action that restores a Cloud Spanner database from a backup.
    +     * 
    + * + * .google.spanner.executor.v1.RestoreCloudDatabaseAction restore_cloud_database = 16; + * + * + * @return Whether the restoreCloudDatabase field is set. + */ + @java.lang.Override + public boolean hasRestoreCloudDatabase() { + return actionCase_ == 16; + } + + /** + * + * + *
    +     * Action that restores a Cloud Spanner database from a backup.
    +     * 
    + * + * .google.spanner.executor.v1.RestoreCloudDatabaseAction restore_cloud_database = 16; + * + * + * @return The restoreCloudDatabase. + */ + @java.lang.Override + public com.google.spanner.executor.v1.RestoreCloudDatabaseAction getRestoreCloudDatabase() { + if (restoreCloudDatabaseBuilder_ == null) { + if (actionCase_ == 16) { + return (com.google.spanner.executor.v1.RestoreCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.RestoreCloudDatabaseAction.getDefaultInstance(); + } else { + if (actionCase_ == 16) { + return restoreCloudDatabaseBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.RestoreCloudDatabaseAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that restores a Cloud Spanner database from a backup.
    +     * 
    + * + * .google.spanner.executor.v1.RestoreCloudDatabaseAction restore_cloud_database = 16; + * + */ + public Builder setRestoreCloudDatabase( + com.google.spanner.executor.v1.RestoreCloudDatabaseAction value) { + if (restoreCloudDatabaseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + restoreCloudDatabaseBuilder_.setMessage(value); + } + actionCase_ = 16; + return this; + } + + /** + * + * + *
    +     * Action that restores a Cloud Spanner database from a backup.
    +     * 
    + * + * .google.spanner.executor.v1.RestoreCloudDatabaseAction restore_cloud_database = 16; + * + */ + public Builder setRestoreCloudDatabase( + com.google.spanner.executor.v1.RestoreCloudDatabaseAction.Builder builderForValue) { + if (restoreCloudDatabaseBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + restoreCloudDatabaseBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 16; + return this; + } + + /** + * + * + *
    +     * Action that restores a Cloud Spanner database from a backup.
    +     * 
    + * + * .google.spanner.executor.v1.RestoreCloudDatabaseAction restore_cloud_database = 16; + * + */ + public Builder mergeRestoreCloudDatabase( + com.google.spanner.executor.v1.RestoreCloudDatabaseAction value) { + if (restoreCloudDatabaseBuilder_ == null) { + if (actionCase_ == 16 + && action_ + != com.google.spanner.executor.v1.RestoreCloudDatabaseAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.RestoreCloudDatabaseAction.newBuilder( + (com.google.spanner.executor.v1.RestoreCloudDatabaseAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 16) { + restoreCloudDatabaseBuilder_.mergeFrom(value); + } else { + restoreCloudDatabaseBuilder_.setMessage(value); + } + } + actionCase_ = 16; + return this; + } + + /** + * + * + *
    +     * Action that restores a Cloud Spanner database from a backup.
    +     * 
    + * + * .google.spanner.executor.v1.RestoreCloudDatabaseAction restore_cloud_database = 16; + * + */ + public Builder clearRestoreCloudDatabase() { + if (restoreCloudDatabaseBuilder_ == null) { + if (actionCase_ == 16) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 16) { + actionCase_ = 0; + action_ = null; + } + restoreCloudDatabaseBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that restores a Cloud Spanner database from a backup.
    +     * 
    + * + * .google.spanner.executor.v1.RestoreCloudDatabaseAction restore_cloud_database = 16; + * + */ + public com.google.spanner.executor.v1.RestoreCloudDatabaseAction.Builder + getRestoreCloudDatabaseBuilder() { + return internalGetRestoreCloudDatabaseFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that restores a Cloud Spanner database from a backup.
    +     * 
    + * + * .google.spanner.executor.v1.RestoreCloudDatabaseAction restore_cloud_database = 16; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.RestoreCloudDatabaseActionOrBuilder + getRestoreCloudDatabaseOrBuilder() { + if ((actionCase_ == 16) && (restoreCloudDatabaseBuilder_ != null)) { + return restoreCloudDatabaseBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 16) { + return (com.google.spanner.executor.v1.RestoreCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.RestoreCloudDatabaseAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that restores a Cloud Spanner database from a backup.
    +     * 
    + * + * .google.spanner.executor.v1.RestoreCloudDatabaseAction restore_cloud_database = 16; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.RestoreCloudDatabaseAction, + com.google.spanner.executor.v1.RestoreCloudDatabaseAction.Builder, + com.google.spanner.executor.v1.RestoreCloudDatabaseActionOrBuilder> + internalGetRestoreCloudDatabaseFieldBuilder() { + if (restoreCloudDatabaseBuilder_ == null) { + if (!(actionCase_ == 16)) { + action_ = com.google.spanner.executor.v1.RestoreCloudDatabaseAction.getDefaultInstance(); + } + restoreCloudDatabaseBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.RestoreCloudDatabaseAction, + com.google.spanner.executor.v1.RestoreCloudDatabaseAction.Builder, + com.google.spanner.executor.v1.RestoreCloudDatabaseActionOrBuilder>( + (com.google.spanner.executor.v1.RestoreCloudDatabaseAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 16; + onChanged(); + return restoreCloudDatabaseBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.GetCloudDatabaseAction, + com.google.spanner.executor.v1.GetCloudDatabaseAction.Builder, + com.google.spanner.executor.v1.GetCloudDatabaseActionOrBuilder> + getCloudDatabaseBuilder_; + + /** + * + * + *
    +     * Action that gets a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudDatabaseAction get_cloud_database = 17; + * + * @return Whether the getCloudDatabase field is set. + */ + @java.lang.Override + public boolean hasGetCloudDatabase() { + return actionCase_ == 17; + } + + /** + * + * + *
    +     * Action that gets a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudDatabaseAction get_cloud_database = 17; + * + * @return The getCloudDatabase. + */ + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudDatabaseAction getGetCloudDatabase() { + if (getCloudDatabaseBuilder_ == null) { + if (actionCase_ == 17) { + return (com.google.spanner.executor.v1.GetCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.GetCloudDatabaseAction.getDefaultInstance(); + } else { + if (actionCase_ == 17) { + return getCloudDatabaseBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.GetCloudDatabaseAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that gets a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudDatabaseAction get_cloud_database = 17; + */ + public Builder setGetCloudDatabase( + com.google.spanner.executor.v1.GetCloudDatabaseAction value) { + if (getCloudDatabaseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + getCloudDatabaseBuilder_.setMessage(value); + } + actionCase_ = 17; + return this; + } + + /** + * + * + *
    +     * Action that gets a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudDatabaseAction get_cloud_database = 17; + */ + public Builder setGetCloudDatabase( + com.google.spanner.executor.v1.GetCloudDatabaseAction.Builder builderForValue) { + if (getCloudDatabaseBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + getCloudDatabaseBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 17; + return this; + } + + /** + * + * + *
    +     * Action that gets a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudDatabaseAction get_cloud_database = 17; + */ + public Builder mergeGetCloudDatabase( + com.google.spanner.executor.v1.GetCloudDatabaseAction value) { + if (getCloudDatabaseBuilder_ == null) { + if (actionCase_ == 17 + && action_ + != com.google.spanner.executor.v1.GetCloudDatabaseAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.GetCloudDatabaseAction.newBuilder( + (com.google.spanner.executor.v1.GetCloudDatabaseAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 17) { + getCloudDatabaseBuilder_.mergeFrom(value); + } else { + getCloudDatabaseBuilder_.setMessage(value); + } + } + actionCase_ = 17; + return this; + } + + /** + * + * + *
    +     * Action that gets a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudDatabaseAction get_cloud_database = 17; + */ + public Builder clearGetCloudDatabase() { + if (getCloudDatabaseBuilder_ == null) { + if (actionCase_ == 17) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 17) { + actionCase_ = 0; + action_ = null; + } + getCloudDatabaseBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that gets a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudDatabaseAction get_cloud_database = 17; + */ + public com.google.spanner.executor.v1.GetCloudDatabaseAction.Builder + getGetCloudDatabaseBuilder() { + return internalGetGetCloudDatabaseFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that gets a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudDatabaseAction get_cloud_database = 17; + */ + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudDatabaseActionOrBuilder + getGetCloudDatabaseOrBuilder() { + if ((actionCase_ == 17) && (getCloudDatabaseBuilder_ != null)) { + return getCloudDatabaseBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 17) { + return (com.google.spanner.executor.v1.GetCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.GetCloudDatabaseAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that gets a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudDatabaseAction get_cloud_database = 17; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.GetCloudDatabaseAction, + com.google.spanner.executor.v1.GetCloudDatabaseAction.Builder, + com.google.spanner.executor.v1.GetCloudDatabaseActionOrBuilder> + internalGetGetCloudDatabaseFieldBuilder() { + if (getCloudDatabaseBuilder_ == null) { + if (!(actionCase_ == 17)) { + action_ = com.google.spanner.executor.v1.GetCloudDatabaseAction.getDefaultInstance(); + } + getCloudDatabaseBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.GetCloudDatabaseAction, + com.google.spanner.executor.v1.GetCloudDatabaseAction.Builder, + com.google.spanner.executor.v1.GetCloudDatabaseActionOrBuilder>( + (com.google.spanner.executor.v1.GetCloudDatabaseAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 17; + onChanged(); + return getCloudDatabaseBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CreateCloudBackupAction, + com.google.spanner.executor.v1.CreateCloudBackupAction.Builder, + com.google.spanner.executor.v1.CreateCloudBackupActionOrBuilder> + createCloudBackupBuilder_; + + /** + * + * + *
    +     * Action that creates a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudBackupAction create_cloud_backup = 18; + * + * @return Whether the createCloudBackup field is set. + */ + @java.lang.Override + public boolean hasCreateCloudBackup() { + return actionCase_ == 18; + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudBackupAction create_cloud_backup = 18; + * + * @return The createCloudBackup. + */ + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudBackupAction getCreateCloudBackup() { + if (createCloudBackupBuilder_ == null) { + if (actionCase_ == 18) { + return (com.google.spanner.executor.v1.CreateCloudBackupAction) action_; + } + return com.google.spanner.executor.v1.CreateCloudBackupAction.getDefaultInstance(); + } else { + if (actionCase_ == 18) { + return createCloudBackupBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.CreateCloudBackupAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudBackupAction create_cloud_backup = 18; + */ + public Builder setCreateCloudBackup( + com.google.spanner.executor.v1.CreateCloudBackupAction value) { + if (createCloudBackupBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + createCloudBackupBuilder_.setMessage(value); + } + actionCase_ = 18; + return this; + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudBackupAction create_cloud_backup = 18; + */ + public Builder setCreateCloudBackup( + com.google.spanner.executor.v1.CreateCloudBackupAction.Builder builderForValue) { + if (createCloudBackupBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + createCloudBackupBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 18; + return this; + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudBackupAction create_cloud_backup = 18; + */ + public Builder mergeCreateCloudBackup( + com.google.spanner.executor.v1.CreateCloudBackupAction value) { + if (createCloudBackupBuilder_ == null) { + if (actionCase_ == 18 + && action_ + != com.google.spanner.executor.v1.CreateCloudBackupAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.CreateCloudBackupAction.newBuilder( + (com.google.spanner.executor.v1.CreateCloudBackupAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 18) { + createCloudBackupBuilder_.mergeFrom(value); + } else { + createCloudBackupBuilder_.setMessage(value); + } + } + actionCase_ = 18; + return this; + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudBackupAction create_cloud_backup = 18; + */ + public Builder clearCreateCloudBackup() { + if (createCloudBackupBuilder_ == null) { + if (actionCase_ == 18) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 18) { + actionCase_ = 0; + action_ = null; + } + createCloudBackupBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudBackupAction create_cloud_backup = 18; + */ + public com.google.spanner.executor.v1.CreateCloudBackupAction.Builder + getCreateCloudBackupBuilder() { + return internalGetCreateCloudBackupFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudBackupAction create_cloud_backup = 18; + */ + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudBackupActionOrBuilder + getCreateCloudBackupOrBuilder() { + if ((actionCase_ == 18) && (createCloudBackupBuilder_ != null)) { + return createCloudBackupBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 18) { + return (com.google.spanner.executor.v1.CreateCloudBackupAction) action_; + } + return com.google.spanner.executor.v1.CreateCloudBackupAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that creates a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.CreateCloudBackupAction create_cloud_backup = 18; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CreateCloudBackupAction, + com.google.spanner.executor.v1.CreateCloudBackupAction.Builder, + com.google.spanner.executor.v1.CreateCloudBackupActionOrBuilder> + internalGetCreateCloudBackupFieldBuilder() { + if (createCloudBackupBuilder_ == null) { + if (!(actionCase_ == 18)) { + action_ = com.google.spanner.executor.v1.CreateCloudBackupAction.getDefaultInstance(); + } + createCloudBackupBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CreateCloudBackupAction, + com.google.spanner.executor.v1.CreateCloudBackupAction.Builder, + com.google.spanner.executor.v1.CreateCloudBackupActionOrBuilder>( + (com.google.spanner.executor.v1.CreateCloudBackupAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 18; + onChanged(); + return createCloudBackupBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CopyCloudBackupAction, + com.google.spanner.executor.v1.CopyCloudBackupAction.Builder, + com.google.spanner.executor.v1.CopyCloudBackupActionOrBuilder> + copyCloudBackupBuilder_; + + /** + * + * + *
    +     * Action that copies a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.CopyCloudBackupAction copy_cloud_backup = 19; + * + * @return Whether the copyCloudBackup field is set. + */ + @java.lang.Override + public boolean hasCopyCloudBackup() { + return actionCase_ == 19; + } + + /** + * + * + *
    +     * Action that copies a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.CopyCloudBackupAction copy_cloud_backup = 19; + * + * @return The copyCloudBackup. + */ + @java.lang.Override + public com.google.spanner.executor.v1.CopyCloudBackupAction getCopyCloudBackup() { + if (copyCloudBackupBuilder_ == null) { + if (actionCase_ == 19) { + return (com.google.spanner.executor.v1.CopyCloudBackupAction) action_; + } + return com.google.spanner.executor.v1.CopyCloudBackupAction.getDefaultInstance(); + } else { + if (actionCase_ == 19) { + return copyCloudBackupBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.CopyCloudBackupAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that copies a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.CopyCloudBackupAction copy_cloud_backup = 19; + */ + public Builder setCopyCloudBackup(com.google.spanner.executor.v1.CopyCloudBackupAction value) { + if (copyCloudBackupBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + copyCloudBackupBuilder_.setMessage(value); + } + actionCase_ = 19; + return this; + } + + /** + * + * + *
    +     * Action that copies a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.CopyCloudBackupAction copy_cloud_backup = 19; + */ + public Builder setCopyCloudBackup( + com.google.spanner.executor.v1.CopyCloudBackupAction.Builder builderForValue) { + if (copyCloudBackupBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + copyCloudBackupBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 19; + return this; + } + + /** + * + * + *
    +     * Action that copies a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.CopyCloudBackupAction copy_cloud_backup = 19; + */ + public Builder mergeCopyCloudBackup( + com.google.spanner.executor.v1.CopyCloudBackupAction value) { + if (copyCloudBackupBuilder_ == null) { + if (actionCase_ == 19 + && action_ + != com.google.spanner.executor.v1.CopyCloudBackupAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.CopyCloudBackupAction.newBuilder( + (com.google.spanner.executor.v1.CopyCloudBackupAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 19) { + copyCloudBackupBuilder_.mergeFrom(value); + } else { + copyCloudBackupBuilder_.setMessage(value); + } + } + actionCase_ = 19; + return this; + } + + /** + * + * + *
    +     * Action that copies a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.CopyCloudBackupAction copy_cloud_backup = 19; + */ + public Builder clearCopyCloudBackup() { + if (copyCloudBackupBuilder_ == null) { + if (actionCase_ == 19) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 19) { + actionCase_ = 0; + action_ = null; + } + copyCloudBackupBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that copies a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.CopyCloudBackupAction copy_cloud_backup = 19; + */ + public com.google.spanner.executor.v1.CopyCloudBackupAction.Builder + getCopyCloudBackupBuilder() { + return internalGetCopyCloudBackupFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that copies a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.CopyCloudBackupAction copy_cloud_backup = 19; + */ + @java.lang.Override + public com.google.spanner.executor.v1.CopyCloudBackupActionOrBuilder + getCopyCloudBackupOrBuilder() { + if ((actionCase_ == 19) && (copyCloudBackupBuilder_ != null)) { + return copyCloudBackupBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 19) { + return (com.google.spanner.executor.v1.CopyCloudBackupAction) action_; + } + return com.google.spanner.executor.v1.CopyCloudBackupAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that copies a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.CopyCloudBackupAction copy_cloud_backup = 19; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CopyCloudBackupAction, + com.google.spanner.executor.v1.CopyCloudBackupAction.Builder, + com.google.spanner.executor.v1.CopyCloudBackupActionOrBuilder> + internalGetCopyCloudBackupFieldBuilder() { + if (copyCloudBackupBuilder_ == null) { + if (!(actionCase_ == 19)) { + action_ = com.google.spanner.executor.v1.CopyCloudBackupAction.getDefaultInstance(); + } + copyCloudBackupBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CopyCloudBackupAction, + com.google.spanner.executor.v1.CopyCloudBackupAction.Builder, + com.google.spanner.executor.v1.CopyCloudBackupActionOrBuilder>( + (com.google.spanner.executor.v1.CopyCloudBackupAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 19; + onChanged(); + return copyCloudBackupBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.GetCloudBackupAction, + com.google.spanner.executor.v1.GetCloudBackupAction.Builder, + com.google.spanner.executor.v1.GetCloudBackupActionOrBuilder> + getCloudBackupBuilder_; + + /** + * + * + *
    +     * Action that gets a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudBackupAction get_cloud_backup = 20; + * + * @return Whether the getCloudBackup field is set. + */ + @java.lang.Override + public boolean hasGetCloudBackup() { + return actionCase_ == 20; + } + + /** + * + * + *
    +     * Action that gets a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudBackupAction get_cloud_backup = 20; + * + * @return The getCloudBackup. + */ + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudBackupAction getGetCloudBackup() { + if (getCloudBackupBuilder_ == null) { + if (actionCase_ == 20) { + return (com.google.spanner.executor.v1.GetCloudBackupAction) action_; + } + return com.google.spanner.executor.v1.GetCloudBackupAction.getDefaultInstance(); + } else { + if (actionCase_ == 20) { + return getCloudBackupBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.GetCloudBackupAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that gets a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudBackupAction get_cloud_backup = 20; + */ + public Builder setGetCloudBackup(com.google.spanner.executor.v1.GetCloudBackupAction value) { + if (getCloudBackupBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + getCloudBackupBuilder_.setMessage(value); + } + actionCase_ = 20; + return this; + } + + /** + * + * + *
    +     * Action that gets a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudBackupAction get_cloud_backup = 20; + */ + public Builder setGetCloudBackup( + com.google.spanner.executor.v1.GetCloudBackupAction.Builder builderForValue) { + if (getCloudBackupBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + getCloudBackupBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 20; + return this; + } + + /** + * + * + *
    +     * Action that gets a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudBackupAction get_cloud_backup = 20; + */ + public Builder mergeGetCloudBackup(com.google.spanner.executor.v1.GetCloudBackupAction value) { + if (getCloudBackupBuilder_ == null) { + if (actionCase_ == 20 + && action_ + != com.google.spanner.executor.v1.GetCloudBackupAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.GetCloudBackupAction.newBuilder( + (com.google.spanner.executor.v1.GetCloudBackupAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 20) { + getCloudBackupBuilder_.mergeFrom(value); + } else { + getCloudBackupBuilder_.setMessage(value); + } + } + actionCase_ = 20; + return this; + } + + /** + * + * + *
    +     * Action that gets a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudBackupAction get_cloud_backup = 20; + */ + public Builder clearGetCloudBackup() { + if (getCloudBackupBuilder_ == null) { + if (actionCase_ == 20) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 20) { + actionCase_ = 0; + action_ = null; + } + getCloudBackupBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that gets a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudBackupAction get_cloud_backup = 20; + */ + public com.google.spanner.executor.v1.GetCloudBackupAction.Builder getGetCloudBackupBuilder() { + return internalGetGetCloudBackupFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that gets a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudBackupAction get_cloud_backup = 20; + */ + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudBackupActionOrBuilder + getGetCloudBackupOrBuilder() { + if ((actionCase_ == 20) && (getCloudBackupBuilder_ != null)) { + return getCloudBackupBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 20) { + return (com.google.spanner.executor.v1.GetCloudBackupAction) action_; + } + return com.google.spanner.executor.v1.GetCloudBackupAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that gets a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.GetCloudBackupAction get_cloud_backup = 20; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.GetCloudBackupAction, + com.google.spanner.executor.v1.GetCloudBackupAction.Builder, + com.google.spanner.executor.v1.GetCloudBackupActionOrBuilder> + internalGetGetCloudBackupFieldBuilder() { + if (getCloudBackupBuilder_ == null) { + if (!(actionCase_ == 20)) { + action_ = com.google.spanner.executor.v1.GetCloudBackupAction.getDefaultInstance(); + } + getCloudBackupBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.GetCloudBackupAction, + com.google.spanner.executor.v1.GetCloudBackupAction.Builder, + com.google.spanner.executor.v1.GetCloudBackupActionOrBuilder>( + (com.google.spanner.executor.v1.GetCloudBackupAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 20; + onChanged(); + return getCloudBackupBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.UpdateCloudBackupAction, + com.google.spanner.executor.v1.UpdateCloudBackupAction.Builder, + com.google.spanner.executor.v1.UpdateCloudBackupActionOrBuilder> + updateCloudBackupBuilder_; + + /** + * + * + *
    +     * Action that updates a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudBackupAction update_cloud_backup = 21; + * + * @return Whether the updateCloudBackup field is set. + */ + @java.lang.Override + public boolean hasUpdateCloudBackup() { + return actionCase_ == 21; + } + + /** + * + * + *
    +     * Action that updates a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudBackupAction update_cloud_backup = 21; + * + * @return The updateCloudBackup. + */ + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudBackupAction getUpdateCloudBackup() { + if (updateCloudBackupBuilder_ == null) { + if (actionCase_ == 21) { + return (com.google.spanner.executor.v1.UpdateCloudBackupAction) action_; + } + return com.google.spanner.executor.v1.UpdateCloudBackupAction.getDefaultInstance(); + } else { + if (actionCase_ == 21) { + return updateCloudBackupBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.UpdateCloudBackupAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that updates a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudBackupAction update_cloud_backup = 21; + */ + public Builder setUpdateCloudBackup( + com.google.spanner.executor.v1.UpdateCloudBackupAction value) { + if (updateCloudBackupBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + updateCloudBackupBuilder_.setMessage(value); + } + actionCase_ = 21; + return this; + } + + /** + * + * + *
    +     * Action that updates a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudBackupAction update_cloud_backup = 21; + */ + public Builder setUpdateCloudBackup( + com.google.spanner.executor.v1.UpdateCloudBackupAction.Builder builderForValue) { + if (updateCloudBackupBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + updateCloudBackupBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 21; + return this; + } + + /** + * + * + *
    +     * Action that updates a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudBackupAction update_cloud_backup = 21; + */ + public Builder mergeUpdateCloudBackup( + com.google.spanner.executor.v1.UpdateCloudBackupAction value) { + if (updateCloudBackupBuilder_ == null) { + if (actionCase_ == 21 + && action_ + != com.google.spanner.executor.v1.UpdateCloudBackupAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.UpdateCloudBackupAction.newBuilder( + (com.google.spanner.executor.v1.UpdateCloudBackupAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 21) { + updateCloudBackupBuilder_.mergeFrom(value); + } else { + updateCloudBackupBuilder_.setMessage(value); + } + } + actionCase_ = 21; + return this; + } + + /** + * + * + *
    +     * Action that updates a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudBackupAction update_cloud_backup = 21; + */ + public Builder clearUpdateCloudBackup() { + if (updateCloudBackupBuilder_ == null) { + if (actionCase_ == 21) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 21) { + actionCase_ = 0; + action_ = null; + } + updateCloudBackupBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that updates a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudBackupAction update_cloud_backup = 21; + */ + public com.google.spanner.executor.v1.UpdateCloudBackupAction.Builder + getUpdateCloudBackupBuilder() { + return internalGetUpdateCloudBackupFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that updates a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudBackupAction update_cloud_backup = 21; + */ + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudBackupActionOrBuilder + getUpdateCloudBackupOrBuilder() { + if ((actionCase_ == 21) && (updateCloudBackupBuilder_ != null)) { + return updateCloudBackupBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 21) { + return (com.google.spanner.executor.v1.UpdateCloudBackupAction) action_; + } + return com.google.spanner.executor.v1.UpdateCloudBackupAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that updates a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.UpdateCloudBackupAction update_cloud_backup = 21; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.UpdateCloudBackupAction, + com.google.spanner.executor.v1.UpdateCloudBackupAction.Builder, + com.google.spanner.executor.v1.UpdateCloudBackupActionOrBuilder> + internalGetUpdateCloudBackupFieldBuilder() { + if (updateCloudBackupBuilder_ == null) { + if (!(actionCase_ == 21)) { + action_ = com.google.spanner.executor.v1.UpdateCloudBackupAction.getDefaultInstance(); + } + updateCloudBackupBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.UpdateCloudBackupAction, + com.google.spanner.executor.v1.UpdateCloudBackupAction.Builder, + com.google.spanner.executor.v1.UpdateCloudBackupActionOrBuilder>( + (com.google.spanner.executor.v1.UpdateCloudBackupAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 21; + onChanged(); + return updateCloudBackupBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.DeleteCloudBackupAction, + com.google.spanner.executor.v1.DeleteCloudBackupAction.Builder, + com.google.spanner.executor.v1.DeleteCloudBackupActionOrBuilder> + deleteCloudBackupBuilder_; + + /** + * + * + *
    +     * Action that deletes a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.DeleteCloudBackupAction delete_cloud_backup = 22; + * + * @return Whether the deleteCloudBackup field is set. + */ + @java.lang.Override + public boolean hasDeleteCloudBackup() { + return actionCase_ == 22; + } + + /** + * + * + *
    +     * Action that deletes a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.DeleteCloudBackupAction delete_cloud_backup = 22; + * + * @return The deleteCloudBackup. + */ + @java.lang.Override + public com.google.spanner.executor.v1.DeleteCloudBackupAction getDeleteCloudBackup() { + if (deleteCloudBackupBuilder_ == null) { + if (actionCase_ == 22) { + return (com.google.spanner.executor.v1.DeleteCloudBackupAction) action_; + } + return com.google.spanner.executor.v1.DeleteCloudBackupAction.getDefaultInstance(); + } else { + if (actionCase_ == 22) { + return deleteCloudBackupBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.DeleteCloudBackupAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that deletes a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.DeleteCloudBackupAction delete_cloud_backup = 22; + */ + public Builder setDeleteCloudBackup( + com.google.spanner.executor.v1.DeleteCloudBackupAction value) { + if (deleteCloudBackupBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + deleteCloudBackupBuilder_.setMessage(value); + } + actionCase_ = 22; + return this; + } + + /** + * + * + *
    +     * Action that deletes a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.DeleteCloudBackupAction delete_cloud_backup = 22; + */ + public Builder setDeleteCloudBackup( + com.google.spanner.executor.v1.DeleteCloudBackupAction.Builder builderForValue) { + if (deleteCloudBackupBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + deleteCloudBackupBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 22; + return this; + } + + /** + * + * + *
    +     * Action that deletes a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.DeleteCloudBackupAction delete_cloud_backup = 22; + */ + public Builder mergeDeleteCloudBackup( + com.google.spanner.executor.v1.DeleteCloudBackupAction value) { + if (deleteCloudBackupBuilder_ == null) { + if (actionCase_ == 22 + && action_ + != com.google.spanner.executor.v1.DeleteCloudBackupAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.DeleteCloudBackupAction.newBuilder( + (com.google.spanner.executor.v1.DeleteCloudBackupAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 22) { + deleteCloudBackupBuilder_.mergeFrom(value); + } else { + deleteCloudBackupBuilder_.setMessage(value); + } + } + actionCase_ = 22; + return this; + } + + /** + * + * + *
    +     * Action that deletes a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.DeleteCloudBackupAction delete_cloud_backup = 22; + */ + public Builder clearDeleteCloudBackup() { + if (deleteCloudBackupBuilder_ == null) { + if (actionCase_ == 22) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 22) { + actionCase_ = 0; + action_ = null; + } + deleteCloudBackupBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that deletes a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.DeleteCloudBackupAction delete_cloud_backup = 22; + */ + public com.google.spanner.executor.v1.DeleteCloudBackupAction.Builder + getDeleteCloudBackupBuilder() { + return internalGetDeleteCloudBackupFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that deletes a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.DeleteCloudBackupAction delete_cloud_backup = 22; + */ + @java.lang.Override + public com.google.spanner.executor.v1.DeleteCloudBackupActionOrBuilder + getDeleteCloudBackupOrBuilder() { + if ((actionCase_ == 22) && (deleteCloudBackupBuilder_ != null)) { + return deleteCloudBackupBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 22) { + return (com.google.spanner.executor.v1.DeleteCloudBackupAction) action_; + } + return com.google.spanner.executor.v1.DeleteCloudBackupAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that deletes a Cloud Spanner database backup.
    +     * 
    + * + * .google.spanner.executor.v1.DeleteCloudBackupAction delete_cloud_backup = 22; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.DeleteCloudBackupAction, + com.google.spanner.executor.v1.DeleteCloudBackupAction.Builder, + com.google.spanner.executor.v1.DeleteCloudBackupActionOrBuilder> + internalGetDeleteCloudBackupFieldBuilder() { + if (deleteCloudBackupBuilder_ == null) { + if (!(actionCase_ == 22)) { + action_ = com.google.spanner.executor.v1.DeleteCloudBackupAction.getDefaultInstance(); + } + deleteCloudBackupBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.DeleteCloudBackupAction, + com.google.spanner.executor.v1.DeleteCloudBackupAction.Builder, + com.google.spanner.executor.v1.DeleteCloudBackupActionOrBuilder>( + (com.google.spanner.executor.v1.DeleteCloudBackupAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 22; + onChanged(); + return deleteCloudBackupBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ListCloudBackupsAction, + com.google.spanner.executor.v1.ListCloudBackupsAction.Builder, + com.google.spanner.executor.v1.ListCloudBackupsActionOrBuilder> + listCloudBackupsBuilder_; + + /** + * + * + *
    +     * Action that lists Cloud Spanner database backups.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudBackupsAction list_cloud_backups = 23; + * + * @return Whether the listCloudBackups field is set. + */ + @java.lang.Override + public boolean hasListCloudBackups() { + return actionCase_ == 23; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database backups.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudBackupsAction list_cloud_backups = 23; + * + * @return The listCloudBackups. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudBackupsAction getListCloudBackups() { + if (listCloudBackupsBuilder_ == null) { + if (actionCase_ == 23) { + return (com.google.spanner.executor.v1.ListCloudBackupsAction) action_; + } + return com.google.spanner.executor.v1.ListCloudBackupsAction.getDefaultInstance(); + } else { + if (actionCase_ == 23) { + return listCloudBackupsBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.ListCloudBackupsAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database backups.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudBackupsAction list_cloud_backups = 23; + */ + public Builder setListCloudBackups( + com.google.spanner.executor.v1.ListCloudBackupsAction value) { + if (listCloudBackupsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + listCloudBackupsBuilder_.setMessage(value); + } + actionCase_ = 23; + return this; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database backups.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudBackupsAction list_cloud_backups = 23; + */ + public Builder setListCloudBackups( + com.google.spanner.executor.v1.ListCloudBackupsAction.Builder builderForValue) { + if (listCloudBackupsBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + listCloudBackupsBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 23; + return this; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database backups.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudBackupsAction list_cloud_backups = 23; + */ + public Builder mergeListCloudBackups( + com.google.spanner.executor.v1.ListCloudBackupsAction value) { + if (listCloudBackupsBuilder_ == null) { + if (actionCase_ == 23 + && action_ + != com.google.spanner.executor.v1.ListCloudBackupsAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.ListCloudBackupsAction.newBuilder( + (com.google.spanner.executor.v1.ListCloudBackupsAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 23) { + listCloudBackupsBuilder_.mergeFrom(value); + } else { + listCloudBackupsBuilder_.setMessage(value); + } + } + actionCase_ = 23; + return this; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database backups.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudBackupsAction list_cloud_backups = 23; + */ + public Builder clearListCloudBackups() { + if (listCloudBackupsBuilder_ == null) { + if (actionCase_ == 23) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 23) { + actionCase_ = 0; + action_ = null; + } + listCloudBackupsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database backups.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudBackupsAction list_cloud_backups = 23; + */ + public com.google.spanner.executor.v1.ListCloudBackupsAction.Builder + getListCloudBackupsBuilder() { + return internalGetListCloudBackupsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database backups.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudBackupsAction list_cloud_backups = 23; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudBackupsActionOrBuilder + getListCloudBackupsOrBuilder() { + if ((actionCase_ == 23) && (listCloudBackupsBuilder_ != null)) { + return listCloudBackupsBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 23) { + return (com.google.spanner.executor.v1.ListCloudBackupsAction) action_; + } + return com.google.spanner.executor.v1.ListCloudBackupsAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database backups.
    +     * 
    + * + * .google.spanner.executor.v1.ListCloudBackupsAction list_cloud_backups = 23; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ListCloudBackupsAction, + com.google.spanner.executor.v1.ListCloudBackupsAction.Builder, + com.google.spanner.executor.v1.ListCloudBackupsActionOrBuilder> + internalGetListCloudBackupsFieldBuilder() { + if (listCloudBackupsBuilder_ == null) { + if (!(actionCase_ == 23)) { + action_ = com.google.spanner.executor.v1.ListCloudBackupsAction.getDefaultInstance(); + } + listCloudBackupsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ListCloudBackupsAction, + com.google.spanner.executor.v1.ListCloudBackupsAction.Builder, + com.google.spanner.executor.v1.ListCloudBackupsActionOrBuilder>( + (com.google.spanner.executor.v1.ListCloudBackupsAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 23; + onChanged(); + return listCloudBackupsBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ListCloudBackupOperationsAction, + com.google.spanner.executor.v1.ListCloudBackupOperationsAction.Builder, + com.google.spanner.executor.v1.ListCloudBackupOperationsActionOrBuilder> + listCloudBackupOperationsBuilder_; + + /** + * + * + *
    +     * Action that lists Cloud Spanner database backup operations.
    +     * 
    + * + * + * .google.spanner.executor.v1.ListCloudBackupOperationsAction list_cloud_backup_operations = 24; + * + * + * @return Whether the listCloudBackupOperations field is set. + */ + @java.lang.Override + public boolean hasListCloudBackupOperations() { + return actionCase_ == 24; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database backup operations.
    +     * 
    + * + * + * .google.spanner.executor.v1.ListCloudBackupOperationsAction list_cloud_backup_operations = 24; + * + * + * @return The listCloudBackupOperations. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudBackupOperationsAction + getListCloudBackupOperations() { + if (listCloudBackupOperationsBuilder_ == null) { + if (actionCase_ == 24) { + return (com.google.spanner.executor.v1.ListCloudBackupOperationsAction) action_; + } + return com.google.spanner.executor.v1.ListCloudBackupOperationsAction.getDefaultInstance(); + } else { + if (actionCase_ == 24) { + return listCloudBackupOperationsBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.ListCloudBackupOperationsAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database backup operations.
    +     * 
    + * + * + * .google.spanner.executor.v1.ListCloudBackupOperationsAction list_cloud_backup_operations = 24; + * + */ + public Builder setListCloudBackupOperations( + com.google.spanner.executor.v1.ListCloudBackupOperationsAction value) { + if (listCloudBackupOperationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + listCloudBackupOperationsBuilder_.setMessage(value); + } + actionCase_ = 24; + return this; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database backup operations.
    +     * 
    + * + * + * .google.spanner.executor.v1.ListCloudBackupOperationsAction list_cloud_backup_operations = 24; + * + */ + public Builder setListCloudBackupOperations( + com.google.spanner.executor.v1.ListCloudBackupOperationsAction.Builder builderForValue) { + if (listCloudBackupOperationsBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + listCloudBackupOperationsBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 24; + return this; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database backup operations.
    +     * 
    + * + * + * .google.spanner.executor.v1.ListCloudBackupOperationsAction list_cloud_backup_operations = 24; + * + */ + public Builder mergeListCloudBackupOperations( + com.google.spanner.executor.v1.ListCloudBackupOperationsAction value) { + if (listCloudBackupOperationsBuilder_ == null) { + if (actionCase_ == 24 + && action_ + != com.google.spanner.executor.v1.ListCloudBackupOperationsAction + .getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.ListCloudBackupOperationsAction.newBuilder( + (com.google.spanner.executor.v1.ListCloudBackupOperationsAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 24) { + listCloudBackupOperationsBuilder_.mergeFrom(value); + } else { + listCloudBackupOperationsBuilder_.setMessage(value); + } + } + actionCase_ = 24; + return this; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database backup operations.
    +     * 
    + * + * + * .google.spanner.executor.v1.ListCloudBackupOperationsAction list_cloud_backup_operations = 24; + * + */ + public Builder clearListCloudBackupOperations() { + if (listCloudBackupOperationsBuilder_ == null) { + if (actionCase_ == 24) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 24) { + actionCase_ = 0; + action_ = null; + } + listCloudBackupOperationsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database backup operations.
    +     * 
    + * + * + * .google.spanner.executor.v1.ListCloudBackupOperationsAction list_cloud_backup_operations = 24; + * + */ + public com.google.spanner.executor.v1.ListCloudBackupOperationsAction.Builder + getListCloudBackupOperationsBuilder() { + return internalGetListCloudBackupOperationsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database backup operations.
    +     * 
    + * + * + * .google.spanner.executor.v1.ListCloudBackupOperationsAction list_cloud_backup_operations = 24; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudBackupOperationsActionOrBuilder + getListCloudBackupOperationsOrBuilder() { + if ((actionCase_ == 24) && (listCloudBackupOperationsBuilder_ != null)) { + return listCloudBackupOperationsBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 24) { + return (com.google.spanner.executor.v1.ListCloudBackupOperationsAction) action_; + } + return com.google.spanner.executor.v1.ListCloudBackupOperationsAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that lists Cloud Spanner database backup operations.
    +     * 
    + * + * + * .google.spanner.executor.v1.ListCloudBackupOperationsAction list_cloud_backup_operations = 24; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ListCloudBackupOperationsAction, + com.google.spanner.executor.v1.ListCloudBackupOperationsAction.Builder, + com.google.spanner.executor.v1.ListCloudBackupOperationsActionOrBuilder> + internalGetListCloudBackupOperationsFieldBuilder() { + if (listCloudBackupOperationsBuilder_ == null) { + if (!(actionCase_ == 24)) { + action_ = + com.google.spanner.executor.v1.ListCloudBackupOperationsAction.getDefaultInstance(); + } + listCloudBackupOperationsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ListCloudBackupOperationsAction, + com.google.spanner.executor.v1.ListCloudBackupOperationsAction.Builder, + com.google.spanner.executor.v1.ListCloudBackupOperationsActionOrBuilder>( + (com.google.spanner.executor.v1.ListCloudBackupOperationsAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 24; + onChanged(); + return listCloudBackupOperationsBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.GetOperationAction, + com.google.spanner.executor.v1.GetOperationAction.Builder, + com.google.spanner.executor.v1.GetOperationActionOrBuilder> + getOperationBuilder_; + + /** + * + * + *
    +     * Action that gets an operation.
    +     * 
    + * + * .google.spanner.executor.v1.GetOperationAction get_operation = 25; + * + * @return Whether the getOperation field is set. + */ + @java.lang.Override + public boolean hasGetOperation() { + return actionCase_ == 25; + } + + /** + * + * + *
    +     * Action that gets an operation.
    +     * 
    + * + * .google.spanner.executor.v1.GetOperationAction get_operation = 25; + * + * @return The getOperation. + */ + @java.lang.Override + public com.google.spanner.executor.v1.GetOperationAction getGetOperation() { + if (getOperationBuilder_ == null) { + if (actionCase_ == 25) { + return (com.google.spanner.executor.v1.GetOperationAction) action_; + } + return com.google.spanner.executor.v1.GetOperationAction.getDefaultInstance(); + } else { + if (actionCase_ == 25) { + return getOperationBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.GetOperationAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that gets an operation.
    +     * 
    + * + * .google.spanner.executor.v1.GetOperationAction get_operation = 25; + */ + public Builder setGetOperation(com.google.spanner.executor.v1.GetOperationAction value) { + if (getOperationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + getOperationBuilder_.setMessage(value); + } + actionCase_ = 25; + return this; + } + + /** + * + * + *
    +     * Action that gets an operation.
    +     * 
    + * + * .google.spanner.executor.v1.GetOperationAction get_operation = 25; + */ + public Builder setGetOperation( + com.google.spanner.executor.v1.GetOperationAction.Builder builderForValue) { + if (getOperationBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + getOperationBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 25; + return this; + } + + /** + * + * + *
    +     * Action that gets an operation.
    +     * 
    + * + * .google.spanner.executor.v1.GetOperationAction get_operation = 25; + */ + public Builder mergeGetOperation(com.google.spanner.executor.v1.GetOperationAction value) { + if (getOperationBuilder_ == null) { + if (actionCase_ == 25 + && action_ != com.google.spanner.executor.v1.GetOperationAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.GetOperationAction.newBuilder( + (com.google.spanner.executor.v1.GetOperationAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 25) { + getOperationBuilder_.mergeFrom(value); + } else { + getOperationBuilder_.setMessage(value); + } + } + actionCase_ = 25; + return this; + } + + /** + * + * + *
    +     * Action that gets an operation.
    +     * 
    + * + * .google.spanner.executor.v1.GetOperationAction get_operation = 25; + */ + public Builder clearGetOperation() { + if (getOperationBuilder_ == null) { + if (actionCase_ == 25) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 25) { + actionCase_ = 0; + action_ = null; + } + getOperationBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that gets an operation.
    +     * 
    + * + * .google.spanner.executor.v1.GetOperationAction get_operation = 25; + */ + public com.google.spanner.executor.v1.GetOperationAction.Builder getGetOperationBuilder() { + return internalGetGetOperationFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that gets an operation.
    +     * 
    + * + * .google.spanner.executor.v1.GetOperationAction get_operation = 25; + */ + @java.lang.Override + public com.google.spanner.executor.v1.GetOperationActionOrBuilder getGetOperationOrBuilder() { + if ((actionCase_ == 25) && (getOperationBuilder_ != null)) { + return getOperationBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 25) { + return (com.google.spanner.executor.v1.GetOperationAction) action_; + } + return com.google.spanner.executor.v1.GetOperationAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that gets an operation.
    +     * 
    + * + * .google.spanner.executor.v1.GetOperationAction get_operation = 25; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.GetOperationAction, + com.google.spanner.executor.v1.GetOperationAction.Builder, + com.google.spanner.executor.v1.GetOperationActionOrBuilder> + internalGetGetOperationFieldBuilder() { + if (getOperationBuilder_ == null) { + if (!(actionCase_ == 25)) { + action_ = com.google.spanner.executor.v1.GetOperationAction.getDefaultInstance(); + } + getOperationBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.GetOperationAction, + com.google.spanner.executor.v1.GetOperationAction.Builder, + com.google.spanner.executor.v1.GetOperationActionOrBuilder>( + (com.google.spanner.executor.v1.GetOperationAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 25; + onChanged(); + return getOperationBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CancelOperationAction, + com.google.spanner.executor.v1.CancelOperationAction.Builder, + com.google.spanner.executor.v1.CancelOperationActionOrBuilder> + cancelOperationBuilder_; + + /** + * + * + *
    +     * Action that cancels an operation.
    +     * 
    + * + * .google.spanner.executor.v1.CancelOperationAction cancel_operation = 26; + * + * @return Whether the cancelOperation field is set. + */ + @java.lang.Override + public boolean hasCancelOperation() { + return actionCase_ == 26; + } + + /** + * + * + *
    +     * Action that cancels an operation.
    +     * 
    + * + * .google.spanner.executor.v1.CancelOperationAction cancel_operation = 26; + * + * @return The cancelOperation. + */ + @java.lang.Override + public com.google.spanner.executor.v1.CancelOperationAction getCancelOperation() { + if (cancelOperationBuilder_ == null) { + if (actionCase_ == 26) { + return (com.google.spanner.executor.v1.CancelOperationAction) action_; + } + return com.google.spanner.executor.v1.CancelOperationAction.getDefaultInstance(); + } else { + if (actionCase_ == 26) { + return cancelOperationBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.CancelOperationAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that cancels an operation.
    +     * 
    + * + * .google.spanner.executor.v1.CancelOperationAction cancel_operation = 26; + */ + public Builder setCancelOperation(com.google.spanner.executor.v1.CancelOperationAction value) { + if (cancelOperationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + cancelOperationBuilder_.setMessage(value); + } + actionCase_ = 26; + return this; + } + + /** + * + * + *
    +     * Action that cancels an operation.
    +     * 
    + * + * .google.spanner.executor.v1.CancelOperationAction cancel_operation = 26; + */ + public Builder setCancelOperation( + com.google.spanner.executor.v1.CancelOperationAction.Builder builderForValue) { + if (cancelOperationBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + cancelOperationBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 26; + return this; + } + + /** + * + * + *
    +     * Action that cancels an operation.
    +     * 
    + * + * .google.spanner.executor.v1.CancelOperationAction cancel_operation = 26; + */ + public Builder mergeCancelOperation( + com.google.spanner.executor.v1.CancelOperationAction value) { + if (cancelOperationBuilder_ == null) { + if (actionCase_ == 26 + && action_ + != com.google.spanner.executor.v1.CancelOperationAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.CancelOperationAction.newBuilder( + (com.google.spanner.executor.v1.CancelOperationAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 26) { + cancelOperationBuilder_.mergeFrom(value); + } else { + cancelOperationBuilder_.setMessage(value); + } + } + actionCase_ = 26; + return this; + } + + /** + * + * + *
    +     * Action that cancels an operation.
    +     * 
    + * + * .google.spanner.executor.v1.CancelOperationAction cancel_operation = 26; + */ + public Builder clearCancelOperation() { + if (cancelOperationBuilder_ == null) { + if (actionCase_ == 26) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 26) { + actionCase_ = 0; + action_ = null; + } + cancelOperationBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that cancels an operation.
    +     * 
    + * + * .google.spanner.executor.v1.CancelOperationAction cancel_operation = 26; + */ + public com.google.spanner.executor.v1.CancelOperationAction.Builder + getCancelOperationBuilder() { + return internalGetCancelOperationFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that cancels an operation.
    +     * 
    + * + * .google.spanner.executor.v1.CancelOperationAction cancel_operation = 26; + */ + @java.lang.Override + public com.google.spanner.executor.v1.CancelOperationActionOrBuilder + getCancelOperationOrBuilder() { + if ((actionCase_ == 26) && (cancelOperationBuilder_ != null)) { + return cancelOperationBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 26) { + return (com.google.spanner.executor.v1.CancelOperationAction) action_; + } + return com.google.spanner.executor.v1.CancelOperationAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that cancels an operation.
    +     * 
    + * + * .google.spanner.executor.v1.CancelOperationAction cancel_operation = 26; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CancelOperationAction, + com.google.spanner.executor.v1.CancelOperationAction.Builder, + com.google.spanner.executor.v1.CancelOperationActionOrBuilder> + internalGetCancelOperationFieldBuilder() { + if (cancelOperationBuilder_ == null) { + if (!(actionCase_ == 26)) { + action_ = com.google.spanner.executor.v1.CancelOperationAction.getDefaultInstance(); + } + cancelOperationBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CancelOperationAction, + com.google.spanner.executor.v1.CancelOperationAction.Builder, + com.google.spanner.executor.v1.CancelOperationActionOrBuilder>( + (com.google.spanner.executor.v1.CancelOperationAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 26; + onChanged(); + return cancelOperationBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction, + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction.Builder, + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseActionOrBuilder> + changeQuorumCloudDatabaseBuilder_; + + /** + * + * + *
    +     * Action that changes quorum of a Cloud Spanner database.
    +     * 
    + * + * + * .google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction change_quorum_cloud_database = 28; + * + * + * @return Whether the changeQuorumCloudDatabase field is set. + */ + @java.lang.Override + public boolean hasChangeQuorumCloudDatabase() { + return actionCase_ == 28; + } + + /** + * + * + *
    +     * Action that changes quorum of a Cloud Spanner database.
    +     * 
    + * + * + * .google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction change_quorum_cloud_database = 28; + * + * + * @return The changeQuorumCloudDatabase. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction + getChangeQuorumCloudDatabase() { + if (changeQuorumCloudDatabaseBuilder_ == null) { + if (actionCase_ == 28) { + return (com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction.getDefaultInstance(); + } else { + if (actionCase_ == 28) { + return changeQuorumCloudDatabaseBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that changes quorum of a Cloud Spanner database.
    +     * 
    + * + * + * .google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction change_quorum_cloud_database = 28; + * + */ + public Builder setChangeQuorumCloudDatabase( + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction value) { + if (changeQuorumCloudDatabaseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + changeQuorumCloudDatabaseBuilder_.setMessage(value); + } + actionCase_ = 28; + return this; + } + + /** + * + * + *
    +     * Action that changes quorum of a Cloud Spanner database.
    +     * 
    + * + * + * .google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction change_quorum_cloud_database = 28; + * + */ + public Builder setChangeQuorumCloudDatabase( + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction.Builder builderForValue) { + if (changeQuorumCloudDatabaseBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + changeQuorumCloudDatabaseBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 28; + return this; + } + + /** + * + * + *
    +     * Action that changes quorum of a Cloud Spanner database.
    +     * 
    + * + * + * .google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction change_quorum_cloud_database = 28; + * + */ + public Builder mergeChangeQuorumCloudDatabase( + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction value) { + if (changeQuorumCloudDatabaseBuilder_ == null) { + if (actionCase_ == 28 + && action_ + != com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction + .getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction.newBuilder( + (com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 28) { + changeQuorumCloudDatabaseBuilder_.mergeFrom(value); + } else { + changeQuorumCloudDatabaseBuilder_.setMessage(value); + } + } + actionCase_ = 28; + return this; + } + + /** + * + * + *
    +     * Action that changes quorum of a Cloud Spanner database.
    +     * 
    + * + * + * .google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction change_quorum_cloud_database = 28; + * + */ + public Builder clearChangeQuorumCloudDatabase() { + if (changeQuorumCloudDatabaseBuilder_ == null) { + if (actionCase_ == 28) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 28) { + actionCase_ = 0; + action_ = null; + } + changeQuorumCloudDatabaseBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that changes quorum of a Cloud Spanner database.
    +     * 
    + * + * + * .google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction change_quorum_cloud_database = 28; + * + */ + public com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction.Builder + getChangeQuorumCloudDatabaseBuilder() { + return internalGetChangeQuorumCloudDatabaseFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that changes quorum of a Cloud Spanner database.
    +     * 
    + * + * + * .google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction change_quorum_cloud_database = 28; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseActionOrBuilder + getChangeQuorumCloudDatabaseOrBuilder() { + if ((actionCase_ == 28) && (changeQuorumCloudDatabaseBuilder_ != null)) { + return changeQuorumCloudDatabaseBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 28) { + return (com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction) action_; + } + return com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that changes quorum of a Cloud Spanner database.
    +     * 
    + * + * + * .google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction change_quorum_cloud_database = 28; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction, + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction.Builder, + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseActionOrBuilder> + internalGetChangeQuorumCloudDatabaseFieldBuilder() { + if (changeQuorumCloudDatabaseBuilder_ == null) { + if (!(actionCase_ == 28)) { + action_ = + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction.getDefaultInstance(); + } + changeQuorumCloudDatabaseBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction, + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction.Builder, + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseActionOrBuilder>( + (com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 28; + onChanged(); + return changeQuorumCloudDatabaseBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.AddSplitPointsAction, + com.google.spanner.executor.v1.AddSplitPointsAction.Builder, + com.google.spanner.executor.v1.AddSplitPointsActionOrBuilder> + addSplitPointsBuilder_; + + /** + * + * + *
    +     * Action that adds splits to a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.AddSplitPointsAction add_split_points = 29; + * + * @return Whether the addSplitPoints field is set. + */ + @java.lang.Override + public boolean hasAddSplitPoints() { + return actionCase_ == 29; + } + + /** + * + * + *
    +     * Action that adds splits to a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.AddSplitPointsAction add_split_points = 29; + * + * @return The addSplitPoints. + */ + @java.lang.Override + public com.google.spanner.executor.v1.AddSplitPointsAction getAddSplitPoints() { + if (addSplitPointsBuilder_ == null) { + if (actionCase_ == 29) { + return (com.google.spanner.executor.v1.AddSplitPointsAction) action_; + } + return com.google.spanner.executor.v1.AddSplitPointsAction.getDefaultInstance(); + } else { + if (actionCase_ == 29) { + return addSplitPointsBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.AddSplitPointsAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that adds splits to a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.AddSplitPointsAction add_split_points = 29; + */ + public Builder setAddSplitPoints(com.google.spanner.executor.v1.AddSplitPointsAction value) { + if (addSplitPointsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + addSplitPointsBuilder_.setMessage(value); + } + actionCase_ = 29; + return this; + } + + /** + * + * + *
    +     * Action that adds splits to a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.AddSplitPointsAction add_split_points = 29; + */ + public Builder setAddSplitPoints( + com.google.spanner.executor.v1.AddSplitPointsAction.Builder builderForValue) { + if (addSplitPointsBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + addSplitPointsBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 29; + return this; + } + + /** + * + * + *
    +     * Action that adds splits to a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.AddSplitPointsAction add_split_points = 29; + */ + public Builder mergeAddSplitPoints(com.google.spanner.executor.v1.AddSplitPointsAction value) { + if (addSplitPointsBuilder_ == null) { + if (actionCase_ == 29 + && action_ + != com.google.spanner.executor.v1.AddSplitPointsAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.AddSplitPointsAction.newBuilder( + (com.google.spanner.executor.v1.AddSplitPointsAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 29) { + addSplitPointsBuilder_.mergeFrom(value); + } else { + addSplitPointsBuilder_.setMessage(value); + } + } + actionCase_ = 29; + return this; + } + + /** + * + * + *
    +     * Action that adds splits to a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.AddSplitPointsAction add_split_points = 29; + */ + public Builder clearAddSplitPoints() { + if (addSplitPointsBuilder_ == null) { + if (actionCase_ == 29) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 29) { + actionCase_ = 0; + action_ = null; + } + addSplitPointsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that adds splits to a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.AddSplitPointsAction add_split_points = 29; + */ + public com.google.spanner.executor.v1.AddSplitPointsAction.Builder getAddSplitPointsBuilder() { + return internalGetAddSplitPointsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that adds splits to a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.AddSplitPointsAction add_split_points = 29; + */ + @java.lang.Override + public com.google.spanner.executor.v1.AddSplitPointsActionOrBuilder + getAddSplitPointsOrBuilder() { + if ((actionCase_ == 29) && (addSplitPointsBuilder_ != null)) { + return addSplitPointsBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 29) { + return (com.google.spanner.executor.v1.AddSplitPointsAction) action_; + } + return com.google.spanner.executor.v1.AddSplitPointsAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that adds splits to a Cloud Spanner database.
    +     * 
    + * + * .google.spanner.executor.v1.AddSplitPointsAction add_split_points = 29; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.AddSplitPointsAction, + com.google.spanner.executor.v1.AddSplitPointsAction.Builder, + com.google.spanner.executor.v1.AddSplitPointsActionOrBuilder> + internalGetAddSplitPointsFieldBuilder() { + if (addSplitPointsBuilder_ == null) { + if (!(actionCase_ == 29)) { + action_ = com.google.spanner.executor.v1.AddSplitPointsAction.getDefaultInstance(); + } + addSplitPointsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.AddSplitPointsAction, + com.google.spanner.executor.v1.AddSplitPointsAction.Builder, + com.google.spanner.executor.v1.AddSplitPointsActionOrBuilder>( + (com.google.spanner.executor.v1.AddSplitPointsAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 29; + onChanged(); + return addSplitPointsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.AdminAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.AdminAction) + private static final com.google.spanner.executor.v1.AdminAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.AdminAction(); + } + + public static com.google.spanner.executor.v1.AdminAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AdminAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } 
catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.AdminAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminActionOrBuilder.java new file mode 100644 index 000000000000..7d595c13eb8b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminActionOrBuilder.java @@ -0,0 +1,1166 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface AdminActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.AdminAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Action that creates a user instance config.
    +   * 
    + * + * + * .google.spanner.executor.v1.CreateUserInstanceConfigAction create_user_instance_config = 1; + * + * + * @return Whether the createUserInstanceConfig field is set. + */ + boolean hasCreateUserInstanceConfig(); + + /** + * + * + *
    +   * Action that creates a user instance config.
    +   * 
    + * + * + * .google.spanner.executor.v1.CreateUserInstanceConfigAction create_user_instance_config = 1; + * + * + * @return The createUserInstanceConfig. + */ + com.google.spanner.executor.v1.CreateUserInstanceConfigAction getCreateUserInstanceConfig(); + + /** + * + * + *
    +   * Action that creates a user instance config.
    +   * 
    + * + * + * .google.spanner.executor.v1.CreateUserInstanceConfigAction create_user_instance_config = 1; + * + */ + com.google.spanner.executor.v1.CreateUserInstanceConfigActionOrBuilder + getCreateUserInstanceConfigOrBuilder(); + + /** + * + * + *
    +   * Action that updates a user instance config.
    +   * 
    + * + * + * .google.spanner.executor.v1.UpdateUserInstanceConfigAction update_user_instance_config = 2; + * + * + * @return Whether the updateUserInstanceConfig field is set. + */ + boolean hasUpdateUserInstanceConfig(); + + /** + * + * + *
    +   * Action that updates a user instance config.
    +   * 
    + * + * + * .google.spanner.executor.v1.UpdateUserInstanceConfigAction update_user_instance_config = 2; + * + * + * @return The updateUserInstanceConfig. + */ + com.google.spanner.executor.v1.UpdateUserInstanceConfigAction getUpdateUserInstanceConfig(); + + /** + * + * + *
    +   * Action that updates a user instance config.
    +   * 
    + * + * + * .google.spanner.executor.v1.UpdateUserInstanceConfigAction update_user_instance_config = 2; + * + */ + com.google.spanner.executor.v1.UpdateUserInstanceConfigActionOrBuilder + getUpdateUserInstanceConfigOrBuilder(); + + /** + * + * + *
    +   * Action that deletes a user instance config.
    +   * 
    + * + * + * .google.spanner.executor.v1.DeleteUserInstanceConfigAction delete_user_instance_config = 3; + * + * + * @return Whether the deleteUserInstanceConfig field is set. + */ + boolean hasDeleteUserInstanceConfig(); + + /** + * + * + *
    +   * Action that deletes a user instance config.
    +   * 
    + * + * + * .google.spanner.executor.v1.DeleteUserInstanceConfigAction delete_user_instance_config = 3; + * + * + * @return The deleteUserInstanceConfig. + */ + com.google.spanner.executor.v1.DeleteUserInstanceConfigAction getDeleteUserInstanceConfig(); + + /** + * + * + *
    +   * Action that deletes a user instance config.
    +   * 
    + * + * + * .google.spanner.executor.v1.DeleteUserInstanceConfigAction delete_user_instance_config = 3; + * + */ + com.google.spanner.executor.v1.DeleteUserInstanceConfigActionOrBuilder + getDeleteUserInstanceConfigOrBuilder(); + + /** + * + * + *
    +   * Action that gets a user instance config.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceConfigAction get_cloud_instance_config = 4; + * + * + * @return Whether the getCloudInstanceConfig field is set. + */ + boolean hasGetCloudInstanceConfig(); + + /** + * + * + *
    +   * Action that gets a user instance config.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceConfigAction get_cloud_instance_config = 4; + * + * + * @return The getCloudInstanceConfig. + */ + com.google.spanner.executor.v1.GetCloudInstanceConfigAction getGetCloudInstanceConfig(); + + /** + * + * + *
    +   * Action that gets a user instance config.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceConfigAction get_cloud_instance_config = 4; + * + */ + com.google.spanner.executor.v1.GetCloudInstanceConfigActionOrBuilder + getGetCloudInstanceConfigOrBuilder(); + + /** + * + * + *
    +   * Action that lists user instance configs.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudInstanceConfigsAction list_instance_configs = 5; + * + * + * @return Whether the listInstanceConfigs field is set. + */ + boolean hasListInstanceConfigs(); + + /** + * + * + *
    +   * Action that lists user instance configs.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudInstanceConfigsAction list_instance_configs = 5; + * + * + * @return The listInstanceConfigs. + */ + com.google.spanner.executor.v1.ListCloudInstanceConfigsAction getListInstanceConfigs(); + + /** + * + * + *
    +   * Action that lists user instance configs.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudInstanceConfigsAction list_instance_configs = 5; + * + */ + com.google.spanner.executor.v1.ListCloudInstanceConfigsActionOrBuilder + getListInstanceConfigsOrBuilder(); + + /** + * + * + *
    +   * Action that creates a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.CreateCloudInstanceAction create_cloud_instance = 6; + * + * @return Whether the createCloudInstance field is set. + */ + boolean hasCreateCloudInstance(); + + /** + * + * + *
    +   * Action that creates a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.CreateCloudInstanceAction create_cloud_instance = 6; + * + * @return The createCloudInstance. + */ + com.google.spanner.executor.v1.CreateCloudInstanceAction getCreateCloudInstance(); + + /** + * + * + *
    +   * Action that creates a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.CreateCloudInstanceAction create_cloud_instance = 6; + */ + com.google.spanner.executor.v1.CreateCloudInstanceActionOrBuilder + getCreateCloudInstanceOrBuilder(); + + /** + * + * + *
    +   * Action that updates a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudInstanceAction update_cloud_instance = 7; + * + * @return Whether the updateCloudInstance field is set. + */ + boolean hasUpdateCloudInstance(); + + /** + * + * + *
    +   * Action that updates a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudInstanceAction update_cloud_instance = 7; + * + * @return The updateCloudInstance. + */ + com.google.spanner.executor.v1.UpdateCloudInstanceAction getUpdateCloudInstance(); + + /** + * + * + *
    +   * Action that updates a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudInstanceAction update_cloud_instance = 7; + */ + com.google.spanner.executor.v1.UpdateCloudInstanceActionOrBuilder + getUpdateCloudInstanceOrBuilder(); + + /** + * + * + *
    +   * Action that deletes a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.DeleteCloudInstanceAction delete_cloud_instance = 8; + * + * @return Whether the deleteCloudInstance field is set. + */ + boolean hasDeleteCloudInstance(); + + /** + * + * + *
    +   * Action that deletes a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.DeleteCloudInstanceAction delete_cloud_instance = 8; + * + * @return The deleteCloudInstance. + */ + com.google.spanner.executor.v1.DeleteCloudInstanceAction getDeleteCloudInstance(); + + /** + * + * + *
    +   * Action that deletes a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.DeleteCloudInstanceAction delete_cloud_instance = 8; + */ + com.google.spanner.executor.v1.DeleteCloudInstanceActionOrBuilder + getDeleteCloudInstanceOrBuilder(); + + /** + * + * + *
    +   * Action that lists Cloud Spanner instances.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudInstancesAction list_cloud_instances = 9; + * + * @return Whether the listCloudInstances field is set. + */ + boolean hasListCloudInstances(); + + /** + * + * + *
    +   * Action that lists Cloud Spanner instances.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudInstancesAction list_cloud_instances = 9; + * + * @return The listCloudInstances. + */ + com.google.spanner.executor.v1.ListCloudInstancesAction getListCloudInstances(); + + /** + * + * + *
    +   * Action that lists Cloud Spanner instances.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudInstancesAction list_cloud_instances = 9; + */ + com.google.spanner.executor.v1.ListCloudInstancesActionOrBuilder getListCloudInstancesOrBuilder(); + + /** + * + * + *
    +   * Action that retrieves a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceAction get_cloud_instance = 10; + * + * @return Whether the getCloudInstance field is set. + */ + boolean hasGetCloudInstance(); + + /** + * + * + *
    +   * Action that retrieves a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceAction get_cloud_instance = 10; + * + * @return The getCloudInstance. + */ + com.google.spanner.executor.v1.GetCloudInstanceAction getGetCloudInstance(); + + /** + * + * + *
    +   * Action that retrieves a Cloud Spanner instance.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudInstanceAction get_cloud_instance = 10; + */ + com.google.spanner.executor.v1.GetCloudInstanceActionOrBuilder getGetCloudInstanceOrBuilder(); + + /** + * + * + *
    +   * Action that creates a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.CreateCloudDatabaseAction create_cloud_database = 11; + * + * @return Whether the createCloudDatabase field is set. + */ + boolean hasCreateCloudDatabase(); + + /** + * + * + *
    +   * Action that creates a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.CreateCloudDatabaseAction create_cloud_database = 11; + * + * @return The createCloudDatabase. + */ + com.google.spanner.executor.v1.CreateCloudDatabaseAction getCreateCloudDatabase(); + + /** + * + * + *
    +   * Action that creates a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.CreateCloudDatabaseAction create_cloud_database = 11; + */ + com.google.spanner.executor.v1.CreateCloudDatabaseActionOrBuilder + getCreateCloudDatabaseOrBuilder(); + + /** + * + * + *
    +   * Action that updates the schema of a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudDatabaseDdlAction update_cloud_database_ddl = 12; + * + * + * @return Whether the updateCloudDatabaseDdl field is set. + */ + boolean hasUpdateCloudDatabaseDdl(); + + /** + * + * + *
    +   * Action that updates the schema of a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudDatabaseDdlAction update_cloud_database_ddl = 12; + * + * + * @return The updateCloudDatabaseDdl. + */ + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction getUpdateCloudDatabaseDdl(); + + /** + * + * + *
    +   * Action that updates the schema of a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudDatabaseDdlAction update_cloud_database_ddl = 12; + * + */ + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlActionOrBuilder + getUpdateCloudDatabaseDdlOrBuilder(); + + /** + * + * + *
    +   * Action that updates the schema of a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudDatabaseAction update_cloud_database = 27; + * + * @return Whether the updateCloudDatabase field is set. + */ + boolean hasUpdateCloudDatabase(); + + /** + * + * + *
    +   * Action that updates the schema of a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudDatabaseAction update_cloud_database = 27; + * + * @return The updateCloudDatabase. + */ + com.google.spanner.executor.v1.UpdateCloudDatabaseAction getUpdateCloudDatabase(); + + /** + * + * + *
    +   * Action that updates the schema of a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudDatabaseAction update_cloud_database = 27; + */ + com.google.spanner.executor.v1.UpdateCloudDatabaseActionOrBuilder + getUpdateCloudDatabaseOrBuilder(); + + /** + * + * + *
    +   * Action that drops a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.DropCloudDatabaseAction drop_cloud_database = 13; + * + * @return Whether the dropCloudDatabase field is set. + */ + boolean hasDropCloudDatabase(); + + /** + * + * + *
    +   * Action that drops a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.DropCloudDatabaseAction drop_cloud_database = 13; + * + * @return The dropCloudDatabase. + */ + com.google.spanner.executor.v1.DropCloudDatabaseAction getDropCloudDatabase(); + + /** + * + * + *
    +   * Action that drops a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.DropCloudDatabaseAction drop_cloud_database = 13; + */ + com.google.spanner.executor.v1.DropCloudDatabaseActionOrBuilder getDropCloudDatabaseOrBuilder(); + + /** + * + * + *
    +   * Action that lists Cloud Spanner databases.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudDatabasesAction list_cloud_databases = 14; + * + * @return Whether the listCloudDatabases field is set. + */ + boolean hasListCloudDatabases(); + + /** + * + * + *
    +   * Action that lists Cloud Spanner databases.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudDatabasesAction list_cloud_databases = 14; + * + * @return The listCloudDatabases. + */ + com.google.spanner.executor.v1.ListCloudDatabasesAction getListCloudDatabases(); + + /** + * + * + *
    +   * Action that lists Cloud Spanner databases.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudDatabasesAction list_cloud_databases = 14; + */ + com.google.spanner.executor.v1.ListCloudDatabasesActionOrBuilder getListCloudDatabasesOrBuilder(); + + /** + * + * + *
    +   * Action that lists Cloud Spanner database operations.
    +   * 
    + * + * + * .google.spanner.executor.v1.ListCloudDatabaseOperationsAction list_cloud_database_operations = 15; + * + * + * @return Whether the listCloudDatabaseOperations field is set. + */ + boolean hasListCloudDatabaseOperations(); + + /** + * + * + *
    +   * Action that lists Cloud Spanner database operations.
    +   * 
    + * + * + * .google.spanner.executor.v1.ListCloudDatabaseOperationsAction list_cloud_database_operations = 15; + * + * + * @return The listCloudDatabaseOperations. + */ + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction getListCloudDatabaseOperations(); + + /** + * + * + *
    +   * Action that lists Cloud Spanner database operations.
    +   * 
    + * + * + * .google.spanner.executor.v1.ListCloudDatabaseOperationsAction list_cloud_database_operations = 15; + * + */ + com.google.spanner.executor.v1.ListCloudDatabaseOperationsActionOrBuilder + getListCloudDatabaseOperationsOrBuilder(); + + /** + * + * + *
    +   * Action that restores a Cloud Spanner database from a backup.
    +   * 
    + * + * .google.spanner.executor.v1.RestoreCloudDatabaseAction restore_cloud_database = 16; + * + * + * @return Whether the restoreCloudDatabase field is set. + */ + boolean hasRestoreCloudDatabase(); + + /** + * + * + *
    +   * Action that restores a Cloud Spanner database from a backup.
    +   * 
    + * + * .google.spanner.executor.v1.RestoreCloudDatabaseAction restore_cloud_database = 16; + * + * + * @return The restoreCloudDatabase. + */ + com.google.spanner.executor.v1.RestoreCloudDatabaseAction getRestoreCloudDatabase(); + + /** + * + * + *
    +   * Action that restores a Cloud Spanner database from a backup.
    +   * 
    + * + * .google.spanner.executor.v1.RestoreCloudDatabaseAction restore_cloud_database = 16; + * + */ + com.google.spanner.executor.v1.RestoreCloudDatabaseActionOrBuilder + getRestoreCloudDatabaseOrBuilder(); + + /** + * + * + *
    +   * Action that gets a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudDatabaseAction get_cloud_database = 17; + * + * @return Whether the getCloudDatabase field is set. + */ + boolean hasGetCloudDatabase(); + + /** + * + * + *
    +   * Action that gets a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudDatabaseAction get_cloud_database = 17; + * + * @return The getCloudDatabase. + */ + com.google.spanner.executor.v1.GetCloudDatabaseAction getGetCloudDatabase(); + + /** + * + * + *
    +   * Action that gets a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudDatabaseAction get_cloud_database = 17; + */ + com.google.spanner.executor.v1.GetCloudDatabaseActionOrBuilder getGetCloudDatabaseOrBuilder(); + + /** + * + * + *
    +   * Action that creates a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.CreateCloudBackupAction create_cloud_backup = 18; + * + * @return Whether the createCloudBackup field is set. + */ + boolean hasCreateCloudBackup(); + + /** + * + * + *
    +   * Action that creates a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.CreateCloudBackupAction create_cloud_backup = 18; + * + * @return The createCloudBackup. + */ + com.google.spanner.executor.v1.CreateCloudBackupAction getCreateCloudBackup(); + + /** + * + * + *
    +   * Action that creates a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.CreateCloudBackupAction create_cloud_backup = 18; + */ + com.google.spanner.executor.v1.CreateCloudBackupActionOrBuilder getCreateCloudBackupOrBuilder(); + + /** + * + * + *
    +   * Action that copies a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.CopyCloudBackupAction copy_cloud_backup = 19; + * + * @return Whether the copyCloudBackup field is set. + */ + boolean hasCopyCloudBackup(); + + /** + * + * + *
    +   * Action that copies a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.CopyCloudBackupAction copy_cloud_backup = 19; + * + * @return The copyCloudBackup. + */ + com.google.spanner.executor.v1.CopyCloudBackupAction getCopyCloudBackup(); + + /** + * + * + *
    +   * Action that copies a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.CopyCloudBackupAction copy_cloud_backup = 19; + */ + com.google.spanner.executor.v1.CopyCloudBackupActionOrBuilder getCopyCloudBackupOrBuilder(); + + /** + * + * + *
    +   * Action that gets a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudBackupAction get_cloud_backup = 20; + * + * @return Whether the getCloudBackup field is set. + */ + boolean hasGetCloudBackup(); + + /** + * + * + *
    +   * Action that gets a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudBackupAction get_cloud_backup = 20; + * + * @return The getCloudBackup. + */ + com.google.spanner.executor.v1.GetCloudBackupAction getGetCloudBackup(); + + /** + * + * + *
    +   * Action that gets a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.GetCloudBackupAction get_cloud_backup = 20; + */ + com.google.spanner.executor.v1.GetCloudBackupActionOrBuilder getGetCloudBackupOrBuilder(); + + /** + * + * + *
    +   * Action that updates a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudBackupAction update_cloud_backup = 21; + * + * @return Whether the updateCloudBackup field is set. + */ + boolean hasUpdateCloudBackup(); + + /** + * + * + *
    +   * Action that updates a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudBackupAction update_cloud_backup = 21; + * + * @return The updateCloudBackup. + */ + com.google.spanner.executor.v1.UpdateCloudBackupAction getUpdateCloudBackup(); + + /** + * + * + *
    +   * Action that updates a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.UpdateCloudBackupAction update_cloud_backup = 21; + */ + com.google.spanner.executor.v1.UpdateCloudBackupActionOrBuilder getUpdateCloudBackupOrBuilder(); + + /** + * + * + *
    +   * Action that deletes a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.DeleteCloudBackupAction delete_cloud_backup = 22; + * + * @return Whether the deleteCloudBackup field is set. + */ + boolean hasDeleteCloudBackup(); + + /** + * + * + *
    +   * Action that deletes a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.DeleteCloudBackupAction delete_cloud_backup = 22; + * + * @return The deleteCloudBackup. + */ + com.google.spanner.executor.v1.DeleteCloudBackupAction getDeleteCloudBackup(); + + /** + * + * + *
    +   * Action that deletes a Cloud Spanner database backup.
    +   * 
    + * + * .google.spanner.executor.v1.DeleteCloudBackupAction delete_cloud_backup = 22; + */ + com.google.spanner.executor.v1.DeleteCloudBackupActionOrBuilder getDeleteCloudBackupOrBuilder(); + + /** + * + * + *
    +   * Action that lists Cloud Spanner database backups.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudBackupsAction list_cloud_backups = 23; + * + * @return Whether the listCloudBackups field is set. + */ + boolean hasListCloudBackups(); + + /** + * + * + *
    +   * Action that lists Cloud Spanner database backups.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudBackupsAction list_cloud_backups = 23; + * + * @return The listCloudBackups. + */ + com.google.spanner.executor.v1.ListCloudBackupsAction getListCloudBackups(); + + /** + * + * + *
    +   * Action that lists Cloud Spanner database backups.
    +   * 
    + * + * .google.spanner.executor.v1.ListCloudBackupsAction list_cloud_backups = 23; + */ + com.google.spanner.executor.v1.ListCloudBackupsActionOrBuilder getListCloudBackupsOrBuilder(); + + /** + * + * + *
    +   * Action that lists Cloud Spanner database backup operations.
    +   * 
    + * + * + * .google.spanner.executor.v1.ListCloudBackupOperationsAction list_cloud_backup_operations = 24; + * + * + * @return Whether the listCloudBackupOperations field is set. + */ + boolean hasListCloudBackupOperations(); + + /** + * + * + *
    +   * Action that lists Cloud Spanner database backup operations.
    +   * 
    + * + * + * .google.spanner.executor.v1.ListCloudBackupOperationsAction list_cloud_backup_operations = 24; + * + * + * @return The listCloudBackupOperations. + */ + com.google.spanner.executor.v1.ListCloudBackupOperationsAction getListCloudBackupOperations(); + + /** + * + * + *
    +   * Action that lists Cloud Spanner database backup operations.
    +   * 
    + * + * + * .google.spanner.executor.v1.ListCloudBackupOperationsAction list_cloud_backup_operations = 24; + * + */ + com.google.spanner.executor.v1.ListCloudBackupOperationsActionOrBuilder + getListCloudBackupOperationsOrBuilder(); + + /** + * + * + *
    +   * Action that gets an operation.
    +   * 
    + * + * .google.spanner.executor.v1.GetOperationAction get_operation = 25; + * + * @return Whether the getOperation field is set. + */ + boolean hasGetOperation(); + + /** + * + * + *
    +   * Action that gets an operation.
    +   * 
    + * + * .google.spanner.executor.v1.GetOperationAction get_operation = 25; + * + * @return The getOperation. + */ + com.google.spanner.executor.v1.GetOperationAction getGetOperation(); + + /** + * + * + *
    +   * Action that gets an operation.
    +   * 
    + * + * .google.spanner.executor.v1.GetOperationAction get_operation = 25; + */ + com.google.spanner.executor.v1.GetOperationActionOrBuilder getGetOperationOrBuilder(); + + /** + * + * + *
    +   * Action that cancels an operation.
    +   * 
    + * + * .google.spanner.executor.v1.CancelOperationAction cancel_operation = 26; + * + * @return Whether the cancelOperation field is set. + */ + boolean hasCancelOperation(); + + /** + * + * + *
    +   * Action that cancels an operation.
    +   * 
    + * + * .google.spanner.executor.v1.CancelOperationAction cancel_operation = 26; + * + * @return The cancelOperation. + */ + com.google.spanner.executor.v1.CancelOperationAction getCancelOperation(); + + /** + * + * + *
    +   * Action that cancels an operation.
    +   * 
    + * + * .google.spanner.executor.v1.CancelOperationAction cancel_operation = 26; + */ + com.google.spanner.executor.v1.CancelOperationActionOrBuilder getCancelOperationOrBuilder(); + + /** + * + * + *
    +   * Action that changes quorum of a Cloud Spanner database.
    +   * 
    + * + * + * .google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction change_quorum_cloud_database = 28; + * + * + * @return Whether the changeQuorumCloudDatabase field is set. + */ + boolean hasChangeQuorumCloudDatabase(); + + /** + * + * + *
    +   * Action that changes quorum of a Cloud Spanner database.
    +   * 
    + * + * + * .google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction change_quorum_cloud_database = 28; + * + * + * @return The changeQuorumCloudDatabase. + */ + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction getChangeQuorumCloudDatabase(); + + /** + * + * + *
    +   * Action that changes quorum of a Cloud Spanner database.
    +   * 
    + * + * + * .google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction change_quorum_cloud_database = 28; + * + */ + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseActionOrBuilder + getChangeQuorumCloudDatabaseOrBuilder(); + + /** + * + * + *
    +   * Action that adds splits to a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.AddSplitPointsAction add_split_points = 29; + * + * @return Whether the addSplitPoints field is set. + */ + boolean hasAddSplitPoints(); + + /** + * + * + *
    +   * Action that adds splits to a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.AddSplitPointsAction add_split_points = 29; + * + * @return The addSplitPoints. + */ + com.google.spanner.executor.v1.AddSplitPointsAction getAddSplitPoints(); + + /** + * + * + *
    +   * Action that adds splits to a Cloud Spanner database.
    +   * 
    + * + * .google.spanner.executor.v1.AddSplitPointsAction add_split_points = 29; + */ + com.google.spanner.executor.v1.AddSplitPointsActionOrBuilder getAddSplitPointsOrBuilder(); + + com.google.spanner.executor.v1.AdminAction.ActionCase getActionCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminResult.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminResult.java new file mode 100644 index 000000000000..14b971c3cffd --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminResult.java @@ -0,0 +1,1870 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * AdminResult contains admin action results, for database/backup/operation.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.AdminResult} + */ +@com.google.protobuf.Generated +public final class AdminResult extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.AdminResult) + AdminResultOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "AdminResult"); + } + + // Use AdminResult.newBuilder() to construct. + private AdminResult(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private AdminResult() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_AdminResult_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_AdminResult_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.AdminResult.class, + com.google.spanner.executor.v1.AdminResult.Builder.class); + } + + private int bitField0_; + public static final int BACKUP_RESPONSE_FIELD_NUMBER = 1; + private com.google.spanner.executor.v1.CloudBackupResponse backupResponse_; + + /** + * + * + *
    +   * Results of cloud backup related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudBackupResponse backup_response = 1; + * + * @return Whether the backupResponse field is set. + */ + @java.lang.Override + public boolean hasBackupResponse() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Results of cloud backup related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudBackupResponse backup_response = 1; + * + * @return The backupResponse. + */ + @java.lang.Override + public com.google.spanner.executor.v1.CloudBackupResponse getBackupResponse() { + return backupResponse_ == null + ? com.google.spanner.executor.v1.CloudBackupResponse.getDefaultInstance() + : backupResponse_; + } + + /** + * + * + *
    +   * Results of cloud backup related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudBackupResponse backup_response = 1; + */ + @java.lang.Override + public com.google.spanner.executor.v1.CloudBackupResponseOrBuilder getBackupResponseOrBuilder() { + return backupResponse_ == null + ? com.google.spanner.executor.v1.CloudBackupResponse.getDefaultInstance() + : backupResponse_; + } + + public static final int OPERATION_RESPONSE_FIELD_NUMBER = 2; + private com.google.spanner.executor.v1.OperationResponse operationResponse_; + + /** + * + * + *
    +   * Results of operation related actions.
    +   * 
    + * + * .google.spanner.executor.v1.OperationResponse operation_response = 2; + * + * @return Whether the operationResponse field is set. + */ + @java.lang.Override + public boolean hasOperationResponse() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Results of operation related actions.
    +   * 
    + * + * .google.spanner.executor.v1.OperationResponse operation_response = 2; + * + * @return The operationResponse. + */ + @java.lang.Override + public com.google.spanner.executor.v1.OperationResponse getOperationResponse() { + return operationResponse_ == null + ? com.google.spanner.executor.v1.OperationResponse.getDefaultInstance() + : operationResponse_; + } + + /** + * + * + *
    +   * Results of operation related actions.
    +   * 
    + * + * .google.spanner.executor.v1.OperationResponse operation_response = 2; + */ + @java.lang.Override + public com.google.spanner.executor.v1.OperationResponseOrBuilder getOperationResponseOrBuilder() { + return operationResponse_ == null + ? com.google.spanner.executor.v1.OperationResponse.getDefaultInstance() + : operationResponse_; + } + + public static final int DATABASE_RESPONSE_FIELD_NUMBER = 3; + private com.google.spanner.executor.v1.CloudDatabaseResponse databaseResponse_; + + /** + * + * + *
    +   * Results of database related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudDatabaseResponse database_response = 3; + * + * @return Whether the databaseResponse field is set. + */ + @java.lang.Override + public boolean hasDatabaseResponse() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * Results of database related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudDatabaseResponse database_response = 3; + * + * @return The databaseResponse. + */ + @java.lang.Override + public com.google.spanner.executor.v1.CloudDatabaseResponse getDatabaseResponse() { + return databaseResponse_ == null + ? com.google.spanner.executor.v1.CloudDatabaseResponse.getDefaultInstance() + : databaseResponse_; + } + + /** + * + * + *
    +   * Results of database related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudDatabaseResponse database_response = 3; + */ + @java.lang.Override + public com.google.spanner.executor.v1.CloudDatabaseResponseOrBuilder + getDatabaseResponseOrBuilder() { + return databaseResponse_ == null + ? com.google.spanner.executor.v1.CloudDatabaseResponse.getDefaultInstance() + : databaseResponse_; + } + + public static final int INSTANCE_RESPONSE_FIELD_NUMBER = 4; + private com.google.spanner.executor.v1.CloudInstanceResponse instanceResponse_; + + /** + * + * + *
    +   * Results of instance related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudInstanceResponse instance_response = 4; + * + * @return Whether the instanceResponse field is set. + */ + @java.lang.Override + public boolean hasInstanceResponse() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +   * Results of instance related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudInstanceResponse instance_response = 4; + * + * @return The instanceResponse. + */ + @java.lang.Override + public com.google.spanner.executor.v1.CloudInstanceResponse getInstanceResponse() { + return instanceResponse_ == null + ? com.google.spanner.executor.v1.CloudInstanceResponse.getDefaultInstance() + : instanceResponse_; + } + + /** + * + * + *
    +   * Results of instance related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudInstanceResponse instance_response = 4; + */ + @java.lang.Override + public com.google.spanner.executor.v1.CloudInstanceResponseOrBuilder + getInstanceResponseOrBuilder() { + return instanceResponse_ == null + ? com.google.spanner.executor.v1.CloudInstanceResponse.getDefaultInstance() + : instanceResponse_; + } + + public static final int INSTANCE_CONFIG_RESPONSE_FIELD_NUMBER = 5; + private com.google.spanner.executor.v1.CloudInstanceConfigResponse instanceConfigResponse_; + + /** + * + * + *
    +   * Results of instance config related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudInstanceConfigResponse instance_config_response = 5; + * + * + * @return Whether the instanceConfigResponse field is set. + */ + @java.lang.Override + public boolean hasInstanceConfigResponse() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +   * Results of instance config related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudInstanceConfigResponse instance_config_response = 5; + * + * + * @return The instanceConfigResponse. + */ + @java.lang.Override + public com.google.spanner.executor.v1.CloudInstanceConfigResponse getInstanceConfigResponse() { + return instanceConfigResponse_ == null + ? com.google.spanner.executor.v1.CloudInstanceConfigResponse.getDefaultInstance() + : instanceConfigResponse_; + } + + /** + * + * + *
    +   * Results of instance config related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudInstanceConfigResponse instance_config_response = 5; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.CloudInstanceConfigResponseOrBuilder + getInstanceConfigResponseOrBuilder() { + return instanceConfigResponse_ == null + ? com.google.spanner.executor.v1.CloudInstanceConfigResponse.getDefaultInstance() + : instanceConfigResponse_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getBackupResponse()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getOperationResponse()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(3, getDatabaseResponse()); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(4, getInstanceResponse()); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeMessage(5, getInstanceConfigResponse()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getBackupResponse()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getOperationResponse()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getDatabaseResponse()); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, 
getInstanceResponse()); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(5, getInstanceConfigResponse()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.AdminResult)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.AdminResult other = + (com.google.spanner.executor.v1.AdminResult) obj; + + if (hasBackupResponse() != other.hasBackupResponse()) return false; + if (hasBackupResponse()) { + if (!getBackupResponse().equals(other.getBackupResponse())) return false; + } + if (hasOperationResponse() != other.hasOperationResponse()) return false; + if (hasOperationResponse()) { + if (!getOperationResponse().equals(other.getOperationResponse())) return false; + } + if (hasDatabaseResponse() != other.hasDatabaseResponse()) return false; + if (hasDatabaseResponse()) { + if (!getDatabaseResponse().equals(other.getDatabaseResponse())) return false; + } + if (hasInstanceResponse() != other.hasInstanceResponse()) return false; + if (hasInstanceResponse()) { + if (!getInstanceResponse().equals(other.getInstanceResponse())) return false; + } + if (hasInstanceConfigResponse() != other.hasInstanceConfigResponse()) return false; + if (hasInstanceConfigResponse()) { + if (!getInstanceConfigResponse().equals(other.getInstanceConfigResponse())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasBackupResponse()) { + hash = (37 * hash) + BACKUP_RESPONSE_FIELD_NUMBER; + hash = (53 * hash) + getBackupResponse().hashCode(); + } + if 
(hasOperationResponse()) { + hash = (37 * hash) + OPERATION_RESPONSE_FIELD_NUMBER; + hash = (53 * hash) + getOperationResponse().hashCode(); + } + if (hasDatabaseResponse()) { + hash = (37 * hash) + DATABASE_RESPONSE_FIELD_NUMBER; + hash = (53 * hash) + getDatabaseResponse().hashCode(); + } + if (hasInstanceResponse()) { + hash = (37 * hash) + INSTANCE_RESPONSE_FIELD_NUMBER; + hash = (53 * hash) + getInstanceResponse().hashCode(); + } + if (hasInstanceConfigResponse()) { + hash = (37 * hash) + INSTANCE_CONFIG_RESPONSE_FIELD_NUMBER; + hash = (53 * hash) + getInstanceConfigResponse().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.AdminResult parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.AdminResult parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.AdminResult parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.AdminResult parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.AdminResult parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.AdminResult parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.AdminResult parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.AdminResult parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.AdminResult parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.AdminResult parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.AdminResult parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.AdminResult parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder 
newBuilder(com.google.spanner.executor.v1.AdminResult prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * AdminResult contains admin action results, for database/backup/operation.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.AdminResult} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.AdminResult) + com.google.spanner.executor.v1.AdminResultOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_AdminResult_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_AdminResult_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.AdminResult.class, + com.google.spanner.executor.v1.AdminResult.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.AdminResult.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetBackupResponseFieldBuilder(); + internalGetOperationResponseFieldBuilder(); + internalGetDatabaseResponseFieldBuilder(); + internalGetInstanceResponseFieldBuilder(); + internalGetInstanceConfigResponseFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + backupResponse_ = null; + if (backupResponseBuilder_ != null) { + backupResponseBuilder_.dispose(); + backupResponseBuilder_ = null; + } + operationResponse_ = null; + if (operationResponseBuilder_ != null) { + operationResponseBuilder_.dispose(); + operationResponseBuilder_ = null; + } + databaseResponse_ = 
null; + if (databaseResponseBuilder_ != null) { + databaseResponseBuilder_.dispose(); + databaseResponseBuilder_ = null; + } + instanceResponse_ = null; + if (instanceResponseBuilder_ != null) { + instanceResponseBuilder_.dispose(); + instanceResponseBuilder_ = null; + } + instanceConfigResponse_ = null; + if (instanceConfigResponseBuilder_ != null) { + instanceConfigResponseBuilder_.dispose(); + instanceConfigResponseBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_AdminResult_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.AdminResult getDefaultInstanceForType() { + return com.google.spanner.executor.v1.AdminResult.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.AdminResult build() { + com.google.spanner.executor.v1.AdminResult result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.AdminResult buildPartial() { + com.google.spanner.executor.v1.AdminResult result = + new com.google.spanner.executor.v1.AdminResult(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.AdminResult result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.backupResponse_ = + backupResponseBuilder_ == null ? backupResponse_ : backupResponseBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.operationResponse_ = + operationResponseBuilder_ == null + ? 
operationResponse_ + : operationResponseBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.databaseResponse_ = + databaseResponseBuilder_ == null ? databaseResponse_ : databaseResponseBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.instanceResponse_ = + instanceResponseBuilder_ == null ? instanceResponse_ : instanceResponseBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.instanceConfigResponse_ = + instanceConfigResponseBuilder_ == null + ? instanceConfigResponse_ + : instanceConfigResponseBuilder_.build(); + to_bitField0_ |= 0x00000010; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.AdminResult) { + return mergeFrom((com.google.spanner.executor.v1.AdminResult) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.AdminResult other) { + if (other == com.google.spanner.executor.v1.AdminResult.getDefaultInstance()) return this; + if (other.hasBackupResponse()) { + mergeBackupResponse(other.getBackupResponse()); + } + if (other.hasOperationResponse()) { + mergeOperationResponse(other.getOperationResponse()); + } + if (other.hasDatabaseResponse()) { + mergeDatabaseResponse(other.getDatabaseResponse()); + } + if (other.hasInstanceResponse()) { + mergeInstanceResponse(other.getInstanceResponse()); + } + if (other.hasInstanceConfigResponse()) { + mergeInstanceConfigResponse(other.getInstanceConfigResponse()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetBackupResponseFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetOperationResponseFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetDatabaseResponseFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetInstanceResponseFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + input.readMessage( + internalGetInstanceConfigResponseFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.executor.v1.CloudBackupResponse backupResponse_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CloudBackupResponse, + com.google.spanner.executor.v1.CloudBackupResponse.Builder, + com.google.spanner.executor.v1.CloudBackupResponseOrBuilder> + backupResponseBuilder_; + + /** + * + * + *
    +     * Results of cloud backup related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudBackupResponse backup_response = 1; + * + * @return Whether the backupResponse field is set. + */ + public boolean hasBackupResponse() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Results of cloud backup related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudBackupResponse backup_response = 1; + * + * @return The backupResponse. + */ + public com.google.spanner.executor.v1.CloudBackupResponse getBackupResponse() { + if (backupResponseBuilder_ == null) { + return backupResponse_ == null + ? com.google.spanner.executor.v1.CloudBackupResponse.getDefaultInstance() + : backupResponse_; + } else { + return backupResponseBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Results of cloud backup related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudBackupResponse backup_response = 1; + */ + public Builder setBackupResponse(com.google.spanner.executor.v1.CloudBackupResponse value) { + if (backupResponseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + backupResponse_ = value; + } else { + backupResponseBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Results of cloud backup related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudBackupResponse backup_response = 1; + */ + public Builder setBackupResponse( + com.google.spanner.executor.v1.CloudBackupResponse.Builder builderForValue) { + if (backupResponseBuilder_ == null) { + backupResponse_ = builderForValue.build(); + } else { + backupResponseBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Results of cloud backup related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudBackupResponse backup_response = 1; + */ + public Builder mergeBackupResponse(com.google.spanner.executor.v1.CloudBackupResponse value) { + if (backupResponseBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && backupResponse_ != null + && backupResponse_ + != com.google.spanner.executor.v1.CloudBackupResponse.getDefaultInstance()) { + getBackupResponseBuilder().mergeFrom(value); + } else { + backupResponse_ = value; + } + } else { + backupResponseBuilder_.mergeFrom(value); + } + if (backupResponse_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Results of cloud backup related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudBackupResponse backup_response = 1; + */ + public Builder clearBackupResponse() { + bitField0_ = (bitField0_ & ~0x00000001); + backupResponse_ = null; + if (backupResponseBuilder_ != null) { + backupResponseBuilder_.dispose(); + backupResponseBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Results of cloud backup related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudBackupResponse backup_response = 1; + */ + public com.google.spanner.executor.v1.CloudBackupResponse.Builder getBackupResponseBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetBackupResponseFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Results of cloud backup related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudBackupResponse backup_response = 1; + */ + public com.google.spanner.executor.v1.CloudBackupResponseOrBuilder + getBackupResponseOrBuilder() { + if (backupResponseBuilder_ != null) { + return backupResponseBuilder_.getMessageOrBuilder(); + } else { + return backupResponse_ == null + ? com.google.spanner.executor.v1.CloudBackupResponse.getDefaultInstance() + : backupResponse_; + } + } + + /** + * + * + *
    +     * Results of cloud backup related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudBackupResponse backup_response = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CloudBackupResponse, + com.google.spanner.executor.v1.CloudBackupResponse.Builder, + com.google.spanner.executor.v1.CloudBackupResponseOrBuilder> + internalGetBackupResponseFieldBuilder() { + if (backupResponseBuilder_ == null) { + backupResponseBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CloudBackupResponse, + com.google.spanner.executor.v1.CloudBackupResponse.Builder, + com.google.spanner.executor.v1.CloudBackupResponseOrBuilder>( + getBackupResponse(), getParentForChildren(), isClean()); + backupResponse_ = null; + } + return backupResponseBuilder_; + } + + private com.google.spanner.executor.v1.OperationResponse operationResponse_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.OperationResponse, + com.google.spanner.executor.v1.OperationResponse.Builder, + com.google.spanner.executor.v1.OperationResponseOrBuilder> + operationResponseBuilder_; + + /** + * + * + *
    +     * Results of operation related actions.
    +     * 
    + * + * .google.spanner.executor.v1.OperationResponse operation_response = 2; + * + * @return Whether the operationResponse field is set. + */ + public boolean hasOperationResponse() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Results of operation related actions.
    +     * 
    + * + * .google.spanner.executor.v1.OperationResponse operation_response = 2; + * + * @return The operationResponse. + */ + public com.google.spanner.executor.v1.OperationResponse getOperationResponse() { + if (operationResponseBuilder_ == null) { + return operationResponse_ == null + ? com.google.spanner.executor.v1.OperationResponse.getDefaultInstance() + : operationResponse_; + } else { + return operationResponseBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Results of operation related actions.
    +     * 
    + * + * .google.spanner.executor.v1.OperationResponse operation_response = 2; + */ + public Builder setOperationResponse(com.google.spanner.executor.v1.OperationResponse value) { + if (operationResponseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + operationResponse_ = value; + } else { + operationResponseBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Results of operation related actions.
    +     * 
    + * + * .google.spanner.executor.v1.OperationResponse operation_response = 2; + */ + public Builder setOperationResponse( + com.google.spanner.executor.v1.OperationResponse.Builder builderForValue) { + if (operationResponseBuilder_ == null) { + operationResponse_ = builderForValue.build(); + } else { + operationResponseBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Results of operation related actions.
    +     * 
    + * + * .google.spanner.executor.v1.OperationResponse operation_response = 2; + */ + public Builder mergeOperationResponse(com.google.spanner.executor.v1.OperationResponse value) { + if (operationResponseBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && operationResponse_ != null + && operationResponse_ + != com.google.spanner.executor.v1.OperationResponse.getDefaultInstance()) { + getOperationResponseBuilder().mergeFrom(value); + } else { + operationResponse_ = value; + } + } else { + operationResponseBuilder_.mergeFrom(value); + } + if (operationResponse_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Results of operation related actions.
    +     * 
    + * + * .google.spanner.executor.v1.OperationResponse operation_response = 2; + */ + public Builder clearOperationResponse() { + bitField0_ = (bitField0_ & ~0x00000002); + operationResponse_ = null; + if (operationResponseBuilder_ != null) { + operationResponseBuilder_.dispose(); + operationResponseBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Results of operation related actions.
    +     * 
    + * + * .google.spanner.executor.v1.OperationResponse operation_response = 2; + */ + public com.google.spanner.executor.v1.OperationResponse.Builder getOperationResponseBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetOperationResponseFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Results of operation related actions.
    +     * 
    + * + * .google.spanner.executor.v1.OperationResponse operation_response = 2; + */ + public com.google.spanner.executor.v1.OperationResponseOrBuilder + getOperationResponseOrBuilder() { + if (operationResponseBuilder_ != null) { + return operationResponseBuilder_.getMessageOrBuilder(); + } else { + return operationResponse_ == null + ? com.google.spanner.executor.v1.OperationResponse.getDefaultInstance() + : operationResponse_; + } + } + + /** + * + * + *
    +     * Results of operation related actions.
    +     * 
    + * + * .google.spanner.executor.v1.OperationResponse operation_response = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.OperationResponse, + com.google.spanner.executor.v1.OperationResponse.Builder, + com.google.spanner.executor.v1.OperationResponseOrBuilder> + internalGetOperationResponseFieldBuilder() { + if (operationResponseBuilder_ == null) { + operationResponseBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.OperationResponse, + com.google.spanner.executor.v1.OperationResponse.Builder, + com.google.spanner.executor.v1.OperationResponseOrBuilder>( + getOperationResponse(), getParentForChildren(), isClean()); + operationResponse_ = null; + } + return operationResponseBuilder_; + } + + private com.google.spanner.executor.v1.CloudDatabaseResponse databaseResponse_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CloudDatabaseResponse, + com.google.spanner.executor.v1.CloudDatabaseResponse.Builder, + com.google.spanner.executor.v1.CloudDatabaseResponseOrBuilder> + databaseResponseBuilder_; + + /** + * + * + *
    +     * Results of database related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudDatabaseResponse database_response = 3; + * + * @return Whether the databaseResponse field is set. + */ + public boolean hasDatabaseResponse() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Results of database related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudDatabaseResponse database_response = 3; + * + * @return The databaseResponse. + */ + public com.google.spanner.executor.v1.CloudDatabaseResponse getDatabaseResponse() { + if (databaseResponseBuilder_ == null) { + return databaseResponse_ == null + ? com.google.spanner.executor.v1.CloudDatabaseResponse.getDefaultInstance() + : databaseResponse_; + } else { + return databaseResponseBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Results of database related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudDatabaseResponse database_response = 3; + */ + public Builder setDatabaseResponse(com.google.spanner.executor.v1.CloudDatabaseResponse value) { + if (databaseResponseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + databaseResponse_ = value; + } else { + databaseResponseBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Results of database related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudDatabaseResponse database_response = 3; + */ + public Builder setDatabaseResponse( + com.google.spanner.executor.v1.CloudDatabaseResponse.Builder builderForValue) { + if (databaseResponseBuilder_ == null) { + databaseResponse_ = builderForValue.build(); + } else { + databaseResponseBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Results of database related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudDatabaseResponse database_response = 3; + */ + public Builder mergeDatabaseResponse( + com.google.spanner.executor.v1.CloudDatabaseResponse value) { + if (databaseResponseBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && databaseResponse_ != null + && databaseResponse_ + != com.google.spanner.executor.v1.CloudDatabaseResponse.getDefaultInstance()) { + getDatabaseResponseBuilder().mergeFrom(value); + } else { + databaseResponse_ = value; + } + } else { + databaseResponseBuilder_.mergeFrom(value); + } + if (databaseResponse_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Results of database related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudDatabaseResponse database_response = 3; + */ + public Builder clearDatabaseResponse() { + bitField0_ = (bitField0_ & ~0x00000004); + databaseResponse_ = null; + if (databaseResponseBuilder_ != null) { + databaseResponseBuilder_.dispose(); + databaseResponseBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Results of database related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudDatabaseResponse database_response = 3; + */ + public com.google.spanner.executor.v1.CloudDatabaseResponse.Builder + getDatabaseResponseBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetDatabaseResponseFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Results of database related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudDatabaseResponse database_response = 3; + */ + public com.google.spanner.executor.v1.CloudDatabaseResponseOrBuilder + getDatabaseResponseOrBuilder() { + if (databaseResponseBuilder_ != null) { + return databaseResponseBuilder_.getMessageOrBuilder(); + } else { + return databaseResponse_ == null + ? com.google.spanner.executor.v1.CloudDatabaseResponse.getDefaultInstance() + : databaseResponse_; + } + } + + /** + * + * + *
    +     * Results of database related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudDatabaseResponse database_response = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CloudDatabaseResponse, + com.google.spanner.executor.v1.CloudDatabaseResponse.Builder, + com.google.spanner.executor.v1.CloudDatabaseResponseOrBuilder> + internalGetDatabaseResponseFieldBuilder() { + if (databaseResponseBuilder_ == null) { + databaseResponseBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CloudDatabaseResponse, + com.google.spanner.executor.v1.CloudDatabaseResponse.Builder, + com.google.spanner.executor.v1.CloudDatabaseResponseOrBuilder>( + getDatabaseResponse(), getParentForChildren(), isClean()); + databaseResponse_ = null; + } + return databaseResponseBuilder_; + } + + private com.google.spanner.executor.v1.CloudInstanceResponse instanceResponse_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CloudInstanceResponse, + com.google.spanner.executor.v1.CloudInstanceResponse.Builder, + com.google.spanner.executor.v1.CloudInstanceResponseOrBuilder> + instanceResponseBuilder_; + + /** + * + * + *
    +     * Results of instance related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudInstanceResponse instance_response = 4; + * + * @return Whether the instanceResponse field is set. + */ + public boolean hasInstanceResponse() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Results of instance related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudInstanceResponse instance_response = 4; + * + * @return The instanceResponse. + */ + public com.google.spanner.executor.v1.CloudInstanceResponse getInstanceResponse() { + if (instanceResponseBuilder_ == null) { + return instanceResponse_ == null + ? com.google.spanner.executor.v1.CloudInstanceResponse.getDefaultInstance() + : instanceResponse_; + } else { + return instanceResponseBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Results of instance related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudInstanceResponse instance_response = 4; + */ + public Builder setInstanceResponse(com.google.spanner.executor.v1.CloudInstanceResponse value) { + if (instanceResponseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + instanceResponse_ = value; + } else { + instanceResponseBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Results of instance related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudInstanceResponse instance_response = 4; + */ + public Builder setInstanceResponse( + com.google.spanner.executor.v1.CloudInstanceResponse.Builder builderForValue) { + if (instanceResponseBuilder_ == null) { + instanceResponse_ = builderForValue.build(); + } else { + instanceResponseBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Results of instance related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudInstanceResponse instance_response = 4; + */ + public Builder mergeInstanceResponse( + com.google.spanner.executor.v1.CloudInstanceResponse value) { + if (instanceResponseBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && instanceResponse_ != null + && instanceResponse_ + != com.google.spanner.executor.v1.CloudInstanceResponse.getDefaultInstance()) { + getInstanceResponseBuilder().mergeFrom(value); + } else { + instanceResponse_ = value; + } + } else { + instanceResponseBuilder_.mergeFrom(value); + } + if (instanceResponse_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Results of instance related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudInstanceResponse instance_response = 4; + */ + public Builder clearInstanceResponse() { + bitField0_ = (bitField0_ & ~0x00000008); + instanceResponse_ = null; + if (instanceResponseBuilder_ != null) { + instanceResponseBuilder_.dispose(); + instanceResponseBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Results of instance related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudInstanceResponse instance_response = 4; + */ + public com.google.spanner.executor.v1.CloudInstanceResponse.Builder + getInstanceResponseBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetInstanceResponseFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Results of instance related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudInstanceResponse instance_response = 4; + */ + public com.google.spanner.executor.v1.CloudInstanceResponseOrBuilder + getInstanceResponseOrBuilder() { + if (instanceResponseBuilder_ != null) { + return instanceResponseBuilder_.getMessageOrBuilder(); + } else { + return instanceResponse_ == null + ? com.google.spanner.executor.v1.CloudInstanceResponse.getDefaultInstance() + : instanceResponse_; + } + } + + /** + * + * + *
    +     * Results of instance related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudInstanceResponse instance_response = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CloudInstanceResponse, + com.google.spanner.executor.v1.CloudInstanceResponse.Builder, + com.google.spanner.executor.v1.CloudInstanceResponseOrBuilder> + internalGetInstanceResponseFieldBuilder() { + if (instanceResponseBuilder_ == null) { + instanceResponseBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CloudInstanceResponse, + com.google.spanner.executor.v1.CloudInstanceResponse.Builder, + com.google.spanner.executor.v1.CloudInstanceResponseOrBuilder>( + getInstanceResponse(), getParentForChildren(), isClean()); + instanceResponse_ = null; + } + return instanceResponseBuilder_; + } + + private com.google.spanner.executor.v1.CloudInstanceConfigResponse instanceConfigResponse_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CloudInstanceConfigResponse, + com.google.spanner.executor.v1.CloudInstanceConfigResponse.Builder, + com.google.spanner.executor.v1.CloudInstanceConfigResponseOrBuilder> + instanceConfigResponseBuilder_; + + /** + * + * + *
    +     * Results of instance config related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudInstanceConfigResponse instance_config_response = 5; + * + * + * @return Whether the instanceConfigResponse field is set. + */ + public boolean hasInstanceConfigResponse() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * Results of instance config related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudInstanceConfigResponse instance_config_response = 5; + * + * + * @return The instanceConfigResponse. + */ + public com.google.spanner.executor.v1.CloudInstanceConfigResponse getInstanceConfigResponse() { + if (instanceConfigResponseBuilder_ == null) { + return instanceConfigResponse_ == null + ? com.google.spanner.executor.v1.CloudInstanceConfigResponse.getDefaultInstance() + : instanceConfigResponse_; + } else { + return instanceConfigResponseBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Results of instance config related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudInstanceConfigResponse instance_config_response = 5; + * + */ + public Builder setInstanceConfigResponse( + com.google.spanner.executor.v1.CloudInstanceConfigResponse value) { + if (instanceConfigResponseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + instanceConfigResponse_ = value; + } else { + instanceConfigResponseBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Results of instance config related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudInstanceConfigResponse instance_config_response = 5; + * + */ + public Builder setInstanceConfigResponse( + com.google.spanner.executor.v1.CloudInstanceConfigResponse.Builder builderForValue) { + if (instanceConfigResponseBuilder_ == null) { + instanceConfigResponse_ = builderForValue.build(); + } else { + instanceConfigResponseBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Results of instance config related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudInstanceConfigResponse instance_config_response = 5; + * + */ + public Builder mergeInstanceConfigResponse( + com.google.spanner.executor.v1.CloudInstanceConfigResponse value) { + if (instanceConfigResponseBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && instanceConfigResponse_ != null + && instanceConfigResponse_ + != com.google.spanner.executor.v1.CloudInstanceConfigResponse + .getDefaultInstance()) { + getInstanceConfigResponseBuilder().mergeFrom(value); + } else { + instanceConfigResponse_ = value; + } + } else { + instanceConfigResponseBuilder_.mergeFrom(value); + } + if (instanceConfigResponse_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Results of instance config related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudInstanceConfigResponse instance_config_response = 5; + * + */ + public Builder clearInstanceConfigResponse() { + bitField0_ = (bitField0_ & ~0x00000010); + instanceConfigResponse_ = null; + if (instanceConfigResponseBuilder_ != null) { + instanceConfigResponseBuilder_.dispose(); + instanceConfigResponseBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Results of instance config related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudInstanceConfigResponse instance_config_response = 5; + * + */ + public com.google.spanner.executor.v1.CloudInstanceConfigResponse.Builder + getInstanceConfigResponseBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetInstanceConfigResponseFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Results of instance config related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudInstanceConfigResponse instance_config_response = 5; + * + */ + public com.google.spanner.executor.v1.CloudInstanceConfigResponseOrBuilder + getInstanceConfigResponseOrBuilder() { + if (instanceConfigResponseBuilder_ != null) { + return instanceConfigResponseBuilder_.getMessageOrBuilder(); + } else { + return instanceConfigResponse_ == null + ? com.google.spanner.executor.v1.CloudInstanceConfigResponse.getDefaultInstance() + : instanceConfigResponse_; + } + } + + /** + * + * + *
    +     * Results of instance config related actions.
    +     * 
    + * + * .google.spanner.executor.v1.CloudInstanceConfigResponse instance_config_response = 5; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CloudInstanceConfigResponse, + com.google.spanner.executor.v1.CloudInstanceConfigResponse.Builder, + com.google.spanner.executor.v1.CloudInstanceConfigResponseOrBuilder> + internalGetInstanceConfigResponseFieldBuilder() { + if (instanceConfigResponseBuilder_ == null) { + instanceConfigResponseBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CloudInstanceConfigResponse, + com.google.spanner.executor.v1.CloudInstanceConfigResponse.Builder, + com.google.spanner.executor.v1.CloudInstanceConfigResponseOrBuilder>( + getInstanceConfigResponse(), getParentForChildren(), isClean()); + instanceConfigResponse_ = null; + } + return instanceConfigResponseBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.AdminResult) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.AdminResult) + private static final com.google.spanner.executor.v1.AdminResult DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.AdminResult(); + } + + public static com.google.spanner.executor.v1.AdminResult getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AdminResult parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.AdminResult getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminResultOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminResultOrBuilder.java new file mode 100644 index 000000000000..c94d4b47cea9 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminResultOrBuilder.java @@ -0,0 +1,217 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface AdminResultOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.AdminResult) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Results of cloud backup related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudBackupResponse backup_response = 1; + * + * @return Whether the backupResponse field is set. + */ + boolean hasBackupResponse(); + + /** + * + * + *
    +   * Results of cloud backup related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudBackupResponse backup_response = 1; + * + * @return The backupResponse. + */ + com.google.spanner.executor.v1.CloudBackupResponse getBackupResponse(); + + /** + * + * + *
    +   * Results of cloud backup related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudBackupResponse backup_response = 1; + */ + com.google.spanner.executor.v1.CloudBackupResponseOrBuilder getBackupResponseOrBuilder(); + + /** + * + * + *
    +   * Results of operation related actions.
    +   * 
    + * + * .google.spanner.executor.v1.OperationResponse operation_response = 2; + * + * @return Whether the operationResponse field is set. + */ + boolean hasOperationResponse(); + + /** + * + * + *
    +   * Results of operation related actions.
    +   * 
    + * + * .google.spanner.executor.v1.OperationResponse operation_response = 2; + * + * @return The operationResponse. + */ + com.google.spanner.executor.v1.OperationResponse getOperationResponse(); + + /** + * + * + *
    +   * Results of operation related actions.
    +   * 
    + * + * .google.spanner.executor.v1.OperationResponse operation_response = 2; + */ + com.google.spanner.executor.v1.OperationResponseOrBuilder getOperationResponseOrBuilder(); + + /** + * + * + *
    +   * Results of database related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudDatabaseResponse database_response = 3; + * + * @return Whether the databaseResponse field is set. + */ + boolean hasDatabaseResponse(); + + /** + * + * + *
    +   * Results of database related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudDatabaseResponse database_response = 3; + * + * @return The databaseResponse. + */ + com.google.spanner.executor.v1.CloudDatabaseResponse getDatabaseResponse(); + + /** + * + * + *
    +   * Results of database related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudDatabaseResponse database_response = 3; + */ + com.google.spanner.executor.v1.CloudDatabaseResponseOrBuilder getDatabaseResponseOrBuilder(); + + /** + * + * + *
    +   * Results of instance related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudInstanceResponse instance_response = 4; + * + * @return Whether the instanceResponse field is set. + */ + boolean hasInstanceResponse(); + + /** + * + * + *
    +   * Results of instance related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudInstanceResponse instance_response = 4; + * + * @return The instanceResponse. + */ + com.google.spanner.executor.v1.CloudInstanceResponse getInstanceResponse(); + + /** + * + * + *
    +   * Results of instance related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudInstanceResponse instance_response = 4; + */ + com.google.spanner.executor.v1.CloudInstanceResponseOrBuilder getInstanceResponseOrBuilder(); + + /** + * + * + *
    +   * Results of instance config related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudInstanceConfigResponse instance_config_response = 5; + * + * + * @return Whether the instanceConfigResponse field is set. + */ + boolean hasInstanceConfigResponse(); + + /** + * + * + *
    +   * Results of instance config related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudInstanceConfigResponse instance_config_response = 5; + * + * + * @return The instanceConfigResponse. + */ + com.google.spanner.executor.v1.CloudInstanceConfigResponse getInstanceConfigResponse(); + + /** + * + * + *
    +   * Results of instance config related actions.
    +   * 
    + * + * .google.spanner.executor.v1.CloudInstanceConfigResponse instance_config_response = 5; + * + */ + com.google.spanner.executor.v1.CloudInstanceConfigResponseOrBuilder + getInstanceConfigResponseOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchDmlAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchDmlAction.java new file mode 100644 index 000000000000..fb25206e0412 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchDmlAction.java @@ -0,0 +1,1070 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Batch of DML statements invoked using batched execution.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.BatchDmlAction} + */ +@com.google.protobuf.Generated +public final class BatchDmlAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.BatchDmlAction) + BatchDmlActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BatchDmlAction"); + } + + // Use BatchDmlAction.newBuilder() to construct. + private BatchDmlAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BatchDmlAction() { + updates_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_BatchDmlAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_BatchDmlAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.BatchDmlAction.class, + com.google.spanner.executor.v1.BatchDmlAction.Builder.class); + } + + private int bitField0_; + public static final int UPDATES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List updates_; + + /** + * + * + *
    +   * DML statements.
    +   * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + @java.lang.Override + public java.util.List getUpdatesList() { + return updates_; + } + + /** + * + * + *
    +   * DML statements.
    +   * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + @java.lang.Override + public java.util.List + getUpdatesOrBuilderList() { + return updates_; + } + + /** + * + * + *
    +   * DML statements.
    +   * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + @java.lang.Override + public int getUpdatesCount() { + return updates_.size(); + } + + /** + * + * + *
    +   * DML statements.
    +   * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryAction getUpdates(int index) { + return updates_.get(index); + } + + /** + * + * + *
    +   * DML statements.
    +   * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryActionOrBuilder getUpdatesOrBuilder(int index) { + return updates_.get(index); + } + + public static final int LAST_STATEMENTS_FIELD_NUMBER = 2; + private boolean lastStatements_ = false; + + /** + * + * + *
    +   * Whether to set this request with the last statement option in the
    +   * transaction. The transaction should be committed after processing this
    +   * request.
    +   * 
    + * + * optional bool last_statements = 2; + * + * @return Whether the lastStatements field is set. + */ + @java.lang.Override + public boolean hasLastStatements() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Whether to set this request with the last statement option in the
    +   * transaction. The transaction should be committed after processing this
    +   * request.
    +   * 
    + * + * optional bool last_statements = 2; + * + * @return The lastStatements. + */ + @java.lang.Override + public boolean getLastStatements() { + return lastStatements_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < updates_.size(); i++) { + output.writeMessage(1, updates_.get(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeBool(2, lastStatements_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < updates_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, updates_.get(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(2, lastStatements_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.BatchDmlAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.BatchDmlAction other = + (com.google.spanner.executor.v1.BatchDmlAction) obj; + + if (!getUpdatesList().equals(other.getUpdatesList())) return false; + if (hasLastStatements() != other.hasLastStatements()) return false; + if (hasLastStatements()) { + if (getLastStatements() != other.getLastStatements()) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + 
public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getUpdatesCount() > 0) { + hash = (37 * hash) + UPDATES_FIELD_NUMBER; + hash = (53 * hash) + getUpdatesList().hashCode(); + } + if (hasLastStatements()) { + hash = (37 * hash) + LAST_STATEMENTS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getLastStatements()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.BatchDmlAction parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.BatchDmlAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.BatchDmlAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.BatchDmlAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.BatchDmlAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.BatchDmlAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + 
} + + public static com.google.spanner.executor.v1.BatchDmlAction parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.BatchDmlAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.BatchDmlAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.BatchDmlAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.BatchDmlAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.BatchDmlAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.BatchDmlAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + 
@java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Batch of DML statements invoked using batched execution.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.BatchDmlAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.BatchDmlAction) + com.google.spanner.executor.v1.BatchDmlActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_BatchDmlAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_BatchDmlAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.BatchDmlAction.class, + com.google.spanner.executor.v1.BatchDmlAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.BatchDmlAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (updatesBuilder_ == null) { + updates_ = java.util.Collections.emptyList(); + } else { + updates_ = null; + updatesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + lastStatements_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_BatchDmlAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.BatchDmlAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.BatchDmlAction.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.executor.v1.BatchDmlAction build() { + com.google.spanner.executor.v1.BatchDmlAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.BatchDmlAction buildPartial() { + com.google.spanner.executor.v1.BatchDmlAction result = + new com.google.spanner.executor.v1.BatchDmlAction(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.executor.v1.BatchDmlAction result) { + if (updatesBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + updates_ = java.util.Collections.unmodifiableList(updates_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.updates_ = updates_; + } else { + result.updates_ = updatesBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.executor.v1.BatchDmlAction result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.lastStatements_ = lastStatements_; + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.BatchDmlAction) { + return mergeFrom((com.google.spanner.executor.v1.BatchDmlAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.BatchDmlAction other) { + if (other == com.google.spanner.executor.v1.BatchDmlAction.getDefaultInstance()) return this; + if (updatesBuilder_ == null) { + if (!other.updates_.isEmpty()) { + if (updates_.isEmpty()) { + updates_ = other.updates_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureUpdatesIsMutable(); + updates_.addAll(other.updates_); + } + 
onChanged(); + } + } else { + if (!other.updates_.isEmpty()) { + if (updatesBuilder_.isEmpty()) { + updatesBuilder_.dispose(); + updatesBuilder_ = null; + updates_ = other.updates_; + bitField0_ = (bitField0_ & ~0x00000001); + updatesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetUpdatesFieldBuilder() + : null; + } else { + updatesBuilder_.addAllMessages(other.updates_); + } + } + } + if (other.hasLastStatements()) { + setLastStatements(other.getLastStatements()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.executor.v1.QueryAction m = + input.readMessage( + com.google.spanner.executor.v1.QueryAction.parser(), extensionRegistry); + if (updatesBuilder_ == null) { + ensureUpdatesIsMutable(); + updates_.add(m); + } else { + updatesBuilder_.addMessage(m); + } + break; + } // case 10 + case 16: + { + lastStatements_ = input.readBool(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List updates_ = + java.util.Collections.emptyList(); + + private void 
ensureUpdatesIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + updates_ = new java.util.ArrayList(updates_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.QueryAction, + com.google.spanner.executor.v1.QueryAction.Builder, + com.google.spanner.executor.v1.QueryActionOrBuilder> + updatesBuilder_; + + /** + * + * + *
    +     * DML statements.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + public java.util.List getUpdatesList() { + if (updatesBuilder_ == null) { + return java.util.Collections.unmodifiableList(updates_); + } else { + return updatesBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * DML statements.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + public int getUpdatesCount() { + if (updatesBuilder_ == null) { + return updates_.size(); + } else { + return updatesBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * DML statements.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + public com.google.spanner.executor.v1.QueryAction getUpdates(int index) { + if (updatesBuilder_ == null) { + return updates_.get(index); + } else { + return updatesBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * DML statements.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + public Builder setUpdates(int index, com.google.spanner.executor.v1.QueryAction value) { + if (updatesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureUpdatesIsMutable(); + updates_.set(index, value); + onChanged(); + } else { + updatesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * DML statements.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + public Builder setUpdates( + int index, com.google.spanner.executor.v1.QueryAction.Builder builderForValue) { + if (updatesBuilder_ == null) { + ensureUpdatesIsMutable(); + updates_.set(index, builderForValue.build()); + onChanged(); + } else { + updatesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * DML statements.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + public Builder addUpdates(com.google.spanner.executor.v1.QueryAction value) { + if (updatesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureUpdatesIsMutable(); + updates_.add(value); + onChanged(); + } else { + updatesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * DML statements.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + public Builder addUpdates(int index, com.google.spanner.executor.v1.QueryAction value) { + if (updatesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureUpdatesIsMutable(); + updates_.add(index, value); + onChanged(); + } else { + updatesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * DML statements.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + public Builder addUpdates(com.google.spanner.executor.v1.QueryAction.Builder builderForValue) { + if (updatesBuilder_ == null) { + ensureUpdatesIsMutable(); + updates_.add(builderForValue.build()); + onChanged(); + } else { + updatesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * DML statements.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + public Builder addUpdates( + int index, com.google.spanner.executor.v1.QueryAction.Builder builderForValue) { + if (updatesBuilder_ == null) { + ensureUpdatesIsMutable(); + updates_.add(index, builderForValue.build()); + onChanged(); + } else { + updatesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * DML statements.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + public Builder addAllUpdates( + java.lang.Iterable values) { + if (updatesBuilder_ == null) { + ensureUpdatesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, updates_); + onChanged(); + } else { + updatesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * DML statements.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + public Builder clearUpdates() { + if (updatesBuilder_ == null) { + updates_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + updatesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * DML statements.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + public Builder removeUpdates(int index) { + if (updatesBuilder_ == null) { + ensureUpdatesIsMutable(); + updates_.remove(index); + onChanged(); + } else { + updatesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * DML statements.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + public com.google.spanner.executor.v1.QueryAction.Builder getUpdatesBuilder(int index) { + return internalGetUpdatesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * DML statements.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + public com.google.spanner.executor.v1.QueryActionOrBuilder getUpdatesOrBuilder(int index) { + if (updatesBuilder_ == null) { + return updates_.get(index); + } else { + return updatesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * DML statements.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + public java.util.List + getUpdatesOrBuilderList() { + if (updatesBuilder_ != null) { + return updatesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(updates_); + } + } + + /** + * + * + *
    +     * DML statements.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + public com.google.spanner.executor.v1.QueryAction.Builder addUpdatesBuilder() { + return internalGetUpdatesFieldBuilder() + .addBuilder(com.google.spanner.executor.v1.QueryAction.getDefaultInstance()); + } + + /** + * + * + *
    +     * DML statements.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + public com.google.spanner.executor.v1.QueryAction.Builder addUpdatesBuilder(int index) { + return internalGetUpdatesFieldBuilder() + .addBuilder(index, com.google.spanner.executor.v1.QueryAction.getDefaultInstance()); + } + + /** + * + * + *
    +     * DML statements.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + public java.util.List + getUpdatesBuilderList() { + return internalGetUpdatesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.QueryAction, + com.google.spanner.executor.v1.QueryAction.Builder, + com.google.spanner.executor.v1.QueryActionOrBuilder> + internalGetUpdatesFieldBuilder() { + if (updatesBuilder_ == null) { + updatesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.QueryAction, + com.google.spanner.executor.v1.QueryAction.Builder, + com.google.spanner.executor.v1.QueryActionOrBuilder>( + updates_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + updates_ = null; + } + return updatesBuilder_; + } + + private boolean lastStatements_; + + /** + * + * + *
    +     * Whether to set this request with the last statement option in the
    +     * transaction. The transaction should be committed after processing this
    +     * request.
    +     * 
    + * + * optional bool last_statements = 2; + * + * @return Whether the lastStatements field is set. + */ + @java.lang.Override + public boolean hasLastStatements() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Whether to set this request with the last statement option in the
    +     * transaction. The transaction should be committed after processing this
    +     * request.
    +     * 
    + * + * optional bool last_statements = 2; + * + * @return The lastStatements. + */ + @java.lang.Override + public boolean getLastStatements() { + return lastStatements_; + } + + /** + * + * + *
    +     * Whether to set this request with the last statement option in the
    +     * transaction. The transaction should be committed after processing this
    +     * request.
    +     * 
    + * + * optional bool last_statements = 2; + * + * @param value The lastStatements to set. + * @return This builder for chaining. + */ + public Builder setLastStatements(boolean value) { + + lastStatements_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Whether to set this request with the last statement option in the
    +     * transaction. The transaction should be committed after processing this
    +     * request.
    +     * 
    + * + * optional bool last_statements = 2; + * + * @return This builder for chaining. + */ + public Builder clearLastStatements() { + bitField0_ = (bitField0_ & ~0x00000002); + lastStatements_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.BatchDmlAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.BatchDmlAction) + private static final com.google.spanner.executor.v1.BatchDmlAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.BatchDmlAction(); + } + + public static com.google.spanner.executor.v1.BatchDmlAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchDmlAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.BatchDmlAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchDmlActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchDmlActionOrBuilder.java new file mode 100644 index 000000000000..b2eefb53454f --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchDmlActionOrBuilder.java @@ -0,0 +1,114 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface BatchDmlActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.BatchDmlAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * DML statements.
    +   * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + java.util.List getUpdatesList(); + + /** + * + * + *
    +   * DML statements.
    +   * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + com.google.spanner.executor.v1.QueryAction getUpdates(int index); + + /** + * + * + *
    +   * DML statements.
    +   * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + int getUpdatesCount(); + + /** + * + * + *
    +   * DML statements.
    +   * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + java.util.List + getUpdatesOrBuilderList(); + + /** + * + * + *
    +   * DML statements.
    +   * 
    + * + * repeated .google.spanner.executor.v1.QueryAction updates = 1; + */ + com.google.spanner.executor.v1.QueryActionOrBuilder getUpdatesOrBuilder(int index); + + /** + * + * + *
    +   * Whether to set this request with the last statement option in the
    +   * transaction. The transaction should be committed after processing this
    +   * request.
    +   * 
    + * + * optional bool last_statements = 2; + * + * @return Whether the lastStatements field is set. + */ + boolean hasLastStatements(); + + /** + * + * + *
    +   * Whether to set this request with the last statement option in the
    +   * transaction. The transaction should be committed after processing this
    +   * request.
    +   * 
    + * + * optional bool last_statements = 2; + * + * @return The lastStatements. + */ + boolean getLastStatements(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchPartition.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchPartition.java new file mode 100644 index 000000000000..4f68b2ac15d4 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchPartition.java @@ -0,0 +1,1072 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Identifies a database partition generated for a particular read or query. To
    + * read rows from the partition, use ExecutePartitionAction.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.BatchPartition} + */ +@com.google.protobuf.Generated +public final class BatchPartition extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.BatchPartition) + BatchPartitionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BatchPartition"); + } + + // Use BatchPartition.newBuilder() to construct. + private BatchPartition(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BatchPartition() { + partition_ = com.google.protobuf.ByteString.EMPTY; + partitionToken_ = com.google.protobuf.ByteString.EMPTY; + table_ = ""; + index_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_BatchPartition_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_BatchPartition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.BatchPartition.class, + com.google.spanner.executor.v1.BatchPartition.Builder.class); + } + + private int bitField0_; + public static final int PARTITION_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString partition_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * Serialized Partition instance.
    +   * 
    + * + * bytes partition = 1; + * + * @return The partition. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPartition() { + return partition_; + } + + public static final int PARTITION_TOKEN_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString partitionToken_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * The partition token decrypted from partition.
    +   * 
    + * + * bytes partition_token = 2; + * + * @return The partitionToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPartitionToken() { + return partitionToken_; + } + + public static final int TABLE_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object table_ = ""; + + /** + * + * + *
    +   * Table name is set iff the partition was generated for a read (as opposed to
    +   * a query).
    +   * 
    + * + * optional string table = 3; + * + * @return Whether the table field is set. + */ + @java.lang.Override + public boolean hasTable() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Table name is set iff the partition was generated for a read (as opposed to
    +   * a query).
    +   * 
    + * + * optional string table = 3; + * + * @return The table. + */ + @java.lang.Override + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } + } + + /** + * + * + *
    +   * Table name is set iff the partition was generated for a read (as opposed to
    +   * a query).
    +   * 
    + * + * optional string table = 3; + * + * @return The bytes for table. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INDEX_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object index_ = ""; + + /** + * + * + *
    +   * Index name if the partition was generated for an index read.
    +   * 
    + * + * optional string index = 4; + * + * @return Whether the index field is set. + */ + @java.lang.Override + public boolean hasIndex() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Index name if the partition was generated for an index read.
    +   * 
    + * + * optional string index = 4; + * + * @return The index. + */ + @java.lang.Override + public java.lang.String getIndex() { + java.lang.Object ref = index_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + index_ = s; + return s; + } + } + + /** + * + * + *
    +   * Index name if the partition was generated for an index read.
    +   * 
    + * + * optional string index = 4; + * + * @return The bytes for index. + */ + @java.lang.Override + public com.google.protobuf.ByteString getIndexBytes() { + java.lang.Object ref = index_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + index_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!partition_.isEmpty()) { + output.writeBytes(1, partition_); + } + if (!partitionToken_.isEmpty()) { + output.writeBytes(2, partitionToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, table_); + } + if (((bitField0_ & 0x00000002) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, index_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!partition_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, partition_); + } + if (!partitionToken_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(2, partitionToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, table_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, index_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + 
@java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.BatchPartition)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.BatchPartition other = + (com.google.spanner.executor.v1.BatchPartition) obj; + + if (!getPartition().equals(other.getPartition())) return false; + if (!getPartitionToken().equals(other.getPartitionToken())) return false; + if (hasTable() != other.hasTable()) return false; + if (hasTable()) { + if (!getTable().equals(other.getTable())) return false; + } + if (hasIndex() != other.hasIndex()) return false; + if (hasIndex()) { + if (!getIndex().equals(other.getIndex())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARTITION_FIELD_NUMBER; + hash = (53 * hash) + getPartition().hashCode(); + hash = (37 * hash) + PARTITION_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPartitionToken().hashCode(); + if (hasTable()) { + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + } + if (hasIndex()) { + hash = (37 * hash) + INDEX_FIELD_NUMBER; + hash = (53 * hash) + getIndex().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.BatchPartition parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.BatchPartition parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.BatchPartition parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.BatchPartition parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.BatchPartition parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.BatchPartition parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.BatchPartition parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.BatchPartition parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.BatchPartition parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.BatchPartition parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.BatchPartition parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.BatchPartition parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.BatchPartition prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Identifies a database partition generated for a particular read or query. To
    +   * read rows from the partition, use ExecutePartitionAction.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.BatchPartition} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.BatchPartition) + com.google.spanner.executor.v1.BatchPartitionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_BatchPartition_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_BatchPartition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.BatchPartition.class, + com.google.spanner.executor.v1.BatchPartition.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.BatchPartition.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + partition_ = com.google.protobuf.ByteString.EMPTY; + partitionToken_ = com.google.protobuf.ByteString.EMPTY; + table_ = ""; + index_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_BatchPartition_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.BatchPartition getDefaultInstanceForType() { + return com.google.spanner.executor.v1.BatchPartition.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.BatchPartition build() { + com.google.spanner.executor.v1.BatchPartition 
result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.BatchPartition buildPartial() { + com.google.spanner.executor.v1.BatchPartition result = + new com.google.spanner.executor.v1.BatchPartition(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.BatchPartition result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.partition_ = partition_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.partitionToken_ = partitionToken_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.table_ = table_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.index_ = index_; + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.BatchPartition) { + return mergeFrom((com.google.spanner.executor.v1.BatchPartition) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.BatchPartition other) { + if (other == com.google.spanner.executor.v1.BatchPartition.getDefaultInstance()) return this; + if (!other.getPartition().isEmpty()) { + setPartition(other.getPartition()); + } + if (!other.getPartitionToken().isEmpty()) { + setPartitionToken(other.getPartitionToken()); + } + if (other.hasTable()) { + table_ = other.table_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasIndex()) { + index_ = other.index_; + bitField0_ |= 0x00000008; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final 
boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + partition_ = input.readBytes(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + partitionToken_ = input.readBytes(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + table_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + index_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.ByteString partition_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * Serialized Partition instance.
    +     * 
    + * + * bytes partition = 1; + * + * @return The partition. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPartition() { + return partition_; + } + + /** + * + * + *
    +     * Serialized Partition instance.
    +     * 
    + * + * bytes partition = 1; + * + * @param value The partition to set. + * @return This builder for chaining. + */ + public Builder setPartition(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + partition_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Serialized Partition instance.
    +     * 
    + * + * bytes partition = 1; + * + * @return This builder for chaining. + */ + public Builder clearPartition() { + bitField0_ = (bitField0_ & ~0x00000001); + partition_ = getDefaultInstance().getPartition(); + onChanged(); + return this; + } + + private com.google.protobuf.ByteString partitionToken_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * The partition token decrypted from partition.
    +     * 
    + * + * bytes partition_token = 2; + * + * @return The partitionToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPartitionToken() { + return partitionToken_; + } + + /** + * + * + *
    +     * The partition token decrypted from partition.
    +     * 
    + * + * bytes partition_token = 2; + * + * @param value The partitionToken to set. + * @return This builder for chaining. + */ + public Builder setPartitionToken(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + partitionToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The partition token decrypted from partition.
    +     * 
    + * + * bytes partition_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearPartitionToken() { + bitField0_ = (bitField0_ & ~0x00000002); + partitionToken_ = getDefaultInstance().getPartitionToken(); + onChanged(); + return this; + } + + private java.lang.Object table_ = ""; + + /** + * + * + *
    +     * Table name is set iff the partition was generated for a read (as opposed to
    +     * a query).
    +     * 
    + * + * optional string table = 3; + * + * @return Whether the table field is set. + */ + public boolean hasTable() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Table name is set iff the partition was generated for a read (as opposed to
    +     * a query).
    +     * 
    + * + * optional string table = 3; + * + * @return The table. + */ + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Table name is set iff the partition was generated for a read (as opposed to
    +     * a query).
    +     * 
    + * + * optional string table = 3; + * + * @return The bytes for table. + */ + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Table name is set iff the partition was generated for a read (as opposed to
    +     * a query).
    +     * 
    + * + * optional string table = 3; + * + * @param value The table to set. + * @return This builder for chaining. + */ + public Builder setTable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Table name is set iff the partition was generated for a read (as opposed to
    +     * a query).
    +     * 
    + * + * optional string table = 3; + * + * @return This builder for chaining. + */ + public Builder clearTable() { + table_ = getDefaultInstance().getTable(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Table name is set iff the partition was generated for a read (as opposed to
    +     * a query).
    +     * 
    + * + * optional string table = 3; + * + * @param value The bytes for table to set. + * @return This builder for chaining. + */ + public Builder setTableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + table_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.lang.Object index_ = ""; + + /** + * + * + *
    +     * Index name if the partition was generated for an index read.
    +     * 
    + * + * optional string index = 4; + * + * @return Whether the index field is set. + */ + public boolean hasIndex() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Index name if the partition was generated for an index read.
    +     * 
    + * + * optional string index = 4; + * + * @return The index. + */ + public java.lang.String getIndex() { + java.lang.Object ref = index_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + index_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Index name if the partition was generated for an index read.
    +     * 
    + * + * optional string index = 4; + * + * @return The bytes for index. + */ + public com.google.protobuf.ByteString getIndexBytes() { + java.lang.Object ref = index_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + index_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Index name if the partition was generated for an index read.
    +     * 
    + * + * optional string index = 4; + * + * @param value The index to set. + * @return This builder for chaining. + */ + public Builder setIndex(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + index_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Index name if the partition was generated for an index read.
    +     * 
    + * + * optional string index = 4; + * + * @return This builder for chaining. + */ + public Builder clearIndex() { + index_ = getDefaultInstance().getIndex(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Index name if the partition was generated for an index read.
    +     * 
    + * + * optional string index = 4; + * + * @param value The bytes for index to set. + * @return This builder for chaining. + */ + public Builder setIndexBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + index_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.BatchPartition) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.BatchPartition) + private static final com.google.spanner.executor.v1.BatchPartition DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.BatchPartition(); + } + + public static com.google.spanner.executor.v1.BatchPartition getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchPartition parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.spanner.executor.v1.BatchPartition getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchPartitionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchPartitionOrBuilder.java new file mode 100644 index 000000000000..ec2e07dbf6e1 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchPartitionOrBuilder.java @@ -0,0 +1,135 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface BatchPartitionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.BatchPartition) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Serialized Partition instance.
    +   * 
    + * + * bytes partition = 1; + * + * @return The partition. + */ + com.google.protobuf.ByteString getPartition(); + + /** + * + * + *
    +   * The partition token decrypted from partition.
    +   * 
    + * + * bytes partition_token = 2; + * + * @return The partitionToken. + */ + com.google.protobuf.ByteString getPartitionToken(); + + /** + * + * + *
    +   * Table name is set iff the partition was generated for a read (as opposed to
    +   * a query).
    +   * 
    + * + * optional string table = 3; + * + * @return Whether the table field is set. + */ + boolean hasTable(); + + /** + * + * + *
    +   * Table name is set iff the partition was generated for a read (as opposed to
    +   * a query).
    +   * 
    + * + * optional string table = 3; + * + * @return The table. + */ + java.lang.String getTable(); + + /** + * + * + *
    +   * Table name is set iff the partition was generated for a read (as opposed to
    +   * a query).
    +   * 
    + * + * optional string table = 3; + * + * @return The bytes for table. + */ + com.google.protobuf.ByteString getTableBytes(); + + /** + * + * + *
    +   * Index name if the partition was generated for an index read.
    +   * 
    + * + * optional string index = 4; + * + * @return Whether the index field is set. + */ + boolean hasIndex(); + + /** + * + * + *
    +   * Index name if the partition was generated for an index read.
    +   * 
    + * + * optional string index = 4; + * + * @return The index. + */ + java.lang.String getIndex(); + + /** + * + * + *
    +   * Index name if the partition was generated for an index read.
    +   * 
    + * + * optional string index = 4; + * + * @return The bytes for index. + */ + com.google.protobuf.ByteString getIndexBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CancelOperationAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CancelOperationAction.java new file mode 100644 index 000000000000..c278e607f75f --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CancelOperationAction.java @@ -0,0 +1,596 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that cancels an operation.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CancelOperationAction} + */ +@com.google.protobuf.Generated +public final class CancelOperationAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.CancelOperationAction) + CancelOperationActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CancelOperationAction"); + } + + // Use CancelOperationAction.newBuilder() to construct. + private CancelOperationAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CancelOperationAction() { + operation_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CancelOperationAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CancelOperationAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CancelOperationAction.class, + com.google.spanner.executor.v1.CancelOperationAction.Builder.class); + } + + public static final int OPERATION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object operation_ = ""; + + /** + * + * + *
    +   * The name of the operation resource to be cancelled.
    +   * 
    + * + * string operation = 1; + * + * @return The operation. + */ + @java.lang.Override + public java.lang.String getOperation() { + java.lang.Object ref = operation_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + operation_ = s; + return s; + } + } + + /** + * + * + *
    +   * The name of the operation resource to be cancelled.
    +   * 
    + * + * string operation = 1; + * + * @return The bytes for operation. + */ + @java.lang.Override + public com.google.protobuf.ByteString getOperationBytes() { + java.lang.Object ref = operation_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + operation_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(operation_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, operation_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(operation_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, operation_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.CancelOperationAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.CancelOperationAction other = + (com.google.spanner.executor.v1.CancelOperationAction) obj; + + if (!getOperation().equals(other.getOperation())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if 
(memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + OPERATION_FIELD_NUMBER; + hash = (53 * hash) + getOperation().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.CancelOperationAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CancelOperationAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CancelOperationAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CancelOperationAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CancelOperationAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CancelOperationAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CancelOperationAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CancelOperationAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CancelOperationAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CancelOperationAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CancelOperationAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CancelOperationAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.CancelOperationAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that cancels an operation.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CancelOperationAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.CancelOperationAction) + com.google.spanner.executor.v1.CancelOperationActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CancelOperationAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CancelOperationAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CancelOperationAction.class, + com.google.spanner.executor.v1.CancelOperationAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.CancelOperationAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + operation_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CancelOperationAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CancelOperationAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.CancelOperationAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.CancelOperationAction build() { + com.google.spanner.executor.v1.CancelOperationAction result = buildPartial(); + if 
(!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CancelOperationAction buildPartial() { + com.google.spanner.executor.v1.CancelOperationAction result = + new com.google.spanner.executor.v1.CancelOperationAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.CancelOperationAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.operation_ = operation_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.CancelOperationAction) { + return mergeFrom((com.google.spanner.executor.v1.CancelOperationAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.CancelOperationAction other) { + if (other == com.google.spanner.executor.v1.CancelOperationAction.getDefaultInstance()) + return this; + if (!other.getOperation().isEmpty()) { + operation_ = other.operation_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + operation_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if 
(!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object operation_ = ""; + + /** + * + * + *
    +     * The name of the operation resource to be cancelled.
    +     * 
    + * + * string operation = 1; + * + * @return The operation. + */ + public java.lang.String getOperation() { + java.lang.Object ref = operation_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + operation_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The name of the operation resource to be cancelled.
    +     * 
    + * + * string operation = 1; + * + * @return The bytes for operation. + */ + public com.google.protobuf.ByteString getOperationBytes() { + java.lang.Object ref = operation_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + operation_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The name of the operation resource to be cancelled.
    +     * 
    + * + * string operation = 1; + * + * @param value The operation to set. + * @return This builder for chaining. + */ + public Builder setOperation(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + operation_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The name of the operation resource to be cancelled.
    +     * 
    + * + * string operation = 1; + * + * @return This builder for chaining. + */ + public Builder clearOperation() { + operation_ = getDefaultInstance().getOperation(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The name of the operation resource to be cancelled.
    +     * 
    + * + * string operation = 1; + * + * @param value The bytes for operation to set. + * @return This builder for chaining. + */ + public Builder setOperationBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + operation_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.CancelOperationAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.CancelOperationAction) + private static final com.google.spanner.executor.v1.CancelOperationAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.CancelOperationAction(); + } + + public static com.google.spanner.executor.v1.CancelOperationAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CancelOperationAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + 
@java.lang.Override + public com.google.spanner.executor.v1.CancelOperationAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CancelOperationActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CancelOperationActionOrBuilder.java new file mode 100644 index 000000000000..20a22605de18 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CancelOperationActionOrBuilder.java @@ -0,0 +1,54 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface CancelOperationActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.CancelOperationAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The name of the operation resource to be cancelled.
    +   * 
    + * + * string operation = 1; + * + * @return The operation. + */ + java.lang.String getOperation(); + + /** + * + * + *
    +   * The name of the operation resource to be cancelled.
    +   * 
    + * + * string operation = 1; + * + * @return The bytes for operation. + */ + com.google.protobuf.ByteString getOperationBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeQuorumCloudDatabaseAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeQuorumCloudDatabaseAction.java new file mode 100644 index 000000000000..8e7adf8ea953 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeQuorumCloudDatabaseAction.java @@ -0,0 +1,933 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that changes quorum of a Cloud Spanner database.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction} + */ +@com.google.protobuf.Generated +public final class ChangeQuorumCloudDatabaseAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction) + ChangeQuorumCloudDatabaseActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ChangeQuorumCloudDatabaseAction"); + } + + // Use ChangeQuorumCloudDatabaseAction.newBuilder() to construct. + private ChangeQuorumCloudDatabaseAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ChangeQuorumCloudDatabaseAction() { + databaseUri_ = ""; + servingLocations_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ChangeQuorumCloudDatabaseAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ChangeQuorumCloudDatabaseAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction.class, + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction.Builder.class); + } + + private int bitField0_; + public static final int DATABASE_URI_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object databaseUri_ = ""; + + /** + * + * + *
    +   * The fully qualified uri of the database whose quorum has to be changed.
    +   * 
    + * + * optional string database_uri = 1; + * + * @return Whether the databaseUri field is set. + */ + @java.lang.Override + public boolean hasDatabaseUri() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The fully qualified uri of the database whose quorum has to be changed.
    +   * 
    + * + * optional string database_uri = 1; + * + * @return The databaseUri. + */ + @java.lang.Override + public java.lang.String getDatabaseUri() { + java.lang.Object ref = databaseUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseUri_ = s; + return s; + } + } + + /** + * + * + *
    +   * The fully qualified uri of the database whose quorum has to be changed.
    +   * 
    + * + * optional string database_uri = 1; + * + * @return The bytes for databaseUri. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseUriBytes() { + java.lang.Object ref = databaseUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SERVING_LOCATIONS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList servingLocations_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * The locations of the serving regions, e.g. "asia-south1".
    +   * 
    + * + * repeated string serving_locations = 2; + * + * @return A list containing the servingLocations. + */ + public com.google.protobuf.ProtocolStringList getServingLocationsList() { + return servingLocations_; + } + + /** + * + * + *
    +   * The locations of the serving regions, e.g. "asia-south1".
    +   * 
    + * + * repeated string serving_locations = 2; + * + * @return The count of servingLocations. + */ + public int getServingLocationsCount() { + return servingLocations_.size(); + } + + /** + * + * + *
    +   * The locations of the serving regions, e.g. "asia-south1".
    +   * 
    + * + * repeated string serving_locations = 2; + * + * @param index The index of the element to return. + * @return The servingLocations at the given index. + */ + public java.lang.String getServingLocations(int index) { + return servingLocations_.get(index); + } + + /** + * + * + *
    +   * The locations of the serving regions, e.g. "asia-south1".
    +   * 
    + * + * repeated string serving_locations = 2; + * + * @param index The index of the value to return. + * @return The bytes of the servingLocations at the given index. + */ + public com.google.protobuf.ByteString getServingLocationsBytes(int index) { + return servingLocations_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, databaseUri_); + } + for (int i = 0; i < servingLocations_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, servingLocations_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, databaseUri_); + } + { + int dataSize = 0; + for (int i = 0; i < servingLocations_.size(); i++) { + dataSize += computeStringSizeNoTag(servingLocations_.getRaw(i)); + } + size += dataSize; + size += 1 * getServingLocationsList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction other = + (com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction) obj; + + if 
(hasDatabaseUri() != other.hasDatabaseUri()) return false; + if (hasDatabaseUri()) { + if (!getDatabaseUri().equals(other.getDatabaseUri())) return false; + } + if (!getServingLocationsList().equals(other.getServingLocationsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasDatabaseUri()) { + hash = (37 * hash) + DATABASE_URI_FIELD_NUMBER; + hash = (53 * hash) + getDatabaseUri().hashCode(); + } + if (getServingLocationsCount() > 0) { + hash = (37 * hash) + SERVING_LOCATIONS_FIELD_NUMBER; + hash = (53 * hash) + getServingLocationsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that changes quorum of a Cloud Spanner database.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction) + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ChangeQuorumCloudDatabaseAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ChangeQuorumCloudDatabaseAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction.class, + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + databaseUri_ = ""; + servingLocations_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ChangeQuorumCloudDatabaseAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction + getDefaultInstanceForType() { + return com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction.getDefaultInstance(); 
+ } + + @java.lang.Override + public com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction build() { + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction buildPartial() { + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction result = + new com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.databaseUri_ = databaseUri_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + servingLocations_.makeImmutable(); + result.servingLocations_ = servingLocations_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction) { + return mergeFrom((com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction other) { + if (other + == com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction.getDefaultInstance()) + return this; + if (other.hasDatabaseUri()) { + databaseUri_ = other.databaseUri_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.servingLocations_.isEmpty()) { + if (servingLocations_.isEmpty()) { + servingLocations_ = other.servingLocations_; + bitField0_ |= 0x00000002; + } else { + ensureServingLocationsIsMutable(); + 
servingLocations_.addAll(other.servingLocations_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + databaseUri_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureServingLocationsIsMutable(); + servingLocations_.add(s); + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object databaseUri_ = ""; + + /** + * + * + *
    +     * The fully qualified uri of the database whose quorum has to be changed.
    +     * 
    + * + * optional string database_uri = 1; + * + * @return Whether the databaseUri field is set. + */ + public boolean hasDatabaseUri() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * The fully qualified uri of the database whose quorum has to be changed.
    +     * 
    + * + * optional string database_uri = 1; + * + * @return The databaseUri. + */ + public java.lang.String getDatabaseUri() { + java.lang.Object ref = databaseUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The fully qualified uri of the database whose quorum has to be changed.
    +     * 
    + * + * optional string database_uri = 1; + * + * @return The bytes for databaseUri. + */ + public com.google.protobuf.ByteString getDatabaseUriBytes() { + java.lang.Object ref = databaseUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The fully qualified uri of the database whose quorum has to be changed.
    +     * 
    + * + * optional string database_uri = 1; + * + * @param value The databaseUri to set. + * @return This builder for chaining. + */ + public Builder setDatabaseUri(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + databaseUri_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The fully qualified uri of the database whose quorum has to be changed.
    +     * 
    + * + * optional string database_uri = 1; + * + * @return This builder for chaining. + */ + public Builder clearDatabaseUri() { + databaseUri_ = getDefaultInstance().getDatabaseUri(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The fully qualified uri of the database whose quorum has to be changed.
    +     * 
    + * + * optional string database_uri = 1; + * + * @param value The bytes for databaseUri to set. + * @return This builder for chaining. + */ + public Builder setDatabaseUriBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + databaseUri_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList servingLocations_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureServingLocationsIsMutable() { + if (!servingLocations_.isModifiable()) { + servingLocations_ = new com.google.protobuf.LazyStringArrayList(servingLocations_); + } + bitField0_ |= 0x00000002; + } + + /** + * + * + *
    +     * The locations of the serving regions, e.g. "asia-south1".
    +     * 
    + * + * repeated string serving_locations = 2; + * + * @return A list containing the servingLocations. + */ + public com.google.protobuf.ProtocolStringList getServingLocationsList() { + servingLocations_.makeImmutable(); + return servingLocations_; + } + + /** + * + * + *
    +     * The locations of the serving regions, e.g. "asia-south1".
    +     * 
    + * + * repeated string serving_locations = 2; + * + * @return The count of servingLocations. + */ + public int getServingLocationsCount() { + return servingLocations_.size(); + } + + /** + * + * + *
    +     * The locations of the serving regions, e.g. "asia-south1".
    +     * 
    + * + * repeated string serving_locations = 2; + * + * @param index The index of the element to return. + * @return The servingLocations at the given index. + */ + public java.lang.String getServingLocations(int index) { + return servingLocations_.get(index); + } + + /** + * + * + *
    +     * The locations of the serving regions, e.g. "asia-south1".
    +     * 
    + * + * repeated string serving_locations = 2; + * + * @param index The index of the value to return. + * @return The bytes of the servingLocations at the given index. + */ + public com.google.protobuf.ByteString getServingLocationsBytes(int index) { + return servingLocations_.getByteString(index); + } + + /** + * + * + *
    +     * The locations of the serving regions, e.g. "asia-south1".
    +     * 
    + * + * repeated string serving_locations = 2; + * + * @param index The index to set the value at. + * @param value The servingLocations to set. + * @return This builder for chaining. + */ + public Builder setServingLocations(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureServingLocationsIsMutable(); + servingLocations_.set(index, value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The locations of the serving regions, e.g. "asia-south1".
    +     * 
    + * + * repeated string serving_locations = 2; + * + * @param value The servingLocations to add. + * @return This builder for chaining. + */ + public Builder addServingLocations(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureServingLocationsIsMutable(); + servingLocations_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The locations of the serving regions, e.g. "asia-south1".
    +     * 
    + * + * repeated string serving_locations = 2; + * + * @param values The servingLocations to add. + * @return This builder for chaining. + */ + public Builder addAllServingLocations(java.lang.Iterable values) { + ensureServingLocationsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, servingLocations_); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The locations of the serving regions, e.g. "asia-south1".
    +     * 
    + * + * repeated string serving_locations = 2; + * + * @return This builder for chaining. + */ + public Builder clearServingLocations() { + servingLocations_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The locations of the serving regions, e.g. "asia-south1".
    +     * 
    + * + * repeated string serving_locations = 2; + * + * @param value The bytes of the servingLocations to add. + * @return This builder for chaining. + */ + public Builder addServingLocationsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureServingLocationsIsMutable(); + servingLocations_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction) + private static final com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction(); + } + + public static com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ChangeQuorumCloudDatabaseAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser 
parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeQuorumCloudDatabaseActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeQuorumCloudDatabaseActionOrBuilder.java new file mode 100644 index 000000000000..59c996dff72e --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeQuorumCloudDatabaseActionOrBuilder.java @@ -0,0 +1,121 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface ChangeQuorumCloudDatabaseActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.ChangeQuorumCloudDatabaseAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The fully qualified uri of the database whose quorum has to be changed.
    +   * 
    + * + * optional string database_uri = 1; + * + * @return Whether the databaseUri field is set. + */ + boolean hasDatabaseUri(); + + /** + * + * + *
    +   * The fully qualified uri of the database whose quorum has to be changed.
    +   * 
    + * + * optional string database_uri = 1; + * + * @return The databaseUri. + */ + java.lang.String getDatabaseUri(); + + /** + * + * + *
    +   * The fully qualified uri of the database whose quorum has to be changed.
    +   * 
    + * + * optional string database_uri = 1; + * + * @return The bytes for databaseUri. + */ + com.google.protobuf.ByteString getDatabaseUriBytes(); + + /** + * + * + *
    +   * The locations of the serving regions, e.g. "asia-south1".
    +   * 
    + * + * repeated string serving_locations = 2; + * + * @return A list containing the servingLocations. + */ + java.util.List getServingLocationsList(); + + /** + * + * + *
    +   * The locations of the serving regions, e.g. "asia-south1".
    +   * 
    + * + * repeated string serving_locations = 2; + * + * @return The count of servingLocations. + */ + int getServingLocationsCount(); + + /** + * + * + *
    +   * The locations of the serving regions, e.g. "asia-south1".
    +   * 
    + * + * repeated string serving_locations = 2; + * + * @param index The index of the element to return. + * @return The servingLocations at the given index. + */ + java.lang.String getServingLocations(int index); + + /** + * + * + *
    +   * The locations of the serving regions, e.g. "asia-south1".
    +   * 
    + * + * repeated string serving_locations = 2; + * + * @param index The index of the value to return. + * @return The bytes of the servingLocations at the given index. + */ + com.google.protobuf.ByteString getServingLocationsBytes(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeStreamRecord.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeStreamRecord.java new file mode 100644 index 000000000000..e0084b3729ec --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeStreamRecord.java @@ -0,0 +1,1418 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Raw ChangeStream records.
    + * Encodes one of: DataChangeRecord, HeartbeatRecord, ChildPartitionsRecord
    + * returned from the ChangeStream API.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ChangeStreamRecord} + */ +@com.google.protobuf.Generated +public final class ChangeStreamRecord extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.ChangeStreamRecord) + ChangeStreamRecordOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ChangeStreamRecord"); + } + + // Use ChangeStreamRecord.newBuilder() to construct. + private ChangeStreamRecord(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ChangeStreamRecord() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ChangeStreamRecord_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ChangeStreamRecord_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ChangeStreamRecord.class, + com.google.spanner.executor.v1.ChangeStreamRecord.Builder.class); + } + + private int recordCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object record_; + + public enum RecordCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + DATA_CHANGE(1), + CHILD_PARTITION(2), + HEARTBEAT(3), + RECORD_NOT_SET(0); + private final int value; + + private RecordCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. 
+ * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static RecordCase valueOf(int value) { + return forNumber(value); + } + + public static RecordCase forNumber(int value) { + switch (value) { + case 1: + return DATA_CHANGE; + case 2: + return CHILD_PARTITION; + case 3: + return HEARTBEAT; + case 0: + return RECORD_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public RecordCase getRecordCase() { + return RecordCase.forNumber(recordCase_); + } + + public static final int DATA_CHANGE_FIELD_NUMBER = 1; + + /** + * + * + *
    +   * Data change record.
    +   * 
    + * + * .google.spanner.executor.v1.DataChangeRecord data_change = 1; + * + * @return Whether the dataChange field is set. + */ + @java.lang.Override + public boolean hasDataChange() { + return recordCase_ == 1; + } + + /** + * + * + *
    +   * Data change record.
    +   * 
    + * + * .google.spanner.executor.v1.DataChangeRecord data_change = 1; + * + * @return The dataChange. + */ + @java.lang.Override + public com.google.spanner.executor.v1.DataChangeRecord getDataChange() { + if (recordCase_ == 1) { + return (com.google.spanner.executor.v1.DataChangeRecord) record_; + } + return com.google.spanner.executor.v1.DataChangeRecord.getDefaultInstance(); + } + + /** + * + * + *
    +   * Data change record.
    +   * 
    + * + * .google.spanner.executor.v1.DataChangeRecord data_change = 1; + */ + @java.lang.Override + public com.google.spanner.executor.v1.DataChangeRecordOrBuilder getDataChangeOrBuilder() { + if (recordCase_ == 1) { + return (com.google.spanner.executor.v1.DataChangeRecord) record_; + } + return com.google.spanner.executor.v1.DataChangeRecord.getDefaultInstance(); + } + + public static final int CHILD_PARTITION_FIELD_NUMBER = 2; + + /** + * + * + *
    +   * Child partitions record.
    +   * 
    + * + * .google.spanner.executor.v1.ChildPartitionsRecord child_partition = 2; + * + * @return Whether the childPartition field is set. + */ + @java.lang.Override + public boolean hasChildPartition() { + return recordCase_ == 2; + } + + /** + * + * + *
    +   * Child partitions record.
    +   * 
    + * + * .google.spanner.executor.v1.ChildPartitionsRecord child_partition = 2; + * + * @return The childPartition. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ChildPartitionsRecord getChildPartition() { + if (recordCase_ == 2) { + return (com.google.spanner.executor.v1.ChildPartitionsRecord) record_; + } + return com.google.spanner.executor.v1.ChildPartitionsRecord.getDefaultInstance(); + } + + /** + * + * + *
    +   * Child partitions record.
    +   * 
    + * + * .google.spanner.executor.v1.ChildPartitionsRecord child_partition = 2; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ChildPartitionsRecordOrBuilder + getChildPartitionOrBuilder() { + if (recordCase_ == 2) { + return (com.google.spanner.executor.v1.ChildPartitionsRecord) record_; + } + return com.google.spanner.executor.v1.ChildPartitionsRecord.getDefaultInstance(); + } + + public static final int HEARTBEAT_FIELD_NUMBER = 3; + + /** + * + * + *
    +   * Heartbeat record.
    +   * 
    + * + * .google.spanner.executor.v1.HeartbeatRecord heartbeat = 3; + * + * @return Whether the heartbeat field is set. + */ + @java.lang.Override + public boolean hasHeartbeat() { + return recordCase_ == 3; + } + + /** + * + * + *
    +   * Heartbeat record.
    +   * 
    + * + * .google.spanner.executor.v1.HeartbeatRecord heartbeat = 3; + * + * @return The heartbeat. + */ + @java.lang.Override + public com.google.spanner.executor.v1.HeartbeatRecord getHeartbeat() { + if (recordCase_ == 3) { + return (com.google.spanner.executor.v1.HeartbeatRecord) record_; + } + return com.google.spanner.executor.v1.HeartbeatRecord.getDefaultInstance(); + } + + /** + * + * + *
    +   * Heartbeat record.
    +   * 
    + * + * .google.spanner.executor.v1.HeartbeatRecord heartbeat = 3; + */ + @java.lang.Override + public com.google.spanner.executor.v1.HeartbeatRecordOrBuilder getHeartbeatOrBuilder() { + if (recordCase_ == 3) { + return (com.google.spanner.executor.v1.HeartbeatRecord) record_; + } + return com.google.spanner.executor.v1.HeartbeatRecord.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (recordCase_ == 1) { + output.writeMessage(1, (com.google.spanner.executor.v1.DataChangeRecord) record_); + } + if (recordCase_ == 2) { + output.writeMessage(2, (com.google.spanner.executor.v1.ChildPartitionsRecord) record_); + } + if (recordCase_ == 3) { + output.writeMessage(3, (com.google.spanner.executor.v1.HeartbeatRecord) record_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (recordCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, (com.google.spanner.executor.v1.DataChangeRecord) record_); + } + if (recordCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, (com.google.spanner.executor.v1.ChildPartitionsRecord) record_); + } + if (recordCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, (com.google.spanner.executor.v1.HeartbeatRecord) record_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return 
true; + } + if (!(obj instanceof com.google.spanner.executor.v1.ChangeStreamRecord)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.ChangeStreamRecord other = + (com.google.spanner.executor.v1.ChangeStreamRecord) obj; + + if (!getRecordCase().equals(other.getRecordCase())) return false; + switch (recordCase_) { + case 1: + if (!getDataChange().equals(other.getDataChange())) return false; + break; + case 2: + if (!getChildPartition().equals(other.getChildPartition())) return false; + break; + case 3: + if (!getHeartbeat().equals(other.getHeartbeat())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (recordCase_) { + case 1: + hash = (37 * hash) + DATA_CHANGE_FIELD_NUMBER; + hash = (53 * hash) + getDataChange().hashCode(); + break; + case 2: + hash = (37 * hash) + CHILD_PARTITION_FIELD_NUMBER; + hash = (53 * hash) + getChildPartition().hashCode(); + break; + case 3: + hash = (37 * hash) + HEARTBEAT_FIELD_NUMBER; + hash = (53 * hash) + getHeartbeat().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.ChangeStreamRecord parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ChangeStreamRecord parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ChangeStreamRecord parseFrom( + 
com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ChangeStreamRecord parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ChangeStreamRecord parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ChangeStreamRecord parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ChangeStreamRecord parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ChangeStreamRecord parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ChangeStreamRecord parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ChangeStreamRecord parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } 
+ + public static com.google.spanner.executor.v1.ChangeStreamRecord parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ChangeStreamRecord parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.ChangeStreamRecord prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Raw ChangeStream records.
    +   * Encodes one of: DataChangeRecord, HeartbeatRecord, ChildPartitionsRecord
    +   * returned from the ChangeStream API.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ChangeStreamRecord} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.ChangeStreamRecord) + com.google.spanner.executor.v1.ChangeStreamRecordOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ChangeStreamRecord_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ChangeStreamRecord_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ChangeStreamRecord.class, + com.google.spanner.executor.v1.ChangeStreamRecord.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.ChangeStreamRecord.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (dataChangeBuilder_ != null) { + dataChangeBuilder_.clear(); + } + if (childPartitionBuilder_ != null) { + childPartitionBuilder_.clear(); + } + if (heartbeatBuilder_ != null) { + heartbeatBuilder_.clear(); + } + recordCase_ = 0; + record_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ChangeStreamRecord_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ChangeStreamRecord getDefaultInstanceForType() { + return 
com.google.spanner.executor.v1.ChangeStreamRecord.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.ChangeStreamRecord build() { + com.google.spanner.executor.v1.ChangeStreamRecord result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ChangeStreamRecord buildPartial() { + com.google.spanner.executor.v1.ChangeStreamRecord result = + new com.google.spanner.executor.v1.ChangeStreamRecord(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.ChangeStreamRecord result) { + int from_bitField0_ = bitField0_; + } + + private void buildPartialOneofs(com.google.spanner.executor.v1.ChangeStreamRecord result) { + result.recordCase_ = recordCase_; + result.record_ = this.record_; + if (recordCase_ == 1 && dataChangeBuilder_ != null) { + result.record_ = dataChangeBuilder_.build(); + } + if (recordCase_ == 2 && childPartitionBuilder_ != null) { + result.record_ = childPartitionBuilder_.build(); + } + if (recordCase_ == 3 && heartbeatBuilder_ != null) { + result.record_ = heartbeatBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.ChangeStreamRecord) { + return mergeFrom((com.google.spanner.executor.v1.ChangeStreamRecord) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.ChangeStreamRecord other) { + if (other == com.google.spanner.executor.v1.ChangeStreamRecord.getDefaultInstance()) + return this; + switch (other.getRecordCase()) { + case DATA_CHANGE: + { + mergeDataChange(other.getDataChange()); + break; + } + case CHILD_PARTITION: + { + 
mergeChildPartition(other.getChildPartition()); + break; + } + case HEARTBEAT: + { + mergeHeartbeat(other.getHeartbeat()); + break; + } + case RECORD_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetDataChangeFieldBuilder().getBuilder(), extensionRegistry); + recordCase_ = 1; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetChildPartitionFieldBuilder().getBuilder(), extensionRegistry); + recordCase_ = 2; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetHeartbeatFieldBuilder().getBuilder(), extensionRegistry); + recordCase_ = 3; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int recordCase_ = 0; + private java.lang.Object record_; + + public RecordCase getRecordCase() { + return RecordCase.forNumber(recordCase_); + } + + public Builder clearRecord() { + recordCase_ = 0; + record_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.DataChangeRecord, + 
com.google.spanner.executor.v1.DataChangeRecord.Builder, + com.google.spanner.executor.v1.DataChangeRecordOrBuilder> + dataChangeBuilder_; + + /** + * + * + *
    +     * Data change record.
    +     * 
    + * + * .google.spanner.executor.v1.DataChangeRecord data_change = 1; + * + * @return Whether the dataChange field is set. + */ + @java.lang.Override + public boolean hasDataChange() { + return recordCase_ == 1; + } + + /** + * + * + *
    +     * Data change record.
    +     * 
    + * + * .google.spanner.executor.v1.DataChangeRecord data_change = 1; + * + * @return The dataChange. + */ + @java.lang.Override + public com.google.spanner.executor.v1.DataChangeRecord getDataChange() { + if (dataChangeBuilder_ == null) { + if (recordCase_ == 1) { + return (com.google.spanner.executor.v1.DataChangeRecord) record_; + } + return com.google.spanner.executor.v1.DataChangeRecord.getDefaultInstance(); + } else { + if (recordCase_ == 1) { + return dataChangeBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.DataChangeRecord.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Data change record.
    +     * 
    + * + * .google.spanner.executor.v1.DataChangeRecord data_change = 1; + */ + public Builder setDataChange(com.google.spanner.executor.v1.DataChangeRecord value) { + if (dataChangeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + record_ = value; + onChanged(); + } else { + dataChangeBuilder_.setMessage(value); + } + recordCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Data change record.
    +     * 
    + * + * .google.spanner.executor.v1.DataChangeRecord data_change = 1; + */ + public Builder setDataChange( + com.google.spanner.executor.v1.DataChangeRecord.Builder builderForValue) { + if (dataChangeBuilder_ == null) { + record_ = builderForValue.build(); + onChanged(); + } else { + dataChangeBuilder_.setMessage(builderForValue.build()); + } + recordCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Data change record.
    +     * 
    + * + * .google.spanner.executor.v1.DataChangeRecord data_change = 1; + */ + public Builder mergeDataChange(com.google.spanner.executor.v1.DataChangeRecord value) { + if (dataChangeBuilder_ == null) { + if (recordCase_ == 1 + && record_ != com.google.spanner.executor.v1.DataChangeRecord.getDefaultInstance()) { + record_ = + com.google.spanner.executor.v1.DataChangeRecord.newBuilder( + (com.google.spanner.executor.v1.DataChangeRecord) record_) + .mergeFrom(value) + .buildPartial(); + } else { + record_ = value; + } + onChanged(); + } else { + if (recordCase_ == 1) { + dataChangeBuilder_.mergeFrom(value); + } else { + dataChangeBuilder_.setMessage(value); + } + } + recordCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Data change record.
    +     * 
    + * + * .google.spanner.executor.v1.DataChangeRecord data_change = 1; + */ + public Builder clearDataChange() { + if (dataChangeBuilder_ == null) { + if (recordCase_ == 1) { + recordCase_ = 0; + record_ = null; + onChanged(); + } + } else { + if (recordCase_ == 1) { + recordCase_ = 0; + record_ = null; + } + dataChangeBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Data change record.
    +     * 
    + * + * .google.spanner.executor.v1.DataChangeRecord data_change = 1; + */ + public com.google.spanner.executor.v1.DataChangeRecord.Builder getDataChangeBuilder() { + return internalGetDataChangeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Data change record.
    +     * 
    + * + * .google.spanner.executor.v1.DataChangeRecord data_change = 1; + */ + @java.lang.Override + public com.google.spanner.executor.v1.DataChangeRecordOrBuilder getDataChangeOrBuilder() { + if ((recordCase_ == 1) && (dataChangeBuilder_ != null)) { + return dataChangeBuilder_.getMessageOrBuilder(); + } else { + if (recordCase_ == 1) { + return (com.google.spanner.executor.v1.DataChangeRecord) record_; + } + return com.google.spanner.executor.v1.DataChangeRecord.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Data change record.
    +     * 
    + * + * .google.spanner.executor.v1.DataChangeRecord data_change = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.DataChangeRecord, + com.google.spanner.executor.v1.DataChangeRecord.Builder, + com.google.spanner.executor.v1.DataChangeRecordOrBuilder> + internalGetDataChangeFieldBuilder() { + if (dataChangeBuilder_ == null) { + if (!(recordCase_ == 1)) { + record_ = com.google.spanner.executor.v1.DataChangeRecord.getDefaultInstance(); + } + dataChangeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.DataChangeRecord, + com.google.spanner.executor.v1.DataChangeRecord.Builder, + com.google.spanner.executor.v1.DataChangeRecordOrBuilder>( + (com.google.spanner.executor.v1.DataChangeRecord) record_, + getParentForChildren(), + isClean()); + record_ = null; + } + recordCase_ = 1; + onChanged(); + return dataChangeBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ChildPartitionsRecord, + com.google.spanner.executor.v1.ChildPartitionsRecord.Builder, + com.google.spanner.executor.v1.ChildPartitionsRecordOrBuilder> + childPartitionBuilder_; + + /** + * + * + *
    +     * Child partitions record.
    +     * 
    + * + * .google.spanner.executor.v1.ChildPartitionsRecord child_partition = 2; + * + * @return Whether the childPartition field is set. + */ + @java.lang.Override + public boolean hasChildPartition() { + return recordCase_ == 2; + } + + /** + * + * + *
    +     * Child partitions record.
    +     * 
    + * + * .google.spanner.executor.v1.ChildPartitionsRecord child_partition = 2; + * + * @return The childPartition. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ChildPartitionsRecord getChildPartition() { + if (childPartitionBuilder_ == null) { + if (recordCase_ == 2) { + return (com.google.spanner.executor.v1.ChildPartitionsRecord) record_; + } + return com.google.spanner.executor.v1.ChildPartitionsRecord.getDefaultInstance(); + } else { + if (recordCase_ == 2) { + return childPartitionBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.ChildPartitionsRecord.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Child partitions record.
    +     * 
    + * + * .google.spanner.executor.v1.ChildPartitionsRecord child_partition = 2; + */ + public Builder setChildPartition(com.google.spanner.executor.v1.ChildPartitionsRecord value) { + if (childPartitionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + record_ = value; + onChanged(); + } else { + childPartitionBuilder_.setMessage(value); + } + recordCase_ = 2; + return this; + } + + /** + * + * + *
    +     * Child partitions record.
    +     * 
    + * + * .google.spanner.executor.v1.ChildPartitionsRecord child_partition = 2; + */ + public Builder setChildPartition( + com.google.spanner.executor.v1.ChildPartitionsRecord.Builder builderForValue) { + if (childPartitionBuilder_ == null) { + record_ = builderForValue.build(); + onChanged(); + } else { + childPartitionBuilder_.setMessage(builderForValue.build()); + } + recordCase_ = 2; + return this; + } + + /** + * + * + *
    +     * Child partitions record.
    +     * 
    + * + * .google.spanner.executor.v1.ChildPartitionsRecord child_partition = 2; + */ + public Builder mergeChildPartition(com.google.spanner.executor.v1.ChildPartitionsRecord value) { + if (childPartitionBuilder_ == null) { + if (recordCase_ == 2 + && record_ + != com.google.spanner.executor.v1.ChildPartitionsRecord.getDefaultInstance()) { + record_ = + com.google.spanner.executor.v1.ChildPartitionsRecord.newBuilder( + (com.google.spanner.executor.v1.ChildPartitionsRecord) record_) + .mergeFrom(value) + .buildPartial(); + } else { + record_ = value; + } + onChanged(); + } else { + if (recordCase_ == 2) { + childPartitionBuilder_.mergeFrom(value); + } else { + childPartitionBuilder_.setMessage(value); + } + } + recordCase_ = 2; + return this; + } + + /** + * + * + *
    +     * Child partitions record.
    +     * 
    + * + * .google.spanner.executor.v1.ChildPartitionsRecord child_partition = 2; + */ + public Builder clearChildPartition() { + if (childPartitionBuilder_ == null) { + if (recordCase_ == 2) { + recordCase_ = 0; + record_ = null; + onChanged(); + } + } else { + if (recordCase_ == 2) { + recordCase_ = 0; + record_ = null; + } + childPartitionBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Child partitions record.
    +     * 
    + * + * .google.spanner.executor.v1.ChildPartitionsRecord child_partition = 2; + */ + public com.google.spanner.executor.v1.ChildPartitionsRecord.Builder getChildPartitionBuilder() { + return internalGetChildPartitionFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Child partitions record.
    +     * 
    + * + * .google.spanner.executor.v1.ChildPartitionsRecord child_partition = 2; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ChildPartitionsRecordOrBuilder + getChildPartitionOrBuilder() { + if ((recordCase_ == 2) && (childPartitionBuilder_ != null)) { + return childPartitionBuilder_.getMessageOrBuilder(); + } else { + if (recordCase_ == 2) { + return (com.google.spanner.executor.v1.ChildPartitionsRecord) record_; + } + return com.google.spanner.executor.v1.ChildPartitionsRecord.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Child partitions record.
    +     * 
    + * + * .google.spanner.executor.v1.ChildPartitionsRecord child_partition = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ChildPartitionsRecord, + com.google.spanner.executor.v1.ChildPartitionsRecord.Builder, + com.google.spanner.executor.v1.ChildPartitionsRecordOrBuilder> + internalGetChildPartitionFieldBuilder() { + if (childPartitionBuilder_ == null) { + if (!(recordCase_ == 2)) { + record_ = com.google.spanner.executor.v1.ChildPartitionsRecord.getDefaultInstance(); + } + childPartitionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ChildPartitionsRecord, + com.google.spanner.executor.v1.ChildPartitionsRecord.Builder, + com.google.spanner.executor.v1.ChildPartitionsRecordOrBuilder>( + (com.google.spanner.executor.v1.ChildPartitionsRecord) record_, + getParentForChildren(), + isClean()); + record_ = null; + } + recordCase_ = 2; + onChanged(); + return childPartitionBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.HeartbeatRecord, + com.google.spanner.executor.v1.HeartbeatRecord.Builder, + com.google.spanner.executor.v1.HeartbeatRecordOrBuilder> + heartbeatBuilder_; + + /** + * + * + *
    +     * Heartbeat record.
    +     * 
    + * + * .google.spanner.executor.v1.HeartbeatRecord heartbeat = 3; + * + * @return Whether the heartbeat field is set. + */ + @java.lang.Override + public boolean hasHeartbeat() { + return recordCase_ == 3; + } + + /** + * + * + *
    +     * Heartbeat record.
    +     * 
    + * + * .google.spanner.executor.v1.HeartbeatRecord heartbeat = 3; + * + * @return The heartbeat. + */ + @java.lang.Override + public com.google.spanner.executor.v1.HeartbeatRecord getHeartbeat() { + if (heartbeatBuilder_ == null) { + if (recordCase_ == 3) { + return (com.google.spanner.executor.v1.HeartbeatRecord) record_; + } + return com.google.spanner.executor.v1.HeartbeatRecord.getDefaultInstance(); + } else { + if (recordCase_ == 3) { + return heartbeatBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.HeartbeatRecord.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Heartbeat record.
    +     * 
    + * + * .google.spanner.executor.v1.HeartbeatRecord heartbeat = 3; + */ + public Builder setHeartbeat(com.google.spanner.executor.v1.HeartbeatRecord value) { + if (heartbeatBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + record_ = value; + onChanged(); + } else { + heartbeatBuilder_.setMessage(value); + } + recordCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Heartbeat record.
    +     * 
    + * + * .google.spanner.executor.v1.HeartbeatRecord heartbeat = 3; + */ + public Builder setHeartbeat( + com.google.spanner.executor.v1.HeartbeatRecord.Builder builderForValue) { + if (heartbeatBuilder_ == null) { + record_ = builderForValue.build(); + onChanged(); + } else { + heartbeatBuilder_.setMessage(builderForValue.build()); + } + recordCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Heartbeat record.
    +     * 
    + * + * .google.spanner.executor.v1.HeartbeatRecord heartbeat = 3; + */ + public Builder mergeHeartbeat(com.google.spanner.executor.v1.HeartbeatRecord value) { + if (heartbeatBuilder_ == null) { + if (recordCase_ == 3 + && record_ != com.google.spanner.executor.v1.HeartbeatRecord.getDefaultInstance()) { + record_ = + com.google.spanner.executor.v1.HeartbeatRecord.newBuilder( + (com.google.spanner.executor.v1.HeartbeatRecord) record_) + .mergeFrom(value) + .buildPartial(); + } else { + record_ = value; + } + onChanged(); + } else { + if (recordCase_ == 3) { + heartbeatBuilder_.mergeFrom(value); + } else { + heartbeatBuilder_.setMessage(value); + } + } + recordCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Heartbeat record.
    +     * 
    + * + * .google.spanner.executor.v1.HeartbeatRecord heartbeat = 3; + */ + public Builder clearHeartbeat() { + if (heartbeatBuilder_ == null) { + if (recordCase_ == 3) { + recordCase_ = 0; + record_ = null; + onChanged(); + } + } else { + if (recordCase_ == 3) { + recordCase_ = 0; + record_ = null; + } + heartbeatBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Heartbeat record.
    +     * 
    + * + * .google.spanner.executor.v1.HeartbeatRecord heartbeat = 3; + */ + public com.google.spanner.executor.v1.HeartbeatRecord.Builder getHeartbeatBuilder() { + return internalGetHeartbeatFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Heartbeat record.
    +     * 
    + * + * .google.spanner.executor.v1.HeartbeatRecord heartbeat = 3; + */ + @java.lang.Override + public com.google.spanner.executor.v1.HeartbeatRecordOrBuilder getHeartbeatOrBuilder() { + if ((recordCase_ == 3) && (heartbeatBuilder_ != null)) { + return heartbeatBuilder_.getMessageOrBuilder(); + } else { + if (recordCase_ == 3) { + return (com.google.spanner.executor.v1.HeartbeatRecord) record_; + } + return com.google.spanner.executor.v1.HeartbeatRecord.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Heartbeat record.
    +     * 
    + * + * .google.spanner.executor.v1.HeartbeatRecord heartbeat = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.HeartbeatRecord, + com.google.spanner.executor.v1.HeartbeatRecord.Builder, + com.google.spanner.executor.v1.HeartbeatRecordOrBuilder> + internalGetHeartbeatFieldBuilder() { + if (heartbeatBuilder_ == null) { + if (!(recordCase_ == 3)) { + record_ = com.google.spanner.executor.v1.HeartbeatRecord.getDefaultInstance(); + } + heartbeatBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.HeartbeatRecord, + com.google.spanner.executor.v1.HeartbeatRecord.Builder, + com.google.spanner.executor.v1.HeartbeatRecordOrBuilder>( + (com.google.spanner.executor.v1.HeartbeatRecord) record_, + getParentForChildren(), + isClean()); + record_ = null; + } + recordCase_ = 3; + onChanged(); + return heartbeatBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.ChangeStreamRecord) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.ChangeStreamRecord) + private static final com.google.spanner.executor.v1.ChangeStreamRecord DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.ChangeStreamRecord(); + } + + public static com.google.spanner.executor.v1.ChangeStreamRecord getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ChangeStreamRecord parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch 
(com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ChangeStreamRecord getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeStreamRecordOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeStreamRecordOrBuilder.java new file mode 100644 index 000000000000..ea4777d9ba04 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeStreamRecordOrBuilder.java @@ -0,0 +1,141 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface ChangeStreamRecordOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.ChangeStreamRecord) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Data change record.
    +   * 
    + * + * .google.spanner.executor.v1.DataChangeRecord data_change = 1; + * + * @return Whether the dataChange field is set. + */ + boolean hasDataChange(); + + /** + * + * + *
    +   * Data change record.
    +   * 
    + * + * .google.spanner.executor.v1.DataChangeRecord data_change = 1; + * + * @return The dataChange. + */ + com.google.spanner.executor.v1.DataChangeRecord getDataChange(); + + /** + * + * + *
    +   * Data change record.
    +   * 
    + * + * .google.spanner.executor.v1.DataChangeRecord data_change = 1; + */ + com.google.spanner.executor.v1.DataChangeRecordOrBuilder getDataChangeOrBuilder(); + + /** + * + * + *
    +   * Child partitions record.
    +   * 
    + * + * .google.spanner.executor.v1.ChildPartitionsRecord child_partition = 2; + * + * @return Whether the childPartition field is set. + */ + boolean hasChildPartition(); + + /** + * + * + *
    +   * Child partitions record.
    +   * 
    + * + * .google.spanner.executor.v1.ChildPartitionsRecord child_partition = 2; + * + * @return The childPartition. + */ + com.google.spanner.executor.v1.ChildPartitionsRecord getChildPartition(); + + /** + * + * + *
    +   * Child partitions record.
    +   * 
    + * + * .google.spanner.executor.v1.ChildPartitionsRecord child_partition = 2; + */ + com.google.spanner.executor.v1.ChildPartitionsRecordOrBuilder getChildPartitionOrBuilder(); + + /** + * + * + *
    +   * Heartbeat record.
    +   * 
    + * + * .google.spanner.executor.v1.HeartbeatRecord heartbeat = 3; + * + * @return Whether the heartbeat field is set. + */ + boolean hasHeartbeat(); + + /** + * + * + *
    +   * Heartbeat record.
    +   * 
    + * + * .google.spanner.executor.v1.HeartbeatRecord heartbeat = 3; + * + * @return The heartbeat. + */ + com.google.spanner.executor.v1.HeartbeatRecord getHeartbeat(); + + /** + * + * + *
    +   * Heartbeat record.
    +   * 
    + * + * .google.spanner.executor.v1.HeartbeatRecord heartbeat = 3; + */ + com.google.spanner.executor.v1.HeartbeatRecordOrBuilder getHeartbeatOrBuilder(); + + com.google.spanner.executor.v1.ChangeStreamRecord.RecordCase getRecordCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChildPartitionsRecord.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChildPartitionsRecord.java new file mode 100644 index 000000000000..de5b768d3c6b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChildPartitionsRecord.java @@ -0,0 +1,2478 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * ChangeStream child partition record.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ChildPartitionsRecord} + */ +@com.google.protobuf.Generated +public final class ChildPartitionsRecord extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.ChildPartitionsRecord) + ChildPartitionsRecordOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ChildPartitionsRecord"); + } + + // Use ChildPartitionsRecord.newBuilder() to construct. + private ChildPartitionsRecord(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ChildPartitionsRecord() { + recordSequence_ = ""; + childPartitions_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ChildPartitionsRecord_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ChildPartitionsRecord_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ChildPartitionsRecord.class, + com.google.spanner.executor.v1.ChildPartitionsRecord.Builder.class); + } + + public interface ChildPartitionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * Partition token string used to identify the child partition in queries.
    +     * 
    + * + * string token = 1; + * + * @return The token. + */ + java.lang.String getToken(); + + /** + * + * + *
    +     * Partition token string used to identify the child partition in queries.
    +     * 
    + * + * string token = 1; + * + * @return The bytes for token. + */ + com.google.protobuf.ByteString getTokenBytes(); + + /** + * + * + *
    +     * Parent partition tokens of this child partition.
    +     * 
    + * + * repeated string parent_partition_tokens = 2; + * + * @return A list containing the parentPartitionTokens. + */ + java.util.List getParentPartitionTokensList(); + + /** + * + * + *
    +     * Parent partition tokens of this child partition.
    +     * 
    + * + * repeated string parent_partition_tokens = 2; + * + * @return The count of parentPartitionTokens. + */ + int getParentPartitionTokensCount(); + + /** + * + * + *
    +     * Parent partition tokens of this child partition.
    +     * 
    + * + * repeated string parent_partition_tokens = 2; + * + * @param index The index of the element to return. + * @return The parentPartitionTokens at the given index. + */ + java.lang.String getParentPartitionTokens(int index); + + /** + * + * + *
    +     * Parent partition tokens of this child partition.
    +     * 
    + * + * repeated string parent_partition_tokens = 2; + * + * @param index The index of the value to return. + * @return The bytes of the parentPartitionTokens at the given index. + */ + com.google.protobuf.ByteString getParentPartitionTokensBytes(int index); + } + + /** + * + * + *
    +   * A single child partition.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition} + */ + public static final class ChildPartition extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition) + ChildPartitionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ChildPartition"); + } + + // Use ChildPartition.newBuilder() to construct. + private ChildPartition(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ChildPartition() { + token_ = ""; + parentPartitionTokens_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ChildPartitionsRecord_ChildPartition_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ChildPartitionsRecord_ChildPartition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition.class, + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition.Builder.class); + } + + public static final int TOKEN_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object token_ = ""; + + /** + * + * + *
    +     * Partition token string used to identify the child partition in queries.
    +     * 
    + * + * string token = 1; + * + * @return The token. + */ + @java.lang.Override + public java.lang.String getToken() { + java.lang.Object ref = token_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + token_ = s; + return s; + } + } + + /** + * + * + *
    +     * Partition token string used to identify the child partition in queries.
    +     * 
    + * + * string token = 1; + * + * @return The bytes for token. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTokenBytes() { + java.lang.Object ref = token_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + token_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PARENT_PARTITION_TOKENS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList parentPartitionTokens_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +     * Parent partition tokens of this child partition.
    +     * 
    + * + * repeated string parent_partition_tokens = 2; + * + * @return A list containing the parentPartitionTokens. + */ + public com.google.protobuf.ProtocolStringList getParentPartitionTokensList() { + return parentPartitionTokens_; + } + + /** + * + * + *
    +     * Parent partition tokens of this child partition.
    +     * 
    + * + * repeated string parent_partition_tokens = 2; + * + * @return The count of parentPartitionTokens. + */ + public int getParentPartitionTokensCount() { + return parentPartitionTokens_.size(); + } + + /** + * + * + *
    +     * Parent partition tokens of this child partition.
    +     * 
    + * + * repeated string parent_partition_tokens = 2; + * + * @param index The index of the element to return. + * @return The parentPartitionTokens at the given index. + */ + public java.lang.String getParentPartitionTokens(int index) { + return parentPartitionTokens_.get(index); + } + + /** + * + * + *
    +     * Parent partition tokens of this child partition.
    +     * 
    + * + * repeated string parent_partition_tokens = 2; + * + * @param index The index of the value to return. + * @return The bytes of the parentPartitionTokens at the given index. + */ + public com.google.protobuf.ByteString getParentPartitionTokensBytes(int index) { + return parentPartitionTokens_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(token_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, token_); + } + for (int i = 0; i < parentPartitionTokens_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString( + output, 2, parentPartitionTokens_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(token_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, token_); + } + { + int dataSize = 0; + for (int i = 0; i < parentPartitionTokens_.size(); i++) { + dataSize += computeStringSizeNoTag(parentPartitionTokens_.getRaw(i)); + } + size += dataSize; + size += 1 * getParentPartitionTokensList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition)) { + return super.equals(obj); + } + 
com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition other = + (com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition) obj; + + if (!getToken().equals(other.getToken())) return false; + if (!getParentPartitionTokensList().equals(other.getParentPartitionTokensList())) + return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getToken().hashCode(); + if (getParentPartitionTokensCount() > 0) { + hash = (37 * hash) + PARENT_PARTITION_TOKENS_FIELD_NUMBER; + hash = (53 * hash) + getParentPartitionTokensList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * A single child partition.
    +     * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition) + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartitionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ChildPartitionsRecord_ChildPartition_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ChildPartitionsRecord_ChildPartition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition.class, + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition.Builder.class); + } + + // Construct using + // com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + token_ = ""; + parentPartitionTokens_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ChildPartitionsRecord_ChildPartition_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition + getDefaultInstanceForType() { + return 
com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition build() { + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition buildPartial() { + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition result = + new com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.token_ = token_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + parentPartitionTokens_.makeImmutable(); + result.parentPartitionTokens_ = parentPartitionTokens_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition) { + return mergeFrom( + (com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition other) { + if (other + == com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition + .getDefaultInstance()) return this; + if (!other.getToken().isEmpty()) { + token_ = other.token_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.parentPartitionTokens_.isEmpty()) { + if (parentPartitionTokens_.isEmpty()) { + parentPartitionTokens_ = other.parentPartitionTokens_; + 
bitField0_ |= 0x00000002; + } else { + ensureParentPartitionTokensIsMutable(); + parentPartitionTokens_.addAll(other.parentPartitionTokens_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + token_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureParentPartitionTokensIsMutable(); + parentPartitionTokens_.add(s); + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object token_ = ""; + + /** + * + * + *
    +       * Partition token string used to identify the child partition in queries.
    +       * 
    + * + * string token = 1; + * + * @return The token. + */ + public java.lang.String getToken() { + java.lang.Object ref = token_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + token_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * Partition token string used to identify the child partition in queries.
    +       * 
    + * + * string token = 1; + * + * @return The bytes for token. + */ + public com.google.protobuf.ByteString getTokenBytes() { + java.lang.Object ref = token_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + token_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * Partition token string used to identify the child partition in queries.
    +       * 
    + * + * string token = 1; + * + * @param value The token to set. + * @return This builder for chaining. + */ + public Builder setToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + token_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Partition token string used to identify the child partition in queries.
    +       * 
    + * + * string token = 1; + * + * @return This builder for chaining. + */ + public Builder clearToken() { + token_ = getDefaultInstance().getToken(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Partition token string used to identify the child partition in queries.
    +       * 
    + * + * string token = 1; + * + * @param value The bytes for token to set. + * @return This builder for chaining. + */ + public Builder setTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + token_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList parentPartitionTokens_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureParentPartitionTokensIsMutable() { + if (!parentPartitionTokens_.isModifiable()) { + parentPartitionTokens_ = + new com.google.protobuf.LazyStringArrayList(parentPartitionTokens_); + } + bitField0_ |= 0x00000002; + } + + /** + * + * + *
    +       * Parent partition tokens of this child partition.
    +       * 
    + * + * repeated string parent_partition_tokens = 2; + * + * @return A list containing the parentPartitionTokens. + */ + public com.google.protobuf.ProtocolStringList getParentPartitionTokensList() { + parentPartitionTokens_.makeImmutable(); + return parentPartitionTokens_; + } + + /** + * + * + *
    +       * Parent partition tokens of this child partition.
    +       * 
    + * + * repeated string parent_partition_tokens = 2; + * + * @return The count of parentPartitionTokens. + */ + public int getParentPartitionTokensCount() { + return parentPartitionTokens_.size(); + } + + /** + * + * + *
    +       * Parent partition tokens of this child partition.
    +       * 
    + * + * repeated string parent_partition_tokens = 2; + * + * @param index The index of the element to return. + * @return The parentPartitionTokens at the given index. + */ + public java.lang.String getParentPartitionTokens(int index) { + return parentPartitionTokens_.get(index); + } + + /** + * + * + *
    +       * Parent partition tokens of this child partition.
    +       * 
    + * + * repeated string parent_partition_tokens = 2; + * + * @param index The index of the value to return. + * @return The bytes of the parentPartitionTokens at the given index. + */ + public com.google.protobuf.ByteString getParentPartitionTokensBytes(int index) { + return parentPartitionTokens_.getByteString(index); + } + + /** + * + * + *
    +       * Parent partition tokens of this child partition.
    +       * 
    + * + * repeated string parent_partition_tokens = 2; + * + * @param index The index to set the value at. + * @param value The parentPartitionTokens to set. + * @return This builder for chaining. + */ + public Builder setParentPartitionTokens(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureParentPartitionTokensIsMutable(); + parentPartitionTokens_.set(index, value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Parent partition tokens of this child partition.
    +       * 
    + * + * repeated string parent_partition_tokens = 2; + * + * @param value The parentPartitionTokens to add. + * @return This builder for chaining. + */ + public Builder addParentPartitionTokens(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureParentPartitionTokensIsMutable(); + parentPartitionTokens_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Parent partition tokens of this child partition.
    +       * 
    + * + * repeated string parent_partition_tokens = 2; + * + * @param values The parentPartitionTokens to add. + * @return This builder for chaining. + */ + public Builder addAllParentPartitionTokens(java.lang.Iterable values) { + ensureParentPartitionTokensIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, parentPartitionTokens_); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Parent partition tokens of this child partition.
    +       * 
    + * + * repeated string parent_partition_tokens = 2; + * + * @return This builder for chaining. + */ + public Builder clearParentPartitionTokens() { + parentPartitionTokens_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Parent partition tokens of this child partition.
    +       * 
    + * + * repeated string parent_partition_tokens = 2; + * + * @param value The bytes of the parentPartitionTokens to add. + * @return This builder for chaining. + */ + public Builder addParentPartitionTokensBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureParentPartitionTokensIsMutable(); + parentPartitionTokens_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition) + private static final com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition(); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ChildPartition parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + 
public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int START_TIME_FIELD_NUMBER = 1; + private com.google.protobuf.Timestamp startTime_; + + /** + * + * + *
    +   * Data change records returned from child partitions in this child partitions
    +   * record will have a commit timestamp greater than or equal to start_time.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 1; + * + * @return Whether the startTime field is set. + */ + @java.lang.Override + public boolean hasStartTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Data change records returned from child partitions in this child partitions
    +   * record will have a commit timestamp greater than or equal to start_time.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 1; + * + * @return The startTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getStartTime() { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + + /** + * + * + *
    +   * Data change records returned from child partitions in this child partitions
    +   * record will have a commit timestamp greater than or equal to start_time.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 1; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + + public static final int RECORD_SEQUENCE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object recordSequence_ = ""; + + /** + * + * + *
    +   * A monotonically increasing sequence number that can be used to define the
    +   * ordering of the child partitions record when there are multiple child
    +   * partitions records returned with the same start_time in a particular
    +   * partition.
    +   * 
    + * + * string record_sequence = 2; + * + * @return The recordSequence. + */ + @java.lang.Override + public java.lang.String getRecordSequence() { + java.lang.Object ref = recordSequence_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + recordSequence_ = s; + return s; + } + } + + /** + * + * + *
    +   * A monotonically increasing sequence number that can be used to define the
    +   * ordering of the child partitions record when there are multiple child
    +   * partitions records returned with the same start_time in a particular
    +   * partition.
    +   * 
    + * + * string record_sequence = 2; + * + * @return The bytes for recordSequence. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRecordSequenceBytes() { + java.lang.Object ref = recordSequence_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + recordSequence_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CHILD_PARTITIONS_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private java.util.List + childPartitions_; + + /** + * + * + *
    +   * A set of child partitions and their associated information.
    +   * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + @java.lang.Override + public java.util.List + getChildPartitionsList() { + return childPartitions_; + } + + /** + * + * + *
    +   * A set of child partitions and their associated information.
    +   * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + @java.lang.Override + public java.util.List< + ? extends com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartitionOrBuilder> + getChildPartitionsOrBuilderList() { + return childPartitions_; + } + + /** + * + * + *
    +   * A set of child partitions and their associated information.
    +   * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + @java.lang.Override + public int getChildPartitionsCount() { + return childPartitions_.size(); + } + + /** + * + * + *
    +   * A set of child partitions and their associated information.
    +   * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition getChildPartitions( + int index) { + return childPartitions_.get(index); + } + + /** + * + * + *
    +   * A set of child partitions and their associated information.
    +   * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartitionOrBuilder + getChildPartitionsOrBuilder(int index) { + return childPartitions_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getStartTime()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(recordSequence_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, recordSequence_); + } + for (int i = 0; i < childPartitions_.size(); i++) { + output.writeMessage(3, childPartitions_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getStartTime()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(recordSequence_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, recordSequence_); + } + for (int i = 0; i < childPartitions_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, childPartitions_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.ChildPartitionsRecord)) { + 
return super.equals(obj); + } + com.google.spanner.executor.v1.ChildPartitionsRecord other = + (com.google.spanner.executor.v1.ChildPartitionsRecord) obj; + + if (hasStartTime() != other.hasStartTime()) return false; + if (hasStartTime()) { + if (!getStartTime().equals(other.getStartTime())) return false; + } + if (!getRecordSequence().equals(other.getRecordSequence())) return false; + if (!getChildPartitionsList().equals(other.getChildPartitionsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasStartTime()) { + hash = (37 * hash) + START_TIME_FIELD_NUMBER; + hash = (53 * hash) + getStartTime().hashCode(); + } + hash = (37 * hash) + RECORD_SEQUENCE_FIELD_NUMBER; + hash = (53 * hash) + getRecordSequence().hashCode(); + if (getChildPartitionsCount() > 0) { + hash = (37 * hash) + CHILD_PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + getChildPartitionsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.executor.v1.ChildPartitionsRecord parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord parseFrom( + com.google.protobuf.CodedInputStream input) 
throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.ChildPartitionsRecord prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * ChangeStream child partition record.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ChildPartitionsRecord} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.ChildPartitionsRecord) + com.google.spanner.executor.v1.ChildPartitionsRecordOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ChildPartitionsRecord_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ChildPartitionsRecord_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ChildPartitionsRecord.class, + com.google.spanner.executor.v1.ChildPartitionsRecord.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.ChildPartitionsRecord.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetStartTimeFieldBuilder(); + internalGetChildPartitionsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + startTime_ = null; + if (startTimeBuilder_ != null) { + startTimeBuilder_.dispose(); + startTimeBuilder_ = null; + } + recordSequence_ = ""; + if (childPartitionsBuilder_ == null) { + childPartitions_ = java.util.Collections.emptyList(); + } else { + childPartitions_ = null; + childPartitionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + 
@java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ChildPartitionsRecord_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ChildPartitionsRecord getDefaultInstanceForType() { + return com.google.spanner.executor.v1.ChildPartitionsRecord.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.ChildPartitionsRecord build() { + com.google.spanner.executor.v1.ChildPartitionsRecord result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ChildPartitionsRecord buildPartial() { + com.google.spanner.executor.v1.ChildPartitionsRecord result = + new com.google.spanner.executor.v1.ChildPartitionsRecord(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.executor.v1.ChildPartitionsRecord result) { + if (childPartitionsBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0)) { + childPartitions_ = java.util.Collections.unmodifiableList(childPartitions_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.childPartitions_ = childPartitions_; + } else { + result.childPartitions_ = childPartitionsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.executor.v1.ChildPartitionsRecord result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.startTime_ = startTimeBuilder_ == null ? 
startTime_ : startTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.recordSequence_ = recordSequence_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.ChildPartitionsRecord) { + return mergeFrom((com.google.spanner.executor.v1.ChildPartitionsRecord) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.ChildPartitionsRecord other) { + if (other == com.google.spanner.executor.v1.ChildPartitionsRecord.getDefaultInstance()) + return this; + if (other.hasStartTime()) { + mergeStartTime(other.getStartTime()); + } + if (!other.getRecordSequence().isEmpty()) { + recordSequence_ = other.recordSequence_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (childPartitionsBuilder_ == null) { + if (!other.childPartitions_.isEmpty()) { + if (childPartitions_.isEmpty()) { + childPartitions_ = other.childPartitions_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureChildPartitionsIsMutable(); + childPartitions_.addAll(other.childPartitions_); + } + onChanged(); + } + } else { + if (!other.childPartitions_.isEmpty()) { + if (childPartitionsBuilder_.isEmpty()) { + childPartitionsBuilder_.dispose(); + childPartitionsBuilder_ = null; + childPartitions_ = other.childPartitions_; + bitField0_ = (bitField0_ & ~0x00000004); + childPartitionsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetChildPartitionsFieldBuilder() + : null; + } else { + childPartitionsBuilder_.addAllMessages(other.childPartitions_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetStartTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + recordSequence_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition m = + input.readMessage( + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition + .parser(), + extensionRegistry); + if (childPartitionsBuilder_ == null) { + ensureChildPartitionsIsMutable(); + childPartitions_.add(m); + } else { + childPartitionsBuilder_.addMessage(m); + } + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Timestamp startTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + 
startTimeBuilder_; + + /** + * + * + *
    +     * Data change records returned from child partitions in this child partitions
    +     * record will have a commit timestamp greater than or equal to start_time.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 1; + * + * @return Whether the startTime field is set. + */ + public boolean hasStartTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Data change records returned from child partitions in this child partitions
    +     * record will have a commit timestamp greater than or equal to start_time.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 1; + * + * @return The startTime. + */ + public com.google.protobuf.Timestamp getStartTime() { + if (startTimeBuilder_ == null) { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } else { + return startTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Data change records returned from child partitions in this child partitions
    +     * record will have a commit timestamp greater than or equal to start_time.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 1; + */ + public Builder setStartTime(com.google.protobuf.Timestamp value) { + if (startTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + startTime_ = value; + } else { + startTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Data change records returned from child partitions in this child partitions
    +     * record will have a commit timestamp greater than or equal to start_time.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 1; + */ + public Builder setStartTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (startTimeBuilder_ == null) { + startTime_ = builderForValue.build(); + } else { + startTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Data change records returned from child partitions in this child partitions
    +     * record will have a commit timestamp greater than or equal to start_time.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 1; + */ + public Builder mergeStartTime(com.google.protobuf.Timestamp value) { + if (startTimeBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && startTime_ != null + && startTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getStartTimeBuilder().mergeFrom(value); + } else { + startTime_ = value; + } + } else { + startTimeBuilder_.mergeFrom(value); + } + if (startTime_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Data change records returned from child partitions in this child partitions
    +     * record will have a commit timestamp greater than or equal to start_time.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 1; + */ + public Builder clearStartTime() { + bitField0_ = (bitField0_ & ~0x00000001); + startTime_ = null; + if (startTimeBuilder_ != null) { + startTimeBuilder_.dispose(); + startTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Data change records returned from child partitions in this child partitions
    +     * record will have a commit timestamp greater than or equal to start_time.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 1; + */ + public com.google.protobuf.Timestamp.Builder getStartTimeBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetStartTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Data change records returned from child partitions in this child partitions
    +     * record will have a commit timestamp greater than or equal to start_time.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 1; + */ + public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { + if (startTimeBuilder_ != null) { + return startTimeBuilder_.getMessageOrBuilder(); + } else { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + } + + /** + * + * + *
    +     * Data change records returned from child partitions in this child partitions
    +     * record will have a commit timestamp greater than or equal to start_time.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetStartTimeFieldBuilder() { + if (startTimeBuilder_ == null) { + startTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getStartTime(), getParentForChildren(), isClean()); + startTime_ = null; + } + return startTimeBuilder_; + } + + private java.lang.Object recordSequence_ = ""; + + /** + * + * + *
    +     * A monotonically increasing sequence number that can be used to define the
    +     * ordering of the child partitions record when there are multiple child
    +     * partitions records returned with the same start_time in a particular
    +     * partition.
    +     * 
    + * + * string record_sequence = 2; + * + * @return The recordSequence. + */ + public java.lang.String getRecordSequence() { + java.lang.Object ref = recordSequence_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + recordSequence_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * A monotonically increasing sequence number that can be used to define the
    +     * ordering of the child partitions record when there are multiple child
    +     * partitions records returned with the same start_time in a particular
    +     * partition.
    +     * 
    + * + * string record_sequence = 2; + * + * @return The bytes for recordSequence. + */ + public com.google.protobuf.ByteString getRecordSequenceBytes() { + java.lang.Object ref = recordSequence_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + recordSequence_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * A monotonically increasing sequence number that can be used to define the
    +     * ordering of the child partitions record when there are multiple child
    +     * partitions records returned with the same start_time in a particular
    +     * partition.
    +     * 
    + * + * string record_sequence = 2; + * + * @param value The recordSequence to set. + * @return This builder for chaining. + */ + public Builder setRecordSequence(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + recordSequence_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A monotonically increasing sequence number that can be used to define the
    +     * ordering of the child partitions record when there are multiple child
    +     * partitions records returned with the same start_time in a particular
    +     * partition.
    +     * 
    + * + * string record_sequence = 2; + * + * @return This builder for chaining. + */ + public Builder clearRecordSequence() { + recordSequence_ = getDefaultInstance().getRecordSequence(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * A monotonically increasing sequence number that can be used to define the
    +     * ordering of the child partitions record when there are multiple child
    +     * partitions records returned with the same start_time in a particular
    +     * partition.
    +     * 
    + * + * string record_sequence = 2; + * + * @param value The bytes for recordSequence to set. + * @return This builder for chaining. + */ + public Builder setRecordSequenceBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + recordSequence_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.util.List + childPartitions_ = java.util.Collections.emptyList(); + + private void ensureChildPartitionsIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + childPartitions_ = + new java.util.ArrayList< + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition>( + childPartitions_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition, + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition.Builder, + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartitionOrBuilder> + childPartitionsBuilder_; + + /** + * + * + *
    +     * A set of child partitions and their associated information.
    +     * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + public java.util.List + getChildPartitionsList() { + if (childPartitionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(childPartitions_); + } else { + return childPartitionsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * A set of child partitions and their associated information.
    +     * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + public int getChildPartitionsCount() { + if (childPartitionsBuilder_ == null) { + return childPartitions_.size(); + } else { + return childPartitionsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * A set of child partitions and their associated information.
    +     * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + public com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition getChildPartitions( + int index) { + if (childPartitionsBuilder_ == null) { + return childPartitions_.get(index); + } else { + return childPartitionsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * A set of child partitions and their associated information.
    +     * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + public Builder setChildPartitions( + int index, com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition value) { + if (childPartitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureChildPartitionsIsMutable(); + childPartitions_.set(index, value); + onChanged(); + } else { + childPartitionsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * A set of child partitions and their associated information.
    +     * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + public Builder setChildPartitions( + int index, + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition.Builder + builderForValue) { + if (childPartitionsBuilder_ == null) { + ensureChildPartitionsIsMutable(); + childPartitions_.set(index, builderForValue.build()); + onChanged(); + } else { + childPartitionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A set of child partitions and their associated information.
    +     * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + public Builder addChildPartitions( + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition value) { + if (childPartitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureChildPartitionsIsMutable(); + childPartitions_.add(value); + onChanged(); + } else { + childPartitionsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * A set of child partitions and their associated information.
    +     * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + public Builder addChildPartitions( + int index, com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition value) { + if (childPartitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureChildPartitionsIsMutable(); + childPartitions_.add(index, value); + onChanged(); + } else { + childPartitionsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * A set of child partitions and their associated information.
    +     * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + public Builder addChildPartitions( + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition.Builder + builderForValue) { + if (childPartitionsBuilder_ == null) { + ensureChildPartitionsIsMutable(); + childPartitions_.add(builderForValue.build()); + onChanged(); + } else { + childPartitionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A set of child partitions and their associated information.
    +     * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + public Builder addChildPartitions( + int index, + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition.Builder + builderForValue) { + if (childPartitionsBuilder_ == null) { + ensureChildPartitionsIsMutable(); + childPartitions_.add(index, builderForValue.build()); + onChanged(); + } else { + childPartitionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A set of child partitions and their associated information.
    +     * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + public Builder addAllChildPartitions( + java.lang.Iterable< + ? extends com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition> + values) { + if (childPartitionsBuilder_ == null) { + ensureChildPartitionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, childPartitions_); + onChanged(); + } else { + childPartitionsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * A set of child partitions and their associated information.
    +     * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + public Builder clearChildPartitions() { + if (childPartitionsBuilder_ == null) { + childPartitions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + childPartitionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * A set of child partitions and their associated information.
    +     * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + public Builder removeChildPartitions(int index) { + if (childPartitionsBuilder_ == null) { + ensureChildPartitionsIsMutable(); + childPartitions_.remove(index); + onChanged(); + } else { + childPartitionsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * A set of child partitions and their associated information.
    +     * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + public com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition.Builder + getChildPartitionsBuilder(int index) { + return internalGetChildPartitionsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * A set of child partitions and their associated information.
    +     * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + public com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartitionOrBuilder + getChildPartitionsOrBuilder(int index) { + if (childPartitionsBuilder_ == null) { + return childPartitions_.get(index); + } else { + return childPartitionsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * A set of child partitions and their associated information.
    +     * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + public java.util.List< + ? extends com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartitionOrBuilder> + getChildPartitionsOrBuilderList() { + if (childPartitionsBuilder_ != null) { + return childPartitionsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(childPartitions_); + } + } + + /** + * + * + *
    +     * A set of child partitions and their associated information.
    +     * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + public com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition.Builder + addChildPartitionsBuilder() { + return internalGetChildPartitionsFieldBuilder() + .addBuilder( + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition + .getDefaultInstance()); + } + + /** + * + * + *
    +     * A set of child partitions and their associated information.
    +     * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + public com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition.Builder + addChildPartitionsBuilder(int index) { + return internalGetChildPartitionsFieldBuilder() + .addBuilder( + index, + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition + .getDefaultInstance()); + } + + /** + * + * + *
    +     * A set of child partitions and their associated information.
    +     * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + public java.util.List< + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition.Builder> + getChildPartitionsBuilderList() { + return internalGetChildPartitionsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition, + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition.Builder, + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartitionOrBuilder> + internalGetChildPartitionsFieldBuilder() { + if (childPartitionsBuilder_ == null) { + childPartitionsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition, + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition.Builder, + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartitionOrBuilder>( + childPartitions_, + ((bitField0_ & 0x00000004) != 0), + getParentForChildren(), + isClean()); + childPartitions_ = null; + } + return childPartitionsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.ChildPartitionsRecord) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.ChildPartitionsRecord) + private static final com.google.spanner.executor.v1.ChildPartitionsRecord DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.ChildPartitionsRecord(); + } + + public static com.google.spanner.executor.v1.ChildPartitionsRecord getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ChildPartitionsRecord parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ChildPartitionsRecord getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChildPartitionsRecordOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChildPartitionsRecordOrBuilder.java new file mode 100644 index 000000000000..43faee8ccf05 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChildPartitionsRecordOrBuilder.java @@ -0,0 +1,169 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface ChildPartitionsRecordOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.ChildPartitionsRecord) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Data change records returned from child partitions in this child partitions
    +   * record will have a commit timestamp greater than or equal to start_time.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 1; + * + * @return Whether the startTime field is set. + */ + boolean hasStartTime(); + + /** + * + * + *
    +   * Data change records returned from child partitions in this child partitions
    +   * record will have a commit timestamp greater than or equal to start_time.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 1; + * + * @return The startTime. + */ + com.google.protobuf.Timestamp getStartTime(); + + /** + * + * + *
    +   * Data change records returned from child partitions in this child partitions
    +   * record will have a commit timestamp greater than or equal to start_time.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 1; + */ + com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder(); + + /** + * + * + *
    +   * A monotonically increasing sequence number that can be used to define the
    +   * ordering of the child partitions record when there are multiple child
    +   * partitions records returned with the same start_time in a particular
    +   * partition.
    +   * 
    + * + * string record_sequence = 2; + * + * @return The recordSequence. + */ + java.lang.String getRecordSequence(); + + /** + * + * + *
    +   * A monotonically increasing sequence number that can be used to define the
    +   * ordering of the child partitions record when there are multiple child
    +   * partitions records returned with the same start_time in a particular
    +   * partition.
    +   * 
    + * + * string record_sequence = 2; + * + * @return The bytes for recordSequence. + */ + com.google.protobuf.ByteString getRecordSequenceBytes(); + + /** + * + * + *
    +   * A set of child partitions and their associated information.
    +   * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + java.util.List + getChildPartitionsList(); + + /** + * + * + *
    +   * A set of child partitions and their associated information.
    +   * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition getChildPartitions(int index); + + /** + * + * + *
    +   * A set of child partitions and their associated information.
    +   * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + int getChildPartitionsCount(); + + /** + * + * + *
    +   * A set of child partitions and their associated information.
    +   * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + java.util.List< + ? extends com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartitionOrBuilder> + getChildPartitionsOrBuilderList(); + + /** + * + * + *
    +   * A set of child partitions and their associated information.
    +   * 
    + * + * + * repeated .google.spanner.executor.v1.ChildPartitionsRecord.ChildPartition child_partitions = 3; + * + */ + com.google.spanner.executor.v1.ChildPartitionsRecord.ChildPartitionOrBuilder + getChildPartitionsOrBuilder(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloseBatchTransactionAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloseBatchTransactionAction.java new file mode 100644 index 000000000000..e40df76973b4 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloseBatchTransactionAction.java @@ -0,0 +1,520 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Closes or cleans up the currently opened batch read-only transaction.
    + *
    + * Once a transaction is closed, the Executor can be disposed of or used to
    + * start start another transaction. Closing a batch transaction in one Executor
    + * doesn't affect the transaction's state in other Executors that also read from
    + * it.
    + *
    + * When a transaction is cleaned up, it becomes globally invalid. Cleaning up is
    + * optional, but recommended.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CloseBatchTransactionAction} + */ +@com.google.protobuf.Generated +public final class CloseBatchTransactionAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.CloseBatchTransactionAction) + CloseBatchTransactionActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CloseBatchTransactionAction"); + } + + // Use CloseBatchTransactionAction.newBuilder() to construct. + private CloseBatchTransactionAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CloseBatchTransactionAction() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloseBatchTransactionAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloseBatchTransactionAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CloseBatchTransactionAction.class, + com.google.spanner.executor.v1.CloseBatchTransactionAction.Builder.class); + } + + public static final int CLEANUP_FIELD_NUMBER = 1; + private boolean cleanup_ = false; + + /** + * + * + *
    +   * Indicates whether the transaction needs to be cleaned up.
    +   * 
    + * + * bool cleanup = 1; + * + * @return The cleanup. + */ + @java.lang.Override + public boolean getCleanup() { + return cleanup_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (cleanup_ != false) { + output.writeBool(1, cleanup_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (cleanup_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(1, cleanup_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.CloseBatchTransactionAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.CloseBatchTransactionAction other = + (com.google.spanner.executor.v1.CloseBatchTransactionAction) obj; + + if (getCleanup() != other.getCleanup()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + CLEANUP_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getCleanup()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.CloseBatchTransactionAction 
parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CloseBatchTransactionAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloseBatchTransactionAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CloseBatchTransactionAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloseBatchTransactionAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CloseBatchTransactionAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloseBatchTransactionAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CloseBatchTransactionAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + 
+ public static com.google.spanner.executor.v1.CloseBatchTransactionAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CloseBatchTransactionAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloseBatchTransactionAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CloseBatchTransactionAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.CloseBatchTransactionAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Closes or cleans up the currently opened batch read-only transaction.
    +   *
    +   * Once a transaction is closed, the Executor can be disposed of or used to
    +   * start start another transaction. Closing a batch transaction in one Executor
    +   * doesn't affect the transaction's state in other Executors that also read from
    +   * it.
    +   *
    +   * When a transaction is cleaned up, it becomes globally invalid. Cleaning up is
    +   * optional, but recommended.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CloseBatchTransactionAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.CloseBatchTransactionAction) + com.google.spanner.executor.v1.CloseBatchTransactionActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloseBatchTransactionAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloseBatchTransactionAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CloseBatchTransactionAction.class, + com.google.spanner.executor.v1.CloseBatchTransactionAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.CloseBatchTransactionAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + cleanup_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloseBatchTransactionAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CloseBatchTransactionAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.CloseBatchTransactionAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.CloseBatchTransactionAction build() { + 
com.google.spanner.executor.v1.CloseBatchTransactionAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CloseBatchTransactionAction buildPartial() { + com.google.spanner.executor.v1.CloseBatchTransactionAction result = + new com.google.spanner.executor.v1.CloseBatchTransactionAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.CloseBatchTransactionAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.cleanup_ = cleanup_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.CloseBatchTransactionAction) { + return mergeFrom((com.google.spanner.executor.v1.CloseBatchTransactionAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.CloseBatchTransactionAction other) { + if (other == com.google.spanner.executor.v1.CloseBatchTransactionAction.getDefaultInstance()) + return this; + if (other.getCleanup() != false) { + setCleanup(other.getCleanup()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + cleanup_ = input.readBool(); + bitField0_ |= 
0x00000001; + break; + } // case 8 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private boolean cleanup_; + + /** + * + * + *
    +     * Indicates whether the transaction needs to be cleaned up.
    +     * 
    + * + * bool cleanup = 1; + * + * @return The cleanup. + */ + @java.lang.Override + public boolean getCleanup() { + return cleanup_; + } + + /** + * + * + *
    +     * Indicates whether the transaction needs to be cleaned up.
    +     * 
    + * + * bool cleanup = 1; + * + * @param value The cleanup to set. + * @return This builder for chaining. + */ + public Builder setCleanup(boolean value) { + + cleanup_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Indicates whether the transaction needs to be cleaned up.
    +     * 
    + * + * bool cleanup = 1; + * + * @return This builder for chaining. + */ + public Builder clearCleanup() { + bitField0_ = (bitField0_ & ~0x00000001); + cleanup_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.CloseBatchTransactionAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.CloseBatchTransactionAction) + private static final com.google.spanner.executor.v1.CloseBatchTransactionAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.CloseBatchTransactionAction(); + } + + public static com.google.spanner.executor.v1.CloseBatchTransactionAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CloseBatchTransactionAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CloseBatchTransactionAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + 
} +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloseBatchTransactionActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloseBatchTransactionActionOrBuilder.java new file mode 100644 index 000000000000..a5451ff1194c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloseBatchTransactionActionOrBuilder.java @@ -0,0 +1,41 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface CloseBatchTransactionActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.CloseBatchTransactionAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Indicates whether the transaction needs to be cleaned up.
    +   * 
    + * + * bool cleanup = 1; + * + * @return The cleanup. + */ + boolean getCleanup(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudBackupResponse.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudBackupResponse.java new file mode 100644 index 000000000000..216dc3e8bbfc --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudBackupResponse.java @@ -0,0 +1,1936 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * CloudBackupResponse contains results returned by cloud backup related
    + * actions.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CloudBackupResponse} + */ +@com.google.protobuf.Generated +public final class CloudBackupResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.CloudBackupResponse) + CloudBackupResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CloudBackupResponse"); + } + + // Use CloudBackupResponse.newBuilder() to construct. + private CloudBackupResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CloudBackupResponse() { + listedBackups_ = java.util.Collections.emptyList(); + listedBackupOperations_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloudBackupResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloudBackupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CloudBackupResponse.class, + com.google.spanner.executor.v1.CloudBackupResponse.Builder.class); + } + + private int bitField0_; + public static final int LISTED_BACKUPS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List listedBackups_; + + /** + * + * + *
    +   * List of backups returned by ListCloudBackupsAction.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + @java.lang.Override + public java.util.List getListedBackupsList() { + return listedBackups_; + } + + /** + * + * + *
    +   * List of backups returned by ListCloudBackupsAction.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + @java.lang.Override + public java.util.List + getListedBackupsOrBuilderList() { + return listedBackups_; + } + + /** + * + * + *
    +   * List of backups returned by ListCloudBackupsAction.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + @java.lang.Override + public int getListedBackupsCount() { + return listedBackups_.size(); + } + + /** + * + * + *
    +   * List of backups returned by ListCloudBackupsAction.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.Backup getListedBackups(int index) { + return listedBackups_.get(index); + } + + /** + * + * + *
    +   * List of backups returned by ListCloudBackupsAction.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupOrBuilder getListedBackupsOrBuilder(int index) { + return listedBackups_.get(index); + } + + public static final int LISTED_BACKUP_OPERATIONS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List listedBackupOperations_; + + /** + * + * + *
    +   * List of operations returned by ListCloudBackupOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + @java.lang.Override + public java.util.List getListedBackupOperationsList() { + return listedBackupOperations_; + } + + /** + * + * + *
    +   * List of operations returned by ListCloudBackupOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + @java.lang.Override + public java.util.List + getListedBackupOperationsOrBuilderList() { + return listedBackupOperations_; + } + + /** + * + * + *
    +   * List of operations returned by ListCloudBackupOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + @java.lang.Override + public int getListedBackupOperationsCount() { + return listedBackupOperations_.size(); + } + + /** + * + * + *
    +   * List of operations returned by ListCloudBackupOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + @java.lang.Override + public com.google.longrunning.Operation getListedBackupOperations(int index) { + return listedBackupOperations_.get(index); + } + + /** + * + * + *
    +   * List of operations returned by ListCloudBackupOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + @java.lang.Override + public com.google.longrunning.OperationOrBuilder getListedBackupOperationsOrBuilder(int index) { + return listedBackupOperations_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +   * "next_page_token" can be sent in a subsequent list action
    +   * to fetch more of the matching data.
    +   * 
    + * + * string next_page_token = 3; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * "next_page_token" can be sent in a subsequent list action
    +   * to fetch more of the matching data.
    +   * 
    + * + * string next_page_token = 3; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BACKUP_FIELD_NUMBER = 4; + private com.google.spanner.admin.database.v1.Backup backup_; + + /** + * + * + *
    +   * Backup returned by GetCloudBackupAction/UpdateCloudBackupAction.
    +   * 
    + * + * .google.spanner.admin.database.v1.Backup backup = 4; + * + * @return Whether the backup field is set. + */ + @java.lang.Override + public boolean hasBackup() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Backup returned by GetCloudBackupAction/UpdateCloudBackupAction.
    +   * 
    + * + * .google.spanner.admin.database.v1.Backup backup = 4; + * + * @return The backup. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.Backup getBackup() { + return backup_ == null + ? com.google.spanner.admin.database.v1.Backup.getDefaultInstance() + : backup_; + } + + /** + * + * + *
    +   * Backup returned by GetCloudBackupAction/UpdateCloudBackupAction.
    +   * 
    + * + * .google.spanner.admin.database.v1.Backup backup = 4; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupOrBuilder getBackupOrBuilder() { + return backup_ == null + ? com.google.spanner.admin.database.v1.Backup.getDefaultInstance() + : backup_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < listedBackups_.size(); i++) { + output.writeMessage(1, listedBackups_.get(i)); + } + for (int i = 0; i < listedBackupOperations_.size(); i++) { + output.writeMessage(2, listedBackupOperations_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, nextPageToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(4, getBackup()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < listedBackups_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, listedBackups_.get(i)); + } + for (int i = 0; i < listedBackupOperations_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, listedBackupOperations_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, nextPageToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getBackup()); + } + size += 
getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.CloudBackupResponse)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.CloudBackupResponse other = + (com.google.spanner.executor.v1.CloudBackupResponse) obj; + + if (!getListedBackupsList().equals(other.getListedBackupsList())) return false; + if (!getListedBackupOperationsList().equals(other.getListedBackupOperationsList())) + return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (hasBackup() != other.hasBackup()) return false; + if (hasBackup()) { + if (!getBackup().equals(other.getBackup())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getListedBackupsCount() > 0) { + hash = (37 * hash) + LISTED_BACKUPS_FIELD_NUMBER; + hash = (53 * hash) + getListedBackupsList().hashCode(); + } + if (getListedBackupOperationsCount() > 0) { + hash = (37 * hash) + LISTED_BACKUP_OPERATIONS_FIELD_NUMBER; + hash = (53 * hash) + getListedBackupOperationsList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + if (hasBackup()) { + hash = (37 * hash) + BACKUP_FIELD_NUMBER; + hash = (53 * hash) + getBackup().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.CloudBackupResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.executor.v1.CloudBackupResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloudBackupResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CloudBackupResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloudBackupResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CloudBackupResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloudBackupResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CloudBackupResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloudBackupResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CloudBackupResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloudBackupResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CloudBackupResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.CloudBackupResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * CloudBackupResponse contains results returned by cloud backup related
    +   * actions.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CloudBackupResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.CloudBackupResponse) + com.google.spanner.executor.v1.CloudBackupResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloudBackupResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloudBackupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CloudBackupResponse.class, + com.google.spanner.executor.v1.CloudBackupResponse.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.CloudBackupResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetListedBackupsFieldBuilder(); + internalGetListedBackupOperationsFieldBuilder(); + internalGetBackupFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (listedBackupsBuilder_ == null) { + listedBackups_ = java.util.Collections.emptyList(); + } else { + listedBackups_ = null; + listedBackupsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (listedBackupOperationsBuilder_ == null) { + listedBackupOperations_ = java.util.Collections.emptyList(); + } else { + 
listedBackupOperations_ = null; + listedBackupOperationsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + nextPageToken_ = ""; + backup_ = null; + if (backupBuilder_ != null) { + backupBuilder_.dispose(); + backupBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloudBackupResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CloudBackupResponse getDefaultInstanceForType() { + return com.google.spanner.executor.v1.CloudBackupResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.CloudBackupResponse build() { + com.google.spanner.executor.v1.CloudBackupResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CloudBackupResponse buildPartial() { + com.google.spanner.executor.v1.CloudBackupResponse result = + new com.google.spanner.executor.v1.CloudBackupResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.executor.v1.CloudBackupResponse result) { + if (listedBackupsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + listedBackups_ = java.util.Collections.unmodifiableList(listedBackups_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.listedBackups_ = listedBackups_; + } else { + result.listedBackups_ = listedBackupsBuilder_.build(); + } + if (listedBackupOperationsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + listedBackupOperations_ = java.util.Collections.unmodifiableList(listedBackupOperations_); + bitField0_ = (bitField0_ & ~0x00000002); + } 
+ result.listedBackupOperations_ = listedBackupOperations_; + } else { + result.listedBackupOperations_ = listedBackupOperationsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.executor.v1.CloudBackupResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.backup_ = backupBuilder_ == null ? backup_ : backupBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.CloudBackupResponse) { + return mergeFrom((com.google.spanner.executor.v1.CloudBackupResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.CloudBackupResponse other) { + if (other == com.google.spanner.executor.v1.CloudBackupResponse.getDefaultInstance()) + return this; + if (listedBackupsBuilder_ == null) { + if (!other.listedBackups_.isEmpty()) { + if (listedBackups_.isEmpty()) { + listedBackups_ = other.listedBackups_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureListedBackupsIsMutable(); + listedBackups_.addAll(other.listedBackups_); + } + onChanged(); + } + } else { + if (!other.listedBackups_.isEmpty()) { + if (listedBackupsBuilder_.isEmpty()) { + listedBackupsBuilder_.dispose(); + listedBackupsBuilder_ = null; + listedBackups_ = other.listedBackups_; + bitField0_ = (bitField0_ & ~0x00000001); + listedBackupsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetListedBackupsFieldBuilder() + : null; + } else { + listedBackupsBuilder_.addAllMessages(other.listedBackups_); + } + } + } + if (listedBackupOperationsBuilder_ == null) { + if (!other.listedBackupOperations_.isEmpty()) { + if (listedBackupOperations_.isEmpty()) { + listedBackupOperations_ = other.listedBackupOperations_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureListedBackupOperationsIsMutable(); + listedBackupOperations_.addAll(other.listedBackupOperations_); + } + onChanged(); + } + } else { + if (!other.listedBackupOperations_.isEmpty()) { + if (listedBackupOperationsBuilder_.isEmpty()) { + listedBackupOperationsBuilder_.dispose(); + listedBackupOperationsBuilder_ = null; + listedBackupOperations_ = other.listedBackupOperations_; + bitField0_ = (bitField0_ & ~0x00000002); + listedBackupOperationsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetListedBackupOperationsFieldBuilder() + : null; + } else { + listedBackupOperationsBuilder_.addAllMessages(other.listedBackupOperations_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasBackup()) { + mergeBackup(other.getBackup()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.admin.database.v1.Backup m = + input.readMessage( + com.google.spanner.admin.database.v1.Backup.parser(), 
extensionRegistry); + if (listedBackupsBuilder_ == null) { + ensureListedBackupsIsMutable(); + listedBackups_.add(m); + } else { + listedBackupsBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + com.google.longrunning.Operation m = + input.readMessage(com.google.longrunning.Operation.parser(), extensionRegistry); + if (listedBackupOperationsBuilder_ == null) { + ensureListedBackupOperationsIsMutable(); + listedBackupOperations_.add(m); + } else { + listedBackupOperationsBuilder_.addMessage(m); + } + break; + } // case 18 + case 26: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage(internalGetBackupFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List listedBackups_ = + java.util.Collections.emptyList(); + + private void ensureListedBackupsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + listedBackups_ = + new java.util.ArrayList(listedBackups_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.Backup, + com.google.spanner.admin.database.v1.Backup.Builder, + com.google.spanner.admin.database.v1.BackupOrBuilder> + listedBackupsBuilder_; + + /** + * + * + *
    +     * List of backups returned by ListCloudBackupsAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + public java.util.List getListedBackupsList() { + if (listedBackupsBuilder_ == null) { + return java.util.Collections.unmodifiableList(listedBackups_); + } else { + return listedBackupsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * List of backups returned by ListCloudBackupsAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + public int getListedBackupsCount() { + if (listedBackupsBuilder_ == null) { + return listedBackups_.size(); + } else { + return listedBackupsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * List of backups returned by ListCloudBackupsAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + public com.google.spanner.admin.database.v1.Backup getListedBackups(int index) { + if (listedBackupsBuilder_ == null) { + return listedBackups_.get(index); + } else { + return listedBackupsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * List of backups returned by ListCloudBackupsAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + public Builder setListedBackups(int index, com.google.spanner.admin.database.v1.Backup value) { + if (listedBackupsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureListedBackupsIsMutable(); + listedBackups_.set(index, value); + onChanged(); + } else { + listedBackupsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * List of backups returned by ListCloudBackupsAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + public Builder setListedBackups( + int index, com.google.spanner.admin.database.v1.Backup.Builder builderForValue) { + if (listedBackupsBuilder_ == null) { + ensureListedBackupsIsMutable(); + listedBackups_.set(index, builderForValue.build()); + onChanged(); + } else { + listedBackupsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of backups returned by ListCloudBackupsAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + public Builder addListedBackups(com.google.spanner.admin.database.v1.Backup value) { + if (listedBackupsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureListedBackupsIsMutable(); + listedBackups_.add(value); + onChanged(); + } else { + listedBackupsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * List of backups returned by ListCloudBackupsAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + public Builder addListedBackups(int index, com.google.spanner.admin.database.v1.Backup value) { + if (listedBackupsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureListedBackupsIsMutable(); + listedBackups_.add(index, value); + onChanged(); + } else { + listedBackupsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * List of backups returned by ListCloudBackupsAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + public Builder addListedBackups( + com.google.spanner.admin.database.v1.Backup.Builder builderForValue) { + if (listedBackupsBuilder_ == null) { + ensureListedBackupsIsMutable(); + listedBackups_.add(builderForValue.build()); + onChanged(); + } else { + listedBackupsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of backups returned by ListCloudBackupsAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + public Builder addListedBackups( + int index, com.google.spanner.admin.database.v1.Backup.Builder builderForValue) { + if (listedBackupsBuilder_ == null) { + ensureListedBackupsIsMutable(); + listedBackups_.add(index, builderForValue.build()); + onChanged(); + } else { + listedBackupsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of backups returned by ListCloudBackupsAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + public Builder addAllListedBackups( + java.lang.Iterable values) { + if (listedBackupsBuilder_ == null) { + ensureListedBackupsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, listedBackups_); + onChanged(); + } else { + listedBackupsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * List of backups returned by ListCloudBackupsAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + public Builder clearListedBackups() { + if (listedBackupsBuilder_ == null) { + listedBackups_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + listedBackupsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * List of backups returned by ListCloudBackupsAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + public Builder removeListedBackups(int index) { + if (listedBackupsBuilder_ == null) { + ensureListedBackupsIsMutable(); + listedBackups_.remove(index); + onChanged(); + } else { + listedBackupsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * List of backups returned by ListCloudBackupsAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + public com.google.spanner.admin.database.v1.Backup.Builder getListedBackupsBuilder(int index) { + return internalGetListedBackupsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * List of backups returned by ListCloudBackupsAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + public com.google.spanner.admin.database.v1.BackupOrBuilder getListedBackupsOrBuilder( + int index) { + if (listedBackupsBuilder_ == null) { + return listedBackups_.get(index); + } else { + return listedBackupsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * List of backups returned by ListCloudBackupsAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + public java.util.List + getListedBackupsOrBuilderList() { + if (listedBackupsBuilder_ != null) { + return listedBackupsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(listedBackups_); + } + } + + /** + * + * + *
    +     * List of backups returned by ListCloudBackupsAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + public com.google.spanner.admin.database.v1.Backup.Builder addListedBackupsBuilder() { + return internalGetListedBackupsFieldBuilder() + .addBuilder(com.google.spanner.admin.database.v1.Backup.getDefaultInstance()); + } + + /** + * + * + *
    +     * List of backups returned by ListCloudBackupsAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + public com.google.spanner.admin.database.v1.Backup.Builder addListedBackupsBuilder(int index) { + return internalGetListedBackupsFieldBuilder() + .addBuilder(index, com.google.spanner.admin.database.v1.Backup.getDefaultInstance()); + } + + /** + * + * + *
    +     * List of backups returned by ListCloudBackupsAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + public java.util.List + getListedBackupsBuilderList() { + return internalGetListedBackupsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.Backup, + com.google.spanner.admin.database.v1.Backup.Builder, + com.google.spanner.admin.database.v1.BackupOrBuilder> + internalGetListedBackupsFieldBuilder() { + if (listedBackupsBuilder_ == null) { + listedBackupsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.Backup, + com.google.spanner.admin.database.v1.Backup.Builder, + com.google.spanner.admin.database.v1.BackupOrBuilder>( + listedBackups_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + listedBackups_ = null; + } + return listedBackupsBuilder_; + } + + private java.util.List listedBackupOperations_ = + java.util.Collections.emptyList(); + + private void ensureListedBackupOperationsIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + listedBackupOperations_ = + new java.util.ArrayList(listedBackupOperations_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder> + listedBackupOperationsBuilder_; + + /** + * + * + *
    +     * List of operations returned by ListCloudBackupOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + public java.util.List getListedBackupOperationsList() { + if (listedBackupOperationsBuilder_ == null) { + return java.util.Collections.unmodifiableList(listedBackupOperations_); + } else { + return listedBackupOperationsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * List of operations returned by ListCloudBackupOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + public int getListedBackupOperationsCount() { + if (listedBackupOperationsBuilder_ == null) { + return listedBackupOperations_.size(); + } else { + return listedBackupOperationsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * List of operations returned by ListCloudBackupOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + public com.google.longrunning.Operation getListedBackupOperations(int index) { + if (listedBackupOperationsBuilder_ == null) { + return listedBackupOperations_.get(index); + } else { + return listedBackupOperationsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * List of operations returned by ListCloudBackupOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + public Builder setListedBackupOperations(int index, com.google.longrunning.Operation value) { + if (listedBackupOperationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureListedBackupOperationsIsMutable(); + listedBackupOperations_.set(index, value); + onChanged(); + } else { + listedBackupOperationsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListCloudBackupOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + public Builder setListedBackupOperations( + int index, com.google.longrunning.Operation.Builder builderForValue) { + if (listedBackupOperationsBuilder_ == null) { + ensureListedBackupOperationsIsMutable(); + listedBackupOperations_.set(index, builderForValue.build()); + onChanged(); + } else { + listedBackupOperationsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListCloudBackupOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + public Builder addListedBackupOperations(com.google.longrunning.Operation value) { + if (listedBackupOperationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureListedBackupOperationsIsMutable(); + listedBackupOperations_.add(value); + onChanged(); + } else { + listedBackupOperationsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListCloudBackupOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + public Builder addListedBackupOperations(int index, com.google.longrunning.Operation value) { + if (listedBackupOperationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureListedBackupOperationsIsMutable(); + listedBackupOperations_.add(index, value); + onChanged(); + } else { + listedBackupOperationsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListCloudBackupOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + public Builder addListedBackupOperations( + com.google.longrunning.Operation.Builder builderForValue) { + if (listedBackupOperationsBuilder_ == null) { + ensureListedBackupOperationsIsMutable(); + listedBackupOperations_.add(builderForValue.build()); + onChanged(); + } else { + listedBackupOperationsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListCloudBackupOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + public Builder addListedBackupOperations( + int index, com.google.longrunning.Operation.Builder builderForValue) { + if (listedBackupOperationsBuilder_ == null) { + ensureListedBackupOperationsIsMutable(); + listedBackupOperations_.add(index, builderForValue.build()); + onChanged(); + } else { + listedBackupOperationsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListCloudBackupOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + public Builder addAllListedBackupOperations( + java.lang.Iterable values) { + if (listedBackupOperationsBuilder_ == null) { + ensureListedBackupOperationsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, listedBackupOperations_); + onChanged(); + } else { + listedBackupOperationsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListCloudBackupOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + public Builder clearListedBackupOperations() { + if (listedBackupOperationsBuilder_ == null) { + listedBackupOperations_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + listedBackupOperationsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListCloudBackupOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + public Builder removeListedBackupOperations(int index) { + if (listedBackupOperationsBuilder_ == null) { + ensureListedBackupOperationsIsMutable(); + listedBackupOperations_.remove(index); + onChanged(); + } else { + listedBackupOperationsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListCloudBackupOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + public com.google.longrunning.Operation.Builder getListedBackupOperationsBuilder(int index) { + return internalGetListedBackupOperationsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * List of operations returned by ListCloudBackupOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + public com.google.longrunning.OperationOrBuilder getListedBackupOperationsOrBuilder(int index) { + if (listedBackupOperationsBuilder_ == null) { + return listedBackupOperations_.get(index); + } else { + return listedBackupOperationsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * List of operations returned by ListCloudBackupOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + public java.util.List + getListedBackupOperationsOrBuilderList() { + if (listedBackupOperationsBuilder_ != null) { + return listedBackupOperationsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(listedBackupOperations_); + } + } + + /** + * + * + *
    +     * List of operations returned by ListCloudBackupOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + public com.google.longrunning.Operation.Builder addListedBackupOperationsBuilder() { + return internalGetListedBackupOperationsFieldBuilder() + .addBuilder(com.google.longrunning.Operation.getDefaultInstance()); + } + + /** + * + * + *
    +     * List of operations returned by ListCloudBackupOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + public com.google.longrunning.Operation.Builder addListedBackupOperationsBuilder(int index) { + return internalGetListedBackupOperationsFieldBuilder() + .addBuilder(index, com.google.longrunning.Operation.getDefaultInstance()); + } + + /** + * + * + *
    +     * List of operations returned by ListCloudBackupOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + public java.util.List + getListedBackupOperationsBuilderList() { + return internalGetListedBackupOperationsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder> + internalGetListedBackupOperationsFieldBuilder() { + if (listedBackupOperationsBuilder_ == null) { + listedBackupOperationsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder>( + listedBackupOperations_, + ((bitField0_ & 0x00000002) != 0), + getParentForChildren(), + isClean()); + listedBackupOperations_ = null; + } + return listedBackupOperationsBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 3; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 3; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 3; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 3; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 3; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.spanner.admin.database.v1.Backup backup_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.Backup, + com.google.spanner.admin.database.v1.Backup.Builder, + com.google.spanner.admin.database.v1.BackupOrBuilder> + backupBuilder_; + + /** + * + * + *
    +     * Backup returned by GetCloudBackupAction/UpdateCloudBackupAction.
    +     * 
    + * + * .google.spanner.admin.database.v1.Backup backup = 4; + * + * @return Whether the backup field is set. + */ + public boolean hasBackup() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Backup returned by GetCloudBackupAction/UpdateCloudBackupAction.
    +     * 
    + * + * .google.spanner.admin.database.v1.Backup backup = 4; + * + * @return The backup. + */ + public com.google.spanner.admin.database.v1.Backup getBackup() { + if (backupBuilder_ == null) { + return backup_ == null + ? com.google.spanner.admin.database.v1.Backup.getDefaultInstance() + : backup_; + } else { + return backupBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Backup returned by GetCloudBackupAction/UpdateCloudBackupAction.
    +     * 
    + * + * .google.spanner.admin.database.v1.Backup backup = 4; + */ + public Builder setBackup(com.google.spanner.admin.database.v1.Backup value) { + if (backupBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + backup_ = value; + } else { + backupBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Backup returned by GetCloudBackupAction/UpdateCloudBackupAction.
    +     * 
    + * + * .google.spanner.admin.database.v1.Backup backup = 4; + */ + public Builder setBackup(com.google.spanner.admin.database.v1.Backup.Builder builderForValue) { + if (backupBuilder_ == null) { + backup_ = builderForValue.build(); + } else { + backupBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Backup returned by GetCloudBackupAction/UpdateCloudBackupAction.
    +     * 
    + * + * .google.spanner.admin.database.v1.Backup backup = 4; + */ + public Builder mergeBackup(com.google.spanner.admin.database.v1.Backup value) { + if (backupBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && backup_ != null + && backup_ != com.google.spanner.admin.database.v1.Backup.getDefaultInstance()) { + getBackupBuilder().mergeFrom(value); + } else { + backup_ = value; + } + } else { + backupBuilder_.mergeFrom(value); + } + if (backup_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Backup returned by GetCloudBackupAction/UpdateCloudBackupAction.
    +     * 
    + * + * .google.spanner.admin.database.v1.Backup backup = 4; + */ + public Builder clearBackup() { + bitField0_ = (bitField0_ & ~0x00000008); + backup_ = null; + if (backupBuilder_ != null) { + backupBuilder_.dispose(); + backupBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Backup returned by GetCloudBackupAction/UpdateCloudBackupAction.
    +     * 
    + * + * .google.spanner.admin.database.v1.Backup backup = 4; + */ + public com.google.spanner.admin.database.v1.Backup.Builder getBackupBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetBackupFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Backup returned by GetCloudBackupAction/UpdateCloudBackupAction.
    +     * 
    + * + * .google.spanner.admin.database.v1.Backup backup = 4; + */ + public com.google.spanner.admin.database.v1.BackupOrBuilder getBackupOrBuilder() { + if (backupBuilder_ != null) { + return backupBuilder_.getMessageOrBuilder(); + } else { + return backup_ == null + ? com.google.spanner.admin.database.v1.Backup.getDefaultInstance() + : backup_; + } + } + + /** + * + * + *
    +     * Backup returned by GetCloudBackupAction/UpdateCloudBackupAction.
    +     * 
    + * + * .google.spanner.admin.database.v1.Backup backup = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.Backup, + com.google.spanner.admin.database.v1.Backup.Builder, + com.google.spanner.admin.database.v1.BackupOrBuilder> + internalGetBackupFieldBuilder() { + if (backupBuilder_ == null) { + backupBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.Backup, + com.google.spanner.admin.database.v1.Backup.Builder, + com.google.spanner.admin.database.v1.BackupOrBuilder>( + getBackup(), getParentForChildren(), isClean()); + backup_ = null; + } + return backupBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.CloudBackupResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.CloudBackupResponse) + private static final com.google.spanner.executor.v1.CloudBackupResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.CloudBackupResponse(); + } + + public static com.google.spanner.executor.v1.CloudBackupResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CloudBackupResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CloudBackupResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudBackupResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudBackupResponseOrBuilder.java new file mode 100644 index 000000000000..5b10cd1f537c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudBackupResponseOrBuilder.java @@ -0,0 +1,205 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface CloudBackupResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.CloudBackupResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * List of backups returned by ListCloudBackupsAction.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + java.util.List getListedBackupsList(); + + /** + * + * + *
    +   * List of backups returned by ListCloudBackupsAction.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + com.google.spanner.admin.database.v1.Backup getListedBackups(int index); + + /** + * + * + *
    +   * List of backups returned by ListCloudBackupsAction.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + int getListedBackupsCount(); + + /** + * + * + *
    +   * List of backups returned by ListCloudBackupsAction.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + java.util.List + getListedBackupsOrBuilderList(); + + /** + * + * + *
    +   * List of backups returned by ListCloudBackupsAction.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Backup listed_backups = 1; + */ + com.google.spanner.admin.database.v1.BackupOrBuilder getListedBackupsOrBuilder(int index); + + /** + * + * + *
    +   * List of operations returned by ListCloudBackupOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + java.util.List getListedBackupOperationsList(); + + /** + * + * + *
    +   * List of operations returned by ListCloudBackupOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + com.google.longrunning.Operation getListedBackupOperations(int index); + + /** + * + * + *
    +   * List of operations returned by ListCloudBackupOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + int getListedBackupOperationsCount(); + + /** + * + * + *
    +   * List of operations returned by ListCloudBackupOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + java.util.List + getListedBackupOperationsOrBuilderList(); + + /** + * + * + *
    +   * List of operations returned by ListCloudBackupOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_backup_operations = 2; + */ + com.google.longrunning.OperationOrBuilder getListedBackupOperationsOrBuilder(int index); + + /** + * + * + *
    +   * "next_page_token" can be sent in a subsequent list action
    +   * to fetch more of the matching data.
    +   * 
    + * + * string next_page_token = 3; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
    +   * "next_page_token" can be sent in a subsequent list action
    +   * to fetch more of the matching data.
    +   * 
    + * + * string next_page_token = 3; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); + + /** + * + * + *
    +   * Backup returned by GetCloudBackupAction/UpdateCloudBackupAction.
    +   * 
    + * + * .google.spanner.admin.database.v1.Backup backup = 4; + * + * @return Whether the backup field is set. + */ + boolean hasBackup(); + + /** + * + * + *
    +   * Backup returned by GetCloudBackupAction/UpdateCloudBackupAction.
    +   * 
    + * + * .google.spanner.admin.database.v1.Backup backup = 4; + * + * @return The backup. + */ + com.google.spanner.admin.database.v1.Backup getBackup(); + + /** + * + * + *
    +   * Backup returned by GetCloudBackupAction/UpdateCloudBackupAction.
    +   * 
    + * + * .google.spanner.admin.database.v1.Backup backup = 4; + */ + com.google.spanner.admin.database.v1.BackupOrBuilder getBackupOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudDatabaseResponse.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudDatabaseResponse.java new file mode 100644 index 000000000000..911942c101d8 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudDatabaseResponse.java @@ -0,0 +1,1946 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * CloudDatabaseResponse contains results returned by cloud database related
    + * actions.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CloudDatabaseResponse} + */ +@com.google.protobuf.Generated +public final class CloudDatabaseResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.CloudDatabaseResponse) + CloudDatabaseResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CloudDatabaseResponse"); + } + + // Use CloudDatabaseResponse.newBuilder() to construct. + private CloudDatabaseResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CloudDatabaseResponse() { + listedDatabases_ = java.util.Collections.emptyList(); + listedDatabaseOperations_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloudDatabaseResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloudDatabaseResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CloudDatabaseResponse.class, + com.google.spanner.executor.v1.CloudDatabaseResponse.Builder.class); + } + + private int bitField0_; + public static final int LISTED_DATABASES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List listedDatabases_; + + /** + * + * + *
    +   * List of databases returned by ListCloudDatabasesAction.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + @java.lang.Override + public java.util.List getListedDatabasesList() { + return listedDatabases_; + } + + /** + * + * + *
    +   * List of databases returned by ListCloudDatabasesAction.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + @java.lang.Override + public java.util.List + getListedDatabasesOrBuilderList() { + return listedDatabases_; + } + + /** + * + * + *
    +   * List of databases returned by ListCloudDatabasesAction.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + @java.lang.Override + public int getListedDatabasesCount() { + return listedDatabases_.size(); + } + + /** + * + * + *
    +   * List of databases returned by ListCloudDatabasesAction.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.Database getListedDatabases(int index) { + return listedDatabases_.get(index); + } + + /** + * + * + *
    +   * List of databases returned by ListCloudDatabasesAction.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.DatabaseOrBuilder getListedDatabasesOrBuilder( + int index) { + return listedDatabases_.get(index); + } + + public static final int LISTED_DATABASE_OPERATIONS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List listedDatabaseOperations_; + + /** + * + * + *
    +   * List of operations returned by ListCloudDatabaseOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + @java.lang.Override + public java.util.List getListedDatabaseOperationsList() { + return listedDatabaseOperations_; + } + + /** + * + * + *
    +   * List of operations returned by ListCloudDatabaseOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + @java.lang.Override + public java.util.List + getListedDatabaseOperationsOrBuilderList() { + return listedDatabaseOperations_; + } + + /** + * + * + *
    +   * List of operations returned by ListCloudDatabaseOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + @java.lang.Override + public int getListedDatabaseOperationsCount() { + return listedDatabaseOperations_.size(); + } + + /** + * + * + *
    +   * List of operations returned by ListCloudDatabaseOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + @java.lang.Override + public com.google.longrunning.Operation getListedDatabaseOperations(int index) { + return listedDatabaseOperations_.get(index); + } + + /** + * + * + *
    +   * List of operations returned by ListCloudDatabaseOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + @java.lang.Override + public com.google.longrunning.OperationOrBuilder getListedDatabaseOperationsOrBuilder(int index) { + return listedDatabaseOperations_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +   * "next_page_token" can be sent in a subsequent list action
    +   * to fetch more of the matching data.
    +   * 
    + * + * string next_page_token = 3; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * "next_page_token" can be sent in a subsequent list action
    +   * to fetch more of the matching data.
    +   * 
    + * + * string next_page_token = 3; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DATABASE_FIELD_NUMBER = 4; + private com.google.spanner.admin.database.v1.Database database_; + + /** + * + * + *
    +   * Database returned by GetCloudDatabaseAction
    +   * 
    + * + * .google.spanner.admin.database.v1.Database database = 4; + * + * @return Whether the database field is set. + */ + @java.lang.Override + public boolean hasDatabase() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Database returned by GetCloudDatabaseAction
    +   * 
    + * + * .google.spanner.admin.database.v1.Database database = 4; + * + * @return The database. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.Database getDatabase() { + return database_ == null + ? com.google.spanner.admin.database.v1.Database.getDefaultInstance() + : database_; + } + + /** + * + * + *
    +   * Database returned by GetCloudDatabaseAction
    +   * 
    + * + * .google.spanner.admin.database.v1.Database database = 4; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.DatabaseOrBuilder getDatabaseOrBuilder() { + return database_ == null + ? com.google.spanner.admin.database.v1.Database.getDefaultInstance() + : database_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < listedDatabases_.size(); i++) { + output.writeMessage(1, listedDatabases_.get(i)); + } + for (int i = 0; i < listedDatabaseOperations_.size(); i++) { + output.writeMessage(2, listedDatabaseOperations_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, nextPageToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(4, getDatabase()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < listedDatabases_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, listedDatabases_.get(i)); + } + for (int i = 0; i < listedDatabaseOperations_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, listedDatabaseOperations_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, nextPageToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getDatabase()); + } + size 
+= getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.CloudDatabaseResponse)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.CloudDatabaseResponse other = + (com.google.spanner.executor.v1.CloudDatabaseResponse) obj; + + if (!getListedDatabasesList().equals(other.getListedDatabasesList())) return false; + if (!getListedDatabaseOperationsList().equals(other.getListedDatabaseOperationsList())) + return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (hasDatabase() != other.hasDatabase()) return false; + if (hasDatabase()) { + if (!getDatabase().equals(other.getDatabase())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getListedDatabasesCount() > 0) { + hash = (37 * hash) + LISTED_DATABASES_FIELD_NUMBER; + hash = (53 * hash) + getListedDatabasesList().hashCode(); + } + if (getListedDatabaseOperationsCount() > 0) { + hash = (37 * hash) + LISTED_DATABASE_OPERATIONS_FIELD_NUMBER; + hash = (53 * hash) + getListedDatabaseOperationsList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + if (hasDatabase()) { + hash = (37 * hash) + DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getDatabase().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.CloudDatabaseResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CloudDatabaseResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloudDatabaseResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CloudDatabaseResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloudDatabaseResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CloudDatabaseResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloudDatabaseResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CloudDatabaseResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloudDatabaseResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + 
return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CloudDatabaseResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloudDatabaseResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CloudDatabaseResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.CloudDatabaseResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * CloudDatabaseResponse contains results returned by cloud database related
    +   * actions.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CloudDatabaseResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.CloudDatabaseResponse) + com.google.spanner.executor.v1.CloudDatabaseResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloudDatabaseResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloudDatabaseResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CloudDatabaseResponse.class, + com.google.spanner.executor.v1.CloudDatabaseResponse.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.CloudDatabaseResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetListedDatabasesFieldBuilder(); + internalGetListedDatabaseOperationsFieldBuilder(); + internalGetDatabaseFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (listedDatabasesBuilder_ == null) { + listedDatabases_ = java.util.Collections.emptyList(); + } else { + listedDatabases_ = null; + listedDatabasesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (listedDatabaseOperationsBuilder_ == null) { + listedDatabaseOperations_ = 
java.util.Collections.emptyList(); + } else { + listedDatabaseOperations_ = null; + listedDatabaseOperationsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + nextPageToken_ = ""; + database_ = null; + if (databaseBuilder_ != null) { + databaseBuilder_.dispose(); + databaseBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloudDatabaseResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CloudDatabaseResponse getDefaultInstanceForType() { + return com.google.spanner.executor.v1.CloudDatabaseResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.CloudDatabaseResponse build() { + com.google.spanner.executor.v1.CloudDatabaseResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CloudDatabaseResponse buildPartial() { + com.google.spanner.executor.v1.CloudDatabaseResponse result = + new com.google.spanner.executor.v1.CloudDatabaseResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.executor.v1.CloudDatabaseResponse result) { + if (listedDatabasesBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + listedDatabases_ = java.util.Collections.unmodifiableList(listedDatabases_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.listedDatabases_ = listedDatabases_; + } else { + result.listedDatabases_ = listedDatabasesBuilder_.build(); + } + if (listedDatabaseOperationsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + listedDatabaseOperations_ = + 
java.util.Collections.unmodifiableList(listedDatabaseOperations_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.listedDatabaseOperations_ = listedDatabaseOperations_; + } else { + result.listedDatabaseOperations_ = listedDatabaseOperationsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.executor.v1.CloudDatabaseResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.database_ = databaseBuilder_ == null ? database_ : databaseBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.CloudDatabaseResponse) { + return mergeFrom((com.google.spanner.executor.v1.CloudDatabaseResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.CloudDatabaseResponse other) { + if (other == com.google.spanner.executor.v1.CloudDatabaseResponse.getDefaultInstance()) + return this; + if (listedDatabasesBuilder_ == null) { + if (!other.listedDatabases_.isEmpty()) { + if (listedDatabases_.isEmpty()) { + listedDatabases_ = other.listedDatabases_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureListedDatabasesIsMutable(); + listedDatabases_.addAll(other.listedDatabases_); + } + onChanged(); + } + } else { + if (!other.listedDatabases_.isEmpty()) { + if (listedDatabasesBuilder_.isEmpty()) { + listedDatabasesBuilder_.dispose(); + listedDatabasesBuilder_ = null; + listedDatabases_ = other.listedDatabases_; + bitField0_ = (bitField0_ & ~0x00000001); + listedDatabasesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetListedDatabasesFieldBuilder() + : null; + } else { + listedDatabasesBuilder_.addAllMessages(other.listedDatabases_); + } + } + } + if (listedDatabaseOperationsBuilder_ == null) { + if (!other.listedDatabaseOperations_.isEmpty()) { + if (listedDatabaseOperations_.isEmpty()) { + listedDatabaseOperations_ = other.listedDatabaseOperations_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureListedDatabaseOperationsIsMutable(); + listedDatabaseOperations_.addAll(other.listedDatabaseOperations_); + } + onChanged(); + } + } else { + if (!other.listedDatabaseOperations_.isEmpty()) { + if (listedDatabaseOperationsBuilder_.isEmpty()) { + listedDatabaseOperationsBuilder_.dispose(); + listedDatabaseOperationsBuilder_ = null; + listedDatabaseOperations_ = other.listedDatabaseOperations_; + bitField0_ = (bitField0_ & ~0x00000002); + listedDatabaseOperationsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetListedDatabaseOperationsFieldBuilder() + : null; + } else { + listedDatabaseOperationsBuilder_.addAllMessages(other.listedDatabaseOperations_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasDatabase()) { + mergeDatabase(other.getDatabase()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.admin.database.v1.Database m = + input.readMessage( + 
com.google.spanner.admin.database.v1.Database.parser(), extensionRegistry); + if (listedDatabasesBuilder_ == null) { + ensureListedDatabasesIsMutable(); + listedDatabases_.add(m); + } else { + listedDatabasesBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + com.google.longrunning.Operation m = + input.readMessage(com.google.longrunning.Operation.parser(), extensionRegistry); + if (listedDatabaseOperationsBuilder_ == null) { + ensureListedDatabaseOperationsIsMutable(); + listedDatabaseOperations_.add(m); + } else { + listedDatabaseOperationsBuilder_.addMessage(m); + } + break; + } // case 18 + case 26: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetDatabaseFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List listedDatabases_ = + java.util.Collections.emptyList(); + + private void ensureListedDatabasesIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + listedDatabases_ = + new java.util.ArrayList( + listedDatabases_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.Database, + com.google.spanner.admin.database.v1.Database.Builder, + com.google.spanner.admin.database.v1.DatabaseOrBuilder> + listedDatabasesBuilder_; + + /** + * + * + *
    +     * List of databases returned by ListCloudDatabasesAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + public java.util.List getListedDatabasesList() { + if (listedDatabasesBuilder_ == null) { + return java.util.Collections.unmodifiableList(listedDatabases_); + } else { + return listedDatabasesBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * List of databases returned by ListCloudDatabasesAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + public int getListedDatabasesCount() { + if (listedDatabasesBuilder_ == null) { + return listedDatabases_.size(); + } else { + return listedDatabasesBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * List of databases returned by ListCloudDatabasesAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + public com.google.spanner.admin.database.v1.Database getListedDatabases(int index) { + if (listedDatabasesBuilder_ == null) { + return listedDatabases_.get(index); + } else { + return listedDatabasesBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * List of databases returned by ListCloudDatabasesAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + public Builder setListedDatabases( + int index, com.google.spanner.admin.database.v1.Database value) { + if (listedDatabasesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureListedDatabasesIsMutable(); + listedDatabases_.set(index, value); + onChanged(); + } else { + listedDatabasesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * List of databases returned by ListCloudDatabasesAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + public Builder setListedDatabases( + int index, com.google.spanner.admin.database.v1.Database.Builder builderForValue) { + if (listedDatabasesBuilder_ == null) { + ensureListedDatabasesIsMutable(); + listedDatabases_.set(index, builderForValue.build()); + onChanged(); + } else { + listedDatabasesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of databases returned by ListCloudDatabasesAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + public Builder addListedDatabases(com.google.spanner.admin.database.v1.Database value) { + if (listedDatabasesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureListedDatabasesIsMutable(); + listedDatabases_.add(value); + onChanged(); + } else { + listedDatabasesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * List of databases returned by ListCloudDatabasesAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + public Builder addListedDatabases( + int index, com.google.spanner.admin.database.v1.Database value) { + if (listedDatabasesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureListedDatabasesIsMutable(); + listedDatabases_.add(index, value); + onChanged(); + } else { + listedDatabasesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * List of databases returned by ListCloudDatabasesAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + public Builder addListedDatabases( + com.google.spanner.admin.database.v1.Database.Builder builderForValue) { + if (listedDatabasesBuilder_ == null) { + ensureListedDatabasesIsMutable(); + listedDatabases_.add(builderForValue.build()); + onChanged(); + } else { + listedDatabasesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of databases returned by ListCloudDatabasesAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + public Builder addListedDatabases( + int index, com.google.spanner.admin.database.v1.Database.Builder builderForValue) { + if (listedDatabasesBuilder_ == null) { + ensureListedDatabasesIsMutable(); + listedDatabases_.add(index, builderForValue.build()); + onChanged(); + } else { + listedDatabasesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of databases returned by ListCloudDatabasesAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + public Builder addAllListedDatabases( + java.lang.Iterable values) { + if (listedDatabasesBuilder_ == null) { + ensureListedDatabasesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, listedDatabases_); + onChanged(); + } else { + listedDatabasesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * List of databases returned by ListCloudDatabasesAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + public Builder clearListedDatabases() { + if (listedDatabasesBuilder_ == null) { + listedDatabases_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + listedDatabasesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * List of databases returned by ListCloudDatabasesAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + public Builder removeListedDatabases(int index) { + if (listedDatabasesBuilder_ == null) { + ensureListedDatabasesIsMutable(); + listedDatabases_.remove(index); + onChanged(); + } else { + listedDatabasesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * List of databases returned by ListCloudDatabasesAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + public com.google.spanner.admin.database.v1.Database.Builder getListedDatabasesBuilder( + int index) { + return internalGetListedDatabasesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * List of databases returned by ListCloudDatabasesAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + public com.google.spanner.admin.database.v1.DatabaseOrBuilder getListedDatabasesOrBuilder( + int index) { + if (listedDatabasesBuilder_ == null) { + return listedDatabases_.get(index); + } else { + return listedDatabasesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * List of databases returned by ListCloudDatabasesAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + public java.util.List + getListedDatabasesOrBuilderList() { + if (listedDatabasesBuilder_ != null) { + return listedDatabasesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(listedDatabases_); + } + } + + /** + * + * + *
    +     * List of databases returned by ListCloudDatabasesAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + public com.google.spanner.admin.database.v1.Database.Builder addListedDatabasesBuilder() { + return internalGetListedDatabasesFieldBuilder() + .addBuilder(com.google.spanner.admin.database.v1.Database.getDefaultInstance()); + } + + /** + * + * + *
    +     * List of databases returned by ListCloudDatabasesAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + public com.google.spanner.admin.database.v1.Database.Builder addListedDatabasesBuilder( + int index) { + return internalGetListedDatabasesFieldBuilder() + .addBuilder(index, com.google.spanner.admin.database.v1.Database.getDefaultInstance()); + } + + /** + * + * + *
    +     * List of databases returned by ListCloudDatabasesAction.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + public java.util.List + getListedDatabasesBuilderList() { + return internalGetListedDatabasesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.Database, + com.google.spanner.admin.database.v1.Database.Builder, + com.google.spanner.admin.database.v1.DatabaseOrBuilder> + internalGetListedDatabasesFieldBuilder() { + if (listedDatabasesBuilder_ == null) { + listedDatabasesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.database.v1.Database, + com.google.spanner.admin.database.v1.Database.Builder, + com.google.spanner.admin.database.v1.DatabaseOrBuilder>( + listedDatabases_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + listedDatabases_ = null; + } + return listedDatabasesBuilder_; + } + + private java.util.List listedDatabaseOperations_ = + java.util.Collections.emptyList(); + + private void ensureListedDatabaseOperationsIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + listedDatabaseOperations_ = + new java.util.ArrayList(listedDatabaseOperations_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder> + listedDatabaseOperationsBuilder_; + + /** + * + * + *
    +     * List of operations returned by ListCloudDatabaseOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + public java.util.List getListedDatabaseOperationsList() { + if (listedDatabaseOperationsBuilder_ == null) { + return java.util.Collections.unmodifiableList(listedDatabaseOperations_); + } else { + return listedDatabaseOperationsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * List of operations returned by ListCloudDatabaseOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + public int getListedDatabaseOperationsCount() { + if (listedDatabaseOperationsBuilder_ == null) { + return listedDatabaseOperations_.size(); + } else { + return listedDatabaseOperationsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * List of operations returned by ListCloudDatabaseOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + public com.google.longrunning.Operation getListedDatabaseOperations(int index) { + if (listedDatabaseOperationsBuilder_ == null) { + return listedDatabaseOperations_.get(index); + } else { + return listedDatabaseOperationsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * List of operations returned by ListCloudDatabaseOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + public Builder setListedDatabaseOperations(int index, com.google.longrunning.Operation value) { + if (listedDatabaseOperationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureListedDatabaseOperationsIsMutable(); + listedDatabaseOperations_.set(index, value); + onChanged(); + } else { + listedDatabaseOperationsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListCloudDatabaseOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + public Builder setListedDatabaseOperations( + int index, com.google.longrunning.Operation.Builder builderForValue) { + if (listedDatabaseOperationsBuilder_ == null) { + ensureListedDatabaseOperationsIsMutable(); + listedDatabaseOperations_.set(index, builderForValue.build()); + onChanged(); + } else { + listedDatabaseOperationsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListCloudDatabaseOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + public Builder addListedDatabaseOperations(com.google.longrunning.Operation value) { + if (listedDatabaseOperationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureListedDatabaseOperationsIsMutable(); + listedDatabaseOperations_.add(value); + onChanged(); + } else { + listedDatabaseOperationsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListCloudDatabaseOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + public Builder addListedDatabaseOperations(int index, com.google.longrunning.Operation value) { + if (listedDatabaseOperationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureListedDatabaseOperationsIsMutable(); + listedDatabaseOperations_.add(index, value); + onChanged(); + } else { + listedDatabaseOperationsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListCloudDatabaseOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + public Builder addListedDatabaseOperations( + com.google.longrunning.Operation.Builder builderForValue) { + if (listedDatabaseOperationsBuilder_ == null) { + ensureListedDatabaseOperationsIsMutable(); + listedDatabaseOperations_.add(builderForValue.build()); + onChanged(); + } else { + listedDatabaseOperationsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListCloudDatabaseOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + public Builder addListedDatabaseOperations( + int index, com.google.longrunning.Operation.Builder builderForValue) { + if (listedDatabaseOperationsBuilder_ == null) { + ensureListedDatabaseOperationsIsMutable(); + listedDatabaseOperations_.add(index, builderForValue.build()); + onChanged(); + } else { + listedDatabaseOperationsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListCloudDatabaseOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + public Builder addAllListedDatabaseOperations( + java.lang.Iterable values) { + if (listedDatabaseOperationsBuilder_ == null) { + ensureListedDatabaseOperationsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, listedDatabaseOperations_); + onChanged(); + } else { + listedDatabaseOperationsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListCloudDatabaseOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + public Builder clearListedDatabaseOperations() { + if (listedDatabaseOperationsBuilder_ == null) { + listedDatabaseOperations_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + listedDatabaseOperationsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListCloudDatabaseOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + public Builder removeListedDatabaseOperations(int index) { + if (listedDatabaseOperationsBuilder_ == null) { + ensureListedDatabaseOperationsIsMutable(); + listedDatabaseOperations_.remove(index); + onChanged(); + } else { + listedDatabaseOperationsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListCloudDatabaseOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + public com.google.longrunning.Operation.Builder getListedDatabaseOperationsBuilder(int index) { + return internalGetListedDatabaseOperationsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * List of operations returned by ListCloudDatabaseOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + public com.google.longrunning.OperationOrBuilder getListedDatabaseOperationsOrBuilder( + int index) { + if (listedDatabaseOperationsBuilder_ == null) { + return listedDatabaseOperations_.get(index); + } else { + return listedDatabaseOperationsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * List of operations returned by ListCloudDatabaseOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + public java.util.List + getListedDatabaseOperationsOrBuilderList() { + if (listedDatabaseOperationsBuilder_ != null) { + return listedDatabaseOperationsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(listedDatabaseOperations_); + } + } + + /** + * + * + *
    +     * List of operations returned by ListCloudDatabaseOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + public com.google.longrunning.Operation.Builder addListedDatabaseOperationsBuilder() { + return internalGetListedDatabaseOperationsFieldBuilder() + .addBuilder(com.google.longrunning.Operation.getDefaultInstance()); + } + + /** + * + * + *
    +     * List of operations returned by ListCloudDatabaseOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + public com.google.longrunning.Operation.Builder addListedDatabaseOperationsBuilder(int index) { + return internalGetListedDatabaseOperationsFieldBuilder() + .addBuilder(index, com.google.longrunning.Operation.getDefaultInstance()); + } + + /** + * + * + *
    +     * List of operations returned by ListCloudDatabaseOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + public java.util.List + getListedDatabaseOperationsBuilderList() { + return internalGetListedDatabaseOperationsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder> + internalGetListedDatabaseOperationsFieldBuilder() { + if (listedDatabaseOperationsBuilder_ == null) { + listedDatabaseOperationsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder>( + listedDatabaseOperations_, + ((bitField0_ & 0x00000002) != 0), + getParentForChildren(), + isClean()); + listedDatabaseOperations_ = null; + } + return listedDatabaseOperationsBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 3; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 3; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 3; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 3; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 3; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.spanner.admin.database.v1.Database database_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.Database, + com.google.spanner.admin.database.v1.Database.Builder, + com.google.spanner.admin.database.v1.DatabaseOrBuilder> + databaseBuilder_; + + /** + * + * + *
    +     * Database returned by GetCloudDatabaseAction
    +     * 
    + * + * .google.spanner.admin.database.v1.Database database = 4; + * + * @return Whether the database field is set. + */ + public boolean hasDatabase() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Database returned by GetCloudDatabaseAction
    +     * 
    + * + * .google.spanner.admin.database.v1.Database database = 4; + * + * @return The database. + */ + public com.google.spanner.admin.database.v1.Database getDatabase() { + if (databaseBuilder_ == null) { + return database_ == null + ? com.google.spanner.admin.database.v1.Database.getDefaultInstance() + : database_; + } else { + return databaseBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Database returned by GetCloudDatabaseAction
    +     * 
    + * + * .google.spanner.admin.database.v1.Database database = 4; + */ + public Builder setDatabase(com.google.spanner.admin.database.v1.Database value) { + if (databaseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + database_ = value; + } else { + databaseBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Database returned by GetCloudDatabaseAction
    +     * 
    + * + * .google.spanner.admin.database.v1.Database database = 4; + */ + public Builder setDatabase( + com.google.spanner.admin.database.v1.Database.Builder builderForValue) { + if (databaseBuilder_ == null) { + database_ = builderForValue.build(); + } else { + databaseBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Database returned by GetCloudDatabaseAction
    +     * 
    + * + * .google.spanner.admin.database.v1.Database database = 4; + */ + public Builder mergeDatabase(com.google.spanner.admin.database.v1.Database value) { + if (databaseBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && database_ != null + && database_ != com.google.spanner.admin.database.v1.Database.getDefaultInstance()) { + getDatabaseBuilder().mergeFrom(value); + } else { + database_ = value; + } + } else { + databaseBuilder_.mergeFrom(value); + } + if (database_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Database returned by GetCloudDatabaseAction
    +     * 
    + * + * .google.spanner.admin.database.v1.Database database = 4; + */ + public Builder clearDatabase() { + bitField0_ = (bitField0_ & ~0x00000008); + database_ = null; + if (databaseBuilder_ != null) { + databaseBuilder_.dispose(); + databaseBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Database returned by GetCloudDatabaseAction
    +     * 
    + * + * .google.spanner.admin.database.v1.Database database = 4; + */ + public com.google.spanner.admin.database.v1.Database.Builder getDatabaseBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetDatabaseFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Database returned by GetCloudDatabaseAction
    +     * 
    + * + * .google.spanner.admin.database.v1.Database database = 4; + */ + public com.google.spanner.admin.database.v1.DatabaseOrBuilder getDatabaseOrBuilder() { + if (databaseBuilder_ != null) { + return databaseBuilder_.getMessageOrBuilder(); + } else { + return database_ == null + ? com.google.spanner.admin.database.v1.Database.getDefaultInstance() + : database_; + } + } + + /** + * + * + *
    +     * Database returned by GetCloudDatabaseAction
    +     * 
    + * + * .google.spanner.admin.database.v1.Database database = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.Database, + com.google.spanner.admin.database.v1.Database.Builder, + com.google.spanner.admin.database.v1.DatabaseOrBuilder> + internalGetDatabaseFieldBuilder() { + if (databaseBuilder_ == null) { + databaseBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.Database, + com.google.spanner.admin.database.v1.Database.Builder, + com.google.spanner.admin.database.v1.DatabaseOrBuilder>( + getDatabase(), getParentForChildren(), isClean()); + database_ = null; + } + return databaseBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.CloudDatabaseResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.CloudDatabaseResponse) + private static final com.google.spanner.executor.v1.CloudDatabaseResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.CloudDatabaseResponse(); + } + + public static com.google.spanner.executor.v1.CloudDatabaseResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CloudDatabaseResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CloudDatabaseResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudDatabaseResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudDatabaseResponseOrBuilder.java new file mode 100644 index 000000000000..ae9f5799c982 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudDatabaseResponseOrBuilder.java @@ -0,0 +1,205 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface CloudDatabaseResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.CloudDatabaseResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * List of databases returned by ListCloudDatabasesAction.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + java.util.List getListedDatabasesList(); + + /** + * + * + *
    +   * List of databases returned by ListCloudDatabasesAction.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + com.google.spanner.admin.database.v1.Database getListedDatabases(int index); + + /** + * + * + *
    +   * List of databases returned by ListCloudDatabasesAction.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + int getListedDatabasesCount(); + + /** + * + * + *
    +   * List of databases returned by ListCloudDatabasesAction.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + java.util.List + getListedDatabasesOrBuilderList(); + + /** + * + * + *
    +   * List of databases returned by ListCloudDatabasesAction.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.Database listed_databases = 1; + */ + com.google.spanner.admin.database.v1.DatabaseOrBuilder getListedDatabasesOrBuilder(int index); + + /** + * + * + *
    +   * List of operations returned by ListCloudDatabaseOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + java.util.List getListedDatabaseOperationsList(); + + /** + * + * + *
    +   * List of operations returned by ListCloudDatabaseOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + com.google.longrunning.Operation getListedDatabaseOperations(int index); + + /** + * + * + *
    +   * List of operations returned by ListCloudDatabaseOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + int getListedDatabaseOperationsCount(); + + /** + * + * + *
    +   * List of operations returned by ListCloudDatabaseOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + java.util.List + getListedDatabaseOperationsOrBuilderList(); + + /** + * + * + *
    +   * List of operations returned by ListCloudDatabaseOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_database_operations = 2; + */ + com.google.longrunning.OperationOrBuilder getListedDatabaseOperationsOrBuilder(int index); + + /** + * + * + *
    +   * "next_page_token" can be sent in a subsequent list action
    +   * to fetch more of the matching data.
    +   * 
    + * + * string next_page_token = 3; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
    +   * "next_page_token" can be sent in a subsequent list action
    +   * to fetch more of the matching data.
    +   * 
    + * + * string next_page_token = 3; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); + + /** + * + * + *
    +   * Database returned by GetCloudDatabaseAction
    +   * 
    + * + * .google.spanner.admin.database.v1.Database database = 4; + * + * @return Whether the database field is set. + */ + boolean hasDatabase(); + + /** + * + * + *
    +   * Database returned by GetCloudDatabaseAction
    +   * 
    + * + * .google.spanner.admin.database.v1.Database database = 4; + * + * @return The database. + */ + com.google.spanner.admin.database.v1.Database getDatabase(); + + /** + * + * + *
    +   * Database returned by GetCloudDatabaseAction
    +   * 
    + * + * .google.spanner.admin.database.v1.Database database = 4; + */ + com.google.spanner.admin.database.v1.DatabaseOrBuilder getDatabaseOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudExecutorProto.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudExecutorProto.java new file mode 100644 index 000000000000..85079577198b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudExecutorProto.java @@ -0,0 +1,1894 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public final class CloudExecutorProto extends com.google.protobuf.GeneratedFile { + private CloudExecutorProto() {} + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CloudExecutorProto"); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_SpannerAsyncActionRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_SpannerAsyncActionRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_SpannerAsyncActionResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_SpannerAsyncActionResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_SpannerAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_SpannerAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_ReadAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_spanner_executor_v1_ReadAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_QueryAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_QueryAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_QueryAction_Parameter_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_QueryAction_Parameter_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_DmlAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_DmlAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_BatchDmlAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_BatchDmlAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_Value_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_Value_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_KeyRange_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_KeyRange_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_KeySet_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_KeySet_fieldAccessorTable; + 
static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_ValueList_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_ValueList_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_MutationAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_MutationAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_MutationAction_InsertArgs_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_MutationAction_InsertArgs_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_MutationAction_UpdateArgs_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_MutationAction_UpdateArgs_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_MutationAction_Mod_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_MutationAction_Mod_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_WriteMutationsAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_WriteMutationsAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_PartitionedUpdateAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_spanner_executor_v1_PartitionedUpdateAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_PartitionedUpdateAction_ExecutePartitionedUpdateOptions_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_PartitionedUpdateAction_ExecutePartitionedUpdateOptions_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_StartTransactionAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_StartTransactionAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_Concurrency_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_Concurrency_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_TableMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_TableMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_ColumnMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_ColumnMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_TransactionExecutionOptions_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_TransactionExecutionOptions_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_spanner_executor_v1_FinishTransactionAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_FinishTransactionAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_AdminAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_AdminAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_CreateUserInstanceConfigAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_CreateUserInstanceConfigAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_UpdateUserInstanceConfigAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_UpdateUserInstanceConfigAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_UpdateUserInstanceConfigAction_LabelsEntry_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_UpdateUserInstanceConfigAction_LabelsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_GetCloudInstanceConfigAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_GetCloudInstanceConfigAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_DeleteUserInstanceConfigAction_descriptor; + static final 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_DeleteUserInstanceConfigAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_ListCloudInstanceConfigsAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_ListCloudInstanceConfigsAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_CreateCloudInstanceAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_CreateCloudInstanceAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_CreateCloudInstanceAction_LabelsEntry_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_CreateCloudInstanceAction_LabelsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_UpdateCloudInstanceAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_UpdateCloudInstanceAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_UpdateCloudInstanceAction_LabelsEntry_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_UpdateCloudInstanceAction_LabelsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_DeleteCloudInstanceAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_spanner_executor_v1_DeleteCloudInstanceAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_CreateCloudDatabaseAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_CreateCloudDatabaseAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_UpdateCloudDatabaseDdlAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_UpdateCloudDatabaseDdlAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_UpdateCloudDatabaseAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_UpdateCloudDatabaseAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_DropCloudDatabaseAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_DropCloudDatabaseAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_ChangeQuorumCloudDatabaseAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_ChangeQuorumCloudDatabaseAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_AdaptMessageAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_AdaptMessageAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_spanner_executor_v1_AdaptMessageAction_AttachmentsEntry_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_AdaptMessageAction_AttachmentsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_ListCloudDatabasesAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_ListCloudDatabasesAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_ListCloudInstancesAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_ListCloudInstancesAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_GetCloudInstanceAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_GetCloudInstanceAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_ListCloudDatabaseOperationsAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_ListCloudDatabaseOperationsAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_RestoreCloudDatabaseAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_RestoreCloudDatabaseAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_GetCloudDatabaseAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_spanner_executor_v1_GetCloudDatabaseAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_CreateCloudBackupAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_CreateCloudBackupAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_CopyCloudBackupAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_CopyCloudBackupAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_GetCloudBackupAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_GetCloudBackupAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_UpdateCloudBackupAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_UpdateCloudBackupAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_DeleteCloudBackupAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_DeleteCloudBackupAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_ListCloudBackupsAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_ListCloudBackupsAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_spanner_executor_v1_ListCloudBackupOperationsAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_ListCloudBackupOperationsAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_GetOperationAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_GetOperationAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_QueryCancellationAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_QueryCancellationAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_CancelOperationAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_CancelOperationAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_AddSplitPointsAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_AddSplitPointsAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_StartBatchTransactionAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_StartBatchTransactionAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_CloseBatchTransactionAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_spanner_executor_v1_CloseBatchTransactionAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_GenerateDbPartitionsForReadAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_GenerateDbPartitionsForReadAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_GenerateDbPartitionsForQueryAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_GenerateDbPartitionsForQueryAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_BatchPartition_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_BatchPartition_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_ExecutePartitionAction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_ExecutePartitionAction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_ExecuteChangeStreamQuery_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_ExecuteChangeStreamQuery_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_SpannerActionOutcome_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_SpannerActionOutcome_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_spanner_executor_v1_AdminResult_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_AdminResult_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_CloudBackupResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_CloudBackupResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_OperationResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_OperationResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_CloudInstanceResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_CloudInstanceResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_CloudInstanceConfigResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_CloudInstanceConfigResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_CloudDatabaseResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_CloudDatabaseResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_ReadResult_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_ReadResult_fieldAccessorTable; + static final 
com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_QueryResult_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_QueryResult_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_ChangeStreamRecord_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_ChangeStreamRecord_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_DataChangeRecord_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_DataChangeRecord_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_DataChangeRecord_ColumnType_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_DataChangeRecord_ColumnType_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_DataChangeRecord_Mod_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_DataChangeRecord_Mod_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_ChildPartitionsRecord_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_ChildPartitionsRecord_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_ChildPartitionsRecord_ChildPartition_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_spanner_executor_v1_ChildPartitionsRecord_ChildPartition_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_HeartbeatRecord_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_HeartbeatRecord_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_SpannerOptions_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_SpannerOptions_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_executor_v1_SessionPoolOptions_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_executor_v1_SessionPoolOptions_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n" + + "/google/spanner/executor/v1/cloud_execu" + + "tor.proto\022\032google.spanner.executor.v1\032\027g" + + "oogle/api/client.proto\032\037google/api/field" + + "_behavior.proto\032#google/longrunning/oper" + + "ations.proto\032\037google/protobuf/timestamp." 
+ + "proto\032\027google/rpc/status.proto\032-google/s" + + "panner/admin/database/v1/backup.proto\032-google/spanner/admin/database/v1/common.p" + + "roto\032=google/spanner/admin/database/v1/spanner_database_admin.proto\032=google/span" + + "ner/admin/instance/v1/spanner_instance_a" + + "dmin.proto\032\037google/spanner/v1/spanner.proto\032\034google/spanner/v1/type.proto\"i\n" + + "\031SpannerAsyncActionRequest\022\021\n" + + "\taction_id\030\001 \001(\005\0229\n" + + "\006action\030\002 \001(\0132).google.spanner.executor.v1.SpannerAction\"r\n" + + "\032SpannerAsyncActionResponse\022\021\n" + + "\taction_id\030\001 \001(\005\022A\n" + + "\007outcome\030\002" + + " \001(\01320.google.spanner.executor.v1.SpannerActionOutcome\"\272\013\n\r" + + "SpannerAction\022\025\n\r" + + "database_path\030\001 \001(\t\022C\n" + + "\017spanner_options\030\002 \001" + + "(\0132*.google.spanner.executor.v1.SpannerOptions\022C\n" + + "\005start\030\n" + + " \001(\01322.google.spanner.executor.v1.StartTransactionActionH\000\022E\n" + + "\006finish\030\013" + + " \001(\01323.google.spanner.executor.v1.FinishTransactionActionH\000\0226\n" + + "\004read\030\024 \001(\0132&.google.spanner.executor.v1.ReadActionH\000\0228\n" + + "\005query\030\025 \001(\0132\'.google.spanner.executor.v1.QueryActionH\000\022>\n" + + "\010mutation\030\026 \001(\0132*.google.spanner.executor.v1.MutationActionH\000\0224\n" + + "\003dml\030\027 \001(\0132%.google.spanner.executor.v1.DmlActionH\000\022?\n" + + "\tbatch_dml\030\030 \001(\0132*.google.spanner.executor.v1.BatchDmlActionH\000\022A\n" + + "\005write\030\031" + + " \001(\01320.google.spanner.executor.v1.WriteMutationsActionH\000\022Q\n" + + "\022partitioned_update\030\033" + + " \001(\01323.google.spanner.executor.v1.PartitionedUpdateActionH\000\0228\n" + + "\005admin\030\036 \001(\0132\'.google.spanner.executor.v1.AdminActionH\000\022R\n" + + "\017start_batch_txn\030( \001(\01327.g" + + 
"oogle.spanner.executor.v1.StartBatchTransactionActionH\000\022R\n" + + "\017close_batch_txn\030) \001(\013" + + "27.google.spanner.executor.v1.CloseBatchTransactionActionH\000\022d\n" + + "\033generate_db_partitions_read\030* \001(\0132=.google.spanner.execut" + + "or.v1.GenerateDbPartitionsForReadActionH\000\022f\n" + + "\034generate_db_partitions_query\030+ \001(\0132" + + ">.google.spanner.executor.v1.GenerateDbPartitionsForQueryActionH\000\022O\n" + + "\021execute_partition\030," + + " \001(\01322.google.spanner.executor.v1.ExecutePartitionActionH\000\022[\n" + + "\033execute_change_stream_query\0302 \001(\01324.google.spanner" + + ".executor.v1.ExecuteChangeStreamQueryH\000\022Q\n" + + "\022query_cancellation\0303 \001(\01323.google.spa" + + "nner.executor.v1.QueryCancellationActionH\000\022G\n\r" + + "adapt_message\0304" + + " \001(\0132..google.spanner.executor.v1.AdaptMessageActionH\000B\010\n" + + "\006action\"\212\001\n\n" + + "ReadAction\022\r\n" + + "\005table\030\001 \001(\t\022\022\n" + + "\005index\030\002 \001(\tH\000\210\001\001\022\016\n" + + "\006column\030\003 \003(\t\0220\n" + + "\004keys\030\004 \001(\0132\".google.spanner.executor.v1.KeySet\022\r\n" + + "\005limit\030\005 \001(\005B\010\n" + + "\006_index\"\321\001\n" + + "\013QueryAction\022\013\n" + + "\003sql\030\001 \001(\t\022A\n" + + "\006params\030\002 \003(\01321.google.spanner.executor.v1.QueryAction.Parameter\032r\n" + + "\tParameter\022\014\n" + + "\004name\030\001 \001(\t\022%\n" + + "\004type\030\002 \001(\0132\027.google.spanner.v1.Type\0220\n" + + "\005value\030\003 \001(\0132!.google.spanner.executor.v1.Value\"\266\001\n" + + "\tDmlAction\0227\n" + + "\006update\030\001 \001(\0132\'.google.spanner.executor.v1.QueryAction\022$\n" + + "\027autocommit_if_supported\030\002 \001(\010H\000\210\001\001\022\033\n" + + "\016last_statement\030\003 \001(\010H\001\210\001\001B\032\n" + + "\030_autocommit_if_supportedB\021\n" + + "\017_last_statement\"|\n" + + "\016BatchDmlAction\0228\n" 
+ + "\007updates\030\001 \003(\0132\'.google.spanner.executor.v1.QueryAction\022\034\n" + + "\017last_statements\030\002 \001(\010H\000\210\001\001B\022\n" + + "\020_last_statements\"\311\003\n" + + "\005Value\022\021\n" + + "\007is_null\030\001 \001(\010H\000\022\023\n" + + "\tint_value\030\002 \001(\003H\000\022\024\n\n" + + "bool_value\030\003 \001(\010H\000\022\026\n" + + "\014double_value\030\004 \001(\001H\000\022\025\n" + + "\013bytes_value\030\005 \001(\014H\000\022\026\n" + + "\014string_value\030\006 \001(\tH\000\022=\n" + + "\014struct_value\030\007 \001(\0132%.google.spanner.executor.v1.ValueListH\000\0225\n" + + "\017timestamp_value\030\010 \001(\0132\032.google.protobuf.TimestampH\000\022\031\n" + + "\017date_days_value\030\t \001(\005H\000\022\035\n" + + "\023is_commit_timestamp\030\n" + + " \001(\010H\000\022<\n" + + "\013array_value\030\013 \001(\0132%.google.spanner.executor.v1.ValueListH\000\0220\n\n" + + "array_type\030\014 \001(\0132\027.google.spanner.v1.TypeH\001\210\001\001B\014\n\n" + + "value_typeB\r\n" + + "\013_array_type\"\237\002\n" + + "\010KeyRange\0224\n" + + "\005start\030\001 \001(\0132%.google.spanner.executor.v1.ValueList\0224\n" + + "\005limit\030\002 \001(\0132%.google.spanner.executor.v1.ValueList\022<\n" + + "\004type\030\003" + + " \001(\0162).google.spanner.executor.v1.KeyRange.TypeH\000\210\001\001\"`\n" + + "\004Type\022\024\n" + + "\020TYPE_UNSPECIFIED\020\000\022\021\n\r" + + "CLOSED_CLOSED\020\001\022\017\n" + + "\013CLOSED_OPEN\020\002\022\017\n" + + "\013OPEN_CLOSED\020\003\022\r\n" + + "\tOPEN_OPEN\020\004B\007\n" + + "\005_type\"\200\001\n" + + "\006KeySet\0224\n" + + "\005point\030\001 \003(\0132%.google.spanner.executor.v1.ValueList\0223\n" + + "\005range\030\002 \003(\0132$.google.spanner.executor.v1.KeyRange\022\013\n" + + "\003all\030\003 \001(\010\"=\n" + + "\tValueList\0220\n" + + "\005value\030\001 \003(\0132!.google.spanner.executor.v1.Value\"\274\005\n" + + "\016MutationAction\022;\n" + + 
"\003mod\030\001 \003(\0132..google.spanner.executor.v1.MutationAction.Mod\032z\n\n" + + "InsertArgs\022\016\n" + + "\006column\030\001 \003(\t\022%\n" + + "\004type\030\002 \003(\0132\027.google.spanner.v1.Type\0225\n" + + "\006values\030\003 \003(\0132%.google.spanner.executor.v1.ValueList\032z\n\n" + + "UpdateArgs\022\016\n" + + "\006column\030\001 \003(\t\022%\n" + + "\004type\030\002 \003(\0132\027.google.spanner.v1.Type\0225\n" + + "\006values\030\003 \003(\0132%.google.spanner.executor.v1.ValueList\032\364\002\n" + + "\003Mod\022\r\n" + + "\005table\030\001 \001(\t\022E\n" + + "\006insert\030\002" + + " \001(\01325.google.spanner.executor.v1.MutationAction.InsertArgs\022E\n" + + "\006update\030\003" + + " \001(\01325.google.spanner.executor.v1.MutationAction.UpdateArgs\022O\n" + + "\020insert_or_update\030\004" + + " \001(\01325.google.spanner.executor.v1.MutationAction.InsertArgs\022F\n" + + "\007replace\030\005" + + " \001(\01325.google.spanner.executor.v1.MutationAction.InsertArgs\0227\n" + + "\013delete_keys\030\006 \001(\0132\".google.spanner.executor.v1.KeySet\"T\n" + + "\024WriteMutationsAction\022<\n" + + "\010mutation\030\001 \001(\0132*.google.spanner.executor.v1.MutationAction\"\337\002\n" + + "\027PartitionedUpdateAction\022i\n" + + "\007options\030\001 \001(\0132S.google.spanner.executor.v1.Par" + + "titionedUpdateAction.ExecutePartitionedUpdateOptionsH\000\210\001\001\0227\n" + + "\006update\030\002 \001(\0132\'.google.spanner.executor.v1.QueryAction\032\223\001\n" + + "\037ExecutePartitionedUpdateOptions\022E\n" + + "\014rpc_priority\030\001" + + " \001(\0162*.google.spanner.v1.RequestOptions.PriorityH\000\210\001\001\022\020\n" + + "\003tag\030\002 \001(\tH\001\210\001\001B\017\n\r" + + "_rpc_priorityB\006\n" + + "\004_tagB\n\n" + + "\010_options\"\256\002\n" + + "\026StartTransactionAction\022A\n" + + "\013concurrency\030\001" + + " \001(\0132\'.google.spanner.executor.v1.ConcurrencyH\000\210\001\001\0228\n" + + "\005table\030\002 
\003(\0132).google.spanner.executor.v1.TableMetadata\022\030\n" + + "\020transaction_seed\030\003 \001(\t\022W\n" + + "\021execution_options\030\004 \001(" + + "\01327.google.spanner.executor.v1.TransactionExecutionOptionsH\001\210\001\001B\016\n" + + "\014_concurrencyB\024\n" + + "\022_execution_options\"\256\002\n" + + "\013Concurrency\022\033\n" + + "\021staleness_seconds\030\001 \001(\001H\000\022#\n" + + "\031min_read_timestamp_micros\030\002 \001(\003H\000\022\037\n" + + "\025max_staleness_seconds\030\003 \001(\001H\000\022 \n" + + "\026exact_timestamp_micros\030\004 \001(\003H\000\022\020\n" + + "\006strong\030\005 \001(\010H\000\022\017\n" + + "\005batch\030\006 \001(\010H\000\022\033\n" + + "\023snapshot_epoch_read\030\007 \001(\010\022!\n" + + "\031snapshot_epoch_root_table\030\010 \001(\t\022#\n" + + "\033batch_read_timestamp_micros\030\t \001(\003B\022\n" + + "\020concurrency_mode\"\231\001\n\r" + + "TableMetadata\022\014\n" + + "\004name\030\001 \001(\t\022:\n" + + "\006column\030\002 \003(\0132*.google.spanner.executor.v1.ColumnMetadata\022>\n\n" + + "key_column\030\003 \003(\0132*.google.spanner.executor.v1.ColumnMetadata\"E\n" + + "\016ColumnMetadata\022\014\n" + + "\004name\030\001 \001(\t\022%\n" + + "\004type\030\002 \001(\0132\027.google.spanner.v1.Type\"\357\001\n" + + "\033TransactionExecutionOptions\022\022\n\n" + + "optimistic\030\001 \001(\010\022#\n" + + "\033exclude_from_change_streams\030\002 \001(\010\022\037\n" + + "\027serializable_optimistic\030\003 \001(\010\022%\n" + + "\035snapshot_isolation_optimistic\030\004 \001(\010\022&\n" + + "\036snapshot_isolation_pessimistic\030\005 \001(\010\022\'\n" + + "\037exclude_txn_from_change_streams\030\006 \001(\010\"\230\001\n" + + "\027FinishTransactionAction\022F\n" + + "\004mode\030\001 \001(\01628." 
+ + "google.spanner.executor.v1.FinishTransactionAction.Mode\"5\n" + + "\004Mode\022\024\n" + + "\020MODE_UNSPECIFIED\020\000\022\n\n" + + "\006COMMIT\020\001\022\013\n" + + "\007ABANDON\020\002\"\226\024\n" + + "\013AdminAction\022a\n" + + "\033create_user_instance_config\030\001 " + + "\001(\0132:.google.spanner.executor.v1.CreateUserInstanceConfigActionH\000\022a\n" + + "\033update_user_instance_config\030\002 \001(\0132:.google.spanner." + + "executor.v1.UpdateUserInstanceConfigActionH\000\022a\n" + + "\033delete_user_instance_config\030\003 \001(" + + "\0132:.google.spanner.executor.v1.DeleteUserInstanceConfigActionH\000\022]\n" + + "\031get_cloud_instance_config\030\004 \001(\01328.google.spanner.exec" + + "utor.v1.GetCloudInstanceConfigActionH\000\022[\n" + + "\025list_instance_configs\030\005 \001(\0132:.google.s" + + "panner.executor.v1.ListCloudInstanceConfigsActionH\000\022V\n" + + "\025create_cloud_instance\030\006 \001" + + "(\01325.google.spanner.executor.v1.CreateCloudInstanceActionH\000\022V\n" + + "\025update_cloud_instance\030\007" + + " \001(\01325.google.spanner.executor.v1.UpdateCloudInstanceActionH\000\022V\n" + + "\025delete_cloud_instance\030\010" + + " \001(\01325.google.spanner.executor.v1.DeleteCloudInstanceActionH\000\022T\n" + + "\024list_cloud_instances\030\t \001(\01324.google.spann" + + "er.executor.v1.ListCloudInstancesActionH\000\022P\n" + + "\022get_cloud_instance\030\n" + + " \001(\01322.google.spanner.executor.v1.GetCloudInstanceActionH\000\022V\n" + + "\025create_cloud_database\030\013 \001(\01325.goo" + + "gle.spanner.executor.v1.CreateCloudDatabaseActionH\000\022]\n" + + "\031update_cloud_database_ddl\030\014" + + " \001(\01328.google.spanner.executor.v1.UpdateCloudDatabaseDdlActionH\000\022V\n" + + "\025update_cloud_database\030\033" + + " \001(\01325.google.spanner.executor.v1.UpdateCloudDatabaseActionH\000\022R\n" + + "\023drop_cloud_database\030\r" + + " 
\001(\01323.google.spanner.executor.v1.DropCloudDatabaseActionH\000\022T\n" + + "\024list_cloud_databases\030\016 \001(\01324.google.sp" + + "anner.executor.v1.ListCloudDatabasesActionH\000\022g\n" + + "\036list_cloud_database_operations\030\017" + + " \001(\0132=.google.spanner.executor.v1.ListCloudDatabaseOperationsActionH\000\022X\n" + + "\026restore_cloud_database\030\020 \001(\01326.google.spanner.e" + + "xecutor.v1.RestoreCloudDatabaseActionH\000\022P\n" + + "\022get_cloud_database\030\021 \001(\01322.google.spa" + + "nner.executor.v1.GetCloudDatabaseActionH\000\022R\n" + + "\023create_cloud_backup\030\022 \001(\01323.google." + + "spanner.executor.v1.CreateCloudBackupActionH\000\022N\n" + + "\021copy_cloud_backup\030\023 \001(\01321.googl" + + "e.spanner.executor.v1.CopyCloudBackupActionH\000\022L\n" + + "\020get_cloud_backup\030\024 \001(\01320.google" + + ".spanner.executor.v1.GetCloudBackupActionH\000\022R\n" + + "\023update_cloud_backup\030\025 \001(\01323.googl" + + "e.spanner.executor.v1.UpdateCloudBackupActionH\000\022R\n" + + "\023delete_cloud_backup\030\026 \001(\01323.g" + + "oogle.spanner.executor.v1.DeleteCloudBackupActionH\000\022P\n" + + "\022list_cloud_backups\030\027 \001(\0132" + + "2.google.spanner.executor.v1.ListCloudBackupsActionH\000\022c\n" + + "\034list_cloud_backup_operations\030\030" + + " \001(\0132;.google.spanner.executor.v1.ListCloudBackupOperationsActionH\000\022G\n\r" + + "get_operation\030\031" + + " \001(\0132..google.spanner.executor.v1.GetOperationActionH\000\022M\n" + + "\020cancel_operation\030\032" + + " \001(\01321.google.spanner.executor.v1.CancelOperationActionH\000\022c\n" + + "\034change_quorum_cloud_database\030\034 \001(\0132;.google.spanne" + + "r.executor.v1.ChangeQuorumCloudDatabaseActionH\000\022L\n" + + "\020add_split_points\030\035 \001(\01320.goog" + + "le.spanner.executor.v1.AddSplitPointsActionH\000B\010\n" + + "\006action\"\245\001\n" + + "\036CreateUserInstanceConfigAction\022\026\n" + + 
"\016user_config_id\030\001 \001(\t\022\022\n\n" + + "project_id\030\002 \001(\t\022\026\n" + + "\016base_config_id\030\003 \001(\t\022?\n" + + "\010replicas\030\004" + + " \003(\0132-.google.spanner.admin.instance.v1.ReplicaInfo\"\377\001\n" + + "\036UpdateUserInstanceConfigAction\022\026\n" + + "\016user_config_id\030\001 \001(\t\022\022\n\n" + + "project_id\030\002 \001(\t\022\031\n" + + "\014display_name\030\003 \001(\tH\000\210\001\001\022V\n" + + "\006labels\030\004 \003(\0132F.google.spann" + + "er.executor.v1.UpdateUserInstanceConfigAction.LabelsEntry\032-\n" + + "\013LabelsEntry\022\013\n" + + "\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001B\017\n\r" + + "_display_name\"N\n" + + "\034GetCloudInstanceConfigAction\022\032\n" + + "\022instance_config_id\030\001 \001(\t\022\022\n\n" + + "project_id\030\002 \001(\t\"L\n" + + "\036DeleteUserInstanceConfigAction\022\026\n" + + "\016user_config_id\030\001 \001(\t\022\022\n\n" + + "project_id\030\002 \001(\t\"\202\001\n" + + "\036ListCloudInstanceConfigsAction\022\022\n\n" + + "project_id\030\001 \001(\t\022\026\n" + + "\tpage_size\030\002 \001(\005H\000\210\001\001\022\027\n\n" + + "page_token\030\003 \001(\tH\001\210\001\001B\014\n\n" + + "_page_sizeB\r\n" + + "\013_page_token\"\360\003\n" + + "\031CreateCloudInstanceAction\022\023\n" + + "\013instance_id\030\001 \001(\t\022\022\n\n" + + "project_id\030\002 \001(\t\022\032\n" + + "\022instance_config_id\030\003 \001(\t\022\027\n\n" + + "node_count\030\004 \001(\005H\000\210\001\001\022\035\n" + + "\020processing_units\030\006 \001(\005H\001\210\001\001\022T\n" + + "\022autoscaling_config\030\007 \001(\01323.go" + + "ogle.spanner.admin.instance.v1.AutoscalingConfigH\002\210\001\001\022Q\n" + + "\006labels\030\005 \003(\0132A.google.s" + + "panner.executor.v1.CreateCloudInstanceAction.LabelsEntry\022C\n" + + "\007edition\030\010 \001(\01622.goog" + + "le.spanner.admin.instance.v1.Instance.Edition\032-\n" + + "\013LabelsEntry\022\013\n" + 
+ "\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001B\r\n" + + "\013_node_countB\023\n" + + "\021_processing_unitsB\025\n" + + "\023_autoscaling_config\"\200\004\n" + + "\031UpdateCloudInstanceAction\022\023\n" + + "\013instance_id\030\001 \001(\t\022\022\n\n" + + "project_id\030\002 \001(\t\022\031\n" + + "\014display_name\030\003 \001(\tH\000\210\001\001\022\027\n\n" + + "node_count\030\004 \001(\005H\001\210\001\001\022\035\n" + + "\020processing_units\030\005 \001(\005H\002\210\001\001\022T\n" + + "\022autoscaling_config\030\007" + + " \001(\01323.google.spanner.admin.instance.v1.AutoscalingConfigH\003\210\001\001\022Q\n" + + "\006labels\030\006" + + " \003(\0132A.google.spanner.executor.v1.UpdateCloudInstanceAction.LabelsEntry\022C\n" + + "\007edition\030\010" + + " \001(\01622.google.spanner.admin.instance.v1.Instance.Edition\032-\n" + + "\013LabelsEntry\022\013\n" + + "\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001B\017\n\r" + + "_display_nameB\r\n" + + "\013_node_countB\023\n" + + "\021_processing_unitsB\025\n" + + "\023_autoscaling_config\"D\n" + + "\031DeleteCloudInstanceAction\022\023\n" + + "\013instance_id\030\001 \001(\t\022\022\n\n" + + "project_id\030\002 \001(\t\"\227\002\n" + + "\031CreateCloudDatabaseAction\022\023\n" + + "\013instance_id\030\001 \001(\t\022\022\n\n" + + "project_id\030\002 \001(\t\022\023\n" + + "\013database_id\030\003 \001(\t\022\025\n\r" + + "sdl_statement\030\004 \003(\t\022M\n" + + "\021encryption_config\030\005 \001(\01322" + + ".google.spanner.admin.database.v1.EncryptionConfig\022\024\n" + + "\007dialect\030\006 \001(\tH\000\210\001\001\022\036\n" + + "\021proto_descriptors\030\007 \001(\014H\001\210\001\001B\n\n" + + "\010_dialectB\024\n" + + "\022_proto_descriptors\"\277\001\n" + + "\034UpdateCloudDatabaseDdlAction\022\023\n" + + "\013instance_id\030\001 \001(\t\022\022\n\n" + + "project_id\030\002 \001(\t\022\023\n" + + "\013database_id\030\003 \001(\t\022\025\n\r" + 
+ "sdl_statement\030\004 \003(\t\022\024\n" + + "\014operation_id\030\005 \001(\t\022\036\n" + + "\021proto_descriptors\030\006 \001(\014H\000\210\001\001B\024\n" + + "\022_proto_descriptors\"{\n" + + "\031UpdateCloudDatabaseAction\022\023\n" + + "\013instance_id\030\001 \001(\t\022\022\n\n" + + "project_id\030\002 \001(\t\022\025\n\r" + + "database_name\030\003 \001(\t\022\036\n" + + "\026enable_drop_protection\030\004 \001(\010\"W\n" + + "\027DropCloudDatabaseAction\022\023\n" + + "\013instance_id\030\001 \001(\t\022\022\n\n" + + "project_id\030\002 \001(\t\022\023\n" + + "\013database_id\030\003 \001(\t\"h\n" + + "\037ChangeQuorumCloudDatabaseAction\022\031\n" + + "\014database_uri\030\001 \001(\tH\000\210\001\001\022\031\n" + + "\021serving_locations\030\002 \003(\tB\017\n\r" + + "_database_uri\"\204\002\n" + + "\022AdaptMessageAction\022\024\n" + + "\014database_uri\030\001 \001(\t\022\020\n" + + "\010protocol\030\002 \001(\t\022\017\n" + + "\007payload\030\003 \001(\014\022T\n" + + "\013attachments\030\004 \003(\0132?.goog" + + "le.spanner.executor.v1.AdaptMessageAction.AttachmentsEntry\022\r\n" + + "\005query\030\005 \001(\t\022\034\n" + + "\024prepare_then_execute\030\006 \001(\010\0322\n" + + "\020AttachmentsEntry\022\013\n" + + "\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001\"j\n" + + "\030ListCloudDatabasesAction\022\022\n\n" + + "project_id\030\001 \001(\t\022\023\n" + + "\013instance_id\030\002 \001(\t\022\021\n" + + "\tpage_size\030\003 \001(\005\022\022\n\n" + + "page_token\030\004 \001(\t\"\234\001\n" + + "\030ListCloudInstancesAction\022\022\n\n" + + "project_id\030\001 \001(\t\022\023\n" + + "\006filter\030\002 \001(\tH\000\210\001\001\022\026\n" + + "\tpage_size\030\003 \001(\005H\001\210\001\001\022\027\n" + + "\n" + + "page_token\030\004 \001(\tH\002\210\001\001B\t\n" + + "\007_filterB\014\n\n" + + "_page_sizeB\r\n" + + "\013_page_token\"A\n" + + "\026GetCloudInstanceAction\022\022\n\n" + + "project_id\030\001 
\001(\t\022\023\n" + + "\013instance_id\030\002 \001(\t\"\203\001\n" + + "!ListCloudDatabaseOperationsAction\022\022\n\n" + + "project_id\030\001 \001(\t\022\023\n" + + "\013instance_id\030\002 \001(\t\022\016\n" + + "\006filter\030\003 \001(\t\022\021\n" + + "\tpage_size\030\004 \001(\005\022\022\n\n" + + "page_token\030\005 \001(\t\"\341\001\n" + + "\032RestoreCloudDatabaseAction\022\022\n\n" + + "project_id\030\001 \001(\t\022\032\n" + + "\022backup_instance_id\030\002 \001(\t\022\021\n" + + "\tbackup_id\030\003 \001(\t\022\034\n" + + "\024database_instance_id\030\004 \001(\t\022\023\n" + + "\013database_id\030\005 \001(\t\022M\n" + + "\021encryption_config\030\007 \001(\0132" + + "2.google.spanner.admin.database.v1.EncryptionConfig\"V\n" + + "\026GetCloudDatabaseAction\022\022\n" + + "\n" + + "project_id\030\001 \001(\t\022\023\n" + + "\013instance_id\030\002 \001(\t\022\023\n" + + "\013database_id\030\003 \001(\t\"\267\002\n" + + "\027CreateCloudBackupAction\022\022\n\n" + + "project_id\030\001 \001(\t\022\023\n" + + "\013instance_id\030\002 \001(\t\022\021\n" + + "\tbackup_id\030\003 \001(\t\022\023\n" + + "\013database_id\030\004 \001(\t\0224\n" + + "\013expire_time\030\005 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\0225\n" + + "\014version_time\030\006" + + " \001(\0132\032.google.protobuf.TimestampH\000\210\001\001\022M\n" + + "\021encryption_config\030\007" + + " \001(\01322.google.spanner.admin.database.v1.EncryptionConfigB\017\n\r" + + "_version_time\"\240\001\n" + + "\025CopyCloudBackupAction\022\022\n\n" + + "project_id\030\001 \001(\t\022\023\n" + + "\013instance_id\030\002 \001(\t\022\021\n" + + "\tbackup_id\030\003 \001(\t\022\025\n\r" + + "source_backup\030\004 \001(\t\0224\n" + + "\013expire_time\030\005 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\"R\n" + + "\024GetCloudBackupAction\022\022\n\n" + + "project_id\030\001 \001(\t\022\023\n" + + "\013instance_id\030\002 \001(\t\022\021\n" + + "\tbackup_id\030\003 
\001(\t\"\213\001\n" + + "\027UpdateCloudBackupAction\022\022\n\n" + + "project_id\030\001 \001(\t\022\023\n" + + "\013instance_id\030\002 \001(\t\022\021\n" + + "\tbackup_id\030\003 \001(\t\0224\n" + + "\013expire_time\030\004 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\"U\n" + + "\027DeleteCloudBackupAction\022\022\n\n" + + "project_id\030\001 \001(\t\022\023\n" + + "\013instance_id\030\002 \001(\t\022\021\n" + + "\tbackup_id\030\003 \001(\t\"x\n" + + "\026ListCloudBackupsAction\022\022\n\n" + + "project_id\030\001 \001(\t\022\023\n" + + "\013instance_id\030\002 \001(\t\022\016\n" + + "\006filter\030\003 \001(\t\022\021\n" + + "\tpage_size\030\004 \001(\005\022\022\n\n" + + "page_token\030\005 \001(\t\"\201\001\n" + + "\037ListCloudBackupOperationsAction\022\022\n\n" + + "project_id\030\001 \001(\t\022\023\n" + + "\013instance_id\030\002 \001(\t\022\016\n" + + "\006filter\030\003 \001(\t\022\021\n" + + "\tpage_size\030\004 \001(\005\022\022\n\n" + + "page_token\030\005 \001(\t\"\'\n" + + "\022GetOperationAction\022\021\n" + + "\toperation\030\001 \001(\t\"I\n" + + "\027QueryCancellationAction\022\030\n" + + "\020long_running_sql\030\001 \001(\t\022\024\n" + + "\014cancel_query\030\002 \001(\t\"*\n" + + "\025CancelOperationAction\022\021\n" + + "\toperation\030\001 \001(\t\"\231\001\n" + + "\024AddSplitPointsAction\022\022\n\n" + + "project_id\030\001 \001(\t\022\023\n" + + "\013instance_id\030\002 \001(\t\022\023\n" + + "\013database_id\030\003 \001(\t\022C\n" + + "\014split_points\030\004" + + " \003(\0132-.google.spanner.admin.database.v1.SplitPoints\"\210\001\n" + + "\033StartBatchTransactionAction\0224\n" + + "\016batch_txn_time\030\001 \001(\0132\032.google.protobuf.TimestampH\000\022\r\n" + + "\003tid\030\002 \001(\014H\000\022\033\n" + + "\023cloud_database_role\030\003 \001(\tB\007\n" + + "\005param\".\n" + + "\033CloseBatchTransactionAction\022\017\n" + + "\007cleanup\030\001 \001(\010\"\227\002\n" + + 
"!GenerateDbPartitionsForReadAction\0224\n" + + "\004read\030\001 \001(\0132&.google.spanner.executor.v1.ReadAction\0228\n" + + "\005table\030\002 \003(\0132).google.spanner.executor.v1.TableMetadata\022(\n" + + "\033desired_bytes_per_partition\030\003 \001(\003H\000\210\001\001\022 \n" + + "\023max_partition_count\030\004 \001(\003H\001\210\001\001B\036\n" + + "\034_desired_bytes_per_partitionB\026\n" + + "\024_max_partition_count\"\246\001\n" + + "\"GenerateDbPartitionsForQueryAction\0226\n" + + "\005query\030\001 \001(\0132\'.google.spanner.executor.v1.QueryAction\022(\n" + + "\033desired_bytes_per_partition\030\002 \001(\003H\000\210\001\001B\036\n" + + "\034_desired_bytes_per_partition\"x\n" + + "\016BatchPartition\022\021\n" + + "\tpartition\030\001 \001(\014\022\027\n" + + "\017partition_token\030\002 \001(\014\022\022\n" + + "\005table\030\003 \001(\tH\000\210\001\001\022\022\n" + + "\005index\030\004 \001(\tH\001\210\001\001B\010\n" + + "\006_tableB\010\n" + + "\006_index\"W\n" + + "\026ExecutePartitionAction\022=\n" + + "\tpartition\030\001 \001(\0132*.google.spanner.executor.v1.BatchPartition\"\216\003\n" + + "\030ExecuteChangeStreamQuery\022\014\n" + + "\004name\030\001 \001(\t\022.\n\n" + + "start_time\030\002 \001(\0132\032.google.protobuf.Timestamp\0221\n" + + "\010end_time\030\003" + + " \001(\0132\032.google.protobuf.TimestampH\000\210\001\001\022\034\n" + + "\017partition_token\030\004 \001(\tH\001\210\001\001\022\024\n" + + "\014read_options\030\005 \003(\t\022#\n" + + "\026heartbeat_milliseconds\030\006 \001(\005H\002\210\001\001\022\035\n" + + "\020deadline_seconds\030\007 \001(\003H\003\210\001\001\022 \n" + + "\023cloud_database_role\030\010 \001(\tH\004\210\001\001B\013\n" + + "\t_end_timeB\022\n" + + "\020_partition_tokenB\031\n" + + "\027_heartbeat_millisecondsB\023\n" + + "\021_deadline_secondsB\026\n" + + "\024_cloud_database_role\"\200\006\n" + + "\024SpannerActionOutcome\022\'\n" + + "\006status\030\001 
\001(\0132\022.google.rpc.StatusH\000\210\001\001\0224\n" + + "\013commit_time\030\002" + + " \001(\0132\032.google.protobuf.TimestampH\001\210\001\001\022@\n" + + "\013read_result\030\003" + + " \001(\0132&.google.spanner.executor.v1.ReadResultH\002\210\001\001\022B\n" + + "\014query_result\030\004 \001(\0132\'." + + "google.spanner.executor.v1.QueryResultH\003\210\001\001\022\"\n" + + "\025transaction_restarted\030\005 \001(\010H\004\210\001\001\022\031\n" + + "\014batch_txn_id\030\006 \001(\014H\005\210\001\001\022@\n" + + "\014db_partition\030\007 \003(\0132*.google.spanner.executor.v1.BatchPartition\022B\n" + + "\014admin_result\030\010 \001(\0132\'.goo" + + "gle.spanner.executor.v1.AdminResultH\006\210\001\001\022\031\n" + + "\021dml_rows_modified\030\t \003(\003\022M\n" + + "\025change_stream_records\030\n" + + " \003(\0132..google.spanner.executor.v1.ChangeStreamRecord\0222\n" + + "%snapshot_isolation_txn_read_timestamp\030\013 \001(\003H\007\210\001\001B\t\n" + + "\007_statusB\016\n" + + "\014_commit_timeB\016\n" + + "\014_read_resultB\017\n\r" + + "_query_resultB\030\n" + + "\026_transaction_restartedB\017\n\r" + + "_batch_txn_idB\017\n\r" + + "_admin_resultB(\n" + + "&_snapshot_isolation_txn_read_timestamp\"\231\003\n" + + "\013AdminResult\022H\n" + + "\017backup_response\030\001 \001(" + + "\0132/.google.spanner.executor.v1.CloudBackupResponse\022I\n" + + "\022operation_response\030\002 \001(\0132-" + + ".google.spanner.executor.v1.OperationResponse\022L\n" + + "\021database_response\030\003 \001(\01321.googl", + "e.spanner.executor.v1.CloudDatabaseResponse\022L\n" + + "\021instance_response\030\004 \001(\01321.google." 
+ + "spanner.executor.v1.CloudInstanceResponse\022Y\n" + + "\030instance_config_response\030\005 \001(\01327.go" + + "ogle.spanner.executor.v1.CloudInstanceConfigResponse\"\353\001\n" + + "\023CloudBackupResponse\022@\n" + + "\016listed_backups\030\001 \003(\0132(.google.spanner.admin.database.v1.Backup\022?\n" + + "\030listed_backup_operations\030\002" + + " \003(\0132\035.google.longrunning.Operation\022\027\n" + + "\017next_page_token\030\003 \001(\t\0228\n" + + "\006backup\030\004 \001(\0132(.google.spanner.admin.database.v1.Backup\"\230\001\n" + + "\021OperationResponse\0228\n" + + "\021listed_operations\030\001 \003(\0132\035.google.longrunning.Operation\022\027\n" + + "\017next_page_token\030\002 \001(\t\0220\n" + + "\toperation\030\003 \001(\0132\035.google.longrunning.Operation\"\264\001\n" + + "\025CloudInstanceResponse\022D\n" + + "\020listed_instances\030\001" + + " \003(\0132*.google.spanner.admin.instance.v1.Instance\022\027\n" + + "\017next_page_token\030\002 \001(\t\022<\n" + + "\010instance\030\003 \001(\0132*.google.spanner.admin.instance.v1.Instance\"\324\001\n" + + "\033CloudInstanceConfigResponse\022Q\n" + + "\027listed_instance_configs\030\001" + + " \003(\01320.google.spanner.admin.instance.v1.InstanceConfig\022\027\n" + + "\017next_page_token\030\002 \001(\t\022I\n" + + "\017instance_config\030\003 \001(\01320.goog" + + "le.spanner.admin.instance.v1.InstanceConfig\"\367\001\n" + + "\025CloudDatabaseResponse\022D\n" + + "\020listed_databases\030\001" + + " \003(\0132*.google.spanner.admin.database.v1.Database\022A\n" + + "\032listed_database_operations\030\002" + + " \003(\0132\035.google.longrunning.Operation\022\027\n" + + "\017next_page_token\030\003 \001(\t\022<\n" + + "\010database\030\004" + + " \001(\0132*.google.spanner.admin.database.v1.Database\"\336\001\n\n" + + "ReadResult\022\r\n" + + "\005table\030\001 \001(\t\022\022\n" + + "\005index\030\002 \001(\tH\000\210\001\001\022\032\n\r" + + "request_index\030\003 
\001(\005H\001\210\001\001\0222\n" + + "\003row\030\004 \003(\0132%.google.spanner.executor.v1.ValueList\0224\n" + + "\010row_type\030\005" + + " \001(\0132\035.google.spanner.v1.StructTypeH\002\210\001\001B\010\n" + + "\006_indexB\020\n" + + "\016_request_indexB\013\n" + + "\t_row_type\"\204\001\n" + + "\013QueryResult\0222\n" + + "\003row\030\001 \003(\0132%.google.spanner.executor.v1.ValueList\0224\n" + + "\010row_type\030\002" + + " \001(\0132\035.google.spanner.v1.StructTypeH\000\210\001\001B\013\n" + + "\t_row_type\"\363\001\n" + + "\022ChangeStreamRecord\022C\n" + + "\013data_change\030\001" + + " \001(\0132,.google.spanner.executor.v1.DataChangeRecordH\000\022L\n" + + "\017child_partition\030\002" + + " \001(\01321.google.spanner.executor.v1.ChildPartitionsRecordH\000\022@\n" + + "\theartbeat\030\003 \001(\0132+.google.spanner.executor.v1.HeartbeatRecordH\000B\010\n" + + "\006record\"\330\004\n" + + "\020DataChangeRecord\022/\n" + + "\013commit_time\030\001 \001(\0132\032.google.protobuf.Timestamp\022\027\n" + + "\017record_sequence\030\002 \001(\t\022\026\n" + + "\016transaction_id\030\003 \001(\t\022\026\n" + + "\016is_last_record\030\004 \001(\010\022\r\n" + + "\005table\030\005 \001(\t\022M\n" + + "\014column_types\030\006 \003(\0132" + + "7.google.spanner.executor.v1.DataChangeRecord.ColumnType\022>\n" + + "\004mods\030\007 \003(\01320.google.spanner.executor.v1.DataChangeRecord.Mod\022\020\n" + + "\010mod_type\030\010 \001(\t\022\032\n" + + "\022value_capture_type\030\t \001(\t\022\024\n" + + "\014record_count\030\n" + + " \001(\003\022\027\n" + + "\017partition_count\030\013 \001(\003\022\027\n" + + "\017transaction_tag\030\014 \001(\t\022\035\n" + + "\025is_system_transaction\030\r" + + " \001(\010\032Z\n\n" + + "ColumnType\022\014\n" + + "\004name\030\001 \001(\t\022\014\n" + + "\004type\030\002 \001(\t\022\026\n" + + "\016is_primary_key\030\003 \001(\010\022\030\n" + + "\020ordinal_position\030\004 \001(\003\032;\n" + + "\003Mod\022\014\n" + + 
"\004keys\030\001 \001(\t\022\022\n\n" + + "new_values\030\002 \001(\t\022\022\n\n" + + "old_values\030\003 \001(\t\"\376\001\n" + + "\025ChildPartitionsRecord\022.\n\n" + + "start_time\030\001 \001(\0132\032.google.protobuf.Timestamp\022\027\n" + + "\017record_sequence\030\002 \001(\t\022Z\n" + + "\020child_partitions\030\003 \003(\0132@.google.spa" + + "nner.executor.v1.ChildPartitionsRecord.ChildPartition\032@\n" + + "\016ChildPartition\022\r\n" + + "\005token\030\001 \001(\t\022\037\n" + + "\027parent_partition_tokens\030\002 \003(\t\"E\n" + + "\017HeartbeatRecord\0222\n" + + "\016heartbeat_time\030\001 \001(\0132\032.google.protobuf.Timestamp\"^\n" + + "\016SpannerOptions\022L\n" + + "\024session_pool_options\030\001 \001(\0132." + + ".google.spanner.executor.v1.SessionPoolOptions\"-\n" + + "\022SessionPoolOptions\022\027\n" + + "\017use_multiplexed\030\001 \001(\0102\314\001\n" + + "\024SpannerExecutorProxy\022\211\001\n" + + "\022ExecuteActionAsync\0225.google.spanner.e" + + "xecutor.v1.SpannerAsyncActionRequest\0326.google.spanner.executor.v1.SpannerAsyncAc" + + "tionResponse\"\000(\0010\001\032(\312A%spanner-cloud-executor.googleapis.comBx\n" + + "\036com.google.spanner.executor.v1B\022CloudExecutorProtoP\001Z@cl" + + "oud.google.com/go/spanner/executor/apiv1/executorpb;executorpbb\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.ClientProto.getDescriptor(), + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.longrunning.OperationsProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + com.google.rpc.StatusProto.getDescriptor(), + com.google.spanner.admin.database.v1.BackupProto.getDescriptor(), + com.google.spanner.admin.database.v1.CommonProto.getDescriptor(), + com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto.getDescriptor(), + 
com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto.getDescriptor(), + com.google.spanner.v1.SpannerProto.getDescriptor(), + com.google.spanner.v1.TypeProto.getDescriptor(), + }); + internal_static_google_spanner_executor_v1_SpannerAsyncActionRequest_descriptor = + getDescriptor().getMessageType(0); + internal_static_google_spanner_executor_v1_SpannerAsyncActionRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_SpannerAsyncActionRequest_descriptor, + new java.lang.String[] { + "ActionId", "Action", + }); + internal_static_google_spanner_executor_v1_SpannerAsyncActionResponse_descriptor = + getDescriptor().getMessageType(1); + internal_static_google_spanner_executor_v1_SpannerAsyncActionResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_SpannerAsyncActionResponse_descriptor, + new java.lang.String[] { + "ActionId", "Outcome", + }); + internal_static_google_spanner_executor_v1_SpannerAction_descriptor = + getDescriptor().getMessageType(2); + internal_static_google_spanner_executor_v1_SpannerAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_SpannerAction_descriptor, + new java.lang.String[] { + "DatabasePath", + "SpannerOptions", + "Start", + "Finish", + "Read", + "Query", + "Mutation", + "Dml", + "BatchDml", + "Write", + "PartitionedUpdate", + "Admin", + "StartBatchTxn", + "CloseBatchTxn", + "GenerateDbPartitionsRead", + "GenerateDbPartitionsQuery", + "ExecutePartition", + "ExecuteChangeStreamQuery", + "QueryCancellation", + "AdaptMessage", + "Action", + }); + internal_static_google_spanner_executor_v1_ReadAction_descriptor = + getDescriptor().getMessageType(3); + internal_static_google_spanner_executor_v1_ReadAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_google_spanner_executor_v1_ReadAction_descriptor, + new java.lang.String[] { + "Table", "Index", "Column", "Keys", "Limit", + }); + internal_static_google_spanner_executor_v1_QueryAction_descriptor = + getDescriptor().getMessageType(4); + internal_static_google_spanner_executor_v1_QueryAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_QueryAction_descriptor, + new java.lang.String[] { + "Sql", "Params", + }); + internal_static_google_spanner_executor_v1_QueryAction_Parameter_descriptor = + internal_static_google_spanner_executor_v1_QueryAction_descriptor.getNestedType(0); + internal_static_google_spanner_executor_v1_QueryAction_Parameter_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_QueryAction_Parameter_descriptor, + new java.lang.String[] { + "Name", "Type", "Value", + }); + internal_static_google_spanner_executor_v1_DmlAction_descriptor = + getDescriptor().getMessageType(5); + internal_static_google_spanner_executor_v1_DmlAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_DmlAction_descriptor, + new java.lang.String[] { + "Update", "AutocommitIfSupported", "LastStatement", + }); + internal_static_google_spanner_executor_v1_BatchDmlAction_descriptor = + getDescriptor().getMessageType(6); + internal_static_google_spanner_executor_v1_BatchDmlAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_BatchDmlAction_descriptor, + new java.lang.String[] { + "Updates", "LastStatements", + }); + internal_static_google_spanner_executor_v1_Value_descriptor = getDescriptor().getMessageType(7); + internal_static_google_spanner_executor_v1_Value_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_google_spanner_executor_v1_Value_descriptor, + new java.lang.String[] { + "IsNull", + "IntValue", + "BoolValue", + "DoubleValue", + "BytesValue", + "StringValue", + "StructValue", + "TimestampValue", + "DateDaysValue", + "IsCommitTimestamp", + "ArrayValue", + "ArrayType", + "ValueType", + }); + internal_static_google_spanner_executor_v1_KeyRange_descriptor = + getDescriptor().getMessageType(8); + internal_static_google_spanner_executor_v1_KeyRange_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_KeyRange_descriptor, + new java.lang.String[] { + "Start", "Limit", "Type", + }); + internal_static_google_spanner_executor_v1_KeySet_descriptor = + getDescriptor().getMessageType(9); + internal_static_google_spanner_executor_v1_KeySet_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_KeySet_descriptor, + new java.lang.String[] { + "Point", "Range", "All", + }); + internal_static_google_spanner_executor_v1_ValueList_descriptor = + getDescriptor().getMessageType(10); + internal_static_google_spanner_executor_v1_ValueList_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_ValueList_descriptor, + new java.lang.String[] { + "Value", + }); + internal_static_google_spanner_executor_v1_MutationAction_descriptor = + getDescriptor().getMessageType(11); + internal_static_google_spanner_executor_v1_MutationAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_MutationAction_descriptor, + new java.lang.String[] { + "Mod", + }); + internal_static_google_spanner_executor_v1_MutationAction_InsertArgs_descriptor = + internal_static_google_spanner_executor_v1_MutationAction_descriptor.getNestedType(0); + 
internal_static_google_spanner_executor_v1_MutationAction_InsertArgs_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_MutationAction_InsertArgs_descriptor, + new java.lang.String[] { + "Column", "Type", "Values", + }); + internal_static_google_spanner_executor_v1_MutationAction_UpdateArgs_descriptor = + internal_static_google_spanner_executor_v1_MutationAction_descriptor.getNestedType(1); + internal_static_google_spanner_executor_v1_MutationAction_UpdateArgs_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_MutationAction_UpdateArgs_descriptor, + new java.lang.String[] { + "Column", "Type", "Values", + }); + internal_static_google_spanner_executor_v1_MutationAction_Mod_descriptor = + internal_static_google_spanner_executor_v1_MutationAction_descriptor.getNestedType(2); + internal_static_google_spanner_executor_v1_MutationAction_Mod_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_MutationAction_Mod_descriptor, + new java.lang.String[] { + "Table", "Insert", "Update", "InsertOrUpdate", "Replace", "DeleteKeys", + }); + internal_static_google_spanner_executor_v1_WriteMutationsAction_descriptor = + getDescriptor().getMessageType(12); + internal_static_google_spanner_executor_v1_WriteMutationsAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_WriteMutationsAction_descriptor, + new java.lang.String[] { + "Mutation", + }); + internal_static_google_spanner_executor_v1_PartitionedUpdateAction_descriptor = + getDescriptor().getMessageType(13); + internal_static_google_spanner_executor_v1_PartitionedUpdateAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_google_spanner_executor_v1_PartitionedUpdateAction_descriptor, + new java.lang.String[] { + "Options", "Update", + }); + internal_static_google_spanner_executor_v1_PartitionedUpdateAction_ExecutePartitionedUpdateOptions_descriptor = + internal_static_google_spanner_executor_v1_PartitionedUpdateAction_descriptor.getNestedType( + 0); + internal_static_google_spanner_executor_v1_PartitionedUpdateAction_ExecutePartitionedUpdateOptions_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_PartitionedUpdateAction_ExecutePartitionedUpdateOptions_descriptor, + new java.lang.String[] { + "RpcPriority", "Tag", + }); + internal_static_google_spanner_executor_v1_StartTransactionAction_descriptor = + getDescriptor().getMessageType(14); + internal_static_google_spanner_executor_v1_StartTransactionAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_StartTransactionAction_descriptor, + new java.lang.String[] { + "Concurrency", "Table", "TransactionSeed", "ExecutionOptions", + }); + internal_static_google_spanner_executor_v1_Concurrency_descriptor = + getDescriptor().getMessageType(15); + internal_static_google_spanner_executor_v1_Concurrency_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_Concurrency_descriptor, + new java.lang.String[] { + "StalenessSeconds", + "MinReadTimestampMicros", + "MaxStalenessSeconds", + "ExactTimestampMicros", + "Strong", + "Batch", + "SnapshotEpochRead", + "SnapshotEpochRootTable", + "BatchReadTimestampMicros", + "ConcurrencyMode", + }); + internal_static_google_spanner_executor_v1_TableMetadata_descriptor = + getDescriptor().getMessageType(16); + internal_static_google_spanner_executor_v1_TableMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_google_spanner_executor_v1_TableMetadata_descriptor, + new java.lang.String[] { + "Name", "Column", "KeyColumn", + }); + internal_static_google_spanner_executor_v1_ColumnMetadata_descriptor = + getDescriptor().getMessageType(17); + internal_static_google_spanner_executor_v1_ColumnMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_ColumnMetadata_descriptor, + new java.lang.String[] { + "Name", "Type", + }); + internal_static_google_spanner_executor_v1_TransactionExecutionOptions_descriptor = + getDescriptor().getMessageType(18); + internal_static_google_spanner_executor_v1_TransactionExecutionOptions_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_TransactionExecutionOptions_descriptor, + new java.lang.String[] { + "Optimistic", + "ExcludeFromChangeStreams", + "SerializableOptimistic", + "SnapshotIsolationOptimistic", + "SnapshotIsolationPessimistic", + "ExcludeTxnFromChangeStreams", + }); + internal_static_google_spanner_executor_v1_FinishTransactionAction_descriptor = + getDescriptor().getMessageType(19); + internal_static_google_spanner_executor_v1_FinishTransactionAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_FinishTransactionAction_descriptor, + new java.lang.String[] { + "Mode", + }); + internal_static_google_spanner_executor_v1_AdminAction_descriptor = + getDescriptor().getMessageType(20); + internal_static_google_spanner_executor_v1_AdminAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_AdminAction_descriptor, + new java.lang.String[] { + "CreateUserInstanceConfig", + "UpdateUserInstanceConfig", + "DeleteUserInstanceConfig", + "GetCloudInstanceConfig", + "ListInstanceConfigs", + "CreateCloudInstance", + 
"UpdateCloudInstance", + "DeleteCloudInstance", + "ListCloudInstances", + "GetCloudInstance", + "CreateCloudDatabase", + "UpdateCloudDatabaseDdl", + "UpdateCloudDatabase", + "DropCloudDatabase", + "ListCloudDatabases", + "ListCloudDatabaseOperations", + "RestoreCloudDatabase", + "GetCloudDatabase", + "CreateCloudBackup", + "CopyCloudBackup", + "GetCloudBackup", + "UpdateCloudBackup", + "DeleteCloudBackup", + "ListCloudBackups", + "ListCloudBackupOperations", + "GetOperation", + "CancelOperation", + "ChangeQuorumCloudDatabase", + "AddSplitPoints", + "Action", + }); + internal_static_google_spanner_executor_v1_CreateUserInstanceConfigAction_descriptor = + getDescriptor().getMessageType(21); + internal_static_google_spanner_executor_v1_CreateUserInstanceConfigAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_CreateUserInstanceConfigAction_descriptor, + new java.lang.String[] { + "UserConfigId", "ProjectId", "BaseConfigId", "Replicas", + }); + internal_static_google_spanner_executor_v1_UpdateUserInstanceConfigAction_descriptor = + getDescriptor().getMessageType(22); + internal_static_google_spanner_executor_v1_UpdateUserInstanceConfigAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_UpdateUserInstanceConfigAction_descriptor, + new java.lang.String[] { + "UserConfigId", "ProjectId", "DisplayName", "Labels", + }); + internal_static_google_spanner_executor_v1_UpdateUserInstanceConfigAction_LabelsEntry_descriptor = + internal_static_google_spanner_executor_v1_UpdateUserInstanceConfigAction_descriptor + .getNestedType(0); + internal_static_google_spanner_executor_v1_UpdateUserInstanceConfigAction_LabelsEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_UpdateUserInstanceConfigAction_LabelsEntry_descriptor, + new 
java.lang.String[] { + "Key", "Value", + }); + internal_static_google_spanner_executor_v1_GetCloudInstanceConfigAction_descriptor = + getDescriptor().getMessageType(23); + internal_static_google_spanner_executor_v1_GetCloudInstanceConfigAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_GetCloudInstanceConfigAction_descriptor, + new java.lang.String[] { + "InstanceConfigId", "ProjectId", + }); + internal_static_google_spanner_executor_v1_DeleteUserInstanceConfigAction_descriptor = + getDescriptor().getMessageType(24); + internal_static_google_spanner_executor_v1_DeleteUserInstanceConfigAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_DeleteUserInstanceConfigAction_descriptor, + new java.lang.String[] { + "UserConfigId", "ProjectId", + }); + internal_static_google_spanner_executor_v1_ListCloudInstanceConfigsAction_descriptor = + getDescriptor().getMessageType(25); + internal_static_google_spanner_executor_v1_ListCloudInstanceConfigsAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_ListCloudInstanceConfigsAction_descriptor, + new java.lang.String[] { + "ProjectId", "PageSize", "PageToken", + }); + internal_static_google_spanner_executor_v1_CreateCloudInstanceAction_descriptor = + getDescriptor().getMessageType(26); + internal_static_google_spanner_executor_v1_CreateCloudInstanceAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_CreateCloudInstanceAction_descriptor, + new java.lang.String[] { + "InstanceId", + "ProjectId", + "InstanceConfigId", + "NodeCount", + "ProcessingUnits", + "AutoscalingConfig", + "Labels", + "Edition", + }); + internal_static_google_spanner_executor_v1_CreateCloudInstanceAction_LabelsEntry_descriptor = + 
internal_static_google_spanner_executor_v1_CreateCloudInstanceAction_descriptor + .getNestedType(0); + internal_static_google_spanner_executor_v1_CreateCloudInstanceAction_LabelsEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_CreateCloudInstanceAction_LabelsEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_spanner_executor_v1_UpdateCloudInstanceAction_descriptor = + getDescriptor().getMessageType(27); + internal_static_google_spanner_executor_v1_UpdateCloudInstanceAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_UpdateCloudInstanceAction_descriptor, + new java.lang.String[] { + "InstanceId", + "ProjectId", + "DisplayName", + "NodeCount", + "ProcessingUnits", + "AutoscalingConfig", + "Labels", + "Edition", + }); + internal_static_google_spanner_executor_v1_UpdateCloudInstanceAction_LabelsEntry_descriptor = + internal_static_google_spanner_executor_v1_UpdateCloudInstanceAction_descriptor + .getNestedType(0); + internal_static_google_spanner_executor_v1_UpdateCloudInstanceAction_LabelsEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_UpdateCloudInstanceAction_LabelsEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_spanner_executor_v1_DeleteCloudInstanceAction_descriptor = + getDescriptor().getMessageType(28); + internal_static_google_spanner_executor_v1_DeleteCloudInstanceAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_DeleteCloudInstanceAction_descriptor, + new java.lang.String[] { + "InstanceId", "ProjectId", + }); + internal_static_google_spanner_executor_v1_CreateCloudDatabaseAction_descriptor = + getDescriptor().getMessageType(29); + 
internal_static_google_spanner_executor_v1_CreateCloudDatabaseAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_CreateCloudDatabaseAction_descriptor, + new java.lang.String[] { + "InstanceId", + "ProjectId", + "DatabaseId", + "SdlStatement", + "EncryptionConfig", + "Dialect", + "ProtoDescriptors", + }); + internal_static_google_spanner_executor_v1_UpdateCloudDatabaseDdlAction_descriptor = + getDescriptor().getMessageType(30); + internal_static_google_spanner_executor_v1_UpdateCloudDatabaseDdlAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_UpdateCloudDatabaseDdlAction_descriptor, + new java.lang.String[] { + "InstanceId", + "ProjectId", + "DatabaseId", + "SdlStatement", + "OperationId", + "ProtoDescriptors", + }); + internal_static_google_spanner_executor_v1_UpdateCloudDatabaseAction_descriptor = + getDescriptor().getMessageType(31); + internal_static_google_spanner_executor_v1_UpdateCloudDatabaseAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_UpdateCloudDatabaseAction_descriptor, + new java.lang.String[] { + "InstanceId", "ProjectId", "DatabaseName", "EnableDropProtection", + }); + internal_static_google_spanner_executor_v1_DropCloudDatabaseAction_descriptor = + getDescriptor().getMessageType(32); + internal_static_google_spanner_executor_v1_DropCloudDatabaseAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_DropCloudDatabaseAction_descriptor, + new java.lang.String[] { + "InstanceId", "ProjectId", "DatabaseId", + }); + internal_static_google_spanner_executor_v1_ChangeQuorumCloudDatabaseAction_descriptor = + getDescriptor().getMessageType(33); + 
internal_static_google_spanner_executor_v1_ChangeQuorumCloudDatabaseAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_ChangeQuorumCloudDatabaseAction_descriptor, + new java.lang.String[] { + "DatabaseUri", "ServingLocations", + }); + internal_static_google_spanner_executor_v1_AdaptMessageAction_descriptor = + getDescriptor().getMessageType(34); + internal_static_google_spanner_executor_v1_AdaptMessageAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_AdaptMessageAction_descriptor, + new java.lang.String[] { + "DatabaseUri", "Protocol", "Payload", "Attachments", "Query", "PrepareThenExecute", + }); + internal_static_google_spanner_executor_v1_AdaptMessageAction_AttachmentsEntry_descriptor = + internal_static_google_spanner_executor_v1_AdaptMessageAction_descriptor.getNestedType(0); + internal_static_google_spanner_executor_v1_AdaptMessageAction_AttachmentsEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_AdaptMessageAction_AttachmentsEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_spanner_executor_v1_ListCloudDatabasesAction_descriptor = + getDescriptor().getMessageType(35); + internal_static_google_spanner_executor_v1_ListCloudDatabasesAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_ListCloudDatabasesAction_descriptor, + new java.lang.String[] { + "ProjectId", "InstanceId", "PageSize", "PageToken", + }); + internal_static_google_spanner_executor_v1_ListCloudInstancesAction_descriptor = + getDescriptor().getMessageType(36); + internal_static_google_spanner_executor_v1_ListCloudInstancesAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_google_spanner_executor_v1_ListCloudInstancesAction_descriptor, + new java.lang.String[] { + "ProjectId", "Filter", "PageSize", "PageToken", + }); + internal_static_google_spanner_executor_v1_GetCloudInstanceAction_descriptor = + getDescriptor().getMessageType(37); + internal_static_google_spanner_executor_v1_GetCloudInstanceAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_GetCloudInstanceAction_descriptor, + new java.lang.String[] { + "ProjectId", "InstanceId", + }); + internal_static_google_spanner_executor_v1_ListCloudDatabaseOperationsAction_descriptor = + getDescriptor().getMessageType(38); + internal_static_google_spanner_executor_v1_ListCloudDatabaseOperationsAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_ListCloudDatabaseOperationsAction_descriptor, + new java.lang.String[] { + "ProjectId", "InstanceId", "Filter", "PageSize", "PageToken", + }); + internal_static_google_spanner_executor_v1_RestoreCloudDatabaseAction_descriptor = + getDescriptor().getMessageType(39); + internal_static_google_spanner_executor_v1_RestoreCloudDatabaseAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_RestoreCloudDatabaseAction_descriptor, + new java.lang.String[] { + "ProjectId", + "BackupInstanceId", + "BackupId", + "DatabaseInstanceId", + "DatabaseId", + "EncryptionConfig", + }); + internal_static_google_spanner_executor_v1_GetCloudDatabaseAction_descriptor = + getDescriptor().getMessageType(40); + internal_static_google_spanner_executor_v1_GetCloudDatabaseAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_GetCloudDatabaseAction_descriptor, + new java.lang.String[] { + "ProjectId", "InstanceId", "DatabaseId", + }); + 
internal_static_google_spanner_executor_v1_CreateCloudBackupAction_descriptor = + getDescriptor().getMessageType(41); + internal_static_google_spanner_executor_v1_CreateCloudBackupAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_CreateCloudBackupAction_descriptor, + new java.lang.String[] { + "ProjectId", + "InstanceId", + "BackupId", + "DatabaseId", + "ExpireTime", + "VersionTime", + "EncryptionConfig", + }); + internal_static_google_spanner_executor_v1_CopyCloudBackupAction_descriptor = + getDescriptor().getMessageType(42); + internal_static_google_spanner_executor_v1_CopyCloudBackupAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_CopyCloudBackupAction_descriptor, + new java.lang.String[] { + "ProjectId", "InstanceId", "BackupId", "SourceBackup", "ExpireTime", + }); + internal_static_google_spanner_executor_v1_GetCloudBackupAction_descriptor = + getDescriptor().getMessageType(43); + internal_static_google_spanner_executor_v1_GetCloudBackupAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_GetCloudBackupAction_descriptor, + new java.lang.String[] { + "ProjectId", "InstanceId", "BackupId", + }); + internal_static_google_spanner_executor_v1_UpdateCloudBackupAction_descriptor = + getDescriptor().getMessageType(44); + internal_static_google_spanner_executor_v1_UpdateCloudBackupAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_UpdateCloudBackupAction_descriptor, + new java.lang.String[] { + "ProjectId", "InstanceId", "BackupId", "ExpireTime", + }); + internal_static_google_spanner_executor_v1_DeleteCloudBackupAction_descriptor = + getDescriptor().getMessageType(45); + 
internal_static_google_spanner_executor_v1_DeleteCloudBackupAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_DeleteCloudBackupAction_descriptor, + new java.lang.String[] { + "ProjectId", "InstanceId", "BackupId", + }); + internal_static_google_spanner_executor_v1_ListCloudBackupsAction_descriptor = + getDescriptor().getMessageType(46); + internal_static_google_spanner_executor_v1_ListCloudBackupsAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_ListCloudBackupsAction_descriptor, + new java.lang.String[] { + "ProjectId", "InstanceId", "Filter", "PageSize", "PageToken", + }); + internal_static_google_spanner_executor_v1_ListCloudBackupOperationsAction_descriptor = + getDescriptor().getMessageType(47); + internal_static_google_spanner_executor_v1_ListCloudBackupOperationsAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_ListCloudBackupOperationsAction_descriptor, + new java.lang.String[] { + "ProjectId", "InstanceId", "Filter", "PageSize", "PageToken", + }); + internal_static_google_spanner_executor_v1_GetOperationAction_descriptor = + getDescriptor().getMessageType(48); + internal_static_google_spanner_executor_v1_GetOperationAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_GetOperationAction_descriptor, + new java.lang.String[] { + "Operation", + }); + internal_static_google_spanner_executor_v1_QueryCancellationAction_descriptor = + getDescriptor().getMessageType(49); + internal_static_google_spanner_executor_v1_QueryCancellationAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_QueryCancellationAction_descriptor, + new java.lang.String[] { + 
"LongRunningSql", "CancelQuery", + }); + internal_static_google_spanner_executor_v1_CancelOperationAction_descriptor = + getDescriptor().getMessageType(50); + internal_static_google_spanner_executor_v1_CancelOperationAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_CancelOperationAction_descriptor, + new java.lang.String[] { + "Operation", + }); + internal_static_google_spanner_executor_v1_AddSplitPointsAction_descriptor = + getDescriptor().getMessageType(51); + internal_static_google_spanner_executor_v1_AddSplitPointsAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_AddSplitPointsAction_descriptor, + new java.lang.String[] { + "ProjectId", "InstanceId", "DatabaseId", "SplitPoints", + }); + internal_static_google_spanner_executor_v1_StartBatchTransactionAction_descriptor = + getDescriptor().getMessageType(52); + internal_static_google_spanner_executor_v1_StartBatchTransactionAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_StartBatchTransactionAction_descriptor, + new java.lang.String[] { + "BatchTxnTime", "Tid", "CloudDatabaseRole", "Param", + }); + internal_static_google_spanner_executor_v1_CloseBatchTransactionAction_descriptor = + getDescriptor().getMessageType(53); + internal_static_google_spanner_executor_v1_CloseBatchTransactionAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_CloseBatchTransactionAction_descriptor, + new java.lang.String[] { + "Cleanup", + }); + internal_static_google_spanner_executor_v1_GenerateDbPartitionsForReadAction_descriptor = + getDescriptor().getMessageType(54); + internal_static_google_spanner_executor_v1_GenerateDbPartitionsForReadAction_fieldAccessorTable = + new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_GenerateDbPartitionsForReadAction_descriptor, + new java.lang.String[] { + "Read", "Table", "DesiredBytesPerPartition", "MaxPartitionCount", + }); + internal_static_google_spanner_executor_v1_GenerateDbPartitionsForQueryAction_descriptor = + getDescriptor().getMessageType(55); + internal_static_google_spanner_executor_v1_GenerateDbPartitionsForQueryAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_GenerateDbPartitionsForQueryAction_descriptor, + new java.lang.String[] { + "Query", "DesiredBytesPerPartition", + }); + internal_static_google_spanner_executor_v1_BatchPartition_descriptor = + getDescriptor().getMessageType(56); + internal_static_google_spanner_executor_v1_BatchPartition_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_BatchPartition_descriptor, + new java.lang.String[] { + "Partition", "PartitionToken", "Table", "Index", + }); + internal_static_google_spanner_executor_v1_ExecutePartitionAction_descriptor = + getDescriptor().getMessageType(57); + internal_static_google_spanner_executor_v1_ExecutePartitionAction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_ExecutePartitionAction_descriptor, + new java.lang.String[] { + "Partition", + }); + internal_static_google_spanner_executor_v1_ExecuteChangeStreamQuery_descriptor = + getDescriptor().getMessageType(58); + internal_static_google_spanner_executor_v1_ExecuteChangeStreamQuery_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_ExecuteChangeStreamQuery_descriptor, + new java.lang.String[] { + "Name", + "StartTime", + "EndTime", + "PartitionToken", + "ReadOptions", + 
"HeartbeatMilliseconds", + "DeadlineSeconds", + "CloudDatabaseRole", + }); + internal_static_google_spanner_executor_v1_SpannerActionOutcome_descriptor = + getDescriptor().getMessageType(59); + internal_static_google_spanner_executor_v1_SpannerActionOutcome_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_SpannerActionOutcome_descriptor, + new java.lang.String[] { + "Status", + "CommitTime", + "ReadResult", + "QueryResult", + "TransactionRestarted", + "BatchTxnId", + "DbPartition", + "AdminResult", + "DmlRowsModified", + "ChangeStreamRecords", + "SnapshotIsolationTxnReadTimestamp", + }); + internal_static_google_spanner_executor_v1_AdminResult_descriptor = + getDescriptor().getMessageType(60); + internal_static_google_spanner_executor_v1_AdminResult_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_AdminResult_descriptor, + new java.lang.String[] { + "BackupResponse", + "OperationResponse", + "DatabaseResponse", + "InstanceResponse", + "InstanceConfigResponse", + }); + internal_static_google_spanner_executor_v1_CloudBackupResponse_descriptor = + getDescriptor().getMessageType(61); + internal_static_google_spanner_executor_v1_CloudBackupResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_CloudBackupResponse_descriptor, + new java.lang.String[] { + "ListedBackups", "ListedBackupOperations", "NextPageToken", "Backup", + }); + internal_static_google_spanner_executor_v1_OperationResponse_descriptor = + getDescriptor().getMessageType(62); + internal_static_google_spanner_executor_v1_OperationResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_OperationResponse_descriptor, + new java.lang.String[] { + "ListedOperations", "NextPageToken", 
"Operation", + }); + internal_static_google_spanner_executor_v1_CloudInstanceResponse_descriptor = + getDescriptor().getMessageType(63); + internal_static_google_spanner_executor_v1_CloudInstanceResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_CloudInstanceResponse_descriptor, + new java.lang.String[] { + "ListedInstances", "NextPageToken", "Instance", + }); + internal_static_google_spanner_executor_v1_CloudInstanceConfigResponse_descriptor = + getDescriptor().getMessageType(64); + internal_static_google_spanner_executor_v1_CloudInstanceConfigResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_CloudInstanceConfigResponse_descriptor, + new java.lang.String[] { + "ListedInstanceConfigs", "NextPageToken", "InstanceConfig", + }); + internal_static_google_spanner_executor_v1_CloudDatabaseResponse_descriptor = + getDescriptor().getMessageType(65); + internal_static_google_spanner_executor_v1_CloudDatabaseResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_CloudDatabaseResponse_descriptor, + new java.lang.String[] { + "ListedDatabases", "ListedDatabaseOperations", "NextPageToken", "Database", + }); + internal_static_google_spanner_executor_v1_ReadResult_descriptor = + getDescriptor().getMessageType(66); + internal_static_google_spanner_executor_v1_ReadResult_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_ReadResult_descriptor, + new java.lang.String[] { + "Table", "Index", "RequestIndex", "Row", "RowType", + }); + internal_static_google_spanner_executor_v1_QueryResult_descriptor = + getDescriptor().getMessageType(67); + internal_static_google_spanner_executor_v1_QueryResult_fieldAccessorTable = + new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_QueryResult_descriptor, + new java.lang.String[] { + "Row", "RowType", + }); + internal_static_google_spanner_executor_v1_ChangeStreamRecord_descriptor = + getDescriptor().getMessageType(68); + internal_static_google_spanner_executor_v1_ChangeStreamRecord_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_ChangeStreamRecord_descriptor, + new java.lang.String[] { + "DataChange", "ChildPartition", "Heartbeat", "Record", + }); + internal_static_google_spanner_executor_v1_DataChangeRecord_descriptor = + getDescriptor().getMessageType(69); + internal_static_google_spanner_executor_v1_DataChangeRecord_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_DataChangeRecord_descriptor, + new java.lang.String[] { + "CommitTime", + "RecordSequence", + "TransactionId", + "IsLastRecord", + "Table", + "ColumnTypes", + "Mods", + "ModType", + "ValueCaptureType", + "RecordCount", + "PartitionCount", + "TransactionTag", + "IsSystemTransaction", + }); + internal_static_google_spanner_executor_v1_DataChangeRecord_ColumnType_descriptor = + internal_static_google_spanner_executor_v1_DataChangeRecord_descriptor.getNestedType(0); + internal_static_google_spanner_executor_v1_DataChangeRecord_ColumnType_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_DataChangeRecord_ColumnType_descriptor, + new java.lang.String[] { + "Name", "Type", "IsPrimaryKey", "OrdinalPosition", + }); + internal_static_google_spanner_executor_v1_DataChangeRecord_Mod_descriptor = + internal_static_google_spanner_executor_v1_DataChangeRecord_descriptor.getNestedType(1); + internal_static_google_spanner_executor_v1_DataChangeRecord_Mod_fieldAccessorTable = + new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_DataChangeRecord_Mod_descriptor, + new java.lang.String[] { + "Keys", "NewValues", "OldValues", + }); + internal_static_google_spanner_executor_v1_ChildPartitionsRecord_descriptor = + getDescriptor().getMessageType(70); + internal_static_google_spanner_executor_v1_ChildPartitionsRecord_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_ChildPartitionsRecord_descriptor, + new java.lang.String[] { + "StartTime", "RecordSequence", "ChildPartitions", + }); + internal_static_google_spanner_executor_v1_ChildPartitionsRecord_ChildPartition_descriptor = + internal_static_google_spanner_executor_v1_ChildPartitionsRecord_descriptor.getNestedType( + 0); + internal_static_google_spanner_executor_v1_ChildPartitionsRecord_ChildPartition_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_ChildPartitionsRecord_ChildPartition_descriptor, + new java.lang.String[] { + "Token", "ParentPartitionTokens", + }); + internal_static_google_spanner_executor_v1_HeartbeatRecord_descriptor = + getDescriptor().getMessageType(71); + internal_static_google_spanner_executor_v1_HeartbeatRecord_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_HeartbeatRecord_descriptor, + new java.lang.String[] { + "HeartbeatTime", + }); + internal_static_google_spanner_executor_v1_SpannerOptions_descriptor = + getDescriptor().getMessageType(72); + internal_static_google_spanner_executor_v1_SpannerOptions_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_SpannerOptions_descriptor, + new java.lang.String[] { + "SessionPoolOptions", + }); + 
internal_static_google_spanner_executor_v1_SessionPoolOptions_descriptor = + getDescriptor().getMessageType(73); + internal_static_google_spanner_executor_v1_SessionPoolOptions_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_executor_v1_SessionPoolOptions_descriptor, + new java.lang.String[] { + "UseMultiplexed", + }); + descriptor.resolveAllFeaturesImmutable(); + com.google.api.ClientProto.getDescriptor(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.longrunning.OperationsProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + com.google.rpc.StatusProto.getDescriptor(); + com.google.spanner.admin.database.v1.BackupProto.getDescriptor(); + com.google.spanner.admin.database.v1.CommonProto.getDescriptor(); + com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto.getDescriptor(); + com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto.getDescriptor(); + com.google.spanner.v1.SpannerProto.getDescriptor(); + com.google.spanner.v1.TypeProto.getDescriptor(); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.ClientProto.defaultHost); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceConfigResponse.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceConfigResponse.java new file mode 100644 index 000000000000..9773de307292 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceConfigResponse.java @@ -0,0 +1,1462 @@ +/* + * 
Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * CloudInstanceConfigResponse contains results returned by cloud instance
    + * config related actions.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CloudInstanceConfigResponse} + */ +@com.google.protobuf.Generated +public final class CloudInstanceConfigResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.CloudInstanceConfigResponse) + CloudInstanceConfigResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CloudInstanceConfigResponse"); + } + + // Use CloudInstanceConfigResponse.newBuilder() to construct. + private CloudInstanceConfigResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CloudInstanceConfigResponse() { + listedInstanceConfigs_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloudInstanceConfigResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloudInstanceConfigResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CloudInstanceConfigResponse.class, + com.google.spanner.executor.v1.CloudInstanceConfigResponse.Builder.class); + } + + private int bitField0_; + public static final int LISTED_INSTANCE_CONFIGS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List + listedInstanceConfigs_; + + /** + * + * + *
    +   * List of instance configs returned by ListCloudInstanceConfigsAction.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + @java.lang.Override + public java.util.List + getListedInstanceConfigsList() { + return listedInstanceConfigs_; + } + + /** + * + * + *
    +   * List of instance configs returned by ListCloudInstanceConfigsAction.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + @java.lang.Override + public java.util.List + getListedInstanceConfigsOrBuilderList() { + return listedInstanceConfigs_; + } + + /** + * + * + *
    +   * List of instance configs returned by ListCloudInstanceConfigsAction.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + @java.lang.Override + public int getListedInstanceConfigsCount() { + return listedInstanceConfigs_.size(); + } + + /** + * + * + *
    +   * List of instance configs returned by ListCloudInstanceConfigsAction.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfig getListedInstanceConfigs(int index) { + return listedInstanceConfigs_.get(index); + } + + /** + * + * + *
    +   * List of instance configs returned by ListCloudInstanceConfigsAction.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder + getListedInstanceConfigsOrBuilder(int index) { + return listedInstanceConfigs_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +   * "next_page_token" can be sent in a subsequent list action
    +   * to fetch more of the matching data.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * "next_page_token" can be sent in a subsequent list action
    +   * to fetch more of the matching data.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_CONFIG_FIELD_NUMBER = 3; + private com.google.spanner.admin.instance.v1.InstanceConfig instanceConfig_; + + /** + * + * + *
    +   * Instance config returned by GetCloudInstanceConfigAction.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3; + * + * @return Whether the instanceConfig field is set. + */ + @java.lang.Override + public boolean hasInstanceConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Instance config returned by GetCloudInstanceConfigAction.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3; + * + * @return The instanceConfig. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig() { + return instanceConfig_ == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance() + : instanceConfig_; + } + + /** + * + * + *
    +   * Instance config returned by GetCloudInstanceConfigAction.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder getInstanceConfigOrBuilder() { + return instanceConfig_ == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance() + : instanceConfig_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < listedInstanceConfigs_.size(); i++) { + output.writeMessage(1, listedInstanceConfigs_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getInstanceConfig()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < listedInstanceConfigs_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, listedInstanceConfigs_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getInstanceConfig()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj 
instanceof com.google.spanner.executor.v1.CloudInstanceConfigResponse)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.CloudInstanceConfigResponse other = + (com.google.spanner.executor.v1.CloudInstanceConfigResponse) obj; + + if (!getListedInstanceConfigsList().equals(other.getListedInstanceConfigsList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (hasInstanceConfig() != other.hasInstanceConfig()) return false; + if (hasInstanceConfig()) { + if (!getInstanceConfig().equals(other.getInstanceConfig())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getListedInstanceConfigsCount() > 0) { + hash = (37 * hash) + LISTED_INSTANCE_CONFIGS_FIELD_NUMBER; + hash = (53 * hash) + getListedInstanceConfigsList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + if (hasInstanceConfig()) { + hash = (37 * hash) + INSTANCE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getInstanceConfig().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.CloudInstanceConfigResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CloudInstanceConfigResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloudInstanceConfigResponse parseFrom( + 
com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CloudInstanceConfigResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloudInstanceConfigResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CloudInstanceConfigResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloudInstanceConfigResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CloudInstanceConfigResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloudInstanceConfigResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CloudInstanceConfigResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloudInstanceConfigResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CloudInstanceConfigResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.CloudInstanceConfigResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * CloudInstanceConfigResponse contains results returned by cloud instance
    +   * config related actions.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CloudInstanceConfigResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.CloudInstanceConfigResponse) + com.google.spanner.executor.v1.CloudInstanceConfigResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloudInstanceConfigResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloudInstanceConfigResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CloudInstanceConfigResponse.class, + com.google.spanner.executor.v1.CloudInstanceConfigResponse.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.CloudInstanceConfigResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetListedInstanceConfigsFieldBuilder(); + internalGetInstanceConfigFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (listedInstanceConfigsBuilder_ == null) { + listedInstanceConfigs_ = java.util.Collections.emptyList(); + } else { + listedInstanceConfigs_ = null; + listedInstanceConfigsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + instanceConfig_ = null; + if (instanceConfigBuilder_ != 
null) { + instanceConfigBuilder_.dispose(); + instanceConfigBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloudInstanceConfigResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CloudInstanceConfigResponse getDefaultInstanceForType() { + return com.google.spanner.executor.v1.CloudInstanceConfigResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.CloudInstanceConfigResponse build() { + com.google.spanner.executor.v1.CloudInstanceConfigResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CloudInstanceConfigResponse buildPartial() { + com.google.spanner.executor.v1.CloudInstanceConfigResponse result = + new com.google.spanner.executor.v1.CloudInstanceConfigResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.executor.v1.CloudInstanceConfigResponse result) { + if (listedInstanceConfigsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + listedInstanceConfigs_ = java.util.Collections.unmodifiableList(listedInstanceConfigs_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.listedInstanceConfigs_ = listedInstanceConfigs_; + } else { + result.listedInstanceConfigs_ = listedInstanceConfigsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.executor.v1.CloudInstanceConfigResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + int to_bitField0_ = 0; + if 
(((from_bitField0_ & 0x00000004) != 0)) { + result.instanceConfig_ = + instanceConfigBuilder_ == null ? instanceConfig_ : instanceConfigBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.CloudInstanceConfigResponse) { + return mergeFrom((com.google.spanner.executor.v1.CloudInstanceConfigResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.CloudInstanceConfigResponse other) { + if (other == com.google.spanner.executor.v1.CloudInstanceConfigResponse.getDefaultInstance()) + return this; + if (listedInstanceConfigsBuilder_ == null) { + if (!other.listedInstanceConfigs_.isEmpty()) { + if (listedInstanceConfigs_.isEmpty()) { + listedInstanceConfigs_ = other.listedInstanceConfigs_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureListedInstanceConfigsIsMutable(); + listedInstanceConfigs_.addAll(other.listedInstanceConfigs_); + } + onChanged(); + } + } else { + if (!other.listedInstanceConfigs_.isEmpty()) { + if (listedInstanceConfigsBuilder_.isEmpty()) { + listedInstanceConfigsBuilder_.dispose(); + listedInstanceConfigsBuilder_ = null; + listedInstanceConfigs_ = other.listedInstanceConfigs_; + bitField0_ = (bitField0_ & ~0x00000001); + listedInstanceConfigsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetListedInstanceConfigsFieldBuilder() + : null; + } else { + listedInstanceConfigsBuilder_.addAllMessages(other.listedInstanceConfigs_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasInstanceConfig()) { + mergeInstanceConfig(other.getInstanceConfig()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.admin.instance.v1.InstanceConfig m = + input.readMessage( + com.google.spanner.admin.instance.v1.InstanceConfig.parser(), + extensionRegistry); + if (listedInstanceConfigsBuilder_ == null) { + ensureListedInstanceConfigsIsMutable(); + listedInstanceConfigs_.add(m); + } else { + listedInstanceConfigsBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetInstanceConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int 
bitField0_; + + private java.util.List + listedInstanceConfigs_ = java.util.Collections.emptyList(); + + private void ensureListedInstanceConfigsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + listedInstanceConfigs_ = + new java.util.ArrayList( + listedInstanceConfigs_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.InstanceConfig, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder, + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder> + listedInstanceConfigsBuilder_; + + /** + * + * + *
    +     * List of instance configs returned by ListCloudInstanceConfigsAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + public java.util.List + getListedInstanceConfigsList() { + if (listedInstanceConfigsBuilder_ == null) { + return java.util.Collections.unmodifiableList(listedInstanceConfigs_); + } else { + return listedInstanceConfigsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * List of instance configs returned by ListCloudInstanceConfigsAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + public int getListedInstanceConfigsCount() { + if (listedInstanceConfigsBuilder_ == null) { + return listedInstanceConfigs_.size(); + } else { + return listedInstanceConfigsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * List of instance configs returned by ListCloudInstanceConfigsAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + public com.google.spanner.admin.instance.v1.InstanceConfig getListedInstanceConfigs(int index) { + if (listedInstanceConfigsBuilder_ == null) { + return listedInstanceConfigs_.get(index); + } else { + return listedInstanceConfigsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * List of instance configs returned by ListCloudInstanceConfigsAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + public Builder setListedInstanceConfigs( + int index, com.google.spanner.admin.instance.v1.InstanceConfig value) { + if (listedInstanceConfigsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureListedInstanceConfigsIsMutable(); + listedInstanceConfigs_.set(index, value); + onChanged(); + } else { + listedInstanceConfigsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * List of instance configs returned by ListCloudInstanceConfigsAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + public Builder setListedInstanceConfigs( + int index, com.google.spanner.admin.instance.v1.InstanceConfig.Builder builderForValue) { + if (listedInstanceConfigsBuilder_ == null) { + ensureListedInstanceConfigsIsMutable(); + listedInstanceConfigs_.set(index, builderForValue.build()); + onChanged(); + } else { + listedInstanceConfigsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of instance configs returned by ListCloudInstanceConfigsAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + public Builder addListedInstanceConfigs( + com.google.spanner.admin.instance.v1.InstanceConfig value) { + if (listedInstanceConfigsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureListedInstanceConfigsIsMutable(); + listedInstanceConfigs_.add(value); + onChanged(); + } else { + listedInstanceConfigsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * List of instance configs returned by ListCloudInstanceConfigsAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + public Builder addListedInstanceConfigs( + int index, com.google.spanner.admin.instance.v1.InstanceConfig value) { + if (listedInstanceConfigsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureListedInstanceConfigsIsMutable(); + listedInstanceConfigs_.add(index, value); + onChanged(); + } else { + listedInstanceConfigsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * List of instance configs returned by ListCloudInstanceConfigsAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + public Builder addListedInstanceConfigs( + com.google.spanner.admin.instance.v1.InstanceConfig.Builder builderForValue) { + if (listedInstanceConfigsBuilder_ == null) { + ensureListedInstanceConfigsIsMutable(); + listedInstanceConfigs_.add(builderForValue.build()); + onChanged(); + } else { + listedInstanceConfigsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of instance configs returned by ListCloudInstanceConfigsAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + public Builder addListedInstanceConfigs( + int index, com.google.spanner.admin.instance.v1.InstanceConfig.Builder builderForValue) { + if (listedInstanceConfigsBuilder_ == null) { + ensureListedInstanceConfigsIsMutable(); + listedInstanceConfigs_.add(index, builderForValue.build()); + onChanged(); + } else { + listedInstanceConfigsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of instance configs returned by ListCloudInstanceConfigsAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + public Builder addAllListedInstanceConfigs( + java.lang.Iterable values) { + if (listedInstanceConfigsBuilder_ == null) { + ensureListedInstanceConfigsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, listedInstanceConfigs_); + onChanged(); + } else { + listedInstanceConfigsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * List of instance configs returned by ListCloudInstanceConfigsAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + public Builder clearListedInstanceConfigs() { + if (listedInstanceConfigsBuilder_ == null) { + listedInstanceConfigs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + listedInstanceConfigsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * List of instance configs returned by ListCloudInstanceConfigsAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + public Builder removeListedInstanceConfigs(int index) { + if (listedInstanceConfigsBuilder_ == null) { + ensureListedInstanceConfigsIsMutable(); + listedInstanceConfigs_.remove(index); + onChanged(); + } else { + listedInstanceConfigsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * List of instance configs returned by ListCloudInstanceConfigsAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + public com.google.spanner.admin.instance.v1.InstanceConfig.Builder + getListedInstanceConfigsBuilder(int index) { + return internalGetListedInstanceConfigsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * List of instance configs returned by ListCloudInstanceConfigsAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + public com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder + getListedInstanceConfigsOrBuilder(int index) { + if (listedInstanceConfigsBuilder_ == null) { + return listedInstanceConfigs_.get(index); + } else { + return listedInstanceConfigsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * List of instance configs returned by ListCloudInstanceConfigsAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + public java.util.List + getListedInstanceConfigsOrBuilderList() { + if (listedInstanceConfigsBuilder_ != null) { + return listedInstanceConfigsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(listedInstanceConfigs_); + } + } + + /** + * + * + *
    +     * List of instance configs returned by ListCloudInstanceConfigsAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + public com.google.spanner.admin.instance.v1.InstanceConfig.Builder + addListedInstanceConfigsBuilder() { + return internalGetListedInstanceConfigsFieldBuilder() + .addBuilder(com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance()); + } + + /** + * + * + *
    +     * List of instance configs returned by ListCloudInstanceConfigsAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + public com.google.spanner.admin.instance.v1.InstanceConfig.Builder + addListedInstanceConfigsBuilder(int index) { + return internalGetListedInstanceConfigsFieldBuilder() + .addBuilder( + index, com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance()); + } + + /** + * + * + *
    +     * List of instance configs returned by ListCloudInstanceConfigsAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + public java.util.List + getListedInstanceConfigsBuilderList() { + return internalGetListedInstanceConfigsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.InstanceConfig, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder, + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder> + internalGetListedInstanceConfigsFieldBuilder() { + if (listedInstanceConfigsBuilder_ == null) { + listedInstanceConfigsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.InstanceConfig, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder, + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder>( + listedInstanceConfigs_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + listedInstanceConfigs_ = null; + } + return listedInstanceConfigsBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.spanner.admin.instance.v1.InstanceConfig instanceConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstanceConfig, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder, + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder> + instanceConfigBuilder_; + + /** + * + * + *
    +     * Instance config returned by GetCloudInstanceConfigAction.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3; + * + * @return Whether the instanceConfig field is set. + */ + public boolean hasInstanceConfig() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Instance config returned by GetCloudInstanceConfigAction.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3; + * + * @return The instanceConfig. + */ + public com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig() { + if (instanceConfigBuilder_ == null) { + return instanceConfig_ == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance() + : instanceConfig_; + } else { + return instanceConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Instance config returned by GetCloudInstanceConfigAction.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3; + */ + public Builder setInstanceConfig(com.google.spanner.admin.instance.v1.InstanceConfig value) { + if (instanceConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + instanceConfig_ = value; + } else { + instanceConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Instance config returned by GetCloudInstanceConfigAction.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3; + */ + public Builder setInstanceConfig( + com.google.spanner.admin.instance.v1.InstanceConfig.Builder builderForValue) { + if (instanceConfigBuilder_ == null) { + instanceConfig_ = builderForValue.build(); + } else { + instanceConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Instance config returned by GetCloudInstanceConfigAction.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3; + */ + public Builder mergeInstanceConfig(com.google.spanner.admin.instance.v1.InstanceConfig value) { + if (instanceConfigBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && instanceConfig_ != null + && instanceConfig_ + != com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance()) { + getInstanceConfigBuilder().mergeFrom(value); + } else { + instanceConfig_ = value; + } + } else { + instanceConfigBuilder_.mergeFrom(value); + } + if (instanceConfig_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Instance config returned by GetCloudInstanceConfigAction.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3; + */ + public Builder clearInstanceConfig() { + bitField0_ = (bitField0_ & ~0x00000004); + instanceConfig_ = null; + if (instanceConfigBuilder_ != null) { + instanceConfigBuilder_.dispose(); + instanceConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Instance config returned by GetCloudInstanceConfigAction.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3; + */ + public com.google.spanner.admin.instance.v1.InstanceConfig.Builder getInstanceConfigBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetInstanceConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Instance config returned by GetCloudInstanceConfigAction.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3; + */ + public com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder + getInstanceConfigOrBuilder() { + if (instanceConfigBuilder_ != null) { + return instanceConfigBuilder_.getMessageOrBuilder(); + } else { + return instanceConfig_ == null + ? com.google.spanner.admin.instance.v1.InstanceConfig.getDefaultInstance() + : instanceConfig_; + } + } + + /** + * + * + *
    +     * Instance config returned by GetCloudInstanceConfigAction.
    +     * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstanceConfig, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder, + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder> + internalGetInstanceConfigFieldBuilder() { + if (instanceConfigBuilder_ == null) { + instanceConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.InstanceConfig, + com.google.spanner.admin.instance.v1.InstanceConfig.Builder, + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder>( + getInstanceConfig(), getParentForChildren(), isClean()); + instanceConfig_ = null; + } + return instanceConfigBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.CloudInstanceConfigResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.CloudInstanceConfigResponse) + private static final com.google.spanner.executor.v1.CloudInstanceConfigResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.CloudInstanceConfigResponse(); + } + + public static com.google.spanner.executor.v1.CloudInstanceConfigResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CloudInstanceConfigResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CloudInstanceConfigResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceConfigResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceConfigResponseOrBuilder.java new file mode 100644 index 000000000000..83486d1db430 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceConfigResponseOrBuilder.java @@ -0,0 +1,156 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface CloudInstanceConfigResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.CloudInstanceConfigResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * List of instance configs returned by ListCloudInstanceConfigsAction.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + java.util.List + getListedInstanceConfigsList(); + + /** + * + * + *
    +   * List of instance configs returned by ListCloudInstanceConfigsAction.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + com.google.spanner.admin.instance.v1.InstanceConfig getListedInstanceConfigs(int index); + + /** + * + * + *
    +   * List of instance configs returned by ListCloudInstanceConfigsAction.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + int getListedInstanceConfigsCount(); + + /** + * + * + *
    +   * List of instance configs returned by ListCloudInstanceConfigsAction.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + java.util.List + getListedInstanceConfigsOrBuilderList(); + + /** + * + * + *
    +   * List of instance configs returned by ListCloudInstanceConfigsAction.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.InstanceConfig listed_instance_configs = 1; + * + */ + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder getListedInstanceConfigsOrBuilder( + int index); + + /** + * + * + *
    +   * "next_page_token" can be sent in a subsequent list action
    +   * to fetch more of the matching data.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
    +   * "next_page_token" can be sent in a subsequent list action
    +   * to fetch more of the matching data.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); + + /** + * + * + *
    +   * Instance config returned by GetCloudInstanceConfigAction.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3; + * + * @return Whether the instanceConfig field is set. + */ + boolean hasInstanceConfig(); + + /** + * + * + *
    +   * Instance config returned by GetCloudInstanceConfigAction.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3; + * + * @return The instanceConfig. + */ + com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig(); + + /** + * + * + *
    +   * Instance config returned by GetCloudInstanceConfigAction.
    +   * 
    + * + * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 3; + */ + com.google.spanner.admin.instance.v1.InstanceConfigOrBuilder getInstanceConfigOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceResponse.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceResponse.java new file mode 100644 index 000000000000..cb6e538edd92 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceResponse.java @@ -0,0 +1,1426 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * CloudInstanceResponse contains results returned by cloud instance related
    + * actions.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CloudInstanceResponse} + */ +@com.google.protobuf.Generated +public final class CloudInstanceResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.CloudInstanceResponse) + CloudInstanceResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CloudInstanceResponse"); + } + + // Use CloudInstanceResponse.newBuilder() to construct. + private CloudInstanceResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CloudInstanceResponse() { + listedInstances_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloudInstanceResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloudInstanceResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CloudInstanceResponse.class, + com.google.spanner.executor.v1.CloudInstanceResponse.Builder.class); + } + + private int bitField0_; + public static final int LISTED_INSTANCES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List listedInstances_; + + /** + * + * + *
    +   * List of instances returned by ListCloudInstancesAction.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + @java.lang.Override + public java.util.List getListedInstancesList() { + return listedInstances_; + } + + /** + * + * + *
    +   * List of instances returned by ListCloudInstancesAction.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + @java.lang.Override + public java.util.List + getListedInstancesOrBuilderList() { + return listedInstances_; + } + + /** + * + * + *
    +   * List of instances returned by ListCloudInstancesAction.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + @java.lang.Override + public int getListedInstancesCount() { + return listedInstances_.size(); + } + + /** + * + * + *
    +   * List of instances returned by ListCloudInstancesAction.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance getListedInstances(int index) { + return listedInstances_.get(index); + } + + /** + * + * + *
    +   * List of instances returned by ListCloudInstancesAction.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceOrBuilder getListedInstancesOrBuilder( + int index) { + return listedInstances_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +   * "next_page_token" can be sent in a subsequent list action
    +   * to fetch more of the matching data.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * "next_page_token" can be sent in a subsequent list action
    +   * to fetch more of the matching data.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_FIELD_NUMBER = 3; + private com.google.spanner.admin.instance.v1.Instance instance_; + + /** + * + * + *
    +   * Instance returned by GetCloudInstanceAction
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 3; + * + * @return Whether the instance field is set. + */ + @java.lang.Override + public boolean hasInstance() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Instance returned by GetCloudInstanceAction
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 3; + * + * @return The instance. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance getInstance() { + return instance_ == null + ? com.google.spanner.admin.instance.v1.Instance.getDefaultInstance() + : instance_; + } + + /** + * + * + *
    +   * Instance returned by GetCloudInstanceAction
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 3; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.InstanceOrBuilder getInstanceOrBuilder() { + return instance_ == null + ? com.google.spanner.admin.instance.v1.Instance.getDefaultInstance() + : instance_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < listedInstances_.size(); i++) { + output.writeMessage(1, listedInstances_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getInstance()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < listedInstances_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, listedInstances_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getInstance()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.CloudInstanceResponse)) { + return 
super.equals(obj); + } + com.google.spanner.executor.v1.CloudInstanceResponse other = + (com.google.spanner.executor.v1.CloudInstanceResponse) obj; + + if (!getListedInstancesList().equals(other.getListedInstancesList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (hasInstance() != other.hasInstance()) return false; + if (hasInstance()) { + if (!getInstance().equals(other.getInstance())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getListedInstancesCount() > 0) { + hash = (37 * hash) + LISTED_INSTANCES_FIELD_NUMBER; + hash = (53 * hash) + getListedInstancesList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + if (hasInstance()) { + hash = (37 * hash) + INSTANCE_FIELD_NUMBER; + hash = (53 * hash) + getInstance().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.CloudInstanceResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CloudInstanceResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloudInstanceResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.executor.v1.CloudInstanceResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloudInstanceResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CloudInstanceResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloudInstanceResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CloudInstanceResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloudInstanceResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CloudInstanceResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CloudInstanceResponse parseFrom( + com.google.protobuf.CodedInputStream input) 
throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CloudInstanceResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.CloudInstanceResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * CloudInstanceResponse contains results returned by cloud instance related
    +   * actions.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CloudInstanceResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.CloudInstanceResponse) + com.google.spanner.executor.v1.CloudInstanceResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloudInstanceResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloudInstanceResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CloudInstanceResponse.class, + com.google.spanner.executor.v1.CloudInstanceResponse.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.CloudInstanceResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetListedInstancesFieldBuilder(); + internalGetInstanceFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (listedInstancesBuilder_ == null) { + listedInstances_ = java.util.Collections.emptyList(); + } else { + listedInstances_ = null; + listedInstancesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + instance_ = null; + if (instanceBuilder_ != null) { + instanceBuilder_.dispose(); + instanceBuilder_ = null; + } + return this; + } + + 
@java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CloudInstanceResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CloudInstanceResponse getDefaultInstanceForType() { + return com.google.spanner.executor.v1.CloudInstanceResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.CloudInstanceResponse build() { + com.google.spanner.executor.v1.CloudInstanceResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CloudInstanceResponse buildPartial() { + com.google.spanner.executor.v1.CloudInstanceResponse result = + new com.google.spanner.executor.v1.CloudInstanceResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.executor.v1.CloudInstanceResponse result) { + if (listedInstancesBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + listedInstances_ = java.util.Collections.unmodifiableList(listedInstances_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.listedInstances_ = listedInstances_; + } else { + result.listedInstances_ = listedInstancesBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.executor.v1.CloudInstanceResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.instance_ = instanceBuilder_ == null ? 
instance_ : instanceBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.CloudInstanceResponse) { + return mergeFrom((com.google.spanner.executor.v1.CloudInstanceResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.CloudInstanceResponse other) { + if (other == com.google.spanner.executor.v1.CloudInstanceResponse.getDefaultInstance()) + return this; + if (listedInstancesBuilder_ == null) { + if (!other.listedInstances_.isEmpty()) { + if (listedInstances_.isEmpty()) { + listedInstances_ = other.listedInstances_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureListedInstancesIsMutable(); + listedInstances_.addAll(other.listedInstances_); + } + onChanged(); + } + } else { + if (!other.listedInstances_.isEmpty()) { + if (listedInstancesBuilder_.isEmpty()) { + listedInstancesBuilder_.dispose(); + listedInstancesBuilder_ = null; + listedInstances_ = other.listedInstances_; + bitField0_ = (bitField0_ & ~0x00000001); + listedInstancesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetListedInstancesFieldBuilder() + : null; + } else { + listedInstancesBuilder_.addAllMessages(other.listedInstances_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasInstance()) { + mergeInstance(other.getInstance()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.admin.instance.v1.Instance m = + input.readMessage( + com.google.spanner.admin.instance.v1.Instance.parser(), extensionRegistry); + if (listedInstancesBuilder_ == null) { + ensureListedInstancesIsMutable(); + listedInstances_.add(m); + } else { + listedInstancesBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetInstanceFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List listedInstances_ = + 
java.util.Collections.emptyList(); + + private void ensureListedInstancesIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + listedInstances_ = + new java.util.ArrayList( + listedInstances_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.Instance, + com.google.spanner.admin.instance.v1.Instance.Builder, + com.google.spanner.admin.instance.v1.InstanceOrBuilder> + listedInstancesBuilder_; + + /** + * + * + *
    +     * List of instances returned by ListCloudInstancesAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + public java.util.List getListedInstancesList() { + if (listedInstancesBuilder_ == null) { + return java.util.Collections.unmodifiableList(listedInstances_); + } else { + return listedInstancesBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * List of instances returned by ListCloudInstancesAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + public int getListedInstancesCount() { + if (listedInstancesBuilder_ == null) { + return listedInstances_.size(); + } else { + return listedInstancesBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * List of instances returned by ListCloudInstancesAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + public com.google.spanner.admin.instance.v1.Instance getListedInstances(int index) { + if (listedInstancesBuilder_ == null) { + return listedInstances_.get(index); + } else { + return listedInstancesBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * List of instances returned by ListCloudInstancesAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + public Builder setListedInstances( + int index, com.google.spanner.admin.instance.v1.Instance value) { + if (listedInstancesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureListedInstancesIsMutable(); + listedInstances_.set(index, value); + onChanged(); + } else { + listedInstancesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * List of instances returned by ListCloudInstancesAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + public Builder setListedInstances( + int index, com.google.spanner.admin.instance.v1.Instance.Builder builderForValue) { + if (listedInstancesBuilder_ == null) { + ensureListedInstancesIsMutable(); + listedInstances_.set(index, builderForValue.build()); + onChanged(); + } else { + listedInstancesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of instances returned by ListCloudInstancesAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + public Builder addListedInstances(com.google.spanner.admin.instance.v1.Instance value) { + if (listedInstancesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureListedInstancesIsMutable(); + listedInstances_.add(value); + onChanged(); + } else { + listedInstancesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * List of instances returned by ListCloudInstancesAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + public Builder addListedInstances( + int index, com.google.spanner.admin.instance.v1.Instance value) { + if (listedInstancesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureListedInstancesIsMutable(); + listedInstances_.add(index, value); + onChanged(); + } else { + listedInstancesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * List of instances returned by ListCloudInstancesAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + public Builder addListedInstances( + com.google.spanner.admin.instance.v1.Instance.Builder builderForValue) { + if (listedInstancesBuilder_ == null) { + ensureListedInstancesIsMutable(); + listedInstances_.add(builderForValue.build()); + onChanged(); + } else { + listedInstancesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of instances returned by ListCloudInstancesAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + public Builder addListedInstances( + int index, com.google.spanner.admin.instance.v1.Instance.Builder builderForValue) { + if (listedInstancesBuilder_ == null) { + ensureListedInstancesIsMutable(); + listedInstances_.add(index, builderForValue.build()); + onChanged(); + } else { + listedInstancesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of instances returned by ListCloudInstancesAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + public Builder addAllListedInstances( + java.lang.Iterable values) { + if (listedInstancesBuilder_ == null) { + ensureListedInstancesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, listedInstances_); + onChanged(); + } else { + listedInstancesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * List of instances returned by ListCloudInstancesAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + public Builder clearListedInstances() { + if (listedInstancesBuilder_ == null) { + listedInstances_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + listedInstancesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * List of instances returned by ListCloudInstancesAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + public Builder removeListedInstances(int index) { + if (listedInstancesBuilder_ == null) { + ensureListedInstancesIsMutable(); + listedInstances_.remove(index); + onChanged(); + } else { + listedInstancesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * List of instances returned by ListCloudInstancesAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + public com.google.spanner.admin.instance.v1.Instance.Builder getListedInstancesBuilder( + int index) { + return internalGetListedInstancesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * List of instances returned by ListCloudInstancesAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + public com.google.spanner.admin.instance.v1.InstanceOrBuilder getListedInstancesOrBuilder( + int index) { + if (listedInstancesBuilder_ == null) { + return listedInstances_.get(index); + } else { + return listedInstancesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * List of instances returned by ListCloudInstancesAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + public java.util.List + getListedInstancesOrBuilderList() { + if (listedInstancesBuilder_ != null) { + return listedInstancesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(listedInstances_); + } + } + + /** + * + * + *
    +     * List of instances returned by ListCloudInstancesAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + public com.google.spanner.admin.instance.v1.Instance.Builder addListedInstancesBuilder() { + return internalGetListedInstancesFieldBuilder() + .addBuilder(com.google.spanner.admin.instance.v1.Instance.getDefaultInstance()); + } + + /** + * + * + *
    +     * List of instances returned by ListCloudInstancesAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + public com.google.spanner.admin.instance.v1.Instance.Builder addListedInstancesBuilder( + int index) { + return internalGetListedInstancesFieldBuilder() + .addBuilder(index, com.google.spanner.admin.instance.v1.Instance.getDefaultInstance()); + } + + /** + * + * + *
    +     * List of instances returned by ListCloudInstancesAction.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + public java.util.List + getListedInstancesBuilderList() { + return internalGetListedInstancesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.Instance, + com.google.spanner.admin.instance.v1.Instance.Builder, + com.google.spanner.admin.instance.v1.InstanceOrBuilder> + internalGetListedInstancesFieldBuilder() { + if (listedInstancesBuilder_ == null) { + listedInstancesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.Instance, + com.google.spanner.admin.instance.v1.Instance.Builder, + com.google.spanner.admin.instance.v1.InstanceOrBuilder>( + listedInstances_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + listedInstances_ = null; + } + return listedInstancesBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.spanner.admin.instance.v1.Instance instance_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.Instance, + com.google.spanner.admin.instance.v1.Instance.Builder, + com.google.spanner.admin.instance.v1.InstanceOrBuilder> + instanceBuilder_; + + /** + * + * + *
    +     * Instance returned by GetCloudInstanceAction
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 3; + * + * @return Whether the instance field is set. + */ + public boolean hasInstance() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Instance returned by GetCloudInstanceAction
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 3; + * + * @return The instance. + */ + public com.google.spanner.admin.instance.v1.Instance getInstance() { + if (instanceBuilder_ == null) { + return instance_ == null + ? com.google.spanner.admin.instance.v1.Instance.getDefaultInstance() + : instance_; + } else { + return instanceBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Instance returned by GetCloudInstanceAction
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 3; + */ + public Builder setInstance(com.google.spanner.admin.instance.v1.Instance value) { + if (instanceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + instance_ = value; + } else { + instanceBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Instance returned by GetCloudInstanceAction
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 3; + */ + public Builder setInstance( + com.google.spanner.admin.instance.v1.Instance.Builder builderForValue) { + if (instanceBuilder_ == null) { + instance_ = builderForValue.build(); + } else { + instanceBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Instance returned by GetCloudInstanceAction
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 3; + */ + public Builder mergeInstance(com.google.spanner.admin.instance.v1.Instance value) { + if (instanceBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && instance_ != null + && instance_ != com.google.spanner.admin.instance.v1.Instance.getDefaultInstance()) { + getInstanceBuilder().mergeFrom(value); + } else { + instance_ = value; + } + } else { + instanceBuilder_.mergeFrom(value); + } + if (instance_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Instance returned by GetCloudInstanceAction
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 3; + */ + public Builder clearInstance() { + bitField0_ = (bitField0_ & ~0x00000004); + instance_ = null; + if (instanceBuilder_ != null) { + instanceBuilder_.dispose(); + instanceBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Instance returned by GetCloudInstanceAction
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 3; + */ + public com.google.spanner.admin.instance.v1.Instance.Builder getInstanceBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetInstanceFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Instance returned by GetCloudInstanceAction
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 3; + */ + public com.google.spanner.admin.instance.v1.InstanceOrBuilder getInstanceOrBuilder() { + if (instanceBuilder_ != null) { + return instanceBuilder_.getMessageOrBuilder(); + } else { + return instance_ == null + ? com.google.spanner.admin.instance.v1.Instance.getDefaultInstance() + : instance_; + } + } + + /** + * + * + *
    +     * Instance returned by GetCloudInstanceAction
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.Instance, + com.google.spanner.admin.instance.v1.Instance.Builder, + com.google.spanner.admin.instance.v1.InstanceOrBuilder> + internalGetInstanceFieldBuilder() { + if (instanceBuilder_ == null) { + instanceBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.Instance, + com.google.spanner.admin.instance.v1.Instance.Builder, + com.google.spanner.admin.instance.v1.InstanceOrBuilder>( + getInstance(), getParentForChildren(), isClean()); + instance_ = null; + } + return instanceBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.CloudInstanceResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.CloudInstanceResponse) + private static final com.google.spanner.executor.v1.CloudInstanceResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.CloudInstanceResponse(); + } + + public static com.google.spanner.executor.v1.CloudInstanceResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CloudInstanceResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CloudInstanceResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceResponseOrBuilder.java new file mode 100644 index 000000000000..37efde132bbb --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceResponseOrBuilder.java @@ -0,0 +1,149 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface CloudInstanceResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.CloudInstanceResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * List of instances returned by ListCloudInstancesAction.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + java.util.List getListedInstancesList(); + + /** + * + * + *
    +   * List of instances returned by ListCloudInstancesAction.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + com.google.spanner.admin.instance.v1.Instance getListedInstances(int index); + + /** + * + * + *
    +   * List of instances returned by ListCloudInstancesAction.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + int getListedInstancesCount(); + + /** + * + * + *
    +   * List of instances returned by ListCloudInstancesAction.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + java.util.List + getListedInstancesOrBuilderList(); + + /** + * + * + *
    +   * List of instances returned by ListCloudInstancesAction.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.Instance listed_instances = 1; + */ + com.google.spanner.admin.instance.v1.InstanceOrBuilder getListedInstancesOrBuilder(int index); + + /** + * + * + *
    +   * "next_page_token" can be sent in a subsequent list action
    +   * to fetch more of the matching data.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
    +   * "next_page_token" can be sent in a subsequent list action
    +   * to fetch more of the matching data.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); + + /** + * + * + *
    +   * Instance returned by GetCloudInstanceAction
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 3; + * + * @return Whether the instance field is set. + */ + boolean hasInstance(); + + /** + * + * + *
    +   * Instance returned by GetCloudInstanceAction
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 3; + * + * @return The instance. + */ + com.google.spanner.admin.instance.v1.Instance getInstance(); + + /** + * + * + *
    +   * Instance returned by GetCloudInstanceAction
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance instance = 3; + */ + com.google.spanner.admin.instance.v1.InstanceOrBuilder getInstanceOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ColumnMetadata.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ColumnMetadata.java new file mode 100644 index 000000000000..ba2787df35ba --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ColumnMetadata.java @@ -0,0 +1,877 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * ColumnMetadata represents metadata of a single column.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ColumnMetadata} + */ +@com.google.protobuf.Generated +public final class ColumnMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.ColumnMetadata) + ColumnMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ColumnMetadata"); + } + + // Use ColumnMetadata.newBuilder() to construct. + private ColumnMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ColumnMetadata() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ColumnMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ColumnMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ColumnMetadata.class, + com.google.spanner.executor.v1.ColumnMetadata.Builder.class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Column name.
    +   * 
    + * + * string name = 1; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Column name.
    +   * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TYPE_FIELD_NUMBER = 2; + private com.google.spanner.v1.Type type_; + + /** + * + * + *
    +   * Column type.
    +   * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return Whether the type field is set. + */ + @java.lang.Override + public boolean hasType() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Column type.
    +   * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return The type. + */ + @java.lang.Override + public com.google.spanner.v1.Type getType() { + return type_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : type_; + } + + /** + * + * + *
    +   * Column type.
    +   * 
    + * + * .google.spanner.v1.Type type = 2; + */ + @java.lang.Override + public com.google.spanner.v1.TypeOrBuilder getTypeOrBuilder() { + return type_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : type_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getType()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getType()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.ColumnMetadata)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.ColumnMetadata other = + (com.google.spanner.executor.v1.ColumnMetadata) obj; + + if (!getName().equals(other.getName())) return false; + if (hasType() != other.hasType()) return false; + if (hasType()) { + if (!getType().equals(other.getType())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + 
return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + getType().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.ColumnMetadata parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ColumnMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ColumnMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ColumnMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ColumnMetadata parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ColumnMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.executor.v1.ColumnMetadata parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ColumnMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ColumnMetadata parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ColumnMetadata parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ColumnMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ColumnMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.ColumnMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public 
Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * ColumnMetadata represents metadata of a single column.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ColumnMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.ColumnMetadata) + com.google.spanner.executor.v1.ColumnMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ColumnMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ColumnMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ColumnMetadata.class, + com.google.spanner.executor.v1.ColumnMetadata.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.ColumnMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetTypeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + type_ = null; + if (typeBuilder_ != null) { + typeBuilder_.dispose(); + typeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ColumnMetadata_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ColumnMetadata getDefaultInstanceForType() { + return 
com.google.spanner.executor.v1.ColumnMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.ColumnMetadata build() { + com.google.spanner.executor.v1.ColumnMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ColumnMetadata buildPartial() { + com.google.spanner.executor.v1.ColumnMetadata result = + new com.google.spanner.executor.v1.ColumnMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.ColumnMetadata result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.type_ = typeBuilder_ == null ? type_ : typeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.ColumnMetadata) { + return mergeFrom((com.google.spanner.executor.v1.ColumnMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.ColumnMetadata other) { + if (other == com.google.spanner.executor.v1.ColumnMetadata.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasType()) { + mergeType(other.getType()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(internalGetTypeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Column name.
    +     * 
    + * + * string name = 1; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Column name.
    +     * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Column name.
    +     * 
    + * + * string name = 1; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Column name.
    +     * 
    + * + * string name = 1; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Column name.
    +     * 
    + * + * string name = 1; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.spanner.v1.Type type_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder> + typeBuilder_; + + /** + * + * + *
    +     * Column type.
    +     * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return Whether the type field is set. + */ + public boolean hasType() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Column type.
    +     * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return The type. + */ + public com.google.spanner.v1.Type getType() { + if (typeBuilder_ == null) { + return type_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : type_; + } else { + return typeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Column type.
    +     * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public Builder setType(com.google.spanner.v1.Type value) { + if (typeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + type_ = value; + } else { + typeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Column type.
    +     * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public Builder setType(com.google.spanner.v1.Type.Builder builderForValue) { + if (typeBuilder_ == null) { + type_ = builderForValue.build(); + } else { + typeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Column type.
    +     * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public Builder mergeType(com.google.spanner.v1.Type value) { + if (typeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && type_ != null + && type_ != com.google.spanner.v1.Type.getDefaultInstance()) { + getTypeBuilder().mergeFrom(value); + } else { + type_ = value; + } + } else { + typeBuilder_.mergeFrom(value); + } + if (type_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Column type.
    +     * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000002); + type_ = null; + if (typeBuilder_ != null) { + typeBuilder_.dispose(); + typeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Column type.
    +     * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public com.google.spanner.v1.Type.Builder getTypeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetTypeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Column type.
    +     * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public com.google.spanner.v1.TypeOrBuilder getTypeOrBuilder() { + if (typeBuilder_ != null) { + return typeBuilder_.getMessageOrBuilder(); + } else { + return type_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : type_; + } + } + + /** + * + * + *
    +     * Column type.
    +     * 
    + * + * .google.spanner.v1.Type type = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder> + internalGetTypeFieldBuilder() { + if (typeBuilder_ == null) { + typeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder>(getType(), getParentForChildren(), isClean()); + type_ = null; + } + return typeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.ColumnMetadata) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.ColumnMetadata) + private static final com.google.spanner.executor.v1.ColumnMetadata DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.ColumnMetadata(); + } + + public static com.google.spanner.executor.v1.ColumnMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ColumnMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ColumnMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ColumnMetadataOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ColumnMetadataOrBuilder.java new file mode 100644 index 000000000000..979b87da0837 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ColumnMetadataOrBuilder.java @@ -0,0 +1,91 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface ColumnMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.ColumnMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Column name.
    +   * 
    + * + * string name = 1; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Column name.
    +   * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +   * Column type.
    +   * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return Whether the type field is set. + */ + boolean hasType(); + + /** + * + * + *
    +   * Column type.
    +   * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return The type. + */ + com.google.spanner.v1.Type getType(); + + /** + * + * + *
    +   * Column type.
    +   * 
    + * + * .google.spanner.v1.Type type = 2; + */ + com.google.spanner.v1.TypeOrBuilder getTypeOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/Concurrency.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/Concurrency.java new file mode 100644 index 000000000000..d50f497cf33f --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/Concurrency.java @@ -0,0 +1,1742 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Concurrency for read-only transactions.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.Concurrency} + */ +@com.google.protobuf.Generated +public final class Concurrency extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.Concurrency) + ConcurrencyOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Concurrency"); + } + + // Use Concurrency.newBuilder() to construct. + private Concurrency(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Concurrency() { + snapshotEpochRootTable_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_Concurrency_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_Concurrency_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.Concurrency.class, + com.google.spanner.executor.v1.Concurrency.Builder.class); + } + + private int concurrencyModeCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object concurrencyMode_; + + public enum ConcurrencyModeCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + STALENESS_SECONDS(1), + MIN_READ_TIMESTAMP_MICROS(2), + MAX_STALENESS_SECONDS(3), + EXACT_TIMESTAMP_MICROS(4), + STRONG(5), + BATCH(6), + CONCURRENCYMODE_NOT_SET(0); + private final int value; + + private ConcurrencyModeCase(int value) { + this.value = value; + } + + /** + * 
@param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ConcurrencyModeCase valueOf(int value) { + return forNumber(value); + } + + public static ConcurrencyModeCase forNumber(int value) { + switch (value) { + case 1: + return STALENESS_SECONDS; + case 2: + return MIN_READ_TIMESTAMP_MICROS; + case 3: + return MAX_STALENESS_SECONDS; + case 4: + return EXACT_TIMESTAMP_MICROS; + case 5: + return STRONG; + case 6: + return BATCH; + case 0: + return CONCURRENCYMODE_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public ConcurrencyModeCase getConcurrencyModeCase() { + return ConcurrencyModeCase.forNumber(concurrencyModeCase_); + } + + public static final int STALENESS_SECONDS_FIELD_NUMBER = 1; + + /** + * + * + *
    +   * Indicates a read at a consistent timestamp that is specified relative to
    +   * now. That is, if the caller has specified an exact staleness of s
    +   * seconds, we will read at now - s.
    +   * 
    + * + * double staleness_seconds = 1; + * + * @return Whether the stalenessSeconds field is set. + */ + @java.lang.Override + public boolean hasStalenessSeconds() { + return concurrencyModeCase_ == 1; + } + + /** + * + * + *
    +   * Indicates a read at a consistent timestamp that is specified relative to
    +   * now. That is, if the caller has specified an exact staleness of s
    +   * seconds, we will read at now - s.
    +   * 
    + * + * double staleness_seconds = 1; + * + * @return The stalenessSeconds. + */ + @java.lang.Override + public double getStalenessSeconds() { + if (concurrencyModeCase_ == 1) { + return (java.lang.Double) concurrencyMode_; + } + return 0D; + } + + public static final int MIN_READ_TIMESTAMP_MICROS_FIELD_NUMBER = 2; + + /** + * + * + *
    +   * Indicates a boundedly stale read that reads at a timestamp >= T.
    +   * 
    + * + * int64 min_read_timestamp_micros = 2; + * + * @return Whether the minReadTimestampMicros field is set. + */ + @java.lang.Override + public boolean hasMinReadTimestampMicros() { + return concurrencyModeCase_ == 2; + } + + /** + * + * + *
    +   * Indicates a boundedly stale read that reads at a timestamp >= T.
    +   * 
    + * + * int64 min_read_timestamp_micros = 2; + * + * @return The minReadTimestampMicros. + */ + @java.lang.Override + public long getMinReadTimestampMicros() { + if (concurrencyModeCase_ == 2) { + return (java.lang.Long) concurrencyMode_; + } + return 0L; + } + + public static final int MAX_STALENESS_SECONDS_FIELD_NUMBER = 3; + + /** + * + * + *
    +   * Indicates a boundedly stale read that is at most N seconds stale.
    +   * 
    + * + * double max_staleness_seconds = 3; + * + * @return Whether the maxStalenessSeconds field is set. + */ + @java.lang.Override + public boolean hasMaxStalenessSeconds() { + return concurrencyModeCase_ == 3; + } + + /** + * + * + *
    +   * Indicates a boundedly stale read that is at most N seconds stale.
    +   * 
    + * + * double max_staleness_seconds = 3; + * + * @return The maxStalenessSeconds. + */ + @java.lang.Override + public double getMaxStalenessSeconds() { + if (concurrencyModeCase_ == 3) { + return (java.lang.Double) concurrencyMode_; + } + return 0D; + } + + public static final int EXACT_TIMESTAMP_MICROS_FIELD_NUMBER = 4; + + /** + * + * + *
    +   * Indicates a read at a consistent timestamp.
    +   * 
    + * + * int64 exact_timestamp_micros = 4; + * + * @return Whether the exactTimestampMicros field is set. + */ + @java.lang.Override + public boolean hasExactTimestampMicros() { + return concurrencyModeCase_ == 4; + } + + /** + * + * + *
    +   * Indicates a read at a consistent timestamp.
    +   * 
    + * + * int64 exact_timestamp_micros = 4; + * + * @return The exactTimestampMicros. + */ + @java.lang.Override + public long getExactTimestampMicros() { + if (concurrencyModeCase_ == 4) { + return (java.lang.Long) concurrencyMode_; + } + return 0L; + } + + public static final int STRONG_FIELD_NUMBER = 5; + + /** + * + * + *
    +   * Indicates a strong read, must only be set to true, or unset.
    +   * 
    + * + * bool strong = 5; + * + * @return Whether the strong field is set. + */ + @java.lang.Override + public boolean hasStrong() { + return concurrencyModeCase_ == 5; + } + + /** + * + * + *
    +   * Indicates a strong read, must only be set to true, or unset.
    +   * 
    + * + * bool strong = 5; + * + * @return The strong. + */ + @java.lang.Override + public boolean getStrong() { + if (concurrencyModeCase_ == 5) { + return (java.lang.Boolean) concurrencyMode_; + } + return false; + } + + public static final int BATCH_FIELD_NUMBER = 6; + + /** + * + * + *
    +   * Indicates a batch read, must only be set to true, or unset.
    +   * 
    + * + * bool batch = 6; + * + * @return Whether the batch field is set. + */ + @java.lang.Override + public boolean hasBatch() { + return concurrencyModeCase_ == 6; + } + + /** + * + * + *
    +   * Indicates a batch read, must only be set to true, or unset.
    +   * 
    + * + * bool batch = 6; + * + * @return The batch. + */ + @java.lang.Override + public boolean getBatch() { + if (concurrencyModeCase_ == 6) { + return (java.lang.Boolean) concurrencyMode_; + } + return false; + } + + public static final int SNAPSHOT_EPOCH_READ_FIELD_NUMBER = 7; + private boolean snapshotEpochRead_ = false; + + /** + * + * + *
    +   * True if exact_timestamp_micros is set, and the chosen timestamp is that of
    +   * a snapshot epoch.
    +   * 
    + * + * bool snapshot_epoch_read = 7; + * + * @return The snapshotEpochRead. + */ + @java.lang.Override + public boolean getSnapshotEpochRead() { + return snapshotEpochRead_; + } + + public static final int SNAPSHOT_EPOCH_ROOT_TABLE_FIELD_NUMBER = 8; + + @SuppressWarnings("serial") + private volatile java.lang.Object snapshotEpochRootTable_ = ""; + + /** + * + * + *
    +   * If set, this is a snapshot epoch read constrained to read only the
    +   * specified log scope root table, and its children. Will not be set for full
    +   * database epochs.
    +   * 
    + * + * string snapshot_epoch_root_table = 8; + * + * @return The snapshotEpochRootTable. + */ + @java.lang.Override + public java.lang.String getSnapshotEpochRootTable() { + java.lang.Object ref = snapshotEpochRootTable_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + snapshotEpochRootTable_ = s; + return s; + } + } + + /** + * + * + *
    +   * If set, this is a snapshot epoch read constrained to read only the
    +   * specified log scope root table, and its children. Will not be set for full
    +   * database epochs.
    +   * 
    + * + * string snapshot_epoch_root_table = 8; + * + * @return The bytes for snapshotEpochRootTable. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSnapshotEpochRootTableBytes() { + java.lang.Object ref = snapshotEpochRootTable_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + snapshotEpochRootTable_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BATCH_READ_TIMESTAMP_MICROS_FIELD_NUMBER = 9; + private long batchReadTimestampMicros_ = 0L; + + /** + * + * + *
    +   * Set only when batch is true.
    +   * 
    + * + * int64 batch_read_timestamp_micros = 9; + * + * @return The batchReadTimestampMicros. + */ + @java.lang.Override + public long getBatchReadTimestampMicros() { + return batchReadTimestampMicros_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (concurrencyModeCase_ == 1) { + output.writeDouble(1, (double) ((java.lang.Double) concurrencyMode_)); + } + if (concurrencyModeCase_ == 2) { + output.writeInt64(2, (long) ((java.lang.Long) concurrencyMode_)); + } + if (concurrencyModeCase_ == 3) { + output.writeDouble(3, (double) ((java.lang.Double) concurrencyMode_)); + } + if (concurrencyModeCase_ == 4) { + output.writeInt64(4, (long) ((java.lang.Long) concurrencyMode_)); + } + if (concurrencyModeCase_ == 5) { + output.writeBool(5, (boolean) ((java.lang.Boolean) concurrencyMode_)); + } + if (concurrencyModeCase_ == 6) { + output.writeBool(6, (boolean) ((java.lang.Boolean) concurrencyMode_)); + } + if (snapshotEpochRead_ != false) { + output.writeBool(7, snapshotEpochRead_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(snapshotEpochRootTable_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 8, snapshotEpochRootTable_); + } + if (batchReadTimestampMicros_ != 0L) { + output.writeInt64(9, batchReadTimestampMicros_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (concurrencyModeCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeDoubleSize( + 1, (double) ((java.lang.Double) concurrencyMode_)); + } + if (concurrencyModeCase_ 
== 2) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size( + 2, (long) ((java.lang.Long) concurrencyMode_)); + } + if (concurrencyModeCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeDoubleSize( + 3, (double) ((java.lang.Double) concurrencyMode_)); + } + if (concurrencyModeCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size( + 4, (long) ((java.lang.Long) concurrencyMode_)); + } + if (concurrencyModeCase_ == 5) { + size += + com.google.protobuf.CodedOutputStream.computeBoolSize( + 5, (boolean) ((java.lang.Boolean) concurrencyMode_)); + } + if (concurrencyModeCase_ == 6) { + size += + com.google.protobuf.CodedOutputStream.computeBoolSize( + 6, (boolean) ((java.lang.Boolean) concurrencyMode_)); + } + if (snapshotEpochRead_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(7, snapshotEpochRead_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(snapshotEpochRootTable_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(8, snapshotEpochRootTable_); + } + if (batchReadTimestampMicros_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(9, batchReadTimestampMicros_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.Concurrency)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.Concurrency other = + (com.google.spanner.executor.v1.Concurrency) obj; + + if (getSnapshotEpochRead() != other.getSnapshotEpochRead()) return false; + if (!getSnapshotEpochRootTable().equals(other.getSnapshotEpochRootTable())) return false; + if (getBatchReadTimestampMicros() != other.getBatchReadTimestampMicros()) return false; + if (!getConcurrencyModeCase().equals(other.getConcurrencyModeCase())) return false; + switch 
(concurrencyModeCase_) { + case 1: + if (java.lang.Double.doubleToLongBits(getStalenessSeconds()) + != java.lang.Double.doubleToLongBits(other.getStalenessSeconds())) return false; + break; + case 2: + if (getMinReadTimestampMicros() != other.getMinReadTimestampMicros()) return false; + break; + case 3: + if (java.lang.Double.doubleToLongBits(getMaxStalenessSeconds()) + != java.lang.Double.doubleToLongBits(other.getMaxStalenessSeconds())) return false; + break; + case 4: + if (getExactTimestampMicros() != other.getExactTimestampMicros()) return false; + break; + case 5: + if (getStrong() != other.getStrong()) return false; + break; + case 6: + if (getBatch() != other.getBatch()) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SNAPSHOT_EPOCH_READ_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getSnapshotEpochRead()); + hash = (37 * hash) + SNAPSHOT_EPOCH_ROOT_TABLE_FIELD_NUMBER; + hash = (53 * hash) + getSnapshotEpochRootTable().hashCode(); + hash = (37 * hash) + BATCH_READ_TIMESTAMP_MICROS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getBatchReadTimestampMicros()); + switch (concurrencyModeCase_) { + case 1: + hash = (37 * hash) + STALENESS_SECONDS_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getStalenessSeconds())); + break; + case 2: + hash = (37 * hash) + MIN_READ_TIMESTAMP_MICROS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getMinReadTimestampMicros()); + break; + case 3: + hash = (37 * hash) + MAX_STALENESS_SECONDS_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + 
java.lang.Double.doubleToLongBits(getMaxStalenessSeconds())); + break; + case 4: + hash = (37 * hash) + EXACT_TIMESTAMP_MICROS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getExactTimestampMicros()); + break; + case 5: + hash = (37 * hash) + STRONG_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getStrong()); + break; + case 6: + hash = (37 * hash) + BATCH_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getBatch()); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.Concurrency parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.Concurrency parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.Concurrency parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.Concurrency parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.Concurrency parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.Concurrency parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.Concurrency parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.Concurrency parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.Concurrency parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.Concurrency parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.Concurrency parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.Concurrency parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.Concurrency 
prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Concurrency for read-only transactions.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.Concurrency} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.Concurrency) + com.google.spanner.executor.v1.ConcurrencyOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_Concurrency_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_Concurrency_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.Concurrency.class, + com.google.spanner.executor.v1.Concurrency.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.Concurrency.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + snapshotEpochRead_ = false; + snapshotEpochRootTable_ = ""; + batchReadTimestampMicros_ = 0L; + concurrencyModeCase_ = 0; + concurrencyMode_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_Concurrency_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.Concurrency getDefaultInstanceForType() { + return com.google.spanner.executor.v1.Concurrency.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.Concurrency build() { + com.google.spanner.executor.v1.Concurrency result = buildPartial(); + if 
(!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.Concurrency buildPartial() { + com.google.spanner.executor.v1.Concurrency result = + new com.google.spanner.executor.v1.Concurrency(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.Concurrency result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000040) != 0)) { + result.snapshotEpochRead_ = snapshotEpochRead_; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.snapshotEpochRootTable_ = snapshotEpochRootTable_; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.batchReadTimestampMicros_ = batchReadTimestampMicros_; + } + } + + private void buildPartialOneofs(com.google.spanner.executor.v1.Concurrency result) { + result.concurrencyModeCase_ = concurrencyModeCase_; + result.concurrencyMode_ = this.concurrencyMode_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.Concurrency) { + return mergeFrom((com.google.spanner.executor.v1.Concurrency) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.Concurrency other) { + if (other == com.google.spanner.executor.v1.Concurrency.getDefaultInstance()) return this; + if (other.getSnapshotEpochRead() != false) { + setSnapshotEpochRead(other.getSnapshotEpochRead()); + } + if (!other.getSnapshotEpochRootTable().isEmpty()) { + snapshotEpochRootTable_ = other.snapshotEpochRootTable_; + bitField0_ |= 0x00000080; + onChanged(); + } + if (other.getBatchReadTimestampMicros() != 0L) { + setBatchReadTimestampMicros(other.getBatchReadTimestampMicros()); + } + switch (other.getConcurrencyModeCase()) { + case STALENESS_SECONDS: + { + 
setStalenessSeconds(other.getStalenessSeconds()); + break; + } + case MIN_READ_TIMESTAMP_MICROS: + { + setMinReadTimestampMicros(other.getMinReadTimestampMicros()); + break; + } + case MAX_STALENESS_SECONDS: + { + setMaxStalenessSeconds(other.getMaxStalenessSeconds()); + break; + } + case EXACT_TIMESTAMP_MICROS: + { + setExactTimestampMicros(other.getExactTimestampMicros()); + break; + } + case STRONG: + { + setStrong(other.getStrong()); + break; + } + case BATCH: + { + setBatch(other.getBatch()); + break; + } + case CONCURRENCYMODE_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 9: + { + concurrencyMode_ = input.readDouble(); + concurrencyModeCase_ = 1; + break; + } // case 9 + case 16: + { + concurrencyMode_ = input.readInt64(); + concurrencyModeCase_ = 2; + break; + } // case 16 + case 25: + { + concurrencyMode_ = input.readDouble(); + concurrencyModeCase_ = 3; + break; + } // case 25 + case 32: + { + concurrencyMode_ = input.readInt64(); + concurrencyModeCase_ = 4; + break; + } // case 32 + case 40: + { + concurrencyMode_ = input.readBool(); + concurrencyModeCase_ = 5; + break; + } // case 40 + case 48: + { + concurrencyMode_ = input.readBool(); + concurrencyModeCase_ = 6; + break; + } // case 48 + case 56: + { + snapshotEpochRead_ = input.readBool(); + bitField0_ |= 0x00000040; + break; + } // case 56 + case 66: + { + snapshotEpochRootTable_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000080; + 
break; + } // case 66 + case 72: + { + batchReadTimestampMicros_ = input.readInt64(); + bitField0_ |= 0x00000100; + break; + } // case 72 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int concurrencyModeCase_ = 0; + private java.lang.Object concurrencyMode_; + + public ConcurrencyModeCase getConcurrencyModeCase() { + return ConcurrencyModeCase.forNumber(concurrencyModeCase_); + } + + public Builder clearConcurrencyMode() { + concurrencyModeCase_ = 0; + concurrencyMode_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + /** + * + * + *
    +     * Indicates a read at a consistent timestamp that is specified relative to
    +     * now. That is, if the caller has specified an exact staleness of s
    +     * seconds, we will read at now - s.
    +     * 
    + * + * double staleness_seconds = 1; + * + * @return Whether the stalenessSeconds field is set. + */ + public boolean hasStalenessSeconds() { + return concurrencyModeCase_ == 1; + } + + /** + * + * + *
    +     * Indicates a read at a consistent timestamp that is specified relative to
    +     * now. That is, if the caller has specified an exact staleness of s
    +     * seconds, we will read at now - s.
    +     * 
    + * + * double staleness_seconds = 1; + * + * @return The stalenessSeconds. + */ + public double getStalenessSeconds() { + if (concurrencyModeCase_ == 1) { + return (java.lang.Double) concurrencyMode_; + } + return 0D; + } + + /** + * + * + *
    +     * Indicates a read at a consistent timestamp that is specified relative to
    +     * now. That is, if the caller has specified an exact staleness of s
    +     * seconds, we will read at now - s.
    +     * 
    + * + * double staleness_seconds = 1; + * + * @param value The stalenessSeconds to set. + * @return This builder for chaining. + */ + public Builder setStalenessSeconds(double value) { + + concurrencyModeCase_ = 1; + concurrencyMode_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Indicates a read at a consistent timestamp that is specified relative to
    +     * now. That is, if the caller has specified an exact staleness of s
    +     * seconds, we will read at now - s.
    +     * 
    + * + * double staleness_seconds = 1; + * + * @return This builder for chaining. + */ + public Builder clearStalenessSeconds() { + if (concurrencyModeCase_ == 1) { + concurrencyModeCase_ = 0; + concurrencyMode_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Indicates a boundedly stale read that reads at a timestamp >= T.
    +     * 
    + * + * int64 min_read_timestamp_micros = 2; + * + * @return Whether the minReadTimestampMicros field is set. + */ + public boolean hasMinReadTimestampMicros() { + return concurrencyModeCase_ == 2; + } + + /** + * + * + *
    +     * Indicates a boundedly stale read that reads at a timestamp >= T.
    +     * 
    + * + * int64 min_read_timestamp_micros = 2; + * + * @return The minReadTimestampMicros. + */ + public long getMinReadTimestampMicros() { + if (concurrencyModeCase_ == 2) { + return (java.lang.Long) concurrencyMode_; + } + return 0L; + } + + /** + * + * + *
    +     * Indicates a boundedly stale read that reads at a timestamp >= T.
    +     * 
    + * + * int64 min_read_timestamp_micros = 2; + * + * @param value The minReadTimestampMicros to set. + * @return This builder for chaining. + */ + public Builder setMinReadTimestampMicros(long value) { + + concurrencyModeCase_ = 2; + concurrencyMode_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Indicates a boundedly stale read that reads at a timestamp >= T.
    +     * 
    + * + * int64 min_read_timestamp_micros = 2; + * + * @return This builder for chaining. + */ + public Builder clearMinReadTimestampMicros() { + if (concurrencyModeCase_ == 2) { + concurrencyModeCase_ = 0; + concurrencyMode_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Indicates a boundedly stale read that is at most N seconds stale.
    +     * 
    + * + * double max_staleness_seconds = 3; + * + * @return Whether the maxStalenessSeconds field is set. + */ + public boolean hasMaxStalenessSeconds() { + return concurrencyModeCase_ == 3; + } + + /** + * + * + *
    +     * Indicates a boundedly stale read that is at most N seconds stale.
    +     * 
    + * + * double max_staleness_seconds = 3; + * + * @return The maxStalenessSeconds. + */ + public double getMaxStalenessSeconds() { + if (concurrencyModeCase_ == 3) { + return (java.lang.Double) concurrencyMode_; + } + return 0D; + } + + /** + * + * + *
    +     * Indicates a boundedly stale read that is at most N seconds stale.
    +     * 
    + * + * double max_staleness_seconds = 3; + * + * @param value The maxStalenessSeconds to set. + * @return This builder for chaining. + */ + public Builder setMaxStalenessSeconds(double value) { + + concurrencyModeCase_ = 3; + concurrencyMode_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Indicates a boundedly stale read that is at most N seconds stale.
    +     * 
    + * + * double max_staleness_seconds = 3; + * + * @return This builder for chaining. + */ + public Builder clearMaxStalenessSeconds() { + if (concurrencyModeCase_ == 3) { + concurrencyModeCase_ = 0; + concurrencyMode_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Indicates a read at a consistent timestamp.
    +     * 
    + * + * int64 exact_timestamp_micros = 4; + * + * @return Whether the exactTimestampMicros field is set. + */ + public boolean hasExactTimestampMicros() { + return concurrencyModeCase_ == 4; + } + + /** + * + * + *
    +     * Indicates a read at a consistent timestamp.
    +     * 
    + * + * int64 exact_timestamp_micros = 4; + * + * @return The exactTimestampMicros. + */ + public long getExactTimestampMicros() { + if (concurrencyModeCase_ == 4) { + return (java.lang.Long) concurrencyMode_; + } + return 0L; + } + + /** + * + * + *
    +     * Indicates a read at a consistent timestamp.
    +     * 
    + * + * int64 exact_timestamp_micros = 4; + * + * @param value The exactTimestampMicros to set. + * @return This builder for chaining. + */ + public Builder setExactTimestampMicros(long value) { + + concurrencyModeCase_ = 4; + concurrencyMode_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Indicates a read at a consistent timestamp.
    +     * 
    + * + * int64 exact_timestamp_micros = 4; + * + * @return This builder for chaining. + */ + public Builder clearExactTimestampMicros() { + if (concurrencyModeCase_ == 4) { + concurrencyModeCase_ = 0; + concurrencyMode_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Indicates a strong read, must only be set to true, or unset.
    +     * 
    + * + * bool strong = 5; + * + * @return Whether the strong field is set. + */ + public boolean hasStrong() { + return concurrencyModeCase_ == 5; + } + + /** + * + * + *
    +     * Indicates a strong read, must only be set to true, or unset.
    +     * 
    + * + * bool strong = 5; + * + * @return The strong. + */ + public boolean getStrong() { + if (concurrencyModeCase_ == 5) { + return (java.lang.Boolean) concurrencyMode_; + } + return false; + } + + /** + * + * + *
    +     * Indicates a strong read, must only be set to true, or unset.
    +     * 
    + * + * bool strong = 5; + * + * @param value The strong to set. + * @return This builder for chaining. + */ + public Builder setStrong(boolean value) { + + concurrencyModeCase_ = 5; + concurrencyMode_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Indicates a strong read, must only be set to true, or unset.
    +     * 
    + * + * bool strong = 5; + * + * @return This builder for chaining. + */ + public Builder clearStrong() { + if (concurrencyModeCase_ == 5) { + concurrencyModeCase_ = 0; + concurrencyMode_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Indicates a batch read, must only be set to true, or unset.
    +     * 
    + * + * bool batch = 6; + * + * @return Whether the batch field is set. + */ + public boolean hasBatch() { + return concurrencyModeCase_ == 6; + } + + /** + * + * + *
    +     * Indicates a batch read, must only be set to true, or unset.
    +     * 
    + * + * bool batch = 6; + * + * @return The batch. + */ + public boolean getBatch() { + if (concurrencyModeCase_ == 6) { + return (java.lang.Boolean) concurrencyMode_; + } + return false; + } + + /** + * + * + *
    +     * Indicates a batch read, must only be set to true, or unset.
    +     * 
    + * + * bool batch = 6; + * + * @param value The batch to set. + * @return This builder for chaining. + */ + public Builder setBatch(boolean value) { + + concurrencyModeCase_ = 6; + concurrencyMode_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Indicates a batch read, must only be set to true, or unset.
    +     * 
    + * + * bool batch = 6; + * + * @return This builder for chaining. + */ + public Builder clearBatch() { + if (concurrencyModeCase_ == 6) { + concurrencyModeCase_ = 0; + concurrencyMode_ = null; + onChanged(); + } + return this; + } + + private boolean snapshotEpochRead_; + + /** + * + * + *
    +     * True if exact_timestamp_micros is set, and the chosen timestamp is that of
    +     * a snapshot epoch.
    +     * 
    + * + * bool snapshot_epoch_read = 7; + * + * @return The snapshotEpochRead. + */ + @java.lang.Override + public boolean getSnapshotEpochRead() { + return snapshotEpochRead_; + } + + /** + * + * + *
    +     * True if exact_timestamp_micros is set, and the chosen timestamp is that of
    +     * a snapshot epoch.
    +     * 
    + * + * bool snapshot_epoch_read = 7; + * + * @param value The snapshotEpochRead to set. + * @return This builder for chaining. + */ + public Builder setSnapshotEpochRead(boolean value) { + + snapshotEpochRead_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * True if exact_timestamp_micros is set, and the chosen timestamp is that of
    +     * a snapshot epoch.
    +     * 
    + * + * bool snapshot_epoch_read = 7; + * + * @return This builder for chaining. + */ + public Builder clearSnapshotEpochRead() { + bitField0_ = (bitField0_ & ~0x00000040); + snapshotEpochRead_ = false; + onChanged(); + return this; + } + + private java.lang.Object snapshotEpochRootTable_ = ""; + + /** + * + * + *
    +     * If set, this is a snapshot epoch read constrained to read only the
    +     * specified log scope root table, and its children. Will not be set for full
    +     * database epochs.
    +     * 
    + * + * string snapshot_epoch_root_table = 8; + * + * @return The snapshotEpochRootTable. + */ + public java.lang.String getSnapshotEpochRootTable() { + java.lang.Object ref = snapshotEpochRootTable_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + snapshotEpochRootTable_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If set, this is a snapshot epoch read constrained to read only the
    +     * specified log scope root table, and its children. Will not be set for full
    +     * database epochs.
    +     * 
    + * + * string snapshot_epoch_root_table = 8; + * + * @return The bytes for snapshotEpochRootTable. + */ + public com.google.protobuf.ByteString getSnapshotEpochRootTableBytes() { + java.lang.Object ref = snapshotEpochRootTable_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + snapshotEpochRootTable_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If set, this is a snapshot epoch read constrained to read only the
    +     * specified log scope root table, and its children. Will not be set for full
    +     * database epochs.
    +     * 
    + * + * string snapshot_epoch_root_table = 8; + * + * @param value The snapshotEpochRootTable to set. + * @return This builder for chaining. + */ + public Builder setSnapshotEpochRootTable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + snapshotEpochRootTable_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If set, this is a snapshot epoch read constrained to read only the
    +     * specified log scope root table, and its children. Will not be set for full
    +     * database epochs.
    +     * 
    + * + * string snapshot_epoch_root_table = 8; + * + * @return This builder for chaining. + */ + public Builder clearSnapshotEpochRootTable() { + snapshotEpochRootTable_ = getDefaultInstance().getSnapshotEpochRootTable(); + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If set, this is a snapshot epoch read constrained to read only the
    +     * specified log scope root table, and its children. Will not be set for full
    +     * database epochs.
    +     * 
    + * + * string snapshot_epoch_root_table = 8; + * + * @param value The bytes for snapshotEpochRootTable to set. + * @return This builder for chaining. + */ + public Builder setSnapshotEpochRootTableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + snapshotEpochRootTable_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + private long batchReadTimestampMicros_; + + /** + * + * + *
    +     * Set only when batch is true.
    +     * 
    + * + * int64 batch_read_timestamp_micros = 9; + * + * @return The batchReadTimestampMicros. + */ + @java.lang.Override + public long getBatchReadTimestampMicros() { + return batchReadTimestampMicros_; + } + + /** + * + * + *
    +     * Set only when batch is true.
    +     * 
    + * + * int64 batch_read_timestamp_micros = 9; + * + * @param value The batchReadTimestampMicros to set. + * @return This builder for chaining. + */ + public Builder setBatchReadTimestampMicros(long value) { + + batchReadTimestampMicros_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Set only when batch is true.
    +     * 
    + * + * int64 batch_read_timestamp_micros = 9; + * + * @return This builder for chaining. + */ + public Builder clearBatchReadTimestampMicros() { + bitField0_ = (bitField0_ & ~0x00000100); + batchReadTimestampMicros_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.Concurrency) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.Concurrency) + private static final com.google.spanner.executor.v1.Concurrency DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.Concurrency(); + } + + public static com.google.spanner.executor.v1.Concurrency getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Concurrency parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.Concurrency getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ConcurrencyOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ConcurrencyOrBuilder.java new file mode 100644 index 000000000000..4fed9fa7f708 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ConcurrencyOrBuilder.java @@ -0,0 +1,247 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface ConcurrencyOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.Concurrency) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Indicates a read at a consistent timestamp that is specified relative to
    +   * now. That is, if the caller has specified an exact staleness of s
    +   * seconds, we will read at now - s.
    +   * 
    + * + * double staleness_seconds = 1; + * + * @return Whether the stalenessSeconds field is set. + */ + boolean hasStalenessSeconds(); + + /** + * + * + *
    +   * Indicates a read at a consistent timestamp that is specified relative to
    +   * now. That is, if the caller has specified an exact staleness of s
    +   * seconds, we will read at now - s.
    +   * 
    + * + * double staleness_seconds = 1; + * + * @return The stalenessSeconds. + */ + double getStalenessSeconds(); + + /** + * + * + *
    +   * Indicates a boundedly stale read that reads at a timestamp >= T.
    +   * 
    + * + * int64 min_read_timestamp_micros = 2; + * + * @return Whether the minReadTimestampMicros field is set. + */ + boolean hasMinReadTimestampMicros(); + + /** + * + * + *
    +   * Indicates a boundedly stale read that reads at a timestamp >= T.
    +   * 
    + * + * int64 min_read_timestamp_micros = 2; + * + * @return The minReadTimestampMicros. + */ + long getMinReadTimestampMicros(); + + /** + * + * + *
    +   * Indicates a boundedly stale read that is at most N seconds stale.
    +   * 
    + * + * double max_staleness_seconds = 3; + * + * @return Whether the maxStalenessSeconds field is set. + */ + boolean hasMaxStalenessSeconds(); + + /** + * + * + *
    +   * Indicates a boundedly stale read that is at most N seconds stale.
    +   * 
    + * + * double max_staleness_seconds = 3; + * + * @return The maxStalenessSeconds. + */ + double getMaxStalenessSeconds(); + + /** + * + * + *
    +   * Indicates a read at a consistent timestamp.
    +   * 
    + * + * int64 exact_timestamp_micros = 4; + * + * @return Whether the exactTimestampMicros field is set. + */ + boolean hasExactTimestampMicros(); + + /** + * + * + *
    +   * Indicates a read at a consistent timestamp.
    +   * 
    + * + * int64 exact_timestamp_micros = 4; + * + * @return The exactTimestampMicros. + */ + long getExactTimestampMicros(); + + /** + * + * + *
    +   * Indicates a strong read, must only be set to true, or unset.
    +   * 
    + * + * bool strong = 5; + * + * @return Whether the strong field is set. + */ + boolean hasStrong(); + + /** + * + * + *
    +   * Indicates a strong read, must only be set to true, or unset.
    +   * 
    + * + * bool strong = 5; + * + * @return The strong. + */ + boolean getStrong(); + + /** + * + * + *
    +   * Indicates a batch read, must only be set to true, or unset.
    +   * 
    + * + * bool batch = 6; + * + * @return Whether the batch field is set. + */ + boolean hasBatch(); + + /** + * + * + *
    +   * Indicates a batch read, must only be set to true, or unset.
    +   * 
    + * + * bool batch = 6; + * + * @return The batch. + */ + boolean getBatch(); + + /** + * + * + *
    +   * True if exact_timestamp_micros is set, and the chosen timestamp is that of
    +   * a snapshot epoch.
    +   * 
    + * + * bool snapshot_epoch_read = 7; + * + * @return The snapshotEpochRead. + */ + boolean getSnapshotEpochRead(); + + /** + * + * + *
    +   * If set, this is a snapshot epoch read constrained to read only the
    +   * specified log scope root table, and its children. Will not be set for full
    +   * database epochs.
    +   * 
    + * + * string snapshot_epoch_root_table = 8; + * + * @return The snapshotEpochRootTable. + */ + java.lang.String getSnapshotEpochRootTable(); + + /** + * + * + *
    +   * If set, this is a snapshot epoch read constrained to read only the
    +   * specified log scope root table, and its children. Will not be set for full
    +   * database epochs.
    +   * 
    + * + * string snapshot_epoch_root_table = 8; + * + * @return The bytes for snapshotEpochRootTable. + */ + com.google.protobuf.ByteString getSnapshotEpochRootTableBytes(); + + /** + * + * + *
    +   * Set only when batch is true.
    +   * 
    + * + * int64 batch_read_timestamp_micros = 9; + * + * @return The batchReadTimestampMicros. + */ + long getBatchReadTimestampMicros(); + + com.google.spanner.executor.v1.Concurrency.ConcurrencyModeCase getConcurrencyModeCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CopyCloudBackupAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CopyCloudBackupAction.java new file mode 100644 index 000000000000..0a1dea9e4149 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CopyCloudBackupAction.java @@ -0,0 +1,1498 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that copies a Cloud Spanner database backup.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CopyCloudBackupAction} + */ +@com.google.protobuf.Generated +public final class CopyCloudBackupAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.CopyCloudBackupAction) + CopyCloudBackupActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CopyCloudBackupAction"); + } + + // Use CopyCloudBackupAction.newBuilder() to construct. + private CopyCloudBackupAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CopyCloudBackupAction() { + projectId_ = ""; + instanceId_ = ""; + backupId_ = ""; + sourceBackup_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CopyCloudBackupAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CopyCloudBackupAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CopyCloudBackupAction.class, + com.google.spanner.executor.v1.CopyCloudBackupAction.Builder.class); + } + + private int bitField0_; + public static final int PROJECT_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + @java.lang.Override + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BACKUP_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object backupId_ = ""; + + /** + * + * + *
    +   * The id of the backup to be created, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The backupId. + */ + @java.lang.Override + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupId_ = s; + return s; + } + } + + /** + * + * + *
    +   * The id of the backup to be created, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The bytes for backupId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SOURCE_BACKUP_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object sourceBackup_ = ""; + + /** + * + * + *
    +   * The fully qualified uri of the source backup from which this
    +   * backup was copied. eg.
    +   * "projects/<project_id>/instances/<instance_id>/backups/<backup_id>".
    +   * 
    + * + * string source_backup = 4; + * + * @return The sourceBackup. + */ + @java.lang.Override + public java.lang.String getSourceBackup() { + java.lang.Object ref = sourceBackup_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sourceBackup_ = s; + return s; + } + } + + /** + * + * + *
    +   * The fully qualified uri of the source backup from which this
    +   * backup was copied. eg.
    +   * "projects/<project_id>/instances/<instance_id>/backups/<backup_id>".
    +   * 
    + * + * string source_backup = 4; + * + * @return The bytes for sourceBackup. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSourceBackupBytes() { + java.lang.Object ref = sourceBackup_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sourceBackup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int EXPIRE_TIME_FIELD_NUMBER = 5; + private com.google.protobuf.Timestamp expireTime_; + + /** + * + * + *
    +   * Output only. The expiration time of the backup, which must be at least 6
    +   * hours and at most 366 days from the time the request is received.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + @java.lang.Override + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Output only. The expiration time of the backup, which must be at least 6
    +   * hours and at most 366 days from the time the request is received.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getExpireTime() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + /** + * + * + *
    +   * Output only. The expiration time of the backup, which must be at least 6
    +   * hours and at most 366 days from the time the request is received.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(backupId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, backupId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sourceBackup_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, sourceBackup_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(5, getExpireTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(backupId_)) { + size += 
com.google.protobuf.GeneratedMessage.computeStringSize(3, backupId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sourceBackup_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, sourceBackup_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getExpireTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.CopyCloudBackupAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.CopyCloudBackupAction other = + (com.google.spanner.executor.v1.CopyCloudBackupAction) obj; + + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getInstanceId().equals(other.getInstanceId())) return false; + if (!getBackupId().equals(other.getBackupId())) return false; + if (!getSourceBackup().equals(other.getSourceBackup())) return false; + if (hasExpireTime() != other.hasExpireTime()) return false; + if (hasExpireTime()) { + if (!getExpireTime().equals(other.getExpireTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); + hash = (37 * hash) + SOURCE_BACKUP_FIELD_NUMBER; + hash = (53 * hash) + getSourceBackup().hashCode(); + if (hasExpireTime()) { + hash = (37 * hash) + EXPIRE_TIME_FIELD_NUMBER; + 
hash = (53 * hash) + getExpireTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.CopyCloudBackupAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CopyCloudBackupAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CopyCloudBackupAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CopyCloudBackupAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CopyCloudBackupAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CopyCloudBackupAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CopyCloudBackupAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CopyCloudBackupAction parseFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CopyCloudBackupAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CopyCloudBackupAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CopyCloudBackupAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CopyCloudBackupAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.CopyCloudBackupAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that copies a Cloud Spanner database backup.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CopyCloudBackupAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.CopyCloudBackupAction) + com.google.spanner.executor.v1.CopyCloudBackupActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CopyCloudBackupAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CopyCloudBackupAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CopyCloudBackupAction.class, + com.google.spanner.executor.v1.CopyCloudBackupAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.CopyCloudBackupAction.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetExpireTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + projectId_ = ""; + instanceId_ = ""; + backupId_ = ""; + sourceBackup_ = ""; + expireTime_ = null; + if (expireTimeBuilder_ != null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + 
.internal_static_google_spanner_executor_v1_CopyCloudBackupAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CopyCloudBackupAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.CopyCloudBackupAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.CopyCloudBackupAction build() { + com.google.spanner.executor.v1.CopyCloudBackupAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CopyCloudBackupAction buildPartial() { + com.google.spanner.executor.v1.CopyCloudBackupAction result = + new com.google.spanner.executor.v1.CopyCloudBackupAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.CopyCloudBackupAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.projectId_ = projectId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.instanceId_ = instanceId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.backupId_ = backupId_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.sourceBackup_ = sourceBackup_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000010) != 0)) { + result.expireTime_ = expireTimeBuilder_ == null ? 
expireTime_ : expireTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.CopyCloudBackupAction) { + return mergeFrom((com.google.spanner.executor.v1.CopyCloudBackupAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.CopyCloudBackupAction other) { + if (other == com.google.spanner.executor.v1.CopyCloudBackupAction.getDefaultInstance()) + return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getBackupId().isEmpty()) { + backupId_ = other.backupId_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getSourceBackup().isEmpty()) { + sourceBackup_ = other.sourceBackup_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (other.hasExpireTime()) { + mergeExpireTime(other.getExpireTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + instanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 
+ case 26: + { + backupId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + sourceBackup_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + input.readMessage( + internalGetExpireTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearInstanceId() { + instanceId_ = getDefaultInstance().getInstanceId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The bytes for instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object backupId_ = ""; + + /** + * + * + *
    +     * The id of the backup to be created, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @return The backupId. + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The id of the backup to be created, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @return The bytes for backupId. + */ + public com.google.protobuf.ByteString getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The id of the backup to be created, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @param value The backupId to set. + * @return This builder for chaining. + */ + public Builder setBackupId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + backupId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The id of the backup to be created, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @return This builder for chaining. + */ + public Builder clearBackupId() { + backupId_ = getDefaultInstance().getBackupId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The id of the backup to be created, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @param value The bytes for backupId to set. + * @return This builder for chaining. + */ + public Builder setBackupIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + backupId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.lang.Object sourceBackup_ = ""; + + /** + * + * + *
    +     * The fully qualified uri of the source backup from which this
    +     * backup was copied. eg.
    +     * "projects/<project_id>/instances/<instance_id>/backups/<backup_id>".
    +     * 
    + * + * string source_backup = 4; + * + * @return The sourceBackup. + */ + public java.lang.String getSourceBackup() { + java.lang.Object ref = sourceBackup_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sourceBackup_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The fully qualified uri of the source backup from which this
    +     * backup was copied. eg.
    +     * "projects/<project_id>/instances/<instance_id>/backups/<backup_id>".
    +     * 
    + * + * string source_backup = 4; + * + * @return The bytes for sourceBackup. + */ + public com.google.protobuf.ByteString getSourceBackupBytes() { + java.lang.Object ref = sourceBackup_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sourceBackup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The fully qualified uri of the source backup from which this
    +     * backup was copied. eg.
    +     * "projects/<project_id>/instances/<instance_id>/backups/<backup_id>".
    +     * 
    + * + * string source_backup = 4; + * + * @param value The sourceBackup to set. + * @return This builder for chaining. + */ + public Builder setSourceBackup(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + sourceBackup_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The fully qualified uri of the source backup from which this
    +     * backup was copied. eg.
    +     * "projects/<project_id>/instances/<instance_id>/backups/<backup_id>".
    +     * 
    + * + * string source_backup = 4; + * + * @return This builder for chaining. + */ + public Builder clearSourceBackup() { + sourceBackup_ = getDefaultInstance().getSourceBackup(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The fully qualified uri of the source backup from which this
    +     * backup was copied. eg.
    +     * "projects/<project_id>/instances/<instance_id>/backups/<backup_id>".
    +     * 
    + * + * string source_backup = 4; + * + * @param value The bytes for sourceBackup to set. + * @return This builder for chaining. + */ + public Builder setSourceBackupBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + sourceBackup_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp expireTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + expireTimeBuilder_; + + /** + * + * + *
    +     * Output only. The expiration time of the backup, which must be at least 6
    +     * hours and at most 366 days from the time the request is received.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * Output only. The expiration time of the backup, which must be at least 6
    +     * hours and at most 366 days from the time the request is received.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + public com.google.protobuf.Timestamp getExpireTime() { + if (expireTimeBuilder_ == null) { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } else { + return expireTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. The expiration time of the backup, which must be at least 6
    +     * hours and at most 366 days from the time the request is received.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + expireTime_ = value; + } else { + expireTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The expiration time of the backup, which must be at least 6
    +     * hours and at most 366 days from the time the request is received.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setExpireTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (expireTimeBuilder_ == null) { + expireTime_ = builderForValue.build(); + } else { + expireTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The expiration time of the backup, which must be at least 6
    +     * hours and at most 366 days from the time the request is received.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && expireTime_ != null + && expireTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getExpireTimeBuilder().mergeFrom(value); + } else { + expireTime_ = value; + } + } else { + expireTimeBuilder_.mergeFrom(value); + } + if (expireTime_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. The expiration time of the backup, which must be at least 6
    +     * hours and at most 366 days from the time the request is received.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearExpireTime() { + bitField0_ = (bitField0_ & ~0x00000010); + expireTime_ = null; + if (expireTimeBuilder_ != null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The expiration time of the backup, which must be at least 6
    +     * hours and at most 366 days from the time the request is received.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getExpireTimeBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetExpireTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. The expiration time of the backup, which must be at least 6
    +     * hours and at most 366 days from the time the request is received.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + if (expireTimeBuilder_ != null) { + return expireTimeBuilder_.getMessageOrBuilder(); + } else { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } + } + + /** + * + * + *
    +     * Output only. The expiration time of the backup, which must be at least 6
    +     * hours and at most 366 days from the time the request is received.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetExpireTimeFieldBuilder() { + if (expireTimeBuilder_ == null) { + expireTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getExpireTime(), getParentForChildren(), isClean()); + expireTime_ = null; + } + return expireTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.CopyCloudBackupAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.CopyCloudBackupAction) + private static final com.google.spanner.executor.v1.CopyCloudBackupAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.CopyCloudBackupAction(); + } + + public static com.google.spanner.executor.v1.CopyCloudBackupAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CopyCloudBackupAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CopyCloudBackupAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CopyCloudBackupActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CopyCloudBackupActionOrBuilder.java new file mode 100644 index 000000000000..9c1acde0f992 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CopyCloudBackupActionOrBuilder.java @@ -0,0 +1,179 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface CopyCloudBackupActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.CopyCloudBackupAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + java.lang.String getInstanceId(); + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + com.google.protobuf.ByteString getInstanceIdBytes(); + + /** + * + * + *
    +   * The id of the backup to be created, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The backupId. + */ + java.lang.String getBackupId(); + + /** + * + * + *
    +   * The id of the backup to be created, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The bytes for backupId. + */ + com.google.protobuf.ByteString getBackupIdBytes(); + + /** + * + * + *
    +   * The fully qualified uri of the source backup from which this
    +   * backup was copied. eg.
    +   * "projects/<project_id>/instances/<instance_id>/backups/<backup_id>".
    +   * 
    + * + * string source_backup = 4; + * + * @return The sourceBackup. + */ + java.lang.String getSourceBackup(); + + /** + * + * + *
    +   * The fully qualified uri of the source backup from which this
    +   * backup was copied. eg.
    +   * "projects/<project_id>/instances/<instance_id>/backups/<backup_id>".
    +   * 
    + * + * string source_backup = 4; + * + * @return The bytes for sourceBackup. + */ + com.google.protobuf.ByteString getSourceBackupBytes(); + + /** + * + * + *
    +   * Output only. The expiration time of the backup, which must be at least 6
    +   * hours and at most 366 days from the time the request is received.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + boolean hasExpireTime(); + + /** + * + * + *
    +   * Output only. The expiration time of the backup, which must be at least 6
    +   * hours and at most 366 days from the time the request is received.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + com.google.protobuf.Timestamp getExpireTime(); + + /** + * + * + *
    +   * Output only. The expiration time of the backup, which must be at least 6
    +   * hours and at most 366 days from the time the request is received.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudBackupAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudBackupAction.java new file mode 100644 index 000000000000..d1dc1060482d --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudBackupAction.java @@ -0,0 +1,2102 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that creates a Cloud Spanner database backup.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CreateCloudBackupAction} + */ +@com.google.protobuf.Generated +public final class CreateCloudBackupAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.CreateCloudBackupAction) + CreateCloudBackupActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateCloudBackupAction"); + } + + // Use CreateCloudBackupAction.newBuilder() to construct. + private CreateCloudBackupAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateCloudBackupAction() { + projectId_ = ""; + instanceId_ = ""; + backupId_ = ""; + databaseId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CreateCloudBackupAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CreateCloudBackupAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CreateCloudBackupAction.class, + com.google.spanner.executor.v1.CreateCloudBackupAction.Builder.class); + } + + private int bitField0_; + public static final int PROJECT_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + @java.lang.Override + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BACKUP_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object backupId_ = ""; + + /** + * + * + *
    +   * The id of the backup to be created, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The backupId. + */ + @java.lang.Override + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupId_ = s; + return s; + } + } + + /** + * + * + *
    +   * The id of the backup to be created, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The bytes for backupId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DATABASE_ID_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object databaseId_ = ""; + + /** + * + * + *
    +   * The id of the database from which this backup was
    +   * created, e.g. "db0". Note that this needs to be in the
    +   * same instance as the backup.
    +   * 
    + * + * string database_id = 4; + * + * @return The databaseId. + */ + @java.lang.Override + public java.lang.String getDatabaseId() { + java.lang.Object ref = databaseId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseId_ = s; + return s; + } + } + + /** + * + * + *
    +   * The id of the database from which this backup was
    +   * created, e.g. "db0". Note that this needs to be in the
    +   * same instance as the backup.
    +   * 
    + * + * string database_id = 4; + * + * @return The bytes for databaseId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseIdBytes() { + java.lang.Object ref = databaseId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int EXPIRE_TIME_FIELD_NUMBER = 5; + private com.google.protobuf.Timestamp expireTime_; + + /** + * + * + *
    +   * Output only. The expiration time of the backup, which must be at least 6
    +   * hours and at most 366 days from the time the request is received.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + @java.lang.Override + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Output only. The expiration time of the backup, which must be at least 6
    +   * hours and at most 366 days from the time the request is received.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getExpireTime() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + /** + * + * + *
    +   * Output only. The expiration time of the backup, which must be at least 6
    +   * hours and at most 366 days from the time the request is received.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + public static final int VERSION_TIME_FIELD_NUMBER = 6; + private com.google.protobuf.Timestamp versionTime_; + + /** + * + * + *
    +   * The version time of the backup, which must be within the time range of
    +   * [earliest_version_time, NOW], where earliest_version_time is retrieved by
    +   * cloud spanner frontend API (See details: go/cs-pitr-lite-design).
    +   * 
    + * + * optional .google.protobuf.Timestamp version_time = 6; + * + * @return Whether the versionTime field is set. + */ + @java.lang.Override + public boolean hasVersionTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * The version time of the backup, which must be within the time range of
    +   * [earliest_version_time, NOW], where earliest_version_time is retrieved by
    +   * cloud spanner frontend API (See details: go/cs-pitr-lite-design).
    +   * 
    + * + * optional .google.protobuf.Timestamp version_time = 6; + * + * @return The versionTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getVersionTime() { + return versionTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : versionTime_; + } + + /** + * + * + *
    +   * The version time of the backup, which must be within the time range of
    +   * [earliest_version_time, NOW], where earliest_version_time is retrieved by
    +   * cloud spanner frontend API (See details: go/cs-pitr-lite-design).
    +   * 
    + * + * optional .google.protobuf.Timestamp version_time = 6; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getVersionTimeOrBuilder() { + return versionTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : versionTime_; + } + + public static final int ENCRYPTION_CONFIG_FIELD_NUMBER = 7; + private com.google.spanner.admin.database.v1.EncryptionConfig encryptionConfig_; + + /** + * + * + *
    +   * The KMS key(s) used to encrypt the backup to be created if the backup
    +   * should be CMEK protected.
    +   * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + * + * @return Whether the encryptionConfig field is set. + */ + @java.lang.Override + public boolean hasEncryptionConfig() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * The KMS key(s) used to encrypt the backup to be created if the backup
    +   * should be CMEK protected.
    +   * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + * + * @return The encryptionConfig. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionConfig getEncryptionConfig() { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + + /** + * + * + *
    +   * The KMS key(s) used to encrypt the backup to be created if the backup
    +   * should be CMEK protected.
    +   * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder + getEncryptionConfigOrBuilder() { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(backupId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, backupId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, databaseId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(5, getExpireTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(6, getVersionTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(7, getEncryptionConfig()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, projectId_); + } + if 
(!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(backupId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, backupId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, databaseId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getExpireTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, getVersionTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getEncryptionConfig()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.CreateCloudBackupAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.CreateCloudBackupAction other = + (com.google.spanner.executor.v1.CreateCloudBackupAction) obj; + + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getInstanceId().equals(other.getInstanceId())) return false; + if (!getBackupId().equals(other.getBackupId())) return false; + if (!getDatabaseId().equals(other.getDatabaseId())) return false; + if (hasExpireTime() != other.hasExpireTime()) return false; + if (hasExpireTime()) { + if (!getExpireTime().equals(other.getExpireTime())) return false; + } + if (hasVersionTime() != other.hasVersionTime()) return false; + if (hasVersionTime()) { + if (!getVersionTime().equals(other.getVersionTime())) return false; + } + if (hasEncryptionConfig() != other.hasEncryptionConfig()) return false; + 
if (hasEncryptionConfig()) { + if (!getEncryptionConfig().equals(other.getEncryptionConfig())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); + hash = (37 * hash) + DATABASE_ID_FIELD_NUMBER; + hash = (53 * hash) + getDatabaseId().hashCode(); + if (hasExpireTime()) { + hash = (37 * hash) + EXPIRE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getExpireTime().hashCode(); + } + if (hasVersionTime()) { + hash = (37 * hash) + VERSION_TIME_FIELD_NUMBER; + hash = (53 * hash) + getVersionTime().hashCode(); + } + if (hasEncryptionConfig()) { + hash = (37 * hash) + ENCRYPTION_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getEncryptionConfig().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.CreateCloudBackupAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CreateCloudBackupAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CreateCloudBackupAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CreateCloudBackupAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CreateCloudBackupAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CreateCloudBackupAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CreateCloudBackupAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CreateCloudBackupAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CreateCloudBackupAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CreateCloudBackupAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CreateCloudBackupAction 
parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CreateCloudBackupAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.CreateCloudBackupAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that creates a Cloud Spanner database backup.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CreateCloudBackupAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.CreateCloudBackupAction) + com.google.spanner.executor.v1.CreateCloudBackupActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CreateCloudBackupAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CreateCloudBackupAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CreateCloudBackupAction.class, + com.google.spanner.executor.v1.CreateCloudBackupAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.CreateCloudBackupAction.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetExpireTimeFieldBuilder(); + internalGetVersionTimeFieldBuilder(); + internalGetEncryptionConfigFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + projectId_ = ""; + instanceId_ = ""; + backupId_ = ""; + databaseId_ = ""; + expireTime_ = null; + if (expireTimeBuilder_ != null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + versionTime_ = null; + if (versionTimeBuilder_ != null) { + versionTimeBuilder_.dispose(); + versionTimeBuilder_ = null; + 
} + encryptionConfig_ = null; + if (encryptionConfigBuilder_ != null) { + encryptionConfigBuilder_.dispose(); + encryptionConfigBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CreateCloudBackupAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudBackupAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.CreateCloudBackupAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudBackupAction build() { + com.google.spanner.executor.v1.CreateCloudBackupAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudBackupAction buildPartial() { + com.google.spanner.executor.v1.CreateCloudBackupAction result = + new com.google.spanner.executor.v1.CreateCloudBackupAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.CreateCloudBackupAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.projectId_ = projectId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.instanceId_ = instanceId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.backupId_ = backupId_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.databaseId_ = databaseId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000010) != 0)) { + result.expireTime_ = expireTimeBuilder_ == null ? 
expireTime_ : expireTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.versionTime_ = + versionTimeBuilder_ == null ? versionTime_ : versionTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.encryptionConfig_ = + encryptionConfigBuilder_ == null ? encryptionConfig_ : encryptionConfigBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.CreateCloudBackupAction) { + return mergeFrom((com.google.spanner.executor.v1.CreateCloudBackupAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.CreateCloudBackupAction other) { + if (other == com.google.spanner.executor.v1.CreateCloudBackupAction.getDefaultInstance()) + return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getBackupId().isEmpty()) { + backupId_ = other.backupId_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getDatabaseId().isEmpty()) { + databaseId_ = other.databaseId_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (other.hasExpireTime()) { + mergeExpireTime(other.getExpireTime()); + } + if (other.hasVersionTime()) { + mergeVersionTime(other.getVersionTime()); + } + if (other.hasEncryptionConfig()) { + mergeEncryptionConfig(other.getEncryptionConfig()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + instanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + backupId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + databaseId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + input.readMessage( + internalGetExpireTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 50: + { + input.readMessage( + internalGetVersionTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000020; + break; + } // case 50 + case 58: + { + input.readMessage( + internalGetEncryptionConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000040; + break; + } // case 58 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearInstanceId() { + instanceId_ = getDefaultInstance().getInstanceId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The bytes for instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object backupId_ = ""; + + /** + * + * + *
    +     * The id of the backup to be created, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @return The backupId. + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The id of the backup to be created, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @return The bytes for backupId. + */ + public com.google.protobuf.ByteString getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The id of the backup to be created, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @param value The backupId to set. + * @return This builder for chaining. + */ + public Builder setBackupId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + backupId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The id of the backup to be created, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @return This builder for chaining. + */ + public Builder clearBackupId() { + backupId_ = getDefaultInstance().getBackupId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The id of the backup to be created, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @param value The bytes for backupId to set. + * @return This builder for chaining. + */ + public Builder setBackupIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + backupId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.lang.Object databaseId_ = ""; + + /** + * + * + *
    +     * The id of the database from which this backup was
    +     * created, e.g. "db0". Note that this needs to be in the
    +     * same instance as the backup.
    +     * 
    + * + * string database_id = 4; + * + * @return The databaseId. + */ + public java.lang.String getDatabaseId() { + java.lang.Object ref = databaseId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The id of the database from which this backup was
    +     * created, e.g. "db0". Note that this needs to be in the
    +     * same instance as the backup.
    +     * 
    + * + * string database_id = 4; + * + * @return The bytes for databaseId. + */ + public com.google.protobuf.ByteString getDatabaseIdBytes() { + java.lang.Object ref = databaseId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The id of the database from which this backup was
    +     * created, e.g. "db0". Note that this needs to be in the
    +     * same instance as the backup.
    +     * 
    + * + * string database_id = 4; + * + * @param value The databaseId to set. + * @return This builder for chaining. + */ + public Builder setDatabaseId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + databaseId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The id of the database from which this backup was
    +     * created, e.g. "db0". Note that this needs to be in the
    +     * same instance as the backup.
    +     * 
    + * + * string database_id = 4; + * + * @return This builder for chaining. + */ + public Builder clearDatabaseId() { + databaseId_ = getDefaultInstance().getDatabaseId(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The id of the database from which this backup was
    +     * created, e.g. "db0". Note that this needs to be in the
    +     * same instance as the backup.
    +     * 
    + * + * string database_id = 4; + * + * @param value The bytes for databaseId to set. + * @return This builder for chaining. + */ + public Builder setDatabaseIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + databaseId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp expireTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + expireTimeBuilder_; + + /** + * + * + *
    +     * Output only. The expiration time of the backup, which must be at least 6
    +     * hours and at most 366 days from the time the request is received.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * Output only. The expiration time of the backup, which must be at least 6
    +     * hours and at most 366 days from the time the request is received.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + public com.google.protobuf.Timestamp getExpireTime() { + if (expireTimeBuilder_ == null) { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } else { + return expireTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. The expiration time of the backup, which must be at least 6
    +     * hours and at most 366 days from the time the request is received.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + expireTime_ = value; + } else { + expireTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The expiration time of the backup, which must be at least 6
    +     * hours and at most 366 days from the time the request is received.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setExpireTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (expireTimeBuilder_ == null) { + expireTime_ = builderForValue.build(); + } else { + expireTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The expiration time of the backup, which must be at least 6
    +     * hours and at most 366 days from the time the request is received.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && expireTime_ != null + && expireTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getExpireTimeBuilder().mergeFrom(value); + } else { + expireTime_ = value; + } + } else { + expireTimeBuilder_.mergeFrom(value); + } + if (expireTime_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. The expiration time of the backup, which must be at least 6
    +     * hours and at most 366 days from the time the request is received.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearExpireTime() { + bitField0_ = (bitField0_ & ~0x00000010); + expireTime_ = null; + if (expireTimeBuilder_ != null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The expiration time of the backup, which must be at least 6
    +     * hours and at most 366 days from the time the request is received.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getExpireTimeBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetExpireTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. The expiration time of the backup, which must be at least 6
    +     * hours and at most 366 days from the time the request is received.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + if (expireTimeBuilder_ != null) { + return expireTimeBuilder_.getMessageOrBuilder(); + } else { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } + } + + /** + * + * + *
    +     * Output only. The expiration time of the backup, which must be at least 6
    +     * hours and at most 366 days from the time the request is received.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetExpireTimeFieldBuilder() { + if (expireTimeBuilder_ == null) { + expireTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getExpireTime(), getParentForChildren(), isClean()); + expireTime_ = null; + } + return expireTimeBuilder_; + } + + private com.google.protobuf.Timestamp versionTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + versionTimeBuilder_; + + /** + * + * + *
    +     * The version time of the backup, which must be within the time range of
    +     * [earliest_version_time, NOW], where earliest_version_time is retrieved by
    +     * cloud spanner frontend API (See details: go/cs-pitr-lite-design).
    +     * 
    + * + * optional .google.protobuf.Timestamp version_time = 6; + * + * @return Whether the versionTime field is set. + */ + public boolean hasVersionTime() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
    +     * The version time of the backup, which must be within the time range of
    +     * [earliest_version_time, NOW], where earliest_version_time is retrieved by
    +     * cloud spanner frontend API (See details: go/cs-pitr-lite-design).
    +     * 
    + * + * optional .google.protobuf.Timestamp version_time = 6; + * + * @return The versionTime. + */ + public com.google.protobuf.Timestamp getVersionTime() { + if (versionTimeBuilder_ == null) { + return versionTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : versionTime_; + } else { + return versionTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The version time of the backup, which must be within the time range of
    +     * [earliest_version_time, NOW], where earliest_version_time is retrieved by
    +     * cloud spanner frontend API (See details: go/cs-pitr-lite-design).
    +     * 
    + * + * optional .google.protobuf.Timestamp version_time = 6; + */ + public Builder setVersionTime(com.google.protobuf.Timestamp value) { + if (versionTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + versionTime_ = value; + } else { + versionTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The version time of the backup, which must be within the time range of
    +     * [earliest_version_time, NOW], where earliest_version_time is retrieved by
    +     * cloud spanner frontend API (See details: go/cs-pitr-lite-design).
    +     * 
    + * + * optional .google.protobuf.Timestamp version_time = 6; + */ + public Builder setVersionTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (versionTimeBuilder_ == null) { + versionTime_ = builderForValue.build(); + } else { + versionTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The version time of the backup, which must be within the time range of
    +     * [earliest_version_time, NOW], where earliest_version_time is retrieved by
    +     * cloud spanner frontend API (See details: go/cs-pitr-lite-design).
    +     * 
    + * + * optional .google.protobuf.Timestamp version_time = 6; + */ + public Builder mergeVersionTime(com.google.protobuf.Timestamp value) { + if (versionTimeBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0) + && versionTime_ != null + && versionTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getVersionTimeBuilder().mergeFrom(value); + } else { + versionTime_ = value; + } + } else { + versionTimeBuilder_.mergeFrom(value); + } + if (versionTime_ != null) { + bitField0_ |= 0x00000020; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The version time of the backup, which must be within the time range of
    +     * [earliest_version_time, NOW], where earliest_version_time is retrieved by
    +     * cloud spanner frontend API (See details: go/cs-pitr-lite-design).
    +     * 
    + * + * optional .google.protobuf.Timestamp version_time = 6; + */ + public Builder clearVersionTime() { + bitField0_ = (bitField0_ & ~0x00000020); + versionTime_ = null; + if (versionTimeBuilder_ != null) { + versionTimeBuilder_.dispose(); + versionTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The version time of the backup, which must be within the time range of
    +     * [earliest_version_time, NOW], where earliest_version_time is retrieved by
    +     * cloud spanner frontend API (See details: go/cs-pitr-lite-design).
    +     * 
    + * + * optional .google.protobuf.Timestamp version_time = 6; + */ + public com.google.protobuf.Timestamp.Builder getVersionTimeBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return internalGetVersionTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The version time of the backup, which must be within the time range of
    +     * [earliest_version_time, NOW], where earliest_version_time is retrieved by
    +     * cloud spanner frontend API (See details: go/cs-pitr-lite-design).
    +     * 
    + * + * optional .google.protobuf.Timestamp version_time = 6; + */ + public com.google.protobuf.TimestampOrBuilder getVersionTimeOrBuilder() { + if (versionTimeBuilder_ != null) { + return versionTimeBuilder_.getMessageOrBuilder(); + } else { + return versionTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : versionTime_; + } + } + + /** + * + * + *
    +     * The version time of the backup, which must be within the time range of
    +     * [earliest_version_time, NOW], where earliest_version_time is retrieved by
    +     * cloud spanner frontend API (See details: go/cs-pitr-lite-design).
    +     * 
    + * + * optional .google.protobuf.Timestamp version_time = 6; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetVersionTimeFieldBuilder() { + if (versionTimeBuilder_ == null) { + versionTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getVersionTime(), getParentForChildren(), isClean()); + versionTime_ = null; + } + return versionTimeBuilder_; + } + + private com.google.spanner.admin.database.v1.EncryptionConfig encryptionConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionConfig, + com.google.spanner.admin.database.v1.EncryptionConfig.Builder, + com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder> + encryptionConfigBuilder_; + + /** + * + * + *
    +     * The KMS key(s) used to encrypt the backup to be created if the backup
    +     * should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + * + * @return Whether the encryptionConfig field is set. + */ + public boolean hasEncryptionConfig() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
    +     * The KMS key(s) used to encrypt the backup to be created if the backup
    +     * should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + * + * @return The encryptionConfig. + */ + public com.google.spanner.admin.database.v1.EncryptionConfig getEncryptionConfig() { + if (encryptionConfigBuilder_ == null) { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } else { + return encryptionConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The KMS key(s) used to encrypt the backup to be created if the backup
    +     * should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + */ + public Builder setEncryptionConfig( + com.google.spanner.admin.database.v1.EncryptionConfig value) { + if (encryptionConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + encryptionConfig_ = value; + } else { + encryptionConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The KMS key(s) used to encrypt the backup to be created if the backup
    +     * should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + */ + public Builder setEncryptionConfig( + com.google.spanner.admin.database.v1.EncryptionConfig.Builder builderForValue) { + if (encryptionConfigBuilder_ == null) { + encryptionConfig_ = builderForValue.build(); + } else { + encryptionConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The KMS key(s) used to encrypt the backup to be created if the backup
    +     * should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + */ + public Builder mergeEncryptionConfig( + com.google.spanner.admin.database.v1.EncryptionConfig value) { + if (encryptionConfigBuilder_ == null) { + if (((bitField0_ & 0x00000040) != 0) + && encryptionConfig_ != null + && encryptionConfig_ + != com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance()) { + getEncryptionConfigBuilder().mergeFrom(value); + } else { + encryptionConfig_ = value; + } + } else { + encryptionConfigBuilder_.mergeFrom(value); + } + if (encryptionConfig_ != null) { + bitField0_ |= 0x00000040; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The KMS key(s) used to encrypt the backup to be created if the backup
    +     * should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + */ + public Builder clearEncryptionConfig() { + bitField0_ = (bitField0_ & ~0x00000040); + encryptionConfig_ = null; + if (encryptionConfigBuilder_ != null) { + encryptionConfigBuilder_.dispose(); + encryptionConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The KMS key(s) used to encrypt the backup to be created if the backup
    +     * should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + */ + public com.google.spanner.admin.database.v1.EncryptionConfig.Builder + getEncryptionConfigBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return internalGetEncryptionConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The KMS key(s) used to encrypt the backup to be created if the backup
    +     * should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + */ + public com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder + getEncryptionConfigOrBuilder() { + if (encryptionConfigBuilder_ != null) { + return encryptionConfigBuilder_.getMessageOrBuilder(); + } else { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + } + + /** + * + * + *
    +     * The KMS key(s) used to encrypt the backup to be created if the backup
    +     * should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionConfig, + com.google.spanner.admin.database.v1.EncryptionConfig.Builder, + com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder> + internalGetEncryptionConfigFieldBuilder() { + if (encryptionConfigBuilder_ == null) { + encryptionConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionConfig, + com.google.spanner.admin.database.v1.EncryptionConfig.Builder, + com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder>( + getEncryptionConfig(), getParentForChildren(), isClean()); + encryptionConfig_ = null; + } + return encryptionConfigBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.CreateCloudBackupAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.CreateCloudBackupAction) + private static final com.google.spanner.executor.v1.CreateCloudBackupAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.CreateCloudBackupAction(); + } + + public static com.google.spanner.executor.v1.CreateCloudBackupAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateCloudBackupAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudBackupAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudBackupActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudBackupActionOrBuilder.java new file mode 100644 index 000000000000..ac639243b498 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudBackupActionOrBuilder.java @@ -0,0 +1,262 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface CreateCloudBackupActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.CreateCloudBackupAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + java.lang.String getInstanceId(); + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + com.google.protobuf.ByteString getInstanceIdBytes(); + + /** + * + * + *
    +   * The id of the backup to be created, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The backupId. + */ + java.lang.String getBackupId(); + + /** + * + * + *
    +   * The id of the backup to be created, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The bytes for backupId. + */ + com.google.protobuf.ByteString getBackupIdBytes(); + + /** + * + * + *
    +   * The id of the database from which this backup was
    +   * created, e.g. "db0". Note that this needs to be in the
    +   * same instance as the backup.
    +   * 
    + * + * string database_id = 4; + * + * @return The databaseId. + */ + java.lang.String getDatabaseId(); + + /** + * + * + *
    +   * The id of the database from which this backup was
    +   * created, e.g. "db0". Note that this needs to be in the
    +   * same instance as the backup.
    +   * 
    + * + * string database_id = 4; + * + * @return The bytes for databaseId. + */ + com.google.protobuf.ByteString getDatabaseIdBytes(); + + /** + * + * + *
    +   * Output only. The expiration time of the backup, which must be at least 6
    +   * hours and at most 366 days from the time the request is received.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + boolean hasExpireTime(); + + /** + * + * + *
    +   * Output only. The expiration time of the backup, which must be at least 6
    +   * hours and at most 366 days from the time the request is received.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + com.google.protobuf.Timestamp getExpireTime(); + + /** + * + * + *
    +   * Output only. The expiration time of the backup, which must be at least 6
    +   * hours and at most 366 days from the time the request is received.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder(); + + /** + * + * + *
    +   * The version time of the backup, which must be within the time range of
    +   * [earliest_version_time, NOW], where earliest_version_time is retrieved by
    +   * cloud spanner frontend API (See details: go/cs-pitr-lite-design).
    +   * 
    + * + * optional .google.protobuf.Timestamp version_time = 6; + * + * @return Whether the versionTime field is set. + */ + boolean hasVersionTime(); + + /** + * + * + *
    +   * The version time of the backup, which must be within the time range of
    +   * [earliest_version_time, NOW], where earliest_version_time is retrieved by
    +   * cloud spanner frontend API (See details: go/cs-pitr-lite-design).
    +   * 
    + * + * optional .google.protobuf.Timestamp version_time = 6; + * + * @return The versionTime. + */ + com.google.protobuf.Timestamp getVersionTime(); + + /** + * + * + *
    +   * The version time of the backup, which must be within the time range of
    +   * [earliest_version_time, NOW], where earliest_version_time is retrieved by
    +   * cloud spanner frontend API (See details: go/cs-pitr-lite-design).
    +   * 
    + * + * optional .google.protobuf.Timestamp version_time = 6; + */ + com.google.protobuf.TimestampOrBuilder getVersionTimeOrBuilder(); + + /** + * + * + *
    +   * The KMS key(s) used to encrypt the backup to be created if the backup
    +   * should be CMEK protected.
    +   * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + * + * @return Whether the encryptionConfig field is set. + */ + boolean hasEncryptionConfig(); + + /** + * + * + *
    +   * The KMS key(s) used to encrypt the backup to be created if the backup
    +   * should be CMEK protected.
    +   * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + * + * @return The encryptionConfig. + */ + com.google.spanner.admin.database.v1.EncryptionConfig getEncryptionConfig(); + + /** + * + * + *
    +   * The KMS key(s) used to encrypt the backup to be created if the backup
    +   * should be CMEK protected.
    +   * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + */ + com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder getEncryptionConfigOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudDatabaseAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudDatabaseAction.java new file mode 100644 index 000000000000..deae557eaeb9 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudDatabaseAction.java @@ -0,0 +1,1905 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that creates a Cloud Spanner database.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CreateCloudDatabaseAction} + */ +@com.google.protobuf.Generated +public final class CreateCloudDatabaseAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.CreateCloudDatabaseAction) + CreateCloudDatabaseActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateCloudDatabaseAction"); + } + + // Use CreateCloudDatabaseAction.newBuilder() to construct. + private CreateCloudDatabaseAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateCloudDatabaseAction() { + instanceId_ = ""; + projectId_ = ""; + databaseId_ = ""; + sdlStatement_ = com.google.protobuf.LazyStringArrayList.emptyList(); + dialect_ = ""; + protoDescriptors_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CreateCloudDatabaseAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CreateCloudDatabaseAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CreateCloudDatabaseAction.class, + com.google.spanner.executor.v1.CreateCloudDatabaseAction.Builder.class); + } + + private int bitField0_; + public static final int INSTANCE_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The instanceId. + */ + @java.lang.Override + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The bytes for instanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROJECT_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DATABASE_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object databaseId_ = ""; + + /** + * + * + *
    +   * Cloud database ID (not full path), e.g. "db0".
    +   * 
    + * + * string database_id = 3; + * + * @return The databaseId. + */ + @java.lang.Override + public java.lang.String getDatabaseId() { + java.lang.Object ref = databaseId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud database ID (not full path), e.g. "db0".
    +   * 
    + * + * string database_id = 3; + * + * @return The bytes for databaseId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseIdBytes() { + java.lang.Object ref = databaseId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SDL_STATEMENT_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList sdlStatement_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * SDL statements to apply to the new database.
    +   * 
    + * + * repeated string sdl_statement = 4; + * + * @return A list containing the sdlStatement. + */ + public com.google.protobuf.ProtocolStringList getSdlStatementList() { + return sdlStatement_; + } + + /** + * + * + *
    +   * SDL statements to apply to the new database.
    +   * 
    + * + * repeated string sdl_statement = 4; + * + * @return The count of sdlStatement. + */ + public int getSdlStatementCount() { + return sdlStatement_.size(); + } + + /** + * + * + *
    +   * SDL statements to apply to the new database.
    +   * 
    + * + * repeated string sdl_statement = 4; + * + * @param index The index of the element to return. + * @return The sdlStatement at the given index. + */ + public java.lang.String getSdlStatement(int index) { + return sdlStatement_.get(index); + } + + /** + * + * + *
    +   * SDL statements to apply to the new database.
    +   * 
    + * + * repeated string sdl_statement = 4; + * + * @param index The index of the value to return. + * @return The bytes of the sdlStatement at the given index. + */ + public com.google.protobuf.ByteString getSdlStatementBytes(int index) { + return sdlStatement_.getByteString(index); + } + + public static final int ENCRYPTION_CONFIG_FIELD_NUMBER = 5; + private com.google.spanner.admin.database.v1.EncryptionConfig encryptionConfig_; + + /** + * + * + *
    +   * The KMS key used to encrypt the database to be created if the database
    +   * should be CMEK protected.
    +   * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5; + * + * @return Whether the encryptionConfig field is set. + */ + @java.lang.Override + public boolean hasEncryptionConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The KMS key used to encrypt the database to be created if the database
    +   * should be CMEK protected.
    +   * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5; + * + * @return The encryptionConfig. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionConfig getEncryptionConfig() { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + + /** + * + * + *
    +   * The KMS key used to encrypt the database to be created if the database
    +   * should be CMEK protected.
    +   * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder + getEncryptionConfigOrBuilder() { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + + public static final int DIALECT_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private volatile java.lang.Object dialect_ = ""; + + /** + * + * + *
    +   * Optional SQL dialect (GOOGLESQL or POSTGRESQL).  Default: GOOGLESQL.
    +   * 
    + * + * optional string dialect = 6; + * + * @return Whether the dialect field is set. + */ + @java.lang.Override + public boolean hasDialect() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Optional SQL dialect (GOOGLESQL or POSTGRESQL).  Default: GOOGLESQL.
    +   * 
    + * + * optional string dialect = 6; + * + * @return The dialect. + */ + @java.lang.Override + public java.lang.String getDialect() { + java.lang.Object ref = dialect_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + dialect_ = s; + return s; + } + } + + /** + * + * + *
    +   * Optional SQL dialect (GOOGLESQL or POSTGRESQL).  Default: GOOGLESQL.
    +   * 
    + * + * optional string dialect = 6; + * + * @return The bytes for dialect. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDialectBytes() { + java.lang.Object ref = dialect_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + dialect_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROTO_DESCRIPTORS_FIELD_NUMBER = 7; + private com.google.protobuf.ByteString protoDescriptors_ = com.google.protobuf.ByteString.EMPTY; + + /** + * optional bytes proto_descriptors = 7; + * + * @return Whether the protoDescriptors field is set. + */ + @java.lang.Override + public boolean hasProtoDescriptors() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * optional bytes proto_descriptors = 7; + * + * @return The protoDescriptors. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProtoDescriptors() { + return protoDescriptors_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, databaseId_); + } + for (int i = 0; i < sdlStatement_.size(); i++) { + 
com.google.protobuf.GeneratedMessage.writeString(output, 4, sdlStatement_.getRaw(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(5, getEncryptionConfig()); + } + if (((bitField0_ & 0x00000002) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 6, dialect_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeBytes(7, protoDescriptors_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, databaseId_); + } + { + int dataSize = 0; + for (int i = 0; i < sdlStatement_.size(); i++) { + dataSize += computeStringSizeNoTag(sdlStatement_.getRaw(i)); + } + size += dataSize; + size += 1 * getSdlStatementList().size(); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getEncryptionConfig()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(6, dialect_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(7, protoDescriptors_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.CreateCloudDatabaseAction)) { + return super.equals(obj); + } + 
com.google.spanner.executor.v1.CreateCloudDatabaseAction other = + (com.google.spanner.executor.v1.CreateCloudDatabaseAction) obj; + + if (!getInstanceId().equals(other.getInstanceId())) return false; + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getDatabaseId().equals(other.getDatabaseId())) return false; + if (!getSdlStatementList().equals(other.getSdlStatementList())) return false; + if (hasEncryptionConfig() != other.hasEncryptionConfig()) return false; + if (hasEncryptionConfig()) { + if (!getEncryptionConfig().equals(other.getEncryptionConfig())) return false; + } + if (hasDialect() != other.hasDialect()) return false; + if (hasDialect()) { + if (!getDialect().equals(other.getDialect())) return false; + } + if (hasProtoDescriptors() != other.hasProtoDescriptors()) return false; + if (hasProtoDescriptors()) { + if (!getProtoDescriptors().equals(other.getProtoDescriptors())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + DATABASE_ID_FIELD_NUMBER; + hash = (53 * hash) + getDatabaseId().hashCode(); + if (getSdlStatementCount() > 0) { + hash = (37 * hash) + SDL_STATEMENT_FIELD_NUMBER; + hash = (53 * hash) + getSdlStatementList().hashCode(); + } + if (hasEncryptionConfig()) { + hash = (37 * hash) + ENCRYPTION_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getEncryptionConfig().hashCode(); + } + if (hasDialect()) { + hash = (37 * hash) + DIALECT_FIELD_NUMBER; + hash = (53 * hash) + getDialect().hashCode(); + } + if (hasProtoDescriptors()) { + hash = (37 * hash) + 
PROTO_DESCRIPTORS_FIELD_NUMBER; + hash = (53 * hash) + getProtoDescriptors().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.CreateCloudDatabaseAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CreateCloudDatabaseAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CreateCloudDatabaseAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CreateCloudDatabaseAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CreateCloudDatabaseAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CreateCloudDatabaseAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CreateCloudDatabaseAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.spanner.executor.v1.CreateCloudDatabaseAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CreateCloudDatabaseAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CreateCloudDatabaseAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CreateCloudDatabaseAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CreateCloudDatabaseAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.CreateCloudDatabaseAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that creates a Cloud Spanner database.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CreateCloudDatabaseAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.CreateCloudDatabaseAction) + com.google.spanner.executor.v1.CreateCloudDatabaseActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CreateCloudDatabaseAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CreateCloudDatabaseAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CreateCloudDatabaseAction.class, + com.google.spanner.executor.v1.CreateCloudDatabaseAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.CreateCloudDatabaseAction.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetEncryptionConfigFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + instanceId_ = ""; + projectId_ = ""; + databaseId_ = ""; + sdlStatement_ = com.google.protobuf.LazyStringArrayList.emptyList(); + encryptionConfig_ = null; + if (encryptionConfigBuilder_ != null) { + encryptionConfigBuilder_.dispose(); + encryptionConfigBuilder_ = null; + } + dialect_ = ""; + protoDescriptors_ = com.google.protobuf.ByteString.EMPTY; + return this; + } + + 
@java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CreateCloudDatabaseAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudDatabaseAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.CreateCloudDatabaseAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudDatabaseAction build() { + com.google.spanner.executor.v1.CreateCloudDatabaseAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudDatabaseAction buildPartial() { + com.google.spanner.executor.v1.CreateCloudDatabaseAction result = + new com.google.spanner.executor.v1.CreateCloudDatabaseAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.CreateCloudDatabaseAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.instanceId_ = instanceId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.projectId_ = projectId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.databaseId_ = databaseId_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + sdlStatement_.makeImmutable(); + result.sdlStatement_ = sdlStatement_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000010) != 0)) { + result.encryptionConfig_ = + encryptionConfigBuilder_ == null ? 
encryptionConfig_ : encryptionConfigBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.dialect_ = dialect_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.protoDescriptors_ = protoDescriptors_; + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.CreateCloudDatabaseAction) { + return mergeFrom((com.google.spanner.executor.v1.CreateCloudDatabaseAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.CreateCloudDatabaseAction other) { + if (other == com.google.spanner.executor.v1.CreateCloudDatabaseAction.getDefaultInstance()) + return this; + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getDatabaseId().isEmpty()) { + databaseId_ = other.databaseId_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.sdlStatement_.isEmpty()) { + if (sdlStatement_.isEmpty()) { + sdlStatement_ = other.sdlStatement_; + bitField0_ |= 0x00000008; + } else { + ensureSdlStatementIsMutable(); + sdlStatement_.addAll(other.sdlStatement_); + } + onChanged(); + } + if (other.hasEncryptionConfig()) { + mergeEncryptionConfig(other.getEncryptionConfig()); + } + if (other.hasDialect()) { + dialect_ = other.dialect_; + bitField0_ |= 0x00000020; + onChanged(); + } + if (other.hasProtoDescriptors()) { + setProtoDescriptors(other.getProtoDescriptors()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + 
@java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + instanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + databaseId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureSdlStatementIsMutable(); + sdlStatement_.add(s); + break; + } // case 34 + case 42: + { + input.readMessage( + internalGetEncryptionConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 50: + { + dialect_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000020; + break; + } // case 50 + case 58: + { + protoDescriptors_ = input.readBytes(); + bitField0_ |= 0x00000040; + break; + } // case 58 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @return The instanceId. + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @return The bytes for instanceId. + */ + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @param value The instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearInstanceId() { + instanceId_ = getDefaultInstance().getInstanceId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @param value The bytes for instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object databaseId_ = ""; + + /** + * + * + *
    +     * Cloud database ID (not full path), e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @return The databaseId. + */ + public java.lang.String getDatabaseId() { + java.lang.Object ref = databaseId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud database ID (not full path), e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @return The bytes for databaseId. + */ + public com.google.protobuf.ByteString getDatabaseIdBytes() { + java.lang.Object ref = databaseId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud database ID (not full path), e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @param value The databaseId to set. + * @return This builder for chaining. + */ + public Builder setDatabaseId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + databaseId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud database ID (not full path), e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @return This builder for chaining. + */ + public Builder clearDatabaseId() { + databaseId_ = getDefaultInstance().getDatabaseId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud database ID (not full path), e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @param value The bytes for databaseId to set. + * @return This builder for chaining. + */ + public Builder setDatabaseIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + databaseId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList sdlStatement_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureSdlStatementIsMutable() { + if (!sdlStatement_.isModifiable()) { + sdlStatement_ = new com.google.protobuf.LazyStringArrayList(sdlStatement_); + } + bitField0_ |= 0x00000008; + } + + /** + * + * + *
    +     * SDL statements to apply to the new database.
    +     * 
    + * + * repeated string sdl_statement = 4; + * + * @return A list containing the sdlStatement. + */ + public com.google.protobuf.ProtocolStringList getSdlStatementList() { + sdlStatement_.makeImmutable(); + return sdlStatement_; + } + + /** + * + * + *
    +     * SDL statements to apply to the new database.
    +     * 
    + * + * repeated string sdl_statement = 4; + * + * @return The count of sdlStatement. + */ + public int getSdlStatementCount() { + return sdlStatement_.size(); + } + + /** + * + * + *
    +     * SDL statements to apply to the new database.
    +     * 
    + * + * repeated string sdl_statement = 4; + * + * @param index The index of the element to return. + * @return The sdlStatement at the given index. + */ + public java.lang.String getSdlStatement(int index) { + return sdlStatement_.get(index); + } + + /** + * + * + *
    +     * SDL statements to apply to the new database.
    +     * 
    + * + * repeated string sdl_statement = 4; + * + * @param index The index of the value to return. + * @return The bytes of the sdlStatement at the given index. + */ + public com.google.protobuf.ByteString getSdlStatementBytes(int index) { + return sdlStatement_.getByteString(index); + } + + /** + * + * + *
    +     * SDL statements to apply to the new database.
    +     * 
    + * + * repeated string sdl_statement = 4; + * + * @param index The index to set the value at. + * @param value The sdlStatement to set. + * @return This builder for chaining. + */ + public Builder setSdlStatement(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSdlStatementIsMutable(); + sdlStatement_.set(index, value); + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * SDL statements to apply to the new database.
    +     * 
    + * + * repeated string sdl_statement = 4; + * + * @param value The sdlStatement to add. + * @return This builder for chaining. + */ + public Builder addSdlStatement(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSdlStatementIsMutable(); + sdlStatement_.add(value); + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * SDL statements to apply to the new database.
    +     * 
    + * + * repeated string sdl_statement = 4; + * + * @param values The sdlStatement to add. + * @return This builder for chaining. + */ + public Builder addAllSdlStatement(java.lang.Iterable values) { + ensureSdlStatementIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, sdlStatement_); + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * SDL statements to apply to the new database.
    +     * 
    + * + * repeated string sdl_statement = 4; + * + * @return This builder for chaining. + */ + public Builder clearSdlStatement() { + sdlStatement_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * SDL statements to apply to the new database.
    +     * 
    + * + * repeated string sdl_statement = 4; + * + * @param value The bytes of the sdlStatement to add. + * @return This builder for chaining. + */ + public Builder addSdlStatementBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureSdlStatementIsMutable(); + sdlStatement_.add(value); + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private com.google.spanner.admin.database.v1.EncryptionConfig encryptionConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionConfig, + com.google.spanner.admin.database.v1.EncryptionConfig.Builder, + com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder> + encryptionConfigBuilder_; + + /** + * + * + *
    +     * The KMS key used to encrypt the database to be created if the database
    +     * should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5; + * + * @return Whether the encryptionConfig field is set. + */ + public boolean hasEncryptionConfig() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * The KMS key used to encrypt the database to be created if the database
    +     * should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5; + * + * @return The encryptionConfig. + */ + public com.google.spanner.admin.database.v1.EncryptionConfig getEncryptionConfig() { + if (encryptionConfigBuilder_ == null) { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } else { + return encryptionConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The KMS key used to encrypt the database to be created if the database
    +     * should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5; + */ + public Builder setEncryptionConfig( + com.google.spanner.admin.database.v1.EncryptionConfig value) { + if (encryptionConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + encryptionConfig_ = value; + } else { + encryptionConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The KMS key used to encrypt the database to be created if the database
    +     * should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5; + */ + public Builder setEncryptionConfig( + com.google.spanner.admin.database.v1.EncryptionConfig.Builder builderForValue) { + if (encryptionConfigBuilder_ == null) { + encryptionConfig_ = builderForValue.build(); + } else { + encryptionConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The KMS key used to encrypt the database to be created if the database
    +     * should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5; + */ + public Builder mergeEncryptionConfig( + com.google.spanner.admin.database.v1.EncryptionConfig value) { + if (encryptionConfigBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && encryptionConfig_ != null + && encryptionConfig_ + != com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance()) { + getEncryptionConfigBuilder().mergeFrom(value); + } else { + encryptionConfig_ = value; + } + } else { + encryptionConfigBuilder_.mergeFrom(value); + } + if (encryptionConfig_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The KMS key used to encrypt the database to be created if the database
    +     * should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5; + */ + public Builder clearEncryptionConfig() { + bitField0_ = (bitField0_ & ~0x00000010); + encryptionConfig_ = null; + if (encryptionConfigBuilder_ != null) { + encryptionConfigBuilder_.dispose(); + encryptionConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The KMS key used to encrypt the database to be created if the database
    +     * should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5; + */ + public com.google.spanner.admin.database.v1.EncryptionConfig.Builder + getEncryptionConfigBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetEncryptionConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The KMS key used to encrypt the database to be created if the database
    +     * should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5; + */ + public com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder + getEncryptionConfigOrBuilder() { + if (encryptionConfigBuilder_ != null) { + return encryptionConfigBuilder_.getMessageOrBuilder(); + } else { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + } + + /** + * + * + *
    +     * The KMS key used to encrypt the database to be created if the database
    +     * should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionConfig, + com.google.spanner.admin.database.v1.EncryptionConfig.Builder, + com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder> + internalGetEncryptionConfigFieldBuilder() { + if (encryptionConfigBuilder_ == null) { + encryptionConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionConfig, + com.google.spanner.admin.database.v1.EncryptionConfig.Builder, + com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder>( + getEncryptionConfig(), getParentForChildren(), isClean()); + encryptionConfig_ = null; + } + return encryptionConfigBuilder_; + } + + private java.lang.Object dialect_ = ""; + + /** + * + * + *
    +     * Optional SQL dialect (GOOGLESQL or POSTGRESQL).  Default: GOOGLESQL.
    +     * 
    + * + * optional string dialect = 6; + * + * @return Whether the dialect field is set. + */ + public boolean hasDialect() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
    +     * Optional SQL dialect (GOOGLESQL or POSTGRESQL).  Default: GOOGLESQL.
    +     * 
    + * + * optional string dialect = 6; + * + * @return The dialect. + */ + public java.lang.String getDialect() { + java.lang.Object ref = dialect_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + dialect_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Optional SQL dialect (GOOGLESQL or POSTGRESQL).  Default: GOOGLESQL.
    +     * 
    + * + * optional string dialect = 6; + * + * @return The bytes for dialect. + */ + public com.google.protobuf.ByteString getDialectBytes() { + java.lang.Object ref = dialect_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + dialect_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Optional SQL dialect (GOOGLESQL or POSTGRESQL).  Default: GOOGLESQL.
    +     * 
    + * + * optional string dialect = 6; + * + * @param value The dialect to set. + * @return This builder for chaining. + */ + public Builder setDialect(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + dialect_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional SQL dialect (GOOGLESQL or POSTGRESQL).  Default: GOOGLESQL.
    +     * 
    + * + * optional string dialect = 6; + * + * @return This builder for chaining. + */ + public Builder clearDialect() { + dialect_ = getDefaultInstance().getDialect(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional SQL dialect (GOOGLESQL or POSTGRESQL).  Default: GOOGLESQL.
    +     * 
    + * + * optional string dialect = 6; + * + * @param value The bytes for dialect to set. + * @return This builder for chaining. + */ + public Builder setDialectBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + dialect_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString protoDescriptors_ = com.google.protobuf.ByteString.EMPTY; + + /** + * optional bytes proto_descriptors = 7; + * + * @return Whether the protoDescriptors field is set. + */ + @java.lang.Override + public boolean hasProtoDescriptors() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * optional bytes proto_descriptors = 7; + * + * @return The protoDescriptors. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProtoDescriptors() { + return protoDescriptors_; + } + + /** + * optional bytes proto_descriptors = 7; + * + * @param value The protoDescriptors to set. + * @return This builder for chaining. + */ + public Builder setProtoDescriptors(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + protoDescriptors_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * optional bytes proto_descriptors = 7; + * + * @return This builder for chaining. 
+ */ + public Builder clearProtoDescriptors() { + bitField0_ = (bitField0_ & ~0x00000040); + protoDescriptors_ = getDefaultInstance().getProtoDescriptors(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.CreateCloudDatabaseAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.CreateCloudDatabaseAction) + private static final com.google.spanner.executor.v1.CreateCloudDatabaseAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.CreateCloudDatabaseAction(); + } + + public static com.google.spanner.executor.v1.CreateCloudDatabaseAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateCloudDatabaseAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudDatabaseAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudDatabaseActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudDatabaseActionOrBuilder.java new file mode 100644 index 000000000000..2760710196be --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudDatabaseActionOrBuilder.java @@ -0,0 +1,253 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface CreateCloudDatabaseActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.CreateCloudDatabaseAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The instanceId. + */ + java.lang.String getInstanceId(); + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The bytes for instanceId. + */ + com.google.protobuf.ByteString getInstanceIdBytes(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * Cloud database ID (not full path), e.g. "db0".
    +   * 
    + * + * string database_id = 3; + * + * @return The databaseId. + */ + java.lang.String getDatabaseId(); + + /** + * + * + *
    +   * Cloud database ID (not full path), e.g. "db0".
    +   * 
    + * + * string database_id = 3; + * + * @return The bytes for databaseId. + */ + com.google.protobuf.ByteString getDatabaseIdBytes(); + + /** + * + * + *
    +   * SDL statements to apply to the new database.
    +   * 
    + * + * repeated string sdl_statement = 4; + * + * @return A list containing the sdlStatement. + */ + java.util.List getSdlStatementList(); + + /** + * + * + *
    +   * SDL statements to apply to the new database.
    +   * 
    + * + * repeated string sdl_statement = 4; + * + * @return The count of sdlStatement. + */ + int getSdlStatementCount(); + + /** + * + * + *
    +   * SDL statements to apply to the new database.
    +   * 
    + * + * repeated string sdl_statement = 4; + * + * @param index The index of the element to return. + * @return The sdlStatement at the given index. + */ + java.lang.String getSdlStatement(int index); + + /** + * + * + *
    +   * SDL statements to apply to the new database.
    +   * 
    + * + * repeated string sdl_statement = 4; + * + * @param index The index of the value to return. + * @return The bytes of the sdlStatement at the given index. + */ + com.google.protobuf.ByteString getSdlStatementBytes(int index); + + /** + * + * + *
    +   * The KMS key used to encrypt the database to be created if the database
    +   * should be CMEK protected.
    +   * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5; + * + * @return Whether the encryptionConfig field is set. + */ + boolean hasEncryptionConfig(); + + /** + * + * + *
    +   * The KMS key used to encrypt the database to be created if the database
    +   * should be CMEK protected.
    +   * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5; + * + * @return The encryptionConfig. + */ + com.google.spanner.admin.database.v1.EncryptionConfig getEncryptionConfig(); + + /** + * + * + *
    +   * The KMS key used to encrypt the database to be created if the database
    +   * should be CMEK protected.
    +   * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5; + */ + com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder getEncryptionConfigOrBuilder(); + + /** + * + * + *
    +   * Optional SQL dialect (GOOGLESQL or POSTGRESQL).  Default: GOOGLESQL.
    +   * 
    + * + * optional string dialect = 6; + * + * @return Whether the dialect field is set. + */ + boolean hasDialect(); + + /** + * + * + *
    +   * Optional SQL dialect (GOOGLESQL or POSTGRESQL).  Default: GOOGLESQL.
    +   * 
    + * + * optional string dialect = 6; + * + * @return The dialect. + */ + java.lang.String getDialect(); + + /** + * + * + *
    +   * Optional SQL dialect (GOOGLESQL or POSTGRESQL).  Default: GOOGLESQL.
    +   * 
    + * + * optional string dialect = 6; + * + * @return The bytes for dialect. + */ + com.google.protobuf.ByteString getDialectBytes(); + + /** + * optional bytes proto_descriptors = 7; + * + * @return Whether the protoDescriptors field is set. + */ + boolean hasProtoDescriptors(); + + /** + * optional bytes proto_descriptors = 7; + * + * @return The protoDescriptors. + */ + com.google.protobuf.ByteString getProtoDescriptors(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudInstanceAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudInstanceAction.java new file mode 100644 index 000000000000..6f49bbbaf039 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudInstanceAction.java @@ -0,0 +1,2093 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that creates a Cloud Spanner instance.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CreateCloudInstanceAction} + */ +@com.google.protobuf.Generated +public final class CreateCloudInstanceAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.CreateCloudInstanceAction) + CreateCloudInstanceActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateCloudInstanceAction"); + } + + // Use CreateCloudInstanceAction.newBuilder() to construct. + private CreateCloudInstanceAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateCloudInstanceAction() { + instanceId_ = ""; + projectId_ = ""; + instanceConfigId_ = ""; + edition_ = 0; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CreateCloudInstanceAction_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 5: + return internalGetLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CreateCloudInstanceAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CreateCloudInstanceAction.class, + com.google.spanner.executor.v1.CreateCloudInstanceAction.Builder.class); + } + + private int 
bitField0_; + public static final int INSTANCE_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The instanceId. + */ + @java.lang.Override + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The bytes for instanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROJECT_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_CONFIG_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceConfigId_ = ""; + + /** + * + * + *
    +   * Instance config ID, e.g. "test-config".
    +   * 
    + * + * string instance_config_id = 3; + * + * @return The instanceConfigId. + */ + @java.lang.Override + public java.lang.String getInstanceConfigId() { + java.lang.Object ref = instanceConfigId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceConfigId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Instance config ID, e.g. "test-config".
    +   * 
    + * + * string instance_config_id = 3; + * + * @return The bytes for instanceConfigId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceConfigIdBytes() { + java.lang.Object ref = instanceConfigId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceConfigId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int NODE_COUNT_FIELD_NUMBER = 4; + private int nodeCount_ = 0; + + /** + * + * + *
    +   * Number of nodes (processing_units should not be set or set to 0 if used).
    +   * 
    + * + * optional int32 node_count = 4; + * + * @return Whether the nodeCount field is set. + */ + @java.lang.Override + public boolean hasNodeCount() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Number of nodes (processing_units should not be set or set to 0 if used).
    +   * 
    + * + * optional int32 node_count = 4; + * + * @return The nodeCount. + */ + @java.lang.Override + public int getNodeCount() { + return nodeCount_; + } + + public static final int PROCESSING_UNITS_FIELD_NUMBER = 6; + private int processingUnits_ = 0; + + /** + * + * + *
    +   * Number of processing units (node_count should be set to 0 if used).
    +   * 
    + * + * optional int32 processing_units = 6; + * + * @return Whether the processingUnits field is set. + */ + @java.lang.Override + public boolean hasProcessingUnits() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Number of processing units (node_count should be set to 0 if used).
    +   * 
    + * + * optional int32 processing_units = 6; + * + * @return The processingUnits. + */ + @java.lang.Override + public int getProcessingUnits() { + return processingUnits_; + } + + public static final int AUTOSCALING_CONFIG_FIELD_NUMBER = 7; + private com.google.spanner.admin.instance.v1.AutoscalingConfig autoscalingConfig_; + + /** + * + * + *
    +   * The autoscaling config for this instance. If non-empty, an autoscaling
    +   * instance will be created (processing_units and node_count should be set to
    +   * 0 if used).
    +   * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + * + * @return Whether the autoscalingConfig field is set. + */ + @java.lang.Override + public boolean hasAutoscalingConfig() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * The autoscaling config for this instance. If non-empty, an autoscaling
    +   * instance will be created (processing_units and node_count should be set to
    +   * 0 if used).
    +   * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + * + * @return The autoscalingConfig. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig getAutoscalingConfig() { + return autoscalingConfig_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance() + : autoscalingConfig_; + } + + /** + * + * + *
    +   * The autoscaling config for this instance. If non-empty, an autoscaling
    +   * instance will be created (processing_units and node_count should be set to
    +   * 0 if used).
    +   * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder + getAutoscalingConfigOrBuilder() { + return autoscalingConfig_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance() + : autoscalingConfig_; + } + + public static final int LABELS_FIELD_NUMBER = 5; + + private static final class LabelsDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CreateCloudInstanceAction_LabelsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField labels_; + + private com.google.protobuf.MapField internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField(LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 5; + */ + @java.lang.Override + public boolean containsLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetLabels().getMap().containsKey(key); + } + + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 5; + */ + @java.lang.Override + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 5; + */ + @java.lang.Override + public /* nullable */ java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 5; + */ + @java.lang.Override + public java.lang.String getLabelsOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int EDITION_FIELD_NUMBER = 8; + private int edition_ = 0; + + /** + * + * + *
    +   * The edition of the instance.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 8; + * + * @return The enum numeric value on the wire for edition. + */ + @java.lang.Override + public int getEditionValue() { + return edition_; + } + + /** + * + * + *
    +   * The edition of the instance.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 8; + * + * @return The edition. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance.Edition getEdition() { + com.google.spanner.admin.instance.v1.Instance.Edition result = + com.google.spanner.admin.instance.v1.Instance.Edition.forNumber(edition_); + return result == null + ? com.google.spanner.admin.instance.v1.Instance.Edition.UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceConfigId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, instanceConfigId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeInt32(4, nodeCount_); + } + com.google.protobuf.GeneratedMessage.serializeStringMapTo( + output, internalGetLabels(), LabelsDefaultEntryHolder.defaultEntry, 5); + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt32(6, processingUnits_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(7, getAutoscalingConfig()); + } + if (edition_ + != com.google.spanner.admin.instance.v1.Instance.Edition.EDITION_UNSPECIFIED.getNumber()) { + output.writeEnum(8, edition_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size 
= memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceConfigId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, instanceConfigId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(4, nodeCount_); + } + for (java.util.Map.Entry entry : + internalGetLabels().getMap().entrySet()) { + com.google.protobuf.MapEntry labels__ = + LabelsDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, labels__); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(6, processingUnits_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getAutoscalingConfig()); + } + if (edition_ + != com.google.spanner.admin.instance.v1.Instance.Edition.EDITION_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(8, edition_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.CreateCloudInstanceAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.CreateCloudInstanceAction other = + (com.google.spanner.executor.v1.CreateCloudInstanceAction) obj; + + if (!getInstanceId().equals(other.getInstanceId())) 
return false; + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getInstanceConfigId().equals(other.getInstanceConfigId())) return false; + if (hasNodeCount() != other.hasNodeCount()) return false; + if (hasNodeCount()) { + if (getNodeCount() != other.getNodeCount()) return false; + } + if (hasProcessingUnits() != other.hasProcessingUnits()) return false; + if (hasProcessingUnits()) { + if (getProcessingUnits() != other.getProcessingUnits()) return false; + } + if (hasAutoscalingConfig() != other.hasAutoscalingConfig()) return false; + if (hasAutoscalingConfig()) { + if (!getAutoscalingConfig().equals(other.getAutoscalingConfig())) return false; + } + if (!internalGetLabels().equals(other.internalGetLabels())) return false; + if (edition_ != other.edition_) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + INSTANCE_CONFIG_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceConfigId().hashCode(); + if (hasNodeCount()) { + hash = (37 * hash) + NODE_COUNT_FIELD_NUMBER; + hash = (53 * hash) + getNodeCount(); + } + if (hasProcessingUnits()) { + hash = (37 * hash) + PROCESSING_UNITS_FIELD_NUMBER; + hash = (53 * hash) + getProcessingUnits(); + } + if (hasAutoscalingConfig()) { + hash = (37 * hash) + AUTOSCALING_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getAutoscalingConfig().hashCode(); + } + if (!internalGetLabels().getMap().isEmpty()) { + hash = (37 * hash) + LABELS_FIELD_NUMBER; + hash = (53 * hash) + internalGetLabels().hashCode(); + } + hash = (37 * hash) + EDITION_FIELD_NUMBER; + hash = (53 * 
hash) + edition_; + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.CreateCloudInstanceAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CreateCloudInstanceAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CreateCloudInstanceAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CreateCloudInstanceAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CreateCloudInstanceAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CreateCloudInstanceAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CreateCloudInstanceAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CreateCloudInstanceAction parseFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CreateCloudInstanceAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CreateCloudInstanceAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CreateCloudInstanceAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CreateCloudInstanceAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.CreateCloudInstanceAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that creates a Cloud Spanner instance.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CreateCloudInstanceAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.CreateCloudInstanceAction) + com.google.spanner.executor.v1.CreateCloudInstanceActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CreateCloudInstanceAction_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 5: + return internalGetLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 5: + return internalGetMutableLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CreateCloudInstanceAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CreateCloudInstanceAction.class, + com.google.spanner.executor.v1.CreateCloudInstanceAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.CreateCloudInstanceAction.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + 
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetAutoscalingConfigFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + instanceId_ = ""; + projectId_ = ""; + instanceConfigId_ = ""; + nodeCount_ = 0; + processingUnits_ = 0; + autoscalingConfig_ = null; + if (autoscalingConfigBuilder_ != null) { + autoscalingConfigBuilder_.dispose(); + autoscalingConfigBuilder_ = null; + } + internalGetMutableLabels().clear(); + edition_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CreateCloudInstanceAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudInstanceAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.CreateCloudInstanceAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudInstanceAction build() { + com.google.spanner.executor.v1.CreateCloudInstanceAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudInstanceAction buildPartial() { + com.google.spanner.executor.v1.CreateCloudInstanceAction result = + new com.google.spanner.executor.v1.CreateCloudInstanceAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.CreateCloudInstanceAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.instanceId_ = instanceId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.projectId_ = projectId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + 
result.instanceConfigId_ = instanceConfigId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.nodeCount_ = nodeCount_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.processingUnits_ = processingUnits_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.autoscalingConfig_ = + autoscalingConfigBuilder_ == null + ? autoscalingConfig_ + : autoscalingConfigBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.labels_ = internalGetLabels(); + result.labels_.makeImmutable(); + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.edition_ = edition_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.CreateCloudInstanceAction) { + return mergeFrom((com.google.spanner.executor.v1.CreateCloudInstanceAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.CreateCloudInstanceAction other) { + if (other == com.google.spanner.executor.v1.CreateCloudInstanceAction.getDefaultInstance()) + return this; + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getInstanceConfigId().isEmpty()) { + instanceConfigId_ = other.instanceConfigId_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasNodeCount()) { + setNodeCount(other.getNodeCount()); + } + if (other.hasProcessingUnits()) { + setProcessingUnits(other.getProcessingUnits()); + } + if (other.hasAutoscalingConfig()) { + mergeAutoscalingConfig(other.getAutoscalingConfig()); + } + 
internalGetMutableLabels().mergeFrom(other.internalGetLabels()); + bitField0_ |= 0x00000040; + if (other.edition_ != 0) { + setEditionValue(other.getEditionValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + instanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + instanceConfigId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 32: + { + nodeCount_ = input.readInt32(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 42: + { + com.google.protobuf.MapEntry labels__ = + input.readMessage( + LabelsDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableLabels() + .getMutableMap() + .put(labels__.getKey(), labels__.getValue()); + bitField0_ |= 0x00000040; + break; + } // case 42 + case 48: + { + processingUnits_ = input.readInt32(); + bitField0_ |= 0x00000010; + break; + } // case 48 + case 58: + { + input.readMessage( + internalGetAutoscalingConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000020; + break; + } // case 58 + case 64: + { + edition_ = input.readEnum(); + bitField0_ |= 0x00000080; + break; + } // case 64 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + 
break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @return The instanceId. + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @return The bytes for instanceId. + */ + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @param value The instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearInstanceId() { + instanceId_ = getDefaultInstance().getInstanceId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @param value The bytes for instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object instanceConfigId_ = ""; + + /** + * + * + *
    +     * Instance config ID, e.g. "test-config".
    +     * 
    + * + * string instance_config_id = 3; + * + * @return The instanceConfigId. + */ + public java.lang.String getInstanceConfigId() { + java.lang.Object ref = instanceConfigId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceConfigId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Instance config ID, e.g. "test-config".
    +     * 
    + * + * string instance_config_id = 3; + * + * @return The bytes for instanceConfigId. + */ + public com.google.protobuf.ByteString getInstanceConfigIdBytes() { + java.lang.Object ref = instanceConfigId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceConfigId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Instance config ID, e.g. "test-config".
    +     * 
    + * + * string instance_config_id = 3; + * + * @param value The instanceConfigId to set. + * @return This builder for chaining. + */ + public Builder setInstanceConfigId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceConfigId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Instance config ID, e.g. "test-config".
    +     * 
    + * + * string instance_config_id = 3; + * + * @return This builder for chaining. + */ + public Builder clearInstanceConfigId() { + instanceConfigId_ = getDefaultInstance().getInstanceConfigId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Instance config ID, e.g. "test-config".
    +     * 
    + * + * string instance_config_id = 3; + * + * @param value The bytes for instanceConfigId to set. + * @return This builder for chaining. + */ + public Builder setInstanceConfigIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceConfigId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private int nodeCount_; + + /** + * + * + *
    +     * Number of nodes (processing_units should not be set or set to 0 if used).
    +     * 
    + * + * optional int32 node_count = 4; + * + * @return Whether the nodeCount field is set. + */ + @java.lang.Override + public boolean hasNodeCount() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Number of nodes (processing_units should not be set or set to 0 if used).
    +     * 
    + * + * optional int32 node_count = 4; + * + * @return The nodeCount. + */ + @java.lang.Override + public int getNodeCount() { + return nodeCount_; + } + + /** + * + * + *
    +     * Number of nodes (processing_units should not be set or set to 0 if used).
    +     * 
    + * + * optional int32 node_count = 4; + * + * @param value The nodeCount to set. + * @return This builder for chaining. + */ + public Builder setNodeCount(int value) { + + nodeCount_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Number of nodes (processing_units should not be set or set to 0 if used).
    +     * 
    + * + * optional int32 node_count = 4; + * + * @return This builder for chaining. + */ + public Builder clearNodeCount() { + bitField0_ = (bitField0_ & ~0x00000008); + nodeCount_ = 0; + onChanged(); + return this; + } + + private int processingUnits_; + + /** + * + * + *
    +     * Number of processing units (node_count should be set to 0 if used).
    +     * 
    + * + * optional int32 processing_units = 6; + * + * @return Whether the processingUnits field is set. + */ + @java.lang.Override + public boolean hasProcessingUnits() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * Number of processing units (node_count should be set to 0 if used).
    +     * 
    + * + * optional int32 processing_units = 6; + * + * @return The processingUnits. + */ + @java.lang.Override + public int getProcessingUnits() { + return processingUnits_; + } + + /** + * + * + *
    +     * Number of processing units (node_count should be set to 0 if used).
    +     * 
    + * + * optional int32 processing_units = 6; + * + * @param value The processingUnits to set. + * @return This builder for chaining. + */ + public Builder setProcessingUnits(int value) { + + processingUnits_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Number of processing units (node_count should be set to 0 if used).
    +     * 
    + * + * optional int32 processing_units = 6; + * + * @return This builder for chaining. + */ + public Builder clearProcessingUnits() { + bitField0_ = (bitField0_ & ~0x00000010); + processingUnits_ = 0; + onChanged(); + return this; + } + + private com.google.spanner.admin.instance.v1.AutoscalingConfig autoscalingConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig, + com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder> + autoscalingConfigBuilder_; + + /** + * + * + *
    +     * The autoscaling config for this instance. If non-empty, an autoscaling
    +     * instance will be created (processing_units and node_count should be set to
    +     * 0 if used).
    +     * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + * + * @return Whether the autoscalingConfig field is set. + */ + public boolean hasAutoscalingConfig() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
    +     * The autoscaling config for this instance. If non-empty, an autoscaling
    +     * instance will be created (processing_units and node_count should be set to
    +     * 0 if used).
    +     * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + * + * @return The autoscalingConfig. + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig getAutoscalingConfig() { + if (autoscalingConfigBuilder_ == null) { + return autoscalingConfig_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance() + : autoscalingConfig_; + } else { + return autoscalingConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The autoscaling config for this instance. If non-empty, an autoscaling
    +     * instance will be created (processing_units and node_count should be set to
    +     * 0 if used).
    +     * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + */ + public Builder setAutoscalingConfig( + com.google.spanner.admin.instance.v1.AutoscalingConfig value) { + if (autoscalingConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + autoscalingConfig_ = value; + } else { + autoscalingConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The autoscaling config for this instance. If non-empty, an autoscaling
    +     * instance will be created (processing_units and node_count should be set to
    +     * 0 if used).
    +     * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + */ + public Builder setAutoscalingConfig( + com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder builderForValue) { + if (autoscalingConfigBuilder_ == null) { + autoscalingConfig_ = builderForValue.build(); + } else { + autoscalingConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The autoscaling config for this instance. If non-empty, an autoscaling
    +     * instance will be created (processing_units and node_count should be set to
    +     * 0 if used).
    +     * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + */ + public Builder mergeAutoscalingConfig( + com.google.spanner.admin.instance.v1.AutoscalingConfig value) { + if (autoscalingConfigBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0) + && autoscalingConfig_ != null + && autoscalingConfig_ + != com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance()) { + getAutoscalingConfigBuilder().mergeFrom(value); + } else { + autoscalingConfig_ = value; + } + } else { + autoscalingConfigBuilder_.mergeFrom(value); + } + if (autoscalingConfig_ != null) { + bitField0_ |= 0x00000020; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The autoscaling config for this instance. If non-empty, an autoscaling
    +     * instance will be created (processing_units and node_count should be set to
    +     * 0 if used).
    +     * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + */ + public Builder clearAutoscalingConfig() { + bitField0_ = (bitField0_ & ~0x00000020); + autoscalingConfig_ = null; + if (autoscalingConfigBuilder_ != null) { + autoscalingConfigBuilder_.dispose(); + autoscalingConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The autoscaling config for this instance. If non-empty, an autoscaling
    +     * instance will be created (processing_units and node_count should be set to
    +     * 0 if used).
    +     * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder + getAutoscalingConfigBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return internalGetAutoscalingConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The autoscaling config for this instance. If non-empty, an autoscaling
    +     * instance will be created (processing_units and node_count should be set to
    +     * 0 if used).
    +     * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder + getAutoscalingConfigOrBuilder() { + if (autoscalingConfigBuilder_ != null) { + return autoscalingConfigBuilder_.getMessageOrBuilder(); + } else { + return autoscalingConfig_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance() + : autoscalingConfig_; + } + } + + /** + * + * + *
    +     * The autoscaling config for this instance. If non-empty, an autoscaling
    +     * instance will be created (processing_units and node_count should be set to
    +     * 0 if used).
    +     * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig, + com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder> + internalGetAutoscalingConfigFieldBuilder() { + if (autoscalingConfigBuilder_ == null) { + autoscalingConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig, + com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder>( + getAutoscalingConfig(), getParentForChildren(), isClean()); + autoscalingConfig_ = null; + } + return autoscalingConfigBuilder_; + } + + private com.google.protobuf.MapField labels_; + + private com.google.protobuf.MapField internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField(LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + private com.google.protobuf.MapField + internalGetMutableLabels() { + if (labels_ == null) { + labels_ = com.google.protobuf.MapField.newMapField(LabelsDefaultEntryHolder.defaultEntry); + } + if (!labels_.isMutable()) { + labels_ = labels_.copy(); + } + bitField0_ |= 0x00000040; + onChanged(); + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + + /** + * + * + *
    +     * labels.
    +     * 
    + * + * map<string, string> labels = 5; + */ + @java.lang.Override + public boolean containsLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetLabels().getMap().containsKey(key); + } + + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + + /** + * + * + *
    +     * labels.
    +     * 
    + * + * map<string, string> labels = 5; + */ + @java.lang.Override + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + + /** + * + * + *
    +     * labels.
    +     * 
    + * + * map<string, string> labels = 5; + */ + @java.lang.Override + public /* nullable */ java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
    +     * labels.
    +     * 
    + * + * map<string, string> labels = 5; + */ + @java.lang.Override + public java.lang.String getLabelsOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearLabels() { + bitField0_ = (bitField0_ & ~0x00000040); + internalGetMutableLabels().getMutableMap().clear(); + return this; + } + + /** + * + * + *
    +     * labels.
    +     * 
    + * + * map<string, string> labels = 5; + */ + public Builder removeLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableLabels().getMutableMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableLabels() { + bitField0_ |= 0x00000040; + return internalGetMutableLabels().getMutableMap(); + } + + /** + * + * + *
    +     * labels.
    +     * 
    + * + * map<string, string> labels = 5; + */ + public Builder putLabels(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableLabels().getMutableMap().put(key, value); + bitField0_ |= 0x00000040; + return this; + } + + /** + * + * + *
    +     * labels.
    +     * 
    + * + * map<string, string> labels = 5; + */ + public Builder putAllLabels(java.util.Map values) { + internalGetMutableLabels().getMutableMap().putAll(values); + bitField0_ |= 0x00000040; + return this; + } + + private int edition_ = 0; + + /** + * + * + *
    +     * The edition of the instance.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 8; + * + * @return The enum numeric value on the wire for edition. + */ + @java.lang.Override + public int getEditionValue() { + return edition_; + } + + /** + * + * + *
    +     * The edition of the instance.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 8; + * + * @param value The enum numeric value on the wire for edition to set. + * @return This builder for chaining. + */ + public Builder setEditionValue(int value) { + edition_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The edition of the instance.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 8; + * + * @return The edition. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance.Edition getEdition() { + com.google.spanner.admin.instance.v1.Instance.Edition result = + com.google.spanner.admin.instance.v1.Instance.Edition.forNumber(edition_); + return result == null + ? com.google.spanner.admin.instance.v1.Instance.Edition.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * The edition of the instance.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 8; + * + * @param value The edition to set. + * @return This builder for chaining. + */ + public Builder setEdition(com.google.spanner.admin.instance.v1.Instance.Edition value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000080; + edition_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The edition of the instance.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 8; + * + * @return This builder for chaining. + */ + public Builder clearEdition() { + bitField0_ = (bitField0_ & ~0x00000080); + edition_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.CreateCloudInstanceAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.CreateCloudInstanceAction) + private static final com.google.spanner.executor.v1.CreateCloudInstanceAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.CreateCloudInstanceAction(); + } + + public static com.google.spanner.executor.v1.CreateCloudInstanceAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateCloudInstanceAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CreateCloudInstanceAction getDefaultInstanceForType() { 
+ return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudInstanceActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudInstanceActionOrBuilder.java new file mode 100644 index 000000000000..85a3715f09e7 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudInstanceActionOrBuilder.java @@ -0,0 +1,293 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface CreateCloudInstanceActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.CreateCloudInstanceAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The instanceId. + */ + java.lang.String getInstanceId(); + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The bytes for instanceId. + */ + com.google.protobuf.ByteString getInstanceIdBytes(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * Instance config ID, e.g. "test-config".
    +   * 
    + * + * string instance_config_id = 3; + * + * @return The instanceConfigId. + */ + java.lang.String getInstanceConfigId(); + + /** + * + * + *
    +   * Instance config ID, e.g. "test-config".
    +   * 
    + * + * string instance_config_id = 3; + * + * @return The bytes for instanceConfigId. + */ + com.google.protobuf.ByteString getInstanceConfigIdBytes(); + + /** + * + * + *
    +   * Number of nodes (processing_units should not be set or set to 0 if used).
    +   * 
    + * + * optional int32 node_count = 4; + * + * @return Whether the nodeCount field is set. + */ + boolean hasNodeCount(); + + /** + * + * + *
    +   * Number of nodes (processing_units should not be set or set to 0 if used).
    +   * 
    + * + * optional int32 node_count = 4; + * + * @return The nodeCount. + */ + int getNodeCount(); + + /** + * + * + *
    +   * Number of processing units (node_count should be set to 0 if used).
    +   * 
    + * + * optional int32 processing_units = 6; + * + * @return Whether the processingUnits field is set. + */ + boolean hasProcessingUnits(); + + /** + * + * + *
    +   * Number of processing units (node_count should be set to 0 if used).
    +   * 
    + * + * optional int32 processing_units = 6; + * + * @return The processingUnits. + */ + int getProcessingUnits(); + + /** + * + * + *
    +   * The autoscaling config for this instance. If non-empty, an autoscaling
    +   * instance will be created (processing_units and node_count should be set to
    +   * 0 if used).
    +   * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + * + * @return Whether the autoscalingConfig field is set. + */ + boolean hasAutoscalingConfig(); + + /** + * + * + *
    +   * The autoscaling config for this instance. If non-empty, an autoscaling
    +   * instance will be created (processing_units and node_count should be set to
    +   * 0 if used).
    +   * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + * + * @return The autoscalingConfig. + */ + com.google.spanner.admin.instance.v1.AutoscalingConfig getAutoscalingConfig(); + + /** + * + * + *
    +   * The autoscaling config for this instance. If non-empty, an autoscaling
    +   * instance will be created (processing_units and node_count should be set to
    +   * 0 if used).
    +   * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + */ + com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder getAutoscalingConfigOrBuilder(); + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 5; + */ + int getLabelsCount(); + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 5; + */ + boolean containsLabels(java.lang.String key); + + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Deprecated + java.util.Map getLabels(); + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 5; + */ + java.util.Map getLabelsMap(); + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 5; + */ + /* nullable */ + java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue); + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 5; + */ + java.lang.String getLabelsOrThrow(java.lang.String key); + + /** + * + * + *
    +   * The edition of the instance.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 8; + * + * @return The enum numeric value on the wire for edition. + */ + int getEditionValue(); + + /** + * + * + *
    +   * The edition of the instance.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 8; + * + * @return The edition. + */ + com.google.spanner.admin.instance.v1.Instance.Edition getEdition(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateUserInstanceConfigAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateUserInstanceConfigAction.java new file mode 100644 index 000000000000..6cd1d04f9385 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateUserInstanceConfigAction.java @@ -0,0 +1,1498 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that creates a user instance config.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CreateUserInstanceConfigAction} + */ +@com.google.protobuf.Generated +public final class CreateUserInstanceConfigAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.CreateUserInstanceConfigAction) + CreateUserInstanceConfigActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateUserInstanceConfigAction"); + } + + // Use CreateUserInstanceConfigAction.newBuilder() to construct. + private CreateUserInstanceConfigAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateUserInstanceConfigAction() { + userConfigId_ = ""; + projectId_ = ""; + baseConfigId_ = ""; + replicas_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CreateUserInstanceConfigAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CreateUserInstanceConfigAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CreateUserInstanceConfigAction.class, + com.google.spanner.executor.v1.CreateUserInstanceConfigAction.Builder.class); + } + + public static final int USER_CONFIG_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object userConfigId_ = ""; + + /** + * + * + *
    +   * User instance config ID (not path), e.g. "custom-config".
    +   * 
    + * + * string user_config_id = 1; + * + * @return The userConfigId. + */ + @java.lang.Override + public java.lang.String getUserConfigId() { + java.lang.Object ref = userConfigId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + userConfigId_ = s; + return s; + } + } + + /** + * + * + *
    +   * User instance config ID (not path), e.g. "custom-config".
    +   * 
    + * + * string user_config_id = 1; + * + * @return The bytes for userConfigId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getUserConfigIdBytes() { + java.lang.Object ref = userConfigId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + userConfigId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROJECT_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BASE_CONFIG_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object baseConfigId_ = ""; + + /** + * + * + *
    +   * Base config ID, e.g. "test-config".
    +   * 
    + * + * string base_config_id = 3; + * + * @return The baseConfigId. + */ + @java.lang.Override + public java.lang.String getBaseConfigId() { + java.lang.Object ref = baseConfigId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + baseConfigId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Base config ID, e.g. "test-config".
    +   * 
    + * + * string base_config_id = 3; + * + * @return The bytes for baseConfigId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBaseConfigIdBytes() { + java.lang.Object ref = baseConfigId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + baseConfigId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REPLICAS_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private java.util.List replicas_; + + /** + * + * + *
    +   * Replicas that should be included in the user config.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + @java.lang.Override + public java.util.List getReplicasList() { + return replicas_; + } + + /** + * + * + *
    +   * Replicas that should be included in the user config.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + @java.lang.Override + public java.util.List + getReplicasOrBuilderList() { + return replicas_; + } + + /** + * + * + *
    +   * Replicas that should be included in the user config.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + @java.lang.Override + public int getReplicasCount() { + return replicas_.size(); + } + + /** + * + * + *
    +   * Replicas that should be included in the user config.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaInfo getReplicas(int index) { + return replicas_.get(index); + } + + /** + * + * + *
    +   * Replicas that should be included in the user config.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaInfoOrBuilder getReplicasOrBuilder(int index) { + return replicas_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(userConfigId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, userConfigId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(baseConfigId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, baseConfigId_); + } + for (int i = 0; i < replicas_.size(); i++) { + output.writeMessage(4, replicas_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(userConfigId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, userConfigId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(baseConfigId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, baseConfigId_); + } + for (int i = 0; i < replicas_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, 
replicas_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.CreateUserInstanceConfigAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.CreateUserInstanceConfigAction other = + (com.google.spanner.executor.v1.CreateUserInstanceConfigAction) obj; + + if (!getUserConfigId().equals(other.getUserConfigId())) return false; + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getBaseConfigId().equals(other.getBaseConfigId())) return false; + if (!getReplicasList().equals(other.getReplicasList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + USER_CONFIG_ID_FIELD_NUMBER; + hash = (53 * hash) + getUserConfigId().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + BASE_CONFIG_ID_FIELD_NUMBER; + hash = (53 * hash) + getBaseConfigId().hashCode(); + if (getReplicasCount() > 0) { + hash = (37 * hash) + REPLICAS_FIELD_NUMBER; + hash = (53 * hash) + getReplicasList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.CreateUserInstanceConfigAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CreateUserInstanceConfigAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CreateUserInstanceConfigAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CreateUserInstanceConfigAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CreateUserInstanceConfigAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.CreateUserInstanceConfigAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CreateUserInstanceConfigAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CreateUserInstanceConfigAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CreateUserInstanceConfigAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static 
com.google.spanner.executor.v1.CreateUserInstanceConfigAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.CreateUserInstanceConfigAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.CreateUserInstanceConfigAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.CreateUserInstanceConfigAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that creates a user instance config.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.CreateUserInstanceConfigAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.CreateUserInstanceConfigAction) + com.google.spanner.executor.v1.CreateUserInstanceConfigActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CreateUserInstanceConfigAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CreateUserInstanceConfigAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.CreateUserInstanceConfigAction.class, + com.google.spanner.executor.v1.CreateUserInstanceConfigAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.CreateUserInstanceConfigAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + userConfigId_ = ""; + projectId_ = ""; + baseConfigId_ = ""; + if (replicasBuilder_ == null) { + replicas_ = java.util.Collections.emptyList(); + } else { + replicas_ = null; + replicasBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_CreateUserInstanceConfigAction_descriptor; + } + + @java.lang.Override + public 
com.google.spanner.executor.v1.CreateUserInstanceConfigAction + getDefaultInstanceForType() { + return com.google.spanner.executor.v1.CreateUserInstanceConfigAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.CreateUserInstanceConfigAction build() { + com.google.spanner.executor.v1.CreateUserInstanceConfigAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CreateUserInstanceConfigAction buildPartial() { + com.google.spanner.executor.v1.CreateUserInstanceConfigAction result = + new com.google.spanner.executor.v1.CreateUserInstanceConfigAction(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.executor.v1.CreateUserInstanceConfigAction result) { + if (replicasBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0)) { + replicas_ = java.util.Collections.unmodifiableList(replicas_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.replicas_ = replicas_; + } else { + result.replicas_ = replicasBuilder_.build(); + } + } + + private void buildPartial0( + com.google.spanner.executor.v1.CreateUserInstanceConfigAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.userConfigId_ = userConfigId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.projectId_ = projectId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.baseConfigId_ = baseConfigId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.CreateUserInstanceConfigAction) { + return mergeFrom((com.google.spanner.executor.v1.CreateUserInstanceConfigAction) other); + } else { + 
super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.CreateUserInstanceConfigAction other) { + if (other + == com.google.spanner.executor.v1.CreateUserInstanceConfigAction.getDefaultInstance()) + return this; + if (!other.getUserConfigId().isEmpty()) { + userConfigId_ = other.userConfigId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getBaseConfigId().isEmpty()) { + baseConfigId_ = other.baseConfigId_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (replicasBuilder_ == null) { + if (!other.replicas_.isEmpty()) { + if (replicas_.isEmpty()) { + replicas_ = other.replicas_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureReplicasIsMutable(); + replicas_.addAll(other.replicas_); + } + onChanged(); + } + } else { + if (!other.replicas_.isEmpty()) { + if (replicasBuilder_.isEmpty()) { + replicasBuilder_.dispose(); + replicasBuilder_ = null; + replicas_ = other.replicas_; + bitField0_ = (bitField0_ & ~0x00000008); + replicasBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetReplicasFieldBuilder() + : null; + } else { + replicasBuilder_.addAllMessages(other.replicas_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + userConfigId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + baseConfigId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + com.google.spanner.admin.instance.v1.ReplicaInfo m = + input.readMessage( + com.google.spanner.admin.instance.v1.ReplicaInfo.parser(), + extensionRegistry); + if (replicasBuilder_ == null) { + ensureReplicasIsMutable(); + replicas_.add(m); + } else { + replicasBuilder_.addMessage(m); + } + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object userConfigId_ = ""; + + /** + * + * + *
    +     * User instance config ID (not path), e.g. "custom-config".
    +     * 
    + * + * string user_config_id = 1; + * + * @return The userConfigId. + */ + public java.lang.String getUserConfigId() { + java.lang.Object ref = userConfigId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + userConfigId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * User instance config ID (not path), e.g. "custom-config".
    +     * 
    + * + * string user_config_id = 1; + * + * @return The bytes for userConfigId. + */ + public com.google.protobuf.ByteString getUserConfigIdBytes() { + java.lang.Object ref = userConfigId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + userConfigId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * User instance config ID (not path), e.g. "custom-config".
    +     * 
    + * + * string user_config_id = 1; + * + * @param value The userConfigId to set. + * @return This builder for chaining. + */ + public Builder setUserConfigId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + userConfigId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * User instance config ID (not path), e.g. "custom-config".
    +     * 
    + * + * string user_config_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearUserConfigId() { + userConfigId_ = getDefaultInstance().getUserConfigId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * User instance config ID (not path), e.g. "custom-config".
    +     * 
    + * + * string user_config_id = 1; + * + * @param value The bytes for userConfigId to set. + * @return This builder for chaining. + */ + public Builder setUserConfigIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + userConfigId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object baseConfigId_ = ""; + + /** + * + * + *
    +     * Base config ID, e.g. "test-config".
    +     * 
    + * + * string base_config_id = 3; + * + * @return The baseConfigId. + */ + public java.lang.String getBaseConfigId() { + java.lang.Object ref = baseConfigId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + baseConfigId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Base config ID, e.g. "test-config".
    +     * 
    + * + * string base_config_id = 3; + * + * @return The bytes for baseConfigId. + */ + public com.google.protobuf.ByteString getBaseConfigIdBytes() { + java.lang.Object ref = baseConfigId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + baseConfigId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Base config ID, e.g. "test-config".
    +     * 
    + * + * string base_config_id = 3; + * + * @param value The baseConfigId to set. + * @return This builder for chaining. + */ + public Builder setBaseConfigId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + baseConfigId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Base config ID, e.g. "test-config".
    +     * 
    + * + * string base_config_id = 3; + * + * @return This builder for chaining. + */ + public Builder clearBaseConfigId() { + baseConfigId_ = getDefaultInstance().getBaseConfigId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Base config ID, e.g. "test-config".
    +     * 
    + * + * string base_config_id = 3; + * + * @param value The bytes for baseConfigId to set. + * @return This builder for chaining. + */ + public Builder setBaseConfigIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + baseConfigId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.util.List replicas_ = + java.util.Collections.emptyList(); + + private void ensureReplicasIsMutable() { + if (!((bitField0_ & 0x00000008) != 0)) { + replicas_ = + new java.util.ArrayList(replicas_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.ReplicaInfo, + com.google.spanner.admin.instance.v1.ReplicaInfo.Builder, + com.google.spanner.admin.instance.v1.ReplicaInfoOrBuilder> + replicasBuilder_; + + /** + * + * + *
    +     * Replicas that should be included in the user config.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + public java.util.List getReplicasList() { + if (replicasBuilder_ == null) { + return java.util.Collections.unmodifiableList(replicas_); + } else { + return replicasBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Replicas that should be included in the user config.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + public int getReplicasCount() { + if (replicasBuilder_ == null) { + return replicas_.size(); + } else { + return replicasBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Replicas that should be included in the user config.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + public com.google.spanner.admin.instance.v1.ReplicaInfo getReplicas(int index) { + if (replicasBuilder_ == null) { + return replicas_.get(index); + } else { + return replicasBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Replicas that should be included in the user config.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + public Builder setReplicas(int index, com.google.spanner.admin.instance.v1.ReplicaInfo value) { + if (replicasBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplicasIsMutable(); + replicas_.set(index, value); + onChanged(); + } else { + replicasBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Replicas that should be included in the user config.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + public Builder setReplicas( + int index, com.google.spanner.admin.instance.v1.ReplicaInfo.Builder builderForValue) { + if (replicasBuilder_ == null) { + ensureReplicasIsMutable(); + replicas_.set(index, builderForValue.build()); + onChanged(); + } else { + replicasBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Replicas that should be included in the user config.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + public Builder addReplicas(com.google.spanner.admin.instance.v1.ReplicaInfo value) { + if (replicasBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplicasIsMutable(); + replicas_.add(value); + onChanged(); + } else { + replicasBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Replicas that should be included in the user config.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + public Builder addReplicas(int index, com.google.spanner.admin.instance.v1.ReplicaInfo value) { + if (replicasBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplicasIsMutable(); + replicas_.add(index, value); + onChanged(); + } else { + replicasBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Replicas that should be included in the user config.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + public Builder addReplicas( + com.google.spanner.admin.instance.v1.ReplicaInfo.Builder builderForValue) { + if (replicasBuilder_ == null) { + ensureReplicasIsMutable(); + replicas_.add(builderForValue.build()); + onChanged(); + } else { + replicasBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Replicas that should be included in the user config.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + public Builder addReplicas( + int index, com.google.spanner.admin.instance.v1.ReplicaInfo.Builder builderForValue) { + if (replicasBuilder_ == null) { + ensureReplicasIsMutable(); + replicas_.add(index, builderForValue.build()); + onChanged(); + } else { + replicasBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Replicas that should be included in the user config.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + public Builder addAllReplicas( + java.lang.Iterable values) { + if (replicasBuilder_ == null) { + ensureReplicasIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, replicas_); + onChanged(); + } else { + replicasBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Replicas that should be included in the user config.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + public Builder clearReplicas() { + if (replicasBuilder_ == null) { + replicas_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + replicasBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Replicas that should be included in the user config.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + public Builder removeReplicas(int index) { + if (replicasBuilder_ == null) { + ensureReplicasIsMutable(); + replicas_.remove(index); + onChanged(); + } else { + replicasBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Replicas that should be included in the user config.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + public com.google.spanner.admin.instance.v1.ReplicaInfo.Builder getReplicasBuilder(int index) { + return internalGetReplicasFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Replicas that should be included in the user config.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + public com.google.spanner.admin.instance.v1.ReplicaInfoOrBuilder getReplicasOrBuilder( + int index) { + if (replicasBuilder_ == null) { + return replicas_.get(index); + } else { + return replicasBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Replicas that should be included in the user config.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + public java.util.List + getReplicasOrBuilderList() { + if (replicasBuilder_ != null) { + return replicasBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(replicas_); + } + } + + /** + * + * + *
    +     * Replicas that should be included in the user config.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + public com.google.spanner.admin.instance.v1.ReplicaInfo.Builder addReplicasBuilder() { + return internalGetReplicasFieldBuilder() + .addBuilder(com.google.spanner.admin.instance.v1.ReplicaInfo.getDefaultInstance()); + } + + /** + * + * + *
    +     * Replicas that should be included in the user config.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + public com.google.spanner.admin.instance.v1.ReplicaInfo.Builder addReplicasBuilder(int index) { + return internalGetReplicasFieldBuilder() + .addBuilder(index, com.google.spanner.admin.instance.v1.ReplicaInfo.getDefaultInstance()); + } + + /** + * + * + *
    +     * Replicas that should be included in the user config.
    +     * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + public java.util.List + getReplicasBuilderList() { + return internalGetReplicasFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.ReplicaInfo, + com.google.spanner.admin.instance.v1.ReplicaInfo.Builder, + com.google.spanner.admin.instance.v1.ReplicaInfoOrBuilder> + internalGetReplicasFieldBuilder() { + if (replicasBuilder_ == null) { + replicasBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.admin.instance.v1.ReplicaInfo, + com.google.spanner.admin.instance.v1.ReplicaInfo.Builder, + com.google.spanner.admin.instance.v1.ReplicaInfoOrBuilder>( + replicas_, ((bitField0_ & 0x00000008) != 0), getParentForChildren(), isClean()); + replicas_ = null; + } + return replicasBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.CreateUserInstanceConfigAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.CreateUserInstanceConfigAction) + private static final com.google.spanner.executor.v1.CreateUserInstanceConfigAction + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.CreateUserInstanceConfigAction(); + } + + public static com.google.spanner.executor.v1.CreateUserInstanceConfigAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateUserInstanceConfigAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw 
e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.CreateUserInstanceConfigAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateUserInstanceConfigActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateUserInstanceConfigActionOrBuilder.java new file mode 100644 index 000000000000..0f72401a5773 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateUserInstanceConfigActionOrBuilder.java @@ -0,0 +1,162 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface CreateUserInstanceConfigActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.CreateUserInstanceConfigAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * User instance config ID (not path), e.g. "custom-config".
    +   * 
    + * + * string user_config_id = 1; + * + * @return The userConfigId. + */ + java.lang.String getUserConfigId(); + + /** + * + * + *
    +   * User instance config ID (not path), e.g. "custom-config".
    +   * 
    + * + * string user_config_id = 1; + * + * @return The bytes for userConfigId. + */ + com.google.protobuf.ByteString getUserConfigIdBytes(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * Base config ID, e.g. "test-config".
    +   * 
    + * + * string base_config_id = 3; + * + * @return The baseConfigId. + */ + java.lang.String getBaseConfigId(); + + /** + * + * + *
    +   * Base config ID, e.g. "test-config".
    +   * 
    + * + * string base_config_id = 3; + * + * @return The bytes for baseConfigId. + */ + com.google.protobuf.ByteString getBaseConfigIdBytes(); + + /** + * + * + *
    +   * Replicas that should be included in the user config.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + java.util.List getReplicasList(); + + /** + * + * + *
    +   * Replicas that should be included in the user config.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + com.google.spanner.admin.instance.v1.ReplicaInfo getReplicas(int index); + + /** + * + * + *
    +   * Replicas that should be included in the user config.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + int getReplicasCount(); + + /** + * + * + *
    +   * Replicas that should be included in the user config.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + java.util.List + getReplicasOrBuilderList(); + + /** + * + * + *
    +   * Replicas that should be included in the user config.
    +   * 
    + * + * repeated .google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; + */ + com.google.spanner.admin.instance.v1.ReplicaInfoOrBuilder getReplicasOrBuilder(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DataChangeRecord.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DataChangeRecord.java new file mode 100644 index 000000000000..56b81536c800 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DataChangeRecord.java @@ -0,0 +1,5402 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * ChangeStream data change record.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.DataChangeRecord} + */ +@com.google.protobuf.Generated +public final class DataChangeRecord extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.DataChangeRecord) + DataChangeRecordOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DataChangeRecord"); + } + + // Use DataChangeRecord.newBuilder() to construct. + private DataChangeRecord(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DataChangeRecord() { + recordSequence_ = ""; + transactionId_ = ""; + table_ = ""; + columnTypes_ = java.util.Collections.emptyList(); + mods_ = java.util.Collections.emptyList(); + modType_ = ""; + valueCaptureType_ = ""; + transactionTag_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DataChangeRecord_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DataChangeRecord_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.DataChangeRecord.class, + com.google.spanner.executor.v1.DataChangeRecord.Builder.class); + } + + public interface ColumnTypeOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.DataChangeRecord.ColumnType) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * Column name.
    +     * 
    + * + * string name = 1; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +     * Column name.
    +     * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +     * Column type in JSON.
    +     * 
    + * + * string type = 2; + * + * @return The type. + */ + java.lang.String getType(); + + /** + * + * + *
    +     * Column type in JSON.
    +     * 
    + * + * string type = 2; + * + * @return The bytes for type. + */ + com.google.protobuf.ByteString getTypeBytes(); + + /** + * + * + *
    +     * Whether the column is a primary key column.
    +     * 
    + * + * bool is_primary_key = 3; + * + * @return The isPrimaryKey. + */ + boolean getIsPrimaryKey(); + + /** + * + * + *
    +     * The position of the column as defined in the schema.
    +     * 
    + * + * int64 ordinal_position = 4; + * + * @return The ordinalPosition. + */ + long getOrdinalPosition(); + } + + /** + * + * + *
    +   * Column types.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.DataChangeRecord.ColumnType} + */ + public static final class ColumnType extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.DataChangeRecord.ColumnType) + ColumnTypeOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ColumnType"); + } + + // Use ColumnType.newBuilder() to construct. + private ColumnType(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ColumnType() { + name_ = ""; + type_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DataChangeRecord_ColumnType_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DataChangeRecord_ColumnType_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.DataChangeRecord.ColumnType.class, + com.google.spanner.executor.v1.DataChangeRecord.ColumnType.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Column name.
    +     * 
    + * + * string name = 1; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +     * Column name.
    +     * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TYPE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object type_ = ""; + + /** + * + * + *
    +     * Column type in JSON.
    +     * 
    + * + * string type = 2; + * + * @return The type. + */ + @java.lang.Override + public java.lang.String getType() { + java.lang.Object ref = type_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + type_ = s; + return s; + } + } + + /** + * + * + *
    +     * Column type in JSON.
    +     * 
    + * + * string type = 2; + * + * @return The bytes for type. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTypeBytes() { + java.lang.Object ref = type_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + type_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int IS_PRIMARY_KEY_FIELD_NUMBER = 3; + private boolean isPrimaryKey_ = false; + + /** + * + * + *
    +     * Whether the column is a primary key column.
    +     * 
    + * + * bool is_primary_key = 3; + * + * @return The isPrimaryKey. + */ + @java.lang.Override + public boolean getIsPrimaryKey() { + return isPrimaryKey_; + } + + public static final int ORDINAL_POSITION_FIELD_NUMBER = 4; + private long ordinalPosition_ = 0L; + + /** + * + * + *
    +     * The position of the column as defined in the schema.
    +     * 
    + * + * int64 ordinal_position = 4; + * + * @return The ordinalPosition. + */ + @java.lang.Override + public long getOrdinalPosition() { + return ordinalPosition_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(type_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, type_); + } + if (isPrimaryKey_ != false) { + output.writeBool(3, isPrimaryKey_); + } + if (ordinalPosition_ != 0L) { + output.writeInt64(4, ordinalPosition_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(type_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, type_); + } + if (isPrimaryKey_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, isPrimaryKey_); + } + if (ordinalPosition_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, ordinalPosition_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
com.google.spanner.executor.v1.DataChangeRecord.ColumnType)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.DataChangeRecord.ColumnType other = + (com.google.spanner.executor.v1.DataChangeRecord.ColumnType) obj; + + if (!getName().equals(other.getName())) return false; + if (!getType().equals(other.getType())) return false; + if (getIsPrimaryKey() != other.getIsPrimaryKey()) return false; + if (getOrdinalPosition() != other.getOrdinalPosition()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + getType().hashCode(); + hash = (37 * hash) + IS_PRIMARY_KEY_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getIsPrimaryKey()); + hash = (37 * hash) + ORDINAL_POSITION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getOrdinalPosition()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.DataChangeRecord.ColumnType parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.ColumnType parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.ColumnType parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException 
{ + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.ColumnType parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.ColumnType parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.ColumnType parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.ColumnType parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.ColumnType parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.ColumnType parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.ColumnType parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static 
com.google.spanner.executor.v1.DataChangeRecord.ColumnType parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.ColumnType parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.DataChangeRecord.ColumnType prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * Column types.
    +     * 
    + * + * Protobuf type {@code google.spanner.executor.v1.DataChangeRecord.ColumnType} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.DataChangeRecord.ColumnType) + com.google.spanner.executor.v1.DataChangeRecord.ColumnTypeOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DataChangeRecord_ColumnType_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DataChangeRecord_ColumnType_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.DataChangeRecord.ColumnType.class, + com.google.spanner.executor.v1.DataChangeRecord.ColumnType.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.DataChangeRecord.ColumnType.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + type_ = ""; + isPrimaryKey_ = false; + ordinalPosition_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DataChangeRecord_ColumnType_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DataChangeRecord.ColumnType + getDefaultInstanceForType() { + return com.google.spanner.executor.v1.DataChangeRecord.ColumnType.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.executor.v1.DataChangeRecord.ColumnType build() { + com.google.spanner.executor.v1.DataChangeRecord.ColumnType result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DataChangeRecord.ColumnType buildPartial() { + com.google.spanner.executor.v1.DataChangeRecord.ColumnType result = + new com.google.spanner.executor.v1.DataChangeRecord.ColumnType(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.executor.v1.DataChangeRecord.ColumnType result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.type_ = type_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.isPrimaryKey_ = isPrimaryKey_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.ordinalPosition_ = ordinalPosition_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.DataChangeRecord.ColumnType) { + return mergeFrom((com.google.spanner.executor.v1.DataChangeRecord.ColumnType) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.DataChangeRecord.ColumnType other) { + if (other + == com.google.spanner.executor.v1.DataChangeRecord.ColumnType.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getType().isEmpty()) { + type_ = other.type_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.getIsPrimaryKey() != false) { + setIsPrimaryKey(other.getIsPrimaryKey()); + } + if (other.getOrdinalPosition() != 0L) { + 
setOrdinalPosition(other.getOrdinalPosition()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + type_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + isPrimaryKey_ = input.readBool(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 32: + { + ordinalPosition_ = input.readInt64(); + bitField0_ |= 0x00000008; + break; + } // case 32 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +       * Column name.
    +       * 
    + * + * string name = 1; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * Column name.
    +       * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * Column name.
    +       * 
    + * + * string name = 1; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Column name.
    +       * 
    + * + * string name = 1; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Column name.
    +       * 
    + * + * string name = 1; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object type_ = ""; + + /** + * + * + *
    +       * Column type in JSON.
    +       * 
    + * + * string type = 2; + * + * @return The type. + */ + public java.lang.String getType() { + java.lang.Object ref = type_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + type_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * Column type in JSON.
    +       * 
    + * + * string type = 2; + * + * @return The bytes for type. + */ + public com.google.protobuf.ByteString getTypeBytes() { + java.lang.Object ref = type_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + type_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * Column type in JSON.
    +       * 
    + * + * string type = 2; + * + * @param value The type to set. + * @return This builder for chaining. + */ + public Builder setType(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + type_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Column type in JSON.
    +       * 
    + * + * string type = 2; + * + * @return This builder for chaining. + */ + public Builder clearType() { + type_ = getDefaultInstance().getType(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Column type in JSON.
    +       * 
    + * + * string type = 2; + * + * @param value The bytes for type to set. + * @return This builder for chaining. + */ + public Builder setTypeBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + type_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private boolean isPrimaryKey_; + + /** + * + * + *
    +       * Whether the column is a primary key column.
    +       * 
    + * + * bool is_primary_key = 3; + * + * @return The isPrimaryKey. + */ + @java.lang.Override + public boolean getIsPrimaryKey() { + return isPrimaryKey_; + } + + /** + * + * + *
    +       * Whether the column is a primary key column.
    +       * 
    + * + * bool is_primary_key = 3; + * + * @param value The isPrimaryKey to set. + * @return This builder for chaining. + */ + public Builder setIsPrimaryKey(boolean value) { + + isPrimaryKey_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Whether the column is a primary key column.
    +       * 
    + * + * bool is_primary_key = 3; + * + * @return This builder for chaining. + */ + public Builder clearIsPrimaryKey() { + bitField0_ = (bitField0_ & ~0x00000004); + isPrimaryKey_ = false; + onChanged(); + return this; + } + + private long ordinalPosition_; + + /** + * + * + *
    +       * The position of the column as defined in the schema.
    +       * 
    + * + * int64 ordinal_position = 4; + * + * @return The ordinalPosition. + */ + @java.lang.Override + public long getOrdinalPosition() { + return ordinalPosition_; + } + + /** + * + * + *
    +       * The position of the column as defined in the schema.
    +       * 
    + * + * int64 ordinal_position = 4; + * + * @param value The ordinalPosition to set. + * @return This builder for chaining. + */ + public Builder setOrdinalPosition(long value) { + + ordinalPosition_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The position of the column as defined in the schema.
    +       * 
    + * + * int64 ordinal_position = 4; + * + * @return This builder for chaining. + */ + public Builder clearOrdinalPosition() { + bitField0_ = (bitField0_ & ~0x00000008); + ordinalPosition_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.DataChangeRecord.ColumnType) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.DataChangeRecord.ColumnType) + private static final com.google.spanner.executor.v1.DataChangeRecord.ColumnType + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.DataChangeRecord.ColumnType(); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.ColumnType getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ColumnType parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DataChangeRecord.ColumnType getDefaultInstanceForType() { + return 
DEFAULT_INSTANCE; + } + } + + public interface ModOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.DataChangeRecord.Mod) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * The primary key values in JSON.
    +     * 
    + * + * string keys = 1; + * + * @return The keys. + */ + java.lang.String getKeys(); + + /** + * + * + *
    +     * The primary key values in JSON.
    +     * 
    + * + * string keys = 1; + * + * @return The bytes for keys. + */ + com.google.protobuf.ByteString getKeysBytes(); + + /** + * + * + *
    +     * The new values of the changed columns in JSON. Only contain the non-key
    +     * columns.
    +     * 
    + * + * string new_values = 2; + * + * @return The newValues. + */ + java.lang.String getNewValues(); + + /** + * + * + *
    +     * The new values of the changed columns in JSON. Only contain the non-key
    +     * columns.
    +     * 
    + * + * string new_values = 2; + * + * @return The bytes for newValues. + */ + com.google.protobuf.ByteString getNewValuesBytes(); + + /** + * + * + *
    +     * The old values of the changed columns in JSON. Only contain the non-key
    +     * columns.
    +     * 
    + * + * string old_values = 3; + * + * @return The oldValues. + */ + java.lang.String getOldValues(); + + /** + * + * + *
    +     * The old values of the changed columns in JSON. Only contain the non-key
    +     * columns.
    +     * 
    + * + * string old_values = 3; + * + * @return The bytes for oldValues. + */ + com.google.protobuf.ByteString getOldValuesBytes(); + } + + /** + * + * + *
    +   * Describes the changes that were made.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.DataChangeRecord.Mod} + */ + public static final class Mod extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.DataChangeRecord.Mod) + ModOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Mod"); + } + + // Use Mod.newBuilder() to construct. + private Mod(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Mod() { + keys_ = ""; + newValues_ = ""; + oldValues_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DataChangeRecord_Mod_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DataChangeRecord_Mod_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.DataChangeRecord.Mod.class, + com.google.spanner.executor.v1.DataChangeRecord.Mod.Builder.class); + } + + public static final int KEYS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object keys_ = ""; + + /** + * + * + *
    +     * The primary key values in JSON.
    +     * 
    + * + * string keys = 1; + * + * @return The keys. + */ + @java.lang.Override + public java.lang.String getKeys() { + java.lang.Object ref = keys_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + keys_ = s; + return s; + } + } + + /** + * + * + *
    +     * The primary key values in JSON.
    +     * 
    + * + * string keys = 1; + * + * @return The bytes for keys. + */ + @java.lang.Override + public com.google.protobuf.ByteString getKeysBytes() { + java.lang.Object ref = keys_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + keys_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int NEW_VALUES_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object newValues_ = ""; + + /** + * + * + *
    +     * The new values of the changed columns in JSON. Only contain the non-key
    +     * columns.
    +     * 
    + * + * string new_values = 2; + * + * @return The newValues. + */ + @java.lang.Override + public java.lang.String getNewValues() { + java.lang.Object ref = newValues_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + newValues_ = s; + return s; + } + } + + /** + * + * + *
    +     * The new values of the changed columns in JSON. Only contain the non-key
    +     * columns.
    +     * 
    + * + * string new_values = 2; + * + * @return The bytes for newValues. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNewValuesBytes() { + java.lang.Object ref = newValues_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + newValues_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OLD_VALUES_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object oldValues_ = ""; + + /** + * + * + *
    +     * The old values of the changed columns in JSON. Only contain the non-key
    +     * columns.
    +     * 
    + * + * string old_values = 3; + * + * @return The oldValues. + */ + @java.lang.Override + public java.lang.String getOldValues() { + java.lang.Object ref = oldValues_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + oldValues_ = s; + return s; + } + } + + /** + * + * + *
    +     * The old values of the changed columns in JSON. Only contain the non-key
    +     * columns.
    +     * 
    + * + * string old_values = 3; + * + * @return The bytes for oldValues. + */ + @java.lang.Override + public com.google.protobuf.ByteString getOldValuesBytes() { + java.lang.Object ref = oldValues_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + oldValues_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(keys_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, keys_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(newValues_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, newValues_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(oldValues_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, oldValues_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(keys_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, keys_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(newValues_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, newValues_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(oldValues_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, oldValues_); + } + size += getUnknownFields().getSerializedSize(); + 
memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.DataChangeRecord.Mod)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.DataChangeRecord.Mod other = + (com.google.spanner.executor.v1.DataChangeRecord.Mod) obj; + + if (!getKeys().equals(other.getKeys())) return false; + if (!getNewValues().equals(other.getNewValues())) return false; + if (!getOldValues().equals(other.getOldValues())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + KEYS_FIELD_NUMBER; + hash = (53 * hash) + getKeys().hashCode(); + hash = (37 * hash) + NEW_VALUES_FIELD_NUMBER; + hash = (53 * hash) + getNewValues().hashCode(); + hash = (37 * hash) + OLD_VALUES_FIELD_NUMBER; + hash = (53 * hash) + getOldValues().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.DataChangeRecord.Mod parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.Mod parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.Mod parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.executor.v1.DataChangeRecord.Mod parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.Mod parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.Mod parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.Mod parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.Mod parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.Mod parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.Mod parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.Mod parseFrom( + com.google.protobuf.CodedInputStream input) throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.Mod parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.DataChangeRecord.Mod prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * Describes the changes that were made.
    +     * 
    + * + * Protobuf type {@code google.spanner.executor.v1.DataChangeRecord.Mod} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.DataChangeRecord.Mod) + com.google.spanner.executor.v1.DataChangeRecord.ModOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DataChangeRecord_Mod_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DataChangeRecord_Mod_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.DataChangeRecord.Mod.class, + com.google.spanner.executor.v1.DataChangeRecord.Mod.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.DataChangeRecord.Mod.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + keys_ = ""; + newValues_ = ""; + oldValues_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DataChangeRecord_Mod_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DataChangeRecord.Mod getDefaultInstanceForType() { + return com.google.spanner.executor.v1.DataChangeRecord.Mod.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.DataChangeRecord.Mod build() { + com.google.spanner.executor.v1.DataChangeRecord.Mod result = 
buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DataChangeRecord.Mod buildPartial() { + com.google.spanner.executor.v1.DataChangeRecord.Mod result = + new com.google.spanner.executor.v1.DataChangeRecord.Mod(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.DataChangeRecord.Mod result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.keys_ = keys_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.newValues_ = newValues_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.oldValues_ = oldValues_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.DataChangeRecord.Mod) { + return mergeFrom((com.google.spanner.executor.v1.DataChangeRecord.Mod) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.DataChangeRecord.Mod other) { + if (other == com.google.spanner.executor.v1.DataChangeRecord.Mod.getDefaultInstance()) + return this; + if (!other.getKeys().isEmpty()) { + keys_ = other.keys_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getNewValues().isEmpty()) { + newValues_ = other.newValues_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getOldValues().isEmpty()) { + oldValues_ = other.oldValues_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + keys_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + newValues_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + oldValues_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object keys_ = ""; + + /** + * + * + *
    +       * The primary key values in JSON.
    +       * 
    + * + * string keys = 1; + * + * @return The keys. + */ + public java.lang.String getKeys() { + java.lang.Object ref = keys_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + keys_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * The primary key values in JSON.
    +       * 
    + * + * string keys = 1; + * + * @return The bytes for keys. + */ + public com.google.protobuf.ByteString getKeysBytes() { + java.lang.Object ref = keys_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + keys_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * The primary key values in JSON.
    +       * 
    + * + * string keys = 1; + * + * @param value The keys to set. + * @return This builder for chaining. + */ + public Builder setKeys(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + keys_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The primary key values in JSON.
    +       * 
    + * + * string keys = 1; + * + * @return This builder for chaining. + */ + public Builder clearKeys() { + keys_ = getDefaultInstance().getKeys(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +       * The primary key values in JSON.
    +       * 
    + * + * string keys = 1; + * + * @param value The bytes for keys to set. + * @return This builder for chaining. + */ + public Builder setKeysBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + keys_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object newValues_ = ""; + + /** + * + * + *
    +       * The new values of the changed columns in JSON. Only contain the non-key
    +       * columns.
    +       * 
    + * + * string new_values = 2; + * + * @return The newValues. + */ + public java.lang.String getNewValues() { + java.lang.Object ref = newValues_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + newValues_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * The new values of the changed columns in JSON. Only contain the non-key
    +       * columns.
    +       * 
    + * + * string new_values = 2; + * + * @return The bytes for newValues. + */ + public com.google.protobuf.ByteString getNewValuesBytes() { + java.lang.Object ref = newValues_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + newValues_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * The new values of the changed columns in JSON. Only contain the non-key
    +       * columns.
    +       * 
    + * + * string new_values = 2; + * + * @param value The newValues to set. + * @return This builder for chaining. + */ + public Builder setNewValues(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + newValues_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The new values of the changed columns in JSON. Only contain the non-key
    +       * columns.
    +       * 
    + * + * string new_values = 2; + * + * @return This builder for chaining. + */ + public Builder clearNewValues() { + newValues_ = getDefaultInstance().getNewValues(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +       * The new values of the changed columns in JSON. Only contain the non-key
    +       * columns.
    +       * 
    + * + * string new_values = 2; + * + * @param value The bytes for newValues to set. + * @return This builder for chaining. + */ + public Builder setNewValuesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + newValues_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object oldValues_ = ""; + + /** + * + * + *
    +       * The old values of the changed columns in JSON. Only contain the non-key
    +       * columns.
    +       * 
    + * + * string old_values = 3; + * + * @return The oldValues. + */ + public java.lang.String getOldValues() { + java.lang.Object ref = oldValues_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + oldValues_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * The old values of the changed columns in JSON. Only contain the non-key
    +       * columns.
    +       * 
    + * + * string old_values = 3; + * + * @return The bytes for oldValues. + */ + public com.google.protobuf.ByteString getOldValuesBytes() { + java.lang.Object ref = oldValues_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + oldValues_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * The old values of the changed columns in JSON. Only contain the non-key
    +       * columns.
    +       * 
    + * + * string old_values = 3; + * + * @param value The oldValues to set. + * @return This builder for chaining. + */ + public Builder setOldValues(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + oldValues_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The old values of the changed columns in JSON. Only contain the non-key
    +       * columns.
    +       * 
    + * + * string old_values = 3; + * + * @return This builder for chaining. + */ + public Builder clearOldValues() { + oldValues_ = getDefaultInstance().getOldValues(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +       * The old values of the changed columns in JSON. Only contain the non-key
    +       * columns.
    +       * 
    + * + * string old_values = 3; + * + * @param value The bytes for oldValues to set. + * @return This builder for chaining. + */ + public Builder setOldValuesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + oldValues_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.DataChangeRecord.Mod) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.DataChangeRecord.Mod) + private static final com.google.spanner.executor.v1.DataChangeRecord.Mod DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.DataChangeRecord.Mod(); + } + + public static com.google.spanner.executor.v1.DataChangeRecord.Mod getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Mod parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + 
public com.google.spanner.executor.v1.DataChangeRecord.Mod getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int COMMIT_TIME_FIELD_NUMBER = 1; + private com.google.protobuf.Timestamp commitTime_; + + /** + * + * + *
    +   * The timestamp in which the change was committed.
    +   * 
    + * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return Whether the commitTime field is set. + */ + @java.lang.Override + public boolean hasCommitTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The timestamp in which the change was committed.
    +   * 
    + * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return The commitTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCommitTime() { + return commitTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : commitTime_; + } + + /** + * + * + *
    +   * The timestamp in which the change was committed.
    +   * 
    + * + * .google.protobuf.Timestamp commit_time = 1; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { + return commitTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : commitTime_; + } + + public static final int RECORD_SEQUENCE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object recordSequence_ = ""; + + /** + * + * + *
    +   * The sequence number for the record within the transaction.
    +   * 
    + * + * string record_sequence = 2; + * + * @return The recordSequence. + */ + @java.lang.Override + public java.lang.String getRecordSequence() { + java.lang.Object ref = recordSequence_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + recordSequence_ = s; + return s; + } + } + + /** + * + * + *
    +   * The sequence number for the record within the transaction.
    +   * 
    + * + * string record_sequence = 2; + * + * @return The bytes for recordSequence. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRecordSequenceBytes() { + java.lang.Object ref = recordSequence_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + recordSequence_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TRANSACTION_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object transactionId_ = ""; + + /** + * + * + *
    +   * A globally unique string that represents the transaction in which the
    +   * change was committed.
    +   * 
    + * + * string transaction_id = 3; + * + * @return The transactionId. + */ + @java.lang.Override + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } + } + + /** + * + * + *
    +   * A globally unique string that represents the transaction in which the
    +   * change was committed.
    +   * 
    + * + * string transaction_id = 3; + * + * @return The bytes for transactionId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int IS_LAST_RECORD_FIELD_NUMBER = 4; + private boolean isLastRecord_ = false; + + /** + * + * + *
    +   * Indicates whether this is the last record for a transaction in the current
    +   * partition.
    +   * 
    + * + * bool is_last_record = 4; + * + * @return The isLastRecord. + */ + @java.lang.Override + public boolean getIsLastRecord() { + return isLastRecord_; + } + + public static final int TABLE_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object table_ = ""; + + /** + * + * + *
    +   * Name of the table affected by the change.
    +   * 
    + * + * string table = 5; + * + * @return The table. + */ + @java.lang.Override + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } + } + + /** + * + * + *
    +   * Name of the table affected by the change.
    +   * 
    + * + * string table = 5; + * + * @return The bytes for table. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int COLUMN_TYPES_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private java.util.List columnTypes_; + + /** + * + * + *
    +   * Column types defined in the schema.
    +   * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + */ + @java.lang.Override + public java.util.List + getColumnTypesList() { + return columnTypes_; + } + + /** + * + * + *
    +   * Column types defined in the schema.
    +   * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + */ + @java.lang.Override + public java.util.List< + ? extends com.google.spanner.executor.v1.DataChangeRecord.ColumnTypeOrBuilder> + getColumnTypesOrBuilderList() { + return columnTypes_; + } + + /** + * + * + *
    +   * Column types defined in the schema.
    +   * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + */ + @java.lang.Override + public int getColumnTypesCount() { + return columnTypes_.size(); + } + + /** + * + * + *
    +   * Column types defined in the schema.
    +   * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + */ + @java.lang.Override + public com.google.spanner.executor.v1.DataChangeRecord.ColumnType getColumnTypes(int index) { + return columnTypes_.get(index); + } + + /** + * + * + *
    +   * Column types defined in the schema.
    +   * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + */ + @java.lang.Override + public com.google.spanner.executor.v1.DataChangeRecord.ColumnTypeOrBuilder + getColumnTypesOrBuilder(int index) { + return columnTypes_.get(index); + } + + public static final int MODS_FIELD_NUMBER = 7; + + @SuppressWarnings("serial") + private java.util.List mods_; + + /** + * + * + *
    +   * Changes made in the transaction.
    +   * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + @java.lang.Override + public java.util.List getModsList() { + return mods_; + } + + /** + * + * + *
    +   * Changes made in the transaction.
    +   * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + @java.lang.Override + public java.util.List + getModsOrBuilderList() { + return mods_; + } + + /** + * + * + *
    +   * Changes made in the transaction.
    +   * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + @java.lang.Override + public int getModsCount() { + return mods_.size(); + } + + /** + * + * + *
    +   * Changes made in the transaction.
    +   * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + @java.lang.Override + public com.google.spanner.executor.v1.DataChangeRecord.Mod getMods(int index) { + return mods_.get(index); + } + + /** + * + * + *
    +   * Changes made in the transaction.
    +   * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + @java.lang.Override + public com.google.spanner.executor.v1.DataChangeRecord.ModOrBuilder getModsOrBuilder(int index) { + return mods_.get(index); + } + + public static final int MOD_TYPE_FIELD_NUMBER = 8; + + @SuppressWarnings("serial") + private volatile java.lang.Object modType_ = ""; + + /** + * + * + *
    +   * Describes the type of change. One of INSERT, UPDATE or DELETE.
    +   * 
    + * + * string mod_type = 8; + * + * @return The modType. + */ + @java.lang.Override + public java.lang.String getModType() { + java.lang.Object ref = modType_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + modType_ = s; + return s; + } + } + + /** + * + * + *
    +   * Describes the type of change. One of INSERT, UPDATE or DELETE.
    +   * 
    + * + * string mod_type = 8; + * + * @return The bytes for modType. + */ + @java.lang.Override + public com.google.protobuf.ByteString getModTypeBytes() { + java.lang.Object ref = modType_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + modType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int VALUE_CAPTURE_TYPE_FIELD_NUMBER = 9; + + @SuppressWarnings("serial") + private volatile java.lang.Object valueCaptureType_ = ""; + + /** + * + * + *
    +   * One of value capture type: NEW_VALUES, OLD_VALUES, OLD_AND_NEW_VALUES.
    +   * 
    + * + * string value_capture_type = 9; + * + * @return The valueCaptureType. + */ + @java.lang.Override + public java.lang.String getValueCaptureType() { + java.lang.Object ref = valueCaptureType_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + valueCaptureType_ = s; + return s; + } + } + + /** + * + * + *
    +   * One of value capture type: NEW_VALUES, OLD_VALUES, OLD_AND_NEW_VALUES.
    +   * 
    + * + * string value_capture_type = 9; + * + * @return The bytes for valueCaptureType. + */ + @java.lang.Override + public com.google.protobuf.ByteString getValueCaptureTypeBytes() { + java.lang.Object ref = valueCaptureType_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + valueCaptureType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int RECORD_COUNT_FIELD_NUMBER = 10; + private long recordCount_ = 0L; + + /** + * + * + *
    +   * Number of records in transactions.
    +   * 
    + * + * int64 record_count = 10; + * + * @return The recordCount. + */ + @java.lang.Override + public long getRecordCount() { + return recordCount_; + } + + public static final int PARTITION_COUNT_FIELD_NUMBER = 11; + private long partitionCount_ = 0L; + + /** + * + * + *
    +   * Number of partitions in transactions.
    +   * 
    + * + * int64 partition_count = 11; + * + * @return The partitionCount. + */ + @java.lang.Override + public long getPartitionCount() { + return partitionCount_; + } + + public static final int TRANSACTION_TAG_FIELD_NUMBER = 12; + + @SuppressWarnings("serial") + private volatile java.lang.Object transactionTag_ = ""; + + /** + * + * + *
    +   * Transaction tag info.
    +   * 
    + * + * string transaction_tag = 12; + * + * @return The transactionTag. + */ + @java.lang.Override + public java.lang.String getTransactionTag() { + java.lang.Object ref = transactionTag_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionTag_ = s; + return s; + } + } + + /** + * + * + *
    +   * Transaction tag info.
    +   * 
    + * + * string transaction_tag = 12; + * + * @return The bytes for transactionTag. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTransactionTagBytes() { + java.lang.Object ref = transactionTag_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + transactionTag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int IS_SYSTEM_TRANSACTION_FIELD_NUMBER = 13; + private boolean isSystemTransaction_ = false; + + /** + * + * + *
    +   * Whether the transaction is a system transactionn.
    +   * 
    + * + * bool is_system_transaction = 13; + * + * @return The isSystemTransaction. + */ + @java.lang.Override + public boolean getIsSystemTransaction() { + return isSystemTransaction_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getCommitTime()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(recordSequence_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, recordSequence_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(transactionId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, transactionId_); + } + if (isLastRecord_ != false) { + output.writeBool(4, isLastRecord_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(table_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, table_); + } + for (int i = 0; i < columnTypes_.size(); i++) { + output.writeMessage(6, columnTypes_.get(i)); + } + for (int i = 0; i < mods_.size(); i++) { + output.writeMessage(7, mods_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(modType_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 8, modType_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(valueCaptureType_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 9, valueCaptureType_); + } + if (recordCount_ != 0L) { + output.writeInt64(10, recordCount_); + } + if (partitionCount_ != 0L) { + output.writeInt64(11, partitionCount_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(transactionTag_)) { + 
com.google.protobuf.GeneratedMessage.writeString(output, 12, transactionTag_); + } + if (isSystemTransaction_ != false) { + output.writeBool(13, isSystemTransaction_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getCommitTime()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(recordSequence_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, recordSequence_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(transactionId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, transactionId_); + } + if (isLastRecord_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(4, isLastRecord_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(table_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, table_); + } + for (int i = 0; i < columnTypes_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, columnTypes_.get(i)); + } + for (int i = 0; i < mods_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, mods_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(modType_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(8, modType_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(valueCaptureType_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(9, valueCaptureType_); + } + if (recordCount_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(10, recordCount_); + } + if (partitionCount_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(11, partitionCount_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(transactionTag_)) 
{ + size += com.google.protobuf.GeneratedMessage.computeStringSize(12, transactionTag_); + } + if (isSystemTransaction_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(13, isSystemTransaction_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.DataChangeRecord)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.DataChangeRecord other = + (com.google.spanner.executor.v1.DataChangeRecord) obj; + + if (hasCommitTime() != other.hasCommitTime()) return false; + if (hasCommitTime()) { + if (!getCommitTime().equals(other.getCommitTime())) return false; + } + if (!getRecordSequence().equals(other.getRecordSequence())) return false; + if (!getTransactionId().equals(other.getTransactionId())) return false; + if (getIsLastRecord() != other.getIsLastRecord()) return false; + if (!getTable().equals(other.getTable())) return false; + if (!getColumnTypesList().equals(other.getColumnTypesList())) return false; + if (!getModsList().equals(other.getModsList())) return false; + if (!getModType().equals(other.getModType())) return false; + if (!getValueCaptureType().equals(other.getValueCaptureType())) return false; + if (getRecordCount() != other.getRecordCount()) return false; + if (getPartitionCount() != other.getPartitionCount()) return false; + if (!getTransactionTag().equals(other.getTransactionTag())) return false; + if (getIsSystemTransaction() != other.getIsSystemTransaction()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasCommitTime()) { + hash = (37 * hash) + 
COMMIT_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCommitTime().hashCode(); + } + hash = (37 * hash) + RECORD_SEQUENCE_FIELD_NUMBER; + hash = (53 * hash) + getRecordSequence().hashCode(); + hash = (37 * hash) + TRANSACTION_ID_FIELD_NUMBER; + hash = (53 * hash) + getTransactionId().hashCode(); + hash = (37 * hash) + IS_LAST_RECORD_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getIsLastRecord()); + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + if (getColumnTypesCount() > 0) { + hash = (37 * hash) + COLUMN_TYPES_FIELD_NUMBER; + hash = (53 * hash) + getColumnTypesList().hashCode(); + } + if (getModsCount() > 0) { + hash = (37 * hash) + MODS_FIELD_NUMBER; + hash = (53 * hash) + getModsList().hashCode(); + } + hash = (37 * hash) + MOD_TYPE_FIELD_NUMBER; + hash = (53 * hash) + getModType().hashCode(); + hash = (37 * hash) + VALUE_CAPTURE_TYPE_FIELD_NUMBER; + hash = (53 * hash) + getValueCaptureType().hashCode(); + hash = (37 * hash) + RECORD_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getRecordCount()); + hash = (37 * hash) + PARTITION_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getPartitionCount()); + hash = (37 * hash) + TRANSACTION_TAG_FIELD_NUMBER; + hash = (53 * hash) + getTransactionTag().hashCode(); + hash = (37 * hash) + IS_SYSTEM_TRANSACTION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getIsSystemTransaction()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.DataChangeRecord parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DataChangeRecord parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DataChangeRecord parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DataChangeRecord parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DataChangeRecord parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DataChangeRecord parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DataChangeRecord parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DataChangeRecord parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DataChangeRecord parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DataChangeRecord parseDelimitedFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DataChangeRecord parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DataChangeRecord parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.DataChangeRecord prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * ChangeStream data change record.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.DataChangeRecord} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.DataChangeRecord) + com.google.spanner.executor.v1.DataChangeRecordOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DataChangeRecord_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DataChangeRecord_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.DataChangeRecord.class, + com.google.spanner.executor.v1.DataChangeRecord.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.DataChangeRecord.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCommitTimeFieldBuilder(); + internalGetColumnTypesFieldBuilder(); + internalGetModsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + commitTime_ = null; + if (commitTimeBuilder_ != null) { + commitTimeBuilder_.dispose(); + commitTimeBuilder_ = null; + } + recordSequence_ = ""; + transactionId_ = ""; + isLastRecord_ = false; + table_ = ""; + if (columnTypesBuilder_ == null) { + columnTypes_ = java.util.Collections.emptyList(); + } else { + columnTypes_ = null; + columnTypesBuilder_.clear(); + } + bitField0_ = 
(bitField0_ & ~0x00000020); + if (modsBuilder_ == null) { + mods_ = java.util.Collections.emptyList(); + } else { + mods_ = null; + modsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000040); + modType_ = ""; + valueCaptureType_ = ""; + recordCount_ = 0L; + partitionCount_ = 0L; + transactionTag_ = ""; + isSystemTransaction_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DataChangeRecord_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DataChangeRecord getDefaultInstanceForType() { + return com.google.spanner.executor.v1.DataChangeRecord.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.DataChangeRecord build() { + com.google.spanner.executor.v1.DataChangeRecord result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DataChangeRecord buildPartial() { + com.google.spanner.executor.v1.DataChangeRecord result = + new com.google.spanner.executor.v1.DataChangeRecord(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.executor.v1.DataChangeRecord result) { + if (columnTypesBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0)) { + columnTypes_ = java.util.Collections.unmodifiableList(columnTypes_); + bitField0_ = (bitField0_ & ~0x00000020); + } + result.columnTypes_ = columnTypes_; + } else { + result.columnTypes_ = columnTypesBuilder_.build(); + } + if (modsBuilder_ == null) { + if (((bitField0_ & 0x00000040) != 0)) { + mods_ = java.util.Collections.unmodifiableList(mods_); + bitField0_ = (bitField0_ & ~0x00000040); + } 
+ result.mods_ = mods_; + } else { + result.mods_ = modsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.executor.v1.DataChangeRecord result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.commitTime_ = commitTimeBuilder_ == null ? commitTime_ : commitTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.recordSequence_ = recordSequence_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.transactionId_ = transactionId_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.isLastRecord_ = isLastRecord_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.table_ = table_; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.modType_ = modType_; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.valueCaptureType_ = valueCaptureType_; + } + if (((from_bitField0_ & 0x00000200) != 0)) { + result.recordCount_ = recordCount_; + } + if (((from_bitField0_ & 0x00000400) != 0)) { + result.partitionCount_ = partitionCount_; + } + if (((from_bitField0_ & 0x00000800) != 0)) { + result.transactionTag_ = transactionTag_; + } + if (((from_bitField0_ & 0x00001000) != 0)) { + result.isSystemTransaction_ = isSystemTransaction_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.DataChangeRecord) { + return mergeFrom((com.google.spanner.executor.v1.DataChangeRecord) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.DataChangeRecord other) { + if (other == com.google.spanner.executor.v1.DataChangeRecord.getDefaultInstance()) + return this; + if (other.hasCommitTime()) { + mergeCommitTime(other.getCommitTime()); + } + if (!other.getRecordSequence().isEmpty()) { + recordSequence_ = 
other.recordSequence_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getTransactionId().isEmpty()) { + transactionId_ = other.transactionId_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.getIsLastRecord() != false) { + setIsLastRecord(other.getIsLastRecord()); + } + if (!other.getTable().isEmpty()) { + table_ = other.table_; + bitField0_ |= 0x00000010; + onChanged(); + } + if (columnTypesBuilder_ == null) { + if (!other.columnTypes_.isEmpty()) { + if (columnTypes_.isEmpty()) { + columnTypes_ = other.columnTypes_; + bitField0_ = (bitField0_ & ~0x00000020); + } else { + ensureColumnTypesIsMutable(); + columnTypes_.addAll(other.columnTypes_); + } + onChanged(); + } + } else { + if (!other.columnTypes_.isEmpty()) { + if (columnTypesBuilder_.isEmpty()) { + columnTypesBuilder_.dispose(); + columnTypesBuilder_ = null; + columnTypes_ = other.columnTypes_; + bitField0_ = (bitField0_ & ~0x00000020); + columnTypesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetColumnTypesFieldBuilder() + : null; + } else { + columnTypesBuilder_.addAllMessages(other.columnTypes_); + } + } + } + if (modsBuilder_ == null) { + if (!other.mods_.isEmpty()) { + if (mods_.isEmpty()) { + mods_ = other.mods_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensureModsIsMutable(); + mods_.addAll(other.mods_); + } + onChanged(); + } + } else { + if (!other.mods_.isEmpty()) { + if (modsBuilder_.isEmpty()) { + modsBuilder_.dispose(); + modsBuilder_ = null; + mods_ = other.mods_; + bitField0_ = (bitField0_ & ~0x00000040); + modsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetModsFieldBuilder() + : null; + } else { + modsBuilder_.addAllMessages(other.mods_); + } + } + } + if (!other.getModType().isEmpty()) { + modType_ = other.modType_; + bitField0_ |= 0x00000080; + onChanged(); + } + if (!other.getValueCaptureType().isEmpty()) { + valueCaptureType_ = other.valueCaptureType_; + bitField0_ |= 0x00000100; + onChanged(); + } + if (other.getRecordCount() != 0L) { + setRecordCount(other.getRecordCount()); + } + if (other.getPartitionCount() != 0L) { + setPartitionCount(other.getPartitionCount()); + } + if (!other.getTransactionTag().isEmpty()) { + transactionTag_ = other.transactionTag_; + bitField0_ |= 0x00000800; + onChanged(); + } + if (other.getIsSystemTransaction() != false) { + setIsSystemTransaction(other.getIsSystemTransaction()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetCommitTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + recordSequence_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + transactionId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 32: + { + isLastRecord_ = input.readBool(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 42: + { + table_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 50: + { + 
com.google.spanner.executor.v1.DataChangeRecord.ColumnType m = + input.readMessage( + com.google.spanner.executor.v1.DataChangeRecord.ColumnType.parser(), + extensionRegistry); + if (columnTypesBuilder_ == null) { + ensureColumnTypesIsMutable(); + columnTypes_.add(m); + } else { + columnTypesBuilder_.addMessage(m); + } + break; + } // case 50 + case 58: + { + com.google.spanner.executor.v1.DataChangeRecord.Mod m = + input.readMessage( + com.google.spanner.executor.v1.DataChangeRecord.Mod.parser(), + extensionRegistry); + if (modsBuilder_ == null) { + ensureModsIsMutable(); + mods_.add(m); + } else { + modsBuilder_.addMessage(m); + } + break; + } // case 58 + case 66: + { + modType_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000080; + break; + } // case 66 + case 74: + { + valueCaptureType_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000100; + break; + } // case 74 + case 80: + { + recordCount_ = input.readInt64(); + bitField0_ |= 0x00000200; + break; + } // case 80 + case 88: + { + partitionCount_ = input.readInt64(); + bitField0_ |= 0x00000400; + break; + } // case 88 + case 98: + { + transactionTag_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000800; + break; + } // case 98 + case 104: + { + isSystemTransaction_ = input.readBool(); + bitField0_ |= 0x00001000; + break; + } // case 104 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Timestamp commitTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + commitTimeBuilder_; + + /** + * + * + *
    +     * The timestamp in which the change was committed.
    +     * 
    + * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return Whether the commitTime field is set. + */ + public boolean hasCommitTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * The timestamp in which the change was committed.
    +     * 
    + * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return The commitTime. + */ + public com.google.protobuf.Timestamp getCommitTime() { + if (commitTimeBuilder_ == null) { + return commitTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTime_; + } else { + return commitTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The timestamp in which the change was committed.
    +     * 
    + * + * .google.protobuf.Timestamp commit_time = 1; + */ + public Builder setCommitTime(com.google.protobuf.Timestamp value) { + if (commitTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commitTime_ = value; + } else { + commitTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The timestamp in which the change was committed.
    +     * 
    + * + * .google.protobuf.Timestamp commit_time = 1; + */ + public Builder setCommitTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (commitTimeBuilder_ == null) { + commitTime_ = builderForValue.build(); + } else { + commitTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The timestamp in which the change was committed.
    +     * 
    + * + * .google.protobuf.Timestamp commit_time = 1; + */ + public Builder mergeCommitTime(com.google.protobuf.Timestamp value) { + if (commitTimeBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && commitTime_ != null + && commitTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCommitTimeBuilder().mergeFrom(value); + } else { + commitTime_ = value; + } + } else { + commitTimeBuilder_.mergeFrom(value); + } + if (commitTime_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The timestamp in which the change was committed.
    +     * 
    + * + * .google.protobuf.Timestamp commit_time = 1; + */ + public Builder clearCommitTime() { + bitField0_ = (bitField0_ & ~0x00000001); + commitTime_ = null; + if (commitTimeBuilder_ != null) { + commitTimeBuilder_.dispose(); + commitTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The timestamp in which the change was committed.
    +     * 
    + * + * .google.protobuf.Timestamp commit_time = 1; + */ + public com.google.protobuf.Timestamp.Builder getCommitTimeBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetCommitTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The timestamp in which the change was committed.
    +     * 
    + * + * .google.protobuf.Timestamp commit_time = 1; + */ + public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { + if (commitTimeBuilder_ != null) { + return commitTimeBuilder_.getMessageOrBuilder(); + } else { + return commitTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTime_; + } + } + + /** + * + * + *
    +     * The timestamp in which the change was committed.
    +     * 
    + * + * .google.protobuf.Timestamp commit_time = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCommitTimeFieldBuilder() { + if (commitTimeBuilder_ == null) { + commitTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCommitTime(), getParentForChildren(), isClean()); + commitTime_ = null; + } + return commitTimeBuilder_; + } + + private java.lang.Object recordSequence_ = ""; + + /** + * + * + *
    +     * The sequence number for the record within the transaction.
    +     * 
    + * + * string record_sequence = 2; + * + * @return The recordSequence. + */ + public java.lang.String getRecordSequence() { + java.lang.Object ref = recordSequence_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + recordSequence_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The sequence number for the record within the transaction.
    +     * 
    + * + * string record_sequence = 2; + * + * @return The bytes for recordSequence. + */ + public com.google.protobuf.ByteString getRecordSequenceBytes() { + java.lang.Object ref = recordSequence_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + recordSequence_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The sequence number for the record within the transaction.
    +     * 
    + * + * string record_sequence = 2; + * + * @param value The recordSequence to set. + * @return This builder for chaining. + */ + public Builder setRecordSequence(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + recordSequence_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The sequence number for the record within the transaction.
    +     * 
    + * + * string record_sequence = 2; + * + * @return This builder for chaining. + */ + public Builder clearRecordSequence() { + recordSequence_ = getDefaultInstance().getRecordSequence(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The sequence number for the record within the transaction.
    +     * 
    + * + * string record_sequence = 2; + * + * @param value The bytes for recordSequence to set. + * @return This builder for chaining. + */ + public Builder setRecordSequenceBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + recordSequence_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object transactionId_ = ""; + + /** + * + * + *
    +     * A globally unique string that represents the transaction in which the
    +     * change was committed.
    +     * 
    + * + * string transaction_id = 3; + * + * @return The transactionId. + */ + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * A globally unique string that represents the transaction in which the
    +     * change was committed.
    +     * 
    + * + * string transaction_id = 3; + * + * @return The bytes for transactionId. + */ + public com.google.protobuf.ByteString getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * A globally unique string that represents the transaction in which the
    +     * change was committed.
    +     * 
    + * + * string transaction_id = 3; + * + * @param value The transactionId to set. + * @return This builder for chaining. + */ + public Builder setTransactionId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + transactionId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A globally unique string that represents the transaction in which the
    +     * change was committed.
    +     * 
    + * + * string transaction_id = 3; + * + * @return This builder for chaining. + */ + public Builder clearTransactionId() { + transactionId_ = getDefaultInstance().getTransactionId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * A globally unique string that represents the transaction in which the
    +     * change was committed.
    +     * 
    + * + * string transaction_id = 3; + * + * @param value The bytes for transactionId to set. + * @return This builder for chaining. + */ + public Builder setTransactionIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + transactionId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private boolean isLastRecord_; + + /** + * + * + *
    +     * Indicates whether this is the last record for a transaction in the current
    +     * partition.
    +     * 
    + * + * bool is_last_record = 4; + * + * @return The isLastRecord. + */ + @java.lang.Override + public boolean getIsLastRecord() { + return isLastRecord_; + } + + /** + * + * + *
    +     * Indicates whether this is the last record for a transaction in the current
    +     * partition.
    +     * 
    + * + * bool is_last_record = 4; + * + * @param value The isLastRecord to set. + * @return This builder for chaining. + */ + public Builder setIsLastRecord(boolean value) { + + isLastRecord_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Indicates whether this is the last record for a transaction in the current
    +     * partition.
    +     * 
    + * + * bool is_last_record = 4; + * + * @return This builder for chaining. + */ + public Builder clearIsLastRecord() { + bitField0_ = (bitField0_ & ~0x00000008); + isLastRecord_ = false; + onChanged(); + return this; + } + + private java.lang.Object table_ = ""; + + /** + * + * + *
    +     * Name of the table affected by the change.
    +     * 
    + * + * string table = 5; + * + * @return The table. + */ + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Name of the table affected by the change.
    +     * 
    + * + * string table = 5; + * + * @return The bytes for table. + */ + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Name of the table affected by the change.
    +     * 
    + * + * string table = 5; + * + * @param value The table to set. + * @return This builder for chaining. + */ + public Builder setTable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Name of the table affected by the change.
    +     * 
    + * + * string table = 5; + * + * @return This builder for chaining. + */ + public Builder clearTable() { + table_ = getDefaultInstance().getTable(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Name of the table affected by the change.
    +     * 
    + * + * string table = 5; + * + * @param value The bytes for table to set. + * @return This builder for chaining. + */ + public Builder setTableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + table_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + private java.util.List + columnTypes_ = java.util.Collections.emptyList(); + + private void ensureColumnTypesIsMutable() { + if (!((bitField0_ & 0x00000020) != 0)) { + columnTypes_ = + new java.util.ArrayList( + columnTypes_); + bitField0_ |= 0x00000020; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.DataChangeRecord.ColumnType, + com.google.spanner.executor.v1.DataChangeRecord.ColumnType.Builder, + com.google.spanner.executor.v1.DataChangeRecord.ColumnTypeOrBuilder> + columnTypesBuilder_; + + /** + * + * + *
    +     * Column types defined in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + * + */ + public java.util.List + getColumnTypesList() { + if (columnTypesBuilder_ == null) { + return java.util.Collections.unmodifiableList(columnTypes_); + } else { + return columnTypesBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Column types defined in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + * + */ + public int getColumnTypesCount() { + if (columnTypesBuilder_ == null) { + return columnTypes_.size(); + } else { + return columnTypesBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Column types defined in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + * + */ + public com.google.spanner.executor.v1.DataChangeRecord.ColumnType getColumnTypes(int index) { + if (columnTypesBuilder_ == null) { + return columnTypes_.get(index); + } else { + return columnTypesBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Column types defined in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + * + */ + public Builder setColumnTypes( + int index, com.google.spanner.executor.v1.DataChangeRecord.ColumnType value) { + if (columnTypesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnTypesIsMutable(); + columnTypes_.set(index, value); + onChanged(); + } else { + columnTypesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Column types defined in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + * + */ + public Builder setColumnTypes( + int index, + com.google.spanner.executor.v1.DataChangeRecord.ColumnType.Builder builderForValue) { + if (columnTypesBuilder_ == null) { + ensureColumnTypesIsMutable(); + columnTypes_.set(index, builderForValue.build()); + onChanged(); + } else { + columnTypesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Column types defined in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + * + */ + public Builder addColumnTypes( + com.google.spanner.executor.v1.DataChangeRecord.ColumnType value) { + if (columnTypesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnTypesIsMutable(); + columnTypes_.add(value); + onChanged(); + } else { + columnTypesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Column types defined in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + * + */ + public Builder addColumnTypes( + int index, com.google.spanner.executor.v1.DataChangeRecord.ColumnType value) { + if (columnTypesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnTypesIsMutable(); + columnTypes_.add(index, value); + onChanged(); + } else { + columnTypesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Column types defined in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + * + */ + public Builder addColumnTypes( + com.google.spanner.executor.v1.DataChangeRecord.ColumnType.Builder builderForValue) { + if (columnTypesBuilder_ == null) { + ensureColumnTypesIsMutable(); + columnTypes_.add(builderForValue.build()); + onChanged(); + } else { + columnTypesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Column types defined in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + * + */ + public Builder addColumnTypes( + int index, + com.google.spanner.executor.v1.DataChangeRecord.ColumnType.Builder builderForValue) { + if (columnTypesBuilder_ == null) { + ensureColumnTypesIsMutable(); + columnTypes_.add(index, builderForValue.build()); + onChanged(); + } else { + columnTypesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Column types defined in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + * + */ + public Builder addAllColumnTypes( + java.lang.Iterable + values) { + if (columnTypesBuilder_ == null) { + ensureColumnTypesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, columnTypes_); + onChanged(); + } else { + columnTypesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Column types defined in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + * + */ + public Builder clearColumnTypes() { + if (columnTypesBuilder_ == null) { + columnTypes_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + } else { + columnTypesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Column types defined in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + * + */ + public Builder removeColumnTypes(int index) { + if (columnTypesBuilder_ == null) { + ensureColumnTypesIsMutable(); + columnTypes_.remove(index); + onChanged(); + } else { + columnTypesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Column types defined in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + * + */ + public com.google.spanner.executor.v1.DataChangeRecord.ColumnType.Builder getColumnTypesBuilder( + int index) { + return internalGetColumnTypesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Column types defined in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + * + */ + public com.google.spanner.executor.v1.DataChangeRecord.ColumnTypeOrBuilder + getColumnTypesOrBuilder(int index) { + if (columnTypesBuilder_ == null) { + return columnTypes_.get(index); + } else { + return columnTypesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Column types defined in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + * + */ + public java.util.List< + ? extends com.google.spanner.executor.v1.DataChangeRecord.ColumnTypeOrBuilder> + getColumnTypesOrBuilderList() { + if (columnTypesBuilder_ != null) { + return columnTypesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(columnTypes_); + } + } + + /** + * + * + *
    +     * Column types defined in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + * + */ + public com.google.spanner.executor.v1.DataChangeRecord.ColumnType.Builder + addColumnTypesBuilder() { + return internalGetColumnTypesFieldBuilder() + .addBuilder( + com.google.spanner.executor.v1.DataChangeRecord.ColumnType.getDefaultInstance()); + } + + /** + * + * + *
    +     * Column types defined in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + * + */ + public com.google.spanner.executor.v1.DataChangeRecord.ColumnType.Builder addColumnTypesBuilder( + int index) { + return internalGetColumnTypesFieldBuilder() + .addBuilder( + index, + com.google.spanner.executor.v1.DataChangeRecord.ColumnType.getDefaultInstance()); + } + + /** + * + * + *
    +     * Column types defined in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + * + */ + public java.util.List + getColumnTypesBuilderList() { + return internalGetColumnTypesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.DataChangeRecord.ColumnType, + com.google.spanner.executor.v1.DataChangeRecord.ColumnType.Builder, + com.google.spanner.executor.v1.DataChangeRecord.ColumnTypeOrBuilder> + internalGetColumnTypesFieldBuilder() { + if (columnTypesBuilder_ == null) { + columnTypesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.DataChangeRecord.ColumnType, + com.google.spanner.executor.v1.DataChangeRecord.ColumnType.Builder, + com.google.spanner.executor.v1.DataChangeRecord.ColumnTypeOrBuilder>( + columnTypes_, ((bitField0_ & 0x00000020) != 0), getParentForChildren(), isClean()); + columnTypes_ = null; + } + return columnTypesBuilder_; + } + + private java.util.List mods_ = + java.util.Collections.emptyList(); + + private void ensureModsIsMutable() { + if (!((bitField0_ & 0x00000040) != 0)) { + mods_ = new java.util.ArrayList(mods_); + bitField0_ |= 0x00000040; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.DataChangeRecord.Mod, + com.google.spanner.executor.v1.DataChangeRecord.Mod.Builder, + com.google.spanner.executor.v1.DataChangeRecord.ModOrBuilder> + modsBuilder_; + + /** + * + * + *
    +     * Changes made in the transaction.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + public java.util.List getModsList() { + if (modsBuilder_ == null) { + return java.util.Collections.unmodifiableList(mods_); + } else { + return modsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Changes made in the transaction.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + public int getModsCount() { + if (modsBuilder_ == null) { + return mods_.size(); + } else { + return modsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Changes made in the transaction.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + public com.google.spanner.executor.v1.DataChangeRecord.Mod getMods(int index) { + if (modsBuilder_ == null) { + return mods_.get(index); + } else { + return modsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Changes made in the transaction.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + public Builder setMods(int index, com.google.spanner.executor.v1.DataChangeRecord.Mod value) { + if (modsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureModsIsMutable(); + mods_.set(index, value); + onChanged(); + } else { + modsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Changes made in the transaction.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + public Builder setMods( + int index, com.google.spanner.executor.v1.DataChangeRecord.Mod.Builder builderForValue) { + if (modsBuilder_ == null) { + ensureModsIsMutable(); + mods_.set(index, builderForValue.build()); + onChanged(); + } else { + modsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Changes made in the transaction.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + public Builder addMods(com.google.spanner.executor.v1.DataChangeRecord.Mod value) { + if (modsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureModsIsMutable(); + mods_.add(value); + onChanged(); + } else { + modsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Changes made in the transaction.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + public Builder addMods(int index, com.google.spanner.executor.v1.DataChangeRecord.Mod value) { + if (modsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureModsIsMutable(); + mods_.add(index, value); + onChanged(); + } else { + modsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Changes made in the transaction.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + public Builder addMods( + com.google.spanner.executor.v1.DataChangeRecord.Mod.Builder builderForValue) { + if (modsBuilder_ == null) { + ensureModsIsMutable(); + mods_.add(builderForValue.build()); + onChanged(); + } else { + modsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Changes made in the transaction.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + public Builder addMods( + int index, com.google.spanner.executor.v1.DataChangeRecord.Mod.Builder builderForValue) { + if (modsBuilder_ == null) { + ensureModsIsMutable(); + mods_.add(index, builderForValue.build()); + onChanged(); + } else { + modsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Changes made in the transaction.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + public Builder addAllMods( + java.lang.Iterable values) { + if (modsBuilder_ == null) { + ensureModsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, mods_); + onChanged(); + } else { + modsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Changes made in the transaction.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + public Builder clearMods() { + if (modsBuilder_ == null) { + mods_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + } else { + modsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Changes made in the transaction.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + public Builder removeMods(int index) { + if (modsBuilder_ == null) { + ensureModsIsMutable(); + mods_.remove(index); + onChanged(); + } else { + modsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Changes made in the transaction.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + public com.google.spanner.executor.v1.DataChangeRecord.Mod.Builder getModsBuilder(int index) { + return internalGetModsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Changes made in the transaction.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + public com.google.spanner.executor.v1.DataChangeRecord.ModOrBuilder getModsOrBuilder( + int index) { + if (modsBuilder_ == null) { + return mods_.get(index); + } else { + return modsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Changes made in the transaction.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + public java.util.List + getModsOrBuilderList() { + if (modsBuilder_ != null) { + return modsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(mods_); + } + } + + /** + * + * + *
    +     * Changes made in the transaction.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + public com.google.spanner.executor.v1.DataChangeRecord.Mod.Builder addModsBuilder() { + return internalGetModsFieldBuilder() + .addBuilder(com.google.spanner.executor.v1.DataChangeRecord.Mod.getDefaultInstance()); + } + + /** + * + * + *
    +     * Changes made in the transaction.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + public com.google.spanner.executor.v1.DataChangeRecord.Mod.Builder addModsBuilder(int index) { + return internalGetModsFieldBuilder() + .addBuilder( + index, com.google.spanner.executor.v1.DataChangeRecord.Mod.getDefaultInstance()); + } + + /** + * + * + *
    +     * Changes made in the transaction.
    +     * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + public java.util.List + getModsBuilderList() { + return internalGetModsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.DataChangeRecord.Mod, + com.google.spanner.executor.v1.DataChangeRecord.Mod.Builder, + com.google.spanner.executor.v1.DataChangeRecord.ModOrBuilder> + internalGetModsFieldBuilder() { + if (modsBuilder_ == null) { + modsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.DataChangeRecord.Mod, + com.google.spanner.executor.v1.DataChangeRecord.Mod.Builder, + com.google.spanner.executor.v1.DataChangeRecord.ModOrBuilder>( + mods_, ((bitField0_ & 0x00000040) != 0), getParentForChildren(), isClean()); + mods_ = null; + } + return modsBuilder_; + } + + private java.lang.Object modType_ = ""; + + /** + * + * + *
    +     * Describes the type of change. One of INSERT, UPDATE or DELETE.
    +     * 
    + * + * string mod_type = 8; + * + * @return The modType. + */ + public java.lang.String getModType() { + java.lang.Object ref = modType_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + modType_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Describes the type of change. One of INSERT, UPDATE or DELETE.
    +     * 
    + * + * string mod_type = 8; + * + * @return The bytes for modType. + */ + public com.google.protobuf.ByteString getModTypeBytes() { + java.lang.Object ref = modType_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + modType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Describes the type of change. One of INSERT, UPDATE or DELETE.
    +     * 
    + * + * string mod_type = 8; + * + * @param value The modType to set. + * @return This builder for chaining. + */ + public Builder setModType(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + modType_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Describes the type of change. One of INSERT, UPDATE or DELETE.
    +     * 
    + * + * string mod_type = 8; + * + * @return This builder for chaining. + */ + public Builder clearModType() { + modType_ = getDefaultInstance().getModType(); + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Describes the type of change. One of INSERT, UPDATE or DELETE.
    +     * 
    + * + * string mod_type = 8; + * + * @param value The bytes for modType to set. + * @return This builder for chaining. + */ + public Builder setModTypeBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + modType_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + private java.lang.Object valueCaptureType_ = ""; + + /** + * + * + *
    +     * One of value capture type: NEW_VALUES, OLD_VALUES, OLD_AND_NEW_VALUES.
    +     * 
    + * + * string value_capture_type = 9; + * + * @return The valueCaptureType. + */ + public java.lang.String getValueCaptureType() { + java.lang.Object ref = valueCaptureType_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + valueCaptureType_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * One of value capture type: NEW_VALUES, OLD_VALUES, OLD_AND_NEW_VALUES.
    +     * 
    + * + * string value_capture_type = 9; + * + * @return The bytes for valueCaptureType. + */ + public com.google.protobuf.ByteString getValueCaptureTypeBytes() { + java.lang.Object ref = valueCaptureType_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + valueCaptureType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * One of value capture type: NEW_VALUES, OLD_VALUES, OLD_AND_NEW_VALUES.
    +     * 
    + * + * string value_capture_type = 9; + * + * @param value The valueCaptureType to set. + * @return This builder for chaining. + */ + public Builder setValueCaptureType(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + valueCaptureType_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
    +     * One of value capture type: NEW_VALUES, OLD_VALUES, OLD_AND_NEW_VALUES.
    +     * 
    + * + * string value_capture_type = 9; + * + * @return This builder for chaining. + */ + public Builder clearValueCaptureType() { + valueCaptureType_ = getDefaultInstance().getValueCaptureType(); + bitField0_ = (bitField0_ & ~0x00000100); + onChanged(); + return this; + } + + /** + * + * + *
    +     * One of value capture type: NEW_VALUES, OLD_VALUES, OLD_AND_NEW_VALUES.
    +     * 
    + * + * string value_capture_type = 9; + * + * @param value The bytes for valueCaptureType to set. + * @return This builder for chaining. + */ + public Builder setValueCaptureTypeBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + valueCaptureType_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + private long recordCount_; + + /** + * + * + *
    +     * Number of records in transactions.
    +     * 
    + * + * int64 record_count = 10; + * + * @return The recordCount. + */ + @java.lang.Override + public long getRecordCount() { + return recordCount_; + } + + /** + * + * + *
    +     * Number of records in transactions.
    +     * 
    + * + * int64 record_count = 10; + * + * @param value The recordCount to set. + * @return This builder for chaining. + */ + public Builder setRecordCount(long value) { + + recordCount_ = value; + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Number of records in transactions.
    +     * 
    + * + * int64 record_count = 10; + * + * @return This builder for chaining. + */ + public Builder clearRecordCount() { + bitField0_ = (bitField0_ & ~0x00000200); + recordCount_ = 0L; + onChanged(); + return this; + } + + private long partitionCount_; + + /** + * + * + *
    +     * Number of partitions in transactions.
    +     * 
    + * + * int64 partition_count = 11; + * + * @return The partitionCount. + */ + @java.lang.Override + public long getPartitionCount() { + return partitionCount_; + } + + /** + * + * + *
    +     * Number of partitions in transactions.
    +     * 
    + * + * int64 partition_count = 11; + * + * @param value The partitionCount to set. + * @return This builder for chaining. + */ + public Builder setPartitionCount(long value) { + + partitionCount_ = value; + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Number of partitions in transactions.
    +     * 
    + * + * int64 partition_count = 11; + * + * @return This builder for chaining. + */ + public Builder clearPartitionCount() { + bitField0_ = (bitField0_ & ~0x00000400); + partitionCount_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object transactionTag_ = ""; + + /** + * + * + *
    +     * Transaction tag info.
    +     * 
    + * + * string transaction_tag = 12; + * + * @return The transactionTag. + */ + public java.lang.String getTransactionTag() { + java.lang.Object ref = transactionTag_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionTag_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Transaction tag info.
    +     * 
    + * + * string transaction_tag = 12; + * + * @return The bytes for transactionTag. + */ + public com.google.protobuf.ByteString getTransactionTagBytes() { + java.lang.Object ref = transactionTag_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + transactionTag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Transaction tag info.
    +     * 
    + * + * string transaction_tag = 12; + * + * @param value The transactionTag to set. + * @return This builder for chaining. + */ + public Builder setTransactionTag(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + transactionTag_ = value; + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Transaction tag info.
    +     * 
    + * + * string transaction_tag = 12; + * + * @return This builder for chaining. + */ + public Builder clearTransactionTag() { + transactionTag_ = getDefaultInstance().getTransactionTag(); + bitField0_ = (bitField0_ & ~0x00000800); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Transaction tag info.
    +     * 
    + * + * string transaction_tag = 12; + * + * @param value The bytes for transactionTag to set. + * @return This builder for chaining. + */ + public Builder setTransactionTagBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + transactionTag_ = value; + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + private boolean isSystemTransaction_; + + /** + * + * + *
    +     * Whether the transaction is a system transactionn.
    +     * 
    + * + * bool is_system_transaction = 13; + * + * @return The isSystemTransaction. + */ + @java.lang.Override + public boolean getIsSystemTransaction() { + return isSystemTransaction_; + } + + /** + * + * + *
    +     * Whether the transaction is a system transactionn.
    +     * 
    + * + * bool is_system_transaction = 13; + * + * @param value The isSystemTransaction to set. + * @return This builder for chaining. + */ + public Builder setIsSystemTransaction(boolean value) { + + isSystemTransaction_ = value; + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Whether the transaction is a system transactionn.
    +     * 
    + * + * bool is_system_transaction = 13; + * + * @return This builder for chaining. + */ + public Builder clearIsSystemTransaction() { + bitField0_ = (bitField0_ & ~0x00001000); + isSystemTransaction_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.DataChangeRecord) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.DataChangeRecord) + private static final com.google.spanner.executor.v1.DataChangeRecord DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.DataChangeRecord(); + } + + public static com.google.spanner.executor.v1.DataChangeRecord getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DataChangeRecord parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DataChangeRecord getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DataChangeRecordOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DataChangeRecordOrBuilder.java new file mode 100644 index 000000000000..e14b7c5a0062 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DataChangeRecordOrBuilder.java @@ -0,0 +1,389 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface DataChangeRecordOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.DataChangeRecord) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The timestamp in which the change was committed.
    +   * 
    + * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return Whether the commitTime field is set. + */ + boolean hasCommitTime(); + + /** + * + * + *
    +   * The timestamp in which the change was committed.
    +   * 
    + * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return The commitTime. + */ + com.google.protobuf.Timestamp getCommitTime(); + + /** + * + * + *
    +   * The timestamp in which the change was committed.
    +   * 
    + * + * .google.protobuf.Timestamp commit_time = 1; + */ + com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder(); + + /** + * + * + *
    +   * The sequence number for the record within the transaction.
    +   * 
    + * + * string record_sequence = 2; + * + * @return The recordSequence. + */ + java.lang.String getRecordSequence(); + + /** + * + * + *
    +   * The sequence number for the record within the transaction.
    +   * 
    + * + * string record_sequence = 2; + * + * @return The bytes for recordSequence. + */ + com.google.protobuf.ByteString getRecordSequenceBytes(); + + /** + * + * + *
    +   * A globally unique string that represents the transaction in which the
    +   * change was committed.
    +   * 
    + * + * string transaction_id = 3; + * + * @return The transactionId. + */ + java.lang.String getTransactionId(); + + /** + * + * + *
    +   * A globally unique string that represents the transaction in which the
    +   * change was committed.
    +   * 
    + * + * string transaction_id = 3; + * + * @return The bytes for transactionId. + */ + com.google.protobuf.ByteString getTransactionIdBytes(); + + /** + * + * + *
    +   * Indicates whether this is the last record for a transaction in the current
    +   * partition.
    +   * 
    + * + * bool is_last_record = 4; + * + * @return The isLastRecord. + */ + boolean getIsLastRecord(); + + /** + * + * + *
    +   * Name of the table affected by the change.
    +   * 
    + * + * string table = 5; + * + * @return The table. + */ + java.lang.String getTable(); + + /** + * + * + *
    +   * Name of the table affected by the change.
    +   * 
    + * + * string table = 5; + * + * @return The bytes for table. + */ + com.google.protobuf.ByteString getTableBytes(); + + /** + * + * + *
    +   * Column types defined in the schema.
    +   * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + */ + java.util.List getColumnTypesList(); + + /** + * + * + *
    +   * Column types defined in the schema.
    +   * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + */ + com.google.spanner.executor.v1.DataChangeRecord.ColumnType getColumnTypes(int index); + + /** + * + * + *
    +   * Column types defined in the schema.
    +   * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + */ + int getColumnTypesCount(); + + /** + * + * + *
    +   * Column types defined in the schema.
    +   * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + */ + java.util.List + getColumnTypesOrBuilderList(); + + /** + * + * + *
    +   * Column types defined in the schema.
    +   * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.ColumnType column_types = 6; + */ + com.google.spanner.executor.v1.DataChangeRecord.ColumnTypeOrBuilder getColumnTypesOrBuilder( + int index); + + /** + * + * + *
    +   * Changes made in the transaction.
    +   * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + java.util.List getModsList(); + + /** + * + * + *
    +   * Changes made in the transaction.
    +   * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + com.google.spanner.executor.v1.DataChangeRecord.Mod getMods(int index); + + /** + * + * + *
    +   * Changes made in the transaction.
    +   * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + int getModsCount(); + + /** + * + * + *
    +   * Changes made in the transaction.
    +   * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + java.util.List + getModsOrBuilderList(); + + /** + * + * + *
    +   * Changes made in the transaction.
    +   * 
    + * + * repeated .google.spanner.executor.v1.DataChangeRecord.Mod mods = 7; + */ + com.google.spanner.executor.v1.DataChangeRecord.ModOrBuilder getModsOrBuilder(int index); + + /** + * + * + *
    +   * Describes the type of change. One of INSERT, UPDATE or DELETE.
    +   * 
    + * + * string mod_type = 8; + * + * @return The modType. + */ + java.lang.String getModType(); + + /** + * + * + *
    +   * Describes the type of change. One of INSERT, UPDATE or DELETE.
    +   * 
    + * + * string mod_type = 8; + * + * @return The bytes for modType. + */ + com.google.protobuf.ByteString getModTypeBytes(); + + /** + * + * + *
    +   * One of value capture type: NEW_VALUES, OLD_VALUES, OLD_AND_NEW_VALUES.
    +   * 
    + * + * string value_capture_type = 9; + * + * @return The valueCaptureType. + */ + java.lang.String getValueCaptureType(); + + /** + * + * + *
    +   * One of value capture type: NEW_VALUES, OLD_VALUES, OLD_AND_NEW_VALUES.
    +   * 
    + * + * string value_capture_type = 9; + * + * @return The bytes for valueCaptureType. + */ + com.google.protobuf.ByteString getValueCaptureTypeBytes(); + + /** + * + * + *
    +   * Number of records in transactions.
    +   * 
    + * + * int64 record_count = 10; + * + * @return The recordCount. + */ + long getRecordCount(); + + /** + * + * + *
    +   * Number of partitions in transactions.
    +   * 
    + * + * int64 partition_count = 11; + * + * @return The partitionCount. + */ + long getPartitionCount(); + + /** + * + * + *
    +   * Transaction tag info.
    +   * 
    + * + * string transaction_tag = 12; + * + * @return The transactionTag. + */ + java.lang.String getTransactionTag(); + + /** + * + * + *
    +   * Transaction tag info.
    +   * 
    + * + * string transaction_tag = 12; + * + * @return The bytes for transactionTag. + */ + com.google.protobuf.ByteString getTransactionTagBytes(); + + /** + * + * + *
    +   * Whether the transaction is a system transactionn.
    +   * 
    + * + * bool is_system_transaction = 13; + * + * @return The isSystemTransaction. + */ + boolean getIsSystemTransaction(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudBackupAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudBackupAction.java new file mode 100644 index 000000000000..6d26b911de5b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudBackupAction.java @@ -0,0 +1,975 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that deletes a Cloud Spanner database backup.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.DeleteCloudBackupAction} + */ +@com.google.protobuf.Generated +public final class DeleteCloudBackupAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.DeleteCloudBackupAction) + DeleteCloudBackupActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DeleteCloudBackupAction"); + } + + // Use DeleteCloudBackupAction.newBuilder() to construct. + private DeleteCloudBackupAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DeleteCloudBackupAction() { + projectId_ = ""; + instanceId_ = ""; + backupId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DeleteCloudBackupAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DeleteCloudBackupAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.DeleteCloudBackupAction.class, + com.google.spanner.executor.v1.DeleteCloudBackupAction.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + @java.lang.Override + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BACKUP_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object backupId_ = ""; + + /** + * + * + *
    +   * The id of the backup to delete, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The backupId. + */ + @java.lang.Override + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupId_ = s; + return s; + } + } + + /** + * + * + *
    +   * The id of the backup to delete, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The bytes for backupId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(backupId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, backupId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(backupId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, backupId_); + } + size += 
getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.DeleteCloudBackupAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.DeleteCloudBackupAction other = + (com.google.spanner.executor.v1.DeleteCloudBackupAction) obj; + + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getInstanceId().equals(other.getInstanceId())) return false; + if (!getBackupId().equals(other.getBackupId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.DeleteCloudBackupAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DeleteCloudBackupAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DeleteCloudBackupAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DeleteCloudBackupAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DeleteCloudBackupAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DeleteCloudBackupAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DeleteCloudBackupAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DeleteCloudBackupAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DeleteCloudBackupAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DeleteCloudBackupAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static 
com.google.spanner.executor.v1.DeleteCloudBackupAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DeleteCloudBackupAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.DeleteCloudBackupAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that deletes a Cloud Spanner database backup.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.DeleteCloudBackupAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.DeleteCloudBackupAction) + com.google.spanner.executor.v1.DeleteCloudBackupActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DeleteCloudBackupAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DeleteCloudBackupAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.DeleteCloudBackupAction.class, + com.google.spanner.executor.v1.DeleteCloudBackupAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.DeleteCloudBackupAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + projectId_ = ""; + instanceId_ = ""; + backupId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DeleteCloudBackupAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DeleteCloudBackupAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.DeleteCloudBackupAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.DeleteCloudBackupAction build() { + 
com.google.spanner.executor.v1.DeleteCloudBackupAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DeleteCloudBackupAction buildPartial() { + com.google.spanner.executor.v1.DeleteCloudBackupAction result = + new com.google.spanner.executor.v1.DeleteCloudBackupAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.DeleteCloudBackupAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.projectId_ = projectId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.instanceId_ = instanceId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.backupId_ = backupId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.DeleteCloudBackupAction) { + return mergeFrom((com.google.spanner.executor.v1.DeleteCloudBackupAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.DeleteCloudBackupAction other) { + if (other == com.google.spanner.executor.v1.DeleteCloudBackupAction.getDefaultInstance()) + return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getBackupId().isEmpty()) { + backupId_ = other.backupId_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder 
mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + instanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + backupId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearInstanceId() { + instanceId_ = getDefaultInstance().getInstanceId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The bytes for instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object backupId_ = ""; + + /** + * + * + *
    +     * The id of the backup to delete, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @return The backupId. + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The id of the backup to delete, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @return The bytes for backupId. + */ + public com.google.protobuf.ByteString getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The id of the backup to delete, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @param value The backupId to set. + * @return This builder for chaining. + */ + public Builder setBackupId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + backupId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The id of the backup to delete, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @return This builder for chaining. + */ + public Builder clearBackupId() { + backupId_ = getDefaultInstance().getBackupId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The id of the backup to delete, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @param value The bytes for backupId to set. + * @return This builder for chaining. + */ + public Builder setBackupIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + backupId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.DeleteCloudBackupAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.DeleteCloudBackupAction) + private static final com.google.spanner.executor.v1.DeleteCloudBackupAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.DeleteCloudBackupAction(); + } + + public static com.google.spanner.executor.v1.DeleteCloudBackupAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteCloudBackupAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + 
+ @java.lang.Override + public com.google.spanner.executor.v1.DeleteCloudBackupAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudBackupActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudBackupActionOrBuilder.java new file mode 100644 index 000000000000..f896c6a37171 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudBackupActionOrBuilder.java @@ -0,0 +1,106 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface DeleteCloudBackupActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.DeleteCloudBackupAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + java.lang.String getInstanceId(); + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + com.google.protobuf.ByteString getInstanceIdBytes(); + + /** + * + * + *
    +   * The id of the backup to delete, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The backupId. + */ + java.lang.String getBackupId(); + + /** + * + * + *
    +   * The id of the backup to delete, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The bytes for backupId. + */ + com.google.protobuf.ByteString getBackupIdBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudInstanceAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudInstanceAction.java new file mode 100644 index 000000000000..3a3e5fbd6c5e --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudInstanceAction.java @@ -0,0 +1,786 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that deletes a Cloud Spanner instance.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.DeleteCloudInstanceAction} + */ +@com.google.protobuf.Generated +public final class DeleteCloudInstanceAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.DeleteCloudInstanceAction) + DeleteCloudInstanceActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DeleteCloudInstanceAction"); + } + + // Use DeleteCloudInstanceAction.newBuilder() to construct. + private DeleteCloudInstanceAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DeleteCloudInstanceAction() { + instanceId_ = ""; + projectId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DeleteCloudInstanceAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DeleteCloudInstanceAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.DeleteCloudInstanceAction.class, + com.google.spanner.executor.v1.DeleteCloudInstanceAction.Builder.class); + } + + public static final int INSTANCE_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The instanceId. + */ + @java.lang.Override + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The bytes for instanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROJECT_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, projectId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, projectId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.DeleteCloudInstanceAction)) { + return super.equals(obj); + } + 
com.google.spanner.executor.v1.DeleteCloudInstanceAction other = + (com.google.spanner.executor.v1.DeleteCloudInstanceAction) obj; + + if (!getInstanceId().equals(other.getInstanceId())) return false; + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.DeleteCloudInstanceAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DeleteCloudInstanceAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DeleteCloudInstanceAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DeleteCloudInstanceAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DeleteCloudInstanceAction parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DeleteCloudInstanceAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DeleteCloudInstanceAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DeleteCloudInstanceAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DeleteCloudInstanceAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DeleteCloudInstanceAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DeleteCloudInstanceAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DeleteCloudInstanceAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.DeleteCloudInstanceAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that deletes a Cloud Spanner instance.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.DeleteCloudInstanceAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.DeleteCloudInstanceAction) + com.google.spanner.executor.v1.DeleteCloudInstanceActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DeleteCloudInstanceAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DeleteCloudInstanceAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.DeleteCloudInstanceAction.class, + com.google.spanner.executor.v1.DeleteCloudInstanceAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.DeleteCloudInstanceAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + instanceId_ = ""; + projectId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DeleteCloudInstanceAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DeleteCloudInstanceAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.DeleteCloudInstanceAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.DeleteCloudInstanceAction build() { + 
com.google.spanner.executor.v1.DeleteCloudInstanceAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DeleteCloudInstanceAction buildPartial() { + com.google.spanner.executor.v1.DeleteCloudInstanceAction result = + new com.google.spanner.executor.v1.DeleteCloudInstanceAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.DeleteCloudInstanceAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.instanceId_ = instanceId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.projectId_ = projectId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.DeleteCloudInstanceAction) { + return mergeFrom((com.google.spanner.executor.v1.DeleteCloudInstanceAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.DeleteCloudInstanceAction other) { + if (other == com.google.spanner.executor.v1.DeleteCloudInstanceAction.getDefaultInstance()) + return this; + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + 
throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + instanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @return The instanceId. + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @return The bytes for instanceId. + */ + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @param value The instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearInstanceId() { + instanceId_ = getDefaultInstance().getInstanceId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @param value The bytes for instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.DeleteCloudInstanceAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.DeleteCloudInstanceAction) + private static final com.google.spanner.executor.v1.DeleteCloudInstanceAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.DeleteCloudInstanceAction(); + } + + public static com.google.spanner.executor.v1.DeleteCloudInstanceAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteCloudInstanceAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + 
return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DeleteCloudInstanceAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudInstanceActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudInstanceActionOrBuilder.java new file mode 100644 index 000000000000..ac90394f58a4 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudInstanceActionOrBuilder.java @@ -0,0 +1,80 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface DeleteCloudInstanceActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.DeleteCloudInstanceAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The instanceId. + */ + java.lang.String getInstanceId(); + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The bytes for instanceId. + */ + com.google.protobuf.ByteString getInstanceIdBytes(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteUserInstanceConfigAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteUserInstanceConfigAction.java new file mode 100644 index 000000000000..e31d83470e1b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteUserInstanceConfigAction.java @@ -0,0 +1,790 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that deletes a user instance configs.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.DeleteUserInstanceConfigAction} + */ +@com.google.protobuf.Generated +public final class DeleteUserInstanceConfigAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.DeleteUserInstanceConfigAction) + DeleteUserInstanceConfigActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DeleteUserInstanceConfigAction"); + } + + // Use DeleteUserInstanceConfigAction.newBuilder() to construct. + private DeleteUserInstanceConfigAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DeleteUserInstanceConfigAction() { + userConfigId_ = ""; + projectId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DeleteUserInstanceConfigAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DeleteUserInstanceConfigAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.DeleteUserInstanceConfigAction.class, + com.google.spanner.executor.v1.DeleteUserInstanceConfigAction.Builder.class); + } + + public static final int USER_CONFIG_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object userConfigId_ = ""; + + /** + * + * + *
    +   * User instance config ID (not path), e.g. "custom-config".
    +   * 
    + * + * string user_config_id = 1; + * + * @return The userConfigId. + */ + @java.lang.Override + public java.lang.String getUserConfigId() { + java.lang.Object ref = userConfigId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + userConfigId_ = s; + return s; + } + } + + /** + * + * + *
    +   * User instance config ID (not path), e.g. "custom-config".
    +   * 
    + * + * string user_config_id = 1; + * + * @return The bytes for userConfigId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getUserConfigIdBytes() { + java.lang.Object ref = userConfigId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + userConfigId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROJECT_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(userConfigId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, userConfigId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, projectId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(userConfigId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, userConfigId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, projectId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.DeleteUserInstanceConfigAction)) { + return super.equals(obj); + } + 
com.google.spanner.executor.v1.DeleteUserInstanceConfigAction other = + (com.google.spanner.executor.v1.DeleteUserInstanceConfigAction) obj; + + if (!getUserConfigId().equals(other.getUserConfigId())) return false; + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + USER_CONFIG_ID_FIELD_NUMBER; + hash = (53 * hash) + getUserConfigId().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.DeleteUserInstanceConfigAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DeleteUserInstanceConfigAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DeleteUserInstanceConfigAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DeleteUserInstanceConfigAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DeleteUserInstanceConfigAction 
parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DeleteUserInstanceConfigAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DeleteUserInstanceConfigAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DeleteUserInstanceConfigAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DeleteUserInstanceConfigAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DeleteUserInstanceConfigAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DeleteUserInstanceConfigAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DeleteUserInstanceConfigAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.DeleteUserInstanceConfigAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that deletes a user instance configs.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.DeleteUserInstanceConfigAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.DeleteUserInstanceConfigAction) + com.google.spanner.executor.v1.DeleteUserInstanceConfigActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DeleteUserInstanceConfigAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DeleteUserInstanceConfigAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.DeleteUserInstanceConfigAction.class, + com.google.spanner.executor.v1.DeleteUserInstanceConfigAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.DeleteUserInstanceConfigAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + userConfigId_ = ""; + projectId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DeleteUserInstanceConfigAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DeleteUserInstanceConfigAction + getDefaultInstanceForType() { + return com.google.spanner.executor.v1.DeleteUserInstanceConfigAction.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.executor.v1.DeleteUserInstanceConfigAction build() { + com.google.spanner.executor.v1.DeleteUserInstanceConfigAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DeleteUserInstanceConfigAction buildPartial() { + com.google.spanner.executor.v1.DeleteUserInstanceConfigAction result = + new com.google.spanner.executor.v1.DeleteUserInstanceConfigAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.executor.v1.DeleteUserInstanceConfigAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.userConfigId_ = userConfigId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.projectId_ = projectId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.DeleteUserInstanceConfigAction) { + return mergeFrom((com.google.spanner.executor.v1.DeleteUserInstanceConfigAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.DeleteUserInstanceConfigAction other) { + if (other + == com.google.spanner.executor.v1.DeleteUserInstanceConfigAction.getDefaultInstance()) + return this; + if (!other.getUserConfigId().isEmpty()) { + userConfigId_ = other.userConfigId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream 
input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + userConfigId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object userConfigId_ = ""; + + /** + * + * + *
    +     * User instance config ID (not path), e.g. "custom-config".
    +     * 
    + * + * string user_config_id = 1; + * + * @return The userConfigId. + */ + public java.lang.String getUserConfigId() { + java.lang.Object ref = userConfigId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + userConfigId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * User instance config ID (not path), e.g. "custom-config".
    +     * 
    + * + * string user_config_id = 1; + * + * @return The bytes for userConfigId. + */ + public com.google.protobuf.ByteString getUserConfigIdBytes() { + java.lang.Object ref = userConfigId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + userConfigId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * User instance config ID (not path), e.g. "custom-config".
    +     * 
    + * + * string user_config_id = 1; + * + * @param value The userConfigId to set. + * @return This builder for chaining. + */ + public Builder setUserConfigId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + userConfigId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * User instance config ID (not path), e.g. "custom-config".
    +     * 
    + * + * string user_config_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearUserConfigId() { + userConfigId_ = getDefaultInstance().getUserConfigId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * User instance config ID (not path), e.g. "custom-config".
    +     * 
    + * + * string user_config_id = 1; + * + * @param value The bytes for userConfigId to set. + * @return This builder for chaining. + */ + public Builder setUserConfigIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + userConfigId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.DeleteUserInstanceConfigAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.DeleteUserInstanceConfigAction) + private static final com.google.spanner.executor.v1.DeleteUserInstanceConfigAction + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.DeleteUserInstanceConfigAction(); + } + + public static com.google.spanner.executor.v1.DeleteUserInstanceConfigAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteUserInstanceConfigAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DeleteUserInstanceConfigAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteUserInstanceConfigActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteUserInstanceConfigActionOrBuilder.java new file mode 100644 index 000000000000..87afb9605b3c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteUserInstanceConfigActionOrBuilder.java @@ -0,0 +1,80 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface DeleteUserInstanceConfigActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.DeleteUserInstanceConfigAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * User instance config ID (not path), e.g. "custom-config".
    +   * 
    + * + * string user_config_id = 1; + * + * @return The userConfigId. + */ + java.lang.String getUserConfigId(); + + /** + * + * + *
    +   * User instance config ID (not path), e.g. "custom-config".
    +   * 
    + * + * string user_config_id = 1; + * + * @return The bytes for userConfigId. + */ + com.google.protobuf.ByteString getUserConfigIdBytes(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DmlAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DmlAction.java new file mode 100644 index 000000000000..19206b84b17d --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DmlAction.java @@ -0,0 +1,983 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * A single DML statement.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.DmlAction} + */ +@com.google.protobuf.Generated +public final class DmlAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.DmlAction) + DmlActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DmlAction"); + } + + // Use DmlAction.newBuilder() to construct. + private DmlAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DmlAction() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DmlAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DmlAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.DmlAction.class, + com.google.spanner.executor.v1.DmlAction.Builder.class); + } + + private int bitField0_; + public static final int UPDATE_FIELD_NUMBER = 1; + private com.google.spanner.executor.v1.QueryAction update_; + + /** + * + * + *
    +   * DML statement.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction update = 1; + * + * @return Whether the update field is set. + */ + @java.lang.Override + public boolean hasUpdate() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * DML statement.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction update = 1; + * + * @return The update. + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryAction getUpdate() { + return update_ == null + ? com.google.spanner.executor.v1.QueryAction.getDefaultInstance() + : update_; + } + + /** + * + * + *
    +   * DML statement.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction update = 1; + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryActionOrBuilder getUpdateOrBuilder() { + return update_ == null + ? com.google.spanner.executor.v1.QueryAction.getDefaultInstance() + : update_; + } + + public static final int AUTOCOMMIT_IF_SUPPORTED_FIELD_NUMBER = 2; + private boolean autocommitIfSupported_ = false; + + /** + * + * + *
    +   * Whether to autocommit the transaction after executing the DML statement,
    +   * if the Executor supports autocommit.
    +   * 
    + * + * optional bool autocommit_if_supported = 2; + * + * @return Whether the autocommitIfSupported field is set. + */ + @java.lang.Override + public boolean hasAutocommitIfSupported() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Whether to autocommit the transaction after executing the DML statement,
    +   * if the Executor supports autocommit.
    +   * 
    + * + * optional bool autocommit_if_supported = 2; + * + * @return The autocommitIfSupported. + */ + @java.lang.Override + public boolean getAutocommitIfSupported() { + return autocommitIfSupported_; + } + + public static final int LAST_STATEMENT_FIELD_NUMBER = 3; + private boolean lastStatement_ = false; + + /** + * + * + *
    +   * Whether to set this DML statement as the last statement in the
    +   * transaction. The transaction should be committed after processing this DML
    +   * statement.
    +   * 
    + * + * optional bool last_statement = 3; + * + * @return Whether the lastStatement field is set. + */ + @java.lang.Override + public boolean hasLastStatement() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * Whether to set this DML statement as the last statement in the
    +   * transaction. The transaction should be committed after processing this DML
    +   * statement.
    +   * 
    + * + * optional bool last_statement = 3; + * + * @return The lastStatement. + */ + @java.lang.Override + public boolean getLastStatement() { + return lastStatement_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getUpdate()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeBool(2, autocommitIfSupported_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeBool(3, lastStatement_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getUpdate()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(2, autocommitIfSupported_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, lastStatement_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.DmlAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.DmlAction other = (com.google.spanner.executor.v1.DmlAction) obj; + + if (hasUpdate() != other.hasUpdate()) return false; + if (hasUpdate()) { + if (!getUpdate().equals(other.getUpdate())) return false; + } + if (hasAutocommitIfSupported() 
!= other.hasAutocommitIfSupported()) return false; + if (hasAutocommitIfSupported()) { + if (getAutocommitIfSupported() != other.getAutocommitIfSupported()) return false; + } + if (hasLastStatement() != other.hasLastStatement()) return false; + if (hasLastStatement()) { + if (getLastStatement() != other.getLastStatement()) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasUpdate()) { + hash = (37 * hash) + UPDATE_FIELD_NUMBER; + hash = (53 * hash) + getUpdate().hashCode(); + } + if (hasAutocommitIfSupported()) { + hash = (37 * hash) + AUTOCOMMIT_IF_SUPPORTED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getAutocommitIfSupported()); + } + if (hasLastStatement()) { + hash = (37 * hash) + LAST_STATEMENT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getLastStatement()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.DmlAction parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DmlAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DmlAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DmlAction parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DmlAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DmlAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DmlAction parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DmlAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DmlAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DmlAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DmlAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DmlAction parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.DmlAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * A single DML statement.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.DmlAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.DmlAction) + com.google.spanner.executor.v1.DmlActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DmlAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DmlAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.DmlAction.class, + com.google.spanner.executor.v1.DmlAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.DmlAction.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetUpdateFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + update_ = null; + if (updateBuilder_ != null) { + updateBuilder_.dispose(); + updateBuilder_ = null; + } + autocommitIfSupported_ = false; + lastStatement_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DmlAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DmlAction getDefaultInstanceForType() { + 
return com.google.spanner.executor.v1.DmlAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.DmlAction build() { + com.google.spanner.executor.v1.DmlAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DmlAction buildPartial() { + com.google.spanner.executor.v1.DmlAction result = + new com.google.spanner.executor.v1.DmlAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.DmlAction result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.update_ = updateBuilder_ == null ? update_ : updateBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.autocommitIfSupported_ = autocommitIfSupported_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.lastStatement_ = lastStatement_; + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.DmlAction) { + return mergeFrom((com.google.spanner.executor.v1.DmlAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.DmlAction other) { + if (other == com.google.spanner.executor.v1.DmlAction.getDefaultInstance()) return this; + if (other.hasUpdate()) { + mergeUpdate(other.getUpdate()); + } + if (other.hasAutocommitIfSupported()) { + setAutocommitIfSupported(other.getAutocommitIfSupported()); + } + if (other.hasLastStatement()) { + setLastStatement(other.getLastStatement()); + } + this.mergeUnknownFields(other.getUnknownFields()); + 
onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(internalGetUpdateFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + autocommitIfSupported_ = input.readBool(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 24: + { + lastStatement_ = input.readBool(); + bitField0_ |= 0x00000004; + break; + } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.executor.v1.QueryAction update_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.QueryAction, + com.google.spanner.executor.v1.QueryAction.Builder, + com.google.spanner.executor.v1.QueryActionOrBuilder> + updateBuilder_; + + /** + * + * + *
    +     * DML statement.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction update = 1; + * + * @return Whether the update field is set. + */ + public boolean hasUpdate() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * DML statement.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction update = 1; + * + * @return The update. + */ + public com.google.spanner.executor.v1.QueryAction getUpdate() { + if (updateBuilder_ == null) { + return update_ == null + ? com.google.spanner.executor.v1.QueryAction.getDefaultInstance() + : update_; + } else { + return updateBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * DML statement.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction update = 1; + */ + public Builder setUpdate(com.google.spanner.executor.v1.QueryAction value) { + if (updateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + update_ = value; + } else { + updateBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * DML statement.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction update = 1; + */ + public Builder setUpdate(com.google.spanner.executor.v1.QueryAction.Builder builderForValue) { + if (updateBuilder_ == null) { + update_ = builderForValue.build(); + } else { + updateBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * DML statement.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction update = 1; + */ + public Builder mergeUpdate(com.google.spanner.executor.v1.QueryAction value) { + if (updateBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && update_ != null + && update_ != com.google.spanner.executor.v1.QueryAction.getDefaultInstance()) { + getUpdateBuilder().mergeFrom(value); + } else { + update_ = value; + } + } else { + updateBuilder_.mergeFrom(value); + } + if (update_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * DML statement.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction update = 1; + */ + public Builder clearUpdate() { + bitField0_ = (bitField0_ & ~0x00000001); + update_ = null; + if (updateBuilder_ != null) { + updateBuilder_.dispose(); + updateBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * DML statement.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction update = 1; + */ + public com.google.spanner.executor.v1.QueryAction.Builder getUpdateBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetUpdateFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * DML statement.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction update = 1; + */ + public com.google.spanner.executor.v1.QueryActionOrBuilder getUpdateOrBuilder() { + if (updateBuilder_ != null) { + return updateBuilder_.getMessageOrBuilder(); + } else { + return update_ == null + ? com.google.spanner.executor.v1.QueryAction.getDefaultInstance() + : update_; + } + } + + /** + * + * + *
    +     * DML statement.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction update = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.QueryAction, + com.google.spanner.executor.v1.QueryAction.Builder, + com.google.spanner.executor.v1.QueryActionOrBuilder> + internalGetUpdateFieldBuilder() { + if (updateBuilder_ == null) { + updateBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.QueryAction, + com.google.spanner.executor.v1.QueryAction.Builder, + com.google.spanner.executor.v1.QueryActionOrBuilder>( + getUpdate(), getParentForChildren(), isClean()); + update_ = null; + } + return updateBuilder_; + } + + private boolean autocommitIfSupported_; + + /** + * + * + *
    +     * Whether to autocommit the transaction after executing the DML statement,
    +     * if the Executor supports autocommit.
    +     * 
    + * + * optional bool autocommit_if_supported = 2; + * + * @return Whether the autocommitIfSupported field is set. + */ + @java.lang.Override + public boolean hasAutocommitIfSupported() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Whether to autocommit the transaction after executing the DML statement,
    +     * if the Executor supports autocommit.
    +     * 
    + * + * optional bool autocommit_if_supported = 2; + * + * @return The autocommitIfSupported. + */ + @java.lang.Override + public boolean getAutocommitIfSupported() { + return autocommitIfSupported_; + } + + /** + * + * + *
    +     * Whether to autocommit the transaction after executing the DML statement,
    +     * if the Executor supports autocommit.
    +     * 
    + * + * optional bool autocommit_if_supported = 2; + * + * @param value The autocommitIfSupported to set. + * @return This builder for chaining. + */ + public Builder setAutocommitIfSupported(boolean value) { + + autocommitIfSupported_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Whether to autocommit the transaction after executing the DML statement,
    +     * if the Executor supports autocommit.
    +     * 
    + * + * optional bool autocommit_if_supported = 2; + * + * @return This builder for chaining. + */ + public Builder clearAutocommitIfSupported() { + bitField0_ = (bitField0_ & ~0x00000002); + autocommitIfSupported_ = false; + onChanged(); + return this; + } + + private boolean lastStatement_; + + /** + * + * + *
    +     * Whether to set this DML statement as the last statement in the
    +     * transaction. The transaction should be committed after processing this DML
    +     * statement.
    +     * 
    + * + * optional bool last_statement = 3; + * + * @return Whether the lastStatement field is set. + */ + @java.lang.Override + public boolean hasLastStatement() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Whether to set this DML statement as the last statement in the
    +     * transaction. The transaction should be committed after processing this DML
    +     * statement.
    +     * 
    + * + * optional bool last_statement = 3; + * + * @return The lastStatement. + */ + @java.lang.Override + public boolean getLastStatement() { + return lastStatement_; + } + + /** + * + * + *
    +     * Whether to set this DML statement as the last statement in the
    +     * transaction. The transaction should be committed after processing this DML
    +     * statement.
    +     * 
    + * + * optional bool last_statement = 3; + * + * @param value The lastStatement to set. + * @return This builder for chaining. + */ + public Builder setLastStatement(boolean value) { + + lastStatement_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Whether to set this DML statement as the last statement in the
    +     * transaction. The transaction should be committed after processing this DML
    +     * statement.
    +     * 
    + * + * optional bool last_statement = 3; + * + * @return This builder for chaining. + */ + public Builder clearLastStatement() { + bitField0_ = (bitField0_ & ~0x00000004); + lastStatement_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.DmlAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.DmlAction) + private static final com.google.spanner.executor.v1.DmlAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.DmlAction(); + } + + public static com.google.spanner.executor.v1.DmlAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DmlAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DmlAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DmlActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DmlActionOrBuilder.java new file mode 100644 index 000000000000..9f4ec9ebc829 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DmlActionOrBuilder.java @@ -0,0 +1,123 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface DmlActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.DmlAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * DML statement.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction update = 1; + * + * @return Whether the update field is set. + */ + boolean hasUpdate(); + + /** + * + * + *
    +   * DML statement.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction update = 1; + * + * @return The update. + */ + com.google.spanner.executor.v1.QueryAction getUpdate(); + + /** + * + * + *
    +   * DML statement.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction update = 1; + */ + com.google.spanner.executor.v1.QueryActionOrBuilder getUpdateOrBuilder(); + + /** + * + * + *
    +   * Whether to autocommit the transaction after executing the DML statement,
    +   * if the Executor supports autocommit.
    +   * 
    + * + * optional bool autocommit_if_supported = 2; + * + * @return Whether the autocommitIfSupported field is set. + */ + boolean hasAutocommitIfSupported(); + + /** + * + * + *
    +   * Whether to autocommit the transaction after executing the DML statement,
    +   * if the Executor supports autocommit.
    +   * 
    + * + * optional bool autocommit_if_supported = 2; + * + * @return The autocommitIfSupported. + */ + boolean getAutocommitIfSupported(); + + /** + * + * + *
    +   * Whether to set this DML statement as the last statement in the
    +   * transaction. The transaction should be committed after processing this DML
    +   * statement.
    +   * 
    + * + * optional bool last_statement = 3; + * + * @return Whether the lastStatement field is set. + */ + boolean hasLastStatement(); + + /** + * + * + *
    +   * Whether to set this DML statement as the last statement in the
    +   * transaction. The transaction should be committed after processing this DML
    +   * statement.
    +   * 
    + * + * optional bool last_statement = 3; + * + * @return The lastStatement. + */ + boolean getLastStatement(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DropCloudDatabaseAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DropCloudDatabaseAction.java new file mode 100644 index 000000000000..735e11106b7e --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DropCloudDatabaseAction.java @@ -0,0 +1,975 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that drops a Cloud Spanner database.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.DropCloudDatabaseAction} + */ +@com.google.protobuf.Generated +public final class DropCloudDatabaseAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.DropCloudDatabaseAction) + DropCloudDatabaseActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DropCloudDatabaseAction"); + } + + // Use DropCloudDatabaseAction.newBuilder() to construct. + private DropCloudDatabaseAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DropCloudDatabaseAction() { + instanceId_ = ""; + projectId_ = ""; + databaseId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DropCloudDatabaseAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DropCloudDatabaseAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.DropCloudDatabaseAction.class, + com.google.spanner.executor.v1.DropCloudDatabaseAction.Builder.class); + } + + public static final int INSTANCE_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The instanceId. + */ + @java.lang.Override + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The bytes for instanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROJECT_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DATABASE_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object databaseId_ = ""; + + /** + * + * + *
    +   * Cloud database ID (not full path), e.g. "db0".
    +   * 
    + * + * string database_id = 3; + * + * @return The databaseId. + */ + @java.lang.Override + public java.lang.String getDatabaseId() { + java.lang.Object ref = databaseId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud database ID (not full path), e.g. "db0".
    +   * 
    + * + * string database_id = 3; + * + * @return The bytes for databaseId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseIdBytes() { + java.lang.Object ref = databaseId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, databaseId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, databaseId_); + } + size += 
getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.DropCloudDatabaseAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.DropCloudDatabaseAction other = + (com.google.spanner.executor.v1.DropCloudDatabaseAction) obj; + + if (!getInstanceId().equals(other.getInstanceId())) return false; + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getDatabaseId().equals(other.getDatabaseId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + DATABASE_ID_FIELD_NUMBER; + hash = (53 * hash) + getDatabaseId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.DropCloudDatabaseAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DropCloudDatabaseAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DropCloudDatabaseAction parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DropCloudDatabaseAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DropCloudDatabaseAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.DropCloudDatabaseAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DropCloudDatabaseAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DropCloudDatabaseAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.DropCloudDatabaseAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DropCloudDatabaseAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public 
static com.google.spanner.executor.v1.DropCloudDatabaseAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.DropCloudDatabaseAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.DropCloudDatabaseAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that drops a Cloud Spanner database.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.DropCloudDatabaseAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.DropCloudDatabaseAction) + com.google.spanner.executor.v1.DropCloudDatabaseActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DropCloudDatabaseAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DropCloudDatabaseAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.DropCloudDatabaseAction.class, + com.google.spanner.executor.v1.DropCloudDatabaseAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.DropCloudDatabaseAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + instanceId_ = ""; + projectId_ = ""; + databaseId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_DropCloudDatabaseAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DropCloudDatabaseAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.DropCloudDatabaseAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.DropCloudDatabaseAction build() { + 
com.google.spanner.executor.v1.DropCloudDatabaseAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DropCloudDatabaseAction buildPartial() { + com.google.spanner.executor.v1.DropCloudDatabaseAction result = + new com.google.spanner.executor.v1.DropCloudDatabaseAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.DropCloudDatabaseAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.instanceId_ = instanceId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.projectId_ = projectId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.databaseId_ = databaseId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.DropCloudDatabaseAction) { + return mergeFrom((com.google.spanner.executor.v1.DropCloudDatabaseAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.DropCloudDatabaseAction other) { + if (other == com.google.spanner.executor.v1.DropCloudDatabaseAction.getDefaultInstance()) + return this; + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getDatabaseId().isEmpty()) { + databaseId_ = other.databaseId_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public 
Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + instanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + databaseId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @return The instanceId. + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @return The bytes for instanceId. + */ + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @param value The instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearInstanceId() { + instanceId_ = getDefaultInstance().getInstanceId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @param value The bytes for instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object databaseId_ = ""; + + /** + * + * + *
    +     * Cloud database ID (not full path), e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @return The databaseId. + */ + public java.lang.String getDatabaseId() { + java.lang.Object ref = databaseId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud database ID (not full path), e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @return The bytes for databaseId. + */ + public com.google.protobuf.ByteString getDatabaseIdBytes() { + java.lang.Object ref = databaseId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud database ID (not full path), e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @param value The databaseId to set. + * @return This builder for chaining. + */ + public Builder setDatabaseId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + databaseId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud database ID (not full path), e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @return This builder for chaining. + */ + public Builder clearDatabaseId() { + databaseId_ = getDefaultInstance().getDatabaseId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud database ID (not full path), e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @param value The bytes for databaseId to set. + * @return This builder for chaining. + */ + public Builder setDatabaseIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + databaseId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.DropCloudDatabaseAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.DropCloudDatabaseAction) + private static final com.google.spanner.executor.v1.DropCloudDatabaseAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.DropCloudDatabaseAction(); + } + + public static com.google.spanner.executor.v1.DropCloudDatabaseAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DropCloudDatabaseAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return 
PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.DropCloudDatabaseAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DropCloudDatabaseActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DropCloudDatabaseActionOrBuilder.java new file mode 100644 index 000000000000..e4ab5af63ad1 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DropCloudDatabaseActionOrBuilder.java @@ -0,0 +1,106 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface DropCloudDatabaseActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.DropCloudDatabaseAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The instanceId. + */ + java.lang.String getInstanceId(); + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The bytes for instanceId. + */ + com.google.protobuf.ByteString getInstanceIdBytes(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * Cloud database ID (not full path), e.g. "db0".
    +   * 
    + * + * string database_id = 3; + * + * @return The databaseId. + */ + java.lang.String getDatabaseId(); + + /** + * + * + *
    +   * Cloud database ID (not full path), e.g. "db0".
    +   * 
    + * + * string database_id = 3; + * + * @return The bytes for databaseId. + */ + com.google.protobuf.ByteString getDatabaseIdBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecuteChangeStreamQuery.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecuteChangeStreamQuery.java new file mode 100644 index 000000000000..f605cdc41320 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecuteChangeStreamQuery.java @@ -0,0 +1,2222 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Execute a change stream TVF query.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ExecuteChangeStreamQuery} + */ +@com.google.protobuf.Generated +public final class ExecuteChangeStreamQuery extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.ExecuteChangeStreamQuery) + ExecuteChangeStreamQueryOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ExecuteChangeStreamQuery"); + } + + // Use ExecuteChangeStreamQuery.newBuilder() to construct. + private ExecuteChangeStreamQuery(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ExecuteChangeStreamQuery() { + name_ = ""; + partitionToken_ = ""; + readOptions_ = com.google.protobuf.LazyStringArrayList.emptyList(); + cloudDatabaseRole_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ExecuteChangeStreamQuery_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ExecuteChangeStreamQuery_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ExecuteChangeStreamQuery.class, + com.google.spanner.executor.v1.ExecuteChangeStreamQuery.Builder.class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Name for this change stream.
    +   * 
    + * + * string name = 1; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Name for this change stream.
    +   * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int START_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp startTime_; + + /** + * + * + *
    +   * Specifies that records with commit_timestamp greater than or equal to
    +   * start_time should be returned.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return Whether the startTime field is set. + */ + @java.lang.Override + public boolean hasStartTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Specifies that records with commit_timestamp greater than or equal to
    +   * start_time should be returned.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return The startTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getStartTime() { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + + /** + * + * + *
    +   * Specifies that records with commit_timestamp greater than or equal to
    +   * start_time should be returned.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + + public static final int END_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp endTime_; + + /** + * + * + *
    +   * Specifies that records with commit_timestamp less than or equal to
    +   * end_time should be returned.
    +   * 
    + * + * optional .google.protobuf.Timestamp end_time = 3; + * + * @return Whether the endTime field is set. + */ + @java.lang.Override + public boolean hasEndTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Specifies that records with commit_timestamp less than or equal to
    +   * end_time should be returned.
    +   * 
    + * + * optional .google.protobuf.Timestamp end_time = 3; + * + * @return The endTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getEndTime() { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + + /** + * + * + *
    +   * Specifies that records with commit_timestamp less than or equal to
    +   * end_time should be returned.
    +   * 
    + * + * optional .google.protobuf.Timestamp end_time = 3; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder() { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + + public static final int PARTITION_TOKEN_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object partitionToken_ = ""; + + /** + * + * + *
    +   * Specifies which change stream partition to query, based on the content of
    +   * child partitions records.
    +   * 
    + * + * optional string partition_token = 4; + * + * @return Whether the partitionToken field is set. + */ + @java.lang.Override + public boolean hasPartitionToken() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * Specifies which change stream partition to query, based on the content of
    +   * child partitions records.
    +   * 
    + * + * optional string partition_token = 4; + * + * @return The partitionToken. + */ + @java.lang.Override + public java.lang.String getPartitionToken() { + java.lang.Object ref = partitionToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + partitionToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * Specifies which change stream partition to query, based on the content of
    +   * child partitions records.
    +   * 
    + * + * optional string partition_token = 4; + * + * @return The bytes for partitionToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPartitionTokenBytes() { + java.lang.Object ref = partitionToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + partitionToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int READ_OPTIONS_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList readOptions_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * Read options for this change stream query.
    +   * 
    + * + * repeated string read_options = 5; + * + * @return A list containing the readOptions. + */ + public com.google.protobuf.ProtocolStringList getReadOptionsList() { + return readOptions_; + } + + /** + * + * + *
    +   * Read options for this change stream query.
    +   * 
    + * + * repeated string read_options = 5; + * + * @return The count of readOptions. + */ + public int getReadOptionsCount() { + return readOptions_.size(); + } + + /** + * + * + *
    +   * Read options for this change stream query.
    +   * 
    + * + * repeated string read_options = 5; + * + * @param index The index of the element to return. + * @return The readOptions at the given index. + */ + public java.lang.String getReadOptions(int index) { + return readOptions_.get(index); + } + + /** + * + * + *
    +   * Read options for this change stream query.
    +   * 
    + * + * repeated string read_options = 5; + * + * @param index The index of the value to return. + * @return The bytes of the readOptions at the given index. + */ + public com.google.protobuf.ByteString getReadOptionsBytes(int index) { + return readOptions_.getByteString(index); + } + + public static final int HEARTBEAT_MILLISECONDS_FIELD_NUMBER = 6; + private int heartbeatMilliseconds_ = 0; + + /** + * + * + *
    +   * Determines how frequently a heartbeat ChangeRecord will be returned in case
    +   * there are no transactions committed in this partition, in milliseconds.
    +   * 
    + * + * optional int32 heartbeat_milliseconds = 6; + * + * @return Whether the heartbeatMilliseconds field is set. + */ + @java.lang.Override + public boolean hasHeartbeatMilliseconds() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +   * Determines how frequently a heartbeat ChangeRecord will be returned in case
    +   * there are no transactions committed in this partition, in milliseconds.
    +   * 
    + * + * optional int32 heartbeat_milliseconds = 6; + * + * @return The heartbeatMilliseconds. + */ + @java.lang.Override + public int getHeartbeatMilliseconds() { + return heartbeatMilliseconds_; + } + + public static final int DEADLINE_SECONDS_FIELD_NUMBER = 7; + private long deadlineSeconds_ = 0L; + + /** + * + * + *
    +   * Deadline for this change stream query, in seconds.
    +   * 
    + * + * optional int64 deadline_seconds = 7; + * + * @return Whether the deadlineSeconds field is set. + */ + @java.lang.Override + public boolean hasDeadlineSeconds() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +   * Deadline for this change stream query, in seconds.
    +   * 
    + * + * optional int64 deadline_seconds = 7; + * + * @return The deadlineSeconds. + */ + @java.lang.Override + public long getDeadlineSeconds() { + return deadlineSeconds_; + } + + public static final int CLOUD_DATABASE_ROLE_FIELD_NUMBER = 8; + + @SuppressWarnings("serial") + private volatile java.lang.Object cloudDatabaseRole_ = ""; + + /** + * + * + *
    +   * Database role to assume while performing this action. This should only be
    +   * set for cloud requests. Setting the database role will enforce additional
    +   * role-based access checks on this action.
    +   * 
    + * + * optional string cloud_database_role = 8; + * + * @return Whether the cloudDatabaseRole field is set. + */ + @java.lang.Override + public boolean hasCloudDatabaseRole() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
    +   * Database role to assume while performing this action. This should only be
    +   * set for cloud requests. Setting the database role will enforce additional
    +   * role-based access checks on this action.
    +   * 
    + * + * optional string cloud_database_role = 8; + * + * @return The cloudDatabaseRole. + */ + @java.lang.Override + public java.lang.String getCloudDatabaseRole() { + java.lang.Object ref = cloudDatabaseRole_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + cloudDatabaseRole_ = s; + return s; + } + } + + /** + * + * + *
    +   * Database role to assume while performing this action. This should only be
    +   * set for cloud requests. Setting the database role will enforce additional
    +   * role-based access checks on this action.
    +   * 
    + * + * optional string cloud_database_role = 8; + * + * @return The bytes for cloudDatabaseRole. + */ + @java.lang.Override + public com.google.protobuf.ByteString getCloudDatabaseRoleBytes() { + java.lang.Object ref = cloudDatabaseRole_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + cloudDatabaseRole_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getStartTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getEndTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, partitionToken_); + } + for (int i = 0; i < readOptions_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, readOptions_.getRaw(i)); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeInt32(6, heartbeatMilliseconds_); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeInt64(7, deadlineSeconds_); + } + if (((bitField0_ & 0x00000020) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 8, cloudDatabaseRole_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if 
(!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getStartTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getEndTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, partitionToken_); + } + { + int dataSize = 0; + for (int i = 0; i < readOptions_.size(); i++) { + dataSize += computeStringSizeNoTag(readOptions_.getRaw(i)); + } + size += dataSize; + size += 1 * getReadOptionsList().size(); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(6, heartbeatMilliseconds_); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(7, deadlineSeconds_); + } + if (((bitField0_ & 0x00000020) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(8, cloudDatabaseRole_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.ExecuteChangeStreamQuery)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.ExecuteChangeStreamQuery other = + (com.google.spanner.executor.v1.ExecuteChangeStreamQuery) obj; + + if (!getName().equals(other.getName())) return false; + if (hasStartTime() != other.hasStartTime()) return false; + if (hasStartTime()) { + if (!getStartTime().equals(other.getStartTime())) return false; + } + if (hasEndTime() != other.hasEndTime()) return false; + if (hasEndTime()) { + if (!getEndTime().equals(other.getEndTime())) return false; + } + if (hasPartitionToken() != 
other.hasPartitionToken()) return false; + if (hasPartitionToken()) { + if (!getPartitionToken().equals(other.getPartitionToken())) return false; + } + if (!getReadOptionsList().equals(other.getReadOptionsList())) return false; + if (hasHeartbeatMilliseconds() != other.hasHeartbeatMilliseconds()) return false; + if (hasHeartbeatMilliseconds()) { + if (getHeartbeatMilliseconds() != other.getHeartbeatMilliseconds()) return false; + } + if (hasDeadlineSeconds() != other.hasDeadlineSeconds()) return false; + if (hasDeadlineSeconds()) { + if (getDeadlineSeconds() != other.getDeadlineSeconds()) return false; + } + if (hasCloudDatabaseRole() != other.hasCloudDatabaseRole()) return false; + if (hasCloudDatabaseRole()) { + if (!getCloudDatabaseRole().equals(other.getCloudDatabaseRole())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (hasStartTime()) { + hash = (37 * hash) + START_TIME_FIELD_NUMBER; + hash = (53 * hash) + getStartTime().hashCode(); + } + if (hasEndTime()) { + hash = (37 * hash) + END_TIME_FIELD_NUMBER; + hash = (53 * hash) + getEndTime().hashCode(); + } + if (hasPartitionToken()) { + hash = (37 * hash) + PARTITION_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPartitionToken().hashCode(); + } + if (getReadOptionsCount() > 0) { + hash = (37 * hash) + READ_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getReadOptionsList().hashCode(); + } + if (hasHeartbeatMilliseconds()) { + hash = (37 * hash) + HEARTBEAT_MILLISECONDS_FIELD_NUMBER; + hash = (53 * hash) + getHeartbeatMilliseconds(); + } + if (hasDeadlineSeconds()) { + hash = (37 * hash) + DEADLINE_SECONDS_FIELD_NUMBER; + hash = (53 * hash) + 
com.google.protobuf.Internal.hashLong(getDeadlineSeconds()); + } + if (hasCloudDatabaseRole()) { + hash = (37 * hash) + CLOUD_DATABASE_ROLE_FIELD_NUMBER; + hash = (53 * hash) + getCloudDatabaseRole().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.ExecuteChangeStreamQuery parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ExecuteChangeStreamQuery parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ExecuteChangeStreamQuery parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ExecuteChangeStreamQuery parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ExecuteChangeStreamQuery parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ExecuteChangeStreamQuery parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ExecuteChangeStreamQuery parseFrom( + java.io.InputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ExecuteChangeStreamQuery parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ExecuteChangeStreamQuery parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ExecuteChangeStreamQuery parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ExecuteChangeStreamQuery parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ExecuteChangeStreamQuery parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.ExecuteChangeStreamQuery prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Execute a change stream TVF query.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ExecuteChangeStreamQuery} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.ExecuteChangeStreamQuery) + com.google.spanner.executor.v1.ExecuteChangeStreamQueryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ExecuteChangeStreamQuery_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ExecuteChangeStreamQuery_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ExecuteChangeStreamQuery.class, + com.google.spanner.executor.v1.ExecuteChangeStreamQuery.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.ExecuteChangeStreamQuery.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetStartTimeFieldBuilder(); + internalGetEndTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + startTime_ = null; + if (startTimeBuilder_ != null) { + startTimeBuilder_.dispose(); + startTimeBuilder_ = null; + } + endTime_ = null; + if (endTimeBuilder_ != null) { + endTimeBuilder_.dispose(); + endTimeBuilder_ = null; + } + partitionToken_ = ""; + readOptions_ = com.google.protobuf.LazyStringArrayList.emptyList(); + heartbeatMilliseconds_ = 
0; + deadlineSeconds_ = 0L; + cloudDatabaseRole_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ExecuteChangeStreamQuery_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ExecuteChangeStreamQuery getDefaultInstanceForType() { + return com.google.spanner.executor.v1.ExecuteChangeStreamQuery.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.ExecuteChangeStreamQuery build() { + com.google.spanner.executor.v1.ExecuteChangeStreamQuery result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ExecuteChangeStreamQuery buildPartial() { + com.google.spanner.executor.v1.ExecuteChangeStreamQuery result = + new com.google.spanner.executor.v1.ExecuteChangeStreamQuery(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.ExecuteChangeStreamQuery result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.startTime_ = startTimeBuilder_ == null ? startTime_ : startTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.endTime_ = endTimeBuilder_ == null ? 
endTime_ : endTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.partitionToken_ = partitionToken_; + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + readOptions_.makeImmutable(); + result.readOptions_ = readOptions_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.heartbeatMilliseconds_ = heartbeatMilliseconds_; + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.deadlineSeconds_ = deadlineSeconds_; + to_bitField0_ |= 0x00000010; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.cloudDatabaseRole_ = cloudDatabaseRole_; + to_bitField0_ |= 0x00000020; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.ExecuteChangeStreamQuery) { + return mergeFrom((com.google.spanner.executor.v1.ExecuteChangeStreamQuery) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.ExecuteChangeStreamQuery other) { + if (other == com.google.spanner.executor.v1.ExecuteChangeStreamQuery.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasStartTime()) { + mergeStartTime(other.getStartTime()); + } + if (other.hasEndTime()) { + mergeEndTime(other.getEndTime()); + } + if (other.hasPartitionToken()) { + partitionToken_ = other.partitionToken_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (!other.readOptions_.isEmpty()) { + if (readOptions_.isEmpty()) { + readOptions_ = other.readOptions_; + bitField0_ |= 0x00000010; + } else { + ensureReadOptionsIsMutable(); + readOptions_.addAll(other.readOptions_); + } + onChanged(); + } + if (other.hasHeartbeatMilliseconds()) { + setHeartbeatMilliseconds(other.getHeartbeatMilliseconds()); 
+ } + if (other.hasDeadlineSeconds()) { + setDeadlineSeconds(other.getDeadlineSeconds()); + } + if (other.hasCloudDatabaseRole()) { + cloudDatabaseRole_ = other.cloudDatabaseRole_; + bitField0_ |= 0x00000080; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetStartTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage(internalGetEndTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + partitionToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureReadOptionsIsMutable(); + readOptions_.add(s); + break; + } // case 42 + case 48: + { + heartbeatMilliseconds_ = input.readInt32(); + bitField0_ |= 0x00000020; + break; + } // case 48 + case 56: + { + deadlineSeconds_ = input.readInt64(); + bitField0_ |= 0x00000040; + break; + } // case 56 + case 66: + { + cloudDatabaseRole_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000080; + break; + } // case 66 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch 
(tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Name for this change stream.
    +     * 
    + * + * string name = 1; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Name for this change stream.
    +     * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Name for this change stream.
    +     * 
    + * + * string name = 1; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Name for this change stream.
    +     * 
    + * + * string name = 1; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Name for this change stream.
    +     * 
    + * + * string name = 1; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp startTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + startTimeBuilder_; + + /** + * + * + *
    +     * Specifies that records with commit_timestamp greater than or equal to
    +     * start_time should be returned.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return Whether the startTime field is set. + */ + public boolean hasStartTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Specifies that records with commit_timestamp greater than or equal to
    +     * start_time should be returned.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return The startTime. + */ + public com.google.protobuf.Timestamp getStartTime() { + if (startTimeBuilder_ == null) { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } else { + return startTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Specifies that records with commit_timestamp greater than or equal to
    +     * start_time should be returned.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder setStartTime(com.google.protobuf.Timestamp value) { + if (startTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + startTime_ = value; + } else { + startTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Specifies that records with commit_timestamp greater than or equal to
    +     * start_time should be returned.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder setStartTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (startTimeBuilder_ == null) { + startTime_ = builderForValue.build(); + } else { + startTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Specifies that records with commit_timestamp greater than or equal to
    +     * start_time should be returned.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder mergeStartTime(com.google.protobuf.Timestamp value) { + if (startTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && startTime_ != null + && startTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getStartTimeBuilder().mergeFrom(value); + } else { + startTime_ = value; + } + } else { + startTimeBuilder_.mergeFrom(value); + } + if (startTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Specifies that records with commit_timestamp greater than or equal to
    +     * start_time should be returned.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public Builder clearStartTime() { + bitField0_ = (bitField0_ & ~0x00000002); + startTime_ = null; + if (startTimeBuilder_ != null) { + startTimeBuilder_.dispose(); + startTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Specifies that records with commit_timestamp greater than or equal to
    +     * start_time should be returned.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public com.google.protobuf.Timestamp.Builder getStartTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetStartTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Specifies that records with commit_timestamp greater than or equal to
    +     * start_time should be returned.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { + if (startTimeBuilder_ != null) { + return startTimeBuilder_.getMessageOrBuilder(); + } else { + return startTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : startTime_; + } + } + + /** + * + * + *
    +     * Specifies that records with commit_timestamp greater than or equal to
    +     * start_time should be returned.
    +     * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetStartTimeFieldBuilder() { + if (startTimeBuilder_ == null) { + startTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getStartTime(), getParentForChildren(), isClean()); + startTime_ = null; + } + return startTimeBuilder_; + } + + private com.google.protobuf.Timestamp endTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + endTimeBuilder_; + + /** + * + * + *
    +     * Specifies that records with commit_timestamp less than or equal to
    +     * end_time should be returned.
    +     * 
    + * + * optional .google.protobuf.Timestamp end_time = 3; + * + * @return Whether the endTime field is set. + */ + public boolean hasEndTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Specifies that records with commit_timestamp less than or equal to
    +     * end_time should be returned.
    +     * 
    + * + * optional .google.protobuf.Timestamp end_time = 3; + * + * @return The endTime. + */ + public com.google.protobuf.Timestamp getEndTime() { + if (endTimeBuilder_ == null) { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } else { + return endTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Specifies that records with commit_timestamp less than or equal to
    +     * end_time should be returned.
    +     * 
    + * + * optional .google.protobuf.Timestamp end_time = 3; + */ + public Builder setEndTime(com.google.protobuf.Timestamp value) { + if (endTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + endTime_ = value; + } else { + endTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Specifies that records with commit_timestamp less than or equal to
    +     * end_time should be returned.
    +     * 
    + * + * optional .google.protobuf.Timestamp end_time = 3; + */ + public Builder setEndTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (endTimeBuilder_ == null) { + endTime_ = builderForValue.build(); + } else { + endTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Specifies that records with commit_timestamp less than or equal to
    +     * end_time should be returned.
    +     * 
    + * + * optional .google.protobuf.Timestamp end_time = 3; + */ + public Builder mergeEndTime(com.google.protobuf.Timestamp value) { + if (endTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && endTime_ != null + && endTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getEndTimeBuilder().mergeFrom(value); + } else { + endTime_ = value; + } + } else { + endTimeBuilder_.mergeFrom(value); + } + if (endTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Specifies that records with commit_timestamp less than or equal to
    +     * end_time should be returned.
    +     * 
    + * + * optional .google.protobuf.Timestamp end_time = 3; + */ + public Builder clearEndTime() { + bitField0_ = (bitField0_ & ~0x00000004); + endTime_ = null; + if (endTimeBuilder_ != null) { + endTimeBuilder_.dispose(); + endTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Specifies that records with commit_timestamp less than or equal to
    +     * end_time should be returned.
    +     * 
    + * + * optional .google.protobuf.Timestamp end_time = 3; + */ + public com.google.protobuf.Timestamp.Builder getEndTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetEndTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Specifies that records with commit_timestamp less than or equal to
    +     * end_time should be returned.
    +     * 
    + * + * optional .google.protobuf.Timestamp end_time = 3; + */ + public com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder() { + if (endTimeBuilder_ != null) { + return endTimeBuilder_.getMessageOrBuilder(); + } else { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + } + + /** + * + * + *
    +     * Specifies that records with commit_timestamp less than or equal to
    +     * end_time should be returned.
    +     * 
    + * + * optional .google.protobuf.Timestamp end_time = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetEndTimeFieldBuilder() { + if (endTimeBuilder_ == null) { + endTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getEndTime(), getParentForChildren(), isClean()); + endTime_ = null; + } + return endTimeBuilder_; + } + + private java.lang.Object partitionToken_ = ""; + + /** + * + * + *
    +     * Specifies which change stream partition to query, based on the content of
    +     * child partitions records.
    +     * 
    + * + * optional string partition_token = 4; + * + * @return Whether the partitionToken field is set. + */ + public boolean hasPartitionToken() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Specifies which change stream partition to query, based on the content of
    +     * child partitions records.
    +     * 
    + * + * optional string partition_token = 4; + * + * @return The partitionToken. + */ + public java.lang.String getPartitionToken() { + java.lang.Object ref = partitionToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + partitionToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Specifies which change stream partition to query, based on the content of
    +     * child partitions records.
    +     * 
    + * + * optional string partition_token = 4; + * + * @return The bytes for partitionToken. + */ + public com.google.protobuf.ByteString getPartitionTokenBytes() { + java.lang.Object ref = partitionToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + partitionToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Specifies which change stream partition to query, based on the content of
    +     * child partitions records.
    +     * 
    + * + * optional string partition_token = 4; + * + * @param value The partitionToken to set. + * @return This builder for chaining. + */ + public Builder setPartitionToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + partitionToken_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Specifies which change stream partition to query, based on the content of
    +     * child partitions records.
    +     * 
    + * + * optional string partition_token = 4; + * + * @return This builder for chaining. + */ + public Builder clearPartitionToken() { + partitionToken_ = getDefaultInstance().getPartitionToken(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Specifies which change stream partition to query, based on the content of
    +     * child partitions records.
    +     * 
    + * + * optional string partition_token = 4; + * + * @param value The bytes for partitionToken to set. + * @return This builder for chaining. + */ + public Builder setPartitionTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + partitionToken_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList readOptions_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureReadOptionsIsMutable() { + if (!readOptions_.isModifiable()) { + readOptions_ = new com.google.protobuf.LazyStringArrayList(readOptions_); + } + bitField0_ |= 0x00000010; + } + + /** + * + * + *
    +     * Read options for this change stream query.
    +     * 
    + * + * repeated string read_options = 5; + * + * @return A list containing the readOptions. + */ + public com.google.protobuf.ProtocolStringList getReadOptionsList() { + readOptions_.makeImmutable(); + return readOptions_; + } + + /** + * + * + *
    +     * Read options for this change stream query.
    +     * 
    + * + * repeated string read_options = 5; + * + * @return The count of readOptions. + */ + public int getReadOptionsCount() { + return readOptions_.size(); + } + + /** + * + * + *
    +     * Read options for this change stream query.
    +     * 
    + * + * repeated string read_options = 5; + * + * @param index The index of the element to return. + * @return The readOptions at the given index. + */ + public java.lang.String getReadOptions(int index) { + return readOptions_.get(index); + } + + /** + * + * + *
    +     * Read options for this change stream query.
    +     * 
    + * + * repeated string read_options = 5; + * + * @param index The index of the value to return. + * @return The bytes of the readOptions at the given index. + */ + public com.google.protobuf.ByteString getReadOptionsBytes(int index) { + return readOptions_.getByteString(index); + } + + /** + * + * + *
    +     * Read options for this change stream query.
    +     * 
    + * + * repeated string read_options = 5; + * + * @param index The index to set the value at. + * @param value The readOptions to set. + * @return This builder for chaining. + */ + public Builder setReadOptions(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureReadOptionsIsMutable(); + readOptions_.set(index, value); + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Read options for this change stream query.
    +     * 
    + * + * repeated string read_options = 5; + * + * @param value The readOptions to add. + * @return This builder for chaining. + */ + public Builder addReadOptions(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureReadOptionsIsMutable(); + readOptions_.add(value); + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Read options for this change stream query.
    +     * 
    + * + * repeated string read_options = 5; + * + * @param values The readOptions to add. + * @return This builder for chaining. + */ + public Builder addAllReadOptions(java.lang.Iterable values) { + ensureReadOptionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, readOptions_); + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Read options for this change stream query.
    +     * 
    + * + * repeated string read_options = 5; + * + * @return This builder for chaining. + */ + public Builder clearReadOptions() { + readOptions_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Read options for this change stream query.
    +     * 
    + * + * repeated string read_options = 5; + * + * @param value The bytes of the readOptions to add. + * @return This builder for chaining. + */ + public Builder addReadOptionsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureReadOptionsIsMutable(); + readOptions_.add(value); + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + private int heartbeatMilliseconds_; + + /** + * + * + *
    +     * Determines how frequently a heartbeat ChangeRecord will be returned in case
    +     * there are no transactions committed in this partition, in milliseconds.
    +     * 
    + * + * optional int32 heartbeat_milliseconds = 6; + * + * @return Whether the heartbeatMilliseconds field is set. + */ + @java.lang.Override + public boolean hasHeartbeatMilliseconds() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
    +     * Determines how frequently a heartbeat ChangeRecord will be returned in case
    +     * there are no transactions committed in this partition, in milliseconds.
    +     * 
    + * + * optional int32 heartbeat_milliseconds = 6; + * + * @return The heartbeatMilliseconds. + */ + @java.lang.Override + public int getHeartbeatMilliseconds() { + return heartbeatMilliseconds_; + } + + /** + * + * + *
    +     * Determines how frequently a heartbeat ChangeRecord will be returned in case
    +     * there are no transactions committed in this partition, in milliseconds.
    +     * 
    + * + * optional int32 heartbeat_milliseconds = 6; + * + * @param value The heartbeatMilliseconds to set. + * @return This builder for chaining. + */ + public Builder setHeartbeatMilliseconds(int value) { + + heartbeatMilliseconds_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Determines how frequently a heartbeat ChangeRecord will be returned in case
    +     * there are no transactions committed in this partition, in milliseconds.
    +     * 
    + * + * optional int32 heartbeat_milliseconds = 6; + * + * @return This builder for chaining. + */ + public Builder clearHeartbeatMilliseconds() { + bitField0_ = (bitField0_ & ~0x00000020); + heartbeatMilliseconds_ = 0; + onChanged(); + return this; + } + + private long deadlineSeconds_; + + /** + * + * + *
    +     * Deadline for this change stream query, in seconds.
    +     * 
    + * + * optional int64 deadline_seconds = 7; + * + * @return Whether the deadlineSeconds field is set. + */ + @java.lang.Override + public boolean hasDeadlineSeconds() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
    +     * Deadline for this change stream query, in seconds.
    +     * 
    + * + * optional int64 deadline_seconds = 7; + * + * @return The deadlineSeconds. + */ + @java.lang.Override + public long getDeadlineSeconds() { + return deadlineSeconds_; + } + + /** + * + * + *
    +     * Deadline for this change stream query, in seconds.
    +     * 
    + * + * optional int64 deadline_seconds = 7; + * + * @param value The deadlineSeconds to set. + * @return This builder for chaining. + */ + public Builder setDeadlineSeconds(long value) { + + deadlineSeconds_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Deadline for this change stream query, in seconds.
    +     * 
    + * + * optional int64 deadline_seconds = 7; + * + * @return This builder for chaining. + */ + public Builder clearDeadlineSeconds() { + bitField0_ = (bitField0_ & ~0x00000040); + deadlineSeconds_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object cloudDatabaseRole_ = ""; + + /** + * + * + *
    +     * Database role to assume while performing this action. This should only be
    +     * set for cloud requests. Setting the database role will enforce additional
    +     * role-based access checks on this action.
    +     * 
    + * + * optional string cloud_database_role = 8; + * + * @return Whether the cloudDatabaseRole field is set. + */ + public boolean hasCloudDatabaseRole() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
    +     * Database role to assume while performing this action. This should only be
    +     * set for cloud requests. Setting the database role will enforce additional
    +     * role-based access checks on this action.
    +     * 
    + * + * optional string cloud_database_role = 8; + * + * @return The cloudDatabaseRole. + */ + public java.lang.String getCloudDatabaseRole() { + java.lang.Object ref = cloudDatabaseRole_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + cloudDatabaseRole_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Database role to assume while performing this action. This should only be
    +     * set for cloud requests. Setting the database role will enforce additional
    +     * role-based access checks on this action.
    +     * 
    + * + * optional string cloud_database_role = 8; + * + * @return The bytes for cloudDatabaseRole. + */ + public com.google.protobuf.ByteString getCloudDatabaseRoleBytes() { + java.lang.Object ref = cloudDatabaseRole_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + cloudDatabaseRole_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Database role to assume while performing this action. This should only be
    +     * set for cloud requests. Setting the database role will enforce additional
    +     * role-based access checks on this action.
    +     * 
    + * + * optional string cloud_database_role = 8; + * + * @param value The cloudDatabaseRole to set. + * @return This builder for chaining. + */ + public Builder setCloudDatabaseRole(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + cloudDatabaseRole_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Database role to assume while performing this action. This should only be
    +     * set for cloud requests. Setting the database role will enforce additional
    +     * role-based access checks on this action.
    +     * 
    + * + * optional string cloud_database_role = 8; + * + * @return This builder for chaining. + */ + public Builder clearCloudDatabaseRole() { + cloudDatabaseRole_ = getDefaultInstance().getCloudDatabaseRole(); + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Database role to assume while performing this action. This should only be
    +     * set for cloud requests. Setting the database role will enforce additional
    +     * role-based access checks on this action.
    +     * 
    + * + * optional string cloud_database_role = 8; + * + * @param value The bytes for cloudDatabaseRole to set. + * @return This builder for chaining. + */ + public Builder setCloudDatabaseRoleBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + cloudDatabaseRole_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.ExecuteChangeStreamQuery) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.ExecuteChangeStreamQuery) + private static final com.google.spanner.executor.v1.ExecuteChangeStreamQuery DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.ExecuteChangeStreamQuery(); + } + + public static com.google.spanner.executor.v1.ExecuteChangeStreamQuery getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ExecuteChangeStreamQuery parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ExecuteChangeStreamQuery getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecuteChangeStreamQueryOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecuteChangeStreamQueryOrBuilder.java new file mode 100644 index 000000000000..8883d60f9173 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecuteChangeStreamQueryOrBuilder.java @@ -0,0 +1,329 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface ExecuteChangeStreamQueryOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.ExecuteChangeStreamQuery) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Name for this change stream.
    +   * 
    + * + * string name = 1; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Name for this change stream.
    +   * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +   * Specifies that records with commit_timestamp greater than or equal to
    +   * start_time should be returned.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return Whether the startTime field is set. + */ + boolean hasStartTime(); + + /** + * + * + *
    +   * Specifies that records with commit_timestamp greater than or equal to
    +   * start_time should be returned.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + * + * @return The startTime. + */ + com.google.protobuf.Timestamp getStartTime(); + + /** + * + * + *
    +   * Specifies that records with commit_timestamp greater than or equal to
    +   * start_time should be returned.
    +   * 
    + * + * .google.protobuf.Timestamp start_time = 2; + */ + com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder(); + + /** + * + * + *
    +   * Specifies that records with commit_timestamp less than or equal to
    +   * end_time should be returned.
    +   * 
    + * + * optional .google.protobuf.Timestamp end_time = 3; + * + * @return Whether the endTime field is set. + */ + boolean hasEndTime(); + + /** + * + * + *
    +   * Specifies that records with commit_timestamp less than or equal to
    +   * end_time should be returned.
    +   * 
    + * + * optional .google.protobuf.Timestamp end_time = 3; + * + * @return The endTime. + */ + com.google.protobuf.Timestamp getEndTime(); + + /** + * + * + *
    +   * Specifies that records with commit_timestamp less than or equal to
    +   * end_time should be returned.
    +   * 
    + * + * optional .google.protobuf.Timestamp end_time = 3; + */ + com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder(); + + /** + * + * + *
    +   * Specifies which change stream partition to query, based on the content of
    +   * child partitions records.
    +   * 
    + * + * optional string partition_token = 4; + * + * @return Whether the partitionToken field is set. + */ + boolean hasPartitionToken(); + + /** + * + * + *
    +   * Specifies which change stream partition to query, based on the content of
    +   * child partitions records.
    +   * 
    + * + * optional string partition_token = 4; + * + * @return The partitionToken. + */ + java.lang.String getPartitionToken(); + + /** + * + * + *
    +   * Specifies which change stream partition to query, based on the content of
    +   * child partitions records.
    +   * 
    + * + * optional string partition_token = 4; + * + * @return The bytes for partitionToken. + */ + com.google.protobuf.ByteString getPartitionTokenBytes(); + + /** + * + * + *
    +   * Read options for this change stream query.
    +   * 
    + * + * repeated string read_options = 5; + * + * @return A list containing the readOptions. + */ + java.util.List getReadOptionsList(); + + /** + * + * + *
    +   * Read options for this change stream query.
    +   * 
    + * + * repeated string read_options = 5; + * + * @return The count of readOptions. + */ + int getReadOptionsCount(); + + /** + * + * + *
    +   * Read options for this change stream query.
    +   * 
    + * + * repeated string read_options = 5; + * + * @param index The index of the element to return. + * @return The readOptions at the given index. + */ + java.lang.String getReadOptions(int index); + + /** + * + * + *
    +   * Read options for this change stream query.
    +   * 
    + * + * repeated string read_options = 5; + * + * @param index The index of the value to return. + * @return The bytes of the readOptions at the given index. + */ + com.google.protobuf.ByteString getReadOptionsBytes(int index); + + /** + * + * + *
    +   * Determines how frequently a heartbeat ChangeRecord will be returned in case
    +   * there are no transactions committed in this partition, in milliseconds.
    +   * 
    + * + * optional int32 heartbeat_milliseconds = 6; + * + * @return Whether the heartbeatMilliseconds field is set. + */ + boolean hasHeartbeatMilliseconds(); + + /** + * + * + *
    +   * Determines how frequently a heartbeat ChangeRecord will be returned in case
    +   * there are no transactions committed in this partition, in milliseconds.
    +   * 
    + * + * optional int32 heartbeat_milliseconds = 6; + * + * @return The heartbeatMilliseconds. + */ + int getHeartbeatMilliseconds(); + + /** + * + * + *
    +   * Deadline for this change stream query, in seconds.
    +   * 
    + * + * optional int64 deadline_seconds = 7; + * + * @return Whether the deadlineSeconds field is set. + */ + boolean hasDeadlineSeconds(); + + /** + * + * + *
    +   * Deadline for this change stream query, in seconds.
    +   * 
    + * + * optional int64 deadline_seconds = 7; + * + * @return The deadlineSeconds. + */ + long getDeadlineSeconds(); + + /** + * + * + *
    +   * Database role to assume while performing this action. This should only be
    +   * set for cloud requests. Setting the database role will enforce additional
    +   * role-based access checks on this action.
    +   * 
    + * + * optional string cloud_database_role = 8; + * + * @return Whether the cloudDatabaseRole field is set. + */ + boolean hasCloudDatabaseRole(); + + /** + * + * + *
    +   * Database role to assume while performing this action. This should only be
    +   * set for cloud requests. Setting the database role will enforce additional
    +   * role-based access checks on this action.
    +   * 
    + * + * optional string cloud_database_role = 8; + * + * @return The cloudDatabaseRole. + */ + java.lang.String getCloudDatabaseRole(); + + /** + * + * + *
    +   * Database role to assume while performing this action. This should only be
    +   * set for cloud requests. Setting the database role will enforce additional
    +   * role-based access checks on this action.
    +   * 
    + * + * optional string cloud_database_role = 8; + * + * @return The bytes for cloudDatabaseRole. + */ + com.google.protobuf.ByteString getCloudDatabaseRoleBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecutePartitionAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecutePartitionAction.java new file mode 100644 index 000000000000..873ea820e110 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecutePartitionAction.java @@ -0,0 +1,704 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Performs a read or query for the given partitions. This action must be
    + * executed in the context of the same transaction that was used to generate
    + * given partitions.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ExecutePartitionAction} + */ +@com.google.protobuf.Generated +public final class ExecutePartitionAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.ExecutePartitionAction) + ExecutePartitionActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ExecutePartitionAction"); + } + + // Use ExecutePartitionAction.newBuilder() to construct. + private ExecutePartitionAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ExecutePartitionAction() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ExecutePartitionAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ExecutePartitionAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ExecutePartitionAction.class, + com.google.spanner.executor.v1.ExecutePartitionAction.Builder.class); + } + + private int bitField0_; + public static final int PARTITION_FIELD_NUMBER = 1; + private com.google.spanner.executor.v1.BatchPartition partition_; + + /** + * + * + *
    +   * Batch partition to execute on.
    +   * 
    + * + * .google.spanner.executor.v1.BatchPartition partition = 1; + * + * @return Whether the partition field is set. + */ + @java.lang.Override + public boolean hasPartition() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Batch partition to execute on.
    +   * 
    + * + * .google.spanner.executor.v1.BatchPartition partition = 1; + * + * @return The partition. + */ + @java.lang.Override + public com.google.spanner.executor.v1.BatchPartition getPartition() { + return partition_ == null + ? com.google.spanner.executor.v1.BatchPartition.getDefaultInstance() + : partition_; + } + + /** + * + * + *
    +   * Batch partition to execute on.
    +   * 
    + * + * .google.spanner.executor.v1.BatchPartition partition = 1; + */ + @java.lang.Override + public com.google.spanner.executor.v1.BatchPartitionOrBuilder getPartitionOrBuilder() { + return partition_ == null + ? com.google.spanner.executor.v1.BatchPartition.getDefaultInstance() + : partition_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getPartition()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getPartition()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.ExecutePartitionAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.ExecutePartitionAction other = + (com.google.spanner.executor.v1.ExecutePartitionAction) obj; + + if (hasPartition() != other.hasPartition()) return false; + if (hasPartition()) { + if (!getPartition().equals(other.getPartition())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if 
(hasPartition()) { + hash = (37 * hash) + PARTITION_FIELD_NUMBER; + hash = (53 * hash) + getPartition().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.ExecutePartitionAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ExecutePartitionAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ExecutePartitionAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ExecutePartitionAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ExecutePartitionAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ExecutePartitionAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ExecutePartitionAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.spanner.executor.v1.ExecutePartitionAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ExecutePartitionAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ExecutePartitionAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ExecutePartitionAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ExecutePartitionAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.ExecutePartitionAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Performs a read or query for the given partitions. This action must be
    +   * executed in the context of the same transaction that was used to generate
    +   * given partitions.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ExecutePartitionAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.ExecutePartitionAction) + com.google.spanner.executor.v1.ExecutePartitionActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ExecutePartitionAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ExecutePartitionAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ExecutePartitionAction.class, + com.google.spanner.executor.v1.ExecutePartitionAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.ExecutePartitionAction.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetPartitionFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + partition_ = null; + if (partitionBuilder_ != null) { + partitionBuilder_.dispose(); + partitionBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ExecutePartitionAction_descriptor; + } + + @java.lang.Override + public 
com.google.spanner.executor.v1.ExecutePartitionAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.ExecutePartitionAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.ExecutePartitionAction build() { + com.google.spanner.executor.v1.ExecutePartitionAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ExecutePartitionAction buildPartial() { + com.google.spanner.executor.v1.ExecutePartitionAction result = + new com.google.spanner.executor.v1.ExecutePartitionAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.ExecutePartitionAction result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.partition_ = partitionBuilder_ == null ? 
partition_ : partitionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.ExecutePartitionAction) { + return mergeFrom((com.google.spanner.executor.v1.ExecutePartitionAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.ExecutePartitionAction other) { + if (other == com.google.spanner.executor.v1.ExecutePartitionAction.getDefaultInstance()) + return this; + if (other.hasPartition()) { + mergePartition(other.getPartition()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetPartitionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.executor.v1.BatchPartition partition_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.BatchPartition, + 
com.google.spanner.executor.v1.BatchPartition.Builder, + com.google.spanner.executor.v1.BatchPartitionOrBuilder> + partitionBuilder_; + + /** + * + * + *
    +     * Batch partition to execute on.
    +     * 
    + * + * .google.spanner.executor.v1.BatchPartition partition = 1; + * + * @return Whether the partition field is set. + */ + public boolean hasPartition() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Batch partition to execute on.
    +     * 
    + * + * .google.spanner.executor.v1.BatchPartition partition = 1; + * + * @return The partition. + */ + public com.google.spanner.executor.v1.BatchPartition getPartition() { + if (partitionBuilder_ == null) { + return partition_ == null + ? com.google.spanner.executor.v1.BatchPartition.getDefaultInstance() + : partition_; + } else { + return partitionBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Batch partition to execute on.
    +     * 
    + * + * .google.spanner.executor.v1.BatchPartition partition = 1; + */ + public Builder setPartition(com.google.spanner.executor.v1.BatchPartition value) { + if (partitionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + partition_ = value; + } else { + partitionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Batch partition to execute on.
    +     * 
    + * + * .google.spanner.executor.v1.BatchPartition partition = 1; + */ + public Builder setPartition( + com.google.spanner.executor.v1.BatchPartition.Builder builderForValue) { + if (partitionBuilder_ == null) { + partition_ = builderForValue.build(); + } else { + partitionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Batch partition to execute on.
    +     * 
    + * + * .google.spanner.executor.v1.BatchPartition partition = 1; + */ + public Builder mergePartition(com.google.spanner.executor.v1.BatchPartition value) { + if (partitionBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && partition_ != null + && partition_ != com.google.spanner.executor.v1.BatchPartition.getDefaultInstance()) { + getPartitionBuilder().mergeFrom(value); + } else { + partition_ = value; + } + } else { + partitionBuilder_.mergeFrom(value); + } + if (partition_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Batch partition to execute on.
    +     * 
    + * + * .google.spanner.executor.v1.BatchPartition partition = 1; + */ + public Builder clearPartition() { + bitField0_ = (bitField0_ & ~0x00000001); + partition_ = null; + if (partitionBuilder_ != null) { + partitionBuilder_.dispose(); + partitionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Batch partition to execute on.
    +     * 
    + * + * .google.spanner.executor.v1.BatchPartition partition = 1; + */ + public com.google.spanner.executor.v1.BatchPartition.Builder getPartitionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetPartitionFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Batch partition to execute on.
    +     * 
    + * + * .google.spanner.executor.v1.BatchPartition partition = 1; + */ + public com.google.spanner.executor.v1.BatchPartitionOrBuilder getPartitionOrBuilder() { + if (partitionBuilder_ != null) { + return partitionBuilder_.getMessageOrBuilder(); + } else { + return partition_ == null + ? com.google.spanner.executor.v1.BatchPartition.getDefaultInstance() + : partition_; + } + } + + /** + * + * + *
    +     * Batch partition to execute on.
    +     * 
    + * + * .google.spanner.executor.v1.BatchPartition partition = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.BatchPartition, + com.google.spanner.executor.v1.BatchPartition.Builder, + com.google.spanner.executor.v1.BatchPartitionOrBuilder> + internalGetPartitionFieldBuilder() { + if (partitionBuilder_ == null) { + partitionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.BatchPartition, + com.google.spanner.executor.v1.BatchPartition.Builder, + com.google.spanner.executor.v1.BatchPartitionOrBuilder>( + getPartition(), getParentForChildren(), isClean()); + partition_ = null; + } + return partitionBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.ExecutePartitionAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.ExecutePartitionAction) + private static final com.google.spanner.executor.v1.ExecutePartitionAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.ExecutePartitionAction(); + } + + public static com.google.spanner.executor.v1.ExecutePartitionAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ExecutePartitionAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ExecutePartitionAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecutePartitionActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecutePartitionActionOrBuilder.java new file mode 100644 index 000000000000..d68a98c2da95 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecutePartitionActionOrBuilder.java @@ -0,0 +1,65 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface ExecutePartitionActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.ExecutePartitionAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Batch partition to execute on.
    +   * 
    + * + * .google.spanner.executor.v1.BatchPartition partition = 1; + * + * @return Whether the partition field is set. + */ + boolean hasPartition(); + + /** + * + * + *
    +   * Batch partition to execute on.
    +   * 
    + * + * .google.spanner.executor.v1.BatchPartition partition = 1; + * + * @return The partition. + */ + com.google.spanner.executor.v1.BatchPartition getPartition(); + + /** + * + * + *
    +   * Batch partition to execute on.
    +   * 
    + * + * .google.spanner.executor.v1.BatchPartition partition = 1; + */ + com.google.spanner.executor.v1.BatchPartitionOrBuilder getPartitionOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/FinishTransactionAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/FinishTransactionAction.java new file mode 100644 index 000000000000..2b986acb4819 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/FinishTransactionAction.java @@ -0,0 +1,749 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * FinishTransactionAction defines an action of finishing a transaction.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.FinishTransactionAction} + */ +@com.google.protobuf.Generated +public final class FinishTransactionAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.FinishTransactionAction) + FinishTransactionActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "FinishTransactionAction"); + } + + // Use FinishTransactionAction.newBuilder() to construct. + private FinishTransactionAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private FinishTransactionAction() { + mode_ = 0; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_FinishTransactionAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_FinishTransactionAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.FinishTransactionAction.class, + com.google.spanner.executor.v1.FinishTransactionAction.Builder.class); + } + + /** + * + * + *
    +   * Mode indicates how the transaction should be finished.
    +   * 
    + * + * Protobuf enum {@code google.spanner.executor.v1.FinishTransactionAction.Mode} + */ + public enum Mode implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * "MODE_UNSPECIFIED" is equivalent to "COMMIT".
    +     * 
    + * + * MODE_UNSPECIFIED = 0; + */ + MODE_UNSPECIFIED(0), + /** + * + * + *
    +     * Commit the transaction.
    +     * 
    + * + * COMMIT = 1; + */ + COMMIT(1), + /** + * + * + *
    +     * Drop the transaction without committing it.
    +     * 
    + * + * ABANDON = 2; + */ + ABANDON(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Mode"); + } + + /** + * + * + *
    +     * "MODE_UNSPECIFIED" is equivalent to "COMMIT".
    +     * 
    + * + * MODE_UNSPECIFIED = 0; + */ + public static final int MODE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * Commit the transaction.
    +     * 
    + * + * COMMIT = 1; + */ + public static final int COMMIT_VALUE = 1; + + /** + * + * + *
    +     * Drop the transaction without committing it.
    +     * 
    + * + * ABANDON = 2; + */ + public static final int ABANDON_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Mode valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Mode forNumber(int value) { + switch (value) { + case 0: + return MODE_UNSPECIFIED; + case 1: + return COMMIT; + case 2: + return ABANDON; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Mode findValueByNumber(int number) { + return Mode.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.executor.v1.FinishTransactionAction.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final Mode[] VALUES = values(); + + public static Mode valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if 
(desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Mode(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.executor.v1.FinishTransactionAction.Mode) + } + + public static final int MODE_FIELD_NUMBER = 1; + private int mode_ = 0; + + /** + * + * + *
    +   * Defines how exactly the transaction should be completed, e.g. with
    +   * commit or abortion.
    +   * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction.Mode mode = 1; + * + * @return The enum numeric value on the wire for mode. + */ + @java.lang.Override + public int getModeValue() { + return mode_; + } + + /** + * + * + *
    +   * Defines how exactly the transaction should be completed, e.g. with
    +   * commit or abortion.
    +   * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction.Mode mode = 1; + * + * @return The mode. + */ + @java.lang.Override + public com.google.spanner.executor.v1.FinishTransactionAction.Mode getMode() { + com.google.spanner.executor.v1.FinishTransactionAction.Mode result = + com.google.spanner.executor.v1.FinishTransactionAction.Mode.forNumber(mode_); + return result == null + ? com.google.spanner.executor.v1.FinishTransactionAction.Mode.UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (mode_ + != com.google.spanner.executor.v1.FinishTransactionAction.Mode.MODE_UNSPECIFIED + .getNumber()) { + output.writeEnum(1, mode_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (mode_ + != com.google.spanner.executor.v1.FinishTransactionAction.Mode.MODE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, mode_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.FinishTransactionAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.FinishTransactionAction other = + (com.google.spanner.executor.v1.FinishTransactionAction) obj; + + if (mode_ != other.mode_) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + 
@java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + MODE_FIELD_NUMBER; + hash = (53 * hash) + mode_; + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.FinishTransactionAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.FinishTransactionAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.FinishTransactionAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.FinishTransactionAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.FinishTransactionAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.FinishTransactionAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.FinishTransactionAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.FinishTransactionAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.FinishTransactionAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.FinishTransactionAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.FinishTransactionAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.FinishTransactionAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.FinishTransactionAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * FinishTransactionAction defines an action of finishing a transaction.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.FinishTransactionAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.FinishTransactionAction) + com.google.spanner.executor.v1.FinishTransactionActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_FinishTransactionAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_FinishTransactionAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.FinishTransactionAction.class, + com.google.spanner.executor.v1.FinishTransactionAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.FinishTransactionAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + mode_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_FinishTransactionAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.FinishTransactionAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.FinishTransactionAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.FinishTransactionAction build() { + com.google.spanner.executor.v1.FinishTransactionAction result = 
buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.FinishTransactionAction buildPartial() { + com.google.spanner.executor.v1.FinishTransactionAction result = + new com.google.spanner.executor.v1.FinishTransactionAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.FinishTransactionAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.mode_ = mode_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.FinishTransactionAction) { + return mergeFrom((com.google.spanner.executor.v1.FinishTransactionAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.FinishTransactionAction other) { + if (other == com.google.spanner.executor.v1.FinishTransactionAction.getDefaultInstance()) + return this; + if (other.mode_ != 0) { + setModeValue(other.getModeValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + mode_ = input.readEnum(); + bitField0_ |= 0x00000001; + break; + } // case 8 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + 
done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int mode_ = 0; + + /** + * + * + *
    +     * Defines how exactly the transaction should be completed, e.g. with
    +     * commit or abortion.
    +     * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction.Mode mode = 1; + * + * @return The enum numeric value on the wire for mode. + */ + @java.lang.Override + public int getModeValue() { + return mode_; + } + + /** + * + * + *
    +     * Defines how exactly the transaction should be completed, e.g. with
    +     * commit or abortion.
    +     * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction.Mode mode = 1; + * + * @param value The enum numeric value on the wire for mode to set. + * @return This builder for chaining. + */ + public Builder setModeValue(int value) { + mode_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Defines how exactly the transaction should be completed, e.g. with
    +     * commit or abortion.
    +     * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction.Mode mode = 1; + * + * @return The mode. + */ + @java.lang.Override + public com.google.spanner.executor.v1.FinishTransactionAction.Mode getMode() { + com.google.spanner.executor.v1.FinishTransactionAction.Mode result = + com.google.spanner.executor.v1.FinishTransactionAction.Mode.forNumber(mode_); + return result == null + ? com.google.spanner.executor.v1.FinishTransactionAction.Mode.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * Defines how exactly the transaction should be completed, e.g. with
    +     * commit or abortion.
    +     * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction.Mode mode = 1; + * + * @param value The mode to set. + * @return This builder for chaining. + */ + public Builder setMode(com.google.spanner.executor.v1.FinishTransactionAction.Mode value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + mode_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Defines how exactly the transaction should be completed, e.g. with
    +     * commit or abortion.
    +     * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction.Mode mode = 1; + * + * @return This builder for chaining. + */ + public Builder clearMode() { + bitField0_ = (bitField0_ & ~0x00000001); + mode_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.FinishTransactionAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.FinishTransactionAction) + private static final com.google.spanner.executor.v1.FinishTransactionAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.FinishTransactionAction(); + } + + public static com.google.spanner.executor.v1.FinishTransactionAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FinishTransactionAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.FinishTransactionAction getDefaultInstanceForType() { + return 
DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/FinishTransactionActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/FinishTransactionActionOrBuilder.java new file mode 100644 index 000000000000..5192a3203e2e --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/FinishTransactionActionOrBuilder.java @@ -0,0 +1,56 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface FinishTransactionActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.FinishTransactionAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Defines how exactly the transaction should be completed, e.g. with
    +   * commit or abortion.
    +   * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction.Mode mode = 1; + * + * @return The enum numeric value on the wire for mode. + */ + int getModeValue(); + + /** + * + * + *
    +   * Defines how exactly the transaction should be completed, e.g. with
    +   * commit or abortion.
    +   * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction.Mode mode = 1; + * + * @return The mode. + */ + com.google.spanner.executor.v1.FinishTransactionAction.Mode getMode(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForQueryAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForQueryAction.java new file mode 100644 index 000000000000..86782df76402 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForQueryAction.java @@ -0,0 +1,851 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Generate database partitions for the given query. Successful outcomes will
    + * contain database partitions in the db_partition field.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.GenerateDbPartitionsForQueryAction} + */ +@com.google.protobuf.Generated +public final class GenerateDbPartitionsForQueryAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.GenerateDbPartitionsForQueryAction) + GenerateDbPartitionsForQueryActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GenerateDbPartitionsForQueryAction"); + } + + // Use GenerateDbPartitionsForQueryAction.newBuilder() to construct. + private GenerateDbPartitionsForQueryAction( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GenerateDbPartitionsForQueryAction() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GenerateDbPartitionsForQueryAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GenerateDbPartitionsForQueryAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction.class, + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction.Builder.class); + } + + private int bitField0_; + public static final int QUERY_FIELD_NUMBER = 1; + private com.google.spanner.executor.v1.QueryAction query_; + + /** + * + * + *
    +   * Query to generate partitions for.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction query = 1; + * + * @return Whether the query field is set. + */ + @java.lang.Override + public boolean hasQuery() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Query to generate partitions for.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction query = 1; + * + * @return The query. + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryAction getQuery() { + return query_ == null + ? com.google.spanner.executor.v1.QueryAction.getDefaultInstance() + : query_; + } + + /** + * + * + *
    +   * Query to generate partitions for.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction query = 1; + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryActionOrBuilder getQueryOrBuilder() { + return query_ == null + ? com.google.spanner.executor.v1.QueryAction.getDefaultInstance() + : query_; + } + + public static final int DESIRED_BYTES_PER_PARTITION_FIELD_NUMBER = 2; + private long desiredBytesPerPartition_ = 0L; + + /** + * + * + *
    +   * Desired size of data in each partition. Spanner doesn't guarantee to
    +   * respect this value.
    +   * 
    + * + * optional int64 desired_bytes_per_partition = 2; + * + * @return Whether the desiredBytesPerPartition field is set. + */ + @java.lang.Override + public boolean hasDesiredBytesPerPartition() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Desired size of data in each partition. Spanner doesn't guarantee to
    +   * respect this value.
    +   * 
    + * + * optional int64 desired_bytes_per_partition = 2; + * + * @return The desiredBytesPerPartition. + */ + @java.lang.Override + public long getDesiredBytesPerPartition() { + return desiredBytesPerPartition_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getQuery()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt64(2, desiredBytesPerPartition_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getQuery()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, desiredBytesPerPartition_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction other = + (com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction) obj; + + if (hasQuery() != other.hasQuery()) return false; + if (hasQuery()) { + if (!getQuery().equals(other.getQuery())) return false; + } + if (hasDesiredBytesPerPartition() != other.hasDesiredBytesPerPartition()) return false; + if (hasDesiredBytesPerPartition()) 
{ + if (getDesiredBytesPerPartition() != other.getDesiredBytesPerPartition()) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasQuery()) { + hash = (37 * hash) + QUERY_FIELD_NUMBER; + hash = (53 * hash) + getQuery().hashCode(); + } + if (hasDesiredBytesPerPartition()) { + hash = (37 * hash) + DESIRED_BYTES_PER_PARTITION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getDesiredBytesPerPartition()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction parseFrom( + byte[] data) throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Generate database partitions for the given query. Successful outcomes will
    +   * contain database partitions in the db_partition field.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.GenerateDbPartitionsForQueryAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.GenerateDbPartitionsForQueryAction) + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GenerateDbPartitionsForQueryAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GenerateDbPartitionsForQueryAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction.class, + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction.Builder.class); + } + + // Construct using + // com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetQueryFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + query_ = null; + if (queryBuilder_ != null) { + queryBuilder_.dispose(); + queryBuilder_ = null; + } + desiredBytesPerPartition_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + 
.internal_static_google_spanner_executor_v1_GenerateDbPartitionsForQueryAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction + getDefaultInstanceForType() { + return com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction build() { + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction buildPartial() { + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction result = + new com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.query_ = queryBuilder_ == null ? 
query_ : queryBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.desiredBytesPerPartition_ = desiredBytesPerPartition_; + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction) { + return mergeFrom((com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction other) { + if (other + == com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction.getDefaultInstance()) + return this; + if (other.hasQuery()) { + mergeQuery(other.getQuery()); + } + if (other.hasDesiredBytesPerPartition()) { + setDesiredBytesPerPartition(other.getDesiredBytesPerPartition()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(internalGetQueryFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + desiredBytesPerPartition_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // 
switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.executor.v1.QueryAction query_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.QueryAction, + com.google.spanner.executor.v1.QueryAction.Builder, + com.google.spanner.executor.v1.QueryActionOrBuilder> + queryBuilder_; + + /** + * + * + *
    +     * Query to generate partitions for.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction query = 1; + * + * @return Whether the query field is set. + */ + public boolean hasQuery() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Query to generate partitions for.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction query = 1; + * + * @return The query. + */ + public com.google.spanner.executor.v1.QueryAction getQuery() { + if (queryBuilder_ == null) { + return query_ == null + ? com.google.spanner.executor.v1.QueryAction.getDefaultInstance() + : query_; + } else { + return queryBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Query to generate partitions for.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction query = 1; + */ + public Builder setQuery(com.google.spanner.executor.v1.QueryAction value) { + if (queryBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + query_ = value; + } else { + queryBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Query to generate partitions for.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction query = 1; + */ + public Builder setQuery(com.google.spanner.executor.v1.QueryAction.Builder builderForValue) { + if (queryBuilder_ == null) { + query_ = builderForValue.build(); + } else { + queryBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Query to generate partitions for.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction query = 1; + */ + public Builder mergeQuery(com.google.spanner.executor.v1.QueryAction value) { + if (queryBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && query_ != null + && query_ != com.google.spanner.executor.v1.QueryAction.getDefaultInstance()) { + getQueryBuilder().mergeFrom(value); + } else { + query_ = value; + } + } else { + queryBuilder_.mergeFrom(value); + } + if (query_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Query to generate partitions for.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction query = 1; + */ + public Builder clearQuery() { + bitField0_ = (bitField0_ & ~0x00000001); + query_ = null; + if (queryBuilder_ != null) { + queryBuilder_.dispose(); + queryBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Query to generate partitions for.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction query = 1; + */ + public com.google.spanner.executor.v1.QueryAction.Builder getQueryBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetQueryFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Query to generate partitions for.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction query = 1; + */ + public com.google.spanner.executor.v1.QueryActionOrBuilder getQueryOrBuilder() { + if (queryBuilder_ != null) { + return queryBuilder_.getMessageOrBuilder(); + } else { + return query_ == null + ? com.google.spanner.executor.v1.QueryAction.getDefaultInstance() + : query_; + } + } + + /** + * + * + *
    +     * Query to generate partitions for.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction query = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.QueryAction, + com.google.spanner.executor.v1.QueryAction.Builder, + com.google.spanner.executor.v1.QueryActionOrBuilder> + internalGetQueryFieldBuilder() { + if (queryBuilder_ == null) { + queryBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.QueryAction, + com.google.spanner.executor.v1.QueryAction.Builder, + com.google.spanner.executor.v1.QueryActionOrBuilder>( + getQuery(), getParentForChildren(), isClean()); + query_ = null; + } + return queryBuilder_; + } + + private long desiredBytesPerPartition_; + + /** + * + * + *
    +     * Desired size of data in each partition. Spanner doesn't guarantee to
    +     * respect this value.
    +     * 
    + * + * optional int64 desired_bytes_per_partition = 2; + * + * @return Whether the desiredBytesPerPartition field is set. + */ + @java.lang.Override + public boolean hasDesiredBytesPerPartition() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Desired size of data in each partition. Spanner doesn't guarantee to
    +     * respect this value.
    +     * 
    + * + * optional int64 desired_bytes_per_partition = 2; + * + * @return The desiredBytesPerPartition. + */ + @java.lang.Override + public long getDesiredBytesPerPartition() { + return desiredBytesPerPartition_; + } + + /** + * + * + *
    +     * Desired size of data in each partition. Spanner doesn't guarantee to
    +     * respect this value.
    +     * 
    + * + * optional int64 desired_bytes_per_partition = 2; + * + * @param value The desiredBytesPerPartition to set. + * @return This builder for chaining. + */ + public Builder setDesiredBytesPerPartition(long value) { + + desiredBytesPerPartition_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Desired size of data in each partition. Spanner doesn't guarantee to
    +     * respect this value.
    +     * 
    + * + * optional int64 desired_bytes_per_partition = 2; + * + * @return This builder for chaining. + */ + public Builder clearDesiredBytesPerPartition() { + bitField0_ = (bitField0_ & ~0x00000002); + desiredBytesPerPartition_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.GenerateDbPartitionsForQueryAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.GenerateDbPartitionsForQueryAction) + private static final com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction(); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GenerateDbPartitionsForQueryAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForQueryActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForQueryActionOrBuilder.java new file mode 100644 index 000000000000..417c35af6d41 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForQueryActionOrBuilder.java @@ -0,0 +1,93 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface GenerateDbPartitionsForQueryActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.GenerateDbPartitionsForQueryAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Query to generate partitions for.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction query = 1; + * + * @return Whether the query field is set. + */ + boolean hasQuery(); + + /** + * + * + *
    +   * Query to generate partitions for.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction query = 1; + * + * @return The query. + */ + com.google.spanner.executor.v1.QueryAction getQuery(); + + /** + * + * + *
    +   * Query to generate partitions for.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction query = 1; + */ + com.google.spanner.executor.v1.QueryActionOrBuilder getQueryOrBuilder(); + + /** + * + * + *
    +   * Desired size of data in each partition. Spanner doesn't guarantee to
    +   * respect this value.
    +   * 
    + * + * optional int64 desired_bytes_per_partition = 2; + * + * @return Whether the desiredBytesPerPartition field is set. + */ + boolean hasDesiredBytesPerPartition(); + + /** + * + * + *
    +   * Desired size of data in each partition. Spanner doesn't guarantee to
    +   * respect this value.
    +   * 
    + * + * optional int64 desired_bytes_per_partition = 2; + * + * @return The desiredBytesPerPartition. + */ + long getDesiredBytesPerPartition(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForReadAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForReadAction.java new file mode 100644 index 000000000000..f3d0c178534b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForReadAction.java @@ -0,0 +1,1503 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Generate database partitions for the given read. Successful outcomes will
    + * contain database partitions in the db_partition field.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.GenerateDbPartitionsForReadAction} + */ +@com.google.protobuf.Generated +public final class GenerateDbPartitionsForReadAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.GenerateDbPartitionsForReadAction) + GenerateDbPartitionsForReadActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GenerateDbPartitionsForReadAction"); + } + + // Use GenerateDbPartitionsForReadAction.newBuilder() to construct. + private GenerateDbPartitionsForReadAction( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GenerateDbPartitionsForReadAction() { + table_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GenerateDbPartitionsForReadAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GenerateDbPartitionsForReadAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction.class, + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction.Builder.class); + } + + private int bitField0_; + public static final int READ_FIELD_NUMBER = 1; + private com.google.spanner.executor.v1.ReadAction read_; + + /** + * + * + *
    +   * Read to generate partitions for.
    +   * 
    + * + * .google.spanner.executor.v1.ReadAction read = 1; + * + * @return Whether the read field is set. + */ + @java.lang.Override + public boolean hasRead() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Read to generate partitions for.
    +   * 
    + * + * .google.spanner.executor.v1.ReadAction read = 1; + * + * @return The read. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ReadAction getRead() { + return read_ == null ? com.google.spanner.executor.v1.ReadAction.getDefaultInstance() : read_; + } + + /** + * + * + *
    +   * Read to generate partitions for.
    +   * 
    + * + * .google.spanner.executor.v1.ReadAction read = 1; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ReadActionOrBuilder getReadOrBuilder() { + return read_ == null ? com.google.spanner.executor.v1.ReadAction.getDefaultInstance() : read_; + } + + public static final int TABLE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List table_; + + /** + * + * + *
    +   * Metadata related to the tables involved in the read.
    +   * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + @java.lang.Override + public java.util.List getTableList() { + return table_; + } + + /** + * + * + *
    +   * Metadata related to the tables involved in the read.
    +   * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + @java.lang.Override + public java.util.List + getTableOrBuilderList() { + return table_; + } + + /** + * + * + *
    +   * Metadata related to the tables involved in the read.
    +   * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + @java.lang.Override + public int getTableCount() { + return table_.size(); + } + + /** + * + * + *
    +   * Metadata related to the tables involved in the read.
    +   * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + @java.lang.Override + public com.google.spanner.executor.v1.TableMetadata getTable(int index) { + return table_.get(index); + } + + /** + * + * + *
    +   * Metadata related to the tables involved in the read.
    +   * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + @java.lang.Override + public com.google.spanner.executor.v1.TableMetadataOrBuilder getTableOrBuilder(int index) { + return table_.get(index); + } + + public static final int DESIRED_BYTES_PER_PARTITION_FIELD_NUMBER = 3; + private long desiredBytesPerPartition_ = 0L; + + /** + * + * + *
    +   * Desired size of data in each partition. Spanner doesn't guarantee to
    +   * respect this value.
    +   * 
    + * + * optional int64 desired_bytes_per_partition = 3; + * + * @return Whether the desiredBytesPerPartition field is set. + */ + @java.lang.Override + public boolean hasDesiredBytesPerPartition() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Desired size of data in each partition. Spanner doesn't guarantee to
    +   * respect this value.
    +   * 
    + * + * optional int64 desired_bytes_per_partition = 3; + * + * @return The desiredBytesPerPartition. + */ + @java.lang.Override + public long getDesiredBytesPerPartition() { + return desiredBytesPerPartition_; + } + + public static final int MAX_PARTITION_COUNT_FIELD_NUMBER = 4; + private long maxPartitionCount_ = 0L; + + /** + * + * + *
    +   * If set, the desired max number of partitions. Spanner doesn't guarantee to
    +   * respect this value.
    +   * 
    + * + * optional int64 max_partition_count = 4; + * + * @return Whether the maxPartitionCount field is set. + */ + @java.lang.Override + public boolean hasMaxPartitionCount() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * If set, the desired max number of partitions. Spanner doesn't guarantee to
    +   * respect this value.
    +   * 
    + * + * optional int64 max_partition_count = 4; + * + * @return The maxPartitionCount. + */ + @java.lang.Override + public long getMaxPartitionCount() { + return maxPartitionCount_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getRead()); + } + for (int i = 0; i < table_.size(); i++) { + output.writeMessage(2, table_.get(i)); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt64(3, desiredBytesPerPartition_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeInt64(4, maxPartitionCount_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getRead()); + } + for (int i = 0; i < table_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, table_.get(i)); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(3, desiredBytesPerPartition_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, maxPartitionCount_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction)) { + return super.equals(obj); + 
} + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction other = + (com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction) obj; + + if (hasRead() != other.hasRead()) return false; + if (hasRead()) { + if (!getRead().equals(other.getRead())) return false; + } + if (!getTableList().equals(other.getTableList())) return false; + if (hasDesiredBytesPerPartition() != other.hasDesiredBytesPerPartition()) return false; + if (hasDesiredBytesPerPartition()) { + if (getDesiredBytesPerPartition() != other.getDesiredBytesPerPartition()) return false; + } + if (hasMaxPartitionCount() != other.hasMaxPartitionCount()) return false; + if (hasMaxPartitionCount()) { + if (getMaxPartitionCount() != other.getMaxPartitionCount()) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasRead()) { + hash = (37 * hash) + READ_FIELD_NUMBER; + hash = (53 * hash) + getRead().hashCode(); + } + if (getTableCount() > 0) { + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTableList().hashCode(); + } + if (hasDesiredBytesPerPartition()) { + hash = (37 * hash) + DESIRED_BYTES_PER_PARTITION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getDesiredBytesPerPartition()); + } + if (hasMaxPartitionCount()) { + hash = (37 * hash) + MAX_PARTITION_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getMaxPartitionCount()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction parseDelimitedFrom( + 
java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Generate database partitions for the given read. Successful outcomes will
    +   * contain database partitions in the db_partition field.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.GenerateDbPartitionsForReadAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.GenerateDbPartitionsForReadAction) + com.google.spanner.executor.v1.GenerateDbPartitionsForReadActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GenerateDbPartitionsForReadAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GenerateDbPartitionsForReadAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction.class, + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetReadFieldBuilder(); + internalGetTableFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + read_ = null; + if (readBuilder_ != null) { + readBuilder_.dispose(); + readBuilder_ = null; + } + if (tableBuilder_ == null) { + table_ = java.util.Collections.emptyList(); + } else { + table_ = null; + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + 
desiredBytesPerPartition_ = 0L; + maxPartitionCount_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GenerateDbPartitionsForReadAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction + getDefaultInstanceForType() { + return com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction build() { + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction buildPartial() { + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction result = + new com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction result) { + if (tableBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + table_ = java.util.Collections.unmodifiableList(table_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.table_ = table_; + } else { + result.table_ = tableBuilder_.build(); + } + } + + private void buildPartial0( + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.read_ = readBuilder_ == null ? 
read_ : readBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.desiredBytesPerPartition_ = desiredBytesPerPartition_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.maxPartitionCount_ = maxPartitionCount_; + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction) { + return mergeFrom((com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction other) { + if (other + == com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction.getDefaultInstance()) + return this; + if (other.hasRead()) { + mergeRead(other.getRead()); + } + if (tableBuilder_ == null) { + if (!other.table_.isEmpty()) { + if (table_.isEmpty()) { + table_ = other.table_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureTableIsMutable(); + table_.addAll(other.table_); + } + onChanged(); + } + } else { + if (!other.table_.isEmpty()) { + if (tableBuilder_.isEmpty()) { + tableBuilder_.dispose(); + tableBuilder_ = null; + table_ = other.table_; + bitField0_ = (bitField0_ & ~0x00000002); + tableBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetTableFieldBuilder() + : null; + } else { + tableBuilder_.addAllMessages(other.table_); + } + } + } + if (other.hasDesiredBytesPerPartition()) { + setDesiredBytesPerPartition(other.getDesiredBytesPerPartition()); + } + if (other.hasMaxPartitionCount()) { + setMaxPartitionCount(other.getMaxPartitionCount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(internalGetReadFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.spanner.executor.v1.TableMetadata m = + input.readMessage( + com.google.spanner.executor.v1.TableMetadata.parser(), extensionRegistry); + if (tableBuilder_ == null) { + ensureTableIsMutable(); + table_.add(m); + } else { + tableBuilder_.addMessage(m); + } + break; + } // case 18 + case 24: + { + desiredBytesPerPartition_ = input.readInt64(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 32: + { + maxPartitionCount_ = input.readInt64(); + bitField0_ |= 0x00000008; + break; + } // case 32 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private 
com.google.spanner.executor.v1.ReadAction read_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ReadAction, + com.google.spanner.executor.v1.ReadAction.Builder, + com.google.spanner.executor.v1.ReadActionOrBuilder> + readBuilder_; + + /** + * + * + *
    +     * Read to generate partitions for.
    +     * 
    + * + * .google.spanner.executor.v1.ReadAction read = 1; + * + * @return Whether the read field is set. + */ + public boolean hasRead() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Read to generate partitions for.
    +     * 
    + * + * .google.spanner.executor.v1.ReadAction read = 1; + * + * @return The read. + */ + public com.google.spanner.executor.v1.ReadAction getRead() { + if (readBuilder_ == null) { + return read_ == null + ? com.google.spanner.executor.v1.ReadAction.getDefaultInstance() + : read_; + } else { + return readBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Read to generate partitions for.
    +     * 
    + * + * .google.spanner.executor.v1.ReadAction read = 1; + */ + public Builder setRead(com.google.spanner.executor.v1.ReadAction value) { + if (readBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + read_ = value; + } else { + readBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Read to generate partitions for.
    +     * 
    + * + * .google.spanner.executor.v1.ReadAction read = 1; + */ + public Builder setRead(com.google.spanner.executor.v1.ReadAction.Builder builderForValue) { + if (readBuilder_ == null) { + read_ = builderForValue.build(); + } else { + readBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Read to generate partitions for.
    +     * 
    + * + * .google.spanner.executor.v1.ReadAction read = 1; + */ + public Builder mergeRead(com.google.spanner.executor.v1.ReadAction value) { + if (readBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && read_ != null + && read_ != com.google.spanner.executor.v1.ReadAction.getDefaultInstance()) { + getReadBuilder().mergeFrom(value); + } else { + read_ = value; + } + } else { + readBuilder_.mergeFrom(value); + } + if (read_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Read to generate partitions for.
    +     * 
    + * + * .google.spanner.executor.v1.ReadAction read = 1; + */ + public Builder clearRead() { + bitField0_ = (bitField0_ & ~0x00000001); + read_ = null; + if (readBuilder_ != null) { + readBuilder_.dispose(); + readBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Read to generate partitions for.
    +     * 
    + * + * .google.spanner.executor.v1.ReadAction read = 1; + */ + public com.google.spanner.executor.v1.ReadAction.Builder getReadBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetReadFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Read to generate partitions for.
    +     * 
    + * + * .google.spanner.executor.v1.ReadAction read = 1; + */ + public com.google.spanner.executor.v1.ReadActionOrBuilder getReadOrBuilder() { + if (readBuilder_ != null) { + return readBuilder_.getMessageOrBuilder(); + } else { + return read_ == null + ? com.google.spanner.executor.v1.ReadAction.getDefaultInstance() + : read_; + } + } + + /** + * + * + *
    +     * Read to generate partitions for.
    +     * 
    + * + * .google.spanner.executor.v1.ReadAction read = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ReadAction, + com.google.spanner.executor.v1.ReadAction.Builder, + com.google.spanner.executor.v1.ReadActionOrBuilder> + internalGetReadFieldBuilder() { + if (readBuilder_ == null) { + readBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ReadAction, + com.google.spanner.executor.v1.ReadAction.Builder, + com.google.spanner.executor.v1.ReadActionOrBuilder>( + getRead(), getParentForChildren(), isClean()); + read_ = null; + } + return readBuilder_; + } + + private java.util.List table_ = + java.util.Collections.emptyList(); + + private void ensureTableIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + table_ = new java.util.ArrayList(table_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.TableMetadata, + com.google.spanner.executor.v1.TableMetadata.Builder, + com.google.spanner.executor.v1.TableMetadataOrBuilder> + tableBuilder_; + + /** + * + * + *
    +     * Metadata related to the tables involved in the read.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public java.util.List getTableList() { + if (tableBuilder_ == null) { + return java.util.Collections.unmodifiableList(table_); + } else { + return tableBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Metadata related to the tables involved in the read.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public int getTableCount() { + if (tableBuilder_ == null) { + return table_.size(); + } else { + return tableBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Metadata related to the tables involved in the read.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public com.google.spanner.executor.v1.TableMetadata getTable(int index) { + if (tableBuilder_ == null) { + return table_.get(index); + } else { + return tableBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Metadata related to the tables involved in the read.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public Builder setTable(int index, com.google.spanner.executor.v1.TableMetadata value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableIsMutable(); + table_.set(index, value); + onChanged(); + } else { + tableBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Metadata related to the tables involved in the read.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public Builder setTable( + int index, com.google.spanner.executor.v1.TableMetadata.Builder builderForValue) { + if (tableBuilder_ == null) { + ensureTableIsMutable(); + table_.set(index, builderForValue.build()); + onChanged(); + } else { + tableBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Metadata related to the tables involved in the read.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public Builder addTable(com.google.spanner.executor.v1.TableMetadata value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableIsMutable(); + table_.add(value); + onChanged(); + } else { + tableBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Metadata related to the tables involved in the read.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public Builder addTable(int index, com.google.spanner.executor.v1.TableMetadata value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableIsMutable(); + table_.add(index, value); + onChanged(); + } else { + tableBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Metadata related to the tables involved in the read.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public Builder addTable(com.google.spanner.executor.v1.TableMetadata.Builder builderForValue) { + if (tableBuilder_ == null) { + ensureTableIsMutable(); + table_.add(builderForValue.build()); + onChanged(); + } else { + tableBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Metadata related to the tables involved in the read.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public Builder addTable( + int index, com.google.spanner.executor.v1.TableMetadata.Builder builderForValue) { + if (tableBuilder_ == null) { + ensureTableIsMutable(); + table_.add(index, builderForValue.build()); + onChanged(); + } else { + tableBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Metadata related to the tables involved in the read.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public Builder addAllTable( + java.lang.Iterable values) { + if (tableBuilder_ == null) { + ensureTableIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, table_); + onChanged(); + } else { + tableBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Metadata related to the tables involved in the read.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public Builder clearTable() { + if (tableBuilder_ == null) { + table_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + tableBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Metadata related to the tables involved in the read.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public Builder removeTable(int index) { + if (tableBuilder_ == null) { + ensureTableIsMutable(); + table_.remove(index); + onChanged(); + } else { + tableBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Metadata related to the tables involved in the read.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public com.google.spanner.executor.v1.TableMetadata.Builder getTableBuilder(int index) { + return internalGetTableFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Metadata related to the tables involved in the read.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public com.google.spanner.executor.v1.TableMetadataOrBuilder getTableOrBuilder(int index) { + if (tableBuilder_ == null) { + return table_.get(index); + } else { + return tableBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Metadata related to the tables involved in the read.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public java.util.List + getTableOrBuilderList() { + if (tableBuilder_ != null) { + return tableBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(table_); + } + } + + /** + * + * + *
    +     * Metadata related to the tables involved in the read.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public com.google.spanner.executor.v1.TableMetadata.Builder addTableBuilder() { + return internalGetTableFieldBuilder() + .addBuilder(com.google.spanner.executor.v1.TableMetadata.getDefaultInstance()); + } + + /** + * + * + *
    +     * Metadata related to the tables involved in the read.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public com.google.spanner.executor.v1.TableMetadata.Builder addTableBuilder(int index) { + return internalGetTableFieldBuilder() + .addBuilder(index, com.google.spanner.executor.v1.TableMetadata.getDefaultInstance()); + } + + /** + * + * + *
    +     * Metadata related to the tables involved in the read.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public java.util.List + getTableBuilderList() { + return internalGetTableFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.TableMetadata, + com.google.spanner.executor.v1.TableMetadata.Builder, + com.google.spanner.executor.v1.TableMetadataOrBuilder> + internalGetTableFieldBuilder() { + if (tableBuilder_ == null) { + tableBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.TableMetadata, + com.google.spanner.executor.v1.TableMetadata.Builder, + com.google.spanner.executor.v1.TableMetadataOrBuilder>( + table_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + table_ = null; + } + return tableBuilder_; + } + + private long desiredBytesPerPartition_; + + /** + * + * + *
    +     * Desired size of data in each partition. Spanner doesn't guarantee to
    +     * respect this value.
    +     * 
    + * + * optional int64 desired_bytes_per_partition = 3; + * + * @return Whether the desiredBytesPerPartition field is set. + */ + @java.lang.Override + public boolean hasDesiredBytesPerPartition() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Desired size of data in each partition. Spanner doesn't guarantee to
    +     * respect this value.
    +     * 
    + * + * optional int64 desired_bytes_per_partition = 3; + * + * @return The desiredBytesPerPartition. + */ + @java.lang.Override + public long getDesiredBytesPerPartition() { + return desiredBytesPerPartition_; + } + + /** + * + * + *
    +     * Desired size of data in each partition. Spanner doesn't guarantee to
    +     * respect this value.
    +     * 
    + * + * optional int64 desired_bytes_per_partition = 3; + * + * @param value The desiredBytesPerPartition to set. + * @return This builder for chaining. + */ + public Builder setDesiredBytesPerPartition(long value) { + + desiredBytesPerPartition_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Desired size of data in each partition. Spanner doesn't guarantee to
    +     * respect this value.
    +     * 
    + * + * optional int64 desired_bytes_per_partition = 3; + * + * @return This builder for chaining. + */ + public Builder clearDesiredBytesPerPartition() { + bitField0_ = (bitField0_ & ~0x00000004); + desiredBytesPerPartition_ = 0L; + onChanged(); + return this; + } + + private long maxPartitionCount_; + + /** + * + * + *
    +     * If set, the desired max number of partitions. Spanner doesn't guarantee to
    +     * respect this value.
    +     * 
    + * + * optional int64 max_partition_count = 4; + * + * @return Whether the maxPartitionCount field is set. + */ + @java.lang.Override + public boolean hasMaxPartitionCount() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * If set, the desired max number of partitions. Spanner doesn't guarantee to
    +     * respect this value.
    +     * 
    + * + * optional int64 max_partition_count = 4; + * + * @return The maxPartitionCount. + */ + @java.lang.Override + public long getMaxPartitionCount() { + return maxPartitionCount_; + } + + /** + * + * + *
    +     * If set, the desired max number of partitions. Spanner doesn't guarantee to
    +     * respect this value.
    +     * 
    + * + * optional int64 max_partition_count = 4; + * + * @param value The maxPartitionCount to set. + * @return This builder for chaining. + */ + public Builder setMaxPartitionCount(long value) { + + maxPartitionCount_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If set, the desired max number of partitions. Spanner doesn't guarantee to
    +     * respect this value.
    +     * 
    + * + * optional int64 max_partition_count = 4; + * + * @return This builder for chaining. + */ + public Builder clearMaxPartitionCount() { + bitField0_ = (bitField0_ & ~0x00000008); + maxPartitionCount_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.GenerateDbPartitionsForReadAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.GenerateDbPartitionsForReadAction) + private static final com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction(); + } + + public static com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GenerateDbPartitionsForReadAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForReadActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForReadActionOrBuilder.java new file mode 100644 index 000000000000..ee1632fb6dca --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForReadActionOrBuilder.java @@ -0,0 +1,177 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface GenerateDbPartitionsForReadActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.GenerateDbPartitionsForReadAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Read to generate partitions for.
    +   * 
    + * + * .google.spanner.executor.v1.ReadAction read = 1; + * + * @return Whether the read field is set. + */ + boolean hasRead(); + + /** + * + * + *
    +   * Read to generate partitions for.
    +   * 
    + * + * .google.spanner.executor.v1.ReadAction read = 1; + * + * @return The read. + */ + com.google.spanner.executor.v1.ReadAction getRead(); + + /** + * + * + *
    +   * Read to generate partitions for.
    +   * 
    + * + * .google.spanner.executor.v1.ReadAction read = 1; + */ + com.google.spanner.executor.v1.ReadActionOrBuilder getReadOrBuilder(); + + /** + * + * + *
    +   * Metadata related to the tables involved in the read.
    +   * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + java.util.List getTableList(); + + /** + * + * + *
    +   * Metadata related to the tables involved in the read.
    +   * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + com.google.spanner.executor.v1.TableMetadata getTable(int index); + + /** + * + * + *
    +   * Metadata related to the tables involved in the read.
    +   * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + int getTableCount(); + + /** + * + * + *
    +   * Metadata related to the tables involved in the read.
    +   * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + java.util.List + getTableOrBuilderList(); + + /** + * + * + *
    +   * Metadata related to the tables involved in the read.
    +   * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + com.google.spanner.executor.v1.TableMetadataOrBuilder getTableOrBuilder(int index); + + /** + * + * + *
    +   * Desired size of data in each partition. Spanner doesn't guarantee to
    +   * respect this value.
    +   * 
    + * + * optional int64 desired_bytes_per_partition = 3; + * + * @return Whether the desiredBytesPerPartition field is set. + */ + boolean hasDesiredBytesPerPartition(); + + /** + * + * + *
    +   * Desired size of data in each partition. Spanner doesn't guarantee to
    +   * respect this value.
    +   * 
    + * + * optional int64 desired_bytes_per_partition = 3; + * + * @return The desiredBytesPerPartition. + */ + long getDesiredBytesPerPartition(); + + /** + * + * + *
    +   * If set, the desired max number of partitions. Spanner doesn't guarantee to
    +   * respect this value.
    +   * 
    + * + * optional int64 max_partition_count = 4; + * + * @return Whether the maxPartitionCount field is set. + */ + boolean hasMaxPartitionCount(); + + /** + * + * + *
    +   * If set, the desired max number of partitions. Spanner doesn't guarantee to
    +   * respect this value.
    +   * 
    + * + * optional int64 max_partition_count = 4; + * + * @return The maxPartitionCount. + */ + long getMaxPartitionCount(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudBackupAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudBackupAction.java new file mode 100644 index 000000000000..e57bdb7e3488 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudBackupAction.java @@ -0,0 +1,974 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that gets a Cloud Spanner database backup.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.GetCloudBackupAction} + */ +@com.google.protobuf.Generated +public final class GetCloudBackupAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.GetCloudBackupAction) + GetCloudBackupActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetCloudBackupAction"); + } + + // Use GetCloudBackupAction.newBuilder() to construct. + private GetCloudBackupAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetCloudBackupAction() { + projectId_ = ""; + instanceId_ = ""; + backupId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetCloudBackupAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetCloudBackupAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.GetCloudBackupAction.class, + com.google.spanner.executor.v1.GetCloudBackupAction.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + @java.lang.Override + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BACKUP_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object backupId_ = ""; + + /** + * + * + *
    +   * The id of the backup to get, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The backupId. + */ + @java.lang.Override + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupId_ = s; + return s; + } + } + + /** + * + * + *
    +   * The id of the backup to get, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The bytes for backupId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(backupId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, backupId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(backupId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, backupId_); + } + size += 
getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.GetCloudBackupAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.GetCloudBackupAction other = + (com.google.spanner.executor.v1.GetCloudBackupAction) obj; + + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getInstanceId().equals(other.getInstanceId())) return false; + if (!getBackupId().equals(other.getBackupId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.GetCloudBackupAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.GetCloudBackupAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetCloudBackupAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.GetCloudBackupAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetCloudBackupAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.GetCloudBackupAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetCloudBackupAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.GetCloudBackupAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetCloudBackupAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.GetCloudBackupAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetCloudBackupAction parseFrom( + 
com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.GetCloudBackupAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.GetCloudBackupAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that gets a Cloud Spanner database backup.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.GetCloudBackupAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.GetCloudBackupAction) + com.google.spanner.executor.v1.GetCloudBackupActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetCloudBackupAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetCloudBackupAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.GetCloudBackupAction.class, + com.google.spanner.executor.v1.GetCloudBackupAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.GetCloudBackupAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + projectId_ = ""; + instanceId_ = ""; + backupId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetCloudBackupAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudBackupAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.GetCloudBackupAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudBackupAction build() { + com.google.spanner.executor.v1.GetCloudBackupAction result = 
buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudBackupAction buildPartial() { + com.google.spanner.executor.v1.GetCloudBackupAction result = + new com.google.spanner.executor.v1.GetCloudBackupAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.GetCloudBackupAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.projectId_ = projectId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.instanceId_ = instanceId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.backupId_ = backupId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.GetCloudBackupAction) { + return mergeFrom((com.google.spanner.executor.v1.GetCloudBackupAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.GetCloudBackupAction other) { + if (other == com.google.spanner.executor.v1.GetCloudBackupAction.getDefaultInstance()) + return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getBackupId().isEmpty()) { + backupId_ = other.backupId_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + instanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + backupId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearInstanceId() { + instanceId_ = getDefaultInstance().getInstanceId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The bytes for instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object backupId_ = ""; + + /** + * + * + *
    +     * The id of the backup to get, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @return The backupId. + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The id of the backup to get, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @return The bytes for backupId. + */ + public com.google.protobuf.ByteString getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The id of the backup to get, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @param value The backupId to set. + * @return This builder for chaining. + */ + public Builder setBackupId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + backupId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The id of the backup to get, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @return This builder for chaining. + */ + public Builder clearBackupId() { + backupId_ = getDefaultInstance().getBackupId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The id of the backup to get, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @param value The bytes for backupId to set. + * @return This builder for chaining. + */ + public Builder setBackupIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + backupId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.GetCloudBackupAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.GetCloudBackupAction) + private static final com.google.spanner.executor.v1.GetCloudBackupAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.GetCloudBackupAction(); + } + + public static com.google.spanner.executor.v1.GetCloudBackupAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetCloudBackupAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + 
@java.lang.Override + public com.google.spanner.executor.v1.GetCloudBackupAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudBackupActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudBackupActionOrBuilder.java new file mode 100644 index 000000000000..a42478d11322 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudBackupActionOrBuilder.java @@ -0,0 +1,106 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface GetCloudBackupActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.GetCloudBackupAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + java.lang.String getInstanceId(); + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + com.google.protobuf.ByteString getInstanceIdBytes(); + + /** + * + * + *
    +   * The id of the backup to get, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The backupId. + */ + java.lang.String getBackupId(); + + /** + * + * + *
    +   * The id of the backup to get, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The bytes for backupId. + */ + com.google.protobuf.ByteString getBackupIdBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudDatabaseAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudDatabaseAction.java new file mode 100644 index 000000000000..fc799e82d6cf --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudDatabaseAction.java @@ -0,0 +1,975 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that gets a Cloud Spanner database.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.GetCloudDatabaseAction} + */ +@com.google.protobuf.Generated +public final class GetCloudDatabaseAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.GetCloudDatabaseAction) + GetCloudDatabaseActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetCloudDatabaseAction"); + } + + // Use GetCloudDatabaseAction.newBuilder() to construct. + private GetCloudDatabaseAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetCloudDatabaseAction() { + projectId_ = ""; + instanceId_ = ""; + databaseId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetCloudDatabaseAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetCloudDatabaseAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.GetCloudDatabaseAction.class, + com.google.spanner.executor.v1.GetCloudDatabaseAction.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + @java.lang.Override + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DATABASE_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object databaseId_ = ""; + + /** + * + * + *
    +   * The id of the database to get, e.g. "db0".
    +   * 
    + * + * string database_id = 3; + * + * @return The databaseId. + */ + @java.lang.Override + public java.lang.String getDatabaseId() { + java.lang.Object ref = databaseId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseId_ = s; + return s; + } + } + + /** + * + * + *
    +   * The id of the database to get, e.g. "db0".
    +   * 
    + * + * string database_id = 3; + * + * @return The bytes for databaseId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseIdBytes() { + java.lang.Object ref = databaseId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, databaseId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, databaseId_); + } + size += 
getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.GetCloudDatabaseAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.GetCloudDatabaseAction other = + (com.google.spanner.executor.v1.GetCloudDatabaseAction) obj; + + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getInstanceId().equals(other.getInstanceId())) return false; + if (!getDatabaseId().equals(other.getDatabaseId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + hash = (37 * hash) + DATABASE_ID_FIELD_NUMBER; + hash = (53 * hash) + getDatabaseId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.GetCloudDatabaseAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.GetCloudDatabaseAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetCloudDatabaseAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.GetCloudDatabaseAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetCloudDatabaseAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.GetCloudDatabaseAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetCloudDatabaseAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.GetCloudDatabaseAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetCloudDatabaseAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.GetCloudDatabaseAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetCloudDatabaseAction 
parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.GetCloudDatabaseAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.GetCloudDatabaseAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that gets a Cloud Spanner database.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.GetCloudDatabaseAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.GetCloudDatabaseAction) + com.google.spanner.executor.v1.GetCloudDatabaseActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetCloudDatabaseAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetCloudDatabaseAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.GetCloudDatabaseAction.class, + com.google.spanner.executor.v1.GetCloudDatabaseAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.GetCloudDatabaseAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + projectId_ = ""; + instanceId_ = ""; + databaseId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetCloudDatabaseAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudDatabaseAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.GetCloudDatabaseAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudDatabaseAction build() { + 
com.google.spanner.executor.v1.GetCloudDatabaseAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudDatabaseAction buildPartial() { + com.google.spanner.executor.v1.GetCloudDatabaseAction result = + new com.google.spanner.executor.v1.GetCloudDatabaseAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.GetCloudDatabaseAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.projectId_ = projectId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.instanceId_ = instanceId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.databaseId_ = databaseId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.GetCloudDatabaseAction) { + return mergeFrom((com.google.spanner.executor.v1.GetCloudDatabaseAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.GetCloudDatabaseAction other) { + if (other == com.google.spanner.executor.v1.GetCloudDatabaseAction.getDefaultInstance()) + return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getDatabaseId().isEmpty()) { + databaseId_ = other.databaseId_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder 
mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + instanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + databaseId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearInstanceId() { + instanceId_ = getDefaultInstance().getInstanceId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The bytes for instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object databaseId_ = ""; + + /** + * + * + *
    +     * The id of the database to get, e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @return The databaseId. + */ + public java.lang.String getDatabaseId() { + java.lang.Object ref = databaseId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The id of the database to get, e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @return The bytes for databaseId. + */ + public com.google.protobuf.ByteString getDatabaseIdBytes() { + java.lang.Object ref = databaseId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The id of the database to get, e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @param value The databaseId to set. + * @return This builder for chaining. + */ + public Builder setDatabaseId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + databaseId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The id of the database to get, e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @return This builder for chaining. + */ + public Builder clearDatabaseId() { + databaseId_ = getDefaultInstance().getDatabaseId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The id of the database to get, e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @param value The bytes for databaseId to set. + * @return This builder for chaining. + */ + public Builder setDatabaseIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + databaseId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.GetCloudDatabaseAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.GetCloudDatabaseAction) + private static final com.google.spanner.executor.v1.GetCloudDatabaseAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.GetCloudDatabaseAction(); + } + + public static com.google.spanner.executor.v1.GetCloudDatabaseAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetCloudDatabaseAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } 
+ + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudDatabaseAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudDatabaseActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudDatabaseActionOrBuilder.java new file mode 100644 index 000000000000..b33e2d8bfade --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudDatabaseActionOrBuilder.java @@ -0,0 +1,106 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface GetCloudDatabaseActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.GetCloudDatabaseAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + java.lang.String getInstanceId(); + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + com.google.protobuf.ByteString getInstanceIdBytes(); + + /** + * + * + *
    +   * The id of the database to get, e.g. "db0".
    +   * 
    + * + * string database_id = 3; + * + * @return The databaseId. + */ + java.lang.String getDatabaseId(); + + /** + * + * + *
    +   * The id of the database to get, e.g. "db0".
    +   * 
    + * + * string database_id = 3; + * + * @return The bytes for databaseId. + */ + com.google.protobuf.ByteString getDatabaseIdBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceAction.java new file mode 100644 index 000000000000..7fde64569b52 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceAction.java @@ -0,0 +1,793 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that retrieves a Cloud Spanner instance.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.GetCloudInstanceAction} + */ +@com.google.protobuf.Generated +public final class GetCloudInstanceAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.GetCloudInstanceAction) + GetCloudInstanceActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetCloudInstanceAction"); + } + + // Use GetCloudInstanceAction.newBuilder() to construct. + private GetCloudInstanceAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetCloudInstanceAction() { + projectId_ = ""; + instanceId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetCloudInstanceAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetCloudInstanceAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.GetCloudInstanceAction.class, + com.google.spanner.executor.v1.GetCloudInstanceAction.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +   * Cloud instance ID (not path) to retrieve the instance from,
    +   * e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + @java.lang.Override + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud instance ID (not path) to retrieve the instance from,
    +   * e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, instanceId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, instanceId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.GetCloudInstanceAction)) { + return super.equals(obj); + } + 
com.google.spanner.executor.v1.GetCloudInstanceAction other = + (com.google.spanner.executor.v1.GetCloudInstanceAction) obj; + + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getInstanceId().equals(other.getInstanceId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.GetCloudInstanceAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceAction parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.GetCloudInstanceAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that retrieves a Cloud Spanner instance.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.GetCloudInstanceAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.GetCloudInstanceAction) + com.google.spanner.executor.v1.GetCloudInstanceActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetCloudInstanceAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetCloudInstanceAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.GetCloudInstanceAction.class, + com.google.spanner.executor.v1.GetCloudInstanceAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.GetCloudInstanceAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + projectId_ = ""; + instanceId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetCloudInstanceAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudInstanceAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.GetCloudInstanceAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudInstanceAction build() { + com.google.spanner.executor.v1.GetCloudInstanceAction 
result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudInstanceAction buildPartial() { + com.google.spanner.executor.v1.GetCloudInstanceAction result = + new com.google.spanner.executor.v1.GetCloudInstanceAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.GetCloudInstanceAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.projectId_ = projectId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.instanceId_ = instanceId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.GetCloudInstanceAction) { + return mergeFrom((com.google.spanner.executor.v1.GetCloudInstanceAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.GetCloudInstanceAction other) { + if (other == com.google.spanner.executor.v1.GetCloudInstanceAction.getDefaultInstance()) + return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + 
while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + instanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +     * Cloud instance ID (not path) to retrieve the instance from,
    +     * e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path) to retrieve the instance from,
    +     * e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path) to retrieve the instance from,
    +     * e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path) to retrieve the instance from,
    +     * e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearInstanceId() { + instanceId_ = getDefaultInstance().getInstanceId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path) to retrieve the instance from,
    +     * e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The bytes for instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.GetCloudInstanceAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.GetCloudInstanceAction) + private static final com.google.spanner.executor.v1.GetCloudInstanceAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.GetCloudInstanceAction(); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetCloudInstanceAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } 
+ + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudInstanceAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceActionOrBuilder.java new file mode 100644 index 000000000000..05811d3ae4bd --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceActionOrBuilder.java @@ -0,0 +1,82 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface GetCloudInstanceActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.GetCloudInstanceAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * Cloud instance ID (not path) to retrieve the instance from,
    +   * e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + java.lang.String getInstanceId(); + + /** + * + * + *
    +   * Cloud instance ID (not path) to retrieve the instance from,
    +   * e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + com.google.protobuf.ByteString getInstanceIdBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceConfigAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceConfigAction.java new file mode 100644 index 000000000000..fd37c45e3758 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceConfigAction.java @@ -0,0 +1,786 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that gets a user instance config.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.GetCloudInstanceConfigAction} + */ +@com.google.protobuf.Generated +public final class GetCloudInstanceConfigAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.GetCloudInstanceConfigAction) + GetCloudInstanceConfigActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetCloudInstanceConfigAction"); + } + + // Use GetCloudInstanceConfigAction.newBuilder() to construct. + private GetCloudInstanceConfigAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetCloudInstanceConfigAction() { + instanceConfigId_ = ""; + projectId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetCloudInstanceConfigAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetCloudInstanceConfigAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.GetCloudInstanceConfigAction.class, + com.google.spanner.executor.v1.GetCloudInstanceConfigAction.Builder.class); + } + + public static final int INSTANCE_CONFIG_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceConfigId_ = ""; + + /** + * + * + *
    +   * Instance config ID (not path), e.g. "custom-config".
    +   * 
    + * + * string instance_config_id = 1; + * + * @return The instanceConfigId. + */ + @java.lang.Override + public java.lang.String getInstanceConfigId() { + java.lang.Object ref = instanceConfigId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceConfigId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Instance config ID (not path), e.g. "custom-config".
    +   * 
    + * + * string instance_config_id = 1; + * + * @return The bytes for instanceConfigId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceConfigIdBytes() { + java.lang.Object ref = instanceConfigId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceConfigId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROJECT_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceConfigId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, instanceConfigId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, projectId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceConfigId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, instanceConfigId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, projectId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.GetCloudInstanceConfigAction)) { + return super.equals(obj); 
+ } + com.google.spanner.executor.v1.GetCloudInstanceConfigAction other = + (com.google.spanner.executor.v1.GetCloudInstanceConfigAction) obj; + + if (!getInstanceConfigId().equals(other.getInstanceConfigId())) return false; + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + INSTANCE_CONFIG_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceConfigId().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.GetCloudInstanceConfigAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceConfigAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceConfigAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceConfigAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceConfigAction 
parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceConfigAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceConfigAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceConfigAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceConfigAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceConfigAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceConfigAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceConfigAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + 
throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.GetCloudInstanceConfigAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that gets a user instance config.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.GetCloudInstanceConfigAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.GetCloudInstanceConfigAction) + com.google.spanner.executor.v1.GetCloudInstanceConfigActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetCloudInstanceConfigAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetCloudInstanceConfigAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.GetCloudInstanceConfigAction.class, + com.google.spanner.executor.v1.GetCloudInstanceConfigAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.GetCloudInstanceConfigAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + instanceConfigId_ = ""; + projectId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetCloudInstanceConfigAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudInstanceConfigAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.GetCloudInstanceConfigAction.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.executor.v1.GetCloudInstanceConfigAction build() { + com.google.spanner.executor.v1.GetCloudInstanceConfigAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudInstanceConfigAction buildPartial() { + com.google.spanner.executor.v1.GetCloudInstanceConfigAction result = + new com.google.spanner.executor.v1.GetCloudInstanceConfigAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.GetCloudInstanceConfigAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.instanceConfigId_ = instanceConfigId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.projectId_ = projectId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.GetCloudInstanceConfigAction) { + return mergeFrom((com.google.spanner.executor.v1.GetCloudInstanceConfigAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.GetCloudInstanceConfigAction other) { + if (other == com.google.spanner.executor.v1.GetCloudInstanceConfigAction.getDefaultInstance()) + return this; + if (!other.getInstanceConfigId().isEmpty()) { + instanceConfigId_ = other.instanceConfigId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + instanceConfigId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object instanceConfigId_ = ""; + + /** + * + * + *
    +     * Instance config ID (not path), e.g. "custom-config".
    +     * 
    + * + * string instance_config_id = 1; + * + * @return The instanceConfigId. + */ + public java.lang.String getInstanceConfigId() { + java.lang.Object ref = instanceConfigId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceConfigId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Instance config ID (not path), e.g. "custom-config".
    +     * 
    + * + * string instance_config_id = 1; + * + * @return The bytes for instanceConfigId. + */ + public com.google.protobuf.ByteString getInstanceConfigIdBytes() { + java.lang.Object ref = instanceConfigId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceConfigId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Instance config ID (not path), e.g. "custom-config".
    +     * 
    + * + * string instance_config_id = 1; + * + * @param value The instanceConfigId to set. + * @return This builder for chaining. + */ + public Builder setInstanceConfigId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceConfigId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Instance config ID (not path), e.g. "custom-config".
    +     * 
    + * + * string instance_config_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearInstanceConfigId() { + instanceConfigId_ = getDefaultInstance().getInstanceConfigId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Instance config ID (not path), e.g. "custom-config".
    +     * 
    + * + * string instance_config_id = 1; + * + * @param value The bytes for instanceConfigId to set. + * @return This builder for chaining. + */ + public Builder setInstanceConfigIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceConfigId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.GetCloudInstanceConfigAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.GetCloudInstanceConfigAction) + private static final com.google.spanner.executor.v1.GetCloudInstanceConfigAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.GetCloudInstanceConfigAction(); + } + + public static com.google.spanner.executor.v1.GetCloudInstanceConfigAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetCloudInstanceConfigAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser 
getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.GetCloudInstanceConfigAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceConfigActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceConfigActionOrBuilder.java new file mode 100644 index 000000000000..d4517c60c821 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceConfigActionOrBuilder.java @@ -0,0 +1,80 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface GetCloudInstanceConfigActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.GetCloudInstanceConfigAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Instance config ID (not path), e.g. "custom-config".
    +   * 
    + * + * string instance_config_id = 1; + * + * @return The instanceConfigId. + */ + java.lang.String getInstanceConfigId(); + + /** + * + * + *
    +   * Instance config ID (not path), e.g. "custom-config".
    +   * 
    + * + * string instance_config_id = 1; + * + * @return The bytes for instanceConfigId. + */ + com.google.protobuf.ByteString getInstanceConfigIdBytes(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetOperationAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetOperationAction.java new file mode 100644 index 000000000000..e16121429930 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetOperationAction.java @@ -0,0 +1,596 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that gets an operation.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.GetOperationAction} + */ +@com.google.protobuf.Generated +public final class GetOperationAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.GetOperationAction) + GetOperationActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetOperationAction"); + } + + // Use GetOperationAction.newBuilder() to construct. + private GetOperationAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetOperationAction() { + operation_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetOperationAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetOperationAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.GetOperationAction.class, + com.google.spanner.executor.v1.GetOperationAction.Builder.class); + } + + public static final int OPERATION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object operation_ = ""; + + /** + * + * + *
    +   * The name of the operation resource.
    +   * 
    + * + * string operation = 1; + * + * @return The operation. + */ + @java.lang.Override + public java.lang.String getOperation() { + java.lang.Object ref = operation_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + operation_ = s; + return s; + } + } + + /** + * + * + *
    +   * The name of the operation resource.
    +   * 
    + * + * string operation = 1; + * + * @return The bytes for operation. + */ + @java.lang.Override + public com.google.protobuf.ByteString getOperationBytes() { + java.lang.Object ref = operation_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + operation_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(operation_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, operation_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(operation_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, operation_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.GetOperationAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.GetOperationAction other = + (com.google.spanner.executor.v1.GetOperationAction) obj; + + if (!getOperation().equals(other.getOperation())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if 
(memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + OPERATION_FIELD_NUMBER; + hash = (53 * hash) + getOperation().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.GetOperationAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.GetOperationAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetOperationAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.GetOperationAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetOperationAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.GetOperationAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetOperationAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.GetOperationAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetOperationAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.GetOperationAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.GetOperationAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.GetOperationAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.GetOperationAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that gets an operation.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.GetOperationAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.GetOperationAction) + com.google.spanner.executor.v1.GetOperationActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetOperationAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetOperationAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.GetOperationAction.class, + com.google.spanner.executor.v1.GetOperationAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.GetOperationAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + operation_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_GetOperationAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.GetOperationAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.GetOperationAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.GetOperationAction build() { + com.google.spanner.executor.v1.GetOperationAction result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.GetOperationAction buildPartial() { + com.google.spanner.executor.v1.GetOperationAction result = + new com.google.spanner.executor.v1.GetOperationAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.GetOperationAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.operation_ = operation_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.GetOperationAction) { + return mergeFrom((com.google.spanner.executor.v1.GetOperationAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.GetOperationAction other) { + if (other == com.google.spanner.executor.v1.GetOperationAction.getDefaultInstance()) + return this; + if (!other.getOperation().isEmpty()) { + operation_ = other.operation_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + operation_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // 
was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object operation_ = ""; + + /** + * + * + *
    +     * The name of the operation resource.
    +     * 
    + * + * string operation = 1; + * + * @return The operation. + */ + public java.lang.String getOperation() { + java.lang.Object ref = operation_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + operation_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The name of the operation resource.
    +     * 
    + * + * string operation = 1; + * + * @return The bytes for operation. + */ + public com.google.protobuf.ByteString getOperationBytes() { + java.lang.Object ref = operation_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + operation_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The name of the operation resource.
    +     * 
    + * + * string operation = 1; + * + * @param value The operation to set. + * @return This builder for chaining. + */ + public Builder setOperation(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + operation_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The name of the operation resource.
    +     * 
    + * + * string operation = 1; + * + * @return This builder for chaining. + */ + public Builder clearOperation() { + operation_ = getDefaultInstance().getOperation(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The name of the operation resource.
    +     * 
    + * + * string operation = 1; + * + * @param value The bytes for operation to set. + * @return This builder for chaining. + */ + public Builder setOperationBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + operation_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.GetOperationAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.GetOperationAction) + private static final com.google.spanner.executor.v1.GetOperationAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.GetOperationAction(); + } + + public static com.google.spanner.executor.v1.GetOperationAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetOperationAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + 
public com.google.spanner.executor.v1.GetOperationAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetOperationActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetOperationActionOrBuilder.java new file mode 100644 index 000000000000..01de76b42ec4 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetOperationActionOrBuilder.java @@ -0,0 +1,54 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface GetOperationActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.GetOperationAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The name of the operation resource.
    +   * 
    + * + * string operation = 1; + * + * @return The operation. + */ + java.lang.String getOperation(); + + /** + * + * + *
    +   * The name of the operation resource.
    +   * 
    + * + * string operation = 1; + * + * @return The bytes for operation. + */ + com.google.protobuf.ByteString getOperationBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/HeartbeatRecord.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/HeartbeatRecord.java new file mode 100644 index 000000000000..e7b5e27ed786 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/HeartbeatRecord.java @@ -0,0 +1,698 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * ChangeStream heartbeat record.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.HeartbeatRecord} + */ +@com.google.protobuf.Generated +public final class HeartbeatRecord extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.HeartbeatRecord) + HeartbeatRecordOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "HeartbeatRecord"); + } + + // Use HeartbeatRecord.newBuilder() to construct. + private HeartbeatRecord(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private HeartbeatRecord() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_HeartbeatRecord_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_HeartbeatRecord_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.HeartbeatRecord.class, + com.google.spanner.executor.v1.HeartbeatRecord.Builder.class); + } + + private int bitField0_; + public static final int HEARTBEAT_TIME_FIELD_NUMBER = 1; + private com.google.protobuf.Timestamp heartbeatTime_; + + /** + * + * + *
    +   * Timestamp for this heartbeat check.
    +   * 
    + * + * .google.protobuf.Timestamp heartbeat_time = 1; + * + * @return Whether the heartbeatTime field is set. + */ + @java.lang.Override + public boolean hasHeartbeatTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Timestamp for this heartbeat check.
    +   * 
    + * + * .google.protobuf.Timestamp heartbeat_time = 1; + * + * @return The heartbeatTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getHeartbeatTime() { + return heartbeatTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : heartbeatTime_; + } + + /** + * + * + *
    +   * Timestamp for this heartbeat check.
    +   * 
    + * + * .google.protobuf.Timestamp heartbeat_time = 1; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getHeartbeatTimeOrBuilder() { + return heartbeatTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : heartbeatTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getHeartbeatTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getHeartbeatTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.HeartbeatRecord)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.HeartbeatRecord other = + (com.google.spanner.executor.v1.HeartbeatRecord) obj; + + if (hasHeartbeatTime() != other.hasHeartbeatTime()) return false; + if (hasHeartbeatTime()) { + if (!getHeartbeatTime().equals(other.getHeartbeatTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasHeartbeatTime()) { + hash = 
(37 * hash) + HEARTBEAT_TIME_FIELD_NUMBER; + hash = (53 * hash) + getHeartbeatTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.HeartbeatRecord parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.HeartbeatRecord parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.HeartbeatRecord parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.HeartbeatRecord parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.HeartbeatRecord parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.HeartbeatRecord parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.HeartbeatRecord parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.HeartbeatRecord parseFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.HeartbeatRecord parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.HeartbeatRecord parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.HeartbeatRecord parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.HeartbeatRecord parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.HeartbeatRecord prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * ChangeStream heartbeat record.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.HeartbeatRecord} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.HeartbeatRecord) + com.google.spanner.executor.v1.HeartbeatRecordOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_HeartbeatRecord_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_HeartbeatRecord_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.HeartbeatRecord.class, + com.google.spanner.executor.v1.HeartbeatRecord.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.HeartbeatRecord.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetHeartbeatTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + heartbeatTime_ = null; + if (heartbeatTimeBuilder_ != null) { + heartbeatTimeBuilder_.dispose(); + heartbeatTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_HeartbeatRecord_descriptor; + } + + @java.lang.Override + public 
com.google.spanner.executor.v1.HeartbeatRecord getDefaultInstanceForType() { + return com.google.spanner.executor.v1.HeartbeatRecord.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.HeartbeatRecord build() { + com.google.spanner.executor.v1.HeartbeatRecord result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.HeartbeatRecord buildPartial() { + com.google.spanner.executor.v1.HeartbeatRecord result = + new com.google.spanner.executor.v1.HeartbeatRecord(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.HeartbeatRecord result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.heartbeatTime_ = + heartbeatTimeBuilder_ == null ? heartbeatTime_ : heartbeatTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.HeartbeatRecord) { + return mergeFrom((com.google.spanner.executor.v1.HeartbeatRecord) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.HeartbeatRecord other) { + if (other == com.google.spanner.executor.v1.HeartbeatRecord.getDefaultInstance()) return this; + if (other.hasHeartbeatTime()) { + mergeHeartbeatTime(other.getHeartbeatTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetHeartbeatTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Timestamp heartbeatTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + heartbeatTimeBuilder_; + + /** + * + * + *
    +     * Timestamp for this heartbeat check.
    +     * 
    + * + * .google.protobuf.Timestamp heartbeat_time = 1; + * + * @return Whether the heartbeatTime field is set. + */ + public boolean hasHeartbeatTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Timestamp for this heartbeat check.
    +     * 
    + * + * .google.protobuf.Timestamp heartbeat_time = 1; + * + * @return The heartbeatTime. + */ + public com.google.protobuf.Timestamp getHeartbeatTime() { + if (heartbeatTimeBuilder_ == null) { + return heartbeatTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : heartbeatTime_; + } else { + return heartbeatTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Timestamp for this heartbeat check.
    +     * 
    + * + * .google.protobuf.Timestamp heartbeat_time = 1; + */ + public Builder setHeartbeatTime(com.google.protobuf.Timestamp value) { + if (heartbeatTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + heartbeatTime_ = value; + } else { + heartbeatTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Timestamp for this heartbeat check.
    +     * 
    + * + * .google.protobuf.Timestamp heartbeat_time = 1; + */ + public Builder setHeartbeatTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (heartbeatTimeBuilder_ == null) { + heartbeatTime_ = builderForValue.build(); + } else { + heartbeatTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Timestamp for this heartbeat check.
    +     * 
    + * + * .google.protobuf.Timestamp heartbeat_time = 1; + */ + public Builder mergeHeartbeatTime(com.google.protobuf.Timestamp value) { + if (heartbeatTimeBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && heartbeatTime_ != null + && heartbeatTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getHeartbeatTimeBuilder().mergeFrom(value); + } else { + heartbeatTime_ = value; + } + } else { + heartbeatTimeBuilder_.mergeFrom(value); + } + if (heartbeatTime_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Timestamp for this heartbeat check.
    +     * 
    + * + * .google.protobuf.Timestamp heartbeat_time = 1; + */ + public Builder clearHeartbeatTime() { + bitField0_ = (bitField0_ & ~0x00000001); + heartbeatTime_ = null; + if (heartbeatTimeBuilder_ != null) { + heartbeatTimeBuilder_.dispose(); + heartbeatTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Timestamp for this heartbeat check.
    +     * 
    + * + * .google.protobuf.Timestamp heartbeat_time = 1; + */ + public com.google.protobuf.Timestamp.Builder getHeartbeatTimeBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetHeartbeatTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Timestamp for this heartbeat check.
    +     * 
    + * + * .google.protobuf.Timestamp heartbeat_time = 1; + */ + public com.google.protobuf.TimestampOrBuilder getHeartbeatTimeOrBuilder() { + if (heartbeatTimeBuilder_ != null) { + return heartbeatTimeBuilder_.getMessageOrBuilder(); + } else { + return heartbeatTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : heartbeatTime_; + } + } + + /** + * + * + *
    +     * Timestamp for this heartbeat check.
    +     * 
    + * + * .google.protobuf.Timestamp heartbeat_time = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetHeartbeatTimeFieldBuilder() { + if (heartbeatTimeBuilder_ == null) { + heartbeatTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getHeartbeatTime(), getParentForChildren(), isClean()); + heartbeatTime_ = null; + } + return heartbeatTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.HeartbeatRecord) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.HeartbeatRecord) + private static final com.google.spanner.executor.v1.HeartbeatRecord DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.HeartbeatRecord(); + } + + public static com.google.spanner.executor.v1.HeartbeatRecord getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public HeartbeatRecord parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return 
builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.HeartbeatRecord getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/HeartbeatRecordOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/HeartbeatRecordOrBuilder.java new file mode 100644 index 000000000000..e2ba4d9b7233 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/HeartbeatRecordOrBuilder.java @@ -0,0 +1,65 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface HeartbeatRecordOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.HeartbeatRecord) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Timestamp for this heartbeat check.
    +   * 
    + * + * .google.protobuf.Timestamp heartbeat_time = 1; + * + * @return Whether the heartbeatTime field is set. + */ + boolean hasHeartbeatTime(); + + /** + * + * + *
    +   * Timestamp for this heartbeat check.
    +   * 
    + * + * .google.protobuf.Timestamp heartbeat_time = 1; + * + * @return The heartbeatTime. + */ + com.google.protobuf.Timestamp getHeartbeatTime(); + + /** + * + * + *
    +   * Timestamp for this heartbeat check.
    +   * 
    + * + * .google.protobuf.Timestamp heartbeat_time = 1; + */ + com.google.protobuf.TimestampOrBuilder getHeartbeatTimeOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeyRange.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeyRange.java new file mode 100644 index 000000000000..1abd7c7d77d8 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeyRange.java @@ -0,0 +1,1413 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * KeyRange represents a range of rows in a table or index.
    + *
    + * A range has a start key and an end key. These keys can be open or
    + * closed, indicating if the range includes rows with that key.
    + *
    + * Keys are represented by "ValueList", where the ith value in the list
    + * corresponds to the ith component of the table or index primary key.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.KeyRange} + */ +@com.google.protobuf.Generated +public final class KeyRange extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.KeyRange) + KeyRangeOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "KeyRange"); + } + + // Use KeyRange.newBuilder() to construct. + private KeyRange(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private KeyRange() { + type_ = 0; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_KeyRange_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_KeyRange_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.KeyRange.class, + com.google.spanner.executor.v1.KeyRange.Builder.class); + } + + /** + * + * + *
    +   * Type controls whether "start" and "limit" are open or closed. By default,
    +   * "start" is closed, and "limit" is open.
    +   * 
    + * + * Protobuf enum {@code google.spanner.executor.v1.KeyRange.Type} + */ + public enum Type implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * "TYPE_UNSPECIFIED" is equivalent to "CLOSED_OPEN".
    +     * 
    + * + * TYPE_UNSPECIFIED = 0; + */ + TYPE_UNSPECIFIED(0), + /** + * + * + *
    +     * [start,limit]
    +     * 
    + * + * CLOSED_CLOSED = 1; + */ + CLOSED_CLOSED(1), + /** + * + * + *
    +     * [start,limit)
    +     * 
    + * + * CLOSED_OPEN = 2; + */ + CLOSED_OPEN(2), + /** + * + * + *
    +     * (start,limit]
    +     * 
    + * + * OPEN_CLOSED = 3; + */ + OPEN_CLOSED(3), + /** + * + * + *
    +     * (start,limit)
    +     * 
    + * + * OPEN_OPEN = 4; + */ + OPEN_OPEN(4), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Type"); + } + + /** + * + * + *
    +     * "TYPE_UNSPECIFIED" is equivalent to "CLOSED_OPEN".
    +     * 
    + * + * TYPE_UNSPECIFIED = 0; + */ + public static final int TYPE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * [start,limit]
    +     * 
    + * + * CLOSED_CLOSED = 1; + */ + public static final int CLOSED_CLOSED_VALUE = 1; + + /** + * + * + *
    +     * [start,limit)
    +     * 
    + * + * CLOSED_OPEN = 2; + */ + public static final int CLOSED_OPEN_VALUE = 2; + + /** + * + * + *
    +     * (start,limit]
    +     * 
    + * + * OPEN_CLOSED = 3; + */ + public static final int OPEN_CLOSED_VALUE = 3; + + /** + * + * + *
    +     * (start,limit)
    +     * 
    + * + * OPEN_OPEN = 4; + */ + public static final int OPEN_OPEN_VALUE = 4; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Type valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Type forNumber(int value) { + switch (value) { + case 0: + return TYPE_UNSPECIFIED; + case 1: + return CLOSED_CLOSED; + case 2: + return CLOSED_OPEN; + case 3: + return OPEN_CLOSED; + case 4: + return OPEN_OPEN; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Type findValueByNumber(int number) { + return Type.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.executor.v1.KeyRange.getDescriptor().getEnumTypes().get(0); + } + + private static final Type[] VALUES = values(); + + public static Type 
valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Type(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.executor.v1.KeyRange.Type) + } + + private int bitField0_; + public static final int START_FIELD_NUMBER = 1; + private com.google.spanner.executor.v1.ValueList start_; + + /** + * + * + *
    +   * "start" and "limit" must have the same number of key parts,
    +   * though they may name only a prefix of the table or index key.
    +   * The start key of this KeyRange.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList start = 1; + * + * @return Whether the start field is set. + */ + @java.lang.Override + public boolean hasStart() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * "start" and "limit" must have the same number of key parts,
    +   * though they may name only a prefix of the table or index key.
    +   * The start key of this KeyRange.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList start = 1; + * + * @return The start. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueList getStart() { + return start_ == null ? com.google.spanner.executor.v1.ValueList.getDefaultInstance() : start_; + } + + /** + * + * + *
    +   * "start" and "limit" must have the same number of key parts,
    +   * though they may name only a prefix of the table or index key.
    +   * The start key of this KeyRange.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList start = 1; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueListOrBuilder getStartOrBuilder() { + return start_ == null ? com.google.spanner.executor.v1.ValueList.getDefaultInstance() : start_; + } + + public static final int LIMIT_FIELD_NUMBER = 2; + private com.google.spanner.executor.v1.ValueList limit_; + + /** + * + * + *
    +   * The end key of this KeyRange.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList limit = 2; + * + * @return Whether the limit field is set. + */ + @java.lang.Override + public boolean hasLimit() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * The end key of this KeyRange.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList limit = 2; + * + * @return The limit. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueList getLimit() { + return limit_ == null ? com.google.spanner.executor.v1.ValueList.getDefaultInstance() : limit_; + } + + /** + * + * + *
    +   * The end key of this KeyRange.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList limit = 2; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueListOrBuilder getLimitOrBuilder() { + return limit_ == null ? com.google.spanner.executor.v1.ValueList.getDefaultInstance() : limit_; + } + + public static final int TYPE_FIELD_NUMBER = 3; + private int type_ = 0; + + /** + * + * + *
    +   * "start" and "limit" type for this KeyRange.
    +   * 
    + * + * optional .google.spanner.executor.v1.KeyRange.Type type = 3; + * + * @return Whether the type field is set. + */ + @java.lang.Override + public boolean hasType() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * "start" and "limit" type for this KeyRange.
    +   * 
    + * + * optional .google.spanner.executor.v1.KeyRange.Type type = 3; + * + * @return The enum numeric value on the wire for type. + */ + @java.lang.Override + public int getTypeValue() { + return type_; + } + + /** + * + * + *
    +   * "start" and "limit" type for this KeyRange.
    +   * 
    + * + * optional .google.spanner.executor.v1.KeyRange.Type type = 3; + * + * @return The type. + */ + @java.lang.Override + public com.google.spanner.executor.v1.KeyRange.Type getType() { + com.google.spanner.executor.v1.KeyRange.Type result = + com.google.spanner.executor.v1.KeyRange.Type.forNumber(type_); + return result == null ? com.google.spanner.executor.v1.KeyRange.Type.UNRECOGNIZED : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getStart()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getLimit()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeEnum(3, type_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getStart()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getLimit()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(3, type_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.KeyRange)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.KeyRange other = 
(com.google.spanner.executor.v1.KeyRange) obj; + + if (hasStart() != other.hasStart()) return false; + if (hasStart()) { + if (!getStart().equals(other.getStart())) return false; + } + if (hasLimit() != other.hasLimit()) return false; + if (hasLimit()) { + if (!getLimit().equals(other.getLimit())) return false; + } + if (hasType() != other.hasType()) return false; + if (hasType()) { + if (type_ != other.type_) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasStart()) { + hash = (37 * hash) + START_FIELD_NUMBER; + hash = (53 * hash) + getStart().hashCode(); + } + if (hasLimit()) { + hash = (37 * hash) + LIMIT_FIELD_NUMBER; + hash = (53 * hash) + getLimit().hashCode(); + } + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + type_; + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.KeyRange parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.KeyRange parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.KeyRange parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.KeyRange parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.KeyRange parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.KeyRange parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.KeyRange parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.KeyRange parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.KeyRange parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.KeyRange parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.KeyRange parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.KeyRange parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.KeyRange prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * KeyRange represents a range of rows in a table or index.
    +   *
    +   * A range has a start key and an end key. These keys can be open or
    +   * closed, indicating if the range includes rows with that key.
    +   *
    +   * Keys are represented by "ValueList", where the ith value in the list
    +   * corresponds to the ith component of the table or index primary key.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.KeyRange} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.KeyRange) + com.google.spanner.executor.v1.KeyRangeOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_KeyRange_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_KeyRange_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.KeyRange.class, + com.google.spanner.executor.v1.KeyRange.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.KeyRange.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetStartFieldBuilder(); + internalGetLimitFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + start_ = null; + if (startBuilder_ != null) { + startBuilder_.dispose(); + startBuilder_ = null; + } + limit_ = null; + if (limitBuilder_ != null) { + limitBuilder_.dispose(); + limitBuilder_ = null; + } + type_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_KeyRange_descriptor; + } + + @java.lang.Override + 
public com.google.spanner.executor.v1.KeyRange getDefaultInstanceForType() { + return com.google.spanner.executor.v1.KeyRange.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.KeyRange build() { + com.google.spanner.executor.v1.KeyRange result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.KeyRange buildPartial() { + com.google.spanner.executor.v1.KeyRange result = + new com.google.spanner.executor.v1.KeyRange(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.KeyRange result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.start_ = startBuilder_ == null ? start_ : startBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.limit_ = limitBuilder_ == null ? 
limit_ : limitBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.type_ = type_; + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.KeyRange) { + return mergeFrom((com.google.spanner.executor.v1.KeyRange) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.KeyRange other) { + if (other == com.google.spanner.executor.v1.KeyRange.getDefaultInstance()) return this; + if (other.hasStart()) { + mergeStart(other.getStart()); + } + if (other.hasLimit()) { + mergeLimit(other.getLimit()); + } + if (other.hasType()) { + setTypeValue(other.getTypeValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(internalGetStartFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(internalGetLimitFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + type_ = input.readEnum(); + bitField0_ |= 0x00000004; + break; + } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch 
(tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.executor.v1.ValueList start_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder> + startBuilder_; + + /** + * + * + *
    +     * "start" and "limit" must have the same number of key parts,
    +     * though they may name only a prefix of the table or index key.
    +     * The start key of this KeyRange.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList start = 1; + * + * @return Whether the start field is set. + */ + public boolean hasStart() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * "start" and "limit" must have the same number of key parts,
    +     * though they may name only a prefix of the table or index key.
    +     * The start key of this KeyRange.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList start = 1; + * + * @return The start. + */ + public com.google.spanner.executor.v1.ValueList getStart() { + if (startBuilder_ == null) { + return start_ == null + ? com.google.spanner.executor.v1.ValueList.getDefaultInstance() + : start_; + } else { + return startBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * "start" and "limit" must have the same number of key parts,
    +     * though they may name only a prefix of the table or index key.
    +     * The start key of this KeyRange.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList start = 1; + */ + public Builder setStart(com.google.spanner.executor.v1.ValueList value) { + if (startBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + start_ = value; + } else { + startBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * "start" and "limit" must have the same number of key parts,
    +     * though they may name only a prefix of the table or index key.
    +     * The start key of this KeyRange.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList start = 1; + */ + public Builder setStart(com.google.spanner.executor.v1.ValueList.Builder builderForValue) { + if (startBuilder_ == null) { + start_ = builderForValue.build(); + } else { + startBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * "start" and "limit" must have the same number of key parts,
    +     * though they may name only a prefix of the table or index key.
    +     * The start key of this KeyRange.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList start = 1; + */ + public Builder mergeStart(com.google.spanner.executor.v1.ValueList value) { + if (startBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && start_ != null + && start_ != com.google.spanner.executor.v1.ValueList.getDefaultInstance()) { + getStartBuilder().mergeFrom(value); + } else { + start_ = value; + } + } else { + startBuilder_.mergeFrom(value); + } + if (start_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * "start" and "limit" must have the same number of key parts,
    +     * though they may name only a prefix of the table or index key.
    +     * The start key of this KeyRange.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList start = 1; + */ + public Builder clearStart() { + bitField0_ = (bitField0_ & ~0x00000001); + start_ = null; + if (startBuilder_ != null) { + startBuilder_.dispose(); + startBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * "start" and "limit" must have the same number of key parts,
    +     * though they may name only a prefix of the table or index key.
    +     * The start key of this KeyRange.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList start = 1; + */ + public com.google.spanner.executor.v1.ValueList.Builder getStartBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetStartFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * "start" and "limit" must have the same number of key parts,
    +     * though they may name only a prefix of the table or index key.
    +     * The start key of this KeyRange.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList start = 1; + */ + public com.google.spanner.executor.v1.ValueListOrBuilder getStartOrBuilder() { + if (startBuilder_ != null) { + return startBuilder_.getMessageOrBuilder(); + } else { + return start_ == null + ? com.google.spanner.executor.v1.ValueList.getDefaultInstance() + : start_; + } + } + + /** + * + * + *
    +     * "start" and "limit" must have the same number of key parts,
    +     * though they may name only a prefix of the table or index key.
    +     * The start key of this KeyRange.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList start = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder> + internalGetStartFieldBuilder() { + if (startBuilder_ == null) { + startBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder>( + getStart(), getParentForChildren(), isClean()); + start_ = null; + } + return startBuilder_; + } + + private com.google.spanner.executor.v1.ValueList limit_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder> + limitBuilder_; + + /** + * + * + *
    +     * The end key of this KeyRange.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList limit = 2; + * + * @return Whether the limit field is set. + */ + public boolean hasLimit() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * The end key of this KeyRange.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList limit = 2; + * + * @return The limit. + */ + public com.google.spanner.executor.v1.ValueList getLimit() { + if (limitBuilder_ == null) { + return limit_ == null + ? com.google.spanner.executor.v1.ValueList.getDefaultInstance() + : limit_; + } else { + return limitBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The end key of this KeyRange.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList limit = 2; + */ + public Builder setLimit(com.google.spanner.executor.v1.ValueList value) { + if (limitBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + limit_ = value; + } else { + limitBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The end key of this KeyRange.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList limit = 2; + */ + public Builder setLimit(com.google.spanner.executor.v1.ValueList.Builder builderForValue) { + if (limitBuilder_ == null) { + limit_ = builderForValue.build(); + } else { + limitBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The end key of this KeyRange.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList limit = 2; + */ + public Builder mergeLimit(com.google.spanner.executor.v1.ValueList value) { + if (limitBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && limit_ != null + && limit_ != com.google.spanner.executor.v1.ValueList.getDefaultInstance()) { + getLimitBuilder().mergeFrom(value); + } else { + limit_ = value; + } + } else { + limitBuilder_.mergeFrom(value); + } + if (limit_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The end key of this KeyRange.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList limit = 2; + */ + public Builder clearLimit() { + bitField0_ = (bitField0_ & ~0x00000002); + limit_ = null; + if (limitBuilder_ != null) { + limitBuilder_.dispose(); + limitBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The end key of this KeyRange.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList limit = 2; + */ + public com.google.spanner.executor.v1.ValueList.Builder getLimitBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetLimitFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The end key of this KeyRange.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList limit = 2; + */ + public com.google.spanner.executor.v1.ValueListOrBuilder getLimitOrBuilder() { + if (limitBuilder_ != null) { + return limitBuilder_.getMessageOrBuilder(); + } else { + return limit_ == null + ? com.google.spanner.executor.v1.ValueList.getDefaultInstance() + : limit_; + } + } + + /** + * + * + *
    +     * The end key of this KeyRange.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList limit = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder> + internalGetLimitFieldBuilder() { + if (limitBuilder_ == null) { + limitBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder>( + getLimit(), getParentForChildren(), isClean()); + limit_ = null; + } + return limitBuilder_; + } + + private int type_ = 0; + + /** + * + * + *
    +     * "start" and "limit" type for this KeyRange.
    +     * 
    + * + * optional .google.spanner.executor.v1.KeyRange.Type type = 3; + * + * @return Whether the type field is set. + */ + @java.lang.Override + public boolean hasType() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * "start" and "limit" type for this KeyRange.
    +     * 
    + * + * optional .google.spanner.executor.v1.KeyRange.Type type = 3; + * + * @return The enum numeric value on the wire for type. + */ + @java.lang.Override + public int getTypeValue() { + return type_; + } + + /** + * + * + *
    +     * "start" and "limit" type for this KeyRange.
    +     * 
    + * + * optional .google.spanner.executor.v1.KeyRange.Type type = 3; + * + * @param value The enum numeric value on the wire for type to set. + * @return This builder for chaining. + */ + public Builder setTypeValue(int value) { + type_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * "start" and "limit" type for this KeyRange.
    +     * 
    + * + * optional .google.spanner.executor.v1.KeyRange.Type type = 3; + * + * @return The type. + */ + @java.lang.Override + public com.google.spanner.executor.v1.KeyRange.Type getType() { + com.google.spanner.executor.v1.KeyRange.Type result = + com.google.spanner.executor.v1.KeyRange.Type.forNumber(type_); + return result == null ? com.google.spanner.executor.v1.KeyRange.Type.UNRECOGNIZED : result; + } + + /** + * + * + *
    +     * "start" and "limit" type for this KeyRange.
    +     * 
    + * + * optional .google.spanner.executor.v1.KeyRange.Type type = 3; + * + * @param value The type to set. + * @return This builder for chaining. + */ + public Builder setType(com.google.spanner.executor.v1.KeyRange.Type value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + type_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * "start" and "limit" type for this KeyRange.
    +     * 
    + * + * optional .google.spanner.executor.v1.KeyRange.Type type = 3; + * + * @return This builder for chaining. + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000004); + type_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.KeyRange) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.KeyRange) + private static final com.google.spanner.executor.v1.KeyRange DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.KeyRange(); + } + + public static com.google.spanner.executor.v1.KeyRange getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public KeyRange parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.KeyRange getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeyRangeOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeyRangeOrBuilder.java new file mode 100644 index 000000000000..19db055c9070 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeyRangeOrBuilder.java @@ -0,0 +1,147 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface KeyRangeOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.KeyRange) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * "start" and "limit" must have the same number of key parts,
    +   * though they may name only a prefix of the table or index key.
    +   * The start key of this KeyRange.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList start = 1; + * + * @return Whether the start field is set. + */ + boolean hasStart(); + + /** + * + * + *
    +   * "start" and "limit" must have the same number of key parts,
    +   * though they may name only a prefix of the table or index key.
    +   * The start key of this KeyRange.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList start = 1; + * + * @return The start. + */ + com.google.spanner.executor.v1.ValueList getStart(); + + /** + * + * + *
    +   * "start" and "limit" must have the same number of key parts,
    +   * though they may name only a prefix of the table or index key.
    +   * The start key of this KeyRange.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList start = 1; + */ + com.google.spanner.executor.v1.ValueListOrBuilder getStartOrBuilder(); + + /** + * + * + *
    +   * The end key of this KeyRange.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList limit = 2; + * + * @return Whether the limit field is set. + */ + boolean hasLimit(); + + /** + * + * + *
    +   * The end key of this KeyRange.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList limit = 2; + * + * @return The limit. + */ + com.google.spanner.executor.v1.ValueList getLimit(); + + /** + * + * + *
    +   * The end key of this KeyRange.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList limit = 2; + */ + com.google.spanner.executor.v1.ValueListOrBuilder getLimitOrBuilder(); + + /** + * + * + *
    +   * "start" and "limit" type for this KeyRange.
    +   * 
    + * + * optional .google.spanner.executor.v1.KeyRange.Type type = 3; + * + * @return Whether the type field is set. + */ + boolean hasType(); + + /** + * + * + *
    +   * "start" and "limit" type for this KeyRange.
    +   * 
    + * + * optional .google.spanner.executor.v1.KeyRange.Type type = 3; + * + * @return The enum numeric value on the wire for type. + */ + int getTypeValue(); + + /** + * + * + *
    +   * "start" and "limit" type for this KeyRange.
    +   * 
    + * + * optional .google.spanner.executor.v1.KeyRange.Type type = 3; + * + * @return The type. + */ + com.google.spanner.executor.v1.KeyRange.Type getType(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeySet.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeySet.java new file mode 100644 index 000000000000..569f3f281347 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeySet.java @@ -0,0 +1,1581 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * KeySet defines a collection of Spanner keys and/or key ranges. All
    + * the keys are expected to be in the same table. The keys need not be
    + * sorted in any particular way.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.KeySet} + */ +@com.google.protobuf.Generated +public final class KeySet extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.KeySet) + KeySetOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "KeySet"); + } + + // Use KeySet.newBuilder() to construct. + private KeySet(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private KeySet() { + point_ = java.util.Collections.emptyList(); + range_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_KeySet_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_KeySet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.KeySet.class, + com.google.spanner.executor.v1.KeySet.Builder.class); + } + + public static final int POINT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List point_; + + /** + * + * + *
    +   * A list of specific keys. Entries in "keys" should have exactly as
    +   * many elements as there are columns in the primary or index key
    +   * with which this "KeySet" is used.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + @java.lang.Override + public java.util.List getPointList() { + return point_; + } + + /** + * + * + *
    +   * A list of specific keys. Entries in "keys" should have exactly as
    +   * many elements as there are columns in the primary or index key
    +   * with which this "KeySet" is used.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + @java.lang.Override + public java.util.List + getPointOrBuilderList() { + return point_; + } + + /** + * + * + *
    +   * A list of specific keys. Entries in "keys" should have exactly as
    +   * many elements as there are columns in the primary or index key
    +   * with which this "KeySet" is used.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + @java.lang.Override + public int getPointCount() { + return point_.size(); + } + + /** + * + * + *
    +   * A list of specific keys. Entries in "keys" should have exactly as
    +   * many elements as there are columns in the primary or index key
    +   * with which this "KeySet" is used.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueList getPoint(int index) { + return point_.get(index); + } + + /** + * + * + *
    +   * A list of specific keys. Entries in "keys" should have exactly as
    +   * many elements as there are columns in the primary or index key
    +   * with which this "KeySet" is used.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueListOrBuilder getPointOrBuilder(int index) { + return point_.get(index); + } + + public static final int RANGE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List range_; + + /** + * + * + *
    +   * A list of key ranges.
    +   * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + @java.lang.Override + public java.util.List getRangeList() { + return range_; + } + + /** + * + * + *
    +   * A list of key ranges.
    +   * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + @java.lang.Override + public java.util.List + getRangeOrBuilderList() { + return range_; + } + + /** + * + * + *
    +   * A list of key ranges.
    +   * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + @java.lang.Override + public int getRangeCount() { + return range_.size(); + } + + /** + * + * + *
    +   * A list of key ranges.
    +   * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + @java.lang.Override + public com.google.spanner.executor.v1.KeyRange getRange(int index) { + return range_.get(index); + } + + /** + * + * + *
    +   * A list of key ranges.
    +   * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + @java.lang.Override + public com.google.spanner.executor.v1.KeyRangeOrBuilder getRangeOrBuilder(int index) { + return range_.get(index); + } + + public static final int ALL_FIELD_NUMBER = 3; + private boolean all_ = false; + + /** + * + * + *
    +   * For convenience "all" can be set to "true" to indicate that this
    +   * "KeySet" matches all keys in the table or index. Note that any keys
    +   * specified in "keys" or "ranges" are only yielded once.
    +   * 
    + * + * bool all = 3; + * + * @return The all. + */ + @java.lang.Override + public boolean getAll() { + return all_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < point_.size(); i++) { + output.writeMessage(1, point_.get(i)); + } + for (int i = 0; i < range_.size(); i++) { + output.writeMessage(2, range_.get(i)); + } + if (all_ != false) { + output.writeBool(3, all_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < point_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, point_.get(i)); + } + for (int i = 0; i < range_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, range_.get(i)); + } + if (all_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, all_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.KeySet)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.KeySet other = (com.google.spanner.executor.v1.KeySet) obj; + + if (!getPointList().equals(other.getPointList())) return false; + if (!getRangeList().equals(other.getRangeList())) return false; + if (getAll() != other.getAll()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + 
@java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getPointCount() > 0) { + hash = (37 * hash) + POINT_FIELD_NUMBER; + hash = (53 * hash) + getPointList().hashCode(); + } + if (getRangeCount() > 0) { + hash = (37 * hash) + RANGE_FIELD_NUMBER; + hash = (53 * hash) + getRangeList().hashCode(); + } + hash = (37 * hash) + ALL_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getAll()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.KeySet parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.KeySet parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.KeySet parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.KeySet parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.KeySet parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.KeySet parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.KeySet parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.KeySet parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.KeySet parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.KeySet parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.KeySet parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.KeySet parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.KeySet prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + 
public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * KeySet defines a collection of Spanner keys and/or key ranges. All
    +   * the keys are expected to be in the same table. The keys need not be
    +   * sorted in any particular way.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.KeySet} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.KeySet) + com.google.spanner.executor.v1.KeySetOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_KeySet_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_KeySet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.KeySet.class, + com.google.spanner.executor.v1.KeySet.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.KeySet.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (pointBuilder_ == null) { + point_ = java.util.Collections.emptyList(); + } else { + point_ = null; + pointBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (rangeBuilder_ == null) { + range_ = java.util.Collections.emptyList(); + } else { + range_ = null; + rangeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + all_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_KeySet_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.KeySet getDefaultInstanceForType() { + return com.google.spanner.executor.v1.KeySet.getDefaultInstance(); + } 
+ + @java.lang.Override + public com.google.spanner.executor.v1.KeySet build() { + com.google.spanner.executor.v1.KeySet result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.KeySet buildPartial() { + com.google.spanner.executor.v1.KeySet result = + new com.google.spanner.executor.v1.KeySet(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.executor.v1.KeySet result) { + if (pointBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + point_ = java.util.Collections.unmodifiableList(point_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.point_ = point_; + } else { + result.point_ = pointBuilder_.build(); + } + if (rangeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + range_ = java.util.Collections.unmodifiableList(range_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.range_ = range_; + } else { + result.range_ = rangeBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.executor.v1.KeySet result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.all_ = all_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.KeySet) { + return mergeFrom((com.google.spanner.executor.v1.KeySet) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.KeySet other) { + if (other == com.google.spanner.executor.v1.KeySet.getDefaultInstance()) return this; + if (pointBuilder_ == null) { + if (!other.point_.isEmpty()) { + if (point_.isEmpty()) { + point_ = other.point_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + 
ensurePointIsMutable(); + point_.addAll(other.point_); + } + onChanged(); + } + } else { + if (!other.point_.isEmpty()) { + if (pointBuilder_.isEmpty()) { + pointBuilder_.dispose(); + pointBuilder_ = null; + point_ = other.point_; + bitField0_ = (bitField0_ & ~0x00000001); + pointBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetPointFieldBuilder() + : null; + } else { + pointBuilder_.addAllMessages(other.point_); + } + } + } + if (rangeBuilder_ == null) { + if (!other.range_.isEmpty()) { + if (range_.isEmpty()) { + range_ = other.range_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureRangeIsMutable(); + range_.addAll(other.range_); + } + onChanged(); + } + } else { + if (!other.range_.isEmpty()) { + if (rangeBuilder_.isEmpty()) { + rangeBuilder_.dispose(); + rangeBuilder_ = null; + range_ = other.range_; + bitField0_ = (bitField0_ & ~0x00000002); + rangeBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetRangeFieldBuilder() + : null; + } else { + rangeBuilder_.addAllMessages(other.range_); + } + } + } + if (other.getAll() != false) { + setAll(other.getAll()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.executor.v1.ValueList m = + input.readMessage( + com.google.spanner.executor.v1.ValueList.parser(), extensionRegistry); + if (pointBuilder_ == null) { + ensurePointIsMutable(); + point_.add(m); + } else { + 
pointBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + com.google.spanner.executor.v1.KeyRange m = + input.readMessage( + com.google.spanner.executor.v1.KeyRange.parser(), extensionRegistry); + if (rangeBuilder_ == null) { + ensureRangeIsMutable(); + range_.add(m); + } else { + rangeBuilder_.addMessage(m); + } + break; + } // case 18 + case 24: + { + all_ = input.readBool(); + bitField0_ |= 0x00000004; + break; + } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List point_ = + java.util.Collections.emptyList(); + + private void ensurePointIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + point_ = new java.util.ArrayList(point_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder> + pointBuilder_; + + /** + * + * + *
    +     * A list of specific keys. Entries in "keys" should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this "KeySet" is used.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + public java.util.List getPointList() { + if (pointBuilder_ == null) { + return java.util.Collections.unmodifiableList(point_); + } else { + return pointBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * A list of specific keys. Entries in "keys" should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this "KeySet" is used.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + public int getPointCount() { + if (pointBuilder_ == null) { + return point_.size(); + } else { + return pointBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * A list of specific keys. Entries in "keys" should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this "KeySet" is used.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + public com.google.spanner.executor.v1.ValueList getPoint(int index) { + if (pointBuilder_ == null) { + return point_.get(index); + } else { + return pointBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * A list of specific keys. Entries in "keys" should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this "KeySet" is used.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + public Builder setPoint(int index, com.google.spanner.executor.v1.ValueList value) { + if (pointBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePointIsMutable(); + point_.set(index, value); + onChanged(); + } else { + pointBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * A list of specific keys. Entries in "keys" should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this "KeySet" is used.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + public Builder setPoint( + int index, com.google.spanner.executor.v1.ValueList.Builder builderForValue) { + if (pointBuilder_ == null) { + ensurePointIsMutable(); + point_.set(index, builderForValue.build()); + onChanged(); + } else { + pointBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of specific keys. Entries in "keys" should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this "KeySet" is used.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + public Builder addPoint(com.google.spanner.executor.v1.ValueList value) { + if (pointBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePointIsMutable(); + point_.add(value); + onChanged(); + } else { + pointBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * A list of specific keys. Entries in "keys" should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this "KeySet" is used.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + public Builder addPoint(int index, com.google.spanner.executor.v1.ValueList value) { + if (pointBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePointIsMutable(); + point_.add(index, value); + onChanged(); + } else { + pointBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * A list of specific keys. Entries in "keys" should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this "KeySet" is used.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + public Builder addPoint(com.google.spanner.executor.v1.ValueList.Builder builderForValue) { + if (pointBuilder_ == null) { + ensurePointIsMutable(); + point_.add(builderForValue.build()); + onChanged(); + } else { + pointBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of specific keys. Entries in "keys" should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this "KeySet" is used.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + public Builder addPoint( + int index, com.google.spanner.executor.v1.ValueList.Builder builderForValue) { + if (pointBuilder_ == null) { + ensurePointIsMutable(); + point_.add(index, builderForValue.build()); + onChanged(); + } else { + pointBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of specific keys. Entries in "keys" should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this "KeySet" is used.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + public Builder addAllPoint( + java.lang.Iterable values) { + if (pointBuilder_ == null) { + ensurePointIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, point_); + onChanged(); + } else { + pointBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * A list of specific keys. Entries in "keys" should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this "KeySet" is used.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + public Builder clearPoint() { + if (pointBuilder_ == null) { + point_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + pointBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * A list of specific keys. Entries in "keys" should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this "KeySet" is used.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + public Builder removePoint(int index) { + if (pointBuilder_ == null) { + ensurePointIsMutable(); + point_.remove(index); + onChanged(); + } else { + pointBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * A list of specific keys. Entries in "keys" should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this "KeySet" is used.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + public com.google.spanner.executor.v1.ValueList.Builder getPointBuilder(int index) { + return internalGetPointFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * A list of specific keys. Entries in "keys" should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this "KeySet" is used.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + public com.google.spanner.executor.v1.ValueListOrBuilder getPointOrBuilder(int index) { + if (pointBuilder_ == null) { + return point_.get(index); + } else { + return pointBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * A list of specific keys. Entries in "keys" should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this "KeySet" is used.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + public java.util.List + getPointOrBuilderList() { + if (pointBuilder_ != null) { + return pointBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(point_); + } + } + + /** + * + * + *
    +     * A list of specific keys. Entries in "keys" should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this "KeySet" is used.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + public com.google.spanner.executor.v1.ValueList.Builder addPointBuilder() { + return internalGetPointFieldBuilder() + .addBuilder(com.google.spanner.executor.v1.ValueList.getDefaultInstance()); + } + + /** + * + * + *
    +     * A list of specific keys. Entries in "keys" should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this "KeySet" is used.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + public com.google.spanner.executor.v1.ValueList.Builder addPointBuilder(int index) { + return internalGetPointFieldBuilder() + .addBuilder(index, com.google.spanner.executor.v1.ValueList.getDefaultInstance()); + } + + /** + * + * + *
    +     * A list of specific keys. Entries in "keys" should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this "KeySet" is used.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + public java.util.List getPointBuilderList() { + return internalGetPointFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder> + internalGetPointFieldBuilder() { + if (pointBuilder_ == null) { + pointBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder>( + point_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + point_ = null; + } + return pointBuilder_; + } + + private java.util.List range_ = + java.util.Collections.emptyList(); + + private void ensureRangeIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + range_ = new java.util.ArrayList(range_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.KeyRange, + com.google.spanner.executor.v1.KeyRange.Builder, + com.google.spanner.executor.v1.KeyRangeOrBuilder> + rangeBuilder_; + + /** + * + * + *
    +     * A list of key ranges.
    +     * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + public java.util.List getRangeList() { + if (rangeBuilder_ == null) { + return java.util.Collections.unmodifiableList(range_); + } else { + return rangeBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * A list of key ranges.
    +     * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + public int getRangeCount() { + if (rangeBuilder_ == null) { + return range_.size(); + } else { + return rangeBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * A list of key ranges.
    +     * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + public com.google.spanner.executor.v1.KeyRange getRange(int index) { + if (rangeBuilder_ == null) { + return range_.get(index); + } else { + return rangeBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * A list of key ranges.
    +     * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + public Builder setRange(int index, com.google.spanner.executor.v1.KeyRange value) { + if (rangeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRangeIsMutable(); + range_.set(index, value); + onChanged(); + } else { + rangeBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * A list of key ranges.
    +     * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + public Builder setRange( + int index, com.google.spanner.executor.v1.KeyRange.Builder builderForValue) { + if (rangeBuilder_ == null) { + ensureRangeIsMutable(); + range_.set(index, builderForValue.build()); + onChanged(); + } else { + rangeBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of key ranges.
    +     * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + public Builder addRange(com.google.spanner.executor.v1.KeyRange value) { + if (rangeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRangeIsMutable(); + range_.add(value); + onChanged(); + } else { + rangeBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * A list of key ranges.
    +     * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + public Builder addRange(int index, com.google.spanner.executor.v1.KeyRange value) { + if (rangeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRangeIsMutable(); + range_.add(index, value); + onChanged(); + } else { + rangeBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * A list of key ranges.
    +     * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + public Builder addRange(com.google.spanner.executor.v1.KeyRange.Builder builderForValue) { + if (rangeBuilder_ == null) { + ensureRangeIsMutable(); + range_.add(builderForValue.build()); + onChanged(); + } else { + rangeBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of key ranges.
    +     * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + public Builder addRange( + int index, com.google.spanner.executor.v1.KeyRange.Builder builderForValue) { + if (rangeBuilder_ == null) { + ensureRangeIsMutable(); + range_.add(index, builderForValue.build()); + onChanged(); + } else { + rangeBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of key ranges.
    +     * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + public Builder addAllRange( + java.lang.Iterable values) { + if (rangeBuilder_ == null) { + ensureRangeIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, range_); + onChanged(); + } else { + rangeBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * A list of key ranges.
    +     * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + public Builder clearRange() { + if (rangeBuilder_ == null) { + range_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + rangeBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * A list of key ranges.
    +     * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + public Builder removeRange(int index) { + if (rangeBuilder_ == null) { + ensureRangeIsMutable(); + range_.remove(index); + onChanged(); + } else { + rangeBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * A list of key ranges.
    +     * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + public com.google.spanner.executor.v1.KeyRange.Builder getRangeBuilder(int index) { + return internalGetRangeFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * A list of key ranges.
    +     * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + public com.google.spanner.executor.v1.KeyRangeOrBuilder getRangeOrBuilder(int index) { + if (rangeBuilder_ == null) { + return range_.get(index); + } else { + return rangeBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * A list of key ranges.
    +     * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + public java.util.List + getRangeOrBuilderList() { + if (rangeBuilder_ != null) { + return rangeBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(range_); + } + } + + /** + * + * + *
    +     * A list of key ranges.
    +     * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + public com.google.spanner.executor.v1.KeyRange.Builder addRangeBuilder() { + return internalGetRangeFieldBuilder() + .addBuilder(com.google.spanner.executor.v1.KeyRange.getDefaultInstance()); + } + + /** + * + * + *
    +     * A list of key ranges.
    +     * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + public com.google.spanner.executor.v1.KeyRange.Builder addRangeBuilder(int index) { + return internalGetRangeFieldBuilder() + .addBuilder(index, com.google.spanner.executor.v1.KeyRange.getDefaultInstance()); + } + + /** + * + * + *
    +     * A list of key ranges.
    +     * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + public java.util.List getRangeBuilderList() { + return internalGetRangeFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.KeyRange, + com.google.spanner.executor.v1.KeyRange.Builder, + com.google.spanner.executor.v1.KeyRangeOrBuilder> + internalGetRangeFieldBuilder() { + if (rangeBuilder_ == null) { + rangeBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.KeyRange, + com.google.spanner.executor.v1.KeyRange.Builder, + com.google.spanner.executor.v1.KeyRangeOrBuilder>( + range_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + range_ = null; + } + return rangeBuilder_; + } + + private boolean all_; + + /** + * + * + *
    +     * For convenience "all" can be set to "true" to indicate that this
    +     * "KeySet" matches all keys in the table or index. Note that any keys
    +     * specified in "keys" or "ranges" are only yielded once.
    +     * 
    + * + * bool all = 3; + * + * @return The all. + */ + @java.lang.Override + public boolean getAll() { + return all_; + } + + /** + * + * + *
    +     * For convenience "all" can be set to "true" to indicate that this
    +     * "KeySet" matches all keys in the table or index. Note that any keys
    +     * specified in "keys" or "ranges" are only yielded once.
    +     * 
    + * + * bool all = 3; + * + * @param value The all to set. + * @return This builder for chaining. + */ + public Builder setAll(boolean value) { + + all_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * For convenience "all" can be set to "true" to indicate that this
    +     * "KeySet" matches all keys in the table or index. Note that any keys
    +     * specified in "keys" or "ranges" are only yielded once.
    +     * 
    + * + * bool all = 3; + * + * @return This builder for chaining. + */ + public Builder clearAll() { + bitField0_ = (bitField0_ & ~0x00000004); + all_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.KeySet) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.KeySet) + private static final com.google.spanner.executor.v1.KeySet DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.KeySet(); + } + + public static com.google.spanner.executor.v1.KeySet getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public KeySet parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.KeySet getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeySetOrBuilder.java 
b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeySetOrBuilder.java new file mode 100644 index 000000000000..eea2bd4f6e01 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeySetOrBuilder.java @@ -0,0 +1,165 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface KeySetOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.KeySet) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * A list of specific keys. Entries in "keys" should have exactly as
    +   * many elements as there are columns in the primary or index key
    +   * with which this "KeySet" is used.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + java.util.List getPointList(); + + /** + * + * + *
    +   * A list of specific keys. Entries in "keys" should have exactly as
    +   * many elements as there are columns in the primary or index key
    +   * with which this "KeySet" is used.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + com.google.spanner.executor.v1.ValueList getPoint(int index); + + /** + * + * + *
    +   * A list of specific keys. Entries in "keys" should have exactly as
    +   * many elements as there are columns in the primary or index key
    +   * with which this "KeySet" is used.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + int getPointCount(); + + /** + * + * + *
    +   * A list of specific keys. Entries in "keys" should have exactly as
    +   * many elements as there are columns in the primary or index key
    +   * with which this "KeySet" is used.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + java.util.List + getPointOrBuilderList(); + + /** + * + * + *
    +   * A list of specific keys. Entries in "keys" should have exactly as
    +   * many elements as there are columns in the primary or index key
    +   * with which this "KeySet" is used.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList point = 1; + */ + com.google.spanner.executor.v1.ValueListOrBuilder getPointOrBuilder(int index); + + /** + * + * + *
    +   * A list of key ranges.
    +   * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + java.util.List getRangeList(); + + /** + * + * + *
    +   * A list of key ranges.
    +   * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + com.google.spanner.executor.v1.KeyRange getRange(int index); + + /** + * + * + *
    +   * A list of key ranges.
    +   * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + int getRangeCount(); + + /** + * + * + *
    +   * A list of key ranges.
    +   * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + java.util.List + getRangeOrBuilderList(); + + /** + * + * + *
    +   * A list of key ranges.
    +   * 
    + * + * repeated .google.spanner.executor.v1.KeyRange range = 2; + */ + com.google.spanner.executor.v1.KeyRangeOrBuilder getRangeOrBuilder(int index); + + /** + * + * + *
    +   * For convenience "all" can be set to "true" to indicate that this
    +   * "KeySet" matches all keys in the table or index. Note that any keys
    +   * specified in "keys" or "ranges" are only yielded once.
    +   * 
    + * + * bool all = 3; + * + * @return The all. + */ + boolean getAll(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupOperationsAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupOperationsAction.java new file mode 100644 index 000000000000..27d39e09ff21 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupOperationsAction.java @@ -0,0 +1,1320 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that lists Cloud Spanner database backup operations.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ListCloudBackupOperationsAction} + */ +@com.google.protobuf.Generated +public final class ListCloudBackupOperationsAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.ListCloudBackupOperationsAction) + ListCloudBackupOperationsActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListCloudBackupOperationsAction"); + } + + // Use ListCloudBackupOperationsAction.newBuilder() to construct. + private ListCloudBackupOperationsAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListCloudBackupOperationsAction() { + projectId_ = ""; + instanceId_ = ""; + filter_ = ""; + pageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudBackupOperationsAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudBackupOperationsAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ListCloudBackupOperationsAction.class, + com.google.spanner.executor.v1.ListCloudBackupOperationsAction.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +   * Cloud instance ID (not path) to list backup operations from,
    +   * e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + @java.lang.Override + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud instance ID (not path) to list backup operations from,
    +   * e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FILTER_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object filter_ = ""; + + /** + * + * + *
    +   * A filter expression that filters what operations are returned in the
    +   * response.
    +   * The expression must specify the field name, a comparison operator,
    +   * and the value that you want to use for filtering.
    +   * Refer backup.proto.ListBackupOperationsRequest for detail.
    +   * 
    + * + * string filter = 3; + * + * @return The filter. + */ + @java.lang.Override + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } + } + + /** + * + * + *
    +   * A filter expression that filters what operations are returned in the
    +   * response.
    +   * The expression must specify the field name, a comparison operator,
    +   * and the value that you want to use for filtering.
    +   * Refer backup.proto.ListBackupOperationsRequest for detail.
    +   * 
    + * + * string filter = 3; + * + * @return The bytes for filter. + */ + @java.lang.Override + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 4; + private int pageSize_ = 0; + + /** + * + * + *
    +   * Number of backups to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 4; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListBackupOperationsResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * string page_token = 5; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListBackupOperationsResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * string page_token = 5; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, filter_); + } + if (pageSize_ != 0) { + output.writeInt32(4, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, pageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, 
instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, filter_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(4, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, pageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.ListCloudBackupOperationsAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.ListCloudBackupOperationsAction other = + (com.google.spanner.executor.v1.ListCloudBackupOperationsAction) obj; + + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getInstanceId().equals(other.getInstanceId())) return false; + if (!getFilter().equals(other.getFilter())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + hash = (37 * hash) + FILTER_FIELD_NUMBER; + hash = (53 * hash) + getFilter().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (29 * hash) 
+ getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.ListCloudBackupOperationsAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ListCloudBackupOperationsAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudBackupOperationsAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ListCloudBackupOperationsAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudBackupOperationsAction parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ListCloudBackupOperationsAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudBackupOperationsAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ListCloudBackupOperationsAction parseFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudBackupOperationsAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ListCloudBackupOperationsAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudBackupOperationsAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ListCloudBackupOperationsAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.ListCloudBackupOperationsAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that lists Cloud Spanner database backup operations.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ListCloudBackupOperationsAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.ListCloudBackupOperationsAction) + com.google.spanner.executor.v1.ListCloudBackupOperationsActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudBackupOperationsAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudBackupOperationsAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ListCloudBackupOperationsAction.class, + com.google.spanner.executor.v1.ListCloudBackupOperationsAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.ListCloudBackupOperationsAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + projectId_ = ""; + instanceId_ = ""; + filter_ = ""; + pageSize_ = 0; + pageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudBackupOperationsAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudBackupOperationsAction + getDefaultInstanceForType() { + return com.google.spanner.executor.v1.ListCloudBackupOperationsAction.getDefaultInstance(); + } + 
+ @java.lang.Override + public com.google.spanner.executor.v1.ListCloudBackupOperationsAction build() { + com.google.spanner.executor.v1.ListCloudBackupOperationsAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudBackupOperationsAction buildPartial() { + com.google.spanner.executor.v1.ListCloudBackupOperationsAction result = + new com.google.spanner.executor.v1.ListCloudBackupOperationsAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.executor.v1.ListCloudBackupOperationsAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.projectId_ = projectId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.instanceId_ = instanceId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.filter_ = filter_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.pageToken_ = pageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.ListCloudBackupOperationsAction) { + return mergeFrom((com.google.spanner.executor.v1.ListCloudBackupOperationsAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.ListCloudBackupOperationsAction other) { + if (other + == com.google.spanner.executor.v1.ListCloudBackupOperationsAction.getDefaultInstance()) + return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + bitField0_ |= 0x00000002; + 
onChanged(); + } + if (!other.getFilter().isEmpty()) { + filter_ = other.filter_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000010; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + instanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + filter_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 32: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 42: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +     * Cloud instance ID (not path) to list backup operations from,
    +     * e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path) to list backup operations from,
    +     * e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path) to list backup operations from,
    +     * e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path) to list backup operations from,
    +     * e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearInstanceId() { + instanceId_ = getDefaultInstance().getInstanceId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path) to list backup operations from,
    +     * e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The bytes for instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object filter_ = ""; + + /** + * + * + *
    +     * A filter expression that filters what operations are returned in the
    +     * response.
    +     * The expression must specify the field name, a comparison operator,
    +     * and the value that you want to use for filtering.
    +     * Refer backup.proto.ListBackupOperationsRequest for detail.
    +     * 
    + * + * string filter = 3; + * + * @return The filter. + */ + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * A filter expression that filters what operations are returned in the
    +     * response.
    +     * The expression must specify the field name, a comparison operator,
    +     * and the value that you want to use for filtering.
    +     * Refer backup.proto.ListBackupOperationsRequest for detail.
    +     * 
    + * + * string filter = 3; + * + * @return The bytes for filter. + */ + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * A filter expression that filters what operations are returned in the
    +     * response.
    +     * The expression must specify the field name, a comparison operator,
    +     * and the value that you want to use for filtering.
    +     * Refer backup.proto.ListBackupOperationsRequest for detail.
    +     * 
    + * + * string filter = 3; + * + * @param value The filter to set. + * @return This builder for chaining. + */ + public Builder setFilter(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + filter_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A filter expression that filters what operations are returned in the
    +     * response.
    +     * The expression must specify the field name, a comparison operator,
    +     * and the value that you want to use for filtering.
    +     * Refer backup.proto.ListBackupOperationsRequest for detail.
    +     * 
    + * + * string filter = 3; + * + * @return This builder for chaining. + */ + public Builder clearFilter() { + filter_ = getDefaultInstance().getFilter(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * A filter expression that filters what operations are returned in the
    +     * response.
    +     * The expression must specify the field name, a comparison operator,
    +     * and the value that you want to use for filtering.
    +     * Refer backup.proto.ListBackupOperationsRequest for detail.
    +     * 
    + * + * string filter = 3; + * + * @param value The bytes for filter to set. + * @return This builder for chaining. + */ + public Builder setFilterBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + filter_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
    +     * Number of backups to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 4; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
    +     * Number of backups to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 4; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Number of backups to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 4; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000008); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListBackupOperationsResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * string page_token = 5; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListBackupOperationsResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * string page_token = 5; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListBackupOperationsResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * string page_token = 5; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListBackupOperationsResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * string page_token = 5; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListBackupOperationsResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * string page_token = 5; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.ListCloudBackupOperationsAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.ListCloudBackupOperationsAction) + private static final com.google.spanner.executor.v1.ListCloudBackupOperationsAction + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.ListCloudBackupOperationsAction(); + } + + public static com.google.spanner.executor.v1.ListCloudBackupOperationsAction + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListCloudBackupOperationsAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudBackupOperationsAction + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupOperationsActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupOperationsActionOrBuilder.java new file mode 100644 index 000000000000..88915e7c847a --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupOperationsActionOrBuilder.java @@ -0,0 +1,160 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface ListCloudBackupOperationsActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.ListCloudBackupOperationsAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * Cloud instance ID (not path) to list backup operations from,
    +   * e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + java.lang.String getInstanceId(); + + /** + * + * + *
    +   * Cloud instance ID (not path) to list backup operations from,
    +   * e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + com.google.protobuf.ByteString getInstanceIdBytes(); + + /** + * + * + *
    +   * A filter expression that filters what operations are returned in the
    +   * response.
    +   * The expression must specify the field name, a comparison operator,
    +   * and the value that you want to use for filtering.
    +   * Refer backup.proto.ListBackupOperationsRequest for detail.
    +   * 
    + * + * string filter = 3; + * + * @return The filter. + */ + java.lang.String getFilter(); + + /** + * + * + *
    +   * A filter expression that filters what operations are returned in the
    +   * response.
    +   * The expression must specify the field name, a comparison operator,
    +   * and the value that you want to use for filtering.
    +   * Refer backup.proto.ListBackupOperationsRequest for detail.
    +   * 
    + * + * string filter = 3; + * + * @return The bytes for filter. + */ + com.google.protobuf.ByteString getFilterBytes(); + + /** + * + * + *
    +   * Number of backups to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 4; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListBackupOperationsResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * string page_token = 5; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListBackupOperationsResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * string page_token = 5; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupsAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupsAction.java new file mode 100644 index 000000000000..f9c2f246ab92 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupsAction.java @@ -0,0 +1,1300 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that lists Cloud Spanner database backups.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ListCloudBackupsAction} + */ +@com.google.protobuf.Generated +public final class ListCloudBackupsAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.ListCloudBackupsAction) + ListCloudBackupsActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListCloudBackupsAction"); + } + + // Use ListCloudBackupsAction.newBuilder() to construct. + private ListCloudBackupsAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListCloudBackupsAction() { + projectId_ = ""; + instanceId_ = ""; + filter_ = ""; + pageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudBackupsAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudBackupsAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ListCloudBackupsAction.class, + com.google.spanner.executor.v1.ListCloudBackupsAction.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +   * Cloud instance ID (not path) to list backups from, e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + @java.lang.Override + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud instance ID (not path) to list backups from, e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FILTER_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object filter_ = ""; + + /** + * + * + *
    +   * A filter expression that filters backups listed in the response.
    +   * The expression must specify the field name, a comparison operator,
    +   * and the value that you want to use for filtering.
    +   * Refer backup.proto.ListBackupsRequest for detail.
    +   * 
    + * + * string filter = 3; + * + * @return The filter. + */ + @java.lang.Override + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } + } + + /** + * + * + *
    +   * A filter expression that filters backups listed in the response.
    +   * The expression must specify the field name, a comparison operator,
    +   * and the value that you want to use for filtering.
    +   * Refer backup.proto.ListBackupsRequest for detail.
    +   * 
    + * + * string filter = 3; + * + * @return The bytes for filter. + */ + @java.lang.Override + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 4; + private int pageSize_ = 0; + + /** + * + * + *
    +   * Number of backups to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 4; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListBackupsResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * string page_token = 5; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListBackupsResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * string page_token = 5; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, filter_); + } + if (pageSize_ != 0) { + output.writeInt32(4, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, pageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, 
instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, filter_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(4, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, pageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.ListCloudBackupsAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.ListCloudBackupsAction other = + (com.google.spanner.executor.v1.ListCloudBackupsAction) obj; + + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getInstanceId().equals(other.getInstanceId())) return false; + if (!getFilter().equals(other.getFilter())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + hash = (37 * hash) + FILTER_FIELD_NUMBER; + hash = (53 * hash) + getFilter().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (29 * hash) + 
getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.ListCloudBackupsAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ListCloudBackupsAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudBackupsAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ListCloudBackupsAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudBackupsAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ListCloudBackupsAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudBackupsAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ListCloudBackupsAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException 
{ + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudBackupsAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ListCloudBackupsAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudBackupsAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ListCloudBackupsAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.ListCloudBackupsAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that lists Cloud Spanner database backups.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ListCloudBackupsAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.ListCloudBackupsAction) + com.google.spanner.executor.v1.ListCloudBackupsActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudBackupsAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudBackupsAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ListCloudBackupsAction.class, + com.google.spanner.executor.v1.ListCloudBackupsAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.ListCloudBackupsAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + projectId_ = ""; + instanceId_ = ""; + filter_ = ""; + pageSize_ = 0; + pageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudBackupsAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudBackupsAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.ListCloudBackupsAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudBackupsAction build() { + 
com.google.spanner.executor.v1.ListCloudBackupsAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudBackupsAction buildPartial() { + com.google.spanner.executor.v1.ListCloudBackupsAction result = + new com.google.spanner.executor.v1.ListCloudBackupsAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.ListCloudBackupsAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.projectId_ = projectId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.instanceId_ = instanceId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.filter_ = filter_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.pageToken_ = pageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.ListCloudBackupsAction) { + return mergeFrom((com.google.spanner.executor.v1.ListCloudBackupsAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.ListCloudBackupsAction other) { + if (other == com.google.spanner.executor.v1.ListCloudBackupsAction.getDefaultInstance()) + return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getFilter().isEmpty()) { + filter_ = other.filter_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.getPageSize() != 0) { + 
setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000010; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + instanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + filter_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 32: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 42: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +     * Cloud instance ID (not path) to list backups from, e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path) to list backups from, e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path) to list backups from, e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path) to list backups from, e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearInstanceId() { + instanceId_ = getDefaultInstance().getInstanceId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path) to list backups from, e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The bytes for instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object filter_ = ""; + + /** + * + * + *
    +     * A filter expression that filters backups listed in the response.
    +     * The expression must specify the field name, a comparison operator,
    +     * and the value that you want to use for filtering.
    +     * Refer backup.proto.ListBackupsRequest for detail.
    +     * 
    + * + * string filter = 3; + * + * @return The filter. + */ + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * A filter expression that filters backups listed in the response.
    +     * The expression must specify the field name, a comparison operator,
    +     * and the value that you want to use for filtering.
    +     * Refer backup.proto.ListBackupsRequest for detail.
    +     * 
    + * + * string filter = 3; + * + * @return The bytes for filter. + */ + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * A filter expression that filters backups listed in the response.
    +     * The expression must specify the field name, a comparison operator,
    +     * and the value that you want to use for filtering.
    +     * Refer backup.proto.ListBackupsRequest for detail.
    +     * 
    + * + * string filter = 3; + * + * @param value The filter to set. + * @return This builder for chaining. + */ + public Builder setFilter(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + filter_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A filter expression that filters backups listed in the response.
    +     * The expression must specify the field name, a comparison operator,
    +     * and the value that you want to use for filtering.
    +     * Refer backup.proto.ListBackupsRequest for detail.
    +     * 
    + * + * string filter = 3; + * + * @return This builder for chaining. + */ + public Builder clearFilter() { + filter_ = getDefaultInstance().getFilter(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * A filter expression that filters backups listed in the response.
    +     * The expression must specify the field name, a comparison operator,
    +     * and the value that you want to use for filtering.
    +     * Refer backup.proto.ListBackupsRequest for detail.
    +     * 
    + * + * string filter = 3; + * + * @param value The bytes for filter to set. + * @return This builder for chaining. + */ + public Builder setFilterBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + filter_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
    +     * Number of backups to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 4; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
    +     * Number of backups to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 4; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Number of backups to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 4; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000008); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListBackupsResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * string page_token = 5; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListBackupsResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * string page_token = 5; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListBackupsResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * string page_token = 5; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListBackupsResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * string page_token = 5; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListBackupsResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * string page_token = 5; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.ListCloudBackupsAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.ListCloudBackupsAction) + private static final com.google.spanner.executor.v1.ListCloudBackupsAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.ListCloudBackupsAction(); + } + + public static com.google.spanner.executor.v1.ListCloudBackupsAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListCloudBackupsAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + 
@java.lang.Override + public com.google.spanner.executor.v1.ListCloudBackupsAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupsActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupsActionOrBuilder.java new file mode 100644 index 000000000000..4a8ee9bdc71d --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupsActionOrBuilder.java @@ -0,0 +1,156 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface ListCloudBackupsActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.ListCloudBackupsAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * Cloud instance ID (not path) to list backups from, e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + java.lang.String getInstanceId(); + + /** + * + * + *
    +   * Cloud instance ID (not path) to list backups from, e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + com.google.protobuf.ByteString getInstanceIdBytes(); + + /** + * + * + *
    +   * A filter expression that filters backups listed in the response.
    +   * The expression must specify the field name, a comparison operator,
    +   * and the value that you want to use for filtering.
    +   * Refer backup.proto.ListBackupsRequest for detail.
    +   * 
    + * + * string filter = 3; + * + * @return The filter. + */ + java.lang.String getFilter(); + + /** + * + * + *
    +   * A filter expression that filters backups listed in the response.
    +   * The expression must specify the field name, a comparison operator,
    +   * and the value that you want to use for filtering.
    +   * Refer backup.proto.ListBackupsRequest for detail.
    +   * 
    + * + * string filter = 3; + * + * @return The bytes for filter. + */ + com.google.protobuf.ByteString getFilterBytes(); + + /** + * + * + *
    +   * Number of backups to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 4; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListBackupsResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * string page_token = 5; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListBackupsResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * string page_token = 5; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabaseOperationsAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabaseOperationsAction.java new file mode 100644 index 000000000000..8eb8a9a81d9a --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabaseOperationsAction.java @@ -0,0 +1,1329 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that lists Cloud Spanner database operations.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ListCloudDatabaseOperationsAction} + */ +@com.google.protobuf.Generated +public final class ListCloudDatabaseOperationsAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.ListCloudDatabaseOperationsAction) + ListCloudDatabaseOperationsActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListCloudDatabaseOperationsAction"); + } + + // Use ListCloudDatabaseOperationsAction.newBuilder() to construct. + private ListCloudDatabaseOperationsAction( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListCloudDatabaseOperationsAction() { + projectId_ = ""; + instanceId_ = ""; + filter_ = ""; + pageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudDatabaseOperationsAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudDatabaseOperationsAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction.class, + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +   * Cloud instance ID (not path) to list database operations from,
    +   * e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + @java.lang.Override + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud instance ID (not path) to list database operations from,
    +   * e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FILTER_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object filter_ = ""; + + /** + * + * + *
    +   * A filter expression that filters what operations are returned in the
    +   * response.
    +   * The expression must specify the field name, a comparison operator,
    +   * and the value that you want to use for filtering.
    +   * Refer spanner_database_admin.proto.ListDatabaseOperationsRequest for
    +   * detail.
    +   * 
    + * + * string filter = 3; + * + * @return The filter. + */ + @java.lang.Override + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } + } + + /** + * + * + *
    +   * A filter expression that filters what operations are returned in the
    +   * response.
    +   * The expression must specify the field name, a comparison operator,
    +   * and the value that you want to use for filtering.
    +   * Refer spanner_database_admin.proto.ListDatabaseOperationsRequest for
    +   * detail.
    +   * 
    + * + * string filter = 3; + * + * @return The bytes for filter. + */ + @java.lang.Override + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 4; + private int pageSize_ = 0; + + /** + * + * + *
    +   * Number of databases to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 4; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListDatabaseOperationsResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * string page_token = 5; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListDatabaseOperationsResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * string page_token = 5; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, filter_); + } + if (pageSize_ != 0) { + output.writeInt32(4, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, pageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, 
instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, filter_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(4, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, pageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction other = + (com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction) obj; + + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getInstanceId().equals(other.getInstanceId())) return false; + if (!getFilter().equals(other.getFilter())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + hash = (37 * hash) + FILTER_FIELD_NUMBER; + hash = (53 * hash) + getFilter().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (29 * 
hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction parseFrom( + java.io.InputStream 
input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that lists Cloud Spanner database operations.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ListCloudDatabaseOperationsAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.ListCloudDatabaseOperationsAction) + com.google.spanner.executor.v1.ListCloudDatabaseOperationsActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudDatabaseOperationsAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudDatabaseOperationsAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction.class, + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + projectId_ = ""; + instanceId_ = ""; + filter_ = ""; + pageSize_ = 0; + pageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudDatabaseOperationsAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction + getDefaultInstanceForType() { + return 
com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction build() { + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction buildPartial() { + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction result = + new com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.projectId_ = projectId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.instanceId_ = instanceId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.filter_ = filter_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.pageToken_ = pageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction) { + return mergeFrom((com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction other) { + if (other + == com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction.getDefaultInstance()) + return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000001; + onChanged(); 
+ } + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getFilter().isEmpty()) { + filter_ = other.filter_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000010; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + instanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + filter_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 32: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 42: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object projectId_ = ""; + + /** + * + 
* + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +     * Cloud instance ID (not path) to list database operations from,
    +     * e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path) to list database operations from,
    +     * e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path) to list database operations from,
    +     * e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path) to list database operations from,
    +     * e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearInstanceId() { + instanceId_ = getDefaultInstance().getInstanceId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path) to list database operations from,
    +     * e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The bytes for instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object filter_ = ""; + + /** + * + * + *
    +     * A filter expression that filters what operations are returned in the
    +     * response.
    +     * The expression must specify the field name, a comparison operator,
    +     * and the value that you want to use for filtering.
    +     * Refer spanner_database_admin.proto.ListDatabaseOperationsRequest for
    +     * detail.
    +     * 
    + * + * string filter = 3; + * + * @return The filter. + */ + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * A filter expression that filters what operations are returned in the
    +     * response.
    +     * The expression must specify the field name, a comparison operator,
    +     * and the value that you want to use for filtering.
    +     * Refer spanner_database_admin.proto.ListDatabaseOperationsRequest for
    +     * detail.
    +     * 
    + * + * string filter = 3; + * + * @return The bytes for filter. + */ + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * A filter expression that filters what operations are returned in the
    +     * response.
    +     * The expression must specify the field name, a comparison operator,
    +     * and the value that you want to use for filtering.
    +     * Refer spanner_database_admin.proto.ListDatabaseOperationsRequest for
    +     * detail.
    +     * 
    + * + * string filter = 3; + * + * @param value The filter to set. + * @return This builder for chaining. + */ + public Builder setFilter(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + filter_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A filter expression that filters what operations are returned in the
    +     * response.
    +     * The expression must specify the field name, a comparison operator,
    +     * and the value that you want to use for filtering.
    +     * Refer spanner_database_admin.proto.ListDatabaseOperationsRequest for
    +     * detail.
    +     * 
    + * + * string filter = 3; + * + * @return This builder for chaining. + */ + public Builder clearFilter() { + filter_ = getDefaultInstance().getFilter(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * A filter expression that filters what operations are returned in the
    +     * response.
    +     * The expression must specify the field name, a comparison operator,
    +     * and the value that you want to use for filtering.
    +     * Refer spanner_database_admin.proto.ListDatabaseOperationsRequest for
    +     * detail.
    +     * 
    + * + * string filter = 3; + * + * @param value The bytes for filter to set. + * @return This builder for chaining. + */ + public Builder setFilterBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + filter_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
    +     * Number of databases to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 4; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
    +     * Number of databases to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 4; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Number of databases to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 4; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000008); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListDatabaseOperationsResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * string page_token = 5; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListDatabaseOperationsResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * string page_token = 5; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListDatabaseOperationsResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * string page_token = 5; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListDatabaseOperationsResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * string page_token = 5; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListDatabaseOperationsResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * string page_token = 5; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.ListCloudDatabaseOperationsAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.ListCloudDatabaseOperationsAction) + private static final com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction(); + } + + public static com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListCloudDatabaseOperationsAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabaseOperationsActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabaseOperationsActionOrBuilder.java new file mode 100644 index 000000000000..885c26988d99 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabaseOperationsActionOrBuilder.java @@ -0,0 +1,162 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface ListCloudDatabaseOperationsActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.ListCloudDatabaseOperationsAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * Cloud instance ID (not path) to list database operations from,
    +   * e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + java.lang.String getInstanceId(); + + /** + * + * + *
    +   * Cloud instance ID (not path) to list database operations from,
    +   * e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + com.google.protobuf.ByteString getInstanceIdBytes(); + + /** + * + * + *
    +   * A filter expression that filters what operations are returned in the
    +   * response.
    +   * The expression must specify the field name, a comparison operator,
    +   * and the value that you want to use for filtering.
    +   * Refer spanner_database_admin.proto.ListDatabaseOperationsRequest for
    +   * detail.
    +   * 
    + * + * string filter = 3; + * + * @return The filter. + */ + java.lang.String getFilter(); + + /** + * + * + *
    +   * A filter expression that filters what operations are returned in the
    +   * response.
    +   * The expression must specify the field name, a comparison operator,
    +   * and the value that you want to use for filtering.
    +   * Refer spanner_database_admin.proto.ListDatabaseOperationsRequest for
    +   * detail.
    +   * 
    + * + * string filter = 3; + * + * @return The bytes for filter. + */ + com.google.protobuf.ByteString getFilterBytes(); + + /** + * + * + *
    +   * Number of databases to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 4; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListDatabaseOperationsResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * string page_token = 5; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListDatabaseOperationsResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * string page_token = 5; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabasesAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabasesAction.java new file mode 100644 index 000000000000..863aabdbdf8a --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabasesAction.java @@ -0,0 +1,1090 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that lists Cloud Spanner databases.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ListCloudDatabasesAction} + */ +@com.google.protobuf.Generated +public final class ListCloudDatabasesAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.ListCloudDatabasesAction) + ListCloudDatabasesActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListCloudDatabasesAction"); + } + + // Use ListCloudDatabasesAction.newBuilder() to construct. + private ListCloudDatabasesAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListCloudDatabasesAction() { + projectId_ = ""; + instanceId_ = ""; + pageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudDatabasesAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudDatabasesAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ListCloudDatabasesAction.class, + com.google.spanner.executor.v1.ListCloudDatabasesAction.Builder.class); + } + + public static final int PROJECT_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +   * Cloud instance ID (not path) to list databases from, e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + @java.lang.Override + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud instance ID (not path) to list databases from, e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 3; + private int pageSize_ = 0; + + /** + * + * + *
    +   * Number of databases to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 3; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListDatabasesResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * string page_token = 4; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListDatabasesResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * string page_token = 4; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, instanceId_); + } + if (pageSize_ != 0) { + output.writeInt32(3, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, pageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, instanceId_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(3, pageSize_); + } + if 
(!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, pageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.ListCloudDatabasesAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.ListCloudDatabasesAction other = + (com.google.spanner.executor.v1.ListCloudDatabasesAction) obj; + + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getInstanceId().equals(other.getInstanceId())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.ListCloudDatabasesAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ListCloudDatabasesAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudDatabasesAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ListCloudDatabasesAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudDatabasesAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ListCloudDatabasesAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudDatabasesAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ListCloudDatabasesAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudDatabasesAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static 
com.google.spanner.executor.v1.ListCloudDatabasesAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudDatabasesAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ListCloudDatabasesAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.ListCloudDatabasesAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that lists Cloud Spanner databases.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ListCloudDatabasesAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.ListCloudDatabasesAction) + com.google.spanner.executor.v1.ListCloudDatabasesActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudDatabasesAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudDatabasesAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ListCloudDatabasesAction.class, + com.google.spanner.executor.v1.ListCloudDatabasesAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.ListCloudDatabasesAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + projectId_ = ""; + instanceId_ = ""; + pageSize_ = 0; + pageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudDatabasesAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudDatabasesAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.ListCloudDatabasesAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudDatabasesAction build() 
{ + com.google.spanner.executor.v1.ListCloudDatabasesAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudDatabasesAction buildPartial() { + com.google.spanner.executor.v1.ListCloudDatabasesAction result = + new com.google.spanner.executor.v1.ListCloudDatabasesAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.ListCloudDatabasesAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.projectId_ = projectId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.instanceId_ = instanceId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.pageToken_ = pageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.ListCloudDatabasesAction) { + return mergeFrom((com.google.spanner.executor.v1.ListCloudDatabasesAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.ListCloudDatabasesAction other) { + if (other == com.google.spanner.executor.v1.ListCloudDatabasesAction.getDefaultInstance()) + return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000008; + onChanged(); + } + 
this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + instanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +     * Cloud instance ID (not path) to list databases from, e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path) to list databases from, e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path) to list databases from, e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path) to list databases from, e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearInstanceId() { + instanceId_ = getDefaultInstance().getInstanceId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path) to list databases from, e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The bytes for instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
    +     * Number of databases to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 3; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
    +     * Number of databases to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 3; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Number of databases to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 3; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000004); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListDatabasesResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * string page_token = 4; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListDatabasesResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * string page_token = 4; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListDatabasesResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * string page_token = 4; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListDatabasesResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * string page_token = 4; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListDatabasesResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * string page_token = 4; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.ListCloudDatabasesAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.ListCloudDatabasesAction) + private static final com.google.spanner.executor.v1.ListCloudDatabasesAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.ListCloudDatabasesAction(); + } + + public static com.google.spanner.executor.v1.ListCloudDatabasesAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListCloudDatabasesAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return 
PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudDatabasesAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabasesActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabasesActionOrBuilder.java new file mode 100644 index 000000000000..5e877fe46014 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabasesActionOrBuilder.java @@ -0,0 +1,124 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface ListCloudDatabasesActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.ListCloudDatabasesAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * Cloud instance ID (not path) to list databases from, e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + java.lang.String getInstanceId(); + + /** + * + * + *
    +   * Cloud instance ID (not path) to list databases from, e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + com.google.protobuf.ByteString getInstanceIdBytes(); + + /** + * + * + *
    +   * Number of databases to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 3; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListDatabasesResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * string page_token = 4; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListDatabasesResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * string page_token = 4; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstanceConfigsAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstanceConfigsAction.java new file mode 100644 index 000000000000..07783c3dc33c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstanceConfigsAction.java @@ -0,0 +1,980 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that lists user instance configs.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ListCloudInstanceConfigsAction} + */ +@com.google.protobuf.Generated +public final class ListCloudInstanceConfigsAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.ListCloudInstanceConfigsAction) + ListCloudInstanceConfigsActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListCloudInstanceConfigsAction"); + } + + // Use ListCloudInstanceConfigsAction.newBuilder() to construct. + private ListCloudInstanceConfigsAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListCloudInstanceConfigsAction() { + projectId_ = ""; + pageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudInstanceConfigsAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudInstanceConfigsAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ListCloudInstanceConfigsAction.class, + com.google.spanner.executor.v1.ListCloudInstanceConfigsAction.Builder.class); + } + + private int bitField0_; + public static final int PROJECT_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_ = 0; + + /** + * + * + *
    +   * Number of instance configs to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * optional int32 page_size = 2; + * + * @return Whether the pageSize field is set. + */ + @java.lang.Override + public boolean hasPageSize() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Number of instance configs to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * optional int32 page_size = 2; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListInstanceConfigsResponse to the same "parent".
    +   * 
    + * + * optional string page_token = 3; + * + * @return Whether the pageToken field is set. + */ + @java.lang.Override + public boolean hasPageToken() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListInstanceConfigsResponse to the same "parent".
    +   * 
    + * + * optional string page_token = 3; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListInstanceConfigsResponse to the same "parent".
    +   * 
    + * + * optional string page_token = 3; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, projectId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeInt32(2, pageSize_); + } + if (((bitField0_ & 0x00000002) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, pageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, projectId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, pageSize_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, pageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return 
true; + } + if (!(obj instanceof com.google.spanner.executor.v1.ListCloudInstanceConfigsAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.ListCloudInstanceConfigsAction other = + (com.google.spanner.executor.v1.ListCloudInstanceConfigsAction) obj; + + if (!getProjectId().equals(other.getProjectId())) return false; + if (hasPageSize() != other.hasPageSize()) return false; + if (hasPageSize()) { + if (getPageSize() != other.getPageSize()) return false; + } + if (hasPageToken() != other.hasPageToken()) return false; + if (hasPageToken()) { + if (!getPageToken().equals(other.getPageToken())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + if (hasPageSize()) { + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + } + if (hasPageToken()) { + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.ListCloudInstanceConfigsAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ListCloudInstanceConfigsAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudInstanceConfigsAction parseFrom( + com.google.protobuf.ByteString data) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ListCloudInstanceConfigsAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudInstanceConfigsAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ListCloudInstanceConfigsAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudInstanceConfigsAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ListCloudInstanceConfigsAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudInstanceConfigsAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ListCloudInstanceConfigsAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudInstanceConfigsAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ListCloudInstanceConfigsAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.ListCloudInstanceConfigsAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that lists user instance configs.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ListCloudInstanceConfigsAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.ListCloudInstanceConfigsAction) + com.google.spanner.executor.v1.ListCloudInstanceConfigsActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudInstanceConfigsAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudInstanceConfigsAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ListCloudInstanceConfigsAction.class, + com.google.spanner.executor.v1.ListCloudInstanceConfigsAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.ListCloudInstanceConfigsAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + projectId_ = ""; + pageSize_ = 0; + pageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudInstanceConfigsAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudInstanceConfigsAction + getDefaultInstanceForType() { + return com.google.spanner.executor.v1.ListCloudInstanceConfigsAction.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.executor.v1.ListCloudInstanceConfigsAction build() { + com.google.spanner.executor.v1.ListCloudInstanceConfigsAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudInstanceConfigsAction buildPartial() { + com.google.spanner.executor.v1.ListCloudInstanceConfigsAction result = + new com.google.spanner.executor.v1.ListCloudInstanceConfigsAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.executor.v1.ListCloudInstanceConfigsAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.projectId_ = projectId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.pageSize_ = pageSize_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageToken_ = pageToken_; + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.ListCloudInstanceConfigsAction) { + return mergeFrom((com.google.spanner.executor.v1.ListCloudInstanceConfigsAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.ListCloudInstanceConfigsAction other) { + if (other + == com.google.spanner.executor.v1.ListCloudInstanceConfigsAction.getDefaultInstance()) + return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasPageSize()) { + setPageSize(other.getPageSize()); + } + if (other.hasPageToken()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + 
this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
    +     * Number of instance configs to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * optional int32 page_size = 2; + * + * @return Whether the pageSize field is set. + */ + @java.lang.Override + public boolean hasPageSize() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Number of instance configs to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * optional int32 page_size = 2; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
    +     * Number of instance configs to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * optional int32 page_size = 2; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Number of instance configs to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * optional int32 page_size = 2; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000002); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListInstanceConfigsResponse to the same "parent".
    +     * 
    + * + * optional string page_token = 3; + * + * @return Whether the pageToken field is set. + */ + public boolean hasPageToken() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListInstanceConfigsResponse to the same "parent".
    +     * 
    + * + * optional string page_token = 3; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListInstanceConfigsResponse to the same "parent".
    +     * 
    + * + * optional string page_token = 3; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListInstanceConfigsResponse to the same "parent".
    +     * 
    + * + * optional string page_token = 3; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListInstanceConfigsResponse to the same "parent".
    +     * 
    + * + * optional string page_token = 3; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListInstanceConfigsResponse to the same "parent".
    +     * 
    + * + * optional string page_token = 3; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.ListCloudInstanceConfigsAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.ListCloudInstanceConfigsAction) + private static final com.google.spanner.executor.v1.ListCloudInstanceConfigsAction + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.ListCloudInstanceConfigsAction(); + } + + public static com.google.spanner.executor.v1.ListCloudInstanceConfigsAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListCloudInstanceConfigsAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudInstanceConfigsAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstanceConfigsActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstanceConfigsActionOrBuilder.java new file mode 100644 index 000000000000..f5605722c404 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstanceConfigsActionOrBuilder.java @@ -0,0 +1,124 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface ListCloudInstanceConfigsActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.ListCloudInstanceConfigsAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * Number of instance configs to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * optional int32 page_size = 2; + * + * @return Whether the pageSize field is set. + */ + boolean hasPageSize(); + + /** + * + * + *
    +   * Number of instance configs to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * optional int32 page_size = 2; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListInstanceConfigsResponse to the same "parent".
    +   * 
    + * + * optional string page_token = 3; + * + * @return Whether the pageToken field is set. + */ + boolean hasPageToken(); + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListInstanceConfigsResponse to the same "parent".
    +   * 
    + * + * optional string page_token = 3; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListInstanceConfigsResponse to the same "parent".
    +   * 
    + * + * optional string page_token = 3; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstancesAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstancesAction.java new file mode 100644 index 000000000000..8c75267e1421 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstancesAction.java @@ -0,0 +1,1256 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that lists Cloud Spanner instances.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ListCloudInstancesAction} + */ +@com.google.protobuf.Generated +public final class ListCloudInstancesAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.ListCloudInstancesAction) + ListCloudInstancesActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListCloudInstancesAction"); + } + + // Use ListCloudInstancesAction.newBuilder() to construct. + private ListCloudInstancesAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListCloudInstancesAction() { + projectId_ = ""; + filter_ = ""; + pageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudInstancesAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudInstancesAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ListCloudInstancesAction.class, + com.google.spanner.executor.v1.ListCloudInstancesAction.Builder.class); + } + + private int bitField0_; + public static final int PROJECT_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FILTER_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object filter_ = ""; + + /** + * + * + *
    +   * A filter expression that filters what operations are returned in the
    +   * response.
    +   * The expression must specify the field name, a comparison operator,
    +   * and the value that you want to use for filtering.
    +   * Refer spanner_instance_admin.proto.ListInstancesRequest for
    +   * detail.
    +   * 
    + * + * optional string filter = 2; + * + * @return Whether the filter field is set. + */ + @java.lang.Override + public boolean hasFilter() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * A filter expression that filters what operations are returned in the
    +   * response.
    +   * The expression must specify the field name, a comparison operator,
    +   * and the value that you want to use for filtering.
    +   * Refer spanner_instance_admin.proto.ListInstancesRequest for
    +   * detail.
    +   * 
    + * + * optional string filter = 2; + * + * @return The filter. + */ + @java.lang.Override + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } + } + + /** + * + * + *
    +   * A filter expression that filters what operations are returned in the
    +   * response.
    +   * The expression must specify the field name, a comparison operator,
    +   * and the value that you want to use for filtering.
    +   * Refer spanner_instance_admin.proto.ListInstancesRequest for
    +   * detail.
    +   * 
    + * + * optional string filter = 2; + * + * @return The bytes for filter. + */ + @java.lang.Override + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 3; + private int pageSize_ = 0; + + /** + * + * + *
    +   * Number of instances to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * optional int32 page_size = 3; + * + * @return Whether the pageSize field is set. + */ + @java.lang.Override + public boolean hasPageSize() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Number of instances to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * optional int32 page_size = 3; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListInstancesResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * optional string page_token = 4; + * + * @return Whether the pageToken field is set. + */ + @java.lang.Override + public boolean hasPageToken() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListInstancesResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * optional string page_token = 4; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListInstancesResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * optional string page_token = 4; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, projectId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, filter_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt32(3, pageSize_); + } + if (((bitField0_ & 0x00000004) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, pageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, projectId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, filter_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(3, pageSize_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += 
com.google.protobuf.GeneratedMessage.computeStringSize(4, pageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.ListCloudInstancesAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.ListCloudInstancesAction other = + (com.google.spanner.executor.v1.ListCloudInstancesAction) obj; + + if (!getProjectId().equals(other.getProjectId())) return false; + if (hasFilter() != other.hasFilter()) return false; + if (hasFilter()) { + if (!getFilter().equals(other.getFilter())) return false; + } + if (hasPageSize() != other.hasPageSize()) return false; + if (hasPageSize()) { + if (getPageSize() != other.getPageSize()) return false; + } + if (hasPageToken() != other.hasPageToken()) return false; + if (hasPageToken()) { + if (!getPageToken().equals(other.getPageToken())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + if (hasFilter()) { + hash = (37 * hash) + FILTER_FIELD_NUMBER; + hash = (53 * hash) + getFilter().hashCode(); + } + if (hasPageSize()) { + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + } + if (hasPageToken()) { + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.ListCloudInstancesAction parseFrom( + java.nio.ByteBuffer data) throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ListCloudInstancesAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudInstancesAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ListCloudInstancesAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudInstancesAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ListCloudInstancesAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudInstancesAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ListCloudInstancesAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static 
com.google.spanner.executor.v1.ListCloudInstancesAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ListCloudInstancesAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ListCloudInstancesAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ListCloudInstancesAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.ListCloudInstancesAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that lists Cloud Spanner instances.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ListCloudInstancesAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.ListCloudInstancesAction) + com.google.spanner.executor.v1.ListCloudInstancesActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudInstancesAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudInstancesAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ListCloudInstancesAction.class, + com.google.spanner.executor.v1.ListCloudInstancesAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.ListCloudInstancesAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + projectId_ = ""; + filter_ = ""; + pageSize_ = 0; + pageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ListCloudInstancesAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudInstancesAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.ListCloudInstancesAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudInstancesAction build() { + 
com.google.spanner.executor.v1.ListCloudInstancesAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudInstancesAction buildPartial() { + com.google.spanner.executor.v1.ListCloudInstancesAction result = + new com.google.spanner.executor.v1.ListCloudInstancesAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.ListCloudInstancesAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.projectId_ = projectId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.filter_ = filter_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageSize_ = pageSize_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.pageToken_ = pageToken_; + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.ListCloudInstancesAction) { + return mergeFrom((com.google.spanner.executor.v1.ListCloudInstancesAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.ListCloudInstancesAction other) { + if (other == com.google.spanner.executor.v1.ListCloudInstancesAction.getDefaultInstance()) + return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasFilter()) { + filter_ = other.filter_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasPageSize()) { + setPageSize(other.getPageSize()); + } + if (other.hasPageToken()) { + pageToken_ = 
other.pageToken_; + bitField0_ |= 0x00000008; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + filter_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object filter_ = ""; + + /** + * + * + *
    +     * A filter expression that filters what operations are returned in the
    +     * response.
    +     * The expression must specify the field name, a comparison operator,
    +     * and the value that you want to use for filtering.
    +     * Refer spanner_instance_admin.proto.ListInstancesRequest for
    +     * detail.
    +     * 
    + * + * optional string filter = 2; + * + * @return Whether the filter field is set. + */ + public boolean hasFilter() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * A filter expression that filters what operations are returned in the
    +     * response.
    +     * The expression must specify the field name, a comparison operator,
    +     * and the value that you want to use for filtering.
    +     * Refer spanner_instance_admin.proto.ListInstancesRequest for
    +     * detail.
    +     * 
    + * + * optional string filter = 2; + * + * @return The filter. + */ + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * A filter expression that filters what operations are returned in the
    +     * response.
    +     * The expression must specify the field name, a comparison operator,
    +     * and the value that you want to use for filtering.
    +     * Refer spanner_instance_admin.proto.ListInstancesRequest for
    +     * detail.
    +     * 
    + * + * optional string filter = 2; + * + * @return The bytes for filter. + */ + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * A filter expression that filters what operations are returned in the
    +     * response.
    +     * The expression must specify the field name, a comparison operator,
    +     * and the value that you want to use for filtering.
    +     * Refer spanner_instance_admin.proto.ListInstancesRequest for
    +     * detail.
    +     * 
    + * + * optional string filter = 2; + * + * @param value The filter to set. + * @return This builder for chaining. + */ + public Builder setFilter(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + filter_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A filter expression that filters what operations are returned in the
    +     * response.
    +     * The expression must specify the field name, a comparison operator,
    +     * and the value that you want to use for filtering.
    +     * Refer spanner_instance_admin.proto.ListInstancesRequest for
    +     * detail.
    +     * 
    + * + * optional string filter = 2; + * + * @return This builder for chaining. + */ + public Builder clearFilter() { + filter_ = getDefaultInstance().getFilter(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * A filter expression that filters what operations are returned in the
    +     * response.
    +     * The expression must specify the field name, a comparison operator,
    +     * and the value that you want to use for filtering.
    +     * Refer spanner_instance_admin.proto.ListInstancesRequest for
    +     * detail.
    +     * 
    + * + * optional string filter = 2; + * + * @param value The bytes for filter to set. + * @return This builder for chaining. + */ + public Builder setFilterBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + filter_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
    +     * Number of instances to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * optional int32 page_size = 3; + * + * @return Whether the pageSize field is set. + */ + @java.lang.Override + public boolean hasPageSize() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Number of instances to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * optional int32 page_size = 3; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
    +     * Number of instances to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * optional int32 page_size = 3; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Number of instances to be returned in the response. If 0 or
    +     * less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * optional int32 page_size = 3; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000004); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListInstancesResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * optional string page_token = 4; + * + * @return Whether the pageToken field is set. + */ + public boolean hasPageToken() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListInstancesResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * optional string page_token = 4; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListInstancesResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * optional string page_token = 4; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListInstancesResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * optional string page_token = 4; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListInstancesResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * optional string page_token = 4; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, "page_token" should contain a next_page_token
    +     * from a previous ListInstancesResponse to the same "parent"
    +     * and with the same "filter".
    +     * 
    + * + * optional string page_token = 4; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.ListCloudInstancesAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.ListCloudInstancesAction) + private static final com.google.spanner.executor.v1.ListCloudInstancesAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.ListCloudInstancesAction(); + } + + public static com.google.spanner.executor.v1.ListCloudInstancesAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListCloudInstancesAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + 
return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ListCloudInstancesAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstancesActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstancesActionOrBuilder.java new file mode 100644 index 000000000000..05a23503d17a --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstancesActionOrBuilder.java @@ -0,0 +1,181 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface ListCloudInstancesActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.ListCloudInstancesAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * A filter expression that filters what operations are returned in the
    +   * response.
    +   * The expression must specify the field name, a comparison operator,
    +   * and the value that you want to use for filtering.
    +   * Refer spanner_instance_admin.proto.ListInstancesRequest for
    +   * detail.
    +   * 
    + * + * optional string filter = 2; + * + * @return Whether the filter field is set. + */ + boolean hasFilter(); + + /** + * + * + *
    +   * A filter expression that filters what operations are returned in the
    +   * response.
    +   * The expression must specify the field name, a comparison operator,
    +   * and the value that you want to use for filtering.
    +   * Refer spanner_instance_admin.proto.ListInstancesRequest for
    +   * detail.
    +   * 
    + * + * optional string filter = 2; + * + * @return The filter. + */ + java.lang.String getFilter(); + + /** + * + * + *
    +   * A filter expression that filters what operations are returned in the
    +   * response.
    +   * The expression must specify the field name, a comparison operator,
    +   * and the value that you want to use for filtering.
    +   * Refer spanner_instance_admin.proto.ListInstancesRequest for
    +   * detail.
    +   * 
    + * + * optional string filter = 2; + * + * @return The bytes for filter. + */ + com.google.protobuf.ByteString getFilterBytes(); + + /** + * + * + *
    +   * Number of instances to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * optional int32 page_size = 3; + * + * @return Whether the pageSize field is set. + */ + boolean hasPageSize(); + + /** + * + * + *
    +   * Number of instances to be returned in the response. If 0 or
    +   * less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * optional int32 page_size = 3; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListInstancesResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * optional string page_token = 4; + * + * @return Whether the pageToken field is set. + */ + boolean hasPageToken(); + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListInstancesResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * optional string page_token = 4; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
    +   * If non-empty, "page_token" should contain a next_page_token
    +   * from a previous ListInstancesResponse to the same "parent"
    +   * and with the same "filter".
    +   * 
    + * + * optional string page_token = 4; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/MutationAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/MutationAction.java new file mode 100644 index 000000000000..e1b8bc62386a --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/MutationAction.java @@ -0,0 +1,6902 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * A single mutation request.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.MutationAction} + */ +@com.google.protobuf.Generated +public final class MutationAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.MutationAction) + MutationActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "MutationAction"); + } + + // Use MutationAction.newBuilder() to construct. + private MutationAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private MutationAction() { + mod_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_MutationAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_MutationAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.MutationAction.class, + com.google.spanner.executor.v1.MutationAction.Builder.class); + } + + public interface InsertArgsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.MutationAction.InsertArgs) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * The names of the columns to be written.
    +     * 
    + * + * repeated string column = 1; + * + * @return A list containing the column. + */ + java.util.List getColumnList(); + + /** + * + * + *
    +     * The names of the columns to be written.
    +     * 
    + * + * repeated string column = 1; + * + * @return The count of column. + */ + int getColumnCount(); + + /** + * + * + *
    +     * The names of the columns to be written.
    +     * 
    + * + * repeated string column = 1; + * + * @param index The index of the element to return. + * @return The column at the given index. + */ + java.lang.String getColumn(int index); + + /** + * + * + *
    +     * The names of the columns to be written.
    +     * 
    + * + * repeated string column = 1; + * + * @param index The index of the value to return. + * @return The bytes of the column at the given index. + */ + com.google.protobuf.ByteString getColumnBytes(int index); + + /** + * + * + *
    +     * Type information for the "values" entries below.
    +     * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + java.util.List getTypeList(); + + /** + * + * + *
    +     * Type information for the "values" entries below.
    +     * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + com.google.spanner.v1.Type getType(int index); + + /** + * + * + *
    +     * Type information for the "values" entries below.
    +     * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + int getTypeCount(); + + /** + * + * + *
    +     * Type information for the "values" entries below.
    +     * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + java.util.List getTypeOrBuilderList(); + + /** + * + * + *
    +     * Type information for the "values" entries below.
    +     * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + com.google.spanner.v1.TypeOrBuilder getTypeOrBuilder(int index); + + /** + * + * + *
    +     * The values to be written.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + java.util.List getValuesList(); + + /** + * + * + *
    +     * The values to be written.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + com.google.spanner.executor.v1.ValueList getValues(int index); + + /** + * + * + *
    +     * The values to be written.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + int getValuesCount(); + + /** + * + * + *
    +     * The values to be written.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + java.util.List + getValuesOrBuilderList(); + + /** + * + * + *
    +     * The values to be written.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + com.google.spanner.executor.v1.ValueListOrBuilder getValuesOrBuilder(int index); + } + + /** + * + * + *
    +   * Arguments to Insert, InsertOrUpdate, and Replace operations.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.MutationAction.InsertArgs} + */ + public static final class InsertArgs extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.MutationAction.InsertArgs) + InsertArgsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "InsertArgs"); + } + + // Use InsertArgs.newBuilder() to construct. + private InsertArgs(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private InsertArgs() { + column_ = com.google.protobuf.LazyStringArrayList.emptyList(); + type_ = java.util.Collections.emptyList(); + values_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_MutationAction_InsertArgs_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_MutationAction_InsertArgs_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.MutationAction.InsertArgs.class, + com.google.spanner.executor.v1.MutationAction.InsertArgs.Builder.class); + } + + public static final int COLUMN_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList column_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +     * The names of the columns to be written.
    +     * 
    + * + * repeated string column = 1; + * + * @return A list containing the column. + */ + public com.google.protobuf.ProtocolStringList getColumnList() { + return column_; + } + + /** + * + * + *
    +     * The names of the columns to be written.
    +     * 
    + * + * repeated string column = 1; + * + * @return The count of column. + */ + public int getColumnCount() { + return column_.size(); + } + + /** + * + * + *
    +     * The names of the columns to be written.
    +     * 
    + * + * repeated string column = 1; + * + * @param index The index of the element to return. + * @return The column at the given index. + */ + public java.lang.String getColumn(int index) { + return column_.get(index); + } + + /** + * + * + *
    +     * The names of the columns to be written.
    +     * 
    + * + * repeated string column = 1; + * + * @param index The index of the value to return. + * @return The bytes of the column at the given index. + */ + public com.google.protobuf.ByteString getColumnBytes(int index) { + return column_.getByteString(index); + } + + public static final int TYPE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List type_; + + /** + * + * + *
    +     * Type information for the "values" entries below.
    +     * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + @java.lang.Override + public java.util.List getTypeList() { + return type_; + } + + /** + * + * + *
    +     * Type information for the "values" entries below.
    +     * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + @java.lang.Override + public java.util.List getTypeOrBuilderList() { + return type_; + } + + /** + * + * + *
    +     * Type information for the "values" entries below.
    +     * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + @java.lang.Override + public int getTypeCount() { + return type_.size(); + } + + /** + * + * + *
    +     * Type information for the "values" entries below.
    +     * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + @java.lang.Override + public com.google.spanner.v1.Type getType(int index) { + return type_.get(index); + } + + /** + * + * + *
    +     * Type information for the "values" entries below.
    +     * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + @java.lang.Override + public com.google.spanner.v1.TypeOrBuilder getTypeOrBuilder(int index) { + return type_.get(index); + } + + public static final int VALUES_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private java.util.List values_; + + /** + * + * + *
    +     * The values to be written.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + @java.lang.Override + public java.util.List getValuesList() { + return values_; + } + + /** + * + * + *
    +     * The values to be written.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + @java.lang.Override + public java.util.List + getValuesOrBuilderList() { + return values_; + } + + /** + * + * + *
    +     * The values to be written.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + @java.lang.Override + public int getValuesCount() { + return values_.size(); + } + + /** + * + * + *
    +     * The values to be written.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueList getValues(int index) { + return values_.get(index); + } + + /** + * + * + *
    +     * The values to be written.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueListOrBuilder getValuesOrBuilder(int index) { + return values_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < column_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, column_.getRaw(i)); + } + for (int i = 0; i < type_.size(); i++) { + output.writeMessage(2, type_.get(i)); + } + for (int i = 0; i < values_.size(); i++) { + output.writeMessage(3, values_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < column_.size(); i++) { + dataSize += computeStringSizeNoTag(column_.getRaw(i)); + } + size += dataSize; + size += 1 * getColumnList().size(); + } + for (int i = 0; i < type_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, type_.get(i)); + } + for (int i = 0; i < values_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, values_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.MutationAction.InsertArgs)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.MutationAction.InsertArgs other = + 
(com.google.spanner.executor.v1.MutationAction.InsertArgs) obj; + + if (!getColumnList().equals(other.getColumnList())) return false; + if (!getTypeList().equals(other.getTypeList())) return false; + if (!getValuesList().equals(other.getValuesList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getColumnCount() > 0) { + hash = (37 * hash) + COLUMN_FIELD_NUMBER; + hash = (53 * hash) + getColumnList().hashCode(); + } + if (getTypeCount() > 0) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + getTypeList().hashCode(); + } + if (getValuesCount() > 0) { + hash = (37 * hash) + VALUES_FIELD_NUMBER; + hash = (53 * hash) + getValuesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.MutationAction.InsertArgs parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.MutationAction.InsertArgs parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.MutationAction.InsertArgs parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.MutationAction.InsertArgs parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.MutationAction.InsertArgs parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.MutationAction.InsertArgs parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.MutationAction.InsertArgs parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.MutationAction.InsertArgs parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.MutationAction.InsertArgs parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.MutationAction.InsertArgs parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.MutationAction.InsertArgs parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.MutationAction.InsertArgs parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.MutationAction.InsertArgs prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * Arguments to Insert, InsertOrUpdate, and Replace operations.
    +     * 
    + * + * Protobuf type {@code google.spanner.executor.v1.MutationAction.InsertArgs} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.MutationAction.InsertArgs) + com.google.spanner.executor.v1.MutationAction.InsertArgsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_MutationAction_InsertArgs_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_MutationAction_InsertArgs_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.MutationAction.InsertArgs.class, + com.google.spanner.executor.v1.MutationAction.InsertArgs.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.MutationAction.InsertArgs.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + column_ = com.google.protobuf.LazyStringArrayList.emptyList(); + if (typeBuilder_ == null) { + type_ = java.util.Collections.emptyList(); + } else { + type_ = null; + typeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (valuesBuilder_ == null) { + values_ = java.util.Collections.emptyList(); + } else { + values_ = null; + valuesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + 
.internal_static_google_spanner_executor_v1_MutationAction_InsertArgs_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.InsertArgs getDefaultInstanceForType() { + return com.google.spanner.executor.v1.MutationAction.InsertArgs.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.InsertArgs build() { + com.google.spanner.executor.v1.MutationAction.InsertArgs result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.InsertArgs buildPartial() { + com.google.spanner.executor.v1.MutationAction.InsertArgs result = + new com.google.spanner.executor.v1.MutationAction.InsertArgs(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.executor.v1.MutationAction.InsertArgs result) { + if (typeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + type_ = java.util.Collections.unmodifiableList(type_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.type_ = type_; + } else { + result.type_ = typeBuilder_.build(); + } + if (valuesBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0)) { + values_ = java.util.Collections.unmodifiableList(values_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.values_ = values_; + } else { + result.values_ = valuesBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.executor.v1.MutationAction.InsertArgs result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + column_.makeImmutable(); + result.column_ = column_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof 
com.google.spanner.executor.v1.MutationAction.InsertArgs) { + return mergeFrom((com.google.spanner.executor.v1.MutationAction.InsertArgs) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.MutationAction.InsertArgs other) { + if (other == com.google.spanner.executor.v1.MutationAction.InsertArgs.getDefaultInstance()) + return this; + if (!other.column_.isEmpty()) { + if (column_.isEmpty()) { + column_ = other.column_; + bitField0_ |= 0x00000001; + } else { + ensureColumnIsMutable(); + column_.addAll(other.column_); + } + onChanged(); + } + if (typeBuilder_ == null) { + if (!other.type_.isEmpty()) { + if (type_.isEmpty()) { + type_ = other.type_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureTypeIsMutable(); + type_.addAll(other.type_); + } + onChanged(); + } + } else { + if (!other.type_.isEmpty()) { + if (typeBuilder_.isEmpty()) { + typeBuilder_.dispose(); + typeBuilder_ = null; + type_ = other.type_; + bitField0_ = (bitField0_ & ~0x00000002); + typeBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetTypeFieldBuilder() + : null; + } else { + typeBuilder_.addAllMessages(other.type_); + } + } + } + if (valuesBuilder_ == null) { + if (!other.values_.isEmpty()) { + if (values_.isEmpty()) { + values_ = other.values_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureValuesIsMutable(); + values_.addAll(other.values_); + } + onChanged(); + } + } else { + if (!other.values_.isEmpty()) { + if (valuesBuilder_.isEmpty()) { + valuesBuilder_.dispose(); + valuesBuilder_ = null; + values_ = other.values_; + bitField0_ = (bitField0_ & ~0x00000004); + valuesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetValuesFieldBuilder() + : null; + } else { + valuesBuilder_.addAllMessages(other.values_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureColumnIsMutable(); + column_.add(s); + break; + } // case 10 + case 18: + { + com.google.spanner.v1.Type m = + input.readMessage(com.google.spanner.v1.Type.parser(), extensionRegistry); + if (typeBuilder_ == null) { + ensureTypeIsMutable(); + type_.add(m); + } else { + typeBuilder_.addMessage(m); + } + break; + } // case 18 + case 26: + { + com.google.spanner.executor.v1.ValueList m = + input.readMessage( + com.google.spanner.executor.v1.ValueList.parser(), extensionRegistry); + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + values_.add(m); + } else { + valuesBuilder_.addMessage(m); + } + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList column_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureColumnIsMutable() { + if (!column_.isModifiable()) { + column_ = new 
com.google.protobuf.LazyStringArrayList(column_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
    +       * The names of the columns to be written.
    +       * 
    + * + * repeated string column = 1; + * + * @return A list containing the column. + */ + public com.google.protobuf.ProtocolStringList getColumnList() { + column_.makeImmutable(); + return column_; + } + + /** + * + * + *
    +       * The names of the columns to be written.
    +       * 
    + * + * repeated string column = 1; + * + * @return The count of column. + */ + public int getColumnCount() { + return column_.size(); + } + + /** + * + * + *
    +       * The names of the columns to be written.
    +       * 
    + * + * repeated string column = 1; + * + * @param index The index of the element to return. + * @return The column at the given index. + */ + public java.lang.String getColumn(int index) { + return column_.get(index); + } + + /** + * + * + *
    +       * The names of the columns to be written.
    +       * 
    + * + * repeated string column = 1; + * + * @param index The index of the value to return. + * @return The bytes of the column at the given index. + */ + public com.google.protobuf.ByteString getColumnBytes(int index) { + return column_.getByteString(index); + } + + /** + * + * + *
    +       * The names of the columns to be written.
    +       * 
    + * + * repeated string column = 1; + * + * @param index The index to set the value at. + * @param value The column to set. + * @return This builder for chaining. + */ + public Builder setColumn(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnIsMutable(); + column_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The names of the columns to be written.
    +       * 
    + * + * repeated string column = 1; + * + * @param value The column to add. + * @return This builder for chaining. + */ + public Builder addColumn(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnIsMutable(); + column_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The names of the columns to be written.
    +       * 
    + * + * repeated string column = 1; + * + * @param values The column to add. + * @return This builder for chaining. + */ + public Builder addAllColumn(java.lang.Iterable values) { + ensureColumnIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, column_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The names of the columns to be written.
    +       * 
    + * + * repeated string column = 1; + * + * @return This builder for chaining. + */ + public Builder clearColumn() { + column_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The names of the columns to be written.
    +       * 
    + * + * repeated string column = 1; + * + * @param value The bytes of the column to add. + * @return This builder for chaining. + */ + public Builder addColumnBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureColumnIsMutable(); + column_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.util.List type_ = java.util.Collections.emptyList(); + + private void ensureTypeIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + type_ = new java.util.ArrayList(type_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder> + typeBuilder_; + + /** + * + * + *
    +       * Type information for the "values" entries below.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public java.util.List getTypeList() { + if (typeBuilder_ == null) { + return java.util.Collections.unmodifiableList(type_); + } else { + return typeBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +       * Type information for the "values" entries below.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public int getTypeCount() { + if (typeBuilder_ == null) { + return type_.size(); + } else { + return typeBuilder_.getCount(); + } + } + + /** + * + * + *
    +       * Type information for the "values" entries below.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public com.google.spanner.v1.Type getType(int index) { + if (typeBuilder_ == null) { + return type_.get(index); + } else { + return typeBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +       * Type information for the "values" entries below.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public Builder setType(int index, com.google.spanner.v1.Type value) { + if (typeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTypeIsMutable(); + type_.set(index, value); + onChanged(); + } else { + typeBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * Type information for the "values" entries below.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public Builder setType(int index, com.google.spanner.v1.Type.Builder builderForValue) { + if (typeBuilder_ == null) { + ensureTypeIsMutable(); + type_.set(index, builderForValue.build()); + onChanged(); + } else { + typeBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * Type information for the "values" entries below.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public Builder addType(com.google.spanner.v1.Type value) { + if (typeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTypeIsMutable(); + type_.add(value); + onChanged(); + } else { + typeBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +       * Type information for the "values" entries below.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public Builder addType(int index, com.google.spanner.v1.Type value) { + if (typeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTypeIsMutable(); + type_.add(index, value); + onChanged(); + } else { + typeBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * Type information for the "values" entries below.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public Builder addType(com.google.spanner.v1.Type.Builder builderForValue) { + if (typeBuilder_ == null) { + ensureTypeIsMutable(); + type_.add(builderForValue.build()); + onChanged(); + } else { + typeBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * Type information for the "values" entries below.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public Builder addType(int index, com.google.spanner.v1.Type.Builder builderForValue) { + if (typeBuilder_ == null) { + ensureTypeIsMutable(); + type_.add(index, builderForValue.build()); + onChanged(); + } else { + typeBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * Type information for the "values" entries below.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public Builder addAllType(java.lang.Iterable values) { + if (typeBuilder_ == null) { + ensureTypeIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, type_); + onChanged(); + } else { + typeBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +       * Type information for the "values" entries below.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public Builder clearType() { + if (typeBuilder_ == null) { + type_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + typeBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +       * Type information for the "values" entries below.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public Builder removeType(int index) { + if (typeBuilder_ == null) { + ensureTypeIsMutable(); + type_.remove(index); + onChanged(); + } else { + typeBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +       * Type information for the "values" entries below.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public com.google.spanner.v1.Type.Builder getTypeBuilder(int index) { + return internalGetTypeFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +       * Type information for the "values" entries below.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public com.google.spanner.v1.TypeOrBuilder getTypeOrBuilder(int index) { + if (typeBuilder_ == null) { + return type_.get(index); + } else { + return typeBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +       * Type information for the "values" entries below.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public java.util.List getTypeOrBuilderList() { + if (typeBuilder_ != null) { + return typeBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(type_); + } + } + + /** + * + * + *
    +       * Type information for the "values" entries below.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public com.google.spanner.v1.Type.Builder addTypeBuilder() { + return internalGetTypeFieldBuilder() + .addBuilder(com.google.spanner.v1.Type.getDefaultInstance()); + } + + /** + * + * + *
    +       * Type information for the "values" entries below.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public com.google.spanner.v1.Type.Builder addTypeBuilder(int index) { + return internalGetTypeFieldBuilder() + .addBuilder(index, com.google.spanner.v1.Type.getDefaultInstance()); + } + + /** + * + * + *
    +       * Type information for the "values" entries below.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public java.util.List getTypeBuilderList() { + return internalGetTypeFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder> + internalGetTypeFieldBuilder() { + if (typeBuilder_ == null) { + typeBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder>( + type_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + type_ = null; + } + return typeBuilder_; + } + + private java.util.List values_ = + java.util.Collections.emptyList(); + + private void ensureValuesIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + values_ = new java.util.ArrayList(values_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder> + valuesBuilder_; + + /** + * + * + *
    +       * The values to be written.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public java.util.List getValuesList() { + if (valuesBuilder_ == null) { + return java.util.Collections.unmodifiableList(values_); + } else { + return valuesBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +       * The values to be written.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public int getValuesCount() { + if (valuesBuilder_ == null) { + return values_.size(); + } else { + return valuesBuilder_.getCount(); + } + } + + /** + * + * + *
    +       * The values to be written.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public com.google.spanner.executor.v1.ValueList getValues(int index) { + if (valuesBuilder_ == null) { + return values_.get(index); + } else { + return valuesBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +       * The values to be written.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public Builder setValues(int index, com.google.spanner.executor.v1.ValueList value) { + if (valuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.set(index, value); + onChanged(); + } else { + valuesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * The values to be written.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public Builder setValues( + int index, com.google.spanner.executor.v1.ValueList.Builder builderForValue) { + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + values_.set(index, builderForValue.build()); + onChanged(); + } else { + valuesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * The values to be written.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public Builder addValues(com.google.spanner.executor.v1.ValueList value) { + if (valuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.add(value); + onChanged(); + } else { + valuesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +       * The values to be written.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public Builder addValues(int index, com.google.spanner.executor.v1.ValueList value) { + if (valuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.add(index, value); + onChanged(); + } else { + valuesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * The values to be written.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public Builder addValues(com.google.spanner.executor.v1.ValueList.Builder builderForValue) { + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + values_.add(builderForValue.build()); + onChanged(); + } else { + valuesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * The values to be written.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public Builder addValues( + int index, com.google.spanner.executor.v1.ValueList.Builder builderForValue) { + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + values_.add(index, builderForValue.build()); + onChanged(); + } else { + valuesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * The values to be written.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public Builder addAllValues( + java.lang.Iterable values) { + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, values_); + onChanged(); + } else { + valuesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +       * The values to be written.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public Builder clearValues() { + if (valuesBuilder_ == null) { + values_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + valuesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +       * The values to be written.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public Builder removeValues(int index) { + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + values_.remove(index); + onChanged(); + } else { + valuesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +       * The values to be written.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public com.google.spanner.executor.v1.ValueList.Builder getValuesBuilder(int index) { + return internalGetValuesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +       * The values to be written.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public com.google.spanner.executor.v1.ValueListOrBuilder getValuesOrBuilder(int index) { + if (valuesBuilder_ == null) { + return values_.get(index); + } else { + return valuesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +       * The values to be written.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public java.util.List + getValuesOrBuilderList() { + if (valuesBuilder_ != null) { + return valuesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(values_); + } + } + + /** + * + * + *
    +       * The values to be written.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public com.google.spanner.executor.v1.ValueList.Builder addValuesBuilder() { + return internalGetValuesFieldBuilder() + .addBuilder(com.google.spanner.executor.v1.ValueList.getDefaultInstance()); + } + + /** + * + * + *
    +       * The values to be written.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public com.google.spanner.executor.v1.ValueList.Builder addValuesBuilder(int index) { + return internalGetValuesFieldBuilder() + .addBuilder(index, com.google.spanner.executor.v1.ValueList.getDefaultInstance()); + } + + /** + * + * + *
    +       * The values to be written.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public java.util.List + getValuesBuilderList() { + return internalGetValuesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder> + internalGetValuesFieldBuilder() { + if (valuesBuilder_ == null) { + valuesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder>( + values_, ((bitField0_ & 0x00000004) != 0), getParentForChildren(), isClean()); + values_ = null; + } + return valuesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.MutationAction.InsertArgs) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.MutationAction.InsertArgs) + private static final com.google.spanner.executor.v1.MutationAction.InsertArgs DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.MutationAction.InsertArgs(); + } + + public static com.google.spanner.executor.v1.MutationAction.InsertArgs getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public InsertArgs parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.InsertArgs getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface UpdateArgsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.MutationAction.UpdateArgs) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * The columns to be updated. Identical to InsertArgs.column.
    +     * 
    + * + * repeated string column = 1; + * + * @return A list containing the column. + */ + java.util.List getColumnList(); + + /** + * + * + *
    +     * The columns to be updated. Identical to InsertArgs.column.
    +     * 
    + * + * repeated string column = 1; + * + * @return The count of column. + */ + int getColumnCount(); + + /** + * + * + *
    +     * The columns to be updated. Identical to InsertArgs.column.
    +     * 
    + * + * repeated string column = 1; + * + * @param index The index of the element to return. + * @return The column at the given index. + */ + java.lang.String getColumn(int index); + + /** + * + * + *
    +     * The columns to be updated. Identical to InsertArgs.column.
    +     * 
    + * + * repeated string column = 1; + * + * @param index The index of the value to return. + * @return The bytes of the column at the given index. + */ + com.google.protobuf.ByteString getColumnBytes(int index); + + /** + * + * + *
    +     * Type information for "values". Identical to InsertArgs.type.
    +     * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + java.util.List getTypeList(); + + /** + * + * + *
    +     * Type information for "values". Identical to InsertArgs.type.
    +     * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + com.google.spanner.v1.Type getType(int index); + + /** + * + * + *
    +     * Type information for "values". Identical to InsertArgs.type.
    +     * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + int getTypeCount(); + + /** + * + * + *
    +     * Type information for "values". Identical to InsertArgs.type.
    +     * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + java.util.List getTypeOrBuilderList(); + + /** + * + * + *
    +     * Type information for "values". Identical to InsertArgs.type.
    +     * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + com.google.spanner.v1.TypeOrBuilder getTypeOrBuilder(int index); + + /** + * + * + *
    +     * The values to be updated. Identical to InsertArgs.values.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + java.util.List getValuesList(); + + /** + * + * + *
    +     * The values to be updated. Identical to InsertArgs.values.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + com.google.spanner.executor.v1.ValueList getValues(int index); + + /** + * + * + *
    +     * The values to be updated. Identical to InsertArgs.values.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + int getValuesCount(); + + /** + * + * + *
    +     * The values to be updated. Identical to InsertArgs.values.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + java.util.List + getValuesOrBuilderList(); + + /** + * + * + *
    +     * The values to be updated. Identical to InsertArgs.values.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + com.google.spanner.executor.v1.ValueListOrBuilder getValuesOrBuilder(int index); + } + + /** + * + * + *
    +   * Arguments to Update.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.MutationAction.UpdateArgs} + */ + public static final class UpdateArgs extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.MutationAction.UpdateArgs) + UpdateArgsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateArgs"); + } + + // Use UpdateArgs.newBuilder() to construct. + private UpdateArgs(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateArgs() { + column_ = com.google.protobuf.LazyStringArrayList.emptyList(); + type_ = java.util.Collections.emptyList(); + values_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_MutationAction_UpdateArgs_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_MutationAction_UpdateArgs_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.MutationAction.UpdateArgs.class, + com.google.spanner.executor.v1.MutationAction.UpdateArgs.Builder.class); + } + + public static final int COLUMN_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList column_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +     * The columns to be updated. Identical to InsertArgs.column.
    +     * 
    + * + * repeated string column = 1; + * + * @return A list containing the column. + */ + public com.google.protobuf.ProtocolStringList getColumnList() { + return column_; + } + + /** + * + * + *
    +     * The columns to be updated. Identical to InsertArgs.column.
    +     * 
    + * + * repeated string column = 1; + * + * @return The count of column. + */ + public int getColumnCount() { + return column_.size(); + } + + /** + * + * + *
    +     * The columns to be updated. Identical to InsertArgs.column.
    +     * 
    + * + * repeated string column = 1; + * + * @param index The index of the element to return. + * @return The column at the given index. + */ + public java.lang.String getColumn(int index) { + return column_.get(index); + } + + /** + * + * + *
    +     * The columns to be updated. Identical to InsertArgs.column.
    +     * 
    + * + * repeated string column = 1; + * + * @param index The index of the value to return. + * @return The bytes of the column at the given index. + */ + public com.google.protobuf.ByteString getColumnBytes(int index) { + return column_.getByteString(index); + } + + public static final int TYPE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List type_; + + /** + * + * + *
    +     * Type information for "values". Identical to InsertArgs.type.
    +     * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + @java.lang.Override + public java.util.List getTypeList() { + return type_; + } + + /** + * + * + *
    +     * Type information for "values". Identical to InsertArgs.type.
    +     * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + @java.lang.Override + public java.util.List getTypeOrBuilderList() { + return type_; + } + + /** + * + * + *
    +     * Type information for "values". Identical to InsertArgs.type.
    +     * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + @java.lang.Override + public int getTypeCount() { + return type_.size(); + } + + /** + * + * + *
    +     * Type information for "values". Identical to InsertArgs.type.
    +     * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + @java.lang.Override + public com.google.spanner.v1.Type getType(int index) { + return type_.get(index); + } + + /** + * + * + *
    +     * Type information for "values". Identical to InsertArgs.type.
    +     * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + @java.lang.Override + public com.google.spanner.v1.TypeOrBuilder getTypeOrBuilder(int index) { + return type_.get(index); + } + + public static final int VALUES_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private java.util.List values_; + + /** + * + * + *
    +     * The values to be updated. Identical to InsertArgs.values.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + @java.lang.Override + public java.util.List getValuesList() { + return values_; + } + + /** + * + * + *
    +     * The values to be updated. Identical to InsertArgs.values.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + @java.lang.Override + public java.util.List + getValuesOrBuilderList() { + return values_; + } + + /** + * + * + *
    +     * The values to be updated. Identical to InsertArgs.values.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + @java.lang.Override + public int getValuesCount() { + return values_.size(); + } + + /** + * + * + *
    +     * The values to be updated. Identical to InsertArgs.values.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueList getValues(int index) { + return values_.get(index); + } + + /** + * + * + *
    +     * The values to be updated. Identical to InsertArgs.values.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueListOrBuilder getValuesOrBuilder(int index) { + return values_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < column_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, column_.getRaw(i)); + } + for (int i = 0; i < type_.size(); i++) { + output.writeMessage(2, type_.get(i)); + } + for (int i = 0; i < values_.size(); i++) { + output.writeMessage(3, values_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < column_.size(); i++) { + dataSize += computeStringSizeNoTag(column_.getRaw(i)); + } + size += dataSize; + size += 1 * getColumnList().size(); + } + for (int i = 0; i < type_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, type_.get(i)); + } + for (int i = 0; i < values_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, values_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.MutationAction.UpdateArgs)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.MutationAction.UpdateArgs other = + 
(com.google.spanner.executor.v1.MutationAction.UpdateArgs) obj; + + if (!getColumnList().equals(other.getColumnList())) return false; + if (!getTypeList().equals(other.getTypeList())) return false; + if (!getValuesList().equals(other.getValuesList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getColumnCount() > 0) { + hash = (37 * hash) + COLUMN_FIELD_NUMBER; + hash = (53 * hash) + getColumnList().hashCode(); + } + if (getTypeCount() > 0) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + getTypeList().hashCode(); + } + if (getValuesCount() > 0) { + hash = (37 * hash) + VALUES_FIELD_NUMBER; + hash = (53 * hash) + getValuesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.MutationAction.UpdateArgs parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.MutationAction.UpdateArgs parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.MutationAction.UpdateArgs parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.MutationAction.UpdateArgs parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.MutationAction.UpdateArgs parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.MutationAction.UpdateArgs parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.MutationAction.UpdateArgs parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.MutationAction.UpdateArgs parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.MutationAction.UpdateArgs parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.MutationAction.UpdateArgs parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.MutationAction.UpdateArgs parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.MutationAction.UpdateArgs parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.MutationAction.UpdateArgs prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * Arguments to Update.
    +     * 
    + * + * Protobuf type {@code google.spanner.executor.v1.MutationAction.UpdateArgs} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.MutationAction.UpdateArgs) + com.google.spanner.executor.v1.MutationAction.UpdateArgsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_MutationAction_UpdateArgs_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_MutationAction_UpdateArgs_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.MutationAction.UpdateArgs.class, + com.google.spanner.executor.v1.MutationAction.UpdateArgs.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.MutationAction.UpdateArgs.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + column_ = com.google.protobuf.LazyStringArrayList.emptyList(); + if (typeBuilder_ == null) { + type_ = java.util.Collections.emptyList(); + } else { + type_ = null; + typeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (valuesBuilder_ == null) { + values_ = java.util.Collections.emptyList(); + } else { + values_ = null; + valuesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + 
.internal_static_google_spanner_executor_v1_MutationAction_UpdateArgs_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.UpdateArgs getDefaultInstanceForType() { + return com.google.spanner.executor.v1.MutationAction.UpdateArgs.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.UpdateArgs build() { + com.google.spanner.executor.v1.MutationAction.UpdateArgs result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.UpdateArgs buildPartial() { + com.google.spanner.executor.v1.MutationAction.UpdateArgs result = + new com.google.spanner.executor.v1.MutationAction.UpdateArgs(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.executor.v1.MutationAction.UpdateArgs result) { + if (typeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + type_ = java.util.Collections.unmodifiableList(type_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.type_ = type_; + } else { + result.type_ = typeBuilder_.build(); + } + if (valuesBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0)) { + values_ = java.util.Collections.unmodifiableList(values_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.values_ = values_; + } else { + result.values_ = valuesBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.executor.v1.MutationAction.UpdateArgs result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + column_.makeImmutable(); + result.column_ = column_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof 
com.google.spanner.executor.v1.MutationAction.UpdateArgs) { + return mergeFrom((com.google.spanner.executor.v1.MutationAction.UpdateArgs) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.MutationAction.UpdateArgs other) { + if (other == com.google.spanner.executor.v1.MutationAction.UpdateArgs.getDefaultInstance()) + return this; + if (!other.column_.isEmpty()) { + if (column_.isEmpty()) { + column_ = other.column_; + bitField0_ |= 0x00000001; + } else { + ensureColumnIsMutable(); + column_.addAll(other.column_); + } + onChanged(); + } + if (typeBuilder_ == null) { + if (!other.type_.isEmpty()) { + if (type_.isEmpty()) { + type_ = other.type_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureTypeIsMutable(); + type_.addAll(other.type_); + } + onChanged(); + } + } else { + if (!other.type_.isEmpty()) { + if (typeBuilder_.isEmpty()) { + typeBuilder_.dispose(); + typeBuilder_ = null; + type_ = other.type_; + bitField0_ = (bitField0_ & ~0x00000002); + typeBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetTypeFieldBuilder() + : null; + } else { + typeBuilder_.addAllMessages(other.type_); + } + } + } + if (valuesBuilder_ == null) { + if (!other.values_.isEmpty()) { + if (values_.isEmpty()) { + values_ = other.values_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureValuesIsMutable(); + values_.addAll(other.values_); + } + onChanged(); + } + } else { + if (!other.values_.isEmpty()) { + if (valuesBuilder_.isEmpty()) { + valuesBuilder_.dispose(); + valuesBuilder_ = null; + values_ = other.values_; + bitField0_ = (bitField0_ & ~0x00000004); + valuesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetValuesFieldBuilder() + : null; + } else { + valuesBuilder_.addAllMessages(other.values_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureColumnIsMutable(); + column_.add(s); + break; + } // case 10 + case 18: + { + com.google.spanner.v1.Type m = + input.readMessage(com.google.spanner.v1.Type.parser(), extensionRegistry); + if (typeBuilder_ == null) { + ensureTypeIsMutable(); + type_.add(m); + } else { + typeBuilder_.addMessage(m); + } + break; + } // case 18 + case 26: + { + com.google.spanner.executor.v1.ValueList m = + input.readMessage( + com.google.spanner.executor.v1.ValueList.parser(), extensionRegistry); + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + values_.add(m); + } else { + valuesBuilder_.addMessage(m); + } + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList column_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureColumnIsMutable() { + if (!column_.isModifiable()) { + column_ = new 
com.google.protobuf.LazyStringArrayList(column_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
    +       * The columns to be updated. Identical to InsertArgs.column.
    +       * 
    + * + * repeated string column = 1; + * + * @return A list containing the column. + */ + public com.google.protobuf.ProtocolStringList getColumnList() { + column_.makeImmutable(); + return column_; + } + + /** + * + * + *
    +       * The columns to be updated. Identical to InsertArgs.column.
    +       * 
    + * + * repeated string column = 1; + * + * @return The count of column. + */ + public int getColumnCount() { + return column_.size(); + } + + /** + * + * + *
    +       * The columns to be updated. Identical to InsertArgs.column.
    +       * 
    + * + * repeated string column = 1; + * + * @param index The index of the element to return. + * @return The column at the given index. + */ + public java.lang.String getColumn(int index) { + return column_.get(index); + } + + /** + * + * + *
    +       * The columns to be updated. Identical to InsertArgs.column.
    +       * 
    + * + * repeated string column = 1; + * + * @param index The index of the value to return. + * @return The bytes of the column at the given index. + */ + public com.google.protobuf.ByteString getColumnBytes(int index) { + return column_.getByteString(index); + } + + /** + * + * + *
    +       * The columns to be updated. Identical to InsertArgs.column.
    +       * 
    + * + * repeated string column = 1; + * + * @param index The index to set the value at. + * @param value The column to set. + * @return This builder for chaining. + */ + public Builder setColumn(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnIsMutable(); + column_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The columns to be updated. Identical to InsertArgs.column.
    +       * 
    + * + * repeated string column = 1; + * + * @param value The column to add. + * @return This builder for chaining. + */ + public Builder addColumn(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnIsMutable(); + column_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The columns to be updated. Identical to InsertArgs.column.
    +       * 
    + * + * repeated string column = 1; + * + * @param values The column to add. + * @return This builder for chaining. + */ + public Builder addAllColumn(java.lang.Iterable values) { + ensureColumnIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, column_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The columns to be updated. Identical to InsertArgs.column.
    +       * 
    + * + * repeated string column = 1; + * + * @return This builder for chaining. + */ + public Builder clearColumn() { + column_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The columns to be updated. Identical to InsertArgs.column.
    +       * 
    + * + * repeated string column = 1; + * + * @param value The bytes of the column to add. + * @return This builder for chaining. + */ + public Builder addColumnBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureColumnIsMutable(); + column_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.util.List type_ = java.util.Collections.emptyList(); + + private void ensureTypeIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + type_ = new java.util.ArrayList(type_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder> + typeBuilder_; + + /** + * + * + *
    +       * Type information for "values". Identical to InsertArgs.type.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public java.util.List getTypeList() { + if (typeBuilder_ == null) { + return java.util.Collections.unmodifiableList(type_); + } else { + return typeBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +       * Type information for "values". Identical to InsertArgs.type.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public int getTypeCount() { + if (typeBuilder_ == null) { + return type_.size(); + } else { + return typeBuilder_.getCount(); + } + } + + /** + * + * + *
    +       * Type information for "values". Identical to InsertArgs.type.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public com.google.spanner.v1.Type getType(int index) { + if (typeBuilder_ == null) { + return type_.get(index); + } else { + return typeBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +       * Type information for "values". Identical to InsertArgs.type.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public Builder setType(int index, com.google.spanner.v1.Type value) { + if (typeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTypeIsMutable(); + type_.set(index, value); + onChanged(); + } else { + typeBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * Type information for "values". Identical to InsertArgs.type.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public Builder setType(int index, com.google.spanner.v1.Type.Builder builderForValue) { + if (typeBuilder_ == null) { + ensureTypeIsMutable(); + type_.set(index, builderForValue.build()); + onChanged(); + } else { + typeBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * Type information for "values". Identical to InsertArgs.type.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public Builder addType(com.google.spanner.v1.Type value) { + if (typeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTypeIsMutable(); + type_.add(value); + onChanged(); + } else { + typeBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +       * Type information for "values". Identical to InsertArgs.type.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public Builder addType(int index, com.google.spanner.v1.Type value) { + if (typeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTypeIsMutable(); + type_.add(index, value); + onChanged(); + } else { + typeBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * Type information for "values". Identical to InsertArgs.type.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public Builder addType(com.google.spanner.v1.Type.Builder builderForValue) { + if (typeBuilder_ == null) { + ensureTypeIsMutable(); + type_.add(builderForValue.build()); + onChanged(); + } else { + typeBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * Type information for "values". Identical to InsertArgs.type.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public Builder addType(int index, com.google.spanner.v1.Type.Builder builderForValue) { + if (typeBuilder_ == null) { + ensureTypeIsMutable(); + type_.add(index, builderForValue.build()); + onChanged(); + } else { + typeBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * Type information for "values". Identical to InsertArgs.type.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public Builder addAllType(java.lang.Iterable values) { + if (typeBuilder_ == null) { + ensureTypeIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, type_); + onChanged(); + } else { + typeBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +       * Type information for "values". Identical to InsertArgs.type.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public Builder clearType() { + if (typeBuilder_ == null) { + type_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + typeBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +       * Type information for "values". Identical to InsertArgs.type.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public Builder removeType(int index) { + if (typeBuilder_ == null) { + ensureTypeIsMutable(); + type_.remove(index); + onChanged(); + } else { + typeBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +       * Type information for "values". Identical to InsertArgs.type.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public com.google.spanner.v1.Type.Builder getTypeBuilder(int index) { + return internalGetTypeFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +       * Type information for "values". Identical to InsertArgs.type.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public com.google.spanner.v1.TypeOrBuilder getTypeOrBuilder(int index) { + if (typeBuilder_ == null) { + return type_.get(index); + } else { + return typeBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +       * Type information for "values". Identical to InsertArgs.type.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public java.util.List getTypeOrBuilderList() { + if (typeBuilder_ != null) { + return typeBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(type_); + } + } + + /** + * + * + *
    +       * Type information for "values". Identical to InsertArgs.type.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public com.google.spanner.v1.Type.Builder addTypeBuilder() { + return internalGetTypeFieldBuilder() + .addBuilder(com.google.spanner.v1.Type.getDefaultInstance()); + } + + /** + * + * + *
    +       * Type information for "values". Identical to InsertArgs.type.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public com.google.spanner.v1.Type.Builder addTypeBuilder(int index) { + return internalGetTypeFieldBuilder() + .addBuilder(index, com.google.spanner.v1.Type.getDefaultInstance()); + } + + /** + * + * + *
    +       * Type information for "values". Identical to InsertArgs.type.
    +       * 
    + * + * repeated .google.spanner.v1.Type type = 2; + */ + public java.util.List getTypeBuilderList() { + return internalGetTypeFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder> + internalGetTypeFieldBuilder() { + if (typeBuilder_ == null) { + typeBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder>( + type_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + type_ = null; + } + return typeBuilder_; + } + + private java.util.List values_ = + java.util.Collections.emptyList(); + + private void ensureValuesIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + values_ = new java.util.ArrayList(values_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder> + valuesBuilder_; + + /** + * + * + *
    +       * The values to be updated. Identical to InsertArgs.values.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public java.util.List getValuesList() { + if (valuesBuilder_ == null) { + return java.util.Collections.unmodifiableList(values_); + } else { + return valuesBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +       * The values to be updated. Identical to InsertArgs.values.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public int getValuesCount() { + if (valuesBuilder_ == null) { + return values_.size(); + } else { + return valuesBuilder_.getCount(); + } + } + + /** + * + * + *
    +       * The values to be updated. Identical to InsertArgs.values.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public com.google.spanner.executor.v1.ValueList getValues(int index) { + if (valuesBuilder_ == null) { + return values_.get(index); + } else { + return valuesBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +       * The values to be updated. Identical to InsertArgs.values.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public Builder setValues(int index, com.google.spanner.executor.v1.ValueList value) { + if (valuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.set(index, value); + onChanged(); + } else { + valuesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * The values to be updated. Identical to InsertArgs.values.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public Builder setValues( + int index, com.google.spanner.executor.v1.ValueList.Builder builderForValue) { + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + values_.set(index, builderForValue.build()); + onChanged(); + } else { + valuesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * The values to be updated. Identical to InsertArgs.values.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public Builder addValues(com.google.spanner.executor.v1.ValueList value) { + if (valuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.add(value); + onChanged(); + } else { + valuesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +       * The values to be updated. Identical to InsertArgs.values.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public Builder addValues(int index, com.google.spanner.executor.v1.ValueList value) { + if (valuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.add(index, value); + onChanged(); + } else { + valuesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * The values to be updated. Identical to InsertArgs.values.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public Builder addValues(com.google.spanner.executor.v1.ValueList.Builder builderForValue) { + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + values_.add(builderForValue.build()); + onChanged(); + } else { + valuesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * The values to be updated. Identical to InsertArgs.values.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public Builder addValues( + int index, com.google.spanner.executor.v1.ValueList.Builder builderForValue) { + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + values_.add(index, builderForValue.build()); + onChanged(); + } else { + valuesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * The values to be updated. Identical to InsertArgs.values.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public Builder addAllValues( + java.lang.Iterable values) { + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, values_); + onChanged(); + } else { + valuesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +       * The values to be updated. Identical to InsertArgs.values.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public Builder clearValues() { + if (valuesBuilder_ == null) { + values_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + valuesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +       * The values to be updated. Identical to InsertArgs.values.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public Builder removeValues(int index) { + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + values_.remove(index); + onChanged(); + } else { + valuesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +       * The values to be updated. Identical to InsertArgs.values.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public com.google.spanner.executor.v1.ValueList.Builder getValuesBuilder(int index) { + return internalGetValuesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +       * The values to be updated. Identical to InsertArgs.values.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public com.google.spanner.executor.v1.ValueListOrBuilder getValuesOrBuilder(int index) { + if (valuesBuilder_ == null) { + return values_.get(index); + } else { + return valuesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +       * The values to be updated. Identical to InsertArgs.values.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public java.util.List + getValuesOrBuilderList() { + if (valuesBuilder_ != null) { + return valuesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(values_); + } + } + + /** + * + * + *
    +       * The values to be updated. Identical to InsertArgs.values.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public com.google.spanner.executor.v1.ValueList.Builder addValuesBuilder() { + return internalGetValuesFieldBuilder() + .addBuilder(com.google.spanner.executor.v1.ValueList.getDefaultInstance()); + } + + /** + * + * + *
    +       * The values to be updated. Identical to InsertArgs.values.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public com.google.spanner.executor.v1.ValueList.Builder addValuesBuilder(int index) { + return internalGetValuesFieldBuilder() + .addBuilder(index, com.google.spanner.executor.v1.ValueList.getDefaultInstance()); + } + + /** + * + * + *
    +       * The values to be updated. Identical to InsertArgs.values.
    +       * 
    + * + * repeated .google.spanner.executor.v1.ValueList values = 3; + */ + public java.util.List + getValuesBuilderList() { + return internalGetValuesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder> + internalGetValuesFieldBuilder() { + if (valuesBuilder_ == null) { + valuesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder>( + values_, ((bitField0_ & 0x00000004) != 0), getParentForChildren(), isClean()); + values_ = null; + } + return valuesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.MutationAction.UpdateArgs) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.MutationAction.UpdateArgs) + private static final com.google.spanner.executor.v1.MutationAction.UpdateArgs DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.MutationAction.UpdateArgs(); + } + + public static com.google.spanner.executor.v1.MutationAction.UpdateArgs getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateArgs parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.UpdateArgs getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface ModOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.MutationAction.Mod) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * The table to write.
    +     * 
    + * + * string table = 1; + * + * @return The table. + */ + java.lang.String getTable(); + + /** + * + * + *
    +     * The table to write.
    +     * 
    + * + * string table = 1; + * + * @return The bytes for table. + */ + com.google.protobuf.ByteString getTableBytes(); + + /** + * + * + *
    +     * Exactly one of the remaining elements may be present.
    +     * Insert new rows into "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert = 2; + * + * @return Whether the insert field is set. + */ + boolean hasInsert(); + + /** + * + * + *
    +     * Exactly one of the remaining elements may be present.
    +     * Insert new rows into "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert = 2; + * + * @return The insert. + */ + com.google.spanner.executor.v1.MutationAction.InsertArgs getInsert(); + + /** + * + * + *
    +     * Exactly one of the remaining elements may be present.
    +     * Insert new rows into "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert = 2; + */ + com.google.spanner.executor.v1.MutationAction.InsertArgsOrBuilder getInsertOrBuilder(); + + /** + * + * + *
    +     * Update columns stored in existing rows of "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.UpdateArgs update = 3; + * + * @return Whether the update field is set. + */ + boolean hasUpdate(); + + /** + * + * + *
    +     * Update columns stored in existing rows of "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.UpdateArgs update = 3; + * + * @return The update. + */ + com.google.spanner.executor.v1.MutationAction.UpdateArgs getUpdate(); + + /** + * + * + *
    +     * Update columns stored in existing rows of "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.UpdateArgs update = 3; + */ + com.google.spanner.executor.v1.MutationAction.UpdateArgsOrBuilder getUpdateOrBuilder(); + + /** + * + * + *
    +     * Insert or update existing rows of "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert_or_update = 4; + * + * @return Whether the insertOrUpdate field is set. + */ + boolean hasInsertOrUpdate(); + + /** + * + * + *
    +     * Insert or update existing rows of "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert_or_update = 4; + * + * @return The insertOrUpdate. + */ + com.google.spanner.executor.v1.MutationAction.InsertArgs getInsertOrUpdate(); + + /** + * + * + *
    +     * Insert or update existing rows of "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert_or_update = 4; + */ + com.google.spanner.executor.v1.MutationAction.InsertArgsOrBuilder getInsertOrUpdateOrBuilder(); + + /** + * + * + *
    +     * Replace existing rows of "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs replace = 5; + * + * @return Whether the replace field is set. + */ + boolean hasReplace(); + + /** + * + * + *
    +     * Replace existing rows of "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs replace = 5; + * + * @return The replace. + */ + com.google.spanner.executor.v1.MutationAction.InsertArgs getReplace(); + + /** + * + * + *
    +     * Replace existing rows of "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs replace = 5; + */ + com.google.spanner.executor.v1.MutationAction.InsertArgsOrBuilder getReplaceOrBuilder(); + + /** + * + * + *
    +     * Delete rows from "table".
    +     * 
    + * + * .google.spanner.executor.v1.KeySet delete_keys = 6; + * + * @return Whether the deleteKeys field is set. + */ + boolean hasDeleteKeys(); + + /** + * + * + *
    +     * Delete rows from "table".
    +     * 
    + * + * .google.spanner.executor.v1.KeySet delete_keys = 6; + * + * @return The deleteKeys. + */ + com.google.spanner.executor.v1.KeySet getDeleteKeys(); + + /** + * + * + *
    +     * Delete rows from "table".
    +     * 
    + * + * .google.spanner.executor.v1.KeySet delete_keys = 6; + */ + com.google.spanner.executor.v1.KeySetOrBuilder getDeleteKeysOrBuilder(); + } + + /** + * + * + *
    +   * Mod represents the write action that will be perform to a table. Each mod
    +   * will specify exactly one action, from insert, update, insert_or_update,
    +   * replace and delete.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.MutationAction.Mod} + */ + public static final class Mod extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.MutationAction.Mod) + ModOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Mod"); + } + + // Use Mod.newBuilder() to construct. + private Mod(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Mod() { + table_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_MutationAction_Mod_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_MutationAction_Mod_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.MutationAction.Mod.class, + com.google.spanner.executor.v1.MutationAction.Mod.Builder.class); + } + + private int bitField0_; + public static final int TABLE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object table_ = ""; + + /** + * + * + *
    +     * The table to write.
    +     * 
    + * + * string table = 1; + * + * @return The table. + */ + @java.lang.Override + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } + } + + /** + * + * + *
    +     * The table to write.
    +     * 
    + * + * string table = 1; + * + * @return The bytes for table. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSERT_FIELD_NUMBER = 2; + private com.google.spanner.executor.v1.MutationAction.InsertArgs insert_; + + /** + * + * + *
    +     * Exactly one of the remaining elements may be present.
    +     * Insert new rows into "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert = 2; + * + * @return Whether the insert field is set. + */ + @java.lang.Override + public boolean hasInsert() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Exactly one of the remaining elements may be present.
    +     * Insert new rows into "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert = 2; + * + * @return The insert. + */ + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.InsertArgs getInsert() { + return insert_ == null + ? com.google.spanner.executor.v1.MutationAction.InsertArgs.getDefaultInstance() + : insert_; + } + + /** + * + * + *
    +     * Exactly one of the remaining elements may be present.
    +     * Insert new rows into "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert = 2; + */ + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.InsertArgsOrBuilder getInsertOrBuilder() { + return insert_ == null + ? com.google.spanner.executor.v1.MutationAction.InsertArgs.getDefaultInstance() + : insert_; + } + + public static final int UPDATE_FIELD_NUMBER = 3; + private com.google.spanner.executor.v1.MutationAction.UpdateArgs update_; + + /** + * + * + *
    +     * Update columns stored in existing rows of "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.UpdateArgs update = 3; + * + * @return Whether the update field is set. + */ + @java.lang.Override + public boolean hasUpdate() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Update columns stored in existing rows of "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.UpdateArgs update = 3; + * + * @return The update. + */ + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.UpdateArgs getUpdate() { + return update_ == null + ? com.google.spanner.executor.v1.MutationAction.UpdateArgs.getDefaultInstance() + : update_; + } + + /** + * + * + *
    +     * Update columns stored in existing rows of "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.UpdateArgs update = 3; + */ + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.UpdateArgsOrBuilder getUpdateOrBuilder() { + return update_ == null + ? com.google.spanner.executor.v1.MutationAction.UpdateArgs.getDefaultInstance() + : update_; + } + + public static final int INSERT_OR_UPDATE_FIELD_NUMBER = 4; + private com.google.spanner.executor.v1.MutationAction.InsertArgs insertOrUpdate_; + + /** + * + * + *
    +     * Insert or update existing rows of "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert_or_update = 4; + * + * @return Whether the insertOrUpdate field is set. + */ + @java.lang.Override + public boolean hasInsertOrUpdate() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Insert or update existing rows of "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert_or_update = 4; + * + * @return The insertOrUpdate. + */ + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.InsertArgs getInsertOrUpdate() { + return insertOrUpdate_ == null + ? com.google.spanner.executor.v1.MutationAction.InsertArgs.getDefaultInstance() + : insertOrUpdate_; + } + + /** + * + * + *
    +     * Insert or update existing rows of "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert_or_update = 4; + */ + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.InsertArgsOrBuilder + getInsertOrUpdateOrBuilder() { + return insertOrUpdate_ == null + ? com.google.spanner.executor.v1.MutationAction.InsertArgs.getDefaultInstance() + : insertOrUpdate_; + } + + public static final int REPLACE_FIELD_NUMBER = 5; + private com.google.spanner.executor.v1.MutationAction.InsertArgs replace_; + + /** + * + * + *
    +     * Replace existing rows of "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs replace = 5; + * + * @return Whether the replace field is set. + */ + @java.lang.Override + public boolean hasReplace() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Replace existing rows of "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs replace = 5; + * + * @return The replace. + */ + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.InsertArgs getReplace() { + return replace_ == null + ? com.google.spanner.executor.v1.MutationAction.InsertArgs.getDefaultInstance() + : replace_; + } + + /** + * + * + *
    +     * Replace existing rows of "table".
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs replace = 5; + */ + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.InsertArgsOrBuilder getReplaceOrBuilder() { + return replace_ == null + ? com.google.spanner.executor.v1.MutationAction.InsertArgs.getDefaultInstance() + : replace_; + } + + public static final int DELETE_KEYS_FIELD_NUMBER = 6; + private com.google.spanner.executor.v1.KeySet deleteKeys_; + + /** + * + * + *
    +     * Delete rows from "table".
    +     * 
    + * + * .google.spanner.executor.v1.KeySet delete_keys = 6; + * + * @return Whether the deleteKeys field is set. + */ + @java.lang.Override + public boolean hasDeleteKeys() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * Delete rows from "table".
    +     * 
    + * + * .google.spanner.executor.v1.KeySet delete_keys = 6; + * + * @return The deleteKeys. + */ + @java.lang.Override + public com.google.spanner.executor.v1.KeySet getDeleteKeys() { + return deleteKeys_ == null + ? com.google.spanner.executor.v1.KeySet.getDefaultInstance() + : deleteKeys_; + } + + /** + * + * + *
    +     * Delete rows from "table".
    +     * 
    + * + * .google.spanner.executor.v1.KeySet delete_keys = 6; + */ + @java.lang.Override + public com.google.spanner.executor.v1.KeySetOrBuilder getDeleteKeysOrBuilder() { + return deleteKeys_ == null + ? com.google.spanner.executor.v1.KeySet.getDefaultInstance() + : deleteKeys_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(table_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, table_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getInsert()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getUpdate()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(4, getInsertOrUpdate()); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(5, getReplace()); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeMessage(6, getDeleteKeys()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(table_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, table_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getInsert()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getUpdate()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getInsertOrUpdate()); + } + 
if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getReplace()); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, getDeleteKeys()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.MutationAction.Mod)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.MutationAction.Mod other = + (com.google.spanner.executor.v1.MutationAction.Mod) obj; + + if (!getTable().equals(other.getTable())) return false; + if (hasInsert() != other.hasInsert()) return false; + if (hasInsert()) { + if (!getInsert().equals(other.getInsert())) return false; + } + if (hasUpdate() != other.hasUpdate()) return false; + if (hasUpdate()) { + if (!getUpdate().equals(other.getUpdate())) return false; + } + if (hasInsertOrUpdate() != other.hasInsertOrUpdate()) return false; + if (hasInsertOrUpdate()) { + if (!getInsertOrUpdate().equals(other.getInsertOrUpdate())) return false; + } + if (hasReplace() != other.hasReplace()) return false; + if (hasReplace()) { + if (!getReplace().equals(other.getReplace())) return false; + } + if (hasDeleteKeys() != other.hasDeleteKeys()) return false; + if (hasDeleteKeys()) { + if (!getDeleteKeys().equals(other.getDeleteKeys())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + if (hasInsert()) { + hash = (37 * hash) + INSERT_FIELD_NUMBER; + hash = (53 * hash) + getInsert().hashCode(); + } 
+ if (hasUpdate()) { + hash = (37 * hash) + UPDATE_FIELD_NUMBER; + hash = (53 * hash) + getUpdate().hashCode(); + } + if (hasInsertOrUpdate()) { + hash = (37 * hash) + INSERT_OR_UPDATE_FIELD_NUMBER; + hash = (53 * hash) + getInsertOrUpdate().hashCode(); + } + if (hasReplace()) { + hash = (37 * hash) + REPLACE_FIELD_NUMBER; + hash = (53 * hash) + getReplace().hashCode(); + } + if (hasDeleteKeys()) { + hash = (37 * hash) + DELETE_KEYS_FIELD_NUMBER; + hash = (53 * hash) + getDeleteKeys().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.MutationAction.Mod parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.MutationAction.Mod parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.MutationAction.Mod parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.MutationAction.Mod parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.MutationAction.Mod parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.MutationAction.Mod parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.MutationAction.Mod parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.MutationAction.Mod parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.MutationAction.Mod parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.MutationAction.Mod parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.MutationAction.Mod parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.MutationAction.Mod parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder 
newBuilder(com.google.spanner.executor.v1.MutationAction.Mod prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * Mod represents the write action that will be perform to a table. Each mod
    +     * will specify exactly one action, from insert, update, insert_or_update,
    +     * replace and delete.
    +     * 
    + * + * Protobuf type {@code google.spanner.executor.v1.MutationAction.Mod} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.MutationAction.Mod) + com.google.spanner.executor.v1.MutationAction.ModOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_MutationAction_Mod_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_MutationAction_Mod_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.MutationAction.Mod.class, + com.google.spanner.executor.v1.MutationAction.Mod.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.MutationAction.Mod.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetInsertFieldBuilder(); + internalGetUpdateFieldBuilder(); + internalGetInsertOrUpdateFieldBuilder(); + internalGetReplaceFieldBuilder(); + internalGetDeleteKeysFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + table_ = ""; + insert_ = null; + if (insertBuilder_ != null) { + insertBuilder_.dispose(); + insertBuilder_ = null; + } + update_ = null; + if (updateBuilder_ != null) { + updateBuilder_.dispose(); + updateBuilder_ = null; + } + insertOrUpdate_ = null; + if (insertOrUpdateBuilder_ != null) { + 
insertOrUpdateBuilder_.dispose(); + insertOrUpdateBuilder_ = null; + } + replace_ = null; + if (replaceBuilder_ != null) { + replaceBuilder_.dispose(); + replaceBuilder_ = null; + } + deleteKeys_ = null; + if (deleteKeysBuilder_ != null) { + deleteKeysBuilder_.dispose(); + deleteKeysBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_MutationAction_Mod_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.Mod getDefaultInstanceForType() { + return com.google.spanner.executor.v1.MutationAction.Mod.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.Mod build() { + com.google.spanner.executor.v1.MutationAction.Mod result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.Mod buildPartial() { + com.google.spanner.executor.v1.MutationAction.Mod result = + new com.google.spanner.executor.v1.MutationAction.Mod(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.MutationAction.Mod result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.table_ = table_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.insert_ = insertBuilder_ == null ? insert_ : insertBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.update_ = updateBuilder_ == null ? 
update_ : updateBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.insertOrUpdate_ = + insertOrUpdateBuilder_ == null ? insertOrUpdate_ : insertOrUpdateBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.replace_ = replaceBuilder_ == null ? replace_ : replaceBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.deleteKeys_ = + deleteKeysBuilder_ == null ? deleteKeys_ : deleteKeysBuilder_.build(); + to_bitField0_ |= 0x00000010; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.MutationAction.Mod) { + return mergeFrom((com.google.spanner.executor.v1.MutationAction.Mod) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.MutationAction.Mod other) { + if (other == com.google.spanner.executor.v1.MutationAction.Mod.getDefaultInstance()) + return this; + if (!other.getTable().isEmpty()) { + table_ = other.table_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasInsert()) { + mergeInsert(other.getInsert()); + } + if (other.hasUpdate()) { + mergeUpdate(other.getUpdate()); + } + if (other.hasInsertOrUpdate()) { + mergeInsertOrUpdate(other.getInsertOrUpdate()); + } + if (other.hasReplace()) { + mergeReplace(other.getReplace()); + } + if (other.hasDeleteKeys()) { + mergeDeleteKeys(other.getDeleteKeys()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry 
== null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + table_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetInsertFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetUpdateFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetInsertOrUpdateFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + input.readMessage( + internalGetReplaceFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 50: + { + input.readMessage( + internalGetDeleteKeysFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000020; + break; + } // case 50 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object table_ = ""; + + /** + * + * + *
    +       * The table to write.
    +       * 
    + * + * string table = 1; + * + * @return The table. + */ + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * The table to write.
    +       * 
    + * + * string table = 1; + * + * @return The bytes for table. + */ + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * The table to write.
    +       * 
    + * + * string table = 1; + * + * @param value The table to set. + * @return This builder for chaining. + */ + public Builder setTable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The table to write.
    +       * 
    + * + * string table = 1; + * + * @return This builder for chaining. + */ + public Builder clearTable() { + table_ = getDefaultInstance().getTable(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +       * The table to write.
    +       * 
    + * + * string table = 1; + * + * @param value The bytes for table to set. + * @return This builder for chaining. + */ + public Builder setTableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + table_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.spanner.executor.v1.MutationAction.InsertArgs insert_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.MutationAction.InsertArgs, + com.google.spanner.executor.v1.MutationAction.InsertArgs.Builder, + com.google.spanner.executor.v1.MutationAction.InsertArgsOrBuilder> + insertBuilder_; + + /** + * + * + *
    +       * Exactly one of the remaining elements may be present.
    +       * Insert new rows into "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert = 2; + * + * @return Whether the insert field is set. + */ + public boolean hasInsert() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +       * Exactly one of the remaining elements may be present.
    +       * Insert new rows into "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert = 2; + * + * @return The insert. + */ + public com.google.spanner.executor.v1.MutationAction.InsertArgs getInsert() { + if (insertBuilder_ == null) { + return insert_ == null + ? com.google.spanner.executor.v1.MutationAction.InsertArgs.getDefaultInstance() + : insert_; + } else { + return insertBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * Exactly one of the remaining elements may be present.
    +       * Insert new rows into "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert = 2; + */ + public Builder setInsert(com.google.spanner.executor.v1.MutationAction.InsertArgs value) { + if (insertBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + insert_ = value; + } else { + insertBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Exactly one of the remaining elements may be present.
    +       * Insert new rows into "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert = 2; + */ + public Builder setInsert( + com.google.spanner.executor.v1.MutationAction.InsertArgs.Builder builderForValue) { + if (insertBuilder_ == null) { + insert_ = builderForValue.build(); + } else { + insertBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Exactly one of the remaining elements may be present.
    +       * Insert new rows into "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert = 2; + */ + public Builder mergeInsert(com.google.spanner.executor.v1.MutationAction.InsertArgs value) { + if (insertBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && insert_ != null + && insert_ + != com.google.spanner.executor.v1.MutationAction.InsertArgs + .getDefaultInstance()) { + getInsertBuilder().mergeFrom(value); + } else { + insert_ = value; + } + } else { + insertBuilder_.mergeFrom(value); + } + if (insert_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * Exactly one of the remaining elements may be present.
    +       * Insert new rows into "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert = 2; + */ + public Builder clearInsert() { + bitField0_ = (bitField0_ & ~0x00000002); + insert_ = null; + if (insertBuilder_ != null) { + insertBuilder_.dispose(); + insertBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * Exactly one of the remaining elements may be present.
    +       * Insert new rows into "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert = 2; + */ + public com.google.spanner.executor.v1.MutationAction.InsertArgs.Builder getInsertBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetInsertFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Exactly one of the remaining elements may be present.
    +       * Insert new rows into "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert = 2; + */ + public com.google.spanner.executor.v1.MutationAction.InsertArgsOrBuilder + getInsertOrBuilder() { + if (insertBuilder_ != null) { + return insertBuilder_.getMessageOrBuilder(); + } else { + return insert_ == null + ? com.google.spanner.executor.v1.MutationAction.InsertArgs.getDefaultInstance() + : insert_; + } + } + + /** + * + * + *
    +       * Exactly one of the remaining elements may be present.
    +       * Insert new rows into "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.MutationAction.InsertArgs, + com.google.spanner.executor.v1.MutationAction.InsertArgs.Builder, + com.google.spanner.executor.v1.MutationAction.InsertArgsOrBuilder> + internalGetInsertFieldBuilder() { + if (insertBuilder_ == null) { + insertBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.MutationAction.InsertArgs, + com.google.spanner.executor.v1.MutationAction.InsertArgs.Builder, + com.google.spanner.executor.v1.MutationAction.InsertArgsOrBuilder>( + getInsert(), getParentForChildren(), isClean()); + insert_ = null; + } + return insertBuilder_; + } + + private com.google.spanner.executor.v1.MutationAction.UpdateArgs update_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.MutationAction.UpdateArgs, + com.google.spanner.executor.v1.MutationAction.UpdateArgs.Builder, + com.google.spanner.executor.v1.MutationAction.UpdateArgsOrBuilder> + updateBuilder_; + + /** + * + * + *
    +       * Update columns stored in existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.UpdateArgs update = 3; + * + * @return Whether the update field is set. + */ + public boolean hasUpdate() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +       * Update columns stored in existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.UpdateArgs update = 3; + * + * @return The update. + */ + public com.google.spanner.executor.v1.MutationAction.UpdateArgs getUpdate() { + if (updateBuilder_ == null) { + return update_ == null + ? com.google.spanner.executor.v1.MutationAction.UpdateArgs.getDefaultInstance() + : update_; + } else { + return updateBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * Update columns stored in existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.UpdateArgs update = 3; + */ + public Builder setUpdate(com.google.spanner.executor.v1.MutationAction.UpdateArgs value) { + if (updateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + update_ = value; + } else { + updateBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Update columns stored in existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.UpdateArgs update = 3; + */ + public Builder setUpdate( + com.google.spanner.executor.v1.MutationAction.UpdateArgs.Builder builderForValue) { + if (updateBuilder_ == null) { + update_ = builderForValue.build(); + } else { + updateBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Update columns stored in existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.UpdateArgs update = 3; + */ + public Builder mergeUpdate(com.google.spanner.executor.v1.MutationAction.UpdateArgs value) { + if (updateBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && update_ != null + && update_ + != com.google.spanner.executor.v1.MutationAction.UpdateArgs + .getDefaultInstance()) { + getUpdateBuilder().mergeFrom(value); + } else { + update_ = value; + } + } else { + updateBuilder_.mergeFrom(value); + } + if (update_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * Update columns stored in existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.UpdateArgs update = 3; + */ + public Builder clearUpdate() { + bitField0_ = (bitField0_ & ~0x00000004); + update_ = null; + if (updateBuilder_ != null) { + updateBuilder_.dispose(); + updateBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * Update columns stored in existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.UpdateArgs update = 3; + */ + public com.google.spanner.executor.v1.MutationAction.UpdateArgs.Builder getUpdateBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetUpdateFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Update columns stored in existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.UpdateArgs update = 3; + */ + public com.google.spanner.executor.v1.MutationAction.UpdateArgsOrBuilder + getUpdateOrBuilder() { + if (updateBuilder_ != null) { + return updateBuilder_.getMessageOrBuilder(); + } else { + return update_ == null + ? com.google.spanner.executor.v1.MutationAction.UpdateArgs.getDefaultInstance() + : update_; + } + } + + /** + * + * + *
    +       * Update columns stored in existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.UpdateArgs update = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.MutationAction.UpdateArgs, + com.google.spanner.executor.v1.MutationAction.UpdateArgs.Builder, + com.google.spanner.executor.v1.MutationAction.UpdateArgsOrBuilder> + internalGetUpdateFieldBuilder() { + if (updateBuilder_ == null) { + updateBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.MutationAction.UpdateArgs, + com.google.spanner.executor.v1.MutationAction.UpdateArgs.Builder, + com.google.spanner.executor.v1.MutationAction.UpdateArgsOrBuilder>( + getUpdate(), getParentForChildren(), isClean()); + update_ = null; + } + return updateBuilder_; + } + + private com.google.spanner.executor.v1.MutationAction.InsertArgs insertOrUpdate_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.MutationAction.InsertArgs, + com.google.spanner.executor.v1.MutationAction.InsertArgs.Builder, + com.google.spanner.executor.v1.MutationAction.InsertArgsOrBuilder> + insertOrUpdateBuilder_; + + /** + * + * + *
    +       * Insert or update existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert_or_update = 4; + * + * @return Whether the insertOrUpdate field is set. + */ + public boolean hasInsertOrUpdate() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +       * Insert or update existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert_or_update = 4; + * + * @return The insertOrUpdate. + */ + public com.google.spanner.executor.v1.MutationAction.InsertArgs getInsertOrUpdate() { + if (insertOrUpdateBuilder_ == null) { + return insertOrUpdate_ == null + ? com.google.spanner.executor.v1.MutationAction.InsertArgs.getDefaultInstance() + : insertOrUpdate_; + } else { + return insertOrUpdateBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * Insert or update existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert_or_update = 4; + */ + public Builder setInsertOrUpdate( + com.google.spanner.executor.v1.MutationAction.InsertArgs value) { + if (insertOrUpdateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + insertOrUpdate_ = value; + } else { + insertOrUpdateBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Insert or update existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert_or_update = 4; + */ + public Builder setInsertOrUpdate( + com.google.spanner.executor.v1.MutationAction.InsertArgs.Builder builderForValue) { + if (insertOrUpdateBuilder_ == null) { + insertOrUpdate_ = builderForValue.build(); + } else { + insertOrUpdateBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Insert or update existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert_or_update = 4; + */ + public Builder mergeInsertOrUpdate( + com.google.spanner.executor.v1.MutationAction.InsertArgs value) { + if (insertOrUpdateBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && insertOrUpdate_ != null + && insertOrUpdate_ + != com.google.spanner.executor.v1.MutationAction.InsertArgs + .getDefaultInstance()) { + getInsertOrUpdateBuilder().mergeFrom(value); + } else { + insertOrUpdate_ = value; + } + } else { + insertOrUpdateBuilder_.mergeFrom(value); + } + if (insertOrUpdate_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * Insert or update existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert_or_update = 4; + */ + public Builder clearInsertOrUpdate() { + bitField0_ = (bitField0_ & ~0x00000008); + insertOrUpdate_ = null; + if (insertOrUpdateBuilder_ != null) { + insertOrUpdateBuilder_.dispose(); + insertOrUpdateBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * Insert or update existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert_or_update = 4; + */ + public com.google.spanner.executor.v1.MutationAction.InsertArgs.Builder + getInsertOrUpdateBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetInsertOrUpdateFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Insert or update existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert_or_update = 4; + */ + public com.google.spanner.executor.v1.MutationAction.InsertArgsOrBuilder + getInsertOrUpdateOrBuilder() { + if (insertOrUpdateBuilder_ != null) { + return insertOrUpdateBuilder_.getMessageOrBuilder(); + } else { + return insertOrUpdate_ == null + ? com.google.spanner.executor.v1.MutationAction.InsertArgs.getDefaultInstance() + : insertOrUpdate_; + } + } + + /** + * + * + *
    +       * Insert or update existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs insert_or_update = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.MutationAction.InsertArgs, + com.google.spanner.executor.v1.MutationAction.InsertArgs.Builder, + com.google.spanner.executor.v1.MutationAction.InsertArgsOrBuilder> + internalGetInsertOrUpdateFieldBuilder() { + if (insertOrUpdateBuilder_ == null) { + insertOrUpdateBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.MutationAction.InsertArgs, + com.google.spanner.executor.v1.MutationAction.InsertArgs.Builder, + com.google.spanner.executor.v1.MutationAction.InsertArgsOrBuilder>( + getInsertOrUpdate(), getParentForChildren(), isClean()); + insertOrUpdate_ = null; + } + return insertOrUpdateBuilder_; + } + + private com.google.spanner.executor.v1.MutationAction.InsertArgs replace_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.MutationAction.InsertArgs, + com.google.spanner.executor.v1.MutationAction.InsertArgs.Builder, + com.google.spanner.executor.v1.MutationAction.InsertArgsOrBuilder> + replaceBuilder_; + + /** + * + * + *
    +       * Replace existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs replace = 5; + * + * @return Whether the replace field is set. + */ + public boolean hasReplace() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +       * Replace existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs replace = 5; + * + * @return The replace. + */ + public com.google.spanner.executor.v1.MutationAction.InsertArgs getReplace() { + if (replaceBuilder_ == null) { + return replace_ == null + ? com.google.spanner.executor.v1.MutationAction.InsertArgs.getDefaultInstance() + : replace_; + } else { + return replaceBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * Replace existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs replace = 5; + */ + public Builder setReplace(com.google.spanner.executor.v1.MutationAction.InsertArgs value) { + if (replaceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + replace_ = value; + } else { + replaceBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Replace existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs replace = 5; + */ + public Builder setReplace( + com.google.spanner.executor.v1.MutationAction.InsertArgs.Builder builderForValue) { + if (replaceBuilder_ == null) { + replace_ = builderForValue.build(); + } else { + replaceBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Replace existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs replace = 5; + */ + public Builder mergeReplace(com.google.spanner.executor.v1.MutationAction.InsertArgs value) { + if (replaceBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && replace_ != null + && replace_ + != com.google.spanner.executor.v1.MutationAction.InsertArgs + .getDefaultInstance()) { + getReplaceBuilder().mergeFrom(value); + } else { + replace_ = value; + } + } else { + replaceBuilder_.mergeFrom(value); + } + if (replace_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * Replace existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs replace = 5; + */ + public Builder clearReplace() { + bitField0_ = (bitField0_ & ~0x00000010); + replace_ = null; + if (replaceBuilder_ != null) { + replaceBuilder_.dispose(); + replaceBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * Replace existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs replace = 5; + */ + public com.google.spanner.executor.v1.MutationAction.InsertArgs.Builder getReplaceBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetReplaceFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Replace existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs replace = 5; + */ + public com.google.spanner.executor.v1.MutationAction.InsertArgsOrBuilder + getReplaceOrBuilder() { + if (replaceBuilder_ != null) { + return replaceBuilder_.getMessageOrBuilder(); + } else { + return replace_ == null + ? com.google.spanner.executor.v1.MutationAction.InsertArgs.getDefaultInstance() + : replace_; + } + } + + /** + * + * + *
    +       * Replace existing rows of "table".
    +       * 
    + * + * .google.spanner.executor.v1.MutationAction.InsertArgs replace = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.MutationAction.InsertArgs, + com.google.spanner.executor.v1.MutationAction.InsertArgs.Builder, + com.google.spanner.executor.v1.MutationAction.InsertArgsOrBuilder> + internalGetReplaceFieldBuilder() { + if (replaceBuilder_ == null) { + replaceBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.MutationAction.InsertArgs, + com.google.spanner.executor.v1.MutationAction.InsertArgs.Builder, + com.google.spanner.executor.v1.MutationAction.InsertArgsOrBuilder>( + getReplace(), getParentForChildren(), isClean()); + replace_ = null; + } + return replaceBuilder_; + } + + private com.google.spanner.executor.v1.KeySet deleteKeys_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.KeySet, + com.google.spanner.executor.v1.KeySet.Builder, + com.google.spanner.executor.v1.KeySetOrBuilder> + deleteKeysBuilder_; + + /** + * + * + *
    +       * Delete rows from "table".
    +       * 
    + * + * .google.spanner.executor.v1.KeySet delete_keys = 6; + * + * @return Whether the deleteKeys field is set. + */ + public boolean hasDeleteKeys() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
    +       * Delete rows from "table".
    +       * 
    + * + * .google.spanner.executor.v1.KeySet delete_keys = 6; + * + * @return The deleteKeys. + */ + public com.google.spanner.executor.v1.KeySet getDeleteKeys() { + if (deleteKeysBuilder_ == null) { + return deleteKeys_ == null + ? com.google.spanner.executor.v1.KeySet.getDefaultInstance() + : deleteKeys_; + } else { + return deleteKeysBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * Delete rows from "table".
    +       * 
    + * + * .google.spanner.executor.v1.KeySet delete_keys = 6; + */ + public Builder setDeleteKeys(com.google.spanner.executor.v1.KeySet value) { + if (deleteKeysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + deleteKeys_ = value; + } else { + deleteKeysBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Delete rows from "table".
    +       * 
    + * + * .google.spanner.executor.v1.KeySet delete_keys = 6; + */ + public Builder setDeleteKeys(com.google.spanner.executor.v1.KeySet.Builder builderForValue) { + if (deleteKeysBuilder_ == null) { + deleteKeys_ = builderForValue.build(); + } else { + deleteKeysBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Delete rows from "table".
    +       * 
    + * + * .google.spanner.executor.v1.KeySet delete_keys = 6; + */ + public Builder mergeDeleteKeys(com.google.spanner.executor.v1.KeySet value) { + if (deleteKeysBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0) + && deleteKeys_ != null + && deleteKeys_ != com.google.spanner.executor.v1.KeySet.getDefaultInstance()) { + getDeleteKeysBuilder().mergeFrom(value); + } else { + deleteKeys_ = value; + } + } else { + deleteKeysBuilder_.mergeFrom(value); + } + if (deleteKeys_ != null) { + bitField0_ |= 0x00000020; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * Delete rows from "table".
    +       * 
    + * + * .google.spanner.executor.v1.KeySet delete_keys = 6; + */ + public Builder clearDeleteKeys() { + bitField0_ = (bitField0_ & ~0x00000020); + deleteKeys_ = null; + if (deleteKeysBuilder_ != null) { + deleteKeysBuilder_.dispose(); + deleteKeysBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * Delete rows from "table".
    +       * 
    + * + * .google.spanner.executor.v1.KeySet delete_keys = 6; + */ + public com.google.spanner.executor.v1.KeySet.Builder getDeleteKeysBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return internalGetDeleteKeysFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Delete rows from "table".
    +       * 
    + * + * .google.spanner.executor.v1.KeySet delete_keys = 6; + */ + public com.google.spanner.executor.v1.KeySetOrBuilder getDeleteKeysOrBuilder() { + if (deleteKeysBuilder_ != null) { + return deleteKeysBuilder_.getMessageOrBuilder(); + } else { + return deleteKeys_ == null + ? com.google.spanner.executor.v1.KeySet.getDefaultInstance() + : deleteKeys_; + } + } + + /** + * + * + *
    +       * Delete rows from "table".
    +       * 
    + * + * .google.spanner.executor.v1.KeySet delete_keys = 6; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.KeySet, + com.google.spanner.executor.v1.KeySet.Builder, + com.google.spanner.executor.v1.KeySetOrBuilder> + internalGetDeleteKeysFieldBuilder() { + if (deleteKeysBuilder_ == null) { + deleteKeysBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.KeySet, + com.google.spanner.executor.v1.KeySet.Builder, + com.google.spanner.executor.v1.KeySetOrBuilder>( + getDeleteKeys(), getParentForChildren(), isClean()); + deleteKeys_ = null; + } + return deleteKeysBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.MutationAction.Mod) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.MutationAction.Mod) + private static final com.google.spanner.executor.v1.MutationAction.Mod DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.MutationAction.Mod(); + } + + public static com.google.spanner.executor.v1.MutationAction.Mod getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Mod parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.Mod getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public static final int MOD_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List mod_; + + /** + * + * + *
    +   * Mods that contained in this mutation.
    +   * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + @java.lang.Override + public java.util.List getModList() { + return mod_; + } + + /** + * + * + *
    +   * Mods that contained in this mutation.
    +   * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + @java.lang.Override + public java.util.List + getModOrBuilderList() { + return mod_; + } + + /** + * + * + *
    +   * Mods that contained in this mutation.
    +   * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + @java.lang.Override + public int getModCount() { + return mod_.size(); + } + + /** + * + * + *
    +   * Mods that contained in this mutation.
    +   * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.Mod getMod(int index) { + return mod_.get(index); + } + + /** + * + * + *
    +   * Mods that contained in this mutation.
    +   * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction.ModOrBuilder getModOrBuilder(int index) { + return mod_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < mod_.size(); i++) { + output.writeMessage(1, mod_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < mod_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, mod_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.MutationAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.MutationAction other = + (com.google.spanner.executor.v1.MutationAction) obj; + + if (!getModList().equals(other.getModList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getModCount() > 0) { + hash = (37 * hash) + MOD_FIELD_NUMBER; + hash = (53 * hash) + getModList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return 
hash; + } + + public static com.google.spanner.executor.v1.MutationAction parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.MutationAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.MutationAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.MutationAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.MutationAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.MutationAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.MutationAction parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.MutationAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static 
com.google.spanner.executor.v1.MutationAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.MutationAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.MutationAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.MutationAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.MutationAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * A single mutation request.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.MutationAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.MutationAction) + com.google.spanner.executor.v1.MutationActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_MutationAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_MutationAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.MutationAction.class, + com.google.spanner.executor.v1.MutationAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.MutationAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (modBuilder_ == null) { + mod_ = java.util.Collections.emptyList(); + } else { + mod_ = null; + modBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_MutationAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.MutationAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction build() { + 
com.google.spanner.executor.v1.MutationAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction buildPartial() { + com.google.spanner.executor.v1.MutationAction result = + new com.google.spanner.executor.v1.MutationAction(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.executor.v1.MutationAction result) { + if (modBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + mod_ = java.util.Collections.unmodifiableList(mod_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.mod_ = mod_; + } else { + result.mod_ = modBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.executor.v1.MutationAction result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.MutationAction) { + return mergeFrom((com.google.spanner.executor.v1.MutationAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.MutationAction other) { + if (other == com.google.spanner.executor.v1.MutationAction.getDefaultInstance()) return this; + if (modBuilder_ == null) { + if (!other.mod_.isEmpty()) { + if (mod_.isEmpty()) { + mod_ = other.mod_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureModIsMutable(); + mod_.addAll(other.mod_); + } + onChanged(); + } + } else { + if (!other.mod_.isEmpty()) { + if (modBuilder_.isEmpty()) { + modBuilder_.dispose(); + modBuilder_ = null; + mod_ = other.mod_; + bitField0_ = (bitField0_ & ~0x00000001); + modBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetModFieldBuilder() + : null; + } else { + modBuilder_.addAllMessages(other.mod_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.executor.v1.MutationAction.Mod m = + input.readMessage( + com.google.spanner.executor.v1.MutationAction.Mod.parser(), + extensionRegistry); + if (modBuilder_ == null) { + ensureModIsMutable(); + mod_.add(m); + } else { + modBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List mod_ = + java.util.Collections.emptyList(); + + private void ensureModIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + mod_ = new java.util.ArrayList(mod_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.MutationAction.Mod, + com.google.spanner.executor.v1.MutationAction.Mod.Builder, + com.google.spanner.executor.v1.MutationAction.ModOrBuilder> + modBuilder_; + + /** + * + * + *
    +     * Mods that contained in this mutation.
    +     * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + public java.util.List getModList() { + if (modBuilder_ == null) { + return java.util.Collections.unmodifiableList(mod_); + } else { + return modBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Mods that contained in this mutation.
    +     * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + public int getModCount() { + if (modBuilder_ == null) { + return mod_.size(); + } else { + return modBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Mods that contained in this mutation.
    +     * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + public com.google.spanner.executor.v1.MutationAction.Mod getMod(int index) { + if (modBuilder_ == null) { + return mod_.get(index); + } else { + return modBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Mods that contained in this mutation.
    +     * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + public Builder setMod(int index, com.google.spanner.executor.v1.MutationAction.Mod value) { + if (modBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureModIsMutable(); + mod_.set(index, value); + onChanged(); + } else { + modBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Mods that contained in this mutation.
    +     * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + public Builder setMod( + int index, com.google.spanner.executor.v1.MutationAction.Mod.Builder builderForValue) { + if (modBuilder_ == null) { + ensureModIsMutable(); + mod_.set(index, builderForValue.build()); + onChanged(); + } else { + modBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Mods that contained in this mutation.
    +     * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + public Builder addMod(com.google.spanner.executor.v1.MutationAction.Mod value) { + if (modBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureModIsMutable(); + mod_.add(value); + onChanged(); + } else { + modBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Mods that contained in this mutation.
    +     * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + public Builder addMod(int index, com.google.spanner.executor.v1.MutationAction.Mod value) { + if (modBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureModIsMutable(); + mod_.add(index, value); + onChanged(); + } else { + modBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Mods that contained in this mutation.
    +     * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + public Builder addMod( + com.google.spanner.executor.v1.MutationAction.Mod.Builder builderForValue) { + if (modBuilder_ == null) { + ensureModIsMutable(); + mod_.add(builderForValue.build()); + onChanged(); + } else { + modBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Mods that contained in this mutation.
    +     * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + public Builder addMod( + int index, com.google.spanner.executor.v1.MutationAction.Mod.Builder builderForValue) { + if (modBuilder_ == null) { + ensureModIsMutable(); + mod_.add(index, builderForValue.build()); + onChanged(); + } else { + modBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Mods that contained in this mutation.
    +     * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + public Builder addAllMod( + java.lang.Iterable values) { + if (modBuilder_ == null) { + ensureModIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, mod_); + onChanged(); + } else { + modBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Mods that contained in this mutation.
    +     * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + public Builder clearMod() { + if (modBuilder_ == null) { + mod_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + modBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Mods that contained in this mutation.
    +     * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + public Builder removeMod(int index) { + if (modBuilder_ == null) { + ensureModIsMutable(); + mod_.remove(index); + onChanged(); + } else { + modBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Mods that contained in this mutation.
    +     * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + public com.google.spanner.executor.v1.MutationAction.Mod.Builder getModBuilder(int index) { + return internalGetModFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Mods that contained in this mutation.
    +     * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + public com.google.spanner.executor.v1.MutationAction.ModOrBuilder getModOrBuilder(int index) { + if (modBuilder_ == null) { + return mod_.get(index); + } else { + return modBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Mods that contained in this mutation.
    +     * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + public java.util.List + getModOrBuilderList() { + if (modBuilder_ != null) { + return modBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(mod_); + } + } + + /** + * + * + *
    +     * Mods that contained in this mutation.
    +     * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + public com.google.spanner.executor.v1.MutationAction.Mod.Builder addModBuilder() { + return internalGetModFieldBuilder() + .addBuilder(com.google.spanner.executor.v1.MutationAction.Mod.getDefaultInstance()); + } + + /** + * + * + *
    +     * Mods that contained in this mutation.
    +     * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + public com.google.spanner.executor.v1.MutationAction.Mod.Builder addModBuilder(int index) { + return internalGetModFieldBuilder() + .addBuilder( + index, com.google.spanner.executor.v1.MutationAction.Mod.getDefaultInstance()); + } + + /** + * + * + *
    +     * Mods that contained in this mutation.
    +     * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + public java.util.List + getModBuilderList() { + return internalGetModFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.MutationAction.Mod, + com.google.spanner.executor.v1.MutationAction.Mod.Builder, + com.google.spanner.executor.v1.MutationAction.ModOrBuilder> + internalGetModFieldBuilder() { + if (modBuilder_ == null) { + modBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.MutationAction.Mod, + com.google.spanner.executor.v1.MutationAction.Mod.Builder, + com.google.spanner.executor.v1.MutationAction.ModOrBuilder>( + mod_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + mod_ = null; + } + return modBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.MutationAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.MutationAction) + private static final com.google.spanner.executor.v1.MutationAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.MutationAction(); + } + + public static com.google.spanner.executor.v1.MutationAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public MutationAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/MutationActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/MutationActionOrBuilder.java new file mode 100644 index 000000000000..06622159be68 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/MutationActionOrBuilder.java @@ -0,0 +1,84 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface MutationActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.MutationAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Mods that contained in this mutation.
    +   * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + java.util.List getModList(); + + /** + * + * + *
    +   * Mods that contained in this mutation.
    +   * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + com.google.spanner.executor.v1.MutationAction.Mod getMod(int index); + + /** + * + * + *
    +   * Mods that contained in this mutation.
    +   * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + int getModCount(); + + /** + * + * + *
    +   * Mods that contained in this mutation.
    +   * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + java.util.List + getModOrBuilderList(); + + /** + * + * + *
    +   * Mods that contained in this mutation.
    +   * 
    + * + * repeated .google.spanner.executor.v1.MutationAction.Mod mod = 1; + */ + com.google.spanner.executor.v1.MutationAction.ModOrBuilder getModOrBuilder(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/OperationResponse.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/OperationResponse.java new file mode 100644 index 000000000000..7e35e7eb9686 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/OperationResponse.java @@ -0,0 +1,1410 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * OperationResponse contains results returned by operation related actions.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.OperationResponse} + */ +@com.google.protobuf.Generated +public final class OperationResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.OperationResponse) + OperationResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "OperationResponse"); + } + + // Use OperationResponse.newBuilder() to construct. + private OperationResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private OperationResponse() { + listedOperations_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_OperationResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_OperationResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.OperationResponse.class, + com.google.spanner.executor.v1.OperationResponse.Builder.class); + } + + private int bitField0_; + public static final int LISTED_OPERATIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List listedOperations_; + + /** + * + * + *
    +   * List of operations returned by ListOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + @java.lang.Override + public java.util.List getListedOperationsList() { + return listedOperations_; + } + + /** + * + * + *
    +   * List of operations returned by ListOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + @java.lang.Override + public java.util.List + getListedOperationsOrBuilderList() { + return listedOperations_; + } + + /** + * + * + *
    +   * List of operations returned by ListOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + @java.lang.Override + public int getListedOperationsCount() { + return listedOperations_.size(); + } + + /** + * + * + *
    +   * List of operations returned by ListOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + @java.lang.Override + public com.google.longrunning.Operation getListedOperations(int index) { + return listedOperations_.get(index); + } + + /** + * + * + *
    +   * List of operations returned by ListOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + @java.lang.Override + public com.google.longrunning.OperationOrBuilder getListedOperationsOrBuilder(int index) { + return listedOperations_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +   * "next_page_token" can be sent in a subsequent list action
    +   * to fetch more of the matching data.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * "next_page_token" can be sent in a subsequent list action
    +   * to fetch more of the matching data.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OPERATION_FIELD_NUMBER = 3; + private com.google.longrunning.Operation operation_; + + /** + * + * + *
    +   * Operation returned by GetOperationAction.
    +   * 
    + * + * .google.longrunning.Operation operation = 3; + * + * @return Whether the operation field is set. + */ + @java.lang.Override + public boolean hasOperation() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Operation returned by GetOperationAction.
    +   * 
    + * + * .google.longrunning.Operation operation = 3; + * + * @return The operation. + */ + @java.lang.Override + public com.google.longrunning.Operation getOperation() { + return operation_ == null ? com.google.longrunning.Operation.getDefaultInstance() : operation_; + } + + /** + * + * + *
    +   * Operation returned by GetOperationAction.
    +   * 
    + * + * .google.longrunning.Operation operation = 3; + */ + @java.lang.Override + public com.google.longrunning.OperationOrBuilder getOperationOrBuilder() { + return operation_ == null ? com.google.longrunning.Operation.getDefaultInstance() : operation_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < listedOperations_.size(); i++) { + output.writeMessage(1, listedOperations_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getOperation()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < listedOperations_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, listedOperations_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getOperation()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.OperationResponse)) { + return super.equals(obj); + } + 
com.google.spanner.executor.v1.OperationResponse other = + (com.google.spanner.executor.v1.OperationResponse) obj; + + if (!getListedOperationsList().equals(other.getListedOperationsList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (hasOperation() != other.hasOperation()) return false; + if (hasOperation()) { + if (!getOperation().equals(other.getOperation())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getListedOperationsCount() > 0) { + hash = (37 * hash) + LISTED_OPERATIONS_FIELD_NUMBER; + hash = (53 * hash) + getListedOperationsList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + if (hasOperation()) { + hash = (37 * hash) + OPERATION_FIELD_NUMBER; + hash = (53 * hash) + getOperation().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.OperationResponse parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.OperationResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.OperationResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.OperationResponse parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.OperationResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.OperationResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.OperationResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.OperationResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.OperationResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.OperationResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.OperationResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.OperationResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.OperationResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * OperationResponse contains results returned by operation related actions.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.OperationResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.OperationResponse) + com.google.spanner.executor.v1.OperationResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_OperationResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_OperationResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.OperationResponse.class, + com.google.spanner.executor.v1.OperationResponse.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.OperationResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetListedOperationsFieldBuilder(); + internalGetOperationFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (listedOperationsBuilder_ == null) { + listedOperations_ = java.util.Collections.emptyList(); + } else { + listedOperations_ = null; + listedOperationsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + operation_ = null; + if (operationBuilder_ != null) { + operationBuilder_.dispose(); + operationBuilder_ = null; + } + return this; + } + + @java.lang.Override + 
public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_OperationResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.OperationResponse getDefaultInstanceForType() { + return com.google.spanner.executor.v1.OperationResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.OperationResponse build() { + com.google.spanner.executor.v1.OperationResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.OperationResponse buildPartial() { + com.google.spanner.executor.v1.OperationResponse result = + new com.google.spanner.executor.v1.OperationResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.executor.v1.OperationResponse result) { + if (listedOperationsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + listedOperations_ = java.util.Collections.unmodifiableList(listedOperations_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.listedOperations_ = listedOperations_; + } else { + result.listedOperations_ = listedOperationsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.executor.v1.OperationResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.operation_ = operationBuilder_ == null ? 
operation_ : operationBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.OperationResponse) { + return mergeFrom((com.google.spanner.executor.v1.OperationResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.OperationResponse other) { + if (other == com.google.spanner.executor.v1.OperationResponse.getDefaultInstance()) + return this; + if (listedOperationsBuilder_ == null) { + if (!other.listedOperations_.isEmpty()) { + if (listedOperations_.isEmpty()) { + listedOperations_ = other.listedOperations_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureListedOperationsIsMutable(); + listedOperations_.addAll(other.listedOperations_); + } + onChanged(); + } + } else { + if (!other.listedOperations_.isEmpty()) { + if (listedOperationsBuilder_.isEmpty()) { + listedOperationsBuilder_.dispose(); + listedOperationsBuilder_ = null; + listedOperations_ = other.listedOperations_; + bitField0_ = (bitField0_ & ~0x00000001); + listedOperationsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetListedOperationsFieldBuilder() + : null; + } else { + listedOperationsBuilder_.addAllMessages(other.listedOperations_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasOperation()) { + mergeOperation(other.getOperation()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.longrunning.Operation m = + input.readMessage(com.google.longrunning.Operation.parser(), extensionRegistry); + if (listedOperationsBuilder_ == null) { + ensureListedOperationsIsMutable(); + listedOperations_.add(m); + } else { + listedOperationsBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetOperationFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List listedOperations_ = + java.util.Collections.emptyList(); + + 
private void ensureListedOperationsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + listedOperations_ = + new java.util.ArrayList(listedOperations_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder> + listedOperationsBuilder_; + + /** + * + * + *
    +     * List of operations returned by ListOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + public java.util.List getListedOperationsList() { + if (listedOperationsBuilder_ == null) { + return java.util.Collections.unmodifiableList(listedOperations_); + } else { + return listedOperationsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * List of operations returned by ListOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + public int getListedOperationsCount() { + if (listedOperationsBuilder_ == null) { + return listedOperations_.size(); + } else { + return listedOperationsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * List of operations returned by ListOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + public com.google.longrunning.Operation getListedOperations(int index) { + if (listedOperationsBuilder_ == null) { + return listedOperations_.get(index); + } else { + return listedOperationsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * List of operations returned by ListOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + public Builder setListedOperations(int index, com.google.longrunning.Operation value) { + if (listedOperationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureListedOperationsIsMutable(); + listedOperations_.set(index, value); + onChanged(); + } else { + listedOperationsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + public Builder setListedOperations( + int index, com.google.longrunning.Operation.Builder builderForValue) { + if (listedOperationsBuilder_ == null) { + ensureListedOperationsIsMutable(); + listedOperations_.set(index, builderForValue.build()); + onChanged(); + } else { + listedOperationsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + public Builder addListedOperations(com.google.longrunning.Operation value) { + if (listedOperationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureListedOperationsIsMutable(); + listedOperations_.add(value); + onChanged(); + } else { + listedOperationsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + public Builder addListedOperations(int index, com.google.longrunning.Operation value) { + if (listedOperationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureListedOperationsIsMutable(); + listedOperations_.add(index, value); + onChanged(); + } else { + listedOperationsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + public Builder addListedOperations(com.google.longrunning.Operation.Builder builderForValue) { + if (listedOperationsBuilder_ == null) { + ensureListedOperationsIsMutable(); + listedOperations_.add(builderForValue.build()); + onChanged(); + } else { + listedOperationsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + public Builder addListedOperations( + int index, com.google.longrunning.Operation.Builder builderForValue) { + if (listedOperationsBuilder_ == null) { + ensureListedOperationsIsMutable(); + listedOperations_.add(index, builderForValue.build()); + onChanged(); + } else { + listedOperationsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + public Builder addAllListedOperations( + java.lang.Iterable values) { + if (listedOperationsBuilder_ == null) { + ensureListedOperationsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, listedOperations_); + onChanged(); + } else { + listedOperationsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + public Builder clearListedOperations() { + if (listedOperationsBuilder_ == null) { + listedOperations_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + listedOperationsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + public Builder removeListedOperations(int index) { + if (listedOperationsBuilder_ == null) { + ensureListedOperationsIsMutable(); + listedOperations_.remove(index); + onChanged(); + } else { + listedOperationsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * List of operations returned by ListOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + public com.google.longrunning.Operation.Builder getListedOperationsBuilder(int index) { + return internalGetListedOperationsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * List of operations returned by ListOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + public com.google.longrunning.OperationOrBuilder getListedOperationsOrBuilder(int index) { + if (listedOperationsBuilder_ == null) { + return listedOperations_.get(index); + } else { + return listedOperationsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * List of operations returned by ListOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + public java.util.List + getListedOperationsOrBuilderList() { + if (listedOperationsBuilder_ != null) { + return listedOperationsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(listedOperations_); + } + } + + /** + * + * + *
    +     * List of operations returned by ListOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + public com.google.longrunning.Operation.Builder addListedOperationsBuilder() { + return internalGetListedOperationsFieldBuilder() + .addBuilder(com.google.longrunning.Operation.getDefaultInstance()); + } + + /** + * + * + *
    +     * List of operations returned by ListOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + public com.google.longrunning.Operation.Builder addListedOperationsBuilder(int index) { + return internalGetListedOperationsFieldBuilder() + .addBuilder(index, com.google.longrunning.Operation.getDefaultInstance()); + } + + /** + * + * + *
    +     * List of operations returned by ListOperationsAction.
    +     * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + public java.util.List + getListedOperationsBuilderList() { + return internalGetListedOperationsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder> + internalGetListedOperationsFieldBuilder() { + if (listedOperationsBuilder_ == null) { + listedOperationsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder>( + listedOperations_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + listedOperations_ = null; + } + return listedOperationsBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * "next_page_token" can be sent in a subsequent list action
    +     * to fetch more of the matching data.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.longrunning.Operation operation_; + private com.google.protobuf.SingleFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder> + operationBuilder_; + + /** + * + * + *
    +     * Operation returned by GetOperationAction.
    +     * 
    + * + * .google.longrunning.Operation operation = 3; + * + * @return Whether the operation field is set. + */ + public boolean hasOperation() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Operation returned by GetOperationAction.
    +     * 
    + * + * .google.longrunning.Operation operation = 3; + * + * @return The operation. + */ + public com.google.longrunning.Operation getOperation() { + if (operationBuilder_ == null) { + return operation_ == null + ? com.google.longrunning.Operation.getDefaultInstance() + : operation_; + } else { + return operationBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Operation returned by GetOperationAction.
    +     * 
    + * + * .google.longrunning.Operation operation = 3; + */ + public Builder setOperation(com.google.longrunning.Operation value) { + if (operationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + operation_ = value; + } else { + operationBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Operation returned by GetOperationAction.
    +     * 
    + * + * .google.longrunning.Operation operation = 3; + */ + public Builder setOperation(com.google.longrunning.Operation.Builder builderForValue) { + if (operationBuilder_ == null) { + operation_ = builderForValue.build(); + } else { + operationBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Operation returned by GetOperationAction.
    +     * 
    + * + * .google.longrunning.Operation operation = 3; + */ + public Builder mergeOperation(com.google.longrunning.Operation value) { + if (operationBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && operation_ != null + && operation_ != com.google.longrunning.Operation.getDefaultInstance()) { + getOperationBuilder().mergeFrom(value); + } else { + operation_ = value; + } + } else { + operationBuilder_.mergeFrom(value); + } + if (operation_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Operation returned by GetOperationAction.
    +     * 
    + * + * .google.longrunning.Operation operation = 3; + */ + public Builder clearOperation() { + bitField0_ = (bitField0_ & ~0x00000004); + operation_ = null; + if (operationBuilder_ != null) { + operationBuilder_.dispose(); + operationBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Operation returned by GetOperationAction.
    +     * 
    + * + * .google.longrunning.Operation operation = 3; + */ + public com.google.longrunning.Operation.Builder getOperationBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetOperationFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Operation returned by GetOperationAction.
    +     * 
    + * + * .google.longrunning.Operation operation = 3; + */ + public com.google.longrunning.OperationOrBuilder getOperationOrBuilder() { + if (operationBuilder_ != null) { + return operationBuilder_.getMessageOrBuilder(); + } else { + return operation_ == null + ? com.google.longrunning.Operation.getDefaultInstance() + : operation_; + } + } + + /** + * + * + *
    +     * Operation returned by GetOperationAction.
    +     * 
    + * + * .google.longrunning.Operation operation = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder> + internalGetOperationFieldBuilder() { + if (operationBuilder_ == null) { + operationBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.longrunning.Operation, + com.google.longrunning.Operation.Builder, + com.google.longrunning.OperationOrBuilder>( + getOperation(), getParentForChildren(), isClean()); + operation_ = null; + } + return operationBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.OperationResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.OperationResponse) + private static final com.google.spanner.executor.v1.OperationResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.OperationResponse(); + } + + public static com.google.spanner.executor.v1.OperationResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public OperationResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return 
builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.OperationResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/OperationResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/OperationResponseOrBuilder.java new file mode 100644 index 000000000000..e14a77029a47 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/OperationResponseOrBuilder.java @@ -0,0 +1,149 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface OperationResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.OperationResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * List of operations returned by ListOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + java.util.List getListedOperationsList(); + + /** + * + * + *
    +   * List of operations returned by ListOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + com.google.longrunning.Operation getListedOperations(int index); + + /** + * + * + *
    +   * List of operations returned by ListOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + int getListedOperationsCount(); + + /** + * + * + *
    +   * List of operations returned by ListOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + java.util.List + getListedOperationsOrBuilderList(); + + /** + * + * + *
    +   * List of operations returned by ListOperationsAction.
    +   * 
    + * + * repeated .google.longrunning.Operation listed_operations = 1; + */ + com.google.longrunning.OperationOrBuilder getListedOperationsOrBuilder(int index); + + /** + * + * + *
    +   * "next_page_token" can be sent in a subsequent list action
    +   * to fetch more of the matching data.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
    +   * "next_page_token" can be sent in a subsequent list action
    +   * to fetch more of the matching data.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); + + /** + * + * + *
    +   * Operation returned by GetOperationAction.
    +   * 
    + * + * .google.longrunning.Operation operation = 3; + * + * @return Whether the operation field is set. + */ + boolean hasOperation(); + + /** + * + * + *
    +   * Operation returned by GetOperationAction.
    +   * 
    + * + * .google.longrunning.Operation operation = 3; + * + * @return The operation. + */ + com.google.longrunning.Operation getOperation(); + + /** + * + * + *
    +   * Operation returned by GetOperationAction.
    +   * 
    + * + * .google.longrunning.Operation operation = 3; + */ + com.google.longrunning.OperationOrBuilder getOperationOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/PartitionedUpdateAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/PartitionedUpdateAction.java new file mode 100644 index 000000000000..409b254f30d0 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/PartitionedUpdateAction.java @@ -0,0 +1,1972 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * PartitionedUpdateAction defines an action to execute a partitioned DML
    + * which runs different partitions in parallel.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.PartitionedUpdateAction} + */ +@com.google.protobuf.Generated +public final class PartitionedUpdateAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.PartitionedUpdateAction) + PartitionedUpdateActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "PartitionedUpdateAction"); + } + + // Use PartitionedUpdateAction.newBuilder() to construct. + private PartitionedUpdateAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private PartitionedUpdateAction() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_PartitionedUpdateAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_PartitionedUpdateAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.PartitionedUpdateAction.class, + com.google.spanner.executor.v1.PartitionedUpdateAction.Builder.class); + } + + public interface ExecutePartitionedUpdateOptionsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * RPC Priority
    +     * 
    + * + * optional .google.spanner.v1.RequestOptions.Priority rpc_priority = 1; + * + * @return Whether the rpcPriority field is set. + */ + boolean hasRpcPriority(); + + /** + * + * + *
    +     * RPC Priority
    +     * 
    + * + * optional .google.spanner.v1.RequestOptions.Priority rpc_priority = 1; + * + * @return The enum numeric value on the wire for rpcPriority. + */ + int getRpcPriorityValue(); + + /** + * + * + *
    +     * RPC Priority
    +     * 
    + * + * optional .google.spanner.v1.RequestOptions.Priority rpc_priority = 1; + * + * @return The rpcPriority. + */ + com.google.spanner.v1.RequestOptions.Priority getRpcPriority(); + + /** + * + * + *
    +     * Transaction tag
    +     * 
    + * + * optional string tag = 2; + * + * @return Whether the tag field is set. + */ + boolean hasTag(); + + /** + * + * + *
    +     * Transaction tag
    +     * 
    + * + * optional string tag = 2; + * + * @return The tag. + */ + java.lang.String getTag(); + + /** + * + * + *
    +     * Transaction tag
    +     * 
    + * + * optional string tag = 2; + * + * @return The bytes for tag. + */ + com.google.protobuf.ByteString getTagBytes(); + } + + /** + * Protobuf type {@code + * google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions} + */ + public static final class ExecutePartitionedUpdateOptions + extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions) + ExecutePartitionedUpdateOptionsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ExecutePartitionedUpdateOptions"); + } + + // Use ExecutePartitionedUpdateOptions.newBuilder() to construct. + private ExecutePartitionedUpdateOptions( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ExecutePartitionedUpdateOptions() { + rpcPriority_ = 0; + tag_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_PartitionedUpdateAction_ExecutePartitionedUpdateOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_PartitionedUpdateAction_ExecutePartitionedUpdateOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + .class, + com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + .Builder.class); + } + + private int bitField0_; + public 
static final int RPC_PRIORITY_FIELD_NUMBER = 1; + private int rpcPriority_ = 0; + + /** + * + * + *
    +     * RPC Priority
    +     * 
    + * + * optional .google.spanner.v1.RequestOptions.Priority rpc_priority = 1; + * + * @return Whether the rpcPriority field is set. + */ + @java.lang.Override + public boolean hasRpcPriority() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * RPC Priority
    +     * 
    + * + * optional .google.spanner.v1.RequestOptions.Priority rpc_priority = 1; + * + * @return The enum numeric value on the wire for rpcPriority. + */ + @java.lang.Override + public int getRpcPriorityValue() { + return rpcPriority_; + } + + /** + * + * + *
    +     * RPC Priority
    +     * 
    + * + * optional .google.spanner.v1.RequestOptions.Priority rpc_priority = 1; + * + * @return The rpcPriority. + */ + @java.lang.Override + public com.google.spanner.v1.RequestOptions.Priority getRpcPriority() { + com.google.spanner.v1.RequestOptions.Priority result = + com.google.spanner.v1.RequestOptions.Priority.forNumber(rpcPriority_); + return result == null ? com.google.spanner.v1.RequestOptions.Priority.UNRECOGNIZED : result; + } + + public static final int TAG_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object tag_ = ""; + + /** + * + * + *
    +     * Transaction tag
    +     * 
    + * + * optional string tag = 2; + * + * @return Whether the tag field is set. + */ + @java.lang.Override + public boolean hasTag() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Transaction tag
    +     * 
    + * + * optional string tag = 2; + * + * @return The tag. + */ + @java.lang.Override + public java.lang.String getTag() { + java.lang.Object ref = tag_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + tag_ = s; + return s; + } + } + + /** + * + * + *
    +     * Transaction tag
    +     * 
    + * + * optional string tag = 2; + * + * @return The bytes for tag. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTagBytes() { + java.lang.Object ref = tag_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + tag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeEnum(1, rpcPriority_); + } + if (((bitField0_ & 0x00000002) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, tag_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, rpcPriority_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, tag_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions other = + (com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions) 
+ obj; + + if (hasRpcPriority() != other.hasRpcPriority()) return false; + if (hasRpcPriority()) { + if (rpcPriority_ != other.rpcPriority_) return false; + } + if (hasTag() != other.hasTag()) return false; + if (hasTag()) { + if (!getTag().equals(other.getTag())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasRpcPriority()) { + hash = (37 * hash) + RPC_PRIORITY_FIELD_NUMBER; + hash = (53 * hash) + rpcPriority_; + } + if (hasTag()) { + hash = (37 * hash) + TAG_FIELD_NUMBER; + hash = (53 * hash) + getTag().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions + 
parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * Protobuf type {@code + * google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions) + com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_PartitionedUpdateAction_ExecutePartitionedUpdateOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_PartitionedUpdateAction_ExecutePartitionedUpdateOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions.class, + com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions.Builder.class); + } + + // Construct using + // com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + rpcPriority_ = 0; + tag_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor 
getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_PartitionedUpdateAction_ExecutePartitionedUpdateOptions_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + getDefaultInstanceForType() { + return com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + build() { + com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + buildPartial() { + com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + result = + new com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.rpcPriority_ = rpcPriority_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.tag_ = tag_; + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions) { + return mergeFrom( + 
(com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + other) { + if (other + == com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions.getDefaultInstance()) return this; + if (other.hasRpcPriority()) { + setRpcPriorityValue(other.getRpcPriorityValue()); + } + if (other.hasTag()) { + tag_ = other.tag_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + rpcPriority_ = input.readEnum(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + tag_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int rpcPriority_ = 0; + + /** + * + * + *
    +       * RPC Priority
    +       * 
    + * + * optional .google.spanner.v1.RequestOptions.Priority rpc_priority = 1; + * + * @return Whether the rpcPriority field is set. + */ + @java.lang.Override + public boolean hasRpcPriority() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +       * RPC Priority
    +       * 
    + * + * optional .google.spanner.v1.RequestOptions.Priority rpc_priority = 1; + * + * @return The enum numeric value on the wire for rpcPriority. + */ + @java.lang.Override + public int getRpcPriorityValue() { + return rpcPriority_; + } + + /** + * + * + *
    +       * RPC Priority
    +       * 
    + * + * optional .google.spanner.v1.RequestOptions.Priority rpc_priority = 1; + * + * @param value The enum numeric value on the wire for rpcPriority to set. + * @return This builder for chaining. + */ + public Builder setRpcPriorityValue(int value) { + rpcPriority_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * RPC Priority
    +       * 
    + * + * optional .google.spanner.v1.RequestOptions.Priority rpc_priority = 1; + * + * @return The rpcPriority. + */ + @java.lang.Override + public com.google.spanner.v1.RequestOptions.Priority getRpcPriority() { + com.google.spanner.v1.RequestOptions.Priority result = + com.google.spanner.v1.RequestOptions.Priority.forNumber(rpcPriority_); + return result == null ? com.google.spanner.v1.RequestOptions.Priority.UNRECOGNIZED : result; + } + + /** + * + * + *
    +       * RPC Priority
    +       * 
    + * + * optional .google.spanner.v1.RequestOptions.Priority rpc_priority = 1; + * + * @param value The rpcPriority to set. + * @return This builder for chaining. + */ + public Builder setRpcPriority(com.google.spanner.v1.RequestOptions.Priority value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + rpcPriority_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +       * RPC Priority
    +       * 
    + * + * optional .google.spanner.v1.RequestOptions.Priority rpc_priority = 1; + * + * @return This builder for chaining. + */ + public Builder clearRpcPriority() { + bitField0_ = (bitField0_ & ~0x00000001); + rpcPriority_ = 0; + onChanged(); + return this; + } + + private java.lang.Object tag_ = ""; + + /** + * + * + *
    +       * Transaction tag
    +       * 
    + * + * optional string tag = 2; + * + * @return Whether the tag field is set. + */ + public boolean hasTag() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +       * Transaction tag
    +       * 
    + * + * optional string tag = 2; + * + * @return The tag. + */ + public java.lang.String getTag() { + java.lang.Object ref = tag_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + tag_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * Transaction tag
    +       * 
    + * + * optional string tag = 2; + * + * @return The bytes for tag. + */ + public com.google.protobuf.ByteString getTagBytes() { + java.lang.Object ref = tag_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + tag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * Transaction tag
    +       * 
    + * + * optional string tag = 2; + * + * @param value The tag to set. + * @return This builder for chaining. + */ + public Builder setTag(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + tag_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Transaction tag
    +       * 
    + * + * optional string tag = 2; + * + * @return This builder for chaining. + */ + public Builder clearTag() { + tag_ = getDefaultInstance().getTag(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Transaction tag
    +       * 
    + * + * optional string tag = 2; + * + * @param value The bytes for tag to set. + * @return This builder for chaining. + */ + public Builder setTagBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + tag_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions) + private static final com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions(); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ExecutePartitionedUpdateOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + 
public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int OPTIONS_FIELD_NUMBER = 1; + private com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + options_; + + /** + * + * + *
    +   * Options for partitioned update.
    +   * 
    + * + * + * optional .google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions options = 1; + * + * + * @return Whether the options field is set. + */ + @java.lang.Override + public boolean hasOptions() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Options for partitioned update.
    +   * 
    + * + * + * optional .google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions options = 1; + * + * + * @return The options. + */ + @java.lang.Override + public com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + getOptions() { + return options_ == null + ? com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + .getDefaultInstance() + : options_; + } + + /** + * + * + *
    +   * Options for partitioned update.
    +   * 
    + * + * + * optional .google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions options = 1; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptionsOrBuilder + getOptionsOrBuilder() { + return options_ == null + ? com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + .getDefaultInstance() + : options_; + } + + public static final int UPDATE_FIELD_NUMBER = 2; + private com.google.spanner.executor.v1.QueryAction update_; + + /** + * + * + *
    +   * Partitioned dml query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction update = 2; + * + * @return Whether the update field is set. + */ + @java.lang.Override + public boolean hasUpdate() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Partitioned dml query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction update = 2; + * + * @return The update. + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryAction getUpdate() { + return update_ == null + ? com.google.spanner.executor.v1.QueryAction.getDefaultInstance() + : update_; + } + + /** + * + * + *
    +   * Partitioned dml query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction update = 2; + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryActionOrBuilder getUpdateOrBuilder() { + return update_ == null + ? com.google.spanner.executor.v1.QueryAction.getDefaultInstance() + : update_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getOptions()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getUpdate()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getOptions()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getUpdate()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.PartitionedUpdateAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.PartitionedUpdateAction other = + (com.google.spanner.executor.v1.PartitionedUpdateAction) obj; + + if (hasOptions() != other.hasOptions()) return false; + if (hasOptions()) { + if (!getOptions().equals(other.getOptions())) return false; + } + if (hasUpdate() != other.hasUpdate()) return false; + if (hasUpdate()) { + if 
(!getUpdate().equals(other.getUpdate())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasOptions()) { + hash = (37 * hash) + OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getOptions().hashCode(); + } + if (hasUpdate()) { + hash = (37 * hash) + UPDATE_FIELD_NUMBER; + hash = (53 * hash) + getUpdate().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction parseFrom( + byte[] data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder 
newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.PartitionedUpdateAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * PartitionedUpdateAction defines an action to execute a partitioned DML
    +   * which runs different partitions in parallel.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.PartitionedUpdateAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.PartitionedUpdateAction) + com.google.spanner.executor.v1.PartitionedUpdateActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_PartitionedUpdateAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_PartitionedUpdateAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.PartitionedUpdateAction.class, + com.google.spanner.executor.v1.PartitionedUpdateAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.PartitionedUpdateAction.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetOptionsFieldBuilder(); + internalGetUpdateFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + options_ = null; + if (optionsBuilder_ != null) { + optionsBuilder_.dispose(); + optionsBuilder_ = null; + } + update_ = null; + if (updateBuilder_ != null) { + updateBuilder_.dispose(); + updateBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_PartitionedUpdateAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.PartitionedUpdateAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.PartitionedUpdateAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.PartitionedUpdateAction build() { + com.google.spanner.executor.v1.PartitionedUpdateAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.PartitionedUpdateAction buildPartial() { + com.google.spanner.executor.v1.PartitionedUpdateAction result = + new com.google.spanner.executor.v1.PartitionedUpdateAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.PartitionedUpdateAction result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.options_ = optionsBuilder_ == null ? options_ : optionsBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.update_ = updateBuilder_ == null ? 
update_ : updateBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.PartitionedUpdateAction) { + return mergeFrom((com.google.spanner.executor.v1.PartitionedUpdateAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.PartitionedUpdateAction other) { + if (other == com.google.spanner.executor.v1.PartitionedUpdateAction.getDefaultInstance()) + return this; + if (other.hasOptions()) { + mergeOptions(other.getOptions()); + } + if (other.hasUpdate()) { + mergeUpdate(other.getUpdate()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(internalGetOptionsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(internalGetUpdateFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + 
private int bitField0_; + + private com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + options_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions, + com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + .Builder, + com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptionsOrBuilder> + optionsBuilder_; + + /** + * + * + *
    +     * Options for partitioned update.
    +     * 
    + * + * + * optional .google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions options = 1; + * + * + * @return Whether the options field is set. + */ + public boolean hasOptions() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Options for partitioned update.
    +     * 
    + * + * + * optional .google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions options = 1; + * + * + * @return The options. + */ + public com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + getOptions() { + if (optionsBuilder_ == null) { + return options_ == null + ? com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + .getDefaultInstance() + : options_; + } else { + return optionsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Options for partitioned update.
    +     * 
    + * + * + * optional .google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions options = 1; + * + */ + public Builder setOptions( + com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + value) { + if (optionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + options_ = value; + } else { + optionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Options for partitioned update.
    +     * 
    + * + * + * optional .google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions options = 1; + * + */ + public Builder setOptions( + com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + .Builder + builderForValue) { + if (optionsBuilder_ == null) { + options_ = builderForValue.build(); + } else { + optionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Options for partitioned update.
    +     * 
    + * + * + * optional .google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions options = 1; + * + */ + public Builder mergeOptions( + com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + value) { + if (optionsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && options_ != null + && options_ + != com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions.getDefaultInstance()) { + getOptionsBuilder().mergeFrom(value); + } else { + options_ = value; + } + } else { + optionsBuilder_.mergeFrom(value); + } + if (options_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Options for partitioned update.
    +     * 
    + * + * + * optional .google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions options = 1; + * + */ + public Builder clearOptions() { + bitField0_ = (bitField0_ & ~0x00000001); + options_ = null; + if (optionsBuilder_ != null) { + optionsBuilder_.dispose(); + optionsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Options for partitioned update.
    +     * 
    + * + * + * optional .google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions options = 1; + * + */ + public com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + .Builder + getOptionsBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Options for partitioned update.
    +     * 
    + * + * + * optional .google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions options = 1; + * + */ + public com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptionsOrBuilder + getOptionsOrBuilder() { + if (optionsBuilder_ != null) { + return optionsBuilder_.getMessageOrBuilder(); + } else { + return options_ == null + ? com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + .getDefaultInstance() + : options_; + } + } + + /** + * + * + *
    +     * Options for partitioned update.
    +     * 
    + * + * + * optional .google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions options = 1; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions, + com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + .Builder, + com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptionsOrBuilder> + internalGetOptionsFieldBuilder() { + if (optionsBuilder_ == null) { + optionsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions, + com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptions.Builder, + com.google.spanner.executor.v1.PartitionedUpdateAction + .ExecutePartitionedUpdateOptionsOrBuilder>( + getOptions(), getParentForChildren(), isClean()); + options_ = null; + } + return optionsBuilder_; + } + + private com.google.spanner.executor.v1.QueryAction update_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.QueryAction, + com.google.spanner.executor.v1.QueryAction.Builder, + com.google.spanner.executor.v1.QueryActionOrBuilder> + updateBuilder_; + + /** + * + * + *
    +     * Partitioned dml query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction update = 2; + * + * @return Whether the update field is set. + */ + public boolean hasUpdate() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Partitioned dml query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction update = 2; + * + * @return The update. + */ + public com.google.spanner.executor.v1.QueryAction getUpdate() { + if (updateBuilder_ == null) { + return update_ == null + ? com.google.spanner.executor.v1.QueryAction.getDefaultInstance() + : update_; + } else { + return updateBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Partitioned dml query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction update = 2; + */ + public Builder setUpdate(com.google.spanner.executor.v1.QueryAction value) { + if (updateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + update_ = value; + } else { + updateBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Partitioned dml query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction update = 2; + */ + public Builder setUpdate(com.google.spanner.executor.v1.QueryAction.Builder builderForValue) { + if (updateBuilder_ == null) { + update_ = builderForValue.build(); + } else { + updateBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Partitioned dml query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction update = 2; + */ + public Builder mergeUpdate(com.google.spanner.executor.v1.QueryAction value) { + if (updateBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && update_ != null + && update_ != com.google.spanner.executor.v1.QueryAction.getDefaultInstance()) { + getUpdateBuilder().mergeFrom(value); + } else { + update_ = value; + } + } else { + updateBuilder_.mergeFrom(value); + } + if (update_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Partitioned dml query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction update = 2; + */ + public Builder clearUpdate() { + bitField0_ = (bitField0_ & ~0x00000002); + update_ = null; + if (updateBuilder_ != null) { + updateBuilder_.dispose(); + updateBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Partitioned dml query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction update = 2; + */ + public com.google.spanner.executor.v1.QueryAction.Builder getUpdateBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetUpdateFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Partitioned dml query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction update = 2; + */ + public com.google.spanner.executor.v1.QueryActionOrBuilder getUpdateOrBuilder() { + if (updateBuilder_ != null) { + return updateBuilder_.getMessageOrBuilder(); + } else { + return update_ == null + ? com.google.spanner.executor.v1.QueryAction.getDefaultInstance() + : update_; + } + } + + /** + * + * + *
    +     * Partitioned dml query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction update = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.QueryAction, + com.google.spanner.executor.v1.QueryAction.Builder, + com.google.spanner.executor.v1.QueryActionOrBuilder> + internalGetUpdateFieldBuilder() { + if (updateBuilder_ == null) { + updateBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.QueryAction, + com.google.spanner.executor.v1.QueryAction.Builder, + com.google.spanner.executor.v1.QueryActionOrBuilder>( + getUpdate(), getParentForChildren(), isClean()); + update_ = null; + } + return updateBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.PartitionedUpdateAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.PartitionedUpdateAction) + private static final com.google.spanner.executor.v1.PartitionedUpdateAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.PartitionedUpdateAction(); + } + + public static com.google.spanner.executor.v1.PartitionedUpdateAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PartitionedUpdateAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.PartitionedUpdateAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/PartitionedUpdateActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/PartitionedUpdateActionOrBuilder.java new file mode 100644 index 000000000000..b369d382833f --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/PartitionedUpdateActionOrBuilder.java @@ -0,0 +1,110 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface PartitionedUpdateActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.PartitionedUpdateAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Options for partitioned update.
    +   * 
    + * + * + * optional .google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions options = 1; + * + * + * @return Whether the options field is set. + */ + boolean hasOptions(); + + /** + * + * + *
    +   * Options for partitioned update.
    +   * 
    + * + * + * optional .google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions options = 1; + * + * + * @return The options. + */ + com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions + getOptions(); + + /** + * + * + *
    +   * Options for partitioned update.
    +   * 
    + * + * + * optional .google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptions options = 1; + * + */ + com.google.spanner.executor.v1.PartitionedUpdateAction.ExecutePartitionedUpdateOptionsOrBuilder + getOptionsOrBuilder(); + + /** + * + * + *
    +   * Partitioned dml query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction update = 2; + * + * @return Whether the update field is set. + */ + boolean hasUpdate(); + + /** + * + * + *
    +   * Partitioned dml query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction update = 2; + * + * @return The update. + */ + com.google.spanner.executor.v1.QueryAction getUpdate(); + + /** + * + * + *
    +   * Partitioned dml query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction update = 2; + */ + com.google.spanner.executor.v1.QueryActionOrBuilder getUpdateOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryAction.java new file mode 100644 index 000000000000..80f44547aae2 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryAction.java @@ -0,0 +1,2360 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * A SQL query request.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.QueryAction} + */ +@com.google.protobuf.Generated +public final class QueryAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.QueryAction) + QueryActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "QueryAction"); + } + + // Use QueryAction.newBuilder() to construct. + private QueryAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private QueryAction() { + sql_ = ""; + params_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.QueryAction.class, + com.google.spanner.executor.v1.QueryAction.Builder.class); + } + + public interface ParameterOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.QueryAction.Parameter) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * Name of the parameter (with no leading @).
    +     * 
    + * + * string name = 1; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +     * Name of the parameter (with no leading @).
    +     * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +     * Type of the parameter.
    +     * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return Whether the type field is set. + */ + boolean hasType(); + + /** + * + * + *
    +     * Type of the parameter.
    +     * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return The type. + */ + com.google.spanner.v1.Type getType(); + + /** + * + * + *
    +     * Type of the parameter.
    +     * 
    + * + * .google.spanner.v1.Type type = 2; + */ + com.google.spanner.v1.TypeOrBuilder getTypeOrBuilder(); + + /** + * + * + *
    +     * Value of the parameter.
    +     * 
    + * + * .google.spanner.executor.v1.Value value = 3; + * + * @return Whether the value field is set. + */ + boolean hasValue(); + + /** + * + * + *
    +     * Value of the parameter.
    +     * 
    + * + * .google.spanner.executor.v1.Value value = 3; + * + * @return The value. + */ + com.google.spanner.executor.v1.Value getValue(); + + /** + * + * + *
    +     * Value of the parameter.
    +     * 
    + * + * .google.spanner.executor.v1.Value value = 3; + */ + com.google.spanner.executor.v1.ValueOrBuilder getValueOrBuilder(); + } + + /** + * + * + *
    +   * Parameter that bind to placeholders in the SQL string
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.QueryAction.Parameter} + */ + public static final class Parameter extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.QueryAction.Parameter) + ParameterOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Parameter"); + } + + // Use Parameter.newBuilder() to construct. + private Parameter(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Parameter() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryAction_Parameter_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryAction_Parameter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.QueryAction.Parameter.class, + com.google.spanner.executor.v1.QueryAction.Parameter.Builder.class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Name of the parameter (with no leading @).
    +     * 
    + * + * string name = 1; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +     * Name of the parameter (with no leading @).
    +     * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TYPE_FIELD_NUMBER = 2; + private com.google.spanner.v1.Type type_; + + /** + * + * + *
    +     * Type of the parameter.
    +     * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return Whether the type field is set. + */ + @java.lang.Override + public boolean hasType() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Type of the parameter.
    +     * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return The type. + */ + @java.lang.Override + public com.google.spanner.v1.Type getType() { + return type_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : type_; + } + + /** + * + * + *
    +     * Type of the parameter.
    +     * 
    + * + * .google.spanner.v1.Type type = 2; + */ + @java.lang.Override + public com.google.spanner.v1.TypeOrBuilder getTypeOrBuilder() { + return type_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : type_; + } + + public static final int VALUE_FIELD_NUMBER = 3; + private com.google.spanner.executor.v1.Value value_; + + /** + * + * + *
    +     * Value of the parameter.
    +     * 
    + * + * .google.spanner.executor.v1.Value value = 3; + * + * @return Whether the value field is set. + */ + @java.lang.Override + public boolean hasValue() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Value of the parameter.
    +     * 
    + * + * .google.spanner.executor.v1.Value value = 3; + * + * @return The value. + */ + @java.lang.Override + public com.google.spanner.executor.v1.Value getValue() { + return value_ == null ? com.google.spanner.executor.v1.Value.getDefaultInstance() : value_; + } + + /** + * + * + *
    +     * Value of the parameter.
    +     * 
    + * + * .google.spanner.executor.v1.Value value = 3; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueOrBuilder getValueOrBuilder() { + return value_ == null ? com.google.spanner.executor.v1.Value.getDefaultInstance() : value_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getType()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getValue()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getType()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getValue()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.QueryAction.Parameter)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.QueryAction.Parameter other = + (com.google.spanner.executor.v1.QueryAction.Parameter) obj; 
+ + if (!getName().equals(other.getName())) return false; + if (hasType() != other.hasType()) return false; + if (hasType()) { + if (!getType().equals(other.getType())) return false; + } + if (hasValue() != other.hasValue()) return false; + if (hasValue()) { + if (!getValue().equals(other.getValue())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + getType().hashCode(); + } + if (hasValue()) { + hash = (37 * hash) + VALUE_FIELD_NUMBER; + hash = (53 * hash) + getValue().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.QueryAction.Parameter parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.QueryAction.Parameter parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryAction.Parameter parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.QueryAction.Parameter parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryAction.Parameter parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.QueryAction.Parameter parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryAction.Parameter parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.QueryAction.Parameter parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryAction.Parameter parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.QueryAction.Parameter parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryAction.Parameter parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.QueryAction.Parameter parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.QueryAction.Parameter prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * Parameter that bind to placeholders in the SQL string
    +     * 
    + * + * Protobuf type {@code google.spanner.executor.v1.QueryAction.Parameter} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.QueryAction.Parameter) + com.google.spanner.executor.v1.QueryAction.ParameterOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryAction_Parameter_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryAction_Parameter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.QueryAction.Parameter.class, + com.google.spanner.executor.v1.QueryAction.Parameter.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.QueryAction.Parameter.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetTypeFieldBuilder(); + internalGetValueFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + type_ = null; + if (typeBuilder_ != null) { + typeBuilder_.dispose(); + typeBuilder_ = null; + } + value_ = null; + if (valueBuilder_ != null) { + valueBuilder_.dispose(); + valueBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + 
.internal_static_google_spanner_executor_v1_QueryAction_Parameter_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.QueryAction.Parameter getDefaultInstanceForType() { + return com.google.spanner.executor.v1.QueryAction.Parameter.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.QueryAction.Parameter build() { + com.google.spanner.executor.v1.QueryAction.Parameter result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.QueryAction.Parameter buildPartial() { + com.google.spanner.executor.v1.QueryAction.Parameter result = + new com.google.spanner.executor.v1.QueryAction.Parameter(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.QueryAction.Parameter result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.type_ = typeBuilder_ == null ? type_ : typeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.value_ = valueBuilder_ == null ? 
value_ : valueBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.QueryAction.Parameter) { + return mergeFrom((com.google.spanner.executor.v1.QueryAction.Parameter) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.QueryAction.Parameter other) { + if (other == com.google.spanner.executor.v1.QueryAction.Parameter.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasType()) { + mergeType(other.getType()); + } + if (other.hasValue()) { + mergeValue(other.getValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(internalGetTypeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage(internalGetValueFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + 
} // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +       * Name of the parameter (with no leading @).
    +       * 
    + * + * string name = 1; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * Name of the parameter (with no leading @).
    +       * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * Name of the parameter (with no leading @).
    +       * 
    + * + * string name = 1; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Name of the parameter (with no leading @).
    +       * 
    + * + * string name = 1; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Name of the parameter (with no leading @).
    +       * 
    + * + * string name = 1; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.spanner.v1.Type type_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder> + typeBuilder_; + + /** + * + * + *
    +       * Type of the parameter.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return Whether the type field is set. + */ + public boolean hasType() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +       * Type of the parameter.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return The type. + */ + public com.google.spanner.v1.Type getType() { + if (typeBuilder_ == null) { + return type_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : type_; + } else { + return typeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * Type of the parameter.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public Builder setType(com.google.spanner.v1.Type value) { + if (typeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + type_ = value; + } else { + typeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Type of the parameter.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public Builder setType(com.google.spanner.v1.Type.Builder builderForValue) { + if (typeBuilder_ == null) { + type_ = builderForValue.build(); + } else { + typeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Type of the parameter.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public Builder mergeType(com.google.spanner.v1.Type value) { + if (typeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && type_ != null + && type_ != com.google.spanner.v1.Type.getDefaultInstance()) { + getTypeBuilder().mergeFrom(value); + } else { + type_ = value; + } + } else { + typeBuilder_.mergeFrom(value); + } + if (type_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * Type of the parameter.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000002); + type_ = null; + if (typeBuilder_ != null) { + typeBuilder_.dispose(); + typeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * Type of the parameter.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public com.google.spanner.v1.Type.Builder getTypeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetTypeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Type of the parameter.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public com.google.spanner.v1.TypeOrBuilder getTypeOrBuilder() { + if (typeBuilder_ != null) { + return typeBuilder_.getMessageOrBuilder(); + } else { + return type_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : type_; + } + } + + /** + * + * + *
    +       * Type of the parameter.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder> + internalGetTypeFieldBuilder() { + if (typeBuilder_ == null) { + typeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder>( + getType(), getParentForChildren(), isClean()); + type_ = null; + } + return typeBuilder_; + } + + private com.google.spanner.executor.v1.Value value_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.Value, + com.google.spanner.executor.v1.Value.Builder, + com.google.spanner.executor.v1.ValueOrBuilder> + valueBuilder_; + + /** + * + * + *
    +       * Value of the parameter.
    +       * 
    + * + * .google.spanner.executor.v1.Value value = 3; + * + * @return Whether the value field is set. + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +       * Value of the parameter.
    +       * 
    + * + * .google.spanner.executor.v1.Value value = 3; + * + * @return The value. + */ + public com.google.spanner.executor.v1.Value getValue() { + if (valueBuilder_ == null) { + return value_ == null + ? com.google.spanner.executor.v1.Value.getDefaultInstance() + : value_; + } else { + return valueBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * Value of the parameter.
    +       * 
    + * + * .google.spanner.executor.v1.Value value = 3; + */ + public Builder setValue(com.google.spanner.executor.v1.Value value) { + if (valueBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + value_ = value; + } else { + valueBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Value of the parameter.
    +       * 
    + * + * .google.spanner.executor.v1.Value value = 3; + */ + public Builder setValue(com.google.spanner.executor.v1.Value.Builder builderForValue) { + if (valueBuilder_ == null) { + value_ = builderForValue.build(); + } else { + valueBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Value of the parameter.
    +       * 
    + * + * .google.spanner.executor.v1.Value value = 3; + */ + public Builder mergeValue(com.google.spanner.executor.v1.Value value) { + if (valueBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && value_ != null + && value_ != com.google.spanner.executor.v1.Value.getDefaultInstance()) { + getValueBuilder().mergeFrom(value); + } else { + value_ = value; + } + } else { + valueBuilder_.mergeFrom(value); + } + if (value_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * Value of the parameter.
    +       * 
    + * + * .google.spanner.executor.v1.Value value = 3; + */ + public Builder clearValue() { + bitField0_ = (bitField0_ & ~0x00000004); + value_ = null; + if (valueBuilder_ != null) { + valueBuilder_.dispose(); + valueBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * Value of the parameter.
    +       * 
    + * + * .google.spanner.executor.v1.Value value = 3; + */ + public com.google.spanner.executor.v1.Value.Builder getValueBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetValueFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Value of the parameter.
    +       * 
    + * + * .google.spanner.executor.v1.Value value = 3; + */ + public com.google.spanner.executor.v1.ValueOrBuilder getValueOrBuilder() { + if (valueBuilder_ != null) { + return valueBuilder_.getMessageOrBuilder(); + } else { + return value_ == null + ? com.google.spanner.executor.v1.Value.getDefaultInstance() + : value_; + } + } + + /** + * + * + *
    +       * Value of the parameter.
    +       * 
    + * + * .google.spanner.executor.v1.Value value = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.Value, + com.google.spanner.executor.v1.Value.Builder, + com.google.spanner.executor.v1.ValueOrBuilder> + internalGetValueFieldBuilder() { + if (valueBuilder_ == null) { + valueBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.Value, + com.google.spanner.executor.v1.Value.Builder, + com.google.spanner.executor.v1.ValueOrBuilder>( + getValue(), getParentForChildren(), isClean()); + value_ = null; + } + return valueBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.QueryAction.Parameter) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.QueryAction.Parameter) + private static final com.google.spanner.executor.v1.QueryAction.Parameter DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.QueryAction.Parameter(); + } + + public static com.google.spanner.executor.v1.QueryAction.Parameter getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Parameter parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return 
builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.QueryAction.Parameter getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public static final int SQL_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object sql_ = ""; + + /** + * + * + *
    +   * The SQL string.
    +   * 
    + * + * string sql = 1; + * + * @return The sql. + */ + @java.lang.Override + public java.lang.String getSql() { + java.lang.Object ref = sql_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sql_ = s; + return s; + } + } + + /** + * + * + *
    +   * The SQL string.
    +   * 
    + * + * string sql = 1; + * + * @return The bytes for sql. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSqlBytes() { + java.lang.Object ref = sql_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sql_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PARAMS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List params_; + + /** + * + * + *
    +   * Parameters for the SQL string.
    +   * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + @java.lang.Override + public java.util.List getParamsList() { + return params_; + } + + /** + * + * + *
    +   * Parameters for the SQL string.
    +   * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + @java.lang.Override + public java.util.List + getParamsOrBuilderList() { + return params_; + } + + /** + * + * + *
    +   * Parameters for the SQL string.
    +   * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + @java.lang.Override + public int getParamsCount() { + return params_.size(); + } + + /** + * + * + *
    +   * Parameters for the SQL string.
    +   * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryAction.Parameter getParams(int index) { + return params_.get(index); + } + + /** + * + * + *
    +   * Parameters for the SQL string.
    +   * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryAction.ParameterOrBuilder getParamsOrBuilder( + int index) { + return params_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sql_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, sql_); + } + for (int i = 0; i < params_.size(); i++) { + output.writeMessage(2, params_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sql_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, sql_); + } + for (int i = 0; i < params_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, params_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.QueryAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.QueryAction other = + (com.google.spanner.executor.v1.QueryAction) obj; + + if (!getSql().equals(other.getSql())) return false; + if (!getParamsList().equals(other.getParamsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override 
+ public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SQL_FIELD_NUMBER; + hash = (53 * hash) + getSql().hashCode(); + if (getParamsCount() > 0) { + hash = (37 * hash) + PARAMS_FIELD_NUMBER; + hash = (53 * hash) + getParamsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.QueryAction parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.QueryAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.QueryAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.QueryAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryAction parseFrom(java.io.InputStream input) + 
throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.QueryAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.QueryAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.QueryAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.QueryAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * A SQL query request.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.QueryAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.QueryAction) + com.google.spanner.executor.v1.QueryActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.QueryAction.class, + com.google.spanner.executor.v1.QueryAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.QueryAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + sql_ = ""; + if (paramsBuilder_ == null) { + params_ = java.util.Collections.emptyList(); + } else { + params_ = null; + paramsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.QueryAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.QueryAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.QueryAction build() { + 
com.google.spanner.executor.v1.QueryAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.QueryAction buildPartial() { + com.google.spanner.executor.v1.QueryAction result = + new com.google.spanner.executor.v1.QueryAction(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.executor.v1.QueryAction result) { + if (paramsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + params_ = java.util.Collections.unmodifiableList(params_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.params_ = params_; + } else { + result.params_ = paramsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.executor.v1.QueryAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.sql_ = sql_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.QueryAction) { + return mergeFrom((com.google.spanner.executor.v1.QueryAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.QueryAction other) { + if (other == com.google.spanner.executor.v1.QueryAction.getDefaultInstance()) return this; + if (!other.getSql().isEmpty()) { + sql_ = other.sql_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (paramsBuilder_ == null) { + if (!other.params_.isEmpty()) { + if (params_.isEmpty()) { + params_ = other.params_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureParamsIsMutable(); + params_.addAll(other.params_); + } + onChanged(); + } + } else { + if (!other.params_.isEmpty()) { + if (paramsBuilder_.isEmpty()) { + 
paramsBuilder_.dispose(); + paramsBuilder_ = null; + params_ = other.params_; + bitField0_ = (bitField0_ & ~0x00000002); + paramsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetParamsFieldBuilder() + : null; + } else { + paramsBuilder_.addAllMessages(other.params_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + sql_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.spanner.executor.v1.QueryAction.Parameter m = + input.readMessage( + com.google.spanner.executor.v1.QueryAction.Parameter.parser(), + extensionRegistry); + if (paramsBuilder_ == null) { + ensureParamsIsMutable(); + params_.add(m); + } else { + paramsBuilder_.addMessage(m); + } + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object sql_ = ""; + + /** + * + * + *
    +     * The SQL string.
    +     * 
    + * + * string sql = 1; + * + * @return The sql. + */ + public java.lang.String getSql() { + java.lang.Object ref = sql_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sql_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The SQL string.
    +     * 
    + * + * string sql = 1; + * + * @return The bytes for sql. + */ + public com.google.protobuf.ByteString getSqlBytes() { + java.lang.Object ref = sql_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sql_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The SQL string.
    +     * 
    + * + * string sql = 1; + * + * @param value The sql to set. + * @return This builder for chaining. + */ + public Builder setSql(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + sql_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The SQL string.
    +     * 
    + * + * string sql = 1; + * + * @return This builder for chaining. + */ + public Builder clearSql() { + sql_ = getDefaultInstance().getSql(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The SQL string.
    +     * 
    + * + * string sql = 1; + * + * @param value The bytes for sql to set. + * @return This builder for chaining. + */ + public Builder setSqlBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + sql_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.util.List params_ = + java.util.Collections.emptyList(); + + private void ensureParamsIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + params_ = + new java.util.ArrayList(params_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.QueryAction.Parameter, + com.google.spanner.executor.v1.QueryAction.Parameter.Builder, + com.google.spanner.executor.v1.QueryAction.ParameterOrBuilder> + paramsBuilder_; + + /** + * + * + *
    +     * Parameters for the SQL string.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + public java.util.List getParamsList() { + if (paramsBuilder_ == null) { + return java.util.Collections.unmodifiableList(params_); + } else { + return paramsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Parameters for the SQL string.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + public int getParamsCount() { + if (paramsBuilder_ == null) { + return params_.size(); + } else { + return paramsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Parameters for the SQL string.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + public com.google.spanner.executor.v1.QueryAction.Parameter getParams(int index) { + if (paramsBuilder_ == null) { + return params_.get(index); + } else { + return paramsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Parameters for the SQL string.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + public Builder setParams( + int index, com.google.spanner.executor.v1.QueryAction.Parameter value) { + if (paramsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureParamsIsMutable(); + params_.set(index, value); + onChanged(); + } else { + paramsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Parameters for the SQL string.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + public Builder setParams( + int index, com.google.spanner.executor.v1.QueryAction.Parameter.Builder builderForValue) { + if (paramsBuilder_ == null) { + ensureParamsIsMutable(); + params_.set(index, builderForValue.build()); + onChanged(); + } else { + paramsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Parameters for the SQL string.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + public Builder addParams(com.google.spanner.executor.v1.QueryAction.Parameter value) { + if (paramsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureParamsIsMutable(); + params_.add(value); + onChanged(); + } else { + paramsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Parameters for the SQL string.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + public Builder addParams( + int index, com.google.spanner.executor.v1.QueryAction.Parameter value) { + if (paramsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureParamsIsMutable(); + params_.add(index, value); + onChanged(); + } else { + paramsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Parameters for the SQL string.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + public Builder addParams( + com.google.spanner.executor.v1.QueryAction.Parameter.Builder builderForValue) { + if (paramsBuilder_ == null) { + ensureParamsIsMutable(); + params_.add(builderForValue.build()); + onChanged(); + } else { + paramsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Parameters for the SQL string.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + public Builder addParams( + int index, com.google.spanner.executor.v1.QueryAction.Parameter.Builder builderForValue) { + if (paramsBuilder_ == null) { + ensureParamsIsMutable(); + params_.add(index, builderForValue.build()); + onChanged(); + } else { + paramsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Parameters for the SQL string.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + public Builder addAllParams( + java.lang.Iterable values) { + if (paramsBuilder_ == null) { + ensureParamsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, params_); + onChanged(); + } else { + paramsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Parameters for the SQL string.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + public Builder clearParams() { + if (paramsBuilder_ == null) { + params_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + paramsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Parameters for the SQL string.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + public Builder removeParams(int index) { + if (paramsBuilder_ == null) { + ensureParamsIsMutable(); + params_.remove(index); + onChanged(); + } else { + paramsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Parameters for the SQL string.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + public com.google.spanner.executor.v1.QueryAction.Parameter.Builder getParamsBuilder( + int index) { + return internalGetParamsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Parameters for the SQL string.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + public com.google.spanner.executor.v1.QueryAction.ParameterOrBuilder getParamsOrBuilder( + int index) { + if (paramsBuilder_ == null) { + return params_.get(index); + } else { + return paramsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Parameters for the SQL string.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + public java.util.List + getParamsOrBuilderList() { + if (paramsBuilder_ != null) { + return paramsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(params_); + } + } + + /** + * + * + *
    +     * Parameters for the SQL string.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + public com.google.spanner.executor.v1.QueryAction.Parameter.Builder addParamsBuilder() { + return internalGetParamsFieldBuilder() + .addBuilder(com.google.spanner.executor.v1.QueryAction.Parameter.getDefaultInstance()); + } + + /** + * + * + *
    +     * Parameters for the SQL string.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + public com.google.spanner.executor.v1.QueryAction.Parameter.Builder addParamsBuilder( + int index) { + return internalGetParamsFieldBuilder() + .addBuilder( + index, com.google.spanner.executor.v1.QueryAction.Parameter.getDefaultInstance()); + } + + /** + * + * + *
    +     * Parameters for the SQL string.
    +     * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + public java.util.List + getParamsBuilderList() { + return internalGetParamsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.QueryAction.Parameter, + com.google.spanner.executor.v1.QueryAction.Parameter.Builder, + com.google.spanner.executor.v1.QueryAction.ParameterOrBuilder> + internalGetParamsFieldBuilder() { + if (paramsBuilder_ == null) { + paramsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.QueryAction.Parameter, + com.google.spanner.executor.v1.QueryAction.Parameter.Builder, + com.google.spanner.executor.v1.QueryAction.ParameterOrBuilder>( + params_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + params_ = null; + } + return paramsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.QueryAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.QueryAction) + private static final com.google.spanner.executor.v1.QueryAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.QueryAction(); + } + + public static com.google.spanner.executor.v1.QueryAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public QueryAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.QueryAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryActionOrBuilder.java new file mode 100644 index 000000000000..f1e479598c0e --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryActionOrBuilder.java @@ -0,0 +1,110 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface QueryActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.QueryAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The SQL string.
    +   * 
    + * + * string sql = 1; + * + * @return The sql. + */ + java.lang.String getSql(); + + /** + * + * + *
    +   * The SQL string.
    +   * 
    + * + * string sql = 1; + * + * @return The bytes for sql. + */ + com.google.protobuf.ByteString getSqlBytes(); + + /** + * + * + *
    +   * Parameters for the SQL string.
    +   * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + java.util.List getParamsList(); + + /** + * + * + *
    +   * Parameters for the SQL string.
    +   * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + com.google.spanner.executor.v1.QueryAction.Parameter getParams(int index); + + /** + * + * + *
    +   * Parameters for the SQL string.
    +   * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + int getParamsCount(); + + /** + * + * + *
    +   * Parameters for the SQL string.
    +   * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + java.util.List + getParamsOrBuilderList(); + + /** + * + * + *
    +   * Parameters for the SQL string.
    +   * 
    + * + * repeated .google.spanner.executor.v1.QueryAction.Parameter params = 2; + */ + com.google.spanner.executor.v1.QueryAction.ParameterOrBuilder getParamsOrBuilder(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryCancellationAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryCancellationAction.java new file mode 100644 index 000000000000..ffe852b0941c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryCancellationAction.java @@ -0,0 +1,788 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Query cancellation action defines the long running query and the cancel query
    + * format depening on the Cloud database dialect.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.QueryCancellationAction} + */ +@com.google.protobuf.Generated +public final class QueryCancellationAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.QueryCancellationAction) + QueryCancellationActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "QueryCancellationAction"); + } + + // Use QueryCancellationAction.newBuilder() to construct. + private QueryCancellationAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private QueryCancellationAction() { + longRunningSql_ = ""; + cancelQuery_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryCancellationAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryCancellationAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.QueryCancellationAction.class, + com.google.spanner.executor.v1.QueryCancellationAction.Builder.class); + } + + public static final int LONG_RUNNING_SQL_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object longRunningSql_ = ""; + + /** + * + * + *
    +   * Long running query.
    +   * 
    + * + * string long_running_sql = 1; + * + * @return The longRunningSql. + */ + @java.lang.Override + public java.lang.String getLongRunningSql() { + java.lang.Object ref = longRunningSql_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + longRunningSql_ = s; + return s; + } + } + + /** + * + * + *
    +   * Long running query.
    +   * 
    + * + * string long_running_sql = 1; + * + * @return The bytes for longRunningSql. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLongRunningSqlBytes() { + java.lang.Object ref = longRunningSql_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + longRunningSql_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CANCEL_QUERY_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object cancelQuery_ = ""; + + /** + * + * + *
    +   * Format of the cancel query for the cloud database dialect.
    +   * 
    + * + * string cancel_query = 2; + * + * @return The cancelQuery. + */ + @java.lang.Override + public java.lang.String getCancelQuery() { + java.lang.Object ref = cancelQuery_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + cancelQuery_ = s; + return s; + } + } + + /** + * + * + *
    +   * Format of the cancel query for the cloud database dialect.
    +   * 
    + * + * string cancel_query = 2; + * + * @return The bytes for cancelQuery. + */ + @java.lang.Override + public com.google.protobuf.ByteString getCancelQueryBytes() { + java.lang.Object ref = cancelQuery_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + cancelQuery_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(longRunningSql_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, longRunningSql_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(cancelQuery_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, cancelQuery_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(longRunningSql_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, longRunningSql_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(cancelQuery_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, cancelQuery_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.QueryCancellationAction)) { + return 
super.equals(obj); + } + com.google.spanner.executor.v1.QueryCancellationAction other = + (com.google.spanner.executor.v1.QueryCancellationAction) obj; + + if (!getLongRunningSql().equals(other.getLongRunningSql())) return false; + if (!getCancelQuery().equals(other.getCancelQuery())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + LONG_RUNNING_SQL_FIELD_NUMBER; + hash = (53 * hash) + getLongRunningSql().hashCode(); + hash = (37 * hash) + CANCEL_QUERY_FIELD_NUMBER; + hash = (53 * hash) + getCancelQuery().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseFrom(byte[] 
data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.QueryCancellationAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Query cancellation action defines the long running query and the cancel query
    +   * format depending on the Cloud database dialect.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.QueryCancellationAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.QueryCancellationAction) + com.google.spanner.executor.v1.QueryCancellationActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryCancellationAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryCancellationAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.QueryCancellationAction.class, + com.google.spanner.executor.v1.QueryCancellationAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.QueryCancellationAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + longRunningSql_ = ""; + cancelQuery_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryCancellationAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.QueryCancellationAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.QueryCancellationAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.QueryCancellationAction build() { + 
com.google.spanner.executor.v1.QueryCancellationAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.QueryCancellationAction buildPartial() { + com.google.spanner.executor.v1.QueryCancellationAction result = + new com.google.spanner.executor.v1.QueryCancellationAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.QueryCancellationAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.longRunningSql_ = longRunningSql_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.cancelQuery_ = cancelQuery_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.QueryCancellationAction) { + return mergeFrom((com.google.spanner.executor.v1.QueryCancellationAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.QueryCancellationAction other) { + if (other == com.google.spanner.executor.v1.QueryCancellationAction.getDefaultInstance()) + return this; + if (!other.getLongRunningSql().isEmpty()) { + longRunningSql_ = other.longRunningSql_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getCancelQuery().isEmpty()) { + cancelQuery_ = other.cancelQuery_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry 
== null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + longRunningSql_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + cancelQuery_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object longRunningSql_ = ""; + + /** + * + * + *
    +     * Long running query.
    +     * 
    + * + * string long_running_sql = 1; + * + * @return The longRunningSql. + */ + public java.lang.String getLongRunningSql() { + java.lang.Object ref = longRunningSql_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + longRunningSql_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Long running query.
    +     * 
    + * + * string long_running_sql = 1; + * + * @return The bytes for longRunningSql. + */ + public com.google.protobuf.ByteString getLongRunningSqlBytes() { + java.lang.Object ref = longRunningSql_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + longRunningSql_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Long running query.
    +     * 
    + * + * string long_running_sql = 1; + * + * @param value The longRunningSql to set. + * @return This builder for chaining. + */ + public Builder setLongRunningSql(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + longRunningSql_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Long running query.
    +     * 
    + * + * string long_running_sql = 1; + * + * @return This builder for chaining. + */ + public Builder clearLongRunningSql() { + longRunningSql_ = getDefaultInstance().getLongRunningSql(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Long running query.
    +     * 
    + * + * string long_running_sql = 1; + * + * @param value The bytes for longRunningSql to set. + * @return This builder for chaining. + */ + public Builder setLongRunningSqlBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + longRunningSql_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object cancelQuery_ = ""; + + /** + * + * + *
    +     * Format of the cancel query for the cloud database dialect.
    +     * 
    + * + * string cancel_query = 2; + * + * @return The cancelQuery. + */ + public java.lang.String getCancelQuery() { + java.lang.Object ref = cancelQuery_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + cancelQuery_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Format of the cancel query for the cloud database dialect.
    +     * 
    + * + * string cancel_query = 2; + * + * @return The bytes for cancelQuery. + */ + public com.google.protobuf.ByteString getCancelQueryBytes() { + java.lang.Object ref = cancelQuery_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + cancelQuery_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Format of the cancel query for the cloud database dialect.
    +     * 
    + * + * string cancel_query = 2; + * + * @param value The cancelQuery to set. + * @return This builder for chaining. + */ + public Builder setCancelQuery(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + cancelQuery_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Format of the cancel query for the cloud database dialect.
    +     * 
    + * + * string cancel_query = 2; + * + * @return This builder for chaining. + */ + public Builder clearCancelQuery() { + cancelQuery_ = getDefaultInstance().getCancelQuery(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Format of the cancel query for the cloud database dialect.
    +     * 
    + * + * string cancel_query = 2; + * + * @param value The bytes for cancelQuery to set. + * @return This builder for chaining. + */ + public Builder setCancelQueryBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + cancelQuery_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.QueryCancellationAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.QueryCancellationAction) + private static final com.google.spanner.executor.v1.QueryCancellationAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.QueryCancellationAction(); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public QueryCancellationAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return 
PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.QueryCancellationAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryCancellationActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryCancellationActionOrBuilder.java new file mode 100644 index 000000000000..74bf2fe5e7fa --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryCancellationActionOrBuilder.java @@ -0,0 +1,80 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface QueryCancellationActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.QueryCancellationAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Long running query.
    +   * 
    + * + * string long_running_sql = 1; + * + * @return The longRunningSql. + */ + java.lang.String getLongRunningSql(); + + /** + * + * + *
    +   * Long running query.
    +   * 
    + * + * string long_running_sql = 1; + * + * @return The bytes for longRunningSql. + */ + com.google.protobuf.ByteString getLongRunningSqlBytes(); + + /** + * + * + *
    +   * Format of the cancel query for the cloud database dialect.
    +   * 
    + * + * string cancel_query = 2; + * + * @return The cancelQuery. + */ + java.lang.String getCancelQuery(); + + /** + * + * + *
    +   * Format of the cancel query for the cloud database dialect.
    +   * 
    + * + * string cancel_query = 2; + * + * @return The bytes for cancelQuery. + */ + com.google.protobuf.ByteString getCancelQueryBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryResult.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryResult.java new file mode 100644 index 000000000000..23bde12400c1 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryResult.java @@ -0,0 +1,1226 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * QueryResult contains result of a Query.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.QueryResult} + */ +@com.google.protobuf.Generated +public final class QueryResult extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.QueryResult) + QueryResultOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "QueryResult"); + } + + // Use QueryResult.newBuilder() to construct. + private QueryResult(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private QueryResult() { + row_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryResult_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryResult_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.QueryResult.class, + com.google.spanner.executor.v1.QueryResult.Builder.class); + } + + private int bitField0_; + public static final int ROW_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List row_; + + /** + * + * + *
    +   * Rows read. Each row is a struct with multiple fields, one for each column
    +   * in read result. All rows have the same type.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + @java.lang.Override + public java.util.List getRowList() { + return row_; + } + + /** + * + * + *
    +   * Rows read. Each row is a struct with multiple fields, one for each column
    +   * in read result. All rows have the same type.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + @java.lang.Override + public java.util.List + getRowOrBuilderList() { + return row_; + } + + /** + * + * + *
    +   * Rows read. Each row is a struct with multiple fields, one for each column
    +   * in read result. All rows have the same type.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + @java.lang.Override + public int getRowCount() { + return row_.size(); + } + + /** + * + * + *
    +   * Rows read. Each row is a struct with multiple fields, one for each column
    +   * in read result. All rows have the same type.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueList getRow(int index) { + return row_.get(index); + } + + /** + * + * + *
    +   * Rows read. Each row is a struct with multiple fields, one for each column
    +   * in read result. All rows have the same type.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueListOrBuilder getRowOrBuilder(int index) { + return row_.get(index); + } + + public static final int ROW_TYPE_FIELD_NUMBER = 2; + private com.google.spanner.v1.StructType rowType_; + + /** + * + * + *
    +   * The type of rows read. It must be set if at least one row was read.
    +   * 
    + * + * optional .google.spanner.v1.StructType row_type = 2; + * + * @return Whether the rowType field is set. + */ + @java.lang.Override + public boolean hasRowType() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The type of rows read. It must be set if at least one row was read.
    +   * 
    + * + * optional .google.spanner.v1.StructType row_type = 2; + * + * @return The rowType. + */ + @java.lang.Override + public com.google.spanner.v1.StructType getRowType() { + return rowType_ == null ? com.google.spanner.v1.StructType.getDefaultInstance() : rowType_; + } + + /** + * + * + *
    +   * The type of rows read. It must be set if at least one row was read.
    +   * 
    + * + * optional .google.spanner.v1.StructType row_type = 2; + */ + @java.lang.Override + public com.google.spanner.v1.StructTypeOrBuilder getRowTypeOrBuilder() { + return rowType_ == null ? com.google.spanner.v1.StructType.getDefaultInstance() : rowType_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < row_.size(); i++) { + output.writeMessage(1, row_.get(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getRowType()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < row_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, row_.get(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getRowType()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.QueryResult)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.QueryResult other = + (com.google.spanner.executor.v1.QueryResult) obj; + + if (!getRowList().equals(other.getRowList())) return false; + if (hasRowType() != other.hasRowType()) return false; + if (hasRowType()) { + if (!getRowType().equals(other.getRowType())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + 
@java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getRowCount() > 0) { + hash = (37 * hash) + ROW_FIELD_NUMBER; + hash = (53 * hash) + getRowList().hashCode(); + } + if (hasRowType()) { + hash = (37 * hash) + ROW_TYPE_FIELD_NUMBER; + hash = (53 * hash) + getRowType().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.QueryResult parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.QueryResult parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryResult parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.QueryResult parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryResult parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.QueryResult parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.executor.v1.QueryResult parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.QueryResult parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryResult parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.QueryResult parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryResult parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.QueryResult parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.QueryResult prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + 
return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * QueryResult contains result of a Query.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.QueryResult} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.QueryResult) + com.google.spanner.executor.v1.QueryResultOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryResult_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryResult_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.QueryResult.class, + com.google.spanner.executor.v1.QueryResult.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.QueryResult.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetRowFieldBuilder(); + internalGetRowTypeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (rowBuilder_ == null) { + row_ = java.util.Collections.emptyList(); + } else { + row_ = null; + rowBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + rowType_ = null; + if (rowTypeBuilder_ != null) { + rowTypeBuilder_.dispose(); + rowTypeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + 
.internal_static_google_spanner_executor_v1_QueryResult_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.QueryResult getDefaultInstanceForType() { + return com.google.spanner.executor.v1.QueryResult.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.QueryResult build() { + com.google.spanner.executor.v1.QueryResult result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.QueryResult buildPartial() { + com.google.spanner.executor.v1.QueryResult result = + new com.google.spanner.executor.v1.QueryResult(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.executor.v1.QueryResult result) { + if (rowBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + row_ = java.util.Collections.unmodifiableList(row_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.row_ = row_; + } else { + result.row_ = rowBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.executor.v1.QueryResult result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.rowType_ = rowTypeBuilder_ == null ? 
rowType_ : rowTypeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.QueryResult) { + return mergeFrom((com.google.spanner.executor.v1.QueryResult) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.QueryResult other) { + if (other == com.google.spanner.executor.v1.QueryResult.getDefaultInstance()) return this; + if (rowBuilder_ == null) { + if (!other.row_.isEmpty()) { + if (row_.isEmpty()) { + row_ = other.row_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureRowIsMutable(); + row_.addAll(other.row_); + } + onChanged(); + } + } else { + if (!other.row_.isEmpty()) { + if (rowBuilder_.isEmpty()) { + rowBuilder_.dispose(); + rowBuilder_ = null; + row_ = other.row_; + bitField0_ = (bitField0_ & ~0x00000001); + rowBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetRowFieldBuilder() + : null; + } else { + rowBuilder_.addAllMessages(other.row_); + } + } + } + if (other.hasRowType()) { + mergeRowType(other.getRowType()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.executor.v1.ValueList m = + input.readMessage( + com.google.spanner.executor.v1.ValueList.parser(), extensionRegistry); + if (rowBuilder_ == null) { + ensureRowIsMutable(); + row_.add(m); + } else { + rowBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + input.readMessage(internalGetRowTypeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List row_ = + java.util.Collections.emptyList(); + + private void ensureRowIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + row_ = new java.util.ArrayList(row_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + 
com.google.spanner.executor.v1.ValueListOrBuilder> + rowBuilder_; + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + public java.util.List getRowList() { + if (rowBuilder_ == null) { + return java.util.Collections.unmodifiableList(row_); + } else { + return rowBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + public int getRowCount() { + if (rowBuilder_ == null) { + return row_.size(); + } else { + return rowBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + public com.google.spanner.executor.v1.ValueList getRow(int index) { + if (rowBuilder_ == null) { + return row_.get(index); + } else { + return rowBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + public Builder setRow(int index, com.google.spanner.executor.v1.ValueList value) { + if (rowBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRowIsMutable(); + row_.set(index, value); + onChanged(); + } else { + rowBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + public Builder setRow( + int index, com.google.spanner.executor.v1.ValueList.Builder builderForValue) { + if (rowBuilder_ == null) { + ensureRowIsMutable(); + row_.set(index, builderForValue.build()); + onChanged(); + } else { + rowBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + public Builder addRow(com.google.spanner.executor.v1.ValueList value) { + if (rowBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRowIsMutable(); + row_.add(value); + onChanged(); + } else { + rowBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + public Builder addRow(int index, com.google.spanner.executor.v1.ValueList value) { + if (rowBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRowIsMutable(); + row_.add(index, value); + onChanged(); + } else { + rowBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + public Builder addRow(com.google.spanner.executor.v1.ValueList.Builder builderForValue) { + if (rowBuilder_ == null) { + ensureRowIsMutable(); + row_.add(builderForValue.build()); + onChanged(); + } else { + rowBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + public Builder addRow( + int index, com.google.spanner.executor.v1.ValueList.Builder builderForValue) { + if (rowBuilder_ == null) { + ensureRowIsMutable(); + row_.add(index, builderForValue.build()); + onChanged(); + } else { + rowBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + public Builder addAllRow( + java.lang.Iterable values) { + if (rowBuilder_ == null) { + ensureRowIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, row_); + onChanged(); + } else { + rowBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + public Builder clearRow() { + if (rowBuilder_ == null) { + row_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + rowBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + public Builder removeRow(int index) { + if (rowBuilder_ == null) { + ensureRowIsMutable(); + row_.remove(index); + onChanged(); + } else { + rowBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + public com.google.spanner.executor.v1.ValueList.Builder getRowBuilder(int index) { + return internalGetRowFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + public com.google.spanner.executor.v1.ValueListOrBuilder getRowOrBuilder(int index) { + if (rowBuilder_ == null) { + return row_.get(index); + } else { + return rowBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + public java.util.List + getRowOrBuilderList() { + if (rowBuilder_ != null) { + return rowBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(row_); + } + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + public com.google.spanner.executor.v1.ValueList.Builder addRowBuilder() { + return internalGetRowFieldBuilder() + .addBuilder(com.google.spanner.executor.v1.ValueList.getDefaultInstance()); + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + public com.google.spanner.executor.v1.ValueList.Builder addRowBuilder(int index) { + return internalGetRowFieldBuilder() + .addBuilder(index, com.google.spanner.executor.v1.ValueList.getDefaultInstance()); + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + public java.util.List getRowBuilderList() { + return internalGetRowFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder> + internalGetRowFieldBuilder() { + if (rowBuilder_ == null) { + rowBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder>( + row_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + row_ = null; + } + return rowBuilder_; + } + + private com.google.spanner.v1.StructType rowType_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.StructType, + com.google.spanner.v1.StructType.Builder, + com.google.spanner.v1.StructTypeOrBuilder> + rowTypeBuilder_; + + /** + * + * + *
    +     * The type of rows read. It must be set if at least one row was read.
    +     * 
    + * + * optional .google.spanner.v1.StructType row_type = 2; + * + * @return Whether the rowType field is set. + */ + public boolean hasRowType() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * The type of rows read. It must be set if at least one row was read.
    +     * 
    + * + * optional .google.spanner.v1.StructType row_type = 2; + * + * @return The rowType. + */ + public com.google.spanner.v1.StructType getRowType() { + if (rowTypeBuilder_ == null) { + return rowType_ == null ? com.google.spanner.v1.StructType.getDefaultInstance() : rowType_; + } else { + return rowTypeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The type of rows read. It must be set if at least one row was read.
    +     * 
    + * + * optional .google.spanner.v1.StructType row_type = 2; + */ + public Builder setRowType(com.google.spanner.v1.StructType value) { + if (rowTypeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rowType_ = value; + } else { + rowTypeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The type of rows read. It must be set if at least one row was read.
    +     * 
    + * + * optional .google.spanner.v1.StructType row_type = 2; + */ + public Builder setRowType(com.google.spanner.v1.StructType.Builder builderForValue) { + if (rowTypeBuilder_ == null) { + rowType_ = builderForValue.build(); + } else { + rowTypeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The type of rows read. It must be set if at least one row was read.
    +     * 
    + * + * optional .google.spanner.v1.StructType row_type = 2; + */ + public Builder mergeRowType(com.google.spanner.v1.StructType value) { + if (rowTypeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && rowType_ != null + && rowType_ != com.google.spanner.v1.StructType.getDefaultInstance()) { + getRowTypeBuilder().mergeFrom(value); + } else { + rowType_ = value; + } + } else { + rowTypeBuilder_.mergeFrom(value); + } + if (rowType_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The type of rows read. It must be set if at least one row was read.
    +     * 
    + * + * optional .google.spanner.v1.StructType row_type = 2; + */ + public Builder clearRowType() { + bitField0_ = (bitField0_ & ~0x00000002); + rowType_ = null; + if (rowTypeBuilder_ != null) { + rowTypeBuilder_.dispose(); + rowTypeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The type of rows read. It must be set if at least one row was read.
    +     * 
    + * + * optional .google.spanner.v1.StructType row_type = 2; + */ + public com.google.spanner.v1.StructType.Builder getRowTypeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetRowTypeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The type of rows read. It must be set if at least one row was read.
    +     * 
    + * + * optional .google.spanner.v1.StructType row_type = 2; + */ + public com.google.spanner.v1.StructTypeOrBuilder getRowTypeOrBuilder() { + if (rowTypeBuilder_ != null) { + return rowTypeBuilder_.getMessageOrBuilder(); + } else { + return rowType_ == null ? com.google.spanner.v1.StructType.getDefaultInstance() : rowType_; + } + } + + /** + * + * + *
    +     * The type of rows read. It must be set if at least one row was read.
    +     * 
    + * + * optional .google.spanner.v1.StructType row_type = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.StructType, + com.google.spanner.v1.StructType.Builder, + com.google.spanner.v1.StructTypeOrBuilder> + internalGetRowTypeFieldBuilder() { + if (rowTypeBuilder_ == null) { + rowTypeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.StructType, + com.google.spanner.v1.StructType.Builder, + com.google.spanner.v1.StructTypeOrBuilder>( + getRowType(), getParentForChildren(), isClean()); + rowType_ = null; + } + return rowTypeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.QueryResult) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.QueryResult) + private static final com.google.spanner.executor.v1.QueryResult DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.QueryResult(); + } + + public static com.google.spanner.executor.v1.QueryResult getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public QueryResult parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static 
com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.QueryResult getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryResultOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryResultOrBuilder.java new file mode 100644 index 000000000000..70fd618004ba --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryResultOrBuilder.java @@ -0,0 +1,125 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface QueryResultOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.QueryResult) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Rows read. Each row is a struct with multiple fields, one for each column
    +   * in read result. All rows have the same type.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + java.util.List getRowList(); + + /** + * + * + *
    +   * Rows read. Each row is a struct with multiple fields, one for each column
    +   * in read result. All rows have the same type.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + com.google.spanner.executor.v1.ValueList getRow(int index); + + /** + * + * + *
    +   * Rows read. Each row is a struct with multiple fields, one for each column
    +   * in read result. All rows have the same type.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + int getRowCount(); + + /** + * + * + *
    +   * Rows read. Each row is a struct with multiple fields, one for each column
    +   * in read result. All rows have the same type.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + java.util.List getRowOrBuilderList(); + + /** + * + * + *
    +   * Rows read. Each row is a struct with multiple fields, one for each column
    +   * in read result. All rows have the same type.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 1; + */ + com.google.spanner.executor.v1.ValueListOrBuilder getRowOrBuilder(int index); + + /** + * + * + *
    +   * The type of rows read. It must be set if at least one row was read.
    +   * 
    + * + * optional .google.spanner.v1.StructType row_type = 2; + * + * @return Whether the rowType field is set. + */ + boolean hasRowType(); + + /** + * + * + *
    +   * The type of rows read. It must be set if at least one row was read.
    +   * 
    + * + * optional .google.spanner.v1.StructType row_type = 2; + * + * @return The rowType. + */ + com.google.spanner.v1.StructType getRowType(); + + /** + * + * + *
    +   * The type of rows read. It must be set if at least one row was read.
    +   * 
    + * + * optional .google.spanner.v1.StructType row_type = 2; + */ + com.google.spanner.v1.StructTypeOrBuilder getRowTypeOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadAction.java new file mode 100644 index 000000000000..5a9151254086 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadAction.java @@ -0,0 +1,1491 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * A single read request.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ReadAction} + */ +@com.google.protobuf.Generated +public final class ReadAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.ReadAction) + ReadActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ReadAction"); + } + + // Use ReadAction.newBuilder() to construct. + private ReadAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ReadAction() { + table_ = ""; + index_ = ""; + column_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ReadAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ReadAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ReadAction.class, + com.google.spanner.executor.v1.ReadAction.Builder.class); + } + + private int bitField0_; + public static final int TABLE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object table_ = ""; + + /** + * + * + *
    +   * The table to read at.
    +   * 
    + * + * string table = 1; + * + * @return The table. + */ + @java.lang.Override + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } + } + + /** + * + * + *
    +   * The table to read at.
    +   * 
    + * + * string table = 1; + * + * @return The bytes for table. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INDEX_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object index_ = ""; + + /** + * + * + *
    +   * The index to read at if it's an index read.
    +   * 
    + * + * optional string index = 2; + * + * @return Whether the index field is set. + */ + @java.lang.Override + public boolean hasIndex() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The index to read at if it's an index read.
    +   * 
    + * + * optional string index = 2; + * + * @return The index. + */ + @java.lang.Override + public java.lang.String getIndex() { + java.lang.Object ref = index_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + index_ = s; + return s; + } + } + + /** + * + * + *
    +   * The index to read at if it's an index read.
    +   * 
    + * + * optional string index = 2; + * + * @return The bytes for index. + */ + @java.lang.Override + public com.google.protobuf.ByteString getIndexBytes() { + java.lang.Object ref = index_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + index_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int COLUMN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList column_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * List of columns must begin with the key columns used for the read.
    +   * 
    + * + * repeated string column = 3; + * + * @return A list containing the column. + */ + public com.google.protobuf.ProtocolStringList getColumnList() { + return column_; + } + + /** + * + * + *
    +   * List of columns must begin with the key columns used for the read.
    +   * 
    + * + * repeated string column = 3; + * + * @return The count of column. + */ + public int getColumnCount() { + return column_.size(); + } + + /** + * + * + *
    +   * List of columns must begin with the key columns used for the read.
    +   * 
    + * + * repeated string column = 3; + * + * @param index The index of the element to return. + * @return The column at the given index. + */ + public java.lang.String getColumn(int index) { + return column_.get(index); + } + + /** + * + * + *
    +   * List of columns must begin with the key columns used for the read.
    +   * 
    + * + * repeated string column = 3; + * + * @param index The index of the value to return. + * @return The bytes of the column at the given index. + */ + public com.google.protobuf.ByteString getColumnBytes(int index) { + return column_.getByteString(index); + } + + public static final int KEYS_FIELD_NUMBER = 4; + private com.google.spanner.executor.v1.KeySet keys_; + + /** + * + * + *
    +   * Keys for performing this read.
    +   * 
    + * + * .google.spanner.executor.v1.KeySet keys = 4; + * + * @return Whether the keys field is set. + */ + @java.lang.Override + public boolean hasKeys() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Keys for performing this read.
    +   * 
    + * + * .google.spanner.executor.v1.KeySet keys = 4; + * + * @return The keys. + */ + @java.lang.Override + public com.google.spanner.executor.v1.KeySet getKeys() { + return keys_ == null ? com.google.spanner.executor.v1.KeySet.getDefaultInstance() : keys_; + } + + /** + * + * + *
    +   * Keys for performing this read.
    +   * 
    + * + * .google.spanner.executor.v1.KeySet keys = 4; + */ + @java.lang.Override + public com.google.spanner.executor.v1.KeySetOrBuilder getKeysOrBuilder() { + return keys_ == null ? com.google.spanner.executor.v1.KeySet.getDefaultInstance() : keys_; + } + + public static final int LIMIT_FIELD_NUMBER = 5; + private int limit_ = 0; + + /** + * + * + *
    +   * Limit on number of rows to read. If set, must be positive.
    +   * 
    + * + * int32 limit = 5; + * + * @return The limit. + */ + @java.lang.Override + public int getLimit() { + return limit_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(table_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, table_); + } + if (((bitField0_ & 0x00000001) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, index_); + } + for (int i = 0; i < column_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, column_.getRaw(i)); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(4, getKeys()); + } + if (limit_ != 0) { + output.writeInt32(5, limit_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(table_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, table_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, index_); + } + { + int dataSize = 0; + for (int i = 0; i < column_.size(); i++) { + dataSize += computeStringSizeNoTag(column_.getRaw(i)); + } + size += dataSize; + size += 1 * getColumnList().size(); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getKeys()); + } + if (limit_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(5, limit_); + } + size += getUnknownFields().getSerializedSize(); + 
memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.ReadAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.ReadAction other = + (com.google.spanner.executor.v1.ReadAction) obj; + + if (!getTable().equals(other.getTable())) return false; + if (hasIndex() != other.hasIndex()) return false; + if (hasIndex()) { + if (!getIndex().equals(other.getIndex())) return false; + } + if (!getColumnList().equals(other.getColumnList())) return false; + if (hasKeys() != other.hasKeys()) return false; + if (hasKeys()) { + if (!getKeys().equals(other.getKeys())) return false; + } + if (getLimit() != other.getLimit()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + if (hasIndex()) { + hash = (37 * hash) + INDEX_FIELD_NUMBER; + hash = (53 * hash) + getIndex().hashCode(); + } + if (getColumnCount() > 0) { + hash = (37 * hash) + COLUMN_FIELD_NUMBER; + hash = (53 * hash) + getColumnList().hashCode(); + } + if (hasKeys()) { + hash = (37 * hash) + KEYS_FIELD_NUMBER; + hash = (53 * hash) + getKeys().hashCode(); + } + hash = (37 * hash) + LIMIT_FIELD_NUMBER; + hash = (53 * hash) + getLimit(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.ReadAction parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ReadAction parseFrom( + java.nio.ByteBuffer data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ReadAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ReadAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ReadAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ReadAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ReadAction parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ReadAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ReadAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ReadAction parseDelimitedFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ReadAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ReadAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.ReadAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * A single read request.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ReadAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.ReadAction) + com.google.spanner.executor.v1.ReadActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ReadAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ReadAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ReadAction.class, + com.google.spanner.executor.v1.ReadAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.ReadAction.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetKeysFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + table_ = ""; + index_ = ""; + column_ = com.google.protobuf.LazyStringArrayList.emptyList(); + keys_ = null; + if (keysBuilder_ != null) { + keysBuilder_.dispose(); + keysBuilder_ = null; + } + limit_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ReadAction_descriptor; + } + + @java.lang.Override + public 
com.google.spanner.executor.v1.ReadAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.ReadAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.ReadAction build() { + com.google.spanner.executor.v1.ReadAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ReadAction buildPartial() { + com.google.spanner.executor.v1.ReadAction result = + new com.google.spanner.executor.v1.ReadAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.ReadAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.table_ = table_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.index_ = index_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + column_.makeImmutable(); + result.column_ = column_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.keys_ = keysBuilder_ == null ? 
keys_ : keysBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.limit_ = limit_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.ReadAction) { + return mergeFrom((com.google.spanner.executor.v1.ReadAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.ReadAction other) { + if (other == com.google.spanner.executor.v1.ReadAction.getDefaultInstance()) return this; + if (!other.getTable().isEmpty()) { + table_ = other.table_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasIndex()) { + index_ = other.index_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.column_.isEmpty()) { + if (column_.isEmpty()) { + column_ = other.column_; + bitField0_ |= 0x00000004; + } else { + ensureColumnIsMutable(); + column_.addAll(other.column_); + } + onChanged(); + } + if (other.hasKeys()) { + mergeKeys(other.getKeys()); + } + if (other.getLimit() != 0) { + setLimit(other.getLimit()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + table_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + index_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + 
java.lang.String s = input.readStringRequireUtf8(); + ensureColumnIsMutable(); + column_.add(s); + break; + } // case 26 + case 34: + { + input.readMessage(internalGetKeysFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 40: + { + limit_ = input.readInt32(); + bitField0_ |= 0x00000010; + break; + } // case 40 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object table_ = ""; + + /** + * + * + *
    +     * The table to read at.
    +     * 
    + * + * string table = 1; + * + * @return The table. + */ + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The table to read at.
    +     * 
    + * + * string table = 1; + * + * @return The bytes for table. + */ + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The table to read at.
    +     * 
    + * + * string table = 1; + * + * @param value The table to set. + * @return This builder for chaining. + */ + public Builder setTable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The table to read at.
    +     * 
    + * + * string table = 1; + * + * @return This builder for chaining. + */ + public Builder clearTable() { + table_ = getDefaultInstance().getTable(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The table to read at.
    +     * 
    + * + * string table = 1; + * + * @param value The bytes for table to set. + * @return This builder for chaining. + */ + public Builder setTableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + table_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object index_ = ""; + + /** + * + * + *
    +     * The index to read at if it's an index read.
    +     * 
    + * + * optional string index = 2; + * + * @return Whether the index field is set. + */ + public boolean hasIndex() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * The index to read at if it's an index read.
    +     * 
    + * + * optional string index = 2; + * + * @return The index. + */ + public java.lang.String getIndex() { + java.lang.Object ref = index_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + index_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The index to read at if it's an index read.
    +     * 
    + * + * optional string index = 2; + * + * @return The bytes for index. + */ + public com.google.protobuf.ByteString getIndexBytes() { + java.lang.Object ref = index_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + index_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The index to read at if it's an index read.
    +     * 
    + * + * optional string index = 2; + * + * @param value The index to set. + * @return This builder for chaining. + */ + public Builder setIndex(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + index_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The index to read at if it's an index read.
    +     * 
    + * + * optional string index = 2; + * + * @return This builder for chaining. + */ + public Builder clearIndex() { + index_ = getDefaultInstance().getIndex(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The index to read at if it's an index read.
    +     * 
    + * + * optional string index = 2; + * + * @param value The bytes for index to set. + * @return This builder for chaining. + */ + public Builder setIndexBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + index_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList column_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureColumnIsMutable() { + if (!column_.isModifiable()) { + column_ = new com.google.protobuf.LazyStringArrayList(column_); + } + bitField0_ |= 0x00000004; + } + + /** + * + * + *
    +     * List of columns must begin with the key columns used for the read.
    +     * 
    + * + * repeated string column = 3; + * + * @return A list containing the column. + */ + public com.google.protobuf.ProtocolStringList getColumnList() { + column_.makeImmutable(); + return column_; + } + + /** + * + * + *
    +     * List of columns must begin with the key columns used for the read.
    +     * 
    + * + * repeated string column = 3; + * + * @return The count of column. + */ + public int getColumnCount() { + return column_.size(); + } + + /** + * + * + *
    +     * List of columns must begin with the key columns used for the read.
    +     * 
    + * + * repeated string column = 3; + * + * @param index The index of the element to return. + * @return The column at the given index. + */ + public java.lang.String getColumn(int index) { + return column_.get(index); + } + + /** + * + * + *
    +     * List of columns must begin with the key columns used for the read.
    +     * 
    + * + * repeated string column = 3; + * + * @param index The index of the value to return. + * @return The bytes of the column at the given index. + */ + public com.google.protobuf.ByteString getColumnBytes(int index) { + return column_.getByteString(index); + } + + /** + * + * + *
    +     * List of columns must begin with the key columns used for the read.
    +     * 
    + * + * repeated string column = 3; + * + * @param index The index to set the value at. + * @param value The column to set. + * @return This builder for chaining. + */ + public Builder setColumn(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnIsMutable(); + column_.set(index, value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * List of columns must begin with the key columns used for the read.
    +     * 
    + * + * repeated string column = 3; + * + * @param value The column to add. + * @return This builder for chaining. + */ + public Builder addColumn(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnIsMutable(); + column_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * List of columns must begin with the key columns used for the read.
    +     * 
    + * + * repeated string column = 3; + * + * @param values The column to add. + * @return This builder for chaining. + */ + public Builder addAllColumn(java.lang.Iterable values) { + ensureColumnIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, column_); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * List of columns must begin with the key columns used for the read.
    +     * 
    + * + * repeated string column = 3; + * + * @return This builder for chaining. + */ + public Builder clearColumn() { + column_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * List of columns must begin with the key columns used for the read.
    +     * 
    + * + * repeated string column = 3; + * + * @param value The bytes of the column to add. + * @return This builder for chaining. + */ + public Builder addColumnBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureColumnIsMutable(); + column_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.spanner.executor.v1.KeySet keys_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.KeySet, + com.google.spanner.executor.v1.KeySet.Builder, + com.google.spanner.executor.v1.KeySetOrBuilder> + keysBuilder_; + + /** + * + * + *
    +     * Keys for performing this read.
    +     * 
    + * + * .google.spanner.executor.v1.KeySet keys = 4; + * + * @return Whether the keys field is set. + */ + public boolean hasKeys() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Keys for performing this read.
    +     * 
    + * + * .google.spanner.executor.v1.KeySet keys = 4; + * + * @return The keys. + */ + public com.google.spanner.executor.v1.KeySet getKeys() { + if (keysBuilder_ == null) { + return keys_ == null ? com.google.spanner.executor.v1.KeySet.getDefaultInstance() : keys_; + } else { + return keysBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Keys for performing this read.
    +     * 
    + * + * .google.spanner.executor.v1.KeySet keys = 4; + */ + public Builder setKeys(com.google.spanner.executor.v1.KeySet value) { + if (keysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + keys_ = value; + } else { + keysBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Keys for performing this read.
    +     * 
    + * + * .google.spanner.executor.v1.KeySet keys = 4; + */ + public Builder setKeys(com.google.spanner.executor.v1.KeySet.Builder builderForValue) { + if (keysBuilder_ == null) { + keys_ = builderForValue.build(); + } else { + keysBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Keys for performing this read.
    +     * 
    + * + * .google.spanner.executor.v1.KeySet keys = 4; + */ + public Builder mergeKeys(com.google.spanner.executor.v1.KeySet value) { + if (keysBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && keys_ != null + && keys_ != com.google.spanner.executor.v1.KeySet.getDefaultInstance()) { + getKeysBuilder().mergeFrom(value); + } else { + keys_ = value; + } + } else { + keysBuilder_.mergeFrom(value); + } + if (keys_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Keys for performing this read.
    +     * 
    + * + * .google.spanner.executor.v1.KeySet keys = 4; + */ + public Builder clearKeys() { + bitField0_ = (bitField0_ & ~0x00000008); + keys_ = null; + if (keysBuilder_ != null) { + keysBuilder_.dispose(); + keysBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Keys for performing this read.
    +     * 
    + * + * .google.spanner.executor.v1.KeySet keys = 4; + */ + public com.google.spanner.executor.v1.KeySet.Builder getKeysBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetKeysFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Keys for performing this read.
    +     * 
    + * + * .google.spanner.executor.v1.KeySet keys = 4; + */ + public com.google.spanner.executor.v1.KeySetOrBuilder getKeysOrBuilder() { + if (keysBuilder_ != null) { + return keysBuilder_.getMessageOrBuilder(); + } else { + return keys_ == null ? com.google.spanner.executor.v1.KeySet.getDefaultInstance() : keys_; + } + } + + /** + * + * + *
    +     * Keys for performing this read.
    +     * 
    + * + * .google.spanner.executor.v1.KeySet keys = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.KeySet, + com.google.spanner.executor.v1.KeySet.Builder, + com.google.spanner.executor.v1.KeySetOrBuilder> + internalGetKeysFieldBuilder() { + if (keysBuilder_ == null) { + keysBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.KeySet, + com.google.spanner.executor.v1.KeySet.Builder, + com.google.spanner.executor.v1.KeySetOrBuilder>( + getKeys(), getParentForChildren(), isClean()); + keys_ = null; + } + return keysBuilder_; + } + + private int limit_; + + /** + * + * + *
    +     * Limit on number of rows to read. If set, must be positive.
    +     * 
    + * + * int32 limit = 5; + * + * @return The limit. + */ + @java.lang.Override + public int getLimit() { + return limit_; + } + + /** + * + * + *
    +     * Limit on number of rows to read. If set, must be positive.
    +     * 
    + * + * int32 limit = 5; + * + * @param value The limit to set. + * @return This builder for chaining. + */ + public Builder setLimit(int value) { + + limit_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Limit on number of rows to read. If set, must be positive.
    +     * 
    + * + * int32 limit = 5; + * + * @return This builder for chaining. + */ + public Builder clearLimit() { + bitField0_ = (bitField0_ & ~0x00000010); + limit_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.ReadAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.ReadAction) + private static final com.google.spanner.executor.v1.ReadAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.ReadAction(); + } + + public static com.google.spanner.executor.v1.ReadAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ReadAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadActionOrBuilder.java new file mode 100644 index 000000000000..a7233bc91acd --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadActionOrBuilder.java @@ -0,0 +1,197 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface ReadActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.ReadAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The table to read at.
    +   * 
    + * + * string table = 1; + * + * @return The table. + */ + java.lang.String getTable(); + + /** + * + * + *
    +   * The table to read at.
    +   * 
    + * + * string table = 1; + * + * @return The bytes for table. + */ + com.google.protobuf.ByteString getTableBytes(); + + /** + * + * + *
    +   * The index to read at if it's an index read.
    +   * 
    + * + * optional string index = 2; + * + * @return Whether the index field is set. + */ + boolean hasIndex(); + + /** + * + * + *
    +   * The index to read at if it's an index read.
    +   * 
    + * + * optional string index = 2; + * + * @return The index. + */ + java.lang.String getIndex(); + + /** + * + * + *
    +   * The index to read at if it's an index read.
    +   * 
    + * + * optional string index = 2; + * + * @return The bytes for index. + */ + com.google.protobuf.ByteString getIndexBytes(); + + /** + * + * + *
    +   * List of columns must begin with the key columns used for the read.
    +   * 
    + * + * repeated string column = 3; + * + * @return A list containing the column. + */ + java.util.List getColumnList(); + + /** + * + * + *
    +   * List of columns must begin with the key columns used for the read.
    +   * 
    + * + * repeated string column = 3; + * + * @return The count of column. + */ + int getColumnCount(); + + /** + * + * + *
    +   * List of columns must begin with the key columns used for the read.
    +   * 
    + * + * repeated string column = 3; + * + * @param index The index of the element to return. + * @return The column at the given index. + */ + java.lang.String getColumn(int index); + + /** + * + * + *
    +   * List of columns must begin with the key columns used for the read.
    +   * 
    + * + * repeated string column = 3; + * + * @param index The index of the value to return. + * @return The bytes of the column at the given index. + */ + com.google.protobuf.ByteString getColumnBytes(int index); + + /** + * + * + *
    +   * Keys for performing this read.
    +   * 
    + * + * .google.spanner.executor.v1.KeySet keys = 4; + * + * @return Whether the keys field is set. + */ + boolean hasKeys(); + + /** + * + * + *
    +   * Keys for performing this read.
    +   * 
    + * + * .google.spanner.executor.v1.KeySet keys = 4; + * + * @return The keys. + */ + com.google.spanner.executor.v1.KeySet getKeys(); + + /** + * + * + *
    +   * Keys for performing this read.
    +   * 
    + * + * .google.spanner.executor.v1.KeySet keys = 4; + */ + com.google.spanner.executor.v1.KeySetOrBuilder getKeysOrBuilder(); + + /** + * + * + *
    +   * Limit on number of rows to read. If set, must be positive.
    +   * 
    + * + * int32 limit = 5; + * + * @return The limit. + */ + int getLimit(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadResult.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadResult.java new file mode 100644 index 000000000000..f7fc7d204e76 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadResult.java @@ -0,0 +1,1776 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * ReadResult contains rows read.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ReadResult} + */ +@com.google.protobuf.Generated +public final class ReadResult extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.ReadResult) + ReadResultOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ReadResult"); + } + + // Use ReadResult.newBuilder() to construct. + private ReadResult(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ReadResult() { + table_ = ""; + index_ = ""; + row_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ReadResult_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ReadResult_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ReadResult.class, + com.google.spanner.executor.v1.ReadResult.Builder.class); + } + + private int bitField0_; + public static final int TABLE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object table_ = ""; + + /** + * + * + *
    +   * Table name.
    +   * 
    + * + * string table = 1; + * + * @return The table. + */ + @java.lang.Override + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } + } + + /** + * + * + *
    +   * Table name.
    +   * 
    + * + * string table = 1; + * + * @return The bytes for table. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INDEX_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object index_ = ""; + + /** + * + * + *
    +   * Index name, if read from an index.
    +   * 
    + * + * optional string index = 2; + * + * @return Whether the index field is set. + */ + @java.lang.Override + public boolean hasIndex() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Index name, if read from an index.
    +   * 
    + * + * optional string index = 2; + * + * @return The index. + */ + @java.lang.Override + public java.lang.String getIndex() { + java.lang.Object ref = index_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + index_ = s; + return s; + } + } + + /** + * + * + *
    +   * Index name, if read from an index.
    +   * 
    + * + * optional string index = 2; + * + * @return The bytes for index. + */ + @java.lang.Override + public com.google.protobuf.ByteString getIndexBytes() { + java.lang.Object ref = index_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + index_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUEST_INDEX_FIELD_NUMBER = 3; + private int requestIndex_ = 0; + + /** + * + * + *
    +   * Request index (multiread only).
    +   * 
    + * + * optional int32 request_index = 3; + * + * @return Whether the requestIndex field is set. + */ + @java.lang.Override + public boolean hasRequestIndex() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Request index (multiread only).
    +   * 
    + * + * optional int32 request_index = 3; + * + * @return The requestIndex. + */ + @java.lang.Override + public int getRequestIndex() { + return requestIndex_; + } + + public static final int ROW_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private java.util.List row_; + + /** + * + * + *
    +   * Rows read. Each row is a struct with multiple fields, one for each column
    +   * in read result. All rows have the same type.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + @java.lang.Override + public java.util.List getRowList() { + return row_; + } + + /** + * + * + *
    +   * Rows read. Each row is a struct with multiple fields, one for each column
    +   * in read result. All rows have the same type.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + @java.lang.Override + public java.util.List + getRowOrBuilderList() { + return row_; + } + + /** + * + * + *
    +   * Rows read. Each row is a struct with multiple fields, one for each column
    +   * in read result. All rows have the same type.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + @java.lang.Override + public int getRowCount() { + return row_.size(); + } + + /** + * + * + *
    +   * Rows read. Each row is a struct with multiple fields, one for each column
    +   * in read result. All rows have the same type.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueList getRow(int index) { + return row_.get(index); + } + + /** + * + * + *
    +   * Rows read. Each row is a struct with multiple fields, one for each column
    +   * in read result. All rows have the same type.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueListOrBuilder getRowOrBuilder(int index) { + return row_.get(index); + } + + public static final int ROW_TYPE_FIELD_NUMBER = 5; + private com.google.spanner.v1.StructType rowType_; + + /** + * + * + *
    +   * The type of rows read. It must be set if at least one row was read.
    +   * 
    + * + * optional .google.spanner.v1.StructType row_type = 5; + * + * @return Whether the rowType field is set. + */ + @java.lang.Override + public boolean hasRowType() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * The type of rows read. It must be set if at least one row was read.
    +   * 
    + * + * optional .google.spanner.v1.StructType row_type = 5; + * + * @return The rowType. + */ + @java.lang.Override + public com.google.spanner.v1.StructType getRowType() { + return rowType_ == null ? com.google.spanner.v1.StructType.getDefaultInstance() : rowType_; + } + + /** + * + * + *
    +   * The type of rows read. It must be set if at least one row was read.
    +   * 
    + * + * optional .google.spanner.v1.StructType row_type = 5; + */ + @java.lang.Override + public com.google.spanner.v1.StructTypeOrBuilder getRowTypeOrBuilder() { + return rowType_ == null ? com.google.spanner.v1.StructType.getDefaultInstance() : rowType_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(table_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, table_); + } + if (((bitField0_ & 0x00000001) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, index_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt32(3, requestIndex_); + } + for (int i = 0; i < row_.size(); i++) { + output.writeMessage(4, row_.get(i)); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(5, getRowType()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(table_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, table_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, index_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(3, requestIndex_); + } + for (int i = 0; i < row_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, row_.get(i)); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += 
com.google.protobuf.CodedOutputStream.computeMessageSize(5, getRowType()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.ReadResult)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.ReadResult other = + (com.google.spanner.executor.v1.ReadResult) obj; + + if (!getTable().equals(other.getTable())) return false; + if (hasIndex() != other.hasIndex()) return false; + if (hasIndex()) { + if (!getIndex().equals(other.getIndex())) return false; + } + if (hasRequestIndex() != other.hasRequestIndex()) return false; + if (hasRequestIndex()) { + if (getRequestIndex() != other.getRequestIndex()) return false; + } + if (!getRowList().equals(other.getRowList())) return false; + if (hasRowType() != other.hasRowType()) return false; + if (hasRowType()) { + if (!getRowType().equals(other.getRowType())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + if (hasIndex()) { + hash = (37 * hash) + INDEX_FIELD_NUMBER; + hash = (53 * hash) + getIndex().hashCode(); + } + if (hasRequestIndex()) { + hash = (37 * hash) + REQUEST_INDEX_FIELD_NUMBER; + hash = (53 * hash) + getRequestIndex(); + } + if (getRowCount() > 0) { + hash = (37 * hash) + ROW_FIELD_NUMBER; + hash = (53 * hash) + getRowList().hashCode(); + } + if (hasRowType()) { + hash = (37 * hash) + ROW_TYPE_FIELD_NUMBER; + hash = (53 * hash) + getRowType().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + 
public static com.google.spanner.executor.v1.ReadResult parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ReadResult parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ReadResult parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ReadResult parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ReadResult parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ReadResult parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ReadResult parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ReadResult parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ReadResult 
parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ReadResult parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ReadResult parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ReadResult parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.ReadResult prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * ReadResult contains rows read.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ReadResult} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.ReadResult) + com.google.spanner.executor.v1.ReadResultOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ReadResult_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ReadResult_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ReadResult.class, + com.google.spanner.executor.v1.ReadResult.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.ReadResult.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetRowFieldBuilder(); + internalGetRowTypeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + table_ = ""; + index_ = ""; + requestIndex_ = 0; + if (rowBuilder_ == null) { + row_ = java.util.Collections.emptyList(); + } else { + row_ = null; + rowBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + rowType_ = null; + if (rowTypeBuilder_ != null) { + rowTypeBuilder_.dispose(); + rowTypeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ReadResult_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ReadResult getDefaultInstanceForType() { + return com.google.spanner.executor.v1.ReadResult.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.ReadResult build() { + com.google.spanner.executor.v1.ReadResult result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ReadResult buildPartial() { + com.google.spanner.executor.v1.ReadResult result = + new com.google.spanner.executor.v1.ReadResult(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.executor.v1.ReadResult result) { + if (rowBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0)) { + row_ = java.util.Collections.unmodifiableList(row_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.row_ = row_; + } else { + result.row_ = rowBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.executor.v1.ReadResult result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.table_ = table_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.index_ = index_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.requestIndex_ = requestIndex_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.rowType_ = rowTypeBuilder_ == null ? 
rowType_ : rowTypeBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.ReadResult) { + return mergeFrom((com.google.spanner.executor.v1.ReadResult) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.ReadResult other) { + if (other == com.google.spanner.executor.v1.ReadResult.getDefaultInstance()) return this; + if (!other.getTable().isEmpty()) { + table_ = other.table_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasIndex()) { + index_ = other.index_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasRequestIndex()) { + setRequestIndex(other.getRequestIndex()); + } + if (rowBuilder_ == null) { + if (!other.row_.isEmpty()) { + if (row_.isEmpty()) { + row_ = other.row_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureRowIsMutable(); + row_.addAll(other.row_); + } + onChanged(); + } + } else { + if (!other.row_.isEmpty()) { + if (rowBuilder_.isEmpty()) { + rowBuilder_.dispose(); + rowBuilder_ = null; + row_ = other.row_; + bitField0_ = (bitField0_ & ~0x00000008); + rowBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetRowFieldBuilder() + : null; + } else { + rowBuilder_.addAllMessages(other.row_); + } + } + } + if (other.hasRowType()) { + mergeRowType(other.getRowType()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + table_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + index_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + requestIndex_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: + { + com.google.spanner.executor.v1.ValueList m = + input.readMessage( + com.google.spanner.executor.v1.ValueList.parser(), extensionRegistry); + if (rowBuilder_ == null) { + ensureRowIsMutable(); + row_.add(m); + } else { + rowBuilder_.addMessage(m); + } + break; + } // case 34 + case 42: + { + input.readMessage(internalGetRowTypeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object table_ = ""; + + /** + * + * + *
    +     * Table name.
    +     * 
    + * + * string table = 1; + * + * @return The table. + */ + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Table name.
    +     * 
    + * + * string table = 1; + * + * @return The bytes for table. + */ + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Table name.
    +     * 
    + * + * string table = 1; + * + * @param value The table to set. + * @return This builder for chaining. + */ + public Builder setTable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Table name.
    +     * 
    + * + * string table = 1; + * + * @return This builder for chaining. + */ + public Builder clearTable() { + table_ = getDefaultInstance().getTable(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Table name.
    +     * 
    + * + * string table = 1; + * + * @param value The bytes for table to set. + * @return This builder for chaining. + */ + public Builder setTableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + table_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object index_ = ""; + + /** + * + * + *
    +     * Index name, if read from an index.
    +     * 
    + * + * optional string index = 2; + * + * @return Whether the index field is set. + */ + public boolean hasIndex() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Index name, if read from an index.
    +     * 
    + * + * optional string index = 2; + * + * @return The index. + */ + public java.lang.String getIndex() { + java.lang.Object ref = index_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + index_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Index name, if read from an index.
    +     * 
    + * + * optional string index = 2; + * + * @return The bytes for index. + */ + public com.google.protobuf.ByteString getIndexBytes() { + java.lang.Object ref = index_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + index_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Index name, if read from an index.
    +     * 
    + * + * optional string index = 2; + * + * @param value The index to set. + * @return This builder for chaining. + */ + public Builder setIndex(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + index_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Index name, if read from an index.
    +     * 
    + * + * optional string index = 2; + * + * @return This builder for chaining. + */ + public Builder clearIndex() { + index_ = getDefaultInstance().getIndex(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Index name, if read from an index.
    +     * 
    + * + * optional string index = 2; + * + * @param value The bytes for index to set. + * @return This builder for chaining. + */ + public Builder setIndexBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + index_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private int requestIndex_; + + /** + * + * + *
    +     * Request index (multiread only).
    +     * 
    + * + * optional int32 request_index = 3; + * + * @return Whether the requestIndex field is set. + */ + @java.lang.Override + public boolean hasRequestIndex() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Request index (multiread only).
    +     * 
    + * + * optional int32 request_index = 3; + * + * @return The requestIndex. + */ + @java.lang.Override + public int getRequestIndex() { + return requestIndex_; + } + + /** + * + * + *
    +     * Request index (multiread only).
    +     * 
    + * + * optional int32 request_index = 3; + * + * @param value The requestIndex to set. + * @return This builder for chaining. + */ + public Builder setRequestIndex(int value) { + + requestIndex_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Request index (multiread only).
    +     * 
    + * + * optional int32 request_index = 3; + * + * @return This builder for chaining. + */ + public Builder clearRequestIndex() { + bitField0_ = (bitField0_ & ~0x00000004); + requestIndex_ = 0; + onChanged(); + return this; + } + + private java.util.List row_ = + java.util.Collections.emptyList(); + + private void ensureRowIsMutable() { + if (!((bitField0_ & 0x00000008) != 0)) { + row_ = new java.util.ArrayList(row_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder> + rowBuilder_; + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + public java.util.List getRowList() { + if (rowBuilder_ == null) { + return java.util.Collections.unmodifiableList(row_); + } else { + return rowBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + public int getRowCount() { + if (rowBuilder_ == null) { + return row_.size(); + } else { + return rowBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + public com.google.spanner.executor.v1.ValueList getRow(int index) { + if (rowBuilder_ == null) { + return row_.get(index); + } else { + return rowBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + public Builder setRow(int index, com.google.spanner.executor.v1.ValueList value) { + if (rowBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRowIsMutable(); + row_.set(index, value); + onChanged(); + } else { + rowBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + public Builder setRow( + int index, com.google.spanner.executor.v1.ValueList.Builder builderForValue) { + if (rowBuilder_ == null) { + ensureRowIsMutable(); + row_.set(index, builderForValue.build()); + onChanged(); + } else { + rowBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + public Builder addRow(com.google.spanner.executor.v1.ValueList value) { + if (rowBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRowIsMutable(); + row_.add(value); + onChanged(); + } else { + rowBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + public Builder addRow(int index, com.google.spanner.executor.v1.ValueList value) { + if (rowBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRowIsMutable(); + row_.add(index, value); + onChanged(); + } else { + rowBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + public Builder addRow(com.google.spanner.executor.v1.ValueList.Builder builderForValue) { + if (rowBuilder_ == null) { + ensureRowIsMutable(); + row_.add(builderForValue.build()); + onChanged(); + } else { + rowBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + public Builder addRow( + int index, com.google.spanner.executor.v1.ValueList.Builder builderForValue) { + if (rowBuilder_ == null) { + ensureRowIsMutable(); + row_.add(index, builderForValue.build()); + onChanged(); + } else { + rowBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + public Builder addAllRow( + java.lang.Iterable values) { + if (rowBuilder_ == null) { + ensureRowIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, row_); + onChanged(); + } else { + rowBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + public Builder clearRow() { + if (rowBuilder_ == null) { + row_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + rowBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + public Builder removeRow(int index) { + if (rowBuilder_ == null) { + ensureRowIsMutable(); + row_.remove(index); + onChanged(); + } else { + rowBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + public com.google.spanner.executor.v1.ValueList.Builder getRowBuilder(int index) { + return internalGetRowFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + public com.google.spanner.executor.v1.ValueListOrBuilder getRowOrBuilder(int index) { + if (rowBuilder_ == null) { + return row_.get(index); + } else { + return rowBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + public java.util.List + getRowOrBuilderList() { + if (rowBuilder_ != null) { + return rowBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(row_); + } + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + public com.google.spanner.executor.v1.ValueList.Builder addRowBuilder() { + return internalGetRowFieldBuilder() + .addBuilder(com.google.spanner.executor.v1.ValueList.getDefaultInstance()); + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + public com.google.spanner.executor.v1.ValueList.Builder addRowBuilder(int index) { + return internalGetRowFieldBuilder() + .addBuilder(index, com.google.spanner.executor.v1.ValueList.getDefaultInstance()); + } + + /** + * + * + *
    +     * Rows read. Each row is a struct with multiple fields, one for each column
    +     * in read result. All rows have the same type.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + public java.util.List getRowBuilderList() { + return internalGetRowFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder> + internalGetRowFieldBuilder() { + if (rowBuilder_ == null) { + rowBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder>( + row_, ((bitField0_ & 0x00000008) != 0), getParentForChildren(), isClean()); + row_ = null; + } + return rowBuilder_; + } + + private com.google.spanner.v1.StructType rowType_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.StructType, + com.google.spanner.v1.StructType.Builder, + com.google.spanner.v1.StructTypeOrBuilder> + rowTypeBuilder_; + + /** + * + * + *
    +     * The type of rows read. It must be set if at least one row was read.
    +     * 
    + * + * optional .google.spanner.v1.StructType row_type = 5; + * + * @return Whether the rowType field is set. + */ + public boolean hasRowType() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * The type of rows read. It must be set if at least one row was read.
    +     * 
    + * + * optional .google.spanner.v1.StructType row_type = 5; + * + * @return The rowType. + */ + public com.google.spanner.v1.StructType getRowType() { + if (rowTypeBuilder_ == null) { + return rowType_ == null ? com.google.spanner.v1.StructType.getDefaultInstance() : rowType_; + } else { + return rowTypeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The type of rows read. It must be set if at least one row was read.
    +     * 
    + * + * optional .google.spanner.v1.StructType row_type = 5; + */ + public Builder setRowType(com.google.spanner.v1.StructType value) { + if (rowTypeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rowType_ = value; + } else { + rowTypeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The type of rows read. It must be set if at least one row was read.
    +     * 
    + * + * optional .google.spanner.v1.StructType row_type = 5; + */ + public Builder setRowType(com.google.spanner.v1.StructType.Builder builderForValue) { + if (rowTypeBuilder_ == null) { + rowType_ = builderForValue.build(); + } else { + rowTypeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The type of rows read. It must be set if at least one row was read.
    +     * 
    + * + * optional .google.spanner.v1.StructType row_type = 5; + */ + public Builder mergeRowType(com.google.spanner.v1.StructType value) { + if (rowTypeBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && rowType_ != null + && rowType_ != com.google.spanner.v1.StructType.getDefaultInstance()) { + getRowTypeBuilder().mergeFrom(value); + } else { + rowType_ = value; + } + } else { + rowTypeBuilder_.mergeFrom(value); + } + if (rowType_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The type of rows read. It must be set if at least one row was read.
    +     * 
    + * + * optional .google.spanner.v1.StructType row_type = 5; + */ + public Builder clearRowType() { + bitField0_ = (bitField0_ & ~0x00000010); + rowType_ = null; + if (rowTypeBuilder_ != null) { + rowTypeBuilder_.dispose(); + rowTypeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The type of rows read. It must be set if at least one row was read.
    +     * 
    + * + * optional .google.spanner.v1.StructType row_type = 5; + */ + public com.google.spanner.v1.StructType.Builder getRowTypeBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetRowTypeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The type of rows read. It must be set if at least one row was read.
    +     * 
    + * + * optional .google.spanner.v1.StructType row_type = 5; + */ + public com.google.spanner.v1.StructTypeOrBuilder getRowTypeOrBuilder() { + if (rowTypeBuilder_ != null) { + return rowTypeBuilder_.getMessageOrBuilder(); + } else { + return rowType_ == null ? com.google.spanner.v1.StructType.getDefaultInstance() : rowType_; + } + } + + /** + * + * + *
    +     * The type of rows read. It must be set if at least one row was read.
    +     * 
    + * + * optional .google.spanner.v1.StructType row_type = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.StructType, + com.google.spanner.v1.StructType.Builder, + com.google.spanner.v1.StructTypeOrBuilder> + internalGetRowTypeFieldBuilder() { + if (rowTypeBuilder_ == null) { + rowTypeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.StructType, + com.google.spanner.v1.StructType.Builder, + com.google.spanner.v1.StructTypeOrBuilder>( + getRowType(), getParentForChildren(), isClean()); + rowType_ = null; + } + return rowTypeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.ReadResult) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.ReadResult) + private static final com.google.spanner.executor.v1.ReadResult DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.ReadResult(); + } + + public static com.google.spanner.executor.v1.ReadResult getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadResult parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static 
com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ReadResult getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadResultOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadResultOrBuilder.java new file mode 100644 index 000000000000..70b29905f121 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadResultOrBuilder.java @@ -0,0 +1,216 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface ReadResultOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.ReadResult) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Table name.
    +   * 
    + * + * string table = 1; + * + * @return The table. + */ + java.lang.String getTable(); + + /** + * + * + *
    +   * Table name.
    +   * 
    + * + * string table = 1; + * + * @return The bytes for table. + */ + com.google.protobuf.ByteString getTableBytes(); + + /** + * + * + *
    +   * Index name, if read from an index.
    +   * 
    + * + * optional string index = 2; + * + * @return Whether the index field is set. + */ + boolean hasIndex(); + + /** + * + * + *
    +   * Index name, if read from an index.
    +   * 
    + * + * optional string index = 2; + * + * @return The index. + */ + java.lang.String getIndex(); + + /** + * + * + *
    +   * Index name, if read from an index.
    +   * 
    + * + * optional string index = 2; + * + * @return The bytes for index. + */ + com.google.protobuf.ByteString getIndexBytes(); + + /** + * + * + *
    +   * Request index (multiread only).
    +   * 
    + * + * optional int32 request_index = 3; + * + * @return Whether the requestIndex field is set. + */ + boolean hasRequestIndex(); + + /** + * + * + *
    +   * Request index (multiread only).
    +   * 
    + * + * optional int32 request_index = 3; + * + * @return The requestIndex. + */ + int getRequestIndex(); + + /** + * + * + *
    +   * Rows read. Each row is a struct with multiple fields, one for each column
    +   * in read result. All rows have the same type.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + java.util.List getRowList(); + + /** + * + * + *
    +   * Rows read. Each row is a struct with multiple fields, one for each column
    +   * in read result. All rows have the same type.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + com.google.spanner.executor.v1.ValueList getRow(int index); + + /** + * + * + *
    +   * Rows read. Each row is a struct with multiple fields, one for each column
    +   * in read result. All rows have the same type.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + int getRowCount(); + + /** + * + * + *
    +   * Rows read. Each row is a struct with multiple fields, one for each column
    +   * in read result. All rows have the same type.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + java.util.List getRowOrBuilderList(); + + /** + * + * + *
    +   * Rows read. Each row is a struct with multiple fields, one for each column
    +   * in read result. All rows have the same type.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ValueList row = 4; + */ + com.google.spanner.executor.v1.ValueListOrBuilder getRowOrBuilder(int index); + + /** + * + * + *
    +   * The type of rows read. It must be set if at least one row was read.
    +   * 
    + * + * optional .google.spanner.v1.StructType row_type = 5; + * + * @return Whether the rowType field is set. + */ + boolean hasRowType(); + + /** + * + * + *
    +   * The type of rows read. It must be set if at least one row was read.
    +   * 
    + * + * optional .google.spanner.v1.StructType row_type = 5; + * + * @return The rowType. + */ + com.google.spanner.v1.StructType getRowType(); + + /** + * + * + *
    +   * The type of rows read. It must be set if at least one row was read.
    +   * 
    + * + * optional .google.spanner.v1.StructType row_type = 5; + */ + com.google.spanner.v1.StructTypeOrBuilder getRowTypeOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/RestoreCloudDatabaseAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/RestoreCloudDatabaseAction.java new file mode 100644 index 000000000000..41a1eaca23b4 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/RestoreCloudDatabaseAction.java @@ -0,0 +1,1679 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that restores a Cloud Spanner database from a backup.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.RestoreCloudDatabaseAction} + */ +@com.google.protobuf.Generated +public final class RestoreCloudDatabaseAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.RestoreCloudDatabaseAction) + RestoreCloudDatabaseActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "RestoreCloudDatabaseAction"); + } + + // Use RestoreCloudDatabaseAction.newBuilder() to construct. + private RestoreCloudDatabaseAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private RestoreCloudDatabaseAction() { + projectId_ = ""; + backupInstanceId_ = ""; + backupId_ = ""; + databaseInstanceId_ = ""; + databaseId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_RestoreCloudDatabaseAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_RestoreCloudDatabaseAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.RestoreCloudDatabaseAction.class, + com.google.spanner.executor.v1.RestoreCloudDatabaseAction.Builder.class); + } + + private int bitField0_; + public static final int PROJECT_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BACKUP_INSTANCE_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object backupInstanceId_ = ""; + + /** + * + * + *
    +   * Cloud instance ID (not path) containing the backup, e.g. "backup-instance".
    +   * 
    + * + * string backup_instance_id = 2; + * + * @return The backupInstanceId. + */ + @java.lang.Override + public java.lang.String getBackupInstanceId() { + java.lang.Object ref = backupInstanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupInstanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud instance ID (not path) containing the backup, e.g. "backup-instance".
    +   * 
    + * + * string backup_instance_id = 2; + * + * @return The bytes for backupInstanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBackupInstanceIdBytes() { + java.lang.Object ref = backupInstanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupInstanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BACKUP_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object backupId_ = ""; + + /** + * + * + *
    +   * The id of the backup from which to restore, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The backupId. + */ + @java.lang.Override + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupId_ = s; + return s; + } + } + + /** + * + * + *
    +   * The id of the backup from which to restore, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The bytes for backupId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DATABASE_INSTANCE_ID_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object databaseInstanceId_ = ""; + + /** + * + * + *
    +   * Cloud instance ID (not path) containing the database, e.g.
    +   * "database-instance".
    +   * 
    + * + * string database_instance_id = 4; + * + * @return The databaseInstanceId. + */ + @java.lang.Override + public java.lang.String getDatabaseInstanceId() { + java.lang.Object ref = databaseInstanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseInstanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud instance ID (not path) containing the database, e.g.
    +   * "database-instance".
    +   * 
    + * + * string database_instance_id = 4; + * + * @return The bytes for databaseInstanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseInstanceIdBytes() { + java.lang.Object ref = databaseInstanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseInstanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DATABASE_ID_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object databaseId_ = ""; + + /** + * + * + *
    +   * The id of the database to create and restore to, e.g. "db0". Note that this
    +   * database must not already exist.
    +   * 
    + * + * string database_id = 5; + * + * @return The databaseId. + */ + @java.lang.Override + public java.lang.String getDatabaseId() { + java.lang.Object ref = databaseId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseId_ = s; + return s; + } + } + + /** + * + * + *
    +   * The id of the database to create and restore to, e.g. "db0". Note that this
    +   * database must not already exist.
    +   * 
    + * + * string database_id = 5; + * + * @return The bytes for databaseId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseIdBytes() { + java.lang.Object ref = databaseId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ENCRYPTION_CONFIG_FIELD_NUMBER = 7; + private com.google.spanner.admin.database.v1.EncryptionConfig encryptionConfig_; + + /** + * + * + *
    +   * The KMS key(s) used to encrypt the restored database to be created if the
    +   * restored database should be CMEK protected.
    +   * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + * + * @return Whether the encryptionConfig field is set. + */ + @java.lang.Override + public boolean hasEncryptionConfig() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The KMS key(s) used to encrypt the restored database to be created if the
    +   * restored database should be CMEK protected.
    +   * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + * + * @return The encryptionConfig. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionConfig getEncryptionConfig() { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + + /** + * + * + *
    +   * The KMS key(s) used to encrypt the restored database to be created if the
    +   * restored database should be CMEK protected.
    +   * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder + getEncryptionConfigOrBuilder() { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(backupInstanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, backupInstanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(backupId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, backupId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseInstanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, databaseInstanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, databaseId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(7, getEncryptionConfig()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, projectId_); + } + if 
(!com.google.protobuf.GeneratedMessage.isStringEmpty(backupInstanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, backupInstanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(backupId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, backupId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseInstanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, databaseInstanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, databaseId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getEncryptionConfig()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.RestoreCloudDatabaseAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.RestoreCloudDatabaseAction other = + (com.google.spanner.executor.v1.RestoreCloudDatabaseAction) obj; + + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getBackupInstanceId().equals(other.getBackupInstanceId())) return false; + if (!getBackupId().equals(other.getBackupId())) return false; + if (!getDatabaseInstanceId().equals(other.getDatabaseInstanceId())) return false; + if (!getDatabaseId().equals(other.getDatabaseId())) return false; + if (hasEncryptionConfig() != other.hasEncryptionConfig()) return false; + if (hasEncryptionConfig()) { + if (!getEncryptionConfig().equals(other.getEncryptionConfig())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return 
memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + BACKUP_INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupInstanceId().hashCode(); + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); + hash = (37 * hash) + DATABASE_INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getDatabaseInstanceId().hashCode(); + hash = (37 * hash) + DATABASE_ID_FIELD_NUMBER; + hash = (53 * hash) + getDatabaseId().hashCode(); + if (hasEncryptionConfig()) { + hash = (37 * hash) + ENCRYPTION_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getEncryptionConfig().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.RestoreCloudDatabaseAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.RestoreCloudDatabaseAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.RestoreCloudDatabaseAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.RestoreCloudDatabaseAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.RestoreCloudDatabaseAction parseFrom(byte[] data) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.RestoreCloudDatabaseAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.RestoreCloudDatabaseAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.RestoreCloudDatabaseAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.RestoreCloudDatabaseAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.RestoreCloudDatabaseAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.RestoreCloudDatabaseAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.RestoreCloudDatabaseAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.RestoreCloudDatabaseAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that restores a Cloud Spanner database from a backup.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.RestoreCloudDatabaseAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.RestoreCloudDatabaseAction) + com.google.spanner.executor.v1.RestoreCloudDatabaseActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_RestoreCloudDatabaseAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_RestoreCloudDatabaseAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.RestoreCloudDatabaseAction.class, + com.google.spanner.executor.v1.RestoreCloudDatabaseAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.RestoreCloudDatabaseAction.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetEncryptionConfigFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + projectId_ = ""; + backupInstanceId_ = ""; + backupId_ = ""; + databaseInstanceId_ = ""; + databaseId_ = ""; + encryptionConfig_ = null; + if (encryptionConfigBuilder_ != null) { + encryptionConfigBuilder_.dispose(); + encryptionConfigBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + 
return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_RestoreCloudDatabaseAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.RestoreCloudDatabaseAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.RestoreCloudDatabaseAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.RestoreCloudDatabaseAction build() { + com.google.spanner.executor.v1.RestoreCloudDatabaseAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.RestoreCloudDatabaseAction buildPartial() { + com.google.spanner.executor.v1.RestoreCloudDatabaseAction result = + new com.google.spanner.executor.v1.RestoreCloudDatabaseAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.RestoreCloudDatabaseAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.projectId_ = projectId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.backupInstanceId_ = backupInstanceId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.backupId_ = backupId_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.databaseInstanceId_ = databaseInstanceId_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.databaseId_ = databaseId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000020) != 0)) { + result.encryptionConfig_ = + encryptionConfigBuilder_ == null ? 
encryptionConfig_ : encryptionConfigBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.RestoreCloudDatabaseAction) { + return mergeFrom((com.google.spanner.executor.v1.RestoreCloudDatabaseAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.RestoreCloudDatabaseAction other) { + if (other == com.google.spanner.executor.v1.RestoreCloudDatabaseAction.getDefaultInstance()) + return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getBackupInstanceId().isEmpty()) { + backupInstanceId_ = other.backupInstanceId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getBackupId().isEmpty()) { + backupId_ = other.backupId_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getDatabaseInstanceId().isEmpty()) { + databaseInstanceId_ = other.databaseInstanceId_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (!other.getDatabaseId().isEmpty()) { + databaseId_ = other.databaseId_; + bitField0_ |= 0x00000010; + onChanged(); + } + if (other.hasEncryptionConfig()) { + mergeEncryptionConfig(other.getEncryptionConfig()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + 
projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + backupInstanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + backupId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + databaseInstanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + databaseId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 58: + { + input.readMessage( + internalGetEncryptionConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000020; + break; + } // case 58 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object backupInstanceId_ = ""; + + /** + * + * + *
    +     * Cloud instance ID (not path) containing the backup, e.g. "backup-instance".
    +     * 
    + * + * string backup_instance_id = 2; + * + * @return The backupInstanceId. + */ + public java.lang.String getBackupInstanceId() { + java.lang.Object ref = backupInstanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupInstanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path) containing the backup, e.g. "backup-instance".
    +     * 
    + * + * string backup_instance_id = 2; + * + * @return The bytes for backupInstanceId. + */ + public com.google.protobuf.ByteString getBackupInstanceIdBytes() { + java.lang.Object ref = backupInstanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupInstanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path) containing the backup, e.g. "backup-instance".
    +     * 
    + * + * string backup_instance_id = 2; + * + * @param value The backupInstanceId to set. + * @return This builder for chaining. + */ + public Builder setBackupInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + backupInstanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path) containing the backup, e.g. "backup-instance".
    +     * 
    + * + * string backup_instance_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearBackupInstanceId() { + backupInstanceId_ = getDefaultInstance().getBackupInstanceId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path) containing the backup, e.g. "backup-instance".
    +     * 
    + * + * string backup_instance_id = 2; + * + * @param value The bytes for backupInstanceId to set. + * @return This builder for chaining. + */ + public Builder setBackupInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + backupInstanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object backupId_ = ""; + + /** + * + * + *
    +     * The id of the backup from which to restore, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @return The backupId. + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The id of the backup from which to restore, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @return The bytes for backupId. + */ + public com.google.protobuf.ByteString getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The id of the backup from which to restore, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @param value The backupId to set. + * @return This builder for chaining. + */ + public Builder setBackupId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + backupId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The id of the backup from which to restore, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @return This builder for chaining. + */ + public Builder clearBackupId() { + backupId_ = getDefaultInstance().getBackupId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The id of the backup from which to restore, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @param value The bytes for backupId to set. + * @return This builder for chaining. + */ + public Builder setBackupIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + backupId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.lang.Object databaseInstanceId_ = ""; + + /** + * + * + *
    +     * Cloud instance ID (not path) containing the database, e.g.
    +     * "database-instance".
    +     * 
    + * + * string database_instance_id = 4; + * + * @return The databaseInstanceId. + */ + public java.lang.String getDatabaseInstanceId() { + java.lang.Object ref = databaseInstanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseInstanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path) containing the database, e.g.
    +     * "database-instance".
    +     * 
    + * + * string database_instance_id = 4; + * + * @return The bytes for databaseInstanceId. + */ + public com.google.protobuf.ByteString getDatabaseInstanceIdBytes() { + java.lang.Object ref = databaseInstanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseInstanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path) containing the database, e.g.
    +     * "database-instance".
    +     * 
    + * + * string database_instance_id = 4; + * + * @param value The databaseInstanceId to set. + * @return This builder for chaining. + */ + public Builder setDatabaseInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + databaseInstanceId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path) containing the database, e.g.
    +     * "database-instance".
    +     * 
    + * + * string database_instance_id = 4; + * + * @return This builder for chaining. + */ + public Builder clearDatabaseInstanceId() { + databaseInstanceId_ = getDefaultInstance().getDatabaseInstanceId(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path) containing the database, e.g.
    +     * "database-instance".
    +     * 
    + * + * string database_instance_id = 4; + * + * @param value The bytes for databaseInstanceId to set. + * @return This builder for chaining. + */ + public Builder setDatabaseInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + databaseInstanceId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private java.lang.Object databaseId_ = ""; + + /** + * + * + *
    +     * The id of the database to create and restore to, e.g. "db0". Note that this
    +     * database must not already exist.
    +     * 
    + * + * string database_id = 5; + * + * @return The databaseId. + */ + public java.lang.String getDatabaseId() { + java.lang.Object ref = databaseId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The id of the database to create and restore to, e.g. "db0". Note that this
    +     * database must not already exist.
    +     * 
    + * + * string database_id = 5; + * + * @return The bytes for databaseId. + */ + public com.google.protobuf.ByteString getDatabaseIdBytes() { + java.lang.Object ref = databaseId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The id of the database to create and restore to, e.g. "db0". Note that this
    +     * database must not already exist.
    +     * 
    + * + * string database_id = 5; + * + * @param value The databaseId to set. + * @return This builder for chaining. + */ + public Builder setDatabaseId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + databaseId_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The id of the database to create and restore to, e.g. "db0". Note that this
    +     * database must not already exist.
    +     * 
    + * + * string database_id = 5; + * + * @return This builder for chaining. + */ + public Builder clearDatabaseId() { + databaseId_ = getDefaultInstance().getDatabaseId(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The id of the database to create and restore to, e.g. "db0". Note that this
    +     * database must not already exist.
    +     * 
    + * + * string database_id = 5; + * + * @param value The bytes for databaseId to set. + * @return This builder for chaining. + */ + public Builder setDatabaseIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + databaseId_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + private com.google.spanner.admin.database.v1.EncryptionConfig encryptionConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionConfig, + com.google.spanner.admin.database.v1.EncryptionConfig.Builder, + com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder> + encryptionConfigBuilder_; + + /** + * + * + *
    +     * The KMS key(s) used to encrypt the restored database to be created if the
    +     * restored database should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + * + * @return Whether the encryptionConfig field is set. + */ + public boolean hasEncryptionConfig() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
    +     * The KMS key(s) used to encrypt the restored database to be created if the
    +     * restored database should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + * + * @return The encryptionConfig. + */ + public com.google.spanner.admin.database.v1.EncryptionConfig getEncryptionConfig() { + if (encryptionConfigBuilder_ == null) { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } else { + return encryptionConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The KMS key(s) used to encrypt the restored database to be created if the
    +     * restored database should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + */ + public Builder setEncryptionConfig( + com.google.spanner.admin.database.v1.EncryptionConfig value) { + if (encryptionConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + encryptionConfig_ = value; + } else { + encryptionConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The KMS key(s) used to encrypt the restored database to be created if the
    +     * restored database should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + */ + public Builder setEncryptionConfig( + com.google.spanner.admin.database.v1.EncryptionConfig.Builder builderForValue) { + if (encryptionConfigBuilder_ == null) { + encryptionConfig_ = builderForValue.build(); + } else { + encryptionConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The KMS key(s) used to encrypt the restored database to be created if the
    +     * restored database should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + */ + public Builder mergeEncryptionConfig( + com.google.spanner.admin.database.v1.EncryptionConfig value) { + if (encryptionConfigBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0) + && encryptionConfig_ != null + && encryptionConfig_ + != com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance()) { + getEncryptionConfigBuilder().mergeFrom(value); + } else { + encryptionConfig_ = value; + } + } else { + encryptionConfigBuilder_.mergeFrom(value); + } + if (encryptionConfig_ != null) { + bitField0_ |= 0x00000020; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The KMS key(s) used to encrypt the restored database to be created if the
    +     * restored database should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + */ + public Builder clearEncryptionConfig() { + bitField0_ = (bitField0_ & ~0x00000020); + encryptionConfig_ = null; + if (encryptionConfigBuilder_ != null) { + encryptionConfigBuilder_.dispose(); + encryptionConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The KMS key(s) used to encrypt the restored database to be created if the
    +     * restored database should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + */ + public com.google.spanner.admin.database.v1.EncryptionConfig.Builder + getEncryptionConfigBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return internalGetEncryptionConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The KMS key(s) used to encrypt the restored database to be created if the
    +     * restored database should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + */ + public com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder + getEncryptionConfigOrBuilder() { + if (encryptionConfigBuilder_ != null) { + return encryptionConfigBuilder_.getMessageOrBuilder(); + } else { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.EncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + } + + /** + * + * + *
    +     * The KMS key(s) used to encrypt the restored database to be created if the
    +     * restored database should be CMEK protected.
    +     * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionConfig, + com.google.spanner.admin.database.v1.EncryptionConfig.Builder, + com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder> + internalGetEncryptionConfigFieldBuilder() { + if (encryptionConfigBuilder_ == null) { + encryptionConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.database.v1.EncryptionConfig, + com.google.spanner.admin.database.v1.EncryptionConfig.Builder, + com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder>( + getEncryptionConfig(), getParentForChildren(), isClean()); + encryptionConfig_ = null; + } + return encryptionConfigBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.RestoreCloudDatabaseAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.RestoreCloudDatabaseAction) + private static final com.google.spanner.executor.v1.RestoreCloudDatabaseAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.RestoreCloudDatabaseAction(); + } + + public static com.google.spanner.executor.v1.RestoreCloudDatabaseAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RestoreCloudDatabaseAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.RestoreCloudDatabaseAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/RestoreCloudDatabaseActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/RestoreCloudDatabaseActionOrBuilder.java new file mode 100644 index 000000000000..ebded7a2c12e --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/RestoreCloudDatabaseActionOrBuilder.java @@ -0,0 +1,202 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface RestoreCloudDatabaseActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.RestoreCloudDatabaseAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * Cloud instance ID (not path) containing the backup, e.g. "backup-instance".
    +   * 
    + * + * string backup_instance_id = 2; + * + * @return The backupInstanceId. + */ + java.lang.String getBackupInstanceId(); + + /** + * + * + *
    +   * Cloud instance ID (not path) containing the backup, e.g. "backup-instance".
    +   * 
    + * + * string backup_instance_id = 2; + * + * @return The bytes for backupInstanceId. + */ + com.google.protobuf.ByteString getBackupInstanceIdBytes(); + + /** + * + * + *
    +   * The id of the backup from which to restore, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The backupId. + */ + java.lang.String getBackupId(); + + /** + * + * + *
    +   * The id of the backup from which to restore, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The bytes for backupId. + */ + com.google.protobuf.ByteString getBackupIdBytes(); + + /** + * + * + *
    +   * Cloud instance ID (not path) containing the database, e.g.
    +   * "database-instance".
    +   * 
    + * + * string database_instance_id = 4; + * + * @return The databaseInstanceId. + */ + java.lang.String getDatabaseInstanceId(); + + /** + * + * + *
    +   * Cloud instance ID (not path) containing the database, e.g.
    +   * "database-instance".
    +   * 
    + * + * string database_instance_id = 4; + * + * @return The bytes for databaseInstanceId. + */ + com.google.protobuf.ByteString getDatabaseInstanceIdBytes(); + + /** + * + * + *
    +   * The id of the database to create and restore to, e.g. "db0". Note that this
    +   * database must not already exist.
    +   * 
    + * + * string database_id = 5; + * + * @return The databaseId. + */ + java.lang.String getDatabaseId(); + + /** + * + * + *
    +   * The id of the database to create and restore to, e.g. "db0". Note that this
    +   * database must not already exist.
    +   * 
    + * + * string database_id = 5; + * + * @return The bytes for databaseId. + */ + com.google.protobuf.ByteString getDatabaseIdBytes(); + + /** + * + * + *
    +   * The KMS key(s) used to encrypt the restored database to be created if the
    +   * restored database should be CMEK protected.
    +   * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + * + * @return Whether the encryptionConfig field is set. + */ + boolean hasEncryptionConfig(); + + /** + * + * + *
    +   * The KMS key(s) used to encrypt the restored database to be created if the
    +   * restored database should be CMEK protected.
    +   * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + * + * @return The encryptionConfig. + */ + com.google.spanner.admin.database.v1.EncryptionConfig getEncryptionConfig(); + + /** + * + * + *
    +   * The KMS key(s) used to encrypt the restored database to be created if the
    +   * restored database should be CMEK protected.
    +   * 
    + * + * .google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; + */ + com.google.spanner.admin.database.v1.EncryptionConfigOrBuilder getEncryptionConfigOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SessionPoolOptions.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SessionPoolOptions.java new file mode 100644 index 000000000000..33cd6d845fcf --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SessionPoolOptions.java @@ -0,0 +1,507 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Options for the session pool used by the DatabaseClient.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.SessionPoolOptions} + */ +@com.google.protobuf.Generated +public final class SessionPoolOptions extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.SessionPoolOptions) + SessionPoolOptionsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "SessionPoolOptions"); + } + + // Use SessionPoolOptions.newBuilder() to construct. + private SessionPoolOptions(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private SessionPoolOptions() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SessionPoolOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SessionPoolOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.SessionPoolOptions.class, + com.google.spanner.executor.v1.SessionPoolOptions.Builder.class); + } + + public static final int USE_MULTIPLEXED_FIELD_NUMBER = 1; + private boolean useMultiplexed_ = false; + + /** + * + * + *
    +   * passing this as true, will make applicable RPCs use multiplexed sessions
    +   * instead of regular sessions
    +   * 
    + * + * bool use_multiplexed = 1; + * + * @return The useMultiplexed. + */ + @java.lang.Override + public boolean getUseMultiplexed() { + return useMultiplexed_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (useMultiplexed_ != false) { + output.writeBool(1, useMultiplexed_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (useMultiplexed_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(1, useMultiplexed_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.SessionPoolOptions)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.SessionPoolOptions other = + (com.google.spanner.executor.v1.SessionPoolOptions) obj; + + if (getUseMultiplexed() != other.getUseMultiplexed()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + USE_MULTIPLEXED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getUseMultiplexed()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static 
com.google.spanner.executor.v1.SessionPoolOptions parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.SessionPoolOptions parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SessionPoolOptions parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.SessionPoolOptions parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SessionPoolOptions parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.SessionPoolOptions parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SessionPoolOptions parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.SessionPoolOptions parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public 
static com.google.spanner.executor.v1.SessionPoolOptions parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.SessionPoolOptions parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SessionPoolOptions parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.SessionPoolOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.SessionPoolOptions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Options for the session pool used by the DatabaseClient.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.SessionPoolOptions} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.SessionPoolOptions) + com.google.spanner.executor.v1.SessionPoolOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SessionPoolOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SessionPoolOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.SessionPoolOptions.class, + com.google.spanner.executor.v1.SessionPoolOptions.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.SessionPoolOptions.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + useMultiplexed_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SessionPoolOptions_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.SessionPoolOptions getDefaultInstanceForType() { + return com.google.spanner.executor.v1.SessionPoolOptions.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.SessionPoolOptions build() { + com.google.spanner.executor.v1.SessionPoolOptions result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.SessionPoolOptions buildPartial() { + com.google.spanner.executor.v1.SessionPoolOptions result = + new com.google.spanner.executor.v1.SessionPoolOptions(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.SessionPoolOptions result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.useMultiplexed_ = useMultiplexed_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.SessionPoolOptions) { + return mergeFrom((com.google.spanner.executor.v1.SessionPoolOptions) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.SessionPoolOptions other) { + if (other == com.google.spanner.executor.v1.SessionPoolOptions.getDefaultInstance()) + return this; + if (other.getUseMultiplexed() != false) { + setUseMultiplexed(other.getUseMultiplexed()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + useMultiplexed_ = input.readBool(); + bitField0_ |= 0x00000001; + break; + } // case 8 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + 
break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private boolean useMultiplexed_; + + /** + * + * + *
    +     * passing this as true, will make applicable RPCs use multiplexed sessions
    +     * instead of regular sessions
    +     * 
    + * + * bool use_multiplexed = 1; + * + * @return The useMultiplexed. + */ + @java.lang.Override + public boolean getUseMultiplexed() { + return useMultiplexed_; + } + + /** + * + * + *
    +     * passing this as true, will make applicable RPCs use multiplexed sessions
    +     * instead of regular sessions
    +     * 
    + * + * bool use_multiplexed = 1; + * + * @param value The useMultiplexed to set. + * @return This builder for chaining. + */ + public Builder setUseMultiplexed(boolean value) { + + useMultiplexed_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * passing this as true, will make applicable RPCs use multiplexed sessions
    +     * instead of regular sessions
    +     * 
    + * + * bool use_multiplexed = 1; + * + * @return This builder for chaining. + */ + public Builder clearUseMultiplexed() { + bitField0_ = (bitField0_ & ~0x00000001); + useMultiplexed_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.SessionPoolOptions) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.SessionPoolOptions) + private static final com.google.spanner.executor.v1.SessionPoolOptions DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.SessionPoolOptions(); + } + + public static com.google.spanner.executor.v1.SessionPoolOptions getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SessionPoolOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.SessionPoolOptions getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SessionPoolOptionsOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SessionPoolOptionsOrBuilder.java new file mode 100644 index 000000000000..bfa7bd42f901 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SessionPoolOptionsOrBuilder.java @@ -0,0 +1,42 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface SessionPoolOptionsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.SessionPoolOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * passing this as true, will make applicable RPCs use multiplexed sessions
    +   * instead of regular sessions
    +   * 
    + * + * bool use_multiplexed = 1; + * + * @return The useMultiplexed. + */ + boolean getUseMultiplexed(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAction.java new file mode 100644 index 000000000000..31f571f9569b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAction.java @@ -0,0 +1,6670 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * SpannerAction defines a primitive action that can be performed against
    + * Spanner, such as begin or commit a transaction, or perform a read or
    + * mutation.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.SpannerAction} + */ +@com.google.protobuf.Generated +public final class SpannerAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.SpannerAction) + SpannerActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "SpannerAction"); + } + + // Use SpannerAction.newBuilder() to construct. + private SpannerAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private SpannerAction() { + databasePath_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.SpannerAction.class, + com.google.spanner.executor.v1.SpannerAction.Builder.class); + } + + private int bitField0_; + private int actionCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object action_; + + public enum ActionCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + START(10), + FINISH(11), + READ(20), + QUERY(21), + MUTATION(22), + DML(23), + BATCH_DML(24), + WRITE(25), + PARTITIONED_UPDATE(27), + ADMIN(30), + START_BATCH_TXN(40), + CLOSE_BATCH_TXN(41), + GENERATE_DB_PARTITIONS_READ(42), + GENERATE_DB_PARTITIONS_QUERY(43), + 
EXECUTE_PARTITION(44), + EXECUTE_CHANGE_STREAM_QUERY(50), + QUERY_CANCELLATION(51), + ADAPT_MESSAGE(52), + ACTION_NOT_SET(0); + private final int value; + + private ActionCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ActionCase valueOf(int value) { + return forNumber(value); + } + + public static ActionCase forNumber(int value) { + switch (value) { + case 10: + return START; + case 11: + return FINISH; + case 20: + return READ; + case 21: + return QUERY; + case 22: + return MUTATION; + case 23: + return DML; + case 24: + return BATCH_DML; + case 25: + return WRITE; + case 27: + return PARTITIONED_UPDATE; + case 30: + return ADMIN; + case 40: + return START_BATCH_TXN; + case 41: + return CLOSE_BATCH_TXN; + case 42: + return GENERATE_DB_PARTITIONS_READ; + case 43: + return GENERATE_DB_PARTITIONS_QUERY; + case 44: + return EXECUTE_PARTITION; + case 50: + return EXECUTE_CHANGE_STREAM_QUERY; + case 51: + return QUERY_CANCELLATION; + case 52: + return ADAPT_MESSAGE; + case 0: + return ACTION_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public ActionCase getActionCase() { + return ActionCase.forNumber(actionCase_); + } + + public static final int DATABASE_PATH_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object databasePath_ = ""; + + /** + * + * + *
    +   * Database against which to perform action.
    +   * In a context where a series of actions take place, an action may omit
    +   * database path if it applies to the same database as the previous action.
    +   * 
    + * + * string database_path = 1; + * + * @return The databasePath. + */ + @java.lang.Override + public java.lang.String getDatabasePath() { + java.lang.Object ref = databasePath_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databasePath_ = s; + return s; + } + } + + /** + * + * + *
    +   * Database against which to perform action.
    +   * In a context where a series of actions take place, an action may omit
    +   * database path if it applies to the same database as the previous action.
    +   * 
    + * + * string database_path = 1; + * + * @return The bytes for databasePath. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabasePathBytes() { + java.lang.Object ref = databasePath_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databasePath_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SPANNER_OPTIONS_FIELD_NUMBER = 2; + private com.google.spanner.executor.v1.SpannerOptions spannerOptions_; + + /** + * + * + *
    +   * Configuration options for Spanner backend
    +   * 
    + * + * .google.spanner.executor.v1.SpannerOptions spanner_options = 2; + * + * @return Whether the spannerOptions field is set. + */ + @java.lang.Override + public boolean hasSpannerOptions() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Configuration options for Spanner backend
    +   * 
    + * + * .google.spanner.executor.v1.SpannerOptions spanner_options = 2; + * + * @return The spannerOptions. + */ + @java.lang.Override + public com.google.spanner.executor.v1.SpannerOptions getSpannerOptions() { + return spannerOptions_ == null + ? com.google.spanner.executor.v1.SpannerOptions.getDefaultInstance() + : spannerOptions_; + } + + /** + * + * + *
    +   * Configuration options for Spanner backend
    +   * 
    + * + * .google.spanner.executor.v1.SpannerOptions spanner_options = 2; + */ + @java.lang.Override + public com.google.spanner.executor.v1.SpannerOptionsOrBuilder getSpannerOptionsOrBuilder() { + return spannerOptions_ == null + ? com.google.spanner.executor.v1.SpannerOptions.getDefaultInstance() + : spannerOptions_; + } + + public static final int START_FIELD_NUMBER = 10; + + /** + * + * + *
    +   * Action to start a transaction.
    +   * 
    + * + * .google.spanner.executor.v1.StartTransactionAction start = 10; + * + * @return Whether the start field is set. + */ + @java.lang.Override + public boolean hasStart() { + return actionCase_ == 10; + } + + /** + * + * + *
    +   * Action to start a transaction.
    +   * 
    + * + * .google.spanner.executor.v1.StartTransactionAction start = 10; + * + * @return The start. + */ + @java.lang.Override + public com.google.spanner.executor.v1.StartTransactionAction getStart() { + if (actionCase_ == 10) { + return (com.google.spanner.executor.v1.StartTransactionAction) action_; + } + return com.google.spanner.executor.v1.StartTransactionAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action to start a transaction.
    +   * 
    + * + * .google.spanner.executor.v1.StartTransactionAction start = 10; + */ + @java.lang.Override + public com.google.spanner.executor.v1.StartTransactionActionOrBuilder getStartOrBuilder() { + if (actionCase_ == 10) { + return (com.google.spanner.executor.v1.StartTransactionAction) action_; + } + return com.google.spanner.executor.v1.StartTransactionAction.getDefaultInstance(); + } + + public static final int FINISH_FIELD_NUMBER = 11; + + /** + * + * + *
    +   * Action to finish a transaction.
    +   * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction finish = 11; + * + * @return Whether the finish field is set. + */ + @java.lang.Override + public boolean hasFinish() { + return actionCase_ == 11; + } + + /** + * + * + *
    +   * Action to finish a transaction.
    +   * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction finish = 11; + * + * @return The finish. + */ + @java.lang.Override + public com.google.spanner.executor.v1.FinishTransactionAction getFinish() { + if (actionCase_ == 11) { + return (com.google.spanner.executor.v1.FinishTransactionAction) action_; + } + return com.google.spanner.executor.v1.FinishTransactionAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action to finish a transaction.
    +   * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction finish = 11; + */ + @java.lang.Override + public com.google.spanner.executor.v1.FinishTransactionActionOrBuilder getFinishOrBuilder() { + if (actionCase_ == 11) { + return (com.google.spanner.executor.v1.FinishTransactionAction) action_; + } + return com.google.spanner.executor.v1.FinishTransactionAction.getDefaultInstance(); + } + + public static final int READ_FIELD_NUMBER = 20; + + /** + * + * + *
    +   * Action to do a normal read.
    +   * 
    + * + * .google.spanner.executor.v1.ReadAction read = 20; + * + * @return Whether the read field is set. + */ + @java.lang.Override + public boolean hasRead() { + return actionCase_ == 20; + } + + /** + * + * + *
    +   * Action to do a normal read.
    +   * 
    + * + * .google.spanner.executor.v1.ReadAction read = 20; + * + * @return The read. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ReadAction getRead() { + if (actionCase_ == 20) { + return (com.google.spanner.executor.v1.ReadAction) action_; + } + return com.google.spanner.executor.v1.ReadAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action to do a normal read.
    +   * 
    + * + * .google.spanner.executor.v1.ReadAction read = 20; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ReadActionOrBuilder getReadOrBuilder() { + if (actionCase_ == 20) { + return (com.google.spanner.executor.v1.ReadAction) action_; + } + return com.google.spanner.executor.v1.ReadAction.getDefaultInstance(); + } + + public static final int QUERY_FIELD_NUMBER = 21; + + /** + * + * + *
    +   * Action to do a query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction query = 21; + * + * @return Whether the query field is set. + */ + @java.lang.Override + public boolean hasQuery() { + return actionCase_ == 21; + } + + /** + * + * + *
    +   * Action to do a query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction query = 21; + * + * @return The query. + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryAction getQuery() { + if (actionCase_ == 21) { + return (com.google.spanner.executor.v1.QueryAction) action_; + } + return com.google.spanner.executor.v1.QueryAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action to do a query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction query = 21; + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryActionOrBuilder getQueryOrBuilder() { + if (actionCase_ == 21) { + return (com.google.spanner.executor.v1.QueryAction) action_; + } + return com.google.spanner.executor.v1.QueryAction.getDefaultInstance(); + } + + public static final int MUTATION_FIELD_NUMBER = 22; + + /** + * + * + *
    +   * Action to buffer a mutation.
    +   * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 22; + * + * @return Whether the mutation field is set. + */ + @java.lang.Override + public boolean hasMutation() { + return actionCase_ == 22; + } + + /** + * + * + *
    +   * Action to buffer a mutation.
    +   * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 22; + * + * @return The mutation. + */ + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction getMutation() { + if (actionCase_ == 22) { + return (com.google.spanner.executor.v1.MutationAction) action_; + } + return com.google.spanner.executor.v1.MutationAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action to buffer a mutation.
    +   * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 22; + */ + @java.lang.Override + public com.google.spanner.executor.v1.MutationActionOrBuilder getMutationOrBuilder() { + if (actionCase_ == 22) { + return (com.google.spanner.executor.v1.MutationAction) action_; + } + return com.google.spanner.executor.v1.MutationAction.getDefaultInstance(); + } + + public static final int DML_FIELD_NUMBER = 23; + + /** + * + * + *
    +   * Action to a DML.
    +   * 
    + * + * .google.spanner.executor.v1.DmlAction dml = 23; + * + * @return Whether the dml field is set. + */ + @java.lang.Override + public boolean hasDml() { + return actionCase_ == 23; + } + + /** + * + * + *
    +   * Action to a DML.
    +   * 
    + * + * .google.spanner.executor.v1.DmlAction dml = 23; + * + * @return The dml. + */ + @java.lang.Override + public com.google.spanner.executor.v1.DmlAction getDml() { + if (actionCase_ == 23) { + return (com.google.spanner.executor.v1.DmlAction) action_; + } + return com.google.spanner.executor.v1.DmlAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action to a DML.
    +   * 
    + * + * .google.spanner.executor.v1.DmlAction dml = 23; + */ + @java.lang.Override + public com.google.spanner.executor.v1.DmlActionOrBuilder getDmlOrBuilder() { + if (actionCase_ == 23) { + return (com.google.spanner.executor.v1.DmlAction) action_; + } + return com.google.spanner.executor.v1.DmlAction.getDefaultInstance(); + } + + public static final int BATCH_DML_FIELD_NUMBER = 24; + + /** + * + * + *
    +   * Action to a batch DML.
    +   * 
    + * + * .google.spanner.executor.v1.BatchDmlAction batch_dml = 24; + * + * @return Whether the batchDml field is set. + */ + @java.lang.Override + public boolean hasBatchDml() { + return actionCase_ == 24; + } + + /** + * + * + *
    +   * Action to a batch DML.
    +   * 
    + * + * .google.spanner.executor.v1.BatchDmlAction batch_dml = 24; + * + * @return The batchDml. + */ + @java.lang.Override + public com.google.spanner.executor.v1.BatchDmlAction getBatchDml() { + if (actionCase_ == 24) { + return (com.google.spanner.executor.v1.BatchDmlAction) action_; + } + return com.google.spanner.executor.v1.BatchDmlAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action to a batch DML.
    +   * 
    + * + * .google.spanner.executor.v1.BatchDmlAction batch_dml = 24; + */ + @java.lang.Override + public com.google.spanner.executor.v1.BatchDmlActionOrBuilder getBatchDmlOrBuilder() { + if (actionCase_ == 24) { + return (com.google.spanner.executor.v1.BatchDmlAction) action_; + } + return com.google.spanner.executor.v1.BatchDmlAction.getDefaultInstance(); + } + + public static final int WRITE_FIELD_NUMBER = 25; + + /** + * + * + *
    +   * Action to write a mutation.
    +   * 
    + * + * .google.spanner.executor.v1.WriteMutationsAction write = 25; + * + * @return Whether the write field is set. + */ + @java.lang.Override + public boolean hasWrite() { + return actionCase_ == 25; + } + + /** + * + * + *
    +   * Action to write a mutation.
    +   * 
    + * + * .google.spanner.executor.v1.WriteMutationsAction write = 25; + * + * @return The write. + */ + @java.lang.Override + public com.google.spanner.executor.v1.WriteMutationsAction getWrite() { + if (actionCase_ == 25) { + return (com.google.spanner.executor.v1.WriteMutationsAction) action_; + } + return com.google.spanner.executor.v1.WriteMutationsAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action to write a mutation.
    +   * 
    + * + * .google.spanner.executor.v1.WriteMutationsAction write = 25; + */ + @java.lang.Override + public com.google.spanner.executor.v1.WriteMutationsActionOrBuilder getWriteOrBuilder() { + if (actionCase_ == 25) { + return (com.google.spanner.executor.v1.WriteMutationsAction) action_; + } + return com.google.spanner.executor.v1.WriteMutationsAction.getDefaultInstance(); + } + + public static final int PARTITIONED_UPDATE_FIELD_NUMBER = 27; + + /** + * + * + *
    +   * Action to a partitioned update.
    +   * 
    + * + * .google.spanner.executor.v1.PartitionedUpdateAction partitioned_update = 27; + * + * @return Whether the partitionedUpdate field is set. + */ + @java.lang.Override + public boolean hasPartitionedUpdate() { + return actionCase_ == 27; + } + + /** + * + * + *
    +   * Action to a partitioned update.
    +   * 
    + * + * .google.spanner.executor.v1.PartitionedUpdateAction partitioned_update = 27; + * + * @return The partitionedUpdate. + */ + @java.lang.Override + public com.google.spanner.executor.v1.PartitionedUpdateAction getPartitionedUpdate() { + if (actionCase_ == 27) { + return (com.google.spanner.executor.v1.PartitionedUpdateAction) action_; + } + return com.google.spanner.executor.v1.PartitionedUpdateAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action to a partitioned update.
    +   * 
    + * + * .google.spanner.executor.v1.PartitionedUpdateAction partitioned_update = 27; + */ + @java.lang.Override + public com.google.spanner.executor.v1.PartitionedUpdateActionOrBuilder + getPartitionedUpdateOrBuilder() { + if (actionCase_ == 27) { + return (com.google.spanner.executor.v1.PartitionedUpdateAction) action_; + } + return com.google.spanner.executor.v1.PartitionedUpdateAction.getDefaultInstance(); + } + + public static final int ADMIN_FIELD_NUMBER = 30; + + /** + * + * + *
    +   * Action that contains any administrative operation, like database,
    +   * instance manipulation.
    +   * 
    + * + * .google.spanner.executor.v1.AdminAction admin = 30; + * + * @return Whether the admin field is set. + */ + @java.lang.Override + public boolean hasAdmin() { + return actionCase_ == 30; + } + + /** + * + * + *
    +   * Action that contains any administrative operation, like database,
    +   * instance manipulation.
    +   * 
    + * + * .google.spanner.executor.v1.AdminAction admin = 30; + * + * @return The admin. + */ + @java.lang.Override + public com.google.spanner.executor.v1.AdminAction getAdmin() { + if (actionCase_ == 30) { + return (com.google.spanner.executor.v1.AdminAction) action_; + } + return com.google.spanner.executor.v1.AdminAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action that contains any administrative operation, like database,
    +   * instance manipulation.
    +   * 
    + * + * .google.spanner.executor.v1.AdminAction admin = 30; + */ + @java.lang.Override + public com.google.spanner.executor.v1.AdminActionOrBuilder getAdminOrBuilder() { + if (actionCase_ == 30) { + return (com.google.spanner.executor.v1.AdminAction) action_; + } + return com.google.spanner.executor.v1.AdminAction.getDefaultInstance(); + } + + public static final int START_BATCH_TXN_FIELD_NUMBER = 40; + + /** + * + * + *
    +   * Action to start a batch transaction.
    +   * 
    + * + * .google.spanner.executor.v1.StartBatchTransactionAction start_batch_txn = 40; + * + * @return Whether the startBatchTxn field is set. + */ + @java.lang.Override + public boolean hasStartBatchTxn() { + return actionCase_ == 40; + } + + /** + * + * + *
    +   * Action to start a batch transaction.
    +   * 
    + * + * .google.spanner.executor.v1.StartBatchTransactionAction start_batch_txn = 40; + * + * @return The startBatchTxn. + */ + @java.lang.Override + public com.google.spanner.executor.v1.StartBatchTransactionAction getStartBatchTxn() { + if (actionCase_ == 40) { + return (com.google.spanner.executor.v1.StartBatchTransactionAction) action_; + } + return com.google.spanner.executor.v1.StartBatchTransactionAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action to start a batch transaction.
    +   * 
    + * + * .google.spanner.executor.v1.StartBatchTransactionAction start_batch_txn = 40; + */ + @java.lang.Override + public com.google.spanner.executor.v1.StartBatchTransactionActionOrBuilder + getStartBatchTxnOrBuilder() { + if (actionCase_ == 40) { + return (com.google.spanner.executor.v1.StartBatchTransactionAction) action_; + } + return com.google.spanner.executor.v1.StartBatchTransactionAction.getDefaultInstance(); + } + + public static final int CLOSE_BATCH_TXN_FIELD_NUMBER = 41; + + /** + * + * + *
    +   * Action to close a batch transaction.
    +   * 
    + * + * .google.spanner.executor.v1.CloseBatchTransactionAction close_batch_txn = 41; + * + * @return Whether the closeBatchTxn field is set. + */ + @java.lang.Override + public boolean hasCloseBatchTxn() { + return actionCase_ == 41; + } + + /** + * + * + *
    +   * Action to close a batch transaction.
    +   * 
    + * + * .google.spanner.executor.v1.CloseBatchTransactionAction close_batch_txn = 41; + * + * @return The closeBatchTxn. + */ + @java.lang.Override + public com.google.spanner.executor.v1.CloseBatchTransactionAction getCloseBatchTxn() { + if (actionCase_ == 41) { + return (com.google.spanner.executor.v1.CloseBatchTransactionAction) action_; + } + return com.google.spanner.executor.v1.CloseBatchTransactionAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action to close a batch transaction.
    +   * 
    + * + * .google.spanner.executor.v1.CloseBatchTransactionAction close_batch_txn = 41; + */ + @java.lang.Override + public com.google.spanner.executor.v1.CloseBatchTransactionActionOrBuilder + getCloseBatchTxnOrBuilder() { + if (actionCase_ == 41) { + return (com.google.spanner.executor.v1.CloseBatchTransactionAction) action_; + } + return com.google.spanner.executor.v1.CloseBatchTransactionAction.getDefaultInstance(); + } + + public static final int GENERATE_DB_PARTITIONS_READ_FIELD_NUMBER = 42; + + /** + * + * + *
    +   * Action to generate database partitions for batch read.
    +   * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForReadAction generate_db_partitions_read = 42; + * + * + * @return Whether the generateDbPartitionsRead field is set. + */ + @java.lang.Override + public boolean hasGenerateDbPartitionsRead() { + return actionCase_ == 42; + } + + /** + * + * + *
    +   * Action to generate database partitions for batch read.
    +   * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForReadAction generate_db_partitions_read = 42; + * + * + * @return The generateDbPartitionsRead. + */ + @java.lang.Override + public com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction + getGenerateDbPartitionsRead() { + if (actionCase_ == 42) { + return (com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction) action_; + } + return com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action to generate database partitions for batch read.
    +   * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForReadAction generate_db_partitions_read = 42; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.GenerateDbPartitionsForReadActionOrBuilder + getGenerateDbPartitionsReadOrBuilder() { + if (actionCase_ == 42) { + return (com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction) action_; + } + return com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction.getDefaultInstance(); + } + + public static final int GENERATE_DB_PARTITIONS_QUERY_FIELD_NUMBER = 43; + + /** + * + * + *
    +   * Action to generate database partitions for batch query.
    +   * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForQueryAction generate_db_partitions_query = 43; + * + * + * @return Whether the generateDbPartitionsQuery field is set. + */ + @java.lang.Override + public boolean hasGenerateDbPartitionsQuery() { + return actionCase_ == 43; + } + + /** + * + * + *
    +   * Action to generate database partitions for batch query.
    +   * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForQueryAction generate_db_partitions_query = 43; + * + * + * @return The generateDbPartitionsQuery. + */ + @java.lang.Override + public com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction + getGenerateDbPartitionsQuery() { + if (actionCase_ == 43) { + return (com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction) action_; + } + return com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action to generate database partitions for batch query.
    +   * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForQueryAction generate_db_partitions_query = 43; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.GenerateDbPartitionsForQueryActionOrBuilder + getGenerateDbPartitionsQueryOrBuilder() { + if (actionCase_ == 43) { + return (com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction) action_; + } + return com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction.getDefaultInstance(); + } + + public static final int EXECUTE_PARTITION_FIELD_NUMBER = 44; + + /** + * + * + *
    +   * Action to execute batch actions on generated partitions.
    +   * 
    + * + * .google.spanner.executor.v1.ExecutePartitionAction execute_partition = 44; + * + * @return Whether the executePartition field is set. + */ + @java.lang.Override + public boolean hasExecutePartition() { + return actionCase_ == 44; + } + + /** + * + * + *
    +   * Action to execute batch actions on generated partitions.
    +   * 
    + * + * .google.spanner.executor.v1.ExecutePartitionAction execute_partition = 44; + * + * @return The executePartition. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ExecutePartitionAction getExecutePartition() { + if (actionCase_ == 44) { + return (com.google.spanner.executor.v1.ExecutePartitionAction) action_; + } + return com.google.spanner.executor.v1.ExecutePartitionAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action to execute batch actions on generated partitions.
    +   * 
    + * + * .google.spanner.executor.v1.ExecutePartitionAction execute_partition = 44; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ExecutePartitionActionOrBuilder + getExecutePartitionOrBuilder() { + if (actionCase_ == 44) { + return (com.google.spanner.executor.v1.ExecutePartitionAction) action_; + } + return com.google.spanner.executor.v1.ExecutePartitionAction.getDefaultInstance(); + } + + public static final int EXECUTE_CHANGE_STREAM_QUERY_FIELD_NUMBER = 50; + + /** + * + * + *
    +   * Action to execute change stream query.
    +   * 
    + * + * .google.spanner.executor.v1.ExecuteChangeStreamQuery execute_change_stream_query = 50; + * + * + * @return Whether the executeChangeStreamQuery field is set. + */ + @java.lang.Override + public boolean hasExecuteChangeStreamQuery() { + return actionCase_ == 50; + } + + /** + * + * + *
    +   * Action to execute change stream query.
    +   * 
    + * + * .google.spanner.executor.v1.ExecuteChangeStreamQuery execute_change_stream_query = 50; + * + * + * @return The executeChangeStreamQuery. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ExecuteChangeStreamQuery getExecuteChangeStreamQuery() { + if (actionCase_ == 50) { + return (com.google.spanner.executor.v1.ExecuteChangeStreamQuery) action_; + } + return com.google.spanner.executor.v1.ExecuteChangeStreamQuery.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action to execute change stream query.
    +   * 
    + * + * .google.spanner.executor.v1.ExecuteChangeStreamQuery execute_change_stream_query = 50; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.ExecuteChangeStreamQueryOrBuilder + getExecuteChangeStreamQueryOrBuilder() { + if (actionCase_ == 50) { + return (com.google.spanner.executor.v1.ExecuteChangeStreamQuery) action_; + } + return com.google.spanner.executor.v1.ExecuteChangeStreamQuery.getDefaultInstance(); + } + + public static final int QUERY_CANCELLATION_FIELD_NUMBER = 51; + + /** + * + * + *
    +   * Query cancellation action for testing the cancellation of a query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + * + * @return Whether the queryCancellation field is set. + */ + @java.lang.Override + public boolean hasQueryCancellation() { + return actionCase_ == 51; + } + + /** + * + * + *
    +   * Query cancellation action for testing the cancellation of a query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + * + * @return The queryCancellation. + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryCancellationAction getQueryCancellation() { + if (actionCase_ == 51) { + return (com.google.spanner.executor.v1.QueryCancellationAction) action_; + } + return com.google.spanner.executor.v1.QueryCancellationAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Query cancellation action for testing the cancellation of a query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryCancellationActionOrBuilder + getQueryCancellationOrBuilder() { + if (actionCase_ == 51) { + return (com.google.spanner.executor.v1.QueryCancellationAction) action_; + } + return com.google.spanner.executor.v1.QueryCancellationAction.getDefaultInstance(); + } + + public static final int ADAPT_MESSAGE_FIELD_NUMBER = 52; + + /** + * + * + *
    +   * Action to adapt a message.
    +   * 
    + * + * .google.spanner.executor.v1.AdaptMessageAction adapt_message = 52; + * + * @return Whether the adaptMessage field is set. + */ + @java.lang.Override + public boolean hasAdaptMessage() { + return actionCase_ == 52; + } + + /** + * + * + *
    +   * Action to adapt a message.
    +   * 
    + * + * .google.spanner.executor.v1.AdaptMessageAction adapt_message = 52; + * + * @return The adaptMessage. + */ + @java.lang.Override + public com.google.spanner.executor.v1.AdaptMessageAction getAdaptMessage() { + if (actionCase_ == 52) { + return (com.google.spanner.executor.v1.AdaptMessageAction) action_; + } + return com.google.spanner.executor.v1.AdaptMessageAction.getDefaultInstance(); + } + + /** + * + * + *
    +   * Action to adapt a message.
    +   * 
    + * + * .google.spanner.executor.v1.AdaptMessageAction adapt_message = 52; + */ + @java.lang.Override + public com.google.spanner.executor.v1.AdaptMessageActionOrBuilder getAdaptMessageOrBuilder() { + if (actionCase_ == 52) { + return (com.google.spanner.executor.v1.AdaptMessageAction) action_; + } + return com.google.spanner.executor.v1.AdaptMessageAction.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databasePath_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, databasePath_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getSpannerOptions()); + } + if (actionCase_ == 10) { + output.writeMessage(10, (com.google.spanner.executor.v1.StartTransactionAction) action_); + } + if (actionCase_ == 11) { + output.writeMessage(11, (com.google.spanner.executor.v1.FinishTransactionAction) action_); + } + if (actionCase_ == 20) { + output.writeMessage(20, (com.google.spanner.executor.v1.ReadAction) action_); + } + if (actionCase_ == 21) { + output.writeMessage(21, (com.google.spanner.executor.v1.QueryAction) action_); + } + if (actionCase_ == 22) { + output.writeMessage(22, (com.google.spanner.executor.v1.MutationAction) action_); + } + if (actionCase_ == 23) { + output.writeMessage(23, (com.google.spanner.executor.v1.DmlAction) action_); + } + if (actionCase_ == 24) { + output.writeMessage(24, (com.google.spanner.executor.v1.BatchDmlAction) action_); + } + if (actionCase_ == 25) { + output.writeMessage(25, (com.google.spanner.executor.v1.WriteMutationsAction) action_); + } + if (actionCase_ == 
27) { + output.writeMessage(27, (com.google.spanner.executor.v1.PartitionedUpdateAction) action_); + } + if (actionCase_ == 30) { + output.writeMessage(30, (com.google.spanner.executor.v1.AdminAction) action_); + } + if (actionCase_ == 40) { + output.writeMessage(40, (com.google.spanner.executor.v1.StartBatchTransactionAction) action_); + } + if (actionCase_ == 41) { + output.writeMessage(41, (com.google.spanner.executor.v1.CloseBatchTransactionAction) action_); + } + if (actionCase_ == 42) { + output.writeMessage( + 42, (com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction) action_); + } + if (actionCase_ == 43) { + output.writeMessage( + 43, (com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction) action_); + } + if (actionCase_ == 44) { + output.writeMessage(44, (com.google.spanner.executor.v1.ExecutePartitionAction) action_); + } + if (actionCase_ == 50) { + output.writeMessage(50, (com.google.spanner.executor.v1.ExecuteChangeStreamQuery) action_); + } + if (actionCase_ == 51) { + output.writeMessage(51, (com.google.spanner.executor.v1.QueryCancellationAction) action_); + } + if (actionCase_ == 52) { + output.writeMessage(52, (com.google.spanner.executor.v1.AdaptMessageAction) action_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databasePath_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, databasePath_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getSpannerOptions()); + } + if (actionCase_ == 10) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 10, (com.google.spanner.executor.v1.StartTransactionAction) action_); + } + if (actionCase_ == 11) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 11, 
(com.google.spanner.executor.v1.FinishTransactionAction) action_); + } + if (actionCase_ == 20) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 20, (com.google.spanner.executor.v1.ReadAction) action_); + } + if (actionCase_ == 21) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 21, (com.google.spanner.executor.v1.QueryAction) action_); + } + if (actionCase_ == 22) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 22, (com.google.spanner.executor.v1.MutationAction) action_); + } + if (actionCase_ == 23) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 23, (com.google.spanner.executor.v1.DmlAction) action_); + } + if (actionCase_ == 24) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 24, (com.google.spanner.executor.v1.BatchDmlAction) action_); + } + if (actionCase_ == 25) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 25, (com.google.spanner.executor.v1.WriteMutationsAction) action_); + } + if (actionCase_ == 27) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 27, (com.google.spanner.executor.v1.PartitionedUpdateAction) action_); + } + if (actionCase_ == 30) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 30, (com.google.spanner.executor.v1.AdminAction) action_); + } + if (actionCase_ == 40) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 40, (com.google.spanner.executor.v1.StartBatchTransactionAction) action_); + } + if (actionCase_ == 41) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 41, (com.google.spanner.executor.v1.CloseBatchTransactionAction) action_); + } + if (actionCase_ == 42) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 42, (com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction) action_); + } + if (actionCase_ == 43) { + size += + 
com.google.protobuf.CodedOutputStream.computeMessageSize( + 43, (com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction) action_); + } + if (actionCase_ == 44) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 44, (com.google.spanner.executor.v1.ExecutePartitionAction) action_); + } + if (actionCase_ == 50) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 50, (com.google.spanner.executor.v1.ExecuteChangeStreamQuery) action_); + } + if (actionCase_ == 51) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 51, (com.google.spanner.executor.v1.QueryCancellationAction) action_); + } + if (actionCase_ == 52) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 52, (com.google.spanner.executor.v1.AdaptMessageAction) action_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.SpannerAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.SpannerAction other = + (com.google.spanner.executor.v1.SpannerAction) obj; + + if (!getDatabasePath().equals(other.getDatabasePath())) return false; + if (hasSpannerOptions() != other.hasSpannerOptions()) return false; + if (hasSpannerOptions()) { + if (!getSpannerOptions().equals(other.getSpannerOptions())) return false; + } + if (!getActionCase().equals(other.getActionCase())) return false; + switch (actionCase_) { + case 10: + if (!getStart().equals(other.getStart())) return false; + break; + case 11: + if (!getFinish().equals(other.getFinish())) return false; + break; + case 20: + if (!getRead().equals(other.getRead())) return false; + break; + case 21: + if (!getQuery().equals(other.getQuery())) return false; + break; + case 22: + if (!getMutation().equals(other.getMutation())) return false; + 
break; + case 23: + if (!getDml().equals(other.getDml())) return false; + break; + case 24: + if (!getBatchDml().equals(other.getBatchDml())) return false; + break; + case 25: + if (!getWrite().equals(other.getWrite())) return false; + break; + case 27: + if (!getPartitionedUpdate().equals(other.getPartitionedUpdate())) return false; + break; + case 30: + if (!getAdmin().equals(other.getAdmin())) return false; + break; + case 40: + if (!getStartBatchTxn().equals(other.getStartBatchTxn())) return false; + break; + case 41: + if (!getCloseBatchTxn().equals(other.getCloseBatchTxn())) return false; + break; + case 42: + if (!getGenerateDbPartitionsRead().equals(other.getGenerateDbPartitionsRead())) + return false; + break; + case 43: + if (!getGenerateDbPartitionsQuery().equals(other.getGenerateDbPartitionsQuery())) + return false; + break; + case 44: + if (!getExecutePartition().equals(other.getExecutePartition())) return false; + break; + case 50: + if (!getExecuteChangeStreamQuery().equals(other.getExecuteChangeStreamQuery())) + return false; + break; + case 51: + if (!getQueryCancellation().equals(other.getQueryCancellation())) return false; + break; + case 52: + if (!getAdaptMessage().equals(other.getAdaptMessage())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DATABASE_PATH_FIELD_NUMBER; + hash = (53 * hash) + getDatabasePath().hashCode(); + if (hasSpannerOptions()) { + hash = (37 * hash) + SPANNER_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getSpannerOptions().hashCode(); + } + switch (actionCase_) { + case 10: + hash = (37 * hash) + START_FIELD_NUMBER; + hash = (53 * hash) + getStart().hashCode(); + break; + case 11: + hash = (37 * hash) + FINISH_FIELD_NUMBER; + hash 
= (53 * hash) + getFinish().hashCode(); + break; + case 20: + hash = (37 * hash) + READ_FIELD_NUMBER; + hash = (53 * hash) + getRead().hashCode(); + break; + case 21: + hash = (37 * hash) + QUERY_FIELD_NUMBER; + hash = (53 * hash) + getQuery().hashCode(); + break; + case 22: + hash = (37 * hash) + MUTATION_FIELD_NUMBER; + hash = (53 * hash) + getMutation().hashCode(); + break; + case 23: + hash = (37 * hash) + DML_FIELD_NUMBER; + hash = (53 * hash) + getDml().hashCode(); + break; + case 24: + hash = (37 * hash) + BATCH_DML_FIELD_NUMBER; + hash = (53 * hash) + getBatchDml().hashCode(); + break; + case 25: + hash = (37 * hash) + WRITE_FIELD_NUMBER; + hash = (53 * hash) + getWrite().hashCode(); + break; + case 27: + hash = (37 * hash) + PARTITIONED_UPDATE_FIELD_NUMBER; + hash = (53 * hash) + getPartitionedUpdate().hashCode(); + break; + case 30: + hash = (37 * hash) + ADMIN_FIELD_NUMBER; + hash = (53 * hash) + getAdmin().hashCode(); + break; + case 40: + hash = (37 * hash) + START_BATCH_TXN_FIELD_NUMBER; + hash = (53 * hash) + getStartBatchTxn().hashCode(); + break; + case 41: + hash = (37 * hash) + CLOSE_BATCH_TXN_FIELD_NUMBER; + hash = (53 * hash) + getCloseBatchTxn().hashCode(); + break; + case 42: + hash = (37 * hash) + GENERATE_DB_PARTITIONS_READ_FIELD_NUMBER; + hash = (53 * hash) + getGenerateDbPartitionsRead().hashCode(); + break; + case 43: + hash = (37 * hash) + GENERATE_DB_PARTITIONS_QUERY_FIELD_NUMBER; + hash = (53 * hash) + getGenerateDbPartitionsQuery().hashCode(); + break; + case 44: + hash = (37 * hash) + EXECUTE_PARTITION_FIELD_NUMBER; + hash = (53 * hash) + getExecutePartition().hashCode(); + break; + case 50: + hash = (37 * hash) + EXECUTE_CHANGE_STREAM_QUERY_FIELD_NUMBER; + hash = (53 * hash) + getExecuteChangeStreamQuery().hashCode(); + break; + case 51: + hash = (37 * hash) + QUERY_CANCELLATION_FIELD_NUMBER; + hash = (53 * hash) + getQueryCancellation().hashCode(); + break; + case 52: + hash = (37 * hash) + ADAPT_MESSAGE_FIELD_NUMBER; + hash = (53 
* hash) + getAdaptMessage().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.SpannerAction parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.SpannerAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.SpannerAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.SpannerAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerAction parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.SpannerAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + 
throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.SpannerAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.SpannerAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.SpannerAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * SpannerAction defines a primitive action that can be performed against
    +   * Spanner, such as begin or commit a transaction, or perform a read or
    +   * mutation.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.SpannerAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.SpannerAction) + com.google.spanner.executor.v1.SpannerActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.SpannerAction.class, + com.google.spanner.executor.v1.SpannerAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.SpannerAction.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetSpannerOptionsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + databasePath_ = ""; + spannerOptions_ = null; + if (spannerOptionsBuilder_ != null) { + spannerOptionsBuilder_.dispose(); + spannerOptionsBuilder_ = null; + } + if (startBuilder_ != null) { + startBuilder_.clear(); + } + if (finishBuilder_ != null) { + finishBuilder_.clear(); + } + if (readBuilder_ != null) { + readBuilder_.clear(); + } + if (queryBuilder_ != null) { + queryBuilder_.clear(); + } + if (mutationBuilder_ != null) { + mutationBuilder_.clear(); + } + if (dmlBuilder_ 
!= null) { + dmlBuilder_.clear(); + } + if (batchDmlBuilder_ != null) { + batchDmlBuilder_.clear(); + } + if (writeBuilder_ != null) { + writeBuilder_.clear(); + } + if (partitionedUpdateBuilder_ != null) { + partitionedUpdateBuilder_.clear(); + } + if (adminBuilder_ != null) { + adminBuilder_.clear(); + } + if (startBatchTxnBuilder_ != null) { + startBatchTxnBuilder_.clear(); + } + if (closeBatchTxnBuilder_ != null) { + closeBatchTxnBuilder_.clear(); + } + if (generateDbPartitionsReadBuilder_ != null) { + generateDbPartitionsReadBuilder_.clear(); + } + if (generateDbPartitionsQueryBuilder_ != null) { + generateDbPartitionsQueryBuilder_.clear(); + } + if (executePartitionBuilder_ != null) { + executePartitionBuilder_.clear(); + } + if (executeChangeStreamQueryBuilder_ != null) { + executeChangeStreamQueryBuilder_.clear(); + } + if (queryCancellationBuilder_ != null) { + queryCancellationBuilder_.clear(); + } + if (adaptMessageBuilder_ != null) { + adaptMessageBuilder_.clear(); + } + actionCase_ = 0; + action_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.SpannerAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.SpannerAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.SpannerAction build() { + com.google.spanner.executor.v1.SpannerAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.SpannerAction buildPartial() { + com.google.spanner.executor.v1.SpannerAction result = + new com.google.spanner.executor.v1.SpannerAction(this); + if (bitField0_ != 0) { + 
buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.SpannerAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.databasePath_ = databasePath_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.spannerOptions_ = + spannerOptionsBuilder_ == null ? spannerOptions_ : spannerOptionsBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs(com.google.spanner.executor.v1.SpannerAction result) { + result.actionCase_ = actionCase_; + result.action_ = this.action_; + if (actionCase_ == 10 && startBuilder_ != null) { + result.action_ = startBuilder_.build(); + } + if (actionCase_ == 11 && finishBuilder_ != null) { + result.action_ = finishBuilder_.build(); + } + if (actionCase_ == 20 && readBuilder_ != null) { + result.action_ = readBuilder_.build(); + } + if (actionCase_ == 21 && queryBuilder_ != null) { + result.action_ = queryBuilder_.build(); + } + if (actionCase_ == 22 && mutationBuilder_ != null) { + result.action_ = mutationBuilder_.build(); + } + if (actionCase_ == 23 && dmlBuilder_ != null) { + result.action_ = dmlBuilder_.build(); + } + if (actionCase_ == 24 && batchDmlBuilder_ != null) { + result.action_ = batchDmlBuilder_.build(); + } + if (actionCase_ == 25 && writeBuilder_ != null) { + result.action_ = writeBuilder_.build(); + } + if (actionCase_ == 27 && partitionedUpdateBuilder_ != null) { + result.action_ = partitionedUpdateBuilder_.build(); + } + if (actionCase_ == 30 && adminBuilder_ != null) { + result.action_ = adminBuilder_.build(); + } + if (actionCase_ == 40 && startBatchTxnBuilder_ != null) { + result.action_ = startBatchTxnBuilder_.build(); + } + if (actionCase_ == 41 && closeBatchTxnBuilder_ != null) { + result.action_ = closeBatchTxnBuilder_.build(); + } + if (actionCase_ == 42 && 
generateDbPartitionsReadBuilder_ != null) { + result.action_ = generateDbPartitionsReadBuilder_.build(); + } + if (actionCase_ == 43 && generateDbPartitionsQueryBuilder_ != null) { + result.action_ = generateDbPartitionsQueryBuilder_.build(); + } + if (actionCase_ == 44 && executePartitionBuilder_ != null) { + result.action_ = executePartitionBuilder_.build(); + } + if (actionCase_ == 50 && executeChangeStreamQueryBuilder_ != null) { + result.action_ = executeChangeStreamQueryBuilder_.build(); + } + if (actionCase_ == 51 && queryCancellationBuilder_ != null) { + result.action_ = queryCancellationBuilder_.build(); + } + if (actionCase_ == 52 && adaptMessageBuilder_ != null) { + result.action_ = adaptMessageBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.SpannerAction) { + return mergeFrom((com.google.spanner.executor.v1.SpannerAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.SpannerAction other) { + if (other == com.google.spanner.executor.v1.SpannerAction.getDefaultInstance()) return this; + if (!other.getDatabasePath().isEmpty()) { + databasePath_ = other.databasePath_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasSpannerOptions()) { + mergeSpannerOptions(other.getSpannerOptions()); + } + switch (other.getActionCase()) { + case START: + { + mergeStart(other.getStart()); + break; + } + case FINISH: + { + mergeFinish(other.getFinish()); + break; + } + case READ: + { + mergeRead(other.getRead()); + break; + } + case QUERY: + { + mergeQuery(other.getQuery()); + break; + } + case MUTATION: + { + mergeMutation(other.getMutation()); + break; + } + case DML: + { + mergeDml(other.getDml()); + break; + } + case BATCH_DML: + { + mergeBatchDml(other.getBatchDml()); + break; + } + case WRITE: + { + mergeWrite(other.getWrite()); + break; + } + case 
PARTITIONED_UPDATE: + { + mergePartitionedUpdate(other.getPartitionedUpdate()); + break; + } + case ADMIN: + { + mergeAdmin(other.getAdmin()); + break; + } + case START_BATCH_TXN: + { + mergeStartBatchTxn(other.getStartBatchTxn()); + break; + } + case CLOSE_BATCH_TXN: + { + mergeCloseBatchTxn(other.getCloseBatchTxn()); + break; + } + case GENERATE_DB_PARTITIONS_READ: + { + mergeGenerateDbPartitionsRead(other.getGenerateDbPartitionsRead()); + break; + } + case GENERATE_DB_PARTITIONS_QUERY: + { + mergeGenerateDbPartitionsQuery(other.getGenerateDbPartitionsQuery()); + break; + } + case EXECUTE_PARTITION: + { + mergeExecutePartition(other.getExecutePartition()); + break; + } + case EXECUTE_CHANGE_STREAM_QUERY: + { + mergeExecuteChangeStreamQuery(other.getExecuteChangeStreamQuery()); + break; + } + case QUERY_CANCELLATION: + { + mergeQueryCancellation(other.getQueryCancellation()); + break; + } + case ADAPT_MESSAGE: + { + mergeAdaptMessage(other.getAdaptMessage()); + break; + } + case ACTION_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + databasePath_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetSpannerOptionsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 82: + { + input.readMessage(internalGetStartFieldBuilder().getBuilder(), extensionRegistry); + 
actionCase_ = 10; + break; + } // case 82 + case 90: + { + input.readMessage(internalGetFinishFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 11; + break; + } // case 90 + case 162: + { + input.readMessage(internalGetReadFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 20; + break; + } // case 162 + case 170: + { + input.readMessage(internalGetQueryFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 21; + break; + } // case 170 + case 178: + { + input.readMessage( + internalGetMutationFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 22; + break; + } // case 178 + case 186: + { + input.readMessage(internalGetDmlFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 23; + break; + } // case 186 + case 194: + { + input.readMessage( + internalGetBatchDmlFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 24; + break; + } // case 194 + case 202: + { + input.readMessage(internalGetWriteFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 25; + break; + } // case 202 + case 218: + { + input.readMessage( + internalGetPartitionedUpdateFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 27; + break; + } // case 218 + case 242: + { + input.readMessage(internalGetAdminFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 30; + break; + } // case 242 + case 322: + { + input.readMessage( + internalGetStartBatchTxnFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 40; + break; + } // case 322 + case 330: + { + input.readMessage( + internalGetCloseBatchTxnFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 41; + break; + } // case 330 + case 338: + { + input.readMessage( + internalGetGenerateDbPartitionsReadFieldBuilder().getBuilder(), + extensionRegistry); + actionCase_ = 42; + break; + } // case 338 + case 346: + { + input.readMessage( + internalGetGenerateDbPartitionsQueryFieldBuilder().getBuilder(), + extensionRegistry); + 
actionCase_ = 43; + break; + } // case 346 + case 354: + { + input.readMessage( + internalGetExecutePartitionFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 44; + break; + } // case 354 + case 402: + { + input.readMessage( + internalGetExecuteChangeStreamQueryFieldBuilder().getBuilder(), + extensionRegistry); + actionCase_ = 50; + break; + } // case 402 + case 410: + { + input.readMessage( + internalGetQueryCancellationFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 51; + break; + } // case 410 + case 418: + { + input.readMessage( + internalGetAdaptMessageFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 52; + break; + } // case 418 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int actionCase_ = 0; + private java.lang.Object action_; + + public ActionCase getActionCase() { + return ActionCase.forNumber(actionCase_); + } + + public Builder clearAction() { + actionCase_ = 0; + action_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private java.lang.Object databasePath_ = ""; + + /** + * + * + *
    +     * Database against which to perform action.
    +     * In a context where a series of actions take place, an action may omit
    +     * database path if it applies to the same database as the previous action.
    +     * 
    + * + * string database_path = 1; + * + * @return The databasePath. + */ + public java.lang.String getDatabasePath() { + java.lang.Object ref = databasePath_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databasePath_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Database against which to perform action.
    +     * In a context where a series of actions take place, an action may omit
    +     * database path if it applies to the same database as the previous action.
    +     * 
    + * + * string database_path = 1; + * + * @return The bytes for databasePath. + */ + public com.google.protobuf.ByteString getDatabasePathBytes() { + java.lang.Object ref = databasePath_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databasePath_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Database against which to perform action.
    +     * In a context where a series of actions take place, an action may omit
    +     * database path if it applies to the same database as the previous action.
    +     * 
    + * + * string database_path = 1; + * + * @param value The databasePath to set. + * @return This builder for chaining. + */ + public Builder setDatabasePath(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + databasePath_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Database against which to perform action.
    +     * In a context where a series of actions take place, an action may omit
    +     * database path if it applies to the same database as the previous action.
    +     * 
    + * + * string database_path = 1; + * + * @return This builder for chaining. + */ + public Builder clearDatabasePath() { + databasePath_ = getDefaultInstance().getDatabasePath(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Database against which to perform action.
    +     * In a context where a series of actions take place, an action may omit
    +     * database path if it applies to the same database as the previous action.
    +     * 
    + * + * string database_path = 1; + * + * @param value The bytes for databasePath to set. + * @return This builder for chaining. + */ + public Builder setDatabasePathBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + databasePath_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.spanner.executor.v1.SpannerOptions spannerOptions_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.SpannerOptions, + com.google.spanner.executor.v1.SpannerOptions.Builder, + com.google.spanner.executor.v1.SpannerOptionsOrBuilder> + spannerOptionsBuilder_; + + /** + * + * + *
    +     * Configuration options for Spanner backend
    +     * 
    + * + * .google.spanner.executor.v1.SpannerOptions spanner_options = 2; + * + * @return Whether the spannerOptions field is set. + */ + public boolean hasSpannerOptions() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Configuration options for Spanner backend
    +     * 
    + * + * .google.spanner.executor.v1.SpannerOptions spanner_options = 2; + * + * @return The spannerOptions. + */ + public com.google.spanner.executor.v1.SpannerOptions getSpannerOptions() { + if (spannerOptionsBuilder_ == null) { + return spannerOptions_ == null + ? com.google.spanner.executor.v1.SpannerOptions.getDefaultInstance() + : spannerOptions_; + } else { + return spannerOptionsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Configuration options for Spanner backend
    +     * 
    + * + * .google.spanner.executor.v1.SpannerOptions spanner_options = 2; + */ + public Builder setSpannerOptions(com.google.spanner.executor.v1.SpannerOptions value) { + if (spannerOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + spannerOptions_ = value; + } else { + spannerOptionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Configuration options for Spanner backend
    +     * 
    + * + * .google.spanner.executor.v1.SpannerOptions spanner_options = 2; + */ + public Builder setSpannerOptions( + com.google.spanner.executor.v1.SpannerOptions.Builder builderForValue) { + if (spannerOptionsBuilder_ == null) { + spannerOptions_ = builderForValue.build(); + } else { + spannerOptionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Configuration options for Spanner backend
    +     * 
    + * + * .google.spanner.executor.v1.SpannerOptions spanner_options = 2; + */ + public Builder mergeSpannerOptions(com.google.spanner.executor.v1.SpannerOptions value) { + if (spannerOptionsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && spannerOptions_ != null + && spannerOptions_ + != com.google.spanner.executor.v1.SpannerOptions.getDefaultInstance()) { + getSpannerOptionsBuilder().mergeFrom(value); + } else { + spannerOptions_ = value; + } + } else { + spannerOptionsBuilder_.mergeFrom(value); + } + if (spannerOptions_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Configuration options for Spanner backend
    +     * 
    + * + * .google.spanner.executor.v1.SpannerOptions spanner_options = 2; + */ + public Builder clearSpannerOptions() { + bitField0_ = (bitField0_ & ~0x00000002); + spannerOptions_ = null; + if (spannerOptionsBuilder_ != null) { + spannerOptionsBuilder_.dispose(); + spannerOptionsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Configuration options for Spanner backend
    +     * 
    + * + * .google.spanner.executor.v1.SpannerOptions spanner_options = 2; + */ + public com.google.spanner.executor.v1.SpannerOptions.Builder getSpannerOptionsBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetSpannerOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Configuration options for Spanner backend
    +     * 
    + * + * .google.spanner.executor.v1.SpannerOptions spanner_options = 2; + */ + public com.google.spanner.executor.v1.SpannerOptionsOrBuilder getSpannerOptionsOrBuilder() { + if (spannerOptionsBuilder_ != null) { + return spannerOptionsBuilder_.getMessageOrBuilder(); + } else { + return spannerOptions_ == null + ? com.google.spanner.executor.v1.SpannerOptions.getDefaultInstance() + : spannerOptions_; + } + } + + /** + * + * + *
    +     * Configuration options for Spanner backend
    +     * 
    + * + * .google.spanner.executor.v1.SpannerOptions spanner_options = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.SpannerOptions, + com.google.spanner.executor.v1.SpannerOptions.Builder, + com.google.spanner.executor.v1.SpannerOptionsOrBuilder> + internalGetSpannerOptionsFieldBuilder() { + if (spannerOptionsBuilder_ == null) { + spannerOptionsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.SpannerOptions, + com.google.spanner.executor.v1.SpannerOptions.Builder, + com.google.spanner.executor.v1.SpannerOptionsOrBuilder>( + getSpannerOptions(), getParentForChildren(), isClean()); + spannerOptions_ = null; + } + return spannerOptionsBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.StartTransactionAction, + com.google.spanner.executor.v1.StartTransactionAction.Builder, + com.google.spanner.executor.v1.StartTransactionActionOrBuilder> + startBuilder_; + + /** + * + * + *
    +     * Action to start a transaction.
    +     * 
    + * + * .google.spanner.executor.v1.StartTransactionAction start = 10; + * + * @return Whether the start field is set. + */ + @java.lang.Override + public boolean hasStart() { + return actionCase_ == 10; + } + + /** + * + * + *
    +     * Action to start a transaction.
    +     * 
    + * + * .google.spanner.executor.v1.StartTransactionAction start = 10; + * + * @return The start. + */ + @java.lang.Override + public com.google.spanner.executor.v1.StartTransactionAction getStart() { + if (startBuilder_ == null) { + if (actionCase_ == 10) { + return (com.google.spanner.executor.v1.StartTransactionAction) action_; + } + return com.google.spanner.executor.v1.StartTransactionAction.getDefaultInstance(); + } else { + if (actionCase_ == 10) { + return startBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.StartTransactionAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to start a transaction.
    +     * 
    + * + * .google.spanner.executor.v1.StartTransactionAction start = 10; + */ + public Builder setStart(com.google.spanner.executor.v1.StartTransactionAction value) { + if (startBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + startBuilder_.setMessage(value); + } + actionCase_ = 10; + return this; + } + + /** + * + * + *
    +     * Action to start a transaction.
    +     * 
    + * + * .google.spanner.executor.v1.StartTransactionAction start = 10; + */ + public Builder setStart( + com.google.spanner.executor.v1.StartTransactionAction.Builder builderForValue) { + if (startBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + startBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 10; + return this; + } + + /** + * + * + *
    +     * Action to start a transaction.
    +     * 
    + * + * .google.spanner.executor.v1.StartTransactionAction start = 10; + */ + public Builder mergeStart(com.google.spanner.executor.v1.StartTransactionAction value) { + if (startBuilder_ == null) { + if (actionCase_ == 10 + && action_ + != com.google.spanner.executor.v1.StartTransactionAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.StartTransactionAction.newBuilder( + (com.google.spanner.executor.v1.StartTransactionAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 10) { + startBuilder_.mergeFrom(value); + } else { + startBuilder_.setMessage(value); + } + } + actionCase_ = 10; + return this; + } + + /** + * + * + *
    +     * Action to start a transaction.
    +     * 
    + * + * .google.spanner.executor.v1.StartTransactionAction start = 10; + */ + public Builder clearStart() { + if (startBuilder_ == null) { + if (actionCase_ == 10) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 10) { + actionCase_ = 0; + action_ = null; + } + startBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action to start a transaction.
    +     * 
    + * + * .google.spanner.executor.v1.StartTransactionAction start = 10; + */ + public com.google.spanner.executor.v1.StartTransactionAction.Builder getStartBuilder() { + return internalGetStartFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action to start a transaction.
    +     * 
    + * + * .google.spanner.executor.v1.StartTransactionAction start = 10; + */ + @java.lang.Override + public com.google.spanner.executor.v1.StartTransactionActionOrBuilder getStartOrBuilder() { + if ((actionCase_ == 10) && (startBuilder_ != null)) { + return startBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 10) { + return (com.google.spanner.executor.v1.StartTransactionAction) action_; + } + return com.google.spanner.executor.v1.StartTransactionAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to start a transaction.
    +     * 
    + * + * .google.spanner.executor.v1.StartTransactionAction start = 10; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.StartTransactionAction, + com.google.spanner.executor.v1.StartTransactionAction.Builder, + com.google.spanner.executor.v1.StartTransactionActionOrBuilder> + internalGetStartFieldBuilder() { + if (startBuilder_ == null) { + if (!(actionCase_ == 10)) { + action_ = com.google.spanner.executor.v1.StartTransactionAction.getDefaultInstance(); + } + startBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.StartTransactionAction, + com.google.spanner.executor.v1.StartTransactionAction.Builder, + com.google.spanner.executor.v1.StartTransactionActionOrBuilder>( + (com.google.spanner.executor.v1.StartTransactionAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 10; + onChanged(); + return startBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.FinishTransactionAction, + com.google.spanner.executor.v1.FinishTransactionAction.Builder, + com.google.spanner.executor.v1.FinishTransactionActionOrBuilder> + finishBuilder_; + + /** + * + * + *
    +     * Action to finish a transaction.
    +     * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction finish = 11; + * + * @return Whether the finish field is set. + */ + @java.lang.Override + public boolean hasFinish() { + return actionCase_ == 11; + } + + /** + * + * + *
    +     * Action to finish a transaction.
    +     * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction finish = 11; + * + * @return The finish. + */ + @java.lang.Override + public com.google.spanner.executor.v1.FinishTransactionAction getFinish() { + if (finishBuilder_ == null) { + if (actionCase_ == 11) { + return (com.google.spanner.executor.v1.FinishTransactionAction) action_; + } + return com.google.spanner.executor.v1.FinishTransactionAction.getDefaultInstance(); + } else { + if (actionCase_ == 11) { + return finishBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.FinishTransactionAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to finish a transaction.
    +     * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction finish = 11; + */ + public Builder setFinish(com.google.spanner.executor.v1.FinishTransactionAction value) { + if (finishBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + finishBuilder_.setMessage(value); + } + actionCase_ = 11; + return this; + } + + /** + * + * + *
    +     * Action to finish a transaction.
    +     * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction finish = 11; + */ + public Builder setFinish( + com.google.spanner.executor.v1.FinishTransactionAction.Builder builderForValue) { + if (finishBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + finishBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 11; + return this; + } + + /** + * + * + *
    +     * Action to finish a transaction.
    +     * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction finish = 11; + */ + public Builder mergeFinish(com.google.spanner.executor.v1.FinishTransactionAction value) { + if (finishBuilder_ == null) { + if (actionCase_ == 11 + && action_ + != com.google.spanner.executor.v1.FinishTransactionAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.FinishTransactionAction.newBuilder( + (com.google.spanner.executor.v1.FinishTransactionAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 11) { + finishBuilder_.mergeFrom(value); + } else { + finishBuilder_.setMessage(value); + } + } + actionCase_ = 11; + return this; + } + + /** + * + * + *
    +     * Action to finish a transaction.
    +     * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction finish = 11; + */ + public Builder clearFinish() { + if (finishBuilder_ == null) { + if (actionCase_ == 11) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 11) { + actionCase_ = 0; + action_ = null; + } + finishBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action to finish a transaction.
    +     * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction finish = 11; + */ + public com.google.spanner.executor.v1.FinishTransactionAction.Builder getFinishBuilder() { + return internalGetFinishFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action to finish a transaction.
    +     * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction finish = 11; + */ + @java.lang.Override + public com.google.spanner.executor.v1.FinishTransactionActionOrBuilder getFinishOrBuilder() { + if ((actionCase_ == 11) && (finishBuilder_ != null)) { + return finishBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 11) { + return (com.google.spanner.executor.v1.FinishTransactionAction) action_; + } + return com.google.spanner.executor.v1.FinishTransactionAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to finish a transaction.
    +     * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction finish = 11; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.FinishTransactionAction, + com.google.spanner.executor.v1.FinishTransactionAction.Builder, + com.google.spanner.executor.v1.FinishTransactionActionOrBuilder> + internalGetFinishFieldBuilder() { + if (finishBuilder_ == null) { + if (!(actionCase_ == 11)) { + action_ = com.google.spanner.executor.v1.FinishTransactionAction.getDefaultInstance(); + } + finishBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.FinishTransactionAction, + com.google.spanner.executor.v1.FinishTransactionAction.Builder, + com.google.spanner.executor.v1.FinishTransactionActionOrBuilder>( + (com.google.spanner.executor.v1.FinishTransactionAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 11; + onChanged(); + return finishBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ReadAction, + com.google.spanner.executor.v1.ReadAction.Builder, + com.google.spanner.executor.v1.ReadActionOrBuilder> + readBuilder_; + + /** + * + * + *
    +     * Action to do a normal read.
    +     * 
    + * + * .google.spanner.executor.v1.ReadAction read = 20; + * + * @return Whether the read field is set. + */ + @java.lang.Override + public boolean hasRead() { + return actionCase_ == 20; + } + + /** + * + * + *
    +     * Action to do a normal read.
    +     * 
    + * + * .google.spanner.executor.v1.ReadAction read = 20; + * + * @return The read. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ReadAction getRead() { + if (readBuilder_ == null) { + if (actionCase_ == 20) { + return (com.google.spanner.executor.v1.ReadAction) action_; + } + return com.google.spanner.executor.v1.ReadAction.getDefaultInstance(); + } else { + if (actionCase_ == 20) { + return readBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.ReadAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to do a normal read.
    +     * 
    + * + * .google.spanner.executor.v1.ReadAction read = 20; + */ + public Builder setRead(com.google.spanner.executor.v1.ReadAction value) { + if (readBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + readBuilder_.setMessage(value); + } + actionCase_ = 20; + return this; + } + + /** + * + * + *
    +     * Action to do a normal read.
    +     * 
    + * + * .google.spanner.executor.v1.ReadAction read = 20; + */ + public Builder setRead(com.google.spanner.executor.v1.ReadAction.Builder builderForValue) { + if (readBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + readBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 20; + return this; + } + + /** + * + * + *
    +     * Action to do a normal read.
    +     * 
    + * + * .google.spanner.executor.v1.ReadAction read = 20; + */ + public Builder mergeRead(com.google.spanner.executor.v1.ReadAction value) { + if (readBuilder_ == null) { + if (actionCase_ == 20 + && action_ != com.google.spanner.executor.v1.ReadAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.ReadAction.newBuilder( + (com.google.spanner.executor.v1.ReadAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 20) { + readBuilder_.mergeFrom(value); + } else { + readBuilder_.setMessage(value); + } + } + actionCase_ = 20; + return this; + } + + /** + * + * + *
    +     * Action to do a normal read.
    +     * 
    + * + * .google.spanner.executor.v1.ReadAction read = 20; + */ + public Builder clearRead() { + if (readBuilder_ == null) { + if (actionCase_ == 20) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 20) { + actionCase_ = 0; + action_ = null; + } + readBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action to do a normal read.
    +     * 
    + * + * .google.spanner.executor.v1.ReadAction read = 20; + */ + public com.google.spanner.executor.v1.ReadAction.Builder getReadBuilder() { + return internalGetReadFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action to do a normal read.
    +     * 
    + * + * .google.spanner.executor.v1.ReadAction read = 20; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ReadActionOrBuilder getReadOrBuilder() { + if ((actionCase_ == 20) && (readBuilder_ != null)) { + return readBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 20) { + return (com.google.spanner.executor.v1.ReadAction) action_; + } + return com.google.spanner.executor.v1.ReadAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to do a normal read.
    +     * 
    + * + * .google.spanner.executor.v1.ReadAction read = 20; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ReadAction, + com.google.spanner.executor.v1.ReadAction.Builder, + com.google.spanner.executor.v1.ReadActionOrBuilder> + internalGetReadFieldBuilder() { + if (readBuilder_ == null) { + if (!(actionCase_ == 20)) { + action_ = com.google.spanner.executor.v1.ReadAction.getDefaultInstance(); + } + readBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ReadAction, + com.google.spanner.executor.v1.ReadAction.Builder, + com.google.spanner.executor.v1.ReadActionOrBuilder>( + (com.google.spanner.executor.v1.ReadAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 20; + onChanged(); + return readBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.QueryAction, + com.google.spanner.executor.v1.QueryAction.Builder, + com.google.spanner.executor.v1.QueryActionOrBuilder> + queryBuilder_; + + /** + * + * + *
    +     * Action to do a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction query = 21; + * + * @return Whether the query field is set. + */ + @java.lang.Override + public boolean hasQuery() { + return actionCase_ == 21; + } + + /** + * + * + *
    +     * Action to do a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction query = 21; + * + * @return The query. + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryAction getQuery() { + if (queryBuilder_ == null) { + if (actionCase_ == 21) { + return (com.google.spanner.executor.v1.QueryAction) action_; + } + return com.google.spanner.executor.v1.QueryAction.getDefaultInstance(); + } else { + if (actionCase_ == 21) { + return queryBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.QueryAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to do a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction query = 21; + */ + public Builder setQuery(com.google.spanner.executor.v1.QueryAction value) { + if (queryBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + queryBuilder_.setMessage(value); + } + actionCase_ = 21; + return this; + } + + /** + * + * + *
    +     * Action to do a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction query = 21; + */ + public Builder setQuery(com.google.spanner.executor.v1.QueryAction.Builder builderForValue) { + if (queryBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + queryBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 21; + return this; + } + + /** + * + * + *
    +     * Action to do a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction query = 21; + */ + public Builder mergeQuery(com.google.spanner.executor.v1.QueryAction value) { + if (queryBuilder_ == null) { + if (actionCase_ == 21 + && action_ != com.google.spanner.executor.v1.QueryAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.QueryAction.newBuilder( + (com.google.spanner.executor.v1.QueryAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 21) { + queryBuilder_.mergeFrom(value); + } else { + queryBuilder_.setMessage(value); + } + } + actionCase_ = 21; + return this; + } + + /** + * + * + *
    +     * Action to do a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction query = 21; + */ + public Builder clearQuery() { + if (queryBuilder_ == null) { + if (actionCase_ == 21) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 21) { + actionCase_ = 0; + action_ = null; + } + queryBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action to do a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction query = 21; + */ + public com.google.spanner.executor.v1.QueryAction.Builder getQueryBuilder() { + return internalGetQueryFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action to do a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction query = 21; + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryActionOrBuilder getQueryOrBuilder() { + if ((actionCase_ == 21) && (queryBuilder_ != null)) { + return queryBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 21) { + return (com.google.spanner.executor.v1.QueryAction) action_; + } + return com.google.spanner.executor.v1.QueryAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to do a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryAction query = 21; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.QueryAction, + com.google.spanner.executor.v1.QueryAction.Builder, + com.google.spanner.executor.v1.QueryActionOrBuilder> + internalGetQueryFieldBuilder() { + if (queryBuilder_ == null) { + if (!(actionCase_ == 21)) { + action_ = com.google.spanner.executor.v1.QueryAction.getDefaultInstance(); + } + queryBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.QueryAction, + com.google.spanner.executor.v1.QueryAction.Builder, + com.google.spanner.executor.v1.QueryActionOrBuilder>( + (com.google.spanner.executor.v1.QueryAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 21; + onChanged(); + return queryBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.MutationAction, + com.google.spanner.executor.v1.MutationAction.Builder, + com.google.spanner.executor.v1.MutationActionOrBuilder> + mutationBuilder_; + + /** + * + * + *
    +     * Action to buffer a mutation.
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 22; + * + * @return Whether the mutation field is set. + */ + @java.lang.Override + public boolean hasMutation() { + return actionCase_ == 22; + } + + /** + * + * + *
    +     * Action to buffer a mutation.
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 22; + * + * @return The mutation. + */ + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction getMutation() { + if (mutationBuilder_ == null) { + if (actionCase_ == 22) { + return (com.google.spanner.executor.v1.MutationAction) action_; + } + return com.google.spanner.executor.v1.MutationAction.getDefaultInstance(); + } else { + if (actionCase_ == 22) { + return mutationBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.MutationAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to buffer a mutation.
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 22; + */ + public Builder setMutation(com.google.spanner.executor.v1.MutationAction value) { + if (mutationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + mutationBuilder_.setMessage(value); + } + actionCase_ = 22; + return this; + } + + /** + * + * + *
    +     * Action to buffer a mutation.
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 22; + */ + public Builder setMutation( + com.google.spanner.executor.v1.MutationAction.Builder builderForValue) { + if (mutationBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + mutationBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 22; + return this; + } + + /** + * + * + *
    +     * Action to buffer a mutation.
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 22; + */ + public Builder mergeMutation(com.google.spanner.executor.v1.MutationAction value) { + if (mutationBuilder_ == null) { + if (actionCase_ == 22 + && action_ != com.google.spanner.executor.v1.MutationAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.MutationAction.newBuilder( + (com.google.spanner.executor.v1.MutationAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 22) { + mutationBuilder_.mergeFrom(value); + } else { + mutationBuilder_.setMessage(value); + } + } + actionCase_ = 22; + return this; + } + + /** + * + * + *
    +     * Action to buffer a mutation.
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 22; + */ + public Builder clearMutation() { + if (mutationBuilder_ == null) { + if (actionCase_ == 22) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 22) { + actionCase_ = 0; + action_ = null; + } + mutationBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action to buffer a mutation.
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 22; + */ + public com.google.spanner.executor.v1.MutationAction.Builder getMutationBuilder() { + return internalGetMutationFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action to buffer a mutation.
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 22; + */ + @java.lang.Override + public com.google.spanner.executor.v1.MutationActionOrBuilder getMutationOrBuilder() { + if ((actionCase_ == 22) && (mutationBuilder_ != null)) { + return mutationBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 22) { + return (com.google.spanner.executor.v1.MutationAction) action_; + } + return com.google.spanner.executor.v1.MutationAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to buffer a mutation.
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 22; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.MutationAction, + com.google.spanner.executor.v1.MutationAction.Builder, + com.google.spanner.executor.v1.MutationActionOrBuilder> + internalGetMutationFieldBuilder() { + if (mutationBuilder_ == null) { + if (!(actionCase_ == 22)) { + action_ = com.google.spanner.executor.v1.MutationAction.getDefaultInstance(); + } + mutationBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.MutationAction, + com.google.spanner.executor.v1.MutationAction.Builder, + com.google.spanner.executor.v1.MutationActionOrBuilder>( + (com.google.spanner.executor.v1.MutationAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 22; + onChanged(); + return mutationBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.DmlAction, + com.google.spanner.executor.v1.DmlAction.Builder, + com.google.spanner.executor.v1.DmlActionOrBuilder> + dmlBuilder_; + + /** + * + * + *
    +     * Action to a DML.
    +     * 
    + * + * .google.spanner.executor.v1.DmlAction dml = 23; + * + * @return Whether the dml field is set. + */ + @java.lang.Override + public boolean hasDml() { + return actionCase_ == 23; + } + + /** + * + * + *
    +     * Action to a DML.
    +     * 
    + * + * .google.spanner.executor.v1.DmlAction dml = 23; + * + * @return The dml. + */ + @java.lang.Override + public com.google.spanner.executor.v1.DmlAction getDml() { + if (dmlBuilder_ == null) { + if (actionCase_ == 23) { + return (com.google.spanner.executor.v1.DmlAction) action_; + } + return com.google.spanner.executor.v1.DmlAction.getDefaultInstance(); + } else { + if (actionCase_ == 23) { + return dmlBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.DmlAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to a DML.
    +     * 
    + * + * .google.spanner.executor.v1.DmlAction dml = 23; + */ + public Builder setDml(com.google.spanner.executor.v1.DmlAction value) { + if (dmlBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + dmlBuilder_.setMessage(value); + } + actionCase_ = 23; + return this; + } + + /** + * + * + *
    +     * Action to a DML.
    +     * 
    + * + * .google.spanner.executor.v1.DmlAction dml = 23; + */ + public Builder setDml(com.google.spanner.executor.v1.DmlAction.Builder builderForValue) { + if (dmlBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + dmlBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 23; + return this; + } + + /** + * + * + *
    +     * Action to a DML.
    +     * 
    + * + * .google.spanner.executor.v1.DmlAction dml = 23; + */ + public Builder mergeDml(com.google.spanner.executor.v1.DmlAction value) { + if (dmlBuilder_ == null) { + if (actionCase_ == 23 + && action_ != com.google.spanner.executor.v1.DmlAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.DmlAction.newBuilder( + (com.google.spanner.executor.v1.DmlAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 23) { + dmlBuilder_.mergeFrom(value); + } else { + dmlBuilder_.setMessage(value); + } + } + actionCase_ = 23; + return this; + } + + /** + * + * + *
    +     * Action to a DML.
    +     * 
    + * + * .google.spanner.executor.v1.DmlAction dml = 23; + */ + public Builder clearDml() { + if (dmlBuilder_ == null) { + if (actionCase_ == 23) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 23) { + actionCase_ = 0; + action_ = null; + } + dmlBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action to a DML.
    +     * 
    + * + * .google.spanner.executor.v1.DmlAction dml = 23; + */ + public com.google.spanner.executor.v1.DmlAction.Builder getDmlBuilder() { + return internalGetDmlFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action to a DML.
    +     * 
    + * + * .google.spanner.executor.v1.DmlAction dml = 23; + */ + @java.lang.Override + public com.google.spanner.executor.v1.DmlActionOrBuilder getDmlOrBuilder() { + if ((actionCase_ == 23) && (dmlBuilder_ != null)) { + return dmlBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 23) { + return (com.google.spanner.executor.v1.DmlAction) action_; + } + return com.google.spanner.executor.v1.DmlAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to a DML.
    +     * 
    + * + * .google.spanner.executor.v1.DmlAction dml = 23; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.DmlAction, + com.google.spanner.executor.v1.DmlAction.Builder, + com.google.spanner.executor.v1.DmlActionOrBuilder> + internalGetDmlFieldBuilder() { + if (dmlBuilder_ == null) { + if (!(actionCase_ == 23)) { + action_ = com.google.spanner.executor.v1.DmlAction.getDefaultInstance(); + } + dmlBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.DmlAction, + com.google.spanner.executor.v1.DmlAction.Builder, + com.google.spanner.executor.v1.DmlActionOrBuilder>( + (com.google.spanner.executor.v1.DmlAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 23; + onChanged(); + return dmlBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.BatchDmlAction, + com.google.spanner.executor.v1.BatchDmlAction.Builder, + com.google.spanner.executor.v1.BatchDmlActionOrBuilder> + batchDmlBuilder_; + + /** + * + * + *
    +     * Action to a batch DML.
    +     * 
    + * + * .google.spanner.executor.v1.BatchDmlAction batch_dml = 24; + * + * @return Whether the batchDml field is set. + */ + @java.lang.Override + public boolean hasBatchDml() { + return actionCase_ == 24; + } + + /** + * + * + *
    +     * Action to a batch DML.
    +     * 
    + * + * .google.spanner.executor.v1.BatchDmlAction batch_dml = 24; + * + * @return The batchDml. + */ + @java.lang.Override + public com.google.spanner.executor.v1.BatchDmlAction getBatchDml() { + if (batchDmlBuilder_ == null) { + if (actionCase_ == 24) { + return (com.google.spanner.executor.v1.BatchDmlAction) action_; + } + return com.google.spanner.executor.v1.BatchDmlAction.getDefaultInstance(); + } else { + if (actionCase_ == 24) { + return batchDmlBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.BatchDmlAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to a batch DML.
    +     * 
    + * + * .google.spanner.executor.v1.BatchDmlAction batch_dml = 24; + */ + public Builder setBatchDml(com.google.spanner.executor.v1.BatchDmlAction value) { + if (batchDmlBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + batchDmlBuilder_.setMessage(value); + } + actionCase_ = 24; + return this; + } + + /** + * + * + *
    +     * Action to a batch DML.
    +     * 
    + * + * .google.spanner.executor.v1.BatchDmlAction batch_dml = 24; + */ + public Builder setBatchDml( + com.google.spanner.executor.v1.BatchDmlAction.Builder builderForValue) { + if (batchDmlBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + batchDmlBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 24; + return this; + } + + /** + * + * + *
    +     * Action to a batch DML.
    +     * 
    + * + * .google.spanner.executor.v1.BatchDmlAction batch_dml = 24; + */ + public Builder mergeBatchDml(com.google.spanner.executor.v1.BatchDmlAction value) { + if (batchDmlBuilder_ == null) { + if (actionCase_ == 24 + && action_ != com.google.spanner.executor.v1.BatchDmlAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.BatchDmlAction.newBuilder( + (com.google.spanner.executor.v1.BatchDmlAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 24) { + batchDmlBuilder_.mergeFrom(value); + } else { + batchDmlBuilder_.setMessage(value); + } + } + actionCase_ = 24; + return this; + } + + /** + * + * + *
    +     * Action to a batch DML.
    +     * 
    + * + * .google.spanner.executor.v1.BatchDmlAction batch_dml = 24; + */ + public Builder clearBatchDml() { + if (batchDmlBuilder_ == null) { + if (actionCase_ == 24) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 24) { + actionCase_ = 0; + action_ = null; + } + batchDmlBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action to a batch DML.
    +     * 
    + * + * .google.spanner.executor.v1.BatchDmlAction batch_dml = 24; + */ + public com.google.spanner.executor.v1.BatchDmlAction.Builder getBatchDmlBuilder() { + return internalGetBatchDmlFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action to a batch DML.
    +     * 
    + * + * .google.spanner.executor.v1.BatchDmlAction batch_dml = 24; + */ + @java.lang.Override + public com.google.spanner.executor.v1.BatchDmlActionOrBuilder getBatchDmlOrBuilder() { + if ((actionCase_ == 24) && (batchDmlBuilder_ != null)) { + return batchDmlBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 24) { + return (com.google.spanner.executor.v1.BatchDmlAction) action_; + } + return com.google.spanner.executor.v1.BatchDmlAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to a batch DML.
    +     * 
    + * + * .google.spanner.executor.v1.BatchDmlAction batch_dml = 24; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.BatchDmlAction, + com.google.spanner.executor.v1.BatchDmlAction.Builder, + com.google.spanner.executor.v1.BatchDmlActionOrBuilder> + internalGetBatchDmlFieldBuilder() { + if (batchDmlBuilder_ == null) { + if (!(actionCase_ == 24)) { + action_ = com.google.spanner.executor.v1.BatchDmlAction.getDefaultInstance(); + } + batchDmlBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.BatchDmlAction, + com.google.spanner.executor.v1.BatchDmlAction.Builder, + com.google.spanner.executor.v1.BatchDmlActionOrBuilder>( + (com.google.spanner.executor.v1.BatchDmlAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 24; + onChanged(); + return batchDmlBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.WriteMutationsAction, + com.google.spanner.executor.v1.WriteMutationsAction.Builder, + com.google.spanner.executor.v1.WriteMutationsActionOrBuilder> + writeBuilder_; + + /** + * + * + *
    +     * Action to write a mutation.
    +     * 
    + * + * .google.spanner.executor.v1.WriteMutationsAction write = 25; + * + * @return Whether the write field is set. + */ + @java.lang.Override + public boolean hasWrite() { + return actionCase_ == 25; + } + + /** + * + * + *
    +     * Action to write a mutation.
    +     * 
    + * + * .google.spanner.executor.v1.WriteMutationsAction write = 25; + * + * @return The write. + */ + @java.lang.Override + public com.google.spanner.executor.v1.WriteMutationsAction getWrite() { + if (writeBuilder_ == null) { + if (actionCase_ == 25) { + return (com.google.spanner.executor.v1.WriteMutationsAction) action_; + } + return com.google.spanner.executor.v1.WriteMutationsAction.getDefaultInstance(); + } else { + if (actionCase_ == 25) { + return writeBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.WriteMutationsAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to write a mutation.
    +     * 
    + * + * .google.spanner.executor.v1.WriteMutationsAction write = 25; + */ + public Builder setWrite(com.google.spanner.executor.v1.WriteMutationsAction value) { + if (writeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + writeBuilder_.setMessage(value); + } + actionCase_ = 25; + return this; + } + + /** + * + * + *
    +     * Action to write a mutation.
    +     * 
    + * + * .google.spanner.executor.v1.WriteMutationsAction write = 25; + */ + public Builder setWrite( + com.google.spanner.executor.v1.WriteMutationsAction.Builder builderForValue) { + if (writeBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + writeBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 25; + return this; + } + + /** + * + * + *
    +     * Action to write a mutation.
    +     * 
    + * + * .google.spanner.executor.v1.WriteMutationsAction write = 25; + */ + public Builder mergeWrite(com.google.spanner.executor.v1.WriteMutationsAction value) { + if (writeBuilder_ == null) { + if (actionCase_ == 25 + && action_ + != com.google.spanner.executor.v1.WriteMutationsAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.WriteMutationsAction.newBuilder( + (com.google.spanner.executor.v1.WriteMutationsAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 25) { + writeBuilder_.mergeFrom(value); + } else { + writeBuilder_.setMessage(value); + } + } + actionCase_ = 25; + return this; + } + + /** + * + * + *
    +     * Action to write a mutation.
    +     * 
    + * + * .google.spanner.executor.v1.WriteMutationsAction write = 25; + */ + public Builder clearWrite() { + if (writeBuilder_ == null) { + if (actionCase_ == 25) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 25) { + actionCase_ = 0; + action_ = null; + } + writeBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action to write a mutation.
    +     * 
    + * + * .google.spanner.executor.v1.WriteMutationsAction write = 25; + */ + public com.google.spanner.executor.v1.WriteMutationsAction.Builder getWriteBuilder() { + return internalGetWriteFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action to write a mutation.
    +     * 
    + * + * .google.spanner.executor.v1.WriteMutationsAction write = 25; + */ + @java.lang.Override + public com.google.spanner.executor.v1.WriteMutationsActionOrBuilder getWriteOrBuilder() { + if ((actionCase_ == 25) && (writeBuilder_ != null)) { + return writeBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 25) { + return (com.google.spanner.executor.v1.WriteMutationsAction) action_; + } + return com.google.spanner.executor.v1.WriteMutationsAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to write a mutation.
    +     * 
    + * + * .google.spanner.executor.v1.WriteMutationsAction write = 25; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.WriteMutationsAction, + com.google.spanner.executor.v1.WriteMutationsAction.Builder, + com.google.spanner.executor.v1.WriteMutationsActionOrBuilder> + internalGetWriteFieldBuilder() { + if (writeBuilder_ == null) { + if (!(actionCase_ == 25)) { + action_ = com.google.spanner.executor.v1.WriteMutationsAction.getDefaultInstance(); + } + writeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.WriteMutationsAction, + com.google.spanner.executor.v1.WriteMutationsAction.Builder, + com.google.spanner.executor.v1.WriteMutationsActionOrBuilder>( + (com.google.spanner.executor.v1.WriteMutationsAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 25; + onChanged(); + return writeBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.PartitionedUpdateAction, + com.google.spanner.executor.v1.PartitionedUpdateAction.Builder, + com.google.spanner.executor.v1.PartitionedUpdateActionOrBuilder> + partitionedUpdateBuilder_; + + /** + * + * + *
    +     * Action to a partitioned update.
    +     * 
    + * + * .google.spanner.executor.v1.PartitionedUpdateAction partitioned_update = 27; + * + * @return Whether the partitionedUpdate field is set. + */ + @java.lang.Override + public boolean hasPartitionedUpdate() { + return actionCase_ == 27; + } + + /** + * + * + *
    +     * Action to a partitioned update.
    +     * 
    + * + * .google.spanner.executor.v1.PartitionedUpdateAction partitioned_update = 27; + * + * @return The partitionedUpdate. + */ + @java.lang.Override + public com.google.spanner.executor.v1.PartitionedUpdateAction getPartitionedUpdate() { + if (partitionedUpdateBuilder_ == null) { + if (actionCase_ == 27) { + return (com.google.spanner.executor.v1.PartitionedUpdateAction) action_; + } + return com.google.spanner.executor.v1.PartitionedUpdateAction.getDefaultInstance(); + } else { + if (actionCase_ == 27) { + return partitionedUpdateBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.PartitionedUpdateAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to a partitioned update.
    +     * 
    + * + * .google.spanner.executor.v1.PartitionedUpdateAction partitioned_update = 27; + */ + public Builder setPartitionedUpdate( + com.google.spanner.executor.v1.PartitionedUpdateAction value) { + if (partitionedUpdateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + partitionedUpdateBuilder_.setMessage(value); + } + actionCase_ = 27; + return this; + } + + /** + * + * + *
    +     * Action to a partitioned update.
    +     * 
    + * + * .google.spanner.executor.v1.PartitionedUpdateAction partitioned_update = 27; + */ + public Builder setPartitionedUpdate( + com.google.spanner.executor.v1.PartitionedUpdateAction.Builder builderForValue) { + if (partitionedUpdateBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + partitionedUpdateBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 27; + return this; + } + + /** + * + * + *
    +     * Action to a partitioned update.
    +     * 
    + * + * .google.spanner.executor.v1.PartitionedUpdateAction partitioned_update = 27; + */ + public Builder mergePartitionedUpdate( + com.google.spanner.executor.v1.PartitionedUpdateAction value) { + if (partitionedUpdateBuilder_ == null) { + if (actionCase_ == 27 + && action_ + != com.google.spanner.executor.v1.PartitionedUpdateAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.PartitionedUpdateAction.newBuilder( + (com.google.spanner.executor.v1.PartitionedUpdateAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 27) { + partitionedUpdateBuilder_.mergeFrom(value); + } else { + partitionedUpdateBuilder_.setMessage(value); + } + } + actionCase_ = 27; + return this; + } + + /** + * + * + *
    +     * Action to a partitioned update.
    +     * 
    + * + * .google.spanner.executor.v1.PartitionedUpdateAction partitioned_update = 27; + */ + public Builder clearPartitionedUpdate() { + if (partitionedUpdateBuilder_ == null) { + if (actionCase_ == 27) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 27) { + actionCase_ = 0; + action_ = null; + } + partitionedUpdateBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action to a partitioned update.
    +     * 
    + * + * .google.spanner.executor.v1.PartitionedUpdateAction partitioned_update = 27; + */ + public com.google.spanner.executor.v1.PartitionedUpdateAction.Builder + getPartitionedUpdateBuilder() { + return internalGetPartitionedUpdateFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action to a partitioned update.
    +     * 
    + * + * .google.spanner.executor.v1.PartitionedUpdateAction partitioned_update = 27; + */ + @java.lang.Override + public com.google.spanner.executor.v1.PartitionedUpdateActionOrBuilder + getPartitionedUpdateOrBuilder() { + if ((actionCase_ == 27) && (partitionedUpdateBuilder_ != null)) { + return partitionedUpdateBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 27) { + return (com.google.spanner.executor.v1.PartitionedUpdateAction) action_; + } + return com.google.spanner.executor.v1.PartitionedUpdateAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to a partitioned update.
    +     * 
    + * + * .google.spanner.executor.v1.PartitionedUpdateAction partitioned_update = 27; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.PartitionedUpdateAction, + com.google.spanner.executor.v1.PartitionedUpdateAction.Builder, + com.google.spanner.executor.v1.PartitionedUpdateActionOrBuilder> + internalGetPartitionedUpdateFieldBuilder() { + if (partitionedUpdateBuilder_ == null) { + if (!(actionCase_ == 27)) { + action_ = com.google.spanner.executor.v1.PartitionedUpdateAction.getDefaultInstance(); + } + partitionedUpdateBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.PartitionedUpdateAction, + com.google.spanner.executor.v1.PartitionedUpdateAction.Builder, + com.google.spanner.executor.v1.PartitionedUpdateActionOrBuilder>( + (com.google.spanner.executor.v1.PartitionedUpdateAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 27; + onChanged(); + return partitionedUpdateBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.AdminAction, + com.google.spanner.executor.v1.AdminAction.Builder, + com.google.spanner.executor.v1.AdminActionOrBuilder> + adminBuilder_; + + /** + * + * + *
    +     * Action that contains any administrative operation, like database,
    +     * instance manipulation.
    +     * 
    + * + * .google.spanner.executor.v1.AdminAction admin = 30; + * + * @return Whether the admin field is set. + */ + @java.lang.Override + public boolean hasAdmin() { + return actionCase_ == 30; + } + + /** + * + * + *
    +     * Action that contains any administrative operation, like database,
    +     * instance manipulation.
    +     * 
    + * + * .google.spanner.executor.v1.AdminAction admin = 30; + * + * @return The admin. + */ + @java.lang.Override + public com.google.spanner.executor.v1.AdminAction getAdmin() { + if (adminBuilder_ == null) { + if (actionCase_ == 30) { + return (com.google.spanner.executor.v1.AdminAction) action_; + } + return com.google.spanner.executor.v1.AdminAction.getDefaultInstance(); + } else { + if (actionCase_ == 30) { + return adminBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.AdminAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that contains any administrative operation, like database,
    +     * instance manipulation.
    +     * 
    + * + * .google.spanner.executor.v1.AdminAction admin = 30; + */ + public Builder setAdmin(com.google.spanner.executor.v1.AdminAction value) { + if (adminBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + adminBuilder_.setMessage(value); + } + actionCase_ = 30; + return this; + } + + /** + * + * + *
    +     * Action that contains any administrative operation, like database,
    +     * instance manipulation.
    +     * 
    + * + * .google.spanner.executor.v1.AdminAction admin = 30; + */ + public Builder setAdmin(com.google.spanner.executor.v1.AdminAction.Builder builderForValue) { + if (adminBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + adminBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 30; + return this; + } + + /** + * + * + *
    +     * Action that contains any administrative operation, like database,
    +     * instance manipulation.
    +     * 
    + * + * .google.spanner.executor.v1.AdminAction admin = 30; + */ + public Builder mergeAdmin(com.google.spanner.executor.v1.AdminAction value) { + if (adminBuilder_ == null) { + if (actionCase_ == 30 + && action_ != com.google.spanner.executor.v1.AdminAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.AdminAction.newBuilder( + (com.google.spanner.executor.v1.AdminAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 30) { + adminBuilder_.mergeFrom(value); + } else { + adminBuilder_.setMessage(value); + } + } + actionCase_ = 30; + return this; + } + + /** + * + * + *
    +     * Action that contains any administrative operation, like database,
    +     * instance manipulation.
    +     * 
    + * + * .google.spanner.executor.v1.AdminAction admin = 30; + */ + public Builder clearAdmin() { + if (adminBuilder_ == null) { + if (actionCase_ == 30) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 30) { + actionCase_ = 0; + action_ = null; + } + adminBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action that contains any administrative operation, like database,
    +     * instance manipulation.
    +     * 
    + * + * .google.spanner.executor.v1.AdminAction admin = 30; + */ + public com.google.spanner.executor.v1.AdminAction.Builder getAdminBuilder() { + return internalGetAdminFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action that contains any administrative operation, like database,
    +     * instance manipulation.
    +     * 
    + * + * .google.spanner.executor.v1.AdminAction admin = 30; + */ + @java.lang.Override + public com.google.spanner.executor.v1.AdminActionOrBuilder getAdminOrBuilder() { + if ((actionCase_ == 30) && (adminBuilder_ != null)) { + return adminBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 30) { + return (com.google.spanner.executor.v1.AdminAction) action_; + } + return com.google.spanner.executor.v1.AdminAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action that contains any administrative operation, like database,
    +     * instance manipulation.
    +     * 
    + * + * .google.spanner.executor.v1.AdminAction admin = 30; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.AdminAction, + com.google.spanner.executor.v1.AdminAction.Builder, + com.google.spanner.executor.v1.AdminActionOrBuilder> + internalGetAdminFieldBuilder() { + if (adminBuilder_ == null) { + if (!(actionCase_ == 30)) { + action_ = com.google.spanner.executor.v1.AdminAction.getDefaultInstance(); + } + adminBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.AdminAction, + com.google.spanner.executor.v1.AdminAction.Builder, + com.google.spanner.executor.v1.AdminActionOrBuilder>( + (com.google.spanner.executor.v1.AdminAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 30; + onChanged(); + return adminBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.StartBatchTransactionAction, + com.google.spanner.executor.v1.StartBatchTransactionAction.Builder, + com.google.spanner.executor.v1.StartBatchTransactionActionOrBuilder> + startBatchTxnBuilder_; + + /** + * + * + *
    +     * Action to start a batch transaction.
    +     * 
    + * + * .google.spanner.executor.v1.StartBatchTransactionAction start_batch_txn = 40; + * + * @return Whether the startBatchTxn field is set. + */ + @java.lang.Override + public boolean hasStartBatchTxn() { + return actionCase_ == 40; + } + + /** + * + * + *
    +     * Action to start a batch transaction.
    +     * 
    + * + * .google.spanner.executor.v1.StartBatchTransactionAction start_batch_txn = 40; + * + * @return The startBatchTxn. + */ + @java.lang.Override + public com.google.spanner.executor.v1.StartBatchTransactionAction getStartBatchTxn() { + if (startBatchTxnBuilder_ == null) { + if (actionCase_ == 40) { + return (com.google.spanner.executor.v1.StartBatchTransactionAction) action_; + } + return com.google.spanner.executor.v1.StartBatchTransactionAction.getDefaultInstance(); + } else { + if (actionCase_ == 40) { + return startBatchTxnBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.StartBatchTransactionAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to start a batch transaction.
    +     * 
    + * + * .google.spanner.executor.v1.StartBatchTransactionAction start_batch_txn = 40; + */ + public Builder setStartBatchTxn( + com.google.spanner.executor.v1.StartBatchTransactionAction value) { + if (startBatchTxnBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + startBatchTxnBuilder_.setMessage(value); + } + actionCase_ = 40; + return this; + } + + /** + * + * + *
    +     * Action to start a batch transaction.
    +     * 
    + * + * .google.spanner.executor.v1.StartBatchTransactionAction start_batch_txn = 40; + */ + public Builder setStartBatchTxn( + com.google.spanner.executor.v1.StartBatchTransactionAction.Builder builderForValue) { + if (startBatchTxnBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + startBatchTxnBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 40; + return this; + } + + /** + * + * + *
    +     * Action to start a batch transaction.
    +     * 
    + * + * .google.spanner.executor.v1.StartBatchTransactionAction start_batch_txn = 40; + */ + public Builder mergeStartBatchTxn( + com.google.spanner.executor.v1.StartBatchTransactionAction value) { + if (startBatchTxnBuilder_ == null) { + if (actionCase_ == 40 + && action_ + != com.google.spanner.executor.v1.StartBatchTransactionAction + .getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.StartBatchTransactionAction.newBuilder( + (com.google.spanner.executor.v1.StartBatchTransactionAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 40) { + startBatchTxnBuilder_.mergeFrom(value); + } else { + startBatchTxnBuilder_.setMessage(value); + } + } + actionCase_ = 40; + return this; + } + + /** + * + * + *
    +     * Action to start a batch transaction.
    +     * 
    + * + * .google.spanner.executor.v1.StartBatchTransactionAction start_batch_txn = 40; + */ + public Builder clearStartBatchTxn() { + if (startBatchTxnBuilder_ == null) { + if (actionCase_ == 40) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 40) { + actionCase_ = 0; + action_ = null; + } + startBatchTxnBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action to start a batch transaction.
    +     * 
    + * + * .google.spanner.executor.v1.StartBatchTransactionAction start_batch_txn = 40; + */ + public com.google.spanner.executor.v1.StartBatchTransactionAction.Builder + getStartBatchTxnBuilder() { + return internalGetStartBatchTxnFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action to start a batch transaction.
    +     * 
    + * + * .google.spanner.executor.v1.StartBatchTransactionAction start_batch_txn = 40; + */ + @java.lang.Override + public com.google.spanner.executor.v1.StartBatchTransactionActionOrBuilder + getStartBatchTxnOrBuilder() { + if ((actionCase_ == 40) && (startBatchTxnBuilder_ != null)) { + return startBatchTxnBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 40) { + return (com.google.spanner.executor.v1.StartBatchTransactionAction) action_; + } + return com.google.spanner.executor.v1.StartBatchTransactionAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to start a batch transaction.
    +     * 
    + * + * .google.spanner.executor.v1.StartBatchTransactionAction start_batch_txn = 40; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.StartBatchTransactionAction, + com.google.spanner.executor.v1.StartBatchTransactionAction.Builder, + com.google.spanner.executor.v1.StartBatchTransactionActionOrBuilder> + internalGetStartBatchTxnFieldBuilder() { + if (startBatchTxnBuilder_ == null) { + if (!(actionCase_ == 40)) { + action_ = com.google.spanner.executor.v1.StartBatchTransactionAction.getDefaultInstance(); + } + startBatchTxnBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.StartBatchTransactionAction, + com.google.spanner.executor.v1.StartBatchTransactionAction.Builder, + com.google.spanner.executor.v1.StartBatchTransactionActionOrBuilder>( + (com.google.spanner.executor.v1.StartBatchTransactionAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 40; + onChanged(); + return startBatchTxnBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CloseBatchTransactionAction, + com.google.spanner.executor.v1.CloseBatchTransactionAction.Builder, + com.google.spanner.executor.v1.CloseBatchTransactionActionOrBuilder> + closeBatchTxnBuilder_; + + /** + * + * + *
    +     * Action to close a batch transaction.
    +     * 
    + * + * .google.spanner.executor.v1.CloseBatchTransactionAction close_batch_txn = 41; + * + * @return Whether the closeBatchTxn field is set. + */ + @java.lang.Override + public boolean hasCloseBatchTxn() { + return actionCase_ == 41; + } + + /** + * + * + *
    +     * Action to close a batch transaction.
    +     * 
    + * + * .google.spanner.executor.v1.CloseBatchTransactionAction close_batch_txn = 41; + * + * @return The closeBatchTxn. + */ + @java.lang.Override + public com.google.spanner.executor.v1.CloseBatchTransactionAction getCloseBatchTxn() { + if (closeBatchTxnBuilder_ == null) { + if (actionCase_ == 41) { + return (com.google.spanner.executor.v1.CloseBatchTransactionAction) action_; + } + return com.google.spanner.executor.v1.CloseBatchTransactionAction.getDefaultInstance(); + } else { + if (actionCase_ == 41) { + return closeBatchTxnBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.CloseBatchTransactionAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to close a batch transaction.
    +     * 
    + * + * .google.spanner.executor.v1.CloseBatchTransactionAction close_batch_txn = 41; + */ + public Builder setCloseBatchTxn( + com.google.spanner.executor.v1.CloseBatchTransactionAction value) { + if (closeBatchTxnBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + closeBatchTxnBuilder_.setMessage(value); + } + actionCase_ = 41; + return this; + } + + /** + * + * + *
    +     * Action to close a batch transaction.
    +     * 
    + * + * .google.spanner.executor.v1.CloseBatchTransactionAction close_batch_txn = 41; + */ + public Builder setCloseBatchTxn( + com.google.spanner.executor.v1.CloseBatchTransactionAction.Builder builderForValue) { + if (closeBatchTxnBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + closeBatchTxnBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 41; + return this; + } + + /** + * + * + *
    +     * Action to close a batch transaction.
    +     * 
    + * + * .google.spanner.executor.v1.CloseBatchTransactionAction close_batch_txn = 41; + */ + public Builder mergeCloseBatchTxn( + com.google.spanner.executor.v1.CloseBatchTransactionAction value) { + if (closeBatchTxnBuilder_ == null) { + if (actionCase_ == 41 + && action_ + != com.google.spanner.executor.v1.CloseBatchTransactionAction + .getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.CloseBatchTransactionAction.newBuilder( + (com.google.spanner.executor.v1.CloseBatchTransactionAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 41) { + closeBatchTxnBuilder_.mergeFrom(value); + } else { + closeBatchTxnBuilder_.setMessage(value); + } + } + actionCase_ = 41; + return this; + } + + /** + * + * + *
    +     * Action to close a batch transaction.
    +     * 
    + * + * .google.spanner.executor.v1.CloseBatchTransactionAction close_batch_txn = 41; + */ + public Builder clearCloseBatchTxn() { + if (closeBatchTxnBuilder_ == null) { + if (actionCase_ == 41) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 41) { + actionCase_ = 0; + action_ = null; + } + closeBatchTxnBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action to close a batch transaction.
    +     * 
    + * + * .google.spanner.executor.v1.CloseBatchTransactionAction close_batch_txn = 41; + */ + public com.google.spanner.executor.v1.CloseBatchTransactionAction.Builder + getCloseBatchTxnBuilder() { + return internalGetCloseBatchTxnFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action to close a batch transaction.
    +     * 
    + * + * .google.spanner.executor.v1.CloseBatchTransactionAction close_batch_txn = 41; + */ + @java.lang.Override + public com.google.spanner.executor.v1.CloseBatchTransactionActionOrBuilder + getCloseBatchTxnOrBuilder() { + if ((actionCase_ == 41) && (closeBatchTxnBuilder_ != null)) { + return closeBatchTxnBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 41) { + return (com.google.spanner.executor.v1.CloseBatchTransactionAction) action_; + } + return com.google.spanner.executor.v1.CloseBatchTransactionAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to close a batch transaction.
    +     * 
    + * + * .google.spanner.executor.v1.CloseBatchTransactionAction close_batch_txn = 41; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CloseBatchTransactionAction, + com.google.spanner.executor.v1.CloseBatchTransactionAction.Builder, + com.google.spanner.executor.v1.CloseBatchTransactionActionOrBuilder> + internalGetCloseBatchTxnFieldBuilder() { + if (closeBatchTxnBuilder_ == null) { + if (!(actionCase_ == 41)) { + action_ = com.google.spanner.executor.v1.CloseBatchTransactionAction.getDefaultInstance(); + } + closeBatchTxnBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.CloseBatchTransactionAction, + com.google.spanner.executor.v1.CloseBatchTransactionAction.Builder, + com.google.spanner.executor.v1.CloseBatchTransactionActionOrBuilder>( + (com.google.spanner.executor.v1.CloseBatchTransactionAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 41; + onChanged(); + return closeBatchTxnBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction, + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction.Builder, + com.google.spanner.executor.v1.GenerateDbPartitionsForReadActionOrBuilder> + generateDbPartitionsReadBuilder_; + + /** + * + * + *
    +     * Action to generate database partitions for batch read.
    +     * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForReadAction generate_db_partitions_read = 42; + * + * + * @return Whether the generateDbPartitionsRead field is set. + */ + @java.lang.Override + public boolean hasGenerateDbPartitionsRead() { + return actionCase_ == 42; + } + + /** + * + * + *
    +     * Action to generate database partitions for batch read.
    +     * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForReadAction generate_db_partitions_read = 42; + * + * + * @return The generateDbPartitionsRead. + */ + @java.lang.Override + public com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction + getGenerateDbPartitionsRead() { + if (generateDbPartitionsReadBuilder_ == null) { + if (actionCase_ == 42) { + return (com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction) action_; + } + return com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction + .getDefaultInstance(); + } else { + if (actionCase_ == 42) { + return generateDbPartitionsReadBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction + .getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to generate database partitions for batch read.
    +     * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForReadAction generate_db_partitions_read = 42; + * + */ + public Builder setGenerateDbPartitionsRead( + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction value) { + if (generateDbPartitionsReadBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + generateDbPartitionsReadBuilder_.setMessage(value); + } + actionCase_ = 42; + return this; + } + + /** + * + * + *
    +     * Action to generate database partitions for batch read.
    +     * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForReadAction generate_db_partitions_read = 42; + * + */ + public Builder setGenerateDbPartitionsRead( + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction.Builder builderForValue) { + if (generateDbPartitionsReadBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + generateDbPartitionsReadBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 42; + return this; + } + + /** + * + * + *
    +     * Action to generate database partitions for batch read.
    +     * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForReadAction generate_db_partitions_read = 42; + * + */ + public Builder mergeGenerateDbPartitionsRead( + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction value) { + if (generateDbPartitionsReadBuilder_ == null) { + if (actionCase_ == 42 + && action_ + != com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction + .getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction.newBuilder( + (com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 42) { + generateDbPartitionsReadBuilder_.mergeFrom(value); + } else { + generateDbPartitionsReadBuilder_.setMessage(value); + } + } + actionCase_ = 42; + return this; + } + + /** + * + * + *
    +     * Action to generate database partitions for batch read.
    +     * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForReadAction generate_db_partitions_read = 42; + * + */ + public Builder clearGenerateDbPartitionsRead() { + if (generateDbPartitionsReadBuilder_ == null) { + if (actionCase_ == 42) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 42) { + actionCase_ = 0; + action_ = null; + } + generateDbPartitionsReadBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action to generate database partitions for batch read.
    +     * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForReadAction generate_db_partitions_read = 42; + * + */ + public com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction.Builder + getGenerateDbPartitionsReadBuilder() { + return internalGetGenerateDbPartitionsReadFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action to generate database partitions for batch read.
    +     * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForReadAction generate_db_partitions_read = 42; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.GenerateDbPartitionsForReadActionOrBuilder + getGenerateDbPartitionsReadOrBuilder() { + if ((actionCase_ == 42) && (generateDbPartitionsReadBuilder_ != null)) { + return generateDbPartitionsReadBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 42) { + return (com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction) action_; + } + return com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction + .getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to generate database partitions for batch read.
    +     * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForReadAction generate_db_partitions_read = 42; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction, + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction.Builder, + com.google.spanner.executor.v1.GenerateDbPartitionsForReadActionOrBuilder> + internalGetGenerateDbPartitionsReadFieldBuilder() { + if (generateDbPartitionsReadBuilder_ == null) { + if (!(actionCase_ == 42)) { + action_ = + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction.getDefaultInstance(); + } + generateDbPartitionsReadBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction, + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction.Builder, + com.google.spanner.executor.v1.GenerateDbPartitionsForReadActionOrBuilder>( + (com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 42; + onChanged(); + return generateDbPartitionsReadBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction, + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction.Builder, + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryActionOrBuilder> + generateDbPartitionsQueryBuilder_; + + /** + * + * + *
    +     * Action to generate database partitions for batch query.
    +     * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForQueryAction generate_db_partitions_query = 43; + * + * + * @return Whether the generateDbPartitionsQuery field is set. + */ + @java.lang.Override + public boolean hasGenerateDbPartitionsQuery() { + return actionCase_ == 43; + } + + /** + * + * + *
    +     * Action to generate database partitions for batch query.
    +     * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForQueryAction generate_db_partitions_query = 43; + * + * + * @return The generateDbPartitionsQuery. + */ + @java.lang.Override + public com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction + getGenerateDbPartitionsQuery() { + if (generateDbPartitionsQueryBuilder_ == null) { + if (actionCase_ == 43) { + return (com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction) action_; + } + return com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction + .getDefaultInstance(); + } else { + if (actionCase_ == 43) { + return generateDbPartitionsQueryBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction + .getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to generate database partitions for batch query.
    +     * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForQueryAction generate_db_partitions_query = 43; + * + */ + public Builder setGenerateDbPartitionsQuery( + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction value) { + if (generateDbPartitionsQueryBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + generateDbPartitionsQueryBuilder_.setMessage(value); + } + actionCase_ = 43; + return this; + } + + /** + * + * + *
    +     * Action to generate database partitions for batch query.
    +     * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForQueryAction generate_db_partitions_query = 43; + * + */ + public Builder setGenerateDbPartitionsQuery( + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction.Builder builderForValue) { + if (generateDbPartitionsQueryBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + generateDbPartitionsQueryBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 43; + return this; + } + + /** + * + * + *
    +     * Action to generate database partitions for batch query.
    +     * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForQueryAction generate_db_partitions_query = 43; + * + */ + public Builder mergeGenerateDbPartitionsQuery( + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction value) { + if (generateDbPartitionsQueryBuilder_ == null) { + if (actionCase_ == 43 + && action_ + != com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction + .getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction.newBuilder( + (com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 43) { + generateDbPartitionsQueryBuilder_.mergeFrom(value); + } else { + generateDbPartitionsQueryBuilder_.setMessage(value); + } + } + actionCase_ = 43; + return this; + } + + /** + * + * + *
    +     * Action to generate database partitions for batch query.
    +     * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForQueryAction generate_db_partitions_query = 43; + * + */ + public Builder clearGenerateDbPartitionsQuery() { + if (generateDbPartitionsQueryBuilder_ == null) { + if (actionCase_ == 43) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 43) { + actionCase_ = 0; + action_ = null; + } + generateDbPartitionsQueryBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action to generate database partitions for batch query.
    +     * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForQueryAction generate_db_partitions_query = 43; + * + */ + public com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction.Builder + getGenerateDbPartitionsQueryBuilder() { + return internalGetGenerateDbPartitionsQueryFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action to generate database partitions for batch query.
    +     * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForQueryAction generate_db_partitions_query = 43; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.GenerateDbPartitionsForQueryActionOrBuilder + getGenerateDbPartitionsQueryOrBuilder() { + if ((actionCase_ == 43) && (generateDbPartitionsQueryBuilder_ != null)) { + return generateDbPartitionsQueryBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 43) { + return (com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction) action_; + } + return com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction + .getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to generate database partitions for batch query.
    +     * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForQueryAction generate_db_partitions_query = 43; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction, + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction.Builder, + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryActionOrBuilder> + internalGetGenerateDbPartitionsQueryFieldBuilder() { + if (generateDbPartitionsQueryBuilder_ == null) { + if (!(actionCase_ == 43)) { + action_ = + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction + .getDefaultInstance(); + } + generateDbPartitionsQueryBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction, + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction.Builder, + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryActionOrBuilder>( + (com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 43; + onChanged(); + return generateDbPartitionsQueryBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ExecutePartitionAction, + com.google.spanner.executor.v1.ExecutePartitionAction.Builder, + com.google.spanner.executor.v1.ExecutePartitionActionOrBuilder> + executePartitionBuilder_; + + /** + * + * + *
    +     * Action to execute batch actions on generated partitions.
    +     * 
    + * + * .google.spanner.executor.v1.ExecutePartitionAction execute_partition = 44; + * + * @return Whether the executePartition field is set. + */ + @java.lang.Override + public boolean hasExecutePartition() { + return actionCase_ == 44; + } + + /** + * + * + *
    +     * Action to execute batch actions on generated partitions.
    +     * 
    + * + * .google.spanner.executor.v1.ExecutePartitionAction execute_partition = 44; + * + * @return The executePartition. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ExecutePartitionAction getExecutePartition() { + if (executePartitionBuilder_ == null) { + if (actionCase_ == 44) { + return (com.google.spanner.executor.v1.ExecutePartitionAction) action_; + } + return com.google.spanner.executor.v1.ExecutePartitionAction.getDefaultInstance(); + } else { + if (actionCase_ == 44) { + return executePartitionBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.ExecutePartitionAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to execute batch actions on generated partitions.
    +     * 
    + * + * .google.spanner.executor.v1.ExecutePartitionAction execute_partition = 44; + */ + public Builder setExecutePartition( + com.google.spanner.executor.v1.ExecutePartitionAction value) { + if (executePartitionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + executePartitionBuilder_.setMessage(value); + } + actionCase_ = 44; + return this; + } + + /** + * + * + *
    +     * Action to execute batch actions on generated partitions.
    +     * 
    + * + * .google.spanner.executor.v1.ExecutePartitionAction execute_partition = 44; + */ + public Builder setExecutePartition( + com.google.spanner.executor.v1.ExecutePartitionAction.Builder builderForValue) { + if (executePartitionBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + executePartitionBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 44; + return this; + } + + /** + * + * + *
    +     * Action to execute batch actions on generated partitions.
    +     * 
    + * + * .google.spanner.executor.v1.ExecutePartitionAction execute_partition = 44; + */ + public Builder mergeExecutePartition( + com.google.spanner.executor.v1.ExecutePartitionAction value) { + if (executePartitionBuilder_ == null) { + if (actionCase_ == 44 + && action_ + != com.google.spanner.executor.v1.ExecutePartitionAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.ExecutePartitionAction.newBuilder( + (com.google.spanner.executor.v1.ExecutePartitionAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 44) { + executePartitionBuilder_.mergeFrom(value); + } else { + executePartitionBuilder_.setMessage(value); + } + } + actionCase_ = 44; + return this; + } + + /** + * + * + *
    +     * Action to execute batch actions on generated partitions.
    +     * 
    + * + * .google.spanner.executor.v1.ExecutePartitionAction execute_partition = 44; + */ + public Builder clearExecutePartition() { + if (executePartitionBuilder_ == null) { + if (actionCase_ == 44) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 44) { + actionCase_ = 0; + action_ = null; + } + executePartitionBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action to execute batch actions on generated partitions.
    +     * 
    + * + * .google.spanner.executor.v1.ExecutePartitionAction execute_partition = 44; + */ + public com.google.spanner.executor.v1.ExecutePartitionAction.Builder + getExecutePartitionBuilder() { + return internalGetExecutePartitionFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action to execute batch actions on generated partitions.
    +     * 
    + * + * .google.spanner.executor.v1.ExecutePartitionAction execute_partition = 44; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ExecutePartitionActionOrBuilder + getExecutePartitionOrBuilder() { + if ((actionCase_ == 44) && (executePartitionBuilder_ != null)) { + return executePartitionBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 44) { + return (com.google.spanner.executor.v1.ExecutePartitionAction) action_; + } + return com.google.spanner.executor.v1.ExecutePartitionAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to execute batch actions on generated partitions.
    +     * 
    + * + * .google.spanner.executor.v1.ExecutePartitionAction execute_partition = 44; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ExecutePartitionAction, + com.google.spanner.executor.v1.ExecutePartitionAction.Builder, + com.google.spanner.executor.v1.ExecutePartitionActionOrBuilder> + internalGetExecutePartitionFieldBuilder() { + if (executePartitionBuilder_ == null) { + if (!(actionCase_ == 44)) { + action_ = com.google.spanner.executor.v1.ExecutePartitionAction.getDefaultInstance(); + } + executePartitionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ExecutePartitionAction, + com.google.spanner.executor.v1.ExecutePartitionAction.Builder, + com.google.spanner.executor.v1.ExecutePartitionActionOrBuilder>( + (com.google.spanner.executor.v1.ExecutePartitionAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 44; + onChanged(); + return executePartitionBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ExecuteChangeStreamQuery, + com.google.spanner.executor.v1.ExecuteChangeStreamQuery.Builder, + com.google.spanner.executor.v1.ExecuteChangeStreamQueryOrBuilder> + executeChangeStreamQueryBuilder_; + + /** + * + * + *
    +     * Action to execute change stream query.
    +     * 
    + * + * .google.spanner.executor.v1.ExecuteChangeStreamQuery execute_change_stream_query = 50; + * + * + * @return Whether the executeChangeStreamQuery field is set. + */ + @java.lang.Override + public boolean hasExecuteChangeStreamQuery() { + return actionCase_ == 50; + } + + /** + * + * + *
    +     * Action to execute change stream query.
    +     * 
    + * + * .google.spanner.executor.v1.ExecuteChangeStreamQuery execute_change_stream_query = 50; + * + * + * @return The executeChangeStreamQuery. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ExecuteChangeStreamQuery getExecuteChangeStreamQuery() { + if (executeChangeStreamQueryBuilder_ == null) { + if (actionCase_ == 50) { + return (com.google.spanner.executor.v1.ExecuteChangeStreamQuery) action_; + } + return com.google.spanner.executor.v1.ExecuteChangeStreamQuery.getDefaultInstance(); + } else { + if (actionCase_ == 50) { + return executeChangeStreamQueryBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.ExecuteChangeStreamQuery.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to execute change stream query.
    +     * 
    + * + * .google.spanner.executor.v1.ExecuteChangeStreamQuery execute_change_stream_query = 50; + * + */ + public Builder setExecuteChangeStreamQuery( + com.google.spanner.executor.v1.ExecuteChangeStreamQuery value) { + if (executeChangeStreamQueryBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + executeChangeStreamQueryBuilder_.setMessage(value); + } + actionCase_ = 50; + return this; + } + + /** + * + * + *
    +     * Action to execute change stream query.
    +     * 
    + * + * .google.spanner.executor.v1.ExecuteChangeStreamQuery execute_change_stream_query = 50; + * + */ + public Builder setExecuteChangeStreamQuery( + com.google.spanner.executor.v1.ExecuteChangeStreamQuery.Builder builderForValue) { + if (executeChangeStreamQueryBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + executeChangeStreamQueryBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 50; + return this; + } + + /** + * + * + *
    +     * Action to execute change stream query.
    +     * 
    + * + * .google.spanner.executor.v1.ExecuteChangeStreamQuery execute_change_stream_query = 50; + * + */ + public Builder mergeExecuteChangeStreamQuery( + com.google.spanner.executor.v1.ExecuteChangeStreamQuery value) { + if (executeChangeStreamQueryBuilder_ == null) { + if (actionCase_ == 50 + && action_ + != com.google.spanner.executor.v1.ExecuteChangeStreamQuery.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.ExecuteChangeStreamQuery.newBuilder( + (com.google.spanner.executor.v1.ExecuteChangeStreamQuery) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 50) { + executeChangeStreamQueryBuilder_.mergeFrom(value); + } else { + executeChangeStreamQueryBuilder_.setMessage(value); + } + } + actionCase_ = 50; + return this; + } + + /** + * + * + *
    +     * Action to execute change stream query.
    +     * 
    + * + * .google.spanner.executor.v1.ExecuteChangeStreamQuery execute_change_stream_query = 50; + * + */ + public Builder clearExecuteChangeStreamQuery() { + if (executeChangeStreamQueryBuilder_ == null) { + if (actionCase_ == 50) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 50) { + actionCase_ = 0; + action_ = null; + } + executeChangeStreamQueryBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action to execute change stream query.
    +     * 
    + * + * .google.spanner.executor.v1.ExecuteChangeStreamQuery execute_change_stream_query = 50; + * + */ + public com.google.spanner.executor.v1.ExecuteChangeStreamQuery.Builder + getExecuteChangeStreamQueryBuilder() { + return internalGetExecuteChangeStreamQueryFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action to execute change stream query.
    +     * 
    + * + * .google.spanner.executor.v1.ExecuteChangeStreamQuery execute_change_stream_query = 50; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.ExecuteChangeStreamQueryOrBuilder + getExecuteChangeStreamQueryOrBuilder() { + if ((actionCase_ == 50) && (executeChangeStreamQueryBuilder_ != null)) { + return executeChangeStreamQueryBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 50) { + return (com.google.spanner.executor.v1.ExecuteChangeStreamQuery) action_; + } + return com.google.spanner.executor.v1.ExecuteChangeStreamQuery.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to execute change stream query.
    +     * 
    + * + * .google.spanner.executor.v1.ExecuteChangeStreamQuery execute_change_stream_query = 50; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ExecuteChangeStreamQuery, + com.google.spanner.executor.v1.ExecuteChangeStreamQuery.Builder, + com.google.spanner.executor.v1.ExecuteChangeStreamQueryOrBuilder> + internalGetExecuteChangeStreamQueryFieldBuilder() { + if (executeChangeStreamQueryBuilder_ == null) { + if (!(actionCase_ == 50)) { + action_ = com.google.spanner.executor.v1.ExecuteChangeStreamQuery.getDefaultInstance(); + } + executeChangeStreamQueryBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ExecuteChangeStreamQuery, + com.google.spanner.executor.v1.ExecuteChangeStreamQuery.Builder, + com.google.spanner.executor.v1.ExecuteChangeStreamQueryOrBuilder>( + (com.google.spanner.executor.v1.ExecuteChangeStreamQuery) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 50; + onChanged(); + return executeChangeStreamQueryBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.QueryCancellationAction, + com.google.spanner.executor.v1.QueryCancellationAction.Builder, + com.google.spanner.executor.v1.QueryCancellationActionOrBuilder> + queryCancellationBuilder_; + + /** + * + * + *
    +     * Query cancellation action for testing the cancellation of a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + * + * @return Whether the queryCancellation field is set. + */ + @java.lang.Override + public boolean hasQueryCancellation() { + return actionCase_ == 51; + } + + /** + * + * + *
    +     * Query cancellation action for testing the cancellation of a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + * + * @return The queryCancellation. + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryCancellationAction getQueryCancellation() { + if (queryCancellationBuilder_ == null) { + if (actionCase_ == 51) { + return (com.google.spanner.executor.v1.QueryCancellationAction) action_; + } + return com.google.spanner.executor.v1.QueryCancellationAction.getDefaultInstance(); + } else { + if (actionCase_ == 51) { + return queryCancellationBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.QueryCancellationAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Query cancellation action for testing the cancellation of a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + */ + public Builder setQueryCancellation( + com.google.spanner.executor.v1.QueryCancellationAction value) { + if (queryCancellationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + queryCancellationBuilder_.setMessage(value); + } + actionCase_ = 51; + return this; + } + + /** + * + * + *
    +     * Query cancellation action for testing the cancellation of a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + */ + public Builder setQueryCancellation( + com.google.spanner.executor.v1.QueryCancellationAction.Builder builderForValue) { + if (queryCancellationBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + queryCancellationBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 51; + return this; + } + + /** + * + * + *
    +     * Query cancellation action for testing the cancellation of a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + */ + public Builder mergeQueryCancellation( + com.google.spanner.executor.v1.QueryCancellationAction value) { + if (queryCancellationBuilder_ == null) { + if (actionCase_ == 51 + && action_ + != com.google.spanner.executor.v1.QueryCancellationAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.QueryCancellationAction.newBuilder( + (com.google.spanner.executor.v1.QueryCancellationAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 51) { + queryCancellationBuilder_.mergeFrom(value); + } else { + queryCancellationBuilder_.setMessage(value); + } + } + actionCase_ = 51; + return this; + } + + /** + * + * + *
    +     * Query cancellation action for testing the cancellation of a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + */ + public Builder clearQueryCancellation() { + if (queryCancellationBuilder_ == null) { + if (actionCase_ == 51) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 51) { + actionCase_ = 0; + action_ = null; + } + queryCancellationBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Query cancellation action for testing the cancellation of a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + */ + public com.google.spanner.executor.v1.QueryCancellationAction.Builder + getQueryCancellationBuilder() { + return internalGetQueryCancellationFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Query cancellation action for testing the cancellation of a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryCancellationActionOrBuilder + getQueryCancellationOrBuilder() { + if ((actionCase_ == 51) && (queryCancellationBuilder_ != null)) { + return queryCancellationBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 51) { + return (com.google.spanner.executor.v1.QueryCancellationAction) action_; + } + return com.google.spanner.executor.v1.QueryCancellationAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Query cancellation action for testing the cancellation of a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.QueryCancellationAction, + com.google.spanner.executor.v1.QueryCancellationAction.Builder, + com.google.spanner.executor.v1.QueryCancellationActionOrBuilder> + internalGetQueryCancellationFieldBuilder() { + if (queryCancellationBuilder_ == null) { + if (!(actionCase_ == 51)) { + action_ = com.google.spanner.executor.v1.QueryCancellationAction.getDefaultInstance(); + } + queryCancellationBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.QueryCancellationAction, + com.google.spanner.executor.v1.QueryCancellationAction.Builder, + com.google.spanner.executor.v1.QueryCancellationActionOrBuilder>( + (com.google.spanner.executor.v1.QueryCancellationAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 51; + onChanged(); + return queryCancellationBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.AdaptMessageAction, + com.google.spanner.executor.v1.AdaptMessageAction.Builder, + com.google.spanner.executor.v1.AdaptMessageActionOrBuilder> + adaptMessageBuilder_; + + /** + * + * + *
    +     * Action to adapt a message.
    +     * 
    + * + * .google.spanner.executor.v1.AdaptMessageAction adapt_message = 52; + * + * @return Whether the adaptMessage field is set. + */ + @java.lang.Override + public boolean hasAdaptMessage() { + return actionCase_ == 52; + } + + /** + * + * + *
    +     * Action to adapt a message.
    +     * 
    + * + * .google.spanner.executor.v1.AdaptMessageAction adapt_message = 52; + * + * @return The adaptMessage. + */ + @java.lang.Override + public com.google.spanner.executor.v1.AdaptMessageAction getAdaptMessage() { + if (adaptMessageBuilder_ == null) { + if (actionCase_ == 52) { + return (com.google.spanner.executor.v1.AdaptMessageAction) action_; + } + return com.google.spanner.executor.v1.AdaptMessageAction.getDefaultInstance(); + } else { + if (actionCase_ == 52) { + return adaptMessageBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.AdaptMessageAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to adapt a message.
    +     * 
    + * + * .google.spanner.executor.v1.AdaptMessageAction adapt_message = 52; + */ + public Builder setAdaptMessage(com.google.spanner.executor.v1.AdaptMessageAction value) { + if (adaptMessageBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + adaptMessageBuilder_.setMessage(value); + } + actionCase_ = 52; + return this; + } + + /** + * + * + *
    +     * Action to adapt a message.
    +     * 
    + * + * .google.spanner.executor.v1.AdaptMessageAction adapt_message = 52; + */ + public Builder setAdaptMessage( + com.google.spanner.executor.v1.AdaptMessageAction.Builder builderForValue) { + if (adaptMessageBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + adaptMessageBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 52; + return this; + } + + /** + * + * + *
    +     * Action to adapt a message.
    +     * 
    + * + * .google.spanner.executor.v1.AdaptMessageAction adapt_message = 52; + */ + public Builder mergeAdaptMessage(com.google.spanner.executor.v1.AdaptMessageAction value) { + if (adaptMessageBuilder_ == null) { + if (actionCase_ == 52 + && action_ != com.google.spanner.executor.v1.AdaptMessageAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.AdaptMessageAction.newBuilder( + (com.google.spanner.executor.v1.AdaptMessageAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 52) { + adaptMessageBuilder_.mergeFrom(value); + } else { + adaptMessageBuilder_.setMessage(value); + } + } + actionCase_ = 52; + return this; + } + + /** + * + * + *
    +     * Action to adapt a message.
    +     * 
    + * + * .google.spanner.executor.v1.AdaptMessageAction adapt_message = 52; + */ + public Builder clearAdaptMessage() { + if (adaptMessageBuilder_ == null) { + if (actionCase_ == 52) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 52) { + actionCase_ = 0; + action_ = null; + } + adaptMessageBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Action to adapt a message.
    +     * 
    + * + * .google.spanner.executor.v1.AdaptMessageAction adapt_message = 52; + */ + public com.google.spanner.executor.v1.AdaptMessageAction.Builder getAdaptMessageBuilder() { + return internalGetAdaptMessageFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Action to adapt a message.
    +     * 
    + * + * .google.spanner.executor.v1.AdaptMessageAction adapt_message = 52; + */ + @java.lang.Override + public com.google.spanner.executor.v1.AdaptMessageActionOrBuilder getAdaptMessageOrBuilder() { + if ((actionCase_ == 52) && (adaptMessageBuilder_ != null)) { + return adaptMessageBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 52) { + return (com.google.spanner.executor.v1.AdaptMessageAction) action_; + } + return com.google.spanner.executor.v1.AdaptMessageAction.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Action to adapt a message.
    +     * 
    + * + * .google.spanner.executor.v1.AdaptMessageAction adapt_message = 52; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.AdaptMessageAction, + com.google.spanner.executor.v1.AdaptMessageAction.Builder, + com.google.spanner.executor.v1.AdaptMessageActionOrBuilder> + internalGetAdaptMessageFieldBuilder() { + if (adaptMessageBuilder_ == null) { + if (!(actionCase_ == 52)) { + action_ = com.google.spanner.executor.v1.AdaptMessageAction.getDefaultInstance(); + } + adaptMessageBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.AdaptMessageAction, + com.google.spanner.executor.v1.AdaptMessageAction.Builder, + com.google.spanner.executor.v1.AdaptMessageActionOrBuilder>( + (com.google.spanner.executor.v1.AdaptMessageAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 52; + onChanged(); + return adaptMessageBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.SpannerAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.SpannerAction) + private static final com.google.spanner.executor.v1.SpannerAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.SpannerAction(); + } + + public static com.google.spanner.executor.v1.SpannerAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SpannerAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch 
(com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.SpannerAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerActionOrBuilder.java new file mode 100644 index 000000000000..05e5744b0463 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerActionOrBuilder.java @@ -0,0 +1,784 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface SpannerActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.SpannerAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Database against which to perform action.
    +   * In a context where a series of actions take place, an action may omit
    +   * database path if it applies to the same database as the previous action.
    +   * 
    + * + * string database_path = 1; + * + * @return The databasePath. + */ + java.lang.String getDatabasePath(); + + /** + * + * + *
    +   * Database against which to perform action.
    +   * In a context where a series of actions take place, an action may omit
    +   * database path if it applies to the same database as the previous action.
    +   * 
    + * + * string database_path = 1; + * + * @return The bytes for databasePath. + */ + com.google.protobuf.ByteString getDatabasePathBytes(); + + /** + * + * + *
    +   * Configuration options for Spanner backend
    +   * 
    + * + * .google.spanner.executor.v1.SpannerOptions spanner_options = 2; + * + * @return Whether the spannerOptions field is set. + */ + boolean hasSpannerOptions(); + + /** + * + * + *
    +   * Configuration options for Spanner backend
    +   * 
    + * + * .google.spanner.executor.v1.SpannerOptions spanner_options = 2; + * + * @return The spannerOptions. + */ + com.google.spanner.executor.v1.SpannerOptions getSpannerOptions(); + + /** + * + * + *
    +   * Configuration options for Spanner backend
    +   * 
    + * + * .google.spanner.executor.v1.SpannerOptions spanner_options = 2; + */ + com.google.spanner.executor.v1.SpannerOptionsOrBuilder getSpannerOptionsOrBuilder(); + + /** + * + * + *
    +   * Action to start a transaction.
    +   * 
    + * + * .google.spanner.executor.v1.StartTransactionAction start = 10; + * + * @return Whether the start field is set. + */ + boolean hasStart(); + + /** + * + * + *
    +   * Action to start a transaction.
    +   * 
    + * + * .google.spanner.executor.v1.StartTransactionAction start = 10; + * + * @return The start. + */ + com.google.spanner.executor.v1.StartTransactionAction getStart(); + + /** + * + * + *
    +   * Action to start a transaction.
    +   * 
    + * + * .google.spanner.executor.v1.StartTransactionAction start = 10; + */ + com.google.spanner.executor.v1.StartTransactionActionOrBuilder getStartOrBuilder(); + + /** + * + * + *
    +   * Action to finish a transaction.
    +   * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction finish = 11; + * + * @return Whether the finish field is set. + */ + boolean hasFinish(); + + /** + * + * + *
    +   * Action to finish a transaction.
    +   * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction finish = 11; + * + * @return The finish. + */ + com.google.spanner.executor.v1.FinishTransactionAction getFinish(); + + /** + * + * + *
    +   * Action to finish a transaction.
    +   * 
    + * + * .google.spanner.executor.v1.FinishTransactionAction finish = 11; + */ + com.google.spanner.executor.v1.FinishTransactionActionOrBuilder getFinishOrBuilder(); + + /** + * + * + *
    +   * Action to do a normal read.
    +   * 
    + * + * .google.spanner.executor.v1.ReadAction read = 20; + * + * @return Whether the read field is set. + */ + boolean hasRead(); + + /** + * + * + *
    +   * Action to do a normal read.
    +   * 
    + * + * .google.spanner.executor.v1.ReadAction read = 20; + * + * @return The read. + */ + com.google.spanner.executor.v1.ReadAction getRead(); + + /** + * + * + *
    +   * Action to do a normal read.
    +   * 
    + * + * .google.spanner.executor.v1.ReadAction read = 20; + */ + com.google.spanner.executor.v1.ReadActionOrBuilder getReadOrBuilder(); + + /** + * + * + *
    +   * Action to do a query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction query = 21; + * + * @return Whether the query field is set. + */ + boolean hasQuery(); + + /** + * + * + *
    +   * Action to do a query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction query = 21; + * + * @return The query. + */ + com.google.spanner.executor.v1.QueryAction getQuery(); + + /** + * + * + *
    +   * Action to do a query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryAction query = 21; + */ + com.google.spanner.executor.v1.QueryActionOrBuilder getQueryOrBuilder(); + + /** + * + * + *
    +   * Action to buffer a mutation.
    +   * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 22; + * + * @return Whether the mutation field is set. + */ + boolean hasMutation(); + + /** + * + * + *
    +   * Action to buffer a mutation.
    +   * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 22; + * + * @return The mutation. + */ + com.google.spanner.executor.v1.MutationAction getMutation(); + + /** + * + * + *
    +   * Action to buffer a mutation.
    +   * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 22; + */ + com.google.spanner.executor.v1.MutationActionOrBuilder getMutationOrBuilder(); + + /** + * + * + *
    +   * Action to a DML.
    +   * 
    + * + * .google.spanner.executor.v1.DmlAction dml = 23; + * + * @return Whether the dml field is set. + */ + boolean hasDml(); + + /** + * + * + *
    +   * Action to a DML.
    +   * 
    + * + * .google.spanner.executor.v1.DmlAction dml = 23; + * + * @return The dml. + */ + com.google.spanner.executor.v1.DmlAction getDml(); + + /** + * + * + *
    +   * Action to a DML.
    +   * 
    + * + * .google.spanner.executor.v1.DmlAction dml = 23; + */ + com.google.spanner.executor.v1.DmlActionOrBuilder getDmlOrBuilder(); + + /** + * + * + *
    +   * Action to a batch DML.
    +   * 
    + * + * .google.spanner.executor.v1.BatchDmlAction batch_dml = 24; + * + * @return Whether the batchDml field is set. + */ + boolean hasBatchDml(); + + /** + * + * + *
    +   * Action to a batch DML.
    +   * 
    + * + * .google.spanner.executor.v1.BatchDmlAction batch_dml = 24; + * + * @return The batchDml. + */ + com.google.spanner.executor.v1.BatchDmlAction getBatchDml(); + + /** + * + * + *
    +   * Action to a batch DML.
    +   * 
    + * + * .google.spanner.executor.v1.BatchDmlAction batch_dml = 24; + */ + com.google.spanner.executor.v1.BatchDmlActionOrBuilder getBatchDmlOrBuilder(); + + /** + * + * + *
    +   * Action to write a mutation.
    +   * 
    + * + * .google.spanner.executor.v1.WriteMutationsAction write = 25; + * + * @return Whether the write field is set. + */ + boolean hasWrite(); + + /** + * + * + *
    +   * Action to write a mutation.
    +   * 
    + * + * .google.spanner.executor.v1.WriteMutationsAction write = 25; + * + * @return The write. + */ + com.google.spanner.executor.v1.WriteMutationsAction getWrite(); + + /** + * + * + *
    +   * Action to write a mutation.
    +   * 
    + * + * .google.spanner.executor.v1.WriteMutationsAction write = 25; + */ + com.google.spanner.executor.v1.WriteMutationsActionOrBuilder getWriteOrBuilder(); + + /** + * + * + *
    +   * Action to a partitioned update.
    +   * 
    + * + * .google.spanner.executor.v1.PartitionedUpdateAction partitioned_update = 27; + * + * @return Whether the partitionedUpdate field is set. + */ + boolean hasPartitionedUpdate(); + + /** + * + * + *
    +   * Action to a partitioned update.
    +   * 
    + * + * .google.spanner.executor.v1.PartitionedUpdateAction partitioned_update = 27; + * + * @return The partitionedUpdate. + */ + com.google.spanner.executor.v1.PartitionedUpdateAction getPartitionedUpdate(); + + /** + * + * + *
    +   * Action to a partitioned update.
    +   * 
    + * + * .google.spanner.executor.v1.PartitionedUpdateAction partitioned_update = 27; + */ + com.google.spanner.executor.v1.PartitionedUpdateActionOrBuilder getPartitionedUpdateOrBuilder(); + + /** + * + * + *
    +   * Action that contains any administrative operation, like database,
    +   * instance manipulation.
    +   * 
    + * + * .google.spanner.executor.v1.AdminAction admin = 30; + * + * @return Whether the admin field is set. + */ + boolean hasAdmin(); + + /** + * + * + *
    +   * Action that contains any administrative operation, like database,
    +   * instance manipulation.
    +   * 
    + * + * .google.spanner.executor.v1.AdminAction admin = 30; + * + * @return The admin. + */ + com.google.spanner.executor.v1.AdminAction getAdmin(); + + /** + * + * + *
    +   * Action that contains any administrative operation, like database,
    +   * instance manipulation.
    +   * 
    + * + * .google.spanner.executor.v1.AdminAction admin = 30; + */ + com.google.spanner.executor.v1.AdminActionOrBuilder getAdminOrBuilder(); + + /** + * + * + *
    +   * Action to start a batch transaction.
    +   * 
    + * + * .google.spanner.executor.v1.StartBatchTransactionAction start_batch_txn = 40; + * + * @return Whether the startBatchTxn field is set. + */ + boolean hasStartBatchTxn(); + + /** + * + * + *
    +   * Action to start a batch transaction.
    +   * 
    + * + * .google.spanner.executor.v1.StartBatchTransactionAction start_batch_txn = 40; + * + * @return The startBatchTxn. + */ + com.google.spanner.executor.v1.StartBatchTransactionAction getStartBatchTxn(); + + /** + * + * + *
    +   * Action to start a batch transaction.
    +   * 
    + * + * .google.spanner.executor.v1.StartBatchTransactionAction start_batch_txn = 40; + */ + com.google.spanner.executor.v1.StartBatchTransactionActionOrBuilder getStartBatchTxnOrBuilder(); + + /** + * + * + *
    +   * Action to close a batch transaction.
    +   * 
    + * + * .google.spanner.executor.v1.CloseBatchTransactionAction close_batch_txn = 41; + * + * @return Whether the closeBatchTxn field is set. + */ + boolean hasCloseBatchTxn(); + + /** + * + * + *
    +   * Action to close a batch transaction.
    +   * 
    + * + * .google.spanner.executor.v1.CloseBatchTransactionAction close_batch_txn = 41; + * + * @return The closeBatchTxn. + */ + com.google.spanner.executor.v1.CloseBatchTransactionAction getCloseBatchTxn(); + + /** + * + * + *
    +   * Action to close a batch transaction.
    +   * 
    + * + * .google.spanner.executor.v1.CloseBatchTransactionAction close_batch_txn = 41; + */ + com.google.spanner.executor.v1.CloseBatchTransactionActionOrBuilder getCloseBatchTxnOrBuilder(); + + /** + * + * + *
    +   * Action to generate database partitions for batch read.
    +   * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForReadAction generate_db_partitions_read = 42; + * + * + * @return Whether the generateDbPartitionsRead field is set. + */ + boolean hasGenerateDbPartitionsRead(); + + /** + * + * + *
    +   * Action to generate database partitions for batch read.
    +   * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForReadAction generate_db_partitions_read = 42; + * + * + * @return The generateDbPartitionsRead. + */ + com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction getGenerateDbPartitionsRead(); + + /** + * + * + *
    +   * Action to generate database partitions for batch read.
    +   * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForReadAction generate_db_partitions_read = 42; + * + */ + com.google.spanner.executor.v1.GenerateDbPartitionsForReadActionOrBuilder + getGenerateDbPartitionsReadOrBuilder(); + + /** + * + * + *
    +   * Action to generate database partitions for batch query.
    +   * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForQueryAction generate_db_partitions_query = 43; + * + * + * @return Whether the generateDbPartitionsQuery field is set. + */ + boolean hasGenerateDbPartitionsQuery(); + + /** + * + * + *
    +   * Action to generate database partitions for batch query.
    +   * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForQueryAction generate_db_partitions_query = 43; + * + * + * @return The generateDbPartitionsQuery. + */ + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction getGenerateDbPartitionsQuery(); + + /** + * + * + *
    +   * Action to generate database partitions for batch query.
    +   * 
    + * + * + * .google.spanner.executor.v1.GenerateDbPartitionsForQueryAction generate_db_partitions_query = 43; + * + */ + com.google.spanner.executor.v1.GenerateDbPartitionsForQueryActionOrBuilder + getGenerateDbPartitionsQueryOrBuilder(); + + /** + * + * + *
    +   * Action to execute batch actions on generated partitions.
    +   * 
    + * + * .google.spanner.executor.v1.ExecutePartitionAction execute_partition = 44; + * + * @return Whether the executePartition field is set. + */ + boolean hasExecutePartition(); + + /** + * + * + *
    +   * Action to execute batch actions on generated partitions.
    +   * 
    + * + * .google.spanner.executor.v1.ExecutePartitionAction execute_partition = 44; + * + * @return The executePartition. + */ + com.google.spanner.executor.v1.ExecutePartitionAction getExecutePartition(); + + /** + * + * + *
    +   * Action to execute batch actions on generated partitions.
    +   * 
    + * + * .google.spanner.executor.v1.ExecutePartitionAction execute_partition = 44; + */ + com.google.spanner.executor.v1.ExecutePartitionActionOrBuilder getExecutePartitionOrBuilder(); + + /** + * + * + *
    +   * Action to execute change stream query.
    +   * 
    + * + * .google.spanner.executor.v1.ExecuteChangeStreamQuery execute_change_stream_query = 50; + * + * + * @return Whether the executeChangeStreamQuery field is set. + */ + boolean hasExecuteChangeStreamQuery(); + + /** + * + * + *
    +   * Action to execute change stream query.
    +   * 
    + * + * .google.spanner.executor.v1.ExecuteChangeStreamQuery execute_change_stream_query = 50; + * + * + * @return The executeChangeStreamQuery. + */ + com.google.spanner.executor.v1.ExecuteChangeStreamQuery getExecuteChangeStreamQuery(); + + /** + * + * + *
    +   * Action to execute change stream query.
    +   * 
    + * + * .google.spanner.executor.v1.ExecuteChangeStreamQuery execute_change_stream_query = 50; + * + */ + com.google.spanner.executor.v1.ExecuteChangeStreamQueryOrBuilder + getExecuteChangeStreamQueryOrBuilder(); + + /** + * + * + *
    +   * Query cancellation action for testing the cancellation of a query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + * + * @return Whether the queryCancellation field is set. + */ + boolean hasQueryCancellation(); + + /** + * + * + *
    +   * Query cancellation action for testing the cancellation of a query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + * + * @return The queryCancellation. + */ + com.google.spanner.executor.v1.QueryCancellationAction getQueryCancellation(); + + /** + * + * + *
    +   * Query cancellation action for testing the cancellation of a query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + */ + com.google.spanner.executor.v1.QueryCancellationActionOrBuilder getQueryCancellationOrBuilder(); + + /** + * + * + *
    +   * Action to adapt a message.
    +   * 
    + * + * .google.spanner.executor.v1.AdaptMessageAction adapt_message = 52; + * + * @return Whether the adaptMessage field is set. + */ + boolean hasAdaptMessage(); + + /** + * + * + *
    +   * Action to adapt a message.
    +   * 
    + * + * .google.spanner.executor.v1.AdaptMessageAction adapt_message = 52; + * + * @return The adaptMessage. + */ + com.google.spanner.executor.v1.AdaptMessageAction getAdaptMessage(); + + /** + * + * + *
    +   * Action to adapt a message.
    +   * 
    + * + * .google.spanner.executor.v1.AdaptMessageAction adapt_message = 52; + */ + com.google.spanner.executor.v1.AdaptMessageActionOrBuilder getAdaptMessageOrBuilder(); + + com.google.spanner.executor.v1.SpannerAction.ActionCase getActionCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerActionOutcome.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerActionOutcome.java new file mode 100644 index 000000000000..ff10ce58be7f --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerActionOutcome.java @@ -0,0 +1,3647 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * SpannerActionOutcome defines a result of execution of a single SpannerAction.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.SpannerActionOutcome} + */ +@com.google.protobuf.Generated +public final class SpannerActionOutcome extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.SpannerActionOutcome) + SpannerActionOutcomeOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "SpannerActionOutcome"); + } + + // Use SpannerActionOutcome.newBuilder() to construct. + private SpannerActionOutcome(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private SpannerActionOutcome() { + batchTxnId_ = com.google.protobuf.ByteString.EMPTY; + dbPartition_ = java.util.Collections.emptyList(); + dmlRowsModified_ = emptyLongList(); + changeStreamRecords_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerActionOutcome_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerActionOutcome_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.SpannerActionOutcome.class, + com.google.spanner.executor.v1.SpannerActionOutcome.Builder.class); + } + + private int bitField0_; + public static final int STATUS_FIELD_NUMBER = 1; + private com.google.rpc.Status status_; + + /** + * + * + *
    +   * If an outcome is split into multiple parts, status will be set only in the
    +   * last part.
    +   * 
    + * + * optional .google.rpc.Status status = 1; + * + * @return Whether the status field is set. + */ + @java.lang.Override + public boolean hasStatus() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * If an outcome is split into multiple parts, status will be set only in the
    +   * last part.
    +   * 
    + * + * optional .google.rpc.Status status = 1; + * + * @return The status. + */ + @java.lang.Override + public com.google.rpc.Status getStatus() { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } + + /** + * + * + *
    +   * If an outcome is split into multiple parts, status will be set only in the
    +   * last part.
    +   * 
    + * + * optional .google.rpc.Status status = 1; + */ + @java.lang.Override + public com.google.rpc.StatusOrBuilder getStatusOrBuilder() { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } + + public static final int COMMIT_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp commitTime_; + + /** + * + * + *
    +   * Transaction timestamp. It must be set for successful committed actions.
    +   * 
    + * + * optional .google.protobuf.Timestamp commit_time = 2; + * + * @return Whether the commitTime field is set. + */ + @java.lang.Override + public boolean hasCommitTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Transaction timestamp. It must be set for successful committed actions.
    +   * 
    + * + * optional .google.protobuf.Timestamp commit_time = 2; + * + * @return The commitTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCommitTime() { + return commitTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : commitTime_; + } + + /** + * + * + *
    +   * Transaction timestamp. It must be set for successful committed actions.
    +   * 
    + * + * optional .google.protobuf.Timestamp commit_time = 2; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { + return commitTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : commitTime_; + } + + public static final int READ_RESULT_FIELD_NUMBER = 3; + private com.google.spanner.executor.v1.ReadResult readResult_; + + /** + * + * + *
    +   * Result of a ReadAction. This field must be set for ReadActions even if
    +   * no rows were read.
    +   * 
    + * + * optional .google.spanner.executor.v1.ReadResult read_result = 3; + * + * @return Whether the readResult field is set. + */ + @java.lang.Override + public boolean hasReadResult() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * Result of a ReadAction. This field must be set for ReadActions even if
    +   * no rows were read.
    +   * 
    + * + * optional .google.spanner.executor.v1.ReadResult read_result = 3; + * + * @return The readResult. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ReadResult getReadResult() { + return readResult_ == null + ? com.google.spanner.executor.v1.ReadResult.getDefaultInstance() + : readResult_; + } + + /** + * + * + *
    +   * Result of a ReadAction. This field must be set for ReadActions even if
    +   * no rows were read.
    +   * 
    + * + * optional .google.spanner.executor.v1.ReadResult read_result = 3; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ReadResultOrBuilder getReadResultOrBuilder() { + return readResult_ == null + ? com.google.spanner.executor.v1.ReadResult.getDefaultInstance() + : readResult_; + } + + public static final int QUERY_RESULT_FIELD_NUMBER = 4; + private com.google.spanner.executor.v1.QueryResult queryResult_; + + /** + * + * + *
    +   * Result of a Query. This field must be set for Queries even if no rows were
    +   * read.
    +   * 
    + * + * optional .google.spanner.executor.v1.QueryResult query_result = 4; + * + * @return Whether the queryResult field is set. + */ + @java.lang.Override + public boolean hasQueryResult() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +   * Result of a Query. This field must be set for Queries even if no rows were
    +   * read.
    +   * 
    + * + * optional .google.spanner.executor.v1.QueryResult query_result = 4; + * + * @return The queryResult. + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryResult getQueryResult() { + return queryResult_ == null + ? com.google.spanner.executor.v1.QueryResult.getDefaultInstance() + : queryResult_; + } + + /** + * + * + *
    +   * Result of a Query. This field must be set for Queries even if no rows were
    +   * read.
    +   * 
    + * + * optional .google.spanner.executor.v1.QueryResult query_result = 4; + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryResultOrBuilder getQueryResultOrBuilder() { + return queryResult_ == null + ? com.google.spanner.executor.v1.QueryResult.getDefaultInstance() + : queryResult_; + } + + public static final int TRANSACTION_RESTARTED_FIELD_NUMBER = 5; + private boolean transactionRestarted_ = false; + + /** + * + * + *
    +   * This bit indicates that Spanner has restarted the current transaction. It
    +   * means that the client should replay all the reads and writes.
    +   * Setting it to true is only valid in the context of a read-write
    +   * transaction, as an outcome of a committing FinishTransactionAction.
    +   * 
    + * + * optional bool transaction_restarted = 5; + * + * @return Whether the transactionRestarted field is set. + */ + @java.lang.Override + public boolean hasTransactionRestarted() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +   * This bit indicates that Spanner has restarted the current transaction. It
    +   * means that the client should replay all the reads and writes.
    +   * Setting it to true is only valid in the context of a read-write
    +   * transaction, as an outcome of a committing FinishTransactionAction.
    +   * 
    + * + * optional bool transaction_restarted = 5; + * + * @return The transactionRestarted. + */ + @java.lang.Override + public boolean getTransactionRestarted() { + return transactionRestarted_; + } + + public static final int BATCH_TXN_ID_FIELD_NUMBER = 6; + private com.google.protobuf.ByteString batchTxnId_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * In successful StartBatchTransactionAction outcomes, this contains the ID of
    +   * the transaction.
    +   * 
    + * + * optional bytes batch_txn_id = 6; + * + * @return Whether the batchTxnId field is set. + */ + @java.lang.Override + public boolean hasBatchTxnId() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
    +   * In successful StartBatchTransactionAction outcomes, this contains the ID of
    +   * the transaction.
    +   * 
    + * + * optional bytes batch_txn_id = 6; + * + * @return The batchTxnId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBatchTxnId() { + return batchTxnId_; + } + + public static final int DB_PARTITION_FIELD_NUMBER = 7; + + @SuppressWarnings("serial") + private java.util.List dbPartition_; + + /** + * + * + *
    +   * Generated database partitions (result of a
    +   * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +   * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + @java.lang.Override + public java.util.List getDbPartitionList() { + return dbPartition_; + } + + /** + * + * + *
    +   * Generated database partitions (result of a
    +   * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +   * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + @java.lang.Override + public java.util.List + getDbPartitionOrBuilderList() { + return dbPartition_; + } + + /** + * + * + *
    +   * Generated database partitions (result of a
    +   * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +   * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + @java.lang.Override + public int getDbPartitionCount() { + return dbPartition_.size(); + } + + /** + * + * + *
    +   * Generated database partitions (result of a
    +   * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +   * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + @java.lang.Override + public com.google.spanner.executor.v1.BatchPartition getDbPartition(int index) { + return dbPartition_.get(index); + } + + /** + * + * + *
    +   * Generated database partitions (result of a
    +   * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +   * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + @java.lang.Override + public com.google.spanner.executor.v1.BatchPartitionOrBuilder getDbPartitionOrBuilder(int index) { + return dbPartition_.get(index); + } + + public static final int ADMIN_RESULT_FIELD_NUMBER = 8; + private com.google.spanner.executor.v1.AdminResult adminResult_; + + /** + * + * + *
    +   * Result of admin related actions.
    +   * 
    + * + * optional .google.spanner.executor.v1.AdminResult admin_result = 8; + * + * @return Whether the adminResult field is set. + */ + @java.lang.Override + public boolean hasAdminResult() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
    +   * Result of admin related actions.
    +   * 
    + * + * optional .google.spanner.executor.v1.AdminResult admin_result = 8; + * + * @return The adminResult. + */ + @java.lang.Override + public com.google.spanner.executor.v1.AdminResult getAdminResult() { + return adminResult_ == null + ? com.google.spanner.executor.v1.AdminResult.getDefaultInstance() + : adminResult_; + } + + /** + * + * + *
    +   * Result of admin related actions.
    +   * 
    + * + * optional .google.spanner.executor.v1.AdminResult admin_result = 8; + */ + @java.lang.Override + public com.google.spanner.executor.v1.AdminResultOrBuilder getAdminResultOrBuilder() { + return adminResult_ == null + ? com.google.spanner.executor.v1.AdminResult.getDefaultInstance() + : adminResult_; + } + + public static final int DML_ROWS_MODIFIED_FIELD_NUMBER = 9; + + @SuppressWarnings("serial") + private com.google.protobuf.Internal.LongList dmlRowsModified_ = emptyLongList(); + + /** + * + * + *
    +   * Stores rows modified by query in single DML or batch DML action.
    +   * In case of batch DML action, stores 0 as row count of errored DML query.
    +   * 
    + * + * repeated int64 dml_rows_modified = 9; + * + * @return A list containing the dmlRowsModified. + */ + @java.lang.Override + public java.util.List getDmlRowsModifiedList() { + return dmlRowsModified_; + } + + /** + * + * + *
    +   * Stores rows modified by query in single DML or batch DML action.
    +   * In case of batch DML action, stores 0 as row count of errored DML query.
    +   * 
    + * + * repeated int64 dml_rows_modified = 9; + * + * @return The count of dmlRowsModified. + */ + public int getDmlRowsModifiedCount() { + return dmlRowsModified_.size(); + } + + /** + * + * + *
    +   * Stores rows modified by query in single DML or batch DML action.
    +   * In case of batch DML action, stores 0 as row count of errored DML query.
    +   * 
    + * + * repeated int64 dml_rows_modified = 9; + * + * @param index The index of the element to return. + * @return The dmlRowsModified at the given index. + */ + public long getDmlRowsModified(int index) { + return dmlRowsModified_.getLong(index); + } + + private int dmlRowsModifiedMemoizedSerializedSize = -1; + + public static final int CHANGE_STREAM_RECORDS_FIELD_NUMBER = 10; + + @SuppressWarnings("serial") + private java.util.List changeStreamRecords_; + + /** + * + * + *
    +   * Change stream records returned by a change stream query.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + @java.lang.Override + public java.util.List + getChangeStreamRecordsList() { + return changeStreamRecords_; + } + + /** + * + * + *
    +   * Change stream records returned by a change stream query.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + @java.lang.Override + public java.util.List + getChangeStreamRecordsOrBuilderList() { + return changeStreamRecords_; + } + + /** + * + * + *
    +   * Change stream records returned by a change stream query.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + @java.lang.Override + public int getChangeStreamRecordsCount() { + return changeStreamRecords_.size(); + } + + /** + * + * + *
    +   * Change stream records returned by a change stream query.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.ChangeStreamRecord getChangeStreamRecords(int index) { + return changeStreamRecords_.get(index); + } + + /** + * + * + *
    +   * Change stream records returned by a change stream query.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.ChangeStreamRecordOrBuilder getChangeStreamRecordsOrBuilder( + int index) { + return changeStreamRecords_.get(index); + } + + public static final int SNAPSHOT_ISOLATION_TXN_READ_TIMESTAMP_FIELD_NUMBER = 11; + private long snapshotIsolationTxnReadTimestamp_ = 0L; + + /** + * + * + *
    +   * If not zero, it indicates the read timestamp to use for validating
    +   * the SnapshotIsolation transaction.
    +   * 
    + * + * optional int64 snapshot_isolation_txn_read_timestamp = 11; + * + * @return Whether the snapshotIsolationTxnReadTimestamp field is set. + */ + @java.lang.Override + public boolean hasSnapshotIsolationTxnReadTimestamp() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
    +   * If not zero, it indicates the read timestamp to use for validating
    +   * the SnapshotIsolation transaction.
    +   * 
    + * + * optional int64 snapshot_isolation_txn_read_timestamp = 11; + * + * @return The snapshotIsolationTxnReadTimestamp. + */ + @java.lang.Override + public long getSnapshotIsolationTxnReadTimestamp() { + return snapshotIsolationTxnReadTimestamp_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getStatus()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getCommitTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(3, getReadResult()); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(4, getQueryResult()); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeBool(5, transactionRestarted_); + } + if (((bitField0_ & 0x00000020) != 0)) { + output.writeBytes(6, batchTxnId_); + } + for (int i = 0; i < dbPartition_.size(); i++) { + output.writeMessage(7, dbPartition_.get(i)); + } + if (((bitField0_ & 0x00000040) != 0)) { + output.writeMessage(8, getAdminResult()); + } + if (getDmlRowsModifiedList().size() > 0) { + output.writeUInt32NoTag(74); + output.writeUInt32NoTag(dmlRowsModifiedMemoizedSerializedSize); + } + for (int i = 0; i < dmlRowsModified_.size(); i++) { + output.writeInt64NoTag(dmlRowsModified_.getLong(i)); + } + for (int i = 0; i < changeStreamRecords_.size(); i++) { + output.writeMessage(10, changeStreamRecords_.get(i)); + } + if (((bitField0_ & 0x00000080) != 0)) { + output.writeInt64(11, snapshotIsolationTxnReadTimestamp_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int 
size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getStatus()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getCommitTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getReadResult()); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getQueryResult()); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(5, transactionRestarted_); + } + if (((bitField0_ & 0x00000020) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(6, batchTxnId_); + } + for (int i = 0; i < dbPartition_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, dbPartition_.get(i)); + } + if (((bitField0_ & 0x00000040) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(8, getAdminResult()); + } + { + int dataSize = 0; + for (int i = 0; i < dmlRowsModified_.size(); i++) { + dataSize += + com.google.protobuf.CodedOutputStream.computeInt64SizeNoTag( + dmlRowsModified_.getLong(i)); + } + size += dataSize; + if (!getDmlRowsModifiedList().isEmpty()) { + size += 1; + size += com.google.protobuf.CodedOutputStream.computeInt32SizeNoTag(dataSize); + } + dmlRowsModifiedMemoizedSerializedSize = dataSize; + } + for (int i = 0; i < changeStreamRecords_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(10, changeStreamRecords_.get(i)); + } + if (((bitField0_ & 0x00000080) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size( + 11, snapshotIsolationTxnReadTimestamp_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + 
public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.SpannerActionOutcome)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.SpannerActionOutcome other = + (com.google.spanner.executor.v1.SpannerActionOutcome) obj; + + if (hasStatus() != other.hasStatus()) return false; + if (hasStatus()) { + if (!getStatus().equals(other.getStatus())) return false; + } + if (hasCommitTime() != other.hasCommitTime()) return false; + if (hasCommitTime()) { + if (!getCommitTime().equals(other.getCommitTime())) return false; + } + if (hasReadResult() != other.hasReadResult()) return false; + if (hasReadResult()) { + if (!getReadResult().equals(other.getReadResult())) return false; + } + if (hasQueryResult() != other.hasQueryResult()) return false; + if (hasQueryResult()) { + if (!getQueryResult().equals(other.getQueryResult())) return false; + } + if (hasTransactionRestarted() != other.hasTransactionRestarted()) return false; + if (hasTransactionRestarted()) { + if (getTransactionRestarted() != other.getTransactionRestarted()) return false; + } + if (hasBatchTxnId() != other.hasBatchTxnId()) return false; + if (hasBatchTxnId()) { + if (!getBatchTxnId().equals(other.getBatchTxnId())) return false; + } + if (!getDbPartitionList().equals(other.getDbPartitionList())) return false; + if (hasAdminResult() != other.hasAdminResult()) return false; + if (hasAdminResult()) { + if (!getAdminResult().equals(other.getAdminResult())) return false; + } + if (!getDmlRowsModifiedList().equals(other.getDmlRowsModifiedList())) return false; + if (!getChangeStreamRecordsList().equals(other.getChangeStreamRecordsList())) return false; + if (hasSnapshotIsolationTxnReadTimestamp() != other.hasSnapshotIsolationTxnReadTimestamp()) + return false; + if (hasSnapshotIsolationTxnReadTimestamp()) { + if (getSnapshotIsolationTxnReadTimestamp() != other.getSnapshotIsolationTxnReadTimestamp()) + return 
false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasStatus()) { + hash = (37 * hash) + STATUS_FIELD_NUMBER; + hash = (53 * hash) + getStatus().hashCode(); + } + if (hasCommitTime()) { + hash = (37 * hash) + COMMIT_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCommitTime().hashCode(); + } + if (hasReadResult()) { + hash = (37 * hash) + READ_RESULT_FIELD_NUMBER; + hash = (53 * hash) + getReadResult().hashCode(); + } + if (hasQueryResult()) { + hash = (37 * hash) + QUERY_RESULT_FIELD_NUMBER; + hash = (53 * hash) + getQueryResult().hashCode(); + } + if (hasTransactionRestarted()) { + hash = (37 * hash) + TRANSACTION_RESTARTED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getTransactionRestarted()); + } + if (hasBatchTxnId()) { + hash = (37 * hash) + BATCH_TXN_ID_FIELD_NUMBER; + hash = (53 * hash) + getBatchTxnId().hashCode(); + } + if (getDbPartitionCount() > 0) { + hash = (37 * hash) + DB_PARTITION_FIELD_NUMBER; + hash = (53 * hash) + getDbPartitionList().hashCode(); + } + if (hasAdminResult()) { + hash = (37 * hash) + ADMIN_RESULT_FIELD_NUMBER; + hash = (53 * hash) + getAdminResult().hashCode(); + } + if (getDmlRowsModifiedCount() > 0) { + hash = (37 * hash) + DML_ROWS_MODIFIED_FIELD_NUMBER; + hash = (53 * hash) + getDmlRowsModifiedList().hashCode(); + } + if (getChangeStreamRecordsCount() > 0) { + hash = (37 * hash) + CHANGE_STREAM_RECORDS_FIELD_NUMBER; + hash = (53 * hash) + getChangeStreamRecordsList().hashCode(); + } + if (hasSnapshotIsolationTxnReadTimestamp()) { + hash = (37 * hash) + SNAPSHOT_ISOLATION_TXN_READ_TIMESTAMP_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong(getSnapshotIsolationTxnReadTimestamp()); + } + hash = (29 * hash) + 
getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.SpannerActionOutcome parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.SpannerActionOutcome parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerActionOutcome parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.SpannerActionOutcome parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerActionOutcome parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.SpannerActionOutcome parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerActionOutcome parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.SpannerActionOutcome parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerActionOutcome parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.SpannerActionOutcome parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerActionOutcome parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.SpannerActionOutcome parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.SpannerActionOutcome prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * SpannerActionOutcome defines a result of execution of a single SpannerAction.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.SpannerActionOutcome} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.SpannerActionOutcome) + com.google.spanner.executor.v1.SpannerActionOutcomeOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerActionOutcome_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerActionOutcome_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.SpannerActionOutcome.class, + com.google.spanner.executor.v1.SpannerActionOutcome.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.SpannerActionOutcome.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetStatusFieldBuilder(); + internalGetCommitTimeFieldBuilder(); + internalGetReadResultFieldBuilder(); + internalGetQueryResultFieldBuilder(); + internalGetDbPartitionFieldBuilder(); + internalGetAdminResultFieldBuilder(); + internalGetChangeStreamRecordsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + status_ = null; + if (statusBuilder_ != null) { + statusBuilder_.dispose(); + statusBuilder_ = null; + } + commitTime_ = null; + if (commitTimeBuilder_ != null) { + 
commitTimeBuilder_.dispose(); + commitTimeBuilder_ = null; + } + readResult_ = null; + if (readResultBuilder_ != null) { + readResultBuilder_.dispose(); + readResultBuilder_ = null; + } + queryResult_ = null; + if (queryResultBuilder_ != null) { + queryResultBuilder_.dispose(); + queryResultBuilder_ = null; + } + transactionRestarted_ = false; + batchTxnId_ = com.google.protobuf.ByteString.EMPTY; + if (dbPartitionBuilder_ == null) { + dbPartition_ = java.util.Collections.emptyList(); + } else { + dbPartition_ = null; + dbPartitionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000040); + adminResult_ = null; + if (adminResultBuilder_ != null) { + adminResultBuilder_.dispose(); + adminResultBuilder_ = null; + } + dmlRowsModified_ = emptyLongList(); + if (changeStreamRecordsBuilder_ == null) { + changeStreamRecords_ = java.util.Collections.emptyList(); + } else { + changeStreamRecords_ = null; + changeStreamRecordsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000200); + snapshotIsolationTxnReadTimestamp_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerActionOutcome_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.SpannerActionOutcome getDefaultInstanceForType() { + return com.google.spanner.executor.v1.SpannerActionOutcome.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.SpannerActionOutcome build() { + com.google.spanner.executor.v1.SpannerActionOutcome result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.SpannerActionOutcome buildPartial() { + com.google.spanner.executor.v1.SpannerActionOutcome result = + new 
com.google.spanner.executor.v1.SpannerActionOutcome(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.executor.v1.SpannerActionOutcome result) { + if (dbPartitionBuilder_ == null) { + if (((bitField0_ & 0x00000040) != 0)) { + dbPartition_ = java.util.Collections.unmodifiableList(dbPartition_); + bitField0_ = (bitField0_ & ~0x00000040); + } + result.dbPartition_ = dbPartition_; + } else { + result.dbPartition_ = dbPartitionBuilder_.build(); + } + if (changeStreamRecordsBuilder_ == null) { + if (((bitField0_ & 0x00000200) != 0)) { + changeStreamRecords_ = java.util.Collections.unmodifiableList(changeStreamRecords_); + bitField0_ = (bitField0_ & ~0x00000200); + } + result.changeStreamRecords_ = changeStreamRecords_; + } else { + result.changeStreamRecords_ = changeStreamRecordsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.executor.v1.SpannerActionOutcome result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.status_ = statusBuilder_ == null ? status_ : statusBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.commitTime_ = commitTimeBuilder_ == null ? commitTime_ : commitTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.readResult_ = readResultBuilder_ == null ? readResult_ : readResultBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.queryResult_ = + queryResultBuilder_ == null ? 
queryResult_ : queryResultBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.transactionRestarted_ = transactionRestarted_; + to_bitField0_ |= 0x00000010; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.batchTxnId_ = batchTxnId_; + to_bitField0_ |= 0x00000020; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.adminResult_ = + adminResultBuilder_ == null ? adminResult_ : adminResultBuilder_.build(); + to_bitField0_ |= 0x00000040; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + dmlRowsModified_.makeImmutable(); + result.dmlRowsModified_ = dmlRowsModified_; + } + if (((from_bitField0_ & 0x00000400) != 0)) { + result.snapshotIsolationTxnReadTimestamp_ = snapshotIsolationTxnReadTimestamp_; + to_bitField0_ |= 0x00000080; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.SpannerActionOutcome) { + return mergeFrom((com.google.spanner.executor.v1.SpannerActionOutcome) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.SpannerActionOutcome other) { + if (other == com.google.spanner.executor.v1.SpannerActionOutcome.getDefaultInstance()) + return this; + if (other.hasStatus()) { + mergeStatus(other.getStatus()); + } + if (other.hasCommitTime()) { + mergeCommitTime(other.getCommitTime()); + } + if (other.hasReadResult()) { + mergeReadResult(other.getReadResult()); + } + if (other.hasQueryResult()) { + mergeQueryResult(other.getQueryResult()); + } + if (other.hasTransactionRestarted()) { + setTransactionRestarted(other.getTransactionRestarted()); + } + if (other.hasBatchTxnId()) { + setBatchTxnId(other.getBatchTxnId()); + } + if (dbPartitionBuilder_ == null) { + if (!other.dbPartition_.isEmpty()) { + if (dbPartition_.isEmpty()) { + dbPartition_ = other.dbPartition_; + bitField0_ = 
(bitField0_ & ~0x00000040); + } else { + ensureDbPartitionIsMutable(); + dbPartition_.addAll(other.dbPartition_); + } + onChanged(); + } + } else { + if (!other.dbPartition_.isEmpty()) { + if (dbPartitionBuilder_.isEmpty()) { + dbPartitionBuilder_.dispose(); + dbPartitionBuilder_ = null; + dbPartition_ = other.dbPartition_; + bitField0_ = (bitField0_ & ~0x00000040); + dbPartitionBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetDbPartitionFieldBuilder() + : null; + } else { + dbPartitionBuilder_.addAllMessages(other.dbPartition_); + } + } + } + if (other.hasAdminResult()) { + mergeAdminResult(other.getAdminResult()); + } + if (!other.dmlRowsModified_.isEmpty()) { + if (dmlRowsModified_.isEmpty()) { + dmlRowsModified_ = other.dmlRowsModified_; + dmlRowsModified_.makeImmutable(); + bitField0_ |= 0x00000100; + } else { + ensureDmlRowsModifiedIsMutable(); + dmlRowsModified_.addAll(other.dmlRowsModified_); + } + onChanged(); + } + if (changeStreamRecordsBuilder_ == null) { + if (!other.changeStreamRecords_.isEmpty()) { + if (changeStreamRecords_.isEmpty()) { + changeStreamRecords_ = other.changeStreamRecords_; + bitField0_ = (bitField0_ & ~0x00000200); + } else { + ensureChangeStreamRecordsIsMutable(); + changeStreamRecords_.addAll(other.changeStreamRecords_); + } + onChanged(); + } + } else { + if (!other.changeStreamRecords_.isEmpty()) { + if (changeStreamRecordsBuilder_.isEmpty()) { + changeStreamRecordsBuilder_.dispose(); + changeStreamRecordsBuilder_ = null; + changeStreamRecords_ = other.changeStreamRecords_; + bitField0_ = (bitField0_ & ~0x00000200); + changeStreamRecordsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetChangeStreamRecordsFieldBuilder() + : null; + } else { + changeStreamRecordsBuilder_.addAllMessages(other.changeStreamRecords_); + } + } + } + if (other.hasSnapshotIsolationTxnReadTimestamp()) { + setSnapshotIsolationTxnReadTimestamp(other.getSnapshotIsolationTxnReadTimestamp()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(internalGetStatusFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetCommitTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetReadResultFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetQueryResultFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 40: + { + transactionRestarted_ = input.readBool(); + bitField0_ |= 0x00000010; + break; + } // case 40 + case 50: + { + batchTxnId_ = input.readBytes(); + bitField0_ |= 0x00000020; + break; + } // case 50 + case 58: + { + com.google.spanner.executor.v1.BatchPartition m = + input.readMessage( + com.google.spanner.executor.v1.BatchPartition.parser(), extensionRegistry); + if (dbPartitionBuilder_ == null) { + ensureDbPartitionIsMutable(); + dbPartition_.add(m); + } else { + 
dbPartitionBuilder_.addMessage(m); + } + break; + } // case 58 + case 66: + { + input.readMessage( + internalGetAdminResultFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000080; + break; + } // case 66 + case 72: + { + long v = input.readInt64(); + ensureDmlRowsModifiedIsMutable(); + dmlRowsModified_.addLong(v); + break; + } // case 72 + case 74: + { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + ensureDmlRowsModifiedIsMutable(); + while (input.getBytesUntilLimit() > 0) { + dmlRowsModified_.addLong(input.readInt64()); + } + input.popLimit(limit); + break; + } // case 74 + case 82: + { + com.google.spanner.executor.v1.ChangeStreamRecord m = + input.readMessage( + com.google.spanner.executor.v1.ChangeStreamRecord.parser(), + extensionRegistry); + if (changeStreamRecordsBuilder_ == null) { + ensureChangeStreamRecordsIsMutable(); + changeStreamRecords_.add(m); + } else { + changeStreamRecordsBuilder_.addMessage(m); + } + break; + } // case 82 + case 88: + { + snapshotIsolationTxnReadTimestamp_ = input.readInt64(); + bitField0_ |= 0x00000400; + break; + } // case 88 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.rpc.Status status_; + private com.google.protobuf.SingleFieldBuilder< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + statusBuilder_; + + /** + * + * + *
    +     * If an outcome is split into multiple parts, status will be set only in the
    +     * last part.
    +     * 
    + * + * optional .google.rpc.Status status = 1; + * + * @return Whether the status field is set. + */ + public boolean hasStatus() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * If an outcome is split into multiple parts, status will be set only in the
    +     * last part.
    +     * 
    + * + * optional .google.rpc.Status status = 1; + * + * @return The status. + */ + public com.google.rpc.Status getStatus() { + if (statusBuilder_ == null) { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } else { + return statusBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * If an outcome is split into multiple parts, status will be set only in the
    +     * last part.
    +     * 
    + * + * optional .google.rpc.Status status = 1; + */ + public Builder setStatus(com.google.rpc.Status value) { + if (statusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + status_ = value; + } else { + statusBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If an outcome is split into multiple parts, status will be set only in the
    +     * last part.
    +     * 
    + * + * optional .google.rpc.Status status = 1; + */ + public Builder setStatus(com.google.rpc.Status.Builder builderForValue) { + if (statusBuilder_ == null) { + status_ = builderForValue.build(); + } else { + statusBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If an outcome is split into multiple parts, status will be set only in the
    +     * last part.
    +     * 
    + * + * optional .google.rpc.Status status = 1; + */ + public Builder mergeStatus(com.google.rpc.Status value) { + if (statusBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && status_ != null + && status_ != com.google.rpc.Status.getDefaultInstance()) { + getStatusBuilder().mergeFrom(value); + } else { + status_ = value; + } + } else { + statusBuilder_.mergeFrom(value); + } + if (status_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * If an outcome is split into multiple parts, status will be set only in the
    +     * last part.
    +     * 
    + * + * optional .google.rpc.Status status = 1; + */ + public Builder clearStatus() { + bitField0_ = (bitField0_ & ~0x00000001); + status_ = null; + if (statusBuilder_ != null) { + statusBuilder_.dispose(); + statusBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * If an outcome is split into multiple parts, status will be set only in the
    +     * last part.
    +     * 
    + * + * optional .google.rpc.Status status = 1; + */ + public com.google.rpc.Status.Builder getStatusBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetStatusFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * If an outcome is split into multiple parts, status will be set only in the
    +     * last part.
    +     * 
    + * + * optional .google.rpc.Status status = 1; + */ + public com.google.rpc.StatusOrBuilder getStatusOrBuilder() { + if (statusBuilder_ != null) { + return statusBuilder_.getMessageOrBuilder(); + } else { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } + } + + /** + * + * + *
    +     * If an outcome is split into multiple parts, status will be set only in the
    +     * last part.
    +     * 
    + * + * optional .google.rpc.Status status = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + internalGetStatusFieldBuilder() { + if (statusBuilder_ == null) { + statusBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.rpc.Status, + com.google.rpc.Status.Builder, + com.google.rpc.StatusOrBuilder>(getStatus(), getParentForChildren(), isClean()); + status_ = null; + } + return statusBuilder_; + } + + private com.google.protobuf.Timestamp commitTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + commitTimeBuilder_; + + /** + * + * + *
    +     * Transaction timestamp. It must be set for successful committed actions.
    +     * 
    + * + * optional .google.protobuf.Timestamp commit_time = 2; + * + * @return Whether the commitTime field is set. + */ + public boolean hasCommitTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Transaction timestamp. It must be set for successful committed actions.
    +     * 
    + * + * optional .google.protobuf.Timestamp commit_time = 2; + * + * @return The commitTime. + */ + public com.google.protobuf.Timestamp getCommitTime() { + if (commitTimeBuilder_ == null) { + return commitTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTime_; + } else { + return commitTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Transaction timestamp. It must be set for successful committed actions.
    +     * 
    + * + * optional .google.protobuf.Timestamp commit_time = 2; + */ + public Builder setCommitTime(com.google.protobuf.Timestamp value) { + if (commitTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commitTime_ = value; + } else { + commitTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Transaction timestamp. It must be set for successful committed actions.
    +     * 
    + * + * optional .google.protobuf.Timestamp commit_time = 2; + */ + public Builder setCommitTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (commitTimeBuilder_ == null) { + commitTime_ = builderForValue.build(); + } else { + commitTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Transaction timestamp. It must be set for successful committed actions.
    +     * 
    + * + * optional .google.protobuf.Timestamp commit_time = 2; + */ + public Builder mergeCommitTime(com.google.protobuf.Timestamp value) { + if (commitTimeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && commitTime_ != null + && commitTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCommitTimeBuilder().mergeFrom(value); + } else { + commitTime_ = value; + } + } else { + commitTimeBuilder_.mergeFrom(value); + } + if (commitTime_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Transaction timestamp. It must be set for successful committed actions.
    +     * 
    + * + * optional .google.protobuf.Timestamp commit_time = 2; + */ + public Builder clearCommitTime() { + bitField0_ = (bitField0_ & ~0x00000002); + commitTime_ = null; + if (commitTimeBuilder_ != null) { + commitTimeBuilder_.dispose(); + commitTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Transaction timestamp. It must be set for successful committed actions.
    +     * 
    + * + * optional .google.protobuf.Timestamp commit_time = 2; + */ + public com.google.protobuf.Timestamp.Builder getCommitTimeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetCommitTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Transaction timestamp. It must be set for successful committed actions.
    +     * 
    + * + * optional .google.protobuf.Timestamp commit_time = 2; + */ + public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { + if (commitTimeBuilder_ != null) { + return commitTimeBuilder_.getMessageOrBuilder(); + } else { + return commitTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTime_; + } + } + + /** + * + * + *
    +     * Transaction timestamp. It must be set for successful committed actions.
    +     * 
    + * + * optional .google.protobuf.Timestamp commit_time = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCommitTimeFieldBuilder() { + if (commitTimeBuilder_ == null) { + commitTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCommitTime(), getParentForChildren(), isClean()); + commitTime_ = null; + } + return commitTimeBuilder_; + } + + private com.google.spanner.executor.v1.ReadResult readResult_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ReadResult, + com.google.spanner.executor.v1.ReadResult.Builder, + com.google.spanner.executor.v1.ReadResultOrBuilder> + readResultBuilder_; + + /** + * + * + *
    +     * Result of a ReadAction. This field must be set for ReadActions even if
    +     * no rows were read.
    +     * 
    + * + * optional .google.spanner.executor.v1.ReadResult read_result = 3; + * + * @return Whether the readResult field is set. + */ + public boolean hasReadResult() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Result of a ReadAction. This field must be set for ReadActions even if
    +     * no rows were read.
    +     * 
    + * + * optional .google.spanner.executor.v1.ReadResult read_result = 3; + * + * @return The readResult. + */ + public com.google.spanner.executor.v1.ReadResult getReadResult() { + if (readResultBuilder_ == null) { + return readResult_ == null + ? com.google.spanner.executor.v1.ReadResult.getDefaultInstance() + : readResult_; + } else { + return readResultBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Result of a ReadAction. This field must be set for ReadActions even if
    +     * no rows were read.
    +     * 
    + * + * optional .google.spanner.executor.v1.ReadResult read_result = 3; + */ + public Builder setReadResult(com.google.spanner.executor.v1.ReadResult value) { + if (readResultBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + readResult_ = value; + } else { + readResultBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Result of a ReadAction. This field must be set for ReadActions even if
    +     * no rows were read.
    +     * 
    + * + * optional .google.spanner.executor.v1.ReadResult read_result = 3; + */ + public Builder setReadResult( + com.google.spanner.executor.v1.ReadResult.Builder builderForValue) { + if (readResultBuilder_ == null) { + readResult_ = builderForValue.build(); + } else { + readResultBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Result of a ReadAction. This field must be set for ReadActions even if
    +     * no rows were read.
    +     * 
    + * + * optional .google.spanner.executor.v1.ReadResult read_result = 3; + */ + public Builder mergeReadResult(com.google.spanner.executor.v1.ReadResult value) { + if (readResultBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && readResult_ != null + && readResult_ != com.google.spanner.executor.v1.ReadResult.getDefaultInstance()) { + getReadResultBuilder().mergeFrom(value); + } else { + readResult_ = value; + } + } else { + readResultBuilder_.mergeFrom(value); + } + if (readResult_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Result of a ReadAction. This field must be set for ReadActions even if
    +     * no rows were read.
    +     * 
    + * + * optional .google.spanner.executor.v1.ReadResult read_result = 3; + */ + public Builder clearReadResult() { + bitField0_ = (bitField0_ & ~0x00000004); + readResult_ = null; + if (readResultBuilder_ != null) { + readResultBuilder_.dispose(); + readResultBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Result of a ReadAction. This field must be set for ReadActions even if
    +     * no rows were read.
    +     * 
    + * + * optional .google.spanner.executor.v1.ReadResult read_result = 3; + */ + public com.google.spanner.executor.v1.ReadResult.Builder getReadResultBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetReadResultFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Result of a ReadAction. This field must be set for ReadActions even if
    +     * no rows were read.
    +     * 
    + * + * optional .google.spanner.executor.v1.ReadResult read_result = 3; + */ + public com.google.spanner.executor.v1.ReadResultOrBuilder getReadResultOrBuilder() { + if (readResultBuilder_ != null) { + return readResultBuilder_.getMessageOrBuilder(); + } else { + return readResult_ == null + ? com.google.spanner.executor.v1.ReadResult.getDefaultInstance() + : readResult_; + } + } + + /** + * + * + *
    +     * Result of a ReadAction. This field must be set for ReadActions even if
    +     * no rows were read.
    +     * 
    + * + * optional .google.spanner.executor.v1.ReadResult read_result = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ReadResult, + com.google.spanner.executor.v1.ReadResult.Builder, + com.google.spanner.executor.v1.ReadResultOrBuilder> + internalGetReadResultFieldBuilder() { + if (readResultBuilder_ == null) { + readResultBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ReadResult, + com.google.spanner.executor.v1.ReadResult.Builder, + com.google.spanner.executor.v1.ReadResultOrBuilder>( + getReadResult(), getParentForChildren(), isClean()); + readResult_ = null; + } + return readResultBuilder_; + } + + private com.google.spanner.executor.v1.QueryResult queryResult_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.QueryResult, + com.google.spanner.executor.v1.QueryResult.Builder, + com.google.spanner.executor.v1.QueryResultOrBuilder> + queryResultBuilder_; + + /** + * + * + *
    +     * Result of a Query. This field must be set for Queries even if no rows were
    +     * read.
    +     * 
    + * + * optional .google.spanner.executor.v1.QueryResult query_result = 4; + * + * @return Whether the queryResult field is set. + */ + public boolean hasQueryResult() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Result of a Query. This field must be set for Queries even if no rows were
    +     * read.
    +     * 
    + * + * optional .google.spanner.executor.v1.QueryResult query_result = 4; + * + * @return The queryResult. + */ + public com.google.spanner.executor.v1.QueryResult getQueryResult() { + if (queryResultBuilder_ == null) { + return queryResult_ == null + ? com.google.spanner.executor.v1.QueryResult.getDefaultInstance() + : queryResult_; + } else { + return queryResultBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Result of a Query. This field must be set for Queries even if no rows were
    +     * read.
    +     * 
    + * + * optional .google.spanner.executor.v1.QueryResult query_result = 4; + */ + public Builder setQueryResult(com.google.spanner.executor.v1.QueryResult value) { + if (queryResultBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + queryResult_ = value; + } else { + queryResultBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Result of a Query. This field must be set for Queries even if no rows were
    +     * read.
    +     * 
    + * + * optional .google.spanner.executor.v1.QueryResult query_result = 4; + */ + public Builder setQueryResult( + com.google.spanner.executor.v1.QueryResult.Builder builderForValue) { + if (queryResultBuilder_ == null) { + queryResult_ = builderForValue.build(); + } else { + queryResultBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Result of a Query. This field must be set for Queries even if no rows were
    +     * read.
    +     * 
    + * + * optional .google.spanner.executor.v1.QueryResult query_result = 4; + */ + public Builder mergeQueryResult(com.google.spanner.executor.v1.QueryResult value) { + if (queryResultBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && queryResult_ != null + && queryResult_ != com.google.spanner.executor.v1.QueryResult.getDefaultInstance()) { + getQueryResultBuilder().mergeFrom(value); + } else { + queryResult_ = value; + } + } else { + queryResultBuilder_.mergeFrom(value); + } + if (queryResult_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Result of a Query. This field must be set for Queries even if no rows were
    +     * read.
    +     * 
    + * + * optional .google.spanner.executor.v1.QueryResult query_result = 4; + */ + public Builder clearQueryResult() { + bitField0_ = (bitField0_ & ~0x00000008); + queryResult_ = null; + if (queryResultBuilder_ != null) { + queryResultBuilder_.dispose(); + queryResultBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Result of a Query. This field must be set for Queries even if no rows were
    +     * read.
    +     * 
    + * + * optional .google.spanner.executor.v1.QueryResult query_result = 4; + */ + public com.google.spanner.executor.v1.QueryResult.Builder getQueryResultBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetQueryResultFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Result of a Query. This field must be set for Queries even if no rows were
    +     * read.
    +     * 
    + * + * optional .google.spanner.executor.v1.QueryResult query_result = 4; + */ + public com.google.spanner.executor.v1.QueryResultOrBuilder getQueryResultOrBuilder() { + if (queryResultBuilder_ != null) { + return queryResultBuilder_.getMessageOrBuilder(); + } else { + return queryResult_ == null + ? com.google.spanner.executor.v1.QueryResult.getDefaultInstance() + : queryResult_; + } + } + + /** + * + * + *
    +     * Result of a Query. This field must be set for Queries even if no rows were
    +     * read.
    +     * 
    + * + * optional .google.spanner.executor.v1.QueryResult query_result = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.QueryResult, + com.google.spanner.executor.v1.QueryResult.Builder, + com.google.spanner.executor.v1.QueryResultOrBuilder> + internalGetQueryResultFieldBuilder() { + if (queryResultBuilder_ == null) { + queryResultBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.QueryResult, + com.google.spanner.executor.v1.QueryResult.Builder, + com.google.spanner.executor.v1.QueryResultOrBuilder>( + getQueryResult(), getParentForChildren(), isClean()); + queryResult_ = null; + } + return queryResultBuilder_; + } + + private boolean transactionRestarted_; + + /** + * + * + *
    +     * This bit indicates that Spanner has restarted the current transaction. It
    +     * means that the client should replay all the reads and writes.
    +     * Setting it to true is only valid in the context of a read-write
    +     * transaction, as an outcome of a committing FinishTransactionAction.
    +     * 
    + * + * optional bool transaction_restarted = 5; + * + * @return Whether the transactionRestarted field is set. + */ + @java.lang.Override + public boolean hasTransactionRestarted() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * This bit indicates that Spanner has restarted the current transaction. It
    +     * means that the client should replay all the reads and writes.
    +     * Setting it to true is only valid in the context of a read-write
    +     * transaction, as an outcome of a committing FinishTransactionAction.
    +     * 
    + * + * optional bool transaction_restarted = 5; + * + * @return The transactionRestarted. + */ + @java.lang.Override + public boolean getTransactionRestarted() { + return transactionRestarted_; + } + + /** + * + * + *
    +     * This bit indicates that Spanner has restarted the current transaction. It
    +     * means that the client should replay all the reads and writes.
    +     * Setting it to true is only valid in the context of a read-write
    +     * transaction, as an outcome of a committing FinishTransactionAction.
    +     * 
    + * + * optional bool transaction_restarted = 5; + * + * @param value The transactionRestarted to set. + * @return This builder for chaining. + */ + public Builder setTransactionRestarted(boolean value) { + + transactionRestarted_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * This bit indicates that Spanner has restarted the current transaction. It
    +     * means that the client should replay all the reads and writes.
    +     * Setting it to true is only valid in the context of a read-write
    +     * transaction, as an outcome of a committing FinishTransactionAction.
    +     * 
    + * + * optional bool transaction_restarted = 5; + * + * @return This builder for chaining. + */ + public Builder clearTransactionRestarted() { + bitField0_ = (bitField0_ & ~0x00000010); + transactionRestarted_ = false; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString batchTxnId_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * In successful StartBatchTransactionAction outcomes, this contains the ID of
    +     * the transaction.
    +     * 
    + * + * optional bytes batch_txn_id = 6; + * + * @return Whether the batchTxnId field is set. + */ + @java.lang.Override + public boolean hasBatchTxnId() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
    +     * In successful StartBatchTransactionAction outcomes, this contains the ID of
    +     * the transaction.
    +     * 
    + * + * optional bytes batch_txn_id = 6; + * + * @return The batchTxnId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBatchTxnId() { + return batchTxnId_; + } + + /** + * + * + *
    +     * In successful StartBatchTransactionAction outcomes, this contains the ID of
    +     * the transaction.
    +     * 
    + * + * optional bytes batch_txn_id = 6; + * + * @param value The batchTxnId to set. + * @return This builder for chaining. + */ + public Builder setBatchTxnId(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + batchTxnId_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * In successful StartBatchTransactionAction outcomes, this contains the ID of
    +     * the transaction.
    +     * 
    + * + * optional bytes batch_txn_id = 6; + * + * @return This builder for chaining. + */ + public Builder clearBatchTxnId() { + bitField0_ = (bitField0_ & ~0x00000020); + batchTxnId_ = getDefaultInstance().getBatchTxnId(); + onChanged(); + return this; + } + + private java.util.List dbPartition_ = + java.util.Collections.emptyList(); + + private void ensureDbPartitionIsMutable() { + if (!((bitField0_ & 0x00000040) != 0)) { + dbPartition_ = + new java.util.ArrayList(dbPartition_); + bitField0_ |= 0x00000040; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.BatchPartition, + com.google.spanner.executor.v1.BatchPartition.Builder, + com.google.spanner.executor.v1.BatchPartitionOrBuilder> + dbPartitionBuilder_; + + /** + * + * + *
    +     * Generated database partitions (result of a
    +     * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +     * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + public java.util.List getDbPartitionList() { + if (dbPartitionBuilder_ == null) { + return java.util.Collections.unmodifiableList(dbPartition_); + } else { + return dbPartitionBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Generated database partitions (result of a
    +     * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +     * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + public int getDbPartitionCount() { + if (dbPartitionBuilder_ == null) { + return dbPartition_.size(); + } else { + return dbPartitionBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Generated database partitions (result of a
    +     * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +     * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + public com.google.spanner.executor.v1.BatchPartition getDbPartition(int index) { + if (dbPartitionBuilder_ == null) { + return dbPartition_.get(index); + } else { + return dbPartitionBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Generated database partitions (result of a
    +     * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +     * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + public Builder setDbPartition(int index, com.google.spanner.executor.v1.BatchPartition value) { + if (dbPartitionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDbPartitionIsMutable(); + dbPartition_.set(index, value); + onChanged(); + } else { + dbPartitionBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Generated database partitions (result of a
    +     * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +     * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + public Builder setDbPartition( + int index, com.google.spanner.executor.v1.BatchPartition.Builder builderForValue) { + if (dbPartitionBuilder_ == null) { + ensureDbPartitionIsMutable(); + dbPartition_.set(index, builderForValue.build()); + onChanged(); + } else { + dbPartitionBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Generated database partitions (result of a
    +     * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +     * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + public Builder addDbPartition(com.google.spanner.executor.v1.BatchPartition value) { + if (dbPartitionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDbPartitionIsMutable(); + dbPartition_.add(value); + onChanged(); + } else { + dbPartitionBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Generated database partitions (result of a
    +     * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +     * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + public Builder addDbPartition(int index, com.google.spanner.executor.v1.BatchPartition value) { + if (dbPartitionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDbPartitionIsMutable(); + dbPartition_.add(index, value); + onChanged(); + } else { + dbPartitionBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Generated database partitions (result of a
    +     * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +     * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + public Builder addDbPartition( + com.google.spanner.executor.v1.BatchPartition.Builder builderForValue) { + if (dbPartitionBuilder_ == null) { + ensureDbPartitionIsMutable(); + dbPartition_.add(builderForValue.build()); + onChanged(); + } else { + dbPartitionBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Generated database partitions (result of a
    +     * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +     * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + public Builder addDbPartition( + int index, com.google.spanner.executor.v1.BatchPartition.Builder builderForValue) { + if (dbPartitionBuilder_ == null) { + ensureDbPartitionIsMutable(); + dbPartition_.add(index, builderForValue.build()); + onChanged(); + } else { + dbPartitionBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Generated database partitions (result of a
    +     * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +     * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + public Builder addAllDbPartition( + java.lang.Iterable values) { + if (dbPartitionBuilder_ == null) { + ensureDbPartitionIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, dbPartition_); + onChanged(); + } else { + dbPartitionBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Generated database partitions (result of a
    +     * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +     * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + public Builder clearDbPartition() { + if (dbPartitionBuilder_ == null) { + dbPartition_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + } else { + dbPartitionBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Generated database partitions (result of a
    +     * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +     * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + public Builder removeDbPartition(int index) { + if (dbPartitionBuilder_ == null) { + ensureDbPartitionIsMutable(); + dbPartition_.remove(index); + onChanged(); + } else { + dbPartitionBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Generated database partitions (result of a
    +     * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +     * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + public com.google.spanner.executor.v1.BatchPartition.Builder getDbPartitionBuilder(int index) { + return internalGetDbPartitionFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Generated database partitions (result of a
    +     * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +     * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + public com.google.spanner.executor.v1.BatchPartitionOrBuilder getDbPartitionOrBuilder( + int index) { + if (dbPartitionBuilder_ == null) { + return dbPartition_.get(index); + } else { + return dbPartitionBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Generated database partitions (result of a
    +     * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +     * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + public java.util.List + getDbPartitionOrBuilderList() { + if (dbPartitionBuilder_ != null) { + return dbPartitionBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(dbPartition_); + } + } + + /** + * + * + *
    +     * Generated database partitions (result of a
    +     * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +     * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + public com.google.spanner.executor.v1.BatchPartition.Builder addDbPartitionBuilder() { + return internalGetDbPartitionFieldBuilder() + .addBuilder(com.google.spanner.executor.v1.BatchPartition.getDefaultInstance()); + } + + /** + * + * + *
    +     * Generated database partitions (result of a
    +     * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +     * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + public com.google.spanner.executor.v1.BatchPartition.Builder addDbPartitionBuilder(int index) { + return internalGetDbPartitionFieldBuilder() + .addBuilder(index, com.google.spanner.executor.v1.BatchPartition.getDefaultInstance()); + } + + /** + * + * + *
    +     * Generated database partitions (result of a
    +     * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +     * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + public java.util.List + getDbPartitionBuilderList() { + return internalGetDbPartitionFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.BatchPartition, + com.google.spanner.executor.v1.BatchPartition.Builder, + com.google.spanner.executor.v1.BatchPartitionOrBuilder> + internalGetDbPartitionFieldBuilder() { + if (dbPartitionBuilder_ == null) { + dbPartitionBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.BatchPartition, + com.google.spanner.executor.v1.BatchPartition.Builder, + com.google.spanner.executor.v1.BatchPartitionOrBuilder>( + dbPartition_, ((bitField0_ & 0x00000040) != 0), getParentForChildren(), isClean()); + dbPartition_ = null; + } + return dbPartitionBuilder_; + } + + private com.google.spanner.executor.v1.AdminResult adminResult_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.AdminResult, + com.google.spanner.executor.v1.AdminResult.Builder, + com.google.spanner.executor.v1.AdminResultOrBuilder> + adminResultBuilder_; + + /** + * + * + *
    +     * Result of admin related actions.
    +     * 
    + * + * optional .google.spanner.executor.v1.AdminResult admin_result = 8; + * + * @return Whether the adminResult field is set. + */ + public boolean hasAdminResult() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
    +     * Result of admin related actions.
    +     * 
    + * + * optional .google.spanner.executor.v1.AdminResult admin_result = 8; + * + * @return The adminResult. + */ + public com.google.spanner.executor.v1.AdminResult getAdminResult() { + if (adminResultBuilder_ == null) { + return adminResult_ == null + ? com.google.spanner.executor.v1.AdminResult.getDefaultInstance() + : adminResult_; + } else { + return adminResultBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Result of admin related actions.
    +     * 
    + * + * optional .google.spanner.executor.v1.AdminResult admin_result = 8; + */ + public Builder setAdminResult(com.google.spanner.executor.v1.AdminResult value) { + if (adminResultBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + adminResult_ = value; + } else { + adminResultBuilder_.setMessage(value); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Result of admin related actions.
    +     * 
    + * + * optional .google.spanner.executor.v1.AdminResult admin_result = 8; + */ + public Builder setAdminResult( + com.google.spanner.executor.v1.AdminResult.Builder builderForValue) { + if (adminResultBuilder_ == null) { + adminResult_ = builderForValue.build(); + } else { + adminResultBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Result of admin related actions.
    +     * 
    + * + * optional .google.spanner.executor.v1.AdminResult admin_result = 8; + */ + public Builder mergeAdminResult(com.google.spanner.executor.v1.AdminResult value) { + if (adminResultBuilder_ == null) { + if (((bitField0_ & 0x00000080) != 0) + && adminResult_ != null + && adminResult_ != com.google.spanner.executor.v1.AdminResult.getDefaultInstance()) { + getAdminResultBuilder().mergeFrom(value); + } else { + adminResult_ = value; + } + } else { + adminResultBuilder_.mergeFrom(value); + } + if (adminResult_ != null) { + bitField0_ |= 0x00000080; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Result of admin related actions.
    +     * 
    + * + * optional .google.spanner.executor.v1.AdminResult admin_result = 8; + */ + public Builder clearAdminResult() { + bitField0_ = (bitField0_ & ~0x00000080); + adminResult_ = null; + if (adminResultBuilder_ != null) { + adminResultBuilder_.dispose(); + adminResultBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Result of admin related actions.
    +     * 
    + * + * optional .google.spanner.executor.v1.AdminResult admin_result = 8; + */ + public com.google.spanner.executor.v1.AdminResult.Builder getAdminResultBuilder() { + bitField0_ |= 0x00000080; + onChanged(); + return internalGetAdminResultFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Result of admin related actions.
    +     * 
    + * + * optional .google.spanner.executor.v1.AdminResult admin_result = 8; + */ + public com.google.spanner.executor.v1.AdminResultOrBuilder getAdminResultOrBuilder() { + if (adminResultBuilder_ != null) { + return adminResultBuilder_.getMessageOrBuilder(); + } else { + return adminResult_ == null + ? com.google.spanner.executor.v1.AdminResult.getDefaultInstance() + : adminResult_; + } + } + + /** + * + * + *
    +     * Result of admin related actions.
    +     * 
    + * + * optional .google.spanner.executor.v1.AdminResult admin_result = 8; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.AdminResult, + com.google.spanner.executor.v1.AdminResult.Builder, + com.google.spanner.executor.v1.AdminResultOrBuilder> + internalGetAdminResultFieldBuilder() { + if (adminResultBuilder_ == null) { + adminResultBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.AdminResult, + com.google.spanner.executor.v1.AdminResult.Builder, + com.google.spanner.executor.v1.AdminResultOrBuilder>( + getAdminResult(), getParentForChildren(), isClean()); + adminResult_ = null; + } + return adminResultBuilder_; + } + + private com.google.protobuf.Internal.LongList dmlRowsModified_ = emptyLongList(); + + private void ensureDmlRowsModifiedIsMutable() { + if (!dmlRowsModified_.isModifiable()) { + dmlRowsModified_ = makeMutableCopy(dmlRowsModified_); + } + bitField0_ |= 0x00000100; + } + + /** + * + * + *
    +     * Stores rows modified by query in single DML or batch DML action.
    +     * In case of batch DML action, stores 0 as row count of errored DML query.
    +     * 
    + * + * repeated int64 dml_rows_modified = 9; + * + * @return A list containing the dmlRowsModified. + */ + public java.util.List getDmlRowsModifiedList() { + dmlRowsModified_.makeImmutable(); + return dmlRowsModified_; + } + + /** + * + * + *
    +     * Stores rows modified by query in single DML or batch DML action.
    +     * In case of batch DML action, stores 0 as row count of errored DML query.
    +     * 
    + * + * repeated int64 dml_rows_modified = 9; + * + * @return The count of dmlRowsModified. + */ + public int getDmlRowsModifiedCount() { + return dmlRowsModified_.size(); + } + + /** + * + * + *
    +     * Stores rows modified by query in single DML or batch DML action.
    +     * In case of batch DML action, stores 0 as row count of errored DML query.
    +     * 
    + * + * repeated int64 dml_rows_modified = 9; + * + * @param index The index of the element to return. + * @return The dmlRowsModified at the given index. + */ + public long getDmlRowsModified(int index) { + return dmlRowsModified_.getLong(index); + } + + /** + * + * + *
    +     * Stores rows modified by query in single DML or batch DML action.
    +     * In case of batch DML action, stores 0 as row count of errored DML query.
    +     * 
    + * + * repeated int64 dml_rows_modified = 9; + * + * @param index The index to set the value at. + * @param value The dmlRowsModified to set. + * @return This builder for chaining. + */ + public Builder setDmlRowsModified(int index, long value) { + + ensureDmlRowsModifiedIsMutable(); + dmlRowsModified_.setLong(index, value); + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Stores rows modified by query in single DML or batch DML action.
    +     * In case of batch DML action, stores 0 as row count of errored DML query.
    +     * 
    + * + * repeated int64 dml_rows_modified = 9; + * + * @param value The dmlRowsModified to add. + * @return This builder for chaining. + */ + public Builder addDmlRowsModified(long value) { + + ensureDmlRowsModifiedIsMutable(); + dmlRowsModified_.addLong(value); + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Stores rows modified by query in single DML or batch DML action.
    +     * In case of batch DML action, stores 0 as row count of errored DML query.
    +     * 
    + * + * repeated int64 dml_rows_modified = 9; + * + * @param values The dmlRowsModified to add. + * @return This builder for chaining. + */ + public Builder addAllDmlRowsModified(java.lang.Iterable values) { + ensureDmlRowsModifiedIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, dmlRowsModified_); + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Stores rows modified by query in single DML or batch DML action.
    +     * In case of batch DML action, stores 0 as row count of errored DML query.
    +     * 
    + * + * repeated int64 dml_rows_modified = 9; + * + * @return This builder for chaining. + */ + public Builder clearDmlRowsModified() { + dmlRowsModified_ = emptyLongList(); + bitField0_ = (bitField0_ & ~0x00000100); + onChanged(); + return this; + } + + private java.util.List changeStreamRecords_ = + java.util.Collections.emptyList(); + + private void ensureChangeStreamRecordsIsMutable() { + if (!((bitField0_ & 0x00000200) != 0)) { + changeStreamRecords_ = + new java.util.ArrayList( + changeStreamRecords_); + bitField0_ |= 0x00000200; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ChangeStreamRecord, + com.google.spanner.executor.v1.ChangeStreamRecord.Builder, + com.google.spanner.executor.v1.ChangeStreamRecordOrBuilder> + changeStreamRecordsBuilder_; + + /** + * + * + *
    +     * Change stream records returned by a change stream query.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + public java.util.List + getChangeStreamRecordsList() { + if (changeStreamRecordsBuilder_ == null) { + return java.util.Collections.unmodifiableList(changeStreamRecords_); + } else { + return changeStreamRecordsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Change stream records returned by a change stream query.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + public int getChangeStreamRecordsCount() { + if (changeStreamRecordsBuilder_ == null) { + return changeStreamRecords_.size(); + } else { + return changeStreamRecordsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Change stream records returned by a change stream query.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + public com.google.spanner.executor.v1.ChangeStreamRecord getChangeStreamRecords(int index) { + if (changeStreamRecordsBuilder_ == null) { + return changeStreamRecords_.get(index); + } else { + return changeStreamRecordsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Change stream records returned by a change stream query.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + public Builder setChangeStreamRecords( + int index, com.google.spanner.executor.v1.ChangeStreamRecord value) { + if (changeStreamRecordsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureChangeStreamRecordsIsMutable(); + changeStreamRecords_.set(index, value); + onChanged(); + } else { + changeStreamRecordsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Change stream records returned by a change stream query.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + public Builder setChangeStreamRecords( + int index, com.google.spanner.executor.v1.ChangeStreamRecord.Builder builderForValue) { + if (changeStreamRecordsBuilder_ == null) { + ensureChangeStreamRecordsIsMutable(); + changeStreamRecords_.set(index, builderForValue.build()); + onChanged(); + } else { + changeStreamRecordsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Change stream records returned by a change stream query.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + public Builder addChangeStreamRecords(com.google.spanner.executor.v1.ChangeStreamRecord value) { + if (changeStreamRecordsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureChangeStreamRecordsIsMutable(); + changeStreamRecords_.add(value); + onChanged(); + } else { + changeStreamRecordsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Change stream records returned by a change stream query.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + public Builder addChangeStreamRecords( + int index, com.google.spanner.executor.v1.ChangeStreamRecord value) { + if (changeStreamRecordsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureChangeStreamRecordsIsMutable(); + changeStreamRecords_.add(index, value); + onChanged(); + } else { + changeStreamRecordsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Change stream records returned by a change stream query.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + public Builder addChangeStreamRecords( + com.google.spanner.executor.v1.ChangeStreamRecord.Builder builderForValue) { + if (changeStreamRecordsBuilder_ == null) { + ensureChangeStreamRecordsIsMutable(); + changeStreamRecords_.add(builderForValue.build()); + onChanged(); + } else { + changeStreamRecordsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Change stream records returned by a change stream query.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + public Builder addChangeStreamRecords( + int index, com.google.spanner.executor.v1.ChangeStreamRecord.Builder builderForValue) { + if (changeStreamRecordsBuilder_ == null) { + ensureChangeStreamRecordsIsMutable(); + changeStreamRecords_.add(index, builderForValue.build()); + onChanged(); + } else { + changeStreamRecordsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Change stream records returned by a change stream query.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + public Builder addAllChangeStreamRecords( + java.lang.Iterable values) { + if (changeStreamRecordsBuilder_ == null) { + ensureChangeStreamRecordsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, changeStreamRecords_); + onChanged(); + } else { + changeStreamRecordsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Change stream records returned by a change stream query.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + public Builder clearChangeStreamRecords() { + if (changeStreamRecordsBuilder_ == null) { + changeStreamRecords_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000200); + onChanged(); + } else { + changeStreamRecordsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Change stream records returned by a change stream query.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + public Builder removeChangeStreamRecords(int index) { + if (changeStreamRecordsBuilder_ == null) { + ensureChangeStreamRecordsIsMutable(); + changeStreamRecords_.remove(index); + onChanged(); + } else { + changeStreamRecordsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Change stream records returned by a change stream query.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + public com.google.spanner.executor.v1.ChangeStreamRecord.Builder getChangeStreamRecordsBuilder( + int index) { + return internalGetChangeStreamRecordsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Change stream records returned by a change stream query.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + public com.google.spanner.executor.v1.ChangeStreamRecordOrBuilder + getChangeStreamRecordsOrBuilder(int index) { + if (changeStreamRecordsBuilder_ == null) { + return changeStreamRecords_.get(index); + } else { + return changeStreamRecordsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Change stream records returned by a change stream query.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + public java.util.List + getChangeStreamRecordsOrBuilderList() { + if (changeStreamRecordsBuilder_ != null) { + return changeStreamRecordsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(changeStreamRecords_); + } + } + + /** + * + * + *
    +     * Change stream records returned by a change stream query.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + public com.google.spanner.executor.v1.ChangeStreamRecord.Builder + addChangeStreamRecordsBuilder() { + return internalGetChangeStreamRecordsFieldBuilder() + .addBuilder(com.google.spanner.executor.v1.ChangeStreamRecord.getDefaultInstance()); + } + + /** + * + * + *
    +     * Change stream records returned by a change stream query.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + public com.google.spanner.executor.v1.ChangeStreamRecord.Builder addChangeStreamRecordsBuilder( + int index) { + return internalGetChangeStreamRecordsFieldBuilder() + .addBuilder( + index, com.google.spanner.executor.v1.ChangeStreamRecord.getDefaultInstance()); + } + + /** + * + * + *
    +     * Change stream records returned by a change stream query.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + public java.util.List + getChangeStreamRecordsBuilderList() { + return internalGetChangeStreamRecordsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ChangeStreamRecord, + com.google.spanner.executor.v1.ChangeStreamRecord.Builder, + com.google.spanner.executor.v1.ChangeStreamRecordOrBuilder> + internalGetChangeStreamRecordsFieldBuilder() { + if (changeStreamRecordsBuilder_ == null) { + changeStreamRecordsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ChangeStreamRecord, + com.google.spanner.executor.v1.ChangeStreamRecord.Builder, + com.google.spanner.executor.v1.ChangeStreamRecordOrBuilder>( + changeStreamRecords_, + ((bitField0_ & 0x00000200) != 0), + getParentForChildren(), + isClean()); + changeStreamRecords_ = null; + } + return changeStreamRecordsBuilder_; + } + + private long snapshotIsolationTxnReadTimestamp_; + + /** + * + * + *
    +     * If not zero, it indicates the read timestamp to use for validating
    +     * the SnapshotIsolation transaction.
    +     * 
    + * + * optional int64 snapshot_isolation_txn_read_timestamp = 11; + * + * @return Whether the snapshotIsolationTxnReadTimestamp field is set. + */ + @java.lang.Override + public boolean hasSnapshotIsolationTxnReadTimestamp() { + return ((bitField0_ & 0x00000400) != 0); + } + + /** + * + * + *
    +     * If not zero, it indicates the read timestamp to use for validating
    +     * the SnapshotIsolation transaction.
    +     * 
    + * + * optional int64 snapshot_isolation_txn_read_timestamp = 11; + * + * @return The snapshotIsolationTxnReadTimestamp. + */ + @java.lang.Override + public long getSnapshotIsolationTxnReadTimestamp() { + return snapshotIsolationTxnReadTimestamp_; + } + + /** + * + * + *
    +     * If not zero, it indicates the read timestamp to use for validating
    +     * the SnapshotIsolation transaction.
    +     * 
    + * + * optional int64 snapshot_isolation_txn_read_timestamp = 11; + * + * @param value The snapshotIsolationTxnReadTimestamp to set. + * @return This builder for chaining. + */ + public Builder setSnapshotIsolationTxnReadTimestamp(long value) { + + snapshotIsolationTxnReadTimestamp_ = value; + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If not zero, it indicates the read timestamp to use for validating
    +     * the SnapshotIsolation transaction.
    +     * 
    + * + * optional int64 snapshot_isolation_txn_read_timestamp = 11; + * + * @return This builder for chaining. + */ + public Builder clearSnapshotIsolationTxnReadTimestamp() { + bitField0_ = (bitField0_ & ~0x00000400); + snapshotIsolationTxnReadTimestamp_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.SpannerActionOutcome) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.SpannerActionOutcome) + private static final com.google.spanner.executor.v1.SpannerActionOutcome DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.SpannerActionOutcome(); + } + + public static com.google.spanner.executor.v1.SpannerActionOutcome getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SpannerActionOutcome parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.SpannerActionOutcome 
getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerActionOutcomeOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerActionOutcomeOrBuilder.java new file mode 100644 index 000000000000..f4670e974cc6 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerActionOutcomeOrBuilder.java @@ -0,0 +1,476 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface SpannerActionOutcomeOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.SpannerActionOutcome) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * If an outcome is split into multiple parts, status will be set only in the
    +   * last part.
    +   * 
    + * + * optional .google.rpc.Status status = 1; + * + * @return Whether the status field is set. + */ + boolean hasStatus(); + + /** + * + * + *
    +   * If an outcome is split into multiple parts, status will be set only in the
    +   * last part.
    +   * 
    + * + * optional .google.rpc.Status status = 1; + * + * @return The status. + */ + com.google.rpc.Status getStatus(); + + /** + * + * + *
    +   * If an outcome is split into multiple parts, status will be set only in the
    +   * last part.
    +   * 
    + * + * optional .google.rpc.Status status = 1; + */ + com.google.rpc.StatusOrBuilder getStatusOrBuilder(); + + /** + * + * + *
    +   * Transaction timestamp. It must be set for successful committed actions.
    +   * 
    + * + * optional .google.protobuf.Timestamp commit_time = 2; + * + * @return Whether the commitTime field is set. + */ + boolean hasCommitTime(); + + /** + * + * + *
    +   * Transaction timestamp. It must be set for successful committed actions.
    +   * 
    + * + * optional .google.protobuf.Timestamp commit_time = 2; + * + * @return The commitTime. + */ + com.google.protobuf.Timestamp getCommitTime(); + + /** + * + * + *
    +   * Transaction timestamp. It must be set for successful committed actions.
    +   * 
    + * + * optional .google.protobuf.Timestamp commit_time = 2; + */ + com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder(); + + /** + * + * + *
    +   * Result of a ReadAction. This field must be set for ReadActions even if
    +   * no rows were read.
    +   * 
    + * + * optional .google.spanner.executor.v1.ReadResult read_result = 3; + * + * @return Whether the readResult field is set. + */ + boolean hasReadResult(); + + /** + * + * + *
    +   * Result of a ReadAction. This field must be set for ReadActions even if
    +   * no rows were read.
    +   * 
    + * + * optional .google.spanner.executor.v1.ReadResult read_result = 3; + * + * @return The readResult. + */ + com.google.spanner.executor.v1.ReadResult getReadResult(); + + /** + * + * + *
    +   * Result of a ReadAction. This field must be set for ReadActions even if
    +   * no rows were read.
    +   * 
    + * + * optional .google.spanner.executor.v1.ReadResult read_result = 3; + */ + com.google.spanner.executor.v1.ReadResultOrBuilder getReadResultOrBuilder(); + + /** + * + * + *
    +   * Result of a Query. This field must be set for Queries even if no rows were
    +   * read.
    +   * 
    + * + * optional .google.spanner.executor.v1.QueryResult query_result = 4; + * + * @return Whether the queryResult field is set. + */ + boolean hasQueryResult(); + + /** + * + * + *
    +   * Result of a Query. This field must be set for Queries even if no rows were
    +   * read.
    +   * 
    + * + * optional .google.spanner.executor.v1.QueryResult query_result = 4; + * + * @return The queryResult. + */ + com.google.spanner.executor.v1.QueryResult getQueryResult(); + + /** + * + * + *
    +   * Result of a Query. This field must be set for Queries even if no rows were
    +   * read.
    +   * 
    + * + * optional .google.spanner.executor.v1.QueryResult query_result = 4; + */ + com.google.spanner.executor.v1.QueryResultOrBuilder getQueryResultOrBuilder(); + + /** + * + * + *
    +   * This bit indicates that Spanner has restarted the current transaction. It
    +   * means that the client should replay all the reads and writes.
    +   * Setting it to true is only valid in the context of a read-write
    +   * transaction, as an outcome of a committing FinishTransactionAction.
    +   * 
    + * + * optional bool transaction_restarted = 5; + * + * @return Whether the transactionRestarted field is set. + */ + boolean hasTransactionRestarted(); + + /** + * + * + *
    +   * This bit indicates that Spanner has restarted the current transaction. It
    +   * means that the client should replay all the reads and writes.
    +   * Setting it to true is only valid in the context of a read-write
    +   * transaction, as an outcome of a committing FinishTransactionAction.
    +   * 
    + * + * optional bool transaction_restarted = 5; + * + * @return The transactionRestarted. + */ + boolean getTransactionRestarted(); + + /** + * + * + *
    +   * In successful StartBatchTransactionAction outcomes, this contains the ID of
    +   * the transaction.
    +   * 
    + * + * optional bytes batch_txn_id = 6; + * + * @return Whether the batchTxnId field is set. + */ + boolean hasBatchTxnId(); + + /** + * + * + *
    +   * In successful StartBatchTransactionAction outcomes, this contains the ID of
    +   * the transaction.
    +   * 
    + * + * optional bytes batch_txn_id = 6; + * + * @return The batchTxnId. + */ + com.google.protobuf.ByteString getBatchTxnId(); + + /** + * + * + *
    +   * Generated database partitions (result of a
    +   * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +   * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + java.util.List getDbPartitionList(); + + /** + * + * + *
    +   * Generated database partitions (result of a
    +   * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +   * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + com.google.spanner.executor.v1.BatchPartition getDbPartition(int index); + + /** + * + * + *
    +   * Generated database partitions (result of a
    +   * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +   * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + int getDbPartitionCount(); + + /** + * + * + *
    +   * Generated database partitions (result of a
    +   * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +   * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + java.util.List + getDbPartitionOrBuilderList(); + + /** + * + * + *
    +   * Generated database partitions (result of a
    +   * GenetageDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction).
    +   * 
    + * + * repeated .google.spanner.executor.v1.BatchPartition db_partition = 7; + */ + com.google.spanner.executor.v1.BatchPartitionOrBuilder getDbPartitionOrBuilder(int index); + + /** + * + * + *
    +   * Result of admin related actions.
    +   * 
    + * + * optional .google.spanner.executor.v1.AdminResult admin_result = 8; + * + * @return Whether the adminResult field is set. + */ + boolean hasAdminResult(); + + /** + * + * + *
    +   * Result of admin related actions.
    +   * 
    + * + * optional .google.spanner.executor.v1.AdminResult admin_result = 8; + * + * @return The adminResult. + */ + com.google.spanner.executor.v1.AdminResult getAdminResult(); + + /** + * + * + *
    +   * Result of admin related actions.
    +   * 
    + * + * optional .google.spanner.executor.v1.AdminResult admin_result = 8; + */ + com.google.spanner.executor.v1.AdminResultOrBuilder getAdminResultOrBuilder(); + + /** + * + * + *
    +   * Stores rows modified by query in single DML or batch DML action.
    +   * In case of batch DML action, stores 0 as row count of errored DML query.
    +   * 
    + * + * repeated int64 dml_rows_modified = 9; + * + * @return A list containing the dmlRowsModified. + */ + java.util.List getDmlRowsModifiedList(); + + /** + * + * + *
    +   * Stores rows modified by query in single DML or batch DML action.
    +   * In case of batch DML action, stores 0 as row count of errored DML query.
    +   * 
    + * + * repeated int64 dml_rows_modified = 9; + * + * @return The count of dmlRowsModified. + */ + int getDmlRowsModifiedCount(); + + /** + * + * + *
    +   * Stores rows modified by query in single DML or batch DML action.
    +   * In case of batch DML action, stores 0 as row count of errored DML query.
    +   * 
    + * + * repeated int64 dml_rows_modified = 9; + * + * @param index The index of the element to return. + * @return The dmlRowsModified at the given index. + */ + long getDmlRowsModified(int index); + + /** + * + * + *
    +   * Change stream records returned by a change stream query.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + java.util.List getChangeStreamRecordsList(); + + /** + * + * + *
    +   * Change stream records returned by a change stream query.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + com.google.spanner.executor.v1.ChangeStreamRecord getChangeStreamRecords(int index); + + /** + * + * + *
    +   * Change stream records returned by a change stream query.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + int getChangeStreamRecordsCount(); + + /** + * + * + *
    +   * Change stream records returned by a change stream query.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + java.util.List + getChangeStreamRecordsOrBuilderList(); + + /** + * + * + *
    +   * Change stream records returned by a change stream query.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ChangeStreamRecord change_stream_records = 10; + * + */ + com.google.spanner.executor.v1.ChangeStreamRecordOrBuilder getChangeStreamRecordsOrBuilder( + int index); + + /** + * + * + *
    +   * If not zero, it indicates the read timestamp to use for validating
    +   * the SnapshotIsolation transaction.
    +   * 
    + * + * optional int64 snapshot_isolation_txn_read_timestamp = 11; + * + * @return Whether the snapshotIsolationTxnReadTimestamp field is set. + */ + boolean hasSnapshotIsolationTxnReadTimestamp(); + + /** + * + * + *
    +   * If not zero, it indicates the read timestamp to use for validating
    +   * the SnapshotIsolation transaction.
    +   * 
    + * + * optional int64 snapshot_isolation_txn_read_timestamp = 11; + * + * @return The snapshotIsolationTxnReadTimestamp. + */ + long getSnapshotIsolationTxnReadTimestamp(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionRequest.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionRequest.java new file mode 100644 index 000000000000..8234124e88bc --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionRequest.java @@ -0,0 +1,795 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Request to executor service that start a new Spanner action.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.SpannerAsyncActionRequest} + */ +@com.google.protobuf.Generated +public final class SpannerAsyncActionRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.SpannerAsyncActionRequest) + SpannerAsyncActionRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "SpannerAsyncActionRequest"); + } + + // Use SpannerAsyncActionRequest.newBuilder() to construct. + private SpannerAsyncActionRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private SpannerAsyncActionRequest() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerAsyncActionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerAsyncActionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.SpannerAsyncActionRequest.class, + com.google.spanner.executor.v1.SpannerAsyncActionRequest.Builder.class); + } + + private int bitField0_; + public static final int ACTION_ID_FIELD_NUMBER = 1; + private int actionId_ = 0; + + /** + * + * + *
    +   * Action id to uniquely identify this action request.
    +   * 
    + * + * int32 action_id = 1; + * + * @return The actionId. + */ + @java.lang.Override + public int getActionId() { + return actionId_; + } + + public static final int ACTION_FIELD_NUMBER = 2; + private com.google.spanner.executor.v1.SpannerAction action_; + + /** + * + * + *
    +   * The actual SpannerAction to perform.
    +   * 
    + * + * .google.spanner.executor.v1.SpannerAction action = 2; + * + * @return Whether the action field is set. + */ + @java.lang.Override + public boolean hasAction() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The actual SpannerAction to perform.
    +   * 
    + * + * .google.spanner.executor.v1.SpannerAction action = 2; + * + * @return The action. + */ + @java.lang.Override + public com.google.spanner.executor.v1.SpannerAction getAction() { + return action_ == null + ? com.google.spanner.executor.v1.SpannerAction.getDefaultInstance() + : action_; + } + + /** + * + * + *
    +   * The actual SpannerAction to perform.
    +   * 
    + * + * .google.spanner.executor.v1.SpannerAction action = 2; + */ + @java.lang.Override + public com.google.spanner.executor.v1.SpannerActionOrBuilder getActionOrBuilder() { + return action_ == null + ? com.google.spanner.executor.v1.SpannerAction.getDefaultInstance() + : action_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (actionId_ != 0) { + output.writeInt32(1, actionId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getAction()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (actionId_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(1, actionId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getAction()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.SpannerAsyncActionRequest)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.SpannerAsyncActionRequest other = + (com.google.spanner.executor.v1.SpannerAsyncActionRequest) obj; + + if (getActionId() != other.getActionId()) return false; + if (hasAction() != other.hasAction()) return false; + if (hasAction()) { + if (!getAction().equals(other.getAction())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return 
true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ACTION_ID_FIELD_NUMBER; + hash = (53 * hash) + getActionId(); + if (hasAction()) { + hash = (37 * hash) + ACTION_FIELD_NUMBER; + hash = (53 * hash) + getAction().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + 
} + + public static com.google.spanner.executor.v1.SpannerAsyncActionRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.SpannerAsyncActionRequest prototype) { 
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Request to executor service that start a new Spanner action.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.SpannerAsyncActionRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.SpannerAsyncActionRequest) + com.google.spanner.executor.v1.SpannerAsyncActionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerAsyncActionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerAsyncActionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.SpannerAsyncActionRequest.class, + com.google.spanner.executor.v1.SpannerAsyncActionRequest.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.SpannerAsyncActionRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetActionFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + actionId_ = 0; + action_ = null; + if (actionBuilder_ != null) { + actionBuilder_.dispose(); + actionBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerAsyncActionRequest_descriptor; + } + + 
@java.lang.Override + public com.google.spanner.executor.v1.SpannerAsyncActionRequest getDefaultInstanceForType() { + return com.google.spanner.executor.v1.SpannerAsyncActionRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.SpannerAsyncActionRequest build() { + com.google.spanner.executor.v1.SpannerAsyncActionRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.SpannerAsyncActionRequest buildPartial() { + com.google.spanner.executor.v1.SpannerAsyncActionRequest result = + new com.google.spanner.executor.v1.SpannerAsyncActionRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.SpannerAsyncActionRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.actionId_ = actionId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.action_ = actionBuilder_ == null ? 
action_ : actionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.SpannerAsyncActionRequest) { + return mergeFrom((com.google.spanner.executor.v1.SpannerAsyncActionRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.SpannerAsyncActionRequest other) { + if (other == com.google.spanner.executor.v1.SpannerAsyncActionRequest.getDefaultInstance()) + return this; + if (other.getActionId() != 0) { + setActionId(other.getActionId()); + } + if (other.hasAction()) { + mergeAction(other.getAction()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + actionId_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + input.readMessage(internalGetActionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int 
actionId_; + + /** + * + * + *
    +     * Action id to uniquely identify this action request.
    +     * 
    + * + * int32 action_id = 1; + * + * @return The actionId. + */ + @java.lang.Override + public int getActionId() { + return actionId_; + } + + /** + * + * + *
    +     * Action id to uniquely identify this action request.
    +     * 
    + * + * int32 action_id = 1; + * + * @param value The actionId to set. + * @return This builder for chaining. + */ + public Builder setActionId(int value) { + + actionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Action id to uniquely identify this action request.
    +     * 
    + * + * int32 action_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearActionId() { + bitField0_ = (bitField0_ & ~0x00000001); + actionId_ = 0; + onChanged(); + return this; + } + + private com.google.spanner.executor.v1.SpannerAction action_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.SpannerAction, + com.google.spanner.executor.v1.SpannerAction.Builder, + com.google.spanner.executor.v1.SpannerActionOrBuilder> + actionBuilder_; + + /** + * + * + *
    +     * The actual SpannerAction to perform.
    +     * 
    + * + * .google.spanner.executor.v1.SpannerAction action = 2; + * + * @return Whether the action field is set. + */ + public boolean hasAction() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * The actual SpannerAction to perform.
    +     * 
    + * + * .google.spanner.executor.v1.SpannerAction action = 2; + * + * @return The action. + */ + public com.google.spanner.executor.v1.SpannerAction getAction() { + if (actionBuilder_ == null) { + return action_ == null + ? com.google.spanner.executor.v1.SpannerAction.getDefaultInstance() + : action_; + } else { + return actionBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The actual SpannerAction to perform.
    +     * 
    + * + * .google.spanner.executor.v1.SpannerAction action = 2; + */ + public Builder setAction(com.google.spanner.executor.v1.SpannerAction value) { + if (actionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + } else { + actionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The actual SpannerAction to perform.
    +     * 
    + * + * .google.spanner.executor.v1.SpannerAction action = 2; + */ + public Builder setAction(com.google.spanner.executor.v1.SpannerAction.Builder builderForValue) { + if (actionBuilder_ == null) { + action_ = builderForValue.build(); + } else { + actionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The actual SpannerAction to perform.
    +     * 
    + * + * .google.spanner.executor.v1.SpannerAction action = 2; + */ + public Builder mergeAction(com.google.spanner.executor.v1.SpannerAction value) { + if (actionBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && action_ != null + && action_ != com.google.spanner.executor.v1.SpannerAction.getDefaultInstance()) { + getActionBuilder().mergeFrom(value); + } else { + action_ = value; + } + } else { + actionBuilder_.mergeFrom(value); + } + if (action_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The actual SpannerAction to perform.
    +     * 
    + * + * .google.spanner.executor.v1.SpannerAction action = 2; + */ + public Builder clearAction() { + bitField0_ = (bitField0_ & ~0x00000002); + action_ = null; + if (actionBuilder_ != null) { + actionBuilder_.dispose(); + actionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The actual SpannerAction to perform.
    +     * 
    + * + * .google.spanner.executor.v1.SpannerAction action = 2; + */ + public com.google.spanner.executor.v1.SpannerAction.Builder getActionBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetActionFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The actual SpannerAction to perform.
    +     * 
    + * + * .google.spanner.executor.v1.SpannerAction action = 2; + */ + public com.google.spanner.executor.v1.SpannerActionOrBuilder getActionOrBuilder() { + if (actionBuilder_ != null) { + return actionBuilder_.getMessageOrBuilder(); + } else { + return action_ == null + ? com.google.spanner.executor.v1.SpannerAction.getDefaultInstance() + : action_; + } + } + + /** + * + * + *
    +     * The actual SpannerAction to perform.
    +     * 
    + * + * .google.spanner.executor.v1.SpannerAction action = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.SpannerAction, + com.google.spanner.executor.v1.SpannerAction.Builder, + com.google.spanner.executor.v1.SpannerActionOrBuilder> + internalGetActionFieldBuilder() { + if (actionBuilder_ == null) { + actionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.SpannerAction, + com.google.spanner.executor.v1.SpannerAction.Builder, + com.google.spanner.executor.v1.SpannerActionOrBuilder>( + getAction(), getParentForChildren(), isClean()); + action_ = null; + } + return actionBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.SpannerAsyncActionRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.SpannerAsyncActionRequest) + private static final com.google.spanner.executor.v1.SpannerAsyncActionRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.SpannerAsyncActionRequest(); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SpannerAsyncActionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.SpannerAsyncActionRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionRequestOrBuilder.java new file mode 100644 index 000000000000..2def902b47f3 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionRequestOrBuilder.java @@ -0,0 +1,78 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface SpannerAsyncActionRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.SpannerAsyncActionRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Action id to uniquely identify this action request.
    +   * 
    + * + * int32 action_id = 1; + * + * @return The actionId. + */ + int getActionId(); + + /** + * + * + *
    +   * The actual SpannerAction to perform.
    +   * 
    + * + * .google.spanner.executor.v1.SpannerAction action = 2; + * + * @return Whether the action field is set. + */ + boolean hasAction(); + + /** + * + * + *
    +   * The actual SpannerAction to perform.
    +   * 
    + * + * .google.spanner.executor.v1.SpannerAction action = 2; + * + * @return The action. + */ + com.google.spanner.executor.v1.SpannerAction getAction(); + + /** + * + * + *
    +   * The actual SpannerAction to perform.
    +   * 
    + * + * .google.spanner.executor.v1.SpannerAction action = 2; + */ + com.google.spanner.executor.v1.SpannerActionOrBuilder getActionOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionResponse.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionResponse.java new file mode 100644 index 000000000000..aff91a1e20e5 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionResponse.java @@ -0,0 +1,809 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Response from executor service.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.SpannerAsyncActionResponse} + */ +@com.google.protobuf.Generated +public final class SpannerAsyncActionResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.SpannerAsyncActionResponse) + SpannerAsyncActionResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "SpannerAsyncActionResponse"); + } + + // Use SpannerAsyncActionResponse.newBuilder() to construct. + private SpannerAsyncActionResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private SpannerAsyncActionResponse() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerAsyncActionResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerAsyncActionResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.SpannerAsyncActionResponse.class, + com.google.spanner.executor.v1.SpannerAsyncActionResponse.Builder.class); + } + + private int bitField0_; + public static final int ACTION_ID_FIELD_NUMBER = 1; + private int actionId_ = 0; + + /** + * + * + *
    +   * Action id corresponds to the request.
    +   * 
    + * + * int32 action_id = 1; + * + * @return The actionId. + */ + @java.lang.Override + public int getActionId() { + return actionId_; + } + + public static final int OUTCOME_FIELD_NUMBER = 2; + private com.google.spanner.executor.v1.SpannerActionOutcome outcome_; + + /** + * + * + *
    +   * If action results are split into multiple responses, only the last response
    +   * can and should contain status.
    +   * 
    + * + * .google.spanner.executor.v1.SpannerActionOutcome outcome = 2; + * + * @return Whether the outcome field is set. + */ + @java.lang.Override + public boolean hasOutcome() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * If action results are split into multiple responses, only the last response
    +   * can and should contain status.
    +   * 
    + * + * .google.spanner.executor.v1.SpannerActionOutcome outcome = 2; + * + * @return The outcome. + */ + @java.lang.Override + public com.google.spanner.executor.v1.SpannerActionOutcome getOutcome() { + return outcome_ == null + ? com.google.spanner.executor.v1.SpannerActionOutcome.getDefaultInstance() + : outcome_; + } + + /** + * + * + *
    +   * If action results are split into multiple responses, only the last response
    +   * can and should contain status.
    +   * 
    + * + * .google.spanner.executor.v1.SpannerActionOutcome outcome = 2; + */ + @java.lang.Override + public com.google.spanner.executor.v1.SpannerActionOutcomeOrBuilder getOutcomeOrBuilder() { + return outcome_ == null + ? com.google.spanner.executor.v1.SpannerActionOutcome.getDefaultInstance() + : outcome_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (actionId_ != 0) { + output.writeInt32(1, actionId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getOutcome()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (actionId_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(1, actionId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getOutcome()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.SpannerAsyncActionResponse)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.SpannerAsyncActionResponse other = + (com.google.spanner.executor.v1.SpannerAsyncActionResponse) obj; + + if (getActionId() != other.getActionId()) return false; + if (hasOutcome() != other.hasOutcome()) return false; + if (hasOutcome()) { + if (!getOutcome().equals(other.getOutcome())) return false; + } + if 
(!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ACTION_ID_FIELD_NUMBER; + hash = (53 * hash) + getActionId(); + if (hasOutcome()) { + hash = (37 * hash) + OUTCOME_FIELD_NUMBER; + hash = (53 * hash) + getOutcome().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + 
} + + public static Builder newBuilder( + com.google.spanner.executor.v1.SpannerAsyncActionResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Response from executor service.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.SpannerAsyncActionResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.SpannerAsyncActionResponse) + com.google.spanner.executor.v1.SpannerAsyncActionResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerAsyncActionResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerAsyncActionResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.SpannerAsyncActionResponse.class, + com.google.spanner.executor.v1.SpannerAsyncActionResponse.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.SpannerAsyncActionResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetOutcomeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + actionId_ = 0; + outcome_ = null; + if (outcomeBuilder_ != null) { + outcomeBuilder_.dispose(); + outcomeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + 
.internal_static_google_spanner_executor_v1_SpannerAsyncActionResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.SpannerAsyncActionResponse getDefaultInstanceForType() { + return com.google.spanner.executor.v1.SpannerAsyncActionResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.SpannerAsyncActionResponse build() { + com.google.spanner.executor.v1.SpannerAsyncActionResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.SpannerAsyncActionResponse buildPartial() { + com.google.spanner.executor.v1.SpannerAsyncActionResponse result = + new com.google.spanner.executor.v1.SpannerAsyncActionResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.SpannerAsyncActionResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.actionId_ = actionId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.outcome_ = outcomeBuilder_ == null ? 
outcome_ : outcomeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.SpannerAsyncActionResponse) { + return mergeFrom((com.google.spanner.executor.v1.SpannerAsyncActionResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.SpannerAsyncActionResponse other) { + if (other == com.google.spanner.executor.v1.SpannerAsyncActionResponse.getDefaultInstance()) + return this; + if (other.getActionId() != 0) { + setActionId(other.getActionId()); + } + if (other.hasOutcome()) { + mergeOutcome(other.getOutcome()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + actionId_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + input.readMessage(internalGetOutcomeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private 
int actionId_; + + /** + * + * + *
    +     * Action id corresponds to the request.
    +     * 
    + * + * int32 action_id = 1; + * + * @return The actionId. + */ + @java.lang.Override + public int getActionId() { + return actionId_; + } + + /** + * + * + *
    +     * Action id corresponds to the request.
    +     * 
    + * + * int32 action_id = 1; + * + * @param value The actionId to set. + * @return This builder for chaining. + */ + public Builder setActionId(int value) { + + actionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Action id corresponds to the request.
    +     * 
    + * + * int32 action_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearActionId() { + bitField0_ = (bitField0_ & ~0x00000001); + actionId_ = 0; + onChanged(); + return this; + } + + private com.google.spanner.executor.v1.SpannerActionOutcome outcome_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.SpannerActionOutcome, + com.google.spanner.executor.v1.SpannerActionOutcome.Builder, + com.google.spanner.executor.v1.SpannerActionOutcomeOrBuilder> + outcomeBuilder_; + + /** + * + * + *
    +     * If action results are split into multiple responses, only the last response
    +     * can and should contain status.
    +     * 
    + * + * .google.spanner.executor.v1.SpannerActionOutcome outcome = 2; + * + * @return Whether the outcome field is set. + */ + public boolean hasOutcome() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * If action results are split into multiple responses, only the last response
    +     * can and should contain status.
    +     * 
    + * + * .google.spanner.executor.v1.SpannerActionOutcome outcome = 2; + * + * @return The outcome. + */ + public com.google.spanner.executor.v1.SpannerActionOutcome getOutcome() { + if (outcomeBuilder_ == null) { + return outcome_ == null + ? com.google.spanner.executor.v1.SpannerActionOutcome.getDefaultInstance() + : outcome_; + } else { + return outcomeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * If action results are split into multiple responses, only the last response
    +     * can and should contain status.
    +     * 
    + * + * .google.spanner.executor.v1.SpannerActionOutcome outcome = 2; + */ + public Builder setOutcome(com.google.spanner.executor.v1.SpannerActionOutcome value) { + if (outcomeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + outcome_ = value; + } else { + outcomeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If action results are split into multiple responses, only the last response
    +     * can and should contain status.
    +     * 
    + * + * .google.spanner.executor.v1.SpannerActionOutcome outcome = 2; + */ + public Builder setOutcome( + com.google.spanner.executor.v1.SpannerActionOutcome.Builder builderForValue) { + if (outcomeBuilder_ == null) { + outcome_ = builderForValue.build(); + } else { + outcomeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If action results are split into multiple responses, only the last response
    +     * can and should contain status.
    +     * 
    + * + * .google.spanner.executor.v1.SpannerActionOutcome outcome = 2; + */ + public Builder mergeOutcome(com.google.spanner.executor.v1.SpannerActionOutcome value) { + if (outcomeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && outcome_ != null + && outcome_ + != com.google.spanner.executor.v1.SpannerActionOutcome.getDefaultInstance()) { + getOutcomeBuilder().mergeFrom(value); + } else { + outcome_ = value; + } + } else { + outcomeBuilder_.mergeFrom(value); + } + if (outcome_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * If action results are split into multiple responses, only the last response
    +     * can and should contain status.
    +     * 
    + * + * .google.spanner.executor.v1.SpannerActionOutcome outcome = 2; + */ + public Builder clearOutcome() { + bitField0_ = (bitField0_ & ~0x00000002); + outcome_ = null; + if (outcomeBuilder_ != null) { + outcomeBuilder_.dispose(); + outcomeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * If action results are split into multiple responses, only the last response
    +     * can and should contain status.
    +     * 
    + * + * .google.spanner.executor.v1.SpannerActionOutcome outcome = 2; + */ + public com.google.spanner.executor.v1.SpannerActionOutcome.Builder getOutcomeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetOutcomeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * If action results are split into multiple responses, only the last response
    +     * can and should contain status.
    +     * 
    + * + * .google.spanner.executor.v1.SpannerActionOutcome outcome = 2; + */ + public com.google.spanner.executor.v1.SpannerActionOutcomeOrBuilder getOutcomeOrBuilder() { + if (outcomeBuilder_ != null) { + return outcomeBuilder_.getMessageOrBuilder(); + } else { + return outcome_ == null + ? com.google.spanner.executor.v1.SpannerActionOutcome.getDefaultInstance() + : outcome_; + } + } + + /** + * + * + *
    +     * If action results are split into multiple responses, only the last response
    +     * can and should contain status.
    +     * 
    + * + * .google.spanner.executor.v1.SpannerActionOutcome outcome = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.SpannerActionOutcome, + com.google.spanner.executor.v1.SpannerActionOutcome.Builder, + com.google.spanner.executor.v1.SpannerActionOutcomeOrBuilder> + internalGetOutcomeFieldBuilder() { + if (outcomeBuilder_ == null) { + outcomeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.SpannerActionOutcome, + com.google.spanner.executor.v1.SpannerActionOutcome.Builder, + com.google.spanner.executor.v1.SpannerActionOutcomeOrBuilder>( + getOutcome(), getParentForChildren(), isClean()); + outcome_ = null; + } + return outcomeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.SpannerAsyncActionResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.SpannerAsyncActionResponse) + private static final com.google.spanner.executor.v1.SpannerAsyncActionResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.SpannerAsyncActionResponse(); + } + + public static com.google.spanner.executor.v1.SpannerAsyncActionResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SpannerAsyncActionResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } 
catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.SpannerAsyncActionResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionResponseOrBuilder.java new file mode 100644 index 000000000000..b65ccd6e3cb5 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionResponseOrBuilder.java @@ -0,0 +1,81 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface SpannerAsyncActionResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.SpannerAsyncActionResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Action id corresponds to the request.
    +   * 
    + * + * int32 action_id = 1; + * + * @return The actionId. + */ + int getActionId(); + + /** + * + * + *
    +   * If action results are split into multiple responses, only the last response
    +   * can and should contain status.
    +   * 
    + * + * .google.spanner.executor.v1.SpannerActionOutcome outcome = 2; + * + * @return Whether the outcome field is set. + */ + boolean hasOutcome(); + + /** + * + * + *
    +   * If action results are split into multiple responses, only the last response
    +   * can and should contain status.
    +   * 
    + * + * .google.spanner.executor.v1.SpannerActionOutcome outcome = 2; + * + * @return The outcome. + */ + com.google.spanner.executor.v1.SpannerActionOutcome getOutcome(); + + /** + * + * + *
    +   * If action results are split into multiple responses, only the last response
    +   * can and should contain status.
    +   * 
    + * + * .google.spanner.executor.v1.SpannerActionOutcome outcome = 2; + */ + com.google.spanner.executor.v1.SpannerActionOutcomeOrBuilder getOutcomeOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerOptions.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerOptions.java new file mode 100644 index 000000000000..26c5fb160bf3 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerOptions.java @@ -0,0 +1,706 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Options for Cloud Spanner Service.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.SpannerOptions} + */ +@com.google.protobuf.Generated +public final class SpannerOptions extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.SpannerOptions) + SpannerOptionsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "SpannerOptions"); + } + + // Use SpannerOptions.newBuilder() to construct. + private SpannerOptions(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private SpannerOptions() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.SpannerOptions.class, + com.google.spanner.executor.v1.SpannerOptions.Builder.class); + } + + private int bitField0_; + public static final int SESSION_POOL_OPTIONS_FIELD_NUMBER = 1; + private com.google.spanner.executor.v1.SessionPoolOptions sessionPoolOptions_; + + /** + * + * + *
    +   * Options for configuring the session pool
    +   * 
    + * + * .google.spanner.executor.v1.SessionPoolOptions session_pool_options = 1; + * + * @return Whether the sessionPoolOptions field is set. + */ + @java.lang.Override + public boolean hasSessionPoolOptions() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Options for configuring the session pool
    +   * 
    + * + * .google.spanner.executor.v1.SessionPoolOptions session_pool_options = 1; + * + * @return The sessionPoolOptions. + */ + @java.lang.Override + public com.google.spanner.executor.v1.SessionPoolOptions getSessionPoolOptions() { + return sessionPoolOptions_ == null + ? com.google.spanner.executor.v1.SessionPoolOptions.getDefaultInstance() + : sessionPoolOptions_; + } + + /** + * + * + *
    +   * Options for configuring the session pool
    +   * 
    + * + * .google.spanner.executor.v1.SessionPoolOptions session_pool_options = 1; + */ + @java.lang.Override + public com.google.spanner.executor.v1.SessionPoolOptionsOrBuilder + getSessionPoolOptionsOrBuilder() { + return sessionPoolOptions_ == null + ? com.google.spanner.executor.v1.SessionPoolOptions.getDefaultInstance() + : sessionPoolOptions_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getSessionPoolOptions()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getSessionPoolOptions()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.SpannerOptions)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.SpannerOptions other = + (com.google.spanner.executor.v1.SpannerOptions) obj; + + if (hasSessionPoolOptions() != other.hasSessionPoolOptions()) return false; + if (hasSessionPoolOptions()) { + if (!getSessionPoolOptions().equals(other.getSessionPoolOptions())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return 
memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasSessionPoolOptions()) { + hash = (37 * hash) + SESSION_POOL_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getSessionPoolOptions().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.SpannerOptions parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.SpannerOptions parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerOptions parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.SpannerOptions parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerOptions parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.SpannerOptions parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerOptions parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, 
input); + } + + public static com.google.spanner.executor.v1.SpannerOptions parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerOptions parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.SpannerOptions parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.SpannerOptions parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.SpannerOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.SpannerOptions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Options for Cloud Spanner Service.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.SpannerOptions} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.SpannerOptions) + com.google.spanner.executor.v1.SpannerOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.SpannerOptions.class, + com.google.spanner.executor.v1.SpannerOptions.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.SpannerOptions.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetSessionPoolOptionsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + sessionPoolOptions_ = null; + if (sessionPoolOptionsBuilder_ != null) { + sessionPoolOptionsBuilder_.dispose(); + sessionPoolOptionsBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_SpannerOptions_descriptor; + } + + @java.lang.Override + public 
com.google.spanner.executor.v1.SpannerOptions getDefaultInstanceForType() { + return com.google.spanner.executor.v1.SpannerOptions.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.SpannerOptions build() { + com.google.spanner.executor.v1.SpannerOptions result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.SpannerOptions buildPartial() { + com.google.spanner.executor.v1.SpannerOptions result = + new com.google.spanner.executor.v1.SpannerOptions(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.SpannerOptions result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.sessionPoolOptions_ = + sessionPoolOptionsBuilder_ == null + ? sessionPoolOptions_ + : sessionPoolOptionsBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.SpannerOptions) { + return mergeFrom((com.google.spanner.executor.v1.SpannerOptions) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.SpannerOptions other) { + if (other == com.google.spanner.executor.v1.SpannerOptions.getDefaultInstance()) return this; + if (other.hasSessionPoolOptions()) { + mergeSessionPoolOptions(other.getSessionPoolOptions()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetSessionPoolOptionsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.executor.v1.SessionPoolOptions sessionPoolOptions_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.SessionPoolOptions, + com.google.spanner.executor.v1.SessionPoolOptions.Builder, + com.google.spanner.executor.v1.SessionPoolOptionsOrBuilder> + sessionPoolOptionsBuilder_; + + /** + * + * + *
    +     * Options for configuring the session pool
    +     * 
    + * + * .google.spanner.executor.v1.SessionPoolOptions session_pool_options = 1; + * + * @return Whether the sessionPoolOptions field is set. + */ + public boolean hasSessionPoolOptions() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Options for configuring the session pool
    +     * 
    + * + * .google.spanner.executor.v1.SessionPoolOptions session_pool_options = 1; + * + * @return The sessionPoolOptions. + */ + public com.google.spanner.executor.v1.SessionPoolOptions getSessionPoolOptions() { + if (sessionPoolOptionsBuilder_ == null) { + return sessionPoolOptions_ == null + ? com.google.spanner.executor.v1.SessionPoolOptions.getDefaultInstance() + : sessionPoolOptions_; + } else { + return sessionPoolOptionsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Options for configuring the session pool
    +     * 
    + * + * .google.spanner.executor.v1.SessionPoolOptions session_pool_options = 1; + */ + public Builder setSessionPoolOptions(com.google.spanner.executor.v1.SessionPoolOptions value) { + if (sessionPoolOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + sessionPoolOptions_ = value; + } else { + sessionPoolOptionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Options for configuring the session pool
    +     * 
    + * + * .google.spanner.executor.v1.SessionPoolOptions session_pool_options = 1; + */ + public Builder setSessionPoolOptions( + com.google.spanner.executor.v1.SessionPoolOptions.Builder builderForValue) { + if (sessionPoolOptionsBuilder_ == null) { + sessionPoolOptions_ = builderForValue.build(); + } else { + sessionPoolOptionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Options for configuring the session pool
    +     * 
    + * + * .google.spanner.executor.v1.SessionPoolOptions session_pool_options = 1; + */ + public Builder mergeSessionPoolOptions( + com.google.spanner.executor.v1.SessionPoolOptions value) { + if (sessionPoolOptionsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && sessionPoolOptions_ != null + && sessionPoolOptions_ + != com.google.spanner.executor.v1.SessionPoolOptions.getDefaultInstance()) { + getSessionPoolOptionsBuilder().mergeFrom(value); + } else { + sessionPoolOptions_ = value; + } + } else { + sessionPoolOptionsBuilder_.mergeFrom(value); + } + if (sessionPoolOptions_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Options for configuring the session pool
    +     * 
    + * + * .google.spanner.executor.v1.SessionPoolOptions session_pool_options = 1; + */ + public Builder clearSessionPoolOptions() { + bitField0_ = (bitField0_ & ~0x00000001); + sessionPoolOptions_ = null; + if (sessionPoolOptionsBuilder_ != null) { + sessionPoolOptionsBuilder_.dispose(); + sessionPoolOptionsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Options for configuring the session pool
    +     * 
    + * + * .google.spanner.executor.v1.SessionPoolOptions session_pool_options = 1; + */ + public com.google.spanner.executor.v1.SessionPoolOptions.Builder + getSessionPoolOptionsBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetSessionPoolOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Options for configuring the session pool
    +     * 
    + * + * .google.spanner.executor.v1.SessionPoolOptions session_pool_options = 1; + */ + public com.google.spanner.executor.v1.SessionPoolOptionsOrBuilder + getSessionPoolOptionsOrBuilder() { + if (sessionPoolOptionsBuilder_ != null) { + return sessionPoolOptionsBuilder_.getMessageOrBuilder(); + } else { + return sessionPoolOptions_ == null + ? com.google.spanner.executor.v1.SessionPoolOptions.getDefaultInstance() + : sessionPoolOptions_; + } + } + + /** + * + * + *
    +     * Options for configuring the session pool
    +     * 
    + * + * .google.spanner.executor.v1.SessionPoolOptions session_pool_options = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.SessionPoolOptions, + com.google.spanner.executor.v1.SessionPoolOptions.Builder, + com.google.spanner.executor.v1.SessionPoolOptionsOrBuilder> + internalGetSessionPoolOptionsFieldBuilder() { + if (sessionPoolOptionsBuilder_ == null) { + sessionPoolOptionsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.SessionPoolOptions, + com.google.spanner.executor.v1.SessionPoolOptions.Builder, + com.google.spanner.executor.v1.SessionPoolOptionsOrBuilder>( + getSessionPoolOptions(), getParentForChildren(), isClean()); + sessionPoolOptions_ = null; + } + return sessionPoolOptionsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.SpannerOptions) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.SpannerOptions) + private static final com.google.spanner.executor.v1.SpannerOptions DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.SpannerOptions(); + } + + public static com.google.spanner.executor.v1.SpannerOptions getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SpannerOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch 
(java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.SpannerOptions getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerOptionsOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerOptionsOrBuilder.java new file mode 100644 index 000000000000..724808e73553 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerOptionsOrBuilder.java @@ -0,0 +1,65 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface SpannerOptionsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.SpannerOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Options for configuring the session pool
    +   * 
    + * + * .google.spanner.executor.v1.SessionPoolOptions session_pool_options = 1; + * + * @return Whether the sessionPoolOptions field is set. + */ + boolean hasSessionPoolOptions(); + + /** + * + * + *
    +   * Options for configuring the session pool
    +   * 
    + * + * .google.spanner.executor.v1.SessionPoolOptions session_pool_options = 1; + * + * @return The sessionPoolOptions. + */ + com.google.spanner.executor.v1.SessionPoolOptions getSessionPoolOptions(); + + /** + * + * + *
    +   * Options for configuring the session pool
    +   * 
    + * + * .google.spanner.executor.v1.SessionPoolOptions session_pool_options = 1; + */ + com.google.spanner.executor.v1.SessionPoolOptionsOrBuilder getSessionPoolOptionsOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartBatchTransactionAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartBatchTransactionAction.java new file mode 100644 index 000000000000..da00551fc009 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartBatchTransactionAction.java @@ -0,0 +1,1211 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Starts a batch read-only transaction in executor. Successful outcomes of this
    + * action will contain batch_txn_id--the identificator that can be used to start
    + * the same transaction in other Executors to parallelize partition processing.
    + *
    + * Example of a batch read flow:
    + * 1. Start batch transaction with a timestamp (StartBatchTransactionAction)
    + * 2. Generate database partitions for a read or query
    + * (GenerateDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction)
    + * 3. Call ExecutePartitionAction for some or all partitions, process rows
    + * 4. Clean up the transaction (CloseBatchTransactionAction).
    + *
    + * More sophisticated example, with parallel processing:
    + * 1. Start batch transaction with a timestamp (StartBatchTransactionAction),
    + * note the returned BatchTransactionId
    + * 2. Generate database partitions for a read or query
    + * (GenerateDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction)
    + * 3. Distribute the partitions over a pool of workers, along with the
    + * transaction ID.
    + *
    + * In each worker:
    + * 4-1. StartBatchTransactionAction with the given transaction ID
    + * 4-2. ExecutePartitionAction for each partition it got, process read results
    + * 4-3. Close (not cleanup) the transaction (CloseBatchTransactionAction).
    + *
    + * When all workers are done:
    + * 5. Cleanup the transaction (CloseBatchTransactionAction). This can be done
    + * either by the last worker to finish the job, or by the main Executor that
    + * initialized this transaction in the first place. It is also possible to clean
    + * it up with a brand new Executor -- just execute StartBatchTransactionAction
    + * with the ID, then clean it up right away.
    + *
    + * Cleaning up is optional, but recommended.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.StartBatchTransactionAction} + */ +@com.google.protobuf.Generated +public final class StartBatchTransactionAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.StartBatchTransactionAction) + StartBatchTransactionActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "StartBatchTransactionAction"); + } + + // Use StartBatchTransactionAction.newBuilder() to construct. + private StartBatchTransactionAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private StartBatchTransactionAction() { + cloudDatabaseRole_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_StartBatchTransactionAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_StartBatchTransactionAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.StartBatchTransactionAction.class, + com.google.spanner.executor.v1.StartBatchTransactionAction.Builder.class); + } + + private int paramCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object param_; + + public enum ParamCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + BATCH_TXN_TIME(1), + TID(2), + PARAM_NOT_SET(0); + private final int value; + + private ParamCase(int value) { + 
this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ParamCase valueOf(int value) { + return forNumber(value); + } + + public static ParamCase forNumber(int value) { + switch (value) { + case 1: + return BATCH_TXN_TIME; + case 2: + return TID; + case 0: + return PARAM_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public ParamCase getParamCase() { + return ParamCase.forNumber(paramCase_); + } + + public static final int BATCH_TXN_TIME_FIELD_NUMBER = 1; + + /** + * + * + *
    +   * The exact timestamp to start the batch transaction.
    +   * 
    + * + * .google.protobuf.Timestamp batch_txn_time = 1; + * + * @return Whether the batchTxnTime field is set. + */ + @java.lang.Override + public boolean hasBatchTxnTime() { + return paramCase_ == 1; + } + + /** + * + * + *
    +   * The exact timestamp to start the batch transaction.
    +   * 
    + * + * .google.protobuf.Timestamp batch_txn_time = 1; + * + * @return The batchTxnTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getBatchTxnTime() { + if (paramCase_ == 1) { + return (com.google.protobuf.Timestamp) param_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + + /** + * + * + *
    +   * The exact timestamp to start the batch transaction.
    +   * 
    + * + * .google.protobuf.Timestamp batch_txn_time = 1; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getBatchTxnTimeOrBuilder() { + if (paramCase_ == 1) { + return (com.google.protobuf.Timestamp) param_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + + public static final int TID_FIELD_NUMBER = 2; + + /** + * + * + *
    +   * ID of a batch read-only transaction. It can be used to start the same
    +   * batch transaction on multiple executors and parallelize partition
    +   * processing.
    +   * 
    + * + * bytes tid = 2; + * + * @return Whether the tid field is set. + */ + @java.lang.Override + public boolean hasTid() { + return paramCase_ == 2; + } + + /** + * + * + *
    +   * ID of a batch read-only transaction. It can be used to start the same
    +   * batch transaction on multiple executors and parallelize partition
    +   * processing.
    +   * 
    + * + * bytes tid = 2; + * + * @return The tid. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTid() { + if (paramCase_ == 2) { + return (com.google.protobuf.ByteString) param_; + } + return com.google.protobuf.ByteString.EMPTY; + } + + public static final int CLOUD_DATABASE_ROLE_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object cloudDatabaseRole_ = ""; + + /** + * + * + *
    +   * Database role to assume while performing this action. Setting the
    +   * database_role will enforce additional role-based access checks on this
    +   * action.
    +   * 
    + * + * string cloud_database_role = 3; + * + * @return The cloudDatabaseRole. + */ + @java.lang.Override + public java.lang.String getCloudDatabaseRole() { + java.lang.Object ref = cloudDatabaseRole_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + cloudDatabaseRole_ = s; + return s; + } + } + + /** + * + * + *
    +   * Database role to assume while performing this action. Setting the
    +   * database_role will enforce additional role-based access checks on this
    +   * action.
    +   * 
    + * + * string cloud_database_role = 3; + * + * @return The bytes for cloudDatabaseRole. + */ + @java.lang.Override + public com.google.protobuf.ByteString getCloudDatabaseRoleBytes() { + java.lang.Object ref = cloudDatabaseRole_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + cloudDatabaseRole_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (paramCase_ == 1) { + output.writeMessage(1, (com.google.protobuf.Timestamp) param_); + } + if (paramCase_ == 2) { + output.writeBytes(2, (com.google.protobuf.ByteString) param_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(cloudDatabaseRole_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, cloudDatabaseRole_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (paramCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, (com.google.protobuf.Timestamp) param_); + } + if (paramCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeBytesSize( + 2, (com.google.protobuf.ByteString) param_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(cloudDatabaseRole_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, cloudDatabaseRole_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + 
@java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.StartBatchTransactionAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.StartBatchTransactionAction other = + (com.google.spanner.executor.v1.StartBatchTransactionAction) obj; + + if (!getCloudDatabaseRole().equals(other.getCloudDatabaseRole())) return false; + if (!getParamCase().equals(other.getParamCase())) return false; + switch (paramCase_) { + case 1: + if (!getBatchTxnTime().equals(other.getBatchTxnTime())) return false; + break; + case 2: + if (!getTid().equals(other.getTid())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + CLOUD_DATABASE_ROLE_FIELD_NUMBER; + hash = (53 * hash) + getCloudDatabaseRole().hashCode(); + switch (paramCase_) { + case 1: + hash = (37 * hash) + BATCH_TXN_TIME_FIELD_NUMBER; + hash = (53 * hash) + getBatchTxnTime().hashCode(); + break; + case 2: + hash = (37 * hash) + TID_FIELD_NUMBER; + hash = (53 * hash) + getTid().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.StartBatchTransactionAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.StartBatchTransactionAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + + public static com.google.spanner.executor.v1.StartBatchTransactionAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.StartBatchTransactionAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.StartBatchTransactionAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.StartBatchTransactionAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.StartBatchTransactionAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.StartBatchTransactionAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.StartBatchTransactionAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.StartBatchTransactionAction parseDelimitedFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.StartBatchTransactionAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.StartBatchTransactionAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.StartBatchTransactionAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Starts a batch read-only transaction in executor. Successful outcomes of this
    +   * action will contain batch_txn_id--the identificator that can be used to start
    +   * the same transaction in other Executors to parallelize partition processing.
    +   *
    +   * Example of a batch read flow:
    +   * 1. Start batch transaction with a timestamp (StartBatchTransactionAction)
    +   * 2. Generate database partitions for a read or query
    +   * (GenerateDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction)
    +   * 3. Call ExecutePartitionAction for some or all partitions, process rows
    +   * 4. Clean up the transaction (CloseBatchTransactionAction).
    +   *
    +   * More sophisticated example, with parallel processing:
    +   * 1. Start batch transaction with a timestamp (StartBatchTransactionAction),
    +   * note the returned BatchTransactionId
    +   * 2. Generate database partitions for a read or query
    +   * (GenerateDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction)
    +   * 3. Distribute the partitions over a pool of workers, along with the
    +   * transaction ID.
    +   *
    +   * In each worker:
    +   * 4-1. StartBatchTransactionAction with the given transaction ID
    +   * 4-2. ExecutePartitionAction for each partition it got, process read results
    +   * 4-3. Close (not cleanup) the transaction (CloseBatchTransactionAction).
    +   *
    +   * When all workers are done:
    +   * 5. Cleanup the transaction (CloseBatchTransactionAction). This can be done
    +   * either by the last worker to finish the job, or by the main Executor that
    +   * initialized this transaction in the first place. It is also possible to clean
    +   * it up with a brand new Executor -- just execute StartBatchTransactionAction
    +   * with the ID, then clean it up right away.
    +   *
    +   * Cleaning up is optional, but recommended.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.StartBatchTransactionAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.StartBatchTransactionAction) + com.google.spanner.executor.v1.StartBatchTransactionActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_StartBatchTransactionAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_StartBatchTransactionAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.StartBatchTransactionAction.class, + com.google.spanner.executor.v1.StartBatchTransactionAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.StartBatchTransactionAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (batchTxnTimeBuilder_ != null) { + batchTxnTimeBuilder_.clear(); + } + cloudDatabaseRole_ = ""; + paramCase_ = 0; + param_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_StartBatchTransactionAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.StartBatchTransactionAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.StartBatchTransactionAction.getDefaultInstance(); + } + 
+ @java.lang.Override + public com.google.spanner.executor.v1.StartBatchTransactionAction build() { + com.google.spanner.executor.v1.StartBatchTransactionAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.StartBatchTransactionAction buildPartial() { + com.google.spanner.executor.v1.StartBatchTransactionAction result = + new com.google.spanner.executor.v1.StartBatchTransactionAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.StartBatchTransactionAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.cloudDatabaseRole_ = cloudDatabaseRole_; + } + } + + private void buildPartialOneofs( + com.google.spanner.executor.v1.StartBatchTransactionAction result) { + result.paramCase_ = paramCase_; + result.param_ = this.param_; + if (paramCase_ == 1 && batchTxnTimeBuilder_ != null) { + result.param_ = batchTxnTimeBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.StartBatchTransactionAction) { + return mergeFrom((com.google.spanner.executor.v1.StartBatchTransactionAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.StartBatchTransactionAction other) { + if (other == com.google.spanner.executor.v1.StartBatchTransactionAction.getDefaultInstance()) + return this; + if (!other.getCloudDatabaseRole().isEmpty()) { + cloudDatabaseRole_ = other.cloudDatabaseRole_; + bitField0_ |= 0x00000004; + onChanged(); + } + switch (other.getParamCase()) { + case BATCH_TXN_TIME: + { + mergeBatchTxnTime(other.getBatchTxnTime()); + break; + } + case TID: + { 
+ setTid(other.getTid()); + break; + } + case PARAM_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetBatchTxnTimeFieldBuilder().getBuilder(), extensionRegistry); + paramCase_ = 1; + break; + } // case 10 + case 18: + { + param_ = input.readBytes(); + paramCase_ = 2; + break; + } // case 18 + case 26: + { + cloudDatabaseRole_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int paramCase_ = 0; + private java.lang.Object param_; + + public ParamCase getParamCase() { + return ParamCase.forNumber(paramCase_); + } + + public Builder clearParam() { + paramCase_ = 0; + param_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + batchTxnTimeBuilder_; + + /** + * + * + *
    +     * The exact timestamp to start the batch transaction.
    +     * 
    + * + * .google.protobuf.Timestamp batch_txn_time = 1; + * + * @return Whether the batchTxnTime field is set. + */ + @java.lang.Override + public boolean hasBatchTxnTime() { + return paramCase_ == 1; + } + + /** + * + * + *
    +     * The exact timestamp to start the batch transaction.
    +     * 
    + * + * .google.protobuf.Timestamp batch_txn_time = 1; + * + * @return The batchTxnTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getBatchTxnTime() { + if (batchTxnTimeBuilder_ == null) { + if (paramCase_ == 1) { + return (com.google.protobuf.Timestamp) param_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } else { + if (paramCase_ == 1) { + return batchTxnTimeBuilder_.getMessage(); + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * The exact timestamp to start the batch transaction.
    +     * 
    + * + * .google.protobuf.Timestamp batch_txn_time = 1; + */ + public Builder setBatchTxnTime(com.google.protobuf.Timestamp value) { + if (batchTxnTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + param_ = value; + onChanged(); + } else { + batchTxnTimeBuilder_.setMessage(value); + } + paramCase_ = 1; + return this; + } + + /** + * + * + *
    +     * The exact timestamp to start the batch transaction.
    +     * 
    + * + * .google.protobuf.Timestamp batch_txn_time = 1; + */ + public Builder setBatchTxnTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (batchTxnTimeBuilder_ == null) { + param_ = builderForValue.build(); + onChanged(); + } else { + batchTxnTimeBuilder_.setMessage(builderForValue.build()); + } + paramCase_ = 1; + return this; + } + + /** + * + * + *
    +     * The exact timestamp to start the batch transaction.
    +     * 
    + * + * .google.protobuf.Timestamp batch_txn_time = 1; + */ + public Builder mergeBatchTxnTime(com.google.protobuf.Timestamp value) { + if (batchTxnTimeBuilder_ == null) { + if (paramCase_ == 1 && param_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + param_ = + com.google.protobuf.Timestamp.newBuilder((com.google.protobuf.Timestamp) param_) + .mergeFrom(value) + .buildPartial(); + } else { + param_ = value; + } + onChanged(); + } else { + if (paramCase_ == 1) { + batchTxnTimeBuilder_.mergeFrom(value); + } else { + batchTxnTimeBuilder_.setMessage(value); + } + } + paramCase_ = 1; + return this; + } + + /** + * + * + *
    +     * The exact timestamp to start the batch transaction.
    +     * 
    + * + * .google.protobuf.Timestamp batch_txn_time = 1; + */ + public Builder clearBatchTxnTime() { + if (batchTxnTimeBuilder_ == null) { + if (paramCase_ == 1) { + paramCase_ = 0; + param_ = null; + onChanged(); + } + } else { + if (paramCase_ == 1) { + paramCase_ = 0; + param_ = null; + } + batchTxnTimeBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * The exact timestamp to start the batch transaction.
    +     * 
    + * + * .google.protobuf.Timestamp batch_txn_time = 1; + */ + public com.google.protobuf.Timestamp.Builder getBatchTxnTimeBuilder() { + return internalGetBatchTxnTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The exact timestamp to start the batch transaction.
    +     * 
    + * + * .google.protobuf.Timestamp batch_txn_time = 1; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getBatchTxnTimeOrBuilder() { + if ((paramCase_ == 1) && (batchTxnTimeBuilder_ != null)) { + return batchTxnTimeBuilder_.getMessageOrBuilder(); + } else { + if (paramCase_ == 1) { + return (com.google.protobuf.Timestamp) param_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * The exact timestamp to start the batch transaction.
    +     * 
    + * + * .google.protobuf.Timestamp batch_txn_time = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetBatchTxnTimeFieldBuilder() { + if (batchTxnTimeBuilder_ == null) { + if (!(paramCase_ == 1)) { + param_ = com.google.protobuf.Timestamp.getDefaultInstance(); + } + batchTxnTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + (com.google.protobuf.Timestamp) param_, getParentForChildren(), isClean()); + param_ = null; + } + paramCase_ = 1; + onChanged(); + return batchTxnTimeBuilder_; + } + + /** + * + * + *
    +     * ID of a batch read-only transaction. It can be used to start the same
    +     * batch transaction on multiple executors and parallelize partition
    +     * processing.
    +     * 
    + * + * bytes tid = 2; + * + * @return Whether the tid field is set. + */ + public boolean hasTid() { + return paramCase_ == 2; + } + + /** + * + * + *
    +     * ID of a batch read-only transaction. It can be used to start the same
    +     * batch transaction on multiple executors and parallelize partition
    +     * processing.
    +     * 
    + * + * bytes tid = 2; + * + * @return The tid. + */ + public com.google.protobuf.ByteString getTid() { + if (paramCase_ == 2) { + return (com.google.protobuf.ByteString) param_; + } + return com.google.protobuf.ByteString.EMPTY; + } + + /** + * + * + *
    +     * ID of a batch read-only transaction. It can be used to start the same
    +     * batch transaction on multiple executors and parallelize partition
    +     * processing.
    +     * 
    + * + * bytes tid = 2; + * + * @param value The tid to set. + * @return This builder for chaining. + */ + public Builder setTid(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + paramCase_ = 2; + param_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * ID of a batch read-only transaction. It can be used to start the same
    +     * batch transaction on multiple executors and parallelize partition
    +     * processing.
    +     * 
    + * + * bytes tid = 2; + * + * @return This builder for chaining. + */ + public Builder clearTid() { + if (paramCase_ == 2) { + paramCase_ = 0; + param_ = null; + onChanged(); + } + return this; + } + + private java.lang.Object cloudDatabaseRole_ = ""; + + /** + * + * + *
    +     * Database role to assume while performing this action. Setting the
    +     * database_role will enforce additional role-based access checks on this
    +     * action.
    +     * 
    + * + * string cloud_database_role = 3; + * + * @return The cloudDatabaseRole. + */ + public java.lang.String getCloudDatabaseRole() { + java.lang.Object ref = cloudDatabaseRole_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + cloudDatabaseRole_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Database role to assume while performing this action. Setting the
    +     * database_role will enforce additional role-based access checks on this
    +     * action.
    +     * 
    + * + * string cloud_database_role = 3; + * + * @return The bytes for cloudDatabaseRole. + */ + public com.google.protobuf.ByteString getCloudDatabaseRoleBytes() { + java.lang.Object ref = cloudDatabaseRole_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + cloudDatabaseRole_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Database role to assume while performing this action. Setting the
    +     * database_role will enforce additional role-based access checks on this
    +     * action.
    +     * 
    + * + * string cloud_database_role = 3; + * + * @param value The cloudDatabaseRole to set. + * @return This builder for chaining. + */ + public Builder setCloudDatabaseRole(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + cloudDatabaseRole_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Database role to assume while performing this action. Setting the
    +     * database_role will enforce additional role-based access checks on this
    +     * action.
    +     * 
    + * + * string cloud_database_role = 3; + * + * @return This builder for chaining. + */ + public Builder clearCloudDatabaseRole() { + cloudDatabaseRole_ = getDefaultInstance().getCloudDatabaseRole(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Database role to assume while performing this action. Setting the
    +     * database_role will enforce additional role-based access checks on this
    +     * action.
    +     * 
    + * + * string cloud_database_role = 3; + * + * @param value The bytes for cloudDatabaseRole to set. + * @return This builder for chaining. + */ + public Builder setCloudDatabaseRoleBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + cloudDatabaseRole_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.StartBatchTransactionAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.StartBatchTransactionAction) + private static final com.google.spanner.executor.v1.StartBatchTransactionAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.StartBatchTransactionAction(); + } + + public static com.google.spanner.executor.v1.StartBatchTransactionAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StartBatchTransactionAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.StartBatchTransactionAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartBatchTransactionActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartBatchTransactionActionOrBuilder.java new file mode 100644 index 000000000000..01187e03acbe --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartBatchTransactionActionOrBuilder.java @@ -0,0 +1,127 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface StartBatchTransactionActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.StartBatchTransactionAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The exact timestamp to start the batch transaction.
    +   * 
    + * + * .google.protobuf.Timestamp batch_txn_time = 1; + * + * @return Whether the batchTxnTime field is set. + */ + boolean hasBatchTxnTime(); + + /** + * + * + *
    +   * The exact timestamp to start the batch transaction.
    +   * 
    + * + * .google.protobuf.Timestamp batch_txn_time = 1; + * + * @return The batchTxnTime. + */ + com.google.protobuf.Timestamp getBatchTxnTime(); + + /** + * + * + *
    +   * The exact timestamp to start the batch transaction.
    +   * 
    + * + * .google.protobuf.Timestamp batch_txn_time = 1; + */ + com.google.protobuf.TimestampOrBuilder getBatchTxnTimeOrBuilder(); + + /** + * + * + *
    +   * ID of a batch read-only transaction. It can be used to start the same
    +   * batch transaction on multiple executors and parallelize partition
    +   * processing.
    +   * 
    + * + * bytes tid = 2; + * + * @return Whether the tid field is set. + */ + boolean hasTid(); + + /** + * + * + *
    +   * ID of a batch read-only transaction. It can be used to start the same
    +   * batch transaction on multiple executors and parallelize partition
    +   * processing.
    +   * 
    + * + * bytes tid = 2; + * + * @return The tid. + */ + com.google.protobuf.ByteString getTid(); + + /** + * + * + *
    +   * Database role to assume while performing this action. Setting the
    +   * database_role will enforce additional role-based access checks on this
    +   * action.
    +   * 
    + * + * string cloud_database_role = 3; + * + * @return The cloudDatabaseRole. + */ + java.lang.String getCloudDatabaseRole(); + + /** + * + * + *
    +   * Database role to assume while performing this action. Setting the
    +   * database_role will enforce additional role-based access checks on this
    +   * action.
    +   * 
    + * + * string cloud_database_role = 3; + * + * @return The bytes for cloudDatabaseRole. + */ + com.google.protobuf.ByteString getCloudDatabaseRoleBytes(); + + com.google.spanner.executor.v1.StartBatchTransactionAction.ParamCase getParamCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartTransactionAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartTransactionAction.java new file mode 100644 index 000000000000..3cce17f84b55 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartTransactionAction.java @@ -0,0 +1,1763 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * StartTransactionAction defines an action of initializing a transaction.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.StartTransactionAction} + */ +@com.google.protobuf.Generated +public final class StartTransactionAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.StartTransactionAction) + StartTransactionActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "StartTransactionAction"); + } + + // Use StartTransactionAction.newBuilder() to construct. + private StartTransactionAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private StartTransactionAction() { + table_ = java.util.Collections.emptyList(); + transactionSeed_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_StartTransactionAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_StartTransactionAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.StartTransactionAction.class, + com.google.spanner.executor.v1.StartTransactionAction.Builder.class); + } + + private int bitField0_; + public static final int CONCURRENCY_FIELD_NUMBER = 1; + private com.google.spanner.executor.v1.Concurrency concurrency_; + + /** + * + * + *
    +   * Concurrency is for read-only transactions and must be omitted for
    +   * read-write transactions.
    +   * 
    + * + * optional .google.spanner.executor.v1.Concurrency concurrency = 1; + * + * @return Whether the concurrency field is set. + */ + @java.lang.Override + public boolean hasConcurrency() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Concurrency is for read-only transactions and must be omitted for
    +   * read-write transactions.
    +   * 
    + * + * optional .google.spanner.executor.v1.Concurrency concurrency = 1; + * + * @return The concurrency. + */ + @java.lang.Override + public com.google.spanner.executor.v1.Concurrency getConcurrency() { + return concurrency_ == null + ? com.google.spanner.executor.v1.Concurrency.getDefaultInstance() + : concurrency_; + } + + /** + * + * + *
    +   * Concurrency is for read-only transactions and must be omitted for
    +   * read-write transactions.
    +   * 
    + * + * optional .google.spanner.executor.v1.Concurrency concurrency = 1; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ConcurrencyOrBuilder getConcurrencyOrBuilder() { + return concurrency_ == null + ? com.google.spanner.executor.v1.Concurrency.getDefaultInstance() + : concurrency_; + } + + public static final int TABLE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List table_; + + /** + * + * + *
    +   * Metadata about tables and columns that will be involved in this
    +   * transaction. It is to convert values of key parts correctly.
    +   * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + @java.lang.Override + public java.util.List getTableList() { + return table_; + } + + /** + * + * + *
    +   * Metadata about tables and columns that will be involved in this
    +   * transaction. It is to convert values of key parts correctly.
    +   * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + @java.lang.Override + public java.util.List + getTableOrBuilderList() { + return table_; + } + + /** + * + * + *
    +   * Metadata about tables and columns that will be involved in this
    +   * transaction. It is to convert values of key parts correctly.
    +   * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + @java.lang.Override + public int getTableCount() { + return table_.size(); + } + + /** + * + * + *
    +   * Metadata about tables and columns that will be involved in this
    +   * transaction. It is to convert values of key parts correctly.
    +   * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + @java.lang.Override + public com.google.spanner.executor.v1.TableMetadata getTable(int index) { + return table_.get(index); + } + + /** + * + * + *
    +   * Metadata about tables and columns that will be involved in this
    +   * transaction. It is to convert values of key parts correctly.
    +   * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + @java.lang.Override + public com.google.spanner.executor.v1.TableMetadataOrBuilder getTableOrBuilder(int index) { + return table_.get(index); + } + + public static final int TRANSACTION_SEED_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object transactionSeed_ = ""; + + /** + * + * + *
    +   * Transaction_seed contains workid and op pair for this transaction, used for
    +   * testing.
    +   * 
    + * + * string transaction_seed = 3; + * + * @return The transactionSeed. + */ + @java.lang.Override + public java.lang.String getTransactionSeed() { + java.lang.Object ref = transactionSeed_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionSeed_ = s; + return s; + } + } + + /** + * + * + *
    +   * Transaction_seed contains workid and op pair for this transaction, used for
    +   * testing.
    +   * 
    + * + * string transaction_seed = 3; + * + * @return The bytes for transactionSeed. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTransactionSeedBytes() { + java.lang.Object ref = transactionSeed_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + transactionSeed_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int EXECUTION_OPTIONS_FIELD_NUMBER = 4; + private com.google.spanner.executor.v1.TransactionExecutionOptions executionOptions_; + + /** + * + * + *
    +   * Execution options (e.g., whether transaction is opaque, optimistic,
    +   * excluded from change streams).
    +   * 
    + * + * optional .google.spanner.executor.v1.TransactionExecutionOptions execution_options = 4; + * + * + * @return Whether the executionOptions field is set. + */ + @java.lang.Override + public boolean hasExecutionOptions() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Execution options (e.g., whether transaction is opaque, optimistic,
    +   * excluded from change streams).
    +   * 
    + * + * optional .google.spanner.executor.v1.TransactionExecutionOptions execution_options = 4; + * + * + * @return The executionOptions. + */ + @java.lang.Override + public com.google.spanner.executor.v1.TransactionExecutionOptions getExecutionOptions() { + return executionOptions_ == null + ? com.google.spanner.executor.v1.TransactionExecutionOptions.getDefaultInstance() + : executionOptions_; + } + + /** + * + * + *
    +   * Execution options (e.g., whether transaction is opaque, optimistic,
    +   * excluded from change streams).
    +   * 
    + * + * optional .google.spanner.executor.v1.TransactionExecutionOptions execution_options = 4; + * + */ + @java.lang.Override + public com.google.spanner.executor.v1.TransactionExecutionOptionsOrBuilder + getExecutionOptionsOrBuilder() { + return executionOptions_ == null + ? com.google.spanner.executor.v1.TransactionExecutionOptions.getDefaultInstance() + : executionOptions_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getConcurrency()); + } + for (int i = 0; i < table_.size(); i++) { + output.writeMessage(2, table_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(transactionSeed_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, transactionSeed_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(4, getExecutionOptions()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getConcurrency()); + } + for (int i = 0; i < table_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, table_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(transactionSeed_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, transactionSeed_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getExecutionOptions()); + } + size += 
getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.StartTransactionAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.StartTransactionAction other = + (com.google.spanner.executor.v1.StartTransactionAction) obj; + + if (hasConcurrency() != other.hasConcurrency()) return false; + if (hasConcurrency()) { + if (!getConcurrency().equals(other.getConcurrency())) return false; + } + if (!getTableList().equals(other.getTableList())) return false; + if (!getTransactionSeed().equals(other.getTransactionSeed())) return false; + if (hasExecutionOptions() != other.hasExecutionOptions()) return false; + if (hasExecutionOptions()) { + if (!getExecutionOptions().equals(other.getExecutionOptions())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasConcurrency()) { + hash = (37 * hash) + CONCURRENCY_FIELD_NUMBER; + hash = (53 * hash) + getConcurrency().hashCode(); + } + if (getTableCount() > 0) { + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTableList().hashCode(); + } + hash = (37 * hash) + TRANSACTION_SEED_FIELD_NUMBER; + hash = (53 * hash) + getTransactionSeed().hashCode(); + if (hasExecutionOptions()) { + hash = (37 * hash) + EXECUTION_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getExecutionOptions().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.StartTransactionAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.StartTransactionAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.StartTransactionAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.StartTransactionAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.StartTransactionAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.StartTransactionAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.StartTransactionAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.StartTransactionAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.StartTransactionAction parseDelimitedFrom( + java.io.InputStream input) throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.StartTransactionAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.StartTransactionAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.StartTransactionAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.StartTransactionAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * StartTransactionAction defines an action of initializing a transaction.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.StartTransactionAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.StartTransactionAction) + com.google.spanner.executor.v1.StartTransactionActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_StartTransactionAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_StartTransactionAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.StartTransactionAction.class, + com.google.spanner.executor.v1.StartTransactionAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.StartTransactionAction.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetConcurrencyFieldBuilder(); + internalGetTableFieldBuilder(); + internalGetExecutionOptionsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + concurrency_ = null; + if (concurrencyBuilder_ != null) { + concurrencyBuilder_.dispose(); + concurrencyBuilder_ = null; + } + if (tableBuilder_ == null) { + table_ = java.util.Collections.emptyList(); + } else { + table_ = null; + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + transactionSeed_ = ""; + 
executionOptions_ = null; + if (executionOptionsBuilder_ != null) { + executionOptionsBuilder_.dispose(); + executionOptionsBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_StartTransactionAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.StartTransactionAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.StartTransactionAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.StartTransactionAction build() { + com.google.spanner.executor.v1.StartTransactionAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.StartTransactionAction buildPartial() { + com.google.spanner.executor.v1.StartTransactionAction result = + new com.google.spanner.executor.v1.StartTransactionAction(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.executor.v1.StartTransactionAction result) { + if (tableBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + table_ = java.util.Collections.unmodifiableList(table_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.table_ = table_; + } else { + result.table_ = tableBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.executor.v1.StartTransactionAction result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.concurrency_ = + concurrencyBuilder_ == null ? 
concurrency_ : concurrencyBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.transactionSeed_ = transactionSeed_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.executionOptions_ = + executionOptionsBuilder_ == null ? executionOptions_ : executionOptionsBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.StartTransactionAction) { + return mergeFrom((com.google.spanner.executor.v1.StartTransactionAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.StartTransactionAction other) { + if (other == com.google.spanner.executor.v1.StartTransactionAction.getDefaultInstance()) + return this; + if (other.hasConcurrency()) { + mergeConcurrency(other.getConcurrency()); + } + if (tableBuilder_ == null) { + if (!other.table_.isEmpty()) { + if (table_.isEmpty()) { + table_ = other.table_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureTableIsMutable(); + table_.addAll(other.table_); + } + onChanged(); + } + } else { + if (!other.table_.isEmpty()) { + if (tableBuilder_.isEmpty()) { + tableBuilder_.dispose(); + tableBuilder_ = null; + table_ = other.table_; + bitField0_ = (bitField0_ & ~0x00000002); + tableBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetTableFieldBuilder() + : null; + } else { + tableBuilder_.addAllMessages(other.table_); + } + } + } + if (!other.getTransactionSeed().isEmpty()) { + transactionSeed_ = other.transactionSeed_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasExecutionOptions()) { + mergeExecutionOptions(other.getExecutionOptions()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetConcurrencyFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.spanner.executor.v1.TableMetadata m = + input.readMessage( + com.google.spanner.executor.v1.TableMetadata.parser(), extensionRegistry); + if (tableBuilder_ == null) { + ensureTableIsMutable(); + table_.add(m); + } else { + tableBuilder_.addMessage(m); + } + break; + } // case 18 + case 26: + { + transactionSeed_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetExecutionOptionsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // 
finally + return this; + } + + private int bitField0_; + + private com.google.spanner.executor.v1.Concurrency concurrency_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.Concurrency, + com.google.spanner.executor.v1.Concurrency.Builder, + com.google.spanner.executor.v1.ConcurrencyOrBuilder> + concurrencyBuilder_; + + /** + * + * + *
    +     * Concurrency is for read-only transactions and must be omitted for
    +     * read-write transactions.
    +     * 
    + * + * optional .google.spanner.executor.v1.Concurrency concurrency = 1; + * + * @return Whether the concurrency field is set. + */ + public boolean hasConcurrency() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Concurrency is for read-only transactions and must be omitted for
    +     * read-write transactions.
    +     * 
    + * + * optional .google.spanner.executor.v1.Concurrency concurrency = 1; + * + * @return The concurrency. + */ + public com.google.spanner.executor.v1.Concurrency getConcurrency() { + if (concurrencyBuilder_ == null) { + return concurrency_ == null + ? com.google.spanner.executor.v1.Concurrency.getDefaultInstance() + : concurrency_; + } else { + return concurrencyBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Concurrency is for read-only transactions and must be omitted for
    +     * read-write transactions.
    +     * 
    + * + * optional .google.spanner.executor.v1.Concurrency concurrency = 1; + */ + public Builder setConcurrency(com.google.spanner.executor.v1.Concurrency value) { + if (concurrencyBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + concurrency_ = value; + } else { + concurrencyBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Concurrency is for read-only transactions and must be omitted for
    +     * read-write transactions.
    +     * 
    + * + * optional .google.spanner.executor.v1.Concurrency concurrency = 1; + */ + public Builder setConcurrency( + com.google.spanner.executor.v1.Concurrency.Builder builderForValue) { + if (concurrencyBuilder_ == null) { + concurrency_ = builderForValue.build(); + } else { + concurrencyBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Concurrency is for read-only transactions and must be omitted for
    +     * read-write transactions.
    +     * 
    + * + * optional .google.spanner.executor.v1.Concurrency concurrency = 1; + */ + public Builder mergeConcurrency(com.google.spanner.executor.v1.Concurrency value) { + if (concurrencyBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && concurrency_ != null + && concurrency_ != com.google.spanner.executor.v1.Concurrency.getDefaultInstance()) { + getConcurrencyBuilder().mergeFrom(value); + } else { + concurrency_ = value; + } + } else { + concurrencyBuilder_.mergeFrom(value); + } + if (concurrency_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Concurrency is for read-only transactions and must be omitted for
    +     * read-write transactions.
    +     * 
    + * + * optional .google.spanner.executor.v1.Concurrency concurrency = 1; + */ + public Builder clearConcurrency() { + bitField0_ = (bitField0_ & ~0x00000001); + concurrency_ = null; + if (concurrencyBuilder_ != null) { + concurrencyBuilder_.dispose(); + concurrencyBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Concurrency is for read-only transactions and must be omitted for
    +     * read-write transactions.
    +     * 
    + * + * optional .google.spanner.executor.v1.Concurrency concurrency = 1; + */ + public com.google.spanner.executor.v1.Concurrency.Builder getConcurrencyBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetConcurrencyFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Concurrency is for read-only transactions and must be omitted for
    +     * read-write transactions.
    +     * 
    + * + * optional .google.spanner.executor.v1.Concurrency concurrency = 1; + */ + public com.google.spanner.executor.v1.ConcurrencyOrBuilder getConcurrencyOrBuilder() { + if (concurrencyBuilder_ != null) { + return concurrencyBuilder_.getMessageOrBuilder(); + } else { + return concurrency_ == null + ? com.google.spanner.executor.v1.Concurrency.getDefaultInstance() + : concurrency_; + } + } + + /** + * + * + *
    +     * Concurrency is for read-only transactions and must be omitted for
    +     * read-write transactions.
    +     * 
    + * + * optional .google.spanner.executor.v1.Concurrency concurrency = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.Concurrency, + com.google.spanner.executor.v1.Concurrency.Builder, + com.google.spanner.executor.v1.ConcurrencyOrBuilder> + internalGetConcurrencyFieldBuilder() { + if (concurrencyBuilder_ == null) { + concurrencyBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.Concurrency, + com.google.spanner.executor.v1.Concurrency.Builder, + com.google.spanner.executor.v1.ConcurrencyOrBuilder>( + getConcurrency(), getParentForChildren(), isClean()); + concurrency_ = null; + } + return concurrencyBuilder_; + } + + private java.util.List table_ = + java.util.Collections.emptyList(); + + private void ensureTableIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + table_ = new java.util.ArrayList(table_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.TableMetadata, + com.google.spanner.executor.v1.TableMetadata.Builder, + com.google.spanner.executor.v1.TableMetadataOrBuilder> + tableBuilder_; + + /** + * + * + *
    +     * Metadata about tables and columns that will be involved in this
    +     * transaction. It is to convert values of key parts correctly.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public java.util.List getTableList() { + if (tableBuilder_ == null) { + return java.util.Collections.unmodifiableList(table_); + } else { + return tableBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Metadata about tables and columns that will be involved in this
    +     * transaction. It is to convert values of key parts correctly.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public int getTableCount() { + if (tableBuilder_ == null) { + return table_.size(); + } else { + return tableBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Metadata about tables and columns that will be involved in this
    +     * transaction. It is to convert values of key parts correctly.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public com.google.spanner.executor.v1.TableMetadata getTable(int index) { + if (tableBuilder_ == null) { + return table_.get(index); + } else { + return tableBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Metadata about tables and columns that will be involved in this
    +     * transaction. It is to convert values of key parts correctly.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public Builder setTable(int index, com.google.spanner.executor.v1.TableMetadata value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableIsMutable(); + table_.set(index, value); + onChanged(); + } else { + tableBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Metadata about tables and columns that will be involved in this
    +     * transaction. It is to convert values of key parts correctly.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public Builder setTable( + int index, com.google.spanner.executor.v1.TableMetadata.Builder builderForValue) { + if (tableBuilder_ == null) { + ensureTableIsMutable(); + table_.set(index, builderForValue.build()); + onChanged(); + } else { + tableBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Metadata about tables and columns that will be involved in this
    +     * transaction. It is to convert values of key parts correctly.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public Builder addTable(com.google.spanner.executor.v1.TableMetadata value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableIsMutable(); + table_.add(value); + onChanged(); + } else { + tableBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Metadata about tables and columns that will be involved in this
    +     * transaction. It is to convert values of key parts correctly.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public Builder addTable(int index, com.google.spanner.executor.v1.TableMetadata value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableIsMutable(); + table_.add(index, value); + onChanged(); + } else { + tableBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Metadata about tables and columns that will be involved in this
    +     * transaction. It is to convert values of key parts correctly.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public Builder addTable(com.google.spanner.executor.v1.TableMetadata.Builder builderForValue) { + if (tableBuilder_ == null) { + ensureTableIsMutable(); + table_.add(builderForValue.build()); + onChanged(); + } else { + tableBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Metadata about tables and columns that will be involved in this
    +     * transaction. It is to convert values of key parts correctly.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public Builder addTable( + int index, com.google.spanner.executor.v1.TableMetadata.Builder builderForValue) { + if (tableBuilder_ == null) { + ensureTableIsMutable(); + table_.add(index, builderForValue.build()); + onChanged(); + } else { + tableBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Metadata about tables and columns that will be involved in this
    +     * transaction. It is to convert values of key parts correctly.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public Builder addAllTable( + java.lang.Iterable values) { + if (tableBuilder_ == null) { + ensureTableIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, table_); + onChanged(); + } else { + tableBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Metadata about tables and columns that will be involved in this
    +     * transaction. It is to convert values of key parts correctly.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public Builder clearTable() { + if (tableBuilder_ == null) { + table_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + tableBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Metadata about tables and columns that will be involved in this
    +     * transaction. It is to convert values of key parts correctly.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public Builder removeTable(int index) { + if (tableBuilder_ == null) { + ensureTableIsMutable(); + table_.remove(index); + onChanged(); + } else { + tableBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Metadata about tables and columns that will be involved in this
    +     * transaction. It is to convert values of key parts correctly.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public com.google.spanner.executor.v1.TableMetadata.Builder getTableBuilder(int index) { + return internalGetTableFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Metadata about tables and columns that will be involved in this
    +     * transaction. It is to convert values of key parts correctly.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public com.google.spanner.executor.v1.TableMetadataOrBuilder getTableOrBuilder(int index) { + if (tableBuilder_ == null) { + return table_.get(index); + } else { + return tableBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Metadata about tables and columns that will be involved in this
    +     * transaction. It is to convert values of key parts correctly.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public java.util.List + getTableOrBuilderList() { + if (tableBuilder_ != null) { + return tableBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(table_); + } + } + + /** + * + * + *
    +     * Metadata about tables and columns that will be involved in this
    +     * transaction. It is to convert values of key parts correctly.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public com.google.spanner.executor.v1.TableMetadata.Builder addTableBuilder() { + return internalGetTableFieldBuilder() + .addBuilder(com.google.spanner.executor.v1.TableMetadata.getDefaultInstance()); + } + + /** + * + * + *
    +     * Metadata about tables and columns that will be involved in this
    +     * transaction. It is to convert values of key parts correctly.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public com.google.spanner.executor.v1.TableMetadata.Builder addTableBuilder(int index) { + return internalGetTableFieldBuilder() + .addBuilder(index, com.google.spanner.executor.v1.TableMetadata.getDefaultInstance()); + } + + /** + * + * + *
    +     * Metadata about tables and columns that will be involved in this
    +     * transaction. It is to convert values of key parts correctly.
    +     * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + public java.util.List + getTableBuilderList() { + return internalGetTableFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.TableMetadata, + com.google.spanner.executor.v1.TableMetadata.Builder, + com.google.spanner.executor.v1.TableMetadataOrBuilder> + internalGetTableFieldBuilder() { + if (tableBuilder_ == null) { + tableBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.TableMetadata, + com.google.spanner.executor.v1.TableMetadata.Builder, + com.google.spanner.executor.v1.TableMetadataOrBuilder>( + table_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + table_ = null; + } + return tableBuilder_; + } + + private java.lang.Object transactionSeed_ = ""; + + /** + * + * + *
    +     * Transaction_seed contains workid and op pair for this transaction, used for
    +     * testing.
    +     * 
    + * + * string transaction_seed = 3; + * + * @return The transactionSeed. + */ + public java.lang.String getTransactionSeed() { + java.lang.Object ref = transactionSeed_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionSeed_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Transaction_seed contains workid and op pair for this transaction, used for
    +     * testing.
    +     * 
    + * + * string transaction_seed = 3; + * + * @return The bytes for transactionSeed. + */ + public com.google.protobuf.ByteString getTransactionSeedBytes() { + java.lang.Object ref = transactionSeed_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + transactionSeed_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Transaction_seed contains workid and op pair for this transaction, used for
    +     * testing.
    +     * 
    + * + * string transaction_seed = 3; + * + * @param value The transactionSeed to set. + * @return This builder for chaining. + */ + public Builder setTransactionSeed(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + transactionSeed_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Transaction_seed contains workid and op pair for this transaction, used for
    +     * testing.
    +     * 
    + * + * string transaction_seed = 3; + * + * @return This builder for chaining. + */ + public Builder clearTransactionSeed() { + transactionSeed_ = getDefaultInstance().getTransactionSeed(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Transaction_seed contains workid and op pair for this transaction, used for
    +     * testing.
    +     * 
    + * + * string transaction_seed = 3; + * + * @param value The bytes for transactionSeed to set. + * @return This builder for chaining. + */ + public Builder setTransactionSeedBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + transactionSeed_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.spanner.executor.v1.TransactionExecutionOptions executionOptions_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.TransactionExecutionOptions, + com.google.spanner.executor.v1.TransactionExecutionOptions.Builder, + com.google.spanner.executor.v1.TransactionExecutionOptionsOrBuilder> + executionOptionsBuilder_; + + /** + * + * + *
    +     * Execution options (e.g., whether transaction is opaque, optimistic,
    +     * excluded from change streams).
    +     * 
    + * + * optional .google.spanner.executor.v1.TransactionExecutionOptions execution_options = 4; + * + * + * @return Whether the executionOptions field is set. + */ + public boolean hasExecutionOptions() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Execution options (e.g., whether transaction is opaque, optimistic,
    +     * excluded from change streams).
    +     * 
    + * + * optional .google.spanner.executor.v1.TransactionExecutionOptions execution_options = 4; + * + * + * @return The executionOptions. + */ + public com.google.spanner.executor.v1.TransactionExecutionOptions getExecutionOptions() { + if (executionOptionsBuilder_ == null) { + return executionOptions_ == null + ? com.google.spanner.executor.v1.TransactionExecutionOptions.getDefaultInstance() + : executionOptions_; + } else { + return executionOptionsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Execution options (e.g., whether transaction is opaque, optimistic,
    +     * excluded from change streams).
    +     * 
    + * + * optional .google.spanner.executor.v1.TransactionExecutionOptions execution_options = 4; + * + */ + public Builder setExecutionOptions( + com.google.spanner.executor.v1.TransactionExecutionOptions value) { + if (executionOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + executionOptions_ = value; + } else { + executionOptionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Execution options (e.g., whether transaction is opaque, optimistic,
    +     * excluded from change streams).
    +     * 
    + * + * optional .google.spanner.executor.v1.TransactionExecutionOptions execution_options = 4; + * + */ + public Builder setExecutionOptions( + com.google.spanner.executor.v1.TransactionExecutionOptions.Builder builderForValue) { + if (executionOptionsBuilder_ == null) { + executionOptions_ = builderForValue.build(); + } else { + executionOptionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Execution options (e.g., whether transaction is opaque, optimistic,
    +     * excluded from change streams).
    +     * 
    + * + * optional .google.spanner.executor.v1.TransactionExecutionOptions execution_options = 4; + * + */ + public Builder mergeExecutionOptions( + com.google.spanner.executor.v1.TransactionExecutionOptions value) { + if (executionOptionsBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && executionOptions_ != null + && executionOptions_ + != com.google.spanner.executor.v1.TransactionExecutionOptions + .getDefaultInstance()) { + getExecutionOptionsBuilder().mergeFrom(value); + } else { + executionOptions_ = value; + } + } else { + executionOptionsBuilder_.mergeFrom(value); + } + if (executionOptions_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Execution options (e.g., whether transaction is opaque, optimistic,
    +     * excluded from change streams).
    +     * 
    + * + * optional .google.spanner.executor.v1.TransactionExecutionOptions execution_options = 4; + * + */ + public Builder clearExecutionOptions() { + bitField0_ = (bitField0_ & ~0x00000008); + executionOptions_ = null; + if (executionOptionsBuilder_ != null) { + executionOptionsBuilder_.dispose(); + executionOptionsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Execution options (e.g., whether transaction is opaque, optimistic,
    +     * excluded from change streams).
    +     * 
    + * + * optional .google.spanner.executor.v1.TransactionExecutionOptions execution_options = 4; + * + */ + public com.google.spanner.executor.v1.TransactionExecutionOptions.Builder + getExecutionOptionsBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetExecutionOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Execution options (e.g., whether transaction is opaque, optimistic,
    +     * excluded from change streams).
    +     * 
    + * + * optional .google.spanner.executor.v1.TransactionExecutionOptions execution_options = 4; + * + */ + public com.google.spanner.executor.v1.TransactionExecutionOptionsOrBuilder + getExecutionOptionsOrBuilder() { + if (executionOptionsBuilder_ != null) { + return executionOptionsBuilder_.getMessageOrBuilder(); + } else { + return executionOptions_ == null + ? com.google.spanner.executor.v1.TransactionExecutionOptions.getDefaultInstance() + : executionOptions_; + } + } + + /** + * + * + *
    +     * Execution options (e.g., whether transaction is opaque, optimistic,
    +     * excluded from change streams).
    +     * 
    + * + * optional .google.spanner.executor.v1.TransactionExecutionOptions execution_options = 4; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.TransactionExecutionOptions, + com.google.spanner.executor.v1.TransactionExecutionOptions.Builder, + com.google.spanner.executor.v1.TransactionExecutionOptionsOrBuilder> + internalGetExecutionOptionsFieldBuilder() { + if (executionOptionsBuilder_ == null) { + executionOptionsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.TransactionExecutionOptions, + com.google.spanner.executor.v1.TransactionExecutionOptions.Builder, + com.google.spanner.executor.v1.TransactionExecutionOptionsOrBuilder>( + getExecutionOptions(), getParentForChildren(), isClean()); + executionOptions_ = null; + } + return executionOptionsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.StartTransactionAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.StartTransactionAction) + private static final com.google.spanner.executor.v1.StartTransactionAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.StartTransactionAction(); + } + + public static com.google.spanner.executor.v1.StartTransactionAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StartTransactionAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException 
e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.StartTransactionAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartTransactionActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartTransactionActionOrBuilder.java new file mode 100644 index 000000000000..e82420ed5a9a --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartTransactionActionOrBuilder.java @@ -0,0 +1,201 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface StartTransactionActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.StartTransactionAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Concurrency is for read-only transactions and must be omitted for
    +   * read-write transactions.
    +   * 
    + * + * optional .google.spanner.executor.v1.Concurrency concurrency = 1; + * + * @return Whether the concurrency field is set. + */ + boolean hasConcurrency(); + + /** + * + * + *
    +   * Concurrency is for read-only transactions and must be omitted for
    +   * read-write transactions.
    +   * 
    + * + * optional .google.spanner.executor.v1.Concurrency concurrency = 1; + * + * @return The concurrency. + */ + com.google.spanner.executor.v1.Concurrency getConcurrency(); + + /** + * + * + *
    +   * Concurrency is for read-only transactions and must be omitted for
    +   * read-write transactions.
    +   * 
    + * + * optional .google.spanner.executor.v1.Concurrency concurrency = 1; + */ + com.google.spanner.executor.v1.ConcurrencyOrBuilder getConcurrencyOrBuilder(); + + /** + * + * + *
    +   * Metadata about tables and columns that will be involved in this
    +   * transaction. It is to convert values of key parts correctly.
    +   * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + java.util.List getTableList(); + + /** + * + * + *
    +   * Metadata about tables and columns that will be involved in this
    +   * transaction. It is to convert values of key parts correctly.
    +   * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + com.google.spanner.executor.v1.TableMetadata getTable(int index); + + /** + * + * + *
    +   * Metadata about tables and columns that will be involved in this
    +   * transaction. It is to convert values of key parts correctly.
    +   * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + int getTableCount(); + + /** + * + * + *
    +   * Metadata about tables and columns that will be involved in this
    +   * transaction. It is to convert values of key parts correctly.
    +   * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + java.util.List + getTableOrBuilderList(); + + /** + * + * + *
    +   * Metadata about tables and columns that will be involved in this
    +   * transaction. It is to convert values of key parts correctly.
    +   * 
    + * + * repeated .google.spanner.executor.v1.TableMetadata table = 2; + */ + com.google.spanner.executor.v1.TableMetadataOrBuilder getTableOrBuilder(int index); + + /** + * + * + *
    +   * Transaction_seed contains workid and op pair for this transaction, used for
    +   * testing.
    +   * 
    + * + * string transaction_seed = 3; + * + * @return The transactionSeed. + */ + java.lang.String getTransactionSeed(); + + /** + * + * + *
    +   * Transaction_seed contains workid and op pair for this transaction, used for
    +   * testing.
    +   * 
    + * + * string transaction_seed = 3; + * + * @return The bytes for transactionSeed. + */ + com.google.protobuf.ByteString getTransactionSeedBytes(); + + /** + * + * + *
    +   * Execution options (e.g., whether transaction is opaque, optimistic,
    +   * excluded from change streams).
    +   * 
    + * + * optional .google.spanner.executor.v1.TransactionExecutionOptions execution_options = 4; + * + * + * @return Whether the executionOptions field is set. + */ + boolean hasExecutionOptions(); + + /** + * + * + *
    +   * Execution options (e.g., whether transaction is opaque, optimistic,
    +   * excluded from change streams).
    +   * 
    + * + * optional .google.spanner.executor.v1.TransactionExecutionOptions execution_options = 4; + * + * + * @return The executionOptions. + */ + com.google.spanner.executor.v1.TransactionExecutionOptions getExecutionOptions(); + + /** + * + * + *
    +   * Execution options (e.g., whether transaction is opaque, optimistic,
    +   * excluded from change streams).
    +   * 
    + * + * optional .google.spanner.executor.v1.TransactionExecutionOptions execution_options = 4; + * + */ + com.google.spanner.executor.v1.TransactionExecutionOptionsOrBuilder + getExecutionOptionsOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TableMetadata.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TableMetadata.java new file mode 100644 index 000000000000..a7f2dcc8b334 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TableMetadata.java @@ -0,0 +1,1622 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * TableMetadata contains metadata of a single table.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.TableMetadata} + */ +@com.google.protobuf.Generated +public final class TableMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.TableMetadata) + TableMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "TableMetadata"); + } + + // Use TableMetadata.newBuilder() to construct. + private TableMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private TableMetadata() { + name_ = ""; + column_ = java.util.Collections.emptyList(); + keyColumn_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_TableMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_TableMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.TableMetadata.class, + com.google.spanner.executor.v1.TableMetadata.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Table name.
    +   * 
    + * + * string name = 1; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Table name.
    +   * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int COLUMN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List column_; + + /** + * + * + *
    +   * Columns, in the same order as in the schema.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + @java.lang.Override + public java.util.List getColumnList() { + return column_; + } + + /** + * + * + *
    +   * Columns, in the same order as in the schema.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + @java.lang.Override + public java.util.List + getColumnOrBuilderList() { + return column_; + } + + /** + * + * + *
    +   * Columns, in the same order as in the schema.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + @java.lang.Override + public int getColumnCount() { + return column_.size(); + } + + /** + * + * + *
    +   * Columns, in the same order as in the schema.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ColumnMetadata getColumn(int index) { + return column_.get(index); + } + + /** + * + * + *
    +   * Columns, in the same order as in the schema.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ColumnMetadataOrBuilder getColumnOrBuilder(int index) { + return column_.get(index); + } + + public static final int KEY_COLUMN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private java.util.List keyColumn_; + + /** + * + * + *
    +   * Keys, in order. Column name is currently not populated.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + @java.lang.Override + public java.util.List getKeyColumnList() { + return keyColumn_; + } + + /** + * + * + *
    +   * Keys, in order. Column name is currently not populated.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + @java.lang.Override + public java.util.List + getKeyColumnOrBuilderList() { + return keyColumn_; + } + + /** + * + * + *
    +   * Keys, in order. Column name is currently not populated.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + @java.lang.Override + public int getKeyColumnCount() { + return keyColumn_.size(); + } + + /** + * + * + *
    +   * Keys, in order. Column name is currently not populated.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ColumnMetadata getKeyColumn(int index) { + return keyColumn_.get(index); + } + + /** + * + * + *
    +   * Keys, in order. Column name is currently not populated.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ColumnMetadataOrBuilder getKeyColumnOrBuilder(int index) { + return keyColumn_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + for (int i = 0; i < column_.size(); i++) { + output.writeMessage(2, column_.get(i)); + } + for (int i = 0; i < keyColumn_.size(); i++) { + output.writeMessage(3, keyColumn_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + for (int i = 0; i < column_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, column_.get(i)); + } + for (int i = 0; i < keyColumn_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, keyColumn_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.TableMetadata)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.TableMetadata other = + (com.google.spanner.executor.v1.TableMetadata) obj; 
+ + if (!getName().equals(other.getName())) return false; + if (!getColumnList().equals(other.getColumnList())) return false; + if (!getKeyColumnList().equals(other.getKeyColumnList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (getColumnCount() > 0) { + hash = (37 * hash) + COLUMN_FIELD_NUMBER; + hash = (53 * hash) + getColumnList().hashCode(); + } + if (getKeyColumnCount() > 0) { + hash = (37 * hash) + KEY_COLUMN_FIELD_NUMBER; + hash = (53 * hash) + getKeyColumnList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.TableMetadata parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.TableMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.TableMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.TableMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.TableMetadata parseFrom(byte[] data) 
+ throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.TableMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.TableMetadata parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.TableMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.TableMetadata parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.TableMetadata parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.TableMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.TableMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, 
extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.TableMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * TableMetadata contains metadata of a single table.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.TableMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.TableMetadata) + com.google.spanner.executor.v1.TableMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_TableMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_TableMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.TableMetadata.class, + com.google.spanner.executor.v1.TableMetadata.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.TableMetadata.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + if (columnBuilder_ == null) { + column_ = java.util.Collections.emptyList(); + } else { + column_ = null; + columnBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (keyColumnBuilder_ == null) { + keyColumn_ = java.util.Collections.emptyList(); + } else { + keyColumn_ = null; + keyColumnBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_TableMetadata_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.TableMetadata 
getDefaultInstanceForType() { + return com.google.spanner.executor.v1.TableMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.TableMetadata build() { + com.google.spanner.executor.v1.TableMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.TableMetadata buildPartial() { + com.google.spanner.executor.v1.TableMetadata result = + new com.google.spanner.executor.v1.TableMetadata(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.executor.v1.TableMetadata result) { + if (columnBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + column_ = java.util.Collections.unmodifiableList(column_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.column_ = column_; + } else { + result.column_ = columnBuilder_.build(); + } + if (keyColumnBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0)) { + keyColumn_ = java.util.Collections.unmodifiableList(keyColumn_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.keyColumn_ = keyColumn_; + } else { + result.keyColumn_ = keyColumnBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.executor.v1.TableMetadata result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.TableMetadata) { + return mergeFrom((com.google.spanner.executor.v1.TableMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.TableMetadata other) { + if (other == 
com.google.spanner.executor.v1.TableMetadata.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (columnBuilder_ == null) { + if (!other.column_.isEmpty()) { + if (column_.isEmpty()) { + column_ = other.column_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureColumnIsMutable(); + column_.addAll(other.column_); + } + onChanged(); + } + } else { + if (!other.column_.isEmpty()) { + if (columnBuilder_.isEmpty()) { + columnBuilder_.dispose(); + columnBuilder_ = null; + column_ = other.column_; + bitField0_ = (bitField0_ & ~0x00000002); + columnBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetColumnFieldBuilder() + : null; + } else { + columnBuilder_.addAllMessages(other.column_); + } + } + } + if (keyColumnBuilder_ == null) { + if (!other.keyColumn_.isEmpty()) { + if (keyColumn_.isEmpty()) { + keyColumn_ = other.keyColumn_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureKeyColumnIsMutable(); + keyColumn_.addAll(other.keyColumn_); + } + onChanged(); + } + } else { + if (!other.keyColumn_.isEmpty()) { + if (keyColumnBuilder_.isEmpty()) { + keyColumnBuilder_.dispose(); + keyColumnBuilder_ = null; + keyColumn_ = other.keyColumn_; + bitField0_ = (bitField0_ & ~0x00000004); + keyColumnBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetKeyColumnFieldBuilder() + : null; + } else { + keyColumnBuilder_.addAllMessages(other.keyColumn_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.spanner.executor.v1.ColumnMetadata m = + input.readMessage( + com.google.spanner.executor.v1.ColumnMetadata.parser(), extensionRegistry); + if (columnBuilder_ == null) { + ensureColumnIsMutable(); + column_.add(m); + } else { + columnBuilder_.addMessage(m); + } + break; + } // case 18 + case 26: + { + com.google.spanner.executor.v1.ColumnMetadata m = + input.readMessage( + com.google.spanner.executor.v1.ColumnMetadata.parser(), extensionRegistry); + if (keyColumnBuilder_ == null) { + ensureKeyColumnIsMutable(); + keyColumn_.add(m); + } else { + keyColumnBuilder_.addMessage(m); + } + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Table name.
    +     * 
    + * + * string name = 1; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Table name.
    +     * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Table name.
    +     * 
    + * + * string name = 1; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Table name.
    +     * 
    + * + * string name = 1; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Table name.
    +     * 
    + * + * string name = 1; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.util.List column_ = + java.util.Collections.emptyList(); + + private void ensureColumnIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + column_ = new java.util.ArrayList(column_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ColumnMetadata, + com.google.spanner.executor.v1.ColumnMetadata.Builder, + com.google.spanner.executor.v1.ColumnMetadataOrBuilder> + columnBuilder_; + + /** + * + * + *
    +     * Columns, in the same order as in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + public java.util.List getColumnList() { + if (columnBuilder_ == null) { + return java.util.Collections.unmodifiableList(column_); + } else { + return columnBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Columns, in the same order as in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + public int getColumnCount() { + if (columnBuilder_ == null) { + return column_.size(); + } else { + return columnBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Columns, in the same order as in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + public com.google.spanner.executor.v1.ColumnMetadata getColumn(int index) { + if (columnBuilder_ == null) { + return column_.get(index); + } else { + return columnBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Columns, in the same order as in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + public Builder setColumn(int index, com.google.spanner.executor.v1.ColumnMetadata value) { + if (columnBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnIsMutable(); + column_.set(index, value); + onChanged(); + } else { + columnBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Columns, in the same order as in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + public Builder setColumn( + int index, com.google.spanner.executor.v1.ColumnMetadata.Builder builderForValue) { + if (columnBuilder_ == null) { + ensureColumnIsMutable(); + column_.set(index, builderForValue.build()); + onChanged(); + } else { + columnBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Columns, in the same order as in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + public Builder addColumn(com.google.spanner.executor.v1.ColumnMetadata value) { + if (columnBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnIsMutable(); + column_.add(value); + onChanged(); + } else { + columnBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Columns, in the same order as in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + public Builder addColumn(int index, com.google.spanner.executor.v1.ColumnMetadata value) { + if (columnBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnIsMutable(); + column_.add(index, value); + onChanged(); + } else { + columnBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Columns, in the same order as in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + public Builder addColumn( + com.google.spanner.executor.v1.ColumnMetadata.Builder builderForValue) { + if (columnBuilder_ == null) { + ensureColumnIsMutable(); + column_.add(builderForValue.build()); + onChanged(); + } else { + columnBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Columns, in the same order as in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + public Builder addColumn( + int index, com.google.spanner.executor.v1.ColumnMetadata.Builder builderForValue) { + if (columnBuilder_ == null) { + ensureColumnIsMutable(); + column_.add(index, builderForValue.build()); + onChanged(); + } else { + columnBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Columns, in the same order as in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + public Builder addAllColumn( + java.lang.Iterable values) { + if (columnBuilder_ == null) { + ensureColumnIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, column_); + onChanged(); + } else { + columnBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Columns, in the same order as in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + public Builder clearColumn() { + if (columnBuilder_ == null) { + column_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + columnBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Columns, in the same order as in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + public Builder removeColumn(int index) { + if (columnBuilder_ == null) { + ensureColumnIsMutable(); + column_.remove(index); + onChanged(); + } else { + columnBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Columns, in the same order as in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + public com.google.spanner.executor.v1.ColumnMetadata.Builder getColumnBuilder(int index) { + return internalGetColumnFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Columns, in the same order as in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + public com.google.spanner.executor.v1.ColumnMetadataOrBuilder getColumnOrBuilder(int index) { + if (columnBuilder_ == null) { + return column_.get(index); + } else { + return columnBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Columns, in the same order as in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + public java.util.List + getColumnOrBuilderList() { + if (columnBuilder_ != null) { + return columnBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(column_); + } + } + + /** + * + * + *
    +     * Columns, in the same order as in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + public com.google.spanner.executor.v1.ColumnMetadata.Builder addColumnBuilder() { + return internalGetColumnFieldBuilder() + .addBuilder(com.google.spanner.executor.v1.ColumnMetadata.getDefaultInstance()); + } + + /** + * + * + *
    +     * Columns, in the same order as in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + public com.google.spanner.executor.v1.ColumnMetadata.Builder addColumnBuilder(int index) { + return internalGetColumnFieldBuilder() + .addBuilder(index, com.google.spanner.executor.v1.ColumnMetadata.getDefaultInstance()); + } + + /** + * + * + *
    +     * Columns, in the same order as in the schema.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + public java.util.List + getColumnBuilderList() { + return internalGetColumnFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ColumnMetadata, + com.google.spanner.executor.v1.ColumnMetadata.Builder, + com.google.spanner.executor.v1.ColumnMetadataOrBuilder> + internalGetColumnFieldBuilder() { + if (columnBuilder_ == null) { + columnBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ColumnMetadata, + com.google.spanner.executor.v1.ColumnMetadata.Builder, + com.google.spanner.executor.v1.ColumnMetadataOrBuilder>( + column_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + column_ = null; + } + return columnBuilder_; + } + + private java.util.List keyColumn_ = + java.util.Collections.emptyList(); + + private void ensureKeyColumnIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + keyColumn_ = + new java.util.ArrayList(keyColumn_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ColumnMetadata, + com.google.spanner.executor.v1.ColumnMetadata.Builder, + com.google.spanner.executor.v1.ColumnMetadataOrBuilder> + keyColumnBuilder_; + + /** + * + * + *
    +     * Keys, in order. Column name is currently not populated.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + public java.util.List getKeyColumnList() { + if (keyColumnBuilder_ == null) { + return java.util.Collections.unmodifiableList(keyColumn_); + } else { + return keyColumnBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Keys, in order. Column name is currently not populated.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + public int getKeyColumnCount() { + if (keyColumnBuilder_ == null) { + return keyColumn_.size(); + } else { + return keyColumnBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Keys, in order. Column name is currently not populated.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + public com.google.spanner.executor.v1.ColumnMetadata getKeyColumn(int index) { + if (keyColumnBuilder_ == null) { + return keyColumn_.get(index); + } else { + return keyColumnBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Keys, in order. Column name is currently not populated.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + public Builder setKeyColumn(int index, com.google.spanner.executor.v1.ColumnMetadata value) { + if (keyColumnBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureKeyColumnIsMutable(); + keyColumn_.set(index, value); + onChanged(); + } else { + keyColumnBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Keys, in order. Column name is currently not populated.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + public Builder setKeyColumn( + int index, com.google.spanner.executor.v1.ColumnMetadata.Builder builderForValue) { + if (keyColumnBuilder_ == null) { + ensureKeyColumnIsMutable(); + keyColumn_.set(index, builderForValue.build()); + onChanged(); + } else { + keyColumnBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Keys, in order. Column name is currently not populated.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + public Builder addKeyColumn(com.google.spanner.executor.v1.ColumnMetadata value) { + if (keyColumnBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureKeyColumnIsMutable(); + keyColumn_.add(value); + onChanged(); + } else { + keyColumnBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Keys, in order. Column name is currently not populated.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + public Builder addKeyColumn(int index, com.google.spanner.executor.v1.ColumnMetadata value) { + if (keyColumnBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureKeyColumnIsMutable(); + keyColumn_.add(index, value); + onChanged(); + } else { + keyColumnBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Keys, in order. Column name is currently not populated.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + public Builder addKeyColumn( + com.google.spanner.executor.v1.ColumnMetadata.Builder builderForValue) { + if (keyColumnBuilder_ == null) { + ensureKeyColumnIsMutable(); + keyColumn_.add(builderForValue.build()); + onChanged(); + } else { + keyColumnBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Keys, in order. Column name is currently not populated.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + public Builder addKeyColumn( + int index, com.google.spanner.executor.v1.ColumnMetadata.Builder builderForValue) { + if (keyColumnBuilder_ == null) { + ensureKeyColumnIsMutable(); + keyColumn_.add(index, builderForValue.build()); + onChanged(); + } else { + keyColumnBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Keys, in order. Column name is currently not populated.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + public Builder addAllKeyColumn( + java.lang.Iterable values) { + if (keyColumnBuilder_ == null) { + ensureKeyColumnIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, keyColumn_); + onChanged(); + } else { + keyColumnBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Keys, in order. Column name is currently not populated.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + public Builder clearKeyColumn() { + if (keyColumnBuilder_ == null) { + keyColumn_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + keyColumnBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Keys, in order. Column name is currently not populated.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + public Builder removeKeyColumn(int index) { + if (keyColumnBuilder_ == null) { + ensureKeyColumnIsMutable(); + keyColumn_.remove(index); + onChanged(); + } else { + keyColumnBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Keys, in order. Column name is currently not populated.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + public com.google.spanner.executor.v1.ColumnMetadata.Builder getKeyColumnBuilder(int index) { + return internalGetKeyColumnFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Keys, in order. Column name is currently not populated.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + public com.google.spanner.executor.v1.ColumnMetadataOrBuilder getKeyColumnOrBuilder(int index) { + if (keyColumnBuilder_ == null) { + return keyColumn_.get(index); + } else { + return keyColumnBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Keys, in order. Column name is currently not populated.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + public java.util.List + getKeyColumnOrBuilderList() { + if (keyColumnBuilder_ != null) { + return keyColumnBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(keyColumn_); + } + } + + /** + * + * + *
    +     * Keys, in order. Column name is currently not populated.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + public com.google.spanner.executor.v1.ColumnMetadata.Builder addKeyColumnBuilder() { + return internalGetKeyColumnFieldBuilder() + .addBuilder(com.google.spanner.executor.v1.ColumnMetadata.getDefaultInstance()); + } + + /** + * + * + *
    +     * Keys, in order. Column name is currently not populated.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + public com.google.spanner.executor.v1.ColumnMetadata.Builder addKeyColumnBuilder(int index) { + return internalGetKeyColumnFieldBuilder() + .addBuilder(index, com.google.spanner.executor.v1.ColumnMetadata.getDefaultInstance()); + } + + /** + * + * + *
    +     * Keys, in order. Column name is currently not populated.
    +     * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + public java.util.List + getKeyColumnBuilderList() { + return internalGetKeyColumnFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ColumnMetadata, + com.google.spanner.executor.v1.ColumnMetadata.Builder, + com.google.spanner.executor.v1.ColumnMetadataOrBuilder> + internalGetKeyColumnFieldBuilder() { + if (keyColumnBuilder_ == null) { + keyColumnBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.ColumnMetadata, + com.google.spanner.executor.v1.ColumnMetadata.Builder, + com.google.spanner.executor.v1.ColumnMetadataOrBuilder>( + keyColumn_, ((bitField0_ & 0x00000004) != 0), getParentForChildren(), isClean()); + keyColumn_ = null; + } + return keyColumnBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.TableMetadata) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.TableMetadata) + private static final com.google.spanner.executor.v1.TableMetadata DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.TableMetadata(); + } + + public static com.google.spanner.executor.v1.TableMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TableMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.TableMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TableMetadataOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TableMetadataOrBuilder.java new file mode 100644 index 000000000000..4e9df18c3ae1 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TableMetadataOrBuilder.java @@ -0,0 +1,166 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface TableMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.TableMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Table name.
    +   * 
    + * + * string name = 1; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Table name.
    +   * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +   * Columns, in the same order as in the schema.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + java.util.List getColumnList(); + + /** + * + * + *
    +   * Columns, in the same order as in the schema.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + com.google.spanner.executor.v1.ColumnMetadata getColumn(int index); + + /** + * + * + *
    +   * Columns, in the same order as in the schema.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + int getColumnCount(); + + /** + * + * + *
    +   * Columns, in the same order as in the schema.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + java.util.List + getColumnOrBuilderList(); + + /** + * + * + *
    +   * Columns, in the same order as in the schema.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata column = 2; + */ + com.google.spanner.executor.v1.ColumnMetadataOrBuilder getColumnOrBuilder(int index); + + /** + * + * + *
    +   * Keys, in order. Column name is currently not populated.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + java.util.List getKeyColumnList(); + + /** + * + * + *
    +   * Keys, in order. Column name is currently not populated.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + com.google.spanner.executor.v1.ColumnMetadata getKeyColumn(int index); + + /** + * + * + *
    +   * Keys, in order. Column name is currently not populated.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + int getKeyColumnCount(); + + /** + * + * + *
    +   * Keys, in order. Column name is currently not populated.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + java.util.List + getKeyColumnOrBuilderList(); + + /** + * + * + *
    +   * Keys, in order. Column name is currently not populated.
    +   * 
    + * + * repeated .google.spanner.executor.v1.ColumnMetadata key_column = 3; + */ + com.google.spanner.executor.v1.ColumnMetadataOrBuilder getKeyColumnOrBuilder(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TransactionExecutionOptions.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TransactionExecutionOptions.java new file mode 100644 index 000000000000..73155bb9dae1 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TransactionExecutionOptions.java @@ -0,0 +1,997 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** Protobuf type {@code google.spanner.executor.v1.TransactionExecutionOptions} */ +@com.google.protobuf.Generated +public final class TransactionExecutionOptions extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.TransactionExecutionOptions) + TransactionExecutionOptionsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "TransactionExecutionOptions"); + } + + // Use TransactionExecutionOptions.newBuilder() to construct. + private TransactionExecutionOptions(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private TransactionExecutionOptions() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_TransactionExecutionOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_TransactionExecutionOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.TransactionExecutionOptions.class, + com.google.spanner.executor.v1.TransactionExecutionOptions.Builder.class); + } + + public static final int OPTIMISTIC_FIELD_NUMBER = 1; + private boolean optimistic_ = false; + + /** + * + * + *
    +   * Whether optimistic concurrency should be used to execute this transaction.
    +   * 
    + * + * bool optimistic = 1; + * + * @return The optimistic. + */ + @java.lang.Override + public boolean getOptimistic() { + return optimistic_; + } + + public static final int EXCLUDE_FROM_CHANGE_STREAMS_FIELD_NUMBER = 2; + private boolean excludeFromChangeStreams_ = false; + + /** + * + * + *
    +   * Whether traffic from this transaction will be excluded from tracking change
    +   * streams with allow_txn_exclusion=true.
    +   * 
    + * + * bool exclude_from_change_streams = 2; + * + * @return The excludeFromChangeStreams. + */ + @java.lang.Override + public boolean getExcludeFromChangeStreams() { + return excludeFromChangeStreams_; + } + + public static final int SERIALIZABLE_OPTIMISTIC_FIELD_NUMBER = 3; + private boolean serializableOptimistic_ = false; + + /** + * + * + *
    +   * Whether serializable isolation with optimistic mode concurrency should be
    +   * used to execute this transaction.
    +   * 
    + * + * bool serializable_optimistic = 3; + * + * @return The serializableOptimistic. + */ + @java.lang.Override + public boolean getSerializableOptimistic() { + return serializableOptimistic_; + } + + public static final int SNAPSHOT_ISOLATION_OPTIMISTIC_FIELD_NUMBER = 4; + private boolean snapshotIsolationOptimistic_ = false; + + /** + * + * + *
    +   * Whether snapshot isolation with optimistic mode concurrency should be used
    +   * to execute this transaction.
    +   * 
    + * + * bool snapshot_isolation_optimistic = 4; + * + * @return The snapshotIsolationOptimistic. + */ + @java.lang.Override + public boolean getSnapshotIsolationOptimistic() { + return snapshotIsolationOptimistic_; + } + + public static final int SNAPSHOT_ISOLATION_PESSIMISTIC_FIELD_NUMBER = 5; + private boolean snapshotIsolationPessimistic_ = false; + + /** + * + * + *
    +   * Whether snapshot isolation with pessimistic mode concurrency should be used
    +   * to execute this transaction.
    +   * 
    + * + * bool snapshot_isolation_pessimistic = 5; + * + * @return The snapshotIsolationPessimistic. + */ + @java.lang.Override + public boolean getSnapshotIsolationPessimistic() { + return snapshotIsolationPessimistic_; + } + + public static final int EXCLUDE_TXN_FROM_CHANGE_STREAMS_FIELD_NUMBER = 6; + private boolean excludeTxnFromChangeStreams_ = false; + + /** + * + * + *
    +   * Whether to exclude mutations of this transaction from the allowed tracking
    +   * change streams.
    +   * 
    + * + * bool exclude_txn_from_change_streams = 6; + * + * @return The excludeTxnFromChangeStreams. + */ + @java.lang.Override + public boolean getExcludeTxnFromChangeStreams() { + return excludeTxnFromChangeStreams_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (optimistic_ != false) { + output.writeBool(1, optimistic_); + } + if (excludeFromChangeStreams_ != false) { + output.writeBool(2, excludeFromChangeStreams_); + } + if (serializableOptimistic_ != false) { + output.writeBool(3, serializableOptimistic_); + } + if (snapshotIsolationOptimistic_ != false) { + output.writeBool(4, snapshotIsolationOptimistic_); + } + if (snapshotIsolationPessimistic_ != false) { + output.writeBool(5, snapshotIsolationPessimistic_); + } + if (excludeTxnFromChangeStreams_ != false) { + output.writeBool(6, excludeTxnFromChangeStreams_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (optimistic_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(1, optimistic_); + } + if (excludeFromChangeStreams_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(2, excludeFromChangeStreams_); + } + if (serializableOptimistic_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, serializableOptimistic_); + } + if (snapshotIsolationOptimistic_ != false) { + size += + com.google.protobuf.CodedOutputStream.computeBoolSize(4, snapshotIsolationOptimistic_); + } + if (snapshotIsolationPessimistic_ != false) { + size += + 
com.google.protobuf.CodedOutputStream.computeBoolSize(5, snapshotIsolationPessimistic_); + } + if (excludeTxnFromChangeStreams_ != false) { + size += + com.google.protobuf.CodedOutputStream.computeBoolSize(6, excludeTxnFromChangeStreams_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.TransactionExecutionOptions)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.TransactionExecutionOptions other = + (com.google.spanner.executor.v1.TransactionExecutionOptions) obj; + + if (getOptimistic() != other.getOptimistic()) return false; + if (getExcludeFromChangeStreams() != other.getExcludeFromChangeStreams()) return false; + if (getSerializableOptimistic() != other.getSerializableOptimistic()) return false; + if (getSnapshotIsolationOptimistic() != other.getSnapshotIsolationOptimistic()) return false; + if (getSnapshotIsolationPessimistic() != other.getSnapshotIsolationPessimistic()) return false; + if (getExcludeTxnFromChangeStreams() != other.getExcludeTxnFromChangeStreams()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + OPTIMISTIC_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getOptimistic()); + hash = (37 * hash) + EXCLUDE_FROM_CHANGE_STREAMS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getExcludeFromChangeStreams()); + hash = (37 * hash) + SERIALIZABLE_OPTIMISTIC_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getSerializableOptimistic()); + hash = (37 * hash) + 
SNAPSHOT_ISOLATION_OPTIMISTIC_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getSnapshotIsolationOptimistic()); + hash = (37 * hash) + SNAPSHOT_ISOLATION_PESSIMISTIC_FIELD_NUMBER; + hash = + (53 * hash) + com.google.protobuf.Internal.hashBoolean(getSnapshotIsolationPessimistic()); + hash = (37 * hash) + EXCLUDE_TXN_FROM_CHANGE_STREAMS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getExcludeTxnFromChangeStreams()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.TransactionExecutionOptions parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.TransactionExecutionOptions parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.TransactionExecutionOptions parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.TransactionExecutionOptions parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.TransactionExecutionOptions parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.TransactionExecutionOptions parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.TransactionExecutionOptions parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.TransactionExecutionOptions parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.TransactionExecutionOptions parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.TransactionExecutionOptions parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.TransactionExecutionOptions parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.TransactionExecutionOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return 
DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.TransactionExecutionOptions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** Protobuf type {@code google.spanner.executor.v1.TransactionExecutionOptions} */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.TransactionExecutionOptions) + com.google.spanner.executor.v1.TransactionExecutionOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_TransactionExecutionOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_TransactionExecutionOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.TransactionExecutionOptions.class, + com.google.spanner.executor.v1.TransactionExecutionOptions.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.TransactionExecutionOptions.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + optimistic_ = false; + excludeFromChangeStreams_ = false; + serializableOptimistic_ = 
false; + snapshotIsolationOptimistic_ = false; + snapshotIsolationPessimistic_ = false; + excludeTxnFromChangeStreams_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_TransactionExecutionOptions_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.TransactionExecutionOptions getDefaultInstanceForType() { + return com.google.spanner.executor.v1.TransactionExecutionOptions.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.TransactionExecutionOptions build() { + com.google.spanner.executor.v1.TransactionExecutionOptions result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.TransactionExecutionOptions buildPartial() { + com.google.spanner.executor.v1.TransactionExecutionOptions result = + new com.google.spanner.executor.v1.TransactionExecutionOptions(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.TransactionExecutionOptions result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.optimistic_ = optimistic_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.excludeFromChangeStreams_ = excludeFromChangeStreams_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.serializableOptimistic_ = serializableOptimistic_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.snapshotIsolationOptimistic_ = snapshotIsolationOptimistic_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.snapshotIsolationPessimistic_ = snapshotIsolationPessimistic_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + 
result.excludeTxnFromChangeStreams_ = excludeTxnFromChangeStreams_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.TransactionExecutionOptions) { + return mergeFrom((com.google.spanner.executor.v1.TransactionExecutionOptions) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.TransactionExecutionOptions other) { + if (other == com.google.spanner.executor.v1.TransactionExecutionOptions.getDefaultInstance()) + return this; + if (other.getOptimistic() != false) { + setOptimistic(other.getOptimistic()); + } + if (other.getExcludeFromChangeStreams() != false) { + setExcludeFromChangeStreams(other.getExcludeFromChangeStreams()); + } + if (other.getSerializableOptimistic() != false) { + setSerializableOptimistic(other.getSerializableOptimistic()); + } + if (other.getSnapshotIsolationOptimistic() != false) { + setSnapshotIsolationOptimistic(other.getSnapshotIsolationOptimistic()); + } + if (other.getSnapshotIsolationPessimistic() != false) { + setSnapshotIsolationPessimistic(other.getSnapshotIsolationPessimistic()); + } + if (other.getExcludeTxnFromChangeStreams() != false) { + setExcludeTxnFromChangeStreams(other.getExcludeTxnFromChangeStreams()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + optimistic_ = input.readBool(); + bitField0_ |= 0x00000001; + 
break; + } // case 8 + case 16: + { + excludeFromChangeStreams_ = input.readBool(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 24: + { + serializableOptimistic_ = input.readBool(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 32: + { + snapshotIsolationOptimistic_ = input.readBool(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 40: + { + snapshotIsolationPessimistic_ = input.readBool(); + bitField0_ |= 0x00000010; + break; + } // case 40 + case 48: + { + excludeTxnFromChangeStreams_ = input.readBool(); + bitField0_ |= 0x00000020; + break; + } // case 48 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private boolean optimistic_; + + /** + * + * + *
    +     * Whether optimistic concurrency should be used to execute this transaction.
    +     * 
    + * + * bool optimistic = 1; + * + * @return The optimistic. + */ + @java.lang.Override + public boolean getOptimistic() { + return optimistic_; + } + + /** + * + * + *
    +     * Whether optimistic concurrency should be used to execute this transaction.
    +     * 
    + * + * bool optimistic = 1; + * + * @param value The optimistic to set. + * @return This builder for chaining. + */ + public Builder setOptimistic(boolean value) { + + optimistic_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Whether optimistic concurrency should be used to execute this transaction.
    +     * 
    + * + * bool optimistic = 1; + * + * @return This builder for chaining. + */ + public Builder clearOptimistic() { + bitField0_ = (bitField0_ & ~0x00000001); + optimistic_ = false; + onChanged(); + return this; + } + + private boolean excludeFromChangeStreams_; + + /** + * + * + *
    +     * Whether traffic from this transaction will be excluded from tracking change
    +     * streams with allow_txn_exclusion=true.
    +     * 
    + * + * bool exclude_from_change_streams = 2; + * + * @return The excludeFromChangeStreams. + */ + @java.lang.Override + public boolean getExcludeFromChangeStreams() { + return excludeFromChangeStreams_; + } + + /** + * + * + *
    +     * Whether traffic from this transaction will be excluded from tracking change
    +     * streams with allow_txn_exclusion=true.
    +     * 
    + * + * bool exclude_from_change_streams = 2; + * + * @param value The excludeFromChangeStreams to set. + * @return This builder for chaining. + */ + public Builder setExcludeFromChangeStreams(boolean value) { + + excludeFromChangeStreams_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Whether traffic from this transaction will be excluded from tracking change
    +     * streams with allow_txn_exclusion=true.
    +     * 
    + * + * bool exclude_from_change_streams = 2; + * + * @return This builder for chaining. + */ + public Builder clearExcludeFromChangeStreams() { + bitField0_ = (bitField0_ & ~0x00000002); + excludeFromChangeStreams_ = false; + onChanged(); + return this; + } + + private boolean serializableOptimistic_; + + /** + * + * + *
    +     * Whether serializable isolation with optimistic mode concurrency should be
    +     * used to execute this transaction.
    +     * 
    + * + * bool serializable_optimistic = 3; + * + * @return The serializableOptimistic. + */ + @java.lang.Override + public boolean getSerializableOptimistic() { + return serializableOptimistic_; + } + + /** + * + * + *
    +     * Whether serializable isolation with optimistic mode concurrency should be
    +     * used to execute this transaction.
    +     * 
    + * + * bool serializable_optimistic = 3; + * + * @param value The serializableOptimistic to set. + * @return This builder for chaining. + */ + public Builder setSerializableOptimistic(boolean value) { + + serializableOptimistic_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Whether serializable isolation with optimistic mode concurrency should be
    +     * used to execute this transaction.
    +     * 
    + * + * bool serializable_optimistic = 3; + * + * @return This builder for chaining. + */ + public Builder clearSerializableOptimistic() { + bitField0_ = (bitField0_ & ~0x00000004); + serializableOptimistic_ = false; + onChanged(); + return this; + } + + private boolean snapshotIsolationOptimistic_; + + /** + * + * + *
    +     * Whether snapshot isolation with optimistic mode concurrency should be used
    +     * to execute this transaction.
    +     * 
    + * + * bool snapshot_isolation_optimistic = 4; + * + * @return The snapshotIsolationOptimistic. + */ + @java.lang.Override + public boolean getSnapshotIsolationOptimistic() { + return snapshotIsolationOptimistic_; + } + + /** + * + * + *
    +     * Whether snapshot isolation with optimistic mode concurrency should be used
    +     * to execute this transaction.
    +     * 
    + * + * bool snapshot_isolation_optimistic = 4; + * + * @param value The snapshotIsolationOptimistic to set. + * @return This builder for chaining. + */ + public Builder setSnapshotIsolationOptimistic(boolean value) { + + snapshotIsolationOptimistic_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Whether snapshot isolation with optimistic mode concurrency should be used
    +     * to execute this transaction.
    +     * 
    + * + * bool snapshot_isolation_optimistic = 4; + * + * @return This builder for chaining. + */ + public Builder clearSnapshotIsolationOptimistic() { + bitField0_ = (bitField0_ & ~0x00000008); + snapshotIsolationOptimistic_ = false; + onChanged(); + return this; + } + + private boolean snapshotIsolationPessimistic_; + + /** + * + * + *
    +     * Whether snapshot isolation with pessimistic mode concurrency should be used
    +     * to execute this transaction.
    +     * 
    + * + * bool snapshot_isolation_pessimistic = 5; + * + * @return The snapshotIsolationPessimistic. + */ + @java.lang.Override + public boolean getSnapshotIsolationPessimistic() { + return snapshotIsolationPessimistic_; + } + + /** + * + * + *
    +     * Whether snapshot isolation with pessimistic mode concurrency should be used
    +     * to execute this transaction.
    +     * 
    + * + * bool snapshot_isolation_pessimistic = 5; + * + * @param value The snapshotIsolationPessimistic to set. + * @return This builder for chaining. + */ + public Builder setSnapshotIsolationPessimistic(boolean value) { + + snapshotIsolationPessimistic_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Whether snapshot isolation with pessimistic mode concurrency should be used
    +     * to execute this transaction.
    +     * 
    + * + * bool snapshot_isolation_pessimistic = 5; + * + * @return This builder for chaining. + */ + public Builder clearSnapshotIsolationPessimistic() { + bitField0_ = (bitField0_ & ~0x00000010); + snapshotIsolationPessimistic_ = false; + onChanged(); + return this; + } + + private boolean excludeTxnFromChangeStreams_; + + /** + * + * + *
    +     * Whether to exclude mutations of this transaction from the allowed tracking
    +     * change streams.
    +     * 
    + * + * bool exclude_txn_from_change_streams = 6; + * + * @return The excludeTxnFromChangeStreams. + */ + @java.lang.Override + public boolean getExcludeTxnFromChangeStreams() { + return excludeTxnFromChangeStreams_; + } + + /** + * + * + *
    +     * Whether to exclude mutations of this transaction from the allowed tracking
    +     * change streams.
    +     * 
    + * + * bool exclude_txn_from_change_streams = 6; + * + * @param value The excludeTxnFromChangeStreams to set. + * @return This builder for chaining. + */ + public Builder setExcludeTxnFromChangeStreams(boolean value) { + + excludeTxnFromChangeStreams_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Whether to exclude mutations of this transaction from the allowed tracking
    +     * change streams.
    +     * 
    + * + * bool exclude_txn_from_change_streams = 6; + * + * @return This builder for chaining. + */ + public Builder clearExcludeTxnFromChangeStreams() { + bitField0_ = (bitField0_ & ~0x00000020); + excludeTxnFromChangeStreams_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.TransactionExecutionOptions) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.TransactionExecutionOptions) + private static final com.google.spanner.executor.v1.TransactionExecutionOptions DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.TransactionExecutionOptions(); + } + + public static com.google.spanner.executor.v1.TransactionExecutionOptions getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TransactionExecutionOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.spanner.executor.v1.TransactionExecutionOptions getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TransactionExecutionOptionsOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TransactionExecutionOptionsOrBuilder.java new file mode 100644 index 000000000000..d8a2ec73d1ea --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TransactionExecutionOptionsOrBuilder.java @@ -0,0 +1,111 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface TransactionExecutionOptionsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.TransactionExecutionOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Whether optimistic concurrency should be used to execute this transaction.
    +   * 
    + * + * bool optimistic = 1; + * + * @return The optimistic. + */ + boolean getOptimistic(); + + /** + * + * + *
    +   * Whether traffic from this transaction will be excluded from tracking change
    +   * streams with allow_txn_exclusion=true.
    +   * 
    + * + * bool exclude_from_change_streams = 2; + * + * @return The excludeFromChangeStreams. + */ + boolean getExcludeFromChangeStreams(); + + /** + * + * + *
    +   * Whether serializable isolation with optimistic mode concurrency should be
    +   * used to execute this transaction.
    +   * 
    + * + * bool serializable_optimistic = 3; + * + * @return The serializableOptimistic. + */ + boolean getSerializableOptimistic(); + + /** + * + * + *
    +   * Whether snapshot isolation with optimistic mode concurrency should be used
    +   * to execute this transaction.
    +   * 
    + * + * bool snapshot_isolation_optimistic = 4; + * + * @return The snapshotIsolationOptimistic. + */ + boolean getSnapshotIsolationOptimistic(); + + /** + * + * + *
    +   * Whether snapshot isolation with pessimistic mode concurrency should be used
    +   * to execute this transaction.
    +   * 
    + * + * bool snapshot_isolation_pessimistic = 5; + * + * @return The snapshotIsolationPessimistic. + */ + boolean getSnapshotIsolationPessimistic(); + + /** + * + * + *
    +   * Whether to exclude mutations of this transaction from the allowed tracking
    +   * change streams.
    +   * 
    + * + * bool exclude_txn_from_change_streams = 6; + * + * @return The excludeTxnFromChangeStreams. + */ + boolean getExcludeTxnFromChangeStreams(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudBackupAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudBackupAction.java new file mode 100644 index 000000000000..d6ecbab2ba5e --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudBackupAction.java @@ -0,0 +1,1296 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that updates a Cloud Spanner database backup.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.UpdateCloudBackupAction} + */ +@com.google.protobuf.Generated +public final class UpdateCloudBackupAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.UpdateCloudBackupAction) + UpdateCloudBackupActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateCloudBackupAction"); + } + + // Use UpdateCloudBackupAction.newBuilder() to construct. + private UpdateCloudBackupAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateCloudBackupAction() { + projectId_ = ""; + instanceId_ = ""; + backupId_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateCloudBackupAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateCloudBackupAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.UpdateCloudBackupAction.class, + com.google.spanner.executor.v1.UpdateCloudBackupAction.Builder.class); + } + + private int bitField0_; + public static final int PROJECT_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INSTANCE_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + @java.lang.Override + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BACKUP_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object backupId_ = ""; + + /** + * + * + *
    +   * The id of the backup to update, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The backupId. + */ + @java.lang.Override + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupId_ = s; + return s; + } + } + + /** + * + * + *
    +   * The id of the backup to update, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The bytes for backupId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int EXPIRE_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp expireTime_; + + /** + * + * + *
    +   * Output only. Updated value of expire_time, this is the only field
    +   * that supported to be updated.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + @java.lang.Override + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Output only. Updated value of expire_time, this is the only field
    +   * that supported to be updated.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getExpireTime() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + /** + * + * + *
    +   * Output only. Updated value of expire_time, this is the only field
    +   * that supported to be updated.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(backupId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, backupId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(4, getExpireTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(backupId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, backupId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += 
com.google.protobuf.CodedOutputStream.computeMessageSize(4, getExpireTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.UpdateCloudBackupAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.UpdateCloudBackupAction other = + (com.google.spanner.executor.v1.UpdateCloudBackupAction) obj; + + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getInstanceId().equals(other.getInstanceId())) return false; + if (!getBackupId().equals(other.getBackupId())) return false; + if (hasExpireTime() != other.hasExpireTime()) return false; + if (hasExpireTime()) { + if (!getExpireTime().equals(other.getExpireTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); + if (hasExpireTime()) { + hash = (37 * hash) + EXPIRE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getExpireTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.UpdateCloudBackupAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.UpdateCloudBackupAction parseFrom( + 
java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateCloudBackupAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.UpdateCloudBackupAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateCloudBackupAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.UpdateCloudBackupAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateCloudBackupAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.UpdateCloudBackupAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateCloudBackupAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + 
public static com.google.spanner.executor.v1.UpdateCloudBackupAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateCloudBackupAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.UpdateCloudBackupAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.UpdateCloudBackupAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that updates a Cloud Spanner database backup.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.UpdateCloudBackupAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.UpdateCloudBackupAction) + com.google.spanner.executor.v1.UpdateCloudBackupActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateCloudBackupAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateCloudBackupAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.UpdateCloudBackupAction.class, + com.google.spanner.executor.v1.UpdateCloudBackupAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.UpdateCloudBackupAction.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetExpireTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + projectId_ = ""; + instanceId_ = ""; + backupId_ = ""; + expireTime_ = null; + if (expireTimeBuilder_ != null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + 
.internal_static_google_spanner_executor_v1_UpdateCloudBackupAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudBackupAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.UpdateCloudBackupAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudBackupAction build() { + com.google.spanner.executor.v1.UpdateCloudBackupAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudBackupAction buildPartial() { + com.google.spanner.executor.v1.UpdateCloudBackupAction result = + new com.google.spanner.executor.v1.UpdateCloudBackupAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.UpdateCloudBackupAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.projectId_ = projectId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.instanceId_ = instanceId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.backupId_ = backupId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.expireTime_ = expireTimeBuilder_ == null ? 
expireTime_ : expireTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.UpdateCloudBackupAction) { + return mergeFrom((com.google.spanner.executor.v1.UpdateCloudBackupAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.UpdateCloudBackupAction other) { + if (other == com.google.spanner.executor.v1.UpdateCloudBackupAction.getDefaultInstance()) + return this; + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getBackupId().isEmpty()) { + backupId_ = other.backupId_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasExpireTime()) { + mergeExpireTime(other.getExpireTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + instanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + backupId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: 
+ { + input.readMessage( + internalGetExpireTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 1; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearInstanceId() { + instanceId_ = getDefaultInstance().getInstanceId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 2; + * + * @param value The bytes for instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object backupId_ = ""; + + /** + * + * + *
    +     * The id of the backup to update, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @return The backupId. + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The id of the backup to update, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @return The bytes for backupId. + */ + public com.google.protobuf.ByteString getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The id of the backup to update, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @param value The backupId to set. + * @return This builder for chaining. + */ + public Builder setBackupId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + backupId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The id of the backup to update, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @return This builder for chaining. + */ + public Builder clearBackupId() { + backupId_ = getDefaultInstance().getBackupId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The id of the backup to update, e.g. "test-backup".
    +     * 
    + * + * string backup_id = 3; + * + * @param value The bytes for backupId to set. + * @return This builder for chaining. + */ + public Builder setBackupIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + backupId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp expireTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + expireTimeBuilder_; + + /** + * + * + *
    +     * Output only. Updated value of expire_time, this is the only field
    +     * that is supported to be updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + public boolean hasExpireTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Output only. Updated value of expire_time, this is the only field
    +     * that is supported to be updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + public com.google.protobuf.Timestamp getExpireTime() { + if (expireTimeBuilder_ == null) { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } else { + return expireTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. Updated value of expire_time, this is the only field
    +     * that is supported to be updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + expireTime_ = value; + } else { + expireTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Updated value of expire_time, this is the only field
    +     * that is supported to be updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setExpireTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (expireTimeBuilder_ == null) { + expireTime_ = builderForValue.build(); + } else { + expireTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Updated value of expire_time, this is the only field
    +     * that is supported to be updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && expireTime_ != null + && expireTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getExpireTimeBuilder().mergeFrom(value); + } else { + expireTime_ = value; + } + } else { + expireTimeBuilder_.mergeFrom(value); + } + if (expireTime_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. Updated value of expire_time, this is the only field
    +     * that is supported to be updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearExpireTime() { + bitField0_ = (bitField0_ & ~0x00000008); + expireTime_ = null; + if (expireTimeBuilder_ != null) { + expireTimeBuilder_.dispose(); + expireTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. Updated value of expire_time, this is the only field
    +     * that is supported to be updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getExpireTimeBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetExpireTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. Updated value of expire_time, this is the only field
    +     * that is supported to be updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + if (expireTimeBuilder_ != null) { + return expireTimeBuilder_.getMessageOrBuilder(); + } else { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } + } + + /** + * + * + *
    +     * Output only. Updated value of expire_time, this is the only field
    +     * that is supported to be updated.
    +     * 
    + * + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetExpireTimeFieldBuilder() { + if (expireTimeBuilder_ == null) { + expireTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getExpireTime(), getParentForChildren(), isClean()); + expireTime_ = null; + } + return expireTimeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.UpdateCloudBackupAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.UpdateCloudBackupAction) + private static final com.google.spanner.executor.v1.UpdateCloudBackupAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.UpdateCloudBackupAction(); + } + + public static com.google.spanner.executor.v1.UpdateCloudBackupAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateCloudBackupAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudBackupAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudBackupActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudBackupActionOrBuilder.java new file mode 100644 index 000000000000..f7631f2017d9 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudBackupActionOrBuilder.java @@ -0,0 +1,149 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface UpdateCloudBackupActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.UpdateCloudBackupAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 1; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The instanceId. + */ + java.lang.String getInstanceId(); + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 2; + * + * @return The bytes for instanceId. + */ + com.google.protobuf.ByteString getInstanceIdBytes(); + + /** + * + * + *
    +   * The id of the backup to update, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The backupId. + */ + java.lang.String getBackupId(); + + /** + * + * + *
    +   * The id of the backup to update, e.g. "test-backup".
    +   * 
    + * + * string backup_id = 3; + * + * @return The bytes for backupId. + */ + com.google.protobuf.ByteString getBackupIdBytes(); + + /** + * + * + *
    +   * Output only. Updated value of expire_time, this is the only field
    +   * that is supported to be updated.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + boolean hasExpireTime(); + + /** + * + * + *
    +   * Output only. Updated value of expire_time, this is the only field
    +   * that is supported to be updated.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + com.google.protobuf.Timestamp getExpireTime(); + + /** + * + * + *
    +   * Output only. Updated value of expire_time, this is the only field
    +   * that is supported to be updated.
    +   * 
    + * + * .google.protobuf.Timestamp expire_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseAction.java new file mode 100644 index 000000000000..3a390421fbbc --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseAction.java @@ -0,0 +1,1076 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that updates a Cloud Spanner database.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.UpdateCloudDatabaseAction} + */ +@com.google.protobuf.Generated +public final class UpdateCloudDatabaseAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.UpdateCloudDatabaseAction) + UpdateCloudDatabaseActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateCloudDatabaseAction"); + } + + // Use UpdateCloudDatabaseAction.newBuilder() to construct. + private UpdateCloudDatabaseAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateCloudDatabaseAction() { + instanceId_ = ""; + projectId_ = ""; + databaseName_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateCloudDatabaseAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateCloudDatabaseAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.UpdateCloudDatabaseAction.class, + com.google.spanner.executor.v1.UpdateCloudDatabaseAction.Builder.class); + } + + public static final int INSTANCE_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The instanceId. + */ + @java.lang.Override + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The bytes for instanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROJECT_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DATABASE_NAME_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object databaseName_ = ""; + + /** + * + * + *
    +   * Cloud database name (not full path), e.g. "db0".
    +   * 
    + * + * string database_name = 3; + * + * @return The databaseName. + */ + @java.lang.Override + public java.lang.String getDatabaseName() { + java.lang.Object ref = databaseName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseName_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud database name (not full path), e.g. "db0".
    +   * 
    + * + * string database_name = 3; + * + * @return The bytes for databaseName. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseNameBytes() { + java.lang.Object ref = databaseName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ENABLE_DROP_PROTECTION_FIELD_NUMBER = 4; + private boolean enableDropProtection_ = false; + + /** + * + * + *
    +   * Updated value of enable_drop_protection, this is the only field that has
    +   * supported to be updated.
    +   * 
    + * + * bool enable_drop_protection = 4; + * + * @return The enableDropProtection. + */ + @java.lang.Override + public boolean getEnableDropProtection() { + return enableDropProtection_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseName_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, databaseName_); + } + if (enableDropProtection_ != false) { + output.writeBool(4, enableDropProtection_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseName_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, databaseName_); + } + if (enableDropProtection_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(4, enableDropProtection_); + } + size += getUnknownFields().getSerializedSize(); + 
memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.UpdateCloudDatabaseAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.UpdateCloudDatabaseAction other = + (com.google.spanner.executor.v1.UpdateCloudDatabaseAction) obj; + + if (!getInstanceId().equals(other.getInstanceId())) return false; + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getDatabaseName().equals(other.getDatabaseName())) return false; + if (getEnableDropProtection() != other.getEnableDropProtection()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + DATABASE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getDatabaseName().hashCode(); + hash = (37 * hash) + ENABLE_DROP_PROTECTION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableDropProtection()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseAction parseDelimitedFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.UpdateCloudDatabaseAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that updates a Cloud Spanner database.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.UpdateCloudDatabaseAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.UpdateCloudDatabaseAction) + com.google.spanner.executor.v1.UpdateCloudDatabaseActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateCloudDatabaseAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateCloudDatabaseAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.UpdateCloudDatabaseAction.class, + com.google.spanner.executor.v1.UpdateCloudDatabaseAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.UpdateCloudDatabaseAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + instanceId_ = ""; + projectId_ = ""; + databaseName_ = ""; + enableDropProtection_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateCloudDatabaseAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudDatabaseAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.UpdateCloudDatabaseAction.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.executor.v1.UpdateCloudDatabaseAction build() { + com.google.spanner.executor.v1.UpdateCloudDatabaseAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudDatabaseAction buildPartial() { + com.google.spanner.executor.v1.UpdateCloudDatabaseAction result = + new com.google.spanner.executor.v1.UpdateCloudDatabaseAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.UpdateCloudDatabaseAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.instanceId_ = instanceId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.projectId_ = projectId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.databaseName_ = databaseName_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.enableDropProtection_ = enableDropProtection_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.UpdateCloudDatabaseAction) { + return mergeFrom((com.google.spanner.executor.v1.UpdateCloudDatabaseAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.UpdateCloudDatabaseAction other) { + if (other == com.google.spanner.executor.v1.UpdateCloudDatabaseAction.getDefaultInstance()) + return this; + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getDatabaseName().isEmpty()) { + databaseName_ = other.databaseName_; + bitField0_ |= 0x00000004; + onChanged(); + } + 
if (other.getEnableDropProtection() != false) { + setEnableDropProtection(other.getEnableDropProtection()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + instanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + databaseName_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 32: + { + enableDropProtection_ = input.readBool(); + bitField0_ |= 0x00000008; + break; + } // case 32 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @return The instanceId. + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @return The bytes for instanceId. + */ + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @param value The instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearInstanceId() { + instanceId_ = getDefaultInstance().getInstanceId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @param value The bytes for instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object databaseName_ = ""; + + /** + * + * + *
    +     * Cloud database name (not full path), e.g. "db0".
    +     * 
    + * + * string database_name = 3; + * + * @return The databaseName. + */ + public java.lang.String getDatabaseName() { + java.lang.Object ref = databaseName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud database name (not full path), e.g. "db0".
    +     * 
    + * + * string database_name = 3; + * + * @return The bytes for databaseName. + */ + public com.google.protobuf.ByteString getDatabaseNameBytes() { + java.lang.Object ref = databaseName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud database name (not full path), e.g. "db0".
    +     * 
    + * + * string database_name = 3; + * + * @param value The databaseName to set. + * @return This builder for chaining. + */ + public Builder setDatabaseName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + databaseName_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud database name (not full path), e.g. "db0".
    +     * 
    + * + * string database_name = 3; + * + * @return This builder for chaining. + */ + public Builder clearDatabaseName() { + databaseName_ = getDefaultInstance().getDatabaseName(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud database name (not full path), e.g. "db0".
    +     * 
    + * + * string database_name = 3; + * + * @param value The bytes for databaseName to set. + * @return This builder for chaining. + */ + public Builder setDatabaseNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + databaseName_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private boolean enableDropProtection_; + + /** + * + * + *
    +     * Updated value of enable_drop_protection, this is the only field that has
    +     * supported to be updated.
    +     * 
    + * + * bool enable_drop_protection = 4; + * + * @return The enableDropProtection. + */ + @java.lang.Override + public boolean getEnableDropProtection() { + return enableDropProtection_; + } + + /** + * + * + *
    +     * Updated value of enable_drop_protection, this is the only field that has
    +     * supported to be updated.
    +     * 
    + * + * bool enable_drop_protection = 4; + * + * @param value The enableDropProtection to set. + * @return This builder for chaining. + */ + public Builder setEnableDropProtection(boolean value) { + + enableDropProtection_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Updated value of enable_drop_protection, this is the only field that has
    +     * supported to be updated.
    +     * 
    + * + * bool enable_drop_protection = 4; + * + * @return This builder for chaining. + */ + public Builder clearEnableDropProtection() { + bitField0_ = (bitField0_ & ~0x00000008); + enableDropProtection_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.UpdateCloudDatabaseAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.UpdateCloudDatabaseAction) + private static final com.google.spanner.executor.v1.UpdateCloudDatabaseAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.UpdateCloudDatabaseAction(); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateCloudDatabaseAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudDatabaseAction getDefaultInstanceForType() { + 
return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseActionOrBuilder.java new file mode 100644 index 000000000000..56e207cc9b9c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseActionOrBuilder.java @@ -0,0 +1,120 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface UpdateCloudDatabaseActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.UpdateCloudDatabaseAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The instanceId. + */ + java.lang.String getInstanceId(); + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The bytes for instanceId. + */ + com.google.protobuf.ByteString getInstanceIdBytes(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * Cloud database name (not full path), e.g. "db0".
    +   * 
    + * + * string database_name = 3; + * + * @return The databaseName. + */ + java.lang.String getDatabaseName(); + + /** + * + * + *
    +   * Cloud database name (not full path), e.g. "db0".
    +   * 
    + * + * string database_name = 3; + * + * @return The bytes for databaseName. + */ + com.google.protobuf.ByteString getDatabaseNameBytes(); + + /** + * + * + *
    +   * Updated value of enable_drop_protection, this is the only field that has
    +   * supported to be updated.
    +   * 
    + * + * bool enable_drop_protection = 4; + * + * @return The enableDropProtection. + */ + boolean getEnableDropProtection(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseDdlAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseDdlAction.java new file mode 100644 index 000000000000..50ac941aa1d9 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseDdlAction.java @@ -0,0 +1,1573 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that updates the schema of a Cloud Spanner database.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.UpdateCloudDatabaseDdlAction} + */ +@com.google.protobuf.Generated +public final class UpdateCloudDatabaseDdlAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.UpdateCloudDatabaseDdlAction) + UpdateCloudDatabaseDdlActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateCloudDatabaseDdlAction"); + } + + // Use UpdateCloudDatabaseDdlAction.newBuilder() to construct. + private UpdateCloudDatabaseDdlAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateCloudDatabaseDdlAction() { + instanceId_ = ""; + projectId_ = ""; + databaseId_ = ""; + sdlStatement_ = com.google.protobuf.LazyStringArrayList.emptyList(); + operationId_ = ""; + protoDescriptors_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateCloudDatabaseDdlAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateCloudDatabaseDdlAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction.class, + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction.Builder.class); + } + + private int bitField0_; + public static final int INSTANCE_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceId_ 
= ""; + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The instanceId. + */ + @java.lang.Override + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The bytes for instanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROJECT_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DATABASE_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object databaseId_ = ""; + + /** + * + * + *
    +   * Cloud database ID (not full path), e.g. "db0".
    +   * 
    + * + * string database_id = 3; + * + * @return The databaseId. + */ + @java.lang.Override + public java.lang.String getDatabaseId() { + java.lang.Object ref = databaseId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud database ID (not full path), e.g. "db0".
    +   * 
    + * + * string database_id = 3; + * + * @return The bytes for databaseId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseIdBytes() { + java.lang.Object ref = databaseId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SDL_STATEMENT_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList sdlStatement_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * SDL statements to apply to the database.
    +   * 
    + * + * repeated string sdl_statement = 4; + * + * @return A list containing the sdlStatement. + */ + public com.google.protobuf.ProtocolStringList getSdlStatementList() { + return sdlStatement_; + } + + /** + * + * + *
    +   * SDL statements to apply to the database.
    +   * 
    + * + * repeated string sdl_statement = 4; + * + * @return The count of sdlStatement. + */ + public int getSdlStatementCount() { + return sdlStatement_.size(); + } + + /** + * + * + *
    +   * SDL statements to apply to the database.
    +   * 
    + * + * repeated string sdl_statement = 4; + * + * @param index The index of the element to return. + * @return The sdlStatement at the given index. + */ + public java.lang.String getSdlStatement(int index) { + return sdlStatement_.get(index); + } + + /** + * + * + *
    +   * SDL statements to apply to the database.
    +   * 
    + * + * repeated string sdl_statement = 4; + * + * @param index The index of the value to return. + * @return The bytes of the sdlStatement at the given index. + */ + public com.google.protobuf.ByteString getSdlStatementBytes(int index) { + return sdlStatement_.getByteString(index); + } + + public static final int OPERATION_ID_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object operationId_ = ""; + + /** + * + * + *
    +   * Op ID can be used to track progress of the update. If set, it must be
    +   * unique per database. If not set, Cloud Spanner will generate operation ID
    +   * automatically.
    +   * 
    + * + * string operation_id = 5; + * + * @return The operationId. + */ + @java.lang.Override + public java.lang.String getOperationId() { + java.lang.Object ref = operationId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + operationId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Op ID can be used to track progress of the update. If set, it must be
    +   * unique per database. If not set, Cloud Spanner will generate operation ID
    +   * automatically.
    +   * 
    + * + * string operation_id = 5; + * + * @return The bytes for operationId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getOperationIdBytes() { + java.lang.Object ref = operationId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + operationId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROTO_DESCRIPTORS_FIELD_NUMBER = 6; + private com.google.protobuf.ByteString protoDescriptors_ = com.google.protobuf.ByteString.EMPTY; + + /** + * optional bytes proto_descriptors = 6; + * + * @return Whether the protoDescriptors field is set. + */ + @java.lang.Override + public boolean hasProtoDescriptors() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * optional bytes proto_descriptors = 6; + * + * @return The protoDescriptors. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProtoDescriptors() { + return protoDescriptors_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, databaseId_); + } + for (int i = 0; i < sdlStatement_.size(); i++) { + 
com.google.protobuf.GeneratedMessage.writeString(output, 4, sdlStatement_.getRaw(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(operationId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, operationId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeBytes(6, protoDescriptors_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, projectId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(databaseId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, databaseId_); + } + { + int dataSize = 0; + for (int i = 0; i < sdlStatement_.size(); i++) { + dataSize += computeStringSizeNoTag(sdlStatement_.getRaw(i)); + } + size += dataSize; + size += 1 * getSdlStatementList().size(); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(operationId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, operationId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(6, protoDescriptors_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction other = + (com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction) obj; + + if 
(!getInstanceId().equals(other.getInstanceId())) return false; + if (!getProjectId().equals(other.getProjectId())) return false; + if (!getDatabaseId().equals(other.getDatabaseId())) return false; + if (!getSdlStatementList().equals(other.getSdlStatementList())) return false; + if (!getOperationId().equals(other.getOperationId())) return false; + if (hasProtoDescriptors() != other.hasProtoDescriptors()) return false; + if (hasProtoDescriptors()) { + if (!getProtoDescriptors().equals(other.getProtoDescriptors())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + hash = (37 * hash) + DATABASE_ID_FIELD_NUMBER; + hash = (53 * hash) + getDatabaseId().hashCode(); + if (getSdlStatementCount() > 0) { + hash = (37 * hash) + SDL_STATEMENT_FIELD_NUMBER; + hash = (53 * hash) + getSdlStatementList().hashCode(); + } + hash = (37 * hash) + OPERATION_ID_FIELD_NUMBER; + hash = (53 * hash) + getOperationId().hashCode(); + if (hasProtoDescriptors()) { + hash = (37 * hash) + PROTO_DESCRIPTORS_FIELD_NUMBER; + hash = (53 * hash) + getProtoDescriptors().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static 
com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that updates the schema of a Cloud Spanner database.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.UpdateCloudDatabaseDdlAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.UpdateCloudDatabaseDdlAction) + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateCloudDatabaseDdlAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateCloudDatabaseDdlAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction.class, + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + instanceId_ = ""; + projectId_ = ""; + databaseId_ = ""; + sdlStatement_ = com.google.protobuf.LazyStringArrayList.emptyList(); + operationId_ = ""; + protoDescriptors_ = com.google.protobuf.ByteString.EMPTY; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateCloudDatabaseDdlAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction getDefaultInstanceForType() { + return 
com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction build() { + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction buildPartial() { + com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction result = + new com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.instanceId_ = instanceId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.projectId_ = projectId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.databaseId_ = databaseId_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + sdlStatement_.makeImmutable(); + result.sdlStatement_ = sdlStatement_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.operationId_ = operationId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000020) != 0)) { + result.protoDescriptors_ = protoDescriptors_; + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction) { + return mergeFrom((com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction other) { + if (other == 
com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction.getDefaultInstance()) + return this; + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getDatabaseId().isEmpty()) { + databaseId_ = other.databaseId_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.sdlStatement_.isEmpty()) { + if (sdlStatement_.isEmpty()) { + sdlStatement_ = other.sdlStatement_; + bitField0_ |= 0x00000008; + } else { + ensureSdlStatementIsMutable(); + sdlStatement_.addAll(other.sdlStatement_); + } + onChanged(); + } + if (!other.getOperationId().isEmpty()) { + operationId_ = other.operationId_; + bitField0_ |= 0x00000010; + onChanged(); + } + if (other.hasProtoDescriptors()) { + setProtoDescriptors(other.getProtoDescriptors()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + instanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + databaseId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureSdlStatementIsMutable(); + sdlStatement_.add(s); + break; + } // 
case 34 + case 42: + { + operationId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 50: + { + protoDescriptors_ = input.readBytes(); + bitField0_ |= 0x00000020; + break; + } // case 50 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @return The instanceId. + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @return The bytes for instanceId. + */ + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @param value The instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearInstanceId() { + instanceId_ = getDefaultInstance().getInstanceId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @param value The bytes for instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object databaseId_ = ""; + + /** + * + * + *
    +     * Cloud database ID (not full path), e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @return The databaseId. + */ + public java.lang.String getDatabaseId() { + java.lang.Object ref = databaseId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + databaseId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud database ID (not full path), e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @return The bytes for databaseId. + */ + public com.google.protobuf.ByteString getDatabaseIdBytes() { + java.lang.Object ref = databaseId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + databaseId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud database ID (not full path), e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @param value The databaseId to set. + * @return This builder for chaining. + */ + public Builder setDatabaseId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + databaseId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud database ID (not full path), e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @return This builder for chaining. + */ + public Builder clearDatabaseId() { + databaseId_ = getDefaultInstance().getDatabaseId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud database ID (not full path), e.g. "db0".
    +     * 
    + * + * string database_id = 3; + * + * @param value The bytes for databaseId to set. + * @return This builder for chaining. + */ + public Builder setDatabaseIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + databaseId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList sdlStatement_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureSdlStatementIsMutable() { + if (!sdlStatement_.isModifiable()) { + sdlStatement_ = new com.google.protobuf.LazyStringArrayList(sdlStatement_); + } + bitField0_ |= 0x00000008; + } + + /** + * + * + *
    +     * SDL statements to apply to the database.
    +     * 
    + * + * repeated string sdl_statement = 4; + * + * @return A list containing the sdlStatement. + */ + public com.google.protobuf.ProtocolStringList getSdlStatementList() { + sdlStatement_.makeImmutable(); + return sdlStatement_; + } + + /** + * + * + *
    +     * SDL statements to apply to the database.
    +     * 
    + * + * repeated string sdl_statement = 4; + * + * @return The count of sdlStatement. + */ + public int getSdlStatementCount() { + return sdlStatement_.size(); + } + + /** + * + * + *
    +     * SDL statements to apply to the database.
    +     * 
    + * + * repeated string sdl_statement = 4; + * + * @param index The index of the element to return. + * @return The sdlStatement at the given index. + */ + public java.lang.String getSdlStatement(int index) { + return sdlStatement_.get(index); + } + + /** + * + * + *
    +     * SDL statements to apply to the database.
    +     * 
    + * + * repeated string sdl_statement = 4; + * + * @param index The index of the value to return. + * @return The bytes of the sdlStatement at the given index. + */ + public com.google.protobuf.ByteString getSdlStatementBytes(int index) { + return sdlStatement_.getByteString(index); + } + + /** + * + * + *
    +     * SDL statements to apply to the database.
    +     * 
    + * + * repeated string sdl_statement = 4; + * + * @param index The index to set the value at. + * @param value The sdlStatement to set. + * @return This builder for chaining. + */ + public Builder setSdlStatement(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSdlStatementIsMutable(); + sdlStatement_.set(index, value); + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * SDL statements to apply to the database.
    +     * 
    + * + * repeated string sdl_statement = 4; + * + * @param value The sdlStatement to add. + * @return This builder for chaining. + */ + public Builder addSdlStatement(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSdlStatementIsMutable(); + sdlStatement_.add(value); + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * SDL statements to apply to the database.
    +     * 
    + * + * repeated string sdl_statement = 4; + * + * @param values The sdlStatement to add. + * @return This builder for chaining. + */ + public Builder addAllSdlStatement(java.lang.Iterable values) { + ensureSdlStatementIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, sdlStatement_); + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * SDL statements to apply to the database.
    +     * 
    + * + * repeated string sdl_statement = 4; + * + * @return This builder for chaining. + */ + public Builder clearSdlStatement() { + sdlStatement_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * SDL statements to apply to the database.
    +     * 
    + * + * repeated string sdl_statement = 4; + * + * @param value The bytes of the sdlStatement to add. + * @return This builder for chaining. + */ + public Builder addSdlStatementBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureSdlStatementIsMutable(); + sdlStatement_.add(value); + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private java.lang.Object operationId_ = ""; + + /** + * + * + *
    +     * Op ID can be used to track progress of the update. If set, it must be
    +     * unique per database. If not set, Cloud Spanner will generate operation ID
    +     * automatically.
    +     * 
    + * + * string operation_id = 5; + * + * @return The operationId. + */ + public java.lang.String getOperationId() { + java.lang.Object ref = operationId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + operationId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Op ID can be used to track progress of the update. If set, it must be
    +     * unique per database. If not set, Cloud Spanner will generate operation ID
    +     * automatically.
    +     * 
    + * + * string operation_id = 5; + * + * @return The bytes for operationId. + */ + public com.google.protobuf.ByteString getOperationIdBytes() { + java.lang.Object ref = operationId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + operationId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Op ID can be used to track progress of the update. If set, it must be
    +     * unique per database. If not set, Cloud Spanner will generate operation ID
    +     * automatically.
    +     * 
    + * + * string operation_id = 5; + * + * @param value The operationId to set. + * @return This builder for chaining. + */ + public Builder setOperationId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + operationId_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Op ID can be used to track progress of the update. If set, it must be
    +     * unique per database. If not set, Cloud Spanner will generate operation ID
    +     * automatically.
    +     * 
    + * + * string operation_id = 5; + * + * @return This builder for chaining. + */ + public Builder clearOperationId() { + operationId_ = getDefaultInstance().getOperationId(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Op ID can be used to track progress of the update. If set, it must be
    +     * unique per database. If not set, Cloud Spanner will generate operation ID
    +     * automatically.
    +     * 
    + * + * string operation_id = 5; + * + * @param value The bytes for operationId to set. + * @return This builder for chaining. + */ + public Builder setOperationIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + operationId_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString protoDescriptors_ = com.google.protobuf.ByteString.EMPTY; + + /** + * optional bytes proto_descriptors = 6; + * + * @return Whether the protoDescriptors field is set. + */ + @java.lang.Override + public boolean hasProtoDescriptors() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * optional bytes proto_descriptors = 6; + * + * @return The protoDescriptors. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProtoDescriptors() { + return protoDescriptors_; + } + + /** + * optional bytes proto_descriptors = 6; + * + * @param value The protoDescriptors to set. + * @return This builder for chaining. + */ + public Builder setProtoDescriptors(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + protoDescriptors_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * optional bytes proto_descriptors = 6; + * + * @return This builder for chaining. 
+ */ + public Builder clearProtoDescriptors() { + bitField0_ = (bitField0_ & ~0x00000020); + protoDescriptors_ = getDefaultInstance().getProtoDescriptors(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.UpdateCloudDatabaseDdlAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.UpdateCloudDatabaseDdlAction) + private static final com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction(); + } + + public static com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateCloudDatabaseDdlAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff 
--git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseDdlActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseDdlActionOrBuilder.java new file mode 100644 index 000000000000..bf0a800729b7 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseDdlActionOrBuilder.java @@ -0,0 +1,204 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface UpdateCloudDatabaseDdlActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.UpdateCloudDatabaseDdlAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The instanceId. + */ + java.lang.String getInstanceId(); + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The bytes for instanceId. + */ + com.google.protobuf.ByteString getInstanceIdBytes(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * Cloud database ID (not full path), e.g. "db0".
    +   * 
    + * + * string database_id = 3; + * + * @return The databaseId. + */ + java.lang.String getDatabaseId(); + + /** + * + * + *
    +   * Cloud database ID (not full path), e.g. "db0".
    +   * 
    + * + * string database_id = 3; + * + * @return The bytes for databaseId. + */ + com.google.protobuf.ByteString getDatabaseIdBytes(); + + /** + * + * + *
    +   * SDL statements to apply to the database.
    +   * 
    + * + * repeated string sdl_statement = 4; + * + * @return A list containing the sdlStatement. + */ + java.util.List getSdlStatementList(); + + /** + * + * + *
    +   * SDL statements to apply to the database.
    +   * 
    + * + * repeated string sdl_statement = 4; + * + * @return The count of sdlStatement. + */ + int getSdlStatementCount(); + + /** + * + * + *
    +   * SDL statements to apply to the database.
    +   * 
    + * + * repeated string sdl_statement = 4; + * + * @param index The index of the element to return. + * @return The sdlStatement at the given index. + */ + java.lang.String getSdlStatement(int index); + + /** + * + * + *
    +   * SDL statements to apply to the database.
    +   * 
    + * + * repeated string sdl_statement = 4; + * + * @param index The index of the value to return. + * @return The bytes of the sdlStatement at the given index. + */ + com.google.protobuf.ByteString getSdlStatementBytes(int index); + + /** + * + * + *
    +   * Op ID can be used to track progress of the update. If set, it must be
    +   * unique per database. If not set, Cloud Spanner will generate operation ID
    +   * automatically.
    +   * 
    + * + * string operation_id = 5; + * + * @return The operationId. + */ + java.lang.String getOperationId(); + + /** + * + * + *
    +   * Op ID can be used to track progress of the update. If set, it must be
    +   * unique per database. If not set, Cloud Spanner will generate operation ID
    +   * automatically.
    +   * 
    + * + * string operation_id = 5; + * + * @return The bytes for operationId. + */ + com.google.protobuf.ByteString getOperationIdBytes(); + + /** + * optional bytes proto_descriptors = 6; + * + * @return Whether the protoDescriptors field is set. + */ + boolean hasProtoDescriptors(); + + /** + * optional bytes proto_descriptors = 6; + * + * @return The protoDescriptors. + */ + com.google.protobuf.ByteString getProtoDescriptors(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudInstanceAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudInstanceAction.java new file mode 100644 index 000000000000..ec50faabe6a6 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudInstanceAction.java @@ -0,0 +1,2151 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that updates a Cloud Spanner instance.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.UpdateCloudInstanceAction} + */ +@com.google.protobuf.Generated +public final class UpdateCloudInstanceAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.UpdateCloudInstanceAction) + UpdateCloudInstanceActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateCloudInstanceAction"); + } + + // Use UpdateCloudInstanceAction.newBuilder() to construct. + private UpdateCloudInstanceAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateCloudInstanceAction() { + instanceId_ = ""; + projectId_ = ""; + displayName_ = ""; + edition_ = 0; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateCloudInstanceAction_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 6: + return internalGetLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateCloudInstanceAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.UpdateCloudInstanceAction.class, + com.google.spanner.executor.v1.UpdateCloudInstanceAction.Builder.class); + } + + private int 
bitField0_; + public static final int INSTANCE_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The instanceId. + */ + @java.lang.Override + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The bytes for instanceId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROJECT_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DISPLAY_NAME_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object displayName_ = ""; + + /** + * + * + *
    +   * The descriptive name for this instance as it appears in UIs.
    +   * Must be unique per project and between 4 and 30 characters in length.
    +   * 
    + * + * optional string display_name = 3; + * + * @return Whether the displayName field is set. + */ + @java.lang.Override + public boolean hasDisplayName() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The descriptive name for this instance as it appears in UIs.
    +   * Must be unique per project and between 4 and 30 characters in length.
    +   * 
    + * + * optional string display_name = 3; + * + * @return The displayName. + */ + @java.lang.Override + public java.lang.String getDisplayName() { + java.lang.Object ref = displayName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + displayName_ = s; + return s; + } + } + + /** + * + * + *
    +   * The descriptive name for this instance as it appears in UIs.
    +   * Must be unique per project and between 4 and 30 characters in length.
    +   * 
    + * + * optional string display_name = 3; + * + * @return The bytes for displayName. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDisplayNameBytes() { + java.lang.Object ref = displayName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + displayName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int NODE_COUNT_FIELD_NUMBER = 4; + private int nodeCount_ = 0; + + /** + * + * + *
    +   * The number of nodes allocated to this instance. At most one of either
    +   * node_count or processing_units should be present in the message.
    +   * 
    + * + * optional int32 node_count = 4; + * + * @return Whether the nodeCount field is set. + */ + @java.lang.Override + public boolean hasNodeCount() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * The number of nodes allocated to this instance. At most one of either
    +   * node_count or processing_units should be present in the message.
    +   * 
    + * + * optional int32 node_count = 4; + * + * @return The nodeCount. + */ + @java.lang.Override + public int getNodeCount() { + return nodeCount_; + } + + public static final int PROCESSING_UNITS_FIELD_NUMBER = 5; + private int processingUnits_ = 0; + + /** + * + * + *
    +   * The number of processing units allocated to this instance. At most one of
    +   * processing_units or node_count should be present in the message.
    +   * 
    + * + * optional int32 processing_units = 5; + * + * @return Whether the processingUnits field is set. + */ + @java.lang.Override + public boolean hasProcessingUnits() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * The number of processing units allocated to this instance. At most one of
    +   * processing_units or node_count should be present in the message.
    +   * 
    + * + * optional int32 processing_units = 5; + * + * @return The processingUnits. + */ + @java.lang.Override + public int getProcessingUnits() { + return processingUnits_; + } + + public static final int AUTOSCALING_CONFIG_FIELD_NUMBER = 7; + private com.google.spanner.admin.instance.v1.AutoscalingConfig autoscalingConfig_; + + /** + * + * + *
    +   * The autoscaling config for this instance. If non-empty, this instance is
    +   * using autoscaling (processing_units and node_count should be set to
    +   * 0 if used).
    +   * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + * + * @return Whether the autoscalingConfig field is set. + */ + @java.lang.Override + public boolean hasAutoscalingConfig() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +   * The autoscaling config for this instance. If non-empty, this instance is
    +   * using autoscaling (processing_units and node_count should be set to
    +   * 0 if used).
    +   * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + * + * @return The autoscalingConfig. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig getAutoscalingConfig() { + return autoscalingConfig_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance() + : autoscalingConfig_; + } + + /** + * + * + *
    +   * The autoscaling config for this instance. If non-empty, this instance is
    +   * using autoscaling (processing_units and node_count should be set to
    +   * 0 if used).
    +   * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder + getAutoscalingConfigOrBuilder() { + return autoscalingConfig_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance() + : autoscalingConfig_; + } + + public static final int LABELS_FIELD_NUMBER = 6; + + private static final class LabelsDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateCloudInstanceAction_LabelsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField labels_; + + private com.google.protobuf.MapField internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField(LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 6; + */ + @java.lang.Override + public boolean containsLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetLabels().getMap().containsKey(key); + } + + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 6; + */ + @java.lang.Override + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 6; + */ + @java.lang.Override + public /* nullable */ java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 6; + */ + @java.lang.Override + public java.lang.String getLabelsOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int EDITION_FIELD_NUMBER = 8; + private int edition_ = 0; + + /** + * + * + *
    +   * The edition of the instance.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 8; + * + * @return The enum numeric value on the wire for edition. + */ + @java.lang.Override + public int getEditionValue() { + return edition_; + } + + /** + * + * + *
    +   * The edition of the instance.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 8; + * + * @return The edition. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance.Edition getEdition() { + com.google.spanner.admin.instance.v1.Instance.Edition result = + com.google.spanner.admin.instance.v1.Instance.Edition.forNumber(edition_); + return result == null + ? com.google.spanner.admin.instance.v1.Instance.Edition.UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, projectId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, displayName_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeInt32(4, nodeCount_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeInt32(5, processingUnits_); + } + com.google.protobuf.GeneratedMessage.serializeStringMapTo( + output, internalGetLabels(), LabelsDefaultEntryHolder.defaultEntry, 6); + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(7, getAutoscalingConfig()); + } + if (edition_ + != com.google.spanner.admin.instance.v1.Instance.Edition.EDITION_UNSPECIFIED.getNumber()) { + output.writeEnum(8, edition_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return 
size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(instanceId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, instanceId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, projectId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, displayName_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(4, nodeCount_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(5, processingUnits_); + } + for (java.util.Map.Entry entry : + internalGetLabels().getMap().entrySet()) { + com.google.protobuf.MapEntry labels__ = + LabelsDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, labels__); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getAutoscalingConfig()); + } + if (edition_ + != com.google.spanner.admin.instance.v1.Instance.Edition.EDITION_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(8, edition_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.UpdateCloudInstanceAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.UpdateCloudInstanceAction other = + (com.google.spanner.executor.v1.UpdateCloudInstanceAction) obj; + + if (!getInstanceId().equals(other.getInstanceId())) return false; + if (!getProjectId().equals(other.getProjectId())) return false; + if 
(hasDisplayName() != other.hasDisplayName()) return false; + if (hasDisplayName()) { + if (!getDisplayName().equals(other.getDisplayName())) return false; + } + if (hasNodeCount() != other.hasNodeCount()) return false; + if (hasNodeCount()) { + if (getNodeCount() != other.getNodeCount()) return false; + } + if (hasProcessingUnits() != other.hasProcessingUnits()) return false; + if (hasProcessingUnits()) { + if (getProcessingUnits() != other.getProcessingUnits()) return false; + } + if (hasAutoscalingConfig() != other.hasAutoscalingConfig()) return false; + if (hasAutoscalingConfig()) { + if (!getAutoscalingConfig().equals(other.getAutoscalingConfig())) return false; + } + if (!internalGetLabels().equals(other.internalGetLabels())) return false; + if (edition_ != other.edition_) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + INSTANCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getInstanceId().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + if (hasDisplayName()) { + hash = (37 * hash) + DISPLAY_NAME_FIELD_NUMBER; + hash = (53 * hash) + getDisplayName().hashCode(); + } + if (hasNodeCount()) { + hash = (37 * hash) + NODE_COUNT_FIELD_NUMBER; + hash = (53 * hash) + getNodeCount(); + } + if (hasProcessingUnits()) { + hash = (37 * hash) + PROCESSING_UNITS_FIELD_NUMBER; + hash = (53 * hash) + getProcessingUnits(); + } + if (hasAutoscalingConfig()) { + hash = (37 * hash) + AUTOSCALING_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getAutoscalingConfig().hashCode(); + } + if (!internalGetLabels().getMap().isEmpty()) { + hash = (37 * hash) + LABELS_FIELD_NUMBER; + hash = (53 * hash) + internalGetLabels().hashCode(); + } + hash = (37 * hash) + 
EDITION_FIELD_NUMBER; + hash = (53 * hash) + edition_; + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.UpdateCloudInstanceAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.UpdateCloudInstanceAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateCloudInstanceAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.UpdateCloudInstanceAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateCloudInstanceAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.UpdateCloudInstanceAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateCloudInstanceAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.UpdateCloudInstanceAction parseFrom( + 
java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateCloudInstanceAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.UpdateCloudInstanceAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateCloudInstanceAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.UpdateCloudInstanceAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.UpdateCloudInstanceAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that updates a Cloud Spanner instance.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.UpdateCloudInstanceAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.UpdateCloudInstanceAction) + com.google.spanner.executor.v1.UpdateCloudInstanceActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateCloudInstanceAction_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 6: + return internalGetLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 6: + return internalGetMutableLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateCloudInstanceAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.UpdateCloudInstanceAction.class, + com.google.spanner.executor.v1.UpdateCloudInstanceAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.UpdateCloudInstanceAction.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + 
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetAutoscalingConfigFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + instanceId_ = ""; + projectId_ = ""; + displayName_ = ""; + nodeCount_ = 0; + processingUnits_ = 0; + autoscalingConfig_ = null; + if (autoscalingConfigBuilder_ != null) { + autoscalingConfigBuilder_.dispose(); + autoscalingConfigBuilder_ = null; + } + internalGetMutableLabels().clear(); + edition_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateCloudInstanceAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudInstanceAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.UpdateCloudInstanceAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudInstanceAction build() { + com.google.spanner.executor.v1.UpdateCloudInstanceAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudInstanceAction buildPartial() { + com.google.spanner.executor.v1.UpdateCloudInstanceAction result = + new com.google.spanner.executor.v1.UpdateCloudInstanceAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.UpdateCloudInstanceAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.instanceId_ = instanceId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.projectId_ = projectId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + 
result.displayName_ = displayName_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.nodeCount_ = nodeCount_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.processingUnits_ = processingUnits_; + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.autoscalingConfig_ = + autoscalingConfigBuilder_ == null + ? autoscalingConfig_ + : autoscalingConfigBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.labels_ = internalGetLabels(); + result.labels_.makeImmutable(); + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.edition_ = edition_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.UpdateCloudInstanceAction) { + return mergeFrom((com.google.spanner.executor.v1.UpdateCloudInstanceAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.UpdateCloudInstanceAction other) { + if (other == com.google.spanner.executor.v1.UpdateCloudInstanceAction.getDefaultInstance()) + return this; + if (!other.getInstanceId().isEmpty()) { + instanceId_ = other.instanceId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasDisplayName()) { + displayName_ = other.displayName_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasNodeCount()) { + setNodeCount(other.getNodeCount()); + } + if (other.hasProcessingUnits()) { + setProcessingUnits(other.getProcessingUnits()); + } + if (other.hasAutoscalingConfig()) { + mergeAutoscalingConfig(other.getAutoscalingConfig()); + } + internalGetMutableLabels().mergeFrom(other.internalGetLabels()); + bitField0_ |= 
0x00000040; + if (other.edition_ != 0) { + setEditionValue(other.getEditionValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + instanceId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + displayName_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 32: + { + nodeCount_ = input.readInt32(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 40: + { + processingUnits_ = input.readInt32(); + bitField0_ |= 0x00000010; + break; + } // case 40 + case 50: + { + com.google.protobuf.MapEntry labels__ = + input.readMessage( + LabelsDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableLabels() + .getMutableMap() + .put(labels__.getKey(), labels__.getValue()); + bitField0_ |= 0x00000040; + break; + } // case 50 + case 58: + { + input.readMessage( + internalGetAutoscalingConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000020; + break; + } // case 58 + case 64: + { + edition_ = input.readEnum(); + bitField0_ |= 0x00000080; + break; + } // case 64 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object instanceId_ = ""; + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @return The instanceId. + */ + public java.lang.String getInstanceId() { + java.lang.Object ref = instanceId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + instanceId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @return The bytes for instanceId. + */ + public com.google.protobuf.ByteString getInstanceIdBytes() { + java.lang.Object ref = instanceId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + instanceId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @param value The instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + instanceId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearInstanceId() { + instanceId_ = getDefaultInstance().getInstanceId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud instance ID (not path), e.g. "test-instance".
    +     * 
    + * + * string instance_id = 1; + * + * @param value The bytes for instanceId to set. + * @return This builder for chaining. + */ + public Builder setInstanceIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + instanceId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object displayName_ = ""; + + /** + * + * + *
    +     * The descriptive name for this instance as it appears in UIs.
    +     * Must be unique per project and between 4 and 30 characters in length.
    +     * 
    + * + * optional string display_name = 3; + * + * @return Whether the displayName field is set. + */ + public boolean hasDisplayName() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * The descriptive name for this instance as it appears in UIs.
    +     * Must be unique per project and between 4 and 30 characters in length.
    +     * 
    + * + * optional string display_name = 3; + * + * @return The displayName. + */ + public java.lang.String getDisplayName() { + java.lang.Object ref = displayName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + displayName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The descriptive name for this instance as it appears in UIs.
    +     * Must be unique per project and between 4 and 30 characters in length.
    +     * 
    + * + * optional string display_name = 3; + * + * @return The bytes for displayName. + */ + public com.google.protobuf.ByteString getDisplayNameBytes() { + java.lang.Object ref = displayName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + displayName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The descriptive name for this instance as it appears in UIs.
    +     * Must be unique per project and between 4 and 30 characters in length.
    +     * 
    + * + * optional string display_name = 3; + * + * @param value The displayName to set. + * @return This builder for chaining. + */ + public Builder setDisplayName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + displayName_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The descriptive name for this instance as it appears in UIs.
    +     * Must be unique per project and between 4 and 30 characters in length.
    +     * 
    + * + * optional string display_name = 3; + * + * @return This builder for chaining. + */ + public Builder clearDisplayName() { + displayName_ = getDefaultInstance().getDisplayName(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The descriptive name for this instance as it appears in UIs.
    +     * Must be unique per project and between 4 and 30 characters in length.
    +     * 
    + * + * optional string display_name = 3; + * + * @param value The bytes for displayName to set. + * @return This builder for chaining. + */ + public Builder setDisplayNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + displayName_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private int nodeCount_; + + /** + * + * + *
    +     * The number of nodes allocated to this instance. At most one of either
    +     * node_count or processing_units should be present in the message.
    +     * 
    + * + * optional int32 node_count = 4; + * + * @return Whether the nodeCount field is set. + */ + @java.lang.Override + public boolean hasNodeCount() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * The number of nodes allocated to this instance. At most one of either
    +     * node_count or processing_units should be present in the message.
    +     * 
    + * + * optional int32 node_count = 4; + * + * @return The nodeCount. + */ + @java.lang.Override + public int getNodeCount() { + return nodeCount_; + } + + /** + * + * + *
    +     * The number of nodes allocated to this instance. At most one of either
    +     * node_count or processing_units should be present in the message.
    +     * 
    + * + * optional int32 node_count = 4; + * + * @param value The nodeCount to set. + * @return This builder for chaining. + */ + public Builder setNodeCount(int value) { + + nodeCount_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The number of nodes allocated to this instance. At most one of either
    +     * node_count or processing_units should be present in the message.
    +     * 
    + * + * optional int32 node_count = 4; + * + * @return This builder for chaining. + */ + public Builder clearNodeCount() { + bitField0_ = (bitField0_ & ~0x00000008); + nodeCount_ = 0; + onChanged(); + return this; + } + + private int processingUnits_; + + /** + * + * + *
    +     * The number of processing units allocated to this instance. At most one of
    +     * processing_units or node_count should be present in the message.
    +     * 
    + * + * optional int32 processing_units = 5; + * + * @return Whether the processingUnits field is set. + */ + @java.lang.Override + public boolean hasProcessingUnits() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * The number of processing units allocated to this instance. At most one of
    +     * processing_units or node_count should be present in the message.
    +     * 
    + * + * optional int32 processing_units = 5; + * + * @return The processingUnits. + */ + @java.lang.Override + public int getProcessingUnits() { + return processingUnits_; + } + + /** + * + * + *
    +     * The number of processing units allocated to this instance. At most one of
    +     * processing_units or node_count should be present in the message.
    +     * 
    + * + * optional int32 processing_units = 5; + * + * @param value The processingUnits to set. + * @return This builder for chaining. + */ + public Builder setProcessingUnits(int value) { + + processingUnits_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The number of processing units allocated to this instance. At most one of
    +     * processing_units or node_count should be present in the message.
    +     * 
    + * + * optional int32 processing_units = 5; + * + * @return This builder for chaining. + */ + public Builder clearProcessingUnits() { + bitField0_ = (bitField0_ & ~0x00000010); + processingUnits_ = 0; + onChanged(); + return this; + } + + private com.google.spanner.admin.instance.v1.AutoscalingConfig autoscalingConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig, + com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder> + autoscalingConfigBuilder_; + + /** + * + * + *
    +     * The autoscaling config for this instance. If non-empty, this instance is
    +     * using autoscaling (processing_units and node_count should be set to
    +     * 0 if used).
    +     * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + * + * @return Whether the autoscalingConfig field is set. + */ + public boolean hasAutoscalingConfig() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
    +     * The autoscaling config for this instance. If non-empty, this instance is
    +     * using autoscaling (processing_units and node_count should be set to
    +     * 0 if used).
    +     * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + * + * @return The autoscalingConfig. + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig getAutoscalingConfig() { + if (autoscalingConfigBuilder_ == null) { + return autoscalingConfig_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance() + : autoscalingConfig_; + } else { + return autoscalingConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The autoscaling config for this instance. If non-empty, this instance is
    +     * using autoscaling (processing_units and node_count should be set to
    +     * 0 if used).
    +     * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + */ + public Builder setAutoscalingConfig( + com.google.spanner.admin.instance.v1.AutoscalingConfig value) { + if (autoscalingConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + autoscalingConfig_ = value; + } else { + autoscalingConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The autoscaling config for this instance. If non-empty, this instance is
    +     * using autoscaling (processing_units and node_count should be set to
    +     * 0 if used).
    +     * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + */ + public Builder setAutoscalingConfig( + com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder builderForValue) { + if (autoscalingConfigBuilder_ == null) { + autoscalingConfig_ = builderForValue.build(); + } else { + autoscalingConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The autoscaling config for this instance. If non-empty, this instance is
    +     * using autoscaling (processing_units and node_count should be set to
    +     * 0 if used).
    +     * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + */ + public Builder mergeAutoscalingConfig( + com.google.spanner.admin.instance.v1.AutoscalingConfig value) { + if (autoscalingConfigBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0) + && autoscalingConfig_ != null + && autoscalingConfig_ + != com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance()) { + getAutoscalingConfigBuilder().mergeFrom(value); + } else { + autoscalingConfig_ = value; + } + } else { + autoscalingConfigBuilder_.mergeFrom(value); + } + if (autoscalingConfig_ != null) { + bitField0_ |= 0x00000020; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The autoscaling config for this instance. If non-empty, this instance is
    +     * using autoscaling (processing_units and node_count should be set to
    +     * 0 if used).
    +     * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + */ + public Builder clearAutoscalingConfig() { + bitField0_ = (bitField0_ & ~0x00000020); + autoscalingConfig_ = null; + if (autoscalingConfigBuilder_ != null) { + autoscalingConfigBuilder_.dispose(); + autoscalingConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The autoscaling config for this instance. If non-empty, this instance is
    +     * using autoscaling (processing_units and node_count should be set to
    +     * 0 if used).
    +     * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder + getAutoscalingConfigBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return internalGetAutoscalingConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The autoscaling config for this instance. If non-empty, this instance is
    +     * using autoscaling (processing_units and node_count should be set to
    +     * 0 if used).
    +     * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder + getAutoscalingConfigOrBuilder() { + if (autoscalingConfigBuilder_ != null) { + return autoscalingConfigBuilder_.getMessageOrBuilder(); + } else { + return autoscalingConfig_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance() + : autoscalingConfig_; + } + } + + /** + * + * + *
    +     * The autoscaling config for this instance. If non-empty, this instance is
    +     * using autoscaling (processing_units and node_count should be set to
    +     * 0 if used).
    +     * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig, + com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder> + internalGetAutoscalingConfigFieldBuilder() { + if (autoscalingConfigBuilder_ == null) { + autoscalingConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.admin.instance.v1.AutoscalingConfig, + com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder>( + getAutoscalingConfig(), getParentForChildren(), isClean()); + autoscalingConfig_ = null; + } + return autoscalingConfigBuilder_; + } + + private com.google.protobuf.MapField labels_; + + private com.google.protobuf.MapField internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField(LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + private com.google.protobuf.MapField + internalGetMutableLabels() { + if (labels_ == null) { + labels_ = com.google.protobuf.MapField.newMapField(LabelsDefaultEntryHolder.defaultEntry); + } + if (!labels_.isMutable()) { + labels_ = labels_.copy(); + } + bitField0_ |= 0x00000040; + onChanged(); + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + + /** + * + * + *
    +     * labels.
    +     * 
    + * + * map<string, string> labels = 6; + */ + @java.lang.Override + public boolean containsLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetLabels().getMap().containsKey(key); + } + + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + + /** + * + * + *
    +     * labels.
    +     * 
    + * + * map<string, string> labels = 6; + */ + @java.lang.Override + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + + /** + * + * + *
    +     * labels.
    +     * 
    + * + * map<string, string> labels = 6; + */ + @java.lang.Override + public /* nullable */ java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
    +     * labels.
    +     * 
    + * + * map<string, string> labels = 6; + */ + @java.lang.Override + public java.lang.String getLabelsOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearLabels() { + bitField0_ = (bitField0_ & ~0x00000040); + internalGetMutableLabels().getMutableMap().clear(); + return this; + } + + /** + * + * + *
    +     * labels.
    +     * 
    + * + * map<string, string> labels = 6; + */ + public Builder removeLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableLabels().getMutableMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableLabels() { + bitField0_ |= 0x00000040; + return internalGetMutableLabels().getMutableMap(); + } + + /** + * + * + *
    +     * labels.
    +     * 
    + * + * map<string, string> labels = 6; + */ + public Builder putLabels(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableLabels().getMutableMap().put(key, value); + bitField0_ |= 0x00000040; + return this; + } + + /** + * + * + *
    +     * labels.
    +     * 
    + * + * map<string, string> labels = 6; + */ + public Builder putAllLabels(java.util.Map values) { + internalGetMutableLabels().getMutableMap().putAll(values); + bitField0_ |= 0x00000040; + return this; + } + + private int edition_ = 0; + + /** + * + * + *
    +     * The edition of the instance.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 8; + * + * @return The enum numeric value on the wire for edition. + */ + @java.lang.Override + public int getEditionValue() { + return edition_; + } + + /** + * + * + *
    +     * The edition of the instance.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 8; + * + * @param value The enum numeric value on the wire for edition to set. + * @return This builder for chaining. + */ + public Builder setEditionValue(int value) { + edition_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The edition of the instance.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 8; + * + * @return The edition. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance.Edition getEdition() { + com.google.spanner.admin.instance.v1.Instance.Edition result = + com.google.spanner.admin.instance.v1.Instance.Edition.forNumber(edition_); + return result == null + ? com.google.spanner.admin.instance.v1.Instance.Edition.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * The edition of the instance.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 8; + * + * @param value The edition to set. + * @return This builder for chaining. + */ + public Builder setEdition(com.google.spanner.admin.instance.v1.Instance.Edition value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000080; + edition_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The edition of the instance.
    +     * 
    + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 8; + * + * @return This builder for chaining. + */ + public Builder clearEdition() { + bitField0_ = (bitField0_ & ~0x00000080); + edition_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.UpdateCloudInstanceAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.UpdateCloudInstanceAction) + private static final com.google.spanner.executor.v1.UpdateCloudInstanceAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.UpdateCloudInstanceAction(); + } + + public static com.google.spanner.executor.v1.UpdateCloudInstanceAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateCloudInstanceAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.UpdateCloudInstanceAction getDefaultInstanceForType() { 
+ return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudInstanceActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudInstanceActionOrBuilder.java new file mode 100644 index 000000000000..157043851e24 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudInstanceActionOrBuilder.java @@ -0,0 +1,313 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface UpdateCloudInstanceActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.UpdateCloudInstanceAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The instanceId. + */ + java.lang.String getInstanceId(); + + /** + * + * + *
    +   * Cloud instance ID (not path), e.g. "test-instance".
    +   * 
    + * + * string instance_id = 1; + * + * @return The bytes for instanceId. + */ + com.google.protobuf.ByteString getInstanceIdBytes(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * The descriptive name for this instance as it appears in UIs.
    +   * Must be unique per project and between 4 and 30 characters in length.
    +   * 
    + * + * optional string display_name = 3; + * + * @return Whether the displayName field is set. + */ + boolean hasDisplayName(); + + /** + * + * + *
    +   * The descriptive name for this instance as it appears in UIs.
    +   * Must be unique per project and between 4 and 30 characters in length.
    +   * 
    + * + * optional string display_name = 3; + * + * @return The displayName. + */ + java.lang.String getDisplayName(); + + /** + * + * + *
    +   * The descriptive name for this instance as it appears in UIs.
    +   * Must be unique per project and between 4 and 30 characters in length.
    +   * 
    + * + * optional string display_name = 3; + * + * @return The bytes for displayName. + */ + com.google.protobuf.ByteString getDisplayNameBytes(); + + /** + * + * + *
    +   * The number of nodes allocated to this instance. At most one of either
    +   * node_count or processing_units should be present in the message.
    +   * 
    + * + * optional int32 node_count = 4; + * + * @return Whether the nodeCount field is set. + */ + boolean hasNodeCount(); + + /** + * + * + *
    +   * The number of nodes allocated to this instance. At most one of either
    +   * node_count or processing_units should be present in the message.
    +   * 
    + * + * optional int32 node_count = 4; + * + * @return The nodeCount. + */ + int getNodeCount(); + + /** + * + * + *
    +   * The number of processing units allocated to this instance. At most one of
    +   * processing_units or node_count should be present in the message.
    +   * 
    + * + * optional int32 processing_units = 5; + * + * @return Whether the processingUnits field is set. + */ + boolean hasProcessingUnits(); + + /** + * + * + *
    +   * The number of processing units allocated to this instance. At most one of
    +   * processing_units or node_count should be present in the message.
    +   * 
    + * + * optional int32 processing_units = 5; + * + * @return The processingUnits. + */ + int getProcessingUnits(); + + /** + * + * + *
    +   * The autoscaling config for this instance. If non-empty, this instance is
    +   * using autoscaling (processing_units and node_count should be set to
    +   * 0 if used).
    +   * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + * + * @return Whether the autoscalingConfig field is set. + */ + boolean hasAutoscalingConfig(); + + /** + * + * + *
    +   * The autoscaling config for this instance. If non-empty, this instance is
    +   * using autoscaling (processing_units and node_count should be set to
    +   * 0 if used).
    +   * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + * + * @return The autoscalingConfig. + */ + com.google.spanner.admin.instance.v1.AutoscalingConfig getAutoscalingConfig(); + + /** + * + * + *
    +   * The autoscaling config for this instance. If non-empty, this instance is
    +   * using autoscaling (processing_units and node_count should be set to
    +   * 0 if used).
    +   * 
    + * + * optional .google.spanner.admin.instance.v1.AutoscalingConfig autoscaling_config = 7; + * + */ + com.google.spanner.admin.instance.v1.AutoscalingConfigOrBuilder getAutoscalingConfigOrBuilder(); + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 6; + */ + int getLabelsCount(); + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 6; + */ + boolean containsLabels(java.lang.String key); + + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Deprecated + java.util.Map getLabels(); + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 6; + */ + java.util.Map getLabelsMap(); + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 6; + */ + /* nullable */ + java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue); + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 6; + */ + java.lang.String getLabelsOrThrow(java.lang.String key); + + /** + * + * + *
    +   * The edition of the instance.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 8; + * + * @return The enum numeric value on the wire for edition. + */ + int getEditionValue(); + + /** + * + * + *
    +   * The edition of the instance.
    +   * 
    + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 8; + * + * @return The edition. + */ + com.google.spanner.admin.instance.v1.Instance.Edition getEdition(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateUserInstanceConfigAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateUserInstanceConfigAction.java new file mode 100644 index 000000000000..26227dd965f2 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateUserInstanceConfigAction.java @@ -0,0 +1,1368 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Action that updates a user instance config.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.UpdateUserInstanceConfigAction} + */ +@com.google.protobuf.Generated +public final class UpdateUserInstanceConfigAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.UpdateUserInstanceConfigAction) + UpdateUserInstanceConfigActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "UpdateUserInstanceConfigAction"); + } + + // Use UpdateUserInstanceConfigAction.newBuilder() to construct. + private UpdateUserInstanceConfigAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private UpdateUserInstanceConfigAction() { + userConfigId_ = ""; + projectId_ = ""; + displayName_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateUserInstanceConfigAction_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 4: + return internalGetLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateUserInstanceConfigAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.UpdateUserInstanceConfigAction.class, + 
com.google.spanner.executor.v1.UpdateUserInstanceConfigAction.Builder.class); + } + + private int bitField0_; + public static final int USER_CONFIG_ID_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object userConfigId_ = ""; + + /** + * + * + *
    +   * User instance config ID (not path), e.g. "custom-config".
    +   * 
    + * + * string user_config_id = 1; + * + * @return The userConfigId. + */ + @java.lang.Override + public java.lang.String getUserConfigId() { + java.lang.Object ref = userConfigId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + userConfigId_ = s; + return s; + } + } + + /** + * + * + *
    +   * User instance config ID (not path), e.g. "custom-config".
    +   * 
    + * + * string user_config_id = 1; + * + * @return The bytes for userConfigId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getUserConfigIdBytes() { + java.lang.Object ref = userConfigId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + userConfigId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROJECT_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object projectId_ = ""; + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + @java.lang.Override + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } + } + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int DISPLAY_NAME_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object displayName_ = ""; + + /** + * + * + *
    +   * The descriptive name for this instance config as it appears in UIs.
    +   * 
    + * + * optional string display_name = 3; + * + * @return Whether the displayName field is set. + */ + @java.lang.Override + public boolean hasDisplayName() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The descriptive name for this instance config as it appears in UIs.
    +   * 
    + * + * optional string display_name = 3; + * + * @return The displayName. + */ + @java.lang.Override + public java.lang.String getDisplayName() { + java.lang.Object ref = displayName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + displayName_ = s; + return s; + } + } + + /** + * + * + *
    +   * The descriptive name for this instance config as it appears in UIs.
    +   * 
    + * + * optional string display_name = 3; + * + * @return The bytes for displayName. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDisplayNameBytes() { + java.lang.Object ref = displayName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + displayName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int LABELS_FIELD_NUMBER = 4; + + private static final class LabelsDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateUserInstanceConfigAction_LabelsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField labels_; + + private com.google.protobuf.MapField internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField(LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 4; + */ + @java.lang.Override + public boolean containsLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetLabels().getMap().containsKey(key); + } + + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 4; + */ + @java.lang.Override + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 4; + */ + @java.lang.Override + public /* nullable */ java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 4; + */ + @java.lang.Override + public java.lang.String getLabelsOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(userConfigId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, userConfigId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, projectId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, displayName_); + } + com.google.protobuf.GeneratedMessage.serializeStringMapTo( + output, internalGetLabels(), LabelsDefaultEntryHolder.defaultEntry, 4); + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(userConfigId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, userConfigId_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(projectId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, projectId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, displayName_); + } + for (java.util.Map.Entry 
entry : + internalGetLabels().getMap().entrySet()) { + com.google.protobuf.MapEntry labels__ = + LabelsDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, labels__); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.UpdateUserInstanceConfigAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.UpdateUserInstanceConfigAction other = + (com.google.spanner.executor.v1.UpdateUserInstanceConfigAction) obj; + + if (!getUserConfigId().equals(other.getUserConfigId())) return false; + if (!getProjectId().equals(other.getProjectId())) return false; + if (hasDisplayName() != other.hasDisplayName()) return false; + if (hasDisplayName()) { + if (!getDisplayName().equals(other.getDisplayName())) return false; + } + if (!internalGetLabels().equals(other.internalGetLabels())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + USER_CONFIG_ID_FIELD_NUMBER; + hash = (53 * hash) + getUserConfigId().hashCode(); + hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER; + hash = (53 * hash) + getProjectId().hashCode(); + if (hasDisplayName()) { + hash = (37 * hash) + DISPLAY_NAME_FIELD_NUMBER; + hash = (53 * hash) + getDisplayName().hashCode(); + } + if (!internalGetLabels().getMap().isEmpty()) { + hash = (37 * hash) + LABELS_FIELD_NUMBER; + hash = (53 * hash) + internalGetLabels().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = 
hash; + return hash; + } + + public static com.google.spanner.executor.v1.UpdateUserInstanceConfigAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.UpdateUserInstanceConfigAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateUserInstanceConfigAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.UpdateUserInstanceConfigAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateUserInstanceConfigAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.UpdateUserInstanceConfigAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateUserInstanceConfigAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.UpdateUserInstanceConfigAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateUserInstanceConfigAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.UpdateUserInstanceConfigAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.UpdateUserInstanceConfigAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.UpdateUserInstanceConfigAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.UpdateUserInstanceConfigAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Action that updates a user instance config.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.UpdateUserInstanceConfigAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.UpdateUserInstanceConfigAction) + com.google.spanner.executor.v1.UpdateUserInstanceConfigActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateUserInstanceConfigAction_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 4: + return internalGetLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 4: + return internalGetMutableLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateUserInstanceConfigAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.UpdateUserInstanceConfigAction.class, + com.google.spanner.executor.v1.UpdateUserInstanceConfigAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.UpdateUserInstanceConfigAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + 
userConfigId_ = ""; + projectId_ = ""; + displayName_ = ""; + internalGetMutableLabels().clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_UpdateUserInstanceConfigAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.UpdateUserInstanceConfigAction + getDefaultInstanceForType() { + return com.google.spanner.executor.v1.UpdateUserInstanceConfigAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.UpdateUserInstanceConfigAction build() { + com.google.spanner.executor.v1.UpdateUserInstanceConfigAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.UpdateUserInstanceConfigAction buildPartial() { + com.google.spanner.executor.v1.UpdateUserInstanceConfigAction result = + new com.google.spanner.executor.v1.UpdateUserInstanceConfigAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.executor.v1.UpdateUserInstanceConfigAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.userConfigId_ = userConfigId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.projectId_ = projectId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.displayName_ = displayName_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.labels_ = internalGetLabels(); + result.labels_.makeImmutable(); + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof 
com.google.spanner.executor.v1.UpdateUserInstanceConfigAction) { + return mergeFrom((com.google.spanner.executor.v1.UpdateUserInstanceConfigAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.UpdateUserInstanceConfigAction other) { + if (other + == com.google.spanner.executor.v1.UpdateUserInstanceConfigAction.getDefaultInstance()) + return this; + if (!other.getUserConfigId().isEmpty()) { + userConfigId_ = other.userConfigId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getProjectId().isEmpty()) { + projectId_ = other.projectId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasDisplayName()) { + displayName_ = other.displayName_; + bitField0_ |= 0x00000004; + onChanged(); + } + internalGetMutableLabels().mergeFrom(other.internalGetLabels()); + bitField0_ |= 0x00000008; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + userConfigId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + projectId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + displayName_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + com.google.protobuf.MapEntry labels__ = + input.readMessage( + LabelsDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + 
internalGetMutableLabels() + .getMutableMap() + .put(labels__.getKey(), labels__.getValue()); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object userConfigId_ = ""; + + /** + * + * + *
    +     * User instance config ID (not path), e.g. "custom-config".
    +     * 
    + * + * string user_config_id = 1; + * + * @return The userConfigId. + */ + public java.lang.String getUserConfigId() { + java.lang.Object ref = userConfigId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + userConfigId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * User instance config ID (not path), e.g. "custom-config".
    +     * 
    + * + * string user_config_id = 1; + * + * @return The bytes for userConfigId. + */ + public com.google.protobuf.ByteString getUserConfigIdBytes() { + java.lang.Object ref = userConfigId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + userConfigId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * User instance config ID (not path), e.g. "custom-config".
    +     * 
    + * + * string user_config_id = 1; + * + * @param value The userConfigId to set. + * @return This builder for chaining. + */ + public Builder setUserConfigId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + userConfigId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * User instance config ID (not path), e.g. "custom-config".
    +     * 
    + * + * string user_config_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearUserConfigId() { + userConfigId_ = getDefaultInstance().getUserConfigId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * User instance config ID (not path), e.g. "custom-config".
    +     * 
    + * + * string user_config_id = 1; + * + * @param value The bytes for userConfigId to set. + * @return This builder for chaining. + */ + public Builder setUserConfigIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + userConfigId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object projectId_ = ""; + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + public java.lang.String getProjectId() { + java.lang.Object ref = projectId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + projectId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + public com.google.protobuf.ByteString getProjectIdBytes() { + java.lang.Object ref = projectId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + projectId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearProjectId() { + projectId_ = getDefaultInstance().getProjectId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Cloud project ID, e.g. "spanner-cloud-systest".
    +     * 
    + * + * string project_id = 2; + * + * @param value The bytes for projectId to set. + * @return This builder for chaining. + */ + public Builder setProjectIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + projectId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object displayName_ = ""; + + /** + * + * + *
    +     * The descriptive name for this instance config as it appears in UIs.
    +     * 
    + * + * optional string display_name = 3; + * + * @return Whether the displayName field is set. + */ + public boolean hasDisplayName() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * The descriptive name for this instance config as it appears in UIs.
    +     * 
    + * + * optional string display_name = 3; + * + * @return The displayName. + */ + public java.lang.String getDisplayName() { + java.lang.Object ref = displayName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + displayName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The descriptive name for this instance config as it appears in UIs.
    +     * 
    + * + * optional string display_name = 3; + * + * @return The bytes for displayName. + */ + public com.google.protobuf.ByteString getDisplayNameBytes() { + java.lang.Object ref = displayName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + displayName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The descriptive name for this instance config as it appears in UIs.
    +     * 
    + * + * optional string display_name = 3; + * + * @param value The displayName to set. + * @return This builder for chaining. + */ + public Builder setDisplayName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + displayName_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The descriptive name for this instance config as it appears in UIs.
    +     * 
    + * + * optional string display_name = 3; + * + * @return This builder for chaining. + */ + public Builder clearDisplayName() { + displayName_ = getDefaultInstance().getDisplayName(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The descriptive name for this instance config as it appears in UIs.
    +     * 
    + * + * optional string display_name = 3; + * + * @param value The bytes for displayName to set. + * @return This builder for chaining. + */ + public Builder setDisplayNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + displayName_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.protobuf.MapField labels_; + + private com.google.protobuf.MapField internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField(LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + private com.google.protobuf.MapField + internalGetMutableLabels() { + if (labels_ == null) { + labels_ = com.google.protobuf.MapField.newMapField(LabelsDefaultEntryHolder.defaultEntry); + } + if (!labels_.isMutable()) { + labels_ = labels_.copy(); + } + bitField0_ |= 0x00000008; + onChanged(); + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + + /** + * + * + *
    +     * labels.
    +     * 
    + * + * map<string, string> labels = 4; + */ + @java.lang.Override + public boolean containsLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetLabels().getMap().containsKey(key); + } + + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + + /** + * + * + *
    +     * labels.
    +     * 
    + * + * map<string, string> labels = 4; + */ + @java.lang.Override + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + + /** + * + * + *
    +     * labels.
    +     * 
    + * + * map<string, string> labels = 4; + */ + @java.lang.Override + public /* nullable */ java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
    +     * labels.
    +     * 
    + * + * map<string, string> labels = 4; + */ + @java.lang.Override + public java.lang.String getLabelsOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearLabels() { + bitField0_ = (bitField0_ & ~0x00000008); + internalGetMutableLabels().getMutableMap().clear(); + return this; + } + + /** + * + * + *
    +     * labels.
    +     * 
    + * + * map<string, string> labels = 4; + */ + public Builder removeLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableLabels().getMutableMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableLabels() { + bitField0_ |= 0x00000008; + return internalGetMutableLabels().getMutableMap(); + } + + /** + * + * + *
    +     * labels.
    +     * 
    + * + * map<string, string> labels = 4; + */ + public Builder putLabels(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableLabels().getMutableMap().put(key, value); + bitField0_ |= 0x00000008; + return this; + } + + /** + * + * + *
    +     * labels.
    +     * 
    + * + * map<string, string> labels = 4; + */ + public Builder putAllLabels(java.util.Map values) { + internalGetMutableLabels().getMutableMap().putAll(values); + bitField0_ |= 0x00000008; + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.UpdateUserInstanceConfigAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.UpdateUserInstanceConfigAction) + private static final com.google.spanner.executor.v1.UpdateUserInstanceConfigAction + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.UpdateUserInstanceConfigAction(); + } + + public static com.google.spanner.executor.v1.UpdateUserInstanceConfigAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateUserInstanceConfigAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.UpdateUserInstanceConfigAction getDefaultInstanceForType() { + 
return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateUserInstanceConfigActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateUserInstanceConfigActionOrBuilder.java new file mode 100644 index 000000000000..43305028a47b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateUserInstanceConfigActionOrBuilder.java @@ -0,0 +1,182 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface UpdateUserInstanceConfigActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.UpdateUserInstanceConfigAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * User instance config ID (not path), e.g. "custom-config".
    +   * 
    + * + * string user_config_id = 1; + * + * @return The userConfigId. + */ + java.lang.String getUserConfigId(); + + /** + * + * + *
    +   * User instance config ID (not path), e.g. "custom-config".
    +   * 
    + * + * string user_config_id = 1; + * + * @return The bytes for userConfigId. + */ + com.google.protobuf.ByteString getUserConfigIdBytes(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The projectId. + */ + java.lang.String getProjectId(); + + /** + * + * + *
    +   * Cloud project ID, e.g. "spanner-cloud-systest".
    +   * 
    + * + * string project_id = 2; + * + * @return The bytes for projectId. + */ + com.google.protobuf.ByteString getProjectIdBytes(); + + /** + * + * + *
    +   * The descriptive name for this instance config as it appears in UIs.
    +   * 
    + * + * optional string display_name = 3; + * + * @return Whether the displayName field is set. + */ + boolean hasDisplayName(); + + /** + * + * + *
    +   * The descriptive name for this instance config as it appears in UIs.
    +   * 
    + * + * optional string display_name = 3; + * + * @return The displayName. + */ + java.lang.String getDisplayName(); + + /** + * + * + *
    +   * The descriptive name for this instance config as it appears in UIs.
    +   * 
    + * + * optional string display_name = 3; + * + * @return The bytes for displayName. + */ + com.google.protobuf.ByteString getDisplayNameBytes(); + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 4; + */ + int getLabelsCount(); + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 4; + */ + boolean containsLabels(java.lang.String key); + + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Deprecated + java.util.Map getLabels(); + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 4; + */ + java.util.Map getLabelsMap(); + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 4; + */ + /* nullable */ + java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue); + + /** + * + * + *
    +   * labels.
    +   * 
    + * + * map<string, string> labels = 4; + */ + java.lang.String getLabelsOrThrow(java.lang.String key); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/Value.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/Value.java new file mode 100644 index 000000000000..7099245848ab --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/Value.java @@ -0,0 +1,2945 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * Value represents a single value that can be read or written to/from
    + * Spanner.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.Value} + */ +@com.google.protobuf.Generated +public final class Value extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.Value) + ValueOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Value"); + } + + // Use Value.newBuilder() to construct. + private Value(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Value() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_Value_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_Value_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.Value.class, + com.google.spanner.executor.v1.Value.Builder.class); + } + + private int bitField0_; + private int valueTypeCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object valueType_; + + public enum ValueTypeCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + IS_NULL(1), + INT_VALUE(2), + BOOL_VALUE(3), + DOUBLE_VALUE(4), + BYTES_VALUE(5), + STRING_VALUE(6), + STRUCT_VALUE(7), + TIMESTAMP_VALUE(8), + DATE_DAYS_VALUE(9), + IS_COMMIT_TIMESTAMP(10), + ARRAY_VALUE(11), + VALUETYPE_NOT_SET(0); + private final int value; + + private ValueTypeCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to 
look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ValueTypeCase valueOf(int value) { + return forNumber(value); + } + + public static ValueTypeCase forNumber(int value) { + switch (value) { + case 1: + return IS_NULL; + case 2: + return INT_VALUE; + case 3: + return BOOL_VALUE; + case 4: + return DOUBLE_VALUE; + case 5: + return BYTES_VALUE; + case 6: + return STRING_VALUE; + case 7: + return STRUCT_VALUE; + case 8: + return TIMESTAMP_VALUE; + case 9: + return DATE_DAYS_VALUE; + case 10: + return IS_COMMIT_TIMESTAMP; + case 11: + return ARRAY_VALUE; + case 0: + return VALUETYPE_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public ValueTypeCase getValueTypeCase() { + return ValueTypeCase.forNumber(valueTypeCase_); + } + + public static final int IS_NULL_FIELD_NUMBER = 1; + + /** + * + * + *
    +   * If is_null is set, then this value is null.
    +   * 
    + * + * bool is_null = 1; + * + * @return Whether the isNull field is set. + */ + @java.lang.Override + public boolean hasIsNull() { + return valueTypeCase_ == 1; + } + + /** + * + * + *
    +   * If is_null is set, then this value is null.
    +   * 
    + * + * bool is_null = 1; + * + * @return The isNull. + */ + @java.lang.Override + public boolean getIsNull() { + if (valueTypeCase_ == 1) { + return (java.lang.Boolean) valueType_; + } + return false; + } + + public static final int INT_VALUE_FIELD_NUMBER = 2; + + /** + * + * + *
    +   * Int type value. It's used for all integer number types, like int32 and
    +   * int64.
    +   * 
    + * + * int64 int_value = 2; + * + * @return Whether the intValue field is set. + */ + @java.lang.Override + public boolean hasIntValue() { + return valueTypeCase_ == 2; + } + + /** + * + * + *
    +   * Int type value. It's used for all integer number types, like int32 and
    +   * int64.
    +   * 
    + * + * int64 int_value = 2; + * + * @return The intValue. + */ + @java.lang.Override + public long getIntValue() { + if (valueTypeCase_ == 2) { + return (java.lang.Long) valueType_; + } + return 0L; + } + + public static final int BOOL_VALUE_FIELD_NUMBER = 3; + + /** + * + * + *
    +   * Bool type value.
    +   * 
    + * + * bool bool_value = 3; + * + * @return Whether the boolValue field is set. + */ + @java.lang.Override + public boolean hasBoolValue() { + return valueTypeCase_ == 3; + } + + /** + * + * + *
    +   * Bool type value.
    +   * 
    + * + * bool bool_value = 3; + * + * @return The boolValue. + */ + @java.lang.Override + public boolean getBoolValue() { + if (valueTypeCase_ == 3) { + return (java.lang.Boolean) valueType_; + } + return false; + } + + public static final int DOUBLE_VALUE_FIELD_NUMBER = 4; + + /** + * + * + *
    +   * Double type value. It's used for all float point types, like float and
    +   * double.
    +   * 
    + * + * double double_value = 4; + * + * @return Whether the doubleValue field is set. + */ + @java.lang.Override + public boolean hasDoubleValue() { + return valueTypeCase_ == 4; + } + + /** + * + * + *
    +   * Double type value. It's used for all float point types, like float and
    +   * double.
    +   * 
    + * + * double double_value = 4; + * + * @return The doubleValue. + */ + @java.lang.Override + public double getDoubleValue() { + if (valueTypeCase_ == 4) { + return (java.lang.Double) valueType_; + } + return 0D; + } + + public static final int BYTES_VALUE_FIELD_NUMBER = 5; + + /** + * + * + *
    +   * Bytes type value, stored in CORD. It's also used for PROTO type value.
    +   * 
    + * + * bytes bytes_value = 5; + * + * @return Whether the bytesValue field is set. + */ + @java.lang.Override + public boolean hasBytesValue() { + return valueTypeCase_ == 5; + } + + /** + * + * + *
    +   * Bytes type value, stored in CORD. It's also used for PROTO type value.
    +   * 
    + * + * bytes bytes_value = 5; + * + * @return The bytesValue. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBytesValue() { + if (valueTypeCase_ == 5) { + return (com.google.protobuf.ByteString) valueType_; + } + return com.google.protobuf.ByteString.EMPTY; + } + + public static final int STRING_VALUE_FIELD_NUMBER = 6; + + /** + * + * + *
    +   * String type value, stored in CORD.
    +   * 
    + * + * string string_value = 6; + * + * @return Whether the stringValue field is set. + */ + public boolean hasStringValue() { + return valueTypeCase_ == 6; + } + + /** + * + * + *
    +   * String type value, stored in CORD.
    +   * 
    + * + * string string_value = 6; + * + * @return The stringValue. + */ + public java.lang.String getStringValue() { + java.lang.Object ref = ""; + if (valueTypeCase_ == 6) { + ref = valueType_; + } + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (valueTypeCase_ == 6) { + valueType_ = s; + } + return s; + } + } + + /** + * + * + *
    +   * String type value, stored in CORD.
    +   * 
    + * + * string string_value = 6; + * + * @return The bytes for stringValue. + */ + public com.google.protobuf.ByteString getStringValueBytes() { + java.lang.Object ref = ""; + if (valueTypeCase_ == 6) { + ref = valueType_; + } + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + if (valueTypeCase_ == 6) { + valueType_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int STRUCT_VALUE_FIELD_NUMBER = 7; + + /** + * + * + *
    +   * Struct type value. It contains a ValueList representing the values in
    +   * this struct.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList struct_value = 7; + * + * @return Whether the structValue field is set. + */ + @java.lang.Override + public boolean hasStructValue() { + return valueTypeCase_ == 7; + } + + /** + * + * + *
    +   * Struct type value. It contains a ValueList representing the values in
    +   * this struct.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList struct_value = 7; + * + * @return The structValue. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueList getStructValue() { + if (valueTypeCase_ == 7) { + return (com.google.spanner.executor.v1.ValueList) valueType_; + } + return com.google.spanner.executor.v1.ValueList.getDefaultInstance(); + } + + /** + * + * + *
    +   * Struct type value. It contains a ValueList representing the values in
    +   * this struct.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList struct_value = 7; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueListOrBuilder getStructValueOrBuilder() { + if (valueTypeCase_ == 7) { + return (com.google.spanner.executor.v1.ValueList) valueType_; + } + return com.google.spanner.executor.v1.ValueList.getDefaultInstance(); + } + + public static final int TIMESTAMP_VALUE_FIELD_NUMBER = 8; + + /** + * + * + *
    +   * Timestamp type value.
    +   * 
    + * + * .google.protobuf.Timestamp timestamp_value = 8; + * + * @return Whether the timestampValue field is set. + */ + @java.lang.Override + public boolean hasTimestampValue() { + return valueTypeCase_ == 8; + } + + /** + * + * + *
    +   * Timestamp type value.
    +   * 
    + * + * .google.protobuf.Timestamp timestamp_value = 8; + * + * @return The timestampValue. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getTimestampValue() { + if (valueTypeCase_ == 8) { + return (com.google.protobuf.Timestamp) valueType_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + + /** + * + * + *
    +   * Timestamp type value.
    +   * 
    + * + * .google.protobuf.Timestamp timestamp_value = 8; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getTimestampValueOrBuilder() { + if (valueTypeCase_ == 8) { + return (com.google.protobuf.Timestamp) valueType_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + + public static final int DATE_DAYS_VALUE_FIELD_NUMBER = 9; + + /** + * + * + *
    +   * Date type value. Date is specified as a number of days since Unix epoch.
    +   * 
    + * + * int32 date_days_value = 9; + * + * @return Whether the dateDaysValue field is set. + */ + @java.lang.Override + public boolean hasDateDaysValue() { + return valueTypeCase_ == 9; + } + + /** + * + * + *
    +   * Date type value. Date is specified as a number of days since Unix epoch.
    +   * 
    + * + * int32 date_days_value = 9; + * + * @return The dateDaysValue. + */ + @java.lang.Override + public int getDateDaysValue() { + if (valueTypeCase_ == 9) { + return (java.lang.Integer) valueType_; + } + return 0; + } + + public static final int IS_COMMIT_TIMESTAMP_FIELD_NUMBER = 10; + + /** + * + * + *
    +   * If set, holds the sentinel value for the transaction CommitTimestamp.
    +   * 
    + * + * bool is_commit_timestamp = 10; + * + * @return Whether the isCommitTimestamp field is set. + */ + @java.lang.Override + public boolean hasIsCommitTimestamp() { + return valueTypeCase_ == 10; + } + + /** + * + * + *
    +   * If set, holds the sentinel value for the transaction CommitTimestamp.
    +   * 
    + * + * bool is_commit_timestamp = 10; + * + * @return The isCommitTimestamp. + */ + @java.lang.Override + public boolean getIsCommitTimestamp() { + if (valueTypeCase_ == 10) { + return (java.lang.Boolean) valueType_; + } + return false; + } + + public static final int ARRAY_VALUE_FIELD_NUMBER = 11; + + /** + * + * + *
    +   * Array type value. The underlying Valuelist should have values that have
    +   * the same type.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList array_value = 11; + * + * @return Whether the arrayValue field is set. + */ + @java.lang.Override + public boolean hasArrayValue() { + return valueTypeCase_ == 11; + } + + /** + * + * + *
    +   * Array type value. The underlying Valuelist should have values that have
    +   * the same type.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList array_value = 11; + * + * @return The arrayValue. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueList getArrayValue() { + if (valueTypeCase_ == 11) { + return (com.google.spanner.executor.v1.ValueList) valueType_; + } + return com.google.spanner.executor.v1.ValueList.getDefaultInstance(); + } + + /** + * + * + *
    +   * Array type value. The underlying Valuelist should have values that have
    +   * the same type.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList array_value = 11; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueListOrBuilder getArrayValueOrBuilder() { + if (valueTypeCase_ == 11) { + return (com.google.spanner.executor.v1.ValueList) valueType_; + } + return com.google.spanner.executor.v1.ValueList.getDefaultInstance(); + } + + public static final int ARRAY_TYPE_FIELD_NUMBER = 12; + private com.google.spanner.v1.Type arrayType_; + + /** + * + * + *
    +   * Type of array element. Only set if value is an array.
    +   * 
    + * + * optional .google.spanner.v1.Type array_type = 12; + * + * @return Whether the arrayType field is set. + */ + @java.lang.Override + public boolean hasArrayType() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Type of array element. Only set if value is an array.
    +   * 
    + * + * optional .google.spanner.v1.Type array_type = 12; + * + * @return The arrayType. + */ + @java.lang.Override + public com.google.spanner.v1.Type getArrayType() { + return arrayType_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : arrayType_; + } + + /** + * + * + *
    +   * Type of array element. Only set if value is an array.
    +   * 
    + * + * optional .google.spanner.v1.Type array_type = 12; + */ + @java.lang.Override + public com.google.spanner.v1.TypeOrBuilder getArrayTypeOrBuilder() { + return arrayType_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : arrayType_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (valueTypeCase_ == 1) { + output.writeBool(1, (boolean) ((java.lang.Boolean) valueType_)); + } + if (valueTypeCase_ == 2) { + output.writeInt64(2, (long) ((java.lang.Long) valueType_)); + } + if (valueTypeCase_ == 3) { + output.writeBool(3, (boolean) ((java.lang.Boolean) valueType_)); + } + if (valueTypeCase_ == 4) { + output.writeDouble(4, (double) ((java.lang.Double) valueType_)); + } + if (valueTypeCase_ == 5) { + output.writeBytes(5, (com.google.protobuf.ByteString) valueType_); + } + if (valueTypeCase_ == 6) { + com.google.protobuf.GeneratedMessage.writeString(output, 6, valueType_); + } + if (valueTypeCase_ == 7) { + output.writeMessage(7, (com.google.spanner.executor.v1.ValueList) valueType_); + } + if (valueTypeCase_ == 8) { + output.writeMessage(8, (com.google.protobuf.Timestamp) valueType_); + } + if (valueTypeCase_ == 9) { + output.writeInt32(9, (int) ((java.lang.Integer) valueType_)); + } + if (valueTypeCase_ == 10) { + output.writeBool(10, (boolean) ((java.lang.Boolean) valueType_)); + } + if (valueTypeCase_ == 11) { + output.writeMessage(11, (com.google.spanner.executor.v1.ValueList) valueType_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(12, getArrayType()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size 
= memoizedSize; + if (size != -1) return size; + + size = 0; + if (valueTypeCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeBoolSize( + 1, (boolean) ((java.lang.Boolean) valueType_)); + } + if (valueTypeCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size( + 2, (long) ((java.lang.Long) valueType_)); + } + if (valueTypeCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeBoolSize( + 3, (boolean) ((java.lang.Boolean) valueType_)); + } + if (valueTypeCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeDoubleSize( + 4, (double) ((java.lang.Double) valueType_)); + } + if (valueTypeCase_ == 5) { + size += + com.google.protobuf.CodedOutputStream.computeBytesSize( + 5, (com.google.protobuf.ByteString) valueType_); + } + if (valueTypeCase_ == 6) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(6, valueType_); + } + if (valueTypeCase_ == 7) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 7, (com.google.spanner.executor.v1.ValueList) valueType_); + } + if (valueTypeCase_ == 8) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 8, (com.google.protobuf.Timestamp) valueType_); + } + if (valueTypeCase_ == 9) { + size += + com.google.protobuf.CodedOutputStream.computeInt32Size( + 9, (int) ((java.lang.Integer) valueType_)); + } + if (valueTypeCase_ == 10) { + size += + com.google.protobuf.CodedOutputStream.computeBoolSize( + 10, (boolean) ((java.lang.Boolean) valueType_)); + } + if (valueTypeCase_ == 11) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 11, (com.google.spanner.executor.v1.ValueList) valueType_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(12, getArrayType()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final 
java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.Value)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.Value other = (com.google.spanner.executor.v1.Value) obj; + + if (hasArrayType() != other.hasArrayType()) return false; + if (hasArrayType()) { + if (!getArrayType().equals(other.getArrayType())) return false; + } + if (!getValueTypeCase().equals(other.getValueTypeCase())) return false; + switch (valueTypeCase_) { + case 1: + if (getIsNull() != other.getIsNull()) return false; + break; + case 2: + if (getIntValue() != other.getIntValue()) return false; + break; + case 3: + if (getBoolValue() != other.getBoolValue()) return false; + break; + case 4: + if (java.lang.Double.doubleToLongBits(getDoubleValue()) + != java.lang.Double.doubleToLongBits(other.getDoubleValue())) return false; + break; + case 5: + if (!getBytesValue().equals(other.getBytesValue())) return false; + break; + case 6: + if (!getStringValue().equals(other.getStringValue())) return false; + break; + case 7: + if (!getStructValue().equals(other.getStructValue())) return false; + break; + case 8: + if (!getTimestampValue().equals(other.getTimestampValue())) return false; + break; + case 9: + if (getDateDaysValue() != other.getDateDaysValue()) return false; + break; + case 10: + if (getIsCommitTimestamp() != other.getIsCommitTimestamp()) return false; + break; + case 11: + if (!getArrayValue().equals(other.getArrayValue())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasArrayType()) { + hash = (37 * hash) + ARRAY_TYPE_FIELD_NUMBER; + hash = (53 * hash) + getArrayType().hashCode(); + } + switch (valueTypeCase_) { + case 1: + hash = (37 
* hash) + IS_NULL_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getIsNull()); + break; + case 2: + hash = (37 * hash) + INT_VALUE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIntValue()); + break; + case 3: + hash = (37 * hash) + BOOL_VALUE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getBoolValue()); + break; + case 4: + hash = (37 * hash) + DOUBLE_VALUE_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getDoubleValue())); + break; + case 5: + hash = (37 * hash) + BYTES_VALUE_FIELD_NUMBER; + hash = (53 * hash) + getBytesValue().hashCode(); + break; + case 6: + hash = (37 * hash) + STRING_VALUE_FIELD_NUMBER; + hash = (53 * hash) + getStringValue().hashCode(); + break; + case 7: + hash = (37 * hash) + STRUCT_VALUE_FIELD_NUMBER; + hash = (53 * hash) + getStructValue().hashCode(); + break; + case 8: + hash = (37 * hash) + TIMESTAMP_VALUE_FIELD_NUMBER; + hash = (53 * hash) + getTimestampValue().hashCode(); + break; + case 9: + hash = (37 * hash) + DATE_DAYS_VALUE_FIELD_NUMBER; + hash = (53 * hash) + getDateDaysValue(); + break; + case 10: + hash = (37 * hash) + IS_COMMIT_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getIsCommitTimestamp()); + break; + case 11: + hash = (37 * hash) + ARRAY_VALUE_FIELD_NUMBER; + hash = (53 * hash) + getArrayValue().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.Value parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.Value parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.Value parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.Value parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.Value parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.Value parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.Value parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.Value parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.Value parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.Value parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.Value parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.Value parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.Value prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Value represents a single value that can be read or written to/from
    +   * Spanner.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.Value} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.Value) + com.google.spanner.executor.v1.ValueOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_Value_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_Value_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.Value.class, + com.google.spanner.executor.v1.Value.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.Value.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetArrayTypeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (structValueBuilder_ != null) { + structValueBuilder_.clear(); + } + if (timestampValueBuilder_ != null) { + timestampValueBuilder_.clear(); + } + if (arrayValueBuilder_ != null) { + arrayValueBuilder_.clear(); + } + arrayType_ = null; + if (arrayTypeBuilder_ != null) { + arrayTypeBuilder_.dispose(); + arrayTypeBuilder_ = null; + } + valueTypeCase_ = 0; + valueType_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_Value_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.Value getDefaultInstanceForType() { + return com.google.spanner.executor.v1.Value.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.Value build() { + com.google.spanner.executor.v1.Value result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.Value buildPartial() { + com.google.spanner.executor.v1.Value result = new com.google.spanner.executor.v1.Value(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.Value result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000800) != 0)) { + result.arrayType_ = arrayTypeBuilder_ == null ? 
arrayType_ : arrayTypeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs(com.google.spanner.executor.v1.Value result) { + result.valueTypeCase_ = valueTypeCase_; + result.valueType_ = this.valueType_; + if (valueTypeCase_ == 7 && structValueBuilder_ != null) { + result.valueType_ = structValueBuilder_.build(); + } + if (valueTypeCase_ == 8 && timestampValueBuilder_ != null) { + result.valueType_ = timestampValueBuilder_.build(); + } + if (valueTypeCase_ == 11 && arrayValueBuilder_ != null) { + result.valueType_ = arrayValueBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.Value) { + return mergeFrom((com.google.spanner.executor.v1.Value) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.Value other) { + if (other == com.google.spanner.executor.v1.Value.getDefaultInstance()) return this; + if (other.hasArrayType()) { + mergeArrayType(other.getArrayType()); + } + switch (other.getValueTypeCase()) { + case IS_NULL: + { + setIsNull(other.getIsNull()); + break; + } + case INT_VALUE: + { + setIntValue(other.getIntValue()); + break; + } + case BOOL_VALUE: + { + setBoolValue(other.getBoolValue()); + break; + } + case DOUBLE_VALUE: + { + setDoubleValue(other.getDoubleValue()); + break; + } + case BYTES_VALUE: + { + setBytesValue(other.getBytesValue()); + break; + } + case STRING_VALUE: + { + valueTypeCase_ = 6; + valueType_ = other.valueType_; + onChanged(); + break; + } + case STRUCT_VALUE: + { + mergeStructValue(other.getStructValue()); + break; + } + case TIMESTAMP_VALUE: + { + mergeTimestampValue(other.getTimestampValue()); + break; + } + case DATE_DAYS_VALUE: + { + setDateDaysValue(other.getDateDaysValue()); + break; + } + case IS_COMMIT_TIMESTAMP: + { + 
setIsCommitTimestamp(other.getIsCommitTimestamp()); + break; + } + case ARRAY_VALUE: + { + mergeArrayValue(other.getArrayValue()); + break; + } + case VALUETYPE_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + valueType_ = input.readBool(); + valueTypeCase_ = 1; + break; + } // case 8 + case 16: + { + valueType_ = input.readInt64(); + valueTypeCase_ = 2; + break; + } // case 16 + case 24: + { + valueType_ = input.readBool(); + valueTypeCase_ = 3; + break; + } // case 24 + case 33: + { + valueType_ = input.readDouble(); + valueTypeCase_ = 4; + break; + } // case 33 + case 42: + { + valueType_ = input.readBytes(); + valueTypeCase_ = 5; + break; + } // case 42 + case 50: + { + java.lang.String s = input.readStringRequireUtf8(); + valueTypeCase_ = 6; + valueType_ = s; + break; + } // case 50 + case 58: + { + input.readMessage( + internalGetStructValueFieldBuilder().getBuilder(), extensionRegistry); + valueTypeCase_ = 7; + break; + } // case 58 + case 66: + { + input.readMessage( + internalGetTimestampValueFieldBuilder().getBuilder(), extensionRegistry); + valueTypeCase_ = 8; + break; + } // case 66 + case 72: + { + valueType_ = input.readInt32(); + valueTypeCase_ = 9; + break; + } // case 72 + case 80: + { + valueType_ = input.readBool(); + valueTypeCase_ = 10; + break; + } // case 80 + case 90: + { + input.readMessage( + internalGetArrayValueFieldBuilder().getBuilder(), extensionRegistry); + 
valueTypeCase_ = 11; + break; + } // case 90 + case 98: + { + input.readMessage( + internalGetArrayTypeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000800; + break; + } // case 98 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int valueTypeCase_ = 0; + private java.lang.Object valueType_; + + public ValueTypeCase getValueTypeCase() { + return ValueTypeCase.forNumber(valueTypeCase_); + } + + public Builder clearValueType() { + valueTypeCase_ = 0; + valueType_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + /** + * + * + *
    +     * If is_null is set, then this value is null.
    +     * 
    + * + * bool is_null = 1; + * + * @return Whether the isNull field is set. + */ + public boolean hasIsNull() { + return valueTypeCase_ == 1; + } + + /** + * + * + *
    +     * If is_null is set, then this value is null.
    +     * 
    + * + * bool is_null = 1; + * + * @return The isNull. + */ + public boolean getIsNull() { + if (valueTypeCase_ == 1) { + return (java.lang.Boolean) valueType_; + } + return false; + } + + /** + * + * + *
    +     * If is_null is set, then this value is null.
    +     * 
    + * + * bool is_null = 1; + * + * @param value The isNull to set. + * @return This builder for chaining. + */ + public Builder setIsNull(boolean value) { + + valueTypeCase_ = 1; + valueType_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If is_null is set, then this value is null.
    +     * 
    + * + * bool is_null = 1; + * + * @return This builder for chaining. + */ + public Builder clearIsNull() { + if (valueTypeCase_ == 1) { + valueTypeCase_ = 0; + valueType_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Int type value. It's used for all integer number types, like int32 and
    +     * int64.
    +     * 
    + * + * int64 int_value = 2; + * + * @return Whether the intValue field is set. + */ + public boolean hasIntValue() { + return valueTypeCase_ == 2; + } + + /** + * + * + *
    +     * Int type value. It's used for all integer number types, like int32 and
    +     * int64.
    +     * 
    + * + * int64 int_value = 2; + * + * @return The intValue. + */ + public long getIntValue() { + if (valueTypeCase_ == 2) { + return (java.lang.Long) valueType_; + } + return 0L; + } + + /** + * + * + *
    +     * Int type value. It's used for all integer number types, like int32 and
    +     * int64.
    +     * 
    + * + * int64 int_value = 2; + * + * @param value The intValue to set. + * @return This builder for chaining. + */ + public Builder setIntValue(long value) { + + valueTypeCase_ = 2; + valueType_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Int type value. It's used for all integer number types, like int32 and
    +     * int64.
    +     * 
    + * + * int64 int_value = 2; + * + * @return This builder for chaining. + */ + public Builder clearIntValue() { + if (valueTypeCase_ == 2) { + valueTypeCase_ = 0; + valueType_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Bool type value.
    +     * 
    + * + * bool bool_value = 3; + * + * @return Whether the boolValue field is set. + */ + public boolean hasBoolValue() { + return valueTypeCase_ == 3; + } + + /** + * + * + *
    +     * Bool type value.
    +     * 
    + * + * bool bool_value = 3; + * + * @return The boolValue. + */ + public boolean getBoolValue() { + if (valueTypeCase_ == 3) { + return (java.lang.Boolean) valueType_; + } + return false; + } + + /** + * + * + *
    +     * Bool type value.
    +     * 
    + * + * bool bool_value = 3; + * + * @param value The boolValue to set. + * @return This builder for chaining. + */ + public Builder setBoolValue(boolean value) { + + valueTypeCase_ = 3; + valueType_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Bool type value.
    +     * 
    + * + * bool bool_value = 3; + * + * @return This builder for chaining. + */ + public Builder clearBoolValue() { + if (valueTypeCase_ == 3) { + valueTypeCase_ = 0; + valueType_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Double type value. It's used for all float point types, like float and
    +     * double.
    +     * 
    + * + * double double_value = 4; + * + * @return Whether the doubleValue field is set. + */ + public boolean hasDoubleValue() { + return valueTypeCase_ == 4; + } + + /** + * + * + *
    +     * Double type value. It's used for all float point types, like float and
    +     * double.
    +     * 
    + * + * double double_value = 4; + * + * @return The doubleValue. + */ + public double getDoubleValue() { + if (valueTypeCase_ == 4) { + return (java.lang.Double) valueType_; + } + return 0D; + } + + /** + * + * + *
    +     * Double type value. It's used for all float point types, like float and
    +     * double.
    +     * 
    + * + * double double_value = 4; + * + * @param value The doubleValue to set. + * @return This builder for chaining. + */ + public Builder setDoubleValue(double value) { + + valueTypeCase_ = 4; + valueType_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Double type value. It's used for all float point types, like float and
    +     * double.
    +     * 
    + * + * double double_value = 4; + * + * @return This builder for chaining. + */ + public Builder clearDoubleValue() { + if (valueTypeCase_ == 4) { + valueTypeCase_ = 0; + valueType_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Bytes type value, stored in CORD. It's also used for PROTO type value.
    +     * 
    + * + * bytes bytes_value = 5; + * + * @return Whether the bytesValue field is set. + */ + public boolean hasBytesValue() { + return valueTypeCase_ == 5; + } + + /** + * + * + *
    +     * Bytes type value, stored in CORD. It's also used for PROTO type value.
    +     * 
    + * + * bytes bytes_value = 5; + * + * @return The bytesValue. + */ + public com.google.protobuf.ByteString getBytesValue() { + if (valueTypeCase_ == 5) { + return (com.google.protobuf.ByteString) valueType_; + } + return com.google.protobuf.ByteString.EMPTY; + } + + /** + * + * + *
    +     * Bytes type value, stored in CORD. It's also used for PROTO type value.
    +     * 
    + * + * bytes bytes_value = 5; + * + * @param value The bytesValue to set. + * @return This builder for chaining. + */ + public Builder setBytesValue(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + valueTypeCase_ = 5; + valueType_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Bytes type value, stored in CORD. It's also used for PROTO type value.
    +     * 
    + * + * bytes bytes_value = 5; + * + * @return This builder for chaining. + */ + public Builder clearBytesValue() { + if (valueTypeCase_ == 5) { + valueTypeCase_ = 0; + valueType_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * String type value, stored in CORD.
    +     * 
    + * + * string string_value = 6; + * + * @return Whether the stringValue field is set. + */ + @java.lang.Override + public boolean hasStringValue() { + return valueTypeCase_ == 6; + } + + /** + * + * + *
    +     * String type value, stored in CORD.
    +     * 
    + * + * string string_value = 6; + * + * @return The stringValue. + */ + @java.lang.Override + public java.lang.String getStringValue() { + java.lang.Object ref = ""; + if (valueTypeCase_ == 6) { + ref = valueType_; + } + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (valueTypeCase_ == 6) { + valueType_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * String type value, stored in CORD.
    +     * 
    + * + * string string_value = 6; + * + * @return The bytes for stringValue. + */ + @java.lang.Override + public com.google.protobuf.ByteString getStringValueBytes() { + java.lang.Object ref = ""; + if (valueTypeCase_ == 6) { + ref = valueType_; + } + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + if (valueTypeCase_ == 6) { + valueType_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * String type value, stored in CORD.
    +     * 
    + * + * string string_value = 6; + * + * @param value The stringValue to set. + * @return This builder for chaining. + */ + public Builder setStringValue(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + valueTypeCase_ = 6; + valueType_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * String type value, stored in CORD.
    +     * 
    + * + * string string_value = 6; + * + * @return This builder for chaining. + */ + public Builder clearStringValue() { + if (valueTypeCase_ == 6) { + valueTypeCase_ = 0; + valueType_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * String type value, stored in CORD.
    +     * 
    + * + * string string_value = 6; + * + * @param value The bytes for stringValue to set. + * @return This builder for chaining. + */ + public Builder setStringValueBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + valueTypeCase_ = 6; + valueType_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder> + structValueBuilder_; + + /** + * + * + *
    +     * Struct type value. It contains a ValueList representing the values in
    +     * this struct.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList struct_value = 7; + * + * @return Whether the structValue field is set. + */ + @java.lang.Override + public boolean hasStructValue() { + return valueTypeCase_ == 7; + } + + /** + * + * + *
    +     * Struct type value. It contains a ValueList representing the values in
    +     * this struct.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList struct_value = 7; + * + * @return The structValue. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueList getStructValue() { + if (structValueBuilder_ == null) { + if (valueTypeCase_ == 7) { + return (com.google.spanner.executor.v1.ValueList) valueType_; + } + return com.google.spanner.executor.v1.ValueList.getDefaultInstance(); + } else { + if (valueTypeCase_ == 7) { + return structValueBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.ValueList.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Struct type value. It contains a ValueList representing the values in
    +     * this struct.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList struct_value = 7; + */ + public Builder setStructValue(com.google.spanner.executor.v1.ValueList value) { + if (structValueBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + valueType_ = value; + onChanged(); + } else { + structValueBuilder_.setMessage(value); + } + valueTypeCase_ = 7; + return this; + } + + /** + * + * + *
    +     * Struct type value. It contains a ValueList representing the values in
    +     * this struct.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList struct_value = 7; + */ + public Builder setStructValue( + com.google.spanner.executor.v1.ValueList.Builder builderForValue) { + if (structValueBuilder_ == null) { + valueType_ = builderForValue.build(); + onChanged(); + } else { + structValueBuilder_.setMessage(builderForValue.build()); + } + valueTypeCase_ = 7; + return this; + } + + /** + * + * + *
    +     * Struct type value. It contains a ValueList representing the values in
    +     * this struct.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList struct_value = 7; + */ + public Builder mergeStructValue(com.google.spanner.executor.v1.ValueList value) { + if (structValueBuilder_ == null) { + if (valueTypeCase_ == 7 + && valueType_ != com.google.spanner.executor.v1.ValueList.getDefaultInstance()) { + valueType_ = + com.google.spanner.executor.v1.ValueList.newBuilder( + (com.google.spanner.executor.v1.ValueList) valueType_) + .mergeFrom(value) + .buildPartial(); + } else { + valueType_ = value; + } + onChanged(); + } else { + if (valueTypeCase_ == 7) { + structValueBuilder_.mergeFrom(value); + } else { + structValueBuilder_.setMessage(value); + } + } + valueTypeCase_ = 7; + return this; + } + + /** + * + * + *
    +     * Struct type value. It contains a ValueList representing the values in
    +     * this struct.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList struct_value = 7; + */ + public Builder clearStructValue() { + if (structValueBuilder_ == null) { + if (valueTypeCase_ == 7) { + valueTypeCase_ = 0; + valueType_ = null; + onChanged(); + } + } else { + if (valueTypeCase_ == 7) { + valueTypeCase_ = 0; + valueType_ = null; + } + structValueBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Struct type value. It contains a ValueList representing the values in
    +     * this struct.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList struct_value = 7; + */ + public com.google.spanner.executor.v1.ValueList.Builder getStructValueBuilder() { + return internalGetStructValueFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Struct type value. It contains a ValueList representing the values in
    +     * this struct.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList struct_value = 7; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueListOrBuilder getStructValueOrBuilder() { + if ((valueTypeCase_ == 7) && (structValueBuilder_ != null)) { + return structValueBuilder_.getMessageOrBuilder(); + } else { + if (valueTypeCase_ == 7) { + return (com.google.spanner.executor.v1.ValueList) valueType_; + } + return com.google.spanner.executor.v1.ValueList.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Struct type value. It contains a ValueList representing the values in
    +     * this struct.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList struct_value = 7; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder> + internalGetStructValueFieldBuilder() { + if (structValueBuilder_ == null) { + if (!(valueTypeCase_ == 7)) { + valueType_ = com.google.spanner.executor.v1.ValueList.getDefaultInstance(); + } + structValueBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder>( + (com.google.spanner.executor.v1.ValueList) valueType_, + getParentForChildren(), + isClean()); + valueType_ = null; + } + valueTypeCase_ = 7; + onChanged(); + return structValueBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + timestampValueBuilder_; + + /** + * + * + *
    +     * Timestamp type value.
    +     * 
    + * + * .google.protobuf.Timestamp timestamp_value = 8; + * + * @return Whether the timestampValue field is set. + */ + @java.lang.Override + public boolean hasTimestampValue() { + return valueTypeCase_ == 8; + } + + /** + * + * + *
    +     * Timestamp type value.
    +     * 
    + * + * .google.protobuf.Timestamp timestamp_value = 8; + * + * @return The timestampValue. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getTimestampValue() { + if (timestampValueBuilder_ == null) { + if (valueTypeCase_ == 8) { + return (com.google.protobuf.Timestamp) valueType_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } else { + if (valueTypeCase_ == 8) { + return timestampValueBuilder_.getMessage(); + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Timestamp type value.
    +     * 
    + * + * .google.protobuf.Timestamp timestamp_value = 8; + */ + public Builder setTimestampValue(com.google.protobuf.Timestamp value) { + if (timestampValueBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + valueType_ = value; + onChanged(); + } else { + timestampValueBuilder_.setMessage(value); + } + valueTypeCase_ = 8; + return this; + } + + /** + * + * + *
    +     * Timestamp type value.
    +     * 
    + * + * .google.protobuf.Timestamp timestamp_value = 8; + */ + public Builder setTimestampValue(com.google.protobuf.Timestamp.Builder builderForValue) { + if (timestampValueBuilder_ == null) { + valueType_ = builderForValue.build(); + onChanged(); + } else { + timestampValueBuilder_.setMessage(builderForValue.build()); + } + valueTypeCase_ = 8; + return this; + } + + /** + * + * + *
    +     * Timestamp type value.
    +     * 
    + * + * .google.protobuf.Timestamp timestamp_value = 8; + */ + public Builder mergeTimestampValue(com.google.protobuf.Timestamp value) { + if (timestampValueBuilder_ == null) { + if (valueTypeCase_ == 8 + && valueType_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + valueType_ = + com.google.protobuf.Timestamp.newBuilder((com.google.protobuf.Timestamp) valueType_) + .mergeFrom(value) + .buildPartial(); + } else { + valueType_ = value; + } + onChanged(); + } else { + if (valueTypeCase_ == 8) { + timestampValueBuilder_.mergeFrom(value); + } else { + timestampValueBuilder_.setMessage(value); + } + } + valueTypeCase_ = 8; + return this; + } + + /** + * + * + *
    +     * Timestamp type value.
    +     * 
    + * + * .google.protobuf.Timestamp timestamp_value = 8; + */ + public Builder clearTimestampValue() { + if (timestampValueBuilder_ == null) { + if (valueTypeCase_ == 8) { + valueTypeCase_ = 0; + valueType_ = null; + onChanged(); + } + } else { + if (valueTypeCase_ == 8) { + valueTypeCase_ = 0; + valueType_ = null; + } + timestampValueBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Timestamp type value.
    +     * 
    + * + * .google.protobuf.Timestamp timestamp_value = 8; + */ + public com.google.protobuf.Timestamp.Builder getTimestampValueBuilder() { + return internalGetTimestampValueFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Timestamp type value.
    +     * 
    + * + * .google.protobuf.Timestamp timestamp_value = 8; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getTimestampValueOrBuilder() { + if ((valueTypeCase_ == 8) && (timestampValueBuilder_ != null)) { + return timestampValueBuilder_.getMessageOrBuilder(); + } else { + if (valueTypeCase_ == 8) { + return (com.google.protobuf.Timestamp) valueType_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Timestamp type value.
    +     * 
    + * + * .google.protobuf.Timestamp timestamp_value = 8; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetTimestampValueFieldBuilder() { + if (timestampValueBuilder_ == null) { + if (!(valueTypeCase_ == 8)) { + valueType_ = com.google.protobuf.Timestamp.getDefaultInstance(); + } + timestampValueBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + (com.google.protobuf.Timestamp) valueType_, getParentForChildren(), isClean()); + valueType_ = null; + } + valueTypeCase_ = 8; + onChanged(); + return timestampValueBuilder_; + } + + /** + * + * + *
    +     * Date type value. Date is specified as a number of days since Unix epoch.
    +     * 
    + * + * int32 date_days_value = 9; + * + * @return Whether the dateDaysValue field is set. + */ + public boolean hasDateDaysValue() { + return valueTypeCase_ == 9; + } + + /** + * + * + *
    +     * Date type value. Date is specified as a number of days since Unix epoch.
    +     * 
    + * + * int32 date_days_value = 9; + * + * @return The dateDaysValue. + */ + public int getDateDaysValue() { + if (valueTypeCase_ == 9) { + return (java.lang.Integer) valueType_; + } + return 0; + } + + /** + * + * + *
    +     * Date type value. Date is specified as a number of days since Unix epoch.
    +     * 
    + * + * int32 date_days_value = 9; + * + * @param value The dateDaysValue to set. + * @return This builder for chaining. + */ + public Builder setDateDaysValue(int value) { + + valueTypeCase_ = 9; + valueType_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Date type value. Date is specified as a number of days since Unix epoch.
    +     * 
    + * + * int32 date_days_value = 9; + * + * @return This builder for chaining. + */ + public Builder clearDateDaysValue() { + if (valueTypeCase_ == 9) { + valueTypeCase_ = 0; + valueType_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * If set, holds the sentinel value for the transaction CommitTimestamp.
    +     * 
    + * + * bool is_commit_timestamp = 10; + * + * @return Whether the isCommitTimestamp field is set. + */ + public boolean hasIsCommitTimestamp() { + return valueTypeCase_ == 10; + } + + /** + * + * + *
    +     * If set, holds the sentinel value for the transaction CommitTimestamp.
    +     * 
    + * + * bool is_commit_timestamp = 10; + * + * @return The isCommitTimestamp. + */ + public boolean getIsCommitTimestamp() { + if (valueTypeCase_ == 10) { + return (java.lang.Boolean) valueType_; + } + return false; + } + + /** + * + * + *
    +     * If set, holds the sentinel value for the transaction CommitTimestamp.
    +     * 
    + * + * bool is_commit_timestamp = 10; + * + * @param value The isCommitTimestamp to set. + * @return This builder for chaining. + */ + public Builder setIsCommitTimestamp(boolean value) { + + valueTypeCase_ = 10; + valueType_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If set, holds the sentinel value for the transaction CommitTimestamp.
    +     * 
    + * + * bool is_commit_timestamp = 10; + * + * @return This builder for chaining. + */ + public Builder clearIsCommitTimestamp() { + if (valueTypeCase_ == 10) { + valueTypeCase_ = 0; + valueType_ = null; + onChanged(); + } + return this; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder> + arrayValueBuilder_; + + /** + * + * + *
    +     * Array type value. The underlying Valuelist should have values that have
    +     * the same type.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList array_value = 11; + * + * @return Whether the arrayValue field is set. + */ + @java.lang.Override + public boolean hasArrayValue() { + return valueTypeCase_ == 11; + } + + /** + * + * + *
    +     * Array type value. The underlying Valuelist should have values that have
    +     * the same type.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList array_value = 11; + * + * @return The arrayValue. + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueList getArrayValue() { + if (arrayValueBuilder_ == null) { + if (valueTypeCase_ == 11) { + return (com.google.spanner.executor.v1.ValueList) valueType_; + } + return com.google.spanner.executor.v1.ValueList.getDefaultInstance(); + } else { + if (valueTypeCase_ == 11) { + return arrayValueBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.ValueList.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Array type value. The underlying Valuelist should have values that have
    +     * the same type.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList array_value = 11; + */ + public Builder setArrayValue(com.google.spanner.executor.v1.ValueList value) { + if (arrayValueBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + valueType_ = value; + onChanged(); + } else { + arrayValueBuilder_.setMessage(value); + } + valueTypeCase_ = 11; + return this; + } + + /** + * + * + *
    +     * Array type value. The underlying Valuelist should have values that have
    +     * the same type.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList array_value = 11; + */ + public Builder setArrayValue(com.google.spanner.executor.v1.ValueList.Builder builderForValue) { + if (arrayValueBuilder_ == null) { + valueType_ = builderForValue.build(); + onChanged(); + } else { + arrayValueBuilder_.setMessage(builderForValue.build()); + } + valueTypeCase_ = 11; + return this; + } + + /** + * + * + *
    +     * Array type value. The underlying Valuelist should have values that have
    +     * the same type.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList array_value = 11; + */ + public Builder mergeArrayValue(com.google.spanner.executor.v1.ValueList value) { + if (arrayValueBuilder_ == null) { + if (valueTypeCase_ == 11 + && valueType_ != com.google.spanner.executor.v1.ValueList.getDefaultInstance()) { + valueType_ = + com.google.spanner.executor.v1.ValueList.newBuilder( + (com.google.spanner.executor.v1.ValueList) valueType_) + .mergeFrom(value) + .buildPartial(); + } else { + valueType_ = value; + } + onChanged(); + } else { + if (valueTypeCase_ == 11) { + arrayValueBuilder_.mergeFrom(value); + } else { + arrayValueBuilder_.setMessage(value); + } + } + valueTypeCase_ = 11; + return this; + } + + /** + * + * + *
    +     * Array type value. The underlying Valuelist should have values that have
    +     * the same type.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList array_value = 11; + */ + public Builder clearArrayValue() { + if (arrayValueBuilder_ == null) { + if (valueTypeCase_ == 11) { + valueTypeCase_ = 0; + valueType_ = null; + onChanged(); + } + } else { + if (valueTypeCase_ == 11) { + valueTypeCase_ = 0; + valueType_ = null; + } + arrayValueBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Array type value. The underlying Valuelist should have values that have
    +     * the same type.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList array_value = 11; + */ + public com.google.spanner.executor.v1.ValueList.Builder getArrayValueBuilder() { + return internalGetArrayValueFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Array type value. The underlying Valuelist should have values that have
    +     * the same type.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList array_value = 11; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueListOrBuilder getArrayValueOrBuilder() { + if ((valueTypeCase_ == 11) && (arrayValueBuilder_ != null)) { + return arrayValueBuilder_.getMessageOrBuilder(); + } else { + if (valueTypeCase_ == 11) { + return (com.google.spanner.executor.v1.ValueList) valueType_; + } + return com.google.spanner.executor.v1.ValueList.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Array type value. The underlying Valuelist should have values that have
    +     * the same type.
    +     * 
    + * + * .google.spanner.executor.v1.ValueList array_value = 11; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder> + internalGetArrayValueFieldBuilder() { + if (arrayValueBuilder_ == null) { + if (!(valueTypeCase_ == 11)) { + valueType_ = com.google.spanner.executor.v1.ValueList.getDefaultInstance(); + } + arrayValueBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.ValueList, + com.google.spanner.executor.v1.ValueList.Builder, + com.google.spanner.executor.v1.ValueListOrBuilder>( + (com.google.spanner.executor.v1.ValueList) valueType_, + getParentForChildren(), + isClean()); + valueType_ = null; + } + valueTypeCase_ = 11; + onChanged(); + return arrayValueBuilder_; + } + + private com.google.spanner.v1.Type arrayType_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder> + arrayTypeBuilder_; + + /** + * + * + *
    +     * Type of array element. Only set if value is an array.
    +     * 
    + * + * optional .google.spanner.v1.Type array_type = 12; + * + * @return Whether the arrayType field is set. + */ + public boolean hasArrayType() { + return ((bitField0_ & 0x00000800) != 0); + } + + /** + * + * + *
    +     * Type of array element. Only set if value is an array.
    +     * 
    + * + * optional .google.spanner.v1.Type array_type = 12; + * + * @return The arrayType. + */ + public com.google.spanner.v1.Type getArrayType() { + if (arrayTypeBuilder_ == null) { + return arrayType_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : arrayType_; + } else { + return arrayTypeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Type of array element. Only set if value is an array.
    +     * 
    + * + * optional .google.spanner.v1.Type array_type = 12; + */ + public Builder setArrayType(com.google.spanner.v1.Type value) { + if (arrayTypeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + arrayType_ = value; + } else { + arrayTypeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Type of array element. Only set if value is an array.
    +     * 
    + * + * optional .google.spanner.v1.Type array_type = 12; + */ + public Builder setArrayType(com.google.spanner.v1.Type.Builder builderForValue) { + if (arrayTypeBuilder_ == null) { + arrayType_ = builderForValue.build(); + } else { + arrayTypeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Type of array element. Only set if value is an array.
    +     * 
    + * + * optional .google.spanner.v1.Type array_type = 12; + */ + public Builder mergeArrayType(com.google.spanner.v1.Type value) { + if (arrayTypeBuilder_ == null) { + if (((bitField0_ & 0x00000800) != 0) + && arrayType_ != null + && arrayType_ != com.google.spanner.v1.Type.getDefaultInstance()) { + getArrayTypeBuilder().mergeFrom(value); + } else { + arrayType_ = value; + } + } else { + arrayTypeBuilder_.mergeFrom(value); + } + if (arrayType_ != null) { + bitField0_ |= 0x00000800; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Type of array element. Only set if value is an array.
    +     * 
    + * + * optional .google.spanner.v1.Type array_type = 12; + */ + public Builder clearArrayType() { + bitField0_ = (bitField0_ & ~0x00000800); + arrayType_ = null; + if (arrayTypeBuilder_ != null) { + arrayTypeBuilder_.dispose(); + arrayTypeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Type of array element. Only set if value is an array.
    +     * 
    + * + * optional .google.spanner.v1.Type array_type = 12; + */ + public com.google.spanner.v1.Type.Builder getArrayTypeBuilder() { + bitField0_ |= 0x00000800; + onChanged(); + return internalGetArrayTypeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Type of array element. Only set if value is an array.
    +     * 
    + * + * optional .google.spanner.v1.Type array_type = 12; + */ + public com.google.spanner.v1.TypeOrBuilder getArrayTypeOrBuilder() { + if (arrayTypeBuilder_ != null) { + return arrayTypeBuilder_.getMessageOrBuilder(); + } else { + return arrayType_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : arrayType_; + } + } + + /** + * + * + *
    +     * Type of array element. Only set if value is an array.
    +     * 
    + * + * optional .google.spanner.v1.Type array_type = 12; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder> + internalGetArrayTypeFieldBuilder() { + if (arrayTypeBuilder_ == null) { + arrayTypeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder>( + getArrayType(), getParentForChildren(), isClean()); + arrayType_ = null; + } + return arrayTypeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.Value) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.Value) + private static final com.google.spanner.executor.v1.Value DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.Value(); + } + + public static com.google.spanner.executor.v1.Value getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Value parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.Value getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ValueList.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ValueList.java new file mode 100644 index 000000000000..f50220b69d3e --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ValueList.java @@ -0,0 +1,918 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * List of values.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ValueList} + */ +@com.google.protobuf.Generated +public final class ValueList extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.ValueList) + ValueListOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ValueList"); + } + + // Use ValueList.newBuilder() to construct. + private ValueList(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ValueList() { + value_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ValueList_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ValueList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ValueList.class, + com.google.spanner.executor.v1.ValueList.Builder.class); + } + + public static final int VALUE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List value_; + + /** + * + * + *
    +   * Values contained in this ValueList.
    +   * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + @java.lang.Override + public java.util.List getValueList() { + return value_; + } + + /** + * + * + *
    +   * Values contained in this ValueList.
    +   * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + @java.lang.Override + public java.util.List + getValueOrBuilderList() { + return value_; + } + + /** + * + * + *
    +   * Values contained in this ValueList.
    +   * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + @java.lang.Override + public int getValueCount() { + return value_.size(); + } + + /** + * + * + *
    +   * Values contained in this ValueList.
    +   * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + @java.lang.Override + public com.google.spanner.executor.v1.Value getValue(int index) { + return value_.get(index); + } + + /** + * + * + *
    +   * Values contained in this ValueList.
    +   * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + @java.lang.Override + public com.google.spanner.executor.v1.ValueOrBuilder getValueOrBuilder(int index) { + return value_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < value_.size(); i++) { + output.writeMessage(1, value_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < value_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, value_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.ValueList)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.ValueList other = (com.google.spanner.executor.v1.ValueList) obj; + + if (!getValueList().equals(other.getValueList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getValueCount() > 0) { + hash = (37 * hash) + VALUE_FIELD_NUMBER; + hash = (53 * hash) + getValueList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public 
static com.google.spanner.executor.v1.ValueList parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ValueList parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ValueList parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ValueList parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ValueList parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.ValueList parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ValueList parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ValueList parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ValueList parseDelimitedFrom( + 
java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ValueList parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.ValueList parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.ValueList parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.ValueList prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * List of values.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.ValueList} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.ValueList) + com.google.spanner.executor.v1.ValueListOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ValueList_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ValueList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.ValueList.class, + com.google.spanner.executor.v1.ValueList.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.ValueList.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (valueBuilder_ == null) { + value_ = java.util.Collections.emptyList(); + } else { + value_ = null; + valueBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_ValueList_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ValueList getDefaultInstanceForType() { + return com.google.spanner.executor.v1.ValueList.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.ValueList build() { + com.google.spanner.executor.v1.ValueList result = buildPartial(); + if 
(!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ValueList buildPartial() { + com.google.spanner.executor.v1.ValueList result = + new com.google.spanner.executor.v1.ValueList(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.executor.v1.ValueList result) { + if (valueBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + value_ = java.util.Collections.unmodifiableList(value_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.value_ = value_; + } else { + result.value_ = valueBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.executor.v1.ValueList result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.ValueList) { + return mergeFrom((com.google.spanner.executor.v1.ValueList) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.ValueList other) { + if (other == com.google.spanner.executor.v1.ValueList.getDefaultInstance()) return this; + if (valueBuilder_ == null) { + if (!other.value_.isEmpty()) { + if (value_.isEmpty()) { + value_ = other.value_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureValueIsMutable(); + value_.addAll(other.value_); + } + onChanged(); + } + } else { + if (!other.value_.isEmpty()) { + if (valueBuilder_.isEmpty()) { + valueBuilder_.dispose(); + valueBuilder_ = null; + value_ = other.value_; + bitField0_ = (bitField0_ & ~0x00000001); + valueBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetValueFieldBuilder() + : null; + } else { + valueBuilder_.addAllMessages(other.value_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.executor.v1.Value m = + input.readMessage( + com.google.spanner.executor.v1.Value.parser(), extensionRegistry); + if (valueBuilder_ == null) { + ensureValueIsMutable(); + value_.add(m); + } else { + valueBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List value_ = + java.util.Collections.emptyList(); + + private void ensureValueIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + value_ = new java.util.ArrayList(value_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.Value, + com.google.spanner.executor.v1.Value.Builder, + com.google.spanner.executor.v1.ValueOrBuilder> + valueBuilder_; + + /** + * + * + *
    +     * Values contained in this ValueList.
    +     * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + public java.util.List getValueList() { + if (valueBuilder_ == null) { + return java.util.Collections.unmodifiableList(value_); + } else { + return valueBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Values contained in this ValueList.
    +     * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + public int getValueCount() { + if (valueBuilder_ == null) { + return value_.size(); + } else { + return valueBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Values contained in this ValueList.
    +     * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + public com.google.spanner.executor.v1.Value getValue(int index) { + if (valueBuilder_ == null) { + return value_.get(index); + } else { + return valueBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Values contained in this ValueList.
    +     * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + public Builder setValue(int index, com.google.spanner.executor.v1.Value value) { + if (valueBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureValueIsMutable(); + value_.set(index, value); + onChanged(); + } else { + valueBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Values contained in this ValueList.
    +     * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + public Builder setValue( + int index, com.google.spanner.executor.v1.Value.Builder builderForValue) { + if (valueBuilder_ == null) { + ensureValueIsMutable(); + value_.set(index, builderForValue.build()); + onChanged(); + } else { + valueBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Values contained in this ValueList.
    +     * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + public Builder addValue(com.google.spanner.executor.v1.Value value) { + if (valueBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureValueIsMutable(); + value_.add(value); + onChanged(); + } else { + valueBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Values contained in this ValueList.
    +     * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + public Builder addValue(int index, com.google.spanner.executor.v1.Value value) { + if (valueBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureValueIsMutable(); + value_.add(index, value); + onChanged(); + } else { + valueBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Values contained in this ValueList.
    +     * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + public Builder addValue(com.google.spanner.executor.v1.Value.Builder builderForValue) { + if (valueBuilder_ == null) { + ensureValueIsMutable(); + value_.add(builderForValue.build()); + onChanged(); + } else { + valueBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Values contained in this ValueList.
    +     * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + public Builder addValue( + int index, com.google.spanner.executor.v1.Value.Builder builderForValue) { + if (valueBuilder_ == null) { + ensureValueIsMutable(); + value_.add(index, builderForValue.build()); + onChanged(); + } else { + valueBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Values contained in this ValueList.
    +     * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + public Builder addAllValue( + java.lang.Iterable values) { + if (valueBuilder_ == null) { + ensureValueIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, value_); + onChanged(); + } else { + valueBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Values contained in this ValueList.
    +     * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + public Builder clearValue() { + if (valueBuilder_ == null) { + value_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + valueBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Values contained in this ValueList.
    +     * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + public Builder removeValue(int index) { + if (valueBuilder_ == null) { + ensureValueIsMutable(); + value_.remove(index); + onChanged(); + } else { + valueBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Values contained in this ValueList.
    +     * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + public com.google.spanner.executor.v1.Value.Builder getValueBuilder(int index) { + return internalGetValueFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Values contained in this ValueList.
    +     * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + public com.google.spanner.executor.v1.ValueOrBuilder getValueOrBuilder(int index) { + if (valueBuilder_ == null) { + return value_.get(index); + } else { + return valueBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Values contained in this ValueList.
    +     * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + public java.util.List + getValueOrBuilderList() { + if (valueBuilder_ != null) { + return valueBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(value_); + } + } + + /** + * + * + *
    +     * Values contained in this ValueList.
    +     * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + public com.google.spanner.executor.v1.Value.Builder addValueBuilder() { + return internalGetValueFieldBuilder() + .addBuilder(com.google.spanner.executor.v1.Value.getDefaultInstance()); + } + + /** + * + * + *
    +     * Values contained in this ValueList.
    +     * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + public com.google.spanner.executor.v1.Value.Builder addValueBuilder(int index) { + return internalGetValueFieldBuilder() + .addBuilder(index, com.google.spanner.executor.v1.Value.getDefaultInstance()); + } + + /** + * + * + *
    +     * Values contained in this ValueList.
    +     * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + public java.util.List getValueBuilderList() { + return internalGetValueFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.Value, + com.google.spanner.executor.v1.Value.Builder, + com.google.spanner.executor.v1.ValueOrBuilder> + internalGetValueFieldBuilder() { + if (valueBuilder_ == null) { + valueBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.executor.v1.Value, + com.google.spanner.executor.v1.Value.Builder, + com.google.spanner.executor.v1.ValueOrBuilder>( + value_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + value_ = null; + } + return valueBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.ValueList) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.ValueList) + private static final com.google.spanner.executor.v1.ValueList DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.ValueList(); + } + + public static com.google.spanner.executor.v1.ValueList getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ValueList parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.ValueList getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ValueListOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ValueListOrBuilder.java new file mode 100644 index 000000000000..0b85fa204599 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ValueListOrBuilder.java @@ -0,0 +1,83 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface ValueListOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.ValueList) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Values contained in this ValueList.
    +   * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + java.util.List getValueList(); + + /** + * + * + *
    +   * Values contained in this ValueList.
    +   * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + com.google.spanner.executor.v1.Value getValue(int index); + + /** + * + * + *
    +   * Values contained in this ValueList.
    +   * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + int getValueCount(); + + /** + * + * + *
    +   * Values contained in this ValueList.
    +   * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + java.util.List getValueOrBuilderList(); + + /** + * + * + *
    +   * Values contained in this ValueList.
    +   * 
    + * + * repeated .google.spanner.executor.v1.Value value = 1; + */ + com.google.spanner.executor.v1.ValueOrBuilder getValueOrBuilder(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ValueOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ValueOrBuilder.java new file mode 100644 index 000000000000..28d08170bf81 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ValueOrBuilder.java @@ -0,0 +1,409 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface ValueOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.Value) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * If is_null is set, then this value is null.
    +   * 
    + * + * bool is_null = 1; + * + * @return Whether the isNull field is set. + */ + boolean hasIsNull(); + + /** + * + * + *
    +   * If is_null is set, then this value is null.
    +   * 
    + * + * bool is_null = 1; + * + * @return The isNull. + */ + boolean getIsNull(); + + /** + * + * + *
    +   * Int type value. It's used for all integer number types, like int32 and
    +   * int64.
    +   * 
    + * + * int64 int_value = 2; + * + * @return Whether the intValue field is set. + */ + boolean hasIntValue(); + + /** + * + * + *
    +   * Int type value. It's used for all integer number types, like int32 and
    +   * int64.
    +   * 
    + * + * int64 int_value = 2; + * + * @return The intValue. + */ + long getIntValue(); + + /** + * + * + *
    +   * Bool type value.
    +   * 
    + * + * bool bool_value = 3; + * + * @return Whether the boolValue field is set. + */ + boolean hasBoolValue(); + + /** + * + * + *
    +   * Bool type value.
    +   * 
    + * + * bool bool_value = 3; + * + * @return The boolValue. + */ + boolean getBoolValue(); + + /** + * + * + *
    +   * Double type value. It's used for all float point types, like float and
    +   * double.
    +   * 
    + * + * double double_value = 4; + * + * @return Whether the doubleValue field is set. + */ + boolean hasDoubleValue(); + + /** + * + * + *
    +   * Double type value. It's used for all float point types, like float and
    +   * double.
    +   * 
    + * + * double double_value = 4; + * + * @return The doubleValue. + */ + double getDoubleValue(); + + /** + * + * + *
    +   * Bytes type value, stored in CORD. It's also used for PROTO type value.
    +   * 
    + * + * bytes bytes_value = 5; + * + * @return Whether the bytesValue field is set. + */ + boolean hasBytesValue(); + + /** + * + * + *
    +   * Bytes type value, stored in CORD. It's also used for PROTO type value.
    +   * 
    + * + * bytes bytes_value = 5; + * + * @return The bytesValue. + */ + com.google.protobuf.ByteString getBytesValue(); + + /** + * + * + *
    +   * String type value, stored in CORD.
    +   * 
    + * + * string string_value = 6; + * + * @return Whether the stringValue field is set. + */ + boolean hasStringValue(); + + /** + * + * + *
    +   * String type value, stored in CORD.
    +   * 
    + * + * string string_value = 6; + * + * @return The stringValue. + */ + java.lang.String getStringValue(); + + /** + * + * + *
    +   * String type value, stored in CORD.
    +   * 
    + * + * string string_value = 6; + * + * @return The bytes for stringValue. + */ + com.google.protobuf.ByteString getStringValueBytes(); + + /** + * + * + *
    +   * Struct type value. It contains a ValueList representing the values in
    +   * this struct.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList struct_value = 7; + * + * @return Whether the structValue field is set. + */ + boolean hasStructValue(); + + /** + * + * + *
    +   * Struct type value. It contains a ValueList representing the values in
    +   * this struct.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList struct_value = 7; + * + * @return The structValue. + */ + com.google.spanner.executor.v1.ValueList getStructValue(); + + /** + * + * + *
    +   * Struct type value. It contains a ValueList representing the values in
    +   * this struct.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList struct_value = 7; + */ + com.google.spanner.executor.v1.ValueListOrBuilder getStructValueOrBuilder(); + + /** + * + * + *
    +   * Timestamp type value.
    +   * 
    + * + * .google.protobuf.Timestamp timestamp_value = 8; + * + * @return Whether the timestampValue field is set. + */ + boolean hasTimestampValue(); + + /** + * + * + *
    +   * Timestamp type value.
    +   * 
    + * + * .google.protobuf.Timestamp timestamp_value = 8; + * + * @return The timestampValue. + */ + com.google.protobuf.Timestamp getTimestampValue(); + + /** + * + * + *
    +   * Timestamp type value.
    +   * 
    + * + * .google.protobuf.Timestamp timestamp_value = 8; + */ + com.google.protobuf.TimestampOrBuilder getTimestampValueOrBuilder(); + + /** + * + * + *
    +   * Date type value. Date is specified as a number of days since Unix epoch.
    +   * 
    + * + * int32 date_days_value = 9; + * + * @return Whether the dateDaysValue field is set. + */ + boolean hasDateDaysValue(); + + /** + * + * + *
    +   * Date type value. Date is specified as a number of days since Unix epoch.
    +   * 
    + * + * int32 date_days_value = 9; + * + * @return The dateDaysValue. + */ + int getDateDaysValue(); + + /** + * + * + *
    +   * If set, holds the sentinel value for the transaction CommitTimestamp.
    +   * 
    + * + * bool is_commit_timestamp = 10; + * + * @return Whether the isCommitTimestamp field is set. + */ + boolean hasIsCommitTimestamp(); + + /** + * + * + *
    +   * If set, holds the sentinel value for the transaction CommitTimestamp.
    +   * 
    + * + * bool is_commit_timestamp = 10; + * + * @return The isCommitTimestamp. + */ + boolean getIsCommitTimestamp(); + + /** + * + * + *
    +   * Array type value. The underlying Valuelist should have values that have
    +   * the same type.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList array_value = 11; + * + * @return Whether the arrayValue field is set. + */ + boolean hasArrayValue(); + + /** + * + * + *
    +   * Array type value. The underlying Valuelist should have values that have
    +   * the same type.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList array_value = 11; + * + * @return The arrayValue. + */ + com.google.spanner.executor.v1.ValueList getArrayValue(); + + /** + * + * + *
    +   * Array type value. The underlying Valuelist should have values that have
    +   * the same type.
    +   * 
    + * + * .google.spanner.executor.v1.ValueList array_value = 11; + */ + com.google.spanner.executor.v1.ValueListOrBuilder getArrayValueOrBuilder(); + + /** + * + * + *
    +   * Type of array element. Only set if value is an array.
    +   * 
    + * + * optional .google.spanner.v1.Type array_type = 12; + * + * @return Whether the arrayType field is set. + */ + boolean hasArrayType(); + + /** + * + * + *
    +   * Type of array element. Only set if value is an array.
    +   * 
    + * + * optional .google.spanner.v1.Type array_type = 12; + * + * @return The arrayType. + */ + com.google.spanner.v1.Type getArrayType(); + + /** + * + * + *
    +   * Type of array element. Only set if value is an array.
    +   * 
    + * + * optional .google.spanner.v1.Type array_type = 12; + */ + com.google.spanner.v1.TypeOrBuilder getArrayTypeOrBuilder(); + + com.google.spanner.executor.v1.Value.ValueTypeCase getValueTypeCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/WriteMutationsAction.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/WriteMutationsAction.java new file mode 100644 index 000000000000..572253538a3d --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/WriteMutationsAction.java @@ -0,0 +1,701 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +/** + * + * + *
    + * WriteMutationAction defines an action of flushing the mutation so they
    + * are visible to subsequent operations in the transaction.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.WriteMutationsAction} + */ +@com.google.protobuf.Generated +public final class WriteMutationsAction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.WriteMutationsAction) + WriteMutationsActionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "WriteMutationsAction"); + } + + // Use WriteMutationsAction.newBuilder() to construct. + private WriteMutationsAction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private WriteMutationsAction() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_WriteMutationsAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_WriteMutationsAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.WriteMutationsAction.class, + com.google.spanner.executor.v1.WriteMutationsAction.Builder.class); + } + + private int bitField0_; + public static final int MUTATION_FIELD_NUMBER = 1; + private com.google.spanner.executor.v1.MutationAction mutation_; + + /** + * + * + *
    +   * The mutation to write.
    +   * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 1; + * + * @return Whether the mutation field is set. + */ + @java.lang.Override + public boolean hasMutation() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The mutation to write.
    +   * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 1; + * + * @return The mutation. + */ + @java.lang.Override + public com.google.spanner.executor.v1.MutationAction getMutation() { + return mutation_ == null + ? com.google.spanner.executor.v1.MutationAction.getDefaultInstance() + : mutation_; + } + + /** + * + * + *
    +   * The mutation to write.
    +   * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 1; + */ + @java.lang.Override + public com.google.spanner.executor.v1.MutationActionOrBuilder getMutationOrBuilder() { + return mutation_ == null + ? com.google.spanner.executor.v1.MutationAction.getDefaultInstance() + : mutation_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getMutation()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getMutation()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.WriteMutationsAction)) { + return super.equals(obj); + } + com.google.spanner.executor.v1.WriteMutationsAction other = + (com.google.spanner.executor.v1.WriteMutationsAction) obj; + + if (hasMutation() != other.hasMutation()) return false; + if (hasMutation()) { + if (!getMutation().equals(other.getMutation())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasMutation()) { + hash = 
(37 * hash) + MUTATION_FIELD_NUMBER; + hash = (53 * hash) + getMutation().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.WriteMutationsAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.WriteMutationsAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.WriteMutationsAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.WriteMutationsAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.WriteMutationsAction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.WriteMutationsAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.WriteMutationsAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.WriteMutationsAction parseFrom( + java.io.InputStream 
input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.WriteMutationsAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.WriteMutationsAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.WriteMutationsAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.WriteMutationsAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.executor.v1.WriteMutationsAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * WriteMutationAction defines an action of flushing the mutation so they
    +   * are visible to subsequent operations in the transaction.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.WriteMutationsAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.WriteMutationsAction) + com.google.spanner.executor.v1.WriteMutationsActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_WriteMutationsAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_WriteMutationsAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.WriteMutationsAction.class, + com.google.spanner.executor.v1.WriteMutationsAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.WriteMutationsAction.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetMutationFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + mutation_ = null; + if (mutationBuilder_ != null) { + mutationBuilder_.dispose(); + mutationBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_WriteMutationsAction_descriptor; + } + + @java.lang.Override + public 
com.google.spanner.executor.v1.WriteMutationsAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.WriteMutationsAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.WriteMutationsAction build() { + com.google.spanner.executor.v1.WriteMutationsAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.WriteMutationsAction buildPartial() { + com.google.spanner.executor.v1.WriteMutationsAction result = + new com.google.spanner.executor.v1.WriteMutationsAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.WriteMutationsAction result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.mutation_ = mutationBuilder_ == null ? 
mutation_ : mutationBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.WriteMutationsAction) { + return mergeFrom((com.google.spanner.executor.v1.WriteMutationsAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.WriteMutationsAction other) { + if (other == com.google.spanner.executor.v1.WriteMutationsAction.getDefaultInstance()) + return this; + if (other.hasMutation()) { + mergeMutation(other.getMutation()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetMutationFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.executor.v1.MutationAction mutation_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.MutationAction, + 
com.google.spanner.executor.v1.MutationAction.Builder, + com.google.spanner.executor.v1.MutationActionOrBuilder> + mutationBuilder_; + + /** + * + * + *
    +     * The mutation to write.
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 1; + * + * @return Whether the mutation field is set. + */ + public boolean hasMutation() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * The mutation to write.
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 1; + * + * @return The mutation. + */ + public com.google.spanner.executor.v1.MutationAction getMutation() { + if (mutationBuilder_ == null) { + return mutation_ == null + ? com.google.spanner.executor.v1.MutationAction.getDefaultInstance() + : mutation_; + } else { + return mutationBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The mutation to write.
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 1; + */ + public Builder setMutation(com.google.spanner.executor.v1.MutationAction value) { + if (mutationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + mutation_ = value; + } else { + mutationBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The mutation to write.
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 1; + */ + public Builder setMutation( + com.google.spanner.executor.v1.MutationAction.Builder builderForValue) { + if (mutationBuilder_ == null) { + mutation_ = builderForValue.build(); + } else { + mutationBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The mutation to write.
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 1; + */ + public Builder mergeMutation(com.google.spanner.executor.v1.MutationAction value) { + if (mutationBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && mutation_ != null + && mutation_ != com.google.spanner.executor.v1.MutationAction.getDefaultInstance()) { + getMutationBuilder().mergeFrom(value); + } else { + mutation_ = value; + } + } else { + mutationBuilder_.mergeFrom(value); + } + if (mutation_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The mutation to write.
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 1; + */ + public Builder clearMutation() { + bitField0_ = (bitField0_ & ~0x00000001); + mutation_ = null; + if (mutationBuilder_ != null) { + mutationBuilder_.dispose(); + mutationBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The mutation to write.
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 1; + */ + public com.google.spanner.executor.v1.MutationAction.Builder getMutationBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetMutationFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The mutation to write.
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 1; + */ + public com.google.spanner.executor.v1.MutationActionOrBuilder getMutationOrBuilder() { + if (mutationBuilder_ != null) { + return mutationBuilder_.getMessageOrBuilder(); + } else { + return mutation_ == null + ? com.google.spanner.executor.v1.MutationAction.getDefaultInstance() + : mutation_; + } + } + + /** + * + * + *
    +     * The mutation to write.
    +     * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.MutationAction, + com.google.spanner.executor.v1.MutationAction.Builder, + com.google.spanner.executor.v1.MutationActionOrBuilder> + internalGetMutationFieldBuilder() { + if (mutationBuilder_ == null) { + mutationBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.executor.v1.MutationAction, + com.google.spanner.executor.v1.MutationAction.Builder, + com.google.spanner.executor.v1.MutationActionOrBuilder>( + getMutation(), getParentForChildren(), isClean()); + mutation_ = null; + } + return mutationBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.WriteMutationsAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.WriteMutationsAction) + private static final com.google.spanner.executor.v1.WriteMutationsAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.WriteMutationsAction(); + } + + public static com.google.spanner.executor.v1.WriteMutationsAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public WriteMutationsAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.WriteMutationsAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/WriteMutationsActionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/WriteMutationsActionOrBuilder.java new file mode 100644 index 000000000000..f8a67ee7a2b7 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/WriteMutationsActionOrBuilder.java @@ -0,0 +1,65 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/executor/v1/cloud_executor.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.executor.v1; + +@com.google.protobuf.Generated +public interface WriteMutationsActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.WriteMutationsAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The mutation to write.
    +   * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 1; + * + * @return Whether the mutation field is set. + */ + boolean hasMutation(); + + /** + * + * + *
    +   * The mutation to write.
    +   * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 1; + * + * @return The mutation. + */ + com.google.spanner.executor.v1.MutationAction getMutation(); + + /** + * + * + *
    +   * The mutation to write.
    +   * 
    + * + * .google.spanner.executor.v1.MutationAction mutation = 1; + */ + com.google.spanner.executor.v1.MutationActionOrBuilder getMutationOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/proto/google/spanner/executor/v1/cloud_executor.proto b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/proto/google/spanner/executor/v1/cloud_executor.proto new file mode 100644 index 000000000000..5ca3b25ac2a2 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-executor-v1/src/main/proto/google/spanner/executor/v1/cloud_executor.proto @@ -0,0 +1,1604 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.spanner.executor.v1; + +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; +import "google/spanner/admin/database/v1/backup.proto"; +import "google/spanner/admin/database/v1/common.proto"; +import "google/spanner/admin/database/v1/spanner_database_admin.proto"; +import "google/spanner/admin/instance/v1/spanner_instance_admin.proto"; +import "google/spanner/v1/spanner.proto"; +import "google/spanner/v1/type.proto"; + +option go_package = "cloud.google.com/go/spanner/executor/apiv1/executorpb;executorpb"; +option java_multiple_files = true; +option java_outer_classname = "CloudExecutorProto"; +option java_package = "com.google.spanner.executor.v1"; + +// Service that executes SpannerActions asynchronously. +service SpannerExecutorProxy { + option (google.api.default_host) = "spanner-cloud-executor.googleapis.com"; + + // ExecuteActionAsync is a streaming call that starts executing a new Spanner + // action. + // + // For each request, the server will reply with one or more responses, but + // only the last response will contain status in the outcome. + // + // Responses can be matched to requests by action_id. It is allowed to have + // multiple actions in flight--in that case, actions are be executed in + // parallel. + rpc ExecuteActionAsync(stream SpannerAsyncActionRequest) + returns (stream SpannerAsyncActionResponse) {} +} + +// Request to executor service that start a new Spanner action. +message SpannerAsyncActionRequest { + // Action id to uniquely identify this action request. + int32 action_id = 1; + + // The actual SpannerAction to perform. + SpannerAction action = 2; +} + +// Response from executor service. +message SpannerAsyncActionResponse { + // Action id corresponds to the request. 
+ int32 action_id = 1; + + // If action results are split into multiple responses, only the last response + // can and should contain status. + SpannerActionOutcome outcome = 2; +} + +// SpannerAction defines a primitive action that can be performed against +// Spanner, such as begin or commit a transaction, or perform a read or +// mutation. +message SpannerAction { + // Database against which to perform action. + // In a context where a series of actions take place, an action may omit + // database path if it applies to the same database as the previous action. + string database_path = 1; + + // Configuration options for Spanner backend + SpannerOptions spanner_options = 2; + + // Action represents a spanner action kind, there will only be one action kind + // per SpannerAction. + oneof action { + // Action to start a transaction. + StartTransactionAction start = 10; + + // Action to finish a transaction. + FinishTransactionAction finish = 11; + + // Action to do a normal read. + ReadAction read = 20; + + // Action to do a query. + QueryAction query = 21; + + // Action to buffer a mutation. + MutationAction mutation = 22; + + // Action to a DML. + DmlAction dml = 23; + + // Action to a batch DML. + BatchDmlAction batch_dml = 24; + + // Action to write a mutation. + WriteMutationsAction write = 25; + + // Action to a partitioned update. + PartitionedUpdateAction partitioned_update = 27; + + // Action that contains any administrative operation, like database, + // instance manipulation. + AdminAction admin = 30; + + // Action to start a batch transaction. + StartBatchTransactionAction start_batch_txn = 40; + + // Action to close a batch transaction. + CloseBatchTransactionAction close_batch_txn = 41; + + // Action to generate database partitions for batch read. + GenerateDbPartitionsForReadAction generate_db_partitions_read = 42; + + // Action to generate database partitions for batch query. 
+ GenerateDbPartitionsForQueryAction generate_db_partitions_query = 43; + + // Action to execute batch actions on generated partitions. + ExecutePartitionAction execute_partition = 44; + + // Action to execute change stream query. + ExecuteChangeStreamQuery execute_change_stream_query = 50; + + // Query cancellation action for testing the cancellation of a query. + QueryCancellationAction query_cancellation = 51; + + // Action to adapt a message. + AdaptMessageAction adapt_message = 52; + } +} + +// A single read request. +message ReadAction { + // The table to read at. + string table = 1; + + // The index to read at if it's an index read. + optional string index = 2; + + // List of columns must begin with the key columns used for the read. + repeated string column = 3; + + // Keys for performing this read. + KeySet keys = 4; + + // Limit on number of rows to read. If set, must be positive. + int32 limit = 5; +} + +// A SQL query request. +message QueryAction { + // Parameter that bind to placeholders in the SQL string + message Parameter { + // Name of the parameter (with no leading @). + string name = 1; + + // Type of the parameter. + google.spanner.v1.Type type = 2; + + // Value of the parameter. + Value value = 3; + } + + // The SQL string. + string sql = 1; + + // Parameters for the SQL string. + repeated Parameter params = 2; +} + +// A single DML statement. +message DmlAction { + // DML statement. + QueryAction update = 1; + + // Whether to autocommit the transaction after executing the DML statement, + // if the Executor supports autocommit. + optional bool autocommit_if_supported = 2; + + // Whether to set this DML statement as the last statement in the + // transaction. The transaction should be committed after processing this DML + // statement. + optional bool last_statement = 3; +} + +// Batch of DML statements invoked using batched execution. +message BatchDmlAction { + // DML statements. 
+ repeated QueryAction updates = 1; + + // Whether to set this request with the last statement option in the + // transaction. The transaction should be committed after processing this + // request. + optional bool last_statements = 2; +} + +// Value represents a single value that can be read or written to/from +// Spanner. +message Value { + // Exactly one of the following fields will be present. + oneof value_type { + // If is_null is set, then this value is null. + bool is_null = 1; + + // Int type value. It's used for all integer number types, like int32 and + // int64. + int64 int_value = 2; + + // Bool type value. + bool bool_value = 3; + + // Double type value. It's used for all float point types, like float and + // double. + double double_value = 4; + + // Bytes type value, stored in CORD. It's also used for PROTO type value. + bytes bytes_value = 5; + + // String type value, stored in CORD. + string string_value = 6; + + // Struct type value. It contains a ValueList representing the values in + // this struct. + ValueList struct_value = 7; + + // Timestamp type value. + google.protobuf.Timestamp timestamp_value = 8; + + // Date type value. Date is specified as a number of days since Unix epoch. + int32 date_days_value = 9; + + // If set, holds the sentinel value for the transaction CommitTimestamp. + bool is_commit_timestamp = 10; + + // Array type value. The underlying Valuelist should have values that have + // the same type. + ValueList array_value = 11; + } + + // Type of array element. Only set if value is an array. + optional google.spanner.v1.Type array_type = 12; +} + +// KeyRange represents a range of rows in a table or index. +// +// A range has a start key and an end key. These keys can be open or +// closed, indicating if the range includes rows with that key. +// +// Keys are represented by "ValueList", where the ith value in the list +// corresponds to the ith component of the table or index primary key. 
+message KeyRange { + // Type controls whether "start" and "limit" are open or closed. By default, + // "start" is closed, and "limit" is open. + enum Type { + // "TYPE_UNSPECIFIED" is equivalent to "CLOSED_OPEN". + TYPE_UNSPECIFIED = 0; + + // [start,limit] + CLOSED_CLOSED = 1; + + // [start,limit) + CLOSED_OPEN = 2; + + // (start,limit] + OPEN_CLOSED = 3; + + // (start,limit) + OPEN_OPEN = 4; + } + + // "start" and "limit" must have the same number of key parts, + // though they may name only a prefix of the table or index key. + // The start key of this KeyRange. + ValueList start = 1; + + // The end key of this KeyRange. + ValueList limit = 2; + + // "start" and "limit" type for this KeyRange. + optional Type type = 3; +} + +// KeySet defines a collection of Spanner keys and/or key ranges. All +// the keys are expected to be in the same table. The keys need not be +// sorted in any particular way. +message KeySet { + // A list of specific keys. Entries in "keys" should have exactly as + // many elements as there are columns in the primary or index key + // with which this "KeySet" is used. + repeated ValueList point = 1; + + // A list of key ranges. + repeated KeyRange range = 2; + + // For convenience "all" can be set to "true" to indicate that this + // "KeySet" matches all keys in the table or index. Note that any keys + // specified in "keys" or "ranges" are only yielded once. + bool all = 3; +} + +// List of values. +message ValueList { + // Values contained in this ValueList. + repeated Value value = 1; +} + +// A single mutation request. +message MutationAction { + // Arguments to Insert, InsertOrUpdate, and Replace operations. + message InsertArgs { + // The names of the columns to be written. + repeated string column = 1; + + // Type information for the "values" entries below. + repeated google.spanner.v1.Type type = 2; + + // The values to be written. + repeated ValueList values = 3; + } + + // Arguments to Update. 
+ message UpdateArgs { + // The columns to be updated. Identical to InsertArgs.column. + repeated string column = 1; + + // Type information for "values". Identical to InsertArgs.type. + repeated google.spanner.v1.Type type = 2; + + // The values to be updated. Identical to InsertArgs.values. + repeated ValueList values = 3; + } + + // Mod represents the write action that will be perform to a table. Each mod + // will specify exactly one action, from insert, update, insert_or_update, + // replace and delete. + message Mod { + // The table to write. + string table = 1; + + // Exactly one of the remaining elements may be present. + // Insert new rows into "table". + InsertArgs insert = 2; + + // Update columns stored in existing rows of "table". + UpdateArgs update = 3; + + // Insert or update existing rows of "table". + InsertArgs insert_or_update = 4; + + // Replace existing rows of "table". + InsertArgs replace = 5; + + // Delete rows from "table". + KeySet delete_keys = 6; + } + + // Mods that contained in this mutation. + repeated Mod mod = 1; +} + +// WriteMutationAction defines an action of flushing the mutation so they +// are visible to subsequent operations in the transaction. +message WriteMutationsAction { + // The mutation to write. + MutationAction mutation = 1; +} + +// PartitionedUpdateAction defines an action to execute a partitioned DML +// which runs different partitions in parallel. +message PartitionedUpdateAction { + message ExecutePartitionedUpdateOptions { + // RPC Priority + optional google.spanner.v1.RequestOptions.Priority rpc_priority = 1; + + // Transaction tag + optional string tag = 2; + } + + // Options for partitioned update. + optional ExecutePartitionedUpdateOptions options = 1; + + // Partitioned dml query. + QueryAction update = 2; +} + +// StartTransactionAction defines an action of initializing a transaction. 
+message StartTransactionAction { + // Concurrency is for read-only transactions and must be omitted for + // read-write transactions. + optional Concurrency concurrency = 1; + + // Metadata about tables and columns that will be involved in this + // transaction. It is to convert values of key parts correctly. + repeated TableMetadata table = 2; + + // Transaction_seed contains workid and op pair for this transaction, used for + // testing. + string transaction_seed = 3; + + // Execution options (e.g., whether transaction is opaque, optimistic, + // excluded from change streams). + optional TransactionExecutionOptions execution_options = 4; +} + +// Concurrency for read-only transactions. +message Concurrency { + // Concurrency mode set for read-only transactions, exactly one mode below + // should be set. + oneof concurrency_mode { + // Indicates a read at a consistent timestamp that is specified relative to + // now. That is, if the caller has specified an exact staleness of s + // seconds, we will read at now - s. + double staleness_seconds = 1; + + // Indicates a boundedly stale read that reads at a timestamp >= T. + int64 min_read_timestamp_micros = 2; + + // Indicates a boundedly stale read that is at most N seconds stale. + double max_staleness_seconds = 3; + + // Indicates a read at a consistent timestamp. + int64 exact_timestamp_micros = 4; + + // Indicates a strong read, must only be set to true, or unset. + bool strong = 5; + + // Indicates a batch read, must only be set to true, or unset. + bool batch = 6; + } + + // True if exact_timestamp_micros is set, and the chosen timestamp is that of + // a snapshot epoch. + bool snapshot_epoch_read = 7; + + // If set, this is a snapshot epoch read constrained to read only the + // specified log scope root table, and its children. Will not be set for full + // database epochs. + string snapshot_epoch_root_table = 8; + + // Set only when batch is true. 
+ int64 batch_read_timestamp_micros = 9; +} + +// TableMetadata contains metadata of a single table. +message TableMetadata { + // Table name. + string name = 1; + + // Columns, in the same order as in the schema. + repeated ColumnMetadata column = 2; + + // Keys, in order. Column name is currently not populated. + repeated ColumnMetadata key_column = 3; +} + +// ColumnMetadata represents metadata of a single column. +message ColumnMetadata { + // Column name. + string name = 1; + + // Column type. + google.spanner.v1.Type type = 2; +} + +message TransactionExecutionOptions { + // Whether optimistic concurrency should be used to execute this transaction. + bool optimistic = 1; + + // Whether traffic from this transaction will be excluded from tracking change + // streams with allow_txn_exclusion=true. + bool exclude_from_change_streams = 2; + + // Whether serializable isolation with optimistic mode concurrency should be + // used to execute this transaction. + bool serializable_optimistic = 3; + + // Whether snapshot isolation with optimistic mode concurrency should be used + // to execute this transaction. + bool snapshot_isolation_optimistic = 4; + + // Whether snapshot isolation with pessimistic mode concurrency should be used + // to execute this transaction. + bool snapshot_isolation_pessimistic = 5; + + // Whether to exclude mutations of this transaction from the allowed tracking + // change streams. + bool exclude_txn_from_change_streams = 6; +} + +// FinishTransactionAction defines an action of finishing a transaction. +message FinishTransactionAction { + // Mode indicates how the transaction should be finished. + enum Mode { + // "MODE_UNSPECIFIED" is equivalent to "COMMIT". + MODE_UNSPECIFIED = 0; + + // Commit the transaction. + COMMIT = 1; + + // Drop the transaction without committing it. + ABANDON = 2; + } + + // Defines how exactly the transaction should be completed, e.g. with + // commit or abortion. 
+ Mode mode = 1; +} + +// AdminAction defines all the cloud spanner admin actions, including +// instance/database admin ops, backup ops and operation actions. +message AdminAction { + // Exactly one of the actions below will be performed in AdminAction. + oneof action { + // Action that creates a user instance config. + CreateUserInstanceConfigAction create_user_instance_config = 1; + + // Action that updates a user instance config. + UpdateUserInstanceConfigAction update_user_instance_config = 2; + + // Action that deletes a user instance config. + DeleteUserInstanceConfigAction delete_user_instance_config = 3; + + // Action that gets a user instance config. + GetCloudInstanceConfigAction get_cloud_instance_config = 4; + + // Action that lists user instance configs. + ListCloudInstanceConfigsAction list_instance_configs = 5; + + // Action that creates a Cloud Spanner instance. + CreateCloudInstanceAction create_cloud_instance = 6; + + // Action that updates a Cloud Spanner instance. + UpdateCloudInstanceAction update_cloud_instance = 7; + + // Action that deletes a Cloud Spanner instance. + DeleteCloudInstanceAction delete_cloud_instance = 8; + + // Action that lists Cloud Spanner instances. + ListCloudInstancesAction list_cloud_instances = 9; + + // Action that retrieves a Cloud Spanner instance. + GetCloudInstanceAction get_cloud_instance = 10; + + // Action that creates a Cloud Spanner database. + CreateCloudDatabaseAction create_cloud_database = 11; + + // Action that updates the schema of a Cloud Spanner database. + UpdateCloudDatabaseDdlAction update_cloud_database_ddl = 12; + + // Action that updates the schema of a Cloud Spanner database. + UpdateCloudDatabaseAction update_cloud_database = 27; + + // Action that drops a Cloud Spanner database. + DropCloudDatabaseAction drop_cloud_database = 13; + + // Action that lists Cloud Spanner databases. + ListCloudDatabasesAction list_cloud_databases = 14; + + // Action that lists Cloud Spanner database operations. 
+ ListCloudDatabaseOperationsAction list_cloud_database_operations = 15; + + // Action that restores a Cloud Spanner database from a backup. + RestoreCloudDatabaseAction restore_cloud_database = 16; + + // Action that gets a Cloud Spanner database. + GetCloudDatabaseAction get_cloud_database = 17; + + // Action that creates a Cloud Spanner database backup. + CreateCloudBackupAction create_cloud_backup = 18; + + // Action that copies a Cloud Spanner database backup. + CopyCloudBackupAction copy_cloud_backup = 19; + + // Action that gets a Cloud Spanner database backup. + GetCloudBackupAction get_cloud_backup = 20; + + // Action that updates a Cloud Spanner database backup. + UpdateCloudBackupAction update_cloud_backup = 21; + + // Action that deletes a Cloud Spanner database backup. + DeleteCloudBackupAction delete_cloud_backup = 22; + + // Action that lists Cloud Spanner database backups. + ListCloudBackupsAction list_cloud_backups = 23; + + // Action that lists Cloud Spanner database backup operations. + ListCloudBackupOperationsAction list_cloud_backup_operations = 24; + + // Action that gets an operation. + GetOperationAction get_operation = 25; + + // Action that cancels an operation. + CancelOperationAction cancel_operation = 26; + + // Action that changes quorum of a Cloud Spanner database. + ChangeQuorumCloudDatabaseAction change_quorum_cloud_database = 28; + + // Action that adds splits to a Cloud Spanner database. + AddSplitPointsAction add_split_points = 29; + } +} + +// Action that creates a user instance config. +message CreateUserInstanceConfigAction { + // User instance config ID (not path), e.g. "custom-config". + string user_config_id = 1; + + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 2; + + // Base config ID, e.g. "test-config". + string base_config_id = 3; + + // Replicas that should be included in the user config. 
+ repeated google.spanner.admin.instance.v1.ReplicaInfo replicas = 4; +} + +// Action that updates a user instance config. +message UpdateUserInstanceConfigAction { + // User instance config ID (not path), e.g. "custom-config". + string user_config_id = 1; + + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 2; + + // The descriptive name for this instance config as it appears in UIs. + optional string display_name = 3; + + // labels. + map labels = 4; +} + +// Action that gets a user instance config. +message GetCloudInstanceConfigAction { + // Instance config ID (not path), e.g. "custom-config". + string instance_config_id = 1; + + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 2; +} + +// Action that deletes a user instance configs. +message DeleteUserInstanceConfigAction { + // User instance config ID (not path), e.g. "custom-config". + string user_config_id = 1; + + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 2; +} + +// Action that lists user instance configs. +message ListCloudInstanceConfigsAction { + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 1; + + // Number of instance configs to be returned in the response. If 0 or + // less, defaults to the server's maximum allowed page size. + optional int32 page_size = 2; + + // If non-empty, "page_token" should contain a next_page_token + // from a previous ListInstanceConfigsResponse to the same "parent". + optional string page_token = 3; +} + +// Action that creates a Cloud Spanner instance. +message CreateCloudInstanceAction { + // Cloud instance ID (not path), e.g. "test-instance". + string instance_id = 1; + + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 2; + + // Instance config ID, e.g. "test-config". + string instance_config_id = 3; + + // Number of nodes (processing_units should not be set or set to 0 if used). 
+ optional int32 node_count = 4; + + // Number of processing units (node_count should be set to 0 if used). + optional int32 processing_units = 6; + + // The autoscaling config for this instance. If non-empty, an autoscaling + // instance will be created (processing_units and node_count should be set to + // 0 if used). + optional google.spanner.admin.instance.v1.AutoscalingConfig + autoscaling_config = 7; + + // labels. + map labels = 5; + + // The edition of the instance. + google.spanner.admin.instance.v1.Instance.Edition edition = 8; +} + +// Action that updates a Cloud Spanner instance. +message UpdateCloudInstanceAction { + // Cloud instance ID (not path), e.g. "test-instance". + string instance_id = 1; + + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 2; + + // The descriptive name for this instance as it appears in UIs. + // Must be unique per project and between 4 and 30 characters in length. + optional string display_name = 3; + + // The number of nodes allocated to this instance. At most one of either + // node_count or processing_units should be present in the message. + optional int32 node_count = 4; + + // The number of processing units allocated to this instance. At most one of + // processing_units or node_count should be present in the message. + optional int32 processing_units = 5; + + // The autoscaling config for this instance. If non-empty, this instance is + // using autoscaling (processing_units and node_count should be set to + // 0 if used). + optional google.spanner.admin.instance.v1.AutoscalingConfig + autoscaling_config = 7; + + // labels. + map labels = 6; + + // The edition of the instance. + google.spanner.admin.instance.v1.Instance.Edition edition = 8; +} + +// Action that deletes a Cloud Spanner instance. +message DeleteCloudInstanceAction { + // Cloud instance ID (not path), e.g. "test-instance". + string instance_id = 1; + + // Cloud project ID, e.g. "spanner-cloud-systest". 
+ string project_id = 2; +} + +// Action that creates a Cloud Spanner database. +message CreateCloudDatabaseAction { + // Cloud instance ID (not path), e.g. "test-instance". + string instance_id = 1; + + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 2; + + // Cloud database ID (not full path), e.g. "db0". + string database_id = 3; + + // SDL statements to apply to the new database. + repeated string sdl_statement = 4; + + // The KMS key used to encrypt the database to be created if the database + // should be CMEK protected. + google.spanner.admin.database.v1.EncryptionConfig encryption_config = 5; + + // Optional SQL dialect (GOOGLESQL or POSTGRESQL). Default: GOOGLESQL. + optional string dialect = 6; + + optional bytes proto_descriptors = 7; +} + +// Action that updates the schema of a Cloud Spanner database. +message UpdateCloudDatabaseDdlAction { + // Cloud instance ID (not path), e.g. "test-instance". + string instance_id = 1; + + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 2; + + // Cloud database ID (not full path), e.g. "db0". + string database_id = 3; + + // SDL statements to apply to the database. + repeated string sdl_statement = 4; + + // Op ID can be used to track progress of the update. If set, it must be + // unique per database. If not set, Cloud Spanner will generate operation ID + // automatically. + string operation_id = 5; + + optional bytes proto_descriptors = 6; +} + +// Action that updates a Cloud Spanner database. +message UpdateCloudDatabaseAction { + // Cloud instance ID (not path), e.g. "test-instance". + string instance_id = 1; + + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 2; + + // Cloud database name (not full path), e.g. "db0". + string database_name = 3; + + // Updated value of enable_drop_protection, this is the only field that is + // supported to be updated. + bool enable_drop_protection = 4; +} + +// Action that drops a Cloud Spanner database. 
+message DropCloudDatabaseAction { + // Cloud instance ID (not path), e.g. "test-instance". + string instance_id = 1; + + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 2; + + // Cloud database ID (not full path), e.g. "db0". + string database_id = 3; +} + +// Action that changes quorum of a Cloud Spanner database. +message ChangeQuorumCloudDatabaseAction { + // The fully qualified uri of the database whose quorum has to be changed. + optional string database_uri = 1; + + // The locations of the serving regions, e.g. "asia-south1". + repeated string serving_locations = 2; +} + +// A single Adapt message request. +message AdaptMessageAction { + // The fully qualified uri of the database to send AdaptMessage to. + string database_uri = 1; + + // The protocol to use for the request. + string protocol = 2; + + // The payload of the request. + bytes payload = 3; + + // Attachments to be sent with the request. + map attachments = 4; + + // The query to be sent with the request. + string query = 5; + + // If true, the action will send a Prepare request first and then an + // Execute request right after to execute the query. This is only supported + // for Cloud Client path. + bool prepare_then_execute = 6; +} + +// Action that lists Cloud Spanner databases. +message ListCloudDatabasesAction { + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 1; + + // Cloud instance ID (not path) to list databases from, e.g. "test-instance". + string instance_id = 2; + + // Number of databases to be returned in the response. If 0 or + // less, defaults to the server's maximum allowed page size. + int32 page_size = 3; + + // If non-empty, "page_token" should contain a next_page_token + // from a previous ListDatabasesResponse to the same "parent" + // and with the same "filter". + string page_token = 4; +} + +// Action that lists Cloud Spanner instances. +message ListCloudInstancesAction { + // Cloud project ID, e.g. 
"spanner-cloud-systest". + string project_id = 1; + + // A filter expression that filters what operations are returned in the + // response. + // The expression must specify the field name, a comparison operator, + // and the value that you want to use for filtering. + // Refer spanner_instance_admin.proto.ListInstancesRequest for + // detail. + optional string filter = 2; + + // Number of instances to be returned in the response. If 0 or + // less, defaults to the server's maximum allowed page size. + optional int32 page_size = 3; + + // If non-empty, "page_token" should contain a next_page_token + // from a previous ListInstancesResponse to the same "parent" + // and with the same "filter". + optional string page_token = 4; +} + +// Action that retrieves a Cloud Spanner instance. +message GetCloudInstanceAction { + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 1; + + // Cloud instance ID (not path) to retrieve the instance from, + // e.g. "test-instance". + string instance_id = 2; +} + +// Action that lists Cloud Spanner database operations. +message ListCloudDatabaseOperationsAction { + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 1; + + // Cloud instance ID (not path) to list database operations from, + // e.g. "test-instance". + string instance_id = 2; + + // A filter expression that filters what operations are returned in the + // response. + // The expression must specify the field name, a comparison operator, + // and the value that you want to use for filtering. + // Refer spanner_database_admin.proto.ListDatabaseOperationsRequest for + // detail. + string filter = 3; + + // Number of databases to be returned in the response. If 0 or + // less, defaults to the server's maximum allowed page size. + int32 page_size = 4; + + // If non-empty, "page_token" should contain a next_page_token + // from a previous ListDatabaseOperationsResponse to the same "parent" + // and with the same "filter". 
+ string page_token = 5; +} + +// Action that restores a Cloud Spanner database from a backup. +message RestoreCloudDatabaseAction { + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 1; + + // Cloud instance ID (not path) containing the backup, e.g. "backup-instance". + string backup_instance_id = 2; + + // The id of the backup from which to restore, e.g. "test-backup". + string backup_id = 3; + + // Cloud instance ID (not path) containing the database, e.g. + // "database-instance". + string database_instance_id = 4; + + // The id of the database to create and restore to, e.g. "db0". Note that this + // database must not already exist. + string database_id = 5; + + // The KMS key(s) used to encrypt the restored database to be created if the + // restored database should be CMEK protected. + google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; +} + +// Action that gets a Cloud Spanner database. +message GetCloudDatabaseAction { + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 1; + + // Cloud instance ID (not path), e.g. "test-instance". + string instance_id = 2; + + // The id of the database to get, e.g. "db0". + string database_id = 3; +} + +// Action that creates a Cloud Spanner database backup. +message CreateCloudBackupAction { + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 1; + + // Cloud instance ID (not path), e.g. "test-instance". + string instance_id = 2; + + // The id of the backup to be created, e.g. "test-backup". + string backup_id = 3; + + // The id of the database from which this backup was + // created, e.g. "db0". Note that this needs to be in the + // same instance as the backup. + string database_id = 4; + + // Output only. The expiration time of the backup, which must be at least 6 + // hours and at most 366 days from the time the request is received. 
+ google.protobuf.Timestamp expire_time = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // The version time of the backup, which must be within the time range of + // [earliest_version_time, NOW], where earliest_version_time is retrieved by + // cloud spanner frontend API (See details: go/cs-pitr-lite-design). + optional google.protobuf.Timestamp version_time = 6; + + // The KMS key(s) used to encrypt the backup to be created if the backup + // should be CMEK protected. + google.spanner.admin.database.v1.EncryptionConfig encryption_config = 7; +} + +// Action that copies a Cloud Spanner database backup. +message CopyCloudBackupAction { + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 1; + + // Cloud instance ID (not path), e.g. "test-instance". + string instance_id = 2; + + // The id of the backup to be created, e.g. "test-backup". + string backup_id = 3; + + // The fully qualified uri of the source backup from which this + // backup was copied. eg. + // "projects//instances//backups/". + string source_backup = 4; + + // Output only. The expiration time of the backup, which must be at least 6 + // hours and at most 366 days from the time the request is received. + google.protobuf.Timestamp expire_time = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Action that gets a Cloud Spanner database backup. +message GetCloudBackupAction { + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 1; + + // Cloud instance ID (not path), e.g. "test-instance". + string instance_id = 2; + + // The id of the backup to get, e.g. "test-backup". + string backup_id = 3; +} + +// Action that updates a Cloud Spanner database backup. +message UpdateCloudBackupAction { + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 1; + + // Cloud instance ID (not path), e.g. "test-instance". + string instance_id = 2; + + // The id of the backup to update, e.g. "test-backup". 
+ string backup_id = 3; + + // Output only. Updated value of expire_time, this is the only field + // that is supported to be updated. + google.protobuf.Timestamp expire_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Action that deletes a Cloud Spanner database backup. +message DeleteCloudBackupAction { + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 1; + + // Cloud instance ID (not path), e.g. "test-instance". + string instance_id = 2; + + // The id of the backup to delete, e.g. "test-backup". + string backup_id = 3; +} + +// Action that lists Cloud Spanner database backups. +message ListCloudBackupsAction { + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 1; + + // Cloud instance ID (not path) to list backups from, e.g. "test-instance". + string instance_id = 2; + + // A filter expression that filters backups listed in the response. + // The expression must specify the field name, a comparison operator, + // and the value that you want to use for filtering. + // Refer backup.proto.ListBackupsRequest for detail. + string filter = 3; + + // Number of backups to be returned in the response. If 0 or + // less, defaults to the server's maximum allowed page size. + int32 page_size = 4; + + // If non-empty, "page_token" should contain a next_page_token + // from a previous ListBackupsResponse to the same "parent" + // and with the same "filter". + string page_token = 5; +} + +// Action that lists Cloud Spanner database backup operations. +message ListCloudBackupOperationsAction { + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 1; + + // Cloud instance ID (not path) to list backup operations from, + // e.g. "test-instance". + string instance_id = 2; + + // A filter expression that filters what operations are returned in the + // response. + // The expression must specify the field name, a comparison operator, + // and the value that you want to use for filtering. 
+ // Refer backup.proto.ListBackupOperationsRequest for detail. + string filter = 3; + + // Number of backups to be returned in the response. If 0 or + // less, defaults to the server's maximum allowed page size. + int32 page_size = 4; + + // If non-empty, "page_token" should contain a next_page_token + // from a previous ListBackupOperationsResponse to the same "parent" + // and with the same "filter". + string page_token = 5; +} + +// Action that gets an operation. +message GetOperationAction { + // The name of the operation resource. + string operation = 1; +} + +// Query cancellation action defines the long running query and the cancel query +// format depending on the Cloud database dialect. +message QueryCancellationAction { + // Long running query. + string long_running_sql = 1; + + // Format of the cancel query for the cloud database dialect. + string cancel_query = 2; +} + +// Action that cancels an operation. +message CancelOperationAction { + // The name of the operation resource to be cancelled. + string operation = 1; +} + +// Action that adds split points to a Cloud Spanner database. +message AddSplitPointsAction { + // Cloud project ID, e.g. "spanner-cloud-systest". + string project_id = 1; + + // Cloud instance ID (not path), e.g. "test-instance". + string instance_id = 2; + + // Cloud database ID (not full path), e.g. "db0". + string database_id = 3; + + // The split points to add. + repeated google.spanner.admin.database.v1.SplitPoints split_points = 4; +} + +// Starts a batch read-only transaction in executor. Successful outcomes of this +// action will contain batch_txn_id--the identifier that can be used to start +// the same transaction in other Executors to parallelize partition processing. +// +// Example of a batch read flow: +// 1. Start batch transaction with a timestamp (StartBatchTransactionAction) +// 2. Generate database partitions for a read or query +// (GenerateDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction) +// 3. 
Call ExecutePartitionAction for some or all partitions, process rows +// 4. Clean up the transaction (CloseBatchTransactionAction). +// +// More sophisticated example, with parallel processing: +// 1. Start batch transaction with a timestamp (StartBatchTransactionAction), +// note the returned BatchTransactionId +// 2. Generate database partitions for a read or query +// (GenerateDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction) +// 3. Distribute the partitions over a pool of workers, along with the +// transaction ID. +// +// In each worker: +// 4-1. StartBatchTransactionAction with the given transaction ID +// 4-2. ExecutePartitionAction for each partition it got, process read results +// 4-3. Close (not cleanup) the transaction (CloseBatchTransactionAction). +// +// When all workers are done: +// 5. Cleanup the transaction (CloseBatchTransactionAction). This can be done +// either by the last worker to finish the job, or by the main Executor that +// initialized this transaction in the first place. It is also possible to clean +// it up with a brand new Executor -- just execute StartBatchTransactionAction +// with the ID, then clean it up right away. +// +// Cleaning up is optional, but recommended. +message StartBatchTransactionAction { + // To start a new transaction, specify an exact timestamp. Alternatively, an + // existing batch transaction ID can be used. Either one of two must be + // set. + oneof param { + // The exact timestamp to start the batch transaction. + google.protobuf.Timestamp batch_txn_time = 1; + + // ID of a batch read-only transaction. It can be used to start the same + // batch transaction on multiple executors and parallelize partition + // processing. + bytes tid = 2; + } + + // Database role to assume while performing this action. Setting the + // database_role will enforce additional role-based access checks on this + // action. 
+ string cloud_database_role = 3; +} + +// Closes or cleans up the currently opened batch read-only transaction. +// +// Once a transaction is closed, the Executor can be disposed of or used to +// start another transaction. Closing a batch transaction in one Executor +// doesn't affect the transaction's state in other Executors that also read from +// it. +// +// When a transaction is cleaned up, it becomes globally invalid. Cleaning up is +// optional, but recommended. +message CloseBatchTransactionAction { + // Indicates whether the transaction needs to be cleaned up. + bool cleanup = 1; +} + +// Generate database partitions for the given read. Successful outcomes will +// contain database partitions in the db_partition field. +message GenerateDbPartitionsForReadAction { + // Read to generate partitions for. + ReadAction read = 1; + + // Metadata related to the tables involved in the read. + repeated TableMetadata table = 2; + + // Desired size of data in each partition. Spanner doesn't guarantee to + // respect this value. + optional int64 desired_bytes_per_partition = 3; + + // If set, the desired max number of partitions. Spanner doesn't guarantee to + // respect this value. + optional int64 max_partition_count = 4; +} + +// Generate database partitions for the given query. Successful outcomes will +// contain database partitions in the db_partition field. +message GenerateDbPartitionsForQueryAction { + // Query to generate partitions for. + QueryAction query = 1; + + // Desired size of data in each partition. Spanner doesn't guarantee to + // respect this value. + optional int64 desired_bytes_per_partition = 2; +} + +// Identifies a database partition generated for a particular read or query. To +// read rows from the partition, use ExecutePartitionAction. +message BatchPartition { + // Serialized Partition instance. + bytes partition = 1; + + // The partition token decrypted from partition. 
+ bytes partition_token = 2; + + // Table name is set iff the partition was generated for a read (as opposed to + // a query). + optional string table = 3; + + // Index name if the partition was generated for an index read. + optional string index = 4; +} + +// Performs a read or query for the given partitions. This action must be +// executed in the context of the same transaction that was used to generate +// given partitions. +message ExecutePartitionAction { + // Batch partition to execute on. + BatchPartition partition = 1; +} + +// Execute a change stream TVF query. +message ExecuteChangeStreamQuery { + // Name for this change stream. + string name = 1; + + // Specifies that records with commit_timestamp greater than or equal to + // start_time should be returned. + google.protobuf.Timestamp start_time = 2; + + // Specifies that records with commit_timestamp less than or equal to + // end_time should be returned. + optional google.protobuf.Timestamp end_time = 3; + + // Specifies which change stream partition to query, based on the content of + // child partitions records. + optional string partition_token = 4; + + // Read options for this change stream query. + repeated string read_options = 5; + + // Determines how frequently a heartbeat ChangeRecord will be returned in case + // there are no transactions committed in this partition, in milliseconds. + optional int32 heartbeat_milliseconds = 6; + + // Deadline for this change stream query, in seconds. + optional int64 deadline_seconds = 7; + + // Database role to assume while performing this action. This should only be + // set for cloud requests. Setting the database role will enforce additional + // role-based access checks on this action. + optional string cloud_database_role = 8; +} + +// SpannerActionOutcome defines a result of execution of a single SpannerAction. +message SpannerActionOutcome { + // If an outcome is split into multiple parts, status will be set only in the + // last part. 
+ optional google.rpc.Status status = 1; + + // Transaction timestamp. It must be set for successful committed actions. + optional google.protobuf.Timestamp commit_time = 2; + + // Result of a ReadAction. This field must be set for ReadActions even if + // no rows were read. + optional ReadResult read_result = 3; + + // Result of a Query. This field must be set for Queries even if no rows were + // read. + optional QueryResult query_result = 4; + + // This bit indicates that Spanner has restarted the current transaction. It + // means that the client should replay all the reads and writes. + // Setting it to true is only valid in the context of a read-write + // transaction, as an outcome of a committing FinishTransactionAction. + optional bool transaction_restarted = 5; + + // In successful StartBatchTransactionAction outcomes, this contains the ID of + // the transaction. + optional bytes batch_txn_id = 6; + + // Generated database partitions (result of a + // GenerateDbPartitionsForReadAction/GenerateDbPartitionsForQueryAction). + repeated BatchPartition db_partition = 7; + + // Result of admin related actions. + optional AdminResult admin_result = 8; + + // Stores rows modified by query in single DML or batch DML action. + // In case of batch DML action, stores 0 as row count of errored DML query. + repeated int64 dml_rows_modified = 9; + + // Change stream records returned by a change stream query. + repeated ChangeStreamRecord change_stream_records = 10; + + // If not zero, it indicates the read timestamp to use for validating + // the SnapshotIsolation transaction. + optional int64 snapshot_isolation_txn_read_timestamp = 11; +} + +// AdminResult contains admin action results, for database/backup/operation. +message AdminResult { + // Results of cloud backup related actions. + CloudBackupResponse backup_response = 1; + + // Results of operation related actions. + OperationResponse operation_response = 2; + + // Results of database related actions. 
+ CloudDatabaseResponse database_response = 3; + + // Results of instance related actions. + CloudInstanceResponse instance_response = 4; + + // Results of instance config related actions. + CloudInstanceConfigResponse instance_config_response = 5; +} + +// CloudBackupResponse contains results returned by cloud backup related +// actions. +message CloudBackupResponse { + // List of backups returned by ListCloudBackupsAction. + repeated google.spanner.admin.database.v1.Backup listed_backups = 1; + + // List of operations returned by ListCloudBackupOperationsAction. + repeated google.longrunning.Operation listed_backup_operations = 2; + + // "next_page_token" can be sent in a subsequent list action + // to fetch more of the matching data. + string next_page_token = 3; + + // Backup returned by GetCloudBackupAction/UpdateCloudBackupAction. + google.spanner.admin.database.v1.Backup backup = 4; +} + +// OperationResponse contains results returned by operation related actions. +message OperationResponse { + // List of operations returned by ListOperationsAction. + repeated google.longrunning.Operation listed_operations = 1; + + // "next_page_token" can be sent in a subsequent list action + // to fetch more of the matching data. + string next_page_token = 2; + + // Operation returned by GetOperationAction. + google.longrunning.Operation operation = 3; +} + +// CloudInstanceResponse contains results returned by cloud instance related +// actions. +message CloudInstanceResponse { + // List of instances returned by ListCloudInstancesAction. + repeated google.spanner.admin.instance.v1.Instance listed_instances = 1; + + // "next_page_token" can be sent in a subsequent list action + // to fetch more of the matching data. + string next_page_token = 2; + + // Instance returned by GetCloudInstanceAction + google.spanner.admin.instance.v1.Instance instance = 3; +} + +// CloudInstanceConfigResponse contains results returned by cloud instance +// config related actions. 
+message CloudInstanceConfigResponse { + // List of instance configs returned by ListCloudInstanceConfigsAction. + repeated google.spanner.admin.instance.v1.InstanceConfig + listed_instance_configs = 1; + + // "next_page_token" can be sent in a subsequent list action + // to fetch more of the matching data. + string next_page_token = 2; + + // Instance config returned by GetCloudInstanceConfigAction. + google.spanner.admin.instance.v1.InstanceConfig instance_config = 3; +} + +// CloudDatabaseResponse contains results returned by cloud database related +// actions. +message CloudDatabaseResponse { + // List of databases returned by ListCloudDatabasesAction. + repeated google.spanner.admin.database.v1.Database listed_databases = 1; + + // List of operations returned by ListCloudDatabaseOperationsAction. + repeated google.longrunning.Operation listed_database_operations = 2; + + // "next_page_token" can be sent in a subsequent list action + // to fetch more of the matching data. + string next_page_token = 3; + + // Database returned by GetCloudDatabaseAction + google.spanner.admin.database.v1.Database database = 4; +} + +// ReadResult contains rows read. +message ReadResult { + // Table name. + string table = 1; + + // Index name, if read from an index. + optional string index = 2; + + // Request index (multiread only). + optional int32 request_index = 3; + + // Rows read. Each row is a struct with multiple fields, one for each column + // in read result. All rows have the same type. + repeated ValueList row = 4; + + // The type of rows read. It must be set if at least one row was read. + optional google.spanner.v1.StructType row_type = 5; +} + +// QueryResult contains result of a Query. +message QueryResult { + // Rows read. Each row is a struct with multiple fields, one for each column + // in read result. All rows have the same type. + repeated ValueList row = 1; + + // The type of rows read. It must be set if at least one row was read. 
+ optional google.spanner.v1.StructType row_type = 2; +} + +// Raw ChangeStream records. +// Encodes one of: DataChangeRecord, HeartbeatRecord, ChildPartitionsRecord +// returned from the ChangeStream API. +message ChangeStreamRecord { + // Record represents one type of the change stream record. + oneof record { + // Data change record. + DataChangeRecord data_change = 1; + + // Child partitions record. + ChildPartitionsRecord child_partition = 2; + + // Heartbeat record. + HeartbeatRecord heartbeat = 3; + } +} + +// ChangeStream data change record. +message DataChangeRecord { + // Column types. + message ColumnType { + // Column name. + string name = 1; + + // Column type in JSON. + string type = 2; + + // Whether the column is a primary key column. + bool is_primary_key = 3; + + // The position of the column as defined in the schema. + int64 ordinal_position = 4; + } + + // Describes the changes that were made. + message Mod { + // The primary key values in JSON. + string keys = 1; + + // The new values of the changed columns in JSON. Only contain the non-key + // columns. + string new_values = 2; + + // The old values of the changed columns in JSON. Only contain the non-key + // columns. + string old_values = 3; + } + + // The timestamp in which the change was committed. + google.protobuf.Timestamp commit_time = 1; + + // The sequence number for the record within the transaction. + string record_sequence = 2; + + // A globally unique string that represents the transaction in which the + // change was committed. + string transaction_id = 3; + + // Indicates whether this is the last record for a transaction in the current + // partition. + bool is_last_record = 4; + + // Name of the table affected by the change. + string table = 5; + + // Column types defined in the schema. + repeated ColumnType column_types = 6; + + // Changes made in the transaction. + repeated Mod mods = 7; + + // Describes the type of change. One of INSERT, UPDATE or DELETE. 
+ string mod_type = 8; + + // One of value capture type: NEW_VALUES, OLD_VALUES, OLD_AND_NEW_VALUES. + string value_capture_type = 9; + + // Number of records in transactions. + int64 record_count = 10; + + // Number of partitions in transactions. + int64 partition_count = 11; + + // Transaction tag info. + string transaction_tag = 12; + + // Whether the transaction is a system transactionn. + bool is_system_transaction = 13; +} + +// ChangeStream child partition record. +message ChildPartitionsRecord { + // A single child partition. + message ChildPartition { + // Partition token string used to identify the child partition in queries. + string token = 1; + + // Parent partition tokens of this child partition. + repeated string parent_partition_tokens = 2; + } + + // Data change records returned from child partitions in this child partitions + // record will have a commit timestamp greater than or equal to start_time. + google.protobuf.Timestamp start_time = 1; + + // A monotonically increasing sequence number that can be used to define the + // ordering of the child partitions record when there are multiple child + // partitions records returned with the same start_time in a particular + // partition. + string record_sequence = 2; + + // A set of child partitions and their associated information. + repeated ChildPartition child_partitions = 3; +} + +// ChangeStream heartbeat record. +message HeartbeatRecord { + // Timestamp for this heartbeat check. + google.protobuf.Timestamp heartbeat_time = 1; +} + +// Options for Cloud Spanner Service. +message SpannerOptions { + // Options for configuring the session pool + SessionPoolOptions session_pool_options = 1; +} + +// Options for the session pool used by the DatabaseClient. 
+message SessionPoolOptions { + // passing this as true, will make applicable RPCs use multiplexed sessions + // instead of regular sessions + bool use_multiplexed = 1; +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/clirr-ignored-differences.xml b/java-spanner/proto-google-cloud-spanner-v1/clirr-ignored-differences.xml new file mode 100644 index 000000000000..7cb9c078e678 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/clirr-ignored-differences.xml @@ -0,0 +1,137 @@ + + + + + 7012 + com/google/spanner/v1/*OrBuilder + * get*(*) + + + 7012 + com/google/spanner/v1/*OrBuilder + boolean contains*(*) + + + 7012 + com/google/spanner/v1/*OrBuilder + boolean has*(*) + + + + + + 5001 + com/google/spanner/v1/* + com/google/protobuf/GeneratedMessage + + + 5001 + com/google/spanner/v1/*$Builder + com/google/protobuf/GeneratedMessage$Builder + + + 5001 + com/google/spanner/v1/*$* + com/google/protobuf/GeneratedMessage + + + 5001 + com/google/spanner/v1/*$*$Builder + com/google/protobuf/GeneratedMessage$Builder + + + 5001 + com/google/spanner/v1/*$*$* + com/google/protobuf/GeneratedMessage + + + 5001 + com/google/spanner/v1/*$*$*$Builder + com/google/protobuf/GeneratedMessage$Builder + + + 5001 + com/google/spanner/v1/*Proto + com/google/protobuf/GeneratedFile + + + + 7005 + com/google/spanner/v1/** + * newBuilderForType(*) + ** + + + + 7006 + com/google/spanner/v1/** + * internalGetFieldAccessorTable() + ** + + + + 7014 + com/google/spanner/v1/** + * getDescriptor() + + + 7006 + com/google/spanner/v1/** + * getDefaultInstanceForType() + ** + + + 7006 + com/google/spanner/v1/** + * addRepeatedField(*) + ** + + + 7006 + com/google/spanner/v1/** + * clear() + ** + + + 7006 + com/google/spanner/v1/** + * clearField(*) + ** + + + 7006 + com/google/spanner/v1/** + * clearOneof(*) + ** + + + 7006 + com/google/spanner/v1/** + * clone() + ** + + + 7006 + com/google/spanner/v1/** + * mergeUnknownFields(*) + ** + + + 7006 + com/google/spanner/v1/** + * 
setField(*) + ** + + + 7006 + com/google/spanner/v1/** + * setRepeatedField(*) + ** + + + 7006 + com/google/spanner/v1/** + * setUnknownFields(*) + ** + + diff --git a/java-spanner/proto-google-cloud-spanner-v1/pom.xml b/java-spanner/proto-google-cloud-spanner-v1/pom.xml new file mode 100644 index 000000000000..255d7c1b48e6 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/pom.xml @@ -0,0 +1,43 @@ + + 4.0.0 + com.google.api.grpc + proto-google-cloud-spanner-v1 + 6.112.1-SNAPSHOT + proto-google-cloud-spanner-v1 + PROTO library for proto-google-cloud-spanner-v1 + + com.google.cloud + google-cloud-spanner-parent + 6.112.1-SNAPSHOT + + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-common-protos + + + com.google.api + api-common + + + com.google.guava + guava + + + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + + \ No newline at end of file diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsRequest.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsRequest.java new file mode 100644 index 000000000000..2fcfd868b581 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsRequest.java @@ -0,0 +1,1022 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. 
DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * The request for
    + * [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions].
    + * 
    + * + * Protobuf type {@code google.spanner.v1.BatchCreateSessionsRequest} + */ +@com.google.protobuf.Generated +public final class BatchCreateSessionsRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.BatchCreateSessionsRequest) + BatchCreateSessionsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BatchCreateSessionsRequest"); + } + + // Use BatchCreateSessionsRequest.newBuilder() to construct. + private BatchCreateSessionsRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BatchCreateSessionsRequest() { + database_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchCreateSessionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchCreateSessionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.BatchCreateSessionsRequest.class, + com.google.spanner.v1.BatchCreateSessionsRequest.Builder.class); + } + + private int bitField0_; + public static final int DATABASE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object database_ = ""; + + /** + * + * + *
    +   * Required. The database in which the new sessions are created.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + @java.lang.Override + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The database in which the new sessions are created.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SESSION_TEMPLATE_FIELD_NUMBER = 2; + private com.google.spanner.v1.Session sessionTemplate_; + + /** + * + * + *
    +   * Parameters to apply to each created session.
    +   * 
    + * + * .google.spanner.v1.Session session_template = 2; + * + * @return Whether the sessionTemplate field is set. + */ + @java.lang.Override + public boolean hasSessionTemplate() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Parameters to apply to each created session.
    +   * 
    + * + * .google.spanner.v1.Session session_template = 2; + * + * @return The sessionTemplate. + */ + @java.lang.Override + public com.google.spanner.v1.Session getSessionTemplate() { + return sessionTemplate_ == null + ? com.google.spanner.v1.Session.getDefaultInstance() + : sessionTemplate_; + } + + /** + * + * + *
    +   * Parameters to apply to each created session.
    +   * 
    + * + * .google.spanner.v1.Session session_template = 2; + */ + @java.lang.Override + public com.google.spanner.v1.SessionOrBuilder getSessionTemplateOrBuilder() { + return sessionTemplate_ == null + ? com.google.spanner.v1.Session.getDefaultInstance() + : sessionTemplate_; + } + + public static final int SESSION_COUNT_FIELD_NUMBER = 3; + private int sessionCount_ = 0; + + /** + * + * + *
    +   * Required. The number of sessions to be created in this batch call. At least
    +   * one session is created. The API can return fewer than the requested number
    +   * of sessions. If a specific number of sessions are desired, the client can
    +   * make additional calls to `BatchCreateSessions` (adjusting
    +   * [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count]
    +   * as necessary).
    +   * 
    + * + * int32 session_count = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The sessionCount. + */ + @java.lang.Override + public int getSessionCount() { + return sessionCount_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, database_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getSessionTemplate()); + } + if (sessionCount_ != 0) { + output.writeInt32(3, sessionCount_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, database_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getSessionTemplate()); + } + if (sessionCount_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(3, sessionCount_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.BatchCreateSessionsRequest)) { + return super.equals(obj); + } + com.google.spanner.v1.BatchCreateSessionsRequest other = + (com.google.spanner.v1.BatchCreateSessionsRequest) obj; + + if (!getDatabase().equals(other.getDatabase())) return 
false; + if (hasSessionTemplate() != other.hasSessionTemplate()) return false; + if (hasSessionTemplate()) { + if (!getSessionTemplate().equals(other.getSessionTemplate())) return false; + } + if (getSessionCount() != other.getSessionCount()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getDatabase().hashCode(); + if (hasSessionTemplate()) { + hash = (37 * hash) + SESSION_TEMPLATE_FIELD_NUMBER; + hash = (53 * hash) + getSessionTemplate().hashCode(); + } + hash = (37 * hash) + SESSION_COUNT_FIELD_NUMBER; + hash = (53 * hash) + getSessionCount(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.BatchCreateSessionsRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.BatchCreateSessionsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.BatchCreateSessionsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.BatchCreateSessionsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.v1.BatchCreateSessionsRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.BatchCreateSessionsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.BatchCreateSessionsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.BatchCreateSessionsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.BatchCreateSessionsRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.BatchCreateSessionsRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.BatchCreateSessionsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.BatchCreateSessionsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { 
+ return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.BatchCreateSessionsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions].
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.BatchCreateSessionsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.BatchCreateSessionsRequest) + com.google.spanner.v1.BatchCreateSessionsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchCreateSessionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchCreateSessionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.BatchCreateSessionsRequest.class, + com.google.spanner.v1.BatchCreateSessionsRequest.Builder.class); + } + + // Construct using com.google.spanner.v1.BatchCreateSessionsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetSessionTemplateFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + database_ = ""; + sessionTemplate_ = null; + if (sessionTemplateBuilder_ != null) { + sessionTemplateBuilder_.dispose(); + sessionTemplateBuilder_ = null; + } + sessionCount_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchCreateSessionsRequest_descriptor; + } + + @java.lang.Override + public 
com.google.spanner.v1.BatchCreateSessionsRequest getDefaultInstanceForType() { + return com.google.spanner.v1.BatchCreateSessionsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.BatchCreateSessionsRequest build() { + com.google.spanner.v1.BatchCreateSessionsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.BatchCreateSessionsRequest buildPartial() { + com.google.spanner.v1.BatchCreateSessionsRequest result = + new com.google.spanner.v1.BatchCreateSessionsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.BatchCreateSessionsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.database_ = database_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.sessionTemplate_ = + sessionTemplateBuilder_ == null ? 
sessionTemplate_ : sessionTemplateBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.sessionCount_ = sessionCount_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.BatchCreateSessionsRequest) { + return mergeFrom((com.google.spanner.v1.BatchCreateSessionsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.BatchCreateSessionsRequest other) { + if (other == com.google.spanner.v1.BatchCreateSessionsRequest.getDefaultInstance()) + return this; + if (!other.getDatabase().isEmpty()) { + database_ = other.database_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasSessionTemplate()) { + mergeSessionTemplate(other.getSessionTemplate()); + } + if (other.getSessionCount() != 0) { + setSessionCount(other.getSessionCount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + database_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetSessionTemplateFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + sessionCount_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 24 + default: + { + if 
(!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object database_ = ""; + + /** + * + * + *
    +     * Required. The database in which the new sessions are created.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The database in which the new sessions are created.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The database in which the new sessions are created.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The database to set. + * @return This builder for chaining. + */ + public Builder setDatabase(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The database in which the new sessions are created.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearDatabase() { + database_ = getDefaultInstance().getDatabase(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The database in which the new sessions are created.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for database to set. + * @return This builder for chaining. + */ + public Builder setDatabaseBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.spanner.v1.Session sessionTemplate_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Session, + com.google.spanner.v1.Session.Builder, + com.google.spanner.v1.SessionOrBuilder> + sessionTemplateBuilder_; + + /** + * + * + *
    +     * Parameters to apply to each created session.
    +     * 
    + * + * .google.spanner.v1.Session session_template = 2; + * + * @return Whether the sessionTemplate field is set. + */ + public boolean hasSessionTemplate() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Parameters to apply to each created session.
    +     * 
    + * + * .google.spanner.v1.Session session_template = 2; + * + * @return The sessionTemplate. + */ + public com.google.spanner.v1.Session getSessionTemplate() { + if (sessionTemplateBuilder_ == null) { + return sessionTemplate_ == null + ? com.google.spanner.v1.Session.getDefaultInstance() + : sessionTemplate_; + } else { + return sessionTemplateBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Parameters to apply to each created session.
    +     * 
    + * + * .google.spanner.v1.Session session_template = 2; + */ + public Builder setSessionTemplate(com.google.spanner.v1.Session value) { + if (sessionTemplateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + sessionTemplate_ = value; + } else { + sessionTemplateBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Parameters to apply to each created session.
    +     * 
    + * + * .google.spanner.v1.Session session_template = 2; + */ + public Builder setSessionTemplate(com.google.spanner.v1.Session.Builder builderForValue) { + if (sessionTemplateBuilder_ == null) { + sessionTemplate_ = builderForValue.build(); + } else { + sessionTemplateBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Parameters to apply to each created session.
    +     * 
    + * + * .google.spanner.v1.Session session_template = 2; + */ + public Builder mergeSessionTemplate(com.google.spanner.v1.Session value) { + if (sessionTemplateBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && sessionTemplate_ != null + && sessionTemplate_ != com.google.spanner.v1.Session.getDefaultInstance()) { + getSessionTemplateBuilder().mergeFrom(value); + } else { + sessionTemplate_ = value; + } + } else { + sessionTemplateBuilder_.mergeFrom(value); + } + if (sessionTemplate_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Parameters to apply to each created session.
    +     * 
    + * + * .google.spanner.v1.Session session_template = 2; + */ + public Builder clearSessionTemplate() { + bitField0_ = (bitField0_ & ~0x00000002); + sessionTemplate_ = null; + if (sessionTemplateBuilder_ != null) { + sessionTemplateBuilder_.dispose(); + sessionTemplateBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Parameters to apply to each created session.
    +     * 
    + * + * .google.spanner.v1.Session session_template = 2; + */ + public com.google.spanner.v1.Session.Builder getSessionTemplateBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetSessionTemplateFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Parameters to apply to each created session.
    +     * 
    + * + * .google.spanner.v1.Session session_template = 2; + */ + public com.google.spanner.v1.SessionOrBuilder getSessionTemplateOrBuilder() { + if (sessionTemplateBuilder_ != null) { + return sessionTemplateBuilder_.getMessageOrBuilder(); + } else { + return sessionTemplate_ == null + ? com.google.spanner.v1.Session.getDefaultInstance() + : sessionTemplate_; + } + } + + /** + * + * + *
    +     * Parameters to apply to each created session.
    +     * 
    + * + * .google.spanner.v1.Session session_template = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Session, + com.google.spanner.v1.Session.Builder, + com.google.spanner.v1.SessionOrBuilder> + internalGetSessionTemplateFieldBuilder() { + if (sessionTemplateBuilder_ == null) { + sessionTemplateBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Session, + com.google.spanner.v1.Session.Builder, + com.google.spanner.v1.SessionOrBuilder>( + getSessionTemplate(), getParentForChildren(), isClean()); + sessionTemplate_ = null; + } + return sessionTemplateBuilder_; + } + + private int sessionCount_; + + /** + * + * + *
    +     * Required. The number of sessions to be created in this batch call. At least
    +     * one session is created. The API can return fewer than the requested number
    +     * of sessions. If a specific number of sessions are desired, the client can
    +     * make additional calls to `BatchCreateSessions` (adjusting
    +     * [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count]
    +     * as necessary).
    +     * 
    + * + * int32 session_count = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The sessionCount. + */ + @java.lang.Override + public int getSessionCount() { + return sessionCount_; + } + + /** + * + * + *
    +     * Required. The number of sessions to be created in this batch call. At least
    +     * one session is created. The API can return fewer than the requested number
    +     * of sessions. If a specific number of sessions are desired, the client can
    +     * make additional calls to `BatchCreateSessions` (adjusting
    +     * [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count]
    +     * as necessary).
    +     * 
    + * + * int32 session_count = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The sessionCount to set. + * @return This builder for chaining. + */ + public Builder setSessionCount(int value) { + + sessionCount_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The number of sessions to be created in this batch call. At least
    +     * one session is created. The API can return fewer than the requested number
    +     * of sessions. If a specific number of sessions are desired, the client can
    +     * make additional calls to `BatchCreateSessions` (adjusting
    +     * [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count]
    +     * as necessary).
    +     * 
    + * + * int32 session_count = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearSessionCount() { + bitField0_ = (bitField0_ & ~0x00000004); + sessionCount_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.BatchCreateSessionsRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.BatchCreateSessionsRequest) + private static final com.google.spanner.v1.BatchCreateSessionsRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.BatchCreateSessionsRequest(); + } + + public static com.google.spanner.v1.BatchCreateSessionsRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchCreateSessionsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.BatchCreateSessionsRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} 
diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsRequestOrBuilder.java new file mode 100644 index 000000000000..d19787771816 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsRequestOrBuilder.java @@ -0,0 +1,113 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface BatchCreateSessionsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.BatchCreateSessionsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The database in which the new sessions are created.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + java.lang.String getDatabase(); + + /** + * + * + *
    +   * Required. The database in which the new sessions are created.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + com.google.protobuf.ByteString getDatabaseBytes(); + + /** + * + * + *
    +   * Parameters to apply to each created session.
    +   * 
    + * + * .google.spanner.v1.Session session_template = 2; + * + * @return Whether the sessionTemplate field is set. + */ + boolean hasSessionTemplate(); + + /** + * + * + *
    +   * Parameters to apply to each created session.
    +   * 
    + * + * .google.spanner.v1.Session session_template = 2; + * + * @return The sessionTemplate. + */ + com.google.spanner.v1.Session getSessionTemplate(); + + /** + * + * + *
    +   * Parameters to apply to each created session.
    +   * 
    + * + * .google.spanner.v1.Session session_template = 2; + */ + com.google.spanner.v1.SessionOrBuilder getSessionTemplateOrBuilder(); + + /** + * + * + *
    +   * Required. The number of sessions to be created in this batch call. At least
    +   * one session is created. The API can return fewer than the requested number
    +   * of sessions. If a specific number of sessions are desired, the client can
    +   * make additional calls to `BatchCreateSessions` (adjusting
    +   * [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count]
    +   * as necessary).
    +   * 
    + * + * int32 session_count = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The sessionCount. + */ + int getSessionCount(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsResponse.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsResponse.java new file mode 100644 index 000000000000..2277f7f66f3f --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsResponse.java @@ -0,0 +1,920 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * The response for
    + * [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions].
    + * 
    + * + * Protobuf type {@code google.spanner.v1.BatchCreateSessionsResponse} + */ +@com.google.protobuf.Generated +public final class BatchCreateSessionsResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.BatchCreateSessionsResponse) + BatchCreateSessionsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BatchCreateSessionsResponse"); + } + + // Use BatchCreateSessionsResponse.newBuilder() to construct. + private BatchCreateSessionsResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BatchCreateSessionsResponse() { + session_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchCreateSessionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchCreateSessionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.BatchCreateSessionsResponse.class, + com.google.spanner.v1.BatchCreateSessionsResponse.Builder.class); + } + + public static final int SESSION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List session_; + + /** + * + * + *
    +   * The freshly created sessions.
    +   * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + @java.lang.Override + public java.util.List getSessionList() { + return session_; + } + + /** + * + * + *
    +   * The freshly created sessions.
    +   * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + @java.lang.Override + public java.util.List + getSessionOrBuilderList() { + return session_; + } + + /** + * + * + *
    +   * The freshly created sessions.
    +   * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + @java.lang.Override + public int getSessionCount() { + return session_.size(); + } + + /** + * + * + *
    +   * The freshly created sessions.
    +   * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + @java.lang.Override + public com.google.spanner.v1.Session getSession(int index) { + return session_.get(index); + } + + /** + * + * + *
    +   * The freshly created sessions.
    +   * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + @java.lang.Override + public com.google.spanner.v1.SessionOrBuilder getSessionOrBuilder(int index) { + return session_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < session_.size(); i++) { + output.writeMessage(1, session_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < session_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, session_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.BatchCreateSessionsResponse)) { + return super.equals(obj); + } + com.google.spanner.v1.BatchCreateSessionsResponse other = + (com.google.spanner.v1.BatchCreateSessionsResponse) obj; + + if (!getSessionList().equals(other.getSessionList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getSessionCount() > 0) { + hash = (37 * hash) + SESSION_FIELD_NUMBER; + hash = (53 * hash) + getSessionList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + 
memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.BatchCreateSessionsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.BatchCreateSessionsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.BatchCreateSessionsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.BatchCreateSessionsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.BatchCreateSessionsResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.BatchCreateSessionsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.BatchCreateSessionsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.BatchCreateSessionsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.BatchCreateSessionsResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.BatchCreateSessionsResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.BatchCreateSessionsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.BatchCreateSessionsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.BatchCreateSessionsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The response for
    +   * [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions].
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.BatchCreateSessionsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.BatchCreateSessionsResponse) + com.google.spanner.v1.BatchCreateSessionsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchCreateSessionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchCreateSessionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.BatchCreateSessionsResponse.class, + com.google.spanner.v1.BatchCreateSessionsResponse.Builder.class); + } + + // Construct using com.google.spanner.v1.BatchCreateSessionsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (sessionBuilder_ == null) { + session_ = java.util.Collections.emptyList(); + } else { + session_ = null; + sessionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchCreateSessionsResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.BatchCreateSessionsResponse getDefaultInstanceForType() { + return com.google.spanner.v1.BatchCreateSessionsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.BatchCreateSessionsResponse build() { + 
com.google.spanner.v1.BatchCreateSessionsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.BatchCreateSessionsResponse buildPartial() { + com.google.spanner.v1.BatchCreateSessionsResponse result = + new com.google.spanner.v1.BatchCreateSessionsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.v1.BatchCreateSessionsResponse result) { + if (sessionBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + session_ = java.util.Collections.unmodifiableList(session_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.session_ = session_; + } else { + result.session_ = sessionBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.BatchCreateSessionsResponse result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.BatchCreateSessionsResponse) { + return mergeFrom((com.google.spanner.v1.BatchCreateSessionsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.BatchCreateSessionsResponse other) { + if (other == com.google.spanner.v1.BatchCreateSessionsResponse.getDefaultInstance()) + return this; + if (sessionBuilder_ == null) { + if (!other.session_.isEmpty()) { + if (session_.isEmpty()) { + session_ = other.session_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureSessionIsMutable(); + session_.addAll(other.session_); + } + onChanged(); + } + } else { + if (!other.session_.isEmpty()) { + if (sessionBuilder_.isEmpty()) { + sessionBuilder_.dispose(); + sessionBuilder_ = null; + session_ = other.session_; + bitField0_ = (bitField0_ 
& ~0x00000001); + sessionBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetSessionFieldBuilder() + : null; + } else { + sessionBuilder_.addAllMessages(other.session_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.v1.Session m = + input.readMessage(com.google.spanner.v1.Session.parser(), extensionRegistry); + if (sessionBuilder_ == null) { + ensureSessionIsMutable(); + session_.add(m); + } else { + sessionBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List session_ = + java.util.Collections.emptyList(); + + private void ensureSessionIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + session_ = new java.util.ArrayList(session_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Session, + com.google.spanner.v1.Session.Builder, + com.google.spanner.v1.SessionOrBuilder> + sessionBuilder_; + + /** + * + * + *
    +     * The freshly created sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + public java.util.List getSessionList() { + if (sessionBuilder_ == null) { + return java.util.Collections.unmodifiableList(session_); + } else { + return sessionBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * The freshly created sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + public int getSessionCount() { + if (sessionBuilder_ == null) { + return session_.size(); + } else { + return sessionBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * The freshly created sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + public com.google.spanner.v1.Session getSession(int index) { + if (sessionBuilder_ == null) { + return session_.get(index); + } else { + return sessionBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * The freshly created sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + public Builder setSession(int index, com.google.spanner.v1.Session value) { + if (sessionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSessionIsMutable(); + session_.set(index, value); + onChanged(); + } else { + sessionBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The freshly created sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + public Builder setSession(int index, com.google.spanner.v1.Session.Builder builderForValue) { + if (sessionBuilder_ == null) { + ensureSessionIsMutable(); + session_.set(index, builderForValue.build()); + onChanged(); + } else { + sessionBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The freshly created sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + public Builder addSession(com.google.spanner.v1.Session value) { + if (sessionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSessionIsMutable(); + session_.add(value); + onChanged(); + } else { + sessionBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * The freshly created sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + public Builder addSession(int index, com.google.spanner.v1.Session value) { + if (sessionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSessionIsMutable(); + session_.add(index, value); + onChanged(); + } else { + sessionBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The freshly created sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + public Builder addSession(com.google.spanner.v1.Session.Builder builderForValue) { + if (sessionBuilder_ == null) { + ensureSessionIsMutable(); + session_.add(builderForValue.build()); + onChanged(); + } else { + sessionBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The freshly created sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + public Builder addSession(int index, com.google.spanner.v1.Session.Builder builderForValue) { + if (sessionBuilder_ == null) { + ensureSessionIsMutable(); + session_.add(index, builderForValue.build()); + onChanged(); + } else { + sessionBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The freshly created sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + public Builder addAllSession( + java.lang.Iterable values) { + if (sessionBuilder_ == null) { + ensureSessionIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, session_); + onChanged(); + } else { + sessionBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * The freshly created sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + public Builder clearSession() { + if (sessionBuilder_ == null) { + session_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + sessionBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * The freshly created sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + public Builder removeSession(int index) { + if (sessionBuilder_ == null) { + ensureSessionIsMutable(); + session_.remove(index); + onChanged(); + } else { + sessionBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * The freshly created sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + public com.google.spanner.v1.Session.Builder getSessionBuilder(int index) { + return internalGetSessionFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * The freshly created sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + public com.google.spanner.v1.SessionOrBuilder getSessionOrBuilder(int index) { + if (sessionBuilder_ == null) { + return session_.get(index); + } else { + return sessionBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * The freshly created sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + public java.util.List + getSessionOrBuilderList() { + if (sessionBuilder_ != null) { + return sessionBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(session_); + } + } + + /** + * + * + *
    +     * The freshly created sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + public com.google.spanner.v1.Session.Builder addSessionBuilder() { + return internalGetSessionFieldBuilder() + .addBuilder(com.google.spanner.v1.Session.getDefaultInstance()); + } + + /** + * + * + *
    +     * The freshly created sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + public com.google.spanner.v1.Session.Builder addSessionBuilder(int index) { + return internalGetSessionFieldBuilder() + .addBuilder(index, com.google.spanner.v1.Session.getDefaultInstance()); + } + + /** + * + * + *
    +     * The freshly created sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + public java.util.List getSessionBuilderList() { + return internalGetSessionFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Session, + com.google.spanner.v1.Session.Builder, + com.google.spanner.v1.SessionOrBuilder> + internalGetSessionFieldBuilder() { + if (sessionBuilder_ == null) { + sessionBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Session, + com.google.spanner.v1.Session.Builder, + com.google.spanner.v1.SessionOrBuilder>( + session_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + session_ = null; + } + return sessionBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.BatchCreateSessionsResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.BatchCreateSessionsResponse) + private static final com.google.spanner.v1.BatchCreateSessionsResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.BatchCreateSessionsResponse(); + } + + public static com.google.spanner.v1.BatchCreateSessionsResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchCreateSessionsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + 
throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.BatchCreateSessionsResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsResponseOrBuilder.java new file mode 100644 index 000000000000..b849013ac7bb --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsResponseOrBuilder.java @@ -0,0 +1,83 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface BatchCreateSessionsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.BatchCreateSessionsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The freshly created sessions.
    +   * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + java.util.List getSessionList(); + + /** + * + * + *
    +   * The freshly created sessions.
    +   * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + com.google.spanner.v1.Session getSession(int index); + + /** + * + * + *
    +   * The freshly created sessions.
    +   * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + int getSessionCount(); + + /** + * + * + *
    +   * The freshly created sessions.
    +   * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + java.util.List getSessionOrBuilderList(); + + /** + * + * + *
    +   * The freshly created sessions.
    +   * 
    + * + * repeated .google.spanner.v1.Session session = 1; + */ + com.google.spanner.v1.SessionOrBuilder getSessionOrBuilder(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteRequest.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteRequest.java new file mode 100644 index 000000000000..9c4646e4ece0 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteRequest.java @@ -0,0 +1,2611 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * The request for [BatchWrite][google.spanner.v1.Spanner.BatchWrite].
    + * 
    + * + * Protobuf type {@code google.spanner.v1.BatchWriteRequest} + */ +@com.google.protobuf.Generated +public final class BatchWriteRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.BatchWriteRequest) + BatchWriteRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BatchWriteRequest"); + } + + // Use BatchWriteRequest.newBuilder() to construct. + private BatchWriteRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BatchWriteRequest() { + session_ = ""; + mutationGroups_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchWriteRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchWriteRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.BatchWriteRequest.class, + com.google.spanner.v1.BatchWriteRequest.Builder.class); + } + + public interface MutationGroupOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.BatchWriteRequest.MutationGroup) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * Required. The mutations in this group.
    +     * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List getMutationsList(); + + /** + * + * + *
    +     * Required. The mutations in this group.
    +     * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.v1.Mutation getMutations(int index); + + /** + * + * + *
    +     * Required. The mutations in this group.
    +     * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + int getMutationsCount(); + + /** + * + * + *
    +     * Required. The mutations in this group.
    +     * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List getMutationsOrBuilderList(); + + /** + * + * + *
    +     * Required. The mutations in this group.
    +     * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.v1.MutationOrBuilder getMutationsOrBuilder(int index); + } + + /** + * + * + *
    +   * A group of mutations to be committed together. Related mutations should be
    +   * placed in a group. For example, two mutations inserting rows with the same
    +   * primary key prefix in both parent and child tables are related.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.BatchWriteRequest.MutationGroup} + */ + public static final class MutationGroup extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.BatchWriteRequest.MutationGroup) + MutationGroupOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "MutationGroup"); + } + + // Use MutationGroup.newBuilder() to construct. + private MutationGroup(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private MutationGroup() { + mutations_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchWriteRequest_MutationGroup_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchWriteRequest_MutationGroup_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.BatchWriteRequest.MutationGroup.class, + com.google.spanner.v1.BatchWriteRequest.MutationGroup.Builder.class); + } + + public static final int MUTATIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List mutations_; + + /** + * + * + *
    +     * Required. The mutations in this group.
    +     * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List getMutationsList() { + return mutations_; + } + + /** + * + * + *
    +     * Required. The mutations in this group.
    +     * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List + getMutationsOrBuilderList() { + return mutations_; + } + + /** + * + * + *
    +     * Required. The mutations in this group.
    +     * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public int getMutationsCount() { + return mutations_.size(); + } + + /** + * + * + *
    +     * Required. The mutations in this group.
    +     * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.v1.Mutation getMutations(int index) { + return mutations_.get(index); + } + + /** + * + * + *
    +     * Required. The mutations in this group.
    +     * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.v1.MutationOrBuilder getMutationsOrBuilder(int index) { + return mutations_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < mutations_.size(); i++) { + output.writeMessage(1, mutations_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < mutations_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, mutations_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.BatchWriteRequest.MutationGroup)) { + return super.equals(obj); + } + com.google.spanner.v1.BatchWriteRequest.MutationGroup other = + (com.google.spanner.v1.BatchWriteRequest.MutationGroup) obj; + + if (!getMutationsList().equals(other.getMutationsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getMutationsCount() > 0) { + hash = (37 * hash) + MUTATIONS_FIELD_NUMBER; + hash = (53 * hash) + 
getMutationsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.BatchWriteRequest.MutationGroup parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.BatchWriteRequest.MutationGroup parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.BatchWriteRequest.MutationGroup parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.BatchWriteRequest.MutationGroup parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.BatchWriteRequest.MutationGroup parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.BatchWriteRequest.MutationGroup parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.BatchWriteRequest.MutationGroup parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.BatchWriteRequest.MutationGroup parseFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.BatchWriteRequest.MutationGroup parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.BatchWriteRequest.MutationGroup parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.BatchWriteRequest.MutationGroup parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.BatchWriteRequest.MutationGroup parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.v1.BatchWriteRequest.MutationGroup prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * A group of mutations to be committed together. Related mutations should be
    +     * placed in a group. For example, two mutations inserting rows with the same
    +     * primary key prefix in both parent and child tables are related.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.BatchWriteRequest.MutationGroup} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.BatchWriteRequest.MutationGroup) + com.google.spanner.v1.BatchWriteRequest.MutationGroupOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchWriteRequest_MutationGroup_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchWriteRequest_MutationGroup_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.BatchWriteRequest.MutationGroup.class, + com.google.spanner.v1.BatchWriteRequest.MutationGroup.Builder.class); + } + + // Construct using com.google.spanner.v1.BatchWriteRequest.MutationGroup.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (mutationsBuilder_ == null) { + mutations_ = java.util.Collections.emptyList(); + } else { + mutations_ = null; + mutationsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchWriteRequest_MutationGroup_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.BatchWriteRequest.MutationGroup getDefaultInstanceForType() { + return com.google.spanner.v1.BatchWriteRequest.MutationGroup.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.v1.BatchWriteRequest.MutationGroup build() { + com.google.spanner.v1.BatchWriteRequest.MutationGroup result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.BatchWriteRequest.MutationGroup buildPartial() { + com.google.spanner.v1.BatchWriteRequest.MutationGroup result = + new com.google.spanner.v1.BatchWriteRequest.MutationGroup(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.v1.BatchWriteRequest.MutationGroup result) { + if (mutationsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + mutations_ = java.util.Collections.unmodifiableList(mutations_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.mutations_ = mutations_; + } else { + result.mutations_ = mutationsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.BatchWriteRequest.MutationGroup result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.BatchWriteRequest.MutationGroup) { + return mergeFrom((com.google.spanner.v1.BatchWriteRequest.MutationGroup) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.BatchWriteRequest.MutationGroup other) { + if (other == com.google.spanner.v1.BatchWriteRequest.MutationGroup.getDefaultInstance()) + return this; + if (mutationsBuilder_ == null) { + if (!other.mutations_.isEmpty()) { + if (mutations_.isEmpty()) { + mutations_ = other.mutations_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureMutationsIsMutable(); + mutations_.addAll(other.mutations_); + } + onChanged(); + } + } else { + if (!other.mutations_.isEmpty()) { + if 
(mutationsBuilder_.isEmpty()) { + mutationsBuilder_.dispose(); + mutationsBuilder_ = null; + mutations_ = other.mutations_; + bitField0_ = (bitField0_ & ~0x00000001); + mutationsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetMutationsFieldBuilder() + : null; + } else { + mutationsBuilder_.addAllMessages(other.mutations_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.v1.Mutation m = + input.readMessage(com.google.spanner.v1.Mutation.parser(), extensionRegistry); + if (mutationsBuilder_ == null) { + ensureMutationsIsMutable(); + mutations_.add(m); + } else { + mutationsBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List mutations_ = + java.util.Collections.emptyList(); + + private void ensureMutationsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + mutations_ = new java.util.ArrayList(mutations_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Mutation, + 
com.google.spanner.v1.Mutation.Builder, + com.google.spanner.v1.MutationOrBuilder> + mutationsBuilder_; + + /** + * + * + *
    +       * Required. The mutations in this group.
    +       * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List getMutationsList() { + if (mutationsBuilder_ == null) { + return java.util.Collections.unmodifiableList(mutations_); + } else { + return mutationsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +       * Required. The mutations in this group.
    +       * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public int getMutationsCount() { + if (mutationsBuilder_ == null) { + return mutations_.size(); + } else { + return mutationsBuilder_.getCount(); + } + } + + /** + * + * + *
    +       * Required. The mutations in this group.
    +       * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.Mutation getMutations(int index) { + if (mutationsBuilder_ == null) { + return mutations_.get(index); + } else { + return mutationsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +       * Required. The mutations in this group.
    +       * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setMutations(int index, com.google.spanner.v1.Mutation value) { + if (mutationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMutationsIsMutable(); + mutations_.set(index, value); + onChanged(); + } else { + mutationsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * Required. The mutations in this group.
    +       * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setMutations( + int index, com.google.spanner.v1.Mutation.Builder builderForValue) { + if (mutationsBuilder_ == null) { + ensureMutationsIsMutable(); + mutations_.set(index, builderForValue.build()); + onChanged(); + } else { + mutationsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * Required. The mutations in this group.
    +       * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addMutations(com.google.spanner.v1.Mutation value) { + if (mutationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMutationsIsMutable(); + mutations_.add(value); + onChanged(); + } else { + mutationsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +       * Required. The mutations in this group.
    +       * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addMutations(int index, com.google.spanner.v1.Mutation value) { + if (mutationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMutationsIsMutable(); + mutations_.add(index, value); + onChanged(); + } else { + mutationsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * Required. The mutations in this group.
    +       * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addMutations(com.google.spanner.v1.Mutation.Builder builderForValue) { + if (mutationsBuilder_ == null) { + ensureMutationsIsMutable(); + mutations_.add(builderForValue.build()); + onChanged(); + } else { + mutationsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * Required. The mutations in this group.
    +       * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addMutations( + int index, com.google.spanner.v1.Mutation.Builder builderForValue) { + if (mutationsBuilder_ == null) { + ensureMutationsIsMutable(); + mutations_.add(index, builderForValue.build()); + onChanged(); + } else { + mutationsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * Required. The mutations in this group.
    +       * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addAllMutations( + java.lang.Iterable values) { + if (mutationsBuilder_ == null) { + ensureMutationsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, mutations_); + onChanged(); + } else { + mutationsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +       * Required. The mutations in this group.
    +       * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearMutations() { + if (mutationsBuilder_ == null) { + mutations_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + mutationsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +       * Required. The mutations in this group.
    +       * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder removeMutations(int index) { + if (mutationsBuilder_ == null) { + ensureMutationsIsMutable(); + mutations_.remove(index); + onChanged(); + } else { + mutationsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +       * Required. The mutations in this group.
    +       * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.Mutation.Builder getMutationsBuilder(int index) { + return internalGetMutationsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +       * Required. The mutations in this group.
    +       * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.MutationOrBuilder getMutationsOrBuilder(int index) { + if (mutationsBuilder_ == null) { + return mutations_.get(index); + } else { + return mutationsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +       * Required. The mutations in this group.
    +       * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getMutationsOrBuilderList() { + if (mutationsBuilder_ != null) { + return mutationsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(mutations_); + } + } + + /** + * + * + *
    +       * Required. The mutations in this group.
    +       * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.Mutation.Builder addMutationsBuilder() { + return internalGetMutationsFieldBuilder() + .addBuilder(com.google.spanner.v1.Mutation.getDefaultInstance()); + } + + /** + * + * + *
    +       * Required. The mutations in this group.
    +       * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.Mutation.Builder addMutationsBuilder(int index) { + return internalGetMutationsFieldBuilder() + .addBuilder(index, com.google.spanner.v1.Mutation.getDefaultInstance()); + } + + /** + * + * + *
    +       * Required. The mutations in this group.
    +       * 
    + * + * + * repeated .google.spanner.v1.Mutation mutations = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List getMutationsBuilderList() { + return internalGetMutationsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Mutation, + com.google.spanner.v1.Mutation.Builder, + com.google.spanner.v1.MutationOrBuilder> + internalGetMutationsFieldBuilder() { + if (mutationsBuilder_ == null) { + mutationsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Mutation, + com.google.spanner.v1.Mutation.Builder, + com.google.spanner.v1.MutationOrBuilder>( + mutations_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + mutations_ = null; + } + return mutationsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.BatchWriteRequest.MutationGroup) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.BatchWriteRequest.MutationGroup) + private static final com.google.spanner.v1.BatchWriteRequest.MutationGroup DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.BatchWriteRequest.MutationGroup(); + } + + public static com.google.spanner.v1.BatchWriteRequest.MutationGroup getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public MutationGroup parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + 
.setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.BatchWriteRequest.MutationGroup getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int SESSION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object session_ = ""; + + /** + * + * + *
    +   * Required. The session in which the batch request is to be run.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + @java.lang.Override + public java.lang.String getSession() { + java.lang.Object ref = session_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + session_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The session in which the batch request is to be run.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSessionBytes() { + java.lang.Object ref = session_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + session_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int REQUEST_OPTIONS_FIELD_NUMBER = 3; + private com.google.spanner.v1.RequestOptions requestOptions_; + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + * + * @return Whether the requestOptions field is set. + */ + @java.lang.Override + public boolean hasRequestOptions() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + * + * @return The requestOptions. + */ + @java.lang.Override + public com.google.spanner.v1.RequestOptions getRequestOptions() { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + */ + @java.lang.Override + public com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder() { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } + + public static final int MUTATION_GROUPS_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private java.util.List mutationGroups_; + + /** + * + * + *
    +   * Required. The groups of mutations to be applied.
    +   * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List + getMutationGroupsList() { + return mutationGroups_; + } + + /** + * + * + *
    +   * Required. The groups of mutations to be applied.
    +   * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List + getMutationGroupsOrBuilderList() { + return mutationGroups_; + } + + /** + * + * + *
    +   * Required. The groups of mutations to be applied.
    +   * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public int getMutationGroupsCount() { + return mutationGroups_.size(); + } + + /** + * + * + *
    +   * Required. The groups of mutations to be applied.
    +   * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.v1.BatchWriteRequest.MutationGroup getMutationGroups(int index) { + return mutationGroups_.get(index); + } + + /** + * + * + *
    +   * Required. The groups of mutations to be applied.
    +   * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.v1.BatchWriteRequest.MutationGroupOrBuilder getMutationGroupsOrBuilder( + int index) { + return mutationGroups_.get(index); + } + + public static final int EXCLUDE_TXN_FROM_CHANGE_STREAMS_FIELD_NUMBER = 5; + private boolean excludeTxnFromChangeStreams_ = false; + + /** + * + * + *
    +   * Optional. If you don't set the `exclude_txn_from_change_streams` option or
    +   * if it's set to `false`, then any change streams monitoring columns modified
    +   * by transactions will capture the updates made within that transaction.
    +   * 
    + * + * bool exclude_txn_from_change_streams = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The excludeTxnFromChangeStreams. + */ + @java.lang.Override + public boolean getExcludeTxnFromChangeStreams() { + return excludeTxnFromChangeStreams_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(session_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, session_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getRequestOptions()); + } + for (int i = 0; i < mutationGroups_.size(); i++) { + output.writeMessage(4, mutationGroups_.get(i)); + } + if (excludeTxnFromChangeStreams_ != false) { + output.writeBool(5, excludeTxnFromChangeStreams_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(session_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, session_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getRequestOptions()); + } + for (int i = 0; i < mutationGroups_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, mutationGroups_.get(i)); + } + if (excludeTxnFromChangeStreams_ != false) { + size += + com.google.protobuf.CodedOutputStream.computeBoolSize(5, excludeTxnFromChangeStreams_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + 
@java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.BatchWriteRequest)) { + return super.equals(obj); + } + com.google.spanner.v1.BatchWriteRequest other = (com.google.spanner.v1.BatchWriteRequest) obj; + + if (!getSession().equals(other.getSession())) return false; + if (hasRequestOptions() != other.hasRequestOptions()) return false; + if (hasRequestOptions()) { + if (!getRequestOptions().equals(other.getRequestOptions())) return false; + } + if (!getMutationGroupsList().equals(other.getMutationGroupsList())) return false; + if (getExcludeTxnFromChangeStreams() != other.getExcludeTxnFromChangeStreams()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SESSION_FIELD_NUMBER; + hash = (53 * hash) + getSession().hashCode(); + if (hasRequestOptions()) { + hash = (37 * hash) + REQUEST_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getRequestOptions().hashCode(); + } + if (getMutationGroupsCount() > 0) { + hash = (37 * hash) + MUTATION_GROUPS_FIELD_NUMBER; + hash = (53 * hash) + getMutationGroupsList().hashCode(); + } + hash = (37 * hash) + EXCLUDE_TXN_FROM_CHANGE_STREAMS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getExcludeTxnFromChangeStreams()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.BatchWriteRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.BatchWriteRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.BatchWriteRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.BatchWriteRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.BatchWriteRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.BatchWriteRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.BatchWriteRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.BatchWriteRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.BatchWriteRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.BatchWriteRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.BatchWriteRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.BatchWriteRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.BatchWriteRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for [BatchWrite][google.spanner.v1.Spanner.BatchWrite].
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.BatchWriteRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.BatchWriteRequest) + com.google.spanner.v1.BatchWriteRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchWriteRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchWriteRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.BatchWriteRequest.class, + com.google.spanner.v1.BatchWriteRequest.Builder.class); + } + + // Construct using com.google.spanner.v1.BatchWriteRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetRequestOptionsFieldBuilder(); + internalGetMutationGroupsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + session_ = ""; + requestOptions_ = null; + if (requestOptionsBuilder_ != null) { + requestOptionsBuilder_.dispose(); + requestOptionsBuilder_ = null; + } + if (mutationGroupsBuilder_ == null) { + mutationGroups_ = java.util.Collections.emptyList(); + } else { + mutationGroups_ = null; + mutationGroupsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + excludeTxnFromChangeStreams_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor 
getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchWriteRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.BatchWriteRequest getDefaultInstanceForType() { + return com.google.spanner.v1.BatchWriteRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.BatchWriteRequest build() { + com.google.spanner.v1.BatchWriteRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.BatchWriteRequest buildPartial() { + com.google.spanner.v1.BatchWriteRequest result = + new com.google.spanner.v1.BatchWriteRequest(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.v1.BatchWriteRequest result) { + if (mutationGroupsBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0)) { + mutationGroups_ = java.util.Collections.unmodifiableList(mutationGroups_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.mutationGroups_ = mutationGroups_; + } else { + result.mutationGroups_ = mutationGroupsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.BatchWriteRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.session_ = session_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.requestOptions_ = + requestOptionsBuilder_ == null ? 
requestOptions_ : requestOptionsBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.excludeTxnFromChangeStreams_ = excludeTxnFromChangeStreams_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.BatchWriteRequest) { + return mergeFrom((com.google.spanner.v1.BatchWriteRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.BatchWriteRequest other) { + if (other == com.google.spanner.v1.BatchWriteRequest.getDefaultInstance()) return this; + if (!other.getSession().isEmpty()) { + session_ = other.session_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasRequestOptions()) { + mergeRequestOptions(other.getRequestOptions()); + } + if (mutationGroupsBuilder_ == null) { + if (!other.mutationGroups_.isEmpty()) { + if (mutationGroups_.isEmpty()) { + mutationGroups_ = other.mutationGroups_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureMutationGroupsIsMutable(); + mutationGroups_.addAll(other.mutationGroups_); + } + onChanged(); + } + } else { + if (!other.mutationGroups_.isEmpty()) { + if (mutationGroupsBuilder_.isEmpty()) { + mutationGroupsBuilder_.dispose(); + mutationGroupsBuilder_ = null; + mutationGroups_ = other.mutationGroups_; + bitField0_ = (bitField0_ & ~0x00000004); + mutationGroupsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetMutationGroupsFieldBuilder() + : null; + } else { + mutationGroupsBuilder_.addAllMessages(other.mutationGroups_); + } + } + } + if (other.getExcludeTxnFromChangeStreams() != false) { + setExcludeTxnFromChangeStreams(other.getExcludeTxnFromChangeStreams()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + session_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 26: + { + input.readMessage( + internalGetRequestOptionsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 26 + case 34: + { + com.google.spanner.v1.BatchWriteRequest.MutationGroup m = + input.readMessage( + com.google.spanner.v1.BatchWriteRequest.MutationGroup.parser(), + extensionRegistry); + if (mutationGroupsBuilder_ == null) { + ensureMutationGroupsIsMutable(); + mutationGroups_.add(m); + } else { + mutationGroupsBuilder_.addMessage(m); + } + break; + } // case 34 + case 40: + { + excludeTxnFromChangeStreams_ = input.readBool(); + bitField0_ |= 0x00000008; + break; + } // case 40 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private 
java.lang.Object session_ = ""; + + /** + * + * + *
    +     * Required. The session in which the batch request is to be run.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + public java.lang.String getSession() { + java.lang.Object ref = session_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + session_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The session in which the batch request is to be run.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + public com.google.protobuf.ByteString getSessionBytes() { + java.lang.Object ref = session_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + session_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The session in which the batch request is to be run.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The session to set. + * @return This builder for chaining. + */ + public Builder setSession(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + session_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The session in which the batch request is to be run.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearSession() { + session_ = getDefaultInstance().getSession(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The session in which the batch request is to be run.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for session to set. + * @return This builder for chaining. + */ + public Builder setSessionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + session_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.spanner.v1.RequestOptions requestOptions_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RequestOptions, + com.google.spanner.v1.RequestOptions.Builder, + com.google.spanner.v1.RequestOptionsOrBuilder> + requestOptionsBuilder_; + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + * + * @return Whether the requestOptions field is set. + */ + public boolean hasRequestOptions() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + * + * @return The requestOptions. + */ + public com.google.spanner.v1.RequestOptions getRequestOptions() { + if (requestOptionsBuilder_ == null) { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } else { + return requestOptionsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + */ + public Builder setRequestOptions(com.google.spanner.v1.RequestOptions value) { + if (requestOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + requestOptions_ = value; + } else { + requestOptionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + */ + public Builder setRequestOptions(com.google.spanner.v1.RequestOptions.Builder builderForValue) { + if (requestOptionsBuilder_ == null) { + requestOptions_ = builderForValue.build(); + } else { + requestOptionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + */ + public Builder mergeRequestOptions(com.google.spanner.v1.RequestOptions value) { + if (requestOptionsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && requestOptions_ != null + && requestOptions_ != com.google.spanner.v1.RequestOptions.getDefaultInstance()) { + getRequestOptionsBuilder().mergeFrom(value); + } else { + requestOptions_ = value; + } + } else { + requestOptionsBuilder_.mergeFrom(value); + } + if (requestOptions_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + */ + public Builder clearRequestOptions() { + bitField0_ = (bitField0_ & ~0x00000002); + requestOptions_ = null; + if (requestOptionsBuilder_ != null) { + requestOptionsBuilder_.dispose(); + requestOptionsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + */ + public com.google.spanner.v1.RequestOptions.Builder getRequestOptionsBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetRequestOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + */ + public com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder() { + if (requestOptionsBuilder_ != null) { + return requestOptionsBuilder_.getMessageOrBuilder(); + } else { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RequestOptions, + com.google.spanner.v1.RequestOptions.Builder, + com.google.spanner.v1.RequestOptionsOrBuilder> + internalGetRequestOptionsFieldBuilder() { + if (requestOptionsBuilder_ == null) { + requestOptionsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RequestOptions, + com.google.spanner.v1.RequestOptions.Builder, + com.google.spanner.v1.RequestOptionsOrBuilder>( + getRequestOptions(), getParentForChildren(), isClean()); + requestOptions_ = null; + } + return requestOptionsBuilder_; + } + + private java.util.List mutationGroups_ = + java.util.Collections.emptyList(); + + private void ensureMutationGroupsIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + mutationGroups_ = + new java.util.ArrayList( + mutationGroups_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.BatchWriteRequest.MutationGroup, + com.google.spanner.v1.BatchWriteRequest.MutationGroup.Builder, + com.google.spanner.v1.BatchWriteRequest.MutationGroupOrBuilder> + mutationGroupsBuilder_; + + /** + * + * + *
    +     * Required. The groups of mutations to be applied.
    +     * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getMutationGroupsList() { + if (mutationGroupsBuilder_ == null) { + return java.util.Collections.unmodifiableList(mutationGroups_); + } else { + return mutationGroupsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Required. The groups of mutations to be applied.
    +     * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public int getMutationGroupsCount() { + if (mutationGroupsBuilder_ == null) { + return mutationGroups_.size(); + } else { + return mutationGroupsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Required. The groups of mutations to be applied.
    +     * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.BatchWriteRequest.MutationGroup getMutationGroups(int index) { + if (mutationGroupsBuilder_ == null) { + return mutationGroups_.get(index); + } else { + return mutationGroupsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Required. The groups of mutations to be applied.
    +     * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setMutationGroups( + int index, com.google.spanner.v1.BatchWriteRequest.MutationGroup value) { + if (mutationGroupsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMutationGroupsIsMutable(); + mutationGroups_.set(index, value); + onChanged(); + } else { + mutationGroupsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Required. The groups of mutations to be applied.
    +     * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setMutationGroups( + int index, com.google.spanner.v1.BatchWriteRequest.MutationGroup.Builder builderForValue) { + if (mutationGroupsBuilder_ == null) { + ensureMutationGroupsIsMutable(); + mutationGroups_.set(index, builderForValue.build()); + onChanged(); + } else { + mutationGroupsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Required. The groups of mutations to be applied.
    +     * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addMutationGroups(com.google.spanner.v1.BatchWriteRequest.MutationGroup value) { + if (mutationGroupsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMutationGroupsIsMutable(); + mutationGroups_.add(value); + onChanged(); + } else { + mutationGroupsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Required. The groups of mutations to be applied.
    +     * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addMutationGroups( + int index, com.google.spanner.v1.BatchWriteRequest.MutationGroup value) { + if (mutationGroupsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMutationGroupsIsMutable(); + mutationGroups_.add(index, value); + onChanged(); + } else { + mutationGroupsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Required. The groups of mutations to be applied.
    +     * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addMutationGroups( + com.google.spanner.v1.BatchWriteRequest.MutationGroup.Builder builderForValue) { + if (mutationGroupsBuilder_ == null) { + ensureMutationGroupsIsMutable(); + mutationGroups_.add(builderForValue.build()); + onChanged(); + } else { + mutationGroupsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Required. The groups of mutations to be applied.
    +     * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addMutationGroups( + int index, com.google.spanner.v1.BatchWriteRequest.MutationGroup.Builder builderForValue) { + if (mutationGroupsBuilder_ == null) { + ensureMutationGroupsIsMutable(); + mutationGroups_.add(index, builderForValue.build()); + onChanged(); + } else { + mutationGroupsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Required. The groups of mutations to be applied.
    +     * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addAllMutationGroups( + java.lang.Iterable + values) { + if (mutationGroupsBuilder_ == null) { + ensureMutationGroupsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, mutationGroups_); + onChanged(); + } else { + mutationGroupsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Required. The groups of mutations to be applied.
    +     * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearMutationGroups() { + if (mutationGroupsBuilder_ == null) { + mutationGroups_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + mutationGroupsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Required. The groups of mutations to be applied.
    +     * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder removeMutationGroups(int index) { + if (mutationGroupsBuilder_ == null) { + ensureMutationGroupsIsMutable(); + mutationGroups_.remove(index); + onChanged(); + } else { + mutationGroupsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Required. The groups of mutations to be applied.
    +     * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.BatchWriteRequest.MutationGroup.Builder getMutationGroupsBuilder( + int index) { + return internalGetMutationGroupsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Required. The groups of mutations to be applied.
    +     * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.BatchWriteRequest.MutationGroupOrBuilder + getMutationGroupsOrBuilder(int index) { + if (mutationGroupsBuilder_ == null) { + return mutationGroups_.get(index); + } else { + return mutationGroupsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Required. The groups of mutations to be applied.
    +     * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getMutationGroupsOrBuilderList() { + if (mutationGroupsBuilder_ != null) { + return mutationGroupsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(mutationGroups_); + } + } + + /** + * + * + *
    +     * Required. The groups of mutations to be applied.
    +     * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.BatchWriteRequest.MutationGroup.Builder + addMutationGroupsBuilder() { + return internalGetMutationGroupsFieldBuilder() + .addBuilder(com.google.spanner.v1.BatchWriteRequest.MutationGroup.getDefaultInstance()); + } + + /** + * + * + *
    +     * Required. The groups of mutations to be applied.
    +     * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.BatchWriteRequest.MutationGroup.Builder addMutationGroupsBuilder( + int index) { + return internalGetMutationGroupsFieldBuilder() + .addBuilder( + index, com.google.spanner.v1.BatchWriteRequest.MutationGroup.getDefaultInstance()); + } + + /** + * + * + *
    +     * Required. The groups of mutations to be applied.
    +     * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getMutationGroupsBuilderList() { + return internalGetMutationGroupsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.BatchWriteRequest.MutationGroup, + com.google.spanner.v1.BatchWriteRequest.MutationGroup.Builder, + com.google.spanner.v1.BatchWriteRequest.MutationGroupOrBuilder> + internalGetMutationGroupsFieldBuilder() { + if (mutationGroupsBuilder_ == null) { + mutationGroupsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.BatchWriteRequest.MutationGroup, + com.google.spanner.v1.BatchWriteRequest.MutationGroup.Builder, + com.google.spanner.v1.BatchWriteRequest.MutationGroupOrBuilder>( + mutationGroups_, + ((bitField0_ & 0x00000004) != 0), + getParentForChildren(), + isClean()); + mutationGroups_ = null; + } + return mutationGroupsBuilder_; + } + + private boolean excludeTxnFromChangeStreams_; + + /** + * + * + *
    +     * Optional. If you don't set the `exclude_txn_from_change_streams` option or
    +     * if it's set to `false`, then any change streams monitoring columns modified
    +     * by transactions will capture the updates made within that transaction.
    +     * 
    + * + * bool exclude_txn_from_change_streams = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The excludeTxnFromChangeStreams. + */ + @java.lang.Override + public boolean getExcludeTxnFromChangeStreams() { + return excludeTxnFromChangeStreams_; + } + + /** + * + * + *
    +     * Optional. If you don't set the `exclude_txn_from_change_streams` option or
    +     * if it's set to `false`, then any change streams monitoring columns modified
    +     * by transactions will capture the updates made within that transaction.
    +     * 
    + * + * bool exclude_txn_from_change_streams = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The excludeTxnFromChangeStreams to set. + * @return This builder for chaining. + */ + public Builder setExcludeTxnFromChangeStreams(boolean value) { + + excludeTxnFromChangeStreams_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. If you don't set the `exclude_txn_from_change_streams` option or
    +     * if it's set to `false`, then any change streams monitoring columns modified
    +     * by transactions will capture the updates made within that transaction.
    +     * 
    + * + * bool exclude_txn_from_change_streams = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearExcludeTxnFromChangeStreams() { + bitField0_ = (bitField0_ & ~0x00000008); + excludeTxnFromChangeStreams_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.BatchWriteRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.BatchWriteRequest) + private static final com.google.spanner.v1.BatchWriteRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.BatchWriteRequest(); + } + + public static com.google.spanner.v1.BatchWriteRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchWriteRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.BatchWriteRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff 
--git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteRequestOrBuilder.java new file mode 100644 index 000000000000..3144f27ea9be --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteRequestOrBuilder.java @@ -0,0 +1,178 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface BatchWriteRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.BatchWriteRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The session in which the batch request is to be run.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + java.lang.String getSession(); + + /** + * + * + *
    +   * Required. The session in which the batch request is to be run.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + com.google.protobuf.ByteString getSessionBytes(); + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + * + * @return Whether the requestOptions field is set. + */ + boolean hasRequestOptions(); + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + * + * @return The requestOptions. + */ + com.google.spanner.v1.RequestOptions getRequestOptions(); + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + */ + com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder(); + + /** + * + * + *
    +   * Required. The groups of mutations to be applied.
    +   * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List getMutationGroupsList(); + + /** + * + * + *
    +   * Required. The groups of mutations to be applied.
    +   * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.v1.BatchWriteRequest.MutationGroup getMutationGroups(int index); + + /** + * + * + *
    +   * Required. The groups of mutations to be applied.
    +   * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + int getMutationGroupsCount(); + + /** + * + * + *
    +   * Required. The groups of mutations to be applied.
    +   * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List + getMutationGroupsOrBuilderList(); + + /** + * + * + *
    +   * Required. The groups of mutations to be applied.
    +   * 
    + * + * + * repeated .google.spanner.v1.BatchWriteRequest.MutationGroup mutation_groups = 4 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.v1.BatchWriteRequest.MutationGroupOrBuilder getMutationGroupsOrBuilder( + int index); + + /** + * + * + *
    +   * Optional. If you don't set the `exclude_txn_from_change_streams` option or
    +   * if it's set to `false`, then any change streams monitoring columns modified
    +   * by transactions will capture the updates made within that transaction.
    +   * 
    + * + * bool exclude_txn_from_change_streams = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The excludeTxnFromChangeStreams. + */ + boolean getExcludeTxnFromChangeStreams(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteResponse.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteResponse.java new file mode 100644 index 000000000000..35279f82854a --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteResponse.java @@ -0,0 +1,1298 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * The result of applying a batch of mutations.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.BatchWriteResponse} + */ +@com.google.protobuf.Generated +public final class BatchWriteResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.BatchWriteResponse) + BatchWriteResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BatchWriteResponse"); + } + + // Use BatchWriteResponse.newBuilder() to construct. + private BatchWriteResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BatchWriteResponse() { + indexes_ = emptyIntList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchWriteResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchWriteResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.BatchWriteResponse.class, + com.google.spanner.v1.BatchWriteResponse.Builder.class); + } + + private int bitField0_; + public static final int INDEXES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.Internal.IntList indexes_ = emptyIntList(); + + /** + * + * + *
    +   * The mutation groups applied in this batch. The values index into the
    +   * `mutation_groups` field in the corresponding `BatchWriteRequest`.
    +   * 
    + * + * repeated int32 indexes = 1; + * + * @return A list containing the indexes. + */ + @java.lang.Override + public java.util.List getIndexesList() { + return indexes_; + } + + /** + * + * + *
    +   * The mutation groups applied in this batch. The values index into the
    +   * `mutation_groups` field in the corresponding `BatchWriteRequest`.
    +   * 
    + * + * repeated int32 indexes = 1; + * + * @return The count of indexes. + */ + public int getIndexesCount() { + return indexes_.size(); + } + + /** + * + * + *
    +   * The mutation groups applied in this batch. The values index into the
    +   * `mutation_groups` field in the corresponding `BatchWriteRequest`.
    +   * 
    + * + * repeated int32 indexes = 1; + * + * @param index The index of the element to return. + * @return The indexes at the given index. + */ + public int getIndexes(int index) { + return indexes_.getInt(index); + } + + private int indexesMemoizedSerializedSize = -1; + + public static final int STATUS_FIELD_NUMBER = 2; + private com.google.rpc.Status status_; + + /** + * + * + *
    +   * An `OK` status indicates success. Any other status indicates a failure.
    +   * 
    + * + * .google.rpc.Status status = 2; + * + * @return Whether the status field is set. + */ + @java.lang.Override + public boolean hasStatus() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * An `OK` status indicates success. Any other status indicates a failure.
    +   * 
    + * + * .google.rpc.Status status = 2; + * + * @return The status. + */ + @java.lang.Override + public com.google.rpc.Status getStatus() { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } + + /** + * + * + *
    +   * An `OK` status indicates success. Any other status indicates a failure.
    +   * 
    + * + * .google.rpc.Status status = 2; + */ + @java.lang.Override + public com.google.rpc.StatusOrBuilder getStatusOrBuilder() { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } + + public static final int COMMIT_TIMESTAMP_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp commitTimestamp_; + + /** + * + * + *
    +   * The commit timestamp of the transaction that applied this batch.
    +   * Present if status is OK and the mutation groups were applied, absent
    +   * otherwise.
    +   *
    +   * For mutation groups with conditions, a status=OK and missing
    +   * commit_timestamp means that the mutation groups were not applied due to the
    +   * condition not being satisfied after evaluation.
    +   * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 3; + * + * @return Whether the commitTimestamp field is set. + */ + @java.lang.Override + public boolean hasCommitTimestamp() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * The commit timestamp of the transaction that applied this batch.
    +   * Present if status is OK and the mutation groups were applied, absent
    +   * otherwise.
    +   *
    +   * For mutation groups with conditions, a status=OK and missing
    +   * commit_timestamp means that the mutation groups were not applied due to the
    +   * condition not being satisfied after evaluation.
    +   * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 3; + * + * @return The commitTimestamp. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCommitTimestamp() { + return commitTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTimestamp_; + } + + /** + * + * + *
    +   * The commit timestamp of the transaction that applied this batch.
    +   * Present if status is OK and the mutation groups were applied, absent
    +   * otherwise.
    +   *
    +   * For mutation groups with conditions, a status=OK and missing
    +   * commit_timestamp means that the mutation groups were not applied due to the
    +   * condition not being satisfied after evaluation.
    +   * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 3; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCommitTimestampOrBuilder() { + return commitTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTimestamp_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getSerializedSize(); + if (getIndexesList().size() > 0) { + output.writeUInt32NoTag(10); + output.writeUInt32NoTag(indexesMemoizedSerializedSize); + } + for (int i = 0; i < indexes_.size(); i++) { + output.writeInt32NoTag(indexes_.getInt(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getStatus()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getCommitTimestamp()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < indexes_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream.computeInt32SizeNoTag(indexes_.getInt(i)); + } + size += dataSize; + if (!getIndexesList().isEmpty()) { + size += 1; + size += com.google.protobuf.CodedOutputStream.computeInt32SizeNoTag(dataSize); + } + indexesMemoizedSerializedSize = dataSize; + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getStatus()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getCommitTimestamp()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = 
size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.BatchWriteResponse)) { + return super.equals(obj); + } + com.google.spanner.v1.BatchWriteResponse other = (com.google.spanner.v1.BatchWriteResponse) obj; + + if (!getIndexesList().equals(other.getIndexesList())) return false; + if (hasStatus() != other.hasStatus()) return false; + if (hasStatus()) { + if (!getStatus().equals(other.getStatus())) return false; + } + if (hasCommitTimestamp() != other.hasCommitTimestamp()) return false; + if (hasCommitTimestamp()) { + if (!getCommitTimestamp().equals(other.getCommitTimestamp())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getIndexesCount() > 0) { + hash = (37 * hash) + INDEXES_FIELD_NUMBER; + hash = (53 * hash) + getIndexesList().hashCode(); + } + if (hasStatus()) { + hash = (37 * hash) + STATUS_FIELD_NUMBER; + hash = (53 * hash) + getStatus().hashCode(); + } + if (hasCommitTimestamp()) { + hash = (37 * hash) + COMMIT_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + getCommitTimestamp().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.BatchWriteResponse parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.BatchWriteResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.v1.BatchWriteResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.BatchWriteResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.BatchWriteResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.BatchWriteResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.BatchWriteResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.BatchWriteResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.BatchWriteResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.BatchWriteResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + 
public static com.google.spanner.v1.BatchWriteResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.BatchWriteResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.BatchWriteResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The result of applying a batch of mutations.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.BatchWriteResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.BatchWriteResponse) + com.google.spanner.v1.BatchWriteResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchWriteResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchWriteResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.BatchWriteResponse.class, + com.google.spanner.v1.BatchWriteResponse.Builder.class); + } + + // Construct using com.google.spanner.v1.BatchWriteResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetStatusFieldBuilder(); + internalGetCommitTimestampFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + indexes_ = emptyIntList(); + status_ = null; + if (statusBuilder_ != null) { + statusBuilder_.dispose(); + statusBuilder_ = null; + } + commitTimestamp_ = null; + if (commitTimestampBuilder_ != null) { + commitTimestampBuilder_.dispose(); + commitTimestampBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BatchWriteResponse_descriptor; 
+ } + + @java.lang.Override + public com.google.spanner.v1.BatchWriteResponse getDefaultInstanceForType() { + return com.google.spanner.v1.BatchWriteResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.BatchWriteResponse build() { + com.google.spanner.v1.BatchWriteResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.BatchWriteResponse buildPartial() { + com.google.spanner.v1.BatchWriteResponse result = + new com.google.spanner.v1.BatchWriteResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.BatchWriteResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + indexes_.makeImmutable(); + result.indexes_ = indexes_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.status_ = statusBuilder_ == null ? status_ : statusBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.commitTimestamp_ = + commitTimestampBuilder_ == null ? 
commitTimestamp_ : commitTimestampBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.BatchWriteResponse) { + return mergeFrom((com.google.spanner.v1.BatchWriteResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.BatchWriteResponse other) { + if (other == com.google.spanner.v1.BatchWriteResponse.getDefaultInstance()) return this; + if (!other.indexes_.isEmpty()) { + if (indexes_.isEmpty()) { + indexes_ = other.indexes_; + indexes_.makeImmutable(); + bitField0_ |= 0x00000001; + } else { + ensureIndexesIsMutable(); + indexes_.addAll(other.indexes_); + } + onChanged(); + } + if (other.hasStatus()) { + mergeStatus(other.getStatus()); + } + if (other.hasCommitTimestamp()) { + mergeCommitTimestamp(other.getCommitTimestamp()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + int v = input.readInt32(); + ensureIndexesIsMutable(); + indexes_.addInt(v); + break; + } // case 8 + case 10: + { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + ensureIndexesIsMutable(); + while (input.getBytesUntilLimit() > 0) { + indexes_.addInt(input.readInt32()); + } + input.popLimit(limit); + break; + } // case 10 + case 18: + { + 
input.readMessage(internalGetStatusFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetCommitTimestampFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Internal.IntList indexes_ = emptyIntList(); + + private void ensureIndexesIsMutable() { + if (!indexes_.isModifiable()) { + indexes_ = makeMutableCopy(indexes_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
    +     * The mutation groups applied in this batch. The values index into the
    +     * `mutation_groups` field in the corresponding `BatchWriteRequest`.
    +     * 
    + * + * repeated int32 indexes = 1; + * + * @return A list containing the indexes. + */ + public java.util.List getIndexesList() { + indexes_.makeImmutable(); + return indexes_; + } + + /** + * + * + *
    +     * The mutation groups applied in this batch. The values index into the
    +     * `mutation_groups` field in the corresponding `BatchWriteRequest`.
    +     * 
    + * + * repeated int32 indexes = 1; + * + * @return The count of indexes. + */ + public int getIndexesCount() { + return indexes_.size(); + } + + /** + * + * + *
    +     * The mutation groups applied in this batch. The values index into the
    +     * `mutation_groups` field in the corresponding `BatchWriteRequest`.
    +     * 
    + * + * repeated int32 indexes = 1; + * + * @param index The index of the element to return. + * @return The indexes at the given index. + */ + public int getIndexes(int index) { + return indexes_.getInt(index); + } + + /** + * + * + *
    +     * The mutation groups applied in this batch. The values index into the
    +     * `mutation_groups` field in the corresponding `BatchWriteRequest`.
    +     * 
    + * + * repeated int32 indexes = 1; + * + * @param index The index to set the value at. + * @param value The indexes to set. + * @return This builder for chaining. + */ + public Builder setIndexes(int index, int value) { + + ensureIndexesIsMutable(); + indexes_.setInt(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The mutation groups applied in this batch. The values index into the
    +     * `mutation_groups` field in the corresponding `BatchWriteRequest`.
    +     * 
    + * + * repeated int32 indexes = 1; + * + * @param value The indexes to add. + * @return This builder for chaining. + */ + public Builder addIndexes(int value) { + + ensureIndexesIsMutable(); + indexes_.addInt(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The mutation groups applied in this batch. The values index into the
    +     * `mutation_groups` field in the corresponding `BatchWriteRequest`.
    +     * 
    + * + * repeated int32 indexes = 1; + * + * @param values The indexes to add. + * @return This builder for chaining. + */ + public Builder addAllIndexes(java.lang.Iterable values) { + ensureIndexesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, indexes_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The mutation groups applied in this batch. The values index into the
    +     * `mutation_groups` field in the corresponding `BatchWriteRequest`.
    +     * 
    + * + * repeated int32 indexes = 1; + * + * @return This builder for chaining. + */ + public Builder clearIndexes() { + indexes_ = emptyIntList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + private com.google.rpc.Status status_; + private com.google.protobuf.SingleFieldBuilder< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + statusBuilder_; + + /** + * + * + *
    +     * An `OK` status indicates success. Any other status indicates a failure.
    +     * 
    + * + * .google.rpc.Status status = 2; + * + * @return Whether the status field is set. + */ + public boolean hasStatus() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * An `OK` status indicates success. Any other status indicates a failure.
    +     * 
    + * + * .google.rpc.Status status = 2; + * + * @return The status. + */ + public com.google.rpc.Status getStatus() { + if (statusBuilder_ == null) { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } else { + return statusBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * An `OK` status indicates success. Any other status indicates a failure.
    +     * 
    + * + * .google.rpc.Status status = 2; + */ + public Builder setStatus(com.google.rpc.Status value) { + if (statusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + status_ = value; + } else { + statusBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * An `OK` status indicates success. Any other status indicates a failure.
    +     * 
    + * + * .google.rpc.Status status = 2; + */ + public Builder setStatus(com.google.rpc.Status.Builder builderForValue) { + if (statusBuilder_ == null) { + status_ = builderForValue.build(); + } else { + statusBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * An `OK` status indicates success. Any other status indicates a failure.
    +     * 
    + * + * .google.rpc.Status status = 2; + */ + public Builder mergeStatus(com.google.rpc.Status value) { + if (statusBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && status_ != null + && status_ != com.google.rpc.Status.getDefaultInstance()) { + getStatusBuilder().mergeFrom(value); + } else { + status_ = value; + } + } else { + statusBuilder_.mergeFrom(value); + } + if (status_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * An `OK` status indicates success. Any other status indicates a failure.
    +     * 
    + * + * .google.rpc.Status status = 2; + */ + public Builder clearStatus() { + bitField0_ = (bitField0_ & ~0x00000002); + status_ = null; + if (statusBuilder_ != null) { + statusBuilder_.dispose(); + statusBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * An `OK` status indicates success. Any other status indicates a failure.
    +     * 
    + * + * .google.rpc.Status status = 2; + */ + public com.google.rpc.Status.Builder getStatusBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetStatusFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * An `OK` status indicates success. Any other status indicates a failure.
    +     * 
    + * + * .google.rpc.Status status = 2; + */ + public com.google.rpc.StatusOrBuilder getStatusOrBuilder() { + if (statusBuilder_ != null) { + return statusBuilder_.getMessageOrBuilder(); + } else { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } + } + + /** + * + * + *
    +     * An `OK` status indicates success. Any other status indicates a failure.
    +     * 
    + * + * .google.rpc.Status status = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + internalGetStatusFieldBuilder() { + if (statusBuilder_ == null) { + statusBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.rpc.Status, + com.google.rpc.Status.Builder, + com.google.rpc.StatusOrBuilder>(getStatus(), getParentForChildren(), isClean()); + status_ = null; + } + return statusBuilder_; + } + + private com.google.protobuf.Timestamp commitTimestamp_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + commitTimestampBuilder_; + + /** + * + * + *
    +     * The commit timestamp of the transaction that applied this batch.
    +     * Present if status is OK and the mutation groups were applied, absent
    +     * otherwise.
    +     *
    +     * For mutation groups with conditions, a status=OK and missing
    +     * commit_timestamp means that the mutation groups were not applied due to the
    +     * condition not being satisfied after evaluation.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 3; + * + * @return Whether the commitTimestamp field is set. + */ + public boolean hasCommitTimestamp() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * The commit timestamp of the transaction that applied this batch.
    +     * Present if status is OK and the mutation groups were applied, absent
    +     * otherwise.
    +     *
    +     * For mutation groups with conditions, a status=OK and missing
    +     * commit_timestamp means that the mutation groups were not applied due to the
    +     * condition not being satisfied after evaluation.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 3; + * + * @return The commitTimestamp. + */ + public com.google.protobuf.Timestamp getCommitTimestamp() { + if (commitTimestampBuilder_ == null) { + return commitTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTimestamp_; + } else { + return commitTimestampBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The commit timestamp of the transaction that applied this batch.
    +     * Present if status is OK and the mutation groups were applied, absent
    +     * otherwise.
    +     *
    +     * For mutation groups with conditions, a status=OK and missing
    +     * commit_timestamp means that the mutation groups were not applied due to the
    +     * condition not being satisfied after evaluation.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 3; + */ + public Builder setCommitTimestamp(com.google.protobuf.Timestamp value) { + if (commitTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commitTimestamp_ = value; + } else { + commitTimestampBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The commit timestamp of the transaction that applied this batch.
    +     * Present if status is OK and the mutation groups were applied, absent
    +     * otherwise.
    +     *
    +     * For mutation groups with conditions, a status=OK and missing
    +     * commit_timestamp means that the mutation groups were not applied due to the
    +     * condition not being satisfied after evaluation.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 3; + */ + public Builder setCommitTimestamp(com.google.protobuf.Timestamp.Builder builderForValue) { + if (commitTimestampBuilder_ == null) { + commitTimestamp_ = builderForValue.build(); + } else { + commitTimestampBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The commit timestamp of the transaction that applied this batch.
    +     * Present if status is OK and the mutation groups were applied, absent
    +     * otherwise.
    +     *
    +     * For mutation groups with conditions, a status=OK and missing
    +     * commit_timestamp means that the mutation groups were not applied due to the
    +     * condition not being satisfied after evaluation.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 3; + */ + public Builder mergeCommitTimestamp(com.google.protobuf.Timestamp value) { + if (commitTimestampBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && commitTimestamp_ != null + && commitTimestamp_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCommitTimestampBuilder().mergeFrom(value); + } else { + commitTimestamp_ = value; + } + } else { + commitTimestampBuilder_.mergeFrom(value); + } + if (commitTimestamp_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The commit timestamp of the transaction that applied this batch.
    +     * Present if status is OK and the mutation groups were applied, absent
    +     * otherwise.
    +     *
    +     * For mutation groups with conditions, a status=OK and missing
    +     * commit_timestamp means that the mutation groups were not applied due to the
    +     * condition not being satisfied after evaluation.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 3; + */ + public Builder clearCommitTimestamp() { + bitField0_ = (bitField0_ & ~0x00000004); + commitTimestamp_ = null; + if (commitTimestampBuilder_ != null) { + commitTimestampBuilder_.dispose(); + commitTimestampBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The commit timestamp of the transaction that applied this batch.
    +     * Present if status is OK and the mutation groups were applied, absent
    +     * otherwise.
    +     *
    +     * For mutation groups with conditions, a status=OK and missing
    +     * commit_timestamp means that the mutation groups were not applied due to the
    +     * condition not being satisfied after evaluation.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 3; + */ + public com.google.protobuf.Timestamp.Builder getCommitTimestampBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetCommitTimestampFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The commit timestamp of the transaction that applied this batch.
    +     * Present if status is OK and the mutation groups were applied, absent
    +     * otherwise.
    +     *
    +     * For mutation groups with conditions, a status=OK and missing
    +     * commit_timestamp means that the mutation groups were not applied due to the
    +     * condition not being satisfied after evaluation.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 3; + */ + public com.google.protobuf.TimestampOrBuilder getCommitTimestampOrBuilder() { + if (commitTimestampBuilder_ != null) { + return commitTimestampBuilder_.getMessageOrBuilder(); + } else { + return commitTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTimestamp_; + } + } + + /** + * + * + *
    +     * The commit timestamp of the transaction that applied this batch.
    +     * Present if status is OK and the mutation groups were applied, absent
    +     * otherwise.
    +     *
    +     * For mutation groups with conditions, a status=OK and missing
    +     * commit_timestamp means that the mutation groups were not applied due to the
    +     * condition not being satisfied after evaluation.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCommitTimestampFieldBuilder() { + if (commitTimestampBuilder_ == null) { + commitTimestampBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCommitTimestamp(), getParentForChildren(), isClean()); + commitTimestamp_ = null; + } + return commitTimestampBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.BatchWriteResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.BatchWriteResponse) + private static final com.google.spanner.v1.BatchWriteResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.BatchWriteResponse(); + } + + public static com.google.spanner.v1.BatchWriteResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchWriteResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; 
+ + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.BatchWriteResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteResponseOrBuilder.java new file mode 100644 index 000000000000..05be85b753a2 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteResponseOrBuilder.java @@ -0,0 +1,163 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface BatchWriteResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.BatchWriteResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The mutation groups applied in this batch. The values index into the
    +   * `mutation_groups` field in the corresponding `BatchWriteRequest`.
    +   * 
    + * + * repeated int32 indexes = 1; + * + * @return A list containing the indexes. + */ + java.util.List getIndexesList(); + + /** + * + * + *
    +   * The mutation groups applied in this batch. The values index into the
    +   * `mutation_groups` field in the corresponding `BatchWriteRequest`.
    +   * 
    + * + * repeated int32 indexes = 1; + * + * @return The count of indexes. + */ + int getIndexesCount(); + + /** + * + * + *
    +   * The mutation groups applied in this batch. The values index into the
    +   * `mutation_groups` field in the corresponding `BatchWriteRequest`.
    +   * 
    + * + * repeated int32 indexes = 1; + * + * @param index The index of the element to return. + * @return The indexes at the given index. + */ + int getIndexes(int index); + + /** + * + * + *
    +   * An `OK` status indicates success. Any other status indicates a failure.
    +   * 
    + * + * .google.rpc.Status status = 2; + * + * @return Whether the status field is set. + */ + boolean hasStatus(); + + /** + * + * + *
    +   * An `OK` status indicates success. Any other status indicates a failure.
    +   * 
    + * + * .google.rpc.Status status = 2; + * + * @return The status. + */ + com.google.rpc.Status getStatus(); + + /** + * + * + *
    +   * An `OK` status indicates success. Any other status indicates a failure.
    +   * 
    + * + * .google.rpc.Status status = 2; + */ + com.google.rpc.StatusOrBuilder getStatusOrBuilder(); + + /** + * + * + *
    +   * The commit timestamp of the transaction that applied this batch.
    +   * Present if status is OK and the mutation groups were applied, absent
    +   * otherwise.
    +   *
    +   * For mutation groups with conditions, a status=OK and missing
    +   * commit_timestamp means that the mutation groups were not applied due to the
    +   * condition not being satisfied after evaluation.
    +   * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 3; + * + * @return Whether the commitTimestamp field is set. + */ + boolean hasCommitTimestamp(); + + /** + * + * + *
    +   * The commit timestamp of the transaction that applied this batch.
    +   * Present if status is OK and the mutation groups were applied, absent
    +   * otherwise.
    +   *
    +   * For mutation groups with conditions, a status=OK and missing
    +   * commit_timestamp means that the mutation groups were not applied due to the
    +   * condition not being satisfied after evaluation.
    +   * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 3; + * + * @return The commitTimestamp. + */ + com.google.protobuf.Timestamp getCommitTimestamp(); + + /** + * + * + *
    +   * The commit timestamp of the transaction that applied this batch.
    +   * Present if status is OK and the mutation groups were applied, absent
    +   * otherwise.
    +   *
    +   * For mutation groups with conditions, a status=OK and missing
    +   * commit_timestamp means that the mutation groups were not applied due to the
    +   * condition not being satisfied after evaluation.
    +   * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 3; + */ + com.google.protobuf.TimestampOrBuilder getCommitTimestampOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BeginTransactionRequest.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BeginTransactionRequest.java new file mode 100644 index 000000000000..aa66a1eebb55 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BeginTransactionRequest.java @@ -0,0 +1,1952 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * The request for
    + * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
    + * 
    + * + * Protobuf type {@code google.spanner.v1.BeginTransactionRequest} + */ +@com.google.protobuf.Generated +public final class BeginTransactionRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.BeginTransactionRequest) + BeginTransactionRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "BeginTransactionRequest"); + } + + // Use BeginTransactionRequest.newBuilder() to construct. + private BeginTransactionRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private BeginTransactionRequest() { + session_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BeginTransactionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BeginTransactionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.BeginTransactionRequest.class, + com.google.spanner.v1.BeginTransactionRequest.Builder.class); + } + + private int bitField0_; + public static final int SESSION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object session_ = ""; + + /** + * + * + *
    +   * Required. The session in which the transaction runs.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + @java.lang.Override + public java.lang.String getSession() { + java.lang.Object ref = session_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + session_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The session in which the transaction runs.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSessionBytes() { + java.lang.Object ref = session_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + session_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OPTIONS_FIELD_NUMBER = 2; + private com.google.spanner.v1.TransactionOptions options_; + + /** + * + * + *
    +   * Required. Options for the new transaction.
    +   * 
    + * + * + * .google.spanner.v1.TransactionOptions options = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the options field is set. + */ + @java.lang.Override + public boolean hasOptions() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Required. Options for the new transaction.
    +   * 
    + * + * + * .google.spanner.v1.TransactionOptions options = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The options. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions getOptions() { + return options_ == null + ? com.google.spanner.v1.TransactionOptions.getDefaultInstance() + : options_; + } + + /** + * + * + *
    +   * Required. Options for the new transaction.
    +   * 
    + * + * + * .google.spanner.v1.TransactionOptions options = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptionsOrBuilder getOptionsOrBuilder() { + return options_ == null + ? com.google.spanner.v1.TransactionOptions.getDefaultInstance() + : options_; + } + + public static final int REQUEST_OPTIONS_FIELD_NUMBER = 3; + private com.google.spanner.v1.RequestOptions requestOptions_; + + /** + * + * + *
    +   * Common options for this request.
    +   * Priority is ignored for this request. Setting the priority in this
    +   * `request_options` struct doesn't do anything. To set the priority for a
    +   * transaction, set it on the reads and writes that are part of this
    +   * transaction instead.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + * + * @return Whether the requestOptions field is set. + */ + @java.lang.Override + public boolean hasRequestOptions() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Common options for this request.
    +   * Priority is ignored for this request. Setting the priority in this
    +   * `request_options` struct doesn't do anything. To set the priority for a
    +   * transaction, set it on the reads and writes that are part of this
    +   * transaction instead.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + * + * @return The requestOptions. + */ + @java.lang.Override + public com.google.spanner.v1.RequestOptions getRequestOptions() { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } + + /** + * + * + *
    +   * Common options for this request.
    +   * Priority is ignored for this request. Setting the priority in this
    +   * `request_options` struct doesn't do anything. To set the priority for a
    +   * transaction, set it on the reads and writes that are part of this
    +   * transaction instead.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + */ + @java.lang.Override + public com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder() { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } + + public static final int MUTATION_KEY_FIELD_NUMBER = 4; + private com.google.spanner.v1.Mutation mutationKey_; + + /** + * + * + *
    +   * Optional. Required for read-write transactions on a multiplexed session
    +   * that commit mutations but don't perform any reads or queries. You must
    +   * randomly select one of the mutations from the mutation set and send it as a
    +   * part of this request.
    +   * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the mutationKey field is set. + */ + @java.lang.Override + public boolean hasMutationKey() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * Optional. Required for read-write transactions on a multiplexed session
    +   * that commit mutations but don't perform any reads or queries. You must
    +   * randomly select one of the mutations from the mutation set and send it as a
    +   * part of this request.
    +   * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The mutationKey. + */ + @java.lang.Override + public com.google.spanner.v1.Mutation getMutationKey() { + return mutationKey_ == null + ? com.google.spanner.v1.Mutation.getDefaultInstance() + : mutationKey_; + } + + /** + * + * + *
    +   * Optional. Required for read-write transactions on a multiplexed session
    +   * that commit mutations but don't perform any reads or queries. You must
    +   * randomly select one of the mutations from the mutation set and send it as a
    +   * part of this request.
    +   * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.MutationOrBuilder getMutationKeyOrBuilder() { + return mutationKey_ == null + ? com.google.spanner.v1.Mutation.getDefaultInstance() + : mutationKey_; + } + + public static final int ROUTING_HINT_FIELD_NUMBER = 5; + private com.google.spanner.v1.RoutingHint routingHint_; + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the routingHint field is set. + */ + @java.lang.Override + public boolean hasRoutingHint() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The routingHint. + */ + @java.lang.Override + public com.google.spanner.v1.RoutingHint getRoutingHint() { + return routingHint_ == null + ? com.google.spanner.v1.RoutingHint.getDefaultInstance() + : routingHint_; + } + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.RoutingHintOrBuilder getRoutingHintOrBuilder() { + return routingHint_ == null + ? com.google.spanner.v1.RoutingHint.getDefaultInstance() + : routingHint_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(session_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, session_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getOptions()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getRequestOptions()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(4, getMutationKey()); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(5, getRoutingHint()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(session_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, session_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getOptions()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getRequestOptions()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getMutationKey()); + } + if (((bitField0_ & 
0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getRoutingHint()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.BeginTransactionRequest)) { + return super.equals(obj); + } + com.google.spanner.v1.BeginTransactionRequest other = + (com.google.spanner.v1.BeginTransactionRequest) obj; + + if (!getSession().equals(other.getSession())) return false; + if (hasOptions() != other.hasOptions()) return false; + if (hasOptions()) { + if (!getOptions().equals(other.getOptions())) return false; + } + if (hasRequestOptions() != other.hasRequestOptions()) return false; + if (hasRequestOptions()) { + if (!getRequestOptions().equals(other.getRequestOptions())) return false; + } + if (hasMutationKey() != other.hasMutationKey()) return false; + if (hasMutationKey()) { + if (!getMutationKey().equals(other.getMutationKey())) return false; + } + if (hasRoutingHint() != other.hasRoutingHint()) return false; + if (hasRoutingHint()) { + if (!getRoutingHint().equals(other.getRoutingHint())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SESSION_FIELD_NUMBER; + hash = (53 * hash) + getSession().hashCode(); + if (hasOptions()) { + hash = (37 * hash) + OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getOptions().hashCode(); + } + if (hasRequestOptions()) { + hash = (37 * hash) + REQUEST_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getRequestOptions().hashCode(); + } + if (hasMutationKey()) { + hash = (37 * hash) + MUTATION_KEY_FIELD_NUMBER; + hash = (53 * hash) + 
getMutationKey().hashCode(); + } + if (hasRoutingHint()) { + hash = (37 * hash) + ROUTING_HINT_FIELD_NUMBER; + hash = (53 * hash) + getRoutingHint().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.BeginTransactionRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.BeginTransactionRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.BeginTransactionRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.BeginTransactionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.BeginTransactionRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.BeginTransactionRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.BeginTransactionRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.BeginTransactionRequest 
parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.BeginTransactionRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.BeginTransactionRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.BeginTransactionRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.BeginTransactionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.BeginTransactionRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for
    +   * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.BeginTransactionRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.BeginTransactionRequest) + com.google.spanner.v1.BeginTransactionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BeginTransactionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BeginTransactionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.BeginTransactionRequest.class, + com.google.spanner.v1.BeginTransactionRequest.Builder.class); + } + + // Construct using com.google.spanner.v1.BeginTransactionRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetOptionsFieldBuilder(); + internalGetRequestOptionsFieldBuilder(); + internalGetMutationKeyFieldBuilder(); + internalGetRoutingHintFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + session_ = ""; + options_ = null; + if (optionsBuilder_ != null) { + optionsBuilder_.dispose(); + optionsBuilder_ = null; + } + requestOptions_ = null; + if (requestOptionsBuilder_ != null) { + requestOptionsBuilder_.dispose(); + requestOptionsBuilder_ = null; + } + mutationKey_ = null; + if (mutationKeyBuilder_ != null) { + mutationKeyBuilder_.dispose(); + mutationKeyBuilder_ = null; + 
} + routingHint_ = null; + if (routingHintBuilder_ != null) { + routingHintBuilder_.dispose(); + routingHintBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_BeginTransactionRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.BeginTransactionRequest getDefaultInstanceForType() { + return com.google.spanner.v1.BeginTransactionRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.BeginTransactionRequest build() { + com.google.spanner.v1.BeginTransactionRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.BeginTransactionRequest buildPartial() { + com.google.spanner.v1.BeginTransactionRequest result = + new com.google.spanner.v1.BeginTransactionRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.BeginTransactionRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.session_ = session_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.options_ = optionsBuilder_ == null ? options_ : optionsBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.requestOptions_ = + requestOptionsBuilder_ == null ? requestOptions_ : requestOptionsBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.mutationKey_ = + mutationKeyBuilder_ == null ? 
mutationKey_ : mutationKeyBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.routingHint_ = + routingHintBuilder_ == null ? routingHint_ : routingHintBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.BeginTransactionRequest) { + return mergeFrom((com.google.spanner.v1.BeginTransactionRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.BeginTransactionRequest other) { + if (other == com.google.spanner.v1.BeginTransactionRequest.getDefaultInstance()) return this; + if (!other.getSession().isEmpty()) { + session_ = other.session_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasOptions()) { + mergeOptions(other.getOptions()); + } + if (other.hasRequestOptions()) { + mergeRequestOptions(other.getRequestOptions()); + } + if (other.hasMutationKey()) { + mergeMutationKey(other.getMutationKey()); + } + if (other.hasRoutingHint()) { + mergeRoutingHint(other.getRoutingHint()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + session_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(internalGetOptionsFieldBuilder().getBuilder(), 
extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetRequestOptionsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetMutationKeyFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + input.readMessage( + internalGetRoutingHintFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object session_ = ""; + + /** + * + * + *
    +     * Required. The session in which the transaction runs.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + public java.lang.String getSession() { + java.lang.Object ref = session_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + session_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The session in which the transaction runs.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + public com.google.protobuf.ByteString getSessionBytes() { + java.lang.Object ref = session_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + session_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The session in which the transaction runs.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The session to set. + * @return This builder for chaining. + */ + public Builder setSession(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + session_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The session in which the transaction runs.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearSession() { + session_ = getDefaultInstance().getSession(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The session in which the transaction runs.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for session to set. + * @return This builder for chaining. + */ + public Builder setSessionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + session_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.spanner.v1.TransactionOptions options_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionOptions, + com.google.spanner.v1.TransactionOptions.Builder, + com.google.spanner.v1.TransactionOptionsOrBuilder> + optionsBuilder_; + + /** + * + * + *
    +     * Required. Options for the new transaction.
    +     * 
    + * + * + * .google.spanner.v1.TransactionOptions options = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the options field is set. + */ + public boolean hasOptions() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Required. Options for the new transaction.
    +     * 
    + * + * + * .google.spanner.v1.TransactionOptions options = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The options. + */ + public com.google.spanner.v1.TransactionOptions getOptions() { + if (optionsBuilder_ == null) { + return options_ == null + ? com.google.spanner.v1.TransactionOptions.getDefaultInstance() + : options_; + } else { + return optionsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. Options for the new transaction.
    +     * 
    + * + * + * .google.spanner.v1.TransactionOptions options = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setOptions(com.google.spanner.v1.TransactionOptions value) { + if (optionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + options_ = value; + } else { + optionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. Options for the new transaction.
    +     * 
    + * + * + * .google.spanner.v1.TransactionOptions options = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setOptions(com.google.spanner.v1.TransactionOptions.Builder builderForValue) { + if (optionsBuilder_ == null) { + options_ = builderForValue.build(); + } else { + optionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. Options for the new transaction.
    +     * 
    + * + * + * .google.spanner.v1.TransactionOptions options = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeOptions(com.google.spanner.v1.TransactionOptions value) { + if (optionsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && options_ != null + && options_ != com.google.spanner.v1.TransactionOptions.getDefaultInstance()) { + getOptionsBuilder().mergeFrom(value); + } else { + options_ = value; + } + } else { + optionsBuilder_.mergeFrom(value); + } + if (options_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. Options for the new transaction.
    +     * 
    + * + * + * .google.spanner.v1.TransactionOptions options = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearOptions() { + bitField0_ = (bitField0_ & ~0x00000002); + options_ = null; + if (optionsBuilder_ != null) { + optionsBuilder_.dispose(); + optionsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. Options for the new transaction.
    +     * 
    + * + * + * .google.spanner.v1.TransactionOptions options = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.TransactionOptions.Builder getOptionsBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. Options for the new transaction.
    +     * 
    + * + * + * .google.spanner.v1.TransactionOptions options = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.TransactionOptionsOrBuilder getOptionsOrBuilder() { + if (optionsBuilder_ != null) { + return optionsBuilder_.getMessageOrBuilder(); + } else { + return options_ == null + ? com.google.spanner.v1.TransactionOptions.getDefaultInstance() + : options_; + } + } + + /** + * + * + *
    +     * Required. Options for the new transaction.
    +     * 
    + * + * + * .google.spanner.v1.TransactionOptions options = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionOptions, + com.google.spanner.v1.TransactionOptions.Builder, + com.google.spanner.v1.TransactionOptionsOrBuilder> + internalGetOptionsFieldBuilder() { + if (optionsBuilder_ == null) { + optionsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionOptions, + com.google.spanner.v1.TransactionOptions.Builder, + com.google.spanner.v1.TransactionOptionsOrBuilder>( + getOptions(), getParentForChildren(), isClean()); + options_ = null; + } + return optionsBuilder_; + } + + private com.google.spanner.v1.RequestOptions requestOptions_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RequestOptions, + com.google.spanner.v1.RequestOptions.Builder, + com.google.spanner.v1.RequestOptionsOrBuilder> + requestOptionsBuilder_; + + /** + * + * + *
    +     * Common options for this request.
    +     * Priority is ignored for this request. Setting the priority in this
    +     * `request_options` struct doesn't do anything. To set the priority for a
    +     * transaction, set it on the reads and writes that are part of this
    +     * transaction instead.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + * + * @return Whether the requestOptions field is set. + */ + public boolean hasRequestOptions() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Common options for this request.
    +     * Priority is ignored for this request. Setting the priority in this
    +     * `request_options` struct doesn't do anything. To set the priority for a
    +     * transaction, set it on the reads and writes that are part of this
    +     * transaction instead.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + * + * @return The requestOptions. + */ + public com.google.spanner.v1.RequestOptions getRequestOptions() { + if (requestOptionsBuilder_ == null) { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } else { + return requestOptionsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Common options for this request.
    +     * Priority is ignored for this request. Setting the priority in this
    +     * `request_options` struct doesn't do anything. To set the priority for a
    +     * transaction, set it on the reads and writes that are part of this
    +     * transaction instead.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + */ + public Builder setRequestOptions(com.google.spanner.v1.RequestOptions value) { + if (requestOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + requestOptions_ = value; + } else { + requestOptionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * Priority is ignored for this request. Setting the priority in this
    +     * `request_options` struct doesn't do anything. To set the priority for a
    +     * transaction, set it on the reads and writes that are part of this
    +     * transaction instead.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + */ + public Builder setRequestOptions(com.google.spanner.v1.RequestOptions.Builder builderForValue) { + if (requestOptionsBuilder_ == null) { + requestOptions_ = builderForValue.build(); + } else { + requestOptionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * Priority is ignored for this request. Setting the priority in this
    +     * `request_options` struct doesn't do anything. To set the priority for a
    +     * transaction, set it on the reads and writes that are part of this
    +     * transaction instead.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + */ + public Builder mergeRequestOptions(com.google.spanner.v1.RequestOptions value) { + if (requestOptionsBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && requestOptions_ != null + && requestOptions_ != com.google.spanner.v1.RequestOptions.getDefaultInstance()) { + getRequestOptionsBuilder().mergeFrom(value); + } else { + requestOptions_ = value; + } + } else { + requestOptionsBuilder_.mergeFrom(value); + } + if (requestOptions_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * Priority is ignored for this request. Setting the priority in this
    +     * `request_options` struct doesn't do anything. To set the priority for a
    +     * transaction, set it on the reads and writes that are part of this
    +     * transaction instead.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + */ + public Builder clearRequestOptions() { + bitField0_ = (bitField0_ & ~0x00000004); + requestOptions_ = null; + if (requestOptionsBuilder_ != null) { + requestOptionsBuilder_.dispose(); + requestOptionsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * Priority is ignored for this request. Setting the priority in this
    +     * `request_options` struct doesn't do anything. To set the priority for a
    +     * transaction, set it on the reads and writes that are part of this
    +     * transaction instead.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + */ + public com.google.spanner.v1.RequestOptions.Builder getRequestOptionsBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetRequestOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Common options for this request.
    +     * Priority is ignored for this request. Setting the priority in this
    +     * `request_options` struct doesn't do anything. To set the priority for a
    +     * transaction, set it on the reads and writes that are part of this
    +     * transaction instead.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + */ + public com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder() { + if (requestOptionsBuilder_ != null) { + return requestOptionsBuilder_.getMessageOrBuilder(); + } else { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } + } + + /** + * + * + *
    +     * Common options for this request.
    +     * Priority is ignored for this request. Setting the priority in this
    +     * `request_options` struct doesn't do anything. To set the priority for a
    +     * transaction, set it on the reads and writes that are part of this
    +     * transaction instead.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RequestOptions, + com.google.spanner.v1.RequestOptions.Builder, + com.google.spanner.v1.RequestOptionsOrBuilder> + internalGetRequestOptionsFieldBuilder() { + if (requestOptionsBuilder_ == null) { + requestOptionsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RequestOptions, + com.google.spanner.v1.RequestOptions.Builder, + com.google.spanner.v1.RequestOptionsOrBuilder>( + getRequestOptions(), getParentForChildren(), isClean()); + requestOptions_ = null; + } + return requestOptionsBuilder_; + } + + private com.google.spanner.v1.Mutation mutationKey_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation, + com.google.spanner.v1.Mutation.Builder, + com.google.spanner.v1.MutationOrBuilder> + mutationKeyBuilder_; + + /** + * + * + *
    +     * Optional. Required for read-write transactions on a multiplexed session
    +     * that commit mutations but don't perform any reads or queries. You must
    +     * randomly select one of the mutations from the mutation set and send it as a
    +     * part of this request.
    +     * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the mutationKey field is set. + */ + public boolean hasMutationKey() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Optional. Required for read-write transactions on a multiplexed session
    +     * that commit mutations but don't perform any reads or queries. You must
    +     * randomly select one of the mutations from the mutation set and send it as a
    +     * part of this request.
    +     * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The mutationKey. + */ + public com.google.spanner.v1.Mutation getMutationKey() { + if (mutationKeyBuilder_ == null) { + return mutationKey_ == null + ? com.google.spanner.v1.Mutation.getDefaultInstance() + : mutationKey_; + } else { + return mutationKeyBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. Required for read-write transactions on a multiplexed session
    +     * that commit mutations but don't perform any reads or queries. You must
    +     * randomly select one of the mutations from the mutation set and send it as a
    +     * part of this request.
    +     * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setMutationKey(com.google.spanner.v1.Mutation value) { + if (mutationKeyBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + mutationKey_ = value; + } else { + mutationKeyBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Required for read-write transactions on a multiplexed session
    +     * that commit mutations but don't perform any reads or queries. You must
    +     * randomly select one of the mutations from the mutation set and send it as a
    +     * part of this request.
    +     * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setMutationKey(com.google.spanner.v1.Mutation.Builder builderForValue) { + if (mutationKeyBuilder_ == null) { + mutationKey_ = builderForValue.build(); + } else { + mutationKeyBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Required for read-write transactions on a multiplexed session
    +     * that commit mutations but don't perform any reads or queries. You must
    +     * randomly select one of the mutations from the mutation set and send it as a
    +     * part of this request.
    +     * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeMutationKey(com.google.spanner.v1.Mutation value) { + if (mutationKeyBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && mutationKey_ != null + && mutationKey_ != com.google.spanner.v1.Mutation.getDefaultInstance()) { + getMutationKeyBuilder().mergeFrom(value); + } else { + mutationKey_ = value; + } + } else { + mutationKeyBuilder_.mergeFrom(value); + } + if (mutationKey_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. Required for read-write transactions on a multiplexed session
    +     * that commit mutations but don't perform any reads or queries. You must
    +     * randomly select one of the mutations from the mutation set and send it as a
    +     * part of this request.
    +     * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearMutationKey() { + bitField0_ = (bitField0_ & ~0x00000008); + mutationKey_ = null; + if (mutationKeyBuilder_ != null) { + mutationKeyBuilder_.dispose(); + mutationKeyBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Required for read-write transactions on a multiplexed session
    +     * that commit mutations but don't perform any reads or queries. You must
    +     * randomly select one of the mutations from the mutation set and send it as a
    +     * part of this request.
    +     * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.Mutation.Builder getMutationKeyBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetMutationKeyFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. Required for read-write transactions on a multiplexed session
    +     * that commit mutations but don't perform any reads or queries. You must
    +     * randomly select one of the mutations from the mutation set and send it as a
    +     * part of this request.
    +     * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.MutationOrBuilder getMutationKeyOrBuilder() { + if (mutationKeyBuilder_ != null) { + return mutationKeyBuilder_.getMessageOrBuilder(); + } else { + return mutationKey_ == null + ? com.google.spanner.v1.Mutation.getDefaultInstance() + : mutationKey_; + } + } + + /** + * + * + *
    +     * Optional. Required for read-write transactions on a multiplexed session
    +     * that commit mutations but don't perform any reads or queries. You must
    +     * randomly select one of the mutations from the mutation set and send it as a
    +     * part of this request.
    +     * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation, + com.google.spanner.v1.Mutation.Builder, + com.google.spanner.v1.MutationOrBuilder> + internalGetMutationKeyFieldBuilder() { + if (mutationKeyBuilder_ == null) { + mutationKeyBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation, + com.google.spanner.v1.Mutation.Builder, + com.google.spanner.v1.MutationOrBuilder>( + getMutationKey(), getParentForChildren(), isClean()); + mutationKey_ = null; + } + return mutationKeyBuilder_; + } + + private com.google.spanner.v1.RoutingHint routingHint_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RoutingHint, + com.google.spanner.v1.RoutingHint.Builder, + com.google.spanner.v1.RoutingHintOrBuilder> + routingHintBuilder_; + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the routingHint field is set. + */ + public boolean hasRoutingHint() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The routingHint. + */ + public com.google.spanner.v1.RoutingHint getRoutingHint() { + if (routingHintBuilder_ == null) { + return routingHint_ == null + ? com.google.spanner.v1.RoutingHint.getDefaultInstance() + : routingHint_; + } else { + return routingHintBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRoutingHint(com.google.spanner.v1.RoutingHint value) { + if (routingHintBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + routingHint_ = value; + } else { + routingHintBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRoutingHint(com.google.spanner.v1.RoutingHint.Builder builderForValue) { + if (routingHintBuilder_ == null) { + routingHint_ = builderForValue.build(); + } else { + routingHintBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeRoutingHint(com.google.spanner.v1.RoutingHint value) { + if (routingHintBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && routingHint_ != null + && routingHint_ != com.google.spanner.v1.RoutingHint.getDefaultInstance()) { + getRoutingHintBuilder().mergeFrom(value); + } else { + routingHint_ = value; + } + } else { + routingHintBuilder_.mergeFrom(value); + } + if (routingHint_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearRoutingHint() { + bitField0_ = (bitField0_ & ~0x00000010); + routingHint_ = null; + if (routingHintBuilder_ != null) { + routingHintBuilder_.dispose(); + routingHintBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.RoutingHint.Builder getRoutingHintBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetRoutingHintFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.RoutingHintOrBuilder getRoutingHintOrBuilder() { + if (routingHintBuilder_ != null) { + return routingHintBuilder_.getMessageOrBuilder(); + } else { + return routingHint_ == null + ? com.google.spanner.v1.RoutingHint.getDefaultInstance() + : routingHint_; + } + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RoutingHint, + com.google.spanner.v1.RoutingHint.Builder, + com.google.spanner.v1.RoutingHintOrBuilder> + internalGetRoutingHintFieldBuilder() { + if (routingHintBuilder_ == null) { + routingHintBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RoutingHint, + com.google.spanner.v1.RoutingHint.Builder, + com.google.spanner.v1.RoutingHintOrBuilder>( + getRoutingHint(), getParentForChildren(), isClean()); + routingHint_ = null; + } + return routingHintBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.BeginTransactionRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.BeginTransactionRequest) + private static final com.google.spanner.v1.BeginTransactionRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.BeginTransactionRequest(); + } + + public static com.google.spanner.v1.BeginTransactionRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BeginTransactionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.BeginTransactionRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BeginTransactionRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BeginTransactionRequestOrBuilder.java new file mode 100644 index 000000000000..30c8901404ed --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BeginTransactionRequestOrBuilder.java @@ -0,0 +1,257 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface BeginTransactionRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.BeginTransactionRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The session in which the transaction runs.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + java.lang.String getSession(); + + /** + * + * + *
    +   * Required. The session in which the transaction runs.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + com.google.protobuf.ByteString getSessionBytes(); + + /** + * + * + *
    +   * Required. Options for the new transaction.
    +   * 
    + * + * + * .google.spanner.v1.TransactionOptions options = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the options field is set. + */ + boolean hasOptions(); + + /** + * + * + *
    +   * Required. Options for the new transaction.
    +   * 
    + * + * + * .google.spanner.v1.TransactionOptions options = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The options. + */ + com.google.spanner.v1.TransactionOptions getOptions(); + + /** + * + * + *
    +   * Required. Options for the new transaction.
    +   * 
    + * + * + * .google.spanner.v1.TransactionOptions options = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.v1.TransactionOptionsOrBuilder getOptionsOrBuilder(); + + /** + * + * + *
    +   * Common options for this request.
    +   * Priority is ignored for this request. Setting the priority in this
    +   * `request_options` struct doesn't do anything. To set the priority for a
    +   * transaction, set it on the reads and writes that are part of this
    +   * transaction instead.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + * + * @return Whether the requestOptions field is set. + */ + boolean hasRequestOptions(); + + /** + * + * + *
    +   * Common options for this request.
    +   * Priority is ignored for this request. Setting the priority in this
    +   * `request_options` struct doesn't do anything. To set the priority for a
    +   * transaction, set it on the reads and writes that are part of this
    +   * transaction instead.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + * + * @return The requestOptions. + */ + com.google.spanner.v1.RequestOptions getRequestOptions(); + + /** + * + * + *
    +   * Common options for this request.
    +   * Priority is ignored for this request. Setting the priority in this
    +   * `request_options` struct doesn't do anything. To set the priority for a
    +   * transaction, set it on the reads and writes that are part of this
    +   * transaction instead.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 3; + */ + com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder(); + + /** + * + * + *
    +   * Optional. Required for read-write transactions on a multiplexed session
    +   * that commit mutations but don't perform any reads or queries. You must
    +   * randomly select one of the mutations from the mutation set and send it as a
    +   * part of this request.
    +   * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the mutationKey field is set. + */ + boolean hasMutationKey(); + + /** + * + * + *
    +   * Optional. Required for read-write transactions on a multiplexed session
    +   * that commit mutations but don't perform any reads or queries. You must
    +   * randomly select one of the mutations from the mutation set and send it as a
    +   * part of this request.
    +   * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The mutationKey. + */ + com.google.spanner.v1.Mutation getMutationKey(); + + /** + * + * + *
    +   * Optional. Required for read-write transactions on a multiplexed session
    +   * that commit mutations but don't perform any reads or queries. You must
    +   * randomly select one of the mutations from the mutation set and send it as a
    +   * part of this request.
    +   * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.MutationOrBuilder getMutationKeyOrBuilder(); + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the routingHint field is set. + */ + boolean hasRoutingHint(); + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The routingHint. + */ + com.google.spanner.v1.RoutingHint getRoutingHint(); + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.RoutingHintOrBuilder getRoutingHintOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CacheUpdate.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CacheUpdate.java new file mode 100644 index 000000000000..c807c6168e70 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CacheUpdate.java @@ -0,0 +1,1820 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/location.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * A `CacheUpdate` expresses a set of changes the client should incorporate into
    + * its location cache. These changes may or may not be newer than what the
    + * client has in its cache, and should be discarded if necessary. `CacheUpdate`s
    + * can be obtained in response to requests that included a `RoutingHint`
    + * field, but may also be obtained by explicit location-fetching RPCs which may
    + * be added in the future.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.CacheUpdate} + */ +@com.google.protobuf.Generated +public final class CacheUpdate extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.CacheUpdate) + CacheUpdateOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CacheUpdate"); + } + + // Use CacheUpdate.newBuilder() to construct. + private CacheUpdate(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CacheUpdate() { + range_ = java.util.Collections.emptyList(); + group_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_CacheUpdate_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_CacheUpdate_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.CacheUpdate.class, + com.google.spanner.v1.CacheUpdate.Builder.class); + } + + private int bitField0_; + public static final int DATABASE_ID_FIELD_NUMBER = 1; + private long databaseId_ = 0L; + + /** + * + * + *
    +   * An internal ID for the database. Database names can be reused if a database
    +   * is deleted and re-created. Each time the database is re-created, it will
    +   * get a new database ID, which will never be re-used for any other database.
    +   * 
    + * + * uint64 database_id = 1; + * + * @return The databaseId. + */ + @java.lang.Override + public long getDatabaseId() { + return databaseId_; + } + + public static final int RANGE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List range_; + + /** + * + * + *
    +   * A list of ranges to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + @java.lang.Override + public java.util.List getRangeList() { + return range_; + } + + /** + * + * + *
    +   * A list of ranges to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + @java.lang.Override + public java.util.List getRangeOrBuilderList() { + return range_; + } + + /** + * + * + *
    +   * A list of ranges to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + @java.lang.Override + public int getRangeCount() { + return range_.size(); + } + + /** + * + * + *
    +   * A list of ranges to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + @java.lang.Override + public com.google.spanner.v1.Range getRange(int index) { + return range_.get(index); + } + + /** + * + * + *
    +   * A list of ranges to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + @java.lang.Override + public com.google.spanner.v1.RangeOrBuilder getRangeOrBuilder(int index) { + return range_.get(index); + } + + public static final int GROUP_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private java.util.List group_; + + /** + * + * + *
    +   * A list of groups to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + @java.lang.Override + public java.util.List getGroupList() { + return group_; + } + + /** + * + * + *
    +   * A list of groups to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + @java.lang.Override + public java.util.List getGroupOrBuilderList() { + return group_; + } + + /** + * + * + *
    +   * A list of groups to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + @java.lang.Override + public int getGroupCount() { + return group_.size(); + } + + /** + * + * + *
    +   * A list of groups to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + @java.lang.Override + public com.google.spanner.v1.Group getGroup(int index) { + return group_.get(index); + } + + /** + * + * + *
    +   * A list of groups to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + @java.lang.Override + public com.google.spanner.v1.GroupOrBuilder getGroupOrBuilder(int index) { + return group_.get(index); + } + + public static final int KEY_RECIPES_FIELD_NUMBER = 5; + private com.google.spanner.v1.RecipeList keyRecipes_; + + /** + * + * + *
    +   * A list of recipes to be cached.
    +   * 
    + * + * .google.spanner.v1.RecipeList key_recipes = 5; + * + * @return Whether the keyRecipes field is set. + */ + @java.lang.Override + public boolean hasKeyRecipes() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * A list of recipes to be cached.
    +   * 
    + * + * .google.spanner.v1.RecipeList key_recipes = 5; + * + * @return The keyRecipes. + */ + @java.lang.Override + public com.google.spanner.v1.RecipeList getKeyRecipes() { + return keyRecipes_ == null + ? com.google.spanner.v1.RecipeList.getDefaultInstance() + : keyRecipes_; + } + + /** + * + * + *
    +   * A list of recipes to be cached.
    +   * 
    + * + * .google.spanner.v1.RecipeList key_recipes = 5; + */ + @java.lang.Override + public com.google.spanner.v1.RecipeListOrBuilder getKeyRecipesOrBuilder() { + return keyRecipes_ == null + ? com.google.spanner.v1.RecipeList.getDefaultInstance() + : keyRecipes_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (databaseId_ != 0L) { + output.writeUInt64(1, databaseId_); + } + for (int i = 0; i < range_.size(); i++) { + output.writeMessage(2, range_.get(i)); + } + for (int i = 0; i < group_.size(); i++) { + output.writeMessage(3, group_.get(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(5, getKeyRecipes()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (databaseId_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeUInt64Size(1, databaseId_); + } + for (int i = 0; i < range_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, range_.get(i)); + } + for (int i = 0; i < group_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, group_.get(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getKeyRecipes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.CacheUpdate)) { + return 
super.equals(obj); + } + com.google.spanner.v1.CacheUpdate other = (com.google.spanner.v1.CacheUpdate) obj; + + if (getDatabaseId() != other.getDatabaseId()) return false; + if (!getRangeList().equals(other.getRangeList())) return false; + if (!getGroupList().equals(other.getGroupList())) return false; + if (hasKeyRecipes() != other.hasKeyRecipes()) return false; + if (hasKeyRecipes()) { + if (!getKeyRecipes().equals(other.getKeyRecipes())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DATABASE_ID_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getDatabaseId()); + if (getRangeCount() > 0) { + hash = (37 * hash) + RANGE_FIELD_NUMBER; + hash = (53 * hash) + getRangeList().hashCode(); + } + if (getGroupCount() > 0) { + hash = (37 * hash) + GROUP_FIELD_NUMBER; + hash = (53 * hash) + getGroupList().hashCode(); + } + if (hasKeyRecipes()) { + hash = (37 * hash) + KEY_RECIPES_FIELD_NUMBER; + hash = (53 * hash) + getKeyRecipes().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.CacheUpdate parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.CacheUpdate parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.CacheUpdate parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } 
+ + public static com.google.spanner.v1.CacheUpdate parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.CacheUpdate parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.CacheUpdate parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.CacheUpdate parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.CacheUpdate parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.CacheUpdate parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.CacheUpdate parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.CacheUpdate parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.spanner.v1.CacheUpdate parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.CacheUpdate prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * A `CacheUpdate` expresses a set of changes the client should incorporate into
    +   * its location cache. These changes may or may not be newer than what the
    +   * client has in its cache, and should be discarded if necessary. `CacheUpdate`s
    +   * can be obtained in response to requests that included a `RoutingHint`
    +   * field, but may also be obtained by explicit location-fetching RPCs which may
    +   * be added in the future.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.CacheUpdate} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.CacheUpdate) + com.google.spanner.v1.CacheUpdateOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_CacheUpdate_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_CacheUpdate_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.CacheUpdate.class, + com.google.spanner.v1.CacheUpdate.Builder.class); + } + + // Construct using com.google.spanner.v1.CacheUpdate.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetRangeFieldBuilder(); + internalGetGroupFieldBuilder(); + internalGetKeyRecipesFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + databaseId_ = 0L; + if (rangeBuilder_ == null) { + range_ = java.util.Collections.emptyList(); + } else { + range_ = null; + rangeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (groupBuilder_ == null) { + group_ = java.util.Collections.emptyList(); + } else { + group_ = null; + groupBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + keyRecipes_ = null; + if (keyRecipesBuilder_ != null) { + keyRecipesBuilder_.dispose(); + keyRecipesBuilder_ = null; + } + return this; + } + + 
@java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_CacheUpdate_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.CacheUpdate getDefaultInstanceForType() { + return com.google.spanner.v1.CacheUpdate.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.CacheUpdate build() { + com.google.spanner.v1.CacheUpdate result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.CacheUpdate buildPartial() { + com.google.spanner.v1.CacheUpdate result = new com.google.spanner.v1.CacheUpdate(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.v1.CacheUpdate result) { + if (rangeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + range_ = java.util.Collections.unmodifiableList(range_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.range_ = range_; + } else { + result.range_ = rangeBuilder_.build(); + } + if (groupBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0)) { + group_ = java.util.Collections.unmodifiableList(group_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.group_ = group_; + } else { + result.group_ = groupBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.CacheUpdate result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.databaseId_ = databaseId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.keyRecipes_ = keyRecipesBuilder_ == null ? 
keyRecipes_ : keyRecipesBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.CacheUpdate) { + return mergeFrom((com.google.spanner.v1.CacheUpdate) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.CacheUpdate other) { + if (other == com.google.spanner.v1.CacheUpdate.getDefaultInstance()) return this; + if (other.getDatabaseId() != 0L) { + setDatabaseId(other.getDatabaseId()); + } + if (rangeBuilder_ == null) { + if (!other.range_.isEmpty()) { + if (range_.isEmpty()) { + range_ = other.range_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureRangeIsMutable(); + range_.addAll(other.range_); + } + onChanged(); + } + } else { + if (!other.range_.isEmpty()) { + if (rangeBuilder_.isEmpty()) { + rangeBuilder_.dispose(); + rangeBuilder_ = null; + range_ = other.range_; + bitField0_ = (bitField0_ & ~0x00000002); + rangeBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetRangeFieldBuilder() + : null; + } else { + rangeBuilder_.addAllMessages(other.range_); + } + } + } + if (groupBuilder_ == null) { + if (!other.group_.isEmpty()) { + if (group_.isEmpty()) { + group_ = other.group_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureGroupIsMutable(); + group_.addAll(other.group_); + } + onChanged(); + } + } else { + if (!other.group_.isEmpty()) { + if (groupBuilder_.isEmpty()) { + groupBuilder_.dispose(); + groupBuilder_ = null; + group_ = other.group_; + bitField0_ = (bitField0_ & ~0x00000004); + groupBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetGroupFieldBuilder() + : null; + } else { + groupBuilder_.addAllMessages(other.group_); + } + } + } + if (other.hasKeyRecipes()) { + mergeKeyRecipes(other.getKeyRecipes()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + databaseId_ = input.readUInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + com.google.spanner.v1.Range m = + input.readMessage(com.google.spanner.v1.Range.parser(), extensionRegistry); + if (rangeBuilder_ == null) { + ensureRangeIsMutable(); + range_.add(m); + } else { + rangeBuilder_.addMessage(m); + } + break; + } // case 18 + case 26: + { + com.google.spanner.v1.Group m = + input.readMessage(com.google.spanner.v1.Group.parser(), extensionRegistry); + if (groupBuilder_ == null) { + ensureGroupIsMutable(); + group_.add(m); + } else { + groupBuilder_.addMessage(m); + } + break; + } // case 26 + case 42: + { + input.readMessage( + internalGetKeyRecipesFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long databaseId_; + + /** + * + * + *
    +     * An internal ID for the database. Database names can be reused if a database
    +     * is deleted and re-created. Each time the database is re-created, it will
    +     * get a new database ID, which will never be re-used for any other database.
    +     * 
    + * + * uint64 database_id = 1; + * + * @return The databaseId. + */ + @java.lang.Override + public long getDatabaseId() { + return databaseId_; + } + + /** + * + * + *
    +     * An internal ID for the database. Database names can be reused if a database
    +     * is deleted and re-created. Each time the database is re-created, it will
    +     * get a new database ID, which will never be re-used for any other database.
    +     * 
    + * + * uint64 database_id = 1; + * + * @param value The databaseId to set. + * @return This builder for chaining. + */ + public Builder setDatabaseId(long value) { + + databaseId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * An internal ID for the database. Database names can be reused if a database
    +     * is deleted and re-created. Each time the database is re-created, it will
    +     * get a new database ID, which will never be re-used for any other database.
    +     * 
    + * + * uint64 database_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearDatabaseId() { + bitField0_ = (bitField0_ & ~0x00000001); + databaseId_ = 0L; + onChanged(); + return this; + } + + private java.util.List range_ = java.util.Collections.emptyList(); + + private void ensureRangeIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + range_ = new java.util.ArrayList(range_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Range, + com.google.spanner.v1.Range.Builder, + com.google.spanner.v1.RangeOrBuilder> + rangeBuilder_; + + /** + * + * + *
    +     * A list of ranges to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + public java.util.List getRangeList() { + if (rangeBuilder_ == null) { + return java.util.Collections.unmodifiableList(range_); + } else { + return rangeBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * A list of ranges to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + public int getRangeCount() { + if (rangeBuilder_ == null) { + return range_.size(); + } else { + return rangeBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * A list of ranges to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + public com.google.spanner.v1.Range getRange(int index) { + if (rangeBuilder_ == null) { + return range_.get(index); + } else { + return rangeBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * A list of ranges to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + public Builder setRange(int index, com.google.spanner.v1.Range value) { + if (rangeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRangeIsMutable(); + range_.set(index, value); + onChanged(); + } else { + rangeBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * A list of ranges to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + public Builder setRange(int index, com.google.spanner.v1.Range.Builder builderForValue) { + if (rangeBuilder_ == null) { + ensureRangeIsMutable(); + range_.set(index, builderForValue.build()); + onChanged(); + } else { + rangeBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of ranges to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + public Builder addRange(com.google.spanner.v1.Range value) { + if (rangeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRangeIsMutable(); + range_.add(value); + onChanged(); + } else { + rangeBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * A list of ranges to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + public Builder addRange(int index, com.google.spanner.v1.Range value) { + if (rangeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRangeIsMutable(); + range_.add(index, value); + onChanged(); + } else { + rangeBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * A list of ranges to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + public Builder addRange(com.google.spanner.v1.Range.Builder builderForValue) { + if (rangeBuilder_ == null) { + ensureRangeIsMutable(); + range_.add(builderForValue.build()); + onChanged(); + } else { + rangeBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of ranges to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + public Builder addRange(int index, com.google.spanner.v1.Range.Builder builderForValue) { + if (rangeBuilder_ == null) { + ensureRangeIsMutable(); + range_.add(index, builderForValue.build()); + onChanged(); + } else { + rangeBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of ranges to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + public Builder addAllRange(java.lang.Iterable values) { + if (rangeBuilder_ == null) { + ensureRangeIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, range_); + onChanged(); + } else { + rangeBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * A list of ranges to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + public Builder clearRange() { + if (rangeBuilder_ == null) { + range_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + rangeBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * A list of ranges to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + public Builder removeRange(int index) { + if (rangeBuilder_ == null) { + ensureRangeIsMutable(); + range_.remove(index); + onChanged(); + } else { + rangeBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * A list of ranges to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + public com.google.spanner.v1.Range.Builder getRangeBuilder(int index) { + return internalGetRangeFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * A list of ranges to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + public com.google.spanner.v1.RangeOrBuilder getRangeOrBuilder(int index) { + if (rangeBuilder_ == null) { + return range_.get(index); + } else { + return rangeBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * A list of ranges to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + public java.util.List getRangeOrBuilderList() { + if (rangeBuilder_ != null) { + return rangeBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(range_); + } + } + + /** + * + * + *
    +     * A list of ranges to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + public com.google.spanner.v1.Range.Builder addRangeBuilder() { + return internalGetRangeFieldBuilder() + .addBuilder(com.google.spanner.v1.Range.getDefaultInstance()); + } + + /** + * + * + *
    +     * A list of ranges to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + public com.google.spanner.v1.Range.Builder addRangeBuilder(int index) { + return internalGetRangeFieldBuilder() + .addBuilder(index, com.google.spanner.v1.Range.getDefaultInstance()); + } + + /** + * + * + *
    +     * A list of ranges to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + public java.util.List getRangeBuilderList() { + return internalGetRangeFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Range, + com.google.spanner.v1.Range.Builder, + com.google.spanner.v1.RangeOrBuilder> + internalGetRangeFieldBuilder() { + if (rangeBuilder_ == null) { + rangeBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Range, + com.google.spanner.v1.Range.Builder, + com.google.spanner.v1.RangeOrBuilder>( + range_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + range_ = null; + } + return rangeBuilder_; + } + + private java.util.List group_ = java.util.Collections.emptyList(); + + private void ensureGroupIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + group_ = new java.util.ArrayList(group_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Group, + com.google.spanner.v1.Group.Builder, + com.google.spanner.v1.GroupOrBuilder> + groupBuilder_; + + /** + * + * + *
    +     * A list of groups to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + public java.util.List getGroupList() { + if (groupBuilder_ == null) { + return java.util.Collections.unmodifiableList(group_); + } else { + return groupBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * A list of groups to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + public int getGroupCount() { + if (groupBuilder_ == null) { + return group_.size(); + } else { + return groupBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * A list of groups to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + public com.google.spanner.v1.Group getGroup(int index) { + if (groupBuilder_ == null) { + return group_.get(index); + } else { + return groupBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * A list of groups to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + public Builder setGroup(int index, com.google.spanner.v1.Group value) { + if (groupBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureGroupIsMutable(); + group_.set(index, value); + onChanged(); + } else { + groupBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * A list of groups to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + public Builder setGroup(int index, com.google.spanner.v1.Group.Builder builderForValue) { + if (groupBuilder_ == null) { + ensureGroupIsMutable(); + group_.set(index, builderForValue.build()); + onChanged(); + } else { + groupBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of groups to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + public Builder addGroup(com.google.spanner.v1.Group value) { + if (groupBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureGroupIsMutable(); + group_.add(value); + onChanged(); + } else { + groupBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * A list of groups to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + public Builder addGroup(int index, com.google.spanner.v1.Group value) { + if (groupBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureGroupIsMutable(); + group_.add(index, value); + onChanged(); + } else { + groupBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * A list of groups to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + public Builder addGroup(com.google.spanner.v1.Group.Builder builderForValue) { + if (groupBuilder_ == null) { + ensureGroupIsMutable(); + group_.add(builderForValue.build()); + onChanged(); + } else { + groupBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of groups to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + public Builder addGroup(int index, com.google.spanner.v1.Group.Builder builderForValue) { + if (groupBuilder_ == null) { + ensureGroupIsMutable(); + group_.add(index, builderForValue.build()); + onChanged(); + } else { + groupBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of groups to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + public Builder addAllGroup(java.lang.Iterable values) { + if (groupBuilder_ == null) { + ensureGroupIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, group_); + onChanged(); + } else { + groupBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * A list of groups to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + public Builder clearGroup() { + if (groupBuilder_ == null) { + group_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + groupBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * A list of groups to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + public Builder removeGroup(int index) { + if (groupBuilder_ == null) { + ensureGroupIsMutable(); + group_.remove(index); + onChanged(); + } else { + groupBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * A list of groups to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + public com.google.spanner.v1.Group.Builder getGroupBuilder(int index) { + return internalGetGroupFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * A list of groups to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + public com.google.spanner.v1.GroupOrBuilder getGroupOrBuilder(int index) { + if (groupBuilder_ == null) { + return group_.get(index); + } else { + return groupBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * A list of groups to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + public java.util.List getGroupOrBuilderList() { + if (groupBuilder_ != null) { + return groupBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(group_); + } + } + + /** + * + * + *
    +     * A list of groups to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + public com.google.spanner.v1.Group.Builder addGroupBuilder() { + return internalGetGroupFieldBuilder() + .addBuilder(com.google.spanner.v1.Group.getDefaultInstance()); + } + + /** + * + * + *
    +     * A list of groups to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + public com.google.spanner.v1.Group.Builder addGroupBuilder(int index) { + return internalGetGroupFieldBuilder() + .addBuilder(index, com.google.spanner.v1.Group.getDefaultInstance()); + } + + /** + * + * + *
    +     * A list of groups to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + public java.util.List getGroupBuilderList() { + return internalGetGroupFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Group, + com.google.spanner.v1.Group.Builder, + com.google.spanner.v1.GroupOrBuilder> + internalGetGroupFieldBuilder() { + if (groupBuilder_ == null) { + groupBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Group, + com.google.spanner.v1.Group.Builder, + com.google.spanner.v1.GroupOrBuilder>( + group_, ((bitField0_ & 0x00000004) != 0), getParentForChildren(), isClean()); + group_ = null; + } + return groupBuilder_; + } + + private com.google.spanner.v1.RecipeList keyRecipes_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RecipeList, + com.google.spanner.v1.RecipeList.Builder, + com.google.spanner.v1.RecipeListOrBuilder> + keyRecipesBuilder_; + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * .google.spanner.v1.RecipeList key_recipes = 5; + * + * @return Whether the keyRecipes field is set. + */ + public boolean hasKeyRecipes() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * .google.spanner.v1.RecipeList key_recipes = 5; + * + * @return The keyRecipes. + */ + public com.google.spanner.v1.RecipeList getKeyRecipes() { + if (keyRecipesBuilder_ == null) { + return keyRecipes_ == null + ? com.google.spanner.v1.RecipeList.getDefaultInstance() + : keyRecipes_; + } else { + return keyRecipesBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * .google.spanner.v1.RecipeList key_recipes = 5; + */ + public Builder setKeyRecipes(com.google.spanner.v1.RecipeList value) { + if (keyRecipesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + keyRecipes_ = value; + } else { + keyRecipesBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * .google.spanner.v1.RecipeList key_recipes = 5; + */ + public Builder setKeyRecipes(com.google.spanner.v1.RecipeList.Builder builderForValue) { + if (keyRecipesBuilder_ == null) { + keyRecipes_ = builderForValue.build(); + } else { + keyRecipesBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * .google.spanner.v1.RecipeList key_recipes = 5; + */ + public Builder mergeKeyRecipes(com.google.spanner.v1.RecipeList value) { + if (keyRecipesBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && keyRecipes_ != null + && keyRecipes_ != com.google.spanner.v1.RecipeList.getDefaultInstance()) { + getKeyRecipesBuilder().mergeFrom(value); + } else { + keyRecipes_ = value; + } + } else { + keyRecipesBuilder_.mergeFrom(value); + } + if (keyRecipes_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * .google.spanner.v1.RecipeList key_recipes = 5; + */ + public Builder clearKeyRecipes() { + bitField0_ = (bitField0_ & ~0x00000008); + keyRecipes_ = null; + if (keyRecipesBuilder_ != null) { + keyRecipesBuilder_.dispose(); + keyRecipesBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * .google.spanner.v1.RecipeList key_recipes = 5; + */ + public com.google.spanner.v1.RecipeList.Builder getKeyRecipesBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetKeyRecipesFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * .google.spanner.v1.RecipeList key_recipes = 5; + */ + public com.google.spanner.v1.RecipeListOrBuilder getKeyRecipesOrBuilder() { + if (keyRecipesBuilder_ != null) { + return keyRecipesBuilder_.getMessageOrBuilder(); + } else { + return keyRecipes_ == null + ? com.google.spanner.v1.RecipeList.getDefaultInstance() + : keyRecipes_; + } + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * .google.spanner.v1.RecipeList key_recipes = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RecipeList, + com.google.spanner.v1.RecipeList.Builder, + com.google.spanner.v1.RecipeListOrBuilder> + internalGetKeyRecipesFieldBuilder() { + if (keyRecipesBuilder_ == null) { + keyRecipesBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RecipeList, + com.google.spanner.v1.RecipeList.Builder, + com.google.spanner.v1.RecipeListOrBuilder>( + getKeyRecipes(), getParentForChildren(), isClean()); + keyRecipes_ = null; + } + return keyRecipesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.CacheUpdate) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.CacheUpdate) + private static final com.google.spanner.v1.CacheUpdate DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.CacheUpdate(); + } + + public static com.google.spanner.v1.CacheUpdate getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CacheUpdate parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { 
+ return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.CacheUpdate getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CacheUpdateOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CacheUpdateOrBuilder.java new file mode 100644 index 000000000000..aafedbae4fb5 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CacheUpdateOrBuilder.java @@ -0,0 +1,190 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/location.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface CacheUpdateOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.CacheUpdate) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * An internal ID for the database. Database names can be reused if a database
    +   * is deleted and re-created. Each time the database is re-created, it will
    +   * get a new database ID, which will never be re-used for any other database.
    +   * 
    + * + * uint64 database_id = 1; + * + * @return The databaseId. + */ + long getDatabaseId(); + + /** + * + * + *
    +   * A list of ranges to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + java.util.List getRangeList(); + + /** + * + * + *
    +   * A list of ranges to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + com.google.spanner.v1.Range getRange(int index); + + /** + * + * + *
    +   * A list of ranges to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + int getRangeCount(); + + /** + * + * + *
    +   * A list of ranges to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + java.util.List getRangeOrBuilderList(); + + /** + * + * + *
    +   * A list of ranges to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.Range range = 2; + */ + com.google.spanner.v1.RangeOrBuilder getRangeOrBuilder(int index); + + /** + * + * + *
    +   * A list of groups to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + java.util.List getGroupList(); + + /** + * + * + *
    +   * A list of groups to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + com.google.spanner.v1.Group getGroup(int index); + + /** + * + * + *
    +   * A list of groups to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + int getGroupCount(); + + /** + * + * + *
    +   * A list of groups to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + java.util.List getGroupOrBuilderList(); + + /** + * + * + *
    +   * A list of groups to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.Group group = 3; + */ + com.google.spanner.v1.GroupOrBuilder getGroupOrBuilder(int index); + + /** + * + * + *
    +   * A list of recipes to be cached.
    +   * 
    + * + * .google.spanner.v1.RecipeList key_recipes = 5; + * + * @return Whether the keyRecipes field is set. + */ + boolean hasKeyRecipes(); + + /** + * + * + *
    +   * A list of recipes to be cached.
    +   * 
    + * + * .google.spanner.v1.RecipeList key_recipes = 5; + * + * @return The keyRecipes. + */ + com.google.spanner.v1.RecipeList getKeyRecipes(); + + /** + * + * + *
    +   * A list of recipes to be cached.
    +   * 
    + * + * .google.spanner.v1.RecipeList key_recipes = 5; + */ + com.google.spanner.v1.RecipeListOrBuilder getKeyRecipesOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ChangeStreamProto.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ChangeStreamProto.java new file mode 100644 index 000000000000..ce0a156c394d --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ChangeStreamProto.java @@ -0,0 +1,300 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/change_stream.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public final class ChangeStreamProto extends com.google.protobuf.GeneratedFile { + private ChangeStreamProto() {} + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ChangeStreamProto"); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ChangeStreamRecord_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ChangeStreamRecord_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_ColumnMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_ColumnMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_ModValue_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_ModValue_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_Mod_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_Mod_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ChangeStreamRecord_HeartbeatRecord_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ChangeStreamRecord_HeartbeatRecord_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionStartRecord_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionStartRecord_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEndRecord_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEndRecord_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_MoveInEvent_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_MoveInEvent_fieldAccessorTable; + static 
final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_MoveOutEvent_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_MoveOutEvent_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n%google/spanner/v1/change_stream.proto\022" + + "\021google.spanner.v1\032\034google/protobuf/stru" + + "ct.proto\032\037google/protobuf/timestamp.prot" + + "o\032\034google/spanner/v1/type.proto\"\226\024\n\022Chan" + + "geStreamRecord\022T\n\022data_change_record\030\001 \001" + + "(\01326.google.spanner.v1.ChangeStreamRecor" + + "d.DataChangeRecordH\000\022Q\n\020heartbeat_record" + + "\030\002 \001(\01325.google.spanner.v1.ChangeStreamR" + + "ecord.HeartbeatRecordH\000\022\\\n\026partition_sta" + + "rt_record\030\003 \001(\0132:.google.spanner.v1.Chan" + + "geStreamRecord.PartitionStartRecordH\000\022X\n" + + "\024partition_end_record\030\004 \001(\01328.google.spa" + + "nner.v1.ChangeStreamRecord.PartitionEndR" + + "ecordH\000\022\\\n\026partition_event_record\030\005 \001(\0132" + + ":.google.spanner.v1.ChangeStreamRecord.P" + + "artitionEventRecordH\000\032\322\n\n\020DataChangeReco" + + "rd\0224\n\020commit_timestamp\030\001 \001(\0132\032.google.pr" + + "otobuf.Timestamp\022\027\n\017record_sequence\030\002 \001(" + + "\t\022\035\n\025server_transaction_id\030\003 \001(\t\0222\n*is_l" + + "ast_record_in_transaction_in_partition\030\004" + + " \001(\010\022\r\n\005table\030\005 \001(\t\022^\n\017column_metadata\030\006" + + " \003(\0132E.google.spanner.v1.ChangeStreamRec" + + "ord.DataChangeRecord.ColumnMetadata\022H\n\004m" + + "ods\030\007 \003(\0132:.google.spanner.v1.ChangeStre" + + 
"amRecord.DataChangeRecord.Mod\022P\n\010mod_typ" + + "e\030\010 \001(\0162>.google.spanner.v1.ChangeStream" + + "Record.DataChangeRecord.ModType\022c\n\022value" + + "_capture_type\030\t \001(\0162G.google.spanner.v1." + + "ChangeStreamRecord.DataChangeRecord.Valu" + + "eCaptureType\022(\n number_of_records_in_tra" + + "nsaction\030\n \001(\005\022+\n#number_of_partitions_i" + + "n_transaction\030\013 \001(\005\022\027\n\017transaction_tag\030\014" + + " \001(\t\022\035\n\025is_system_transaction\030\r \001(\010\032w\n\016C" + + "olumnMetadata\022\014\n\004name\030\001 \001(\t\022%\n\004type\030\002 \001(" + + "\0132\027.google.spanner.v1.Type\022\026\n\016is_primary" + + "_key\030\003 \001(\010\022\030\n\020ordinal_position\030\004 \001(\003\032P\n\010" + + "ModValue\022\035\n\025column_metadata_index\030\001 \001(\005\022" + + "%\n\005value\030\002 \001(\0132\026.google.protobuf.Value\032\376" + + "\001\n\003Mod\022M\n\004keys\030\001 \003(\0132?.google.spanner.v1" + + ".ChangeStreamRecord.DataChangeRecord.Mod" + + "Value\022S\n\nold_values\030\002 \003(\0132?.google.spann" + + "er.v1.ChangeStreamRecord.DataChangeRecor" + + "d.ModValue\022S\n\nnew_values\030\003 \003(\0132?.google." + + "spanner.v1.ChangeStreamRecord.DataChange" + + "Record.ModValue\"G\n\007ModType\022\030\n\024MOD_TYPE_U" + + "NSPECIFIED\020\000\022\n\n\006INSERT\020\n\022\n\n\006UPDATE\020\024\022\n\n\006" + + "DELETE\020\036\"\207\001\n\020ValueCaptureType\022\"\n\036VALUE_C" + + "APTURE_TYPE_UNSPECIFIED\020\000\022\026\n\022OLD_AND_NEW" + + "_VALUES\020\n\022\016\n\nNEW_VALUES\020\024\022\013\n\007NEW_ROW\020\036\022\032" + + "\n\026NEW_ROW_AND_OLD_VALUES\020(\032@\n\017HeartbeatR" + + "ecord\022-\n\ttimestamp\030\001 \001(\0132\032.google.protob" + + "uf.Timestamp\032~\n\024PartitionStartRecord\0223\n\017" + + "start_timestamp\030\001 \001(\0132\032.google.protobuf." 
+ + "Timestamp\022\027\n\017record_sequence\030\002 \001(\t\022\030\n\020pa" + + "rtition_tokens\030\003 \003(\t\032y\n\022PartitionEndReco" + + "rd\0221\n\rend_timestamp\030\001 \001(\0132\032.google.proto" + + "buf.Timestamp\022\027\n\017record_sequence\030\002 \001(\t\022\027" + + "\n\017partition_token\030\003 \001(\t\032\244\003\n\024PartitionEve" + + "ntRecord\0224\n\020commit_timestamp\030\001 \001(\0132\032.goo" + + "gle.protobuf.Timestamp\022\027\n\017record_sequenc" + + "e\030\002 \001(\t\022\027\n\017partition_token\030\003 \001(\t\022^\n\016move" + + "_in_events\030\004 \003(\0132F.google.spanner.v1.Cha" + + "ngeStreamRecord.PartitionEventRecord.Mov" + + "eInEvent\022`\n\017move_out_events\030\005 \003(\0132G.goog" + + "le.spanner.v1.ChangeStreamRecord.Partiti" + + "onEventRecord.MoveOutEvent\032-\n\013MoveInEven" + + "t\022\036\n\026source_partition_token\030\001 \001(\t\0323\n\014Mov" + + "eOutEvent\022#\n\033destination_partition_token" + + "\030\001 \001(\tB\010\n\006recordB\264\001\n\025com.google.spanner." 
+ + "v1B\021ChangeStreamProtoP\001Z5cloud.google.co" + + "m/go/spanner/apiv1/spannerpb;spannerpb\252\002" + + "\027Google.Cloud.Spanner.V1\312\002\027Google\\Cloud\\" + + "Spanner\\V1\352\002\032Google::Cloud::Spanner::V1b" + + "\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.protobuf.StructProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + com.google.spanner.v1.TypeProto.getDescriptor(), + }); + internal_static_google_spanner_v1_ChangeStreamRecord_descriptor = + getDescriptor().getMessageType(0); + internal_static_google_spanner_v1_ChangeStreamRecord_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_ChangeStreamRecord_descriptor, + new java.lang.String[] { + "DataChangeRecord", + "HeartbeatRecord", + "PartitionStartRecord", + "PartitionEndRecord", + "PartitionEventRecord", + "Record", + }); + internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_descriptor = + internal_static_google_spanner_v1_ChangeStreamRecord_descriptor.getNestedType(0); + internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_descriptor, + new java.lang.String[] { + "CommitTimestamp", + "RecordSequence", + "ServerTransactionId", + "IsLastRecordInTransactionInPartition", + "Table", + "ColumnMetadata", + "Mods", + "ModType", + "ValueCaptureType", + "NumberOfRecordsInTransaction", + "NumberOfPartitionsInTransaction", + "TransactionTag", + "IsSystemTransaction", + }); + internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_ColumnMetadata_descriptor = + internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_descriptor + .getNestedType(0); 
+ internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_ColumnMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_ColumnMetadata_descriptor, + new java.lang.String[] { + "Name", "Type", "IsPrimaryKey", "OrdinalPosition", + }); + internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_ModValue_descriptor = + internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_descriptor + .getNestedType(1); + internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_ModValue_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_ModValue_descriptor, + new java.lang.String[] { + "ColumnMetadataIndex", "Value", + }); + internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_Mod_descriptor = + internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_descriptor + .getNestedType(2); + internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_Mod_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_Mod_descriptor, + new java.lang.String[] { + "Keys", "OldValues", "NewValues", + }); + internal_static_google_spanner_v1_ChangeStreamRecord_HeartbeatRecord_descriptor = + internal_static_google_spanner_v1_ChangeStreamRecord_descriptor.getNestedType(1); + internal_static_google_spanner_v1_ChangeStreamRecord_HeartbeatRecord_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_ChangeStreamRecord_HeartbeatRecord_descriptor, + new java.lang.String[] { + "Timestamp", + }); + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionStartRecord_descriptor = + 
internal_static_google_spanner_v1_ChangeStreamRecord_descriptor.getNestedType(2); + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionStartRecord_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionStartRecord_descriptor, + new java.lang.String[] { + "StartTimestamp", "RecordSequence", "PartitionTokens", + }); + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEndRecord_descriptor = + internal_static_google_spanner_v1_ChangeStreamRecord_descriptor.getNestedType(3); + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEndRecord_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEndRecord_descriptor, + new java.lang.String[] { + "EndTimestamp", "RecordSequence", "PartitionToken", + }); + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_descriptor = + internal_static_google_spanner_v1_ChangeStreamRecord_descriptor.getNestedType(4); + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_descriptor, + new java.lang.String[] { + "CommitTimestamp", + "RecordSequence", + "PartitionToken", + "MoveInEvents", + "MoveOutEvents", + }); + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_MoveInEvent_descriptor = + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_descriptor + .getNestedType(0); + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_MoveInEvent_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_MoveInEvent_descriptor, + new java.lang.String[] { + 
"SourcePartitionToken", + }); + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_MoveOutEvent_descriptor = + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_descriptor + .getNestedType(1); + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_MoveOutEvent_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_MoveOutEvent_descriptor, + new java.lang.String[] { + "DestinationPartitionToken", + }); + descriptor.resolveAllFeaturesImmutable(); + com.google.protobuf.StructProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + com.google.spanner.v1.TypeProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ChangeStreamRecord.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ChangeStreamRecord.java new file mode 100644 index 000000000000..9409738a81a3 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ChangeStreamRecord.java @@ -0,0 +1,20870 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/change_stream.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * Spanner Change Streams enable customers to capture and stream out changes to
    + * their Spanner databases in real-time. A change stream
    + * can be created with option partition_mode='IMMUTABLE_KEY_RANGE' or
    + * partition_mode='MUTABLE_KEY_RANGE'.
    + *
    + * This message is only used in Change Streams created with the option
    + * partition_mode='MUTABLE_KEY_RANGE'. Spanner automatically creates a special
    + * Table-Valued Function (TVF) along with each Change Streams. The function
    + * provides access to the change stream's records. The function is named
    + * READ_<change_stream_name> (where <change_stream_name> is the
    + * name of the change stream), and it returns a table with only one column
    + * called ChangeRecord.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.ChangeStreamRecord} + */ +@com.google.protobuf.Generated +public final class ChangeStreamRecord extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ChangeStreamRecord) + ChangeStreamRecordOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ChangeStreamRecord"); + } + + // Use ChangeStreamRecord.newBuilder() to construct. + private ChangeStreamRecord(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ChangeStreamRecord() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.class, + com.google.spanner.v1.ChangeStreamRecord.Builder.class); + } + + public interface DataChangeRecordOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ChangeStreamRecord.DataChangeRecord) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * Indicates the timestamp in which the change was committed.
    +     * DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + * + * @return Whether the commitTimestamp field is set. + */ + boolean hasCommitTimestamp(); + + /** + * + * + *
    +     * Indicates the timestamp in which the change was committed.
    +     * DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + * + * @return The commitTimestamp. + */ + com.google.protobuf.Timestamp getCommitTimestamp(); + + /** + * + * + *
    +     * Indicates the timestamp in which the change was committed.
    +     * DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + com.google.protobuf.TimestampOrBuilder getCommitTimestampOrBuilder(); + + /** + * + * + *
    +     * Record sequence numbers are unique and monotonically increasing (but not
    +     * necessarily contiguous) for a specific timestamp across record
    +     * types in the same partition. To guarantee ordered processing, the reader
    +     * should process records (of potentially different types) in
    +     * record_sequence order for a specific timestamp in the same partition.
    +     *
    +     * The record sequence number ordering across partitions is only meaningful
    +     * in the context of a specific transaction. Record sequence numbers are
    +     * unique across partitions for a specific transaction. Sort the
    +     * DataChangeRecords for the same
    +     * [server_transaction_id][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.server_transaction_id]
    +     * by
    +     * [record_sequence][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.record_sequence]
    +     * to reconstruct the ordering of the changes within the transaction.
    +     * 
    + * + * string record_sequence = 2; + * + * @return The recordSequence. + */ + java.lang.String getRecordSequence(); + + /** + * + * + *
    +     * Record sequence numbers are unique and monotonically increasing (but not
    +     * necessarily contiguous) for a specific timestamp across record
    +     * types in the same partition. To guarantee ordered processing, the reader
    +     * should process records (of potentially different types) in
    +     * record_sequence order for a specific timestamp in the same partition.
    +     *
    +     * The record sequence number ordering across partitions is only meaningful
    +     * in the context of a specific transaction. Record sequence numbers are
    +     * unique across partitions for a specific transaction. Sort the
    +     * DataChangeRecords for the same
    +     * [server_transaction_id][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.server_transaction_id]
    +     * by
    +     * [record_sequence][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.record_sequence]
    +     * to reconstruct the ordering of the changes within the transaction.
    +     * 
    + * + * string record_sequence = 2; + * + * @return The bytes for recordSequence. + */ + com.google.protobuf.ByteString getRecordSequenceBytes(); + + /** + * + * + *
    +     * Provides a globally unique string that represents the transaction in
    +     * which the change was committed. Multiple transactions can have the same
    +     * commit timestamp, but each transaction has a unique
    +     * server_transaction_id.
    +     * 
    + * + * string server_transaction_id = 3; + * + * @return The serverTransactionId. + */ + java.lang.String getServerTransactionId(); + + /** + * + * + *
    +     * Provides a globally unique string that represents the transaction in
    +     * which the change was committed. Multiple transactions can have the same
    +     * commit timestamp, but each transaction has a unique
    +     * server_transaction_id.
    +     * 
    + * + * string server_transaction_id = 3; + * + * @return The bytes for serverTransactionId. + */ + com.google.protobuf.ByteString getServerTransactionIdBytes(); + + /** + * + * + *
    +     * Indicates whether this is the last record for a transaction in the
    +     * current partition. Clients can use this field to determine when all
    +     * records for a transaction in the current partition have been received.
    +     * 
    + * + * bool is_last_record_in_transaction_in_partition = 4; + * + * @return The isLastRecordInTransactionInPartition. + */ + boolean getIsLastRecordInTransactionInPartition(); + + /** + * + * + *
    +     * Name of the table affected by the change.
    +     * 
    + * + * string table = 5; + * + * @return The table. + */ + java.lang.String getTable(); + + /** + * + * + *
    +     * Name of the table affected by the change.
    +     * 
    + * + * string table = 5; + * + * @return The bytes for table. + */ + com.google.protobuf.ByteString getTableBytes(); + + /** + * + * + *
    +     * Provides metadata describing the columns associated with the
    +     * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +     * below.
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + java.util.List + getColumnMetadataList(); + + /** + * + * + *
    +     * Provides metadata describing the columns associated with the
    +     * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +     * below.
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata getColumnMetadata( + int index); + + /** + * + * + *
    +     * Provides metadata describing the columns associated with the
    +     * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +     * below.
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + int getColumnMetadataCount(); + + /** + * + * + *
    +     * Provides metadata describing the columns associated with the
    +     * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +     * below.
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + java.util.List< + ? extends + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadataOrBuilder> + getColumnMetadataOrBuilderList(); + + /** + * + * + *
    +     * Provides metadata describing the columns associated with the
    +     * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +     * below.
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadataOrBuilder + getColumnMetadataOrBuilder(int index); + + /** + * + * + *
    +     * Describes the changes that were made.
    +     * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + java.util.List getModsList(); + + /** + * + * + *
    +     * Describes the changes that were made.
    +     * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod getMods(int index); + + /** + * + * + *
    +     * Describes the changes that were made.
    +     * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + int getModsCount(); + + /** + * + * + *
    +     * Describes the changes that were made.
    +     * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + java.util.List + getModsOrBuilderList(); + + /** + * + * + *
    +     * Describes the changes that were made.
    +     * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModOrBuilder getModsOrBuilder( + int index); + + /** + * + * + *
    +     * Describes the type of change.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType mod_type = 8; + * + * @return The enum numeric value on the wire for modType. + */ + int getModTypeValue(); + + /** + * + * + *
    +     * Describes the type of change.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType mod_type = 8; + * + * @return The modType. + */ + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType getModType(); + + /** + * + * + *
    +     * Describes the value capture type that was specified in the change stream
    +     * configuration when this change was captured.
    +     * 
    + * + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType value_capture_type = 9; + * + * + * @return The enum numeric value on the wire for valueCaptureType. + */ + int getValueCaptureTypeValue(); + + /** + * + * + *
    +     * Describes the value capture type that was specified in the change stream
    +     * configuration when this change was captured.
    +     * 
    + * + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType value_capture_type = 9; + * + * + * @return The valueCaptureType. + */ + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType + getValueCaptureType(); + + /** + * + * + *
    +     * Indicates the number of data change records that are part of this
    +     * transaction across all change stream partitions. This value can be used
    +     * to assemble all the records associated with a particular transaction.
    +     * 
    + * + * int32 number_of_records_in_transaction = 10; + * + * @return The numberOfRecordsInTransaction. + */ + int getNumberOfRecordsInTransaction(); + + /** + * + * + *
    +     * Indicates the number of partitions that return data change records for
    +     * this transaction. This value can be helpful in assembling all records
    +     * associated with a particular transaction.
    +     * 
    + * + * int32 number_of_partitions_in_transaction = 11; + * + * @return The numberOfPartitionsInTransaction. + */ + int getNumberOfPartitionsInTransaction(); + + /** + * + * + *
    +     * Indicates the transaction tag associated with this transaction.
    +     * 
    + * + * string transaction_tag = 12; + * + * @return The transactionTag. + */ + java.lang.String getTransactionTag(); + + /** + * + * + *
    +     * Indicates the transaction tag associated with this transaction.
    +     * 
    + * + * string transaction_tag = 12; + * + * @return The bytes for transactionTag. + */ + com.google.protobuf.ByteString getTransactionTagBytes(); + + /** + * + * + *
    +     * Indicates whether the transaction is a system transaction. System
    +     * transactions include those issued by time-to-live (TTL), column backfill,
    +     * etc.
    +     * 
    + * + * bool is_system_transaction = 13; + * + * @return The isSystemTransaction. + */ + boolean getIsSystemTransaction(); + } + + /** + * + * + *
    +   * A data change record contains a set of changes to a table with the same
    +   * modification type (insert, update, or delete) committed at the same commit
    +   * timestamp in one change stream partition for the same transaction. Multiple
    +   * data change records can be returned for the same transaction across
    +   * multiple change stream partitions.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.ChangeStreamRecord.DataChangeRecord} + */ + public static final class DataChangeRecord extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ChangeStreamRecord.DataChangeRecord) + DataChangeRecordOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DataChangeRecord"); + } + + // Use DataChangeRecord.newBuilder() to construct. + private DataChangeRecord(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DataChangeRecord() { + recordSequence_ = ""; + serverTransactionId_ = ""; + table_ = ""; + columnMetadata_ = java.util.Collections.emptyList(); + mods_ = java.util.Collections.emptyList(); + modType_ = 0; + valueCaptureType_ = 0; + transactionTag_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.class, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Builder.class); + } + + /** + * + * + *
    +     * Mod type describes the type of change Spanner applied to the data. For
    +     * example, if the client submits an INSERT_OR_UPDATE request, Spanner will
    +     * perform an insert if there is no existing row and return ModType INSERT.
    +     * Alternatively, if there is an existing row, Spanner will perform an
    +     * update and return ModType UPDATE.
    +     * 
    + * + * Protobuf enum {@code google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType} + */ + public enum ModType implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +       * Not specified.
    +       * 
    + * + * MOD_TYPE_UNSPECIFIED = 0; + */ + MOD_TYPE_UNSPECIFIED(0), + /** + * + * + *
    +       * Indicates data was inserted.
    +       * 
    + * + * INSERT = 10; + */ + INSERT(10), + /** + * + * + *
    +       * Indicates existing data was updated.
    +       * 
    + * + * UPDATE = 20; + */ + UPDATE(20), + /** + * + * + *
    +       * Indicates existing data was deleted.
    +       * 
    + * + * DELETE = 30; + */ + DELETE(30), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ModType"); + } + + /** + * + * + *
    +       * Not specified.
    +       * 
    + * + * MOD_TYPE_UNSPECIFIED = 0; + */ + public static final int MOD_TYPE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +       * Indicates data was inserted.
    +       * 
    + * + * INSERT = 10; + */ + public static final int INSERT_VALUE = 10; + + /** + * + * + *
    +       * Indicates existing data was updated.
    +       * 
    + * + * UPDATE = 20; + */ + public static final int UPDATE_VALUE = 20; + + /** + * + * + *
    +       * Indicates existing data was deleted.
    +       * 
    + * + * DELETE = 30; + */ + public static final int DELETE_VALUE = 30; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ModType valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static ModType forNumber(int value) { + switch (value) { + case 0: + return MOD_TYPE_UNSPECIFIED; + case 10: + return INSERT; + case 20: + return UPDATE; + case 30: + return DELETE; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public ModType findValueByNumber(int number) { + return ModType.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final ModType[] VALUES = values(); + + public static ModType 
valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private ModType(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType) + } + + /** + * + * + *
    +     * Value capture type describes which values are recorded in the data
    +     * change record.
    +     * 
    + * + * Protobuf enum {@code google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType} + */ + public enum ValueCaptureType implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +       * Not specified.
    +       * 
    + * + * VALUE_CAPTURE_TYPE_UNSPECIFIED = 0; + */ + VALUE_CAPTURE_TYPE_UNSPECIFIED(0), + /** + * + * + *
    +       * Records both old and new values of the modified watched columns.
    +       * 
    + * + * OLD_AND_NEW_VALUES = 10; + */ + OLD_AND_NEW_VALUES(10), + /** + * + * + *
    +       * Records only new values of the modified watched columns.
    +       * 
    + * + * NEW_VALUES = 20; + */ + NEW_VALUES(20), + /** + * + * + *
    +       * Records new values of all watched columns, including modified and
    +       * unmodified columns.
    +       * 
    + * + * NEW_ROW = 30; + */ + NEW_ROW(30), + /** + * + * + *
    +       * Records the new values of all watched columns, including modified and
    +       * unmodified columns. Also records the old values of the modified
    +       * columns.
    +       * 
    + * + * NEW_ROW_AND_OLD_VALUES = 40; + */ + NEW_ROW_AND_OLD_VALUES(40), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ValueCaptureType"); + } + + /** + * + * + *
    +       * Not specified.
    +       * 
    + * + * VALUE_CAPTURE_TYPE_UNSPECIFIED = 0; + */ + public static final int VALUE_CAPTURE_TYPE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +       * Records both old and new values of the modified watched columns.
    +       * 
    + * + * OLD_AND_NEW_VALUES = 10; + */ + public static final int OLD_AND_NEW_VALUES_VALUE = 10; + + /** + * + * + *
    +       * Records only new values of the modified watched columns.
    +       * 
    + * + * NEW_VALUES = 20; + */ + public static final int NEW_VALUES_VALUE = 20; + + /** + * + * + *
    +       * Records new values of all watched columns, including modified and
    +       * unmodified columns.
    +       * 
    + * + * NEW_ROW = 30; + */ + public static final int NEW_ROW_VALUE = 30; + + /** + * + * + *
    +       * Records the new values of all watched columns, including modified and
    +       * unmodified columns. Also records the old values of the modified
    +       * columns.
    +       * 
    + * + * NEW_ROW_AND_OLD_VALUES = 40; + */ + public static final int NEW_ROW_AND_OLD_VALUES_VALUE = 40; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ValueCaptureType valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static ValueCaptureType forNumber(int value) { + switch (value) { + case 0: + return VALUE_CAPTURE_TYPE_UNSPECIFIED; + case 10: + return OLD_AND_NEW_VALUES; + case 20: + return NEW_VALUES; + case 30: + return NEW_ROW; + case 40: + return NEW_ROW_AND_OLD_VALUES; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public ValueCaptureType findValueByNumber(int number) { + return ValueCaptureType.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return 
com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.getDescriptor() + .getEnumTypes() + .get(1); + } + + private static final ValueCaptureType[] VALUES = values(); + + public static ValueCaptureType valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private ValueCaptureType(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType) + } + + public interface ColumnMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +       * Name of the column.
    +       * 
    + * + * string name = 1; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +       * Name of the column.
    +       * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +       * Type of the column.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return Whether the type field is set. + */ + boolean hasType(); + + /** + * + * + *
    +       * Type of the column.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return The type. + */ + com.google.spanner.v1.Type getType(); + + /** + * + * + *
    +       * Type of the column.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + */ + com.google.spanner.v1.TypeOrBuilder getTypeOrBuilder(); + + /** + * + * + *
    +       * Indicates whether the column is a primary key column.
    +       * 
    + * + * bool is_primary_key = 3; + * + * @return The isPrimaryKey. + */ + boolean getIsPrimaryKey(); + + /** + * + * + *
    +       * Ordinal position of the column based on the original table definition
    +       * in the schema starting with a value of 1.
    +       * 
    + * + * int64 ordinal_position = 4; + * + * @return The ordinalPosition. + */ + long getOrdinalPosition(); + } + + /** + * + * + *
    +     * Metadata for a column.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata} + */ + public static final class ColumnMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata) + ColumnMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ColumnMetadata"); + } + + // Use ColumnMetadata.newBuilder() to construct. + private ColumnMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ColumnMetadata() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_ColumnMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_ColumnMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata.class, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata.Builder + .class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +       * Name of the column.
    +       * 
    + * + * string name = 1; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +       * Name of the column.
    +       * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TYPE_FIELD_NUMBER = 2; + private com.google.spanner.v1.Type type_; + + /** + * + * + *
    +       * Type of the column.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return Whether the type field is set. + */ + @java.lang.Override + public boolean hasType() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +       * Type of the column.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return The type. + */ + @java.lang.Override + public com.google.spanner.v1.Type getType() { + return type_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : type_; + } + + /** + * + * + *
    +       * Type of the column.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + */ + @java.lang.Override + public com.google.spanner.v1.TypeOrBuilder getTypeOrBuilder() { + return type_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : type_; + } + + public static final int IS_PRIMARY_KEY_FIELD_NUMBER = 3; + private boolean isPrimaryKey_ = false; + + /** + * + * + *
    +       * Indicates whether the column is a primary key column.
    +       * 
    + * + * bool is_primary_key = 3; + * + * @return The isPrimaryKey. + */ + @java.lang.Override + public boolean getIsPrimaryKey() { + return isPrimaryKey_; + } + + public static final int ORDINAL_POSITION_FIELD_NUMBER = 4; + private long ordinalPosition_ = 0L; + + /** + * + * + *
    +       * Ordinal position of the column based on the original table definition
    +       * in the schema starting with a value of 1.
    +       * 
    + * + * int64 ordinal_position = 4; + * + * @return The ordinalPosition. + */ + @java.lang.Override + public long getOrdinalPosition() { + return ordinalPosition_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getType()); + } + if (isPrimaryKey_ != false) { + output.writeBool(3, isPrimaryKey_); + } + if (ordinalPosition_ != 0L) { + output.writeInt64(4, ordinalPosition_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getType()); + } + if (isPrimaryKey_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, isPrimaryKey_); + } + if (ordinalPosition_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, ordinalPosition_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata)) { + return super.equals(obj); + } + 
com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata other = + (com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata) obj; + + if (!getName().equals(other.getName())) return false; + if (hasType() != other.hasType()) return false; + if (hasType()) { + if (!getType().equals(other.getType())) return false; + } + if (getIsPrimaryKey() != other.getIsPrimaryKey()) return false; + if (getOrdinalPosition() != other.getOrdinalPosition()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + getType().hashCode(); + } + hash = (37 * hash) + IS_PRIMARY_KEY_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getIsPrimaryKey()); + hash = (37 * hash) + ORDINAL_POSITION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getOrdinalPosition()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + 
parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + parseDelimitedFrom( + java.io.InputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +       * Metadata for a column.
    +       * 
    + * + * Protobuf type {@code google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata) + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_ColumnMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_ColumnMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata.class, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata.Builder + .class); + } + + // Construct using + // com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetTypeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + type_ = null; + if (typeBuilder_ != null) { + typeBuilder_.dispose(); + typeBuilder_ = null; + } + isPrimaryKey_ = false; + ordinalPosition_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() 
{ + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_ColumnMetadata_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + getDefaultInstanceForType() { + return com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata build() { + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + buildPartial() { + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata result = + new com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.type_ = typeBuilder_ == null ? 
type_ : typeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.isPrimaryKey_ = isPrimaryKey_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.ordinalPosition_ = ordinalPosition_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata) { + return mergeFrom( + (com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata other) { + if (other + == com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + .getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasType()) { + mergeType(other.getType()); + } + if (other.getIsPrimaryKey() != false) { + setIsPrimaryKey(other.getIsPrimaryKey()); + } + if (other.getOrdinalPosition() != 0L) { + setOrdinalPosition(other.getOrdinalPosition()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + 
internalGetTypeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + isPrimaryKey_ = input.readBool(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 32: + { + ordinalPosition_ = input.readInt64(); + bitField0_ |= 0x00000008; + break; + } // case 32 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +         * Name of the column.
    +         * 
    + * + * string name = 1; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +         * Name of the column.
    +         * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +         * Name of the column.
    +         * 
    + * + * string name = 1; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +         * Name of the column.
    +         * 
    + * + * string name = 1; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +         * Name of the column.
    +         * 
    + * + * string name = 1; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.spanner.v1.Type type_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder> + typeBuilder_; + + /** + * + * + *
    +         * Type of the column.
    +         * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return Whether the type field is set. + */ + public boolean hasType() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +         * Type of the column.
    +         * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return The type. + */ + public com.google.spanner.v1.Type getType() { + if (typeBuilder_ == null) { + return type_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : type_; + } else { + return typeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +         * Type of the column.
    +         * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public Builder setType(com.google.spanner.v1.Type value) { + if (typeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + type_ = value; + } else { + typeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +         * Type of the column.
    +         * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public Builder setType(com.google.spanner.v1.Type.Builder builderForValue) { + if (typeBuilder_ == null) { + type_ = builderForValue.build(); + } else { + typeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +         * Type of the column.
    +         * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public Builder mergeType(com.google.spanner.v1.Type value) { + if (typeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && type_ != null + && type_ != com.google.spanner.v1.Type.getDefaultInstance()) { + getTypeBuilder().mergeFrom(value); + } else { + type_ = value; + } + } else { + typeBuilder_.mergeFrom(value); + } + if (type_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +         * Type of the column.
    +         * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000002); + type_ = null; + if (typeBuilder_ != null) { + typeBuilder_.dispose(); + typeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +         * Type of the column.
    +         * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public com.google.spanner.v1.Type.Builder getTypeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetTypeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +         * Type of the column.
    +         * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public com.google.spanner.v1.TypeOrBuilder getTypeOrBuilder() { + if (typeBuilder_ != null) { + return typeBuilder_.getMessageOrBuilder(); + } else { + return type_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : type_; + } + } + + /** + * + * + *
    +         * Type of the column.
    +         * 
    + * + * .google.spanner.v1.Type type = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder> + internalGetTypeFieldBuilder() { + if (typeBuilder_ == null) { + typeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder>( + getType(), getParentForChildren(), isClean()); + type_ = null; + } + return typeBuilder_; + } + + private boolean isPrimaryKey_; + + /** + * + * + *
    +         * Indicates whether the column is a primary key column.
    +         * 
    + * + * bool is_primary_key = 3; + * + * @return The isPrimaryKey. + */ + @java.lang.Override + public boolean getIsPrimaryKey() { + return isPrimaryKey_; + } + + /** + * + * + *
    +         * Indicates whether the column is a primary key column.
    +         * 
    + * + * bool is_primary_key = 3; + * + * @param value The isPrimaryKey to set. + * @return This builder for chaining. + */ + public Builder setIsPrimaryKey(boolean value) { + + isPrimaryKey_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +         * Indicates whether the column is a primary key column.
    +         * 
    + * + * bool is_primary_key = 3; + * + * @return This builder for chaining. + */ + public Builder clearIsPrimaryKey() { + bitField0_ = (bitField0_ & ~0x00000004); + isPrimaryKey_ = false; + onChanged(); + return this; + } + + private long ordinalPosition_; + + /** + * + * + *
    +         * Ordinal position of the column based on the original table definition
    +         * in the schema starting with a value of 1.
    +         * 
    + * + * int64 ordinal_position = 4; + * + * @return The ordinalPosition. + */ + @java.lang.Override + public long getOrdinalPosition() { + return ordinalPosition_; + } + + /** + * + * + *
    +         * Ordinal position of the column based on the original table definition
    +         * in the schema starting with a value of 1.
    +         * 
    + * + * int64 ordinal_position = 4; + * + * @param value The ordinalPosition to set. + * @return This builder for chaining. + */ + public Builder setOrdinalPosition(long value) { + + ordinalPosition_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +         * Ordinal position of the column based on the original table definition
    +         * in the schema starting with a value of 1.
    +         * 
    + * + * int64 ordinal_position = 4; + * + * @return This builder for chaining. + */ + public Builder clearOrdinalPosition() { + bitField0_ = (bitField0_ & ~0x00000008); + ordinalPosition_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata) + private static final com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata(); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ColumnMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface ModValueOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +       * Index within the repeated
    +       * [column_metadata][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.column_metadata]
    +       * field, to obtain the column metadata for the column that was modified.
    +       * 
    + * + * int32 column_metadata_index = 1; + * + * @return The columnMetadataIndex. + */ + int getColumnMetadataIndex(); + + /** + * + * + *
    +       * The value of the column.
    +       * 
    + * + * .google.protobuf.Value value = 2; + * + * @return Whether the value field is set. + */ + boolean hasValue(); + + /** + * + * + *
    +       * The value of the column.
    +       * 
    + * + * .google.protobuf.Value value = 2; + * + * @return The value. + */ + com.google.protobuf.Value getValue(); + + /** + * + * + *
    +       * The value of the column.
    +       * 
    + * + * .google.protobuf.Value value = 2; + */ + com.google.protobuf.ValueOrBuilder getValueOrBuilder(); + } + + /** + * + * + *
    +     * Returns the value and associated metadata for a particular field of the
    +     * [Mod][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod].
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue} + */ + public static final class ModValue extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue) + ModValueOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ModValue"); + } + + // Use ModValue.newBuilder() to construct. + private ModValue(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ModValue() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_ModValue_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_ModValue_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.class, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder.class); + } + + private int bitField0_; + public static final int COLUMN_METADATA_INDEX_FIELD_NUMBER = 1; + private int columnMetadataIndex_ = 0; + + /** + * + * + *
    +       * Index within the repeated
    +       * [column_metadata][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.column_metadata]
    +       * field, to obtain the column metadata for the column that was modified.
    +       * 
    + * + * int32 column_metadata_index = 1; + * + * @return The columnMetadataIndex. + */ + @java.lang.Override + public int getColumnMetadataIndex() { + return columnMetadataIndex_; + } + + public static final int VALUE_FIELD_NUMBER = 2; + private com.google.protobuf.Value value_; + + /** + * + * + *
    +       * The value of the column.
    +       * 
    + * + * .google.protobuf.Value value = 2; + * + * @return Whether the value field is set. + */ + @java.lang.Override + public boolean hasValue() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +       * The value of the column.
    +       * 
    + * + * .google.protobuf.Value value = 2; + * + * @return The value. + */ + @java.lang.Override + public com.google.protobuf.Value getValue() { + return value_ == null ? com.google.protobuf.Value.getDefaultInstance() : value_; + } + + /** + * + * + *
    +       * The value of the column.
    +       * 
    + * + * .google.protobuf.Value value = 2; + */ + @java.lang.Override + public com.google.protobuf.ValueOrBuilder getValueOrBuilder() { + return value_ == null ? com.google.protobuf.Value.getDefaultInstance() : value_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (columnMetadataIndex_ != 0) { + output.writeInt32(1, columnMetadataIndex_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getValue()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (columnMetadataIndex_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(1, columnMetadataIndex_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getValue()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue)) { + return super.equals(obj); + } + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue other = + (com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue) obj; + + if (getColumnMetadataIndex() != other.getColumnMetadataIndex()) return false; + if (hasValue() != other.hasValue()) return false; + if (hasValue()) { + if (!getValue().equals(other.getValue())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) 
return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + COLUMN_METADATA_INDEX_FIELD_NUMBER; + hash = (53 * hash) + getColumnMetadataIndex(); + if (hasValue()) { + hash = (37 * hash) + VALUE_FIELD_NUMBER; + hash = (53 * hash) + getValue().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue + parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static 
Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +       * Returns the value and associated metadata for a particular field of the
    +       * [Mod][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod].
    +       * 
    + * + * Protobuf type {@code google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue) + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_ModValue_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_ModValue_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.class, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder.class); + } + + // Construct using + // com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetValueFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + columnMetadataIndex_ = 0; + value_ = null; + if (valueBuilder_ != null) { + valueBuilder_.dispose(); + valueBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.ChangeStreamProto + 
.internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_ModValue_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue + getDefaultInstanceForType() { + return com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue build() { + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue buildPartial() { + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue result = + new com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.columnMetadataIndex_ = columnMetadataIndex_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.value_ = valueBuilder_ == null ? 
value_ : valueBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue) { + return mergeFrom( + (com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue other) { + if (other + == com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue + .getDefaultInstance()) return this; + if (other.getColumnMetadataIndex() != 0) { + setColumnMetadataIndex(other.getColumnMetadataIndex()); + } + if (other.hasValue()) { + mergeValue(other.getValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + columnMetadataIndex_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + input.readMessage( + internalGetValueFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + 
onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int columnMetadataIndex_; + + /** + * + * + *
    +         * Index within the repeated
    +         * [column_metadata][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.column_metadata]
    +         * field, to obtain the column metadata for the column that was modified.
    +         * 
    + * + * int32 column_metadata_index = 1; + * + * @return The columnMetadataIndex. + */ + @java.lang.Override + public int getColumnMetadataIndex() { + return columnMetadataIndex_; + } + + /** + * + * + *
    +         * Index within the repeated
    +         * [column_metadata][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.column_metadata]
    +         * field, to obtain the column metadata for the column that was modified.
    +         * 
    + * + * int32 column_metadata_index = 1; + * + * @param value The columnMetadataIndex to set. + * @return This builder for chaining. + */ + public Builder setColumnMetadataIndex(int value) { + + columnMetadataIndex_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +         * Index within the repeated
    +         * [column_metadata][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.column_metadata]
    +         * field, to obtain the column metadata for the column that was modified.
    +         * 
    + * + * int32 column_metadata_index = 1; + * + * @return This builder for chaining. + */ + public Builder clearColumnMetadataIndex() { + bitField0_ = (bitField0_ & ~0x00000001); + columnMetadataIndex_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.Value value_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Value, + com.google.protobuf.Value.Builder, + com.google.protobuf.ValueOrBuilder> + valueBuilder_; + + /** + * + * + *
    +         * The value of the column.
    +         * 
    + * + * .google.protobuf.Value value = 2; + * + * @return Whether the value field is set. + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +         * The value of the column.
    +         * 
    + * + * .google.protobuf.Value value = 2; + * + * @return The value. + */ + public com.google.protobuf.Value getValue() { + if (valueBuilder_ == null) { + return value_ == null ? com.google.protobuf.Value.getDefaultInstance() : value_; + } else { + return valueBuilder_.getMessage(); + } + } + + /** + * + * + *
    +         * The value of the column.
    +         * 
    + * + * .google.protobuf.Value value = 2; + */ + public Builder setValue(com.google.protobuf.Value value) { + if (valueBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + value_ = value; + } else { + valueBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +         * The value of the column.
    +         * 
    + * + * .google.protobuf.Value value = 2; + */ + public Builder setValue(com.google.protobuf.Value.Builder builderForValue) { + if (valueBuilder_ == null) { + value_ = builderForValue.build(); + } else { + valueBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +         * The value of the column.
    +         * 
    + * + * .google.protobuf.Value value = 2; + */ + public Builder mergeValue(com.google.protobuf.Value value) { + if (valueBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && value_ != null + && value_ != com.google.protobuf.Value.getDefaultInstance()) { + getValueBuilder().mergeFrom(value); + } else { + value_ = value; + } + } else { + valueBuilder_.mergeFrom(value); + } + if (value_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +         * The value of the column.
    +         * 
    + * + * .google.protobuf.Value value = 2; + */ + public Builder clearValue() { + bitField0_ = (bitField0_ & ~0x00000002); + value_ = null; + if (valueBuilder_ != null) { + valueBuilder_.dispose(); + valueBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +         * The value of the column.
    +         * 
    + * + * .google.protobuf.Value value = 2; + */ + public com.google.protobuf.Value.Builder getValueBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetValueFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +         * The value of the column.
    +         * 
    + * + * .google.protobuf.Value value = 2; + */ + public com.google.protobuf.ValueOrBuilder getValueOrBuilder() { + if (valueBuilder_ != null) { + return valueBuilder_.getMessageOrBuilder(); + } else { + return value_ == null ? com.google.protobuf.Value.getDefaultInstance() : value_; + } + } + + /** + * + * + *
    +         * The value of the column.
    +         * 
    + * + * .google.protobuf.Value value = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Value, + com.google.protobuf.Value.Builder, + com.google.protobuf.ValueOrBuilder> + internalGetValueFieldBuilder() { + if (valueBuilder_ == null) { + valueBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Value, + com.google.protobuf.Value.Builder, + com.google.protobuf.ValueOrBuilder>( + getValue(), getParentForChildren(), isClean()); + value_ = null; + } + return valueBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue) + private static final com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue(); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ModValue parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return 
builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface ModOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +       * Returns the value of the primary key of the modified row.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + java.util.List + getKeysList(); + + /** + * + * + *
    +       * Returns the value of the primary key of the modified row.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue getKeys(int index); + + /** + * + * + *
    +       * Returns the value of the primary key of the modified row.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + int getKeysCount(); + + /** + * + * + *
    +       * Returns the value of the primary key of the modified row.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + java.util.List< + ? extends com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder> + getKeysOrBuilderList(); + + /** + * + * + *
    +       * Returns the value of the primary key of the modified row.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder getKeysOrBuilder( + int index); + + /** + * + * + *
    +       * Returns the old values before the change for the modified columns.
    +       * Always empty for
    +       * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +       * or if old values are not being captured specified by
    +       * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + java.util.List + getOldValuesList(); + + /** + * + * + *
    +       * Returns the old values before the change for the modified columns.
    +       * Always empty for
    +       * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +       * or if old values are not being captured specified by
    +       * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue getOldValues(int index); + + /** + * + * + *
    +       * Returns the old values before the change for the modified columns.
    +       * Always empty for
    +       * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +       * or if old values are not being captured specified by
    +       * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + int getOldValuesCount(); + + /** + * + * + *
    +       * Returns the old values before the change for the modified columns.
    +       * Always empty for
    +       * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +       * or if old values are not being captured specified by
    +       * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + java.util.List< + ? extends com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder> + getOldValuesOrBuilderList(); + + /** + * + * + *
    +       * Returns the old values before the change for the modified columns.
    +       * Always empty for
    +       * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +       * or if old values are not being captured specified by
    +       * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder + getOldValuesOrBuilder(int index); + + /** + * + * + *
    +       * Returns the new values after the change for the modified columns.
    +       * Always empty for
    +       * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + java.util.List + getNewValuesList(); + + /** + * + * + *
    +       * Returns the new values after the change for the modified columns.
    +       * Always empty for
    +       * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue getNewValues(int index); + + /** + * + * + *
    +       * Returns the new values after the change for the modified columns.
    +       * Always empty for
    +       * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + int getNewValuesCount(); + + /** + * + * + *
    +       * Returns the new values after the change for the modified columns.
    +       * Always empty for
    +       * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + java.util.List< + ? extends com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder> + getNewValuesOrBuilderList(); + + /** + * + * + *
    +       * Returns the new values after the change for the modified columns.
    +       * Always empty for
    +       * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder + getNewValuesOrBuilder(int index); + } + + /** + * + * + *
    +     * A mod describes all data changes in a watched table row.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod} + */ + public static final class Mod extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod) + ModOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Mod"); + } + + // Use Mod.newBuilder() to construct. + private Mod(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Mod() { + keys_ = java.util.Collections.emptyList(); + oldValues_ = java.util.Collections.emptyList(); + newValues_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_Mod_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_Mod_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod.class, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod.Builder.class); + } + + public static final int KEYS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List + keys_; + + /** + * + * + *
    +       * Returns the value of the primary key of the modified row.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + @java.lang.Override + public java.util.List + getKeysList() { + return keys_; + } + + /** + * + * + *
    +       * Returns the value of the primary key of the modified row.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + @java.lang.Override + public java.util.List< + ? extends com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder> + getKeysOrBuilderList() { + return keys_; + } + + /** + * + * + *
    +       * Returns the value of the primary key of the modified row.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + @java.lang.Override + public int getKeysCount() { + return keys_.size(); + } + + /** + * + * + *
    +       * Returns the value of the primary key of the modified row.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue getKeys(int index) { + return keys_.get(index); + } + + /** + * + * + *
    +       * Returns the value of the primary key of the modified row.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder + getKeysOrBuilder(int index) { + return keys_.get(index); + } + + public static final int OLD_VALUES_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List + oldValues_; + + /** + * + * + *
    +       * Returns the old values before the change for the modified columns.
    +       * Always empty for
    +       * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +       * or if old values are not being captured specified by
    +       * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + @java.lang.Override + public java.util.List + getOldValuesList() { + return oldValues_; + } + + /** + * + * + *
    +       * Returns the old values before the change for the modified columns.
    +       * Always empty for
    +       * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +       * or if old values are not being captured specified by
    +       * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + @java.lang.Override + public java.util.List< + ? extends com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder> + getOldValuesOrBuilderList() { + return oldValues_; + } + + /** + * + * + *
    +       * Returns the old values before the change for the modified columns.
    +       * Always empty for
    +       * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +       * or if old values are not being captured specified by
    +       * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + @java.lang.Override + public int getOldValuesCount() { + return oldValues_.size(); + } + + /** + * + * + *
    +       * Returns the old values before the change for the modified columns.
    +       * Always empty for
    +       * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +       * or if old values are not being captured specified by
    +       * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue getOldValues( + int index) { + return oldValues_.get(index); + } + + /** + * + * + *
    +       * Returns the old values before the change for the modified columns.
    +       * Always empty for
    +       * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +       * or if old values are not being captured specified by
    +       * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder + getOldValuesOrBuilder(int index) { + return oldValues_.get(index); + } + + public static final int NEW_VALUES_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private java.util.List + newValues_; + + /** + * + * + *
    +       * Returns the new values after the change for the modified columns.
    +       * Always empty for
    +       * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + @java.lang.Override + public java.util.List + getNewValuesList() { + return newValues_; + } + + /** + * + * + *
    +       * Returns the new values after the change for the modified columns.
    +       * Always empty for
    +       * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + @java.lang.Override + public java.util.List< + ? extends com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder> + getNewValuesOrBuilderList() { + return newValues_; + } + + /** + * + * + *
    +       * Returns the new values after the change for the modified columns.
    +       * Always empty for
    +       * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + @java.lang.Override + public int getNewValuesCount() { + return newValues_.size(); + } + + /** + * + * + *
    +       * Returns the new values after the change for the modified columns.
    +       * Always empty for
    +       * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue getNewValues( + int index) { + return newValues_.get(index); + } + + /** + * + * + *
    +       * Returns the new values after the change for the modified columns.
    +       * Always empty for
    +       * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder + getNewValuesOrBuilder(int index) { + return newValues_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < keys_.size(); i++) { + output.writeMessage(1, keys_.get(i)); + } + for (int i = 0; i < oldValues_.size(); i++) { + output.writeMessage(2, oldValues_.get(i)); + } + for (int i = 0; i < newValues_.size(); i++) { + output.writeMessage(3, newValues_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < keys_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, keys_.get(i)); + } + for (int i = 0; i < oldValues_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, oldValues_.get(i)); + } + for (int i = 0; i < newValues_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, newValues_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod)) { + return super.equals(obj); + } + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod other = + 
(com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod) obj; + + if (!getKeysList().equals(other.getKeysList())) return false; + if (!getOldValuesList().equals(other.getOldValuesList())) return false; + if (!getNewValuesList().equals(other.getNewValuesList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getKeysCount() > 0) { + hash = (37 * hash) + KEYS_FIELD_NUMBER; + hash = (53 * hash) + getKeysList().hashCode(); + } + if (getOldValuesCount() > 0) { + hash = (37 * hash) + OLD_VALUES_FIELD_NUMBER; + hash = (53 * hash) + getOldValuesList().hashCode(); + } + if (getNewValuesCount() > 0) { + hash = (37 * hash) + NEW_VALUES_FIELD_NUMBER; + hash = (53 * hash) + getNewValuesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod + parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, 
input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +       * A mod describes all data changes in a watched table row.
    +       * 
    + * + * Protobuf type {@code google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod) + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_Mod_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_Mod_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod.class, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod.Builder.class); + } + + // Construct using + // com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (keysBuilder_ == null) { + keys_ = java.util.Collections.emptyList(); + } else { + keys_ = null; + keysBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (oldValuesBuilder_ == null) { + oldValues_ = java.util.Collections.emptyList(); + } else { + oldValues_ = null; + oldValuesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (newValuesBuilder_ == null) { + newValues_ = java.util.Collections.emptyList(); + } else { + newValues_ = null; + newValuesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_Mod_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod + getDefaultInstanceForType() { + return com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod build() { + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod buildPartial() { + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod result = + new com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod result) { + if (keysBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + keys_ = java.util.Collections.unmodifiableList(keys_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.keys_ = keys_; + } else { + result.keys_ = keysBuilder_.build(); + } + if (oldValuesBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + oldValues_ = java.util.Collections.unmodifiableList(oldValues_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.oldValues_ = oldValues_; + } else { + result.oldValues_ = oldValuesBuilder_.build(); + } + if (newValuesBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0)) { + newValues_ = java.util.Collections.unmodifiableList(newValues_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.newValues_ = 
newValues_; + } else { + result.newValues_ = newValuesBuilder_.build(); + } + } + + private void buildPartial0( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod) { + return mergeFrom((com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod other) { + if (other + == com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod.getDefaultInstance()) + return this; + if (keysBuilder_ == null) { + if (!other.keys_.isEmpty()) { + if (keys_.isEmpty()) { + keys_ = other.keys_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureKeysIsMutable(); + keys_.addAll(other.keys_); + } + onChanged(); + } + } else { + if (!other.keys_.isEmpty()) { + if (keysBuilder_.isEmpty()) { + keysBuilder_.dispose(); + keysBuilder_ = null; + keys_ = other.keys_; + bitField0_ = (bitField0_ & ~0x00000001); + keysBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetKeysFieldBuilder() + : null; + } else { + keysBuilder_.addAllMessages(other.keys_); + } + } + } + if (oldValuesBuilder_ == null) { + if (!other.oldValues_.isEmpty()) { + if (oldValues_.isEmpty()) { + oldValues_ = other.oldValues_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureOldValuesIsMutable(); + oldValues_.addAll(other.oldValues_); + } + onChanged(); + } + } else { + if (!other.oldValues_.isEmpty()) { + if (oldValuesBuilder_.isEmpty()) { + oldValuesBuilder_.dispose(); + oldValuesBuilder_ = null; + oldValues_ = other.oldValues_; + bitField0_ = (bitField0_ & ~0x00000002); + oldValuesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetOldValuesFieldBuilder() + : null; + } else { + oldValuesBuilder_.addAllMessages(other.oldValues_); + } + } + } + if (newValuesBuilder_ == null) { + if (!other.newValues_.isEmpty()) { + if (newValues_.isEmpty()) { + newValues_ = other.newValues_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureNewValuesIsMutable(); + newValues_.addAll(other.newValues_); + } + onChanged(); + } + } else { + if (!other.newValues_.isEmpty()) { + if (newValuesBuilder_.isEmpty()) { + newValuesBuilder_.dispose(); + newValuesBuilder_ = null; + newValues_ = other.newValues_; + bitField0_ = (bitField0_ & ~0x00000004); + newValuesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetNewValuesFieldBuilder() + : null; + } else { + newValuesBuilder_.addAllMessages(other.newValues_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue m = + input.readMessage( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue + .parser(), + extensionRegistry); + if (keysBuilder_ == null) { + ensureKeysIsMutable(); + keys_.add(m); + } else { + keysBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue m = + input.readMessage( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue + .parser(), + extensionRegistry); + if 
(oldValuesBuilder_ == null) { + ensureOldValuesIsMutable(); + oldValues_.add(m); + } else { + oldValuesBuilder_.addMessage(m); + } + break; + } // case 18 + case 26: + { + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue m = + input.readMessage( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue + .parser(), + extensionRegistry); + if (newValuesBuilder_ == null) { + ensureNewValuesIsMutable(); + newValues_.add(m); + } else { + newValuesBuilder_.addMessage(m); + } + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List + keys_ = java.util.Collections.emptyList(); + + private void ensureKeysIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + keys_ = + new java.util.ArrayList< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue>(keys_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder> + keysBuilder_; + + /** + * + * + *
    +         * Returns the value of the primary key of the modified row.
    +         * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + public java.util.List + getKeysList() { + if (keysBuilder_ == null) { + return java.util.Collections.unmodifiableList(keys_); + } else { + return keysBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +         * Returns the value of the primary key of the modified row.
    +         * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + public int getKeysCount() { + if (keysBuilder_ == null) { + return keys_.size(); + } else { + return keysBuilder_.getCount(); + } + } + + /** + * + * + *
    +         * Returns the value of the primary key of the modified row.
    +         * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue getKeys( + int index) { + if (keysBuilder_ == null) { + return keys_.get(index); + } else { + return keysBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +         * Returns the value of the primary key of the modified row.
    +         * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + public Builder setKeys( + int index, com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue value) { + if (keysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureKeysIsMutable(); + keys_.set(index, value); + onChanged(); + } else { + keysBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +         * Returns the value of the primary key of the modified row.
    +         * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + public Builder setKeys( + int index, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder + builderForValue) { + if (keysBuilder_ == null) { + ensureKeysIsMutable(); + keys_.set(index, builderForValue.build()); + onChanged(); + } else { + keysBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +         * Returns the value of the primary key of the modified row.
    +         * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + public Builder addKeys( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue value) { + if (keysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureKeysIsMutable(); + keys_.add(value); + onChanged(); + } else { + keysBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +         * Returns the value of the primary key of the modified row.
    +         * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + public Builder addKeys( + int index, com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue value) { + if (keysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureKeysIsMutable(); + keys_.add(index, value); + onChanged(); + } else { + keysBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +         * Returns the value of the primary key of the modified row.
    +         * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + public Builder addKeys( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder + builderForValue) { + if (keysBuilder_ == null) { + ensureKeysIsMutable(); + keys_.add(builderForValue.build()); + onChanged(); + } else { + keysBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +         * Returns the value of the primary key of the modified row.
    +         * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + public Builder addKeys( + int index, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder + builderForValue) { + if (keysBuilder_ == null) { + ensureKeysIsMutable(); + keys_.add(index, builderForValue.build()); + onChanged(); + } else { + keysBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +         * Returns the value of the primary key of the modified row.
    +         * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + public Builder addAllKeys( + java.lang.Iterable< + ? extends com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue> + values) { + if (keysBuilder_ == null) { + ensureKeysIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, keys_); + onChanged(); + } else { + keysBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +         * Returns the value of the primary key of the modified row.
    +         * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + public Builder clearKeys() { + if (keysBuilder_ == null) { + keys_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + keysBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +         * Returns the value of the primary key of the modified row.
    +         * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + public Builder removeKeys(int index) { + if (keysBuilder_ == null) { + ensureKeysIsMutable(); + keys_.remove(index); + onChanged(); + } else { + keysBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +         * Returns the value of the primary key of the modified row.
    +         * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder + getKeysBuilder(int index) { + return internalGetKeysFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +         * Returns the value of the primary key of the modified row.
    +         * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder + getKeysOrBuilder(int index) { + if (keysBuilder_ == null) { + return keys_.get(index); + } else { + return keysBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +         * Returns the value of the primary key of the modified row.
    +         * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + public java.util.List< + ? extends + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder> + getKeysOrBuilderList() { + if (keysBuilder_ != null) { + return keysBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(keys_); + } + } + + /** + * + * + *
    +         * Returns the value of the primary key of the modified row.
    +         * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder + addKeysBuilder() { + return internalGetKeysFieldBuilder() + .addBuilder( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue + .getDefaultInstance()); + } + + /** + * + * + *
    +         * Returns the value of the primary key of the modified row.
    +         * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder + addKeysBuilder(int index) { + return internalGetKeysFieldBuilder() + .addBuilder( + index, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue + .getDefaultInstance()); + } + + /** + * + * + *
    +         * Returns the value of the primary key of the modified row.
    +         * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue keys = 1; + * + */ + public java.util.List< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder> + getKeysBuilderList() { + return internalGetKeysFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder> + internalGetKeysFieldBuilder() { + if (keysBuilder_ == null) { + keysBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder>( + keys_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + keys_ = null; + } + return keysBuilder_; + } + + private java.util.List + oldValues_ = java.util.Collections.emptyList(); + + private void ensureOldValuesIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + oldValues_ = + new java.util.ArrayList< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue>(oldValues_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder> + oldValuesBuilder_; + + /** + * + * + *
    +         * Returns the old values before the change for the modified columns.
    +         * Always empty for
    +         * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +         * or if old values are not being captured specified by
    +         * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + public java.util.List + getOldValuesList() { + if (oldValuesBuilder_ == null) { + return java.util.Collections.unmodifiableList(oldValues_); + } else { + return oldValuesBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +         * Returns the old values before the change for the modified columns.
    +         * Always empty for
    +         * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +         * or if old values are not being captured specified by
    +         * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + public int getOldValuesCount() { + if (oldValuesBuilder_ == null) { + return oldValues_.size(); + } else { + return oldValuesBuilder_.getCount(); + } + } + + /** + * + * + *
    +         * Returns the old values before the change for the modified columns.
    +         * Always empty for
    +         * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +         * or if old values are not being captured specified by
    +         * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue getOldValues( + int index) { + if (oldValuesBuilder_ == null) { + return oldValues_.get(index); + } else { + return oldValuesBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +         * Returns the old values before the change for the modified columns.
    +         * Always empty for
    +         * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +         * or if old values are not being captured specified by
    +         * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + public Builder setOldValues( + int index, com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue value) { + if (oldValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOldValuesIsMutable(); + oldValues_.set(index, value); + onChanged(); + } else { + oldValuesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +         * Returns the old values before the change for the modified columns.
    +         * Always empty for
    +         * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +         * or if old values are not being captured specified by
    +         * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + public Builder setOldValues( + int index, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder + builderForValue) { + if (oldValuesBuilder_ == null) { + ensureOldValuesIsMutable(); + oldValues_.set(index, builderForValue.build()); + onChanged(); + } else { + oldValuesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +         * Returns the old values before the change for the modified columns.
    +         * Always empty for
    +         * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +         * or if old values are not being captured specified by
    +         * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + public Builder addOldValues( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue value) { + if (oldValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOldValuesIsMutable(); + oldValues_.add(value); + onChanged(); + } else { + oldValuesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +         * Returns the old values before the change for the modified columns.
    +         * Always empty for
    +         * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +         * or if old values are not being captured specified by
    +         * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + public Builder addOldValues( + int index, com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue value) { + if (oldValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOldValuesIsMutable(); + oldValues_.add(index, value); + onChanged(); + } else { + oldValuesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +         * Returns the old values before the change for the modified columns.
    +         * Always empty for
    +         * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +         * or if old values are not being captured specified by
    +         * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + public Builder addOldValues( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder + builderForValue) { + if (oldValuesBuilder_ == null) { + ensureOldValuesIsMutable(); + oldValues_.add(builderForValue.build()); + onChanged(); + } else { + oldValuesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +         * Returns the old values before the change for the modified columns.
    +         * Always empty for
    +         * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +         * or if old values are not being captured specified by
    +         * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + public Builder addOldValues( + int index, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder + builderForValue) { + if (oldValuesBuilder_ == null) { + ensureOldValuesIsMutable(); + oldValues_.add(index, builderForValue.build()); + onChanged(); + } else { + oldValuesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +         * Returns the old values before the change for the modified columns.
    +         * Always empty for
    +         * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +         * or if old values are not being captured specified by
    +         * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + public Builder addAllOldValues( + java.lang.Iterable< + ? extends com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue> + values) { + if (oldValuesBuilder_ == null) { + ensureOldValuesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, oldValues_); + onChanged(); + } else { + oldValuesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +         * Returns the old values before the change for the modified columns.
    +         * Always empty for
    +         * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +         * or if old values are not being captured specified by
    +         * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + public Builder clearOldValues() { + if (oldValuesBuilder_ == null) { + oldValues_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + oldValuesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +         * Returns the old values before the change for the modified columns.
    +         * Always empty for
    +         * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +         * or if old values are not being captured specified by
    +         * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + public Builder removeOldValues(int index) { + if (oldValuesBuilder_ == null) { + ensureOldValuesIsMutable(); + oldValues_.remove(index); + onChanged(); + } else { + oldValuesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +         * Returns the old values before the change for the modified columns.
    +         * Always empty for
    +         * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +         * or if old values are not being captured specified by
    +         * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder + getOldValuesBuilder(int index) { + return internalGetOldValuesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +         * Returns the old values before the change for the modified columns.
    +         * Always empty for
    +         * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +         * or if old values are not being captured specified by
    +         * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder + getOldValuesOrBuilder(int index) { + if (oldValuesBuilder_ == null) { + return oldValues_.get(index); + } else { + return oldValuesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +         * Returns the old values before the change for the modified columns.
    +         * Always empty for
    +         * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +         * or if old values are not being captured specified by
    +         * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + public java.util.List< + ? extends + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder> + getOldValuesOrBuilderList() { + if (oldValuesBuilder_ != null) { + return oldValuesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(oldValues_); + } + } + + /** + * + * + *
    +         * Returns the old values before the change for the modified columns.
    +         * Always empty for
    +         * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +         * or if old values are not being captured specified by
    +         * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder + addOldValuesBuilder() { + return internalGetOldValuesFieldBuilder() + .addBuilder( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue + .getDefaultInstance()); + } + + /** + * + * + *
    +         * Returns the old values before the change for the modified columns.
    +         * Always empty for
    +         * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +         * or if old values are not being captured specified by
    +         * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder + addOldValuesBuilder(int index) { + return internalGetOldValuesFieldBuilder() + .addBuilder( + index, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue + .getDefaultInstance()); + } + + /** + * + * + *
    +         * Returns the old values before the change for the modified columns.
    +         * Always empty for
    +         * [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT],
    +         * or if old values are not being captured specified by
    +         * [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue old_values = 2; + * + */ + public java.util.List< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder> + getOldValuesBuilderList() { + return internalGetOldValuesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder> + internalGetOldValuesFieldBuilder() { + if (oldValuesBuilder_ == null) { + oldValuesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder>( + oldValues_, + ((bitField0_ & 0x00000002) != 0), + getParentForChildren(), + isClean()); + oldValues_ = null; + } + return oldValuesBuilder_; + } + + private java.util.List + newValues_ = java.util.Collections.emptyList(); + + private void ensureNewValuesIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + newValues_ = + new java.util.ArrayList< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue>(newValues_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder> + newValuesBuilder_; + + /** + * + * + *
    +         * Returns the new values after the change for the modified columns.
    +         * Always empty for
    +         * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + public java.util.List + getNewValuesList() { + if (newValuesBuilder_ == null) { + return java.util.Collections.unmodifiableList(newValues_); + } else { + return newValuesBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +         * Returns the new values after the change for the modified columns.
    +         * Always empty for
    +         * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + public int getNewValuesCount() { + if (newValuesBuilder_ == null) { + return newValues_.size(); + } else { + return newValuesBuilder_.getCount(); + } + } + + /** + * + * + *
    +         * Returns the new values after the change for the modified columns.
    +         * Always empty for
    +         * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue getNewValues( + int index) { + if (newValuesBuilder_ == null) { + return newValues_.get(index); + } else { + return newValuesBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +         * Returns the new values after the change for the modified columns.
    +         * Always empty for
    +         * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + public Builder setNewValues( + int index, com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue value) { + if (newValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureNewValuesIsMutable(); + newValues_.set(index, value); + onChanged(); + } else { + newValuesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +         * Returns the new values after the change for the modified columns.
    +         * Always empty for
    +         * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + public Builder setNewValues( + int index, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder + builderForValue) { + if (newValuesBuilder_ == null) { + ensureNewValuesIsMutable(); + newValues_.set(index, builderForValue.build()); + onChanged(); + } else { + newValuesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +         * Returns the new values after the change for the modified columns.
    +         * Always empty for
    +         * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + public Builder addNewValues( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue value) { + if (newValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureNewValuesIsMutable(); + newValues_.add(value); + onChanged(); + } else { + newValuesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +         * Returns the new values after the change for the modified columns.
    +         * Always empty for
    +         * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + public Builder addNewValues( + int index, com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue value) { + if (newValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureNewValuesIsMutable(); + newValues_.add(index, value); + onChanged(); + } else { + newValuesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +         * Returns the new values after the change for the modified columns.
    +         * Always empty for
    +         * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + public Builder addNewValues( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder + builderForValue) { + if (newValuesBuilder_ == null) { + ensureNewValuesIsMutable(); + newValues_.add(builderForValue.build()); + onChanged(); + } else { + newValuesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +         * Returns the new values after the change for the modified columns.
    +         * Always empty for
    +         * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + public Builder addNewValues( + int index, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder + builderForValue) { + if (newValuesBuilder_ == null) { + ensureNewValuesIsMutable(); + newValues_.add(index, builderForValue.build()); + onChanged(); + } else { + newValuesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +         * Returns the new values after the change for the modified columns.
    +         * Always empty for
    +         * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + public Builder addAllNewValues( + java.lang.Iterable< + ? extends com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue> + values) { + if (newValuesBuilder_ == null) { + ensureNewValuesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, newValues_); + onChanged(); + } else { + newValuesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +         * Returns the new values after the change for the modified columns.
    +         * Always empty for
    +         * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + public Builder clearNewValues() { + if (newValuesBuilder_ == null) { + newValues_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + newValuesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +         * Returns the new values after the change for the modified columns.
    +         * Always empty for
    +         * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + public Builder removeNewValues(int index) { + if (newValuesBuilder_ == null) { + ensureNewValuesIsMutable(); + newValues_.remove(index); + onChanged(); + } else { + newValuesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +         * Returns the new values after the change for the modified columns.
    +         * Always empty for
    +         * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder + getNewValuesBuilder(int index) { + return internalGetNewValuesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +         * Returns the new values after the change for the modified columns.
    +         * Always empty for
    +         * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder + getNewValuesOrBuilder(int index) { + if (newValuesBuilder_ == null) { + return newValues_.get(index); + } else { + return newValuesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +         * Returns the new values after the change for the modified columns.
    +         * Always empty for
    +         * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + public java.util.List< + ? extends + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder> + getNewValuesOrBuilderList() { + if (newValuesBuilder_ != null) { + return newValuesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(newValues_); + } + } + + /** + * + * + *
    +         * Returns the new values after the change for the modified columns.
    +         * Always empty for
    +         * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder + addNewValuesBuilder() { + return internalGetNewValuesFieldBuilder() + .addBuilder( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue + .getDefaultInstance()); + } + + /** + * + * + *
    +         * Returns the new values after the change for the modified columns.
    +         * Always empty for
    +         * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder + addNewValuesBuilder(int index) { + return internalGetNewValuesFieldBuilder() + .addBuilder( + index, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue + .getDefaultInstance()); + } + + /** + * + * + *
    +         * Returns the new values after the change for the modified columns.
    +         * Always empty for
    +         * [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE].
    +         * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue new_values = 3; + * + */ + public java.util.List< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder> + getNewValuesBuilderList() { + return internalGetNewValuesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder> + internalGetNewValuesFieldBuilder() { + if (newValuesBuilder_ == null) { + newValuesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValue.Builder, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModValueOrBuilder>( + newValues_, + ((bitField0_ & 0x00000004) != 0), + getParentForChildren(), + isClean()); + newValues_ = null; + } + return newValuesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod) + private static final com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod(); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Mod parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int COMMIT_TIMESTAMP_FIELD_NUMBER = 1; + private com.google.protobuf.Timestamp commitTimestamp_; + + /** + * + * + *
    +     * Indicates the timestamp in which the change was committed.
    +     * DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + * + * @return Whether the commitTimestamp field is set. + */ + @java.lang.Override + public boolean hasCommitTimestamp() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Indicates the timestamp in which the change was committed.
    +     * DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + * + * @return The commitTimestamp. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCommitTimestamp() { + return commitTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTimestamp_; + } + + /** + * + * + *
    +     * Indicates the timestamp in which the change was committed.
    +     * DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCommitTimestampOrBuilder() { + return commitTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTimestamp_; + } + + public static final int RECORD_SEQUENCE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object recordSequence_ = ""; + + /** + * + * + *
    +     * Record sequence numbers are unique and monotonically increasing (but not
    +     * necessarily contiguous) for a specific timestamp across record
    +     * types in the same partition. To guarantee ordered processing, the reader
    +     * should process records (of potentially different types) in
    +     * record_sequence order for a specific timestamp in the same partition.
    +     *
    +     * The record sequence number ordering across partitions is only meaningful
    +     * in the context of a specific transaction. Record sequence numbers are
    +     * unique across partitions for a specific transaction. Sort the
    +     * DataChangeRecords for the same
    +     * [server_transaction_id][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.server_transaction_id]
    +     * by
    +     * [record_sequence][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.record_sequence]
    +     * to reconstruct the ordering of the changes within the transaction.
    +     * 
    + * + * string record_sequence = 2; + * + * @return The recordSequence. + */ + @java.lang.Override + public java.lang.String getRecordSequence() { + java.lang.Object ref = recordSequence_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + recordSequence_ = s; + return s; + } + } + + /** + * + * + *
    +     * Record sequence numbers are unique and monotonically increasing (but not
    +     * necessarily contiguous) for a specific timestamp across record
    +     * types in the same partition. To guarantee ordered processing, the reader
    +     * should process records (of potentially different types) in
    +     * record_sequence order for a specific timestamp in the same partition.
    +     *
    +     * The record sequence number ordering across partitions is only meaningful
    +     * in the context of a specific transaction. Record sequence numbers are
    +     * unique across partitions for a specific transaction. Sort the
    +     * DataChangeRecords for the same
    +     * [server_transaction_id][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.server_transaction_id]
    +     * by
    +     * [record_sequence][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.record_sequence]
    +     * to reconstruct the ordering of the changes within the transaction.
    +     * 
    + * + * string record_sequence = 2; + * + * @return The bytes for recordSequence. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRecordSequenceBytes() { + java.lang.Object ref = recordSequence_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + recordSequence_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SERVER_TRANSACTION_ID_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object serverTransactionId_ = ""; + + /** + * + * + *
    +     * Provides a globally unique string that represents the transaction in
    +     * which the change was committed. Multiple transactions can have the same
    +     * commit timestamp, but each transaction has a unique
    +     * server_transaction_id.
    +     * 
    + * + * string server_transaction_id = 3; + * + * @return The serverTransactionId. + */ + @java.lang.Override + public java.lang.String getServerTransactionId() { + java.lang.Object ref = serverTransactionId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + serverTransactionId_ = s; + return s; + } + } + + /** + * + * + *
    +     * Provides a globally unique string that represents the transaction in
    +     * which the change was committed. Multiple transactions can have the same
    +     * commit timestamp, but each transaction has a unique
    +     * server_transaction_id.
    +     * 
    + * + * string server_transaction_id = 3; + * + * @return The bytes for serverTransactionId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getServerTransactionIdBytes() { + java.lang.Object ref = serverTransactionId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + serverTransactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int IS_LAST_RECORD_IN_TRANSACTION_IN_PARTITION_FIELD_NUMBER = 4; + private boolean isLastRecordInTransactionInPartition_ = false; + + /** + * + * + *
    +     * Indicates whether this is the last record for a transaction in the
    +     * current partition. Clients can use this field to determine when all
    +     * records for a transaction in the current partition have been received.
    +     * 
    + * + * bool is_last_record_in_transaction_in_partition = 4; + * + * @return The isLastRecordInTransactionInPartition. + */ + @java.lang.Override + public boolean getIsLastRecordInTransactionInPartition() { + return isLastRecordInTransactionInPartition_; + } + + public static final int TABLE_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object table_ = ""; + + /** + * + * + *
    +     * Name of the table affected by the change.
    +     * 
    + * + * string table = 5; + * + * @return The table. + */ + @java.lang.Override + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } + } + + /** + * + * + *
    +     * Name of the table affected by the change.
    +     * 
    + * + * string table = 5; + * + * @return The bytes for table. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int COLUMN_METADATA_FIELD_NUMBER = 6; + + @SuppressWarnings("serial") + private java.util.List + columnMetadata_; + + /** + * + * + *
    +     * Provides metadata describing the columns associated with the
    +     * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +     * below.
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + @java.lang.Override + public java.util.List + getColumnMetadataList() { + return columnMetadata_; + } + + /** + * + * + *
    +     * Provides metadata describing the columns associated with the
    +     * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +     * below.
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + @java.lang.Override + public java.util.List< + ? extends + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadataOrBuilder> + getColumnMetadataOrBuilderList() { + return columnMetadata_; + } + + /** + * + * + *
    +     * Provides metadata describing the columns associated with the
    +     * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +     * below.
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + @java.lang.Override + public int getColumnMetadataCount() { + return columnMetadata_.size(); + } + + /** + * + * + *
    +     * Provides metadata describing the columns associated with the
    +     * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +     * below.
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + getColumnMetadata(int index) { + return columnMetadata_.get(index); + } + + /** + * + * + *
    +     * Provides metadata describing the columns associated with the
    +     * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +     * below.
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadataOrBuilder + getColumnMetadataOrBuilder(int index) { + return columnMetadata_.get(index); + } + + public static final int MODS_FIELD_NUMBER = 7; + + @SuppressWarnings("serial") + private java.util.List mods_; + + /** + * + * + *
    +     * Describes the changes that were made.
    +     * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + @java.lang.Override + public java.util.List + getModsList() { + return mods_; + } + + /** + * + * + *
    +     * Describes the changes that were made.
    +     * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + @java.lang.Override + public java.util.List< + ? extends com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModOrBuilder> + getModsOrBuilderList() { + return mods_; + } + + /** + * + * + *
    +     * Describes the changes that were made.
    +     * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + @java.lang.Override + public int getModsCount() { + return mods_.size(); + } + + /** + * + * + *
    +     * Describes the changes that were made.
    +     * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod getMods(int index) { + return mods_.get(index); + } + + /** + * + * + *
    +     * Describes the changes that were made.
    +     * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModOrBuilder getModsOrBuilder( + int index) { + return mods_.get(index); + } + + public static final int MOD_TYPE_FIELD_NUMBER = 8; + private int modType_ = 0; + + /** + * + * + *
    +     * Describes the type of change.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType mod_type = 8; + * + * @return The enum numeric value on the wire for modType. + */ + @java.lang.Override + public int getModTypeValue() { + return modType_; + } + + /** + * + * + *
    +     * Describes the type of change.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType mod_type = 8; + * + * @return The modType. + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType getModType() { + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType result = + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.forNumber(modType_); + return result == null + ? com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.UNRECOGNIZED + : result; + } + + public static final int VALUE_CAPTURE_TYPE_FIELD_NUMBER = 9; + private int valueCaptureType_ = 0; + + /** + * + * + *
    +     * Describes the value capture type that was specified in the change stream
    +     * configuration when this change was captured.
    +     * 
    + * + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType value_capture_type = 9; + * + * + * @return The enum numeric value on the wire for valueCaptureType. + */ + @java.lang.Override + public int getValueCaptureTypeValue() { + return valueCaptureType_; + } + + /** + * + * + *
    +     * Describes the value capture type that was specified in the change stream
    +     * configuration when this change was captured.
    +     * 
    + * + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType value_capture_type = 9; + * + * + * @return The valueCaptureType. + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType + getValueCaptureType() { + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType result = + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType.forNumber( + valueCaptureType_); + return result == null + ? com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType.UNRECOGNIZED + : result; + } + + public static final int NUMBER_OF_RECORDS_IN_TRANSACTION_FIELD_NUMBER = 10; + private int numberOfRecordsInTransaction_ = 0; + + /** + * + * + *
    +     * Indicates the number of data change records that are part of this
    +     * transaction across all change stream partitions. This value can be used
    +     * to assemble all the records associated with a particular transaction.
    +     * 
    + * + * int32 number_of_records_in_transaction = 10; + * + * @return The numberOfRecordsInTransaction. + */ + @java.lang.Override + public int getNumberOfRecordsInTransaction() { + return numberOfRecordsInTransaction_; + } + + public static final int NUMBER_OF_PARTITIONS_IN_TRANSACTION_FIELD_NUMBER = 11; + private int numberOfPartitionsInTransaction_ = 0; + + /** + * + * + *
    +     * Indicates the number of partitions that return data change records for
    +     * this transaction. This value can be helpful in assembling all records
    +     * associated with a particular transaction.
    +     * 
    + * + * int32 number_of_partitions_in_transaction = 11; + * + * @return The numberOfPartitionsInTransaction. + */ + @java.lang.Override + public int getNumberOfPartitionsInTransaction() { + return numberOfPartitionsInTransaction_; + } + + public static final int TRANSACTION_TAG_FIELD_NUMBER = 12; + + @SuppressWarnings("serial") + private volatile java.lang.Object transactionTag_ = ""; + + /** + * + * + *
    +     * Indicates the transaction tag associated with this transaction.
    +     * 
    + * + * string transaction_tag = 12; + * + * @return The transactionTag. + */ + @java.lang.Override + public java.lang.String getTransactionTag() { + java.lang.Object ref = transactionTag_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionTag_ = s; + return s; + } + } + + /** + * + * + *
    +     * Indicates the transaction tag associated with this transaction.
    +     * 
    + * + * string transaction_tag = 12; + * + * @return The bytes for transactionTag. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTransactionTagBytes() { + java.lang.Object ref = transactionTag_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + transactionTag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int IS_SYSTEM_TRANSACTION_FIELD_NUMBER = 13; + private boolean isSystemTransaction_ = false; + + /** + * + * + *
    +     * Indicates whether the transaction is a system transaction. System
    +     * transactions include those issued by time-to-live (TTL), column backfill,
    +     * etc.
    +     * 
    + * + * bool is_system_transaction = 13; + * + * @return The isSystemTransaction. + */ + @java.lang.Override + public boolean getIsSystemTransaction() { + return isSystemTransaction_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getCommitTimestamp()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(recordSequence_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, recordSequence_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(serverTransactionId_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, serverTransactionId_); + } + if (isLastRecordInTransactionInPartition_ != false) { + output.writeBool(4, isLastRecordInTransactionInPartition_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(table_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, table_); + } + for (int i = 0; i < columnMetadata_.size(); i++) { + output.writeMessage(6, columnMetadata_.get(i)); + } + for (int i = 0; i < mods_.size(); i++) { + output.writeMessage(7, mods_.get(i)); + } + if (modType_ + != com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.MOD_TYPE_UNSPECIFIED + .getNumber()) { + output.writeEnum(8, modType_); + } + if (valueCaptureType_ + != com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType + .VALUE_CAPTURE_TYPE_UNSPECIFIED + .getNumber()) { + output.writeEnum(9, valueCaptureType_); + } + if (numberOfRecordsInTransaction_ != 0) { + output.writeInt32(10, numberOfRecordsInTransaction_); + } + if (numberOfPartitionsInTransaction_ != 0) 
{ + output.writeInt32(11, numberOfPartitionsInTransaction_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(transactionTag_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 12, transactionTag_); + } + if (isSystemTransaction_ != false) { + output.writeBool(13, isSystemTransaction_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getCommitTimestamp()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(recordSequence_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, recordSequence_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(serverTransactionId_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, serverTransactionId_); + } + if (isLastRecordInTransactionInPartition_ != false) { + size += + com.google.protobuf.CodedOutputStream.computeBoolSize( + 4, isLastRecordInTransactionInPartition_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(table_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, table_); + } + for (int i = 0; i < columnMetadata_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, columnMetadata_.get(i)); + } + for (int i = 0; i < mods_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, mods_.get(i)); + } + if (modType_ + != com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.MOD_TYPE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(8, modType_); + } + if (valueCaptureType_ + != com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType + .VALUE_CAPTURE_TYPE_UNSPECIFIED + .getNumber()) { + size += 
com.google.protobuf.CodedOutputStream.computeEnumSize(9, valueCaptureType_); + } + if (numberOfRecordsInTransaction_ != 0) { + size += + com.google.protobuf.CodedOutputStream.computeInt32Size( + 10, numberOfRecordsInTransaction_); + } + if (numberOfPartitionsInTransaction_ != 0) { + size += + com.google.protobuf.CodedOutputStream.computeInt32Size( + 11, numberOfPartitionsInTransaction_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(transactionTag_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(12, transactionTag_); + } + if (isSystemTransaction_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(13, isSystemTransaction_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord)) { + return super.equals(obj); + } + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord other = + (com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord) obj; + + if (hasCommitTimestamp() != other.hasCommitTimestamp()) return false; + if (hasCommitTimestamp()) { + if (!getCommitTimestamp().equals(other.getCommitTimestamp())) return false; + } + if (!getRecordSequence().equals(other.getRecordSequence())) return false; + if (!getServerTransactionId().equals(other.getServerTransactionId())) return false; + if (getIsLastRecordInTransactionInPartition() + != other.getIsLastRecordInTransactionInPartition()) return false; + if (!getTable().equals(other.getTable())) return false; + if (!getColumnMetadataList().equals(other.getColumnMetadataList())) return false; + if (!getModsList().equals(other.getModsList())) return false; + if (modType_ != other.modType_) return false; + if (valueCaptureType_ != other.valueCaptureType_) return false; + if (getNumberOfRecordsInTransaction() != 
other.getNumberOfRecordsInTransaction()) + return false; + if (getNumberOfPartitionsInTransaction() != other.getNumberOfPartitionsInTransaction()) + return false; + if (!getTransactionTag().equals(other.getTransactionTag())) return false; + if (getIsSystemTransaction() != other.getIsSystemTransaction()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasCommitTimestamp()) { + hash = (37 * hash) + COMMIT_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + getCommitTimestamp().hashCode(); + } + hash = (37 * hash) + RECORD_SEQUENCE_FIELD_NUMBER; + hash = (53 * hash) + getRecordSequence().hashCode(); + hash = (37 * hash) + SERVER_TRANSACTION_ID_FIELD_NUMBER; + hash = (53 * hash) + getServerTransactionId().hashCode(); + hash = (37 * hash) + IS_LAST_RECORD_IN_TRANSACTION_IN_PARTITION_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashBoolean(getIsLastRecordInTransactionInPartition()); + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + if (getColumnMetadataCount() > 0) { + hash = (37 * hash) + COLUMN_METADATA_FIELD_NUMBER; + hash = (53 * hash) + getColumnMetadataList().hashCode(); + } + if (getModsCount() > 0) { + hash = (37 * hash) + MODS_FIELD_NUMBER; + hash = (53 * hash) + getModsList().hashCode(); + } + hash = (37 * hash) + MOD_TYPE_FIELD_NUMBER; + hash = (53 * hash) + modType_; + hash = (37 * hash) + VALUE_CAPTURE_TYPE_FIELD_NUMBER; + hash = (53 * hash) + valueCaptureType_; + hash = (37 * hash) + NUMBER_OF_RECORDS_IN_TRANSACTION_FIELD_NUMBER; + hash = (53 * hash) + getNumberOfRecordsInTransaction(); + hash = (37 * hash) + NUMBER_OF_PARTITIONS_IN_TRANSACTION_FIELD_NUMBER; + hash = (53 * hash) + getNumberOfPartitionsInTransaction(); + hash = (37 * hash) + 
TRANSACTION_TAG_FIELD_NUMBER; + hash = (53 * hash) + getTransactionTag().hashCode(); + hash = (37 * hash) + IS_SYSTEM_TRANSACTION_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getIsSystemTransaction()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord parseFrom( + java.io.InputStream input) throws java.io.IOException { + 
return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == 
DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * A data change record contains a set of changes to a table with the same
    +     * modification type (insert, update, or delete) committed at the same commit
    +     * timestamp in one change stream partition for the same transaction. Multiple
    +     * data change records can be returned for the same transaction across
    +     * multiple change stream partitions.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.ChangeStreamRecord.DataChangeRecord} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ChangeStreamRecord.DataChangeRecord) + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecordOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.class, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Builder.class); + } + + // Construct using com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCommitTimestampFieldBuilder(); + internalGetColumnMetadataFieldBuilder(); + internalGetModsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + commitTimestamp_ = null; + if (commitTimestampBuilder_ != null) { + commitTimestampBuilder_.dispose(); + commitTimestampBuilder_ = null; + } + recordSequence_ = ""; + serverTransactionId_ = ""; + isLastRecordInTransactionInPartition_ = false; + table_ = ""; + if (columnMetadataBuilder_ == null) { + columnMetadata_ = 
java.util.Collections.emptyList(); + } else { + columnMetadata_ = null; + columnMetadataBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + if (modsBuilder_ == null) { + mods_ = java.util.Collections.emptyList(); + } else { + mods_ = null; + modsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000040); + modType_ = 0; + valueCaptureType_ = 0; + numberOfRecordsInTransaction_ = 0; + numberOfPartitionsInTransaction_ = 0; + transactionTag_ = ""; + isSystemTransaction_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_DataChangeRecord_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord getDefaultInstanceForType() { + return com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord build() { + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord buildPartial() { + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord result = + new com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord result) { + if (columnMetadataBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0)) { + columnMetadata_ = java.util.Collections.unmodifiableList(columnMetadata_); + bitField0_ = (bitField0_ & ~0x00000020); + } + result.columnMetadata_ = 
columnMetadata_; + } else { + result.columnMetadata_ = columnMetadataBuilder_.build(); + } + if (modsBuilder_ == null) { + if (((bitField0_ & 0x00000040) != 0)) { + mods_ = java.util.Collections.unmodifiableList(mods_); + bitField0_ = (bitField0_ & ~0x00000040); + } + result.mods_ = mods_; + } else { + result.mods_ = modsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.commitTimestamp_ = + commitTimestampBuilder_ == null ? commitTimestamp_ : commitTimestampBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.recordSequence_ = recordSequence_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.serverTransactionId_ = serverTransactionId_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.isLastRecordInTransactionInPartition_ = isLastRecordInTransactionInPartition_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.table_ = table_; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.modType_ = modType_; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.valueCaptureType_ = valueCaptureType_; + } + if (((from_bitField0_ & 0x00000200) != 0)) { + result.numberOfRecordsInTransaction_ = numberOfRecordsInTransaction_; + } + if (((from_bitField0_ & 0x00000400) != 0)) { + result.numberOfPartitionsInTransaction_ = numberOfPartitionsInTransaction_; + } + if (((from_bitField0_ & 0x00000800) != 0)) { + result.transactionTag_ = transactionTag_; + } + if (((from_bitField0_ & 0x00001000) != 0)) { + result.isSystemTransaction_ = isSystemTransaction_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord) { + return 
mergeFrom((com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord other) { + if (other == com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.getDefaultInstance()) + return this; + if (other.hasCommitTimestamp()) { + mergeCommitTimestamp(other.getCommitTimestamp()); + } + if (!other.getRecordSequence().isEmpty()) { + recordSequence_ = other.recordSequence_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getServerTransactionId().isEmpty()) { + serverTransactionId_ = other.serverTransactionId_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.getIsLastRecordInTransactionInPartition() != false) { + setIsLastRecordInTransactionInPartition(other.getIsLastRecordInTransactionInPartition()); + } + if (!other.getTable().isEmpty()) { + table_ = other.table_; + bitField0_ |= 0x00000010; + onChanged(); + } + if (columnMetadataBuilder_ == null) { + if (!other.columnMetadata_.isEmpty()) { + if (columnMetadata_.isEmpty()) { + columnMetadata_ = other.columnMetadata_; + bitField0_ = (bitField0_ & ~0x00000020); + } else { + ensureColumnMetadataIsMutable(); + columnMetadata_.addAll(other.columnMetadata_); + } + onChanged(); + } + } else { + if (!other.columnMetadata_.isEmpty()) { + if (columnMetadataBuilder_.isEmpty()) { + columnMetadataBuilder_.dispose(); + columnMetadataBuilder_ = null; + columnMetadata_ = other.columnMetadata_; + bitField0_ = (bitField0_ & ~0x00000020); + columnMetadataBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetColumnMetadataFieldBuilder() + : null; + } else { + columnMetadataBuilder_.addAllMessages(other.columnMetadata_); + } + } + } + if (modsBuilder_ == null) { + if (!other.mods_.isEmpty()) { + if (mods_.isEmpty()) { + mods_ = other.mods_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensureModsIsMutable(); + mods_.addAll(other.mods_); + } + onChanged(); + } + } else { + if (!other.mods_.isEmpty()) { + if (modsBuilder_.isEmpty()) { + modsBuilder_.dispose(); + modsBuilder_ = null; + mods_ = other.mods_; + bitField0_ = (bitField0_ & ~0x00000040); + modsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetModsFieldBuilder() + : null; + } else { + modsBuilder_.addAllMessages(other.mods_); + } + } + } + if (other.modType_ != 0) { + setModTypeValue(other.getModTypeValue()); + } + if (other.valueCaptureType_ != 0) { + setValueCaptureTypeValue(other.getValueCaptureTypeValue()); + } + if (other.getNumberOfRecordsInTransaction() != 0) { + setNumberOfRecordsInTransaction(other.getNumberOfRecordsInTransaction()); + } + if (other.getNumberOfPartitionsInTransaction() != 0) { + setNumberOfPartitionsInTransaction(other.getNumberOfPartitionsInTransaction()); + } + if (!other.getTransactionTag().isEmpty()) { + transactionTag_ = other.transactionTag_; + bitField0_ |= 0x00000800; + onChanged(); + } + if (other.getIsSystemTransaction() != false) { + setIsSystemTransaction(other.getIsSystemTransaction()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch 
(tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetCommitTimestampFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + recordSequence_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + serverTransactionId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 32: + { + isLastRecordInTransactionInPartition_ = input.readBool(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 42: + { + table_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 50: + { + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata m = + input.readMessage( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + .parser(), + extensionRegistry); + if (columnMetadataBuilder_ == null) { + ensureColumnMetadataIsMutable(); + columnMetadata_.add(m); + } else { + columnMetadataBuilder_.addMessage(m); + } + break; + } // case 50 + case 58: + { + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod m = + input.readMessage( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod.parser(), + extensionRegistry); + if (modsBuilder_ == null) { + ensureModsIsMutable(); + mods_.add(m); + } else { + modsBuilder_.addMessage(m); + } + break; + } // case 58 + case 64: + { + modType_ = input.readEnum(); + bitField0_ |= 0x00000080; + break; + } // case 64 + case 72: + { + valueCaptureType_ = input.readEnum(); + bitField0_ |= 0x00000100; + break; + } // case 72 + case 80: + { + numberOfRecordsInTransaction_ = input.readInt32(); + bitField0_ |= 0x00000200; + break; + } // case 80 + case 88: + { + numberOfPartitionsInTransaction_ = input.readInt32(); + bitField0_ |= 0x00000400; + break; + } // case 88 + case 98: + { + transactionTag_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000800; + break; + } // case 98 + 
case 104: + { + isSystemTransaction_ = input.readBool(); + bitField0_ |= 0x00001000; + break; + } // case 104 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Timestamp commitTimestamp_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + commitTimestampBuilder_; + + /** + * + * + *
    +       * Indicates the timestamp in which the change was committed.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + * + * @return Whether the commitTimestamp field is set. + */ + public boolean hasCommitTimestamp() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +       * Indicates the timestamp in which the change was committed.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + * + * @return The commitTimestamp. + */ + public com.google.protobuf.Timestamp getCommitTimestamp() { + if (commitTimestampBuilder_ == null) { + return commitTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTimestamp_; + } else { + return commitTimestampBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * Indicates the timestamp in which the change was committed.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + public Builder setCommitTimestamp(com.google.protobuf.Timestamp value) { + if (commitTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commitTimestamp_ = value; + } else { + commitTimestampBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Indicates the timestamp in which the change was committed.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + public Builder setCommitTimestamp(com.google.protobuf.Timestamp.Builder builderForValue) { + if (commitTimestampBuilder_ == null) { + commitTimestamp_ = builderForValue.build(); + } else { + commitTimestampBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Indicates the timestamp in which the change was committed.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + public Builder mergeCommitTimestamp(com.google.protobuf.Timestamp value) { + if (commitTimestampBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && commitTimestamp_ != null + && commitTimestamp_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCommitTimestampBuilder().mergeFrom(value); + } else { + commitTimestamp_ = value; + } + } else { + commitTimestampBuilder_.mergeFrom(value); + } + if (commitTimestamp_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * Indicates the timestamp in which the change was committed.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + public Builder clearCommitTimestamp() { + bitField0_ = (bitField0_ & ~0x00000001); + commitTimestamp_ = null; + if (commitTimestampBuilder_ != null) { + commitTimestampBuilder_.dispose(); + commitTimestampBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * Indicates the timestamp in which the change was committed.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + public com.google.protobuf.Timestamp.Builder getCommitTimestampBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetCommitTimestampFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Indicates the timestamp in which the change was committed.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + public com.google.protobuf.TimestampOrBuilder getCommitTimestampOrBuilder() { + if (commitTimestampBuilder_ != null) { + return commitTimestampBuilder_.getMessageOrBuilder(); + } else { + return commitTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTimestamp_; + } + } + + /** + * + * + *
    +       * Indicates the timestamp in which the change was committed.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCommitTimestampFieldBuilder() { + if (commitTimestampBuilder_ == null) { + commitTimestampBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCommitTimestamp(), getParentForChildren(), isClean()); + commitTimestamp_ = null; + } + return commitTimestampBuilder_; + } + + private java.lang.Object recordSequence_ = ""; + + /** + * + * + *
    +       * Record sequence numbers are unique and monotonically increasing (but not
    +       * necessarily contiguous) for a specific timestamp across record
    +       * types in the same partition. To guarantee ordered processing, the reader
    +       * should process records (of potentially different types) in
    +       * record_sequence order for a specific timestamp in the same partition.
    +       *
    +       * The record sequence number ordering across partitions is only meaningful
    +       * in the context of a specific transaction. Record sequence numbers are
    +       * unique across partitions for a specific transaction. Sort the
    +       * DataChangeRecords for the same
    +       * [server_transaction_id][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.server_transaction_id]
    +       * by
    +       * [record_sequence][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.record_sequence]
    +       * to reconstruct the ordering of the changes within the transaction.
    +       * 
    + * + * string record_sequence = 2; + * + * @return The recordSequence. + */ + public java.lang.String getRecordSequence() { + java.lang.Object ref = recordSequence_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + recordSequence_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * Record sequence numbers are unique and monotonically increasing (but not
    +       * necessarily contiguous) for a specific timestamp across record
    +       * types in the same partition. To guarantee ordered processing, the reader
    +       * should process records (of potentially different types) in
    +       * record_sequence order for a specific timestamp in the same partition.
    +       *
    +       * The record sequence number ordering across partitions is only meaningful
    +       * in the context of a specific transaction. Record sequence numbers are
    +       * unique across partitions for a specific transaction. Sort the
    +       * DataChangeRecords for the same
    +       * [server_transaction_id][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.server_transaction_id]
    +       * by
    +       * [record_sequence][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.record_sequence]
    +       * to reconstruct the ordering of the changes within the transaction.
    +       * 
    + * + * string record_sequence = 2; + * + * @return The bytes for recordSequence. + */ + public com.google.protobuf.ByteString getRecordSequenceBytes() { + java.lang.Object ref = recordSequence_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + recordSequence_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * Record sequence numbers are unique and monotonically increasing (but not
    +       * necessarily contiguous) for a specific timestamp across record
    +       * types in the same partition. To guarantee ordered processing, the reader
    +       * should process records (of potentially different types) in
    +       * record_sequence order for a specific timestamp in the same partition.
    +       *
    +       * The record sequence number ordering across partitions is only meaningful
    +       * in the context of a specific transaction. Record sequence numbers are
    +       * unique across partitions for a specific transaction. Sort the
    +       * DataChangeRecords for the same
    +       * [server_transaction_id][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.server_transaction_id]
    +       * by
    +       * [record_sequence][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.record_sequence]
    +       * to reconstruct the ordering of the changes within the transaction.
    +       * 
    + * + * string record_sequence = 2; + * + * @param value The recordSequence to set. + * @return This builder for chaining. + */ + public Builder setRecordSequence(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + recordSequence_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Record sequence numbers are unique and monotonically increasing (but not
    +       * necessarily contiguous) for a specific timestamp across record
    +       * types in the same partition. To guarantee ordered processing, the reader
    +       * should process records (of potentially different types) in
    +       * record_sequence order for a specific timestamp in the same partition.
    +       *
    +       * The record sequence number ordering across partitions is only meaningful
    +       * in the context of a specific transaction. Record sequence numbers are
    +       * unique across partitions for a specific transaction. Sort the
    +       * DataChangeRecords for the same
    +       * [server_transaction_id][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.server_transaction_id]
    +       * by
    +       * [record_sequence][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.record_sequence]
    +       * to reconstruct the ordering of the changes within the transaction.
    +       * 
    + * + * string record_sequence = 2; + * + * @return This builder for chaining. + */ + public Builder clearRecordSequence() { + recordSequence_ = getDefaultInstance().getRecordSequence(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Record sequence numbers are unique and monotonically increasing (but not
    +       * necessarily contiguous) for a specific timestamp across record
    +       * types in the same partition. To guarantee ordered processing, the reader
    +       * should process records (of potentially different types) in
    +       * record_sequence order for a specific timestamp in the same partition.
    +       *
    +       * The record sequence number ordering across partitions is only meaningful
    +       * in the context of a specific transaction. Record sequence numbers are
    +       * unique across partitions for a specific transaction. Sort the
    +       * DataChangeRecords for the same
    +       * [server_transaction_id][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.server_transaction_id]
    +       * by
    +       * [record_sequence][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.record_sequence]
    +       * to reconstruct the ordering of the changes within the transaction.
    +       * 
    + * + * string record_sequence = 2; + * + * @param value The bytes for recordSequence to set. + * @return This builder for chaining. + */ + public Builder setRecordSequenceBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + recordSequence_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object serverTransactionId_ = ""; + + /** + * + * + *
    +       * Provides a globally unique string that represents the transaction in
    +       * which the change was committed. Multiple transactions can have the same
    +       * commit timestamp, but each transaction has a unique
    +       * server_transaction_id.
    +       * 
    + * + * string server_transaction_id = 3; + * + * @return The serverTransactionId. + */ + public java.lang.String getServerTransactionId() { + java.lang.Object ref = serverTransactionId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + serverTransactionId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * Provides a globally unique string that represents the transaction in
    +       * which the change was committed. Multiple transactions can have the same
    +       * commit timestamp, but each transaction has a unique
    +       * server_transaction_id.
    +       * 
    + * + * string server_transaction_id = 3; + * + * @return The bytes for serverTransactionId. + */ + public com.google.protobuf.ByteString getServerTransactionIdBytes() { + java.lang.Object ref = serverTransactionId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + serverTransactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * Provides a globally unique string that represents the transaction in
    +       * which the change was committed. Multiple transactions can have the same
    +       * commit timestamp, but each transaction has a unique
    +       * server_transaction_id.
    +       * 
    + * + * string server_transaction_id = 3; + * + * @param value The serverTransactionId to set. + * @return This builder for chaining. + */ + public Builder setServerTransactionId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + serverTransactionId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Provides a globally unique string that represents the transaction in
    +       * which the change was committed. Multiple transactions can have the same
    +       * commit timestamp, but each transaction has a unique
    +       * server_transaction_id.
    +       * 
    + * + * string server_transaction_id = 3; + * + * @return This builder for chaining. + */ + public Builder clearServerTransactionId() { + serverTransactionId_ = getDefaultInstance().getServerTransactionId(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Provides a globally unique string that represents the transaction in
    +       * which the change was committed. Multiple transactions can have the same
    +       * commit timestamp, but each transaction has a unique
    +       * server_transaction_id.
    +       * 
    + * + * string server_transaction_id = 3; + * + * @param value The bytes for serverTransactionId to set. + * @return This builder for chaining. + */ + public Builder setServerTransactionIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + serverTransactionId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private boolean isLastRecordInTransactionInPartition_; + + /** + * + * + *
    +       * Indicates whether this is the last record for a transaction in the
    +       * current partition. Clients can use this field to determine when all
    +       * records for a transaction in the current partition have been received.
    +       * 
    + * + * bool is_last_record_in_transaction_in_partition = 4; + * + * @return The isLastRecordInTransactionInPartition. + */ + @java.lang.Override + public boolean getIsLastRecordInTransactionInPartition() { + return isLastRecordInTransactionInPartition_; + } + + /** + * + * + *
    +       * Indicates whether this is the last record for a transaction in the
    +       * current partition. Clients can use this field to determine when all
    +       * records for a transaction in the current partition have been received.
    +       * 
    + * + * bool is_last_record_in_transaction_in_partition = 4; + * + * @param value The isLastRecordInTransactionInPartition to set. + * @return This builder for chaining. + */ + public Builder setIsLastRecordInTransactionInPartition(boolean value) { + + isLastRecordInTransactionInPartition_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Indicates whether this is the last record for a transaction in the
    +       * current partition. Clients can use this field to determine when all
    +       * records for a transaction in the current partition have been received.
    +       * 
    + * + * bool is_last_record_in_transaction_in_partition = 4; + * + * @return This builder for chaining. + */ + public Builder clearIsLastRecordInTransactionInPartition() { + bitField0_ = (bitField0_ & ~0x00000008); + isLastRecordInTransactionInPartition_ = false; + onChanged(); + return this; + } + + private java.lang.Object table_ = ""; + + /** + * + * + *
    +       * Name of the table affected by the change.
    +       * 
    + * + * string table = 5; + * + * @return The table. + */ + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * Name of the table affected by the change.
    +       * 
    + * + * string table = 5; + * + * @return The bytes for table. + */ + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * Name of the table affected by the change.
    +       * 
    + * + * string table = 5; + * + * @param value The table to set. + * @return This builder for chaining. + */ + public Builder setTable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Name of the table affected by the change.
    +       * 
    + * + * string table = 5; + * + * @return This builder for chaining. + */ + public Builder clearTable() { + table_ = getDefaultInstance().getTable(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Name of the table affected by the change.
    +       * 
    + * + * string table = 5; + * + * @param value The bytes for table to set. + * @return This builder for chaining. + */ + public Builder setTableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + table_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + private java.util.List< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata> + columnMetadata_ = java.util.Collections.emptyList(); + + private void ensureColumnMetadataIsMutable() { + if (!((bitField0_ & 0x00000020) != 0)) { + columnMetadata_ = + new java.util.ArrayList< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata>( + columnMetadata_); + bitField0_ |= 0x00000020; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata.Builder, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadataOrBuilder> + columnMetadataBuilder_; + + /** + * + * + *
    +       * Provides metadata describing the columns associated with the
    +       * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +       * below.
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + public java.util.List< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata> + getColumnMetadataList() { + if (columnMetadataBuilder_ == null) { + return java.util.Collections.unmodifiableList(columnMetadata_); + } else { + return columnMetadataBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +       * Provides metadata describing the columns associated with the
    +       * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +       * below.
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + public int getColumnMetadataCount() { + if (columnMetadataBuilder_ == null) { + return columnMetadata_.size(); + } else { + return columnMetadataBuilder_.getCount(); + } + } + + /** + * + * + *
    +       * Provides metadata describing the columns associated with the
    +       * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +       * below.
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + getColumnMetadata(int index) { + if (columnMetadataBuilder_ == null) { + return columnMetadata_.get(index); + } else { + return columnMetadataBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +       * Provides metadata describing the columns associated with the
    +       * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +       * below.
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + public Builder setColumnMetadata( + int index, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata value) { + if (columnMetadataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnMetadataIsMutable(); + columnMetadata_.set(index, value); + onChanged(); + } else { + columnMetadataBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * Provides metadata describing the columns associated with the
    +       * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +       * below.
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + public Builder setColumnMetadata( + int index, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata.Builder + builderForValue) { + if (columnMetadataBuilder_ == null) { + ensureColumnMetadataIsMutable(); + columnMetadata_.set(index, builderForValue.build()); + onChanged(); + } else { + columnMetadataBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * Provides metadata describing the columns associated with the
    +       * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +       * below.
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + public Builder addColumnMetadata( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata value) { + if (columnMetadataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnMetadataIsMutable(); + columnMetadata_.add(value); + onChanged(); + } else { + columnMetadataBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +       * Provides metadata describing the columns associated with the
    +       * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +       * below.
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + public Builder addColumnMetadata( + int index, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata value) { + if (columnMetadataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnMetadataIsMutable(); + columnMetadata_.add(index, value); + onChanged(); + } else { + columnMetadataBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * Provides metadata describing the columns associated with the
    +       * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +       * below.
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + public Builder addColumnMetadata( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata.Builder + builderForValue) { + if (columnMetadataBuilder_ == null) { + ensureColumnMetadataIsMutable(); + columnMetadata_.add(builderForValue.build()); + onChanged(); + } else { + columnMetadataBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * Provides metadata describing the columns associated with the
    +       * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +       * below.
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + public Builder addColumnMetadata( + int index, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata.Builder + builderForValue) { + if (columnMetadataBuilder_ == null) { + ensureColumnMetadataIsMutable(); + columnMetadata_.add(index, builderForValue.build()); + onChanged(); + } else { + columnMetadataBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * Provides metadata describing the columns associated with the
    +       * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +       * below.
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + public Builder addAllColumnMetadata( + java.lang.Iterable< + ? extends + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata> + values) { + if (columnMetadataBuilder_ == null) { + ensureColumnMetadataIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, columnMetadata_); + onChanged(); + } else { + columnMetadataBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +       * Provides metadata describing the columns associated with the
    +       * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +       * below.
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + public Builder clearColumnMetadata() { + if (columnMetadataBuilder_ == null) { + columnMetadata_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + } else { + columnMetadataBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +       * Provides metadata describing the columns associated with the
    +       * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +       * below.
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + public Builder removeColumnMetadata(int index) { + if (columnMetadataBuilder_ == null) { + ensureColumnMetadataIsMutable(); + columnMetadata_.remove(index); + onChanged(); + } else { + columnMetadataBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +       * Provides metadata describing the columns associated with the
    +       * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +       * below.
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata.Builder + getColumnMetadataBuilder(int index) { + return internalGetColumnMetadataFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +       * Provides metadata describing the columns associated with the
    +       * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +       * below.
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadataOrBuilder + getColumnMetadataOrBuilder(int index) { + if (columnMetadataBuilder_ == null) { + return columnMetadata_.get(index); + } else { + return columnMetadataBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +       * Provides metadata describing the columns associated with the
    +       * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +       * below.
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + public java.util.List< + ? extends + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadataOrBuilder> + getColumnMetadataOrBuilderList() { + if (columnMetadataBuilder_ != null) { + return columnMetadataBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(columnMetadata_); + } + } + + /** + * + * + *
    +       * Provides metadata describing the columns associated with the
    +       * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +       * below.
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata.Builder + addColumnMetadataBuilder() { + return internalGetColumnMetadataFieldBuilder() + .addBuilder( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + .getDefaultInstance()); + } + + /** + * + * + *
    +       * Provides metadata describing the columns associated with the
    +       * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +       * below.
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata.Builder + addColumnMetadataBuilder(int index) { + return internalGetColumnMetadataFieldBuilder() + .addBuilder( + index, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata + .getDefaultInstance()); + } + + /** + * + * + *
    +       * Provides metadata describing the columns associated with the
    +       * [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed
    +       * below.
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata column_metadata = 6; + * + */ + public java.util.List< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata.Builder> + getColumnMetadataBuilderList() { + return internalGetColumnMetadataFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata.Builder, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadataOrBuilder> + internalGetColumnMetadataFieldBuilder() { + if (columnMetadataBuilder_ == null) { + columnMetadataBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ColumnMetadata.Builder, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord + .ColumnMetadataOrBuilder>( + columnMetadata_, + ((bitField0_ & 0x00000020) != 0), + getParentForChildren(), + isClean()); + columnMetadata_ = null; + } + return columnMetadataBuilder_; + } + + private java.util.List mods_ = + java.util.Collections.emptyList(); + + private void ensureModsIsMutable() { + if (!((bitField0_ & 0x00000040) != 0)) { + mods_ = + new java.util.ArrayList< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod>(mods_); + bitField0_ |= 0x00000040; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod.Builder, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModOrBuilder> + modsBuilder_; + + /** + * + * + *
    +       * Describes the changes that were made.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + public java.util.List + getModsList() { + if (modsBuilder_ == null) { + return java.util.Collections.unmodifiableList(mods_); + } else { + return modsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +       * Describes the changes that were made.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + public int getModsCount() { + if (modsBuilder_ == null) { + return mods_.size(); + } else { + return modsBuilder_.getCount(); + } + } + + /** + * + * + *
    +       * Describes the changes that were made.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod getMods(int index) { + if (modsBuilder_ == null) { + return mods_.get(index); + } else { + return modsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +       * Describes the changes that were made.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + public Builder setMods( + int index, com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod value) { + if (modsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureModsIsMutable(); + mods_.set(index, value); + onChanged(); + } else { + modsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * Describes the changes that were made.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + public Builder setMods( + int index, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod.Builder builderForValue) { + if (modsBuilder_ == null) { + ensureModsIsMutable(); + mods_.set(index, builderForValue.build()); + onChanged(); + } else { + modsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * Describes the changes that were made.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + public Builder addMods(com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod value) { + if (modsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureModsIsMutable(); + mods_.add(value); + onChanged(); + } else { + modsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +       * Describes the changes that were made.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + public Builder addMods( + int index, com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod value) { + if (modsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureModsIsMutable(); + mods_.add(index, value); + onChanged(); + } else { + modsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * Describes the changes that were made.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + public Builder addMods( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod.Builder builderForValue) { + if (modsBuilder_ == null) { + ensureModsIsMutable(); + mods_.add(builderForValue.build()); + onChanged(); + } else { + modsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * Describes the changes that were made.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + public Builder addMods( + int index, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod.Builder builderForValue) { + if (modsBuilder_ == null) { + ensureModsIsMutable(); + mods_.add(index, builderForValue.build()); + onChanged(); + } else { + modsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * Describes the changes that were made.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + public Builder addAllMods( + java.lang.Iterable< + ? extends com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod> + values) { + if (modsBuilder_ == null) { + ensureModsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, mods_); + onChanged(); + } else { + modsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +       * Describes the changes that were made.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + public Builder clearMods() { + if (modsBuilder_ == null) { + mods_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + } else { + modsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +       * Describes the changes that were made.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + public Builder removeMods(int index) { + if (modsBuilder_ == null) { + ensureModsIsMutable(); + mods_.remove(index); + onChanged(); + } else { + modsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +       * Describes the changes that were made.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod.Builder getModsBuilder( + int index) { + return internalGetModsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +       * Describes the changes that were made.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModOrBuilder + getModsOrBuilder(int index) { + if (modsBuilder_ == null) { + return mods_.get(index); + } else { + return modsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +       * Describes the changes that were made.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + public java.util.List< + ? extends com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModOrBuilder> + getModsOrBuilderList() { + if (modsBuilder_ != null) { + return modsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(mods_); + } + } + + /** + * + * + *
    +       * Describes the changes that were made.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod.Builder + addModsBuilder() { + return internalGetModsFieldBuilder() + .addBuilder( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod.getDefaultInstance()); + } + + /** + * + * + *
    +       * Describes the changes that were made.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod.Builder addModsBuilder( + int index) { + return internalGetModsFieldBuilder() + .addBuilder( + index, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod.getDefaultInstance()); + } + + /** + * + * + *
    +       * Describes the changes that were made.
    +       * 
    + * + * repeated .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod mods = 7; + */ + public java.util.List + getModsBuilderList() { + return internalGetModsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod.Builder, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModOrBuilder> + internalGetModsFieldBuilder() { + if (modsBuilder_ == null) { + modsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod.Builder, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModOrBuilder>( + mods_, ((bitField0_ & 0x00000040) != 0), getParentForChildren(), isClean()); + mods_ = null; + } + return modsBuilder_; + } + + private int modType_ = 0; + + /** + * + * + *
    +       * Describes the type of change.
    +       * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType mod_type = 8; + * + * @return The enum numeric value on the wire for modType. + */ + @java.lang.Override + public int getModTypeValue() { + return modType_; + } + + /** + * + * + *
    +       * Describes the type of change.
    +       * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType mod_type = 8; + * + * @param value The enum numeric value on the wire for modType to set. + * @return This builder for chaining. + */ + public Builder setModTypeValue(int value) { + modType_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Describes the type of change.
    +       * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType mod_type = 8; + * + * @return The modType. + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType getModType() { + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType result = + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.forNumber(modType_); + return result == null + ? com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +       * Describes the type of change.
    +       * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType mod_type = 8; + * + * @param value The modType to set. + * @return This builder for chaining. + */ + public Builder setModType( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000080; + modType_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Describes the type of change.
    +       * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType mod_type = 8; + * + * @return This builder for chaining. + */ + public Builder clearModType() { + bitField0_ = (bitField0_ & ~0x00000080); + modType_ = 0; + onChanged(); + return this; + } + + private int valueCaptureType_ = 0; + + /** + * + * + *
    +       * Describes the value capture type that was specified in the change stream
    +       * configuration when this change was captured.
    +       * 
    + * + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType value_capture_type = 9; + * + * + * @return The enum numeric value on the wire for valueCaptureType. + */ + @java.lang.Override + public int getValueCaptureTypeValue() { + return valueCaptureType_; + } + + /** + * + * + *
    +       * Describes the value capture type that was specified in the change stream
    +       * configuration when this change was captured.
    +       * 
    + * + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType value_capture_type = 9; + * + * + * @param value The enum numeric value on the wire for valueCaptureType to set. + * @return This builder for chaining. + */ + public Builder setValueCaptureTypeValue(int value) { + valueCaptureType_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Describes the value capture type that was specified in the change stream
    +       * configuration when this change was captured.
    +       * 
    + * + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType value_capture_type = 9; + * + * + * @return The valueCaptureType. + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType + getValueCaptureType() { + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType result = + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType.forNumber( + valueCaptureType_); + return result == null + ? com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType + .UNRECOGNIZED + : result; + } + + /** + * + * + *
    +       * Describes the value capture type that was specified in the change stream
    +       * configuration when this change was captured.
    +       * 
    + * + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType value_capture_type = 9; + * + * + * @param value The valueCaptureType to set. + * @return This builder for chaining. + */ + public Builder setValueCaptureType( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000100; + valueCaptureType_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Describes the value capture type that was specified in the change stream
    +       * configuration when this change was captured.
    +       * 
    + * + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType value_capture_type = 9; + * + * + * @return This builder for chaining. + */ + public Builder clearValueCaptureType() { + bitField0_ = (bitField0_ & ~0x00000100); + valueCaptureType_ = 0; + onChanged(); + return this; + } + + private int numberOfRecordsInTransaction_; + + /** + * + * + *
    +       * Indicates the number of data change records that are part of this
    +       * transaction across all change stream partitions. This value can be used
    +       * to assemble all the records associated with a particular transaction.
    +       * 
    + * + * int32 number_of_records_in_transaction = 10; + * + * @return The numberOfRecordsInTransaction. + */ + @java.lang.Override + public int getNumberOfRecordsInTransaction() { + return numberOfRecordsInTransaction_; + } + + /** + * + * + *
    +       * Indicates the number of data change records that are part of this
    +       * transaction across all change stream partitions. This value can be used
    +       * to assemble all the records associated with a particular transaction.
    +       * 
    + * + * int32 number_of_records_in_transaction = 10; + * + * @param value The numberOfRecordsInTransaction to set. + * @return This builder for chaining. + */ + public Builder setNumberOfRecordsInTransaction(int value) { + + numberOfRecordsInTransaction_ = value; + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Indicates the number of data change records that are part of this
    +       * transaction across all change stream partitions. This value can be used
    +       * to assemble all the records associated with a particular transaction.
    +       * 
    + * + * int32 number_of_records_in_transaction = 10; + * + * @return This builder for chaining. + */ + public Builder clearNumberOfRecordsInTransaction() { + bitField0_ = (bitField0_ & ~0x00000200); + numberOfRecordsInTransaction_ = 0; + onChanged(); + return this; + } + + private int numberOfPartitionsInTransaction_; + + /** + * + * + *
    +       * Indicates the number of partitions that return data change records for
    +       * this transaction. This value can be helpful in assembling all records
    +       * associated with a particular transaction.
    +       * 
    + * + * int32 number_of_partitions_in_transaction = 11; + * + * @return The numberOfPartitionsInTransaction. + */ + @java.lang.Override + public int getNumberOfPartitionsInTransaction() { + return numberOfPartitionsInTransaction_; + } + + /** + * + * + *
    +       * Indicates the number of partitions that return data change records for
    +       * this transaction. This value can be helpful in assembling all records
    +       * associated with a particular transaction.
    +       * 
    + * + * int32 number_of_partitions_in_transaction = 11; + * + * @param value The numberOfPartitionsInTransaction to set. + * @return This builder for chaining. + */ + public Builder setNumberOfPartitionsInTransaction(int value) { + + numberOfPartitionsInTransaction_ = value; + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Indicates the number of partitions that return data change records for
    +       * this transaction. This value can be helpful in assembling all records
    +       * associated with a particular transaction.
    +       * 
    + * + * int32 number_of_partitions_in_transaction = 11; + * + * @return This builder for chaining. + */ + public Builder clearNumberOfPartitionsInTransaction() { + bitField0_ = (bitField0_ & ~0x00000400); + numberOfPartitionsInTransaction_ = 0; + onChanged(); + return this; + } + + private java.lang.Object transactionTag_ = ""; + + /** + * + * + *
    +       * Indicates the transaction tag associated with this transaction.
    +       * 
    + * + * string transaction_tag = 12; + * + * @return The transactionTag. + */ + public java.lang.String getTransactionTag() { + java.lang.Object ref = transactionTag_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionTag_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * Indicates the transaction tag associated with this transaction.
    +       * 
    + * + * string transaction_tag = 12; + * + * @return The bytes for transactionTag. + */ + public com.google.protobuf.ByteString getTransactionTagBytes() { + java.lang.Object ref = transactionTag_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + transactionTag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * Indicates the transaction tag associated with this transaction.
    +       * 
    + * + * string transaction_tag = 12; + * + * @param value The transactionTag to set. + * @return This builder for chaining. + */ + public Builder setTransactionTag(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + transactionTag_ = value; + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Indicates the transaction tag associated with this transaction.
    +       * 
    + * + * string transaction_tag = 12; + * + * @return This builder for chaining. + */ + public Builder clearTransactionTag() { + transactionTag_ = getDefaultInstance().getTransactionTag(); + bitField0_ = (bitField0_ & ~0x00000800); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Indicates the transaction tag associated with this transaction.
    +       * 
    + * + * string transaction_tag = 12; + * + * @param value The bytes for transactionTag to set. + * @return This builder for chaining. + */ + public Builder setTransactionTagBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + transactionTag_ = value; + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + private boolean isSystemTransaction_; + + /** + * + * + *
    +       * Indicates whether the transaction is a system transaction. System
    +       * transactions include those issued by time-to-live (TTL), column backfill,
    +       * etc.
    +       * 
    + * + * bool is_system_transaction = 13; + * + * @return The isSystemTransaction. + */ + @java.lang.Override + public boolean getIsSystemTransaction() { + return isSystemTransaction_; + } + + /** + * + * + *
    +       * Indicates whether the transaction is a system transaction. System
    +       * transactions include those issued by time-to-live (TTL), column backfill,
    +       * etc.
    +       * 
    + * + * bool is_system_transaction = 13; + * + * @param value The isSystemTransaction to set. + * @return This builder for chaining. + */ + public Builder setIsSystemTransaction(boolean value) { + + isSystemTransaction_ = value; + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Indicates whether the transaction is a system transaction. System
    +       * transactions include those issued by time-to-live (TTL), column backfill,
    +       * etc.
    +       * 
    + * + * bool is_system_transaction = 13; + * + * @return This builder for chaining. + */ + public Builder clearIsSystemTransaction() { + bitField0_ = (bitField0_ & ~0x00001000); + isSystemTransaction_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ChangeStreamRecord.DataChangeRecord) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ChangeStreamRecord.DataChangeRecord) + private static final com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord(); + } + + public static com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DataChangeRecord parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord getDefaultInstanceForType() { + 
return DEFAULT_INSTANCE; + } + } + + public interface HeartbeatRecordOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ChangeStreamRecord.HeartbeatRecord) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * Indicates the timestamp at which the query has returned all the records
    +     * in the change stream partition with timestamp <= heartbeat timestamp.
    +     * The heartbeat timestamp will not be the same as the timestamps of other
    +     * record types in the same partition.
    +     * 
    + * + * .google.protobuf.Timestamp timestamp = 1; + * + * @return Whether the timestamp field is set. + */ + boolean hasTimestamp(); + + /** + * + * + *
    +     * Indicates the timestamp at which the query has returned all the records
    +     * in the change stream partition with timestamp <= heartbeat timestamp.
    +     * The heartbeat timestamp will not be the same as the timestamps of other
    +     * record types in the same partition.
    +     * 
    + * + * .google.protobuf.Timestamp timestamp = 1; + * + * @return The timestamp. + */ + com.google.protobuf.Timestamp getTimestamp(); + + /** + * + * + *
    +     * Indicates the timestamp at which the query has returned all the records
    +     * in the change stream partition with timestamp <= heartbeat timestamp.
    +     * The heartbeat timestamp will not be the same as the timestamps of other
    +     * record types in the same partition.
    +     * 
    + * + * .google.protobuf.Timestamp timestamp = 1; + */ + com.google.protobuf.TimestampOrBuilder getTimestampOrBuilder(); + } + + /** + * + * + *
    +   * A heartbeat record is returned as a progress indicator, when there are no
    +   * data changes or any other partition record types in the change stream
    +   * partition.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.ChangeStreamRecord.HeartbeatRecord} + */ + public static final class HeartbeatRecord extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ChangeStreamRecord.HeartbeatRecord) + HeartbeatRecordOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "HeartbeatRecord"); + } + + // Use HeartbeatRecord.newBuilder() to construct. + private HeartbeatRecord(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private HeartbeatRecord() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_HeartbeatRecord_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_HeartbeatRecord_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord.class, + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord.Builder.class); + } + + private int bitField0_; + public static final int TIMESTAMP_FIELD_NUMBER = 1; + private com.google.protobuf.Timestamp timestamp_; + + /** + * + * + *
    +     * Indicates the timestamp at which the query has returned all the records
    +     * in the change stream partition with timestamp <= heartbeat timestamp.
    +     * The heartbeat timestamp will not be the same as the timestamps of other
    +     * record types in the same partition.
    +     * 
    + * + * .google.protobuf.Timestamp timestamp = 1; + * + * @return Whether the timestamp field is set. + */ + @java.lang.Override + public boolean hasTimestamp() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Indicates the timestamp at which the query has returned all the records
    +     * in the change stream partition with timestamp <= heartbeat timestamp.
    +     * The heartbeat timestamp will not be the same as the timestamps of other
    +     * record types in the same partition.
    +     * 
    + * + * .google.protobuf.Timestamp timestamp = 1; + * + * @return The timestamp. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getTimestamp() { + return timestamp_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : timestamp_; + } + + /** + * + * + *
    +     * Indicates the timestamp at which the query has returned all the records
    +     * in the change stream partition with timestamp <= heartbeat timestamp.
    +     * The heartbeat timestamp will not be the same as the timestamps of other
    +     * record types in the same partition.
    +     * 
    + * + * .google.protobuf.Timestamp timestamp = 1; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getTimestampOrBuilder() { + return timestamp_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : timestamp_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getTimestamp()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getTimestamp()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord)) { + return super.equals(obj); + } + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord other = + (com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord) obj; + + if (hasTimestamp() != other.hasTimestamp()) return false; + if (hasTimestamp()) { + if (!getTimestamp().equals(other.getTimestamp())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasTimestamp()) { + hash = (37 * hash) + 
TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + getTimestamp().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord 
parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * A heartbeat record is returned as a progress indicator, when there are no
    +     * data changes or any other partition record types in the change stream
    +     * partition.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.ChangeStreamRecord.HeartbeatRecord} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ChangeStreamRecord.HeartbeatRecord) + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecordOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_HeartbeatRecord_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_HeartbeatRecord_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord.class, + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord.Builder.class); + } + + // Construct using com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetTimestampFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + timestamp_ = null; + if (timestampBuilder_ != null) { + timestampBuilder_.dispose(); + timestampBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_HeartbeatRecord_descriptor; + } + + @java.lang.Override + public 
com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord getDefaultInstanceForType() { + return com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord build() { + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord buildPartial() { + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord result = + new com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.timestamp_ = timestampBuilder_ == null ? 
timestamp_ : timestampBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord) { + return mergeFrom((com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord other) { + if (other == com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord.getDefaultInstance()) + return this; + if (other.hasTimestamp()) { + mergeTimestamp(other.getTimestamp()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetTimestampFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Timestamp timestamp_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + 
com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + timestampBuilder_; + + /** + * + * + *
    +       * Indicates the timestamp at which the query has returned all the records
    +       * in the change stream partition with timestamp <= heartbeat timestamp.
    +       * The heartbeat timestamp will not be the same as the timestamps of other
    +       * record types in the same partition.
    +       * 
    + * + * .google.protobuf.Timestamp timestamp = 1; + * + * @return Whether the timestamp field is set. + */ + public boolean hasTimestamp() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +       * Indicates the timestamp at which the query has returned all the records
    +       * in the change stream partition with timestamp <= heartbeat timestamp.
    +       * The heartbeat timestamp will not be the same as the timestamps of other
    +       * record types in the same partition.
    +       * 
    + * + * .google.protobuf.Timestamp timestamp = 1; + * + * @return The timestamp. + */ + public com.google.protobuf.Timestamp getTimestamp() { + if (timestampBuilder_ == null) { + return timestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : timestamp_; + } else { + return timestampBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * Indicates the timestamp at which the query has returned all the records
    +       * in the change stream partition with timestamp <= heartbeat timestamp.
    +       * The heartbeat timestamp will not be the same as the timestamps of other
    +       * record types in the same partition.
    +       * 
    + * + * .google.protobuf.Timestamp timestamp = 1; + */ + public Builder setTimestamp(com.google.protobuf.Timestamp value) { + if (timestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + timestamp_ = value; + } else { + timestampBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Indicates the timestamp at which the query has returned all the records
    +       * in the change stream partition with timestamp <= heartbeat timestamp.
    +       * The heartbeat timestamp will not be the same as the timestamps of other
    +       * record types in the same partition.
    +       * 
    + * + * .google.protobuf.Timestamp timestamp = 1; + */ + public Builder setTimestamp(com.google.protobuf.Timestamp.Builder builderForValue) { + if (timestampBuilder_ == null) { + timestamp_ = builderForValue.build(); + } else { + timestampBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Indicates the timestamp at which the query has returned all the records
    +       * in the change stream partition with timestamp <= heartbeat timestamp.
    +       * The heartbeat timestamp will not be the same as the timestamps of other
    +       * record types in the same partition.
    +       * 
    + * + * .google.protobuf.Timestamp timestamp = 1; + */ + public Builder mergeTimestamp(com.google.protobuf.Timestamp value) { + if (timestampBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && timestamp_ != null + && timestamp_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getTimestampBuilder().mergeFrom(value); + } else { + timestamp_ = value; + } + } else { + timestampBuilder_.mergeFrom(value); + } + if (timestamp_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * Indicates the timestamp at which the query has returned all the records
    +       * in the change stream partition with timestamp <= heartbeat timestamp.
    +       * The heartbeat timestamp will not be the same as the timestamps of other
    +       * record types in the same partition.
    +       * 
    + * + * .google.protobuf.Timestamp timestamp = 1; + */ + public Builder clearTimestamp() { + bitField0_ = (bitField0_ & ~0x00000001); + timestamp_ = null; + if (timestampBuilder_ != null) { + timestampBuilder_.dispose(); + timestampBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * Indicates the timestamp at which the query has returned all the records
    +       * in the change stream partition with timestamp <= heartbeat timestamp.
    +       * The heartbeat timestamp will not be the same as the timestamps of other
    +       * record types in the same partition.
    +       * 
    + * + * .google.protobuf.Timestamp timestamp = 1; + */ + public com.google.protobuf.Timestamp.Builder getTimestampBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetTimestampFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Indicates the timestamp at which the query has returned all the records
    +       * in the change stream partition with timestamp <= heartbeat timestamp.
    +       * The heartbeat timestamp will not be the same as the timestamps of other
    +       * record types in the same partition.
    +       * 
    + * + * .google.protobuf.Timestamp timestamp = 1; + */ + public com.google.protobuf.TimestampOrBuilder getTimestampOrBuilder() { + if (timestampBuilder_ != null) { + return timestampBuilder_.getMessageOrBuilder(); + } else { + return timestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : timestamp_; + } + } + + /** + * + * + *
    +       * Indicates the timestamp at which the query has returned all the records
    +       * in the change stream partition with timestamp <= heartbeat timestamp.
    +       * The heartbeat timestamp will not be the same as the timestamps of other
    +       * record types in the same partition.
    +       * 
    + * + * .google.protobuf.Timestamp timestamp = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetTimestampFieldBuilder() { + if (timestampBuilder_ == null) { + timestampBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getTimestamp(), getParentForChildren(), isClean()); + timestamp_ = null; + } + return timestampBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ChangeStreamRecord.HeartbeatRecord) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ChangeStreamRecord.HeartbeatRecord) + private static final com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord(); + } + + public static com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public HeartbeatRecord parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + 
return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface PartitionStartRecordOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ChangeStreamRecord.PartitionStartRecord) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * Start timestamp at which the partitions should be queried to return
    +     * change stream records with timestamps >= start_timestamp.
    +     * DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp start_timestamp = 1; + * + * @return Whether the startTimestamp field is set. + */ + boolean hasStartTimestamp(); + + /** + * + * + *
    +     * Start timestamp at which the partitions should be queried to return
    +     * change stream records with timestamps >= start_timestamp.
    +     * DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp start_timestamp = 1; + * + * @return The startTimestamp. + */ + com.google.protobuf.Timestamp getStartTimestamp(); + + /** + * + * + *
    +     * Start timestamp at which the partitions should be queried to return
    +     * change stream records with timestamps >= start_timestamp.
    +     * DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp start_timestamp = 1; + */ + com.google.protobuf.TimestampOrBuilder getStartTimestampOrBuilder(); + + /** + * + * + *
    +     * Record sequence numbers are unique and monotonically increasing (but not
    +     * necessarily contiguous) for a specific timestamp across record
    +     * types in the same partition. To guarantee ordered processing, the reader
    +     * should process records (of potentially different types) in
    +     * record_sequence order for a specific timestamp in the same partition.
    +     * 
    + * + * string record_sequence = 2; + * + * @return The recordSequence. + */ + java.lang.String getRecordSequence(); + + /** + * + * + *
    +     * Record sequence numbers are unique and monotonically increasing (but not
    +     * necessarily contiguous) for a specific timestamp across record
    +     * types in the same partition. To guarantee ordered processing, the reader
    +     * should process records (of potentially different types) in
    +     * record_sequence order for a specific timestamp in the same partition.
    +     * 
    + * + * string record_sequence = 2; + * + * @return The bytes for recordSequence. + */ + com.google.protobuf.ByteString getRecordSequenceBytes(); + + /** + * + * + *
    +     * Unique partition identifiers to be used in queries.
    +     * 
    + * + * repeated string partition_tokens = 3; + * + * @return A list containing the partitionTokens. + */ + java.util.List getPartitionTokensList(); + + /** + * + * + *
    +     * Unique partition identifiers to be used in queries.
    +     * 
    + * + * repeated string partition_tokens = 3; + * + * @return The count of partitionTokens. + */ + int getPartitionTokensCount(); + + /** + * + * + *
    +     * Unique partition identifiers to be used in queries.
    +     * 
    + * + * repeated string partition_tokens = 3; + * + * @param index The index of the element to return. + * @return The partitionTokens at the given index. + */ + java.lang.String getPartitionTokens(int index); + + /** + * + * + *
    +     * Unique partition identifiers to be used in queries.
    +     * 
    + * + * repeated string partition_tokens = 3; + * + * @param index The index of the value to return. + * @return The bytes of the partitionTokens at the given index. + */ + com.google.protobuf.ByteString getPartitionTokensBytes(int index); + } + + /** + * + * + *
    +   * A partition start record serves as a notification that the client should
    +   * schedule the partitions to be queried. PartitionStartRecord returns
    +   * information about one or more partitions.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.ChangeStreamRecord.PartitionStartRecord} + */ + public static final class PartitionStartRecord extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ChangeStreamRecord.PartitionStartRecord) + PartitionStartRecordOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "PartitionStartRecord"); + } + + // Use PartitionStartRecord.newBuilder() to construct. + private PartitionStartRecord(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private PartitionStartRecord() { + recordSequence_ = ""; + partitionTokens_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionStartRecord_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionStartRecord_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord.class, + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord.Builder.class); + } + + private int bitField0_; + public static final int START_TIMESTAMP_FIELD_NUMBER = 1; + private com.google.protobuf.Timestamp startTimestamp_; + + /** + * + * + *
    +     * Start timestamp at which the partitions should be queried to return
    +     * change stream records with timestamps >= start_timestamp.
    +     * DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp start_timestamp = 1; + * + * @return Whether the startTimestamp field is set. + */ + @java.lang.Override + public boolean hasStartTimestamp() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Start timestamp at which the partitions should be queried to return
    +     * change stream records with timestamps >= start_timestamp.
    +     * DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp start_timestamp = 1; + * + * @return The startTimestamp. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getStartTimestamp() { + return startTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : startTimestamp_; + } + + /** + * + * + *
    +     * Start timestamp at which the partitions should be queried to return
    +     * change stream records with timestamps >= start_timestamp.
    +     * DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp start_timestamp = 1; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getStartTimestampOrBuilder() { + return startTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : startTimestamp_; + } + + public static final int RECORD_SEQUENCE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object recordSequence_ = ""; + + /** + * + * + *
    +     * Record sequence numbers are unique and monotonically increasing (but not
    +     * necessarily contiguous) for a specific timestamp across record
    +     * types in the same partition. To guarantee ordered processing, the reader
    +     * should process records (of potentially different types) in
    +     * record_sequence order for a specific timestamp in the same partition.
    +     * 
    + * + * string record_sequence = 2; + * + * @return The recordSequence. + */ + @java.lang.Override + public java.lang.String getRecordSequence() { + java.lang.Object ref = recordSequence_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + recordSequence_ = s; + return s; + } + } + + /** + * + * + *
    +     * Record sequence numbers are unique and monotonically increasing (but not
    +     * necessarily contiguous) for a specific timestamp across record
    +     * types in the same partition. To guarantee ordered processing, the reader
    +     * should process records (of potentially different types) in
    +     * record_sequence order for a specific timestamp in the same partition.
    +     * 
    + * + * string record_sequence = 2; + * + * @return The bytes for recordSequence. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRecordSequenceBytes() { + java.lang.Object ref = recordSequence_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + recordSequence_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PARTITION_TOKENS_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList partitionTokens_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +     * Unique partition identifiers to be used in queries.
    +     * 
    + * + * repeated string partition_tokens = 3; + * + * @return A list containing the partitionTokens. + */ + public com.google.protobuf.ProtocolStringList getPartitionTokensList() { + return partitionTokens_; + } + + /** + * + * + *
    +     * Unique partition identifiers to be used in queries.
    +     * 
    + * + * repeated string partition_tokens = 3; + * + * @return The count of partitionTokens. + */ + public int getPartitionTokensCount() { + return partitionTokens_.size(); + } + + /** + * + * + *
    +     * Unique partition identifiers to be used in queries.
    +     * 
    + * + * repeated string partition_tokens = 3; + * + * @param index The index of the element to return. + * @return The partitionTokens at the given index. + */ + public java.lang.String getPartitionTokens(int index) { + return partitionTokens_.get(index); + } + + /** + * + * + *
    +     * Unique partition identifiers to be used in queries.
    +     * 
    + * + * repeated string partition_tokens = 3; + * + * @param index The index of the value to return. + * @return The bytes of the partitionTokens at the given index. + */ + public com.google.protobuf.ByteString getPartitionTokensBytes(int index) { + return partitionTokens_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getStartTimestamp()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(recordSequence_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, recordSequence_); + } + for (int i = 0; i < partitionTokens_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, partitionTokens_.getRaw(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getStartTimestamp()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(recordSequence_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, recordSequence_); + } + { + int dataSize = 0; + for (int i = 0; i < partitionTokens_.size(); i++) { + dataSize += computeStringSizeNoTag(partitionTokens_.getRaw(i)); + } + size += dataSize; + size += 1 * getPartitionTokensList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == 
this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord)) { + return super.equals(obj); + } + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord other = + (com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord) obj; + + if (hasStartTimestamp() != other.hasStartTimestamp()) return false; + if (hasStartTimestamp()) { + if (!getStartTimestamp().equals(other.getStartTimestamp())) return false; + } + if (!getRecordSequence().equals(other.getRecordSequence())) return false; + if (!getPartitionTokensList().equals(other.getPartitionTokensList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasStartTimestamp()) { + hash = (37 * hash) + START_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + getStartTimestamp().hashCode(); + } + hash = (37 * hash) + RECORD_SEQUENCE_FIELD_NUMBER; + hash = (53 * hash) + getRecordSequence().hashCode(); + if (getPartitionTokensCount() > 0) { + hash = (37 * hash) + PARTITION_TOKENS_FIELD_NUMBER; + hash = (53 * hash) + getPartitionTokensList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * A partition start record serves as a notification that the client should
    +     * schedule the partitions to be queried. PartitionStartRecord returns
    +     * information about one or more partitions.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.ChangeStreamRecord.PartitionStartRecord} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ChangeStreamRecord.PartitionStartRecord) + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecordOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionStartRecord_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionStartRecord_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord.class, + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord.Builder.class); + } + + // Construct using com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetStartTimestampFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + startTimestamp_ = null; + if (startTimestampBuilder_ != null) { + startTimestampBuilder_.dispose(); + startTimestampBuilder_ = null; + } + recordSequence_ = ""; + partitionTokens_ = com.google.protobuf.LazyStringArrayList.emptyList(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionStartRecord_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord + getDefaultInstanceForType() { + return com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord build() { + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord buildPartial() { + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord result = + new com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.startTimestamp_ = + startTimestampBuilder_ == null ? 
startTimestamp_ : startTimestampBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.recordSequence_ = recordSequence_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + partitionTokens_.makeImmutable(); + result.partitionTokens_ = partitionTokens_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord) { + return mergeFrom((com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord other) { + if (other + == com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord.getDefaultInstance()) + return this; + if (other.hasStartTimestamp()) { + mergeStartTimestamp(other.getStartTimestamp()); + } + if (!other.getRecordSequence().isEmpty()) { + recordSequence_ = other.recordSequence_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.partitionTokens_.isEmpty()) { + if (partitionTokens_.isEmpty()) { + partitionTokens_ = other.partitionTokens_; + bitField0_ |= 0x00000004; + } else { + ensurePartitionTokensIsMutable(); + partitionTokens_.addAll(other.partitionTokens_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + 
input.readMessage( + internalGetStartTimestampFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + recordSequence_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + ensurePartitionTokensIsMutable(); + partitionTokens_.add(s); + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Timestamp startTimestamp_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + startTimestampBuilder_; + + /** + * + * + *
    +       * Start timestamp at which the partitions should be queried to return
    +       * change stream records with timestamps >= start_timestamp.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp start_timestamp = 1; + * + * @return Whether the startTimestamp field is set. + */ + public boolean hasStartTimestamp() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +       * Start timestamp at which the partitions should be queried to return
    +       * change stream records with timestamps >= start_timestamp.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp start_timestamp = 1; + * + * @return The startTimestamp. + */ + public com.google.protobuf.Timestamp getStartTimestamp() { + if (startTimestampBuilder_ == null) { + return startTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : startTimestamp_; + } else { + return startTimestampBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * Start timestamp at which the partitions should be queried to return
    +       * change stream records with timestamps >= start_timestamp.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp start_timestamp = 1; + */ + public Builder setStartTimestamp(com.google.protobuf.Timestamp value) { + if (startTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + startTimestamp_ = value; + } else { + startTimestampBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Start timestamp at which the partitions should be queried to return
    +       * change stream records with timestamps >= start_timestamp.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp start_timestamp = 1; + */ + public Builder setStartTimestamp(com.google.protobuf.Timestamp.Builder builderForValue) { + if (startTimestampBuilder_ == null) { + startTimestamp_ = builderForValue.build(); + } else { + startTimestampBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Start timestamp at which the partitions should be queried to return
    +       * change stream records with timestamps >= start_timestamp.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp start_timestamp = 1; + */ + public Builder mergeStartTimestamp(com.google.protobuf.Timestamp value) { + if (startTimestampBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && startTimestamp_ != null + && startTimestamp_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getStartTimestampBuilder().mergeFrom(value); + } else { + startTimestamp_ = value; + } + } else { + startTimestampBuilder_.mergeFrom(value); + } + if (startTimestamp_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * Start timestamp at which the partitions should be queried to return
    +       * change stream records with timestamps >= start_timestamp.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp start_timestamp = 1; + */ + public Builder clearStartTimestamp() { + bitField0_ = (bitField0_ & ~0x00000001); + startTimestamp_ = null; + if (startTimestampBuilder_ != null) { + startTimestampBuilder_.dispose(); + startTimestampBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * Start timestamp at which the partitions should be queried to return
    +       * change stream records with timestamps >= start_timestamp.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp start_timestamp = 1; + */ + public com.google.protobuf.Timestamp.Builder getStartTimestampBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetStartTimestampFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Start timestamp at which the partitions should be queried to return
    +       * change stream records with timestamps >= start_timestamp.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp start_timestamp = 1; + */ + public com.google.protobuf.TimestampOrBuilder getStartTimestampOrBuilder() { + if (startTimestampBuilder_ != null) { + return startTimestampBuilder_.getMessageOrBuilder(); + } else { + return startTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : startTimestamp_; + } + } + + /** + * + * + *
    +       * Start timestamp at which the partitions should be queried to return
    +       * change stream records with timestamps >= start_timestamp.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp start_timestamp = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetStartTimestampFieldBuilder() { + if (startTimestampBuilder_ == null) { + startTimestampBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getStartTimestamp(), getParentForChildren(), isClean()); + startTimestamp_ = null; + } + return startTimestampBuilder_; + } + + private java.lang.Object recordSequence_ = ""; + + /** + * + * + *
    +       * Record sequence numbers are unique and monotonically increasing (but not
    +       * necessarily contiguous) for a specific timestamp across record
    +       * types in the same partition. To guarantee ordered processing, the reader
    +       * should process records (of potentially different types) in
    +       * record_sequence order for a specific timestamp in the same partition.
    +       * 
    + * + * string record_sequence = 2; + * + * @return The recordSequence. + */ + public java.lang.String getRecordSequence() { + java.lang.Object ref = recordSequence_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + recordSequence_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * Record sequence numbers are unique and monotonically increasing (but not
    +       * necessarily contiguous) for a specific timestamp across record
    +       * types in the same partition. To guarantee ordered processing, the reader
    +       * should process records (of potentially different types) in
    +       * record_sequence order for a specific timestamp in the same partition.
    +       * 
    + * + * string record_sequence = 2; + * + * @return The bytes for recordSequence. + */ + public com.google.protobuf.ByteString getRecordSequenceBytes() { + java.lang.Object ref = recordSequence_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + recordSequence_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * Record sequence numbers are unique and monotonically increasing (but not
    +       * necessarily contiguous) for a specific timestamp across record
    +       * types in the same partition. To guarantee ordered processing, the reader
    +       * should process records (of potentially different types) in
    +       * record_sequence order for a specific timestamp in the same partition.
    +       * 
    + * + * string record_sequence = 2; + * + * @param value The recordSequence to set. + * @return This builder for chaining. + */ + public Builder setRecordSequence(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + recordSequence_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Record sequence numbers are unique and monotonically increasing (but not
    +       * necessarily contiguous) for a specific timestamp across record
    +       * types in the same partition. To guarantee ordered processing, the reader
    +       * should process records (of potentially different types) in
    +       * record_sequence order for a specific timestamp in the same partition.
    +       * 
    + * + * string record_sequence = 2; + * + * @return This builder for chaining. + */ + public Builder clearRecordSequence() { + recordSequence_ = getDefaultInstance().getRecordSequence(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Record sequence numbers are unique and monotonically increasing (but not
    +       * necessarily contiguous) for a specific timestamp across record
    +       * types in the same partition. To guarantee ordered processing, the reader
    +       * should process records (of potentially different types) in
    +       * record_sequence order for a specific timestamp in the same partition.
    +       * 
    + * + * string record_sequence = 2; + * + * @param value The bytes for recordSequence to set. + * @return This builder for chaining. + */ + public Builder setRecordSequenceBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + recordSequence_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList partitionTokens_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensurePartitionTokensIsMutable() { + if (!partitionTokens_.isModifiable()) { + partitionTokens_ = new com.google.protobuf.LazyStringArrayList(partitionTokens_); + } + bitField0_ |= 0x00000004; + } + + /** + * + * + *
    +       * Unique partition identifiers to be used in queries.
    +       * 
    + * + * repeated string partition_tokens = 3; + * + * @return A list containing the partitionTokens. + */ + public com.google.protobuf.ProtocolStringList getPartitionTokensList() { + partitionTokens_.makeImmutable(); + return partitionTokens_; + } + + /** + * + * + *
    +       * Unique partition identifiers to be used in queries.
    +       * 
    + * + * repeated string partition_tokens = 3; + * + * @return The count of partitionTokens. + */ + public int getPartitionTokensCount() { + return partitionTokens_.size(); + } + + /** + * + * + *
    +       * Unique partition identifiers to be used in queries.
    +       * 
    + * + * repeated string partition_tokens = 3; + * + * @param index The index of the element to return. + * @return The partitionTokens at the given index. + */ + public java.lang.String getPartitionTokens(int index) { + return partitionTokens_.get(index); + } + + /** + * + * + *
    +       * Unique partition identifiers to be used in queries.
    +       * 
    + * + * repeated string partition_tokens = 3; + * + * @param index The index of the value to return. + * @return The bytes of the partitionTokens at the given index. + */ + public com.google.protobuf.ByteString getPartitionTokensBytes(int index) { + return partitionTokens_.getByteString(index); + } + + /** + * + * + *
    +       * Unique partition identifiers to be used in queries.
    +       * 
    + * + * repeated string partition_tokens = 3; + * + * @param index The index to set the value at. + * @param value The partitionTokens to set. + * @return This builder for chaining. + */ + public Builder setPartitionTokens(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionTokensIsMutable(); + partitionTokens_.set(index, value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Unique partition identifiers to be used in queries.
    +       * 
    + * + * repeated string partition_tokens = 3; + * + * @param value The partitionTokens to add. + * @return This builder for chaining. + */ + public Builder addPartitionTokens(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionTokensIsMutable(); + partitionTokens_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Unique partition identifiers to be used in queries.
    +       * 
    + * + * repeated string partition_tokens = 3; + * + * @param values The partitionTokens to add. + * @return This builder for chaining. + */ + public Builder addAllPartitionTokens(java.lang.Iterable values) { + ensurePartitionTokensIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, partitionTokens_); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Unique partition identifiers to be used in queries.
    +       * 
    + * + * repeated string partition_tokens = 3; + * + * @return This builder for chaining. + */ + public Builder clearPartitionTokens() { + partitionTokens_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Unique partition identifiers to be used in queries.
    +       * 
    + * + * repeated string partition_tokens = 3; + * + * @param value The bytes of the partitionTokens to add. + * @return This builder for chaining. + */ + public Builder addPartitionTokensBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensurePartitionTokensIsMutable(); + partitionTokens_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ChangeStreamRecord.PartitionStartRecord) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ChangeStreamRecord.PartitionStartRecord) + private static final com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord(); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PartitionStartRecord parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + 
return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface PartitionEndRecordOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ChangeStreamRecord.PartitionEndRecord) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * End timestamp at which the change stream partition is terminated. All
    +     * changes generated by this partition will have timestamps <=
    +     * end_timestamp. DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition. PartitionEndRecord is the last record returned for a
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp end_timestamp = 1; + * + * @return Whether the endTimestamp field is set. + */ + boolean hasEndTimestamp(); + + /** + * + * + *
    +     * End timestamp at which the change stream partition is terminated. All
    +     * changes generated by this partition will have timestamps <=
    +     * end_timestamp. DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition. PartitionEndRecord is the last record returned for a
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp end_timestamp = 1; + * + * @return The endTimestamp. + */ + com.google.protobuf.Timestamp getEndTimestamp(); + + /** + * + * + *
    +     * End timestamp at which the change stream partition is terminated. All
    +     * changes generated by this partition will have timestamps <=
    +     * end_timestamp. DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition. PartitionEndRecord is the last record returned for a
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp end_timestamp = 1; + */ + com.google.protobuf.TimestampOrBuilder getEndTimestampOrBuilder(); + + /** + * + * + *
    +     * Record sequence numbers are unique and monotonically increasing (but not
    +     * necessarily contiguous) for a specific timestamp across record
    +     * types in the same partition. To guarantee ordered processing, the reader
    +     * should process records (of potentially different types) in
    +     * record_sequence order for a specific timestamp in the same partition.
    +     * 
    + * + * string record_sequence = 2; + * + * @return The recordSequence. + */ + java.lang.String getRecordSequence(); + + /** + * + * + *
    +     * Record sequence numbers are unique and monotonically increasing (but not
    +     * necessarily contiguous) for a specific timestamp across record
    +     * types in the same partition. To guarantee ordered processing, the reader
    +     * should process records (of potentially different types) in
    +     * record_sequence order for a specific timestamp in the same partition.
    +     * 
    + * + * string record_sequence = 2; + * + * @return The bytes for recordSequence. + */ + com.google.protobuf.ByteString getRecordSequenceBytes(); + + /** + * + * + *
    +     * Unique partition identifier describing the terminated change stream
    +     * partition.
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.partition_token]
    +     * is equal to the partition token of the change stream partition currently
    +     * queried to return this PartitionEndRecord.
    +     * 
    + * + * string partition_token = 3; + * + * @return The partitionToken. + */ + java.lang.String getPartitionToken(); + + /** + * + * + *
    +     * Unique partition identifier describing the terminated change stream
    +     * partition.
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.partition_token]
    +     * is equal to the partition token of the change stream partition currently
    +     * queried to return this PartitionEndRecord.
    +     * 
    + * + * string partition_token = 3; + * + * @return The bytes for partitionToken. + */ + com.google.protobuf.ByteString getPartitionTokenBytes(); + } + + /** + * + * + *
    +   * A partition end record serves as a notification that the client should stop
    +   * reading the partition. No further records are expected to be retrieved on
    +   * it.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.ChangeStreamRecord.PartitionEndRecord} + */ + public static final class PartitionEndRecord extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ChangeStreamRecord.PartitionEndRecord) + PartitionEndRecordOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "PartitionEndRecord"); + } + + // Use PartitionEndRecord.newBuilder() to construct. + private PartitionEndRecord(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private PartitionEndRecord() { + recordSequence_ = ""; + partitionToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEndRecord_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEndRecord_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.class, + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.Builder.class); + } + + private int bitField0_; + public static final int END_TIMESTAMP_FIELD_NUMBER = 1; + private com.google.protobuf.Timestamp endTimestamp_; + + /** + * + * + *
    +     * End timestamp at which the change stream partition is terminated. All
    +     * changes generated by this partition will have timestamps <=
    +     * end_timestamp. DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition. PartitionEndRecord is the last record returned for a
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp end_timestamp = 1; + * + * @return Whether the endTimestamp field is set. + */ + @java.lang.Override + public boolean hasEndTimestamp() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * End timestamp at which the change stream partition is terminated. All
    +     * changes generated by this partition will have timestamps <=
    +     * end_timestamp. DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition. PartitionEndRecord is the last record returned for a
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp end_timestamp = 1; + * + * @return The endTimestamp. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getEndTimestamp() { + return endTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : endTimestamp_; + } + + /** + * + * + *
    +     * End timestamp at which the change stream partition is terminated. All
    +     * changes generated by this partition will have timestamps <=
    +     * end_timestamp. DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition. PartitionEndRecord is the last record returned for a
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp end_timestamp = 1; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getEndTimestampOrBuilder() { + return endTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : endTimestamp_; + } + + public static final int RECORD_SEQUENCE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object recordSequence_ = ""; + + /** + * + * + *
    +     * Record sequence numbers are unique and monotonically increasing (but not
    +     * necessarily contiguous) for a specific timestamp across record
    +     * types in the same partition. To guarantee ordered processing, the reader
    +     * should process records (of potentially different types) in
    +     * record_sequence order for a specific timestamp in the same partition.
    +     * 
    + * + * string record_sequence = 2; + * + * @return The recordSequence. + */ + @java.lang.Override + public java.lang.String getRecordSequence() { + java.lang.Object ref = recordSequence_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + recordSequence_ = s; + return s; + } + } + + /** + * + * + *
    +     * Record sequence numbers are unique and monotonically increasing (but not
    +     * necessarily contiguous) for a specific timestamp across record
    +     * types in the same partition. To guarantee ordered processing, the reader
    +     * should process records (of potentially different types) in
    +     * record_sequence order for a specific timestamp in the same partition.
    +     * 
    + * + * string record_sequence = 2; + * + * @return The bytes for recordSequence. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRecordSequenceBytes() { + java.lang.Object ref = recordSequence_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + recordSequence_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PARTITION_TOKEN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object partitionToken_ = ""; + + /** + * + * + *
    +     * Unique partition identifier describing the terminated change stream
    +     * partition.
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.partition_token]
    +     * is equal to the partition token of the change stream partition currently
    +     * queried to return this PartitionEndRecord.
    +     * 
    + * + * string partition_token = 3; + * + * @return The partitionToken. + */ + @java.lang.Override + public java.lang.String getPartitionToken() { + java.lang.Object ref = partitionToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + partitionToken_ = s; + return s; + } + } + + /** + * + * + *
    +     * Unique partition identifier describing the terminated change stream
    +     * partition.
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.partition_token]
    +     * is equal to the partition token of the change stream partition currently
    +     * queried to return this PartitionEndRecord.
    +     * 
    + * + * string partition_token = 3; + * + * @return The bytes for partitionToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPartitionTokenBytes() { + java.lang.Object ref = partitionToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + partitionToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getEndTimestamp()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(recordSequence_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, recordSequence_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(partitionToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, partitionToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getEndTimestamp()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(recordSequence_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, recordSequence_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(partitionToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, partitionToken_); + } + size += getUnknownFields().getSerializedSize(); + 
memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord)) { + return super.equals(obj); + } + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord other = + (com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord) obj; + + if (hasEndTimestamp() != other.hasEndTimestamp()) return false; + if (hasEndTimestamp()) { + if (!getEndTimestamp().equals(other.getEndTimestamp())) return false; + } + if (!getRecordSequence().equals(other.getRecordSequence())) return false; + if (!getPartitionToken().equals(other.getPartitionToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasEndTimestamp()) { + hash = (37 * hash) + END_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + getEndTimestamp().hashCode(); + } + hash = (37 * hash) + RECORD_SEQUENCE_FIELD_NUMBER; + hash = (53 * hash) + getRecordSequence().hashCode(); + hash = (37 * hash) + PARTITION_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPartitionToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + 
throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * A partition end record serves as a notification that the client should stop
    +     * reading the partition. No further records are expected to be retrieved on
    +     * it.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.ChangeStreamRecord.PartitionEndRecord} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ChangeStreamRecord.PartitionEndRecord) + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecordOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEndRecord_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEndRecord_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.class, + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.Builder.class); + } + + // Construct using com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetEndTimestampFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + endTimestamp_ = null; + if (endTimestampBuilder_ != null) { + endTimestampBuilder_.dispose(); + endTimestampBuilder_ = null; + } + recordSequence_ = ""; + partitionToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.ChangeStreamProto + 
.internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEndRecord_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord + getDefaultInstanceForType() { + return com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord build() { + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord buildPartial() { + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord result = + new com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.endTimestamp_ = + endTimestampBuilder_ == null ? 
endTimestamp_ : endTimestampBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.recordSequence_ = recordSequence_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.partitionToken_ = partitionToken_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord) { + return mergeFrom((com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord other) { + if (other + == com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.getDefaultInstance()) + return this; + if (other.hasEndTimestamp()) { + mergeEndTimestamp(other.getEndTimestamp()); + } + if (!other.getRecordSequence().isEmpty()) { + recordSequence_ = other.recordSequence_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getPartitionToken().isEmpty()) { + partitionToken_ = other.partitionToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetEndTimestampFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + recordSequence_ = 
input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + partitionToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Timestamp endTimestamp_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + endTimestampBuilder_; + + /** + * + * + *
    +       * End timestamp at which the change stream partition is terminated. All
    +       * changes generated by this partition will have timestamps <=
    +       * end_timestamp. DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition. PartitionEndRecord is the last record returned for a
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp end_timestamp = 1; + * + * @return Whether the endTimestamp field is set. + */ + public boolean hasEndTimestamp() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +       * End timestamp at which the change stream partition is terminated. All
    +       * changes generated by this partition will have timestamps <=
    +       * end_timestamp. DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition. PartitionEndRecord is the last record returned for a
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp end_timestamp = 1; + * + * @return The endTimestamp. + */ + public com.google.protobuf.Timestamp getEndTimestamp() { + if (endTimestampBuilder_ == null) { + return endTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : endTimestamp_; + } else { + return endTimestampBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * End timestamp at which the change stream partition is terminated. All
    +       * changes generated by this partition will have timestamps <=
    +       * end_timestamp. DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition. PartitionEndRecord is the last record returned for a
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp end_timestamp = 1; + */ + public Builder setEndTimestamp(com.google.protobuf.Timestamp value) { + if (endTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + endTimestamp_ = value; + } else { + endTimestampBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * End timestamp at which the change stream partition is terminated. All
    +       * changes generated by this partition will have timestamps <=
    +       * end_timestamp. DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition. PartitionEndRecord is the last record returned for a
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp end_timestamp = 1; + */ + public Builder setEndTimestamp(com.google.protobuf.Timestamp.Builder builderForValue) { + if (endTimestampBuilder_ == null) { + endTimestamp_ = builderForValue.build(); + } else { + endTimestampBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * End timestamp at which the change stream partition is terminated. All
    +       * changes generated by this partition will have timestamps <=
    +       * end_timestamp. DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition. PartitionEndRecord is the last record returned for a
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp end_timestamp = 1; + */ + public Builder mergeEndTimestamp(com.google.protobuf.Timestamp value) { + if (endTimestampBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && endTimestamp_ != null + && endTimestamp_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getEndTimestampBuilder().mergeFrom(value); + } else { + endTimestamp_ = value; + } + } else { + endTimestampBuilder_.mergeFrom(value); + } + if (endTimestamp_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * End timestamp at which the change stream partition is terminated. All
    +       * changes generated by this partition will have timestamps <=
    +       * end_timestamp. DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition. PartitionEndRecord is the last record returned for a
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp end_timestamp = 1; + */ + public Builder clearEndTimestamp() { + bitField0_ = (bitField0_ & ~0x00000001); + endTimestamp_ = null; + if (endTimestampBuilder_ != null) { + endTimestampBuilder_.dispose(); + endTimestampBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * End timestamp at which the change stream partition is terminated. All
    +       * changes generated by this partition will have timestamps <=
    +       * end_timestamp. DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition. PartitionEndRecord is the last record returned for a
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp end_timestamp = 1; + */ + public com.google.protobuf.Timestamp.Builder getEndTimestampBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetEndTimestampFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * End timestamp at which the change stream partition is terminated. All
    +       * changes generated by this partition will have timestamps <=
    +       * end_timestamp. DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition. PartitionEndRecord is the last record returned for a
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp end_timestamp = 1; + */ + public com.google.protobuf.TimestampOrBuilder getEndTimestampOrBuilder() { + if (endTimestampBuilder_ != null) { + return endTimestampBuilder_.getMessageOrBuilder(); + } else { + return endTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : endTimestamp_; + } + } + + /** + * + * + *
    +       * End timestamp at which the change stream partition is terminated. All
    +       * changes generated by this partition will have timestamps <=
    +       * end_timestamp. DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition. PartitionEndRecord is the last record returned for a
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp end_timestamp = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetEndTimestampFieldBuilder() { + if (endTimestampBuilder_ == null) { + endTimestampBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getEndTimestamp(), getParentForChildren(), isClean()); + endTimestamp_ = null; + } + return endTimestampBuilder_; + } + + private java.lang.Object recordSequence_ = ""; + + /** + * + * + *
    +       * Record sequence numbers are unique and monotonically increasing (but not
    +       * necessarily contiguous) for a specific timestamp across record
    +       * types in the same partition. To guarantee ordered processing, the reader
    +       * should process records (of potentially different types) in
    +       * record_sequence order for a specific timestamp in the same partition.
    +       * 
    + * + * string record_sequence = 2; + * + * @return The recordSequence. + */ + public java.lang.String getRecordSequence() { + java.lang.Object ref = recordSequence_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + recordSequence_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * Record sequence numbers are unique and monotonically increasing (but not
    +       * necessarily contiguous) for a specific timestamp across record
    +       * types in the same partition. To guarantee ordered processing, the reader
    +       * should process records (of potentially different types) in
    +       * record_sequence order for a specific timestamp in the same partition.
    +       * 
    + * + * string record_sequence = 2; + * + * @return The bytes for recordSequence. + */ + public com.google.protobuf.ByteString getRecordSequenceBytes() { + java.lang.Object ref = recordSequence_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + recordSequence_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * Record sequence numbers are unique and monotonically increasing (but not
    +       * necessarily contiguous) for a specific timestamp across record
    +       * types in the same partition. To guarantee ordered processing, the reader
    +       * should process records (of potentially different types) in
    +       * record_sequence order for a specific timestamp in the same partition.
    +       * 
    + * + * string record_sequence = 2; + * + * @param value The recordSequence to set. + * @return This builder for chaining. + */ + public Builder setRecordSequence(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + recordSequence_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Record sequence numbers are unique and monotonically increasing (but not
    +       * necessarily contiguous) for a specific timestamp across record
    +       * types in the same partition. To guarantee ordered processing, the reader
    +       * should process records (of potentially different types) in
    +       * record_sequence order for a specific timestamp in the same partition.
    +       * 
    + * + * string record_sequence = 2; + * + * @return This builder for chaining. + */ + public Builder clearRecordSequence() { + recordSequence_ = getDefaultInstance().getRecordSequence(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Record sequence numbers are unique and monotonically increasing (but not
    +       * necessarily contiguous) for a specific timestamp across record
    +       * types in the same partition. To guarantee ordered processing, the reader
    +       * should process records (of potentially different types) in
    +       * record_sequence order for a specific timestamp in the same partition.
    +       * 
    + * + * string record_sequence = 2; + * + * @param value The bytes for recordSequence to set. + * @return This builder for chaining. + */ + public Builder setRecordSequenceBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + recordSequence_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object partitionToken_ = ""; + + /** + * + * + *
    +       * Unique partition identifier describing the terminated change stream
    +       * partition.
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.partition_token]
    +       * is equal to the partition token of the change stream partition currently
    +       * queried to return this PartitionEndRecord.
    +       * 
    + * + * string partition_token = 3; + * + * @return The partitionToken. + */ + public java.lang.String getPartitionToken() { + java.lang.Object ref = partitionToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + partitionToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * Unique partition identifier describing the terminated change stream
    +       * partition.
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.partition_token]
    +       * is equal to the partition token of the change stream partition currently
    +       * queried to return this PartitionEndRecord.
    +       * 
    + * + * string partition_token = 3; + * + * @return The bytes for partitionToken. + */ + public com.google.protobuf.ByteString getPartitionTokenBytes() { + java.lang.Object ref = partitionToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + partitionToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * Unique partition identifier describing the terminated change stream
    +       * partition.
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.partition_token]
    +       * is equal to the partition token of the change stream partition currently
    +       * queried to return this PartitionEndRecord.
    +       * 
    + * + * string partition_token = 3; + * + * @param value The partitionToken to set. + * @return This builder for chaining. + */ + public Builder setPartitionToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + partitionToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Unique partition identifier describing the terminated change stream
    +       * partition.
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.partition_token]
    +       * is equal to the partition token of the change stream partition currently
    +       * queried to return this PartitionEndRecord.
    +       * 
    + * + * string partition_token = 3; + * + * @return This builder for chaining. + */ + public Builder clearPartitionToken() { + partitionToken_ = getDefaultInstance().getPartitionToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Unique partition identifier describing the terminated change stream
    +       * partition.
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.partition_token]
    +       * is equal to the partition token of the change stream partition currently
    +       * queried to return this PartitionEndRecord.
    +       * 
    + * + * string partition_token = 3; + * + * @param value The bytes for partitionToken to set. + * @return This builder for chaining. + */ + public Builder setPartitionTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + partitionToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ChangeStreamRecord.PartitionEndRecord) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ChangeStreamRecord.PartitionEndRecord) + private static final com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord(); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PartitionEndRecord parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface PartitionEventRecordOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ChangeStreamRecord.PartitionEventRecord) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * Indicates the commit timestamp at which the key range change occurred.
    +     * DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + * + * @return Whether the commitTimestamp field is set. + */ + boolean hasCommitTimestamp(); + + /** + * + * + *
    +     * Indicates the commit timestamp at which the key range change occurred.
    +     * DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + * + * @return The commitTimestamp. + */ + com.google.protobuf.Timestamp getCommitTimestamp(); + + /** + * + * + *
    +     * Indicates the commit timestamp at which the key range change occurred.
    +     * DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + com.google.protobuf.TimestampOrBuilder getCommitTimestampOrBuilder(); + + /** + * + * + *
    +     * Record sequence numbers are unique and monotonically increasing (but not
    +     * necessarily contiguous) for a specific timestamp across record
    +     * types in the same partition. To guarantee ordered processing, the reader
    +     * should process records (of potentially different types) in
    +     * record_sequence order for a specific timestamp in the same partition.
    +     * 
    + * + * string record_sequence = 2; + * + * @return The recordSequence. + */ + java.lang.String getRecordSequence(); + + /** + * + * + *
    +     * Record sequence numbers are unique and monotonically increasing (but not
    +     * necessarily contiguous) for a specific timestamp across record
    +     * types in the same partition. To guarantee ordered processing, the reader
    +     * should process records (of potentially different types) in
    +     * record_sequence order for a specific timestamp in the same partition.
    +     * 
    + * + * string record_sequence = 2; + * + * @return The bytes for recordSequence. + */ + com.google.protobuf.ByteString getRecordSequenceBytes(); + + /** + * + * + *
    +     * Unique partition identifier describing the partition this event
    +     * occurred on.
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token]
    +     * is equal to the partition token of the change stream partition currently
    +     * queried to return this PartitionEventRecord.
    +     * 
    + * + * string partition_token = 3; + * + * @return The partitionToken. + */ + java.lang.String getPartitionToken(); + + /** + * + * + *
    +     * Unique partition identifier describing the partition this event
    +     * occurred on.
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token]
    +     * is equal to the partition token of the change stream partition currently
    +     * queried to return this PartitionEventRecord.
    +     * 
    + * + * string partition_token = 3; + * + * @return The bytes for partitionToken. + */ + com.google.protobuf.ByteString getPartitionTokenBytes(); + + /** + * + * + *
    +     * Set when one or more key ranges are moved into the change stream
    +     * partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +     * and partition (P3) in a single transaction at timestamp T.
    +     *
    +     * The PartitionEventRecord returned in P1 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P1"
    +     * move_in_events {
    +     * source_partition_token: "P2"
    +     * }
    +     * move_in_events {
    +     * source_partition_token: "P3"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P2 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P2"
    +     * move_out_events {
    +     * destination_partition_token: "P1"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P3 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P3"
    +     * move_out_events {
    +     * destination_partition_token: "P1"
    +     * }
    +     * }
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + java.util.List + getMoveInEventsList(); + + /** + * + * + *
    +     * Set when one or more key ranges are moved into the change stream
    +     * partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +     * and partition (P3) in a single transaction at timestamp T.
    +     *
    +     * The PartitionEventRecord returned in P1 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P1"
    +     * move_in_events {
    +     * source_partition_token: "P2"
    +     * }
    +     * move_in_events {
    +     * source_partition_token: "P3"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P2 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P2"
    +     * move_out_events {
    +     * destination_partition_token: "P1"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P3 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P3"
    +     * move_out_events {
    +     * destination_partition_token: "P1"
    +     * }
    +     * }
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent getMoveInEvents( + int index); + + /** + * + * + *
    +     * Set when one or more key ranges are moved into the change stream
    +     * partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +     * and partition (P3) in a single transaction at timestamp T.
    +     *
    +     * The PartitionEventRecord returned in P1 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P1"
    +     * move_in_events {
    +     * source_partition_token: "P2"
    +     * }
    +     * move_in_events {
    +     * source_partition_token: "P3"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P2 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P2"
    +     * move_out_events {
    +     * destination_partition_token: "P1"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P3 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P3"
    +     * move_out_events {
    +     * destination_partition_token: "P1"
    +     * }
    +     * }
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + int getMoveInEventsCount(); + + /** + * + * + *
    +     * Set when one or more key ranges are moved into the change stream
    +     * partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +     * and partition (P3) in a single transaction at timestamp T.
    +     *
    +     * The PartitionEventRecord returned in P1 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P1"
    +     * move_in_events {
    +     * source_partition_token: "P2"
    +     * }
    +     * move_in_events {
    +     * source_partition_token: "P3"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P2 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P2"
    +     * move_out_events {
    +     * destination_partition_token: "P1"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P3 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P3"
    +     * move_out_events {
    +     * destination_partition_token: "P1"
    +     * }
    +     * }
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + java.util.List< + ? extends + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEventOrBuilder> + getMoveInEventsOrBuilderList(); + + /** + * + * + *
    +     * Set when one or more key ranges are moved into the change stream
    +     * partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +     * and partition (P3) in a single transaction at timestamp T.
    +     *
    +     * The PartitionEventRecord returned in P1 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P1"
    +     * move_in_events {
    +     * source_partition_token: "P2"
    +     * }
    +     * move_in_events {
    +     * source_partition_token: "P3"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P2 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P2"
    +     * move_out_events {
    +     * destination_partition_token: "P1"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P3 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P3"
    +     * move_out_events {
    +     * destination_partition_token: "P1"
    +     * }
    +     * }
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEventOrBuilder + getMoveInEventsOrBuilder(int index); + + /** + * + * + *
    +     * Set when one or more key ranges are moved out of the change stream
    +     * partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +     * and partition (P3) in a single transaction at timestamp T.
    +     *
    +     * The PartitionEventRecord returned in P1 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P1"
    +     * move_out_events {
    +     * destination_partition_token: "P2"
    +     * }
    +     * move_out_events {
    +     * destination_partition_token: "P3"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P2 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P2"
    +     * move_in_events {
    +     * source_partition_token: "P1"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P3 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P3"
    +     * move_in_events {
    +     * source_partition_token: "P1"
    +     * }
    +     * }
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + java.util.List + getMoveOutEventsList(); + + /** + * + * + *
    +     * Set when one or more key ranges are moved out of the change stream
    +     * partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +     * and partition (P3) in a single transaction at timestamp T.
    +     *
    +     * The PartitionEventRecord returned in P1 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P1"
    +     * move_out_events {
    +     * destination_partition_token: "P2"
    +     * }
    +     * move_out_events {
    +     * destination_partition_token: "P3"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P2 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P2"
    +     * move_in_events {
    +     * source_partition_token: "P1"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P3 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P3"
    +     * move_in_events {
    +     * source_partition_token: "P1"
    +     * }
    +     * }
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent getMoveOutEvents( + int index); + + /** + * + * + *
    +     * Set when one or more key ranges are moved out of the change stream
    +     * partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +     * and partition (P3) in a single transaction at timestamp T.
    +     *
    +     * The PartitionEventRecord returned in P1 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P1"
    +     * move_out_events {
    +     * destination_partition_token: "P2"
    +     * }
    +     * move_out_events {
    +     * destination_partition_token: "P3"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P2 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P2"
    +     * move_in_events {
    +     * source_partition_token: "P1"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P3 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P3"
    +     * move_in_events {
    +     * source_partition_token: "P1"
    +     * }
    +     * }
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + int getMoveOutEventsCount(); + + /** + * + * + *
    +     * Set when one or more key ranges are moved out of the change stream
    +     * partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +     * and partition (P3) in a single transaction at timestamp T.
    +     *
    +     * The PartitionEventRecord returned in P1 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P1"
    +     * move_out_events {
    +     * destination_partition_token: "P2"
    +     * }
    +     * move_out_events {
    +     * destination_partition_token: "P3"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P2 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P2"
    +     * move_in_events {
    +     * source_partition_token: "P1"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P3 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P3"
    +     * move_in_events {
    +     * source_partition_token: "P1"
    +     * }
    +     * }
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + java.util.List< + ? extends + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEventOrBuilder> + getMoveOutEventsOrBuilderList(); + + /** + * + * + *
    +     * Set when one or more key ranges are moved out of the change stream
    +     * partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +     * and partition (P3) in a single transaction at timestamp T.
    +     *
    +     * The PartitionEventRecord returned in P1 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P1"
    +     * move_out_events {
    +     * destination_partition_token: "P2"
    +     * }
    +     * move_out_events {
    +     * destination_partition_token: "P3"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P2 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P2"
    +     * move_in_events {
    +     * source_partition_token: "P1"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P3 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P3"
    +     * move_in_events {
    +     * source_partition_token: "P1"
    +     * }
    +     * }
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEventOrBuilder + getMoveOutEventsOrBuilder(int index); + } + + /** + * + * + *
    +   * A partition event record describes key range changes for a change stream
    +   * partition. The changes to a row defined by its primary key can be captured
    +   * in one change stream partition for a specific time range, and then be
    +   * captured in a different change stream partition for a different time range.
    +   * This movement of key ranges across change stream partitions is a reflection
    +   * of activities, such as Spanner's dynamic splitting and load balancing, etc.
    +   * Processing this event is needed if users want to guarantee processing of
    +   * the changes for any key in timestamp order. If time ordered processing of
    +   * changes for a primary key is not needed, this event can be ignored.
    +   * To guarantee time ordered processing for each primary key, if the event
    +   * describes move-ins, the reader of this partition needs to wait until the
    +   * readers of the source partitions have processed all records with timestamps
    +   * <= this PartitionEventRecord.commit_timestamp, before advancing beyond this
    +   * PartitionEventRecord. If the event describes move-outs, the reader can
    +   * notify the readers of the destination partitions that they can continue
    +   * processing.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.ChangeStreamRecord.PartitionEventRecord} + */ + public static final class PartitionEventRecord extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ChangeStreamRecord.PartitionEventRecord) + PartitionEventRecordOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "PartitionEventRecord"); + } + + // Use PartitionEventRecord.newBuilder() to construct. + private PartitionEventRecord(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private PartitionEventRecord() { + recordSequence_ = ""; + partitionToken_ = ""; + moveInEvents_ = java.util.Collections.emptyList(); + moveOutEvents_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.class, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.Builder.class); + } + + public interface MoveInEventOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +       * An unique partition identifier describing the source change stream
    +       * partition that recorded changes for the key range that is moving
    +       * into this partition.
    +       * 
    + * + * string source_partition_token = 1; + * + * @return The sourcePartitionToken. + */ + java.lang.String getSourcePartitionToken(); + + /** + * + * + *
    +       * An unique partition identifier describing the source change stream
    +       * partition that recorded changes for the key range that is moving
    +       * into this partition.
    +       * 
    + * + * string source_partition_token = 1; + * + * @return The bytes for sourcePartitionToken. + */ + com.google.protobuf.ByteString getSourcePartitionTokenBytes(); + } + + /** + * + * + *
    +     * Describes move-in of the key ranges into the change stream partition
    +     * identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * To maintain processing the changes for a particular key in timestamp
    +     * order, the query processing the change stream partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token]
    +     * should not advance beyond the partition event record commit timestamp
    +     * until the queries processing the source change stream partitions have
    +     * processed all change stream records with timestamps <= the partition
    +     * event record commit timestamp.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent} + */ + public static final class MoveInEvent extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent) + MoveInEventOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "MoveInEvent"); + } + + // Use MoveInEvent.newBuilder() to construct. + private MoveInEvent(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private MoveInEvent() { + sourcePartitionToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_MoveInEvent_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_MoveInEvent_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent.class, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent.Builder + .class); + } + + public static final int SOURCE_PARTITION_TOKEN_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object sourcePartitionToken_ = ""; + + /** + * + * + *
    +       * An unique partition identifier describing the source change stream
    +       * partition that recorded changes for the key range that is moving
    +       * into this partition.
    +       * 
    + * + * string source_partition_token = 1; + * + * @return The sourcePartitionToken. + */ + @java.lang.Override + public java.lang.String getSourcePartitionToken() { + java.lang.Object ref = sourcePartitionToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sourcePartitionToken_ = s; + return s; + } + } + + /** + * + * + *
    +       * An unique partition identifier describing the source change stream
    +       * partition that recorded changes for the key range that is moving
    +       * into this partition.
    +       * 
    + * + * string source_partition_token = 1; + * + * @return The bytes for sourcePartitionToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSourcePartitionTokenBytes() { + java.lang.Object ref = sourcePartitionToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sourcePartitionToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sourcePartitionToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, sourcePartitionToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sourcePartitionToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, sourcePartitionToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent)) { + return super.equals(obj); + } + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent other = + (com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent) obj; + + if 
(!getSourcePartitionToken().equals(other.getSourcePartitionToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SOURCE_PARTITION_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getSourcePartitionToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + 
parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, 
extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +       * Describes move-in of the key ranges into the change stream partition
    +       * identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * To maintain processing the changes for a particular key in timestamp
    +       * order, the query processing the change stream partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token]
    +       * should not advance beyond the partition event record commit timestamp
    +       * until the queries processing the source change stream partitions have
    +       * processed all change stream records with timestamps <= the partition
    +       * event record commit timestamp.
    +       * 
    + * + * Protobuf type {@code google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent) + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEventOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_MoveInEvent_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_MoveInEvent_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent.class, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent.Builder + .class); + } + + // Construct using + // com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + sourcePartitionToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_MoveInEvent_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + getDefaultInstanceForType() { + return 
com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent build() { + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + buildPartial() { + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent result = + new com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.sourcePartitionToken_ = sourcePartitionToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent) { + return mergeFrom( + (com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent other) { + if (other + == com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + .getDefaultInstance()) return this; + if (!other.getSourcePartitionToken().isEmpty()) { + sourcePartitionToken_ = other.sourcePartitionToken_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return 
true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + sourcePartitionToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object sourcePartitionToken_ = ""; + + /** + * + * + *
    +         * An unique partition identifier describing the source change stream
    +         * partition that recorded changes for the key range that is moving
    +         * into this partition.
    +         * 
    + * + * string source_partition_token = 1; + * + * @return The sourcePartitionToken. + */ + public java.lang.String getSourcePartitionToken() { + java.lang.Object ref = sourcePartitionToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sourcePartitionToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +         * An unique partition identifier describing the source change stream
    +         * partition that recorded changes for the key range that is moving
    +         * into this partition.
    +         * 
    + * + * string source_partition_token = 1; + * + * @return The bytes for sourcePartitionToken. + */ + public com.google.protobuf.ByteString getSourcePartitionTokenBytes() { + java.lang.Object ref = sourcePartitionToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sourcePartitionToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +         * An unique partition identifier describing the source change stream
    +         * partition that recorded changes for the key range that is moving
    +         * into this partition.
    +         * 
    + * + * string source_partition_token = 1; + * + * @param value The sourcePartitionToken to set. + * @return This builder for chaining. + */ + public Builder setSourcePartitionToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + sourcePartitionToken_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +         * An unique partition identifier describing the source change stream
    +         * partition that recorded changes for the key range that is moving
    +         * into this partition.
    +         * 
    + * + * string source_partition_token = 1; + * + * @return This builder for chaining. + */ + public Builder clearSourcePartitionToken() { + sourcePartitionToken_ = getDefaultInstance().getSourcePartitionToken(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +         * An unique partition identifier describing the source change stream
    +         * partition that recorded changes for the key range that is moving
    +         * into this partition.
    +         * 
    + * + * string source_partition_token = 1; + * + * @param value The bytes for sourcePartitionToken to set. + * @return This builder for chaining. + */ + public Builder setSourcePartitionTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + sourcePartitionToken_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent) + private static final com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent(); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public MoveInEvent parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static 
com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface MoveOutEventOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +       * An unique partition identifier describing the destination change
    +       * stream partition that will record changes for the key range that is
    +       * moving out of this partition.
    +       * 
    + * + * string destination_partition_token = 1; + * + * @return The destinationPartitionToken. + */ + java.lang.String getDestinationPartitionToken(); + + /** + * + * + *
    +       * An unique partition identifier describing the destination change
    +       * stream partition that will record changes for the key range that is
    +       * moving out of this partition.
    +       * 
    + * + * string destination_partition_token = 1; + * + * @return The bytes for destinationPartitionToken. + */ + com.google.protobuf.ByteString getDestinationPartitionTokenBytes(); + } + + /** + * + * + *
    +     * Describes move-out of the key ranges out of the change stream partition
    +     * identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * To maintain processing the changes for a particular key in timestamp
    +     * order, the query processing the
    +     * [MoveOutEvent][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent]
    +     * in the partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token]
    +     * should inform the queries processing the destination partitions that
    +     * they can unblock and proceed processing records past the
    +     * [commit_timestamp][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.commit_timestamp].
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent} + */ + public static final class MoveOutEvent extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent) + MoveOutEventOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "MoveOutEvent"); + } + + // Use MoveOutEvent.newBuilder() to construct. + private MoveOutEvent(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private MoveOutEvent() { + destinationPartitionToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_MoveOutEvent_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_MoveOutEvent_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent.class, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent.Builder + .class); + } + + public static final int DESTINATION_PARTITION_TOKEN_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object destinationPartitionToken_ = ""; + + /** + * + * + *
    +       * An unique partition identifier describing the destination change
    +       * stream partition that will record changes for the key range that is
    +       * moving out of this partition.
    +       * 
    + * + * string destination_partition_token = 1; + * + * @return The destinationPartitionToken. + */ + @java.lang.Override + public java.lang.String getDestinationPartitionToken() { + java.lang.Object ref = destinationPartitionToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + destinationPartitionToken_ = s; + return s; + } + } + + /** + * + * + *
    +       * An unique partition identifier describing the destination change
    +       * stream partition that will record changes for the key range that is
    +       * moving out of this partition.
    +       * 
    + * + * string destination_partition_token = 1; + * + * @return The bytes for destinationPartitionToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDestinationPartitionTokenBytes() { + java.lang.Object ref = destinationPartitionToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + destinationPartitionToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(destinationPartitionToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, destinationPartitionToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(destinationPartitionToken_)) { + size += + com.google.protobuf.GeneratedMessage.computeStringSize(1, destinationPartitionToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent)) { + return super.equals(obj); + } + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent other = + (com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent) obj; 
+ + if (!getDestinationPartitionToken().equals(other.getDestinationPartitionToken())) + return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DESTINATION_PARTITION_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getDestinationPartitionToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + 
return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +       * Describes move-out of the key ranges out of the change stream partition
    +       * identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * To maintain processing the changes for a particular key in timestamp
    +       * order, the query processing the
    +       * [MoveOutEvent][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent]
    +       * in the partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token]
    +       * should inform the queries processing the destination partitions that
    +       * they can unblock and proceed processing records past the
    +       * [commit_timestamp][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.commit_timestamp].
    +       * 
    + * + * Protobuf type {@code + * google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent) + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEventOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_MoveOutEvent_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_MoveOutEvent_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent.class, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent.Builder + .class); + } + + // Construct using + // com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + destinationPartitionToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_MoveOutEvent_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + getDefaultInstanceForType() { + return 
com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent build() { + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + buildPartial() { + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent result = + new com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.destinationPartitionToken_ = destinationPartitionToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent) { + return mergeFrom( + (com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent other) { + if (other + == com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + .getDefaultInstance()) return this; + if (!other.getDestinationPartitionToken().isEmpty()) { + destinationPartitionToken_ = other.destinationPartitionToken_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final 
boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + destinationPartitionToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object destinationPartitionToken_ = ""; + + /** + * + * + *
    +         * An unique partition identifier describing the destination change
    +         * stream partition that will record changes for the key range that is
    +         * moving out of this partition.
    +         * 
    + * + * string destination_partition_token = 1; + * + * @return The destinationPartitionToken. + */ + public java.lang.String getDestinationPartitionToken() { + java.lang.Object ref = destinationPartitionToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + destinationPartitionToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +         * An unique partition identifier describing the destination change
    +         * stream partition that will record changes for the key range that is
    +         * moving out of this partition.
    +         * 
    + * + * string destination_partition_token = 1; + * + * @return The bytes for destinationPartitionToken. + */ + public com.google.protobuf.ByteString getDestinationPartitionTokenBytes() { + java.lang.Object ref = destinationPartitionToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + destinationPartitionToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +         * An unique partition identifier describing the destination change
    +         * stream partition that will record changes for the key range that is
    +         * moving out of this partition.
    +         * 
    + * + * string destination_partition_token = 1; + * + * @param value The destinationPartitionToken to set. + * @return This builder for chaining. + */ + public Builder setDestinationPartitionToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + destinationPartitionToken_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +         * An unique partition identifier describing the destination change
    +         * stream partition that will record changes for the key range that is
    +         * moving out of this partition.
    +         * 
    + * + * string destination_partition_token = 1; + * + * @return This builder for chaining. + */ + public Builder clearDestinationPartitionToken() { + destinationPartitionToken_ = getDefaultInstance().getDestinationPartitionToken(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +         * An unique partition identifier describing the destination change
    +         * stream partition that will record changes for the key range that is
    +         * moving out of this partition.
    +         * 
    + * + * string destination_partition_token = 1; + * + * @param value The bytes for destinationPartitionToken to set. + * @return This builder for chaining. + */ + public Builder setDestinationPartitionTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + destinationPartitionToken_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent) + private static final com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord + .MoveOutEvent + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent(); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public MoveOutEvent parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public 
static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int COMMIT_TIMESTAMP_FIELD_NUMBER = 1; + private com.google.protobuf.Timestamp commitTimestamp_; + + /** + * + * + *
    +     * Indicates the commit timestamp at which the key range change occurred.
    +     * DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + * + * @return Whether the commitTimestamp field is set. + */ + @java.lang.Override + public boolean hasCommitTimestamp() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Indicates the commit timestamp at which the key range change occurred.
    +     * DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + * + * @return The commitTimestamp. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCommitTimestamp() { + return commitTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTimestamp_; + } + + /** + * + * + *
    +     * Indicates the commit timestamp at which the key range change occurred.
    +     * DataChangeRecord.commit_timestamps,
    +     * PartitionStartRecord.start_timestamps,
    +     * PartitionEventRecord.commit_timestamps, and
    +     * PartitionEndRecord.end_timestamps can have the same value in the same
    +     * partition.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCommitTimestampOrBuilder() { + return commitTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTimestamp_; + } + + public static final int RECORD_SEQUENCE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object recordSequence_ = ""; + + /** + * + * + *
    +     * Record sequence numbers are unique and monotonically increasing (but not
    +     * necessarily contiguous) for a specific timestamp across record
    +     * types in the same partition. To guarantee ordered processing, the reader
    +     * should process records (of potentially different types) in
    +     * record_sequence order for a specific timestamp in the same partition.
    +     * 
    + * + * string record_sequence = 2; + * + * @return The recordSequence. + */ + @java.lang.Override + public java.lang.String getRecordSequence() { + java.lang.Object ref = recordSequence_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + recordSequence_ = s; + return s; + } + } + + /** + * + * + *
    +     * Record sequence numbers are unique and monotonically increasing (but not
    +     * necessarily contiguous) for a specific timestamp across record
    +     * types in the same partition. To guarantee ordered processing, the reader
    +     * should process records (of potentially different types) in
    +     * record_sequence order for a specific timestamp in the same partition.
    +     * 
    + * + * string record_sequence = 2; + * + * @return The bytes for recordSequence. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRecordSequenceBytes() { + java.lang.Object ref = recordSequence_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + recordSequence_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PARTITION_TOKEN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object partitionToken_ = ""; + + /** + * + * + *
    +     * Unique partition identifier describing the partition this event
    +     * occurred on.
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token]
    +     * is equal to the partition token of the change stream partition currently
    +     * queried to return this PartitionEventRecord.
    +     * 
    + * + * string partition_token = 3; + * + * @return The partitionToken. + */ + @java.lang.Override + public java.lang.String getPartitionToken() { + java.lang.Object ref = partitionToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + partitionToken_ = s; + return s; + } + } + + /** + * + * + *
    +     * Unique partition identifier describing the partition this event
    +     * occurred on.
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token]
    +     * is equal to the partition token of the change stream partition currently
    +     * queried to return this PartitionEventRecord.
    +     * 
    + * + * string partition_token = 3; + * + * @return The bytes for partitionToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPartitionTokenBytes() { + java.lang.Object ref = partitionToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + partitionToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int MOVE_IN_EVENTS_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private java.util.List< + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent> + moveInEvents_; + + /** + * + * + *
    +     * Set when one or more key ranges are moved into the change stream
    +     * partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +     * and partition (P3) in a single transaction at timestamp T.
    +     *
    +     * The PartitionEventRecord returned in P1 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P1"
    +     * move_in_events {
    +     * source_partition_token: "P2"
    +     * }
    +     * move_in_events {
    +     * source_partition_token: "P3"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P2 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P2"
    +     * move_out_events {
    +     * destination_partition_token: "P1"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P3 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P3"
    +     * move_out_events {
    +     * destination_partition_token: "P1"
    +     * }
    +     * }
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + @java.lang.Override + public java.util.List + getMoveInEventsList() { + return moveInEvents_; + } + + /** + * + * + *
    +     * Set when one or more key ranges are moved into the change stream
    +     * partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +     * and partition (P3) in a single transaction at timestamp T.
    +     *
    +     * The PartitionEventRecord returned in P1 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P1"
    +     * move_in_events {
    +     * source_partition_token: "P2"
    +     * }
    +     * move_in_events {
    +     * source_partition_token: "P3"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P2 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P2"
    +     * move_out_events {
    +     * destination_partition_token: "P1"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P3 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P3"
    +     * move_out_events {
    +     * destination_partition_token: "P1"
    +     * }
    +     * }
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + @java.lang.Override + public java.util.List< + ? extends + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEventOrBuilder> + getMoveInEventsOrBuilderList() { + return moveInEvents_; + } + + /** + * + * + *
    +     * Set when one or more key ranges are moved into the change stream
    +     * partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +     * and partition (P3) in a single transaction at timestamp T.
    +     *
    +     * The PartitionEventRecord returned in P1 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P1"
    +     * move_in_events {
    +     * source_partition_token: "P2"
    +     * }
    +     * move_in_events {
    +     * source_partition_token: "P3"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P2 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P2"
    +     * move_out_events {
    +     * destination_partition_token: "P1"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P3 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P3"
    +     * move_out_events {
    +     * destination_partition_token: "P1"
    +     * }
    +     * }
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + @java.lang.Override + public int getMoveInEventsCount() { + return moveInEvents_.size(); + } + + /** + * + * + *
    +     * Set when one or more key ranges are moved into the change stream
    +     * partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +     * and partition (P3) in a single transaction at timestamp T.
    +     *
    +     * The PartitionEventRecord returned in P1 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P1"
    +     * move_in_events {
    +     * source_partition_token: "P2"
    +     * }
    +     * move_in_events {
    +     * source_partition_token: "P3"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P2 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P2"
    +     * move_out_events {
    +     * destination_partition_token: "P1"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P3 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P3"
    +     * move_out_events {
    +     * destination_partition_token: "P1"
    +     * }
    +     * }
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + getMoveInEvents(int index) { + return moveInEvents_.get(index); + } + + /** + * + * + *
    +     * Set when one or more key ranges are moved into the change stream
    +     * partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +     * and partition (P3) in a single transaction at timestamp T.
    +     *
    +     * The PartitionEventRecord returned in P1 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P1"
    +     * move_in_events {
    +     * source_partition_token: "P2"
    +     * }
    +     * move_in_events {
    +     * source_partition_token: "P3"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P2 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P2"
    +     * move_out_events {
    +     * destination_partition_token: "P1"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P3 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P3"
    +     * move_out_events {
    +     * destination_partition_token: "P1"
    +     * }
    +     * }
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEventOrBuilder + getMoveInEventsOrBuilder(int index) { + return moveInEvents_.get(index); + } + + public static final int MOVE_OUT_EVENTS_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private java.util.List< + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent> + moveOutEvents_; + + /** + * + * + *
    +     * Set when one or more key ranges are moved out of the change stream
    +     * partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +     * and partition (P3) in a single transaction at timestamp T.
    +     *
    +     * The PartitionEventRecord returned in P1 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P1"
    +     * move_out_events {
    +     * destination_partition_token: "P2"
    +     * }
    +     * move_out_events {
    +     * destination_partition_token: "P3"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P2 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P2"
    +     * move_in_events {
    +     * source_partition_token: "P1"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P3 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P3"
    +     * move_in_events {
    +     * source_partition_token: "P1"
    +     * }
    +     * }
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + @java.lang.Override + public java.util.List< + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent> + getMoveOutEventsList() { + return moveOutEvents_; + } + + /** + * + * + *
    +     * Set when one or more key ranges are moved out of the change stream
    +     * partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +     * and partition (P3) in a single transaction at timestamp T.
    +     *
    +     * The PartitionEventRecord returned in P1 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P1"
    +     * move_out_events {
    +     * destination_partition_token: "P2"
    +     * }
    +     * move_out_events {
    +     * destination_partition_token: "P3"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P2 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P2"
    +     * move_in_events {
    +     * source_partition_token: "P1"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P3 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P3"
    +     * move_in_events {
    +     * source_partition_token: "P1"
    +     * }
    +     * }
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + @java.lang.Override + public java.util.List< + ? extends + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEventOrBuilder> + getMoveOutEventsOrBuilderList() { + return moveOutEvents_; + } + + /** + * + * + *
    +     * Set when one or more key ranges are moved out of the change stream
    +     * partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +     * and partition (P3) in a single transaction at timestamp T.
    +     *
    +     * The PartitionEventRecord returned in P1 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P1"
    +     * move_out_events {
    +     * destination_partition_token: "P2"
    +     * }
    +     * move_out_events {
    +     * destination_partition_token: "P3"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P2 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P2"
    +     * move_in_events {
    +     * source_partition_token: "P1"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P3 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P3"
    +     * move_in_events {
    +     * source_partition_token: "P1"
    +     * }
    +     * }
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + @java.lang.Override + public int getMoveOutEventsCount() { + return moveOutEvents_.size(); + } + + /** + * + * + *
    +     * Set when one or more key ranges are moved out of the change stream
    +     * partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +     * and partition (P3) in a single transaction at timestamp T.
    +     *
    +     * The PartitionEventRecord returned in P1 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P1"
    +     * move_out_events {
    +     * destination_partition_token: "P2"
    +     * }
    +     * move_out_events {
    +     * destination_partition_token: "P3"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P2 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P2"
    +     * move_in_events {
    +     * source_partition_token: "P1"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P3 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P3"
    +     * move_in_events {
    +     * source_partition_token: "P1"
    +     * }
    +     * }
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + getMoveOutEvents(int index) { + return moveOutEvents_.get(index); + } + + /** + * + * + *
    +     * Set when one or more key ranges are moved out of the change stream
    +     * partition identified by
    +     * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +     *
    +     * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +     * and partition (P3) in a single transaction at timestamp T.
    +     *
    +     * The PartitionEventRecord returned in P1 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P1"
    +     * move_out_events {
    +     * destination_partition_token: "P2"
    +     * }
    +     * move_out_events {
    +     * destination_partition_token: "P3"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P2 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P2"
    +     * move_in_events {
    +     * source_partition_token: "P1"
    +     * }
    +     * }
    +     *
    +     * The PartitionEventRecord returned in P3 will reflect the move as:
    +     *
    +     * PartitionEventRecord {
    +     * commit_timestamp: T
    +     * partition_token: "P3"
    +     * move_in_events {
    +     * source_partition_token: "P1"
    +     * }
    +     * }
    +     * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEventOrBuilder + getMoveOutEventsOrBuilder(int index) { + return moveOutEvents_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getCommitTimestamp()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(recordSequence_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, recordSequence_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(partitionToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, partitionToken_); + } + for (int i = 0; i < moveInEvents_.size(); i++) { + output.writeMessage(4, moveInEvents_.get(i)); + } + for (int i = 0; i < moveOutEvents_.size(); i++) { + output.writeMessage(5, moveOutEvents_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getCommitTimestamp()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(recordSequence_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, recordSequence_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(partitionToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, partitionToken_); 
+ } + for (int i = 0; i < moveInEvents_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, moveInEvents_.get(i)); + } + for (int i = 0; i < moveOutEvents_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, moveOutEvents_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord)) { + return super.equals(obj); + } + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord other = + (com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord) obj; + + if (hasCommitTimestamp() != other.hasCommitTimestamp()) return false; + if (hasCommitTimestamp()) { + if (!getCommitTimestamp().equals(other.getCommitTimestamp())) return false; + } + if (!getRecordSequence().equals(other.getRecordSequence())) return false; + if (!getPartitionToken().equals(other.getPartitionToken())) return false; + if (!getMoveInEventsList().equals(other.getMoveInEventsList())) return false; + if (!getMoveOutEventsList().equals(other.getMoveOutEventsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasCommitTimestamp()) { + hash = (37 * hash) + COMMIT_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + getCommitTimestamp().hashCode(); + } + hash = (37 * hash) + RECORD_SEQUENCE_FIELD_NUMBER; + hash = (53 * hash) + getRecordSequence().hashCode(); + hash = (37 * hash) + PARTITION_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPartitionToken().hashCode(); + if (getMoveInEventsCount() > 0) { + hash = (37 * hash) + MOVE_IN_EVENTS_FIELD_NUMBER; + 
hash = (53 * hash) + getMoveInEventsList().hashCode(); + } + if (getMoveOutEventsCount() > 0) { + hash = (37 * hash) + MOVE_OUT_EVENTS_FIELD_NUMBER; + hash = (53 * hash) + getMoveOutEventsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord parseFrom( + java.io.InputStream input) throws java.io.IOException 
{ + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder 
toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * A partition event record describes key range changes for a change stream
    +     * partition. The changes to a row defined by its primary key can be captured
    +     * in one change stream partition for a specific time range, and then be
    +     * captured in a different change stream partition for a different time range.
    +     * This movement of key ranges across change stream partitions is a reflection
    +     * of activities, such as Spanner's dynamic splitting and load balancing, etc.
    +     * Processing this event is needed if users want to guarantee processing of
    +     * the changes for any key in timestamp order. If time ordered processing of
    +     * changes for a primary key is not needed, this event can be ignored.
    +     * To guarantee time ordered processing for each primary key, if the event
    +     * describes move-ins, the reader of this partition needs to wait until the
    +     * readers of the source partitions have processed all records with timestamps
    +     * <= this PartitionEventRecord.commit_timestamp, before advancing beyond this
    +     * PartitionEventRecord. If the event describes move-outs, the reader can
    +     * notify the readers of the destination partitions that they can continue
    +     * processing.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.ChangeStreamRecord.PartitionEventRecord} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ChangeStreamRecord.PartitionEventRecord) + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecordOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.class, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.Builder.class); + } + + // Construct using com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCommitTimestampFieldBuilder(); + internalGetMoveInEventsFieldBuilder(); + internalGetMoveOutEventsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + commitTimestamp_ = null; + if (commitTimestampBuilder_ != null) { + commitTimestampBuilder_.dispose(); + commitTimestampBuilder_ = null; + } + recordSequence_ = ""; + partitionToken_ = ""; + if (moveInEventsBuilder_ == null) { + moveInEvents_ = java.util.Collections.emptyList(); + } 
else { + moveInEvents_ = null; + moveInEventsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + if (moveOutEventsBuilder_ == null) { + moveOutEvents_ = java.util.Collections.emptyList(); + } else { + moveOutEvents_ = null; + moveOutEventsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_PartitionEventRecord_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord + getDefaultInstanceForType() { + return com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord build() { + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord buildPartial() { + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord result = + new com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord result) { + if (moveInEventsBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0)) { + moveInEvents_ = java.util.Collections.unmodifiableList(moveInEvents_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.moveInEvents_ = moveInEvents_; + } else { + result.moveInEvents_ = moveInEventsBuilder_.build(); + } + if (moveOutEventsBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 
0)) { + moveOutEvents_ = java.util.Collections.unmodifiableList(moveOutEvents_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.moveOutEvents_ = moveOutEvents_; + } else { + result.moveOutEvents_ = moveOutEventsBuilder_.build(); + } + } + + private void buildPartial0( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.commitTimestamp_ = + commitTimestampBuilder_ == null ? commitTimestamp_ : commitTimestampBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.recordSequence_ = recordSequence_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.partitionToken_ = partitionToken_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord) { + return mergeFrom((com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord other) { + if (other + == com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.getDefaultInstance()) + return this; + if (other.hasCommitTimestamp()) { + mergeCommitTimestamp(other.getCommitTimestamp()); + } + if (!other.getRecordSequence().isEmpty()) { + recordSequence_ = other.recordSequence_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getPartitionToken().isEmpty()) { + partitionToken_ = other.partitionToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (moveInEventsBuilder_ == null) { + if (!other.moveInEvents_.isEmpty()) { + if (moveInEvents_.isEmpty()) { + moveInEvents_ = other.moveInEvents_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureMoveInEventsIsMutable(); + 
moveInEvents_.addAll(other.moveInEvents_); + } + onChanged(); + } + } else { + if (!other.moveInEvents_.isEmpty()) { + if (moveInEventsBuilder_.isEmpty()) { + moveInEventsBuilder_.dispose(); + moveInEventsBuilder_ = null; + moveInEvents_ = other.moveInEvents_; + bitField0_ = (bitField0_ & ~0x00000008); + moveInEventsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetMoveInEventsFieldBuilder() + : null; + } else { + moveInEventsBuilder_.addAllMessages(other.moveInEvents_); + } + } + } + if (moveOutEventsBuilder_ == null) { + if (!other.moveOutEvents_.isEmpty()) { + if (moveOutEvents_.isEmpty()) { + moveOutEvents_ = other.moveOutEvents_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureMoveOutEventsIsMutable(); + moveOutEvents_.addAll(other.moveOutEvents_); + } + onChanged(); + } + } else { + if (!other.moveOutEvents_.isEmpty()) { + if (moveOutEventsBuilder_.isEmpty()) { + moveOutEventsBuilder_.dispose(); + moveOutEventsBuilder_ = null; + moveOutEvents_ = other.moveOutEvents_; + bitField0_ = (bitField0_ & ~0x00000010); + moveOutEventsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetMoveOutEventsFieldBuilder() + : null; + } else { + moveOutEventsBuilder_.addAllMessages(other.moveOutEvents_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetCommitTimestampFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + recordSequence_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + partitionToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent m = + input.readMessage( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + .parser(), + extensionRegistry); + if (moveInEventsBuilder_ == null) { + ensureMoveInEventsIsMutable(); + moveInEvents_.add(m); + } else { + moveInEventsBuilder_.addMessage(m); + } + break; + } // case 34 + case 42: + { + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent m = + input.readMessage( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + .parser(), + extensionRegistry); + if (moveOutEventsBuilder_ == null) { + ensureMoveOutEventsIsMutable(); + moveOutEvents_.add(m); + } else { + moveOutEventsBuilder_.addMessage(m); + } + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + 
done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.Timestamp commitTimestamp_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + commitTimestampBuilder_; + + /** + * + * + *
    +       * Indicates the commit timestamp at which the key range change occurred.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + * + * @return Whether the commitTimestamp field is set. + */ + public boolean hasCommitTimestamp() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +       * Indicates the commit timestamp at which the key range change occurred.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + * + * @return The commitTimestamp. + */ + public com.google.protobuf.Timestamp getCommitTimestamp() { + if (commitTimestampBuilder_ == null) { + return commitTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTimestamp_; + } else { + return commitTimestampBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * Indicates the commit timestamp at which the key range change occurred.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + public Builder setCommitTimestamp(com.google.protobuf.Timestamp value) { + if (commitTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commitTimestamp_ = value; + } else { + commitTimestampBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Indicates the commit timestamp at which the key range change occurred.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + public Builder setCommitTimestamp(com.google.protobuf.Timestamp.Builder builderForValue) { + if (commitTimestampBuilder_ == null) { + commitTimestamp_ = builderForValue.build(); + } else { + commitTimestampBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Indicates the commit timestamp at which the key range change occurred.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + public Builder mergeCommitTimestamp(com.google.protobuf.Timestamp value) { + if (commitTimestampBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && commitTimestamp_ != null + && commitTimestamp_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCommitTimestampBuilder().mergeFrom(value); + } else { + commitTimestamp_ = value; + } + } else { + commitTimestampBuilder_.mergeFrom(value); + } + if (commitTimestamp_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * Indicates the commit timestamp at which the key range change occurred.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + public Builder clearCommitTimestamp() { + bitField0_ = (bitField0_ & ~0x00000001); + commitTimestamp_ = null; + if (commitTimestampBuilder_ != null) { + commitTimestampBuilder_.dispose(); + commitTimestampBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * Indicates the commit timestamp at which the key range change occurred.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + public com.google.protobuf.Timestamp.Builder getCommitTimestampBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetCommitTimestampFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Indicates the commit timestamp at which the key range change occurred.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + public com.google.protobuf.TimestampOrBuilder getCommitTimestampOrBuilder() { + if (commitTimestampBuilder_ != null) { + return commitTimestampBuilder_.getMessageOrBuilder(); + } else { + return commitTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTimestamp_; + } + } + + /** + * + * + *
    +       * Indicates the commit timestamp at which the key range change occurred.
    +       * DataChangeRecord.commit_timestamps,
    +       * PartitionStartRecord.start_timestamps,
    +       * PartitionEventRecord.commit_timestamps, and
    +       * PartitionEndRecord.end_timestamps can have the same value in the same
    +       * partition.
    +       * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCommitTimestampFieldBuilder() { + if (commitTimestampBuilder_ == null) { + commitTimestampBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCommitTimestamp(), getParentForChildren(), isClean()); + commitTimestamp_ = null; + } + return commitTimestampBuilder_; + } + + private java.lang.Object recordSequence_ = ""; + + /** + * + * + *
    +       * Record sequence numbers are unique and monotonically increasing (but not
    +       * necessarily contiguous) for a specific timestamp across record
    +       * types in the same partition. To guarantee ordered processing, the reader
    +       * should process records (of potentially different types) in
    +       * record_sequence order for a specific timestamp in the same partition.
    +       * 
    + * + * string record_sequence = 2; + * + * @return The recordSequence. + */ + public java.lang.String getRecordSequence() { + java.lang.Object ref = recordSequence_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + recordSequence_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * Record sequence numbers are unique and monotonically increasing (but not
    +       * necessarily contiguous) for a specific timestamp across record
    +       * types in the same partition. To guarantee ordered processing, the reader
    +       * should process records (of potentially different types) in
    +       * record_sequence order for a specific timestamp in the same partition.
    +       * 
    + * + * string record_sequence = 2; + * + * @return The bytes for recordSequence. + */ + public com.google.protobuf.ByteString getRecordSequenceBytes() { + java.lang.Object ref = recordSequence_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + recordSequence_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * Record sequence numbers are unique and monotonically increasing (but not
    +       * necessarily contiguous) for a specific timestamp across record
    +       * types in the same partition. To guarantee ordered processing, the reader
    +       * should process records (of potentially different types) in
    +       * record_sequence order for a specific timestamp in the same partition.
    +       * 
    + * + * string record_sequence = 2; + * + * @param value The recordSequence to set. + * @return This builder for chaining. + */ + public Builder setRecordSequence(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + recordSequence_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Record sequence numbers are unique and monotonically increasing (but not
    +       * necessarily contiguous) for a specific timestamp across record
    +       * types in the same partition. To guarantee ordered processing, the reader
    +       * should process records (of potentially different types) in
    +       * record_sequence order for a specific timestamp in the same partition.
    +       * 
    + * + * string record_sequence = 2; + * + * @return This builder for chaining. + */ + public Builder clearRecordSequence() { + recordSequence_ = getDefaultInstance().getRecordSequence(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Record sequence numbers are unique and monotonically increasing (but not
    +       * necessarily contiguous) for a specific timestamp across record
    +       * types in the same partition. To guarantee ordered processing, the reader
    +       * should process records (of potentially different types) in
    +       * record_sequence order for a specific timestamp in the same partition.
    +       * 
    + * + * string record_sequence = 2; + * + * @param value The bytes for recordSequence to set. + * @return This builder for chaining. + */ + public Builder setRecordSequenceBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + recordSequence_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object partitionToken_ = ""; + + /** + * + * + *
    +       * Unique partition identifier describing the partition this event
    +       * occurred on.
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token]
    +       * is equal to the partition token of the change stream partition currently
    +       * queried to return this PartitionEventRecord.
    +       * 
    + * + * string partition_token = 3; + * + * @return The partitionToken. + */ + public java.lang.String getPartitionToken() { + java.lang.Object ref = partitionToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + partitionToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * Unique partition identifier describing the partition this event
    +       * occurred on.
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token]
    +       * is equal to the partition token of the change stream partition currently
    +       * queried to return this PartitionEventRecord.
    +       * 
    + * + * string partition_token = 3; + * + * @return The bytes for partitionToken. + */ + public com.google.protobuf.ByteString getPartitionTokenBytes() { + java.lang.Object ref = partitionToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + partitionToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * Unique partition identifier describing the partition this event
    +       * occurred on.
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token]
    +       * is equal to the partition token of the change stream partition currently
    +       * queried to return this PartitionEventRecord.
    +       * 
    + * + * string partition_token = 3; + * + * @param value The partitionToken to set. + * @return This builder for chaining. + */ + public Builder setPartitionToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + partitionToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Unique partition identifier describing the partition this event
    +       * occurred on.
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token]
    +       * is equal to the partition token of the change stream partition currently
    +       * queried to return this PartitionEventRecord.
    +       * 
    + * + * string partition_token = 3; + * + * @return This builder for chaining. + */ + public Builder clearPartitionToken() { + partitionToken_ = getDefaultInstance().getPartitionToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Unique partition identifier describing the partition this event
    +       * occurred on.
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token]
    +       * is equal to the partition token of the change stream partition currently
    +       * queried to return this PartitionEventRecord.
    +       * 
    + * + * string partition_token = 3; + * + * @param value The bytes for partitionToken to set. + * @return This builder for chaining. + */ + public Builder setPartitionTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + partitionToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.util.List< + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent> + moveInEvents_ = java.util.Collections.emptyList(); + + private void ensureMoveInEventsIsMutable() { + if (!((bitField0_ & 0x00000008) != 0)) { + moveInEvents_ = + new java.util.ArrayList< + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent>( + moveInEvents_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent.Builder, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEventOrBuilder> + moveInEventsBuilder_; + + /** + * + * + *
    +       * Set when one or more key ranges are moved into the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_in_events {
    +       * source_partition_token: "P2"
    +       * }
    +       * move_in_events {
    +       * source_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + public java.util.List< + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent> + getMoveInEventsList() { + if (moveInEventsBuilder_ == null) { + return java.util.Collections.unmodifiableList(moveInEvents_); + } else { + return moveInEventsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved into the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_in_events {
    +       * source_partition_token: "P2"
    +       * }
    +       * move_in_events {
    +       * source_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + public int getMoveInEventsCount() { + if (moveInEventsBuilder_ == null) { + return moveInEvents_.size(); + } else { + return moveInEventsBuilder_.getCount(); + } + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved into the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_in_events {
    +       * source_partition_token: "P2"
    +       * }
    +       * move_in_events {
    +       * source_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + getMoveInEvents(int index) { + if (moveInEventsBuilder_ == null) { + return moveInEvents_.get(index); + } else { + return moveInEventsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved into the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_in_events {
    +       * source_partition_token: "P2"
    +       * }
    +       * move_in_events {
    +       * source_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + public Builder setMoveInEvents( + int index, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent value) { + if (moveInEventsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMoveInEventsIsMutable(); + moveInEvents_.set(index, value); + onChanged(); + } else { + moveInEventsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved into the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_in_events {
    +       * source_partition_token: "P2"
    +       * }
    +       * move_in_events {
    +       * source_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + public Builder setMoveInEvents( + int index, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent.Builder + builderForValue) { + if (moveInEventsBuilder_ == null) { + ensureMoveInEventsIsMutable(); + moveInEvents_.set(index, builderForValue.build()); + onChanged(); + } else { + moveInEventsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved into the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_in_events {
    +       * source_partition_token: "P2"
    +       * }
    +       * move_in_events {
    +       * source_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + public Builder addMoveInEvents( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent value) { + if (moveInEventsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMoveInEventsIsMutable(); + moveInEvents_.add(value); + onChanged(); + } else { + moveInEventsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved into the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_in_events {
    +       * source_partition_token: "P2"
    +       * }
    +       * move_in_events {
    +       * source_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + public Builder addMoveInEvents( + int index, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent value) { + if (moveInEventsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMoveInEventsIsMutable(); + moveInEvents_.add(index, value); + onChanged(); + } else { + moveInEventsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved into the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_in_events {
    +       * source_partition_token: "P2"
    +       * }
    +       * move_in_events {
    +       * source_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + public Builder addMoveInEvents( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent.Builder + builderForValue) { + if (moveInEventsBuilder_ == null) { + ensureMoveInEventsIsMutable(); + moveInEvents_.add(builderForValue.build()); + onChanged(); + } else { + moveInEventsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved into the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_in_events {
    +       * source_partition_token: "P2"
    +       * }
    +       * move_in_events {
    +       * source_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + public Builder addMoveInEvents( + int index, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent.Builder + builderForValue) { + if (moveInEventsBuilder_ == null) { + ensureMoveInEventsIsMutable(); + moveInEvents_.add(index, builderForValue.build()); + onChanged(); + } else { + moveInEventsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved into the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_in_events {
    +       * source_partition_token: "P2"
    +       * }
    +       * move_in_events {
    +       * source_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + public Builder addAllMoveInEvents( + java.lang.Iterable< + ? extends + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent> + values) { + if (moveInEventsBuilder_ == null) { + ensureMoveInEventsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, moveInEvents_); + onChanged(); + } else { + moveInEventsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved into the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_in_events {
    +       * source_partition_token: "P2"
    +       * }
    +       * move_in_events {
    +       * source_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + public Builder clearMoveInEvents() { + if (moveInEventsBuilder_ == null) { + moveInEvents_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + moveInEventsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved into the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_in_events {
    +       * source_partition_token: "P2"
    +       * }
    +       * move_in_events {
    +       * source_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + public Builder removeMoveInEvents(int index) { + if (moveInEventsBuilder_ == null) { + ensureMoveInEventsIsMutable(); + moveInEvents_.remove(index); + onChanged(); + } else { + moveInEventsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved into the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_in_events {
    +       * source_partition_token: "P2"
    +       * }
    +       * move_in_events {
    +       * source_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent.Builder + getMoveInEventsBuilder(int index) { + return internalGetMoveInEventsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved into the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_in_events {
    +       * source_partition_token: "P2"
    +       * }
    +       * move_in_events {
    +       * source_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEventOrBuilder + getMoveInEventsOrBuilder(int index) { + if (moveInEventsBuilder_ == null) { + return moveInEvents_.get(index); + } else { + return moveInEventsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved into the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_in_events {
    +       * source_partition_token: "P2"
    +       * }
    +       * move_in_events {
    +       * source_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + public java.util.List< + ? extends + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord + .MoveInEventOrBuilder> + getMoveInEventsOrBuilderList() { + if (moveInEventsBuilder_ != null) { + return moveInEventsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(moveInEvents_); + } + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved into the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_in_events {
    +       * source_partition_token: "P2"
    +       * }
    +       * move_in_events {
    +       * source_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent.Builder + addMoveInEventsBuilder() { + return internalGetMoveInEventsFieldBuilder() + .addBuilder( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + .getDefaultInstance()); + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved into the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_in_events {
    +       * source_partition_token: "P2"
    +       * }
    +       * move_in_events {
    +       * source_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent.Builder + addMoveInEventsBuilder(int index) { + return internalGetMoveInEventsFieldBuilder() + .addBuilder( + index, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent + .getDefaultInstance()); + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved into the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved into partition (P1) from partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_in_events {
    +       * source_partition_token: "P2"
    +       * }
    +       * move_in_events {
    +       * source_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_out_events {
    +       * destination_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent move_in_events = 4; + * + */ + public java.util.List< + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent.Builder> + getMoveInEventsBuilderList() { + return internalGetMoveInEventsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent.Builder, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEventOrBuilder> + internalGetMoveInEventsFieldBuilder() { + if (moveInEventsBuilder_ == null) { + moveInEventsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveInEvent.Builder, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord + .MoveInEventOrBuilder>( + moveInEvents_, + ((bitField0_ & 0x00000008) != 0), + getParentForChildren(), + isClean()); + moveInEvents_ = null; + } + return moveInEventsBuilder_; + } + + private java.util.List< + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent> + moveOutEvents_ = java.util.Collections.emptyList(); + + private void ensureMoveOutEventsIsMutable() { + if (!((bitField0_ & 0x00000010) != 0)) { + moveOutEvents_ = + new java.util.ArrayList< + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent>( + moveOutEvents_); + bitField0_ |= 0x00000010; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent.Builder, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEventOrBuilder> + moveOutEventsBuilder_; + + /** + * + * + *
    +       * Set when one or more key ranges are moved out of the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_out_events {
    +       * destination_partition_token: "P2"
    +       * }
    +       * move_out_events {
    +       * destination_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + public java.util.List< + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent> + getMoveOutEventsList() { + if (moveOutEventsBuilder_ == null) { + return java.util.Collections.unmodifiableList(moveOutEvents_); + } else { + return moveOutEventsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved out of the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_out_events {
    +       * destination_partition_token: "P2"
    +       * }
    +       * move_out_events {
    +       * destination_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + public int getMoveOutEventsCount() { + if (moveOutEventsBuilder_ == null) { + return moveOutEvents_.size(); + } else { + return moveOutEventsBuilder_.getCount(); + } + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved out of the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_out_events {
    +       * destination_partition_token: "P2"
    +       * }
    +       * move_out_events {
    +       * destination_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + getMoveOutEvents(int index) { + if (moveOutEventsBuilder_ == null) { + return moveOutEvents_.get(index); + } else { + return moveOutEventsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved out of the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_out_events {
    +       * destination_partition_token: "P2"
    +       * }
    +       * move_out_events {
    +       * destination_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + public Builder setMoveOutEvents( + int index, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent value) { + if (moveOutEventsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMoveOutEventsIsMutable(); + moveOutEvents_.set(index, value); + onChanged(); + } else { + moveOutEventsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved out of the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_out_events {
    +       * destination_partition_token: "P2"
    +       * }
    +       * move_out_events {
    +       * destination_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + public Builder setMoveOutEvents( + int index, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent.Builder + builderForValue) { + if (moveOutEventsBuilder_ == null) { + ensureMoveOutEventsIsMutable(); + moveOutEvents_.set(index, builderForValue.build()); + onChanged(); + } else { + moveOutEventsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved out of the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_out_events {
    +       * destination_partition_token: "P2"
    +       * }
    +       * move_out_events {
    +       * destination_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + public Builder addMoveOutEvents( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent value) { + if (moveOutEventsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMoveOutEventsIsMutable(); + moveOutEvents_.add(value); + onChanged(); + } else { + moveOutEventsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved out of the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_out_events {
    +       * destination_partition_token: "P2"
    +       * }
    +       * move_out_events {
    +       * destination_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + public Builder addMoveOutEvents( + int index, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent value) { + if (moveOutEventsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMoveOutEventsIsMutable(); + moveOutEvents_.add(index, value); + onChanged(); + } else { + moveOutEventsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved out of the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_out_events {
    +       * destination_partition_token: "P2"
    +       * }
    +       * move_out_events {
    +       * destination_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + public Builder addMoveOutEvents( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent.Builder + builderForValue) { + if (moveOutEventsBuilder_ == null) { + ensureMoveOutEventsIsMutable(); + moveOutEvents_.add(builderForValue.build()); + onChanged(); + } else { + moveOutEventsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved out of the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_out_events {
    +       * destination_partition_token: "P2"
    +       * }
    +       * move_out_events {
    +       * destination_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + public Builder addMoveOutEvents( + int index, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent.Builder + builderForValue) { + if (moveOutEventsBuilder_ == null) { + ensureMoveOutEventsIsMutable(); + moveOutEvents_.add(index, builderForValue.build()); + onChanged(); + } else { + moveOutEventsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved out of the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_out_events {
    +       * destination_partition_token: "P2"
    +       * }
    +       * move_out_events {
    +       * destination_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + public Builder addAllMoveOutEvents( + java.lang.Iterable< + ? extends + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent> + values) { + if (moveOutEventsBuilder_ == null) { + ensureMoveOutEventsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, moveOutEvents_); + onChanged(); + } else { + moveOutEventsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved out of the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_out_events {
    +       * destination_partition_token: "P2"
    +       * }
    +       * move_out_events {
    +       * destination_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + public Builder clearMoveOutEvents() { + if (moveOutEventsBuilder_ == null) { + moveOutEvents_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + moveOutEventsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved out of the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_out_events {
    +       * destination_partition_token: "P2"
    +       * }
    +       * move_out_events {
    +       * destination_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + public Builder removeMoveOutEvents(int index) { + if (moveOutEventsBuilder_ == null) { + ensureMoveOutEventsIsMutable(); + moveOutEvents_.remove(index); + onChanged(); + } else { + moveOutEventsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved out of the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_out_events {
    +       * destination_partition_token: "P2"
    +       * }
    +       * move_out_events {
    +       * destination_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent.Builder + getMoveOutEventsBuilder(int index) { + return internalGetMoveOutEventsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved out of the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_out_events {
    +       * destination_partition_token: "P2"
    +       * }
    +       * move_out_events {
    +       * destination_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEventOrBuilder + getMoveOutEventsOrBuilder(int index) { + if (moveOutEventsBuilder_ == null) { + return moveOutEvents_.get(index); + } else { + return moveOutEventsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved out of the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_out_events {
    +       * destination_partition_token: "P2"
    +       * }
    +       * move_out_events {
    +       * destination_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + public java.util.List< + ? extends + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord + .MoveOutEventOrBuilder> + getMoveOutEventsOrBuilderList() { + if (moveOutEventsBuilder_ != null) { + return moveOutEventsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(moveOutEvents_); + } + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved out of the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_out_events {
    +       * destination_partition_token: "P2"
    +       * }
    +       * move_out_events {
    +       * destination_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent.Builder + addMoveOutEventsBuilder() { + return internalGetMoveOutEventsFieldBuilder() + .addBuilder( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + .getDefaultInstance()); + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved out of the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_out_events {
    +       * destination_partition_token: "P2"
    +       * }
    +       * move_out_events {
    +       * destination_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent.Builder + addMoveOutEventsBuilder(int index) { + return internalGetMoveOutEventsFieldBuilder() + .addBuilder( + index, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + .getDefaultInstance()); + } + + /** + * + * + *
    +       * Set when one or more key ranges are moved out of the change stream
    +       * partition identified by
    +       * [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token].
    +       *
    +       * Example: Two key ranges are moved out of partition (P1) to partition (P2)
    +       * and partition (P3) in a single transaction at timestamp T.
    +       *
    +       * The PartitionEventRecord returned in P1 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P1"
    +       * move_out_events {
    +       * destination_partition_token: "P2"
    +       * }
    +       * move_out_events {
    +       * destination_partition_token: "P3"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P2 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P2"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       *
    +       * The PartitionEventRecord returned in P3 will reflect the move as:
    +       *
    +       * PartitionEventRecord {
    +       * commit_timestamp: T
    +       * partition_token: "P3"
    +       * move_in_events {
    +       * source_partition_token: "P1"
    +       * }
    +       * }
    +       * 
    + * + * + * repeated .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent move_out_events = 5; + * + */ + public java.util.List< + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent.Builder> + getMoveOutEventsBuilderList() { + return internalGetMoveOutEventsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent.Builder, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEventOrBuilder> + internalGetMoveOutEventsFieldBuilder() { + if (moveOutEventsBuilder_ == null) { + moveOutEventsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent + .Builder, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord + .MoveOutEventOrBuilder>( + moveOutEvents_, + ((bitField0_ & 0x00000010) != 0), + getParentForChildren(), + isClean()); + moveOutEvents_ = null; + } + return moveOutEventsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ChangeStreamRecord.PartitionEventRecord) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ChangeStreamRecord.PartitionEventRecord) + private static final com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord(); + } + + public static com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PartitionEventRecord parsePartialFrom( + com.google.protobuf.CodedInputStream 
input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int recordCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object record_; + + public enum RecordCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + DATA_CHANGE_RECORD(1), + HEARTBEAT_RECORD(2), + PARTITION_START_RECORD(3), + PARTITION_END_RECORD(4), + PARTITION_EVENT_RECORD(5), + RECORD_NOT_SET(0); + private final int value; + + private RecordCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static RecordCase valueOf(int value) { + return forNumber(value); + } + + public static RecordCase forNumber(int value) { + switch (value) { + case 1: + return DATA_CHANGE_RECORD; + case 2: + return HEARTBEAT_RECORD; + case 3: + return PARTITION_START_RECORD; + case 4: + return PARTITION_END_RECORD; + case 5: + return PARTITION_EVENT_RECORD; + case 0: + return RECORD_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public RecordCase getRecordCase() { + return RecordCase.forNumber(recordCase_); + } + + public static final int DATA_CHANGE_RECORD_FIELD_NUMBER = 1; + + /** + * + * + *
    +   * Data change record describing a data change for a change stream
    +   * partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord data_change_record = 1; + * + * @return Whether the dataChangeRecord field is set. + */ + @java.lang.Override + public boolean hasDataChangeRecord() { + return recordCase_ == 1; + } + + /** + * + * + *
    +   * Data change record describing a data change for a change stream
    +   * partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord data_change_record = 1; + * + * @return The dataChangeRecord. + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord getDataChangeRecord() { + if (recordCase_ == 1) { + return (com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord) record_; + } + return com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.getDefaultInstance(); + } + + /** + * + * + *
    +   * Data change record describing a data change for a change stream
    +   * partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord data_change_record = 1; + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecordOrBuilder + getDataChangeRecordOrBuilder() { + if (recordCase_ == 1) { + return (com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord) record_; + } + return com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.getDefaultInstance(); + } + + public static final int HEARTBEAT_RECORD_FIELD_NUMBER = 2; + + /** + * + * + *
    +   * Heartbeat record describing a heartbeat for a change stream partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.HeartbeatRecord heartbeat_record = 2; + * + * @return Whether the heartbeatRecord field is set. + */ + @java.lang.Override + public boolean hasHeartbeatRecord() { + return recordCase_ == 2; + } + + /** + * + * + *
    +   * Heartbeat record describing a heartbeat for a change stream partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.HeartbeatRecord heartbeat_record = 2; + * + * @return The heartbeatRecord. + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord getHeartbeatRecord() { + if (recordCase_ == 2) { + return (com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord) record_; + } + return com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord.getDefaultInstance(); + } + + /** + * + * + *
    +   * Heartbeat record describing a heartbeat for a change stream partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.HeartbeatRecord heartbeat_record = 2; + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecordOrBuilder + getHeartbeatRecordOrBuilder() { + if (recordCase_ == 2) { + return (com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord) record_; + } + return com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord.getDefaultInstance(); + } + + public static final int PARTITION_START_RECORD_FIELD_NUMBER = 3; + + /** + * + * + *
    +   * Partition start record describing a new change stream partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionStartRecord partition_start_record = 3; + * + * + * @return Whether the partitionStartRecord field is set. + */ + @java.lang.Override + public boolean hasPartitionStartRecord() { + return recordCase_ == 3; + } + + /** + * + * + *
    +   * Partition start record describing a new change stream partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionStartRecord partition_start_record = 3; + * + * + * @return The partitionStartRecord. + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord getPartitionStartRecord() { + if (recordCase_ == 3) { + return (com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord) record_; + } + return com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord.getDefaultInstance(); + } + + /** + * + * + *
    +   * Partition start record describing a new change stream partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionStartRecord partition_start_record = 3; + * + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecordOrBuilder + getPartitionStartRecordOrBuilder() { + if (recordCase_ == 3) { + return (com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord) record_; + } + return com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord.getDefaultInstance(); + } + + public static final int PARTITION_END_RECORD_FIELD_NUMBER = 4; + + /** + * + * + *
    +   * Partition end record describing a terminated change stream partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEndRecord partition_end_record = 4; + * + * @return Whether the partitionEndRecord field is set. + */ + @java.lang.Override + public boolean hasPartitionEndRecord() { + return recordCase_ == 4; + } + + /** + * + * + *
    +   * Partition end record describing a terminated change stream partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEndRecord partition_end_record = 4; + * + * @return The partitionEndRecord. + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord getPartitionEndRecord() { + if (recordCase_ == 4) { + return (com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord) record_; + } + return com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.getDefaultInstance(); + } + + /** + * + * + *
    +   * Partition end record describing a terminated change stream partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEndRecord partition_end_record = 4; + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecordOrBuilder + getPartitionEndRecordOrBuilder() { + if (recordCase_ == 4) { + return (com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord) record_; + } + return com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.getDefaultInstance(); + } + + public static final int PARTITION_EVENT_RECORD_FIELD_NUMBER = 5; + + /** + * + * + *
    +   * Partition event record describing key range changes for a change stream
    +   * partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord partition_event_record = 5; + * + * + * @return Whether the partitionEventRecord field is set. + */ + @java.lang.Override + public boolean hasPartitionEventRecord() { + return recordCase_ == 5; + } + + /** + * + * + *
    +   * Partition event record describing key range changes for a change stream
    +   * partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord partition_event_record = 5; + * + * + * @return The partitionEventRecord. + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord getPartitionEventRecord() { + if (recordCase_ == 5) { + return (com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord) record_; + } + return com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.getDefaultInstance(); + } + + /** + * + * + *
    +   * Partition event record describing key range changes for a change stream
    +   * partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord partition_event_record = 5; + * + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecordOrBuilder + getPartitionEventRecordOrBuilder() { + if (recordCase_ == 5) { + return (com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord) record_; + } + return com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (recordCase_ == 1) { + output.writeMessage(1, (com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord) record_); + } + if (recordCase_ == 2) { + output.writeMessage(2, (com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord) record_); + } + if (recordCase_ == 3) { + output.writeMessage( + 3, (com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord) record_); + } + if (recordCase_ == 4) { + output.writeMessage(4, (com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord) record_); + } + if (recordCase_ == 5) { + output.writeMessage( + 5, (com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord) record_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (recordCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, (com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord) record_); + } + if (recordCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, 
(com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord) record_); + } + if (recordCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, (com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord) record_); + } + if (recordCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord) record_); + } + if (recordCase_ == 5) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 5, (com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord) record_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.ChangeStreamRecord)) { + return super.equals(obj); + } + com.google.spanner.v1.ChangeStreamRecord other = (com.google.spanner.v1.ChangeStreamRecord) obj; + + if (!getRecordCase().equals(other.getRecordCase())) return false; + switch (recordCase_) { + case 1: + if (!getDataChangeRecord().equals(other.getDataChangeRecord())) return false; + break; + case 2: + if (!getHeartbeatRecord().equals(other.getHeartbeatRecord())) return false; + break; + case 3: + if (!getPartitionStartRecord().equals(other.getPartitionStartRecord())) return false; + break; + case 4: + if (!getPartitionEndRecord().equals(other.getPartitionEndRecord())) return false; + break; + case 5: + if (!getPartitionEventRecord().equals(other.getPartitionEventRecord())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (recordCase_) { + case 1: + hash = (37 * hash) + 
DATA_CHANGE_RECORD_FIELD_NUMBER; + hash = (53 * hash) + getDataChangeRecord().hashCode(); + break; + case 2: + hash = (37 * hash) + HEARTBEAT_RECORD_FIELD_NUMBER; + hash = (53 * hash) + getHeartbeatRecord().hashCode(); + break; + case 3: + hash = (37 * hash) + PARTITION_START_RECORD_FIELD_NUMBER; + hash = (53 * hash) + getPartitionStartRecord().hashCode(); + break; + case 4: + hash = (37 * hash) + PARTITION_END_RECORD_FIELD_NUMBER; + hash = (53 * hash) + getPartitionEndRecord().hashCode(); + break; + case 5: + hash = (37 * hash) + PARTITION_EVENT_RECORD_FIELD_NUMBER; + hash = (53 * hash) + getPartitionEventRecord().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ChangeStreamRecord parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ChangeStreamRecord parseFrom( + byte[] 
data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ChangeStreamRecord parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ChangeStreamRecord parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static 
Builder newBuilder(com.google.spanner.v1.ChangeStreamRecord prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Spanner Change Streams enable customers to capture and stream out changes to
    +   * their Spanner databases in real-time. A change stream
    +   * can be created with option partition_mode='IMMUTABLE_KEY_RANGE' or
    +   * partition_mode='MUTABLE_KEY_RANGE'.
    +   *
    +   * This message is only used in Change Streams created with the option
    +   * partition_mode='MUTABLE_KEY_RANGE'. Spanner automatically creates a special
    +   * Table-Valued Function (TVF) along with each Change Streams. The function
    +   * provides access to the change stream's records. The function is named
    +   * READ_<change_stream_name> (where <change_stream_name> is the
    +   * name of the change stream), and it returns a table with only one column
    +   * called ChangeRecord.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.ChangeStreamRecord} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ChangeStreamRecord) + com.google.spanner.v1.ChangeStreamRecordOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ChangeStreamRecord.class, + com.google.spanner.v1.ChangeStreamRecord.Builder.class); + } + + // Construct using com.google.spanner.v1.ChangeStreamRecord.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (dataChangeRecordBuilder_ != null) { + dataChangeRecordBuilder_.clear(); + } + if (heartbeatRecordBuilder_ != null) { + heartbeatRecordBuilder_.clear(); + } + if (partitionStartRecordBuilder_ != null) { + partitionStartRecordBuilder_.clear(); + } + if (partitionEndRecordBuilder_ != null) { + partitionEndRecordBuilder_.clear(); + } + if (partitionEventRecordBuilder_ != null) { + partitionEventRecordBuilder_.clear(); + } + recordCase_ = 0; + record_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.ChangeStreamProto + .internal_static_google_spanner_v1_ChangeStreamRecord_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord 
getDefaultInstanceForType() { + return com.google.spanner.v1.ChangeStreamRecord.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord build() { + com.google.spanner.v1.ChangeStreamRecord result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord buildPartial() { + com.google.spanner.v1.ChangeStreamRecord result = + new com.google.spanner.v1.ChangeStreamRecord(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.ChangeStreamRecord result) { + int from_bitField0_ = bitField0_; + } + + private void buildPartialOneofs(com.google.spanner.v1.ChangeStreamRecord result) { + result.recordCase_ = recordCase_; + result.record_ = this.record_; + if (recordCase_ == 1 && dataChangeRecordBuilder_ != null) { + result.record_ = dataChangeRecordBuilder_.build(); + } + if (recordCase_ == 2 && heartbeatRecordBuilder_ != null) { + result.record_ = heartbeatRecordBuilder_.build(); + } + if (recordCase_ == 3 && partitionStartRecordBuilder_ != null) { + result.record_ = partitionStartRecordBuilder_.build(); + } + if (recordCase_ == 4 && partitionEndRecordBuilder_ != null) { + result.record_ = partitionEndRecordBuilder_.build(); + } + if (recordCase_ == 5 && partitionEventRecordBuilder_ != null) { + result.record_ = partitionEventRecordBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.ChangeStreamRecord) { + return mergeFrom((com.google.spanner.v1.ChangeStreamRecord) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.ChangeStreamRecord other) { + if (other == 
com.google.spanner.v1.ChangeStreamRecord.getDefaultInstance()) return this; + switch (other.getRecordCase()) { + case DATA_CHANGE_RECORD: + { + mergeDataChangeRecord(other.getDataChangeRecord()); + break; + } + case HEARTBEAT_RECORD: + { + mergeHeartbeatRecord(other.getHeartbeatRecord()); + break; + } + case PARTITION_START_RECORD: + { + mergePartitionStartRecord(other.getPartitionStartRecord()); + break; + } + case PARTITION_END_RECORD: + { + mergePartitionEndRecord(other.getPartitionEndRecord()); + break; + } + case PARTITION_EVENT_RECORD: + { + mergePartitionEventRecord(other.getPartitionEventRecord()); + break; + } + case RECORD_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetDataChangeRecordFieldBuilder().getBuilder(), extensionRegistry); + recordCase_ = 1; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetHeartbeatRecordFieldBuilder().getBuilder(), extensionRegistry); + recordCase_ = 2; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetPartitionStartRecordFieldBuilder().getBuilder(), extensionRegistry); + recordCase_ = 3; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetPartitionEndRecordFieldBuilder().getBuilder(), extensionRegistry); + recordCase_ = 4; + break; + } // case 34 + case 42: + { + input.readMessage( + internalGetPartitionEventRecordFieldBuilder().getBuilder(), extensionRegistry); 
+ recordCase_ = 5; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int recordCase_ = 0; + private java.lang.Object record_; + + public RecordCase getRecordCase() { + return RecordCase.forNumber(recordCase_); + } + + public Builder clearRecord() { + recordCase_ = 0; + record_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Builder, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecordOrBuilder> + dataChangeRecordBuilder_; + + /** + * + * + *
    +     * Data change record describing a data change for a change stream
    +     * partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord data_change_record = 1; + * + * @return Whether the dataChangeRecord field is set. + */ + @java.lang.Override + public boolean hasDataChangeRecord() { + return recordCase_ == 1; + } + + /** + * + * + *
    +     * Data change record describing a data change for a change stream
    +     * partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord data_change_record = 1; + * + * @return The dataChangeRecord. + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord getDataChangeRecord() { + if (dataChangeRecordBuilder_ == null) { + if (recordCase_ == 1) { + return (com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord) record_; + } + return com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.getDefaultInstance(); + } else { + if (recordCase_ == 1) { + return dataChangeRecordBuilder_.getMessage(); + } + return com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Data change record describing a data change for a change stream
    +     * partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord data_change_record = 1; + */ + public Builder setDataChangeRecord( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord value) { + if (dataChangeRecordBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + record_ = value; + onChanged(); + } else { + dataChangeRecordBuilder_.setMessage(value); + } + recordCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Data change record describing a data change for a change stream
    +     * partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord data_change_record = 1; + */ + public Builder setDataChangeRecord( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Builder builderForValue) { + if (dataChangeRecordBuilder_ == null) { + record_ = builderForValue.build(); + onChanged(); + } else { + dataChangeRecordBuilder_.setMessage(builderForValue.build()); + } + recordCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Data change record describing a data change for a change stream
    +     * partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord data_change_record = 1; + */ + public Builder mergeDataChangeRecord( + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord value) { + if (dataChangeRecordBuilder_ == null) { + if (recordCase_ == 1 + && record_ + != com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.getDefaultInstance()) { + record_ = + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.newBuilder( + (com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord) record_) + .mergeFrom(value) + .buildPartial(); + } else { + record_ = value; + } + onChanged(); + } else { + if (recordCase_ == 1) { + dataChangeRecordBuilder_.mergeFrom(value); + } else { + dataChangeRecordBuilder_.setMessage(value); + } + } + recordCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Data change record describing a data change for a change stream
    +     * partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord data_change_record = 1; + */ + public Builder clearDataChangeRecord() { + if (dataChangeRecordBuilder_ == null) { + if (recordCase_ == 1) { + recordCase_ = 0; + record_ = null; + onChanged(); + } + } else { + if (recordCase_ == 1) { + recordCase_ = 0; + record_ = null; + } + dataChangeRecordBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Data change record describing a data change for a change stream
    +     * partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord data_change_record = 1; + */ + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Builder + getDataChangeRecordBuilder() { + return internalGetDataChangeRecordFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Data change record describing a data change for a change stream
    +     * partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord data_change_record = 1; + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.DataChangeRecordOrBuilder + getDataChangeRecordOrBuilder() { + if ((recordCase_ == 1) && (dataChangeRecordBuilder_ != null)) { + return dataChangeRecordBuilder_.getMessageOrBuilder(); + } else { + if (recordCase_ == 1) { + return (com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord) record_; + } + return com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Data change record describing a data change for a change stream
    +     * partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord data_change_record = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Builder, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecordOrBuilder> + internalGetDataChangeRecordFieldBuilder() { + if (dataChangeRecordBuilder_ == null) { + if (!(recordCase_ == 1)) { + record_ = com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.getDefaultInstance(); + } + dataChangeRecordBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Builder, + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecordOrBuilder>( + (com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord) record_, + getParentForChildren(), + isClean()); + record_ = null; + } + recordCase_ = 1; + onChanged(); + return dataChangeRecordBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord, + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord.Builder, + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecordOrBuilder> + heartbeatRecordBuilder_; + + /** + * + * + *
    +     * Heartbeat record describing a heartbeat for a change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.HeartbeatRecord heartbeat_record = 2; + * + * @return Whether the heartbeatRecord field is set. + */ + @java.lang.Override + public boolean hasHeartbeatRecord() { + return recordCase_ == 2; + } + + /** + * + * + *
    +     * Heartbeat record describing a heartbeat for a change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.HeartbeatRecord heartbeat_record = 2; + * + * @return The heartbeatRecord. + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord getHeartbeatRecord() { + if (heartbeatRecordBuilder_ == null) { + if (recordCase_ == 2) { + return (com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord) record_; + } + return com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord.getDefaultInstance(); + } else { + if (recordCase_ == 2) { + return heartbeatRecordBuilder_.getMessage(); + } + return com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Heartbeat record describing a heartbeat for a change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.HeartbeatRecord heartbeat_record = 2; + */ + public Builder setHeartbeatRecord( + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord value) { + if (heartbeatRecordBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + record_ = value; + onChanged(); + } else { + heartbeatRecordBuilder_.setMessage(value); + } + recordCase_ = 2; + return this; + } + + /** + * + * + *
    +     * Heartbeat record describing a heartbeat for a change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.HeartbeatRecord heartbeat_record = 2; + */ + public Builder setHeartbeatRecord( + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord.Builder builderForValue) { + if (heartbeatRecordBuilder_ == null) { + record_ = builderForValue.build(); + onChanged(); + } else { + heartbeatRecordBuilder_.setMessage(builderForValue.build()); + } + recordCase_ = 2; + return this; + } + + /** + * + * + *
    +     * Heartbeat record describing a heartbeat for a change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.HeartbeatRecord heartbeat_record = 2; + */ + public Builder mergeHeartbeatRecord( + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord value) { + if (heartbeatRecordBuilder_ == null) { + if (recordCase_ == 2 + && record_ + != com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord.getDefaultInstance()) { + record_ = + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord.newBuilder( + (com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord) record_) + .mergeFrom(value) + .buildPartial(); + } else { + record_ = value; + } + onChanged(); + } else { + if (recordCase_ == 2) { + heartbeatRecordBuilder_.mergeFrom(value); + } else { + heartbeatRecordBuilder_.setMessage(value); + } + } + recordCase_ = 2; + return this; + } + + /** + * + * + *
    +     * Heartbeat record describing a heartbeat for a change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.HeartbeatRecord heartbeat_record = 2; + */ + public Builder clearHeartbeatRecord() { + if (heartbeatRecordBuilder_ == null) { + if (recordCase_ == 2) { + recordCase_ = 0; + record_ = null; + onChanged(); + } + } else { + if (recordCase_ == 2) { + recordCase_ = 0; + record_ = null; + } + heartbeatRecordBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Heartbeat record describing a heartbeat for a change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.HeartbeatRecord heartbeat_record = 2; + */ + public com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord.Builder + getHeartbeatRecordBuilder() { + return internalGetHeartbeatRecordFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Heartbeat record describing a heartbeat for a change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.HeartbeatRecord heartbeat_record = 2; + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecordOrBuilder + getHeartbeatRecordOrBuilder() { + if ((recordCase_ == 2) && (heartbeatRecordBuilder_ != null)) { + return heartbeatRecordBuilder_.getMessageOrBuilder(); + } else { + if (recordCase_ == 2) { + return (com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord) record_; + } + return com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Heartbeat record describing a heartbeat for a change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.HeartbeatRecord heartbeat_record = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord, + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord.Builder, + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecordOrBuilder> + internalGetHeartbeatRecordFieldBuilder() { + if (heartbeatRecordBuilder_ == null) { + if (!(recordCase_ == 2)) { + record_ = com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord.getDefaultInstance(); + } + heartbeatRecordBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord, + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord.Builder, + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecordOrBuilder>( + (com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord) record_, + getParentForChildren(), + isClean()); + record_ = null; + } + recordCase_ = 2; + onChanged(); + return heartbeatRecordBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord, + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord.Builder, + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecordOrBuilder> + partitionStartRecordBuilder_; + + /** + * + * + *
    +     * Partition start record describing a new change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionStartRecord partition_start_record = 3; + * + * + * @return Whether the partitionStartRecord field is set. + */ + @java.lang.Override + public boolean hasPartitionStartRecord() { + return recordCase_ == 3; + } + + /** + * + * + *
    +     * Partition start record describing a new change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionStartRecord partition_start_record = 3; + * + * + * @return The partitionStartRecord. + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord getPartitionStartRecord() { + if (partitionStartRecordBuilder_ == null) { + if (recordCase_ == 3) { + return (com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord) record_; + } + return com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord.getDefaultInstance(); + } else { + if (recordCase_ == 3) { + return partitionStartRecordBuilder_.getMessage(); + } + return com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Partition start record describing a new change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionStartRecord partition_start_record = 3; + * + */ + public Builder setPartitionStartRecord( + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord value) { + if (partitionStartRecordBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + record_ = value; + onChanged(); + } else { + partitionStartRecordBuilder_.setMessage(value); + } + recordCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Partition start record describing a new change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionStartRecord partition_start_record = 3; + * + */ + public Builder setPartitionStartRecord( + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord.Builder builderForValue) { + if (partitionStartRecordBuilder_ == null) { + record_ = builderForValue.build(); + onChanged(); + } else { + partitionStartRecordBuilder_.setMessage(builderForValue.build()); + } + recordCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Partition start record describing a new change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionStartRecord partition_start_record = 3; + * + */ + public Builder mergePartitionStartRecord( + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord value) { + if (partitionStartRecordBuilder_ == null) { + if (recordCase_ == 3 + && record_ + != com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord + .getDefaultInstance()) { + record_ = + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord.newBuilder( + (com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord) record_) + .mergeFrom(value) + .buildPartial(); + } else { + record_ = value; + } + onChanged(); + } else { + if (recordCase_ == 3) { + partitionStartRecordBuilder_.mergeFrom(value); + } else { + partitionStartRecordBuilder_.setMessage(value); + } + } + recordCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Partition start record describing a new change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionStartRecord partition_start_record = 3; + * + */ + public Builder clearPartitionStartRecord() { + if (partitionStartRecordBuilder_ == null) { + if (recordCase_ == 3) { + recordCase_ = 0; + record_ = null; + onChanged(); + } + } else { + if (recordCase_ == 3) { + recordCase_ = 0; + record_ = null; + } + partitionStartRecordBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Partition start record describing a new change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionStartRecord partition_start_record = 3; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord.Builder + getPartitionStartRecordBuilder() { + return internalGetPartitionStartRecordFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Partition start record describing a new change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionStartRecord partition_start_record = 3; + * + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecordOrBuilder + getPartitionStartRecordOrBuilder() { + if ((recordCase_ == 3) && (partitionStartRecordBuilder_ != null)) { + return partitionStartRecordBuilder_.getMessageOrBuilder(); + } else { + if (recordCase_ == 3) { + return (com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord) record_; + } + return com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Partition start record describing a new change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionStartRecord partition_start_record = 3; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord, + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord.Builder, + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecordOrBuilder> + internalGetPartitionStartRecordFieldBuilder() { + if (partitionStartRecordBuilder_ == null) { + if (!(recordCase_ == 3)) { + record_ = + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord.getDefaultInstance(); + } + partitionStartRecordBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord, + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord.Builder, + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecordOrBuilder>( + (com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord) record_, + getParentForChildren(), + isClean()); + record_ = null; + } + recordCase_ = 3; + onChanged(); + return partitionStartRecordBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord, + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.Builder, + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecordOrBuilder> + partitionEndRecordBuilder_; + + /** + * + * + *
    +     * Partition end record describing a terminated change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEndRecord partition_end_record = 4; + * + * + * @return Whether the partitionEndRecord field is set. + */ + @java.lang.Override + public boolean hasPartitionEndRecord() { + return recordCase_ == 4; + } + + /** + * + * + *
    +     * Partition end record describing a terminated change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEndRecord partition_end_record = 4; + * + * + * @return The partitionEndRecord. + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord getPartitionEndRecord() { + if (partitionEndRecordBuilder_ == null) { + if (recordCase_ == 4) { + return (com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord) record_; + } + return com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.getDefaultInstance(); + } else { + if (recordCase_ == 4) { + return partitionEndRecordBuilder_.getMessage(); + } + return com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Partition end record describing a terminated change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEndRecord partition_end_record = 4; + * + */ + public Builder setPartitionEndRecord( + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord value) { + if (partitionEndRecordBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + record_ = value; + onChanged(); + } else { + partitionEndRecordBuilder_.setMessage(value); + } + recordCase_ = 4; + return this; + } + + /** + * + * + *
    +     * Partition end record describing a terminated change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEndRecord partition_end_record = 4; + * + */ + public Builder setPartitionEndRecord( + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.Builder builderForValue) { + if (partitionEndRecordBuilder_ == null) { + record_ = builderForValue.build(); + onChanged(); + } else { + partitionEndRecordBuilder_.setMessage(builderForValue.build()); + } + recordCase_ = 4; + return this; + } + + /** + * + * + *
    +     * Partition end record describing a terminated change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEndRecord partition_end_record = 4; + * + */ + public Builder mergePartitionEndRecord( + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord value) { + if (partitionEndRecordBuilder_ == null) { + if (recordCase_ == 4 + && record_ + != com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord + .getDefaultInstance()) { + record_ = + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.newBuilder( + (com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord) record_) + .mergeFrom(value) + .buildPartial(); + } else { + record_ = value; + } + onChanged(); + } else { + if (recordCase_ == 4) { + partitionEndRecordBuilder_.mergeFrom(value); + } else { + partitionEndRecordBuilder_.setMessage(value); + } + } + recordCase_ = 4; + return this; + } + + /** + * + * + *
    +     * Partition end record describing a terminated change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEndRecord partition_end_record = 4; + * + */ + public Builder clearPartitionEndRecord() { + if (partitionEndRecordBuilder_ == null) { + if (recordCase_ == 4) { + recordCase_ = 0; + record_ = null; + onChanged(); + } + } else { + if (recordCase_ == 4) { + recordCase_ = 0; + record_ = null; + } + partitionEndRecordBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Partition end record describing a terminated change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEndRecord partition_end_record = 4; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.Builder + getPartitionEndRecordBuilder() { + return internalGetPartitionEndRecordFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Partition end record describing a terminated change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEndRecord partition_end_record = 4; + * + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecordOrBuilder + getPartitionEndRecordOrBuilder() { + if ((recordCase_ == 4) && (partitionEndRecordBuilder_ != null)) { + return partitionEndRecordBuilder_.getMessageOrBuilder(); + } else { + if (recordCase_ == 4) { + return (com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord) record_; + } + return com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Partition end record describing a terminated change stream partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEndRecord partition_end_record = 4; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord, + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.Builder, + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecordOrBuilder> + internalGetPartitionEndRecordFieldBuilder() { + if (partitionEndRecordBuilder_ == null) { + if (!(recordCase_ == 4)) { + record_ = + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.getDefaultInstance(); + } + partitionEndRecordBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord, + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.Builder, + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecordOrBuilder>( + (com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord) record_, + getParentForChildren(), + isClean()); + record_ = null; + } + recordCase_ = 4; + onChanged(); + return partitionEndRecordBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.Builder, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecordOrBuilder> + partitionEventRecordBuilder_; + + /** + * + * + *
    +     * Partition event record describing key range changes for a change stream
    +     * partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord partition_event_record = 5; + * + * + * @return Whether the partitionEventRecord field is set. + */ + @java.lang.Override + public boolean hasPartitionEventRecord() { + return recordCase_ == 5; + } + + /** + * + * + *
    +     * Partition event record describing key range changes for a change stream
    +     * partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord partition_event_record = 5; + * + * + * @return The partitionEventRecord. + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord getPartitionEventRecord() { + if (partitionEventRecordBuilder_ == null) { + if (recordCase_ == 5) { + return (com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord) record_; + } + return com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.getDefaultInstance(); + } else { + if (recordCase_ == 5) { + return partitionEventRecordBuilder_.getMessage(); + } + return com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Partition event record describing key range changes for a change stream
    +     * partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord partition_event_record = 5; + * + */ + public Builder setPartitionEventRecord( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord value) { + if (partitionEventRecordBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + record_ = value; + onChanged(); + } else { + partitionEventRecordBuilder_.setMessage(value); + } + recordCase_ = 5; + return this; + } + + /** + * + * + *
    +     * Partition event record describing key range changes for a change stream
    +     * partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord partition_event_record = 5; + * + */ + public Builder setPartitionEventRecord( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.Builder builderForValue) { + if (partitionEventRecordBuilder_ == null) { + record_ = builderForValue.build(); + onChanged(); + } else { + partitionEventRecordBuilder_.setMessage(builderForValue.build()); + } + recordCase_ = 5; + return this; + } + + /** + * + * + *
    +     * Partition event record describing key range changes for a change stream
    +     * partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord partition_event_record = 5; + * + */ + public Builder mergePartitionEventRecord( + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord value) { + if (partitionEventRecordBuilder_ == null) { + if (recordCase_ == 5 + && record_ + != com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord + .getDefaultInstance()) { + record_ = + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.newBuilder( + (com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord) record_) + .mergeFrom(value) + .buildPartial(); + } else { + record_ = value; + } + onChanged(); + } else { + if (recordCase_ == 5) { + partitionEventRecordBuilder_.mergeFrom(value); + } else { + partitionEventRecordBuilder_.setMessage(value); + } + } + recordCase_ = 5; + return this; + } + + /** + * + * + *
    +     * Partition event record describing key range changes for a change stream
    +     * partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord partition_event_record = 5; + * + */ + public Builder clearPartitionEventRecord() { + if (partitionEventRecordBuilder_ == null) { + if (recordCase_ == 5) { + recordCase_ = 0; + record_ = null; + onChanged(); + } + } else { + if (recordCase_ == 5) { + recordCase_ = 0; + record_ = null; + } + partitionEventRecordBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Partition event record describing key range changes for a change stream
    +     * partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord partition_event_record = 5; + * + */ + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.Builder + getPartitionEventRecordBuilder() { + return internalGetPartitionEventRecordFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Partition event record describing key range changes for a change stream
    +     * partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord partition_event_record = 5; + * + */ + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecordOrBuilder + getPartitionEventRecordOrBuilder() { + if ((recordCase_ == 5) && (partitionEventRecordBuilder_ != null)) { + return partitionEventRecordBuilder_.getMessageOrBuilder(); + } else { + if (recordCase_ == 5) { + return (com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord) record_; + } + return com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Partition event record describing key range changes for a change stream
    +     * partition.
    +     * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord partition_event_record = 5; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.Builder, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecordOrBuilder> + internalGetPartitionEventRecordFieldBuilder() { + if (partitionEventRecordBuilder_ == null) { + if (!(recordCase_ == 5)) { + record_ = + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.getDefaultInstance(); + } + partitionEventRecordBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.Builder, + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecordOrBuilder>( + (com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord) record_, + getParentForChildren(), + isClean()); + record_ = null; + } + recordCase_ = 5; + onChanged(); + return partitionEventRecordBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ChangeStreamRecord) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ChangeStreamRecord) + private static final com.google.spanner.v1.ChangeStreamRecord DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.ChangeStreamRecord(); + } + + public static com.google.spanner.v1.ChangeStreamRecord getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ChangeStreamRecord parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.ChangeStreamRecord getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ChangeStreamRecordOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ChangeStreamRecordOrBuilder.java new file mode 100644 index 000000000000..7b72f8a5d947 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ChangeStreamRecordOrBuilder.java @@ -0,0 +1,230 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/change_stream.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface ChangeStreamRecordOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ChangeStreamRecord) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Data change record describing a data change for a change stream
    +   * partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord data_change_record = 1; + * + * @return Whether the dataChangeRecord field is set. + */ + boolean hasDataChangeRecord(); + + /** + * + * + *
    +   * Data change record describing a data change for a change stream
    +   * partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord data_change_record = 1; + * + * @return The dataChangeRecord. + */ + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecord getDataChangeRecord(); + + /** + * + * + *
    +   * Data change record describing a data change for a change stream
    +   * partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.DataChangeRecord data_change_record = 1; + */ + com.google.spanner.v1.ChangeStreamRecord.DataChangeRecordOrBuilder getDataChangeRecordOrBuilder(); + + /** + * + * + *
    +   * Heartbeat record describing a heartbeat for a change stream partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.HeartbeatRecord heartbeat_record = 2; + * + * @return Whether the heartbeatRecord field is set. + */ + boolean hasHeartbeatRecord(); + + /** + * + * + *
    +   * Heartbeat record describing a heartbeat for a change stream partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.HeartbeatRecord heartbeat_record = 2; + * + * @return The heartbeatRecord. + */ + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecord getHeartbeatRecord(); + + /** + * + * + *
    +   * Heartbeat record describing a heartbeat for a change stream partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.HeartbeatRecord heartbeat_record = 2; + */ + com.google.spanner.v1.ChangeStreamRecord.HeartbeatRecordOrBuilder getHeartbeatRecordOrBuilder(); + + /** + * + * + *
    +   * Partition start record describing a new change stream partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionStartRecord partition_start_record = 3; + * + * + * @return Whether the partitionStartRecord field is set. + */ + boolean hasPartitionStartRecord(); + + /** + * + * + *
    +   * Partition start record describing a new change stream partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionStartRecord partition_start_record = 3; + * + * + * @return The partitionStartRecord. + */ + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecord getPartitionStartRecord(); + + /** + * + * + *
    +   * Partition start record describing a new change stream partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionStartRecord partition_start_record = 3; + * + */ + com.google.spanner.v1.ChangeStreamRecord.PartitionStartRecordOrBuilder + getPartitionStartRecordOrBuilder(); + + /** + * + * + *
    +   * Partition end record describing a terminated change stream partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEndRecord partition_end_record = 4; + * + * @return Whether the partitionEndRecord field is set. + */ + boolean hasPartitionEndRecord(); + + /** + * + * + *
    +   * Partition end record describing a terminated change stream partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEndRecord partition_end_record = 4; + * + * @return The partitionEndRecord. + */ + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecord getPartitionEndRecord(); + + /** + * + * + *
    +   * Partition end record describing a terminated change stream partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEndRecord partition_end_record = 4; + */ + com.google.spanner.v1.ChangeStreamRecord.PartitionEndRecordOrBuilder + getPartitionEndRecordOrBuilder(); + + /** + * + * + *
    +   * Partition event record describing key range changes for a change stream
    +   * partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord partition_event_record = 5; + * + * + * @return Whether the partitionEventRecord field is set. + */ + boolean hasPartitionEventRecord(); + + /** + * + * + *
    +   * Partition event record describing key range changes for a change stream
    +   * partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord partition_event_record = 5; + * + * + * @return The partitionEventRecord. + */ + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecord getPartitionEventRecord(); + + /** + * + * + *
    +   * Partition event record describing key range changes for a change stream
    +   * partition.
    +   * 
    + * + * .google.spanner.v1.ChangeStreamRecord.PartitionEventRecord partition_event_record = 5; + * + */ + com.google.spanner.v1.ChangeStreamRecord.PartitionEventRecordOrBuilder + getPartitionEventRecordOrBuilder(); + + com.google.spanner.v1.ChangeStreamRecord.RecordCase getRecordCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitRequest.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitRequest.java new file mode 100644 index 000000000000..5fc62e537c6a --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitRequest.java @@ -0,0 +1,3252 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * The request for [Commit][google.spanner.v1.Spanner.Commit].
    + * 
    + * + * Protobuf type {@code google.spanner.v1.CommitRequest} + */ +@com.google.protobuf.Generated +public final class CommitRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.CommitRequest) + CommitRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CommitRequest"); + } + + // Use CommitRequest.newBuilder() to construct. + private CommitRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CommitRequest() { + session_ = ""; + mutations_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_CommitRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_CommitRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.CommitRequest.class, + com.google.spanner.v1.CommitRequest.Builder.class); + } + + private int bitField0_; + private int transactionCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object transaction_; + + public enum TransactionCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + TRANSACTION_ID(2), + SINGLE_USE_TRANSACTION(3), + TRANSACTION_NOT_SET(0); + private final int value; + + private TransactionCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. 
+ * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static TransactionCase valueOf(int value) { + return forNumber(value); + } + + public static TransactionCase forNumber(int value) { + switch (value) { + case 2: + return TRANSACTION_ID; + case 3: + return SINGLE_USE_TRANSACTION; + case 0: + return TRANSACTION_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public TransactionCase getTransactionCase() { + return TransactionCase.forNumber(transactionCase_); + } + + public static final int SESSION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object session_ = ""; + + /** + * + * + *
    +   * Required. The session in which the transaction to be committed is running.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + @java.lang.Override + public java.lang.String getSession() { + java.lang.Object ref = session_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + session_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The session in which the transaction to be committed is running.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSessionBytes() { + java.lang.Object ref = session_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + session_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TRANSACTION_ID_FIELD_NUMBER = 2; + + /** + * + * + *
    +   * Commit a previously-started transaction.
    +   * 
    + * + * bytes transaction_id = 2; + * + * @return Whether the transactionId field is set. + */ + @java.lang.Override + public boolean hasTransactionId() { + return transactionCase_ == 2; + } + + /** + * + * + *
    +   * Commit a previously-started transaction.
    +   * 
    + * + * bytes transaction_id = 2; + * + * @return The transactionId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTransactionId() { + if (transactionCase_ == 2) { + return (com.google.protobuf.ByteString) transaction_; + } + return com.google.protobuf.ByteString.EMPTY; + } + + public static final int SINGLE_USE_TRANSACTION_FIELD_NUMBER = 3; + + /** + * + * + *
    +   * Execute mutations in a temporary transaction. Note that unlike
    +   * commit of a previously-started transaction, commit with a
    +   * temporary transaction is non-idempotent. That is, if the
    +   * `CommitRequest` is sent to Cloud Spanner more than once (for
    +   * instance, due to retries in the application, or in the
    +   * transport library), it's possible that the mutations are
    +   * executed more than once. If this is undesirable, use
    +   * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and
    +   * [Commit][google.spanner.v1.Spanner.Commit] instead.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions single_use_transaction = 3; + * + * @return Whether the singleUseTransaction field is set. + */ + @java.lang.Override + public boolean hasSingleUseTransaction() { + return transactionCase_ == 3; + } + + /** + * + * + *
    +   * Execute mutations in a temporary transaction. Note that unlike
    +   * commit of a previously-started transaction, commit with a
    +   * temporary transaction is non-idempotent. That is, if the
    +   * `CommitRequest` is sent to Cloud Spanner more than once (for
    +   * instance, due to retries in the application, or in the
    +   * transport library), it's possible that the mutations are
    +   * executed more than once. If this is undesirable, use
    +   * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and
    +   * [Commit][google.spanner.v1.Spanner.Commit] instead.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions single_use_transaction = 3; + * + * @return The singleUseTransaction. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions getSingleUseTransaction() { + if (transactionCase_ == 3) { + return (com.google.spanner.v1.TransactionOptions) transaction_; + } + return com.google.spanner.v1.TransactionOptions.getDefaultInstance(); + } + + /** + * + * + *
    +   * Execute mutations in a temporary transaction. Note that unlike
    +   * commit of a previously-started transaction, commit with a
    +   * temporary transaction is non-idempotent. That is, if the
    +   * `CommitRequest` is sent to Cloud Spanner more than once (for
    +   * instance, due to retries in the application, or in the
    +   * transport library), it's possible that the mutations are
    +   * executed more than once. If this is undesirable, use
    +   * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and
    +   * [Commit][google.spanner.v1.Spanner.Commit] instead.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions single_use_transaction = 3; + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptionsOrBuilder getSingleUseTransactionOrBuilder() { + if (transactionCase_ == 3) { + return (com.google.spanner.v1.TransactionOptions) transaction_; + } + return com.google.spanner.v1.TransactionOptions.getDefaultInstance(); + } + + public static final int MUTATIONS_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private java.util.List mutations_; + + /** + * + * + *
    +   * The mutations to be executed when this transaction commits. All
    +   * mutations are applied atomically, in the order they appear in
    +   * this list.
    +   * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + @java.lang.Override + public java.util.List getMutationsList() { + return mutations_; + } + + /** + * + * + *
    +   * The mutations to be executed when this transaction commits. All
    +   * mutations are applied atomically, in the order they appear in
    +   * this list.
    +   * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + @java.lang.Override + public java.util.List + getMutationsOrBuilderList() { + return mutations_; + } + + /** + * + * + *
    +   * The mutations to be executed when this transaction commits. All
    +   * mutations are applied atomically, in the order they appear in
    +   * this list.
    +   * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + @java.lang.Override + public int getMutationsCount() { + return mutations_.size(); + } + + /** + * + * + *
    +   * The mutations to be executed when this transaction commits. All
    +   * mutations are applied atomically, in the order they appear in
    +   * this list.
    +   * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + @java.lang.Override + public com.google.spanner.v1.Mutation getMutations(int index) { + return mutations_.get(index); + } + + /** + * + * + *
    +   * The mutations to be executed when this transaction commits. All
    +   * mutations are applied atomically, in the order they appear in
    +   * this list.
    +   * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + @java.lang.Override + public com.google.spanner.v1.MutationOrBuilder getMutationsOrBuilder(int index) { + return mutations_.get(index); + } + + public static final int RETURN_COMMIT_STATS_FIELD_NUMBER = 5; + private boolean returnCommitStats_ = false; + + /** + * + * + *
    +   * If `true`, then statistics related to the transaction is included in
    +   * the [CommitResponse][google.spanner.v1.CommitResponse.commit_stats].
    +   * Default value is `false`.
    +   * 
    + * + * bool return_commit_stats = 5; + * + * @return The returnCommitStats. + */ + @java.lang.Override + public boolean getReturnCommitStats() { + return returnCommitStats_; + } + + public static final int MAX_COMMIT_DELAY_FIELD_NUMBER = 8; + private com.google.protobuf.Duration maxCommitDelay_; + + /** + * + * + *
    +   * Optional. The amount of latency this request is configured to incur in
    +   * order to improve throughput. If this field isn't set, Spanner assumes
    +   * requests are relatively latency sensitive and automatically determines an
    +   * appropriate delay time. You can specify a commit delay value between 0 and
    +   * 500 ms.
    +   * 
    + * + * .google.protobuf.Duration max_commit_delay = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the maxCommitDelay field is set. + */ + @java.lang.Override + public boolean hasMaxCommitDelay() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Optional. The amount of latency this request is configured to incur in
    +   * order to improve throughput. If this field isn't set, Spanner assumes
    +   * requests are relatively latency sensitive and automatically determines an
    +   * appropriate delay time. You can specify a commit delay value between 0 and
    +   * 500 ms.
    +   * 
    + * + * .google.protobuf.Duration max_commit_delay = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The maxCommitDelay. + */ + @java.lang.Override + public com.google.protobuf.Duration getMaxCommitDelay() { + return maxCommitDelay_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : maxCommitDelay_; + } + + /** + * + * + *
    +   * Optional. The amount of latency this request is configured to incur in
    +   * order to improve throughput. If this field isn't set, Spanner assumes
    +   * requests are relatively latency sensitive and automatically determines an
    +   * appropriate delay time. You can specify a commit delay value between 0 and
    +   * 500 ms.
    +   * 
    + * + * .google.protobuf.Duration max_commit_delay = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.DurationOrBuilder getMaxCommitDelayOrBuilder() { + return maxCommitDelay_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : maxCommitDelay_; + } + + public static final int REQUEST_OPTIONS_FIELD_NUMBER = 6; + private com.google.spanner.v1.RequestOptions requestOptions_; + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 6; + * + * @return Whether the requestOptions field is set. + */ + @java.lang.Override + public boolean hasRequestOptions() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 6; + * + * @return The requestOptions. + */ + @java.lang.Override + public com.google.spanner.v1.RequestOptions getRequestOptions() { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 6; + */ + @java.lang.Override + public com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder() { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } + + public static final int PRECOMMIT_TOKEN_FIELD_NUMBER = 9; + private com.google.spanner.v1.MultiplexedSessionPrecommitToken precommitToken_; + + /** + * + * + *
    +   * Optional. If the read-write transaction was executed on a multiplexed
    +   * session, then you must include the precommit token with the highest
    +   * sequence number received in this transaction attempt. Failing to do so
    +   * results in a `FailedPrecondition` error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + @java.lang.Override + public boolean hasPrecommitToken() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * Optional. If the read-write transaction was executed on a multiplexed
    +   * session, then you must include the precommit token with the highest
    +   * sequence number received in this transaction attempt. Failing to do so
    +   * results in a `FailedPrecondition` error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + + /** + * + * + *
    +   * Optional. If the read-write transaction was executed on a multiplexed
    +   * session, then you must include the precommit token with the highest
    +   * sequence number received in this transaction attempt. Failing to do so
    +   * results in a `FailedPrecondition` error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + + public static final int ROUTING_HINT_FIELD_NUMBER = 10; + private com.google.spanner.v1.RoutingHint routingHint_; + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the routingHint field is set. + */ + @java.lang.Override + public boolean hasRoutingHint() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The routingHint. + */ + @java.lang.Override + public com.google.spanner.v1.RoutingHint getRoutingHint() { + return routingHint_ == null + ? com.google.spanner.v1.RoutingHint.getDefaultInstance() + : routingHint_; + } + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.RoutingHintOrBuilder getRoutingHintOrBuilder() { + return routingHint_ == null + ? com.google.spanner.v1.RoutingHint.getDefaultInstance() + : routingHint_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(session_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, session_); + } + if (transactionCase_ == 2) { + output.writeBytes(2, (com.google.protobuf.ByteString) transaction_); + } + if (transactionCase_ == 3) { + output.writeMessage(3, (com.google.spanner.v1.TransactionOptions) transaction_); + } + for (int i = 0; i < mutations_.size(); i++) { + output.writeMessage(4, mutations_.get(i)); + } + if (returnCommitStats_ != false) { + output.writeBool(5, returnCommitStats_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(6, getRequestOptions()); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(8, getMaxCommitDelay()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(9, getPrecommitToken()); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(10, getRoutingHint()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(session_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, session_); + } + if 
(transactionCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeBytesSize( + 2, (com.google.protobuf.ByteString) transaction_); + } + if (transactionCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, (com.google.spanner.v1.TransactionOptions) transaction_); + } + for (int i = 0; i < mutations_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, mutations_.get(i)); + } + if (returnCommitStats_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(5, returnCommitStats_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, getRequestOptions()); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(8, getMaxCommitDelay()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(9, getPrecommitToken()); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(10, getRoutingHint()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.CommitRequest)) { + return super.equals(obj); + } + com.google.spanner.v1.CommitRequest other = (com.google.spanner.v1.CommitRequest) obj; + + if (!getSession().equals(other.getSession())) return false; + if (!getMutationsList().equals(other.getMutationsList())) return false; + if (getReturnCommitStats() != other.getReturnCommitStats()) return false; + if (hasMaxCommitDelay() != other.hasMaxCommitDelay()) return false; + if (hasMaxCommitDelay()) { + if (!getMaxCommitDelay().equals(other.getMaxCommitDelay())) return false; + } + if (hasRequestOptions() != other.hasRequestOptions()) return false; + 
if (hasRequestOptions()) { + if (!getRequestOptions().equals(other.getRequestOptions())) return false; + } + if (hasPrecommitToken() != other.hasPrecommitToken()) return false; + if (hasPrecommitToken()) { + if (!getPrecommitToken().equals(other.getPrecommitToken())) return false; + } + if (hasRoutingHint() != other.hasRoutingHint()) return false; + if (hasRoutingHint()) { + if (!getRoutingHint().equals(other.getRoutingHint())) return false; + } + if (!getTransactionCase().equals(other.getTransactionCase())) return false; + switch (transactionCase_) { + case 2: + if (!getTransactionId().equals(other.getTransactionId())) return false; + break; + case 3: + if (!getSingleUseTransaction().equals(other.getSingleUseTransaction())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SESSION_FIELD_NUMBER; + hash = (53 * hash) + getSession().hashCode(); + if (getMutationsCount() > 0) { + hash = (37 * hash) + MUTATIONS_FIELD_NUMBER; + hash = (53 * hash) + getMutationsList().hashCode(); + } + hash = (37 * hash) + RETURN_COMMIT_STATS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getReturnCommitStats()); + if (hasMaxCommitDelay()) { + hash = (37 * hash) + MAX_COMMIT_DELAY_FIELD_NUMBER; + hash = (53 * hash) + getMaxCommitDelay().hashCode(); + } + if (hasRequestOptions()) { + hash = (37 * hash) + REQUEST_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getRequestOptions().hashCode(); + } + if (hasPrecommitToken()) { + hash = (37 * hash) + PRECOMMIT_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPrecommitToken().hashCode(); + } + if (hasRoutingHint()) { + hash = (37 * hash) + ROUTING_HINT_FIELD_NUMBER; + hash = (53 * hash) + getRoutingHint().hashCode(); + } + switch 
(transactionCase_) { + case 2: + hash = (37 * hash) + TRANSACTION_ID_FIELD_NUMBER; + hash = (53 * hash) + getTransactionId().hashCode(); + break; + case 3: + hash = (37 * hash) + SINGLE_USE_TRANSACTION_FIELD_NUMBER; + hash = (53 * hash) + getSingleUseTransaction().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.CommitRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.CommitRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.CommitRequest parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.CommitRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.CommitRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.CommitRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.CommitRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + 
} + + public static com.google.spanner.v1.CommitRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.CommitRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.CommitRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.CommitRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.CommitRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.CommitRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for [Commit][google.spanner.v1.Spanner.Commit].
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.CommitRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.CommitRequest) + com.google.spanner.v1.CommitRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_CommitRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_CommitRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.CommitRequest.class, + com.google.spanner.v1.CommitRequest.Builder.class); + } + + // Construct using com.google.spanner.v1.CommitRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetMutationsFieldBuilder(); + internalGetMaxCommitDelayFieldBuilder(); + internalGetRequestOptionsFieldBuilder(); + internalGetPrecommitTokenFieldBuilder(); + internalGetRoutingHintFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + session_ = ""; + if (singleUseTransactionBuilder_ != null) { + singleUseTransactionBuilder_.clear(); + } + if (mutationsBuilder_ == null) { + mutations_ = java.util.Collections.emptyList(); + } else { + mutations_ = null; + mutationsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + returnCommitStats_ = false; + maxCommitDelay_ = null; + if (maxCommitDelayBuilder_ != null) { + 
maxCommitDelayBuilder_.dispose(); + maxCommitDelayBuilder_ = null; + } + requestOptions_ = null; + if (requestOptionsBuilder_ != null) { + requestOptionsBuilder_.dispose(); + requestOptionsBuilder_ = null; + } + precommitToken_ = null; + if (precommitTokenBuilder_ != null) { + precommitTokenBuilder_.dispose(); + precommitTokenBuilder_ = null; + } + routingHint_ = null; + if (routingHintBuilder_ != null) { + routingHintBuilder_.dispose(); + routingHintBuilder_ = null; + } + transactionCase_ = 0; + transaction_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_CommitRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.CommitRequest getDefaultInstanceForType() { + return com.google.spanner.v1.CommitRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.CommitRequest build() { + com.google.spanner.v1.CommitRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.CommitRequest buildPartial() { + com.google.spanner.v1.CommitRequest result = new com.google.spanner.v1.CommitRequest(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.v1.CommitRequest result) { + if (mutationsBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0)) { + mutations_ = java.util.Collections.unmodifiableList(mutations_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.mutations_ = mutations_; + } else { + result.mutations_ = mutationsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.CommitRequest result) { + int from_bitField0_ = 
bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.session_ = session_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.returnCommitStats_ = returnCommitStats_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000020) != 0)) { + result.maxCommitDelay_ = + maxCommitDelayBuilder_ == null ? maxCommitDelay_ : maxCommitDelayBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.requestOptions_ = + requestOptionsBuilder_ == null ? requestOptions_ : requestOptionsBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.precommitToken_ = + precommitTokenBuilder_ == null ? precommitToken_ : precommitTokenBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.routingHint_ = + routingHintBuilder_ == null ? routingHint_ : routingHintBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs(com.google.spanner.v1.CommitRequest result) { + result.transactionCase_ = transactionCase_; + result.transaction_ = this.transaction_; + if (transactionCase_ == 3 && singleUseTransactionBuilder_ != null) { + result.transaction_ = singleUseTransactionBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.CommitRequest) { + return mergeFrom((com.google.spanner.v1.CommitRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.CommitRequest other) { + if (other == com.google.spanner.v1.CommitRequest.getDefaultInstance()) return this; + if (!other.getSession().isEmpty()) { + session_ = other.session_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (mutationsBuilder_ == null) { + if (!other.mutations_.isEmpty()) { + if (mutations_.isEmpty()) { + 
mutations_ = other.mutations_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureMutationsIsMutable(); + mutations_.addAll(other.mutations_); + } + onChanged(); + } + } else { + if (!other.mutations_.isEmpty()) { + if (mutationsBuilder_.isEmpty()) { + mutationsBuilder_.dispose(); + mutationsBuilder_ = null; + mutations_ = other.mutations_; + bitField0_ = (bitField0_ & ~0x00000008); + mutationsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetMutationsFieldBuilder() + : null; + } else { + mutationsBuilder_.addAllMessages(other.mutations_); + } + } + } + if (other.getReturnCommitStats() != false) { + setReturnCommitStats(other.getReturnCommitStats()); + } + if (other.hasMaxCommitDelay()) { + mergeMaxCommitDelay(other.getMaxCommitDelay()); + } + if (other.hasRequestOptions()) { + mergeRequestOptions(other.getRequestOptions()); + } + if (other.hasPrecommitToken()) { + mergePrecommitToken(other.getPrecommitToken()); + } + if (other.hasRoutingHint()) { + mergeRoutingHint(other.getRoutingHint()); + } + switch (other.getTransactionCase()) { + case TRANSACTION_ID: + { + setTransactionId(other.getTransactionId()); + break; + } + case SINGLE_USE_TRANSACTION: + { + mergeSingleUseTransaction(other.getSingleUseTransaction()); + break; + } + case TRANSACTION_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + session_ = input.readStringRequireUtf8(); + bitField0_ |= 
0x00000001; + break; + } // case 10 + case 18: + { + transaction_ = input.readBytes(); + transactionCase_ = 2; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetSingleUseTransactionFieldBuilder().getBuilder(), extensionRegistry); + transactionCase_ = 3; + break; + } // case 26 + case 34: + { + com.google.spanner.v1.Mutation m = + input.readMessage(com.google.spanner.v1.Mutation.parser(), extensionRegistry); + if (mutationsBuilder_ == null) { + ensureMutationsIsMutable(); + mutations_.add(m); + } else { + mutationsBuilder_.addMessage(m); + } + break; + } // case 34 + case 40: + { + returnCommitStats_ = input.readBool(); + bitField0_ |= 0x00000010; + break; + } // case 40 + case 50: + { + input.readMessage( + internalGetRequestOptionsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000040; + break; + } // case 50 + case 66: + { + input.readMessage( + internalGetMaxCommitDelayFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000020; + break; + } // case 66 + case 74: + { + input.readMessage( + internalGetPrecommitTokenFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000080; + break; + } // case 74 + case 82: + { + input.readMessage( + internalGetRoutingHintFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000100; + break; + } // case 82 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int transactionCase_ = 0; + private java.lang.Object transaction_; + + public TransactionCase getTransactionCase() { + return TransactionCase.forNumber(transactionCase_); + } + + public Builder clearTransaction() { + transactionCase_ = 0; + transaction_ = null; + onChanged(); + 
return this; + } + + private int bitField0_; + + private java.lang.Object session_ = ""; + + /** + * + * + *
    +     * Required. The session in which the transaction to be committed is running.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + public java.lang.String getSession() { + java.lang.Object ref = session_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + session_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The session in which the transaction to be committed is running.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + public com.google.protobuf.ByteString getSessionBytes() { + java.lang.Object ref = session_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + session_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The session in which the transaction to be committed is running.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The session to set. + * @return This builder for chaining. + */ + public Builder setSession(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + session_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The session in which the transaction to be committed is running.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearSession() { + session_ = getDefaultInstance().getSession(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The session in which the transaction to be committed is running.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for session to set. + * @return This builder for chaining. + */ + public Builder setSessionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + session_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Commit a previously-started transaction.
    +     * 
    + * + * bytes transaction_id = 2; + * + * @return Whether the transactionId field is set. + */ + public boolean hasTransactionId() { + return transactionCase_ == 2; + } + + /** + * + * + *
    +     * Commit a previously-started transaction.
    +     * 
    + * + * bytes transaction_id = 2; + * + * @return The transactionId. + */ + public com.google.protobuf.ByteString getTransactionId() { + if (transactionCase_ == 2) { + return (com.google.protobuf.ByteString) transaction_; + } + return com.google.protobuf.ByteString.EMPTY; + } + + /** + * + * + *
    +     * Commit a previously-started transaction.
    +     * 
    + * + * bytes transaction_id = 2; + * + * @param value The transactionId to set. + * @return This builder for chaining. + */ + public Builder setTransactionId(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + transactionCase_ = 2; + transaction_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Commit a previously-started transaction.
    +     * 
    + * + * bytes transaction_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearTransactionId() { + if (transactionCase_ == 2) { + transactionCase_ = 0; + transaction_ = null; + onChanged(); + } + return this; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionOptions, + com.google.spanner.v1.TransactionOptions.Builder, + com.google.spanner.v1.TransactionOptionsOrBuilder> + singleUseTransactionBuilder_; + + /** + * + * + *
    +     * Execute mutations in a temporary transaction. Note that unlike
    +     * commit of a previously-started transaction, commit with a
    +     * temporary transaction is non-idempotent. That is, if the
    +     * `CommitRequest` is sent to Cloud Spanner more than once (for
    +     * instance, due to retries in the application, or in the
    +     * transport library), it's possible that the mutations are
    +     * executed more than once. If this is undesirable, use
    +     * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and
    +     * [Commit][google.spanner.v1.Spanner.Commit] instead.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions single_use_transaction = 3; + * + * @return Whether the singleUseTransaction field is set. + */ + @java.lang.Override + public boolean hasSingleUseTransaction() { + return transactionCase_ == 3; + } + + /** + * + * + *
    +     * Execute mutations in a temporary transaction. Note that unlike
    +     * commit of a previously-started transaction, commit with a
    +     * temporary transaction is non-idempotent. That is, if the
    +     * `CommitRequest` is sent to Cloud Spanner more than once (for
    +     * instance, due to retries in the application, or in the
    +     * transport library), it's possible that the mutations are
    +     * executed more than once. If this is undesirable, use
    +     * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and
    +     * [Commit][google.spanner.v1.Spanner.Commit] instead.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions single_use_transaction = 3; + * + * @return The singleUseTransaction. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions getSingleUseTransaction() { + if (singleUseTransactionBuilder_ == null) { + if (transactionCase_ == 3) { + return (com.google.spanner.v1.TransactionOptions) transaction_; + } + return com.google.spanner.v1.TransactionOptions.getDefaultInstance(); + } else { + if (transactionCase_ == 3) { + return singleUseTransactionBuilder_.getMessage(); + } + return com.google.spanner.v1.TransactionOptions.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Execute mutations in a temporary transaction. Note that unlike
    +     * commit of a previously-started transaction, commit with a
    +     * temporary transaction is non-idempotent. That is, if the
    +     * `CommitRequest` is sent to Cloud Spanner more than once (for
    +     * instance, due to retries in the application, or in the
    +     * transport library), it's possible that the mutations are
    +     * executed more than once. If this is undesirable, use
    +     * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and
    +     * [Commit][google.spanner.v1.Spanner.Commit] instead.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions single_use_transaction = 3; + */ + public Builder setSingleUseTransaction(com.google.spanner.v1.TransactionOptions value) { + if (singleUseTransactionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + transaction_ = value; + onChanged(); + } else { + singleUseTransactionBuilder_.setMessage(value); + } + transactionCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Execute mutations in a temporary transaction. Note that unlike
    +     * commit of a previously-started transaction, commit with a
    +     * temporary transaction is non-idempotent. That is, if the
    +     * `CommitRequest` is sent to Cloud Spanner more than once (for
    +     * instance, due to retries in the application, or in the
    +     * transport library), it's possible that the mutations are
    +     * executed more than once. If this is undesirable, use
    +     * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and
    +     * [Commit][google.spanner.v1.Spanner.Commit] instead.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions single_use_transaction = 3; + */ + public Builder setSingleUseTransaction( + com.google.spanner.v1.TransactionOptions.Builder builderForValue) { + if (singleUseTransactionBuilder_ == null) { + transaction_ = builderForValue.build(); + onChanged(); + } else { + singleUseTransactionBuilder_.setMessage(builderForValue.build()); + } + transactionCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Execute mutations in a temporary transaction. Note that unlike
    +     * commit of a previously-started transaction, commit with a
    +     * temporary transaction is non-idempotent. That is, if the
    +     * `CommitRequest` is sent to Cloud Spanner more than once (for
    +     * instance, due to retries in the application, or in the
    +     * transport library), it's possible that the mutations are
    +     * executed more than once. If this is undesirable, use
    +     * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and
    +     * [Commit][google.spanner.v1.Spanner.Commit] instead.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions single_use_transaction = 3; + */ + public Builder mergeSingleUseTransaction(com.google.spanner.v1.TransactionOptions value) { + if (singleUseTransactionBuilder_ == null) { + if (transactionCase_ == 3 + && transaction_ != com.google.spanner.v1.TransactionOptions.getDefaultInstance()) { + transaction_ = + com.google.spanner.v1.TransactionOptions.newBuilder( + (com.google.spanner.v1.TransactionOptions) transaction_) + .mergeFrom(value) + .buildPartial(); + } else { + transaction_ = value; + } + onChanged(); + } else { + if (transactionCase_ == 3) { + singleUseTransactionBuilder_.mergeFrom(value); + } else { + singleUseTransactionBuilder_.setMessage(value); + } + } + transactionCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Execute mutations in a temporary transaction. Note that unlike
    +     * commit of a previously-started transaction, commit with a
    +     * temporary transaction is non-idempotent. That is, if the
    +     * `CommitRequest` is sent to Cloud Spanner more than once (for
    +     * instance, due to retries in the application, or in the
    +     * transport library), it's possible that the mutations are
    +     * executed more than once. If this is undesirable, use
    +     * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and
    +     * [Commit][google.spanner.v1.Spanner.Commit] instead.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions single_use_transaction = 3; + */ + public Builder clearSingleUseTransaction() { + if (singleUseTransactionBuilder_ == null) { + if (transactionCase_ == 3) { + transactionCase_ = 0; + transaction_ = null; + onChanged(); + } + } else { + if (transactionCase_ == 3) { + transactionCase_ = 0; + transaction_ = null; + } + singleUseTransactionBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Execute mutations in a temporary transaction. Note that unlike
    +     * commit of a previously-started transaction, commit with a
    +     * temporary transaction is non-idempotent. That is, if the
    +     * `CommitRequest` is sent to Cloud Spanner more than once (for
    +     * instance, due to retries in the application, or in the
    +     * transport library), it's possible that the mutations are
    +     * executed more than once. If this is undesirable, use
    +     * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and
    +     * [Commit][google.spanner.v1.Spanner.Commit] instead.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions single_use_transaction = 3; + */ + public com.google.spanner.v1.TransactionOptions.Builder getSingleUseTransactionBuilder() { + return internalGetSingleUseTransactionFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Execute mutations in a temporary transaction. Note that unlike
    +     * commit of a previously-started transaction, commit with a
    +     * temporary transaction is non-idempotent. That is, if the
    +     * `CommitRequest` is sent to Cloud Spanner more than once (for
    +     * instance, due to retries in the application, or in the
    +     * transport library), it's possible that the mutations are
    +     * executed more than once. If this is undesirable, use
    +     * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and
    +     * [Commit][google.spanner.v1.Spanner.Commit] instead.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions single_use_transaction = 3; + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptionsOrBuilder getSingleUseTransactionOrBuilder() { + if ((transactionCase_ == 3) && (singleUseTransactionBuilder_ != null)) { + return singleUseTransactionBuilder_.getMessageOrBuilder(); + } else { + if (transactionCase_ == 3) { + return (com.google.spanner.v1.TransactionOptions) transaction_; + } + return com.google.spanner.v1.TransactionOptions.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Execute mutations in a temporary transaction. Note that unlike
    +     * commit of a previously-started transaction, commit with a
    +     * temporary transaction is non-idempotent. That is, if the
    +     * `CommitRequest` is sent to Cloud Spanner more than once (for
    +     * instance, due to retries in the application, or in the
    +     * transport library), it's possible that the mutations are
    +     * executed more than once. If this is undesirable, use
    +     * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and
    +     * [Commit][google.spanner.v1.Spanner.Commit] instead.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions single_use_transaction = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionOptions, + com.google.spanner.v1.TransactionOptions.Builder, + com.google.spanner.v1.TransactionOptionsOrBuilder> + internalGetSingleUseTransactionFieldBuilder() { + if (singleUseTransactionBuilder_ == null) { + if (!(transactionCase_ == 3)) { + transaction_ = com.google.spanner.v1.TransactionOptions.getDefaultInstance(); + } + singleUseTransactionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionOptions, + com.google.spanner.v1.TransactionOptions.Builder, + com.google.spanner.v1.TransactionOptionsOrBuilder>( + (com.google.spanner.v1.TransactionOptions) transaction_, + getParentForChildren(), + isClean()); + transaction_ = null; + } + transactionCase_ = 3; + onChanged(); + return singleUseTransactionBuilder_; + } + + private java.util.List mutations_ = + java.util.Collections.emptyList(); + + private void ensureMutationsIsMutable() { + if (!((bitField0_ & 0x00000008) != 0)) { + mutations_ = new java.util.ArrayList(mutations_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Mutation, + com.google.spanner.v1.Mutation.Builder, + com.google.spanner.v1.MutationOrBuilder> + mutationsBuilder_; + + /** + * + * + *
    +     * The mutations to be executed when this transaction commits. All
    +     * mutations are applied atomically, in the order they appear in
    +     * this list.
    +     * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + public java.util.List getMutationsList() { + if (mutationsBuilder_ == null) { + return java.util.Collections.unmodifiableList(mutations_); + } else { + return mutationsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * The mutations to be executed when this transaction commits. All
    +     * mutations are applied atomically, in the order they appear in
    +     * this list.
    +     * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + public int getMutationsCount() { + if (mutationsBuilder_ == null) { + return mutations_.size(); + } else { + return mutationsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * The mutations to be executed when this transaction commits. All
    +     * mutations are applied atomically, in the order they appear in
    +     * this list.
    +     * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + public com.google.spanner.v1.Mutation getMutations(int index) { + if (mutationsBuilder_ == null) { + return mutations_.get(index); + } else { + return mutationsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * The mutations to be executed when this transaction commits. All
    +     * mutations are applied atomically, in the order they appear in
    +     * this list.
    +     * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + public Builder setMutations(int index, com.google.spanner.v1.Mutation value) { + if (mutationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMutationsIsMutable(); + mutations_.set(index, value); + onChanged(); + } else { + mutationsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The mutations to be executed when this transaction commits. All
    +     * mutations are applied atomically, in the order they appear in
    +     * this list.
    +     * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + public Builder setMutations(int index, com.google.spanner.v1.Mutation.Builder builderForValue) { + if (mutationsBuilder_ == null) { + ensureMutationsIsMutable(); + mutations_.set(index, builderForValue.build()); + onChanged(); + } else { + mutationsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The mutations to be executed when this transaction commits. All
    +     * mutations are applied atomically, in the order they appear in
    +     * this list.
    +     * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + public Builder addMutations(com.google.spanner.v1.Mutation value) { + if (mutationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMutationsIsMutable(); + mutations_.add(value); + onChanged(); + } else { + mutationsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * The mutations to be executed when this transaction commits. All
    +     * mutations are applied atomically, in the order they appear in
    +     * this list.
    +     * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + public Builder addMutations(int index, com.google.spanner.v1.Mutation value) { + if (mutationsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMutationsIsMutable(); + mutations_.add(index, value); + onChanged(); + } else { + mutationsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The mutations to be executed when this transaction commits. All
    +     * mutations are applied atomically, in the order they appear in
    +     * this list.
    +     * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + public Builder addMutations(com.google.spanner.v1.Mutation.Builder builderForValue) { + if (mutationsBuilder_ == null) { + ensureMutationsIsMutable(); + mutations_.add(builderForValue.build()); + onChanged(); + } else { + mutationsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The mutations to be executed when this transaction commits. All
    +     * mutations are applied atomically, in the order they appear in
    +     * this list.
    +     * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + public Builder addMutations(int index, com.google.spanner.v1.Mutation.Builder builderForValue) { + if (mutationsBuilder_ == null) { + ensureMutationsIsMutable(); + mutations_.add(index, builderForValue.build()); + onChanged(); + } else { + mutationsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The mutations to be executed when this transaction commits. All
    +     * mutations are applied atomically, in the order they appear in
    +     * this list.
    +     * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + public Builder addAllMutations( + java.lang.Iterable values) { + if (mutationsBuilder_ == null) { + ensureMutationsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, mutations_); + onChanged(); + } else { + mutationsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * The mutations to be executed when this transaction commits. All
    +     * mutations are applied atomically, in the order they appear in
    +     * this list.
    +     * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + public Builder clearMutations() { + if (mutationsBuilder_ == null) { + mutations_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + mutationsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * The mutations to be executed when this transaction commits. All
    +     * mutations are applied atomically, in the order they appear in
    +     * this list.
    +     * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + public Builder removeMutations(int index) { + if (mutationsBuilder_ == null) { + ensureMutationsIsMutable(); + mutations_.remove(index); + onChanged(); + } else { + mutationsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * The mutations to be executed when this transaction commits. All
    +     * mutations are applied atomically, in the order they appear in
    +     * this list.
    +     * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + public com.google.spanner.v1.Mutation.Builder getMutationsBuilder(int index) { + return internalGetMutationsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * The mutations to be executed when this transaction commits. All
    +     * mutations are applied atomically, in the order they appear in
    +     * this list.
    +     * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + public com.google.spanner.v1.MutationOrBuilder getMutationsOrBuilder(int index) { + if (mutationsBuilder_ == null) { + return mutations_.get(index); + } else { + return mutationsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * The mutations to be executed when this transaction commits. All
    +     * mutations are applied atomically, in the order they appear in
    +     * this list.
    +     * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + public java.util.List + getMutationsOrBuilderList() { + if (mutationsBuilder_ != null) { + return mutationsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(mutations_); + } + } + + /** + * + * + *
    +     * The mutations to be executed when this transaction commits. All
    +     * mutations are applied atomically, in the order they appear in
    +     * this list.
    +     * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + public com.google.spanner.v1.Mutation.Builder addMutationsBuilder() { + return internalGetMutationsFieldBuilder() + .addBuilder(com.google.spanner.v1.Mutation.getDefaultInstance()); + } + + /** + * + * + *
    +     * The mutations to be executed when this transaction commits. All
    +     * mutations are applied atomically, in the order they appear in
    +     * this list.
    +     * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + public com.google.spanner.v1.Mutation.Builder addMutationsBuilder(int index) { + return internalGetMutationsFieldBuilder() + .addBuilder(index, com.google.spanner.v1.Mutation.getDefaultInstance()); + } + + /** + * + * + *
    +     * The mutations to be executed when this transaction commits. All
    +     * mutations are applied atomically, in the order they appear in
    +     * this list.
    +     * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + public java.util.List getMutationsBuilderList() { + return internalGetMutationsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Mutation, + com.google.spanner.v1.Mutation.Builder, + com.google.spanner.v1.MutationOrBuilder> + internalGetMutationsFieldBuilder() { + if (mutationsBuilder_ == null) { + mutationsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Mutation, + com.google.spanner.v1.Mutation.Builder, + com.google.spanner.v1.MutationOrBuilder>( + mutations_, ((bitField0_ & 0x00000008) != 0), getParentForChildren(), isClean()); + mutations_ = null; + } + return mutationsBuilder_; + } + + private boolean returnCommitStats_; + + /** + * + * + *
    +     * If `true`, then statistics related to the transaction is included in
    +     * the [CommitResponse][google.spanner.v1.CommitResponse.commit_stats].
    +     * Default value is `false`.
    +     * 
    + * + * bool return_commit_stats = 5; + * + * @return The returnCommitStats. + */ + @java.lang.Override + public boolean getReturnCommitStats() { + return returnCommitStats_; + } + + /** + * + * + *
    +     * If `true`, then statistics related to the transaction is included in
    +     * the [CommitResponse][google.spanner.v1.CommitResponse.commit_stats].
    +     * Default value is `false`.
    +     * 
    + * + * bool return_commit_stats = 5; + * + * @param value The returnCommitStats to set. + * @return This builder for chaining. + */ + public Builder setReturnCommitStats(boolean value) { + + returnCommitStats_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If `true`, then statistics related to the transaction is included in
    +     * the [CommitResponse][google.spanner.v1.CommitResponse.commit_stats].
    +     * Default value is `false`.
    +     * 
    + * + * bool return_commit_stats = 5; + * + * @return This builder for chaining. + */ + public Builder clearReturnCommitStats() { + bitField0_ = (bitField0_ & ~0x00000010); + returnCommitStats_ = false; + onChanged(); + return this; + } + + private com.google.protobuf.Duration maxCommitDelay_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + maxCommitDelayBuilder_; + + /** + * + * + *
    +     * Optional. The amount of latency this request is configured to incur in
    +     * order to improve throughput. If this field isn't set, Spanner assumes
    +     * requests are relatively latency sensitive and automatically determines an
    +     * appropriate delay time. You can specify a commit delay value between 0 and
    +     * 500 ms.
    +     * 
    + * + * + * .google.protobuf.Duration max_commit_delay = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the maxCommitDelay field is set. + */ + public boolean hasMaxCommitDelay() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
    +     * Optional. The amount of latency this request is configured to incur in
    +     * order to improve throughput. If this field isn't set, Spanner assumes
    +     * requests are relatively latency sensitive and automatically determines an
    +     * appropriate delay time. You can specify a commit delay value between 0 and
    +     * 500 ms.
    +     * 
    + * + * + * .google.protobuf.Duration max_commit_delay = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The maxCommitDelay. + */ + public com.google.protobuf.Duration getMaxCommitDelay() { + if (maxCommitDelayBuilder_ == null) { + return maxCommitDelay_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : maxCommitDelay_; + } else { + return maxCommitDelayBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. The amount of latency this request is configured to incur in
    +     * order to improve throughput. If this field isn't set, Spanner assumes
    +     * requests are relatively latency sensitive and automatically determines an
    +     * appropriate delay time. You can specify a commit delay value between 0 and
    +     * 500 ms.
    +     * 
    + * + * + * .google.protobuf.Duration max_commit_delay = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setMaxCommitDelay(com.google.protobuf.Duration value) { + if (maxCommitDelayBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + maxCommitDelay_ = value; + } else { + maxCommitDelayBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The amount of latency this request is configured to incur in
    +     * order to improve throughput. If this field isn't set, Spanner assumes
    +     * requests are relatively latency sensitive and automatically determines an
    +     * appropriate delay time. You can specify a commit delay value between 0 and
    +     * 500 ms.
    +     * 
    + * + * + * .google.protobuf.Duration max_commit_delay = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setMaxCommitDelay(com.google.protobuf.Duration.Builder builderForValue) { + if (maxCommitDelayBuilder_ == null) { + maxCommitDelay_ = builderForValue.build(); + } else { + maxCommitDelayBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The amount of latency this request is configured to incur in
    +     * order to improve throughput. If this field isn't set, Spanner assumes
    +     * requests are relatively latency sensitive and automatically determines an
    +     * appropriate delay time. You can specify a commit delay value between 0 and
    +     * 500 ms.
    +     * 
    + * + * + * .google.protobuf.Duration max_commit_delay = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeMaxCommitDelay(com.google.protobuf.Duration value) { + if (maxCommitDelayBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0) + && maxCommitDelay_ != null + && maxCommitDelay_ != com.google.protobuf.Duration.getDefaultInstance()) { + getMaxCommitDelayBuilder().mergeFrom(value); + } else { + maxCommitDelay_ = value; + } + } else { + maxCommitDelayBuilder_.mergeFrom(value); + } + if (maxCommitDelay_ != null) { + bitField0_ |= 0x00000020; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. The amount of latency this request is configured to incur in
    +     * order to improve throughput. If this field isn't set, Spanner assumes
    +     * requests are relatively latency sensitive and automatically determines an
    +     * appropriate delay time. You can specify a commit delay value between 0 and
    +     * 500 ms.
    +     * 
    + * + * + * .google.protobuf.Duration max_commit_delay = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearMaxCommitDelay() { + bitField0_ = (bitField0_ & ~0x00000020); + maxCommitDelay_ = null; + if (maxCommitDelayBuilder_ != null) { + maxCommitDelayBuilder_.dispose(); + maxCommitDelayBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The amount of latency this request is configured to incur in
    +     * order to improve throughput. If this field isn't set, Spanner assumes
    +     * requests are relatively latency sensitive and automatically determines an
    +     * appropriate delay time. You can specify a commit delay value between 0 and
    +     * 500 ms.
    +     * 
    + * + * + * .google.protobuf.Duration max_commit_delay = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Duration.Builder getMaxCommitDelayBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return internalGetMaxCommitDelayFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. The amount of latency this request is configured to incur in
    +     * order to improve throughput. If this field isn't set, Spanner assumes
    +     * requests are relatively latency sensitive and automatically determines an
    +     * appropriate delay time. You can specify a commit delay value between 0 and
    +     * 500 ms.
    +     * 
    + * + * + * .google.protobuf.Duration max_commit_delay = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.DurationOrBuilder getMaxCommitDelayOrBuilder() { + if (maxCommitDelayBuilder_ != null) { + return maxCommitDelayBuilder_.getMessageOrBuilder(); + } else { + return maxCommitDelay_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : maxCommitDelay_; + } + } + + /** + * + * + *
    +     * Optional. The amount of latency this request is configured to incur in
    +     * order to improve throughput. If this field isn't set, Spanner assumes
    +     * requests are relatively latency sensitive and automatically determines an
    +     * appropriate delay time. You can specify a commit delay value between 0 and
    +     * 500 ms.
    +     * 
    + * + * + * .google.protobuf.Duration max_commit_delay = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + internalGetMaxCommitDelayFieldBuilder() { + if (maxCommitDelayBuilder_ == null) { + maxCommitDelayBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder>( + getMaxCommitDelay(), getParentForChildren(), isClean()); + maxCommitDelay_ = null; + } + return maxCommitDelayBuilder_; + } + + private com.google.spanner.v1.RequestOptions requestOptions_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RequestOptions, + com.google.spanner.v1.RequestOptions.Builder, + com.google.spanner.v1.RequestOptionsOrBuilder> + requestOptionsBuilder_; + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 6; + * + * @return Whether the requestOptions field is set. + */ + public boolean hasRequestOptions() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 6; + * + * @return The requestOptions. + */ + public com.google.spanner.v1.RequestOptions getRequestOptions() { + if (requestOptionsBuilder_ == null) { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } else { + return requestOptionsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 6; + */ + public Builder setRequestOptions(com.google.spanner.v1.RequestOptions value) { + if (requestOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + requestOptions_ = value; + } else { + requestOptionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 6; + */ + public Builder setRequestOptions(com.google.spanner.v1.RequestOptions.Builder builderForValue) { + if (requestOptionsBuilder_ == null) { + requestOptions_ = builderForValue.build(); + } else { + requestOptionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 6; + */ + public Builder mergeRequestOptions(com.google.spanner.v1.RequestOptions value) { + if (requestOptionsBuilder_ == null) { + if (((bitField0_ & 0x00000040) != 0) + && requestOptions_ != null + && requestOptions_ != com.google.spanner.v1.RequestOptions.getDefaultInstance()) { + getRequestOptionsBuilder().mergeFrom(value); + } else { + requestOptions_ = value; + } + } else { + requestOptionsBuilder_.mergeFrom(value); + } + if (requestOptions_ != null) { + bitField0_ |= 0x00000040; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 6; + */ + public Builder clearRequestOptions() { + bitField0_ = (bitField0_ & ~0x00000040); + requestOptions_ = null; + if (requestOptionsBuilder_ != null) { + requestOptionsBuilder_.dispose(); + requestOptionsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 6; + */ + public com.google.spanner.v1.RequestOptions.Builder getRequestOptionsBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return internalGetRequestOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 6; + */ + public com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder() { + if (requestOptionsBuilder_ != null) { + return requestOptionsBuilder_.getMessageOrBuilder(); + } else { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 6; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RequestOptions, + com.google.spanner.v1.RequestOptions.Builder, + com.google.spanner.v1.RequestOptionsOrBuilder> + internalGetRequestOptionsFieldBuilder() { + if (requestOptionsBuilder_ == null) { + requestOptionsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RequestOptions, + com.google.spanner.v1.RequestOptions.Builder, + com.google.spanner.v1.RequestOptionsOrBuilder>( + getRequestOptions(), getParentForChildren(), isClean()); + requestOptions_ = null; + } + return requestOptionsBuilder_; + } + + private com.google.spanner.v1.MultiplexedSessionPrecommitToken precommitToken_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + precommitTokenBuilder_; + + /** + * + * + *
    +     * Optional. If the read-write transaction was executed on a multiplexed
    +     * session, then you must include the precommit token with the highest
    +     * sequence number received in this transaction attempt. Failing to do so
    +     * results in a `FailedPrecondition` error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + public boolean hasPrecommitToken() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
    +     * Optional. If the read-write transaction was executed on a multiplexed
    +     * session, then you must include the precommit token with the highest
    +     * sequence number received in this transaction attempt. Failing to do so
    +     * results in a `FailedPrecondition` error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + if (precommitTokenBuilder_ == null) { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } else { + return precommitTokenBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. If the read-write transaction was executed on a multiplexed
    +     * session, then you must include the precommit token with the highest
    +     * sequence number received in this transaction attempt. Failing to do so
    +     * results in a `FailedPrecondition` error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPrecommitToken(com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + precommitToken_ = value; + } else { + precommitTokenBuilder_.setMessage(value); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. If the read-write transaction was executed on a multiplexed
    +     * session, then you must include the precommit token with the highest
    +     * sequence number received in this transaction attempt. Failing to do so
    +     * results in a `FailedPrecondition` error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder builderForValue) { + if (precommitTokenBuilder_ == null) { + precommitToken_ = builderForValue.build(); + } else { + precommitTokenBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. If the read-write transaction was executed on a multiplexed
    +     * session, then you must include the precommit token with the highest
    +     * sequence number received in this transaction attempt. Failing to do so
    +     * results in a `FailedPrecondition` error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergePrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (((bitField0_ & 0x00000080) != 0) + && precommitToken_ != null + && precommitToken_ + != com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance()) { + getPrecommitTokenBuilder().mergeFrom(value); + } else { + precommitToken_ = value; + } + } else { + precommitTokenBuilder_.mergeFrom(value); + } + if (precommitToken_ != null) { + bitField0_ |= 0x00000080; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. If the read-write transaction was executed on a multiplexed
    +     * session, then you must include the precommit token with the highest
    +     * sequence number received in this transaction attempt. Failing to do so
    +     * results in a `FailedPrecondition` error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearPrecommitToken() { + bitField0_ = (bitField0_ & ~0x00000080); + precommitToken_ = null; + if (precommitTokenBuilder_ != null) { + precommitTokenBuilder_.dispose(); + precommitTokenBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. If the read-write transaction was executed on a multiplexed
    +     * session, then you must include the precommit token with the highest
    +     * sequence number received in this transaction attempt. Failing to do so
    +     * results in a `FailedPrecondition` error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder + getPrecommitTokenBuilder() { + bitField0_ |= 0x00000080; + onChanged(); + return internalGetPrecommitTokenFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. If the read-write transaction was executed on a multiplexed
    +     * session, then you must include the precommit token with the highest
    +     * sequence number received in this transaction attempt. Failing to do so
    +     * results in a `FailedPrecondition` error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + if (precommitTokenBuilder_ != null) { + return precommitTokenBuilder_.getMessageOrBuilder(); + } else { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + } + + /** + * + * + *
    +     * Optional. If the read-write transaction was executed on a multiplexed
    +     * session, then you must include the precommit token with the highest
    +     * sequence number received in this transaction attempt. Failing to do so
    +     * results in a `FailedPrecondition` error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + internalGetPrecommitTokenFieldBuilder() { + if (precommitTokenBuilder_ == null) { + precommitTokenBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder>( + getPrecommitToken(), getParentForChildren(), isClean()); + precommitToken_ = null; + } + return precommitTokenBuilder_; + } + + private com.google.spanner.v1.RoutingHint routingHint_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RoutingHint, + com.google.spanner.v1.RoutingHint.Builder, + com.google.spanner.v1.RoutingHintOrBuilder> + routingHintBuilder_; + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the routingHint field is set. + */ + public boolean hasRoutingHint() { + return ((bitField0_ & 0x00000100) != 0); + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The routingHint. + */ + public com.google.spanner.v1.RoutingHint getRoutingHint() { + if (routingHintBuilder_ == null) { + return routingHint_ == null + ? com.google.spanner.v1.RoutingHint.getDefaultInstance() + : routingHint_; + } else { + return routingHintBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRoutingHint(com.google.spanner.v1.RoutingHint value) { + if (routingHintBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + routingHint_ = value; + } else { + routingHintBuilder_.setMessage(value); + } + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRoutingHint(com.google.spanner.v1.RoutingHint.Builder builderForValue) { + if (routingHintBuilder_ == null) { + routingHint_ = builderForValue.build(); + } else { + routingHintBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeRoutingHint(com.google.spanner.v1.RoutingHint value) { + if (routingHintBuilder_ == null) { + if (((bitField0_ & 0x00000100) != 0) + && routingHint_ != null + && routingHint_ != com.google.spanner.v1.RoutingHint.getDefaultInstance()) { + getRoutingHintBuilder().mergeFrom(value); + } else { + routingHint_ = value; + } + } else { + routingHintBuilder_.mergeFrom(value); + } + if (routingHint_ != null) { + bitField0_ |= 0x00000100; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearRoutingHint() { + bitField0_ = (bitField0_ & ~0x00000100); + routingHint_ = null; + if (routingHintBuilder_ != null) { + routingHintBuilder_.dispose(); + routingHintBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.RoutingHint.Builder getRoutingHintBuilder() { + bitField0_ |= 0x00000100; + onChanged(); + return internalGetRoutingHintFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.RoutingHintOrBuilder getRoutingHintOrBuilder() { + if (routingHintBuilder_ != null) { + return routingHintBuilder_.getMessageOrBuilder(); + } else { + return routingHint_ == null + ? com.google.spanner.v1.RoutingHint.getDefaultInstance() + : routingHint_; + } + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RoutingHint, + com.google.spanner.v1.RoutingHint.Builder, + com.google.spanner.v1.RoutingHintOrBuilder> + internalGetRoutingHintFieldBuilder() { + if (routingHintBuilder_ == null) { + routingHintBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RoutingHint, + com.google.spanner.v1.RoutingHint.Builder, + com.google.spanner.v1.RoutingHintOrBuilder>( + getRoutingHint(), getParentForChildren(), isClean()); + routingHint_ = null; + } + return routingHintBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.CommitRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.CommitRequest) + private static final com.google.spanner.v1.CommitRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.CommitRequest(); + } + + public static com.google.spanner.v1.CommitRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CommitRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return 
builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.CommitRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitRequestOrBuilder.java new file mode 100644 index 000000000000..e38948d301ff --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitRequestOrBuilder.java @@ -0,0 +1,426 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface CommitRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.CommitRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The session in which the transaction to be committed is running.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + java.lang.String getSession(); + + /** + * + * + *
    +   * Required. The session in which the transaction to be committed is running.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + com.google.protobuf.ByteString getSessionBytes(); + + /** + * + * + *
    +   * Commit a previously-started transaction.
    +   * 
    + * + * bytes transaction_id = 2; + * + * @return Whether the transactionId field is set. + */ + boolean hasTransactionId(); + + /** + * + * + *
    +   * Commit a previously-started transaction.
    +   * 
    + * + * bytes transaction_id = 2; + * + * @return The transactionId. + */ + com.google.protobuf.ByteString getTransactionId(); + + /** + * + * + *
    +   * Execute mutations in a temporary transaction. Note that unlike
    +   * commit of a previously-started transaction, commit with a
    +   * temporary transaction is non-idempotent. That is, if the
    +   * `CommitRequest` is sent to Cloud Spanner more than once (for
    +   * instance, due to retries in the application, or in the
    +   * transport library), it's possible that the mutations are
    +   * executed more than once. If this is undesirable, use
    +   * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and
    +   * [Commit][google.spanner.v1.Spanner.Commit] instead.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions single_use_transaction = 3; + * + * @return Whether the singleUseTransaction field is set. + */ + boolean hasSingleUseTransaction(); + + /** + * + * + *
    +   * Execute mutations in a temporary transaction. Note that unlike
    +   * commit of a previously-started transaction, commit with a
    +   * temporary transaction is non-idempotent. That is, if the
    +   * `CommitRequest` is sent to Cloud Spanner more than once (for
    +   * instance, due to retries in the application, or in the
    +   * transport library), it's possible that the mutations are
    +   * executed more than once. If this is undesirable, use
    +   * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and
    +   * [Commit][google.spanner.v1.Spanner.Commit] instead.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions single_use_transaction = 3; + * + * @return The singleUseTransaction. + */ + com.google.spanner.v1.TransactionOptions getSingleUseTransaction(); + + /** + * + * + *
    +   * Execute mutations in a temporary transaction. Note that unlike
    +   * commit of a previously-started transaction, commit with a
    +   * temporary transaction is non-idempotent. That is, if the
    +   * `CommitRequest` is sent to Cloud Spanner more than once (for
    +   * instance, due to retries in the application, or in the
    +   * transport library), it's possible that the mutations are
    +   * executed more than once. If this is undesirable, use
    +   * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and
    +   * [Commit][google.spanner.v1.Spanner.Commit] instead.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions single_use_transaction = 3; + */ + com.google.spanner.v1.TransactionOptionsOrBuilder getSingleUseTransactionOrBuilder(); + + /** + * + * + *
    +   * The mutations to be executed when this transaction commits. All
    +   * mutations are applied atomically, in the order they appear in
    +   * this list.
    +   * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + java.util.List getMutationsList(); + + /** + * + * + *
    +   * The mutations to be executed when this transaction commits. All
    +   * mutations are applied atomically, in the order they appear in
    +   * this list.
    +   * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + com.google.spanner.v1.Mutation getMutations(int index); + + /** + * + * + *
    +   * The mutations to be executed when this transaction commits. All
    +   * mutations are applied atomically, in the order they appear in
    +   * this list.
    +   * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + int getMutationsCount(); + + /** + * + * + *
    +   * The mutations to be executed when this transaction commits. All
    +   * mutations are applied atomically, in the order they appear in
    +   * this list.
    +   * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + java.util.List getMutationsOrBuilderList(); + + /** + * + * + *
    +   * The mutations to be executed when this transaction commits. All
    +   * mutations are applied atomically, in the order they appear in
    +   * this list.
    +   * 
    + * + * repeated .google.spanner.v1.Mutation mutations = 4; + */ + com.google.spanner.v1.MutationOrBuilder getMutationsOrBuilder(int index); + + /** + * + * + *
    +   * If `true`, then statistics related to the transaction is included in
    +   * the [CommitResponse][google.spanner.v1.CommitResponse.commit_stats].
    +   * Default value is `false`.
    +   * 
    + * + * bool return_commit_stats = 5; + * + * @return The returnCommitStats. + */ + boolean getReturnCommitStats(); + + /** + * + * + *
    +   * Optional. The amount of latency this request is configured to incur in
    +   * order to improve throughput. If this field isn't set, Spanner assumes
    +   * requests are relatively latency sensitive and automatically determines an
    +   * appropriate delay time. You can specify a commit delay value between 0 and
    +   * 500 ms.
    +   * 
    + * + * .google.protobuf.Duration max_commit_delay = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the maxCommitDelay field is set. + */ + boolean hasMaxCommitDelay(); + + /** + * + * + *
    +   * Optional. The amount of latency this request is configured to incur in
    +   * order to improve throughput. If this field isn't set, Spanner assumes
    +   * requests are relatively latency sensitive and automatically determines an
    +   * appropriate delay time. You can specify a commit delay value between 0 and
    +   * 500 ms.
    +   * 
    + * + * .google.protobuf.Duration max_commit_delay = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The maxCommitDelay. + */ + com.google.protobuf.Duration getMaxCommitDelay(); + + /** + * + * + *
    +   * Optional. The amount of latency this request is configured to incur in
    +   * order to improve throughput. If this field isn't set, Spanner assumes
    +   * requests are relatively latency sensitive and automatically determines an
    +   * appropriate delay time. You can specify a commit delay value between 0 and
    +   * 500 ms.
    +   * 
    + * + * .google.protobuf.Duration max_commit_delay = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.DurationOrBuilder getMaxCommitDelayOrBuilder(); + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 6; + * + * @return Whether the requestOptions field is set. + */ + boolean hasRequestOptions(); + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 6; + * + * @return The requestOptions. + */ + com.google.spanner.v1.RequestOptions getRequestOptions(); + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 6; + */ + com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder(); + + /** + * + * + *
    +   * Optional. If the read-write transaction was executed on a multiplexed
    +   * session, then you must include the precommit token with the highest
    +   * sequence number received in this transaction attempt. Failing to do so
    +   * results in a `FailedPrecondition` error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + boolean hasPrecommitToken(); + + /** + * + * + *
    +   * Optional. If the read-write transaction was executed on a multiplexed
    +   * session, then you must include the precommit token with the highest
    +   * sequence number received in this transaction attempt. Failing to do so
    +   * results in a `FailedPrecondition` error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken(); + + /** + * + * + *
    +   * Optional. If the read-write transaction was executed on a multiplexed
    +   * session, then you must include the precommit token with the highest
    +   * sequence number received in this transaction attempt. Failing to do so
    +   * results in a `FailedPrecondition` error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder getPrecommitTokenOrBuilder(); + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the routingHint field is set. + */ + boolean hasRoutingHint(); + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The routingHint. + */ + com.google.spanner.v1.RoutingHint getRoutingHint(); + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.RoutingHintOrBuilder getRoutingHintOrBuilder(); + + com.google.spanner.v1.CommitRequest.TransactionCase getTransactionCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponse.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponse.java new file mode 100644 index 000000000000..6819b485f96c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponse.java @@ -0,0 +1,2630 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/commit_response.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * The response for [Commit][google.spanner.v1.Spanner.Commit].
    + * 
    + * + * Protobuf type {@code google.spanner.v1.CommitResponse} + */ +@com.google.protobuf.Generated +public final class CommitResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.CommitResponse) + CommitResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CommitResponse"); + } + + // Use CommitResponse.newBuilder() to construct. + private CommitResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CommitResponse() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.CommitResponseProto + .internal_static_google_spanner_v1_CommitResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.CommitResponseProto + .internal_static_google_spanner_v1_CommitResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.CommitResponse.class, + com.google.spanner.v1.CommitResponse.Builder.class); + } + + public interface CommitStatsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.CommitResponse.CommitStats) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * The total number of mutations for the transaction. Knowing the
    +     * `mutation_count` value can help you maximize the number of mutations
    +     * in a transaction and minimize the number of API round trips. You can
    +     * also monitor this value to prevent transactions from exceeding the system
    +     * [limit](https://cloud.google.com/spanner/quotas#limits_for_creating_reading_updating_and_deleting_data).
    +     * If the number of mutations exceeds the limit, the server returns
    +     * [INVALID_ARGUMENT](https://cloud.google.com/spanner/docs/reference/rest/v1/Code#ENUM_VALUES.INVALID_ARGUMENT).
    +     * 
    + * + * int64 mutation_count = 1; + * + * @return The mutationCount. + */ + long getMutationCount(); + } + + /** + * + * + *
    +   * Additional statistics about a commit.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.CommitResponse.CommitStats} + */ + public static final class CommitStats extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.CommitResponse.CommitStats) + CommitStatsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CommitStats"); + } + + // Use CommitStats.newBuilder() to construct. + private CommitStats(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CommitStats() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.CommitResponseProto + .internal_static_google_spanner_v1_CommitResponse_CommitStats_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.CommitResponseProto + .internal_static_google_spanner_v1_CommitResponse_CommitStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.CommitResponse.CommitStats.class, + com.google.spanner.v1.CommitResponse.CommitStats.Builder.class); + } + + public static final int MUTATION_COUNT_FIELD_NUMBER = 1; + private long mutationCount_ = 0L; + + /** + * + * + *
    +     * The total number of mutations for the transaction. Knowing the
    +     * `mutation_count` value can help you maximize the number of mutations
    +     * in a transaction and minimize the number of API round trips. You can
    +     * also monitor this value to prevent transactions from exceeding the system
    +     * [limit](https://cloud.google.com/spanner/quotas#limits_for_creating_reading_updating_and_deleting_data).
    +     * If the number of mutations exceeds the limit, the server returns
    +     * [INVALID_ARGUMENT](https://cloud.google.com/spanner/docs/reference/rest/v1/Code#ENUM_VALUES.INVALID_ARGUMENT).
    +     * 
    + * + * int64 mutation_count = 1; + * + * @return The mutationCount. + */ + @java.lang.Override + public long getMutationCount() { + return mutationCount_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (mutationCount_ != 0L) { + output.writeInt64(1, mutationCount_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (mutationCount_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, mutationCount_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.CommitResponse.CommitStats)) { + return super.equals(obj); + } + com.google.spanner.v1.CommitResponse.CommitStats other = + (com.google.spanner.v1.CommitResponse.CommitStats) obj; + + if (getMutationCount() != other.getMutationCount()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + MUTATION_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getMutationCount()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static 
com.google.spanner.v1.CommitResponse.CommitStats parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.CommitResponse.CommitStats parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.CommitResponse.CommitStats parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.CommitResponse.CommitStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.CommitResponse.CommitStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.CommitResponse.CommitStats parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.CommitResponse.CommitStats parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.CommitResponse.CommitStats parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static 
com.google.spanner.v1.CommitResponse.CommitStats parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.CommitResponse.CommitStats parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.CommitResponse.CommitStats parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.CommitResponse.CommitStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.CommitResponse.CommitStats prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * Additional statistics about a commit.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.CommitResponse.CommitStats} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.CommitResponse.CommitStats) + com.google.spanner.v1.CommitResponse.CommitStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.CommitResponseProto + .internal_static_google_spanner_v1_CommitResponse_CommitStats_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.CommitResponseProto + .internal_static_google_spanner_v1_CommitResponse_CommitStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.CommitResponse.CommitStats.class, + com.google.spanner.v1.CommitResponse.CommitStats.Builder.class); + } + + // Construct using com.google.spanner.v1.CommitResponse.CommitStats.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + mutationCount_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.CommitResponseProto + .internal_static_google_spanner_v1_CommitResponse_CommitStats_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.CommitResponse.CommitStats getDefaultInstanceForType() { + return com.google.spanner.v1.CommitResponse.CommitStats.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.CommitResponse.CommitStats build() { + com.google.spanner.v1.CommitResponse.CommitStats result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.CommitResponse.CommitStats buildPartial() { + com.google.spanner.v1.CommitResponse.CommitStats result = + new com.google.spanner.v1.CommitResponse.CommitStats(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.CommitResponse.CommitStats result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.mutationCount_ = mutationCount_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.CommitResponse.CommitStats) { + return mergeFrom((com.google.spanner.v1.CommitResponse.CommitStats) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.CommitResponse.CommitStats other) { + if (other == com.google.spanner.v1.CommitResponse.CommitStats.getDefaultInstance()) + return this; + if (other.getMutationCount() != 0L) { + setMutationCount(other.getMutationCount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + mutationCount_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // 
default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long mutationCount_; + + /** + * + * + *
    +       * The total number of mutations for the transaction. Knowing the
    +       * `mutation_count` value can help you maximize the number of mutations
    +       * in a transaction and minimize the number of API round trips. You can
    +       * also monitor this value to prevent transactions from exceeding the system
    +       * [limit](https://cloud.google.com/spanner/quotas#limits_for_creating_reading_updating_and_deleting_data).
    +       * If the number of mutations exceeds the limit, the server returns
    +       * [INVALID_ARGUMENT](https://cloud.google.com/spanner/docs/reference/rest/v1/Code#ENUM_VALUES.INVALID_ARGUMENT).
    +       * 
    + * + * int64 mutation_count = 1; + * + * @return The mutationCount. + */ + @java.lang.Override + public long getMutationCount() { + return mutationCount_; + } + + /** + * + * + *
    +       * The total number of mutations for the transaction. Knowing the
    +       * `mutation_count` value can help you maximize the number of mutations
    +       * in a transaction and minimize the number of API round trips. You can
    +       * also monitor this value to prevent transactions from exceeding the system
    +       * [limit](https://cloud.google.com/spanner/quotas#limits_for_creating_reading_updating_and_deleting_data).
    +       * If the number of mutations exceeds the limit, the server returns
    +       * [INVALID_ARGUMENT](https://cloud.google.com/spanner/docs/reference/rest/v1/Code#ENUM_VALUES.INVALID_ARGUMENT).
    +       * 
    + * + * int64 mutation_count = 1; + * + * @param value The mutationCount to set. + * @return This builder for chaining. + */ + public Builder setMutationCount(long value) { + + mutationCount_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The total number of mutations for the transaction. Knowing the
    +       * `mutation_count` value can help you maximize the number of mutations
    +       * in a transaction and minimize the number of API round trips. You can
    +       * also monitor this value to prevent transactions from exceeding the system
    +       * [limit](https://cloud.google.com/spanner/quotas#limits_for_creating_reading_updating_and_deleting_data).
    +       * If the number of mutations exceeds the limit, the server returns
    +       * [INVALID_ARGUMENT](https://cloud.google.com/spanner/docs/reference/rest/v1/Code#ENUM_VALUES.INVALID_ARGUMENT).
    +       * 
    + * + * int64 mutation_count = 1; + * + * @return This builder for chaining. + */ + public Builder clearMutationCount() { + bitField0_ = (bitField0_ & ~0x00000001); + mutationCount_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.CommitResponse.CommitStats) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.CommitResponse.CommitStats) + private static final com.google.spanner.v1.CommitResponse.CommitStats DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.CommitResponse.CommitStats(); + } + + public static com.google.spanner.v1.CommitResponse.CommitStats getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CommitStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.CommitResponse.CommitStats getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + private int 
multiplexedSessionRetryCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object multiplexedSessionRetry_; + + public enum MultiplexedSessionRetryCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + PRECOMMIT_TOKEN(4), + MULTIPLEXEDSESSIONRETRY_NOT_SET(0); + private final int value; + + private MultiplexedSessionRetryCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static MultiplexedSessionRetryCase valueOf(int value) { + return forNumber(value); + } + + public static MultiplexedSessionRetryCase forNumber(int value) { + switch (value) { + case 4: + return PRECOMMIT_TOKEN; + case 0: + return MULTIPLEXEDSESSIONRETRY_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public MultiplexedSessionRetryCase getMultiplexedSessionRetryCase() { + return MultiplexedSessionRetryCase.forNumber(multiplexedSessionRetryCase_); + } + + public static final int COMMIT_TIMESTAMP_FIELD_NUMBER = 1; + private com.google.protobuf.Timestamp commitTimestamp_; + + /** + * + * + *
    +   * The Cloud Spanner timestamp at which the transaction committed.
    +   * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + * + * @return Whether the commitTimestamp field is set. + */ + @java.lang.Override + public boolean hasCommitTimestamp() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The Cloud Spanner timestamp at which the transaction committed.
    +   * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + * + * @return The commitTimestamp. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCommitTimestamp() { + return commitTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTimestamp_; + } + + /** + * + * + *
    +   * The Cloud Spanner timestamp at which the transaction committed.
    +   * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCommitTimestampOrBuilder() { + return commitTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTimestamp_; + } + + public static final int COMMIT_STATS_FIELD_NUMBER = 2; + private com.google.spanner.v1.CommitResponse.CommitStats commitStats_; + + /** + * + * + *
    +   * The statistics about this `Commit`. Not returned by default.
    +   * For more information, see
    +   * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
    +   * 
    + * + * .google.spanner.v1.CommitResponse.CommitStats commit_stats = 2; + * + * @return Whether the commitStats field is set. + */ + @java.lang.Override + public boolean hasCommitStats() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * The statistics about this `Commit`. Not returned by default.
    +   * For more information, see
    +   * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
    +   * 
    + * + * .google.spanner.v1.CommitResponse.CommitStats commit_stats = 2; + * + * @return The commitStats. + */ + @java.lang.Override + public com.google.spanner.v1.CommitResponse.CommitStats getCommitStats() { + return commitStats_ == null + ? com.google.spanner.v1.CommitResponse.CommitStats.getDefaultInstance() + : commitStats_; + } + + /** + * + * + *
    +   * The statistics about this `Commit`. Not returned by default.
    +   * For more information, see
    +   * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
    +   * 
    + * + * .google.spanner.v1.CommitResponse.CommitStats commit_stats = 2; + */ + @java.lang.Override + public com.google.spanner.v1.CommitResponse.CommitStatsOrBuilder getCommitStatsOrBuilder() { + return commitStats_ == null + ? com.google.spanner.v1.CommitResponse.CommitStats.getDefaultInstance() + : commitStats_; + } + + public static final int PRECOMMIT_TOKEN_FIELD_NUMBER = 4; + + /** + * + * + *
    +   * If specified, transaction has not committed yet.
    +   * You must retry the commit with the new precommit token.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + * + * @return Whether the precommitToken field is set. + */ + @java.lang.Override + public boolean hasPrecommitToken() { + return multiplexedSessionRetryCase_ == 4; + } + + /** + * + * + *
    +   * If specified, transaction has not committed yet.
    +   * You must retry the commit with the new precommit token.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + * + * @return The precommitToken. + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + if (multiplexedSessionRetryCase_ == 4) { + return (com.google.spanner.v1.MultiplexedSessionPrecommitToken) multiplexedSessionRetry_; + } + return com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance(); + } + + /** + * + * + *
    +   * If specified, transaction has not committed yet.
    +   * You must retry the commit with the new precommit token.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + if (multiplexedSessionRetryCase_ == 4) { + return (com.google.spanner.v1.MultiplexedSessionPrecommitToken) multiplexedSessionRetry_; + } + return com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance(); + } + + public static final int SNAPSHOT_TIMESTAMP_FIELD_NUMBER = 5; + private com.google.protobuf.Timestamp snapshotTimestamp_; + + /** + * + * + *
    +   * If `TransactionOptions.isolation_level` is set to
    +   * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
    +   * timestamp at which all reads in the transaction ran. This timestamp is
    +   * never returned.
    +   * 
    + * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + * + * @return Whether the snapshotTimestamp field is set. + */ + @java.lang.Override + public boolean hasSnapshotTimestamp() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * If `TransactionOptions.isolation_level` is set to
    +   * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
    +   * timestamp at which all reads in the transaction ran. This timestamp is
    +   * never returned.
    +   * 
    + * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + * + * @return The snapshotTimestamp. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getSnapshotTimestamp() { + return snapshotTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTimestamp_; + } + + /** + * + * + *
    +   * If `TransactionOptions.isolation_level` is set to
    +   * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
    +   * timestamp at which all reads in the transaction ran. This timestamp is
    +   * never returned.
    +   * 
    + * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getSnapshotTimestampOrBuilder() { + return snapshotTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTimestamp_; + } + + public static final int CACHE_UPDATE_FIELD_NUMBER = 6; + private com.google.spanner.v1.CacheUpdate cacheUpdate_; + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the cacheUpdate field is set. + */ + @java.lang.Override + public boolean hasCacheUpdate() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The cacheUpdate. + */ + @java.lang.Override + public com.google.spanner.v1.CacheUpdate getCacheUpdate() { + return cacheUpdate_ == null + ? com.google.spanner.v1.CacheUpdate.getDefaultInstance() + : cacheUpdate_; + } + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.CacheUpdateOrBuilder getCacheUpdateOrBuilder() { + return cacheUpdate_ == null + ? com.google.spanner.v1.CacheUpdate.getDefaultInstance() + : cacheUpdate_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getCommitTimestamp()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getCommitStats()); + } + if (multiplexedSessionRetryCase_ == 4) { + output.writeMessage( + 4, (com.google.spanner.v1.MultiplexedSessionPrecommitToken) multiplexedSessionRetry_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(5, getSnapshotTimestamp()); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(6, getCacheUpdate()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getCommitTimestamp()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getCommitStats()); + } + if (multiplexedSessionRetryCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.spanner.v1.MultiplexedSessionPrecommitToken) multiplexedSessionRetry_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += 
com.google.protobuf.CodedOutputStream.computeMessageSize(5, getSnapshotTimestamp()); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, getCacheUpdate()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.CommitResponse)) { + return super.equals(obj); + } + com.google.spanner.v1.CommitResponse other = (com.google.spanner.v1.CommitResponse) obj; + + if (hasCommitTimestamp() != other.hasCommitTimestamp()) return false; + if (hasCommitTimestamp()) { + if (!getCommitTimestamp().equals(other.getCommitTimestamp())) return false; + } + if (hasCommitStats() != other.hasCommitStats()) return false; + if (hasCommitStats()) { + if (!getCommitStats().equals(other.getCommitStats())) return false; + } + if (hasSnapshotTimestamp() != other.hasSnapshotTimestamp()) return false; + if (hasSnapshotTimestamp()) { + if (!getSnapshotTimestamp().equals(other.getSnapshotTimestamp())) return false; + } + if (hasCacheUpdate() != other.hasCacheUpdate()) return false; + if (hasCacheUpdate()) { + if (!getCacheUpdate().equals(other.getCacheUpdate())) return false; + } + if (!getMultiplexedSessionRetryCase().equals(other.getMultiplexedSessionRetryCase())) + return false; + switch (multiplexedSessionRetryCase_) { + case 4: + if (!getPrecommitToken().equals(other.getPrecommitToken())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasCommitTimestamp()) { + hash = (37 * hash) + COMMIT_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + 
getCommitTimestamp().hashCode(); + } + if (hasCommitStats()) { + hash = (37 * hash) + COMMIT_STATS_FIELD_NUMBER; + hash = (53 * hash) + getCommitStats().hashCode(); + } + if (hasSnapshotTimestamp()) { + hash = (37 * hash) + SNAPSHOT_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + getSnapshotTimestamp().hashCode(); + } + if (hasCacheUpdate()) { + hash = (37 * hash) + CACHE_UPDATE_FIELD_NUMBER; + hash = (53 * hash) + getCacheUpdate().hashCode(); + } + switch (multiplexedSessionRetryCase_) { + case 4: + hash = (37 * hash) + PRECOMMIT_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPrecommitToken().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.CommitResponse parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.CommitResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.CommitResponse parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.CommitResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.CommitResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.CommitResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.CommitResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.CommitResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.CommitResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.CommitResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.CommitResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.CommitResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.CommitResponse prototype) { + return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The response for [Commit][google.spanner.v1.Spanner.Commit].
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.CommitResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.CommitResponse) + com.google.spanner.v1.CommitResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.CommitResponseProto + .internal_static_google_spanner_v1_CommitResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.CommitResponseProto + .internal_static_google_spanner_v1_CommitResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.CommitResponse.class, + com.google.spanner.v1.CommitResponse.Builder.class); + } + + // Construct using com.google.spanner.v1.CommitResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCommitTimestampFieldBuilder(); + internalGetCommitStatsFieldBuilder(); + internalGetSnapshotTimestampFieldBuilder(); + internalGetCacheUpdateFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + commitTimestamp_ = null; + if (commitTimestampBuilder_ != null) { + commitTimestampBuilder_.dispose(); + commitTimestampBuilder_ = null; + } + commitStats_ = null; + if (commitStatsBuilder_ != null) { + commitStatsBuilder_.dispose(); + commitStatsBuilder_ = null; + } + if (precommitTokenBuilder_ != null) { + precommitTokenBuilder_.clear(); + } + snapshotTimestamp_ = null; + if (snapshotTimestampBuilder_ != null) { + 
snapshotTimestampBuilder_.dispose(); + snapshotTimestampBuilder_ = null; + } + cacheUpdate_ = null; + if (cacheUpdateBuilder_ != null) { + cacheUpdateBuilder_.dispose(); + cacheUpdateBuilder_ = null; + } + multiplexedSessionRetryCase_ = 0; + multiplexedSessionRetry_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.CommitResponseProto + .internal_static_google_spanner_v1_CommitResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.CommitResponse getDefaultInstanceForType() { + return com.google.spanner.v1.CommitResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.CommitResponse build() { + com.google.spanner.v1.CommitResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.CommitResponse buildPartial() { + com.google.spanner.v1.CommitResponse result = new com.google.spanner.v1.CommitResponse(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.CommitResponse result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.commitTimestamp_ = + commitTimestampBuilder_ == null ? commitTimestamp_ : commitTimestampBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.commitStats_ = + commitStatsBuilder_ == null ? commitStats_ : commitStatsBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.snapshotTimestamp_ = + snapshotTimestampBuilder_ == null + ? 
snapshotTimestamp_ + : snapshotTimestampBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.cacheUpdate_ = + cacheUpdateBuilder_ == null ? cacheUpdate_ : cacheUpdateBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs(com.google.spanner.v1.CommitResponse result) { + result.multiplexedSessionRetryCase_ = multiplexedSessionRetryCase_; + result.multiplexedSessionRetry_ = this.multiplexedSessionRetry_; + if (multiplexedSessionRetryCase_ == 4 && precommitTokenBuilder_ != null) { + result.multiplexedSessionRetry_ = precommitTokenBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.CommitResponse) { + return mergeFrom((com.google.spanner.v1.CommitResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.CommitResponse other) { + if (other == com.google.spanner.v1.CommitResponse.getDefaultInstance()) return this; + if (other.hasCommitTimestamp()) { + mergeCommitTimestamp(other.getCommitTimestamp()); + } + if (other.hasCommitStats()) { + mergeCommitStats(other.getCommitStats()); + } + if (other.hasSnapshotTimestamp()) { + mergeSnapshotTimestamp(other.getSnapshotTimestamp()); + } + if (other.hasCacheUpdate()) { + mergeCacheUpdate(other.getCacheUpdate()); + } + switch (other.getMultiplexedSessionRetryCase()) { + case PRECOMMIT_TOKEN: + { + mergePrecommitToken(other.getPrecommitToken()); + break; + } + case MULTIPLEXEDSESSIONRETRY_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetCommitTimestampFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetCommitStatsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 34: + { + input.readMessage( + internalGetPrecommitTokenFieldBuilder().getBuilder(), extensionRegistry); + multiplexedSessionRetryCase_ = 4; + break; + } // case 34 + case 42: + { + input.readMessage( + internalGetSnapshotTimestampFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 42 + case 50: + { + input.readMessage( + internalGetCacheUpdateFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 50 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int multiplexedSessionRetryCase_ = 0; + private java.lang.Object multiplexedSessionRetry_; + + public MultiplexedSessionRetryCase getMultiplexedSessionRetryCase() { + return MultiplexedSessionRetryCase.forNumber(multiplexedSessionRetryCase_); + } + + public Builder clearMultiplexedSessionRetry() { + multiplexedSessionRetryCase_ = 0; + multiplexedSessionRetry_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.Timestamp commitTimestamp_; + private com.google.protobuf.SingleFieldBuilder< + 
com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + commitTimestampBuilder_; + + /** + * + * + *
    +     * The Cloud Spanner timestamp at which the transaction committed.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + * + * @return Whether the commitTimestamp field is set. + */ + public boolean hasCommitTimestamp() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * The Cloud Spanner timestamp at which the transaction committed.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + * + * @return The commitTimestamp. + */ + public com.google.protobuf.Timestamp getCommitTimestamp() { + if (commitTimestampBuilder_ == null) { + return commitTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTimestamp_; + } else { + return commitTimestampBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The Cloud Spanner timestamp at which the transaction committed.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + public Builder setCommitTimestamp(com.google.protobuf.Timestamp value) { + if (commitTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commitTimestamp_ = value; + } else { + commitTimestampBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The Cloud Spanner timestamp at which the transaction committed.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + public Builder setCommitTimestamp(com.google.protobuf.Timestamp.Builder builderForValue) { + if (commitTimestampBuilder_ == null) { + commitTimestamp_ = builderForValue.build(); + } else { + commitTimestampBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The Cloud Spanner timestamp at which the transaction committed.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + public Builder mergeCommitTimestamp(com.google.protobuf.Timestamp value) { + if (commitTimestampBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && commitTimestamp_ != null + && commitTimestamp_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCommitTimestampBuilder().mergeFrom(value); + } else { + commitTimestamp_ = value; + } + } else { + commitTimestampBuilder_.mergeFrom(value); + } + if (commitTimestamp_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The Cloud Spanner timestamp at which the transaction committed.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + public Builder clearCommitTimestamp() { + bitField0_ = (bitField0_ & ~0x00000001); + commitTimestamp_ = null; + if (commitTimestampBuilder_ != null) { + commitTimestampBuilder_.dispose(); + commitTimestampBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The Cloud Spanner timestamp at which the transaction committed.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + public com.google.protobuf.Timestamp.Builder getCommitTimestampBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetCommitTimestampFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The Cloud Spanner timestamp at which the transaction committed.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + public com.google.protobuf.TimestampOrBuilder getCommitTimestampOrBuilder() { + if (commitTimestampBuilder_ != null) { + return commitTimestampBuilder_.getMessageOrBuilder(); + } else { + return commitTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTimestamp_; + } + } + + /** + * + * + *
    +     * The Cloud Spanner timestamp at which the transaction committed.
    +     * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCommitTimestampFieldBuilder() { + if (commitTimestampBuilder_ == null) { + commitTimestampBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCommitTimestamp(), getParentForChildren(), isClean()); + commitTimestamp_ = null; + } + return commitTimestampBuilder_; + } + + private com.google.spanner.v1.CommitResponse.CommitStats commitStats_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.CommitResponse.CommitStats, + com.google.spanner.v1.CommitResponse.CommitStats.Builder, + com.google.spanner.v1.CommitResponse.CommitStatsOrBuilder> + commitStatsBuilder_; + + /** + * + * + *
    +     * The statistics about this `Commit`. Not returned by default.
    +     * For more information, see
    +     * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
    +     * 
    + * + * .google.spanner.v1.CommitResponse.CommitStats commit_stats = 2; + * + * @return Whether the commitStats field is set. + */ + public boolean hasCommitStats() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * The statistics about this `Commit`. Not returned by default.
    +     * For more information, see
    +     * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
    +     * 
    + * + * .google.spanner.v1.CommitResponse.CommitStats commit_stats = 2; + * + * @return The commitStats. + */ + public com.google.spanner.v1.CommitResponse.CommitStats getCommitStats() { + if (commitStatsBuilder_ == null) { + return commitStats_ == null + ? com.google.spanner.v1.CommitResponse.CommitStats.getDefaultInstance() + : commitStats_; + } else { + return commitStatsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The statistics about this `Commit`. Not returned by default.
    +     * For more information, see
    +     * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
    +     * 
    + * + * .google.spanner.v1.CommitResponse.CommitStats commit_stats = 2; + */ + public Builder setCommitStats(com.google.spanner.v1.CommitResponse.CommitStats value) { + if (commitStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commitStats_ = value; + } else { + commitStatsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The statistics about this `Commit`. Not returned by default.
    +     * For more information, see
    +     * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
    +     * 
    + * + * .google.spanner.v1.CommitResponse.CommitStats commit_stats = 2; + */ + public Builder setCommitStats( + com.google.spanner.v1.CommitResponse.CommitStats.Builder builderForValue) { + if (commitStatsBuilder_ == null) { + commitStats_ = builderForValue.build(); + } else { + commitStatsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The statistics about this `Commit`. Not returned by default.
    +     * For more information, see
    +     * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
    +     * 
    + * + * .google.spanner.v1.CommitResponse.CommitStats commit_stats = 2; + */ + public Builder mergeCommitStats(com.google.spanner.v1.CommitResponse.CommitStats value) { + if (commitStatsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && commitStats_ != null + && commitStats_ + != com.google.spanner.v1.CommitResponse.CommitStats.getDefaultInstance()) { + getCommitStatsBuilder().mergeFrom(value); + } else { + commitStats_ = value; + } + } else { + commitStatsBuilder_.mergeFrom(value); + } + if (commitStats_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The statistics about this `Commit`. Not returned by default.
    +     * For more information, see
    +     * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
    +     * 
    + * + * .google.spanner.v1.CommitResponse.CommitStats commit_stats = 2; + */ + public Builder clearCommitStats() { + bitField0_ = (bitField0_ & ~0x00000002); + commitStats_ = null; + if (commitStatsBuilder_ != null) { + commitStatsBuilder_.dispose(); + commitStatsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The statistics about this `Commit`. Not returned by default.
    +     * For more information, see
    +     * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
    +     * 
    + * + * .google.spanner.v1.CommitResponse.CommitStats commit_stats = 2; + */ + public com.google.spanner.v1.CommitResponse.CommitStats.Builder getCommitStatsBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetCommitStatsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The statistics about this `Commit`. Not returned by default.
    +     * For more information, see
    +     * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
    +     * 
    + * + * .google.spanner.v1.CommitResponse.CommitStats commit_stats = 2; + */ + public com.google.spanner.v1.CommitResponse.CommitStatsOrBuilder getCommitStatsOrBuilder() { + if (commitStatsBuilder_ != null) { + return commitStatsBuilder_.getMessageOrBuilder(); + } else { + return commitStats_ == null + ? com.google.spanner.v1.CommitResponse.CommitStats.getDefaultInstance() + : commitStats_; + } + } + + /** + * + * + *
    +     * The statistics about this `Commit`. Not returned by default.
    +     * For more information, see
    +     * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
    +     * 
    + * + * .google.spanner.v1.CommitResponse.CommitStats commit_stats = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.CommitResponse.CommitStats, + com.google.spanner.v1.CommitResponse.CommitStats.Builder, + com.google.spanner.v1.CommitResponse.CommitStatsOrBuilder> + internalGetCommitStatsFieldBuilder() { + if (commitStatsBuilder_ == null) { + commitStatsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.CommitResponse.CommitStats, + com.google.spanner.v1.CommitResponse.CommitStats.Builder, + com.google.spanner.v1.CommitResponse.CommitStatsOrBuilder>( + getCommitStats(), getParentForChildren(), isClean()); + commitStats_ = null; + } + return commitStatsBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + precommitTokenBuilder_; + + /** + * + * + *
    +     * If specified, transaction has not committed yet.
    +     * You must retry the commit with the new precommit token.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + * + * @return Whether the precommitToken field is set. + */ + @java.lang.Override + public boolean hasPrecommitToken() { + return multiplexedSessionRetryCase_ == 4; + } + + /** + * + * + *
    +     * If specified, transaction has not committed yet.
    +     * You must retry the commit with the new precommit token.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + * + * @return The precommitToken. + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + if (precommitTokenBuilder_ == null) { + if (multiplexedSessionRetryCase_ == 4) { + return (com.google.spanner.v1.MultiplexedSessionPrecommitToken) multiplexedSessionRetry_; + } + return com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance(); + } else { + if (multiplexedSessionRetryCase_ == 4) { + return precommitTokenBuilder_.getMessage(); + } + return com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * If specified, transaction has not committed yet.
    +     * You must retry the commit with the new precommit token.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + */ + public Builder setPrecommitToken(com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + multiplexedSessionRetry_ = value; + onChanged(); + } else { + precommitTokenBuilder_.setMessage(value); + } + multiplexedSessionRetryCase_ = 4; + return this; + } + + /** + * + * + *
    +     * If specified, transaction has not committed yet.
    +     * You must retry the commit with the new precommit token.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + */ + public Builder setPrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder builderForValue) { + if (precommitTokenBuilder_ == null) { + multiplexedSessionRetry_ = builderForValue.build(); + onChanged(); + } else { + precommitTokenBuilder_.setMessage(builderForValue.build()); + } + multiplexedSessionRetryCase_ = 4; + return this; + } + + /** + * + * + *
    +     * If specified, transaction has not committed yet.
    +     * You must retry the commit with the new precommit token.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + */ + public Builder mergePrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (multiplexedSessionRetryCase_ == 4 + && multiplexedSessionRetry_ + != com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance()) { + multiplexedSessionRetry_ = + com.google.spanner.v1.MultiplexedSessionPrecommitToken.newBuilder( + (com.google.spanner.v1.MultiplexedSessionPrecommitToken) + multiplexedSessionRetry_) + .mergeFrom(value) + .buildPartial(); + } else { + multiplexedSessionRetry_ = value; + } + onChanged(); + } else { + if (multiplexedSessionRetryCase_ == 4) { + precommitTokenBuilder_.mergeFrom(value); + } else { + precommitTokenBuilder_.setMessage(value); + } + } + multiplexedSessionRetryCase_ = 4; + return this; + } + + /** + * + * + *
    +     * If specified, transaction has not committed yet.
    +     * You must retry the commit with the new precommit token.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + */ + public Builder clearPrecommitToken() { + if (precommitTokenBuilder_ == null) { + if (multiplexedSessionRetryCase_ == 4) { + multiplexedSessionRetryCase_ = 0; + multiplexedSessionRetry_ = null; + onChanged(); + } + } else { + if (multiplexedSessionRetryCase_ == 4) { + multiplexedSessionRetryCase_ = 0; + multiplexedSessionRetry_ = null; + } + precommitTokenBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * If specified, transaction has not committed yet.
    +     * You must retry the commit with the new precommit token.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder + getPrecommitTokenBuilder() { + return internalGetPrecommitTokenFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * If specified, transaction has not committed yet.
    +     * You must retry the commit with the new precommit token.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + if ((multiplexedSessionRetryCase_ == 4) && (precommitTokenBuilder_ != null)) { + return precommitTokenBuilder_.getMessageOrBuilder(); + } else { + if (multiplexedSessionRetryCase_ == 4) { + return (com.google.spanner.v1.MultiplexedSessionPrecommitToken) multiplexedSessionRetry_; + } + return com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * If specified, transaction has not committed yet.
    +     * You must retry the commit with the new precommit token.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + internalGetPrecommitTokenFieldBuilder() { + if (precommitTokenBuilder_ == null) { + if (!(multiplexedSessionRetryCase_ == 4)) { + multiplexedSessionRetry_ = + com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance(); + } + precommitTokenBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder>( + (com.google.spanner.v1.MultiplexedSessionPrecommitToken) multiplexedSessionRetry_, + getParentForChildren(), + isClean()); + multiplexedSessionRetry_ = null; + } + multiplexedSessionRetryCase_ = 4; + onChanged(); + return precommitTokenBuilder_; + } + + private com.google.protobuf.Timestamp snapshotTimestamp_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + snapshotTimestampBuilder_; + + /** + * + * + *
    +     * If `TransactionOptions.isolation_level` is set to
    +     * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
    +     * timestamp at which all reads in the transaction ran. This timestamp is
    +     * never returned.
    +     * 
    + * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + * + * @return Whether the snapshotTimestamp field is set. + */ + public boolean hasSnapshotTimestamp() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * If `TransactionOptions.isolation_level` is set to
    +     * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
    +     * timestamp at which all reads in the transaction ran. This timestamp is
    +     * never returned.
    +     * 
    + * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + * + * @return The snapshotTimestamp. + */ + public com.google.protobuf.Timestamp getSnapshotTimestamp() { + if (snapshotTimestampBuilder_ == null) { + return snapshotTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTimestamp_; + } else { + return snapshotTimestampBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * If `TransactionOptions.isolation_level` is set to
    +     * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
    +     * timestamp at which all reads in the transaction ran. This timestamp is
    +     * never returned.
    +     * 
    + * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + */ + public Builder setSnapshotTimestamp(com.google.protobuf.Timestamp value) { + if (snapshotTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + snapshotTimestamp_ = value; + } else { + snapshotTimestampBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If `TransactionOptions.isolation_level` is set to
    +     * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
    +     * timestamp at which all reads in the transaction ran. This timestamp is
    +     * never returned.
    +     * 
    + * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + */ + public Builder setSnapshotTimestamp(com.google.protobuf.Timestamp.Builder builderForValue) { + if (snapshotTimestampBuilder_ == null) { + snapshotTimestamp_ = builderForValue.build(); + } else { + snapshotTimestampBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If `TransactionOptions.isolation_level` is set to
    +     * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
    +     * timestamp at which all reads in the transaction ran. This timestamp is
    +     * never returned.
    +     * 
    + * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + */ + public Builder mergeSnapshotTimestamp(com.google.protobuf.Timestamp value) { + if (snapshotTimestampBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && snapshotTimestamp_ != null + && snapshotTimestamp_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getSnapshotTimestampBuilder().mergeFrom(value); + } else { + snapshotTimestamp_ = value; + } + } else { + snapshotTimestampBuilder_.mergeFrom(value); + } + if (snapshotTimestamp_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * If `TransactionOptions.isolation_level` is set to
    +     * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
    +     * timestamp at which all reads in the transaction ran. This timestamp is
    +     * never returned.
    +     * 
    + * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + */ + public Builder clearSnapshotTimestamp() { + bitField0_ = (bitField0_ & ~0x00000008); + snapshotTimestamp_ = null; + if (snapshotTimestampBuilder_ != null) { + snapshotTimestampBuilder_.dispose(); + snapshotTimestampBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * If `TransactionOptions.isolation_level` is set to
    +     * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
    +     * timestamp at which all reads in the transaction ran. This timestamp is
    +     * never returned.
    +     * 
    + * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + */ + public com.google.protobuf.Timestamp.Builder getSnapshotTimestampBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetSnapshotTimestampFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * If `TransactionOptions.isolation_level` is set to
    +     * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
    +     * timestamp at which all reads in the transaction ran. This timestamp is
    +     * never returned.
    +     * 
    + * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + */ + public com.google.protobuf.TimestampOrBuilder getSnapshotTimestampOrBuilder() { + if (snapshotTimestampBuilder_ != null) { + return snapshotTimestampBuilder_.getMessageOrBuilder(); + } else { + return snapshotTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTimestamp_; + } + } + + /** + * + * + *
    +     * If `TransactionOptions.isolation_level` is set to
    +     * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
    +     * timestamp at which all reads in the transaction ran. This timestamp is
    +     * never returned.
    +     * 
    + * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetSnapshotTimestampFieldBuilder() { + if (snapshotTimestampBuilder_ == null) { + snapshotTimestampBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getSnapshotTimestamp(), getParentForChildren(), isClean()); + snapshotTimestamp_ = null; + } + return snapshotTimestampBuilder_; + } + + private com.google.spanner.v1.CacheUpdate cacheUpdate_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.CacheUpdate, + com.google.spanner.v1.CacheUpdate.Builder, + com.google.spanner.v1.CacheUpdateOrBuilder> + cacheUpdateBuilder_; + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the cacheUpdate field is set. + */ + public boolean hasCacheUpdate() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The cacheUpdate. + */ + public com.google.spanner.v1.CacheUpdate getCacheUpdate() { + if (cacheUpdateBuilder_ == null) { + return cacheUpdate_ == null + ? com.google.spanner.v1.CacheUpdate.getDefaultInstance() + : cacheUpdate_; + } else { + return cacheUpdateBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCacheUpdate(com.google.spanner.v1.CacheUpdate value) { + if (cacheUpdateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cacheUpdate_ = value; + } else { + cacheUpdateBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCacheUpdate(com.google.spanner.v1.CacheUpdate.Builder builderForValue) { + if (cacheUpdateBuilder_ == null) { + cacheUpdate_ = builderForValue.build(); + } else { + cacheUpdateBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCacheUpdate(com.google.spanner.v1.CacheUpdate value) { + if (cacheUpdateBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && cacheUpdate_ != null + && cacheUpdate_ != com.google.spanner.v1.CacheUpdate.getDefaultInstance()) { + getCacheUpdateBuilder().mergeFrom(value); + } else { + cacheUpdate_ = value; + } + } else { + cacheUpdateBuilder_.mergeFrom(value); + } + if (cacheUpdate_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCacheUpdate() { + bitField0_ = (bitField0_ & ~0x00000010); + cacheUpdate_ = null; + if (cacheUpdateBuilder_ != null) { + cacheUpdateBuilder_.dispose(); + cacheUpdateBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.CacheUpdate.Builder getCacheUpdateBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetCacheUpdateFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.CacheUpdateOrBuilder getCacheUpdateOrBuilder() { + if (cacheUpdateBuilder_ != null) { + return cacheUpdateBuilder_.getMessageOrBuilder(); + } else { + return cacheUpdate_ == null + ? com.google.spanner.v1.CacheUpdate.getDefaultInstance() + : cacheUpdate_; + } + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.CacheUpdate, + com.google.spanner.v1.CacheUpdate.Builder, + com.google.spanner.v1.CacheUpdateOrBuilder> + internalGetCacheUpdateFieldBuilder() { + if (cacheUpdateBuilder_ == null) { + cacheUpdateBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.CacheUpdate, + com.google.spanner.v1.CacheUpdate.Builder, + com.google.spanner.v1.CacheUpdateOrBuilder>( + getCacheUpdate(), getParentForChildren(), isClean()); + cacheUpdate_ = null; + } + return cacheUpdateBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.CommitResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.CommitResponse) + private static final com.google.spanner.v1.CommitResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.CommitResponse(); + } + + public static com.google.spanner.v1.CommitResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CommitResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return 
builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.CommitResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseOrBuilder.java new file mode 100644 index 000000000000..bf00f8accf4a --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseOrBuilder.java @@ -0,0 +1,254 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/commit_response.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface CommitResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.CommitResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The Cloud Spanner timestamp at which the transaction committed.
    +   * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + * + * @return Whether the commitTimestamp field is set. + */ + boolean hasCommitTimestamp(); + + /** + * + * + *
    +   * The Cloud Spanner timestamp at which the transaction committed.
    +   * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + * + * @return The commitTimestamp. + */ + com.google.protobuf.Timestamp getCommitTimestamp(); + + /** + * + * + *
    +   * The Cloud Spanner timestamp at which the transaction committed.
    +   * 
    + * + * .google.protobuf.Timestamp commit_timestamp = 1; + */ + com.google.protobuf.TimestampOrBuilder getCommitTimestampOrBuilder(); + + /** + * + * + *
    +   * The statistics about this `Commit`. Not returned by default.
    +   * For more information, see
    +   * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
    +   * 
    + * + * .google.spanner.v1.CommitResponse.CommitStats commit_stats = 2; + * + * @return Whether the commitStats field is set. + */ + boolean hasCommitStats(); + + /** + * + * + *
    +   * The statistics about this `Commit`. Not returned by default.
    +   * For more information, see
    +   * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
    +   * 
    + * + * .google.spanner.v1.CommitResponse.CommitStats commit_stats = 2; + * + * @return The commitStats. + */ + com.google.spanner.v1.CommitResponse.CommitStats getCommitStats(); + + /** + * + * + *
    +   * The statistics about this `Commit`. Not returned by default.
    +   * For more information, see
    +   * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
    +   * 
    + * + * .google.spanner.v1.CommitResponse.CommitStats commit_stats = 2; + */ + com.google.spanner.v1.CommitResponse.CommitStatsOrBuilder getCommitStatsOrBuilder(); + + /** + * + * + *
    +   * If specified, transaction has not committed yet.
    +   * You must retry the commit with the new precommit token.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + * + * @return Whether the precommitToken field is set. + */ + boolean hasPrecommitToken(); + + /** + * + * + *
    +   * If specified, transaction has not committed yet.
    +   * You must retry the commit with the new precommit token.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + * + * @return The precommitToken. + */ + com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken(); + + /** + * + * + *
    +   * If specified, transaction has not committed yet.
    +   * You must retry the commit with the new precommit token.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + */ + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder getPrecommitTokenOrBuilder(); + + /** + * + * + *
    +   * If `TransactionOptions.isolation_level` is set to
    +   * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
    +   * timestamp at which all reads in the transaction ran. This timestamp is
    +   * never returned.
    +   * 
    + * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + * + * @return Whether the snapshotTimestamp field is set. + */ + boolean hasSnapshotTimestamp(); + + /** + * + * + *
    +   * If `TransactionOptions.isolation_level` is set to
    +   * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
    +   * timestamp at which all reads in the transaction ran. This timestamp is
    +   * never returned.
    +   * 
    + * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + * + * @return The snapshotTimestamp. + */ + com.google.protobuf.Timestamp getSnapshotTimestamp(); + + /** + * + * + *
    +   * If `TransactionOptions.isolation_level` is set to
    +   * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
    +   * timestamp at which all reads in the transaction ran. This timestamp is
    +   * never returned.
    +   * 
    + * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + */ + com.google.protobuf.TimestampOrBuilder getSnapshotTimestampOrBuilder(); + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the cacheUpdate field is set. + */ + boolean hasCacheUpdate(); + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The cacheUpdate. + */ + com.google.spanner.v1.CacheUpdate getCacheUpdate(); + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.CacheUpdateOrBuilder getCacheUpdateOrBuilder(); + + com.google.spanner.v1.CommitResponse.MultiplexedSessionRetryCase getMultiplexedSessionRetryCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseProto.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseProto.java new file mode 100644 index 000000000000..31051dde3b65 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseProto.java @@ -0,0 +1,123 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/commit_response.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public final class CommitResponseProto extends com.google.protobuf.GeneratedFile { + private CommitResponseProto() {} + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CommitResponseProto"); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_CommitResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_CommitResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_CommitResponse_CommitStats_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_CommitResponse_CommitStats_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n\'google/spanner/v1/commit_response.prot" + + "o\022\021google.spanner.v1\032\037google/api/field_b" + + "ehavior.proto\032\037google/protobuf/timestamp" + + ".proto\032 google/spanner/v1/location.proto" + + "\032#google/spanner/v1/transaction.proto\"\220\003" + + "\n\016CommitResponse\0224\n\020commit_timestamp\030\001 \001" + + 
"(\0132\032.google.protobuf.Timestamp\022C\n\014commit" + + "_stats\030\002 \001(\0132-.google.spanner.v1.CommitR" + + "esponse.CommitStats\022N\n\017precommit_token\030\004" + + " \001(\01323.google.spanner.v1.MultiplexedSess" + + "ionPrecommitTokenH\000\0226\n\022snapshot_timestam" + + "p\030\005 \001(\0132\032.google.protobuf.Timestamp\0229\n\014c" + + "ache_update\030\006 \001(\0132\036.google.spanner.v1.Ca" + + "cheUpdateB\003\340A\001\032%\n\013CommitStats\022\026\n\016mutatio" + + "n_count\030\001 \001(\003B\031\n\027MultiplexedSessionRetry" + + "B\266\001\n\025com.google.spanner.v1B\023CommitRespon" + + "seProtoP\001Z5cloud.google.com/go/spanner/a" + + "piv1/spannerpb;spannerpb\252\002\027Google.Cloud." + + "Spanner.V1\312\002\027Google\\Cloud\\Spanner\\V1\352\002\032G" + + "oogle::Cloud::Spanner::V1b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + com.google.spanner.v1.LocationProto.getDescriptor(), + com.google.spanner.v1.TransactionProto.getDescriptor(), + }); + internal_static_google_spanner_v1_CommitResponse_descriptor = getDescriptor().getMessageType(0); + internal_static_google_spanner_v1_CommitResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_CommitResponse_descriptor, + new java.lang.String[] { + "CommitTimestamp", + "CommitStats", + "PrecommitToken", + "SnapshotTimestamp", + "CacheUpdate", + "MultiplexedSessionRetry", + }); + internal_static_google_spanner_v1_CommitResponse_CommitStats_descriptor = + internal_static_google_spanner_v1_CommitResponse_descriptor.getNestedType(0); + internal_static_google_spanner_v1_CommitResponse_CommitStats_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_google_spanner_v1_CommitResponse_CommitStats_descriptor, + new java.lang.String[] { + "MutationCount", + }); + descriptor.resolveAllFeaturesImmutable(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + com.google.spanner.v1.LocationProto.getDescriptor(); + com.google.spanner.v1.TransactionProto.getDescriptor(); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CreateSessionRequest.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CreateSessionRequest.java new file mode 100644 index 000000000000..f33336e704f7 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CreateSessionRequest.java @@ -0,0 +1,901 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * The request for [CreateSession][google.spanner.v1.Spanner.CreateSession].
    + * 
    + * + * Protobuf type {@code google.spanner.v1.CreateSessionRequest} + */ +@com.google.protobuf.Generated +public final class CreateSessionRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.CreateSessionRequest) + CreateSessionRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "CreateSessionRequest"); + } + + // Use CreateSessionRequest.newBuilder() to construct. + private CreateSessionRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private CreateSessionRequest() { + database_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_CreateSessionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_CreateSessionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.CreateSessionRequest.class, + com.google.spanner.v1.CreateSessionRequest.Builder.class); + } + + private int bitField0_; + public static final int DATABASE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object database_ = ""; + + /** + * + * + *
    +   * Required. The database in which the new session is created.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + @java.lang.Override + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The database in which the new session is created.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SESSION_FIELD_NUMBER = 2; + private com.google.spanner.v1.Session session_; + + /** + * + * + *
    +   * Required. The session to create.
    +   * 
    + * + * .google.spanner.v1.Session session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the session field is set. + */ + @java.lang.Override + public boolean hasSession() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Required. The session to create.
    +   * 
    + * + * .google.spanner.v1.Session session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The session. + */ + @java.lang.Override + public com.google.spanner.v1.Session getSession() { + return session_ == null ? com.google.spanner.v1.Session.getDefaultInstance() : session_; + } + + /** + * + * + *
    +   * Required. The session to create.
    +   * 
    + * + * .google.spanner.v1.Session session = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + @java.lang.Override + public com.google.spanner.v1.SessionOrBuilder getSessionOrBuilder() { + return session_ == null ? com.google.spanner.v1.Session.getDefaultInstance() : session_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, database_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getSession()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, database_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getSession()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.CreateSessionRequest)) { + return super.equals(obj); + } + com.google.spanner.v1.CreateSessionRequest other = + (com.google.spanner.v1.CreateSessionRequest) obj; + + if (!getDatabase().equals(other.getDatabase())) return false; + if (hasSession() != other.hasSession()) return false; + if (hasSession()) { + if (!getSession().equals(other.getSession())) 
return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getDatabase().hashCode(); + if (hasSession()) { + hash = (37 * hash) + SESSION_FIELD_NUMBER; + hash = (53 * hash) + getSession().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.CreateSessionRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.CreateSessionRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.CreateSessionRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.CreateSessionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.CreateSessionRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.CreateSessionRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.CreateSessionRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.CreateSessionRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.CreateSessionRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.CreateSessionRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.CreateSessionRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.CreateSessionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.CreateSessionRequest prototype) { + return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for [CreateSession][google.spanner.v1.Spanner.CreateSession].
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.CreateSessionRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.CreateSessionRequest) + com.google.spanner.v1.CreateSessionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_CreateSessionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_CreateSessionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.CreateSessionRequest.class, + com.google.spanner.v1.CreateSessionRequest.Builder.class); + } + + // Construct using com.google.spanner.v1.CreateSessionRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetSessionFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + database_ = ""; + session_ = null; + if (sessionBuilder_ != null) { + sessionBuilder_.dispose(); + sessionBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_CreateSessionRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.CreateSessionRequest getDefaultInstanceForType() { + return 
com.google.spanner.v1.CreateSessionRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.CreateSessionRequest build() { + com.google.spanner.v1.CreateSessionRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.CreateSessionRequest buildPartial() { + com.google.spanner.v1.CreateSessionRequest result = + new com.google.spanner.v1.CreateSessionRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.CreateSessionRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.database_ = database_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.session_ = sessionBuilder_ == null ? session_ : sessionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.CreateSessionRequest) { + return mergeFrom((com.google.spanner.v1.CreateSessionRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.CreateSessionRequest other) { + if (other == com.google.spanner.v1.CreateSessionRequest.getDefaultInstance()) return this; + if (!other.getDatabase().isEmpty()) { + database_ = other.database_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasSession()) { + mergeSession(other.getSession()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + database_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(internalGetSessionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object database_ = ""; + + /** + * + * + *
    +     * Required. The database in which the new session is created.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The database in which the new session is created.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The database in which the new session is created.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The database to set. + * @return This builder for chaining. + */ + public Builder setDatabase(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The database in which the new session is created.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearDatabase() { + database_ = getDefaultInstance().getDatabase(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The database in which the new session is created.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for database to set. + * @return This builder for chaining. + */ + public Builder setDatabaseBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.spanner.v1.Session session_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Session, + com.google.spanner.v1.Session.Builder, + com.google.spanner.v1.SessionOrBuilder> + sessionBuilder_; + + /** + * + * + *
    +     * Required. The session to create.
    +     * 
    + * + * .google.spanner.v1.Session session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the session field is set. + */ + public boolean hasSession() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Required. The session to create.
    +     * 
    + * + * .google.spanner.v1.Session session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The session. + */ + public com.google.spanner.v1.Session getSession() { + if (sessionBuilder_ == null) { + return session_ == null ? com.google.spanner.v1.Session.getDefaultInstance() : session_; + } else { + return sessionBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. The session to create.
    +     * 
    + * + * .google.spanner.v1.Session session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setSession(com.google.spanner.v1.Session value) { + if (sessionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + session_ = value; + } else { + sessionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The session to create.
    +     * 
    + * + * .google.spanner.v1.Session session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setSession(com.google.spanner.v1.Session.Builder builderForValue) { + if (sessionBuilder_ == null) { + session_ = builderForValue.build(); + } else { + sessionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The session to create.
    +     * 
    + * + * .google.spanner.v1.Session session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeSession(com.google.spanner.v1.Session value) { + if (sessionBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && session_ != null + && session_ != com.google.spanner.v1.Session.getDefaultInstance()) { + getSessionBuilder().mergeFrom(value); + } else { + session_ = value; + } + } else { + sessionBuilder_.mergeFrom(value); + } + if (session_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. The session to create.
    +     * 
    + * + * .google.spanner.v1.Session session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearSession() { + bitField0_ = (bitField0_ & ~0x00000002); + session_ = null; + if (sessionBuilder_ != null) { + sessionBuilder_.dispose(); + sessionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The session to create.
    +     * 
    + * + * .google.spanner.v1.Session session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.Session.Builder getSessionBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetSessionFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. The session to create.
    +     * 
    + * + * .google.spanner.v1.Session session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.SessionOrBuilder getSessionOrBuilder() { + if (sessionBuilder_ != null) { + return sessionBuilder_.getMessageOrBuilder(); + } else { + return session_ == null ? com.google.spanner.v1.Session.getDefaultInstance() : session_; + } + } + + /** + * + * + *
    +     * Required. The session to create.
    +     * 
    + * + * .google.spanner.v1.Session session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Session, + com.google.spanner.v1.Session.Builder, + com.google.spanner.v1.SessionOrBuilder> + internalGetSessionFieldBuilder() { + if (sessionBuilder_ == null) { + sessionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Session, + com.google.spanner.v1.Session.Builder, + com.google.spanner.v1.SessionOrBuilder>( + getSession(), getParentForChildren(), isClean()); + session_ = null; + } + return sessionBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.CreateSessionRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.CreateSessionRequest) + private static final com.google.spanner.v1.CreateSessionRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.CreateSessionRequest(); + } + + public static com.google.spanner.v1.CreateSessionRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateSessionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + 
}; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.CreateSessionRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CreateSessionRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CreateSessionRequestOrBuilder.java new file mode 100644 index 000000000000..74c2c254d17c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CreateSessionRequestOrBuilder.java @@ -0,0 +1,95 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface CreateSessionRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.CreateSessionRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The database in which the new session is created.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + java.lang.String getDatabase(); + + /** + * + * + *
    +   * Required. The database in which the new session is created.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + com.google.protobuf.ByteString getDatabaseBytes(); + + /** + * + * + *
    +   * Required. The session to create.
    +   * 
    + * + * .google.spanner.v1.Session session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the session field is set. + */ + boolean hasSession(); + + /** + * + * + *
    +   * Required. The session to create.
    +   * 
    + * + * .google.spanner.v1.Session session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The session. + */ + com.google.spanner.v1.Session getSession(); + + /** + * + * + *
    +   * Required. The session to create.
    +   * 
    + * + * .google.spanner.v1.Session session = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + com.google.spanner.v1.SessionOrBuilder getSessionOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DatabaseName.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DatabaseName.java new file mode 100644 index 000000000000..eee6b1de1585 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DatabaseName.java @@ -0,0 +1,223 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.spanner.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class DatabaseName implements ResourceName { + private static final PathTemplate PROJECT_INSTANCE_DATABASE = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/instances/{instance}/databases/{database}"); + private volatile Map fieldValuesMap; + private final String project; + private final String instance; + private final String database; + + @Deprecated + protected DatabaseName() { + project = null; + instance = null; + database = null; + } + + private DatabaseName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + instance = Preconditions.checkNotNull(builder.getInstance()); + database = Preconditions.checkNotNull(builder.getDatabase()); + } + + public String getProject() { + return project; + } + + public String getInstance() { + return instance; + } + + public String getDatabase() { + return database; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static DatabaseName of(String project, String instance, String database) { + return newBuilder().setProject(project).setInstance(instance).setDatabase(database).build(); + } + + public static String format(String project, String instance, String database) { + return newBuilder() + .setProject(project) + .setInstance(instance) + .setDatabase(database) + .build() + .toString(); + } + + public static DatabaseName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_INSTANCE_DATABASE.validatedMatch( + formattedString, "DatabaseName.parse: formattedString not in valid format"); + return of(matchMap.get("project"), matchMap.get("instance"), matchMap.get("database")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + 
return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (DatabaseName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_INSTANCE_DATABASE.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (instance != null) { + fieldMapBuilder.put("instance", instance); + } + if (database != null) { + fieldMapBuilder.put("database", database); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_INSTANCE_DATABASE.instantiate( + "project", project, "instance", instance, "database", database); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + DatabaseName that = ((DatabaseName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.instance, that.instance) + && Objects.equals(this.database, that.database); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(instance); + h *= 1000003; + h ^= Objects.hashCode(database); + return h; + } + + /** Builder for projects/{project}/instances/{instance}/databases/{database}. 
*/ + public static class Builder { + private String project; + private String instance; + private String database; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getInstance() { + return instance; + } + + public String getDatabase() { + return database; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setInstance(String instance) { + this.instance = instance; + return this; + } + + public Builder setDatabase(String database) { + this.database = database; + return this; + } + + private Builder(DatabaseName databaseName) { + this.project = databaseName.project; + this.instance = databaseName.instance; + this.database = databaseName.database; + } + + public DatabaseName build() { + return new DatabaseName(this); + } + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DeleteSessionRequest.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DeleteSessionRequest.java new file mode 100644 index 000000000000..0ccb2e08873c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DeleteSessionRequest.java @@ -0,0 +1,609 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * The request for [DeleteSession][google.spanner.v1.Spanner.DeleteSession].
    + * 
    + * + * Protobuf type {@code google.spanner.v1.DeleteSessionRequest} + */ +@com.google.protobuf.Generated +public final class DeleteSessionRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.DeleteSessionRequest) + DeleteSessionRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DeleteSessionRequest"); + } + + // Use DeleteSessionRequest.newBuilder() to construct. + private DeleteSessionRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DeleteSessionRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DeleteSessionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DeleteSessionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.DeleteSessionRequest.class, + com.google.spanner.v1.DeleteSessionRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Required. The name of the session to delete.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the session to delete.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.DeleteSessionRequest)) { + return super.equals(obj); + } + com.google.spanner.v1.DeleteSessionRequest other = + (com.google.spanner.v1.DeleteSessionRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { 
+ if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.DeleteSessionRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.DeleteSessionRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.DeleteSessionRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.DeleteSessionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.DeleteSessionRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.DeleteSessionRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.DeleteSessionRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.spanner.v1.DeleteSessionRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.DeleteSessionRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.DeleteSessionRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.DeleteSessionRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.DeleteSessionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.DeleteSessionRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for [DeleteSession][google.spanner.v1.Spanner.DeleteSession].
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.DeleteSessionRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.DeleteSessionRequest) + com.google.spanner.v1.DeleteSessionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DeleteSessionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DeleteSessionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.DeleteSessionRequest.class, + com.google.spanner.v1.DeleteSessionRequest.Builder.class); + } + + // Construct using com.google.spanner.v1.DeleteSessionRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DeleteSessionRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.DeleteSessionRequest getDefaultInstanceForType() { + return com.google.spanner.v1.DeleteSessionRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.DeleteSessionRequest build() { + com.google.spanner.v1.DeleteSessionRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.DeleteSessionRequest 
buildPartial() { + com.google.spanner.v1.DeleteSessionRequest result = + new com.google.spanner.v1.DeleteSessionRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.DeleteSessionRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.DeleteSessionRequest) { + return mergeFrom((com.google.spanner.v1.DeleteSessionRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.DeleteSessionRequest other) { + if (other == com.google.spanner.v1.DeleteSessionRequest.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally 
+ return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Required. The name of the session to delete.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the session to delete.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the session to delete.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the session to delete.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the session to delete.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.DeleteSessionRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.DeleteSessionRequest) + private static final com.google.spanner.v1.DeleteSessionRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.DeleteSessionRequest(); + } + + public static com.google.spanner.v1.DeleteSessionRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteSessionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return 
PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.DeleteSessionRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DeleteSessionRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DeleteSessionRequestOrBuilder.java new file mode 100644 index 000000000000..d8187971b1e3 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DeleteSessionRequestOrBuilder.java @@ -0,0 +1,58 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface DeleteSessionRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.DeleteSessionRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the session to delete.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Required. The name of the session to delete.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DirectedReadOptions.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DirectedReadOptions.java new file mode 100644 index 000000000000..9bdd468b0139 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DirectedReadOptions.java @@ -0,0 +1,4429 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * The `DirectedReadOptions` can be used to indicate which replicas or regions
    + * should be used for non-transactional reads or queries.
    + *
    + * `DirectedReadOptions` can only be specified for a read-only transaction,
    + * otherwise the API returns an `INVALID_ARGUMENT` error.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.DirectedReadOptions} + */ +@com.google.protobuf.Generated +public final class DirectedReadOptions extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.DirectedReadOptions) + DirectedReadOptionsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "DirectedReadOptions"); + } + + // Use DirectedReadOptions.newBuilder() to construct. + private DirectedReadOptions(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private DirectedReadOptions() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DirectedReadOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DirectedReadOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.DirectedReadOptions.class, + com.google.spanner.v1.DirectedReadOptions.Builder.class); + } + + public interface ReplicaSelectionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.DirectedReadOptions.ReplicaSelection) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * The location or region of the serving requests, for example, "us-east1".
    +     * 
    + * + * string location = 1; + * + * @return The location. + */ + java.lang.String getLocation(); + + /** + * + * + *
    +     * The location or region of the serving requests, for example, "us-east1".
    +     * 
    + * + * string location = 1; + * + * @return The bytes for location. + */ + com.google.protobuf.ByteString getLocationBytes(); + + /** + * + * + *
    +     * The type of replica.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type type = 2; + * + * @return The enum numeric value on the wire for type. + */ + int getTypeValue(); + + /** + * + * + *
    +     * The type of replica.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type type = 2; + * + * @return The type. + */ + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type getType(); + } + + /** + * + * + *
    +   * The directed read replica selector.
    +   * Callers must provide one or more of the following fields for replica
    +   * selection:
    +   *
    +   * * `location` - The location must be one of the regions within the
    +   * multi-region configuration of your database.
    +   * * `type` - The type of the replica.
    +   *
    +   * Some examples of using replica_selectors are:
    +   *
    +   * * `location:us-east1` --> The "us-east1" replica(s) of any available type
    +   * is used to process the request.
    +   * * `type:READ_ONLY`    --> The "READ_ONLY" type replica(s) in the nearest
    +   * available location are used to process the
    +   * request.
    +   * * `location:us-east1 type:READ_ONLY` --> The "READ_ONLY" type replica(s)
    +   * in location "us-east1" is used to process
    +   * the request.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.DirectedReadOptions.ReplicaSelection} + */ + public static final class ReplicaSelection extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.DirectedReadOptions.ReplicaSelection) + ReplicaSelectionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ReplicaSelection"); + } + + // Use ReplicaSelection.newBuilder() to construct. + private ReplicaSelection(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ReplicaSelection() { + location_ = ""; + type_ = 0; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DirectedReadOptions_ReplicaSelection_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DirectedReadOptions_ReplicaSelection_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.class, + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Builder.class); + } + + /** + * + * + *
    +     * Indicates the type of replica.
    +     * 
    + * + * Protobuf enum {@code google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type} + */ + public enum Type implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +       * Not specified.
    +       * 
    + * + * TYPE_UNSPECIFIED = 0; + */ + TYPE_UNSPECIFIED(0), + /** + * + * + *
    +       * Read-write replicas support both reads and writes.
    +       * 
    + * + * READ_WRITE = 1; + */ + READ_WRITE(1), + /** + * + * + *
    +       * Read-only replicas only support reads (not writes).
    +       * 
    + * + * READ_ONLY = 2; + */ + READ_ONLY(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Type"); + } + + /** + * + * + *
    +       * Not specified.
    +       * 
    + * + * TYPE_UNSPECIFIED = 0; + */ + public static final int TYPE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +       * Read-write replicas support both reads and writes.
    +       * 
    + * + * READ_WRITE = 1; + */ + public static final int READ_WRITE_VALUE = 1; + + /** + * + * + *
    +       * Read-only replicas only support reads (not writes).
    +       * 
    + * + * READ_ONLY = 2; + */ + public static final int READ_ONLY_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Type valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Type forNumber(int value) { + switch (value) { + case 0: + return TYPE_UNSPECIFIED; + case 1: + return READ_WRITE; + case 2: + return READ_ONLY; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Type findValueByNumber(int number) { + return Type.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final Type[] VALUES = values(); + + public static Type 
valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Type(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type) + } + + public static final int LOCATION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object location_ = ""; + + /** + * + * + *
    +     * The location or region of the serving requests, for example, "us-east1".
    +     * 
    + * + * string location = 1; + * + * @return The location. + */ + @java.lang.Override + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + location_ = s; + return s; + } + } + + /** + * + * + *
    +     * The location or region of the serving requests, for example, "us-east1".
    +     * 
    + * + * string location = 1; + * + * @return The bytes for location. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TYPE_FIELD_NUMBER = 2; + private int type_ = 0; + + /** + * + * + *
    +     * The type of replica.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type type = 2; + * + * @return The enum numeric value on the wire for type. + */ + @java.lang.Override + public int getTypeValue() { + return type_; + } + + /** + * + * + *
    +     * The type of replica.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type type = 2; + * + * @return The type. + */ + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type getType() { + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type result = + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type.forNumber(type_); + return result == null + ? com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type.UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(location_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, location_); + } + if (type_ + != com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type.TYPE_UNSPECIFIED + .getNumber()) { + output.writeEnum(2, type_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(location_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, location_); + } + if (type_ + != com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type.TYPE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, type_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
com.google.spanner.v1.DirectedReadOptions.ReplicaSelection)) { + return super.equals(obj); + } + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection other = + (com.google.spanner.v1.DirectedReadOptions.ReplicaSelection) obj; + + if (!getLocation().equals(other.getLocation())) return false; + if (type_ != other.type_) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + LOCATION_FIELD_NUMBER; + hash = (53 * hash) + getLocation().hashCode(); + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + type_; + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.DirectedReadOptions.ReplicaSelection parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.DirectedReadOptions.ReplicaSelection parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.DirectedReadOptions.ReplicaSelection parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.DirectedReadOptions.ReplicaSelection parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.DirectedReadOptions.ReplicaSelection 
parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.DirectedReadOptions.ReplicaSelection parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.DirectedReadOptions.ReplicaSelection parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.DirectedReadOptions.ReplicaSelection parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.DirectedReadOptions.ReplicaSelection parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.DirectedReadOptions.ReplicaSelection parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.DirectedReadOptions.ReplicaSelection parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.DirectedReadOptions.ReplicaSelection parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * The directed read replica selector.
    +     * Callers must provide one or more of the following fields for replica
    +     * selection:
    +     *
    +     * * `location` - The location must be one of the regions within the
    +     * multi-region configuration of your database.
    +     * * `type` - The type of the replica.
    +     *
    +     * Some examples of using replica_selectors are:
    +     *
    +     * * `location:us-east1` --> The "us-east1" replica(s) of any available type
    +     * is used to process the request.
    +     * * `type:READ_ONLY`    --> The "READ_ONLY" type replica(s) in the nearest
    +     * available location are used to process the
    +     * request.
    +     * * `location:us-east1 type:READ_ONLY` --> The "READ_ONLY" type replica(s)
    +     * in location "us-east1" is used to process
    +     * the request.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.DirectedReadOptions.ReplicaSelection} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.DirectedReadOptions.ReplicaSelection) + com.google.spanner.v1.DirectedReadOptions.ReplicaSelectionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DirectedReadOptions_ReplicaSelection_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DirectedReadOptions_ReplicaSelection_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.class, + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Builder.class); + } + + // Construct using com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + location_ = ""; + type_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DirectedReadOptions_ReplicaSelection_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.ReplicaSelection + getDefaultInstanceForType() { + return com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.ReplicaSelection build() { + 
com.google.spanner.v1.DirectedReadOptions.ReplicaSelection result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.ReplicaSelection buildPartial() { + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection result = + new com.google.spanner.v1.DirectedReadOptions.ReplicaSelection(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.location_ = location_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.type_ = type_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.DirectedReadOptions.ReplicaSelection) { + return mergeFrom((com.google.spanner.v1.DirectedReadOptions.ReplicaSelection) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.DirectedReadOptions.ReplicaSelection other) { + if (other + == com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.getDefaultInstance()) + return this; + if (!other.getLocation().isEmpty()) { + location_ = other.location_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.type_ != 0) { + setTypeValue(other.getTypeValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + 
try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + location_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + type_ = input.readEnum(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object location_ = ""; + + /** + * + * + *
    +       * The location or region of the serving requests, for example, "us-east1".
    +       * 
    + * + * string location = 1; + * + * @return The location. + */ + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + location_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * The location or region of the serving requests, for example, "us-east1".
    +       * 
    + * + * string location = 1; + * + * @return The bytes for location. + */ + public com.google.protobuf.ByteString getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * The location or region of the serving requests, for example, "us-east1".
    +       * 
    + * + * string location = 1; + * + * @param value The location to set. + * @return This builder for chaining. + */ + public Builder setLocation(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + location_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The location or region of the serving requests, for example, "us-east1".
    +       * 
    + * + * string location = 1; + * + * @return This builder for chaining. + */ + public Builder clearLocation() { + location_ = getDefaultInstance().getLocation(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +       * The location or region of the serving requests, for example, "us-east1".
    +       * 
    + * + * string location = 1; + * + * @param value The bytes for location to set. + * @return This builder for chaining. + */ + public Builder setLocationBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + location_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int type_ = 0; + + /** + * + * + *
    +       * The type of replica.
    +       * 
    + * + * .google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type type = 2; + * + * @return The enum numeric value on the wire for type. + */ + @java.lang.Override + public int getTypeValue() { + return type_; + } + + /** + * + * + *
    +       * The type of replica.
    +       * 
    + * + * .google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type type = 2; + * + * @param value The enum numeric value on the wire for type to set. + * @return This builder for chaining. + */ + public Builder setTypeValue(int value) { + type_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The type of replica.
    +       * 
    + * + * .google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type type = 2; + * + * @return The type. + */ + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type getType() { + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type result = + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type.forNumber(type_); + return result == null + ? com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +       * The type of replica.
    +       * 
    + * + * .google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type type = 2; + * + * @param value The type to set. + * @return This builder for chaining. + */ + public Builder setType( + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + type_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +       * The type of replica.
    +       * 
    + * + * .google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type type = 2; + * + * @return This builder for chaining. + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000002); + type_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.DirectedReadOptions.ReplicaSelection) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.DirectedReadOptions.ReplicaSelection) + private static final com.google.spanner.v1.DirectedReadOptions.ReplicaSelection + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.DirectedReadOptions.ReplicaSelection(); + } + + public static com.google.spanner.v1.DirectedReadOptions.ReplicaSelection getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReplicaSelection parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.ReplicaSelection 
getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface IncludeReplicasOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.DirectedReadOptions.IncludeReplicas) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * The directed read replica selector.
    +     * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + java.util.List + getReplicaSelectionsList(); + + /** + * + * + *
    +     * The directed read replica selector.
    +     * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection getReplicaSelections(int index); + + /** + * + * + *
    +     * The directed read replica selector.
    +     * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + int getReplicaSelectionsCount(); + + /** + * + * + *
    +     * The directed read replica selector.
    +     * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + java.util.List + getReplicaSelectionsOrBuilderList(); + + /** + * + * + *
    +     * The directed read replica selector.
    +     * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + com.google.spanner.v1.DirectedReadOptions.ReplicaSelectionOrBuilder + getReplicaSelectionsOrBuilder(int index); + + /** + * + * + *
    +     * If `true`, Spanner doesn't route requests to a replica outside the
    +     * <`include_replicas` list when all of the specified replicas are
    +     * unavailable or unhealthy. Default value is `false`.
    +     * 
    + * + * bool auto_failover_disabled = 2; + * + * @return The autoFailoverDisabled. + */ + boolean getAutoFailoverDisabled(); + } + + /** + * + * + *
    +   * An `IncludeReplicas` contains a repeated set of `ReplicaSelection` which
    +   * indicates the order in which replicas should be considered.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.DirectedReadOptions.IncludeReplicas} + */ + public static final class IncludeReplicas extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.DirectedReadOptions.IncludeReplicas) + IncludeReplicasOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "IncludeReplicas"); + } + + // Use IncludeReplicas.newBuilder() to construct. + private IncludeReplicas(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private IncludeReplicas() { + replicaSelections_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DirectedReadOptions_IncludeReplicas_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DirectedReadOptions_IncludeReplicas_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.DirectedReadOptions.IncludeReplicas.class, + com.google.spanner.v1.DirectedReadOptions.IncludeReplicas.Builder.class); + } + + public static final int REPLICA_SELECTIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List + replicaSelections_; + + /** + * + * + *
    +     * The directed read replica selector.
    +     * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + @java.lang.Override + public java.util.List + getReplicaSelectionsList() { + return replicaSelections_; + } + + /** + * + * + *
    +     * The directed read replica selector.
    +     * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + @java.lang.Override + public java.util.List< + ? extends com.google.spanner.v1.DirectedReadOptions.ReplicaSelectionOrBuilder> + getReplicaSelectionsOrBuilderList() { + return replicaSelections_; + } + + /** + * + * + *
    +     * The directed read replica selector.
    +     * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + @java.lang.Override + public int getReplicaSelectionsCount() { + return replicaSelections_.size(); + } + + /** + * + * + *
    +     * The directed read replica selector.
    +     * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.ReplicaSelection getReplicaSelections( + int index) { + return replicaSelections_.get(index); + } + + /** + * + * + *
    +     * The directed read replica selector.
    +     * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.ReplicaSelectionOrBuilder + getReplicaSelectionsOrBuilder(int index) { + return replicaSelections_.get(index); + } + + public static final int AUTO_FAILOVER_DISABLED_FIELD_NUMBER = 2; + private boolean autoFailoverDisabled_ = false; + + /** + * + * + *
    +     * If `true`, Spanner doesn't route requests to a replica outside the
    +     * <`include_replicas` list when all of the specified replicas are
    +     * unavailable or unhealthy. Default value is `false`.
    +     * 
    + * + * bool auto_failover_disabled = 2; + * + * @return The autoFailoverDisabled. + */ + @java.lang.Override + public boolean getAutoFailoverDisabled() { + return autoFailoverDisabled_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < replicaSelections_.size(); i++) { + output.writeMessage(1, replicaSelections_.get(i)); + } + if (autoFailoverDisabled_ != false) { + output.writeBool(2, autoFailoverDisabled_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < replicaSelections_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(1, replicaSelections_.get(i)); + } + if (autoFailoverDisabled_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(2, autoFailoverDisabled_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.DirectedReadOptions.IncludeReplicas)) { + return super.equals(obj); + } + com.google.spanner.v1.DirectedReadOptions.IncludeReplicas other = + (com.google.spanner.v1.DirectedReadOptions.IncludeReplicas) obj; + + if (!getReplicaSelectionsList().equals(other.getReplicaSelectionsList())) return false; + if (getAutoFailoverDisabled() != other.getAutoFailoverDisabled()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + 
} + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getReplicaSelectionsCount() > 0) { + hash = (37 * hash) + REPLICA_SELECTIONS_FIELD_NUMBER; + hash = (53 * hash) + getReplicaSelectionsList().hashCode(); + } + hash = (37 * hash) + AUTO_FAILOVER_DISABLED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getAutoFailoverDisabled()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.DirectedReadOptions.IncludeReplicas parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.DirectedReadOptions.IncludeReplicas parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.DirectedReadOptions.IncludeReplicas parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.DirectedReadOptions.IncludeReplicas parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.DirectedReadOptions.IncludeReplicas parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.DirectedReadOptions.IncludeReplicas parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.DirectedReadOptions.IncludeReplicas parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.DirectedReadOptions.IncludeReplicas parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.DirectedReadOptions.IncludeReplicas parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.DirectedReadOptions.IncludeReplicas parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.DirectedReadOptions.IncludeReplicas parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.DirectedReadOptions.IncludeReplicas parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return 
DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.v1.DirectedReadOptions.IncludeReplicas prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * An `IncludeReplicas` contains a repeated set of `ReplicaSelection` which
    +     * indicates the order in which replicas should be considered.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.DirectedReadOptions.IncludeReplicas} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.DirectedReadOptions.IncludeReplicas) + com.google.spanner.v1.DirectedReadOptions.IncludeReplicasOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DirectedReadOptions_IncludeReplicas_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DirectedReadOptions_IncludeReplicas_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.DirectedReadOptions.IncludeReplicas.class, + com.google.spanner.v1.DirectedReadOptions.IncludeReplicas.Builder.class); + } + + // Construct using com.google.spanner.v1.DirectedReadOptions.IncludeReplicas.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (replicaSelectionsBuilder_ == null) { + replicaSelections_ = java.util.Collections.emptyList(); + } else { + replicaSelections_ = null; + replicaSelectionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + autoFailoverDisabled_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DirectedReadOptions_IncludeReplicas_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.IncludeReplicas getDefaultInstanceForType() { + return 
com.google.spanner.v1.DirectedReadOptions.IncludeReplicas.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.IncludeReplicas build() { + com.google.spanner.v1.DirectedReadOptions.IncludeReplicas result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.IncludeReplicas buildPartial() { + com.google.spanner.v1.DirectedReadOptions.IncludeReplicas result = + new com.google.spanner.v1.DirectedReadOptions.IncludeReplicas(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.v1.DirectedReadOptions.IncludeReplicas result) { + if (replicaSelectionsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + replicaSelections_ = java.util.Collections.unmodifiableList(replicaSelections_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.replicaSelections_ = replicaSelections_; + } else { + result.replicaSelections_ = replicaSelectionsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.DirectedReadOptions.IncludeReplicas result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.autoFailoverDisabled_ = autoFailoverDisabled_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.DirectedReadOptions.IncludeReplicas) { + return mergeFrom((com.google.spanner.v1.DirectedReadOptions.IncludeReplicas) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.DirectedReadOptions.IncludeReplicas other) { + if (other == com.google.spanner.v1.DirectedReadOptions.IncludeReplicas.getDefaultInstance()) + return this; + if 
(replicaSelectionsBuilder_ == null) { + if (!other.replicaSelections_.isEmpty()) { + if (replicaSelections_.isEmpty()) { + replicaSelections_ = other.replicaSelections_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureReplicaSelectionsIsMutable(); + replicaSelections_.addAll(other.replicaSelections_); + } + onChanged(); + } + } else { + if (!other.replicaSelections_.isEmpty()) { + if (replicaSelectionsBuilder_.isEmpty()) { + replicaSelectionsBuilder_.dispose(); + replicaSelectionsBuilder_ = null; + replicaSelections_ = other.replicaSelections_; + bitField0_ = (bitField0_ & ~0x00000001); + replicaSelectionsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetReplicaSelectionsFieldBuilder() + : null; + } else { + replicaSelectionsBuilder_.addAllMessages(other.replicaSelections_); + } + } + } + if (other.getAutoFailoverDisabled() != false) { + setAutoFailoverDisabled(other.getAutoFailoverDisabled()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection m = + input.readMessage( + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.parser(), + extensionRegistry); + if (replicaSelectionsBuilder_ == null) { + ensureReplicaSelectionsIsMutable(); + replicaSelections_.add(m); + } else { + replicaSelectionsBuilder_.addMessage(m); + } + break; + } // case 10 + case 16: + { + autoFailoverDisabled_ = input.readBool(); + bitField0_ 
|= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List + replicaSelections_ = java.util.Collections.emptyList(); + + private void ensureReplicaSelectionsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + replicaSelections_ = + new java.util.ArrayList( + replicaSelections_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection, + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Builder, + com.google.spanner.v1.DirectedReadOptions.ReplicaSelectionOrBuilder> + replicaSelectionsBuilder_; + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public java.util.List + getReplicaSelectionsList() { + if (replicaSelectionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(replicaSelections_); + } else { + return replicaSelectionsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public int getReplicaSelectionsCount() { + if (replicaSelectionsBuilder_ == null) { + return replicaSelections_.size(); + } else { + return replicaSelectionsBuilder_.getCount(); + } + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public com.google.spanner.v1.DirectedReadOptions.ReplicaSelection getReplicaSelections( + int index) { + if (replicaSelectionsBuilder_ == null) { + return replicaSelections_.get(index); + } else { + return replicaSelectionsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public Builder setReplicaSelections( + int index, com.google.spanner.v1.DirectedReadOptions.ReplicaSelection value) { + if (replicaSelectionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplicaSelectionsIsMutable(); + replicaSelections_.set(index, value); + onChanged(); + } else { + replicaSelectionsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public Builder setReplicaSelections( + int index, + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Builder builderForValue) { + if (replicaSelectionsBuilder_ == null) { + ensureReplicaSelectionsIsMutable(); + replicaSelections_.set(index, builderForValue.build()); + onChanged(); + } else { + replicaSelectionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public Builder addReplicaSelections( + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection value) { + if (replicaSelectionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplicaSelectionsIsMutable(); + replicaSelections_.add(value); + onChanged(); + } else { + replicaSelectionsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public Builder addReplicaSelections( + int index, com.google.spanner.v1.DirectedReadOptions.ReplicaSelection value) { + if (replicaSelectionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplicaSelectionsIsMutable(); + replicaSelections_.add(index, value); + onChanged(); + } else { + replicaSelectionsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public Builder addReplicaSelections( + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Builder builderForValue) { + if (replicaSelectionsBuilder_ == null) { + ensureReplicaSelectionsIsMutable(); + replicaSelections_.add(builderForValue.build()); + onChanged(); + } else { + replicaSelectionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public Builder addReplicaSelections( + int index, + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Builder builderForValue) { + if (replicaSelectionsBuilder_ == null) { + ensureReplicaSelectionsIsMutable(); + replicaSelections_.add(index, builderForValue.build()); + onChanged(); + } else { + replicaSelectionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public Builder addAllReplicaSelections( + java.lang.Iterable + values) { + if (replicaSelectionsBuilder_ == null) { + ensureReplicaSelectionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, replicaSelections_); + onChanged(); + } else { + replicaSelectionsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public Builder clearReplicaSelections() { + if (replicaSelectionsBuilder_ == null) { + replicaSelections_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + replicaSelectionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public Builder removeReplicaSelections(int index) { + if (replicaSelectionsBuilder_ == null) { + ensureReplicaSelectionsIsMutable(); + replicaSelections_.remove(index); + onChanged(); + } else { + replicaSelectionsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Builder + getReplicaSelectionsBuilder(int index) { + return internalGetReplicaSelectionsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public com.google.spanner.v1.DirectedReadOptions.ReplicaSelectionOrBuilder + getReplicaSelectionsOrBuilder(int index) { + if (replicaSelectionsBuilder_ == null) { + return replicaSelections_.get(index); + } else { + return replicaSelectionsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public java.util.List< + ? extends com.google.spanner.v1.DirectedReadOptions.ReplicaSelectionOrBuilder> + getReplicaSelectionsOrBuilderList() { + if (replicaSelectionsBuilder_ != null) { + return replicaSelectionsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(replicaSelections_); + } + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Builder + addReplicaSelectionsBuilder() { + return internalGetReplicaSelectionsFieldBuilder() + .addBuilder( + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.getDefaultInstance()); + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Builder + addReplicaSelectionsBuilder(int index) { + return internalGetReplicaSelectionsFieldBuilder() + .addBuilder( + index, + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.getDefaultInstance()); + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public java.util.List + getReplicaSelectionsBuilderList() { + return internalGetReplicaSelectionsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection, + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Builder, + com.google.spanner.v1.DirectedReadOptions.ReplicaSelectionOrBuilder> + internalGetReplicaSelectionsFieldBuilder() { + if (replicaSelectionsBuilder_ == null) { + replicaSelectionsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection, + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Builder, + com.google.spanner.v1.DirectedReadOptions.ReplicaSelectionOrBuilder>( + replicaSelections_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + replicaSelections_ = null; + } + return replicaSelectionsBuilder_; + } + + private boolean autoFailoverDisabled_; + + /** + * + * + *
    +       * If `true`, Spanner doesn't route requests to a replica outside the
    +       * <`include_replicas` list when all of the specified replicas are
    +       * unavailable or unhealthy. Default value is `false`.
    +       * 
    + * + * bool auto_failover_disabled = 2; + * + * @return The autoFailoverDisabled. + */ + @java.lang.Override + public boolean getAutoFailoverDisabled() { + return autoFailoverDisabled_; + } + + /** + * + * + *
    +       * If `true`, Spanner doesn't route requests to a replica outside the
    +       * <`include_replicas` list when all of the specified replicas are
    +       * unavailable or unhealthy. Default value is `false`.
    +       * 
    + * + * bool auto_failover_disabled = 2; + * + * @param value The autoFailoverDisabled to set. + * @return This builder for chaining. + */ + public Builder setAutoFailoverDisabled(boolean value) { + + autoFailoverDisabled_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * If `true`, Spanner doesn't route requests to a replica outside the
    +       * <`include_replicas` list when all of the specified replicas are
    +       * unavailable or unhealthy. Default value is `false`.
    +       * 
    + * + * bool auto_failover_disabled = 2; + * + * @return This builder for chaining. + */ + public Builder clearAutoFailoverDisabled() { + bitField0_ = (bitField0_ & ~0x00000002); + autoFailoverDisabled_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.DirectedReadOptions.IncludeReplicas) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.DirectedReadOptions.IncludeReplicas) + private static final com.google.spanner.v1.DirectedReadOptions.IncludeReplicas DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.DirectedReadOptions.IncludeReplicas(); + } + + public static com.google.spanner.v1.DirectedReadOptions.IncludeReplicas getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public IncludeReplicas parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.IncludeReplicas getDefaultInstanceForType() { + 
return DEFAULT_INSTANCE; + } + } + + public interface ExcludeReplicasOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.DirectedReadOptions.ExcludeReplicas) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * The directed read replica selector.
    +     * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + java.util.List + getReplicaSelectionsList(); + + /** + * + * + *
    +     * The directed read replica selector.
    +     * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection getReplicaSelections(int index); + + /** + * + * + *
    +     * The directed read replica selector.
    +     * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + int getReplicaSelectionsCount(); + + /** + * + * + *
    +     * The directed read replica selector.
    +     * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + java.util.List + getReplicaSelectionsOrBuilderList(); + + /** + * + * + *
    +     * The directed read replica selector.
    +     * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + com.google.spanner.v1.DirectedReadOptions.ReplicaSelectionOrBuilder + getReplicaSelectionsOrBuilder(int index); + } + + /** + * + * + *
    +   * An ExcludeReplicas contains a repeated set of ReplicaSelection that should
    +   * be excluded from serving requests.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.DirectedReadOptions.ExcludeReplicas} + */ + public static final class ExcludeReplicas extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.DirectedReadOptions.ExcludeReplicas) + ExcludeReplicasOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ExcludeReplicas"); + } + + // Use ExcludeReplicas.newBuilder() to construct. + private ExcludeReplicas(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ExcludeReplicas() { + replicaSelections_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DirectedReadOptions_ExcludeReplicas_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DirectedReadOptions_ExcludeReplicas_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas.class, + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas.Builder.class); + } + + public static final int REPLICA_SELECTIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List + replicaSelections_; + + /** + * + * + *
    +     * The directed read replica selector.
    +     * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + @java.lang.Override + public java.util.List + getReplicaSelectionsList() { + return replicaSelections_; + } + + /** + * + * + *
    +     * The directed read replica selector.
    +     * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + @java.lang.Override + public java.util.List< + ? extends com.google.spanner.v1.DirectedReadOptions.ReplicaSelectionOrBuilder> + getReplicaSelectionsOrBuilderList() { + return replicaSelections_; + } + + /** + * + * + *
    +     * The directed read replica selector.
    +     * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + @java.lang.Override + public int getReplicaSelectionsCount() { + return replicaSelections_.size(); + } + + /** + * + * + *
    +     * The directed read replica selector.
    +     * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.ReplicaSelection getReplicaSelections( + int index) { + return replicaSelections_.get(index); + } + + /** + * + * + *
    +     * The directed read replica selector.
    +     * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.ReplicaSelectionOrBuilder + getReplicaSelectionsOrBuilder(int index) { + return replicaSelections_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < replicaSelections_.size(); i++) { + output.writeMessage(1, replicaSelections_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < replicaSelections_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(1, replicaSelections_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas)) { + return super.equals(obj); + } + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas other = + (com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas) obj; + + if (!getReplicaSelectionsList().equals(other.getReplicaSelectionsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if 
(getReplicaSelectionsCount() > 0) { + hash = (37 * hash) + REPLICA_SELECTIONS_FIELD_NUMBER; + hash = (53 * hash) + getReplicaSelectionsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, 
input); + } + + public static com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * An ExcludeReplicas contains a repeated set of ReplicaSelection that should
    +     * be excluded from serving requests.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.DirectedReadOptions.ExcludeReplicas} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.DirectedReadOptions.ExcludeReplicas) + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicasOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DirectedReadOptions_ExcludeReplicas_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DirectedReadOptions_ExcludeReplicas_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas.class, + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas.Builder.class); + } + + // Construct using com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (replicaSelectionsBuilder_ == null) { + replicaSelections_ = java.util.Collections.emptyList(); + } else { + replicaSelections_ = null; + replicaSelectionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DirectedReadOptions_ExcludeReplicas_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas getDefaultInstanceForType() { + return 
com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas build() { + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas buildPartial() { + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas result = + new com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas result) { + if (replicaSelectionsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + replicaSelections_ = java.util.Collections.unmodifiableList(replicaSelections_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.replicaSelections_ = replicaSelections_; + } else { + result.replicaSelections_ = replicaSelectionsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas) { + return mergeFrom((com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas other) { + if (other == com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas.getDefaultInstance()) + return this; + if (replicaSelectionsBuilder_ == null) { + if (!other.replicaSelections_.isEmpty()) { + if 
(replicaSelections_.isEmpty()) { + replicaSelections_ = other.replicaSelections_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureReplicaSelectionsIsMutable(); + replicaSelections_.addAll(other.replicaSelections_); + } + onChanged(); + } + } else { + if (!other.replicaSelections_.isEmpty()) { + if (replicaSelectionsBuilder_.isEmpty()) { + replicaSelectionsBuilder_.dispose(); + replicaSelectionsBuilder_ = null; + replicaSelections_ = other.replicaSelections_; + bitField0_ = (bitField0_ & ~0x00000001); + replicaSelectionsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetReplicaSelectionsFieldBuilder() + : null; + } else { + replicaSelectionsBuilder_.addAllMessages(other.replicaSelections_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection m = + input.readMessage( + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.parser(), + extensionRegistry); + if (replicaSelectionsBuilder_ == null) { + ensureReplicaSelectionsIsMutable(); + replicaSelections_.add(m); + } else { + replicaSelectionsBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw 
e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List + replicaSelections_ = java.util.Collections.emptyList(); + + private void ensureReplicaSelectionsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + replicaSelections_ = + new java.util.ArrayList( + replicaSelections_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection, + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Builder, + com.google.spanner.v1.DirectedReadOptions.ReplicaSelectionOrBuilder> + replicaSelectionsBuilder_; + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public java.util.List + getReplicaSelectionsList() { + if (replicaSelectionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(replicaSelections_); + } else { + return replicaSelectionsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public int getReplicaSelectionsCount() { + if (replicaSelectionsBuilder_ == null) { + return replicaSelections_.size(); + } else { + return replicaSelectionsBuilder_.getCount(); + } + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public com.google.spanner.v1.DirectedReadOptions.ReplicaSelection getReplicaSelections( + int index) { + if (replicaSelectionsBuilder_ == null) { + return replicaSelections_.get(index); + } else { + return replicaSelectionsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public Builder setReplicaSelections( + int index, com.google.spanner.v1.DirectedReadOptions.ReplicaSelection value) { + if (replicaSelectionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplicaSelectionsIsMutable(); + replicaSelections_.set(index, value); + onChanged(); + } else { + replicaSelectionsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public Builder setReplicaSelections( + int index, + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Builder builderForValue) { + if (replicaSelectionsBuilder_ == null) { + ensureReplicaSelectionsIsMutable(); + replicaSelections_.set(index, builderForValue.build()); + onChanged(); + } else { + replicaSelectionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public Builder addReplicaSelections( + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection value) { + if (replicaSelectionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplicaSelectionsIsMutable(); + replicaSelections_.add(value); + onChanged(); + } else { + replicaSelectionsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public Builder addReplicaSelections( + int index, com.google.spanner.v1.DirectedReadOptions.ReplicaSelection value) { + if (replicaSelectionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplicaSelectionsIsMutable(); + replicaSelections_.add(index, value); + onChanged(); + } else { + replicaSelectionsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public Builder addReplicaSelections( + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Builder builderForValue) { + if (replicaSelectionsBuilder_ == null) { + ensureReplicaSelectionsIsMutable(); + replicaSelections_.add(builderForValue.build()); + onChanged(); + } else { + replicaSelectionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public Builder addReplicaSelections( + int index, + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Builder builderForValue) { + if (replicaSelectionsBuilder_ == null) { + ensureReplicaSelectionsIsMutable(); + replicaSelections_.add(index, builderForValue.build()); + onChanged(); + } else { + replicaSelectionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public Builder addAllReplicaSelections( + java.lang.Iterable + values) { + if (replicaSelectionsBuilder_ == null) { + ensureReplicaSelectionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, replicaSelections_); + onChanged(); + } else { + replicaSelectionsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public Builder clearReplicaSelections() { + if (replicaSelectionsBuilder_ == null) { + replicaSelections_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + replicaSelectionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public Builder removeReplicaSelections(int index) { + if (replicaSelectionsBuilder_ == null) { + ensureReplicaSelectionsIsMutable(); + replicaSelections_.remove(index); + onChanged(); + } else { + replicaSelectionsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Builder + getReplicaSelectionsBuilder(int index) { + return internalGetReplicaSelectionsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public com.google.spanner.v1.DirectedReadOptions.ReplicaSelectionOrBuilder + getReplicaSelectionsOrBuilder(int index) { + if (replicaSelectionsBuilder_ == null) { + return replicaSelections_.get(index); + } else { + return replicaSelectionsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public java.util.List< + ? extends com.google.spanner.v1.DirectedReadOptions.ReplicaSelectionOrBuilder> + getReplicaSelectionsOrBuilderList() { + if (replicaSelectionsBuilder_ != null) { + return replicaSelectionsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(replicaSelections_); + } + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Builder + addReplicaSelectionsBuilder() { + return internalGetReplicaSelectionsFieldBuilder() + .addBuilder( + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.getDefaultInstance()); + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Builder + addReplicaSelectionsBuilder(int index) { + return internalGetReplicaSelectionsFieldBuilder() + .addBuilder( + index, + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.getDefaultInstance()); + } + + /** + * + * + *
    +       * The directed read replica selector.
    +       * 
    + * + * + * repeated .google.spanner.v1.DirectedReadOptions.ReplicaSelection replica_selections = 1; + * + */ + public java.util.List + getReplicaSelectionsBuilderList() { + return internalGetReplicaSelectionsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection, + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Builder, + com.google.spanner.v1.DirectedReadOptions.ReplicaSelectionOrBuilder> + internalGetReplicaSelectionsFieldBuilder() { + if (replicaSelectionsBuilder_ == null) { + replicaSelectionsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection, + com.google.spanner.v1.DirectedReadOptions.ReplicaSelection.Builder, + com.google.spanner.v1.DirectedReadOptions.ReplicaSelectionOrBuilder>( + replicaSelections_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + replicaSelections_ = null; + } + return replicaSelectionsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.DirectedReadOptions.ExcludeReplicas) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.DirectedReadOptions.ExcludeReplicas) + private static final com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas(); + } + + public static com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ExcludeReplicas parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, 
extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int replicasCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object replicas_; + + public enum ReplicasCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + INCLUDE_REPLICAS(1), + EXCLUDE_REPLICAS(2), + REPLICAS_NOT_SET(0); + private final int value; + + private ReplicasCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ReplicasCase valueOf(int value) { + return forNumber(value); + } + + public static ReplicasCase forNumber(int value) { + switch (value) { + case 1: + return INCLUDE_REPLICAS; + case 2: + return EXCLUDE_REPLICAS; + case 0: + return REPLICAS_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public ReplicasCase getReplicasCase() { + return ReplicasCase.forNumber(replicasCase_); + } + + public static final int INCLUDE_REPLICAS_FIELD_NUMBER = 1; + + /** + * + * + *
    +   * `Include_replicas` indicates the order of replicas (as they appear in
    +   * this list) to process the request. If `auto_failover_disabled` is set to
    +   * `true` and all replicas are exhausted without finding a healthy replica,
    +   * Spanner waits for a replica in the list to become available, requests
    +   * might fail due to `DEADLINE_EXCEEDED` errors.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions.IncludeReplicas include_replicas = 1; + * + * @return Whether the includeReplicas field is set. + */ + @java.lang.Override + public boolean hasIncludeReplicas() { + return replicasCase_ == 1; + } + + /** + * + * + *
    +   * `Include_replicas` indicates the order of replicas (as they appear in
    +   * this list) to process the request. If `auto_failover_disabled` is set to
    +   * `true` and all replicas are exhausted without finding a healthy replica,
    +   * Spanner waits for a replica in the list to become available, requests
    +   * might fail due to `DEADLINE_EXCEEDED` errors.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions.IncludeReplicas include_replicas = 1; + * + * @return The includeReplicas. + */ + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.IncludeReplicas getIncludeReplicas() { + if (replicasCase_ == 1) { + return (com.google.spanner.v1.DirectedReadOptions.IncludeReplicas) replicas_; + } + return com.google.spanner.v1.DirectedReadOptions.IncludeReplicas.getDefaultInstance(); + } + + /** + * + * + *
    +   * `Include_replicas` indicates the order of replicas (as they appear in
    +   * this list) to process the request. If `auto_failover_disabled` is set to
    +   * `true` and all replicas are exhausted without finding a healthy replica,
    +   * Spanner waits for a replica in the list to become available, requests
    +   * might fail due to `DEADLINE_EXCEEDED` errors.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions.IncludeReplicas include_replicas = 1; + */ + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.IncludeReplicasOrBuilder + getIncludeReplicasOrBuilder() { + if (replicasCase_ == 1) { + return (com.google.spanner.v1.DirectedReadOptions.IncludeReplicas) replicas_; + } + return com.google.spanner.v1.DirectedReadOptions.IncludeReplicas.getDefaultInstance(); + } + + public static final int EXCLUDE_REPLICAS_FIELD_NUMBER = 2; + + /** + * + * + *
    +   * `Exclude_replicas` indicates that specified replicas should be excluded
    +   * from serving requests. Spanner doesn't route requests to the replicas
    +   * in this list.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions.ExcludeReplicas exclude_replicas = 2; + * + * @return Whether the excludeReplicas field is set. + */ + @java.lang.Override + public boolean hasExcludeReplicas() { + return replicasCase_ == 2; + } + + /** + * + * + *
    +   * `Exclude_replicas` indicates that specified replicas should be excluded
    +   * from serving requests. Spanner doesn't route requests to the replicas
    +   * in this list.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions.ExcludeReplicas exclude_replicas = 2; + * + * @return The excludeReplicas. + */ + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas getExcludeReplicas() { + if (replicasCase_ == 2) { + return (com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas) replicas_; + } + return com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas.getDefaultInstance(); + } + + /** + * + * + *
    +   * `Exclude_replicas` indicates that specified replicas should be excluded
    +   * from serving requests. Spanner doesn't route requests to the replicas
    +   * in this list.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions.ExcludeReplicas exclude_replicas = 2; + */ + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.ExcludeReplicasOrBuilder + getExcludeReplicasOrBuilder() { + if (replicasCase_ == 2) { + return (com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas) replicas_; + } + return com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (replicasCase_ == 1) { + output.writeMessage(1, (com.google.spanner.v1.DirectedReadOptions.IncludeReplicas) replicas_); + } + if (replicasCase_ == 2) { + output.writeMessage(2, (com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas) replicas_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (replicasCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, (com.google.spanner.v1.DirectedReadOptions.IncludeReplicas) replicas_); + } + if (replicasCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, (com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas) replicas_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.DirectedReadOptions)) { + return super.equals(obj); + } + com.google.spanner.v1.DirectedReadOptions other = + 
(com.google.spanner.v1.DirectedReadOptions) obj; + + if (!getReplicasCase().equals(other.getReplicasCase())) return false; + switch (replicasCase_) { + case 1: + if (!getIncludeReplicas().equals(other.getIncludeReplicas())) return false; + break; + case 2: + if (!getExcludeReplicas().equals(other.getExcludeReplicas())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (replicasCase_) { + case 1: + hash = (37 * hash) + INCLUDE_REPLICAS_FIELD_NUMBER; + hash = (53 * hash) + getIncludeReplicas().hashCode(); + break; + case 2: + hash = (37 * hash) + EXCLUDE_REPLICAS_FIELD_NUMBER; + hash = (53 * hash) + getExcludeReplicas().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.DirectedReadOptions parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.DirectedReadOptions parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.DirectedReadOptions parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.DirectedReadOptions parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.DirectedReadOptions parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.DirectedReadOptions parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.DirectedReadOptions parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.DirectedReadOptions parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.DirectedReadOptions parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.DirectedReadOptions parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.DirectedReadOptions parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.DirectedReadOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.DirectedReadOptions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The `DirectedReadOptions` can be used to indicate which replicas or regions
    +   * should be used for non-transactional reads or queries.
    +   *
    +   * `DirectedReadOptions` can only be specified for a read-only transaction,
    +   * otherwise the API returns an `INVALID_ARGUMENT` error.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.DirectedReadOptions} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.DirectedReadOptions) + com.google.spanner.v1.DirectedReadOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DirectedReadOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DirectedReadOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.DirectedReadOptions.class, + com.google.spanner.v1.DirectedReadOptions.Builder.class); + } + + // Construct using com.google.spanner.v1.DirectedReadOptions.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (includeReplicasBuilder_ != null) { + includeReplicasBuilder_.clear(); + } + if (excludeReplicasBuilder_ != null) { + excludeReplicasBuilder_.clear(); + } + replicasCase_ = 0; + replicas_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_DirectedReadOptions_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions getDefaultInstanceForType() { + return com.google.spanner.v1.DirectedReadOptions.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions build() { + com.google.spanner.v1.DirectedReadOptions result = buildPartial(); + if 
(!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions buildPartial() { + com.google.spanner.v1.DirectedReadOptions result = + new com.google.spanner.v1.DirectedReadOptions(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.DirectedReadOptions result) { + int from_bitField0_ = bitField0_; + } + + private void buildPartialOneofs(com.google.spanner.v1.DirectedReadOptions result) { + result.replicasCase_ = replicasCase_; + result.replicas_ = this.replicas_; + if (replicasCase_ == 1 && includeReplicasBuilder_ != null) { + result.replicas_ = includeReplicasBuilder_.build(); + } + if (replicasCase_ == 2 && excludeReplicasBuilder_ != null) { + result.replicas_ = excludeReplicasBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.DirectedReadOptions) { + return mergeFrom((com.google.spanner.v1.DirectedReadOptions) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.DirectedReadOptions other) { + if (other == com.google.spanner.v1.DirectedReadOptions.getDefaultInstance()) return this; + switch (other.getReplicasCase()) { + case INCLUDE_REPLICAS: + { + mergeIncludeReplicas(other.getIncludeReplicas()); + break; + } + case EXCLUDE_REPLICAS: + { + mergeExcludeReplicas(other.getExcludeReplicas()); + break; + } + case REPLICAS_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetIncludeReplicasFieldBuilder().getBuilder(), extensionRegistry); + replicasCase_ = 1; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetExcludeReplicasFieldBuilder().getBuilder(), extensionRegistry); + replicasCase_ = 2; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int replicasCase_ = 0; + private java.lang.Object replicas_; + + public ReplicasCase getReplicasCase() { + return ReplicasCase.forNumber(replicasCase_); + } + + public Builder clearReplicas() { + replicasCase_ = 0; + replicas_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.DirectedReadOptions.IncludeReplicas, + com.google.spanner.v1.DirectedReadOptions.IncludeReplicas.Builder, + com.google.spanner.v1.DirectedReadOptions.IncludeReplicasOrBuilder> + includeReplicasBuilder_; + + /** + * + * + *
    +     * `Include_replicas` indicates the order of replicas (as they appear in
    +     * this list) to process the request. If `auto_failover_disabled` is set to
    +     * `true` and all replicas are exhausted without finding a healthy replica,
    +     * Spanner waits for a replica in the list to become available, requests
    +     * might fail due to `DEADLINE_EXCEEDED` errors.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.IncludeReplicas include_replicas = 1; + * + * @return Whether the includeReplicas field is set. + */ + @java.lang.Override + public boolean hasIncludeReplicas() { + return replicasCase_ == 1; + } + + /** + * + * + *
    +     * `Include_replicas` indicates the order of replicas (as they appear in
    +     * this list) to process the request. If `auto_failover_disabled` is set to
    +     * `true` and all replicas are exhausted without finding a healthy replica,
    +     * Spanner waits for a replica in the list to become available, requests
    +     * might fail due to `DEADLINE_EXCEEDED` errors.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.IncludeReplicas include_replicas = 1; + * + * @return The includeReplicas. + */ + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.IncludeReplicas getIncludeReplicas() { + if (includeReplicasBuilder_ == null) { + if (replicasCase_ == 1) { + return (com.google.spanner.v1.DirectedReadOptions.IncludeReplicas) replicas_; + } + return com.google.spanner.v1.DirectedReadOptions.IncludeReplicas.getDefaultInstance(); + } else { + if (replicasCase_ == 1) { + return includeReplicasBuilder_.getMessage(); + } + return com.google.spanner.v1.DirectedReadOptions.IncludeReplicas.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * `Include_replicas` indicates the order of replicas (as they appear in
    +     * this list) to process the request. If `auto_failover_disabled` is set to
    +     * `true` and all replicas are exhausted without finding a healthy replica,
    +     * Spanner waits for a replica in the list to become available, requests
    +     * might fail due to `DEADLINE_EXCEEDED` errors.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.IncludeReplicas include_replicas = 1; + */ + public Builder setIncludeReplicas( + com.google.spanner.v1.DirectedReadOptions.IncludeReplicas value) { + if (includeReplicasBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + replicas_ = value; + onChanged(); + } else { + includeReplicasBuilder_.setMessage(value); + } + replicasCase_ = 1; + return this; + } + + /** + * + * + *
    +     * `Include_replicas` indicates the order of replicas (as they appear in
    +     * this list) to process the request. If `auto_failover_disabled` is set to
    +     * `true` and all replicas are exhausted without finding a healthy replica,
    +     * Spanner waits for a replica in the list to become available, requests
    +     * might fail due to `DEADLINE_EXCEEDED` errors.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.IncludeReplicas include_replicas = 1; + */ + public Builder setIncludeReplicas( + com.google.spanner.v1.DirectedReadOptions.IncludeReplicas.Builder builderForValue) { + if (includeReplicasBuilder_ == null) { + replicas_ = builderForValue.build(); + onChanged(); + } else { + includeReplicasBuilder_.setMessage(builderForValue.build()); + } + replicasCase_ = 1; + return this; + } + + /** + * + * + *
    +     * `Include_replicas` indicates the order of replicas (as they appear in
    +     * this list) to process the request. If `auto_failover_disabled` is set to
    +     * `true` and all replicas are exhausted without finding a healthy replica,
    +     * Spanner waits for a replica in the list to become available, requests
    +     * might fail due to `DEADLINE_EXCEEDED` errors.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.IncludeReplicas include_replicas = 1; + */ + public Builder mergeIncludeReplicas( + com.google.spanner.v1.DirectedReadOptions.IncludeReplicas value) { + if (includeReplicasBuilder_ == null) { + if (replicasCase_ == 1 + && replicas_ + != com.google.spanner.v1.DirectedReadOptions.IncludeReplicas.getDefaultInstance()) { + replicas_ = + com.google.spanner.v1.DirectedReadOptions.IncludeReplicas.newBuilder( + (com.google.spanner.v1.DirectedReadOptions.IncludeReplicas) replicas_) + .mergeFrom(value) + .buildPartial(); + } else { + replicas_ = value; + } + onChanged(); + } else { + if (replicasCase_ == 1) { + includeReplicasBuilder_.mergeFrom(value); + } else { + includeReplicasBuilder_.setMessage(value); + } + } + replicasCase_ = 1; + return this; + } + + /** + * + * + *
    +     * `Include_replicas` indicates the order of replicas (as they appear in
    +     * this list) to process the request. If `auto_failover_disabled` is set to
    +     * `true` and all replicas are exhausted without finding a healthy replica,
    +     * Spanner waits for a replica in the list to become available, requests
    +     * might fail due to `DEADLINE_EXCEEDED` errors.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.IncludeReplicas include_replicas = 1; + */ + public Builder clearIncludeReplicas() { + if (includeReplicasBuilder_ == null) { + if (replicasCase_ == 1) { + replicasCase_ = 0; + replicas_ = null; + onChanged(); + } + } else { + if (replicasCase_ == 1) { + replicasCase_ = 0; + replicas_ = null; + } + includeReplicasBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * `Include_replicas` indicates the order of replicas (as they appear in
    +     * this list) to process the request. If `auto_failover_disabled` is set to
    +     * `true` and all replicas are exhausted without finding a healthy replica,
    +     * Spanner waits for a replica in the list to become available, requests
    +     * might fail due to `DEADLINE_EXCEEDED` errors.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.IncludeReplicas include_replicas = 1; + */ + public com.google.spanner.v1.DirectedReadOptions.IncludeReplicas.Builder + getIncludeReplicasBuilder() { + return internalGetIncludeReplicasFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * `Include_replicas` indicates the order of replicas (as they appear in
    +     * this list) to process the request. If `auto_failover_disabled` is set to
    +     * `true` and all replicas are exhausted without finding a healthy replica,
    +     * Spanner waits for a replica in the list to become available, requests
    +     * might fail due to `DEADLINE_EXCEEDED` errors.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.IncludeReplicas include_replicas = 1; + */ + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.IncludeReplicasOrBuilder + getIncludeReplicasOrBuilder() { + if ((replicasCase_ == 1) && (includeReplicasBuilder_ != null)) { + return includeReplicasBuilder_.getMessageOrBuilder(); + } else { + if (replicasCase_ == 1) { + return (com.google.spanner.v1.DirectedReadOptions.IncludeReplicas) replicas_; + } + return com.google.spanner.v1.DirectedReadOptions.IncludeReplicas.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * `Include_replicas` indicates the order of replicas (as they appear in
    +     * this list) to process the request. If `auto_failover_disabled` is set to
    +     * `true` and all replicas are exhausted without finding a healthy replica,
    +     * Spanner waits for a replica in the list to become available, requests
    +     * might fail due to `DEADLINE_EXCEEDED` errors.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.IncludeReplicas include_replicas = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.DirectedReadOptions.IncludeReplicas, + com.google.spanner.v1.DirectedReadOptions.IncludeReplicas.Builder, + com.google.spanner.v1.DirectedReadOptions.IncludeReplicasOrBuilder> + internalGetIncludeReplicasFieldBuilder() { + if (includeReplicasBuilder_ == null) { + if (!(replicasCase_ == 1)) { + replicas_ = + com.google.spanner.v1.DirectedReadOptions.IncludeReplicas.getDefaultInstance(); + } + includeReplicasBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.DirectedReadOptions.IncludeReplicas, + com.google.spanner.v1.DirectedReadOptions.IncludeReplicas.Builder, + com.google.spanner.v1.DirectedReadOptions.IncludeReplicasOrBuilder>( + (com.google.spanner.v1.DirectedReadOptions.IncludeReplicas) replicas_, + getParentForChildren(), + isClean()); + replicas_ = null; + } + replicasCase_ = 1; + onChanged(); + return includeReplicasBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas, + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas.Builder, + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicasOrBuilder> + excludeReplicasBuilder_; + + /** + * + * + *
    +     * `Exclude_replicas` indicates that specified replicas should be excluded
    +     * from serving requests. Spanner doesn't route requests to the replicas
    +     * in this list.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.ExcludeReplicas exclude_replicas = 2; + * + * @return Whether the excludeReplicas field is set. + */ + @java.lang.Override + public boolean hasExcludeReplicas() { + return replicasCase_ == 2; + } + + /** + * + * + *
    +     * `Exclude_replicas` indicates that specified replicas should be excluded
    +     * from serving requests. Spanner doesn't route requests to the replicas
    +     * in this list.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.ExcludeReplicas exclude_replicas = 2; + * + * @return The excludeReplicas. + */ + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas getExcludeReplicas() { + if (excludeReplicasBuilder_ == null) { + if (replicasCase_ == 2) { + return (com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas) replicas_; + } + return com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas.getDefaultInstance(); + } else { + if (replicasCase_ == 2) { + return excludeReplicasBuilder_.getMessage(); + } + return com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * `Exclude_replicas` indicates that specified replicas should be excluded
    +     * from serving requests. Spanner doesn't route requests to the replicas
    +     * in this list.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.ExcludeReplicas exclude_replicas = 2; + */ + public Builder setExcludeReplicas( + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas value) { + if (excludeReplicasBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + replicas_ = value; + onChanged(); + } else { + excludeReplicasBuilder_.setMessage(value); + } + replicasCase_ = 2; + return this; + } + + /** + * + * + *
    +     * `Exclude_replicas` indicates that specified replicas should be excluded
    +     * from serving requests. Spanner doesn't route requests to the replicas
    +     * in this list.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.ExcludeReplicas exclude_replicas = 2; + */ + public Builder setExcludeReplicas( + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas.Builder builderForValue) { + if (excludeReplicasBuilder_ == null) { + replicas_ = builderForValue.build(); + onChanged(); + } else { + excludeReplicasBuilder_.setMessage(builderForValue.build()); + } + replicasCase_ = 2; + return this; + } + + /** + * + * + *
    +     * `Exclude_replicas` indicates that specified replicas should be excluded
    +     * from serving requests. Spanner doesn't route requests to the replicas
    +     * in this list.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.ExcludeReplicas exclude_replicas = 2; + */ + public Builder mergeExcludeReplicas( + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas value) { + if (excludeReplicasBuilder_ == null) { + if (replicasCase_ == 2 + && replicas_ + != com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas.getDefaultInstance()) { + replicas_ = + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas.newBuilder( + (com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas) replicas_) + .mergeFrom(value) + .buildPartial(); + } else { + replicas_ = value; + } + onChanged(); + } else { + if (replicasCase_ == 2) { + excludeReplicasBuilder_.mergeFrom(value); + } else { + excludeReplicasBuilder_.setMessage(value); + } + } + replicasCase_ = 2; + return this; + } + + /** + * + * + *
    +     * `Exclude_replicas` indicates that specified replicas should be excluded
    +     * from serving requests. Spanner doesn't route requests to the replicas
    +     * in this list.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.ExcludeReplicas exclude_replicas = 2; + */ + public Builder clearExcludeReplicas() { + if (excludeReplicasBuilder_ == null) { + if (replicasCase_ == 2) { + replicasCase_ = 0; + replicas_ = null; + onChanged(); + } + } else { + if (replicasCase_ == 2) { + replicasCase_ = 0; + replicas_ = null; + } + excludeReplicasBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * `Exclude_replicas` indicates that specified replicas should be excluded
    +     * from serving requests. Spanner doesn't route requests to the replicas
    +     * in this list.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.ExcludeReplicas exclude_replicas = 2; + */ + public com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas.Builder + getExcludeReplicasBuilder() { + return internalGetExcludeReplicasFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * `Exclude_replicas` indicates that specified replicas should be excluded
    +     * from serving requests. Spanner doesn't route requests to the replicas
    +     * in this list.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.ExcludeReplicas exclude_replicas = 2; + */ + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions.ExcludeReplicasOrBuilder + getExcludeReplicasOrBuilder() { + if ((replicasCase_ == 2) && (excludeReplicasBuilder_ != null)) { + return excludeReplicasBuilder_.getMessageOrBuilder(); + } else { + if (replicasCase_ == 2) { + return (com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas) replicas_; + } + return com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * `Exclude_replicas` indicates that specified replicas should be excluded
    +     * from serving requests. Spanner doesn't route requests to the replicas
    +     * in this list.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions.ExcludeReplicas exclude_replicas = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas, + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas.Builder, + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicasOrBuilder> + internalGetExcludeReplicasFieldBuilder() { + if (excludeReplicasBuilder_ == null) { + if (!(replicasCase_ == 2)) { + replicas_ = + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas.getDefaultInstance(); + } + excludeReplicasBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas, + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas.Builder, + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicasOrBuilder>( + (com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas) replicas_, + getParentForChildren(), + isClean()); + replicas_ = null; + } + replicasCase_ = 2; + onChanged(); + return excludeReplicasBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.DirectedReadOptions) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.DirectedReadOptions) + private static final com.google.spanner.v1.DirectedReadOptions DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.DirectedReadOptions(); + } + + public static com.google.spanner.v1.DirectedReadOptions getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DirectedReadOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException 
e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DirectedReadOptionsOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DirectedReadOptionsOrBuilder.java new file mode 100644 index 000000000000..879e73f4d582 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DirectedReadOptionsOrBuilder.java @@ -0,0 +1,122 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface DirectedReadOptionsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.DirectedReadOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * `Include_replicas` indicates the order of replicas (as they appear in
    +   * this list) to process the request. If `auto_failover_disabled` is set to
    +   * `true` and all replicas are exhausted without finding a healthy replica,
    +   * Spanner waits for a replica in the list to become available, requests
    +   * might fail due to `DEADLINE_EXCEEDED` errors.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions.IncludeReplicas include_replicas = 1; + * + * @return Whether the includeReplicas field is set. + */ + boolean hasIncludeReplicas(); + + /** + * + * + *
    +   * `Include_replicas` indicates the order of replicas (as they appear in
    +   * this list) to process the request. If `auto_failover_disabled` is set to
    +   * `true` and all replicas are exhausted without finding a healthy replica,
    +   * Spanner waits for a replica in the list to become available, requests
    +   * might fail due to `DEADLINE_EXCEEDED` errors.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions.IncludeReplicas include_replicas = 1; + * + * @return The includeReplicas. + */ + com.google.spanner.v1.DirectedReadOptions.IncludeReplicas getIncludeReplicas(); + + /** + * + * + *
    +   * `Include_replicas` indicates the order of replicas (as they appear in
    +   * this list) to process the request. If `auto_failover_disabled` is set to
    +   * `true` and all replicas are exhausted without finding a healthy replica,
    +   * Spanner waits for a replica in the list to become available, requests
    +   * might fail due to `DEADLINE_EXCEEDED` errors.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions.IncludeReplicas include_replicas = 1; + */ + com.google.spanner.v1.DirectedReadOptions.IncludeReplicasOrBuilder getIncludeReplicasOrBuilder(); + + /** + * + * + *
    +   * `Exclude_replicas` indicates that specified replicas should be excluded
    +   * from serving requests. Spanner doesn't route requests to the replicas
    +   * in this list.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions.ExcludeReplicas exclude_replicas = 2; + * + * @return Whether the excludeReplicas field is set. + */ + boolean hasExcludeReplicas(); + + /** + * + * + *
    +   * `Exclude_replicas` indicates that specified replicas should be excluded
    +   * from serving requests. Spanner doesn't route requests to the replicas
    +   * in this list.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions.ExcludeReplicas exclude_replicas = 2; + * + * @return The excludeReplicas. + */ + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas getExcludeReplicas(); + + /** + * + * + *
    +   * `Exclude_replicas` indicates that specified replicas should be excluded
    +   * from serving requests. Spanner doesn't route requests to the replicas
    +   * in this list.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions.ExcludeReplicas exclude_replicas = 2; + */ + com.google.spanner.v1.DirectedReadOptions.ExcludeReplicasOrBuilder getExcludeReplicasOrBuilder(); + + com.google.spanner.v1.DirectedReadOptions.ReplicasCase getReplicasCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlRequest.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlRequest.java new file mode 100644 index 000000000000..8c038e860af3 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlRequest.java @@ -0,0 +1,3932 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * The request for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml].
    + * 
    + * + * Protobuf type {@code google.spanner.v1.ExecuteBatchDmlRequest} + */ +@com.google.protobuf.Generated +public final class ExecuteBatchDmlRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ExecuteBatchDmlRequest) + ExecuteBatchDmlRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ExecuteBatchDmlRequest"); + } + + // Use ExecuteBatchDmlRequest.newBuilder() to construct. + private ExecuteBatchDmlRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ExecuteBatchDmlRequest() { + session_ = ""; + statements_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteBatchDmlRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteBatchDmlRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ExecuteBatchDmlRequest.class, + com.google.spanner.v1.ExecuteBatchDmlRequest.Builder.class); + } + + public interface StatementOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ExecuteBatchDmlRequest.Statement) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * Required. The DML string.
    +     * 
    + * + * string sql = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The sql. + */ + java.lang.String getSql(); + + /** + * + * + *
    +     * Required. The DML string.
    +     * 
    + * + * string sql = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for sql. + */ + com.google.protobuf.ByteString getSqlBytes(); + + /** + * + * + *
    +     * Parameter names and values that bind to placeholders in the DML string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names can contain
    +     * letters, numbers, and underscores.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The
    +     * same parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 2; + * + * @return Whether the params field is set. + */ + boolean hasParams(); + + /** + * + * + *
    +     * Parameter names and values that bind to placeholders in the DML string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names can contain
    +     * letters, numbers, and underscores.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The
    +     * same parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 2; + * + * @return The params. + */ + com.google.protobuf.Struct getParams(); + + /** + * + * + *
    +     * Parameter names and values that bind to placeholders in the DML string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names can contain
    +     * letters, numbers, and underscores.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The
    +     * same parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 2; + */ + com.google.protobuf.StructOrBuilder getParamsOrBuilder(); + + /** + * + * + *
    +     * It isn't always possible for Cloud Spanner to infer the right SQL type
    +     * from a JSON value. For example, values of type `BYTES` and values
    +     * of type `STRING` both appear in
    +     * [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as
    +     * JSON strings.
    +     *
    +     * In these cases, `param_types` can be used to specify the exact
    +     * SQL type for some or all of the SQL statement parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 3; + */ + int getParamTypesCount(); + + /** + * + * + *
    +     * It isn't always possible for Cloud Spanner to infer the right SQL type
    +     * from a JSON value. For example, values of type `BYTES` and values
    +     * of type `STRING` both appear in
    +     * [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as
    +     * JSON strings.
    +     *
    +     * In these cases, `param_types` can be used to specify the exact
    +     * SQL type for some or all of the SQL statement parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 3; + */ + boolean containsParamTypes(java.lang.String key); + + /** Use {@link #getParamTypesMap()} instead. */ + @java.lang.Deprecated + java.util.Map getParamTypes(); + + /** + * + * + *
    +     * It isn't always possible for Cloud Spanner to infer the right SQL type
    +     * from a JSON value. For example, values of type `BYTES` and values
    +     * of type `STRING` both appear in
    +     * [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as
    +     * JSON strings.
    +     *
    +     * In these cases, `param_types` can be used to specify the exact
    +     * SQL type for some or all of the SQL statement parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 3; + */ + java.util.Map getParamTypesMap(); + + /** + * + * + *
    +     * It isn't always possible for Cloud Spanner to infer the right SQL type
    +     * from a JSON value. For example, values of type `BYTES` and values
    +     * of type `STRING` both appear in
    +     * [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as
    +     * JSON strings.
    +     *
    +     * In these cases, `param_types` can be used to specify the exact
    +     * SQL type for some or all of the SQL statement parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 3; + */ + /* nullable */ + com.google.spanner.v1.Type getParamTypesOrDefault( + java.lang.String key, + /* nullable */ + com.google.spanner.v1.Type defaultValue); + + /** + * + * + *
    +     * It isn't always possible for Cloud Spanner to infer the right SQL type
    +     * from a JSON value. For example, values of type `BYTES` and values
    +     * of type `STRING` both appear in
    +     * [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as
    +     * JSON strings.
    +     *
    +     * In these cases, `param_types` can be used to specify the exact
    +     * SQL type for some or all of the SQL statement parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 3; + */ + com.google.spanner.v1.Type getParamTypesOrThrow(java.lang.String key); + } + + /** + * + * + *
    +   * A single DML statement.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.ExecuteBatchDmlRequest.Statement} + */ + public static final class Statement extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ExecuteBatchDmlRequest.Statement) + StatementOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Statement"); + } + + // Use Statement.newBuilder() to construct. + private Statement(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Statement() { + sql_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteBatchDmlRequest_Statement_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 3: + return internalGetParamTypes(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteBatchDmlRequest_Statement_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ExecuteBatchDmlRequest.Statement.class, + com.google.spanner.v1.ExecuteBatchDmlRequest.Statement.Builder.class); + } + + private int bitField0_; + public static final int SQL_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object sql_ = ""; + + /** + * + * + *
    +     * Required. The DML string.
    +     * 
    + * + * string sql = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The sql. + */ + @java.lang.Override + public java.lang.String getSql() { + java.lang.Object ref = sql_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sql_ = s; + return s; + } + } + + /** + * + * + *
    +     * Required. The DML string.
    +     * 
    + * + * string sql = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for sql. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSqlBytes() { + java.lang.Object ref = sql_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sql_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PARAMS_FIELD_NUMBER = 2; + private com.google.protobuf.Struct params_; + + /** + * + * + *
    +     * Parameter names and values that bind to placeholders in the DML string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names can contain
    +     * letters, numbers, and underscores.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The
    +     * same parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 2; + * + * @return Whether the params field is set. + */ + @java.lang.Override + public boolean hasParams() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Parameter names and values that bind to placeholders in the DML string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names can contain
    +     * letters, numbers, and underscores.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The
    +     * same parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 2; + * + * @return The params. + */ + @java.lang.Override + public com.google.protobuf.Struct getParams() { + return params_ == null ? com.google.protobuf.Struct.getDefaultInstance() : params_; + } + + /** + * + * + *
    +     * Parameter names and values that bind to placeholders in the DML string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names can contain
    +     * letters, numbers, and underscores.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The
    +     * same parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 2; + */ + @java.lang.Override + public com.google.protobuf.StructOrBuilder getParamsOrBuilder() { + return params_ == null ? com.google.protobuf.Struct.getDefaultInstance() : params_; + } + + public static final int PARAM_TYPES_FIELD_NUMBER = 3; + + private static final class ParamTypesDefaultEntryHolder { + static final com.google.protobuf.MapEntry + defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteBatchDmlRequest_Statement_ParamTypesEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.MESSAGE, + com.google.spanner.v1.Type.getDefaultInstance()); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField paramTypes_; + + private com.google.protobuf.MapField + internalGetParamTypes() { + if (paramTypes_ == null) { + return com.google.protobuf.MapField.emptyMapField( + ParamTypesDefaultEntryHolder.defaultEntry); + } + return paramTypes_; + } + + public int getParamTypesCount() { + return internalGetParamTypes().getMap().size(); + } + + /** + * + * + *
    +     * It isn't always possible for Cloud Spanner to infer the right SQL type
    +     * from a JSON value. For example, values of type `BYTES` and values
    +     * of type `STRING` both appear in
    +     * [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as
    +     * JSON strings.
    +     *
    +     * In these cases, `param_types` can be used to specify the exact
    +     * SQL type for some or all of the SQL statement parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 3; + */ + @java.lang.Override + public boolean containsParamTypes(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParamTypes().getMap().containsKey(key); + } + + /** Use {@link #getParamTypesMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParamTypes() { + return getParamTypesMap(); + } + + /** + * + * + *
    +     * It isn't always possible for Cloud Spanner to infer the right SQL type
    +     * from a JSON value. For example, values of type `BYTES` and values
    +     * of type `STRING` both appear in
    +     * [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as
    +     * JSON strings.
    +     *
    +     * In these cases, `param_types` can be used to specify the exact
    +     * SQL type for some or all of the SQL statement parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 3; + */ + @java.lang.Override + public java.util.Map getParamTypesMap() { + return internalGetParamTypes().getMap(); + } + + /** + * + * + *
    +     * It isn't always possible for Cloud Spanner to infer the right SQL type
    +     * from a JSON value. For example, values of type `BYTES` and values
    +     * of type `STRING` both appear in
    +     * [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as
    +     * JSON strings.
    +     *
    +     * In these cases, `param_types` can be used to specify the exact
    +     * SQL type for some or all of the SQL statement parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 3; + */ + @java.lang.Override + public /* nullable */ com.google.spanner.v1.Type getParamTypesOrDefault( + java.lang.String key, + /* nullable */ + com.google.spanner.v1.Type defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetParamTypes().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
    +     * It isn't always possible for Cloud Spanner to infer the right SQL type
    +     * from a JSON value. For example, values of type `BYTES` and values
    +     * of type `STRING` both appear in
    +     * [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as
    +     * JSON strings.
    +     *
    +     * In these cases, `param_types` can be used to specify the exact
    +     * SQL type for some or all of the SQL statement parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 3; + */ + @java.lang.Override + public com.google.spanner.v1.Type getParamTypesOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetParamTypes().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sql_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, sql_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getParams()); + } + com.google.protobuf.GeneratedMessage.serializeStringMapTo( + output, internalGetParamTypes(), ParamTypesDefaultEntryHolder.defaultEntry, 3); + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sql_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, sql_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getParams()); + } + for (java.util.Map.Entry entry : + internalGetParamTypes().getMap().entrySet()) { + com.google.protobuf.MapEntry paramTypes__ = + ParamTypesDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, paramTypes__); + } + size 
+= getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.ExecuteBatchDmlRequest.Statement)) { + return super.equals(obj); + } + com.google.spanner.v1.ExecuteBatchDmlRequest.Statement other = + (com.google.spanner.v1.ExecuteBatchDmlRequest.Statement) obj; + + if (!getSql().equals(other.getSql())) return false; + if (hasParams() != other.hasParams()) return false; + if (hasParams()) { + if (!getParams().equals(other.getParams())) return false; + } + if (!internalGetParamTypes().equals(other.internalGetParamTypes())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SQL_FIELD_NUMBER; + hash = (53 * hash) + getSql().hashCode(); + if (hasParams()) { + hash = (37 * hash) + PARAMS_FIELD_NUMBER; + hash = (53 * hash) + getParams().hashCode(); + } + if (!internalGetParamTypes().getMap().isEmpty()) { + hash = (37 * hash) + PARAM_TYPES_FIELD_NUMBER; + hash = (53 * hash) + internalGetParamTypes().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest.Statement parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest.Statement parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.v1.ExecuteBatchDmlRequest.Statement parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest.Statement parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest.Statement parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest.Statement parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest.Statement parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest.Statement parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest.Statement parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest.Statement parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest.Statement parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest.Statement parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.v1.ExecuteBatchDmlRequest.Statement prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * A single DML statement.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.ExecuteBatchDmlRequest.Statement} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ExecuteBatchDmlRequest.Statement) + com.google.spanner.v1.ExecuteBatchDmlRequest.StatementOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteBatchDmlRequest_Statement_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 3: + return internalGetParamTypes(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 3: + return internalGetMutableParamTypes(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteBatchDmlRequest_Statement_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ExecuteBatchDmlRequest.Statement.class, + com.google.spanner.v1.ExecuteBatchDmlRequest.Statement.Builder.class); + } + + // Construct using com.google.spanner.v1.ExecuteBatchDmlRequest.Statement.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if 
(com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetParamsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + sql_ = ""; + params_ = null; + if (paramsBuilder_ != null) { + paramsBuilder_.dispose(); + paramsBuilder_ = null; + } + internalGetMutableParamTypes().clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteBatchDmlRequest_Statement_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.ExecuteBatchDmlRequest.Statement getDefaultInstanceForType() { + return com.google.spanner.v1.ExecuteBatchDmlRequest.Statement.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ExecuteBatchDmlRequest.Statement build() { + com.google.spanner.v1.ExecuteBatchDmlRequest.Statement result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.ExecuteBatchDmlRequest.Statement buildPartial() { + com.google.spanner.v1.ExecuteBatchDmlRequest.Statement result = + new com.google.spanner.v1.ExecuteBatchDmlRequest.Statement(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.ExecuteBatchDmlRequest.Statement result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.sql_ = sql_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.params_ = paramsBuilder_ == null ? 
params_ : paramsBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.paramTypes_ = + internalGetParamTypes().build(ParamTypesDefaultEntryHolder.defaultEntry); + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.ExecuteBatchDmlRequest.Statement) { + return mergeFrom((com.google.spanner.v1.ExecuteBatchDmlRequest.Statement) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.ExecuteBatchDmlRequest.Statement other) { + if (other == com.google.spanner.v1.ExecuteBatchDmlRequest.Statement.getDefaultInstance()) + return this; + if (!other.getSql().isEmpty()) { + sql_ = other.sql_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasParams()) { + mergeParams(other.getParams()); + } + internalGetMutableParamTypes().mergeFrom(other.internalGetParamTypes()); + bitField0_ |= 0x00000004; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + sql_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetParamsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + com.google.protobuf.MapEntry + paramTypes__ = + input.readMessage( + 
ParamTypesDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableParamTypes() + .ensureBuilderMap() + .put(paramTypes__.getKey(), paramTypes__.getValue()); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object sql_ = ""; + + /** + * + * + *
    +       * Required. The DML string.
    +       * 
    + * + * string sql = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The sql. + */ + public java.lang.String getSql() { + java.lang.Object ref = sql_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sql_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * Required. The DML string.
    +       * 
    + * + * string sql = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for sql. + */ + public com.google.protobuf.ByteString getSqlBytes() { + java.lang.Object ref = sql_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sql_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * Required. The DML string.
    +       * 
    + * + * string sql = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The sql to set. + * @return This builder for chaining. + */ + public Builder setSql(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + sql_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The DML string.
    +       * 
    + * + * string sql = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearSql() { + sql_ = getDefaultInstance().getSql(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The DML string.
    +       * 
    + * + * string sql = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for sql to set. + * @return This builder for chaining. + */ + public Builder setSqlBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + sql_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.Struct params_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Struct, + com.google.protobuf.Struct.Builder, + com.google.protobuf.StructOrBuilder> + paramsBuilder_; + + /** + * + * + *
    +       * Parameter names and values that bind to placeholders in the DML string.
    +       *
    +       * A parameter placeholder consists of the `@` character followed by the
    +       * parameter name (for example, `@firstName`). Parameter names can contain
    +       * letters, numbers, and underscores.
    +       *
    +       * Parameters can appear anywhere that a literal value is expected. The
    +       * same parameter name can be used more than once, for example:
    +       *
    +       * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +       *
    +       * It's an error to execute a SQL statement with unbound parameters.
    +       * 
    + * + * .google.protobuf.Struct params = 2; + * + * @return Whether the params field is set. + */ + public boolean hasParams() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +       * Parameter names and values that bind to placeholders in the DML string.
    +       *
    +       * A parameter placeholder consists of the `@` character followed by the
    +       * parameter name (for example, `@firstName`). Parameter names can contain
    +       * letters, numbers, and underscores.
    +       *
    +       * Parameters can appear anywhere that a literal value is expected. The
    +       * same parameter name can be used more than once, for example:
    +       *
    +       * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +       *
    +       * It's an error to execute a SQL statement with unbound parameters.
    +       * 
    + * + * .google.protobuf.Struct params = 2; + * + * @return The params. + */ + public com.google.protobuf.Struct getParams() { + if (paramsBuilder_ == null) { + return params_ == null ? com.google.protobuf.Struct.getDefaultInstance() : params_; + } else { + return paramsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * Parameter names and values that bind to placeholders in the DML string.
    +       *
    +       * A parameter placeholder consists of the `@` character followed by the
    +       * parameter name (for example, `@firstName`). Parameter names can contain
    +       * letters, numbers, and underscores.
    +       *
    +       * Parameters can appear anywhere that a literal value is expected. The
    +       * same parameter name can be used more than once, for example:
    +       *
    +       * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +       *
    +       * It's an error to execute a SQL statement with unbound parameters.
    +       * 
    + * + * .google.protobuf.Struct params = 2; + */ + public Builder setParams(com.google.protobuf.Struct value) { + if (paramsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + params_ = value; + } else { + paramsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Parameter names and values that bind to placeholders in the DML string.
    +       *
    +       * A parameter placeholder consists of the `@` character followed by the
    +       * parameter name (for example, `@firstName`). Parameter names can contain
    +       * letters, numbers, and underscores.
    +       *
    +       * Parameters can appear anywhere that a literal value is expected. The
    +       * same parameter name can be used more than once, for example:
    +       *
    +       * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +       *
    +       * It's an error to execute a SQL statement with unbound parameters.
    +       * 
    + * + * .google.protobuf.Struct params = 2; + */ + public Builder setParams(com.google.protobuf.Struct.Builder builderForValue) { + if (paramsBuilder_ == null) { + params_ = builderForValue.build(); + } else { + paramsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Parameter names and values that bind to placeholders in the DML string.
    +       *
    +       * A parameter placeholder consists of the `@` character followed by the
    +       * parameter name (for example, `@firstName`). Parameter names can contain
    +       * letters, numbers, and underscores.
    +       *
    +       * Parameters can appear anywhere that a literal value is expected. The
    +       * same parameter name can be used more than once, for example:
    +       *
    +       * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +       *
    +       * It's an error to execute a SQL statement with unbound parameters.
    +       * 
    + * + * .google.protobuf.Struct params = 2; + */ + public Builder mergeParams(com.google.protobuf.Struct value) { + if (paramsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && params_ != null + && params_ != com.google.protobuf.Struct.getDefaultInstance()) { + getParamsBuilder().mergeFrom(value); + } else { + params_ = value; + } + } else { + paramsBuilder_.mergeFrom(value); + } + if (params_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * Parameter names and values that bind to placeholders in the DML string.
    +       *
    +       * A parameter placeholder consists of the `@` character followed by the
    +       * parameter name (for example, `@firstName`). Parameter names can contain
    +       * letters, numbers, and underscores.
    +       *
    +       * Parameters can appear anywhere that a literal value is expected. The
    +       * same parameter name can be used more than once, for example:
    +       *
    +       * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +       *
    +       * It's an error to execute a SQL statement with unbound parameters.
    +       * 
    + * + * .google.protobuf.Struct params = 2; + */ + public Builder clearParams() { + bitField0_ = (bitField0_ & ~0x00000002); + params_ = null; + if (paramsBuilder_ != null) { + paramsBuilder_.dispose(); + paramsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * Parameter names and values that bind to placeholders in the DML string.
    +       *
    +       * A parameter placeholder consists of the `@` character followed by the
    +       * parameter name (for example, `@firstName`). Parameter names can contain
    +       * letters, numbers, and underscores.
    +       *
    +       * Parameters can appear anywhere that a literal value is expected. The
    +       * same parameter name can be used more than once, for example:
    +       *
    +       * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +       *
    +       * It's an error to execute a SQL statement with unbound parameters.
    +       * 
    + * + * .google.protobuf.Struct params = 2; + */ + public com.google.protobuf.Struct.Builder getParamsBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetParamsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Parameter names and values that bind to placeholders in the DML string.
    +       *
    +       * A parameter placeholder consists of the `@` character followed by the
    +       * parameter name (for example, `@firstName`). Parameter names can contain
    +       * letters, numbers, and underscores.
    +       *
    +       * Parameters can appear anywhere that a literal value is expected. The
    +       * same parameter name can be used more than once, for example:
    +       *
    +       * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +       *
    +       * It's an error to execute a SQL statement with unbound parameters.
    +       * 
    + * + * .google.protobuf.Struct params = 2; + */ + public com.google.protobuf.StructOrBuilder getParamsOrBuilder() { + if (paramsBuilder_ != null) { + return paramsBuilder_.getMessageOrBuilder(); + } else { + return params_ == null ? com.google.protobuf.Struct.getDefaultInstance() : params_; + } + } + + /** + * + * + *
    +       * Parameter names and values that bind to placeholders in the DML string.
    +       *
    +       * A parameter placeholder consists of the `@` character followed by the
    +       * parameter name (for example, `@firstName`). Parameter names can contain
    +       * letters, numbers, and underscores.
    +       *
    +       * Parameters can appear anywhere that a literal value is expected. The
    +       * same parameter name can be used more than once, for example:
    +       *
    +       * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +       *
    +       * It's an error to execute a SQL statement with unbound parameters.
    +       * 
    + * + * .google.protobuf.Struct params = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Struct, + com.google.protobuf.Struct.Builder, + com.google.protobuf.StructOrBuilder> + internalGetParamsFieldBuilder() { + if (paramsBuilder_ == null) { + paramsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Struct, + com.google.protobuf.Struct.Builder, + com.google.protobuf.StructOrBuilder>( + getParams(), getParentForChildren(), isClean()); + params_ = null; + } + return paramsBuilder_; + } + + private static final class ParamTypesConverter + implements com.google.protobuf.MapFieldBuilder.Converter< + java.lang.String, com.google.spanner.v1.TypeOrBuilder, com.google.spanner.v1.Type> { + @java.lang.Override + public com.google.spanner.v1.Type build(com.google.spanner.v1.TypeOrBuilder val) { + if (val instanceof com.google.spanner.v1.Type) { + return (com.google.spanner.v1.Type) val; + } + return ((com.google.spanner.v1.Type.Builder) val).build(); + } + + @java.lang.Override + public com.google.protobuf.MapEntry + defaultEntry() { + return ParamTypesDefaultEntryHolder.defaultEntry; + } + } + ; + + private static final ParamTypesConverter paramTypesConverter = new ParamTypesConverter(); + + private com.google.protobuf.MapFieldBuilder< + java.lang.String, + com.google.spanner.v1.TypeOrBuilder, + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder> + paramTypes_; + + private com.google.protobuf.MapFieldBuilder< + java.lang.String, + com.google.spanner.v1.TypeOrBuilder, + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder> + internalGetParamTypes() { + if (paramTypes_ == null) { + return new com.google.protobuf.MapFieldBuilder<>(paramTypesConverter); + } + return paramTypes_; + } + + private com.google.protobuf.MapFieldBuilder< + java.lang.String, + com.google.spanner.v1.TypeOrBuilder, + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder> + internalGetMutableParamTypes() { + 
if (paramTypes_ == null) { + paramTypes_ = new com.google.protobuf.MapFieldBuilder<>(paramTypesConverter); + } + bitField0_ |= 0x00000004; + onChanged(); + return paramTypes_; + } + + public int getParamTypesCount() { + return internalGetParamTypes().ensureBuilderMap().size(); + } + + /** + * + * + *
    +       * It isn't always possible for Cloud Spanner to infer the right SQL type
    +       * from a JSON value. For example, values of type `BYTES` and values
    +       * of type `STRING` both appear in
    +       * [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as
    +       * JSON strings.
    +       *
    +       * In these cases, `param_types` can be used to specify the exact
    +       * SQL type for some or all of the SQL statement parameters. See the
    +       * definition of [Type][google.spanner.v1.Type] for more information
    +       * about SQL types.
    +       * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 3; + */ + @java.lang.Override + public boolean containsParamTypes(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParamTypes().ensureBuilderMap().containsKey(key); + } + + /** Use {@link #getParamTypesMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParamTypes() { + return getParamTypesMap(); + } + + /** + * + * + *
    +       * It isn't always possible for Cloud Spanner to infer the right SQL type
    +       * from a JSON value. For example, values of type `BYTES` and values
    +       * of type `STRING` both appear in
    +       * [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as
    +       * JSON strings.
    +       *
    +       * In these cases, `param_types` can be used to specify the exact
    +       * SQL type for some or all of the SQL statement parameters. See the
    +       * definition of [Type][google.spanner.v1.Type] for more information
    +       * about SQL types.
    +       * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 3; + */ + @java.lang.Override + public java.util.Map getParamTypesMap() { + return internalGetParamTypes().getImmutableMap(); + } + + /** + * + * + *
    +       * It isn't always possible for Cloud Spanner to infer the right SQL type
    +       * from a JSON value. For example, values of type `BYTES` and values
    +       * of type `STRING` both appear in
    +       * [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as
    +       * JSON strings.
    +       *
    +       * In these cases, `param_types` can be used to specify the exact
    +       * SQL type for some or all of the SQL statement parameters. See the
    +       * definition of [Type][google.spanner.v1.Type] for more information
    +       * about SQL types.
    +       * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 3; + */ + @java.lang.Override + public /* nullable */ com.google.spanner.v1.Type getParamTypesOrDefault( + java.lang.String key, + /* nullable */ + com.google.spanner.v1.Type defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetMutableParamTypes().ensureBuilderMap(); + return map.containsKey(key) ? paramTypesConverter.build(map.get(key)) : defaultValue; + } + + /** + * + * + *
    +       * It isn't always possible for Cloud Spanner to infer the right SQL type
    +       * from a JSON value. For example, values of type `BYTES` and values
    +       * of type `STRING` both appear in
    +       * [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as
    +       * JSON strings.
    +       *
    +       * In these cases, `param_types` can be used to specify the exact
    +       * SQL type for some or all of the SQL statement parameters. See the
    +       * definition of [Type][google.spanner.v1.Type] for more information
    +       * about SQL types.
    +       * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 3; + */ + @java.lang.Override + public com.google.spanner.v1.Type getParamTypesOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetMutableParamTypes().ensureBuilderMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return paramTypesConverter.build(map.get(key)); + } + + public Builder clearParamTypes() { + bitField0_ = (bitField0_ & ~0x00000004); + internalGetMutableParamTypes().clear(); + return this; + } + + /** + * + * + *
    +       * It isn't always possible for Cloud Spanner to infer the right SQL type
    +       * from a JSON value. For example, values of type `BYTES` and values
    +       * of type `STRING` both appear in
    +       * [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as
    +       * JSON strings.
    +       *
    +       * In these cases, `param_types` can be used to specify the exact
    +       * SQL type for some or all of the SQL statement parameters. See the
    +       * definition of [Type][google.spanner.v1.Type] for more information
    +       * about SQL types.
    +       * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 3; + */ + public Builder removeParamTypes(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableParamTypes().ensureBuilderMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableParamTypes() { + bitField0_ |= 0x00000004; + return internalGetMutableParamTypes().ensureMessageMap(); + } + + /** + * + * + *
    +       * It isn't always possible for Cloud Spanner to infer the right SQL type
    +       * from a JSON value. For example, values of type `BYTES` and values
    +       * of type `STRING` both appear in
    +       * [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as
    +       * JSON strings.
    +       *
    +       * In these cases, `param_types` can be used to specify the exact
    +       * SQL type for some or all of the SQL statement parameters. See the
    +       * definition of [Type][google.spanner.v1.Type] for more information
    +       * about SQL types.
    +       * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 3; + */ + public Builder putParamTypes(java.lang.String key, com.google.spanner.v1.Type value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableParamTypes().ensureBuilderMap().put(key, value); + bitField0_ |= 0x00000004; + return this; + } + + /** + * + * + *
    +       * It isn't always possible for Cloud Spanner to infer the right SQL type
    +       * from a JSON value. For example, values of type `BYTES` and values
    +       * of type `STRING` both appear in
    +       * [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as
    +       * JSON strings.
    +       *
    +       * In these cases, `param_types` can be used to specify the exact
    +       * SQL type for some or all of the SQL statement parameters. See the
    +       * definition of [Type][google.spanner.v1.Type] for more information
    +       * about SQL types.
    +       * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 3; + */ + public Builder putAllParamTypes( + java.util.Map values) { + for (java.util.Map.Entry e : + values.entrySet()) { + if (e.getKey() == null || e.getValue() == null) { + throw new NullPointerException(); + } + } + internalGetMutableParamTypes().ensureBuilderMap().putAll(values); + bitField0_ |= 0x00000004; + return this; + } + + /** + * + * + *
    +       * It isn't always possible for Cloud Spanner to infer the right SQL type
    +       * from a JSON value. For example, values of type `BYTES` and values
    +       * of type `STRING` both appear in
    +       * [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as
    +       * JSON strings.
    +       *
    +       * In these cases, `param_types` can be used to specify the exact
    +       * SQL type for some or all of the SQL statement parameters. See the
    +       * definition of [Type][google.spanner.v1.Type] for more information
    +       * about SQL types.
    +       * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 3; + */ + public com.google.spanner.v1.Type.Builder putParamTypesBuilderIfAbsent(java.lang.String key) { + java.util.Map builderMap = + internalGetMutableParamTypes().ensureBuilderMap(); + com.google.spanner.v1.TypeOrBuilder entry = builderMap.get(key); + if (entry == null) { + entry = com.google.spanner.v1.Type.newBuilder(); + builderMap.put(key, entry); + } + if (entry instanceof com.google.spanner.v1.Type) { + entry = ((com.google.spanner.v1.Type) entry).toBuilder(); + builderMap.put(key, entry); + } + return (com.google.spanner.v1.Type.Builder) entry; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ExecuteBatchDmlRequest.Statement) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteBatchDmlRequest.Statement) + private static final com.google.spanner.v1.ExecuteBatchDmlRequest.Statement DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.ExecuteBatchDmlRequest.Statement(); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest.Statement getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Statement parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.ExecuteBatchDmlRequest.Statement getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int SESSION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object session_ = ""; + + /** + * + * + *
    +   * Required. The session in which the DML statements should be performed.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + @java.lang.Override + public java.lang.String getSession() { + java.lang.Object ref = session_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + session_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The session in which the DML statements should be performed.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSessionBytes() { + java.lang.Object ref = session_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + session_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TRANSACTION_FIELD_NUMBER = 2; + private com.google.spanner.v1.TransactionSelector transaction_; + + /** + * + * + *
    +   * Required. The transaction to use. Must be a read-write transaction.
    +   *
    +   * To protect against replays, single-use transactions are not supported. The
    +   * caller must either supply an existing transaction ID or begin a new
    +   * transaction.
    +   * 
    + * + * + * .google.spanner.v1.TransactionSelector transaction = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the transaction field is set. + */ + @java.lang.Override + public boolean hasTransaction() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Required. The transaction to use. Must be a read-write transaction.
    +   *
    +   * To protect against replays, single-use transactions are not supported. The
    +   * caller must either supply an existing transaction ID or begin a new
    +   * transaction.
    +   * 
    + * + * + * .google.spanner.v1.TransactionSelector transaction = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The transaction. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionSelector getTransaction() { + return transaction_ == null + ? com.google.spanner.v1.TransactionSelector.getDefaultInstance() + : transaction_; + } + + /** + * + * + *
    +   * Required. The transaction to use. Must be a read-write transaction.
    +   *
    +   * To protect against replays, single-use transactions are not supported. The
    +   * caller must either supply an existing transaction ID or begin a new
    +   * transaction.
    +   * 
    + * + * + * .google.spanner.v1.TransactionSelector transaction = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.v1.TransactionSelectorOrBuilder getTransactionOrBuilder() { + return transaction_ == null + ? com.google.spanner.v1.TransactionSelector.getDefaultInstance() + : transaction_; + } + + public static final int STATEMENTS_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private java.util.List statements_; + + /** + * + * + *
    +   * Required. The list of statements to execute in this batch. Statements are
    +   * executed serially, such that the effects of statement `i` are visible to
    +   * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +   * the first failed statement; the remaining statements are not executed.
    +   *
    +   * Callers must provide at least one statement.
    +   * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List + getStatementsList() { + return statements_; + } + + /** + * + * + *
    +   * Required. The list of statements to execute in this batch. Statements are
    +   * executed serially, such that the effects of statement `i` are visible to
    +   * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +   * the first failed statement; the remaining statements are not executed.
    +   *
    +   * Callers must provide at least one statement.
    +   * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public java.util.List + getStatementsOrBuilderList() { + return statements_; + } + + /** + * + * + *
    +   * Required. The list of statements to execute in this batch. Statements are
    +   * executed serially, such that the effects of statement `i` are visible to
    +   * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +   * the first failed statement; the remaining statements are not executed.
    +   *
    +   * Callers must provide at least one statement.
    +   * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public int getStatementsCount() { + return statements_.size(); + } + + /** + * + * + *
    +   * Required. The list of statements to execute in this batch. Statements are
    +   * executed serially, such that the effects of statement `i` are visible to
    +   * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +   * the first failed statement; the remaining statements are not executed.
    +   *
    +   * Callers must provide at least one statement.
    +   * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.v1.ExecuteBatchDmlRequest.Statement getStatements(int index) { + return statements_.get(index); + } + + /** + * + * + *
    +   * Required. The list of statements to execute in this batch. Statements are
    +   * executed serially, such that the effects of statement `i` are visible to
    +   * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +   * the first failed statement; the remaining statements are not executed.
    +   *
    +   * Callers must provide at least one statement.
    +   * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.v1.ExecuteBatchDmlRequest.StatementOrBuilder getStatementsOrBuilder( + int index) { + return statements_.get(index); + } + + public static final int SEQNO_FIELD_NUMBER = 4; + private long seqno_ = 0L; + + /** + * + * + *
    +   * Required. A per-transaction sequence number used to identify this request.
    +   * This field makes each request idempotent such that if the request is
    +   * received multiple times, at most one succeeds.
    +   *
    +   * The sequence number must be monotonically increasing within the
    +   * transaction. If a request arrives for the first time with an out-of-order
    +   * sequence number, the transaction might be aborted. Replays of previously
    +   * handled requests yield the same response as the first execution.
    +   * 
    + * + * int64 seqno = 4 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The seqno. + */ + @java.lang.Override + public long getSeqno() { + return seqno_; + } + + public static final int REQUEST_OPTIONS_FIELD_NUMBER = 5; + private com.google.spanner.v1.RequestOptions requestOptions_; + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 5; + * + * @return Whether the requestOptions field is set. + */ + @java.lang.Override + public boolean hasRequestOptions() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 5; + * + * @return The requestOptions. + */ + @java.lang.Override + public com.google.spanner.v1.RequestOptions getRequestOptions() { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 5; + */ + @java.lang.Override + public com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder() { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } + + public static final int LAST_STATEMENTS_FIELD_NUMBER = 6; + private boolean lastStatements_ = false; + + /** + * + * + *
    +   * Optional. If set to `true`, this request marks the end of the transaction.
    +   * After these statements execute, you must commit or abort the transaction.
    +   * Attempts to execute any other requests against this transaction
    +   * (including reads and queries) are rejected.
    +   *
    +   * Setting this option might cause some error reporting to be deferred until
    +   * commit time (for example, validation of unique constraints). Given this,
    +   * successful execution of statements shouldn't be assumed until a subsequent
    +   * `Commit` call completes successfully.
    +   * 
    + * + * bool last_statements = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The lastStatements. + */ + @java.lang.Override + public boolean getLastStatements() { + return lastStatements_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(session_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, session_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getTransaction()); + } + for (int i = 0; i < statements_.size(); i++) { + output.writeMessage(3, statements_.get(i)); + } + if (seqno_ != 0L) { + output.writeInt64(4, seqno_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(5, getRequestOptions()); + } + if (lastStatements_ != false) { + output.writeBool(6, lastStatements_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(session_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, session_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getTransaction()); + } + for (int i = 0; i < statements_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, statements_.get(i)); + } + if (seqno_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(4, seqno_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += 
com.google.protobuf.CodedOutputStream.computeMessageSize(5, getRequestOptions()); + } + if (lastStatements_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(6, lastStatements_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.ExecuteBatchDmlRequest)) { + return super.equals(obj); + } + com.google.spanner.v1.ExecuteBatchDmlRequest other = + (com.google.spanner.v1.ExecuteBatchDmlRequest) obj; + + if (!getSession().equals(other.getSession())) return false; + if (hasTransaction() != other.hasTransaction()) return false; + if (hasTransaction()) { + if (!getTransaction().equals(other.getTransaction())) return false; + } + if (!getStatementsList().equals(other.getStatementsList())) return false; + if (getSeqno() != other.getSeqno()) return false; + if (hasRequestOptions() != other.hasRequestOptions()) return false; + if (hasRequestOptions()) { + if (!getRequestOptions().equals(other.getRequestOptions())) return false; + } + if (getLastStatements() != other.getLastStatements()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SESSION_FIELD_NUMBER; + hash = (53 * hash) + getSession().hashCode(); + if (hasTransaction()) { + hash = (37 * hash) + TRANSACTION_FIELD_NUMBER; + hash = (53 * hash) + getTransaction().hashCode(); + } + if (getStatementsCount() > 0) { + hash = (37 * hash) + STATEMENTS_FIELD_NUMBER; + hash = (53 * hash) + getStatementsList().hashCode(); + } + hash = (37 * hash) + SEQNO_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getSeqno()); + if 
(hasRequestOptions()) { + hash = (37 * hash) + REQUEST_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getRequestOptions().hashCode(); + } + hash = (37 * hash) + LAST_STATEMENTS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getLastStatements()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.ExecuteBatchDmlRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml].
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.ExecuteBatchDmlRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ExecuteBatchDmlRequest) + com.google.spanner.v1.ExecuteBatchDmlRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteBatchDmlRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteBatchDmlRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ExecuteBatchDmlRequest.class, + com.google.spanner.v1.ExecuteBatchDmlRequest.Builder.class); + } + + // Construct using com.google.spanner.v1.ExecuteBatchDmlRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetTransactionFieldBuilder(); + internalGetStatementsFieldBuilder(); + internalGetRequestOptionsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + session_ = ""; + transaction_ = null; + if (transactionBuilder_ != null) { + transactionBuilder_.dispose(); + transactionBuilder_ = null; + } + if (statementsBuilder_ == null) { + statements_ = java.util.Collections.emptyList(); + } else { + statements_ = null; + statementsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + seqno_ = 0L; + requestOptions_ = null; + if (requestOptionsBuilder_ != null) { + 
requestOptionsBuilder_.dispose(); + requestOptionsBuilder_ = null; + } + lastStatements_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteBatchDmlRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.ExecuteBatchDmlRequest getDefaultInstanceForType() { + return com.google.spanner.v1.ExecuteBatchDmlRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ExecuteBatchDmlRequest build() { + com.google.spanner.v1.ExecuteBatchDmlRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.ExecuteBatchDmlRequest buildPartial() { + com.google.spanner.v1.ExecuteBatchDmlRequest result = + new com.google.spanner.v1.ExecuteBatchDmlRequest(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.v1.ExecuteBatchDmlRequest result) { + if (statementsBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0)) { + statements_ = java.util.Collections.unmodifiableList(statements_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.statements_ = statements_; + } else { + result.statements_ = statementsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.ExecuteBatchDmlRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.session_ = session_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.transaction_ = + transactionBuilder_ == null ? 
transaction_ : transactionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.seqno_ = seqno_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.requestOptions_ = + requestOptionsBuilder_ == null ? requestOptions_ : requestOptionsBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.lastStatements_ = lastStatements_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.ExecuteBatchDmlRequest) { + return mergeFrom((com.google.spanner.v1.ExecuteBatchDmlRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.ExecuteBatchDmlRequest other) { + if (other == com.google.spanner.v1.ExecuteBatchDmlRequest.getDefaultInstance()) return this; + if (!other.getSession().isEmpty()) { + session_ = other.session_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasTransaction()) { + mergeTransaction(other.getTransaction()); + } + if (statementsBuilder_ == null) { + if (!other.statements_.isEmpty()) { + if (statements_.isEmpty()) { + statements_ = other.statements_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureStatementsIsMutable(); + statements_.addAll(other.statements_); + } + onChanged(); + } + } else { + if (!other.statements_.isEmpty()) { + if (statementsBuilder_.isEmpty()) { + statementsBuilder_.dispose(); + statementsBuilder_ = null; + statements_ = other.statements_; + bitField0_ = (bitField0_ & ~0x00000004); + statementsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetStatementsFieldBuilder() + : null; + } else { + statementsBuilder_.addAllMessages(other.statements_); + } + } + } + if (other.getSeqno() != 0L) { + setSeqno(other.getSeqno()); + } + if (other.hasRequestOptions()) { + mergeRequestOptions(other.getRequestOptions()); + } + if (other.getLastStatements() != false) { + setLastStatements(other.getLastStatements()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + session_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetTransactionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + com.google.spanner.v1.ExecuteBatchDmlRequest.Statement m = + input.readMessage( + com.google.spanner.v1.ExecuteBatchDmlRequest.Statement.parser(), + extensionRegistry); + if (statementsBuilder_ == null) { + ensureStatementsIsMutable(); + statements_.add(m); + } else { + statementsBuilder_.addMessage(m); + } + break; + } // case 26 + case 32: + { + seqno_ = input.readInt64(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 42: + { + input.readMessage( + internalGetRequestOptionsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 48: + { + lastStatements_ = input.readBool(); + bitField0_ |= 0x00000020; + break; + } // case 48 + default: + { + if (!super.parseUnknownField(input, 
extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object session_ = ""; + + /** + * + * + *
    +     * Required. The session in which the DML statements should be performed.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + public java.lang.String getSession() { + java.lang.Object ref = session_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + session_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The session in which the DML statements should be performed.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + public com.google.protobuf.ByteString getSessionBytes() { + java.lang.Object ref = session_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + session_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The session in which the DML statements should be performed.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The session to set. + * @return This builder for chaining. + */ + public Builder setSession(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + session_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The session in which the DML statements should be performed.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearSession() { + session_ = getDefaultInstance().getSession(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The session in which the DML statements should be performed.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for session to set. + * @return This builder for chaining. + */ + public Builder setSessionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + session_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.spanner.v1.TransactionSelector transaction_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionSelector, + com.google.spanner.v1.TransactionSelector.Builder, + com.google.spanner.v1.TransactionSelectorOrBuilder> + transactionBuilder_; + + /** + * + * + *
    +     * Required. The transaction to use. Must be a read-write transaction.
    +     *
    +     * To protect against replays, single-use transactions are not supported. The
    +     * caller must either supply an existing transaction ID or begin a new
    +     * transaction.
    +     * 
    + * + * + * .google.spanner.v1.TransactionSelector transaction = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the transaction field is set. + */ + public boolean hasTransaction() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Required. The transaction to use. Must be a read-write transaction.
    +     *
    +     * To protect against replays, single-use transactions are not supported. The
    +     * caller must either supply an existing transaction ID or begin a new
    +     * transaction.
    +     * 
    + * + * + * .google.spanner.v1.TransactionSelector transaction = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The transaction. + */ + public com.google.spanner.v1.TransactionSelector getTransaction() { + if (transactionBuilder_ == null) { + return transaction_ == null + ? com.google.spanner.v1.TransactionSelector.getDefaultInstance() + : transaction_; + } else { + return transactionBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. The transaction to use. Must be a read-write transaction.
    +     *
    +     * To protect against replays, single-use transactions are not supported. The
    +     * caller must either supply an existing transaction ID or begin a new
    +     * transaction.
    +     * 
    + * + * + * .google.spanner.v1.TransactionSelector transaction = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setTransaction(com.google.spanner.v1.TransactionSelector value) { + if (transactionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + transaction_ = value; + } else { + transactionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The transaction to use. Must be a read-write transaction.
    +     *
    +     * To protect against replays, single-use transactions are not supported. The
    +     * caller must either supply an existing transaction ID or begin a new
    +     * transaction.
    +     * 
    + * + * + * .google.spanner.v1.TransactionSelector transaction = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setTransaction( + com.google.spanner.v1.TransactionSelector.Builder builderForValue) { + if (transactionBuilder_ == null) { + transaction_ = builderForValue.build(); + } else { + transactionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The transaction to use. Must be a read-write transaction.
    +     *
    +     * To protect against replays, single-use transactions are not supported. The
    +     * caller must either supply an existing transaction ID or begin a new
    +     * transaction.
    +     * 
    + * + * + * .google.spanner.v1.TransactionSelector transaction = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeTransaction(com.google.spanner.v1.TransactionSelector value) { + if (transactionBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && transaction_ != null + && transaction_ != com.google.spanner.v1.TransactionSelector.getDefaultInstance()) { + getTransactionBuilder().mergeFrom(value); + } else { + transaction_ = value; + } + } else { + transactionBuilder_.mergeFrom(value); + } + if (transaction_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. The transaction to use. Must be a read-write transaction.
    +     *
    +     * To protect against replays, single-use transactions are not supported. The
    +     * caller must either supply an existing transaction ID or begin a new
    +     * transaction.
    +     * 
    + * + * + * .google.spanner.v1.TransactionSelector transaction = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearTransaction() { + bitField0_ = (bitField0_ & ~0x00000002); + transaction_ = null; + if (transactionBuilder_ != null) { + transactionBuilder_.dispose(); + transactionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The transaction to use. Must be a read-write transaction.
    +     *
    +     * To protect against replays, single-use transactions are not supported. The
    +     * caller must either supply an existing transaction ID or begin a new
    +     * transaction.
    +     * 
    + * + * + * .google.spanner.v1.TransactionSelector transaction = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.TransactionSelector.Builder getTransactionBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetTransactionFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. The transaction to use. Must be a read-write transaction.
    +     *
    +     * To protect against replays, single-use transactions are not supported. The
    +     * caller must either supply an existing transaction ID or begin a new
    +     * transaction.
    +     * 
    + * + * + * .google.spanner.v1.TransactionSelector transaction = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.TransactionSelectorOrBuilder getTransactionOrBuilder() { + if (transactionBuilder_ != null) { + return transactionBuilder_.getMessageOrBuilder(); + } else { + return transaction_ == null + ? com.google.spanner.v1.TransactionSelector.getDefaultInstance() + : transaction_; + } + } + + /** + * + * + *
    +     * Required. The transaction to use. Must be a read-write transaction.
    +     *
    +     * To protect against replays, single-use transactions are not supported. The
    +     * caller must either supply an existing transaction ID or begin a new
    +     * transaction.
    +     * 
    + * + * + * .google.spanner.v1.TransactionSelector transaction = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionSelector, + com.google.spanner.v1.TransactionSelector.Builder, + com.google.spanner.v1.TransactionSelectorOrBuilder> + internalGetTransactionFieldBuilder() { + if (transactionBuilder_ == null) { + transactionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionSelector, + com.google.spanner.v1.TransactionSelector.Builder, + com.google.spanner.v1.TransactionSelectorOrBuilder>( + getTransaction(), getParentForChildren(), isClean()); + transaction_ = null; + } + return transactionBuilder_; + } + + private java.util.List statements_ = + java.util.Collections.emptyList(); + + private void ensureStatementsIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + statements_ = + new java.util.ArrayList( + statements_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ExecuteBatchDmlRequest.Statement, + com.google.spanner.v1.ExecuteBatchDmlRequest.Statement.Builder, + com.google.spanner.v1.ExecuteBatchDmlRequest.StatementOrBuilder> + statementsBuilder_; + + /** + * + * + *
    +     * Required. The list of statements to execute in this batch. Statements are
    +     * executed serially, such that the effects of statement `i` are visible to
    +     * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +     * the first failed statement; the remaining statements are not executed.
    +     *
    +     * Callers must provide at least one statement.
    +     * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getStatementsList() { + if (statementsBuilder_ == null) { + return java.util.Collections.unmodifiableList(statements_); + } else { + return statementsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Required. The list of statements to execute in this batch. Statements are
    +     * executed serially, such that the effects of statement `i` are visible to
    +     * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +     * the first failed statement; the remaining statements are not executed.
    +     *
    +     * Callers must provide at least one statement.
    +     * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public int getStatementsCount() { + if (statementsBuilder_ == null) { + return statements_.size(); + } else { + return statementsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Required. The list of statements to execute in this batch. Statements are
    +     * executed serially, such that the effects of statement `i` are visible to
    +     * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +     * the first failed statement; the remaining statements are not executed.
    +     *
    +     * Callers must provide at least one statement.
    +     * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.ExecuteBatchDmlRequest.Statement getStatements(int index) { + if (statementsBuilder_ == null) { + return statements_.get(index); + } else { + return statementsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Required. The list of statements to execute in this batch. Statements are
    +     * executed serially, such that the effects of statement `i` are visible to
    +     * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +     * the first failed statement; the remaining statements are not executed.
    +     *
    +     * Callers must provide at least one statement.
    +     * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setStatements( + int index, com.google.spanner.v1.ExecuteBatchDmlRequest.Statement value) { + if (statementsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatementsIsMutable(); + statements_.set(index, value); + onChanged(); + } else { + statementsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Required. The list of statements to execute in this batch. Statements are
    +     * executed serially, such that the effects of statement `i` are visible to
    +     * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +     * the first failed statement; the remaining statements are not executed.
    +     *
    +     * Callers must provide at least one statement.
    +     * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setStatements( + int index, com.google.spanner.v1.ExecuteBatchDmlRequest.Statement.Builder builderForValue) { + if (statementsBuilder_ == null) { + ensureStatementsIsMutable(); + statements_.set(index, builderForValue.build()); + onChanged(); + } else { + statementsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Required. The list of statements to execute in this batch. Statements are
    +     * executed serially, such that the effects of statement `i` are visible to
    +     * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +     * the first failed statement; the remaining statements are not executed.
    +     *
    +     * Callers must provide at least one statement.
    +     * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addStatements(com.google.spanner.v1.ExecuteBatchDmlRequest.Statement value) { + if (statementsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatementsIsMutable(); + statements_.add(value); + onChanged(); + } else { + statementsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Required. The list of statements to execute in this batch. Statements are
    +     * executed serially, such that the effects of statement `i` are visible to
    +     * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +     * the first failed statement; the remaining statements are not executed.
    +     *
    +     * Callers must provide at least one statement.
    +     * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addStatements( + int index, com.google.spanner.v1.ExecuteBatchDmlRequest.Statement value) { + if (statementsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatementsIsMutable(); + statements_.add(index, value); + onChanged(); + } else { + statementsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Required. The list of statements to execute in this batch. Statements are
    +     * executed serially, such that the effects of statement `i` are visible to
    +     * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +     * the first failed statement; the remaining statements are not executed.
    +     *
    +     * Callers must provide at least one statement.
    +     * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addStatements( + com.google.spanner.v1.ExecuteBatchDmlRequest.Statement.Builder builderForValue) { + if (statementsBuilder_ == null) { + ensureStatementsIsMutable(); + statements_.add(builderForValue.build()); + onChanged(); + } else { + statementsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Required. The list of statements to execute in this batch. Statements are
    +     * executed serially, such that the effects of statement `i` are visible to
    +     * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +     * the first failed statement; the remaining statements are not executed.
    +     *
    +     * Callers must provide at least one statement.
    +     * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addStatements( + int index, com.google.spanner.v1.ExecuteBatchDmlRequest.Statement.Builder builderForValue) { + if (statementsBuilder_ == null) { + ensureStatementsIsMutable(); + statements_.add(index, builderForValue.build()); + onChanged(); + } else { + statementsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Required. The list of statements to execute in this batch. Statements are
    +     * executed serially, such that the effects of statement `i` are visible to
    +     * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +     * the first failed statement; the remaining statements are not executed.
    +     *
    +     * Callers must provide at least one statement.
    +     * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder addAllStatements( + java.lang.Iterable + values) { + if (statementsBuilder_ == null) { + ensureStatementsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, statements_); + onChanged(); + } else { + statementsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Required. The list of statements to execute in this batch. Statements are
    +     * executed serially, such that the effects of statement `i` are visible to
    +     * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +     * the first failed statement; the remaining statements are not executed.
    +     *
    +     * Callers must provide at least one statement.
    +     * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearStatements() { + if (statementsBuilder_ == null) { + statements_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + statementsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Required. The list of statements to execute in this batch. Statements are
    +     * executed serially, such that the effects of statement `i` are visible to
    +     * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +     * the first failed statement; the remaining statements are not executed.
    +     *
    +     * Callers must provide at least one statement.
    +     * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder removeStatements(int index) { + if (statementsBuilder_ == null) { + ensureStatementsIsMutable(); + statements_.remove(index); + onChanged(); + } else { + statementsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Required. The list of statements to execute in this batch. Statements are
    +     * executed serially, such that the effects of statement `i` are visible to
    +     * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +     * the first failed statement; the remaining statements are not executed.
    +     *
    +     * Callers must provide at least one statement.
    +     * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.ExecuteBatchDmlRequest.Statement.Builder getStatementsBuilder( + int index) { + return internalGetStatementsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Required. The list of statements to execute in this batch. Statements are
    +     * executed serially, such that the effects of statement `i` are visible to
    +     * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +     * the first failed statement; the remaining statements are not executed.
    +     *
    +     * Callers must provide at least one statement.
    +     * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.ExecuteBatchDmlRequest.StatementOrBuilder getStatementsOrBuilder( + int index) { + if (statementsBuilder_ == null) { + return statements_.get(index); + } else { + return statementsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Required. The list of statements to execute in this batch. Statements are
    +     * executed serially, such that the effects of statement `i` are visible to
    +     * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +     * the first failed statement; the remaining statements are not executed.
    +     *
    +     * Callers must provide at least one statement.
    +     * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getStatementsOrBuilderList() { + if (statementsBuilder_ != null) { + return statementsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(statements_); + } + } + + /** + * + * + *
    +     * Required. The list of statements to execute in this batch. Statements are
    +     * executed serially, such that the effects of statement `i` are visible to
    +     * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +     * the first failed statement; the remaining statements are not executed.
    +     *
    +     * Callers must provide at least one statement.
    +     * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.ExecuteBatchDmlRequest.Statement.Builder addStatementsBuilder() { + return internalGetStatementsFieldBuilder() + .addBuilder(com.google.spanner.v1.ExecuteBatchDmlRequest.Statement.getDefaultInstance()); + } + + /** + * + * + *
    +     * Required. The list of statements to execute in this batch. Statements are
    +     * executed serially, such that the effects of statement `i` are visible to
    +     * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +     * the first failed statement; the remaining statements are not executed.
    +     *
    +     * Callers must provide at least one statement.
    +     * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.ExecuteBatchDmlRequest.Statement.Builder addStatementsBuilder( + int index) { + return internalGetStatementsFieldBuilder() + .addBuilder( + index, com.google.spanner.v1.ExecuteBatchDmlRequest.Statement.getDefaultInstance()); + } + + /** + * + * + *
    +     * Required. The list of statements to execute in this batch. Statements are
    +     * executed serially, such that the effects of statement `i` are visible to
    +     * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +     * the first failed statement; the remaining statements are not executed.
    +     *
    +     * Callers must provide at least one statement.
    +     * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public java.util.List + getStatementsBuilderList() { + return internalGetStatementsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ExecuteBatchDmlRequest.Statement, + com.google.spanner.v1.ExecuteBatchDmlRequest.Statement.Builder, + com.google.spanner.v1.ExecuteBatchDmlRequest.StatementOrBuilder> + internalGetStatementsFieldBuilder() { + if (statementsBuilder_ == null) { + statementsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ExecuteBatchDmlRequest.Statement, + com.google.spanner.v1.ExecuteBatchDmlRequest.Statement.Builder, + com.google.spanner.v1.ExecuteBatchDmlRequest.StatementOrBuilder>( + statements_, ((bitField0_ & 0x00000004) != 0), getParentForChildren(), isClean()); + statements_ = null; + } + return statementsBuilder_; + } + + private long seqno_; + + /** + * + * + *
    +     * Required. A per-transaction sequence number used to identify this request.
    +     * This field makes each request idempotent such that if the request is
    +     * received multiple times, at most one succeeds.
    +     *
    +     * The sequence number must be monotonically increasing within the
    +     * transaction. If a request arrives for the first time with an out-of-order
    +     * sequence number, the transaction might be aborted. Replays of previously
    +     * handled requests yield the same response as the first execution.
    +     * 
    + * + * int64 seqno = 4 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The seqno. + */ + @java.lang.Override + public long getSeqno() { + return seqno_; + } + + /** + * + * + *
    +     * Required. A per-transaction sequence number used to identify this request.
    +     * This field makes each request idempotent such that if the request is
    +     * received multiple times, at most one succeeds.
    +     *
    +     * The sequence number must be monotonically increasing within the
    +     * transaction. If a request arrives for the first time with an out-of-order
    +     * sequence number, the transaction might be aborted. Replays of previously
    +     * handled requests yield the same response as the first execution.
    +     * 
    + * + * int64 seqno = 4 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The seqno to set. + * @return This builder for chaining. + */ + public Builder setSeqno(long value) { + + seqno_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. A per-transaction sequence number used to identify this request.
    +     * This field makes each request idempotent such that if the request is
    +     * received multiple times, at most one succeeds.
    +     *
    +     * The sequence number must be monotonically increasing within the
    +     * transaction. If a request arrives for the first time with an out-of-order
    +     * sequence number, the transaction might be aborted. Replays of previously
    +     * handled requests yield the same response as the first execution.
    +     * 
    + * + * int64 seqno = 4 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearSeqno() { + bitField0_ = (bitField0_ & ~0x00000008); + seqno_ = 0L; + onChanged(); + return this; + } + + private com.google.spanner.v1.RequestOptions requestOptions_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RequestOptions, + com.google.spanner.v1.RequestOptions.Builder, + com.google.spanner.v1.RequestOptionsOrBuilder> + requestOptionsBuilder_; + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 5; + * + * @return Whether the requestOptions field is set. + */ + public boolean hasRequestOptions() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 5; + * + * @return The requestOptions. + */ + public com.google.spanner.v1.RequestOptions getRequestOptions() { + if (requestOptionsBuilder_ == null) { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } else { + return requestOptionsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 5; + */ + public Builder setRequestOptions(com.google.spanner.v1.RequestOptions value) { + if (requestOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + requestOptions_ = value; + } else { + requestOptionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 5; + */ + public Builder setRequestOptions(com.google.spanner.v1.RequestOptions.Builder builderForValue) { + if (requestOptionsBuilder_ == null) { + requestOptions_ = builderForValue.build(); + } else { + requestOptionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 5; + */ + public Builder mergeRequestOptions(com.google.spanner.v1.RequestOptions value) { + if (requestOptionsBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && requestOptions_ != null + && requestOptions_ != com.google.spanner.v1.RequestOptions.getDefaultInstance()) { + getRequestOptionsBuilder().mergeFrom(value); + } else { + requestOptions_ = value; + } + } else { + requestOptionsBuilder_.mergeFrom(value); + } + if (requestOptions_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 5; + */ + public Builder clearRequestOptions() { + bitField0_ = (bitField0_ & ~0x00000010); + requestOptions_ = null; + if (requestOptionsBuilder_ != null) { + requestOptionsBuilder_.dispose(); + requestOptionsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 5; + */ + public com.google.spanner.v1.RequestOptions.Builder getRequestOptionsBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetRequestOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 5; + */ + public com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder() { + if (requestOptionsBuilder_ != null) { + return requestOptionsBuilder_.getMessageOrBuilder(); + } else { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RequestOptions, + com.google.spanner.v1.RequestOptions.Builder, + com.google.spanner.v1.RequestOptionsOrBuilder> + internalGetRequestOptionsFieldBuilder() { + if (requestOptionsBuilder_ == null) { + requestOptionsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RequestOptions, + com.google.spanner.v1.RequestOptions.Builder, + com.google.spanner.v1.RequestOptionsOrBuilder>( + getRequestOptions(), getParentForChildren(), isClean()); + requestOptions_ = null; + } + return requestOptionsBuilder_; + } + + private boolean lastStatements_; + + /** + * + * + *
    +     * Optional. If set to `true`, this request marks the end of the transaction.
    +     * After these statements execute, you must commit or abort the transaction.
    +     * Attempts to execute any other requests against this transaction
    +     * (including reads and queries) are rejected.
    +     *
    +     * Setting this option might cause some error reporting to be deferred until
    +     * commit time (for example, validation of unique constraints). Given this,
    +     * successful execution of statements shouldn't be assumed until a subsequent
    +     * `Commit` call completes successfully.
    +     * 
    + * + * bool last_statements = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The lastStatements. + */ + @java.lang.Override + public boolean getLastStatements() { + return lastStatements_; + } + + /** + * + * + *
    +     * Optional. If set to `true`, this request marks the end of the transaction.
    +     * After these statements execute, you must commit or abort the transaction.
    +     * Attempts to execute any other requests against this transaction
    +     * (including reads and queries) are rejected.
    +     *
    +     * Setting this option might cause some error reporting to be deferred until
    +     * commit time (for example, validation of unique constraints). Given this,
    +     * successful execution of statements shouldn't be assumed until a subsequent
    +     * `Commit` call completes successfully.
    +     * 
    + * + * bool last_statements = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The lastStatements to set. + * @return This builder for chaining. + */ + public Builder setLastStatements(boolean value) { + + lastStatements_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. If set to `true`, this request marks the end of the transaction.
    +     * After these statements execute, you must commit or abort the transaction.
    +     * Attempts to execute any other requests against this transaction
    +     * (including reads and queries) are rejected.
    +     *
    +     * Setting this option might cause some error reporting to be deferred until
    +     * commit time (for example, validation of unique constraints). Given this,
    +     * successful execution of statements shouldn't be assumed until a subsequent
    +     * `Commit` call completes successfully.
    +     * 
    + * + * bool last_statements = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearLastStatements() { + bitField0_ = (bitField0_ & ~0x00000020); + lastStatements_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ExecuteBatchDmlRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteBatchDmlRequest) + private static final com.google.spanner.v1.ExecuteBatchDmlRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.ExecuteBatchDmlRequest(); + } + + public static com.google.spanner.v1.ExecuteBatchDmlRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ExecuteBatchDmlRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.ExecuteBatchDmlRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlRequestOrBuilder.java new file mode 100644 index 000000000000..ca03a10204d6 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlRequestOrBuilder.java @@ -0,0 +1,282 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface ExecuteBatchDmlRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ExecuteBatchDmlRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The session in which the DML statements should be performed.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + java.lang.String getSession(); + + /** + * + * + *
    +   * Required. The session in which the DML statements should be performed.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + com.google.protobuf.ByteString getSessionBytes(); + + /** + * + * + *
    +   * Required. The transaction to use. Must be a read-write transaction.
    +   *
    +   * To protect against replays, single-use transactions are not supported. The
    +   * caller must either supply an existing transaction ID or begin a new
    +   * transaction.
    +   * 
    + * + * + * .google.spanner.v1.TransactionSelector transaction = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the transaction field is set. + */ + boolean hasTransaction(); + + /** + * + * + *
    +   * Required. The transaction to use. Must be a read-write transaction.
    +   *
    +   * To protect against replays, single-use transactions are not supported. The
    +   * caller must either supply an existing transaction ID or begin a new
    +   * transaction.
    +   * 
    + * + * + * .google.spanner.v1.TransactionSelector transaction = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The transaction. + */ + com.google.spanner.v1.TransactionSelector getTransaction(); + + /** + * + * + *
    +   * Required. The transaction to use. Must be a read-write transaction.
    +   *
    +   * To protect against replays, single-use transactions are not supported. The
    +   * caller must either supply an existing transaction ID or begin a new
    +   * transaction.
    +   * 
    + * + * + * .google.spanner.v1.TransactionSelector transaction = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.v1.TransactionSelectorOrBuilder getTransactionOrBuilder(); + + /** + * + * + *
    +   * Required. The list of statements to execute in this batch. Statements are
    +   * executed serially, such that the effects of statement `i` are visible to
    +   * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +   * the first failed statement; the remaining statements are not executed.
    +   *
    +   * Callers must provide at least one statement.
    +   * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List getStatementsList(); + + /** + * + * + *
    +   * Required. The list of statements to execute in this batch. Statements are
    +   * executed serially, such that the effects of statement `i` are visible to
    +   * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +   * the first failed statement; the remaining statements are not executed.
    +   *
    +   * Callers must provide at least one statement.
    +   * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.v1.ExecuteBatchDmlRequest.Statement getStatements(int index); + + /** + * + * + *
    +   * Required. The list of statements to execute in this batch. Statements are
    +   * executed serially, such that the effects of statement `i` are visible to
    +   * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +   * the first failed statement; the remaining statements are not executed.
    +   *
    +   * Callers must provide at least one statement.
    +   * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + int getStatementsCount(); + + /** + * + * + *
    +   * Required. The list of statements to execute in this batch. Statements are
    +   * executed serially, such that the effects of statement `i` are visible to
    +   * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +   * the first failed statement; the remaining statements are not executed.
    +   *
    +   * Callers must provide at least one statement.
    +   * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + java.util.List + getStatementsOrBuilderList(); + + /** + * + * + *
    +   * Required. The list of statements to execute in this batch. Statements are
    +   * executed serially, such that the effects of statement `i` are visible to
    +   * statement `i+1`. Each statement must be a DML statement. Execution stops at
    +   * the first failed statement; the remaining statements are not executed.
    +   *
    +   * Callers must provide at least one statement.
    +   * 
    + * + * + * repeated .google.spanner.v1.ExecuteBatchDmlRequest.Statement statements = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.v1.ExecuteBatchDmlRequest.StatementOrBuilder getStatementsOrBuilder(int index); + + /** + * + * + *
    +   * Required. A per-transaction sequence number used to identify this request.
    +   * This field makes each request idempotent such that if the request is
    +   * received multiple times, at most one succeeds.
    +   *
    +   * The sequence number must be monotonically increasing within the
    +   * transaction. If a request arrives for the first time with an out-of-order
    +   * sequence number, the transaction might be aborted. Replays of previously
    +   * handled requests yield the same response as the first execution.
    +   * 
    + * + * int64 seqno = 4 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The seqno. + */ + long getSeqno(); + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 5; + * + * @return Whether the requestOptions field is set. + */ + boolean hasRequestOptions(); + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 5; + * + * @return The requestOptions. + */ + com.google.spanner.v1.RequestOptions getRequestOptions(); + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 5; + */ + com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder(); + + /** + * + * + *
    +   * Optional. If set to `true`, this request marks the end of the transaction.
    +   * After these statements execute, you must commit or abort the transaction.
    +   * Attempts to execute any other requests against this transaction
    +   * (including reads and queries) are rejected.
    +   *
    +   * Setting this option might cause some error reporting to be deferred until
    +   * commit time (for example, validation of unique constraints). Given this,
    +   * successful execution of statements shouldn't be assumed until a subsequent
    +   * `Commit` call completes successfully.
    +   * 
    + * + * bool last_statements = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The lastStatements. + */ + boolean getLastStatements(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlResponse.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlResponse.java new file mode 100644 index 000000000000..49a517a8aa30 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlResponse.java @@ -0,0 +1,1801 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * The response for
    + * [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. Contains a list
    + * of [ResultSet][google.spanner.v1.ResultSet] messages, one for each DML
    + * statement that has successfully executed, in the same order as the statements
    + * in the request. If a statement fails, the status in the response body
    + * identifies the cause of the failure.
    + *
    + * To check for DML statements that failed, use the following approach:
    + *
    + * 1. Check the status in the response message. The
    + * [google.rpc.Code][google.rpc.Code] enum
    + * value `OK` indicates that all statements were executed successfully.
    + * 2. If the status was not `OK`, check the number of result sets in the
    + * response. If the response contains `N`
    + * [ResultSet][google.spanner.v1.ResultSet] messages, then statement `N+1` in
    + * the request failed.
    + *
    + * Example 1:
    + *
    + * * Request: 5 DML statements, all executed successfully.
    + * * Response: 5 [ResultSet][google.spanner.v1.ResultSet] messages, with the
    + * status `OK`.
    + *
    + * Example 2:
    + *
    + * * Request: 5 DML statements. The third statement has a syntax error.
    + * * Response: 2 [ResultSet][google.spanner.v1.ResultSet] messages, and a syntax
    + * error (`INVALID_ARGUMENT`)
    + * status. The number of [ResultSet][google.spanner.v1.ResultSet] messages
    + * indicates that the third statement failed, and the fourth and fifth
    + * statements were not executed.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.ExecuteBatchDmlResponse} + */ +@com.google.protobuf.Generated +public final class ExecuteBatchDmlResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ExecuteBatchDmlResponse) + ExecuteBatchDmlResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ExecuteBatchDmlResponse"); + } + + // Use ExecuteBatchDmlResponse.newBuilder() to construct. + private ExecuteBatchDmlResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ExecuteBatchDmlResponse() { + resultSets_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteBatchDmlResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteBatchDmlResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ExecuteBatchDmlResponse.class, + com.google.spanner.v1.ExecuteBatchDmlResponse.Builder.class); + } + + private int bitField0_; + public static final int RESULT_SETS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List resultSets_; + + /** + * + * + *
    +   * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +   * request that ran successfully, in the same order as the statements in the
    +   * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +   * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +   * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +   * modified by the statement.
    +   *
    +   * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +   * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +   * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + @java.lang.Override + public java.util.List getResultSetsList() { + return resultSets_; + } + + /** + * + * + *
    +   * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +   * request that ran successfully, in the same order as the statements in the
    +   * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +   * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +   * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +   * modified by the statement.
    +   *
    +   * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +   * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +   * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + @java.lang.Override + public java.util.List + getResultSetsOrBuilderList() { + return resultSets_; + } + + /** + * + * + *
    +   * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +   * request that ran successfully, in the same order as the statements in the
    +   * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +   * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +   * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +   * modified by the statement.
    +   *
    +   * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +   * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +   * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + @java.lang.Override + public int getResultSetsCount() { + return resultSets_.size(); + } + + /** + * + * + *
    +   * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +   * request that ran successfully, in the same order as the statements in the
    +   * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +   * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +   * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +   * modified by the statement.
    +   *
    +   * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +   * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +   * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + @java.lang.Override + public com.google.spanner.v1.ResultSet getResultSets(int index) { + return resultSets_.get(index); + } + + /** + * + * + *
    +   * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +   * request that ran successfully, in the same order as the statements in the
    +   * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +   * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +   * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +   * modified by the statement.
    +   *
    +   * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +   * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +   * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + @java.lang.Override + public com.google.spanner.v1.ResultSetOrBuilder getResultSetsOrBuilder(int index) { + return resultSets_.get(index); + } + + public static final int STATUS_FIELD_NUMBER = 2; + private com.google.rpc.Status status_; + + /** + * + * + *
    +   * If all DML statements are executed successfully, the status is `OK`.
    +   * Otherwise, the error status of the first failed statement.
    +   * 
    + * + * .google.rpc.Status status = 2; + * + * @return Whether the status field is set. + */ + @java.lang.Override + public boolean hasStatus() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * If all DML statements are executed successfully, the status is `OK`.
    +   * Otherwise, the error status of the first failed statement.
    +   * 
    + * + * .google.rpc.Status status = 2; + * + * @return The status. + */ + @java.lang.Override + public com.google.rpc.Status getStatus() { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } + + /** + * + * + *
    +   * If all DML statements are executed successfully, the status is `OK`.
    +   * Otherwise, the error status of the first failed statement.
    +   * 
    + * + * .google.rpc.Status status = 2; + */ + @java.lang.Override + public com.google.rpc.StatusOrBuilder getStatusOrBuilder() { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } + + public static final int PRECOMMIT_TOKEN_FIELD_NUMBER = 3; + private com.google.spanner.v1.MultiplexedSessionPrecommitToken precommitToken_; + + /** + * + * + *
    +   * Optional. A precommit token is included if the read-write transaction
    +   * is on a multiplexed session. Pass the precommit token with the highest
    +   * sequence number from this transaction attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + @java.lang.Override + public boolean hasPrecommitToken() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Optional. A precommit token is included if the read-write transaction
    +   * is on a multiplexed session. Pass the precommit token with the highest
    +   * sequence number from this transaction attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + + /** + * + * + *
    +   * Optional. A precommit token is included if the read-write transaction
    +   * is on a multiplexed session. Pass the precommit token with the highest
    +   * sequence number from this transaction attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < resultSets_.size(); i++) { + output.writeMessage(1, resultSets_.get(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getStatus()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getPrecommitToken()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < resultSets_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, resultSets_.get(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getStatus()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getPrecommitToken()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.ExecuteBatchDmlResponse)) { + return super.equals(obj); + } + 
com.google.spanner.v1.ExecuteBatchDmlResponse other = + (com.google.spanner.v1.ExecuteBatchDmlResponse) obj; + + if (!getResultSetsList().equals(other.getResultSetsList())) return false; + if (hasStatus() != other.hasStatus()) return false; + if (hasStatus()) { + if (!getStatus().equals(other.getStatus())) return false; + } + if (hasPrecommitToken() != other.hasPrecommitToken()) return false; + if (hasPrecommitToken()) { + if (!getPrecommitToken().equals(other.getPrecommitToken())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getResultSetsCount() > 0) { + hash = (37 * hash) + RESULT_SETS_FIELD_NUMBER; + hash = (53 * hash) + getResultSetsList().hashCode(); + } + if (hasStatus()) { + hash = (37 * hash) + STATUS_FIELD_NUMBER; + hash = (53 * hash) + getStatus().hashCode(); + } + if (hasPrecommitToken()) { + hash = (37 * hash) + PRECOMMIT_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPrecommitToken().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ExecuteBatchDmlResponse parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ExecuteBatchDmlResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteBatchDmlResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.v1.ExecuteBatchDmlResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteBatchDmlResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ExecuteBatchDmlResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteBatchDmlResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ExecuteBatchDmlResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteBatchDmlResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ExecuteBatchDmlResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteBatchDmlResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ExecuteBatchDmlResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.ExecuteBatchDmlResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The response for
    +   * [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. Contains a list
    +   * of [ResultSet][google.spanner.v1.ResultSet] messages, one for each DML
    +   * statement that has successfully executed, in the same order as the statements
    +   * in the request. If a statement fails, the status in the response body
    +   * identifies the cause of the failure.
    +   *
    +   * To check for DML statements that failed, use the following approach:
    +   *
    +   * 1. Check the status in the response message. The
    +   * [google.rpc.Code][google.rpc.Code] enum
    +   * value `OK` indicates that all statements were executed successfully.
    +   * 2. If the status was not `OK`, check the number of result sets in the
    +   * response. If the response contains `N`
    +   * [ResultSet][google.spanner.v1.ResultSet] messages, then statement `N+1` in
    +   * the request failed.
    +   *
    +   * Example 1:
    +   *
    +   * * Request: 5 DML statements, all executed successfully.
    +   * * Response: 5 [ResultSet][google.spanner.v1.ResultSet] messages, with the
    +   * status `OK`.
    +   *
    +   * Example 2:
    +   *
    +   * * Request: 5 DML statements. The third statement has a syntax error.
    +   * * Response: 2 [ResultSet][google.spanner.v1.ResultSet] messages, and a syntax
    +   * error (`INVALID_ARGUMENT`)
    +   * status. The number of [ResultSet][google.spanner.v1.ResultSet] messages
    +   * indicates that the third statement failed, and the fourth and fifth
    +   * statements were not executed.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.ExecuteBatchDmlResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ExecuteBatchDmlResponse) + com.google.spanner.v1.ExecuteBatchDmlResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteBatchDmlResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteBatchDmlResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ExecuteBatchDmlResponse.class, + com.google.spanner.v1.ExecuteBatchDmlResponse.Builder.class); + } + + // Construct using com.google.spanner.v1.ExecuteBatchDmlResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetResultSetsFieldBuilder(); + internalGetStatusFieldBuilder(); + internalGetPrecommitTokenFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (resultSetsBuilder_ == null) { + resultSets_ = java.util.Collections.emptyList(); + } else { + resultSets_ = null; + resultSetsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + status_ = null; + if (statusBuilder_ != null) { + statusBuilder_.dispose(); + statusBuilder_ = null; + } + precommitToken_ = null; + if (precommitTokenBuilder_ != null) { + precommitTokenBuilder_.dispose(); + precommitTokenBuilder_ = 
null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteBatchDmlResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.ExecuteBatchDmlResponse getDefaultInstanceForType() { + return com.google.spanner.v1.ExecuteBatchDmlResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ExecuteBatchDmlResponse build() { + com.google.spanner.v1.ExecuteBatchDmlResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.ExecuteBatchDmlResponse buildPartial() { + com.google.spanner.v1.ExecuteBatchDmlResponse result = + new com.google.spanner.v1.ExecuteBatchDmlResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.v1.ExecuteBatchDmlResponse result) { + if (resultSetsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + resultSets_ = java.util.Collections.unmodifiableList(resultSets_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.resultSets_ = resultSets_; + } else { + result.resultSets_ = resultSetsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.ExecuteBatchDmlResponse result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.status_ = statusBuilder_ == null ? status_ : statusBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.precommitToken_ = + precommitTokenBuilder_ == null ? 
precommitToken_ : precommitTokenBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.ExecuteBatchDmlResponse) { + return mergeFrom((com.google.spanner.v1.ExecuteBatchDmlResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.ExecuteBatchDmlResponse other) { + if (other == com.google.spanner.v1.ExecuteBatchDmlResponse.getDefaultInstance()) return this; + if (resultSetsBuilder_ == null) { + if (!other.resultSets_.isEmpty()) { + if (resultSets_.isEmpty()) { + resultSets_ = other.resultSets_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureResultSetsIsMutable(); + resultSets_.addAll(other.resultSets_); + } + onChanged(); + } + } else { + if (!other.resultSets_.isEmpty()) { + if (resultSetsBuilder_.isEmpty()) { + resultSetsBuilder_.dispose(); + resultSetsBuilder_ = null; + resultSets_ = other.resultSets_; + bitField0_ = (bitField0_ & ~0x00000001); + resultSetsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetResultSetsFieldBuilder() + : null; + } else { + resultSetsBuilder_.addAllMessages(other.resultSets_); + } + } + } + if (other.hasStatus()) { + mergeStatus(other.getStatus()); + } + if (other.hasPrecommitToken()) { + mergePrecommitToken(other.getPrecommitToken()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.v1.ResultSet m = + input.readMessage(com.google.spanner.v1.ResultSet.parser(), extensionRegistry); + if (resultSetsBuilder_ == null) { + ensureResultSetsIsMutable(); + resultSets_.add(m); + } else { + resultSetsBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + input.readMessage(internalGetStatusFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetPrecommitTokenFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List resultSets_ = + java.util.Collections.emptyList(); + + private void ensureResultSetsIsMutable() { + if (!((bitField0_ & 
0x00000001) != 0)) { + resultSets_ = new java.util.ArrayList(resultSets_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ResultSet, + com.google.spanner.v1.ResultSet.Builder, + com.google.spanner.v1.ResultSetOrBuilder> + resultSetsBuilder_; + + /** + * + * + *
    +     * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +     * request that ran successfully, in the same order as the statements in the
    +     * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +     * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +     * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +     * modified by the statement.
    +     *
    +     * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +     * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +     * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + public java.util.List getResultSetsList() { + if (resultSetsBuilder_ == null) { + return java.util.Collections.unmodifiableList(resultSets_); + } else { + return resultSetsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +     * request that ran successfully, in the same order as the statements in the
    +     * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +     * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +     * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +     * modified by the statement.
    +     *
    +     * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +     * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +     * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + public int getResultSetsCount() { + if (resultSetsBuilder_ == null) { + return resultSets_.size(); + } else { + return resultSetsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +     * request that ran successfully, in the same order as the statements in the
    +     * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +     * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +     * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +     * modified by the statement.
    +     *
    +     * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +     * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +     * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + public com.google.spanner.v1.ResultSet getResultSets(int index) { + if (resultSetsBuilder_ == null) { + return resultSets_.get(index); + } else { + return resultSetsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +     * request that ran successfully, in the same order as the statements in the
    +     * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +     * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +     * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +     * modified by the statement.
    +     *
    +     * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +     * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +     * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + public Builder setResultSets(int index, com.google.spanner.v1.ResultSet value) { + if (resultSetsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureResultSetsIsMutable(); + resultSets_.set(index, value); + onChanged(); + } else { + resultSetsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +     * request that ran successfully, in the same order as the statements in the
    +     * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +     * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +     * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +     * modified by the statement.
    +     *
    +     * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +     * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +     * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + public Builder setResultSets( + int index, com.google.spanner.v1.ResultSet.Builder builderForValue) { + if (resultSetsBuilder_ == null) { + ensureResultSetsIsMutable(); + resultSets_.set(index, builderForValue.build()); + onChanged(); + } else { + resultSetsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +     * request that ran successfully, in the same order as the statements in the
    +     * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +     * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +     * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +     * modified by the statement.
    +     *
    +     * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +     * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +     * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + public Builder addResultSets(com.google.spanner.v1.ResultSet value) { + if (resultSetsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureResultSetsIsMutable(); + resultSets_.add(value); + onChanged(); + } else { + resultSetsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +     * request that ran successfully, in the same order as the statements in the
    +     * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +     * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +     * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +     * modified by the statement.
    +     *
    +     * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +     * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +     * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + public Builder addResultSets(int index, com.google.spanner.v1.ResultSet value) { + if (resultSetsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureResultSetsIsMutable(); + resultSets_.add(index, value); + onChanged(); + } else { + resultSetsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +     * request that ran successfully, in the same order as the statements in the
    +     * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +     * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +     * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +     * modified by the statement.
    +     *
    +     * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +     * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +     * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + public Builder addResultSets(com.google.spanner.v1.ResultSet.Builder builderForValue) { + if (resultSetsBuilder_ == null) { + ensureResultSetsIsMutable(); + resultSets_.add(builderForValue.build()); + onChanged(); + } else { + resultSetsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +     * request that ran successfully, in the same order as the statements in the
    +     * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +     * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +     * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +     * modified by the statement.
    +     *
    +     * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +     * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +     * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + public Builder addResultSets( + int index, com.google.spanner.v1.ResultSet.Builder builderForValue) { + if (resultSetsBuilder_ == null) { + ensureResultSetsIsMutable(); + resultSets_.add(index, builderForValue.build()); + onChanged(); + } else { + resultSetsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +     * request that ran successfully, in the same order as the statements in the
    +     * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +     * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +     * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +     * modified by the statement.
    +     *
    +     * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +     * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +     * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + public Builder addAllResultSets( + java.lang.Iterable values) { + if (resultSetsBuilder_ == null) { + ensureResultSetsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, resultSets_); + onChanged(); + } else { + resultSetsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +     * request that ran successfully, in the same order as the statements in the
    +     * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +     * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +     * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +     * modified by the statement.
    +     *
    +     * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +     * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +     * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + public Builder clearResultSets() { + if (resultSetsBuilder_ == null) { + resultSets_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + resultSetsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +     * request that ran successfully, in the same order as the statements in the
    +     * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +     * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +     * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +     * modified by the statement.
    +     *
    +     * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +     * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +     * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + public Builder removeResultSets(int index) { + if (resultSetsBuilder_ == null) { + ensureResultSetsIsMutable(); + resultSets_.remove(index); + onChanged(); + } else { + resultSetsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +     * request that ran successfully, in the same order as the statements in the
    +     * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +     * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +     * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +     * modified by the statement.
    +     *
    +     * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +     * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +     * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + public com.google.spanner.v1.ResultSet.Builder getResultSetsBuilder(int index) { + return internalGetResultSetsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +     * request that ran successfully, in the same order as the statements in the
    +     * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +     * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +     * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +     * modified by the statement.
    +     *
    +     * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +     * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +     * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + public com.google.spanner.v1.ResultSetOrBuilder getResultSetsOrBuilder(int index) { + if (resultSetsBuilder_ == null) { + return resultSets_.get(index); + } else { + return resultSetsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +     * request that ran successfully, in the same order as the statements in the
    +     * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +     * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +     * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +     * modified by the statement.
    +     *
    +     * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +     * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +     * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + public java.util.List + getResultSetsOrBuilderList() { + if (resultSetsBuilder_ != null) { + return resultSetsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(resultSets_); + } + } + + /** + * + * + *
    +     * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +     * request that ran successfully, in the same order as the statements in the
    +     * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +     * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +     * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +     * modified by the statement.
    +     *
    +     * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +     * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +     * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + public com.google.spanner.v1.ResultSet.Builder addResultSetsBuilder() { + return internalGetResultSetsFieldBuilder() + .addBuilder(com.google.spanner.v1.ResultSet.getDefaultInstance()); + } + + /** + * + * + *
    +     * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +     * request that ran successfully, in the same order as the statements in the
    +     * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +     * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +     * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +     * modified by the statement.
    +     *
    +     * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +     * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +     * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + public com.google.spanner.v1.ResultSet.Builder addResultSetsBuilder(int index) { + return internalGetResultSetsFieldBuilder() + .addBuilder(index, com.google.spanner.v1.ResultSet.getDefaultInstance()); + } + + /** + * + * + *
    +     * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +     * request that ran successfully, in the same order as the statements in the
    +     * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +     * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +     * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +     * modified by the statement.
    +     *
    +     * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +     * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +     * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + public java.util.List getResultSetsBuilderList() { + return internalGetResultSetsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ResultSet, + com.google.spanner.v1.ResultSet.Builder, + com.google.spanner.v1.ResultSetOrBuilder> + internalGetResultSetsFieldBuilder() { + if (resultSetsBuilder_ == null) { + resultSetsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.ResultSet, + com.google.spanner.v1.ResultSet.Builder, + com.google.spanner.v1.ResultSetOrBuilder>( + resultSets_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + resultSets_ = null; + } + return resultSetsBuilder_; + } + + private com.google.rpc.Status status_; + private com.google.protobuf.SingleFieldBuilder< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + statusBuilder_; + + /** + * + * + *
    +     * If all DML statements are executed successfully, the status is `OK`.
    +     * Otherwise, the error status of the first failed statement.
    +     * 
    + * + * .google.rpc.Status status = 2; + * + * @return Whether the status field is set. + */ + public boolean hasStatus() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * If all DML statements are executed successfully, the status is `OK`.
    +     * Otherwise, the error status of the first failed statement.
    +     * 
    + * + * .google.rpc.Status status = 2; + * + * @return The status. + */ + public com.google.rpc.Status getStatus() { + if (statusBuilder_ == null) { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } else { + return statusBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * If all DML statements are executed successfully, the status is `OK`.
    +     * Otherwise, the error status of the first failed statement.
    +     * 
    + * + * .google.rpc.Status status = 2; + */ + public Builder setStatus(com.google.rpc.Status value) { + if (statusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + status_ = value; + } else { + statusBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If all DML statements are executed successfully, the status is `OK`.
    +     * Otherwise, the error status of the first failed statement.
    +     * 
    + * + * .google.rpc.Status status = 2; + */ + public Builder setStatus(com.google.rpc.Status.Builder builderForValue) { + if (statusBuilder_ == null) { + status_ = builderForValue.build(); + } else { + statusBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If all DML statements are executed successfully, the status is `OK`.
    +     * Otherwise, the error status of the first failed statement.
    +     * 
    + * + * .google.rpc.Status status = 2; + */ + public Builder mergeStatus(com.google.rpc.Status value) { + if (statusBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && status_ != null + && status_ != com.google.rpc.Status.getDefaultInstance()) { + getStatusBuilder().mergeFrom(value); + } else { + status_ = value; + } + } else { + statusBuilder_.mergeFrom(value); + } + if (status_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * If all DML statements are executed successfully, the status is `OK`.
    +     * Otherwise, the error status of the first failed statement.
    +     * 
    + * + * .google.rpc.Status status = 2; + */ + public Builder clearStatus() { + bitField0_ = (bitField0_ & ~0x00000002); + status_ = null; + if (statusBuilder_ != null) { + statusBuilder_.dispose(); + statusBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * If all DML statements are executed successfully, the status is `OK`.
    +     * Otherwise, the error status of the first failed statement.
    +     * 
    + * + * .google.rpc.Status status = 2; + */ + public com.google.rpc.Status.Builder getStatusBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetStatusFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * If all DML statements are executed successfully, the status is `OK`.
    +     * Otherwise, the error status of the first failed statement.
    +     * 
    + * + * .google.rpc.Status status = 2; + */ + public com.google.rpc.StatusOrBuilder getStatusOrBuilder() { + if (statusBuilder_ != null) { + return statusBuilder_.getMessageOrBuilder(); + } else { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } + } + + /** + * + * + *
    +     * If all DML statements are executed successfully, the status is `OK`.
    +     * Otherwise, the error status of the first failed statement.
    +     * 
    + * + * .google.rpc.Status status = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + internalGetStatusFieldBuilder() { + if (statusBuilder_ == null) { + statusBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.rpc.Status, + com.google.rpc.Status.Builder, + com.google.rpc.StatusOrBuilder>(getStatus(), getParentForChildren(), isClean()); + status_ = null; + } + return statusBuilder_; + } + + private com.google.spanner.v1.MultiplexedSessionPrecommitToken precommitToken_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + precommitTokenBuilder_; + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction
    +     * is on a multiplexed session. Pass the precommit token with the highest
    +     * sequence number from this transaction attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + public boolean hasPrecommitToken() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction
    +     * is on a multiplexed session. Pass the precommit token with the highest
    +     * sequence number from this transaction attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + if (precommitTokenBuilder_ == null) { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } else { + return precommitTokenBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction
    +     * is on a multiplexed session. Pass the precommit token with the highest
    +     * sequence number from this transaction attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPrecommitToken(com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + precommitToken_ = value; + } else { + precommitTokenBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction
    +     * is on a multiplexed session. Pass the precommit token with the highest
    +     * sequence number from this transaction attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder builderForValue) { + if (precommitTokenBuilder_ == null) { + precommitToken_ = builderForValue.build(); + } else { + precommitTokenBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction
    +     * is on a multiplexed session. Pass the precommit token with the highest
    +     * sequence number from this transaction attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergePrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && precommitToken_ != null + && precommitToken_ + != com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance()) { + getPrecommitTokenBuilder().mergeFrom(value); + } else { + precommitToken_ = value; + } + } else { + precommitTokenBuilder_.mergeFrom(value); + } + if (precommitToken_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction
    +     * is on a multiplexed session. Pass the precommit token with the highest
    +     * sequence number from this transaction attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearPrecommitToken() { + bitField0_ = (bitField0_ & ~0x00000004); + precommitToken_ = null; + if (precommitTokenBuilder_ != null) { + precommitTokenBuilder_.dispose(); + precommitTokenBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction
    +     * is on a multiplexed session. Pass the precommit token with the highest
    +     * sequence number from this transaction attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder + getPrecommitTokenBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetPrecommitTokenFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction
    +     * is on a multiplexed session. Pass the precommit token with the highest
    +     * sequence number from this transaction attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + if (precommitTokenBuilder_ != null) { + return precommitTokenBuilder_.getMessageOrBuilder(); + } else { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction
    +     * is on a multiplexed session. Pass the precommit token with the highest
    +     * sequence number from this transaction attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + internalGetPrecommitTokenFieldBuilder() { + if (precommitTokenBuilder_ == null) { + precommitTokenBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder>( + getPrecommitToken(), getParentForChildren(), isClean()); + precommitToken_ = null; + } + return precommitTokenBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ExecuteBatchDmlResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteBatchDmlResponse) + private static final com.google.spanner.v1.ExecuteBatchDmlResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.ExecuteBatchDmlResponse(); + } + + public static com.google.spanner.v1.ExecuteBatchDmlResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ExecuteBatchDmlResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.ExecuteBatchDmlResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlResponseOrBuilder.java new file mode 100644 index 000000000000..5fe101215546 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlResponseOrBuilder.java @@ -0,0 +1,215 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface ExecuteBatchDmlResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ExecuteBatchDmlResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +   * request that ran successfully, in the same order as the statements in the
    +   * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +   * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +   * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +   * modified by the statement.
    +   *
    +   * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +   * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +   * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + java.util.List getResultSetsList(); + + /** + * + * + *
    +   * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +   * request that ran successfully, in the same order as the statements in the
    +   * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +   * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +   * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +   * modified by the statement.
    +   *
    +   * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +   * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +   * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + com.google.spanner.v1.ResultSet getResultSets(int index); + + /** + * + * + *
    +   * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +   * request that ran successfully, in the same order as the statements in the
    +   * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +   * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +   * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +   * modified by the statement.
    +   *
    +   * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +   * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +   * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + int getResultSetsCount(); + + /** + * + * + *
    +   * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +   * request that ran successfully, in the same order as the statements in the
    +   * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +   * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +   * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +   * modified by the statement.
    +   *
    +   * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +   * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +   * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + java.util.List getResultSetsOrBuilderList(); + + /** + * + * + *
    +   * One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
    +   * request that ran successfully, in the same order as the statements in the
    +   * request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
    +   * rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
    +   * [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
    +   * modified by the statement.
    +   *
    +   * Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
    +   * contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
    +   * 
    + * + * repeated .google.spanner.v1.ResultSet result_sets = 1; + */ + com.google.spanner.v1.ResultSetOrBuilder getResultSetsOrBuilder(int index); + + /** + * + * + *
    +   * If all DML statements are executed successfully, the status is `OK`.
    +   * Otherwise, the error status of the first failed statement.
    +   * 
    + * + * .google.rpc.Status status = 2; + * + * @return Whether the status field is set. + */ + boolean hasStatus(); + + /** + * + * + *
    +   * If all DML statements are executed successfully, the status is `OK`.
    +   * Otherwise, the error status of the first failed statement.
    +   * 
    + * + * .google.rpc.Status status = 2; + * + * @return The status. + */ + com.google.rpc.Status getStatus(); + + /** + * + * + *
    +   * If all DML statements are executed successfully, the status is `OK`.
    +   * Otherwise, the error status of the first failed statement.
    +   * 
    + * + * .google.rpc.Status status = 2; + */ + com.google.rpc.StatusOrBuilder getStatusOrBuilder(); + + /** + * + * + *
    +   * Optional. A precommit token is included if the read-write transaction
    +   * is on a multiplexed session. Pass the precommit token with the highest
    +   * sequence number from this transaction attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + boolean hasPrecommitToken(); + + /** + * + * + *
    +   * Optional. A precommit token is included if the read-write transaction
    +   * is on a multiplexed session. Pass the precommit token with the highest
    +   * sequence number from this transaction attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken(); + + /** + * + * + *
    +   * Optional. A precommit token is included if the read-write transaction
    +   * is on a multiplexed session. Pass the precommit token with the highest
    +   * sequence number from this transaction attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder getPrecommitTokenOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteSqlRequest.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteSqlRequest.java new file mode 100644 index 000000000000..c4fb3c56ef08 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteSqlRequest.java @@ -0,0 +1,5600 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
    + * [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql].
    + * 
    + * + * Protobuf type {@code google.spanner.v1.ExecuteSqlRequest} + */ +@com.google.protobuf.Generated +public final class ExecuteSqlRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ExecuteSqlRequest) + ExecuteSqlRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ExecuteSqlRequest"); + } + + // Use ExecuteSqlRequest.newBuilder() to construct. + private ExecuteSqlRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ExecuteSqlRequest() { + session_ = ""; + sql_ = ""; + resumeToken_ = com.google.protobuf.ByteString.EMPTY; + queryMode_ = 0; + partitionToken_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteSqlRequest_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 5: + return internalGetParamTypes(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteSqlRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ExecuteSqlRequest.class, + com.google.spanner.v1.ExecuteSqlRequest.Builder.class); + } + + /** + * + * + *
    +   * Mode in which the statement must be processed.
    +   * 
    + * + * Protobuf enum {@code google.spanner.v1.ExecuteSqlRequest.QueryMode} + */ + public enum QueryMode implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * The default mode. Only the statement results are returned.
    +     * 
    + * + * NORMAL = 0; + */ + NORMAL(0), + /** + * + * + *
    +     * This mode returns only the query plan, without any results or
    +     * execution statistics information.
    +     * 
    + * + * PLAN = 1; + */ + PLAN(1), + /** + * + * + *
    +     * This mode returns the query plan, overall execution statistics,
    +     * operator level execution statistics along with the results. This has a
    +     * performance overhead compared to the other modes. It isn't recommended
    +     * to use this mode for production traffic.
    +     * 
    + * + * PROFILE = 2; + */ + PROFILE(2), + /** + * + * + *
    +     * This mode returns the overall (but not operator-level) execution
    +     * statistics along with the results.
    +     * 
    + * + * WITH_STATS = 3; + */ + WITH_STATS(3), + /** + * + * + *
    +     * This mode returns the query plan, overall (but not operator-level)
    +     * execution statistics along with the results.
    +     * 
    + * + * WITH_PLAN_AND_STATS = 4; + */ + WITH_PLAN_AND_STATS(4), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "QueryMode"); + } + + /** + * + * + *
    +     * The default mode. Only the statement results are returned.
    +     * 
    + * + * NORMAL = 0; + */ + public static final int NORMAL_VALUE = 0; + + /** + * + * + *
    +     * This mode returns only the query plan, without any results or
    +     * execution statistics information.
    +     * 
    + * + * PLAN = 1; + */ + public static final int PLAN_VALUE = 1; + + /** + * + * + *
    +     * This mode returns the query plan, overall execution statistics,
    +     * operator level execution statistics along with the results. This has a
    +     * performance overhead compared to the other modes. It isn't recommended
    +     * to use this mode for production traffic.
    +     * 
    + * + * PROFILE = 2; + */ + public static final int PROFILE_VALUE = 2; + + /** + * + * + *
    +     * This mode returns the overall (but not operator-level) execution
    +     * statistics along with the results.
    +     * 
    + * + * WITH_STATS = 3; + */ + public static final int WITH_STATS_VALUE = 3; + + /** + * + * + *
    +     * This mode returns the query plan, overall (but not operator-level)
    +     * execution statistics along with the results.
    +     * 
    + * + * WITH_PLAN_AND_STATS = 4; + */ + public static final int WITH_PLAN_AND_STATS_VALUE = 4; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static QueryMode valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static QueryMode forNumber(int value) { + switch (value) { + case 0: + return NORMAL; + case 1: + return PLAN; + case 2: + return PROFILE; + case 3: + return WITH_STATS; + case 4: + return WITH_PLAN_AND_STATS; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public QueryMode findValueByNumber(int number) { + return QueryMode.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.v1.ExecuteSqlRequest.getDescriptor().getEnumTypes().get(0); + } + + private static final QueryMode[] VALUES = values(); + + 
public static QueryMode valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private QueryMode(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.v1.ExecuteSqlRequest.QueryMode) + } + + public interface QueryOptionsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ExecuteSqlRequest.QueryOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * An option to control the selection of optimizer version.
    +     *
    +     * This parameter allows individual queries to pick different query
    +     * optimizer versions.
    +     *
    +     * Specifying `latest` as a value instructs Cloud Spanner to use the
    +     * latest supported query optimizer version. If not specified, Cloud Spanner
    +     * uses the optimizer version set at the database level options. Any other
    +     * positive integer (from the list of supported optimizer versions)
    +     * overrides the default optimizer version for query execution.
    +     *
    +     * The list of supported optimizer versions can be queried from
    +     * `SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS`.
    +     *
    +     * Executing a SQL statement with an invalid optimizer version fails with
    +     * an `INVALID_ARGUMENT` error.
    +     *
    +     * See
    +     * https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer
    +     * for more information on managing the query optimizer.
    +     *
    +     * The `optimizer_version` statement hint has precedence over this setting.
    +     * 
    + * + * string optimizer_version = 1; + * + * @return The optimizerVersion. + */ + java.lang.String getOptimizerVersion(); + + /** + * + * + *
    +     * An option to control the selection of optimizer version.
    +     *
    +     * This parameter allows individual queries to pick different query
    +     * optimizer versions.
    +     *
    +     * Specifying `latest` as a value instructs Cloud Spanner to use the
    +     * latest supported query optimizer version. If not specified, Cloud Spanner
    +     * uses the optimizer version set at the database level options. Any other
    +     * positive integer (from the list of supported optimizer versions)
    +     * overrides the default optimizer version for query execution.
    +     *
    +     * The list of supported optimizer versions can be queried from
    +     * `SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS`.
    +     *
    +     * Executing a SQL statement with an invalid optimizer version fails with
    +     * an `INVALID_ARGUMENT` error.
    +     *
    +     * See
    +     * https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer
    +     * for more information on managing the query optimizer.
    +     *
    +     * The `optimizer_version` statement hint has precedence over this setting.
    +     * 
    + * + * string optimizer_version = 1; + * + * @return The bytes for optimizerVersion. + */ + com.google.protobuf.ByteString getOptimizerVersionBytes(); + + /** + * + * + *
    +     * An option to control the selection of optimizer statistics package.
    +     *
    +     * This parameter allows individual queries to use a different query
    +     * optimizer statistics package.
    +     *
    +     * Specifying `latest` as a value instructs Cloud Spanner to use the latest
    +     * generated statistics package. If not specified, Cloud Spanner uses
    +     * the statistics package set at the database level options, or the latest
    +     * package if the database option isn't set.
    +     *
    +     * The statistics package requested by the query has to be exempt from
    +     * garbage collection. This can be achieved with the following DDL
    +     * statement:
    +     *
    +     * ```sql
    +     * ALTER STATISTICS <package_name> SET OPTIONS (allow_gc=false)
    +     * ```
    +     *
    +     * The list of available statistics packages can be queried from
    +     * `INFORMATION_SCHEMA.SPANNER_STATISTICS`.
    +     *
    +     * Executing a SQL statement with an invalid optimizer statistics package
    +     * or with a statistics package that allows garbage collection fails with
    +     * an `INVALID_ARGUMENT` error.
    +     * 
    + * + * string optimizer_statistics_package = 2; + * + * @return The optimizerStatisticsPackage. + */ + java.lang.String getOptimizerStatisticsPackage(); + + /** + * + * + *
    +     * An option to control the selection of optimizer statistics package.
    +     *
    +     * This parameter allows individual queries to use a different query
    +     * optimizer statistics package.
    +     *
    +     * Specifying `latest` as a value instructs Cloud Spanner to use the latest
    +     * generated statistics package. If not specified, Cloud Spanner uses
    +     * the statistics package set at the database level options, or the latest
    +     * package if the database option isn't set.
    +     *
    +     * The statistics package requested by the query has to be exempt from
    +     * garbage collection. This can be achieved with the following DDL
    +     * statement:
    +     *
    +     * ```sql
    +     * ALTER STATISTICS <package_name> SET OPTIONS (allow_gc=false)
    +     * ```
    +     *
    +     * The list of available statistics packages can be queried from
    +     * `INFORMATION_SCHEMA.SPANNER_STATISTICS`.
    +     *
    +     * Executing a SQL statement with an invalid optimizer statistics package
    +     * or with a statistics package that allows garbage collection fails with
    +     * an `INVALID_ARGUMENT` error.
    +     * 
    + * + * string optimizer_statistics_package = 2; + * + * @return The bytes for optimizerStatisticsPackage. + */ + com.google.protobuf.ByteString getOptimizerStatisticsPackageBytes(); + } + + /** + * + * + *
    +   * Query optimizer configuration.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.ExecuteSqlRequest.QueryOptions} + */ + public static final class QueryOptions extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ExecuteSqlRequest.QueryOptions) + QueryOptionsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "QueryOptions"); + } + + // Use QueryOptions.newBuilder() to construct. + private QueryOptions(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private QueryOptions() { + optimizerVersion_ = ""; + optimizerStatisticsPackage_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteSqlRequest_QueryOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteSqlRequest_QueryOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ExecuteSqlRequest.QueryOptions.class, + com.google.spanner.v1.ExecuteSqlRequest.QueryOptions.Builder.class); + } + + public static final int OPTIMIZER_VERSION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object optimizerVersion_ = ""; + + /** + * + * + *
    +     * An option to control the selection of optimizer version.
    +     *
    +     * This parameter allows individual queries to pick different query
    +     * optimizer versions.
    +     *
    +     * Specifying `latest` as a value instructs Cloud Spanner to use the
    +     * latest supported query optimizer version. If not specified, Cloud Spanner
    +     * uses the optimizer version set at the database level options. Any other
    +     * positive integer (from the list of supported optimizer versions)
    +     * overrides the default optimizer version for query execution.
    +     *
    +     * The list of supported optimizer versions can be queried from
    +     * `SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS`.
    +     *
    +     * Executing a SQL statement with an invalid optimizer version fails with
    +     * an `INVALID_ARGUMENT` error.
    +     *
    +     * See
    +     * https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer
    +     * for more information on managing the query optimizer.
    +     *
    +     * The `optimizer_version` statement hint has precedence over this setting.
    +     * 
    + * + * string optimizer_version = 1; + * + * @return The optimizerVersion. + */ + @java.lang.Override + public java.lang.String getOptimizerVersion() { + java.lang.Object ref = optimizerVersion_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + optimizerVersion_ = s; + return s; + } + } + + /** + * + * + *
    +     * An option to control the selection of optimizer version.
    +     *
    +     * This parameter allows individual queries to pick different query
    +     * optimizer versions.
    +     *
    +     * Specifying `latest` as a value instructs Cloud Spanner to use the
    +     * latest supported query optimizer version. If not specified, Cloud Spanner
    +     * uses the optimizer version set at the database level options. Any other
    +     * positive integer (from the list of supported optimizer versions)
    +     * overrides the default optimizer version for query execution.
    +     *
    +     * The list of supported optimizer versions can be queried from
    +     * `SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS`.
    +     *
    +     * Executing a SQL statement with an invalid optimizer version fails with
    +     * an `INVALID_ARGUMENT` error.
    +     *
    +     * See
    +     * https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer
    +     * for more information on managing the query optimizer.
    +     *
    +     * The `optimizer_version` statement hint has precedence over this setting.
    +     * 
    + * + * string optimizer_version = 1; + * + * @return The bytes for optimizerVersion. + */ + @java.lang.Override + public com.google.protobuf.ByteString getOptimizerVersionBytes() { + java.lang.Object ref = optimizerVersion_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + optimizerVersion_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OPTIMIZER_STATISTICS_PACKAGE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object optimizerStatisticsPackage_ = ""; + + /** + * + * + *
    +     * An option to control the selection of optimizer statistics package.
    +     *
    +     * This parameter allows individual queries to use a different query
    +     * optimizer statistics package.
    +     *
    +     * Specifying `latest` as a value instructs Cloud Spanner to use the latest
    +     * generated statistics package. If not specified, Cloud Spanner uses
    +     * the statistics package set at the database level options, or the latest
    +     * package if the database option isn't set.
    +     *
    +     * The statistics package requested by the query has to be exempt from
    +     * garbage collection. This can be achieved with the following DDL
    +     * statement:
    +     *
    +     * ```sql
    +     * ALTER STATISTICS <package_name> SET OPTIONS (allow_gc=false)
    +     * ```
    +     *
    +     * The list of available statistics packages can be queried from
    +     * `INFORMATION_SCHEMA.SPANNER_STATISTICS`.
    +     *
    +     * Executing a SQL statement with an invalid optimizer statistics package
    +     * or with a statistics package that allows garbage collection fails with
    +     * an `INVALID_ARGUMENT` error.
    +     * 
    + * + * string optimizer_statistics_package = 2; + * + * @return The optimizerStatisticsPackage. + */ + @java.lang.Override + public java.lang.String getOptimizerStatisticsPackage() { + java.lang.Object ref = optimizerStatisticsPackage_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + optimizerStatisticsPackage_ = s; + return s; + } + } + + /** + * + * + *
    +     * An option to control the selection of optimizer statistics package.
    +     *
    +     * This parameter allows individual queries to use a different query
    +     * optimizer statistics package.
    +     *
    +     * Specifying `latest` as a value instructs Cloud Spanner to use the latest
    +     * generated statistics package. If not specified, Cloud Spanner uses
    +     * the statistics package set at the database level options, or the latest
    +     * package if the database option isn't set.
    +     *
    +     * The statistics package requested by the query has to be exempt from
    +     * garbage collection. This can be achieved with the following DDL
    +     * statement:
    +     *
    +     * ```sql
    +     * ALTER STATISTICS <package_name> SET OPTIONS (allow_gc=false)
    +     * ```
    +     *
    +     * The list of available statistics packages can be queried from
    +     * `INFORMATION_SCHEMA.SPANNER_STATISTICS`.
    +     *
    +     * Executing a SQL statement with an invalid optimizer statistics package
    +     * or with a statistics package that allows garbage collection fails with
    +     * an `INVALID_ARGUMENT` error.
    +     * 
    + * + * string optimizer_statistics_package = 2; + * + * @return The bytes for optimizerStatisticsPackage. + */ + @java.lang.Override + public com.google.protobuf.ByteString getOptimizerStatisticsPackageBytes() { + java.lang.Object ref = optimizerStatisticsPackage_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + optimizerStatisticsPackage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(optimizerVersion_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, optimizerVersion_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(optimizerStatisticsPackage_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, optimizerStatisticsPackage_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(optimizerVersion_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, optimizerVersion_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(optimizerStatisticsPackage_)) { + size += + com.google.protobuf.GeneratedMessage.computeStringSize(2, optimizerStatisticsPackage_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { 
+ if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.ExecuteSqlRequest.QueryOptions)) { + return super.equals(obj); + } + com.google.spanner.v1.ExecuteSqlRequest.QueryOptions other = + (com.google.spanner.v1.ExecuteSqlRequest.QueryOptions) obj; + + if (!getOptimizerVersion().equals(other.getOptimizerVersion())) return false; + if (!getOptimizerStatisticsPackage().equals(other.getOptimizerStatisticsPackage())) + return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + OPTIMIZER_VERSION_FIELD_NUMBER; + hash = (53 * hash) + getOptimizerVersion().hashCode(); + hash = (37 * hash) + OPTIMIZER_STATISTICS_PACKAGE_FIELD_NUMBER; + hash = (53 * hash) + getOptimizerStatisticsPackage().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ExecuteSqlRequest.QueryOptions parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ExecuteSqlRequest.QueryOptions parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteSqlRequest.QueryOptions parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ExecuteSqlRequest.QueryOptions parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteSqlRequest.QueryOptions parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ExecuteSqlRequest.QueryOptions parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteSqlRequest.QueryOptions parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ExecuteSqlRequest.QueryOptions parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteSqlRequest.QueryOptions parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ExecuteSqlRequest.QueryOptions parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteSqlRequest.QueryOptions parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.spanner.v1.ExecuteSqlRequest.QueryOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.v1.ExecuteSqlRequest.QueryOptions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * Query optimizer configuration.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.ExecuteSqlRequest.QueryOptions} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ExecuteSqlRequest.QueryOptions) + com.google.spanner.v1.ExecuteSqlRequest.QueryOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteSqlRequest_QueryOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteSqlRequest_QueryOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ExecuteSqlRequest.QueryOptions.class, + com.google.spanner.v1.ExecuteSqlRequest.QueryOptions.Builder.class); + } + + // Construct using com.google.spanner.v1.ExecuteSqlRequest.QueryOptions.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + optimizerVersion_ = ""; + optimizerStatisticsPackage_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteSqlRequest_QueryOptions_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.ExecuteSqlRequest.QueryOptions getDefaultInstanceForType() { + return com.google.spanner.v1.ExecuteSqlRequest.QueryOptions.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ExecuteSqlRequest.QueryOptions build() { + com.google.spanner.v1.ExecuteSqlRequest.QueryOptions result = buildPartial(); + if 
(!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.ExecuteSqlRequest.QueryOptions buildPartial() { + com.google.spanner.v1.ExecuteSqlRequest.QueryOptions result = + new com.google.spanner.v1.ExecuteSqlRequest.QueryOptions(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.ExecuteSqlRequest.QueryOptions result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.optimizerVersion_ = optimizerVersion_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.optimizerStatisticsPackage_ = optimizerStatisticsPackage_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.ExecuteSqlRequest.QueryOptions) { + return mergeFrom((com.google.spanner.v1.ExecuteSqlRequest.QueryOptions) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.ExecuteSqlRequest.QueryOptions other) { + if (other == com.google.spanner.v1.ExecuteSqlRequest.QueryOptions.getDefaultInstance()) + return this; + if (!other.getOptimizerVersion().isEmpty()) { + optimizerVersion_ = other.optimizerVersion_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getOptimizerStatisticsPackage().isEmpty()) { + optimizerStatisticsPackage_ = other.optimizerStatisticsPackage_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + 
throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + optimizerVersion_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + optimizerStatisticsPackage_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object optimizerVersion_ = ""; + + /** + * + * + *
    +       * An option to control the selection of optimizer version.
    +       *
    +       * This parameter allows individual queries to pick different query
    +       * optimizer versions.
    +       *
    +       * Specifying `latest` as a value instructs Cloud Spanner to use the
    +       * latest supported query optimizer version. If not specified, Cloud Spanner
    +       * uses the optimizer version set at the database level options. Any other
    +       * positive integer (from the list of supported optimizer versions)
    +       * overrides the default optimizer version for query execution.
    +       *
    +       * The list of supported optimizer versions can be queried from
    +       * `SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS`.
    +       *
    +       * Executing a SQL statement with an invalid optimizer version fails with
    +       * an `INVALID_ARGUMENT` error.
    +       *
    +       * See
    +       * https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer
    +       * for more information on managing the query optimizer.
    +       *
    +       * The `optimizer_version` statement hint has precedence over this setting.
    +       * 
    + * + * string optimizer_version = 1; + * + * @return The optimizerVersion. + */ + public java.lang.String getOptimizerVersion() { + java.lang.Object ref = optimizerVersion_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + optimizerVersion_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * An option to control the selection of optimizer version.
    +       *
    +       * This parameter allows individual queries to pick different query
    +       * optimizer versions.
    +       *
    +       * Specifying `latest` as a value instructs Cloud Spanner to use the
    +       * latest supported query optimizer version. If not specified, Cloud Spanner
    +       * uses the optimizer version set at the database level options. Any other
    +       * positive integer (from the list of supported optimizer versions)
    +       * overrides the default optimizer version for query execution.
    +       *
    +       * The list of supported optimizer versions can be queried from
    +       * `SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS`.
    +       *
    +       * Executing a SQL statement with an invalid optimizer version fails with
    +       * an `INVALID_ARGUMENT` error.
    +       *
    +       * See
    +       * https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer
    +       * for more information on managing the query optimizer.
    +       *
    +       * The `optimizer_version` statement hint has precedence over this setting.
    +       * 
    + * + * string optimizer_version = 1; + * + * @return The bytes for optimizerVersion. + */ + public com.google.protobuf.ByteString getOptimizerVersionBytes() { + java.lang.Object ref = optimizerVersion_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + optimizerVersion_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * An option to control the selection of optimizer version.
    +       *
    +       * This parameter allows individual queries to pick different query
    +       * optimizer versions.
    +       *
    +       * Specifying `latest` as a value instructs Cloud Spanner to use the
    +       * latest supported query optimizer version. If not specified, Cloud Spanner
    +       * uses the optimizer version set at the database level options. Any other
    +       * positive integer (from the list of supported optimizer versions)
    +       * overrides the default optimizer version for query execution.
    +       *
    +       * The list of supported optimizer versions can be queried from
    +       * `SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS`.
    +       *
    +       * Executing a SQL statement with an invalid optimizer version fails with
    +       * an `INVALID_ARGUMENT` error.
    +       *
    +       * See
    +       * https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer
    +       * for more information on managing the query optimizer.
    +       *
    +       * The `optimizer_version` statement hint has precedence over this setting.
    +       * 
    + * + * string optimizer_version = 1; + * + * @param value The optimizerVersion to set. + * @return This builder for chaining. + */ + public Builder setOptimizerVersion(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + optimizerVersion_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * An option to control the selection of optimizer version.
    +       *
    +       * This parameter allows individual queries to pick different query
    +       * optimizer versions.
    +       *
    +       * Specifying `latest` as a value instructs Cloud Spanner to use the
    +       * latest supported query optimizer version. If not specified, Cloud Spanner
    +       * uses the optimizer version set at the database level options. Any other
    +       * positive integer (from the list of supported optimizer versions)
    +       * overrides the default optimizer version for query execution.
    +       *
    +       * The list of supported optimizer versions can be queried from
    +       * `SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS`.
    +       *
    +       * Executing a SQL statement with an invalid optimizer version fails with
    +       * an `INVALID_ARGUMENT` error.
    +       *
    +       * See
    +       * https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer
    +       * for more information on managing the query optimizer.
    +       *
    +       * The `optimizer_version` statement hint has precedence over this setting.
    +       * 
    + * + * string optimizer_version = 1; + * + * @return This builder for chaining. + */ + public Builder clearOptimizerVersion() { + optimizerVersion_ = getDefaultInstance().getOptimizerVersion(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +       * An option to control the selection of optimizer version.
    +       *
    +       * This parameter allows individual queries to pick different query
    +       * optimizer versions.
    +       *
    +       * Specifying `latest` as a value instructs Cloud Spanner to use the
    +       * latest supported query optimizer version. If not specified, Cloud Spanner
    +       * uses the optimizer version set at the database level options. Any other
    +       * positive integer (from the list of supported optimizer versions)
    +       * overrides the default optimizer version for query execution.
    +       *
    +       * The list of supported optimizer versions can be queried from
    +       * `SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS`.
    +       *
    +       * Executing a SQL statement with an invalid optimizer version fails with
    +       * an `INVALID_ARGUMENT` error.
    +       *
    +       * See
    +       * https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer
    +       * for more information on managing the query optimizer.
    +       *
    +       * The `optimizer_version` statement hint has precedence over this setting.
    +       * 
    + * + * string optimizer_version = 1; + * + * @param value The bytes for optimizerVersion to set. + * @return This builder for chaining. + */ + public Builder setOptimizerVersionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + optimizerVersion_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object optimizerStatisticsPackage_ = ""; + + /** + * + * + *
    +       * An option to control the selection of optimizer statistics package.
    +       *
    +       * This parameter allows individual queries to use a different query
    +       * optimizer statistics package.
    +       *
    +       * Specifying `latest` as a value instructs Cloud Spanner to use the latest
    +       * generated statistics package. If not specified, Cloud Spanner uses
    +       * the statistics package set at the database level options, or the latest
    +       * package if the database option isn't set.
    +       *
    +       * The statistics package requested by the query has to be exempt from
    +       * garbage collection. This can be achieved with the following DDL
    +       * statement:
    +       *
    +       * ```sql
    +       * ALTER STATISTICS <package_name> SET OPTIONS (allow_gc=false)
    +       * ```
    +       *
    +       * The list of available statistics packages can be queried from
    +       * `INFORMATION_SCHEMA.SPANNER_STATISTICS`.
    +       *
    +       * Executing a SQL statement with an invalid optimizer statistics package
    +       * or with a statistics package that allows garbage collection fails with
    +       * an `INVALID_ARGUMENT` error.
    +       * 
    + * + * string optimizer_statistics_package = 2; + * + * @return The optimizerStatisticsPackage. + */ + public java.lang.String getOptimizerStatisticsPackage() { + java.lang.Object ref = optimizerStatisticsPackage_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + optimizerStatisticsPackage_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * An option to control the selection of optimizer statistics package.
    +       *
    +       * This parameter allows individual queries to use a different query
    +       * optimizer statistics package.
    +       *
    +       * Specifying `latest` as a value instructs Cloud Spanner to use the latest
    +       * generated statistics package. If not specified, Cloud Spanner uses
    +       * the statistics package set at the database level options, or the latest
    +       * package if the database option isn't set.
    +       *
    +       * The statistics package requested by the query has to be exempt from
    +       * garbage collection. This can be achieved with the following DDL
    +       * statement:
    +       *
    +       * ```sql
    +       * ALTER STATISTICS <package_name> SET OPTIONS (allow_gc=false)
    +       * ```
    +       *
    +       * The list of available statistics packages can be queried from
    +       * `INFORMATION_SCHEMA.SPANNER_STATISTICS`.
    +       *
    +       * Executing a SQL statement with an invalid optimizer statistics package
    +       * or with a statistics package that allows garbage collection fails with
    +       * an `INVALID_ARGUMENT` error.
    +       * 
    + * + * string optimizer_statistics_package = 2; + * + * @return The bytes for optimizerStatisticsPackage. + */ + public com.google.protobuf.ByteString getOptimizerStatisticsPackageBytes() { + java.lang.Object ref = optimizerStatisticsPackage_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + optimizerStatisticsPackage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * An option to control the selection of optimizer statistics package.
    +       *
    +       * This parameter allows individual queries to use a different query
    +       * optimizer statistics package.
    +       *
    +       * Specifying `latest` as a value instructs Cloud Spanner to use the latest
    +       * generated statistics package. If not specified, Cloud Spanner uses
    +       * the statistics package set at the database level options, or the latest
    +       * package if the database option isn't set.
    +       *
    +       * The statistics package requested by the query has to be exempt from
    +       * garbage collection. This can be achieved with the following DDL
    +       * statement:
    +       *
    +       * ```sql
    +       * ALTER STATISTICS <package_name> SET OPTIONS (allow_gc=false)
    +       * ```
    +       *
    +       * The list of available statistics packages can be queried from
    +       * `INFORMATION_SCHEMA.SPANNER_STATISTICS`.
    +       *
    +       * Executing a SQL statement with an invalid optimizer statistics package
    +       * or with a statistics package that allows garbage collection fails with
    +       * an `INVALID_ARGUMENT` error.
    +       * 
    + * + * string optimizer_statistics_package = 2; + * + * @param value The optimizerStatisticsPackage to set. + * @return This builder for chaining. + */ + public Builder setOptimizerStatisticsPackage(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + optimizerStatisticsPackage_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * An option to control the selection of optimizer statistics package.
    +       *
    +       * This parameter allows individual queries to use a different query
    +       * optimizer statistics package.
    +       *
    +       * Specifying `latest` as a value instructs Cloud Spanner to use the latest
    +       * generated statistics package. If not specified, Cloud Spanner uses
    +       * the statistics package set at the database level options, or the latest
    +       * package if the database option isn't set.
    +       *
    +       * The statistics package requested by the query has to be exempt from
    +       * garbage collection. This can be achieved with the following DDL
    +       * statement:
    +       *
    +       * ```sql
    +       * ALTER STATISTICS <package_name> SET OPTIONS (allow_gc=false)
    +       * ```
    +       *
    +       * The list of available statistics packages can be queried from
    +       * `INFORMATION_SCHEMA.SPANNER_STATISTICS`.
    +       *
    +       * Executing a SQL statement with an invalid optimizer statistics package
    +       * or with a statistics package that allows garbage collection fails with
    +       * an `INVALID_ARGUMENT` error.
    +       * 
    + * + * string optimizer_statistics_package = 2; + * + * @return This builder for chaining. + */ + public Builder clearOptimizerStatisticsPackage() { + optimizerStatisticsPackage_ = getDefaultInstance().getOptimizerStatisticsPackage(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +       * An option to control the selection of optimizer statistics package.
    +       *
    +       * This parameter allows individual queries to use a different query
    +       * optimizer statistics package.
    +       *
    +       * Specifying `latest` as a value instructs Cloud Spanner to use the latest
    +       * generated statistics package. If not specified, Cloud Spanner uses
    +       * the statistics package set at the database level options, or the latest
    +       * package if the database option isn't set.
    +       *
    +       * The statistics package requested by the query has to be exempt from
    +       * garbage collection. This can be achieved with the following DDL
    +       * statement:
    +       *
    +       * ```sql
    +       * ALTER STATISTICS <package_name> SET OPTIONS (allow_gc=false)
    +       * ```
    +       *
    +       * The list of available statistics packages can be queried from
    +       * `INFORMATION_SCHEMA.SPANNER_STATISTICS`.
    +       *
    +       * Executing a SQL statement with an invalid optimizer statistics package
    +       * or with a statistics package that allows garbage collection fails with
    +       * an `INVALID_ARGUMENT` error.
    +       * 
    + * + * string optimizer_statistics_package = 2; + * + * @param value The bytes for optimizerStatisticsPackage to set. + * @return This builder for chaining. + */ + public Builder setOptimizerStatisticsPackageBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + optimizerStatisticsPackage_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ExecuteSqlRequest.QueryOptions) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteSqlRequest.QueryOptions) + private static final com.google.spanner.v1.ExecuteSqlRequest.QueryOptions DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.ExecuteSqlRequest.QueryOptions(); + } + + public static com.google.spanner.v1.ExecuteSqlRequest.QueryOptions getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public QueryOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.ExecuteSqlRequest.QueryOptions getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int SESSION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object session_ = ""; + + /** + * + * + *
    +   * Required. The session in which the SQL query should be performed.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + @java.lang.Override + public java.lang.String getSession() { + java.lang.Object ref = session_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + session_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The session in which the SQL query should be performed.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSessionBytes() { + java.lang.Object ref = session_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + session_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TRANSACTION_FIELD_NUMBER = 2; + private com.google.spanner.v1.TransactionSelector transaction_; + + /** + * + * + *
    +   * The transaction to use.
    +   *
    +   * For queries, if none is provided, the default is a temporary read-only
    +   * transaction with strong concurrency.
    +   *
    +   * Standard DML statements require a read-write transaction. To protect
    +   * against replays, single-use transactions are not supported. The caller
    +   * must either supply an existing transaction ID or begin a new transaction.
    +   *
    +   * Partitioned DML requires an existing Partitioned DML transaction ID.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return Whether the transaction field is set. + */ + @java.lang.Override + public boolean hasTransaction() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The transaction to use.
    +   *
    +   * For queries, if none is provided, the default is a temporary read-only
    +   * transaction with strong concurrency.
    +   *
    +   * Standard DML statements require a read-write transaction. To protect
    +   * against replays, single-use transactions are not supported. The caller
    +   * must either supply an existing transaction ID or begin a new transaction.
    +   *
    +   * Partitioned DML requires an existing Partitioned DML transaction ID.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return The transaction. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionSelector getTransaction() { + return transaction_ == null + ? com.google.spanner.v1.TransactionSelector.getDefaultInstance() + : transaction_; + } + + /** + * + * + *
    +   * The transaction to use.
    +   *
    +   * For queries, if none is provided, the default is a temporary read-only
    +   * transaction with strong concurrency.
    +   *
    +   * Standard DML statements require a read-write transaction. To protect
    +   * against replays, single-use transactions are not supported. The caller
    +   * must either supply an existing transaction ID or begin a new transaction.
    +   *
    +   * Partitioned DML requires an existing Partitioned DML transaction ID.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + @java.lang.Override + public com.google.spanner.v1.TransactionSelectorOrBuilder getTransactionOrBuilder() { + return transaction_ == null + ? com.google.spanner.v1.TransactionSelector.getDefaultInstance() + : transaction_; + } + + public static final int SQL_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object sql_ = ""; + + /** + * + * + *
    +   * Required. The SQL string.
    +   * 
    + * + * string sql = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The sql. + */ + @java.lang.Override + public java.lang.String getSql() { + java.lang.Object ref = sql_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sql_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The SQL string.
    +   * 
    + * + * string sql = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for sql. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSqlBytes() { + java.lang.Object ref = sql_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sql_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PARAMS_FIELD_NUMBER = 4; + private com.google.protobuf.Struct params_; + + /** + * + * + *
    +   * Parameter names and values that bind to placeholders in the SQL string.
    +   *
    +   * A parameter placeholder consists of the `@` character followed by the
    +   * parameter name (for example, `@firstName`). Parameter names must conform
    +   * to the naming requirements of identifiers as specified at
    +   * https://cloud.google.com/spanner/docs/lexical#identifiers.
    +   *
    +   * Parameters can appear anywhere that a literal value is expected. The same
    +   * parameter name can be used more than once, for example:
    +   *
    +   * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +   *
    +   * It's an error to execute a SQL statement with unbound parameters.
    +   * 
    + * + * .google.protobuf.Struct params = 4; + * + * @return Whether the params field is set. + */ + @java.lang.Override + public boolean hasParams() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Parameter names and values that bind to placeholders in the SQL string.
    +   *
    +   * A parameter placeholder consists of the `@` character followed by the
    +   * parameter name (for example, `@firstName`). Parameter names must conform
    +   * to the naming requirements of identifiers as specified at
    +   * https://cloud.google.com/spanner/docs/lexical#identifiers.
    +   *
    +   * Parameters can appear anywhere that a literal value is expected. The same
    +   * parameter name can be used more than once, for example:
    +   *
    +   * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +   *
    +   * It's an error to execute a SQL statement with unbound parameters.
    +   * 
    + * + * .google.protobuf.Struct params = 4; + * + * @return The params. + */ + @java.lang.Override + public com.google.protobuf.Struct getParams() { + return params_ == null ? com.google.protobuf.Struct.getDefaultInstance() : params_; + } + + /** + * + * + *
    +   * Parameter names and values that bind to placeholders in the SQL string.
    +   *
    +   * A parameter placeholder consists of the `@` character followed by the
    +   * parameter name (for example, `@firstName`). Parameter names must conform
    +   * to the naming requirements of identifiers as specified at
    +   * https://cloud.google.com/spanner/docs/lexical#identifiers.
    +   *
    +   * Parameters can appear anywhere that a literal value is expected. The same
    +   * parameter name can be used more than once, for example:
    +   *
    +   * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +   *
    +   * It's an error to execute a SQL statement with unbound parameters.
    +   * 
    + * + * .google.protobuf.Struct params = 4; + */ + @java.lang.Override + public com.google.protobuf.StructOrBuilder getParamsOrBuilder() { + return params_ == null ? com.google.protobuf.Struct.getDefaultInstance() : params_; + } + + public static final int PARAM_TYPES_FIELD_NUMBER = 5; + + private static final class ParamTypesDefaultEntryHolder { + static final com.google.protobuf.MapEntry + defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteSqlRequest_ParamTypesEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.MESSAGE, + com.google.spanner.v1.Type.getDefaultInstance()); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField paramTypes_; + + private com.google.protobuf.MapField + internalGetParamTypes() { + if (paramTypes_ == null) { + return com.google.protobuf.MapField.emptyMapField(ParamTypesDefaultEntryHolder.defaultEntry); + } + return paramTypes_; + } + + public int getParamTypesCount() { + return internalGetParamTypes().getMap().size(); + } + + /** + * + * + *
    +   * It isn't always possible for Cloud Spanner to infer the right SQL type
    +   * from a JSON value. For example, values of type `BYTES` and values
    +   * of type `STRING` both appear in
    +   * [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
    +   *
    +   * In these cases, you can use `param_types` to specify the exact
    +   * SQL type for some or all of the SQL statement parameters. See the
    +   * definition of [Type][google.spanner.v1.Type] for more information
    +   * about SQL types.
    +   * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 5; + */ + @java.lang.Override + public boolean containsParamTypes(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParamTypes().getMap().containsKey(key); + } + + /** Use {@link #getParamTypesMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParamTypes() { + return getParamTypesMap(); + } + + /** + * + * + *
    +   * It isn't always possible for Cloud Spanner to infer the right SQL type
    +   * from a JSON value. For example, values of type `BYTES` and values
    +   * of type `STRING` both appear in
    +   * [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
    +   *
    +   * In these cases, you can use `param_types` to specify the exact
    +   * SQL type for some or all of the SQL statement parameters. See the
    +   * definition of [Type][google.spanner.v1.Type] for more information
    +   * about SQL types.
    +   * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 5; + */ + @java.lang.Override + public java.util.Map getParamTypesMap() { + return internalGetParamTypes().getMap(); + } + + /** + * + * + *
    +   * It isn't always possible for Cloud Spanner to infer the right SQL type
    +   * from a JSON value. For example, values of type `BYTES` and values
    +   * of type `STRING` both appear in
    +   * [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
    +   *
    +   * In these cases, you can use `param_types` to specify the exact
    +   * SQL type for some or all of the SQL statement parameters. See the
    +   * definition of [Type][google.spanner.v1.Type] for more information
    +   * about SQL types.
    +   * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 5; + */ + @java.lang.Override + public /* nullable */ com.google.spanner.v1.Type getParamTypesOrDefault( + java.lang.String key, + /* nullable */ + com.google.spanner.v1.Type defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetParamTypes().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
    +   * It isn't always possible for Cloud Spanner to infer the right SQL type
    +   * from a JSON value. For example, values of type `BYTES` and values
    +   * of type `STRING` both appear in
    +   * [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
    +   *
    +   * In these cases, you can use `param_types` to specify the exact
    +   * SQL type for some or all of the SQL statement parameters. See the
    +   * definition of [Type][google.spanner.v1.Type] for more information
    +   * about SQL types.
    +   * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 5; + */ + @java.lang.Override + public com.google.spanner.v1.Type getParamTypesOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetParamTypes().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int RESUME_TOKEN_FIELD_NUMBER = 6; + private com.google.protobuf.ByteString resumeToken_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * If this request is resuming a previously interrupted SQL statement
    +   * execution, `resume_token` should be copied from the last
    +   * [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the
    +   * interruption. Doing this enables the new SQL statement execution to resume
    +   * where the last one left off. The rest of the request parameters must
    +   * exactly match the request that yielded this token.
    +   * 
    + * + * bytes resume_token = 6; + * + * @return The resumeToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getResumeToken() { + return resumeToken_; + } + + public static final int QUERY_MODE_FIELD_NUMBER = 7; + private int queryMode_ = 0; + + /** + * + * + *
    +   * Used to control the amount of debugging information returned in
    +   * [ResultSetStats][google.spanner.v1.ResultSetStats]. If
    +   * [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is
    +   * set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only
    +   * be set to
    +   * [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL].
    +   * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryMode query_mode = 7; + * + * @return The enum numeric value on the wire for queryMode. + */ + @java.lang.Override + public int getQueryModeValue() { + return queryMode_; + } + + /** + * + * + *
    +   * Used to control the amount of debugging information returned in
    +   * [ResultSetStats][google.spanner.v1.ResultSetStats]. If
    +   * [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is
    +   * set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only
    +   * be set to
    +   * [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL].
    +   * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryMode query_mode = 7; + * + * @return The queryMode. + */ + @java.lang.Override + public com.google.spanner.v1.ExecuteSqlRequest.QueryMode getQueryMode() { + com.google.spanner.v1.ExecuteSqlRequest.QueryMode result = + com.google.spanner.v1.ExecuteSqlRequest.QueryMode.forNumber(queryMode_); + return result == null ? com.google.spanner.v1.ExecuteSqlRequest.QueryMode.UNRECOGNIZED : result; + } + + public static final int PARTITION_TOKEN_FIELD_NUMBER = 8; + private com.google.protobuf.ByteString partitionToken_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * If present, results are restricted to the specified partition
    +   * previously created using `PartitionQuery`. There must be an exact
    +   * match for the values of fields common to this message and the
    +   * `PartitionQueryRequest` message used to create this `partition_token`.
    +   * 
    + * + * bytes partition_token = 8; + * + * @return The partitionToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPartitionToken() { + return partitionToken_; + } + + public static final int SEQNO_FIELD_NUMBER = 9; + private long seqno_ = 0L; + + /** + * + * + *
    +   * A per-transaction sequence number used to identify this request. This field
    +   * makes each request idempotent such that if the request is received multiple
    +   * times, at most one succeeds.
    +   *
    +   * The sequence number must be monotonically increasing within the
    +   * transaction. If a request arrives for the first time with an out-of-order
    +   * sequence number, the transaction can be aborted. Replays of previously
    +   * handled requests yield the same response as the first execution.
    +   *
    +   * Required for DML statements. Ignored for queries.
    +   * 
    + * + * int64 seqno = 9; + * + * @return The seqno. + */ + @java.lang.Override + public long getSeqno() { + return seqno_; + } + + public static final int QUERY_OPTIONS_FIELD_NUMBER = 10; + private com.google.spanner.v1.ExecuteSqlRequest.QueryOptions queryOptions_; + + /** + * + * + *
    +   * Query optimizer configuration to use for the given query.
    +   * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryOptions query_options = 10; + * + * @return Whether the queryOptions field is set. + */ + @java.lang.Override + public boolean hasQueryOptions() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * Query optimizer configuration to use for the given query.
    +   * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryOptions query_options = 10; + * + * @return The queryOptions. + */ + @java.lang.Override + public com.google.spanner.v1.ExecuteSqlRequest.QueryOptions getQueryOptions() { + return queryOptions_ == null + ? com.google.spanner.v1.ExecuteSqlRequest.QueryOptions.getDefaultInstance() + : queryOptions_; + } + + /** + * + * + *
    +   * Query optimizer configuration to use for the given query.
    +   * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryOptions query_options = 10; + */ + @java.lang.Override + public com.google.spanner.v1.ExecuteSqlRequest.QueryOptionsOrBuilder getQueryOptionsOrBuilder() { + return queryOptions_ == null + ? com.google.spanner.v1.ExecuteSqlRequest.QueryOptions.getDefaultInstance() + : queryOptions_; + } + + public static final int REQUEST_OPTIONS_FIELD_NUMBER = 11; + private com.google.spanner.v1.RequestOptions requestOptions_; + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + * + * @return Whether the requestOptions field is set. + */ + @java.lang.Override + public boolean hasRequestOptions() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + * + * @return The requestOptions. + */ + @java.lang.Override + public com.google.spanner.v1.RequestOptions getRequestOptions() { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + */ + @java.lang.Override + public com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder() { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } + + public static final int DIRECTED_READ_OPTIONS_FIELD_NUMBER = 15; + private com.google.spanner.v1.DirectedReadOptions directedReadOptions_; + + /** + * + * + *
    +   * Directed read options for this request.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 15; + * + * @return Whether the directedReadOptions field is set. + */ + @java.lang.Override + public boolean hasDirectedReadOptions() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +   * Directed read options for this request.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 15; + * + * @return The directedReadOptions. + */ + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions getDirectedReadOptions() { + return directedReadOptions_ == null + ? com.google.spanner.v1.DirectedReadOptions.getDefaultInstance() + : directedReadOptions_; + } + + /** + * + * + *
    +   * Directed read options for this request.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 15; + */ + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptionsOrBuilder getDirectedReadOptionsOrBuilder() { + return directedReadOptions_ == null + ? com.google.spanner.v1.DirectedReadOptions.getDefaultInstance() + : directedReadOptions_; + } + + public static final int DATA_BOOST_ENABLED_FIELD_NUMBER = 16; + private boolean dataBoostEnabled_ = false; + + /** + * + * + *
    +   * If this is for a partitioned query and this field is set to `true`, the
    +   * request is executed with Spanner Data Boost independent compute resources.
    +   *
    +   * If the field is set to `true` but the request doesn't set
    +   * `partition_token`, the API returns an `INVALID_ARGUMENT` error.
    +   * 
    + * + * bool data_boost_enabled = 16; + * + * @return The dataBoostEnabled. + */ + @java.lang.Override + public boolean getDataBoostEnabled() { + return dataBoostEnabled_; + } + + public static final int LAST_STATEMENT_FIELD_NUMBER = 17; + private boolean lastStatement_ = false; + + /** + * + * + *
    +   * Optional. If set to `true`, this statement marks the end of the
    +   * transaction. After this statement executes, you must commit or abort the
    +   * transaction. Attempts to execute any other requests against this
    +   * transaction (including reads and queries) are rejected.
    +   *
    +   * For DML statements, setting this option might cause some error reporting to
    +   * be deferred until commit time (for example, validation of unique
    +   * constraints). Given this, successful execution of a DML statement shouldn't
    +   * be assumed until a subsequent `Commit` call completes successfully.
    +   * 
    + * + * bool last_statement = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The lastStatement. + */ + @java.lang.Override + public boolean getLastStatement() { + return lastStatement_; + } + + public static final int ROUTING_HINT_FIELD_NUMBER = 18; + private com.google.spanner.v1.RoutingHint routingHint_; + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the routingHint field is set. + */ + @java.lang.Override + public boolean hasRoutingHint() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The routingHint. + */ + @java.lang.Override + public com.google.spanner.v1.RoutingHint getRoutingHint() { + return routingHint_ == null + ? com.google.spanner.v1.RoutingHint.getDefaultInstance() + : routingHint_; + } + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.RoutingHintOrBuilder getRoutingHintOrBuilder() { + return routingHint_ == null + ? com.google.spanner.v1.RoutingHint.getDefaultInstance() + : routingHint_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(session_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, session_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getTransaction()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sql_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, sql_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(4, getParams()); + } + com.google.protobuf.GeneratedMessage.serializeStringMapTo( + output, internalGetParamTypes(), ParamTypesDefaultEntryHolder.defaultEntry, 5); + if (!resumeToken_.isEmpty()) { + output.writeBytes(6, resumeToken_); + } + if (queryMode_ != com.google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL.getNumber()) { + output.writeEnum(7, queryMode_); + } + if (!partitionToken_.isEmpty()) { + output.writeBytes(8, partitionToken_); + } + if (seqno_ != 0L) { + output.writeInt64(9, seqno_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(10, getQueryOptions()); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(11, getRequestOptions()); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeMessage(15, getDirectedReadOptions()); + } + if (dataBoostEnabled_ != 
false) { + output.writeBool(16, dataBoostEnabled_); + } + if (lastStatement_ != false) { + output.writeBool(17, lastStatement_); + } + if (((bitField0_ & 0x00000020) != 0)) { + output.writeMessage(18, getRoutingHint()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(session_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, session_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getTransaction()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sql_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, sql_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getParams()); + } + for (java.util.Map.Entry entry : + internalGetParamTypes().getMap().entrySet()) { + com.google.protobuf.MapEntry paramTypes__ = + ParamTypesDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, paramTypes__); + } + if (!resumeToken_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(6, resumeToken_); + } + if (queryMode_ != com.google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(7, queryMode_); + } + if (!partitionToken_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(8, partitionToken_); + } + if (seqno_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(9, seqno_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(10, getQueryOptions()); + } + if (((bitField0_ & 
0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(11, getRequestOptions()); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(15, getDirectedReadOptions()); + } + if (dataBoostEnabled_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(16, dataBoostEnabled_); + } + if (lastStatement_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(17, lastStatement_); + } + if (((bitField0_ & 0x00000020) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(18, getRoutingHint()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.ExecuteSqlRequest)) { + return super.equals(obj); + } + com.google.spanner.v1.ExecuteSqlRequest other = (com.google.spanner.v1.ExecuteSqlRequest) obj; + + if (!getSession().equals(other.getSession())) return false; + if (hasTransaction() != other.hasTransaction()) return false; + if (hasTransaction()) { + if (!getTransaction().equals(other.getTransaction())) return false; + } + if (!getSql().equals(other.getSql())) return false; + if (hasParams() != other.hasParams()) return false; + if (hasParams()) { + if (!getParams().equals(other.getParams())) return false; + } + if (!internalGetParamTypes().equals(other.internalGetParamTypes())) return false; + if (!getResumeToken().equals(other.getResumeToken())) return false; + if (queryMode_ != other.queryMode_) return false; + if (!getPartitionToken().equals(other.getPartitionToken())) return false; + if (getSeqno() != other.getSeqno()) return false; + if (hasQueryOptions() != other.hasQueryOptions()) return false; + if (hasQueryOptions()) { + if (!getQueryOptions().equals(other.getQueryOptions())) return false; + } + if 
(hasRequestOptions() != other.hasRequestOptions()) return false; + if (hasRequestOptions()) { + if (!getRequestOptions().equals(other.getRequestOptions())) return false; + } + if (hasDirectedReadOptions() != other.hasDirectedReadOptions()) return false; + if (hasDirectedReadOptions()) { + if (!getDirectedReadOptions().equals(other.getDirectedReadOptions())) return false; + } + if (getDataBoostEnabled() != other.getDataBoostEnabled()) return false; + if (getLastStatement() != other.getLastStatement()) return false; + if (hasRoutingHint() != other.hasRoutingHint()) return false; + if (hasRoutingHint()) { + if (!getRoutingHint().equals(other.getRoutingHint())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SESSION_FIELD_NUMBER; + hash = (53 * hash) + getSession().hashCode(); + if (hasTransaction()) { + hash = (37 * hash) + TRANSACTION_FIELD_NUMBER; + hash = (53 * hash) + getTransaction().hashCode(); + } + hash = (37 * hash) + SQL_FIELD_NUMBER; + hash = (53 * hash) + getSql().hashCode(); + if (hasParams()) { + hash = (37 * hash) + PARAMS_FIELD_NUMBER; + hash = (53 * hash) + getParams().hashCode(); + } + if (!internalGetParamTypes().getMap().isEmpty()) { + hash = (37 * hash) + PARAM_TYPES_FIELD_NUMBER; + hash = (53 * hash) + internalGetParamTypes().hashCode(); + } + hash = (37 * hash) + RESUME_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getResumeToken().hashCode(); + hash = (37 * hash) + QUERY_MODE_FIELD_NUMBER; + hash = (53 * hash) + queryMode_; + hash = (37 * hash) + PARTITION_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPartitionToken().hashCode(); + hash = (37 * hash) + SEQNO_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getSeqno()); + if (hasQueryOptions()) { + hash = (37 * 
hash) + QUERY_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getQueryOptions().hashCode(); + } + if (hasRequestOptions()) { + hash = (37 * hash) + REQUEST_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getRequestOptions().hashCode(); + } + if (hasDirectedReadOptions()) { + hash = (37 * hash) + DIRECTED_READ_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getDirectedReadOptions().hashCode(); + } + hash = (37 * hash) + DATA_BOOST_ENABLED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getDataBoostEnabled()); + hash = (37 * hash) + LAST_STATEMENT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getLastStatement()); + if (hasRoutingHint()) { + hash = (37 * hash) + ROUTING_HINT_FIELD_NUMBER; + hash = (53 * hash) + getRoutingHint().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ExecuteSqlRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ExecuteSqlRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteSqlRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ExecuteSqlRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteSqlRequest parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ExecuteSqlRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteSqlRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ExecuteSqlRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteSqlRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ExecuteSqlRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ExecuteSqlRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ExecuteSqlRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public 
Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.ExecuteSqlRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
    +   * [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql].
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.ExecuteSqlRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ExecuteSqlRequest) + com.google.spanner.v1.ExecuteSqlRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteSqlRequest_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 5: + return internalGetParamTypes(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 5: + return internalGetMutableParamTypes(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteSqlRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ExecuteSqlRequest.class, + com.google.spanner.v1.ExecuteSqlRequest.Builder.class); + } + + // Construct using com.google.spanner.v1.ExecuteSqlRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetTransactionFieldBuilder(); + internalGetParamsFieldBuilder(); + 
internalGetQueryOptionsFieldBuilder(); + internalGetRequestOptionsFieldBuilder(); + internalGetDirectedReadOptionsFieldBuilder(); + internalGetRoutingHintFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + session_ = ""; + transaction_ = null; + if (transactionBuilder_ != null) { + transactionBuilder_.dispose(); + transactionBuilder_ = null; + } + sql_ = ""; + params_ = null; + if (paramsBuilder_ != null) { + paramsBuilder_.dispose(); + paramsBuilder_ = null; + } + internalGetMutableParamTypes().clear(); + resumeToken_ = com.google.protobuf.ByteString.EMPTY; + queryMode_ = 0; + partitionToken_ = com.google.protobuf.ByteString.EMPTY; + seqno_ = 0L; + queryOptions_ = null; + if (queryOptionsBuilder_ != null) { + queryOptionsBuilder_.dispose(); + queryOptionsBuilder_ = null; + } + requestOptions_ = null; + if (requestOptionsBuilder_ != null) { + requestOptionsBuilder_.dispose(); + requestOptionsBuilder_ = null; + } + directedReadOptions_ = null; + if (directedReadOptionsBuilder_ != null) { + directedReadOptionsBuilder_.dispose(); + directedReadOptionsBuilder_ = null; + } + dataBoostEnabled_ = false; + lastStatement_ = false; + routingHint_ = null; + if (routingHintBuilder_ != null) { + routingHintBuilder_.dispose(); + routingHintBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ExecuteSqlRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.ExecuteSqlRequest getDefaultInstanceForType() { + return com.google.spanner.v1.ExecuteSqlRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ExecuteSqlRequest build() { + com.google.spanner.v1.ExecuteSqlRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + 
+ @java.lang.Override + public com.google.spanner.v1.ExecuteSqlRequest buildPartial() { + com.google.spanner.v1.ExecuteSqlRequest result = + new com.google.spanner.v1.ExecuteSqlRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.ExecuteSqlRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.session_ = session_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.transaction_ = + transactionBuilder_ == null ? transaction_ : transactionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.sql_ = sql_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.params_ = paramsBuilder_ == null ? params_ : paramsBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.paramTypes_ = + internalGetParamTypes().build(ParamTypesDefaultEntryHolder.defaultEntry); + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.resumeToken_ = resumeToken_; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.queryMode_ = queryMode_; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.partitionToken_ = partitionToken_; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.seqno_ = seqno_; + } + if (((from_bitField0_ & 0x00000200) != 0)) { + result.queryOptions_ = + queryOptionsBuilder_ == null ? queryOptions_ : queryOptionsBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000400) != 0)) { + result.requestOptions_ = + requestOptionsBuilder_ == null ? requestOptions_ : requestOptionsBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000800) != 0)) { + result.directedReadOptions_ = + directedReadOptionsBuilder_ == null + ? 
directedReadOptions_ + : directedReadOptionsBuilder_.build(); + to_bitField0_ |= 0x00000010; + } + if (((from_bitField0_ & 0x00001000) != 0)) { + result.dataBoostEnabled_ = dataBoostEnabled_; + } + if (((from_bitField0_ & 0x00002000) != 0)) { + result.lastStatement_ = lastStatement_; + } + if (((from_bitField0_ & 0x00004000) != 0)) { + result.routingHint_ = + routingHintBuilder_ == null ? routingHint_ : routingHintBuilder_.build(); + to_bitField0_ |= 0x00000020; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.ExecuteSqlRequest) { + return mergeFrom((com.google.spanner.v1.ExecuteSqlRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.ExecuteSqlRequest other) { + if (other == com.google.spanner.v1.ExecuteSqlRequest.getDefaultInstance()) return this; + if (!other.getSession().isEmpty()) { + session_ = other.session_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasTransaction()) { + mergeTransaction(other.getTransaction()); + } + if (!other.getSql().isEmpty()) { + sql_ = other.sql_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasParams()) { + mergeParams(other.getParams()); + } + internalGetMutableParamTypes().mergeFrom(other.internalGetParamTypes()); + bitField0_ |= 0x00000010; + if (!other.getResumeToken().isEmpty()) { + setResumeToken(other.getResumeToken()); + } + if (other.queryMode_ != 0) { + setQueryModeValue(other.getQueryModeValue()); + } + if (!other.getPartitionToken().isEmpty()) { + setPartitionToken(other.getPartitionToken()); + } + if (other.getSeqno() != 0L) { + setSeqno(other.getSeqno()); + } + if (other.hasQueryOptions()) { + mergeQueryOptions(other.getQueryOptions()); + } + if (other.hasRequestOptions()) { + mergeRequestOptions(other.getRequestOptions()); + } + if (other.hasDirectedReadOptions()) { + 
mergeDirectedReadOptions(other.getDirectedReadOptions()); + } + if (other.getDataBoostEnabled() != false) { + setDataBoostEnabled(other.getDataBoostEnabled()); + } + if (other.getLastStatement() != false) { + setLastStatement(other.getLastStatement()); + } + if (other.hasRoutingHint()) { + mergeRoutingHint(other.getRoutingHint()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + session_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetTransactionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + sql_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage(internalGetParamsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + com.google.protobuf.MapEntry + paramTypes__ = + input.readMessage( + ParamTypesDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableParamTypes() + .ensureBuilderMap() + .put(paramTypes__.getKey(), paramTypes__.getValue()); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 50: + { + resumeToken_ = input.readBytes(); + bitField0_ |= 0x00000020; + break; + } // case 50 + case 56: + { + queryMode_ = input.readEnum(); + bitField0_ |= 0x00000040; + break; + } // case 56 + case 66: + { + 
partitionToken_ = input.readBytes(); + bitField0_ |= 0x00000080; + break; + } // case 66 + case 72: + { + seqno_ = input.readInt64(); + bitField0_ |= 0x00000100; + break; + } // case 72 + case 82: + { + input.readMessage( + internalGetQueryOptionsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000200; + break; + } // case 82 + case 90: + { + input.readMessage( + internalGetRequestOptionsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000400; + break; + } // case 90 + case 122: + { + input.readMessage( + internalGetDirectedReadOptionsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000800; + break; + } // case 122 + case 128: + { + dataBoostEnabled_ = input.readBool(); + bitField0_ |= 0x00001000; + break; + } // case 128 + case 136: + { + lastStatement_ = input.readBool(); + bitField0_ |= 0x00002000; + break; + } // case 136 + case 146: + { + input.readMessage( + internalGetRoutingHintFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00004000; + break; + } // case 146 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object session_ = ""; + + /** + * + * + *
    +     * Required. The session in which the SQL query should be performed.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + public java.lang.String getSession() { + java.lang.Object ref = session_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + session_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The session in which the SQL query should be performed.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + public com.google.protobuf.ByteString getSessionBytes() { + java.lang.Object ref = session_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + session_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The session in which the SQL query should be performed.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The session to set. + * @return This builder for chaining. + */ + public Builder setSession(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + session_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The session in which the SQL query should be performed.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearSession() { + session_ = getDefaultInstance().getSession(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The session in which the SQL query should be performed.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for session to set. + * @return This builder for chaining. + */ + public Builder setSessionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + session_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.spanner.v1.TransactionSelector transaction_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionSelector, + com.google.spanner.v1.TransactionSelector.Builder, + com.google.spanner.v1.TransactionSelectorOrBuilder> + transactionBuilder_; + + /** + * + * + *
    +     * The transaction to use.
    +     *
    +     * For queries, if none is provided, the default is a temporary read-only
    +     * transaction with strong concurrency.
    +     *
    +     * Standard DML statements require a read-write transaction. To protect
    +     * against replays, single-use transactions are not supported. The caller
    +     * must either supply an existing transaction ID or begin a new transaction.
    +     *
    +     * Partitioned DML requires an existing Partitioned DML transaction ID.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return Whether the transaction field is set. + */ + public boolean hasTransaction() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * The transaction to use.
    +     *
    +     * For queries, if none is provided, the default is a temporary read-only
    +     * transaction with strong concurrency.
    +     *
    +     * Standard DML statements require a read-write transaction. To protect
    +     * against replays, single-use transactions are not supported. The caller
    +     * must either supply an existing transaction ID or begin a new transaction.
    +     *
    +     * Partitioned DML requires an existing Partitioned DML transaction ID.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return The transaction. + */ + public com.google.spanner.v1.TransactionSelector getTransaction() { + if (transactionBuilder_ == null) { + return transaction_ == null + ? com.google.spanner.v1.TransactionSelector.getDefaultInstance() + : transaction_; + } else { + return transactionBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The transaction to use.
    +     *
    +     * For queries, if none is provided, the default is a temporary read-only
    +     * transaction with strong concurrency.
    +     *
    +     * Standard DML statements require a read-write transaction. To protect
    +     * against replays, single-use transactions are not supported. The caller
    +     * must either supply an existing transaction ID or begin a new transaction.
    +     *
    +     * Partitioned DML requires an existing Partitioned DML transaction ID.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public Builder setTransaction(com.google.spanner.v1.TransactionSelector value) { + if (transactionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + transaction_ = value; + } else { + transactionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The transaction to use.
    +     *
    +     * For queries, if none is provided, the default is a temporary read-only
    +     * transaction with strong concurrency.
    +     *
    +     * Standard DML statements require a read-write transaction. To protect
    +     * against replays, single-use transactions are not supported. The caller
    +     * must either supply an existing transaction ID or begin a new transaction.
    +     *
    +     * Partitioned DML requires an existing Partitioned DML transaction ID.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public Builder setTransaction( + com.google.spanner.v1.TransactionSelector.Builder builderForValue) { + if (transactionBuilder_ == null) { + transaction_ = builderForValue.build(); + } else { + transactionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The transaction to use.
    +     *
    +     * For queries, if none is provided, the default is a temporary read-only
    +     * transaction with strong concurrency.
    +     *
    +     * Standard DML statements require a read-write transaction. To protect
    +     * against replays, single-use transactions are not supported. The caller
    +     * must either supply an existing transaction ID or begin a new transaction.
    +     *
    +     * Partitioned DML requires an existing Partitioned DML transaction ID.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public Builder mergeTransaction(com.google.spanner.v1.TransactionSelector value) { + if (transactionBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && transaction_ != null + && transaction_ != com.google.spanner.v1.TransactionSelector.getDefaultInstance()) { + getTransactionBuilder().mergeFrom(value); + } else { + transaction_ = value; + } + } else { + transactionBuilder_.mergeFrom(value); + } + if (transaction_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The transaction to use.
    +     *
    +     * For queries, if none is provided, the default is a temporary read-only
    +     * transaction with strong concurrency.
    +     *
    +     * Standard DML statements require a read-write transaction. To protect
    +     * against replays, single-use transactions are not supported. The caller
    +     * must either supply an existing transaction ID or begin a new transaction.
    +     *
    +     * Partitioned DML requires an existing Partitioned DML transaction ID.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public Builder clearTransaction() { + bitField0_ = (bitField0_ & ~0x00000002); + transaction_ = null; + if (transactionBuilder_ != null) { + transactionBuilder_.dispose(); + transactionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The transaction to use.
    +     *
    +     * For queries, if none is provided, the default is a temporary read-only
    +     * transaction with strong concurrency.
    +     *
    +     * Standard DML statements require a read-write transaction. To protect
    +     * against replays, single-use transactions are not supported. The caller
    +     * must either supply an existing transaction ID or begin a new transaction.
    +     *
    +     * Partitioned DML requires an existing Partitioned DML transaction ID.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public com.google.spanner.v1.TransactionSelector.Builder getTransactionBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetTransactionFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The transaction to use.
    +     *
    +     * For queries, if none is provided, the default is a temporary read-only
    +     * transaction with strong concurrency.
    +     *
    +     * Standard DML statements require a read-write transaction. To protect
    +     * against replays, single-use transactions are not supported. The caller
    +     * must either supply an existing transaction ID or begin a new transaction.
    +     *
    +     * Partitioned DML requires an existing Partitioned DML transaction ID.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public com.google.spanner.v1.TransactionSelectorOrBuilder getTransactionOrBuilder() { + if (transactionBuilder_ != null) { + return transactionBuilder_.getMessageOrBuilder(); + } else { + return transaction_ == null + ? com.google.spanner.v1.TransactionSelector.getDefaultInstance() + : transaction_; + } + } + + /** + * + * + *
    +     * The transaction to use.
    +     *
    +     * For queries, if none is provided, the default is a temporary read-only
    +     * transaction with strong concurrency.
    +     *
    +     * Standard DML statements require a read-write transaction. To protect
    +     * against replays, single-use transactions are not supported. The caller
    +     * must either supply an existing transaction ID or begin a new transaction.
    +     *
    +     * Partitioned DML requires an existing Partitioned DML transaction ID.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionSelector, + com.google.spanner.v1.TransactionSelector.Builder, + com.google.spanner.v1.TransactionSelectorOrBuilder> + internalGetTransactionFieldBuilder() { + if (transactionBuilder_ == null) { + transactionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionSelector, + com.google.spanner.v1.TransactionSelector.Builder, + com.google.spanner.v1.TransactionSelectorOrBuilder>( + getTransaction(), getParentForChildren(), isClean()); + transaction_ = null; + } + return transactionBuilder_; + } + + private java.lang.Object sql_ = ""; + + /** + * + * + *
    +     * Required. The SQL string.
    +     * 
    + * + * string sql = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The sql. + */ + public java.lang.String getSql() { + java.lang.Object ref = sql_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sql_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The SQL string.
    +     * 
    + * + * string sql = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for sql. + */ + public com.google.protobuf.ByteString getSqlBytes() { + java.lang.Object ref = sql_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sql_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The SQL string.
    +     * 
    + * + * string sql = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The sql to set. + * @return This builder for chaining. + */ + public Builder setSql(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + sql_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The SQL string.
    +     * 
    + * + * string sql = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearSql() { + sql_ = getDefaultInstance().getSql(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The SQL string.
    +     * 
    + * + * string sql = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for sql to set. + * @return This builder for chaining. + */ + public Builder setSqlBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + sql_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.protobuf.Struct params_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Struct, + com.google.protobuf.Struct.Builder, + com.google.protobuf.StructOrBuilder> + paramsBuilder_; + + /** + * + * + *
    +     * Parameter names and values that bind to placeholders in the SQL string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names must conform
    +     * to the naming requirements of identifiers as specified at
    +     * https://cloud.google.com/spanner/docs/lexical#identifiers.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The same
    +     * parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 4; + * + * @return Whether the params field is set. + */ + public boolean hasParams() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Parameter names and values that bind to placeholders in the SQL string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names must conform
    +     * to the naming requirements of identifiers as specified at
    +     * https://cloud.google.com/spanner/docs/lexical#identifiers.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The same
    +     * parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 4; + * + * @return The params. + */ + public com.google.protobuf.Struct getParams() { + if (paramsBuilder_ == null) { + return params_ == null ? com.google.protobuf.Struct.getDefaultInstance() : params_; + } else { + return paramsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Parameter names and values that bind to placeholders in the SQL string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names must conform
    +     * to the naming requirements of identifiers as specified at
    +     * https://cloud.google.com/spanner/docs/lexical#identifiers.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The same
    +     * parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 4; + */ + public Builder setParams(com.google.protobuf.Struct value) { + if (paramsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + params_ = value; + } else { + paramsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Parameter names and values that bind to placeholders in the SQL string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names must conform
    +     * to the naming requirements of identifiers as specified at
    +     * https://cloud.google.com/spanner/docs/lexical#identifiers.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The same
    +     * parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 4; + */ + public Builder setParams(com.google.protobuf.Struct.Builder builderForValue) { + if (paramsBuilder_ == null) { + params_ = builderForValue.build(); + } else { + paramsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Parameter names and values that bind to placeholders in the SQL string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names must conform
    +     * to the naming requirements of identifiers as specified at
    +     * https://cloud.google.com/spanner/docs/lexical#identifiers.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The same
    +     * parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 4; + */ + public Builder mergeParams(com.google.protobuf.Struct value) { + if (paramsBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && params_ != null + && params_ != com.google.protobuf.Struct.getDefaultInstance()) { + getParamsBuilder().mergeFrom(value); + } else { + params_ = value; + } + } else { + paramsBuilder_.mergeFrom(value); + } + if (params_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Parameter names and values that bind to placeholders in the SQL string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names must conform
    +     * to the naming requirements of identifiers as specified at
    +     * https://cloud.google.com/spanner/docs/lexical#identifiers.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The same
    +     * parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 4; + */ + public Builder clearParams() { + bitField0_ = (bitField0_ & ~0x00000008); + params_ = null; + if (paramsBuilder_ != null) { + paramsBuilder_.dispose(); + paramsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Parameter names and values that bind to placeholders in the SQL string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names must conform
    +     * to the naming requirements of identifiers as specified at
    +     * https://cloud.google.com/spanner/docs/lexical#identifiers.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The same
    +     * parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 4; + */ + public com.google.protobuf.Struct.Builder getParamsBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetParamsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Parameter names and values that bind to placeholders in the SQL string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names must conform
    +     * to the naming requirements of identifiers as specified at
    +     * https://cloud.google.com/spanner/docs/lexical#identifiers.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The same
    +     * parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 4; + */ + public com.google.protobuf.StructOrBuilder getParamsOrBuilder() { + if (paramsBuilder_ != null) { + return paramsBuilder_.getMessageOrBuilder(); + } else { + return params_ == null ? com.google.protobuf.Struct.getDefaultInstance() : params_; + } + } + + /** + * + * + *
    +     * Parameter names and values that bind to placeholders in the SQL string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names must conform
    +     * to the naming requirements of identifiers as specified at
    +     * https://cloud.google.com/spanner/docs/lexical#identifiers.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The same
    +     * parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Struct, + com.google.protobuf.Struct.Builder, + com.google.protobuf.StructOrBuilder> + internalGetParamsFieldBuilder() { + if (paramsBuilder_ == null) { + paramsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Struct, + com.google.protobuf.Struct.Builder, + com.google.protobuf.StructOrBuilder>( + getParams(), getParentForChildren(), isClean()); + params_ = null; + } + return paramsBuilder_; + } + + private static final class ParamTypesConverter + implements com.google.protobuf.MapFieldBuilder.Converter< + java.lang.String, com.google.spanner.v1.TypeOrBuilder, com.google.spanner.v1.Type> { + @java.lang.Override + public com.google.spanner.v1.Type build(com.google.spanner.v1.TypeOrBuilder val) { + if (val instanceof com.google.spanner.v1.Type) { + return (com.google.spanner.v1.Type) val; + } + return ((com.google.spanner.v1.Type.Builder) val).build(); + } + + @java.lang.Override + public com.google.protobuf.MapEntry + defaultEntry() { + return ParamTypesDefaultEntryHolder.defaultEntry; + } + } + ; + + private static final ParamTypesConverter paramTypesConverter = new ParamTypesConverter(); + + private com.google.protobuf.MapFieldBuilder< + java.lang.String, + com.google.spanner.v1.TypeOrBuilder, + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder> + paramTypes_; + + private com.google.protobuf.MapFieldBuilder< + java.lang.String, + com.google.spanner.v1.TypeOrBuilder, + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder> + internalGetParamTypes() { + if (paramTypes_ == null) { + return new com.google.protobuf.MapFieldBuilder<>(paramTypesConverter); + } + return paramTypes_; + } + + private com.google.protobuf.MapFieldBuilder< + java.lang.String, + com.google.spanner.v1.TypeOrBuilder, + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder> + internalGetMutableParamTypes() { + 
if (paramTypes_ == null) { + paramTypes_ = new com.google.protobuf.MapFieldBuilder<>(paramTypesConverter); + } + bitField0_ |= 0x00000010; + onChanged(); + return paramTypes_; + } + + public int getParamTypesCount() { + return internalGetParamTypes().ensureBuilderMap().size(); + } + + /** + * + * + *
    +     * It isn't always possible for Cloud Spanner to infer the right SQL type
    +     * from a JSON value. For example, values of type `BYTES` and values
    +     * of type `STRING` both appear in
    +     * [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
    +     *
    +     * In these cases, you can use `param_types` to specify the exact
    +     * SQL type for some or all of the SQL statement parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 5; + */ + @java.lang.Override + public boolean containsParamTypes(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParamTypes().ensureBuilderMap().containsKey(key); + } + + /** Use {@link #getParamTypesMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParamTypes() { + return getParamTypesMap(); + } + + /** + * + * + *
    +     * It isn't always possible for Cloud Spanner to infer the right SQL type
    +     * from a JSON value. For example, values of type `BYTES` and values
    +     * of type `STRING` both appear in
    +     * [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
    +     *
    +     * In these cases, you can use `param_types` to specify the exact
    +     * SQL type for some or all of the SQL statement parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 5; + */ + @java.lang.Override + public java.util.Map getParamTypesMap() { + return internalGetParamTypes().getImmutableMap(); + } + + /** + * + * + *
    +     * It isn't always possible for Cloud Spanner to infer the right SQL type
    +     * from a JSON value. For example, values of type `BYTES` and values
    +     * of type `STRING` both appear in
    +     * [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
    +     *
    +     * In these cases, you can use `param_types` to specify the exact
    +     * SQL type for some or all of the SQL statement parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 5; + */ + @java.lang.Override + public /* nullable */ com.google.spanner.v1.Type getParamTypesOrDefault( + java.lang.String key, + /* nullable */ + com.google.spanner.v1.Type defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetMutableParamTypes().ensureBuilderMap(); + return map.containsKey(key) ? paramTypesConverter.build(map.get(key)) : defaultValue; + } + + /** + * + * + *
    +     * It isn't always possible for Cloud Spanner to infer the right SQL type
    +     * from a JSON value. For example, values of type `BYTES` and values
    +     * of type `STRING` both appear in
    +     * [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
    +     *
    +     * In these cases, you can use `param_types` to specify the exact
    +     * SQL type for some or all of the SQL statement parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 5; + */ + @java.lang.Override + public com.google.spanner.v1.Type getParamTypesOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetMutableParamTypes().ensureBuilderMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return paramTypesConverter.build(map.get(key)); + } + + public Builder clearParamTypes() { + bitField0_ = (bitField0_ & ~0x00000010); + internalGetMutableParamTypes().clear(); + return this; + } + + /** + * + * + *
    +     * It isn't always possible for Cloud Spanner to infer the right SQL type
    +     * from a JSON value. For example, values of type `BYTES` and values
    +     * of type `STRING` both appear in
    +     * [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
    +     *
    +     * In these cases, you can use `param_types` to specify the exact
    +     * SQL type for some or all of the SQL statement parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 5; + */ + public Builder removeParamTypes(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableParamTypes().ensureBuilderMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableParamTypes() { + bitField0_ |= 0x00000010; + return internalGetMutableParamTypes().ensureMessageMap(); + } + + /** + * + * + *
    +     * It isn't always possible for Cloud Spanner to infer the right SQL type
    +     * from a JSON value. For example, values of type `BYTES` and values
    +     * of type `STRING` both appear in
    +     * [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
    +     *
    +     * In these cases, you can use `param_types` to specify the exact
    +     * SQL type for some or all of the SQL statement parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 5; + */ + public Builder putParamTypes(java.lang.String key, com.google.spanner.v1.Type value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableParamTypes().ensureBuilderMap().put(key, value); + bitField0_ |= 0x00000010; + return this; + } + + /** + * + * + *
    +     * It isn't always possible for Cloud Spanner to infer the right SQL type
    +     * from a JSON value. For example, values of type `BYTES` and values
    +     * of type `STRING` both appear in
    +     * [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
    +     *
    +     * In these cases, you can use `param_types` to specify the exact
    +     * SQL type for some or all of the SQL statement parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 5; + */ + public Builder putAllParamTypes( + java.util.Map values) { + for (java.util.Map.Entry e : + values.entrySet()) { + if (e.getKey() == null || e.getValue() == null) { + throw new NullPointerException(); + } + } + internalGetMutableParamTypes().ensureBuilderMap().putAll(values); + bitField0_ |= 0x00000010; + return this; + } + + /** + * + * + *
    +     * It isn't always possible for Cloud Spanner to infer the right SQL type
    +     * from a JSON value. For example, values of type `BYTES` and values
    +     * of type `STRING` both appear in
    +     * [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
    +     *
    +     * In these cases, you can use `param_types` to specify the exact
    +     * SQL type for some or all of the SQL statement parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 5; + */ + public com.google.spanner.v1.Type.Builder putParamTypesBuilderIfAbsent(java.lang.String key) { + java.util.Map builderMap = + internalGetMutableParamTypes().ensureBuilderMap(); + com.google.spanner.v1.TypeOrBuilder entry = builderMap.get(key); + if (entry == null) { + entry = com.google.spanner.v1.Type.newBuilder(); + builderMap.put(key, entry); + } + if (entry instanceof com.google.spanner.v1.Type) { + entry = ((com.google.spanner.v1.Type) entry).toBuilder(); + builderMap.put(key, entry); + } + return (com.google.spanner.v1.Type.Builder) entry; + } + + private com.google.protobuf.ByteString resumeToken_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * If this request is resuming a previously interrupted SQL statement
    +     * execution, `resume_token` should be copied from the last
    +     * [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the
    +     * interruption. Doing this enables the new SQL statement execution to resume
    +     * where the last one left off. The rest of the request parameters must
    +     * exactly match the request that yielded this token.
    +     * 
    + * + * bytes resume_token = 6; + * + * @return The resumeToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getResumeToken() { + return resumeToken_; + } + + /** + * + * + *
    +     * If this request is resuming a previously interrupted SQL statement
    +     * execution, `resume_token` should be copied from the last
    +     * [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the
    +     * interruption. Doing this enables the new SQL statement execution to resume
    +     * where the last one left off. The rest of the request parameters must
    +     * exactly match the request that yielded this token.
    +     * 
    + * + * bytes resume_token = 6; + * + * @param value The resumeToken to set. + * @return This builder for chaining. + */ + public Builder setResumeToken(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + resumeToken_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If this request is resuming a previously interrupted SQL statement
    +     * execution, `resume_token` should be copied from the last
    +     * [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the
    +     * interruption. Doing this enables the new SQL statement execution to resume
    +     * where the last one left off. The rest of the request parameters must
    +     * exactly match the request that yielded this token.
    +     * 
    + * + * bytes resume_token = 6; + * + * @return This builder for chaining. + */ + public Builder clearResumeToken() { + bitField0_ = (bitField0_ & ~0x00000020); + resumeToken_ = getDefaultInstance().getResumeToken(); + onChanged(); + return this; + } + + private int queryMode_ = 0; + + /** + * + * + *
    +     * Used to control the amount of debugging information returned in
    +     * [ResultSetStats][google.spanner.v1.ResultSetStats]. If
    +     * [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is
    +     * set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only
    +     * be set to
    +     * [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL].
    +     * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryMode query_mode = 7; + * + * @return The enum numeric value on the wire for queryMode. + */ + @java.lang.Override + public int getQueryModeValue() { + return queryMode_; + } + + /** + * + * + *
    +     * Used to control the amount of debugging information returned in
    +     * [ResultSetStats][google.spanner.v1.ResultSetStats]. If
    +     * [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is
    +     * set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only
    +     * be set to
    +     * [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL].
    +     * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryMode query_mode = 7; + * + * @param value The enum numeric value on the wire for queryMode to set. + * @return This builder for chaining. + */ + public Builder setQueryModeValue(int value) { + queryMode_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Used to control the amount of debugging information returned in
    +     * [ResultSetStats][google.spanner.v1.ResultSetStats]. If
    +     * [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is
    +     * set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only
    +     * be set to
    +     * [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL].
    +     * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryMode query_mode = 7; + * + * @return The queryMode. + */ + @java.lang.Override + public com.google.spanner.v1.ExecuteSqlRequest.QueryMode getQueryMode() { + com.google.spanner.v1.ExecuteSqlRequest.QueryMode result = + com.google.spanner.v1.ExecuteSqlRequest.QueryMode.forNumber(queryMode_); + return result == null + ? com.google.spanner.v1.ExecuteSqlRequest.QueryMode.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * Used to control the amount of debugging information returned in
    +     * [ResultSetStats][google.spanner.v1.ResultSetStats]. If
    +     * [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is
    +     * set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only
    +     * be set to
    +     * [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL].
    +     * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryMode query_mode = 7; + * + * @param value The queryMode to set. + * @return This builder for chaining. + */ + public Builder setQueryMode(com.google.spanner.v1.ExecuteSqlRequest.QueryMode value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000040; + queryMode_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Used to control the amount of debugging information returned in
    +     * [ResultSetStats][google.spanner.v1.ResultSetStats]. If
    +     * [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is
    +     * set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only
    +     * be set to
    +     * [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL].
    +     * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryMode query_mode = 7; + * + * @return This builder for chaining. + */ + public Builder clearQueryMode() { + bitField0_ = (bitField0_ & ~0x00000040); + queryMode_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString partitionToken_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * If present, results are restricted to the specified partition
    +     * previously created using `PartitionQuery`. There must be an exact
    +     * match for the values of fields common to this message and the
    +     * `PartitionQueryRequest` message used to create this `partition_token`.
    +     * 
    + * + * bytes partition_token = 8; + * + * @return The partitionToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPartitionToken() { + return partitionToken_; + } + + /** + * + * + *
    +     * If present, results are restricted to the specified partition
    +     * previously created using `PartitionQuery`. There must be an exact
    +     * match for the values of fields common to this message and the
    +     * `PartitionQueryRequest` message used to create this `partition_token`.
    +     * 
    + * + * bytes partition_token = 8; + * + * @param value The partitionToken to set. + * @return This builder for chaining. + */ + public Builder setPartitionToken(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + partitionToken_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If present, results are restricted to the specified partition
    +     * previously created using `PartitionQuery`. There must be an exact
    +     * match for the values of fields common to this message and the
    +     * `PartitionQueryRequest` message used to create this `partition_token`.
    +     * 
    + * + * bytes partition_token = 8; + * + * @return This builder for chaining. + */ + public Builder clearPartitionToken() { + bitField0_ = (bitField0_ & ~0x00000080); + partitionToken_ = getDefaultInstance().getPartitionToken(); + onChanged(); + return this; + } + + private long seqno_; + + /** + * + * + *
    +     * A per-transaction sequence number used to identify this request. This field
    +     * makes each request idempotent such that if the request is received multiple
    +     * times, at most one succeeds.
    +     *
    +     * The sequence number must be monotonically increasing within the
    +     * transaction. If a request arrives for the first time with an out-of-order
    +     * sequence number, the transaction can be aborted. Replays of previously
    +     * handled requests yield the same response as the first execution.
    +     *
    +     * Required for DML statements. Ignored for queries.
    +     * 
    + * + * int64 seqno = 9; + * + * @return The seqno. + */ + @java.lang.Override + public long getSeqno() { + return seqno_; + } + + /** + * + * + *
    +     * A per-transaction sequence number used to identify this request. This field
    +     * makes each request idempotent such that if the request is received multiple
    +     * times, at most one succeeds.
    +     *
    +     * The sequence number must be monotonically increasing within the
    +     * transaction. If a request arrives for the first time with an out-of-order
    +     * sequence number, the transaction can be aborted. Replays of previously
    +     * handled requests yield the same response as the first execution.
    +     *
    +     * Required for DML statements. Ignored for queries.
    +     * 
    + * + * int64 seqno = 9; + * + * @param value The seqno to set. + * @return This builder for chaining. + */ + public Builder setSeqno(long value) { + + seqno_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A per-transaction sequence number used to identify this request. This field
    +     * makes each request idempotent such that if the request is received multiple
    +     * times, at most one succeeds.
    +     *
    +     * The sequence number must be monotonically increasing within the
    +     * transaction. If a request arrives for the first time with an out-of-order
    +     * sequence number, the transaction can be aborted. Replays of previously
    +     * handled requests yield the same response as the first execution.
    +     *
    +     * Required for DML statements. Ignored for queries.
    +     * 
    + * + * int64 seqno = 9; + * + * @return This builder for chaining. + */ + public Builder clearSeqno() { + bitField0_ = (bitField0_ & ~0x00000100); + seqno_ = 0L; + onChanged(); + return this; + } + + private com.google.spanner.v1.ExecuteSqlRequest.QueryOptions queryOptions_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ExecuteSqlRequest.QueryOptions, + com.google.spanner.v1.ExecuteSqlRequest.QueryOptions.Builder, + com.google.spanner.v1.ExecuteSqlRequest.QueryOptionsOrBuilder> + queryOptionsBuilder_; + + /** + * + * + *
    +     * Query optimizer configuration to use for the given query.
    +     * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryOptions query_options = 10; + * + * @return Whether the queryOptions field is set. + */ + public boolean hasQueryOptions() { + return ((bitField0_ & 0x00000200) != 0); + } + + /** + * + * + *
    +     * Query optimizer configuration to use for the given query.
    +     * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryOptions query_options = 10; + * + * @return The queryOptions. + */ + public com.google.spanner.v1.ExecuteSqlRequest.QueryOptions getQueryOptions() { + if (queryOptionsBuilder_ == null) { + return queryOptions_ == null + ? com.google.spanner.v1.ExecuteSqlRequest.QueryOptions.getDefaultInstance() + : queryOptions_; + } else { + return queryOptionsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Query optimizer configuration to use for the given query.
    +     * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryOptions query_options = 10; + */ + public Builder setQueryOptions(com.google.spanner.v1.ExecuteSqlRequest.QueryOptions value) { + if (queryOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + queryOptions_ = value; + } else { + queryOptionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Query optimizer configuration to use for the given query.
    +     * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryOptions query_options = 10; + */ + public Builder setQueryOptions( + com.google.spanner.v1.ExecuteSqlRequest.QueryOptions.Builder builderForValue) { + if (queryOptionsBuilder_ == null) { + queryOptions_ = builderForValue.build(); + } else { + queryOptionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Query optimizer configuration to use for the given query.
    +     * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryOptions query_options = 10; + */ + public Builder mergeQueryOptions(com.google.spanner.v1.ExecuteSqlRequest.QueryOptions value) { + if (queryOptionsBuilder_ == null) { + if (((bitField0_ & 0x00000200) != 0) + && queryOptions_ != null + && queryOptions_ + != com.google.spanner.v1.ExecuteSqlRequest.QueryOptions.getDefaultInstance()) { + getQueryOptionsBuilder().mergeFrom(value); + } else { + queryOptions_ = value; + } + } else { + queryOptionsBuilder_.mergeFrom(value); + } + if (queryOptions_ != null) { + bitField0_ |= 0x00000200; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Query optimizer configuration to use for the given query.
    +     * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryOptions query_options = 10; + */ + public Builder clearQueryOptions() { + bitField0_ = (bitField0_ & ~0x00000200); + queryOptions_ = null; + if (queryOptionsBuilder_ != null) { + queryOptionsBuilder_.dispose(); + queryOptionsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Query optimizer configuration to use for the given query.
    +     * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryOptions query_options = 10; + */ + public com.google.spanner.v1.ExecuteSqlRequest.QueryOptions.Builder getQueryOptionsBuilder() { + bitField0_ |= 0x00000200; + onChanged(); + return internalGetQueryOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Query optimizer configuration to use for the given query.
    +     * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryOptions query_options = 10; + */ + public com.google.spanner.v1.ExecuteSqlRequest.QueryOptionsOrBuilder + getQueryOptionsOrBuilder() { + if (queryOptionsBuilder_ != null) { + return queryOptionsBuilder_.getMessageOrBuilder(); + } else { + return queryOptions_ == null + ? com.google.spanner.v1.ExecuteSqlRequest.QueryOptions.getDefaultInstance() + : queryOptions_; + } + } + + /** + * + * + *
    +     * Query optimizer configuration to use for the given query.
    +     * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryOptions query_options = 10; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ExecuteSqlRequest.QueryOptions, + com.google.spanner.v1.ExecuteSqlRequest.QueryOptions.Builder, + com.google.spanner.v1.ExecuteSqlRequest.QueryOptionsOrBuilder> + internalGetQueryOptionsFieldBuilder() { + if (queryOptionsBuilder_ == null) { + queryOptionsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ExecuteSqlRequest.QueryOptions, + com.google.spanner.v1.ExecuteSqlRequest.QueryOptions.Builder, + com.google.spanner.v1.ExecuteSqlRequest.QueryOptionsOrBuilder>( + getQueryOptions(), getParentForChildren(), isClean()); + queryOptions_ = null; + } + return queryOptionsBuilder_; + } + + private com.google.spanner.v1.RequestOptions requestOptions_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RequestOptions, + com.google.spanner.v1.RequestOptions.Builder, + com.google.spanner.v1.RequestOptionsOrBuilder> + requestOptionsBuilder_; + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + * + * @return Whether the requestOptions field is set. + */ + public boolean hasRequestOptions() { + return ((bitField0_ & 0x00000400) != 0); + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + * + * @return The requestOptions. + */ + public com.google.spanner.v1.RequestOptions getRequestOptions() { + if (requestOptionsBuilder_ == null) { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } else { + return requestOptionsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + */ + public Builder setRequestOptions(com.google.spanner.v1.RequestOptions value) { + if (requestOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + requestOptions_ = value; + } else { + requestOptionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + */ + public Builder setRequestOptions(com.google.spanner.v1.RequestOptions.Builder builderForValue) { + if (requestOptionsBuilder_ == null) { + requestOptions_ = builderForValue.build(); + } else { + requestOptionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + */ + public Builder mergeRequestOptions(com.google.spanner.v1.RequestOptions value) { + if (requestOptionsBuilder_ == null) { + if (((bitField0_ & 0x00000400) != 0) + && requestOptions_ != null + && requestOptions_ != com.google.spanner.v1.RequestOptions.getDefaultInstance()) { + getRequestOptionsBuilder().mergeFrom(value); + } else { + requestOptions_ = value; + } + } else { + requestOptionsBuilder_.mergeFrom(value); + } + if (requestOptions_ != null) { + bitField0_ |= 0x00000400; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + */ + public Builder clearRequestOptions() { + bitField0_ = (bitField0_ & ~0x00000400); + requestOptions_ = null; + if (requestOptionsBuilder_ != null) { + requestOptionsBuilder_.dispose(); + requestOptionsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + */ + public com.google.spanner.v1.RequestOptions.Builder getRequestOptionsBuilder() { + bitField0_ |= 0x00000400; + onChanged(); + return internalGetRequestOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + */ + public com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder() { + if (requestOptionsBuilder_ != null) { + return requestOptionsBuilder_.getMessageOrBuilder(); + } else { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RequestOptions, + com.google.spanner.v1.RequestOptions.Builder, + com.google.spanner.v1.RequestOptionsOrBuilder> + internalGetRequestOptionsFieldBuilder() { + if (requestOptionsBuilder_ == null) { + requestOptionsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RequestOptions, + com.google.spanner.v1.RequestOptions.Builder, + com.google.spanner.v1.RequestOptionsOrBuilder>( + getRequestOptions(), getParentForChildren(), isClean()); + requestOptions_ = null; + } + return requestOptionsBuilder_; + } + + private com.google.spanner.v1.DirectedReadOptions directedReadOptions_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.DirectedReadOptions, + com.google.spanner.v1.DirectedReadOptions.Builder, + com.google.spanner.v1.DirectedReadOptionsOrBuilder> + directedReadOptionsBuilder_; + + /** + * + * + *
    +     * Directed read options for this request.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 15; + * + * @return Whether the directedReadOptions field is set. + */ + public boolean hasDirectedReadOptions() { + return ((bitField0_ & 0x00000800) != 0); + } + + /** + * + * + *
    +     * Directed read options for this request.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 15; + * + * @return The directedReadOptions. + */ + public com.google.spanner.v1.DirectedReadOptions getDirectedReadOptions() { + if (directedReadOptionsBuilder_ == null) { + return directedReadOptions_ == null + ? com.google.spanner.v1.DirectedReadOptions.getDefaultInstance() + : directedReadOptions_; + } else { + return directedReadOptionsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Directed read options for this request.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 15; + */ + public Builder setDirectedReadOptions(com.google.spanner.v1.DirectedReadOptions value) { + if (directedReadOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + directedReadOptions_ = value; + } else { + directedReadOptionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Directed read options for this request.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 15; + */ + public Builder setDirectedReadOptions( + com.google.spanner.v1.DirectedReadOptions.Builder builderForValue) { + if (directedReadOptionsBuilder_ == null) { + directedReadOptions_ = builderForValue.build(); + } else { + directedReadOptionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Directed read options for this request.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 15; + */ + public Builder mergeDirectedReadOptions(com.google.spanner.v1.DirectedReadOptions value) { + if (directedReadOptionsBuilder_ == null) { + if (((bitField0_ & 0x00000800) != 0) + && directedReadOptions_ != null + && directedReadOptions_ + != com.google.spanner.v1.DirectedReadOptions.getDefaultInstance()) { + getDirectedReadOptionsBuilder().mergeFrom(value); + } else { + directedReadOptions_ = value; + } + } else { + directedReadOptionsBuilder_.mergeFrom(value); + } + if (directedReadOptions_ != null) { + bitField0_ |= 0x00000800; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Directed read options for this request.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 15; + */ + public Builder clearDirectedReadOptions() { + bitField0_ = (bitField0_ & ~0x00000800); + directedReadOptions_ = null; + if (directedReadOptionsBuilder_ != null) { + directedReadOptionsBuilder_.dispose(); + directedReadOptionsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Directed read options for this request.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 15; + */ + public com.google.spanner.v1.DirectedReadOptions.Builder getDirectedReadOptionsBuilder() { + bitField0_ |= 0x00000800; + onChanged(); + return internalGetDirectedReadOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Directed read options for this request.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 15; + */ + public com.google.spanner.v1.DirectedReadOptionsOrBuilder getDirectedReadOptionsOrBuilder() { + if (directedReadOptionsBuilder_ != null) { + return directedReadOptionsBuilder_.getMessageOrBuilder(); + } else { + return directedReadOptions_ == null + ? com.google.spanner.v1.DirectedReadOptions.getDefaultInstance() + : directedReadOptions_; + } + } + + /** + * + * + *
    +     * Directed read options for this request.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 15; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.DirectedReadOptions, + com.google.spanner.v1.DirectedReadOptions.Builder, + com.google.spanner.v1.DirectedReadOptionsOrBuilder> + internalGetDirectedReadOptionsFieldBuilder() { + if (directedReadOptionsBuilder_ == null) { + directedReadOptionsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.DirectedReadOptions, + com.google.spanner.v1.DirectedReadOptions.Builder, + com.google.spanner.v1.DirectedReadOptionsOrBuilder>( + getDirectedReadOptions(), getParentForChildren(), isClean()); + directedReadOptions_ = null; + } + return directedReadOptionsBuilder_; + } + + private boolean dataBoostEnabled_; + + /** + * + * + *
    +     * If this is for a partitioned query and this field is set to `true`, the
    +     * request is executed with Spanner Data Boost independent compute resources.
    +     *
    +     * If the field is set to `true` but the request doesn't set
    +     * `partition_token`, the API returns an `INVALID_ARGUMENT` error.
    +     * 
    + * + * bool data_boost_enabled = 16; + * + * @return The dataBoostEnabled. + */ + @java.lang.Override + public boolean getDataBoostEnabled() { + return dataBoostEnabled_; + } + + /** + * + * + *
    +     * If this is for a partitioned query and this field is set to `true`, the
    +     * request is executed with Spanner Data Boost independent compute resources.
    +     *
    +     * If the field is set to `true` but the request doesn't set
    +     * `partition_token`, the API returns an `INVALID_ARGUMENT` error.
    +     * 
    + * + * bool data_boost_enabled = 16; + * + * @param value The dataBoostEnabled to set. + * @return This builder for chaining. + */ + public Builder setDataBoostEnabled(boolean value) { + + dataBoostEnabled_ = value; + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If this is for a partitioned query and this field is set to `true`, the
    +     * request is executed with Spanner Data Boost independent compute resources.
    +     *
    +     * If the field is set to `true` but the request doesn't set
    +     * `partition_token`, the API returns an `INVALID_ARGUMENT` error.
    +     * 
    + * + * bool data_boost_enabled = 16; + * + * @return This builder for chaining. + */ + public Builder clearDataBoostEnabled() { + bitField0_ = (bitField0_ & ~0x00001000); + dataBoostEnabled_ = false; + onChanged(); + return this; + } + + private boolean lastStatement_; + + /** + * + * + *
    +     * Optional. If set to `true`, this statement marks the end of the
    +     * transaction. After this statement executes, you must commit or abort the
    +     * transaction. Attempts to execute any other requests against this
    +     * transaction (including reads and queries) are rejected.
    +     *
    +     * For DML statements, setting this option might cause some error reporting to
    +     * be deferred until commit time (for example, validation of unique
    +     * constraints). Given this, successful execution of a DML statement shouldn't
    +     * be assumed until a subsequent `Commit` call completes successfully.
    +     * 
    + * + * bool last_statement = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The lastStatement. + */ + @java.lang.Override + public boolean getLastStatement() { + return lastStatement_; + } + + /** + * + * + *
    +     * Optional. If set to `true`, this statement marks the end of the
    +     * transaction. After this statement executes, you must commit or abort the
    +     * transaction. Attempts to execute any other requests against this
    +     * transaction (including reads and queries) are rejected.
    +     *
    +     * For DML statements, setting this option might cause some error reporting to
    +     * be deferred until commit time (for example, validation of unique
    +     * constraints). Given this, successful execution of a DML statement shouldn't
    +     * be assumed until a subsequent `Commit` call completes successfully.
    +     * 
    + * + * bool last_statement = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The lastStatement to set. + * @return This builder for chaining. + */ + public Builder setLastStatement(boolean value) { + + lastStatement_ = value; + bitField0_ |= 0x00002000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. If set to `true`, this statement marks the end of the
    +     * transaction. After this statement executes, you must commit or abort the
    +     * transaction. Attempts to execute any other requests against this
    +     * transaction (including reads and queries) are rejected.
    +     *
    +     * For DML statements, setting this option might cause some error reporting to
    +     * be deferred until commit time (for example, validation of unique
    +     * constraints). Given this, successful execution of a DML statement shouldn't
    +     * be assumed until a subsequent `Commit` call completes successfully.
    +     * 
    + * + * bool last_statement = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearLastStatement() { + bitField0_ = (bitField0_ & ~0x00002000); + lastStatement_ = false; + onChanged(); + return this; + } + + private com.google.spanner.v1.RoutingHint routingHint_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RoutingHint, + com.google.spanner.v1.RoutingHint.Builder, + com.google.spanner.v1.RoutingHintOrBuilder> + routingHintBuilder_; + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the routingHint field is set. + */ + public boolean hasRoutingHint() { + return ((bitField0_ & 0x00004000) != 0); + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The routingHint. + */ + public com.google.spanner.v1.RoutingHint getRoutingHint() { + if (routingHintBuilder_ == null) { + return routingHint_ == null + ? com.google.spanner.v1.RoutingHint.getDefaultInstance() + : routingHint_; + } else { + return routingHintBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRoutingHint(com.google.spanner.v1.RoutingHint value) { + if (routingHintBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + routingHint_ = value; + } else { + routingHintBuilder_.setMessage(value); + } + bitField0_ |= 0x00004000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRoutingHint(com.google.spanner.v1.RoutingHint.Builder builderForValue) { + if (routingHintBuilder_ == null) { + routingHint_ = builderForValue.build(); + } else { + routingHintBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00004000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeRoutingHint(com.google.spanner.v1.RoutingHint value) { + if (routingHintBuilder_ == null) { + if (((bitField0_ & 0x00004000) != 0) + && routingHint_ != null + && routingHint_ != com.google.spanner.v1.RoutingHint.getDefaultInstance()) { + getRoutingHintBuilder().mergeFrom(value); + } else { + routingHint_ = value; + } + } else { + routingHintBuilder_.mergeFrom(value); + } + if (routingHint_ != null) { + bitField0_ |= 0x00004000; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearRoutingHint() { + bitField0_ = (bitField0_ & ~0x00004000); + routingHint_ = null; + if (routingHintBuilder_ != null) { + routingHintBuilder_.dispose(); + routingHintBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.RoutingHint.Builder getRoutingHintBuilder() { + bitField0_ |= 0x00004000; + onChanged(); + return internalGetRoutingHintFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.RoutingHintOrBuilder getRoutingHintOrBuilder() { + if (routingHintBuilder_ != null) { + return routingHintBuilder_.getMessageOrBuilder(); + } else { + return routingHint_ == null + ? com.google.spanner.v1.RoutingHint.getDefaultInstance() + : routingHint_; + } + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RoutingHint, + com.google.spanner.v1.RoutingHint.Builder, + com.google.spanner.v1.RoutingHintOrBuilder> + internalGetRoutingHintFieldBuilder() { + if (routingHintBuilder_ == null) { + routingHintBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RoutingHint, + com.google.spanner.v1.RoutingHint.Builder, + com.google.spanner.v1.RoutingHintOrBuilder>( + getRoutingHint(), getParentForChildren(), isClean()); + routingHint_ = null; + } + return routingHintBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ExecuteSqlRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteSqlRequest) + private static final com.google.spanner.v1.ExecuteSqlRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.ExecuteSqlRequest(); + } + + public static com.google.spanner.v1.ExecuteSqlRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ExecuteSqlRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); 
+ } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.ExecuteSqlRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteSqlRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteSqlRequestOrBuilder.java new file mode 100644 index 000000000000..c72df5d5568a --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteSqlRequestOrBuilder.java @@ -0,0 +1,623 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface ExecuteSqlRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ExecuteSqlRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The session in which the SQL query should be performed.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + java.lang.String getSession(); + + /** + * + * + *
    +   * Required. The session in which the SQL query should be performed.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + com.google.protobuf.ByteString getSessionBytes(); + + /** + * + * + *
    +   * The transaction to use.
    +   *
    +   * For queries, if none is provided, the default is a temporary read-only
    +   * transaction with strong concurrency.
    +   *
    +   * Standard DML statements require a read-write transaction. To protect
    +   * against replays, single-use transactions are not supported. The caller
    +   * must either supply an existing transaction ID or begin a new transaction.
    +   *
    +   * Partitioned DML requires an existing Partitioned DML transaction ID.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return Whether the transaction field is set. + */ + boolean hasTransaction(); + + /** + * + * + *
    +   * The transaction to use.
    +   *
    +   * For queries, if none is provided, the default is a temporary read-only
    +   * transaction with strong concurrency.
    +   *
    +   * Standard DML statements require a read-write transaction. To protect
    +   * against replays, single-use transactions are not supported. The caller
    +   * must either supply an existing transaction ID or begin a new transaction.
    +   *
    +   * Partitioned DML requires an existing Partitioned DML transaction ID.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return The transaction. + */ + com.google.spanner.v1.TransactionSelector getTransaction(); + + /** + * + * + *
    +   * The transaction to use.
    +   *
    +   * For queries, if none is provided, the default is a temporary read-only
    +   * transaction with strong concurrency.
    +   *
    +   * Standard DML statements require a read-write transaction. To protect
    +   * against replays, single-use transactions are not supported. The caller
    +   * must either supply an existing transaction ID or begin a new transaction.
    +   *
    +   * Partitioned DML requires an existing Partitioned DML transaction ID.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + com.google.spanner.v1.TransactionSelectorOrBuilder getTransactionOrBuilder(); + + /** + * + * + *
    +   * Required. The SQL string.
    +   * 
    + * + * string sql = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The sql. + */ + java.lang.String getSql(); + + /** + * + * + *
    +   * Required. The SQL string.
    +   * 
    + * + * string sql = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for sql. + */ + com.google.protobuf.ByteString getSqlBytes(); + + /** + * + * + *
    +   * Parameter names and values that bind to placeholders in the SQL string.
    +   *
    +   * A parameter placeholder consists of the `@` character followed by the
    +   * parameter name (for example, `@firstName`). Parameter names must conform
    +   * to the naming requirements of identifiers as specified at
    +   * https://cloud.google.com/spanner/docs/lexical#identifiers.
    +   *
    +   * Parameters can appear anywhere that a literal value is expected. The same
    +   * parameter name can be used more than once, for example:
    +   *
    +   * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +   *
    +   * It's an error to execute a SQL statement with unbound parameters.
    +   * 
    + * + * .google.protobuf.Struct params = 4; + * + * @return Whether the params field is set. + */ + boolean hasParams(); + + /** + * + * + *
    +   * Parameter names and values that bind to placeholders in the SQL string.
    +   *
    +   * A parameter placeholder consists of the `@` character followed by the
    +   * parameter name (for example, `@firstName`). Parameter names must conform
    +   * to the naming requirements of identifiers as specified at
    +   * https://cloud.google.com/spanner/docs/lexical#identifiers.
    +   *
    +   * Parameters can appear anywhere that a literal value is expected. The same
    +   * parameter name can be used more than once, for example:
    +   *
    +   * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +   *
    +   * It's an error to execute a SQL statement with unbound parameters.
    +   * 
    + * + * .google.protobuf.Struct params = 4; + * + * @return The params. + */ + com.google.protobuf.Struct getParams(); + + /** + * + * + *
    +   * Parameter names and values that bind to placeholders in the SQL string.
    +   *
    +   * A parameter placeholder consists of the `@` character followed by the
    +   * parameter name (for example, `@firstName`). Parameter names must conform
    +   * to the naming requirements of identifiers as specified at
    +   * https://cloud.google.com/spanner/docs/lexical#identifiers.
    +   *
    +   * Parameters can appear anywhere that a literal value is expected. The same
    +   * parameter name can be used more than once, for example:
    +   *
    +   * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +   *
    +   * It's an error to execute a SQL statement with unbound parameters.
    +   * 
    + * + * .google.protobuf.Struct params = 4; + */ + com.google.protobuf.StructOrBuilder getParamsOrBuilder(); + + /** + * + * + *
    +   * It isn't always possible for Cloud Spanner to infer the right SQL type
    +   * from a JSON value. For example, values of type `BYTES` and values
    +   * of type `STRING` both appear in
    +   * [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
    +   *
    +   * In these cases, you can use `param_types` to specify the exact
    +   * SQL type for some or all of the SQL statement parameters. See the
    +   * definition of [Type][google.spanner.v1.Type] for more information
    +   * about SQL types.
    +   * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 5; + */ + int getParamTypesCount(); + + /** + * + * + *
    +   * It isn't always possible for Cloud Spanner to infer the right SQL type
    +   * from a JSON value. For example, values of type `BYTES` and values
    +   * of type `STRING` both appear in
    +   * [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
    +   *
    +   * In these cases, you can use `param_types` to specify the exact
    +   * SQL type for some or all of the SQL statement parameters. See the
    +   * definition of [Type][google.spanner.v1.Type] for more information
    +   * about SQL types.
    +   * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 5; + */ + boolean containsParamTypes(java.lang.String key); + + /** Use {@link #getParamTypesMap()} instead. */ + @java.lang.Deprecated + java.util.Map getParamTypes(); + + /** + * + * + *
    +   * It isn't always possible for Cloud Spanner to infer the right SQL type
    +   * from a JSON value. For example, values of type `BYTES` and values
    +   * of type `STRING` both appear in
    +   * [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
    +   *
    +   * In these cases, you can use `param_types` to specify the exact
    +   * SQL type for some or all of the SQL statement parameters. See the
    +   * definition of [Type][google.spanner.v1.Type] for more information
    +   * about SQL types.
    +   * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 5; + */ + java.util.Map getParamTypesMap(); + + /** + * + * + *
    +   * It isn't always possible for Cloud Spanner to infer the right SQL type
    +   * from a JSON value. For example, values of type `BYTES` and values
    +   * of type `STRING` both appear in
    +   * [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
    +   *
    +   * In these cases, you can use `param_types` to specify the exact
    +   * SQL type for some or all of the SQL statement parameters. See the
    +   * definition of [Type][google.spanner.v1.Type] for more information
    +   * about SQL types.
    +   * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 5; + */ + /* nullable */ + com.google.spanner.v1.Type getParamTypesOrDefault( + java.lang.String key, + /* nullable */ + com.google.spanner.v1.Type defaultValue); + + /** + * + * + *
    +   * It isn't always possible for Cloud Spanner to infer the right SQL type
    +   * from a JSON value. For example, values of type `BYTES` and values
    +   * of type `STRING` both appear in
    +   * [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
    +   *
    +   * In these cases, you can use `param_types` to specify the exact
    +   * SQL type for some or all of the SQL statement parameters. See the
    +   * definition of [Type][google.spanner.v1.Type] for more information
    +   * about SQL types.
    +   * 
    + * + * map<string, .google.spanner.v1.Type> param_types = 5; + */ + com.google.spanner.v1.Type getParamTypesOrThrow(java.lang.String key); + + /** + * + * + *
    +   * If this request is resuming a previously interrupted SQL statement
    +   * execution, `resume_token` should be copied from the last
    +   * [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the
    +   * interruption. Doing this enables the new SQL statement execution to resume
    +   * where the last one left off. The rest of the request parameters must
    +   * exactly match the request that yielded this token.
    +   * 
    + * + * bytes resume_token = 6; + * + * @return The resumeToken. + */ + com.google.protobuf.ByteString getResumeToken(); + + /** + * + * + *
    +   * Used to control the amount of debugging information returned in
    +   * [ResultSetStats][google.spanner.v1.ResultSetStats]. If
    +   * [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is
    +   * set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only
    +   * be set to
    +   * [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL].
    +   * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryMode query_mode = 7; + * + * @return The enum numeric value on the wire for queryMode. + */ + int getQueryModeValue(); + + /** + * + * + *
    +   * Used to control the amount of debugging information returned in
    +   * [ResultSetStats][google.spanner.v1.ResultSetStats]. If
    +   * [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is
    +   * set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only
    +   * be set to
    +   * [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL].
    +   * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryMode query_mode = 7; + * + * @return The queryMode. + */ + com.google.spanner.v1.ExecuteSqlRequest.QueryMode getQueryMode(); + + /** + * + * + *
    +   * If present, results are restricted to the specified partition
    +   * previously created using `PartitionQuery`. There must be an exact
    +   * match for the values of fields common to this message and the
    +   * `PartitionQueryRequest` message used to create this `partition_token`.
    +   * 
    + * + * bytes partition_token = 8; + * + * @return The partitionToken. + */ + com.google.protobuf.ByteString getPartitionToken(); + + /** + * + * + *
    +   * A per-transaction sequence number used to identify this request. This field
    +   * makes each request idempotent such that if the request is received multiple
    +   * times, at most one succeeds.
    +   *
    +   * The sequence number must be monotonically increasing within the
    +   * transaction. If a request arrives for the first time with an out-of-order
    +   * sequence number, the transaction can be aborted. Replays of previously
    +   * handled requests yield the same response as the first execution.
    +   *
    +   * Required for DML statements. Ignored for queries.
    +   * 
    + * + * int64 seqno = 9; + * + * @return The seqno. + */ + long getSeqno(); + + /** + * + * + *
    +   * Query optimizer configuration to use for the given query.
    +   * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryOptions query_options = 10; + * + * @return Whether the queryOptions field is set. + */ + boolean hasQueryOptions(); + + /** + * + * + *
    +   * Query optimizer configuration to use for the given query.
    +   * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryOptions query_options = 10; + * + * @return The queryOptions. + */ + com.google.spanner.v1.ExecuteSqlRequest.QueryOptions getQueryOptions(); + + /** + * + * + *
    +   * Query optimizer configuration to use for the given query.
    +   * 
    + * + * .google.spanner.v1.ExecuteSqlRequest.QueryOptions query_options = 10; + */ + com.google.spanner.v1.ExecuteSqlRequest.QueryOptionsOrBuilder getQueryOptionsOrBuilder(); + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + * + * @return Whether the requestOptions field is set. + */ + boolean hasRequestOptions(); + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + * + * @return The requestOptions. + */ + com.google.spanner.v1.RequestOptions getRequestOptions(); + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + */ + com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder(); + + /** + * + * + *
    +   * Directed read options for this request.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 15; + * + * @return Whether the directedReadOptions field is set. + */ + boolean hasDirectedReadOptions(); + + /** + * + * + *
    +   * Directed read options for this request.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 15; + * + * @return The directedReadOptions. + */ + com.google.spanner.v1.DirectedReadOptions getDirectedReadOptions(); + + /** + * + * + *
    +   * Directed read options for this request.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 15; + */ + com.google.spanner.v1.DirectedReadOptionsOrBuilder getDirectedReadOptionsOrBuilder(); + + /** + * + * + *
    +   * If this is for a partitioned query and this field is set to `true`, the
    +   * request is executed with Spanner Data Boost independent compute resources.
    +   *
    +   * If the field is set to `true` but the request doesn't set
    +   * `partition_token`, the API returns an `INVALID_ARGUMENT` error.
    +   * 
    + * + * bool data_boost_enabled = 16; + * + * @return The dataBoostEnabled. + */ + boolean getDataBoostEnabled(); + + /** + * + * + *
    +   * Optional. If set to `true`, this statement marks the end of the
    +   * transaction. After this statement executes, you must commit or abort the
    +   * transaction. Attempts to execute any other requests against this
    +   * transaction (including reads and queries) are rejected.
    +   *
    +   * For DML statements, setting this option might cause some error reporting to
    +   * be deferred until commit time (for example, validation of unique
    +   * constraints). Given this, successful execution of a DML statement shouldn't
    +   * be assumed until a subsequent `Commit` call completes successfully.
    +   * 
    + * + * bool last_statement = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The lastStatement. + */ + boolean getLastStatement(); + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the routingHint field is set. + */ + boolean hasRoutingHint(); + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The routingHint. + */ + com.google.spanner.v1.RoutingHint getRoutingHint(); + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.RoutingHintOrBuilder getRoutingHintOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/GetSessionRequest.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/GetSessionRequest.java new file mode 100644 index 000000000000..f3c97672a68d --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/GetSessionRequest.java @@ -0,0 +1,608 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * The request for [GetSession][google.spanner.v1.Spanner.GetSession].
    + * 
    + * + * Protobuf type {@code google.spanner.v1.GetSessionRequest} + */ +@com.google.protobuf.Generated +public final class GetSessionRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.GetSessionRequest) + GetSessionRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "GetSessionRequest"); + } + + // Use GetSessionRequest.newBuilder() to construct. + private GetSessionRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private GetSessionRequest() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_GetSessionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_GetSessionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.GetSessionRequest.class, + com.google.spanner.v1.GetSessionRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Required. The name of the session to retrieve.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the session to retrieve.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.GetSessionRequest)) { + return super.equals(obj); + } + com.google.spanner.v1.GetSessionRequest other = (com.google.spanner.v1.GetSessionRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if 
(memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.GetSessionRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.GetSessionRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.GetSessionRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.GetSessionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.GetSessionRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.GetSessionRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.GetSessionRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.GetSessionRequest 
parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.GetSessionRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.GetSessionRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.GetSessionRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.GetSessionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.GetSessionRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for [GetSession][google.spanner.v1.Spanner.GetSession].
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.GetSessionRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.GetSessionRequest) + com.google.spanner.v1.GetSessionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_GetSessionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_GetSessionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.GetSessionRequest.class, + com.google.spanner.v1.GetSessionRequest.Builder.class); + } + + // Construct using com.google.spanner.v1.GetSessionRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_GetSessionRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.GetSessionRequest getDefaultInstanceForType() { + return com.google.spanner.v1.GetSessionRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.GetSessionRequest build() { + com.google.spanner.v1.GetSessionRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.GetSessionRequest buildPartial() { + 
com.google.spanner.v1.GetSessionRequest result = + new com.google.spanner.v1.GetSessionRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.GetSessionRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.GetSessionRequest) { + return mergeFrom((com.google.spanner.v1.GetSessionRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.GetSessionRequest other) { + if (other == com.google.spanner.v1.GetSessionRequest.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int 
bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Required. The name of the session to retrieve.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the session to retrieve.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the session to retrieve.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the session to retrieve.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the session to retrieve.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.GetSessionRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.GetSessionRequest) + private static final com.google.spanner.v1.GetSessionRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.GetSessionRequest(); + } + + public static com.google.spanner.v1.GetSessionRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetSessionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + 
@java.lang.Override + public com.google.spanner.v1.GetSessionRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/GetSessionRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/GetSessionRequestOrBuilder.java new file mode 100644 index 000000000000..28aece5eded4 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/GetSessionRequestOrBuilder.java @@ -0,0 +1,58 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface GetSessionRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.GetSessionRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the session to retrieve.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Required. The name of the session to retrieve.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Group.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Group.java new file mode 100644 index 000000000000..47cf2c5c2116 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Group.java @@ -0,0 +1,1321 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/location.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * A `Group` represents a paxos group in a database. A group is a set of
    + * tablets that are replicated across multiple servers. Groups may have a leader
    + * tablet. Groups store one (or sometimes more) ranges of keys.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.Group} + */ +@com.google.protobuf.Generated +public final class Group extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.Group) + GroupOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Group"); + } + + // Use Group.newBuilder() to construct. + private Group(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Group() { + tablets_ = java.util.Collections.emptyList(); + generation_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.LocationProto.internal_static_google_spanner_v1_Group_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_Group_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Group.class, com.google.spanner.v1.Group.Builder.class); + } + + public static final int GROUP_UID_FIELD_NUMBER = 1; + private long groupUid_ = 0L; + + /** + * + * + *
    +   * The UID of the paxos group, unique within the database. Matches the
    +   * `group_uid` field in `Range`.
    +   * 
    + * + * uint64 group_uid = 1; + * + * @return The groupUid. + */ + @java.lang.Override + public long getGroupUid() { + return groupUid_; + } + + public static final int TABLETS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List tablets_; + + /** + * + * + *
    +   * A list of tablets that are part of the group. Note that this list may not
    +   * be exhaustive; it will only include tablets the server considers useful
    +   * to the client. The returned list is ordered ascending by distance.
    +   *
    +   * Tablet UIDs reference `Tablet.tablet_uid`.
    +   * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + @java.lang.Override + public java.util.List getTabletsList() { + return tablets_; + } + + /** + * + * + *
    +   * A list of tablets that are part of the group. Note that this list may not
    +   * be exhaustive; it will only include tablets the server considers useful
    +   * to the client. The returned list is ordered ascending by distance.
    +   *
    +   * Tablet UIDs reference `Tablet.tablet_uid`.
    +   * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + @java.lang.Override + public java.util.List getTabletsOrBuilderList() { + return tablets_; + } + + /** + * + * + *
    +   * A list of tablets that are part of the group. Note that this list may not
    +   * be exhaustive; it will only include tablets the server considers useful
    +   * to the client. The returned list is ordered ascending by distance.
    +   *
    +   * Tablet UIDs reference `Tablet.tablet_uid`.
    +   * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + @java.lang.Override + public int getTabletsCount() { + return tablets_.size(); + } + + /** + * + * + *
    +   * A list of tablets that are part of the group. Note that this list may not
    +   * be exhaustive; it will only include tablets the server considers useful
    +   * to the client. The returned list is ordered ascending by distance.
    +   *
    +   * Tablet UIDs reference `Tablet.tablet_uid`.
    +   * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + @java.lang.Override + public com.google.spanner.v1.Tablet getTablets(int index) { + return tablets_.get(index); + } + + /** + * + * + *
    +   * A list of tablets that are part of the group. Note that this list may not
    +   * be exhaustive; it will only include tablets the server considers useful
    +   * to the client. The returned list is ordered ascending by distance.
    +   *
    +   * Tablet UIDs reference `Tablet.tablet_uid`.
    +   * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + @java.lang.Override + public com.google.spanner.v1.TabletOrBuilder getTabletsOrBuilder(int index) { + return tablets_.get(index); + } + + public static final int LEADER_INDEX_FIELD_NUMBER = 3; + private int leaderIndex_ = 0; + + /** + * + * + *
    +   * The last known leader tablet of the group as an index into `tablets`. May
    +   * be negative if the group has no known leader.
    +   * 
    + * + * int32 leader_index = 3; + * + * @return The leaderIndex. + */ + @java.lang.Override + public int getLeaderIndex() { + return leaderIndex_; + } + + public static final int GENERATION_FIELD_NUMBER = 4; + private com.google.protobuf.ByteString generation_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * `generation` indicates the freshness of the group information (including
    +   * leader information) contained in this proto. Generations can be compared
    +   * lexicographically; if generation A is greater than generation B, then the
    +   * `Group` corresponding to A is newer than the `Group` corresponding to B,
    +   * and should be used preferentially.
    +   * 
    + * + * bytes generation = 4; + * + * @return The generation. + */ + @java.lang.Override + public com.google.protobuf.ByteString getGeneration() { + return generation_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (groupUid_ != 0L) { + output.writeUInt64(1, groupUid_); + } + for (int i = 0; i < tablets_.size(); i++) { + output.writeMessage(2, tablets_.get(i)); + } + if (leaderIndex_ != 0) { + output.writeInt32(3, leaderIndex_); + } + if (!generation_.isEmpty()) { + output.writeBytes(4, generation_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (groupUid_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeUInt64Size(1, groupUid_); + } + for (int i = 0; i < tablets_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, tablets_.get(i)); + } + if (leaderIndex_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(3, leaderIndex_); + } + if (!generation_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(4, generation_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.Group)) { + return super.equals(obj); + } + com.google.spanner.v1.Group other = (com.google.spanner.v1.Group) obj; + + if (getGroupUid() != other.getGroupUid()) return false; + if 
(!getTabletsList().equals(other.getTabletsList())) return false; + if (getLeaderIndex() != other.getLeaderIndex()) return false; + if (!getGeneration().equals(other.getGeneration())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + GROUP_UID_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getGroupUid()); + if (getTabletsCount() > 0) { + hash = (37 * hash) + TABLETS_FIELD_NUMBER; + hash = (53 * hash) + getTabletsList().hashCode(); + } + hash = (37 * hash) + LEADER_INDEX_FIELD_NUMBER; + hash = (53 * hash) + getLeaderIndex(); + hash = (37 * hash) + GENERATION_FIELD_NUMBER; + hash = (53 * hash) + getGeneration().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.Group parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Group parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Group parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Group parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Group parseFrom(byte[] data) 
+ throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Group parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Group parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Group parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Group parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Group parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Group parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Group parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static 
Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.Group prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * A `Group` represents a paxos group in a database. A group is a set of
    +   * tablets that are replicated across multiple servers. Groups may have a leader
    +   * tablet. Groups store one (or sometimes more) ranges of keys.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.Group} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.Group) + com.google.spanner.v1.GroupOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.LocationProto.internal_static_google_spanner_v1_Group_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_Group_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Group.class, com.google.spanner.v1.Group.Builder.class); + } + + // Construct using com.google.spanner.v1.Group.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + groupUid_ = 0L; + if (tabletsBuilder_ == null) { + tablets_ = java.util.Collections.emptyList(); + } else { + tablets_ = null; + tabletsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + leaderIndex_ = 0; + generation_ = com.google.protobuf.ByteString.EMPTY; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.LocationProto.internal_static_google_spanner_v1_Group_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.Group getDefaultInstanceForType() { + return com.google.spanner.v1.Group.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.Group build() { + com.google.spanner.v1.Group result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + 
@java.lang.Override + public com.google.spanner.v1.Group buildPartial() { + com.google.spanner.v1.Group result = new com.google.spanner.v1.Group(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.v1.Group result) { + if (tabletsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + tablets_ = java.util.Collections.unmodifiableList(tablets_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.tablets_ = tablets_; + } else { + result.tablets_ = tabletsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.Group result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.groupUid_ = groupUid_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.leaderIndex_ = leaderIndex_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.generation_ = generation_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.Group) { + return mergeFrom((com.google.spanner.v1.Group) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.Group other) { + if (other == com.google.spanner.v1.Group.getDefaultInstance()) return this; + if (other.getGroupUid() != 0L) { + setGroupUid(other.getGroupUid()); + } + if (tabletsBuilder_ == null) { + if (!other.tablets_.isEmpty()) { + if (tablets_.isEmpty()) { + tablets_ = other.tablets_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureTabletsIsMutable(); + tablets_.addAll(other.tablets_); + } + onChanged(); + } + } else { + if (!other.tablets_.isEmpty()) { + if (tabletsBuilder_.isEmpty()) { + tabletsBuilder_.dispose(); + tabletsBuilder_ = null; + tablets_ = other.tablets_; + bitField0_ = (bitField0_ & ~0x00000002); + tabletsBuilder_ = + 
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetTabletsFieldBuilder() + : null; + } else { + tabletsBuilder_.addAllMessages(other.tablets_); + } + } + } + if (other.getLeaderIndex() != 0) { + setLeaderIndex(other.getLeaderIndex()); + } + if (!other.getGeneration().isEmpty()) { + setGeneration(other.getGeneration()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + groupUid_ = input.readUInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + com.google.spanner.v1.Tablet m = + input.readMessage(com.google.spanner.v1.Tablet.parser(), extensionRegistry); + if (tabletsBuilder_ == null) { + ensureTabletsIsMutable(); + tablets_.add(m); + } else { + tabletsBuilder_.addMessage(m); + } + break; + } // case 18 + case 24: + { + leaderIndex_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: + { + generation_ = input.readBytes(); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long groupUid_; + + /** + * + * + *
    +     * The UID of the paxos group, unique within the database. Matches the
    +     * `group_uid` field in `Range`.
    +     * 
    + * + * uint64 group_uid = 1; + * + * @return The groupUid. + */ + @java.lang.Override + public long getGroupUid() { + return groupUid_; + } + + /** + * + * + *
    +     * The UID of the paxos group, unique within the database. Matches the
    +     * `group_uid` field in `Range`.
    +     * 
    + * + * uint64 group_uid = 1; + * + * @param value The groupUid to set. + * @return This builder for chaining. + */ + public Builder setGroupUid(long value) { + + groupUid_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The UID of the paxos group, unique within the database. Matches the
    +     * `group_uid` field in `Range`.
    +     * 
    + * + * uint64 group_uid = 1; + * + * @return This builder for chaining. + */ + public Builder clearGroupUid() { + bitField0_ = (bitField0_ & ~0x00000001); + groupUid_ = 0L; + onChanged(); + return this; + } + + private java.util.List tablets_ = + java.util.Collections.emptyList(); + + private void ensureTabletsIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + tablets_ = new java.util.ArrayList(tablets_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Tablet, + com.google.spanner.v1.Tablet.Builder, + com.google.spanner.v1.TabletOrBuilder> + tabletsBuilder_; + + /** + * + * + *
    +     * A list of tablets that are part of the group. Note that this list may not
    +     * be exhaustive; it will only include tablets the server considers useful
    +     * to the client. The returned list is ordered ascending by distance.
    +     *
    +     * Tablet UIDs reference `Tablet.tablet_uid`.
    +     * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + public java.util.List getTabletsList() { + if (tabletsBuilder_ == null) { + return java.util.Collections.unmodifiableList(tablets_); + } else { + return tabletsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * A list of tablets that are part of the group. Note that this list may not
    +     * be exhaustive; it will only include tablets the server considers useful
    +     * to the client. The returned list is ordered ascending by distance.
    +     *
    +     * Tablet UIDs reference `Tablet.tablet_uid`.
    +     * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + public int getTabletsCount() { + if (tabletsBuilder_ == null) { + return tablets_.size(); + } else { + return tabletsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * A list of tablets that are part of the group. Note that this list may not
    +     * be exhaustive; it will only include tablets the server considers useful
    +     * to the client. The returned list is ordered ascending by distance.
    +     *
    +     * Tablet UIDs reference `Tablet.tablet_uid`.
    +     * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + public com.google.spanner.v1.Tablet getTablets(int index) { + if (tabletsBuilder_ == null) { + return tablets_.get(index); + } else { + return tabletsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * A list of tablets that are part of the group. Note that this list may not
    +     * be exhaustive; it will only include tablets the server considers useful
    +     * to the client. The returned list is ordered ascending by distance.
    +     *
    +     * Tablet UIDs reference `Tablet.tablet_uid`.
    +     * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + public Builder setTablets(int index, com.google.spanner.v1.Tablet value) { + if (tabletsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTabletsIsMutable(); + tablets_.set(index, value); + onChanged(); + } else { + tabletsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * A list of tablets that are part of the group. Note that this list may not
    +     * be exhaustive; it will only include tablets the server considers useful
    +     * to the client. The returned list is ordered ascending by distance.
    +     *
    +     * Tablet UIDs reference `Tablet.tablet_uid`.
    +     * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + public Builder setTablets(int index, com.google.spanner.v1.Tablet.Builder builderForValue) { + if (tabletsBuilder_ == null) { + ensureTabletsIsMutable(); + tablets_.set(index, builderForValue.build()); + onChanged(); + } else { + tabletsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of tablets that are part of the group. Note that this list may not
    +     * be exhaustive; it will only include tablets the server considers useful
    +     * to the client. The returned list is ordered ascending by distance.
    +     *
    +     * Tablet UIDs reference `Tablet.tablet_uid`.
    +     * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + public Builder addTablets(com.google.spanner.v1.Tablet value) { + if (tabletsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTabletsIsMutable(); + tablets_.add(value); + onChanged(); + } else { + tabletsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * A list of tablets that are part of the group. Note that this list may not
    +     * be exhaustive; it will only include tablets the server considers useful
    +     * to the client. The returned list is ordered ascending by distance.
    +     *
    +     * Tablet UIDs reference `Tablet.tablet_uid`.
    +     * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + public Builder addTablets(int index, com.google.spanner.v1.Tablet value) { + if (tabletsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTabletsIsMutable(); + tablets_.add(index, value); + onChanged(); + } else { + tabletsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * A list of tablets that are part of the group. Note that this list may not
    +     * be exhaustive; it will only include tablets the server considers useful
    +     * to the client. The returned list is ordered ascending by distance.
    +     *
    +     * Tablet UIDs reference `Tablet.tablet_uid`.
    +     * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + public Builder addTablets(com.google.spanner.v1.Tablet.Builder builderForValue) { + if (tabletsBuilder_ == null) { + ensureTabletsIsMutable(); + tablets_.add(builderForValue.build()); + onChanged(); + } else { + tabletsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of tablets that are part of the group. Note that this list may not
    +     * be exhaustive; it will only include tablets the server considers useful
    +     * to the client. The returned list is ordered ascending by distance.
    +     *
    +     * Tablet UIDs reference `Tablet.tablet_uid`.
    +     * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + public Builder addTablets(int index, com.google.spanner.v1.Tablet.Builder builderForValue) { + if (tabletsBuilder_ == null) { + ensureTabletsIsMutable(); + tablets_.add(index, builderForValue.build()); + onChanged(); + } else { + tabletsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of tablets that are part of the group. Note that this list may not
    +     * be exhaustive; it will only include tablets the server considers useful
    +     * to the client. The returned list is ordered ascending by distance.
    +     *
    +     * Tablet UIDs reference `Tablet.tablet_uid`.
    +     * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + public Builder addAllTablets( + java.lang.Iterable values) { + if (tabletsBuilder_ == null) { + ensureTabletsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, tablets_); + onChanged(); + } else { + tabletsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * A list of tablets that are part of the group. Note that this list may not
    +     * be exhaustive; it will only include tablets the server considers useful
    +     * to the client. The returned list is ordered ascending by distance.
    +     *
    +     * Tablet UIDs reference `Tablet.tablet_uid`.
    +     * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + public Builder clearTablets() { + if (tabletsBuilder_ == null) { + tablets_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + tabletsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * A list of tablets that are part of the group. Note that this list may not
    +     * be exhaustive; it will only include tablets the server considers useful
    +     * to the client. The returned list is ordered ascending by distance.
    +     *
    +     * Tablet UIDs reference `Tablet.tablet_uid`.
    +     * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + public Builder removeTablets(int index) { + if (tabletsBuilder_ == null) { + ensureTabletsIsMutable(); + tablets_.remove(index); + onChanged(); + } else { + tabletsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * A list of tablets that are part of the group. Note that this list may not
    +     * be exhaustive; it will only include tablets the server considers useful
    +     * to the client. The returned list is ordered ascending by distance.
    +     *
    +     * Tablet UIDs reference `Tablet.tablet_uid`.
    +     * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + public com.google.spanner.v1.Tablet.Builder getTabletsBuilder(int index) { + return internalGetTabletsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * A list of tablets that are part of the group. Note that this list may not
    +     * be exhaustive; it will only include tablets the server considers useful
    +     * to the client. The returned list is ordered ascending by distance.
    +     *
    +     * Tablet UIDs reference `Tablet.tablet_uid`.
    +     * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + public com.google.spanner.v1.TabletOrBuilder getTabletsOrBuilder(int index) { + if (tabletsBuilder_ == null) { + return tablets_.get(index); + } else { + return tabletsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * A list of tablets that are part of the group. Note that this list may not
    +     * be exhaustive; it will only include tablets the server considers useful
    +     * to the client. The returned list is ordered ascending by distance.
    +     *
    +     * Tablet UIDs reference `Tablet.tablet_uid`.
    +     * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + public java.util.List + getTabletsOrBuilderList() { + if (tabletsBuilder_ != null) { + return tabletsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tablets_); + } + } + + /** + * + * + *
    +     * A list of tablets that are part of the group. Note that this list may not
    +     * be exhaustive; it will only include tablets the server considers useful
    +     * to the client. The returned list is ordered ascending by distance.
    +     *
    +     * Tablet UIDs reference `Tablet.tablet_uid`.
    +     * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + public com.google.spanner.v1.Tablet.Builder addTabletsBuilder() { + return internalGetTabletsFieldBuilder() + .addBuilder(com.google.spanner.v1.Tablet.getDefaultInstance()); + } + + /** + * + * + *
    +     * A list of tablets that are part of the group. Note that this list may not
    +     * be exhaustive; it will only include tablets the server considers useful
    +     * to the client. The returned list is ordered ascending by distance.
    +     *
    +     * Tablet UIDs reference `Tablet.tablet_uid`.
    +     * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + public com.google.spanner.v1.Tablet.Builder addTabletsBuilder(int index) { + return internalGetTabletsFieldBuilder() + .addBuilder(index, com.google.spanner.v1.Tablet.getDefaultInstance()); + } + + /** + * + * + *
    +     * A list of tablets that are part of the group. Note that this list may not
    +     * be exhaustive; it will only include tablets the server considers useful
    +     * to the client. The returned list is ordered ascending by distance.
    +     *
    +     * Tablet UIDs reference `Tablet.tablet_uid`.
    +     * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + public java.util.List getTabletsBuilderList() { + return internalGetTabletsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Tablet, + com.google.spanner.v1.Tablet.Builder, + com.google.spanner.v1.TabletOrBuilder> + internalGetTabletsFieldBuilder() { + if (tabletsBuilder_ == null) { + tabletsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Tablet, + com.google.spanner.v1.Tablet.Builder, + com.google.spanner.v1.TabletOrBuilder>( + tablets_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + tablets_ = null; + } + return tabletsBuilder_; + } + + private int leaderIndex_; + + /** + * + * + *
    +     * The last known leader tablet of the group as an index into `tablets`. May
    +     * be negative if the group has no known leader.
    +     * 
    + * + * int32 leader_index = 3; + * + * @return The leaderIndex. + */ + @java.lang.Override + public int getLeaderIndex() { + return leaderIndex_; + } + + /** + * + * + *
    +     * The last known leader tablet of the group as an index into `tablets`. May
    +     * be negative if the group has no known leader.
    +     * 
    + * + * int32 leader_index = 3; + * + * @param value The leaderIndex to set. + * @return This builder for chaining. + */ + public Builder setLeaderIndex(int value) { + + leaderIndex_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The last known leader tablet of the group as an index into `tablets`. May
    +     * be negative if the group has no known leader.
    +     * 
    + * + * int32 leader_index = 3; + * + * @return This builder for chaining. + */ + public Builder clearLeaderIndex() { + bitField0_ = (bitField0_ & ~0x00000004); + leaderIndex_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString generation_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * `generation` indicates the freshness of the group information (including
    +     * leader information) contained in this proto. Generations can be compared
    +     * lexicographically; if generation A is greater than generation B, then the
    +     * `Group` corresponding to A is newer than the `Group` corresponding to B,
    +     * and should be used preferentially.
    +     * 
    + * + * bytes generation = 4; + * + * @return The generation. + */ + @java.lang.Override + public com.google.protobuf.ByteString getGeneration() { + return generation_; + } + + /** + * + * + *
    +     * `generation` indicates the freshness of the group information (including
    +     * leader information) contained in this proto. Generations can be compared
    +     * lexicographically; if generation A is greater than generation B, then the
    +     * `Group` corresponding to A is newer than the `Group` corresponding to B,
    +     * and should be used preferentially.
    +     * 
    + * + * bytes generation = 4; + * + * @param value The generation to set. + * @return This builder for chaining. + */ + public Builder setGeneration(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + generation_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * `generation` indicates the freshness of the group information (including
    +     * leader information) contained in this proto. Generations can be compared
    +     * lexicographically; if generation A is greater than generation B, then the
    +     * `Group` corresponding to A is newer than the `Group` corresponding to B,
    +     * and should be used preferentially.
    +     * 
    + * + * bytes generation = 4; + * + * @return This builder for chaining. + */ + public Builder clearGeneration() { + bitField0_ = (bitField0_ & ~0x00000008); + generation_ = getDefaultInstance().getGeneration(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.Group) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.Group) + private static final com.google.spanner.v1.Group DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.Group(); + } + + public static com.google.spanner.v1.Group getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Group parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.Group getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/GroupOrBuilder.java 
b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/GroupOrBuilder.java new file mode 100644 index 000000000000..75312cd6a773 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/GroupOrBuilder.java @@ -0,0 +1,148 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/location.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface GroupOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.Group) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The UID of the paxos group, unique within the database. Matches the
    +   * `group_uid` field in `Range`.
    +   * 
    + * + * uint64 group_uid = 1; + * + * @return The groupUid. + */ + long getGroupUid(); + + /** + * + * + *
    +   * A list of tablets that are part of the group. Note that this list may not
    +   * be exhaustive; it will only include tablets the server considers useful
    +   * to the client. The returned list is ordered ascending by distance.
    +   *
    +   * Tablet UIDs reference `Tablet.tablet_uid`.
    +   * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + java.util.List getTabletsList(); + + /** + * + * + *
    +   * A list of tablets that are part of the group. Note that this list may not
    +   * be exhaustive; it will only include tablets the server considers useful
    +   * to the client. The returned list is ordered ascending by distance.
    +   *
    +   * Tablet UIDs reference `Tablet.tablet_uid`.
    +   * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + com.google.spanner.v1.Tablet getTablets(int index); + + /** + * + * + *
    +   * A list of tablets that are part of the group. Note that this list may not
    +   * be exhaustive; it will only include tablets the server considers useful
    +   * to the client. The returned list is ordered ascending by distance.
    +   *
    +   * Tablet UIDs reference `Tablet.tablet_uid`.
    +   * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + int getTabletsCount(); + + /** + * + * + *
    +   * A list of tablets that are part of the group. Note that this list may not
    +   * be exhaustive; it will only include tablets the server considers useful
    +   * to the client. The returned list is ordered ascending by distance.
    +   *
    +   * Tablet UIDs reference `Tablet.tablet_uid`.
    +   * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + java.util.List getTabletsOrBuilderList(); + + /** + * + * + *
    +   * A list of tablets that are part of the group. Note that this list may not
    +   * be exhaustive; it will only include tablets the server considers useful
    +   * to the client. The returned list is ordered ascending by distance.
    +   *
    +   * Tablet UIDs reference `Tablet.tablet_uid`.
    +   * 
    + * + * repeated .google.spanner.v1.Tablet tablets = 2; + */ + com.google.spanner.v1.TabletOrBuilder getTabletsOrBuilder(int index); + + /** + * + * + *
    +   * The last known leader tablet of the group as an index into `tablets`. May
    +   * be negative if the group has no known leader.
    +   * 
    + * + * int32 leader_index = 3; + * + * @return The leaderIndex. + */ + int getLeaderIndex(); + + /** + * + * + *
    +   * `generation` indicates the freshness of the group information (including
    +   * leader information) contained in this proto. Generations can be compared
    +   * lexicographically; if generation A is greater than generation B, then the
    +   * `Group` corresponding to A is newer than the `Group` corresponding to B,
    +   * and should be used preferentially.
    +   * 
    + * + * bytes generation = 4; + * + * @return The generation. + */ + com.google.protobuf.ByteString getGeneration(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeyRange.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeyRange.java new file mode 100644 index 000000000000..992a09ec1346 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeyRange.java @@ -0,0 +1,1994 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/keys.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * KeyRange represents a range of rows in a table or index.
    + *
    + * A range has a start key and an end key. These keys can be open or
    + * closed, indicating if the range includes rows with that key.
    + *
    + * Keys are represented by lists, where the ith value in the list
    + * corresponds to the ith component of the table or index primary key.
    + * Individual values are encoded as described
    + * [here][google.spanner.v1.TypeCode].
    + *
    + * For example, consider the following table definition:
    + *
    + * CREATE TABLE UserEvents (
    + * UserName STRING(MAX),
    + * EventDate STRING(10)
    + * ) PRIMARY KEY(UserName, EventDate);
    + *
    + * The following keys name rows in this table:
    + *
    + * ["Bob", "2014-09-23"]
    + * ["Alfred", "2015-06-12"]
    + *
    + * Since the `UserEvents` table's `PRIMARY KEY` clause names two
    + * columns, each `UserEvents` key has two elements; the first is the
    + * `UserName`, and the second is the `EventDate`.
    + *
    + * Key ranges with multiple components are interpreted
    + * lexicographically by component using the table or index key's declared
    + * sort order. For example, the following range returns all events for
    + * user `"Bob"` that occurred in the year 2015:
    + *
    + * "start_closed": ["Bob", "2015-01-01"]
    + * "end_closed": ["Bob", "2015-12-31"]
    + *
    + * Start and end keys can omit trailing key components. This affects the
    + * inclusion and exclusion of rows that exactly match the provided key
    + * components: if the key is closed, then rows that exactly match the
    + * provided components are included; if the key is open, then rows
    + * that exactly match are not included.
    + *
    + * For example, the following range includes all events for `"Bob"` that
    + * occurred during and after the year 2000:
    + *
    + * "start_closed": ["Bob", "2000-01-01"]
    + * "end_closed": ["Bob"]
    + *
    + * The next example retrieves all events for `"Bob"`:
    + *
    + * "start_closed": ["Bob"]
    + * "end_closed": ["Bob"]
    + *
    + * To retrieve events before the year 2000:
    + *
    + * "start_closed": ["Bob"]
    + * "end_open": ["Bob", "2000-01-01"]
    + *
    + * The following range includes all rows in the table:
    + *
    + * "start_closed": []
    + * "end_closed": []
    + *
    + * This range returns all users whose `UserName` begins with any
    + * character from A to C:
    + *
    + * "start_closed": ["A"]
    + * "end_open": ["D"]
    + *
    + * This range returns all users whose `UserName` begins with B:
    + *
    + * "start_closed": ["B"]
    + * "end_open": ["C"]
    + *
    + * Key ranges honor column sort order. For example, suppose a table is
    + * defined as follows:
    + *
    + * CREATE TABLE DescendingSortedTable {
    + * Key INT64,
    + * ...
    + * ) PRIMARY KEY(Key DESC);
    + *
    + * The following range retrieves all rows with key values between 1
    + * and 100 inclusive:
    + *
    + * "start_closed": ["100"]
    + * "end_closed": ["1"]
    + *
    + * Note that 100 is passed as the start, and 1 is passed as the end,
    + * because `Key` is a descending column in the schema.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.KeyRange} + */ +@com.google.protobuf.Generated +public final class KeyRange extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.KeyRange) + KeyRangeOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "KeyRange"); + } + + // Use KeyRange.newBuilder() to construct. + private KeyRange(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private KeyRange() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.KeysProto.internal_static_google_spanner_v1_KeyRange_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.KeysProto + .internal_static_google_spanner_v1_KeyRange_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.KeyRange.class, com.google.spanner.v1.KeyRange.Builder.class); + } + + private int startKeyTypeCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object startKeyType_; + + public enum StartKeyTypeCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + START_CLOSED(1), + START_OPEN(2), + STARTKEYTYPE_NOT_SET(0); + private final int value; + + private StartKeyTypeCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static StartKeyTypeCase valueOf(int value) { + return forNumber(value); + } + + public static StartKeyTypeCase forNumber(int value) { + switch (value) { + case 1: + return START_CLOSED; + case 2: + return START_OPEN; + case 0: + return STARTKEYTYPE_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public StartKeyTypeCase getStartKeyTypeCase() { + return StartKeyTypeCase.forNumber(startKeyTypeCase_); + } + + private int endKeyTypeCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object endKeyType_; + + public enum EndKeyTypeCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + END_CLOSED(3), + END_OPEN(4), + ENDKEYTYPE_NOT_SET(0); + private final int value; + + private EndKeyTypeCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static EndKeyTypeCase valueOf(int value) { + return forNumber(value); + } + + public static EndKeyTypeCase forNumber(int value) { + switch (value) { + case 3: + return END_CLOSED; + case 4: + return END_OPEN; + case 0: + return ENDKEYTYPE_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public EndKeyTypeCase getEndKeyTypeCase() { + return EndKeyTypeCase.forNumber(endKeyTypeCase_); + } + + public static final int START_CLOSED_FIELD_NUMBER = 1; + + /** + * + * + *
    +   * If the start is closed, then the range includes all rows whose
    +   * first `len(start_closed)` key columns exactly match `start_closed`.
    +   * 
    + * + * .google.protobuf.ListValue start_closed = 1; + * + * @return Whether the startClosed field is set. + */ + @java.lang.Override + public boolean hasStartClosed() { + return startKeyTypeCase_ == 1; + } + + /** + * + * + *
    +   * If the start is closed, then the range includes all rows whose
    +   * first `len(start_closed)` key columns exactly match `start_closed`.
    +   * 
    + * + * .google.protobuf.ListValue start_closed = 1; + * + * @return The startClosed. + */ + @java.lang.Override + public com.google.protobuf.ListValue getStartClosed() { + if (startKeyTypeCase_ == 1) { + return (com.google.protobuf.ListValue) startKeyType_; + } + return com.google.protobuf.ListValue.getDefaultInstance(); + } + + /** + * + * + *
    +   * If the start is closed, then the range includes all rows whose
    +   * first `len(start_closed)` key columns exactly match `start_closed`.
    +   * 
    + * + * .google.protobuf.ListValue start_closed = 1; + */ + @java.lang.Override + public com.google.protobuf.ListValueOrBuilder getStartClosedOrBuilder() { + if (startKeyTypeCase_ == 1) { + return (com.google.protobuf.ListValue) startKeyType_; + } + return com.google.protobuf.ListValue.getDefaultInstance(); + } + + public static final int START_OPEN_FIELD_NUMBER = 2; + + /** + * + * + *
    +   * If the start is open, then the range excludes rows whose first
    +   * `len(start_open)` key columns exactly match `start_open`.
    +   * 
    + * + * .google.protobuf.ListValue start_open = 2; + * + * @return Whether the startOpen field is set. + */ + @java.lang.Override + public boolean hasStartOpen() { + return startKeyTypeCase_ == 2; + } + + /** + * + * + *
    +   * If the start is open, then the range excludes rows whose first
    +   * `len(start_open)` key columns exactly match `start_open`.
    +   * 
    + * + * .google.protobuf.ListValue start_open = 2; + * + * @return The startOpen. + */ + @java.lang.Override + public com.google.protobuf.ListValue getStartOpen() { + if (startKeyTypeCase_ == 2) { + return (com.google.protobuf.ListValue) startKeyType_; + } + return com.google.protobuf.ListValue.getDefaultInstance(); + } + + /** + * + * + *
    +   * If the start is open, then the range excludes rows whose first
    +   * `len(start_open)` key columns exactly match `start_open`.
    +   * 
    + * + * .google.protobuf.ListValue start_open = 2; + */ + @java.lang.Override + public com.google.protobuf.ListValueOrBuilder getStartOpenOrBuilder() { + if (startKeyTypeCase_ == 2) { + return (com.google.protobuf.ListValue) startKeyType_; + } + return com.google.protobuf.ListValue.getDefaultInstance(); + } + + public static final int END_CLOSED_FIELD_NUMBER = 3; + + /** + * + * + *
    +   * If the end is closed, then the range includes all rows whose
    +   * first `len(end_closed)` key columns exactly match `end_closed`.
    +   * 
    + * + * .google.protobuf.ListValue end_closed = 3; + * + * @return Whether the endClosed field is set. + */ + @java.lang.Override + public boolean hasEndClosed() { + return endKeyTypeCase_ == 3; + } + + /** + * + * + *
    +   * If the end is closed, then the range includes all rows whose
    +   * first `len(end_closed)` key columns exactly match `end_closed`.
    +   * 
    + * + * .google.protobuf.ListValue end_closed = 3; + * + * @return The endClosed. + */ + @java.lang.Override + public com.google.protobuf.ListValue getEndClosed() { + if (endKeyTypeCase_ == 3) { + return (com.google.protobuf.ListValue) endKeyType_; + } + return com.google.protobuf.ListValue.getDefaultInstance(); + } + + /** + * + * + *
    +   * If the end is closed, then the range includes all rows whose
    +   * first `len(end_closed)` key columns exactly match `end_closed`.
    +   * 
    + * + * .google.protobuf.ListValue end_closed = 3; + */ + @java.lang.Override + public com.google.protobuf.ListValueOrBuilder getEndClosedOrBuilder() { + if (endKeyTypeCase_ == 3) { + return (com.google.protobuf.ListValue) endKeyType_; + } + return com.google.protobuf.ListValue.getDefaultInstance(); + } + + public static final int END_OPEN_FIELD_NUMBER = 4; + + /** + * + * + *
    +   * If the end is open, then the range excludes rows whose first
    +   * `len(end_open)` key columns exactly match `end_open`.
    +   * 
    + * + * .google.protobuf.ListValue end_open = 4; + * + * @return Whether the endOpen field is set. + */ + @java.lang.Override + public boolean hasEndOpen() { + return endKeyTypeCase_ == 4; + } + + /** + * + * + *
    +   * If the end is open, then the range excludes rows whose first
    +   * `len(end_open)` key columns exactly match `end_open`.
    +   * 
    + * + * .google.protobuf.ListValue end_open = 4; + * + * @return The endOpen. + */ + @java.lang.Override + public com.google.protobuf.ListValue getEndOpen() { + if (endKeyTypeCase_ == 4) { + return (com.google.protobuf.ListValue) endKeyType_; + } + return com.google.protobuf.ListValue.getDefaultInstance(); + } + + /** + * + * + *
    +   * If the end is open, then the range excludes rows whose first
    +   * `len(end_open)` key columns exactly match `end_open`.
    +   * 
    + * + * .google.protobuf.ListValue end_open = 4; + */ + @java.lang.Override + public com.google.protobuf.ListValueOrBuilder getEndOpenOrBuilder() { + if (endKeyTypeCase_ == 4) { + return (com.google.protobuf.ListValue) endKeyType_; + } + return com.google.protobuf.ListValue.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (startKeyTypeCase_ == 1) { + output.writeMessage(1, (com.google.protobuf.ListValue) startKeyType_); + } + if (startKeyTypeCase_ == 2) { + output.writeMessage(2, (com.google.protobuf.ListValue) startKeyType_); + } + if (endKeyTypeCase_ == 3) { + output.writeMessage(3, (com.google.protobuf.ListValue) endKeyType_); + } + if (endKeyTypeCase_ == 4) { + output.writeMessage(4, (com.google.protobuf.ListValue) endKeyType_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (startKeyTypeCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, (com.google.protobuf.ListValue) startKeyType_); + } + if (startKeyTypeCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, (com.google.protobuf.ListValue) startKeyType_); + } + if (endKeyTypeCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, (com.google.protobuf.ListValue) endKeyType_); + } + if (endKeyTypeCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.protobuf.ListValue) endKeyType_); + } + size += getUnknownFields().getSerializedSize(); + 
memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.KeyRange)) { + return super.equals(obj); + } + com.google.spanner.v1.KeyRange other = (com.google.spanner.v1.KeyRange) obj; + + if (!getStartKeyTypeCase().equals(other.getStartKeyTypeCase())) return false; + switch (startKeyTypeCase_) { + case 1: + if (!getStartClosed().equals(other.getStartClosed())) return false; + break; + case 2: + if (!getStartOpen().equals(other.getStartOpen())) return false; + break; + case 0: + default: + } + if (!getEndKeyTypeCase().equals(other.getEndKeyTypeCase())) return false; + switch (endKeyTypeCase_) { + case 3: + if (!getEndClosed().equals(other.getEndClosed())) return false; + break; + case 4: + if (!getEndOpen().equals(other.getEndOpen())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (startKeyTypeCase_) { + case 1: + hash = (37 * hash) + START_CLOSED_FIELD_NUMBER; + hash = (53 * hash) + getStartClosed().hashCode(); + break; + case 2: + hash = (37 * hash) + START_OPEN_FIELD_NUMBER; + hash = (53 * hash) + getStartOpen().hashCode(); + break; + case 0: + default: + } + switch (endKeyTypeCase_) { + case 3: + hash = (37 * hash) + END_CLOSED_FIELD_NUMBER; + hash = (53 * hash) + getEndClosed().hashCode(); + break; + case 4: + hash = (37 * hash) + END_OPEN_FIELD_NUMBER; + hash = (53 * hash) + getEndOpen().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.KeyRange parseFrom(java.nio.ByteBuffer data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.KeyRange parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.KeyRange parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.KeyRange parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.KeyRange parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.KeyRange parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.KeyRange parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.KeyRange parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.KeyRange parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static 
com.google.spanner.v1.KeyRange parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.KeyRange parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.KeyRange parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.KeyRange prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * KeyRange represents a range of rows in a table or index.
    +   *
    +   * A range has a start key and an end key. These keys can be open or
    +   * closed, indicating if the range includes rows with that key.
    +   *
    +   * Keys are represented by lists, where the ith value in the list
    +   * corresponds to the ith component of the table or index primary key.
    +   * Individual values are encoded as described
    +   * [here][google.spanner.v1.TypeCode].
    +   *
    +   * For example, consider the following table definition:
    +   *
    +   * CREATE TABLE UserEvents (
    +   * UserName STRING(MAX),
    +   * EventDate STRING(10)
    +   * ) PRIMARY KEY(UserName, EventDate);
    +   *
    +   * The following keys name rows in this table:
    +   *
    +   * ["Bob", "2014-09-23"]
    +   * ["Alfred", "2015-06-12"]
    +   *
    +   * Since the `UserEvents` table's `PRIMARY KEY` clause names two
    +   * columns, each `UserEvents` key has two elements; the first is the
    +   * `UserName`, and the second is the `EventDate`.
    +   *
    +   * Key ranges with multiple components are interpreted
    +   * lexicographically by component using the table or index key's declared
    +   * sort order. For example, the following range returns all events for
    +   * user `"Bob"` that occurred in the year 2015:
    +   *
    +   * "start_closed": ["Bob", "2015-01-01"]
    +   * "end_closed": ["Bob", "2015-12-31"]
    +   *
    +   * Start and end keys can omit trailing key components. This affects the
    +   * inclusion and exclusion of rows that exactly match the provided key
    +   * components: if the key is closed, then rows that exactly match the
    +   * provided components are included; if the key is open, then rows
    +   * that exactly match are not included.
    +   *
    +   * For example, the following range includes all events for `"Bob"` that
    +   * occurred during and after the year 2000:
    +   *
    +   * "start_closed": ["Bob", "2000-01-01"]
    +   * "end_closed": ["Bob"]
    +   *
    +   * The next example retrieves all events for `"Bob"`:
    +   *
    +   * "start_closed": ["Bob"]
    +   * "end_closed": ["Bob"]
    +   *
    +   * To retrieve events before the year 2000:
    +   *
    +   * "start_closed": ["Bob"]
    +   * "end_open": ["Bob", "2000-01-01"]
    +   *
    +   * The following range includes all rows in the table:
    +   *
    +   * "start_closed": []
    +   * "end_closed": []
    +   *
    +   * This range returns all users whose `UserName` begins with any
    +   * character from A to C:
    +   *
    +   * "start_closed": ["A"]
    +   * "end_open": ["D"]
    +   *
    +   * This range returns all users whose `UserName` begins with B:
    +   *
    +   * "start_closed": ["B"]
    +   * "end_open": ["C"]
    +   *
    +   * Key ranges honor column sort order. For example, suppose a table is
    +   * defined as follows:
    +   *
    +   * CREATE TABLE DescendingSortedTable {
    +   * Key INT64,
    +   * ...
    +   * ) PRIMARY KEY(Key DESC);
    +   *
    +   * The following range retrieves all rows with key values between 1
    +   * and 100 inclusive:
    +   *
    +   * "start_closed": ["100"]
    +   * "end_closed": ["1"]
    +   *
    +   * Note that 100 is passed as the start, and 1 is passed as the end,
    +   * because `Key` is a descending column in the schema.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.KeyRange} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.KeyRange) + com.google.spanner.v1.KeyRangeOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.KeysProto.internal_static_google_spanner_v1_KeyRange_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.KeysProto + .internal_static_google_spanner_v1_KeyRange_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.KeyRange.class, com.google.spanner.v1.KeyRange.Builder.class); + } + + // Construct using com.google.spanner.v1.KeyRange.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (startClosedBuilder_ != null) { + startClosedBuilder_.clear(); + } + if (startOpenBuilder_ != null) { + startOpenBuilder_.clear(); + } + if (endClosedBuilder_ != null) { + endClosedBuilder_.clear(); + } + if (endOpenBuilder_ != null) { + endOpenBuilder_.clear(); + } + startKeyTypeCase_ = 0; + startKeyType_ = null; + endKeyTypeCase_ = 0; + endKeyType_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.KeysProto.internal_static_google_spanner_v1_KeyRange_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.KeyRange getDefaultInstanceForType() { + return com.google.spanner.v1.KeyRange.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.KeyRange build() { + com.google.spanner.v1.KeyRange result = buildPartial(); + if 
(!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.KeyRange buildPartial() { + com.google.spanner.v1.KeyRange result = new com.google.spanner.v1.KeyRange(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.KeyRange result) { + int from_bitField0_ = bitField0_; + } + + private void buildPartialOneofs(com.google.spanner.v1.KeyRange result) { + result.startKeyTypeCase_ = startKeyTypeCase_; + result.startKeyType_ = this.startKeyType_; + if (startKeyTypeCase_ == 1 && startClosedBuilder_ != null) { + result.startKeyType_ = startClosedBuilder_.build(); + } + if (startKeyTypeCase_ == 2 && startOpenBuilder_ != null) { + result.startKeyType_ = startOpenBuilder_.build(); + } + result.endKeyTypeCase_ = endKeyTypeCase_; + result.endKeyType_ = this.endKeyType_; + if (endKeyTypeCase_ == 3 && endClosedBuilder_ != null) { + result.endKeyType_ = endClosedBuilder_.build(); + } + if (endKeyTypeCase_ == 4 && endOpenBuilder_ != null) { + result.endKeyType_ = endOpenBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.KeyRange) { + return mergeFrom((com.google.spanner.v1.KeyRange) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.KeyRange other) { + if (other == com.google.spanner.v1.KeyRange.getDefaultInstance()) return this; + switch (other.getStartKeyTypeCase()) { + case START_CLOSED: + { + mergeStartClosed(other.getStartClosed()); + break; + } + case START_OPEN: + { + mergeStartOpen(other.getStartOpen()); + break; + } + case STARTKEYTYPE_NOT_SET: + { + break; + } + } + switch (other.getEndKeyTypeCase()) { + case END_CLOSED: + { + mergeEndClosed(other.getEndClosed()); + break; + } + 
case END_OPEN: + { + mergeEndOpen(other.getEndOpen()); + break; + } + case ENDKEYTYPE_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetStartClosedFieldBuilder().getBuilder(), extensionRegistry); + startKeyTypeCase_ = 1; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetStartOpenFieldBuilder().getBuilder(), extensionRegistry); + startKeyTypeCase_ = 2; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetEndClosedFieldBuilder().getBuilder(), extensionRegistry); + endKeyTypeCase_ = 3; + break; + } // case 26 + case 34: + { + input.readMessage(internalGetEndOpenFieldBuilder().getBuilder(), extensionRegistry); + endKeyTypeCase_ = 4; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int startKeyTypeCase_ = 0; + private java.lang.Object startKeyType_; + + public StartKeyTypeCase getStartKeyTypeCase() { + return StartKeyTypeCase.forNumber(startKeyTypeCase_); + } + + public Builder clearStartKeyType() { + startKeyTypeCase_ = 0; + startKeyType_ = null; + onChanged(); + return this; + } + + private int 
endKeyTypeCase_ = 0; + private java.lang.Object endKeyType_; + + public EndKeyTypeCase getEndKeyTypeCase() { + return EndKeyTypeCase.forNumber(endKeyTypeCase_); + } + + public Builder clearEndKeyType() { + endKeyTypeCase_ = 0; + endKeyType_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder> + startClosedBuilder_; + + /** + * + * + *
    +     * If the start is closed, then the range includes all rows whose
    +     * first `len(start_closed)` key columns exactly match `start_closed`.
    +     * 
    + * + * .google.protobuf.ListValue start_closed = 1; + * + * @return Whether the startClosed field is set. + */ + @java.lang.Override + public boolean hasStartClosed() { + return startKeyTypeCase_ == 1; + } + + /** + * + * + *
    +     * If the start is closed, then the range includes all rows whose
    +     * first `len(start_closed)` key columns exactly match `start_closed`.
    +     * 
    + * + * .google.protobuf.ListValue start_closed = 1; + * + * @return The startClosed. + */ + @java.lang.Override + public com.google.protobuf.ListValue getStartClosed() { + if (startClosedBuilder_ == null) { + if (startKeyTypeCase_ == 1) { + return (com.google.protobuf.ListValue) startKeyType_; + } + return com.google.protobuf.ListValue.getDefaultInstance(); + } else { + if (startKeyTypeCase_ == 1) { + return startClosedBuilder_.getMessage(); + } + return com.google.protobuf.ListValue.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * If the start is closed, then the range includes all rows whose
    +     * first `len(start_closed)` key columns exactly match `start_closed`.
    +     * 
    + * + * .google.protobuf.ListValue start_closed = 1; + */ + public Builder setStartClosed(com.google.protobuf.ListValue value) { + if (startClosedBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + startKeyType_ = value; + onChanged(); + } else { + startClosedBuilder_.setMessage(value); + } + startKeyTypeCase_ = 1; + return this; + } + + /** + * + * + *
    +     * If the start is closed, then the range includes all rows whose
    +     * first `len(start_closed)` key columns exactly match `start_closed`.
    +     * 
    + * + * .google.protobuf.ListValue start_closed = 1; + */ + public Builder setStartClosed(com.google.protobuf.ListValue.Builder builderForValue) { + if (startClosedBuilder_ == null) { + startKeyType_ = builderForValue.build(); + onChanged(); + } else { + startClosedBuilder_.setMessage(builderForValue.build()); + } + startKeyTypeCase_ = 1; + return this; + } + + /** + * + * + *
    +     * If the start is closed, then the range includes all rows whose
    +     * first `len(start_closed)` key columns exactly match `start_closed`.
    +     * 
    + * + * .google.protobuf.ListValue start_closed = 1; + */ + public Builder mergeStartClosed(com.google.protobuf.ListValue value) { + if (startClosedBuilder_ == null) { + if (startKeyTypeCase_ == 1 + && startKeyType_ != com.google.protobuf.ListValue.getDefaultInstance()) { + startKeyType_ = + com.google.protobuf.ListValue.newBuilder( + (com.google.protobuf.ListValue) startKeyType_) + .mergeFrom(value) + .buildPartial(); + } else { + startKeyType_ = value; + } + onChanged(); + } else { + if (startKeyTypeCase_ == 1) { + startClosedBuilder_.mergeFrom(value); + } else { + startClosedBuilder_.setMessage(value); + } + } + startKeyTypeCase_ = 1; + return this; + } + + /** + * + * + *
    +     * If the start is closed, then the range includes all rows whose
    +     * first `len(start_closed)` key columns exactly match `start_closed`.
    +     * 
    + * + * .google.protobuf.ListValue start_closed = 1; + */ + public Builder clearStartClosed() { + if (startClosedBuilder_ == null) { + if (startKeyTypeCase_ == 1) { + startKeyTypeCase_ = 0; + startKeyType_ = null; + onChanged(); + } + } else { + if (startKeyTypeCase_ == 1) { + startKeyTypeCase_ = 0; + startKeyType_ = null; + } + startClosedBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * If the start is closed, then the range includes all rows whose
    +     * first `len(start_closed)` key columns exactly match `start_closed`.
    +     * 
    + * + * .google.protobuf.ListValue start_closed = 1; + */ + public com.google.protobuf.ListValue.Builder getStartClosedBuilder() { + return internalGetStartClosedFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * If the start is closed, then the range includes all rows whose
    +     * first `len(start_closed)` key columns exactly match `start_closed`.
    +     * 
    + * + * .google.protobuf.ListValue start_closed = 1; + */ + @java.lang.Override + public com.google.protobuf.ListValueOrBuilder getStartClosedOrBuilder() { + if ((startKeyTypeCase_ == 1) && (startClosedBuilder_ != null)) { + return startClosedBuilder_.getMessageOrBuilder(); + } else { + if (startKeyTypeCase_ == 1) { + return (com.google.protobuf.ListValue) startKeyType_; + } + return com.google.protobuf.ListValue.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * If the start is closed, then the range includes all rows whose
    +     * first `len(start_closed)` key columns exactly match `start_closed`.
    +     * 
    + * + * .google.protobuf.ListValue start_closed = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder> + internalGetStartClosedFieldBuilder() { + if (startClosedBuilder_ == null) { + if (!(startKeyTypeCase_ == 1)) { + startKeyType_ = com.google.protobuf.ListValue.getDefaultInstance(); + } + startClosedBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder>( + (com.google.protobuf.ListValue) startKeyType_, getParentForChildren(), isClean()); + startKeyType_ = null; + } + startKeyTypeCase_ = 1; + onChanged(); + return startClosedBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder> + startOpenBuilder_; + + /** + * + * + *
    +     * If the start is open, then the range excludes rows whose first
    +     * `len(start_open)` key columns exactly match `start_open`.
    +     * 
    + * + * .google.protobuf.ListValue start_open = 2; + * + * @return Whether the startOpen field is set. + */ + @java.lang.Override + public boolean hasStartOpen() { + return startKeyTypeCase_ == 2; + } + + /** + * + * + *
    +     * If the start is open, then the range excludes rows whose first
    +     * `len(start_open)` key columns exactly match `start_open`.
    +     * 
    + * + * .google.protobuf.ListValue start_open = 2; + * + * @return The startOpen. + */ + @java.lang.Override + public com.google.protobuf.ListValue getStartOpen() { + if (startOpenBuilder_ == null) { + if (startKeyTypeCase_ == 2) { + return (com.google.protobuf.ListValue) startKeyType_; + } + return com.google.protobuf.ListValue.getDefaultInstance(); + } else { + if (startKeyTypeCase_ == 2) { + return startOpenBuilder_.getMessage(); + } + return com.google.protobuf.ListValue.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * If the start is open, then the range excludes rows whose first
    +     * `len(start_open)` key columns exactly match `start_open`.
    +     * 
    + * + * .google.protobuf.ListValue start_open = 2; + */ + public Builder setStartOpen(com.google.protobuf.ListValue value) { + if (startOpenBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + startKeyType_ = value; + onChanged(); + } else { + startOpenBuilder_.setMessage(value); + } + startKeyTypeCase_ = 2; + return this; + } + + /** + * + * + *
    +     * If the start is open, then the range excludes rows whose first
    +     * `len(start_open)` key columns exactly match `start_open`.
    +     * 
    + * + * .google.protobuf.ListValue start_open = 2; + */ + public Builder setStartOpen(com.google.protobuf.ListValue.Builder builderForValue) { + if (startOpenBuilder_ == null) { + startKeyType_ = builderForValue.build(); + onChanged(); + } else { + startOpenBuilder_.setMessage(builderForValue.build()); + } + startKeyTypeCase_ = 2; + return this; + } + + /** + * + * + *
    +     * If the start is open, then the range excludes rows whose first
    +     * `len(start_open)` key columns exactly match `start_open`.
    +     * 
    + * + * .google.protobuf.ListValue start_open = 2; + */ + public Builder mergeStartOpen(com.google.protobuf.ListValue value) { + if (startOpenBuilder_ == null) { + if (startKeyTypeCase_ == 2 + && startKeyType_ != com.google.protobuf.ListValue.getDefaultInstance()) { + startKeyType_ = + com.google.protobuf.ListValue.newBuilder( + (com.google.protobuf.ListValue) startKeyType_) + .mergeFrom(value) + .buildPartial(); + } else { + startKeyType_ = value; + } + onChanged(); + } else { + if (startKeyTypeCase_ == 2) { + startOpenBuilder_.mergeFrom(value); + } else { + startOpenBuilder_.setMessage(value); + } + } + startKeyTypeCase_ = 2; + return this; + } + + /** + * + * + *
    +     * If the start is open, then the range excludes rows whose first
    +     * `len(start_open)` key columns exactly match `start_open`.
    +     * 
    + * + * .google.protobuf.ListValue start_open = 2; + */ + public Builder clearStartOpen() { + if (startOpenBuilder_ == null) { + if (startKeyTypeCase_ == 2) { + startKeyTypeCase_ = 0; + startKeyType_ = null; + onChanged(); + } + } else { + if (startKeyTypeCase_ == 2) { + startKeyTypeCase_ = 0; + startKeyType_ = null; + } + startOpenBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * If the start is open, then the range excludes rows whose first
    +     * `len(start_open)` key columns exactly match `start_open`.
    +     * 
    + * + * .google.protobuf.ListValue start_open = 2; + */ + public com.google.protobuf.ListValue.Builder getStartOpenBuilder() { + return internalGetStartOpenFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * If the start is open, then the range excludes rows whose first
    +     * `len(start_open)` key columns exactly match `start_open`.
    +     * 
    + * + * .google.protobuf.ListValue start_open = 2; + */ + @java.lang.Override + public com.google.protobuf.ListValueOrBuilder getStartOpenOrBuilder() { + if ((startKeyTypeCase_ == 2) && (startOpenBuilder_ != null)) { + return startOpenBuilder_.getMessageOrBuilder(); + } else { + if (startKeyTypeCase_ == 2) { + return (com.google.protobuf.ListValue) startKeyType_; + } + return com.google.protobuf.ListValue.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * If the start is open, then the range excludes rows whose first
    +     * `len(start_open)` key columns exactly match `start_open`.
    +     * 
    + * + * .google.protobuf.ListValue start_open = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder> + internalGetStartOpenFieldBuilder() { + if (startOpenBuilder_ == null) { + if (!(startKeyTypeCase_ == 2)) { + startKeyType_ = com.google.protobuf.ListValue.getDefaultInstance(); + } + startOpenBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder>( + (com.google.protobuf.ListValue) startKeyType_, getParentForChildren(), isClean()); + startKeyType_ = null; + } + startKeyTypeCase_ = 2; + onChanged(); + return startOpenBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder> + endClosedBuilder_; + + /** + * + * + *
    +     * If the end is closed, then the range includes all rows whose
    +     * first `len(end_closed)` key columns exactly match `end_closed`.
    +     * 
    + * + * .google.protobuf.ListValue end_closed = 3; + * + * @return Whether the endClosed field is set. + */ + @java.lang.Override + public boolean hasEndClosed() { + return endKeyTypeCase_ == 3; + } + + /** + * + * + *
    +     * If the end is closed, then the range includes all rows whose
    +     * first `len(end_closed)` key columns exactly match `end_closed`.
    +     * 
    + * + * .google.protobuf.ListValue end_closed = 3; + * + * @return The endClosed. + */ + @java.lang.Override + public com.google.protobuf.ListValue getEndClosed() { + if (endClosedBuilder_ == null) { + if (endKeyTypeCase_ == 3) { + return (com.google.protobuf.ListValue) endKeyType_; + } + return com.google.protobuf.ListValue.getDefaultInstance(); + } else { + if (endKeyTypeCase_ == 3) { + return endClosedBuilder_.getMessage(); + } + return com.google.protobuf.ListValue.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * If the end is closed, then the range includes all rows whose
    +     * first `len(end_closed)` key columns exactly match `end_closed`.
    +     * 
    + * + * .google.protobuf.ListValue end_closed = 3; + */ + public Builder setEndClosed(com.google.protobuf.ListValue value) { + if (endClosedBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + endKeyType_ = value; + onChanged(); + } else { + endClosedBuilder_.setMessage(value); + } + endKeyTypeCase_ = 3; + return this; + } + + /** + * + * + *
    +     * If the end is closed, then the range includes all rows whose
    +     * first `len(end_closed)` key columns exactly match `end_closed`.
    +     * 
    + * + * .google.protobuf.ListValue end_closed = 3; + */ + public Builder setEndClosed(com.google.protobuf.ListValue.Builder builderForValue) { + if (endClosedBuilder_ == null) { + endKeyType_ = builderForValue.build(); + onChanged(); + } else { + endClosedBuilder_.setMessage(builderForValue.build()); + } + endKeyTypeCase_ = 3; + return this; + } + + /** + * + * + *
    +     * If the end is closed, then the range includes all rows whose
    +     * first `len(end_closed)` key columns exactly match `end_closed`.
    +     * 
    + * + * .google.protobuf.ListValue end_closed = 3; + */ + public Builder mergeEndClosed(com.google.protobuf.ListValue value) { + if (endClosedBuilder_ == null) { + if (endKeyTypeCase_ == 3 + && endKeyType_ != com.google.protobuf.ListValue.getDefaultInstance()) { + endKeyType_ = + com.google.protobuf.ListValue.newBuilder((com.google.protobuf.ListValue) endKeyType_) + .mergeFrom(value) + .buildPartial(); + } else { + endKeyType_ = value; + } + onChanged(); + } else { + if (endKeyTypeCase_ == 3) { + endClosedBuilder_.mergeFrom(value); + } else { + endClosedBuilder_.setMessage(value); + } + } + endKeyTypeCase_ = 3; + return this; + } + + /** + * + * + *
    +     * If the end is closed, then the range includes all rows whose
    +     * first `len(end_closed)` key columns exactly match `end_closed`.
    +     * 
    + * + * .google.protobuf.ListValue end_closed = 3; + */ + public Builder clearEndClosed() { + if (endClosedBuilder_ == null) { + if (endKeyTypeCase_ == 3) { + endKeyTypeCase_ = 0; + endKeyType_ = null; + onChanged(); + } + } else { + if (endKeyTypeCase_ == 3) { + endKeyTypeCase_ = 0; + endKeyType_ = null; + } + endClosedBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * If the end is closed, then the range includes all rows whose
    +     * first `len(end_closed)` key columns exactly match `end_closed`.
    +     * 
    + * + * .google.protobuf.ListValue end_closed = 3; + */ + public com.google.protobuf.ListValue.Builder getEndClosedBuilder() { + return internalGetEndClosedFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * If the end is closed, then the range includes all rows whose
    +     * first `len(end_closed)` key columns exactly match `end_closed`.
    +     * 
    + * + * .google.protobuf.ListValue end_closed = 3; + */ + @java.lang.Override + public com.google.protobuf.ListValueOrBuilder getEndClosedOrBuilder() { + if ((endKeyTypeCase_ == 3) && (endClosedBuilder_ != null)) { + return endClosedBuilder_.getMessageOrBuilder(); + } else { + if (endKeyTypeCase_ == 3) { + return (com.google.protobuf.ListValue) endKeyType_; + } + return com.google.protobuf.ListValue.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * If the end is closed, then the range includes all rows whose
    +     * first `len(end_closed)` key columns exactly match `end_closed`.
    +     * 
    + * + * .google.protobuf.ListValue end_closed = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder> + internalGetEndClosedFieldBuilder() { + if (endClosedBuilder_ == null) { + if (!(endKeyTypeCase_ == 3)) { + endKeyType_ = com.google.protobuf.ListValue.getDefaultInstance(); + } + endClosedBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder>( + (com.google.protobuf.ListValue) endKeyType_, getParentForChildren(), isClean()); + endKeyType_ = null; + } + endKeyTypeCase_ = 3; + onChanged(); + return endClosedBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder> + endOpenBuilder_; + + /** + * + * + *
    +     * If the end is open, then the range excludes rows whose first
    +     * `len(end_open)` key columns exactly match `end_open`.
    +     * 
    + * + * .google.protobuf.ListValue end_open = 4; + * + * @return Whether the endOpen field is set. + */ + @java.lang.Override + public boolean hasEndOpen() { + return endKeyTypeCase_ == 4; + } + + /** + * + * + *
    +     * If the end is open, then the range excludes rows whose first
    +     * `len(end_open)` key columns exactly match `end_open`.
    +     * 
    + * + * .google.protobuf.ListValue end_open = 4; + * + * @return The endOpen. + */ + @java.lang.Override + public com.google.protobuf.ListValue getEndOpen() { + if (endOpenBuilder_ == null) { + if (endKeyTypeCase_ == 4) { + return (com.google.protobuf.ListValue) endKeyType_; + } + return com.google.protobuf.ListValue.getDefaultInstance(); + } else { + if (endKeyTypeCase_ == 4) { + return endOpenBuilder_.getMessage(); + } + return com.google.protobuf.ListValue.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * If the end is open, then the range excludes rows whose first
    +     * `len(end_open)` key columns exactly match `end_open`.
    +     * 
    + * + * .google.protobuf.ListValue end_open = 4; + */ + public Builder setEndOpen(com.google.protobuf.ListValue value) { + if (endOpenBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + endKeyType_ = value; + onChanged(); + } else { + endOpenBuilder_.setMessage(value); + } + endKeyTypeCase_ = 4; + return this; + } + + /** + * + * + *
    +     * If the end is open, then the range excludes rows whose first
    +     * `len(end_open)` key columns exactly match `end_open`.
    +     * 
    + * + * .google.protobuf.ListValue end_open = 4; + */ + public Builder setEndOpen(com.google.protobuf.ListValue.Builder builderForValue) { + if (endOpenBuilder_ == null) { + endKeyType_ = builderForValue.build(); + onChanged(); + } else { + endOpenBuilder_.setMessage(builderForValue.build()); + } + endKeyTypeCase_ = 4; + return this; + } + + /** + * + * + *
    +     * If the end is open, then the range excludes rows whose first
    +     * `len(end_open)` key columns exactly match `end_open`.
    +     * 
    + * + * .google.protobuf.ListValue end_open = 4; + */ + public Builder mergeEndOpen(com.google.protobuf.ListValue value) { + if (endOpenBuilder_ == null) { + if (endKeyTypeCase_ == 4 + && endKeyType_ != com.google.protobuf.ListValue.getDefaultInstance()) { + endKeyType_ = + com.google.protobuf.ListValue.newBuilder((com.google.protobuf.ListValue) endKeyType_) + .mergeFrom(value) + .buildPartial(); + } else { + endKeyType_ = value; + } + onChanged(); + } else { + if (endKeyTypeCase_ == 4) { + endOpenBuilder_.mergeFrom(value); + } else { + endOpenBuilder_.setMessage(value); + } + } + endKeyTypeCase_ = 4; + return this; + } + + /** + * + * + *
    +     * If the end is open, then the range excludes rows whose first
    +     * `len(end_open)` key columns exactly match `end_open`.
    +     * 
    + * + * .google.protobuf.ListValue end_open = 4; + */ + public Builder clearEndOpen() { + if (endOpenBuilder_ == null) { + if (endKeyTypeCase_ == 4) { + endKeyTypeCase_ = 0; + endKeyType_ = null; + onChanged(); + } + } else { + if (endKeyTypeCase_ == 4) { + endKeyTypeCase_ = 0; + endKeyType_ = null; + } + endOpenBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * If the end is open, then the range excludes rows whose first
    +     * `len(end_open)` key columns exactly match `end_open`.
    +     * 
    + * + * .google.protobuf.ListValue end_open = 4; + */ + public com.google.protobuf.ListValue.Builder getEndOpenBuilder() { + return internalGetEndOpenFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * If the end is open, then the range excludes rows whose first
    +     * `len(end_open)` key columns exactly match `end_open`.
    +     * 
    + * + * .google.protobuf.ListValue end_open = 4; + */ + @java.lang.Override + public com.google.protobuf.ListValueOrBuilder getEndOpenOrBuilder() { + if ((endKeyTypeCase_ == 4) && (endOpenBuilder_ != null)) { + return endOpenBuilder_.getMessageOrBuilder(); + } else { + if (endKeyTypeCase_ == 4) { + return (com.google.protobuf.ListValue) endKeyType_; + } + return com.google.protobuf.ListValue.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * If the end is open, then the range excludes rows whose first
    +     * `len(end_open)` key columns exactly match `end_open`.
    +     * 
    + * + * .google.protobuf.ListValue end_open = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder> + internalGetEndOpenFieldBuilder() { + if (endOpenBuilder_ == null) { + if (!(endKeyTypeCase_ == 4)) { + endKeyType_ = com.google.protobuf.ListValue.getDefaultInstance(); + } + endOpenBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder>( + (com.google.protobuf.ListValue) endKeyType_, getParentForChildren(), isClean()); + endKeyType_ = null; + } + endKeyTypeCase_ = 4; + onChanged(); + return endOpenBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.KeyRange) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.KeyRange) + private static final com.google.spanner.v1.KeyRange DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.KeyRange(); + } + + public static com.google.spanner.v1.KeyRange getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public KeyRange parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.KeyRange getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeyRangeOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeyRangeOrBuilder.java new file mode 100644 index 000000000000..ede288880303 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeyRangeOrBuilder.java @@ -0,0 +1,192 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/keys.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface KeyRangeOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.KeyRange) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * If the start is closed, then the range includes all rows whose
    +   * first `len(start_closed)` key columns exactly match `start_closed`.
    +   * 
    + * + * .google.protobuf.ListValue start_closed = 1; + * + * @return Whether the startClosed field is set. + */ + boolean hasStartClosed(); + + /** + * + * + *
    +   * If the start is closed, then the range includes all rows whose
    +   * first `len(start_closed)` key columns exactly match `start_closed`.
    +   * 
    + * + * .google.protobuf.ListValue start_closed = 1; + * + * @return The startClosed. + */ + com.google.protobuf.ListValue getStartClosed(); + + /** + * + * + *
    +   * If the start is closed, then the range includes all rows whose
    +   * first `len(start_closed)` key columns exactly match `start_closed`.
    +   * 
    + * + * .google.protobuf.ListValue start_closed = 1; + */ + com.google.protobuf.ListValueOrBuilder getStartClosedOrBuilder(); + + /** + * + * + *
    +   * If the start is open, then the range excludes rows whose first
    +   * `len(start_open)` key columns exactly match `start_open`.
    +   * 
    + * + * .google.protobuf.ListValue start_open = 2; + * + * @return Whether the startOpen field is set. + */ + boolean hasStartOpen(); + + /** + * + * + *
    +   * If the start is open, then the range excludes rows whose first
    +   * `len(start_open)` key columns exactly match `start_open`.
    +   * 
    + * + * .google.protobuf.ListValue start_open = 2; + * + * @return The startOpen. + */ + com.google.protobuf.ListValue getStartOpen(); + + /** + * + * + *
    +   * If the start is open, then the range excludes rows whose first
    +   * `len(start_open)` key columns exactly match `start_open`.
    +   * 
    + * + * .google.protobuf.ListValue start_open = 2; + */ + com.google.protobuf.ListValueOrBuilder getStartOpenOrBuilder(); + + /** + * + * + *
    +   * If the end is closed, then the range includes all rows whose
    +   * first `len(end_closed)` key columns exactly match `end_closed`.
    +   * 
    + * + * .google.protobuf.ListValue end_closed = 3; + * + * @return Whether the endClosed field is set. + */ + boolean hasEndClosed(); + + /** + * + * + *
    +   * If the end is closed, then the range includes all rows whose
    +   * first `len(end_closed)` key columns exactly match `end_closed`.
    +   * 
    + * + * .google.protobuf.ListValue end_closed = 3; + * + * @return The endClosed. + */ + com.google.protobuf.ListValue getEndClosed(); + + /** + * + * + *
    +   * If the end is closed, then the range includes all rows whose
    +   * first `len(end_closed)` key columns exactly match `end_closed`.
    +   * 
    + * + * .google.protobuf.ListValue end_closed = 3; + */ + com.google.protobuf.ListValueOrBuilder getEndClosedOrBuilder(); + + /** + * + * + *
    +   * If the end is open, then the range excludes rows whose first
    +   * `len(end_open)` key columns exactly match `end_open`.
    +   * 
    + * + * .google.protobuf.ListValue end_open = 4; + * + * @return Whether the endOpen field is set. + */ + boolean hasEndOpen(); + + /** + * + * + *
    +   * If the end is open, then the range excludes rows whose first
    +   * `len(end_open)` key columns exactly match `end_open`.
    +   * 
    + * + * .google.protobuf.ListValue end_open = 4; + * + * @return The endOpen. + */ + com.google.protobuf.ListValue getEndOpen(); + + /** + * + * + *
    +   * If the end is open, then the range excludes rows whose first
    +   * `len(end_open)` key columns exactly match `end_open`.
    +   * 
    + * + * .google.protobuf.ListValue end_open = 4; + */ + com.google.protobuf.ListValueOrBuilder getEndOpenOrBuilder(); + + com.google.spanner.v1.KeyRange.StartKeyTypeCase getStartKeyTypeCase(); + + com.google.spanner.v1.KeyRange.EndKeyTypeCase getEndKeyTypeCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeyRecipe.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeyRecipe.java new file mode 100644 index 000000000000..b6723d35963a --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeyRecipe.java @@ -0,0 +1,4400 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/location.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * A `KeyRecipe` provides the metadata required to translate reads, mutations,
    + * and queries into a byte array in "sortable string format" (ssformat)that can
    + * be used with `Range`s to route requests. Note that the client *must* tolerate
    + * `KeyRecipe`s that appear to be invalid, since the `KeyRecipe` format may
    + * change over time. Requests with invalid `KeyRecipe`s should be routed to a
    + * default server.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.KeyRecipe} + */ +@com.google.protobuf.Generated +public final class KeyRecipe extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.KeyRecipe) + KeyRecipeOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "KeyRecipe"); + } + + // Use KeyRecipe.newBuilder() to construct. + private KeyRecipe(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private KeyRecipe() { + part_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_KeyRecipe_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_KeyRecipe_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.KeyRecipe.class, com.google.spanner.v1.KeyRecipe.Builder.class); + } + + public interface PartOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.KeyRecipe.Part) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * If non-zero, `tag` is the only field present in this `Part`. The part
    +     * is encoded by appending `tag` to the ssformat key.
    +     * 
    + * + * uint32 tag = 1; + * + * @return The tag. + */ + int getTag(); + + /** + * + * + *
    +     * Whether the key column is sorted ascending or descending. Only present
    +     * if `tag` is zero.
    +     * 
    + * + * .google.spanner.v1.KeyRecipe.Part.Order order = 2; + * + * @return The enum numeric value on the wire for order. + */ + int getOrderValue(); + + /** + * + * + *
    +     * Whether the key column is sorted ascending or descending. Only present
    +     * if `tag` is zero.
    +     * 
    + * + * .google.spanner.v1.KeyRecipe.Part.Order order = 2; + * + * @return The order. + */ + com.google.spanner.v1.KeyRecipe.Part.Order getOrder(); + + /** + * + * + *
    +     * How NULLs are represented in the encoded key part. Only present if `tag`
    +     * is zero.
    +     * 
    + * + * .google.spanner.v1.KeyRecipe.Part.NullOrder null_order = 3; + * + * @return The enum numeric value on the wire for nullOrder. + */ + int getNullOrderValue(); + + /** + * + * + *
    +     * How NULLs are represented in the encoded key part. Only present if `tag`
    +     * is zero.
    +     * 
    + * + * .google.spanner.v1.KeyRecipe.Part.NullOrder null_order = 3; + * + * @return The nullOrder. + */ + com.google.spanner.v1.KeyRecipe.Part.NullOrder getNullOrder(); + + /** + * + * + *
    +     * The type of the key part. Only present if `tag` is zero.
    +     * 
    + * + * .google.spanner.v1.Type type = 4; + * + * @return Whether the type field is set. + */ + boolean hasType(); + + /** + * + * + *
    +     * The type of the key part. Only present if `tag` is zero.
    +     * 
    + * + * .google.spanner.v1.Type type = 4; + * + * @return The type. + */ + com.google.spanner.v1.Type getType(); + + /** + * + * + *
    +     * The type of the key part. Only present if `tag` is zero.
    +     * 
    + * + * .google.spanner.v1.Type type = 4; + */ + com.google.spanner.v1.TypeOrBuilder getTypeOrBuilder(); + + /** + * + * + *
    +     * `identifier` is the name of the column or query parameter.
    +     * 
    + * + * string identifier = 5; + * + * @return Whether the identifier field is set. + */ + boolean hasIdentifier(); + + /** + * + * + *
    +     * `identifier` is the name of the column or query parameter.
    +     * 
    + * + * string identifier = 5; + * + * @return The identifier. + */ + java.lang.String getIdentifier(); + + /** + * + * + *
    +     * `identifier` is the name of the column or query parameter.
    +     * 
    + * + * string identifier = 5; + * + * @return The bytes for identifier. + */ + com.google.protobuf.ByteString getIdentifierBytes(); + + /** + * + * + *
    +     * The constant value of the key part.
    +     * It is present when query uses a constant as a part of the key.
    +     * 
    + * + * .google.protobuf.Value value = 6; + * + * @return Whether the value field is set. + */ + boolean hasValue(); + + /** + * + * + *
    +     * The constant value of the key part.
    +     * It is present when query uses a constant as a part of the key.
    +     * 
    + * + * .google.protobuf.Value value = 6; + * + * @return The value. + */ + com.google.protobuf.Value getValue(); + + /** + * + * + *
    +     * The constant value of the key part.
    +     * It is present when query uses a constant as a part of the key.
    +     * 
    + * + * .google.protobuf.Value value = 6; + */ + com.google.protobuf.ValueOrBuilder getValueOrBuilder(); + + /** + * + * + *
    +     * If true, the client is responsible to fill in the value randomly.
    +     * It's relevant only for the INT64 type.
    +     * 
    + * + * bool random = 8; + * + * @return Whether the random field is set. + */ + boolean hasRandom(); + + /** + * + * + *
    +     * If true, the client is responsible to fill in the value randomly.
    +     * It's relevant only for the INT64 type.
    +     * 
    + * + * bool random = 8; + * + * @return The random. + */ + boolean getRandom(); + + /** + * + * + *
    +     * It is a repeated field to support fetching key columns from nested
    +     * structs, such as `STRUCT` query parameters.
    +     * 
    + * + * repeated int32 struct_identifiers = 7; + * + * @return A list containing the structIdentifiers. + */ + java.util.List getStructIdentifiersList(); + + /** + * + * + *
    +     * It is a repeated field to support fetching key columns from nested
    +     * structs, such as `STRUCT` query parameters.
    +     * 
    + * + * repeated int32 struct_identifiers = 7; + * + * @return The count of structIdentifiers. + */ + int getStructIdentifiersCount(); + + /** + * + * + *
    +     * It is a repeated field to support fetching key columns from nested
    +     * structs, such as `STRUCT` query parameters.
    +     * 
    + * + * repeated int32 struct_identifiers = 7; + * + * @param index The index of the element to return. + * @return The structIdentifiers at the given index. + */ + int getStructIdentifiers(int index); + + com.google.spanner.v1.KeyRecipe.Part.ValueTypeCase getValueTypeCase(); + } + + /** + * + * + *
    +   * An ssformat key is composed of a sequence of tag numbers and key column
    +   * values. `Part` represents a single tag or key column value.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.KeyRecipe.Part} + */ + public static final class Part extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.KeyRecipe.Part) + PartOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Part"); + } + + // Use Part.newBuilder() to construct. + private Part(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Part() { + order_ = 0; + nullOrder_ = 0; + structIdentifiers_ = emptyIntList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_KeyRecipe_Part_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_KeyRecipe_Part_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.KeyRecipe.Part.class, + com.google.spanner.v1.KeyRecipe.Part.Builder.class); + } + + /** + * + * + *
    +     * The remaining fields encode column values.
    +     * 
    + * + * Protobuf enum {@code google.spanner.v1.KeyRecipe.Part.Order} + */ + public enum Order implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +       * Default value, equivalent to `ASCENDING`.
    +       * 
    + * + * ORDER_UNSPECIFIED = 0; + */ + ORDER_UNSPECIFIED(0), + /** + * + * + *
    +       * The key is ascending - corresponds to `ASC` in the schema definition.
    +       * 
    + * + * ASCENDING = 1; + */ + ASCENDING(1), + /** + * + * + *
    +       * The key is descending - corresponds to `DESC` in the schema definition.
    +       * 
    + * + * DESCENDING = 2; + */ + DESCENDING(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Order"); + } + + /** + * + * + *
    +       * Default value, equivalent to `ASCENDING`.
    +       * 
    + * + * ORDER_UNSPECIFIED = 0; + */ + public static final int ORDER_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +       * The key is ascending - corresponds to `ASC` in the schema definition.
    +       * 
    + * + * ASCENDING = 1; + */ + public static final int ASCENDING_VALUE = 1; + + /** + * + * + *
    +       * The key is descending - corresponds to `DESC` in the schema definition.
    +       * 
    + * + * DESCENDING = 2; + */ + public static final int DESCENDING_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Order valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Order forNumber(int value) { + switch (value) { + case 0: + return ORDER_UNSPECIFIED; + case 1: + return ASCENDING; + case 2: + return DESCENDING; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Order findValueByNumber(int number) { + return Order.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.v1.KeyRecipe.Part.getDescriptor().getEnumTypes().get(0); + } + + private static final Order[] VALUES = values(); + + public static Order valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if 
(desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Order(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.v1.KeyRecipe.Part.Order) + } + + /** + * + * + *
    +     * The null order of the key column. This dictates where NULL values sort
    +     * in the sorted order. Note that columns which are `NOT NULL` can have a
    +     * special encoding.
    +     * 
    + * + * Protobuf enum {@code google.spanner.v1.KeyRecipe.Part.NullOrder} + */ + public enum NullOrder implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +       * Default value. This value is unused.
    +       * 
    + * + * NULL_ORDER_UNSPECIFIED = 0; + */ + NULL_ORDER_UNSPECIFIED(0), + /** + * + * + *
    +       * NULL values sort before any non-NULL values.
    +       * 
    + * + * NULLS_FIRST = 1; + */ + NULLS_FIRST(1), + /** + * + * + *
    +       * NULL values sort after any non-NULL values.
    +       * 
    + * + * NULLS_LAST = 2; + */ + NULLS_LAST(2), + /** + * + * + *
    +       * The column does not support NULL values.
    +       * 
    + * + * NOT_NULL = 3; + */ + NOT_NULL(3), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "NullOrder"); + } + + /** + * + * + *
    +       * Default value. This value is unused.
    +       * 
    + * + * NULL_ORDER_UNSPECIFIED = 0; + */ + public static final int NULL_ORDER_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +       * NULL values sort before any non-NULL values.
    +       * 
    + * + * NULLS_FIRST = 1; + */ + public static final int NULLS_FIRST_VALUE = 1; + + /** + * + * + *
    +       * NULL values sort after any non-NULL values.
    +       * 
    + * + * NULLS_LAST = 2; + */ + public static final int NULLS_LAST_VALUE = 2; + + /** + * + * + *
    +       * The column does not support NULL values.
    +       * 
    + * + * NOT_NULL = 3; + */ + public static final int NOT_NULL_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static NullOrder valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static NullOrder forNumber(int value) { + switch (value) { + case 0: + return NULL_ORDER_UNSPECIFIED; + case 1: + return NULLS_FIRST; + case 2: + return NULLS_LAST; + case 3: + return NOT_NULL; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public NullOrder findValueByNumber(int number) { + return NullOrder.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.v1.KeyRecipe.Part.getDescriptor().getEnumTypes().get(1); + } + + private static final NullOrder[] VALUES = values(); + + public static NullOrder 
valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private NullOrder(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.v1.KeyRecipe.Part.NullOrder) + } + + private int bitField0_; + private int valueTypeCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object valueType_; + + public enum ValueTypeCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + IDENTIFIER(5), + VALUE(6), + RANDOM(8), + VALUETYPE_NOT_SET(0); + private final int value; + + private ValueTypeCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ValueTypeCase valueOf(int value) { + return forNumber(value); + } + + public static ValueTypeCase forNumber(int value) { + switch (value) { + case 5: + return IDENTIFIER; + case 6: + return VALUE; + case 8: + return RANDOM; + case 0: + return VALUETYPE_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public ValueTypeCase getValueTypeCase() { + return ValueTypeCase.forNumber(valueTypeCase_); + } + + public static final int TAG_FIELD_NUMBER = 1; + private int tag_ = 0; + + /** + * + * + *
    +     * If non-zero, `tag` is the only field present in this `Part`. The part
    +     * is encoded by appending `tag` to the ssformat key.
    +     * 
    + * + * uint32 tag = 1; + * + * @return The tag. + */ + @java.lang.Override + public int getTag() { + return tag_; + } + + public static final int ORDER_FIELD_NUMBER = 2; + private int order_ = 0; + + /** + * + * + *
    +     * Whether the key column is sorted ascending or descending. Only present
    +     * if `tag` is zero.
    +     * 
    + * + * .google.spanner.v1.KeyRecipe.Part.Order order = 2; + * + * @return The enum numeric value on the wire for order. + */ + @java.lang.Override + public int getOrderValue() { + return order_; + } + + /** + * + * + *
    +     * Whether the key column is sorted ascending or descending. Only present
    +     * if `tag` is zero.
    +     * 
    + * + * .google.spanner.v1.KeyRecipe.Part.Order order = 2; + * + * @return The order. + */ + @java.lang.Override + public com.google.spanner.v1.KeyRecipe.Part.Order getOrder() { + com.google.spanner.v1.KeyRecipe.Part.Order result = + com.google.spanner.v1.KeyRecipe.Part.Order.forNumber(order_); + return result == null ? com.google.spanner.v1.KeyRecipe.Part.Order.UNRECOGNIZED : result; + } + + public static final int NULL_ORDER_FIELD_NUMBER = 3; + private int nullOrder_ = 0; + + /** + * + * + *
    +     * How NULLs are represented in the encoded key part. Only present if `tag`
    +     * is zero.
    +     * 
    + * + * .google.spanner.v1.KeyRecipe.Part.NullOrder null_order = 3; + * + * @return The enum numeric value on the wire for nullOrder. + */ + @java.lang.Override + public int getNullOrderValue() { + return nullOrder_; + } + + /** + * + * + *
    +     * How NULLs are represented in the encoded key part. Only present if `tag`
    +     * is zero.
    +     * 
    + * + * .google.spanner.v1.KeyRecipe.Part.NullOrder null_order = 3; + * + * @return The nullOrder. + */ + @java.lang.Override + public com.google.spanner.v1.KeyRecipe.Part.NullOrder getNullOrder() { + com.google.spanner.v1.KeyRecipe.Part.NullOrder result = + com.google.spanner.v1.KeyRecipe.Part.NullOrder.forNumber(nullOrder_); + return result == null ? com.google.spanner.v1.KeyRecipe.Part.NullOrder.UNRECOGNIZED : result; + } + + public static final int TYPE_FIELD_NUMBER = 4; + private com.google.spanner.v1.Type type_; + + /** + * + * + *
    +     * The type of the key part. Only present if `tag` is zero.
    +     * 
    + * + * .google.spanner.v1.Type type = 4; + * + * @return Whether the type field is set. + */ + @java.lang.Override + public boolean hasType() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * The type of the key part. Only present if `tag` is zero.
    +     * 
    + * + * .google.spanner.v1.Type type = 4; + * + * @return The type. + */ + @java.lang.Override + public com.google.spanner.v1.Type getType() { + return type_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : type_; + } + + /** + * + * + *
    +     * The type of the key part. Only present if `tag` is zero.
    +     * 
    + * + * .google.spanner.v1.Type type = 4; + */ + @java.lang.Override + public com.google.spanner.v1.TypeOrBuilder getTypeOrBuilder() { + return type_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : type_; + } + + public static final int IDENTIFIER_FIELD_NUMBER = 5; + + /** + * + * + *
    +     * `identifier` is the name of the column or query parameter.
    +     * 
    + * + * string identifier = 5; + * + * @return Whether the identifier field is set. + */ + public boolean hasIdentifier() { + return valueTypeCase_ == 5; + } + + /** + * + * + *
    +     * `identifier` is the name of the column or query parameter.
    +     * 
    + * + * string identifier = 5; + * + * @return The identifier. + */ + public java.lang.String getIdentifier() { + java.lang.Object ref = ""; + if (valueTypeCase_ == 5) { + ref = valueType_; + } + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (valueTypeCase_ == 5) { + valueType_ = s; + } + return s; + } + } + + /** + * + * + *
    +     * `identifier` is the name of the column or query parameter.
    +     * 
    + * + * string identifier = 5; + * + * @return The bytes for identifier. + */ + public com.google.protobuf.ByteString getIdentifierBytes() { + java.lang.Object ref = ""; + if (valueTypeCase_ == 5) { + ref = valueType_; + } + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + if (valueTypeCase_ == 5) { + valueType_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int VALUE_FIELD_NUMBER = 6; + + /** + * + * + *
    +     * The constant value of the key part.
    +     * It is present when query uses a constant as a part of the key.
    +     * 
    + * + * .google.protobuf.Value value = 6; + * + * @return Whether the value field is set. + */ + @java.lang.Override + public boolean hasValue() { + return valueTypeCase_ == 6; + } + + /** + * + * + *
    +     * The constant value of the key part.
    +     * It is present when query uses a constant as a part of the key.
    +     * 
    + * + * .google.protobuf.Value value = 6; + * + * @return The value. + */ + @java.lang.Override + public com.google.protobuf.Value getValue() { + if (valueTypeCase_ == 6) { + return (com.google.protobuf.Value) valueType_; + } + return com.google.protobuf.Value.getDefaultInstance(); + } + + /** + * + * + *
    +     * The constant value of the key part.
    +     * It is present when query uses a constant as a part of the key.
    +     * 
    + * + * .google.protobuf.Value value = 6; + */ + @java.lang.Override + public com.google.protobuf.ValueOrBuilder getValueOrBuilder() { + if (valueTypeCase_ == 6) { + return (com.google.protobuf.Value) valueType_; + } + return com.google.protobuf.Value.getDefaultInstance(); + } + + public static final int RANDOM_FIELD_NUMBER = 8; + + /** + * + * + *
    +     * If true, the client is responsible to fill in the value randomly.
    +     * It's relevant only for the INT64 type.
    +     * 
    + * + * bool random = 8; + * + * @return Whether the random field is set. + */ + @java.lang.Override + public boolean hasRandom() { + return valueTypeCase_ == 8; + } + + /** + * + * + *
    +     * If true, the client is responsible to fill in the value randomly.
    +     * It's relevant only for the INT64 type.
    +     * 
    + * + * bool random = 8; + * + * @return The random. + */ + @java.lang.Override + public boolean getRandom() { + if (valueTypeCase_ == 8) { + return (java.lang.Boolean) valueType_; + } + return false; + } + + public static final int STRUCT_IDENTIFIERS_FIELD_NUMBER = 7; + + @SuppressWarnings("serial") + private com.google.protobuf.Internal.IntList structIdentifiers_ = emptyIntList(); + + /** + * + * + *
    +     * It is a repeated field to support fetching key columns from nested
    +     * structs, such as `STRUCT` query parameters.
    +     * 
    + * + * repeated int32 struct_identifiers = 7; + * + * @return A list containing the structIdentifiers. + */ + @java.lang.Override + public java.util.List getStructIdentifiersList() { + return structIdentifiers_; + } + + /** + * + * + *
    +     * It is a repeated field to support fetching key columns from nested
    +     * structs, such as `STRUCT` query parameters.
    +     * 
    + * + * repeated int32 struct_identifiers = 7; + * + * @return The count of structIdentifiers. + */ + public int getStructIdentifiersCount() { + return structIdentifiers_.size(); + } + + /** + * + * + *
    +     * It is a repeated field to support fetching key columns from nested
    +     * structs, such as `STRUCT` query parameters.
    +     * 
    + * + * repeated int32 struct_identifiers = 7; + * + * @param index The index of the element to return. + * @return The structIdentifiers at the given index. + */ + public int getStructIdentifiers(int index) { + return structIdentifiers_.getInt(index); + } + + private int structIdentifiersMemoizedSerializedSize = -1; + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getSerializedSize(); + if (tag_ != 0) { + output.writeUInt32(1, tag_); + } + if (order_ != com.google.spanner.v1.KeyRecipe.Part.Order.ORDER_UNSPECIFIED.getNumber()) { + output.writeEnum(2, order_); + } + if (nullOrder_ + != com.google.spanner.v1.KeyRecipe.Part.NullOrder.NULL_ORDER_UNSPECIFIED.getNumber()) { + output.writeEnum(3, nullOrder_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(4, getType()); + } + if (valueTypeCase_ == 5) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, valueType_); + } + if (valueTypeCase_ == 6) { + output.writeMessage(6, (com.google.protobuf.Value) valueType_); + } + if (getStructIdentifiersList().size() > 0) { + output.writeUInt32NoTag(58); + output.writeUInt32NoTag(structIdentifiersMemoizedSerializedSize); + } + for (int i = 0; i < structIdentifiers_.size(); i++) { + output.writeInt32NoTag(structIdentifiers_.getInt(i)); + } + if (valueTypeCase_ == 8) { + output.writeBool(8, (boolean) ((java.lang.Boolean) valueType_)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (tag_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeUInt32Size(1, 
tag_); + } + if (order_ != com.google.spanner.v1.KeyRecipe.Part.Order.ORDER_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, order_); + } + if (nullOrder_ + != com.google.spanner.v1.KeyRecipe.Part.NullOrder.NULL_ORDER_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(3, nullOrder_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getType()); + } + if (valueTypeCase_ == 5) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, valueType_); + } + if (valueTypeCase_ == 6) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 6, (com.google.protobuf.Value) valueType_); + } + { + int dataSize = 0; + for (int i = 0; i < structIdentifiers_.size(); i++) { + dataSize += + com.google.protobuf.CodedOutputStream.computeInt32SizeNoTag( + structIdentifiers_.getInt(i)); + } + size += dataSize; + if (!getStructIdentifiersList().isEmpty()) { + size += 1; + size += com.google.protobuf.CodedOutputStream.computeInt32SizeNoTag(dataSize); + } + structIdentifiersMemoizedSerializedSize = dataSize; + } + if (valueTypeCase_ == 8) { + size += + com.google.protobuf.CodedOutputStream.computeBoolSize( + 8, (boolean) ((java.lang.Boolean) valueType_)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.KeyRecipe.Part)) { + return super.equals(obj); + } + com.google.spanner.v1.KeyRecipe.Part other = (com.google.spanner.v1.KeyRecipe.Part) obj; + + if (getTag() != other.getTag()) return false; + if (order_ != other.order_) return false; + if (nullOrder_ != other.nullOrder_) return false; + if (hasType() != other.hasType()) return false; + if (hasType()) { + if (!getType().equals(other.getType())) return false; 
+ } + if (!getStructIdentifiersList().equals(other.getStructIdentifiersList())) return false; + if (!getValueTypeCase().equals(other.getValueTypeCase())) return false; + switch (valueTypeCase_) { + case 5: + if (!getIdentifier().equals(other.getIdentifier())) return false; + break; + case 6: + if (!getValue().equals(other.getValue())) return false; + break; + case 8: + if (getRandom() != other.getRandom()) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TAG_FIELD_NUMBER; + hash = (53 * hash) + getTag(); + hash = (37 * hash) + ORDER_FIELD_NUMBER; + hash = (53 * hash) + order_; + hash = (37 * hash) + NULL_ORDER_FIELD_NUMBER; + hash = (53 * hash) + nullOrder_; + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + getType().hashCode(); + } + if (getStructIdentifiersCount() > 0) { + hash = (37 * hash) + STRUCT_IDENTIFIERS_FIELD_NUMBER; + hash = (53 * hash) + getStructIdentifiersList().hashCode(); + } + switch (valueTypeCase_) { + case 5: + hash = (37 * hash) + IDENTIFIER_FIELD_NUMBER; + hash = (53 * hash) + getIdentifier().hashCode(); + break; + case 6: + hash = (37 * hash) + VALUE_FIELD_NUMBER; + hash = (53 * hash) + getValue().hashCode(); + break; + case 8: + hash = (37 * hash) + RANDOM_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getRandom()); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.KeyRecipe.Part parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.KeyRecipe.Part 
parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.KeyRecipe.Part parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.KeyRecipe.Part parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.KeyRecipe.Part parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.KeyRecipe.Part parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.KeyRecipe.Part parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.KeyRecipe.Part parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.KeyRecipe.Part parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.KeyRecipe.Part parseDelimitedFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.KeyRecipe.Part parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.KeyRecipe.Part parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.KeyRecipe.Part prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * An ssformat key is composed of a sequence of tag numbers and key column
    +     * values. `Part` represents a single tag or key column value.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.KeyRecipe.Part} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.KeyRecipe.Part) + com.google.spanner.v1.KeyRecipe.PartOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_KeyRecipe_Part_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_KeyRecipe_Part_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.KeyRecipe.Part.class, + com.google.spanner.v1.KeyRecipe.Part.Builder.class); + } + + // Construct using com.google.spanner.v1.KeyRecipe.Part.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetTypeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + tag_ = 0; + order_ = 0; + nullOrder_ = 0; + type_ = null; + if (typeBuilder_ != null) { + typeBuilder_.dispose(); + typeBuilder_ = null; + } + if (valueBuilder_ != null) { + valueBuilder_.clear(); + } + structIdentifiers_ = emptyIntList(); + valueTypeCase_ = 0; + valueType_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_KeyRecipe_Part_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.KeyRecipe.Part 
getDefaultInstanceForType() { + return com.google.spanner.v1.KeyRecipe.Part.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.KeyRecipe.Part build() { + com.google.spanner.v1.KeyRecipe.Part result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.KeyRecipe.Part buildPartial() { + com.google.spanner.v1.KeyRecipe.Part result = + new com.google.spanner.v1.KeyRecipe.Part(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.KeyRecipe.Part result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.tag_ = tag_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.order_ = order_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.nullOrder_ = nullOrder_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.type_ = typeBuilder_ == null ? 
type_ : typeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + structIdentifiers_.makeImmutable(); + result.structIdentifiers_ = structIdentifiers_; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs(com.google.spanner.v1.KeyRecipe.Part result) { + result.valueTypeCase_ = valueTypeCase_; + result.valueType_ = this.valueType_; + if (valueTypeCase_ == 6 && valueBuilder_ != null) { + result.valueType_ = valueBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.KeyRecipe.Part) { + return mergeFrom((com.google.spanner.v1.KeyRecipe.Part) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.KeyRecipe.Part other) { + if (other == com.google.spanner.v1.KeyRecipe.Part.getDefaultInstance()) return this; + if (other.getTag() != 0) { + setTag(other.getTag()); + } + if (other.order_ != 0) { + setOrderValue(other.getOrderValue()); + } + if (other.nullOrder_ != 0) { + setNullOrderValue(other.getNullOrderValue()); + } + if (other.hasType()) { + mergeType(other.getType()); + } + if (!other.structIdentifiers_.isEmpty()) { + if (structIdentifiers_.isEmpty()) { + structIdentifiers_ = other.structIdentifiers_; + structIdentifiers_.makeImmutable(); + bitField0_ |= 0x00000080; + } else { + ensureStructIdentifiersIsMutable(); + structIdentifiers_.addAll(other.structIdentifiers_); + } + onChanged(); + } + switch (other.getValueTypeCase()) { + case IDENTIFIER: + { + valueTypeCase_ = 5; + valueType_ = other.valueType_; + onChanged(); + break; + } + case VALUE: + { + mergeValue(other.getValue()); + break; + } + case RANDOM: + { + setRandom(other.getRandom()); + break; + } + case VALUETYPE_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final 
boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + tag_ = input.readUInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 16: + { + order_ = input.readEnum(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 24: + { + nullOrder_ = input.readEnum(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: + { + input.readMessage(internalGetTypeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + java.lang.String s = input.readStringRequireUtf8(); + valueTypeCase_ = 5; + valueType_ = s; + break; + } // case 42 + case 50: + { + input.readMessage(internalGetValueFieldBuilder().getBuilder(), extensionRegistry); + valueTypeCase_ = 6; + break; + } // case 50 + case 56: + { + int v = input.readInt32(); + ensureStructIdentifiersIsMutable(); + structIdentifiers_.addInt(v); + break; + } // case 56 + case 58: + { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + ensureStructIdentifiersIsMutable(); + while (input.getBytesUntilLimit() > 0) { + structIdentifiers_.addInt(input.readInt32()); + } + input.popLimit(limit); + break; + } // case 58 + case 64: + { + valueType_ = input.readBool(); + valueTypeCase_ = 8; + break; + } // case 64 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } 
// finally + return this; + } + + private int valueTypeCase_ = 0; + private java.lang.Object valueType_; + + public ValueTypeCase getValueTypeCase() { + return ValueTypeCase.forNumber(valueTypeCase_); + } + + public Builder clearValueType() { + valueTypeCase_ = 0; + valueType_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private int tag_; + + /** + * + * + *
    +       * If non-zero, `tag` is the only field present in this `Part`. The part
    +       * is encoded by appending `tag` to the ssformat key.
    +       * 
    + * + * uint32 tag = 1; + * + * @return The tag. + */ + @java.lang.Override + public int getTag() { + return tag_; + } + + /** + * + * + *
    +       * If non-zero, `tag` is the only field present in this `Part`. The part
    +       * is encoded by appending `tag` to the ssformat key.
    +       * 
    + * + * uint32 tag = 1; + * + * @param value The tag to set. + * @return This builder for chaining. + */ + public Builder setTag(int value) { + + tag_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * If non-zero, `tag` is the only field present in this `Part`. The part
    +       * is encoded by appending `tag` to the ssformat key.
    +       * 
    + * + * uint32 tag = 1; + * + * @return This builder for chaining. + */ + public Builder clearTag() { + bitField0_ = (bitField0_ & ~0x00000001); + tag_ = 0; + onChanged(); + return this; + } + + private int order_ = 0; + + /** + * + * + *
    +       * Whether the key column is sorted ascending or descending. Only present
    +       * if `tag` is zero.
    +       * 
    + * + * .google.spanner.v1.KeyRecipe.Part.Order order = 2; + * + * @return The enum numeric value on the wire for order. + */ + @java.lang.Override + public int getOrderValue() { + return order_; + } + + /** + * + * + *
    +       * Whether the key column is sorted ascending or descending. Only present
    +       * if `tag` is zero.
    +       * 
    + * + * .google.spanner.v1.KeyRecipe.Part.Order order = 2; + * + * @param value The enum numeric value on the wire for order to set. + * @return This builder for chaining. + */ + public Builder setOrderValue(int value) { + order_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Whether the key column is sorted ascending or descending. Only present
    +       * if `tag` is zero.
    +       * 
    + * + * .google.spanner.v1.KeyRecipe.Part.Order order = 2; + * + * @return The order. + */ + @java.lang.Override + public com.google.spanner.v1.KeyRecipe.Part.Order getOrder() { + com.google.spanner.v1.KeyRecipe.Part.Order result = + com.google.spanner.v1.KeyRecipe.Part.Order.forNumber(order_); + return result == null ? com.google.spanner.v1.KeyRecipe.Part.Order.UNRECOGNIZED : result; + } + + /** + * + * + *
    +       * Whether the key column is sorted ascending or descending. Only present
    +       * if `tag` is zero.
    +       * 
    + * + * .google.spanner.v1.KeyRecipe.Part.Order order = 2; + * + * @param value The order to set. + * @return This builder for chaining. + */ + public Builder setOrder(com.google.spanner.v1.KeyRecipe.Part.Order value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + order_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Whether the key column is sorted ascending or descending. Only present
    +       * if `tag` is zero.
    +       * 
    + * + * .google.spanner.v1.KeyRecipe.Part.Order order = 2; + * + * @return This builder for chaining. + */ + public Builder clearOrder() { + bitField0_ = (bitField0_ & ~0x00000002); + order_ = 0; + onChanged(); + return this; + } + + private int nullOrder_ = 0; + + /** + * + * + *
    +       * How NULLs are represented in the encoded key part. Only present if `tag`
    +       * is zero.
    +       * 
    + * + * .google.spanner.v1.KeyRecipe.Part.NullOrder null_order = 3; + * + * @return The enum numeric value on the wire for nullOrder. + */ + @java.lang.Override + public int getNullOrderValue() { + return nullOrder_; + } + + /** + * + * + *
    +       * How NULLs are represented in the encoded key part. Only present if `tag`
    +       * is zero.
    +       * 
    + * + * .google.spanner.v1.KeyRecipe.Part.NullOrder null_order = 3; + * + * @param value The enum numeric value on the wire for nullOrder to set. + * @return This builder for chaining. + */ + public Builder setNullOrderValue(int value) { + nullOrder_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +       * How NULLs are represented in the encoded key part. Only present if `tag`
    +       * is zero.
    +       * 
    + * + * .google.spanner.v1.KeyRecipe.Part.NullOrder null_order = 3; + * + * @return The nullOrder. + */ + @java.lang.Override + public com.google.spanner.v1.KeyRecipe.Part.NullOrder getNullOrder() { + com.google.spanner.v1.KeyRecipe.Part.NullOrder result = + com.google.spanner.v1.KeyRecipe.Part.NullOrder.forNumber(nullOrder_); + return result == null + ? com.google.spanner.v1.KeyRecipe.Part.NullOrder.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +       * How NULLs are represented in the encoded key part. Only present if `tag`
    +       * is zero.
    +       * 
    + * + * .google.spanner.v1.KeyRecipe.Part.NullOrder null_order = 3; + * + * @param value The nullOrder to set. + * @return This builder for chaining. + */ + public Builder setNullOrder(com.google.spanner.v1.KeyRecipe.Part.NullOrder value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + nullOrder_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +       * How NULLs are represented in the encoded key part. Only present if `tag`
    +       * is zero.
    +       * 
    + * + * .google.spanner.v1.KeyRecipe.Part.NullOrder null_order = 3; + * + * @return This builder for chaining. + */ + public Builder clearNullOrder() { + bitField0_ = (bitField0_ & ~0x00000004); + nullOrder_ = 0; + onChanged(); + return this; + } + + private com.google.spanner.v1.Type type_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder> + typeBuilder_; + + /** + * + * + *
    +       * The type of the key part. Only present if `tag` is zero.
    +       * 
    + * + * .google.spanner.v1.Type type = 4; + * + * @return Whether the type field is set. + */ + public boolean hasType() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +       * The type of the key part. Only present if `tag` is zero.
    +       * 
    + * + * .google.spanner.v1.Type type = 4; + * + * @return The type. + */ + public com.google.spanner.v1.Type getType() { + if (typeBuilder_ == null) { + return type_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : type_; + } else { + return typeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * The type of the key part. Only present if `tag` is zero.
    +       * 
    + * + * .google.spanner.v1.Type type = 4; + */ + public Builder setType(com.google.spanner.v1.Type value) { + if (typeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + type_ = value; + } else { + typeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The type of the key part. Only present if `tag` is zero.
    +       * 
    + * + * .google.spanner.v1.Type type = 4; + */ + public Builder setType(com.google.spanner.v1.Type.Builder builderForValue) { + if (typeBuilder_ == null) { + type_ = builderForValue.build(); + } else { + typeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The type of the key part. Only present if `tag` is zero.
    +       * 
    + * + * .google.spanner.v1.Type type = 4; + */ + public Builder mergeType(com.google.spanner.v1.Type value) { + if (typeBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && type_ != null + && type_ != com.google.spanner.v1.Type.getDefaultInstance()) { + getTypeBuilder().mergeFrom(value); + } else { + type_ = value; + } + } else { + typeBuilder_.mergeFrom(value); + } + if (type_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * The type of the key part. Only present if `tag` is zero.
    +       * 
    + * + * .google.spanner.v1.Type type = 4; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000008); + type_ = null; + if (typeBuilder_ != null) { + typeBuilder_.dispose(); + typeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * The type of the key part. Only present if `tag` is zero.
    +       * 
    + * + * .google.spanner.v1.Type type = 4; + */ + public com.google.spanner.v1.Type.Builder getTypeBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetTypeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * The type of the key part. Only present if `tag` is zero.
    +       * 
    + * + * .google.spanner.v1.Type type = 4; + */ + public com.google.spanner.v1.TypeOrBuilder getTypeOrBuilder() { + if (typeBuilder_ != null) { + return typeBuilder_.getMessageOrBuilder(); + } else { + return type_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : type_; + } + } + + /** + * + * + *
    +       * The type of the key part. Only present if `tag` is zero.
    +       * 
    + * + * .google.spanner.v1.Type type = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder> + internalGetTypeFieldBuilder() { + if (typeBuilder_ == null) { + typeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder>( + getType(), getParentForChildren(), isClean()); + type_ = null; + } + return typeBuilder_; + } + + /** + * + * + *
    +       * `identifier` is the name of the column or query parameter.
    +       * 
    + * + * string identifier = 5; + * + * @return Whether the identifier field is set. + */ + @java.lang.Override + public boolean hasIdentifier() { + return valueTypeCase_ == 5; + } + + /** + * + * + *
    +       * `identifier` is the name of the column or query parameter.
    +       * 
    + * + * string identifier = 5; + * + * @return The identifier. + */ + @java.lang.Override + public java.lang.String getIdentifier() { + java.lang.Object ref = ""; + if (valueTypeCase_ == 5) { + ref = valueType_; + } + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (valueTypeCase_ == 5) { + valueType_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * `identifier` is the name of the column or query parameter.
    +       * 
    + * + * string identifier = 5; + * + * @return The bytes for identifier. + */ + @java.lang.Override + public com.google.protobuf.ByteString getIdentifierBytes() { + java.lang.Object ref = ""; + if (valueTypeCase_ == 5) { + ref = valueType_; + } + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + if (valueTypeCase_ == 5) { + valueType_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * `identifier` is the name of the column or query parameter.
    +       * 
    + * + * string identifier = 5; + * + * @param value The identifier to set. + * @return This builder for chaining. + */ + public Builder setIdentifier(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + valueTypeCase_ = 5; + valueType_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +       * `identifier` is the name of the column or query parameter.
    +       * 
    + * + * string identifier = 5; + * + * @return This builder for chaining. + */ + public Builder clearIdentifier() { + if (valueTypeCase_ == 5) { + valueTypeCase_ = 0; + valueType_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * `identifier` is the name of the column or query parameter.
    +       * 
    + * + * string identifier = 5; + * + * @param value The bytes for identifier to set. + * @return This builder for chaining. + */ + public Builder setIdentifierBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + valueTypeCase_ = 5; + valueType_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Value, + com.google.protobuf.Value.Builder, + com.google.protobuf.ValueOrBuilder> + valueBuilder_; + + /** + * + * + *
    +       * The constant value of the key part.
    +       * It is present when query uses a constant as a part of the key.
    +       * 
    + * + * .google.protobuf.Value value = 6; + * + * @return Whether the value field is set. + */ + @java.lang.Override + public boolean hasValue() { + return valueTypeCase_ == 6; + } + + /** + * + * + *
    +       * The constant value of the key part.
    +       * It is present when query uses a constant as a part of the key.
    +       * 
    + * + * .google.protobuf.Value value = 6; + * + * @return The value. + */ + @java.lang.Override + public com.google.protobuf.Value getValue() { + if (valueBuilder_ == null) { + if (valueTypeCase_ == 6) { + return (com.google.protobuf.Value) valueType_; + } + return com.google.protobuf.Value.getDefaultInstance(); + } else { + if (valueTypeCase_ == 6) { + return valueBuilder_.getMessage(); + } + return com.google.protobuf.Value.getDefaultInstance(); + } + } + + /** + * + * + *
    +       * The constant value of the key part.
    +       * It is present when query uses a constant as a part of the key.
    +       * 
    + * + * .google.protobuf.Value value = 6; + */ + public Builder setValue(com.google.protobuf.Value value) { + if (valueBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + valueType_ = value; + onChanged(); + } else { + valueBuilder_.setMessage(value); + } + valueTypeCase_ = 6; + return this; + } + + /** + * + * + *
    +       * The constant value of the key part.
    +       * It is present when query uses a constant as a part of the key.
    +       * 
    + * + * .google.protobuf.Value value = 6; + */ + public Builder setValue(com.google.protobuf.Value.Builder builderForValue) { + if (valueBuilder_ == null) { + valueType_ = builderForValue.build(); + onChanged(); + } else { + valueBuilder_.setMessage(builderForValue.build()); + } + valueTypeCase_ = 6; + return this; + } + + /** + * + * + *
    +       * The constant value of the key part.
    +       * It is present when query uses a constant as a part of the key.
    +       * 
    + * + * .google.protobuf.Value value = 6; + */ + public Builder mergeValue(com.google.protobuf.Value value) { + if (valueBuilder_ == null) { + if (valueTypeCase_ == 6 && valueType_ != com.google.protobuf.Value.getDefaultInstance()) { + valueType_ = + com.google.protobuf.Value.newBuilder((com.google.protobuf.Value) valueType_) + .mergeFrom(value) + .buildPartial(); + } else { + valueType_ = value; + } + onChanged(); + } else { + if (valueTypeCase_ == 6) { + valueBuilder_.mergeFrom(value); + } else { + valueBuilder_.setMessage(value); + } + } + valueTypeCase_ = 6; + return this; + } + + /** + * + * + *
    +       * The constant value of the key part.
    +       * It is present when query uses a constant as a part of the key.
    +       * 
    + * + * .google.protobuf.Value value = 6; + */ + public Builder clearValue() { + if (valueBuilder_ == null) { + if (valueTypeCase_ == 6) { + valueTypeCase_ = 0; + valueType_ = null; + onChanged(); + } + } else { + if (valueTypeCase_ == 6) { + valueTypeCase_ = 0; + valueType_ = null; + } + valueBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +       * The constant value of the key part.
    +       * It is present when query uses a constant as a part of the key.
    +       * 
    + * + * .google.protobuf.Value value = 6; + */ + public com.google.protobuf.Value.Builder getValueBuilder() { + return internalGetValueFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * The constant value of the key part.
    +       * It is present when query uses a constant as a part of the key.
    +       * 
    + * + * .google.protobuf.Value value = 6; + */ + @java.lang.Override + public com.google.protobuf.ValueOrBuilder getValueOrBuilder() { + if ((valueTypeCase_ == 6) && (valueBuilder_ != null)) { + return valueBuilder_.getMessageOrBuilder(); + } else { + if (valueTypeCase_ == 6) { + return (com.google.protobuf.Value) valueType_; + } + return com.google.protobuf.Value.getDefaultInstance(); + } + } + + /** + * + * + *
    +       * The constant value of the key part.
    +       * It is present when query uses a constant as a part of the key.
    +       * 
    + * + * .google.protobuf.Value value = 6; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Value, + com.google.protobuf.Value.Builder, + com.google.protobuf.ValueOrBuilder> + internalGetValueFieldBuilder() { + if (valueBuilder_ == null) { + if (!(valueTypeCase_ == 6)) { + valueType_ = com.google.protobuf.Value.getDefaultInstance(); + } + valueBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Value, + com.google.protobuf.Value.Builder, + com.google.protobuf.ValueOrBuilder>( + (com.google.protobuf.Value) valueType_, getParentForChildren(), isClean()); + valueType_ = null; + } + valueTypeCase_ = 6; + onChanged(); + return valueBuilder_; + } + + /** + * + * + *
    +       * If true, the client is responsible to fill in the value randomly.
    +       * It's relevant only for the INT64 type.
    +       * 
    + * + * bool random = 8; + * + * @return Whether the random field is set. + */ + public boolean hasRandom() { + return valueTypeCase_ == 8; + } + + /** + * + * + *
    +       * If true, the client is responsible to fill in the value randomly.
    +       * It's relevant only for the INT64 type.
    +       * 
    + * + * bool random = 8; + * + * @return The random. + */ + public boolean getRandom() { + if (valueTypeCase_ == 8) { + return (java.lang.Boolean) valueType_; + } + return false; + } + + /** + * + * + *
    +       * If true, the client is responsible to fill in the value randomly.
    +       * It's relevant only for the INT64 type.
    +       * 
    + * + * bool random = 8; + * + * @param value The random to set. + * @return This builder for chaining. + */ + public Builder setRandom(boolean value) { + + valueTypeCase_ = 8; + valueType_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +       * If true, the client is responsible to fill in the value randomly.
    +       * It's relevant only for the INT64 type.
    +       * 
    + * + * bool random = 8; + * + * @return This builder for chaining. + */ + public Builder clearRandom() { + if (valueTypeCase_ == 8) { + valueTypeCase_ = 0; + valueType_ = null; + onChanged(); + } + return this; + } + + private com.google.protobuf.Internal.IntList structIdentifiers_ = emptyIntList(); + + private void ensureStructIdentifiersIsMutable() { + if (!structIdentifiers_.isModifiable()) { + structIdentifiers_ = makeMutableCopy(structIdentifiers_); + } + bitField0_ |= 0x00000080; + } + + /** + * + * + *
    +       * It is a repeated field to support fetching key columns from nested
    +       * structs, such as `STRUCT` query parameters.
    +       * 
    + * + * repeated int32 struct_identifiers = 7; + * + * @return A list containing the structIdentifiers. + */ + public java.util.List getStructIdentifiersList() { + structIdentifiers_.makeImmutable(); + return structIdentifiers_; + } + + /** + * + * + *
    +       * It is a repeated field to support fetching key columns from nested
    +       * structs, such as `STRUCT` query parameters.
    +       * 
    + * + * repeated int32 struct_identifiers = 7; + * + * @return The count of structIdentifiers. + */ + public int getStructIdentifiersCount() { + return structIdentifiers_.size(); + } + + /** + * + * + *
    +       * It is a repeated field to support fetching key columns from nested
    +       * structs, such as `STRUCT` query parameters.
    +       * 
    + * + * repeated int32 struct_identifiers = 7; + * + * @param index The index of the element to return. + * @return The structIdentifiers at the given index. + */ + public int getStructIdentifiers(int index) { + return structIdentifiers_.getInt(index); + } + + /** + * + * + *
    +       * It is a repeated field to support fetching key columns from nested
    +       * structs, such as `STRUCT` query parameters.
    +       * 
    + * + * repeated int32 struct_identifiers = 7; + * + * @param index The index to set the value at. + * @param value The structIdentifiers to set. + * @return This builder for chaining. + */ + public Builder setStructIdentifiers(int index, int value) { + + ensureStructIdentifiersIsMutable(); + structIdentifiers_.setInt(index, value); + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +       * It is a repeated field to support fetching key columns from nested
    +       * structs, such as `STRUCT` query parameters.
    +       * 
    + * + * repeated int32 struct_identifiers = 7; + * + * @param value The structIdentifiers to add. + * @return This builder for chaining. + */ + public Builder addStructIdentifiers(int value) { + + ensureStructIdentifiersIsMutable(); + structIdentifiers_.addInt(value); + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +       * It is a repeated field to support fetching key columns from nested
    +       * structs, such as `STRUCT` query parameters.
    +       * 
    + * + * repeated int32 struct_identifiers = 7; + * + * @param values The structIdentifiers to add. + * @return This builder for chaining. + */ + public Builder addAllStructIdentifiers( + java.lang.Iterable values) { + ensureStructIdentifiersIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, structIdentifiers_); + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +       * It is a repeated field to support fetching key columns from nested
    +       * structs, such as `STRUCT` query parameters.
    +       * 
    + * + * repeated int32 struct_identifiers = 7; + * + * @return This builder for chaining. + */ + public Builder clearStructIdentifiers() { + structIdentifiers_ = emptyIntList(); + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.KeyRecipe.Part) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.KeyRecipe.Part) + private static final com.google.spanner.v1.KeyRecipe.Part DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.KeyRecipe.Part(); + } + + public static com.google.spanner.v1.KeyRecipe.Part getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Part parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.KeyRecipe.Part getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int targetCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object 
target_; + + public enum TargetCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + TABLE_NAME(1), + INDEX_NAME(2), + OPERATION_UID(3), + TARGET_NOT_SET(0); + private final int value; + + private TargetCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static TargetCase valueOf(int value) { + return forNumber(value); + } + + public static TargetCase forNumber(int value) { + switch (value) { + case 1: + return TABLE_NAME; + case 2: + return INDEX_NAME; + case 3: + return OPERATION_UID; + case 0: + return TARGET_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public TargetCase getTargetCase() { + return TargetCase.forNumber(targetCase_); + } + + public static final int TABLE_NAME_FIELD_NUMBER = 1; + + /** + * + * + *
    +   * A table name, matching the name from the database schema.
    +   * 
    + * + * string table_name = 1; + * + * @return Whether the tableName field is set. + */ + public boolean hasTableName() { + return targetCase_ == 1; + } + + /** + * + * + *
    +   * A table name, matching the name from the database schema.
    +   * 
    + * + * string table_name = 1; + * + * @return The tableName. + */ + public java.lang.String getTableName() { + java.lang.Object ref = ""; + if (targetCase_ == 1) { + ref = target_; + } + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (targetCase_ == 1) { + target_ = s; + } + return s; + } + } + + /** + * + * + *
    +   * A table name, matching the name from the database schema.
    +   * 
    + * + * string table_name = 1; + * + * @return The bytes for tableName. + */ + public com.google.protobuf.ByteString getTableNameBytes() { + java.lang.Object ref = ""; + if (targetCase_ == 1) { + ref = target_; + } + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + if (targetCase_ == 1) { + target_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INDEX_NAME_FIELD_NUMBER = 2; + + /** + * + * + *
    +   * An index name, matching the name from the database schema.
    +   * 
    + * + * string index_name = 2; + * + * @return Whether the indexName field is set. + */ + public boolean hasIndexName() { + return targetCase_ == 2; + } + + /** + * + * + *
    +   * An index name, matching the name from the database schema.
    +   * 
    + * + * string index_name = 2; + * + * @return The indexName. + */ + public java.lang.String getIndexName() { + java.lang.Object ref = ""; + if (targetCase_ == 2) { + ref = target_; + } + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (targetCase_ == 2) { + target_ = s; + } + return s; + } + } + + /** + * + * + *
    +   * An index name, matching the name from the database schema.
    +   * 
    + * + * string index_name = 2; + * + * @return The bytes for indexName. + */ + public com.google.protobuf.ByteString getIndexNameBytes() { + java.lang.Object ref = ""; + if (targetCase_ == 2) { + ref = target_; + } + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + if (targetCase_ == 2) { + target_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OPERATION_UID_FIELD_NUMBER = 3; + + /** + * + * + *
    +   * The UID of a query, matching the UID from `RoutingHint`.
    +   * 
    + * + * uint64 operation_uid = 3; + * + * @return Whether the operationUid field is set. + */ + @java.lang.Override + public boolean hasOperationUid() { + return targetCase_ == 3; + } + + /** + * + * + *
    +   * The UID of a query, matching the UID from `RoutingHint`.
    +   * 
    + * + * uint64 operation_uid = 3; + * + * @return The operationUid. + */ + @java.lang.Override + public long getOperationUid() { + if (targetCase_ == 3) { + return (java.lang.Long) target_; + } + return 0L; + } + + public static final int PART_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private java.util.List part_; + + /** + * + * + *
    +   * Parts are in the order they should appear in the encoded key.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + @java.lang.Override + public java.util.List getPartList() { + return part_; + } + + /** + * + * + *
    +   * Parts are in the order they should appear in the encoded key.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + @java.lang.Override + public java.util.List + getPartOrBuilderList() { + return part_; + } + + /** + * + * + *
    +   * Parts are in the order they should appear in the encoded key.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + @java.lang.Override + public int getPartCount() { + return part_.size(); + } + + /** + * + * + *
    +   * Parts are in the order they should appear in the encoded key.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + @java.lang.Override + public com.google.spanner.v1.KeyRecipe.Part getPart(int index) { + return part_.get(index); + } + + /** + * + * + *
    +   * Parts are in the order they should appear in the encoded key.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + @java.lang.Override + public com.google.spanner.v1.KeyRecipe.PartOrBuilder getPartOrBuilder(int index) { + return part_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (targetCase_ == 1) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, target_); + } + if (targetCase_ == 2) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, target_); + } + if (targetCase_ == 3) { + output.writeUInt64(3, (long) ((java.lang.Long) target_)); + } + for (int i = 0; i < part_.size(); i++) { + output.writeMessage(4, part_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (targetCase_ == 1) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, target_); + } + if (targetCase_ == 2) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, target_); + } + if (targetCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeUInt64Size( + 3, (long) ((java.lang.Long) target_)); + } + for (int i = 0; i < part_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, part_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.KeyRecipe)) { + return super.equals(obj); + } + com.google.spanner.v1.KeyRecipe other = 
(com.google.spanner.v1.KeyRecipe) obj; + + if (!getPartList().equals(other.getPartList())) return false; + if (!getTargetCase().equals(other.getTargetCase())) return false; + switch (targetCase_) { + case 1: + if (!getTableName().equals(other.getTableName())) return false; + break; + case 2: + if (!getIndexName().equals(other.getIndexName())) return false; + break; + case 3: + if (getOperationUid() != other.getOperationUid()) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getPartCount() > 0) { + hash = (37 * hash) + PART_FIELD_NUMBER; + hash = (53 * hash) + getPartList().hashCode(); + } + switch (targetCase_) { + case 1: + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + break; + case 2: + hash = (37 * hash) + INDEX_NAME_FIELD_NUMBER; + hash = (53 * hash) + getIndexName().hashCode(); + break; + case 3: + hash = (37 * hash) + OPERATION_UID_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getOperationUid()); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.KeyRecipe parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.KeyRecipe parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.KeyRecipe parseFrom(com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.KeyRecipe parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.KeyRecipe parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.KeyRecipe parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.KeyRecipe parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.KeyRecipe parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.KeyRecipe parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.KeyRecipe parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.KeyRecipe parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.KeyRecipe parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.KeyRecipe prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * A `KeyRecipe` provides the metadata required to translate reads, mutations,
    +   * and queries into a byte array in "sortable string format" (ssformat)that can
    +   * be used with `Range`s to route requests. Note that the client *must* tolerate
    +   * `KeyRecipe`s that appear to be invalid, since the `KeyRecipe` format may
    +   * change over time. Requests with invalid `KeyRecipe`s should be routed to a
    +   * default server.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.KeyRecipe} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.KeyRecipe) + com.google.spanner.v1.KeyRecipeOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_KeyRecipe_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_KeyRecipe_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.KeyRecipe.class, com.google.spanner.v1.KeyRecipe.Builder.class); + } + + // Construct using com.google.spanner.v1.KeyRecipe.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (partBuilder_ == null) { + part_ = java.util.Collections.emptyList(); + } else { + part_ = null; + partBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + targetCase_ = 0; + target_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_KeyRecipe_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.KeyRecipe getDefaultInstanceForType() { + return com.google.spanner.v1.KeyRecipe.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.KeyRecipe build() { + com.google.spanner.v1.KeyRecipe result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + 
public com.google.spanner.v1.KeyRecipe buildPartial() { + com.google.spanner.v1.KeyRecipe result = new com.google.spanner.v1.KeyRecipe(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.v1.KeyRecipe result) { + if (partBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0)) { + part_ = java.util.Collections.unmodifiableList(part_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.part_ = part_; + } else { + result.part_ = partBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.KeyRecipe result) { + int from_bitField0_ = bitField0_; + } + + private void buildPartialOneofs(com.google.spanner.v1.KeyRecipe result) { + result.targetCase_ = targetCase_; + result.target_ = this.target_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.KeyRecipe) { + return mergeFrom((com.google.spanner.v1.KeyRecipe) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.KeyRecipe other) { + if (other == com.google.spanner.v1.KeyRecipe.getDefaultInstance()) return this; + if (partBuilder_ == null) { + if (!other.part_.isEmpty()) { + if (part_.isEmpty()) { + part_ = other.part_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensurePartIsMutable(); + part_.addAll(other.part_); + } + onChanged(); + } + } else { + if (!other.part_.isEmpty()) { + if (partBuilder_.isEmpty()) { + partBuilder_.dispose(); + partBuilder_ = null; + part_ = other.part_; + bitField0_ = (bitField0_ & ~0x00000008); + partBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetPartFieldBuilder() + : null; + } else { + partBuilder_.addAllMessages(other.part_); + } + } + } + switch (other.getTargetCase()) { + case TABLE_NAME: + { + targetCase_ = 1; + target_ = other.target_; + onChanged(); + break; + } + case INDEX_NAME: + { + targetCase_ = 2; + target_ = other.target_; + onChanged(); + break; + } + case OPERATION_UID: + { + setOperationUid(other.getOperationUid()); + break; + } + case TARGET_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + targetCase_ = 1; + target_ = s; + break; + } // case 10 + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + targetCase_ = 2; + target_ = s; + break; + } // case 18 + case 24: + { + target_ = input.readUInt64(); + targetCase_ = 3; + break; + } // case 24 + case 34: + { + com.google.spanner.v1.KeyRecipe.Part m = + input.readMessage( + com.google.spanner.v1.KeyRecipe.Part.parser(), extensionRegistry); + if (partBuilder_ == null) { + ensurePartIsMutable(); + part_.add(m); + } else { + partBuilder_.addMessage(m); + } + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // 
finally + return this; + } + + private int targetCase_ = 0; + private java.lang.Object target_; + + public TargetCase getTargetCase() { + return TargetCase.forNumber(targetCase_); + } + + public Builder clearTarget() { + targetCase_ = 0; + target_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + /** + * + * + *
    +     * A table name, matching the name from the database schema.
    +     * 
    + * + * string table_name = 1; + * + * @return Whether the tableName field is set. + */ + @java.lang.Override + public boolean hasTableName() { + return targetCase_ == 1; + } + + /** + * + * + *
    +     * A table name, matching the name from the database schema.
    +     * 
    + * + * string table_name = 1; + * + * @return The tableName. + */ + @java.lang.Override + public java.lang.String getTableName() { + java.lang.Object ref = ""; + if (targetCase_ == 1) { + ref = target_; + } + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (targetCase_ == 1) { + target_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * A table name, matching the name from the database schema.
    +     * 
    + * + * string table_name = 1; + * + * @return The bytes for tableName. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTableNameBytes() { + java.lang.Object ref = ""; + if (targetCase_ == 1) { + ref = target_; + } + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + if (targetCase_ == 1) { + target_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * A table name, matching the name from the database schema.
    +     * 
    + * + * string table_name = 1; + * + * @param value The tableName to set. + * @return This builder for chaining. + */ + public Builder setTableName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + targetCase_ = 1; + target_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A table name, matching the name from the database schema.
    +     * 
    + * + * string table_name = 1; + * + * @return This builder for chaining. + */ + public Builder clearTableName() { + if (targetCase_ == 1) { + targetCase_ = 0; + target_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * A table name, matching the name from the database schema.
    +     * 
    + * + * string table_name = 1; + * + * @param value The bytes for tableName to set. + * @return This builder for chaining. + */ + public Builder setTableNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + targetCase_ = 1; + target_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * An index name, matching the name from the database schema.
    +     * 
    + * + * string index_name = 2; + * + * @return Whether the indexName field is set. + */ + @java.lang.Override + public boolean hasIndexName() { + return targetCase_ == 2; + } + + /** + * + * + *
    +     * An index name, matching the name from the database schema.
    +     * 
    + * + * string index_name = 2; + * + * @return The indexName. + */ + @java.lang.Override + public java.lang.String getIndexName() { + java.lang.Object ref = ""; + if (targetCase_ == 2) { + ref = target_; + } + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (targetCase_ == 2) { + target_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * An index name, matching the name from the database schema.
    +     * 
    + * + * string index_name = 2; + * + * @return The bytes for indexName. + */ + @java.lang.Override + public com.google.protobuf.ByteString getIndexNameBytes() { + java.lang.Object ref = ""; + if (targetCase_ == 2) { + ref = target_; + } + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + if (targetCase_ == 2) { + target_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * An index name, matching the name from the database schema.
    +     * 
    + * + * string index_name = 2; + * + * @param value The indexName to set. + * @return This builder for chaining. + */ + public Builder setIndexName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + targetCase_ = 2; + target_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * An index name, matching the name from the database schema.
    +     * 
    + * + * string index_name = 2; + * + * @return This builder for chaining. + */ + public Builder clearIndexName() { + if (targetCase_ == 2) { + targetCase_ = 0; + target_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * An index name, matching the name from the database schema.
    +     * 
    + * + * string index_name = 2; + * + * @param value The bytes for indexName to set. + * @return This builder for chaining. + */ + public Builder setIndexNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + targetCase_ = 2; + target_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The UID of a query, matching the UID from `RoutingHint`.
    +     * 
    + * + * uint64 operation_uid = 3; + * + * @return Whether the operationUid field is set. + */ + public boolean hasOperationUid() { + return targetCase_ == 3; + } + + /** + * + * + *
    +     * The UID of a query, matching the UID from `RoutingHint`.
    +     * 
    + * + * uint64 operation_uid = 3; + * + * @return The operationUid. + */ + public long getOperationUid() { + if (targetCase_ == 3) { + return (java.lang.Long) target_; + } + return 0L; + } + + /** + * + * + *
    +     * The UID of a query, matching the UID from `RoutingHint`.
    +     * 
    + * + * uint64 operation_uid = 3; + * + * @param value The operationUid to set. + * @return This builder for chaining. + */ + public Builder setOperationUid(long value) { + + targetCase_ = 3; + target_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The UID of a query, matching the UID from `RoutingHint`.
    +     * 
    + * + * uint64 operation_uid = 3; + * + * @return This builder for chaining. + */ + public Builder clearOperationUid() { + if (targetCase_ == 3) { + targetCase_ = 0; + target_ = null; + onChanged(); + } + return this; + } + + private java.util.List part_ = + java.util.Collections.emptyList(); + + private void ensurePartIsMutable() { + if (!((bitField0_ & 0x00000008) != 0)) { + part_ = new java.util.ArrayList(part_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.KeyRecipe.Part, + com.google.spanner.v1.KeyRecipe.Part.Builder, + com.google.spanner.v1.KeyRecipe.PartOrBuilder> + partBuilder_; + + /** + * + * + *
    +     * Parts are in the order they should appear in the encoded key.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + public java.util.List getPartList() { + if (partBuilder_ == null) { + return java.util.Collections.unmodifiableList(part_); + } else { + return partBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Parts are in the order they should appear in the encoded key.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + public int getPartCount() { + if (partBuilder_ == null) { + return part_.size(); + } else { + return partBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Parts are in the order they should appear in the encoded key.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + public com.google.spanner.v1.KeyRecipe.Part getPart(int index) { + if (partBuilder_ == null) { + return part_.get(index); + } else { + return partBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Parts are in the order they should appear in the encoded key.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + public Builder setPart(int index, com.google.spanner.v1.KeyRecipe.Part value) { + if (partBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartIsMutable(); + part_.set(index, value); + onChanged(); + } else { + partBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Parts are in the order they should appear in the encoded key.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + public Builder setPart( + int index, com.google.spanner.v1.KeyRecipe.Part.Builder builderForValue) { + if (partBuilder_ == null) { + ensurePartIsMutable(); + part_.set(index, builderForValue.build()); + onChanged(); + } else { + partBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Parts are in the order they should appear in the encoded key.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + public Builder addPart(com.google.spanner.v1.KeyRecipe.Part value) { + if (partBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartIsMutable(); + part_.add(value); + onChanged(); + } else { + partBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Parts are in the order they should appear in the encoded key.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + public Builder addPart(int index, com.google.spanner.v1.KeyRecipe.Part value) { + if (partBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartIsMutable(); + part_.add(index, value); + onChanged(); + } else { + partBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Parts are in the order they should appear in the encoded key.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + public Builder addPart(com.google.spanner.v1.KeyRecipe.Part.Builder builderForValue) { + if (partBuilder_ == null) { + ensurePartIsMutable(); + part_.add(builderForValue.build()); + onChanged(); + } else { + partBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Parts are in the order they should appear in the encoded key.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + public Builder addPart( + int index, com.google.spanner.v1.KeyRecipe.Part.Builder builderForValue) { + if (partBuilder_ == null) { + ensurePartIsMutable(); + part_.add(index, builderForValue.build()); + onChanged(); + } else { + partBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Parts are in the order they should appear in the encoded key.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + public Builder addAllPart( + java.lang.Iterable values) { + if (partBuilder_ == null) { + ensurePartIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, part_); + onChanged(); + } else { + partBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Parts are in the order they should appear in the encoded key.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + public Builder clearPart() { + if (partBuilder_ == null) { + part_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + partBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Parts are in the order they should appear in the encoded key.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + public Builder removePart(int index) { + if (partBuilder_ == null) { + ensurePartIsMutable(); + part_.remove(index); + onChanged(); + } else { + partBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Parts are in the order they should appear in the encoded key.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + public com.google.spanner.v1.KeyRecipe.Part.Builder getPartBuilder(int index) { + return internalGetPartFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Parts are in the order they should appear in the encoded key.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + public com.google.spanner.v1.KeyRecipe.PartOrBuilder getPartOrBuilder(int index) { + if (partBuilder_ == null) { + return part_.get(index); + } else { + return partBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Parts are in the order they should appear in the encoded key.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + public java.util.List + getPartOrBuilderList() { + if (partBuilder_ != null) { + return partBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(part_); + } + } + + /** + * + * + *
    +     * Parts are in the order they should appear in the encoded key.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + public com.google.spanner.v1.KeyRecipe.Part.Builder addPartBuilder() { + return internalGetPartFieldBuilder() + .addBuilder(com.google.spanner.v1.KeyRecipe.Part.getDefaultInstance()); + } + + /** + * + * + *
    +     * Parts are in the order they should appear in the encoded key.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + public com.google.spanner.v1.KeyRecipe.Part.Builder addPartBuilder(int index) { + return internalGetPartFieldBuilder() + .addBuilder(index, com.google.spanner.v1.KeyRecipe.Part.getDefaultInstance()); + } + + /** + * + * + *
    +     * Parts are in the order they should appear in the encoded key.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + public java.util.List getPartBuilderList() { + return internalGetPartFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.KeyRecipe.Part, + com.google.spanner.v1.KeyRecipe.Part.Builder, + com.google.spanner.v1.KeyRecipe.PartOrBuilder> + internalGetPartFieldBuilder() { + if (partBuilder_ == null) { + partBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.KeyRecipe.Part, + com.google.spanner.v1.KeyRecipe.Part.Builder, + com.google.spanner.v1.KeyRecipe.PartOrBuilder>( + part_, ((bitField0_ & 0x00000008) != 0), getParentForChildren(), isClean()); + part_ = null; + } + return partBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.KeyRecipe) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.KeyRecipe) + private static final com.google.spanner.v1.KeyRecipe DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.KeyRecipe(); + } + + public static com.google.spanner.v1.KeyRecipe getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public KeyRecipe parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.KeyRecipe getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeyRecipeOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeyRecipeOrBuilder.java new file mode 100644 index 000000000000..4f8f4bfae9b9 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeyRecipeOrBuilder.java @@ -0,0 +1,189 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/location.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface KeyRecipeOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.KeyRecipe) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * A table name, matching the name from the database schema.
    +   * 
    + * + * string table_name = 1; + * + * @return Whether the tableName field is set. + */ + boolean hasTableName(); + + /** + * + * + *
    +   * A table name, matching the name from the database schema.
    +   * 
    + * + * string table_name = 1; + * + * @return The tableName. + */ + java.lang.String getTableName(); + + /** + * + * + *
    +   * A table name, matching the name from the database schema.
    +   * 
    + * + * string table_name = 1; + * + * @return The bytes for tableName. + */ + com.google.protobuf.ByteString getTableNameBytes(); + + /** + * + * + *
    +   * An index name, matching the name from the database schema.
    +   * 
    + * + * string index_name = 2; + * + * @return Whether the indexName field is set. + */ + boolean hasIndexName(); + + /** + * + * + *
    +   * An index name, matching the name from the database schema.
    +   * 
    + * + * string index_name = 2; + * + * @return The indexName. + */ + java.lang.String getIndexName(); + + /** + * + * + *
    +   * An index name, matching the name from the database schema.
    +   * 
    + * + * string index_name = 2; + * + * @return The bytes for indexName. + */ + com.google.protobuf.ByteString getIndexNameBytes(); + + /** + * + * + *
    +   * The UID of a query, matching the UID from `RoutingHint`.
    +   * 
    + * + * uint64 operation_uid = 3; + * + * @return Whether the operationUid field is set. + */ + boolean hasOperationUid(); + + /** + * + * + *
    +   * The UID of a query, matching the UID from `RoutingHint`.
    +   * 
    + * + * uint64 operation_uid = 3; + * + * @return The operationUid. + */ + long getOperationUid(); + + /** + * + * + *
    +   * Parts are in the order they should appear in the encoded key.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + java.util.List getPartList(); + + /** + * + * + *
    +   * Parts are in the order they should appear in the encoded key.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + com.google.spanner.v1.KeyRecipe.Part getPart(int index); + + /** + * + * + *
    +   * Parts are in the order they should appear in the encoded key.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + int getPartCount(); + + /** + * + * + *
    +   * Parts are in the order they should appear in the encoded key.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + java.util.List getPartOrBuilderList(); + + /** + * + * + *
    +   * Parts are in the order they should appear in the encoded key.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRecipe.Part part = 4; + */ + com.google.spanner.v1.KeyRecipe.PartOrBuilder getPartOrBuilder(int index); + + com.google.spanner.v1.KeyRecipe.TargetCase getTargetCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeySet.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeySet.java new file mode 100644 index 000000000000..f49b67163bfe --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeySet.java @@ -0,0 +1,1619 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/keys.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * `KeySet` defines a collection of Cloud Spanner keys and/or key ranges. All
    + * the keys are expected to be in the same table or index. The keys need
    + * not be sorted in any particular way.
    + *
    + * If the same key is specified multiple times in the set (for example
    + * if two ranges, two keys, or a key and a range overlap), Cloud Spanner
    + * behaves as if the key were only specified once.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.KeySet} + */ +@com.google.protobuf.Generated +public final class KeySet extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.KeySet) + KeySetOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "KeySet"); + } + + // Use KeySet.newBuilder() to construct. + private KeySet(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private KeySet() { + keys_ = java.util.Collections.emptyList(); + ranges_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.KeysProto.internal_static_google_spanner_v1_KeySet_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.KeysProto + .internal_static_google_spanner_v1_KeySet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.KeySet.class, com.google.spanner.v1.KeySet.Builder.class); + } + + public static final int KEYS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List keys_; + + /** + * + * + *
    +   * A list of specific keys. Entries in `keys` should have exactly as
    +   * many elements as there are columns in the primary or index key
    +   * with which this `KeySet` is used.  Individual key values are
    +   * encoded as described [here][google.spanner.v1.TypeCode].
    +   * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + @java.lang.Override + public java.util.List getKeysList() { + return keys_; + } + + /** + * + * + *
    +   * A list of specific keys. Entries in `keys` should have exactly as
    +   * many elements as there are columns in the primary or index key
    +   * with which this `KeySet` is used.  Individual key values are
    +   * encoded as described [here][google.spanner.v1.TypeCode].
    +   * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + @java.lang.Override + public java.util.List getKeysOrBuilderList() { + return keys_; + } + + /** + * + * + *
    +   * A list of specific keys. Entries in `keys` should have exactly as
    +   * many elements as there are columns in the primary or index key
    +   * with which this `KeySet` is used.  Individual key values are
    +   * encoded as described [here][google.spanner.v1.TypeCode].
    +   * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + @java.lang.Override + public int getKeysCount() { + return keys_.size(); + } + + /** + * + * + *
    +   * A list of specific keys. Entries in `keys` should have exactly as
    +   * many elements as there are columns in the primary or index key
    +   * with which this `KeySet` is used.  Individual key values are
    +   * encoded as described [here][google.spanner.v1.TypeCode].
    +   * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + @java.lang.Override + public com.google.protobuf.ListValue getKeys(int index) { + return keys_.get(index); + } + + /** + * + * + *
    +   * A list of specific keys. Entries in `keys` should have exactly as
    +   * many elements as there are columns in the primary or index key
    +   * with which this `KeySet` is used.  Individual key values are
    +   * encoded as described [here][google.spanner.v1.TypeCode].
    +   * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + @java.lang.Override + public com.google.protobuf.ListValueOrBuilder getKeysOrBuilder(int index) { + return keys_.get(index); + } + + public static final int RANGES_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List ranges_; + + /** + * + * + *
    +   * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +   * information about key range specifications.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + @java.lang.Override + public java.util.List getRangesList() { + return ranges_; + } + + /** + * + * + *
    +   * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +   * information about key range specifications.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + @java.lang.Override + public java.util.List + getRangesOrBuilderList() { + return ranges_; + } + + /** + * + * + *
    +   * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +   * information about key range specifications.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + @java.lang.Override + public int getRangesCount() { + return ranges_.size(); + } + + /** + * + * + *
    +   * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +   * information about key range specifications.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + @java.lang.Override + public com.google.spanner.v1.KeyRange getRanges(int index) { + return ranges_.get(index); + } + + /** + * + * + *
    +   * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +   * information about key range specifications.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + @java.lang.Override + public com.google.spanner.v1.KeyRangeOrBuilder getRangesOrBuilder(int index) { + return ranges_.get(index); + } + + public static final int ALL_FIELD_NUMBER = 3; + private boolean all_ = false; + + /** + * + * + *
    +   * For convenience `all` can be set to `true` to indicate that this
    +   * `KeySet` matches all keys in the table or index. Note that any keys
    +   * specified in `keys` or `ranges` are only yielded once.
    +   * 
    + * + * bool all = 3; + * + * @return The all. + */ + @java.lang.Override + public boolean getAll() { + return all_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < keys_.size(); i++) { + output.writeMessage(1, keys_.get(i)); + } + for (int i = 0; i < ranges_.size(); i++) { + output.writeMessage(2, ranges_.get(i)); + } + if (all_ != false) { + output.writeBool(3, all_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < keys_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, keys_.get(i)); + } + for (int i = 0; i < ranges_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, ranges_.get(i)); + } + if (all_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, all_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.KeySet)) { + return super.equals(obj); + } + com.google.spanner.v1.KeySet other = (com.google.spanner.v1.KeySet) obj; + + if (!getKeysList().equals(other.getKeysList())) return false; + if (!getRangesList().equals(other.getRangesList())) return false; + if (getAll() != other.getAll()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int 
hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getKeysCount() > 0) { + hash = (37 * hash) + KEYS_FIELD_NUMBER; + hash = (53 * hash) + getKeysList().hashCode(); + } + if (getRangesCount() > 0) { + hash = (37 * hash) + RANGES_FIELD_NUMBER; + hash = (53 * hash) + getRangesList().hashCode(); + } + hash = (37 * hash) + ALL_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getAll()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.KeySet parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.KeySet parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.KeySet parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.KeySet parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.KeySet parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.KeySet parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.KeySet 
parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.KeySet parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.KeySet parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.KeySet parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.KeySet parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.KeySet parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.KeySet prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * `KeySet` defines a collection of Cloud Spanner keys and/or key ranges. All
    +   * the keys are expected to be in the same table or index. The keys need
    +   * not be sorted in any particular way.
    +   *
    +   * If the same key is specified multiple times in the set (for example
    +   * if two ranges, two keys, or a key and a range overlap), Cloud Spanner
    +   * behaves as if the key were only specified once.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.KeySet} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.KeySet) + com.google.spanner.v1.KeySetOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.KeysProto.internal_static_google_spanner_v1_KeySet_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.KeysProto + .internal_static_google_spanner_v1_KeySet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.KeySet.class, com.google.spanner.v1.KeySet.Builder.class); + } + + // Construct using com.google.spanner.v1.KeySet.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (keysBuilder_ == null) { + keys_ = java.util.Collections.emptyList(); + } else { + keys_ = null; + keysBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (rangesBuilder_ == null) { + ranges_ = java.util.Collections.emptyList(); + } else { + ranges_ = null; + rangesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + all_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.KeysProto.internal_static_google_spanner_v1_KeySet_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.KeySet getDefaultInstanceForType() { + return com.google.spanner.v1.KeySet.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.KeySet build() { + com.google.spanner.v1.KeySet result = buildPartial(); + if (!result.isInitialized()) { 
+ throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.KeySet buildPartial() { + com.google.spanner.v1.KeySet result = new com.google.spanner.v1.KeySet(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.v1.KeySet result) { + if (keysBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + keys_ = java.util.Collections.unmodifiableList(keys_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.keys_ = keys_; + } else { + result.keys_ = keysBuilder_.build(); + } + if (rangesBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + ranges_ = java.util.Collections.unmodifiableList(ranges_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.ranges_ = ranges_; + } else { + result.ranges_ = rangesBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.KeySet result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.all_ = all_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.KeySet) { + return mergeFrom((com.google.spanner.v1.KeySet) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.KeySet other) { + if (other == com.google.spanner.v1.KeySet.getDefaultInstance()) return this; + if (keysBuilder_ == null) { + if (!other.keys_.isEmpty()) { + if (keys_.isEmpty()) { + keys_ = other.keys_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureKeysIsMutable(); + keys_.addAll(other.keys_); + } + onChanged(); + } + } else { + if (!other.keys_.isEmpty()) { + if (keysBuilder_.isEmpty()) { + keysBuilder_.dispose(); + keysBuilder_ = null; + keys_ = other.keys_; + bitField0_ = (bitField0_ & ~0x00000001); + 
keysBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetKeysFieldBuilder() + : null; + } else { + keysBuilder_.addAllMessages(other.keys_); + } + } + } + if (rangesBuilder_ == null) { + if (!other.ranges_.isEmpty()) { + if (ranges_.isEmpty()) { + ranges_ = other.ranges_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureRangesIsMutable(); + ranges_.addAll(other.ranges_); + } + onChanged(); + } + } else { + if (!other.ranges_.isEmpty()) { + if (rangesBuilder_.isEmpty()) { + rangesBuilder_.dispose(); + rangesBuilder_ = null; + ranges_ = other.ranges_; + bitField0_ = (bitField0_ & ~0x00000002); + rangesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetRangesFieldBuilder() + : null; + } else { + rangesBuilder_.addAllMessages(other.ranges_); + } + } + } + if (other.getAll() != false) { + setAll(other.getAll()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.protobuf.ListValue m = + input.readMessage(com.google.protobuf.ListValue.parser(), extensionRegistry); + if (keysBuilder_ == null) { + ensureKeysIsMutable(); + keys_.add(m); + } else { + keysBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + com.google.spanner.v1.KeyRange m = + input.readMessage(com.google.spanner.v1.KeyRange.parser(), extensionRegistry); + if (rangesBuilder_ == null) { + ensureRangesIsMutable(); + ranges_.add(m); + } else { + 
rangesBuilder_.addMessage(m); + } + break; + } // case 18 + case 24: + { + all_ = input.readBool(); + bitField0_ |= 0x00000004; + break; + } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List keys_ = java.util.Collections.emptyList(); + + private void ensureKeysIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + keys_ = new java.util.ArrayList(keys_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder> + keysBuilder_; + + /** + * + * + *
    +     * A list of specific keys. Entries in `keys` should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this `KeySet` is used.  Individual key values are
    +     * encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + public java.util.List getKeysList() { + if (keysBuilder_ == null) { + return java.util.Collections.unmodifiableList(keys_); + } else { + return keysBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * A list of specific keys. Entries in `keys` should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this `KeySet` is used.  Individual key values are
    +     * encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + public int getKeysCount() { + if (keysBuilder_ == null) { + return keys_.size(); + } else { + return keysBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * A list of specific keys. Entries in `keys` should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this `KeySet` is used.  Individual key values are
    +     * encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + public com.google.protobuf.ListValue getKeys(int index) { + if (keysBuilder_ == null) { + return keys_.get(index); + } else { + return keysBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * A list of specific keys. Entries in `keys` should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this `KeySet` is used.  Individual key values are
    +     * encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + public Builder setKeys(int index, com.google.protobuf.ListValue value) { + if (keysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureKeysIsMutable(); + keys_.set(index, value); + onChanged(); + } else { + keysBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * A list of specific keys. Entries in `keys` should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this `KeySet` is used.  Individual key values are
    +     * encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + public Builder setKeys(int index, com.google.protobuf.ListValue.Builder builderForValue) { + if (keysBuilder_ == null) { + ensureKeysIsMutable(); + keys_.set(index, builderForValue.build()); + onChanged(); + } else { + keysBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of specific keys. Entries in `keys` should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this `KeySet` is used.  Individual key values are
    +     * encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + public Builder addKeys(com.google.protobuf.ListValue value) { + if (keysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureKeysIsMutable(); + keys_.add(value); + onChanged(); + } else { + keysBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * A list of specific keys. Entries in `keys` should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this `KeySet` is used.  Individual key values are
    +     * encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + public Builder addKeys(int index, com.google.protobuf.ListValue value) { + if (keysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureKeysIsMutable(); + keys_.add(index, value); + onChanged(); + } else { + keysBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * A list of specific keys. Entries in `keys` should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this `KeySet` is used.  Individual key values are
    +     * encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + public Builder addKeys(com.google.protobuf.ListValue.Builder builderForValue) { + if (keysBuilder_ == null) { + ensureKeysIsMutable(); + keys_.add(builderForValue.build()); + onChanged(); + } else { + keysBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of specific keys. Entries in `keys` should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this `KeySet` is used.  Individual key values are
    +     * encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + public Builder addKeys(int index, com.google.protobuf.ListValue.Builder builderForValue) { + if (keysBuilder_ == null) { + ensureKeysIsMutable(); + keys_.add(index, builderForValue.build()); + onChanged(); + } else { + keysBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of specific keys. Entries in `keys` should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this `KeySet` is used.  Individual key values are
    +     * encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + public Builder addAllKeys(java.lang.Iterable values) { + if (keysBuilder_ == null) { + ensureKeysIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, keys_); + onChanged(); + } else { + keysBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * A list of specific keys. Entries in `keys` should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this `KeySet` is used.  Individual key values are
    +     * encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + public Builder clearKeys() { + if (keysBuilder_ == null) { + keys_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + keysBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * A list of specific keys. Entries in `keys` should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this `KeySet` is used.  Individual key values are
    +     * encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + public Builder removeKeys(int index) { + if (keysBuilder_ == null) { + ensureKeysIsMutable(); + keys_.remove(index); + onChanged(); + } else { + keysBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * A list of specific keys. Entries in `keys` should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this `KeySet` is used.  Individual key values are
    +     * encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + public com.google.protobuf.ListValue.Builder getKeysBuilder(int index) { + return internalGetKeysFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * A list of specific keys. Entries in `keys` should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this `KeySet` is used.  Individual key values are
    +     * encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + public com.google.protobuf.ListValueOrBuilder getKeysOrBuilder(int index) { + if (keysBuilder_ == null) { + return keys_.get(index); + } else { + return keysBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * A list of specific keys. Entries in `keys` should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this `KeySet` is used.  Individual key values are
    +     * encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + public java.util.List getKeysOrBuilderList() { + if (keysBuilder_ != null) { + return keysBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(keys_); + } + } + + /** + * + * + *
    +     * A list of specific keys. Entries in `keys` should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this `KeySet` is used.  Individual key values are
    +     * encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + public com.google.protobuf.ListValue.Builder addKeysBuilder() { + return internalGetKeysFieldBuilder() + .addBuilder(com.google.protobuf.ListValue.getDefaultInstance()); + } + + /** + * + * + *
    +     * A list of specific keys. Entries in `keys` should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this `KeySet` is used.  Individual key values are
    +     * encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + public com.google.protobuf.ListValue.Builder addKeysBuilder(int index) { + return internalGetKeysFieldBuilder() + .addBuilder(index, com.google.protobuf.ListValue.getDefaultInstance()); + } + + /** + * + * + *
    +     * A list of specific keys. Entries in `keys` should have exactly as
    +     * many elements as there are columns in the primary or index key
    +     * with which this `KeySet` is used.  Individual key values are
    +     * encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + public java.util.List getKeysBuilderList() { + return internalGetKeysFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder> + internalGetKeysFieldBuilder() { + if (keysBuilder_ == null) { + keysBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder>( + keys_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + keys_ = null; + } + return keysBuilder_; + } + + private java.util.List ranges_ = + java.util.Collections.emptyList(); + + private void ensureRangesIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + ranges_ = new java.util.ArrayList(ranges_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.KeyRange, + com.google.spanner.v1.KeyRange.Builder, + com.google.spanner.v1.KeyRangeOrBuilder> + rangesBuilder_; + + /** + * + * + *
    +     * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +     * information about key range specifications.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + public java.util.List getRangesList() { + if (rangesBuilder_ == null) { + return java.util.Collections.unmodifiableList(ranges_); + } else { + return rangesBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +     * information about key range specifications.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + public int getRangesCount() { + if (rangesBuilder_ == null) { + return ranges_.size(); + } else { + return rangesBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +     * information about key range specifications.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + public com.google.spanner.v1.KeyRange getRanges(int index) { + if (rangesBuilder_ == null) { + return ranges_.get(index); + } else { + return rangesBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +     * information about key range specifications.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + public Builder setRanges(int index, com.google.spanner.v1.KeyRange value) { + if (rangesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRangesIsMutable(); + ranges_.set(index, value); + onChanged(); + } else { + rangesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +     * information about key range specifications.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + public Builder setRanges(int index, com.google.spanner.v1.KeyRange.Builder builderForValue) { + if (rangesBuilder_ == null) { + ensureRangesIsMutable(); + ranges_.set(index, builderForValue.build()); + onChanged(); + } else { + rangesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +     * information about key range specifications.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + public Builder addRanges(com.google.spanner.v1.KeyRange value) { + if (rangesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRangesIsMutable(); + ranges_.add(value); + onChanged(); + } else { + rangesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +     * information about key range specifications.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + public Builder addRanges(int index, com.google.spanner.v1.KeyRange value) { + if (rangesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRangesIsMutable(); + ranges_.add(index, value); + onChanged(); + } else { + rangesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +     * information about key range specifications.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + public Builder addRanges(com.google.spanner.v1.KeyRange.Builder builderForValue) { + if (rangesBuilder_ == null) { + ensureRangesIsMutable(); + ranges_.add(builderForValue.build()); + onChanged(); + } else { + rangesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +     * information about key range specifications.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + public Builder addRanges(int index, com.google.spanner.v1.KeyRange.Builder builderForValue) { + if (rangesBuilder_ == null) { + ensureRangesIsMutable(); + ranges_.add(index, builderForValue.build()); + onChanged(); + } else { + rangesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +     * information about key range specifications.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + public Builder addAllRanges( + java.lang.Iterable values) { + if (rangesBuilder_ == null) { + ensureRangesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, ranges_); + onChanged(); + } else { + rangesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +     * information about key range specifications.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + public Builder clearRanges() { + if (rangesBuilder_ == null) { + ranges_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + rangesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +     * information about key range specifications.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + public Builder removeRanges(int index) { + if (rangesBuilder_ == null) { + ensureRangesIsMutable(); + ranges_.remove(index); + onChanged(); + } else { + rangesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +     * information about key range specifications.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + public com.google.spanner.v1.KeyRange.Builder getRangesBuilder(int index) { + return internalGetRangesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +     * information about key range specifications.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + public com.google.spanner.v1.KeyRangeOrBuilder getRangesOrBuilder(int index) { + if (rangesBuilder_ == null) { + return ranges_.get(index); + } else { + return rangesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +     * information about key range specifications.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + public java.util.List + getRangesOrBuilderList() { + if (rangesBuilder_ != null) { + return rangesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(ranges_); + } + } + + /** + * + * + *
    +     * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +     * information about key range specifications.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + public com.google.spanner.v1.KeyRange.Builder addRangesBuilder() { + return internalGetRangesFieldBuilder() + .addBuilder(com.google.spanner.v1.KeyRange.getDefaultInstance()); + } + + /** + * + * + *
    +     * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +     * information about key range specifications.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + public com.google.spanner.v1.KeyRange.Builder addRangesBuilder(int index) { + return internalGetRangesFieldBuilder() + .addBuilder(index, com.google.spanner.v1.KeyRange.getDefaultInstance()); + } + + /** + * + * + *
    +     * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +     * information about key range specifications.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + public java.util.List getRangesBuilderList() { + return internalGetRangesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.KeyRange, + com.google.spanner.v1.KeyRange.Builder, + com.google.spanner.v1.KeyRangeOrBuilder> + internalGetRangesFieldBuilder() { + if (rangesBuilder_ == null) { + rangesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.KeyRange, + com.google.spanner.v1.KeyRange.Builder, + com.google.spanner.v1.KeyRangeOrBuilder>( + ranges_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + ranges_ = null; + } + return rangesBuilder_; + } + + private boolean all_; + + /** + * + * + *
    +     * For convenience `all` can be set to `true` to indicate that this
    +     * `KeySet` matches all keys in the table or index. Note that any keys
    +     * specified in `keys` or `ranges` are only yielded once.
    +     * 
    + * + * bool all = 3; + * + * @return The all. + */ + @java.lang.Override + public boolean getAll() { + return all_; + } + + /** + * + * + *
    +     * For convenience `all` can be set to `true` to indicate that this
    +     * `KeySet` matches all keys in the table or index. Note that any keys
    +     * specified in `keys` or `ranges` are only yielded once.
    +     * 
    + * + * bool all = 3; + * + * @param value The all to set. + * @return This builder for chaining. + */ + public Builder setAll(boolean value) { + + all_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * For convenience `all` can be set to `true` to indicate that this
    +     * `KeySet` matches all keys in the table or index. Note that any keys
    +     * specified in `keys` or `ranges` are only yielded once.
    +     * 
    + * + * bool all = 3; + * + * @return This builder for chaining. + */ + public Builder clearAll() { + bitField0_ = (bitField0_ & ~0x00000004); + all_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.KeySet) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.KeySet) + private static final com.google.spanner.v1.KeySet DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.KeySet(); + } + + public static com.google.spanner.v1.KeySet getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public KeySet parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.KeySet getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeySetOrBuilder.java 
b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeySetOrBuilder.java new file mode 100644 index 000000000000..77a31c506c7c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeySetOrBuilder.java @@ -0,0 +1,173 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/keys.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface KeySetOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.KeySet) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * A list of specific keys. Entries in `keys` should have exactly as
    +   * many elements as there are columns in the primary or index key
    +   * with which this `KeySet` is used.  Individual key values are
    +   * encoded as described [here][google.spanner.v1.TypeCode].
    +   * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + java.util.List getKeysList(); + + /** + * + * + *
    +   * A list of specific keys. Entries in `keys` should have exactly as
    +   * many elements as there are columns in the primary or index key
    +   * with which this `KeySet` is used.  Individual key values are
    +   * encoded as described [here][google.spanner.v1.TypeCode].
    +   * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + com.google.protobuf.ListValue getKeys(int index); + + /** + * + * + *
    +   * A list of specific keys. Entries in `keys` should have exactly as
    +   * many elements as there are columns in the primary or index key
    +   * with which this `KeySet` is used.  Individual key values are
    +   * encoded as described [here][google.spanner.v1.TypeCode].
    +   * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + int getKeysCount(); + + /** + * + * + *
    +   * A list of specific keys. Entries in `keys` should have exactly as
    +   * many elements as there are columns in the primary or index key
    +   * with which this `KeySet` is used.  Individual key values are
    +   * encoded as described [here][google.spanner.v1.TypeCode].
    +   * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + java.util.List getKeysOrBuilderList(); + + /** + * + * + *
    +   * A list of specific keys. Entries in `keys` should have exactly as
    +   * many elements as there are columns in the primary or index key
    +   * with which this `KeySet` is used.  Individual key values are
    +   * encoded as described [here][google.spanner.v1.TypeCode].
    +   * 
    + * + * repeated .google.protobuf.ListValue keys = 1; + */ + com.google.protobuf.ListValueOrBuilder getKeysOrBuilder(int index); + + /** + * + * + *
    +   * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +   * information about key range specifications.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + java.util.List getRangesList(); + + /** + * + * + *
    +   * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +   * information about key range specifications.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + com.google.spanner.v1.KeyRange getRanges(int index); + + /** + * + * + *
    +   * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +   * information about key range specifications.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + int getRangesCount(); + + /** + * + * + *
    +   * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +   * information about key range specifications.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + java.util.List getRangesOrBuilderList(); + + /** + * + * + *
    +   * A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more
    +   * information about key range specifications.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRange ranges = 2; + */ + com.google.spanner.v1.KeyRangeOrBuilder getRangesOrBuilder(int index); + + /** + * + * + *
    +   * For convenience `all` can be set to `true` to indicate that this
    +   * `KeySet` matches all keys in the table or index. Note that any keys
    +   * specified in `keys` or `ranges` are only yielded once.
    +   * 
    + * + * bool all = 3; + * + * @return The all. + */ + boolean getAll(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeysProto.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeysProto.java new file mode 100644 index 000000000000..a1e368ba3652 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeysProto.java @@ -0,0 +1,102 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/keys.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public final class KeysProto extends com.google.protobuf.GeneratedFile { + private KeysProto() {} + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "KeysProto"); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_KeyRange_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_KeyRange_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_KeySet_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_KeySet_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n\034google/spanner/v1/keys.proto\022\021google.s" + + "panner.v1\032\034google/protobuf/struct.proto\"" + + "\364\001\n\010KeyRange\0222\n\014start_closed\030\001 \001(\0132\032.goo" + + "gle.protobuf.ListValueH\000\0220\n\nstart_open\030\002" + + " \001(\0132\032.google.protobuf.ListValueH\000\0220\n\nen" + + "d_closed\030\003 \001(\0132\032.google.protobuf.ListVal" + + "ueH\001\022.\n\010end_open\030\004 \001(\0132\032.google.protobuf" + + 
".ListValueH\001B\020\n\016start_key_typeB\016\n\014end_ke" + + "y_type\"l\n\006KeySet\022(\n\004keys\030\001 \003(\0132\032.google." + + "protobuf.ListValue\022+\n\006ranges\030\002 \003(\0132\033.goo" + + "gle.spanner.v1.KeyRange\022\013\n\003all\030\003 \001(\010B\254\001\n" + + "\025com.google.spanner.v1B\tKeysProtoP\001Z5clo" + + "ud.google.com/go/spanner/apiv1/spannerpb" + + ";spannerpb\252\002\027Google.Cloud.Spanner.V1\312\002\027G" + + "oogle\\Cloud\\Spanner\\V1\352\002\032Google::Cloud::" + + "Spanner::V1b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.protobuf.StructProto.getDescriptor(), + }); + internal_static_google_spanner_v1_KeyRange_descriptor = getDescriptor().getMessageType(0); + internal_static_google_spanner_v1_KeyRange_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_KeyRange_descriptor, + new java.lang.String[] { + "StartClosed", "StartOpen", "EndClosed", "EndOpen", "StartKeyType", "EndKeyType", + }); + internal_static_google_spanner_v1_KeySet_descriptor = getDescriptor().getMessageType(1); + internal_static_google_spanner_v1_KeySet_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_KeySet_descriptor, + new java.lang.String[] { + "Keys", "Ranges", "All", + }); + descriptor.resolveAllFeaturesImmutable(); + com.google.protobuf.StructProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsRequest.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsRequest.java new file mode 100644 index 000000000000..6413acdc191a --- /dev/null +++ 
b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsRequest.java @@ -0,0 +1,1172 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * The request for [ListSessions][google.spanner.v1.Spanner.ListSessions].
    + * 
    + * + * Protobuf type {@code google.spanner.v1.ListSessionsRequest} + */ +@com.google.protobuf.Generated +public final class ListSessionsRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ListSessionsRequest) + ListSessionsRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListSessionsRequest"); + } + + // Use ListSessionsRequest.newBuilder() to construct. + private ListSessionsRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListSessionsRequest() { + database_ = ""; + pageToken_ = ""; + filter_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ListSessionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ListSessionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ListSessionsRequest.class, + com.google.spanner.v1.ListSessionsRequest.Builder.class); + } + + public static final int DATABASE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object database_ = ""; + + /** + * + * + *
    +   * Required. The database in which to list sessions.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + @java.lang.Override + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The database in which to list sessions.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_ = 0; + + /** + * + * + *
    +   * Number of sessions to be returned in the response. If 0 or less, defaults
    +   * to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 2; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token]
    +   * from a previous
    +   * [ListSessionsResponse][google.spanner.v1.ListSessionsResponse].
    +   * 
    + * + * string page_token = 3; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token]
    +   * from a previous
    +   * [ListSessionsResponse][google.spanner.v1.ListSessionsResponse].
    +   * 
    + * + * string page_token = 3; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FILTER_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object filter_ = ""; + + /** + * + * + *
    +   * An expression for filtering the results of the request. Filter rules are
    +   * case insensitive. The fields eligible for filtering are:
    +   *
    +   * * `labels.key` where key is the name of a label
    +   *
    +   * Some examples of using filters are:
    +   *
    +   * * `labels.env:*` --> The session has the label "env".
    +   * * `labels.env:dev` --> The session has the label "env" and the value of
    +   * the label contains the string "dev".
    +   * 
    + * + * string filter = 4; + * + * @return The filter. + */ + @java.lang.Override + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } + } + + /** + * + * + *
    +   * An expression for filtering the results of the request. Filter rules are
    +   * case insensitive. The fields eligible for filtering are:
    +   *
    +   * * `labels.key` where key is the name of a label
    +   *
    +   * Some examples of using filters are:
    +   *
    +   * * `labels.env:*` --> The session has the label "env".
    +   * * `labels.env:dev` --> The session has the label "env" and the value of
    +   * the label contains the string "dev".
    +   * 
    + * + * string filter = 4; + * + * @return The bytes for filter. + */ + @java.lang.Override + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, database_); + } + if (pageSize_ != 0) { + output.writeInt32(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, pageToken_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, filter_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(database_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, database_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, pageToken_); + } + if 
(!com.google.protobuf.GeneratedMessage.isStringEmpty(filter_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, filter_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.ListSessionsRequest)) { + return super.equals(obj); + } + com.google.spanner.v1.ListSessionsRequest other = + (com.google.spanner.v1.ListSessionsRequest) obj; + + if (!getDatabase().equals(other.getDatabase())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getFilter().equals(other.getFilter())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getDatabase().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (37 * hash) + FILTER_FIELD_NUMBER; + hash = (53 * hash) + getFilter().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ListSessionsRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ListSessionsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ListSessionsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ListSessionsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ListSessionsRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ListSessionsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ListSessionsRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ListSessionsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ListSessionsRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ListSessionsRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ListSessionsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ListSessionsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.ListSessionsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for [ListSessions][google.spanner.v1.Spanner.ListSessions].
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.ListSessionsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ListSessionsRequest) + com.google.spanner.v1.ListSessionsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ListSessionsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ListSessionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ListSessionsRequest.class, + com.google.spanner.v1.ListSessionsRequest.Builder.class); + } + + // Construct using com.google.spanner.v1.ListSessionsRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + database_ = ""; + pageSize_ = 0; + pageToken_ = ""; + filter_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ListSessionsRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.ListSessionsRequest getDefaultInstanceForType() { + return com.google.spanner.v1.ListSessionsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ListSessionsRequest build() { + com.google.spanner.v1.ListSessionsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public 
com.google.spanner.v1.ListSessionsRequest buildPartial() { + com.google.spanner.v1.ListSessionsRequest result = + new com.google.spanner.v1.ListSessionsRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.ListSessionsRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.database_ = database_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageToken_ = pageToken_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.filter_ = filter_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.ListSessionsRequest) { + return mergeFrom((com.google.spanner.v1.ListSessionsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.ListSessionsRequest other) { + if (other == com.google.spanner.v1.ListSessionsRequest.getDefaultInstance()) return this; + if (!other.getDatabase().isEmpty()) { + database_ = other.database_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getFilter().isEmpty()) { + filter_ = other.filter_; + bitField0_ |= 0x00000008; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw 
new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + database_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + filter_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object database_ = ""; + + /** + * + * + *
    +     * Required. The database in which to list sessions.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The database in which to list sessions.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The database in which to list sessions.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The database to set. + * @return This builder for chaining. + */ + public Builder setDatabase(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The database in which to list sessions.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearDatabase() { + database_ = getDefaultInstance().getDatabase(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The database in which to list sessions.
    +     * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for database to set. + * @return This builder for chaining. + */ + public Builder setDatabaseBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int pageSize_; + + /** + * + * + *
    +     * Number of sessions to be returned in the response. If 0 or less, defaults
    +     * to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 2; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + /** + * + * + *
    +     * Number of sessions to be returned in the response. If 0 or less, defaults
    +     * to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 2; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Number of sessions to be returned in the response. If 0 or less, defaults
    +     * to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 2; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000002); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token]
    +     * from a previous
    +     * [ListSessionsResponse][google.spanner.v1.ListSessionsResponse].
    +     * 
    + * + * string page_token = 3; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token]
    +     * from a previous
    +     * [ListSessionsResponse][google.spanner.v1.ListSessionsResponse].
    +     * 
    + * + * string page_token = 3; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token]
    +     * from a previous
    +     * [ListSessionsResponse][google.spanner.v1.ListSessionsResponse].
    +     * 
    + * + * string page_token = 3; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token]
    +     * from a previous
    +     * [ListSessionsResponse][google.spanner.v1.ListSessionsResponse].
    +     * 
    + * + * string page_token = 3; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token]
    +     * from a previous
    +     * [ListSessionsResponse][google.spanner.v1.ListSessionsResponse].
    +     * 
    + * + * string page_token = 3; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.lang.Object filter_ = ""; + + /** + * + * + *
    +     * An expression for filtering the results of the request. Filter rules are
    +     * case insensitive. The fields eligible for filtering are:
    +     *
    +     * * `labels.key` where key is the name of a label
    +     *
    +     * Some examples of using filters are:
    +     *
    +     * * `labels.env:*` --> The session has the label "env".
    +     * * `labels.env:dev` --> The session has the label "env" and the value of
    +     * the label contains the string "dev".
    +     * 
    + * + * string filter = 4; + * + * @return The filter. + */ + public java.lang.String getFilter() { + java.lang.Object ref = filter_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + filter_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * An expression for filtering the results of the request. Filter rules are
    +     * case insensitive. The fields eligible for filtering are:
    +     *
    +     * * `labels.key` where key is the name of a label
    +     *
    +     * Some examples of using filters are:
    +     *
    +     * * `labels.env:*` --> The session has the label "env".
    +     * * `labels.env:dev` --> The session has the label "env" and the value of
    +     * the label contains the string "dev".
    +     * 
    + * + * string filter = 4; + * + * @return The bytes for filter. + */ + public com.google.protobuf.ByteString getFilterBytes() { + java.lang.Object ref = filter_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + filter_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * An expression for filtering the results of the request. Filter rules are
    +     * case insensitive. The fields eligible for filtering are:
    +     *
    +     * * `labels.key` where key is the name of a label
    +     *
    +     * Some examples of using filters are:
    +     *
    +     * * `labels.env:*` --> The session has the label "env".
    +     * * `labels.env:dev` --> The session has the label "env" and the value of
    +     * the label contains the string "dev".
    +     * 
    + * + * string filter = 4; + * + * @param value The filter to set. + * @return This builder for chaining. + */ + public Builder setFilter(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + filter_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * An expression for filtering the results of the request. Filter rules are
    +     * case insensitive. The fields eligible for filtering are:
    +     *
    +     * * `labels.key` where key is the name of a label
    +     *
    +     * Some examples of using filters are:
    +     *
    +     * * `labels.env:*` --> The session has the label "env".
    +     * * `labels.env:dev` --> The session has the label "env" and the value of
    +     * the label contains the string "dev".
    +     * 
    + * + * string filter = 4; + * + * @return This builder for chaining. + */ + public Builder clearFilter() { + filter_ = getDefaultInstance().getFilter(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
    +     * An expression for filtering the results of the request. Filter rules are
    +     * case insensitive. The fields eligible for filtering are:
    +     *
    +     * * `labels.key` where key is the name of a label
    +     *
    +     * Some examples of using filters are:
    +     *
    +     * * `labels.env:*` --> The session has the label "env".
    +     * * `labels.env:dev` --> The session has the label "env" and the value of
    +     * the label contains the string "dev".
    +     * 
    + * + * string filter = 4; + * + * @param value The bytes for filter to set. + * @return This builder for chaining. + */ + public Builder setFilterBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + filter_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ListSessionsRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ListSessionsRequest) + private static final com.google.spanner.v1.ListSessionsRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.ListSessionsRequest(); + } + + public static com.google.spanner.v1.ListSessionsRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListSessionsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.ListSessionsRequest 
getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsRequestOrBuilder.java new file mode 100644 index 000000000000..ce34b437d205 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsRequestOrBuilder.java @@ -0,0 +1,148 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface ListSessionsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ListSessionsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The database in which to list sessions.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + java.lang.String getDatabase(); + + /** + * + * + *
    +   * Required. The database in which to list sessions.
    +   * 
    + * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + com.google.protobuf.ByteString getDatabaseBytes(); + + /** + * + * + *
    +   * Number of sessions to be returned in the response. If 0 or less, defaults
    +   * to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 2; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token]
    +   * from a previous
    +   * [ListSessionsResponse][google.spanner.v1.ListSessionsResponse].
    +   * 
    + * + * string page_token = 3; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + + /** + * + * + *
    +   * If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token]
    +   * from a previous
    +   * [ListSessionsResponse][google.spanner.v1.ListSessionsResponse].
    +   * 
    + * + * string page_token = 3; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); + + /** + * + * + *
    +   * An expression for filtering the results of the request. Filter rules are
    +   * case insensitive. The fields eligible for filtering are:
    +   *
    +   * * `labels.key` where key is the name of a label
    +   *
    +   * Some examples of using filters are:
    +   *
    +   * * `labels.env:*` --> The session has the label "env".
    +   * * `labels.env:dev` --> The session has the label "env" and the value of
    +   * the label contains the string "dev".
    +   * 
    + * + * string filter = 4; + * + * @return The filter. + */ + java.lang.String getFilter(); + + /** + * + * + *
    +   * An expression for filtering the results of the request. Filter rules are
    +   * case insensitive. The fields eligible for filtering are:
    +   *
    +   * * `labels.key` where key is the name of a label
    +   *
    +   * Some examples of using filters are:
    +   *
    +   * * `labels.env:*` --> The session has the label "env".
    +   * * `labels.env:dev` --> The session has the label "env" and the value of
    +   * the label contains the string "dev".
    +   * 
    + * + * string filter = 4; + * + * @return The bytes for filter. + */ + com.google.protobuf.ByteString getFilterBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsResponse.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsResponse.java new file mode 100644 index 000000000000..1f2ccda7c22c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsResponse.java @@ -0,0 +1,1119 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * The response for [ListSessions][google.spanner.v1.Spanner.ListSessions].
    + * 
    + * + * Protobuf type {@code google.spanner.v1.ListSessionsResponse} + */ +@com.google.protobuf.Generated +public final class ListSessionsResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ListSessionsResponse) + ListSessionsResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ListSessionsResponse"); + } + + // Use ListSessionsResponse.newBuilder() to construct. + private ListSessionsResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ListSessionsResponse() { + sessions_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ListSessionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ListSessionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ListSessionsResponse.class, + com.google.spanner.v1.ListSessionsResponse.Builder.class); + } + + public static final int SESSIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List sessions_; + + /** + * + * + *
    +   * The list of requested sessions.
    +   * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + @java.lang.Override + public java.util.List getSessionsList() { + return sessions_; + } + + /** + * + * + *
    +   * The list of requested sessions.
    +   * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + @java.lang.Override + public java.util.List + getSessionsOrBuilderList() { + return sessions_; + } + + /** + * + * + *
    +   * The list of requested sessions.
    +   * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + @java.lang.Override + public int getSessionsCount() { + return sessions_.size(); + } + + /** + * + * + *
    +   * The list of requested sessions.
    +   * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + @java.lang.Override + public com.google.spanner.v1.Session getSessions(int index) { + return sessions_.get(index); + } + + /** + * + * + *
    +   * The list of requested sessions.
    +   * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + @java.lang.Override + public com.google.spanner.v1.SessionOrBuilder getSessionsOrBuilder(int index) { + return sessions_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more
    +   * of the matching sessions.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more
    +   * of the matching sessions.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < sessions_.size(); i++) { + output.writeMessage(1, sessions_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, nextPageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < sessions_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, sessions_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, nextPageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.ListSessionsResponse)) { + return super.equals(obj); + } + com.google.spanner.v1.ListSessionsResponse other = + 
(com.google.spanner.v1.ListSessionsResponse) obj; + + if (!getSessionsList().equals(other.getSessionsList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getSessionsCount() > 0) { + hash = (37 * hash) + SESSIONS_FIELD_NUMBER; + hash = (53 * hash) + getSessionsList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ListSessionsResponse parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ListSessionsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ListSessionsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ListSessionsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ListSessionsResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.v1.ListSessionsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ListSessionsResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ListSessionsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ListSessionsResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ListSessionsResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ListSessionsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ListSessionsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder 
newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.ListSessionsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The response for [ListSessions][google.spanner.v1.Spanner.ListSessions].
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.ListSessionsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ListSessionsResponse) + com.google.spanner.v1.ListSessionsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ListSessionsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ListSessionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ListSessionsResponse.class, + com.google.spanner.v1.ListSessionsResponse.Builder.class); + } + + // Construct using com.google.spanner.v1.ListSessionsResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (sessionsBuilder_ == null) { + sessions_ = java.util.Collections.emptyList(); + } else { + sessions_ = null; + sessionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ListSessionsResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.ListSessionsResponse getDefaultInstanceForType() { + return com.google.spanner.v1.ListSessionsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ListSessionsResponse build() { + com.google.spanner.v1.ListSessionsResponse result = 
buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.ListSessionsResponse buildPartial() { + com.google.spanner.v1.ListSessionsResponse result = + new com.google.spanner.v1.ListSessionsResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.v1.ListSessionsResponse result) { + if (sessionsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + sessions_ = java.util.Collections.unmodifiableList(sessions_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.sessions_ = sessions_; + } else { + result.sessions_ = sessionsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.ListSessionsResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.ListSessionsResponse) { + return mergeFrom((com.google.spanner.v1.ListSessionsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.ListSessionsResponse other) { + if (other == com.google.spanner.v1.ListSessionsResponse.getDefaultInstance()) return this; + if (sessionsBuilder_ == null) { + if (!other.sessions_.isEmpty()) { + if (sessions_.isEmpty()) { + sessions_ = other.sessions_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureSessionsIsMutable(); + sessions_.addAll(other.sessions_); + } + onChanged(); + } + } else { + if (!other.sessions_.isEmpty()) { + if (sessionsBuilder_.isEmpty()) { + sessionsBuilder_.dispose(); + sessionsBuilder_ = null; + sessions_ = other.sessions_; + bitField0_ = (bitField0_ & 
~0x00000001); + sessionsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetSessionsFieldBuilder() + : null; + } else { + sessionsBuilder_.addAllMessages(other.sessions_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.v1.Session m = + input.readMessage(com.google.spanner.v1.Session.parser(), extensionRegistry); + if (sessionsBuilder_ == null) { + ensureSessionsIsMutable(); + sessions_.add(m); + } else { + sessionsBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List sessions_ = + java.util.Collections.emptyList(); + + private void ensureSessionsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + sessions_ = new java.util.ArrayList(sessions_); + bitField0_ |= 0x00000001; + } + } + + private 
com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Session, + com.google.spanner.v1.Session.Builder, + com.google.spanner.v1.SessionOrBuilder> + sessionsBuilder_; + + /** + * + * + *
    +     * The list of requested sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + public java.util.List getSessionsList() { + if (sessionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(sessions_); + } else { + return sessionsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * The list of requested sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + public int getSessionsCount() { + if (sessionsBuilder_ == null) { + return sessions_.size(); + } else { + return sessionsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * The list of requested sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + public com.google.spanner.v1.Session getSessions(int index) { + if (sessionsBuilder_ == null) { + return sessions_.get(index); + } else { + return sessionsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * The list of requested sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + public Builder setSessions(int index, com.google.spanner.v1.Session value) { + if (sessionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSessionsIsMutable(); + sessions_.set(index, value); + onChanged(); + } else { + sessionsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of requested sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + public Builder setSessions(int index, com.google.spanner.v1.Session.Builder builderForValue) { + if (sessionsBuilder_ == null) { + ensureSessionsIsMutable(); + sessions_.set(index, builderForValue.build()); + onChanged(); + } else { + sessionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of requested sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + public Builder addSessions(com.google.spanner.v1.Session value) { + if (sessionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSessionsIsMutable(); + sessions_.add(value); + onChanged(); + } else { + sessionsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * The list of requested sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + public Builder addSessions(int index, com.google.spanner.v1.Session value) { + if (sessionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSessionsIsMutable(); + sessions_.add(index, value); + onChanged(); + } else { + sessionsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of requested sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + public Builder addSessions(com.google.spanner.v1.Session.Builder builderForValue) { + if (sessionsBuilder_ == null) { + ensureSessionsIsMutable(); + sessions_.add(builderForValue.build()); + onChanged(); + } else { + sessionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of requested sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + public Builder addSessions(int index, com.google.spanner.v1.Session.Builder builderForValue) { + if (sessionsBuilder_ == null) { + ensureSessionsIsMutable(); + sessions_.add(index, builderForValue.build()); + onChanged(); + } else { + sessionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of requested sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + public Builder addAllSessions( + java.lang.Iterable values) { + if (sessionsBuilder_ == null) { + ensureSessionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, sessions_); + onChanged(); + } else { + sessionsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * The list of requested sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + public Builder clearSessions() { + if (sessionsBuilder_ == null) { + sessions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + sessionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * The list of requested sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + public Builder removeSessions(int index) { + if (sessionsBuilder_ == null) { + ensureSessionsIsMutable(); + sessions_.remove(index); + onChanged(); + } else { + sessionsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * The list of requested sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + public com.google.spanner.v1.Session.Builder getSessionsBuilder(int index) { + return internalGetSessionsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * The list of requested sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + public com.google.spanner.v1.SessionOrBuilder getSessionsOrBuilder(int index) { + if (sessionsBuilder_ == null) { + return sessions_.get(index); + } else { + return sessionsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * The list of requested sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + public java.util.List + getSessionsOrBuilderList() { + if (sessionsBuilder_ != null) { + return sessionsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(sessions_); + } + } + + /** + * + * + *
    +     * The list of requested sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + public com.google.spanner.v1.Session.Builder addSessionsBuilder() { + return internalGetSessionsFieldBuilder() + .addBuilder(com.google.spanner.v1.Session.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of requested sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + public com.google.spanner.v1.Session.Builder addSessionsBuilder(int index) { + return internalGetSessionsFieldBuilder() + .addBuilder(index, com.google.spanner.v1.Session.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of requested sessions.
    +     * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + public java.util.List getSessionsBuilderList() { + return internalGetSessionsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Session, + com.google.spanner.v1.Session.Builder, + com.google.spanner.v1.SessionOrBuilder> + internalGetSessionsFieldBuilder() { + if (sessionsBuilder_ == null) { + sessionsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Session, + com.google.spanner.v1.Session.Builder, + com.google.spanner.v1.SessionOrBuilder>( + sessions_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + sessions_ = null; + } + return sessionsBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more
    +     * of the matching sessions.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more
    +     * of the matching sessions.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more
    +     * of the matching sessions.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more
    +     * of the matching sessions.
    +     * 
    + * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more
    +     * of the matching sessions.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ListSessionsResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ListSessionsResponse) + private static final com.google.spanner.v1.ListSessionsResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.ListSessionsResponse(); + } + + public static com.google.spanner.v1.ListSessionsResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListSessionsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public 
com.google.spanner.v1.ListSessionsResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsResponseOrBuilder.java new file mode 100644 index 000000000000..8a30f2f99976 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsResponseOrBuilder.java @@ -0,0 +1,113 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface ListSessionsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ListSessionsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The list of requested sessions.
    +   * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + java.util.List getSessionsList(); + + /** + * + * + *
    +   * The list of requested sessions.
    +   * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + com.google.spanner.v1.Session getSessions(int index); + + /** + * + * + *
    +   * The list of requested sessions.
    +   * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + int getSessionsCount(); + + /** + * + * + *
    +   * The list of requested sessions.
    +   * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + java.util.List getSessionsOrBuilderList(); + + /** + * + * + *
    +   * The list of requested sessions.
    +   * 
    + * + * repeated .google.spanner.v1.Session sessions = 1; + */ + com.google.spanner.v1.SessionOrBuilder getSessionsOrBuilder(int index); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more
    +   * of the matching sessions.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more
    +   * of the matching sessions.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/LocationProto.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/LocationProto.java new file mode 100644 index 000000000000..e5c3bee3958b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/LocationProto.java @@ -0,0 +1,266 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/location.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public final class LocationProto extends com.google.protobuf.GeneratedFile { + private LocationProto() {} + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "LocationProto"); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_Range_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_Range_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_Tablet_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_Tablet_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_Group_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_Group_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_KeyRecipe_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_KeyRecipe_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_KeyRecipe_Part_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_spanner_v1_KeyRecipe_Part_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_RecipeList_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_RecipeList_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_CacheUpdate_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_CacheUpdate_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_RoutingHint_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_RoutingHint_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_RoutingHint_SkippedTablet_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_RoutingHint_SkippedTablet_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n" + + " google/spanner/v1/location.proto\022\021goog" + + "le.spanner.v1\032\034google/protobuf/struct.proto\032\034google/spanner/v1/type.proto\"f\n" + + "\005Range\022\021\n" + + "\tstart_key\030\001 \001(\014\022\021\n" + + "\tlimit_key\030\002 \001(\014\022\021\n" + + "\tgroup_uid\030\003 \001(\004\022\020\n" + + "\010split_id\030\004 \001(\004\022\022\n" + + "\n" + + "generation\030\005 \001(\014\"\346\001\n" + + "\006Tablet\022\022\n\n" + + "tablet_uid\030\001 \001(\004\022\026\n" + + "\016server_address\030\002 \001(\t\022\020\n" + + "\010location\030\003 \001(\t\022,\n" + + "\004role\030\004 
\001(\0162\036.google.spanner.v1.Tablet.Role\022\023\n" + + "\013incarnation\030\005 \001(\014\022\020\n" + + "\010distance\030\006 \001(\r" + + "\022\014\n" + + "\004skip\030\007 \001(\010\";\n" + + "\004Role\022\024\n" + + "\020ROLE_UNSPECIFIED\020\000\022\016\n\n" + + "READ_WRITE\020\001\022\r\n" + + "\tREAD_ONLY\020\002\"p\n" + + "\005Group\022\021\n" + + "\tgroup_uid\030\001 \001(\004\022*\n" + + "\007tablets\030\002 \003(\0132\031.google.spanner.v1.Tablet\022\024\n" + + "\014leader_index\030\003 \001(\005\022\022\n\n" + + "generation\030\004 \001(\014\"\323\004\n" + + "\tKeyRecipe\022\024\n\n" + + "table_name\030\001 \001(\tH\000\022\024\n\n" + + "index_name\030\002 \001(\tH\000\022\027\n\r" + + "operation_uid\030\003 \001(\004H\000\022/\n" + + "\004part\030\004 \003(\0132!.google.spanner.v1.KeyRecipe.Part\032\305\003\n" + + "\004Part\022\013\n" + + "\003tag\030\001 \001(\r" + + "\0226\n" + + "\005order\030\002 \001(\0162\'.google.spanner.v1.KeyRecipe.Part.Order\022?\n\n" + + "null_order\030\003 \001(\0162+.google.spanner.v1.KeyRecipe.Part.NullOrder\022%\n" + + "\004type\030\004 \001(\0132\027.google.spanner.v1.Type\022\024\n\n" + + "identifier\030\005 \001(\tH\000\022\'\n" + + "\005value\030\006 \001(\0132\026.google.protobuf.ValueH\000\022\020\n" + + "\006random\030\010 \001(\010H\000\022\032\n" + + "\022struct_identifiers\030\007 \003(\005\"=\n" + + "\005Order\022\025\n" + + "\021ORDER_UNSPECIFIED\020\000\022\r\n" + + "\tASCENDING\020\001\022\016\n\n" + + "DESCENDING\020\002\"V\n" + + "\tNullOrder\022\032\n" + + "\026NULL_ORDER_UNSPECIFIED\020\000\022\017\n" + + "\013NULLS_FIRST\020\001\022\016\n\n" + + "NULLS_LAST\020\002\022\014\n" + + "\010NOT_NULL\020\003B\014\n\n" + + "value_typeB\010\n" + + "\006target\"U\n\n" + + "RecipeList\022\031\n" + + "\021schema_generation\030\001 \001(\014\022,\n" + + "\006recipe\030\003 \003(\0132\034.google.spanner.v1.KeyRecipe\"\250\001\n" + + "\013CacheUpdate\022\023\n" + + "\013database_id\030\001 \001(\004\022\'\n" + + 
"\005range\030\002 \003(\0132\030.google.spanner.v1.Range\022\'\n" + + "\005group\030\003 \003(\0132\030.google.spanner.v1.Group\0222\n" + + "\013key_recipes\030\005 \001(\0132\035.google.spanner.v1.RecipeList\"\312\002\n" + + "\013RoutingHint\022\025\n\r" + + "operation_uid\030\001 \001(\004\022\023\n" + + "\013database_id\030\002 \001(\004\022\031\n" + + "\021schema_generation\030\003 \001(\014\022\013\n" + + "\003key\030\004 \001(\014\022\021\n" + + "\tlimit_key\030\005 \001(\014\022\021\n" + + "\tgroup_uid\030\006 \001(\004\022\020\n" + + "\010split_id\030\007 \001(\004\022\022\n\n" + + "tablet_uid\030\010 \001(\004\022H\n" + + "\022skipped_tablet_uid\030\t" + + " \003(\0132,.google.spanner.v1.RoutingHint.SkippedTablet\022\027\n" + + "\017client_location\030\n" + + " \001(\t\0328\n\r" + + "SkippedTablet\022\022\n\n" + + "tablet_uid\030\001 \001(\004\022\023\n" + + "\013incarnation\030\002 \001(\014B\260\001\n" + + "\025com.google.spanner.v1B\r" + + "LocationProtoP\001Z5cloud.google.com/go/spanner/apiv1/spannerp" + + "b;spannerpb\252\002\027Google.Cloud.Spanner.V1\312\002\027" + + "Google\\Cloud\\Spanner\\V1\352\002\032Google::Cloud::Spanner::V1b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.protobuf.StructProto.getDescriptor(), + com.google.spanner.v1.TypeProto.getDescriptor(), + }); + internal_static_google_spanner_v1_Range_descriptor = getDescriptor().getMessageType(0); + internal_static_google_spanner_v1_Range_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_Range_descriptor, + new java.lang.String[] { + "StartKey", "LimitKey", "GroupUid", "SplitId", "Generation", + }); + internal_static_google_spanner_v1_Tablet_descriptor = getDescriptor().getMessageType(1); + internal_static_google_spanner_v1_Tablet_fieldAccessorTable = + new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_Tablet_descriptor, + new java.lang.String[] { + "TabletUid", "ServerAddress", "Location", "Role", "Incarnation", "Distance", "Skip", + }); + internal_static_google_spanner_v1_Group_descriptor = getDescriptor().getMessageType(2); + internal_static_google_spanner_v1_Group_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_Group_descriptor, + new java.lang.String[] { + "GroupUid", "Tablets", "LeaderIndex", "Generation", + }); + internal_static_google_spanner_v1_KeyRecipe_descriptor = getDescriptor().getMessageType(3); + internal_static_google_spanner_v1_KeyRecipe_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_KeyRecipe_descriptor, + new java.lang.String[] { + "TableName", "IndexName", "OperationUid", "Part", "Target", + }); + internal_static_google_spanner_v1_KeyRecipe_Part_descriptor = + internal_static_google_spanner_v1_KeyRecipe_descriptor.getNestedType(0); + internal_static_google_spanner_v1_KeyRecipe_Part_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_KeyRecipe_Part_descriptor, + new java.lang.String[] { + "Tag", + "Order", + "NullOrder", + "Type", + "Identifier", + "Value", + "Random", + "StructIdentifiers", + "ValueType", + }); + internal_static_google_spanner_v1_RecipeList_descriptor = getDescriptor().getMessageType(4); + internal_static_google_spanner_v1_RecipeList_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_RecipeList_descriptor, + new java.lang.String[] { + "SchemaGeneration", "Recipe", + }); + internal_static_google_spanner_v1_CacheUpdate_descriptor = getDescriptor().getMessageType(5); + internal_static_google_spanner_v1_CacheUpdate_fieldAccessorTable = + new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_CacheUpdate_descriptor, + new java.lang.String[] { + "DatabaseId", "Range", "Group", "KeyRecipes", + }); + internal_static_google_spanner_v1_RoutingHint_descriptor = getDescriptor().getMessageType(6); + internal_static_google_spanner_v1_RoutingHint_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_RoutingHint_descriptor, + new java.lang.String[] { + "OperationUid", + "DatabaseId", + "SchemaGeneration", + "Key", + "LimitKey", + "GroupUid", + "SplitId", + "TabletUid", + "SkippedTabletUid", + "ClientLocation", + }); + internal_static_google_spanner_v1_RoutingHint_SkippedTablet_descriptor = + internal_static_google_spanner_v1_RoutingHint_descriptor.getNestedType(0); + internal_static_google_spanner_v1_RoutingHint_SkippedTablet_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_RoutingHint_SkippedTablet_descriptor, + new java.lang.String[] { + "TabletUid", "Incarnation", + }); + descriptor.resolveAllFeaturesImmutable(); + com.google.protobuf.StructProto.getDescriptor(); + com.google.spanner.v1.TypeProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MultiplexedSessionPrecommitToken.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MultiplexedSessionPrecommitToken.java new file mode 100644 index 000000000000..f6e18f0307c3 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MultiplexedSessionPrecommitToken.java @@ -0,0 +1,624 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/transaction.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * When a read-write transaction is executed on a multiplexed session,
    + * this precommit token is sent back to the client
    + * as a part of the [Transaction][google.spanner.v1.Transaction] message in the
    + * [BeginTransaction][google.spanner.v1.BeginTransactionRequest] response and
    + * also as a part of the [ResultSet][google.spanner.v1.ResultSet] and
    + * [PartialResultSet][google.spanner.v1.PartialResultSet] responses.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.MultiplexedSessionPrecommitToken} + */ +@com.google.protobuf.Generated +public final class MultiplexedSessionPrecommitToken extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.MultiplexedSessionPrecommitToken) + MultiplexedSessionPrecommitTokenOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "MultiplexedSessionPrecommitToken"); + } + + // Use MultiplexedSessionPrecommitToken.newBuilder() to construct. + private MultiplexedSessionPrecommitToken( + com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private MultiplexedSessionPrecommitToken() { + precommitToken_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_MultiplexedSessionPrecommitToken_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_MultiplexedSessionPrecommitToken_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.MultiplexedSessionPrecommitToken.class, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder.class); + } + + public static final int PRECOMMIT_TOKEN_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString precommitToken_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * Opaque precommit token.
    +   * 
    + * + * bytes precommit_token = 1; + * + * @return The precommitToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPrecommitToken() { + return precommitToken_; + } + + public static final int SEQ_NUM_FIELD_NUMBER = 2; + private int seqNum_ = 0; + + /** + * + * + *
    +   * An incrementing seq number is generated on every precommit token
    +   * that is returned. Clients should remember the precommit token with the
    +   * highest sequence number from the current transaction attempt.
    +   * 
    + * + * int32 seq_num = 2; + * + * @return The seqNum. + */ + @java.lang.Override + public int getSeqNum() { + return seqNum_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!precommitToken_.isEmpty()) { + output.writeBytes(1, precommitToken_); + } + if (seqNum_ != 0) { + output.writeInt32(2, seqNum_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!precommitToken_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, precommitToken_); + } + if (seqNum_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, seqNum_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.MultiplexedSessionPrecommitToken)) { + return super.equals(obj); + } + com.google.spanner.v1.MultiplexedSessionPrecommitToken other = + (com.google.spanner.v1.MultiplexedSessionPrecommitToken) obj; + + if (!getPrecommitToken().equals(other.getPrecommitToken())) return false; + if (getSeqNum() != other.getSeqNum()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PRECOMMIT_TOKEN_FIELD_NUMBER; + 
hash = (53 * hash) + getPrecommitToken().hashCode(); + hash = (37 * hash) + SEQ_NUM_FIELD_NUMBER; + hash = (53 * hash) + getSeqNum(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.spanner.v1.MultiplexedSessionPrecommitToken parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.v1.MultiplexedSessionPrecommitToken prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * When a read-write transaction is executed on a multiplexed session,
    +   * this precommit token is sent back to the client
    +   * as a part of the [Transaction][google.spanner.v1.Transaction] message in the
    +   * [BeginTransaction][google.spanner.v1.BeginTransactionRequest] response and
    +   * also as a part of the [ResultSet][google.spanner.v1.ResultSet] and
    +   * [PartialResultSet][google.spanner.v1.PartialResultSet] responses.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.MultiplexedSessionPrecommitToken} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.MultiplexedSessionPrecommitToken) + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_MultiplexedSessionPrecommitToken_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_MultiplexedSessionPrecommitToken_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.MultiplexedSessionPrecommitToken.class, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder.class); + } + + // Construct using com.google.spanner.v1.MultiplexedSessionPrecommitToken.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + precommitToken_ = com.google.protobuf.ByteString.EMPTY; + seqNum_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_MultiplexedSessionPrecommitToken_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getDefaultInstanceForType() { + return com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken build() { + 
com.google.spanner.v1.MultiplexedSessionPrecommitToken result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken buildPartial() { + com.google.spanner.v1.MultiplexedSessionPrecommitToken result = + new com.google.spanner.v1.MultiplexedSessionPrecommitToken(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.MultiplexedSessionPrecommitToken result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.precommitToken_ = precommitToken_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.seqNum_ = seqNum_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.MultiplexedSessionPrecommitToken) { + return mergeFrom((com.google.spanner.v1.MultiplexedSessionPrecommitToken) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.MultiplexedSessionPrecommitToken other) { + if (other == com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance()) + return this; + if (!other.getPrecommitToken().isEmpty()) { + setPrecommitToken(other.getPrecommitToken()); + } + if (other.getSeqNum() != 0) { + setSeqNum(other.getSeqNum()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) 
{ + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + precommitToken_ = input.readBytes(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + seqNum_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.ByteString precommitToken_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * Opaque precommit token.
    +     * 
    + * + * bytes precommit_token = 1; + * + * @return The precommitToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPrecommitToken() { + return precommitToken_; + } + + /** + * + * + *
    +     * Opaque precommit token.
    +     * 
    + * + * bytes precommit_token = 1; + * + * @param value The precommitToken to set. + * @return This builder for chaining. + */ + public Builder setPrecommitToken(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + precommitToken_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Opaque precommit token.
    +     * 
    + * + * bytes precommit_token = 1; + * + * @return This builder for chaining. + */ + public Builder clearPrecommitToken() { + bitField0_ = (bitField0_ & ~0x00000001); + precommitToken_ = getDefaultInstance().getPrecommitToken(); + onChanged(); + return this; + } + + private int seqNum_; + + /** + * + * + *
    +     * An incrementing seq number is generated on every precommit token
    +     * that is returned. Clients should remember the precommit token with the
    +     * highest sequence number from the current transaction attempt.
    +     * 
    + * + * int32 seq_num = 2; + * + * @return The seqNum. + */ + @java.lang.Override + public int getSeqNum() { + return seqNum_; + } + + /** + * + * + *
    +     * An incrementing seq number is generated on every precommit token
    +     * that is returned. Clients should remember the precommit token with the
    +     * highest sequence number from the current transaction attempt.
    +     * 
    + * + * int32 seq_num = 2; + * + * @param value The seqNum to set. + * @return This builder for chaining. + */ + public Builder setSeqNum(int value) { + + seqNum_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * An incrementing seq number is generated on every precommit token
    +     * that is returned. Clients should remember the precommit token with the
    +     * highest sequence number from the current transaction attempt.
    +     * 
    + * + * int32 seq_num = 2; + * + * @return This builder for chaining. + */ + public Builder clearSeqNum() { + bitField0_ = (bitField0_ & ~0x00000002); + seqNum_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.MultiplexedSessionPrecommitToken) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.MultiplexedSessionPrecommitToken) + private static final com.google.spanner.v1.MultiplexedSessionPrecommitToken DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.MultiplexedSessionPrecommitToken(); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public MultiplexedSessionPrecommitToken parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MultiplexedSessionPrecommitTokenOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MultiplexedSessionPrecommitTokenOrBuilder.java new file mode 100644 index 000000000000..30cdf3b0d5eb --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MultiplexedSessionPrecommitTokenOrBuilder.java @@ -0,0 +1,56 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/transaction.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface MultiplexedSessionPrecommitTokenOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.MultiplexedSessionPrecommitToken) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Opaque precommit token.
    +   * 
    + * + * bytes precommit_token = 1; + * + * @return The precommitToken. + */ + com.google.protobuf.ByteString getPrecommitToken(); + + /** + * + * + *
    +   * An incrementing seq number is generated on every precommit token
    +   * that is returned. Clients should remember the precommit token with the
    +   * highest sequence number from the current transaction attempt.
    +   * 
    + * + * int32 seq_num = 2; + * + * @return The seqNum. + */ + int getSeqNum(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Mutation.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Mutation.java new file mode 100644 index 000000000000..d357769157f3 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Mutation.java @@ -0,0 +1,8383 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/mutation.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * A modification to one or more Cloud Spanner rows.  Mutations can be
    + * applied to a Cloud Spanner database by sending them in a
    + * [Commit][google.spanner.v1.Spanner.Commit] call.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.Mutation} + */ +@com.google.protobuf.Generated +public final class Mutation extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.Mutation) + MutationOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Mutation"); + } + + // Use Mutation.newBuilder() to construct. + private Mutation(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Mutation() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Mutation.class, com.google.spanner.v1.Mutation.Builder.class); + } + + public interface WriteOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.Mutation.Write) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * Required. The table whose rows will be written.
    +     * 
    + * + * string table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The table. + */ + java.lang.String getTable(); + + /** + * + * + *
    +     * Required. The table whose rows will be written.
    +     * 
    + * + * string table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for table. + */ + com.google.protobuf.ByteString getTableBytes(); + + /** + * + * + *
    +     * The names of the columns in
    +     * [table][google.spanner.v1.Mutation.Write.table] to be written.
    +     *
    +     * The list of columns must contain enough columns to allow
    +     * Cloud Spanner to derive values for all primary key columns in the
    +     * row(s) to be modified.
    +     * 
    + * + * repeated string columns = 2; + * + * @return A list containing the columns. + */ + java.util.List getColumnsList(); + + /** + * + * + *
    +     * The names of the columns in
    +     * [table][google.spanner.v1.Mutation.Write.table] to be written.
    +     *
    +     * The list of columns must contain enough columns to allow
    +     * Cloud Spanner to derive values for all primary key columns in the
    +     * row(s) to be modified.
    +     * 
    + * + * repeated string columns = 2; + * + * @return The count of columns. + */ + int getColumnsCount(); + + /** + * + * + *
    +     * The names of the columns in
    +     * [table][google.spanner.v1.Mutation.Write.table] to be written.
    +     *
    +     * The list of columns must contain enough columns to allow
    +     * Cloud Spanner to derive values for all primary key columns in the
    +     * row(s) to be modified.
    +     * 
    + * + * repeated string columns = 2; + * + * @param index The index of the element to return. + * @return The columns at the given index. + */ + java.lang.String getColumns(int index); + + /** + * + * + *
    +     * The names of the columns in
    +     * [table][google.spanner.v1.Mutation.Write.table] to be written.
    +     *
    +     * The list of columns must contain enough columns to allow
    +     * Cloud Spanner to derive values for all primary key columns in the
    +     * row(s) to be modified.
    +     * 
    + * + * repeated string columns = 2; + * + * @param index The index of the value to return. + * @return The bytes of the columns at the given index. + */ + com.google.protobuf.ByteString getColumnsBytes(int index); + + /** + * + * + *
    +     * The values to be written. `values` can contain more than one
    +     * list of values. If it does, then multiple rows are written, one
    +     * for each entry in `values`. Each list in `values` must have
    +     * exactly as many entries as there are entries in
    +     * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +     * multiple lists is equivalent to sending multiple `Mutation`s, each
    +     * containing one `values` entry and repeating
    +     * [table][google.spanner.v1.Mutation.Write.table] and
    +     * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +     * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + java.util.List getValuesList(); + + /** + * + * + *
    +     * The values to be written. `values` can contain more than one
    +     * list of values. If it does, then multiple rows are written, one
    +     * for each entry in `values`. Each list in `values` must have
    +     * exactly as many entries as there are entries in
    +     * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +     * multiple lists is equivalent to sending multiple `Mutation`s, each
    +     * containing one `values` entry and repeating
    +     * [table][google.spanner.v1.Mutation.Write.table] and
    +     * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +     * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + com.google.protobuf.ListValue getValues(int index); + + /** + * + * + *
    +     * The values to be written. `values` can contain more than one
    +     * list of values. If it does, then multiple rows are written, one
    +     * for each entry in `values`. Each list in `values` must have
    +     * exactly as many entries as there are entries in
    +     * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +     * multiple lists is equivalent to sending multiple `Mutation`s, each
    +     * containing one `values` entry and repeating
    +     * [table][google.spanner.v1.Mutation.Write.table] and
    +     * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +     * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + int getValuesCount(); + + /** + * + * + *
    +     * The values to be written. `values` can contain more than one
    +     * list of values. If it does, then multiple rows are written, one
    +     * for each entry in `values`. Each list in `values` must have
    +     * exactly as many entries as there are entries in
    +     * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +     * multiple lists is equivalent to sending multiple `Mutation`s, each
    +     * containing one `values` entry and repeating
    +     * [table][google.spanner.v1.Mutation.Write.table] and
    +     * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +     * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + java.util.List getValuesOrBuilderList(); + + /** + * + * + *
    +     * The values to be written. `values` can contain more than one
    +     * list of values. If it does, then multiple rows are written, one
    +     * for each entry in `values`. Each list in `values` must have
    +     * exactly as many entries as there are entries in
    +     * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +     * multiple lists is equivalent to sending multiple `Mutation`s, each
    +     * containing one `values` entry and repeating
    +     * [table][google.spanner.v1.Mutation.Write.table] and
    +     * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +     * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + com.google.protobuf.ListValueOrBuilder getValuesOrBuilder(int index); + } + + /** + * + * + *
    +   * Arguments to [insert][google.spanner.v1.Mutation.insert],
    +   * [update][google.spanner.v1.Mutation.update],
    +   * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], and
    +   * [replace][google.spanner.v1.Mutation.replace] operations.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.Mutation.Write} + */ + public static final class Write extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.Mutation.Write) + WriteOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Write"); + } + + // Use Write.newBuilder() to construct. + private Write(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Write() { + table_ = ""; + columns_ = com.google.protobuf.LazyStringArrayList.emptyList(); + values_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_Write_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_Write_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Mutation.Write.class, + com.google.spanner.v1.Mutation.Write.Builder.class); + } + + public static final int TABLE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object table_ = ""; + + /** + * + * + *
    +     * Required. The table whose rows will be written.
    +     * 
    + * + * string table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The table. + */ + @java.lang.Override + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } + } + + /** + * + * + *
    +     * Required. The table whose rows will be written.
    +     * 
    + * + * string table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for table. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int COLUMNS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList columns_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +     * The names of the columns in
    +     * [table][google.spanner.v1.Mutation.Write.table] to be written.
    +     *
    +     * The list of columns must contain enough columns to allow
    +     * Cloud Spanner to derive values for all primary key columns in the
    +     * row(s) to be modified.
    +     * 
    + * + * repeated string columns = 2; + * + * @return A list containing the columns. + */ + public com.google.protobuf.ProtocolStringList getColumnsList() { + return columns_; + } + + /** + * + * + *
    +     * The names of the columns in
    +     * [table][google.spanner.v1.Mutation.Write.table] to be written.
    +     *
    +     * The list of columns must contain enough columns to allow
    +     * Cloud Spanner to derive values for all primary key columns in the
    +     * row(s) to be modified.
    +     * 
    + * + * repeated string columns = 2; + * + * @return The count of columns. + */ + public int getColumnsCount() { + return columns_.size(); + } + + /** + * + * + *
    +     * The names of the columns in
    +     * [table][google.spanner.v1.Mutation.Write.table] to be written.
    +     *
    +     * The list of columns must contain enough columns to allow
    +     * Cloud Spanner to derive values for all primary key columns in the
    +     * row(s) to be modified.
    +     * 
    + * + * repeated string columns = 2; + * + * @param index The index of the element to return. + * @return The columns at the given index. + */ + public java.lang.String getColumns(int index) { + return columns_.get(index); + } + + /** + * + * + *
    +     * The names of the columns in
    +     * [table][google.spanner.v1.Mutation.Write.table] to be written.
    +     *
    +     * The list of columns must contain enough columns to allow
    +     * Cloud Spanner to derive values for all primary key columns in the
    +     * row(s) to be modified.
    +     * 
    + * + * repeated string columns = 2; + * + * @param index The index of the value to return. + * @return The bytes of the columns at the given index. + */ + public com.google.protobuf.ByteString getColumnsBytes(int index) { + return columns_.getByteString(index); + } + + public static final int VALUES_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private java.util.List values_; + + /** + * + * + *
    +     * The values to be written. `values` can contain more than one
    +     * list of values. If it does, then multiple rows are written, one
    +     * for each entry in `values`. Each list in `values` must have
    +     * exactly as many entries as there are entries in
    +     * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +     * multiple lists is equivalent to sending multiple `Mutation`s, each
    +     * containing one `values` entry and repeating
    +     * [table][google.spanner.v1.Mutation.Write.table] and
    +     * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +     * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + @java.lang.Override + public java.util.List getValuesList() { + return values_; + } + + /** + * + * + *
    +     * The values to be written. `values` can contain more than one
    +     * list of values. If it does, then multiple rows are written, one
    +     * for each entry in `values`. Each list in `values` must have
    +     * exactly as many entries as there are entries in
    +     * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +     * multiple lists is equivalent to sending multiple `Mutation`s, each
    +     * containing one `values` entry and repeating
    +     * [table][google.spanner.v1.Mutation.Write.table] and
    +     * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +     * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + @java.lang.Override + public java.util.List + getValuesOrBuilderList() { + return values_; + } + + /** + * + * + *
    +     * The values to be written. `values` can contain more than one
    +     * list of values. If it does, then multiple rows are written, one
    +     * for each entry in `values`. Each list in `values` must have
    +     * exactly as many entries as there are entries in
    +     * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +     * multiple lists is equivalent to sending multiple `Mutation`s, each
    +     * containing one `values` entry and repeating
    +     * [table][google.spanner.v1.Mutation.Write.table] and
    +     * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +     * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + @java.lang.Override + public int getValuesCount() { + return values_.size(); + } + + /** + * + * + *
    +     * The values to be written. `values` can contain more than one
    +     * list of values. If it does, then multiple rows are written, one
    +     * for each entry in `values`. Each list in `values` must have
    +     * exactly as many entries as there are entries in
    +     * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +     * multiple lists is equivalent to sending multiple `Mutation`s, each
    +     * containing one `values` entry and repeating
    +     * [table][google.spanner.v1.Mutation.Write.table] and
    +     * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +     * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + @java.lang.Override + public com.google.protobuf.ListValue getValues(int index) { + return values_.get(index); + } + + /** + * + * + *
    +     * The values to be written. `values` can contain more than one
    +     * list of values. If it does, then multiple rows are written, one
    +     * for each entry in `values`. Each list in `values` must have
    +     * exactly as many entries as there are entries in
    +     * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +     * multiple lists is equivalent to sending multiple `Mutation`s, each
    +     * containing one `values` entry and repeating
    +     * [table][google.spanner.v1.Mutation.Write.table] and
    +     * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +     * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + @java.lang.Override + public com.google.protobuf.ListValueOrBuilder getValuesOrBuilder(int index) { + return values_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(table_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, table_); + } + for (int i = 0; i < columns_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, columns_.getRaw(i)); + } + for (int i = 0; i < values_.size(); i++) { + output.writeMessage(3, values_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(table_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, table_); + } + { + int dataSize = 0; + for (int i = 0; i < columns_.size(); i++) { + dataSize += computeStringSizeNoTag(columns_.getRaw(i)); + } + size += dataSize; + size += 1 * getColumnsList().size(); + } + for (int i = 0; i < values_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, values_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.Mutation.Write)) { + return super.equals(obj); + } + com.google.spanner.v1.Mutation.Write other = 
(com.google.spanner.v1.Mutation.Write) obj; + + if (!getTable().equals(other.getTable())) return false; + if (!getColumnsList().equals(other.getColumnsList())) return false; + if (!getValuesList().equals(other.getValuesList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + if (getColumnsCount() > 0) { + hash = (37 * hash) + COLUMNS_FIELD_NUMBER; + hash = (53 * hash) + getColumnsList().hashCode(); + } + if (getValuesCount() > 0) { + hash = (37 * hash) + VALUES_FIELD_NUMBER; + hash = (53 * hash) + getValuesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.Mutation.Write parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Mutation.Write parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation.Write parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Mutation.Write parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation.Write parseFrom(byte[] data) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Mutation.Write parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation.Write parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Mutation.Write parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation.Write parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Mutation.Write parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation.Write parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Mutation.Write parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder 
newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.Mutation.Write prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * Arguments to [insert][google.spanner.v1.Mutation.insert],
    +     * [update][google.spanner.v1.Mutation.update],
    +     * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], and
    +     * [replace][google.spanner.v1.Mutation.replace] operations.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.Mutation.Write} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.Mutation.Write) + com.google.spanner.v1.Mutation.WriteOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_Write_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_Write_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Mutation.Write.class, + com.google.spanner.v1.Mutation.Write.Builder.class); + } + + // Construct using com.google.spanner.v1.Mutation.Write.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + table_ = ""; + columns_ = com.google.protobuf.LazyStringArrayList.emptyList(); + if (valuesBuilder_ == null) { + values_ = java.util.Collections.emptyList(); + } else { + values_ = null; + valuesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_Write_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.Mutation.Write getDefaultInstanceForType() { + return com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.Mutation.Write build() { + com.google.spanner.v1.Mutation.Write result = buildPartial(); + if 
(!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.Mutation.Write buildPartial() { + com.google.spanner.v1.Mutation.Write result = + new com.google.spanner.v1.Mutation.Write(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.v1.Mutation.Write result) { + if (valuesBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0)) { + values_ = java.util.Collections.unmodifiableList(values_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.values_ = values_; + } else { + result.values_ = valuesBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.Mutation.Write result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.table_ = table_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + columns_.makeImmutable(); + result.columns_ = columns_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.Mutation.Write) { + return mergeFrom((com.google.spanner.v1.Mutation.Write) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.Mutation.Write other) { + if (other == com.google.spanner.v1.Mutation.Write.getDefaultInstance()) return this; + if (!other.getTable().isEmpty()) { + table_ = other.table_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.columns_.isEmpty()) { + if (columns_.isEmpty()) { + columns_ = other.columns_; + bitField0_ |= 0x00000002; + } else { + ensureColumnsIsMutable(); + columns_.addAll(other.columns_); + } + onChanged(); + } + if (valuesBuilder_ == null) { + if (!other.values_.isEmpty()) { + if (values_.isEmpty()) { + values_ = other.values_; + bitField0_ = (bitField0_ & 
~0x00000004); + } else { + ensureValuesIsMutable(); + values_.addAll(other.values_); + } + onChanged(); + } + } else { + if (!other.values_.isEmpty()) { + if (valuesBuilder_.isEmpty()) { + valuesBuilder_.dispose(); + valuesBuilder_ = null; + values_ = other.values_; + bitField0_ = (bitField0_ & ~0x00000004); + valuesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetValuesFieldBuilder() + : null; + } else { + valuesBuilder_.addAllMessages(other.values_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + table_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureColumnsIsMutable(); + columns_.add(s); + break; + } // case 18 + case 26: + { + com.google.protobuf.ListValue m = + input.readMessage(com.google.protobuf.ListValue.parser(), extensionRegistry); + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + values_.add(m); + } else { + valuesBuilder_.addMessage(m); + } + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + 
private java.lang.Object table_ = ""; + + /** + * + * + *
    +       * Required. The table whose rows will be written.
    +       * 
    + * + * string table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The table. + */ + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * Required. The table whose rows will be written.
    +       * 
    + * + * string table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for table. + */ + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * Required. The table whose rows will be written.
    +       * 
    + * + * string table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The table to set. + * @return This builder for chaining. + */ + public Builder setTable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The table whose rows will be written.
    +       * 
    + * + * string table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearTable() { + table_ = getDefaultInstance().getTable(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The table whose rows will be written.
    +       * 
    + * + * string table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for table to set. + * @return This builder for chaining. + */ + public Builder setTableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + table_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList columns_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureColumnsIsMutable() { + if (!columns_.isModifiable()) { + columns_ = new com.google.protobuf.LazyStringArrayList(columns_); + } + bitField0_ |= 0x00000002; + } + + /** + * + * + *
    +       * The names of the columns in
    +       * [table][google.spanner.v1.Mutation.Write.table] to be written.
    +       *
    +       * The list of columns must contain enough columns to allow
    +       * Cloud Spanner to derive values for all primary key columns in the
    +       * row(s) to be modified.
    +       * 
    + * + * repeated string columns = 2; + * + * @return A list containing the columns. + */ + public com.google.protobuf.ProtocolStringList getColumnsList() { + columns_.makeImmutable(); + return columns_; + } + + /** + * + * + *
    +       * The names of the columns in
    +       * [table][google.spanner.v1.Mutation.Write.table] to be written.
    +       *
    +       * The list of columns must contain enough columns to allow
    +       * Cloud Spanner to derive values for all primary key columns in the
    +       * row(s) to be modified.
    +       * 
    + * + * repeated string columns = 2; + * + * @return The count of columns. + */ + public int getColumnsCount() { + return columns_.size(); + } + + /** + * + * + *
    +       * The names of the columns in
    +       * [table][google.spanner.v1.Mutation.Write.table] to be written.
    +       *
    +       * The list of columns must contain enough columns to allow
    +       * Cloud Spanner to derive values for all primary key columns in the
    +       * row(s) to be modified.
    +       * 
    + * + * repeated string columns = 2; + * + * @param index The index of the element to return. + * @return The columns at the given index. + */ + public java.lang.String getColumns(int index) { + return columns_.get(index); + } + + /** + * + * + *
    +       * The names of the columns in
    +       * [table][google.spanner.v1.Mutation.Write.table] to be written.
    +       *
    +       * The list of columns must contain enough columns to allow
    +       * Cloud Spanner to derive values for all primary key columns in the
    +       * row(s) to be modified.
    +       * 
    + * + * repeated string columns = 2; + * + * @param index The index of the value to return. + * @return The bytes of the columns at the given index. + */ + public com.google.protobuf.ByteString getColumnsBytes(int index) { + return columns_.getByteString(index); + } + + /** + * + * + *
    +       * The names of the columns in
    +       * [table][google.spanner.v1.Mutation.Write.table] to be written.
    +       *
    +       * The list of columns must contain enough columns to allow
    +       * Cloud Spanner to derive values for all primary key columns in the
    +       * row(s) to be modified.
    +       * 
    + * + * repeated string columns = 2; + * + * @param index The index to set the value at. + * @param value The columns to set. + * @return This builder for chaining. + */ + public Builder setColumns(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnsIsMutable(); + columns_.set(index, value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The names of the columns in
    +       * [table][google.spanner.v1.Mutation.Write.table] to be written.
    +       *
    +       * The list of columns must contain enough columns to allow
    +       * Cloud Spanner to derive values for all primary key columns in the
    +       * row(s) to be modified.
    +       * 
    + * + * repeated string columns = 2; + * + * @param value The columns to add. + * @return This builder for chaining. + */ + public Builder addColumns(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnsIsMutable(); + columns_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The names of the columns in
    +       * [table][google.spanner.v1.Mutation.Write.table] to be written.
    +       *
    +       * The list of columns must contain enough columns to allow
    +       * Cloud Spanner to derive values for all primary key columns in the
    +       * row(s) to be modified.
    +       * 
    + * + * repeated string columns = 2; + * + * @param values The columns to add. + * @return This builder for chaining. + */ + public Builder addAllColumns(java.lang.Iterable values) { + ensureColumnsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, columns_); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The names of the columns in
    +       * [table][google.spanner.v1.Mutation.Write.table] to be written.
    +       *
    +       * The list of columns must contain enough columns to allow
    +       * Cloud Spanner to derive values for all primary key columns in the
    +       * row(s) to be modified.
    +       * 
    + * + * repeated string columns = 2; + * + * @return This builder for chaining. + */ + public Builder clearColumns() { + columns_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The names of the columns in
    +       * [table][google.spanner.v1.Mutation.Write.table] to be written.
    +       *
    +       * The list of columns must contain enough columns to allow
    +       * Cloud Spanner to derive values for all primary key columns in the
    +       * row(s) to be modified.
    +       * 
    + * + * repeated string columns = 2; + * + * @param value The bytes of the columns to add. + * @return This builder for chaining. + */ + public Builder addColumnsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureColumnsIsMutable(); + columns_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.util.List values_ = + java.util.Collections.emptyList(); + + private void ensureValuesIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + values_ = new java.util.ArrayList(values_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder> + valuesBuilder_; + + /** + * + * + *
    +       * The values to be written. `values` can contain more than one
    +       * list of values. If it does, then multiple rows are written, one
    +       * for each entry in `values`. Each list in `values` must have
    +       * exactly as many entries as there are entries in
    +       * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +       * multiple lists is equivalent to sending multiple `Mutation`s, each
    +       * containing one `values` entry and repeating
    +       * [table][google.spanner.v1.Mutation.Write.table] and
    +       * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +       * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +       * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + public java.util.List getValuesList() { + if (valuesBuilder_ == null) { + return java.util.Collections.unmodifiableList(values_); + } else { + return valuesBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +       * The values to be written. `values` can contain more than one
    +       * list of values. If it does, then multiple rows are written, one
    +       * for each entry in `values`. Each list in `values` must have
    +       * exactly as many entries as there are entries in
    +       * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +       * multiple lists is equivalent to sending multiple `Mutation`s, each
    +       * containing one `values` entry and repeating
    +       * [table][google.spanner.v1.Mutation.Write.table] and
    +       * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +       * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +       * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + public int getValuesCount() { + if (valuesBuilder_ == null) { + return values_.size(); + } else { + return valuesBuilder_.getCount(); + } + } + + /** + * + * + *
    +       * The values to be written. `values` can contain more than one
    +       * list of values. If it does, then multiple rows are written, one
    +       * for each entry in `values`. Each list in `values` must have
    +       * exactly as many entries as there are entries in
    +       * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +       * multiple lists is equivalent to sending multiple `Mutation`s, each
    +       * containing one `values` entry and repeating
    +       * [table][google.spanner.v1.Mutation.Write.table] and
    +       * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +       * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +       * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + public com.google.protobuf.ListValue getValues(int index) { + if (valuesBuilder_ == null) { + return values_.get(index); + } else { + return valuesBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +       * The values to be written. `values` can contain more than one
    +       * list of values. If it does, then multiple rows are written, one
    +       * for each entry in `values`. Each list in `values` must have
    +       * exactly as many entries as there are entries in
    +       * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +       * multiple lists is equivalent to sending multiple `Mutation`s, each
    +       * containing one `values` entry and repeating
    +       * [table][google.spanner.v1.Mutation.Write.table] and
    +       * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +       * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +       * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + public Builder setValues(int index, com.google.protobuf.ListValue value) { + if (valuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.set(index, value); + onChanged(); + } else { + valuesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * The values to be written. `values` can contain more than one
    +       * list of values. If it does, then multiple rows are written, one
    +       * for each entry in `values`. Each list in `values` must have
    +       * exactly as many entries as there are entries in
    +       * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +       * multiple lists is equivalent to sending multiple `Mutation`s, each
    +       * containing one `values` entry and repeating
    +       * [table][google.spanner.v1.Mutation.Write.table] and
    +       * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +       * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +       * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + public Builder setValues(int index, com.google.protobuf.ListValue.Builder builderForValue) { + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + values_.set(index, builderForValue.build()); + onChanged(); + } else { + valuesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * The values to be written. `values` can contain more than one
    +       * list of values. If it does, then multiple rows are written, one
    +       * for each entry in `values`. Each list in `values` must have
    +       * exactly as many entries as there are entries in
    +       * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +       * multiple lists is equivalent to sending multiple `Mutation`s, each
    +       * containing one `values` entry and repeating
    +       * [table][google.spanner.v1.Mutation.Write.table] and
    +       * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +       * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +       * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + public Builder addValues(com.google.protobuf.ListValue value) { + if (valuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.add(value); + onChanged(); + } else { + valuesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +       * The values to be written. `values` can contain more than one
    +       * list of values. If it does, then multiple rows are written, one
    +       * for each entry in `values`. Each list in `values` must have
    +       * exactly as many entries as there are entries in
    +       * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +       * multiple lists is equivalent to sending multiple `Mutation`s, each
    +       * containing one `values` entry and repeating
    +       * [table][google.spanner.v1.Mutation.Write.table] and
    +       * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +       * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +       * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + public Builder addValues(int index, com.google.protobuf.ListValue value) { + if (valuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.add(index, value); + onChanged(); + } else { + valuesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +       * The values to be written. `values` can contain more than one
    +       * list of values. If it does, then multiple rows are written, one
    +       * for each entry in `values`. Each list in `values` must have
    +       * exactly as many entries as there are entries in
    +       * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +       * multiple lists is equivalent to sending multiple `Mutation`s, each
    +       * containing one `values` entry and repeating
    +       * [table][google.spanner.v1.Mutation.Write.table] and
    +       * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +       * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +       * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + public Builder addValues(com.google.protobuf.ListValue.Builder builderForValue) { + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + values_.add(builderForValue.build()); + onChanged(); + } else { + valuesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * The values to be written. `values` can contain more than one
    +       * list of values. If it does, then multiple rows are written, one
    +       * for each entry in `values`. Each list in `values` must have
    +       * exactly as many entries as there are entries in
    +       * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +       * multiple lists is equivalent to sending multiple `Mutation`s, each
    +       * containing one `values` entry and repeating
    +       * [table][google.spanner.v1.Mutation.Write.table] and
    +       * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +       * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +       * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + public Builder addValues(int index, com.google.protobuf.ListValue.Builder builderForValue) { + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + values_.add(index, builderForValue.build()); + onChanged(); + } else { + valuesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +       * The values to be written. `values` can contain more than one
    +       * list of values. If it does, then multiple rows are written, one
    +       * for each entry in `values`. Each list in `values` must have
    +       * exactly as many entries as there are entries in
    +       * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +       * multiple lists is equivalent to sending multiple `Mutation`s, each
    +       * containing one `values` entry and repeating
    +       * [table][google.spanner.v1.Mutation.Write.table] and
    +       * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +       * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +       * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + public Builder addAllValues( + java.lang.Iterable values) { + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, values_); + onChanged(); + } else { + valuesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +       * The values to be written. `values` can contain more than one
    +       * list of values. If it does, then multiple rows are written, one
    +       * for each entry in `values`. Each list in `values` must have
    +       * exactly as many entries as there are entries in
    +       * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +       * multiple lists is equivalent to sending multiple `Mutation`s, each
    +       * containing one `values` entry and repeating
    +       * [table][google.spanner.v1.Mutation.Write.table] and
    +       * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +       * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +       * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + public Builder clearValues() { + if (valuesBuilder_ == null) { + values_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + valuesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +       * The values to be written. `values` can contain more than one
    +       * list of values. If it does, then multiple rows are written, one
    +       * for each entry in `values`. Each list in `values` must have
    +       * exactly as many entries as there are entries in
    +       * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +       * multiple lists is equivalent to sending multiple `Mutation`s, each
    +       * containing one `values` entry and repeating
    +       * [table][google.spanner.v1.Mutation.Write.table] and
    +       * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +       * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +       * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + public Builder removeValues(int index) { + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + values_.remove(index); + onChanged(); + } else { + valuesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +       * The values to be written. `values` can contain more than one
    +       * list of values. If it does, then multiple rows are written, one
    +       * for each entry in `values`. Each list in `values` must have
    +       * exactly as many entries as there are entries in
    +       * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +       * multiple lists is equivalent to sending multiple `Mutation`s, each
    +       * containing one `values` entry and repeating
    +       * [table][google.spanner.v1.Mutation.Write.table] and
    +       * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +       * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +       * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + public com.google.protobuf.ListValue.Builder getValuesBuilder(int index) { + return internalGetValuesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +       * The values to be written. `values` can contain more than one
    +       * list of values. If it does, then multiple rows are written, one
    +       * for each entry in `values`. Each list in `values` must have
    +       * exactly as many entries as there are entries in
    +       * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +       * multiple lists is equivalent to sending multiple `Mutation`s, each
    +       * containing one `values` entry and repeating
    +       * [table][google.spanner.v1.Mutation.Write.table] and
    +       * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +       * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +       * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + public com.google.protobuf.ListValueOrBuilder getValuesOrBuilder(int index) { + if (valuesBuilder_ == null) { + return values_.get(index); + } else { + return valuesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +       * The values to be written. `values` can contain more than one
    +       * list of values. If it does, then multiple rows are written, one
    +       * for each entry in `values`. Each list in `values` must have
    +       * exactly as many entries as there are entries in
    +       * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +       * multiple lists is equivalent to sending multiple `Mutation`s, each
    +       * containing one `values` entry and repeating
    +       * [table][google.spanner.v1.Mutation.Write.table] and
    +       * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +       * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +       * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + public java.util.List + getValuesOrBuilderList() { + if (valuesBuilder_ != null) { + return valuesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(values_); + } + } + + /** + * + * + *
    +       * The values to be written. `values` can contain more than one
    +       * list of values. If it does, then multiple rows are written, one
    +       * for each entry in `values`. Each list in `values` must have
    +       * exactly as many entries as there are entries in
    +       * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +       * multiple lists is equivalent to sending multiple `Mutation`s, each
    +       * containing one `values` entry and repeating
    +       * [table][google.spanner.v1.Mutation.Write.table] and
    +       * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +       * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +       * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + public com.google.protobuf.ListValue.Builder addValuesBuilder() { + return internalGetValuesFieldBuilder() + .addBuilder(com.google.protobuf.ListValue.getDefaultInstance()); + } + + /** + * + * + *
    +       * The values to be written. `values` can contain more than one
    +       * list of values. If it does, then multiple rows are written, one
    +       * for each entry in `values`. Each list in `values` must have
    +       * exactly as many entries as there are entries in
    +       * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +       * multiple lists is equivalent to sending multiple `Mutation`s, each
    +       * containing one `values` entry and repeating
    +       * [table][google.spanner.v1.Mutation.Write.table] and
    +       * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +       * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +       * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + public com.google.protobuf.ListValue.Builder addValuesBuilder(int index) { + return internalGetValuesFieldBuilder() + .addBuilder(index, com.google.protobuf.ListValue.getDefaultInstance()); + } + + /** + * + * + *
    +       * The values to be written. `values` can contain more than one
    +       * list of values. If it does, then multiple rows are written, one
    +       * for each entry in `values`. Each list in `values` must have
    +       * exactly as many entries as there are entries in
    +       * [columns][google.spanner.v1.Mutation.Write.columns] above. Sending
    +       * multiple lists is equivalent to sending multiple `Mutation`s, each
    +       * containing one `values` entry and repeating
    +       * [table][google.spanner.v1.Mutation.Write.table] and
    +       * [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in
    +       * each list are encoded as described [here][google.spanner.v1.TypeCode].
    +       * 
    + * + * repeated .google.protobuf.ListValue values = 3; + */ + public java.util.List getValuesBuilderList() { + return internalGetValuesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder> + internalGetValuesFieldBuilder() { + if (valuesBuilder_ == null) { + valuesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder>( + values_, ((bitField0_ & 0x00000004) != 0), getParentForChildren(), isClean()); + values_ = null; + } + return valuesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.Mutation.Write) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.Mutation.Write) + private static final com.google.spanner.v1.Mutation.Write DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.Mutation.Write(); + } + + public static com.google.spanner.v1.Mutation.Write getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Write parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.Mutation.Write getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface DeleteOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.Mutation.Delete) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * Required. The table whose rows will be deleted.
    +     * 
    + * + * string table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The table. + */ + java.lang.String getTable(); + + /** + * + * + *
    +     * Required. The table whose rows will be deleted.
    +     * 
    + * + * string table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for table. + */ + com.google.protobuf.ByteString getTableBytes(); + + /** + * + * + *
    +     * Required. The primary keys of the rows within
    +     * [table][google.spanner.v1.Mutation.Delete.table] to delete.  The primary
    +     * keys must be specified in the order in which they appear in the `PRIMARY
    +     * KEY()` clause of the table's equivalent DDL statement (the DDL statement
    +     * used to create the table). Delete is idempotent. The transaction will
    +     * succeed even if some or all rows do not exist.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the keySet field is set. + */ + boolean hasKeySet(); + + /** + * + * + *
    +     * Required. The primary keys of the rows within
    +     * [table][google.spanner.v1.Mutation.Delete.table] to delete.  The primary
    +     * keys must be specified in the order in which they appear in the `PRIMARY
    +     * KEY()` clause of the table's equivalent DDL statement (the DDL statement
    +     * used to create the table). Delete is idempotent. The transaction will
    +     * succeed even if some or all rows do not exist.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The keySet. + */ + com.google.spanner.v1.KeySet getKeySet(); + + /** + * + * + *
    +     * Required. The primary keys of the rows within
    +     * [table][google.spanner.v1.Mutation.Delete.table] to delete.  The primary
    +     * keys must be specified in the order in which they appear in the `PRIMARY
    +     * KEY()` clause of the table's equivalent DDL statement (the DDL statement
    +     * used to create the table). Delete is idempotent. The transaction will
    +     * succeed even if some or all rows do not exist.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + com.google.spanner.v1.KeySetOrBuilder getKeySetOrBuilder(); + } + + /** + * + * + *
    +   * Arguments to [delete][google.spanner.v1.Mutation.delete] operations.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.Mutation.Delete} + */ + public static final class Delete extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.Mutation.Delete) + DeleteOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Delete"); + } + + // Use Delete.newBuilder() to construct. + private Delete(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Delete() { + table_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_Delete_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_Delete_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Mutation.Delete.class, + com.google.spanner.v1.Mutation.Delete.Builder.class); + } + + private int bitField0_; + public static final int TABLE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object table_ = ""; + + /** + * + * + *
    +     * Required. The table whose rows will be deleted.
    +     * 
    + * + * string table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The table. + */ + @java.lang.Override + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } + } + + /** + * + * + *
    +     * Required. The table whose rows will be deleted.
    +     * 
    + * + * string table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for table. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int KEY_SET_FIELD_NUMBER = 2; + private com.google.spanner.v1.KeySet keySet_; + + /** + * + * + *
    +     * Required. The primary keys of the rows within
    +     * [table][google.spanner.v1.Mutation.Delete.table] to delete.  The primary
    +     * keys must be specified in the order in which they appear in the `PRIMARY
    +     * KEY()` clause of the table's equivalent DDL statement (the DDL statement
    +     * used to create the table). Delete is idempotent. The transaction will
    +     * succeed even if some or all rows do not exist.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the keySet field is set. + */ + @java.lang.Override + public boolean hasKeySet() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Required. The primary keys of the rows within
    +     * [table][google.spanner.v1.Mutation.Delete.table] to delete.  The primary
    +     * keys must be specified in the order in which they appear in the `PRIMARY
    +     * KEY()` clause of the table's equivalent DDL statement (the DDL statement
    +     * used to create the table). Delete is idempotent. The transaction will
    +     * succeed even if some or all rows do not exist.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The keySet. + */ + @java.lang.Override + public com.google.spanner.v1.KeySet getKeySet() { + return keySet_ == null ? com.google.spanner.v1.KeySet.getDefaultInstance() : keySet_; + } + + /** + * + * + *
    +     * Required. The primary keys of the rows within
    +     * [table][google.spanner.v1.Mutation.Delete.table] to delete.  The primary
    +     * keys must be specified in the order in which they appear in the `PRIMARY
    +     * KEY()` clause of the table's equivalent DDL statement (the DDL statement
    +     * used to create the table). Delete is idempotent. The transaction will
    +     * succeed even if some or all rows do not exist.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + @java.lang.Override + public com.google.spanner.v1.KeySetOrBuilder getKeySetOrBuilder() { + return keySet_ == null ? com.google.spanner.v1.KeySet.getDefaultInstance() : keySet_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(table_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, table_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getKeySet()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(table_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, table_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getKeySet()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.Mutation.Delete)) { + return super.equals(obj); + } + com.google.spanner.v1.Mutation.Delete other = (com.google.spanner.v1.Mutation.Delete) obj; + + if (!getTable().equals(other.getTable())) return false; + if (hasKeySet() != other.hasKeySet()) return false; + if (hasKeySet()) { + if (!getKeySet().equals(other.getKeySet())) return false; + } + if 
(!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + if (hasKeySet()) { + hash = (37 * hash) + KEY_SET_FIELD_NUMBER; + hash = (53 * hash) + getKeySet().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.Mutation.Delete parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Mutation.Delete parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation.Delete parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Mutation.Delete parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation.Delete parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Mutation.Delete parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.v1.Mutation.Delete parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Mutation.Delete parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation.Delete parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Mutation.Delete parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation.Delete parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Mutation.Delete parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.Mutation.Delete prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * Arguments to [delete][google.spanner.v1.Mutation.delete] operations.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.Mutation.Delete} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.Mutation.Delete) + com.google.spanner.v1.Mutation.DeleteOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_Delete_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_Delete_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Mutation.Delete.class, + com.google.spanner.v1.Mutation.Delete.Builder.class); + } + + // Construct using com.google.spanner.v1.Mutation.Delete.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetKeySetFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + table_ = ""; + keySet_ = null; + if (keySetBuilder_ != null) { + keySetBuilder_.dispose(); + keySetBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_Delete_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.Mutation.Delete getDefaultInstanceForType() { + return com.google.spanner.v1.Mutation.Delete.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.v1.Mutation.Delete build() { + com.google.spanner.v1.Mutation.Delete result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.Mutation.Delete buildPartial() { + com.google.spanner.v1.Mutation.Delete result = + new com.google.spanner.v1.Mutation.Delete(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.Mutation.Delete result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.table_ = table_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.keySet_ = keySetBuilder_ == null ? keySet_ : keySetBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.Mutation.Delete) { + return mergeFrom((com.google.spanner.v1.Mutation.Delete) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.Mutation.Delete other) { + if (other == com.google.spanner.v1.Mutation.Delete.getDefaultInstance()) return this; + if (!other.getTable().isEmpty()) { + table_ = other.table_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasKeySet()) { + mergeKeySet(other.getKeySet()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean 
done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + table_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetKeySetFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object table_ = ""; + + /** + * + * + *
    +       * Required. The table whose rows will be deleted.
    +       * 
    + * + * string table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The table. + */ + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * Required. The table whose rows will be deleted.
    +       * 
    + * + * string table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for table. + */ + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * Required. The table whose rows will be deleted.
    +       * 
    + * + * string table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The table to set. + * @return This builder for chaining. + */ + public Builder setTable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The table whose rows will be deleted.
    +       * 
    + * + * string table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearTable() { + table_ = getDefaultInstance().getTable(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The table whose rows will be deleted.
    +       * 
    + * + * string table = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for table to set. + * @return This builder for chaining. + */ + public Builder setTableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + table_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.spanner.v1.KeySet keySet_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.KeySet, + com.google.spanner.v1.KeySet.Builder, + com.google.spanner.v1.KeySetOrBuilder> + keySetBuilder_; + + /** + * + * + *
    +       * Required. The primary keys of the rows within
    +       * [table][google.spanner.v1.Mutation.Delete.table] to delete.  The primary
    +       * keys must be specified in the order in which they appear in the `PRIMARY
    +       * KEY()` clause of the table's equivalent DDL statement (the DDL statement
    +       * used to create the table). Delete is idempotent. The transaction will
    +       * succeed even if some or all rows do not exist.
    +       * 
    + * + * .google.spanner.v1.KeySet key_set = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the keySet field is set. + */ + public boolean hasKeySet() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +       * Required. The primary keys of the rows within
    +       * [table][google.spanner.v1.Mutation.Delete.table] to delete.  The primary
    +       * keys must be specified in the order in which they appear in the `PRIMARY
    +       * KEY()` clause of the table's equivalent DDL statement (the DDL statement
    +       * used to create the table). Delete is idempotent. The transaction will
    +       * succeed even if some or all rows do not exist.
    +       * 
    + * + * .google.spanner.v1.KeySet key_set = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The keySet. + */ + public com.google.spanner.v1.KeySet getKeySet() { + if (keySetBuilder_ == null) { + return keySet_ == null ? com.google.spanner.v1.KeySet.getDefaultInstance() : keySet_; + } else { + return keySetBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * Required. The primary keys of the rows within
    +       * [table][google.spanner.v1.Mutation.Delete.table] to delete.  The primary
    +       * keys must be specified in the order in which they appear in the `PRIMARY
    +       * KEY()` clause of the table's equivalent DDL statement (the DDL statement
    +       * used to create the table). Delete is idempotent. The transaction will
    +       * succeed even if some or all rows do not exist.
    +       * 
    + * + * .google.spanner.v1.KeySet key_set = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setKeySet(com.google.spanner.v1.KeySet value) { + if (keySetBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + keySet_ = value; + } else { + keySetBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The primary keys of the rows within
    +       * [table][google.spanner.v1.Mutation.Delete.table] to delete.  The primary
    +       * keys must be specified in the order in which they appear in the `PRIMARY
    +       * KEY()` clause of the table's equivalent DDL statement (the DDL statement
    +       * used to create the table). Delete is idempotent. The transaction will
    +       * succeed even if some or all rows do not exist.
    +       * 
    + * + * .google.spanner.v1.KeySet key_set = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setKeySet(com.google.spanner.v1.KeySet.Builder builderForValue) { + if (keySetBuilder_ == null) { + keySet_ = builderForValue.build(); + } else { + keySetBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The primary keys of the rows within
    +       * [table][google.spanner.v1.Mutation.Delete.table] to delete.  The primary
    +       * keys must be specified in the order in which they appear in the `PRIMARY
    +       * KEY()` clause of the table's equivalent DDL statement (the DDL statement
    +       * used to create the table). Delete is idempotent. The transaction will
    +       * succeed even if some or all rows do not exist.
    +       * 
    + * + * .google.spanner.v1.KeySet key_set = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeKeySet(com.google.spanner.v1.KeySet value) { + if (keySetBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && keySet_ != null + && keySet_ != com.google.spanner.v1.KeySet.getDefaultInstance()) { + getKeySetBuilder().mergeFrom(value); + } else { + keySet_ = value; + } + } else { + keySetBuilder_.mergeFrom(value); + } + if (keySet_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * Required. The primary keys of the rows within
    +       * [table][google.spanner.v1.Mutation.Delete.table] to delete.  The primary
    +       * keys must be specified in the order in which they appear in the `PRIMARY
    +       * KEY()` clause of the table's equivalent DDL statement (the DDL statement
    +       * used to create the table). Delete is idempotent. The transaction will
    +       * succeed even if some or all rows do not exist.
    +       * 
    + * + * .google.spanner.v1.KeySet key_set = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearKeySet() { + bitField0_ = (bitField0_ & ~0x00000002); + keySet_ = null; + if (keySetBuilder_ != null) { + keySetBuilder_.dispose(); + keySetBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The primary keys of the rows within
    +       * [table][google.spanner.v1.Mutation.Delete.table] to delete.  The primary
    +       * keys must be specified in the order in which they appear in the `PRIMARY
    +       * KEY()` clause of the table's equivalent DDL statement (the DDL statement
    +       * used to create the table). Delete is idempotent. The transaction will
    +       * succeed even if some or all rows do not exist.
    +       * 
    + * + * .google.spanner.v1.KeySet key_set = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.KeySet.Builder getKeySetBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetKeySetFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Required. The primary keys of the rows within
    +       * [table][google.spanner.v1.Mutation.Delete.table] to delete.  The primary
    +       * keys must be specified in the order in which they appear in the `PRIMARY
    +       * KEY()` clause of the table's equivalent DDL statement (the DDL statement
    +       * used to create the table). Delete is idempotent. The transaction will
    +       * succeed even if some or all rows do not exist.
    +       * 
    + * + * .google.spanner.v1.KeySet key_set = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.v1.KeySetOrBuilder getKeySetOrBuilder() { + if (keySetBuilder_ != null) { + return keySetBuilder_.getMessageOrBuilder(); + } else { + return keySet_ == null ? com.google.spanner.v1.KeySet.getDefaultInstance() : keySet_; + } + } + + /** + * + * + *
    +       * Required. The primary keys of the rows within
    +       * [table][google.spanner.v1.Mutation.Delete.table] to delete.  The primary
    +       * keys must be specified in the order in which they appear in the `PRIMARY
    +       * KEY()` clause of the table's equivalent DDL statement (the DDL statement
    +       * used to create the table). Delete is idempotent. The transaction will
    +       * succeed even if some or all rows do not exist.
    +       * 
    + * + * .google.spanner.v1.KeySet key_set = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.KeySet, + com.google.spanner.v1.KeySet.Builder, + com.google.spanner.v1.KeySetOrBuilder> + internalGetKeySetFieldBuilder() { + if (keySetBuilder_ == null) { + keySetBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.KeySet, + com.google.spanner.v1.KeySet.Builder, + com.google.spanner.v1.KeySetOrBuilder>( + getKeySet(), getParentForChildren(), isClean()); + keySet_ = null; + } + return keySetBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.Mutation.Delete) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.Mutation.Delete) + private static final com.google.spanner.v1.Mutation.Delete DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.Mutation.Delete(); + } + + public static com.google.spanner.v1.Mutation.Delete getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Delete parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser 
parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.Mutation.Delete getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface SendOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.Mutation.Send) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * Required. The queue to which the message will be sent.
    +     * 
    + * + * string queue = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The queue. + */ + java.lang.String getQueue(); + + /** + * + * + *
    +     * Required. The queue to which the message will be sent.
    +     * 
    + * + * string queue = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for queue. + */ + com.google.protobuf.ByteString getQueueBytes(); + + /** + * + * + *
    +     * Required. The primary key of the message to be sent.
    +     * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the key field is set. + */ + boolean hasKey(); + + /** + * + * + *
    +     * Required. The primary key of the message to be sent.
    +     * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The key. + */ + com.google.protobuf.ListValue getKey(); + + /** + * + * + *
    +     * Required. The primary key of the message to be sent.
    +     * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + com.google.protobuf.ListValueOrBuilder getKeyOrBuilder(); + + /** + * + * + *
    +     * The time at which Spanner will begin attempting to deliver the message.
    +     * If `deliver_time` is not set, Spanner will deliver the message
    +     * immediately. If `deliver_time` is in the past, Spanner will replace it
    +     * with a value closer to the current time.
    +     * 
    + * + * .google.protobuf.Timestamp deliver_time = 3; + * + * @return Whether the deliverTime field is set. + */ + boolean hasDeliverTime(); + + /** + * + * + *
    +     * The time at which Spanner will begin attempting to deliver the message.
    +     * If `deliver_time` is not set, Spanner will deliver the message
    +     * immediately. If `deliver_time` is in the past, Spanner will replace it
    +     * with a value closer to the current time.
    +     * 
    + * + * .google.protobuf.Timestamp deliver_time = 3; + * + * @return The deliverTime. + */ + com.google.protobuf.Timestamp getDeliverTime(); + + /** + * + * + *
    +     * The time at which Spanner will begin attempting to deliver the message.
    +     * If `deliver_time` is not set, Spanner will deliver the message
    +     * immediately. If `deliver_time` is in the past, Spanner will replace it
    +     * with a value closer to the current time.
    +     * 
    + * + * .google.protobuf.Timestamp deliver_time = 3; + */ + com.google.protobuf.TimestampOrBuilder getDeliverTimeOrBuilder(); + + /** + * + * + *
    +     * The payload of the message.
    +     * 
    + * + * .google.protobuf.Value payload = 4; + * + * @return Whether the payload field is set. + */ + boolean hasPayload(); + + /** + * + * + *
    +     * The payload of the message.
    +     * 
    + * + * .google.protobuf.Value payload = 4; + * + * @return The payload. + */ + com.google.protobuf.Value getPayload(); + + /** + * + * + *
    +     * The payload of the message.
    +     * 
    + * + * .google.protobuf.Value payload = 4; + */ + com.google.protobuf.ValueOrBuilder getPayloadOrBuilder(); + } + + /** + * + * + *
    +   * Arguments to [send][google.spanner.v1.Mutation.send] operations.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.Mutation.Send} + */ + public static final class Send extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.Mutation.Send) + SendOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Send"); + } + + // Use Send.newBuilder() to construct. + private Send(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Send() { + queue_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_Send_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_Send_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Mutation.Send.class, + com.google.spanner.v1.Mutation.Send.Builder.class); + } + + private int bitField0_; + public static final int QUEUE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object queue_ = ""; + + /** + * + * + *
    +     * Required. The queue to which the message will be sent.
    +     * 
    + * + * string queue = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The queue. + */ + @java.lang.Override + public java.lang.String getQueue() { + java.lang.Object ref = queue_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + queue_ = s; + return s; + } + } + + /** + * + * + *
    +     * Required. The queue to which the message will be sent.
    +     * 
    + * + * string queue = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for queue. + */ + @java.lang.Override + public com.google.protobuf.ByteString getQueueBytes() { + java.lang.Object ref = queue_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + queue_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int KEY_FIELD_NUMBER = 2; + private com.google.protobuf.ListValue key_; + + /** + * + * + *
    +     * Required. The primary key of the message to be sent.
    +     * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the key field is set. + */ + @java.lang.Override + public boolean hasKey() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Required. The primary key of the message to be sent.
    +     * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The key. + */ + @java.lang.Override + public com.google.protobuf.ListValue getKey() { + return key_ == null ? com.google.protobuf.ListValue.getDefaultInstance() : key_; + } + + /** + * + * + *
    +     * Required. The primary key of the message to be sent.
    +     * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + @java.lang.Override + public com.google.protobuf.ListValueOrBuilder getKeyOrBuilder() { + return key_ == null ? com.google.protobuf.ListValue.getDefaultInstance() : key_; + } + + public static final int DELIVER_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp deliverTime_; + + /** + * + * + *
    +     * The time at which Spanner will begin attempting to deliver the message.
    +     * If `deliver_time` is not set, Spanner will deliver the message
    +     * immediately. If `deliver_time` is in the past, Spanner will replace it
    +     * with a value closer to the current time.
    +     * 
    + * + * .google.protobuf.Timestamp deliver_time = 3; + * + * @return Whether the deliverTime field is set. + */ + @java.lang.Override + public boolean hasDeliverTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * The time at which Spanner will begin attempting to deliver the message.
    +     * If `deliver_time` is not set, Spanner will deliver the message
    +     * immediately. If `deliver_time` is in the past, Spanner will replace it
    +     * with a value closer to the current time.
    +     * 
    + * + * .google.protobuf.Timestamp deliver_time = 3; + * + * @return The deliverTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getDeliverTime() { + return deliverTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : deliverTime_; + } + + /** + * + * + *
    +     * The time at which Spanner will begin attempting to deliver the message.
    +     * If `deliver_time` is not set, Spanner will deliver the message
    +     * immediately. If `deliver_time` is in the past, Spanner will replace it
    +     * with a value closer to the current time.
    +     * 
    + * + * .google.protobuf.Timestamp deliver_time = 3; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getDeliverTimeOrBuilder() { + return deliverTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : deliverTime_; + } + + public static final int PAYLOAD_FIELD_NUMBER = 4; + private com.google.protobuf.Value payload_; + + /** + * + * + *
    +     * The payload of the message.
    +     * 
    + * + * .google.protobuf.Value payload = 4; + * + * @return Whether the payload field is set. + */ + @java.lang.Override + public boolean hasPayload() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * The payload of the message.
    +     * 
    + * + * .google.protobuf.Value payload = 4; + * + * @return The payload. + */ + @java.lang.Override + public com.google.protobuf.Value getPayload() { + return payload_ == null ? com.google.protobuf.Value.getDefaultInstance() : payload_; + } + + /** + * + * + *
    +     * The payload of the message.
    +     * 
    + * + * .google.protobuf.Value payload = 4; + */ + @java.lang.Override + public com.google.protobuf.ValueOrBuilder getPayloadOrBuilder() { + return payload_ == null ? com.google.protobuf.Value.getDefaultInstance() : payload_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(queue_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, queue_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getKey()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getDeliverTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(4, getPayload()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(queue_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, queue_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getKey()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getDeliverTime()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getPayload()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
com.google.spanner.v1.Mutation.Send)) { + return super.equals(obj); + } + com.google.spanner.v1.Mutation.Send other = (com.google.spanner.v1.Mutation.Send) obj; + + if (!getQueue().equals(other.getQueue())) return false; + if (hasKey() != other.hasKey()) return false; + if (hasKey()) { + if (!getKey().equals(other.getKey())) return false; + } + if (hasDeliverTime() != other.hasDeliverTime()) return false; + if (hasDeliverTime()) { + if (!getDeliverTime().equals(other.getDeliverTime())) return false; + } + if (hasPayload() != other.hasPayload()) return false; + if (hasPayload()) { + if (!getPayload().equals(other.getPayload())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + QUEUE_FIELD_NUMBER; + hash = (53 * hash) + getQueue().hashCode(); + if (hasKey()) { + hash = (37 * hash) + KEY_FIELD_NUMBER; + hash = (53 * hash) + getKey().hashCode(); + } + if (hasDeliverTime()) { + hash = (37 * hash) + DELIVER_TIME_FIELD_NUMBER; + hash = (53 * hash) + getDeliverTime().hashCode(); + } + if (hasPayload()) { + hash = (37 * hash) + PAYLOAD_FIELD_NUMBER; + hash = (53 * hash) + getPayload().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.Mutation.Send parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Mutation.Send parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation.Send 
parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Mutation.Send parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation.Send parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Mutation.Send parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation.Send parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Mutation.Send parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation.Send parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Mutation.Send parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation.Send parseFrom( + 
com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Mutation.Send parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.Mutation.Send prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * Arguments to [send][google.spanner.v1.Mutation.send] operations.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.Mutation.Send} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.Mutation.Send) + com.google.spanner.v1.Mutation.SendOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_Send_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_Send_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Mutation.Send.class, + com.google.spanner.v1.Mutation.Send.Builder.class); + } + + // Construct using com.google.spanner.v1.Mutation.Send.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetKeyFieldBuilder(); + internalGetDeliverTimeFieldBuilder(); + internalGetPayloadFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + queue_ = ""; + key_ = null; + if (keyBuilder_ != null) { + keyBuilder_.dispose(); + keyBuilder_ = null; + } + deliverTime_ = null; + if (deliverTimeBuilder_ != null) { + deliverTimeBuilder_.dispose(); + deliverTimeBuilder_ = null; + } + payload_ = null; + if (payloadBuilder_ != null) { + payloadBuilder_.dispose(); + payloadBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.MutationProto + 
.internal_static_google_spanner_v1_Mutation_Send_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.Mutation.Send getDefaultInstanceForType() { + return com.google.spanner.v1.Mutation.Send.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.Mutation.Send build() { + com.google.spanner.v1.Mutation.Send result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.Mutation.Send buildPartial() { + com.google.spanner.v1.Mutation.Send result = new com.google.spanner.v1.Mutation.Send(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.Mutation.Send result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.queue_ = queue_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.key_ = keyBuilder_ == null ? key_ : keyBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.deliverTime_ = + deliverTimeBuilder_ == null ? deliverTime_ : deliverTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.payload_ = payloadBuilder_ == null ? 
payload_ : payloadBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.Mutation.Send) { + return mergeFrom((com.google.spanner.v1.Mutation.Send) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.Mutation.Send other) { + if (other == com.google.spanner.v1.Mutation.Send.getDefaultInstance()) return this; + if (!other.getQueue().isEmpty()) { + queue_ = other.queue_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasKey()) { + mergeKey(other.getKey()); + } + if (other.hasDeliverTime()) { + mergeDeliverTime(other.getDeliverTime()); + } + if (other.hasPayload()) { + mergePayload(other.getPayload()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + queue_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(internalGetKeyFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetDeliverTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetPayloadFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + 
break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object queue_ = ""; + + /** + * + * + *
    +       * Required. The queue to which the message will be sent.
    +       * 
    + * + * string queue = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The queue. + */ + public java.lang.String getQueue() { + java.lang.Object ref = queue_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + queue_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * Required. The queue to which the message will be sent.
    +       * 
    + * + * string queue = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for queue. + */ + public com.google.protobuf.ByteString getQueueBytes() { + java.lang.Object ref = queue_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + queue_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * Required. The queue to which the message will be sent.
    +       * 
    + * + * string queue = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The queue to set. + * @return This builder for chaining. + */ + public Builder setQueue(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + queue_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The queue to which the message will be sent.
    +       * 
    + * + * string queue = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearQueue() { + queue_ = getDefaultInstance().getQueue(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The queue to which the message will be sent.
    +       * 
    + * + * string queue = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for queue to set. + * @return This builder for chaining. + */ + public Builder setQueueBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + queue_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.ListValue key_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder> + keyBuilder_; + + /** + * + * + *
    +       * Required. The primary key of the message to be sent.
    +       * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the key field is set. + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +       * Required. The primary key of the message to be sent.
    +       * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The key. + */ + public com.google.protobuf.ListValue getKey() { + if (keyBuilder_ == null) { + return key_ == null ? com.google.protobuf.ListValue.getDefaultInstance() : key_; + } else { + return keyBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * Required. The primary key of the message to be sent.
    +       * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder setKey(com.google.protobuf.ListValue value) { + if (keyBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + key_ = value; + } else { + keyBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The primary key of the message to be sent.
    +       * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder setKey(com.google.protobuf.ListValue.Builder builderForValue) { + if (keyBuilder_ == null) { + key_ = builderForValue.build(); + } else { + keyBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The primary key of the message to be sent.
    +       * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder mergeKey(com.google.protobuf.ListValue value) { + if (keyBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && key_ != null + && key_ != com.google.protobuf.ListValue.getDefaultInstance()) { + getKeyBuilder().mergeFrom(value); + } else { + key_ = value; + } + } else { + keyBuilder_.mergeFrom(value); + } + if (key_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * Required. The primary key of the message to be sent.
    +       * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder clearKey() { + bitField0_ = (bitField0_ & ~0x00000002); + key_ = null; + if (keyBuilder_ != null) { + keyBuilder_.dispose(); + keyBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The primary key of the message to be sent.
    +       * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + public com.google.protobuf.ListValue.Builder getKeyBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetKeyFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Required. The primary key of the message to be sent.
    +       * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + public com.google.protobuf.ListValueOrBuilder getKeyOrBuilder() { + if (keyBuilder_ != null) { + return keyBuilder_.getMessageOrBuilder(); + } else { + return key_ == null ? com.google.protobuf.ListValue.getDefaultInstance() : key_; + } + } + + /** + * + * + *
    +       * Required. The primary key of the message to be sent.
    +       * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder> + internalGetKeyFieldBuilder() { + if (keyBuilder_ == null) { + keyBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder>( + getKey(), getParentForChildren(), isClean()); + key_ = null; + } + return keyBuilder_; + } + + private com.google.protobuf.Timestamp deliverTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + deliverTimeBuilder_; + + /** + * + * + *
    +       * The time at which Spanner will begin attempting to deliver the message.
    +       * If `deliver_time` is not set, Spanner will deliver the message
    +       * immediately. If `deliver_time` is in the past, Spanner will replace it
    +       * with a value closer to the current time.
    +       * 
    + * + * .google.protobuf.Timestamp deliver_time = 3; + * + * @return Whether the deliverTime field is set. + */ + public boolean hasDeliverTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +       * The time at which Spanner will begin attempting to deliver the message.
    +       * If `deliver_time` is not set, Spanner will deliver the message
    +       * immediately. If `deliver_time` is in the past, Spanner will replace it
    +       * with a value closer to the current time.
    +       * 
    + * + * .google.protobuf.Timestamp deliver_time = 3; + * + * @return The deliverTime. + */ + public com.google.protobuf.Timestamp getDeliverTime() { + if (deliverTimeBuilder_ == null) { + return deliverTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : deliverTime_; + } else { + return deliverTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * The time at which Spanner will begin attempting to deliver the message.
    +       * If `deliver_time` is not set, Spanner will deliver the message
    +       * immediately. If `deliver_time` is in the past, Spanner will replace it
    +       * with a value closer to the current time.
    +       * 
    + * + * .google.protobuf.Timestamp deliver_time = 3; + */ + public Builder setDeliverTime(com.google.protobuf.Timestamp value) { + if (deliverTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + deliverTime_ = value; + } else { + deliverTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The time at which Spanner will begin attempting to deliver the message.
    +       * If `deliver_time` is not set, Spanner will deliver the message
    +       * immediately. If `deliver_time` is in the past, Spanner will replace it
    +       * with a value closer to the current time.
    +       * 
    + * + * .google.protobuf.Timestamp deliver_time = 3; + */ + public Builder setDeliverTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (deliverTimeBuilder_ == null) { + deliverTime_ = builderForValue.build(); + } else { + deliverTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The time at which Spanner will begin attempting to deliver the message.
    +       * If `deliver_time` is not set, Spanner will deliver the message
    +       * immediately. If `deliver_time` is in the past, Spanner will replace it
    +       * with a value closer to the current time.
    +       * 
    + * + * .google.protobuf.Timestamp deliver_time = 3; + */ + public Builder mergeDeliverTime(com.google.protobuf.Timestamp value) { + if (deliverTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && deliverTime_ != null + && deliverTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getDeliverTimeBuilder().mergeFrom(value); + } else { + deliverTime_ = value; + } + } else { + deliverTimeBuilder_.mergeFrom(value); + } + if (deliverTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * The time at which Spanner will begin attempting to deliver the message.
    +       * If `deliver_time` is not set, Spanner will deliver the message
    +       * immediately. If `deliver_time` is in the past, Spanner will replace it
    +       * with a value closer to the current time.
    +       * 
    + * + * .google.protobuf.Timestamp deliver_time = 3; + */ + public Builder clearDeliverTime() { + bitField0_ = (bitField0_ & ~0x00000004); + deliverTime_ = null; + if (deliverTimeBuilder_ != null) { + deliverTimeBuilder_.dispose(); + deliverTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * The time at which Spanner will begin attempting to deliver the message.
    +       * If `deliver_time` is not set, Spanner will deliver the message
    +       * immediately. If `deliver_time` is in the past, Spanner will replace it
    +       * with a value closer to the current time.
    +       * 
    + * + * .google.protobuf.Timestamp deliver_time = 3; + */ + public com.google.protobuf.Timestamp.Builder getDeliverTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetDeliverTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * The time at which Spanner will begin attempting to deliver the message.
    +       * If `deliver_time` is not set, Spanner will deliver the message
    +       * immediately. If `deliver_time` is in the past, Spanner will replace it
    +       * with a value closer to the current time.
    +       * 
    + * + * .google.protobuf.Timestamp deliver_time = 3; + */ + public com.google.protobuf.TimestampOrBuilder getDeliverTimeOrBuilder() { + if (deliverTimeBuilder_ != null) { + return deliverTimeBuilder_.getMessageOrBuilder(); + } else { + return deliverTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : deliverTime_; + } + } + + /** + * + * + *
    +       * The time at which Spanner will begin attempting to deliver the message.
    +       * If `deliver_time` is not set, Spanner will deliver the message
    +       * immediately. If `deliver_time` is in the past, Spanner will replace it
    +       * with a value closer to the current time.
    +       * 
    + * + * .google.protobuf.Timestamp deliver_time = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetDeliverTimeFieldBuilder() { + if (deliverTimeBuilder_ == null) { + deliverTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getDeliverTime(), getParentForChildren(), isClean()); + deliverTime_ = null; + } + return deliverTimeBuilder_; + } + + private com.google.protobuf.Value payload_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Value, + com.google.protobuf.Value.Builder, + com.google.protobuf.ValueOrBuilder> + payloadBuilder_; + + /** + * + * + *
    +       * The payload of the message.
    +       * 
    + * + * .google.protobuf.Value payload = 4; + * + * @return Whether the payload field is set. + */ + public boolean hasPayload() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +       * The payload of the message.
    +       * 
    + * + * .google.protobuf.Value payload = 4; + * + * @return The payload. + */ + public com.google.protobuf.Value getPayload() { + if (payloadBuilder_ == null) { + return payload_ == null ? com.google.protobuf.Value.getDefaultInstance() : payload_; + } else { + return payloadBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * The payload of the message.
    +       * 
    + * + * .google.protobuf.Value payload = 4; + */ + public Builder setPayload(com.google.protobuf.Value value) { + if (payloadBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + payload_ = value; + } else { + payloadBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The payload of the message.
    +       * 
    + * + * .google.protobuf.Value payload = 4; + */ + public Builder setPayload(com.google.protobuf.Value.Builder builderForValue) { + if (payloadBuilder_ == null) { + payload_ = builderForValue.build(); + } else { + payloadBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The payload of the message.
    +       * 
    + * + * .google.protobuf.Value payload = 4; + */ + public Builder mergePayload(com.google.protobuf.Value value) { + if (payloadBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && payload_ != null + && payload_ != com.google.protobuf.Value.getDefaultInstance()) { + getPayloadBuilder().mergeFrom(value); + } else { + payload_ = value; + } + } else { + payloadBuilder_.mergeFrom(value); + } + if (payload_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * The payload of the message.
    +       * 
    + * + * .google.protobuf.Value payload = 4; + */ + public Builder clearPayload() { + bitField0_ = (bitField0_ & ~0x00000008); + payload_ = null; + if (payloadBuilder_ != null) { + payloadBuilder_.dispose(); + payloadBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * The payload of the message.
    +       * 
    + * + * .google.protobuf.Value payload = 4; + */ + public com.google.protobuf.Value.Builder getPayloadBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetPayloadFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * The payload of the message.
    +       * 
    + * + * .google.protobuf.Value payload = 4; + */ + public com.google.protobuf.ValueOrBuilder getPayloadOrBuilder() { + if (payloadBuilder_ != null) { + return payloadBuilder_.getMessageOrBuilder(); + } else { + return payload_ == null ? com.google.protobuf.Value.getDefaultInstance() : payload_; + } + } + + /** + * + * + *
    +       * The payload of the message.
    +       * 
    + * + * .google.protobuf.Value payload = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Value, + com.google.protobuf.Value.Builder, + com.google.protobuf.ValueOrBuilder> + internalGetPayloadFieldBuilder() { + if (payloadBuilder_ == null) { + payloadBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Value, + com.google.protobuf.Value.Builder, + com.google.protobuf.ValueOrBuilder>( + getPayload(), getParentForChildren(), isClean()); + payload_ = null; + } + return payloadBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.Mutation.Send) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.Mutation.Send) + private static final com.google.spanner.v1.Mutation.Send DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.Mutation.Send(); + } + + public static com.google.spanner.v1.Mutation.Send getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Send parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.Mutation.Send getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface AckOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.Mutation.Ack) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * Required. The queue where the message to be acked is stored.
    +     * 
    + * + * string queue = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The queue. + */ + java.lang.String getQueue(); + + /** + * + * + *
    +     * Required. The queue where the message to be acked is stored.
    +     * 
    + * + * string queue = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for queue. + */ + com.google.protobuf.ByteString getQueueBytes(); + + /** + * + * + *
    +     * Required. The primary key of the message to be acked.
    +     * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the key field is set. + */ + boolean hasKey(); + + /** + * + * + *
    +     * Required. The primary key of the message to be acked.
    +     * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The key. + */ + com.google.protobuf.ListValue getKey(); + + /** + * + * + *
    +     * Required. The primary key of the message to be acked.
    +     * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + com.google.protobuf.ListValueOrBuilder getKeyOrBuilder(); + + /** + * + * + *
    +     * By default, an attempt to ack a message that does not exist will fail
    +     * with a `NOT_FOUND` error. With `ignore_not_found` set to true, the ack
    +     * will succeed even if the message does not exist. This is useful for
    +     * unconditionally acking a message, even if it is missing or has already
    +     * been acked.
    +     * 
    + * + * bool ignore_not_found = 3; + * + * @return The ignoreNotFound. + */ + boolean getIgnoreNotFound(); + } + + /** + * + * + *
    +   * Arguments to [ack][google.spanner.v1.Mutation.ack] operations.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.Mutation.Ack} + */ + public static final class Ack extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.Mutation.Ack) + AckOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Ack"); + } + + // Use Ack.newBuilder() to construct. + private Ack(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Ack() { + queue_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_Ack_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_Ack_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Mutation.Ack.class, + com.google.spanner.v1.Mutation.Ack.Builder.class); + } + + private int bitField0_; + public static final int QUEUE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object queue_ = ""; + + /** + * + * + *
    +     * Required. The queue where the message to be acked is stored.
    +     * 
    + * + * string queue = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The queue. + */ + @java.lang.Override + public java.lang.String getQueue() { + java.lang.Object ref = queue_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + queue_ = s; + return s; + } + } + + /** + * + * + *
    +     * Required. The queue where the message to be acked is stored.
    +     * 
    + * + * string queue = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for queue. + */ + @java.lang.Override + public com.google.protobuf.ByteString getQueueBytes() { + java.lang.Object ref = queue_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + queue_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int KEY_FIELD_NUMBER = 2; + private com.google.protobuf.ListValue key_; + + /** + * + * + *
    +     * Required. The primary key of the message to be acked.
    +     * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the key field is set. + */ + @java.lang.Override + public boolean hasKey() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Required. The primary key of the message to be acked.
    +     * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The key. + */ + @java.lang.Override + public com.google.protobuf.ListValue getKey() { + return key_ == null ? com.google.protobuf.ListValue.getDefaultInstance() : key_; + } + + /** + * + * + *
    +     * Required. The primary key of the message to be acked.
    +     * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + @java.lang.Override + public com.google.protobuf.ListValueOrBuilder getKeyOrBuilder() { + return key_ == null ? com.google.protobuf.ListValue.getDefaultInstance() : key_; + } + + public static final int IGNORE_NOT_FOUND_FIELD_NUMBER = 3; + private boolean ignoreNotFound_ = false; + + /** + * + * + *
    +     * By default, an attempt to ack a message that does not exist will fail
    +     * with a `NOT_FOUND` error. With `ignore_not_found` set to true, the ack
    +     * will succeed even if the message does not exist. This is useful for
    +     * unconditionally acking a message, even if it is missing or has already
    +     * been acked.
    +     * 
    + * + * bool ignore_not_found = 3; + * + * @return The ignoreNotFound. + */ + @java.lang.Override + public boolean getIgnoreNotFound() { + return ignoreNotFound_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(queue_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, queue_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getKey()); + } + if (ignoreNotFound_ != false) { + output.writeBool(3, ignoreNotFound_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(queue_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, queue_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getKey()); + } + if (ignoreNotFound_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, ignoreNotFound_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.Mutation.Ack)) { + return super.equals(obj); + } + com.google.spanner.v1.Mutation.Ack other = (com.google.spanner.v1.Mutation.Ack) obj; + + if (!getQueue().equals(other.getQueue())) return false; + if (hasKey() != other.hasKey()) return false; + if (hasKey()) { + if 
(!getKey().equals(other.getKey())) return false; + } + if (getIgnoreNotFound() != other.getIgnoreNotFound()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + QUEUE_FIELD_NUMBER; + hash = (53 * hash) + getQueue().hashCode(); + if (hasKey()) { + hash = (37 * hash) + KEY_FIELD_NUMBER; + hash = (53 * hash) + getKey().hashCode(); + } + hash = (37 * hash) + IGNORE_NOT_FOUND_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getIgnoreNotFound()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.Mutation.Ack parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Mutation.Ack parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation.Ack parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Mutation.Ack parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation.Ack parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.v1.Mutation.Ack parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation.Ack parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Mutation.Ack parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation.Ack parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Mutation.Ack parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation.Ack parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Mutation.Ack parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + 
public static Builder newBuilder(com.google.spanner.v1.Mutation.Ack prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * Arguments to [ack][google.spanner.v1.Mutation.ack] operations.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.Mutation.Ack} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.Mutation.Ack) + com.google.spanner.v1.Mutation.AckOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_Ack_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_Ack_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Mutation.Ack.class, + com.google.spanner.v1.Mutation.Ack.Builder.class); + } + + // Construct using com.google.spanner.v1.Mutation.Ack.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetKeyFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + queue_ = ""; + key_ = null; + if (keyBuilder_ != null) { + keyBuilder_.dispose(); + keyBuilder_ = null; + } + ignoreNotFound_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_Ack_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.Mutation.Ack getDefaultInstanceForType() { + return com.google.spanner.v1.Mutation.Ack.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.Mutation.Ack 
build() { + com.google.spanner.v1.Mutation.Ack result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.Mutation.Ack buildPartial() { + com.google.spanner.v1.Mutation.Ack result = new com.google.spanner.v1.Mutation.Ack(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.Mutation.Ack result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.queue_ = queue_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.key_ = keyBuilder_ == null ? key_ : keyBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.ignoreNotFound_ = ignoreNotFound_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.Mutation.Ack) { + return mergeFrom((com.google.spanner.v1.Mutation.Ack) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.Mutation.Ack other) { + if (other == com.google.spanner.v1.Mutation.Ack.getDefaultInstance()) return this; + if (!other.getQueue().isEmpty()) { + queue_ = other.queue_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasKey()) { + mergeKey(other.getKey()); + } + if (other.getIgnoreNotFound() != false) { + setIgnoreNotFound(other.getIgnoreNotFound()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { 
+ if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + queue_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(internalGetKeyFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + ignoreNotFound_ = input.readBool(); + bitField0_ |= 0x00000004; + break; + } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object queue_ = ""; + + /** + * + * + *
    +       * Required. The queue where the message to be acked is stored.
    +       * 
    + * + * string queue = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The queue. + */ + public java.lang.String getQueue() { + java.lang.Object ref = queue_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + queue_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * Required. The queue where the message to be acked is stored.
    +       * 
    + * + * string queue = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for queue. + */ + public com.google.protobuf.ByteString getQueueBytes() { + java.lang.Object ref = queue_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + queue_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * Required. The queue where the message to be acked is stored.
    +       * 
    + * + * string queue = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The queue to set. + * @return This builder for chaining. + */ + public Builder setQueue(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + queue_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The queue where the message to be acked is stored.
    +       * 
    + * + * string queue = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearQueue() { + queue_ = getDefaultInstance().getQueue(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The queue where the message to be acked is stored.
    +       * 
    + * + * string queue = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for queue to set. + * @return This builder for chaining. + */ + public Builder setQueueBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + queue_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.ListValue key_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder> + keyBuilder_; + + /** + * + * + *
    +       * Required. The primary key of the message to be acked.
    +       * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the key field is set. + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +       * Required. The primary key of the message to be acked.
    +       * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The key. + */ + public com.google.protobuf.ListValue getKey() { + if (keyBuilder_ == null) { + return key_ == null ? com.google.protobuf.ListValue.getDefaultInstance() : key_; + } else { + return keyBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * Required. The primary key of the message to be acked.
    +       * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder setKey(com.google.protobuf.ListValue value) { + if (keyBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + key_ = value; + } else { + keyBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The primary key of the message to be acked.
    +       * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder setKey(com.google.protobuf.ListValue.Builder builderForValue) { + if (keyBuilder_ == null) { + key_ = builderForValue.build(); + } else { + keyBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The primary key of the message to be acked.
    +       * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder mergeKey(com.google.protobuf.ListValue value) { + if (keyBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && key_ != null + && key_ != com.google.protobuf.ListValue.getDefaultInstance()) { + getKeyBuilder().mergeFrom(value); + } else { + key_ = value; + } + } else { + keyBuilder_.mergeFrom(value); + } + if (key_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * Required. The primary key of the message to be acked.
    +       * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder clearKey() { + bitField0_ = (bitField0_ & ~0x00000002); + key_ = null; + if (keyBuilder_ != null) { + keyBuilder_.dispose(); + keyBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * Required. The primary key of the message to be acked.
    +       * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + public com.google.protobuf.ListValue.Builder getKeyBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetKeyFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Required. The primary key of the message to be acked.
    +       * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + public com.google.protobuf.ListValueOrBuilder getKeyOrBuilder() { + if (keyBuilder_ != null) { + return keyBuilder_.getMessageOrBuilder(); + } else { + return key_ == null ? com.google.protobuf.ListValue.getDefaultInstance() : key_; + } + } + + /** + * + * + *
    +       * Required. The primary key of the message to be acked.
    +       * 
    + * + * .google.protobuf.ListValue key = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder> + internalGetKeyFieldBuilder() { + if (keyBuilder_ == null) { + keyBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder>( + getKey(), getParentForChildren(), isClean()); + key_ = null; + } + return keyBuilder_; + } + + private boolean ignoreNotFound_; + + /** + * + * + *
    +       * By default, an attempt to ack a message that does not exist will fail
    +       * with a `NOT_FOUND` error. With `ignore_not_found` set to true, the ack
    +       * will succeed even if the message does not exist. This is useful for
    +       * unconditionally acking a message, even if it is missing or has already
    +       * been acked.
    +       * 
    + * + * bool ignore_not_found = 3; + * + * @return The ignoreNotFound. + */ + @java.lang.Override + public boolean getIgnoreNotFound() { + return ignoreNotFound_; + } + + /** + * + * + *
    +       * By default, an attempt to ack a message that does not exist will fail
    +       * with a `NOT_FOUND` error. With `ignore_not_found` set to true, the ack
    +       * will succeed even if the message does not exist. This is useful for
    +       * unconditionally acking a message, even if it is missing or has already
    +       * been acked.
    +       * 
    + * + * bool ignore_not_found = 3; + * + * @param value The ignoreNotFound to set. + * @return This builder for chaining. + */ + public Builder setIgnoreNotFound(boolean value) { + + ignoreNotFound_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +       * By default, an attempt to ack a message that does not exist will fail
    +       * with a `NOT_FOUND` error. With `ignore_not_found` set to true, the ack
    +       * will succeed even if the message does not exist. This is useful for
    +       * unconditionally acking a message, even if it is missing or has already
    +       * been acked.
    +       * 
    + * + * bool ignore_not_found = 3; + * + * @return This builder for chaining. + */ + public Builder clearIgnoreNotFound() { + bitField0_ = (bitField0_ & ~0x00000004); + ignoreNotFound_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.Mutation.Ack) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.Mutation.Ack) + private static final com.google.spanner.v1.Mutation.Ack DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.Mutation.Ack(); + } + + public static com.google.spanner.v1.Mutation.Ack getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Ack parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.Mutation.Ack getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int operationCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object operation_; + + public enum 
OperationCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + INSERT(1), + UPDATE(2), + INSERT_OR_UPDATE(3), + REPLACE(4), + DELETE(5), + SEND(6), + ACK(7), + OPERATION_NOT_SET(0); + private final int value; + + private OperationCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OperationCase valueOf(int value) { + return forNumber(value); + } + + public static OperationCase forNumber(int value) { + switch (value) { + case 1: + return INSERT; + case 2: + return UPDATE; + case 3: + return INSERT_OR_UPDATE; + case 4: + return REPLACE; + case 5: + return DELETE; + case 6: + return SEND; + case 7: + return ACK; + case 0: + return OPERATION_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public OperationCase getOperationCase() { + return OperationCase.forNumber(operationCase_); + } + + public static final int INSERT_FIELD_NUMBER = 1; + + /** + * + * + *
    +   * Insert new rows in a table. If any of the rows already exist,
    +   * the write or transaction fails with error `ALREADY_EXISTS`.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write insert = 1; + * + * @return Whether the insert field is set. + */ + @java.lang.Override + public boolean hasInsert() { + return operationCase_ == 1; + } + + /** + * + * + *
    +   * Insert new rows in a table. If any of the rows already exist,
    +   * the write or transaction fails with error `ALREADY_EXISTS`.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write insert = 1; + * + * @return The insert. + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.Write getInsert() { + if (operationCase_ == 1) { + return (com.google.spanner.v1.Mutation.Write) operation_; + } + return com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } + + /** + * + * + *
    +   * Insert new rows in a table. If any of the rows already exist,
    +   * the write or transaction fails with error `ALREADY_EXISTS`.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write insert = 1; + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.WriteOrBuilder getInsertOrBuilder() { + if (operationCase_ == 1) { + return (com.google.spanner.v1.Mutation.Write) operation_; + } + return com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } + + public static final int UPDATE_FIELD_NUMBER = 2; + + /** + * + * + *
    +   * Update existing rows in a table. If any of the rows does not
    +   * already exist, the transaction fails with error `NOT_FOUND`.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write update = 2; + * + * @return Whether the update field is set. + */ + @java.lang.Override + public boolean hasUpdate() { + return operationCase_ == 2; + } + + /** + * + * + *
    +   * Update existing rows in a table. If any of the rows does not
    +   * already exist, the transaction fails with error `NOT_FOUND`.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write update = 2; + * + * @return The update. + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.Write getUpdate() { + if (operationCase_ == 2) { + return (com.google.spanner.v1.Mutation.Write) operation_; + } + return com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } + + /** + * + * + *
    +   * Update existing rows in a table. If any of the rows does not
    +   * already exist, the transaction fails with error `NOT_FOUND`.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write update = 2; + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.WriteOrBuilder getUpdateOrBuilder() { + if (operationCase_ == 2) { + return (com.google.spanner.v1.Mutation.Write) operation_; + } + return com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } + + public static final int INSERT_OR_UPDATE_FIELD_NUMBER = 3; + + /** + * + * + *
    +   * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +   * already exists, then its column values are overwritten with the ones
    +   * provided. Any column values not explicitly written are preserved.
    +   *
    +   * When using
    +   * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], just as
    +   * when using [insert][google.spanner.v1.Mutation.insert], all `NOT NULL`
    +   * columns in the table must be given a value. This holds true even when the
    +   * row already exists and will therefore actually be updated.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write insert_or_update = 3; + * + * @return Whether the insertOrUpdate field is set. + */ + @java.lang.Override + public boolean hasInsertOrUpdate() { + return operationCase_ == 3; + } + + /** + * + * + *
    +   * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +   * already exists, then its column values are overwritten with the ones
    +   * provided. Any column values not explicitly written are preserved.
    +   *
    +   * When using
    +   * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], just as
    +   * when using [insert][google.spanner.v1.Mutation.insert], all `NOT NULL`
    +   * columns in the table must be given a value. This holds true even when the
    +   * row already exists and will therefore actually be updated.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write insert_or_update = 3; + * + * @return The insertOrUpdate. + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.Write getInsertOrUpdate() { + if (operationCase_ == 3) { + return (com.google.spanner.v1.Mutation.Write) operation_; + } + return com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } + + /** + * + * + *
    +   * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +   * already exists, then its column values are overwritten with the ones
    +   * provided. Any column values not explicitly written are preserved.
    +   *
    +   * When using
    +   * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], just as
    +   * when using [insert][google.spanner.v1.Mutation.insert], all `NOT NULL`
    +   * columns in the table must be given a value. This holds true even when the
    +   * row already exists and will therefore actually be updated.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write insert_or_update = 3; + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.WriteOrBuilder getInsertOrUpdateOrBuilder() { + if (operationCase_ == 3) { + return (com.google.spanner.v1.Mutation.Write) operation_; + } + return com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } + + public static final int REPLACE_FIELD_NUMBER = 4; + + /** + * + * + *
    +   * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +   * already exists, it is deleted, and the column values provided are
    +   * inserted instead. Unlike
    +   * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], this
    +   * means any values not explicitly written become `NULL`.
    +   *
    +   * In an interleaved table, if you create the child table with the
    +   * `ON DELETE CASCADE` annotation, then replacing a parent row
    +   * also deletes the child rows. Otherwise, you must delete the
    +   * child rows before you replace the parent row.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write replace = 4; + * + * @return Whether the replace field is set. + */ + @java.lang.Override + public boolean hasReplace() { + return operationCase_ == 4; + } + + /** + * + * + *
    +   * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +   * already exists, it is deleted, and the column values provided are
    +   * inserted instead. Unlike
    +   * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], this
    +   * means any values not explicitly written become `NULL`.
    +   *
    +   * In an interleaved table, if you create the child table with the
    +   * `ON DELETE CASCADE` annotation, then replacing a parent row
    +   * also deletes the child rows. Otherwise, you must delete the
    +   * child rows before you replace the parent row.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write replace = 4; + * + * @return The replace. + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.Write getReplace() { + if (operationCase_ == 4) { + return (com.google.spanner.v1.Mutation.Write) operation_; + } + return com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } + + /** + * + * + *
    +   * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +   * already exists, it is deleted, and the column values provided are
    +   * inserted instead. Unlike
    +   * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], this
    +   * means any values not explicitly written become `NULL`.
    +   *
    +   * In an interleaved table, if you create the child table with the
    +   * `ON DELETE CASCADE` annotation, then replacing a parent row
    +   * also deletes the child rows. Otherwise, you must delete the
    +   * child rows before you replace the parent row.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write replace = 4; + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.WriteOrBuilder getReplaceOrBuilder() { + if (operationCase_ == 4) { + return (com.google.spanner.v1.Mutation.Write) operation_; + } + return com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } + + public static final int DELETE_FIELD_NUMBER = 5; + + /** + * + * + *
    +   * Delete rows from a table. Succeeds whether or not the named
    +   * rows were present.
    +   * 
    + * + * .google.spanner.v1.Mutation.Delete delete = 5; + * + * @return Whether the delete field is set. + */ + @java.lang.Override + public boolean hasDelete() { + return operationCase_ == 5; + } + + /** + * + * + *
    +   * Delete rows from a table. Succeeds whether or not the named
    +   * rows were present.
    +   * 
    + * + * .google.spanner.v1.Mutation.Delete delete = 5; + * + * @return The delete. + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.Delete getDelete() { + if (operationCase_ == 5) { + return (com.google.spanner.v1.Mutation.Delete) operation_; + } + return com.google.spanner.v1.Mutation.Delete.getDefaultInstance(); + } + + /** + * + * + *
    +   * Delete rows from a table. Succeeds whether or not the named
    +   * rows were present.
    +   * 
    + * + * .google.spanner.v1.Mutation.Delete delete = 5; + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.DeleteOrBuilder getDeleteOrBuilder() { + if (operationCase_ == 5) { + return (com.google.spanner.v1.Mutation.Delete) operation_; + } + return com.google.spanner.v1.Mutation.Delete.getDefaultInstance(); + } + + public static final int SEND_FIELD_NUMBER = 6; + + /** + * + * + *
    +   * Send a message to a queue.
    +   * 
    + * + * .google.spanner.v1.Mutation.Send send = 6; + * + * @return Whether the send field is set. + */ + @java.lang.Override + public boolean hasSend() { + return operationCase_ == 6; + } + + /** + * + * + *
    +   * Send a message to a queue.
    +   * 
    + * + * .google.spanner.v1.Mutation.Send send = 6; + * + * @return The send. + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.Send getSend() { + if (operationCase_ == 6) { + return (com.google.spanner.v1.Mutation.Send) operation_; + } + return com.google.spanner.v1.Mutation.Send.getDefaultInstance(); + } + + /** + * + * + *
    +   * Send a message to a queue.
    +   * 
    + * + * .google.spanner.v1.Mutation.Send send = 6; + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.SendOrBuilder getSendOrBuilder() { + if (operationCase_ == 6) { + return (com.google.spanner.v1.Mutation.Send) operation_; + } + return com.google.spanner.v1.Mutation.Send.getDefaultInstance(); + } + + public static final int ACK_FIELD_NUMBER = 7; + + /** + * + * + *
    +   * Ack a message from a queue.
    +   * 
    + * + * .google.spanner.v1.Mutation.Ack ack = 7; + * + * @return Whether the ack field is set. + */ + @java.lang.Override + public boolean hasAck() { + return operationCase_ == 7; + } + + /** + * + * + *
    +   * Ack a message from a queue.
    +   * 
    + * + * .google.spanner.v1.Mutation.Ack ack = 7; + * + * @return The ack. + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.Ack getAck() { + if (operationCase_ == 7) { + return (com.google.spanner.v1.Mutation.Ack) operation_; + } + return com.google.spanner.v1.Mutation.Ack.getDefaultInstance(); + } + + /** + * + * + *
    +   * Ack a message from a queue.
    +   * 
    + * + * .google.spanner.v1.Mutation.Ack ack = 7; + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.AckOrBuilder getAckOrBuilder() { + if (operationCase_ == 7) { + return (com.google.spanner.v1.Mutation.Ack) operation_; + } + return com.google.spanner.v1.Mutation.Ack.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (operationCase_ == 1) { + output.writeMessage(1, (com.google.spanner.v1.Mutation.Write) operation_); + } + if (operationCase_ == 2) { + output.writeMessage(2, (com.google.spanner.v1.Mutation.Write) operation_); + } + if (operationCase_ == 3) { + output.writeMessage(3, (com.google.spanner.v1.Mutation.Write) operation_); + } + if (operationCase_ == 4) { + output.writeMessage(4, (com.google.spanner.v1.Mutation.Write) operation_); + } + if (operationCase_ == 5) { + output.writeMessage(5, (com.google.spanner.v1.Mutation.Delete) operation_); + } + if (operationCase_ == 6) { + output.writeMessage(6, (com.google.spanner.v1.Mutation.Send) operation_); + } + if (operationCase_ == 7) { + output.writeMessage(7, (com.google.spanner.v1.Mutation.Ack) operation_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (operationCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, (com.google.spanner.v1.Mutation.Write) operation_); + } + if (operationCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, (com.google.spanner.v1.Mutation.Write) operation_); + } + if 
(operationCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, (com.google.spanner.v1.Mutation.Write) operation_); + } + if (operationCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.spanner.v1.Mutation.Write) operation_); + } + if (operationCase_ == 5) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 5, (com.google.spanner.v1.Mutation.Delete) operation_); + } + if (operationCase_ == 6) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 6, (com.google.spanner.v1.Mutation.Send) operation_); + } + if (operationCase_ == 7) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 7, (com.google.spanner.v1.Mutation.Ack) operation_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.Mutation)) { + return super.equals(obj); + } + com.google.spanner.v1.Mutation other = (com.google.spanner.v1.Mutation) obj; + + if (!getOperationCase().equals(other.getOperationCase())) return false; + switch (operationCase_) { + case 1: + if (!getInsert().equals(other.getInsert())) return false; + break; + case 2: + if (!getUpdate().equals(other.getUpdate())) return false; + break; + case 3: + if (!getInsertOrUpdate().equals(other.getInsertOrUpdate())) return false; + break; + case 4: + if (!getReplace().equals(other.getReplace())) return false; + break; + case 5: + if (!getDelete().equals(other.getDelete())) return false; + break; + case 6: + if (!getSend().equals(other.getSend())) return false; + break; + case 7: + if (!getAck().equals(other.getAck())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int 
hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (operationCase_) { + case 1: + hash = (37 * hash) + INSERT_FIELD_NUMBER; + hash = (53 * hash) + getInsert().hashCode(); + break; + case 2: + hash = (37 * hash) + UPDATE_FIELD_NUMBER; + hash = (53 * hash) + getUpdate().hashCode(); + break; + case 3: + hash = (37 * hash) + INSERT_OR_UPDATE_FIELD_NUMBER; + hash = (53 * hash) + getInsertOrUpdate().hashCode(); + break; + case 4: + hash = (37 * hash) + REPLACE_FIELD_NUMBER; + hash = (53 * hash) + getReplace().hashCode(); + break; + case 5: + hash = (37 * hash) + DELETE_FIELD_NUMBER; + hash = (53 * hash) + getDelete().hashCode(); + break; + case 6: + hash = (37 * hash) + SEND_FIELD_NUMBER; + hash = (53 * hash) + getSend().hashCode(); + break; + case 7: + hash = (37 * hash) + ACK_FIELD_NUMBER; + hash = (53 * hash) + getAck().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.Mutation parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Mutation parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Mutation parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + 
} + + public static com.google.spanner.v1.Mutation parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Mutation parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Mutation parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Mutation parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Mutation parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Mutation parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + 
@java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.Mutation prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * A modification to one or more Cloud Spanner rows.  Mutations can be
    +   * applied to a Cloud Spanner database by sending them in a
    +   * [Commit][google.spanner.v1.Spanner.Commit] call.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.Mutation} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.Mutation) + com.google.spanner.v1.MutationOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Mutation.class, com.google.spanner.v1.Mutation.Builder.class); + } + + // Construct using com.google.spanner.v1.Mutation.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (insertBuilder_ != null) { + insertBuilder_.clear(); + } + if (updateBuilder_ != null) { + updateBuilder_.clear(); + } + if (insertOrUpdateBuilder_ != null) { + insertOrUpdateBuilder_.clear(); + } + if (replaceBuilder_ != null) { + replaceBuilder_.clear(); + } + if (deleteBuilder_ != null) { + deleteBuilder_.clear(); + } + if (sendBuilder_ != null) { + sendBuilder_.clear(); + } + if (ackBuilder_ != null) { + ackBuilder_.clear(); + } + operationCase_ = 0; + operation_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.MutationProto + .internal_static_google_spanner_v1_Mutation_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.Mutation getDefaultInstanceForType() { + return com.google.spanner.v1.Mutation.getDefaultInstance(); + } + + 
@java.lang.Override + public com.google.spanner.v1.Mutation build() { + com.google.spanner.v1.Mutation result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.Mutation buildPartial() { + com.google.spanner.v1.Mutation result = new com.google.spanner.v1.Mutation(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.Mutation result) { + int from_bitField0_ = bitField0_; + } + + private void buildPartialOneofs(com.google.spanner.v1.Mutation result) { + result.operationCase_ = operationCase_; + result.operation_ = this.operation_; + if (operationCase_ == 1 && insertBuilder_ != null) { + result.operation_ = insertBuilder_.build(); + } + if (operationCase_ == 2 && updateBuilder_ != null) { + result.operation_ = updateBuilder_.build(); + } + if (operationCase_ == 3 && insertOrUpdateBuilder_ != null) { + result.operation_ = insertOrUpdateBuilder_.build(); + } + if (operationCase_ == 4 && replaceBuilder_ != null) { + result.operation_ = replaceBuilder_.build(); + } + if (operationCase_ == 5 && deleteBuilder_ != null) { + result.operation_ = deleteBuilder_.build(); + } + if (operationCase_ == 6 && sendBuilder_ != null) { + result.operation_ = sendBuilder_.build(); + } + if (operationCase_ == 7 && ackBuilder_ != null) { + result.operation_ = ackBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.Mutation) { + return mergeFrom((com.google.spanner.v1.Mutation) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.Mutation other) { + if (other == com.google.spanner.v1.Mutation.getDefaultInstance()) return this; + switch (other.getOperationCase()) { + case 
INSERT: + { + mergeInsert(other.getInsert()); + break; + } + case UPDATE: + { + mergeUpdate(other.getUpdate()); + break; + } + case INSERT_OR_UPDATE: + { + mergeInsertOrUpdate(other.getInsertOrUpdate()); + break; + } + case REPLACE: + { + mergeReplace(other.getReplace()); + break; + } + case DELETE: + { + mergeDelete(other.getDelete()); + break; + } + case SEND: + { + mergeSend(other.getSend()); + break; + } + case ACK: + { + mergeAck(other.getAck()); + break; + } + case OPERATION_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(internalGetInsertFieldBuilder().getBuilder(), extensionRegistry); + operationCase_ = 1; + break; + } // case 10 + case 18: + { + input.readMessage(internalGetUpdateFieldBuilder().getBuilder(), extensionRegistry); + operationCase_ = 2; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetInsertOrUpdateFieldBuilder().getBuilder(), extensionRegistry); + operationCase_ = 3; + break; + } // case 26 + case 34: + { + input.readMessage(internalGetReplaceFieldBuilder().getBuilder(), extensionRegistry); + operationCase_ = 4; + break; + } // case 34 + case 42: + { + input.readMessage(internalGetDeleteFieldBuilder().getBuilder(), extensionRegistry); + operationCase_ = 5; + break; + } // case 42 + case 50: + { + input.readMessage(internalGetSendFieldBuilder().getBuilder(), extensionRegistry); + operationCase_ = 6; + break; + } // case 50 + case 58: + 
{ + input.readMessage(internalGetAckFieldBuilder().getBuilder(), extensionRegistry); + operationCase_ = 7; + break; + } // case 58 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int operationCase_ = 0; + private java.lang.Object operation_; + + public OperationCase getOperationCase() { + return OperationCase.forNumber(operationCase_); + } + + public Builder clearOperation() { + operationCase_ = 0; + operation_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation.Write, + com.google.spanner.v1.Mutation.Write.Builder, + com.google.spanner.v1.Mutation.WriteOrBuilder> + insertBuilder_; + + /** + * + * + *
    +     * Insert new rows in a table. If any of the rows already exist,
    +     * the write or transaction fails with error `ALREADY_EXISTS`.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write insert = 1; + * + * @return Whether the insert field is set. + */ + @java.lang.Override + public boolean hasInsert() { + return operationCase_ == 1; + } + + /** + * + * + *
    +     * Insert new rows in a table. If any of the rows already exist,
    +     * the write or transaction fails with error `ALREADY_EXISTS`.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write insert = 1; + * + * @return The insert. + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.Write getInsert() { + if (insertBuilder_ == null) { + if (operationCase_ == 1) { + return (com.google.spanner.v1.Mutation.Write) operation_; + } + return com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } else { + if (operationCase_ == 1) { + return insertBuilder_.getMessage(); + } + return com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Insert new rows in a table. If any of the rows already exist,
    +     * the write or transaction fails with error `ALREADY_EXISTS`.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write insert = 1; + */ + public Builder setInsert(com.google.spanner.v1.Mutation.Write value) { + if (insertBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + operation_ = value; + onChanged(); + } else { + insertBuilder_.setMessage(value); + } + operationCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Insert new rows in a table. If any of the rows already exist,
    +     * the write or transaction fails with error `ALREADY_EXISTS`.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write insert = 1; + */ + public Builder setInsert(com.google.spanner.v1.Mutation.Write.Builder builderForValue) { + if (insertBuilder_ == null) { + operation_ = builderForValue.build(); + onChanged(); + } else { + insertBuilder_.setMessage(builderForValue.build()); + } + operationCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Insert new rows in a table. If any of the rows already exist,
    +     * the write or transaction fails with error `ALREADY_EXISTS`.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write insert = 1; + */ + public Builder mergeInsert(com.google.spanner.v1.Mutation.Write value) { + if (insertBuilder_ == null) { + if (operationCase_ == 1 + && operation_ != com.google.spanner.v1.Mutation.Write.getDefaultInstance()) { + operation_ = + com.google.spanner.v1.Mutation.Write.newBuilder( + (com.google.spanner.v1.Mutation.Write) operation_) + .mergeFrom(value) + .buildPartial(); + } else { + operation_ = value; + } + onChanged(); + } else { + if (operationCase_ == 1) { + insertBuilder_.mergeFrom(value); + } else { + insertBuilder_.setMessage(value); + } + } + operationCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Insert new rows in a table. If any of the rows already exist,
    +     * the write or transaction fails with error `ALREADY_EXISTS`.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write insert = 1; + */ + public Builder clearInsert() { + if (insertBuilder_ == null) { + if (operationCase_ == 1) { + operationCase_ = 0; + operation_ = null; + onChanged(); + } + } else { + if (operationCase_ == 1) { + operationCase_ = 0; + operation_ = null; + } + insertBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Insert new rows in a table. If any of the rows already exist,
    +     * the write or transaction fails with error `ALREADY_EXISTS`.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write insert = 1; + */ + public com.google.spanner.v1.Mutation.Write.Builder getInsertBuilder() { + return internalGetInsertFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Insert new rows in a table. If any of the rows already exist,
    +     * the write or transaction fails with error `ALREADY_EXISTS`.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write insert = 1; + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.WriteOrBuilder getInsertOrBuilder() { + if ((operationCase_ == 1) && (insertBuilder_ != null)) { + return insertBuilder_.getMessageOrBuilder(); + } else { + if (operationCase_ == 1) { + return (com.google.spanner.v1.Mutation.Write) operation_; + } + return com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Insert new rows in a table. If any of the rows already exist,
    +     * the write or transaction fails with error `ALREADY_EXISTS`.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write insert = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation.Write, + com.google.spanner.v1.Mutation.Write.Builder, + com.google.spanner.v1.Mutation.WriteOrBuilder> + internalGetInsertFieldBuilder() { + if (insertBuilder_ == null) { + if (!(operationCase_ == 1)) { + operation_ = com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } + insertBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation.Write, + com.google.spanner.v1.Mutation.Write.Builder, + com.google.spanner.v1.Mutation.WriteOrBuilder>( + (com.google.spanner.v1.Mutation.Write) operation_, + getParentForChildren(), + isClean()); + operation_ = null; + } + operationCase_ = 1; + onChanged(); + return insertBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation.Write, + com.google.spanner.v1.Mutation.Write.Builder, + com.google.spanner.v1.Mutation.WriteOrBuilder> + updateBuilder_; + + /** + * + * + *
    +     * Update existing rows in a table. If any of the rows does not
    +     * already exist, the transaction fails with error `NOT_FOUND`.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write update = 2; + * + * @return Whether the update field is set. + */ + @java.lang.Override + public boolean hasUpdate() { + return operationCase_ == 2; + } + + /** + * + * + *
    +     * Update existing rows in a table. If any of the rows does not
    +     * already exist, the transaction fails with error `NOT_FOUND`.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write update = 2; + * + * @return The update. + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.Write getUpdate() { + if (updateBuilder_ == null) { + if (operationCase_ == 2) { + return (com.google.spanner.v1.Mutation.Write) operation_; + } + return com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } else { + if (operationCase_ == 2) { + return updateBuilder_.getMessage(); + } + return com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Update existing rows in a table. If any of the rows does not
    +     * already exist, the transaction fails with error `NOT_FOUND`.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write update = 2; + */ + public Builder setUpdate(com.google.spanner.v1.Mutation.Write value) { + if (updateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + operation_ = value; + onChanged(); + } else { + updateBuilder_.setMessage(value); + } + operationCase_ = 2; + return this; + } + + /** + * + * + *
    +     * Update existing rows in a table. If any of the rows does not
    +     * already exist, the transaction fails with error `NOT_FOUND`.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write update = 2; + */ + public Builder setUpdate(com.google.spanner.v1.Mutation.Write.Builder builderForValue) { + if (updateBuilder_ == null) { + operation_ = builderForValue.build(); + onChanged(); + } else { + updateBuilder_.setMessage(builderForValue.build()); + } + operationCase_ = 2; + return this; + } + + /** + * + * + *
    +     * Update existing rows in a table. If any of the rows does not
    +     * already exist, the transaction fails with error `NOT_FOUND`.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write update = 2; + */ + public Builder mergeUpdate(com.google.spanner.v1.Mutation.Write value) { + if (updateBuilder_ == null) { + if (operationCase_ == 2 + && operation_ != com.google.spanner.v1.Mutation.Write.getDefaultInstance()) { + operation_ = + com.google.spanner.v1.Mutation.Write.newBuilder( + (com.google.spanner.v1.Mutation.Write) operation_) + .mergeFrom(value) + .buildPartial(); + } else { + operation_ = value; + } + onChanged(); + } else { + if (operationCase_ == 2) { + updateBuilder_.mergeFrom(value); + } else { + updateBuilder_.setMessage(value); + } + } + operationCase_ = 2; + return this; + } + + /** + * + * + *
    +     * Update existing rows in a table. If any of the rows does not
    +     * already exist, the transaction fails with error `NOT_FOUND`.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write update = 2; + */ + public Builder clearUpdate() { + if (updateBuilder_ == null) { + if (operationCase_ == 2) { + operationCase_ = 0; + operation_ = null; + onChanged(); + } + } else { + if (operationCase_ == 2) { + operationCase_ = 0; + operation_ = null; + } + updateBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Update existing rows in a table. If any of the rows does not
    +     * already exist, the transaction fails with error `NOT_FOUND`.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write update = 2; + */ + public com.google.spanner.v1.Mutation.Write.Builder getUpdateBuilder() { + return internalGetUpdateFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Update existing rows in a table. If any of the rows does not
    +     * already exist, the transaction fails with error `NOT_FOUND`.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write update = 2; + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.WriteOrBuilder getUpdateOrBuilder() { + if ((operationCase_ == 2) && (updateBuilder_ != null)) { + return updateBuilder_.getMessageOrBuilder(); + } else { + if (operationCase_ == 2) { + return (com.google.spanner.v1.Mutation.Write) operation_; + } + return com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Update existing rows in a table. If any of the rows does not
    +     * already exist, the transaction fails with error `NOT_FOUND`.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write update = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation.Write, + com.google.spanner.v1.Mutation.Write.Builder, + com.google.spanner.v1.Mutation.WriteOrBuilder> + internalGetUpdateFieldBuilder() { + if (updateBuilder_ == null) { + if (!(operationCase_ == 2)) { + operation_ = com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } + updateBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation.Write, + com.google.spanner.v1.Mutation.Write.Builder, + com.google.spanner.v1.Mutation.WriteOrBuilder>( + (com.google.spanner.v1.Mutation.Write) operation_, + getParentForChildren(), + isClean()); + operation_ = null; + } + operationCase_ = 2; + onChanged(); + return updateBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation.Write, + com.google.spanner.v1.Mutation.Write.Builder, + com.google.spanner.v1.Mutation.WriteOrBuilder> + insertOrUpdateBuilder_; + + /** + * + * + *
    +     * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +     * already exists, then its column values are overwritten with the ones
    +     * provided. Any column values not explicitly written are preserved.
    +     *
    +     * When using
    +     * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], just as
    +     * when using [insert][google.spanner.v1.Mutation.insert], all `NOT NULL`
    +     * columns in the table must be given a value. This holds true even when the
    +     * row already exists and will therefore actually be updated.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write insert_or_update = 3; + * + * @return Whether the insertOrUpdate field is set. + */ + @java.lang.Override + public boolean hasInsertOrUpdate() { + return operationCase_ == 3; + } + + /** + * + * + *
    +     * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +     * already exists, then its column values are overwritten with the ones
    +     * provided. Any column values not explicitly written are preserved.
    +     *
    +     * When using
    +     * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], just as
    +     * when using [insert][google.spanner.v1.Mutation.insert], all `NOT NULL`
    +     * columns in the table must be given a value. This holds true even when the
    +     * row already exists and will therefore actually be updated.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write insert_or_update = 3; + * + * @return The insertOrUpdate. + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.Write getInsertOrUpdate() { + if (insertOrUpdateBuilder_ == null) { + if (operationCase_ == 3) { + return (com.google.spanner.v1.Mutation.Write) operation_; + } + return com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } else { + if (operationCase_ == 3) { + return insertOrUpdateBuilder_.getMessage(); + } + return com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +     * already exists, then its column values are overwritten with the ones
    +     * provided. Any column values not explicitly written are preserved.
    +     *
    +     * When using
    +     * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], just as
    +     * when using [insert][google.spanner.v1.Mutation.insert], all `NOT NULL`
    +     * columns in the table must be given a value. This holds true even when the
    +     * row already exists and will therefore actually be updated.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write insert_or_update = 3; + */ + public Builder setInsertOrUpdate(com.google.spanner.v1.Mutation.Write value) { + if (insertOrUpdateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + operation_ = value; + onChanged(); + } else { + insertOrUpdateBuilder_.setMessage(value); + } + operationCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +     * already exists, then its column values are overwritten with the ones
    +     * provided. Any column values not explicitly written are preserved.
    +     *
    +     * When using
    +     * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], just as
    +     * when using [insert][google.spanner.v1.Mutation.insert], all `NOT NULL`
    +     * columns in the table must be given a value. This holds true even when the
    +     * row already exists and will therefore actually be updated.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write insert_or_update = 3; + */ + public Builder setInsertOrUpdate(com.google.spanner.v1.Mutation.Write.Builder builderForValue) { + if (insertOrUpdateBuilder_ == null) { + operation_ = builderForValue.build(); + onChanged(); + } else { + insertOrUpdateBuilder_.setMessage(builderForValue.build()); + } + operationCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +     * already exists, then its column values are overwritten with the ones
    +     * provided. Any column values not explicitly written are preserved.
    +     *
    +     * When using
    +     * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], just as
    +     * when using [insert][google.spanner.v1.Mutation.insert], all `NOT NULL`
    +     * columns in the table must be given a value. This holds true even when the
    +     * row already exists and will therefore actually be updated.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write insert_or_update = 3; + */ + public Builder mergeInsertOrUpdate(com.google.spanner.v1.Mutation.Write value) { + if (insertOrUpdateBuilder_ == null) { + if (operationCase_ == 3 + && operation_ != com.google.spanner.v1.Mutation.Write.getDefaultInstance()) { + operation_ = + com.google.spanner.v1.Mutation.Write.newBuilder( + (com.google.spanner.v1.Mutation.Write) operation_) + .mergeFrom(value) + .buildPartial(); + } else { + operation_ = value; + } + onChanged(); + } else { + if (operationCase_ == 3) { + insertOrUpdateBuilder_.mergeFrom(value); + } else { + insertOrUpdateBuilder_.setMessage(value); + } + } + operationCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +     * already exists, then its column values are overwritten with the ones
    +     * provided. Any column values not explicitly written are preserved.
    +     *
    +     * When using
    +     * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], just as
    +     * when using [insert][google.spanner.v1.Mutation.insert], all `NOT NULL`
    +     * columns in the table must be given a value. This holds true even when the
    +     * row already exists and will therefore actually be updated.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write insert_or_update = 3; + */ + public Builder clearInsertOrUpdate() { + if (insertOrUpdateBuilder_ == null) { + if (operationCase_ == 3) { + operationCase_ = 0; + operation_ = null; + onChanged(); + } + } else { + if (operationCase_ == 3) { + operationCase_ = 0; + operation_ = null; + } + insertOrUpdateBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +     * already exists, then its column values are overwritten with the ones
    +     * provided. Any column values not explicitly written are preserved.
    +     *
    +     * When using
    +     * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], just as
    +     * when using [insert][google.spanner.v1.Mutation.insert], all `NOT NULL`
    +     * columns in the table must be given a value. This holds true even when the
    +     * row already exists and will therefore actually be updated.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write insert_or_update = 3; + */ + public com.google.spanner.v1.Mutation.Write.Builder getInsertOrUpdateBuilder() { + return internalGetInsertOrUpdateFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +     * already exists, then its column values are overwritten with the ones
    +     * provided. Any column values not explicitly written are preserved.
    +     *
    +     * When using
    +     * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], just as
    +     * when using [insert][google.spanner.v1.Mutation.insert], all `NOT NULL`
    +     * columns in the table must be given a value. This holds true even when the
    +     * row already exists and will therefore actually be updated.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write insert_or_update = 3; + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.WriteOrBuilder getInsertOrUpdateOrBuilder() { + if ((operationCase_ == 3) && (insertOrUpdateBuilder_ != null)) { + return insertOrUpdateBuilder_.getMessageOrBuilder(); + } else { + if (operationCase_ == 3) { + return (com.google.spanner.v1.Mutation.Write) operation_; + } + return com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +     * already exists, then its column values are overwritten with the ones
    +     * provided. Any column values not explicitly written are preserved.
    +     *
    +     * When using
    +     * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], just as
    +     * when using [insert][google.spanner.v1.Mutation.insert], all `NOT NULL`
    +     * columns in the table must be given a value. This holds true even when the
    +     * row already exists and will therefore actually be updated.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write insert_or_update = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation.Write, + com.google.spanner.v1.Mutation.Write.Builder, + com.google.spanner.v1.Mutation.WriteOrBuilder> + internalGetInsertOrUpdateFieldBuilder() { + if (insertOrUpdateBuilder_ == null) { + if (!(operationCase_ == 3)) { + operation_ = com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } + insertOrUpdateBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation.Write, + com.google.spanner.v1.Mutation.Write.Builder, + com.google.spanner.v1.Mutation.WriteOrBuilder>( + (com.google.spanner.v1.Mutation.Write) operation_, + getParentForChildren(), + isClean()); + operation_ = null; + } + operationCase_ = 3; + onChanged(); + return insertOrUpdateBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation.Write, + com.google.spanner.v1.Mutation.Write.Builder, + com.google.spanner.v1.Mutation.WriteOrBuilder> + replaceBuilder_; + + /** + * + * + *
    +     * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +     * already exists, it is deleted, and the column values provided are
    +     * inserted instead. Unlike
    +     * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], this
    +     * means any values not explicitly written become `NULL`.
    +     *
    +     * In an interleaved table, if you create the child table with the
    +     * `ON DELETE CASCADE` annotation, then replacing a parent row
    +     * also deletes the child rows. Otherwise, you must delete the
    +     * child rows before you replace the parent row.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write replace = 4; + * + * @return Whether the replace field is set. + */ + @java.lang.Override + public boolean hasReplace() { + return operationCase_ == 4; + } + + /** + * + * + *
    +     * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +     * already exists, it is deleted, and the column values provided are
    +     * inserted instead. Unlike
    +     * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], this
    +     * means any values not explicitly written become `NULL`.
    +     *
    +     * In an interleaved table, if you create the child table with the
    +     * `ON DELETE CASCADE` annotation, then replacing a parent row
    +     * also deletes the child rows. Otherwise, you must delete the
    +     * child rows before you replace the parent row.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write replace = 4; + * + * @return The replace. + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.Write getReplace() { + if (replaceBuilder_ == null) { + if (operationCase_ == 4) { + return (com.google.spanner.v1.Mutation.Write) operation_; + } + return com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } else { + if (operationCase_ == 4) { + return replaceBuilder_.getMessage(); + } + return com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +     * already exists, it is deleted, and the column values provided are
    +     * inserted instead. Unlike
    +     * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], this
    +     * means any values not explicitly written become `NULL`.
    +     *
    +     * In an interleaved table, if you create the child table with the
    +     * `ON DELETE CASCADE` annotation, then replacing a parent row
    +     * also deletes the child rows. Otherwise, you must delete the
    +     * child rows before you replace the parent row.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write replace = 4; + */ + public Builder setReplace(com.google.spanner.v1.Mutation.Write value) { + if (replaceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + operation_ = value; + onChanged(); + } else { + replaceBuilder_.setMessage(value); + } + operationCase_ = 4; + return this; + } + + /** + * + * + *
    +     * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +     * already exists, it is deleted, and the column values provided are
    +     * inserted instead. Unlike
    +     * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], this
    +     * means any values not explicitly written become `NULL`.
    +     *
    +     * In an interleaved table, if you create the child table with the
    +     * `ON DELETE CASCADE` annotation, then replacing a parent row
    +     * also deletes the child rows. Otherwise, you must delete the
    +     * child rows before you replace the parent row.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write replace = 4; + */ + public Builder setReplace(com.google.spanner.v1.Mutation.Write.Builder builderForValue) { + if (replaceBuilder_ == null) { + operation_ = builderForValue.build(); + onChanged(); + } else { + replaceBuilder_.setMessage(builderForValue.build()); + } + operationCase_ = 4; + return this; + } + + /** + * + * + *
    +     * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +     * already exists, it is deleted, and the column values provided are
    +     * inserted instead. Unlike
    +     * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], this
    +     * means any values not explicitly written become `NULL`.
    +     *
    +     * In an interleaved table, if you create the child table with the
    +     * `ON DELETE CASCADE` annotation, then replacing a parent row
    +     * also deletes the child rows. Otherwise, you must delete the
    +     * child rows before you replace the parent row.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write replace = 4; + */ + public Builder mergeReplace(com.google.spanner.v1.Mutation.Write value) { + if (replaceBuilder_ == null) { + if (operationCase_ == 4 + && operation_ != com.google.spanner.v1.Mutation.Write.getDefaultInstance()) { + operation_ = + com.google.spanner.v1.Mutation.Write.newBuilder( + (com.google.spanner.v1.Mutation.Write) operation_) + .mergeFrom(value) + .buildPartial(); + } else { + operation_ = value; + } + onChanged(); + } else { + if (operationCase_ == 4) { + replaceBuilder_.mergeFrom(value); + } else { + replaceBuilder_.setMessage(value); + } + } + operationCase_ = 4; + return this; + } + + /** + * + * + *
    +     * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +     * already exists, it is deleted, and the column values provided are
    +     * inserted instead. Unlike
    +     * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], this
    +     * means any values not explicitly written become `NULL`.
    +     *
    +     * In an interleaved table, if you create the child table with the
    +     * `ON DELETE CASCADE` annotation, then replacing a parent row
    +     * also deletes the child rows. Otherwise, you must delete the
    +     * child rows before you replace the parent row.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write replace = 4; + */ + public Builder clearReplace() { + if (replaceBuilder_ == null) { + if (operationCase_ == 4) { + operationCase_ = 0; + operation_ = null; + onChanged(); + } + } else { + if (operationCase_ == 4) { + operationCase_ = 0; + operation_ = null; + } + replaceBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +     * already exists, it is deleted, and the column values provided are
    +     * inserted instead. Unlike
    +     * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], this
    +     * means any values not explicitly written become `NULL`.
    +     *
    +     * In an interleaved table, if you create the child table with the
    +     * `ON DELETE CASCADE` annotation, then replacing a parent row
    +     * also deletes the child rows. Otherwise, you must delete the
    +     * child rows before you replace the parent row.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write replace = 4; + */ + public com.google.spanner.v1.Mutation.Write.Builder getReplaceBuilder() { + return internalGetReplaceFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +     * already exists, it is deleted, and the column values provided are
    +     * inserted instead. Unlike
    +     * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], this
    +     * means any values not explicitly written become `NULL`.
    +     *
    +     * In an interleaved table, if you create the child table with the
    +     * `ON DELETE CASCADE` annotation, then replacing a parent row
    +     * also deletes the child rows. Otherwise, you must delete the
    +     * child rows before you replace the parent row.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write replace = 4; + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.WriteOrBuilder getReplaceOrBuilder() { + if ((operationCase_ == 4) && (replaceBuilder_ != null)) { + return replaceBuilder_.getMessageOrBuilder(); + } else { + if (operationCase_ == 4) { + return (com.google.spanner.v1.Mutation.Write) operation_; + } + return com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +     * already exists, it is deleted, and the column values provided are
    +     * inserted instead. Unlike
    +     * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], this
    +     * means any values not explicitly written become `NULL`.
    +     *
    +     * In an interleaved table, if you create the child table with the
    +     * `ON DELETE CASCADE` annotation, then replacing a parent row
    +     * also deletes the child rows. Otherwise, you must delete the
    +     * child rows before you replace the parent row.
    +     * 
    + * + * .google.spanner.v1.Mutation.Write replace = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation.Write, + com.google.spanner.v1.Mutation.Write.Builder, + com.google.spanner.v1.Mutation.WriteOrBuilder> + internalGetReplaceFieldBuilder() { + if (replaceBuilder_ == null) { + if (!(operationCase_ == 4)) { + operation_ = com.google.spanner.v1.Mutation.Write.getDefaultInstance(); + } + replaceBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation.Write, + com.google.spanner.v1.Mutation.Write.Builder, + com.google.spanner.v1.Mutation.WriteOrBuilder>( + (com.google.spanner.v1.Mutation.Write) operation_, + getParentForChildren(), + isClean()); + operation_ = null; + } + operationCase_ = 4; + onChanged(); + return replaceBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation.Delete, + com.google.spanner.v1.Mutation.Delete.Builder, + com.google.spanner.v1.Mutation.DeleteOrBuilder> + deleteBuilder_; + + /** + * + * + *
    +     * Delete rows from a table. Succeeds whether or not the named
    +     * rows were present.
    +     * 
    + * + * .google.spanner.v1.Mutation.Delete delete = 5; + * + * @return Whether the delete field is set. + */ + @java.lang.Override + public boolean hasDelete() { + return operationCase_ == 5; + } + + /** + * + * + *
    +     * Delete rows from a table. Succeeds whether or not the named
    +     * rows were present.
    +     * 
    + * + * .google.spanner.v1.Mutation.Delete delete = 5; + * + * @return The delete. + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.Delete getDelete() { + if (deleteBuilder_ == null) { + if (operationCase_ == 5) { + return (com.google.spanner.v1.Mutation.Delete) operation_; + } + return com.google.spanner.v1.Mutation.Delete.getDefaultInstance(); + } else { + if (operationCase_ == 5) { + return deleteBuilder_.getMessage(); + } + return com.google.spanner.v1.Mutation.Delete.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Delete rows from a table. Succeeds whether or not the named
    +     * rows were present.
    +     * 
    + * + * .google.spanner.v1.Mutation.Delete delete = 5; + */ + public Builder setDelete(com.google.spanner.v1.Mutation.Delete value) { + if (deleteBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + operation_ = value; + onChanged(); + } else { + deleteBuilder_.setMessage(value); + } + operationCase_ = 5; + return this; + } + + /** + * + * + *
    +     * Delete rows from a table. Succeeds whether or not the named
    +     * rows were present.
    +     * 
    + * + * .google.spanner.v1.Mutation.Delete delete = 5; + */ + public Builder setDelete(com.google.spanner.v1.Mutation.Delete.Builder builderForValue) { + if (deleteBuilder_ == null) { + operation_ = builderForValue.build(); + onChanged(); + } else { + deleteBuilder_.setMessage(builderForValue.build()); + } + operationCase_ = 5; + return this; + } + + /** + * + * + *
    +     * Delete rows from a table. Succeeds whether or not the named
    +     * rows were present.
    +     * 
    + * + * .google.spanner.v1.Mutation.Delete delete = 5; + */ + public Builder mergeDelete(com.google.spanner.v1.Mutation.Delete value) { + if (deleteBuilder_ == null) { + if (operationCase_ == 5 + && operation_ != com.google.spanner.v1.Mutation.Delete.getDefaultInstance()) { + operation_ = + com.google.spanner.v1.Mutation.Delete.newBuilder( + (com.google.spanner.v1.Mutation.Delete) operation_) + .mergeFrom(value) + .buildPartial(); + } else { + operation_ = value; + } + onChanged(); + } else { + if (operationCase_ == 5) { + deleteBuilder_.mergeFrom(value); + } else { + deleteBuilder_.setMessage(value); + } + } + operationCase_ = 5; + return this; + } + + /** + * + * + *
    +     * Delete rows from a table. Succeeds whether or not the named
    +     * rows were present.
    +     * 
    + * + * .google.spanner.v1.Mutation.Delete delete = 5; + */ + public Builder clearDelete() { + if (deleteBuilder_ == null) { + if (operationCase_ == 5) { + operationCase_ = 0; + operation_ = null; + onChanged(); + } + } else { + if (operationCase_ == 5) { + operationCase_ = 0; + operation_ = null; + } + deleteBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Delete rows from a table. Succeeds whether or not the named
    +     * rows were present.
    +     * 
    + * + * .google.spanner.v1.Mutation.Delete delete = 5; + */ + public com.google.spanner.v1.Mutation.Delete.Builder getDeleteBuilder() { + return internalGetDeleteFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Delete rows from a table. Succeeds whether or not the named
    +     * rows were present.
    +     * 
    + * + * .google.spanner.v1.Mutation.Delete delete = 5; + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.DeleteOrBuilder getDeleteOrBuilder() { + if ((operationCase_ == 5) && (deleteBuilder_ != null)) { + return deleteBuilder_.getMessageOrBuilder(); + } else { + if (operationCase_ == 5) { + return (com.google.spanner.v1.Mutation.Delete) operation_; + } + return com.google.spanner.v1.Mutation.Delete.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Delete rows from a table. Succeeds whether or not the named
    +     * rows were present.
    +     * 
    + * + * .google.spanner.v1.Mutation.Delete delete = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation.Delete, + com.google.spanner.v1.Mutation.Delete.Builder, + com.google.spanner.v1.Mutation.DeleteOrBuilder> + internalGetDeleteFieldBuilder() { + if (deleteBuilder_ == null) { + if (!(operationCase_ == 5)) { + operation_ = com.google.spanner.v1.Mutation.Delete.getDefaultInstance(); + } + deleteBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation.Delete, + com.google.spanner.v1.Mutation.Delete.Builder, + com.google.spanner.v1.Mutation.DeleteOrBuilder>( + (com.google.spanner.v1.Mutation.Delete) operation_, + getParentForChildren(), + isClean()); + operation_ = null; + } + operationCase_ = 5; + onChanged(); + return deleteBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation.Send, + com.google.spanner.v1.Mutation.Send.Builder, + com.google.spanner.v1.Mutation.SendOrBuilder> + sendBuilder_; + + /** + * + * + *
    +     * Send a message to a queue.
    +     * 
    + * + * .google.spanner.v1.Mutation.Send send = 6; + * + * @return Whether the send field is set. + */ + @java.lang.Override + public boolean hasSend() { + return operationCase_ == 6; + } + + /** + * + * + *
    +     * Send a message to a queue.
    +     * 
    + * + * .google.spanner.v1.Mutation.Send send = 6; + * + * @return The send. + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.Send getSend() { + if (sendBuilder_ == null) { + if (operationCase_ == 6) { + return (com.google.spanner.v1.Mutation.Send) operation_; + } + return com.google.spanner.v1.Mutation.Send.getDefaultInstance(); + } else { + if (operationCase_ == 6) { + return sendBuilder_.getMessage(); + } + return com.google.spanner.v1.Mutation.Send.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Send a message to a queue.
    +     * 
    + * + * .google.spanner.v1.Mutation.Send send = 6; + */ + public Builder setSend(com.google.spanner.v1.Mutation.Send value) { + if (sendBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + operation_ = value; + onChanged(); + } else { + sendBuilder_.setMessage(value); + } + operationCase_ = 6; + return this; + } + + /** + * + * + *
    +     * Send a message to a queue.
    +     * 
    + * + * .google.spanner.v1.Mutation.Send send = 6; + */ + public Builder setSend(com.google.spanner.v1.Mutation.Send.Builder builderForValue) { + if (sendBuilder_ == null) { + operation_ = builderForValue.build(); + onChanged(); + } else { + sendBuilder_.setMessage(builderForValue.build()); + } + operationCase_ = 6; + return this; + } + + /** + * + * + *
    +     * Send a message to a queue.
    +     * 
    + * + * .google.spanner.v1.Mutation.Send send = 6; + */ + public Builder mergeSend(com.google.spanner.v1.Mutation.Send value) { + if (sendBuilder_ == null) { + if (operationCase_ == 6 + && operation_ != com.google.spanner.v1.Mutation.Send.getDefaultInstance()) { + operation_ = + com.google.spanner.v1.Mutation.Send.newBuilder( + (com.google.spanner.v1.Mutation.Send) operation_) + .mergeFrom(value) + .buildPartial(); + } else { + operation_ = value; + } + onChanged(); + } else { + if (operationCase_ == 6) { + sendBuilder_.mergeFrom(value); + } else { + sendBuilder_.setMessage(value); + } + } + operationCase_ = 6; + return this; + } + + /** + * + * + *
    +     * Send a message to a queue.
    +     * 
    + * + * .google.spanner.v1.Mutation.Send send = 6; + */ + public Builder clearSend() { + if (sendBuilder_ == null) { + if (operationCase_ == 6) { + operationCase_ = 0; + operation_ = null; + onChanged(); + } + } else { + if (operationCase_ == 6) { + operationCase_ = 0; + operation_ = null; + } + sendBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Send a message to a queue.
    +     * 
    + * + * .google.spanner.v1.Mutation.Send send = 6; + */ + public com.google.spanner.v1.Mutation.Send.Builder getSendBuilder() { + return internalGetSendFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Send a message to a queue.
    +     * 
    + * + * .google.spanner.v1.Mutation.Send send = 6; + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.SendOrBuilder getSendOrBuilder() { + if ((operationCase_ == 6) && (sendBuilder_ != null)) { + return sendBuilder_.getMessageOrBuilder(); + } else { + if (operationCase_ == 6) { + return (com.google.spanner.v1.Mutation.Send) operation_; + } + return com.google.spanner.v1.Mutation.Send.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Send a message to a queue.
    +     * 
    + * + * .google.spanner.v1.Mutation.Send send = 6; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation.Send, + com.google.spanner.v1.Mutation.Send.Builder, + com.google.spanner.v1.Mutation.SendOrBuilder> + internalGetSendFieldBuilder() { + if (sendBuilder_ == null) { + if (!(operationCase_ == 6)) { + operation_ = com.google.spanner.v1.Mutation.Send.getDefaultInstance(); + } + sendBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation.Send, + com.google.spanner.v1.Mutation.Send.Builder, + com.google.spanner.v1.Mutation.SendOrBuilder>( + (com.google.spanner.v1.Mutation.Send) operation_, + getParentForChildren(), + isClean()); + operation_ = null; + } + operationCase_ = 6; + onChanged(); + return sendBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation.Ack, + com.google.spanner.v1.Mutation.Ack.Builder, + com.google.spanner.v1.Mutation.AckOrBuilder> + ackBuilder_; + + /** + * + * + *
    +     * Ack a message from a queue.
    +     * 
    + * + * .google.spanner.v1.Mutation.Ack ack = 7; + * + * @return Whether the ack field is set. + */ + @java.lang.Override + public boolean hasAck() { + return operationCase_ == 7; + } + + /** + * + * + *
    +     * Ack a message from a queue.
    +     * 
    + * + * .google.spanner.v1.Mutation.Ack ack = 7; + * + * @return The ack. + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.Ack getAck() { + if (ackBuilder_ == null) { + if (operationCase_ == 7) { + return (com.google.spanner.v1.Mutation.Ack) operation_; + } + return com.google.spanner.v1.Mutation.Ack.getDefaultInstance(); + } else { + if (operationCase_ == 7) { + return ackBuilder_.getMessage(); + } + return com.google.spanner.v1.Mutation.Ack.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Ack a message from a queue.
    +     * 
    + * + * .google.spanner.v1.Mutation.Ack ack = 7; + */ + public Builder setAck(com.google.spanner.v1.Mutation.Ack value) { + if (ackBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + operation_ = value; + onChanged(); + } else { + ackBuilder_.setMessage(value); + } + operationCase_ = 7; + return this; + } + + /** + * + * + *
    +     * Ack a message from a queue.
    +     * 
    + * + * .google.spanner.v1.Mutation.Ack ack = 7; + */ + public Builder setAck(com.google.spanner.v1.Mutation.Ack.Builder builderForValue) { + if (ackBuilder_ == null) { + operation_ = builderForValue.build(); + onChanged(); + } else { + ackBuilder_.setMessage(builderForValue.build()); + } + operationCase_ = 7; + return this; + } + + /** + * + * + *
    +     * Ack a message from a queue.
    +     * 
    + * + * .google.spanner.v1.Mutation.Ack ack = 7; + */ + public Builder mergeAck(com.google.spanner.v1.Mutation.Ack value) { + if (ackBuilder_ == null) { + if (operationCase_ == 7 + && operation_ != com.google.spanner.v1.Mutation.Ack.getDefaultInstance()) { + operation_ = + com.google.spanner.v1.Mutation.Ack.newBuilder( + (com.google.spanner.v1.Mutation.Ack) operation_) + .mergeFrom(value) + .buildPartial(); + } else { + operation_ = value; + } + onChanged(); + } else { + if (operationCase_ == 7) { + ackBuilder_.mergeFrom(value); + } else { + ackBuilder_.setMessage(value); + } + } + operationCase_ = 7; + return this; + } + + /** + * + * + *
    +     * Ack a message from a queue.
    +     * 
    + * + * .google.spanner.v1.Mutation.Ack ack = 7; + */ + public Builder clearAck() { + if (ackBuilder_ == null) { + if (operationCase_ == 7) { + operationCase_ = 0; + operation_ = null; + onChanged(); + } + } else { + if (operationCase_ == 7) { + operationCase_ = 0; + operation_ = null; + } + ackBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Ack a message from a queue.
    +     * 
    + * + * .google.spanner.v1.Mutation.Ack ack = 7; + */ + public com.google.spanner.v1.Mutation.Ack.Builder getAckBuilder() { + return internalGetAckFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Ack a message from a queue.
    +     * 
    + * + * .google.spanner.v1.Mutation.Ack ack = 7; + */ + @java.lang.Override + public com.google.spanner.v1.Mutation.AckOrBuilder getAckOrBuilder() { + if ((operationCase_ == 7) && (ackBuilder_ != null)) { + return ackBuilder_.getMessageOrBuilder(); + } else { + if (operationCase_ == 7) { + return (com.google.spanner.v1.Mutation.Ack) operation_; + } + return com.google.spanner.v1.Mutation.Ack.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Ack a message from a queue.
    +     * 
    + * + * .google.spanner.v1.Mutation.Ack ack = 7; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation.Ack, + com.google.spanner.v1.Mutation.Ack.Builder, + com.google.spanner.v1.Mutation.AckOrBuilder> + internalGetAckFieldBuilder() { + if (ackBuilder_ == null) { + if (!(operationCase_ == 7)) { + operation_ = com.google.spanner.v1.Mutation.Ack.getDefaultInstance(); + } + ackBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Mutation.Ack, + com.google.spanner.v1.Mutation.Ack.Builder, + com.google.spanner.v1.Mutation.AckOrBuilder>( + (com.google.spanner.v1.Mutation.Ack) operation_, getParentForChildren(), isClean()); + operation_ = null; + } + operationCase_ = 7; + onChanged(); + return ackBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.Mutation) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.Mutation) + private static final com.google.spanner.v1.Mutation DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.Mutation(); + } + + public static com.google.spanner.v1.Mutation getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Mutation parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.Mutation getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MutationOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MutationOrBuilder.java new file mode 100644 index 000000000000..b0bd8d72816f --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MutationOrBuilder.java @@ -0,0 +1,349 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/mutation.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface MutationOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.Mutation) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Insert new rows in a table. If any of the rows already exist,
    +   * the write or transaction fails with error `ALREADY_EXISTS`.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write insert = 1; + * + * @return Whether the insert field is set. + */ + boolean hasInsert(); + + /** + * + * + *
    +   * Insert new rows in a table. If any of the rows already exist,
    +   * the write or transaction fails with error `ALREADY_EXISTS`.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write insert = 1; + * + * @return The insert. + */ + com.google.spanner.v1.Mutation.Write getInsert(); + + /** + * + * + *
    +   * Insert new rows in a table. If any of the rows already exist,
    +   * the write or transaction fails with error `ALREADY_EXISTS`.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write insert = 1; + */ + com.google.spanner.v1.Mutation.WriteOrBuilder getInsertOrBuilder(); + + /** + * + * + *
    +   * Update existing rows in a table. If any of the rows does not
    +   * already exist, the transaction fails with error `NOT_FOUND`.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write update = 2; + * + * @return Whether the update field is set. + */ + boolean hasUpdate(); + + /** + * + * + *
    +   * Update existing rows in a table. If any of the rows does not
    +   * already exist, the transaction fails with error `NOT_FOUND`.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write update = 2; + * + * @return The update. + */ + com.google.spanner.v1.Mutation.Write getUpdate(); + + /** + * + * + *
    +   * Update existing rows in a table. If any of the rows does not
    +   * already exist, the transaction fails with error `NOT_FOUND`.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write update = 2; + */ + com.google.spanner.v1.Mutation.WriteOrBuilder getUpdateOrBuilder(); + + /** + * + * + *
    +   * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +   * already exists, then its column values are overwritten with the ones
    +   * provided. Any column values not explicitly written are preserved.
    +   *
    +   * When using
    +   * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], just as
    +   * when using [insert][google.spanner.v1.Mutation.insert], all `NOT NULL`
    +   * columns in the table must be given a value. This holds true even when the
    +   * row already exists and will therefore actually be updated.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write insert_or_update = 3; + * + * @return Whether the insertOrUpdate field is set. + */ + boolean hasInsertOrUpdate(); + + /** + * + * + *
    +   * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +   * already exists, then its column values are overwritten with the ones
    +   * provided. Any column values not explicitly written are preserved.
    +   *
    +   * When using
    +   * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], just as
    +   * when using [insert][google.spanner.v1.Mutation.insert], all `NOT NULL`
    +   * columns in the table must be given a value. This holds true even when the
    +   * row already exists and will therefore actually be updated.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write insert_or_update = 3; + * + * @return The insertOrUpdate. + */ + com.google.spanner.v1.Mutation.Write getInsertOrUpdate(); + + /** + * + * + *
    +   * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +   * already exists, then its column values are overwritten with the ones
    +   * provided. Any column values not explicitly written are preserved.
    +   *
    +   * When using
    +   * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], just as
    +   * when using [insert][google.spanner.v1.Mutation.insert], all `NOT NULL`
    +   * columns in the table must be given a value. This holds true even when the
    +   * row already exists and will therefore actually be updated.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write insert_or_update = 3; + */ + com.google.spanner.v1.Mutation.WriteOrBuilder getInsertOrUpdateOrBuilder(); + + /** + * + * + *
    +   * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +   * already exists, it is deleted, and the column values provided are
    +   * inserted instead. Unlike
    +   * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], this
    +   * means any values not explicitly written become `NULL`.
    +   *
    +   * In an interleaved table, if you create the child table with the
    +   * `ON DELETE CASCADE` annotation, then replacing a parent row
    +   * also deletes the child rows. Otherwise, you must delete the
    +   * child rows before you replace the parent row.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write replace = 4; + * + * @return Whether the replace field is set. + */ + boolean hasReplace(); + + /** + * + * + *
    +   * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +   * already exists, it is deleted, and the column values provided are
    +   * inserted instead. Unlike
    +   * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], this
    +   * means any values not explicitly written become `NULL`.
    +   *
    +   * In an interleaved table, if you create the child table with the
    +   * `ON DELETE CASCADE` annotation, then replacing a parent row
    +   * also deletes the child rows. Otherwise, you must delete the
    +   * child rows before you replace the parent row.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write replace = 4; + * + * @return The replace. + */ + com.google.spanner.v1.Mutation.Write getReplace(); + + /** + * + * + *
    +   * Like [insert][google.spanner.v1.Mutation.insert], except that if the row
    +   * already exists, it is deleted, and the column values provided are
    +   * inserted instead. Unlike
    +   * [insert_or_update][google.spanner.v1.Mutation.insert_or_update], this
    +   * means any values not explicitly written become `NULL`.
    +   *
    +   * In an interleaved table, if you create the child table with the
    +   * `ON DELETE CASCADE` annotation, then replacing a parent row
    +   * also deletes the child rows. Otherwise, you must delete the
    +   * child rows before you replace the parent row.
    +   * 
    + * + * .google.spanner.v1.Mutation.Write replace = 4; + */ + com.google.spanner.v1.Mutation.WriteOrBuilder getReplaceOrBuilder(); + + /** + * + * + *
    +   * Delete rows from a table. Succeeds whether or not the named
    +   * rows were present.
    +   * 
    + * + * .google.spanner.v1.Mutation.Delete delete = 5; + * + * @return Whether the delete field is set. + */ + boolean hasDelete(); + + /** + * + * + *
    +   * Delete rows from a table. Succeeds whether or not the named
    +   * rows were present.
    +   * 
    + * + * .google.spanner.v1.Mutation.Delete delete = 5; + * + * @return The delete. + */ + com.google.spanner.v1.Mutation.Delete getDelete(); + + /** + * + * + *
    +   * Delete rows from a table. Succeeds whether or not the named
    +   * rows were present.
    +   * 
    + * + * .google.spanner.v1.Mutation.Delete delete = 5; + */ + com.google.spanner.v1.Mutation.DeleteOrBuilder getDeleteOrBuilder(); + + /** + * + * + *
    +   * Send a message to a queue.
    +   * 
    + * + * .google.spanner.v1.Mutation.Send send = 6; + * + * @return Whether the send field is set. + */ + boolean hasSend(); + + /** + * + * + *
    +   * Send a message to a queue.
    +   * 
    + * + * .google.spanner.v1.Mutation.Send send = 6; + * + * @return The send. + */ + com.google.spanner.v1.Mutation.Send getSend(); + + /** + * + * + *
    +   * Send a message to a queue.
    +   * 
    + * + * .google.spanner.v1.Mutation.Send send = 6; + */ + com.google.spanner.v1.Mutation.SendOrBuilder getSendOrBuilder(); + + /** + * + * + *
    +   * Ack a message from a queue.
    +   * 
    + * + * .google.spanner.v1.Mutation.Ack ack = 7; + * + * @return Whether the ack field is set. + */ + boolean hasAck(); + + /** + * + * + *
    +   * Ack a message from a queue.
    +   * 
    + * + * .google.spanner.v1.Mutation.Ack ack = 7; + * + * @return The ack. + */ + com.google.spanner.v1.Mutation.Ack getAck(); + + /** + * + * + *
    +   * Ack a message from a queue.
    +   * 
    + * + * .google.spanner.v1.Mutation.Ack ack = 7; + */ + com.google.spanner.v1.Mutation.AckOrBuilder getAckOrBuilder(); + + com.google.spanner.v1.Mutation.OperationCase getOperationCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MutationProto.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MutationProto.java new file mode 100644 index 000000000000..48bf250009d9 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MutationProto.java @@ -0,0 +1,170 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/mutation.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public final class MutationProto extends com.google.protobuf.GeneratedFile { + private MutationProto() {} + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "MutationProto"); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_Mutation_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_Mutation_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_Mutation_Write_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_Mutation_Write_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_Mutation_Delete_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_Mutation_Delete_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_Mutation_Send_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_Mutation_Send_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_Mutation_Ack_descriptor; + static final 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_Mutation_Ack_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n" + + " google/spanner/v1/mutation.proto\022\021goog" + + "le.spanner.v1\032\037google/api/field_behavior" + + ".proto\032\034google/protobuf/struct.proto\032\037go" + + "ogle/protobuf/timestamp.proto\032\034google/spanner/v1/keys.proto\"\300\006\n" + + "\010Mutation\0223\n" + + "\006insert\030\001 \001(\0132!.google.spanner.v1.Mutation.WriteH\000\0223\n" + + "\006update\030\002 \001(\0132!.google.spanner.v1.Mutation.WriteH\000\022=\n" + + "\020insert_or_update\030\003 \001(\0132!.google.spanner.v1.Mutation.WriteH\000\0224\n" + + "\007replace\030\004 \001(\0132!.google.spanner.v1.Mutation.WriteH\000\0224\n" + + "\006delete\030\005 \001(\0132\".google.spanner.v1.Mutation.DeleteH\000\0220\n" + + "\004send\030\006 \001(\0132 .google.spanner.v1.Mutation.SendH\000\022.\n" + + "\003ack\030\007 \001(\0132\037.google.spanner.v1.Mutation.AckH\000\032X\n" + + "\005Write\022\022\n" + + "\005table\030\001 \001(\tB\003\340A\002\022\017\n" + + "\007columns\030\002 \003(\t\022*\n" + + "\006values\030\003 \003(\0132\032.google.protobuf.ListValue\032M\n" + + "\006Delete\022\022\n" + + "\005table\030\001 \001(\tB\003\340A\002\022/\n" + + "\007key_set\030\002" + + " \001(\0132\031.google.spanner.v1.KeySetB\003\340A\002\032\243\001\n" + + "\004Send\022\022\n" + + "\005queue\030\001 \001(\tB\003\340A\002\022,\n" + + "\003key\030\002 \001(\0132\032.google.protobuf.ListValueB\003\340A\002\0220\n" + + "\014deliver_time\030\003 \001(\0132\032.google.protobuf.Timestamp\022\'\n" + + "\007payload\030\004 \001(\0132\026.google.protobuf.Value\032a\n" + + "\003Ack\022\022\n" + + "\005queue\030\001 \001(\tB\003\340A\002\022,\n" + + 
"\003key\030\002 \001(\0132\032.google.protobuf.ListValueB\003\340A\002\022\030\n" + + "\020ignore_not_found\030\003 \001(\010B\013\n" + + "\toperationB\260\001\n" + + "\025com.google.spanner.v1B\r" + + "MutationProtoP\001Z5cloud.google.com/go/s" + + "panner/apiv1/spannerpb;spannerpb\252\002\027Googl" + + "e.Cloud.Spanner.V1\312\002\027Google\\Cloud\\Spanne" + + "r\\V1\352\002\032Google::Cloud::Spanner::V1b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.protobuf.StructProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + com.google.spanner.v1.KeysProto.getDescriptor(), + }); + internal_static_google_spanner_v1_Mutation_descriptor = getDescriptor().getMessageType(0); + internal_static_google_spanner_v1_Mutation_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_Mutation_descriptor, + new java.lang.String[] { + "Insert", "Update", "InsertOrUpdate", "Replace", "Delete", "Send", "Ack", "Operation", + }); + internal_static_google_spanner_v1_Mutation_Write_descriptor = + internal_static_google_spanner_v1_Mutation_descriptor.getNestedType(0); + internal_static_google_spanner_v1_Mutation_Write_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_Mutation_Write_descriptor, + new java.lang.String[] { + "Table", "Columns", "Values", + }); + internal_static_google_spanner_v1_Mutation_Delete_descriptor = + internal_static_google_spanner_v1_Mutation_descriptor.getNestedType(1); + internal_static_google_spanner_v1_Mutation_Delete_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_Mutation_Delete_descriptor, + new java.lang.String[] { + "Table", 
"KeySet", + }); + internal_static_google_spanner_v1_Mutation_Send_descriptor = + internal_static_google_spanner_v1_Mutation_descriptor.getNestedType(2); + internal_static_google_spanner_v1_Mutation_Send_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_Mutation_Send_descriptor, + new java.lang.String[] { + "Queue", "Key", "DeliverTime", "Payload", + }); + internal_static_google_spanner_v1_Mutation_Ack_descriptor = + internal_static_google_spanner_v1_Mutation_descriptor.getNestedType(3); + internal_static_google_spanner_v1_Mutation_Ack_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_Mutation_Ack_descriptor, + new java.lang.String[] { + "Queue", "Key", "IgnoreNotFound", + }); + descriptor.resolveAllFeaturesImmutable(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.protobuf.StructProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + com.google.spanner.v1.KeysProto.getDescriptor(); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartialResultSet.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartialResultSet.java new file mode 100644 index 000000000000..28dd7cef0228 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartialResultSet.java @@ -0,0 +1,4355 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/result_set.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * Partial results from a streaming read or SQL query. Streaming reads and
    + * SQL queries better tolerate large result sets, large rows, and large
    + * values, but are a little trickier to consume.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.PartialResultSet} + */ +@com.google.protobuf.Generated +public final class PartialResultSet extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.PartialResultSet) + PartialResultSetOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "PartialResultSet"); + } + + // Use PartialResultSet.newBuilder() to construct. + private PartialResultSet(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private PartialResultSet() { + values_ = java.util.Collections.emptyList(); + resumeToken_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ResultSetProto + .internal_static_google_spanner_v1_PartialResultSet_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ResultSetProto + .internal_static_google_spanner_v1_PartialResultSet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.PartialResultSet.class, + com.google.spanner.v1.PartialResultSet.Builder.class); + } + + private int bitField0_; + public static final int METADATA_FIELD_NUMBER = 1; + private com.google.spanner.v1.ResultSetMetadata metadata_; + + /** + * + * + *
    +   * Metadata about the result set, such as row type information.
    +   * Only present in the first response.
    +   * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + * + * @return Whether the metadata field is set. + */ + @java.lang.Override + public boolean hasMetadata() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Metadata about the result set, such as row type information.
    +   * Only present in the first response.
    +   * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + * + * @return The metadata. + */ + @java.lang.Override + public com.google.spanner.v1.ResultSetMetadata getMetadata() { + return metadata_ == null + ? com.google.spanner.v1.ResultSetMetadata.getDefaultInstance() + : metadata_; + } + + /** + * + * + *
    +   * Metadata about the result set, such as row type information.
    +   * Only present in the first response.
    +   * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + */ + @java.lang.Override + public com.google.spanner.v1.ResultSetMetadataOrBuilder getMetadataOrBuilder() { + return metadata_ == null + ? com.google.spanner.v1.ResultSetMetadata.getDefaultInstance() + : metadata_; + } + + public static final int VALUES_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List values_; + + /** + * + * + *
    +   * A streamed result set consists of a stream of values, which might
    +   * be split into many `PartialResultSet` messages to accommodate
    +   * large rows and/or large values. Every N complete values defines a
    +   * row, where N is equal to the number of entries in
    +   * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +   *
    +   * Most values are encoded based on type as described
    +   * [here][google.spanner.v1.TypeCode].
    +   *
    +   * It's possible that the last value in values is "chunked",
    +   * meaning that the rest of the value is sent in subsequent
    +   * `PartialResultSet`(s). This is denoted by the
    +   * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +   * Two or more chunked values can be merged to form a complete value as
    +   * follows:
    +   *
    +   * * `bool/number/null`: can't be chunked
    +   * * `string`: concatenate the strings
    +   * * `list`: concatenate the lists. If the last element in a list is a
    +   * `string`, `list`, or `object`, merge it with the first element in
    +   * the next list by applying these rules recursively.
    +   * * `object`: concatenate the (field name, field value) pairs. If a
    +   * field name is duplicated, then apply these rules recursively
    +   * to merge the field values.
    +   *
    +   * Some examples of merging:
    +   *
    +   * Strings are concatenated.
    +   * "foo", "bar" => "foobar"
    +   *
    +   * Lists of non-strings are concatenated.
    +   * [2, 3], [4] => [2, 3, 4]
    +   *
    +   * Lists are concatenated, but the last and first elements are merged
    +   * because they are strings.
    +   * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +   *
    +   * Lists are concatenated, but the last and first elements are merged
    +   * because they are lists. Recursively, the last and first elements
    +   * of the inner lists are merged because they are strings.
    +   * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +   *
    +   * Non-overlapping object fields are combined.
    +   * {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
    +   *
    +   * Overlapping object fields are merged.
    +   * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +   *
    +   * Examples of merging objects containing lists of strings.
    +   * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +   *
    +   * For a more complete example, suppose a streaming SQL query is
    +   * yielding a result set whose rows contain a single string
    +   * field. The following `PartialResultSet`s might be yielded:
    +   *
    +   * {
    +   * "metadata": { ... }
    +   * "values": ["Hello", "W"]
    +   * "chunked_value": true
    +   * "resume_token": "Af65..."
    +   * }
    +   * {
    +   * "values": ["orl"]
    +   * "chunked_value": true
    +   * }
    +   * {
    +   * "values": ["d"]
    +   * "resume_token": "Zx1B..."
    +   * }
    +   *
    +   * This sequence of `PartialResultSet`s encodes two rows, one
    +   * containing the field value `"Hello"`, and a second containing the
    +   * field value `"World" = "W" + "orl" + "d"`.
    +   *
    +   * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +   * resumed from a previously yielded `resume_token`. For the above sequence of
    +   * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +   * yields results from the `PartialResultSet` with value "orl".
    +   * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + @java.lang.Override + public java.util.List getValuesList() { + return values_; + } + + /** + * + * + *
    +   * A streamed result set consists of a stream of values, which might
    +   * be split into many `PartialResultSet` messages to accommodate
    +   * large rows and/or large values. Every N complete values defines a
    +   * row, where N is equal to the number of entries in
    +   * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +   *
    +   * Most values are encoded based on type as described
    +   * [here][google.spanner.v1.TypeCode].
    +   *
    +   * It's possible that the last value in values is "chunked",
    +   * meaning that the rest of the value is sent in subsequent
    +   * `PartialResultSet`(s). This is denoted by the
    +   * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +   * Two or more chunked values can be merged to form a complete value as
    +   * follows:
    +   *
    +   * * `bool/number/null`: can't be chunked
    +   * * `string`: concatenate the strings
    +   * * `list`: concatenate the lists. If the last element in a list is a
    +   * `string`, `list`, or `object`, merge it with the first element in
    +   * the next list by applying these rules recursively.
    +   * * `object`: concatenate the (field name, field value) pairs. If a
    +   * field name is duplicated, then apply these rules recursively
    +   * to merge the field values.
    +   *
    +   * Some examples of merging:
    +   *
    +   * Strings are concatenated.
    +   * "foo", "bar" => "foobar"
    +   *
    +   * Lists of non-strings are concatenated.
    +   * [2, 3], [4] => [2, 3, 4]
    +   *
    +   * Lists are concatenated, but the last and first elements are merged
    +   * because they are strings.
    +   * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +   *
    +   * Lists are concatenated, but the last and first elements are merged
    +   * because they are lists. Recursively, the last and first elements
    +   * of the inner lists are merged because they are strings.
    +   * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +   *
    +   * Non-overlapping object fields are combined.
    +   * {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
    +   *
    +   * Overlapping object fields are merged.
    +   * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +   *
    +   * Examples of merging objects containing lists of strings.
    +   * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +   *
    +   * For a more complete example, suppose a streaming SQL query is
    +   * yielding a result set whose rows contain a single string
    +   * field. The following `PartialResultSet`s might be yielded:
    +   *
    +   * {
    +   * "metadata": { ... }
    +   * "values": ["Hello", "W"]
    +   * "chunked_value": true
    +   * "resume_token": "Af65..."
    +   * }
    +   * {
    +   * "values": ["orl"]
    +   * "chunked_value": true
    +   * }
    +   * {
    +   * "values": ["d"]
    +   * "resume_token": "Zx1B..."
    +   * }
    +   *
    +   * This sequence of `PartialResultSet`s encodes two rows, one
    +   * containing the field value `"Hello"`, and a second containing the
    +   * field value `"World" = "W" + "orl" + "d"`.
    +   *
    +   * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +   * resumed from a previously yielded `resume_token`. For the above sequence of
    +   * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +   * yields results from the `PartialResultSet` with value "orl".
    +   * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + @java.lang.Override + public java.util.List getValuesOrBuilderList() { + return values_; + } + + /** + * + * + *
    +   * A streamed result set consists of a stream of values, which might
    +   * be split into many `PartialResultSet` messages to accommodate
    +   * large rows and/or large values. Every N complete values defines a
    +   * row, where N is equal to the number of entries in
    +   * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +   *
    +   * Most values are encoded based on type as described
    +   * [here][google.spanner.v1.TypeCode].
    +   *
    +   * It's possible that the last value in values is "chunked",
    +   * meaning that the rest of the value is sent in subsequent
    +   * `PartialResultSet`(s). This is denoted by the
    +   * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +   * Two or more chunked values can be merged to form a complete value as
    +   * follows:
    +   *
    +   * * `bool/number/null`: can't be chunked
    +   * * `string`: concatenate the strings
    +   * * `list`: concatenate the lists. If the last element in a list is a
    +   * `string`, `list`, or `object`, merge it with the first element in
    +   * the next list by applying these rules recursively.
    +   * * `object`: concatenate the (field name, field value) pairs. If a
    +   * field name is duplicated, then apply these rules recursively
    +   * to merge the field values.
    +   *
    +   * Some examples of merging:
    +   *
    +   * Strings are concatenated.
    +   * "foo", "bar" => "foobar"
    +   *
    +   * Lists of non-strings are concatenated.
    +   * [2, 3], [4] => [2, 3, 4]
    +   *
    +   * Lists are concatenated, but the last and first elements are merged
    +   * because they are strings.
    +   * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +   *
    +   * Lists are concatenated, but the last and first elements are merged
    +   * because they are lists. Recursively, the last and first elements
    +   * of the inner lists are merged because they are strings.
    +   * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +   *
    +   * Non-overlapping object fields are combined.
    +   * {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
    +   *
    +   * Overlapping object fields are merged.
    +   * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +   *
    +   * Examples of merging objects containing lists of strings.
    +   * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +   *
    +   * For a more complete example, suppose a streaming SQL query is
    +   * yielding a result set whose rows contain a single string
    +   * field. The following `PartialResultSet`s might be yielded:
    +   *
    +   * {
    +   * "metadata": { ... }
    +   * "values": ["Hello", "W"]
    +   * "chunked_value": true
    +   * "resume_token": "Af65..."
    +   * }
    +   * {
    +   * "values": ["orl"]
    +   * "chunked_value": true
    +   * }
    +   * {
    +   * "values": ["d"]
    +   * "resume_token": "Zx1B..."
    +   * }
    +   *
    +   * This sequence of `PartialResultSet`s encodes two rows, one
    +   * containing the field value `"Hello"`, and a second containing the
    +   * field value `"World" = "W" + "orl" + "d"`.
    +   *
    +   * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +   * resumed from a previously yielded `resume_token`. For the above sequence of
    +   * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +   * yields results from the `PartialResultSet` with value "orl".
    +   * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + @java.lang.Override + public int getValuesCount() { + return values_.size(); + } + + /** + * + * + *
    +   * A streamed result set consists of a stream of values, which might
    +   * be split into many `PartialResultSet` messages to accommodate
    +   * large rows and/or large values. Every N complete values defines a
    +   * row, where N is equal to the number of entries in
    +   * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +   *
    +   * Most values are encoded based on type as described
    +   * [here][google.spanner.v1.TypeCode].
    +   *
    +   * It's possible that the last value in values is "chunked",
    +   * meaning that the rest of the value is sent in subsequent
    +   * `PartialResultSet`(s). This is denoted by the
    +   * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +   * Two or more chunked values can be merged to form a complete value as
    +   * follows:
    +   *
    +   * * `bool/number/null`: can't be chunked
    +   * * `string`: concatenate the strings
    +   * * `list`: concatenate the lists. If the last element in a list is a
    +   * `string`, `list`, or `object`, merge it with the first element in
    +   * the next list by applying these rules recursively.
    +   * * `object`: concatenate the (field name, field value) pairs. If a
    +   * field name is duplicated, then apply these rules recursively
    +   * to merge the field values.
    +   *
    +   * Some examples of merging:
    +   *
    +   * Strings are concatenated.
    +   * "foo", "bar" => "foobar"
    +   *
    +   * Lists of non-strings are concatenated.
    +   * [2, 3], [4] => [2, 3, 4]
    +   *
    +   * Lists are concatenated, but the last and first elements are merged
    +   * because they are strings.
    +   * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +   *
    +   * Lists are concatenated, but the last and first elements are merged
    +   * because they are lists. Recursively, the last and first elements
    +   * of the inner lists are merged because they are strings.
    +   * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +   *
    +   * Non-overlapping object fields are combined.
    +   * {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
    +   *
    +   * Overlapping object fields are merged.
    +   * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +   *
    +   * Examples of merging objects containing lists of strings.
    +   * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +   *
    +   * For a more complete example, suppose a streaming SQL query is
    +   * yielding a result set whose rows contain a single string
    +   * field. The following `PartialResultSet`s might be yielded:
    +   *
    +   * {
    +   * "metadata": { ... }
    +   * "values": ["Hello", "W"]
    +   * "chunked_value": true
    +   * "resume_token": "Af65..."
    +   * }
    +   * {
    +   * "values": ["orl"]
    +   * "chunked_value": true
    +   * }
    +   * {
    +   * "values": ["d"]
    +   * "resume_token": "Zx1B..."
    +   * }
    +   *
    +   * This sequence of `PartialResultSet`s encodes two rows, one
    +   * containing the field value `"Hello"`, and a second containing the
    +   * field value `"World" = "W" + "orl" + "d"`.
    +   *
    +   * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +   * resumed from a previously yielded `resume_token`. For the above sequence of
    +   * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +   * yields results from the `PartialResultSet` with value "orl".
    +   * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + @java.lang.Override + public com.google.protobuf.Value getValues(int index) { + return values_.get(index); + } + + /** + * + * + *
    +   * A streamed result set consists of a stream of values, which might
    +   * be split into many `PartialResultSet` messages to accommodate
    +   * large rows and/or large values. Every N complete values defines a
    +   * row, where N is equal to the number of entries in
    +   * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +   *
    +   * Most values are encoded based on type as described
    +   * [here][google.spanner.v1.TypeCode].
    +   *
    +   * It's possible that the last value in values is "chunked",
    +   * meaning that the rest of the value is sent in subsequent
    +   * `PartialResultSet`(s). This is denoted by the
    +   * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +   * Two or more chunked values can be merged to form a complete value as
    +   * follows:
    +   *
    +   * * `bool/number/null`: can't be chunked
    +   * * `string`: concatenate the strings
    +   * * `list`: concatenate the lists. If the last element in a list is a
    +   * `string`, `list`, or `object`, merge it with the first element in
    +   * the next list by applying these rules recursively.
    +   * * `object`: concatenate the (field name, field value) pairs. If a
    +   * field name is duplicated, then apply these rules recursively
    +   * to merge the field values.
    +   *
    +   * Some examples of merging:
    +   *
    +   * Strings are concatenated.
    +   * "foo", "bar" => "foobar"
    +   *
    +   * Lists of non-strings are concatenated.
    +   * [2, 3], [4] => [2, 3, 4]
    +   *
    +   * Lists are concatenated, but the last and first elements are merged
    +   * because they are strings.
    +   * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +   *
    +   * Lists are concatenated, but the last and first elements are merged
    +   * because they are lists. Recursively, the last and first elements
    +   * of the inner lists are merged because they are strings.
    +   * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +   *
    +   * Non-overlapping object fields are combined.
    +   * {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
    +   *
    +   * Overlapping object fields are merged.
    +   * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +   *
    +   * Examples of merging objects containing lists of strings.
    +   * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +   *
    +   * For a more complete example, suppose a streaming SQL query is
    +   * yielding a result set whose rows contain a single string
    +   * field. The following `PartialResultSet`s might be yielded:
    +   *
    +   * {
    +   * "metadata": { ... }
    +   * "values": ["Hello", "W"]
    +   * "chunked_value": true
    +   * "resume_token": "Af65..."
    +   * }
    +   * {
    +   * "values": ["orl"]
    +   * "chunked_value": true
    +   * }
    +   * {
    +   * "values": ["d"]
    +   * "resume_token": "Zx1B..."
    +   * }
    +   *
    +   * This sequence of `PartialResultSet`s encodes two rows, one
    +   * containing the field value `"Hello"`, and a second containing the
    +   * field value `"World" = "W" + "orl" + "d"`.
    +   *
    +   * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +   * resumed from a previously yielded `resume_token`. For the above sequence of
    +   * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +   * yields results from the `PartialResultSet` with value "orl".
    +   * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + @java.lang.Override + public com.google.protobuf.ValueOrBuilder getValuesOrBuilder(int index) { + return values_.get(index); + } + + public static final int CHUNKED_VALUE_FIELD_NUMBER = 3; + private boolean chunkedValue_ = false; + + /** + * + * + *
    +   * If true, then the final value in
    +   * [values][google.spanner.v1.PartialResultSet.values] is chunked, and must be
    +   * combined with more values from subsequent `PartialResultSet`s to obtain a
    +   * complete field value.
    +   * 
    + * + * bool chunked_value = 3; + * + * @return The chunkedValue. + */ + @java.lang.Override + public boolean getChunkedValue() { + return chunkedValue_; + } + + public static final int RESUME_TOKEN_FIELD_NUMBER = 4; + private com.google.protobuf.ByteString resumeToken_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * Streaming calls might be interrupted for a variety of reasons, such
    +   * as TCP connection loss. If this occurs, the stream of results can
    +   * be resumed by re-sending the original request and including
    +   * `resume_token`. Note that executing any other transaction in the
    +   * same session invalidates the token.
    +   * 
    + * + * bytes resume_token = 4; + * + * @return The resumeToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getResumeToken() { + return resumeToken_; + } + + public static final int STATS_FIELD_NUMBER = 5; + private com.google.spanner.v1.ResultSetStats stats_; + + /** + * + * + *
    +   * Query plan and execution statistics for the statement that produced this
    +   * streaming result set. These can be requested by setting
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]
    +   * and are sent only once with the last response in the stream. This field is
    +   * also present in the last response for DML statements.
    +   * 
    + * + * .google.spanner.v1.ResultSetStats stats = 5; + * + * @return Whether the stats field is set. + */ + @java.lang.Override + public boolean hasStats() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Query plan and execution statistics for the statement that produced this
    +   * streaming result set. These can be requested by setting
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]
    +   * and are sent only once with the last response in the stream. This field is
    +   * also present in the last response for DML statements.
    +   * 
    + * + * .google.spanner.v1.ResultSetStats stats = 5; + * + * @return The stats. + */ + @java.lang.Override + public com.google.spanner.v1.ResultSetStats getStats() { + return stats_ == null ? com.google.spanner.v1.ResultSetStats.getDefaultInstance() : stats_; + } + + /** + * + * + *
    +   * Query plan and execution statistics for the statement that produced this
    +   * streaming result set. These can be requested by setting
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]
    +   * and are sent only once with the last response in the stream. This field is
    +   * also present in the last response for DML statements.
    +   * 
    + * + * .google.spanner.v1.ResultSetStats stats = 5; + */ + @java.lang.Override + public com.google.spanner.v1.ResultSetStatsOrBuilder getStatsOrBuilder() { + return stats_ == null ? com.google.spanner.v1.ResultSetStats.getDefaultInstance() : stats_; + } + + public static final int PRECOMMIT_TOKEN_FIELD_NUMBER = 8; + private com.google.spanner.v1.MultiplexedSessionPrecommitToken precommitToken_; + + /** + * + * + *
    +   * Optional. A precommit token is included if the read-write transaction
    +   * has multiplexed sessions enabled. Pass the precommit token with the highest
    +   * sequence number from this transaction attempt to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + @java.lang.Override + public boolean hasPrecommitToken() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * Optional. A precommit token is included if the read-write transaction
    +   * has multiplexed sessions enabled. Pass the precommit token with the highest
    +   * sequence number from this transaction attempt to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + + /** + * + * + *
    +   * Optional. A precommit token is included if the read-write transaction
    +   * has multiplexed sessions enabled. Pass the precommit token with the highest
    +   * sequence number from this transaction attempt to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + + public static final int LAST_FIELD_NUMBER = 9; + private boolean last_ = false; + + /** + * + * + *
    +   * Optional. Indicates whether this is the last `PartialResultSet` in the
    +   * stream. The server might optionally set this field. Clients shouldn't rely
    +   * on this field being set in all cases.
    +   * 
    + * + * bool last = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The last. + */ + @java.lang.Override + public boolean getLast() { + return last_; + } + + public static final int CACHE_UPDATE_FIELD_NUMBER = 10; + private com.google.spanner.v1.CacheUpdate cacheUpdate_; + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the cacheUpdate field is set. + */ + @java.lang.Override + public boolean hasCacheUpdate() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The cacheUpdate. + */ + @java.lang.Override + public com.google.spanner.v1.CacheUpdate getCacheUpdate() { + return cacheUpdate_ == null + ? com.google.spanner.v1.CacheUpdate.getDefaultInstance() + : cacheUpdate_; + } + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.CacheUpdateOrBuilder getCacheUpdateOrBuilder() { + return cacheUpdate_ == null + ? com.google.spanner.v1.CacheUpdate.getDefaultInstance() + : cacheUpdate_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getMetadata()); + } + for (int i = 0; i < values_.size(); i++) { + output.writeMessage(2, values_.get(i)); + } + if (chunkedValue_ != false) { + output.writeBool(3, chunkedValue_); + } + if (!resumeToken_.isEmpty()) { + output.writeBytes(4, resumeToken_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(5, getStats()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(8, getPrecommitToken()); + } + if (last_ != false) { + output.writeBool(9, last_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(10, getCacheUpdate()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getMetadata()); + } + for (int i = 0; i < values_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, values_.get(i)); + } + if (chunkedValue_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, chunkedValue_); + } + if (!resumeToken_.isEmpty()) { + size += 
com.google.protobuf.CodedOutputStream.computeBytesSize(4, resumeToken_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getStats()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(8, getPrecommitToken()); + } + if (last_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(9, last_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(10, getCacheUpdate()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.PartialResultSet)) { + return super.equals(obj); + } + com.google.spanner.v1.PartialResultSet other = (com.google.spanner.v1.PartialResultSet) obj; + + if (hasMetadata() != other.hasMetadata()) return false; + if (hasMetadata()) { + if (!getMetadata().equals(other.getMetadata())) return false; + } + if (!getValuesList().equals(other.getValuesList())) return false; + if (getChunkedValue() != other.getChunkedValue()) return false; + if (!getResumeToken().equals(other.getResumeToken())) return false; + if (hasStats() != other.hasStats()) return false; + if (hasStats()) { + if (!getStats().equals(other.getStats())) return false; + } + if (hasPrecommitToken() != other.hasPrecommitToken()) return false; + if (hasPrecommitToken()) { + if (!getPrecommitToken().equals(other.getPrecommitToken())) return false; + } + if (getLast() != other.getLast()) return false; + if (hasCacheUpdate() != other.hasCacheUpdate()) return false; + if (hasCacheUpdate()) { + if (!getCacheUpdate().equals(other.getCacheUpdate())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int 
hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasMetadata()) { + hash = (37 * hash) + METADATA_FIELD_NUMBER; + hash = (53 * hash) + getMetadata().hashCode(); + } + if (getValuesCount() > 0) { + hash = (37 * hash) + VALUES_FIELD_NUMBER; + hash = (53 * hash) + getValuesList().hashCode(); + } + hash = (37 * hash) + CHUNKED_VALUE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getChunkedValue()); + hash = (37 * hash) + RESUME_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getResumeToken().hashCode(); + if (hasStats()) { + hash = (37 * hash) + STATS_FIELD_NUMBER; + hash = (53 * hash) + getStats().hashCode(); + } + if (hasPrecommitToken()) { + hash = (37 * hash) + PRECOMMIT_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPrecommitToken().hashCode(); + } + hash = (37 * hash) + LAST_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getLast()); + if (hasCacheUpdate()) { + hash = (37 * hash) + CACHE_UPDATE_FIELD_NUMBER; + hash = (53 * hash) + getCacheUpdate().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.PartialResultSet parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PartialResultSet parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PartialResultSet parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PartialResultSet parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PartialResultSet parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PartialResultSet parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PartialResultSet parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PartialResultSet parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.PartialResultSet parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PartialResultSet parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.PartialResultSet parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.spanner.v1.PartialResultSet parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.PartialResultSet prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Partial results from a streaming read or SQL query. Streaming reads and
    +   * SQL queries better tolerate large result sets, large rows, and large
    +   * values, but are a little trickier to consume.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.PartialResultSet} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.PartialResultSet) + com.google.spanner.v1.PartialResultSetOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ResultSetProto + .internal_static_google_spanner_v1_PartialResultSet_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ResultSetProto + .internal_static_google_spanner_v1_PartialResultSet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.PartialResultSet.class, + com.google.spanner.v1.PartialResultSet.Builder.class); + } + + // Construct using com.google.spanner.v1.PartialResultSet.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetMetadataFieldBuilder(); + internalGetValuesFieldBuilder(); + internalGetStatsFieldBuilder(); + internalGetPrecommitTokenFieldBuilder(); + internalGetCacheUpdateFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + metadata_ = null; + if (metadataBuilder_ != null) { + metadataBuilder_.dispose(); + metadataBuilder_ = null; + } + if (valuesBuilder_ == null) { + values_ = java.util.Collections.emptyList(); + } else { + values_ = null; + valuesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + chunkedValue_ = false; + resumeToken_ = com.google.protobuf.ByteString.EMPTY; + stats_ = null; + if (statsBuilder_ 
!= null) { + statsBuilder_.dispose(); + statsBuilder_ = null; + } + precommitToken_ = null; + if (precommitTokenBuilder_ != null) { + precommitTokenBuilder_.dispose(); + precommitTokenBuilder_ = null; + } + last_ = false; + cacheUpdate_ = null; + if (cacheUpdateBuilder_ != null) { + cacheUpdateBuilder_.dispose(); + cacheUpdateBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.ResultSetProto + .internal_static_google_spanner_v1_PartialResultSet_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.PartialResultSet getDefaultInstanceForType() { + return com.google.spanner.v1.PartialResultSet.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.PartialResultSet build() { + com.google.spanner.v1.PartialResultSet result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.PartialResultSet buildPartial() { + com.google.spanner.v1.PartialResultSet result = + new com.google.spanner.v1.PartialResultSet(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.v1.PartialResultSet result) { + if (valuesBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + values_ = java.util.Collections.unmodifiableList(values_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.values_ = values_; + } else { + result.values_ = valuesBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.PartialResultSet result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.metadata_ = metadataBuilder_ == null ? 
metadata_ : metadataBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.chunkedValue_ = chunkedValue_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.resumeToken_ = resumeToken_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.stats_ = statsBuilder_ == null ? stats_ : statsBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.precommitToken_ = + precommitTokenBuilder_ == null ? precommitToken_ : precommitTokenBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.last_ = last_; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.cacheUpdate_ = + cacheUpdateBuilder_ == null ? cacheUpdate_ : cacheUpdateBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.PartialResultSet) { + return mergeFrom((com.google.spanner.v1.PartialResultSet) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.PartialResultSet other) { + if (other == com.google.spanner.v1.PartialResultSet.getDefaultInstance()) return this; + if (other.hasMetadata()) { + mergeMetadata(other.getMetadata()); + } + if (valuesBuilder_ == null) { + if (!other.values_.isEmpty()) { + if (values_.isEmpty()) { + values_ = other.values_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureValuesIsMutable(); + values_.addAll(other.values_); + } + onChanged(); + } + } else { + if (!other.values_.isEmpty()) { + if (valuesBuilder_.isEmpty()) { + valuesBuilder_.dispose(); + valuesBuilder_ = null; + values_ = other.values_; + bitField0_ = (bitField0_ & ~0x00000002); + valuesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetValuesFieldBuilder() + : null; + } else { + valuesBuilder_.addAllMessages(other.values_); + } + } + } + if (other.getChunkedValue() != false) { + setChunkedValue(other.getChunkedValue()); + } + if (!other.getResumeToken().isEmpty()) { + setResumeToken(other.getResumeToken()); + } + if (other.hasStats()) { + mergeStats(other.getStats()); + } + if (other.hasPrecommitToken()) { + mergePrecommitToken(other.getPrecommitToken()); + } + if (other.getLast() != false) { + setLast(other.getLast()); + } + if (other.hasCacheUpdate()) { + mergeCacheUpdate(other.getCacheUpdate()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetMetadataFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.protobuf.Value m = + input.readMessage(com.google.protobuf.Value.parser(), extensionRegistry); + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + values_.add(m); + } else { + valuesBuilder_.addMessage(m); + } + break; + } // case 18 + case 24: + { + chunkedValue_ = input.readBool(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: + { + resumeToken_ = input.readBytes(); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + input.readMessage(internalGetStatsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 66: + { + input.readMessage( + 
internalGetPrecommitTokenFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000020; + break; + } // case 66 + case 72: + { + last_ = input.readBool(); + bitField0_ |= 0x00000040; + break; + } // case 72 + case 82: + { + input.readMessage( + internalGetCacheUpdateFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000080; + break; + } // case 82 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.v1.ResultSetMetadata metadata_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ResultSetMetadata, + com.google.spanner.v1.ResultSetMetadata.Builder, + com.google.spanner.v1.ResultSetMetadataOrBuilder> + metadataBuilder_; + + /** + * + * + *
    +     * Metadata about the result set, such as row type information.
    +     * Only present in the first response.
    +     * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + * + * @return Whether the metadata field is set. + */ + public boolean hasMetadata() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Metadata about the result set, such as row type information.
    +     * Only present in the first response.
    +     * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + * + * @return The metadata. + */ + public com.google.spanner.v1.ResultSetMetadata getMetadata() { + if (metadataBuilder_ == null) { + return metadata_ == null + ? com.google.spanner.v1.ResultSetMetadata.getDefaultInstance() + : metadata_; + } else { + return metadataBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Metadata about the result set, such as row type information.
    +     * Only present in the first response.
    +     * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + */ + public Builder setMetadata(com.google.spanner.v1.ResultSetMetadata value) { + if (metadataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + metadata_ = value; + } else { + metadataBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Metadata about the result set, such as row type information.
    +     * Only present in the first response.
    +     * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + */ + public Builder setMetadata(com.google.spanner.v1.ResultSetMetadata.Builder builderForValue) { + if (metadataBuilder_ == null) { + metadata_ = builderForValue.build(); + } else { + metadataBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Metadata about the result set, such as row type information.
    +     * Only present in the first response.
    +     * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + */ + public Builder mergeMetadata(com.google.spanner.v1.ResultSetMetadata value) { + if (metadataBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && metadata_ != null + && metadata_ != com.google.spanner.v1.ResultSetMetadata.getDefaultInstance()) { + getMetadataBuilder().mergeFrom(value); + } else { + metadata_ = value; + } + } else { + metadataBuilder_.mergeFrom(value); + } + if (metadata_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Metadata about the result set, such as row type information.
    +     * Only present in the first response.
    +     * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + */ + public Builder clearMetadata() { + bitField0_ = (bitField0_ & ~0x00000001); + metadata_ = null; + if (metadataBuilder_ != null) { + metadataBuilder_.dispose(); + metadataBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Metadata about the result set, such as row type information.
    +     * Only present in the first response.
    +     * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + */ + public com.google.spanner.v1.ResultSetMetadata.Builder getMetadataBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetMetadataFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Metadata about the result set, such as row type information.
    +     * Only present in the first response.
    +     * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + */ + public com.google.spanner.v1.ResultSetMetadataOrBuilder getMetadataOrBuilder() { + if (metadataBuilder_ != null) { + return metadataBuilder_.getMessageOrBuilder(); + } else { + return metadata_ == null + ? com.google.spanner.v1.ResultSetMetadata.getDefaultInstance() + : metadata_; + } + } + + /** + * + * + *
    +     * Metadata about the result set, such as row type information.
    +     * Only present in the first response.
    +     * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ResultSetMetadata, + com.google.spanner.v1.ResultSetMetadata.Builder, + com.google.spanner.v1.ResultSetMetadataOrBuilder> + internalGetMetadataFieldBuilder() { + if (metadataBuilder_ == null) { + metadataBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ResultSetMetadata, + com.google.spanner.v1.ResultSetMetadata.Builder, + com.google.spanner.v1.ResultSetMetadataOrBuilder>( + getMetadata(), getParentForChildren(), isClean()); + metadata_ = null; + } + return metadataBuilder_; + } + + private java.util.List values_ = java.util.Collections.emptyList(); + + private void ensureValuesIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + values_ = new java.util.ArrayList(values_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.protobuf.Value, + com.google.protobuf.Value.Builder, + com.google.protobuf.ValueOrBuilder> + valuesBuilder_; + + /** + * + * + *
    +     * A streamed result set consists of a stream of values, which might
    +     * be split into many `PartialResultSet` messages to accommodate
    +     * large rows and/or large values. Every N complete values defines a
    +     * row, where N is equal to the number of entries in
    +     * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +     *
    +     * Most values are encoded based on type as described
    +     * [here][google.spanner.v1.TypeCode].
    +     *
    +     * It's possible that the last value in values is "chunked",
    +     * meaning that the rest of the value is sent in subsequent
    +     * `PartialResultSet`(s). This is denoted by the
    +     * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +     * Two or more chunked values can be merged to form a complete value as
    +     * follows:
    +     *
    +     * * `bool/number/null`: can't be chunked
    +     * * `string`: concatenate the strings
    +     * * `list`: concatenate the lists. If the last element in a list is a
    +     * `string`, `list`, or `object`, merge it with the first element in
    +     * the next list by applying these rules recursively.
    +     * * `object`: concatenate the (field name, field value) pairs. If a
    +     * field name is duplicated, then apply these rules recursively
    +     * to merge the field values.
    +     *
    +     * Some examples of merging:
    +     *
    +     * Strings are concatenated.
    +     * "foo", "bar" => "foobar"
    +     *
    +     * Lists of non-strings are concatenated.
    +     * [2, 3], [4] => [2, 3, 4]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are strings.
    +     * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are lists. Recursively, the last and first elements
    +     * of the inner lists are merged because they are strings.
    +     * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +     *
    +     * Non-overlapping object fields are combined.
    +     * {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
    +     *
    +     * Overlapping object fields are merged.
    +     * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +     *
    +     * Examples of merging objects containing lists of strings.
    +     * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +     *
    +     * For a more complete example, suppose a streaming SQL query is
    +     * yielding a result set whose rows contain a single string
    +     * field. The following `PartialResultSet`s might be yielded:
    +     *
    +     * {
    +     * "metadata": { ... }
    +     * "values": ["Hello", "W"]
    +     * "chunked_value": true
    +     * "resume_token": "Af65..."
    +     * }
    +     * {
    +     * "values": ["orl"]
    +     * "chunked_value": true
    +     * }
    +     * {
    +     * "values": ["d"]
    +     * "resume_token": "Zx1B..."
    +     * }
    +     *
    +     * This sequence of `PartialResultSet`s encodes two rows, one
    +     * containing the field value `"Hello"`, and a second containing the
    +     * field value `"World" = "W" + "orl" + "d"`.
    +     *
    +     * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +     * resumed from a previously yielded `resume_token`. For the above sequence of
    +     * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +     * yields results from the `PartialResultSet` with value "orl".
    +     * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + public java.util.List getValuesList() { + if (valuesBuilder_ == null) { + return java.util.Collections.unmodifiableList(values_); + } else { + return valuesBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * A streamed result set consists of a stream of values, which might
    +     * be split into many `PartialResultSet` messages to accommodate
    +     * large rows and/or large values. Every N complete values defines a
    +     * row, where N is equal to the number of entries in
    +     * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +     *
    +     * Most values are encoded based on type as described
    +     * [here][google.spanner.v1.TypeCode].
    +     *
    +     * It's possible that the last value in values is "chunked",
    +     * meaning that the rest of the value is sent in subsequent
    +     * `PartialResultSet`(s). This is denoted by the
    +     * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +     * Two or more chunked values can be merged to form a complete value as
    +     * follows:
    +     *
    +     * * `bool/number/null`: can't be chunked
    +     * * `string`: concatenate the strings
    +     * * `list`: concatenate the lists. If the last element in a list is a
    +     * `string`, `list`, or `object`, merge it with the first element in
    +     * the next list by applying these rules recursively.
    +     * * `object`: concatenate the (field name, field value) pairs. If a
    +     * field name is duplicated, then apply these rules recursively
    +     * to merge the field values.
    +     *
    +     * Some examples of merging:
    +     *
    +     * Strings are concatenated.
    +     * "foo", "bar" => "foobar"
    +     *
    +     * Lists of non-strings are concatenated.
    +     * [2, 3], [4] => [2, 3, 4]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are strings.
    +     * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are lists. Recursively, the last and first elements
    +     * of the inner lists are merged because they are strings.
    +     * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +     *
    +     * Non-overlapping object fields are combined.
    +     * {"a": "1"}, {"b": "2"} => {"a": "1", "b": "2"}
    +     *
    +     * Overlapping object fields are merged.
    +     * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +     *
    +     * Examples of merging objects containing lists of strings.
    +     * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +     *
    +     * For a more complete example, suppose a streaming SQL query is
    +     * yielding a result set whose rows contain a single string
    +     * field. The following `PartialResultSet`s might be yielded:
    +     *
    +     * {
    +     * "metadata": { ... }
    +     * "values": ["Hello", "W"]
    +     * "chunked_value": true
    +     * "resume_token": "Af65..."
    +     * }
    +     * {
    +     * "values": ["orl"]
    +     * "chunked_value": true
    +     * }
    +     * {
    +     * "values": ["d"]
    +     * "resume_token": "Zx1B..."
    +     * }
    +     *
    +     * This sequence of `PartialResultSet`s encodes two rows, one
    +     * containing the field value `"Hello"`, and a second containing the
    +     * field value `"World" = "W" + "orl" + "d"`.
    +     *
    +     * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +     * resumed from a previously yielded `resume_token`. For the above sequence of
    +     * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +     * yields results from the `PartialResultSet` with value "orl".
    +     * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + public int getValuesCount() { + if (valuesBuilder_ == null) { + return values_.size(); + } else { + return valuesBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * A streamed result set consists of a stream of values, which might
    +     * be split into many `PartialResultSet` messages to accommodate
    +     * large rows and/or large values. Every N complete values defines a
    +     * row, where N is equal to the number of entries in
    +     * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +     *
    +     * Most values are encoded based on type as described
    +     * [here][google.spanner.v1.TypeCode].
    +     *
    +     * It's possible that the last value in values is "chunked",
    +     * meaning that the rest of the value is sent in subsequent
    +     * `PartialResultSet`(s). This is denoted by the
    +     * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +     * Two or more chunked values can be merged to form a complete value as
    +     * follows:
    +     *
    +     * * `bool/number/null`: can't be chunked
    +     * * `string`: concatenate the strings
    +     * * `list`: concatenate the lists. If the last element in a list is a
    +     * `string`, `list`, or `object`, merge it with the first element in
    +     * the next list by applying these rules recursively.
    +     * * `object`: concatenate the (field name, field value) pairs. If a
    +     * field name is duplicated, then apply these rules recursively
    +     * to merge the field values.
    +     *
    +     * Some examples of merging:
    +     *
    +     * Strings are concatenated.
    +     * "foo", "bar" => "foobar"
    +     *
    +     * Lists of non-strings are concatenated.
    +     * [2, 3], [4] => [2, 3, 4]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are strings.
    +     * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are lists. Recursively, the last and first elements
    +     * of the inner lists are merged because they are strings.
    +     * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +     *
    +     * Non-overlapping object fields are combined.
    +     * {"a": "1"}, {"b": "2"} => {"a": "1", "b": "2"}
    +     *
    +     * Overlapping object fields are merged.
    +     * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +     *
    +     * Examples of merging objects containing lists of strings.
    +     * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +     *
    +     * For a more complete example, suppose a streaming SQL query is
    +     * yielding a result set whose rows contain a single string
    +     * field. The following `PartialResultSet`s might be yielded:
    +     *
    +     * {
    +     * "metadata": { ... }
    +     * "values": ["Hello", "W"]
    +     * "chunked_value": true
    +     * "resume_token": "Af65..."
    +     * }
    +     * {
    +     * "values": ["orl"]
    +     * "chunked_value": true
    +     * }
    +     * {
    +     * "values": ["d"]
    +     * "resume_token": "Zx1B..."
    +     * }
    +     *
    +     * This sequence of `PartialResultSet`s encodes two rows, one
    +     * containing the field value `"Hello"`, and a second containing the
    +     * field value `"World" = "W" + "orl" + "d"`.
    +     *
    +     * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +     * resumed from a previously yielded `resume_token`. For the above sequence of
    +     * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +     * yields results from the `PartialResultSet` with value "orl".
    +     * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + public com.google.protobuf.Value getValues(int index) { + if (valuesBuilder_ == null) { + return values_.get(index); + } else { + return valuesBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * A streamed result set consists of a stream of values, which might
    +     * be split into many `PartialResultSet` messages to accommodate
    +     * large rows and/or large values. Every N complete values defines a
    +     * row, where N is equal to the number of entries in
    +     * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +     *
    +     * Most values are encoded based on type as described
    +     * [here][google.spanner.v1.TypeCode].
    +     *
    +     * It's possible that the last value in values is "chunked",
    +     * meaning that the rest of the value is sent in subsequent
    +     * `PartialResultSet`(s). This is denoted by the
    +     * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +     * Two or more chunked values can be merged to form a complete value as
    +     * follows:
    +     *
    +     * * `bool/number/null`: can't be chunked
    +     * * `string`: concatenate the strings
    +     * * `list`: concatenate the lists. If the last element in a list is a
    +     * `string`, `list`, or `object`, merge it with the first element in
    +     * the next list by applying these rules recursively.
    +     * * `object`: concatenate the (field name, field value) pairs. If a
    +     * field name is duplicated, then apply these rules recursively
    +     * to merge the field values.
    +     *
    +     * Some examples of merging:
    +     *
    +     * Strings are concatenated.
    +     * "foo", "bar" => "foobar"
    +     *
    +     * Lists of non-strings are concatenated.
    +     * [2, 3], [4] => [2, 3, 4]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are strings.
    +     * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are lists. Recursively, the last and first elements
    +     * of the inner lists are merged because they are strings.
    +     * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +     *
    +     * Non-overlapping object fields are combined.
    +     * {"a": "1"}, {"b": "2"} => {"a": "1", "b": "2"}
    +     *
    +     * Overlapping object fields are merged.
    +     * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +     *
    +     * Examples of merging objects containing lists of strings.
    +     * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +     *
    +     * For a more complete example, suppose a streaming SQL query is
    +     * yielding a result set whose rows contain a single string
    +     * field. The following `PartialResultSet`s might be yielded:
    +     *
    +     * {
    +     * "metadata": { ... }
    +     * "values": ["Hello", "W"]
    +     * "chunked_value": true
    +     * "resume_token": "Af65..."
    +     * }
    +     * {
    +     * "values": ["orl"]
    +     * "chunked_value": true
    +     * }
    +     * {
    +     * "values": ["d"]
    +     * "resume_token": "Zx1B..."
    +     * }
    +     *
    +     * This sequence of `PartialResultSet`s encodes two rows, one
    +     * containing the field value `"Hello"`, and a second containing the
    +     * field value `"World" = "W" + "orl" + "d"`.
    +     *
    +     * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +     * resumed from a previously yielded `resume_token`. For the above sequence of
    +     * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +     * yields results from the `PartialResultSet` with value "orl".
    +     * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + public Builder setValues(int index, com.google.protobuf.Value value) { + if (valuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.set(index, value); + onChanged(); + } else { + valuesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * A streamed result set consists of a stream of values, which might
    +     * be split into many `PartialResultSet` messages to accommodate
    +     * large rows and/or large values. Every N complete values defines a
    +     * row, where N is equal to the number of entries in
    +     * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +     *
    +     * Most values are encoded based on type as described
    +     * [here][google.spanner.v1.TypeCode].
    +     *
    +     * It's possible that the last value in values is "chunked",
    +     * meaning that the rest of the value is sent in subsequent
    +     * `PartialResultSet`(s). This is denoted by the
    +     * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +     * Two or more chunked values can be merged to form a complete value as
    +     * follows:
    +     *
    +     * * `bool/number/null`: can't be chunked
    +     * * `string`: concatenate the strings
    +     * * `list`: concatenate the lists. If the last element in a list is a
    +     * `string`, `list`, or `object`, merge it with the first element in
    +     * the next list by applying these rules recursively.
    +     * * `object`: concatenate the (field name, field value) pairs. If a
    +     * field name is duplicated, then apply these rules recursively
    +     * to merge the field values.
    +     *
    +     * Some examples of merging:
    +     *
    +     * Strings are concatenated.
    +     * "foo", "bar" => "foobar"
    +     *
    +     * Lists of non-strings are concatenated.
    +     * [2, 3], [4] => [2, 3, 4]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are strings.
    +     * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are lists. Recursively, the last and first elements
    +     * of the inner lists are merged because they are strings.
    +     * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +     *
    +     * Non-overlapping object fields are combined.
    +     * {"a": "1"}, {"b": "2"} => {"a": "1", "b": "2"}
    +     *
    +     * Overlapping object fields are merged.
    +     * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +     *
    +     * Examples of merging objects containing lists of strings.
    +     * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +     *
    +     * For a more complete example, suppose a streaming SQL query is
    +     * yielding a result set whose rows contain a single string
    +     * field. The following `PartialResultSet`s might be yielded:
    +     *
    +     * {
    +     * "metadata": { ... }
    +     * "values": ["Hello", "W"]
    +     * "chunked_value": true
    +     * "resume_token": "Af65..."
    +     * }
    +     * {
    +     * "values": ["orl"]
    +     * "chunked_value": true
    +     * }
    +     * {
    +     * "values": ["d"]
    +     * "resume_token": "Zx1B..."
    +     * }
    +     *
    +     * This sequence of `PartialResultSet`s encodes two rows, one
    +     * containing the field value `"Hello"`, and a second containing the
    +     * field value `"World" = "W" + "orl" + "d"`.
    +     *
    +     * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +     * resumed from a previously yielded `resume_token`. For the above sequence of
    +     * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +     * yields results from the `PartialResultSet` with value "orl".
    +     * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + public Builder setValues(int index, com.google.protobuf.Value.Builder builderForValue) { + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + values_.set(index, builderForValue.build()); + onChanged(); + } else { + valuesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A streamed result set consists of a stream of values, which might
    +     * be split into many `PartialResultSet` messages to accommodate
    +     * large rows and/or large values. Every N complete values defines a
    +     * row, where N is equal to the number of entries in
    +     * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +     *
    +     * Most values are encoded based on type as described
    +     * [here][google.spanner.v1.TypeCode].
    +     *
    +     * It's possible that the last value in values is "chunked",
    +     * meaning that the rest of the value is sent in subsequent
    +     * `PartialResultSet`(s). This is denoted by the
    +     * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +     * Two or more chunked values can be merged to form a complete value as
    +     * follows:
    +     *
    +     * * `bool/number/null`: can't be chunked
    +     * * `string`: concatenate the strings
    +     * * `list`: concatenate the lists. If the last element in a list is a
    +     * `string`, `list`, or `object`, merge it with the first element in
    +     * the next list by applying these rules recursively.
    +     * * `object`: concatenate the (field name, field value) pairs. If a
    +     * field name is duplicated, then apply these rules recursively
    +     * to merge the field values.
    +     *
    +     * Some examples of merging:
    +     *
    +     * Strings are concatenated.
    +     * "foo", "bar" => "foobar"
    +     *
    +     * Lists of non-strings are concatenated.
    +     * [2, 3], [4] => [2, 3, 4]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are strings.
    +     * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are lists. Recursively, the last and first elements
    +     * of the inner lists are merged because they are strings.
    +     * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +     *
    +     * Non-overlapping object fields are combined.
    +     * {"a": "1"}, {"b": "2"} => {"a": "1", "b": "2"}
    +     *
    +     * Overlapping object fields are merged.
    +     * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +     *
    +     * Examples of merging objects containing lists of strings.
    +     * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +     *
    +     * For a more complete example, suppose a streaming SQL query is
    +     * yielding a result set whose rows contain a single string
    +     * field. The following `PartialResultSet`s might be yielded:
    +     *
    +     * {
    +     * "metadata": { ... }
    +     * "values": ["Hello", "W"]
    +     * "chunked_value": true
    +     * "resume_token": "Af65..."
    +     * }
    +     * {
    +     * "values": ["orl"]
    +     * "chunked_value": true
    +     * }
    +     * {
    +     * "values": ["d"]
    +     * "resume_token": "Zx1B..."
    +     * }
    +     *
    +     * This sequence of `PartialResultSet`s encodes two rows, one
    +     * containing the field value `"Hello"`, and a second containing the
    +     * field value `"World" = "W" + "orl" + "d"`.
    +     *
    +     * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +     * resumed from a previously yielded `resume_token`. For the above sequence of
    +     * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +     * yields results from the `PartialResultSet` with value "orl".
    +     * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + public Builder addValues(com.google.protobuf.Value value) { + if (valuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.add(value); + onChanged(); + } else { + valuesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * A streamed result set consists of a stream of values, which might
    +     * be split into many `PartialResultSet` messages to accommodate
    +     * large rows and/or large values. Every N complete values defines a
    +     * row, where N is equal to the number of entries in
    +     * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +     *
    +     * Most values are encoded based on type as described
    +     * [here][google.spanner.v1.TypeCode].
    +     *
    +     * It's possible that the last value in values is "chunked",
    +     * meaning that the rest of the value is sent in subsequent
    +     * `PartialResultSet`(s). This is denoted by the
    +     * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +     * Two or more chunked values can be merged to form a complete value as
    +     * follows:
    +     *
    +     * * `bool/number/null`: can't be chunked
    +     * * `string`: concatenate the strings
    +     * * `list`: concatenate the lists. If the last element in a list is a
    +     * `string`, `list`, or `object`, merge it with the first element in
    +     * the next list by applying these rules recursively.
    +     * * `object`: concatenate the (field name, field value) pairs. If a
    +     * field name is duplicated, then apply these rules recursively
    +     * to merge the field values.
    +     *
    +     * Some examples of merging:
    +     *
    +     * Strings are concatenated.
    +     * "foo", "bar" => "foobar"
    +     *
    +     * Lists of non-strings are concatenated.
    +     * [2, 3], [4] => [2, 3, 4]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are strings.
    +     * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are lists. Recursively, the last and first elements
    +     * of the inner lists are merged because they are strings.
    +     * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +     *
    +     * Non-overlapping object fields are combined.
    +     * {"a": "1"}, {"b": "2"} => {"a": "1", "b": "2"}
    +     *
    +     * Overlapping object fields are merged.
    +     * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +     *
    +     * Examples of merging objects containing lists of strings.
    +     * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +     *
    +     * For a more complete example, suppose a streaming SQL query is
    +     * yielding a result set whose rows contain a single string
    +     * field. The following `PartialResultSet`s might be yielded:
    +     *
    +     * {
    +     * "metadata": { ... }
    +     * "values": ["Hello", "W"]
    +     * "chunked_value": true
    +     * "resume_token": "Af65..."
    +     * }
    +     * {
    +     * "values": ["orl"]
    +     * "chunked_value": true
    +     * }
    +     * {
    +     * "values": ["d"]
    +     * "resume_token": "Zx1B..."
    +     * }
    +     *
    +     * This sequence of `PartialResultSet`s encodes two rows, one
    +     * containing the field value `"Hello"`, and a second containing the
    +     * field value `"World" = "W" + "orl" + "d"`.
    +     *
    +     * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +     * resumed from a previously yielded `resume_token`. For the above sequence of
    +     * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +     * yields results from the `PartialResultSet` with value "orl".
    +     * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + public Builder addValues(int index, com.google.protobuf.Value value) { + if (valuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.add(index, value); + onChanged(); + } else { + valuesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * A streamed result set consists of a stream of values, which might
    +     * be split into many `PartialResultSet` messages to accommodate
    +     * large rows and/or large values. Every N complete values defines a
    +     * row, where N is equal to the number of entries in
    +     * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +     *
    +     * Most values are encoded based on type as described
    +     * [here][google.spanner.v1.TypeCode].
    +     *
    +     * It's possible that the last value in values is "chunked",
    +     * meaning that the rest of the value is sent in subsequent
    +     * `PartialResultSet`(s). This is denoted by the
    +     * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +     * Two or more chunked values can be merged to form a complete value as
    +     * follows:
    +     *
    +     * * `bool/number/null`: can't be chunked
    +     * * `string`: concatenate the strings
    +     * * `list`: concatenate the lists. If the last element in a list is a
    +     * `string`, `list`, or `object`, merge it with the first element in
    +     * the next list by applying these rules recursively.
    +     * * `object`: concatenate the (field name, field value) pairs. If a
    +     * field name is duplicated, then apply these rules recursively
    +     * to merge the field values.
    +     *
    +     * Some examples of merging:
    +     *
    +     * Strings are concatenated.
    +     * "foo", "bar" => "foobar"
    +     *
    +     * Lists of non-strings are concatenated.
    +     * [2, 3], [4] => [2, 3, 4]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are strings.
    +     * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are lists. Recursively, the last and first elements
    +     * of the inner lists are merged because they are strings.
    +     * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +     *
    +     * Non-overlapping object fields are combined.
    +     * {"a": "1"}, {"b": "2"} => {"a": "1", "b": "2"}
    +     *
    +     * Overlapping object fields are merged.
    +     * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +     *
    +     * Examples of merging objects containing lists of strings.
    +     * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +     *
    +     * For a more complete example, suppose a streaming SQL query is
    +     * yielding a result set whose rows contain a single string
    +     * field. The following `PartialResultSet`s might be yielded:
    +     *
    +     * {
    +     * "metadata": { ... }
    +     * "values": ["Hello", "W"]
    +     * "chunked_value": true
    +     * "resume_token": "Af65..."
    +     * }
    +     * {
    +     * "values": ["orl"]
    +     * "chunked_value": true
    +     * }
    +     * {
    +     * "values": ["d"]
    +     * "resume_token": "Zx1B..."
    +     * }
    +     *
    +     * This sequence of `PartialResultSet`s encodes two rows, one
    +     * containing the field value `"Hello"`, and a second containing the
    +     * field value `"World" = "W" + "orl" + "d"`.
    +     *
    +     * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +     * resumed from a previously yielded `resume_token`. For the above sequence of
    +     * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +     * yields results from the `PartialResultSet` with value "orl".
    +     * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + public Builder addValues(com.google.protobuf.Value.Builder builderForValue) { + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + values_.add(builderForValue.build()); + onChanged(); + } else { + valuesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A streamed result set consists of a stream of values, which might
    +     * be split into many `PartialResultSet` messages to accommodate
    +     * large rows and/or large values. Every N complete values defines a
    +     * row, where N is equal to the number of entries in
    +     * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +     *
    +     * Most values are encoded based on type as described
    +     * [here][google.spanner.v1.TypeCode].
    +     *
    +     * It's possible that the last value in values is "chunked",
    +     * meaning that the rest of the value is sent in subsequent
    +     * `PartialResultSet`(s). This is denoted by the
    +     * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +     * Two or more chunked values can be merged to form a complete value as
    +     * follows:
    +     *
    +     * * `bool/number/null`: can't be chunked
    +     * * `string`: concatenate the strings
    +     * * `list`: concatenate the lists. If the last element in a list is a
    +     * `string`, `list`, or `object`, merge it with the first element in
    +     * the next list by applying these rules recursively.
    +     * * `object`: concatenate the (field name, field value) pairs. If a
    +     * field name is duplicated, then apply these rules recursively
    +     * to merge the field values.
    +     *
    +     * Some examples of merging:
    +     *
    +     * Strings are concatenated.
    +     * "foo", "bar" => "foobar"
    +     *
    +     * Lists of non-strings are concatenated.
    +     * [2, 3], [4] => [2, 3, 4]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are strings.
    +     * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are lists. Recursively, the last and first elements
    +     * of the inner lists are merged because they are strings.
    +     * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +     *
    +     * Non-overlapping object fields are combined.
    +     * {"a": "1"}, {"b": "2"} => {"a": "1", "b": "2"}
    +     *
    +     * Overlapping object fields are merged.
    +     * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +     *
    +     * Examples of merging objects containing lists of strings.
    +     * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +     *
    +     * For a more complete example, suppose a streaming SQL query is
    +     * yielding a result set whose rows contain a single string
    +     * field. The following `PartialResultSet`s might be yielded:
    +     *
    +     * {
    +     * "metadata": { ... }
    +     * "values": ["Hello", "W"]
    +     * "chunked_value": true
    +     * "resume_token": "Af65..."
    +     * }
    +     * {
    +     * "values": ["orl"]
    +     * "chunked_value": true
    +     * }
    +     * {
    +     * "values": ["d"]
    +     * "resume_token": "Zx1B..."
    +     * }
    +     *
    +     * This sequence of `PartialResultSet`s encodes two rows, one
    +     * containing the field value `"Hello"`, and a second containing the
    +     * field value `"World" = "W" + "orl" + "d"`.
    +     *
    +     * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +     * resumed from a previously yielded `resume_token`. For the above sequence of
    +     * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +     * yields results from the `PartialResultSet` with value "orl".
    +     * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + public Builder addValues(int index, com.google.protobuf.Value.Builder builderForValue) { + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + values_.add(index, builderForValue.build()); + onChanged(); + } else { + valuesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A streamed result set consists of a stream of values, which might
    +     * be split into many `PartialResultSet` messages to accommodate
    +     * large rows and/or large values. Every N complete values defines a
    +     * row, where N is equal to the number of entries in
    +     * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +     *
    +     * Most values are encoded based on type as described
    +     * [here][google.spanner.v1.TypeCode].
    +     *
    +     * It's possible that the last value in values is "chunked",
    +     * meaning that the rest of the value is sent in subsequent
    +     * `PartialResultSet`(s). This is denoted by the
    +     * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +     * Two or more chunked values can be merged to form a complete value as
    +     * follows:
    +     *
    +     * * `bool/number/null`: can't be chunked
    +     * * `string`: concatenate the strings
    +     * * `list`: concatenate the lists. If the last element in a list is a
    +     * `string`, `list`, or `object`, merge it with the first element in
    +     * the next list by applying these rules recursively.
    +     * * `object`: concatenate the (field name, field value) pairs. If a
    +     * field name is duplicated, then apply these rules recursively
    +     * to merge the field values.
    +     *
    +     * Some examples of merging:
    +     *
    +     * Strings are concatenated.
    +     * "foo", "bar" => "foobar"
    +     *
    +     * Lists of non-strings are concatenated.
    +     * [2, 3], [4] => [2, 3, 4]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are strings.
    +     * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are lists. Recursively, the last and first elements
    +     * of the inner lists are merged because they are strings.
    +     * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +     *
    +     * Non-overlapping object fields are combined.
    +     * {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
    +     *
    +     * Overlapping object fields are merged.
    +     * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +     *
    +     * Examples of merging objects containing lists of strings.
    +     * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +     *
    +     * For a more complete example, suppose a streaming SQL query is
    +     * yielding a result set whose rows contain a single string
    +     * field. The following `PartialResultSet`s might be yielded:
    +     *
    +     * {
    +     * "metadata": { ... }
    +     * "values": ["Hello", "W"]
    +     * "chunked_value": true
    +     * "resume_token": "Af65..."
    +     * }
    +     * {
    +     * "values": ["orl"]
    +     * "chunked_value": true
    +     * }
    +     * {
    +     * "values": ["d"]
    +     * "resume_token": "Zx1B..."
    +     * }
    +     *
    +     * This sequence of `PartialResultSet`s encodes two rows, one
    +     * containing the field value `"Hello"`, and a second containing the
    +     * field value `"World" = "W" + "orl" + "d"`.
    +     *
    +     * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +     * resumed from a previously yielded `resume_token`. For the above sequence of
    +     * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +     * yields results from the `PartialResultSet` with value "orl".
    +     * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + public Builder addAllValues(java.lang.Iterable values) { + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, values_); + onChanged(); + } else { + valuesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * A streamed result set consists of a stream of values, which might
    +     * be split into many `PartialResultSet` messages to accommodate
    +     * large rows and/or large values. Every N complete values defines a
    +     * row, where N is equal to the number of entries in
    +     * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +     *
    +     * Most values are encoded based on type as described
    +     * [here][google.spanner.v1.TypeCode].
    +     *
    +     * It's possible that the last value in values is "chunked",
    +     * meaning that the rest of the value is sent in subsequent
    +     * `PartialResultSet`(s). This is denoted by the
    +     * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +     * Two or more chunked values can be merged to form a complete value as
    +     * follows:
    +     *
    +     * * `bool/number/null`: can't be chunked
    +     * * `string`: concatenate the strings
    +     * * `list`: concatenate the lists. If the last element in a list is a
    +     * `string`, `list`, or `object`, merge it with the first element in
    +     * the next list by applying these rules recursively.
    +     * * `object`: concatenate the (field name, field value) pairs. If a
    +     * field name is duplicated, then apply these rules recursively
    +     * to merge the field values.
    +     *
    +     * Some examples of merging:
    +     *
    +     * Strings are concatenated.
    +     * "foo", "bar" => "foobar"
    +     *
    +     * Lists of non-strings are concatenated.
    +     * [2, 3], [4] => [2, 3, 4]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are strings.
    +     * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are lists. Recursively, the last and first elements
    +     * of the inner lists are merged because they are strings.
    +     * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +     *
    +     * Non-overlapping object fields are combined.
    +     * {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
    +     *
    +     * Overlapping object fields are merged.
    +     * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +     *
    +     * Examples of merging objects containing lists of strings.
    +     * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +     *
    +     * For a more complete example, suppose a streaming SQL query is
    +     * yielding a result set whose rows contain a single string
    +     * field. The following `PartialResultSet`s might be yielded:
    +     *
    +     * {
    +     * "metadata": { ... }
    +     * "values": ["Hello", "W"]
    +     * "chunked_value": true
    +     * "resume_token": "Af65..."
    +     * }
    +     * {
    +     * "values": ["orl"]
    +     * "chunked_value": true
    +     * }
    +     * {
    +     * "values": ["d"]
    +     * "resume_token": "Zx1B..."
    +     * }
    +     *
    +     * This sequence of `PartialResultSet`s encodes two rows, one
    +     * containing the field value `"Hello"`, and a second containing the
    +     * field value `"World" = "W" + "orl" + "d"`.
    +     *
    +     * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +     * resumed from a previously yielded `resume_token`. For the above sequence of
    +     * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +     * yields results from the `PartialResultSet` with value "orl".
    +     * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + public Builder clearValues() { + if (valuesBuilder_ == null) { + values_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + valuesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * A streamed result set consists of a stream of values, which might
    +     * be split into many `PartialResultSet` messages to accommodate
    +     * large rows and/or large values. Every N complete values defines a
    +     * row, where N is equal to the number of entries in
    +     * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +     *
    +     * Most values are encoded based on type as described
    +     * [here][google.spanner.v1.TypeCode].
    +     *
    +     * It's possible that the last value in values is "chunked",
    +     * meaning that the rest of the value is sent in subsequent
    +     * `PartialResultSet`(s). This is denoted by the
    +     * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +     * Two or more chunked values can be merged to form a complete value as
    +     * follows:
    +     *
    +     * * `bool/number/null`: can't be chunked
    +     * * `string`: concatenate the strings
    +     * * `list`: concatenate the lists. If the last element in a list is a
    +     * `string`, `list`, or `object`, merge it with the first element in
    +     * the next list by applying these rules recursively.
    +     * * `object`: concatenate the (field name, field value) pairs. If a
    +     * field name is duplicated, then apply these rules recursively
    +     * to merge the field values.
    +     *
    +     * Some examples of merging:
    +     *
    +     * Strings are concatenated.
    +     * "foo", "bar" => "foobar"
    +     *
    +     * Lists of non-strings are concatenated.
    +     * [2, 3], [4] => [2, 3, 4]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are strings.
    +     * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are lists. Recursively, the last and first elements
    +     * of the inner lists are merged because they are strings.
    +     * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +     *
    +     * Non-overlapping object fields are combined.
    +     * {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
    +     *
    +     * Overlapping object fields are merged.
    +     * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +     *
    +     * Examples of merging objects containing lists of strings.
    +     * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +     *
    +     * For a more complete example, suppose a streaming SQL query is
    +     * yielding a result set whose rows contain a single string
    +     * field. The following `PartialResultSet`s might be yielded:
    +     *
    +     * {
    +     * "metadata": { ... }
    +     * "values": ["Hello", "W"]
    +     * "chunked_value": true
    +     * "resume_token": "Af65..."
    +     * }
    +     * {
    +     * "values": ["orl"]
    +     * "chunked_value": true
    +     * }
    +     * {
    +     * "values": ["d"]
    +     * "resume_token": "Zx1B..."
    +     * }
    +     *
    +     * This sequence of `PartialResultSet`s encodes two rows, one
    +     * containing the field value `"Hello"`, and a second containing the
    +     * field value `"World" = "W" + "orl" + "d"`.
    +     *
    +     * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +     * resumed from a previously yielded `resume_token`. For the above sequence of
    +     * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +     * yields results from the `PartialResultSet` with value "orl".
    +     * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + public Builder removeValues(int index) { + if (valuesBuilder_ == null) { + ensureValuesIsMutable(); + values_.remove(index); + onChanged(); + } else { + valuesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * A streamed result set consists of a stream of values, which might
    +     * be split into many `PartialResultSet` messages to accommodate
    +     * large rows and/or large values. Every N complete values defines a
    +     * row, where N is equal to the number of entries in
    +     * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +     *
    +     * Most values are encoded based on type as described
    +     * [here][google.spanner.v1.TypeCode].
    +     *
    +     * It's possible that the last value in values is "chunked",
    +     * meaning that the rest of the value is sent in subsequent
    +     * `PartialResultSet`(s). This is denoted by the
    +     * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +     * Two or more chunked values can be merged to form a complete value as
    +     * follows:
    +     *
    +     * * `bool/number/null`: can't be chunked
    +     * * `string`: concatenate the strings
    +     * * `list`: concatenate the lists. If the last element in a list is a
    +     * `string`, `list`, or `object`, merge it with the first element in
    +     * the next list by applying these rules recursively.
    +     * * `object`: concatenate the (field name, field value) pairs. If a
    +     * field name is duplicated, then apply these rules recursively
    +     * to merge the field values.
    +     *
    +     * Some examples of merging:
    +     *
    +     * Strings are concatenated.
    +     * "foo", "bar" => "foobar"
    +     *
    +     * Lists of non-strings are concatenated.
    +     * [2, 3], [4] => [2, 3, 4]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are strings.
    +     * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are lists. Recursively, the last and first elements
    +     * of the inner lists are merged because they are strings.
    +     * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +     *
    +     * Non-overlapping object fields are combined.
    +     * {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
    +     *
    +     * Overlapping object fields are merged.
    +     * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +     *
    +     * Examples of merging objects containing lists of strings.
    +     * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +     *
    +     * For a more complete example, suppose a streaming SQL query is
    +     * yielding a result set whose rows contain a single string
    +     * field. The following `PartialResultSet`s might be yielded:
    +     *
    +     * {
    +     * "metadata": { ... }
    +     * "values": ["Hello", "W"]
    +     * "chunked_value": true
    +     * "resume_token": "Af65..."
    +     * }
    +     * {
    +     * "values": ["orl"]
    +     * "chunked_value": true
    +     * }
    +     * {
    +     * "values": ["d"]
    +     * "resume_token": "Zx1B..."
    +     * }
    +     *
    +     * This sequence of `PartialResultSet`s encodes two rows, one
    +     * containing the field value `"Hello"`, and a second containing the
    +     * field value `"World" = "W" + "orl" + "d"`.
    +     *
    +     * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +     * resumed from a previously yielded `resume_token`. For the above sequence of
    +     * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +     * yields results from the `PartialResultSet` with value "orl".
    +     * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + public com.google.protobuf.Value.Builder getValuesBuilder(int index) { + return internalGetValuesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * A streamed result set consists of a stream of values, which might
    +     * be split into many `PartialResultSet` messages to accommodate
    +     * large rows and/or large values. Every N complete values defines a
    +     * row, where N is equal to the number of entries in
    +     * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +     *
    +     * Most values are encoded based on type as described
    +     * [here][google.spanner.v1.TypeCode].
    +     *
    +     * It's possible that the last value in values is "chunked",
    +     * meaning that the rest of the value is sent in subsequent
    +     * `PartialResultSet`(s). This is denoted by the
    +     * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +     * Two or more chunked values can be merged to form a complete value as
    +     * follows:
    +     *
    +     * * `bool/number/null`: can't be chunked
    +     * * `string`: concatenate the strings
    +     * * `list`: concatenate the lists. If the last element in a list is a
    +     * `string`, `list`, or `object`, merge it with the first element in
    +     * the next list by applying these rules recursively.
    +     * * `object`: concatenate the (field name, field value) pairs. If a
    +     * field name is duplicated, then apply these rules recursively
    +     * to merge the field values.
    +     *
    +     * Some examples of merging:
    +     *
    +     * Strings are concatenated.
    +     * "foo", "bar" => "foobar"
    +     *
    +     * Lists of non-strings are concatenated.
    +     * [2, 3], [4] => [2, 3, 4]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are strings.
    +     * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are lists. Recursively, the last and first elements
    +     * of the inner lists are merged because they are strings.
    +     * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +     *
    +     * Non-overlapping object fields are combined.
    +     * {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
    +     *
    +     * Overlapping object fields are merged.
    +     * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +     *
    +     * Examples of merging objects containing lists of strings.
    +     * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +     *
    +     * For a more complete example, suppose a streaming SQL query is
    +     * yielding a result set whose rows contain a single string
    +     * field. The following `PartialResultSet`s might be yielded:
    +     *
    +     * {
    +     * "metadata": { ... }
    +     * "values": ["Hello", "W"]
    +     * "chunked_value": true
    +     * "resume_token": "Af65..."
    +     * }
    +     * {
    +     * "values": ["orl"]
    +     * "chunked_value": true
    +     * }
    +     * {
    +     * "values": ["d"]
    +     * "resume_token": "Zx1B..."
    +     * }
    +     *
    +     * This sequence of `PartialResultSet`s encodes two rows, one
    +     * containing the field value `"Hello"`, and a second containing the
    +     * field value `"World" = "W" + "orl" + "d"`.
    +     *
    +     * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +     * resumed from a previously yielded `resume_token`. For the above sequence of
    +     * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +     * yields results from the `PartialResultSet` with value "orl".
    +     * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + public com.google.protobuf.ValueOrBuilder getValuesOrBuilder(int index) { + if (valuesBuilder_ == null) { + return values_.get(index); + } else { + return valuesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * A streamed result set consists of a stream of values, which might
    +     * be split into many `PartialResultSet` messages to accommodate
    +     * large rows and/or large values. Every N complete values defines a
    +     * row, where N is equal to the number of entries in
    +     * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +     *
    +     * Most values are encoded based on type as described
    +     * [here][google.spanner.v1.TypeCode].
    +     *
    +     * It's possible that the last value in values is "chunked",
    +     * meaning that the rest of the value is sent in subsequent
    +     * `PartialResultSet`(s). This is denoted by the
    +     * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +     * Two or more chunked values can be merged to form a complete value as
    +     * follows:
    +     *
    +     * * `bool/number/null`: can't be chunked
    +     * * `string`: concatenate the strings
    +     * * `list`: concatenate the lists. If the last element in a list is a
    +     * `string`, `list`, or `object`, merge it with the first element in
    +     * the next list by applying these rules recursively.
    +     * * `object`: concatenate the (field name, field value) pairs. If a
    +     * field name is duplicated, then apply these rules recursively
    +     * to merge the field values.
    +     *
    +     * Some examples of merging:
    +     *
    +     * Strings are concatenated.
    +     * "foo", "bar" => "foobar"
    +     *
    +     * Lists of non-strings are concatenated.
    +     * [2, 3], [4] => [2, 3, 4]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are strings.
    +     * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are lists. Recursively, the last and first elements
    +     * of the inner lists are merged because they are strings.
    +     * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +     *
    +     * Non-overlapping object fields are combined.
    +     * {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
    +     *
    +     * Overlapping object fields are merged.
    +     * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +     *
    +     * Examples of merging objects containing lists of strings.
    +     * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +     *
    +     * For a more complete example, suppose a streaming SQL query is
    +     * yielding a result set whose rows contain a single string
    +     * field. The following `PartialResultSet`s might be yielded:
    +     *
    +     * {
    +     * "metadata": { ... }
    +     * "values": ["Hello", "W"]
    +     * "chunked_value": true
    +     * "resume_token": "Af65..."
    +     * }
    +     * {
    +     * "values": ["orl"]
    +     * "chunked_value": true
    +     * }
    +     * {
    +     * "values": ["d"]
    +     * "resume_token": "Zx1B..."
    +     * }
    +     *
    +     * This sequence of `PartialResultSet`s encodes two rows, one
    +     * containing the field value `"Hello"`, and a second containing the
    +     * field value `"World" = "W" + "orl" + "d"`.
    +     *
    +     * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +     * resumed from a previously yielded `resume_token`. For the above sequence of
    +     * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +     * yields results from the `PartialResultSet` with value "orl".
    +     * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + public java.util.List getValuesOrBuilderList() { + if (valuesBuilder_ != null) { + return valuesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(values_); + } + } + + /** + * + * + *
    +     * A streamed result set consists of a stream of values, which might
    +     * be split into many `PartialResultSet` messages to accommodate
    +     * large rows and/or large values. Every N complete values defines a
    +     * row, where N is equal to the number of entries in
    +     * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +     *
    +     * Most values are encoded based on type as described
    +     * [here][google.spanner.v1.TypeCode].
    +     *
    +     * It's possible that the last value in values is "chunked",
    +     * meaning that the rest of the value is sent in subsequent
    +     * `PartialResultSet`(s). This is denoted by the
    +     * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +     * Two or more chunked values can be merged to form a complete value as
    +     * follows:
    +     *
    +     * * `bool/number/null`: can't be chunked
    +     * * `string`: concatenate the strings
    +     * * `list`: concatenate the lists. If the last element in a list is a
    +     * `string`, `list`, or `object`, merge it with the first element in
    +     * the next list by applying these rules recursively.
    +     * * `object`: concatenate the (field name, field value) pairs. If a
    +     * field name is duplicated, then apply these rules recursively
    +     * to merge the field values.
    +     *
    +     * Some examples of merging:
    +     *
    +     * Strings are concatenated.
    +     * "foo", "bar" => "foobar"
    +     *
    +     * Lists of non-strings are concatenated.
    +     * [2, 3], [4] => [2, 3, 4]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are strings.
    +     * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are lists. Recursively, the last and first elements
    +     * of the inner lists are merged because they are strings.
    +     * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +     *
    +     * Non-overlapping object fields are combined.
    +     * {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
    +     *
    +     * Overlapping object fields are merged.
    +     * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +     *
    +     * Examples of merging objects containing lists of strings.
    +     * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +     *
    +     * For a more complete example, suppose a streaming SQL query is
    +     * yielding a result set whose rows contain a single string
    +     * field. The following `PartialResultSet`s might be yielded:
    +     *
    +     * {
    +     * "metadata": { ... }
    +     * "values": ["Hello", "W"]
    +     * "chunked_value": true
    +     * "resume_token": "Af65..."
    +     * }
    +     * {
    +     * "values": ["orl"]
    +     * "chunked_value": true
    +     * }
    +     * {
    +     * "values": ["d"]
    +     * "resume_token": "Zx1B..."
    +     * }
    +     *
    +     * This sequence of `PartialResultSet`s encodes two rows, one
    +     * containing the field value `"Hello"`, and a second containing the
    +     * field value `"World" = "W" + "orl" + "d"`.
    +     *
    +     * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +     * resumed from a previously yielded `resume_token`. For the above sequence of
    +     * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +     * yields results from the `PartialResultSet` with value "orl".
    +     * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + public com.google.protobuf.Value.Builder addValuesBuilder() { + return internalGetValuesFieldBuilder() + .addBuilder(com.google.protobuf.Value.getDefaultInstance()); + } + + /** + * + * + *
    +     * A streamed result set consists of a stream of values, which might
    +     * be split into many `PartialResultSet` messages to accommodate
    +     * large rows and/or large values. Every N complete values defines a
    +     * row, where N is equal to the number of entries in
    +     * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +     *
    +     * Most values are encoded based on type as described
    +     * [here][google.spanner.v1.TypeCode].
    +     *
    +     * It's possible that the last value in values is "chunked",
    +     * meaning that the rest of the value is sent in subsequent
    +     * `PartialResultSet`(s). This is denoted by the
    +     * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +     * Two or more chunked values can be merged to form a complete value as
    +     * follows:
    +     *
    +     * * `bool/number/null`: can't be chunked
    +     * * `string`: concatenate the strings
    +     * * `list`: concatenate the lists. If the last element in a list is a
    +     * `string`, `list`, or `object`, merge it with the first element in
    +     * the next list by applying these rules recursively.
    +     * * `object`: concatenate the (field name, field value) pairs. If a
    +     * field name is duplicated, then apply these rules recursively
    +     * to merge the field values.
    +     *
    +     * Some examples of merging:
    +     *
    +     * Strings are concatenated.
    +     * "foo", "bar" => "foobar"
    +     *
    +     * Lists of non-strings are concatenated.
    +     * [2, 3], [4] => [2, 3, 4]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are strings.
    +     * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are lists. Recursively, the last and first elements
    +     * of the inner lists are merged because they are strings.
    +     * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +     *
    +     * Non-overlapping object fields are combined.
    +     * {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
    +     *
    +     * Overlapping object fields are merged.
    +     * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +     *
    +     * Examples of merging objects containing lists of strings.
    +     * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +     *
    +     * For a more complete example, suppose a streaming SQL query is
    +     * yielding a result set whose rows contain a single string
    +     * field. The following `PartialResultSet`s might be yielded:
    +     *
    +     * {
    +     * "metadata": { ... }
    +     * "values": ["Hello", "W"]
    +     * "chunked_value": true
    +     * "resume_token": "Af65..."
    +     * }
    +     * {
    +     * "values": ["orl"]
    +     * "chunked_value": true
    +     * }
    +     * {
    +     * "values": ["d"]
    +     * "resume_token": "Zx1B..."
    +     * }
    +     *
    +     * This sequence of `PartialResultSet`s encodes two rows, one
    +     * containing the field value `"Hello"`, and a second containing the
    +     * field value `"World" = "W" + "orl" + "d"`.
    +     *
    +     * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +     * resumed from a previously yielded `resume_token`. For the above sequence of
    +     * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +     * yields results from the `PartialResultSet` with value "orl".
    +     * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + public com.google.protobuf.Value.Builder addValuesBuilder(int index) { + return internalGetValuesFieldBuilder() + .addBuilder(index, com.google.protobuf.Value.getDefaultInstance()); + } + + /** + * + * + *
    +     * A streamed result set consists of a stream of values, which might
    +     * be split into many `PartialResultSet` messages to accommodate
    +     * large rows and/or large values. Every N complete values defines a
    +     * row, where N is equal to the number of entries in
    +     * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +     *
    +     * Most values are encoded based on type as described
    +     * [here][google.spanner.v1.TypeCode].
    +     *
    +     * It's possible that the last value in values is "chunked",
    +     * meaning that the rest of the value is sent in subsequent
    +     * `PartialResultSet`(s). This is denoted by the
    +     * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +     * Two or more chunked values can be merged to form a complete value as
    +     * follows:
    +     *
    +     * * `bool/number/null`: can't be chunked
    +     * * `string`: concatenate the strings
    +     * * `list`: concatenate the lists. If the last element in a list is a
    +     * `string`, `list`, or `object`, merge it with the first element in
    +     * the next list by applying these rules recursively.
    +     * * `object`: concatenate the (field name, field value) pairs. If a
    +     * field name is duplicated, then apply these rules recursively
    +     * to merge the field values.
    +     *
    +     * Some examples of merging:
    +     *
    +     * Strings are concatenated.
    +     * "foo", "bar" => "foobar"
    +     *
    +     * Lists of non-strings are concatenated.
    +     * [2, 3], [4] => [2, 3, 4]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are strings.
    +     * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +     *
    +     * Lists are concatenated, but the last and first elements are merged
    +     * because they are lists. Recursively, the last and first elements
    +     * of the inner lists are merged because they are strings.
    +     * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +     *
    +     * Non-overlapping object fields are combined.
    +     * {"a": "1"}, {"b": "2"} => {"a": "1", "b": "2"}
    +     *
    +     * Overlapping object fields are merged.
    +     * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +     *
    +     * Examples of merging objects containing lists of strings.
    +     * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +     *
    +     * For a more complete example, suppose a streaming SQL query is
    +     * yielding a result set whose rows contain a single string
    +     * field. The following `PartialResultSet`s might be yielded:
    +     *
    +     * {
    +     * "metadata": { ... }
    +     * "values": ["Hello", "W"]
    +     * "chunked_value": true
    +     * "resume_token": "Af65..."
    +     * }
    +     * {
    +     * "values": ["orl"]
    +     * "chunked_value": true
    +     * }
    +     * {
    +     * "values": ["d"]
    +     * "resume_token": "Zx1B..."
    +     * }
    +     *
    +     * This sequence of `PartialResultSet`s encodes two rows, one
    +     * containing the field value `"Hello"`, and a second containing the
    +     * field value `"World" = "W" + "orl" + "d"`.
    +     *
    +     * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +     * resumed from a previously yielded `resume_token`. For the above sequence of
    +     * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +     * yields results from the `PartialResultSet` with value "orl".
    +     * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + public java.util.List getValuesBuilderList() { + return internalGetValuesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.protobuf.Value, + com.google.protobuf.Value.Builder, + com.google.protobuf.ValueOrBuilder> + internalGetValuesFieldBuilder() { + if (valuesBuilder_ == null) { + valuesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.protobuf.Value, + com.google.protobuf.Value.Builder, + com.google.protobuf.ValueOrBuilder>( + values_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + values_ = null; + } + return valuesBuilder_; + } + + private boolean chunkedValue_; + + /** + * + * + *
    +     * If true, then the final value in
    +     * [values][google.spanner.v1.PartialResultSet.values] is chunked, and must be
    +     * combined with more values from subsequent `PartialResultSet`s to obtain a
    +     * complete field value.
    +     * 
    + * + * bool chunked_value = 3; + * + * @return The chunkedValue. + */ + @java.lang.Override + public boolean getChunkedValue() { + return chunkedValue_; + } + + /** + * + * + *
    +     * If true, then the final value in
    +     * [values][google.spanner.v1.PartialResultSet.values] is chunked, and must be
    +     * combined with more values from subsequent `PartialResultSet`s to obtain a
    +     * complete field value.
    +     * 
    + * + * bool chunked_value = 3; + * + * @param value The chunkedValue to set. + * @return This builder for chaining. + */ + public Builder setChunkedValue(boolean value) { + + chunkedValue_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If true, then the final value in
    +     * [values][google.spanner.v1.PartialResultSet.values] is chunked, and must be
    +     * combined with more values from subsequent `PartialResultSet`s to obtain a
    +     * complete field value.
    +     * 
    + * + * bool chunked_value = 3; + * + * @return This builder for chaining. + */ + public Builder clearChunkedValue() { + bitField0_ = (bitField0_ & ~0x00000004); + chunkedValue_ = false; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString resumeToken_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * Streaming calls might be interrupted for a variety of reasons, such
    +     * as TCP connection loss. If this occurs, the stream of results can
    +     * be resumed by re-sending the original request and including
    +     * `resume_token`. Note that executing any other transaction in the
    +     * same session invalidates the token.
    +     * 
    + * + * bytes resume_token = 4; + * + * @return The resumeToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getResumeToken() { + return resumeToken_; + } + + /** + * + * + *
    +     * Streaming calls might be interrupted for a variety of reasons, such
    +     * as TCP connection loss. If this occurs, the stream of results can
    +     * be resumed by re-sending the original request and including
    +     * `resume_token`. Note that executing any other transaction in the
    +     * same session invalidates the token.
    +     * 
    + * + * bytes resume_token = 4; + * + * @param value The resumeToken to set. + * @return This builder for chaining. + */ + public Builder setResumeToken(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + resumeToken_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Streaming calls might be interrupted for a variety of reasons, such
    +     * as TCP connection loss. If this occurs, the stream of results can
    +     * be resumed by re-sending the original request and including
    +     * `resume_token`. Note that executing any other transaction in the
    +     * same session invalidates the token.
    +     * 
    + * + * bytes resume_token = 4; + * + * @return This builder for chaining. + */ + public Builder clearResumeToken() { + bitField0_ = (bitField0_ & ~0x00000008); + resumeToken_ = getDefaultInstance().getResumeToken(); + onChanged(); + return this; + } + + private com.google.spanner.v1.ResultSetStats stats_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ResultSetStats, + com.google.spanner.v1.ResultSetStats.Builder, + com.google.spanner.v1.ResultSetStatsOrBuilder> + statsBuilder_; + + /** + * + * + *
    +     * Query plan and execution statistics for the statement that produced this
    +     * streaming result set. These can be requested by setting
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]
    +     * and are sent only once with the last response in the stream. This field is
    +     * also present in the last response for DML statements.
    +     * 
    + * + * .google.spanner.v1.ResultSetStats stats = 5; + * + * @return Whether the stats field is set. + */ + public boolean hasStats() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * Query plan and execution statistics for the statement that produced this
    +     * streaming result set. These can be requested by setting
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]
    +     * and are sent only once with the last response in the stream. This field is
    +     * also present in the last response for DML statements.
    +     * 
    + * + * .google.spanner.v1.ResultSetStats stats = 5; + * + * @return The stats. + */ + public com.google.spanner.v1.ResultSetStats getStats() { + if (statsBuilder_ == null) { + return stats_ == null ? com.google.spanner.v1.ResultSetStats.getDefaultInstance() : stats_; + } else { + return statsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Query plan and execution statistics for the statement that produced this
    +     * streaming result set. These can be requested by setting
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]
    +     * and are sent only once with the last response in the stream. This field is
    +     * also present in the last response for DML statements.
    +     * 
    + * + * .google.spanner.v1.ResultSetStats stats = 5; + */ + public Builder setStats(com.google.spanner.v1.ResultSetStats value) { + if (statsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + stats_ = value; + } else { + statsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Query plan and execution statistics for the statement that produced this
    +     * streaming result set. These can be requested by setting
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]
    +     * and are sent only once with the last response in the stream. This field is
    +     * also present in the last response for DML statements.
    +     * 
    + * + * .google.spanner.v1.ResultSetStats stats = 5; + */ + public Builder setStats(com.google.spanner.v1.ResultSetStats.Builder builderForValue) { + if (statsBuilder_ == null) { + stats_ = builderForValue.build(); + } else { + statsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Query plan and execution statistics for the statement that produced this
    +     * streaming result set. These can be requested by setting
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]
    +     * and are sent only once with the last response in the stream. This field is
    +     * also present in the last response for DML statements.
    +     * 
    + * + * .google.spanner.v1.ResultSetStats stats = 5; + */ + public Builder mergeStats(com.google.spanner.v1.ResultSetStats value) { + if (statsBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && stats_ != null + && stats_ != com.google.spanner.v1.ResultSetStats.getDefaultInstance()) { + getStatsBuilder().mergeFrom(value); + } else { + stats_ = value; + } + } else { + statsBuilder_.mergeFrom(value); + } + if (stats_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Query plan and execution statistics for the statement that produced this
    +     * streaming result set. These can be requested by setting
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]
    +     * and are sent only once with the last response in the stream. This field is
    +     * also present in the last response for DML statements.
    +     * 
    + * + * .google.spanner.v1.ResultSetStats stats = 5; + */ + public Builder clearStats() { + bitField0_ = (bitField0_ & ~0x00000010); + stats_ = null; + if (statsBuilder_ != null) { + statsBuilder_.dispose(); + statsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Query plan and execution statistics for the statement that produced this
    +     * streaming result set. These can be requested by setting
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]
    +     * and are sent only once with the last response in the stream. This field is
    +     * also present in the last response for DML statements.
    +     * 
    + * + * .google.spanner.v1.ResultSetStats stats = 5; + */ + public com.google.spanner.v1.ResultSetStats.Builder getStatsBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetStatsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Query plan and execution statistics for the statement that produced this
    +     * streaming result set. These can be requested by setting
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]
    +     * and are sent only once with the last response in the stream. This field is
    +     * also present in the last response for DML statements.
    +     * 
    + * + * .google.spanner.v1.ResultSetStats stats = 5; + */ + public com.google.spanner.v1.ResultSetStatsOrBuilder getStatsOrBuilder() { + if (statsBuilder_ != null) { + return statsBuilder_.getMessageOrBuilder(); + } else { + return stats_ == null ? com.google.spanner.v1.ResultSetStats.getDefaultInstance() : stats_; + } + } + + /** + * + * + *
    +     * Query plan and execution statistics for the statement that produced this
    +     * streaming result set. These can be requested by setting
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]
    +     * and are sent only once with the last response in the stream. This field is
    +     * also present in the last response for DML statements.
    +     * 
    + * + * .google.spanner.v1.ResultSetStats stats = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ResultSetStats, + com.google.spanner.v1.ResultSetStats.Builder, + com.google.spanner.v1.ResultSetStatsOrBuilder> + internalGetStatsFieldBuilder() { + if (statsBuilder_ == null) { + statsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ResultSetStats, + com.google.spanner.v1.ResultSetStats.Builder, + com.google.spanner.v1.ResultSetStatsOrBuilder>( + getStats(), getParentForChildren(), isClean()); + stats_ = null; + } + return statsBuilder_; + } + + private com.google.spanner.v1.MultiplexedSessionPrecommitToken precommitToken_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + precommitTokenBuilder_; + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction
    +     * has multiplexed sessions enabled. Pass the precommit token with the highest
    +     * sequence number from this transaction attempt to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + public boolean hasPrecommitToken() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction
    +     * has multiplexed sessions enabled. Pass the precommit token with the highest
    +     * sequence number from this transaction attempt to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + if (precommitTokenBuilder_ == null) { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } else { + return precommitTokenBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction
    +     * has multiplexed sessions enabled. Pass the precommit token with the highest
    +     * sequence number from this transaction attempt to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPrecommitToken(com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + precommitToken_ = value; + } else { + precommitTokenBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction
    +     * has multiplexed sessions enabled. Pass the precommit token with the highest
    +     * sequence number from this transaction attempt to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder builderForValue) { + if (precommitTokenBuilder_ == null) { + precommitToken_ = builderForValue.build(); + } else { + precommitTokenBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction
    +     * has multiplexed sessions enabled. Pass the precommit token with the highest
    +     * sequence number from this transaction attempt to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergePrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0) + && precommitToken_ != null + && precommitToken_ + != com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance()) { + getPrecommitTokenBuilder().mergeFrom(value); + } else { + precommitToken_ = value; + } + } else { + precommitTokenBuilder_.mergeFrom(value); + } + if (precommitToken_ != null) { + bitField0_ |= 0x00000020; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction
    +     * has multiplexed sessions enabled. Pass the precommit token with the highest
    +     * sequence number from this transaction attempt to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearPrecommitToken() { + bitField0_ = (bitField0_ & ~0x00000020); + precommitToken_ = null; + if (precommitTokenBuilder_ != null) { + precommitTokenBuilder_.dispose(); + precommitTokenBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction
    +     * has multiplexed sessions enabled. Pass the precommit token with the highest
    +     * sequence number from this transaction attempt to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder + getPrecommitTokenBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return internalGetPrecommitTokenFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction
    +     * has multiplexed sessions enabled. Pass the precommit token with the highest
    +     * sequence number from this transaction attempt to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + if (precommitTokenBuilder_ != null) { + return precommitTokenBuilder_.getMessageOrBuilder(); + } else { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction
    +     * has multiplexed sessions enabled. Pass the precommit token with the highest
    +     * sequence number from this transaction attempt to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + internalGetPrecommitTokenFieldBuilder() { + if (precommitTokenBuilder_ == null) { + precommitTokenBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder>( + getPrecommitToken(), getParentForChildren(), isClean()); + precommitToken_ = null; + } + return precommitTokenBuilder_; + } + + private boolean last_; + + /** + * + * + *
    +     * Optional. Indicates whether this is the last `PartialResultSet` in the
    +     * stream. The server might optionally set this field. Clients shouldn't rely
    +     * on this field being set in all cases.
    +     * 
    + * + * bool last = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The last. + */ + @java.lang.Override + public boolean getLast() { + return last_; + } + + /** + * + * + *
    +     * Optional. Indicates whether this is the last `PartialResultSet` in the
    +     * stream. The server might optionally set this field. Clients shouldn't rely
    +     * on this field being set in all cases.
    +     * 
    + * + * bool last = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The last to set. + * @return This builder for chaining. + */ + public Builder setLast(boolean value) { + + last_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Indicates whether this is the last `PartialResultSet` in the
    +     * stream. The server might optionally set this field. Clients shouldn't rely
    +     * on this field being set in all cases.
    +     * 
    + * + * bool last = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearLast() { + bitField0_ = (bitField0_ & ~0x00000040); + last_ = false; + onChanged(); + return this; + } + + private com.google.spanner.v1.CacheUpdate cacheUpdate_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.CacheUpdate, + com.google.spanner.v1.CacheUpdate.Builder, + com.google.spanner.v1.CacheUpdateOrBuilder> + cacheUpdateBuilder_; + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the cacheUpdate field is set. + */ + public boolean hasCacheUpdate() { + return ((bitField0_ & 0x00000080) != 0); + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The cacheUpdate. + */ + public com.google.spanner.v1.CacheUpdate getCacheUpdate() { + if (cacheUpdateBuilder_ == null) { + return cacheUpdate_ == null + ? com.google.spanner.v1.CacheUpdate.getDefaultInstance() + : cacheUpdate_; + } else { + return cacheUpdateBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCacheUpdate(com.google.spanner.v1.CacheUpdate value) { + if (cacheUpdateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cacheUpdate_ = value; + } else { + cacheUpdateBuilder_.setMessage(value); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCacheUpdate(com.google.spanner.v1.CacheUpdate.Builder builderForValue) { + if (cacheUpdateBuilder_ == null) { + cacheUpdate_ = builderForValue.build(); + } else { + cacheUpdateBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCacheUpdate(com.google.spanner.v1.CacheUpdate value) { + if (cacheUpdateBuilder_ == null) { + if (((bitField0_ & 0x00000080) != 0) + && cacheUpdate_ != null + && cacheUpdate_ != com.google.spanner.v1.CacheUpdate.getDefaultInstance()) { + getCacheUpdateBuilder().mergeFrom(value); + } else { + cacheUpdate_ = value; + } + } else { + cacheUpdateBuilder_.mergeFrom(value); + } + if (cacheUpdate_ != null) { + bitField0_ |= 0x00000080; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCacheUpdate() { + bitField0_ = (bitField0_ & ~0x00000080); + cacheUpdate_ = null; + if (cacheUpdateBuilder_ != null) { + cacheUpdateBuilder_.dispose(); + cacheUpdateBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.CacheUpdate.Builder getCacheUpdateBuilder() { + bitField0_ |= 0x00000080; + onChanged(); + return internalGetCacheUpdateFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.CacheUpdateOrBuilder getCacheUpdateOrBuilder() { + if (cacheUpdateBuilder_ != null) { + return cacheUpdateBuilder_.getMessageOrBuilder(); + } else { + return cacheUpdate_ == null + ? com.google.spanner.v1.CacheUpdate.getDefaultInstance() + : cacheUpdate_; + } + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.CacheUpdate, + com.google.spanner.v1.CacheUpdate.Builder, + com.google.spanner.v1.CacheUpdateOrBuilder> + internalGetCacheUpdateFieldBuilder() { + if (cacheUpdateBuilder_ == null) { + cacheUpdateBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.CacheUpdate, + com.google.spanner.v1.CacheUpdate.Builder, + com.google.spanner.v1.CacheUpdateOrBuilder>( + getCacheUpdate(), getParentForChildren(), isClean()); + cacheUpdate_ = null; + } + return cacheUpdateBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.PartialResultSet) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.PartialResultSet) + private static final com.google.spanner.v1.PartialResultSet DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.PartialResultSet(); + } + + public static com.google.spanner.v1.PartialResultSet getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PartialResultSet parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + 
return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.PartialResultSet getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartialResultSetOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartialResultSetOrBuilder.java new file mode 100644 index 000000000000..4e33ea860220 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartialResultSetOrBuilder.java @@ -0,0 +1,715 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/result_set.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface PartialResultSetOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.PartialResultSet) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Metadata about the result set, such as row type information.
    +   * Only present in the first response.
    +   * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + * + * @return Whether the metadata field is set. + */ + boolean hasMetadata(); + + /** + * + * + *
    +   * Metadata about the result set, such as row type information.
    +   * Only present in the first response.
    +   * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + * + * @return The metadata. + */ + com.google.spanner.v1.ResultSetMetadata getMetadata(); + + /** + * + * + *
    +   * Metadata about the result set, such as row type information.
    +   * Only present in the first response.
    +   * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + */ + com.google.spanner.v1.ResultSetMetadataOrBuilder getMetadataOrBuilder(); + + /** + * + * + *
    +   * A streamed result set consists of a stream of values, which might
    +   * be split into many `PartialResultSet` messages to accommodate
    +   * large rows and/or large values. Every N complete values defines a
    +   * row, where N is equal to the number of entries in
    +   * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +   *
    +   * Most values are encoded based on type as described
    +   * [here][google.spanner.v1.TypeCode].
    +   *
    +   * It's possible that the last value in values is "chunked",
    +   * meaning that the rest of the value is sent in subsequent
    +   * `PartialResultSet`(s). This is denoted by the
    +   * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +   * Two or more chunked values can be merged to form a complete value as
    +   * follows:
    +   *
    +   * * `bool/number/null`: can't be chunked
    +   * * `string`: concatenate the strings
    +   * * `list`: concatenate the lists. If the last element in a list is a
    +   * `string`, `list`, or `object`, merge it with the first element in
    +   * the next list by applying these rules recursively.
    +   * * `object`: concatenate the (field name, field value) pairs. If a
    +   * field name is duplicated, then apply these rules recursively
    +   * to merge the field values.
    +   *
    +   * Some examples of merging:
    +   *
    +   * Strings are concatenated.
    +   * "foo", "bar" => "foobar"
    +   *
    +   * Lists of non-strings are concatenated.
    +   * [2, 3], [4] => [2, 3, 4]
    +   *
    +   * Lists are concatenated, but the last and first elements are merged
    +   * because they are strings.
    +   * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +   *
    +   * Lists are concatenated, but the last and first elements are merged
    +   * because they are lists. Recursively, the last and first elements
    +   * of the inner lists are merged because they are strings.
    +   * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +   *
    +   * Non-overlapping object fields are combined.
    +   * {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
    +   *
    +   * Overlapping object fields are merged.
    +   * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +   *
    +   * Examples of merging objects containing lists of strings.
    +   * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +   *
    +   * For a more complete example, suppose a streaming SQL query is
    +   * yielding a result set whose rows contain a single string
    +   * field. The following `PartialResultSet`s might be yielded:
    +   *
    +   * {
    +   * "metadata": { ... }
    +   * "values": ["Hello", "W"]
    +   * "chunked_value": true
    +   * "resume_token": "Af65..."
    +   * }
    +   * {
    +   * "values": ["orl"]
    +   * "chunked_value": true
    +   * }
    +   * {
    +   * "values": ["d"]
    +   * "resume_token": "Zx1B..."
    +   * }
    +   *
    +   * This sequence of `PartialResultSet`s encodes two rows, one
    +   * containing the field value `"Hello"`, and a second containing the
    +   * field value `"World" = "W" + "orl" + "d"`.
    +   *
    +   * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +   * resumed from a previously yielded `resume_token`. For the above sequence of
    +   * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +   * yields results from the `PartialResultSet` with value "orl".
    +   * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + java.util.List getValuesList(); + + /** + * + * + *
    +   * A streamed result set consists of a stream of values, which might
    +   * be split into many `PartialResultSet` messages to accommodate
    +   * large rows and/or large values. Every N complete values defines a
    +   * row, where N is equal to the number of entries in
    +   * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +   *
    +   * Most values are encoded based on type as described
    +   * [here][google.spanner.v1.TypeCode].
    +   *
    +   * It's possible that the last value in values is "chunked",
    +   * meaning that the rest of the value is sent in subsequent
    +   * `PartialResultSet`(s). This is denoted by the
    +   * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +   * Two or more chunked values can be merged to form a complete value as
    +   * follows:
    +   *
    +   * * `bool/number/null`: can't be chunked
    +   * * `string`: concatenate the strings
    +   * * `list`: concatenate the lists. If the last element in a list is a
    +   * `string`, `list`, or `object`, merge it with the first element in
    +   * the next list by applying these rules recursively.
    +   * * `object`: concatenate the (field name, field value) pairs. If a
    +   * field name is duplicated, then apply these rules recursively
    +   * to merge the field values.
    +   *
    +   * Some examples of merging:
    +   *
    +   * Strings are concatenated.
    +   * "foo", "bar" => "foobar"
    +   *
    +   * Lists of non-strings are concatenated.
    +   * [2, 3], [4] => [2, 3, 4]
    +   *
    +   * Lists are concatenated, but the last and first elements are merged
    +   * because they are strings.
    +   * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +   *
    +   * Lists are concatenated, but the last and first elements are merged
    +   * because they are lists. Recursively, the last and first elements
    +   * of the inner lists are merged because they are strings.
    +   * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +   *
    +   * Non-overlapping object fields are combined.
    +   * {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
    +   *
    +   * Overlapping object fields are merged.
    +   * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +   *
    +   * Examples of merging objects containing lists of strings.
    +   * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +   *
    +   * For a more complete example, suppose a streaming SQL query is
    +   * yielding a result set whose rows contain a single string
    +   * field. The following `PartialResultSet`s might be yielded:
    +   *
    +   * {
    +   * "metadata": { ... }
    +   * "values": ["Hello", "W"]
    +   * "chunked_value": true
    +   * "resume_token": "Af65..."
    +   * }
    +   * {
    +   * "values": ["orl"]
    +   * "chunked_value": true
    +   * }
    +   * {
    +   * "values": ["d"]
    +   * "resume_token": "Zx1B..."
    +   * }
    +   *
    +   * This sequence of `PartialResultSet`s encodes two rows, one
    +   * containing the field value `"Hello"`, and a second containing the
    +   * field value `"World" = "W" + "orl" + "d"`.
    +   *
    +   * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +   * resumed from a previously yielded `resume_token`. For the above sequence of
    +   * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +   * yields results from the `PartialResultSet` with value "orl".
    +   * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + com.google.protobuf.Value getValues(int index); + + /** + * + * + *
    +   * A streamed result set consists of a stream of values, which might
    +   * be split into many `PartialResultSet` messages to accommodate
    +   * large rows and/or large values. Every N complete values defines a
    +   * row, where N is equal to the number of entries in
    +   * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +   *
    +   * Most values are encoded based on type as described
    +   * [here][google.spanner.v1.TypeCode].
    +   *
    +   * It's possible that the last value in values is "chunked",
    +   * meaning that the rest of the value is sent in subsequent
    +   * `PartialResultSet`(s). This is denoted by the
    +   * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +   * Two or more chunked values can be merged to form a complete value as
    +   * follows:
    +   *
    +   * * `bool/number/null`: can't be chunked
    +   * * `string`: concatenate the strings
    +   * * `list`: concatenate the lists. If the last element in a list is a
    +   * `string`, `list`, or `object`, merge it with the first element in
    +   * the next list by applying these rules recursively.
    +   * * `object`: concatenate the (field name, field value) pairs. If a
    +   * field name is duplicated, then apply these rules recursively
    +   * to merge the field values.
    +   *
    +   * Some examples of merging:
    +   *
    +   * Strings are concatenated.
    +   * "foo", "bar" => "foobar"
    +   *
    +   * Lists of non-strings are concatenated.
    +   * [2, 3], [4] => [2, 3, 4]
    +   *
    +   * Lists are concatenated, but the last and first elements are merged
    +   * because they are strings.
    +   * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +   *
    +   * Lists are concatenated, but the last and first elements are merged
    +   * because they are lists. Recursively, the last and first elements
    +   * of the inner lists are merged because they are strings.
    +   * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +   *
    +   * Non-overlapping object fields are combined.
    +   * {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
    +   *
    +   * Overlapping object fields are merged.
    +   * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +   *
    +   * Examples of merging objects containing lists of strings.
    +   * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +   *
    +   * For a more complete example, suppose a streaming SQL query is
    +   * yielding a result set whose rows contain a single string
    +   * field. The following `PartialResultSet`s might be yielded:
    +   *
    +   * {
    +   * "metadata": { ... }
    +   * "values": ["Hello", "W"]
    +   * "chunked_value": true
    +   * "resume_token": "Af65..."
    +   * }
    +   * {
    +   * "values": ["orl"]
    +   * "chunked_value": true
    +   * }
    +   * {
    +   * "values": ["d"]
    +   * "resume_token": "Zx1B..."
    +   * }
    +   *
    +   * This sequence of `PartialResultSet`s encodes two rows, one
    +   * containing the field value `"Hello"`, and a second containing the
    +   * field value `"World" = "W" + "orl" + "d"`.
    +   *
    +   * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +   * resumed from a previously yielded `resume_token`. For the above sequence of
    +   * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +   * yields results from the `PartialResultSet` with value "orl".
    +   * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + int getValuesCount(); + + /** + * + * + *
    +   * A streamed result set consists of a stream of values, which might
    +   * be split into many `PartialResultSet` messages to accommodate
    +   * large rows and/or large values. Every N complete values defines a
    +   * row, where N is equal to the number of entries in
    +   * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +   *
    +   * Most values are encoded based on type as described
    +   * [here][google.spanner.v1.TypeCode].
    +   *
    +   * It's possible that the last value in values is "chunked",
    +   * meaning that the rest of the value is sent in subsequent
    +   * `PartialResultSet`(s). This is denoted by the
    +   * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +   * Two or more chunked values can be merged to form a complete value as
    +   * follows:
    +   *
    +   * * `bool/number/null`: can't be chunked
    +   * * `string`: concatenate the strings
    +   * * `list`: concatenate the lists. If the last element in a list is a
    +   * `string`, `list`, or `object`, merge it with the first element in
    +   * the next list by applying these rules recursively.
    +   * * `object`: concatenate the (field name, field value) pairs. If a
    +   * field name is duplicated, then apply these rules recursively
    +   * to merge the field values.
    +   *
    +   * Some examples of merging:
    +   *
    +   * Strings are concatenated.
    +   * "foo", "bar" => "foobar"
    +   *
    +   * Lists of non-strings are concatenated.
    +   * [2, 3], [4] => [2, 3, 4]
    +   *
    +   * Lists are concatenated, but the last and first elements are merged
    +   * because they are strings.
    +   * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +   *
    +   * Lists are concatenated, but the last and first elements are merged
    +   * because they are lists. Recursively, the last and first elements
    +   * of the inner lists are merged because they are strings.
    +   * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +   *
    +   * Non-overlapping object fields are combined.
    +   * {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
    +   *
    +   * Overlapping object fields are merged.
    +   * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +   *
    +   * Examples of merging objects containing lists of strings.
    +   * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +   *
    +   * For a more complete example, suppose a streaming SQL query is
    +   * yielding a result set whose rows contain a single string
    +   * field. The following `PartialResultSet`s might be yielded:
    +   *
    +   * {
    +   * "metadata": { ... }
    +   * "values": ["Hello", "W"]
    +   * "chunked_value": true
    +   * "resume_token": "Af65..."
    +   * }
    +   * {
    +   * "values": ["orl"]
    +   * "chunked_value": true
    +   * }
    +   * {
    +   * "values": ["d"]
    +   * "resume_token": "Zx1B..."
    +   * }
    +   *
    +   * This sequence of `PartialResultSet`s encodes two rows, one
    +   * containing the field value `"Hello"`, and a second containing the
    +   * field value `"World" = "W" + "orl" + "d"`.
    +   *
    +   * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +   * resumed from a previously yielded `resume_token`. For the above sequence of
    +   * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +   * yields results from the `PartialResultSet` with value "orl".
    +   * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + java.util.List getValuesOrBuilderList(); + + /** + * + * + *
    +   * A streamed result set consists of a stream of values, which might
    +   * be split into many `PartialResultSet` messages to accommodate
    +   * large rows and/or large values. Every N complete values defines a
    +   * row, where N is equal to the number of entries in
    +   * [metadata.row_type.fields][google.spanner.v1.StructType.fields].
    +   *
    +   * Most values are encoded based on type as described
    +   * [here][google.spanner.v1.TypeCode].
    +   *
    +   * It's possible that the last value in values is "chunked",
    +   * meaning that the rest of the value is sent in subsequent
    +   * `PartialResultSet`(s). This is denoted by the
    +   * [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field.
    +   * Two or more chunked values can be merged to form a complete value as
    +   * follows:
    +   *
    +   * * `bool/number/null`: can't be chunked
    +   * * `string`: concatenate the strings
    +   * * `list`: concatenate the lists. If the last element in a list is a
    +   * `string`, `list`, or `object`, merge it with the first element in
    +   * the next list by applying these rules recursively.
    +   * * `object`: concatenate the (field name, field value) pairs. If a
    +   * field name is duplicated, then apply these rules recursively
    +   * to merge the field values.
    +   *
    +   * Some examples of merging:
    +   *
    +   * Strings are concatenated.
    +   * "foo", "bar" => "foobar"
    +   *
    +   * Lists of non-strings are concatenated.
    +   * [2, 3], [4] => [2, 3, 4]
    +   *
    +   * Lists are concatenated, but the last and first elements are merged
    +   * because they are strings.
    +   * ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
    +   *
    +   * Lists are concatenated, but the last and first elements are merged
    +   * because they are lists. Recursively, the last and first elements
    +   * of the inner lists are merged because they are strings.
    +   * ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
    +   *
    +   * Non-overlapping object fields are combined.
    +   * {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
    +   *
    +   * Overlapping object fields are merged.
    +   * {"a": "1"}, {"a": "2"} => {"a": "12"}
    +   *
    +   * Examples of merging objects containing lists of strings.
    +   * {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
    +   *
    +   * For a more complete example, suppose a streaming SQL query is
    +   * yielding a result set whose rows contain a single string
    +   * field. The following `PartialResultSet`s might be yielded:
    +   *
    +   * {
    +   * "metadata": { ... }
    +   * "values": ["Hello", "W"]
    +   * "chunked_value": true
    +   * "resume_token": "Af65..."
    +   * }
    +   * {
    +   * "values": ["orl"]
    +   * "chunked_value": true
    +   * }
    +   * {
    +   * "values": ["d"]
    +   * "resume_token": "Zx1B..."
    +   * }
    +   *
    +   * This sequence of `PartialResultSet`s encodes two rows, one
    +   * containing the field value `"Hello"`, and a second containing the
    +   * field value `"World" = "W" + "orl" + "d"`.
    +   *
    +   * Not all `PartialResultSet`s contain a `resume_token`. Execution can only be
    +   * resumed from a previously yielded `resume_token`. For the above sequence of
    +   * `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."`
    +   * yields results from the `PartialResultSet` with value "orl".
    +   * 
    + * + * repeated .google.protobuf.Value values = 2; + */ + com.google.protobuf.ValueOrBuilder getValuesOrBuilder(int index); + + /** + * + * + *
    +   * If true, then the final value in
    +   * [values][google.spanner.v1.PartialResultSet.values] is chunked, and must be
    +   * combined with more values from subsequent `PartialResultSet`s to obtain a
    +   * complete field value.
    +   * 
    + * + * bool chunked_value = 3; + * + * @return The chunkedValue. + */ + boolean getChunkedValue(); + + /** + * + * + *
    +   * Streaming calls might be interrupted for a variety of reasons, such
    +   * as TCP connection loss. If this occurs, the stream of results can
    +   * be resumed by re-sending the original request and including
    +   * `resume_token`. Note that executing any other transaction in the
    +   * same session invalidates the token.
    +   * 
    + * + * bytes resume_token = 4; + * + * @return The resumeToken. + */ + com.google.protobuf.ByteString getResumeToken(); + + /** + * + * + *
    +   * Query plan and execution statistics for the statement that produced this
    +   * streaming result set. These can be requested by setting
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]
    +   * and are sent only once with the last response in the stream. This field is
    +   * also present in the last response for DML statements.
    +   * 
    + * + * .google.spanner.v1.ResultSetStats stats = 5; + * + * @return Whether the stats field is set. + */ + boolean hasStats(); + + /** + * + * + *
    +   * Query plan and execution statistics for the statement that produced this
    +   * streaming result set. These can be requested by setting
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]
    +   * and are sent only once with the last response in the stream. This field is
    +   * also present in the last response for DML statements.
    +   * 
    + * + * .google.spanner.v1.ResultSetStats stats = 5; + * + * @return The stats. + */ + com.google.spanner.v1.ResultSetStats getStats(); + + /** + * + * + *
    +   * Query plan and execution statistics for the statement that produced this
    +   * streaming result set. These can be requested by setting
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]
    +   * and are sent only once with the last response in the stream. This field is
    +   * also present in the last response for DML statements.
    +   * 
    + * + * .google.spanner.v1.ResultSetStats stats = 5; + */ + com.google.spanner.v1.ResultSetStatsOrBuilder getStatsOrBuilder(); + + /** + * + * + *
    +   * Optional. A precommit token is included if the read-write transaction
    +   * has multiplexed sessions enabled. Pass the precommit token with the highest
    +   * sequence number from this transaction attempt to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + boolean hasPrecommitToken(); + + /** + * + * + *
    +   * Optional. A precommit token is included if the read-write transaction
    +   * has multiplexed sessions enabled. Pass the precommit token with the highest
    +   * sequence number from this transaction attempt to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken(); + + /** + * + * + *
    +   * Optional. A precommit token is included if the read-write transaction
    +   * has multiplexed sessions enabled. Pass the precommit token with the highest
    +   * sequence number from this transaction attempt to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder getPrecommitTokenOrBuilder(); + + /** + * + * + *
    +   * Optional. Indicates whether this is the last `PartialResultSet` in the
    +   * stream. The server might optionally set this field. Clients shouldn't rely
    +   * on this field being set in all cases.
    +   * 
    + * + * bool last = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The last. + */ + boolean getLast(); + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the cacheUpdate field is set. + */ + boolean hasCacheUpdate(); + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The cacheUpdate. + */ + com.google.spanner.v1.CacheUpdate getCacheUpdate(); + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 10 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.CacheUpdateOrBuilder getCacheUpdateOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Partition.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Partition.java new file mode 100644 index 000000000000..9b8b36727be5 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Partition.java @@ -0,0 +1,511 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * Information returned for each partition returned in a
    + * PartitionResponse.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.Partition} + */ +@com.google.protobuf.Generated +public final class Partition extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.Partition) + PartitionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Partition"); + } + + // Use Partition.newBuilder() to construct. + private Partition(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Partition() { + partitionToken_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_Partition_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_Partition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Partition.class, com.google.spanner.v1.Partition.Builder.class); + } + + public static final int PARTITION_TOKEN_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString partitionToken_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * This token can be passed to `Read`, `StreamingRead`, `ExecuteSql`, or
    +   * `ExecuteStreamingSql` requests to restrict the results to those identified
    +   * by this partition token.
    +   * 
    + * + * bytes partition_token = 1; + * + * @return The partitionToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPartitionToken() { + return partitionToken_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!partitionToken_.isEmpty()) { + output.writeBytes(1, partitionToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!partitionToken_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, partitionToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.Partition)) { + return super.equals(obj); + } + com.google.spanner.v1.Partition other = (com.google.spanner.v1.Partition) obj; + + if (!getPartitionToken().equals(other.getPartitionToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARTITION_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPartitionToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.Partition 
parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Partition parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Partition parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Partition parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Partition parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Partition parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Partition parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Partition parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Partition parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Partition parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Partition parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Partition parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.Partition prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Information returned for each partition returned in a
    +   * PartitionResponse.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.Partition} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.Partition) + com.google.spanner.v1.PartitionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_Partition_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_Partition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Partition.class, com.google.spanner.v1.Partition.Builder.class); + } + + // Construct using com.google.spanner.v1.Partition.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + partitionToken_ = com.google.protobuf.ByteString.EMPTY; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_Partition_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.Partition getDefaultInstanceForType() { + return com.google.spanner.v1.Partition.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.Partition build() { + com.google.spanner.v1.Partition result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.Partition buildPartial() { + com.google.spanner.v1.Partition result = new com.google.spanner.v1.Partition(this); + if 
(bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.Partition result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.partitionToken_ = partitionToken_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.Partition) { + return mergeFrom((com.google.spanner.v1.Partition) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.Partition other) { + if (other == com.google.spanner.v1.Partition.getDefaultInstance()) return this; + if (!other.getPartitionToken().isEmpty()) { + setPartitionToken(other.getPartitionToken()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + partitionToken_ = input.readBytes(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.ByteString partitionToken_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * This token can be passed to `Read`, `StreamingRead`, `ExecuteSql`, or
    +     * `ExecuteStreamingSql` requests to restrict the results to those identified
    +     * by this partition token.
    +     * 
    + * + * bytes partition_token = 1; + * + * @return The partitionToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPartitionToken() { + return partitionToken_; + } + + /** + * + * + *
    +     * This token can be passed to `Read`, `StreamingRead`, `ExecuteSql`, or
    +     * `ExecuteStreamingSql` requests to restrict the results to those identified
    +     * by this partition token.
    +     * 
    + * + * bytes partition_token = 1; + * + * @param value The partitionToken to set. + * @return This builder for chaining. + */ + public Builder setPartitionToken(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + partitionToken_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * This token can be passed to `Read`, `StreamingRead`, `ExecuteSql`, or
    +     * `ExecuteStreamingSql` requests to restrict the results to those identified
    +     * by this partition token.
    +     * 
    + * + * bytes partition_token = 1; + * + * @return This builder for chaining. + */ + public Builder clearPartitionToken() { + bitField0_ = (bitField0_ & ~0x00000001); + partitionToken_ = getDefaultInstance().getPartitionToken(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.Partition) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.Partition) + private static final com.google.spanner.v1.Partition DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.Partition(); + } + + public static com.google.spanner.v1.Partition getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Partition parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.Partition getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionOptions.java 
b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionOptions.java new file mode 100644 index 000000000000..94447be37dc2 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionOptions.java @@ -0,0 +1,646 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * Options for a `PartitionQueryRequest` and `PartitionReadRequest`.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.PartitionOptions} + */ +@com.google.protobuf.Generated +public final class PartitionOptions extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.PartitionOptions) + PartitionOptionsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "PartitionOptions"); + } + + // Use PartitionOptions.newBuilder() to construct. + private PartitionOptions(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private PartitionOptions() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_PartitionOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_PartitionOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.PartitionOptions.class, + com.google.spanner.v1.PartitionOptions.Builder.class); + } + + public static final int PARTITION_SIZE_BYTES_FIELD_NUMBER = 1; + private long partitionSizeBytes_ = 0L; + + /** + * + * + *
    +   * **Note:** This hint is currently ignored by `PartitionQuery` and
    +   * `PartitionRead` requests.
    +   *
    +   * The desired data size for each partition generated. The default for this
    +   * option is currently 1 GiB. This is only a hint. The actual size of each
    +   * partition can be smaller or larger than this size request.
    +   * 
    + * + * int64 partition_size_bytes = 1; + * + * @return The partitionSizeBytes. + */ + @java.lang.Override + public long getPartitionSizeBytes() { + return partitionSizeBytes_; + } + + public static final int MAX_PARTITIONS_FIELD_NUMBER = 2; + private long maxPartitions_ = 0L; + + /** + * + * + *
    +   * **Note:** This hint is currently ignored by `PartitionQuery` and
    +   * `PartitionRead` requests.
    +   *
    +   * The desired maximum number of partitions to return. For example, this
    +   * might be set to the number of workers available. The default for this
    +   * option is currently 10,000. The maximum value is currently 200,000. This
    +   * is only a hint. The actual number of partitions returned can be smaller or
    +   * larger than this maximum count request.
    +   * 
    + * + * int64 max_partitions = 2; + * + * @return The maxPartitions. + */ + @java.lang.Override + public long getMaxPartitions() { + return maxPartitions_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (partitionSizeBytes_ != 0L) { + output.writeInt64(1, partitionSizeBytes_); + } + if (maxPartitions_ != 0L) { + output.writeInt64(2, maxPartitions_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (partitionSizeBytes_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, partitionSizeBytes_); + } + if (maxPartitions_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, maxPartitions_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.PartitionOptions)) { + return super.equals(obj); + } + com.google.spanner.v1.PartitionOptions other = (com.google.spanner.v1.PartitionOptions) obj; + + if (getPartitionSizeBytes() != other.getPartitionSizeBytes()) return false; + if (getMaxPartitions() != other.getMaxPartitions()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + 
PARTITION_SIZE_BYTES_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getPartitionSizeBytes()); + hash = (37 * hash) + MAX_PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getMaxPartitions()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.PartitionOptions parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PartitionOptions parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PartitionOptions parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PartitionOptions parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PartitionOptions parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PartitionOptions parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PartitionOptions parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.spanner.v1.PartitionOptions parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.PartitionOptions parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PartitionOptions parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.PartitionOptions parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PartitionOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.PartitionOptions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Options for a `PartitionQueryRequest` and `PartitionReadRequest`.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.PartitionOptions} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.PartitionOptions) + com.google.spanner.v1.PartitionOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_PartitionOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_PartitionOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.PartitionOptions.class, + com.google.spanner.v1.PartitionOptions.Builder.class); + } + + // Construct using com.google.spanner.v1.PartitionOptions.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + partitionSizeBytes_ = 0L; + maxPartitions_ = 0L; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_PartitionOptions_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.PartitionOptions getDefaultInstanceForType() { + return com.google.spanner.v1.PartitionOptions.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.PartitionOptions build() { + com.google.spanner.v1.PartitionOptions result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.PartitionOptions buildPartial() { + 
com.google.spanner.v1.PartitionOptions result = + new com.google.spanner.v1.PartitionOptions(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.PartitionOptions result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.partitionSizeBytes_ = partitionSizeBytes_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.maxPartitions_ = maxPartitions_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.PartitionOptions) { + return mergeFrom((com.google.spanner.v1.PartitionOptions) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.PartitionOptions other) { + if (other == com.google.spanner.v1.PartitionOptions.getDefaultInstance()) return this; + if (other.getPartitionSizeBytes() != 0L) { + setPartitionSizeBytes(other.getPartitionSizeBytes()); + } + if (other.getMaxPartitions() != 0L) { + setMaxPartitions(other.getMaxPartitions()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + partitionSizeBytes_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 16: + { + maxPartitions_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, 
extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long partitionSizeBytes_; + + /** + * + * + *
    +     * **Note:** This hint is currently ignored by `PartitionQuery` and
    +     * `PartitionRead` requests.
    +     *
    +     * The desired data size for each partition generated. The default for this
    +     * option is currently 1 GiB. This is only a hint. The actual size of each
    +     * partition can be smaller or larger than this size request.
    +     * 
    + * + * int64 partition_size_bytes = 1; + * + * @return The partitionSizeBytes. + */ + @java.lang.Override + public long getPartitionSizeBytes() { + return partitionSizeBytes_; + } + + /** + * + * + *
    +     * **Note:** This hint is currently ignored by `PartitionQuery` and
    +     * `PartitionRead` requests.
    +     *
    +     * The desired data size for each partition generated. The default for this
    +     * option is currently 1 GiB. This is only a hint. The actual size of each
    +     * partition can be smaller or larger than this size request.
    +     * 
    + * + * int64 partition_size_bytes = 1; + * + * @param value The partitionSizeBytes to set. + * @return This builder for chaining. + */ + public Builder setPartitionSizeBytes(long value) { + + partitionSizeBytes_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * **Note:** This hint is currently ignored by `PartitionQuery` and
    +     * `PartitionRead` requests.
    +     *
    +     * The desired data size for each partition generated. The default for this
    +     * option is currently 1 GiB. This is only a hint. The actual size of each
    +     * partition can be smaller or larger than this size request.
    +     * 
    + * + * int64 partition_size_bytes = 1; + * + * @return This builder for chaining. + */ + public Builder clearPartitionSizeBytes() { + bitField0_ = (bitField0_ & ~0x00000001); + partitionSizeBytes_ = 0L; + onChanged(); + return this; + } + + private long maxPartitions_; + + /** + * + * + *
    +     * **Note:** This hint is currently ignored by `PartitionQuery` and
    +     * `PartitionRead` requests.
    +     *
    +     * The desired maximum number of partitions to return. For example, this
    +     * might be set to the number of workers available. The default for this
    +     * option is currently 10,000. The maximum value is currently 200,000. This
    +     * is only a hint. The actual number of partitions returned can be smaller or
    +     * larger than this maximum count request.
    +     * 
    + * + * int64 max_partitions = 2; + * + * @return The maxPartitions. + */ + @java.lang.Override + public long getMaxPartitions() { + return maxPartitions_; + } + + /** + * + * + *
    +     * **Note:** This hint is currently ignored by `PartitionQuery` and
    +     * `PartitionRead` requests.
    +     *
    +     * The desired maximum number of partitions to return. For example, this
    +     * might be set to the number of workers available. The default for this
    +     * option is currently 10,000. The maximum value is currently 200,000. This
    +     * is only a hint. The actual number of partitions returned can be smaller or
    +     * larger than this maximum count request.
    +     * 
    + * + * int64 max_partitions = 2; + * + * @param value The maxPartitions to set. + * @return This builder for chaining. + */ + public Builder setMaxPartitions(long value) { + + maxPartitions_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * **Note:** This hint is currently ignored by `PartitionQuery` and
    +     * `PartitionRead` requests.
    +     *
    +     * The desired maximum number of partitions to return. For example, this
    +     * might be set to the number of workers available. The default for this
    +     * option is currently 10,000. The maximum value is currently 200,000. This
    +     * is only a hint. The actual number of partitions returned can be smaller or
    +     * larger than this maximum count request.
    +     * 
    + * + * int64 max_partitions = 2; + * + * @return This builder for chaining. + */ + public Builder clearMaxPartitions() { + bitField0_ = (bitField0_ & ~0x00000002); + maxPartitions_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.PartitionOptions) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.PartitionOptions) + private static final com.google.spanner.v1.PartitionOptions DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.PartitionOptions(); + } + + public static com.google.spanner.v1.PartitionOptions getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PartitionOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.PartitionOptions getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionOptionsOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionOptionsOrBuilder.java new file mode 100644 index 000000000000..94ae119aa287 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionOptionsOrBuilder.java @@ -0,0 +1,66 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface PartitionOptionsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.PartitionOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * **Note:** This hint is currently ignored by `PartitionQuery` and
    +   * `PartitionRead` requests.
    +   *
    +   * The desired data size for each partition generated. The default for this
    +   * option is currently 1 GiB. This is only a hint. The actual size of each
    +   * partition can be smaller or larger than this size request.
    +   * 
    + * + * int64 partition_size_bytes = 1; + * + * @return The partitionSizeBytes. + */ + long getPartitionSizeBytes(); + + /** + * + * + *
    +   * **Note:** This hint is currently ignored by `PartitionQuery` and
    +   * `PartitionRead` requests.
    +   *
    +   * The desired maximum number of partitions to return. For example, this
    +   * might be set to the number of workers available. The default for this
    +   * option is currently 10,000. The maximum value is currently 200,000. This
    +   * is only a hint. The actual number of partitions returned can be smaller or
    +   * larger than this maximum count request.
    +   * 
    + * + * int64 max_partitions = 2; + * + * @return The maxPartitions. + */ + long getMaxPartitions(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionOrBuilder.java new file mode 100644 index 000000000000..ff7d8444c175 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionOrBuilder.java @@ -0,0 +1,43 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface PartitionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.Partition) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * This token can be passed to `Read`, `StreamingRead`, `ExecuteSql`, or
    +   * `ExecuteStreamingSql` requests to restrict the results to those identified
    +   * by this partition token.
    +   * 
    + * + * bytes partition_token = 1; + * + * @return The partitionToken. + */ + com.google.protobuf.ByteString getPartitionToken(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionQueryRequest.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionQueryRequest.java new file mode 100644 index 000000000000..591c068dc72a --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionQueryRequest.java @@ -0,0 +1,2427 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * The request for [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery]
    + * 
    + * + * Protobuf type {@code google.spanner.v1.PartitionQueryRequest} + */ +@com.google.protobuf.Generated +public final class PartitionQueryRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.PartitionQueryRequest) + PartitionQueryRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "PartitionQueryRequest"); + } + + // Use PartitionQueryRequest.newBuilder() to construct. + private PartitionQueryRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private PartitionQueryRequest() { + session_ = ""; + sql_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_PartitionQueryRequest_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 5: + return internalGetParamTypes(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_PartitionQueryRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.PartitionQueryRequest.class, + com.google.spanner.v1.PartitionQueryRequest.Builder.class); + } + + private int bitField0_; + public static final int SESSION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object session_ = ""; + + /** + * + * + *
    +   * Required. The session used to create the partitions.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + @java.lang.Override + public java.lang.String getSession() { + java.lang.Object ref = session_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + session_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The session used to create the partitions.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSessionBytes() { + java.lang.Object ref = session_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + session_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TRANSACTION_FIELD_NUMBER = 2; + private com.google.spanner.v1.TransactionSelector transaction_; + + /** + * + * + *
    +   * Read-only snapshot transactions are supported, read and write and
    +   * single-use transactions are not.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return Whether the transaction field is set. + */ + @java.lang.Override + public boolean hasTransaction() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Read-only snapshot transactions are supported, read and write and
    +   * single-use transactions are not.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return The transaction. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionSelector getTransaction() { + return transaction_ == null + ? com.google.spanner.v1.TransactionSelector.getDefaultInstance() + : transaction_; + } + + /** + * + * + *
    +   * Read-only snapshot transactions are supported, read and write and
    +   * single-use transactions are not.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + @java.lang.Override + public com.google.spanner.v1.TransactionSelectorOrBuilder getTransactionOrBuilder() { + return transaction_ == null + ? com.google.spanner.v1.TransactionSelector.getDefaultInstance() + : transaction_; + } + + public static final int SQL_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object sql_ = ""; + + /** + * + * + *
    +   * Required. The query request to generate partitions for. The request fails
    +   * if the query isn't root partitionable. For a query to be root
    +   * partitionable, it needs to satisfy a few conditions. For example, if the
    +   * query execution plan contains a distributed union operator, then it must be
    +   * the first operator in the plan. For more information about other
    +   * conditions, see [Read data in
    +   * parallel](https://cloud.google.com/spanner/docs/reads#read_data_in_parallel).
    +   *
    +   * The query request must not contain DML commands, such as `INSERT`,
    +   * `UPDATE`, or `DELETE`. Use
    +   * [`ExecuteStreamingSql`][google.spanner.v1.Spanner.ExecuteStreamingSql] with
    +   * a `PartitionedDml` transaction for large, partition-friendly DML
    +   * operations.
    +   * 
    + * + * string sql = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The sql. + */ + @java.lang.Override + public java.lang.String getSql() { + java.lang.Object ref = sql_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sql_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The query request to generate partitions for. The request fails
    +   * if the query isn't root partitionable. For a query to be root
    +   * partitionable, it needs to satisfy a few conditions. For example, if the
    +   * query execution plan contains a distributed union operator, then it must be
    +   * the first operator in the plan. For more information about other
    +   * conditions, see [Read data in
    +   * parallel](https://cloud.google.com/spanner/docs/reads#read_data_in_parallel).
    +   *
    +   * The query request must not contain DML commands, such as `INSERT`,
    +   * `UPDATE`, or `DELETE`. Use
    +   * [`ExecuteStreamingSql`][google.spanner.v1.Spanner.ExecuteStreamingSql] with
    +   * a `PartitionedDml` transaction for large, partition-friendly DML
    +   * operations.
    +   * 
    + * + * string sql = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for sql. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSqlBytes() { + java.lang.Object ref = sql_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sql_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PARAMS_FIELD_NUMBER = 4; + private com.google.protobuf.Struct params_; + + /** + * + * + *
    +   * Optional. Parameter names and values that bind to placeholders in the SQL
    +   * string.
    +   *
    +   * A parameter placeholder consists of the `@` character followed by the
    +   * parameter name (for example, `@firstName`). Parameter names can contain
    +   * letters, numbers, and underscores.
    +   *
    +   * Parameters can appear anywhere that a literal value is expected. The same
    +   * parameter name can be used more than once, for example:
    +   *
    +   * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +   *
    +   * It's an error to execute a SQL statement with unbound parameters.
    +   * 
    + * + * .google.protobuf.Struct params = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the params field is set. + */ + @java.lang.Override + public boolean hasParams() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Optional. Parameter names and values that bind to placeholders in the SQL
    +   * string.
    +   *
    +   * A parameter placeholder consists of the `@` character followed by the
    +   * parameter name (for example, `@firstName`). Parameter names can contain
    +   * letters, numbers, and underscores.
    +   *
    +   * Parameters can appear anywhere that a literal value is expected. The same
    +   * parameter name can be used more than once, for example:
    +   *
    +   * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +   *
    +   * It's an error to execute a SQL statement with unbound parameters.
    +   * 
    + * + * .google.protobuf.Struct params = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The params. + */ + @java.lang.Override + public com.google.protobuf.Struct getParams() { + return params_ == null ? com.google.protobuf.Struct.getDefaultInstance() : params_; + } + + /** + * + * + *
    +   * Optional. Parameter names and values that bind to placeholders in the SQL
    +   * string.
    +   *
    +   * A parameter placeholder consists of the `@` character followed by the
    +   * parameter name (for example, `@firstName`). Parameter names can contain
    +   * letters, numbers, and underscores.
    +   *
    +   * Parameters can appear anywhere that a literal value is expected. The same
    +   * parameter name can be used more than once, for example:
    +   *
    +   * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +   *
    +   * It's an error to execute a SQL statement with unbound parameters.
    +   * 
    + * + * .google.protobuf.Struct params = 4 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public com.google.protobuf.StructOrBuilder getParamsOrBuilder() { + return params_ == null ? com.google.protobuf.Struct.getDefaultInstance() : params_; + } + + public static final int PARAM_TYPES_FIELD_NUMBER = 5; + + private static final class ParamTypesDefaultEntryHolder { + static final com.google.protobuf.MapEntry + defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_PartitionQueryRequest_ParamTypesEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.MESSAGE, + com.google.spanner.v1.Type.getDefaultInstance()); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField paramTypes_; + + private com.google.protobuf.MapField + internalGetParamTypes() { + if (paramTypes_ == null) { + return com.google.protobuf.MapField.emptyMapField(ParamTypesDefaultEntryHolder.defaultEntry); + } + return paramTypes_; + } + + public int getParamTypesCount() { + return internalGetParamTypes().getMap().size(); + } + + /** + * + * + *
    +   * Optional. It isn't always possible for Cloud Spanner to infer the right SQL
    +   * type from a JSON value. For example, values of type `BYTES` and values of
    +   * type `STRING` both appear in
    +   * [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
    +   *
    +   * In these cases, `param_types` can be used to specify the exact
    +   * SQL type for some or all of the SQL query parameters. See the
    +   * definition of [Type][google.spanner.v1.Type] for more information
    +   * about SQL types.
    +   * 
    + * + * + * map<string, .google.spanner.v1.Type> param_types = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsParamTypes(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParamTypes().getMap().containsKey(key); + } + + /** Use {@link #getParamTypesMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParamTypes() { + return getParamTypesMap(); + } + + /** + * + * + *
    +   * Optional. It isn't always possible for Cloud Spanner to infer the right SQL
    +   * type from a JSON value. For example, values of type `BYTES` and values of
    +   * type `STRING` both appear in
    +   * [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
    +   *
    +   * In these cases, `param_types` can be used to specify the exact
    +   * SQL type for some or all of the SQL query parameters. See the
    +   * definition of [Type][google.spanner.v1.Type] for more information
    +   * about SQL types.
    +   * 
    + * + * + * map<string, .google.spanner.v1.Type> param_types = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getParamTypesMap() { + return internalGetParamTypes().getMap(); + } + + /** + * + * + *
    +   * Optional. It isn't always possible for Cloud Spanner to infer the right SQL
    +   * type from a JSON value. For example, values of type `BYTES` and values of
    +   * type `STRING` both appear in
    +   * [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
    +   *
    +   * In these cases, `param_types` can be used to specify the exact
    +   * SQL type for some or all of the SQL query parameters. See the
    +   * definition of [Type][google.spanner.v1.Type] for more information
    +   * about SQL types.
    +   * 
    + * + * + * map<string, .google.spanner.v1.Type> param_types = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ com.google.spanner.v1.Type getParamTypesOrDefault( + java.lang.String key, + /* nullable */ + com.google.spanner.v1.Type defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetParamTypes().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
    +   * Optional. It isn't always possible for Cloud Spanner to infer the right SQL
    +   * type from a JSON value. For example, values of type `BYTES` and values of
    +   * type `STRING` both appear in
    +   * [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
    +   *
    +   * In these cases, `param_types` can be used to specify the exact
    +   * SQL type for some or all of the SQL query parameters. See the
    +   * definition of [Type][google.spanner.v1.Type] for more information
    +   * about SQL types.
    +   * 
    + * + * + * map<string, .google.spanner.v1.Type> param_types = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.Type getParamTypesOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetParamTypes().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int PARTITION_OPTIONS_FIELD_NUMBER = 6; + private com.google.spanner.v1.PartitionOptions partitionOptions_; + + /** + * + * + *
    +   * Additional options that affect how many partitions are created.
    +   * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 6; + * + * @return Whether the partitionOptions field is set. + */ + @java.lang.Override + public boolean hasPartitionOptions() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * Additional options that affect how many partitions are created.
    +   * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 6; + * + * @return The partitionOptions. + */ + @java.lang.Override + public com.google.spanner.v1.PartitionOptions getPartitionOptions() { + return partitionOptions_ == null + ? com.google.spanner.v1.PartitionOptions.getDefaultInstance() + : partitionOptions_; + } + + /** + * + * + *
    +   * Additional options that affect how many partitions are created.
    +   * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 6; + */ + @java.lang.Override + public com.google.spanner.v1.PartitionOptionsOrBuilder getPartitionOptionsOrBuilder() { + return partitionOptions_ == null + ? com.google.spanner.v1.PartitionOptions.getDefaultInstance() + : partitionOptions_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(session_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, session_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getTransaction()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sql_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, sql_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(4, getParams()); + } + com.google.protobuf.GeneratedMessage.serializeStringMapTo( + output, internalGetParamTypes(), ParamTypesDefaultEntryHolder.defaultEntry, 5); + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(6, getPartitionOptions()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(session_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, session_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getTransaction()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(sql_)) { + size += 
com.google.protobuf.GeneratedMessage.computeStringSize(3, sql_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getParams()); + } + for (java.util.Map.Entry entry : + internalGetParamTypes().getMap().entrySet()) { + com.google.protobuf.MapEntry paramTypes__ = + ParamTypesDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, paramTypes__); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, getPartitionOptions()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.PartitionQueryRequest)) { + return super.equals(obj); + } + com.google.spanner.v1.PartitionQueryRequest other = + (com.google.spanner.v1.PartitionQueryRequest) obj; + + if (!getSession().equals(other.getSession())) return false; + if (hasTransaction() != other.hasTransaction()) return false; + if (hasTransaction()) { + if (!getTransaction().equals(other.getTransaction())) return false; + } + if (!getSql().equals(other.getSql())) return false; + if (hasParams() != other.hasParams()) return false; + if (hasParams()) { + if (!getParams().equals(other.getParams())) return false; + } + if (!internalGetParamTypes().equals(other.internalGetParamTypes())) return false; + if (hasPartitionOptions() != other.hasPartitionOptions()) return false; + if (hasPartitionOptions()) { + if (!getPartitionOptions().equals(other.getPartitionOptions())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return 
memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SESSION_FIELD_NUMBER; + hash = (53 * hash) + getSession().hashCode(); + if (hasTransaction()) { + hash = (37 * hash) + TRANSACTION_FIELD_NUMBER; + hash = (53 * hash) + getTransaction().hashCode(); + } + hash = (37 * hash) + SQL_FIELD_NUMBER; + hash = (53 * hash) + getSql().hashCode(); + if (hasParams()) { + hash = (37 * hash) + PARAMS_FIELD_NUMBER; + hash = (53 * hash) + getParams().hashCode(); + } + if (!internalGetParamTypes().getMap().isEmpty()) { + hash = (37 * hash) + PARAM_TYPES_FIELD_NUMBER; + hash = (53 * hash) + internalGetParamTypes().hashCode(); + } + if (hasPartitionOptions()) { + hash = (37 * hash) + PARTITION_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getPartitionOptions().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.PartitionQueryRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PartitionQueryRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PartitionQueryRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PartitionQueryRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PartitionQueryRequest parseFrom(byte[] data) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PartitionQueryRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PartitionQueryRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PartitionQueryRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.PartitionQueryRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PartitionQueryRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.PartitionQueryRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PartitionQueryRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + 
+ @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.PartitionQueryRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery]
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.PartitionQueryRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.PartitionQueryRequest) + com.google.spanner.v1.PartitionQueryRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_PartitionQueryRequest_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 5: + return internalGetParamTypes(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 5: + return internalGetMutableParamTypes(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_PartitionQueryRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.PartitionQueryRequest.class, + com.google.spanner.v1.PartitionQueryRequest.Builder.class); + } + + // Construct using com.google.spanner.v1.PartitionQueryRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetTransactionFieldBuilder(); + 
internalGetParamsFieldBuilder(); + internalGetPartitionOptionsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + session_ = ""; + transaction_ = null; + if (transactionBuilder_ != null) { + transactionBuilder_.dispose(); + transactionBuilder_ = null; + } + sql_ = ""; + params_ = null; + if (paramsBuilder_ != null) { + paramsBuilder_.dispose(); + paramsBuilder_ = null; + } + internalGetMutableParamTypes().clear(); + partitionOptions_ = null; + if (partitionOptionsBuilder_ != null) { + partitionOptionsBuilder_.dispose(); + partitionOptionsBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_PartitionQueryRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.PartitionQueryRequest getDefaultInstanceForType() { + return com.google.spanner.v1.PartitionQueryRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.PartitionQueryRequest build() { + com.google.spanner.v1.PartitionQueryRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.PartitionQueryRequest buildPartial() { + com.google.spanner.v1.PartitionQueryRequest result = + new com.google.spanner.v1.PartitionQueryRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.PartitionQueryRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.session_ = session_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.transaction_ = + transactionBuilder_ == null ? 
transaction_ : transactionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.sql_ = sql_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.params_ = paramsBuilder_ == null ? params_ : paramsBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.paramTypes_ = + internalGetParamTypes().build(ParamTypesDefaultEntryHolder.defaultEntry); + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.partitionOptions_ = + partitionOptionsBuilder_ == null ? partitionOptions_ : partitionOptionsBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.PartitionQueryRequest) { + return mergeFrom((com.google.spanner.v1.PartitionQueryRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.PartitionQueryRequest other) { + if (other == com.google.spanner.v1.PartitionQueryRequest.getDefaultInstance()) return this; + if (!other.getSession().isEmpty()) { + session_ = other.session_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasTransaction()) { + mergeTransaction(other.getTransaction()); + } + if (!other.getSql().isEmpty()) { + sql_ = other.sql_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasParams()) { + mergeParams(other.getParams()); + } + internalGetMutableParamTypes().mergeFrom(other.internalGetParamTypes()); + bitField0_ |= 0x00000010; + if (other.hasPartitionOptions()) { + mergePartitionOptions(other.getPartitionOptions()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream 
input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + session_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetTransactionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + sql_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage(internalGetParamsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + com.google.protobuf.MapEntry + paramTypes__ = + input.readMessage( + ParamTypesDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableParamTypes() + .ensureBuilderMap() + .put(paramTypes__.getKey(), paramTypes__.getValue()); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 50: + { + input.readMessage( + internalGetPartitionOptionsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000020; + break; + } // case 50 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object session_ = ""; + + /** + * + * + *
    +     * Required. The session used to create the partitions.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + public java.lang.String getSession() { + java.lang.Object ref = session_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + session_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The session used to create the partitions.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + public com.google.protobuf.ByteString getSessionBytes() { + java.lang.Object ref = session_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + session_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The session used to create the partitions.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The session to set. + * @return This builder for chaining. + */ + public Builder setSession(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + session_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The session used to create the partitions.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearSession() { + session_ = getDefaultInstance().getSession(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The session used to create the partitions.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for session to set. + * @return This builder for chaining. + */ + public Builder setSessionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + session_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.spanner.v1.TransactionSelector transaction_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionSelector, + com.google.spanner.v1.TransactionSelector.Builder, + com.google.spanner.v1.TransactionSelectorOrBuilder> + transactionBuilder_; + + /** + * + * + *
    +     * Read-only snapshot transactions are supported, read and write and
    +     * single-use transactions are not.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return Whether the transaction field is set. + */ + public boolean hasTransaction() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Read-only snapshot transactions are supported, read and write and
    +     * single-use transactions are not.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return The transaction. + */ + public com.google.spanner.v1.TransactionSelector getTransaction() { + if (transactionBuilder_ == null) { + return transaction_ == null + ? com.google.spanner.v1.TransactionSelector.getDefaultInstance() + : transaction_; + } else { + return transactionBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Read-only snapshot transactions are supported, read and write and
    +     * single-use transactions are not.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public Builder setTransaction(com.google.spanner.v1.TransactionSelector value) { + if (transactionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + transaction_ = value; + } else { + transactionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Read-only snapshot transactions are supported, read and write and
    +     * single-use transactions are not.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public Builder setTransaction( + com.google.spanner.v1.TransactionSelector.Builder builderForValue) { + if (transactionBuilder_ == null) { + transaction_ = builderForValue.build(); + } else { + transactionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Read-only snapshot transactions are supported, read and write and
    +     * single-use transactions are not.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public Builder mergeTransaction(com.google.spanner.v1.TransactionSelector value) { + if (transactionBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && transaction_ != null + && transaction_ != com.google.spanner.v1.TransactionSelector.getDefaultInstance()) { + getTransactionBuilder().mergeFrom(value); + } else { + transaction_ = value; + } + } else { + transactionBuilder_.mergeFrom(value); + } + if (transaction_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Read-only snapshot transactions are supported, read and write and
    +     * single-use transactions are not.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public Builder clearTransaction() { + bitField0_ = (bitField0_ & ~0x00000002); + transaction_ = null; + if (transactionBuilder_ != null) { + transactionBuilder_.dispose(); + transactionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Read-only snapshot transactions are supported, read and write and
    +     * single-use transactions are not.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public com.google.spanner.v1.TransactionSelector.Builder getTransactionBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetTransactionFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Read-only snapshot transactions are supported, read and write and
    +     * single-use transactions are not.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public com.google.spanner.v1.TransactionSelectorOrBuilder getTransactionOrBuilder() { + if (transactionBuilder_ != null) { + return transactionBuilder_.getMessageOrBuilder(); + } else { + return transaction_ == null + ? com.google.spanner.v1.TransactionSelector.getDefaultInstance() + : transaction_; + } + } + + /** + * + * + *
    +     * Read-only snapshot transactions are supported, read and write and
    +     * single-use transactions are not.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionSelector, + com.google.spanner.v1.TransactionSelector.Builder, + com.google.spanner.v1.TransactionSelectorOrBuilder> + internalGetTransactionFieldBuilder() { + if (transactionBuilder_ == null) { + transactionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionSelector, + com.google.spanner.v1.TransactionSelector.Builder, + com.google.spanner.v1.TransactionSelectorOrBuilder>( + getTransaction(), getParentForChildren(), isClean()); + transaction_ = null; + } + return transactionBuilder_; + } + + private java.lang.Object sql_ = ""; + + /** + * + * + *
    +     * Required. The query request to generate partitions for. The request fails
    +     * if the query isn't root partitionable. For a query to be root
    +     * partitionable, it needs to satisfy a few conditions. For example, if the
    +     * query execution plan contains a distributed union operator, then it must be
    +     * the first operator in the plan. For more information about other
    +     * conditions, see [Read data in
    +     * parallel](https://cloud.google.com/spanner/docs/reads#read_data_in_parallel).
    +     *
    +     * The query request must not contain DML commands, such as `INSERT`,
    +     * `UPDATE`, or `DELETE`. Use
    +     * [`ExecuteStreamingSql`][google.spanner.v1.Spanner.ExecuteStreamingSql] with
    +     * a `PartitionedDml` transaction for large, partition-friendly DML
    +     * operations.
    +     * 
    + * + * string sql = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The sql. + */ + public java.lang.String getSql() { + java.lang.Object ref = sql_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + sql_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The query request to generate partitions for. The request fails
    +     * if the query isn't root partitionable. For a query to be root
    +     * partitionable, it needs to satisfy a few conditions. For example, if the
    +     * query execution plan contains a distributed union operator, then it must be
    +     * the first operator in the plan. For more information about other
    +     * conditions, see [Read data in
    +     * parallel](https://cloud.google.com/spanner/docs/reads#read_data_in_parallel).
    +     *
    +     * The query request must not contain DML commands, such as `INSERT`,
    +     * `UPDATE`, or `DELETE`. Use
    +     * [`ExecuteStreamingSql`][google.spanner.v1.Spanner.ExecuteStreamingSql] with
    +     * a `PartitionedDml` transaction for large, partition-friendly DML
    +     * operations.
    +     * 
    + * + * string sql = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for sql. + */ + public com.google.protobuf.ByteString getSqlBytes() { + java.lang.Object ref = sql_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + sql_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The query request to generate partitions for. The request fails
    +     * if the query isn't root partitionable. For a query to be root
    +     * partitionable, it needs to satisfy a few conditions. For example, if the
    +     * query execution plan contains a distributed union operator, then it must be
    +     * the first operator in the plan. For more information about other
    +     * conditions, see [Read data in
    +     * parallel](https://cloud.google.com/spanner/docs/reads#read_data_in_parallel).
    +     *
    +     * The query request must not contain DML commands, such as `INSERT`,
    +     * `UPDATE`, or `DELETE`. Use
    +     * [`ExecuteStreamingSql`][google.spanner.v1.Spanner.ExecuteStreamingSql] with
    +     * a `PartitionedDml` transaction for large, partition-friendly DML
    +     * operations.
    +     * 
    + * + * string sql = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The sql to set. + * @return This builder for chaining. + */ + public Builder setSql(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + sql_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The query request to generate partitions for. The request fails
    +     * if the query isn't root partitionable. For a query to be root
    +     * partitionable, it needs to satisfy a few conditions. For example, if the
    +     * query execution plan contains a distributed union operator, then it must be
    +     * the first operator in the plan. For more information about other
    +     * conditions, see [Read data in
    +     * parallel](https://cloud.google.com/spanner/docs/reads#read_data_in_parallel).
    +     *
    +     * The query request must not contain DML commands, such as `INSERT`,
    +     * `UPDATE`, or `DELETE`. Use
    +     * [`ExecuteStreamingSql`][google.spanner.v1.Spanner.ExecuteStreamingSql] with
    +     * a `PartitionedDml` transaction for large, partition-friendly DML
    +     * operations.
    +     * 
    + * + * string sql = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearSql() { + sql_ = getDefaultInstance().getSql(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The query request to generate partitions for. The request fails
    +     * if the query isn't root partitionable. For a query to be root
    +     * partitionable, it needs to satisfy a few conditions. For example, if the
    +     * query execution plan contains a distributed union operator, then it must be
    +     * the first operator in the plan. For more information about other
    +     * conditions, see [Read data in
    +     * parallel](https://cloud.google.com/spanner/docs/reads#read_data_in_parallel).
    +     *
    +     * The query request must not contain DML commands, such as `INSERT`,
    +     * `UPDATE`, or `DELETE`. Use
    +     * [`ExecuteStreamingSql`][google.spanner.v1.Spanner.ExecuteStreamingSql] with
    +     * a `PartitionedDml` transaction for large, partition-friendly DML
    +     * operations.
    +     * 
    + * + * string sql = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for sql to set. + * @return This builder for chaining. + */ + public Builder setSqlBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + sql_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.protobuf.Struct params_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Struct, + com.google.protobuf.Struct.Builder, + com.google.protobuf.StructOrBuilder> + paramsBuilder_; + + /** + * + * + *
    +     * Optional. Parameter names and values that bind to placeholders in the SQL
    +     * string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names can contain
    +     * letters, numbers, and underscores.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The same
    +     * parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the params field is set. + */ + public boolean hasParams() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Optional. Parameter names and values that bind to placeholders in the SQL
    +     * string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names can contain
    +     * letters, numbers, and underscores.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The same
    +     * parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The params. + */ + public com.google.protobuf.Struct getParams() { + if (paramsBuilder_ == null) { + return params_ == null ? com.google.protobuf.Struct.getDefaultInstance() : params_; + } else { + return paramsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. Parameter names and values that bind to placeholders in the SQL
    +     * string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names can contain
    +     * letters, numbers, and underscores.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The same
    +     * parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 4 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder setParams(com.google.protobuf.Struct value) { + if (paramsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + params_ = value; + } else { + paramsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Parameter names and values that bind to placeholders in the SQL
    +     * string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names can contain
    +     * letters, numbers, and underscores.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The same
    +     * parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 4 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder setParams(com.google.protobuf.Struct.Builder builderForValue) { + if (paramsBuilder_ == null) { + params_ = builderForValue.build(); + } else { + paramsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Parameter names and values that bind to placeholders in the SQL
    +     * string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names can contain
    +     * letters, numbers, and underscores.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The same
    +     * parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 4 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder mergeParams(com.google.protobuf.Struct value) { + if (paramsBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && params_ != null + && params_ != com.google.protobuf.Struct.getDefaultInstance()) { + getParamsBuilder().mergeFrom(value); + } else { + params_ = value; + } + } else { + paramsBuilder_.mergeFrom(value); + } + if (params_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. Parameter names and values that bind to placeholders in the SQL
    +     * string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names can contain
    +     * letters, numbers, and underscores.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The same
    +     * parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 4 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder clearParams() { + bitField0_ = (bitField0_ & ~0x00000008); + params_ = null; + if (paramsBuilder_ != null) { + paramsBuilder_.dispose(); + paramsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Parameter names and values that bind to placeholders in the SQL
    +     * string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names can contain
    +     * letters, numbers, and underscores.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The same
    +     * parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 4 [(.google.api.field_behavior) = OPTIONAL]; + */ + public com.google.protobuf.Struct.Builder getParamsBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetParamsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. Parameter names and values that bind to placeholders in the SQL
    +     * string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names can contain
    +     * letters, numbers, and underscores.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The same
    +     * parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 4 [(.google.api.field_behavior) = OPTIONAL]; + */ + public com.google.protobuf.StructOrBuilder getParamsOrBuilder() { + if (paramsBuilder_ != null) { + return paramsBuilder_.getMessageOrBuilder(); + } else { + return params_ == null ? com.google.protobuf.Struct.getDefaultInstance() : params_; + } + } + + /** + * + * + *
    +     * Optional. Parameter names and values that bind to placeholders in the SQL
    +     * string.
    +     *
    +     * A parameter placeholder consists of the `@` character followed by the
    +     * parameter name (for example, `@firstName`). Parameter names can contain
    +     * letters, numbers, and underscores.
    +     *
    +     * Parameters can appear anywhere that a literal value is expected. The same
    +     * parameter name can be used more than once, for example:
    +     *
    +     * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +     *
    +     * It's an error to execute a SQL statement with unbound parameters.
    +     * 
    + * + * .google.protobuf.Struct params = 4 [(.google.api.field_behavior) = OPTIONAL]; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Struct, + com.google.protobuf.Struct.Builder, + com.google.protobuf.StructOrBuilder> + internalGetParamsFieldBuilder() { + if (paramsBuilder_ == null) { + paramsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Struct, + com.google.protobuf.Struct.Builder, + com.google.protobuf.StructOrBuilder>( + getParams(), getParentForChildren(), isClean()); + params_ = null; + } + return paramsBuilder_; + } + + private static final class ParamTypesConverter + implements com.google.protobuf.MapFieldBuilder.Converter< + java.lang.String, com.google.spanner.v1.TypeOrBuilder, com.google.spanner.v1.Type> { + @java.lang.Override + public com.google.spanner.v1.Type build(com.google.spanner.v1.TypeOrBuilder val) { + if (val instanceof com.google.spanner.v1.Type) { + return (com.google.spanner.v1.Type) val; + } + return ((com.google.spanner.v1.Type.Builder) val).build(); + } + + @java.lang.Override + public com.google.protobuf.MapEntry + defaultEntry() { + return ParamTypesDefaultEntryHolder.defaultEntry; + } + } + ; + + private static final ParamTypesConverter paramTypesConverter = new ParamTypesConverter(); + + private com.google.protobuf.MapFieldBuilder< + java.lang.String, + com.google.spanner.v1.TypeOrBuilder, + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder> + paramTypes_; + + private com.google.protobuf.MapFieldBuilder< + java.lang.String, + com.google.spanner.v1.TypeOrBuilder, + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder> + internalGetParamTypes() { + if (paramTypes_ == null) { + return new com.google.protobuf.MapFieldBuilder<>(paramTypesConverter); + } + return paramTypes_; + } + + private com.google.protobuf.MapFieldBuilder< + java.lang.String, + com.google.spanner.v1.TypeOrBuilder, + com.google.spanner.v1.Type, + 
com.google.spanner.v1.Type.Builder> + internalGetMutableParamTypes() { + if (paramTypes_ == null) { + paramTypes_ = new com.google.protobuf.MapFieldBuilder<>(paramTypesConverter); + } + bitField0_ |= 0x00000010; + onChanged(); + return paramTypes_; + } + + public int getParamTypesCount() { + return internalGetParamTypes().ensureBuilderMap().size(); + } + + /** + * + * + *
    +     * Optional. It isn't always possible for Cloud Spanner to infer the right SQL
    +     * type from a JSON value. For example, values of type `BYTES` and values of
    +     * type `STRING` both appear in
    +     * [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
    +     *
    +     * In these cases, `param_types` can be used to specify the exact
    +     * SQL type for some or all of the SQL query parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * + * map<string, .google.spanner.v1.Type> param_types = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsParamTypes(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetParamTypes().ensureBuilderMap().containsKey(key); + } + + /** Use {@link #getParamTypesMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getParamTypes() { + return getParamTypesMap(); + } + + /** + * + * + *
    +     * Optional. It isn't always possible for Cloud Spanner to infer the right SQL
    +     * type from a JSON value. For example, values of type `BYTES` and values of
    +     * type `STRING` both appear in
    +     * [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
    +     *
    +     * In these cases, `param_types` can be used to specify the exact
    +     * SQL type for some or all of the SQL query parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * + * map<string, .google.spanner.v1.Type> param_types = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getParamTypesMap() { + return internalGetParamTypes().getImmutableMap(); + } + + /** + * + * + *
    +     * Optional. It isn't always possible for Cloud Spanner to infer the right SQL
    +     * type from a JSON value. For example, values of type `BYTES` and values of
    +     * type `STRING` both appear in
    +     * [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
    +     *
    +     * In these cases, `param_types` can be used to specify the exact
    +     * SQL type for some or all of the SQL query parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * + * map<string, .google.spanner.v1.Type> param_types = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ com.google.spanner.v1.Type getParamTypesOrDefault( + java.lang.String key, + /* nullable */ + com.google.spanner.v1.Type defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetMutableParamTypes().ensureBuilderMap(); + return map.containsKey(key) ? paramTypesConverter.build(map.get(key)) : defaultValue; + } + + /** + * + * + *
    +     * Optional. It isn't always possible for Cloud Spanner to infer the right SQL
    +     * type from a JSON value. For example, values of type `BYTES` and values of
    +     * type `STRING` both appear in
    +     * [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
    +     *
    +     * In these cases, `param_types` can be used to specify the exact
    +     * SQL type for some or all of the SQL query parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * + * map<string, .google.spanner.v1.Type> param_types = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.Type getParamTypesOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetMutableParamTypes().ensureBuilderMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return paramTypesConverter.build(map.get(key)); + } + + public Builder clearParamTypes() { + bitField0_ = (bitField0_ & ~0x00000010); + internalGetMutableParamTypes().clear(); + return this; + } + + /** + * + * + *
    +     * Optional. It isn't always possible for Cloud Spanner to infer the right SQL
    +     * type from a JSON value. For example, values of type `BYTES` and values of
    +     * type `STRING` both appear in
    +     * [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
    +     *
    +     * In these cases, `param_types` can be used to specify the exact
    +     * SQL type for some or all of the SQL query parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * + * map<string, .google.spanner.v1.Type> param_types = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeParamTypes(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableParamTypes().ensureBuilderMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableParamTypes() { + bitField0_ |= 0x00000010; + return internalGetMutableParamTypes().ensureMessageMap(); + } + + /** + * + * + *
    +     * Optional. It isn't always possible for Cloud Spanner to infer the right SQL
    +     * type from a JSON value. For example, values of type `BYTES` and values of
    +     * type `STRING` both appear in
    +     * [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
    +     *
    +     * In these cases, `param_types` can be used to specify the exact
    +     * SQL type for some or all of the SQL query parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * + * map<string, .google.spanner.v1.Type> param_types = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putParamTypes(java.lang.String key, com.google.spanner.v1.Type value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableParamTypes().ensureBuilderMap().put(key, value); + bitField0_ |= 0x00000010; + return this; + } + + /** + * + * + *
    +     * Optional. It isn't always possible for Cloud Spanner to infer the right SQL
    +     * type from a JSON value. For example, values of type `BYTES` and values of
    +     * type `STRING` both appear in
    +     * [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
    +     *
    +     * In these cases, `param_types` can be used to specify the exact
    +     * SQL type for some or all of the SQL query parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * + * map<string, .google.spanner.v1.Type> param_types = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putAllParamTypes( + java.util.Map values) { + for (java.util.Map.Entry e : + values.entrySet()) { + if (e.getKey() == null || e.getValue() == null) { + throw new NullPointerException(); + } + } + internalGetMutableParamTypes().ensureBuilderMap().putAll(values); + bitField0_ |= 0x00000010; + return this; + } + + /** + * + * + *
    +     * Optional. It isn't always possible for Cloud Spanner to infer the right SQL
    +     * type from a JSON value. For example, values of type `BYTES` and values of
    +     * type `STRING` both appear in
    +     * [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
    +     *
    +     * In these cases, `param_types` can be used to specify the exact
    +     * SQL type for some or all of the SQL query parameters. See the
    +     * definition of [Type][google.spanner.v1.Type] for more information
    +     * about SQL types.
    +     * 
    + * + * + * map<string, .google.spanner.v1.Type> param_types = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.Type.Builder putParamTypesBuilderIfAbsent(java.lang.String key) { + java.util.Map builderMap = + internalGetMutableParamTypes().ensureBuilderMap(); + com.google.spanner.v1.TypeOrBuilder entry = builderMap.get(key); + if (entry == null) { + entry = com.google.spanner.v1.Type.newBuilder(); + builderMap.put(key, entry); + } + if (entry instanceof com.google.spanner.v1.Type) { + entry = ((com.google.spanner.v1.Type) entry).toBuilder(); + builderMap.put(key, entry); + } + return (com.google.spanner.v1.Type.Builder) entry; + } + + private com.google.spanner.v1.PartitionOptions partitionOptions_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.PartitionOptions, + com.google.spanner.v1.PartitionOptions.Builder, + com.google.spanner.v1.PartitionOptionsOrBuilder> + partitionOptionsBuilder_; + + /** + * + * + *
    +     * Additional options that affect how many partitions are created.
    +     * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 6; + * + * @return Whether the partitionOptions field is set. + */ + public boolean hasPartitionOptions() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
    +     * Additional options that affect how many partitions are created.
    +     * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 6; + * + * @return The partitionOptions. + */ + public com.google.spanner.v1.PartitionOptions getPartitionOptions() { + if (partitionOptionsBuilder_ == null) { + return partitionOptions_ == null + ? com.google.spanner.v1.PartitionOptions.getDefaultInstance() + : partitionOptions_; + } else { + return partitionOptionsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Additional options that affect how many partitions are created.
    +     * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 6; + */ + public Builder setPartitionOptions(com.google.spanner.v1.PartitionOptions value) { + if (partitionOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + partitionOptions_ = value; + } else { + partitionOptionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Additional options that affect how many partitions are created.
    +     * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 6; + */ + public Builder setPartitionOptions( + com.google.spanner.v1.PartitionOptions.Builder builderForValue) { + if (partitionOptionsBuilder_ == null) { + partitionOptions_ = builderForValue.build(); + } else { + partitionOptionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Additional options that affect how many partitions are created.
    +     * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 6; + */ + public Builder mergePartitionOptions(com.google.spanner.v1.PartitionOptions value) { + if (partitionOptionsBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0) + && partitionOptions_ != null + && partitionOptions_ != com.google.spanner.v1.PartitionOptions.getDefaultInstance()) { + getPartitionOptionsBuilder().mergeFrom(value); + } else { + partitionOptions_ = value; + } + } else { + partitionOptionsBuilder_.mergeFrom(value); + } + if (partitionOptions_ != null) { + bitField0_ |= 0x00000020; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Additional options that affect how many partitions are created.
    +     * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 6; + */ + public Builder clearPartitionOptions() { + bitField0_ = (bitField0_ & ~0x00000020); + partitionOptions_ = null; + if (partitionOptionsBuilder_ != null) { + partitionOptionsBuilder_.dispose(); + partitionOptionsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Additional options that affect how many partitions are created.
    +     * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 6; + */ + public com.google.spanner.v1.PartitionOptions.Builder getPartitionOptionsBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return internalGetPartitionOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Additional options that affect how many partitions are created.
    +     * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 6; + */ + public com.google.spanner.v1.PartitionOptionsOrBuilder getPartitionOptionsOrBuilder() { + if (partitionOptionsBuilder_ != null) { + return partitionOptionsBuilder_.getMessageOrBuilder(); + } else { + return partitionOptions_ == null + ? com.google.spanner.v1.PartitionOptions.getDefaultInstance() + : partitionOptions_; + } + } + + /** + * + * + *
    +     * Additional options that affect how many partitions are created.
    +     * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 6; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.PartitionOptions, + com.google.spanner.v1.PartitionOptions.Builder, + com.google.spanner.v1.PartitionOptionsOrBuilder> + internalGetPartitionOptionsFieldBuilder() { + if (partitionOptionsBuilder_ == null) { + partitionOptionsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.PartitionOptions, + com.google.spanner.v1.PartitionOptions.Builder, + com.google.spanner.v1.PartitionOptionsOrBuilder>( + getPartitionOptions(), getParentForChildren(), isClean()); + partitionOptions_ = null; + } + return partitionOptionsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.PartitionQueryRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.PartitionQueryRequest) + private static final com.google.spanner.v1.PartitionQueryRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.PartitionQueryRequest(); + } + + public static com.google.spanner.v1.PartitionQueryRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PartitionQueryRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.PartitionQueryRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionQueryRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionQueryRequestOrBuilder.java new file mode 100644 index 000000000000..c03b0b2e6af0 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionQueryRequestOrBuilder.java @@ -0,0 +1,371 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface PartitionQueryRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.PartitionQueryRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The session used to create the partitions.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + java.lang.String getSession(); + + /** + * + * + *
    +   * Required. The session used to create the partitions.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + com.google.protobuf.ByteString getSessionBytes(); + + /** + * + * + *
    +   * Read-only snapshot transactions are supported, read and write and
    +   * single-use transactions are not.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return Whether the transaction field is set. + */ + boolean hasTransaction(); + + /** + * + * + *
    +   * Read-only snapshot transactions are supported, read and write and
    +   * single-use transactions are not.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return The transaction. + */ + com.google.spanner.v1.TransactionSelector getTransaction(); + + /** + * + * + *
    +   * Read-only snapshot transactions are supported, read and write and
    +   * single-use transactions are not.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + com.google.spanner.v1.TransactionSelectorOrBuilder getTransactionOrBuilder(); + + /** + * + * + *
    +   * Required. The query request to generate partitions for. The request fails
    +   * if the query isn't root partitionable. For a query to be root
    +   * partitionable, it needs to satisfy a few conditions. For example, if the
    +   * query execution plan contains a distributed union operator, then it must be
    +   * the first operator in the plan. For more information about other
    +   * conditions, see [Read data in
    +   * parallel](https://cloud.google.com/spanner/docs/reads#read_data_in_parallel).
    +   *
    +   * The query request must not contain DML commands, such as `INSERT`,
    +   * `UPDATE`, or `DELETE`. Use
    +   * [`ExecuteStreamingSql`][google.spanner.v1.Spanner.ExecuteStreamingSql] with
    +   * a `PartitionedDml` transaction for large, partition-friendly DML
    +   * operations.
    +   * 
    + * + * string sql = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The sql. + */ + java.lang.String getSql(); + + /** + * + * + *
    +   * Required. The query request to generate partitions for. The request fails
    +   * if the query isn't root partitionable. For a query to be root
    +   * partitionable, it needs to satisfy a few conditions. For example, if the
    +   * query execution plan contains a distributed union operator, then it must be
    +   * the first operator in the plan. For more information about other
    +   * conditions, see [Read data in
    +   * parallel](https://cloud.google.com/spanner/docs/reads#read_data_in_parallel).
    +   *
    +   * The query request must not contain DML commands, such as `INSERT`,
    +   * `UPDATE`, or `DELETE`. Use
    +   * [`ExecuteStreamingSql`][google.spanner.v1.Spanner.ExecuteStreamingSql] with
    +   * a `PartitionedDml` transaction for large, partition-friendly DML
    +   * operations.
    +   * 
    + * + * string sql = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for sql. + */ + com.google.protobuf.ByteString getSqlBytes(); + + /** + * + * + *
    +   * Optional. Parameter names and values that bind to placeholders in the SQL
    +   * string.
    +   *
    +   * A parameter placeholder consists of the `@` character followed by the
    +   * parameter name (for example, `@firstName`). Parameter names can contain
    +   * letters, numbers, and underscores.
    +   *
    +   * Parameters can appear anywhere that a literal value is expected. The same
    +   * parameter name can be used more than once, for example:
    +   *
    +   * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +   *
    +   * It's an error to execute a SQL statement with unbound parameters.
    +   * 
    + * + * .google.protobuf.Struct params = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the params field is set. + */ + boolean hasParams(); + + /** + * + * + *
    +   * Optional. Parameter names and values that bind to placeholders in the SQL
    +   * string.
    +   *
    +   * A parameter placeholder consists of the `@` character followed by the
    +   * parameter name (for example, `@firstName`). Parameter names can contain
    +   * letters, numbers, and underscores.
    +   *
    +   * Parameters can appear anywhere that a literal value is expected. The same
    +   * parameter name can be used more than once, for example:
    +   *
    +   * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +   *
    +   * It's an error to execute a SQL statement with unbound parameters.
    +   * 
    + * + * .google.protobuf.Struct params = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The params. + */ + com.google.protobuf.Struct getParams(); + + /** + * + * + *
    +   * Optional. Parameter names and values that bind to placeholders in the SQL
    +   * string.
    +   *
    +   * A parameter placeholder consists of the `@` character followed by the
    +   * parameter name (for example, `@firstName`). Parameter names can contain
    +   * letters, numbers, and underscores.
    +   *
    +   * Parameters can appear anywhere that a literal value is expected. The same
    +   * parameter name can be used more than once, for example:
    +   *
    +   * `"WHERE id > @msg_id AND id < @msg_id + 100"`
    +   *
    +   * It's an error to execute a SQL statement with unbound parameters.
    +   * 
    + * + * .google.protobuf.Struct params = 4 [(.google.api.field_behavior) = OPTIONAL]; + */ + com.google.protobuf.StructOrBuilder getParamsOrBuilder(); + + /** + * + * + *
    +   * Optional. It isn't always possible for Cloud Spanner to infer the right SQL
    +   * type from a JSON value. For example, values of type `BYTES` and values of
    +   * type `STRING` both appear in
    +   * [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
    +   *
    +   * In these cases, `param_types` can be used to specify the exact
    +   * SQL type for some or all of the SQL query parameters. See the
    +   * definition of [Type][google.spanner.v1.Type] for more information
    +   * about SQL types.
    +   * 
    + * + * + * map<string, .google.spanner.v1.Type> param_types = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getParamTypesCount(); + + /** + * + * + *
    +   * Optional. It isn't always possible for Cloud Spanner to infer the right SQL
    +   * type from a JSON value. For example, values of type `BYTES` and values of
    +   * type `STRING` both appear in
    +   * [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
    +   *
    +   * In these cases, `param_types` can be used to specify the exact
    +   * SQL type for some or all of the SQL query parameters. See the
    +   * definition of [Type][google.spanner.v1.Type] for more information
    +   * about SQL types.
    +   * 
    + * + * + * map<string, .google.spanner.v1.Type> param_types = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + boolean containsParamTypes(java.lang.String key); + + /** Use {@link #getParamTypesMap()} instead. */ + @java.lang.Deprecated + java.util.Map getParamTypes(); + + /** + * + * + *
    +   * Optional. It isn't always possible for Cloud Spanner to infer the right SQL
    +   * type from a JSON value. For example, values of type `BYTES` and values of
    +   * type `STRING` both appear in
    +   * [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
    +   *
    +   * In these cases, `param_types` can be used to specify the exact
    +   * SQL type for some or all of the SQL query parameters. See the
    +   * definition of [Type][google.spanner.v1.Type] for more information
    +   * about SQL types.
    +   * 
    + * + * + * map<string, .google.spanner.v1.Type> param_types = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.Map getParamTypesMap(); + + /** + * + * + *
    +   * Optional. It isn't always possible for Cloud Spanner to infer the right SQL
    +   * type from a JSON value. For example, values of type `BYTES` and values of
    +   * type `STRING` both appear in
    +   * [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
    +   *
    +   * In these cases, `param_types` can be used to specify the exact
    +   * SQL type for some or all of the SQL query parameters. See the
    +   * definition of [Type][google.spanner.v1.Type] for more information
    +   * about SQL types.
    +   * 
    + * + * + * map<string, .google.spanner.v1.Type> param_types = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + /* nullable */ + com.google.spanner.v1.Type getParamTypesOrDefault( + java.lang.String key, + /* nullable */ + com.google.spanner.v1.Type defaultValue); + + /** + * + * + *
    +   * Optional. It isn't always possible for Cloud Spanner to infer the right SQL
    +   * type from a JSON value. For example, values of type `BYTES` and values of
    +   * type `STRING` both appear in
    +   * [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
    +   *
    +   * In these cases, `param_types` can be used to specify the exact
    +   * SQL type for some or all of the SQL query parameters. See the
    +   * definition of [Type][google.spanner.v1.Type] for more information
    +   * about SQL types.
    +   * 
    + * + * + * map<string, .google.spanner.v1.Type> param_types = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.Type getParamTypesOrThrow(java.lang.String key); + + /** + * + * + *
    +   * Additional options that affect how many partitions are created.
    +   * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 6; + * + * @return Whether the partitionOptions field is set. + */ + boolean hasPartitionOptions(); + + /** + * + * + *
    +   * Additional options that affect how many partitions are created.
    +   * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 6; + * + * @return The partitionOptions. + */ + com.google.spanner.v1.PartitionOptions getPartitionOptions(); + + /** + * + * + *
    +   * Additional options that affect how many partitions are created.
    +   * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 6; + */ + com.google.spanner.v1.PartitionOptionsOrBuilder getPartitionOptionsOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionReadRequest.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionReadRequest.java new file mode 100644 index 000000000000..e3880e91f1b3 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionReadRequest.java @@ -0,0 +1,2294 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * The request for [PartitionRead][google.spanner.v1.Spanner.PartitionRead]
    + * 
    + * + * Protobuf type {@code google.spanner.v1.PartitionReadRequest} + */ +@com.google.protobuf.Generated +public final class PartitionReadRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.PartitionReadRequest) + PartitionReadRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "PartitionReadRequest"); + } + + // Use PartitionReadRequest.newBuilder() to construct. + private PartitionReadRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private PartitionReadRequest() { + session_ = ""; + table_ = ""; + index_ = ""; + columns_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_PartitionReadRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_PartitionReadRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.PartitionReadRequest.class, + com.google.spanner.v1.PartitionReadRequest.Builder.class); + } + + private int bitField0_; + public static final int SESSION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object session_ = ""; + + /** + * + * + *
    +   * Required. The session used to create the partitions.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + @java.lang.Override + public java.lang.String getSession() { + java.lang.Object ref = session_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + session_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The session used to create the partitions.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSessionBytes() { + java.lang.Object ref = session_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + session_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TRANSACTION_FIELD_NUMBER = 2; + private com.google.spanner.v1.TransactionSelector transaction_; + + /** + * + * + *
    +   * Read only snapshot transactions are supported, read/write and single use
    +   * transactions are not.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return Whether the transaction field is set. + */ + @java.lang.Override + public boolean hasTransaction() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Read only snapshot transactions are supported, read/write and single use
    +   * transactions are not.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return The transaction. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionSelector getTransaction() { + return transaction_ == null + ? com.google.spanner.v1.TransactionSelector.getDefaultInstance() + : transaction_; + } + + /** + * + * + *
    +   * Read only snapshot transactions are supported, read/write and single use
    +   * transactions are not.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + @java.lang.Override + public com.google.spanner.v1.TransactionSelectorOrBuilder getTransactionOrBuilder() { + return transaction_ == null + ? com.google.spanner.v1.TransactionSelector.getDefaultInstance() + : transaction_; + } + + public static final int TABLE_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object table_ = ""; + + /** + * + * + *
    +   * Required. The name of the table in the database to be read.
    +   * 
    + * + * string table = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The table. + */ + @java.lang.Override + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the table in the database to be read.
    +   * 
    + * + * string table = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for table. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INDEX_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object index_ = ""; + + /** + * + * + *
    +   * If non-empty, the name of an index on
    +   * [table][google.spanner.v1.PartitionReadRequest.table]. This index is used
    +   * instead of the table primary key when interpreting
    +   * [key_set][google.spanner.v1.PartitionReadRequest.key_set] and sorting
    +   * result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set]
    +   * for further information.
    +   * 
    + * + * string index = 4; + * + * @return The index. + */ + @java.lang.Override + public java.lang.String getIndex() { + java.lang.Object ref = index_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + index_ = s; + return s; + } + } + + /** + * + * + *
    +   * If non-empty, the name of an index on
    +   * [table][google.spanner.v1.PartitionReadRequest.table]. This index is used
    +   * instead of the table primary key when interpreting
    +   * [key_set][google.spanner.v1.PartitionReadRequest.key_set] and sorting
    +   * result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set]
    +   * for further information.
    +   * 
    + * + * string index = 4; + * + * @return The bytes for index. + */ + @java.lang.Override + public com.google.protobuf.ByteString getIndexBytes() { + java.lang.Object ref = index_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + index_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int COLUMNS_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList columns_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
    +   * returned for each row matching this request.
    +   * 
    + * + * repeated string columns = 5; + * + * @return A list containing the columns. + */ + public com.google.protobuf.ProtocolStringList getColumnsList() { + return columns_; + } + + /** + * + * + *
    +   * The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
    +   * returned for each row matching this request.
    +   * 
    + * + * repeated string columns = 5; + * + * @return The count of columns. + */ + public int getColumnsCount() { + return columns_.size(); + } + + /** + * + * + *
    +   * The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
    +   * returned for each row matching this request.
    +   * 
    + * + * repeated string columns = 5; + * + * @param index The index of the element to return. + * @return The columns at the given index. + */ + public java.lang.String getColumns(int index) { + return columns_.get(index); + } + + /** + * + * + *
    +   * The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
    +   * returned for each row matching this request.
    +   * 
    + * + * repeated string columns = 5; + * + * @param index The index of the value to return. + * @return The bytes of the columns at the given index. + */ + public com.google.protobuf.ByteString getColumnsBytes(int index) { + return columns_.getByteString(index); + } + + public static final int KEY_SET_FIELD_NUMBER = 6; + private com.google.spanner.v1.KeySet keySet_; + + /** + * + * + *
    +   * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +   * primary keys of the rows in
    +   * [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless
    +   * [index][google.spanner.v1.PartitionReadRequest.index] is present. If
    +   * [index][google.spanner.v1.PartitionReadRequest.index] is present, then
    +   * [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names
    +   * index keys in [index][google.spanner.v1.PartitionReadRequest.index].
    +   *
    +   * It isn't an error for the `key_set` to name rows that don't
    +   * exist in the database. Read yields nothing for nonexistent rows.
    +   * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the keySet field is set. + */ + @java.lang.Override + public boolean hasKeySet() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +   * primary keys of the rows in
    +   * [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless
    +   * [index][google.spanner.v1.PartitionReadRequest.index] is present. If
    +   * [index][google.spanner.v1.PartitionReadRequest.index] is present, then
    +   * [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names
    +   * index keys in [index][google.spanner.v1.PartitionReadRequest.index].
    +   *
    +   * It isn't an error for the `key_set` to name rows that don't
    +   * exist in the database. Read yields nothing for nonexistent rows.
    +   * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The keySet. + */ + @java.lang.Override + public com.google.spanner.v1.KeySet getKeySet() { + return keySet_ == null ? com.google.spanner.v1.KeySet.getDefaultInstance() : keySet_; + } + + /** + * + * + *
    +   * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +   * primary keys of the rows in
    +   * [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless
    +   * [index][google.spanner.v1.PartitionReadRequest.index] is present. If
    +   * [index][google.spanner.v1.PartitionReadRequest.index] is present, then
    +   * [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names
    +   * index keys in [index][google.spanner.v1.PartitionReadRequest.index].
    +   *
    +   * It isn't an error for the `key_set` to name rows that don't
    +   * exist in the database. Read yields nothing for nonexistent rows.
    +   * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + */ + @java.lang.Override + public com.google.spanner.v1.KeySetOrBuilder getKeySetOrBuilder() { + return keySet_ == null ? com.google.spanner.v1.KeySet.getDefaultInstance() : keySet_; + } + + public static final int PARTITION_OPTIONS_FIELD_NUMBER = 9; + private com.google.spanner.v1.PartitionOptions partitionOptions_; + + /** + * + * + *
    +   * Additional options that affect how many partitions are created.
    +   * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 9; + * + * @return Whether the partitionOptions field is set. + */ + @java.lang.Override + public boolean hasPartitionOptions() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * Additional options that affect how many partitions are created.
    +   * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 9; + * + * @return The partitionOptions. + */ + @java.lang.Override + public com.google.spanner.v1.PartitionOptions getPartitionOptions() { + return partitionOptions_ == null + ? com.google.spanner.v1.PartitionOptions.getDefaultInstance() + : partitionOptions_; + } + + /** + * + * + *
    +   * Additional options that affect how many partitions are created.
    +   * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 9; + */ + @java.lang.Override + public com.google.spanner.v1.PartitionOptionsOrBuilder getPartitionOptionsOrBuilder() { + return partitionOptions_ == null + ? com.google.spanner.v1.PartitionOptions.getDefaultInstance() + : partitionOptions_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(session_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, session_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getTransaction()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(table_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, table_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(index_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, index_); + } + for (int i = 0; i < columns_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, columns_.getRaw(i)); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(6, getKeySet()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(9, getPartitionOptions()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(session_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, session_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, 
getTransaction()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(table_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, table_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(index_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, index_); + } + { + int dataSize = 0; + for (int i = 0; i < columns_.size(); i++) { + dataSize += computeStringSizeNoTag(columns_.getRaw(i)); + } + size += dataSize; + size += 1 * getColumnsList().size(); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, getKeySet()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(9, getPartitionOptions()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.PartitionReadRequest)) { + return super.equals(obj); + } + com.google.spanner.v1.PartitionReadRequest other = + (com.google.spanner.v1.PartitionReadRequest) obj; + + if (!getSession().equals(other.getSession())) return false; + if (hasTransaction() != other.hasTransaction()) return false; + if (hasTransaction()) { + if (!getTransaction().equals(other.getTransaction())) return false; + } + if (!getTable().equals(other.getTable())) return false; + if (!getIndex().equals(other.getIndex())) return false; + if (!getColumnsList().equals(other.getColumnsList())) return false; + if (hasKeySet() != other.hasKeySet()) return false; + if (hasKeySet()) { + if (!getKeySet().equals(other.getKeySet())) return false; + } + if (hasPartitionOptions() != other.hasPartitionOptions()) return false; + if (hasPartitionOptions()) { + if (!getPartitionOptions().equals(other.getPartitionOptions())) return false; + } + if 
(!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SESSION_FIELD_NUMBER; + hash = (53 * hash) + getSession().hashCode(); + if (hasTransaction()) { + hash = (37 * hash) + TRANSACTION_FIELD_NUMBER; + hash = (53 * hash) + getTransaction().hashCode(); + } + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + hash = (37 * hash) + INDEX_FIELD_NUMBER; + hash = (53 * hash) + getIndex().hashCode(); + if (getColumnsCount() > 0) { + hash = (37 * hash) + COLUMNS_FIELD_NUMBER; + hash = (53 * hash) + getColumnsList().hashCode(); + } + if (hasKeySet()) { + hash = (37 * hash) + KEY_SET_FIELD_NUMBER; + hash = (53 * hash) + getKeySet().hashCode(); + } + if (hasPartitionOptions()) { + hash = (37 * hash) + PARTITION_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getPartitionOptions().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.PartitionReadRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PartitionReadRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PartitionReadRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PartitionReadRequest parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PartitionReadRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PartitionReadRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PartitionReadRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PartitionReadRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.PartitionReadRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PartitionReadRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.PartitionReadRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PartitionReadRequest 
parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.PartitionReadRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for [PartitionRead][google.spanner.v1.Spanner.PartitionRead]
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.PartitionReadRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.PartitionReadRequest) + com.google.spanner.v1.PartitionReadRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_PartitionReadRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_PartitionReadRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.PartitionReadRequest.class, + com.google.spanner.v1.PartitionReadRequest.Builder.class); + } + + // Construct using com.google.spanner.v1.PartitionReadRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetTransactionFieldBuilder(); + internalGetKeySetFieldBuilder(); + internalGetPartitionOptionsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + session_ = ""; + transaction_ = null; + if (transactionBuilder_ != null) { + transactionBuilder_.dispose(); + transactionBuilder_ = null; + } + table_ = ""; + index_ = ""; + columns_ = com.google.protobuf.LazyStringArrayList.emptyList(); + keySet_ = null; + if (keySetBuilder_ != null) { + keySetBuilder_.dispose(); + keySetBuilder_ = null; + } + partitionOptions_ = null; + if (partitionOptionsBuilder_ != null) { + partitionOptionsBuilder_.dispose(); 
+ partitionOptionsBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_PartitionReadRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.PartitionReadRequest getDefaultInstanceForType() { + return com.google.spanner.v1.PartitionReadRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.PartitionReadRequest build() { + com.google.spanner.v1.PartitionReadRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.PartitionReadRequest buildPartial() { + com.google.spanner.v1.PartitionReadRequest result = + new com.google.spanner.v1.PartitionReadRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.PartitionReadRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.session_ = session_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.transaction_ = + transactionBuilder_ == null ? transaction_ : transactionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.table_ = table_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.index_ = index_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + columns_.makeImmutable(); + result.columns_ = columns_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.keySet_ = keySetBuilder_ == null ? keySet_ : keySetBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.partitionOptions_ = + partitionOptionsBuilder_ == null ? 
partitionOptions_ : partitionOptionsBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.PartitionReadRequest) { + return mergeFrom((com.google.spanner.v1.PartitionReadRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.PartitionReadRequest other) { + if (other == com.google.spanner.v1.PartitionReadRequest.getDefaultInstance()) return this; + if (!other.getSession().isEmpty()) { + session_ = other.session_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasTransaction()) { + mergeTransaction(other.getTransaction()); + } + if (!other.getTable().isEmpty()) { + table_ = other.table_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getIndex().isEmpty()) { + index_ = other.index_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (!other.columns_.isEmpty()) { + if (columns_.isEmpty()) { + columns_ = other.columns_; + bitField0_ |= 0x00000010; + } else { + ensureColumnsIsMutable(); + columns_.addAll(other.columns_); + } + onChanged(); + } + if (other.hasKeySet()) { + mergeKeySet(other.getKeySet()); + } + if (other.hasPartitionOptions()) { + mergePartitionOptions(other.getPartitionOptions()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + session_ = 
input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetTransactionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + table_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + index_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureColumnsIsMutable(); + columns_.add(s); + break; + } // case 42 + case 50: + { + input.readMessage(internalGetKeySetFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000020; + break; + } // case 50 + case 74: + { + input.readMessage( + internalGetPartitionOptionsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000040; + break; + } // case 74 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object session_ = ""; + + /** + * + * + *
    +     * Required. The session used to create the partitions.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + public java.lang.String getSession() { + java.lang.Object ref = session_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + session_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The session used to create the partitions.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + public com.google.protobuf.ByteString getSessionBytes() { + java.lang.Object ref = session_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + session_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The session used to create the partitions.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The session to set. + * @return This builder for chaining. + */ + public Builder setSession(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + session_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The session used to create the partitions.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearSession() { + session_ = getDefaultInstance().getSession(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The session used to create the partitions.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for session to set. + * @return This builder for chaining. + */ + public Builder setSessionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + session_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.spanner.v1.TransactionSelector transaction_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionSelector, + com.google.spanner.v1.TransactionSelector.Builder, + com.google.spanner.v1.TransactionSelectorOrBuilder> + transactionBuilder_; + + /** + * + * + *
    +     * Read only snapshot transactions are supported, read/write and single use
    +     * transactions are not.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return Whether the transaction field is set. + */ + public boolean hasTransaction() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Read only snapshot transactions are supported, read/write and single use
    +     * transactions are not.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return The transaction. + */ + public com.google.spanner.v1.TransactionSelector getTransaction() { + if (transactionBuilder_ == null) { + return transaction_ == null + ? com.google.spanner.v1.TransactionSelector.getDefaultInstance() + : transaction_; + } else { + return transactionBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Read only snapshot transactions are supported, read/write and single use
    +     * transactions are not.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public Builder setTransaction(com.google.spanner.v1.TransactionSelector value) { + if (transactionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + transaction_ = value; + } else { + transactionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Read only snapshot transactions are supported, read/write and single use
    +     * transactions are not.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public Builder setTransaction( + com.google.spanner.v1.TransactionSelector.Builder builderForValue) { + if (transactionBuilder_ == null) { + transaction_ = builderForValue.build(); + } else { + transactionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Read only snapshot transactions are supported, read/write and single use
    +     * transactions are not.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public Builder mergeTransaction(com.google.spanner.v1.TransactionSelector value) { + if (transactionBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && transaction_ != null + && transaction_ != com.google.spanner.v1.TransactionSelector.getDefaultInstance()) { + getTransactionBuilder().mergeFrom(value); + } else { + transaction_ = value; + } + } else { + transactionBuilder_.mergeFrom(value); + } + if (transaction_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Read only snapshot transactions are supported, read/write and single use
    +     * transactions are not.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public Builder clearTransaction() { + bitField0_ = (bitField0_ & ~0x00000002); + transaction_ = null; + if (transactionBuilder_ != null) { + transactionBuilder_.dispose(); + transactionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Read only snapshot transactions are supported, read/write and single use
    +     * transactions are not.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public com.google.spanner.v1.TransactionSelector.Builder getTransactionBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetTransactionFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Read only snapshot transactions are supported, read/write and single use
    +     * transactions are not.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public com.google.spanner.v1.TransactionSelectorOrBuilder getTransactionOrBuilder() { + if (transactionBuilder_ != null) { + return transactionBuilder_.getMessageOrBuilder(); + } else { + return transaction_ == null + ? com.google.spanner.v1.TransactionSelector.getDefaultInstance() + : transaction_; + } + } + + /** + * + * + *
    +     * Read only snapshot transactions are supported, read/write and single use
    +     * transactions are not.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionSelector, + com.google.spanner.v1.TransactionSelector.Builder, + com.google.spanner.v1.TransactionSelectorOrBuilder> + internalGetTransactionFieldBuilder() { + if (transactionBuilder_ == null) { + transactionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionSelector, + com.google.spanner.v1.TransactionSelector.Builder, + com.google.spanner.v1.TransactionSelectorOrBuilder>( + getTransaction(), getParentForChildren(), isClean()); + transaction_ = null; + } + return transactionBuilder_; + } + + private java.lang.Object table_ = ""; + + /** + * + * + *
    +     * Required. The name of the table in the database to be read.
    +     * 
    + * + * string table = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The table. + */ + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the table in the database to be read.
    +     * 
    + * + * string table = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for table. + */ + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the table in the database to be read.
    +     * 
    + * + * string table = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The table to set. + * @return This builder for chaining. + */ + public Builder setTable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the table in the database to be read.
    +     * 
    + * + * string table = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearTable() { + table_ = getDefaultInstance().getTable(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the table in the database to be read.
    +     * 
    + * + * string table = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for table to set. + * @return This builder for chaining. + */ + public Builder setTableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + table_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.lang.Object index_ = ""; + + /** + * + * + *
    +     * If non-empty, the name of an index on
    +     * [table][google.spanner.v1.PartitionReadRequest.table]. This index is used
    +     * instead of the table primary key when interpreting
    +     * [key_set][google.spanner.v1.PartitionReadRequest.key_set] and sorting
    +     * result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set]
    +     * for further information.
    +     * 
    + * + * string index = 4; + * + * @return The index. + */ + public java.lang.String getIndex() { + java.lang.Object ref = index_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + index_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If non-empty, the name of an index on
    +     * [table][google.spanner.v1.PartitionReadRequest.table]. This index is used
    +     * instead of the table primary key when interpreting
    +     * [key_set][google.spanner.v1.PartitionReadRequest.key_set] and sorting
    +     * result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set]
    +     * for further information.
    +     * 
    + * + * string index = 4; + * + * @return The bytes for index. + */ + public com.google.protobuf.ByteString getIndexBytes() { + java.lang.Object ref = index_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + index_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If non-empty, the name of an index on
    +     * [table][google.spanner.v1.PartitionReadRequest.table]. This index is used
    +     * instead of the table primary key when interpreting
    +     * [key_set][google.spanner.v1.PartitionReadRequest.key_set] and sorting
    +     * result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set]
    +     * for further information.
    +     * 
    + * + * string index = 4; + * + * @param value The index to set. + * @return This builder for chaining. + */ + public Builder setIndex(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + index_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, the name of an index on
    +     * [table][google.spanner.v1.PartitionReadRequest.table]. This index is used
    +     * instead of the table primary key when interpreting
    +     * [key_set][google.spanner.v1.PartitionReadRequest.key_set] and sorting
    +     * result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set]
    +     * for further information.
    +     * 
    + * + * string index = 4; + * + * @return This builder for chaining. + */ + public Builder clearIndex() { + index_ = getDefaultInstance().getIndex(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, the name of an index on
    +     * [table][google.spanner.v1.PartitionReadRequest.table]. This index is used
    +     * instead of the table primary key when interpreting
    +     * [key_set][google.spanner.v1.PartitionReadRequest.key_set] and sorting
    +     * result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set]
    +     * for further information.
    +     * 
    + * + * string index = 4; + * + * @param value The bytes for index to set. + * @return This builder for chaining. + */ + public Builder setIndexBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + index_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList columns_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureColumnsIsMutable() { + if (!columns_.isModifiable()) { + columns_ = new com.google.protobuf.LazyStringArrayList(columns_); + } + bitField0_ |= 0x00000010; + } + + /** + * + * + *
    +     * The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
    +     * returned for each row matching this request.
    +     * 
    + * + * repeated string columns = 5; + * + * @return A list containing the columns. + */ + public com.google.protobuf.ProtocolStringList getColumnsList() { + columns_.makeImmutable(); + return columns_; + } + + /** + * + * + *
    +     * The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
    +     * returned for each row matching this request.
    +     * 
    + * + * repeated string columns = 5; + * + * @return The count of columns. + */ + public int getColumnsCount() { + return columns_.size(); + } + + /** + * + * + *
    +     * The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
    +     * returned for each row matching this request.
    +     * 
    + * + * repeated string columns = 5; + * + * @param index The index of the element to return. + * @return The columns at the given index. + */ + public java.lang.String getColumns(int index) { + return columns_.get(index); + } + + /** + * + * + *
    +     * The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
    +     * returned for each row matching this request.
    +     * 
    + * + * repeated string columns = 5; + * + * @param index The index of the value to return. + * @return The bytes of the columns at the given index. + */ + public com.google.protobuf.ByteString getColumnsBytes(int index) { + return columns_.getByteString(index); + } + + /** + * + * + *
    +     * The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
    +     * returned for each row matching this request.
    +     * 
    + * + * repeated string columns = 5; + * + * @param index The index to set the value at. + * @param value The columns to set. + * @return This builder for chaining. + */ + public Builder setColumns(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnsIsMutable(); + columns_.set(index, value); + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
    +     * returned for each row matching this request.
    +     * 
    + * + * repeated string columns = 5; + * + * @param value The columns to add. + * @return This builder for chaining. + */ + public Builder addColumns(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnsIsMutable(); + columns_.add(value); + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
    +     * returned for each row matching this request.
    +     * 
    + * + * repeated string columns = 5; + * + * @param values The columns to add. + * @return This builder for chaining. + */ + public Builder addAllColumns(java.lang.Iterable values) { + ensureColumnsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, columns_); + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
    +     * returned for each row matching this request.
    +     * 
    + * + * repeated string columns = 5; + * + * @return This builder for chaining. + */ + public Builder clearColumns() { + columns_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
    +     * returned for each row matching this request.
    +     * 
    + * + * repeated string columns = 5; + * + * @param value The bytes of the columns to add. + * @return This builder for chaining. + */ + public Builder addColumnsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureColumnsIsMutable(); + columns_.add(value); + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + private com.google.spanner.v1.KeySet keySet_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.KeySet, + com.google.spanner.v1.KeySet.Builder, + com.google.spanner.v1.KeySetOrBuilder> + keySetBuilder_; + + /** + * + * + *
    +     * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +     * primary keys of the rows in
    +     * [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless
    +     * [index][google.spanner.v1.PartitionReadRequest.index] is present. If
    +     * [index][google.spanner.v1.PartitionReadRequest.index] is present, then
    +     * [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names
    +     * index keys in [index][google.spanner.v1.PartitionReadRequest.index].
    +     *
    +     * It isn't an error for the `key_set` to name rows that don't
    +     * exist in the database. Read yields nothing for nonexistent rows.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the keySet field is set. + */ + public boolean hasKeySet() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
    +     * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +     * primary keys of the rows in
    +     * [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless
    +     * [index][google.spanner.v1.PartitionReadRequest.index] is present. If
    +     * [index][google.spanner.v1.PartitionReadRequest.index] is present, then
    +     * [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names
    +     * index keys in [index][google.spanner.v1.PartitionReadRequest.index].
    +     *
    +     * It isn't an error for the `key_set` to name rows that don't
    +     * exist in the database. Read yields nothing for nonexistent rows.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The keySet. + */ + public com.google.spanner.v1.KeySet getKeySet() { + if (keySetBuilder_ == null) { + return keySet_ == null ? com.google.spanner.v1.KeySet.getDefaultInstance() : keySet_; + } else { + return keySetBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +     * primary keys of the rows in
    +     * [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless
    +     * [index][google.spanner.v1.PartitionReadRequest.index] is present. If
    +     * [index][google.spanner.v1.PartitionReadRequest.index] is present, then
    +     * [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names
    +     * index keys in [index][google.spanner.v1.PartitionReadRequest.index].
    +     *
    +     * It isn't an error for the `key_set` to name rows that don't
    +     * exist in the database. Read yields nothing for nonexistent rows.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder setKeySet(com.google.spanner.v1.KeySet value) { + if (keySetBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + keySet_ = value; + } else { + keySetBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +     * primary keys of the rows in
    +     * [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless
    +     * [index][google.spanner.v1.PartitionReadRequest.index] is present. If
    +     * [index][google.spanner.v1.PartitionReadRequest.index] is present, then
    +     * [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names
    +     * index keys in [index][google.spanner.v1.PartitionReadRequest.index].
    +     *
    +     * It isn't an error for the `key_set` to name rows that don't
    +     * exist in the database. Read yields nothing for nonexistent rows.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder setKeySet(com.google.spanner.v1.KeySet.Builder builderForValue) { + if (keySetBuilder_ == null) { + keySet_ = builderForValue.build(); + } else { + keySetBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +     * primary keys of the rows in
    +     * [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless
    +     * [index][google.spanner.v1.PartitionReadRequest.index] is present. If
    +     * [index][google.spanner.v1.PartitionReadRequest.index] is present, then
    +     * [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names
    +     * index keys in [index][google.spanner.v1.PartitionReadRequest.index].
    +     *
    +     * It isn't an error for the `key_set` to name rows that don't
    +     * exist in the database. Read yields nothing for nonexistent rows.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder mergeKeySet(com.google.spanner.v1.KeySet value) { + if (keySetBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0) + && keySet_ != null + && keySet_ != com.google.spanner.v1.KeySet.getDefaultInstance()) { + getKeySetBuilder().mergeFrom(value); + } else { + keySet_ = value; + } + } else { + keySetBuilder_.mergeFrom(value); + } + if (keySet_ != null) { + bitField0_ |= 0x00000020; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +     * primary keys of the rows in
    +     * [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless
    +     * [index][google.spanner.v1.PartitionReadRequest.index] is present. If
    +     * [index][google.spanner.v1.PartitionReadRequest.index] is present, then
    +     * [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names
    +     * index keys in [index][google.spanner.v1.PartitionReadRequest.index].
    +     *
    +     * It isn't an error for the `key_set` to name rows that don't
    +     * exist in the database. Read yields nothing for nonexistent rows.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder clearKeySet() { + bitField0_ = (bitField0_ & ~0x00000020); + keySet_ = null; + if (keySetBuilder_ != null) { + keySetBuilder_.dispose(); + keySetBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +     * primary keys of the rows in
    +     * [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless
    +     * [index][google.spanner.v1.PartitionReadRequest.index] is present. If
    +     * [index][google.spanner.v1.PartitionReadRequest.index] is present, then
    +     * [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names
    +     * index keys in [index][google.spanner.v1.PartitionReadRequest.index].
    +     *
    +     * It isn't an error for the `key_set` to name rows that don't
    +     * exist in the database. Read yields nothing for nonexistent rows.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + */ + public com.google.spanner.v1.KeySet.Builder getKeySetBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return internalGetKeySetFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +     * primary keys of the rows in
    +     * [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless
    +     * [index][google.spanner.v1.PartitionReadRequest.index] is present. If
    +     * [index][google.spanner.v1.PartitionReadRequest.index] is present, then
    +     * [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names
    +     * index keys in [index][google.spanner.v1.PartitionReadRequest.index].
    +     *
    +     * It isn't an error for the `key_set` to name rows that don't
    +     * exist in the database. Read yields nothing for nonexistent rows.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + */ + public com.google.spanner.v1.KeySetOrBuilder getKeySetOrBuilder() { + if (keySetBuilder_ != null) { + return keySetBuilder_.getMessageOrBuilder(); + } else { + return keySet_ == null ? com.google.spanner.v1.KeySet.getDefaultInstance() : keySet_; + } + } + + /** + * + * + *
    +     * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +     * primary keys of the rows in
    +     * [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless
    +     * [index][google.spanner.v1.PartitionReadRequest.index] is present. If
    +     * [index][google.spanner.v1.PartitionReadRequest.index] is present, then
    +     * [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names
    +     * index keys in [index][google.spanner.v1.PartitionReadRequest.index].
    +     *
    +     * It isn't an error for the `key_set` to name rows that don't
    +     * exist in the database. Read yields nothing for nonexistent rows.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.KeySet, + com.google.spanner.v1.KeySet.Builder, + com.google.spanner.v1.KeySetOrBuilder> + internalGetKeySetFieldBuilder() { + if (keySetBuilder_ == null) { + keySetBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.KeySet, + com.google.spanner.v1.KeySet.Builder, + com.google.spanner.v1.KeySetOrBuilder>( + getKeySet(), getParentForChildren(), isClean()); + keySet_ = null; + } + return keySetBuilder_; + } + + private com.google.spanner.v1.PartitionOptions partitionOptions_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.PartitionOptions, + com.google.spanner.v1.PartitionOptions.Builder, + com.google.spanner.v1.PartitionOptionsOrBuilder> + partitionOptionsBuilder_; + + /** + * + * + *
    +     * Additional options that affect how many partitions are created.
    +     * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 9; + * + * @return Whether the partitionOptions field is set. + */ + public boolean hasPartitionOptions() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
    +     * Additional options that affect how many partitions are created.
    +     * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 9; + * + * @return The partitionOptions. + */ + public com.google.spanner.v1.PartitionOptions getPartitionOptions() { + if (partitionOptionsBuilder_ == null) { + return partitionOptions_ == null + ? com.google.spanner.v1.PartitionOptions.getDefaultInstance() + : partitionOptions_; + } else { + return partitionOptionsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Additional options that affect how many partitions are created.
    +     * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 9; + */ + public Builder setPartitionOptions(com.google.spanner.v1.PartitionOptions value) { + if (partitionOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + partitionOptions_ = value; + } else { + partitionOptionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Additional options that affect how many partitions are created.
    +     * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 9; + */ + public Builder setPartitionOptions( + com.google.spanner.v1.PartitionOptions.Builder builderForValue) { + if (partitionOptionsBuilder_ == null) { + partitionOptions_ = builderForValue.build(); + } else { + partitionOptionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Additional options that affect how many partitions are created.
    +     * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 9; + */ + public Builder mergePartitionOptions(com.google.spanner.v1.PartitionOptions value) { + if (partitionOptionsBuilder_ == null) { + if (((bitField0_ & 0x00000040) != 0) + && partitionOptions_ != null + && partitionOptions_ != com.google.spanner.v1.PartitionOptions.getDefaultInstance()) { + getPartitionOptionsBuilder().mergeFrom(value); + } else { + partitionOptions_ = value; + } + } else { + partitionOptionsBuilder_.mergeFrom(value); + } + if (partitionOptions_ != null) { + bitField0_ |= 0x00000040; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Additional options that affect how many partitions are created.
    +     * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 9; + */ + public Builder clearPartitionOptions() { + bitField0_ = (bitField0_ & ~0x00000040); + partitionOptions_ = null; + if (partitionOptionsBuilder_ != null) { + partitionOptionsBuilder_.dispose(); + partitionOptionsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Additional options that affect how many partitions are created.
    +     * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 9; + */ + public com.google.spanner.v1.PartitionOptions.Builder getPartitionOptionsBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return internalGetPartitionOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Additional options that affect how many partitions are created.
    +     * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 9; + */ + public com.google.spanner.v1.PartitionOptionsOrBuilder getPartitionOptionsOrBuilder() { + if (partitionOptionsBuilder_ != null) { + return partitionOptionsBuilder_.getMessageOrBuilder(); + } else { + return partitionOptions_ == null + ? com.google.spanner.v1.PartitionOptions.getDefaultInstance() + : partitionOptions_; + } + } + + /** + * + * + *
    +     * Additional options that affect how many partitions are created.
    +     * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 9; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.PartitionOptions, + com.google.spanner.v1.PartitionOptions.Builder, + com.google.spanner.v1.PartitionOptionsOrBuilder> + internalGetPartitionOptionsFieldBuilder() { + if (partitionOptionsBuilder_ == null) { + partitionOptionsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.PartitionOptions, + com.google.spanner.v1.PartitionOptions.Builder, + com.google.spanner.v1.PartitionOptionsOrBuilder>( + getPartitionOptions(), getParentForChildren(), isClean()); + partitionOptions_ = null; + } + return partitionOptionsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.PartitionReadRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.PartitionReadRequest) + private static final com.google.spanner.v1.PartitionReadRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.PartitionReadRequest(); + } + + public static com.google.spanner.v1.PartitionReadRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PartitionReadRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.PartitionReadRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionReadRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionReadRequestOrBuilder.java new file mode 100644 index 000000000000..506b61d78248 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionReadRequestOrBuilder.java @@ -0,0 +1,319 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface PartitionReadRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.PartitionReadRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The session used to create the partitions.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + java.lang.String getSession(); + + /** + * + * + *
    +   * Required. The session used to create the partitions.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + com.google.protobuf.ByteString getSessionBytes(); + + /** + * + * + *
    +   * Read only snapshot transactions are supported, read/write and single use
    +   * transactions are not.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return Whether the transaction field is set. + */ + boolean hasTransaction(); + + /** + * + * + *
    +   * Read only snapshot transactions are supported, read/write and single use
    +   * transactions are not.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return The transaction. + */ + com.google.spanner.v1.TransactionSelector getTransaction(); + + /** + * + * + *
    +   * Read only snapshot transactions are supported, read/write and single use
    +   * transactions are not.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + com.google.spanner.v1.TransactionSelectorOrBuilder getTransactionOrBuilder(); + + /** + * + * + *
    +   * Required. The name of the table in the database to be read.
    +   * 
    + * + * string table = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The table. + */ + java.lang.String getTable(); + + /** + * + * + *
    +   * Required. The name of the table in the database to be read.
    +   * 
    + * + * string table = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for table. + */ + com.google.protobuf.ByteString getTableBytes(); + + /** + * + * + *
    +   * If non-empty, the name of an index on
    +   * [table][google.spanner.v1.PartitionReadRequest.table]. This index is used
    +   * instead of the table primary key when interpreting
    +   * [key_set][google.spanner.v1.PartitionReadRequest.key_set] and sorting
    +   * result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set]
    +   * for further information.
    +   * 
    + * + * string index = 4; + * + * @return The index. + */ + java.lang.String getIndex(); + + /** + * + * + *
    +   * If non-empty, the name of an index on
    +   * [table][google.spanner.v1.PartitionReadRequest.table]. This index is used
    +   * instead of the table primary key when interpreting
    +   * [key_set][google.spanner.v1.PartitionReadRequest.key_set] and sorting
    +   * result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set]
    +   * for further information.
    +   * 
    + * + * string index = 4; + * + * @return The bytes for index. + */ + com.google.protobuf.ByteString getIndexBytes(); + + /** + * + * + *
    +   * The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
    +   * returned for each row matching this request.
    +   * 
    + * + * repeated string columns = 5; + * + * @return A list containing the columns. + */ + java.util.List getColumnsList(); + + /** + * + * + *
    +   * The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
    +   * returned for each row matching this request.
    +   * 
    + * + * repeated string columns = 5; + * + * @return The count of columns. + */ + int getColumnsCount(); + + /** + * + * + *
    +   * The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
    +   * returned for each row matching this request.
    +   * 
    + * + * repeated string columns = 5; + * + * @param index The index of the element to return. + * @return The columns at the given index. + */ + java.lang.String getColumns(int index); + + /** + * + * + *
    +   * The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
    +   * returned for each row matching this request.
    +   * 
    + * + * repeated string columns = 5; + * + * @param index The index of the value to return. + * @return The bytes of the columns at the given index. + */ + com.google.protobuf.ByteString getColumnsBytes(int index); + + /** + * + * + *
    +   * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +   * primary keys of the rows in
    +   * [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless
    +   * [index][google.spanner.v1.PartitionReadRequest.index] is present. If
    +   * [index][google.spanner.v1.PartitionReadRequest.index] is present, then
    +   * [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names
    +   * index keys in [index][google.spanner.v1.PartitionReadRequest.index].
    +   *
    +   * It isn't an error for the `key_set` to name rows that don't
    +   * exist in the database. Read yields nothing for nonexistent rows.
    +   * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the keySet field is set. + */ + boolean hasKeySet(); + + /** + * + * + *
    +   * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +   * primary keys of the rows in
    +   * [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless
    +   * [index][google.spanner.v1.PartitionReadRequest.index] is present. If
    +   * [index][google.spanner.v1.PartitionReadRequest.index] is present, then
    +   * [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names
    +   * index keys in [index][google.spanner.v1.PartitionReadRequest.index].
    +   *
    +   * It isn't an error for the `key_set` to name rows that don't
    +   * exist in the database. Read yields nothing for nonexistent rows.
    +   * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The keySet. + */ + com.google.spanner.v1.KeySet getKeySet(); + + /** + * + * + *
    +   * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +   * primary keys of the rows in
    +   * [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless
    +   * [index][google.spanner.v1.PartitionReadRequest.index] is present. If
    +   * [index][google.spanner.v1.PartitionReadRequest.index] is present, then
    +   * [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names
    +   * index keys in [index][google.spanner.v1.PartitionReadRequest.index].
    +   *
    +   * It isn't an error for the `key_set` to name rows that don't
    +   * exist in the database. Read yields nothing for nonexistent rows.
    +   * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + */ + com.google.spanner.v1.KeySetOrBuilder getKeySetOrBuilder(); + + /** + * + * + *
    +   * Additional options that affect how many partitions are created.
    +   * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 9; + * + * @return Whether the partitionOptions field is set. + */ + boolean hasPartitionOptions(); + + /** + * + * + *
    +   * Additional options that affect how many partitions are created.
    +   * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 9; + * + * @return The partitionOptions. + */ + com.google.spanner.v1.PartitionOptions getPartitionOptions(); + + /** + * + * + *
    +   * Additional options that affect how many partitions are created.
    +   * 
    + * + * .google.spanner.v1.PartitionOptions partition_options = 9; + */ + com.google.spanner.v1.PartitionOptionsOrBuilder getPartitionOptionsOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionResponse.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionResponse.java new file mode 100644 index 000000000000..47d8ccb7cb57 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionResponse.java @@ -0,0 +1,1213 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * The response for [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery]
    + * or [PartitionRead][google.spanner.v1.Spanner.PartitionRead]
    + * 
    + * + * Protobuf type {@code google.spanner.v1.PartitionResponse} + */ +@com.google.protobuf.Generated +public final class PartitionResponse extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.PartitionResponse) + PartitionResponseOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "PartitionResponse"); + } + + // Use PartitionResponse.newBuilder() to construct. + private PartitionResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private PartitionResponse() { + partitions_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_PartitionResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_PartitionResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.PartitionResponse.class, + com.google.spanner.v1.PartitionResponse.Builder.class); + } + + private int bitField0_; + public static final int PARTITIONS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List partitions_; + + /** + * + * + *
    +   * Partitions created by this request.
    +   * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + @java.lang.Override + public java.util.List getPartitionsList() { + return partitions_; + } + + /** + * + * + *
    +   * Partitions created by this request.
    +   * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + @java.lang.Override + public java.util.List + getPartitionsOrBuilderList() { + return partitions_; + } + + /** + * + * + *
    +   * Partitions created by this request.
    +   * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + @java.lang.Override + public int getPartitionsCount() { + return partitions_.size(); + } + + /** + * + * + *
    +   * Partitions created by this request.
    +   * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + @java.lang.Override + public com.google.spanner.v1.Partition getPartitions(int index) { + return partitions_.get(index); + } + + /** + * + * + *
    +   * Partitions created by this request.
    +   * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + @java.lang.Override + public com.google.spanner.v1.PartitionOrBuilder getPartitionsOrBuilder(int index) { + return partitions_.get(index); + } + + public static final int TRANSACTION_FIELD_NUMBER = 2; + private com.google.spanner.v1.Transaction transaction_; + + /** + * + * + *
    +   * Transaction created by this request.
    +   * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + * + * @return Whether the transaction field is set. + */ + @java.lang.Override + public boolean hasTransaction() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Transaction created by this request.
    +   * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + * + * @return The transaction. + */ + @java.lang.Override + public com.google.spanner.v1.Transaction getTransaction() { + return transaction_ == null + ? com.google.spanner.v1.Transaction.getDefaultInstance() + : transaction_; + } + + /** + * + * + *
    +   * Transaction created by this request.
    +   * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOrBuilder getTransactionOrBuilder() { + return transaction_ == null + ? com.google.spanner.v1.Transaction.getDefaultInstance() + : transaction_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < partitions_.size(); i++) { + output.writeMessage(1, partitions_.get(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getTransaction()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < partitions_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, partitions_.get(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getTransaction()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.PartitionResponse)) { + return super.equals(obj); + } + com.google.spanner.v1.PartitionResponse other = (com.google.spanner.v1.PartitionResponse) obj; + + if (!getPartitionsList().equals(other.getPartitionsList())) return false; + if (hasTransaction() != other.hasTransaction()) return false; + if (hasTransaction()) { + if (!getTransaction().equals(other.getTransaction())) return false; + } + if 
(!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getPartitionsCount() > 0) { + hash = (37 * hash) + PARTITIONS_FIELD_NUMBER; + hash = (53 * hash) + getPartitionsList().hashCode(); + } + if (hasTransaction()) { + hash = (37 * hash) + TRANSACTION_FIELD_NUMBER; + hash = (53 * hash) + getTransaction().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.PartitionResponse parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PartitionResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PartitionResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PartitionResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PartitionResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PartitionResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PartitionResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PartitionResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.PartitionResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PartitionResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.PartitionResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PartitionResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.PartitionResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + 
@java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The response for [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery]
    +   * or [PartitionRead][google.spanner.v1.Spanner.PartitionRead]
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.PartitionResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.PartitionResponse) + com.google.spanner.v1.PartitionResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_PartitionResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_PartitionResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.PartitionResponse.class, + com.google.spanner.v1.PartitionResponse.Builder.class); + } + + // Construct using com.google.spanner.v1.PartitionResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetPartitionsFieldBuilder(); + internalGetTransactionFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (partitionsBuilder_ == null) { + partitions_ = java.util.Collections.emptyList(); + } else { + partitions_ = null; + partitionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + transaction_ = null; + if (transactionBuilder_ != null) { + transactionBuilder_.dispose(); + transactionBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + 
.internal_static_google_spanner_v1_PartitionResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.PartitionResponse getDefaultInstanceForType() { + return com.google.spanner.v1.PartitionResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.PartitionResponse build() { + com.google.spanner.v1.PartitionResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.PartitionResponse buildPartial() { + com.google.spanner.v1.PartitionResponse result = + new com.google.spanner.v1.PartitionResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.v1.PartitionResponse result) { + if (partitionsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + partitions_ = java.util.Collections.unmodifiableList(partitions_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.partitions_ = partitions_; + } else { + result.partitions_ = partitionsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.PartitionResponse result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.transaction_ = + transactionBuilder_ == null ? 
transaction_ : transactionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.PartitionResponse) { + return mergeFrom((com.google.spanner.v1.PartitionResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.PartitionResponse other) { + if (other == com.google.spanner.v1.PartitionResponse.getDefaultInstance()) return this; + if (partitionsBuilder_ == null) { + if (!other.partitions_.isEmpty()) { + if (partitions_.isEmpty()) { + partitions_ = other.partitions_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensurePartitionsIsMutable(); + partitions_.addAll(other.partitions_); + } + onChanged(); + } + } else { + if (!other.partitions_.isEmpty()) { + if (partitionsBuilder_.isEmpty()) { + partitionsBuilder_.dispose(); + partitionsBuilder_ = null; + partitions_ = other.partitions_; + bitField0_ = (bitField0_ & ~0x00000001); + partitionsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetPartitionsFieldBuilder() + : null; + } else { + partitionsBuilder_.addAllMessages(other.partitions_); + } + } + } + if (other.hasTransaction()) { + mergeTransaction(other.getTransaction()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.v1.Partition m = + input.readMessage(com.google.spanner.v1.Partition.parser(), extensionRegistry); + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(m); + } else { + partitionsBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetTransactionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List partitions_ = + java.util.Collections.emptyList(); + + private void ensurePartitionsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + partitions_ = new java.util.ArrayList(partitions_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Partition, + 
com.google.spanner.v1.Partition.Builder, + com.google.spanner.v1.PartitionOrBuilder> + partitionsBuilder_; + + /** + * + * + *
    +     * Partitions created by this request.
    +     * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + public java.util.List getPartitionsList() { + if (partitionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(partitions_); + } else { + return partitionsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Partitions created by this request.
    +     * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + public int getPartitionsCount() { + if (partitionsBuilder_ == null) { + return partitions_.size(); + } else { + return partitionsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Partitions created by this request.
    +     * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + public com.google.spanner.v1.Partition getPartitions(int index) { + if (partitionsBuilder_ == null) { + return partitions_.get(index); + } else { + return partitionsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Partitions created by this request.
    +     * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + public Builder setPartitions(int index, com.google.spanner.v1.Partition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.set(index, value); + onChanged(); + } else { + partitionsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Partitions created by this request.
    +     * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + public Builder setPartitions( + int index, com.google.spanner.v1.Partition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.set(index, builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Partitions created by this request.
    +     * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + public Builder addPartitions(com.google.spanner.v1.Partition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.add(value); + onChanged(); + } else { + partitionsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Partitions created by this request.
    +     * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + public Builder addPartitions(int index, com.google.spanner.v1.Partition value) { + if (partitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionsIsMutable(); + partitions_.add(index, value); + onChanged(); + } else { + partitionsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Partitions created by this request.
    +     * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + public Builder addPartitions(com.google.spanner.v1.Partition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Partitions created by this request.
    +     * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + public Builder addPartitions( + int index, com.google.spanner.v1.Partition.Builder builderForValue) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.add(index, builderForValue.build()); + onChanged(); + } else { + partitionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Partitions created by this request.
    +     * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + public Builder addAllPartitions( + java.lang.Iterable values) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, partitions_); + onChanged(); + } else { + partitionsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Partitions created by this request.
    +     * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + public Builder clearPartitions() { + if (partitionsBuilder_ == null) { + partitions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + partitionsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Partitions created by this request.
    +     * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + public Builder removePartitions(int index) { + if (partitionsBuilder_ == null) { + ensurePartitionsIsMutable(); + partitions_.remove(index); + onChanged(); + } else { + partitionsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Partitions created by this request.
    +     * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + public com.google.spanner.v1.Partition.Builder getPartitionsBuilder(int index) { + return internalGetPartitionsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Partitions created by this request.
    +     * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + public com.google.spanner.v1.PartitionOrBuilder getPartitionsOrBuilder(int index) { + if (partitionsBuilder_ == null) { + return partitions_.get(index); + } else { + return partitionsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Partitions created by this request.
    +     * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + public java.util.List + getPartitionsOrBuilderList() { + if (partitionsBuilder_ != null) { + return partitionsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(partitions_); + } + } + + /** + * + * + *
    +     * Partitions created by this request.
    +     * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + public com.google.spanner.v1.Partition.Builder addPartitionsBuilder() { + return internalGetPartitionsFieldBuilder() + .addBuilder(com.google.spanner.v1.Partition.getDefaultInstance()); + } + + /** + * + * + *
    +     * Partitions created by this request.
    +     * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + public com.google.spanner.v1.Partition.Builder addPartitionsBuilder(int index) { + return internalGetPartitionsFieldBuilder() + .addBuilder(index, com.google.spanner.v1.Partition.getDefaultInstance()); + } + + /** + * + * + *
    +     * Partitions created by this request.
    +     * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + public java.util.List getPartitionsBuilderList() { + return internalGetPartitionsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Partition, + com.google.spanner.v1.Partition.Builder, + com.google.spanner.v1.PartitionOrBuilder> + internalGetPartitionsFieldBuilder() { + if (partitionsBuilder_ == null) { + partitionsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.Partition, + com.google.spanner.v1.Partition.Builder, + com.google.spanner.v1.PartitionOrBuilder>( + partitions_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + partitions_ = null; + } + return partitionsBuilder_; + } + + private com.google.spanner.v1.Transaction transaction_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Transaction, + com.google.spanner.v1.Transaction.Builder, + com.google.spanner.v1.TransactionOrBuilder> + transactionBuilder_; + + /** + * + * + *
    +     * Transaction created by this request.
    +     * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + * + * @return Whether the transaction field is set. + */ + public boolean hasTransaction() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Transaction created by this request.
    +     * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + * + * @return The transaction. + */ + public com.google.spanner.v1.Transaction getTransaction() { + if (transactionBuilder_ == null) { + return transaction_ == null + ? com.google.spanner.v1.Transaction.getDefaultInstance() + : transaction_; + } else { + return transactionBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Transaction created by this request.
    +     * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + */ + public Builder setTransaction(com.google.spanner.v1.Transaction value) { + if (transactionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + transaction_ = value; + } else { + transactionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Transaction created by this request.
    +     * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + */ + public Builder setTransaction(com.google.spanner.v1.Transaction.Builder builderForValue) { + if (transactionBuilder_ == null) { + transaction_ = builderForValue.build(); + } else { + transactionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Transaction created by this request.
    +     * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + */ + public Builder mergeTransaction(com.google.spanner.v1.Transaction value) { + if (transactionBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && transaction_ != null + && transaction_ != com.google.spanner.v1.Transaction.getDefaultInstance()) { + getTransactionBuilder().mergeFrom(value); + } else { + transaction_ = value; + } + } else { + transactionBuilder_.mergeFrom(value); + } + if (transaction_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Transaction created by this request.
    +     * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + */ + public Builder clearTransaction() { + bitField0_ = (bitField0_ & ~0x00000002); + transaction_ = null; + if (transactionBuilder_ != null) { + transactionBuilder_.dispose(); + transactionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Transaction created by this request.
    +     * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + */ + public com.google.spanner.v1.Transaction.Builder getTransactionBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetTransactionFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Transaction created by this request.
    +     * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + */ + public com.google.spanner.v1.TransactionOrBuilder getTransactionOrBuilder() { + if (transactionBuilder_ != null) { + return transactionBuilder_.getMessageOrBuilder(); + } else { + return transaction_ == null + ? com.google.spanner.v1.Transaction.getDefaultInstance() + : transaction_; + } + } + + /** + * + * + *
    +     * Transaction created by this request.
    +     * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Transaction, + com.google.spanner.v1.Transaction.Builder, + com.google.spanner.v1.TransactionOrBuilder> + internalGetTransactionFieldBuilder() { + if (transactionBuilder_ == null) { + transactionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Transaction, + com.google.spanner.v1.Transaction.Builder, + com.google.spanner.v1.TransactionOrBuilder>( + getTransaction(), getParentForChildren(), isClean()); + transaction_ = null; + } + return transactionBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.PartitionResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.PartitionResponse) + private static final com.google.spanner.v1.PartitionResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.PartitionResponse(); + } + + public static com.google.spanner.v1.PartitionResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PartitionResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + 
public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.PartitionResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionResponseOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionResponseOrBuilder.java new file mode 100644 index 000000000000..078cd629d686 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionResponseOrBuilder.java @@ -0,0 +1,120 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface PartitionResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.PartitionResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Partitions created by this request.
    +   * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + java.util.List getPartitionsList(); + + /** + * + * + *
    +   * Partitions created by this request.
    +   * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + com.google.spanner.v1.Partition getPartitions(int index); + + /** + * + * + *
    +   * Partitions created by this request.
    +   * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + int getPartitionsCount(); + + /** + * + * + *
    +   * Partitions created by this request.
    +   * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + java.util.List getPartitionsOrBuilderList(); + + /** + * + * + *
    +   * Partitions created by this request.
    +   * 
    + * + * repeated .google.spanner.v1.Partition partitions = 1; + */ + com.google.spanner.v1.PartitionOrBuilder getPartitionsOrBuilder(int index); + + /** + * + * + *
    +   * Transaction created by this request.
    +   * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + * + * @return Whether the transaction field is set. + */ + boolean hasTransaction(); + + /** + * + * + *
    +   * Transaction created by this request.
    +   * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + * + * @return The transaction. + */ + com.google.spanner.v1.Transaction getTransaction(); + + /** + * + * + *
    +   * Transaction created by this request.
    +   * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + */ + com.google.spanner.v1.TransactionOrBuilder getTransactionOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PlanNode.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PlanNode.java new file mode 100644 index 000000000000..2573c5966d20 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PlanNode.java @@ -0,0 +1,4662 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/query_plan.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * Node information for nodes appearing in a
    + * [QueryPlan.plan_nodes][google.spanner.v1.QueryPlan.plan_nodes].
    + * 
    + * + * Protobuf type {@code google.spanner.v1.PlanNode} + */ +@com.google.protobuf.Generated +public final class PlanNode extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.PlanNode) + PlanNodeOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "PlanNode"); + } + + // Use PlanNode.newBuilder() to construct. + private PlanNode(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private PlanNode() { + kind_ = 0; + displayName_ = ""; + childLinks_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_PlanNode_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_PlanNode_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.PlanNode.class, com.google.spanner.v1.PlanNode.Builder.class); + } + + /** + * + * + *
    +   * The kind of [PlanNode][google.spanner.v1.PlanNode]. Distinguishes between
    +   * the two different kinds of nodes that can appear in a query plan.
    +   * 
    + * + * Protobuf enum {@code google.spanner.v1.PlanNode.Kind} + */ + public enum Kind implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * KIND_UNSPECIFIED = 0; + */ + KIND_UNSPECIFIED(0), + /** + * + * + *
    +     * Denotes a Relational operator node in the expression tree. Relational
    +     * operators represent iterative processing of rows during query execution.
    +     * For example, a `TableScan` operation that reads rows from a table.
    +     * 
    + * + * RELATIONAL = 1; + */ + RELATIONAL(1), + /** + * + * + *
    +     * Denotes a Scalar node in the expression tree. Scalar nodes represent
    +     * non-iterable entities in the query plan. For example, constants or
    +     * arithmetic operators appearing inside predicate expressions or references
    +     * to column names.
    +     * 
    + * + * SCALAR = 2; + */ + SCALAR(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Kind"); + } + + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * KIND_UNSPECIFIED = 0; + */ + public static final int KIND_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * Denotes a Relational operator node in the expression tree. Relational
    +     * operators represent iterative processing of rows during query execution.
    +     * For example, a `TableScan` operation that reads rows from a table.
    +     * 
    + * + * RELATIONAL = 1; + */ + public static final int RELATIONAL_VALUE = 1; + + /** + * + * + *
    +     * Denotes a Scalar node in the expression tree. Scalar nodes represent
    +     * non-iterable entities in the query plan. For example, constants or
    +     * arithmetic operators appearing inside predicate expressions or references
    +     * to column names.
    +     * 
    + * + * SCALAR = 2; + */ + public static final int SCALAR_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Kind valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Kind forNumber(int value) { + switch (value) { + case 0: + return KIND_UNSPECIFIED; + case 1: + return RELATIONAL; + case 2: + return SCALAR; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Kind findValueByNumber(int number) { + return Kind.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.v1.PlanNode.getDescriptor().getEnumTypes().get(0); + } + + private static final Kind[] VALUES = values(); + + public static Kind valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != 
getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Kind(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.v1.PlanNode.Kind) + } + + public interface ChildLinkOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.PlanNode.ChildLink) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * The node to which the link points.
    +     * 
    + * + * int32 child_index = 1; + * + * @return The childIndex. + */ + int getChildIndex(); + + /** + * + * + *
    +     * The type of the link. For example, in Hash Joins this could be used to
    +     * distinguish between the build child and the probe child, or in the case
    +     * of the child being an output variable, to represent the tag associated
    +     * with the output variable.
    +     * 
    + * + * string type = 2; + * + * @return The type. + */ + java.lang.String getType(); + + /** + * + * + *
    +     * The type of the link. For example, in Hash Joins this could be used to
    +     * distinguish between the build child and the probe child, or in the case
    +     * of the child being an output variable, to represent the tag associated
    +     * with the output variable.
    +     * 
    + * + * string type = 2; + * + * @return The bytes for type. + */ + com.google.protobuf.ByteString getTypeBytes(); + + /** + * + * + *
    +     * Only present if the child node is
    +     * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] and corresponds to an
    +     * output variable of the parent node. The field carries the name of the
    +     * output variable. For example, a `TableScan` operator that reads rows from
    +     * a table will have child links to the `SCALAR` nodes representing the
    +     * output variables created for each column that is read by the operator.
    +     * The corresponding `variable` fields will be set to the variable names
    +     * assigned to the columns.
    +     * 
    + * + * string variable = 3; + * + * @return The variable. + */ + java.lang.String getVariable(); + + /** + * + * + *
    +     * Only present if the child node is
    +     * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] and corresponds to an
    +     * output variable of the parent node. The field carries the name of the
    +     * output variable. For example, a `TableScan` operator that reads rows from
    +     * a table will have child links to the `SCALAR` nodes representing the
    +     * output variables created for each column that is read by the operator.
    +     * The corresponding `variable` fields will be set to the variable names
    +     * assigned to the columns.
    +     * 
    + * + * string variable = 3; + * + * @return The bytes for variable. + */ + com.google.protobuf.ByteString getVariableBytes(); + } + + /** + * + * + *
    +   * Metadata associated with a parent-child relationship appearing in a
    +   * [PlanNode][google.spanner.v1.PlanNode].
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.PlanNode.ChildLink} + */ + public static final class ChildLink extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.PlanNode.ChildLink) + ChildLinkOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ChildLink"); + } + + // Use ChildLink.newBuilder() to construct. + private ChildLink(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ChildLink() { + type_ = ""; + variable_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_PlanNode_ChildLink_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_PlanNode_ChildLink_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.PlanNode.ChildLink.class, + com.google.spanner.v1.PlanNode.ChildLink.Builder.class); + } + + public static final int CHILD_INDEX_FIELD_NUMBER = 1; + private int childIndex_ = 0; + + /** + * + * + *
    +     * The node to which the link points.
    +     * 
    + * + * int32 child_index = 1; + * + * @return The childIndex. + */ + @java.lang.Override + public int getChildIndex() { + return childIndex_; + } + + public static final int TYPE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object type_ = ""; + + /** + * + * + *
    +     * The type of the link. For example, in Hash Joins this could be used to
    +     * distinguish between the build child and the probe child, or in the case
    +     * of the child being an output variable, to represent the tag associated
    +     * with the output variable.
    +     * 
    + * + * string type = 2; + * + * @return The type. + */ + @java.lang.Override + public java.lang.String getType() { + java.lang.Object ref = type_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + type_ = s; + return s; + } + } + + /** + * + * + *
    +     * The type of the link. For example, in Hash Joins this could be used to
    +     * distinguish between the build child and the probe child, or in the case
    +     * of the child being an output variable, to represent the tag associated
    +     * with the output variable.
    +     * 
    + * + * string type = 2; + * + * @return The bytes for type. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTypeBytes() { + java.lang.Object ref = type_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + type_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int VARIABLE_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object variable_ = ""; + + /** + * + * + *
    +     * Only present if the child node is
    +     * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] and corresponds to an
    +     * output variable of the parent node. The field carries the name of the
    +     * output variable. For example, a `TableScan` operator that reads rows from
    +     * a table will have child links to the `SCALAR` nodes representing the
    +     * output variables created for each column that is read by the operator.
    +     * The corresponding `variable` fields will be set to the variable names
    +     * assigned to the columns.
    +     * 
    + * + * string variable = 3; + * + * @return The variable. + */ + @java.lang.Override + public java.lang.String getVariable() { + java.lang.Object ref = variable_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + variable_ = s; + return s; + } + } + + /** + * + * + *
    +     * Only present if the child node is
    +     * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] and corresponds to an
    +     * output variable of the parent node. The field carries the name of the
    +     * output variable. For example, a `TableScan` operator that reads rows from
    +     * a table will have child links to the `SCALAR` nodes representing the
    +     * output variables created for each column that is read by the operator.
    +     * The corresponding `variable` fields will be set to the variable names
    +     * assigned to the columns.
    +     * 
    + * + * string variable = 3; + * + * @return The bytes for variable. + */ + @java.lang.Override + public com.google.protobuf.ByteString getVariableBytes() { + java.lang.Object ref = variable_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + variable_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (childIndex_ != 0) { + output.writeInt32(1, childIndex_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(type_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, type_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(variable_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, variable_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (childIndex_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(1, childIndex_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(type_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, type_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(variable_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, variable_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return 
true; + } + if (!(obj instanceof com.google.spanner.v1.PlanNode.ChildLink)) { + return super.equals(obj); + } + com.google.spanner.v1.PlanNode.ChildLink other = + (com.google.spanner.v1.PlanNode.ChildLink) obj; + + if (getChildIndex() != other.getChildIndex()) return false; + if (!getType().equals(other.getType())) return false; + if (!getVariable().equals(other.getVariable())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + CHILD_INDEX_FIELD_NUMBER; + hash = (53 * hash) + getChildIndex(); + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + getType().hashCode(); + hash = (37 * hash) + VARIABLE_FIELD_NUMBER; + hash = (53 * hash) + getVariable().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.PlanNode.ChildLink parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PlanNode.ChildLink parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PlanNode.ChildLink parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PlanNode.ChildLink parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + + public static com.google.spanner.v1.PlanNode.ChildLink parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PlanNode.ChildLink parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PlanNode.ChildLink parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PlanNode.ChildLink parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.PlanNode.ChildLink parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PlanNode.ChildLink parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.PlanNode.ChildLink parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PlanNode.ChildLink parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.PlanNode.ChildLink prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * Metadata associated with a parent-child relationship appearing in a
    +     * [PlanNode][google.spanner.v1.PlanNode].
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.PlanNode.ChildLink} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.PlanNode.ChildLink) + com.google.spanner.v1.PlanNode.ChildLinkOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_PlanNode_ChildLink_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_PlanNode_ChildLink_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.PlanNode.ChildLink.class, + com.google.spanner.v1.PlanNode.ChildLink.Builder.class); + } + + // Construct using com.google.spanner.v1.PlanNode.ChildLink.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + childIndex_ = 0; + type_ = ""; + variable_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_PlanNode_ChildLink_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.PlanNode.ChildLink getDefaultInstanceForType() { + return com.google.spanner.v1.PlanNode.ChildLink.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.PlanNode.ChildLink build() { + com.google.spanner.v1.PlanNode.ChildLink result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public 
com.google.spanner.v1.PlanNode.ChildLink buildPartial() { + com.google.spanner.v1.PlanNode.ChildLink result = + new com.google.spanner.v1.PlanNode.ChildLink(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.PlanNode.ChildLink result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.childIndex_ = childIndex_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.type_ = type_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.variable_ = variable_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.PlanNode.ChildLink) { + return mergeFrom((com.google.spanner.v1.PlanNode.ChildLink) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.PlanNode.ChildLink other) { + if (other == com.google.spanner.v1.PlanNode.ChildLink.getDefaultInstance()) return this; + if (other.getChildIndex() != 0) { + setChildIndex(other.getChildIndex()); + } + if (!other.getType().isEmpty()) { + type_ = other.type_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getVariable().isEmpty()) { + variable_ = other.variable_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + childIndex_ = 
input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + type_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + variable_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int childIndex_; + + /** + * + * + *
    +       * The node to which the link points.
    +       * 
    + * + * int32 child_index = 1; + * + * @return The childIndex. + */ + @java.lang.Override + public int getChildIndex() { + return childIndex_; + } + + /** + * + * + *
    +       * The node to which the link points.
    +       * 
    + * + * int32 child_index = 1; + * + * @param value The childIndex to set. + * @return This builder for chaining. + */ + public Builder setChildIndex(int value) { + + childIndex_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The node to which the link points.
    +       * 
    + * + * int32 child_index = 1; + * + * @return This builder for chaining. + */ + public Builder clearChildIndex() { + bitField0_ = (bitField0_ & ~0x00000001); + childIndex_ = 0; + onChanged(); + return this; + } + + private java.lang.Object type_ = ""; + + /** + * + * + *
    +       * The type of the link. For example, in Hash Joins this could be used to
    +       * distinguish between the build child and the probe child, or in the case
    +       * of the child being an output variable, to represent the tag associated
    +       * with the output variable.
    +       * 
    + * + * string type = 2; + * + * @return The type. + */ + public java.lang.String getType() { + java.lang.Object ref = type_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + type_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * The type of the link. For example, in Hash Joins this could be used to
    +       * distinguish between the build child and the probe child, or in the case
    +       * of the child being an output variable, to represent the tag associated
    +       * with the output variable.
    +       * 
    + * + * string type = 2; + * + * @return The bytes for type. + */ + public com.google.protobuf.ByteString getTypeBytes() { + java.lang.Object ref = type_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + type_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * The type of the link. For example, in Hash Joins this could be used to
    +       * distinguish between the build child and the probe child, or in the case
    +       * of the child being an output variable, to represent the tag associated
    +       * with the output variable.
    +       * 
    + * + * string type = 2; + * + * @param value The type to set. + * @return This builder for chaining. + */ + public Builder setType(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + type_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The type of the link. For example, in Hash Joins this could be used to
    +       * distinguish between the build child and the probe child, or in the case
    +       * of the child being an output variable, to represent the tag associated
    +       * with the output variable.
    +       * 
    + * + * string type = 2; + * + * @return This builder for chaining. + */ + public Builder clearType() { + type_ = getDefaultInstance().getType(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +       * The type of the link. For example, in Hash Joins this could be used to
    +       * distinguish between the build child and the probe child, or in the case
    +       * of the child being an output variable, to represent the tag associated
    +       * with the output variable.
    +       * 
    + * + * string type = 2; + * + * @param value The bytes for type to set. + * @return This builder for chaining. + */ + public Builder setTypeBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + type_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object variable_ = ""; + + /** + * + * + *
    +       * Only present if the child node is
    +       * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] and corresponds to an
    +       * output variable of the parent node. The field carries the name of the
    +       * output variable. For example, a `TableScan` operator that reads rows from
    +       * a table will have child links to the `SCALAR` nodes representing the
    +       * output variables created for each column that is read by the operator.
    +       * The corresponding `variable` fields will be set to the variable names
    +       * assigned to the columns.
    +       * 
    + * + * string variable = 3; + * + * @return The variable. + */ + public java.lang.String getVariable() { + java.lang.Object ref = variable_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + variable_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * Only present if the child node is
    +       * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] and corresponds to an
    +       * output variable of the parent node. The field carries the name of the
    +       * output variable. For example, a `TableScan` operator that reads rows from
    +       * a table will have child links to the `SCALAR` nodes representing the
    +       * output variables created for each column that is read by the operator.
    +       * The corresponding `variable` fields will be set to the variable names
    +       * assigned to the columns.
    +       * 
    + * + * string variable = 3; + * + * @return The bytes for variable. + */ + public com.google.protobuf.ByteString getVariableBytes() { + java.lang.Object ref = variable_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + variable_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * Only present if the child node is
    +       * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] and corresponds to an
    +       * output variable of the parent node. The field carries the name of the
    +       * output variable. For example, a `TableScan` operator that reads rows from
    +       * a table will have child links to the `SCALAR` nodes representing the
    +       * output variables created for each column that is read by the operator.
    +       * The corresponding `variable` fields will be set to the variable names
    +       * assigned to the columns.
    +       * 
    + * + * string variable = 3; + * + * @param value The variable to set. + * @return This builder for chaining. + */ + public Builder setVariable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + variable_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Only present if the child node is
    +       * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] and corresponds to an
    +       * output variable of the parent node. The field carries the name of the
    +       * output variable. For example, a `TableScan` operator that reads rows from
    +       * a table will have child links to the `SCALAR` nodes representing the
    +       * output variables created for each column that is read by the operator.
    +       * The corresponding `variable` fields will be set to the variable names
    +       * assigned to the columns.
    +       * 
    + * + * string variable = 3; + * + * @return This builder for chaining. + */ + public Builder clearVariable() { + variable_ = getDefaultInstance().getVariable(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Only present if the child node is
    +       * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] and corresponds to an
    +       * output variable of the parent node. The field carries the name of the
    +       * output variable. For example, a `TableScan` operator that reads rows from
    +       * a table will have child links to the `SCALAR` nodes representing the
    +       * output variables created for each column that is read by the operator.
    +       * The corresponding `variable` fields will be set to the variable names
    +       * assigned to the columns.
    +       * 
    + * + * string variable = 3; + * + * @param value The bytes for variable to set. + * @return This builder for chaining. + */ + public Builder setVariableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + variable_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.PlanNode.ChildLink) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.PlanNode.ChildLink) + private static final com.google.spanner.v1.PlanNode.ChildLink DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.PlanNode.ChildLink(); + } + + public static com.google.spanner.v1.PlanNode.ChildLink getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ChildLink parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.PlanNode.ChildLink 
getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface ShortRepresentationOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.PlanNode.ShortRepresentation) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * A string representation of the expression subtree rooted at this node.
    +     * 
    + * + * string description = 1; + * + * @return The description. + */ + java.lang.String getDescription(); + + /** + * + * + *
    +     * A string representation of the expression subtree rooted at this node.
    +     * 
    + * + * string description = 1; + * + * @return The bytes for description. + */ + com.google.protobuf.ByteString getDescriptionBytes(); + + /** + * + * + *
    +     * A mapping of (subquery variable name) -> (subquery node id) for cases
    +     * where the `description` string of this node references a `SCALAR`
    +     * subquery contained in the expression subtree rooted at this node. The
    +     * referenced `SCALAR` subquery may not necessarily be a direct child of
    +     * this node.
    +     * 
    + * + * map<string, int32> subqueries = 2; + */ + int getSubqueriesCount(); + + /** + * + * + *
    +     * A mapping of (subquery variable name) -> (subquery node id) for cases
    +     * where the `description` string of this node references a `SCALAR`
    +     * subquery contained in the expression subtree rooted at this node. The
    +     * referenced `SCALAR` subquery may not necessarily be a direct child of
    +     * this node.
    +     * 
    + * + * map<string, int32> subqueries = 2; + */ + boolean containsSubqueries(java.lang.String key); + + /** Use {@link #getSubqueriesMap()} instead. */ + @java.lang.Deprecated + java.util.Map getSubqueries(); + + /** + * + * + *
    +     * A mapping of (subquery variable name) -> (subquery node id) for cases
    +     * where the `description` string of this node references a `SCALAR`
    +     * subquery contained in the expression subtree rooted at this node. The
    +     * referenced `SCALAR` subquery may not necessarily be a direct child of
    +     * this node.
    +     * 
    + * + * map<string, int32> subqueries = 2; + */ + java.util.Map getSubqueriesMap(); + + /** + * + * + *
    +     * A mapping of (subquery variable name) -> (subquery node id) for cases
    +     * where the `description` string of this node references a `SCALAR`
    +     * subquery contained in the expression subtree rooted at this node. The
    +     * referenced `SCALAR` subquery may not necessarily be a direct child of
    +     * this node.
    +     * 
    + * + * map<string, int32> subqueries = 2; + */ + int getSubqueriesOrDefault(java.lang.String key, int defaultValue); + + /** + * + * + *
    +     * A mapping of (subquery variable name) -> (subquery node id) for cases
    +     * where the `description` string of this node references a `SCALAR`
    +     * subquery contained in the expression subtree rooted at this node. The
    +     * referenced `SCALAR` subquery may not necessarily be a direct child of
    +     * this node.
    +     * 
    + * + * map<string, int32> subqueries = 2; + */ + int getSubqueriesOrThrow(java.lang.String key); + } + + /** + * + * + *
    +   * Condensed representation of a node and its subtree. Only present for
    +   * `SCALAR` [PlanNode(s)][google.spanner.v1.PlanNode].
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.PlanNode.ShortRepresentation} + */ + public static final class ShortRepresentation extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.PlanNode.ShortRepresentation) + ShortRepresentationOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ShortRepresentation"); + } + + // Use ShortRepresentation.newBuilder() to construct. + private ShortRepresentation(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ShortRepresentation() { + description_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_PlanNode_ShortRepresentation_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 2: + return internalGetSubqueries(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_PlanNode_ShortRepresentation_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.PlanNode.ShortRepresentation.class, + com.google.spanner.v1.PlanNode.ShortRepresentation.Builder.class); + } + + public static final int DESCRIPTION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object description_ = ""; + + /** + * + * + *
    +     * A string representation of the expression subtree rooted at this node.
    +     * 
    + * + * string description = 1; + * + * @return The description. + */ + @java.lang.Override + public java.lang.String getDescription() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + description_ = s; + return s; + } + } + + /** + * + * + *
    +     * A string representation of the expression subtree rooted at this node.
    +     * 
    + * + * string description = 1; + * + * @return The bytes for description. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SUBQUERIES_FIELD_NUMBER = 2; + + private static final class SubqueriesDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_PlanNode_ShortRepresentation_SubqueriesEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.INT32, + 0); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField subqueries_; + + private com.google.protobuf.MapField + internalGetSubqueries() { + if (subqueries_ == null) { + return com.google.protobuf.MapField.emptyMapField( + SubqueriesDefaultEntryHolder.defaultEntry); + } + return subqueries_; + } + + public int getSubqueriesCount() { + return internalGetSubqueries().getMap().size(); + } + + /** + * + * + *
    +     * A mapping of (subquery variable name) -> (subquery node id) for cases
    +     * where the `description` string of this node references a `SCALAR`
    +     * subquery contained in the expression subtree rooted at this node. The
    +     * referenced `SCALAR` subquery may not necessarily be a direct child of
    +     * this node.
    +     * 
    + * + * map<string, int32> subqueries = 2; + */ + @java.lang.Override + public boolean containsSubqueries(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetSubqueries().getMap().containsKey(key); + } + + /** Use {@link #getSubqueriesMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getSubqueries() { + return getSubqueriesMap(); + } + + /** + * + * + *
    +     * A mapping of (subquery variable name) -> (subquery node id) for cases
    +     * where the `description` string of this node references a `SCALAR`
    +     * subquery contained in the expression subtree rooted at this node. The
    +     * referenced `SCALAR` subquery may not necessarily be a direct child of
    +     * this node.
    +     * 
    + * + * map<string, int32> subqueries = 2; + */ + @java.lang.Override + public java.util.Map getSubqueriesMap() { + return internalGetSubqueries().getMap(); + } + + /** + * + * + *
    +     * A mapping of (subquery variable name) -> (subquery node id) for cases
    +     * where the `description` string of this node references a `SCALAR`
    +     * subquery contained in the expression subtree rooted at this node. The
    +     * referenced `SCALAR` subquery may not necessarily be a direct child of
    +     * this node.
    +     * 
    + * + * map<string, int32> subqueries = 2; + */ + @java.lang.Override + public int getSubqueriesOrDefault(java.lang.String key, int defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetSubqueries().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
    +     * A mapping of (subquery variable name) -> (subquery node id) for cases
    +     * where the `description` string of this node references a `SCALAR`
    +     * subquery contained in the expression subtree rooted at this node. The
    +     * referenced `SCALAR` subquery may not necessarily be a direct child of
    +     * this node.
    +     * 
    + * + * map<string, int32> subqueries = 2; + */ + @java.lang.Override + public int getSubqueriesOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetSubqueries().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(description_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, description_); + } + com.google.protobuf.GeneratedMessage.serializeStringMapTo( + output, internalGetSubqueries(), SubqueriesDefaultEntryHolder.defaultEntry, 2); + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(description_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, description_); + } + for (java.util.Map.Entry entry : + internalGetSubqueries().getMap().entrySet()) { + com.google.protobuf.MapEntry subqueries__ = + SubqueriesDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, subqueries__); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
com.google.spanner.v1.PlanNode.ShortRepresentation)) { + return super.equals(obj); + } + com.google.spanner.v1.PlanNode.ShortRepresentation other = + (com.google.spanner.v1.PlanNode.ShortRepresentation) obj; + + if (!getDescription().equals(other.getDescription())) return false; + if (!internalGetSubqueries().equals(other.internalGetSubqueries())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DESCRIPTION_FIELD_NUMBER; + hash = (53 * hash) + getDescription().hashCode(); + if (!internalGetSubqueries().getMap().isEmpty()) { + hash = (37 * hash) + SUBQUERIES_FIELD_NUMBER; + hash = (53 * hash) + internalGetSubqueries().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.PlanNode.ShortRepresentation parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PlanNode.ShortRepresentation parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PlanNode.ShortRepresentation parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PlanNode.ShortRepresentation parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + + public static com.google.spanner.v1.PlanNode.ShortRepresentation parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PlanNode.ShortRepresentation parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PlanNode.ShortRepresentation parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PlanNode.ShortRepresentation parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.PlanNode.ShortRepresentation parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PlanNode.ShortRepresentation parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.PlanNode.ShortRepresentation parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PlanNode.ShortRepresentation parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.PlanNode.ShortRepresentation prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * Condensed representation of a node and its subtree. Only present for
    +     * `SCALAR` [PlanNode(s)][google.spanner.v1.PlanNode].
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.PlanNode.ShortRepresentation} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.PlanNode.ShortRepresentation) + com.google.spanner.v1.PlanNode.ShortRepresentationOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_PlanNode_ShortRepresentation_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 2: + return internalGetSubqueries(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 2: + return internalGetMutableSubqueries(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_PlanNode_ShortRepresentation_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.PlanNode.ShortRepresentation.class, + com.google.spanner.v1.PlanNode.ShortRepresentation.Builder.class); + } + + // Construct using com.google.spanner.v1.PlanNode.ShortRepresentation.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + description_ = ""; + internalGetMutableSubqueries().clear(); + return this; + } + + @java.lang.Override + 
public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_PlanNode_ShortRepresentation_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.PlanNode.ShortRepresentation getDefaultInstanceForType() { + return com.google.spanner.v1.PlanNode.ShortRepresentation.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.PlanNode.ShortRepresentation build() { + com.google.spanner.v1.PlanNode.ShortRepresentation result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.PlanNode.ShortRepresentation buildPartial() { + com.google.spanner.v1.PlanNode.ShortRepresentation result = + new com.google.spanner.v1.PlanNode.ShortRepresentation(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.PlanNode.ShortRepresentation result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.description_ = description_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.subqueries_ = internalGetSubqueries(); + result.subqueries_.makeImmutable(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.PlanNode.ShortRepresentation) { + return mergeFrom((com.google.spanner.v1.PlanNode.ShortRepresentation) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.PlanNode.ShortRepresentation other) { + if (other == com.google.spanner.v1.PlanNode.ShortRepresentation.getDefaultInstance()) + return this; + if (!other.getDescription().isEmpty()) { + description_ = other.description_; + bitField0_ |= 0x00000001; + onChanged(); + } + 
internalGetMutableSubqueries().mergeFrom(other.internalGetSubqueries()); + bitField0_ |= 0x00000002; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + description_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.protobuf.MapEntry subqueries__ = + input.readMessage( + SubqueriesDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableSubqueries() + .getMutableMap() + .put(subqueries__.getKey(), subqueries__.getValue()); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object description_ = ""; + + /** + * + * + *
    +       * A string representation of the expression subtree rooted at this node.
    +       * 
    + * + * string description = 1; + * + * @return The description. + */ + public java.lang.String getDescription() { + java.lang.Object ref = description_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + description_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * A string representation of the expression subtree rooted at this node.
    +       * 
    + * + * string description = 1; + * + * @return The bytes for description. + */ + public com.google.protobuf.ByteString getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * A string representation of the expression subtree rooted at this node.
    +       * 
    + * + * string description = 1; + * + * @param value The description to set. + * @return This builder for chaining. + */ + public Builder setDescription(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + description_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * A string representation of the expression subtree rooted at this node.
    +       * 
    + * + * string description = 1; + * + * @return This builder for chaining. + */ + public Builder clearDescription() { + description_ = getDefaultInstance().getDescription(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +       * A string representation of the expression subtree rooted at this node.
    +       * 
    + * + * string description = 1; + * + * @param value The bytes for description to set. + * @return This builder for chaining. + */ + public Builder setDescriptionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + description_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.MapField subqueries_; + + private com.google.protobuf.MapField + internalGetSubqueries() { + if (subqueries_ == null) { + return com.google.protobuf.MapField.emptyMapField( + SubqueriesDefaultEntryHolder.defaultEntry); + } + return subqueries_; + } + + private com.google.protobuf.MapField + internalGetMutableSubqueries() { + if (subqueries_ == null) { + subqueries_ = + com.google.protobuf.MapField.newMapField(SubqueriesDefaultEntryHolder.defaultEntry); + } + if (!subqueries_.isMutable()) { + subqueries_ = subqueries_.copy(); + } + bitField0_ |= 0x00000002; + onChanged(); + return subqueries_; + } + + public int getSubqueriesCount() { + return internalGetSubqueries().getMap().size(); + } + + /** + * + * + *
    +       * A mapping of (subquery variable name) -> (subquery node id) for cases
    +       * where the `description` string of this node references a `SCALAR`
    +       * subquery contained in the expression subtree rooted at this node. The
    +       * referenced `SCALAR` subquery may not necessarily be a direct child of
    +       * this node.
    +       * 
    + * + * map<string, int32> subqueries = 2; + */ + @java.lang.Override + public boolean containsSubqueries(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetSubqueries().getMap().containsKey(key); + } + + /** Use {@link #getSubqueriesMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getSubqueries() { + return getSubqueriesMap(); + } + + /** + * + * + *
    +       * A mapping of (subquery variable name) -> (subquery node id) for cases
    +       * where the `description` string of this node references a `SCALAR`
    +       * subquery contained in the expression subtree rooted at this node. The
    +       * referenced `SCALAR` subquery may not necessarily be a direct child of
    +       * this node.
    +       * 
    + * + * map<string, int32> subqueries = 2; + */ + @java.lang.Override + public java.util.Map getSubqueriesMap() { + return internalGetSubqueries().getMap(); + } + + /** + * + * + *
    +       * A mapping of (subquery variable name) -> (subquery node id) for cases
    +       * where the `description` string of this node references a `SCALAR`
    +       * subquery contained in the expression subtree rooted at this node. The
    +       * referenced `SCALAR` subquery may not necessarily be a direct child of
    +       * this node.
    +       * 
    + * + * map<string, int32> subqueries = 2; + */ + @java.lang.Override + public int getSubqueriesOrDefault(java.lang.String key, int defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetSubqueries().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
    +       * A mapping of (subquery variable name) -> (subquery node id) for cases
    +       * where the `description` string of this node references a `SCALAR`
    +       * subquery contained in the expression subtree rooted at this node. The
    +       * referenced `SCALAR` subquery may not necessarily be a direct child of
    +       * this node.
    +       * 
    + * + * map<string, int32> subqueries = 2; + */ + @java.lang.Override + public int getSubqueriesOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetSubqueries().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearSubqueries() { + bitField0_ = (bitField0_ & ~0x00000002); + internalGetMutableSubqueries().getMutableMap().clear(); + return this; + } + + /** + * + * + *
    +       * A mapping of (subquery variable name) -> (subquery node id) for cases
    +       * where the `description` string of this node references a `SCALAR`
    +       * subquery contained in the expression subtree rooted at this node. The
    +       * referenced `SCALAR` subquery may not necessarily be a direct child of
    +       * this node.
    +       * 
    + * + * map<string, int32> subqueries = 2; + */ + public Builder removeSubqueries(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableSubqueries().getMutableMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableSubqueries() { + bitField0_ |= 0x00000002; + return internalGetMutableSubqueries().getMutableMap(); + } + + /** + * + * + *
    +       * A mapping of (subquery variable name) -> (subquery node id) for cases
    +       * where the `description` string of this node references a `SCALAR`
    +       * subquery contained in the expression subtree rooted at this node. The
    +       * referenced `SCALAR` subquery may not necessarily be a direct child of
    +       * this node.
    +       * 
    + * + * map<string, int32> subqueries = 2; + */ + public Builder putSubqueries(java.lang.String key, int value) { + if (key == null) { + throw new NullPointerException("map key"); + } + + internalGetMutableSubqueries().getMutableMap().put(key, value); + bitField0_ |= 0x00000002; + return this; + } + + /** + * + * + *
    +       * A mapping of (subquery variable name) -> (subquery node id) for cases
    +       * where the `description` string of this node references a `SCALAR`
    +       * subquery contained in the expression subtree rooted at this node. The
    +       * referenced `SCALAR` subquery may not necessarily be a direct child of
    +       * this node.
    +       * 
    + * + * map<string, int32> subqueries = 2; + */ + public Builder putAllSubqueries(java.util.Map values) { + internalGetMutableSubqueries().getMutableMap().putAll(values); + bitField0_ |= 0x00000002; + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.PlanNode.ShortRepresentation) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.PlanNode.ShortRepresentation) + private static final com.google.spanner.v1.PlanNode.ShortRepresentation DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.PlanNode.ShortRepresentation(); + } + + public static com.google.spanner.v1.PlanNode.ShortRepresentation getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ShortRepresentation parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.PlanNode.ShortRepresentation getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + 
public static final int INDEX_FIELD_NUMBER = 1; + private int index_ = 0; + + /** + * + * + *
    +   * The `PlanNode`'s index in [node
    +   * list][google.spanner.v1.QueryPlan.plan_nodes].
    +   * 
    + * + * int32 index = 1; + * + * @return The index. + */ + @java.lang.Override + public int getIndex() { + return index_; + } + + public static final int KIND_FIELD_NUMBER = 2; + private int kind_ = 0; + + /** + * + * + *
    +   * Used to determine the type of node. May be needed for visualizing
    +   * different kinds of nodes differently. For example, If the node is a
    +   * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] node, it will have a
    +   * condensed representation which can be used to directly embed a description
    +   * of the node in its parent.
    +   * 
    + * + * .google.spanner.v1.PlanNode.Kind kind = 2; + * + * @return The enum numeric value on the wire for kind. + */ + @java.lang.Override + public int getKindValue() { + return kind_; + } + + /** + * + * + *
    +   * Used to determine the type of node. May be needed for visualizing
    +   * different kinds of nodes differently. For example, If the node is a
    +   * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] node, it will have a
    +   * condensed representation which can be used to directly embed a description
    +   * of the node in its parent.
    +   * 
    + * + * .google.spanner.v1.PlanNode.Kind kind = 2; + * + * @return The kind. + */ + @java.lang.Override + public com.google.spanner.v1.PlanNode.Kind getKind() { + com.google.spanner.v1.PlanNode.Kind result = + com.google.spanner.v1.PlanNode.Kind.forNumber(kind_); + return result == null ? com.google.spanner.v1.PlanNode.Kind.UNRECOGNIZED : result; + } + + public static final int DISPLAY_NAME_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object displayName_ = ""; + + /** + * + * + *
    +   * The display name for the node.
    +   * 
    + * + * string display_name = 3; + * + * @return The displayName. + */ + @java.lang.Override + public java.lang.String getDisplayName() { + java.lang.Object ref = displayName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + displayName_ = s; + return s; + } + } + + /** + * + * + *
    +   * The display name for the node.
    +   * 
    + * + * string display_name = 3; + * + * @return The bytes for displayName. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDisplayNameBytes() { + java.lang.Object ref = displayName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + displayName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CHILD_LINKS_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private java.util.List childLinks_; + + /** + * + * + *
    +   * List of child node `index`es and their relationship to this parent.
    +   * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + @java.lang.Override + public java.util.List getChildLinksList() { + return childLinks_; + } + + /** + * + * + *
    +   * List of child node `index`es and their relationship to this parent.
    +   * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + @java.lang.Override + public java.util.List + getChildLinksOrBuilderList() { + return childLinks_; + } + + /** + * + * + *
    +   * List of child node `index`es and their relationship to this parent.
    +   * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + @java.lang.Override + public int getChildLinksCount() { + return childLinks_.size(); + } + + /** + * + * + *
    +   * List of child node `index`es and their relationship to this parent.
    +   * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + @java.lang.Override + public com.google.spanner.v1.PlanNode.ChildLink getChildLinks(int index) { + return childLinks_.get(index); + } + + /** + * + * + *
    +   * List of child node `index`es and their relationship to this parent.
    +   * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + @java.lang.Override + public com.google.spanner.v1.PlanNode.ChildLinkOrBuilder getChildLinksOrBuilder(int index) { + return childLinks_.get(index); + } + + public static final int SHORT_REPRESENTATION_FIELD_NUMBER = 5; + private com.google.spanner.v1.PlanNode.ShortRepresentation shortRepresentation_; + + /** + * + * + *
    +   * Condensed representation for
    +   * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes.
    +   * 
    + * + * .google.spanner.v1.PlanNode.ShortRepresentation short_representation = 5; + * + * @return Whether the shortRepresentation field is set. + */ + @java.lang.Override + public boolean hasShortRepresentation() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Condensed representation for
    +   * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes.
    +   * 
    + * + * .google.spanner.v1.PlanNode.ShortRepresentation short_representation = 5; + * + * @return The shortRepresentation. + */ + @java.lang.Override + public com.google.spanner.v1.PlanNode.ShortRepresentation getShortRepresentation() { + return shortRepresentation_ == null + ? com.google.spanner.v1.PlanNode.ShortRepresentation.getDefaultInstance() + : shortRepresentation_; + } + + /** + * + * + *
    +   * Condensed representation for
    +   * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes.
    +   * 
    + * + * .google.spanner.v1.PlanNode.ShortRepresentation short_representation = 5; + */ + @java.lang.Override + public com.google.spanner.v1.PlanNode.ShortRepresentationOrBuilder + getShortRepresentationOrBuilder() { + return shortRepresentation_ == null + ? com.google.spanner.v1.PlanNode.ShortRepresentation.getDefaultInstance() + : shortRepresentation_; + } + + public static final int METADATA_FIELD_NUMBER = 6; + private com.google.protobuf.Struct metadata_; + + /** + * + * + *
    +   * Attributes relevant to the node contained in a group of key-value pairs.
    +   * For example, a Parameter Reference node could have the following
    +   * information in its metadata:
    +   *
    +   * {
    +   * "parameter_reference": "param1",
    +   * "parameter_type": "array"
    +   * }
    +   * 
    + * + * .google.protobuf.Struct metadata = 6; + * + * @return Whether the metadata field is set. + */ + @java.lang.Override + public boolean hasMetadata() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Attributes relevant to the node contained in a group of key-value pairs.
    +   * For example, a Parameter Reference node could have the following
    +   * information in its metadata:
    +   *
    +   * {
    +   * "parameter_reference": "param1",
    +   * "parameter_type": "array"
    +   * }
    +   * 
    + * + * .google.protobuf.Struct metadata = 6; + * + * @return The metadata. + */ + @java.lang.Override + public com.google.protobuf.Struct getMetadata() { + return metadata_ == null ? com.google.protobuf.Struct.getDefaultInstance() : metadata_; + } + + /** + * + * + *
    +   * Attributes relevant to the node contained in a group of key-value pairs.
    +   * For example, a Parameter Reference node could have the following
    +   * information in its metadata:
    +   *
    +   * {
    +   * "parameter_reference": "param1",
    +   * "parameter_type": "array"
    +   * }
    +   * 
    + * + * .google.protobuf.Struct metadata = 6; + */ + @java.lang.Override + public com.google.protobuf.StructOrBuilder getMetadataOrBuilder() { + return metadata_ == null ? com.google.protobuf.Struct.getDefaultInstance() : metadata_; + } + + public static final int EXECUTION_STATS_FIELD_NUMBER = 7; + private com.google.protobuf.Struct executionStats_; + + /** + * + * + *
    +   * The execution statistics associated with the node, contained in a group of
    +   * key-value pairs. Only present if the plan was returned as a result of a
    +   * profile query. For example, number of executions, number of rows/time per
    +   * execution etc.
    +   * 
    + * + * .google.protobuf.Struct execution_stats = 7; + * + * @return Whether the executionStats field is set. + */ + @java.lang.Override + public boolean hasExecutionStats() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * The execution statistics associated with the node, contained in a group of
    +   * key-value pairs. Only present if the plan was returned as a result of a
    +   * profile query. For example, number of executions, number of rows/time per
    +   * execution etc.
    +   * 
    + * + * .google.protobuf.Struct execution_stats = 7; + * + * @return The executionStats. + */ + @java.lang.Override + public com.google.protobuf.Struct getExecutionStats() { + return executionStats_ == null + ? com.google.protobuf.Struct.getDefaultInstance() + : executionStats_; + } + + /** + * + * + *
    +   * The execution statistics associated with the node, contained in a group of
    +   * key-value pairs. Only present if the plan was returned as a result of a
    +   * profile query. For example, number of executions, number of rows/time per
    +   * execution etc.
    +   * 
    + * + * .google.protobuf.Struct execution_stats = 7; + */ + @java.lang.Override + public com.google.protobuf.StructOrBuilder getExecutionStatsOrBuilder() { + return executionStats_ == null + ? com.google.protobuf.Struct.getDefaultInstance() + : executionStats_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (index_ != 0) { + output.writeInt32(1, index_); + } + if (kind_ != com.google.spanner.v1.PlanNode.Kind.KIND_UNSPECIFIED.getNumber()) { + output.writeEnum(2, kind_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(displayName_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, displayName_); + } + for (int i = 0; i < childLinks_.size(); i++) { + output.writeMessage(4, childLinks_.get(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(5, getShortRepresentation()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(6, getMetadata()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(7, getExecutionStats()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (index_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(1, index_); + } + if (kind_ != com.google.spanner.v1.PlanNode.Kind.KIND_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, kind_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(displayName_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, displayName_); + } + for (int i = 0; 
i < childLinks_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, childLinks_.get(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getShortRepresentation()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, getMetadata()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getExecutionStats()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.PlanNode)) { + return super.equals(obj); + } + com.google.spanner.v1.PlanNode other = (com.google.spanner.v1.PlanNode) obj; + + if (getIndex() != other.getIndex()) return false; + if (kind_ != other.kind_) return false; + if (!getDisplayName().equals(other.getDisplayName())) return false; + if (!getChildLinksList().equals(other.getChildLinksList())) return false; + if (hasShortRepresentation() != other.hasShortRepresentation()) return false; + if (hasShortRepresentation()) { + if (!getShortRepresentation().equals(other.getShortRepresentation())) return false; + } + if (hasMetadata() != other.hasMetadata()) return false; + if (hasMetadata()) { + if (!getMetadata().equals(other.getMetadata())) return false; + } + if (hasExecutionStats() != other.hasExecutionStats()) return false; + if (hasExecutionStats()) { + if (!getExecutionStats().equals(other.getExecutionStats())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + 
INDEX_FIELD_NUMBER; + hash = (53 * hash) + getIndex(); + hash = (37 * hash) + KIND_FIELD_NUMBER; + hash = (53 * hash) + kind_; + hash = (37 * hash) + DISPLAY_NAME_FIELD_NUMBER; + hash = (53 * hash) + getDisplayName().hashCode(); + if (getChildLinksCount() > 0) { + hash = (37 * hash) + CHILD_LINKS_FIELD_NUMBER; + hash = (53 * hash) + getChildLinksList().hashCode(); + } + if (hasShortRepresentation()) { + hash = (37 * hash) + SHORT_REPRESENTATION_FIELD_NUMBER; + hash = (53 * hash) + getShortRepresentation().hashCode(); + } + if (hasMetadata()) { + hash = (37 * hash) + METADATA_FIELD_NUMBER; + hash = (53 * hash) + getMetadata().hashCode(); + } + if (hasExecutionStats()) { + hash = (37 * hash) + EXECUTION_STATS_FIELD_NUMBER; + hash = (53 * hash) + getExecutionStats().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.PlanNode parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PlanNode parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PlanNode parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.PlanNode parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PlanNode parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public 
static com.google.spanner.v1.PlanNode parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.PlanNode parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PlanNode parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.PlanNode parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PlanNode parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.PlanNode parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.PlanNode parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder 
newBuilder(com.google.spanner.v1.PlanNode prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Node information for nodes appearing in a
    +   * [QueryPlan.plan_nodes][google.spanner.v1.QueryPlan.plan_nodes].
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.PlanNode} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.PlanNode) + com.google.spanner.v1.PlanNodeOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_PlanNode_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_PlanNode_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.PlanNode.class, com.google.spanner.v1.PlanNode.Builder.class); + } + + // Construct using com.google.spanner.v1.PlanNode.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetChildLinksFieldBuilder(); + internalGetShortRepresentationFieldBuilder(); + internalGetMetadataFieldBuilder(); + internalGetExecutionStatsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + index_ = 0; + kind_ = 0; + displayName_ = ""; + if (childLinksBuilder_ == null) { + childLinks_ = java.util.Collections.emptyList(); + } else { + childLinks_ = null; + childLinksBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + shortRepresentation_ = null; + if (shortRepresentationBuilder_ != null) { + shortRepresentationBuilder_.dispose(); + shortRepresentationBuilder_ = null; + } + metadata_ = null; + if (metadataBuilder_ != null) { + metadataBuilder_.dispose(); + 
metadataBuilder_ = null; + } + executionStats_ = null; + if (executionStatsBuilder_ != null) { + executionStatsBuilder_.dispose(); + executionStatsBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_PlanNode_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.PlanNode getDefaultInstanceForType() { + return com.google.spanner.v1.PlanNode.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.PlanNode build() { + com.google.spanner.v1.PlanNode result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.PlanNode buildPartial() { + com.google.spanner.v1.PlanNode result = new com.google.spanner.v1.PlanNode(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.v1.PlanNode result) { + if (childLinksBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0)) { + childLinks_ = java.util.Collections.unmodifiableList(childLinks_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.childLinks_ = childLinks_; + } else { + result.childLinks_ = childLinksBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.PlanNode result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.index_ = index_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.kind_ = kind_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.displayName_ = displayName_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000010) != 0)) { + result.shortRepresentation_ = + shortRepresentationBuilder_ == null + ? 
shortRepresentation_ + : shortRepresentationBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.metadata_ = metadataBuilder_ == null ? metadata_ : metadataBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.executionStats_ = + executionStatsBuilder_ == null ? executionStats_ : executionStatsBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.PlanNode) { + return mergeFrom((com.google.spanner.v1.PlanNode) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.PlanNode other) { + if (other == com.google.spanner.v1.PlanNode.getDefaultInstance()) return this; + if (other.getIndex() != 0) { + setIndex(other.getIndex()); + } + if (other.kind_ != 0) { + setKindValue(other.getKindValue()); + } + if (!other.getDisplayName().isEmpty()) { + displayName_ = other.displayName_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (childLinksBuilder_ == null) { + if (!other.childLinks_.isEmpty()) { + if (childLinks_.isEmpty()) { + childLinks_ = other.childLinks_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureChildLinksIsMutable(); + childLinks_.addAll(other.childLinks_); + } + onChanged(); + } + } else { + if (!other.childLinks_.isEmpty()) { + if (childLinksBuilder_.isEmpty()) { + childLinksBuilder_.dispose(); + childLinksBuilder_ = null; + childLinks_ = other.childLinks_; + bitField0_ = (bitField0_ & ~0x00000008); + childLinksBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetChildLinksFieldBuilder() + : null; + } else { + childLinksBuilder_.addAllMessages(other.childLinks_); + } + } + } + if (other.hasShortRepresentation()) { + mergeShortRepresentation(other.getShortRepresentation()); + } + if (other.hasMetadata()) { + mergeMetadata(other.getMetadata()); + } + if (other.hasExecutionStats()) { + mergeExecutionStats(other.getExecutionStats()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + index_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 16: + { + kind_ = input.readEnum(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + displayName_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + com.google.spanner.v1.PlanNode.ChildLink m = + input.readMessage( + com.google.spanner.v1.PlanNode.ChildLink.parser(), extensionRegistry); + if (childLinksBuilder_ == null) { + ensureChildLinksIsMutable(); + childLinks_.add(m); + } else { + childLinksBuilder_.addMessage(m); + } + break; + } // case 34 + case 42: + { + input.readMessage( + internalGetShortRepresentationFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 50: + { + input.readMessage( + internalGetMetadataFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000020; + break; + } // case 50 + case 58: + { + input.readMessage( + 
internalGetExecutionStatsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000040; + break; + } // case 58 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int index_; + + /** + * + * + *
    +     * The `PlanNode`'s index in [node
    +     * list][google.spanner.v1.QueryPlan.plan_nodes].
    +     * 
    + * + * int32 index = 1; + * + * @return The index. + */ + @java.lang.Override + public int getIndex() { + return index_; + } + + /** + * + * + *
    +     * The `PlanNode`'s index in [node
    +     * list][google.spanner.v1.QueryPlan.plan_nodes].
    +     * 
    + * + * int32 index = 1; + * + * @param value The index to set. + * @return This builder for chaining. + */ + public Builder setIndex(int value) { + + index_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The `PlanNode`'s index in [node
    +     * list][google.spanner.v1.QueryPlan.plan_nodes].
    +     * 
    + * + * int32 index = 1; + * + * @return This builder for chaining. + */ + public Builder clearIndex() { + bitField0_ = (bitField0_ & ~0x00000001); + index_ = 0; + onChanged(); + return this; + } + + private int kind_ = 0; + + /** + * + * + *
    +     * Used to determine the type of node. May be needed for visualizing
    +     * different kinds of nodes differently. For example, If the node is a
    +     * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] node, it will have a
    +     * condensed representation which can be used to directly embed a description
    +     * of the node in its parent.
    +     * 
    + * + * .google.spanner.v1.PlanNode.Kind kind = 2; + * + * @return The enum numeric value on the wire for kind. + */ + @java.lang.Override + public int getKindValue() { + return kind_; + } + + /** + * + * + *
    +     * Used to determine the type of node. May be needed for visualizing
    +     * different kinds of nodes differently. For example, If the node is a
    +     * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] node, it will have a
    +     * condensed representation which can be used to directly embed a description
    +     * of the node in its parent.
    +     * 
    + * + * .google.spanner.v1.PlanNode.Kind kind = 2; + * + * @param value The enum numeric value on the wire for kind to set. + * @return This builder for chaining. + */ + public Builder setKindValue(int value) { + kind_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Used to determine the type of node. May be needed for visualizing
    +     * different kinds of nodes differently. For example, If the node is a
    +     * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] node, it will have a
    +     * condensed representation which can be used to directly embed a description
    +     * of the node in its parent.
    +     * 
    + * + * .google.spanner.v1.PlanNode.Kind kind = 2; + * + * @return The kind. + */ + @java.lang.Override + public com.google.spanner.v1.PlanNode.Kind getKind() { + com.google.spanner.v1.PlanNode.Kind result = + com.google.spanner.v1.PlanNode.Kind.forNumber(kind_); + return result == null ? com.google.spanner.v1.PlanNode.Kind.UNRECOGNIZED : result; + } + + /** + * + * + *
    +     * Used to determine the type of node. May be needed for visualizing
    +     * different kinds of nodes differently. For example, If the node is a
    +     * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] node, it will have a
    +     * condensed representation which can be used to directly embed a description
    +     * of the node in its parent.
    +     * 
    + * + * .google.spanner.v1.PlanNode.Kind kind = 2; + * + * @param value The kind to set. + * @return This builder for chaining. + */ + public Builder setKind(com.google.spanner.v1.PlanNode.Kind value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + kind_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Used to determine the type of node. May be needed for visualizing
    +     * different kinds of nodes differently. For example, If the node is a
    +     * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] node, it will have a
    +     * condensed representation which can be used to directly embed a description
    +     * of the node in its parent.
    +     * 
    + * + * .google.spanner.v1.PlanNode.Kind kind = 2; + * + * @return This builder for chaining. + */ + public Builder clearKind() { + bitField0_ = (bitField0_ & ~0x00000002); + kind_ = 0; + onChanged(); + return this; + } + + private java.lang.Object displayName_ = ""; + + /** + * + * + *
    +     * The display name for the node.
    +     * 
    + * + * string display_name = 3; + * + * @return The displayName. + */ + public java.lang.String getDisplayName() { + java.lang.Object ref = displayName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + displayName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The display name for the node.
    +     * 
    + * + * string display_name = 3; + * + * @return The bytes for displayName. + */ + public com.google.protobuf.ByteString getDisplayNameBytes() { + java.lang.Object ref = displayName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + displayName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The display name for the node.
    +     * 
    + * + * string display_name = 3; + * + * @param value The displayName to set. + * @return This builder for chaining. + */ + public Builder setDisplayName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + displayName_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The display name for the node.
    +     * 
    + * + * string display_name = 3; + * + * @return This builder for chaining. + */ + public Builder clearDisplayName() { + displayName_ = getDefaultInstance().getDisplayName(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The display name for the node.
    +     * 
    + * + * string display_name = 3; + * + * @param value The bytes for displayName to set. + * @return This builder for chaining. + */ + public Builder setDisplayNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + displayName_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.util.List childLinks_ = + java.util.Collections.emptyList(); + + private void ensureChildLinksIsMutable() { + if (!((bitField0_ & 0x00000008) != 0)) { + childLinks_ = + new java.util.ArrayList(childLinks_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.PlanNode.ChildLink, + com.google.spanner.v1.PlanNode.ChildLink.Builder, + com.google.spanner.v1.PlanNode.ChildLinkOrBuilder> + childLinksBuilder_; + + /** + * + * + *
    +     * List of child node `index`es and their relationship to this parent.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + public java.util.List getChildLinksList() { + if (childLinksBuilder_ == null) { + return java.util.Collections.unmodifiableList(childLinks_); + } else { + return childLinksBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * List of child node `index`es and their relationship to this parent.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + public int getChildLinksCount() { + if (childLinksBuilder_ == null) { + return childLinks_.size(); + } else { + return childLinksBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * List of child node `index`es and their relationship to this parent.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + public com.google.spanner.v1.PlanNode.ChildLink getChildLinks(int index) { + if (childLinksBuilder_ == null) { + return childLinks_.get(index); + } else { + return childLinksBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * List of child node `index`es and their relationship to this parent.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + public Builder setChildLinks(int index, com.google.spanner.v1.PlanNode.ChildLink value) { + if (childLinksBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureChildLinksIsMutable(); + childLinks_.set(index, value); + onChanged(); + } else { + childLinksBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * List of child node `index`es and their relationship to this parent.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + public Builder setChildLinks( + int index, com.google.spanner.v1.PlanNode.ChildLink.Builder builderForValue) { + if (childLinksBuilder_ == null) { + ensureChildLinksIsMutable(); + childLinks_.set(index, builderForValue.build()); + onChanged(); + } else { + childLinksBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of child node `index`es and their relationship to this parent.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + public Builder addChildLinks(com.google.spanner.v1.PlanNode.ChildLink value) { + if (childLinksBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureChildLinksIsMutable(); + childLinks_.add(value); + onChanged(); + } else { + childLinksBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * List of child node `index`es and their relationship to this parent.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + public Builder addChildLinks(int index, com.google.spanner.v1.PlanNode.ChildLink value) { + if (childLinksBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureChildLinksIsMutable(); + childLinks_.add(index, value); + onChanged(); + } else { + childLinksBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * List of child node `index`es and their relationship to this parent.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + public Builder addChildLinks(com.google.spanner.v1.PlanNode.ChildLink.Builder builderForValue) { + if (childLinksBuilder_ == null) { + ensureChildLinksIsMutable(); + childLinks_.add(builderForValue.build()); + onChanged(); + } else { + childLinksBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of child node `index`es and their relationship to this parent.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + public Builder addChildLinks( + int index, com.google.spanner.v1.PlanNode.ChildLink.Builder builderForValue) { + if (childLinksBuilder_ == null) { + ensureChildLinksIsMutable(); + childLinks_.add(index, builderForValue.build()); + onChanged(); + } else { + childLinksBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * List of child node `index`es and their relationship to this parent.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + public Builder addAllChildLinks( + java.lang.Iterable values) { + if (childLinksBuilder_ == null) { + ensureChildLinksIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, childLinks_); + onChanged(); + } else { + childLinksBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * List of child node `index`es and their relationship to this parent.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + public Builder clearChildLinks() { + if (childLinksBuilder_ == null) { + childLinks_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + childLinksBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * List of child node `index`es and their relationship to this parent.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + public Builder removeChildLinks(int index) { + if (childLinksBuilder_ == null) { + ensureChildLinksIsMutable(); + childLinks_.remove(index); + onChanged(); + } else { + childLinksBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * List of child node `index`es and their relationship to this parent.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + public com.google.spanner.v1.PlanNode.ChildLink.Builder getChildLinksBuilder(int index) { + return internalGetChildLinksFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * List of child node `index`es and their relationship to this parent.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + public com.google.spanner.v1.PlanNode.ChildLinkOrBuilder getChildLinksOrBuilder(int index) { + if (childLinksBuilder_ == null) { + return childLinks_.get(index); + } else { + return childLinksBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * List of child node `index`es and their relationship to this parent.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + public java.util.List + getChildLinksOrBuilderList() { + if (childLinksBuilder_ != null) { + return childLinksBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(childLinks_); + } + } + + /** + * + * + *
    +     * List of child node `index`es and their relationship to this parent.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + public com.google.spanner.v1.PlanNode.ChildLink.Builder addChildLinksBuilder() { + return internalGetChildLinksFieldBuilder() + .addBuilder(com.google.spanner.v1.PlanNode.ChildLink.getDefaultInstance()); + } + + /** + * + * + *
    +     * List of child node `index`es and their relationship to this parent.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + public com.google.spanner.v1.PlanNode.ChildLink.Builder addChildLinksBuilder(int index) { + return internalGetChildLinksFieldBuilder() + .addBuilder(index, com.google.spanner.v1.PlanNode.ChildLink.getDefaultInstance()); + } + + /** + * + * + *
    +     * List of child node `index`es and their relationship to this parent.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + public java.util.List + getChildLinksBuilderList() { + return internalGetChildLinksFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.PlanNode.ChildLink, + com.google.spanner.v1.PlanNode.ChildLink.Builder, + com.google.spanner.v1.PlanNode.ChildLinkOrBuilder> + internalGetChildLinksFieldBuilder() { + if (childLinksBuilder_ == null) { + childLinksBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.PlanNode.ChildLink, + com.google.spanner.v1.PlanNode.ChildLink.Builder, + com.google.spanner.v1.PlanNode.ChildLinkOrBuilder>( + childLinks_, ((bitField0_ & 0x00000008) != 0), getParentForChildren(), isClean()); + childLinks_ = null; + } + return childLinksBuilder_; + } + + private com.google.spanner.v1.PlanNode.ShortRepresentation shortRepresentation_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.PlanNode.ShortRepresentation, + com.google.spanner.v1.PlanNode.ShortRepresentation.Builder, + com.google.spanner.v1.PlanNode.ShortRepresentationOrBuilder> + shortRepresentationBuilder_; + + /** + * + * + *
    +     * Condensed representation for
    +     * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes.
    +     * 
    + * + * .google.spanner.v1.PlanNode.ShortRepresentation short_representation = 5; + * + * @return Whether the shortRepresentation field is set. + */ + public boolean hasShortRepresentation() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * Condensed representation for
    +     * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes.
    +     * 
    + * + * .google.spanner.v1.PlanNode.ShortRepresentation short_representation = 5; + * + * @return The shortRepresentation. + */ + public com.google.spanner.v1.PlanNode.ShortRepresentation getShortRepresentation() { + if (shortRepresentationBuilder_ == null) { + return shortRepresentation_ == null + ? com.google.spanner.v1.PlanNode.ShortRepresentation.getDefaultInstance() + : shortRepresentation_; + } else { + return shortRepresentationBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Condensed representation for
    +     * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes.
    +     * 
    + * + * .google.spanner.v1.PlanNode.ShortRepresentation short_representation = 5; + */ + public Builder setShortRepresentation( + com.google.spanner.v1.PlanNode.ShortRepresentation value) { + if (shortRepresentationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + shortRepresentation_ = value; + } else { + shortRepresentationBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Condensed representation for
    +     * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes.
    +     * 
    + * + * .google.spanner.v1.PlanNode.ShortRepresentation short_representation = 5; + */ + public Builder setShortRepresentation( + com.google.spanner.v1.PlanNode.ShortRepresentation.Builder builderForValue) { + if (shortRepresentationBuilder_ == null) { + shortRepresentation_ = builderForValue.build(); + } else { + shortRepresentationBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Condensed representation for
    +     * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes.
    +     * 
    + * + * .google.spanner.v1.PlanNode.ShortRepresentation short_representation = 5; + */ + public Builder mergeShortRepresentation( + com.google.spanner.v1.PlanNode.ShortRepresentation value) { + if (shortRepresentationBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && shortRepresentation_ != null + && shortRepresentation_ + != com.google.spanner.v1.PlanNode.ShortRepresentation.getDefaultInstance()) { + getShortRepresentationBuilder().mergeFrom(value); + } else { + shortRepresentation_ = value; + } + } else { + shortRepresentationBuilder_.mergeFrom(value); + } + if (shortRepresentation_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Condensed representation for
    +     * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes.
    +     * 
    + * + * .google.spanner.v1.PlanNode.ShortRepresentation short_representation = 5; + */ + public Builder clearShortRepresentation() { + bitField0_ = (bitField0_ & ~0x00000010); + shortRepresentation_ = null; + if (shortRepresentationBuilder_ != null) { + shortRepresentationBuilder_.dispose(); + shortRepresentationBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Condensed representation for
    +     * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes.
    +     * 
    + * + * .google.spanner.v1.PlanNode.ShortRepresentation short_representation = 5; + */ + public com.google.spanner.v1.PlanNode.ShortRepresentation.Builder + getShortRepresentationBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetShortRepresentationFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Condensed representation for
    +     * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes.
    +     * 
    + * + * .google.spanner.v1.PlanNode.ShortRepresentation short_representation = 5; + */ + public com.google.spanner.v1.PlanNode.ShortRepresentationOrBuilder + getShortRepresentationOrBuilder() { + if (shortRepresentationBuilder_ != null) { + return shortRepresentationBuilder_.getMessageOrBuilder(); + } else { + return shortRepresentation_ == null + ? com.google.spanner.v1.PlanNode.ShortRepresentation.getDefaultInstance() + : shortRepresentation_; + } + } + + /** + * + * + *
    +     * Condensed representation for
    +     * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes.
    +     * 
    + * + * .google.spanner.v1.PlanNode.ShortRepresentation short_representation = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.PlanNode.ShortRepresentation, + com.google.spanner.v1.PlanNode.ShortRepresentation.Builder, + com.google.spanner.v1.PlanNode.ShortRepresentationOrBuilder> + internalGetShortRepresentationFieldBuilder() { + if (shortRepresentationBuilder_ == null) { + shortRepresentationBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.PlanNode.ShortRepresentation, + com.google.spanner.v1.PlanNode.ShortRepresentation.Builder, + com.google.spanner.v1.PlanNode.ShortRepresentationOrBuilder>( + getShortRepresentation(), getParentForChildren(), isClean()); + shortRepresentation_ = null; + } + return shortRepresentationBuilder_; + } + + private com.google.protobuf.Struct metadata_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Struct, + com.google.protobuf.Struct.Builder, + com.google.protobuf.StructOrBuilder> + metadataBuilder_; + + /** + * + * + *
    +     * Attributes relevant to the node contained in a group of key-value pairs.
    +     * For example, a Parameter Reference node could have the following
    +     * information in its metadata:
    +     *
    +     * {
    +     * "parameter_reference": "param1",
    +     * "parameter_type": "array"
    +     * }
    +     * 
    + * + * .google.protobuf.Struct metadata = 6; + * + * @return Whether the metadata field is set. + */ + public boolean hasMetadata() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
    +     * Attributes relevant to the node contained in a group of key-value pairs.
    +     * For example, a Parameter Reference node could have the following
    +     * information in its metadata:
    +     *
    +     * {
    +     * "parameter_reference": "param1",
    +     * "parameter_type": "array"
    +     * }
    +     * 
    + * + * .google.protobuf.Struct metadata = 6; + * + * @return The metadata. + */ + public com.google.protobuf.Struct getMetadata() { + if (metadataBuilder_ == null) { + return metadata_ == null ? com.google.protobuf.Struct.getDefaultInstance() : metadata_; + } else { + return metadataBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Attributes relevant to the node contained in a group of key-value pairs.
    +     * For example, a Parameter Reference node could have the following
    +     * information in its metadata:
    +     *
    +     * {
    +     * "parameter_reference": "param1",
    +     * "parameter_type": "array"
    +     * }
    +     * 
    + * + * .google.protobuf.Struct metadata = 6; + */ + public Builder setMetadata(com.google.protobuf.Struct value) { + if (metadataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + metadata_ = value; + } else { + metadataBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Attributes relevant to the node contained in a group of key-value pairs.
    +     * For example, a Parameter Reference node could have the following
    +     * information in its metadata:
    +     *
    +     * {
    +     * "parameter_reference": "param1",
    +     * "parameter_type": "array"
    +     * }
    +     * 
    + * + * .google.protobuf.Struct metadata = 6; + */ + public Builder setMetadata(com.google.protobuf.Struct.Builder builderForValue) { + if (metadataBuilder_ == null) { + metadata_ = builderForValue.build(); + } else { + metadataBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Attributes relevant to the node contained in a group of key-value pairs.
    +     * For example, a Parameter Reference node could have the following
    +     * information in its metadata:
    +     *
    +     * {
    +     * "parameter_reference": "param1",
    +     * "parameter_type": "array"
    +     * }
    +     * 
    + * + * .google.protobuf.Struct metadata = 6; + */ + public Builder mergeMetadata(com.google.protobuf.Struct value) { + if (metadataBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0) + && metadata_ != null + && metadata_ != com.google.protobuf.Struct.getDefaultInstance()) { + getMetadataBuilder().mergeFrom(value); + } else { + metadata_ = value; + } + } else { + metadataBuilder_.mergeFrom(value); + } + if (metadata_ != null) { + bitField0_ |= 0x00000020; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Attributes relevant to the node contained in a group of key-value pairs.
    +     * For example, a Parameter Reference node could have the following
    +     * information in its metadata:
    +     *
    +     * {
    +     * "parameter_reference": "param1",
    +     * "parameter_type": "array"
    +     * }
    +     * 
    + * + * .google.protobuf.Struct metadata = 6; + */ + public Builder clearMetadata() { + bitField0_ = (bitField0_ & ~0x00000020); + metadata_ = null; + if (metadataBuilder_ != null) { + metadataBuilder_.dispose(); + metadataBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Attributes relevant to the node contained in a group of key-value pairs.
    +     * For example, a Parameter Reference node could have the following
    +     * information in its metadata:
    +     *
    +     * {
    +     * "parameter_reference": "param1",
    +     * "parameter_type": "array"
    +     * }
    +     * 
    + * + * .google.protobuf.Struct metadata = 6; + */ + public com.google.protobuf.Struct.Builder getMetadataBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return internalGetMetadataFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Attributes relevant to the node contained in a group of key-value pairs.
    +     * For example, a Parameter Reference node could have the following
    +     * information in its metadata:
    +     *
    +     * {
    +     * "parameter_reference": "param1",
    +     * "parameter_type": "array"
    +     * }
    +     * 
    + * + * .google.protobuf.Struct metadata = 6; + */ + public com.google.protobuf.StructOrBuilder getMetadataOrBuilder() { + if (metadataBuilder_ != null) { + return metadataBuilder_.getMessageOrBuilder(); + } else { + return metadata_ == null ? com.google.protobuf.Struct.getDefaultInstance() : metadata_; + } + } + + /** + * + * + *
    +     * Attributes relevant to the node contained in a group of key-value pairs.
    +     * For example, a Parameter Reference node could have the following
    +     * information in its metadata:
    +     *
    +     * {
    +     * "parameter_reference": "param1",
    +     * "parameter_type": "array"
    +     * }
    +     * 
    + * + * .google.protobuf.Struct metadata = 6; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Struct, + com.google.protobuf.Struct.Builder, + com.google.protobuf.StructOrBuilder> + internalGetMetadataFieldBuilder() { + if (metadataBuilder_ == null) { + metadataBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Struct, + com.google.protobuf.Struct.Builder, + com.google.protobuf.StructOrBuilder>( + getMetadata(), getParentForChildren(), isClean()); + metadata_ = null; + } + return metadataBuilder_; + } + + private com.google.protobuf.Struct executionStats_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Struct, + com.google.protobuf.Struct.Builder, + com.google.protobuf.StructOrBuilder> + executionStatsBuilder_; + + /** + * + * + *
    +     * The execution statistics associated with the node, contained in a group of
    +     * key-value pairs. Only present if the plan was returned as a result of a
    +     * profile query. For example, number of executions, number of rows/time per
    +     * execution etc.
    +     * 
    + * + * .google.protobuf.Struct execution_stats = 7; + * + * @return Whether the executionStats field is set. + */ + public boolean hasExecutionStats() { + return ((bitField0_ & 0x00000040) != 0); + } + + /** + * + * + *
    +     * The execution statistics associated with the node, contained in a group of
    +     * key-value pairs. Only present if the plan was returned as a result of a
    +     * profile query. For example, number of executions, number of rows/time per
    +     * execution etc.
    +     * 
    + * + * .google.protobuf.Struct execution_stats = 7; + * + * @return The executionStats. + */ + public com.google.protobuf.Struct getExecutionStats() { + if (executionStatsBuilder_ == null) { + return executionStats_ == null + ? com.google.protobuf.Struct.getDefaultInstance() + : executionStats_; + } else { + return executionStatsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The execution statistics associated with the node, contained in a group of
    +     * key-value pairs. Only present if the plan was returned as a result of a
    +     * profile query. For example, number of executions, number of rows/time per
    +     * execution etc.
    +     * 
    + * + * .google.protobuf.Struct execution_stats = 7; + */ + public Builder setExecutionStats(com.google.protobuf.Struct value) { + if (executionStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + executionStats_ = value; + } else { + executionStatsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The execution statistics associated with the node, contained in a group of
    +     * key-value pairs. Only present if the plan was returned as a result of a
    +     * profile query. For example, number of executions, number of rows/time per
    +     * execution etc.
    +     * 
    + * + * .google.protobuf.Struct execution_stats = 7; + */ + public Builder setExecutionStats(com.google.protobuf.Struct.Builder builderForValue) { + if (executionStatsBuilder_ == null) { + executionStats_ = builderForValue.build(); + } else { + executionStatsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The execution statistics associated with the node, contained in a group of
    +     * key-value pairs. Only present if the plan was returned as a result of a
    +     * profile query. For example, number of executions, number of rows/time per
    +     * execution etc.
    +     * 
    + * + * .google.protobuf.Struct execution_stats = 7; + */ + public Builder mergeExecutionStats(com.google.protobuf.Struct value) { + if (executionStatsBuilder_ == null) { + if (((bitField0_ & 0x00000040) != 0) + && executionStats_ != null + && executionStats_ != com.google.protobuf.Struct.getDefaultInstance()) { + getExecutionStatsBuilder().mergeFrom(value); + } else { + executionStats_ = value; + } + } else { + executionStatsBuilder_.mergeFrom(value); + } + if (executionStats_ != null) { + bitField0_ |= 0x00000040; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The execution statistics associated with the node, contained in a group of
    +     * key-value pairs. Only present if the plan was returned as a result of a
    +     * profile query. For example, number of executions, number of rows/time per
    +     * execution etc.
    +     * 
    + * + * .google.protobuf.Struct execution_stats = 7; + */ + public Builder clearExecutionStats() { + bitField0_ = (bitField0_ & ~0x00000040); + executionStats_ = null; + if (executionStatsBuilder_ != null) { + executionStatsBuilder_.dispose(); + executionStatsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The execution statistics associated with the node, contained in a group of
    +     * key-value pairs. Only present if the plan was returned as a result of a
    +     * profile query. For example, number of executions, number of rows/time per
    +     * execution etc.
    +     * 
    + * + * .google.protobuf.Struct execution_stats = 7; + */ + public com.google.protobuf.Struct.Builder getExecutionStatsBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return internalGetExecutionStatsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The execution statistics associated with the node, contained in a group of
    +     * key-value pairs. Only present if the plan was returned as a result of a
    +     * profile query. For example, number of executions, number of rows/time per
    +     * execution etc.
    +     * 
    + * + * .google.protobuf.Struct execution_stats = 7; + */ + public com.google.protobuf.StructOrBuilder getExecutionStatsOrBuilder() { + if (executionStatsBuilder_ != null) { + return executionStatsBuilder_.getMessageOrBuilder(); + } else { + return executionStats_ == null + ? com.google.protobuf.Struct.getDefaultInstance() + : executionStats_; + } + } + + /** + * + * + *
    +     * The execution statistics associated with the node, contained in a group of
    +     * key-value pairs. Only present if the plan was returned as a result of a
    +     * profile query. For example, number of executions, number of rows/time per
    +     * execution etc.
    +     * 
    + * + * .google.protobuf.Struct execution_stats = 7; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Struct, + com.google.protobuf.Struct.Builder, + com.google.protobuf.StructOrBuilder> + internalGetExecutionStatsFieldBuilder() { + if (executionStatsBuilder_ == null) { + executionStatsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Struct, + com.google.protobuf.Struct.Builder, + com.google.protobuf.StructOrBuilder>( + getExecutionStats(), getParentForChildren(), isClean()); + executionStats_ = null; + } + return executionStatsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.PlanNode) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.PlanNode) + private static final com.google.spanner.v1.PlanNode DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.PlanNode(); + } + + public static com.google.spanner.v1.PlanNode getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PlanNode parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.PlanNode getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PlanNodeOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PlanNodeOrBuilder.java new file mode 100644 index 000000000000..75a22e50173c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PlanNodeOrBuilder.java @@ -0,0 +1,302 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/query_plan.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface PlanNodeOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.PlanNode) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The `PlanNode`'s index in [node
    +   * list][google.spanner.v1.QueryPlan.plan_nodes].
    +   * 
    + * + * int32 index = 1; + * + * @return The index. + */ + int getIndex(); + + /** + * + * + *
    +   * Used to determine the type of node. May be needed for visualizing
    +   * different kinds of nodes differently. For example, If the node is a
    +   * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] node, it will have a
    +   * condensed representation which can be used to directly embed a description
    +   * of the node in its parent.
    +   * 
    + * + * .google.spanner.v1.PlanNode.Kind kind = 2; + * + * @return The enum numeric value on the wire for kind. + */ + int getKindValue(); + + /** + * + * + *
    +   * Used to determine the type of node. May be needed for visualizing
    +   * different kinds of nodes differently. For example, If the node is a
    +   * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] node, it will have a
    +   * condensed representation which can be used to directly embed a description
    +   * of the node in its parent.
    +   * 
    + * + * .google.spanner.v1.PlanNode.Kind kind = 2; + * + * @return The kind. + */ + com.google.spanner.v1.PlanNode.Kind getKind(); + + /** + * + * + *
    +   * The display name for the node.
    +   * 
    + * + * string display_name = 3; + * + * @return The displayName. + */ + java.lang.String getDisplayName(); + + /** + * + * + *
    +   * The display name for the node.
    +   * 
    + * + * string display_name = 3; + * + * @return The bytes for displayName. + */ + com.google.protobuf.ByteString getDisplayNameBytes(); + + /** + * + * + *
    +   * List of child node `index`es and their relationship to this parent.
    +   * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + java.util.List getChildLinksList(); + + /** + * + * + *
    +   * List of child node `index`es and their relationship to this parent.
    +   * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + com.google.spanner.v1.PlanNode.ChildLink getChildLinks(int index); + + /** + * + * + *
    +   * List of child node `index`es and their relationship to this parent.
    +   * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + int getChildLinksCount(); + + /** + * + * + *
    +   * List of child node `index`es and their relationship to this parent.
    +   * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + java.util.List + getChildLinksOrBuilderList(); + + /** + * + * + *
    +   * List of child node `index`es and their relationship to this parent.
    +   * 
    + * + * repeated .google.spanner.v1.PlanNode.ChildLink child_links = 4; + */ + com.google.spanner.v1.PlanNode.ChildLinkOrBuilder getChildLinksOrBuilder(int index); + + /** + * + * + *
    +   * Condensed representation for
    +   * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes.
    +   * 
    + * + * .google.spanner.v1.PlanNode.ShortRepresentation short_representation = 5; + * + * @return Whether the shortRepresentation field is set. + */ + boolean hasShortRepresentation(); + + /** + * + * + *
    +   * Condensed representation for
    +   * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes.
    +   * 
    + * + * .google.spanner.v1.PlanNode.ShortRepresentation short_representation = 5; + * + * @return The shortRepresentation. + */ + com.google.spanner.v1.PlanNode.ShortRepresentation getShortRepresentation(); + + /** + * + * + *
    +   * Condensed representation for
    +   * [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes.
    +   * 
    + * + * .google.spanner.v1.PlanNode.ShortRepresentation short_representation = 5; + */ + com.google.spanner.v1.PlanNode.ShortRepresentationOrBuilder getShortRepresentationOrBuilder(); + + /** + * + * + *
    +   * Attributes relevant to the node contained in a group of key-value pairs.
    +   * For example, a Parameter Reference node could have the following
    +   * information in its metadata:
    +   *
    +   * {
    +   * "parameter_reference": "param1",
    +   * "parameter_type": "array"
    +   * }
    +   * 
    + * + * .google.protobuf.Struct metadata = 6; + * + * @return Whether the metadata field is set. + */ + boolean hasMetadata(); + + /** + * + * + *
    +   * Attributes relevant to the node contained in a group of key-value pairs.
    +   * For example, a Parameter Reference node could have the following
    +   * information in its metadata:
    +   *
    +   * {
    +   * "parameter_reference": "param1",
    +   * "parameter_type": "array"
    +   * }
    +   * 
    + * + * .google.protobuf.Struct metadata = 6; + * + * @return The metadata. + */ + com.google.protobuf.Struct getMetadata(); + + /** + * + * + *
    +   * Attributes relevant to the node contained in a group of key-value pairs.
    +   * For example, a Parameter Reference node could have the following
    +   * information in its metadata:
    +   *
    +   * {
    +   * "parameter_reference": "param1",
    +   * "parameter_type": "array"
    +   * }
    +   * 
    + * + * .google.protobuf.Struct metadata = 6; + */ + com.google.protobuf.StructOrBuilder getMetadataOrBuilder(); + + /** + * + * + *
    +   * The execution statistics associated with the node, contained in a group of
    +   * key-value pairs. Only present if the plan was returned as a result of a
    +   * profile query. For example, number of executions, number of rows/time per
    +   * execution etc.
    +   * 
    + * + * .google.protobuf.Struct execution_stats = 7; + * + * @return Whether the executionStats field is set. + */ + boolean hasExecutionStats(); + + /** + * + * + *
    +   * The execution statistics associated with the node, contained in a group of
    +   * key-value pairs. Only present if the plan was returned as a result of a
    +   * profile query. For example, number of executions, number of rows/time per
    +   * execution etc.
    +   * 
    + * + * .google.protobuf.Struct execution_stats = 7; + * + * @return The executionStats. + */ + com.google.protobuf.Struct getExecutionStats(); + + /** + * + * + *
    +   * The execution statistics associated with the node, contained in a group of
    +   * key-value pairs. Only present if the plan was returned as a result of a
    +   * profile query. For example, number of executions, number of rows/time per
    +   * execution etc.
    +   * 
    + * + * .google.protobuf.Struct execution_stats = 7; + */ + com.google.protobuf.StructOrBuilder getExecutionStatsOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryAdvisorResult.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryAdvisorResult.java new file mode 100644 index 000000000000..abbcc3cf3a01 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryAdvisorResult.java @@ -0,0 +1,1884 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/query_plan.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * Output of query advisor analysis.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.QueryAdvisorResult} + */ +@com.google.protobuf.Generated +public final class QueryAdvisorResult extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.QueryAdvisorResult) + QueryAdvisorResultOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "QueryAdvisorResult"); + } + + // Use QueryAdvisorResult.newBuilder() to construct. + private QueryAdvisorResult(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private QueryAdvisorResult() { + indexAdvice_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_QueryAdvisorResult_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_QueryAdvisorResult_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.QueryAdvisorResult.class, + com.google.spanner.v1.QueryAdvisorResult.Builder.class); + } + + public interface IndexAdviceOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.QueryAdvisorResult.IndexAdvice) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * Optional. DDL statements to add new indexes that will improve the query.
    +     * 
    + * + * repeated string ddl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the ddl. + */ + java.util.List getDdlList(); + + /** + * + * + *
    +     * Optional. DDL statements to add new indexes that will improve the query.
    +     * 
    + * + * repeated string ddl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of ddl. + */ + int getDdlCount(); + + /** + * + * + *
    +     * Optional. DDL statements to add new indexes that will improve the query.
    +     * 
    + * + * repeated string ddl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The ddl at the given index. + */ + java.lang.String getDdl(int index); + + /** + * + * + *
    +     * Optional. DDL statements to add new indexes that will improve the query.
    +     * 
    + * + * repeated string ddl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the ddl at the given index. + */ + com.google.protobuf.ByteString getDdlBytes(int index); + + /** + * + * + *
    +     * Optional. Estimated latency improvement factor. For example if the query
    +     * currently takes 500 ms to run and the estimated latency with new indexes
    +     * is 100 ms this field will be 5.
    +     * 
    + * + * double improvement_factor = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The improvementFactor. + */ + double getImprovementFactor(); + } + + /** + * + * + *
    +   * Recommendation to add new indexes to run queries more efficiently.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.QueryAdvisorResult.IndexAdvice} + */ + public static final class IndexAdvice extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.QueryAdvisorResult.IndexAdvice) + IndexAdviceOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "IndexAdvice"); + } + + // Use IndexAdvice.newBuilder() to construct. + private IndexAdvice(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private IndexAdvice() { + ddl_ = com.google.protobuf.LazyStringArrayList.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_QueryAdvisorResult_IndexAdvice_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_QueryAdvisorResult_IndexAdvice_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.QueryAdvisorResult.IndexAdvice.class, + com.google.spanner.v1.QueryAdvisorResult.IndexAdvice.Builder.class); + } + + public static final int DDL_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList ddl_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +     * Optional. DDL statements to add new indexes that will improve the query.
    +     * 
    + * + * repeated string ddl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the ddl. + */ + public com.google.protobuf.ProtocolStringList getDdlList() { + return ddl_; + } + + /** + * + * + *
    +     * Optional. DDL statements to add new indexes that will improve the query.
    +     * 
    + * + * repeated string ddl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of ddl. + */ + public int getDdlCount() { + return ddl_.size(); + } + + /** + * + * + *
    +     * Optional. DDL statements to add new indexes that will improve the query.
    +     * 
    + * + * repeated string ddl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The ddl at the given index. + */ + public java.lang.String getDdl(int index) { + return ddl_.get(index); + } + + /** + * + * + *
    +     * Optional. DDL statements to add new indexes that will improve the query.
    +     * 
    + * + * repeated string ddl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the ddl at the given index. + */ + public com.google.protobuf.ByteString getDdlBytes(int index) { + return ddl_.getByteString(index); + } + + public static final int IMPROVEMENT_FACTOR_FIELD_NUMBER = 2; + private double improvementFactor_ = 0D; + + /** + * + * + *
    +     * Optional. Estimated latency improvement factor. For example if the query
    +     * currently takes 500 ms to run and the estimated latency with new indexes
    +     * is 100 ms this field will be 5.
    +     * 
    + * + * double improvement_factor = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The improvementFactor. + */ + @java.lang.Override + public double getImprovementFactor() { + return improvementFactor_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < ddl_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, ddl_.getRaw(i)); + } + if (java.lang.Double.doubleToRawLongBits(improvementFactor_) != 0) { + output.writeDouble(2, improvementFactor_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < ddl_.size(); i++) { + dataSize += computeStringSizeNoTag(ddl_.getRaw(i)); + } + size += dataSize; + size += 1 * getDdlList().size(); + } + if (java.lang.Double.doubleToRawLongBits(improvementFactor_) != 0) { + size += com.google.protobuf.CodedOutputStream.computeDoubleSize(2, improvementFactor_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.QueryAdvisorResult.IndexAdvice)) { + return super.equals(obj); + } + com.google.spanner.v1.QueryAdvisorResult.IndexAdvice other = + (com.google.spanner.v1.QueryAdvisorResult.IndexAdvice) obj; + + if (!getDdlList().equals(other.getDdlList())) return false; + if (java.lang.Double.doubleToLongBits(getImprovementFactor()) + != 
java.lang.Double.doubleToLongBits(other.getImprovementFactor())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getDdlCount() > 0) { + hash = (37 * hash) + DDL_FIELD_NUMBER; + hash = (53 * hash) + getDdlList().hashCode(); + } + hash = (37 * hash) + IMPROVEMENT_FACTOR_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getImprovementFactor())); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.QueryAdvisorResult.IndexAdvice parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.QueryAdvisorResult.IndexAdvice parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.QueryAdvisorResult.IndexAdvice parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.QueryAdvisorResult.IndexAdvice parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.QueryAdvisorResult.IndexAdvice parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.v1.QueryAdvisorResult.IndexAdvice parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.QueryAdvisorResult.IndexAdvice parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.QueryAdvisorResult.IndexAdvice parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.QueryAdvisorResult.IndexAdvice parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.QueryAdvisorResult.IndexAdvice parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.QueryAdvisorResult.IndexAdvice parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.QueryAdvisorResult.IndexAdvice parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder 
newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.v1.QueryAdvisorResult.IndexAdvice prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * Recommendation to add new indexes to run queries more efficiently.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.QueryAdvisorResult.IndexAdvice} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.QueryAdvisorResult.IndexAdvice) + com.google.spanner.v1.QueryAdvisorResult.IndexAdviceOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_QueryAdvisorResult_IndexAdvice_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_QueryAdvisorResult_IndexAdvice_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.QueryAdvisorResult.IndexAdvice.class, + com.google.spanner.v1.QueryAdvisorResult.IndexAdvice.Builder.class); + } + + // Construct using com.google.spanner.v1.QueryAdvisorResult.IndexAdvice.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + ddl_ = com.google.protobuf.LazyStringArrayList.emptyList(); + improvementFactor_ = 0D; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_QueryAdvisorResult_IndexAdvice_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.QueryAdvisorResult.IndexAdvice getDefaultInstanceForType() { + return com.google.spanner.v1.QueryAdvisorResult.IndexAdvice.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.QueryAdvisorResult.IndexAdvice build() { + com.google.spanner.v1.QueryAdvisorResult.IndexAdvice 
result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.QueryAdvisorResult.IndexAdvice buildPartial() { + com.google.spanner.v1.QueryAdvisorResult.IndexAdvice result = + new com.google.spanner.v1.QueryAdvisorResult.IndexAdvice(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.QueryAdvisorResult.IndexAdvice result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + ddl_.makeImmutable(); + result.ddl_ = ddl_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.improvementFactor_ = improvementFactor_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.QueryAdvisorResult.IndexAdvice) { + return mergeFrom((com.google.spanner.v1.QueryAdvisorResult.IndexAdvice) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.QueryAdvisorResult.IndexAdvice other) { + if (other == com.google.spanner.v1.QueryAdvisorResult.IndexAdvice.getDefaultInstance()) + return this; + if (!other.ddl_.isEmpty()) { + if (ddl_.isEmpty()) { + ddl_ = other.ddl_; + bitField0_ |= 0x00000001; + } else { + ensureDdlIsMutable(); + ddl_.addAll(other.ddl_); + } + onChanged(); + } + if (java.lang.Double.doubleToRawLongBits(other.getImprovementFactor()) != 0) { + setImprovementFactor(other.getImprovementFactor()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if 
(extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureDdlIsMutable(); + ddl_.add(s); + break; + } // case 10 + case 17: + { + improvementFactor_ = input.readDouble(); + bitField0_ |= 0x00000002; + break; + } // case 17 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringArrayList ddl_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureDdlIsMutable() { + if (!ddl_.isModifiable()) { + ddl_ = new com.google.protobuf.LazyStringArrayList(ddl_); + } + bitField0_ |= 0x00000001; + } + + /** + * + * + *
    +       * Optional. DDL statements to add new indexes that will improve the query.
    +       * 
    + * + * repeated string ddl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the ddl. + */ + public com.google.protobuf.ProtocolStringList getDdlList() { + ddl_.makeImmutable(); + return ddl_; + } + + /** + * + * + *
    +       * Optional. DDL statements to add new indexes that will improve the query.
    +       * 
    + * + * repeated string ddl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of ddl. + */ + public int getDdlCount() { + return ddl_.size(); + } + + /** + * + * + *
    +       * Optional. DDL statements to add new indexes that will improve the query.
    +       * 
    + * + * repeated string ddl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The ddl at the given index. + */ + public java.lang.String getDdl(int index) { + return ddl_.get(index); + } + + /** + * + * + *
    +       * Optional. DDL statements to add new indexes that will improve the query.
    +       * 
    + * + * repeated string ddl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the ddl at the given index. + */ + public com.google.protobuf.ByteString getDdlBytes(int index) { + return ddl_.getByteString(index); + } + + /** + * + * + *
    +       * Optional. DDL statements to add new indexes that will improve the query.
    +       * 
    + * + * repeated string ddl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The ddl to set. + * @return This builder for chaining. + */ + public Builder setDdl(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureDdlIsMutable(); + ddl_.set(index, value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Optional. DDL statements to add new indexes that will improve the query.
    +       * 
    + * + * repeated string ddl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The ddl to add. + * @return This builder for chaining. + */ + public Builder addDdl(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureDdlIsMutable(); + ddl_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Optional. DDL statements to add new indexes that will improve the query.
    +       * 
    + * + * repeated string ddl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The ddl to add. + * @return This builder for chaining. + */ + public Builder addAllDdl(java.lang.Iterable values) { + ensureDdlIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, ddl_); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Optional. DDL statements to add new indexes that will improve the query.
    +       * 
    + * + * repeated string ddl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearDdl() { + ddl_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Optional. DDL statements to add new indexes that will improve the query.
    +       * 
    + * + * repeated string ddl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the ddl to add. + * @return This builder for chaining. + */ + public Builder addDdlBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureDdlIsMutable(); + ddl_.add(value); + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private double improvementFactor_; + + /** + * + * + *
    +       * Optional. Estimated latency improvement factor. For example if the query
    +       * currently takes 500 ms to run and the estimated latency with new indexes
    +       * is 100 ms this field will be 5.
    +       * 
    + * + * double improvement_factor = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The improvementFactor. + */ + @java.lang.Override + public double getImprovementFactor() { + return improvementFactor_; + } + + /** + * + * + *
    +       * Optional. Estimated latency improvement factor. For example if the query
    +       * currently takes 500 ms to run and the estimated latency with new indexes
    +       * is 100 ms this field will be 5.
    +       * 
    + * + * double improvement_factor = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The improvementFactor to set. + * @return This builder for chaining. + */ + public Builder setImprovementFactor(double value) { + + improvementFactor_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Optional. Estimated latency improvement factor. For example if the query
    +       * currently takes 500 ms to run and the estimated latency with new indexes
    +       * is 100 ms this field will be 5.
    +       * 
    + * + * double improvement_factor = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearImprovementFactor() { + bitField0_ = (bitField0_ & ~0x00000002); + improvementFactor_ = 0D; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.QueryAdvisorResult.IndexAdvice) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.QueryAdvisorResult.IndexAdvice) + private static final com.google.spanner.v1.QueryAdvisorResult.IndexAdvice DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.QueryAdvisorResult.IndexAdvice(); + } + + public static com.google.spanner.v1.QueryAdvisorResult.IndexAdvice getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public IndexAdvice parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.QueryAdvisorResult.IndexAdvice getDefaultInstanceForType() { + 
return DEFAULT_INSTANCE; + } + } + + public static final int INDEX_ADVICE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List indexAdvice_; + + /** + * + * + *
    +   * Optional. Index Recommendation for a query. This is an optional field and
    +   * the recommendation will only be available when the recommendation
    +   * guarantees significant improvement in query performance.
    +   * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List getIndexAdviceList() { + return indexAdvice_; + } + + /** + * + * + *
    +   * Optional. Index Recommendation for a query. This is an optional field and
    +   * the recommendation will only be available when the recommendation
    +   * guarantees significant improvement in query performance.
    +   * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List + getIndexAdviceOrBuilderList() { + return indexAdvice_; + } + + /** + * + * + *
    +   * Optional. Index Recommendation for a query. This is an optional field and
    +   * the recommendation will only be available when the recommendation
    +   * guarantees significant improvement in query performance.
    +   * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public int getIndexAdviceCount() { + return indexAdvice_.size(); + } + + /** + * + * + *
    +   * Optional. Index Recommendation for a query. This is an optional field and
    +   * the recommendation will only be available when the recommendation
    +   * guarantees significant improvement in query performance.
    +   * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.QueryAdvisorResult.IndexAdvice getIndexAdvice(int index) { + return indexAdvice_.get(index); + } + + /** + * + * + *
    +   * Optional. Index Recommendation for a query. This is an optional field and
    +   * the recommendation will only be available when the recommendation
    +   * guarantees significant improvement in query performance.
    +   * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.QueryAdvisorResult.IndexAdviceOrBuilder getIndexAdviceOrBuilder( + int index) { + return indexAdvice_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < indexAdvice_.size(); i++) { + output.writeMessage(1, indexAdvice_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < indexAdvice_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, indexAdvice_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.QueryAdvisorResult)) { + return super.equals(obj); + } + com.google.spanner.v1.QueryAdvisorResult other = (com.google.spanner.v1.QueryAdvisorResult) obj; + + if (!getIndexAdviceList().equals(other.getIndexAdviceList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getIndexAdviceCount() > 0) { + hash = (37 * hash) + INDEX_ADVICE_FIELD_NUMBER; + hash = 
(53 * hash) + getIndexAdviceList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.QueryAdvisorResult parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.QueryAdvisorResult parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.QueryAdvisorResult parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.QueryAdvisorResult parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.QueryAdvisorResult parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.QueryAdvisorResult parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.QueryAdvisorResult parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.QueryAdvisorResult parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.QueryAdvisorResult parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.QueryAdvisorResult parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.QueryAdvisorResult parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.QueryAdvisorResult parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.QueryAdvisorResult prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Output of query advisor analysis.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.QueryAdvisorResult} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.QueryAdvisorResult) + com.google.spanner.v1.QueryAdvisorResultOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_QueryAdvisorResult_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_QueryAdvisorResult_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.QueryAdvisorResult.class, + com.google.spanner.v1.QueryAdvisorResult.Builder.class); + } + + // Construct using com.google.spanner.v1.QueryAdvisorResult.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (indexAdviceBuilder_ == null) { + indexAdvice_ = java.util.Collections.emptyList(); + } else { + indexAdvice_ = null; + indexAdviceBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_QueryAdvisorResult_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.QueryAdvisorResult getDefaultInstanceForType() { + return com.google.spanner.v1.QueryAdvisorResult.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.QueryAdvisorResult build() { + com.google.spanner.v1.QueryAdvisorResult result = buildPartial(); + if 
(!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.QueryAdvisorResult buildPartial() { + com.google.spanner.v1.QueryAdvisorResult result = + new com.google.spanner.v1.QueryAdvisorResult(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.v1.QueryAdvisorResult result) { + if (indexAdviceBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + indexAdvice_ = java.util.Collections.unmodifiableList(indexAdvice_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.indexAdvice_ = indexAdvice_; + } else { + result.indexAdvice_ = indexAdviceBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.QueryAdvisorResult result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.QueryAdvisorResult) { + return mergeFrom((com.google.spanner.v1.QueryAdvisorResult) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.QueryAdvisorResult other) { + if (other == com.google.spanner.v1.QueryAdvisorResult.getDefaultInstance()) return this; + if (indexAdviceBuilder_ == null) { + if (!other.indexAdvice_.isEmpty()) { + if (indexAdvice_.isEmpty()) { + indexAdvice_ = other.indexAdvice_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureIndexAdviceIsMutable(); + indexAdvice_.addAll(other.indexAdvice_); + } + onChanged(); + } + } else { + if (!other.indexAdvice_.isEmpty()) { + if (indexAdviceBuilder_.isEmpty()) { + indexAdviceBuilder_.dispose(); + indexAdviceBuilder_ = null; + indexAdvice_ = other.indexAdvice_; + bitField0_ = (bitField0_ & ~0x00000001); + indexAdviceBuilder_ = + 
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? internalGetIndexAdviceFieldBuilder() + : null; + } else { + indexAdviceBuilder_.addAllMessages(other.indexAdvice_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.v1.QueryAdvisorResult.IndexAdvice m = + input.readMessage( + com.google.spanner.v1.QueryAdvisorResult.IndexAdvice.parser(), + extensionRegistry); + if (indexAdviceBuilder_ == null) { + ensureIndexAdviceIsMutable(); + indexAdvice_.add(m); + } else { + indexAdviceBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List indexAdvice_ = + java.util.Collections.emptyList(); + + private void ensureIndexAdviceIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + indexAdvice_ = + new java.util.ArrayList( + indexAdvice_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.QueryAdvisorResult.IndexAdvice, + com.google.spanner.v1.QueryAdvisorResult.IndexAdvice.Builder, + 
com.google.spanner.v1.QueryAdvisorResult.IndexAdviceOrBuilder> + indexAdviceBuilder_; + + /** + * + * + *
    +     * Optional. Index Recommendation for a query. This is an optional field and
    +     * the recommendation will only be available when the recommendation
    +     * guarantees significant improvement in query performance.
    +     * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getIndexAdviceList() { + if (indexAdviceBuilder_ == null) { + return java.util.Collections.unmodifiableList(indexAdvice_); + } else { + return indexAdviceBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Optional. Index Recommendation for a query. This is an optional field and
    +     * the recommendation will only be available when the recommendation
    +     * guarantees significant improvement in query performance.
    +     * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getIndexAdviceCount() { + if (indexAdviceBuilder_ == null) { + return indexAdvice_.size(); + } else { + return indexAdviceBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Optional. Index Recommendation for a query. This is an optional field and
    +     * the recommendation will only be available when the recommendation
    +     * guarantees significant improvement in query performance.
    +     * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.QueryAdvisorResult.IndexAdvice getIndexAdvice(int index) { + if (indexAdviceBuilder_ == null) { + return indexAdvice_.get(index); + } else { + return indexAdviceBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Optional. Index Recommendation for a query. This is an optional field and
    +     * the recommendation will only be available when the recommendation
    +     * guarantees significant improvement in query performance.
    +     * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setIndexAdvice( + int index, com.google.spanner.v1.QueryAdvisorResult.IndexAdvice value) { + if (indexAdviceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureIndexAdviceIsMutable(); + indexAdvice_.set(index, value); + onChanged(); + } else { + indexAdviceBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Optional. Index Recommendation for a query. This is an optional field and
    +     * the recommendation will only be available when the recommendation
    +     * guarantees significant improvement in query performance.
    +     * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setIndexAdvice( + int index, com.google.spanner.v1.QueryAdvisorResult.IndexAdvice.Builder builderForValue) { + if (indexAdviceBuilder_ == null) { + ensureIndexAdviceIsMutable(); + indexAdvice_.set(index, builderForValue.build()); + onChanged(); + } else { + indexAdviceBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Optional. Index Recommendation for a query. This is an optional field and
    +     * the recommendation will only be available when the recommendation
    +     * guarantees significant improvement in query performance.
    +     * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addIndexAdvice(com.google.spanner.v1.QueryAdvisorResult.IndexAdvice value) { + if (indexAdviceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureIndexAdviceIsMutable(); + indexAdvice_.add(value); + onChanged(); + } else { + indexAdviceBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Optional. Index Recommendation for a query. This is an optional field and
    +     * the recommendation will only be available when the recommendation
    +     * guarantees significant improvement in query performance.
    +     * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addIndexAdvice( + int index, com.google.spanner.v1.QueryAdvisorResult.IndexAdvice value) { + if (indexAdviceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureIndexAdviceIsMutable(); + indexAdvice_.add(index, value); + onChanged(); + } else { + indexAdviceBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Optional. Index Recommendation for a query. This is an optional field and
    +     * the recommendation will only be available when the recommendation
    +     * guarantees significant improvement in query performance.
    +     * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addIndexAdvice( + com.google.spanner.v1.QueryAdvisorResult.IndexAdvice.Builder builderForValue) { + if (indexAdviceBuilder_ == null) { + ensureIndexAdviceIsMutable(); + indexAdvice_.add(builderForValue.build()); + onChanged(); + } else { + indexAdviceBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Optional. Index Recommendation for a query. This is an optional field and
    +     * the recommendation will only be available when the recommendation
    +     * guarantees significant improvement in query performance.
    +     * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addIndexAdvice( + int index, com.google.spanner.v1.QueryAdvisorResult.IndexAdvice.Builder builderForValue) { + if (indexAdviceBuilder_ == null) { + ensureIndexAdviceIsMutable(); + indexAdvice_.add(index, builderForValue.build()); + onChanged(); + } else { + indexAdviceBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Optional. Index Recommendation for a query. This is an optional field and
    +     * the recommendation will only be available when the recommendation
    +     * guarantees significant improvement in query performance.
    +     * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAllIndexAdvice( + java.lang.Iterable values) { + if (indexAdviceBuilder_ == null) { + ensureIndexAdviceIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, indexAdvice_); + onChanged(); + } else { + indexAdviceBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Optional. Index Recommendation for a query. This is an optional field and
    +     * the recommendation will only be available when the recommendation
    +     * guarantees significant improvement in query performance.
    +     * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearIndexAdvice() { + if (indexAdviceBuilder_ == null) { + indexAdvice_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + indexAdviceBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Optional. Index Recommendation for a query. This is an optional field and
    +     * the recommendation will only be available when the recommendation
    +     * guarantees significant improvement in query performance.
    +     * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeIndexAdvice(int index) { + if (indexAdviceBuilder_ == null) { + ensureIndexAdviceIsMutable(); + indexAdvice_.remove(index); + onChanged(); + } else { + indexAdviceBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Optional. Index Recommendation for a query. This is an optional field and
    +     * the recommendation will only be available when the recommendation
    +     * guarantees significant improvement in query performance.
    +     * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.QueryAdvisorResult.IndexAdvice.Builder getIndexAdviceBuilder( + int index) { + return internalGetIndexAdviceFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Optional. Index Recommendation for a query. This is an optional field and
    +     * the recommendation will only be available when the recommendation
    +     * guarantees significant improvement in query performance.
    +     * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.QueryAdvisorResult.IndexAdviceOrBuilder getIndexAdviceOrBuilder( + int index) { + if (indexAdviceBuilder_ == null) { + return indexAdvice_.get(index); + } else { + return indexAdviceBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Optional. Index Recommendation for a query. This is an optional field and
    +     * the recommendation will only be available when the recommendation
    +     * guarantees significant improvement in query performance.
    +     * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getIndexAdviceOrBuilderList() { + if (indexAdviceBuilder_ != null) { + return indexAdviceBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(indexAdvice_); + } + } + + /** + * + * + *
    +     * Optional. Index Recommendation for a query. This is an optional field and
    +     * the recommendation will only be available when the recommendation
    +     * guarantees significant improvement in query performance.
    +     * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.QueryAdvisorResult.IndexAdvice.Builder addIndexAdviceBuilder() { + return internalGetIndexAdviceFieldBuilder() + .addBuilder(com.google.spanner.v1.QueryAdvisorResult.IndexAdvice.getDefaultInstance()); + } + + /** + * + * + *
    +     * Optional. Index Recommendation for a query. This is an optional field and
    +     * the recommendation will only be available when the recommendation
    +     * guarantees significant improvement in query performance.
    +     * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.QueryAdvisorResult.IndexAdvice.Builder addIndexAdviceBuilder( + int index) { + return internalGetIndexAdviceFieldBuilder() + .addBuilder( + index, com.google.spanner.v1.QueryAdvisorResult.IndexAdvice.getDefaultInstance()); + } + + /** + * + * + *
    +     * Optional. Index Recommendation for a query. This is an optional field and
    +     * the recommendation will only be available when the recommendation
    +     * guarantees significant improvement in query performance.
    +     * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getIndexAdviceBuilderList() { + return internalGetIndexAdviceFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.QueryAdvisorResult.IndexAdvice, + com.google.spanner.v1.QueryAdvisorResult.IndexAdvice.Builder, + com.google.spanner.v1.QueryAdvisorResult.IndexAdviceOrBuilder> + internalGetIndexAdviceFieldBuilder() { + if (indexAdviceBuilder_ == null) { + indexAdviceBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.QueryAdvisorResult.IndexAdvice, + com.google.spanner.v1.QueryAdvisorResult.IndexAdvice.Builder, + com.google.spanner.v1.QueryAdvisorResult.IndexAdviceOrBuilder>( + indexAdvice_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + indexAdvice_ = null; + } + return indexAdviceBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.QueryAdvisorResult) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.QueryAdvisorResult) + private static final com.google.spanner.v1.QueryAdvisorResult DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.QueryAdvisorResult(); + } + + public static com.google.spanner.v1.QueryAdvisorResult getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public QueryAdvisorResult parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + 
} catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.QueryAdvisorResult getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryAdvisorResultOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryAdvisorResultOrBuilder.java new file mode 100644 index 000000000000..8d46964cf309 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryAdvisorResultOrBuilder.java @@ -0,0 +1,104 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/query_plan.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface QueryAdvisorResultOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.QueryAdvisorResult) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Optional. Index Recommendation for a query. This is an optional field and
    +   * the recommendation will only be available when the recommendation
    +   * guarantees significant improvement in query performance.
    +   * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List getIndexAdviceList(); + + /** + * + * + *
    +   * Optional. Index Recommendation for a query. This is an optional field and
    +   * the recommendation will only be available when the recommendation
    +   * guarantees significant improvement in query performance.
    +   * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.QueryAdvisorResult.IndexAdvice getIndexAdvice(int index); + + /** + * + * + *
    +   * Optional. Index Recommendation for a query. This is an optional field and
    +   * the recommendation will only be available when the recommendation
    +   * guarantees significant improvement in query performance.
    +   * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getIndexAdviceCount(); + + /** + * + * + *
    +   * Optional. Index Recommendation for a query. This is an optional field and
    +   * the recommendation will only be available when the recommendation
    +   * guarantees significant improvement in query performance.
    +   * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getIndexAdviceOrBuilderList(); + + /** + * + * + *
    +   * Optional. Index Recommendation for a query. This is an optional field and
    +   * the recommendation will only be available when the recommendation
    +   * guarantees significant improvement in query performance.
    +   * 
    + * + * + * repeated .google.spanner.v1.QueryAdvisorResult.IndexAdvice index_advice = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.QueryAdvisorResult.IndexAdviceOrBuilder getIndexAdviceOrBuilder(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryPlan.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryPlan.java new file mode 100644 index 000000000000..789c2f58e736 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryPlan.java @@ -0,0 +1,1288 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/query_plan.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * Contains an ordered list of nodes appearing in the query plan.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.QueryPlan} + */ +@com.google.protobuf.Generated +public final class QueryPlan extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.QueryPlan) + QueryPlanOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "QueryPlan"); + } + + // Use QueryPlan.newBuilder() to construct. + private QueryPlan(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private QueryPlan() { + planNodes_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_QueryPlan_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_QueryPlan_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.QueryPlan.class, com.google.spanner.v1.QueryPlan.Builder.class); + } + + private int bitField0_; + public static final int PLAN_NODES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List planNodes_; + + /** + * + * + *
    +   * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +   * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +   * corresponds to its index in `plan_nodes`.
    +   * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + @java.lang.Override + public java.util.List getPlanNodesList() { + return planNodes_; + } + + /** + * + * + *
    +   * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +   * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +   * corresponds to its index in `plan_nodes`.
    +   * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + @java.lang.Override + public java.util.List + getPlanNodesOrBuilderList() { + return planNodes_; + } + + /** + * + * + *
    +   * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +   * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +   * corresponds to its index in `plan_nodes`.
    +   * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + @java.lang.Override + public int getPlanNodesCount() { + return planNodes_.size(); + } + + /** + * + * + *
    +   * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +   * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +   * corresponds to its index in `plan_nodes`.
    +   * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + @java.lang.Override + public com.google.spanner.v1.PlanNode getPlanNodes(int index) { + return planNodes_.get(index); + } + + /** + * + * + *
    +   * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +   * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +   * corresponds to its index in `plan_nodes`.
    +   * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + @java.lang.Override + public com.google.spanner.v1.PlanNodeOrBuilder getPlanNodesOrBuilder(int index) { + return planNodes_.get(index); + } + + public static final int QUERY_ADVICE_FIELD_NUMBER = 2; + private com.google.spanner.v1.QueryAdvisorResult queryAdvice_; + + /** + * + * + *
    +   * Optional. The advise/recommendations for a query. Currently this field will
    +   * be serving index recommendations for a query.
    +   * 
    + * + * + * .google.spanner.v1.QueryAdvisorResult query_advice = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the queryAdvice field is set. + */ + @java.lang.Override + public boolean hasQueryAdvice() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Optional. The advise/recommendations for a query. Currently this field will
    +   * be serving index recommendations for a query.
    +   * 
    + * + * + * .google.spanner.v1.QueryAdvisorResult query_advice = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The queryAdvice. + */ + @java.lang.Override + public com.google.spanner.v1.QueryAdvisorResult getQueryAdvice() { + return queryAdvice_ == null + ? com.google.spanner.v1.QueryAdvisorResult.getDefaultInstance() + : queryAdvice_; + } + + /** + * + * + *
    +   * Optional. The advise/recommendations for a query. Currently this field will
    +   * be serving index recommendations for a query.
    +   * 
    + * + * + * .google.spanner.v1.QueryAdvisorResult query_advice = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.QueryAdvisorResultOrBuilder getQueryAdviceOrBuilder() { + return queryAdvice_ == null + ? com.google.spanner.v1.QueryAdvisorResult.getDefaultInstance() + : queryAdvice_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < planNodes_.size(); i++) { + output.writeMessage(1, planNodes_.get(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getQueryAdvice()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < planNodes_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, planNodes_.get(i)); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getQueryAdvice()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.QueryPlan)) { + return super.equals(obj); + } + com.google.spanner.v1.QueryPlan other = (com.google.spanner.v1.QueryPlan) obj; + + if (!getPlanNodesList().equals(other.getPlanNodesList())) return false; + if (hasQueryAdvice() != other.hasQueryAdvice()) return false; + if (hasQueryAdvice()) { + if (!getQueryAdvice().equals(other.getQueryAdvice())) 
return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getPlanNodesCount() > 0) { + hash = (37 * hash) + PLAN_NODES_FIELD_NUMBER; + hash = (53 * hash) + getPlanNodesList().hashCode(); + } + if (hasQueryAdvice()) { + hash = (37 * hash) + QUERY_ADVICE_FIELD_NUMBER; + hash = (53 * hash) + getQueryAdvice().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.QueryPlan parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.QueryPlan parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.QueryPlan parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.QueryPlan parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.QueryPlan parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.QueryPlan parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + + public static com.google.spanner.v1.QueryPlan parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.QueryPlan parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.QueryPlan parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.QueryPlan parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.QueryPlan parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.QueryPlan parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.QueryPlan prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Contains an ordered list of nodes appearing in the query plan.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.QueryPlan} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.QueryPlan) + com.google.spanner.v1.QueryPlanOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_QueryPlan_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_QueryPlan_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.QueryPlan.class, com.google.spanner.v1.QueryPlan.Builder.class); + } + + // Construct using com.google.spanner.v1.QueryPlan.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetPlanNodesFieldBuilder(); + internalGetQueryAdviceFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (planNodesBuilder_ == null) { + planNodes_ = java.util.Collections.emptyList(); + } else { + planNodes_ = null; + planNodesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + queryAdvice_ = null; + if (queryAdviceBuilder_ != null) { + queryAdviceBuilder_.dispose(); + queryAdviceBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.QueryPlanProto + .internal_static_google_spanner_v1_QueryPlan_descriptor; + } + + @java.lang.Override + public 
com.google.spanner.v1.QueryPlan getDefaultInstanceForType() { + return com.google.spanner.v1.QueryPlan.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.QueryPlan build() { + com.google.spanner.v1.QueryPlan result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.QueryPlan buildPartial() { + com.google.spanner.v1.QueryPlan result = new com.google.spanner.v1.QueryPlan(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.v1.QueryPlan result) { + if (planNodesBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + planNodes_ = java.util.Collections.unmodifiableList(planNodes_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.planNodes_ = planNodes_; + } else { + result.planNodes_ = planNodesBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.QueryPlan result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.queryAdvice_ = + queryAdviceBuilder_ == null ? 
queryAdvice_ : queryAdviceBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.QueryPlan) { + return mergeFrom((com.google.spanner.v1.QueryPlan) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.QueryPlan other) { + if (other == com.google.spanner.v1.QueryPlan.getDefaultInstance()) return this; + if (planNodesBuilder_ == null) { + if (!other.planNodes_.isEmpty()) { + if (planNodes_.isEmpty()) { + planNodes_ = other.planNodes_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensurePlanNodesIsMutable(); + planNodes_.addAll(other.planNodes_); + } + onChanged(); + } + } else { + if (!other.planNodes_.isEmpty()) { + if (planNodesBuilder_.isEmpty()) { + planNodesBuilder_.dispose(); + planNodesBuilder_ = null; + planNodes_ = other.planNodes_; + bitField0_ = (bitField0_ & ~0x00000001); + planNodesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetPlanNodesFieldBuilder() + : null; + } else { + planNodesBuilder_.addAllMessages(other.planNodes_); + } + } + } + if (other.hasQueryAdvice()) { + mergeQueryAdvice(other.getQueryAdvice()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.v1.PlanNode m = + input.readMessage(com.google.spanner.v1.PlanNode.parser(), extensionRegistry); + if (planNodesBuilder_ == null) { + ensurePlanNodesIsMutable(); + planNodes_.add(m); + } else { + planNodesBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetQueryAdviceFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List planNodes_ = + java.util.Collections.emptyList(); + + private void ensurePlanNodesIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + planNodes_ = new java.util.ArrayList(planNodes_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.PlanNode, + com.google.spanner.v1.PlanNode.Builder, + 
com.google.spanner.v1.PlanNodeOrBuilder> + planNodesBuilder_; + + /** + * + * + *
    +     * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +     * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +     * corresponds to its index in `plan_nodes`.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + public java.util.List getPlanNodesList() { + if (planNodesBuilder_ == null) { + return java.util.Collections.unmodifiableList(planNodes_); + } else { + return planNodesBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +     * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +     * corresponds to its index in `plan_nodes`.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + public int getPlanNodesCount() { + if (planNodesBuilder_ == null) { + return planNodes_.size(); + } else { + return planNodesBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +     * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +     * corresponds to its index in `plan_nodes`.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + public com.google.spanner.v1.PlanNode getPlanNodes(int index) { + if (planNodesBuilder_ == null) { + return planNodes_.get(index); + } else { + return planNodesBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +     * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +     * corresponds to its index in `plan_nodes`.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + public Builder setPlanNodes(int index, com.google.spanner.v1.PlanNode value) { + if (planNodesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePlanNodesIsMutable(); + planNodes_.set(index, value); + onChanged(); + } else { + planNodesBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +     * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +     * corresponds to its index in `plan_nodes`.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + public Builder setPlanNodes(int index, com.google.spanner.v1.PlanNode.Builder builderForValue) { + if (planNodesBuilder_ == null) { + ensurePlanNodesIsMutable(); + planNodes_.set(index, builderForValue.build()); + onChanged(); + } else { + planNodesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +     * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +     * corresponds to its index in `plan_nodes`.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + public Builder addPlanNodes(com.google.spanner.v1.PlanNode value) { + if (planNodesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePlanNodesIsMutable(); + planNodes_.add(value); + onChanged(); + } else { + planNodesBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +     * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +     * corresponds to its index in `plan_nodes`.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + public Builder addPlanNodes(int index, com.google.spanner.v1.PlanNode value) { + if (planNodesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePlanNodesIsMutable(); + planNodes_.add(index, value); + onChanged(); + } else { + planNodesBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +     * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +     * corresponds to its index in `plan_nodes`.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + public Builder addPlanNodes(com.google.spanner.v1.PlanNode.Builder builderForValue) { + if (planNodesBuilder_ == null) { + ensurePlanNodesIsMutable(); + planNodes_.add(builderForValue.build()); + onChanged(); + } else { + planNodesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +     * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +     * corresponds to its index in `plan_nodes`.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + public Builder addPlanNodes(int index, com.google.spanner.v1.PlanNode.Builder builderForValue) { + if (planNodesBuilder_ == null) { + ensurePlanNodesIsMutable(); + planNodes_.add(index, builderForValue.build()); + onChanged(); + } else { + planNodesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +     * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +     * corresponds to its index in `plan_nodes`.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + public Builder addAllPlanNodes( + java.lang.Iterable values) { + if (planNodesBuilder_ == null) { + ensurePlanNodesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, planNodes_); + onChanged(); + } else { + planNodesBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +     * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +     * corresponds to its index in `plan_nodes`.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + public Builder clearPlanNodes() { + if (planNodesBuilder_ == null) { + planNodes_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + planNodesBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +     * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +     * corresponds to its index in `plan_nodes`.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + public Builder removePlanNodes(int index) { + if (planNodesBuilder_ == null) { + ensurePlanNodesIsMutable(); + planNodes_.remove(index); + onChanged(); + } else { + planNodesBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +     * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +     * corresponds to its index in `plan_nodes`.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + public com.google.spanner.v1.PlanNode.Builder getPlanNodesBuilder(int index) { + return internalGetPlanNodesFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +     * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +     * corresponds to its index in `plan_nodes`.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + public com.google.spanner.v1.PlanNodeOrBuilder getPlanNodesOrBuilder(int index) { + if (planNodesBuilder_ == null) { + return planNodes_.get(index); + } else { + return planNodesBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +     * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +     * corresponds to its index in `plan_nodes`.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + public java.util.List + getPlanNodesOrBuilderList() { + if (planNodesBuilder_ != null) { + return planNodesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(planNodes_); + } + } + + /** + * + * + *
    +     * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +     * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +     * corresponds to its index in `plan_nodes`.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + public com.google.spanner.v1.PlanNode.Builder addPlanNodesBuilder() { + return internalGetPlanNodesFieldBuilder() + .addBuilder(com.google.spanner.v1.PlanNode.getDefaultInstance()); + } + + /** + * + * + *
    +     * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +     * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +     * corresponds to its index in `plan_nodes`.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + public com.google.spanner.v1.PlanNode.Builder addPlanNodesBuilder(int index) { + return internalGetPlanNodesFieldBuilder() + .addBuilder(index, com.google.spanner.v1.PlanNode.getDefaultInstance()); + } + + /** + * + * + *
    +     * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +     * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +     * corresponds to its index in `plan_nodes`.
    +     * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + public java.util.List getPlanNodesBuilderList() { + return internalGetPlanNodesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.PlanNode, + com.google.spanner.v1.PlanNode.Builder, + com.google.spanner.v1.PlanNodeOrBuilder> + internalGetPlanNodesFieldBuilder() { + if (planNodesBuilder_ == null) { + planNodesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.PlanNode, + com.google.spanner.v1.PlanNode.Builder, + com.google.spanner.v1.PlanNodeOrBuilder>( + planNodes_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + planNodes_ = null; + } + return planNodesBuilder_; + } + + private com.google.spanner.v1.QueryAdvisorResult queryAdvice_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.QueryAdvisorResult, + com.google.spanner.v1.QueryAdvisorResult.Builder, + com.google.spanner.v1.QueryAdvisorResultOrBuilder> + queryAdviceBuilder_; + + /** + * + * + *
    +     * Optional. The advise/recommendations for a query. Currently this field will
    +     * be serving index recommendations for a query.
    +     * 
    + * + * + * .google.spanner.v1.QueryAdvisorResult query_advice = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the queryAdvice field is set. + */ + public boolean hasQueryAdvice() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Optional. The advise/recommendations for a query. Currently this field will
    +     * be serving index recommendations for a query.
    +     * 
    + * + * + * .google.spanner.v1.QueryAdvisorResult query_advice = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The queryAdvice. + */ + public com.google.spanner.v1.QueryAdvisorResult getQueryAdvice() { + if (queryAdviceBuilder_ == null) { + return queryAdvice_ == null + ? com.google.spanner.v1.QueryAdvisorResult.getDefaultInstance() + : queryAdvice_; + } else { + return queryAdviceBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. The advise/recommendations for a query. Currently this field will
    +     * be serving index recommendations for a query.
    +     * 
    + * + * + * .google.spanner.v1.QueryAdvisorResult query_advice = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setQueryAdvice(com.google.spanner.v1.QueryAdvisorResult value) { + if (queryAdviceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + queryAdvice_ = value; + } else { + queryAdviceBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The advise/recommendations for a query. Currently this field will
    +     * be serving index recommendations for a query.
    +     * 
    + * + * + * .google.spanner.v1.QueryAdvisorResult query_advice = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setQueryAdvice( + com.google.spanner.v1.QueryAdvisorResult.Builder builderForValue) { + if (queryAdviceBuilder_ == null) { + queryAdvice_ = builderForValue.build(); + } else { + queryAdviceBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The advise/recommendations for a query. Currently this field will
    +     * be serving index recommendations for a query.
    +     * 
    + * + * + * .google.spanner.v1.QueryAdvisorResult query_advice = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeQueryAdvice(com.google.spanner.v1.QueryAdvisorResult value) { + if (queryAdviceBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && queryAdvice_ != null + && queryAdvice_ != com.google.spanner.v1.QueryAdvisorResult.getDefaultInstance()) { + getQueryAdviceBuilder().mergeFrom(value); + } else { + queryAdvice_ = value; + } + } else { + queryAdviceBuilder_.mergeFrom(value); + } + if (queryAdvice_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. The advise/recommendations for a query. Currently this field will
    +     * be serving index recommendations for a query.
    +     * 
    + * + * + * .google.spanner.v1.QueryAdvisorResult query_advice = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearQueryAdvice() { + bitField0_ = (bitField0_ & ~0x00000002); + queryAdvice_ = null; + if (queryAdviceBuilder_ != null) { + queryAdviceBuilder_.dispose(); + queryAdviceBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. The advise/recommendations for a query. Currently this field will
    +     * be serving index recommendations for a query.
    +     * 
    + * + * + * .google.spanner.v1.QueryAdvisorResult query_advice = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.QueryAdvisorResult.Builder getQueryAdviceBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetQueryAdviceFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. The advise/recommendations for a query. Currently this field will
    +     * be serving index recommendations for a query.
    +     * 
    + * + * + * .google.spanner.v1.QueryAdvisorResult query_advice = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.QueryAdvisorResultOrBuilder getQueryAdviceOrBuilder() { + if (queryAdviceBuilder_ != null) { + return queryAdviceBuilder_.getMessageOrBuilder(); + } else { + return queryAdvice_ == null + ? com.google.spanner.v1.QueryAdvisorResult.getDefaultInstance() + : queryAdvice_; + } + } + + /** + * + * + *
    +     * Optional. The advise/recommendations for a query. Currently this field will
    +     * be serving index recommendations for a query.
    +     * 
    + * + * + * .google.spanner.v1.QueryAdvisorResult query_advice = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.QueryAdvisorResult, + com.google.spanner.v1.QueryAdvisorResult.Builder, + com.google.spanner.v1.QueryAdvisorResultOrBuilder> + internalGetQueryAdviceFieldBuilder() { + if (queryAdviceBuilder_ == null) { + queryAdviceBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.QueryAdvisorResult, + com.google.spanner.v1.QueryAdvisorResult.Builder, + com.google.spanner.v1.QueryAdvisorResultOrBuilder>( + getQueryAdvice(), getParentForChildren(), isClean()); + queryAdvice_ = null; + } + return queryAdviceBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.QueryPlan) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.QueryPlan) + private static final com.google.spanner.v1.QueryPlan DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.QueryPlan(); + } + + public static com.google.spanner.v1.QueryPlan getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public QueryPlan parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); 
+ } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.QueryPlan getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryPlanOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryPlanOrBuilder.java new file mode 100644 index 000000000000..39bf2ec6dfef --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryPlanOrBuilder.java @@ -0,0 +1,139 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/query_plan.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface QueryPlanOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.QueryPlan) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +   * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +   * corresponds to its index in `plan_nodes`.
    +   * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + java.util.List getPlanNodesList(); + + /** + * + * + *
    +   * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +   * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +   * corresponds to its index in `plan_nodes`.
    +   * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + com.google.spanner.v1.PlanNode getPlanNodes(int index); + + /** + * + * + *
    +   * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +   * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +   * corresponds to its index in `plan_nodes`.
    +   * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + int getPlanNodesCount(); + + /** + * + * + *
    +   * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +   * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +   * corresponds to its index in `plan_nodes`.
    +   * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + java.util.List getPlanNodesOrBuilderList(); + + /** + * + * + *
    +   * The nodes in the query plan. Plan nodes are returned in pre-order starting
    +   * with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id`
    +   * corresponds to its index in `plan_nodes`.
    +   * 
    + * + * repeated .google.spanner.v1.PlanNode plan_nodes = 1; + */ + com.google.spanner.v1.PlanNodeOrBuilder getPlanNodesOrBuilder(int index); + + /** + * + * + *
    +   * Optional. The advise/recommendations for a query. Currently this field will
    +   * be serving index recommendations for a query.
    +   * 
    + * + * + * .google.spanner.v1.QueryAdvisorResult query_advice = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the queryAdvice field is set. + */ + boolean hasQueryAdvice(); + + /** + * + * + *
    +   * Optional. The advise/recommendations for a query. Currently this field will
    +   * be serving index recommendations for a query.
    +   * 
    + * + * + * .google.spanner.v1.QueryAdvisorResult query_advice = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The queryAdvice. + */ + com.google.spanner.v1.QueryAdvisorResult getQueryAdvice(); + + /** + * + * + *
    +   * Optional. The advise/recommendations for a query. Currently this field will
    +   * be serving index recommendations for a query.
    +   * 
    + * + * + * .google.spanner.v1.QueryAdvisorResult query_advice = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.QueryAdvisorResultOrBuilder getQueryAdviceOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryPlanProto.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryPlanProto.java new file mode 100644 index 000000000000..e5b42e990074 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryPlanProto.java @@ -0,0 +1,201 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/query_plan.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public final class QueryPlanProto extends com.google.protobuf.GeneratedFile { + private QueryPlanProto() {} + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "QueryPlanProto"); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_PlanNode_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_PlanNode_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_PlanNode_ChildLink_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_PlanNode_ChildLink_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_PlanNode_ShortRepresentation_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_PlanNode_ShortRepresentation_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_PlanNode_ShortRepresentation_SubqueriesEntry_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_PlanNode_ShortRepresentation_SubqueriesEntry_fieldAccessorTable; + static final 
com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_QueryAdvisorResult_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_QueryAdvisorResult_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_QueryAdvisorResult_IndexAdvice_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_QueryAdvisorResult_IndexAdvice_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_QueryPlan_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_QueryPlan_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n" + + "\"google/spanner/v1/query_plan.proto\022\021go" + + "ogle.spanner.v1\032\037google/api/field_behavi" + + "or.proto\032\034google/protobuf/struct.proto\"\370\004\n" + + "\010PlanNode\022\r\n" + + "\005index\030\001 \001(\005\022.\n" + + "\004kind\030\002 \001(\0162 .google.spanner.v1.PlanNode.Kind\022\024\n" + + "\014display_name\030\003 \001(\t\022:\n" + + "\013child_links\030\004 \003(\0132%.google.spanner.v1.PlanNode.ChildLink\022M\n" + + "\024short_representation\030\005" + + " \001(\0132/.google.spanner.v1.PlanNode.ShortRepresentation\022)\n" + + "\010metadata\030\006 \001(\0132\027.google.protobuf.Struct\0220\n" + + "\017execution_stats\030\007 \001(\0132\027.google.protobuf.Struct\032@\n" + + "\tChildLink\022\023\n" + + "\013child_index\030\001 \001(\005\022\014\n" + + "\004type\030\002 \001(\t\022\020\n" + + "\010variable\030\003 \001(\t\032\262\001\n" + + "\023ShortRepresentation\022\023\n" + + "\013description\030\001 
\001(\t\022S\n\n" + + "subqueries\030\002 \003(\0132?.google.spanner.v1." + + "PlanNode.ShortRepresentation.SubqueriesEntry\0321\n" + + "\017SubqueriesEntry\022\013\n" + + "\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\005:\0028\001\"8\n" + + "\004Kind\022\024\n" + + "\020KIND_UNSPECIFIED\020\000\022\016\n\n" + + "RELATIONAL\020\001\022\n\n" + + "\006SCALAR\020\002\"\244\001\n" + + "\022QueryAdvisorResult\022L\n" + + "\014index_advice\030\001 \003(\01321" + + ".google.spanner.v1.QueryAdvisorResult.IndexAdviceB\003\340A\001\032@\n" + + "\013IndexAdvice\022\020\n" + + "\003ddl\030\001 \003(\tB\003\340A\001\022\037\n" + + "\022improvement_factor\030\002 \001(\001B\003\340A\001\"~\n" + + "\tQueryPlan\022/\n\n" + + "plan_nodes\030\001 \003(\0132\033.google.spanner.v1.PlanNode\022@\n" + + "\014query_advice\030\002" + + " \001(\0132%.google.spanner.v1.QueryAdvisorResultB\003\340A\001B\261\001\n" + + "\025com.google.spanner.v1B\016QueryPlanProtoP\001Z5cloud.google.com/go/spanne" + + "r/apiv1/spannerpb;spannerpb\252\002\027Google.Clo" + + "ud.Spanner.V1\312\002\027Google\\Cloud\\Spanner\\V1\352" + + "\002\032Google::Cloud::Spanner::V1b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.protobuf.StructProto.getDescriptor(), + }); + internal_static_google_spanner_v1_PlanNode_descriptor = getDescriptor().getMessageType(0); + internal_static_google_spanner_v1_PlanNode_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_PlanNode_descriptor, + new java.lang.String[] { + "Index", + "Kind", + "DisplayName", + "ChildLinks", + "ShortRepresentation", + "Metadata", + "ExecutionStats", + }); + internal_static_google_spanner_v1_PlanNode_ChildLink_descriptor = + 
internal_static_google_spanner_v1_PlanNode_descriptor.getNestedType(0); + internal_static_google_spanner_v1_PlanNode_ChildLink_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_PlanNode_ChildLink_descriptor, + new java.lang.String[] { + "ChildIndex", "Type", "Variable", + }); + internal_static_google_spanner_v1_PlanNode_ShortRepresentation_descriptor = + internal_static_google_spanner_v1_PlanNode_descriptor.getNestedType(1); + internal_static_google_spanner_v1_PlanNode_ShortRepresentation_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_PlanNode_ShortRepresentation_descriptor, + new java.lang.String[] { + "Description", "Subqueries", + }); + internal_static_google_spanner_v1_PlanNode_ShortRepresentation_SubqueriesEntry_descriptor = + internal_static_google_spanner_v1_PlanNode_ShortRepresentation_descriptor.getNestedType(0); + internal_static_google_spanner_v1_PlanNode_ShortRepresentation_SubqueriesEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_PlanNode_ShortRepresentation_SubqueriesEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_spanner_v1_QueryAdvisorResult_descriptor = + getDescriptor().getMessageType(1); + internal_static_google_spanner_v1_QueryAdvisorResult_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_QueryAdvisorResult_descriptor, + new java.lang.String[] { + "IndexAdvice", + }); + internal_static_google_spanner_v1_QueryAdvisorResult_IndexAdvice_descriptor = + internal_static_google_spanner_v1_QueryAdvisorResult_descriptor.getNestedType(0); + internal_static_google_spanner_v1_QueryAdvisorResult_IndexAdvice_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_google_spanner_v1_QueryAdvisorResult_IndexAdvice_descriptor, + new java.lang.String[] { + "Ddl", "ImprovementFactor", + }); + internal_static_google_spanner_v1_QueryPlan_descriptor = getDescriptor().getMessageType(2); + internal_static_google_spanner_v1_QueryPlan_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_QueryPlan_descriptor, + new java.lang.String[] { + "PlanNodes", "QueryAdvice", + }); + descriptor.resolveAllFeaturesImmutable(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.protobuf.StructProto.getDescriptor(); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Range.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Range.java new file mode 100644 index 000000000000..e01efe1b227b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Range.java @@ -0,0 +1,944 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. 
DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/location.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * A `Range` represents a range of keys in a database. The keys themselves
    + * are encoded in "sortable string format", also known as ssformat. Consult
    + * Spanner's open source client libraries for details on the encoding.
    + *
    + * Each range represents a contiguous range of rows, possibly from multiple
    + * tables/indexes. Each range is associated with a single paxos group (known as
    + * a "group" throughout this API), a split (which names the exact range within
    + * the group), and a generation that can be used to determine whether a given
    + * `Range` represents a newer or older location for the key range.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.Range} + */ +@com.google.protobuf.Generated +public final class Range extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.Range) + RangeOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Range"); + } + + // Use Range.newBuilder() to construct. + private Range(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Range() { + startKey_ = com.google.protobuf.ByteString.EMPTY; + limitKey_ = com.google.protobuf.ByteString.EMPTY; + generation_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.LocationProto.internal_static_google_spanner_v1_Range_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_Range_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Range.class, com.google.spanner.v1.Range.Builder.class); + } + + public static final int START_KEY_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString startKey_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * The start key of the range, inclusive. Encoded in "sortable string format"
    +   * (ssformat).
    +   * 
    + * + * bytes start_key = 1; + * + * @return The startKey. + */ + @java.lang.Override + public com.google.protobuf.ByteString getStartKey() { + return startKey_; + } + + public static final int LIMIT_KEY_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString limitKey_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * The limit key of the range, exclusive. Encoded in "sortable string format"
    +   * (ssformat).
    +   * 
    + * + * bytes limit_key = 2; + * + * @return The limitKey. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLimitKey() { + return limitKey_; + } + + public static final int GROUP_UID_FIELD_NUMBER = 3; + private long groupUid_ = 0L; + + /** + * + * + *
    +   * The UID of the paxos group where this range is stored. UIDs are unique
    +   * within the database. References `Group.group_uid`.
    +   * 
    + * + * uint64 group_uid = 3; + * + * @return The groupUid. + */ + @java.lang.Override + public long getGroupUid() { + return groupUid_; + } + + public static final int SPLIT_ID_FIELD_NUMBER = 4; + private long splitId_ = 0L; + + /** + * + * + *
    +   * A group can store multiple ranges of keys. Each key range is named by an
    +   * ID (the split ID). Within a group, split IDs are unique. The `split_id`
    +   * names the exact split in `group_uid` where this range is stored.
    +   * 
    + * + * uint64 split_id = 4; + * + * @return The splitId. + */ + @java.lang.Override + public long getSplitId() { + return splitId_; + } + + public static final int GENERATION_FIELD_NUMBER = 5; + private com.google.protobuf.ByteString generation_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * `generation` indicates the freshness of the range information contained
    +   * in this proto. Generations can be compared lexicographically; if generation
    +   * A is greater than generation B, then the `Range` corresponding to A is
    +   * newer than the `Range` corresponding to B, and should be used
    +   * preferentially.
    +   * 
    + * + * bytes generation = 5; + * + * @return The generation. + */ + @java.lang.Override + public com.google.protobuf.ByteString getGeneration() { + return generation_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!startKey_.isEmpty()) { + output.writeBytes(1, startKey_); + } + if (!limitKey_.isEmpty()) { + output.writeBytes(2, limitKey_); + } + if (groupUid_ != 0L) { + output.writeUInt64(3, groupUid_); + } + if (splitId_ != 0L) { + output.writeUInt64(4, splitId_); + } + if (!generation_.isEmpty()) { + output.writeBytes(5, generation_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!startKey_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, startKey_); + } + if (!limitKey_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(2, limitKey_); + } + if (groupUid_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeUInt64Size(3, groupUid_); + } + if (splitId_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeUInt64Size(4, splitId_); + } + if (!generation_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(5, generation_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.Range)) { + return super.equals(obj); + } + com.google.spanner.v1.Range other = 
(com.google.spanner.v1.Range) obj; + + if (!getStartKey().equals(other.getStartKey())) return false; + if (!getLimitKey().equals(other.getLimitKey())) return false; + if (getGroupUid() != other.getGroupUid()) return false; + if (getSplitId() != other.getSplitId()) return false; + if (!getGeneration().equals(other.getGeneration())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + START_KEY_FIELD_NUMBER; + hash = (53 * hash) + getStartKey().hashCode(); + hash = (37 * hash) + LIMIT_KEY_FIELD_NUMBER; + hash = (53 * hash) + getLimitKey().hashCode(); + hash = (37 * hash) + GROUP_UID_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getGroupUid()); + hash = (37 * hash) + SPLIT_ID_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getSplitId()); + hash = (37 * hash) + GENERATION_FIELD_NUMBER; + hash = (53 * hash) + getGeneration().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.Range parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Range parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Range parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Range parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Range parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Range parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Range parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Range parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Range parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Range parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Range parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Range parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + 
throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.Range prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * A `Range` represents a range of keys in a database. The keys themselves
    +   * are encoded in "sortable string format", also known as ssformat. Consult
    +   * Spanner's open source client libraries for details on the encoding.
    +   *
    +   * Each range represents a contiguous range of rows, possibly from multiple
    +   * tables/indexes. Each range is associated with a single paxos group (known as
    +   * a "group" throughout this API), a split (which names the exact range within
    +   * the group), and a generation that can be used to determine whether a given
    +   * `Range` represents a newer or older location for the key range.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.Range} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.Range) + com.google.spanner.v1.RangeOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.LocationProto.internal_static_google_spanner_v1_Range_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_Range_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Range.class, com.google.spanner.v1.Range.Builder.class); + } + + // Construct using com.google.spanner.v1.Range.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + startKey_ = com.google.protobuf.ByteString.EMPTY; + limitKey_ = com.google.protobuf.ByteString.EMPTY; + groupUid_ = 0L; + splitId_ = 0L; + generation_ = com.google.protobuf.ByteString.EMPTY; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.LocationProto.internal_static_google_spanner_v1_Range_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.Range getDefaultInstanceForType() { + return com.google.spanner.v1.Range.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.Range build() { + com.google.spanner.v1.Range result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.Range buildPartial() { + 
com.google.spanner.v1.Range result = new com.google.spanner.v1.Range(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.Range result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.startKey_ = startKey_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.limitKey_ = limitKey_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.groupUid_ = groupUid_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.splitId_ = splitId_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.generation_ = generation_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.Range) { + return mergeFrom((com.google.spanner.v1.Range) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.Range other) { + if (other == com.google.spanner.v1.Range.getDefaultInstance()) return this; + if (!other.getStartKey().isEmpty()) { + setStartKey(other.getStartKey()); + } + if (!other.getLimitKey().isEmpty()) { + setLimitKey(other.getLimitKey()); + } + if (other.getGroupUid() != 0L) { + setGroupUid(other.getGroupUid()); + } + if (other.getSplitId() != 0L) { + setSplitId(other.getSplitId()); + } + if (!other.getGeneration().isEmpty()) { + setGeneration(other.getGeneration()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = 
input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + startKey_ = input.readBytes(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + limitKey_ = input.readBytes(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + groupUid_ = input.readUInt64(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 32: + { + splitId_ = input.readUInt64(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 42: + { + generation_ = input.readBytes(); + bitField0_ |= 0x00000010; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.ByteString startKey_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * The start key of the range, inclusive. Encoded in "sortable string format"
    +     * (ssformat).
    +     * 
    + * + * bytes start_key = 1; + * + * @return The startKey. + */ + @java.lang.Override + public com.google.protobuf.ByteString getStartKey() { + return startKey_; + } + + /** + * + * + *
    +     * The start key of the range, inclusive. Encoded in "sortable string format"
    +     * (ssformat).
    +     * 
    + * + * bytes start_key = 1; + * + * @param value The startKey to set. + * @return This builder for chaining. + */ + public Builder setStartKey(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + startKey_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The start key of the range, inclusive. Encoded in "sortable string format"
    +     * (ssformat).
    +     * 
    + * + * bytes start_key = 1; + * + * @return This builder for chaining. + */ + public Builder clearStartKey() { + bitField0_ = (bitField0_ & ~0x00000001); + startKey_ = getDefaultInstance().getStartKey(); + onChanged(); + return this; + } + + private com.google.protobuf.ByteString limitKey_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * The limit key of the range, exclusive. Encoded in "sortable string format"
    +     * (ssformat).
    +     * 
    + * + * bytes limit_key = 2; + * + * @return The limitKey. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLimitKey() { + return limitKey_; + } + + /** + * + * + *
    +     * The limit key of the range, exclusive. Encoded in "sortable string format"
    +     * (ssformat).
    +     * 
    + * + * bytes limit_key = 2; + * + * @param value The limitKey to set. + * @return This builder for chaining. + */ + public Builder setLimitKey(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + limitKey_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The limit key of the range, exclusive. Encoded in "sortable string format"
    +     * (ssformat).
    +     * 
    + * + * bytes limit_key = 2; + * + * @return This builder for chaining. + */ + public Builder clearLimitKey() { + bitField0_ = (bitField0_ & ~0x00000002); + limitKey_ = getDefaultInstance().getLimitKey(); + onChanged(); + return this; + } + + private long groupUid_; + + /** + * + * + *
    +     * The UID of the paxos group where this range is stored. UIDs are unique
    +     * within the database. References `Group.group_uid`.
    +     * 
    + * + * uint64 group_uid = 3; + * + * @return The groupUid. + */ + @java.lang.Override + public long getGroupUid() { + return groupUid_; + } + + /** + * + * + *
    +     * The UID of the paxos group where this range is stored. UIDs are unique
    +     * within the database. References `Group.group_uid`.
    +     * 
    + * + * uint64 group_uid = 3; + * + * @param value The groupUid to set. + * @return This builder for chaining. + */ + public Builder setGroupUid(long value) { + + groupUid_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The UID of the paxos group where this range is stored. UIDs are unique
    +     * within the database. References `Group.group_uid`.
    +     * 
    + * + * uint64 group_uid = 3; + * + * @return This builder for chaining. + */ + public Builder clearGroupUid() { + bitField0_ = (bitField0_ & ~0x00000004); + groupUid_ = 0L; + onChanged(); + return this; + } + + private long splitId_; + + /** + * + * + *
    +     * A group can store multiple ranges of keys. Each key range is named by an
    +     * ID (the split ID). Within a group, split IDs are unique. The `split_id`
    +     * names the exact split in `group_uid` where this range is stored.
    +     * 
    + * + * uint64 split_id = 4; + * + * @return The splitId. + */ + @java.lang.Override + public long getSplitId() { + return splitId_; + } + + /** + * + * + *
    +     * A group can store multiple ranges of keys. Each key range is named by an
    +     * ID (the split ID). Within a group, split IDs are unique. The `split_id`
    +     * names the exact split in `group_uid` where this range is stored.
    +     * 
    + * + * uint64 split_id = 4; + * + * @param value The splitId to set. + * @return This builder for chaining. + */ + public Builder setSplitId(long value) { + + splitId_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A group can store multiple ranges of keys. Each key range is named by an
    +     * ID (the split ID). Within a group, split IDs are unique. The `split_id`
    +     * names the exact split in `group_uid` where this range is stored.
    +     * 
    + * + * uint64 split_id = 4; + * + * @return This builder for chaining. + */ + public Builder clearSplitId() { + bitField0_ = (bitField0_ & ~0x00000008); + splitId_ = 0L; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString generation_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * `generation` indicates the freshness of the range information contained
    +     * in this proto. Generations can be compared lexicographically; if generation
    +     * A is greater than generation B, then the `Range` corresponding to A is
    +     * newer than the `Range` corresponding to B, and should be used
    +     * preferentially.
    +     * 
    + * + * bytes generation = 5; + * + * @return The generation. + */ + @java.lang.Override + public com.google.protobuf.ByteString getGeneration() { + return generation_; + } + + /** + * + * + *
    +     * `generation` indicates the freshness of the range information contained
    +     * in this proto. Generations can be compared lexicographically; if generation
    +     * A is greater than generation B, then the `Range` corresponding to A is
    +     * newer than the `Range` corresponding to B, and should be used
    +     * preferentially.
    +     * 
    + * + * bytes generation = 5; + * + * @param value The generation to set. + * @return This builder for chaining. + */ + public Builder setGeneration(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + generation_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * `generation` indicates the freshness of the range information contained
    +     * in this proto. Generations can be compared lexicographically; if generation
    +     * A is greater than generation B, then the `Range` corresponding to A is
    +     * newer than the `Range` corresponding to B, and should be used
    +     * preferentially.
    +     * 
    + * + * bytes generation = 5; + * + * @return This builder for chaining. + */ + public Builder clearGeneration() { + bitField0_ = (bitField0_ & ~0x00000010); + generation_ = getDefaultInstance().getGeneration(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.Range) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.Range) + private static final com.google.spanner.v1.Range DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.Range(); + } + + public static com.google.spanner.v1.Range getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Range parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.Range getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RangeOrBuilder.java 
b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RangeOrBuilder.java new file mode 100644 index 000000000000..d4f2488803f0 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RangeOrBuilder.java @@ -0,0 +1,102 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/location.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface RangeOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.Range) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The start key of the range, inclusive. Encoded in "sortable string format"
    +   * (ssformat).
    +   * 
    + * + * bytes start_key = 1; + * + * @return The startKey. + */ + com.google.protobuf.ByteString getStartKey(); + + /** + * + * + *
    +   * The limit key of the range, exclusive. Encoded in "sortable string format"
    +   * (ssformat).
    +   * 
    + * + * bytes limit_key = 2; + * + * @return The limitKey. + */ + com.google.protobuf.ByteString getLimitKey(); + + /** + * + * + *
    +   * The UID of the paxos group where this range is stored. UIDs are unique
    +   * within the database. References `Group.group_uid`.
    +   * 
    + * + * uint64 group_uid = 3; + * + * @return The groupUid. + */ + long getGroupUid(); + + /** + * + * + *
    +   * A group can store multiple ranges of keys. Each key range is named by an
    +   * ID (the split ID). Within a group, split IDs are unique. The `split_id`
    +   * names the exact split in `group_uid` where this range is stored.
    +   * 
    + * + * uint64 split_id = 4; + * + * @return The splitId. + */ + long getSplitId(); + + /** + * + * + *
    +   * `generation` indicates the freshness of the range information contained
    +   * in this proto. Generations can be compared lexicographically; if generation
    +   * A is greater than generation B, then the `Range` corresponding to A is
    +   * newer than the `Range` corresponding to B, and should be used
    +   * preferentially.
    +   * 
    + * + * bytes generation = 5; + * + * @return The generation. + */ + com.google.protobuf.ByteString getGeneration(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ReadRequest.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ReadRequest.java new file mode 100644 index 000000000000..692d4e3f994b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ReadRequest.java @@ -0,0 +1,4281 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * The request for [Read][google.spanner.v1.Spanner.Read] and
    + * [StreamingRead][google.spanner.v1.Spanner.StreamingRead].
    + * 
    + * + * Protobuf type {@code google.spanner.v1.ReadRequest} + */ +@com.google.protobuf.Generated +public final class ReadRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ReadRequest) + ReadRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ReadRequest"); + } + + // Use ReadRequest.newBuilder() to construct. + private ReadRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ReadRequest() { + session_ = ""; + table_ = ""; + index_ = ""; + columns_ = com.google.protobuf.LazyStringArrayList.emptyList(); + resumeToken_ = com.google.protobuf.ByteString.EMPTY; + partitionToken_ = com.google.protobuf.ByteString.EMPTY; + orderBy_ = 0; + lockHint_ = 0; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ReadRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ReadRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ReadRequest.class, + com.google.spanner.v1.ReadRequest.Builder.class); + } + + /** + * + * + *
    +   * An option to control the order in which rows are returned from a read.
    +   * 
    + * + * Protobuf enum {@code google.spanner.v1.ReadRequest.OrderBy} + */ + public enum OrderBy implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Default value.
    +     *
    +     * `ORDER_BY_UNSPECIFIED` is equivalent to `ORDER_BY_PRIMARY_KEY`.
    +     * 
    + * + * ORDER_BY_UNSPECIFIED = 0; + */ + ORDER_BY_UNSPECIFIED(0), + /** + * + * + *
    +     * Read rows are returned in primary key order.
    +     *
    +     * In the event that this option is used in conjunction with the
    +     * `partition_token` field, the API returns an `INVALID_ARGUMENT` error.
    +     * 
    + * + * ORDER_BY_PRIMARY_KEY = 1; + */ + ORDER_BY_PRIMARY_KEY(1), + /** + * + * + *
    +     * Read rows are returned in any order.
    +     * 
    + * + * ORDER_BY_NO_ORDER = 2; + */ + ORDER_BY_NO_ORDER(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "OrderBy"); + } + + /** + * + * + *
    +     * Default value.
    +     *
    +     * `ORDER_BY_UNSPECIFIED` is equivalent to `ORDER_BY_PRIMARY_KEY`.
    +     * 
    + * + * ORDER_BY_UNSPECIFIED = 0; + */ + public static final int ORDER_BY_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * Read rows are returned in primary key order.
    +     *
    +     * In the event that this option is used in conjunction with the
    +     * `partition_token` field, the API returns an `INVALID_ARGUMENT` error.
    +     * 
    + * + * ORDER_BY_PRIMARY_KEY = 1; + */ + public static final int ORDER_BY_PRIMARY_KEY_VALUE = 1; + + /** + * + * + *
    +     * Read rows are returned in any order.
    +     * 
    + * + * ORDER_BY_NO_ORDER = 2; + */ + public static final int ORDER_BY_NO_ORDER_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OrderBy valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static OrderBy forNumber(int value) { + switch (value) { + case 0: + return ORDER_BY_UNSPECIFIED; + case 1: + return ORDER_BY_PRIMARY_KEY; + case 2: + return ORDER_BY_NO_ORDER; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public OrderBy findValueByNumber(int number) { + return OrderBy.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.v1.ReadRequest.getDescriptor().getEnumTypes().get(0); + } + + private static final OrderBy[] VALUES = values(); + + public static OrderBy 
valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private OrderBy(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.v1.ReadRequest.OrderBy) + } + + /** + * + * + *
    +   * A lock hint mechanism for reads done within a transaction.
    +   * 
    + * + * Protobuf enum {@code google.spanner.v1.ReadRequest.LockHint} + */ + public enum LockHint implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Default value.
    +     *
    +     * `LOCK_HINT_UNSPECIFIED` is equivalent to `LOCK_HINT_SHARED`.
    +     * 
    + * + * LOCK_HINT_UNSPECIFIED = 0; + */ + LOCK_HINT_UNSPECIFIED(0), + /** + * + * + *
    +     * Acquire shared locks.
    +     *
    +     * By default when you perform a read as part of a read-write transaction,
    +     * Spanner acquires shared read locks, which allows other reads to still
    +     * access the data until your transaction is ready to commit. When your
    +     * transaction is committing and writes are being applied, the transaction
    +     * attempts to upgrade to an exclusive lock for any data you are writing.
    +     * For more information about locks, see [Lock
    +     * modes](https://cloud.google.com/spanner/docs/introspection/lock-statistics#explain-lock-modes).
    +     * 
    + * + * LOCK_HINT_SHARED = 1; + */ + LOCK_HINT_SHARED(1), + /** + * + * + *
    +     * Acquire exclusive locks.
    +     *
    +     * Requesting exclusive locks is beneficial if you observe high write
    +     * contention, which means you notice that multiple transactions are
    +     * concurrently trying to read and write to the same data, resulting in a
    +     * large number of aborts. This problem occurs when two transactions
    +     * initially acquire shared locks and then both try to upgrade to exclusive
    +     * locks at the same time. In this situation both transactions are waiting
    +     * for the other to give up their lock, resulting in a deadlocked situation.
    +     * Spanner is able to detect this occurring and force one of the
    +     * transactions to abort. However, this is a slow and expensive operation
    +     * and results in lower performance. In this case it makes sense to acquire
    +     * exclusive locks at the start of the transaction because then when
    +     * multiple transactions try to act on the same data, they automatically get
    +     * serialized. Each transaction waits its turn to acquire the lock and
    +     * avoids getting into deadlock situations.
    +     *
    +     * Because the exclusive lock hint is just a hint, it shouldn't be
    +     * considered equivalent to a mutex. In other words, you shouldn't use
    +     * Spanner exclusive locks as a mutual exclusion mechanism for the execution
    +     * of code outside of Spanner.
    +     *
    +     * **Note:** Request exclusive locks judiciously because they block others
    +     * from reading that data for the entire transaction, rather than just when
    +     * the writes are being performed. Unless you observe high write contention,
    +     * you should use the default of shared read locks so you don't prematurely
    +     * block other clients from reading the data that you're writing to.
    +     * 
    + * + * LOCK_HINT_EXCLUSIVE = 2; + */ + LOCK_HINT_EXCLUSIVE(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "LockHint"); + } + + /** + * + * + *
    +     * Default value.
    +     *
    +     * `LOCK_HINT_UNSPECIFIED` is equivalent to `LOCK_HINT_SHARED`.
    +     * 
    + * + * LOCK_HINT_UNSPECIFIED = 0; + */ + public static final int LOCK_HINT_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * Acquire shared locks.
    +     *
    +     * By default when you perform a read as part of a read-write transaction,
    +     * Spanner acquires shared read locks, which allows other reads to still
    +     * access the data until your transaction is ready to commit. When your
    +     * transaction is committing and writes are being applied, the transaction
    +     * attempts to upgrade to an exclusive lock for any data you are writing.
    +     * For more information about locks, see [Lock
    +     * modes](https://cloud.google.com/spanner/docs/introspection/lock-statistics#explain-lock-modes).
    +     * 
    + * + * LOCK_HINT_SHARED = 1; + */ + public static final int LOCK_HINT_SHARED_VALUE = 1; + + /** + * + * + *
    +     * Acquire exclusive locks.
    +     *
    +     * Requesting exclusive locks is beneficial if you observe high write
    +     * contention, which means you notice that multiple transactions are
    +     * concurrently trying to read and write to the same data, resulting in a
    +     * large number of aborts. This problem occurs when two transactions
    +     * initially acquire shared locks and then both try to upgrade to exclusive
    +     * locks at the same time. In this situation both transactions are waiting
    +     * for the other to give up their lock, resulting in a deadlocked situation.
    +     * Spanner is able to detect this occurring and force one of the
    +     * transactions to abort. However, this is a slow and expensive operation
    +     * and results in lower performance. In this case it makes sense to acquire
    +     * exclusive locks at the start of the transaction because then when
    +     * multiple transactions try to act on the same data, they automatically get
    +     * serialized. Each transaction waits its turn to acquire the lock and
    +     * avoids getting into deadlock situations.
    +     *
    +     * Because the exclusive lock hint is just a hint, it shouldn't be
    +     * considered equivalent to a mutex. In other words, you shouldn't use
    +     * Spanner exclusive locks as a mutual exclusion mechanism for the execution
    +     * of code outside of Spanner.
    +     *
    +     * **Note:** Request exclusive locks judiciously because they block others
    +     * from reading that data for the entire transaction, rather than just when
    +     * the writes are being performed. Unless you observe high write contention,
    +     * you should use the default of shared read locks so you don't prematurely
    +     * block other clients from reading the data that you're writing to.
    +     * 
    + * + * LOCK_HINT_EXCLUSIVE = 2; + */ + public static final int LOCK_HINT_EXCLUSIVE_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static LockHint valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static LockHint forNumber(int value) { + switch (value) { + case 0: + return LOCK_HINT_UNSPECIFIED; + case 1: + return LOCK_HINT_SHARED; + case 2: + return LOCK_HINT_EXCLUSIVE; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public LockHint findValueByNumber(int number) { + return LockHint.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.v1.ReadRequest.getDescriptor().getEnumTypes().get(1); + } + + private static final LockHint[] VALUES = values(); + + public static LockHint 
valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private LockHint(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.v1.ReadRequest.LockHint) + } + + private int bitField0_; + public static final int SESSION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object session_ = ""; + + /** + * + * + *
    +   * Required. The session in which the read should be performed.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + @java.lang.Override + public java.lang.String getSession() { + java.lang.Object ref = session_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + session_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The session in which the read should be performed.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSessionBytes() { + java.lang.Object ref = session_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + session_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TRANSACTION_FIELD_NUMBER = 2; + private com.google.spanner.v1.TransactionSelector transaction_; + + /** + * + * + *
    +   * The transaction to use. If none is provided, the default is a
    +   * temporary read-only transaction with strong concurrency.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return Whether the transaction field is set. + */ + @java.lang.Override + public boolean hasTransaction() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * The transaction to use. If none is provided, the default is a
    +   * temporary read-only transaction with strong concurrency.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return The transaction. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionSelector getTransaction() { + return transaction_ == null + ? com.google.spanner.v1.TransactionSelector.getDefaultInstance() + : transaction_; + } + + /** + * + * + *
    +   * The transaction to use. If none is provided, the default is a
    +   * temporary read-only transaction with strong concurrency.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + @java.lang.Override + public com.google.spanner.v1.TransactionSelectorOrBuilder getTransactionOrBuilder() { + return transaction_ == null + ? com.google.spanner.v1.TransactionSelector.getDefaultInstance() + : transaction_; + } + + public static final int TABLE_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object table_ = ""; + + /** + * + * + *
    +   * Required. The name of the table in the database to be read.
    +   * 
    + * + * string table = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The table. + */ + @java.lang.Override + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The name of the table in the database to be read.
    +   * 
    + * + * string table = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for table. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int INDEX_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object index_ = ""; + + /** + * + * + *
    +   * If non-empty, the name of an index on
    +   * [table][google.spanner.v1.ReadRequest.table]. This index is used instead of
    +   * the table primary key when interpreting
    +   * [key_set][google.spanner.v1.ReadRequest.key_set] and sorting result rows.
    +   * See [key_set][google.spanner.v1.ReadRequest.key_set] for further
    +   * information.
    +   * 
    + * + * string index = 4; + * + * @return The index. + */ + @java.lang.Override + public java.lang.String getIndex() { + java.lang.Object ref = index_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + index_ = s; + return s; + } + } + + /** + * + * + *
    +   * If non-empty, the name of an index on
    +   * [table][google.spanner.v1.ReadRequest.table]. This index is used instead of
    +   * the table primary key when interpreting
    +   * [key_set][google.spanner.v1.ReadRequest.key_set] and sorting result rows.
    +   * See [key_set][google.spanner.v1.ReadRequest.key_set] for further
    +   * information.
    +   * 
    + * + * string index = 4; + * + * @return The bytes for index. + */ + @java.lang.Override + public com.google.protobuf.ByteString getIndexBytes() { + java.lang.Object ref = index_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + index_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int COLUMNS_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList columns_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + /** + * + * + *
    +   * Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be
    +   * returned for each row matching this request.
    +   * 
    + * + * repeated string columns = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the columns. + */ + public com.google.protobuf.ProtocolStringList getColumnsList() { + return columns_; + } + + /** + * + * + *
    +   * Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be
    +   * returned for each row matching this request.
    +   * 
    + * + * repeated string columns = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of columns. + */ + public int getColumnsCount() { + return columns_.size(); + } + + /** + * + * + *
    +   * Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be
    +   * returned for each row matching this request.
    +   * 
    + * + * repeated string columns = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The columns at the given index. + */ + public java.lang.String getColumns(int index) { + return columns_.get(index); + } + + /** + * + * + *
    +   * Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be
    +   * returned for each row matching this request.
    +   * 
    + * + * repeated string columns = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the columns at the given index. + */ + public com.google.protobuf.ByteString getColumnsBytes(int index) { + return columns_.getByteString(index); + } + + public static final int KEY_SET_FIELD_NUMBER = 6; + private com.google.spanner.v1.KeySet keySet_; + + /** + * + * + *
    +   * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +   * primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to
    +   * be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present.
    +   * If [index][google.spanner.v1.ReadRequest.index] is present, then
    +   * [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys
    +   * in [index][google.spanner.v1.ReadRequest.index].
    +   *
    +   * If the [partition_token][google.spanner.v1.ReadRequest.partition_token]
    +   * field is empty, rows are yielded in table primary key order (if
    +   * [index][google.spanner.v1.ReadRequest.index] is empty) or index key order
    +   * (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the
    +   * [partition_token][google.spanner.v1.ReadRequest.partition_token] field
    +   * isn't empty, rows are yielded in an unspecified order.
    +   *
    +   * It isn't an error for the `key_set` to name rows that don't
    +   * exist in the database. Read yields nothing for nonexistent rows.
    +   * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the keySet field is set. + */ + @java.lang.Override + public boolean hasKeySet() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +   * primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to
    +   * be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present.
    +   * If [index][google.spanner.v1.ReadRequest.index] is present, then
    +   * [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys
    +   * in [index][google.spanner.v1.ReadRequest.index].
    +   *
    +   * If the [partition_token][google.spanner.v1.ReadRequest.partition_token]
    +   * field is empty, rows are yielded in table primary key order (if
    +   * [index][google.spanner.v1.ReadRequest.index] is empty) or index key order
    +   * (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the
    +   * [partition_token][google.spanner.v1.ReadRequest.partition_token] field
    +   * isn't empty, rows are yielded in an unspecified order.
    +   *
    +   * It isn't an error for the `key_set` to name rows that don't
    +   * exist in the database. Read yields nothing for nonexistent rows.
    +   * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The keySet. + */ + @java.lang.Override + public com.google.spanner.v1.KeySet getKeySet() { + return keySet_ == null ? com.google.spanner.v1.KeySet.getDefaultInstance() : keySet_; + } + + /** + * + * + *
    +   * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +   * primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to
    +   * be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present.
    +   * If [index][google.spanner.v1.ReadRequest.index] is present, then
    +   * [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys
    +   * in [index][google.spanner.v1.ReadRequest.index].
    +   *
    +   * If the [partition_token][google.spanner.v1.ReadRequest.partition_token]
    +   * field is empty, rows are yielded in table primary key order (if
    +   * [index][google.spanner.v1.ReadRequest.index] is empty) or index key order
    +   * (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the
    +   * [partition_token][google.spanner.v1.ReadRequest.partition_token] field
    +   * isn't empty, rows are yielded in an unspecified order.
    +   *
    +   * It isn't an error for the `key_set` to name rows that don't
    +   * exist in the database. Read yields nothing for nonexistent rows.
    +   * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + */ + @java.lang.Override + public com.google.spanner.v1.KeySetOrBuilder getKeySetOrBuilder() { + return keySet_ == null ? com.google.spanner.v1.KeySet.getDefaultInstance() : keySet_; + } + + public static final int LIMIT_FIELD_NUMBER = 8; + private long limit_ = 0L; + + /** + * + * + *
    +   * If greater than zero, only the first `limit` rows are yielded. If `limit`
    +   * is zero, the default is no limit. A limit can't be specified if
    +   * `partition_token` is set.
    +   * 
    + * + * int64 limit = 8; + * + * @return The limit. + */ + @java.lang.Override + public long getLimit() { + return limit_; + } + + public static final int RESUME_TOKEN_FIELD_NUMBER = 9; + private com.google.protobuf.ByteString resumeToken_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * If this request is resuming a previously interrupted read,
    +   * `resume_token` should be copied from the last
    +   * [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the
    +   * interruption. Doing this enables the new read to resume where the last read
    +   * left off. The rest of the request parameters must exactly match the request
    +   * that yielded this token.
    +   * 
    + * + * bytes resume_token = 9; + * + * @return The resumeToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getResumeToken() { + return resumeToken_; + } + + public static final int PARTITION_TOKEN_FIELD_NUMBER = 10; + private com.google.protobuf.ByteString partitionToken_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * If present, results are restricted to the specified partition
    +   * previously created using `PartitionRead`. There must be an exact
    +   * match for the values of fields common to this message and the
    +   * PartitionReadRequest message used to create this partition_token.
    +   * 
    + * + * bytes partition_token = 10; + * + * @return The partitionToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPartitionToken() { + return partitionToken_; + } + + public static final int REQUEST_OPTIONS_FIELD_NUMBER = 11; + private com.google.spanner.v1.RequestOptions requestOptions_; + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + * + * @return Whether the requestOptions field is set. + */ + @java.lang.Override + public boolean hasRequestOptions() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + * + * @return The requestOptions. + */ + @java.lang.Override + public com.google.spanner.v1.RequestOptions getRequestOptions() { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + */ + @java.lang.Override + public com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder() { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } + + public static final int DIRECTED_READ_OPTIONS_FIELD_NUMBER = 14; + private com.google.spanner.v1.DirectedReadOptions directedReadOptions_; + + /** + * + * + *
    +   * Directed read options for this request.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 14; + * + * @return Whether the directedReadOptions field is set. + */ + @java.lang.Override + public boolean hasDirectedReadOptions() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +   * Directed read options for this request.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 14; + * + * @return The directedReadOptions. + */ + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptions getDirectedReadOptions() { + return directedReadOptions_ == null + ? com.google.spanner.v1.DirectedReadOptions.getDefaultInstance() + : directedReadOptions_; + } + + /** + * + * + *
    +   * Directed read options for this request.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 14; + */ + @java.lang.Override + public com.google.spanner.v1.DirectedReadOptionsOrBuilder getDirectedReadOptionsOrBuilder() { + return directedReadOptions_ == null + ? com.google.spanner.v1.DirectedReadOptions.getDefaultInstance() + : directedReadOptions_; + } + + public static final int DATA_BOOST_ENABLED_FIELD_NUMBER = 15; + private boolean dataBoostEnabled_ = false; + + /** + * + * + *
    +   * If this is for a partitioned read and this field is set to `true`, the
    +   * request is executed with Spanner Data Boost independent compute resources.
    +   *
    +   * If the field is set to `true` but the request doesn't set
    +   * `partition_token`, the API returns an `INVALID_ARGUMENT` error.
    +   * 
    + * + * bool data_boost_enabled = 15; + * + * @return The dataBoostEnabled. + */ + @java.lang.Override + public boolean getDataBoostEnabled() { + return dataBoostEnabled_; + } + + public static final int ORDER_BY_FIELD_NUMBER = 16; + private int orderBy_ = 0; + + /** + * + * + *
    +   * Optional. Order for the returned rows.
    +   *
    +   * By default, Spanner returns result rows in primary key order except for
    +   * PartitionRead requests. For applications that don't require rows to be
    +   * returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting
    +   * `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval,
    +   * resulting in lower latencies in certain cases (for example, bulk point
    +   * lookups).
    +   * 
    + * + * + * .google.spanner.v1.ReadRequest.OrderBy order_by = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for orderBy. + */ + @java.lang.Override + public int getOrderByValue() { + return orderBy_; + } + + /** + * + * + *
    +   * Optional. Order for the returned rows.
    +   *
    +   * By default, Spanner returns result rows in primary key order except for
    +   * PartitionRead requests. For applications that don't require rows to be
    +   * returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting
    +   * `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval,
    +   * resulting in lower latencies in certain cases (for example, bulk point
    +   * lookups).
    +   * 
    + * + * + * .google.spanner.v1.ReadRequest.OrderBy order_by = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The orderBy. + */ + @java.lang.Override + public com.google.spanner.v1.ReadRequest.OrderBy getOrderBy() { + com.google.spanner.v1.ReadRequest.OrderBy result = + com.google.spanner.v1.ReadRequest.OrderBy.forNumber(orderBy_); + return result == null ? com.google.spanner.v1.ReadRequest.OrderBy.UNRECOGNIZED : result; + } + + public static final int LOCK_HINT_FIELD_NUMBER = 17; + private int lockHint_ = 0; + + /** + * + * + *
    +   * Optional. Lock Hint for the request, it can only be used with read-write
    +   * transactions.
    +   * 
    + * + * + * .google.spanner.v1.ReadRequest.LockHint lock_hint = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for lockHint. + */ + @java.lang.Override + public int getLockHintValue() { + return lockHint_; + } + + /** + * + * + *
    +   * Optional. Lock Hint for the request, it can only be used with read-write
    +   * transactions.
    +   * 
    + * + * + * .google.spanner.v1.ReadRequest.LockHint lock_hint = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The lockHint. + */ + @java.lang.Override + public com.google.spanner.v1.ReadRequest.LockHint getLockHint() { + com.google.spanner.v1.ReadRequest.LockHint result = + com.google.spanner.v1.ReadRequest.LockHint.forNumber(lockHint_); + return result == null ? com.google.spanner.v1.ReadRequest.LockHint.UNRECOGNIZED : result; + } + + public static final int ROUTING_HINT_FIELD_NUMBER = 18; + private com.google.spanner.v1.RoutingHint routingHint_; + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the routingHint field is set. + */ + @java.lang.Override + public boolean hasRoutingHint() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The routingHint. + */ + @java.lang.Override + public com.google.spanner.v1.RoutingHint getRoutingHint() { + return routingHint_ == null + ? com.google.spanner.v1.RoutingHint.getDefaultInstance() + : routingHint_; + } + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.RoutingHintOrBuilder getRoutingHintOrBuilder() { + return routingHint_ == null + ? com.google.spanner.v1.RoutingHint.getDefaultInstance() + : routingHint_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(session_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, session_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getTransaction()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(table_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, table_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(index_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 4, index_); + } + for (int i = 0; i < columns_.size(); i++) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, columns_.getRaw(i)); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(6, getKeySet()); + } + if (limit_ != 0L) { + output.writeInt64(8, limit_); + } + if (!resumeToken_.isEmpty()) { + output.writeBytes(9, resumeToken_); + } + if (!partitionToken_.isEmpty()) { + output.writeBytes(10, partitionToken_); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(11, getRequestOptions()); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(14, getDirectedReadOptions()); + } + if (dataBoostEnabled_ != false) { + output.writeBool(15, dataBoostEnabled_); + } + if (orderBy_ != 
com.google.spanner.v1.ReadRequest.OrderBy.ORDER_BY_UNSPECIFIED.getNumber()) { + output.writeEnum(16, orderBy_); + } + if (lockHint_ != com.google.spanner.v1.ReadRequest.LockHint.LOCK_HINT_UNSPECIFIED.getNumber()) { + output.writeEnum(17, lockHint_); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeMessage(18, getRoutingHint()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(session_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, session_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getTransaction()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(table_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, table_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(index_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(4, index_); + } + { + int dataSize = 0; + for (int i = 0; i < columns_.size(); i++) { + dataSize += computeStringSizeNoTag(columns_.getRaw(i)); + } + size += dataSize; + size += 1 * getColumnsList().size(); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, getKeySet()); + } + if (limit_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(8, limit_); + } + if (!resumeToken_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(9, resumeToken_); + } + if (!partitionToken_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(10, partitionToken_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(11, getRequestOptions()); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += + 
com.google.protobuf.CodedOutputStream.computeMessageSize(14, getDirectedReadOptions()); + } + if (dataBoostEnabled_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(15, dataBoostEnabled_); + } + if (orderBy_ != com.google.spanner.v1.ReadRequest.OrderBy.ORDER_BY_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(16, orderBy_); + } + if (lockHint_ != com.google.spanner.v1.ReadRequest.LockHint.LOCK_HINT_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(17, lockHint_); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(18, getRoutingHint()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.ReadRequest)) { + return super.equals(obj); + } + com.google.spanner.v1.ReadRequest other = (com.google.spanner.v1.ReadRequest) obj; + + if (!getSession().equals(other.getSession())) return false; + if (hasTransaction() != other.hasTransaction()) return false; + if (hasTransaction()) { + if (!getTransaction().equals(other.getTransaction())) return false; + } + if (!getTable().equals(other.getTable())) return false; + if (!getIndex().equals(other.getIndex())) return false; + if (!getColumnsList().equals(other.getColumnsList())) return false; + if (hasKeySet() != other.hasKeySet()) return false; + if (hasKeySet()) { + if (!getKeySet().equals(other.getKeySet())) return false; + } + if (getLimit() != other.getLimit()) return false; + if (!getResumeToken().equals(other.getResumeToken())) return false; + if (!getPartitionToken().equals(other.getPartitionToken())) return false; + if (hasRequestOptions() != other.hasRequestOptions()) return false; + if (hasRequestOptions()) { + if 
(!getRequestOptions().equals(other.getRequestOptions())) return false; + } + if (hasDirectedReadOptions() != other.hasDirectedReadOptions()) return false; + if (hasDirectedReadOptions()) { + if (!getDirectedReadOptions().equals(other.getDirectedReadOptions())) return false; + } + if (getDataBoostEnabled() != other.getDataBoostEnabled()) return false; + if (orderBy_ != other.orderBy_) return false; + if (lockHint_ != other.lockHint_) return false; + if (hasRoutingHint() != other.hasRoutingHint()) return false; + if (hasRoutingHint()) { + if (!getRoutingHint().equals(other.getRoutingHint())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SESSION_FIELD_NUMBER; + hash = (53 * hash) + getSession().hashCode(); + if (hasTransaction()) { + hash = (37 * hash) + TRANSACTION_FIELD_NUMBER; + hash = (53 * hash) + getTransaction().hashCode(); + } + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + hash = (37 * hash) + INDEX_FIELD_NUMBER; + hash = (53 * hash) + getIndex().hashCode(); + if (getColumnsCount() > 0) { + hash = (37 * hash) + COLUMNS_FIELD_NUMBER; + hash = (53 * hash) + getColumnsList().hashCode(); + } + if (hasKeySet()) { + hash = (37 * hash) + KEY_SET_FIELD_NUMBER; + hash = (53 * hash) + getKeySet().hashCode(); + } + hash = (37 * hash) + LIMIT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getLimit()); + hash = (37 * hash) + RESUME_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getResumeToken().hashCode(); + hash = (37 * hash) + PARTITION_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPartitionToken().hashCode(); + if (hasRequestOptions()) { + hash = (37 * hash) + REQUEST_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + 
getRequestOptions().hashCode(); + } + if (hasDirectedReadOptions()) { + hash = (37 * hash) + DIRECTED_READ_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getDirectedReadOptions().hashCode(); + } + hash = (37 * hash) + DATA_BOOST_ENABLED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getDataBoostEnabled()); + hash = (37 * hash) + ORDER_BY_FIELD_NUMBER; + hash = (53 * hash) + orderBy_; + hash = (37 * hash) + LOCK_HINT_FIELD_NUMBER; + hash = (53 * hash) + lockHint_; + if (hasRoutingHint()) { + hash = (37 * hash) + ROUTING_HINT_FIELD_NUMBER; + hash = (53 * hash) + getRoutingHint().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ReadRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ReadRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ReadRequest parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ReadRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ReadRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ReadRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ReadRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ReadRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ReadRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ReadRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ReadRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ReadRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.ReadRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); 
+ } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for [Read][google.spanner.v1.Spanner.Read] and
    +   * [StreamingRead][google.spanner.v1.Spanner.StreamingRead].
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.ReadRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ReadRequest) + com.google.spanner.v1.ReadRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ReadRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ReadRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ReadRequest.class, + com.google.spanner.v1.ReadRequest.Builder.class); + } + + // Construct using com.google.spanner.v1.ReadRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetTransactionFieldBuilder(); + internalGetKeySetFieldBuilder(); + internalGetRequestOptionsFieldBuilder(); + internalGetDirectedReadOptionsFieldBuilder(); + internalGetRoutingHintFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + session_ = ""; + transaction_ = null; + if (transactionBuilder_ != null) { + transactionBuilder_.dispose(); + transactionBuilder_ = null; + } + table_ = ""; + index_ = ""; + columns_ = com.google.protobuf.LazyStringArrayList.emptyList(); + keySet_ = null; + if (keySetBuilder_ != null) { + keySetBuilder_.dispose(); + keySetBuilder_ = null; + } + limit_ = 0L; + resumeToken_ = com.google.protobuf.ByteString.EMPTY; + partitionToken_ = 
com.google.protobuf.ByteString.EMPTY; + requestOptions_ = null; + if (requestOptionsBuilder_ != null) { + requestOptionsBuilder_.dispose(); + requestOptionsBuilder_ = null; + } + directedReadOptions_ = null; + if (directedReadOptionsBuilder_ != null) { + directedReadOptionsBuilder_.dispose(); + directedReadOptionsBuilder_ = null; + } + dataBoostEnabled_ = false; + orderBy_ = 0; + lockHint_ = 0; + routingHint_ = null; + if (routingHintBuilder_ != null) { + routingHintBuilder_.dispose(); + routingHintBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_ReadRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.ReadRequest getDefaultInstanceForType() { + return com.google.spanner.v1.ReadRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ReadRequest build() { + com.google.spanner.v1.ReadRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.ReadRequest buildPartial() { + com.google.spanner.v1.ReadRequest result = new com.google.spanner.v1.ReadRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.ReadRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.session_ = session_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.transaction_ = + transactionBuilder_ == null ? 
transaction_ : transactionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.table_ = table_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.index_ = index_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + columns_.makeImmutable(); + result.columns_ = columns_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.keySet_ = keySetBuilder_ == null ? keySet_ : keySetBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.limit_ = limit_; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.resumeToken_ = resumeToken_; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.partitionToken_ = partitionToken_; + } + if (((from_bitField0_ & 0x00000200) != 0)) { + result.requestOptions_ = + requestOptionsBuilder_ == null ? requestOptions_ : requestOptionsBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000400) != 0)) { + result.directedReadOptions_ = + directedReadOptionsBuilder_ == null + ? directedReadOptions_ + : directedReadOptionsBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + if (((from_bitField0_ & 0x00000800) != 0)) { + result.dataBoostEnabled_ = dataBoostEnabled_; + } + if (((from_bitField0_ & 0x00001000) != 0)) { + result.orderBy_ = orderBy_; + } + if (((from_bitField0_ & 0x00002000) != 0)) { + result.lockHint_ = lockHint_; + } + if (((from_bitField0_ & 0x00004000) != 0)) { + result.routingHint_ = + routingHintBuilder_ == null ? 
routingHint_ : routingHintBuilder_.build(); + to_bitField0_ |= 0x00000010; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.ReadRequest) { + return mergeFrom((com.google.spanner.v1.ReadRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.ReadRequest other) { + if (other == com.google.spanner.v1.ReadRequest.getDefaultInstance()) return this; + if (!other.getSession().isEmpty()) { + session_ = other.session_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasTransaction()) { + mergeTransaction(other.getTransaction()); + } + if (!other.getTable().isEmpty()) { + table_ = other.table_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (!other.getIndex().isEmpty()) { + index_ = other.index_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (!other.columns_.isEmpty()) { + if (columns_.isEmpty()) { + columns_ = other.columns_; + bitField0_ |= 0x00000010; + } else { + ensureColumnsIsMutable(); + columns_.addAll(other.columns_); + } + onChanged(); + } + if (other.hasKeySet()) { + mergeKeySet(other.getKeySet()); + } + if (other.getLimit() != 0L) { + setLimit(other.getLimit()); + } + if (!other.getResumeToken().isEmpty()) { + setResumeToken(other.getResumeToken()); + } + if (!other.getPartitionToken().isEmpty()) { + setPartitionToken(other.getPartitionToken()); + } + if (other.hasRequestOptions()) { + mergeRequestOptions(other.getRequestOptions()); + } + if (other.hasDirectedReadOptions()) { + mergeDirectedReadOptions(other.getDirectedReadOptions()); + } + if (other.getDataBoostEnabled() != false) { + setDataBoostEnabled(other.getDataBoostEnabled()); + } + if (other.orderBy_ != 0) { + setOrderByValue(other.getOrderByValue()); + } + if (other.lockHint_ != 0) { + setLockHintValue(other.getLockHintValue()); + } + if (other.hasRoutingHint()) { + 
mergeRoutingHint(other.getRoutingHint()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + session_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetTransactionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + table_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + index_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureColumnsIsMutable(); + columns_.add(s); + break; + } // case 42 + case 50: + { + input.readMessage(internalGetKeySetFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000020; + break; + } // case 50 + case 64: + { + limit_ = input.readInt64(); + bitField0_ |= 0x00000040; + break; + } // case 64 + case 74: + { + resumeToken_ = input.readBytes(); + bitField0_ |= 0x00000080; + break; + } // case 74 + case 82: + { + partitionToken_ = input.readBytes(); + bitField0_ |= 0x00000100; + break; + } // case 82 + case 90: + { + input.readMessage( + internalGetRequestOptionsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000200; + break; + } // case 90 + case 114: + { + input.readMessage( + internalGetDirectedReadOptionsFieldBuilder().getBuilder(), extensionRegistry); + 
bitField0_ |= 0x00000400; + break; + } // case 114 + case 120: + { + dataBoostEnabled_ = input.readBool(); + bitField0_ |= 0x00000800; + break; + } // case 120 + case 128: + { + orderBy_ = input.readEnum(); + bitField0_ |= 0x00001000; + break; + } // case 128 + case 136: + { + lockHint_ = input.readEnum(); + bitField0_ |= 0x00002000; + break; + } // case 136 + case 146: + { + input.readMessage( + internalGetRoutingHintFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00004000; + break; + } // case 146 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object session_ = ""; + + /** + * + * + *
    +     * Required. The session in which the read should be performed.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + public java.lang.String getSession() { + java.lang.Object ref = session_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + session_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The session in which the read should be performed.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + public com.google.protobuf.ByteString getSessionBytes() { + java.lang.Object ref = session_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + session_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The session in which the read should be performed.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The session to set. + * @return This builder for chaining. + */ + public Builder setSession(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + session_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The session in which the read should be performed.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearSession() { + session_ = getDefaultInstance().getSession(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The session in which the read should be performed.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for session to set. + * @return This builder for chaining. + */ + public Builder setSessionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + session_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.spanner.v1.TransactionSelector transaction_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionSelector, + com.google.spanner.v1.TransactionSelector.Builder, + com.google.spanner.v1.TransactionSelectorOrBuilder> + transactionBuilder_; + + /** + * + * + *
    +     * The transaction to use. If none is provided, the default is a
    +     * temporary read-only transaction with strong concurrency.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return Whether the transaction field is set. + */ + public boolean hasTransaction() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * The transaction to use. If none is provided, the default is a
    +     * temporary read-only transaction with strong concurrency.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return The transaction. + */ + public com.google.spanner.v1.TransactionSelector getTransaction() { + if (transactionBuilder_ == null) { + return transaction_ == null + ? com.google.spanner.v1.TransactionSelector.getDefaultInstance() + : transaction_; + } else { + return transactionBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * The transaction to use. If none is provided, the default is a
    +     * temporary read-only transaction with strong concurrency.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public Builder setTransaction(com.google.spanner.v1.TransactionSelector value) { + if (transactionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + transaction_ = value; + } else { + transactionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The transaction to use. If none is provided, the default is a
    +     * temporary read-only transaction with strong concurrency.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public Builder setTransaction( + com.google.spanner.v1.TransactionSelector.Builder builderForValue) { + if (transactionBuilder_ == null) { + transaction_ = builderForValue.build(); + } else { + transactionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The transaction to use. If none is provided, the default is a
    +     * temporary read-only transaction with strong concurrency.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public Builder mergeTransaction(com.google.spanner.v1.TransactionSelector value) { + if (transactionBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && transaction_ != null + && transaction_ != com.google.spanner.v1.TransactionSelector.getDefaultInstance()) { + getTransactionBuilder().mergeFrom(value); + } else { + transaction_ = value; + } + } else { + transactionBuilder_.mergeFrom(value); + } + if (transaction_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The transaction to use. If none is provided, the default is a
    +     * temporary read-only transaction with strong concurrency.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public Builder clearTransaction() { + bitField0_ = (bitField0_ & ~0x00000002); + transaction_ = null; + if (transactionBuilder_ != null) { + transactionBuilder_.dispose(); + transactionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * The transaction to use. If none is provided, the default is a
    +     * temporary read-only transaction with strong concurrency.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public com.google.spanner.v1.TransactionSelector.Builder getTransactionBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetTransactionFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * The transaction to use. If none is provided, the default is a
    +     * temporary read-only transaction with strong concurrency.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + public com.google.spanner.v1.TransactionSelectorOrBuilder getTransactionOrBuilder() { + if (transactionBuilder_ != null) { + return transactionBuilder_.getMessageOrBuilder(); + } else { + return transaction_ == null + ? com.google.spanner.v1.TransactionSelector.getDefaultInstance() + : transaction_; + } + } + + /** + * + * + *
    +     * The transaction to use. If none is provided, the default is a
    +     * temporary read-only transaction with strong concurrency.
    +     * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionSelector, + com.google.spanner.v1.TransactionSelector.Builder, + com.google.spanner.v1.TransactionSelectorOrBuilder> + internalGetTransactionFieldBuilder() { + if (transactionBuilder_ == null) { + transactionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionSelector, + com.google.spanner.v1.TransactionSelector.Builder, + com.google.spanner.v1.TransactionSelectorOrBuilder>( + getTransaction(), getParentForChildren(), isClean()); + transaction_ = null; + } + return transactionBuilder_; + } + + private java.lang.Object table_ = ""; + + /** + * + * + *
    +     * Required. The name of the table in the database to be read.
    +     * 
    + * + * string table = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The table. + */ + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the table in the database to be read.
    +     * 
    + * + * string table = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for table. + */ + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The name of the table in the database to be read.
    +     * 
    + * + * string table = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The table to set. + * @return This builder for chaining. + */ + public Builder setTable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the table in the database to be read.
    +     * 
    + * + * string table = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearTable() { + table_ = getDefaultInstance().getTable(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The name of the table in the database to be read.
    +     * 
    + * + * string table = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for table to set. + * @return This builder for chaining. + */ + public Builder setTableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + table_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private java.lang.Object index_ = ""; + + /** + * + * + *
    +     * If non-empty, the name of an index on
    +     * [table][google.spanner.v1.ReadRequest.table]. This index is used instead of
    +     * the table primary key when interpreting
    +     * [key_set][google.spanner.v1.ReadRequest.key_set] and sorting result rows.
    +     * See [key_set][google.spanner.v1.ReadRequest.key_set] for further
    +     * information.
    +     * 
    + * + * string index = 4; + * + * @return The index. + */ + public java.lang.String getIndex() { + java.lang.Object ref = index_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + index_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If non-empty, the name of an index on
    +     * [table][google.spanner.v1.ReadRequest.table]. This index is used instead of
    +     * the table primary key when interpreting
    +     * [key_set][google.spanner.v1.ReadRequest.key_set] and sorting result rows.
    +     * See [key_set][google.spanner.v1.ReadRequest.key_set] for further
    +     * information.
    +     * 
    + * + * string index = 4; + * + * @return The bytes for index. + */ + public com.google.protobuf.ByteString getIndexBytes() { + java.lang.Object ref = index_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + index_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If non-empty, the name of an index on
    +     * [table][google.spanner.v1.ReadRequest.table]. This index is used instead of
    +     * the table primary key when interpreting
    +     * [key_set][google.spanner.v1.ReadRequest.key_set] and sorting result rows.
    +     * See [key_set][google.spanner.v1.ReadRequest.key_set] for further
    +     * information.
    +     * 
    + * + * string index = 4; + * + * @param value The index to set. + * @return This builder for chaining. + */ + public Builder setIndex(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + index_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, the name of an index on
    +     * [table][google.spanner.v1.ReadRequest.table]. This index is used instead of
    +     * the table primary key when interpreting
    +     * [key_set][google.spanner.v1.ReadRequest.key_set] and sorting result rows.
    +     * See [key_set][google.spanner.v1.ReadRequest.key_set] for further
    +     * information.
    +     * 
    + * + * string index = 4; + * + * @return This builder for chaining. + */ + public Builder clearIndex() { + index_ = getDefaultInstance().getIndex(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If non-empty, the name of an index on
    +     * [table][google.spanner.v1.ReadRequest.table]. This index is used instead of
    +     * the table primary key when interpreting
    +     * [key_set][google.spanner.v1.ReadRequest.key_set] and sorting result rows.
    +     * See [key_set][google.spanner.v1.ReadRequest.key_set] for further
    +     * information.
    +     * 
    + * + * string index = 4; + * + * @param value The bytes for index to set. + * @return This builder for chaining. + */ + public Builder setIndexBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + index_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList columns_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureColumnsIsMutable() { + if (!columns_.isModifiable()) { + columns_ = new com.google.protobuf.LazyStringArrayList(columns_); + } + bitField0_ |= 0x00000010; + } + + /** + * + * + *
    +     * Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be
    +     * returned for each row matching this request.
    +     * 
    + * + * repeated string columns = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the columns. + */ + public com.google.protobuf.ProtocolStringList getColumnsList() { + columns_.makeImmutable(); + return columns_; + } + + /** + * + * + *
    +     * Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be
    +     * returned for each row matching this request.
    +     * 
    + * + * repeated string columns = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of columns. + */ + public int getColumnsCount() { + return columns_.size(); + } + + /** + * + * + *
    +     * Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be
    +     * returned for each row matching this request.
    +     * 
    + * + * repeated string columns = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The columns at the given index. + */ + public java.lang.String getColumns(int index) { + return columns_.get(index); + } + + /** + * + * + *
    +     * Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be
    +     * returned for each row matching this request.
    +     * 
    + * + * repeated string columns = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the columns at the given index. + */ + public com.google.protobuf.ByteString getColumnsBytes(int index) { + return columns_.getByteString(index); + } + + /** + * + * + *
    +     * Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be
    +     * returned for each row matching this request.
    +     * 
    + * + * repeated string columns = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index to set the value at. + * @param value The columns to set. + * @return This builder for chaining. + */ + public Builder setColumns(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnsIsMutable(); + columns_.set(index, value); + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be
    +     * returned for each row matching this request.
    +     * 
    + * + * repeated string columns = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The columns to add. + * @return This builder for chaining. + */ + public Builder addColumns(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnsIsMutable(); + columns_.add(value); + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be
    +     * returned for each row matching this request.
    +     * 
    + * + * repeated string columns = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @param values The columns to add. + * @return This builder for chaining. + */ + public Builder addAllColumns(java.lang.Iterable values) { + ensureColumnsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, columns_); + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be
    +     * returned for each row matching this request.
    +     * 
    + * + * repeated string columns = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearColumns() { + columns_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + ; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be
    +     * returned for each row matching this request.
    +     * 
    + * + * repeated string columns = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes of the columns to add. + * @return This builder for chaining. + */ + public Builder addColumnsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureColumnsIsMutable(); + columns_.add(value); + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + private com.google.spanner.v1.KeySet keySet_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.KeySet, + com.google.spanner.v1.KeySet.Builder, + com.google.spanner.v1.KeySetOrBuilder> + keySetBuilder_; + + /** + * + * + *
    +     * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +     * primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to
    +     * be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present.
    +     * If [index][google.spanner.v1.ReadRequest.index] is present, then
    +     * [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys
    +     * in [index][google.spanner.v1.ReadRequest.index].
    +     *
    +     * If the [partition_token][google.spanner.v1.ReadRequest.partition_token]
    +     * field is empty, rows are yielded in table primary key order (if
    +     * [index][google.spanner.v1.ReadRequest.index] is empty) or index key order
    +     * (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the
    +     * [partition_token][google.spanner.v1.ReadRequest.partition_token] field
    +     * isn't empty, rows are yielded in an unspecified order.
    +     *
    +     * It isn't an error for the `key_set` to name rows that don't
    +     * exist in the database. Read yields nothing for nonexistent rows.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the keySet field is set. + */ + public boolean hasKeySet() { + return ((bitField0_ & 0x00000020) != 0); + } + + /** + * + * + *
    +     * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +     * primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to
    +     * be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present.
    +     * If [index][google.spanner.v1.ReadRequest.index] is present, then
    +     * [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys
    +     * in [index][google.spanner.v1.ReadRequest.index].
    +     *
    +     * If the [partition_token][google.spanner.v1.ReadRequest.partition_token]
    +     * field is empty, rows are yielded in table primary key order (if
    +     * [index][google.spanner.v1.ReadRequest.index] is empty) or index key order
    +     * (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the
    +     * [partition_token][google.spanner.v1.ReadRequest.partition_token] field
    +     * isn't empty, rows are yielded in an unspecified order.
    +     *
    +     * It isn't an error for the `key_set` to name rows that don't
    +     * exist in the database. Read yields nothing for nonexistent rows.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The keySet. + */ + public com.google.spanner.v1.KeySet getKeySet() { + if (keySetBuilder_ == null) { + return keySet_ == null ? com.google.spanner.v1.KeySet.getDefaultInstance() : keySet_; + } else { + return keySetBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +     * primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to
    +     * be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present.
    +     * If [index][google.spanner.v1.ReadRequest.index] is present, then
    +     * [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys
    +     * in [index][google.spanner.v1.ReadRequest.index].
    +     *
    +     * If the [partition_token][google.spanner.v1.ReadRequest.partition_token]
    +     * field is empty, rows are yielded in table primary key order (if
    +     * [index][google.spanner.v1.ReadRequest.index] is empty) or index key order
    +     * (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the
    +     * [partition_token][google.spanner.v1.ReadRequest.partition_token] field
    +     * isn't empty, rows are yielded in an unspecified order.
    +     *
    +     * It isn't an error for the `key_set` to name rows that don't
    +     * exist in the database. Read yields nothing for nonexistent rows.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder setKeySet(com.google.spanner.v1.KeySet value) { + if (keySetBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + keySet_ = value; + } else { + keySetBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +     * primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to
    +     * be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present.
    +     * If [index][google.spanner.v1.ReadRequest.index] is present, then
    +     * [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys
    +     * in [index][google.spanner.v1.ReadRequest.index].
    +     *
    +     * If the [partition_token][google.spanner.v1.ReadRequest.partition_token]
    +     * field is empty, rows are yielded in table primary key order (if
    +     * [index][google.spanner.v1.ReadRequest.index] is empty) or index key order
    +     * (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the
    +     * [partition_token][google.spanner.v1.ReadRequest.partition_token] field
    +     * isn't empty, rows are yielded in an unspecified order.
    +     *
    +     * It isn't an error for the `key_set` to name rows that don't
    +     * exist in the database. Read yields nothing for nonexistent rows.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder setKeySet(com.google.spanner.v1.KeySet.Builder builderForValue) { + if (keySetBuilder_ == null) { + keySet_ = builderForValue.build(); + } else { + keySetBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +     * primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to
    +     * be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present.
    +     * If [index][google.spanner.v1.ReadRequest.index] is present, then
    +     * [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys
    +     * in [index][google.spanner.v1.ReadRequest.index].
    +     *
    +     * If the [partition_token][google.spanner.v1.ReadRequest.partition_token]
    +     * field is empty, rows are yielded in table primary key order (if
    +     * [index][google.spanner.v1.ReadRequest.index] is empty) or index key order
    +     * (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the
    +     * [partition_token][google.spanner.v1.ReadRequest.partition_token] field
    +     * isn't empty, rows are yielded in an unspecified order.
    +     *
    +     * It isn't an error for the `key_set` to name rows that don't
    +     * exist in the database. Read yields nothing for nonexistent rows.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder mergeKeySet(com.google.spanner.v1.KeySet value) { + if (keySetBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0) + && keySet_ != null + && keySet_ != com.google.spanner.v1.KeySet.getDefaultInstance()) { + getKeySetBuilder().mergeFrom(value); + } else { + keySet_ = value; + } + } else { + keySetBuilder_.mergeFrom(value); + } + if (keySet_ != null) { + bitField0_ |= 0x00000020; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +     * primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to
    +     * be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present.
    +     * If [index][google.spanner.v1.ReadRequest.index] is present, then
    +     * [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys
    +     * in [index][google.spanner.v1.ReadRequest.index].
    +     *
    +     * If the [partition_token][google.spanner.v1.ReadRequest.partition_token]
    +     * field is empty, rows are yielded in table primary key order (if
    +     * [index][google.spanner.v1.ReadRequest.index] is empty) or index key order
    +     * (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the
    +     * [partition_token][google.spanner.v1.ReadRequest.partition_token] field
    +     * isn't empty, rows are yielded in an unspecified order.
    +     *
    +     * It isn't an error for the `key_set` to name rows that don't
    +     * exist in the database. Read yields nothing for nonexistent rows.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + */ + public Builder clearKeySet() { + bitField0_ = (bitField0_ & ~0x00000020); + keySet_ = null; + if (keySetBuilder_ != null) { + keySetBuilder_.dispose(); + keySetBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +     * primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to
    +     * be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present.
    +     * If [index][google.spanner.v1.ReadRequest.index] is present, then
    +     * [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys
    +     * in [index][google.spanner.v1.ReadRequest.index].
    +     *
    +     * If the [partition_token][google.spanner.v1.ReadRequest.partition_token]
    +     * field is empty, rows are yielded in table primary key order (if
    +     * [index][google.spanner.v1.ReadRequest.index] is empty) or index key order
    +     * (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the
    +     * [partition_token][google.spanner.v1.ReadRequest.partition_token] field
    +     * isn't empty, rows are yielded in an unspecified order.
    +     *
    +     * It isn't an error for the `key_set` to name rows that don't
    +     * exist in the database. Read yields nothing for nonexistent rows.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + */ + public com.google.spanner.v1.KeySet.Builder getKeySetBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return internalGetKeySetFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +     * primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to
    +     * be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present.
    +     * If [index][google.spanner.v1.ReadRequest.index] is present, then
    +     * [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys
    +     * in [index][google.spanner.v1.ReadRequest.index].
    +     *
    +     * If the [partition_token][google.spanner.v1.ReadRequest.partition_token]
    +     * field is empty, rows are yielded in table primary key order (if
    +     * [index][google.spanner.v1.ReadRequest.index] is empty) or index key order
    +     * (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the
    +     * [partition_token][google.spanner.v1.ReadRequest.partition_token] field
    +     * isn't empty, rows are yielded in an unspecified order.
    +     *
    +     * It isn't an error for the `key_set` to name rows that don't
    +     * exist in the database. Read yields nothing for nonexistent rows.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + */ + public com.google.spanner.v1.KeySetOrBuilder getKeySetOrBuilder() { + if (keySetBuilder_ != null) { + return keySetBuilder_.getMessageOrBuilder(); + } else { + return keySet_ == null ? com.google.spanner.v1.KeySet.getDefaultInstance() : keySet_; + } + } + + /** + * + * + *
    +     * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +     * primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to
    +     * be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present.
    +     * If [index][google.spanner.v1.ReadRequest.index] is present, then
    +     * [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys
    +     * in [index][google.spanner.v1.ReadRequest.index].
    +     *
    +     * If the [partition_token][google.spanner.v1.ReadRequest.partition_token]
    +     * field is empty, rows are yielded in table primary key order (if
    +     * [index][google.spanner.v1.ReadRequest.index] is empty) or index key order
    +     * (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the
    +     * [partition_token][google.spanner.v1.ReadRequest.partition_token] field
    +     * isn't empty, rows are yielded in an unspecified order.
    +     *
    +     * It isn't an error for the `key_set` to name rows that don't
    +     * exist in the database. Read yields nothing for nonexistent rows.
    +     * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.KeySet, + com.google.spanner.v1.KeySet.Builder, + com.google.spanner.v1.KeySetOrBuilder> + internalGetKeySetFieldBuilder() { + if (keySetBuilder_ == null) { + keySetBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.KeySet, + com.google.spanner.v1.KeySet.Builder, + com.google.spanner.v1.KeySetOrBuilder>( + getKeySet(), getParentForChildren(), isClean()); + keySet_ = null; + } + return keySetBuilder_; + } + + private long limit_; + + /** + * + * + *
    +     * If greater than zero, only the first `limit` rows are yielded. If `limit`
    +     * is zero, the default is no limit. A limit can't be specified if
    +     * `partition_token` is set.
    +     * 
    + * + * int64 limit = 8; + * + * @return The limit. + */ + @java.lang.Override + public long getLimit() { + return limit_; + } + + /** + * + * + *
    +     * If greater than zero, only the first `limit` rows are yielded. If `limit`
    +     * is zero, the default is no limit. A limit can't be specified if
    +     * `partition_token` is set.
    +     * 
    + * + * int64 limit = 8; + * + * @param value The limit to set. + * @return This builder for chaining. + */ + public Builder setLimit(long value) { + + limit_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If greater than zero, only the first `limit` rows are yielded. If `limit`
    +     * is zero, the default is no limit. A limit can't be specified if
    +     * `partition_token` is set.
    +     * 
    + * + * int64 limit = 8; + * + * @return This builder for chaining. + */ + public Builder clearLimit() { + bitField0_ = (bitField0_ & ~0x00000040); + limit_ = 0L; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString resumeToken_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * If this request is resuming a previously interrupted read,
    +     * `resume_token` should be copied from the last
    +     * [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the
    +     * interruption. Doing this enables the new read to resume where the last read
    +     * left off. The rest of the request parameters must exactly match the request
    +     * that yielded this token.
    +     * 
    + * + * bytes resume_token = 9; + * + * @return The resumeToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getResumeToken() { + return resumeToken_; + } + + /** + * + * + *
    +     * If this request is resuming a previously interrupted read,
    +     * `resume_token` should be copied from the last
    +     * [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the
    +     * interruption. Doing this enables the new read to resume where the last read
    +     * left off. The rest of the request parameters must exactly match the request
    +     * that yielded this token.
    +     * 
    + * + * bytes resume_token = 9; + * + * @param value The resumeToken to set. + * @return This builder for chaining. + */ + public Builder setResumeToken(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + resumeToken_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If this request is resuming a previously interrupted read,
    +     * `resume_token` should be copied from the last
    +     * [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the
    +     * interruption. Doing this enables the new read to resume where the last read
    +     * left off. The rest of the request parameters must exactly match the request
    +     * that yielded this token.
    +     * 
    + * + * bytes resume_token = 9; + * + * @return This builder for chaining. + */ + public Builder clearResumeToken() { + bitField0_ = (bitField0_ & ~0x00000080); + resumeToken_ = getDefaultInstance().getResumeToken(); + onChanged(); + return this; + } + + private com.google.protobuf.ByteString partitionToken_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * If present, results are restricted to the specified partition
    +     * previously created using `PartitionRead`. There must be an exact
    +     * match for the values of fields common to this message and the
    +     * PartitionReadRequest message used to create this partition_token.
    +     * 
    + * + * bytes partition_token = 10; + * + * @return The partitionToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPartitionToken() { + return partitionToken_; + } + + /** + * + * + *
    +     * If present, results are restricted to the specified partition
    +     * previously created using `PartitionRead`. There must be an exact
    +     * match for the values of fields common to this message and the
    +     * PartitionReadRequest message used to create this partition_token.
    +     * 
    + * + * bytes partition_token = 10; + * + * @param value The partitionToken to set. + * @return This builder for chaining. + */ + public Builder setPartitionToken(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + partitionToken_ = value; + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If present, results are restricted to the specified partition
    +     * previously created using `PartitionRead`. There must be an exact
    +     * match for the values of fields common to this message and the
    +     * PartitionReadRequest message used to create this partition_token.
    +     * 
    + * + * bytes partition_token = 10; + * + * @return This builder for chaining. + */ + public Builder clearPartitionToken() { + bitField0_ = (bitField0_ & ~0x00000100); + partitionToken_ = getDefaultInstance().getPartitionToken(); + onChanged(); + return this; + } + + private com.google.spanner.v1.RequestOptions requestOptions_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RequestOptions, + com.google.spanner.v1.RequestOptions.Builder, + com.google.spanner.v1.RequestOptionsOrBuilder> + requestOptionsBuilder_; + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + * + * @return Whether the requestOptions field is set. + */ + public boolean hasRequestOptions() { + return ((bitField0_ & 0x00000200) != 0); + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + * + * @return The requestOptions. + */ + public com.google.spanner.v1.RequestOptions getRequestOptions() { + if (requestOptionsBuilder_ == null) { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } else { + return requestOptionsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + */ + public Builder setRequestOptions(com.google.spanner.v1.RequestOptions value) { + if (requestOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + requestOptions_ = value; + } else { + requestOptionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + */ + public Builder setRequestOptions(com.google.spanner.v1.RequestOptions.Builder builderForValue) { + if (requestOptionsBuilder_ == null) { + requestOptions_ = builderForValue.build(); + } else { + requestOptionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + */ + public Builder mergeRequestOptions(com.google.spanner.v1.RequestOptions value) { + if (requestOptionsBuilder_ == null) { + if (((bitField0_ & 0x00000200) != 0) + && requestOptions_ != null + && requestOptions_ != com.google.spanner.v1.RequestOptions.getDefaultInstance()) { + getRequestOptionsBuilder().mergeFrom(value); + } else { + requestOptions_ = value; + } + } else { + requestOptionsBuilder_.mergeFrom(value); + } + if (requestOptions_ != null) { + bitField0_ |= 0x00000200; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + */ + public Builder clearRequestOptions() { + bitField0_ = (bitField0_ & ~0x00000200); + requestOptions_ = null; + if (requestOptionsBuilder_ != null) { + requestOptionsBuilder_.dispose(); + requestOptionsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + */ + public com.google.spanner.v1.RequestOptions.Builder getRequestOptionsBuilder() { + bitField0_ |= 0x00000200; + onChanged(); + return internalGetRequestOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + */ + public com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder() { + if (requestOptionsBuilder_ != null) { + return requestOptionsBuilder_.getMessageOrBuilder(); + } else { + return requestOptions_ == null + ? com.google.spanner.v1.RequestOptions.getDefaultInstance() + : requestOptions_; + } + } + + /** + * + * + *
    +     * Common options for this request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RequestOptions, + com.google.spanner.v1.RequestOptions.Builder, + com.google.spanner.v1.RequestOptionsOrBuilder> + internalGetRequestOptionsFieldBuilder() { + if (requestOptionsBuilder_ == null) { + requestOptionsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RequestOptions, + com.google.spanner.v1.RequestOptions.Builder, + com.google.spanner.v1.RequestOptionsOrBuilder>( + getRequestOptions(), getParentForChildren(), isClean()); + requestOptions_ = null; + } + return requestOptionsBuilder_; + } + + private com.google.spanner.v1.DirectedReadOptions directedReadOptions_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.DirectedReadOptions, + com.google.spanner.v1.DirectedReadOptions.Builder, + com.google.spanner.v1.DirectedReadOptionsOrBuilder> + directedReadOptionsBuilder_; + + /** + * + * + *
    +     * Directed read options for this request.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 14; + * + * @return Whether the directedReadOptions field is set. + */ + public boolean hasDirectedReadOptions() { + return ((bitField0_ & 0x00000400) != 0); + } + + /** + * + * + *
    +     * Directed read options for this request.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 14; + * + * @return The directedReadOptions. + */ + public com.google.spanner.v1.DirectedReadOptions getDirectedReadOptions() { + if (directedReadOptionsBuilder_ == null) { + return directedReadOptions_ == null + ? com.google.spanner.v1.DirectedReadOptions.getDefaultInstance() + : directedReadOptions_; + } else { + return directedReadOptionsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Directed read options for this request.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 14; + */ + public Builder setDirectedReadOptions(com.google.spanner.v1.DirectedReadOptions value) { + if (directedReadOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + directedReadOptions_ = value; + } else { + directedReadOptionsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Directed read options for this request.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 14; + */ + public Builder setDirectedReadOptions( + com.google.spanner.v1.DirectedReadOptions.Builder builderForValue) { + if (directedReadOptionsBuilder_ == null) { + directedReadOptions_ = builderForValue.build(); + } else { + directedReadOptionsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Directed read options for this request.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 14; + */ + public Builder mergeDirectedReadOptions(com.google.spanner.v1.DirectedReadOptions value) { + if (directedReadOptionsBuilder_ == null) { + if (((bitField0_ & 0x00000400) != 0) + && directedReadOptions_ != null + && directedReadOptions_ + != com.google.spanner.v1.DirectedReadOptions.getDefaultInstance()) { + getDirectedReadOptionsBuilder().mergeFrom(value); + } else { + directedReadOptions_ = value; + } + } else { + directedReadOptionsBuilder_.mergeFrom(value); + } + if (directedReadOptions_ != null) { + bitField0_ |= 0x00000400; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Directed read options for this request.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 14; + */ + public Builder clearDirectedReadOptions() { + bitField0_ = (bitField0_ & ~0x00000400); + directedReadOptions_ = null; + if (directedReadOptionsBuilder_ != null) { + directedReadOptionsBuilder_.dispose(); + directedReadOptionsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Directed read options for this request.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 14; + */ + public com.google.spanner.v1.DirectedReadOptions.Builder getDirectedReadOptionsBuilder() { + bitField0_ |= 0x00000400; + onChanged(); + return internalGetDirectedReadOptionsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Directed read options for this request.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 14; + */ + public com.google.spanner.v1.DirectedReadOptionsOrBuilder getDirectedReadOptionsOrBuilder() { + if (directedReadOptionsBuilder_ != null) { + return directedReadOptionsBuilder_.getMessageOrBuilder(); + } else { + return directedReadOptions_ == null + ? com.google.spanner.v1.DirectedReadOptions.getDefaultInstance() + : directedReadOptions_; + } + } + + /** + * + * + *
    +     * Directed read options for this request.
    +     * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 14; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.DirectedReadOptions, + com.google.spanner.v1.DirectedReadOptions.Builder, + com.google.spanner.v1.DirectedReadOptionsOrBuilder> + internalGetDirectedReadOptionsFieldBuilder() { + if (directedReadOptionsBuilder_ == null) { + directedReadOptionsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.DirectedReadOptions, + com.google.spanner.v1.DirectedReadOptions.Builder, + com.google.spanner.v1.DirectedReadOptionsOrBuilder>( + getDirectedReadOptions(), getParentForChildren(), isClean()); + directedReadOptions_ = null; + } + return directedReadOptionsBuilder_; + } + + private boolean dataBoostEnabled_; + + /** + * + * + *
    +     * If this is for a partitioned read and this field is set to `true`, the
    +     * request is executed with Spanner Data Boost independent compute resources.
    +     *
    +     * If the field is set to `true` but the request doesn't set
    +     * `partition_token`, the API returns an `INVALID_ARGUMENT` error.
    +     * 
    + * + * bool data_boost_enabled = 15; + * + * @return The dataBoostEnabled. + */ + @java.lang.Override + public boolean getDataBoostEnabled() { + return dataBoostEnabled_; + } + + /** + * + * + *
    +     * If this is for a partitioned read and this field is set to `true`, the
    +     * request is executed with Spanner Data Boost independent compute resources.
    +     *
    +     * If the field is set to `true` but the request doesn't set
    +     * `partition_token`, the API returns an `INVALID_ARGUMENT` error.
    +     * 
    + * + * bool data_boost_enabled = 15; + * + * @param value The dataBoostEnabled to set. + * @return This builder for chaining. + */ + public Builder setDataBoostEnabled(boolean value) { + + dataBoostEnabled_ = value; + bitField0_ |= 0x00000800; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If this is for a partitioned read and this field is set to `true`, the
    +     * request is executed with Spanner Data Boost independent compute resources.
    +     *
    +     * If the field is set to `true` but the request doesn't set
    +     * `partition_token`, the API returns an `INVALID_ARGUMENT` error.
    +     * 
    + * + * bool data_boost_enabled = 15; + * + * @return This builder for chaining. + */ + public Builder clearDataBoostEnabled() { + bitField0_ = (bitField0_ & ~0x00000800); + dataBoostEnabled_ = false; + onChanged(); + return this; + } + + private int orderBy_ = 0; + + /** + * + * + *
    +     * Optional. Order for the returned rows.
    +     *
    +     * By default, Spanner returns result rows in primary key order except for
    +     * PartitionRead requests. For applications that don't require rows to be
    +     * returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting
    +     * `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval,
    +     * resulting in lower latencies in certain cases (for example, bulk point
    +     * lookups).
    +     * 
    + * + * + * .google.spanner.v1.ReadRequest.OrderBy order_by = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for orderBy. + */ + @java.lang.Override + public int getOrderByValue() { + return orderBy_; + } + + /** + * + * + *
    +     * Optional. Order for the returned rows.
    +     *
    +     * By default, Spanner returns result rows in primary key order except for
    +     * PartitionRead requests. For applications that don't require rows to be
    +     * returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting
    +     * `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval,
    +     * resulting in lower latencies in certain cases (for example, bulk point
    +     * lookups).
    +     * 
    + * + * + * .google.spanner.v1.ReadRequest.OrderBy order_by = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The enum numeric value on the wire for orderBy to set. + * @return This builder for chaining. + */ + public Builder setOrderByValue(int value) { + orderBy_ = value; + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Order for the returned rows.
    +     *
    +     * By default, Spanner returns result rows in primary key order except for
    +     * PartitionRead requests. For applications that don't require rows to be
    +     * returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting
    +     * `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval,
    +     * resulting in lower latencies in certain cases (for example, bulk point
    +     * lookups).
    +     * 
    + * + * + * .google.spanner.v1.ReadRequest.OrderBy order_by = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The orderBy. + */ + @java.lang.Override + public com.google.spanner.v1.ReadRequest.OrderBy getOrderBy() { + com.google.spanner.v1.ReadRequest.OrderBy result = + com.google.spanner.v1.ReadRequest.OrderBy.forNumber(orderBy_); + return result == null ? com.google.spanner.v1.ReadRequest.OrderBy.UNRECOGNIZED : result; + } + + /** + * + * + *
    +     * Optional. Order for the returned rows.
    +     *
    +     * By default, Spanner returns result rows in primary key order except for
    +     * PartitionRead requests. For applications that don't require rows to be
    +     * returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting
    +     * `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval,
    +     * resulting in lower latencies in certain cases (for example, bulk point
    +     * lookups).
    +     * 
    + * + * + * .google.spanner.v1.ReadRequest.OrderBy order_by = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The orderBy to set. + * @return This builder for chaining. + */ + public Builder setOrderBy(com.google.spanner.v1.ReadRequest.OrderBy value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00001000; + orderBy_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Order for the returned rows.
    +     *
    +     * By default, Spanner returns result rows in primary key order except for
    +     * PartitionRead requests. For applications that don't require rows to be
    +     * returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting
    +     * `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval,
    +     * resulting in lower latencies in certain cases (for example, bulk point
    +     * lookups).
    +     * 
    + * + * + * .google.spanner.v1.ReadRequest.OrderBy order_by = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearOrderBy() { + bitField0_ = (bitField0_ & ~0x00001000); + orderBy_ = 0; + onChanged(); + return this; + } + + private int lockHint_ = 0; + + /** + * + * + *
    +     * Optional. Lock Hint for the request, it can only be used with read-write
    +     * transactions.
    +     * 
    + * + * + * .google.spanner.v1.ReadRequest.LockHint lock_hint = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for lockHint. + */ + @java.lang.Override + public int getLockHintValue() { + return lockHint_; + } + + /** + * + * + *
    +     * Optional. Lock Hint for the request, it can only be used with read-write
    +     * transactions.
    +     * 
    + * + * + * .google.spanner.v1.ReadRequest.LockHint lock_hint = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The enum numeric value on the wire for lockHint to set. + * @return This builder for chaining. + */ + public Builder setLockHintValue(int value) { + lockHint_ = value; + bitField0_ |= 0x00002000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Lock Hint for the request, it can only be used with read-write
    +     * transactions.
    +     * 
    + * + * + * .google.spanner.v1.ReadRequest.LockHint lock_hint = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The lockHint. + */ + @java.lang.Override + public com.google.spanner.v1.ReadRequest.LockHint getLockHint() { + com.google.spanner.v1.ReadRequest.LockHint result = + com.google.spanner.v1.ReadRequest.LockHint.forNumber(lockHint_); + return result == null ? com.google.spanner.v1.ReadRequest.LockHint.UNRECOGNIZED : result; + } + + /** + * + * + *
    +     * Optional. Lock Hint for the request, it can only be used with read-write
    +     * transactions.
    +     * 
    + * + * + * .google.spanner.v1.ReadRequest.LockHint lock_hint = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The lockHint to set. + * @return This builder for chaining. + */ + public Builder setLockHint(com.google.spanner.v1.ReadRequest.LockHint value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00002000; + lockHint_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Lock Hint for the request, it can only be used with read-write
    +     * transactions.
    +     * 
    + * + * + * .google.spanner.v1.ReadRequest.LockHint lock_hint = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearLockHint() { + bitField0_ = (bitField0_ & ~0x00002000); + lockHint_ = 0; + onChanged(); + return this; + } + + private com.google.spanner.v1.RoutingHint routingHint_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RoutingHint, + com.google.spanner.v1.RoutingHint.Builder, + com.google.spanner.v1.RoutingHintOrBuilder> + routingHintBuilder_; + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the routingHint field is set. + */ + public boolean hasRoutingHint() { + return ((bitField0_ & 0x00004000) != 0); + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The routingHint. + */ + public com.google.spanner.v1.RoutingHint getRoutingHint() { + if (routingHintBuilder_ == null) { + return routingHint_ == null + ? com.google.spanner.v1.RoutingHint.getDefaultInstance() + : routingHint_; + } else { + return routingHintBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRoutingHint(com.google.spanner.v1.RoutingHint value) { + if (routingHintBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + routingHint_ = value; + } else { + routingHintBuilder_.setMessage(value); + } + bitField0_ |= 0x00004000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRoutingHint(com.google.spanner.v1.RoutingHint.Builder builderForValue) { + if (routingHintBuilder_ == null) { + routingHint_ = builderForValue.build(); + } else { + routingHintBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00004000; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeRoutingHint(com.google.spanner.v1.RoutingHint value) { + if (routingHintBuilder_ == null) { + if (((bitField0_ & 0x00004000) != 0) + && routingHint_ != null + && routingHint_ != com.google.spanner.v1.RoutingHint.getDefaultInstance()) { + getRoutingHintBuilder().mergeFrom(value); + } else { + routingHint_ = value; + } + } else { + routingHintBuilder_.mergeFrom(value); + } + if (routingHint_ != null) { + bitField0_ |= 0x00004000; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearRoutingHint() { + bitField0_ = (bitField0_ & ~0x00004000); + routingHint_ = null; + if (routingHintBuilder_ != null) { + routingHintBuilder_.dispose(); + routingHintBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.RoutingHint.Builder getRoutingHintBuilder() { + bitField0_ |= 0x00004000; + onChanged(); + return internalGetRoutingHintFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.RoutingHintOrBuilder getRoutingHintOrBuilder() { + if (routingHintBuilder_ != null) { + return routingHintBuilder_.getMessageOrBuilder(); + } else { + return routingHint_ == null + ? com.google.spanner.v1.RoutingHint.getDefaultInstance() + : routingHint_; + } + } + + /** + * + * + *
    +     * Optional. Makes the Spanner requests location-aware if present.
    +     *
    +     * It gives the server hints that can be used to route the request
    +     * to an appropriate server, potentially significantly decreasing latency and
    +     * improving throughput. To achieve improved performance, most fields must be
    +     * filled in with accurate values.
    +     * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RoutingHint, + com.google.spanner.v1.RoutingHint.Builder, + com.google.spanner.v1.RoutingHintOrBuilder> + internalGetRoutingHintFieldBuilder() { + if (routingHintBuilder_ == null) { + routingHintBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RoutingHint, + com.google.spanner.v1.RoutingHint.Builder, + com.google.spanner.v1.RoutingHintOrBuilder>( + getRoutingHint(), getParentForChildren(), isClean()); + routingHint_ = null; + } + return routingHintBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ReadRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ReadRequest) + private static final com.google.spanner.v1.ReadRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.ReadRequest(); + } + + public static com.google.spanner.v1.ReadRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); 
+ } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.ReadRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ReadRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ReadRequestOrBuilder.java new file mode 100644 index 000000000000..812bf0956b6f --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ReadRequestOrBuilder.java @@ -0,0 +1,574 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface ReadRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ReadRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The session in which the read should be performed.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + java.lang.String getSession(); + + /** + * + * + *
    +   * Required. The session in which the read should be performed.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + com.google.protobuf.ByteString getSessionBytes(); + + /** + * + * + *
    +   * The transaction to use. If none is provided, the default is a
    +   * temporary read-only transaction with strong concurrency.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return Whether the transaction field is set. + */ + boolean hasTransaction(); + + /** + * + * + *
    +   * The transaction to use. If none is provided, the default is a
    +   * temporary read-only transaction with strong concurrency.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + * + * @return The transaction. + */ + com.google.spanner.v1.TransactionSelector getTransaction(); + + /** + * + * + *
    +   * The transaction to use. If none is provided, the default is a
    +   * temporary read-only transaction with strong concurrency.
    +   * 
    + * + * .google.spanner.v1.TransactionSelector transaction = 2; + */ + com.google.spanner.v1.TransactionSelectorOrBuilder getTransactionOrBuilder(); + + /** + * + * + *
    +   * Required. The name of the table in the database to be read.
    +   * 
    + * + * string table = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The table. + */ + java.lang.String getTable(); + + /** + * + * + *
    +   * Required. The name of the table in the database to be read.
    +   * 
    + * + * string table = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for table. + */ + com.google.protobuf.ByteString getTableBytes(); + + /** + * + * + *
    +   * If non-empty, the name of an index on
    +   * [table][google.spanner.v1.ReadRequest.table]. This index is used instead of
    +   * the table primary key when interpreting
    +   * [key_set][google.spanner.v1.ReadRequest.key_set] and sorting result rows.
    +   * See [key_set][google.spanner.v1.ReadRequest.key_set] for further
    +   * information.
    +   * 
    + * + * string index = 4; + * + * @return The index. + */ + java.lang.String getIndex(); + + /** + * + * + *
    +   * If non-empty, the name of an index on
    +   * [table][google.spanner.v1.ReadRequest.table]. This index is used instead of
    +   * the table primary key when interpreting
    +   * [key_set][google.spanner.v1.ReadRequest.key_set] and sorting result rows.
    +   * See [key_set][google.spanner.v1.ReadRequest.key_set] for further
    +   * information.
    +   * 
    + * + * string index = 4; + * + * @return The bytes for index. + */ + com.google.protobuf.ByteString getIndexBytes(); + + /** + * + * + *
    +   * Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be
    +   * returned for each row matching this request.
    +   * 
    + * + * repeated string columns = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the columns. + */ + java.util.List getColumnsList(); + + /** + * + * + *
    +   * Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be
    +   * returned for each row matching this request.
    +   * 
    + * + * repeated string columns = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of columns. + */ + int getColumnsCount(); + + /** + * + * + *
    +   * Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be
    +   * returned for each row matching this request.
    +   * 
    + * + * repeated string columns = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The columns at the given index. + */ + java.lang.String getColumns(int index); + + /** + * + * + *
    +   * Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be
    +   * returned for each row matching this request.
    +   * 
    + * + * repeated string columns = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the columns at the given index. + */ + com.google.protobuf.ByteString getColumnsBytes(int index); + + /** + * + * + *
    +   * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +   * primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to
    +   * be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present.
    +   * If [index][google.spanner.v1.ReadRequest.index] is present, then
    +   * [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys
    +   * in [index][google.spanner.v1.ReadRequest.index].
    +   *
    +   * If the [partition_token][google.spanner.v1.ReadRequest.partition_token]
    +   * field is empty, rows are yielded in table primary key order (if
    +   * [index][google.spanner.v1.ReadRequest.index] is empty) or index key order
    +   * (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the
    +   * [partition_token][google.spanner.v1.ReadRequest.partition_token] field
    +   * isn't empty, rows are yielded in an unspecified order.
    +   *
    +   * It isn't an error for the `key_set` to name rows that don't
    +   * exist in the database. Read yields nothing for nonexistent rows.
    +   * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * @return Whether the keySet field is set. + */ + boolean hasKeySet(); + + /** + * + * + *
    +   * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +   * primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to
    +   * be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present.
    +   * If [index][google.spanner.v1.ReadRequest.index] is present, then
    +   * [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys
    +   * in [index][google.spanner.v1.ReadRequest.index].
    +   *
    +   * If the [partition_token][google.spanner.v1.ReadRequest.partition_token]
    +   * field is empty, rows are yielded in table primary key order (if
    +   * [index][google.spanner.v1.ReadRequest.index] is empty) or index key order
    +   * (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the
    +   * [partition_token][google.spanner.v1.ReadRequest.partition_token] field
    +   * isn't empty, rows are yielded in an unspecified order.
    +   *
    +   * It isn't an error for the `key_set` to name rows that don't
    +   * exist in the database. Read yields nothing for nonexistent rows.
    +   * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The keySet. + */ + com.google.spanner.v1.KeySet getKeySet(); + + /** + * + * + *
    +   * Required. `key_set` identifies the rows to be yielded. `key_set` names the
    +   * primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to
    +   * be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present.
    +   * If [index][google.spanner.v1.ReadRequest.index] is present, then
    +   * [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys
    +   * in [index][google.spanner.v1.ReadRequest.index].
    +   *
    +   * If the [partition_token][google.spanner.v1.ReadRequest.partition_token]
    +   * field is empty, rows are yielded in table primary key order (if
    +   * [index][google.spanner.v1.ReadRequest.index] is empty) or index key order
    +   * (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the
    +   * [partition_token][google.spanner.v1.ReadRequest.partition_token] field
    +   * isn't empty, rows are yielded in an unspecified order.
    +   *
    +   * It isn't an error for the `key_set` to name rows that don't
    +   * exist in the database. Read yields nothing for nonexistent rows.
    +   * 
    + * + * .google.spanner.v1.KeySet key_set = 6 [(.google.api.field_behavior) = REQUIRED]; + */ + com.google.spanner.v1.KeySetOrBuilder getKeySetOrBuilder(); + + /** + * + * + *
    +   * If greater than zero, only the first `limit` rows are yielded. If `limit`
    +   * is zero, the default is no limit. A limit can't be specified if
    +   * `partition_token` is set.
    +   * 
    + * + * int64 limit = 8; + * + * @return The limit. + */ + long getLimit(); + + /** + * + * + *
    +   * If this request is resuming a previously interrupted read,
    +   * `resume_token` should be copied from the last
    +   * [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the
    +   * interruption. Doing this enables the new read to resume where the last read
    +   * left off. The rest of the request parameters must exactly match the request
    +   * that yielded this token.
    +   * 
    + * + * bytes resume_token = 9; + * + * @return The resumeToken. + */ + com.google.protobuf.ByteString getResumeToken(); + + /** + * + * + *
    +   * If present, results are restricted to the specified partition
    +   * previously created using `PartitionRead`. There must be an exact
    +   * match for the values of fields common to this message and the
    +   * PartitionReadRequest message used to create this partition_token.
    +   * 
    + * + * bytes partition_token = 10; + * + * @return The partitionToken. + */ + com.google.protobuf.ByteString getPartitionToken(); + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + * + * @return Whether the requestOptions field is set. + */ + boolean hasRequestOptions(); + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + * + * @return The requestOptions. + */ + com.google.spanner.v1.RequestOptions getRequestOptions(); + + /** + * + * + *
    +   * Common options for this request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions request_options = 11; + */ + com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder(); + + /** + * + * + *
    +   * Directed read options for this request.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 14; + * + * @return Whether the directedReadOptions field is set. + */ + boolean hasDirectedReadOptions(); + + /** + * + * + *
    +   * Directed read options for this request.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 14; + * + * @return The directedReadOptions. + */ + com.google.spanner.v1.DirectedReadOptions getDirectedReadOptions(); + + /** + * + * + *
    +   * Directed read options for this request.
    +   * 
    + * + * .google.spanner.v1.DirectedReadOptions directed_read_options = 14; + */ + com.google.spanner.v1.DirectedReadOptionsOrBuilder getDirectedReadOptionsOrBuilder(); + + /** + * + * + *
    +   * If this is for a partitioned read and this field is set to `true`, the
    +   * request is executed with Spanner Data Boost independent compute resources.
    +   *
    +   * If the field is set to `true` but the request doesn't set
    +   * `partition_token`, the API returns an `INVALID_ARGUMENT` error.
    +   * 
    + * + * bool data_boost_enabled = 15; + * + * @return The dataBoostEnabled. + */ + boolean getDataBoostEnabled(); + + /** + * + * + *
    +   * Optional. Order for the returned rows.
    +   *
    +   * By default, Spanner returns result rows in primary key order except for
    +   * PartitionRead requests. For applications that don't require rows to be
    +   * returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting
    +   * `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval,
    +   * resulting in lower latencies in certain cases (for example, bulk point
    +   * lookups).
    +   * 
    + * + * + * .google.spanner.v1.ReadRequest.OrderBy order_by = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for orderBy. + */ + int getOrderByValue(); + + /** + * + * + *
    +   * Optional. Order for the returned rows.
    +   *
    +   * By default, Spanner returns result rows in primary key order except for
    +   * PartitionRead requests. For applications that don't require rows to be
    +   * returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting
    +   * `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval,
    +   * resulting in lower latencies in certain cases (for example, bulk point
    +   * lookups).
    +   * 
    + * + * + * .google.spanner.v1.ReadRequest.OrderBy order_by = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The orderBy. + */ + com.google.spanner.v1.ReadRequest.OrderBy getOrderBy(); + + /** + * + * + *
    +   * Optional. Lock Hint for the request, it can only be used with read-write
    +   * transactions.
    +   * 
    + * + * + * .google.spanner.v1.ReadRequest.LockHint lock_hint = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for lockHint. + */ + int getLockHintValue(); + + /** + * + * + *
    +   * Optional. Lock Hint for the request, it can only be used with read-write
    +   * transactions.
    +   * 
    + * + * + * .google.spanner.v1.ReadRequest.LockHint lock_hint = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The lockHint. + */ + com.google.spanner.v1.ReadRequest.LockHint getLockHint(); + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the routingHint field is set. + */ + boolean hasRoutingHint(); + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The routingHint. + */ + com.google.spanner.v1.RoutingHint getRoutingHint(); + + /** + * + * + *
    +   * Optional. Makes the Spanner requests location-aware if present.
    +   *
    +   * It gives the server hints that can be used to route the request
    +   * to an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   * 
    + * + * + * .google.spanner.v1.RoutingHint routing_hint = 18 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.RoutingHintOrBuilder getRoutingHintOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RecipeList.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RecipeList.java new file mode 100644 index 000000000000..6ff0c098f9f7 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RecipeList.java @@ -0,0 +1,1034 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/location.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * A `RecipeList` contains a list of `KeyRecipe`s, which share the same
    + * schema generation.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.RecipeList} + */ +@com.google.protobuf.Generated +public final class RecipeList extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.RecipeList) + RecipeListOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "RecipeList"); + } + + // Use RecipeList.newBuilder() to construct. + private RecipeList(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private RecipeList() { + schemaGeneration_ = com.google.protobuf.ByteString.EMPTY; + recipe_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_RecipeList_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_RecipeList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.RecipeList.class, com.google.spanner.v1.RecipeList.Builder.class); + } + + public static final int SCHEMA_GENERATION_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString schemaGeneration_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * The schema generation of the recipes. To be sent to the server in
    +   * `RoutingHint.schema_generation` whenever one of the recipes is used.
    +   * `schema_generation` values are comparable with each other; if generation A
    +   * compares greater than generation B, then A is a more recent schema than B.
    +   * Clients should in general aim to cache only the latest schema generation,
    +   * and discard more stale recipes.
    +   * 
    + * + * bytes schema_generation = 1; + * + * @return The schemaGeneration. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSchemaGeneration() { + return schemaGeneration_; + } + + public static final int RECIPE_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private java.util.List recipe_; + + /** + * + * + *
    +   * A list of recipes to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + @java.lang.Override + public java.util.List getRecipeList() { + return recipe_; + } + + /** + * + * + *
    +   * A list of recipes to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + @java.lang.Override + public java.util.List + getRecipeOrBuilderList() { + return recipe_; + } + + /** + * + * + *
    +   * A list of recipes to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + @java.lang.Override + public int getRecipeCount() { + return recipe_.size(); + } + + /** + * + * + *
    +   * A list of recipes to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + @java.lang.Override + public com.google.spanner.v1.KeyRecipe getRecipe(int index) { + return recipe_.get(index); + } + + /** + * + * + *
    +   * A list of recipes to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + @java.lang.Override + public com.google.spanner.v1.KeyRecipeOrBuilder getRecipeOrBuilder(int index) { + return recipe_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!schemaGeneration_.isEmpty()) { + output.writeBytes(1, schemaGeneration_); + } + for (int i = 0; i < recipe_.size(); i++) { + output.writeMessage(3, recipe_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!schemaGeneration_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, schemaGeneration_); + } + for (int i = 0; i < recipe_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, recipe_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.RecipeList)) { + return super.equals(obj); + } + com.google.spanner.v1.RecipeList other = (com.google.spanner.v1.RecipeList) obj; + + if (!getSchemaGeneration().equals(other.getSchemaGeneration())) return false; + if (!getRecipeList().equals(other.getRecipeList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * 
hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SCHEMA_GENERATION_FIELD_NUMBER; + hash = (53 * hash) + getSchemaGeneration().hashCode(); + if (getRecipeCount() > 0) { + hash = (37 * hash) + RECIPE_FIELD_NUMBER; + hash = (53 * hash) + getRecipeList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.RecipeList parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.RecipeList parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.RecipeList parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.RecipeList parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.RecipeList parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.RecipeList parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.RecipeList parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.RecipeList 
parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.RecipeList parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.RecipeList parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.RecipeList parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.RecipeList parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.RecipeList prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * A `RecipeList` contains a list of `KeyRecipe`s, which share the same
    +   * schema generation.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.RecipeList} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.RecipeList) + com.google.spanner.v1.RecipeListOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_RecipeList_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_RecipeList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.RecipeList.class, + com.google.spanner.v1.RecipeList.Builder.class); + } + + // Construct using com.google.spanner.v1.RecipeList.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + schemaGeneration_ = com.google.protobuf.ByteString.EMPTY; + if (recipeBuilder_ == null) { + recipe_ = java.util.Collections.emptyList(); + } else { + recipe_ = null; + recipeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_RecipeList_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.RecipeList getDefaultInstanceForType() { + return com.google.spanner.v1.RecipeList.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.RecipeList build() { + com.google.spanner.v1.RecipeList result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + 
return result; + } + + @java.lang.Override + public com.google.spanner.v1.RecipeList buildPartial() { + com.google.spanner.v1.RecipeList result = new com.google.spanner.v1.RecipeList(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.v1.RecipeList result) { + if (recipeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + recipe_ = java.util.Collections.unmodifiableList(recipe_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.recipe_ = recipe_; + } else { + result.recipe_ = recipeBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.RecipeList result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.schemaGeneration_ = schemaGeneration_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.RecipeList) { + return mergeFrom((com.google.spanner.v1.RecipeList) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.RecipeList other) { + if (other == com.google.spanner.v1.RecipeList.getDefaultInstance()) return this; + if (!other.getSchemaGeneration().isEmpty()) { + setSchemaGeneration(other.getSchemaGeneration()); + } + if (recipeBuilder_ == null) { + if (!other.recipe_.isEmpty()) { + if (recipe_.isEmpty()) { + recipe_ = other.recipe_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureRecipeIsMutable(); + recipe_.addAll(other.recipe_); + } + onChanged(); + } + } else { + if (!other.recipe_.isEmpty()) { + if (recipeBuilder_.isEmpty()) { + recipeBuilder_.dispose(); + recipeBuilder_ = null; + recipe_ = other.recipe_; + bitField0_ = (bitField0_ & ~0x00000002); + recipeBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetRecipeFieldBuilder() + : null; + } else { + recipeBuilder_.addAllMessages(other.recipe_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + schemaGeneration_ = input.readBytes(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 26: + { + com.google.spanner.v1.KeyRecipe m = + input.readMessage(com.google.spanner.v1.KeyRecipe.parser(), extensionRegistry); + if (recipeBuilder_ == null) { + ensureRecipeIsMutable(); + recipe_.add(m); + } else { + recipeBuilder_.addMessage(m); + } + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.ByteString schemaGeneration_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * The schema generation of the recipes. To be sent to the server in
    +     * `RoutingHint.schema_generation` whenever one of the recipes is used.
    +     * `schema_generation` values are comparable with each other; if generation A
    +     * compares greater than generation B, then A is a more recent schema than B.
    +     * Clients should in general aim to cache only the latest schema generation,
    +     * and discard more stale recipes.
    +     * 
    + * + * bytes schema_generation = 1; + * + * @return The schemaGeneration. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSchemaGeneration() { + return schemaGeneration_; + } + + /** + * + * + *
    +     * The schema generation of the recipes. To be sent to the server in
    +     * `RoutingHint.schema_generation` whenever one of the recipes is used.
    +     * `schema_generation` values are comparable with each other; if generation A
    +     * compares greater than generation B, then A is a more recent schema than B.
    +     * Clients should in general aim to cache only the latest schema generation,
    +     * and discard more stale recipes.
    +     * 
    + * + * bytes schema_generation = 1; + * + * @param value The schemaGeneration to set. + * @return This builder for chaining. + */ + public Builder setSchemaGeneration(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + schemaGeneration_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The schema generation of the recipes. To be sent to the server in
    +     * `RoutingHint.schema_generation` whenever one of the recipes is used.
    +     * `schema_generation` values are comparable with each other; if generation A
    +     * compares greater than generation B, then A is a more recent schema than B.
    +     * Clients should in general aim to cache only the latest schema generation,
    +     * and discard more stale recipes.
    +     * 
    + * + * bytes schema_generation = 1; + * + * @return This builder for chaining. + */ + public Builder clearSchemaGeneration() { + bitField0_ = (bitField0_ & ~0x00000001); + schemaGeneration_ = getDefaultInstance().getSchemaGeneration(); + onChanged(); + return this; + } + + private java.util.List recipe_ = + java.util.Collections.emptyList(); + + private void ensureRecipeIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + recipe_ = new java.util.ArrayList(recipe_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.KeyRecipe, + com.google.spanner.v1.KeyRecipe.Builder, + com.google.spanner.v1.KeyRecipeOrBuilder> + recipeBuilder_; + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + public java.util.List getRecipeList() { + if (recipeBuilder_ == null) { + return java.util.Collections.unmodifiableList(recipe_); + } else { + return recipeBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + public int getRecipeCount() { + if (recipeBuilder_ == null) { + return recipe_.size(); + } else { + return recipeBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + public com.google.spanner.v1.KeyRecipe getRecipe(int index) { + if (recipeBuilder_ == null) { + return recipe_.get(index); + } else { + return recipeBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + public Builder setRecipe(int index, com.google.spanner.v1.KeyRecipe value) { + if (recipeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRecipeIsMutable(); + recipe_.set(index, value); + onChanged(); + } else { + recipeBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + public Builder setRecipe(int index, com.google.spanner.v1.KeyRecipe.Builder builderForValue) { + if (recipeBuilder_ == null) { + ensureRecipeIsMutable(); + recipe_.set(index, builderForValue.build()); + onChanged(); + } else { + recipeBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + public Builder addRecipe(com.google.spanner.v1.KeyRecipe value) { + if (recipeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRecipeIsMutable(); + recipe_.add(value); + onChanged(); + } else { + recipeBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + public Builder addRecipe(int index, com.google.spanner.v1.KeyRecipe value) { + if (recipeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRecipeIsMutable(); + recipe_.add(index, value); + onChanged(); + } else { + recipeBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + public Builder addRecipe(com.google.spanner.v1.KeyRecipe.Builder builderForValue) { + if (recipeBuilder_ == null) { + ensureRecipeIsMutable(); + recipe_.add(builderForValue.build()); + onChanged(); + } else { + recipeBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + public Builder addRecipe(int index, com.google.spanner.v1.KeyRecipe.Builder builderForValue) { + if (recipeBuilder_ == null) { + ensureRecipeIsMutable(); + recipe_.add(index, builderForValue.build()); + onChanged(); + } else { + recipeBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + public Builder addAllRecipe( + java.lang.Iterable values) { + if (recipeBuilder_ == null) { + ensureRecipeIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, recipe_); + onChanged(); + } else { + recipeBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + public Builder clearRecipe() { + if (recipeBuilder_ == null) { + recipe_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + recipeBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + public Builder removeRecipe(int index) { + if (recipeBuilder_ == null) { + ensureRecipeIsMutable(); + recipe_.remove(index); + onChanged(); + } else { + recipeBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + public com.google.spanner.v1.KeyRecipe.Builder getRecipeBuilder(int index) { + return internalGetRecipeFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + public com.google.spanner.v1.KeyRecipeOrBuilder getRecipeOrBuilder(int index) { + if (recipeBuilder_ == null) { + return recipe_.get(index); + } else { + return recipeBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + public java.util.List + getRecipeOrBuilderList() { + if (recipeBuilder_ != null) { + return recipeBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(recipe_); + } + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + public com.google.spanner.v1.KeyRecipe.Builder addRecipeBuilder() { + return internalGetRecipeFieldBuilder() + .addBuilder(com.google.spanner.v1.KeyRecipe.getDefaultInstance()); + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + public com.google.spanner.v1.KeyRecipe.Builder addRecipeBuilder(int index) { + return internalGetRecipeFieldBuilder() + .addBuilder(index, com.google.spanner.v1.KeyRecipe.getDefaultInstance()); + } + + /** + * + * + *
    +     * A list of recipes to be cached.
    +     * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + public java.util.List getRecipeBuilderList() { + return internalGetRecipeFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.KeyRecipe, + com.google.spanner.v1.KeyRecipe.Builder, + com.google.spanner.v1.KeyRecipeOrBuilder> + internalGetRecipeFieldBuilder() { + if (recipeBuilder_ == null) { + recipeBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.KeyRecipe, + com.google.spanner.v1.KeyRecipe.Builder, + com.google.spanner.v1.KeyRecipeOrBuilder>( + recipe_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + recipe_ = null; + } + return recipeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.RecipeList) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.RecipeList) + private static final com.google.spanner.v1.RecipeList DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.RecipeList(); + } + + public static com.google.spanner.v1.RecipeList getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RecipeList parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.RecipeList getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RecipeListOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RecipeListOrBuilder.java new file mode 100644 index 000000000000..d05927ae9151 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RecipeListOrBuilder.java @@ -0,0 +1,101 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/location.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface RecipeListOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.RecipeList) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The schema generation of the recipes. To be sent to the server in
    +   * `RoutingHint.schema_generation` whenever one of the recipes is used.
    +   * `schema_generation` values are comparable with each other; if generation A
    +   * compares greater than generation B, then A is a more recent schema than B.
    +   * Clients should in general aim to cache only the latest schema generation,
    +   * and discard more stale recipes.
    +   * 
    + * + * bytes schema_generation = 1; + * + * @return The schemaGeneration. + */ + com.google.protobuf.ByteString getSchemaGeneration(); + + /** + * + * + *
    +   * A list of recipes to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + java.util.List getRecipeList(); + + /** + * + * + *
    +   * A list of recipes to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + com.google.spanner.v1.KeyRecipe getRecipe(int index); + + /** + * + * + *
    +   * A list of recipes to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + int getRecipeCount(); + + /** + * + * + *
    +   * A list of recipes to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + java.util.List getRecipeOrBuilderList(); + + /** + * + * + *
    +   * A list of recipes to be cached.
    +   * 
    + * + * repeated .google.spanner.v1.KeyRecipe recipe = 3; + */ + com.google.spanner.v1.KeyRecipeOrBuilder getRecipeOrBuilder(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RequestOptions.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RequestOptions.java new file mode 100644 index 000000000000..6a2afc8f0872 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RequestOptions.java @@ -0,0 +1,2550 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * Common request options for various APIs.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.RequestOptions} + */ +@com.google.protobuf.Generated +public final class RequestOptions extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.RequestOptions) + RequestOptionsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "RequestOptions"); + } + + // Use RequestOptions.newBuilder() to construct. + private RequestOptions(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private RequestOptions() { + priority_ = 0; + requestTag_ = ""; + transactionTag_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_RequestOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_RequestOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.RequestOptions.class, + com.google.spanner.v1.RequestOptions.Builder.class); + } + + /** + * + * + *
    +   * The relative priority for requests. Note that priority isn't applicable
    +   * for [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
    +   *
    +   * The priority acts as a hint to the Cloud Spanner scheduler and doesn't
    +   * guarantee priority or order of execution. For example:
    +   *
    +   * * Some parts of a write operation always execute at `PRIORITY_HIGH`,
    +   * regardless of the specified priority. This can cause you to see an
    +   * increase in high priority workload even when executing a low priority
    +   * request. This can also potentially cause a priority inversion where a
    +   * lower priority request is fulfilled ahead of a higher priority
    +   * request.
    +   * * If a transaction contains multiple operations with different priorities,
    +   * Cloud Spanner doesn't guarantee to process the higher priority
    +   * operations first. There might be other constraints to satisfy, such as
    +   * the order of operations.
    +   * 
    + * + * Protobuf enum {@code google.spanner.v1.RequestOptions.Priority} + */ + public enum Priority implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * `PRIORITY_UNSPECIFIED` is equivalent to `PRIORITY_HIGH`.
    +     * 
    + * + * PRIORITY_UNSPECIFIED = 0; + */ + PRIORITY_UNSPECIFIED(0), + /** + * + * + *
    +     * This specifies that the request is low priority.
    +     * 
    + * + * PRIORITY_LOW = 1; + */ + PRIORITY_LOW(1), + /** + * + * + *
    +     * This specifies that the request is medium priority.
    +     * 
    + * + * PRIORITY_MEDIUM = 2; + */ + PRIORITY_MEDIUM(2), + /** + * + * + *
    +     * This specifies that the request is high priority.
    +     * 
    + * + * PRIORITY_HIGH = 3; + */ + PRIORITY_HIGH(3), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Priority"); + } + + /** + * + * + *
    +     * `PRIORITY_UNSPECIFIED` is equivalent to `PRIORITY_HIGH`.
    +     * 
    + * + * PRIORITY_UNSPECIFIED = 0; + */ + public static final int PRIORITY_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * This specifies that the request is low priority.
    +     * 
    + * + * PRIORITY_LOW = 1; + */ + public static final int PRIORITY_LOW_VALUE = 1; + + /** + * + * + *
    +     * This specifies that the request is medium priority.
    +     * 
    + * + * PRIORITY_MEDIUM = 2; + */ + public static final int PRIORITY_MEDIUM_VALUE = 2; + + /** + * + * + *
    +     * This specifies that the request is high priority.
    +     * 
    + * + * PRIORITY_HIGH = 3; + */ + public static final int PRIORITY_HIGH_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Priority valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Priority forNumber(int value) { + switch (value) { + case 0: + return PRIORITY_UNSPECIFIED; + case 1: + return PRIORITY_LOW; + case 2: + return PRIORITY_MEDIUM; + case 3: + return PRIORITY_HIGH; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Priority findValueByNumber(int number) { + return Priority.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.v1.RequestOptions.getDescriptor().getEnumTypes().get(0); + } + + private static final Priority[] VALUES = values(); + + public static Priority 
valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Priority(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.v1.RequestOptions.Priority) + } + + public interface ClientContextOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.RequestOptions.ClientContext) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * Optional. Map of parameter name to value for this request. These values
    +     * will be returned by any SECURE_CONTEXT() calls invoked by this request
    +     * (e.g., by queries against Parameterized Secure Views).
    +     * 
    + * + * + * map<string, .google.protobuf.Value> secure_context = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getSecureContextCount(); + + /** + * + * + *
    +     * Optional. Map of parameter name to value for this request. These values
    +     * will be returned by any SECURE_CONTEXT() calls invoked by this request
    +     * (e.g., by queries against Parameterized Secure Views).
    +     * 
    + * + * + * map<string, .google.protobuf.Value> secure_context = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + boolean containsSecureContext(java.lang.String key); + + /** Use {@link #getSecureContextMap()} instead. */ + @java.lang.Deprecated + java.util.Map getSecureContext(); + + /** + * + * + *
    +     * Optional. Map of parameter name to value for this request. These values
    +     * will be returned by any SECURE_CONTEXT() calls invoked by this request
    +     * (e.g., by queries against Parameterized Secure Views).
    +     * 
    + * + * + * map<string, .google.protobuf.Value> secure_context = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.Map getSecureContextMap(); + + /** + * + * + *
    +     * Optional. Map of parameter name to value for this request. These values
    +     * will be returned by any SECURE_CONTEXT() calls invoked by this request
    +     * (e.g., by queries against Parameterized Secure Views).
    +     * 
    + * + * + * map<string, .google.protobuf.Value> secure_context = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + /* nullable */ + com.google.protobuf.Value getSecureContextOrDefault( + java.lang.String key, + /* nullable */ + com.google.protobuf.Value defaultValue); + + /** + * + * + *
    +     * Optional. Map of parameter name to value for this request. These values
    +     * will be returned by any SECURE_CONTEXT() calls invoked by this request
    +     * (e.g., by queries against Parameterized Secure Views).
    +     * 
    + * + * + * map<string, .google.protobuf.Value> secure_context = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.Value getSecureContextOrThrow(java.lang.String key); + } + + /** + * + * + *
    +   * Container for various pieces of client-owned context attached to a request.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.RequestOptions.ClientContext} + */ + public static final class ClientContext extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.RequestOptions.ClientContext) + ClientContextOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ClientContext"); + } + + // Use ClientContext.newBuilder() to construct. + private ClientContext(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ClientContext() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_RequestOptions_ClientContext_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 1: + return internalGetSecureContext(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_RequestOptions_ClientContext_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.RequestOptions.ClientContext.class, + com.google.spanner.v1.RequestOptions.ClientContext.Builder.class); + } + + public static final int SECURE_CONTEXT_FIELD_NUMBER = 1; + + private static final class SecureContextDefaultEntryHolder { + static final com.google.protobuf.MapEntry + defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + 
com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_RequestOptions_ClientContext_SecureContextEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.MESSAGE, + com.google.protobuf.Value.getDefaultInstance()); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField + secureContext_; + + private com.google.protobuf.MapField + internalGetSecureContext() { + if (secureContext_ == null) { + return com.google.protobuf.MapField.emptyMapField( + SecureContextDefaultEntryHolder.defaultEntry); + } + return secureContext_; + } + + public int getSecureContextCount() { + return internalGetSecureContext().getMap().size(); + } + + /** + * + * + *
    +     * Optional. Map of parameter name to value for this request. These values
    +     * will be returned by any SECURE_CONTEXT() calls invoked by this request
    +     * (e.g., by queries against Parameterized Secure Views).
    +     * 
    + * + * + * map<string, .google.protobuf.Value> secure_context = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsSecureContext(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetSecureContext().getMap().containsKey(key); + } + + /** Use {@link #getSecureContextMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getSecureContext() { + return getSecureContextMap(); + } + + /** + * + * + *
    +     * Optional. Map of parameter name to value for this request. These values
    +     * will be returned by any SECURE_CONTEXT() calls invoked by this request
    +     * (e.g., by queries against Parameterized Secure Views).
    +     * 
    + * + * + * map<string, .google.protobuf.Value> secure_context = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getSecureContextMap() { + return internalGetSecureContext().getMap(); + } + + /** + * + * + *
    +     * Optional. Map of parameter name to value for this request. These values
    +     * will be returned by any SECURE_CONTEXT() calls invoked by this request
    +     * (e.g., by queries against Parameterized Secure Views).
    +     * 
    + * + * + * map<string, .google.protobuf.Value> secure_context = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ com.google.protobuf.Value getSecureContextOrDefault( + java.lang.String key, + /* nullable */ + com.google.protobuf.Value defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetSecureContext().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
    +     * Optional. Map of parameter name to value for this request. These values
    +     * will be returned by any SECURE_CONTEXT() calls invoked by this request
    +     * (e.g., by queries against Parameterized Secure Views).
    +     * 
    + * + * + * map<string, .google.protobuf.Value> secure_context = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.Value getSecureContextOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetSecureContext().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + com.google.protobuf.GeneratedMessage.serializeStringMapTo( + output, internalGetSecureContext(), SecureContextDefaultEntryHolder.defaultEntry, 1); + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (java.util.Map.Entry entry : + internalGetSecureContext().getMap().entrySet()) { + com.google.protobuf.MapEntry secureContext__ = + SecureContextDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, secureContext__); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.RequestOptions.ClientContext)) { + return super.equals(obj); + } + com.google.spanner.v1.RequestOptions.ClientContext other = + 
(com.google.spanner.v1.RequestOptions.ClientContext) obj; + + if (!internalGetSecureContext().equals(other.internalGetSecureContext())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (!internalGetSecureContext().getMap().isEmpty()) { + hash = (37 * hash) + SECURE_CONTEXT_FIELD_NUMBER; + hash = (53 * hash) + internalGetSecureContext().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.RequestOptions.ClientContext parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.RequestOptions.ClientContext parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.RequestOptions.ClientContext parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.RequestOptions.ClientContext parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.RequestOptions.ClientContext parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.RequestOptions.ClientContext parseFrom( + byte[] data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.RequestOptions.ClientContext parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.RequestOptions.ClientContext parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.RequestOptions.ClientContext parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.RequestOptions.ClientContext parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.RequestOptions.ClientContext parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.RequestOptions.ClientContext parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return 
DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.RequestOptions.ClientContext prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * Container for various pieces of client-owned context attached to a request.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.RequestOptions.ClientContext} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.RequestOptions.ClientContext) + com.google.spanner.v1.RequestOptions.ClientContextOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_RequestOptions_ClientContext_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 1: + return internalGetSecureContext(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 1: + return internalGetMutableSecureContext(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_RequestOptions_ClientContext_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.RequestOptions.ClientContext.class, + com.google.spanner.v1.RequestOptions.ClientContext.Builder.class); + } + + // Construct using com.google.spanner.v1.RequestOptions.ClientContext.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + internalGetMutableSecureContext().clear(); + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_RequestOptions_ClientContext_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.RequestOptions.ClientContext getDefaultInstanceForType() { + return com.google.spanner.v1.RequestOptions.ClientContext.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.RequestOptions.ClientContext build() { + com.google.spanner.v1.RequestOptions.ClientContext result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.RequestOptions.ClientContext buildPartial() { + com.google.spanner.v1.RequestOptions.ClientContext result = + new com.google.spanner.v1.RequestOptions.ClientContext(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.RequestOptions.ClientContext result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.secureContext_ = + internalGetSecureContext().build(SecureContextDefaultEntryHolder.defaultEntry); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.RequestOptions.ClientContext) { + return mergeFrom((com.google.spanner.v1.RequestOptions.ClientContext) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.RequestOptions.ClientContext other) { + if (other == com.google.spanner.v1.RequestOptions.ClientContext.getDefaultInstance()) + return this; + internalGetMutableSecureContext().mergeFrom(other.internalGetSecureContext()); + bitField0_ |= 0x00000001; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + 
public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.protobuf.MapEntry + secureContext__ = + input.readMessage( + SecureContextDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableSecureContext() + .ensureBuilderMap() + .put(secureContext__.getKey(), secureContext__.getValue()); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private static final class SecureContextConverter + implements com.google.protobuf.MapFieldBuilder.Converter< + java.lang.String, com.google.protobuf.ValueOrBuilder, com.google.protobuf.Value> { + @java.lang.Override + public com.google.protobuf.Value build(com.google.protobuf.ValueOrBuilder val) { + if (val instanceof com.google.protobuf.Value) { + return (com.google.protobuf.Value) val; + } + return ((com.google.protobuf.Value.Builder) val).build(); + } + + @java.lang.Override + public com.google.protobuf.MapEntry + defaultEntry() { + return SecureContextDefaultEntryHolder.defaultEntry; + } + } + ; + + private static final SecureContextConverter secureContextConverter = + new SecureContextConverter(); + + private com.google.protobuf.MapFieldBuilder< + java.lang.String, + 
com.google.protobuf.ValueOrBuilder, + com.google.protobuf.Value, + com.google.protobuf.Value.Builder> + secureContext_; + + private com.google.protobuf.MapFieldBuilder< + java.lang.String, + com.google.protobuf.ValueOrBuilder, + com.google.protobuf.Value, + com.google.protobuf.Value.Builder> + internalGetSecureContext() { + if (secureContext_ == null) { + return new com.google.protobuf.MapFieldBuilder<>(secureContextConverter); + } + return secureContext_; + } + + private com.google.protobuf.MapFieldBuilder< + java.lang.String, + com.google.protobuf.ValueOrBuilder, + com.google.protobuf.Value, + com.google.protobuf.Value.Builder> + internalGetMutableSecureContext() { + if (secureContext_ == null) { + secureContext_ = new com.google.protobuf.MapFieldBuilder<>(secureContextConverter); + } + bitField0_ |= 0x00000001; + onChanged(); + return secureContext_; + } + + public int getSecureContextCount() { + return internalGetSecureContext().ensureBuilderMap().size(); + } + + /** + * + * + *
    +       * Optional. Map of parameter name to value for this request. These values
    +       * will be returned by any SECURE_CONTEXT() calls invoked by this request
    +       * (e.g., by queries against Parameterized Secure Views).
    +       * 
    + * + * + * map<string, .google.protobuf.Value> secure_context = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public boolean containsSecureContext(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetSecureContext().ensureBuilderMap().containsKey(key); + } + + /** Use {@link #getSecureContextMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getSecureContext() { + return getSecureContextMap(); + } + + /** + * + * + *
    +       * Optional. Map of parameter name to value for this request. These values
    +       * will be returned by any SECURE_CONTEXT() calls invoked by this request
    +       * (e.g., by queries against Parameterized Secure Views).
    +       * 
    + * + * + * map<string, .google.protobuf.Value> secure_context = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.Map getSecureContextMap() { + return internalGetSecureContext().getImmutableMap(); + } + + /** + * + * + *
    +       * Optional. Map of parameter name to value for this request. These values
    +       * will be returned by any SECURE_CONTEXT() calls invoked by this request
    +       * (e.g., by queries against Parameterized Secure Views).
    +       * 
    + * + * + * map<string, .google.protobuf.Value> secure_context = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public /* nullable */ com.google.protobuf.Value getSecureContextOrDefault( + java.lang.String key, + /* nullable */ + com.google.protobuf.Value defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetMutableSecureContext().ensureBuilderMap(); + return map.containsKey(key) ? secureContextConverter.build(map.get(key)) : defaultValue; + } + + /** + * + * + *
    +       * Optional. Map of parameter name to value for this request. These values
    +       * will be returned by any SECURE_CONTEXT() calls invoked by this request
    +       * (e.g., by queries against Parameterized Secure Views).
    +       * 
    + * + * + * map<string, .google.protobuf.Value> secure_context = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.Value getSecureContextOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = + internalGetMutableSecureContext().ensureBuilderMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return secureContextConverter.build(map.get(key)); + } + + public Builder clearSecureContext() { + bitField0_ = (bitField0_ & ~0x00000001); + internalGetMutableSecureContext().clear(); + return this; + } + + /** + * + * + *
    +       * Optional. Map of parameter name to value for this request. These values
    +       * will be returned by any SECURE_CONTEXT() calls invoked by this request
    +       * (e.g., by queries against Parameterized Secure Views).
    +       * 
    + * + * + * map<string, .google.protobuf.Value> secure_context = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeSecureContext(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableSecureContext().ensureBuilderMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableSecureContext() { + bitField0_ |= 0x00000001; + return internalGetMutableSecureContext().ensureMessageMap(); + } + + /** + * + * + *
    +       * Optional. Map of parameter name to value for this request. These values
    +       * will be returned by any SECURE_CONTEXT() calls invoked by this request
    +       * (e.g., by queries against Parameterized Secure Views).
    +       * 
    + * + * + * map<string, .google.protobuf.Value> secure_context = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putSecureContext(java.lang.String key, com.google.protobuf.Value value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableSecureContext().ensureBuilderMap().put(key, value); + bitField0_ |= 0x00000001; + return this; + } + + /** + * + * + *
    +       * Optional. Map of parameter name to value for this request. These values
    +       * will be returned by any SECURE_CONTEXT() calls invoked by this request
    +       * (e.g., by queries against Parameterized Secure Views).
    +       * 
    + * + * + * map<string, .google.protobuf.Value> secure_context = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putAllSecureContext( + java.util.Map values) { + for (java.util.Map.Entry e : + values.entrySet()) { + if (e.getKey() == null || e.getValue() == null) { + throw new NullPointerException(); + } + } + internalGetMutableSecureContext().ensureBuilderMap().putAll(values); + bitField0_ |= 0x00000001; + return this; + } + + /** + * + * + *
    +       * Optional. Map of parameter name to value for this request. These values
    +       * will be returned by any SECURE_CONTEXT() calls invoked by this request
    +       * (e.g., by queries against Parameterized Secure Views).
    +       * 
    + * + * + * map<string, .google.protobuf.Value> secure_context = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Value.Builder putSecureContextBuilderIfAbsent( + java.lang.String key) { + java.util.Map builderMap = + internalGetMutableSecureContext().ensureBuilderMap(); + com.google.protobuf.ValueOrBuilder entry = builderMap.get(key); + if (entry == null) { + entry = com.google.protobuf.Value.newBuilder(); + builderMap.put(key, entry); + } + if (entry instanceof com.google.protobuf.Value) { + entry = ((com.google.protobuf.Value) entry).toBuilder(); + builderMap.put(key, entry); + } + return (com.google.protobuf.Value.Builder) entry; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.RequestOptions.ClientContext) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.RequestOptions.ClientContext) + private static final com.google.spanner.v1.RequestOptions.ClientContext DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.RequestOptions.ClientContext(); + } + + public static com.google.spanner.v1.RequestOptions.ClientContext getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ClientContext parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) 
+ .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.RequestOptions.ClientContext getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int PRIORITY_FIELD_NUMBER = 1; + private int priority_ = 0; + + /** + * + * + *
    +   * Priority for the request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions.Priority priority = 1; + * + * @return The enum numeric value on the wire for priority. + */ + @java.lang.Override + public int getPriorityValue() { + return priority_; + } + + /** + * + * + *
    +   * Priority for the request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions.Priority priority = 1; + * + * @return The priority. + */ + @java.lang.Override + public com.google.spanner.v1.RequestOptions.Priority getPriority() { + com.google.spanner.v1.RequestOptions.Priority result = + com.google.spanner.v1.RequestOptions.Priority.forNumber(priority_); + return result == null ? com.google.spanner.v1.RequestOptions.Priority.UNRECOGNIZED : result; + } + + public static final int REQUEST_TAG_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object requestTag_ = ""; + + /** + * + * + *
    +   * A per-request tag which can be applied to queries or reads, used for
    +   * statistics collection.
    +   * Both `request_tag` and `transaction_tag` can be specified for a read or
    +   * query that belongs to a transaction.
    +   * This field is ignored for requests where it's not applicable (for example,
    +   * `CommitRequest`).
    +   * Legal characters for `request_tag` values are all printable characters
    +   * (ASCII 32 - 126) and the length of a request_tag is limited to 50
    +   * characters. Values that exceed this limit are truncated.
    +   * Any leading underscore (_) characters are removed from the string.
    +   * 
    + * + * string request_tag = 2; + * + * @return The requestTag. + */ + @java.lang.Override + public java.lang.String getRequestTag() { + java.lang.Object ref = requestTag_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestTag_ = s; + return s; + } + } + + /** + * + * + *
    +   * A per-request tag which can be applied to queries or reads, used for
    +   * statistics collection.
    +   * Both `request_tag` and `transaction_tag` can be specified for a read or
    +   * query that belongs to a transaction.
    +   * This field is ignored for requests where it's not applicable (for example,
    +   * `CommitRequest`).
    +   * Legal characters for `request_tag` values are all printable characters
    +   * (ASCII 32 - 126) and the length of a request_tag is limited to 50
    +   * characters. Values that exceed this limit are truncated.
    +   * Any leading underscore (_) characters are removed from the string.
    +   * 
    + * + * string request_tag = 2; + * + * @return The bytes for requestTag. + */ + @java.lang.Override + public com.google.protobuf.ByteString getRequestTagBytes() { + java.lang.Object ref = requestTag_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestTag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TRANSACTION_TAG_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object transactionTag_ = ""; + + /** + * + * + *
    +   * A tag used for statistics collection about this transaction.
    +   * Both `request_tag` and `transaction_tag` can be specified for a read or
    +   * query that belongs to a transaction.
    +   * To enable tagging on a transaction, `transaction_tag` must be set to the
    +   * same value for all requests belonging to the same transaction, including
    +   * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
    +   * If this request doesn't belong to any transaction, `transaction_tag` is
    +   * ignored.
    +   * Legal characters for `transaction_tag` values are all printable characters
    +   * (ASCII 32 - 126) and the length of a `transaction_tag` is limited to 50
    +   * characters. Values that exceed this limit are truncated.
    +   * Any leading underscore (_) characters are removed from the string.
    +   * 
    + * + * string transaction_tag = 3; + * + * @return The transactionTag. + */ + @java.lang.Override + public java.lang.String getTransactionTag() { + java.lang.Object ref = transactionTag_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionTag_ = s; + return s; + } + } + + /** + * + * + *
    +   * A tag used for statistics collection about this transaction.
    +   * Both `request_tag` and `transaction_tag` can be specified for a read or
    +   * query that belongs to a transaction.
    +   * To enable tagging on a transaction, `transaction_tag` must be set to the
    +   * same value for all requests belonging to the same transaction, including
    +   * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
    +   * If this request doesn't belong to any transaction, `transaction_tag` is
    +   * ignored.
    +   * Legal characters for `transaction_tag` values are all printable characters
    +   * (ASCII 32 - 126) and the length of a `transaction_tag` is limited to 50
    +   * characters. Values that exceed this limit are truncated.
    +   * Any leading underscore (_) characters are removed from the string.
    +   * 
    + * + * string transaction_tag = 3; + * + * @return The bytes for transactionTag. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTransactionTagBytes() { + java.lang.Object ref = transactionTag_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + transactionTag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CLIENT_CONTEXT_FIELD_NUMBER = 4; + private com.google.spanner.v1.RequestOptions.ClientContext clientContext_; + + /** + * + * + *
    +   * Optional. Optional context that may be needed for some requests.
    +   * 
    + * + * + * .google.spanner.v1.RequestOptions.ClientContext client_context = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the clientContext field is set. + */ + @java.lang.Override + public boolean hasClientContext() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Optional. Optional context that may be needed for some requests.
    +   * 
    + * + * + * .google.spanner.v1.RequestOptions.ClientContext client_context = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The clientContext. + */ + @java.lang.Override + public com.google.spanner.v1.RequestOptions.ClientContext getClientContext() { + return clientContext_ == null + ? com.google.spanner.v1.RequestOptions.ClientContext.getDefaultInstance() + : clientContext_; + } + + /** + * + * + *
    +   * Optional. Optional context that may be needed for some requests.
    +   * 
    + * + * + * .google.spanner.v1.RequestOptions.ClientContext client_context = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.RequestOptions.ClientContextOrBuilder getClientContextOrBuilder() { + return clientContext_ == null + ? com.google.spanner.v1.RequestOptions.ClientContext.getDefaultInstance() + : clientContext_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (priority_ + != com.google.spanner.v1.RequestOptions.Priority.PRIORITY_UNSPECIFIED.getNumber()) { + output.writeEnum(1, priority_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestTag_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, requestTag_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(transactionTag_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, transactionTag_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(4, getClientContext()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (priority_ + != com.google.spanner.v1.RequestOptions.Priority.PRIORITY_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, priority_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(requestTag_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, requestTag_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(transactionTag_)) { + size += 
com.google.protobuf.GeneratedMessage.computeStringSize(3, transactionTag_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getClientContext()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.RequestOptions)) { + return super.equals(obj); + } + com.google.spanner.v1.RequestOptions other = (com.google.spanner.v1.RequestOptions) obj; + + if (priority_ != other.priority_) return false; + if (!getRequestTag().equals(other.getRequestTag())) return false; + if (!getTransactionTag().equals(other.getTransactionTag())) return false; + if (hasClientContext() != other.hasClientContext()) return false; + if (hasClientContext()) { + if (!getClientContext().equals(other.getClientContext())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PRIORITY_FIELD_NUMBER; + hash = (53 * hash) + priority_; + hash = (37 * hash) + REQUEST_TAG_FIELD_NUMBER; + hash = (53 * hash) + getRequestTag().hashCode(); + hash = (37 * hash) + TRANSACTION_TAG_FIELD_NUMBER; + hash = (53 * hash) + getTransactionTag().hashCode(); + if (hasClientContext()) { + hash = (37 * hash) + CLIENT_CONTEXT_FIELD_NUMBER; + hash = (53 * hash) + getClientContext().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.RequestOptions parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.v1.RequestOptions parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.RequestOptions parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.RequestOptions parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.RequestOptions parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.RequestOptions parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.RequestOptions parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.RequestOptions parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.RequestOptions parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.RequestOptions parseDelimitedFrom( + 
java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.RequestOptions parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.RequestOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.RequestOptions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Common request options for various APIs.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.RequestOptions} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.RequestOptions) + com.google.spanner.v1.RequestOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_RequestOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_RequestOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.RequestOptions.class, + com.google.spanner.v1.RequestOptions.Builder.class); + } + + // Construct using com.google.spanner.v1.RequestOptions.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetClientContextFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + priority_ = 0; + requestTag_ = ""; + transactionTag_ = ""; + clientContext_ = null; + if (clientContextBuilder_ != null) { + clientContextBuilder_.dispose(); + clientContextBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_RequestOptions_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.RequestOptions getDefaultInstanceForType() { + return 
com.google.spanner.v1.RequestOptions.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.RequestOptions build() { + com.google.spanner.v1.RequestOptions result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.RequestOptions buildPartial() { + com.google.spanner.v1.RequestOptions result = new com.google.spanner.v1.RequestOptions(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.RequestOptions result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.priority_ = priority_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.requestTag_ = requestTag_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.transactionTag_ = transactionTag_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.clientContext_ = + clientContextBuilder_ == null ? 
clientContext_ : clientContextBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.RequestOptions) { + return mergeFrom((com.google.spanner.v1.RequestOptions) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.RequestOptions other) { + if (other == com.google.spanner.v1.RequestOptions.getDefaultInstance()) return this; + if (other.priority_ != 0) { + setPriorityValue(other.getPriorityValue()); + } + if (!other.getRequestTag().isEmpty()) { + requestTag_ = other.requestTag_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getTransactionTag().isEmpty()) { + transactionTag_ = other.transactionTag_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasClientContext()) { + mergeClientContext(other.getClientContext()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + priority_ = input.readEnum(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + requestTag_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + transactionTag_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetClientContextFieldBuilder().getBuilder(), 
extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int priority_ = 0; + + /** + * + * + *
    +     * Priority for the request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions.Priority priority = 1; + * + * @return The enum numeric value on the wire for priority. + */ + @java.lang.Override + public int getPriorityValue() { + return priority_; + } + + /** + * + * + *
    +     * Priority for the request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions.Priority priority = 1; + * + * @param value The enum numeric value on the wire for priority to set. + * @return This builder for chaining. + */ + public Builder setPriorityValue(int value) { + priority_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Priority for the request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions.Priority priority = 1; + * + * @return The priority. + */ + @java.lang.Override + public com.google.spanner.v1.RequestOptions.Priority getPriority() { + com.google.spanner.v1.RequestOptions.Priority result = + com.google.spanner.v1.RequestOptions.Priority.forNumber(priority_); + return result == null ? com.google.spanner.v1.RequestOptions.Priority.UNRECOGNIZED : result; + } + + /** + * + * + *
    +     * Priority for the request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions.Priority priority = 1; + * + * @param value The priority to set. + * @return This builder for chaining. + */ + public Builder setPriority(com.google.spanner.v1.RequestOptions.Priority value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + priority_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Priority for the request.
    +     * 
    + * + * .google.spanner.v1.RequestOptions.Priority priority = 1; + * + * @return This builder for chaining. + */ + public Builder clearPriority() { + bitField0_ = (bitField0_ & ~0x00000001); + priority_ = 0; + onChanged(); + return this; + } + + private java.lang.Object requestTag_ = ""; + + /** + * + * + *
    +     * A per-request tag which can be applied to queries or reads, used for
    +     * statistics collection.
    +     * Both `request_tag` and `transaction_tag` can be specified for a read or
    +     * query that belongs to a transaction.
    +     * This field is ignored for requests where it's not applicable (for example,
    +     * `CommitRequest`).
    +     * Legal characters for `request_tag` values are all printable characters
    +     * (ASCII 32 - 126) and the length of a request_tag is limited to 50
    +     * characters. Values that exceed this limit are truncated.
    +     * Any leading underscore (_) characters are removed from the string.
    +     * 
    + * + * string request_tag = 2; + * + * @return The requestTag. + */ + public java.lang.String getRequestTag() { + java.lang.Object ref = requestTag_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + requestTag_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * A per-request tag which can be applied to queries or reads, used for
    +     * statistics collection.
    +     * Both `request_tag` and `transaction_tag` can be specified for a read or
    +     * query that belongs to a transaction.
    +     * This field is ignored for requests where it's not applicable (for example,
    +     * `CommitRequest`).
    +     * Legal characters for `request_tag` values are all printable characters
    +     * (ASCII 32 - 126) and the length of a request_tag is limited to 50
    +     * characters. Values that exceed this limit are truncated.
    +     * Any leading underscore (_) characters are removed from the string.
    +     * 
    + * + * string request_tag = 2; + * + * @return The bytes for requestTag. + */ + public com.google.protobuf.ByteString getRequestTagBytes() { + java.lang.Object ref = requestTag_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + requestTag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * A per-request tag which can be applied to queries or reads, used for
    +     * statistics collection.
    +     * Both `request_tag` and `transaction_tag` can be specified for a read or
    +     * query that belongs to a transaction.
    +     * This field is ignored for requests where it's not applicable (for example,
    +     * `CommitRequest`).
    +     * Legal characters for `request_tag` values are all printable characters
    +     * (ASCII 32 - 126) and the length of a request_tag is limited to 50
    +     * characters. Values that exceed this limit are truncated.
    +     * Any leading underscore (_) characters are removed from the string.
    +     * 
    + * + * string request_tag = 2; + * + * @param value The requestTag to set. + * @return This builder for chaining. + */ + public Builder setRequestTag(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + requestTag_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A per-request tag which can be applied to queries or reads, used for
    +     * statistics collection.
    +     * Both `request_tag` and `transaction_tag` can be specified for a read or
    +     * query that belongs to a transaction.
    +     * This field is ignored for requests where it's not applicable (for example,
    +     * `CommitRequest`).
    +     * Legal characters for `request_tag` values are all printable characters
    +     * (ASCII 32 - 126) and the length of a request_tag is limited to 50
    +     * characters. Values that exceed this limit are truncated.
    +     * Any leading underscore (_) characters are removed from the string.
    +     * 
    + * + * string request_tag = 2; + * + * @return This builder for chaining. + */ + public Builder clearRequestTag() { + requestTag_ = getDefaultInstance().getRequestTag(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * A per-request tag which can be applied to queries or reads, used for
    +     * statistics collection.
    +     * Both `request_tag` and `transaction_tag` can be specified for a read or
    +     * query that belongs to a transaction.
    +     * This field is ignored for requests where it's not applicable (for example,
    +     * `CommitRequest`).
    +     * Legal characters for `request_tag` values are all printable characters
    +     * (ASCII 32 - 126) and the length of a request_tag is limited to 50
    +     * characters. Values that exceed this limit are truncated.
    +     * Any leading underscore (_) characters are removed from the string.
    +     * 
    + * + * string request_tag = 2; + * + * @param value The bytes for requestTag to set. + * @return This builder for chaining. + */ + public Builder setRequestTagBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + requestTag_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object transactionTag_ = ""; + + /** + * + * + *
    +     * A tag used for statistics collection about this transaction.
    +     * Both `request_tag` and `transaction_tag` can be specified for a read or
    +     * query that belongs to a transaction.
    +     * To enable tagging on a transaction, `transaction_tag` must be set to the
    +     * same value for all requests belonging to the same transaction, including
    +     * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
    +     * If this request doesn't belong to any transaction, `transaction_tag` is
    +     * ignored.
    +     * Legal characters for `transaction_tag` values are all printable characters
    +     * (ASCII 32 - 126) and the length of a `transaction_tag` is limited to 50
    +     * characters. Values that exceed this limit are truncated.
    +     * Any leading underscore (_) characters are removed from the string.
    +     * 
    + * + * string transaction_tag = 3; + * + * @return The transactionTag. + */ + public java.lang.String getTransactionTag() { + java.lang.Object ref = transactionTag_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionTag_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * A tag used for statistics collection about this transaction.
    +     * Both `request_tag` and `transaction_tag` can be specified for a read or
    +     * query that belongs to a transaction.
    +     * To enable tagging on a transaction, `transaction_tag` must be set to the
    +     * same value for all requests belonging to the same transaction, including
    +     * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
    +     * If this request doesn't belong to any transaction, `transaction_tag` is
    +     * ignored.
    +     * Legal characters for `transaction_tag` values are all printable characters
    +     * (ASCII 32 - 126) and the length of a `transaction_tag` is limited to 50
    +     * characters. Values that exceed this limit are truncated.
    +     * Any leading underscore (_) characters are removed from the string.
    +     * 
    + * + * string transaction_tag = 3; + * + * @return The bytes for transactionTag. + */ + public com.google.protobuf.ByteString getTransactionTagBytes() { + java.lang.Object ref = transactionTag_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + transactionTag_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * A tag used for statistics collection about this transaction.
    +     * Both `request_tag` and `transaction_tag` can be specified for a read or
    +     * query that belongs to a transaction.
    +     * To enable tagging on a transaction, `transaction_tag` must be set to the
    +     * same value for all requests belonging to the same transaction, including
    +     * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
    +     * If this request doesn't belong to any transaction, `transaction_tag` is
    +     * ignored.
    +     * Legal characters for `transaction_tag` values are all printable characters
    +     * (ASCII 32 - 126) and the length of a `transaction_tag` is limited to 50
    +     * characters. Values that exceed this limit are truncated.
    +     * Any leading underscore (_) characters are removed from the string.
    +     * 
    + * + * string transaction_tag = 3; + * + * @param value The transactionTag to set. + * @return This builder for chaining. + */ + public Builder setTransactionTag(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + transactionTag_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A tag used for statistics collection about this transaction.
    +     * Both `request_tag` and `transaction_tag` can be specified for a read or
    +     * query that belongs to a transaction.
    +     * To enable tagging on a transaction, `transaction_tag` must be set to the
    +     * same value for all requests belonging to the same transaction, including
    +     * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
    +     * If this request doesn't belong to any transaction, `transaction_tag` is
    +     * ignored.
    +     * Legal characters for `transaction_tag` values are all printable characters
    +     * (ASCII 32 - 126) and the length of a `transaction_tag` is limited to 50
    +     * characters. Values that exceed this limit are truncated.
    +     * Any leading underscore (_) characters are removed from the string.
    +     * 
    + * + * string transaction_tag = 3; + * + * @return This builder for chaining. + */ + public Builder clearTransactionTag() { + transactionTag_ = getDefaultInstance().getTransactionTag(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * A tag used for statistics collection about this transaction.
    +     * Both `request_tag` and `transaction_tag` can be specified for a read or
    +     * query that belongs to a transaction.
    +     * To enable tagging on a transaction, `transaction_tag` must be set to the
    +     * same value for all requests belonging to the same transaction, including
    +     * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
    +     * If this request doesn't belong to any transaction, `transaction_tag` is
    +     * ignored.
    +     * Legal characters for `transaction_tag` values are all printable characters
    +     * (ASCII 32 - 126) and the length of a `transaction_tag` is limited to 50
    +     * characters. Values that exceed this limit are truncated.
    +     * Any leading underscore (_) characters are removed from the string.
    +     * 
    + * + * string transaction_tag = 3; + * + * @param value The bytes for transactionTag to set. + * @return This builder for chaining. + */ + public Builder setTransactionTagBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + transactionTag_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private com.google.spanner.v1.RequestOptions.ClientContext clientContext_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RequestOptions.ClientContext, + com.google.spanner.v1.RequestOptions.ClientContext.Builder, + com.google.spanner.v1.RequestOptions.ClientContextOrBuilder> + clientContextBuilder_; + + /** + * + * + *
    +     * Optional. Optional context that may be needed for some requests.
    +     * 
    + * + * + * .google.spanner.v1.RequestOptions.ClientContext client_context = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the clientContext field is set. + */ + public boolean hasClientContext() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Optional. Optional context that may be needed for some requests.
    +     * 
    + * + * + * .google.spanner.v1.RequestOptions.ClientContext client_context = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The clientContext. + */ + public com.google.spanner.v1.RequestOptions.ClientContext getClientContext() { + if (clientContextBuilder_ == null) { + return clientContext_ == null + ? com.google.spanner.v1.RequestOptions.ClientContext.getDefaultInstance() + : clientContext_; + } else { + return clientContextBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. Optional context that may be needed for some requests.
    +     * 
    + * + * + * .google.spanner.v1.RequestOptions.ClientContext client_context = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setClientContext(com.google.spanner.v1.RequestOptions.ClientContext value) { + if (clientContextBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + clientContext_ = value; + } else { + clientContextBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Optional context that may be needed for some requests.
    +     * 
    + * + * + * .google.spanner.v1.RequestOptions.ClientContext client_context = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setClientContext( + com.google.spanner.v1.RequestOptions.ClientContext.Builder builderForValue) { + if (clientContextBuilder_ == null) { + clientContext_ = builderForValue.build(); + } else { + clientContextBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Optional context that may be needed for some requests.
    +     * 
    + * + * + * .google.spanner.v1.RequestOptions.ClientContext client_context = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeClientContext(com.google.spanner.v1.RequestOptions.ClientContext value) { + if (clientContextBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && clientContext_ != null + && clientContext_ + != com.google.spanner.v1.RequestOptions.ClientContext.getDefaultInstance()) { + getClientContextBuilder().mergeFrom(value); + } else { + clientContext_ = value; + } + } else { + clientContextBuilder_.mergeFrom(value); + } + if (clientContext_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. Optional context that may be needed for some requests.
    +     * 
    + * + * + * .google.spanner.v1.RequestOptions.ClientContext client_context = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearClientContext() { + bitField0_ = (bitField0_ & ~0x00000008); + clientContext_ = null; + if (clientContextBuilder_ != null) { + clientContextBuilder_.dispose(); + clientContextBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. Optional context that may be needed for some requests.
    +     * 
    + * + * + * .google.spanner.v1.RequestOptions.ClientContext client_context = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.RequestOptions.ClientContext.Builder getClientContextBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetClientContextFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. Optional context that may be needed for some requests.
    +     * 
    + * + * + * .google.spanner.v1.RequestOptions.ClientContext client_context = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.RequestOptions.ClientContextOrBuilder getClientContextOrBuilder() { + if (clientContextBuilder_ != null) { + return clientContextBuilder_.getMessageOrBuilder(); + } else { + return clientContext_ == null + ? com.google.spanner.v1.RequestOptions.ClientContext.getDefaultInstance() + : clientContext_; + } + } + + /** + * + * + *
    +     * Optional. Optional context that may be needed for some requests.
    +     * 
    + * + * + * .google.spanner.v1.RequestOptions.ClientContext client_context = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RequestOptions.ClientContext, + com.google.spanner.v1.RequestOptions.ClientContext.Builder, + com.google.spanner.v1.RequestOptions.ClientContextOrBuilder> + internalGetClientContextFieldBuilder() { + if (clientContextBuilder_ == null) { + clientContextBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.RequestOptions.ClientContext, + com.google.spanner.v1.RequestOptions.ClientContext.Builder, + com.google.spanner.v1.RequestOptions.ClientContextOrBuilder>( + getClientContext(), getParentForChildren(), isClean()); + clientContext_ = null; + } + return clientContextBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.RequestOptions) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.RequestOptions) + private static final com.google.spanner.v1.RequestOptions DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.RequestOptions(); + } + + public static com.google.spanner.v1.RequestOptions getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RequestOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + 
throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.RequestOptions getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RequestOptionsOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RequestOptionsOrBuilder.java new file mode 100644 index 000000000000..601c48378eba --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RequestOptionsOrBuilder.java @@ -0,0 +1,189 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface RequestOptionsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.RequestOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Priority for the request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions.Priority priority = 1; + * + * @return The enum numeric value on the wire for priority. + */ + int getPriorityValue(); + + /** + * + * + *
    +   * Priority for the request.
    +   * 
    + * + * .google.spanner.v1.RequestOptions.Priority priority = 1; + * + * @return The priority. + */ + com.google.spanner.v1.RequestOptions.Priority getPriority(); + + /** + * + * + *
    +   * A per-request tag which can be applied to queries or reads, used for
    +   * statistics collection.
    +   * Both `request_tag` and `transaction_tag` can be specified for a read or
    +   * query that belongs to a transaction.
    +   * This field is ignored for requests where it's not applicable (for example,
    +   * `CommitRequest`).
    +   * Legal characters for `request_tag` values are all printable characters
    +   * (ASCII 32 - 126) and the length of a request_tag is limited to 50
    +   * characters. Values that exceed this limit are truncated.
    +   * Any leading underscore (_) characters are removed from the string.
    +   * 
    + * + * string request_tag = 2; + * + * @return The requestTag. + */ + java.lang.String getRequestTag(); + + /** + * + * + *
    +   * A per-request tag which can be applied to queries or reads, used for
    +   * statistics collection.
    +   * Both `request_tag` and `transaction_tag` can be specified for a read or
    +   * query that belongs to a transaction.
    +   * This field is ignored for requests where it's not applicable (for example,
    +   * `CommitRequest`).
    +   * Legal characters for `request_tag` values are all printable characters
    +   * (ASCII 32 - 126) and the length of a request_tag is limited to 50
    +   * characters. Values that exceed this limit are truncated.
    +   * Any leading underscore (_) characters are removed from the string.
    +   * 
    + * + * string request_tag = 2; + * + * @return The bytes for requestTag. + */ + com.google.protobuf.ByteString getRequestTagBytes(); + + /** + * + * + *
    +   * A tag used for statistics collection about this transaction.
    +   * Both `request_tag` and `transaction_tag` can be specified for a read or
    +   * query that belongs to a transaction.
    +   * To enable tagging on a transaction, `transaction_tag` must be set to the
    +   * same value for all requests belonging to the same transaction, including
    +   * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
    +   * If this request doesn't belong to any transaction, `transaction_tag` is
    +   * ignored.
    +   * Legal characters for `transaction_tag` values are all printable characters
    +   * (ASCII 32 - 126) and the length of a `transaction_tag` is limited to 50
    +   * characters. Values that exceed this limit are truncated.
    +   * Any leading underscore (_) characters are removed from the string.
    +   * 
    + * + * string transaction_tag = 3; + * + * @return The transactionTag. + */ + java.lang.String getTransactionTag(); + + /** + * + * + *
    +   * A tag used for statistics collection about this transaction.
    +   * Both `request_tag` and `transaction_tag` can be specified for a read or
    +   * query that belongs to a transaction.
    +   * To enable tagging on a transaction, `transaction_tag` must be set to the
    +   * same value for all requests belonging to the same transaction, including
    +   * [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
    +   * If this request doesn't belong to any transaction, `transaction_tag` is
    +   * ignored.
    +   * Legal characters for `transaction_tag` values are all printable characters
    +   * (ASCII 32 - 126) and the length of a `transaction_tag` is limited to 50
    +   * characters. Values that exceed this limit are truncated.
    +   * Any leading underscore (_) characters are removed from the string.
    +   * 
    + * + * string transaction_tag = 3; + * + * @return The bytes for transactionTag. + */ + com.google.protobuf.ByteString getTransactionTagBytes(); + + /** + * + * + *
    +   * Optional. Optional context that may be needed for some requests.
    +   * 
    + * + * + * .google.spanner.v1.RequestOptions.ClientContext client_context = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the clientContext field is set. + */ + boolean hasClientContext(); + + /** + * + * + *
    +   * Optional. Optional context that may be needed for some requests.
    +   * 
    + * + * + * .google.spanner.v1.RequestOptions.ClientContext client_context = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The clientContext. + */ + com.google.spanner.v1.RequestOptions.ClientContext getClientContext(); + + /** + * + * + *
    +   * Optional. Optional context that may be needed for some requests.
    +   * 
    + * + * + * .google.spanner.v1.RequestOptions.ClientContext client_context = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.RequestOptions.ClientContextOrBuilder getClientContextOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSet.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSet.java new file mode 100644 index 000000000000..3c1b8d0b4e0f --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSet.java @@ -0,0 +1,2376 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/result_set.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * Results from [Read][google.spanner.v1.Spanner.Read] or
    + * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
    + * 
    + * + * Protobuf type {@code google.spanner.v1.ResultSet} + */ +@com.google.protobuf.Generated +public final class ResultSet extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ResultSet) + ResultSetOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ResultSet"); + } + + // Use ResultSet.newBuilder() to construct. + private ResultSet(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ResultSet() { + rows_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ResultSetProto + .internal_static_google_spanner_v1_ResultSet_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ResultSetProto + .internal_static_google_spanner_v1_ResultSet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ResultSet.class, com.google.spanner.v1.ResultSet.Builder.class); + } + + private int bitField0_; + public static final int METADATA_FIELD_NUMBER = 1; + private com.google.spanner.v1.ResultSetMetadata metadata_; + + /** + * + * + *
    +   * Metadata about the result set, such as row type information.
    +   * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + * + * @return Whether the metadata field is set. + */ + @java.lang.Override + public boolean hasMetadata() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Metadata about the result set, such as row type information.
    +   * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + * + * @return The metadata. + */ + @java.lang.Override + public com.google.spanner.v1.ResultSetMetadata getMetadata() { + return metadata_ == null + ? com.google.spanner.v1.ResultSetMetadata.getDefaultInstance() + : metadata_; + } + + /** + * + * + *
    +   * Metadata about the result set, such as row type information.
    +   * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + */ + @java.lang.Override + public com.google.spanner.v1.ResultSetMetadataOrBuilder getMetadataOrBuilder() { + return metadata_ == null + ? com.google.spanner.v1.ResultSetMetadata.getDefaultInstance() + : metadata_; + } + + public static final int ROWS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private java.util.List rows_; + + /** + * + * + *
    +   * Each element in `rows` is a row whose format is defined by
    +   * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +   * element in each row matches the ith field in
    +   * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +   * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +   * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + @java.lang.Override + public java.util.List getRowsList() { + return rows_; + } + + /** + * + * + *
    +   * Each element in `rows` is a row whose format is defined by
    +   * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +   * element in each row matches the ith field in
    +   * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +   * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +   * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + @java.lang.Override + public java.util.List getRowsOrBuilderList() { + return rows_; + } + + /** + * + * + *
    +   * Each element in `rows` is a row whose format is defined by
    +   * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +   * element in each row matches the ith field in
    +   * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +   * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +   * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + @java.lang.Override + public int getRowsCount() { + return rows_.size(); + } + + /** + * + * + *
    +   * Each element in `rows` is a row whose format is defined by
    +   * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +   * element in each row matches the ith field in
    +   * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +   * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +   * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + @java.lang.Override + public com.google.protobuf.ListValue getRows(int index) { + return rows_.get(index); + } + + /** + * + * + *
    +   * Each element in `rows` is a row whose format is defined by
    +   * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +   * element in each row matches the ith field in
    +   * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +   * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +   * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + @java.lang.Override + public com.google.protobuf.ListValueOrBuilder getRowsOrBuilder(int index) { + return rows_.get(index); + } + + public static final int STATS_FIELD_NUMBER = 3; + private com.google.spanner.v1.ResultSetStats stats_; + + /** + * + * + *
    +   * Query plan and execution statistics for the SQL statement that
    +   * produced this result set. These can be requested by setting
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +   * DML statements always produce stats containing the number of rows
    +   * modified, unless executed using the
    +   * [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN]
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +   * Other fields might or might not be populated, based on the
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +   * 
    + * + * .google.spanner.v1.ResultSetStats stats = 3; + * + * @return Whether the stats field is set. + */ + @java.lang.Override + public boolean hasStats() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Query plan and execution statistics for the SQL statement that
    +   * produced this result set. These can be requested by setting
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +   * DML statements always produce stats containing the number of rows
    +   * modified, unless executed using the
    +   * [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN]
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +   * Other fields might or might not be populated, based on the
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +   * 
    + * + * .google.spanner.v1.ResultSetStats stats = 3; + * + * @return The stats. + */ + @java.lang.Override + public com.google.spanner.v1.ResultSetStats getStats() { + return stats_ == null ? com.google.spanner.v1.ResultSetStats.getDefaultInstance() : stats_; + } + + /** + * + * + *
    +   * Query plan and execution statistics for the SQL statement that
    +   * produced this result set. These can be requested by setting
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +   * DML statements always produce stats containing the number of rows
    +   * modified, unless executed using the
    +   * [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN]
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +   * Other fields might or might not be populated, based on the
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +   * 
    + * + * .google.spanner.v1.ResultSetStats stats = 3; + */ + @java.lang.Override + public com.google.spanner.v1.ResultSetStatsOrBuilder getStatsOrBuilder() { + return stats_ == null ? com.google.spanner.v1.ResultSetStats.getDefaultInstance() : stats_; + } + + public static final int PRECOMMIT_TOKEN_FIELD_NUMBER = 5; + private com.google.spanner.v1.MultiplexedSessionPrecommitToken precommitToken_; + + /** + * + * + *
    +   * Optional. A precommit token is included if the read-write transaction is on
    +   * a multiplexed session. Pass the precommit token with the highest sequence
    +   * number from this transaction attempt to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + @java.lang.Override + public boolean hasPrecommitToken() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * Optional. A precommit token is included if the read-write transaction is on
    +   * a multiplexed session. Pass the precommit token with the highest sequence
    +   * number from this transaction attempt to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + + /** + * + * + *
    +   * Optional. A precommit token is included if the read-write transaction is on
    +   * a multiplexed session. Pass the precommit token with the highest sequence
    +   * number from this transaction attempt to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + + public static final int CACHE_UPDATE_FIELD_NUMBER = 6; + private com.google.spanner.v1.CacheUpdate cacheUpdate_; + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the cacheUpdate field is set. + */ + @java.lang.Override + public boolean hasCacheUpdate() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The cacheUpdate. + */ + @java.lang.Override + public com.google.spanner.v1.CacheUpdate getCacheUpdate() { + return cacheUpdate_ == null + ? com.google.spanner.v1.CacheUpdate.getDefaultInstance() + : cacheUpdate_; + } + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.CacheUpdateOrBuilder getCacheUpdateOrBuilder() { + return cacheUpdate_ == null + ? com.google.spanner.v1.CacheUpdate.getDefaultInstance() + : cacheUpdate_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getMetadata()); + } + for (int i = 0; i < rows_.size(); i++) { + output.writeMessage(2, rows_.get(i)); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getStats()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(5, getPrecommitToken()); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(6, getCacheUpdate()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getMetadata()); + } + for (int i = 0; i < rows_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, rows_.get(i)); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getStats()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getPrecommitToken()); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, 
getCacheUpdate()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.ResultSet)) { + return super.equals(obj); + } + com.google.spanner.v1.ResultSet other = (com.google.spanner.v1.ResultSet) obj; + + if (hasMetadata() != other.hasMetadata()) return false; + if (hasMetadata()) { + if (!getMetadata().equals(other.getMetadata())) return false; + } + if (!getRowsList().equals(other.getRowsList())) return false; + if (hasStats() != other.hasStats()) return false; + if (hasStats()) { + if (!getStats().equals(other.getStats())) return false; + } + if (hasPrecommitToken() != other.hasPrecommitToken()) return false; + if (hasPrecommitToken()) { + if (!getPrecommitToken().equals(other.getPrecommitToken())) return false; + } + if (hasCacheUpdate() != other.hasCacheUpdate()) return false; + if (hasCacheUpdate()) { + if (!getCacheUpdate().equals(other.getCacheUpdate())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasMetadata()) { + hash = (37 * hash) + METADATA_FIELD_NUMBER; + hash = (53 * hash) + getMetadata().hashCode(); + } + if (getRowsCount() > 0) { + hash = (37 * hash) + ROWS_FIELD_NUMBER; + hash = (53 * hash) + getRowsList().hashCode(); + } + if (hasStats()) { + hash = (37 * hash) + STATS_FIELD_NUMBER; + hash = (53 * hash) + getStats().hashCode(); + } + if (hasPrecommitToken()) { + hash = (37 * hash) + PRECOMMIT_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPrecommitToken().hashCode(); + } + if (hasCacheUpdate()) { + hash = (37 * hash) + CACHE_UPDATE_FIELD_NUMBER; + hash = (53 * hash) + getCacheUpdate().hashCode(); + 
} + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ResultSet parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ResultSet parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ResultSet parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ResultSet parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ResultSet parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ResultSet parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ResultSet parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ResultSet parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static 
com.google.spanner.v1.ResultSet parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ResultSet parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ResultSet parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ResultSet parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.ResultSet prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Results from [Read][google.spanner.v1.Spanner.Read] or
    +   * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.ResultSet} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ResultSet) + com.google.spanner.v1.ResultSetOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ResultSetProto + .internal_static_google_spanner_v1_ResultSet_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ResultSetProto + .internal_static_google_spanner_v1_ResultSet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ResultSet.class, com.google.spanner.v1.ResultSet.Builder.class); + } + + // Construct using com.google.spanner.v1.ResultSet.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetMetadataFieldBuilder(); + internalGetRowsFieldBuilder(); + internalGetStatsFieldBuilder(); + internalGetPrecommitTokenFieldBuilder(); + internalGetCacheUpdateFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + metadata_ = null; + if (metadataBuilder_ != null) { + metadataBuilder_.dispose(); + metadataBuilder_ = null; + } + if (rowsBuilder_ == null) { + rows_ = java.util.Collections.emptyList(); + } else { + rows_ = null; + rowsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + stats_ = null; + if (statsBuilder_ != null) { + statsBuilder_.dispose(); + statsBuilder_ = null; + } + precommitToken_ = null; + if (precommitTokenBuilder_ != null) { + 
precommitTokenBuilder_.dispose(); + precommitTokenBuilder_ = null; + } + cacheUpdate_ = null; + if (cacheUpdateBuilder_ != null) { + cacheUpdateBuilder_.dispose(); + cacheUpdateBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.ResultSetProto + .internal_static_google_spanner_v1_ResultSet_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.ResultSet getDefaultInstanceForType() { + return com.google.spanner.v1.ResultSet.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ResultSet build() { + com.google.spanner.v1.ResultSet result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.ResultSet buildPartial() { + com.google.spanner.v1.ResultSet result = new com.google.spanner.v1.ResultSet(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.v1.ResultSet result) { + if (rowsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + rows_ = java.util.Collections.unmodifiableList(rows_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.rows_ = rows_; + } else { + result.rows_ = rowsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.ResultSet result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.metadata_ = metadataBuilder_ == null ? metadata_ : metadataBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.stats_ = statsBuilder_ == null ? 
stats_ : statsBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.precommitToken_ = + precommitTokenBuilder_ == null ? precommitToken_ : precommitTokenBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.cacheUpdate_ = + cacheUpdateBuilder_ == null ? cacheUpdate_ : cacheUpdateBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.ResultSet) { + return mergeFrom((com.google.spanner.v1.ResultSet) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.ResultSet other) { + if (other == com.google.spanner.v1.ResultSet.getDefaultInstance()) return this; + if (other.hasMetadata()) { + mergeMetadata(other.getMetadata()); + } + if (rowsBuilder_ == null) { + if (!other.rows_.isEmpty()) { + if (rows_.isEmpty()) { + rows_ = other.rows_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureRowsIsMutable(); + rows_.addAll(other.rows_); + } + onChanged(); + } + } else { + if (!other.rows_.isEmpty()) { + if (rowsBuilder_.isEmpty()) { + rowsBuilder_.dispose(); + rowsBuilder_ = null; + rows_ = other.rows_; + bitField0_ = (bitField0_ & ~0x00000002); + rowsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetRowsFieldBuilder() + : null; + } else { + rowsBuilder_.addAllMessages(other.rows_); + } + } + } + if (other.hasStats()) { + mergeStats(other.getStats()); + } + if (other.hasPrecommitToken()) { + mergePrecommitToken(other.getPrecommitToken()); + } + if (other.hasCacheUpdate()) { + mergeCacheUpdate(other.getCacheUpdate()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetMetadataFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + com.google.protobuf.ListValue m = + input.readMessage(com.google.protobuf.ListValue.parser(), extensionRegistry); + if (rowsBuilder_ == null) { + ensureRowsIsMutable(); + rows_.add(m); + } else { + rowsBuilder_.addMessage(m); + } + break; + } // case 18 + case 26: + { + input.readMessage(internalGetStatsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 42: + { + input.readMessage( + internalGetPrecommitTokenFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 42 + case 50: + { + input.readMessage( + internalGetCacheUpdateFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 50 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // 
while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.v1.ResultSetMetadata metadata_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ResultSetMetadata, + com.google.spanner.v1.ResultSetMetadata.Builder, + com.google.spanner.v1.ResultSetMetadataOrBuilder> + metadataBuilder_; + + /** + * + * + *
    +     * Metadata about the result set, such as row type information.
    +     * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + * + * @return Whether the metadata field is set. + */ + public boolean hasMetadata() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Metadata about the result set, such as row type information.
    +     * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + * + * @return The metadata. + */ + public com.google.spanner.v1.ResultSetMetadata getMetadata() { + if (metadataBuilder_ == null) { + return metadata_ == null + ? com.google.spanner.v1.ResultSetMetadata.getDefaultInstance() + : metadata_; + } else { + return metadataBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Metadata about the result set, such as row type information.
    +     * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + */ + public Builder setMetadata(com.google.spanner.v1.ResultSetMetadata value) { + if (metadataBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + metadata_ = value; + } else { + metadataBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Metadata about the result set, such as row type information.
    +     * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + */ + public Builder setMetadata(com.google.spanner.v1.ResultSetMetadata.Builder builderForValue) { + if (metadataBuilder_ == null) { + metadata_ = builderForValue.build(); + } else { + metadataBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Metadata about the result set, such as row type information.
    +     * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + */ + public Builder mergeMetadata(com.google.spanner.v1.ResultSetMetadata value) { + if (metadataBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && metadata_ != null + && metadata_ != com.google.spanner.v1.ResultSetMetadata.getDefaultInstance()) { + getMetadataBuilder().mergeFrom(value); + } else { + metadata_ = value; + } + } else { + metadataBuilder_.mergeFrom(value); + } + if (metadata_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Metadata about the result set, such as row type information.
    +     * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + */ + public Builder clearMetadata() { + bitField0_ = (bitField0_ & ~0x00000001); + metadata_ = null; + if (metadataBuilder_ != null) { + metadataBuilder_.dispose(); + metadataBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Metadata about the result set, such as row type information.
    +     * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + */ + public com.google.spanner.v1.ResultSetMetadata.Builder getMetadataBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetMetadataFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Metadata about the result set, such as row type information.
    +     * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + */ + public com.google.spanner.v1.ResultSetMetadataOrBuilder getMetadataOrBuilder() { + if (metadataBuilder_ != null) { + return metadataBuilder_.getMessageOrBuilder(); + } else { + return metadata_ == null + ? com.google.spanner.v1.ResultSetMetadata.getDefaultInstance() + : metadata_; + } + } + + /** + * + * + *
    +     * Metadata about the result set, such as row type information.
    +     * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ResultSetMetadata, + com.google.spanner.v1.ResultSetMetadata.Builder, + com.google.spanner.v1.ResultSetMetadataOrBuilder> + internalGetMetadataFieldBuilder() { + if (metadataBuilder_ == null) { + metadataBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ResultSetMetadata, + com.google.spanner.v1.ResultSetMetadata.Builder, + com.google.spanner.v1.ResultSetMetadataOrBuilder>( + getMetadata(), getParentForChildren(), isClean()); + metadata_ = null; + } + return metadataBuilder_; + } + + private java.util.List rows_ = java.util.Collections.emptyList(); + + private void ensureRowsIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + rows_ = new java.util.ArrayList(rows_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder> + rowsBuilder_; + + /** + * + * + *
    +     * Each element in `rows` is a row whose format is defined by
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +     * element in each row matches the ith field in
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +     * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + public java.util.List getRowsList() { + if (rowsBuilder_ == null) { + return java.util.Collections.unmodifiableList(rows_); + } else { + return rowsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * Each element in `rows` is a row whose format is defined by
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +     * element in each row matches the ith field in
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +     * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + public int getRowsCount() { + if (rowsBuilder_ == null) { + return rows_.size(); + } else { + return rowsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * Each element in `rows` is a row whose format is defined by
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +     * element in each row matches the ith field in
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +     * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + public com.google.protobuf.ListValue getRows(int index) { + if (rowsBuilder_ == null) { + return rows_.get(index); + } else { + return rowsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * Each element in `rows` is a row whose format is defined by
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +     * element in each row matches the ith field in
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +     * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + public Builder setRows(int index, com.google.protobuf.ListValue value) { + if (rowsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRowsIsMutable(); + rows_.set(index, value); + onChanged(); + } else { + rowsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Each element in `rows` is a row whose format is defined by
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +     * element in each row matches the ith field in
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +     * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + public Builder setRows(int index, com.google.protobuf.ListValue.Builder builderForValue) { + if (rowsBuilder_ == null) { + ensureRowsIsMutable(); + rows_.set(index, builderForValue.build()); + onChanged(); + } else { + rowsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Each element in `rows` is a row whose format is defined by
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +     * element in each row matches the ith field in
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +     * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + public Builder addRows(com.google.protobuf.ListValue value) { + if (rowsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRowsIsMutable(); + rows_.add(value); + onChanged(); + } else { + rowsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * Each element in `rows` is a row whose format is defined by
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +     * element in each row matches the ith field in
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +     * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + public Builder addRows(int index, com.google.protobuf.ListValue value) { + if (rowsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRowsIsMutable(); + rows_.add(index, value); + onChanged(); + } else { + rowsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * Each element in `rows` is a row whose format is defined by
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +     * element in each row matches the ith field in
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +     * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + public Builder addRows(com.google.protobuf.ListValue.Builder builderForValue) { + if (rowsBuilder_ == null) { + ensureRowsIsMutable(); + rows_.add(builderForValue.build()); + onChanged(); + } else { + rowsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Each element in `rows` is a row whose format is defined by
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +     * element in each row matches the ith field in
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +     * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + public Builder addRows(int index, com.google.protobuf.ListValue.Builder builderForValue) { + if (rowsBuilder_ == null) { + ensureRowsIsMutable(); + rows_.add(index, builderForValue.build()); + onChanged(); + } else { + rowsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * Each element in `rows` is a row whose format is defined by
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +     * element in each row matches the ith field in
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +     * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + public Builder addAllRows(java.lang.Iterable values) { + if (rowsBuilder_ == null) { + ensureRowsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, rows_); + onChanged(); + } else { + rowsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * Each element in `rows` is a row whose format is defined by
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +     * element in each row matches the ith field in
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +     * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + public Builder clearRows() { + if (rowsBuilder_ == null) { + rows_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + rowsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Each element in `rows` is a row whose format is defined by
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +     * element in each row matches the ith field in
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +     * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + public Builder removeRows(int index) { + if (rowsBuilder_ == null) { + ensureRowsIsMutable(); + rows_.remove(index); + onChanged(); + } else { + rowsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * Each element in `rows` is a row whose format is defined by
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +     * element in each row matches the ith field in
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +     * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + public com.google.protobuf.ListValue.Builder getRowsBuilder(int index) { + return internalGetRowsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * Each element in `rows` is a row whose format is defined by
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +     * element in each row matches the ith field in
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +     * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + public com.google.protobuf.ListValueOrBuilder getRowsOrBuilder(int index) { + if (rowsBuilder_ == null) { + return rows_.get(index); + } else { + return rowsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * Each element in `rows` is a row whose format is defined by
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +     * element in each row matches the ith field in
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +     * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + public java.util.List getRowsOrBuilderList() { + if (rowsBuilder_ != null) { + return rowsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(rows_); + } + } + + /** + * + * + *
    +     * Each element in `rows` is a row whose format is defined by
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +     * element in each row matches the ith field in
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +     * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + public com.google.protobuf.ListValue.Builder addRowsBuilder() { + return internalGetRowsFieldBuilder() + .addBuilder(com.google.protobuf.ListValue.getDefaultInstance()); + } + + /** + * + * + *
    +     * Each element in `rows` is a row whose format is defined by
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +     * element in each row matches the ith field in
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +     * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + public com.google.protobuf.ListValue.Builder addRowsBuilder(int index) { + return internalGetRowsFieldBuilder() + .addBuilder(index, com.google.protobuf.ListValue.getDefaultInstance()); + } + + /** + * + * + *
    +     * Each element in `rows` is a row whose format is defined by
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +     * element in each row matches the ith field in
    +     * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +     * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +     * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + public java.util.List getRowsBuilderList() { + return internalGetRowsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder> + internalGetRowsFieldBuilder() { + if (rowsBuilder_ == null) { + rowsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.protobuf.ListValue, + com.google.protobuf.ListValue.Builder, + com.google.protobuf.ListValueOrBuilder>( + rows_, ((bitField0_ & 0x00000002) != 0), getParentForChildren(), isClean()); + rows_ = null; + } + return rowsBuilder_; + } + + private com.google.spanner.v1.ResultSetStats stats_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ResultSetStats, + com.google.spanner.v1.ResultSetStats.Builder, + com.google.spanner.v1.ResultSetStatsOrBuilder> + statsBuilder_; + + /** + * + * + *
    +     * Query plan and execution statistics for the SQL statement that
    +     * produced this result set. These can be requested by setting
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * DML statements always produce stats containing the number of rows
    +     * modified, unless executed using the
    +     * [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN]
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * Other fields might or might not be populated, based on the
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * 
    + * + * .google.spanner.v1.ResultSetStats stats = 3; + * + * @return Whether the stats field is set. + */ + public boolean hasStats() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Query plan and execution statistics for the SQL statement that
    +     * produced this result set. These can be requested by setting
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * DML statements always produce stats containing the number of rows
    +     * modified, unless executed using the
    +     * [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN]
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * Other fields might or might not be populated, based on the
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * 
    + * + * .google.spanner.v1.ResultSetStats stats = 3; + * + * @return The stats. + */ + public com.google.spanner.v1.ResultSetStats getStats() { + if (statsBuilder_ == null) { + return stats_ == null ? com.google.spanner.v1.ResultSetStats.getDefaultInstance() : stats_; + } else { + return statsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Query plan and execution statistics for the SQL statement that
    +     * produced this result set. These can be requested by setting
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * DML statements always produce stats containing the number of rows
    +     * modified, unless executed using the
    +     * [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN]
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * Other fields might or might not be populated, based on the
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * 
    + * + * .google.spanner.v1.ResultSetStats stats = 3; + */ + public Builder setStats(com.google.spanner.v1.ResultSetStats value) { + if (statsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + stats_ = value; + } else { + statsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Query plan and execution statistics for the SQL statement that
    +     * produced this result set. These can be requested by setting
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * DML statements always produce stats containing the number of rows
    +     * modified, unless executed using the
    +     * [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN]
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * Other fields might or might not be populated, based on the
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * 
    + * + * .google.spanner.v1.ResultSetStats stats = 3; + */ + public Builder setStats(com.google.spanner.v1.ResultSetStats.Builder builderForValue) { + if (statsBuilder_ == null) { + stats_ = builderForValue.build(); + } else { + statsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Query plan and execution statistics for the SQL statement that
    +     * produced this result set. These can be requested by setting
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * DML statements always produce stats containing the number of rows
    +     * modified, unless executed using the
    +     * [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN]
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * Other fields might or might not be populated, based on the
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * 
    + * + * .google.spanner.v1.ResultSetStats stats = 3; + */ + public Builder mergeStats(com.google.spanner.v1.ResultSetStats value) { + if (statsBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && stats_ != null + && stats_ != com.google.spanner.v1.ResultSetStats.getDefaultInstance()) { + getStatsBuilder().mergeFrom(value); + } else { + stats_ = value; + } + } else { + statsBuilder_.mergeFrom(value); + } + if (stats_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Query plan and execution statistics for the SQL statement that
    +     * produced this result set. These can be requested by setting
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * DML statements always produce stats containing the number of rows
    +     * modified, unless executed using the
    +     * [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN]
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * Other fields might or might not be populated, based on the
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * 
    + * + * .google.spanner.v1.ResultSetStats stats = 3; + */ + public Builder clearStats() { + bitField0_ = (bitField0_ & ~0x00000004); + stats_ = null; + if (statsBuilder_ != null) { + statsBuilder_.dispose(); + statsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Query plan and execution statistics for the SQL statement that
    +     * produced this result set. These can be requested by setting
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * DML statements always produce stats containing the number of rows
    +     * modified, unless executed using the
    +     * [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN]
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * Other fields might or might not be populated, based on the
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * 
    + * + * .google.spanner.v1.ResultSetStats stats = 3; + */ + public com.google.spanner.v1.ResultSetStats.Builder getStatsBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetStatsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Query plan and execution statistics for the SQL statement that
    +     * produced this result set. These can be requested by setting
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * DML statements always produce stats containing the number of rows
    +     * modified, unless executed using the
    +     * [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN]
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * Other fields might or might not be populated, based on the
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * 
    + * + * .google.spanner.v1.ResultSetStats stats = 3; + */ + public com.google.spanner.v1.ResultSetStatsOrBuilder getStatsOrBuilder() { + if (statsBuilder_ != null) { + return statsBuilder_.getMessageOrBuilder(); + } else { + return stats_ == null ? com.google.spanner.v1.ResultSetStats.getDefaultInstance() : stats_; + } + } + + /** + * + * + *
    +     * Query plan and execution statistics for the SQL statement that
    +     * produced this result set. These can be requested by setting
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * DML statements always produce stats containing the number of rows
    +     * modified, unless executed using the
    +     * [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN]
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * Other fields might or might not be populated, based on the
    +     * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +     * 
    + * + * .google.spanner.v1.ResultSetStats stats = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ResultSetStats, + com.google.spanner.v1.ResultSetStats.Builder, + com.google.spanner.v1.ResultSetStatsOrBuilder> + internalGetStatsFieldBuilder() { + if (statsBuilder_ == null) { + statsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.ResultSetStats, + com.google.spanner.v1.ResultSetStats.Builder, + com.google.spanner.v1.ResultSetStatsOrBuilder>( + getStats(), getParentForChildren(), isClean()); + stats_ = null; + } + return statsBuilder_; + } + + private com.google.spanner.v1.MultiplexedSessionPrecommitToken precommitToken_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + precommitTokenBuilder_; + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction is on
    +     * a multiplexed session. Pass the precommit token with the highest sequence
    +     * number from this transaction attempt to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + public boolean hasPrecommitToken() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction is on
    +     * a multiplexed session. Pass the precommit token with the highest sequence
    +     * number from this transaction attempt to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + if (precommitTokenBuilder_ == null) { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } else { + return precommitTokenBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction is on
    +     * a multiplexed session. Pass the precommit token with the highest sequence
    +     * number from this transaction attempt to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPrecommitToken(com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + precommitToken_ = value; + } else { + precommitTokenBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction is on
    +     * a multiplexed session. Pass the precommit token with the highest sequence
    +     * number from this transaction attempt to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder builderForValue) { + if (precommitTokenBuilder_ == null) { + precommitToken_ = builderForValue.build(); + } else { + precommitTokenBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction is on
    +     * a multiplexed session. Pass the precommit token with the highest sequence
    +     * number from this transaction attempt to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergePrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && precommitToken_ != null + && precommitToken_ + != com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance()) { + getPrecommitTokenBuilder().mergeFrom(value); + } else { + precommitToken_ = value; + } + } else { + precommitTokenBuilder_.mergeFrom(value); + } + if (precommitToken_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction is on
    +     * a multiplexed session. Pass the precommit token with the highest sequence
    +     * number from this transaction attempt to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearPrecommitToken() { + bitField0_ = (bitField0_ & ~0x00000008); + precommitToken_ = null; + if (precommitTokenBuilder_ != null) { + precommitTokenBuilder_.dispose(); + precommitTokenBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction is on
    +     * a multiplexed session. Pass the precommit token with the highest sequence
    +     * number from this transaction attempt to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder + getPrecommitTokenBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetPrecommitTokenFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction is on
    +     * a multiplexed session. Pass the precommit token with the highest sequence
    +     * number from this transaction attempt to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + if (precommitTokenBuilder_ != null) { + return precommitTokenBuilder_.getMessageOrBuilder(); + } else { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + } + + /** + * + * + *
    +     * Optional. A precommit token is included if the read-write transaction is on
    +     * a multiplexed session. Pass the precommit token with the highest sequence
    +     * number from this transaction attempt to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + internalGetPrecommitTokenFieldBuilder() { + if (precommitTokenBuilder_ == null) { + precommitTokenBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder>( + getPrecommitToken(), getParentForChildren(), isClean()); + precommitToken_ = null; + } + return precommitTokenBuilder_; + } + + private com.google.spanner.v1.CacheUpdate cacheUpdate_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.CacheUpdate, + com.google.spanner.v1.CacheUpdate.Builder, + com.google.spanner.v1.CacheUpdateOrBuilder> + cacheUpdateBuilder_; + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the cacheUpdate field is set. + */ + public boolean hasCacheUpdate() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The cacheUpdate. + */ + public com.google.spanner.v1.CacheUpdate getCacheUpdate() { + if (cacheUpdateBuilder_ == null) { + return cacheUpdate_ == null + ? com.google.spanner.v1.CacheUpdate.getDefaultInstance() + : cacheUpdate_; + } else { + return cacheUpdateBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCacheUpdate(com.google.spanner.v1.CacheUpdate value) { + if (cacheUpdateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cacheUpdate_ = value; + } else { + cacheUpdateBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCacheUpdate(com.google.spanner.v1.CacheUpdate.Builder builderForValue) { + if (cacheUpdateBuilder_ == null) { + cacheUpdate_ = builderForValue.build(); + } else { + cacheUpdateBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCacheUpdate(com.google.spanner.v1.CacheUpdate value) { + if (cacheUpdateBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && cacheUpdate_ != null + && cacheUpdate_ != com.google.spanner.v1.CacheUpdate.getDefaultInstance()) { + getCacheUpdateBuilder().mergeFrom(value); + } else { + cacheUpdate_ = value; + } + } else { + cacheUpdateBuilder_.mergeFrom(value); + } + if (cacheUpdate_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCacheUpdate() { + bitField0_ = (bitField0_ & ~0x00000010); + cacheUpdate_ = null; + if (cacheUpdateBuilder_ != null) { + cacheUpdateBuilder_.dispose(); + cacheUpdateBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.CacheUpdate.Builder getCacheUpdateBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return internalGetCacheUpdateFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.CacheUpdateOrBuilder getCacheUpdateOrBuilder() { + if (cacheUpdateBuilder_ != null) { + return cacheUpdateBuilder_.getMessageOrBuilder(); + } else { + return cacheUpdate_ == null + ? com.google.spanner.v1.CacheUpdate.getDefaultInstance() + : cacheUpdate_; + } + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.CacheUpdate, + com.google.spanner.v1.CacheUpdate.Builder, + com.google.spanner.v1.CacheUpdateOrBuilder> + internalGetCacheUpdateFieldBuilder() { + if (cacheUpdateBuilder_ == null) { + cacheUpdateBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.CacheUpdate, + com.google.spanner.v1.CacheUpdate.Builder, + com.google.spanner.v1.CacheUpdateOrBuilder>( + getCacheUpdate(), getParentForChildren(), isClean()); + cacheUpdate_ = null; + } + return cacheUpdateBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ResultSet) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ResultSet) + private static final com.google.spanner.v1.ResultSet DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.ResultSet(); + } + + public static com.google.spanner.v1.ResultSet getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ResultSet parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + 
public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.ResultSet getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetMetadata.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetMetadata.java new file mode 100644 index 000000000000..6cd09c5326b4 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetMetadata.java @@ -0,0 +1,1461 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/result_set.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * Metadata about a [ResultSet][google.spanner.v1.ResultSet] or
    + * [PartialResultSet][google.spanner.v1.PartialResultSet].
    + * 
    + * + * Protobuf type {@code google.spanner.v1.ResultSetMetadata} + */ +@com.google.protobuf.Generated +public final class ResultSetMetadata extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ResultSetMetadata) + ResultSetMetadataOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ResultSetMetadata"); + } + + // Use ResultSetMetadata.newBuilder() to construct. + private ResultSetMetadata(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ResultSetMetadata() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ResultSetProto + .internal_static_google_spanner_v1_ResultSetMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ResultSetProto + .internal_static_google_spanner_v1_ResultSetMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ResultSetMetadata.class, + com.google.spanner.v1.ResultSetMetadata.Builder.class); + } + + private int bitField0_; + public static final int ROW_TYPE_FIELD_NUMBER = 1; + private com.google.spanner.v1.StructType rowType_; + + /** + * + * + *
    +   * Indicates the field names and types for the rows in the result
    +   * set. For example, a SQL query like `"SELECT UserId, UserName FROM
    +   * Users"` could return a `row_type` value like:
    +   *
    +   * "fields": [
    +   * { "name": "UserId", "type": { "code": "INT64" } },
    +   * { "name": "UserName", "type": { "code": "STRING" } },
    +   * ]
    +   * 
    + * + * .google.spanner.v1.StructType row_type = 1; + * + * @return Whether the rowType field is set. + */ + @java.lang.Override + public boolean hasRowType() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Indicates the field names and types for the rows in the result
    +   * set. For example, a SQL query like `"SELECT UserId, UserName FROM
    +   * Users"` could return a `row_type` value like:
    +   *
    +   * "fields": [
    +   * { "name": "UserId", "type": { "code": "INT64" } },
    +   * { "name": "UserName", "type": { "code": "STRING" } },
    +   * ]
    +   * 
    + * + * .google.spanner.v1.StructType row_type = 1; + * + * @return The rowType. + */ + @java.lang.Override + public com.google.spanner.v1.StructType getRowType() { + return rowType_ == null ? com.google.spanner.v1.StructType.getDefaultInstance() : rowType_; + } + + /** + * + * + *
    +   * Indicates the field names and types for the rows in the result
    +   * set. For example, a SQL query like `"SELECT UserId, UserName FROM
    +   * Users"` could return a `row_type` value like:
    +   *
    +   * "fields": [
    +   * { "name": "UserId", "type": { "code": "INT64" } },
    +   * { "name": "UserName", "type": { "code": "STRING" } },
    +   * ]
    +   * 
    + * + * .google.spanner.v1.StructType row_type = 1; + */ + @java.lang.Override + public com.google.spanner.v1.StructTypeOrBuilder getRowTypeOrBuilder() { + return rowType_ == null ? com.google.spanner.v1.StructType.getDefaultInstance() : rowType_; + } + + public static final int TRANSACTION_FIELD_NUMBER = 2; + private com.google.spanner.v1.Transaction transaction_; + + /** + * + * + *
    +   * If the read or SQL query began a transaction as a side-effect, the
    +   * information about the new transaction is yielded here.
    +   * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + * + * @return Whether the transaction field is set. + */ + @java.lang.Override + public boolean hasTransaction() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * If the read or SQL query began a transaction as a side-effect, the
    +   * information about the new transaction is yielded here.
    +   * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + * + * @return The transaction. + */ + @java.lang.Override + public com.google.spanner.v1.Transaction getTransaction() { + return transaction_ == null + ? com.google.spanner.v1.Transaction.getDefaultInstance() + : transaction_; + } + + /** + * + * + *
    +   * If the read or SQL query began a transaction as a side-effect, the
    +   * information about the new transaction is yielded here.
    +   * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOrBuilder getTransactionOrBuilder() { + return transaction_ == null + ? com.google.spanner.v1.Transaction.getDefaultInstance() + : transaction_; + } + + public static final int UNDECLARED_PARAMETERS_FIELD_NUMBER = 3; + private com.google.spanner.v1.StructType undeclaredParameters_; + + /** + * + * + *
    +   * A SQL query can be parameterized. In PLAN mode, these parameters can be
    +   * undeclared. This indicates the field names and types for those undeclared
    +   * parameters in the SQL query. For example, a SQL query like `"SELECT * FROM
    +   * Users where UserId = @userId and UserName = @userName "` could return a
    +   * `undeclared_parameters` value like:
    +   *
    +   * "fields": [
    +   * { "name": "UserId", "type": { "code": "INT64" } },
    +   * { "name": "UserName", "type": { "code": "STRING" } },
    +   * ]
    +   * 
    + * + * .google.spanner.v1.StructType undeclared_parameters = 3; + * + * @return Whether the undeclaredParameters field is set. + */ + @java.lang.Override + public boolean hasUndeclaredParameters() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * A SQL query can be parameterized. In PLAN mode, these parameters can be
    +   * undeclared. This indicates the field names and types for those undeclared
    +   * parameters in the SQL query. For example, a SQL query like `"SELECT * FROM
    +   * Users where UserId = @userId and UserName = @userName "` could return a
    +   * `undeclared_parameters` value like:
    +   *
    +   * "fields": [
    +   * { "name": "UserId", "type": { "code": "INT64" } },
    +   * { "name": "UserName", "type": { "code": "STRING" } },
    +   * ]
    +   * 
    + * + * .google.spanner.v1.StructType undeclared_parameters = 3; + * + * @return The undeclaredParameters. + */ + @java.lang.Override + public com.google.spanner.v1.StructType getUndeclaredParameters() { + return undeclaredParameters_ == null + ? com.google.spanner.v1.StructType.getDefaultInstance() + : undeclaredParameters_; + } + + /** + * + * + *
    +   * A SQL query can be parameterized. In PLAN mode, these parameters can be
    +   * undeclared. This indicates the field names and types for those undeclared
    +   * parameters in the SQL query. For example, a SQL query like `"SELECT * FROM
    +   * Users where UserId = @userId and UserName = @userName "` could return a
    +   * `undeclared_parameters` value like:
    +   *
    +   * "fields": [
    +   * { "name": "UserId", "type": { "code": "INT64" } },
    +   * { "name": "UserName", "type": { "code": "STRING" } },
    +   * ]
    +   * 
    + * + * .google.spanner.v1.StructType undeclared_parameters = 3; + */ + @java.lang.Override + public com.google.spanner.v1.StructTypeOrBuilder getUndeclaredParametersOrBuilder() { + return undeclaredParameters_ == null + ? com.google.spanner.v1.StructType.getDefaultInstance() + : undeclaredParameters_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getRowType()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getTransaction()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(3, getUndeclaredParameters()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getRowType()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getTransaction()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(3, getUndeclaredParameters()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.ResultSetMetadata)) { + return super.equals(obj); + } + com.google.spanner.v1.ResultSetMetadata other = (com.google.spanner.v1.ResultSetMetadata) obj; + + if 
(hasRowType() != other.hasRowType()) return false; + if (hasRowType()) { + if (!getRowType().equals(other.getRowType())) return false; + } + if (hasTransaction() != other.hasTransaction()) return false; + if (hasTransaction()) { + if (!getTransaction().equals(other.getTransaction())) return false; + } + if (hasUndeclaredParameters() != other.hasUndeclaredParameters()) return false; + if (hasUndeclaredParameters()) { + if (!getUndeclaredParameters().equals(other.getUndeclaredParameters())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasRowType()) { + hash = (37 * hash) + ROW_TYPE_FIELD_NUMBER; + hash = (53 * hash) + getRowType().hashCode(); + } + if (hasTransaction()) { + hash = (37 * hash) + TRANSACTION_FIELD_NUMBER; + hash = (53 * hash) + getTransaction().hashCode(); + } + if (hasUndeclaredParameters()) { + hash = (37 * hash) + UNDECLARED_PARAMETERS_FIELD_NUMBER; + hash = (53 * hash) + getUndeclaredParameters().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ResultSetMetadata parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ResultSetMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ResultSetMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.v1.ResultSetMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ResultSetMetadata parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ResultSetMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ResultSetMetadata parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ResultSetMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ResultSetMetadata parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ResultSetMetadata parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ResultSetMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, 
input); + } + + public static com.google.spanner.v1.ResultSetMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.ResultSetMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Metadata about a [ResultSet][google.spanner.v1.ResultSet] or
    +   * [PartialResultSet][google.spanner.v1.PartialResultSet].
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.ResultSetMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ResultSetMetadata) + com.google.spanner.v1.ResultSetMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ResultSetProto + .internal_static_google_spanner_v1_ResultSetMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ResultSetProto + .internal_static_google_spanner_v1_ResultSetMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ResultSetMetadata.class, + com.google.spanner.v1.ResultSetMetadata.Builder.class); + } + + // Construct using com.google.spanner.v1.ResultSetMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetRowTypeFieldBuilder(); + internalGetTransactionFieldBuilder(); + internalGetUndeclaredParametersFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + rowType_ = null; + if (rowTypeBuilder_ != null) { + rowTypeBuilder_.dispose(); + rowTypeBuilder_ = null; + } + transaction_ = null; + if (transactionBuilder_ != null) { + transactionBuilder_.dispose(); + transactionBuilder_ = null; + } + undeclaredParameters_ = null; + if (undeclaredParametersBuilder_ != null) { + undeclaredParametersBuilder_.dispose(); + undeclaredParametersBuilder_ = null; + } + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.ResultSetProto + .internal_static_google_spanner_v1_ResultSetMetadata_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.ResultSetMetadata getDefaultInstanceForType() { + return com.google.spanner.v1.ResultSetMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ResultSetMetadata build() { + com.google.spanner.v1.ResultSetMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.ResultSetMetadata buildPartial() { + com.google.spanner.v1.ResultSetMetadata result = + new com.google.spanner.v1.ResultSetMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.ResultSetMetadata result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.rowType_ = rowTypeBuilder_ == null ? rowType_ : rowTypeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.transaction_ = + transactionBuilder_ == null ? transaction_ : transactionBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.undeclaredParameters_ = + undeclaredParametersBuilder_ == null + ? 
undeclaredParameters_ + : undeclaredParametersBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.ResultSetMetadata) { + return mergeFrom((com.google.spanner.v1.ResultSetMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.ResultSetMetadata other) { + if (other == com.google.spanner.v1.ResultSetMetadata.getDefaultInstance()) return this; + if (other.hasRowType()) { + mergeRowType(other.getRowType()); + } + if (other.hasTransaction()) { + mergeTransaction(other.getTransaction()); + } + if (other.hasUndeclaredParameters()) { + mergeUndeclaredParameters(other.getUndeclaredParameters()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(internalGetRowTypeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetTransactionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetUndeclaredParametersFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = 
true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.v1.StructType rowType_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.StructType, + com.google.spanner.v1.StructType.Builder, + com.google.spanner.v1.StructTypeOrBuilder> + rowTypeBuilder_; + + /** + * + * + *
    +     * Indicates the field names and types for the rows in the result
    +     * set. For example, a SQL query like `"SELECT UserId, UserName FROM
    +     * Users"` could return a `row_type` value like:
    +     *
    +     * "fields": [
    +     * { "name": "UserId", "type": { "code": "INT64" } },
    +     * { "name": "UserName", "type": { "code": "STRING" } },
    +     * ]
    +     * 
    + * + * .google.spanner.v1.StructType row_type = 1; + * + * @return Whether the rowType field is set. + */ + public boolean hasRowType() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Indicates the field names and types for the rows in the result
    +     * set. For example, a SQL query like `"SELECT UserId, UserName FROM
    +     * Users"` could return a `row_type` value like:
    +     *
    +     * "fields": [
    +     * { "name": "UserId", "type": { "code": "INT64" } },
    +     * { "name": "UserName", "type": { "code": "STRING" } },
    +     * ]
    +     * 
    + * + * .google.spanner.v1.StructType row_type = 1; + * + * @return The rowType. + */ + public com.google.spanner.v1.StructType getRowType() { + if (rowTypeBuilder_ == null) { + return rowType_ == null ? com.google.spanner.v1.StructType.getDefaultInstance() : rowType_; + } else { + return rowTypeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Indicates the field names and types for the rows in the result
    +     * set. For example, a SQL query like `"SELECT UserId, UserName FROM
    +     * Users"` could return a `row_type` value like:
    +     *
    +     * "fields": [
    +     * { "name": "UserId", "type": { "code": "INT64" } },
    +     * { "name": "UserName", "type": { "code": "STRING" } },
    +     * ]
    +     * 
    + * + * .google.spanner.v1.StructType row_type = 1; + */ + public Builder setRowType(com.google.spanner.v1.StructType value) { + if (rowTypeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rowType_ = value; + } else { + rowTypeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Indicates the field names and types for the rows in the result
    +     * set. For example, a SQL query like `"SELECT UserId, UserName FROM
    +     * Users"` could return a `row_type` value like:
    +     *
    +     * "fields": [
    +     * { "name": "UserId", "type": { "code": "INT64" } },
    +     * { "name": "UserName", "type": { "code": "STRING" } },
    +     * ]
    +     * 
    + * + * .google.spanner.v1.StructType row_type = 1; + */ + public Builder setRowType(com.google.spanner.v1.StructType.Builder builderForValue) { + if (rowTypeBuilder_ == null) { + rowType_ = builderForValue.build(); + } else { + rowTypeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Indicates the field names and types for the rows in the result
    +     * set. For example, a SQL query like `"SELECT UserId, UserName FROM
    +     * Users"` could return a `row_type` value like:
    +     *
    +     * "fields": [
    +     * { "name": "UserId", "type": { "code": "INT64" } },
    +     * { "name": "UserName", "type": { "code": "STRING" } },
    +     * ]
    +     * 
    + * + * .google.spanner.v1.StructType row_type = 1; + */ + public Builder mergeRowType(com.google.spanner.v1.StructType value) { + if (rowTypeBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && rowType_ != null + && rowType_ != com.google.spanner.v1.StructType.getDefaultInstance()) { + getRowTypeBuilder().mergeFrom(value); + } else { + rowType_ = value; + } + } else { + rowTypeBuilder_.mergeFrom(value); + } + if (rowType_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Indicates the field names and types for the rows in the result
    +     * set. For example, a SQL query like `"SELECT UserId, UserName FROM
    +     * Users"` could return a `row_type` value like:
    +     *
    +     * "fields": [
    +     * { "name": "UserId", "type": { "code": "INT64" } },
    +     * { "name": "UserName", "type": { "code": "STRING" } },
    +     * ]
    +     * 
    + * + * .google.spanner.v1.StructType row_type = 1; + */ + public Builder clearRowType() { + bitField0_ = (bitField0_ & ~0x00000001); + rowType_ = null; + if (rowTypeBuilder_ != null) { + rowTypeBuilder_.dispose(); + rowTypeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Indicates the field names and types for the rows in the result
    +     * set. For example, a SQL query like `"SELECT UserId, UserName FROM
    +     * Users"` could return a `row_type` value like:
    +     *
    +     * "fields": [
    +     * { "name": "UserId", "type": { "code": "INT64" } },
    +     * { "name": "UserName", "type": { "code": "STRING" } },
    +     * ]
    +     * 
    + * + * .google.spanner.v1.StructType row_type = 1; + */ + public com.google.spanner.v1.StructType.Builder getRowTypeBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetRowTypeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Indicates the field names and types for the rows in the result
    +     * set. For example, a SQL query like `"SELECT UserId, UserName FROM
    +     * Users"` could return a `row_type` value like:
    +     *
    +     * "fields": [
    +     * { "name": "UserId", "type": { "code": "INT64" } },
    +     * { "name": "UserName", "type": { "code": "STRING" } },
    +     * ]
    +     * 
    + * + * .google.spanner.v1.StructType row_type = 1; + */ + public com.google.spanner.v1.StructTypeOrBuilder getRowTypeOrBuilder() { + if (rowTypeBuilder_ != null) { + return rowTypeBuilder_.getMessageOrBuilder(); + } else { + return rowType_ == null ? com.google.spanner.v1.StructType.getDefaultInstance() : rowType_; + } + } + + /** + * + * + *
    +     * Indicates the field names and types for the rows in the result
    +     * set. For example, a SQL query like `"SELECT UserId, UserName FROM
    +     * Users"` could return a `row_type` value like:
    +     *
    +     * "fields": [
    +     * { "name": "UserId", "type": { "code": "INT64" } },
    +     * { "name": "UserName", "type": { "code": "STRING" } },
    +     * ]
    +     * 
    + * + * .google.spanner.v1.StructType row_type = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.StructType, + com.google.spanner.v1.StructType.Builder, + com.google.spanner.v1.StructTypeOrBuilder> + internalGetRowTypeFieldBuilder() { + if (rowTypeBuilder_ == null) { + rowTypeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.StructType, + com.google.spanner.v1.StructType.Builder, + com.google.spanner.v1.StructTypeOrBuilder>( + getRowType(), getParentForChildren(), isClean()); + rowType_ = null; + } + return rowTypeBuilder_; + } + + private com.google.spanner.v1.Transaction transaction_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Transaction, + com.google.spanner.v1.Transaction.Builder, + com.google.spanner.v1.TransactionOrBuilder> + transactionBuilder_; + + /** + * + * + *
    +     * If the read or SQL query began a transaction as a side-effect, the
    +     * information about the new transaction is yielded here.
    +     * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + * + * @return Whether the transaction field is set. + */ + public boolean hasTransaction() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * If the read or SQL query began a transaction as a side-effect, the
    +     * information about the new transaction is yielded here.
    +     * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + * + * @return The transaction. + */ + public com.google.spanner.v1.Transaction getTransaction() { + if (transactionBuilder_ == null) { + return transaction_ == null + ? com.google.spanner.v1.Transaction.getDefaultInstance() + : transaction_; + } else { + return transactionBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * If the read or SQL query began a transaction as a side-effect, the
    +     * information about the new transaction is yielded here.
    +     * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + */ + public Builder setTransaction(com.google.spanner.v1.Transaction value) { + if (transactionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + transaction_ = value; + } else { + transactionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If the read or SQL query began a transaction as a side-effect, the
    +     * information about the new transaction is yielded here.
    +     * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + */ + public Builder setTransaction(com.google.spanner.v1.Transaction.Builder builderForValue) { + if (transactionBuilder_ == null) { + transaction_ = builderForValue.build(); + } else { + transactionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If the read or SQL query began a transaction as a side-effect, the
    +     * information about the new transaction is yielded here.
    +     * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + */ + public Builder mergeTransaction(com.google.spanner.v1.Transaction value) { + if (transactionBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && transaction_ != null + && transaction_ != com.google.spanner.v1.Transaction.getDefaultInstance()) { + getTransactionBuilder().mergeFrom(value); + } else { + transaction_ = value; + } + } else { + transactionBuilder_.mergeFrom(value); + } + if (transaction_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * If the read or SQL query began a transaction as a side-effect, the
    +     * information about the new transaction is yielded here.
    +     * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + */ + public Builder clearTransaction() { + bitField0_ = (bitField0_ & ~0x00000002); + transaction_ = null; + if (transactionBuilder_ != null) { + transactionBuilder_.dispose(); + transactionBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * If the read or SQL query began a transaction as a side-effect, the
    +     * information about the new transaction is yielded here.
    +     * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + */ + public com.google.spanner.v1.Transaction.Builder getTransactionBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetTransactionFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * If the read or SQL query began a transaction as a side-effect, the
    +     * information about the new transaction is yielded here.
    +     * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + */ + public com.google.spanner.v1.TransactionOrBuilder getTransactionOrBuilder() { + if (transactionBuilder_ != null) { + return transactionBuilder_.getMessageOrBuilder(); + } else { + return transaction_ == null + ? com.google.spanner.v1.Transaction.getDefaultInstance() + : transaction_; + } + } + + /** + * + * + *
    +     * If the read or SQL query began a transaction as a side-effect, the
    +     * information about the new transaction is yielded here.
    +     * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Transaction, + com.google.spanner.v1.Transaction.Builder, + com.google.spanner.v1.TransactionOrBuilder> + internalGetTransactionFieldBuilder() { + if (transactionBuilder_ == null) { + transactionBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Transaction, + com.google.spanner.v1.Transaction.Builder, + com.google.spanner.v1.TransactionOrBuilder>( + getTransaction(), getParentForChildren(), isClean()); + transaction_ = null; + } + return transactionBuilder_; + } + + private com.google.spanner.v1.StructType undeclaredParameters_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.StructType, + com.google.spanner.v1.StructType.Builder, + com.google.spanner.v1.StructTypeOrBuilder> + undeclaredParametersBuilder_; + + /** + * + * + *
    +     * A SQL query can be parameterized. In PLAN mode, these parameters can be
    +     * undeclared. This indicates the field names and types for those undeclared
    +     * parameters in the SQL query. For example, a SQL query like `"SELECT * FROM
    +     * Users where UserId = @userId and UserName = @userName "` could return a
    +     * `undeclared_parameters` value like:
    +     *
    +     * "fields": [
    +     * { "name": "UserId", "type": { "code": "INT64" } },
    +     * { "name": "UserName", "type": { "code": "STRING" } },
    +     * ]
    +     * 
    + * + * .google.spanner.v1.StructType undeclared_parameters = 3; + * + * @return Whether the undeclaredParameters field is set. + */ + public boolean hasUndeclaredParameters() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * A SQL query can be parameterized. In PLAN mode, these parameters can be
    +     * undeclared. This indicates the field names and types for those undeclared
    +     * parameters in the SQL query. For example, a SQL query like `"SELECT * FROM
    +     * Users where UserId = @userId and UserName = @userName "` could return a
    +     * `undeclared_parameters` value like:
    +     *
    +     * "fields": [
    +     * { "name": "UserId", "type": { "code": "INT64" } },
    +     * { "name": "UserName", "type": { "code": "STRING" } },
    +     * ]
    +     * 
    + * + * .google.spanner.v1.StructType undeclared_parameters = 3; + * + * @return The undeclaredParameters. + */ + public com.google.spanner.v1.StructType getUndeclaredParameters() { + if (undeclaredParametersBuilder_ == null) { + return undeclaredParameters_ == null + ? com.google.spanner.v1.StructType.getDefaultInstance() + : undeclaredParameters_; + } else { + return undeclaredParametersBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * A SQL query can be parameterized. In PLAN mode, these parameters can be
    +     * undeclared. This indicates the field names and types for those undeclared
    +     * parameters in the SQL query. For example, a SQL query like `"SELECT * FROM
    +     * Users where UserId = @userId and UserName = @userName "` could return a
    +     * `undeclared_parameters` value like:
    +     *
    +     * "fields": [
    +     * { "name": "UserId", "type": { "code": "INT64" } },
    +     * { "name": "UserName", "type": { "code": "STRING" } },
    +     * ]
    +     * 
    + * + * .google.spanner.v1.StructType undeclared_parameters = 3; + */ + public Builder setUndeclaredParameters(com.google.spanner.v1.StructType value) { + if (undeclaredParametersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + undeclaredParameters_ = value; + } else { + undeclaredParametersBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A SQL query can be parameterized. In PLAN mode, these parameters can be
    +     * undeclared. This indicates the field names and types for those undeclared
    +     * parameters in the SQL query. For example, a SQL query like `"SELECT * FROM
    +     * Users where UserId = @userId and UserName = @userName "` could return a
    +     * `undeclared_parameters` value like:
    +     *
    +     * "fields": [
    +     * { "name": "UserId", "type": { "code": "INT64" } },
    +     * { "name": "UserName", "type": { "code": "STRING" } },
    +     * ]
    +     * 
    + * + * .google.spanner.v1.StructType undeclared_parameters = 3; + */ + public Builder setUndeclaredParameters( + com.google.spanner.v1.StructType.Builder builderForValue) { + if (undeclaredParametersBuilder_ == null) { + undeclaredParameters_ = builderForValue.build(); + } else { + undeclaredParametersBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A SQL query can be parameterized. In PLAN mode, these parameters can be
    +     * undeclared. This indicates the field names and types for those undeclared
    +     * parameters in the SQL query. For example, a SQL query like `"SELECT * FROM
    +     * Users where UserId = @userId and UserName = @userName "` could return a
    +     * `undeclared_parameters` value like:
    +     *
    +     * "fields": [
    +     * { "name": "UserId", "type": { "code": "INT64" } },
    +     * { "name": "UserName", "type": { "code": "STRING" } },
    +     * ]
    +     * 
    + * + * .google.spanner.v1.StructType undeclared_parameters = 3; + */ + public Builder mergeUndeclaredParameters(com.google.spanner.v1.StructType value) { + if (undeclaredParametersBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && undeclaredParameters_ != null + && undeclaredParameters_ != com.google.spanner.v1.StructType.getDefaultInstance()) { + getUndeclaredParametersBuilder().mergeFrom(value); + } else { + undeclaredParameters_ = value; + } + } else { + undeclaredParametersBuilder_.mergeFrom(value); + } + if (undeclaredParameters_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * A SQL query can be parameterized. In PLAN mode, these parameters can be
    +     * undeclared. This indicates the field names and types for those undeclared
    +     * parameters in the SQL query. For example, a SQL query like `"SELECT * FROM
    +     * Users where UserId = @userId and UserName = @userName "` could return a
    +     * `undeclared_parameters` value like:
    +     *
    +     * "fields": [
    +     * { "name": "UserId", "type": { "code": "INT64" } },
    +     * { "name": "UserName", "type": { "code": "STRING" } },
    +     * ]
    +     * 
    + * + * .google.spanner.v1.StructType undeclared_parameters = 3; + */ + public Builder clearUndeclaredParameters() { + bitField0_ = (bitField0_ & ~0x00000004); + undeclaredParameters_ = null; + if (undeclaredParametersBuilder_ != null) { + undeclaredParametersBuilder_.dispose(); + undeclaredParametersBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * A SQL query can be parameterized. In PLAN mode, these parameters can be
    +     * undeclared. This indicates the field names and types for those undeclared
    +     * parameters in the SQL query. For example, a SQL query like `"SELECT * FROM
    +     * Users where UserId = @userId and UserName = @userName "` could return a
    +     * `undeclared_parameters` value like:
    +     *
    +     * "fields": [
    +     * { "name": "UserId", "type": { "code": "INT64" } },
    +     * { "name": "UserName", "type": { "code": "STRING" } },
    +     * ]
    +     * 
    + * + * .google.spanner.v1.StructType undeclared_parameters = 3; + */ + public com.google.spanner.v1.StructType.Builder getUndeclaredParametersBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetUndeclaredParametersFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * A SQL query can be parameterized. In PLAN mode, these parameters can be
    +     * undeclared. This indicates the field names and types for those undeclared
    +     * parameters in the SQL query. For example, a SQL query like `"SELECT * FROM
    +     * Users where UserId = @userId and UserName = @userName "` could return a
    +     * `undeclared_parameters` value like:
    +     *
    +     * "fields": [
    +     * { "name": "UserId", "type": { "code": "INT64" } },
    +     * { "name": "UserName", "type": { "code": "STRING" } },
    +     * ]
    +     * 
    + * + * .google.spanner.v1.StructType undeclared_parameters = 3; + */ + public com.google.spanner.v1.StructTypeOrBuilder getUndeclaredParametersOrBuilder() { + if (undeclaredParametersBuilder_ != null) { + return undeclaredParametersBuilder_.getMessageOrBuilder(); + } else { + return undeclaredParameters_ == null + ? com.google.spanner.v1.StructType.getDefaultInstance() + : undeclaredParameters_; + } + } + + /** + * + * + *
    +     * A SQL query can be parameterized. In PLAN mode, these parameters can be
    +     * undeclared. This indicates the field names and types for those undeclared
    +     * parameters in the SQL query. For example, a SQL query like `"SELECT * FROM
    +     * Users where UserId = @userId and UserName = @userName "` could return a
    +     * `undeclared_parameters` value like:
    +     *
    +     * "fields": [
    +     * { "name": "UserId", "type": { "code": "INT64" } },
    +     * { "name": "UserName", "type": { "code": "STRING" } },
    +     * ]
    +     * 
    + * + * .google.spanner.v1.StructType undeclared_parameters = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.StructType, + com.google.spanner.v1.StructType.Builder, + com.google.spanner.v1.StructTypeOrBuilder> + internalGetUndeclaredParametersFieldBuilder() { + if (undeclaredParametersBuilder_ == null) { + undeclaredParametersBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.StructType, + com.google.spanner.v1.StructType.Builder, + com.google.spanner.v1.StructTypeOrBuilder>( + getUndeclaredParameters(), getParentForChildren(), isClean()); + undeclaredParameters_ = null; + } + return undeclaredParametersBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ResultSetMetadata) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ResultSetMetadata) + private static final com.google.spanner.v1.ResultSetMetadata DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.ResultSetMetadata(); + } + + public static com.google.spanner.v1.ResultSetMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ResultSetMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.ResultSetMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetMetadataOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetMetadataOrBuilder.java new file mode 100644 index 000000000000..c4943749436b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetMetadataOrBuilder.java @@ -0,0 +1,190 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/result_set.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface ResultSetMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ResultSetMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Indicates the field names and types for the rows in the result
    +   * set. For example, a SQL query like `"SELECT UserId, UserName FROM
    +   * Users"` could return a `row_type` value like:
    +   *
    +   * "fields": [
    +   * { "name": "UserId", "type": { "code": "INT64" } },
    +   * { "name": "UserName", "type": { "code": "STRING" } },
    +   * ]
    +   * 
    + * + * .google.spanner.v1.StructType row_type = 1; + * + * @return Whether the rowType field is set. + */ + boolean hasRowType(); + + /** + * + * + *
    +   * Indicates the field names and types for the rows in the result
    +   * set. For example, a SQL query like `"SELECT UserId, UserName FROM
    +   * Users"` could return a `row_type` value like:
    +   *
    +   * "fields": [
    +   * { "name": "UserId", "type": { "code": "INT64" } },
    +   * { "name": "UserName", "type": { "code": "STRING" } },
    +   * ]
    +   * 
    + * + * .google.spanner.v1.StructType row_type = 1; + * + * @return The rowType. + */ + com.google.spanner.v1.StructType getRowType(); + + /** + * + * + *
    +   * Indicates the field names and types for the rows in the result
    +   * set. For example, a SQL query like `"SELECT UserId, UserName FROM
    +   * Users"` could return a `row_type` value like:
    +   *
    +   * "fields": [
    +   * { "name": "UserId", "type": { "code": "INT64" } },
    +   * { "name": "UserName", "type": { "code": "STRING" } },
    +   * ]
    +   * 
    + * + * .google.spanner.v1.StructType row_type = 1; + */ + com.google.spanner.v1.StructTypeOrBuilder getRowTypeOrBuilder(); + + /** + * + * + *
    +   * If the read or SQL query began a transaction as a side-effect, the
    +   * information about the new transaction is yielded here.
    +   * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + * + * @return Whether the transaction field is set. + */ + boolean hasTransaction(); + + /** + * + * + *
    +   * If the read or SQL query began a transaction as a side-effect, the
    +   * information about the new transaction is yielded here.
    +   * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + * + * @return The transaction. + */ + com.google.spanner.v1.Transaction getTransaction(); + + /** + * + * + *
    +   * If the read or SQL query began a transaction as a side-effect, the
    +   * information about the new transaction is yielded here.
    +   * 
    + * + * .google.spanner.v1.Transaction transaction = 2; + */ + com.google.spanner.v1.TransactionOrBuilder getTransactionOrBuilder(); + + /** + * + * + *
    +   * A SQL query can be parameterized. In PLAN mode, these parameters can be
    +   * undeclared. This indicates the field names and types for those undeclared
    +   * parameters in the SQL query. For example, a SQL query like `"SELECT * FROM
    +   * Users where UserId = @userId and UserName = @userName "` could return a
    +   * `undeclared_parameters` value like:
    +   *
    +   * "fields": [
    +   * { "name": "UserId", "type": { "code": "INT64" } },
    +   * { "name": "UserName", "type": { "code": "STRING" } },
    +   * ]
    +   * 
    + * + * .google.spanner.v1.StructType undeclared_parameters = 3; + * + * @return Whether the undeclaredParameters field is set. + */ + boolean hasUndeclaredParameters(); + + /** + * + * + *
    +   * A SQL query can be parameterized. In PLAN mode, these parameters can be
    +   * undeclared. This indicates the field names and types for those undeclared
    +   * parameters in the SQL query. For example, a SQL query like `"SELECT * FROM
    +   * Users where UserId = @userId and UserName = @userName "` could return a
    +   * `undeclared_parameters` value like:
    +   *
    +   * "fields": [
    +   * { "name": "UserId", "type": { "code": "INT64" } },
    +   * { "name": "UserName", "type": { "code": "STRING" } },
    +   * ]
    +   * 
    + * + * .google.spanner.v1.StructType undeclared_parameters = 3; + * + * @return The undeclaredParameters. + */ + com.google.spanner.v1.StructType getUndeclaredParameters(); + + /** + * + * + *
    +   * A SQL query can be parameterized. In PLAN mode, these parameters can be
    +   * undeclared. This indicates the field names and types for those undeclared
    +   * parameters in the SQL query. For example, a SQL query like `"SELECT * FROM
    +   * Users where UserId = @userId and UserName = @userName "` could return a
    +   * `undeclared_parameters` value like:
    +   *
    +   * "fields": [
    +   * { "name": "UserId", "type": { "code": "INT64" } },
    +   * { "name": "UserName", "type": { "code": "STRING" } },
    +   * ]
    +   * 
    + * + * .google.spanner.v1.StructType undeclared_parameters = 3; + */ + com.google.spanner.v1.StructTypeOrBuilder getUndeclaredParametersOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetOrBuilder.java new file mode 100644 index 000000000000..a3694c8fb99f --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetOrBuilder.java @@ -0,0 +1,311 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/result_set.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface ResultSetOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ResultSet) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Metadata about the result set, such as row type information.
    +   * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + * + * @return Whether the metadata field is set. + */ + boolean hasMetadata(); + + /** + * + * + *
    +   * Metadata about the result set, such as row type information.
    +   * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + * + * @return The metadata. + */ + com.google.spanner.v1.ResultSetMetadata getMetadata(); + + /** + * + * + *
    +   * Metadata about the result set, such as row type information.
    +   * 
    + * + * .google.spanner.v1.ResultSetMetadata metadata = 1; + */ + com.google.spanner.v1.ResultSetMetadataOrBuilder getMetadataOrBuilder(); + + /** + * + * + *
    +   * Each element in `rows` is a row whose format is defined by
    +   * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +   * element in each row matches the ith field in
    +   * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +   * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +   * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + java.util.List getRowsList(); + + /** + * + * + *
    +   * Each element in `rows` is a row whose format is defined by
    +   * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +   * element in each row matches the ith field in
    +   * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +   * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +   * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + com.google.protobuf.ListValue getRows(int index); + + /** + * + * + *
    +   * Each element in `rows` is a row whose format is defined by
    +   * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +   * element in each row matches the ith field in
    +   * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +   * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +   * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + int getRowsCount(); + + /** + * + * + *
    +   * Each element in `rows` is a row whose format is defined by
    +   * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +   * element in each row matches the ith field in
    +   * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +   * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +   * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + java.util.List getRowsOrBuilderList(); + + /** + * + * + *
    +   * Each element in `rows` is a row whose format is defined by
    +   * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith
    +   * element in each row matches the ith field in
    +   * [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements
    +   * are encoded based on type as described [here][google.spanner.v1.TypeCode].
    +   * 
    + * + * repeated .google.protobuf.ListValue rows = 2; + */ + com.google.protobuf.ListValueOrBuilder getRowsOrBuilder(int index); + + /** + * + * + *
    +   * Query plan and execution statistics for the SQL statement that
    +   * produced this result set. These can be requested by setting
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +   * DML statements always produce stats containing the number of rows
    +   * modified, unless executed using the
    +   * [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN]
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +   * Other fields might or might not be populated, based on the
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +   * 
    + * + * .google.spanner.v1.ResultSetStats stats = 3; + * + * @return Whether the stats field is set. + */ + boolean hasStats(); + + /** + * + * + *
    +   * Query plan and execution statistics for the SQL statement that
    +   * produced this result set. These can be requested by setting
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +   * DML statements always produce stats containing the number of rows
    +   * modified, unless executed using the
    +   * [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN]
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +   * Other fields might or might not be populated, based on the
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +   * 
    + * + * .google.spanner.v1.ResultSetStats stats = 3; + * + * @return The stats. + */ + com.google.spanner.v1.ResultSetStats getStats(); + + /** + * + * + *
    +   * Query plan and execution statistics for the SQL statement that
    +   * produced this result set. These can be requested by setting
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +   * DML statements always produce stats containing the number of rows
    +   * modified, unless executed using the
    +   * [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN]
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +   * Other fields might or might not be populated, based on the
    +   * [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
    +   * 
    + * + * .google.spanner.v1.ResultSetStats stats = 3; + */ + com.google.spanner.v1.ResultSetStatsOrBuilder getStatsOrBuilder(); + + /** + * + * + *
    +   * Optional. A precommit token is included if the read-write transaction is on
    +   * a multiplexed session. Pass the precommit token with the highest sequence
    +   * number from this transaction attempt to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + boolean hasPrecommitToken(); + + /** + * + * + *
    +   * Optional. A precommit token is included if the read-write transaction is on
    +   * a multiplexed session. Pass the precommit token with the highest sequence
    +   * number from this transaction attempt to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken(); + + /** + * + * + *
    +   * Optional. A precommit token is included if the read-write transaction is on
    +   * a multiplexed session. Pass the precommit token with the highest sequence
    +   * number from this transaction attempt to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder getPrecommitTokenOrBuilder(); + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the cacheUpdate field is set. + */ + boolean hasCacheUpdate(); + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The cacheUpdate. + */ + com.google.spanner.v1.CacheUpdate getCacheUpdate(); + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.CacheUpdateOrBuilder getCacheUpdateOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetProto.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetProto.java new file mode 100644 index 000000000000..c705555c44d0 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetProto.java @@ -0,0 +1,170 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/result_set.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public final class ResultSetProto extends com.google.protobuf.GeneratedFile { + private ResultSetProto() {} + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ResultSetProto"); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ResultSet_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ResultSet_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_PartialResultSet_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_PartialResultSet_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ResultSetMetadata_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ResultSetMetadata_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ResultSetStats_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ResultSetStats_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static 
com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n\"google/spanner/v1/result_set.proto\022\021go" + + "ogle.spanner.v1\032\037google/api/field_behavi" + + "or.proto\032\034google/protobuf/struct.proto\032 " + + "google/spanner/v1/location.proto\032\"google" + + "/spanner/v1/query_plan.proto\032#google/spa" + + "nner/v1/transaction.proto\032\034google/spanne" + + "r/v1/type.proto\"\255\002\n\tResultSet\0226\n\010metadat" + + "a\030\001 \001(\0132$.google.spanner.v1.ResultSetMet" + + "adata\022(\n\004rows\030\002 \003(\0132\032.google.protobuf.Li" + + "stValue\0220\n\005stats\030\003 \001(\0132!.google.spanner." + + "v1.ResultSetStats\022Q\n\017precommit_token\030\005 \001" + + "(\01323.google.spanner.v1.MultiplexedSessio" + + "nPrecommitTokenB\003\340A\001\0229\n\014cache_update\030\006 \001" + + "(\0132\036.google.spanner.v1.CacheUpdateB\003\340A\001\"" + + "\362\002\n\020PartialResultSet\0226\n\010metadata\030\001 \001(\0132$" + + ".google.spanner.v1.ResultSetMetadata\022&\n\006" + + "values\030\002 \003(\0132\026.google.protobuf.Value\022\025\n\r" + + "chunked_value\030\003 \001(\010\022\024\n\014resume_token\030\004 \001(" + + "\014\0220\n\005stats\030\005 \001(\0132!.google.spanner.v1.Res" + + "ultSetStats\022Q\n\017precommit_token\030\010 \001(\01323.g" + + "oogle.spanner.v1.MultiplexedSessionPreco" + + "mmitTokenB\003\340A\001\022\021\n\004last\030\t \001(\010B\003\340A\001\0229\n\014cac" + + "he_update\030\n \001(\0132\036.google.spanner.v1.Cach" + + "eUpdateB\003\340A\001\"\267\001\n\021ResultSetMetadata\022/\n\010ro" + + "w_type\030\001 \001(\0132\035.google.spanner.v1.StructT" + + "ype\0223\n\013transaction\030\002 \001(\0132\036.google.spanne" + + "r.v1.Transaction\022<\n\025undeclared_parameter" + + "s\030\003 \001(\0132\035.google.spanner.v1.StructType\"\271" + + "\001\n\016ResultSetStats\0220\n\nquery_plan\030\001 \001(\0132\034." 
+ + "google.spanner.v1.QueryPlan\022,\n\013query_sta" + + "ts\030\002 \001(\0132\027.google.protobuf.Struct\022\031\n\017row" + + "_count_exact\030\003 \001(\003H\000\022\037\n\025row_count_lower_" + + "bound\030\004 \001(\003H\000B\013\n\trow_countB\261\001\n\025com.googl" + + "e.spanner.v1B\016ResultSetProtoP\001Z5cloud.go" + + "ogle.com/go/spanner/apiv1/spannerpb;span" + + "nerpb\252\002\027Google.Cloud.Spanner.V1\312\002\027Google" + + "\\Cloud\\Spanner\\V1\352\002\032Google::Cloud::Spann" + + "er::V1b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.protobuf.StructProto.getDescriptor(), + com.google.spanner.v1.LocationProto.getDescriptor(), + com.google.spanner.v1.QueryPlanProto.getDescriptor(), + com.google.spanner.v1.TransactionProto.getDescriptor(), + com.google.spanner.v1.TypeProto.getDescriptor(), + }); + internal_static_google_spanner_v1_ResultSet_descriptor = getDescriptor().getMessageType(0); + internal_static_google_spanner_v1_ResultSet_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_ResultSet_descriptor, + new java.lang.String[] { + "Metadata", "Rows", "Stats", "PrecommitToken", "CacheUpdate", + }); + internal_static_google_spanner_v1_PartialResultSet_descriptor = + getDescriptor().getMessageType(1); + internal_static_google_spanner_v1_PartialResultSet_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_PartialResultSet_descriptor, + new java.lang.String[] { + "Metadata", + "Values", + "ChunkedValue", + "ResumeToken", + "Stats", + "PrecommitToken", + "Last", + "CacheUpdate", + }); + internal_static_google_spanner_v1_ResultSetMetadata_descriptor = + getDescriptor().getMessageType(2); + 
internal_static_google_spanner_v1_ResultSetMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_ResultSetMetadata_descriptor, + new java.lang.String[] { + "RowType", "Transaction", "UndeclaredParameters", + }); + internal_static_google_spanner_v1_ResultSetStats_descriptor = getDescriptor().getMessageType(3); + internal_static_google_spanner_v1_ResultSetStats_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_ResultSetStats_descriptor, + new java.lang.String[] { + "QueryPlan", "QueryStats", "RowCountExact", "RowCountLowerBound", "RowCount", + }); + descriptor.resolveAllFeaturesImmutable(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.protobuf.StructProto.getDescriptor(); + com.google.spanner.v1.LocationProto.getDescriptor(); + com.google.spanner.v1.QueryPlanProto.getDescriptor(); + com.google.spanner.v1.TransactionProto.getDescriptor(); + com.google.spanner.v1.TypeProto.getDescriptor(); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetStats.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetStats.java new file mode 100644 index 000000000000..e89b89efbeda --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetStats.java @@ -0,0 +1,1438 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/result_set.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * Additional statistics about a [ResultSet][google.spanner.v1.ResultSet] or
    + * [PartialResultSet][google.spanner.v1.PartialResultSet].
    + * 
    + * + * Protobuf type {@code google.spanner.v1.ResultSetStats} + */ +@com.google.protobuf.Generated +public final class ResultSetStats extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.ResultSetStats) + ResultSetStatsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ResultSetStats"); + } + + // Use ResultSetStats.newBuilder() to construct. + private ResultSetStats(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ResultSetStats() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ResultSetProto + .internal_static_google_spanner_v1_ResultSetStats_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ResultSetProto + .internal_static_google_spanner_v1_ResultSetStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ResultSetStats.class, + com.google.spanner.v1.ResultSetStats.Builder.class); + } + + private int bitField0_; + private int rowCountCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object rowCount_; + + public enum RowCountCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + ROW_COUNT_EXACT(3), + ROW_COUNT_LOWER_BOUND(4), + ROWCOUNT_NOT_SET(0); + private final int value; + + private RowCountCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static RowCountCase valueOf(int value) { + return forNumber(value); + } + + public static RowCountCase forNumber(int value) { + switch (value) { + case 3: + return ROW_COUNT_EXACT; + case 4: + return ROW_COUNT_LOWER_BOUND; + case 0: + return ROWCOUNT_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public RowCountCase getRowCountCase() { + return RowCountCase.forNumber(rowCountCase_); + } + + public static final int QUERY_PLAN_FIELD_NUMBER = 1; + private com.google.spanner.v1.QueryPlan queryPlan_; + + /** + * + * + *
    +   * [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this
    +   * result.
    +   * 
    + * + * .google.spanner.v1.QueryPlan query_plan = 1; + * + * @return Whether the queryPlan field is set. + */ + @java.lang.Override + public boolean hasQueryPlan() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this
    +   * result.
    +   * 
    + * + * .google.spanner.v1.QueryPlan query_plan = 1; + * + * @return The queryPlan. + */ + @java.lang.Override + public com.google.spanner.v1.QueryPlan getQueryPlan() { + return queryPlan_ == null ? com.google.spanner.v1.QueryPlan.getDefaultInstance() : queryPlan_; + } + + /** + * + * + *
    +   * [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this
    +   * result.
    +   * 
    + * + * .google.spanner.v1.QueryPlan query_plan = 1; + */ + @java.lang.Override + public com.google.spanner.v1.QueryPlanOrBuilder getQueryPlanOrBuilder() { + return queryPlan_ == null ? com.google.spanner.v1.QueryPlan.getDefaultInstance() : queryPlan_; + } + + public static final int QUERY_STATS_FIELD_NUMBER = 2; + private com.google.protobuf.Struct queryStats_; + + /** + * + * + *
    +   * Aggregated statistics from the execution of the query. Only present when
    +   * the query is profiled. For example, a query could return the statistics as
    +   * follows:
    +   *
    +   * {
    +   * "rows_returned": "3",
    +   * "elapsed_time": "1.22 secs",
    +   * "cpu_time": "1.19 secs"
    +   * }
    +   * 
    + * + * .google.protobuf.Struct query_stats = 2; + * + * @return Whether the queryStats field is set. + */ + @java.lang.Override + public boolean hasQueryStats() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Aggregated statistics from the execution of the query. Only present when
    +   * the query is profiled. For example, a query could return the statistics as
    +   * follows:
    +   *
    +   * {
    +   * "rows_returned": "3",
    +   * "elapsed_time": "1.22 secs",
    +   * "cpu_time": "1.19 secs"
    +   * }
    +   * 
    + * + * .google.protobuf.Struct query_stats = 2; + * + * @return The queryStats. + */ + @java.lang.Override + public com.google.protobuf.Struct getQueryStats() { + return queryStats_ == null ? com.google.protobuf.Struct.getDefaultInstance() : queryStats_; + } + + /** + * + * + *
    +   * Aggregated statistics from the execution of the query. Only present when
    +   * the query is profiled. For example, a query could return the statistics as
    +   * follows:
    +   *
    +   * {
    +   * "rows_returned": "3",
    +   * "elapsed_time": "1.22 secs",
    +   * "cpu_time": "1.19 secs"
    +   * }
    +   * 
    + * + * .google.protobuf.Struct query_stats = 2; + */ + @java.lang.Override + public com.google.protobuf.StructOrBuilder getQueryStatsOrBuilder() { + return queryStats_ == null ? com.google.protobuf.Struct.getDefaultInstance() : queryStats_; + } + + public static final int ROW_COUNT_EXACT_FIELD_NUMBER = 3; + + /** + * + * + *
    +   * Standard DML returns an exact count of rows that were modified.
    +   * 
    + * + * int64 row_count_exact = 3; + * + * @return Whether the rowCountExact field is set. + */ + @java.lang.Override + public boolean hasRowCountExact() { + return rowCountCase_ == 3; + } + + /** + * + * + *
    +   * Standard DML returns an exact count of rows that were modified.
    +   * 
    + * + * int64 row_count_exact = 3; + * + * @return The rowCountExact. + */ + @java.lang.Override + public long getRowCountExact() { + if (rowCountCase_ == 3) { + return (java.lang.Long) rowCount_; + } + return 0L; + } + + public static final int ROW_COUNT_LOWER_BOUND_FIELD_NUMBER = 4; + + /** + * + * + *
    +   * Partitioned DML doesn't offer exactly-once semantics, so it
    +   * returns a lower bound of the rows modified.
    +   * 
    + * + * int64 row_count_lower_bound = 4; + * + * @return Whether the rowCountLowerBound field is set. + */ + @java.lang.Override + public boolean hasRowCountLowerBound() { + return rowCountCase_ == 4; + } + + /** + * + * + *
    +   * Partitioned DML doesn't offer exactly-once semantics, so it
    +   * returns a lower bound of the rows modified.
    +   * 
    + * + * int64 row_count_lower_bound = 4; + * + * @return The rowCountLowerBound. + */ + @java.lang.Override + public long getRowCountLowerBound() { + if (rowCountCase_ == 4) { + return (java.lang.Long) rowCount_; + } + return 0L; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getQueryPlan()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getQueryStats()); + } + if (rowCountCase_ == 3) { + output.writeInt64(3, (long) ((java.lang.Long) rowCount_)); + } + if (rowCountCase_ == 4) { + output.writeInt64(4, (long) ((java.lang.Long) rowCount_)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getQueryPlan()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getQueryStats()); + } + if (rowCountCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size( + 3, (long) ((java.lang.Long) rowCount_)); + } + if (rowCountCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size( + 4, (long) ((java.lang.Long) rowCount_)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
com.google.spanner.v1.ResultSetStats)) { + return super.equals(obj); + } + com.google.spanner.v1.ResultSetStats other = (com.google.spanner.v1.ResultSetStats) obj; + + if (hasQueryPlan() != other.hasQueryPlan()) return false; + if (hasQueryPlan()) { + if (!getQueryPlan().equals(other.getQueryPlan())) return false; + } + if (hasQueryStats() != other.hasQueryStats()) return false; + if (hasQueryStats()) { + if (!getQueryStats().equals(other.getQueryStats())) return false; + } + if (!getRowCountCase().equals(other.getRowCountCase())) return false; + switch (rowCountCase_) { + case 3: + if (getRowCountExact() != other.getRowCountExact()) return false; + break; + case 4: + if (getRowCountLowerBound() != other.getRowCountLowerBound()) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasQueryPlan()) { + hash = (37 * hash) + QUERY_PLAN_FIELD_NUMBER; + hash = (53 * hash) + getQueryPlan().hashCode(); + } + if (hasQueryStats()) { + hash = (37 * hash) + QUERY_STATS_FIELD_NUMBER; + hash = (53 * hash) + getQueryStats().hashCode(); + } + switch (rowCountCase_) { + case 3: + hash = (37 * hash) + ROW_COUNT_EXACT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getRowCountExact()); + break; + case 4: + hash = (37 * hash) + ROW_COUNT_LOWER_BOUND_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getRowCountLowerBound()); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.ResultSetStats parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.v1.ResultSetStats parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ResultSetStats parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ResultSetStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ResultSetStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.ResultSetStats parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.ResultSetStats parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ResultSetStats parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ResultSetStats parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ResultSetStats parseDelimitedFrom( + 
java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.ResultSetStats parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.ResultSetStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.ResultSetStats prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Additional statistics about a [ResultSet][google.spanner.v1.ResultSet] or
    +   * [PartialResultSet][google.spanner.v1.PartialResultSet].
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.ResultSetStats} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.ResultSetStats) + com.google.spanner.v1.ResultSetStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.ResultSetProto + .internal_static_google_spanner_v1_ResultSetStats_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.ResultSetProto + .internal_static_google_spanner_v1_ResultSetStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.ResultSetStats.class, + com.google.spanner.v1.ResultSetStats.Builder.class); + } + + // Construct using com.google.spanner.v1.ResultSetStats.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetQueryPlanFieldBuilder(); + internalGetQueryStatsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + queryPlan_ = null; + if (queryPlanBuilder_ != null) { + queryPlanBuilder_.dispose(); + queryPlanBuilder_ = null; + } + queryStats_ = null; + if (queryStatsBuilder_ != null) { + queryStatsBuilder_.dispose(); + queryStatsBuilder_ = null; + } + rowCountCase_ = 0; + rowCount_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.ResultSetProto + .internal_static_google_spanner_v1_ResultSetStats_descriptor; + } + + @java.lang.Override 
+ public com.google.spanner.v1.ResultSetStats getDefaultInstanceForType() { + return com.google.spanner.v1.ResultSetStats.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.ResultSetStats build() { + com.google.spanner.v1.ResultSetStats result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.ResultSetStats buildPartial() { + com.google.spanner.v1.ResultSetStats result = new com.google.spanner.v1.ResultSetStats(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.ResultSetStats result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.queryPlan_ = queryPlanBuilder_ == null ? queryPlan_ : queryPlanBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.queryStats_ = queryStatsBuilder_ == null ? 
queryStats_ : queryStatsBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs(com.google.spanner.v1.ResultSetStats result) { + result.rowCountCase_ = rowCountCase_; + result.rowCount_ = this.rowCount_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.ResultSetStats) { + return mergeFrom((com.google.spanner.v1.ResultSetStats) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.ResultSetStats other) { + if (other == com.google.spanner.v1.ResultSetStats.getDefaultInstance()) return this; + if (other.hasQueryPlan()) { + mergeQueryPlan(other.getQueryPlan()); + } + if (other.hasQueryStats()) { + mergeQueryStats(other.getQueryStats()); + } + switch (other.getRowCountCase()) { + case ROW_COUNT_EXACT: + { + setRowCountExact(other.getRowCountExact()); + break; + } + case ROW_COUNT_LOWER_BOUND: + { + setRowCountLowerBound(other.getRowCountLowerBound()); + break; + } + case ROWCOUNT_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetQueryPlanFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetQueryStatsFieldBuilder().getBuilder(), extensionRegistry); + 
bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: + { + rowCount_ = input.readInt64(); + rowCountCase_ = 3; + break; + } // case 24 + case 32: + { + rowCount_ = input.readInt64(); + rowCountCase_ = 4; + break; + } // case 32 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int rowCountCase_ = 0; + private java.lang.Object rowCount_; + + public RowCountCase getRowCountCase() { + return RowCountCase.forNumber(rowCountCase_); + } + + public Builder clearRowCount() { + rowCountCase_ = 0; + rowCount_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.spanner.v1.QueryPlan queryPlan_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.QueryPlan, + com.google.spanner.v1.QueryPlan.Builder, + com.google.spanner.v1.QueryPlanOrBuilder> + queryPlanBuilder_; + + /** + * + * + *
    +     * [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this
    +     * result.
    +     * 
    + * + * .google.spanner.v1.QueryPlan query_plan = 1; + * + * @return Whether the queryPlan field is set. + */ + public boolean hasQueryPlan() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this
    +     * result.
    +     * 
    + * + * .google.spanner.v1.QueryPlan query_plan = 1; + * + * @return The queryPlan. + */ + public com.google.spanner.v1.QueryPlan getQueryPlan() { + if (queryPlanBuilder_ == null) { + return queryPlan_ == null + ? com.google.spanner.v1.QueryPlan.getDefaultInstance() + : queryPlan_; + } else { + return queryPlanBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this
    +     * result.
    +     * 
    + * + * .google.spanner.v1.QueryPlan query_plan = 1; + */ + public Builder setQueryPlan(com.google.spanner.v1.QueryPlan value) { + if (queryPlanBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + queryPlan_ = value; + } else { + queryPlanBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this
    +     * result.
    +     * 
    + * + * .google.spanner.v1.QueryPlan query_plan = 1; + */ + public Builder setQueryPlan(com.google.spanner.v1.QueryPlan.Builder builderForValue) { + if (queryPlanBuilder_ == null) { + queryPlan_ = builderForValue.build(); + } else { + queryPlanBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this
    +     * result.
    +     * 
    + * + * .google.spanner.v1.QueryPlan query_plan = 1; + */ + public Builder mergeQueryPlan(com.google.spanner.v1.QueryPlan value) { + if (queryPlanBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && queryPlan_ != null + && queryPlan_ != com.google.spanner.v1.QueryPlan.getDefaultInstance()) { + getQueryPlanBuilder().mergeFrom(value); + } else { + queryPlan_ = value; + } + } else { + queryPlanBuilder_.mergeFrom(value); + } + if (queryPlan_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this
    +     * result.
    +     * 
    + * + * .google.spanner.v1.QueryPlan query_plan = 1; + */ + public Builder clearQueryPlan() { + bitField0_ = (bitField0_ & ~0x00000001); + queryPlan_ = null; + if (queryPlanBuilder_ != null) { + queryPlanBuilder_.dispose(); + queryPlanBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this
    +     * result.
    +     * 
    + * + * .google.spanner.v1.QueryPlan query_plan = 1; + */ + public com.google.spanner.v1.QueryPlan.Builder getQueryPlanBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetQueryPlanFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this
    +     * result.
    +     * 
    + * + * .google.spanner.v1.QueryPlan query_plan = 1; + */ + public com.google.spanner.v1.QueryPlanOrBuilder getQueryPlanOrBuilder() { + if (queryPlanBuilder_ != null) { + return queryPlanBuilder_.getMessageOrBuilder(); + } else { + return queryPlan_ == null + ? com.google.spanner.v1.QueryPlan.getDefaultInstance() + : queryPlan_; + } + } + + /** + * + * + *
    +     * [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this
    +     * result.
    +     * 
    + * + * .google.spanner.v1.QueryPlan query_plan = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.QueryPlan, + com.google.spanner.v1.QueryPlan.Builder, + com.google.spanner.v1.QueryPlanOrBuilder> + internalGetQueryPlanFieldBuilder() { + if (queryPlanBuilder_ == null) { + queryPlanBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.QueryPlan, + com.google.spanner.v1.QueryPlan.Builder, + com.google.spanner.v1.QueryPlanOrBuilder>( + getQueryPlan(), getParentForChildren(), isClean()); + queryPlan_ = null; + } + return queryPlanBuilder_; + } + + private com.google.protobuf.Struct queryStats_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Struct, + com.google.protobuf.Struct.Builder, + com.google.protobuf.StructOrBuilder> + queryStatsBuilder_; + + /** + * + * + *
    +     * Aggregated statistics from the execution of the query. Only present when
    +     * the query is profiled. For example, a query could return the statistics as
    +     * follows:
    +     *
    +     * {
    +     * "rows_returned": "3",
    +     * "elapsed_time": "1.22 secs",
    +     * "cpu_time": "1.19 secs"
    +     * }
    +     * 
    + * + * .google.protobuf.Struct query_stats = 2; + * + * @return Whether the queryStats field is set. + */ + public boolean hasQueryStats() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * Aggregated statistics from the execution of the query. Only present when
    +     * the query is profiled. For example, a query could return the statistics as
    +     * follows:
    +     *
    +     * {
    +     * "rows_returned": "3",
    +     * "elapsed_time": "1.22 secs",
    +     * "cpu_time": "1.19 secs"
    +     * }
    +     * 
    + * + * .google.protobuf.Struct query_stats = 2; + * + * @return The queryStats. + */ + public com.google.protobuf.Struct getQueryStats() { + if (queryStatsBuilder_ == null) { + return queryStats_ == null ? com.google.protobuf.Struct.getDefaultInstance() : queryStats_; + } else { + return queryStatsBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Aggregated statistics from the execution of the query. Only present when
    +     * the query is profiled. For example, a query could return the statistics as
    +     * follows:
    +     *
    +     * {
    +     * "rows_returned": "3",
    +     * "elapsed_time": "1.22 secs",
    +     * "cpu_time": "1.19 secs"
    +     * }
    +     * 
    + * + * .google.protobuf.Struct query_stats = 2; + */ + public Builder setQueryStats(com.google.protobuf.Struct value) { + if (queryStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + queryStats_ = value; + } else { + queryStatsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Aggregated statistics from the execution of the query. Only present when
    +     * the query is profiled. For example, a query could return the statistics as
    +     * follows:
    +     *
    +     * {
    +     * "rows_returned": "3",
    +     * "elapsed_time": "1.22 secs",
    +     * "cpu_time": "1.19 secs"
    +     * }
    +     * 
    + * + * .google.protobuf.Struct query_stats = 2; + */ + public Builder setQueryStats(com.google.protobuf.Struct.Builder builderForValue) { + if (queryStatsBuilder_ == null) { + queryStats_ = builderForValue.build(); + } else { + queryStatsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Aggregated statistics from the execution of the query. Only present when
    +     * the query is profiled. For example, a query could return the statistics as
    +     * follows:
    +     *
    +     * {
    +     * "rows_returned": "3",
    +     * "elapsed_time": "1.22 secs",
    +     * "cpu_time": "1.19 secs"
    +     * }
    +     * 
    + * + * .google.protobuf.Struct query_stats = 2; + */ + public Builder mergeQueryStats(com.google.protobuf.Struct value) { + if (queryStatsBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && queryStats_ != null + && queryStats_ != com.google.protobuf.Struct.getDefaultInstance()) { + getQueryStatsBuilder().mergeFrom(value); + } else { + queryStats_ = value; + } + } else { + queryStatsBuilder_.mergeFrom(value); + } + if (queryStats_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Aggregated statistics from the execution of the query. Only present when
    +     * the query is profiled. For example, a query could return the statistics as
    +     * follows:
    +     *
    +     * {
    +     * "rows_returned": "3",
    +     * "elapsed_time": "1.22 secs",
    +     * "cpu_time": "1.19 secs"
    +     * }
    +     * 
    + * + * .google.protobuf.Struct query_stats = 2; + */ + public Builder clearQueryStats() { + bitField0_ = (bitField0_ & ~0x00000002); + queryStats_ = null; + if (queryStatsBuilder_ != null) { + queryStatsBuilder_.dispose(); + queryStatsBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Aggregated statistics from the execution of the query. Only present when
    +     * the query is profiled. For example, a query could return the statistics as
    +     * follows:
    +     *
    +     * {
    +     * "rows_returned": "3",
    +     * "elapsed_time": "1.22 secs",
    +     * "cpu_time": "1.19 secs"
    +     * }
    +     * 
    + * + * .google.protobuf.Struct query_stats = 2; + */ + public com.google.protobuf.Struct.Builder getQueryStatsBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetQueryStatsFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Aggregated statistics from the execution of the query. Only present when
    +     * the query is profiled. For example, a query could return the statistics as
    +     * follows:
    +     *
    +     * {
    +     * "rows_returned": "3",
    +     * "elapsed_time": "1.22 secs",
    +     * "cpu_time": "1.19 secs"
    +     * }
    +     * 
    + * + * .google.protobuf.Struct query_stats = 2; + */ + public com.google.protobuf.StructOrBuilder getQueryStatsOrBuilder() { + if (queryStatsBuilder_ != null) { + return queryStatsBuilder_.getMessageOrBuilder(); + } else { + return queryStats_ == null ? com.google.protobuf.Struct.getDefaultInstance() : queryStats_; + } + } + + /** + * + * + *
    +     * Aggregated statistics from the execution of the query. Only present when
    +     * the query is profiled. For example, a query could return the statistics as
    +     * follows:
    +     *
    +     * {
    +     * "rows_returned": "3",
    +     * "elapsed_time": "1.22 secs",
    +     * "cpu_time": "1.19 secs"
    +     * }
    +     * 
    + * + * .google.protobuf.Struct query_stats = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Struct, + com.google.protobuf.Struct.Builder, + com.google.protobuf.StructOrBuilder> + internalGetQueryStatsFieldBuilder() { + if (queryStatsBuilder_ == null) { + queryStatsBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Struct, + com.google.protobuf.Struct.Builder, + com.google.protobuf.StructOrBuilder>( + getQueryStats(), getParentForChildren(), isClean()); + queryStats_ = null; + } + return queryStatsBuilder_; + } + + /** + * + * + *
    +     * Standard DML returns an exact count of rows that were modified.
    +     * 
    + * + * int64 row_count_exact = 3; + * + * @return Whether the rowCountExact field is set. + */ + public boolean hasRowCountExact() { + return rowCountCase_ == 3; + } + + /** + * + * + *
    +     * Standard DML returns an exact count of rows that were modified.
    +     * 
    + * + * int64 row_count_exact = 3; + * + * @return The rowCountExact. + */ + public long getRowCountExact() { + if (rowCountCase_ == 3) { + return (java.lang.Long) rowCount_; + } + return 0L; + } + + /** + * + * + *
    +     * Standard DML returns an exact count of rows that were modified.
    +     * 
    + * + * int64 row_count_exact = 3; + * + * @param value The rowCountExact to set. + * @return This builder for chaining. + */ + public Builder setRowCountExact(long value) { + + rowCountCase_ = 3; + rowCount_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Standard DML returns an exact count of rows that were modified.
    +     * 
    + * + * int64 row_count_exact = 3; + * + * @return This builder for chaining. + */ + public Builder clearRowCountExact() { + if (rowCountCase_ == 3) { + rowCountCase_ = 0; + rowCount_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Partitioned DML doesn't offer exactly-once semantics, so it
    +     * returns a lower bound of the rows modified.
    +     * 
    + * + * int64 row_count_lower_bound = 4; + * + * @return Whether the rowCountLowerBound field is set. + */ + public boolean hasRowCountLowerBound() { + return rowCountCase_ == 4; + } + + /** + * + * + *
    +     * Partitioned DML doesn't offer exactly-once semantics, so it
    +     * returns a lower bound of the rows modified.
    +     * 
    + * + * int64 row_count_lower_bound = 4; + * + * @return The rowCountLowerBound. + */ + public long getRowCountLowerBound() { + if (rowCountCase_ == 4) { + return (java.lang.Long) rowCount_; + } + return 0L; + } + + /** + * + * + *
    +     * Partitioned DML doesn't offer exactly-once semantics, so it
    +     * returns a lower bound of the rows modified.
    +     * 
    + * + * int64 row_count_lower_bound = 4; + * + * @param value The rowCountLowerBound to set. + * @return This builder for chaining. + */ + public Builder setRowCountLowerBound(long value) { + + rowCountCase_ = 4; + rowCount_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Partitioned DML doesn't offer exactly-once semantics, so it
    +     * returns a lower bound of the rows modified.
    +     * 
    + * + * int64 row_count_lower_bound = 4; + * + * @return This builder for chaining. + */ + public Builder clearRowCountLowerBound() { + if (rowCountCase_ == 4) { + rowCountCase_ = 0; + rowCount_ = null; + onChanged(); + } + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.ResultSetStats) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.ResultSetStats) + private static final com.google.spanner.v1.ResultSetStats DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.ResultSetStats(); + } + + public static com.google.spanner.v1.ResultSetStats getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ResultSetStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.ResultSetStats getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetStatsOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetStatsOrBuilder.java new file mode 100644 index 000000000000..efdc44f72572 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetStatsOrBuilder.java @@ -0,0 +1,185 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/result_set.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface ResultSetStatsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.ResultSetStats) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this
    +   * result.
    +   * 
    + * + * .google.spanner.v1.QueryPlan query_plan = 1; + * + * @return Whether the queryPlan field is set. + */ + boolean hasQueryPlan(); + + /** + * + * + *
    +   * [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this
    +   * result.
    +   * 
    + * + * .google.spanner.v1.QueryPlan query_plan = 1; + * + * @return The queryPlan. + */ + com.google.spanner.v1.QueryPlan getQueryPlan(); + + /** + * + * + *
    +   * [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this
    +   * result.
    +   * 
    + * + * .google.spanner.v1.QueryPlan query_plan = 1; + */ + com.google.spanner.v1.QueryPlanOrBuilder getQueryPlanOrBuilder(); + + /** + * + * + *
    +   * Aggregated statistics from the execution of the query. Only present when
    +   * the query is profiled. For example, a query could return the statistics as
    +   * follows:
    +   *
    +   * {
    +   * "rows_returned": "3",
    +   * "elapsed_time": "1.22 secs",
    +   * "cpu_time": "1.19 secs"
    +   * }
    +   * 
    + * + * .google.protobuf.Struct query_stats = 2; + * + * @return Whether the queryStats field is set. + */ + boolean hasQueryStats(); + + /** + * + * + *
    +   * Aggregated statistics from the execution of the query. Only present when
    +   * the query is profiled. For example, a query could return the statistics as
    +   * follows:
    +   *
    +   * {
    +   * "rows_returned": "3",
    +   * "elapsed_time": "1.22 secs",
    +   * "cpu_time": "1.19 secs"
    +   * }
    +   * 
    + * + * .google.protobuf.Struct query_stats = 2; + * + * @return The queryStats. + */ + com.google.protobuf.Struct getQueryStats(); + + /** + * + * + *
    +   * Aggregated statistics from the execution of the query. Only present when
    +   * the query is profiled. For example, a query could return the statistics as
    +   * follows:
    +   *
    +   * {
    +   * "rows_returned": "3",
    +   * "elapsed_time": "1.22 secs",
    +   * "cpu_time": "1.19 secs"
    +   * }
    +   * 
    + * + * .google.protobuf.Struct query_stats = 2; + */ + com.google.protobuf.StructOrBuilder getQueryStatsOrBuilder(); + + /** + * + * + *
    +   * Standard DML returns an exact count of rows that were modified.
    +   * 
    + * + * int64 row_count_exact = 3; + * + * @return Whether the rowCountExact field is set. + */ + boolean hasRowCountExact(); + + /** + * + * + *
    +   * Standard DML returns an exact count of rows that were modified.
    +   * 
    + * + * int64 row_count_exact = 3; + * + * @return The rowCountExact. + */ + long getRowCountExact(); + + /** + * + * + *
    +   * Partitioned DML doesn't offer exactly-once semantics, so it
    +   * returns a lower bound of the rows modified.
    +   * 
    + * + * int64 row_count_lower_bound = 4; + * + * @return Whether the rowCountLowerBound field is set. + */ + boolean hasRowCountLowerBound(); + + /** + * + * + *
    +   * Partitioned DML doesn't offer exactly-once semantics, so it
    +   * returns a lower bound of the rows modified.
    +   * 
    + * + * int64 row_count_lower_bound = 4; + * + * @return The rowCountLowerBound. + */ + long getRowCountLowerBound(); + + com.google.spanner.v1.ResultSetStats.RowCountCase getRowCountCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RollbackRequest.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RollbackRequest.java new file mode 100644 index 000000000000..bedf1ee135fc --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RollbackRequest.java @@ -0,0 +1,707 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * The request for [Rollback][google.spanner.v1.Spanner.Rollback].
    + * 
    + * + * Protobuf type {@code google.spanner.v1.RollbackRequest} + */ +@com.google.protobuf.Generated +public final class RollbackRequest extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.RollbackRequest) + RollbackRequestOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "RollbackRequest"); + } + + // Use RollbackRequest.newBuilder() to construct. + private RollbackRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private RollbackRequest() { + session_ = ""; + transactionId_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_RollbackRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_RollbackRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.RollbackRequest.class, + com.google.spanner.v1.RollbackRequest.Builder.class); + } + + public static final int SESSION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object session_ = ""; + + /** + * + * + *
    +   * Required. The session in which the transaction to roll back is running.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + @java.lang.Override + public java.lang.String getSession() { + java.lang.Object ref = session_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + session_ = s; + return s; + } + } + + /** + * + * + *
    +   * Required. The session in which the transaction to roll back is running.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSessionBytes() { + java.lang.Object ref = session_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + session_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TRANSACTION_ID_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString transactionId_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * Required. The transaction to roll back.
    +   * 
    + * + * bytes transaction_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The transactionId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTransactionId() { + return transactionId_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(session_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, session_); + } + if (!transactionId_.isEmpty()) { + output.writeBytes(2, transactionId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(session_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, session_); + } + if (!transactionId_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(2, transactionId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.RollbackRequest)) { + return super.equals(obj); + } + com.google.spanner.v1.RollbackRequest other = (com.google.spanner.v1.RollbackRequest) obj; + + if (!getSession().equals(other.getSession())) return false; + if (!getTransactionId().equals(other.getTransactionId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + 
if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SESSION_FIELD_NUMBER; + hash = (53 * hash) + getSession().hashCode(); + hash = (37 * hash) + TRANSACTION_ID_FIELD_NUMBER; + hash = (53 * hash) + getTransactionId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.RollbackRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.RollbackRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.RollbackRequest parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.RollbackRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.RollbackRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.RollbackRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.RollbackRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.RollbackRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.RollbackRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.RollbackRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.RollbackRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.RollbackRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.RollbackRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * The request for [Rollback][google.spanner.v1.Spanner.Rollback].
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.RollbackRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.RollbackRequest) + com.google.spanner.v1.RollbackRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_RollbackRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_RollbackRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.RollbackRequest.class, + com.google.spanner.v1.RollbackRequest.Builder.class); + } + + // Construct using com.google.spanner.v1.RollbackRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + session_ = ""; + transactionId_ = com.google.protobuf.ByteString.EMPTY; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_RollbackRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.RollbackRequest getDefaultInstanceForType() { + return com.google.spanner.v1.RollbackRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.RollbackRequest build() { + com.google.spanner.v1.RollbackRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.RollbackRequest buildPartial() 
{ + com.google.spanner.v1.RollbackRequest result = + new com.google.spanner.v1.RollbackRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.RollbackRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.session_ = session_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.transactionId_ = transactionId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.RollbackRequest) { + return mergeFrom((com.google.spanner.v1.RollbackRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.RollbackRequest other) { + if (other == com.google.spanner.v1.RollbackRequest.getDefaultInstance()) return this; + if (!other.getSession().isEmpty()) { + session_ = other.session_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getTransactionId().isEmpty()) { + setTransactionId(other.getTransactionId()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + session_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + transactionId_ = input.readBytes(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, 
extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object session_ = ""; + + /** + * + * + *
    +     * Required. The session in which the transaction to roll back is running.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + public java.lang.String getSession() { + java.lang.Object ref = session_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + session_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Required. The session in which the transaction to roll back is running.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + public com.google.protobuf.ByteString getSessionBytes() { + java.lang.Object ref = session_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + session_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Required. The session in which the transaction to roll back is running.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The session to set. + * @return This builder for chaining. + */ + public Builder setSession(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + session_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The session in which the transaction to roll back is running.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearSession() { + session_ = getDefaultInstance().getSession(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The session in which the transaction to roll back is running.
    +     * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for session to set. + * @return This builder for chaining. + */ + public Builder setSessionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + session_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString transactionId_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * Required. The transaction to roll back.
    +     * 
    + * + * bytes transaction_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The transactionId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTransactionId() { + return transactionId_; + } + + /** + * + * + *
    +     * Required. The transaction to roll back.
    +     * 
    + * + * bytes transaction_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The transactionId to set. + * @return This builder for chaining. + */ + public Builder setTransactionId(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + transactionId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The transaction to roll back.
    +     * 
    + * + * bytes transaction_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearTransactionId() { + bitField0_ = (bitField0_ & ~0x00000002); + transactionId_ = getDefaultInstance().getTransactionId(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.RollbackRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.RollbackRequest) + private static final com.google.spanner.v1.RollbackRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.RollbackRequest(); + } + + public static com.google.spanner.v1.RollbackRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RollbackRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.RollbackRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RollbackRequestOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RollbackRequestOrBuilder.java new file mode 100644 index 000000000000..10871a3dfb6f --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RollbackRequestOrBuilder.java @@ -0,0 +1,71 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface RollbackRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.RollbackRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The session in which the transaction to roll back is running.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The session. + */ + java.lang.String getSession(); + + /** + * + * + *
    +   * Required. The session in which the transaction to roll back is running.
    +   * 
    + * + * + * string session = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for session. + */ + com.google.protobuf.ByteString getSessionBytes(); + + /** + * + * + *
    +   * Required. The transaction to roll back.
    +   * 
    + * + * bytes transaction_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The transactionId. + */ + com.google.protobuf.ByteString getTransactionId(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RoutingHint.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RoutingHint.java new file mode 100644 index 000000000000..b4b8b40a23c0 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RoutingHint.java @@ -0,0 +1,2777 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/location.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * `RoutingHint` can be optionally added to location-aware Spanner
    + * requests. It gives the server hints that can be used to route the request to
    + * an appropriate server, potentially significantly decreasing latency and
    + * improving throughput. To achieve improved performance, most fields must be
    + * filled in with accurate values.
    + *
    + * The presence of a valid `RoutingHint` tells the server that the client
    + * is location-aware.
    + *
    + * `RoutingHint` does not change the semantics of the request; it is
    + * purely a performance hint; the request will perform the same actions on the
    + * database's data as if `RoutingHint` were not present. However, if
    + * the `RoutingHint` is incomplete or incorrect, the response may include
    + * a `CacheUpdate` the client can use to correct its location cache.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.RoutingHint} + */ +@com.google.protobuf.Generated +public final class RoutingHint extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.RoutingHint) + RoutingHintOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "RoutingHint"); + } + + // Use RoutingHint.newBuilder() to construct. + private RoutingHint(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private RoutingHint() { + schemaGeneration_ = com.google.protobuf.ByteString.EMPTY; + key_ = com.google.protobuf.ByteString.EMPTY; + limitKey_ = com.google.protobuf.ByteString.EMPTY; + skippedTabletUid_ = java.util.Collections.emptyList(); + clientLocation_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_RoutingHint_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_RoutingHint_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.RoutingHint.class, + com.google.spanner.v1.RoutingHint.Builder.class); + } + + public interface SkippedTabletOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.RoutingHint.SkippedTablet) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * The tablet UID of the tablet that was skipped. See `Tablet.tablet_uid`.
    +     * 
    + * + * uint64 tablet_uid = 1; + * + * @return The tabletUid. + */ + long getTabletUid(); + + /** + * + * + *
    +     * The incarnation of the tablet that was skipped. See `Tablet.incarnation`.
    +     * 
    + * + * bytes incarnation = 2; + * + * @return The incarnation. + */ + com.google.protobuf.ByteString getIncarnation(); + } + + /** + * + * + *
    +   * A tablet that was skipped by the client. See `Tablet.tablet_uid` and
    +   * `Tablet.incarnation`.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.RoutingHint.SkippedTablet} + */ + public static final class SkippedTablet extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.RoutingHint.SkippedTablet) + SkippedTabletOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "SkippedTablet"); + } + + // Use SkippedTablet.newBuilder() to construct. + private SkippedTablet(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private SkippedTablet() { + incarnation_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_RoutingHint_SkippedTablet_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_RoutingHint_SkippedTablet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.RoutingHint.SkippedTablet.class, + com.google.spanner.v1.RoutingHint.SkippedTablet.Builder.class); + } + + public static final int TABLET_UID_FIELD_NUMBER = 1; + private long tabletUid_ = 0L; + + /** + * + * + *
    +     * The tablet UID of the tablet that was skipped. See `Tablet.tablet_uid`.
    +     * 
    + * + * uint64 tablet_uid = 1; + * + * @return The tabletUid. + */ + @java.lang.Override + public long getTabletUid() { + return tabletUid_; + } + + public static final int INCARNATION_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString incarnation_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * The incarnation of the tablet that was skipped. See `Tablet.incarnation`.
    +     * 
    + * + * bytes incarnation = 2; + * + * @return The incarnation. + */ + @java.lang.Override + public com.google.protobuf.ByteString getIncarnation() { + return incarnation_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (tabletUid_ != 0L) { + output.writeUInt64(1, tabletUid_); + } + if (!incarnation_.isEmpty()) { + output.writeBytes(2, incarnation_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (tabletUid_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeUInt64Size(1, tabletUid_); + } + if (!incarnation_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(2, incarnation_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.RoutingHint.SkippedTablet)) { + return super.equals(obj); + } + com.google.spanner.v1.RoutingHint.SkippedTablet other = + (com.google.spanner.v1.RoutingHint.SkippedTablet) obj; + + if (getTabletUid() != other.getTabletUid()) return false; + if (!getIncarnation().equals(other.getIncarnation())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + 
TABLET_UID_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getTabletUid()); + hash = (37 * hash) + INCARNATION_FIELD_NUMBER; + hash = (53 * hash) + getIncarnation().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.RoutingHint.SkippedTablet parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.RoutingHint.SkippedTablet parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.RoutingHint.SkippedTablet parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.RoutingHint.SkippedTablet parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.RoutingHint.SkippedTablet parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.RoutingHint.SkippedTablet parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.RoutingHint.SkippedTablet parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public 
static com.google.spanner.v1.RoutingHint.SkippedTablet parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.RoutingHint.SkippedTablet parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.RoutingHint.SkippedTablet parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.RoutingHint.SkippedTablet parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.RoutingHint.SkippedTablet parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.RoutingHint.SkippedTablet prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * A tablet that was skipped by the client. See `Tablet.tablet_uid` and
    +     * `Tablet.incarnation`.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.RoutingHint.SkippedTablet} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.RoutingHint.SkippedTablet) + com.google.spanner.v1.RoutingHint.SkippedTabletOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_RoutingHint_SkippedTablet_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_RoutingHint_SkippedTablet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.RoutingHint.SkippedTablet.class, + com.google.spanner.v1.RoutingHint.SkippedTablet.Builder.class); + } + + // Construct using com.google.spanner.v1.RoutingHint.SkippedTablet.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + tabletUid_ = 0L; + incarnation_ = com.google.protobuf.ByteString.EMPTY; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_RoutingHint_SkippedTablet_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.RoutingHint.SkippedTablet getDefaultInstanceForType() { + return com.google.spanner.v1.RoutingHint.SkippedTablet.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.RoutingHint.SkippedTablet build() { + com.google.spanner.v1.RoutingHint.SkippedTablet result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.RoutingHint.SkippedTablet buildPartial() { + com.google.spanner.v1.RoutingHint.SkippedTablet result = + new com.google.spanner.v1.RoutingHint.SkippedTablet(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.RoutingHint.SkippedTablet result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.tabletUid_ = tabletUid_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.incarnation_ = incarnation_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.RoutingHint.SkippedTablet) { + return mergeFrom((com.google.spanner.v1.RoutingHint.SkippedTablet) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.RoutingHint.SkippedTablet other) { + if (other == com.google.spanner.v1.RoutingHint.SkippedTablet.getDefaultInstance()) + return this; + if (other.getTabletUid() != 0L) { + setTabletUid(other.getTabletUid()); + } + if (!other.getIncarnation().isEmpty()) { + setIncarnation(other.getIncarnation()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + tabletUid_ = input.readUInt64(); + bitField0_ |= 0x00000001; + break; + } // 
case 8 + case 18: + { + incarnation_ = input.readBytes(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long tabletUid_; + + /** + * + * + *
    +       * The tablet UID of the tablet that was skipped. See `Tablet.tablet_uid`.
    +       * 
    + * + * uint64 tablet_uid = 1; + * + * @return The tabletUid. + */ + @java.lang.Override + public long getTabletUid() { + return tabletUid_; + } + + /** + * + * + *
    +       * The tablet UID of the tablet that was skipped. See `Tablet.tablet_uid`.
    +       * 
    + * + * uint64 tablet_uid = 1; + * + * @param value The tabletUid to set. + * @return This builder for chaining. + */ + public Builder setTabletUid(long value) { + + tabletUid_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The tablet UID of the tablet that was skipped. See `Tablet.tablet_uid`.
    +       * 
    + * + * uint64 tablet_uid = 1; + * + * @return This builder for chaining. + */ + public Builder clearTabletUid() { + bitField0_ = (bitField0_ & ~0x00000001); + tabletUid_ = 0L; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString incarnation_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +       * The incarnation of the tablet that was skipped. See `Tablet.incarnation`.
    +       * 
    + * + * bytes incarnation = 2; + * + * @return The incarnation. + */ + @java.lang.Override + public com.google.protobuf.ByteString getIncarnation() { + return incarnation_; + } + + /** + * + * + *
    +       * The incarnation of the tablet that was skipped. See `Tablet.incarnation`.
    +       * 
    + * + * bytes incarnation = 2; + * + * @param value The incarnation to set. + * @return This builder for chaining. + */ + public Builder setIncarnation(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + incarnation_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The incarnation of the tablet that was skipped. See `Tablet.incarnation`.
    +       * 
    + * + * bytes incarnation = 2; + * + * @return This builder for chaining. + */ + public Builder clearIncarnation() { + bitField0_ = (bitField0_ & ~0x00000002); + incarnation_ = getDefaultInstance().getIncarnation(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.RoutingHint.SkippedTablet) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.RoutingHint.SkippedTablet) + private static final com.google.spanner.v1.RoutingHint.SkippedTablet DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.RoutingHint.SkippedTablet(); + } + + public static com.google.spanner.v1.RoutingHint.SkippedTablet getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SkippedTablet parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.RoutingHint.SkippedTablet getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public static final 
int OPERATION_UID_FIELD_NUMBER = 1; + private long operationUid_ = 0L; + + /** + * + * + *
    +   * A session-scoped unique ID for the operation, computed client-side.
    +   * Requests with the same `operation_uid` should have a shared 'shape',
    +   * meaning that some fields are expected to be the same, such as the SQL
    +   * query, the target table/columns (for reads) etc. Requests with the same
    +   * `operation_uid` are meant to differ only in fields like keys/key
    +   * ranges/query parameters, transaction IDs, etc.
    +   *
    +   * `operation_uid` must be non-zero for `RoutingHint` to be valid.
    +   * 
    + * + * uint64 operation_uid = 1; + * + * @return The operationUid. + */ + @java.lang.Override + public long getOperationUid() { + return operationUid_; + } + + public static final int DATABASE_ID_FIELD_NUMBER = 2; + private long databaseId_ = 0L; + + /** + * + * + *
    +   * The database ID of the database being accessed, see
    +   * `CacheUpdate.database_id`. Should match the cache entries that were used
    +   * to generate the rest of the fields in this `RoutingHint`.
    +   * 
    + * + * uint64 database_id = 2; + * + * @return The databaseId. + */ + @java.lang.Override + public long getDatabaseId() { + return databaseId_; + } + + public static final int SCHEMA_GENERATION_FIELD_NUMBER = 3; + private com.google.protobuf.ByteString schemaGeneration_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * The schema generation of the recipe that was used to generate `key` and
    +   * `limit_key`. See also `RecipeList.schema_generation`.
    +   * 
    + * + * bytes schema_generation = 3; + * + * @return The schemaGeneration. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSchemaGeneration() { + return schemaGeneration_; + } + + public static final int KEY_FIELD_NUMBER = 4; + private com.google.protobuf.ByteString key_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * The key / key range that this request accesses. For operations that
    +   * access a single key, `key` should be set and `limit_key` should be empty.
    +   * For operations that access a key range, `key` and `limit_key` should both
    +   * be set, to the inclusive start and exclusive end of the range respectively.
    +   *
    +   * The keys are encoded in "sortable string format" (ssformat), using a
    +   * `KeyRecipe` that is appropriate for the request. See `KeyRecipe` for more
    +   * details.
    +   * 
    + * + * bytes key = 4; + * + * @return The key. + */ + @java.lang.Override + public com.google.protobuf.ByteString getKey() { + return key_; + } + + public static final int LIMIT_KEY_FIELD_NUMBER = 5; + private com.google.protobuf.ByteString limitKey_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * If this request targets a key range, this is the exclusive end of the
    +   * range. See `key` for more details.
    +   * 
    + * + * bytes limit_key = 5; + * + * @return The limitKey. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLimitKey() { + return limitKey_; + } + + public static final int GROUP_UID_FIELD_NUMBER = 6; + private long groupUid_ = 0L; + + /** + * + * + *
    +   * The group UID of the group that the client believes serves the range
    +   * defined by `key` and `limit_key`. See `Range.group_uid` for more details.
    +   * 
    + * + * uint64 group_uid = 6; + * + * @return The groupUid. + */ + @java.lang.Override + public long getGroupUid() { + return groupUid_; + } + + public static final int SPLIT_ID_FIELD_NUMBER = 7; + private long splitId_ = 0L; + + /** + * + * + *
    +   * The split ID of the split that the client believes contains the range
    +   * defined by `key` and `limit_key`. See `Range.split_id` for more details.
    +   * 
    + * + * uint64 split_id = 7; + * + * @return The splitId. + */ + @java.lang.Override + public long getSplitId() { + return splitId_; + } + + public static final int TABLET_UID_FIELD_NUMBER = 8; + private long tabletUid_ = 0L; + + /** + * + * + *
    +   * The tablet UID of the tablet from group `group_uid` that the client
    +   * believes is best to serve this request. See `Group.local_tablet_uids` and
    +   * `Group.leader_tablet_uid`.
    +   * 
    + * + * uint64 tablet_uid = 8; + * + * @return The tabletUid. + */ + @java.lang.Override + public long getTabletUid() { + return tabletUid_; + } + + public static final int SKIPPED_TABLET_UID_FIELD_NUMBER = 9; + + @SuppressWarnings("serial") + private java.util.List skippedTabletUid_; + + /** + * + * + *
    +   * If the client had multiple options for tablet selection, and some of its
    +   * first choices were unhealthy (e.g., the server is unreachable, or
    +   * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +   * tablets, with their incarnations. The server may include a `CacheUpdate`
    +   * with new locations for those tablets.
    +   * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + @java.lang.Override + public java.util.List getSkippedTabletUidList() { + return skippedTabletUid_; + } + + /** + * + * + *
    +   * If the client had multiple options for tablet selection, and some of its
    +   * first choices were unhealthy (e.g., the server is unreachable, or
    +   * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +   * tablets, with their incarnations. The server may include a `CacheUpdate`
    +   * with new locations for those tablets.
    +   * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + @java.lang.Override + public java.util.List + getSkippedTabletUidOrBuilderList() { + return skippedTabletUid_; + } + + /** + * + * + *
    +   * If the client had multiple options for tablet selection, and some of its
    +   * first choices were unhealthy (e.g., the server is unreachable, or
    +   * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +   * tablets, with their incarnations. The server may include a `CacheUpdate`
    +   * with new locations for those tablets.
    +   * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + @java.lang.Override + public int getSkippedTabletUidCount() { + return skippedTabletUid_.size(); + } + + /** + * + * + *
    +   * If the client had multiple options for tablet selection, and some of its
    +   * first choices were unhealthy (e.g., the server is unreachable, or
    +   * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +   * tablets, with their incarnations. The server may include a `CacheUpdate`
    +   * with new locations for those tablets.
    +   * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + @java.lang.Override + public com.google.spanner.v1.RoutingHint.SkippedTablet getSkippedTabletUid(int index) { + return skippedTabletUid_.get(index); + } + + /** + * + * + *
    +   * If the client had multiple options for tablet selection, and some of its
    +   * first choices were unhealthy (e.g., the server is unreachable, or
    +   * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +   * tablets, with their incarnations. The server may include a `CacheUpdate`
    +   * with new locations for those tablets.
    +   * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + @java.lang.Override + public com.google.spanner.v1.RoutingHint.SkippedTabletOrBuilder getSkippedTabletUidOrBuilder( + int index) { + return skippedTabletUid_.get(index); + } + + public static final int CLIENT_LOCATION_FIELD_NUMBER = 10; + + @SuppressWarnings("serial") + private volatile java.lang.Object clientLocation_ = ""; + + /** + * + * + *
    +   * If present, the client's current location. This should be the name of a
    +   * Google Cloud zone or region, such as "us-central1".
    +   *
    +   * If absent, the client's location will be assumed to be the same as the
    +   * location of the server the client ends up connected to.
    +   *
    +   * Locations are primarily valuable for clients that connect from regions
    +   * other than the ones that contain the Spanner database.
    +   * 
    + * + * string client_location = 10; + * + * @return The clientLocation. + */ + @java.lang.Override + public java.lang.String getClientLocation() { + java.lang.Object ref = clientLocation_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clientLocation_ = s; + return s; + } + } + + /** + * + * + *
    +   * If present, the client's current location. This should be the name of a
    +   * Google Cloud zone or region, such as "us-central1".
    +   *
    +   * If absent, the client's location will be assumed to be the same as the
    +   * location of the server the client ends up connected to.
    +   *
    +   * Locations are primarily valuable for clients that connect from regions
    +   * other than the ones that contain the Spanner database.
    +   * 
    + * + * string client_location = 10; + * + * @return The bytes for clientLocation. + */ + @java.lang.Override + public com.google.protobuf.ByteString getClientLocationBytes() { + java.lang.Object ref = clientLocation_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + clientLocation_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (operationUid_ != 0L) { + output.writeUInt64(1, operationUid_); + } + if (databaseId_ != 0L) { + output.writeUInt64(2, databaseId_); + } + if (!schemaGeneration_.isEmpty()) { + output.writeBytes(3, schemaGeneration_); + } + if (!key_.isEmpty()) { + output.writeBytes(4, key_); + } + if (!limitKey_.isEmpty()) { + output.writeBytes(5, limitKey_); + } + if (groupUid_ != 0L) { + output.writeUInt64(6, groupUid_); + } + if (splitId_ != 0L) { + output.writeUInt64(7, splitId_); + } + if (tabletUid_ != 0L) { + output.writeUInt64(8, tabletUid_); + } + for (int i = 0; i < skippedTabletUid_.size(); i++) { + output.writeMessage(9, skippedTabletUid_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(clientLocation_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 10, clientLocation_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (operationUid_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeUInt64Size(1, operationUid_); + } + if 
(databaseId_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeUInt64Size(2, databaseId_); + } + if (!schemaGeneration_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(3, schemaGeneration_); + } + if (!key_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(4, key_); + } + if (!limitKey_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(5, limitKey_); + } + if (groupUid_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeUInt64Size(6, groupUid_); + } + if (splitId_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeUInt64Size(7, splitId_); + } + if (tabletUid_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeUInt64Size(8, tabletUid_); + } + for (int i = 0; i < skippedTabletUid_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(9, skippedTabletUid_.get(i)); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(clientLocation_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(10, clientLocation_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.RoutingHint)) { + return super.equals(obj); + } + com.google.spanner.v1.RoutingHint other = (com.google.spanner.v1.RoutingHint) obj; + + if (getOperationUid() != other.getOperationUid()) return false; + if (getDatabaseId() != other.getDatabaseId()) return false; + if (!getSchemaGeneration().equals(other.getSchemaGeneration())) return false; + if (!getKey().equals(other.getKey())) return false; + if (!getLimitKey().equals(other.getLimitKey())) return false; + if (getGroupUid() != other.getGroupUid()) return false; + if (getSplitId() != other.getSplitId()) return false; + if (getTabletUid() != other.getTabletUid()) return 
false; + if (!getSkippedTabletUidList().equals(other.getSkippedTabletUidList())) return false; + if (!getClientLocation().equals(other.getClientLocation())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + OPERATION_UID_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getOperationUid()); + hash = (37 * hash) + DATABASE_ID_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getDatabaseId()); + hash = (37 * hash) + SCHEMA_GENERATION_FIELD_NUMBER; + hash = (53 * hash) + getSchemaGeneration().hashCode(); + hash = (37 * hash) + KEY_FIELD_NUMBER; + hash = (53 * hash) + getKey().hashCode(); + hash = (37 * hash) + LIMIT_KEY_FIELD_NUMBER; + hash = (53 * hash) + getLimitKey().hashCode(); + hash = (37 * hash) + GROUP_UID_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getGroupUid()); + hash = (37 * hash) + SPLIT_ID_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getSplitId()); + hash = (37 * hash) + TABLET_UID_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getTabletUid()); + if (getSkippedTabletUidCount() > 0) { + hash = (37 * hash) + SKIPPED_TABLET_UID_FIELD_NUMBER; + hash = (53 * hash) + getSkippedTabletUidList().hashCode(); + } + hash = (37 * hash) + CLIENT_LOCATION_FIELD_NUMBER; + hash = (53 * hash) + getClientLocation().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.RoutingHint parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.RoutingHint parseFrom( + 
java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.RoutingHint parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.RoutingHint parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.RoutingHint parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.RoutingHint parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.RoutingHint parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.RoutingHint parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.RoutingHint parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.RoutingHint parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.RoutingHint parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.RoutingHint parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.RoutingHint prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * `RoutingHint` can be optionally added to location-aware Spanner
    +   * requests. It gives the server hints that can be used to route the request to
    +   * an appropriate server, potentially significantly decreasing latency and
    +   * improving throughput. To achieve improved performance, most fields must be
    +   * filled in with accurate values.
    +   *
    +   * The presence of a valid `RoutingHint` tells the server that the client
    +   * is location-aware.
    +   *
    +   * `RoutingHint` does not change the semantics of the request; it is
    +   * purely a performance hint; the request will perform the same actions on the
    +   * database's data as if `RoutingHint` were not present. However, if
    +   * the `RoutingHint` is incomplete or incorrect, the response may include
    +   * a `CacheUpdate` the client can use to correct its location cache.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.RoutingHint} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.RoutingHint) + com.google.spanner.v1.RoutingHintOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_RoutingHint_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_RoutingHint_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.RoutingHint.class, + com.google.spanner.v1.RoutingHint.Builder.class); + } + + // Construct using com.google.spanner.v1.RoutingHint.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + operationUid_ = 0L; + databaseId_ = 0L; + schemaGeneration_ = com.google.protobuf.ByteString.EMPTY; + key_ = com.google.protobuf.ByteString.EMPTY; + limitKey_ = com.google.protobuf.ByteString.EMPTY; + groupUid_ = 0L; + splitId_ = 0L; + tabletUid_ = 0L; + if (skippedTabletUidBuilder_ == null) { + skippedTabletUid_ = java.util.Collections.emptyList(); + } else { + skippedTabletUid_ = null; + skippedTabletUidBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000100); + clientLocation_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_RoutingHint_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.RoutingHint getDefaultInstanceForType() { + return 
com.google.spanner.v1.RoutingHint.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.RoutingHint build() { + com.google.spanner.v1.RoutingHint result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.RoutingHint buildPartial() { + com.google.spanner.v1.RoutingHint result = new com.google.spanner.v1.RoutingHint(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.v1.RoutingHint result) { + if (skippedTabletUidBuilder_ == null) { + if (((bitField0_ & 0x00000100) != 0)) { + skippedTabletUid_ = java.util.Collections.unmodifiableList(skippedTabletUid_); + bitField0_ = (bitField0_ & ~0x00000100); + } + result.skippedTabletUid_ = skippedTabletUid_; + } else { + result.skippedTabletUid_ = skippedTabletUidBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.RoutingHint result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.operationUid_ = operationUid_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.databaseId_ = databaseId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.schemaGeneration_ = schemaGeneration_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.key_ = key_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.limitKey_ = limitKey_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.groupUid_ = groupUid_; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.splitId_ = splitId_; + } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.tabletUid_ = tabletUid_; + } + if (((from_bitField0_ & 0x00000200) != 0)) { + result.clientLocation_ = clientLocation_; + } + } + + @java.lang.Override + public Builder 
mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.RoutingHint) { + return mergeFrom((com.google.spanner.v1.RoutingHint) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.RoutingHint other) { + if (other == com.google.spanner.v1.RoutingHint.getDefaultInstance()) return this; + if (other.getOperationUid() != 0L) { + setOperationUid(other.getOperationUid()); + } + if (other.getDatabaseId() != 0L) { + setDatabaseId(other.getDatabaseId()); + } + if (!other.getSchemaGeneration().isEmpty()) { + setSchemaGeneration(other.getSchemaGeneration()); + } + if (!other.getKey().isEmpty()) { + setKey(other.getKey()); + } + if (!other.getLimitKey().isEmpty()) { + setLimitKey(other.getLimitKey()); + } + if (other.getGroupUid() != 0L) { + setGroupUid(other.getGroupUid()); + } + if (other.getSplitId() != 0L) { + setSplitId(other.getSplitId()); + } + if (other.getTabletUid() != 0L) { + setTabletUid(other.getTabletUid()); + } + if (skippedTabletUidBuilder_ == null) { + if (!other.skippedTabletUid_.isEmpty()) { + if (skippedTabletUid_.isEmpty()) { + skippedTabletUid_ = other.skippedTabletUid_; + bitField0_ = (bitField0_ & ~0x00000100); + } else { + ensureSkippedTabletUidIsMutable(); + skippedTabletUid_.addAll(other.skippedTabletUid_); + } + onChanged(); + } + } else { + if (!other.skippedTabletUid_.isEmpty()) { + if (skippedTabletUidBuilder_.isEmpty()) { + skippedTabletUidBuilder_.dispose(); + skippedTabletUidBuilder_ = null; + skippedTabletUid_ = other.skippedTabletUid_; + bitField0_ = (bitField0_ & ~0x00000100); + skippedTabletUidBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetSkippedTabletUidFieldBuilder() + : null; + } else { + skippedTabletUidBuilder_.addAllMessages(other.skippedTabletUid_); + } + } + } + if (!other.getClientLocation().isEmpty()) { + clientLocation_ = other.clientLocation_; + bitField0_ |= 0x00000200; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + operationUid_ = input.readUInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 16: + { + databaseId_ = input.readUInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 26: + { + schemaGeneration_ = input.readBytes(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + key_ = input.readBytes(); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + limitKey_ = input.readBytes(); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 48: + { + groupUid_ = input.readUInt64(); + bitField0_ |= 0x00000020; + break; + } // case 48 + case 56: + { + splitId_ = input.readUInt64(); + bitField0_ |= 0x00000040; + break; + } // case 56 + case 64: + { + tabletUid_ = input.readUInt64(); + bitField0_ |= 0x00000080; + break; + } // case 64 + case 74: + { + com.google.spanner.v1.RoutingHint.SkippedTablet m = + input.readMessage( + com.google.spanner.v1.RoutingHint.SkippedTablet.parser(), + extensionRegistry); + if (skippedTabletUidBuilder_ == null) { + ensureSkippedTabletUidIsMutable(); + skippedTabletUid_.add(m); + } else { + 
skippedTabletUidBuilder_.addMessage(m); + } + break; + } // case 74 + case 82: + { + clientLocation_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000200; + break; + } // case 82 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long operationUid_; + + /** + * + * + *
    +     * A session-scoped unique ID for the operation, computed client-side.
    +     * Requests with the same `operation_uid` should have a shared 'shape',
    +     * meaning that some fields are expected to be the same, such as the SQL
    +     * query, the target table/columns (for reads) etc. Requests with the same
    +     * `operation_uid` are meant to differ only in fields like keys/key
    +     * ranges/query parameters, transaction IDs, etc.
    +     *
    +     * `operation_uid` must be non-zero for `RoutingHint` to be valid.
    +     * 
    + * + * uint64 operation_uid = 1; + * + * @return The operationUid. + */ + @java.lang.Override + public long getOperationUid() { + return operationUid_; + } + + /** + * + * + *
    +     * A session-scoped unique ID for the operation, computed client-side.
    +     * Requests with the same `operation_uid` should have a shared 'shape',
    +     * meaning that some fields are expected to be the same, such as the SQL
    +     * query, the target table/columns (for reads) etc. Requests with the same
    +     * `operation_uid` are meant to differ only in fields like keys/key
    +     * ranges/query parameters, transaction IDs, etc.
    +     *
    +     * `operation_uid` must be non-zero for `RoutingHint` to be valid.
    +     * 
    + * + * uint64 operation_uid = 1; + * + * @param value The operationUid to set. + * @return This builder for chaining. + */ + public Builder setOperationUid(long value) { + + operationUid_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A session-scoped unique ID for the operation, computed client-side.
    +     * Requests with the same `operation_uid` should have a shared 'shape',
    +     * meaning that some fields are expected to be the same, such as the SQL
    +     * query, the target table/columns (for reads) etc. Requests with the same
    +     * `operation_uid` are meant to differ only in fields like keys/key
    +     * ranges/query parameters, transaction IDs, etc.
    +     *
    +     * `operation_uid` must be non-zero for `RoutingHint` to be valid.
    +     * 
    + * + * uint64 operation_uid = 1; + * + * @return This builder for chaining. + */ + public Builder clearOperationUid() { + bitField0_ = (bitField0_ & ~0x00000001); + operationUid_ = 0L; + onChanged(); + return this; + } + + private long databaseId_; + + /** + * + * + *
    +     * The database ID of the database being accessed, see
    +     * `CacheUpdate.database_id`. Should match the cache entries that were used
    +     * to generate the rest of the fields in this `RoutingHint`.
    +     * 
    + * + * uint64 database_id = 2; + * + * @return The databaseId. + */ + @java.lang.Override + public long getDatabaseId() { + return databaseId_; + } + + /** + * + * + *
    +     * The database ID of the database being accessed, see
    +     * `CacheUpdate.database_id`. Should match the cache entries that were used
    +     * to generate the rest of the fields in this `RoutingHint`.
    +     * 
    + * + * uint64 database_id = 2; + * + * @param value The databaseId to set. + * @return This builder for chaining. + */ + public Builder setDatabaseId(long value) { + + databaseId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The database ID of the database being accessed, see
    +     * `CacheUpdate.database_id`. Should match the cache entries that were used
    +     * to generate the rest of the fields in this `RoutingHint`.
    +     * 
    + * + * uint64 database_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearDatabaseId() { + bitField0_ = (bitField0_ & ~0x00000002); + databaseId_ = 0L; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString schemaGeneration_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * The schema generation of the recipe that was used to generate `key` and
    +     * `limit_key`. See also `RecipeList.schema_generation`.
    +     * 
    + * + * bytes schema_generation = 3; + * + * @return The schemaGeneration. + */ + @java.lang.Override + public com.google.protobuf.ByteString getSchemaGeneration() { + return schemaGeneration_; + } + + /** + * + * + *
    +     * The schema generation of the recipe that was used to generate `key` and
    +     * `limit_key`. See also `RecipeList.schema_generation`.
    +     * 
    + * + * bytes schema_generation = 3; + * + * @param value The schemaGeneration to set. + * @return This builder for chaining. + */ + public Builder setSchemaGeneration(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + schemaGeneration_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The schema generation of the recipe that was used to generate `key` and
    +     * `limit_key`. See also `RecipeList.schema_generation`.
    +     * 
    + * + * bytes schema_generation = 3; + * + * @return This builder for chaining. + */ + public Builder clearSchemaGeneration() { + bitField0_ = (bitField0_ & ~0x00000004); + schemaGeneration_ = getDefaultInstance().getSchemaGeneration(); + onChanged(); + return this; + } + + private com.google.protobuf.ByteString key_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * The key / key range that this request accesses. For operations that
    +     * access a single key, `key` should be set and `limit_key` should be empty.
    +     * For operations that access a key range, `key` and `limit_key` should both
    +     * be set, to the inclusive start and exclusive end of the range respectively.
    +     *
    +     * The keys are encoded in "sortable string format" (ssformat), using a
    +     * `KeyRecipe` that is appropriate for the request. See `KeyRecipe` for more
    +     * details.
    +     * 
    + * + * bytes key = 4; + * + * @return The key. + */ + @java.lang.Override + public com.google.protobuf.ByteString getKey() { + return key_; + } + + /** + * + * + *
    +     * The key / key range that this request accesses. For operations that
    +     * access a single key, `key` should be set and `limit_key` should be empty.
    +     * For operations that access a key range, `key` and `limit_key` should both
    +     * be set, to the inclusive start and exclusive end of the range respectively.
    +     *
    +     * The keys are encoded in "sortable string format" (ssformat), using a
    +     * `KeyRecipe` that is appropriate for the request. See `KeyRecipe` for more
    +     * details.
    +     * 
    + * + * bytes key = 4; + * + * @param value The key to set. + * @return This builder for chaining. + */ + public Builder setKey(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + key_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The key / key range that this request accesses. For operations that
    +     * access a single key, `key` should be set and `limit_key` should be empty.
    +     * For operations that access a key range, `key` and `limit_key` should both
    +     * be set, to the inclusive start and exclusive end of the range respectively.
    +     *
    +     * The keys are encoded in "sortable string format" (ssformat), using a
    +     * `KeyRecipe` that is appropriate for the request. See `KeyRecipe` for more
    +     * details.
    +     * 
    + * + * bytes key = 4; + * + * @return This builder for chaining. + */ + public Builder clearKey() { + bitField0_ = (bitField0_ & ~0x00000008); + key_ = getDefaultInstance().getKey(); + onChanged(); + return this; + } + + private com.google.protobuf.ByteString limitKey_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * If this request targets a key range, this is the exclusive end of the
    +     * range. See `key` for more details.
    +     * 
    + * + * bytes limit_key = 5; + * + * @return The limitKey. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLimitKey() { + return limitKey_; + } + + /** + * + * + *
    +     * If this request targets a key range, this is the exclusive end of the
    +     * range. See `key` for more details.
    +     * 
    + * + * bytes limit_key = 5; + * + * @param value The limitKey to set. + * @return This builder for chaining. + */ + public Builder setLimitKey(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + limitKey_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If this request targets a key range, this is the exclusive end of the
    +     * range. See `key` for more details.
    +     * 
    + * + * bytes limit_key = 5; + * + * @return This builder for chaining. + */ + public Builder clearLimitKey() { + bitField0_ = (bitField0_ & ~0x00000010); + limitKey_ = getDefaultInstance().getLimitKey(); + onChanged(); + return this; + } + + private long groupUid_; + + /** + * + * + *
    +     * The group UID of the group that the client believes serves the range
    +     * defined by `key` and `limit_key`. See `Range.group_uid` for more details.
    +     * 
    + * + * uint64 group_uid = 6; + * + * @return The groupUid. + */ + @java.lang.Override + public long getGroupUid() { + return groupUid_; + } + + /** + * + * + *
    +     * The group UID of the group that the client believes serves the range
    +     * defined by `key` and `limit_key`. See `Range.group_uid` for more details.
    +     * 
    + * + * uint64 group_uid = 6; + * + * @param value The groupUid to set. + * @return This builder for chaining. + */ + public Builder setGroupUid(long value) { + + groupUid_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The group UID of the group that the client believes serves the range
    +     * defined by `key` and `limit_key`. See `Range.group_uid` for more details.
    +     * 
    + * + * uint64 group_uid = 6; + * + * @return This builder for chaining. + */ + public Builder clearGroupUid() { + bitField0_ = (bitField0_ & ~0x00000020); + groupUid_ = 0L; + onChanged(); + return this; + } + + private long splitId_; + + /** + * + * + *
    +     * The split ID of the split that the client believes contains the range
    +     * defined by `key` and `limit_key`. See `Range.split_id` for more details.
    +     * 
    + * + * uint64 split_id = 7; + * + * @return The splitId. + */ + @java.lang.Override + public long getSplitId() { + return splitId_; + } + + /** + * + * + *
    +     * The split ID of the split that the client believes contains the range
    +     * defined by `key` and `limit_key`. See `Range.split_id` for more details.
    +     * 
    + * + * uint64 split_id = 7; + * + * @param value The splitId to set. + * @return This builder for chaining. + */ + public Builder setSplitId(long value) { + + splitId_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The split ID of the split that the client believes contains the range
    +     * defined by `key` and `limit_key`. See `Range.split_id` for more details.
    +     * 
    + * + * uint64 split_id = 7; + * + * @return This builder for chaining. + */ + public Builder clearSplitId() { + bitField0_ = (bitField0_ & ~0x00000040); + splitId_ = 0L; + onChanged(); + return this; + } + + private long tabletUid_; + + /** + * + * + *
    +     * The tablet UID of the tablet from group `group_uid` that the client
    +     * believes is best to serve this request. See `Group.local_tablet_uids` and
    +     * `Group.leader_tablet_uid`.
    +     * 
    + * + * uint64 tablet_uid = 8; + * + * @return The tabletUid. + */ + @java.lang.Override + public long getTabletUid() { + return tabletUid_; + } + + /** + * + * + *
    +     * The tablet UID of the tablet from group `group_uid` that the client
    +     * believes is best to serve this request. See `Group.local_tablet_uids` and
    +     * `Group.leader_tablet_uid`.
    +     * 
    + * + * uint64 tablet_uid = 8; + * + * @param value The tabletUid to set. + * @return This builder for chaining. + */ + public Builder setTabletUid(long value) { + + tabletUid_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The tablet UID of the tablet from group `group_uid` that the client
    +     * believes is best to serve this request. See `Group.local_tablet_uids` and
    +     * `Group.leader_tablet_uid`.
    +     * 
    + * + * uint64 tablet_uid = 8; + * + * @return This builder for chaining. + */ + public Builder clearTabletUid() { + bitField0_ = (bitField0_ & ~0x00000080); + tabletUid_ = 0L; + onChanged(); + return this; + } + + private java.util.List skippedTabletUid_ = + java.util.Collections.emptyList(); + + private void ensureSkippedTabletUidIsMutable() { + if (!((bitField0_ & 0x00000100) != 0)) { + skippedTabletUid_ = + new java.util.ArrayList( + skippedTabletUid_); + bitField0_ |= 0x00000100; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.RoutingHint.SkippedTablet, + com.google.spanner.v1.RoutingHint.SkippedTablet.Builder, + com.google.spanner.v1.RoutingHint.SkippedTabletOrBuilder> + skippedTabletUidBuilder_; + + /** + * + * + *
    +     * If the client had multiple options for tablet selection, and some of its
    +     * first choices were unhealthy (e.g., the server is unreachable, or
    +     * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +     * tablets, with their incarnations. The server may include a `CacheUpdate`
    +     * with new locations for those tablets.
    +     * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + public java.util.List + getSkippedTabletUidList() { + if (skippedTabletUidBuilder_ == null) { + return java.util.Collections.unmodifiableList(skippedTabletUid_); + } else { + return skippedTabletUidBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * If the client had multiple options for tablet selection, and some of its
    +     * first choices were unhealthy (e.g., the server is unreachable, or
    +     * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +     * tablets, with their incarnations. The server may include a `CacheUpdate`
    +     * with new locations for those tablets.
    +     * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + public int getSkippedTabletUidCount() { + if (skippedTabletUidBuilder_ == null) { + return skippedTabletUid_.size(); + } else { + return skippedTabletUidBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * If the client had multiple options for tablet selection, and some of its
    +     * first choices were unhealthy (e.g., the server is unreachable, or
    +     * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +     * tablets, with their incarnations. The server may include a `CacheUpdate`
    +     * with new locations for those tablets.
    +     * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + public com.google.spanner.v1.RoutingHint.SkippedTablet getSkippedTabletUid(int index) { + if (skippedTabletUidBuilder_ == null) { + return skippedTabletUid_.get(index); + } else { + return skippedTabletUidBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * If the client had multiple options for tablet selection, and some of its
    +     * first choices were unhealthy (e.g., the server is unreachable, or
    +     * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +     * tablets, with their incarnations. The server may include a `CacheUpdate`
    +     * with new locations for those tablets.
    +     * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + public Builder setSkippedTabletUid( + int index, com.google.spanner.v1.RoutingHint.SkippedTablet value) { + if (skippedTabletUidBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkippedTabletUidIsMutable(); + skippedTabletUid_.set(index, value); + onChanged(); + } else { + skippedTabletUidBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * If the client had multiple options for tablet selection, and some of its
    +     * first choices were unhealthy (e.g., the server is unreachable, or
    +     * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +     * tablets, with their incarnations. The server may include a `CacheUpdate`
    +     * with new locations for those tablets.
    +     * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + public Builder setSkippedTabletUid( + int index, com.google.spanner.v1.RoutingHint.SkippedTablet.Builder builderForValue) { + if (skippedTabletUidBuilder_ == null) { + ensureSkippedTabletUidIsMutable(); + skippedTabletUid_.set(index, builderForValue.build()); + onChanged(); + } else { + skippedTabletUidBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * If the client had multiple options for tablet selection, and some of its
    +     * first choices were unhealthy (e.g., the server is unreachable, or
    +     * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +     * tablets, with their incarnations. The server may include a `CacheUpdate`
    +     * with new locations for those tablets.
    +     * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + public Builder addSkippedTabletUid(com.google.spanner.v1.RoutingHint.SkippedTablet value) { + if (skippedTabletUidBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkippedTabletUidIsMutable(); + skippedTabletUid_.add(value); + onChanged(); + } else { + skippedTabletUidBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * If the client had multiple options for tablet selection, and some of its
    +     * first choices were unhealthy (e.g., the server is unreachable, or
    +     * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +     * tablets, with their incarnations. The server may include a `CacheUpdate`
    +     * with new locations for those tablets.
    +     * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + public Builder addSkippedTabletUid( + int index, com.google.spanner.v1.RoutingHint.SkippedTablet value) { + if (skippedTabletUidBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkippedTabletUidIsMutable(); + skippedTabletUid_.add(index, value); + onChanged(); + } else { + skippedTabletUidBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * If the client had multiple options for tablet selection, and some of its
    +     * first choices were unhealthy (e.g., the server is unreachable, or
    +     * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +     * tablets, with their incarnations. The server may include a `CacheUpdate`
    +     * with new locations for those tablets.
    +     * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + public Builder addSkippedTabletUid( + com.google.spanner.v1.RoutingHint.SkippedTablet.Builder builderForValue) { + if (skippedTabletUidBuilder_ == null) { + ensureSkippedTabletUidIsMutable(); + skippedTabletUid_.add(builderForValue.build()); + onChanged(); + } else { + skippedTabletUidBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * If the client had multiple options for tablet selection, and some of its
    +     * first choices were unhealthy (e.g., the server is unreachable, or
    +     * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +     * tablets, with their incarnations. The server may include a `CacheUpdate`
    +     * with new locations for those tablets.
    +     * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + public Builder addSkippedTabletUid( + int index, com.google.spanner.v1.RoutingHint.SkippedTablet.Builder builderForValue) { + if (skippedTabletUidBuilder_ == null) { + ensureSkippedTabletUidIsMutable(); + skippedTabletUid_.add(index, builderForValue.build()); + onChanged(); + } else { + skippedTabletUidBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * If the client had multiple options for tablet selection, and some of its
    +     * first choices were unhealthy (e.g., the server is unreachable, or
    +     * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +     * tablets, with their incarnations. The server may include a `CacheUpdate`
    +     * with new locations for those tablets.
    +     * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + public Builder addAllSkippedTabletUid( + java.lang.Iterable values) { + if (skippedTabletUidBuilder_ == null) { + ensureSkippedTabletUidIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, skippedTabletUid_); + onChanged(); + } else { + skippedTabletUidBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * If the client had multiple options for tablet selection, and some of its
    +     * first choices were unhealthy (e.g., the server is unreachable, or
    +     * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +     * tablets, with their incarnations. The server may include a `CacheUpdate`
    +     * with new locations for those tablets.
    +     * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + public Builder clearSkippedTabletUid() { + if (skippedTabletUidBuilder_ == null) { + skippedTabletUid_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000100); + onChanged(); + } else { + skippedTabletUidBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * If the client had multiple options for tablet selection, and some of its
    +     * first choices were unhealthy (e.g., the server is unreachable, or
    +     * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +     * tablets, with their incarnations. The server may include a `CacheUpdate`
    +     * with new locations for those tablets.
    +     * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + public Builder removeSkippedTabletUid(int index) { + if (skippedTabletUidBuilder_ == null) { + ensureSkippedTabletUidIsMutable(); + skippedTabletUid_.remove(index); + onChanged(); + } else { + skippedTabletUidBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * If the client had multiple options for tablet selection, and some of its
    +     * first choices were unhealthy (e.g., the server is unreachable, or
    +     * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +     * tablets, with their incarnations. The server may include a `CacheUpdate`
    +     * with new locations for those tablets.
    +     * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + public com.google.spanner.v1.RoutingHint.SkippedTablet.Builder getSkippedTabletUidBuilder( + int index) { + return internalGetSkippedTabletUidFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * If the client had multiple options for tablet selection, and some of its
    +     * first choices were unhealthy (e.g., the server is unreachable, or
    +     * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +     * tablets, with their incarnations. The server may include a `CacheUpdate`
    +     * with new locations for those tablets.
    +     * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + public com.google.spanner.v1.RoutingHint.SkippedTabletOrBuilder getSkippedTabletUidOrBuilder( + int index) { + if (skippedTabletUidBuilder_ == null) { + return skippedTabletUid_.get(index); + } else { + return skippedTabletUidBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * If the client had multiple options for tablet selection, and some of its
    +     * first choices were unhealthy (e.g., the server is unreachable, or
    +     * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +     * tablets, with their incarnations. The server may include a `CacheUpdate`
    +     * with new locations for those tablets.
    +     * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + public java.util.List + getSkippedTabletUidOrBuilderList() { + if (skippedTabletUidBuilder_ != null) { + return skippedTabletUidBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(skippedTabletUid_); + } + } + + /** + * + * + *
    +     * If the client had multiple options for tablet selection, and some of its
    +     * first choices were unhealthy (e.g., the server is unreachable, or
    +     * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +     * tablets, with their incarnations. The server may include a `CacheUpdate`
    +     * with new locations for those tablets.
    +     * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + public com.google.spanner.v1.RoutingHint.SkippedTablet.Builder addSkippedTabletUidBuilder() { + return internalGetSkippedTabletUidFieldBuilder() + .addBuilder(com.google.spanner.v1.RoutingHint.SkippedTablet.getDefaultInstance()); + } + + /** + * + * + *
    +     * If the client had multiple options for tablet selection, and some of its
    +     * first choices were unhealthy (e.g., the server is unreachable, or
    +     * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +     * tablets, with their incarnations. The server may include a `CacheUpdate`
    +     * with new locations for those tablets.
    +     * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + public com.google.spanner.v1.RoutingHint.SkippedTablet.Builder addSkippedTabletUidBuilder( + int index) { + return internalGetSkippedTabletUidFieldBuilder() + .addBuilder(index, com.google.spanner.v1.RoutingHint.SkippedTablet.getDefaultInstance()); + } + + /** + * + * + *
    +     * If the client had multiple options for tablet selection, and some of its
    +     * first choices were unhealthy (e.g., the server is unreachable, or
    +     * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +     * tablets, with their incarnations. The server may include a `CacheUpdate`
    +     * with new locations for those tablets.
    +     * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + public java.util.List + getSkippedTabletUidBuilderList() { + return internalGetSkippedTabletUidFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.RoutingHint.SkippedTablet, + com.google.spanner.v1.RoutingHint.SkippedTablet.Builder, + com.google.spanner.v1.RoutingHint.SkippedTabletOrBuilder> + internalGetSkippedTabletUidFieldBuilder() { + if (skippedTabletUidBuilder_ == null) { + skippedTabletUidBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.RoutingHint.SkippedTablet, + com.google.spanner.v1.RoutingHint.SkippedTablet.Builder, + com.google.spanner.v1.RoutingHint.SkippedTabletOrBuilder>( + skippedTabletUid_, + ((bitField0_ & 0x00000100) != 0), + getParentForChildren(), + isClean()); + skippedTabletUid_ = null; + } + return skippedTabletUidBuilder_; + } + + private java.lang.Object clientLocation_ = ""; + + /** + * + * + *
    +     * If present, the client's current location. This should be the name of a
    +     * Google Cloud zone or region, such as "us-central1".
    +     *
    +     * If absent, the client's location will be assumed to be the same as the
    +     * location of the server the client ends up connected to.
    +     *
    +     * Locations are primarily valuable for clients that connect from regions
    +     * other than the ones that contain the Spanner database.
    +     * 
    + * + * string client_location = 10; + * + * @return The clientLocation. + */ + public java.lang.String getClientLocation() { + java.lang.Object ref = clientLocation_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + clientLocation_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If present, the client's current location. This should be the name of a
    +     * Google Cloud zone or region, such as "us-central1".
    +     *
    +     * If absent, the client's location will be assumed to be the same as the
    +     * location of the server the client ends up connected to.
    +     *
    +     * Locations are primarily valuable for clients that connect from regions
    +     * other than the ones that contain the Spanner database.
    +     * 
    + * + * string client_location = 10; + * + * @return The bytes for clientLocation. + */ + public com.google.protobuf.ByteString getClientLocationBytes() { + java.lang.Object ref = clientLocation_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + clientLocation_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If present, the client's current location. This should be the name of a
    +     * Google Cloud zone or region, such as "us-central1".
    +     *
    +     * If absent, the client's location will be assumed to be the same as the
    +     * location of the server the client ends up connected to.
    +     *
    +     * Locations are primarily valuable for clients that connect from regions
    +     * other than the ones that contain the Spanner database.
    +     * 
    + * + * string client_location = 10; + * + * @param value The clientLocation to set. + * @return This builder for chaining. + */ + public Builder setClientLocation(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + clientLocation_ = value; + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If present, the client's current location. This should be the name of a
    +     * Google Cloud zone or region, such as "us-central1".
    +     *
    +     * If absent, the client's location will be assumed to be the same as the
    +     * location of the server the client ends up connected to.
    +     *
    +     * Locations are primarily valuable for clients that connect from regions
    +     * other than the ones that contain the Spanner database.
    +     * 
    + * + * string client_location = 10; + * + * @return This builder for chaining. + */ + public Builder clearClientLocation() { + clientLocation_ = getDefaultInstance().getClientLocation(); + bitField0_ = (bitField0_ & ~0x00000200); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If present, the client's current location. This should be the name of a
    +     * Google Cloud zone or region, such as "us-central1".
    +     *
    +     * If absent, the client's location will be assumed to be the same as the
    +     * location of the server the client ends up connected to.
    +     *
    +     * Locations are primarily valuable for clients that connect from regions
    +     * other than the ones that contain the Spanner database.
    +     * 
    + * + * string client_location = 10; + * + * @param value The bytes for clientLocation to set. + * @return This builder for chaining. + */ + public Builder setClientLocationBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + clientLocation_ = value; + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.RoutingHint) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.RoutingHint) + private static final com.google.spanner.v1.RoutingHint DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.RoutingHint(); + } + + public static com.google.spanner.v1.RoutingHint getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RoutingHint parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.RoutingHint 
getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RoutingHintOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RoutingHintOrBuilder.java new file mode 100644 index 000000000000..eb685eaa8940 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RoutingHintOrBuilder.java @@ -0,0 +1,270 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/location.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface RoutingHintOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.RoutingHint) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * A session-scoped unique ID for the operation, computed client-side.
    +   * Requests with the same `operation_uid` should have a shared 'shape',
    +   * meaning that some fields are expected to be the same, such as the SQL
    +   * query, the target table/columns (for reads) etc. Requests with the same
    +   * `operation_uid` are meant to differ only in fields like keys/key
    +   * ranges/query parameters, transaction IDs, etc.
    +   *
    +   * `operation_uid` must be non-zero for `RoutingHint` to be valid.
    +   * 
    + * + * uint64 operation_uid = 1; + * + * @return The operationUid. + */ + long getOperationUid(); + + /** + * + * + *
    +   * The database ID of the database being accessed, see
    +   * `CacheUpdate.database_id`. Should match the cache entries that were used
    +   * to generate the rest of the fields in this `RoutingHint`.
    +   * 
    + * + * uint64 database_id = 2; + * + * @return The databaseId. + */ + long getDatabaseId(); + + /** + * + * + *
    +   * The schema generation of the recipe that was used to generate `key` and
    +   * `limit_key`. See also `RecipeList.schema_generation`.
    +   * 
    + * + * bytes schema_generation = 3; + * + * @return The schemaGeneration. + */ + com.google.protobuf.ByteString getSchemaGeneration(); + + /** + * + * + *
    +   * The key / key range that this request accesses. For operations that
    +   * access a single key, `key` should be set and `limit_key` should be empty.
    +   * For operations that access a key range, `key` and `limit_key` should both
    +   * be set, to the inclusive start and exclusive end of the range respectively.
    +   *
    +   * The keys are encoded in "sortable string format" (ssformat), using a
    +   * `KeyRecipe` that is appropriate for the request. See `KeyRecipe` for more
    +   * details.
    +   * 
    + * + * bytes key = 4; + * + * @return The key. + */ + com.google.protobuf.ByteString getKey(); + + /** + * + * + *
    +   * If this request targets a key range, this is the exclusive end of the
    +   * range. See `key` for more details.
    +   * 
    + * + * bytes limit_key = 5; + * + * @return The limitKey. + */ + com.google.protobuf.ByteString getLimitKey(); + + /** + * + * + *
    +   * The group UID of the group that the client believes serves the range
    +   * defined by `key` and `limit_key`. See `Range.group_uid` for more details.
    +   * 
    + * + * uint64 group_uid = 6; + * + * @return The groupUid. + */ + long getGroupUid(); + + /** + * + * + *
    +   * The split ID of the split that the client believes contains the range
    +   * defined by `key` and `limit_key`. See `Range.split_id` for more details.
    +   * 
    + * + * uint64 split_id = 7; + * + * @return The splitId. + */ + long getSplitId(); + + /** + * + * + *
    +   * The tablet UID of the tablet from group `group_uid` that the client
    +   * believes is best to serve this request. See `Group.local_tablet_uids` and
    +   * `Group.leader_tablet_uid`.
    +   * 
    + * + * uint64 tablet_uid = 8; + * + * @return The tabletUid. + */ + long getTabletUid(); + + /** + * + * + *
    +   * If the client had multiple options for tablet selection, and some of its
    +   * first choices were unhealthy (e.g., the server is unreachable, or
    +   * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +   * tablets, with their incarnations. The server may include a `CacheUpdate`
    +   * with new locations for those tablets.
    +   * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + java.util.List getSkippedTabletUidList(); + + /** + * + * + *
    +   * If the client had multiple options for tablet selection, and some of its
    +   * first choices were unhealthy (e.g., the server is unreachable, or
    +   * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +   * tablets, with their incarnations. The server may include a `CacheUpdate`
    +   * with new locations for those tablets.
    +   * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + com.google.spanner.v1.RoutingHint.SkippedTablet getSkippedTabletUid(int index); + + /** + * + * + *
    +   * If the client had multiple options for tablet selection, and some of its
    +   * first choices were unhealthy (e.g., the server is unreachable, or
    +   * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +   * tablets, with their incarnations. The server may include a `CacheUpdate`
    +   * with new locations for those tablets.
    +   * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + int getSkippedTabletUidCount(); + + /** + * + * + *
    +   * If the client had multiple options for tablet selection, and some of its
    +   * first choices were unhealthy (e.g., the server is unreachable, or
    +   * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +   * tablets, with their incarnations. The server may include a `CacheUpdate`
    +   * with new locations for those tablets.
    +   * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + java.util.List + getSkippedTabletUidOrBuilderList(); + + /** + * + * + *
    +   * If the client had multiple options for tablet selection, and some of its
    +   * first choices were unhealthy (e.g., the server is unreachable, or
    +   * `Tablet.skip` is true), this field will contain the tablet UIDs of those
    +   * tablets, with their incarnations. The server may include a `CacheUpdate`
    +   * with new locations for those tablets.
    +   * 
    + * + * repeated .google.spanner.v1.RoutingHint.SkippedTablet skipped_tablet_uid = 9; + */ + com.google.spanner.v1.RoutingHint.SkippedTabletOrBuilder getSkippedTabletUidOrBuilder(int index); + + /** + * + * + *
    +   * If present, the client's current location. This should be the name of a
    +   * Google Cloud zone or region, such as "us-central1".
    +   *
    +   * If absent, the client's location will be assumed to be the same as the
    +   * location of the server the client ends up connected to.
    +   *
    +   * Locations are primarily valuable for clients that connect from regions
    +   * other than the ones that contain the Spanner database.
    +   * 
    + * + * string client_location = 10; + * + * @return The clientLocation. + */ + java.lang.String getClientLocation(); + + /** + * + * + *
    +   * If present, the client's current location. This should be the name of a
    +   * Google Cloud zone or region, such as "us-central1".
    +   *
    +   * If absent, the client's location will be assumed to be the same as the
    +   * location of the server the client ends up connected to.
    +   *
    +   * Locations are primarily valuable for clients that connect from regions
    +   * other than the ones that contain the Spanner database.
    +   * 
    + * + * string client_location = 10; + * + * @return The bytes for clientLocation. + */ + com.google.protobuf.ByteString getClientLocationBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Session.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Session.java new file mode 100644 index 000000000000..ffddd0a0470d --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Session.java @@ -0,0 +1,1972 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * A session in the Cloud Spanner API.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.Session} + */ +@com.google.protobuf.Generated +public final class Session extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.Session) + SessionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Session"); + } + + // Use Session.newBuilder() to construct. + private Session(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Session() { + name_ = ""; + creatorRole_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto.internal_static_google_spanner_v1_Session_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 2: + return internalGetLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_Session_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Session.class, com.google.spanner.v1.Session.Builder.class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +   * Output only. The name of the session. This is always system-assigned.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +   * Output only. The name of the session. This is always system-assigned.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int LABELS_FIELD_NUMBER = 2; + + private static final class LabelsDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_Session_LabelsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + @SuppressWarnings("serial") + private com.google.protobuf.MapField labels_; + + private com.google.protobuf.MapField internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField(LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + + /** + * + * + *
    +   * The labels for the session.
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
    +   * * No more than 64 labels can be associated with a given session.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   * 
    + * + * map<string, string> labels = 2; + */ + @java.lang.Override + public boolean containsLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetLabels().getMap().containsKey(key); + } + + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + + /** + * + * + *
    +   * The labels for the session.
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
    +   * * No more than 64 labels can be associated with a given session.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   * 
    + * + * map<string, string> labels = 2; + */ + @java.lang.Override + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + + /** + * + * + *
    +   * The labels for the session.
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
    +   * * No more than 64 labels can be associated with a given session.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   * 
    + * + * map<string, string> labels = 2; + */ + @java.lang.Override + public /* nullable */ java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
    +   * The labels for the session.
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
    +   * * No more than 64 labels can be associated with a given session.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   * 
    + * + * map<string, string> labels = 2; + */ + @java.lang.Override + public java.lang.String getLabelsOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int CREATE_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp createTime_; + + /** + * + * + *
    +   * Output only. The timestamp when the session is created.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + @java.lang.Override + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Output only. The timestamp when the session is created.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + /** + * + * + *
    +   * Output only. The timestamp when the session is created.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + + public static final int APPROXIMATE_LAST_USE_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp approximateLastUseTime_; + + /** + * + * + *
    +   * Output only. The approximate timestamp when the session is last used. It's
    +   * typically earlier than the actual last use time.
    +   * 
    + * + * + * .google.protobuf.Timestamp approximate_last_use_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the approximateLastUseTime field is set. + */ + @java.lang.Override + public boolean hasApproximateLastUseTime() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * Output only. The approximate timestamp when the session is last used. It's
    +   * typically earlier than the actual last use time.
    +   * 
    + * + * + * .google.protobuf.Timestamp approximate_last_use_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The approximateLastUseTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getApproximateLastUseTime() { + return approximateLastUseTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : approximateLastUseTime_; + } + + /** + * + * + *
    +   * Output only. The approximate timestamp when the session is last used. It's
    +   * typically earlier than the actual last use time.
    +   * 
    + * + * + * .google.protobuf.Timestamp approximate_last_use_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getApproximateLastUseTimeOrBuilder() { + return approximateLastUseTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : approximateLastUseTime_; + } + + public static final int CREATOR_ROLE_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object creatorRole_ = ""; + + /** + * + * + *
    +   * The database role which created this session.
    +   * 
    + * + * string creator_role = 5; + * + * @return The creatorRole. + */ + @java.lang.Override + public java.lang.String getCreatorRole() { + java.lang.Object ref = creatorRole_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + creatorRole_ = s; + return s; + } + } + + /** + * + * + *
    +   * The database role which created this session.
    +   * 
    + * + * string creator_role = 5; + * + * @return The bytes for creatorRole. + */ + @java.lang.Override + public com.google.protobuf.ByteString getCreatorRoleBytes() { + java.lang.Object ref = creatorRole_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + creatorRole_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int MULTIPLEXED_FIELD_NUMBER = 6; + private boolean multiplexed_ = false; + + /** + * + * + *
    +   * Optional. If `true`, specifies a multiplexed session. Use a multiplexed
    +   * session for multiple, concurrent operations including any combination of
    +   * read-only and read-write transactions. Use
    +   * [`sessions.create`][google.spanner.v1.Spanner.CreateSession] to create
    +   * multiplexed sessions. Don't use
    +   * [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions] to
    +   * create a multiplexed session. You can't delete or list multiplexed
    +   * sessions.
    +   * 
    + * + * bool multiplexed = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The multiplexed. + */ + @java.lang.Override + public boolean getMultiplexed() { + return multiplexed_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + com.google.protobuf.GeneratedMessage.serializeStringMapTo( + output, internalGetLabels(), LabelsDefaultEntryHolder.defaultEntry, 2); + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getCreateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(4, getApproximateLastUseTime()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(creatorRole_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, creatorRole_); + } + if (multiplexed_ != false) { + output.writeBool(6, multiplexed_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + for (java.util.Map.Entry entry : + internalGetLabels().getMap().entrySet()) { + com.google.protobuf.MapEntry labels__ = + LabelsDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, labels__); + } + if (((bitField0_ & 0x00000001) != 0)) 
{ + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getCreateTime()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(4, getApproximateLastUseTime()); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(creatorRole_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, creatorRole_); + } + if (multiplexed_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(6, multiplexed_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.Session)) { + return super.equals(obj); + } + com.google.spanner.v1.Session other = (com.google.spanner.v1.Session) obj; + + if (!getName().equals(other.getName())) return false; + if (!internalGetLabels().equals(other.internalGetLabels())) return false; + if (hasCreateTime() != other.hasCreateTime()) return false; + if (hasCreateTime()) { + if (!getCreateTime().equals(other.getCreateTime())) return false; + } + if (hasApproximateLastUseTime() != other.hasApproximateLastUseTime()) return false; + if (hasApproximateLastUseTime()) { + if (!getApproximateLastUseTime().equals(other.getApproximateLastUseTime())) return false; + } + if (!getCreatorRole().equals(other.getCreatorRole())) return false; + if (getMultiplexed() != other.getMultiplexed()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (!internalGetLabels().getMap().isEmpty()) { + hash = (37 * hash) + LABELS_FIELD_NUMBER; + 
hash = (53 * hash) + internalGetLabels().hashCode(); + } + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + } + if (hasApproximateLastUseTime()) { + hash = (37 * hash) + APPROXIMATE_LAST_USE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getApproximateLastUseTime().hashCode(); + } + hash = (37 * hash) + CREATOR_ROLE_FIELD_NUMBER; + hash = (53 * hash) + getCreatorRole().hashCode(); + hash = (37 * hash) + MULTIPLEXED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getMultiplexed()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.Session parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Session parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Session parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Session parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Session parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Session parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Session parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Session parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Session parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Session parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Session parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Session parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.Session prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == 
DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * A session in the Cloud Spanner API.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.Session} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.Session) + com.google.spanner.v1.SessionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_Session_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( + int number) { + switch (number) { + case 2: + return internalGetLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( + int number) { + switch (number) { + case 2: + return internalGetMutableLabels(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_Session_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Session.class, com.google.spanner.v1.Session.Builder.class); + } + + // Construct using com.google.spanner.v1.Session.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetCreateTimeFieldBuilder(); + internalGetApproximateLastUseTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 
0; + name_ = ""; + internalGetMutableLabels().clear(); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + approximateLastUseTime_ = null; + if (approximateLastUseTimeBuilder_ != null) { + approximateLastUseTimeBuilder_.dispose(); + approximateLastUseTimeBuilder_ = null; + } + creatorRole_ = ""; + multiplexed_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.SpannerProto + .internal_static_google_spanner_v1_Session_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.Session getDefaultInstanceForType() { + return com.google.spanner.v1.Session.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.Session build() { + com.google.spanner.v1.Session result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.Session buildPartial() { + com.google.spanner.v1.Session result = new com.google.spanner.v1.Session(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.Session result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.labels_ = internalGetLabels(); + result.labels_.makeImmutable(); + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.createTime_ = createTimeBuilder_ == null ? createTime_ : createTimeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.approximateLastUseTime_ = + approximateLastUseTimeBuilder_ == null + ? 
approximateLastUseTime_ + : approximateLastUseTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.creatorRole_ = creatorRole_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.multiplexed_ = multiplexed_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.Session) { + return mergeFrom((com.google.spanner.v1.Session) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.Session other) { + if (other == com.google.spanner.v1.Session.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + internalGetMutableLabels().mergeFrom(other.internalGetLabels()); + bitField0_ |= 0x00000002; + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (other.hasApproximateLastUseTime()) { + mergeApproximateLastUseTime(other.getApproximateLastUseTime()); + } + if (!other.getCreatorRole().isEmpty()) { + creatorRole_ = other.creatorRole_; + bitField0_ |= 0x00000010; + onChanged(); + } + if (other.getMultiplexed() != false) { + setMultiplexed(other.getMultiplexed()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 
0x00000001; + break; + } // case 10 + case 18: + { + com.google.protobuf.MapEntry labels__ = + input.readMessage( + LabelsDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + internalGetMutableLabels() + .getMutableMap() + .put(labels__.getKey(), labels__.getValue()); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetCreateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetApproximateLastUseTimeFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: + { + creatorRole_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 48: + { + multiplexed_ = input.readBool(); + bitField0_ |= 0x00000020; + break; + } // case 48 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +     * Output only. The name of the session. This is always system-assigned.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Output only. The name of the session. This is always system-assigned.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Output only. The name of the session. This is always system-assigned.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The name of the session. This is always system-assigned.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The name of the session. This is always system-assigned.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.protobuf.MapField labels_; + + private com.google.protobuf.MapField internalGetLabels() { + if (labels_ == null) { + return com.google.protobuf.MapField.emptyMapField(LabelsDefaultEntryHolder.defaultEntry); + } + return labels_; + } + + private com.google.protobuf.MapField + internalGetMutableLabels() { + if (labels_ == null) { + labels_ = com.google.protobuf.MapField.newMapField(LabelsDefaultEntryHolder.defaultEntry); + } + if (!labels_.isMutable()) { + labels_ = labels_.copy(); + } + bitField0_ |= 0x00000002; + onChanged(); + return labels_; + } + + public int getLabelsCount() { + return internalGetLabels().getMap().size(); + } + + /** + * + * + *
    +     * The labels for the session.
    +     *
    +     * * Label keys must be between 1 and 63 characters long and must conform to
    +     * the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
    +     * * Label values must be between 0 and 63 characters long and must conform
    +     * to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
    +     * * No more than 64 labels can be associated with a given session.
    +     *
    +     * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +     * 
    + * + * map<string, string> labels = 2; + */ + @java.lang.Override + public boolean containsLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + return internalGetLabels().getMap().containsKey(key); + } + + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Override + @java.lang.Deprecated + public java.util.Map getLabels() { + return getLabelsMap(); + } + + /** + * + * + *
    +     * The labels for the session.
    +     *
    +     * * Label keys must be between 1 and 63 characters long and must conform to
    +     * the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
    +     * * Label values must be between 0 and 63 characters long and must conform
    +     * to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
    +     * * No more than 64 labels can be associated with a given session.
    +     *
    +     * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +     * 
    + * + * map<string, string> labels = 2; + */ + @java.lang.Override + public java.util.Map getLabelsMap() { + return internalGetLabels().getMap(); + } + + /** + * + * + *
    +     * The labels for the session.
    +     *
    +     * * Label keys must be between 1 and 63 characters long and must conform to
    +     * the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
    +     * * Label values must be between 0 and 63 characters long and must conform
    +     * to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
    +     * * No more than 64 labels can be associated with a given session.
    +     *
    +     * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +     * 
    + * + * map<string, string> labels = 2; + */ + @java.lang.Override + public /* nullable */ java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + + /** + * + * + *
    +     * The labels for the session.
    +     *
    +     * * Label keys must be between 1 and 63 characters long and must conform to
    +     * the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
    +     * * Label values must be between 0 and 63 characters long and must conform
    +     * to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
    +     * * No more than 64 labels can be associated with a given session.
    +     *
    +     * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +     * 
    + * + * map<string, string> labels = 2; + */ + @java.lang.Override + public java.lang.String getLabelsOrThrow(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + java.util.Map map = internalGetLabels().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearLabels() { + bitField0_ = (bitField0_ & ~0x00000002); + internalGetMutableLabels().getMutableMap().clear(); + return this; + } + + /** + * + * + *
    +     * The labels for the session.
    +     *
    +     * * Label keys must be between 1 and 63 characters long and must conform to
    +     * the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
    +     * * Label values must be between 0 and 63 characters long and must conform
    +     * to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
    +     * * No more than 64 labels can be associated with a given session.
    +     *
    +     * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +     * 
    + * + * map<string, string> labels = 2; + */ + public Builder removeLabels(java.lang.String key) { + if (key == null) { + throw new NullPointerException("map key"); + } + internalGetMutableLabels().getMutableMap().remove(key); + return this; + } + + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableLabels() { + bitField0_ |= 0x00000002; + return internalGetMutableLabels().getMutableMap(); + } + + /** + * + * + *
    +     * The labels for the session.
    +     *
    +     * * Label keys must be between 1 and 63 characters long and must conform to
    +     * the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
    +     * * Label values must be between 0 and 63 characters long and must conform
    +     * to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
    +     * * No more than 64 labels can be associated with a given session.
    +     *
    +     * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +     * 
    + * + * map<string, string> labels = 2; + */ + public Builder putLabels(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new NullPointerException("map key"); + } + if (value == null) { + throw new NullPointerException("map value"); + } + internalGetMutableLabels().getMutableMap().put(key, value); + bitField0_ |= 0x00000002; + return this; + } + + /** + * + * + *
    +     * The labels for the session.
    +     *
    +     * * Label keys must be between 1 and 63 characters long and must conform to
    +     * the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
    +     * * Label values must be between 0 and 63 characters long and must conform
    +     * to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
    +     * * No more than 64 labels can be associated with a given session.
    +     *
    +     * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +     * 
    + * + * map<string, string> labels = 2; + */ + public Builder putAllLabels(java.util.Map values) { + internalGetMutableLabels().getMutableMap().putAll(values); + bitField0_ |= 0x00000002; + return this; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + + /** + * + * + *
    +     * Output only. The timestamp when the session is created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * Output only. The timestamp when the session is created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. The timestamp when the session is created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + } else { + createTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The timestamp when the session is created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The timestamp when the session is created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && createTime_ != null + && createTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCreateTimeBuilder().mergeFrom(value); + } else { + createTime_ = value; + } + } else { + createTimeBuilder_.mergeFrom(value); + } + if (createTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. The timestamp when the session is created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000004); + createTime_ = null; + if (createTimeBuilder_ != null) { + createTimeBuilder_.dispose(); + createTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The timestamp when the session is created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetCreateTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. The timestamp when the session is created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + + /** + * + * + *
    +     * Output only. The timestamp when the session is created.
    +     * 
    + * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private com.google.protobuf.Timestamp approximateLastUseTime_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + approximateLastUseTimeBuilder_; + + /** + * + * + *
    +     * Output only. The approximate timestamp when the session is last used. It's
    +     * typically earlier than the actual last use time.
    +     * 
    + * + * + * .google.protobuf.Timestamp approximate_last_use_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the approximateLastUseTime field is set. + */ + public boolean hasApproximateLastUseTime() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Output only. The approximate timestamp when the session is last used. It's
    +     * typically earlier than the actual last use time.
    +     * 
    + * + * + * .google.protobuf.Timestamp approximate_last_use_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The approximateLastUseTime. + */ + public com.google.protobuf.Timestamp getApproximateLastUseTime() { + if (approximateLastUseTimeBuilder_ == null) { + return approximateLastUseTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : approximateLastUseTime_; + } else { + return approximateLastUseTimeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Output only. The approximate timestamp when the session is last used. It's
    +     * typically earlier than the actual last use time.
    +     * 
    + * + * + * .google.protobuf.Timestamp approximate_last_use_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setApproximateLastUseTime(com.google.protobuf.Timestamp value) { + if (approximateLastUseTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + approximateLastUseTime_ = value; + } else { + approximateLastUseTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The approximate timestamp when the session is last used. It's
    +     * typically earlier than the actual last use time.
    +     * 
    + * + * + * .google.protobuf.Timestamp approximate_last_use_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setApproximateLastUseTime( + com.google.protobuf.Timestamp.Builder builderForValue) { + if (approximateLastUseTimeBuilder_ == null) { + approximateLastUseTime_ = builderForValue.build(); + } else { + approximateLastUseTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The approximate timestamp when the session is last used. It's
    +     * typically earlier than the actual last use time.
    +     * 
    + * + * + * .google.protobuf.Timestamp approximate_last_use_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeApproximateLastUseTime(com.google.protobuf.Timestamp value) { + if (approximateLastUseTimeBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && approximateLastUseTime_ != null + && approximateLastUseTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getApproximateLastUseTimeBuilder().mergeFrom(value); + } else { + approximateLastUseTime_ = value; + } + } else { + approximateLastUseTimeBuilder_.mergeFrom(value); + } + if (approximateLastUseTime_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Output only. The approximate timestamp when the session is last used. It's
    +     * typically earlier than the actual last use time.
    +     * 
    + * + * + * .google.protobuf.Timestamp approximate_last_use_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearApproximateLastUseTime() { + bitField0_ = (bitField0_ & ~0x00000008); + approximateLastUseTime_ = null; + if (approximateLastUseTimeBuilder_ != null) { + approximateLastUseTimeBuilder_.dispose(); + approximateLastUseTimeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Output only. The approximate timestamp when the session is last used. It's
    +     * typically earlier than the actual last use time.
    +     * 
    + * + * + * .google.protobuf.Timestamp approximate_last_use_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getApproximateLastUseTimeBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetApproximateLastUseTimeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Output only. The approximate timestamp when the session is last used. It's
    +     * typically earlier than the actual last use time.
    +     * 
    + * + * + * .google.protobuf.Timestamp approximate_last_use_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getApproximateLastUseTimeOrBuilder() { + if (approximateLastUseTimeBuilder_ != null) { + return approximateLastUseTimeBuilder_.getMessageOrBuilder(); + } else { + return approximateLastUseTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : approximateLastUseTime_; + } + } + + /** + * + * + *
    +     * Output only. The approximate timestamp when the session is last used. It's
    +     * typically earlier than the actual last use time.
    +     * 
    + * + * + * .google.protobuf.Timestamp approximate_last_use_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetApproximateLastUseTimeFieldBuilder() { + if (approximateLastUseTimeBuilder_ == null) { + approximateLastUseTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getApproximateLastUseTime(), getParentForChildren(), isClean()); + approximateLastUseTime_ = null; + } + return approximateLastUseTimeBuilder_; + } + + private java.lang.Object creatorRole_ = ""; + + /** + * + * + *
    +     * The database role which created this session.
    +     * 
    + * + * string creator_role = 5; + * + * @return The creatorRole. + */ + public java.lang.String getCreatorRole() { + java.lang.Object ref = creatorRole_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + creatorRole_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The database role which created this session.
    +     * 
    + * + * string creator_role = 5; + * + * @return The bytes for creatorRole. + */ + public com.google.protobuf.ByteString getCreatorRoleBytes() { + java.lang.Object ref = creatorRole_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + creatorRole_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The database role which created this session.
    +     * 
    + * + * string creator_role = 5; + * + * @param value The creatorRole to set. + * @return This builder for chaining. + */ + public Builder setCreatorRole(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + creatorRole_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The database role which created this session.
    +     * 
    + * + * string creator_role = 5; + * + * @return This builder for chaining. + */ + public Builder clearCreatorRole() { + creatorRole_ = getDefaultInstance().getCreatorRole(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The database role which created this session.
    +     * 
    + * + * string creator_role = 5; + * + * @param value The bytes for creatorRole to set. + * @return This builder for chaining. + */ + public Builder setCreatorRoleBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + creatorRole_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + private boolean multiplexed_; + + /** + * + * + *
    +     * Optional. If `true`, specifies a multiplexed session. Use a multiplexed
    +     * session for multiple, concurrent operations including any combination of
    +     * read-only and read-write transactions. Use
    +     * [`sessions.create`][google.spanner.v1.Spanner.CreateSession] to create
    +     * multiplexed sessions. Don't use
    +     * [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions] to
    +     * create a multiplexed session. You can't delete or list multiplexed
    +     * sessions.
    +     * 
    + * + * bool multiplexed = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The multiplexed. + */ + @java.lang.Override + public boolean getMultiplexed() { + return multiplexed_; + } + + /** + * + * + *
    +     * Optional. If `true`, specifies a multiplexed session. Use a multiplexed
    +     * session for multiple, concurrent operations including any combination of
    +     * read-only and read-write transactions. Use
    +     * [`sessions.create`][google.spanner.v1.Spanner.CreateSession] to create
    +     * multiplexed sessions. Don't use
    +     * [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions] to
    +     * create a multiplexed session. You can't delete or list multiplexed
    +     * sessions.
    +     * 
    + * + * bool multiplexed = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The multiplexed to set. + * @return This builder for chaining. + */ + public Builder setMultiplexed(boolean value) { + + multiplexed_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. If `true`, specifies a multiplexed session. Use a multiplexed
    +     * session for multiple, concurrent operations including any combination of
    +     * read-only and read-write transactions. Use
    +     * [`sessions.create`][google.spanner.v1.Spanner.CreateSession] to create
    +     * multiplexed sessions. Don't use
    +     * [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions] to
    +     * create a multiplexed session. You can't delete or list multiplexed
    +     * sessions.
    +     * 
    + * + * bool multiplexed = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearMultiplexed() { + bitField0_ = (bitField0_ & ~0x00000020); + multiplexed_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.Session) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.Session) + private static final com.google.spanner.v1.Session DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.Session(); + } + + public static com.google.spanner.v1.Session getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Session parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.Session getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/SessionName.java 
b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/SessionName.java new file mode 100644 index 000000000000..a71cf0eac13d --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/SessionName.java @@ -0,0 +1,259 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.spanner.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. 
+@Generated("by gapic-generator-java") +public class SessionName implements ResourceName { + private static final PathTemplate PROJECT_INSTANCE_DATABASE_SESSION = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/instances/{instance}/databases/{database}/sessions/{session}"); + private volatile Map fieldValuesMap; + private final String project; + private final String instance; + private final String database; + private final String session; + + @Deprecated + protected SessionName() { + project = null; + instance = null; + database = null; + session = null; + } + + private SessionName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + instance = Preconditions.checkNotNull(builder.getInstance()); + database = Preconditions.checkNotNull(builder.getDatabase()); + session = Preconditions.checkNotNull(builder.getSession()); + } + + public String getProject() { + return project; + } + + public String getInstance() { + return instance; + } + + public String getDatabase() { + return database; + } + + public String getSession() { + return session; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static SessionName of(String project, String instance, String database, String session) { + return newBuilder() + .setProject(project) + .setInstance(instance) + .setDatabase(database) + .setSession(session) + .build(); + } + + public static String format(String project, String instance, String database, String session) { + return newBuilder() + .setProject(project) + .setInstance(instance) + .setDatabase(database) + .setSession(session) + .build() + .toString(); + } + + public static SessionName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_INSTANCE_DATABASE_SESSION.validatedMatch( + formattedString, "SessionName.parse: formattedString not in valid format"); + return of( + 
matchMap.get("project"), + matchMap.get("instance"), + matchMap.get("database"), + matchMap.get("session")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (SessionName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_INSTANCE_DATABASE_SESSION.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (instance != null) { + fieldMapBuilder.put("instance", instance); + } + if (database != null) { + fieldMapBuilder.put("database", database); + } + if (session != null) { + fieldMapBuilder.put("session", session); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return PROJECT_INSTANCE_DATABASE_SESSION.instantiate( + "project", project, "instance", instance, "database", database, "session", session); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + SessionName that = ((SessionName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.instance, that.instance) + && Objects.equals(this.database, that.database) + && Objects.equals(this.session, that.session); + } + return false; + } + + 
@Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(instance); + h *= 1000003; + h ^= Objects.hashCode(database); + h *= 1000003; + h ^= Objects.hashCode(session); + return h; + } + + /** + * Builder for projects/{project}/instances/{instance}/databases/{database}/sessions/{session}. + */ + public static class Builder { + private String project; + private String instance; + private String database; + private String session; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getInstance() { + return instance; + } + + public String getDatabase() { + return database; + } + + public String getSession() { + return session; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setInstance(String instance) { + this.instance = instance; + return this; + } + + public Builder setDatabase(String database) { + this.database = database; + return this; + } + + public Builder setSession(String session) { + this.session = session; + return this; + } + + private Builder(SessionName sessionName) { + this.project = sessionName.project; + this.instance = sessionName.instance; + this.database = sessionName.database; + this.session = sessionName.session; + } + + public SessionName build() { + return new SessionName(this); + } + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/SessionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/SessionOrBuilder.java new file mode 100644 index 000000000000..fc02633e8344 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/SessionOrBuilder.java @@ -0,0 +1,289 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance 
with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface SessionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.Session) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Output only. The name of the session. This is always system-assigned.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +   * Output only. The name of the session. This is always system-assigned.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +   * The labels for the session.
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
    +   * * No more than 64 labels can be associated with a given session.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   * 
    + * + * map<string, string> labels = 2; + */ + int getLabelsCount(); + + /** + * + * + *
    +   * The labels for the session.
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
    +   * * No more than 64 labels can be associated with a given session.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   * 
    + * + * map<string, string> labels = 2; + */ + boolean containsLabels(java.lang.String key); + + /** Use {@link #getLabelsMap()} instead. */ + @java.lang.Deprecated + java.util.Map getLabels(); + + /** + * + * + *
    +   * The labels for the session.
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
    +   * * No more than 64 labels can be associated with a given session.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   * 
    + * + * map<string, string> labels = 2; + */ + java.util.Map getLabelsMap(); + + /** + * + * + *
    +   * The labels for the session.
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
    +   * * No more than 64 labels can be associated with a given session.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   * 
    + * + * map<string, string> labels = 2; + */ + /* nullable */ + java.lang.String getLabelsOrDefault( + java.lang.String key, + /* nullable */ + java.lang.String defaultValue); + + /** + * + * + *
    +   * The labels for the session.
    +   *
    +   * * Label keys must be between 1 and 63 characters long and must conform to
    +   * the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
    +   * * Label values must be between 0 and 63 characters long and must conform
    +   * to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
    +   * * No more than 64 labels can be associated with a given session.
    +   *
    +   * See https://goo.gl/xmQnxf for more information on and examples of labels.
    +   * 
    + * + * map<string, string> labels = 2; + */ + java.lang.String getLabelsOrThrow(java.lang.String key); + + /** + * + * + *
    +   * Output only. The timestamp when the session is created.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + + /** + * + * + *
    +   * Output only. The timestamp when the session is created.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + + /** + * + * + *
    +   * Output only. The timestamp when the session is created.
    +   * 
    + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
    +   * Output only. The approximate timestamp when the session is last used. It's
    +   * typically earlier than the actual last use time.
    +   * 
    + * + * + * .google.protobuf.Timestamp approximate_last_use_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the approximateLastUseTime field is set. + */ + boolean hasApproximateLastUseTime(); + + /** + * + * + *
    +   * Output only. The approximate timestamp when the session is last used. It's
    +   * typically earlier than the actual last use time.
    +   * 
    + * + * + * .google.protobuf.Timestamp approximate_last_use_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The approximateLastUseTime. + */ + com.google.protobuf.Timestamp getApproximateLastUseTime(); + + /** + * + * + *
    +   * Output only. The approximate timestamp when the session is last used. It's
    +   * typically earlier than the actual last use time.
    +   * 
    + * + * + * .google.protobuf.Timestamp approximate_last_use_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getApproximateLastUseTimeOrBuilder(); + + /** + * + * + *
    +   * The database role which created this session.
    +   * 
    + * + * string creator_role = 5; + * + * @return The creatorRole. + */ + java.lang.String getCreatorRole(); + + /** + * + * + *
    +   * The database role which created this session.
    +   * 
    + * + * string creator_role = 5; + * + * @return The bytes for creatorRole. + */ + com.google.protobuf.ByteString getCreatorRoleBytes(); + + /** + * + * + *
    +   * Optional. If `true`, specifies a multiplexed session. Use a multiplexed
    +   * session for multiple, concurrent operations including any combination of
    +   * read-only and read-write transactions. Use
    +   * [`sessions.create`][google.spanner.v1.Spanner.CreateSession] to create
    +   * multiplexed sessions. Don't use
    +   * [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions] to
    +   * create a multiplexed session. You can't delete or list multiplexed
    +   * sessions.
    +   * 
    + * + * bool multiplexed = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The multiplexed. + */ + boolean getMultiplexed(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/SpannerProto.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/SpannerProto.java new file mode 100644 index 000000000000..e69777ee73fb --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/SpannerProto.java @@ -0,0 +1,893 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/spanner.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public final class SpannerProto extends com.google.protobuf.GeneratedFile { + private SpannerProto() {} + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "SpannerProto"); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_CreateSessionRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_CreateSessionRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_BatchCreateSessionsRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_BatchCreateSessionsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_BatchCreateSessionsResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_BatchCreateSessionsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_Session_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_Session_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_spanner_v1_Session_LabelsEntry_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_Session_LabelsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_GetSessionRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_GetSessionRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ListSessionsRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ListSessionsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ListSessionsResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ListSessionsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_DeleteSessionRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_DeleteSessionRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_RequestOptions_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_RequestOptions_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_RequestOptions_ClientContext_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_RequestOptions_ClientContext_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_google_spanner_v1_RequestOptions_ClientContext_SecureContextEntry_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_RequestOptions_ClientContext_SecureContextEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_DirectedReadOptions_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_DirectedReadOptions_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_DirectedReadOptions_ReplicaSelection_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_DirectedReadOptions_ReplicaSelection_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_DirectedReadOptions_IncludeReplicas_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_DirectedReadOptions_IncludeReplicas_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_DirectedReadOptions_ExcludeReplicas_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_DirectedReadOptions_ExcludeReplicas_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ExecuteSqlRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ExecuteSqlRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ExecuteSqlRequest_QueryOptions_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_spanner_v1_ExecuteSqlRequest_QueryOptions_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ExecuteSqlRequest_ParamTypesEntry_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ExecuteSqlRequest_ParamTypesEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ExecuteBatchDmlRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ExecuteBatchDmlRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ExecuteBatchDmlRequest_Statement_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ExecuteBatchDmlRequest_Statement_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ExecuteBatchDmlRequest_Statement_ParamTypesEntry_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ExecuteBatchDmlRequest_Statement_ParamTypesEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ExecuteBatchDmlResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ExecuteBatchDmlResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_PartitionOptions_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_PartitionOptions_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_PartitionQueryRequest_descriptor; + static final 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_PartitionQueryRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_PartitionQueryRequest_ParamTypesEntry_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_PartitionQueryRequest_ParamTypesEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_PartitionReadRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_PartitionReadRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_Partition_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_Partition_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_PartitionResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_PartitionResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_ReadRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_ReadRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_BeginTransactionRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_BeginTransactionRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_CommitRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_google_spanner_v1_CommitRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_RollbackRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_RollbackRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_BatchWriteRequest_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_BatchWriteRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_BatchWriteRequest_MutationGroup_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_BatchWriteRequest_MutationGroup_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_BatchWriteResponse_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_BatchWriteResponse_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n" + + "\037google/spanner/v1/spanner.proto\022\021googl" + + "e.spanner.v1\032\'google/spanner/v1/commit_r" + + "esponse.proto\032\034google/api/annotations.pr" + + "oto\032\027google/api/client.proto\032\037google/api" + + "/field_behavior.proto\032\031google/api/resour" + + "ce.proto\032\036google/protobuf/duration.proto" + + "\032\033google/protobuf/empty.proto\032\034google/pr" + + "otobuf/struct.proto\032\037google/protobuf/tim" + + "estamp.proto\032\027google/rpc/status.proto\032\034google/spanner/v1/keys.proto\032" + + " google/spanner/v1/location.proto\032 
google/spanner/v1" + + "/mutation.proto\032\"google/spanner/v1/resul" + + "t_set.proto\032#google/spanner/v1/transacti" + + "on.proto\032\034google/spanner/v1/type.proto\"\203\001\n" + + "\024CreateSessionRequest\0229\n" + + "\010database\030\001 \001(\tB\'\340A\002\372A!\n" + + "\037spanner.googleapis.com/Database\0220\n" + + "\007session\030\002 \001(\0132\032.google.spanner.v1.SessionB\003\340A\002\"\251\001\n" + + "\032BatchCreateSessionsRequest\0229\n" + + "\010database\030\001 \001(\tB\'\340A\002\372A!\n" + + "\037spanner.googleapis.com/Database\0224\n" + + "\020session_template\030\002 \001(\0132\032.google.spanner.v1.Session\022\032\n\r" + + "session_count\030\003 \001(\005B\003\340A\002\"J\n" + + "\033BatchCreateSessionsResponse\022+\n" + + "\007session\030\001 \003(\0132\032.google.spanner.v1.Session\"\270\003\n" + + "\007Session\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\003\0226\n" + + "\006labels\030\002 \003(\0132&.google.spanner.v1.Session.LabelsEntry\0224\n" + + "\013create_time\030\003 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022B\n" + + "\031approximate_last_use_time\030\004" + + " \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022\024\n" + + "\014creator_role\030\005 \001(\t\022\030\n" + + "\013multiplexed\030\006 \001(\010B\003\340A\001\032-\n" + + "\013LabelsEntry\022\013\n" + + "\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001:\210\001\352A\204\001\n" + + "\036spanner.googleapis.com/Session\022Oprojects/{project}/instances/{insta" + + "nce}/databases/{database}/sessions/{session}*\010sessions2\007session\"I\n" + + "\021GetSessionRequest\0224\n" + + "\004name\030\001 \001(\tB&\340A\002\372A \n" + + "\036spanner.googleapis.com/Session\"\207\001\n" + + "\023ListSessionsRequest\0229\n" + + "\010database\030\001 \001(\tB\'\340A\002\372A!\n" + + "\037spanner.googleapis.com/Database\022\021\n" + + "\tpage_size\030\002 \001(\005\022\022\n\n" + + "page_token\030\003 \001(\t\022\016\n" + + 
"\006filter\030\004 \001(\t\"]\n" + + "\024ListSessionsResponse\022,\n" + + "\010sessions\030\001 \003(\0132\032.google.spanner.v1.Session\022\027\n" + + "\017next_page_token\030\002 \001(\t\"L\n" + + "\024DeleteSessionRequest\0224\n" + + "\004name\030\001 \001(\tB&\340A\002\372A \n" + + "\036spanner.googleapis.com/Session\"\353\003\n" + + "\016RequestOptions\022<\n" + + "\010priority\030\001 \001(\0162*.google.spanner.v1.RequestOptions.Priority\022\023\n" + + "\013request_tag\030\002 \001(\t\022\027\n" + + "\017transaction_tag\030\003 \001(\t\022L\n" + + "\016client_context\030\004 \001(\013" + + "2/.google.spanner.v1.RequestOptions.ClientContextB\003\340A\001\032\276\001\n\r" + + "ClientContext\022_\n" + + "\016secure_context\030\001 \003(\0132B.google.spanner.v1.Req" + + "uestOptions.ClientContext.SecureContextEntryB\003\340A\001\032L\n" + + "\022SecureContextEntry\022\013\n" + + "\003key\030\001 \001(\t\022%\n" + + "\005value\030\002 \001(\0132\026.google.protobuf.Value:\0028\001\"^\n" + + "\010Priority\022\030\n" + + "\024PRIORITY_UNSPECIFIED\020\000\022\020\n" + + "\014PRIORITY_LOW\020\001\022\023\n" + + "\017PRIORITY_MEDIUM\020\002\022\021\n\r" + + "PRIORITY_HIGH\020\003\"\352\004\n" + + "\023DirectedReadOptions\022R\n" + + "\020include_replicas\030\001 \001(\01326.goog" + + "le.spanner.v1.DirectedReadOptions.IncludeReplicasH\000\022R\n" + + "\020exclude_replicas\030\002 \001(\01326." 
+ + "google.spanner.v1.DirectedReadOptions.ExcludeReplicasH\000\032\255\001\n" + + "\020ReplicaSelection\022\020\n" + + "\010location\030\001 \001(\t\022J\n" + + "\004type\030\002 \001(\0162<.google.sp" + + "anner.v1.DirectedReadOptions.ReplicaSelection.Type\";\n" + + "\004Type\022\024\n" + + "\020TYPE_UNSPECIFIED\020\000\022\016\n\n" + + "READ_WRITE\020\001\022\r\n" + + "\tREAD_ONLY\020\002\032\206\001\n" + + "\017IncludeReplicas\022S\n" + + "\022replica_selections\030\001 \003(\0132" + + "7.google.spanner.v1.DirectedReadOptions.ReplicaSelection\022\036\n" + + "\026auto_failover_disabled\030\002 \001(\010\032f\n" + + "\017ExcludeReplicas\022S\n" + + "\022replica_selections\030\001" + + " \003(\01327.google.spanner.v1.DirectedReadOptions.ReplicaSelectionB\n\n" + + "\010replicas\"\310\007\n" + + "\021ExecuteSqlRequest\0227\n" + + "\007session\030\001 \001(\tB&\340A\002\372A \n" + + "\036spanner.googleapis.com/Session\022;\n" + + "\013transaction\030\002 \001(\0132&.google.spanner.v1.TransactionSelector\022\020\n" + + "\003sql\030\003 \001(\tB\003\340A\002\022\'\n" + + "\006params\030\004 \001(\0132\027.google.protobuf.Struct\022I\n" + + "\013param_types\030\005 \003(\01324.google.spanne" + + "r.v1.ExecuteSqlRequest.ParamTypesEntry\022\024\n" + + "\014resume_token\030\006 \001(\014\022B\n\n" + + "query_mode\030\007 \001(\0162..google.spanner.v1.ExecuteSqlRequest.QueryMode\022\027\n" + + "\017partition_token\030\010 \001(\014\022\r\n" + + "\005seqno\030\t \001(\003\022H\n\r" + + "query_options\030\n" + + " \001(\01321.google.spanner.v1.ExecuteSqlRequest.QueryOptions\022:\n" + + "\017request_options\030\013 \001(\0132!.google.spanner.v1.RequestOptions\022E\n" + + "\025directed_read_options\030\017" + + " \001(\0132&.google.spanner.v1.DirectedReadOptions\022\032\n" + + "\022data_boost_enabled\030\020 \001(\010\022\033\n" + + "\016last_statement\030\021 \001(\010B\003\340A\001\0229\n" + + "\014routing_hint\030\022" + + " 
\001(\0132\036.google.spanner.v1.RoutingHintB\003\340A\001\032O\n" + + "\014QueryOptions\022\031\n" + + "\021optimizer_version\030\001 \001(\t\022$\n" + + "\034optimizer_statistics_package\030\002 \001(\t\032J\n" + + "\017ParamTypesEntry\022\013\n" + + "\003key\030\001 \001(\t\022&\n" + + "\005value\030\002 \001(\0132\027.google.spanner.v1.Type:\0028\001\"W\n" + + "\tQueryMode\022\n\n" + + "\006NORMAL\020\000\022\010\n" + + "\004PLAN\020\001\022\013\n" + + "\007PROFILE\020\002\022\016\n\n" + + "WITH_STATS\020\003\022\027\n" + + "\023WITH_PLAN_AND_STATS\020\004\"\276\004\n" + + "\026ExecuteBatchDmlRequest\0227\n" + + "\007session\030\001 \001(\tB&\340A\002\372A \n" + + "\036spanner.googleapis.com/Session\022@\n" + + "\013transaction\030\002 \001(\013" + + "2&.google.spanner.v1.TransactionSelectorB\003\340A\002\022L\n\n" + + "statements\030\003 \003(\01323.google.spann" + + "er.v1.ExecuteBatchDmlRequest.StatementB\003\340A\002\022\022\n" + + "\005seqno\030\004 \001(\003B\003\340A\002\022:\n" + + "\017request_options\030\005 \001(\0132!.google.spanner.v1.RequestOptions\022\034\n" + + "\017last_statements\030\006 \001(\010B\003\340A\001\032\354\001\n" + + "\tStatement\022\020\n" + + "\003sql\030\001 \001(\tB\003\340A\002\022\'\n" + + "\006params\030\002 \001(\0132\027.google.protobuf.Struct\022X\n" + + "\013param_types\030\003" + + " \003(\0132C.google.spanner.v1.ExecuteBatchDmlRequest.Statement.ParamTypesEntry\032J\n" + + "\017ParamTypesEntry\022\013\n" + + "\003key\030\001 \001(\t\022&\n" + + "\005value\030\002 \001(\0132\027.google.spanner.v1.Type:\0028\001\"\303\001\n" + + "\027ExecuteBatchDmlResponse\0221\n" + + "\013result_sets\030\001 \003(\0132\034.google.spanner.v1.ResultSet\022\"\n" + + "\006status\030\002 \001(\0132\022.google.rpc.Status\022Q\n" + + "\017precommit_token\030\003" + + " \001(\01323.google.spanner.v1.MultiplexedSessionPrecommitTokenB\003\340A\001\"H\n" + + "\020PartitionOptions\022\034\n" + + "\024partition_size_bytes\030\001 
\001(\003\022\026\n" + + "\016max_partitions\030\002 \001(\003\"\255\003\n" + + "\025PartitionQueryRequest\0227\n" + + "\007session\030\001 \001(\tB&\340A\002\372A \n" + + "\036spanner.googleapis.com/Session\022;\n" + + "\013transaction\030\002 \001(\0132&.google.spanner.v1.TransactionSelector\022\020\n" + + "\003sql\030\003 \001(\tB\003\340A\002\022,\n" + + "\006params\030\004 \001(\0132\027.google.protobuf.StructB\003\340A\001\022R\n" + + "\013param_types\030\005" + + " \003(\01328.google.spanner.v1.PartitionQueryRequest.ParamTypesEntryB\003\340A\001\022>\n" + + "\021partition_options\030\006 \001(\0132#.google.spanner.v1.PartitionOptions\032J\n" + + "\017ParamTypesEntry\022\013\n" + + "\003key\030\001 \001(\t\022&\n" + + "\005value\030\002 \001(\0132\027.google.spanner.v1.Type:\0028\001\"\261\002\n" + + "\024PartitionReadRequest\0227\n" + + "\007session\030\001 \001(\tB&\340A\002\372A \n" + + "\036spanner.googleapis.com/Session\022;\n" + + "\013transaction\030\002 \001(\0132&.google.spanner.v1.TransactionSelector\022\022\n" + + "\005table\030\003 \001(\tB\003\340A\002\022\r\n" + + "\005index\030\004 \001(\t\022\017\n" + + "\007columns\030\005 \003(\t\022/\n" + + "\007key_set\030\006 \001(\0132\031.google.spanner.v1.KeySetB\003\340A\002\022>\n" + + "\021partition_options\030\t \001(\0132#.google.spanner.v1.PartitionOptions\"$\n" + + "\tPartition\022\027\n" + + "\017partition_token\030\001 \001(\014\"z\n" + + "\021PartitionResponse\0220\n\n" + + "partitions\030\001 \003(\0132\034.google.spanner.v1.Partition\0223\n" + + "\013transaction\030\002 \001(\0132\036.google.spanner.v1.Transaction\"\261\006\n" + + "\013ReadRequest\0227\n" + + "\007session\030\001 \001(\tB&\340A\002\372A \n" + + "\036spanner.googleapis.com/Session\022;\n" + + "\013transaction\030\002 \001(\0132&.google.spanner.v1.TransactionSelector\022\022\n" + + "\005table\030\003 \001(\tB\003\340A\002\022\r\n" + + "\005index\030\004 \001(\t\022\024\n" + + "\007columns\030\005 \003(\tB\003\340A\002\022/\n" 
+ + "\007key_set\030\006 \001(\0132\031.google.spanner.v1.KeySetB\003\340A\002\022\r\n" + + "\005limit\030\010 \001(\003\022\024\n" + + "\014resume_token\030\t \001(\014\022\027\n" + + "\017partition_token\030\n" + + " \001(\014\022:\n" + + "\017request_options\030\013 \001(\0132!.google.spanner.v1.RequestOptions\022E\n" + + "\025directed_read_options\030\016 \001(\0132" + + "&.google.spanner.v1.DirectedReadOptions\022\032\n" + + "\022data_boost_enabled\030\017 \001(\010\022=\n" + + "\010order_by\030\020" + + " \001(\0162&.google.spanner.v1.ReadRequest.OrderByB\003\340A\001\022?\n" + + "\tlock_hint\030\021" + + " \001(\0162\'.google.spanner.v1.ReadRequest.LockHintB\003\340A\001\0229\n" + + "\014routing_hint\030\022" + + " \001(\0132\036.google.spanner.v1.RoutingHintB\003\340A\001\"T\n" + + "\007OrderBy\022\030\n" + + "\024ORDER_BY_UNSPECIFIED\020\000\022\030\n" + + "\024ORDER_BY_PRIMARY_KEY\020\001\022\025\n" + + "\021ORDER_BY_NO_ORDER\020\002\"T\n" + + "\010LockHint\022\031\n" + + "\025LOCK_HINT_UNSPECIFIED\020\000\022\024\n" + + "\020LOCK_HINT_SHARED\020\001\022\027\n" + + "\023LOCK_HINT_EXCLUSIVE\020\002\"\276\002\n" + + "\027BeginTransactionRequest\0227\n" + + "\007session\030\001 \001(\tB&\340A\002\372A \n" + + "\036spanner.googleapis.com/Session\022;\n" + + "\007options\030\002" + + " \001(\0132%.google.spanner.v1.TransactionOptionsB\003\340A\002\022:\n" + + "\017request_options\030\003 \001(\0132!.google.spanner.v1.RequestOptions\0226\n" + + "\014mutation_key\030\004" + + " \001(\0132\033.google.spanner.v1.MutationB\003\340A\001\0229\n" + + "\014routing_hint\030\005" + + " \001(\0132\036.google.spanner.v1.RoutingHintB\003\340A\001\"\213\004\n\r" + + "CommitRequest\0227\n" + + "\007session\030\001 \001(\tB&\340A\002\372A \n" + + "\036spanner.googleapis.com/Session\022\030\n" + + "\016transaction_id\030\002 \001(\014H\000\022G\n" + + "\026single_use_transaction\030\003 \001(\013" + + "2%.google.spanner.v1.TransactionOptionsH\000\022.\n" + + 
"\tmutations\030\004 \003(\0132\033.google.spanner.v1.Mutation\022\033\n" + + "\023return_commit_stats\030\005 \001(\010\0228\n" + + "\020max_commit_delay\030\010" + + " \001(\0132\031.google.protobuf.DurationB\003\340A\001\022:\n" + + "\017request_options\030\006 \001(\0132!.google.spanner.v1.RequestOptions\022Q\n" + + "\017precommit_token\030\t \001(\01323.google.spanner.v" + + "1.MultiplexedSessionPrecommitTokenB\003\340A\001\0229\n" + + "\014routing_hint\030\n" + + " \001(\0132\036.google.spanner.v1.RoutingHintB\003\340A\001B\r\n" + + "\013transaction\"g\n" + + "\017RollbackRequest\0227\n" + + "\007session\030\001 \001(\tB&\340A\002\372A \n" + + "\036spanner.googleapis.com/Session\022\033\n" + + "\016transaction_id\030\002 \001(\014B\003\340A\002\"\316\002\n" + + "\021BatchWriteRequest\0227\n" + + "\007session\030\001 \001(\tB&\340A\002\372A \n" + + "\036spanner.googleapis.com/Session\022:\n" + + "\017request_options\030\003 \001(\0132!.google.spanner.v1.RequestOptions\022P\n" + + "\017mutation_groups\030\004 \003(\01322.google.spanner." 
+ + "v1.BatchWriteRequest.MutationGroupB\003\340A\002\022,\n" + + "\037exclude_txn_from_change_streams\030\005 \001(\010B\003\340A\001\032D\n\r" + + "MutationGroup\0223\n" + + "\tmutations\030\001 \003(\0132\033.google.spanner.v1.MutationB\003\340A\002\"\177\n" + + "\022BatchWriteResponse\022\017\n" + + "\007indexes\030\001 \003(\005\022\"\n" + + "\006status\030\002 \001(\0132\022.google.rpc.Status\0224\n" + + "\020commit_timestamp\030\003 \001(\0132\032.google.protobuf.Timestamp2\213\030\n" + + "\007Spanner\022\246\001\n\r" + + "CreateSession\022\'.google.spanner.v1.CreateSessionRequest\032\032.go" + + "ogle.spanner.v1.Session\"P\332A\010database\202\323\344\223" + + "\002?\":/v1/{database=projects/*/instances/*/databases/*}/sessions:\001*\022\340\001\n" + + "\023BatchCreateSessions\022-.google.spanner.v1.BatchCreat" + + "eSessionsRequest\032..google.spanner.v1.Bat" + + "chCreateSessionsResponse\"j\332A\026database,se" + + "ssion_count\202\323\344\223\002K\"F/v1/{database=project" + + "s/*/instances/*/databases/*}/sessions:batchCreate:\001*\022\227\001\n\n" + + "GetSession\022$.google.spanner.v1.GetSessionRequest\032\032.google.spann" + + "er.v1.Session\"G\332A\004name\202\323\344\223\002:\0228/v1/{name=" + + "projects/*/instances/*/databases/*/sessions/*}\022\256\001\n" + + "\014ListSessions\022&.google.spanner.v1.ListSessionsRequest\032\'.google.spanner" + + ".v1.ListSessionsResponse\"M\332A\010database\202\323\344" + + "\223\002<\022:/v1/{database=projects/*/instances/*/databases/*}/sessions\022\231\001\n\r" + + "DeleteSession\022\'.google.spanner.v1.DeleteSessionReque" + + "st\032\026.google.protobuf.Empty\"G\332A\004name\202\323\344\223\002" + + ":*8/v1/{name=projects/*/instances/*/databases/*/sessions/*}\022\243\001\n\n" + + "ExecuteSql\022$.google.spanner.v1.ExecuteSqlRequest\032\034.googl" + + "e.spanner.v1.ResultSet\"Q\202\323\344\223\002K\"F/v1/{ses" + + "sion=projects/*/instances/*/databases/*/sessions/*}:executeSql:\001*\022\276\001\n" + + 
"\023ExecuteStreamingSql\022$.google.spanner.v1.ExecuteSql" + + "Request\032#.google.spanner.v1.PartialResul" + + "tSet\"Z\202\323\344\223\002T\"O/v1/{session=projects/*/in" + + "stances/*/databases/*/sessions/*}:executeStreamingSql:\001*0\001\022\300\001\n" + + "\017ExecuteBatchDml\022).google.spanner.v1.ExecuteBatchDmlReques" + + "t\032*.google.spanner.v1.ExecuteBatchDmlRes" + + "ponse\"V\202\323\344\223\002P\"K/v1/{session=projects/*/i" + + "nstances/*/databases/*/sessions/*}:executeBatchDml:\001*\022\221\001\n" + + "\004Read\022\036.google.spanner.v1.ReadRequest\032\034.google.spanner.v1.Resul" + + "tSet\"K\202\323\344\223\002E\"@/v1/{session=projects/*/in" + + "stances/*/databases/*/sessions/*}:read:\001*\022\254\001\n\r" + + "StreamingRead\022\036.google.spanner.v1.ReadRequest\032#.google.spanner.v1.PartialR" + + "esultSet\"T\202\323\344\223\002N\"I/v1/{session=projects/" + + "*/instances/*/databases/*/sessions/*}:streamingRead:\001*0\001\022\311\001\n" + + "\020BeginTransaction\022*.google.spanner.v1.BeginTransactionReques" + + "t\032\036.google.spanner.v1.Transaction\"i\332A\017se" + + "ssion,options\202\323\344\223\002Q\"L/v1/{session=projec" + + "ts/*/instances/*/databases/*/sessions/*}:beginTransaction:\001*\022\353\001\n" + + "\006Commit\022 .google" + + ".spanner.v1.CommitRequest\032!.google.spanner.v1.CommitResponse\"\233\001\332A" + + " session,transaction_id,mutations\332A(session,single_use_" + + "transaction,mutations\202\323\344\223\002G\"B/v1/{sessio" + + "n=projects/*/instances/*/databases/*/sessions/*}:commit:\001*\022\260\001\n" + + "\010Rollback\022\".google.spanner.v1.RollbackRequest\032\026.google.pro" + + "tobuf.Empty\"h\332A\026session,transaction_id\202\323" + + "\344\223\002I\"D/v1/{session=projects/*/instances/" + + "*/databases/*/sessions/*}:rollback:\001*\022\267\001\n" + + "\016PartitionQuery\022(.google.spanner.v1.Par" + + "titionQueryRequest\032$.google.spanner.v1.P" + + "artitionResponse\"U\202\323\344\223\002O\"J/v1/{session=p" 
+ + "rojects/*/instances/*/databases/*/sessions/*}:partitionQuery:\001*\022\264\001\n\r" + + "PartitionRead\022\'.google.spanner.v1.PartitionReadReque" + + "st\032$.google.spanner.v1.PartitionResponse" + + "\"T\202\323\344\223\002N\"I/v1/{session=projects/*/instan" + + "ces/*/databases/*/sessions/*}:partitionRead:\001*\022\310\001\n\n" + + "BatchWrite\022$.google.spanner.v1.BatchWriteRequest\032%.google.spanner.v1." + + "BatchWriteResponse\"k\332A\027session,mutation_" + + "groups\202\323\344\223\002K\"F/v1/{session=projects/*/in" + + "stances/*/databases/*/sessions/*}:batchW" + + "rite:\001*0\001\032w\312A\026spanner.googleapis.com\322A[h" + + "ttps://www.googleapis.com/auth/cloud-pla" + + "tform,https://www.googleapis.com/auth/spanner.dataB\221\002\n" + + "\025com.google.spanner.v1B\014SpannerProtoP\001Z5cloud.google.com/go/spanne" + + "r/apiv1/spannerpb;spannerpb\252\002\027Google.Clo" + + "ud.Spanner.V1\312\002\027Google\\Cloud\\Spanner\\V1\352\002\032Google::Cloud::Spanner::V1\352A_\n" + + "\037spanner.googleapis.com/Database\022 + * `StructType` defines the fields of a + * [STRUCT][google.spanner.v1.TypeCode.STRUCT] type. + * + * + * Protobuf type {@code google.spanner.v1.StructType} + */ +@com.google.protobuf.Generated +public final class StructType extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.StructType) + StructTypeOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "StructType"); + } + + // Use StructType.newBuilder() to construct. 
+ private StructType(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private StructType() { + fields_ = java.util.Collections.emptyList(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TypeProto.internal_static_google_spanner_v1_StructType_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TypeProto + .internal_static_google_spanner_v1_StructType_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.StructType.class, com.google.spanner.v1.StructType.Builder.class); + } + + public interface FieldOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.StructType.Field) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * The name of the field. For reads, this is the column name. For
    +     * SQL queries, it is the column alias (e.g., `"Word"` in the
    +     * query `"SELECT 'hello' AS Word"`), or the column name (e.g.,
    +     * `"ColName"` in the query `"SELECT ColName FROM Table"`). Some
    +     * columns might have an empty name (e.g., `"SELECT
    +     * UPPER(ColName)"`). Note that a query result can contain
    +     * multiple fields with the same name.
    +     * 
    + * + * string name = 1; + * + * @return The name. + */ + java.lang.String getName(); + + /** + * + * + *
    +     * The name of the field. For reads, this is the column name. For
    +     * SQL queries, it is the column alias (e.g., `"Word"` in the
    +     * query `"SELECT 'hello' AS Word"`), or the column name (e.g.,
    +     * `"ColName"` in the query `"SELECT ColName FROM Table"`). Some
    +     * columns might have an empty name (e.g., `"SELECT
    +     * UPPER(ColName)"`). Note that a query result can contain
    +     * multiple fields with the same name.
    +     * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +     * The type of the field.
    +     * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return Whether the type field is set. + */ + boolean hasType(); + + /** + * + * + *
    +     * The type of the field.
    +     * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return The type. + */ + com.google.spanner.v1.Type getType(); + + /** + * + * + *
    +     * The type of the field.
    +     * 
    + * + * .google.spanner.v1.Type type = 2; + */ + com.google.spanner.v1.TypeOrBuilder getTypeOrBuilder(); + } + + /** + * + * + *
    +   * Message representing a single field of a struct.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.StructType.Field} + */ + public static final class Field extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.StructType.Field) + FieldOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Field"); + } + + // Use Field.newBuilder() to construct. + private Field(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Field() { + name_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TypeProto + .internal_static_google_spanner_v1_StructType_Field_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TypeProto + .internal_static_google_spanner_v1_StructType_Field_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.StructType.Field.class, + com.google.spanner.v1.StructType.Field.Builder.class); + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + + /** + * + * + *
    +     * The name of the field. For reads, this is the column name. For
    +     * SQL queries, it is the column alias (e.g., `"Word"` in the
    +     * query `"SELECT 'hello' AS Word"`), or the column name (e.g.,
    +     * `"ColName"` in the query `"SELECT ColName FROM Table"`). Some
    +     * columns might have an empty name (e.g., `"SELECT
    +     * UPPER(ColName)"`). Note that a query result can contain
    +     * multiple fields with the same name.
    +     * 
    + * + * string name = 1; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + + /** + * + * + *
    +     * The name of the field. For reads, this is the column name. For
    +     * SQL queries, it is the column alias (e.g., `"Word"` in the
    +     * query `"SELECT 'hello' AS Word"`), or the column name (e.g.,
    +     * `"ColName"` in the query `"SELECT ColName FROM Table"`). Some
    +     * columns might have an empty name (e.g., `"SELECT
    +     * UPPER(ColName)"`). Note that a query result can contain
    +     * multiple fields with the same name.
    +     * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TYPE_FIELD_NUMBER = 2; + private com.google.spanner.v1.Type type_; + + /** + * + * + *
    +     * The type of the field.
    +     * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return Whether the type field is set. + */ + @java.lang.Override + public boolean hasType() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * The type of the field.
    +     * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return The type. + */ + @java.lang.Override + public com.google.spanner.v1.Type getType() { + return type_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : type_; + } + + /** + * + * + *
    +     * The type of the field.
    +     * 
    + * + * .google.spanner.v1.Type type = 2; + */ + @java.lang.Override + public com.google.spanner.v1.TypeOrBuilder getTypeOrBuilder() { + return type_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : type_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getType()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(1, name_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getType()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.StructType.Field)) { + return super.equals(obj); + } + com.google.spanner.v1.StructType.Field other = (com.google.spanner.v1.StructType.Field) obj; + + if (!getName().equals(other.getName())) return false; + if (hasType() != other.hasType()) return false; + if (hasType()) { + if (!getType().equals(other.getType())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + 
@java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + getType().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.StructType.Field parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.StructType.Field parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.StructType.Field parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.StructType.Field parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.StructType.Field parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.StructType.Field parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.StructType.Field parseFrom(java.io.InputStream input) + throws java.io.IOException { 
+ return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.StructType.Field parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.StructType.Field parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.StructType.Field parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.StructType.Field parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.StructType.Field parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.StructType.Field prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * Message representing a single field of a struct.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.StructType.Field} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.StructType.Field) + com.google.spanner.v1.StructType.FieldOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TypeProto + .internal_static_google_spanner_v1_StructType_Field_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TypeProto + .internal_static_google_spanner_v1_StructType_Field_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.StructType.Field.class, + com.google.spanner.v1.StructType.Field.Builder.class); + } + + // Construct using com.google.spanner.v1.StructType.Field.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetTypeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + type_ = null; + if (typeBuilder_ != null) { + typeBuilder_.dispose(); + typeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.TypeProto + .internal_static_google_spanner_v1_StructType_Field_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.StructType.Field getDefaultInstanceForType() { + return com.google.spanner.v1.StructType.Field.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.v1.StructType.Field build() { + com.google.spanner.v1.StructType.Field result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.StructType.Field buildPartial() { + com.google.spanner.v1.StructType.Field result = + new com.google.spanner.v1.StructType.Field(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.StructType.Field result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.type_ = typeBuilder_ == null ? type_ : typeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.StructType.Field) { + return mergeFrom((com.google.spanner.v1.StructType.Field) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.StructType.Field other) { + if (other == com.google.spanner.v1.StructType.Field.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasType()) { + mergeType(other.getType()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = 
false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(internalGetTypeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + + /** + * + * + *
    +       * The name of the field. For reads, this is the column name. For
    +       * SQL queries, it is the column alias (e.g., `"Word"` in the
    +       * query `"SELECT 'hello' AS Word"`), or the column name (e.g.,
    +       * `"ColName"` in the query `"SELECT ColName FROM Table"`). Some
    +       * columns might have an empty name (e.g., `"SELECT
    +       * UPPER(ColName)"`). Note that a query result can contain
    +       * multiple fields with the same name.
    +       * 
    + * + * string name = 1; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +       * The name of the field. For reads, this is the column name. For
    +       * SQL queries, it is the column alias (e.g., `"Word"` in the
    +       * query `"SELECT 'hello' AS Word"`), or the column name (e.g.,
    +       * `"ColName"` in the query `"SELECT ColName FROM Table"`). Some
    +       * columns might have an empty name (e.g., `"SELECT
    +       * UPPER(ColName)"`). Note that a query result can contain
    +       * multiple fields with the same name.
    +       * 
    + * + * string name = 1; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +       * The name of the field. For reads, this is the column name. For
    +       * SQL queries, it is the column alias (e.g., `"Word"` in the
    +       * query `"SELECT 'hello' AS Word"`), or the column name (e.g.,
    +       * `"ColName"` in the query `"SELECT ColName FROM Table"`). Some
    +       * columns might have an empty name (e.g., `"SELECT
    +       * UPPER(ColName)"`). Note that a query result can contain
    +       * multiple fields with the same name.
    +       * 
    + * + * string name = 1; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The name of the field. For reads, this is the column name. For
    +       * SQL queries, it is the column alias (e.g., `"Word"` in the
    +       * query `"SELECT 'hello' AS Word"`), or the column name (e.g.,
    +       * `"ColName"` in the query `"SELECT ColName FROM Table"`). Some
    +       * columns might have an empty name (e.g., `"SELECT
    +       * UPPER(ColName)"`). Note that a query result can contain
    +       * multiple fields with the same name.
    +       * 
    + * + * string name = 1; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
    +       * The name of the field. For reads, this is the column name. For
    +       * SQL queries, it is the column alias (e.g., `"Word"` in the
    +       * query `"SELECT 'hello' AS Word"`), or the column name (e.g.,
    +       * `"ColName"` in the query `"SELECT ColName FROM Table"`). Some
    +       * columns might have an empty name (e.g., `"SELECT
    +       * UPPER(ColName)"`). Note that a query result can contain
    +       * multiple fields with the same name.
    +       * 
    + * + * string name = 1; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.spanner.v1.Type type_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder> + typeBuilder_; + + /** + * + * + *
    +       * The type of the field.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return Whether the type field is set. + */ + public boolean hasType() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +       * The type of the field.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + * + * @return The type. + */ + public com.google.spanner.v1.Type getType() { + if (typeBuilder_ == null) { + return type_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : type_; + } else { + return typeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +       * The type of the field.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public Builder setType(com.google.spanner.v1.Type value) { + if (typeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + type_ = value; + } else { + typeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The type of the field.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public Builder setType(com.google.spanner.v1.Type.Builder builderForValue) { + if (typeBuilder_ == null) { + type_ = builderForValue.build(); + } else { + typeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * The type of the field.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public Builder mergeType(com.google.spanner.v1.Type value) { + if (typeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && type_ != null + && type_ != com.google.spanner.v1.Type.getDefaultInstance()) { + getTypeBuilder().mergeFrom(value); + } else { + type_ = value; + } + } else { + typeBuilder_.mergeFrom(value); + } + if (type_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +       * The type of the field.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000002); + type_ = null; + if (typeBuilder_ != null) { + typeBuilder_.dispose(); + typeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +       * The type of the field.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public com.google.spanner.v1.Type.Builder getTypeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetTypeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * The type of the field.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + */ + public com.google.spanner.v1.TypeOrBuilder getTypeOrBuilder() { + if (typeBuilder_ != null) { + return typeBuilder_.getMessageOrBuilder(); + } else { + return type_ == null ? com.google.spanner.v1.Type.getDefaultInstance() : type_; + } + } + + /** + * + * + *
    +       * The type of the field.
    +       * 
    + * + * .google.spanner.v1.Type type = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder> + internalGetTypeFieldBuilder() { + if (typeBuilder_ == null) { + typeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder>( + getType(), getParentForChildren(), isClean()); + type_ = null; + } + return typeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.StructType.Field) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.StructType.Field) + private static final com.google.spanner.v1.StructType.Field DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.StructType.Field(); + } + + public static com.google.spanner.v1.StructType.Field getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Field parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.StructType.Field getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public static final int FIELDS_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List fields_; + + /** + * + * + *
    +   * The list of fields that make up this struct. Order is
    +   * significant, because values of this struct type are represented as
    +   * lists, where the order of field values matches the order of
    +   * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +   * order of fields matches the order of columns in a read request, or the
    +   * order of fields in the `SELECT` clause of a query.
    +   * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + @java.lang.Override + public java.util.List getFieldsList() { + return fields_; + } + + /** + * + * + *
    +   * The list of fields that make up this struct. Order is
    +   * significant, because values of this struct type are represented as
    +   * lists, where the order of field values matches the order of
    +   * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +   * order of fields matches the order of columns in a read request, or the
    +   * order of fields in the `SELECT` clause of a query.
    +   * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + @java.lang.Override + public java.util.List + getFieldsOrBuilderList() { + return fields_; + } + + /** + * + * + *
    +   * The list of fields that make up this struct. Order is
    +   * significant, because values of this struct type are represented as
    +   * lists, where the order of field values matches the order of
    +   * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +   * order of fields matches the order of columns in a read request, or the
    +   * order of fields in the `SELECT` clause of a query.
    +   * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + @java.lang.Override + public int getFieldsCount() { + return fields_.size(); + } + + /** + * + * + *
    +   * The list of fields that make up this struct. Order is
    +   * significant, because values of this struct type are represented as
    +   * lists, where the order of field values matches the order of
    +   * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +   * order of fields matches the order of columns in a read request, or the
    +   * order of fields in the `SELECT` clause of a query.
    +   * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + @java.lang.Override + public com.google.spanner.v1.StructType.Field getFields(int index) { + return fields_.get(index); + } + + /** + * + * + *
    +   * The list of fields that make up this struct. Order is
    +   * significant, because values of this struct type are represented as
    +   * lists, where the order of field values matches the order of
    +   * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +   * order of fields matches the order of columns in a read request, or the
    +   * order of fields in the `SELECT` clause of a query.
    +   * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + @java.lang.Override + public com.google.spanner.v1.StructType.FieldOrBuilder getFieldsOrBuilder(int index) { + return fields_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < fields_.size(); i++) { + output.writeMessage(1, fields_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < fields_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, fields_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.StructType)) { + return super.equals(obj); + } + com.google.spanner.v1.StructType other = (com.google.spanner.v1.StructType) obj; + + if (!getFieldsList().equals(other.getFieldsList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getFieldsCount() > 0) { + hash = (37 * hash) + FIELDS_FIELD_NUMBER; + hash = (53 * hash) + getFieldsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static 
com.google.spanner.v1.StructType parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.StructType parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.StructType parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.StructType parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.StructType parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.StructType parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.StructType parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.StructType parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.StructType parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.StructType parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.StructType parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.StructType parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.StructType prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * `StructType` defines the fields of a
    +   * [STRUCT][google.spanner.v1.TypeCode.STRUCT] type.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.StructType} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.StructType) + com.google.spanner.v1.StructTypeOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TypeProto + .internal_static_google_spanner_v1_StructType_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TypeProto + .internal_static_google_spanner_v1_StructType_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.StructType.class, + com.google.spanner.v1.StructType.Builder.class); + } + + // Construct using com.google.spanner.v1.StructType.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + } else { + fields_ = null; + fieldsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.TypeProto + .internal_static_google_spanner_v1_StructType_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.StructType getDefaultInstanceForType() { + return com.google.spanner.v1.StructType.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.StructType build() { + com.google.spanner.v1.StructType result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public 
com.google.spanner.v1.StructType buildPartial() { + com.google.spanner.v1.StructType result = new com.google.spanner.v1.StructType(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(com.google.spanner.v1.StructType result) { + if (fieldsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + fields_ = java.util.Collections.unmodifiableList(fields_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.fields_ = fields_; + } else { + result.fields_ = fieldsBuilder_.build(); + } + } + + private void buildPartial0(com.google.spanner.v1.StructType result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.StructType) { + return mergeFrom((com.google.spanner.v1.StructType) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.StructType other) { + if (other == com.google.spanner.v1.StructType.getDefaultInstance()) return this; + if (fieldsBuilder_ == null) { + if (!other.fields_.isEmpty()) { + if (fields_.isEmpty()) { + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureFieldsIsMutable(); + fields_.addAll(other.fields_); + } + onChanged(); + } + } else { + if (!other.fields_.isEmpty()) { + if (fieldsBuilder_.isEmpty()) { + fieldsBuilder_.dispose(); + fieldsBuilder_ = null; + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000001); + fieldsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? 
internalGetFieldsFieldBuilder() + : null; + } else { + fieldsBuilder_.addAllMessages(other.fields_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.v1.StructType.Field m = + input.readMessage( + com.google.spanner.v1.StructType.Field.parser(), extensionRegistry); + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(m); + } else { + fieldsBuilder_.addMessage(m); + } + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List fields_ = + java.util.Collections.emptyList(); + + private void ensureFieldsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + fields_ = new java.util.ArrayList(fields_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.StructType.Field, + com.google.spanner.v1.StructType.Field.Builder, + com.google.spanner.v1.StructType.FieldOrBuilder> + fieldsBuilder_; + + /** + * + * + *
    +     * The list of fields that make up this struct. Order is
    +     * significant, because values of this struct type are represented as
    +     * lists, where the order of field values matches the order of
    +     * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +     * order of fields matches the order of columns in a read request, or the
    +     * order of fields in the `SELECT` clause of a query.
    +     * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + public java.util.List getFieldsList() { + if (fieldsBuilder_ == null) { + return java.util.Collections.unmodifiableList(fields_); + } else { + return fieldsBuilder_.getMessageList(); + } + } + + /** + * + * + *
    +     * The list of fields that make up this struct. Order is
    +     * significant, because values of this struct type are represented as
    +     * lists, where the order of field values matches the order of
    +     * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +     * order of fields matches the order of columns in a read request, or the
    +     * order of fields in the `SELECT` clause of a query.
    +     * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + public int getFieldsCount() { + if (fieldsBuilder_ == null) { + return fields_.size(); + } else { + return fieldsBuilder_.getCount(); + } + } + + /** + * + * + *
    +     * The list of fields that make up this struct. Order is
    +     * significant, because values of this struct type are represented as
    +     * lists, where the order of field values matches the order of
    +     * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +     * order of fields matches the order of columns in a read request, or the
    +     * order of fields in the `SELECT` clause of a query.
    +     * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + public com.google.spanner.v1.StructType.Field getFields(int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessage(index); + } + } + + /** + * + * + *
    +     * The list of fields that make up this struct. Order is
    +     * significant, because values of this struct type are represented as
    +     * lists, where the order of field values matches the order of
    +     * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +     * order of fields matches the order of columns in a read request, or the
    +     * order of fields in the `SELECT` clause of a query.
    +     * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + public Builder setFields(int index, com.google.spanner.v1.StructType.Field value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.set(index, value); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of fields that make up this struct. Order is
    +     * significant, because values of this struct type are represented as
    +     * lists, where the order of field values matches the order of
    +     * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +     * order of fields matches the order of columns in a read request, or the
    +     * order of fields in the `SELECT` clause of a query.
    +     * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + public Builder setFields( + int index, com.google.spanner.v1.StructType.Field.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.set(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of fields that make up this struct. Order is
    +     * significant, because values of this struct type are represented as
    +     * lists, where the order of field values matches the order of
    +     * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +     * order of fields matches the order of columns in a read request, or the
    +     * order of fields in the `SELECT` clause of a query.
    +     * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + public Builder addFields(com.google.spanner.v1.StructType.Field value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(value); + onChanged(); + } else { + fieldsBuilder_.addMessage(value); + } + return this; + } + + /** + * + * + *
    +     * The list of fields that make up this struct. Order is
    +     * significant, because values of this struct type are represented as
    +     * lists, where the order of field values matches the order of
    +     * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +     * order of fields matches the order of columns in a read request, or the
    +     * order of fields in the `SELECT` clause of a query.
    +     * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + public Builder addFields(int index, com.google.spanner.v1.StructType.Field value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(index, value); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, value); + } + return this; + } + + /** + * + * + *
    +     * The list of fields that make up this struct. Order is
    +     * significant, because values of this struct type are represented as
    +     * lists, where the order of field values matches the order of
    +     * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +     * order of fields matches the order of columns in a read request, or the
    +     * order of fields in the `SELECT` clause of a query.
    +     * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + public Builder addFields(com.google.spanner.v1.StructType.Field.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of fields that make up this struct. Order is
    +     * significant, because values of this struct type are represented as
    +     * lists, where the order of field values matches the order of
    +     * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +     * order of fields matches the order of columns in a read request, or the
    +     * order of fields in the `SELECT` clause of a query.
    +     * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + public Builder addFields( + int index, com.google.spanner.v1.StructType.Field.Builder builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + + /** + * + * + *
    +     * The list of fields that make up this struct. Order is
    +     * significant, because values of this struct type are represented as
    +     * lists, where the order of field values matches the order of
    +     * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +     * order of fields matches the order of columns in a read request, or the
    +     * order of fields in the `SELECT` clause of a query.
    +     * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + public Builder addAllFields( + java.lang.Iterable values) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, fields_); + onChanged(); + } else { + fieldsBuilder_.addAllMessages(values); + } + return this; + } + + /** + * + * + *
    +     * The list of fields that make up this struct. Order is
    +     * significant, because values of this struct type are represented as
    +     * lists, where the order of field values matches the order of
    +     * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +     * order of fields matches the order of columns in a read request, or the
    +     * order of fields in the `SELECT` clause of a query.
    +     * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + public Builder clearFields() { + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + fieldsBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * The list of fields that make up this struct. Order is
    +     * significant, because values of this struct type are represented as
    +     * lists, where the order of field values matches the order of
    +     * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +     * order of fields matches the order of columns in a read request, or the
    +     * order of fields in the `SELECT` clause of a query.
    +     * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + public Builder removeFields(int index) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.remove(index); + onChanged(); + } else { + fieldsBuilder_.remove(index); + } + return this; + } + + /** + * + * + *
    +     * The list of fields that make up this struct. Order is
    +     * significant, because values of this struct type are represented as
    +     * lists, where the order of field values matches the order of
    +     * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +     * order of fields matches the order of columns in a read request, or the
    +     * order of fields in the `SELECT` clause of a query.
    +     * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + public com.google.spanner.v1.StructType.Field.Builder getFieldsBuilder(int index) { + return internalGetFieldsFieldBuilder().getBuilder(index); + } + + /** + * + * + *
    +     * The list of fields that make up this struct. Order is
    +     * significant, because values of this struct type are represented as
    +     * lists, where the order of field values matches the order of
    +     * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +     * order of fields matches the order of columns in a read request, or the
    +     * order of fields in the `SELECT` clause of a query.
    +     * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + public com.google.spanner.v1.StructType.FieldOrBuilder getFieldsOrBuilder(int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessageOrBuilder(index); + } + } + + /** + * + * + *
    +     * The list of fields that make up this struct. Order is
    +     * significant, because values of this struct type are represented as
    +     * lists, where the order of field values matches the order of
    +     * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +     * order of fields matches the order of columns in a read request, or the
    +     * order of fields in the `SELECT` clause of a query.
    +     * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + public java.util.List + getFieldsOrBuilderList() { + if (fieldsBuilder_ != null) { + return fieldsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(fields_); + } + } + + /** + * + * + *
    +     * The list of fields that make up this struct. Order is
    +     * significant, because values of this struct type are represented as
    +     * lists, where the order of field values matches the order of
    +     * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +     * order of fields matches the order of columns in a read request, or the
    +     * order of fields in the `SELECT` clause of a query.
    +     * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + public com.google.spanner.v1.StructType.Field.Builder addFieldsBuilder() { + return internalGetFieldsFieldBuilder() + .addBuilder(com.google.spanner.v1.StructType.Field.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of fields that make up this struct. Order is
    +     * significant, because values of this struct type are represented as
    +     * lists, where the order of field values matches the order of
    +     * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +     * order of fields matches the order of columns in a read request, or the
    +     * order of fields in the `SELECT` clause of a query.
    +     * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + public com.google.spanner.v1.StructType.Field.Builder addFieldsBuilder(int index) { + return internalGetFieldsFieldBuilder() + .addBuilder(index, com.google.spanner.v1.StructType.Field.getDefaultInstance()); + } + + /** + * + * + *
    +     * The list of fields that make up this struct. Order is
    +     * significant, because values of this struct type are represented as
    +     * lists, where the order of field values matches the order of
    +     * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +     * order of fields matches the order of columns in a read request, or the
    +     * order of fields in the `SELECT` clause of a query.
    +     * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + public java.util.List getFieldsBuilderList() { + return internalGetFieldsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.StructType.Field, + com.google.spanner.v1.StructType.Field.Builder, + com.google.spanner.v1.StructType.FieldOrBuilder> + internalGetFieldsFieldBuilder() { + if (fieldsBuilder_ == null) { + fieldsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilder< + com.google.spanner.v1.StructType.Field, + com.google.spanner.v1.StructType.Field.Builder, + com.google.spanner.v1.StructType.FieldOrBuilder>( + fields_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + fields_ = null; + } + return fieldsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.StructType) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.StructType) + private static final com.google.spanner.v1.StructType DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.StructType(); + } + + public static com.google.spanner.v1.StructType getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StructType parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.StructType getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/StructTypeOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/StructTypeOrBuilder.java new file mode 100644 index 000000000000..a3f5422bf319 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/StructTypeOrBuilder.java @@ -0,0 +1,109 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/type.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface StructTypeOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.StructType) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The list of fields that make up this struct. Order is
    +   * significant, because values of this struct type are represented as
    +   * lists, where the order of field values matches the order of
    +   * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +   * order of fields matches the order of columns in a read request, or the
    +   * order of fields in the `SELECT` clause of a query.
    +   * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + java.util.List getFieldsList(); + + /** + * + * + *
    +   * The list of fields that make up this struct. Order is
    +   * significant, because values of this struct type are represented as
    +   * lists, where the order of field values matches the order of
    +   * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +   * order of fields matches the order of columns in a read request, or the
    +   * order of fields in the `SELECT` clause of a query.
    +   * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + com.google.spanner.v1.StructType.Field getFields(int index); + + /** + * + * + *
    +   * The list of fields that make up this struct. Order is
    +   * significant, because values of this struct type are represented as
    +   * lists, where the order of field values matches the order of
    +   * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +   * order of fields matches the order of columns in a read request, or the
    +   * order of fields in the `SELECT` clause of a query.
    +   * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + int getFieldsCount(); + + /** + * + * + *
    +   * The list of fields that make up this struct. Order is
    +   * significant, because values of this struct type are represented as
    +   * lists, where the order of field values matches the order of
    +   * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +   * order of fields matches the order of columns in a read request, or the
    +   * order of fields in the `SELECT` clause of a query.
    +   * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + java.util.List + getFieldsOrBuilderList(); + + /** + * + * + *
    +   * The list of fields that make up this struct. Order is
    +   * significant, because values of this struct type are represented as
    +   * lists, where the order of field values matches the order of
    +   * fields in the [StructType][google.spanner.v1.StructType]. In turn, the
    +   * order of fields matches the order of columns in a read request, or the
    +   * order of fields in the `SELECT` clause of a query.
    +   * 
    + * + * repeated .google.spanner.v1.StructType.Field fields = 1; + */ + com.google.spanner.v1.StructType.FieldOrBuilder getFieldsOrBuilder(int index); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Tablet.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Tablet.java new file mode 100644 index 000000000000..5983e78b52b9 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Tablet.java @@ -0,0 +1,1673 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/location.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * A `Tablet` represents a single replica of a `Group`. A tablet is served by a
    + * single server at a time, and can move between servers due to server death or
    + * simply load balancing.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.Tablet} + */ +@com.google.protobuf.Generated +public final class Tablet extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.Tablet) + TabletOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Tablet"); + } + + // Use Tablet.newBuilder() to construct. + private Tablet(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Tablet() { + serverAddress_ = ""; + location_ = ""; + role_ = 0; + incarnation_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.LocationProto.internal_static_google_spanner_v1_Tablet_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_Tablet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Tablet.class, com.google.spanner.v1.Tablet.Builder.class); + } + + /** + * + * + *
    +   * Indicates the role of the tablet.
    +   * 
    + * + * Protobuf enum {@code google.spanner.v1.Tablet.Role} + */ + public enum Role implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * ROLE_UNSPECIFIED = 0; + */ + ROLE_UNSPECIFIED(0), + /** + * + * + *
    +     * The tablet can perform reads and (if elected leader) writes.
    +     * 
    + * + * READ_WRITE = 1; + */ + READ_WRITE(1), + /** + * + * + *
    +     * The tablet can only perform reads.
    +     * 
    + * + * READ_ONLY = 2; + */ + READ_ONLY(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Role"); + } + + /** + * + * + *
    +     * Not specified.
    +     * 
    + * + * ROLE_UNSPECIFIED = 0; + */ + public static final int ROLE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * The tablet can perform reads and (if elected leader) writes.
    +     * 
    + * + * READ_WRITE = 1; + */ + public static final int READ_WRITE_VALUE = 1; + + /** + * + * + *
    +     * The tablet can only perform reads.
    +     * 
    + * + * READ_ONLY = 2; + */ + public static final int READ_ONLY_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Role valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Role forNumber(int value) { + switch (value) { + case 0: + return ROLE_UNSPECIFIED; + case 1: + return READ_WRITE; + case 2: + return READ_ONLY; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Role findValueByNumber(int number) { + return Role.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.v1.Tablet.getDescriptor().getEnumTypes().get(0); + } + + private static final Role[] VALUES = values(); + + public static Role valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != 
getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Role(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.v1.Tablet.Role) + } + + public static final int TABLET_UID_FIELD_NUMBER = 1; + private long tabletUid_ = 0L; + + /** + * + * + *
    +   * The UID of the tablet, unique within the database. Matches the
    +   * `tablet_uids` and `leader_tablet_uid` fields in `Group`.
    +   * 
    + * + * uint64 tablet_uid = 1; + * + * @return The tabletUid. + */ + @java.lang.Override + public long getTabletUid() { + return tabletUid_; + } + + public static final int SERVER_ADDRESS_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object serverAddress_ = ""; + + /** + * + * + *
    +   * The address of the server that is serving this tablet -- either an IP
    +   * address or DNS hostname and a port number.
    +   * 
    + * + * string server_address = 2; + * + * @return The serverAddress. + */ + @java.lang.Override + public java.lang.String getServerAddress() { + java.lang.Object ref = serverAddress_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + serverAddress_ = s; + return s; + } + } + + /** + * + * + *
    +   * The address of the server that is serving this tablet -- either an IP
    +   * address or DNS hostname and a port number.
    +   * 
    + * + * string server_address = 2; + * + * @return The bytes for serverAddress. + */ + @java.lang.Override + public com.google.protobuf.ByteString getServerAddressBytes() { + java.lang.Object ref = serverAddress_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + serverAddress_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int LOCATION_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile java.lang.Object location_ = ""; + + /** + * + * + *
    +   * Where this tablet is located. This is the name of a Google Cloud region,
    +   * such as "us-central1".
    +   * 
    + * + * string location = 3; + * + * @return The location. + */ + @java.lang.Override + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + location_ = s; + return s; + } + } + + /** + * + * + *
    +   * Where this tablet is located. This is the name of a Google Cloud region,
    +   * such as "us-central1".
    +   * 
    + * + * string location = 3; + * + * @return The bytes for location. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ROLE_FIELD_NUMBER = 4; + private int role_ = 0; + + /** + * + * + *
    +   * The role of the tablet.
    +   * 
    + * + * .google.spanner.v1.Tablet.Role role = 4; + * + * @return The enum numeric value on the wire for role. + */ + @java.lang.Override + public int getRoleValue() { + return role_; + } + + /** + * + * + *
    +   * The role of the tablet.
    +   * 
    + * + * .google.spanner.v1.Tablet.Role role = 4; + * + * @return The role. + */ + @java.lang.Override + public com.google.spanner.v1.Tablet.Role getRole() { + com.google.spanner.v1.Tablet.Role result = com.google.spanner.v1.Tablet.Role.forNumber(role_); + return result == null ? com.google.spanner.v1.Tablet.Role.UNRECOGNIZED : result; + } + + public static final int INCARNATION_FIELD_NUMBER = 5; + private com.google.protobuf.ByteString incarnation_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * `incarnation` indicates the freshness of the tablet information contained
    +   * in this proto. Incarnations can be compared lexicographically; if
    +   * incarnation A is greater than incarnation B, then the `Tablet`
    +   * corresponding to A is newer than the `Tablet` corresponding to B, and
    +   * should be used preferentially.
    +   * 
    + * + * bytes incarnation = 5; + * + * @return The incarnation. + */ + @java.lang.Override + public com.google.protobuf.ByteString getIncarnation() { + return incarnation_; + } + + public static final int DISTANCE_FIELD_NUMBER = 6; + private int distance_ = 0; + + /** + * + * + *
    +   * Distances help the client pick the closest tablet out of the list of
    +   * tablets for a given request. Tablets with lower distances should generally
    +   * be preferred. Tablets with the same distance are approximately equally
    +   * close; the client can choose arbitrarily.
    +   *
    +   * Distances do not correspond precisely to expected latency, geographical
    +   * distance, or anything else. Distances should be compared only between
    +   * tablets of the same group; they are not meaningful between different
    +   * groups.
    +   *
    +   * A value of zero indicates that the tablet may be in the same zone as
    +   * the client, and have minimum network latency. A value less than or equal to
    +   * five indicates that the tablet is thought to be in the same region as the
    +   * client, and may have a few milliseconds of network latency. Values greater
    +   * than five are most likely in a different region, with non-trivial network
    +   * latency.
    +   *
    +   * Clients should use the following algorithm:
    +   * * If the request is using a directed read, eliminate any tablets that
    +   * do not match the directed read's target zone and/or replica type.
    +   * * (Read-write transactions only) Choose leader tablet if it has a
    +   * distance <=5.
    +   * * Group and sort tablets by distance. Choose a random
    +   * tablet with the lowest distance. If the request
    +   * is not a directed read, only consider replicas with distances <=5.
    +   * * Send the request to the fallback endpoint.
    +   *
    +   * The tablet picked by this algorithm may be skipped, either because it is
    +   * marked as `skip` by the server or because the corresponding server is
    +   * unreachable, flow controlled, etc. Skipped tablets should be added to the
    +   * `skipped_tablet_uid` field in `RoutingHint`; the algorithm above should
    +   * then be re-run without including the skipped tablet(s) to pick the next
    +   * best tablet.
    +   * 
    + * + * uint32 distance = 6; + * + * @return The distance. + */ + @java.lang.Override + public int getDistance() { + return distance_; + } + + public static final int SKIP_FIELD_NUMBER = 7; + private boolean skip_ = false; + + /** + * + * + *
    +   * If true, the tablet should not be chosen by the client. Typically, this
    +   * signals that the tablet is unhealthy in some way. Tablets with `skip`
    +   * set to true should be reported back to the server in
    +   * `RoutingHint.skipped_tablet_uid`; this cues the server to send updated
    +   * information for this tablet should it become usable again.
    +   * 
    + * + * bool skip = 7; + * + * @return The skip. + */ + @java.lang.Override + public boolean getSkip() { + return skip_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (tabletUid_ != 0L) { + output.writeUInt64(1, tabletUid_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(serverAddress_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 2, serverAddress_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(location_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 3, location_); + } + if (role_ != com.google.spanner.v1.Tablet.Role.ROLE_UNSPECIFIED.getNumber()) { + output.writeEnum(4, role_); + } + if (!incarnation_.isEmpty()) { + output.writeBytes(5, incarnation_); + } + if (distance_ != 0) { + output.writeUInt32(6, distance_); + } + if (skip_ != false) { + output.writeBool(7, skip_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (tabletUid_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeUInt64Size(1, tabletUid_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(serverAddress_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(2, serverAddress_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(location_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(3, location_); + } + if (role_ != com.google.spanner.v1.Tablet.Role.ROLE_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(4, role_); + } + if 
(!incarnation_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(5, incarnation_); + } + if (distance_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeUInt32Size(6, distance_); + } + if (skip_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(7, skip_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.Tablet)) { + return super.equals(obj); + } + com.google.spanner.v1.Tablet other = (com.google.spanner.v1.Tablet) obj; + + if (getTabletUid() != other.getTabletUid()) return false; + if (!getServerAddress().equals(other.getServerAddress())) return false; + if (!getLocation().equals(other.getLocation())) return false; + if (role_ != other.role_) return false; + if (!getIncarnation().equals(other.getIncarnation())) return false; + if (getDistance() != other.getDistance()) return false; + if (getSkip() != other.getSkip()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TABLET_UID_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getTabletUid()); + hash = (37 * hash) + SERVER_ADDRESS_FIELD_NUMBER; + hash = (53 * hash) + getServerAddress().hashCode(); + hash = (37 * hash) + LOCATION_FIELD_NUMBER; + hash = (53 * hash) + getLocation().hashCode(); + hash = (37 * hash) + ROLE_FIELD_NUMBER; + hash = (53 * hash) + role_; + hash = (37 * hash) + INCARNATION_FIELD_NUMBER; + hash = (53 * hash) + getIncarnation().hashCode(); + hash = (37 * hash) + DISTANCE_FIELD_NUMBER; + hash = (53 * hash) + getDistance(); + hash = (37 * hash) + 
SKIP_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getSkip()); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.Tablet parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Tablet parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Tablet parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Tablet parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Tablet parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Tablet parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Tablet parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Tablet parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, 
extensionRegistry); + } + + public static com.google.spanner.v1.Tablet parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Tablet parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Tablet parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Tablet parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.Tablet prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * A `Tablet` represents a single replica of a `Group`. A tablet is served by a
    +   * single server at a time, and can move between servers due to server death or
    +   * simply load balancing.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.Tablet} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.Tablet) + com.google.spanner.v1.TabletOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_Tablet_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_Tablet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Tablet.class, com.google.spanner.v1.Tablet.Builder.class); + } + + // Construct using com.google.spanner.v1.Tablet.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + tabletUid_ = 0L; + serverAddress_ = ""; + location_ = ""; + role_ = 0; + incarnation_ = com.google.protobuf.ByteString.EMPTY; + distance_ = 0; + skip_ = false; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.LocationProto + .internal_static_google_spanner_v1_Tablet_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.Tablet getDefaultInstanceForType() { + return com.google.spanner.v1.Tablet.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.Tablet build() { + com.google.spanner.v1.Tablet result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.Tablet buildPartial() { + com.google.spanner.v1.Tablet 
result = new com.google.spanner.v1.Tablet(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.Tablet result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.tabletUid_ = tabletUid_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.serverAddress_ = serverAddress_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.location_ = location_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.role_ = role_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.incarnation_ = incarnation_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.distance_ = distance_; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.skip_ = skip_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.Tablet) { + return mergeFrom((com.google.spanner.v1.Tablet) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.Tablet other) { + if (other == com.google.spanner.v1.Tablet.getDefaultInstance()) return this; + if (other.getTabletUid() != 0L) { + setTabletUid(other.getTabletUid()); + } + if (!other.getServerAddress().isEmpty()) { + serverAddress_ = other.serverAddress_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getLocation().isEmpty()) { + location_ = other.location_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.role_ != 0) { + setRoleValue(other.getRoleValue()); + } + if (!other.getIncarnation().isEmpty()) { + setIncarnation(other.getIncarnation()); + } + if (other.getDistance() != 0) { + setDistance(other.getDistance()); + } + if (other.getSkip() != false) { + setSkip(other.getSkip()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean 
isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + tabletUid_ = input.readUInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + serverAddress_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + location_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 32: + { + role_ = input.readEnum(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 42: + { + incarnation_ = input.readBytes(); + bitField0_ |= 0x00000010; + break; + } // case 42 + case 48: + { + distance_ = input.readUInt32(); + bitField0_ |= 0x00000020; + break; + } // case 48 + case 56: + { + skip_ = input.readBool(); + bitField0_ |= 0x00000040; + break; + } // case 56 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long tabletUid_; + + /** + * + * + *
    +     * The UID of the tablet, unique within the database. Matches the
    +     * `tablet_uids` and `leader_tablet_uid` fields in `Group`.
    +     * 
    + * + * uint64 tablet_uid = 1; + * + * @return The tabletUid. + */ + @java.lang.Override + public long getTabletUid() { + return tabletUid_; + } + + /** + * + * + *
    +     * The UID of the tablet, unique within the database. Matches the
    +     * `tablet_uids` and `leader_tablet_uid` fields in `Group`.
    +     * 
    + * + * uint64 tablet_uid = 1; + * + * @param value The tabletUid to set. + * @return This builder for chaining. + */ + public Builder setTabletUid(long value) { + + tabletUid_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The UID of the tablet, unique within the database. Matches the
    +     * `tablet_uids` and `leader_tablet_uid` fields in `Group`.
    +     * 
    + * + * uint64 tablet_uid = 1; + * + * @return This builder for chaining. + */ + public Builder clearTabletUid() { + bitField0_ = (bitField0_ & ~0x00000001); + tabletUid_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object serverAddress_ = ""; + + /** + * + * + *
    +     * The address of the server that is serving this tablet -- either an IP
    +     * address or DNS hostname and a port number.
    +     * 
    + * + * string server_address = 2; + * + * @return The serverAddress. + */ + public java.lang.String getServerAddress() { + java.lang.Object ref = serverAddress_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + serverAddress_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * The address of the server that is serving this tablet -- either an IP
    +     * address or DNS hostname and a port number.
    +     * 
    + * + * string server_address = 2; + * + * @return The bytes for serverAddress. + */ + public com.google.protobuf.ByteString getServerAddressBytes() { + java.lang.Object ref = serverAddress_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + serverAddress_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * The address of the server that is serving this tablet -- either an IP
    +     * address or DNS hostname and a port number.
    +     * 
    + * + * string server_address = 2; + * + * @param value The serverAddress to set. + * @return This builder for chaining. + */ + public Builder setServerAddress(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + serverAddress_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The address of the server that is serving this tablet -- either an IP
    +     * address or DNS hostname and a port number.
    +     * 
    + * + * string server_address = 2; + * + * @return This builder for chaining. + */ + public Builder clearServerAddress() { + serverAddress_ = getDefaultInstance().getServerAddress(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The address of the server that is serving this tablet -- either an IP
    +     * address or DNS hostname and a port number.
    +     * 
    + * + * string server_address = 2; + * + * @param value The bytes for serverAddress to set. + * @return This builder for chaining. + */ + public Builder setServerAddressBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + serverAddress_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object location_ = ""; + + /** + * + * + *
    +     * Where this tablet is located. This is the name of a Google Cloud region,
    +     * such as "us-central1".
    +     * 
    + * + * string location = 3; + * + * @return The location. + */ + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + location_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * Where this tablet is located. This is the name of a Google Cloud region,
    +     * such as "us-central1".
    +     * 
    + * + * string location = 3; + * + * @return The bytes for location. + */ + public com.google.protobuf.ByteString getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * Where this tablet is located. This is the name of a Google Cloud region,
    +     * such as "us-central1".
    +     * 
    + * + * string location = 3; + * + * @param value The location to set. + * @return This builder for chaining. + */ + public Builder setLocation(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + location_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Where this tablet is located. This is the name of a Google Cloud region,
    +     * such as "us-central1".
    +     * 
    + * + * string location = 3; + * + * @return This builder for chaining. + */ + public Builder clearLocation() { + location_ = getDefaultInstance().getLocation(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Where this tablet is located. This is the name of a Google Cloud region,
    +     * such as "us-central1".
    +     * 
    + * + * string location = 3; + * + * @param value The bytes for location to set. + * @return This builder for chaining. + */ + public Builder setLocationBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + location_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private int role_ = 0; + + /** + * + * + *
    +     * The role of the tablet.
    +     * 
    + * + * .google.spanner.v1.Tablet.Role role = 4; + * + * @return The enum numeric value on the wire for role. + */ + @java.lang.Override + public int getRoleValue() { + return role_; + } + + /** + * + * + *
    +     * The role of the tablet.
    +     * 
    + * + * .google.spanner.v1.Tablet.Role role = 4; + * + * @param value The enum numeric value on the wire for role to set. + * @return This builder for chaining. + */ + public Builder setRoleValue(int value) { + role_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The role of the tablet.
    +     * 
    + * + * .google.spanner.v1.Tablet.Role role = 4; + * + * @return The role. + */ + @java.lang.Override + public com.google.spanner.v1.Tablet.Role getRole() { + com.google.spanner.v1.Tablet.Role result = com.google.spanner.v1.Tablet.Role.forNumber(role_); + return result == null ? com.google.spanner.v1.Tablet.Role.UNRECOGNIZED : result; + } + + /** + * + * + *
    +     * The role of the tablet.
    +     * 
    + * + * .google.spanner.v1.Tablet.Role role = 4; + * + * @param value The role to set. + * @return This builder for chaining. + */ + public Builder setRole(com.google.spanner.v1.Tablet.Role value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + role_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The role of the tablet.
    +     * 
    + * + * .google.spanner.v1.Tablet.Role role = 4; + * + * @return This builder for chaining. + */ + public Builder clearRole() { + bitField0_ = (bitField0_ & ~0x00000008); + role_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString incarnation_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * `incarnation` indicates the freshness of the tablet information contained
    +     * in this proto. Incarnations can be compared lexicographically; if
    +     * incarnation A is greater than incarnation B, then the `Tablet`
    +     * corresponding to A is newer than the `Tablet` corresponding to B, and
    +     * should be used preferentially.
    +     * 
    + * + * bytes incarnation = 5; + * + * @return The incarnation. + */ + @java.lang.Override + public com.google.protobuf.ByteString getIncarnation() { + return incarnation_; + } + + /** + * + * + *
    +     * `incarnation` indicates the freshness of the tablet information contained
    +     * in this proto. Incarnations can be compared lexicographically; if
    +     * incarnation A is greater than incarnation B, then the `Tablet`
    +     * corresponding to A is newer than the `Tablet` corresponding to B, and
    +     * should be used preferentially.
    +     * 
    + * + * bytes incarnation = 5; + * + * @param value The incarnation to set. + * @return This builder for chaining. + */ + public Builder setIncarnation(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + incarnation_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * `incarnation` indicates the freshness of the tablet information contained
    +     * in this proto. Incarnations can be compared lexicographically; if
    +     * incarnation A is greater than incarnation B, then the `Tablet`
    +     * corresponding to A is newer than the `Tablet` corresponding to B, and
    +     * should be used preferentially.
    +     * 
    + * + * bytes incarnation = 5; + * + * @return This builder for chaining. + */ + public Builder clearIncarnation() { + bitField0_ = (bitField0_ & ~0x00000010); + incarnation_ = getDefaultInstance().getIncarnation(); + onChanged(); + return this; + } + + private int distance_; + + /** + * + * + *
    +     * Distances help the client pick the closest tablet out of the list of
    +     * tablets for a given request. Tablets with lower distances should generally
    +     * be preferred. Tablets with the same distance are approximately equally
    +     * close; the client can choose arbitrarily.
    +     *
    +     * Distances do not correspond precisely to expected latency, geographical
    +     * distance, or anything else. Distances should be compared only between
    +     * tablets of the same group; they are not meaningful between different
    +     * groups.
    +     *
    +     * A value of zero indicates that the tablet may be in the same zone as
    +     * the client, and have minimum network latency. A value less than or equal to
    +     * five indicates that the tablet is thought to be in the same region as the
    +     * client, and may have a few milliseconds of network latency. Values greater
    +     * than five are most likely in a different region, with non-trivial network
    +     * latency.
    +     *
    +     * Clients should use the following algorithm:
    +     * * If the request is using a directed read, eliminate any tablets that
    +     * do not match the directed read's target zone and/or replica type.
    +     * * (Read-write transactions only) Choose leader tablet if it has a
    +     * distance <=5.
    +     * * Group and sort tablets by distance. Choose a random
    +     * tablet with the lowest distance. If the request
    +     * is not a directed read, only consider replicas with distances <=5.
    +     * * Send the request to the fallback endpoint.
    +     *
    +     * The tablet picked by this algorithm may be skipped, either because it is
    +     * marked as `skip` by the server or because the corresponding server is
    +     * unreachable, flow controlled, etc. Skipped tablets should be added to the
    +     * `skipped_tablet_uid` field in `RoutingHint`; the algorithm above should
    +     * then be re-run without including the skipped tablet(s) to pick the next
    +     * best tablet.
    +     * 
    + * + * uint32 distance = 6; + * + * @return The distance. + */ + @java.lang.Override + public int getDistance() { + return distance_; + } + + /** + * + * + *
    +     * Distances help the client pick the closest tablet out of the list of
    +     * tablets for a given request. Tablets with lower distances should generally
    +     * be preferred. Tablets with the same distance are approximately equally
    +     * close; the client can choose arbitrarily.
    +     *
    +     * Distances do not correspond precisely to expected latency, geographical
    +     * distance, or anything else. Distances should be compared only between
    +     * tablets of the same group; they are not meaningful between different
    +     * groups.
    +     *
    +     * A value of zero indicates that the tablet may be in the same zone as
    +     * the client, and have minimum network latency. A value less than or equal to
    +     * five indicates that the tablet is thought to be in the same region as the
    +     * client, and may have a few milliseconds of network latency. Values greater
    +     * than five are most likely in a different region, with non-trivial network
    +     * latency.
    +     *
    +     * Clients should use the following algorithm:
    +     * * If the request is using a directed read, eliminate any tablets that
    +     * do not match the directed read's target zone and/or replica type.
    +     * * (Read-write transactions only) Choose leader tablet if it has a
    +     * distance <=5.
    +     * * Group and sort tablets by distance. Choose a random
    +     * tablet with the lowest distance. If the request
    +     * is not a directed read, only consider replicas with distances <=5.
    +     * * Send the request to the fallback endpoint.
    +     *
    +     * The tablet picked by this algorithm may be skipped, either because it is
    +     * marked as `skip` by the server or because the corresponding server is
    +     * unreachable, flow controlled, etc. Skipped tablets should be added to the
    +     * `skipped_tablet_uid` field in `RoutingHint`; the algorithm above should
    +     * then be re-run without including the skipped tablet(s) to pick the next
    +     * best tablet.
    +     * 
    + * + * uint32 distance = 6; + * + * @param value The distance to set. + * @return This builder for chaining. + */ + public Builder setDistance(int value) { + + distance_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Distances help the client pick the closest tablet out of the list of
    +     * tablets for a given request. Tablets with lower distances should generally
    +     * be preferred. Tablets with the same distance are approximately equally
    +     * close; the client can choose arbitrarily.
    +     *
    +     * Distances do not correspond precisely to expected latency, geographical
    +     * distance, or anything else. Distances should be compared only between
    +     * tablets of the same group; they are not meaningful between different
    +     * groups.
    +     *
    +     * A value of zero indicates that the tablet may be in the same zone as
    +     * the client, and have minimum network latency. A value less than or equal to
    +     * five indicates that the tablet is thought to be in the same region as the
    +     * client, and may have a few milliseconds of network latency. Values greater
    +     * than five are most likely in a different region, with non-trivial network
    +     * latency.
    +     *
    +     * Clients should use the following algorithm:
    +     * * If the request is using a directed read, eliminate any tablets that
    +     * do not match the directed read's target zone and/or replica type.
    +     * * (Read-write transactions only) Choose leader tablet if it has a
    +     * distance <=5.
    +     * * Group and sort tablets by distance. Choose a random
    +     * tablet with the lowest distance. If the request
    +     * is not a directed read, only consider replicas with distances <=5.
    +     * * Send the request to the fallback endpoint.
    +     *
    +     * The tablet picked by this algorithm may be skipped, either because it is
    +     * marked as `skip` by the server or because the corresponding server is
    +     * unreachable, flow controlled, etc. Skipped tablets should be added to the
    +     * `skipped_tablet_uid` field in `RoutingHint`; the algorithm above should
    +     * then be re-run without including the skipped tablet(s) to pick the next
    +     * best tablet.
    +     * 
    + * + * uint32 distance = 6; + * + * @return This builder for chaining. + */ + public Builder clearDistance() { + bitField0_ = (bitField0_ & ~0x00000020); + distance_ = 0; + onChanged(); + return this; + } + + private boolean skip_; + + /** + * + * + *
    +     * If true, the tablet should not be chosen by the client. Typically, this
    +     * signals that the tablet is unhealthy in some way. Tablets with `skip`
    +     * set to true should be reported back to the server in
    +     * `RoutingHint.skipped_tablet_uid`; this cues the server to send updated
    +     * information for this tablet should it become usable again.
    +     * 
    + * + * bool skip = 7; + * + * @return The skip. + */ + @java.lang.Override + public boolean getSkip() { + return skip_; + } + + /** + * + * + *
    +     * If true, the tablet should not be chosen by the client. Typically, this
    +     * signals that the tablet is unhealthy in some way. Tablets with `skip`
    +     * set to true should be reported back to the server in
    +     * `RoutingHint.skipped_tablet_uid`; this cues the server to send updated
    +     * information for this tablet should it become usable again.
    +     * 
    + * + * bool skip = 7; + * + * @param value The skip to set. + * @return This builder for chaining. + */ + public Builder setSkip(boolean value) { + + skip_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If true, the tablet should not be chosen by the client. Typically, this
    +     * signals that the tablet is unhealthy in some way. Tablets with `skip`
    +     * set to true should be reported back to the server in
    +     * `RoutingHint.skipped_tablet_uid`; this cues the server to send updated
    +     * information for this tablet should it become usable again.
    +     * 
    + * + * bool skip = 7; + * + * @return This builder for chaining. + */ + public Builder clearSkip() { + bitField0_ = (bitField0_ & ~0x00000040); + skip_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.Tablet) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.Tablet) + private static final com.google.spanner.v1.Tablet DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.Tablet(); + } + + public static com.google.spanner.v1.Tablet getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Tablet parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.Tablet getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TabletOrBuilder.java 
b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TabletOrBuilder.java new file mode 100644 index 000000000000..df919eebae52 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TabletOrBuilder.java @@ -0,0 +1,203 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/location.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface TabletOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.Tablet) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The UID of the tablet, unique within the database. Matches the
    +   * `tablet_uids` and `leader_tablet_uid` fields in `Group`.
    +   * 
    + * + * uint64 tablet_uid = 1; + * + * @return The tabletUid. + */ + long getTabletUid(); + + /** + * + * + *
    +   * The address of the server that is serving this tablet -- either an IP
    +   * address or DNS hostname and a port number.
    +   * 
    + * + * string server_address = 2; + * + * @return The serverAddress. + */ + java.lang.String getServerAddress(); + + /** + * + * + *
    +   * The address of the server that is serving this tablet -- either an IP
    +   * address or DNS hostname and a port number.
    +   * 
    + * + * string server_address = 2; + * + * @return The bytes for serverAddress. + */ + com.google.protobuf.ByteString getServerAddressBytes(); + + /** + * + * + *
    +   * Where this tablet is located. This is the name of a Google Cloud region,
    +   * such as "us-central1".
    +   * 
    + * + * string location = 3; + * + * @return The location. + */ + java.lang.String getLocation(); + + /** + * + * + *
    +   * Where this tablet is located. This is the name of a Google Cloud region,
    +   * such as "us-central1".
    +   * 
    + * + * string location = 3; + * + * @return The bytes for location. + */ + com.google.protobuf.ByteString getLocationBytes(); + + /** + * + * + *
    +   * The role of the tablet.
    +   * 
    + * + * .google.spanner.v1.Tablet.Role role = 4; + * + * @return The enum numeric value on the wire for role. + */ + int getRoleValue(); + + /** + * + * + *
    +   * The role of the tablet.
    +   * 
    + * + * .google.spanner.v1.Tablet.Role role = 4; + * + * @return The role. + */ + com.google.spanner.v1.Tablet.Role getRole(); + + /** + * + * + *
    +   * `incarnation` indicates the freshness of the tablet information contained
    +   * in this proto. Incarnations can be compared lexicographically; if
    +   * incarnation A is greater than incarnation B, then the `Tablet`
    +   * corresponding to A is newer than the `Tablet` corresponding to B, and
    +   * should be used preferentially.
    +   * 
    + * + * bytes incarnation = 5; + * + * @return The incarnation. + */ + com.google.protobuf.ByteString getIncarnation(); + + /** + * + * + *
    +   * Distances help the client pick the closest tablet out of the list of
    +   * tablets for a given request. Tablets with lower distances should generally
    +   * be preferred. Tablets with the same distance are approximately equally
    +   * close; the client can choose arbitrarily.
    +   *
    +   * Distances do not correspond precisely to expected latency, geographical
    +   * distance, or anything else. Distances should be compared only between
    +   * tablets of the same group; they are not meaningful between different
    +   * groups.
    +   *
    +   * A value of zero indicates that the tablet may be in the same zone as
    +   * the client, and have minimum network latency. A value less than or equal to
    +   * five indicates that the tablet is thought to be in the same region as the
    +   * client, and may have a few milliseconds of network latency. Values greater
    +   * than five are most likely in a different region, with non-trivial network
    +   * latency.
    +   *
    +   * Clients should use the following algorithm:
    +   * * If the request is using a directed read, eliminate any tablets that
    +   * do not match the directed read's target zone and/or replica type.
    +   * * (Read-write transactions only) Choose leader tablet if it has a
    +   * distance <=5.
    +   * * Group and sort tablets by distance. Choose a random
    +   * tablet with the lowest distance. If the request
    +   * is not a directed read, only consider replicas with distances <=5.
    +   * * Send the request to the fallback endpoint.
    +   *
    +   * The tablet picked by this algorithm may be skipped, either because it is
    +   * marked as `skip` by the server or because the corresponding server is
    +   * unreachable, flow controlled, etc. Skipped tablets should be added to the
    +   * `skipped_tablet_uid` field in `RoutingHint`; the algorithm above should
    +   * then be re-run without including the skipped tablet(s) to pick the next
    +   * best tablet.
    +   * 
    + * + * uint32 distance = 6; + * + * @return The distance. + */ + int getDistance(); + + /** + * + * + *
    +   * If true, the tablet should not be chosen by the client. Typically, this
    +   * signals that the tablet is unhealthy in some way. Tablets with `skip`
    +   * set to true should be reported back to the server in
    +   * `RoutingHint.skipped_tablet_uid`; this cues the server to send updated
    +   * information for this tablet should it become usable again.
    +   * 
    + * + * bool skip = 7; + * + * @return The skip. + */ + boolean getSkip(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Transaction.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Transaction.java new file mode 100644 index 000000000000..8cf66a0d30f8 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Transaction.java @@ -0,0 +1,1610 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/transaction.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * A transaction.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.Transaction} + */ +@com.google.protobuf.Generated +public final class Transaction extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.Transaction) + TransactionOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Transaction"); + } + + // Use Transaction.newBuilder() to construct. + private Transaction(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Transaction() { + id_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_Transaction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_Transaction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Transaction.class, + com.google.spanner.v1.Transaction.Builder.class); + } + + private int bitField0_; + public static final int ID_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString id_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +   * `id` may be used to identify the transaction in subsequent
    +   * [Read][google.spanner.v1.Spanner.Read],
    +   * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql],
    +   * [Commit][google.spanner.v1.Spanner.Commit], or
    +   * [Rollback][google.spanner.v1.Spanner.Rollback] calls.
    +   *
    +   * Single-use read-only transactions do not have IDs, because
    +   * single-use transactions do not support multiple requests.
    +   * 
    + * + * bytes id = 1; + * + * @return The id. + */ + @java.lang.Override + public com.google.protobuf.ByteString getId() { + return id_; + } + + public static final int READ_TIMESTAMP_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp readTimestamp_; + + /** + * + * + *
    +   * For snapshot read-only transactions, the read timestamp chosen
    +   * for the transaction. Not returned by default: see
    +   * [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp].
    +   *
    +   * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +   * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +   * 
    + * + * .google.protobuf.Timestamp read_timestamp = 2; + * + * @return Whether the readTimestamp field is set. + */ + @java.lang.Override + public boolean hasReadTimestamp() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * For snapshot read-only transactions, the read timestamp chosen
    +   * for the transaction. Not returned by default: see
    +   * [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp].
    +   *
    +   * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +   * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +   * 
    + * + * .google.protobuf.Timestamp read_timestamp = 2; + * + * @return The readTimestamp. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getReadTimestamp() { + return readTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : readTimestamp_; + } + + /** + * + * + *
    +   * For snapshot read-only transactions, the read timestamp chosen
    +   * for the transaction. Not returned by default: see
    +   * [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp].
    +   *
    +   * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +   * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +   * 
    + * + * .google.protobuf.Timestamp read_timestamp = 2; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getReadTimestampOrBuilder() { + return readTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : readTimestamp_; + } + + public static final int PRECOMMIT_TOKEN_FIELD_NUMBER = 3; + private com.google.spanner.v1.MultiplexedSessionPrecommitToken precommitToken_; + + /** + * + * + *
    +   * A precommit token is included in the response of a BeginTransaction
    +   * request if the read-write transaction is on a multiplexed session and
    +   * a mutation_key was specified in the
    +   * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +   * request for this transaction.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + * + * @return Whether the precommitToken field is set. + */ + @java.lang.Override + public boolean hasPrecommitToken() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * A precommit token is included in the response of a BeginTransaction
    +   * request if the read-write transaction is on a multiplexed session and
    +   * a mutation_key was specified in the
    +   * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +   * request for this transaction.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + * + * @return The precommitToken. + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + + /** + * + * + *
    +   * A precommit token is included in the response of a BeginTransaction
    +   * request if the read-write transaction is on a multiplexed session and
    +   * a mutation_key was specified in the
    +   * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +   * request for this transaction.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + + public static final int CACHE_UPDATE_FIELD_NUMBER = 5; + private com.google.spanner.v1.CacheUpdate cacheUpdate_; + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the cacheUpdate field is set. + */ + @java.lang.Override + public boolean hasCacheUpdate() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The cacheUpdate. + */ + @java.lang.Override + public com.google.spanner.v1.CacheUpdate getCacheUpdate() { + return cacheUpdate_ == null + ? com.google.spanner.v1.CacheUpdate.getDefaultInstance() + : cacheUpdate_; + } + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.CacheUpdateOrBuilder getCacheUpdateOrBuilder() { + return cacheUpdate_ == null + ? com.google.spanner.v1.CacheUpdate.getDefaultInstance() + : cacheUpdate_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!id_.isEmpty()) { + output.writeBytes(1, id_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getReadTimestamp()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getPrecommitToken()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(5, getCacheUpdate()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!id_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, id_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getReadTimestamp()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getPrecommitToken()); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getCacheUpdate()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
com.google.spanner.v1.Transaction)) { + return super.equals(obj); + } + com.google.spanner.v1.Transaction other = (com.google.spanner.v1.Transaction) obj; + + if (!getId().equals(other.getId())) return false; + if (hasReadTimestamp() != other.hasReadTimestamp()) return false; + if (hasReadTimestamp()) { + if (!getReadTimestamp().equals(other.getReadTimestamp())) return false; + } + if (hasPrecommitToken() != other.hasPrecommitToken()) return false; + if (hasPrecommitToken()) { + if (!getPrecommitToken().equals(other.getPrecommitToken())) return false; + } + if (hasCacheUpdate() != other.hasCacheUpdate()) return false; + if (hasCacheUpdate()) { + if (!getCacheUpdate().equals(other.getCacheUpdate())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ID_FIELD_NUMBER; + hash = (53 * hash) + getId().hashCode(); + if (hasReadTimestamp()) { + hash = (37 * hash) + READ_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + getReadTimestamp().hashCode(); + } + if (hasPrecommitToken()) { + hash = (37 * hash) + PRECOMMIT_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPrecommitToken().hashCode(); + } + if (hasCacheUpdate()) { + hash = (37 * hash) + CACHE_UPDATE_FIELD_NUMBER; + hash = (53 * hash) + getCacheUpdate().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.Transaction parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Transaction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Transaction parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Transaction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Transaction parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Transaction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Transaction parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Transaction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Transaction parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Transaction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + 
} + + public static com.google.spanner.v1.Transaction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Transaction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.Transaction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * A transaction.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.Transaction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.Transaction) + com.google.spanner.v1.TransactionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_Transaction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_Transaction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Transaction.class, + com.google.spanner.v1.Transaction.Builder.class); + } + + // Construct using com.google.spanner.v1.Transaction.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetReadTimestampFieldBuilder(); + internalGetPrecommitTokenFieldBuilder(); + internalGetCacheUpdateFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + id_ = com.google.protobuf.ByteString.EMPTY; + readTimestamp_ = null; + if (readTimestampBuilder_ != null) { + readTimestampBuilder_.dispose(); + readTimestampBuilder_ = null; + } + precommitToken_ = null; + if (precommitTokenBuilder_ != null) { + precommitTokenBuilder_.dispose(); + precommitTokenBuilder_ = null; + } + cacheUpdate_ = null; + if (cacheUpdateBuilder_ != null) { + cacheUpdateBuilder_.dispose(); + cacheUpdateBuilder_ = null; + } + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_Transaction_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.Transaction getDefaultInstanceForType() { + return com.google.spanner.v1.Transaction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.Transaction build() { + com.google.spanner.v1.Transaction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.Transaction buildPartial() { + com.google.spanner.v1.Transaction result = new com.google.spanner.v1.Transaction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.Transaction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.id_ = id_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.readTimestamp_ = + readTimestampBuilder_ == null ? readTimestamp_ : readTimestampBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.precommitToken_ = + precommitTokenBuilder_ == null ? precommitToken_ : precommitTokenBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.cacheUpdate_ = + cacheUpdateBuilder_ == null ? 
cacheUpdate_ : cacheUpdateBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.Transaction) { + return mergeFrom((com.google.spanner.v1.Transaction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.Transaction other) { + if (other == com.google.spanner.v1.Transaction.getDefaultInstance()) return this; + if (!other.getId().isEmpty()) { + setId(other.getId()); + } + if (other.hasReadTimestamp()) { + mergeReadTimestamp(other.getReadTimestamp()); + } + if (other.hasPrecommitToken()) { + mergePrecommitToken(other.getPrecommitToken()); + } + if (other.hasCacheUpdate()) { + mergeCacheUpdate(other.getCacheUpdate()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + id_ = input.readBytes(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetReadTimestampFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetPrecommitTokenFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 42: + { + input.readMessage( + internalGetCacheUpdateFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 
0x00000008; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.ByteString id_ = com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * `id` may be used to identify the transaction in subsequent
    +     * [Read][google.spanner.v1.Spanner.Read],
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql],
    +     * [Commit][google.spanner.v1.Spanner.Commit], or
    +     * [Rollback][google.spanner.v1.Spanner.Rollback] calls.
    +     *
    +     * Single-use read-only transactions do not have IDs, because
    +     * single-use transactions do not support multiple requests.
    +     * 
    + * + * bytes id = 1; + * + * @return The id. + */ + @java.lang.Override + public com.google.protobuf.ByteString getId() { + return id_; + } + + /** + * + * + *
    +     * `id` may be used to identify the transaction in subsequent
    +     * [Read][google.spanner.v1.Spanner.Read],
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql],
    +     * [Commit][google.spanner.v1.Spanner.Commit], or
    +     * [Rollback][google.spanner.v1.Spanner.Rollback] calls.
    +     *
    +     * Single-use read-only transactions do not have IDs, because
    +     * single-use transactions do not support multiple requests.
    +     * 
    + * + * bytes id = 1; + * + * @param value The id to set. + * @return This builder for chaining. + */ + public Builder setId(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + id_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * `id` may be used to identify the transaction in subsequent
    +     * [Read][google.spanner.v1.Spanner.Read],
    +     * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql],
    +     * [Commit][google.spanner.v1.Spanner.Commit], or
    +     * [Rollback][google.spanner.v1.Spanner.Rollback] calls.
    +     *
    +     * Single-use read-only transactions do not have IDs, because
    +     * single-use transactions do not support multiple requests.
    +     * 
    + * + * bytes id = 1; + * + * @return This builder for chaining. + */ + public Builder clearId() { + bitField0_ = (bitField0_ & ~0x00000001); + id_ = getDefaultInstance().getId(); + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp readTimestamp_; + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + readTimestampBuilder_; + + /** + * + * + *
    +     * For snapshot read-only transactions, the read timestamp chosen
    +     * for the transaction. Not returned by default: see
    +     * [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp].
    +     *
    +     * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +     * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +     * 
    + * + * .google.protobuf.Timestamp read_timestamp = 2; + * + * @return Whether the readTimestamp field is set. + */ + public boolean hasReadTimestamp() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * For snapshot read-only transactions, the read timestamp chosen
    +     * for the transaction. Not returned by default: see
    +     * [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp].
    +     *
    +     * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +     * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +     * 
    + * + * .google.protobuf.Timestamp read_timestamp = 2; + * + * @return The readTimestamp. + */ + public com.google.protobuf.Timestamp getReadTimestamp() { + if (readTimestampBuilder_ == null) { + return readTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : readTimestamp_; + } else { + return readTimestampBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * For snapshot read-only transactions, the read timestamp chosen
    +     * for the transaction. Not returned by default: see
    +     * [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp].
    +     *
    +     * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +     * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +     * 
    + * + * .google.protobuf.Timestamp read_timestamp = 2; + */ + public Builder setReadTimestamp(com.google.protobuf.Timestamp value) { + if (readTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + readTimestamp_ = value; + } else { + readTimestampBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * For snapshot read-only transactions, the read timestamp chosen
    +     * for the transaction. Not returned by default: see
    +     * [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp].
    +     *
    +     * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +     * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +     * 
    + * + * .google.protobuf.Timestamp read_timestamp = 2; + */ + public Builder setReadTimestamp(com.google.protobuf.Timestamp.Builder builderForValue) { + if (readTimestampBuilder_ == null) { + readTimestamp_ = builderForValue.build(); + } else { + readTimestampBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * For snapshot read-only transactions, the read timestamp chosen
    +     * for the transaction. Not returned by default: see
    +     * [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp].
    +     *
    +     * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +     * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +     * 
    + * + * .google.protobuf.Timestamp read_timestamp = 2; + */ + public Builder mergeReadTimestamp(com.google.protobuf.Timestamp value) { + if (readTimestampBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && readTimestamp_ != null + && readTimestamp_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getReadTimestampBuilder().mergeFrom(value); + } else { + readTimestamp_ = value; + } + } else { + readTimestampBuilder_.mergeFrom(value); + } + if (readTimestamp_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * For snapshot read-only transactions, the read timestamp chosen
    +     * for the transaction. Not returned by default: see
    +     * [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp].
    +     *
    +     * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +     * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +     * 
    + * + * .google.protobuf.Timestamp read_timestamp = 2; + */ + public Builder clearReadTimestamp() { + bitField0_ = (bitField0_ & ~0x00000002); + readTimestamp_ = null; + if (readTimestampBuilder_ != null) { + readTimestampBuilder_.dispose(); + readTimestampBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * For snapshot read-only transactions, the read timestamp chosen
    +     * for the transaction. Not returned by default: see
    +     * [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp].
    +     *
    +     * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +     * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +     * 
    + * + * .google.protobuf.Timestamp read_timestamp = 2; + */ + public com.google.protobuf.Timestamp.Builder getReadTimestampBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetReadTimestampFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * For snapshot read-only transactions, the read timestamp chosen
    +     * for the transaction. Not returned by default: see
    +     * [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp].
    +     *
    +     * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +     * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +     * 
    + * + * .google.protobuf.Timestamp read_timestamp = 2; + */ + public com.google.protobuf.TimestampOrBuilder getReadTimestampOrBuilder() { + if (readTimestampBuilder_ != null) { + return readTimestampBuilder_.getMessageOrBuilder(); + } else { + return readTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : readTimestamp_; + } + } + + /** + * + * + *
    +     * For snapshot read-only transactions, the read timestamp chosen
    +     * for the transaction. Not returned by default: see
    +     * [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp].
    +     *
    +     * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +     * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +     * 
    + * + * .google.protobuf.Timestamp read_timestamp = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetReadTimestampFieldBuilder() { + if (readTimestampBuilder_ == null) { + readTimestampBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getReadTimestamp(), getParentForChildren(), isClean()); + readTimestamp_ = null; + } + return readTimestampBuilder_; + } + + private com.google.spanner.v1.MultiplexedSessionPrecommitToken precommitToken_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + precommitTokenBuilder_; + + /** + * + * + *
    +     * A precommit token is included in the response of a BeginTransaction
    +     * request if the read-write transaction is on a multiplexed session and
    +     * a mutation_key was specified in the
    +     * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +     * request for this transaction.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + * + * @return Whether the precommitToken field is set. + */ + public boolean hasPrecommitToken() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * A precommit token is included in the response of a BeginTransaction
    +     * request if the read-write transaction is on a multiplexed session and
    +     * a mutation_key was specified in the
    +     * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +     * request for this transaction.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + * + * @return The precommitToken. + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + if (precommitTokenBuilder_ == null) { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } else { + return precommitTokenBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * A precommit token is included in the response of a BeginTransaction
    +     * request if the read-write transaction is on a multiplexed session and
    +     * a mutation_key was specified in the
    +     * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +     * request for this transaction.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + */ + public Builder setPrecommitToken(com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + precommitToken_ = value; + } else { + precommitTokenBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A precommit token is included in the response of a BeginTransaction
    +     * request if the read-write transaction is on a multiplexed session and
    +     * a mutation_key was specified in the
    +     * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +     * request for this transaction.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + */ + public Builder setPrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder builderForValue) { + if (precommitTokenBuilder_ == null) { + precommitToken_ = builderForValue.build(); + } else { + precommitTokenBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * A precommit token is included in the response of a BeginTransaction
    +     * request if the read-write transaction is on a multiplexed session and
    +     * a mutation_key was specified in the
    +     * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +     * request for this transaction.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + */ + public Builder mergePrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && precommitToken_ != null + && precommitToken_ + != com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance()) { + getPrecommitTokenBuilder().mergeFrom(value); + } else { + precommitToken_ = value; + } + } else { + precommitTokenBuilder_.mergeFrom(value); + } + if (precommitToken_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * A precommit token is included in the response of a BeginTransaction
    +     * request if the read-write transaction is on a multiplexed session and
    +     * a mutation_key was specified in the
    +     * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +     * request for this transaction.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + */ + public Builder clearPrecommitToken() { + bitField0_ = (bitField0_ & ~0x00000004); + precommitToken_ = null; + if (precommitTokenBuilder_ != null) { + precommitTokenBuilder_.dispose(); + precommitTokenBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * A precommit token is included in the response of a BeginTransaction
    +     * request if the read-write transaction is on a multiplexed session and
    +     * a mutation_key was specified in the
    +     * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +     * request for this transaction.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder + getPrecommitTokenBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetPrecommitTokenFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * A precommit token is included in the response of a BeginTransaction
    +     * request if the read-write transaction is on a multiplexed session and
    +     * a mutation_key was specified in the
    +     * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +     * request for this transaction.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + if (precommitTokenBuilder_ != null) { + return precommitTokenBuilder_.getMessageOrBuilder(); + } else { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + } + + /** + * + * + *
    +     * A precommit token is included in the response of a BeginTransaction
    +     * request if the read-write transaction is on a multiplexed session and
    +     * a mutation_key was specified in the
    +     * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +     * request for this transaction.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + internalGetPrecommitTokenFieldBuilder() { + if (precommitTokenBuilder_ == null) { + precommitTokenBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder>( + getPrecommitToken(), getParentForChildren(), isClean()); + precommitToken_ = null; + } + return precommitTokenBuilder_; + } + + private com.google.spanner.v1.CacheUpdate cacheUpdate_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.CacheUpdate, + com.google.spanner.v1.CacheUpdate.Builder, + com.google.spanner.v1.CacheUpdateOrBuilder> + cacheUpdateBuilder_; + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the cacheUpdate field is set. + */ + public boolean hasCacheUpdate() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The cacheUpdate. + */ + public com.google.spanner.v1.CacheUpdate getCacheUpdate() { + if (cacheUpdateBuilder_ == null) { + return cacheUpdate_ == null + ? com.google.spanner.v1.CacheUpdate.getDefaultInstance() + : cacheUpdate_; + } else { + return cacheUpdateBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCacheUpdate(com.google.spanner.v1.CacheUpdate value) { + if (cacheUpdateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cacheUpdate_ = value; + } else { + cacheUpdateBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setCacheUpdate(com.google.spanner.v1.CacheUpdate.Builder builderForValue) { + if (cacheUpdateBuilder_ == null) { + cacheUpdate_ = builderForValue.build(); + } else { + cacheUpdateBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeCacheUpdate(com.google.spanner.v1.CacheUpdate value) { + if (cacheUpdateBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && cacheUpdate_ != null + && cacheUpdate_ != com.google.spanner.v1.CacheUpdate.getDefaultInstance()) { + getCacheUpdateBuilder().mergeFrom(value); + } else { + cacheUpdate_ = value; + } + } else { + cacheUpdateBuilder_.mergeFrom(value); + } + if (cacheUpdate_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearCacheUpdate() { + bitField0_ = (bitField0_ & ~0x00000008); + cacheUpdate_ = null; + if (cacheUpdateBuilder_ != null) { + cacheUpdateBuilder_.dispose(); + cacheUpdateBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.CacheUpdate.Builder getCacheUpdateBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return internalGetCacheUpdateFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.CacheUpdateOrBuilder getCacheUpdateOrBuilder() { + if (cacheUpdateBuilder_ != null) { + return cacheUpdateBuilder_.getMessageOrBuilder(); + } else { + return cacheUpdate_ == null + ? com.google.spanner.v1.CacheUpdate.getDefaultInstance() + : cacheUpdate_; + } + } + + /** + * + * + *
    +     * Optional. A cache update expresses a set of changes the client should
    +     * incorporate into its location cache. The client should discard the changes
    +     * if they are older than the data it already has. This data can be obtained
    +     * in response to requests that included a `RoutingHint` field, but may also
    +     * be obtained by explicit location-fetching RPCs which may be added in the
    +     * future.
    +     * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.CacheUpdate, + com.google.spanner.v1.CacheUpdate.Builder, + com.google.spanner.v1.CacheUpdateOrBuilder> + internalGetCacheUpdateFieldBuilder() { + if (cacheUpdateBuilder_ == null) { + cacheUpdateBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.CacheUpdate, + com.google.spanner.v1.CacheUpdate.Builder, + com.google.spanner.v1.CacheUpdateOrBuilder>( + getCacheUpdate(), getParentForChildren(), isClean()); + cacheUpdate_ = null; + } + return cacheUpdateBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.Transaction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.Transaction) + private static final com.google.spanner.v1.Transaction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.Transaction(); + } + + public static com.google.spanner.v1.Transaction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Transaction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); 
+ } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.Transaction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptions.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptions.java new file mode 100644 index 000000000000..ecba9c42e3eb --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptions.java @@ -0,0 +1,6235 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/transaction.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * Options to use for transactions.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.TransactionOptions} + */ +@com.google.protobuf.Generated +public final class TransactionOptions extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.TransactionOptions) + TransactionOptionsOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "TransactionOptions"); + } + + // Use TransactionOptions.newBuilder() to construct. + private TransactionOptions(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private TransactionOptions() { + isolationLevel_ = 0; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.TransactionOptions.class, + com.google.spanner.v1.TransactionOptions.Builder.class); + } + + /** + * + * + *
    +   * `IsolationLevel` is used when setting the [isolation
    +   * level](https://cloud.google.com/spanner/docs/isolation-levels) for a
    +   * transaction.
    +   * 
    + * + * Protobuf enum {@code google.spanner.v1.TransactionOptions.IsolationLevel} + */ + public enum IsolationLevel implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Default value.
    +     *
    +     * If the value is not specified, the `SERIALIZABLE` isolation level is
    +     * used.
    +     * 
    + * + * ISOLATION_LEVEL_UNSPECIFIED = 0; + */ + ISOLATION_LEVEL_UNSPECIFIED(0), + /** + * + * + *
    +     * All transactions appear as if they executed in a serial order, even if
    +     * some of the reads, writes, and other operations of distinct transactions
    +     * actually occurred in parallel. Spanner assigns commit timestamps that
    +     * reflect the order of committed transactions to implement this property.
    +     * Spanner offers a stronger guarantee than serializability called external
    +     * consistency. For more information, see
    +     * [TrueTime and external
    +     * consistency](https://cloud.google.com/spanner/docs/true-time-external-consistency#serializability).
    +     * 
    + * + * SERIALIZABLE = 1; + */ + SERIALIZABLE(1), + /** + * + * + *
    +     * All reads performed during the transaction observe a consistent snapshot
    +     * of the database, and the transaction is only successfully committed in
    +     * the absence of conflicts between its updates and any concurrent updates
    +     * that have occurred since that snapshot. Consequently, in contrast to
    +     * `SERIALIZABLE` transactions, only write-write conflicts are detected in
    +     * snapshot transactions.
    +     *
    +     * This isolation level does not support read-only and partitioned DML
    +     * transactions.
    +     *
    +     * When `REPEATABLE_READ` is specified on a read-write transaction, the
    +     * locking semantics default to `OPTIMISTIC`.
    +     * 
    + * + * REPEATABLE_READ = 2; + */ + REPEATABLE_READ(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "IsolationLevel"); + } + + /** + * + * + *
    +     * Default value.
    +     *
    +     * If the value is not specified, the `SERIALIZABLE` isolation level is
    +     * used.
    +     * 
    + * + * ISOLATION_LEVEL_UNSPECIFIED = 0; + */ + public static final int ISOLATION_LEVEL_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +     * All transactions appear as if they executed in a serial order, even if
    +     * some of the reads, writes, and other operations of distinct transactions
    +     * actually occurred in parallel. Spanner assigns commit timestamps that
    +     * reflect the order of committed transactions to implement this property.
    +     * Spanner offers a stronger guarantee than serializability called external
    +     * consistency. For more information, see
    +     * [TrueTime and external
    +     * consistency](https://cloud.google.com/spanner/docs/true-time-external-consistency#serializability).
    +     * 
    + * + * SERIALIZABLE = 1; + */ + public static final int SERIALIZABLE_VALUE = 1; + + /** + * + * + *
    +     * All reads performed during the transaction observe a consistent snapshot
    +     * of the database, and the transaction is only successfully committed in
    +     * the absence of conflicts between its updates and any concurrent updates
    +     * that have occurred since that snapshot. Consequently, in contrast to
    +     * `SERIALIZABLE` transactions, only write-write conflicts are detected in
    +     * snapshot transactions.
    +     *
    +     * This isolation level does not support read-only and partitioned DML
    +     * transactions.
    +     *
    +     * When `REPEATABLE_READ` is specified on a read-write transaction, the
    +     * locking semantics default to `OPTIMISTIC`.
    +     * 
    + * + * REPEATABLE_READ = 2; + */ + public static final int REPEATABLE_READ_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static IsolationLevel valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static IsolationLevel forNumber(int value) { + switch (value) { + case 0: + return ISOLATION_LEVEL_UNSPECIFIED; + case 1: + return SERIALIZABLE; + case 2: + return REPEATABLE_READ; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public IsolationLevel findValueByNumber(int number) { + return IsolationLevel.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.v1.TransactionOptions.getDescriptor().getEnumTypes().get(0); + } + + private static final IsolationLevel[] VALUES = values(); + + public static 
IsolationLevel valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private IsolationLevel(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.v1.TransactionOptions.IsolationLevel) + } + + public interface ReadWriteOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.TransactionOptions.ReadWrite) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * Read lock mode for the transaction.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode read_lock_mode = 1; + * + * @return The enum numeric value on the wire for readLockMode. + */ + int getReadLockModeValue(); + + /** + * + * + *
    +     * Read lock mode for the transaction.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode read_lock_mode = 1; + * + * @return The readLockMode. + */ + com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode getReadLockMode(); + + /** + * + * + *
    +     * Optional. Clients should pass the transaction ID of the previous
    +     * transaction attempt that was aborted if this transaction is being
    +     * executed on a multiplexed session.
    +     * 
    + * + * + * bytes multiplexed_session_previous_transaction_id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The multiplexedSessionPreviousTransactionId. + */ + com.google.protobuf.ByteString getMultiplexedSessionPreviousTransactionId(); + } + + /** + * + * + *
    +   * Message type to initiate a read-write transaction. Currently this
    +   * transaction type has no options.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.TransactionOptions.ReadWrite} + */ + public static final class ReadWrite extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.TransactionOptions.ReadWrite) + ReadWriteOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ReadWrite"); + } + + // Use ReadWrite.newBuilder() to construct. + private ReadWrite(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ReadWrite() { + readLockMode_ = 0; + multiplexedSessionPreviousTransactionId_ = com.google.protobuf.ByteString.EMPTY; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionOptions_ReadWrite_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionOptions_ReadWrite_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.TransactionOptions.ReadWrite.class, + com.google.spanner.v1.TransactionOptions.ReadWrite.Builder.class); + } + + /** + * + * + *
    +     * `ReadLockMode` is used to set the read lock mode for read-write
    +     * transactions.
    +     * 
    + * + * Protobuf enum {@code google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode} + */ + public enum ReadLockMode implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +       * Default value.
    +       *
    +       * * If isolation level is
    +       * [SERIALIZABLE][google.spanner.v1.TransactionOptions.IsolationLevel.SERIALIZABLE],
    +       * locking semantics default to `PESSIMISTIC`.
    +       * * If isolation level is
    +       * [REPEATABLE_READ][google.spanner.v1.TransactionOptions.IsolationLevel.REPEATABLE_READ],
    +       * locking semantics default to `OPTIMISTIC`.
    +       * * See
    +       * [Concurrency
    +       * control](https://cloud.google.com/spanner/docs/concurrency-control)
    +       * for more details.
    +       * 
    + * + * READ_LOCK_MODE_UNSPECIFIED = 0; + */ + READ_LOCK_MODE_UNSPECIFIED(0), + /** + * + * + *
    +       * Pessimistic lock mode.
    +       *
    +       * Lock acquisition behavior depends on the isolation level in use. In
    +       * [SERIALIZABLE][google.spanner.v1.TransactionOptions.IsolationLevel.SERIALIZABLE]
    +       * isolation, reads and writes acquire necessary locks during transaction
    +       * statement execution. In
    +       * [REPEATABLE_READ][google.spanner.v1.TransactionOptions.IsolationLevel.REPEATABLE_READ]
    +       * isolation, reads that explicitly request to be locked and writes
    +       * acquire locks.
    +       * See
    +       * [Concurrency
    +       * control](https://cloud.google.com/spanner/docs/concurrency-control) for
    +       * details on the types of locks acquired at each transaction step.
    +       * 
    + * + * PESSIMISTIC = 1; + */ + PESSIMISTIC(1), + /** + * + * + *
    +       * Optimistic lock mode.
    +       *
    +       * Lock acquisition behavior depends on the isolation level in use. In
    +       * both
    +       * [SERIALIZABLE][google.spanner.v1.TransactionOptions.IsolationLevel.SERIALIZABLE]
    +       * and
    +       * [REPEATABLE_READ][google.spanner.v1.TransactionOptions.IsolationLevel.REPEATABLE_READ]
    +       * isolation, reads and writes do not acquire locks during transaction
    +       * statement execution.
    +       * See
    +       * [Concurrency
    +       * control](https://cloud.google.com/spanner/docs/concurrency-control) for
    +       * details on how the guarantees of each isolation level are provided at
    +       * commit time.
    +       * 
    + * + * OPTIMISTIC = 2; + */ + OPTIMISTIC(2), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ReadLockMode"); + } + + /** + * + * + *
    +       * Default value.
    +       *
    +       * * If isolation level is
    +       * [SERIALIZABLE][google.spanner.v1.TransactionOptions.IsolationLevel.SERIALIZABLE],
    +       * locking semantics default to `PESSIMISTIC`.
    +       * * If isolation level is
    +       * [REPEATABLE_READ][google.spanner.v1.TransactionOptions.IsolationLevel.REPEATABLE_READ],
    +       * locking semantics default to `OPTIMISTIC`.
    +       * * See
    +       * [Concurrency
    +       * control](https://cloud.google.com/spanner/docs/concurrency-control)
    +       * for more details.
    +       * 
    + * + * READ_LOCK_MODE_UNSPECIFIED = 0; + */ + public static final int READ_LOCK_MODE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +       * Pessimistic lock mode.
    +       *
    +       * Lock acquisition behavior depends on the isolation level in use. In
    +       * [SERIALIZABLE][google.spanner.v1.TransactionOptions.IsolationLevel.SERIALIZABLE]
    +       * isolation, reads and writes acquire necessary locks during transaction
    +       * statement execution. In
    +       * [REPEATABLE_READ][google.spanner.v1.TransactionOptions.IsolationLevel.REPEATABLE_READ]
    +       * isolation, reads that explicitly request to be locked and writes
    +       * acquire locks.
    +       * See
    +       * [Concurrency
    +       * control](https://cloud.google.com/spanner/docs/concurrency-control) for
    +       * details on the types of locks acquired at each transaction step.
    +       * 
    + * + * PESSIMISTIC = 1; + */ + public static final int PESSIMISTIC_VALUE = 1; + + /** + * + * + *
    +       * Optimistic lock mode.
    +       *
    +       * Lock acquisition behavior depends on the isolation level in use. In
    +       * both
    +       * [SERIALIZABLE][google.spanner.v1.TransactionOptions.IsolationLevel.SERIALIZABLE]
    +       * and
    +       * [REPEATABLE_READ][google.spanner.v1.TransactionOptions.IsolationLevel.REPEATABLE_READ]
    +       * isolation, reads and writes do not acquire locks during transaction
    +       * statement execution.
    +       * See
    +       * [Concurrency
    +       * control](https://cloud.google.com/spanner/docs/concurrency-control) for
    +       * details on how the guarantees of each isolation level are provided at
    +       * commit time.
    +       * 
    + * + * OPTIMISTIC = 2; + */ + public static final int OPTIMISTIC_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ReadLockMode valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static ReadLockMode forNumber(int value) { + switch (value) { + case 0: + return READ_LOCK_MODE_UNSPECIFIED; + case 1: + return PESSIMISTIC; + case 2: + return OPTIMISTIC; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public ReadLockMode findValueByNumber(int number) { + return ReadLockMode.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.v1.TransactionOptions.ReadWrite.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final ReadLockMode[] VALUES = values(); + + public static ReadLockMode 
valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private ReadLockMode(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode) + } + + public static final int READ_LOCK_MODE_FIELD_NUMBER = 1; + private int readLockMode_ = 0; + + /** + * + * + *
    +     * Read lock mode for the transaction.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode read_lock_mode = 1; + * + * @return The enum numeric value on the wire for readLockMode. + */ + @java.lang.Override + public int getReadLockModeValue() { + return readLockMode_; + } + + /** + * + * + *
    +     * Read lock mode for the transaction.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode read_lock_mode = 1; + * + * @return The readLockMode. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode getReadLockMode() { + com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode result = + com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode.forNumber(readLockMode_); + return result == null + ? com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode.UNRECOGNIZED + : result; + } + + public static final int MULTIPLEXED_SESSION_PREVIOUS_TRANSACTION_ID_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString multiplexedSessionPreviousTransactionId_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +     * Optional. Clients should pass the transaction ID of the previous
    +     * transaction attempt that was aborted if this transaction is being
    +     * executed on a multiplexed session.
    +     * 
    + * + * + * bytes multiplexed_session_previous_transaction_id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The multiplexedSessionPreviousTransactionId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getMultiplexedSessionPreviousTransactionId() { + return multiplexedSessionPreviousTransactionId_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (readLockMode_ + != com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode + .READ_LOCK_MODE_UNSPECIFIED + .getNumber()) { + output.writeEnum(1, readLockMode_); + } + if (!multiplexedSessionPreviousTransactionId_.isEmpty()) { + output.writeBytes(2, multiplexedSessionPreviousTransactionId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (readLockMode_ + != com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode + .READ_LOCK_MODE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, readLockMode_); + } + if (!multiplexedSessionPreviousTransactionId_.isEmpty()) { + size += + com.google.protobuf.CodedOutputStream.computeBytesSize( + 2, multiplexedSessionPreviousTransactionId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.TransactionOptions.ReadWrite)) { + return super.equals(obj); + } + 
com.google.spanner.v1.TransactionOptions.ReadWrite other = + (com.google.spanner.v1.TransactionOptions.ReadWrite) obj; + + if (readLockMode_ != other.readLockMode_) return false; + if (!getMultiplexedSessionPreviousTransactionId() + .equals(other.getMultiplexedSessionPreviousTransactionId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + READ_LOCK_MODE_FIELD_NUMBER; + hash = (53 * hash) + readLockMode_; + hash = (37 * hash) + MULTIPLEXED_SESSION_PREVIOUS_TRANSACTION_ID_FIELD_NUMBER; + hash = (53 * hash) + getMultiplexedSessionPreviousTransactionId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.TransactionOptions.ReadWrite parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.TransactionOptions.ReadWrite parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionOptions.ReadWrite parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.TransactionOptions.ReadWrite parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.v1.TransactionOptions.ReadWrite parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.TransactionOptions.ReadWrite parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionOptions.ReadWrite parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.TransactionOptions.ReadWrite parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionOptions.ReadWrite parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.TransactionOptions.ReadWrite parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionOptions.ReadWrite parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.TransactionOptions.ReadWrite parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.TransactionOptions.ReadWrite prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * Message type to initiate a read-write transaction. Currently this
    +     * transaction type has no options.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.TransactionOptions.ReadWrite} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.TransactionOptions.ReadWrite) + com.google.spanner.v1.TransactionOptions.ReadWriteOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionOptions_ReadWrite_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionOptions_ReadWrite_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.TransactionOptions.ReadWrite.class, + com.google.spanner.v1.TransactionOptions.ReadWrite.Builder.class); + } + + // Construct using com.google.spanner.v1.TransactionOptions.ReadWrite.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + readLockMode_ = 0; + multiplexedSessionPreviousTransactionId_ = com.google.protobuf.ByteString.EMPTY; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionOptions_ReadWrite_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.ReadWrite getDefaultInstanceForType() { + return com.google.spanner.v1.TransactionOptions.ReadWrite.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.ReadWrite build() { + com.google.spanner.v1.TransactionOptions.ReadWrite result = 
buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.ReadWrite buildPartial() { + com.google.spanner.v1.TransactionOptions.ReadWrite result = + new com.google.spanner.v1.TransactionOptions.ReadWrite(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.TransactionOptions.ReadWrite result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.readLockMode_ = readLockMode_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.multiplexedSessionPreviousTransactionId_ = + multiplexedSessionPreviousTransactionId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.TransactionOptions.ReadWrite) { + return mergeFrom((com.google.spanner.v1.TransactionOptions.ReadWrite) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.TransactionOptions.ReadWrite other) { + if (other == com.google.spanner.v1.TransactionOptions.ReadWrite.getDefaultInstance()) + return this; + if (other.readLockMode_ != 0) { + setReadLockModeValue(other.getReadLockModeValue()); + } + if (!other.getMultiplexedSessionPreviousTransactionId().isEmpty()) { + setMultiplexedSessionPreviousTransactionId( + other.getMultiplexedSessionPreviousTransactionId()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new 
java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + readLockMode_ = input.readEnum(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + multiplexedSessionPreviousTransactionId_ = input.readBytes(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int readLockMode_ = 0; + + /** + * + * + *
    +       * Read lock mode for the transaction.
    +       * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode read_lock_mode = 1; + * + * + * @return The enum numeric value on the wire for readLockMode. + */ + @java.lang.Override + public int getReadLockModeValue() { + return readLockMode_; + } + + /** + * + * + *
    +       * Read lock mode for the transaction.
    +       * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode read_lock_mode = 1; + * + * + * @param value The enum numeric value on the wire for readLockMode to set. + * @return This builder for chaining. + */ + public Builder setReadLockModeValue(int value) { + readLockMode_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Read lock mode for the transaction.
    +       * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode read_lock_mode = 1; + * + * + * @return The readLockMode. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode getReadLockMode() { + com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode result = + com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode.forNumber( + readLockMode_); + return result == null + ? com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +       * Read lock mode for the transaction.
    +       * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode read_lock_mode = 1; + * + * + * @param value The readLockMode to set. + * @return This builder for chaining. + */ + public Builder setReadLockMode( + com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + readLockMode_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +       * Read lock mode for the transaction.
    +       * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode read_lock_mode = 1; + * + * + * @return This builder for chaining. + */ + public Builder clearReadLockMode() { + bitField0_ = (bitField0_ & ~0x00000001); + readLockMode_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString multiplexedSessionPreviousTransactionId_ = + com.google.protobuf.ByteString.EMPTY; + + /** + * + * + *
    +       * Optional. Clients should pass the transaction ID of the previous
    +       * transaction attempt that was aborted if this transaction is being
    +       * executed on a multiplexed session.
    +       * 
    + * + * + * bytes multiplexed_session_previous_transaction_id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The multiplexedSessionPreviousTransactionId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getMultiplexedSessionPreviousTransactionId() { + return multiplexedSessionPreviousTransactionId_; + } + + /** + * + * + *
    +       * Optional. Clients should pass the transaction ID of the previous
    +       * transaction attempt that was aborted if this transaction is being
    +       * executed on a multiplexed session.
    +       * 
    + * + * + * bytes multiplexed_session_previous_transaction_id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The multiplexedSessionPreviousTransactionId to set. + * @return This builder for chaining. + */ + public Builder setMultiplexedSessionPreviousTransactionId( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + multiplexedSessionPreviousTransactionId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Optional. Clients should pass the transaction ID of the previous
    +       * transaction attempt that was aborted if this transaction is being
    +       * executed on a multiplexed session.
    +       * 
    + * + * + * bytes multiplexed_session_previous_transaction_id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearMultiplexedSessionPreviousTransactionId() { + bitField0_ = (bitField0_ & ~0x00000002); + multiplexedSessionPreviousTransactionId_ = + getDefaultInstance().getMultiplexedSessionPreviousTransactionId(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.TransactionOptions.ReadWrite) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.TransactionOptions.ReadWrite) + private static final com.google.spanner.v1.TransactionOptions.ReadWrite DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.TransactionOptions.ReadWrite(); + } + + public static com.google.spanner.v1.TransactionOptions.ReadWrite getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadWrite parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return 
PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.ReadWrite getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface PartitionedDmlOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.TransactionOptions.PartitionedDml) + com.google.protobuf.MessageOrBuilder {} + + /** + * + * + *
    +   * Message type to initiate a Partitioned DML transaction.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.TransactionOptions.PartitionedDml} + */ + public static final class PartitionedDml extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.TransactionOptions.PartitionedDml) + PartitionedDmlOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "PartitionedDml"); + } + + // Use PartitionedDml.newBuilder() to construct. + private PartitionedDml(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private PartitionedDml() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionOptions_PartitionedDml_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionOptions_PartitionedDml_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.TransactionOptions.PartitionedDml.class, + com.google.spanner.v1.TransactionOptions.PartitionedDml.Builder.class); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = 
memoizedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.TransactionOptions.PartitionedDml)) { + return super.equals(obj); + } + com.google.spanner.v1.TransactionOptions.PartitionedDml other = + (com.google.spanner.v1.TransactionOptions.PartitionedDml) obj; + + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.TransactionOptions.PartitionedDml parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.TransactionOptions.PartitionedDml parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionOptions.PartitionedDml parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.TransactionOptions.PartitionedDml parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionOptions.PartitionedDml 
parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.TransactionOptions.PartitionedDml parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionOptions.PartitionedDml parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.TransactionOptions.PartitionedDml parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionOptions.PartitionedDml parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.TransactionOptions.PartitionedDml parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionOptions.PartitionedDml parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.TransactionOptions.PartitionedDml parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { 
+ return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.v1.TransactionOptions.PartitionedDml prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * Message type to initiate a Partitioned DML transaction.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.TransactionOptions.PartitionedDml} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.TransactionOptions.PartitionedDml) + com.google.spanner.v1.TransactionOptions.PartitionedDmlOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionOptions_PartitionedDml_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionOptions_PartitionedDml_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.TransactionOptions.PartitionedDml.class, + com.google.spanner.v1.TransactionOptions.PartitionedDml.Builder.class); + } + + // Construct using com.google.spanner.v1.TransactionOptions.PartitionedDml.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionOptions_PartitionedDml_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.PartitionedDml getDefaultInstanceForType() { + return com.google.spanner.v1.TransactionOptions.PartitionedDml.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.PartitionedDml build() { + com.google.spanner.v1.TransactionOptions.PartitionedDml result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.PartitionedDml buildPartial() { + com.google.spanner.v1.TransactionOptions.PartitionedDml result = + new com.google.spanner.v1.TransactionOptions.PartitionedDml(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.TransactionOptions.PartitionedDml) { + return mergeFrom((com.google.spanner.v1.TransactionOptions.PartitionedDml) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.TransactionOptions.PartitionedDml other) { + if (other == com.google.spanner.v1.TransactionOptions.PartitionedDml.getDefaultInstance()) + return this; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.TransactionOptions.PartitionedDml) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.TransactionOptions.PartitionedDml) + private 
static final com.google.spanner.v1.TransactionOptions.PartitionedDml DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.TransactionOptions.PartitionedDml(); + } + + public static com.google.spanner.v1.TransactionOptions.PartitionedDml getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PartitionedDml parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.PartitionedDml getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface ReadOnlyOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.TransactionOptions.ReadOnly) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +     * Read at a timestamp where all previously committed transactions
    +     * are visible.
    +     * 
    + * + * bool strong = 1; + * + * @return Whether the strong field is set. + */ + boolean hasStrong(); + + /** + * + * + *
    +     * Read at a timestamp where all previously committed transactions
    +     * are visible.
    +     * 
    + * + * bool strong = 1; + * + * @return The strong. + */ + boolean getStrong(); + + /** + * + * + *
    +     * Executes all reads at a timestamp >= `min_read_timestamp`.
    +     *
    +     * This is useful for requesting fresher data than some previous
    +     * read, or data that is fresh enough to observe the effects of some
    +     * previously committed transaction whose timestamp is known.
    +     *
    +     * Note that this option can only be used in single-use transactions.
    +     *
    +     * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +     * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +     * 
    + * + * .google.protobuf.Timestamp min_read_timestamp = 2; + * + * @return Whether the minReadTimestamp field is set. + */ + boolean hasMinReadTimestamp(); + + /** + * + * + *
    +     * Executes all reads at a timestamp >= `min_read_timestamp`.
    +     *
    +     * This is useful for requesting fresher data than some previous
    +     * read, or data that is fresh enough to observe the effects of some
    +     * previously committed transaction whose timestamp is known.
    +     *
    +     * Note that this option can only be used in single-use transactions.
    +     *
    +     * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +     * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +     * 
    + * + * .google.protobuf.Timestamp min_read_timestamp = 2; + * + * @return The minReadTimestamp. + */ + com.google.protobuf.Timestamp getMinReadTimestamp(); + + /** + * + * + *
    +     * Executes all reads at a timestamp >= `min_read_timestamp`.
    +     *
    +     * This is useful for requesting fresher data than some previous
    +     * read, or data that is fresh enough to observe the effects of some
    +     * previously committed transaction whose timestamp is known.
    +     *
    +     * Note that this option can only be used in single-use transactions.
    +     *
    +     * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +     * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +     * 
    + * + * .google.protobuf.Timestamp min_read_timestamp = 2; + */ + com.google.protobuf.TimestampOrBuilder getMinReadTimestampOrBuilder(); + + /** + * + * + *
    +     * Read data at a timestamp >= `NOW - max_staleness`
    +     * seconds. Guarantees that all writes that have committed more
    +     * than the specified number of seconds ago are visible. Because
    +     * Cloud Spanner chooses the exact timestamp, this mode works even if
    +     * the client's local clock is substantially skewed from Cloud Spanner
    +     * commit timestamps.
    +     *
    +     * Useful for reading the freshest data available at a nearby
    +     * replica, while bounding the possible staleness if the local
    +     * replica has fallen behind.
    +     *
    +     * Note that this option can only be used in single-use
    +     * transactions.
    +     * 
    + * + * .google.protobuf.Duration max_staleness = 3; + * + * @return Whether the maxStaleness field is set. + */ + boolean hasMaxStaleness(); + + /** + * + * + *
    +     * Read data at a timestamp >= `NOW - max_staleness`
    +     * seconds. Guarantees that all writes that have committed more
    +     * than the specified number of seconds ago are visible. Because
    +     * Cloud Spanner chooses the exact timestamp, this mode works even if
    +     * the client's local clock is substantially skewed from Cloud Spanner
    +     * commit timestamps.
    +     *
    +     * Useful for reading the freshest data available at a nearby
    +     * replica, while bounding the possible staleness if the local
    +     * replica has fallen behind.
    +     *
    +     * Note that this option can only be used in single-use
    +     * transactions.
    +     * 
    + * + * .google.protobuf.Duration max_staleness = 3; + * + * @return The maxStaleness. + */ + com.google.protobuf.Duration getMaxStaleness(); + + /** + * + * + *
    +     * Read data at a timestamp >= `NOW - max_staleness`
    +     * seconds. Guarantees that all writes that have committed more
    +     * than the specified number of seconds ago are visible. Because
    +     * Cloud Spanner chooses the exact timestamp, this mode works even if
    +     * the client's local clock is substantially skewed from Cloud Spanner
    +     * commit timestamps.
    +     *
    +     * Useful for reading the freshest data available at a nearby
    +     * replica, while bounding the possible staleness if the local
    +     * replica has fallen behind.
    +     *
    +     * Note that this option can only be used in single-use
    +     * transactions.
    +     * 
    + * + * .google.protobuf.Duration max_staleness = 3; + */ + com.google.protobuf.DurationOrBuilder getMaxStalenessOrBuilder(); + + /** + * + * + *
    +     * Executes all reads at the given timestamp. Unlike other modes,
    +     * reads at a specific timestamp are repeatable; the same read at
    +     * the same timestamp always returns the same data. If the
    +     * timestamp is in the future, the read is blocked until the
    +     * specified timestamp, modulo the read's deadline.
    +     *
    +     * Useful for large scale consistent reads such as mapreduces, or
    +     * for coordinating many reads against a consistent snapshot of the
    +     * data.
    +     *
    +     * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +     * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +     * 
    + * + * .google.protobuf.Timestamp read_timestamp = 4; + * + * @return Whether the readTimestamp field is set. + */ + boolean hasReadTimestamp(); + + /** + * + * + *
    +     * Executes all reads at the given timestamp. Unlike other modes,
    +     * reads at a specific timestamp are repeatable; the same read at
    +     * the same timestamp always returns the same data. If the
    +     * timestamp is in the future, the read is blocked until the
    +     * specified timestamp, modulo the read's deadline.
    +     *
    +     * Useful for large scale consistent reads such as mapreduces, or
    +     * for coordinating many reads against a consistent snapshot of the
    +     * data.
    +     *
    +     * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +     * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +     * 
    + * + * .google.protobuf.Timestamp read_timestamp = 4; + * + * @return The readTimestamp. + */ + com.google.protobuf.Timestamp getReadTimestamp(); + + /** + * + * + *
    +     * Executes all reads at the given timestamp. Unlike other modes,
    +     * reads at a specific timestamp are repeatable; the same read at
    +     * the same timestamp always returns the same data. If the
    +     * timestamp is in the future, the read is blocked until the
    +     * specified timestamp, modulo the read's deadline.
    +     *
    +     * Useful for large scale consistent reads such as mapreduces, or
    +     * for coordinating many reads against a consistent snapshot of the
    +     * data.
    +     *
    +     * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +     * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +     * 
    + * + * .google.protobuf.Timestamp read_timestamp = 4; + */ + com.google.protobuf.TimestampOrBuilder getReadTimestampOrBuilder(); + + /** + * + * + *
    +     * Executes all reads at a timestamp that is `exact_staleness`
    +     * old. The timestamp is chosen soon after the read is started.
    +     *
    +     * Guarantees that all writes that have committed more than the
    +     * specified number of seconds ago are visible. Because Cloud Spanner
    +     * chooses the exact timestamp, this mode works even if the client's
    +     * local clock is substantially skewed from Cloud Spanner commit
    +     * timestamps.
    +     *
    +     * Useful for reading at nearby replicas without the distributed
    +     * timestamp negotiation overhead of `max_staleness`.
    +     * 
    + * + * .google.protobuf.Duration exact_staleness = 5; + * + * @return Whether the exactStaleness field is set. + */ + boolean hasExactStaleness(); + + /** + * + * + *
    +     * Executes all reads at a timestamp that is `exact_staleness`
    +     * old. The timestamp is chosen soon after the read is started.
    +     *
    +     * Guarantees that all writes that have committed more than the
    +     * specified number of seconds ago are visible. Because Cloud Spanner
    +     * chooses the exact timestamp, this mode works even if the client's
    +     * local clock is substantially skewed from Cloud Spanner commit
    +     * timestamps.
    +     *
    +     * Useful for reading at nearby replicas without the distributed
    +     * timestamp negotiation overhead of `max_staleness`.
    +     * 
    + * + * .google.protobuf.Duration exact_staleness = 5; + * + * @return The exactStaleness. + */ + com.google.protobuf.Duration getExactStaleness(); + + /** + * + * + *
    +     * Executes all reads at a timestamp that is `exact_staleness`
    +     * old. The timestamp is chosen soon after the read is started.
    +     *
    +     * Guarantees that all writes that have committed more than the
    +     * specified number of seconds ago are visible. Because Cloud Spanner
    +     * chooses the exact timestamp, this mode works even if the client's
    +     * local clock is substantially skewed from Cloud Spanner commit
    +     * timestamps.
    +     *
    +     * Useful for reading at nearby replicas without the distributed
    +     * timestamp negotiation overhead of `max_staleness`.
    +     * 
    + * + * .google.protobuf.Duration exact_staleness = 5; + */ + com.google.protobuf.DurationOrBuilder getExactStalenessOrBuilder(); + + /** + * + * + *
    +     * If true, the Cloud Spanner-selected read timestamp is included in
    +     * the [Transaction][google.spanner.v1.Transaction] message that describes
    +     * the transaction.
    +     * 
    + * + * bool return_read_timestamp = 6; + * + * @return The returnReadTimestamp. + */ + boolean getReturnReadTimestamp(); + + com.google.spanner.v1.TransactionOptions.ReadOnly.TimestampBoundCase getTimestampBoundCase(); + } + + /** + * + * + *
    +   * Message type to initiate a read-only transaction.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.TransactionOptions.ReadOnly} + */ + public static final class ReadOnly extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.TransactionOptions.ReadOnly) + ReadOnlyOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "ReadOnly"); + } + + // Use ReadOnly.newBuilder() to construct. + private ReadOnly(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private ReadOnly() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionOptions_ReadOnly_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionOptions_ReadOnly_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.TransactionOptions.ReadOnly.class, + com.google.spanner.v1.TransactionOptions.ReadOnly.Builder.class); + } + + private int timestampBoundCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object timestampBound_; + + public enum TimestampBoundCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + STRONG(1), + MIN_READ_TIMESTAMP(2), + MAX_STALENESS(3), + READ_TIMESTAMP(4), + EXACT_STALENESS(5), + TIMESTAMPBOUND_NOT_SET(0); + private final int value; + + private TimestampBoundCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. 
+ * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static TimestampBoundCase valueOf(int value) { + return forNumber(value); + } + + public static TimestampBoundCase forNumber(int value) { + switch (value) { + case 1: + return STRONG; + case 2: + return MIN_READ_TIMESTAMP; + case 3: + return MAX_STALENESS; + case 4: + return READ_TIMESTAMP; + case 5: + return EXACT_STALENESS; + case 0: + return TIMESTAMPBOUND_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public TimestampBoundCase getTimestampBoundCase() { + return TimestampBoundCase.forNumber(timestampBoundCase_); + } + + public static final int STRONG_FIELD_NUMBER = 1; + + /** + * + * + *
    +     * Read at a timestamp where all previously committed transactions
    +     * are visible.
    +     * 
    + * + * bool strong = 1; + * + * @return Whether the strong field is set. + */ + @java.lang.Override + public boolean hasStrong() { + return timestampBoundCase_ == 1; + } + + /** + * + * + *
    +     * Read at a timestamp where all previously committed transactions
    +     * are visible.
    +     * 
    + * + * bool strong = 1; + * + * @return The strong. + */ + @java.lang.Override + public boolean getStrong() { + if (timestampBoundCase_ == 1) { + return (java.lang.Boolean) timestampBound_; + } + return false; + } + + public static final int MIN_READ_TIMESTAMP_FIELD_NUMBER = 2; + + /** + * + * + *
    +     * Executes all reads at a timestamp >= `min_read_timestamp`.
    +     *
    +     * This is useful for requesting fresher data than some previous
    +     * read, or data that is fresh enough to observe the effects of some
    +     * previously committed transaction whose timestamp is known.
    +     *
    +     * Note that this option can only be used in single-use transactions.
    +     *
    +     * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +     * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +     * 
    + * + * .google.protobuf.Timestamp min_read_timestamp = 2; + * + * @return Whether the minReadTimestamp field is set. + */ + @java.lang.Override + public boolean hasMinReadTimestamp() { + return timestampBoundCase_ == 2; + } + + /** + * + * + *
    +     * Executes all reads at a timestamp >= `min_read_timestamp`.
    +     *
    +     * This is useful for requesting fresher data than some previous
    +     * read, or data that is fresh enough to observe the effects of some
    +     * previously committed transaction whose timestamp is known.
    +     *
    +     * Note that this option can only be used in single-use transactions.
    +     *
    +     * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +     * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +     * 
    + * + * .google.protobuf.Timestamp min_read_timestamp = 2; + * + * @return The minReadTimestamp. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getMinReadTimestamp() { + if (timestampBoundCase_ == 2) { + return (com.google.protobuf.Timestamp) timestampBound_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + + /** + * + * + *
    +     * Executes all reads at a timestamp >= `min_read_timestamp`.
    +     *
    +     * This is useful for requesting fresher data than some previous
    +     * read, or data that is fresh enough to observe the effects of some
    +     * previously committed transaction whose timestamp is known.
    +     *
    +     * Note that this option can only be used in single-use transactions.
    +     *
    +     * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +     * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +     * 
    + * + * .google.protobuf.Timestamp min_read_timestamp = 2; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getMinReadTimestampOrBuilder() { + if (timestampBoundCase_ == 2) { + return (com.google.protobuf.Timestamp) timestampBound_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + + public static final int MAX_STALENESS_FIELD_NUMBER = 3; + + /** + * + * + *
    +     * Read data at a timestamp >= `NOW - max_staleness`
    +     * seconds. Guarantees that all writes that have committed more
    +     * than the specified number of seconds ago are visible. Because
    +     * Cloud Spanner chooses the exact timestamp, this mode works even if
    +     * the client's local clock is substantially skewed from Cloud Spanner
    +     * commit timestamps.
    +     *
    +     * Useful for reading the freshest data available at a nearby
    +     * replica, while bounding the possible staleness if the local
    +     * replica has fallen behind.
    +     *
    +     * Note that this option can only be used in single-use
    +     * transactions.
    +     * 
    + * + * .google.protobuf.Duration max_staleness = 3; + * + * @return Whether the maxStaleness field is set. + */ + @java.lang.Override + public boolean hasMaxStaleness() { + return timestampBoundCase_ == 3; + } + + /** + * + * + *
    +     * Read data at a timestamp >= `NOW - max_staleness`
    +     * seconds. Guarantees that all writes that have committed more
    +     * than the specified number of seconds ago are visible. Because
    +     * Cloud Spanner chooses the exact timestamp, this mode works even if
    +     * the client's local clock is substantially skewed from Cloud Spanner
    +     * commit timestamps.
    +     *
    +     * Useful for reading the freshest data available at a nearby
    +     * replica, while bounding the possible staleness if the local
    +     * replica has fallen behind.
    +     *
    +     * Note that this option can only be used in single-use
    +     * transactions.
    +     * 
    + * + * .google.protobuf.Duration max_staleness = 3; + * + * @return The maxStaleness. + */ + @java.lang.Override + public com.google.protobuf.Duration getMaxStaleness() { + if (timestampBoundCase_ == 3) { + return (com.google.protobuf.Duration) timestampBound_; + } + return com.google.protobuf.Duration.getDefaultInstance(); + } + + /** + * + * + *
    +     * Read data at a timestamp >= `NOW - max_staleness`
    +     * seconds. Guarantees that all writes that have committed more
    +     * than the specified number of seconds ago are visible. Because
    +     * Cloud Spanner chooses the exact timestamp, this mode works even if
    +     * the client's local clock is substantially skewed from Cloud Spanner
    +     * commit timestamps.
    +     *
    +     * Useful for reading the freshest data available at a nearby
    +     * replica, while bounding the possible staleness if the local
    +     * replica has fallen behind.
    +     *
    +     * Note that this option can only be used in single-use
    +     * transactions.
    +     * 
    + * + * .google.protobuf.Duration max_staleness = 3; + */ + @java.lang.Override + public com.google.protobuf.DurationOrBuilder getMaxStalenessOrBuilder() { + if (timestampBoundCase_ == 3) { + return (com.google.protobuf.Duration) timestampBound_; + } + return com.google.protobuf.Duration.getDefaultInstance(); + } + + public static final int READ_TIMESTAMP_FIELD_NUMBER = 4; + + /** + * + * + *
    +     * Executes all reads at the given timestamp. Unlike other modes,
    +     * reads at a specific timestamp are repeatable; the same read at
    +     * the same timestamp always returns the same data. If the
    +     * timestamp is in the future, the read is blocked until the
    +     * specified timestamp, modulo the read's deadline.
    +     *
    +     * Useful for large scale consistent reads such as mapreduces, or
    +     * for coordinating many reads against a consistent snapshot of the
    +     * data.
    +     *
    +     * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +     * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +     * 
    + * + * .google.protobuf.Timestamp read_timestamp = 4; + * + * @return Whether the readTimestamp field is set. + */ + @java.lang.Override + public boolean hasReadTimestamp() { + return timestampBoundCase_ == 4; + } + + /** + * + * + *
    +     * Executes all reads at the given timestamp. Unlike other modes,
    +     * reads at a specific timestamp are repeatable; the same read at
    +     * the same timestamp always returns the same data. If the
    +     * timestamp is in the future, the read is blocked until the
    +     * specified timestamp, modulo the read's deadline.
    +     *
    +     * Useful for large scale consistent reads such as mapreduces, or
    +     * for coordinating many reads against a consistent snapshot of the
    +     * data.
    +     *
    +     * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +     * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +     * 
    + * + * .google.protobuf.Timestamp read_timestamp = 4; + * + * @return The readTimestamp. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getReadTimestamp() { + if (timestampBoundCase_ == 4) { + return (com.google.protobuf.Timestamp) timestampBound_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + + /** + * + * + *
    +     * Executes all reads at the given timestamp. Unlike other modes,
    +     * reads at a specific timestamp are repeatable; the same read at
    +     * the same timestamp always returns the same data. If the
    +     * timestamp is in the future, the read is blocked until the
    +     * specified timestamp, modulo the read's deadline.
    +     *
    +     * Useful for large scale consistent reads such as mapreduces, or
    +     * for coordinating many reads against a consistent snapshot of the
    +     * data.
    +     *
    +     * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +     * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +     * 
    + * + * .google.protobuf.Timestamp read_timestamp = 4; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getReadTimestampOrBuilder() { + if (timestampBoundCase_ == 4) { + return (com.google.protobuf.Timestamp) timestampBound_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + + public static final int EXACT_STALENESS_FIELD_NUMBER = 5; + + /** + * + * + *
    +     * Executes all reads at a timestamp that is `exact_staleness`
    +     * old. The timestamp is chosen soon after the read is started.
    +     *
    +     * Guarantees that all writes that have committed more than the
    +     * specified number of seconds ago are visible. Because Cloud Spanner
    +     * chooses the exact timestamp, this mode works even if the client's
    +     * local clock is substantially skewed from Cloud Spanner commit
    +     * timestamps.
    +     *
    +     * Useful for reading at nearby replicas without the distributed
    +     * timestamp negotiation overhead of `max_staleness`.
    +     * 
    + * + * .google.protobuf.Duration exact_staleness = 5; + * + * @return Whether the exactStaleness field is set. + */ + @java.lang.Override + public boolean hasExactStaleness() { + return timestampBoundCase_ == 5; + } + + /** + * + * + *
    +     * Executes all reads at a timestamp that is `exact_staleness`
    +     * old. The timestamp is chosen soon after the read is started.
    +     *
    +     * Guarantees that all writes that have committed more than the
    +     * specified number of seconds ago are visible. Because Cloud Spanner
    +     * chooses the exact timestamp, this mode works even if the client's
    +     * local clock is substantially skewed from Cloud Spanner commit
    +     * timestamps.
    +     *
    +     * Useful for reading at nearby replicas without the distributed
    +     * timestamp negotiation overhead of `max_staleness`.
    +     * 
    + * + * .google.protobuf.Duration exact_staleness = 5; + * + * @return The exactStaleness. + */ + @java.lang.Override + public com.google.protobuf.Duration getExactStaleness() { + if (timestampBoundCase_ == 5) { + return (com.google.protobuf.Duration) timestampBound_; + } + return com.google.protobuf.Duration.getDefaultInstance(); + } + + /** + * + * + *
    +     * Executes all reads at a timestamp that is `exact_staleness`
    +     * old. The timestamp is chosen soon after the read is started.
    +     *
    +     * Guarantees that all writes that have committed more than the
    +     * specified number of seconds ago are visible. Because Cloud Spanner
    +     * chooses the exact timestamp, this mode works even if the client's
    +     * local clock is substantially skewed from Cloud Spanner commit
    +     * timestamps.
    +     *
    +     * Useful for reading at nearby replicas without the distributed
    +     * timestamp negotiation overhead of `max_staleness`.
    +     * 
    + * + * .google.protobuf.Duration exact_staleness = 5; + */ + @java.lang.Override + public com.google.protobuf.DurationOrBuilder getExactStalenessOrBuilder() { + if (timestampBoundCase_ == 5) { + return (com.google.protobuf.Duration) timestampBound_; + } + return com.google.protobuf.Duration.getDefaultInstance(); + } + + public static final int RETURN_READ_TIMESTAMP_FIELD_NUMBER = 6; + private boolean returnReadTimestamp_ = false; + + /** + * + * + *
    +     * If true, the Cloud Spanner-selected read timestamp is included in
    +     * the [Transaction][google.spanner.v1.Transaction] message that describes
    +     * the transaction.
    +     * 
    + * + * bool return_read_timestamp = 6; + * + * @return The returnReadTimestamp. + */ + @java.lang.Override + public boolean getReturnReadTimestamp() { + return returnReadTimestamp_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (timestampBoundCase_ == 1) { + output.writeBool(1, (boolean) ((java.lang.Boolean) timestampBound_)); + } + if (timestampBoundCase_ == 2) { + output.writeMessage(2, (com.google.protobuf.Timestamp) timestampBound_); + } + if (timestampBoundCase_ == 3) { + output.writeMessage(3, (com.google.protobuf.Duration) timestampBound_); + } + if (timestampBoundCase_ == 4) { + output.writeMessage(4, (com.google.protobuf.Timestamp) timestampBound_); + } + if (timestampBoundCase_ == 5) { + output.writeMessage(5, (com.google.protobuf.Duration) timestampBound_); + } + if (returnReadTimestamp_ != false) { + output.writeBool(6, returnReadTimestamp_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (timestampBoundCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeBoolSize( + 1, (boolean) ((java.lang.Boolean) timestampBound_)); + } + if (timestampBoundCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, (com.google.protobuf.Timestamp) timestampBound_); + } + if (timestampBoundCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, (com.google.protobuf.Duration) timestampBound_); + } + if (timestampBoundCase_ == 4) { + size += + 
com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.protobuf.Timestamp) timestampBound_); + } + if (timestampBoundCase_ == 5) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 5, (com.google.protobuf.Duration) timestampBound_); + } + if (returnReadTimestamp_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(6, returnReadTimestamp_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.TransactionOptions.ReadOnly)) { + return super.equals(obj); + } + com.google.spanner.v1.TransactionOptions.ReadOnly other = + (com.google.spanner.v1.TransactionOptions.ReadOnly) obj; + + if (getReturnReadTimestamp() != other.getReturnReadTimestamp()) return false; + if (!getTimestampBoundCase().equals(other.getTimestampBoundCase())) return false; + switch (timestampBoundCase_) { + case 1: + if (getStrong() != other.getStrong()) return false; + break; + case 2: + if (!getMinReadTimestamp().equals(other.getMinReadTimestamp())) return false; + break; + case 3: + if (!getMaxStaleness().equals(other.getMaxStaleness())) return false; + break; + case 4: + if (!getReadTimestamp().equals(other.getReadTimestamp())) return false; + break; + case 5: + if (!getExactStaleness().equals(other.getExactStaleness())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + RETURN_READ_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getReturnReadTimestamp()); + switch (timestampBoundCase_) { + case 1: + hash = 
(37 * hash) + STRONG_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getStrong()); + break; + case 2: + hash = (37 * hash) + MIN_READ_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + getMinReadTimestamp().hashCode(); + break; + case 3: + hash = (37 * hash) + MAX_STALENESS_FIELD_NUMBER; + hash = (53 * hash) + getMaxStaleness().hashCode(); + break; + case 4: + hash = (37 * hash) + READ_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + getReadTimestamp().hashCode(); + break; + case 5: + hash = (37 * hash) + EXACT_STALENESS_FIELD_NUMBER; + hash = (53 * hash) + getExactStaleness().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.TransactionOptions.ReadOnly parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.TransactionOptions.ReadOnly parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionOptions.ReadOnly parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.TransactionOptions.ReadOnly parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionOptions.ReadOnly parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.v1.TransactionOptions.ReadOnly parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionOptions.ReadOnly parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.TransactionOptions.ReadOnly parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionOptions.ReadOnly parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.TransactionOptions.ReadOnly parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionOptions.ReadOnly parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.TransactionOptions.ReadOnly parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return 
newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.TransactionOptions.ReadOnly prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +     * Message type to initiate a read-only transaction.
    +     * 
    + * + * Protobuf type {@code google.spanner.v1.TransactionOptions.ReadOnly} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.TransactionOptions.ReadOnly) + com.google.spanner.v1.TransactionOptions.ReadOnlyOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionOptions_ReadOnly_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionOptions_ReadOnly_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.TransactionOptions.ReadOnly.class, + com.google.spanner.v1.TransactionOptions.ReadOnly.Builder.class); + } + + // Construct using com.google.spanner.v1.TransactionOptions.ReadOnly.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (minReadTimestampBuilder_ != null) { + minReadTimestampBuilder_.clear(); + } + if (maxStalenessBuilder_ != null) { + maxStalenessBuilder_.clear(); + } + if (readTimestampBuilder_ != null) { + readTimestampBuilder_.clear(); + } + if (exactStalenessBuilder_ != null) { + exactStalenessBuilder_.clear(); + } + returnReadTimestamp_ = false; + timestampBoundCase_ = 0; + timestampBound_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionOptions_ReadOnly_descriptor; + } + + @java.lang.Override + public 
com.google.spanner.v1.TransactionOptions.ReadOnly getDefaultInstanceForType() { + return com.google.spanner.v1.TransactionOptions.ReadOnly.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.ReadOnly build() { + com.google.spanner.v1.TransactionOptions.ReadOnly result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.ReadOnly buildPartial() { + com.google.spanner.v1.TransactionOptions.ReadOnly result = + new com.google.spanner.v1.TransactionOptions.ReadOnly(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.TransactionOptions.ReadOnly result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000020) != 0)) { + result.returnReadTimestamp_ = returnReadTimestamp_; + } + } + + private void buildPartialOneofs(com.google.spanner.v1.TransactionOptions.ReadOnly result) { + result.timestampBoundCase_ = timestampBoundCase_; + result.timestampBound_ = this.timestampBound_; + if (timestampBoundCase_ == 2 && minReadTimestampBuilder_ != null) { + result.timestampBound_ = minReadTimestampBuilder_.build(); + } + if (timestampBoundCase_ == 3 && maxStalenessBuilder_ != null) { + result.timestampBound_ = maxStalenessBuilder_.build(); + } + if (timestampBoundCase_ == 4 && readTimestampBuilder_ != null) { + result.timestampBound_ = readTimestampBuilder_.build(); + } + if (timestampBoundCase_ == 5 && exactStalenessBuilder_ != null) { + result.timestampBound_ = exactStalenessBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.TransactionOptions.ReadOnly) { + return mergeFrom((com.google.spanner.v1.TransactionOptions.ReadOnly) other); + } else { + 
super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.TransactionOptions.ReadOnly other) { + if (other == com.google.spanner.v1.TransactionOptions.ReadOnly.getDefaultInstance()) + return this; + if (other.getReturnReadTimestamp() != false) { + setReturnReadTimestamp(other.getReturnReadTimestamp()); + } + switch (other.getTimestampBoundCase()) { + case STRONG: + { + setStrong(other.getStrong()); + break; + } + case MIN_READ_TIMESTAMP: + { + mergeMinReadTimestamp(other.getMinReadTimestamp()); + break; + } + case MAX_STALENESS: + { + mergeMaxStaleness(other.getMaxStaleness()); + break; + } + case READ_TIMESTAMP: + { + mergeReadTimestamp(other.getReadTimestamp()); + break; + } + case EXACT_STALENESS: + { + mergeExactStaleness(other.getExactStaleness()); + break; + } + case TIMESTAMPBOUND_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + timestampBound_ = input.readBool(); + timestampBoundCase_ = 1; + break; + } // case 8 + case 18: + { + input.readMessage( + internalGetMinReadTimestampFieldBuilder().getBuilder(), extensionRegistry); + timestampBoundCase_ = 2; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetMaxStalenessFieldBuilder().getBuilder(), extensionRegistry); + timestampBoundCase_ = 3; + break; + } // case 26 + case 34: + { + input.readMessage( + internalGetReadTimestampFieldBuilder().getBuilder(), extensionRegistry); + 
timestampBoundCase_ = 4; + break; + } // case 34 + case 42: + { + input.readMessage( + internalGetExactStalenessFieldBuilder().getBuilder(), extensionRegistry); + timestampBoundCase_ = 5; + break; + } // case 42 + case 48: + { + returnReadTimestamp_ = input.readBool(); + bitField0_ |= 0x00000020; + break; + } // case 48 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int timestampBoundCase_ = 0; + private java.lang.Object timestampBound_; + + public TimestampBoundCase getTimestampBoundCase() { + return TimestampBoundCase.forNumber(timestampBoundCase_); + } + + public Builder clearTimestampBound() { + timestampBoundCase_ = 0; + timestampBound_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + /** + * + * + *
    +       * Read at a timestamp where all previously committed transactions
    +       * are visible.
    +       * 
    + * + * bool strong = 1; + * + * @return Whether the strong field is set. + */ + public boolean hasStrong() { + return timestampBoundCase_ == 1; + } + + /** + * + * + *
    +       * Read at a timestamp where all previously committed transactions
    +       * are visible.
    +       * 
    + * + * bool strong = 1; + * + * @return The strong. + */ + public boolean getStrong() { + if (timestampBoundCase_ == 1) { + return (java.lang.Boolean) timestampBound_; + } + return false; + } + + /** + * + * + *
    +       * Read at a timestamp where all previously committed transactions
    +       * are visible.
    +       * 
    + * + * bool strong = 1; + * + * @param value The strong to set. + * @return This builder for chaining. + */ + public Builder setStrong(boolean value) { + + timestampBoundCase_ = 1; + timestampBound_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +       * Read at a timestamp where all previously committed transactions
    +       * are visible.
    +       * 
    + * + * bool strong = 1; + * + * @return This builder for chaining. + */ + public Builder clearStrong() { + if (timestampBoundCase_ == 1) { + timestampBoundCase_ = 0; + timestampBound_ = null; + onChanged(); + } + return this; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + minReadTimestampBuilder_; + + /** + * + * + *
    +       * Executes all reads at a timestamp >= `min_read_timestamp`.
    +       *
    +       * This is useful for requesting fresher data than some previous
    +       * read, or data that is fresh enough to observe the effects of some
    +       * previously committed transaction whose timestamp is known.
    +       *
    +       * Note that this option can only be used in single-use transactions.
    +       *
    +       * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +       * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +       * 
    + * + * .google.protobuf.Timestamp min_read_timestamp = 2; + * + * @return Whether the minReadTimestamp field is set. + */ + @java.lang.Override + public boolean hasMinReadTimestamp() { + return timestampBoundCase_ == 2; + } + + /** + * + * + *
    +       * Executes all reads at a timestamp >= `min_read_timestamp`.
    +       *
    +       * This is useful for requesting fresher data than some previous
    +       * read, or data that is fresh enough to observe the effects of some
    +       * previously committed transaction whose timestamp is known.
    +       *
    +       * Note that this option can only be used in single-use transactions.
    +       *
    +       * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +       * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +       * 
    + * + * .google.protobuf.Timestamp min_read_timestamp = 2; + * + * @return The minReadTimestamp. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getMinReadTimestamp() { + if (minReadTimestampBuilder_ == null) { + if (timestampBoundCase_ == 2) { + return (com.google.protobuf.Timestamp) timestampBound_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } else { + if (timestampBoundCase_ == 2) { + return minReadTimestampBuilder_.getMessage(); + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + } + + /** + * + * + *
    +       * Executes all reads at a timestamp >= `min_read_timestamp`.
    +       *
    +       * This is useful for requesting fresher data than some previous
    +       * read, or data that is fresh enough to observe the effects of some
    +       * previously committed transaction whose timestamp is known.
    +       *
    +       * Note that this option can only be used in single-use transactions.
    +       *
    +       * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +       * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +       * 
    + * + * .google.protobuf.Timestamp min_read_timestamp = 2; + */ + public Builder setMinReadTimestamp(com.google.protobuf.Timestamp value) { + if (minReadTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + timestampBound_ = value; + onChanged(); + } else { + minReadTimestampBuilder_.setMessage(value); + } + timestampBoundCase_ = 2; + return this; + } + + /** + * + * + *
    +       * Executes all reads at a timestamp >= `min_read_timestamp`.
    +       *
    +       * This is useful for requesting fresher data than some previous
    +       * read, or data that is fresh enough to observe the effects of some
    +       * previously committed transaction whose timestamp is known.
    +       *
    +       * Note that this option can only be used in single-use transactions.
    +       *
    +       * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +       * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +       * 
    + * + * .google.protobuf.Timestamp min_read_timestamp = 2; + */ + public Builder setMinReadTimestamp(com.google.protobuf.Timestamp.Builder builderForValue) { + if (minReadTimestampBuilder_ == null) { + timestampBound_ = builderForValue.build(); + onChanged(); + } else { + minReadTimestampBuilder_.setMessage(builderForValue.build()); + } + timestampBoundCase_ = 2; + return this; + } + + /** + * + * + *
    +       * Executes all reads at a timestamp >= `min_read_timestamp`.
    +       *
    +       * This is useful for requesting fresher data than some previous
    +       * read, or data that is fresh enough to observe the effects of some
    +       * previously committed transaction whose timestamp is known.
    +       *
    +       * Note that this option can only be used in single-use transactions.
    +       *
    +       * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +       * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +       * 
    + * + * .google.protobuf.Timestamp min_read_timestamp = 2; + */ + public Builder mergeMinReadTimestamp(com.google.protobuf.Timestamp value) { + if (minReadTimestampBuilder_ == null) { + if (timestampBoundCase_ == 2 + && timestampBound_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + timestampBound_ = + com.google.protobuf.Timestamp.newBuilder( + (com.google.protobuf.Timestamp) timestampBound_) + .mergeFrom(value) + .buildPartial(); + } else { + timestampBound_ = value; + } + onChanged(); + } else { + if (timestampBoundCase_ == 2) { + minReadTimestampBuilder_.mergeFrom(value); + } else { + minReadTimestampBuilder_.setMessage(value); + } + } + timestampBoundCase_ = 2; + return this; + } + + /** + * + * + *
    +       * Executes all reads at a timestamp >= `min_read_timestamp`.
    +       *
    +       * This is useful for requesting fresher data than some previous
    +       * read, or data that is fresh enough to observe the effects of some
    +       * previously committed transaction whose timestamp is known.
    +       *
    +       * Note that this option can only be used in single-use transactions.
    +       *
    +       * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +       * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +       * 
    + * + * .google.protobuf.Timestamp min_read_timestamp = 2; + */ + public Builder clearMinReadTimestamp() { + if (minReadTimestampBuilder_ == null) { + if (timestampBoundCase_ == 2) { + timestampBoundCase_ = 0; + timestampBound_ = null; + onChanged(); + } + } else { + if (timestampBoundCase_ == 2) { + timestampBoundCase_ = 0; + timestampBound_ = null; + } + minReadTimestampBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +       * Executes all reads at a timestamp >= `min_read_timestamp`.
    +       *
    +       * This is useful for requesting fresher data than some previous
    +       * read, or data that is fresh enough to observe the effects of some
    +       * previously committed transaction whose timestamp is known.
    +       *
    +       * Note that this option can only be used in single-use transactions.
    +       *
    +       * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +       * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +       * 
    + * + * .google.protobuf.Timestamp min_read_timestamp = 2; + */ + public com.google.protobuf.Timestamp.Builder getMinReadTimestampBuilder() { + return internalGetMinReadTimestampFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Executes all reads at a timestamp >= `min_read_timestamp`.
    +       *
    +       * This is useful for requesting fresher data than some previous
    +       * read, or data that is fresh enough to observe the effects of some
    +       * previously committed transaction whose timestamp is known.
    +       *
    +       * Note that this option can only be used in single-use transactions.
    +       *
    +       * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +       * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +       * 
    + * + * .google.protobuf.Timestamp min_read_timestamp = 2; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getMinReadTimestampOrBuilder() { + if ((timestampBoundCase_ == 2) && (minReadTimestampBuilder_ != null)) { + return minReadTimestampBuilder_.getMessageOrBuilder(); + } else { + if (timestampBoundCase_ == 2) { + return (com.google.protobuf.Timestamp) timestampBound_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + } + + /** + * + * + *
    +       * Executes all reads at a timestamp >= `min_read_timestamp`.
    +       *
    +       * This is useful for requesting fresher data than some previous
    +       * read, or data that is fresh enough to observe the effects of some
    +       * previously committed transaction whose timestamp is known.
    +       *
    +       * Note that this option can only be used in single-use transactions.
    +       *
    +       * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +       * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +       * 
    + * + * .google.protobuf.Timestamp min_read_timestamp = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetMinReadTimestampFieldBuilder() { + if (minReadTimestampBuilder_ == null) { + if (!(timestampBoundCase_ == 2)) { + timestampBound_ = com.google.protobuf.Timestamp.getDefaultInstance(); + } + minReadTimestampBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + (com.google.protobuf.Timestamp) timestampBound_, + getParentForChildren(), + isClean()); + timestampBound_ = null; + } + timestampBoundCase_ = 2; + onChanged(); + return minReadTimestampBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + maxStalenessBuilder_; + + /** + * + * + *
    +       * Read data at a timestamp >= `NOW - max_staleness`
    +       * seconds. Guarantees that all writes that have committed more
    +       * than the specified number of seconds ago are visible. Because
    +       * Cloud Spanner chooses the exact timestamp, this mode works even if
    +       * the client's local clock is substantially skewed from Cloud Spanner
    +       * commit timestamps.
    +       *
    +       * Useful for reading the freshest data available at a nearby
    +       * replica, while bounding the possible staleness if the local
    +       * replica has fallen behind.
    +       *
    +       * Note that this option can only be used in single-use
    +       * transactions.
    +       * 
    + * + * .google.protobuf.Duration max_staleness = 3; + * + * @return Whether the maxStaleness field is set. + */ + @java.lang.Override + public boolean hasMaxStaleness() { + return timestampBoundCase_ == 3; + } + + /** + * + * + *
    +       * Read data at a timestamp >= `NOW - max_staleness`
    +       * seconds. Guarantees that all writes that have committed more
    +       * than the specified number of seconds ago are visible. Because
    +       * Cloud Spanner chooses the exact timestamp, this mode works even if
    +       * the client's local clock is substantially skewed from Cloud Spanner
    +       * commit timestamps.
    +       *
    +       * Useful for reading the freshest data available at a nearby
    +       * replica, while bounding the possible staleness if the local
    +       * replica has fallen behind.
    +       *
    +       * Note that this option can only be used in single-use
    +       * transactions.
    +       * 
    + * + * .google.protobuf.Duration max_staleness = 3; + * + * @return The maxStaleness. + */ + @java.lang.Override + public com.google.protobuf.Duration getMaxStaleness() { + if (maxStalenessBuilder_ == null) { + if (timestampBoundCase_ == 3) { + return (com.google.protobuf.Duration) timestampBound_; + } + return com.google.protobuf.Duration.getDefaultInstance(); + } else { + if (timestampBoundCase_ == 3) { + return maxStalenessBuilder_.getMessage(); + } + return com.google.protobuf.Duration.getDefaultInstance(); + } + } + + /** + * + * + *
    +       * Read data at a timestamp >= `NOW - max_staleness`
    +       * seconds. Guarantees that all writes that have committed more
    +       * than the specified number of seconds ago are visible. Because
    +       * Cloud Spanner chooses the exact timestamp, this mode works even if
    +       * the client's local clock is substantially skewed from Cloud Spanner
    +       * commit timestamps.
    +       *
    +       * Useful for reading the freshest data available at a nearby
    +       * replica, while bounding the possible staleness if the local
    +       * replica has fallen behind.
    +       *
    +       * Note that this option can only be used in single-use
    +       * transactions.
    +       * 
    + * + * .google.protobuf.Duration max_staleness = 3; + */ + public Builder setMaxStaleness(com.google.protobuf.Duration value) { + if (maxStalenessBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + timestampBound_ = value; + onChanged(); + } else { + maxStalenessBuilder_.setMessage(value); + } + timestampBoundCase_ = 3; + return this; + } + + /** + * + * + *
    +       * Read data at a timestamp >= `NOW - max_staleness`
    +       * seconds. Guarantees that all writes that have committed more
    +       * than the specified number of seconds ago are visible. Because
    +       * Cloud Spanner chooses the exact timestamp, this mode works even if
    +       * the client's local clock is substantially skewed from Cloud Spanner
    +       * commit timestamps.
    +       *
    +       * Useful for reading the freshest data available at a nearby
    +       * replica, while bounding the possible staleness if the local
    +       * replica has fallen behind.
    +       *
    +       * Note that this option can only be used in single-use
    +       * transactions.
    +       * 
    + * + * .google.protobuf.Duration max_staleness = 3; + */ + public Builder setMaxStaleness(com.google.protobuf.Duration.Builder builderForValue) { + if (maxStalenessBuilder_ == null) { + timestampBound_ = builderForValue.build(); + onChanged(); + } else { + maxStalenessBuilder_.setMessage(builderForValue.build()); + } + timestampBoundCase_ = 3; + return this; + } + + /** + * + * + *
    +       * Read data at a timestamp >= `NOW - max_staleness`
    +       * seconds. Guarantees that all writes that have committed more
    +       * than the specified number of seconds ago are visible. Because
    +       * Cloud Spanner chooses the exact timestamp, this mode works even if
    +       * the client's local clock is substantially skewed from Cloud Spanner
    +       * commit timestamps.
    +       *
    +       * Useful for reading the freshest data available at a nearby
    +       * replica, while bounding the possible staleness if the local
    +       * replica has fallen behind.
    +       *
    +       * Note that this option can only be used in single-use
    +       * transactions.
    +       * 
    + * + * .google.protobuf.Duration max_staleness = 3; + */ + public Builder mergeMaxStaleness(com.google.protobuf.Duration value) { + if (maxStalenessBuilder_ == null) { + if (timestampBoundCase_ == 3 + && timestampBound_ != com.google.protobuf.Duration.getDefaultInstance()) { + timestampBound_ = + com.google.protobuf.Duration.newBuilder( + (com.google.protobuf.Duration) timestampBound_) + .mergeFrom(value) + .buildPartial(); + } else { + timestampBound_ = value; + } + onChanged(); + } else { + if (timestampBoundCase_ == 3) { + maxStalenessBuilder_.mergeFrom(value); + } else { + maxStalenessBuilder_.setMessage(value); + } + } + timestampBoundCase_ = 3; + return this; + } + + /** + * + * + *
    +       * Read data at a timestamp >= `NOW - max_staleness`
    +       * seconds. Guarantees that all writes that have committed more
    +       * than the specified number of seconds ago are visible. Because
    +       * Cloud Spanner chooses the exact timestamp, this mode works even if
    +       * the client's local clock is substantially skewed from Cloud Spanner
    +       * commit timestamps.
    +       *
    +       * Useful for reading the freshest data available at a nearby
    +       * replica, while bounding the possible staleness if the local
    +       * replica has fallen behind.
    +       *
    +       * Note that this option can only be used in single-use
    +       * transactions.
    +       * 
    + * + * .google.protobuf.Duration max_staleness = 3; + */ + public Builder clearMaxStaleness() { + if (maxStalenessBuilder_ == null) { + if (timestampBoundCase_ == 3) { + timestampBoundCase_ = 0; + timestampBound_ = null; + onChanged(); + } + } else { + if (timestampBoundCase_ == 3) { + timestampBoundCase_ = 0; + timestampBound_ = null; + } + maxStalenessBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +       * Read data at a timestamp >= `NOW - max_staleness`
    +       * seconds. Guarantees that all writes that have committed more
    +       * than the specified number of seconds ago are visible. Because
    +       * Cloud Spanner chooses the exact timestamp, this mode works even if
    +       * the client's local clock is substantially skewed from Cloud Spanner
    +       * commit timestamps.
    +       *
    +       * Useful for reading the freshest data available at a nearby
    +       * replica, while bounding the possible staleness if the local
    +       * replica has fallen behind.
    +       *
    +       * Note that this option can only be used in single-use
    +       * transactions.
    +       * 
    + * + * .google.protobuf.Duration max_staleness = 3; + */ + public com.google.protobuf.Duration.Builder getMaxStalenessBuilder() { + return internalGetMaxStalenessFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Read data at a timestamp >= `NOW - max_staleness`
    +       * seconds. Guarantees that all writes that have committed more
    +       * than the specified number of seconds ago are visible. Because
    +       * Cloud Spanner chooses the exact timestamp, this mode works even if
    +       * the client's local clock is substantially skewed from Cloud Spanner
    +       * commit timestamps.
    +       *
    +       * Useful for reading the freshest data available at a nearby
    +       * replica, while bounding the possible staleness if the local
    +       * replica has fallen behind.
    +       *
    +       * Note that this option can only be used in single-use
    +       * transactions.
    +       * 
    + * + * .google.protobuf.Duration max_staleness = 3; + */ + @java.lang.Override + public com.google.protobuf.DurationOrBuilder getMaxStalenessOrBuilder() { + if ((timestampBoundCase_ == 3) && (maxStalenessBuilder_ != null)) { + return maxStalenessBuilder_.getMessageOrBuilder(); + } else { + if (timestampBoundCase_ == 3) { + return (com.google.protobuf.Duration) timestampBound_; + } + return com.google.protobuf.Duration.getDefaultInstance(); + } + } + + /** + * + * + *
    +       * Read data at a timestamp >= `NOW - max_staleness`
    +       * seconds. Guarantees that all writes that have committed more
    +       * than the specified number of seconds ago are visible. Because
    +       * Cloud Spanner chooses the exact timestamp, this mode works even if
    +       * the client's local clock is substantially skewed from Cloud Spanner
    +       * commit timestamps.
    +       *
    +       * Useful for reading the freshest data available at a nearby
    +       * replica, while bounding the possible staleness if the local
    +       * replica has fallen behind.
    +       *
    +       * Note that this option can only be used in single-use
    +       * transactions.
    +       * 
    + * + * .google.protobuf.Duration max_staleness = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + internalGetMaxStalenessFieldBuilder() { + if (maxStalenessBuilder_ == null) { + if (!(timestampBoundCase_ == 3)) { + timestampBound_ = com.google.protobuf.Duration.getDefaultInstance(); + } + maxStalenessBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder>( + (com.google.protobuf.Duration) timestampBound_, + getParentForChildren(), + isClean()); + timestampBound_ = null; + } + timestampBoundCase_ = 3; + onChanged(); + return maxStalenessBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + readTimestampBuilder_; + + /** + * + * + *
    +       * Executes all reads at the given timestamp. Unlike other modes,
    +       * reads at a specific timestamp are repeatable; the same read at
    +       * the same timestamp always returns the same data. If the
    +       * timestamp is in the future, the read is blocked until the
    +       * specified timestamp, modulo the read's deadline.
    +       *
    +       * Useful for large scale consistent reads such as mapreduces, or
    +       * for coordinating many reads against a consistent snapshot of the
    +       * data.
    +       *
    +       * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +       * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +       * 
    + * + * .google.protobuf.Timestamp read_timestamp = 4; + * + * @return Whether the readTimestamp field is set. + */ + @java.lang.Override + public boolean hasReadTimestamp() { + return timestampBoundCase_ == 4; + } + + /** + * + * + *
    +       * Executes all reads at the given timestamp. Unlike other modes,
    +       * reads at a specific timestamp are repeatable; the same read at
    +       * the same timestamp always returns the same data. If the
    +       * timestamp is in the future, the read is blocked until the
    +       * specified timestamp, modulo the read's deadline.
    +       *
    +       * Useful for large scale consistent reads such as mapreduces, or
    +       * for coordinating many reads against a consistent snapshot of the
    +       * data.
    +       *
    +       * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +       * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +       * 
    + * + * .google.protobuf.Timestamp read_timestamp = 4; + * + * @return The readTimestamp. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getReadTimestamp() { + if (readTimestampBuilder_ == null) { + if (timestampBoundCase_ == 4) { + return (com.google.protobuf.Timestamp) timestampBound_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } else { + if (timestampBoundCase_ == 4) { + return readTimestampBuilder_.getMessage(); + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + } + + /** + * + * + *
    +       * Executes all reads at the given timestamp. Unlike other modes,
    +       * reads at a specific timestamp are repeatable; the same read at
    +       * the same timestamp always returns the same data. If the
    +       * timestamp is in the future, the read is blocked until the
    +       * specified timestamp, modulo the read's deadline.
    +       *
    +       * Useful for large scale consistent reads such as mapreduces, or
    +       * for coordinating many reads against a consistent snapshot of the
    +       * data.
    +       *
    +       * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +       * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +       * 
    + * + * .google.protobuf.Timestamp read_timestamp = 4; + */ + public Builder setReadTimestamp(com.google.protobuf.Timestamp value) { + if (readTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + timestampBound_ = value; + onChanged(); + } else { + readTimestampBuilder_.setMessage(value); + } + timestampBoundCase_ = 4; + return this; + } + + /** + * + * + *
    +       * Executes all reads at the given timestamp. Unlike other modes,
    +       * reads at a specific timestamp are repeatable; the same read at
    +       * the same timestamp always returns the same data. If the
    +       * timestamp is in the future, the read is blocked until the
    +       * specified timestamp, modulo the read's deadline.
    +       *
    +       * Useful for large scale consistent reads such as mapreduces, or
    +       * for coordinating many reads against a consistent snapshot of the
    +       * data.
    +       *
    +       * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +       * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +       * 
    + * + * .google.protobuf.Timestamp read_timestamp = 4; + */ + public Builder setReadTimestamp(com.google.protobuf.Timestamp.Builder builderForValue) { + if (readTimestampBuilder_ == null) { + timestampBound_ = builderForValue.build(); + onChanged(); + } else { + readTimestampBuilder_.setMessage(builderForValue.build()); + } + timestampBoundCase_ = 4; + return this; + } + + /** + * + * + *
    +       * Executes all reads at the given timestamp. Unlike other modes,
    +       * reads at a specific timestamp are repeatable; the same read at
    +       * the same timestamp always returns the same data. If the
    +       * timestamp is in the future, the read is blocked until the
    +       * specified timestamp, modulo the read's deadline.
    +       *
    +       * Useful for large scale consistent reads such as mapreduces, or
    +       * for coordinating many reads against a consistent snapshot of the
    +       * data.
    +       *
    +       * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +       * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +       * 
    + * + * .google.protobuf.Timestamp read_timestamp = 4; + */ + public Builder mergeReadTimestamp(com.google.protobuf.Timestamp value) { + if (readTimestampBuilder_ == null) { + if (timestampBoundCase_ == 4 + && timestampBound_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + timestampBound_ = + com.google.protobuf.Timestamp.newBuilder( + (com.google.protobuf.Timestamp) timestampBound_) + .mergeFrom(value) + .buildPartial(); + } else { + timestampBound_ = value; + } + onChanged(); + } else { + if (timestampBoundCase_ == 4) { + readTimestampBuilder_.mergeFrom(value); + } else { + readTimestampBuilder_.setMessage(value); + } + } + timestampBoundCase_ = 4; + return this; + } + + /** + * + * + *
    +       * Executes all reads at the given timestamp. Unlike other modes,
    +       * reads at a specific timestamp are repeatable; the same read at
    +       * the same timestamp always returns the same data. If the
    +       * timestamp is in the future, the read is blocked until the
    +       * specified timestamp, modulo the read's deadline.
    +       *
    +       * Useful for large scale consistent reads such as mapreduces, or
    +       * for coordinating many reads against a consistent snapshot of the
    +       * data.
    +       *
    +       * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +       * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +       * 
    + * + * .google.protobuf.Timestamp read_timestamp = 4; + */ + public Builder clearReadTimestamp() { + if (readTimestampBuilder_ == null) { + if (timestampBoundCase_ == 4) { + timestampBoundCase_ = 0; + timestampBound_ = null; + onChanged(); + } + } else { + if (timestampBoundCase_ == 4) { + timestampBoundCase_ = 0; + timestampBound_ = null; + } + readTimestampBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +       * Executes all reads at the given timestamp. Unlike other modes,
    +       * reads at a specific timestamp are repeatable; the same read at
    +       * the same timestamp always returns the same data. If the
    +       * timestamp is in the future, the read is blocked until the
    +       * specified timestamp, modulo the read's deadline.
    +       *
    +       * Useful for large scale consistent reads such as mapreduces, or
    +       * for coordinating many reads against a consistent snapshot of the
    +       * data.
    +       *
    +       * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +       * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +       * 
    + * + * .google.protobuf.Timestamp read_timestamp = 4; + */ + public com.google.protobuf.Timestamp.Builder getReadTimestampBuilder() { + return internalGetReadTimestampFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Executes all reads at the given timestamp. Unlike other modes,
    +       * reads at a specific timestamp are repeatable; the same read at
    +       * the same timestamp always returns the same data. If the
    +       * timestamp is in the future, the read is blocked until the
    +       * specified timestamp, modulo the read's deadline.
    +       *
    +       * Useful for large scale consistent reads such as mapreduces, or
    +       * for coordinating many reads against a consistent snapshot of the
    +       * data.
    +       *
    +       * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +       * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +       * 
    + * + * .google.protobuf.Timestamp read_timestamp = 4; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getReadTimestampOrBuilder() { + if ((timestampBoundCase_ == 4) && (readTimestampBuilder_ != null)) { + return readTimestampBuilder_.getMessageOrBuilder(); + } else { + if (timestampBoundCase_ == 4) { + return (com.google.protobuf.Timestamp) timestampBound_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + } + + /** + * + * + *
    +       * Executes all reads at the given timestamp. Unlike other modes,
    +       * reads at a specific timestamp are repeatable; the same read at
    +       * the same timestamp always returns the same data. If the
    +       * timestamp is in the future, the read is blocked until the
    +       * specified timestamp, modulo the read's deadline.
    +       *
    +       * Useful for large scale consistent reads such as mapreduces, or
    +       * for coordinating many reads against a consistent snapshot of the
    +       * data.
    +       *
    +       * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +       * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +       * 
    + * + * .google.protobuf.Timestamp read_timestamp = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + internalGetReadTimestampFieldBuilder() { + if (readTimestampBuilder_ == null) { + if (!(timestampBoundCase_ == 4)) { + timestampBound_ = com.google.protobuf.Timestamp.getDefaultInstance(); + } + readTimestampBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + (com.google.protobuf.Timestamp) timestampBound_, + getParentForChildren(), + isClean()); + timestampBound_ = null; + } + timestampBoundCase_ = 4; + onChanged(); + return readTimestampBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + exactStalenessBuilder_; + + /** + * + * + *
    +       * Executes all reads at a timestamp that is `exact_staleness`
    +       * old. The timestamp is chosen soon after the read is started.
    +       *
    +       * Guarantees that all writes that have committed more than the
    +       * specified number of seconds ago are visible. Because Cloud Spanner
    +       * chooses the exact timestamp, this mode works even if the client's
    +       * local clock is substantially skewed from Cloud Spanner commit
    +       * timestamps.
    +       *
    +       * Useful for reading at nearby replicas without the distributed
    +       * timestamp negotiation overhead of `max_staleness`.
    +       * 
    + * + * .google.protobuf.Duration exact_staleness = 5; + * + * @return Whether the exactStaleness field is set. + */ + @java.lang.Override + public boolean hasExactStaleness() { + return timestampBoundCase_ == 5; + } + + /** + * + * + *
    +       * Executes all reads at a timestamp that is `exact_staleness`
    +       * old. The timestamp is chosen soon after the read is started.
    +       *
    +       * Guarantees that all writes that have committed more than the
    +       * specified number of seconds ago are visible. Because Cloud Spanner
    +       * chooses the exact timestamp, this mode works even if the client's
    +       * local clock is substantially skewed from Cloud Spanner commit
    +       * timestamps.
    +       *
    +       * Useful for reading at nearby replicas without the distributed
    +       * timestamp negotiation overhead of `max_staleness`.
    +       * 
    + * + * .google.protobuf.Duration exact_staleness = 5; + * + * @return The exactStaleness. + */ + @java.lang.Override + public com.google.protobuf.Duration getExactStaleness() { + if (exactStalenessBuilder_ == null) { + if (timestampBoundCase_ == 5) { + return (com.google.protobuf.Duration) timestampBound_; + } + return com.google.protobuf.Duration.getDefaultInstance(); + } else { + if (timestampBoundCase_ == 5) { + return exactStalenessBuilder_.getMessage(); + } + return com.google.protobuf.Duration.getDefaultInstance(); + } + } + + /** + * + * + *
    +       * Executes all reads at a timestamp that is `exact_staleness`
    +       * old. The timestamp is chosen soon after the read is started.
    +       *
    +       * Guarantees that all writes that have committed more than the
    +       * specified number of seconds ago are visible. Because Cloud Spanner
    +       * chooses the exact timestamp, this mode works even if the client's
    +       * local clock is substantially skewed from Cloud Spanner commit
    +       * timestamps.
    +       *
    +       * Useful for reading at nearby replicas without the distributed
    +       * timestamp negotiation overhead of `max_staleness`.
    +       * 
    + * + * .google.protobuf.Duration exact_staleness = 5; + */ + public Builder setExactStaleness(com.google.protobuf.Duration value) { + if (exactStalenessBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + timestampBound_ = value; + onChanged(); + } else { + exactStalenessBuilder_.setMessage(value); + } + timestampBoundCase_ = 5; + return this; + } + + /** + * + * + *
    +       * Executes all reads at a timestamp that is `exact_staleness`
    +       * old. The timestamp is chosen soon after the read is started.
    +       *
    +       * Guarantees that all writes that have committed more than the
    +       * specified number of seconds ago are visible. Because Cloud Spanner
    +       * chooses the exact timestamp, this mode works even if the client's
    +       * local clock is substantially skewed from Cloud Spanner commit
    +       * timestamps.
    +       *
    +       * Useful for reading at nearby replicas without the distributed
    +       * timestamp negotiation overhead of `max_staleness`.
    +       * 
    + * + * .google.protobuf.Duration exact_staleness = 5; + */ + public Builder setExactStaleness(com.google.protobuf.Duration.Builder builderForValue) { + if (exactStalenessBuilder_ == null) { + timestampBound_ = builderForValue.build(); + onChanged(); + } else { + exactStalenessBuilder_.setMessage(builderForValue.build()); + } + timestampBoundCase_ = 5; + return this; + } + + /** + * + * + *
    +       * Executes all reads at a timestamp that is `exact_staleness`
    +       * old. The timestamp is chosen soon after the read is started.
    +       *
    +       * Guarantees that all writes that have committed more than the
    +       * specified number of seconds ago are visible. Because Cloud Spanner
    +       * chooses the exact timestamp, this mode works even if the client's
    +       * local clock is substantially skewed from Cloud Spanner commit
    +       * timestamps.
    +       *
    +       * Useful for reading at nearby replicas without the distributed
    +       * timestamp negotiation overhead of `max_staleness`.
    +       * 
    + * + * .google.protobuf.Duration exact_staleness = 5; + */ + public Builder mergeExactStaleness(com.google.protobuf.Duration value) { + if (exactStalenessBuilder_ == null) { + if (timestampBoundCase_ == 5 + && timestampBound_ != com.google.protobuf.Duration.getDefaultInstance()) { + timestampBound_ = + com.google.protobuf.Duration.newBuilder( + (com.google.protobuf.Duration) timestampBound_) + .mergeFrom(value) + .buildPartial(); + } else { + timestampBound_ = value; + } + onChanged(); + } else { + if (timestampBoundCase_ == 5) { + exactStalenessBuilder_.mergeFrom(value); + } else { + exactStalenessBuilder_.setMessage(value); + } + } + timestampBoundCase_ = 5; + return this; + } + + /** + * + * + *
    +       * Executes all reads at a timestamp that is `exact_staleness`
    +       * old. The timestamp is chosen soon after the read is started.
    +       *
    +       * Guarantees that all writes that have committed more than the
    +       * specified number of seconds ago are visible. Because Cloud Spanner
    +       * chooses the exact timestamp, this mode works even if the client's
    +       * local clock is substantially skewed from Cloud Spanner commit
    +       * timestamps.
    +       *
    +       * Useful for reading at nearby replicas without the distributed
    +       * timestamp negotiation overhead of `max_staleness`.
    +       * 
    + * + * .google.protobuf.Duration exact_staleness = 5; + */ + public Builder clearExactStaleness() { + if (exactStalenessBuilder_ == null) { + if (timestampBoundCase_ == 5) { + timestampBoundCase_ = 0; + timestampBound_ = null; + onChanged(); + } + } else { + if (timestampBoundCase_ == 5) { + timestampBoundCase_ = 0; + timestampBound_ = null; + } + exactStalenessBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +       * Executes all reads at a timestamp that is `exact_staleness`
    +       * old. The timestamp is chosen soon after the read is started.
    +       *
    +       * Guarantees that all writes that have committed more than the
    +       * specified number of seconds ago are visible. Because Cloud Spanner
    +       * chooses the exact timestamp, this mode works even if the client's
    +       * local clock is substantially skewed from Cloud Spanner commit
    +       * timestamps.
    +       *
    +       * Useful for reading at nearby replicas without the distributed
    +       * timestamp negotiation overhead of `max_staleness`.
    +       * 
    + * + * .google.protobuf.Duration exact_staleness = 5; + */ + public com.google.protobuf.Duration.Builder getExactStalenessBuilder() { + return internalGetExactStalenessFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +       * Executes all reads at a timestamp that is `exact_staleness`
    +       * old. The timestamp is chosen soon after the read is started.
    +       *
    +       * Guarantees that all writes that have committed more than the
    +       * specified number of seconds ago are visible. Because Cloud Spanner
    +       * chooses the exact timestamp, this mode works even if the client's
    +       * local clock is substantially skewed from Cloud Spanner commit
    +       * timestamps.
    +       *
    +       * Useful for reading at nearby replicas without the distributed
    +       * timestamp negotiation overhead of `max_staleness`.
    +       * 
    + * + * .google.protobuf.Duration exact_staleness = 5; + */ + @java.lang.Override + public com.google.protobuf.DurationOrBuilder getExactStalenessOrBuilder() { + if ((timestampBoundCase_ == 5) && (exactStalenessBuilder_ != null)) { + return exactStalenessBuilder_.getMessageOrBuilder(); + } else { + if (timestampBoundCase_ == 5) { + return (com.google.protobuf.Duration) timestampBound_; + } + return com.google.protobuf.Duration.getDefaultInstance(); + } + } + + /** + * + * + *
    +       * Executes all reads at a timestamp that is `exact_staleness`
    +       * old. The timestamp is chosen soon after the read is started.
    +       *
    +       * Guarantees that all writes that have committed more than the
    +       * specified number of seconds ago are visible. Because Cloud Spanner
    +       * chooses the exact timestamp, this mode works even if the client's
    +       * local clock is substantially skewed from Cloud Spanner commit
    +       * timestamps.
    +       *
    +       * Useful for reading at nearby replicas without the distributed
    +       * timestamp negotiation overhead of `max_staleness`.
    +       * 
    + * + * .google.protobuf.Duration exact_staleness = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + internalGetExactStalenessFieldBuilder() { + if (exactStalenessBuilder_ == null) { + if (!(timestampBoundCase_ == 5)) { + timestampBound_ = com.google.protobuf.Duration.getDefaultInstance(); + } + exactStalenessBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder>( + (com.google.protobuf.Duration) timestampBound_, + getParentForChildren(), + isClean()); + timestampBound_ = null; + } + timestampBoundCase_ = 5; + onChanged(); + return exactStalenessBuilder_; + } + + private boolean returnReadTimestamp_; + + /** + * + * + *
    +       * If true, the Cloud Spanner-selected read timestamp is included in
    +       * the [Transaction][google.spanner.v1.Transaction] message that describes
    +       * the transaction.
    +       * 
    + * + * bool return_read_timestamp = 6; + * + * @return The returnReadTimestamp. + */ + @java.lang.Override + public boolean getReturnReadTimestamp() { + return returnReadTimestamp_; + } + + /** + * + * + *
    +       * If true, the Cloud Spanner-selected read timestamp is included in
    +       * the [Transaction][google.spanner.v1.Transaction] message that describes
    +       * the transaction.
    +       * 
    + * + * bool return_read_timestamp = 6; + * + * @param value The returnReadTimestamp to set. + * @return This builder for chaining. + */ + public Builder setReturnReadTimestamp(boolean value) { + + returnReadTimestamp_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + + /** + * + * + *
    +       * If true, the Cloud Spanner-selected read timestamp is included in
    +       * the [Transaction][google.spanner.v1.Transaction] message that describes
    +       * the transaction.
    +       * 
    + * + * bool return_read_timestamp = 6; + * + * @return This builder for chaining. + */ + public Builder clearReturnReadTimestamp() { + bitField0_ = (bitField0_ & ~0x00000020); + returnReadTimestamp_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.TransactionOptions.ReadOnly) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.TransactionOptions.ReadOnly) + private static final com.google.spanner.v1.TransactionOptions.ReadOnly DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.TransactionOptions.ReadOnly(); + } + + public static com.google.spanner.v1.TransactionOptions.ReadOnly getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadOnly parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.ReadOnly getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int modeCase_ 
= 0; + + @SuppressWarnings("serial") + private java.lang.Object mode_; + + public enum ModeCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + READ_WRITE(1), + PARTITIONED_DML(3), + READ_ONLY(2), + MODE_NOT_SET(0); + private final int value; + + private ModeCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ModeCase valueOf(int value) { + return forNumber(value); + } + + public static ModeCase forNumber(int value) { + switch (value) { + case 1: + return READ_WRITE; + case 3: + return PARTITIONED_DML; + case 2: + return READ_ONLY; + case 0: + return MODE_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public ModeCase getModeCase() { + return ModeCase.forNumber(modeCase_); + } + + public static final int READ_WRITE_FIELD_NUMBER = 1; + + /** + * + * + *
    +   * Transaction may write.
    +   *
    +   * Authorization to begin a read-write transaction requires
    +   * `spanner.databases.beginOrRollbackReadWriteTransaction` permission
    +   * on the `session` resource.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite read_write = 1; + * + * @return Whether the readWrite field is set. + */ + @java.lang.Override + public boolean hasReadWrite() { + return modeCase_ == 1; + } + + /** + * + * + *
    +   * Transaction may write.
    +   *
    +   * Authorization to begin a read-write transaction requires
    +   * `spanner.databases.beginOrRollbackReadWriteTransaction` permission
    +   * on the `session` resource.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite read_write = 1; + * + * @return The readWrite. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.ReadWrite getReadWrite() { + if (modeCase_ == 1) { + return (com.google.spanner.v1.TransactionOptions.ReadWrite) mode_; + } + return com.google.spanner.v1.TransactionOptions.ReadWrite.getDefaultInstance(); + } + + /** + * + * + *
    +   * Transaction may write.
    +   *
    +   * Authorization to begin a read-write transaction requires
    +   * `spanner.databases.beginOrRollbackReadWriteTransaction` permission
    +   * on the `session` resource.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite read_write = 1; + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.ReadWriteOrBuilder getReadWriteOrBuilder() { + if (modeCase_ == 1) { + return (com.google.spanner.v1.TransactionOptions.ReadWrite) mode_; + } + return com.google.spanner.v1.TransactionOptions.ReadWrite.getDefaultInstance(); + } + + public static final int PARTITIONED_DML_FIELD_NUMBER = 3; + + /** + * + * + *
    +   * Partitioned DML transaction.
    +   *
    +   * Authorization to begin a Partitioned DML transaction requires
    +   * `spanner.databases.beginPartitionedDmlTransaction` permission
    +   * on the `session` resource.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.PartitionedDml partitioned_dml = 3; + * + * @return Whether the partitionedDml field is set. + */ + @java.lang.Override + public boolean hasPartitionedDml() { + return modeCase_ == 3; + } + + /** + * + * + *
    +   * Partitioned DML transaction.
    +   *
    +   * Authorization to begin a Partitioned DML transaction requires
    +   * `spanner.databases.beginPartitionedDmlTransaction` permission
    +   * on the `session` resource.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.PartitionedDml partitioned_dml = 3; + * + * @return The partitionedDml. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.PartitionedDml getPartitionedDml() { + if (modeCase_ == 3) { + return (com.google.spanner.v1.TransactionOptions.PartitionedDml) mode_; + } + return com.google.spanner.v1.TransactionOptions.PartitionedDml.getDefaultInstance(); + } + + /** + * + * + *
    +   * Partitioned DML transaction.
    +   *
    +   * Authorization to begin a Partitioned DML transaction requires
    +   * `spanner.databases.beginPartitionedDmlTransaction` permission
    +   * on the `session` resource.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.PartitionedDml partitioned_dml = 3; + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.PartitionedDmlOrBuilder + getPartitionedDmlOrBuilder() { + if (modeCase_ == 3) { + return (com.google.spanner.v1.TransactionOptions.PartitionedDml) mode_; + } + return com.google.spanner.v1.TransactionOptions.PartitionedDml.getDefaultInstance(); + } + + public static final int READ_ONLY_FIELD_NUMBER = 2; + + /** + * + * + *
    +   * Transaction does not write.
    +   *
    +   * Authorization to begin a read-only transaction requires
    +   * `spanner.databases.beginReadOnlyTransaction` permission
    +   * on the `session` resource.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.ReadOnly read_only = 2; + * + * @return Whether the readOnly field is set. + */ + @java.lang.Override + public boolean hasReadOnly() { + return modeCase_ == 2; + } + + /** + * + * + *
    +   * Transaction does not write.
    +   *
    +   * Authorization to begin a read-only transaction requires
    +   * `spanner.databases.beginReadOnlyTransaction` permission
    +   * on the `session` resource.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.ReadOnly read_only = 2; + * + * @return The readOnly. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.ReadOnly getReadOnly() { + if (modeCase_ == 2) { + return (com.google.spanner.v1.TransactionOptions.ReadOnly) mode_; + } + return com.google.spanner.v1.TransactionOptions.ReadOnly.getDefaultInstance(); + } + + /** + * + * + *
    +   * Transaction does not write.
    +   *
    +   * Authorization to begin a read-only transaction requires
    +   * `spanner.databases.beginReadOnlyTransaction` permission
    +   * on the `session` resource.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.ReadOnly read_only = 2; + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.ReadOnlyOrBuilder getReadOnlyOrBuilder() { + if (modeCase_ == 2) { + return (com.google.spanner.v1.TransactionOptions.ReadOnly) mode_; + } + return com.google.spanner.v1.TransactionOptions.ReadOnly.getDefaultInstance(); + } + + public static final int EXCLUDE_TXN_FROM_CHANGE_STREAMS_FIELD_NUMBER = 5; + private boolean excludeTxnFromChangeStreams_ = false; + + /** + * + * + *
    +   * When `exclude_txn_from_change_streams` is set to `true`, it prevents read
    +   * or write transactions from being tracked in change streams.
    +   *
    +   * * If the DDL option `allow_txn_exclusion` is set to `true`, then the
    +   * updates
    +   * made within this transaction aren't recorded in the change stream.
    +   *
    +   * * If you don't set the DDL option `allow_txn_exclusion` or if it's
    +   * set to `false`, then the updates made within this transaction are
    +   * recorded in the change stream.
    +   *
    +   * When `exclude_txn_from_change_streams` is set to `false` or not set,
    +   * modifications from this transaction are recorded in all change streams
    +   * that are tracking columns modified by these transactions.
    +   *
    +   * The `exclude_txn_from_change_streams` option can only be specified
    +   * for read-write or partitioned DML transactions, otherwise the API returns
    +   * an `INVALID_ARGUMENT` error.
    +   * 
    + * + * bool exclude_txn_from_change_streams = 5; + * + * @return The excludeTxnFromChangeStreams. + */ + @java.lang.Override + public boolean getExcludeTxnFromChangeStreams() { + return excludeTxnFromChangeStreams_; + } + + public static final int ISOLATION_LEVEL_FIELD_NUMBER = 6; + private int isolationLevel_ = 0; + + /** + * + * + *
    +   * Isolation level for the transaction.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.IsolationLevel isolation_level = 6; + * + * @return The enum numeric value on the wire for isolationLevel. + */ + @java.lang.Override + public int getIsolationLevelValue() { + return isolationLevel_; + } + + /** + * + * + *
    +   * Isolation level for the transaction.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.IsolationLevel isolation_level = 6; + * + * @return The isolationLevel. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.IsolationLevel getIsolationLevel() { + com.google.spanner.v1.TransactionOptions.IsolationLevel result = + com.google.spanner.v1.TransactionOptions.IsolationLevel.forNumber(isolationLevel_); + return result == null + ? com.google.spanner.v1.TransactionOptions.IsolationLevel.UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (modeCase_ == 1) { + output.writeMessage(1, (com.google.spanner.v1.TransactionOptions.ReadWrite) mode_); + } + if (modeCase_ == 2) { + output.writeMessage(2, (com.google.spanner.v1.TransactionOptions.ReadOnly) mode_); + } + if (modeCase_ == 3) { + output.writeMessage(3, (com.google.spanner.v1.TransactionOptions.PartitionedDml) mode_); + } + if (excludeTxnFromChangeStreams_ != false) { + output.writeBool(5, excludeTxnFromChangeStreams_); + } + if (isolationLevel_ + != com.google.spanner.v1.TransactionOptions.IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED + .getNumber()) { + output.writeEnum(6, isolationLevel_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (modeCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, (com.google.spanner.v1.TransactionOptions.ReadWrite) mode_); + } + if (modeCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, 
(com.google.spanner.v1.TransactionOptions.ReadOnly) mode_); + } + if (modeCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, (com.google.spanner.v1.TransactionOptions.PartitionedDml) mode_); + } + if (excludeTxnFromChangeStreams_ != false) { + size += + com.google.protobuf.CodedOutputStream.computeBoolSize(5, excludeTxnFromChangeStreams_); + } + if (isolationLevel_ + != com.google.spanner.v1.TransactionOptions.IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(6, isolationLevel_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.TransactionOptions)) { + return super.equals(obj); + } + com.google.spanner.v1.TransactionOptions other = (com.google.spanner.v1.TransactionOptions) obj; + + if (getExcludeTxnFromChangeStreams() != other.getExcludeTxnFromChangeStreams()) return false; + if (isolationLevel_ != other.isolationLevel_) return false; + if (!getModeCase().equals(other.getModeCase())) return false; + switch (modeCase_) { + case 1: + if (!getReadWrite().equals(other.getReadWrite())) return false; + break; + case 3: + if (!getPartitionedDml().equals(other.getPartitionedDml())) return false; + break; + case 2: + if (!getReadOnly().equals(other.getReadOnly())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + EXCLUDE_TXN_FROM_CHANGE_STREAMS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getExcludeTxnFromChangeStreams()); + hash = (37 
* hash) + ISOLATION_LEVEL_FIELD_NUMBER; + hash = (53 * hash) + isolationLevel_; + switch (modeCase_) { + case 1: + hash = (37 * hash) + READ_WRITE_FIELD_NUMBER; + hash = (53 * hash) + getReadWrite().hashCode(); + break; + case 3: + hash = (37 * hash) + PARTITIONED_DML_FIELD_NUMBER; + hash = (53 * hash) + getPartitionedDml().hashCode(); + break; + case 2: + hash = (37 * hash) + READ_ONLY_FIELD_NUMBER; + hash = (53 * hash) + getReadOnly().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.TransactionOptions parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.TransactionOptions parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionOptions parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.TransactionOptions parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionOptions parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.TransactionOptions parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + 
} + + public static com.google.spanner.v1.TransactionOptions parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.TransactionOptions parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionOptions parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.TransactionOptions parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionOptions parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.TransactionOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.TransactionOptions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() 
{ + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Options to use for transactions.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.TransactionOptions} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.TransactionOptions) + com.google.spanner.v1.TransactionOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.TransactionOptions.class, + com.google.spanner.v1.TransactionOptions.Builder.class); + } + + // Construct using com.google.spanner.v1.TransactionOptions.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (readWriteBuilder_ != null) { + readWriteBuilder_.clear(); + } + if (partitionedDmlBuilder_ != null) { + partitionedDmlBuilder_.clear(); + } + if (readOnlyBuilder_ != null) { + readOnlyBuilder_.clear(); + } + excludeTxnFromChangeStreams_ = false; + isolationLevel_ = 0; + modeCase_ = 0; + mode_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionOptions_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.TransactionOptions getDefaultInstanceForType() { + return com.google.spanner.v1.TransactionOptions.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.v1.TransactionOptions build() { + com.google.spanner.v1.TransactionOptions result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.TransactionOptions buildPartial() { + com.google.spanner.v1.TransactionOptions result = + new com.google.spanner.v1.TransactionOptions(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.TransactionOptions result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000008) != 0)) { + result.excludeTxnFromChangeStreams_ = excludeTxnFromChangeStreams_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.isolationLevel_ = isolationLevel_; + } + } + + private void buildPartialOneofs(com.google.spanner.v1.TransactionOptions result) { + result.modeCase_ = modeCase_; + result.mode_ = this.mode_; + if (modeCase_ == 1 && readWriteBuilder_ != null) { + result.mode_ = readWriteBuilder_.build(); + } + if (modeCase_ == 3 && partitionedDmlBuilder_ != null) { + result.mode_ = partitionedDmlBuilder_.build(); + } + if (modeCase_ == 2 && readOnlyBuilder_ != null) { + result.mode_ = readOnlyBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.TransactionOptions) { + return mergeFrom((com.google.spanner.v1.TransactionOptions) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.TransactionOptions other) { + if (other == com.google.spanner.v1.TransactionOptions.getDefaultInstance()) return this; + if (other.getExcludeTxnFromChangeStreams() != false) { + setExcludeTxnFromChangeStreams(other.getExcludeTxnFromChangeStreams()); + } + if (other.isolationLevel_ != 0) { + 
setIsolationLevelValue(other.getIsolationLevelValue()); + } + switch (other.getModeCase()) { + case READ_WRITE: + { + mergeReadWrite(other.getReadWrite()); + break; + } + case PARTITIONED_DML: + { + mergePartitionedDml(other.getPartitionedDml()); + break; + } + case READ_ONLY: + { + mergeReadOnly(other.getReadOnly()); + break; + } + case MODE_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetReadWriteFieldBuilder().getBuilder(), extensionRegistry); + modeCase_ = 1; + break; + } // case 10 + case 18: + { + input.readMessage( + internalGetReadOnlyFieldBuilder().getBuilder(), extensionRegistry); + modeCase_ = 2; + break; + } // case 18 + case 26: + { + input.readMessage( + internalGetPartitionedDmlFieldBuilder().getBuilder(), extensionRegistry); + modeCase_ = 3; + break; + } // case 26 + case 40: + { + excludeTxnFromChangeStreams_ = input.readBool(); + bitField0_ |= 0x00000008; + break; + } // case 40 + case 48: + { + isolationLevel_ = input.readEnum(); + bitField0_ |= 0x00000010; + break; + } // case 48 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int modeCase_ 
= 0; + private java.lang.Object mode_; + + public ModeCase getModeCase() { + return ModeCase.forNumber(modeCase_); + } + + public Builder clearMode() { + modeCase_ = 0; + mode_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionOptions.ReadWrite, + com.google.spanner.v1.TransactionOptions.ReadWrite.Builder, + com.google.spanner.v1.TransactionOptions.ReadWriteOrBuilder> + readWriteBuilder_; + + /** + * + * + *
    +     * Transaction may write.
    +     *
    +     * Authorization to begin a read-write transaction requires
    +     * `spanner.databases.beginOrRollbackReadWriteTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite read_write = 1; + * + * @return Whether the readWrite field is set. + */ + @java.lang.Override + public boolean hasReadWrite() { + return modeCase_ == 1; + } + + /** + * + * + *
    +     * Transaction may write.
    +     *
    +     * Authorization to begin a read-write transaction requires
    +     * `spanner.databases.beginOrRollbackReadWriteTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite read_write = 1; + * + * @return The readWrite. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.ReadWrite getReadWrite() { + if (readWriteBuilder_ == null) { + if (modeCase_ == 1) { + return (com.google.spanner.v1.TransactionOptions.ReadWrite) mode_; + } + return com.google.spanner.v1.TransactionOptions.ReadWrite.getDefaultInstance(); + } else { + if (modeCase_ == 1) { + return readWriteBuilder_.getMessage(); + } + return com.google.spanner.v1.TransactionOptions.ReadWrite.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Transaction may write.
    +     *
    +     * Authorization to begin a read-write transaction requires
    +     * `spanner.databases.beginOrRollbackReadWriteTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite read_write = 1; + */ + public Builder setReadWrite(com.google.spanner.v1.TransactionOptions.ReadWrite value) { + if (readWriteBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + mode_ = value; + onChanged(); + } else { + readWriteBuilder_.setMessage(value); + } + modeCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Transaction may write.
    +     *
    +     * Authorization to begin a read-write transaction requires
    +     * `spanner.databases.beginOrRollbackReadWriteTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite read_write = 1; + */ + public Builder setReadWrite( + com.google.spanner.v1.TransactionOptions.ReadWrite.Builder builderForValue) { + if (readWriteBuilder_ == null) { + mode_ = builderForValue.build(); + onChanged(); + } else { + readWriteBuilder_.setMessage(builderForValue.build()); + } + modeCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Transaction may write.
    +     *
    +     * Authorization to begin a read-write transaction requires
    +     * `spanner.databases.beginOrRollbackReadWriteTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite read_write = 1; + */ + public Builder mergeReadWrite(com.google.spanner.v1.TransactionOptions.ReadWrite value) { + if (readWriteBuilder_ == null) { + if (modeCase_ == 1 + && mode_ != com.google.spanner.v1.TransactionOptions.ReadWrite.getDefaultInstance()) { + mode_ = + com.google.spanner.v1.TransactionOptions.ReadWrite.newBuilder( + (com.google.spanner.v1.TransactionOptions.ReadWrite) mode_) + .mergeFrom(value) + .buildPartial(); + } else { + mode_ = value; + } + onChanged(); + } else { + if (modeCase_ == 1) { + readWriteBuilder_.mergeFrom(value); + } else { + readWriteBuilder_.setMessage(value); + } + } + modeCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Transaction may write.
    +     *
    +     * Authorization to begin a read-write transaction requires
    +     * `spanner.databases.beginOrRollbackReadWriteTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite read_write = 1; + */ + public Builder clearReadWrite() { + if (readWriteBuilder_ == null) { + if (modeCase_ == 1) { + modeCase_ = 0; + mode_ = null; + onChanged(); + } + } else { + if (modeCase_ == 1) { + modeCase_ = 0; + mode_ = null; + } + readWriteBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Transaction may write.
    +     *
    +     * Authorization to begin a read-write transaction requires
    +     * `spanner.databases.beginOrRollbackReadWriteTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite read_write = 1; + */ + public com.google.spanner.v1.TransactionOptions.ReadWrite.Builder getReadWriteBuilder() { + return internalGetReadWriteFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Transaction may write.
    +     *
    +     * Authorization to begin a read-write transaction requires
    +     * `spanner.databases.beginOrRollbackReadWriteTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite read_write = 1; + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.ReadWriteOrBuilder getReadWriteOrBuilder() { + if ((modeCase_ == 1) && (readWriteBuilder_ != null)) { + return readWriteBuilder_.getMessageOrBuilder(); + } else { + if (modeCase_ == 1) { + return (com.google.spanner.v1.TransactionOptions.ReadWrite) mode_; + } + return com.google.spanner.v1.TransactionOptions.ReadWrite.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Transaction may write.
    +     *
    +     * Authorization to begin a read-write transaction requires
    +     * `spanner.databases.beginOrRollbackReadWriteTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite read_write = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionOptions.ReadWrite, + com.google.spanner.v1.TransactionOptions.ReadWrite.Builder, + com.google.spanner.v1.TransactionOptions.ReadWriteOrBuilder> + internalGetReadWriteFieldBuilder() { + if (readWriteBuilder_ == null) { + if (!(modeCase_ == 1)) { + mode_ = com.google.spanner.v1.TransactionOptions.ReadWrite.getDefaultInstance(); + } + readWriteBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionOptions.ReadWrite, + com.google.spanner.v1.TransactionOptions.ReadWrite.Builder, + com.google.spanner.v1.TransactionOptions.ReadWriteOrBuilder>( + (com.google.spanner.v1.TransactionOptions.ReadWrite) mode_, + getParentForChildren(), + isClean()); + mode_ = null; + } + modeCase_ = 1; + onChanged(); + return readWriteBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionOptions.PartitionedDml, + com.google.spanner.v1.TransactionOptions.PartitionedDml.Builder, + com.google.spanner.v1.TransactionOptions.PartitionedDmlOrBuilder> + partitionedDmlBuilder_; + + /** + * + * + *
    +     * Partitioned DML transaction.
    +     *
    +     * Authorization to begin a Partitioned DML transaction requires
    +     * `spanner.databases.beginPartitionedDmlTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.PartitionedDml partitioned_dml = 3; + * + * @return Whether the partitionedDml field is set. + */ + @java.lang.Override + public boolean hasPartitionedDml() { + return modeCase_ == 3; + } + + /** + * + * + *
    +     * Partitioned DML transaction.
    +     *
    +     * Authorization to begin a Partitioned DML transaction requires
    +     * `spanner.databases.beginPartitionedDmlTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.PartitionedDml partitioned_dml = 3; + * + * @return The partitionedDml. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.PartitionedDml getPartitionedDml() { + if (partitionedDmlBuilder_ == null) { + if (modeCase_ == 3) { + return (com.google.spanner.v1.TransactionOptions.PartitionedDml) mode_; + } + return com.google.spanner.v1.TransactionOptions.PartitionedDml.getDefaultInstance(); + } else { + if (modeCase_ == 3) { + return partitionedDmlBuilder_.getMessage(); + } + return com.google.spanner.v1.TransactionOptions.PartitionedDml.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Partitioned DML transaction.
    +     *
    +     * Authorization to begin a Partitioned DML transaction requires
    +     * `spanner.databases.beginPartitionedDmlTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.PartitionedDml partitioned_dml = 3; + */ + public Builder setPartitionedDml( + com.google.spanner.v1.TransactionOptions.PartitionedDml value) { + if (partitionedDmlBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + mode_ = value; + onChanged(); + } else { + partitionedDmlBuilder_.setMessage(value); + } + modeCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Partitioned DML transaction.
    +     *
    +     * Authorization to begin a Partitioned DML transaction requires
    +     * `spanner.databases.beginPartitionedDmlTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.PartitionedDml partitioned_dml = 3; + */ + public Builder setPartitionedDml( + com.google.spanner.v1.TransactionOptions.PartitionedDml.Builder builderForValue) { + if (partitionedDmlBuilder_ == null) { + mode_ = builderForValue.build(); + onChanged(); + } else { + partitionedDmlBuilder_.setMessage(builderForValue.build()); + } + modeCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Partitioned DML transaction.
    +     *
    +     * Authorization to begin a Partitioned DML transaction requires
    +     * `spanner.databases.beginPartitionedDmlTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.PartitionedDml partitioned_dml = 3; + */ + public Builder mergePartitionedDml( + com.google.spanner.v1.TransactionOptions.PartitionedDml value) { + if (partitionedDmlBuilder_ == null) { + if (modeCase_ == 3 + && mode_ + != com.google.spanner.v1.TransactionOptions.PartitionedDml.getDefaultInstance()) { + mode_ = + com.google.spanner.v1.TransactionOptions.PartitionedDml.newBuilder( + (com.google.spanner.v1.TransactionOptions.PartitionedDml) mode_) + .mergeFrom(value) + .buildPartial(); + } else { + mode_ = value; + } + onChanged(); + } else { + if (modeCase_ == 3) { + partitionedDmlBuilder_.mergeFrom(value); + } else { + partitionedDmlBuilder_.setMessage(value); + } + } + modeCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Partitioned DML transaction.
    +     *
    +     * Authorization to begin a Partitioned DML transaction requires
    +     * `spanner.databases.beginPartitionedDmlTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.PartitionedDml partitioned_dml = 3; + */ + public Builder clearPartitionedDml() { + if (partitionedDmlBuilder_ == null) { + if (modeCase_ == 3) { + modeCase_ = 0; + mode_ = null; + onChanged(); + } + } else { + if (modeCase_ == 3) { + modeCase_ = 0; + mode_ = null; + } + partitionedDmlBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Partitioned DML transaction.
    +     *
    +     * Authorization to begin a Partitioned DML transaction requires
    +     * `spanner.databases.beginPartitionedDmlTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.PartitionedDml partitioned_dml = 3; + */ + public com.google.spanner.v1.TransactionOptions.PartitionedDml.Builder + getPartitionedDmlBuilder() { + return internalGetPartitionedDmlFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Partitioned DML transaction.
    +     *
    +     * Authorization to begin a Partitioned DML transaction requires
    +     * `spanner.databases.beginPartitionedDmlTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.PartitionedDml partitioned_dml = 3; + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.PartitionedDmlOrBuilder + getPartitionedDmlOrBuilder() { + if ((modeCase_ == 3) && (partitionedDmlBuilder_ != null)) { + return partitionedDmlBuilder_.getMessageOrBuilder(); + } else { + if (modeCase_ == 3) { + return (com.google.spanner.v1.TransactionOptions.PartitionedDml) mode_; + } + return com.google.spanner.v1.TransactionOptions.PartitionedDml.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Partitioned DML transaction.
    +     *
    +     * Authorization to begin a Partitioned DML transaction requires
    +     * `spanner.databases.beginPartitionedDmlTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.PartitionedDml partitioned_dml = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionOptions.PartitionedDml, + com.google.spanner.v1.TransactionOptions.PartitionedDml.Builder, + com.google.spanner.v1.TransactionOptions.PartitionedDmlOrBuilder> + internalGetPartitionedDmlFieldBuilder() { + if (partitionedDmlBuilder_ == null) { + if (!(modeCase_ == 3)) { + mode_ = com.google.spanner.v1.TransactionOptions.PartitionedDml.getDefaultInstance(); + } + partitionedDmlBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionOptions.PartitionedDml, + com.google.spanner.v1.TransactionOptions.PartitionedDml.Builder, + com.google.spanner.v1.TransactionOptions.PartitionedDmlOrBuilder>( + (com.google.spanner.v1.TransactionOptions.PartitionedDml) mode_, + getParentForChildren(), + isClean()); + mode_ = null; + } + modeCase_ = 3; + onChanged(); + return partitionedDmlBuilder_; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionOptions.ReadOnly, + com.google.spanner.v1.TransactionOptions.ReadOnly.Builder, + com.google.spanner.v1.TransactionOptions.ReadOnlyOrBuilder> + readOnlyBuilder_; + + /** + * + * + *
    +     * Transaction does not write.
    +     *
    +     * Authorization to begin a read-only transaction requires
    +     * `spanner.databases.beginReadOnlyTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadOnly read_only = 2; + * + * @return Whether the readOnly field is set. + */ + @java.lang.Override + public boolean hasReadOnly() { + return modeCase_ == 2; + } + + /** + * + * + *
    +     * Transaction does not write.
    +     *
    +     * Authorization to begin a read-only transaction requires
    +     * `spanner.databases.beginReadOnlyTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadOnly read_only = 2; + * + * @return The readOnly. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.ReadOnly getReadOnly() { + if (readOnlyBuilder_ == null) { + if (modeCase_ == 2) { + return (com.google.spanner.v1.TransactionOptions.ReadOnly) mode_; + } + return com.google.spanner.v1.TransactionOptions.ReadOnly.getDefaultInstance(); + } else { + if (modeCase_ == 2) { + return readOnlyBuilder_.getMessage(); + } + return com.google.spanner.v1.TransactionOptions.ReadOnly.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Transaction does not write.
    +     *
    +     * Authorization to begin a read-only transaction requires
    +     * `spanner.databases.beginReadOnlyTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadOnly read_only = 2; + */ + public Builder setReadOnly(com.google.spanner.v1.TransactionOptions.ReadOnly value) { + if (readOnlyBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + mode_ = value; + onChanged(); + } else { + readOnlyBuilder_.setMessage(value); + } + modeCase_ = 2; + return this; + } + + /** + * + * + *
    +     * Transaction does not write.
    +     *
    +     * Authorization to begin a read-only transaction requires
    +     * `spanner.databases.beginReadOnlyTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadOnly read_only = 2; + */ + public Builder setReadOnly( + com.google.spanner.v1.TransactionOptions.ReadOnly.Builder builderForValue) { + if (readOnlyBuilder_ == null) { + mode_ = builderForValue.build(); + onChanged(); + } else { + readOnlyBuilder_.setMessage(builderForValue.build()); + } + modeCase_ = 2; + return this; + } + + /** + * + * + *
    +     * Transaction does not write.
    +     *
    +     * Authorization to begin a read-only transaction requires
    +     * `spanner.databases.beginReadOnlyTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadOnly read_only = 2; + */ + public Builder mergeReadOnly(com.google.spanner.v1.TransactionOptions.ReadOnly value) { + if (readOnlyBuilder_ == null) { + if (modeCase_ == 2 + && mode_ != com.google.spanner.v1.TransactionOptions.ReadOnly.getDefaultInstance()) { + mode_ = + com.google.spanner.v1.TransactionOptions.ReadOnly.newBuilder( + (com.google.spanner.v1.TransactionOptions.ReadOnly) mode_) + .mergeFrom(value) + .buildPartial(); + } else { + mode_ = value; + } + onChanged(); + } else { + if (modeCase_ == 2) { + readOnlyBuilder_.mergeFrom(value); + } else { + readOnlyBuilder_.setMessage(value); + } + } + modeCase_ = 2; + return this; + } + + /** + * + * + *
    +     * Transaction does not write.
    +     *
    +     * Authorization to begin a read-only transaction requires
    +     * `spanner.databases.beginReadOnlyTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadOnly read_only = 2; + */ + public Builder clearReadOnly() { + if (readOnlyBuilder_ == null) { + if (modeCase_ == 2) { + modeCase_ = 0; + mode_ = null; + onChanged(); + } + } else { + if (modeCase_ == 2) { + modeCase_ = 0; + mode_ = null; + } + readOnlyBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Transaction does not write.
    +     *
    +     * Authorization to begin a read-only transaction requires
    +     * `spanner.databases.beginReadOnlyTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadOnly read_only = 2; + */ + public com.google.spanner.v1.TransactionOptions.ReadOnly.Builder getReadOnlyBuilder() { + return internalGetReadOnlyFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Transaction does not write.
    +     *
    +     * Authorization to begin a read-only transaction requires
    +     * `spanner.databases.beginReadOnlyTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadOnly read_only = 2; + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.ReadOnlyOrBuilder getReadOnlyOrBuilder() { + if ((modeCase_ == 2) && (readOnlyBuilder_ != null)) { + return readOnlyBuilder_.getMessageOrBuilder(); + } else { + if (modeCase_ == 2) { + return (com.google.spanner.v1.TransactionOptions.ReadOnly) mode_; + } + return com.google.spanner.v1.TransactionOptions.ReadOnly.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Transaction does not write.
    +     *
    +     * Authorization to begin a read-only transaction requires
    +     * `spanner.databases.beginReadOnlyTransaction` permission
    +     * on the `session` resource.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.ReadOnly read_only = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionOptions.ReadOnly, + com.google.spanner.v1.TransactionOptions.ReadOnly.Builder, + com.google.spanner.v1.TransactionOptions.ReadOnlyOrBuilder> + internalGetReadOnlyFieldBuilder() { + if (readOnlyBuilder_ == null) { + if (!(modeCase_ == 2)) { + mode_ = com.google.spanner.v1.TransactionOptions.ReadOnly.getDefaultInstance(); + } + readOnlyBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionOptions.ReadOnly, + com.google.spanner.v1.TransactionOptions.ReadOnly.Builder, + com.google.spanner.v1.TransactionOptions.ReadOnlyOrBuilder>( + (com.google.spanner.v1.TransactionOptions.ReadOnly) mode_, + getParentForChildren(), + isClean()); + mode_ = null; + } + modeCase_ = 2; + onChanged(); + return readOnlyBuilder_; + } + + private boolean excludeTxnFromChangeStreams_; + + /** + * + * + *
    +     * When `exclude_txn_from_change_streams` is set to `true`, it prevents read
    +     * or write transactions from being tracked in change streams.
    +     *
    +     * * If the DDL option `allow_txn_exclusion` is set to `true`, then the
    +     * updates
    +     * made within this transaction aren't recorded in the change stream.
    +     *
    +     * * If you don't set the DDL option `allow_txn_exclusion` or if it's
    +     * set to `false`, then the updates made within this transaction are
    +     * recorded in the change stream.
    +     *
    +     * When `exclude_txn_from_change_streams` is set to `false` or not set,
    +     * modifications from this transaction are recorded in all change streams
    +     * that are tracking columns modified by these transactions.
    +     *
    +     * The `exclude_txn_from_change_streams` option can only be specified
    +     * for read-write or partitioned DML transactions, otherwise the API returns
    +     * an `INVALID_ARGUMENT` error.
    +     * 
    + * + * bool exclude_txn_from_change_streams = 5; + * + * @return The excludeTxnFromChangeStreams. + */ + @java.lang.Override + public boolean getExcludeTxnFromChangeStreams() { + return excludeTxnFromChangeStreams_; + } + + /** + * + * + *
    +     * When `exclude_txn_from_change_streams` is set to `true`, it prevents read
    +     * or write transactions from being tracked in change streams.
    +     *
    +     * * If the DDL option `allow_txn_exclusion` is set to `true`, then the
    +     * updates
    +     * made within this transaction aren't recorded in the change stream.
    +     *
    +     * * If you don't set the DDL option `allow_txn_exclusion` or if it's
    +     * set to `false`, then the updates made within this transaction are
    +     * recorded in the change stream.
    +     *
    +     * When `exclude_txn_from_change_streams` is set to `false` or not set,
    +     * modifications from this transaction are recorded in all change streams
    +     * that are tracking columns modified by these transactions.
    +     *
    +     * The `exclude_txn_from_change_streams` option can only be specified
    +     * for read-write or partitioned DML transactions, otherwise the API returns
    +     * an `INVALID_ARGUMENT` error.
    +     * 
    + * + * bool exclude_txn_from_change_streams = 5; + * + * @param value The excludeTxnFromChangeStreams to set. + * @return This builder for chaining. + */ + public Builder setExcludeTxnFromChangeStreams(boolean value) { + + excludeTxnFromChangeStreams_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * When `exclude_txn_from_change_streams` is set to `true`, it prevents read
    +     * or write transactions from being tracked in change streams.
    +     *
    +     * * If the DDL option `allow_txn_exclusion` is set to `true`, then the
    +     * updates
    +     * made within this transaction aren't recorded in the change stream.
    +     *
    +     * * If you don't set the DDL option `allow_txn_exclusion` or if it's
    +     * set to `false`, then the updates made within this transaction are
    +     * recorded in the change stream.
    +     *
    +     * When `exclude_txn_from_change_streams` is set to `false` or not set,
    +     * modifications from this transaction are recorded in all change streams
    +     * that are tracking columns modified by these transactions.
    +     *
    +     * The `exclude_txn_from_change_streams` option can only be specified
    +     * for read-write or partitioned DML transactions, otherwise the API returns
    +     * an `INVALID_ARGUMENT` error.
    +     * 
    + * + * bool exclude_txn_from_change_streams = 5; + * + * @return This builder for chaining. + */ + public Builder clearExcludeTxnFromChangeStreams() { + bitField0_ = (bitField0_ & ~0x00000008); + excludeTxnFromChangeStreams_ = false; + onChanged(); + return this; + } + + private int isolationLevel_ = 0; + + /** + * + * + *
    +     * Isolation level for the transaction.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.IsolationLevel isolation_level = 6; + * + * @return The enum numeric value on the wire for isolationLevel. + */ + @java.lang.Override + public int getIsolationLevelValue() { + return isolationLevel_; + } + + /** + * + * + *
    +     * Isolation level for the transaction.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.IsolationLevel isolation_level = 6; + * + * @param value The enum numeric value on the wire for isolationLevel to set. + * @return This builder for chaining. + */ + public Builder setIsolationLevelValue(int value) { + isolationLevel_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Isolation level for the transaction.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.IsolationLevel isolation_level = 6; + * + * @return The isolationLevel. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions.IsolationLevel getIsolationLevel() { + com.google.spanner.v1.TransactionOptions.IsolationLevel result = + com.google.spanner.v1.TransactionOptions.IsolationLevel.forNumber(isolationLevel_); + return result == null + ? com.google.spanner.v1.TransactionOptions.IsolationLevel.UNRECOGNIZED + : result; + } + + /** + * + * + *
    +     * Isolation level for the transaction.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.IsolationLevel isolation_level = 6; + * + * @param value The isolationLevel to set. + * @return This builder for chaining. + */ + public Builder setIsolationLevel( + com.google.spanner.v1.TransactionOptions.IsolationLevel value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + isolationLevel_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Isolation level for the transaction.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions.IsolationLevel isolation_level = 6; + * + * @return This builder for chaining. + */ + public Builder clearIsolationLevel() { + bitField0_ = (bitField0_ & ~0x00000010); + isolationLevel_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.TransactionOptions) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.TransactionOptions) + private static final com.google.spanner.v1.TransactionOptions DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.TransactionOptions(); + } + + public static com.google.spanner.v1.TransactionOptions getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TransactionOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.TransactionOptions getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptionsOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptionsOrBuilder.java new file mode 100644 index 000000000000..06dad5e90e47 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptionsOrBuilder.java @@ -0,0 +1,233 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/transaction.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface TransactionOptionsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.TransactionOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Transaction may write.
    +   *
    +   * Authorization to begin a read-write transaction requires
    +   * `spanner.databases.beginOrRollbackReadWriteTransaction` permission
    +   * on the `session` resource.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite read_write = 1; + * + * @return Whether the readWrite field is set. + */ + boolean hasReadWrite(); + + /** + * + * + *
    +   * Transaction may write.
    +   *
    +   * Authorization to begin a read-write transaction requires
    +   * `spanner.databases.beginOrRollbackReadWriteTransaction` permission
    +   * on the `session` resource.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite read_write = 1; + * + * @return The readWrite. + */ + com.google.spanner.v1.TransactionOptions.ReadWrite getReadWrite(); + + /** + * + * + *
    +   * Transaction may write.
    +   *
    +   * Authorization to begin a read-write transaction requires
    +   * `spanner.databases.beginOrRollbackReadWriteTransaction` permission
    +   * on the `session` resource.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.ReadWrite read_write = 1; + */ + com.google.spanner.v1.TransactionOptions.ReadWriteOrBuilder getReadWriteOrBuilder(); + + /** + * + * + *
    +   * Partitioned DML transaction.
    +   *
    +   * Authorization to begin a Partitioned DML transaction requires
    +   * `spanner.databases.beginPartitionedDmlTransaction` permission
    +   * on the `session` resource.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.PartitionedDml partitioned_dml = 3; + * + * @return Whether the partitionedDml field is set. + */ + boolean hasPartitionedDml(); + + /** + * + * + *
    +   * Partitioned DML transaction.
    +   *
    +   * Authorization to begin a Partitioned DML transaction requires
    +   * `spanner.databases.beginPartitionedDmlTransaction` permission
    +   * on the `session` resource.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.PartitionedDml partitioned_dml = 3; + * + * @return The partitionedDml. + */ + com.google.spanner.v1.TransactionOptions.PartitionedDml getPartitionedDml(); + + /** + * + * + *
    +   * Partitioned DML transaction.
    +   *
    +   * Authorization to begin a Partitioned DML transaction requires
    +   * `spanner.databases.beginPartitionedDmlTransaction` permission
    +   * on the `session` resource.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.PartitionedDml partitioned_dml = 3; + */ + com.google.spanner.v1.TransactionOptions.PartitionedDmlOrBuilder getPartitionedDmlOrBuilder(); + + /** + * + * + *
    +   * Transaction does not write.
    +   *
    +   * Authorization to begin a read-only transaction requires
    +   * `spanner.databases.beginReadOnlyTransaction` permission
    +   * on the `session` resource.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.ReadOnly read_only = 2; + * + * @return Whether the readOnly field is set. + */ + boolean hasReadOnly(); + + /** + * + * + *
    +   * Transaction does not write.
    +   *
    +   * Authorization to begin a read-only transaction requires
    +   * `spanner.databases.beginReadOnlyTransaction` permission
    +   * on the `session` resource.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.ReadOnly read_only = 2; + * + * @return The readOnly. + */ + com.google.spanner.v1.TransactionOptions.ReadOnly getReadOnly(); + + /** + * + * + *
    +   * Transaction does not write.
    +   *
    +   * Authorization to begin a read-only transaction requires
    +   * `spanner.databases.beginReadOnlyTransaction` permission
    +   * on the `session` resource.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.ReadOnly read_only = 2; + */ + com.google.spanner.v1.TransactionOptions.ReadOnlyOrBuilder getReadOnlyOrBuilder(); + + /** + * + * + *
    +   * When `exclude_txn_from_change_streams` is set to `true`, it prevents read
    +   * or write transactions from being tracked in change streams.
    +   *
    +   * * If the DDL option `allow_txn_exclusion` is set to `true`, then the
    +   * updates
    +   * made within this transaction aren't recorded in the change stream.
    +   *
    +   * * If you don't set the DDL option `allow_txn_exclusion` or if it's
    +   * set to `false`, then the updates made within this transaction are
    +   * recorded in the change stream.
    +   *
    +   * When `exclude_txn_from_change_streams` is set to `false` or not set,
    +   * modifications from this transaction are recorded in all change streams
    +   * that are tracking columns modified by these transactions.
    +   *
    +   * The `exclude_txn_from_change_streams` option can only be specified
    +   * for read-write or partitioned DML transactions, otherwise the API returns
    +   * an `INVALID_ARGUMENT` error.
    +   * 
    + * + * bool exclude_txn_from_change_streams = 5; + * + * @return The excludeTxnFromChangeStreams. + */ + boolean getExcludeTxnFromChangeStreams(); + + /** + * + * + *
    +   * Isolation level for the transaction.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.IsolationLevel isolation_level = 6; + * + * @return The enum numeric value on the wire for isolationLevel. + */ + int getIsolationLevelValue(); + + /** + * + * + *
    +   * Isolation level for the transaction.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions.IsolationLevel isolation_level = 6; + * + * @return The isolationLevel. + */ + com.google.spanner.v1.TransactionOptions.IsolationLevel getIsolationLevel(); + + com.google.spanner.v1.TransactionOptions.ModeCase getModeCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOrBuilder.java new file mode 100644 index 000000000000..53adafa6a3aa --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOrBuilder.java @@ -0,0 +1,213 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/transaction.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface TransactionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.Transaction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * `id` may be used to identify the transaction in subsequent
    +   * [Read][google.spanner.v1.Spanner.Read],
    +   * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql],
    +   * [Commit][google.spanner.v1.Spanner.Commit], or
    +   * [Rollback][google.spanner.v1.Spanner.Rollback] calls.
    +   *
    +   * Single-use read-only transactions do not have IDs, because
    +   * single-use transactions do not support multiple requests.
    +   * 
    + * + * bytes id = 1; + * + * @return The id. + */ + com.google.protobuf.ByteString getId(); + + /** + * + * + *
    +   * For snapshot read-only transactions, the read timestamp chosen
    +   * for the transaction. Not returned by default: see
    +   * [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp].
    +   *
    +   * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +   * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +   * 
    + * + * .google.protobuf.Timestamp read_timestamp = 2; + * + * @return Whether the readTimestamp field is set. + */ + boolean hasReadTimestamp(); + + /** + * + * + *
    +   * For snapshot read-only transactions, the read timestamp chosen
    +   * for the transaction. Not returned by default: see
    +   * [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp].
    +   *
    +   * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +   * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +   * 
    + * + * .google.protobuf.Timestamp read_timestamp = 2; + * + * @return The readTimestamp. + */ + com.google.protobuf.Timestamp getReadTimestamp(); + + /** + * + * + *
    +   * For snapshot read-only transactions, the read timestamp chosen
    +   * for the transaction. Not returned by default: see
    +   * [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp].
    +   *
    +   * A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
    +   * Example: `"2014-10-02T15:01:23.045123456Z"`.
    +   * 
    + * + * .google.protobuf.Timestamp read_timestamp = 2; + */ + com.google.protobuf.TimestampOrBuilder getReadTimestampOrBuilder(); + + /** + * + * + *
    +   * A precommit token is included in the response of a BeginTransaction
    +   * request if the read-write transaction is on a multiplexed session and
    +   * a mutation_key was specified in the
    +   * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +   * request for this transaction.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + * + * @return Whether the precommitToken field is set. + */ + boolean hasPrecommitToken(); + + /** + * + * + *
    +   * A precommit token is included in the response of a BeginTransaction
    +   * request if the read-write transaction is on a multiplexed session and
    +   * a mutation_key was specified in the
    +   * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +   * request for this transaction.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + * + * @return The precommitToken. + */ + com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken(); + + /** + * + * + *
    +   * A precommit token is included in the response of a BeginTransaction
    +   * request if the read-write transaction is on a multiplexed session and
    +   * a mutation_key was specified in the
    +   * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +   * request for this transaction.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + */ + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder getPrecommitTokenOrBuilder(); + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the cacheUpdate field is set. + */ + boolean hasCacheUpdate(); + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The cacheUpdate. + */ + com.google.spanner.v1.CacheUpdate getCacheUpdate(); + + /** + * + * + *
    +   * Optional. A cache update expresses a set of changes the client should
    +   * incorporate into its location cache. The client should discard the changes
    +   * if they are older than the data it already has. This data can be obtained
    +   * in response to requests that included a `RoutingHint` field, but may also
    +   * be obtained by explicit location-fetching RPCs which may be added in the
    +   * future.
    +   * 
    + * + * + * .google.spanner.v1.CacheUpdate cache_update = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.CacheUpdateOrBuilder getCacheUpdateOrBuilder(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionProto.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionProto.java new file mode 100644 index 000000000000..bbfedb2eb48b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionProto.java @@ -0,0 +1,214 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/transaction.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public final class TransactionProto extends com.google.protobuf.GeneratedFile { + private TransactionProto() {} + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "TransactionProto"); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_TransactionOptions_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_TransactionOptions_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_TransactionOptions_ReadWrite_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_TransactionOptions_ReadWrite_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_TransactionOptions_PartitionedDml_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_TransactionOptions_PartitionedDml_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_TransactionOptions_ReadOnly_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_TransactionOptions_ReadOnly_fieldAccessorTable; + static final 
com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_Transaction_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_Transaction_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_TransactionSelector_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_TransactionSelector_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_MultiplexedSessionPrecommitToken_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_MultiplexedSessionPrecommitToken_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n#google/spanner/v1/transaction.proto\022\021g" + + "oogle.spanner.v1\032\037google/api/field_behav" + + "ior.proto\032\036google/protobuf/duration.prot" + + "o\032\037google/protobuf/timestamp.proto\032 goog" + + "le/spanner/v1/location.proto\"\367\007\n\022Transac" + + "tionOptions\022E\n\nread_write\030\001 \001(\0132/.google" + + ".spanner.v1.TransactionOptions.ReadWrite" + + "H\000\022O\n\017partitioned_dml\030\003 \001(\01324.google.spa" + + "nner.v1.TransactionOptions.PartitionedDm" + + "lH\000\022C\n\tread_only\030\002 \001(\0132..google.spanner." + + "v1.TransactionOptions.ReadOnlyH\000\022\'\n\037excl" + + "ude_txn_from_change_streams\030\005 \001(\010\022M\n\017iso" + + "lation_level\030\006 \001(\01624.google.spanner.v1.T" + + "ransactionOptions.IsolationLevel\032\354\001\n\tRea" + + "dWrite\022T\n\016read_lock_mode\030\001 \001(\0162<.google." + + "spanner.v1.TransactionOptions.ReadWrite." 
+ + "ReadLockMode\0228\n+multiplexed_session_prev" + + "ious_transaction_id\030\002 \001(\014B\003\340A\001\"O\n\014ReadLo" + + "ckMode\022\036\n\032READ_LOCK_MODE_UNSPECIFIED\020\000\022\017" + + "\n\013PESSIMISTIC\020\001\022\016\n\nOPTIMISTIC\020\002\032\020\n\016Parti" + + "tionedDml\032\250\002\n\010ReadOnly\022\020\n\006strong\030\001 \001(\010H\000" + + "\0228\n\022min_read_timestamp\030\002 \001(\0132\032.google.pr" + + "otobuf.TimestampH\000\0222\n\rmax_staleness\030\003 \001(" + + "\0132\031.google.protobuf.DurationH\000\0224\n\016read_t" + + "imestamp\030\004 \001(\0132\032.google.protobuf.Timesta" + + "mpH\000\0224\n\017exact_staleness\030\005 \001(\0132\031.google.p" + + "rotobuf.DurationH\000\022\035\n\025return_read_timest" + + "amp\030\006 \001(\010B\021\n\017timestamp_bound\"X\n\016Isolatio" + + "nLevel\022\037\n\033ISOLATION_LEVEL_UNSPECIFIED\020\000\022" + + "\020\n\014SERIALIZABLE\020\001\022\023\n\017REPEATABLE_READ\020\002B\006" + + "\n\004mode\"\326\001\n\013Transaction\022\n\n\002id\030\001 \001(\014\0222\n\016re" + + "ad_timestamp\030\002 \001(\0132\032.google.protobuf.Tim" + + "estamp\022L\n\017precommit_token\030\003 \001(\01323.google" + + ".spanner.v1.MultiplexedSessionPrecommitT" + + "oken\0229\n\014cache_update\030\005 \001(\0132\036.google.span" + + "ner.v1.CacheUpdateB\003\340A\001\"\244\001\n\023TransactionS" + + "elector\022;\n\nsingle_use\030\001 \001(\0132%.google.spa" + + "nner.v1.TransactionOptionsH\000\022\014\n\002id\030\002 \001(\014" + + "H\000\0226\n\005begin\030\003 \001(\0132%.google.spanner.v1.Tr" + + "ansactionOptionsH\000B\n\n\010selector\"L\n Multip" + + "lexedSessionPrecommitToken\022\027\n\017precommit_" + + "token\030\001 \001(\014\022\017\n\007seq_num\030\002 \001(\005B\263\001\n\025com.goo" + + "gle.spanner.v1B\020TransactionProtoP\001Z5clou" + + "d.google.com/go/spanner/apiv1/spannerpb;" + + "spannerpb\252\002\027Google.Cloud.Spanner.V1\312\002\027Go" + + "ogle\\Cloud\\Spanner\\V1\352\002\032Google::Cloud::S" 
+ + "panner::V1b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.protobuf.DurationProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + com.google.spanner.v1.LocationProto.getDescriptor(), + }); + internal_static_google_spanner_v1_TransactionOptions_descriptor = + getDescriptor().getMessageType(0); + internal_static_google_spanner_v1_TransactionOptions_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_TransactionOptions_descriptor, + new java.lang.String[] { + "ReadWrite", + "PartitionedDml", + "ReadOnly", + "ExcludeTxnFromChangeStreams", + "IsolationLevel", + "Mode", + }); + internal_static_google_spanner_v1_TransactionOptions_ReadWrite_descriptor = + internal_static_google_spanner_v1_TransactionOptions_descriptor.getNestedType(0); + internal_static_google_spanner_v1_TransactionOptions_ReadWrite_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_TransactionOptions_ReadWrite_descriptor, + new java.lang.String[] { + "ReadLockMode", "MultiplexedSessionPreviousTransactionId", + }); + internal_static_google_spanner_v1_TransactionOptions_PartitionedDml_descriptor = + internal_static_google_spanner_v1_TransactionOptions_descriptor.getNestedType(1); + internal_static_google_spanner_v1_TransactionOptions_PartitionedDml_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_TransactionOptions_PartitionedDml_descriptor, + new java.lang.String[] {}); + internal_static_google_spanner_v1_TransactionOptions_ReadOnly_descriptor = + internal_static_google_spanner_v1_TransactionOptions_descriptor.getNestedType(2); + 
internal_static_google_spanner_v1_TransactionOptions_ReadOnly_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_TransactionOptions_ReadOnly_descriptor, + new java.lang.String[] { + "Strong", + "MinReadTimestamp", + "MaxStaleness", + "ReadTimestamp", + "ExactStaleness", + "ReturnReadTimestamp", + "TimestampBound", + }); + internal_static_google_spanner_v1_Transaction_descriptor = getDescriptor().getMessageType(1); + internal_static_google_spanner_v1_Transaction_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_Transaction_descriptor, + new java.lang.String[] { + "Id", "ReadTimestamp", "PrecommitToken", "CacheUpdate", + }); + internal_static_google_spanner_v1_TransactionSelector_descriptor = + getDescriptor().getMessageType(2); + internal_static_google_spanner_v1_TransactionSelector_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_TransactionSelector_descriptor, + new java.lang.String[] { + "SingleUse", "Id", "Begin", "Selector", + }); + internal_static_google_spanner_v1_MultiplexedSessionPrecommitToken_descriptor = + getDescriptor().getMessageType(3); + internal_static_google_spanner_v1_MultiplexedSessionPrecommitToken_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_MultiplexedSessionPrecommitToken_descriptor, + new java.lang.String[] { + "PrecommitToken", "SeqNum", + }); + descriptor.resolveAllFeaturesImmutable(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.protobuf.DurationProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + com.google.spanner.v1.LocationProto.getDescriptor(); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + 
registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionSelector.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionSelector.java new file mode 100644 index 000000000000..025aa4cb8e62 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionSelector.java @@ -0,0 +1,1310 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/transaction.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * This message is used to select the transaction in which a
    + * [Read][google.spanner.v1.Spanner.Read] or
    + * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] call runs.
    + *
    + * See [TransactionOptions][google.spanner.v1.TransactionOptions] for more
    + * information about transactions.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.TransactionSelector} + */ +@com.google.protobuf.Generated +public final class TransactionSelector extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.TransactionSelector) + TransactionSelectorOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "TransactionSelector"); + } + + // Use TransactionSelector.newBuilder() to construct. + private TransactionSelector(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private TransactionSelector() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionSelector_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionSelector_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.TransactionSelector.class, + com.google.spanner.v1.TransactionSelector.Builder.class); + } + + private int selectorCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object selector_; + + public enum SelectorCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + SINGLE_USE(1), + ID(2), + BEGIN(3), + SELECTOR_NOT_SET(0); + private final int value; + + private SelectorCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. 
+ * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static SelectorCase valueOf(int value) { + return forNumber(value); + } + + public static SelectorCase forNumber(int value) { + switch (value) { + case 1: + return SINGLE_USE; + case 2: + return ID; + case 3: + return BEGIN; + case 0: + return SELECTOR_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public SelectorCase getSelectorCase() { + return SelectorCase.forNumber(selectorCase_); + } + + public static final int SINGLE_USE_FIELD_NUMBER = 1; + + /** + * + * + *
    +   * Execute the read or SQL query in a temporary transaction.
    +   * This is the most efficient way to execute a transaction that
    +   * consists of a single SQL query.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions single_use = 1; + * + * @return Whether the singleUse field is set. + */ + @java.lang.Override + public boolean hasSingleUse() { + return selectorCase_ == 1; + } + + /** + * + * + *
    +   * Execute the read or SQL query in a temporary transaction.
    +   * This is the most efficient way to execute a transaction that
    +   * consists of a single SQL query.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions single_use = 1; + * + * @return The singleUse. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions getSingleUse() { + if (selectorCase_ == 1) { + return (com.google.spanner.v1.TransactionOptions) selector_; + } + return com.google.spanner.v1.TransactionOptions.getDefaultInstance(); + } + + /** + * + * + *
    +   * Execute the read or SQL query in a temporary transaction.
    +   * This is the most efficient way to execute a transaction that
    +   * consists of a single SQL query.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions single_use = 1; + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptionsOrBuilder getSingleUseOrBuilder() { + if (selectorCase_ == 1) { + return (com.google.spanner.v1.TransactionOptions) selector_; + } + return com.google.spanner.v1.TransactionOptions.getDefaultInstance(); + } + + public static final int ID_FIELD_NUMBER = 2; + + /** + * + * + *
    +   * Execute the read or SQL query in a previously-started transaction.
    +   * 
    + * + * bytes id = 2; + * + * @return Whether the id field is set. + */ + @java.lang.Override + public boolean hasId() { + return selectorCase_ == 2; + } + + /** + * + * + *
    +   * Execute the read or SQL query in a previously-started transaction.
    +   * 
    + * + * bytes id = 2; + * + * @return The id. + */ + @java.lang.Override + public com.google.protobuf.ByteString getId() { + if (selectorCase_ == 2) { + return (com.google.protobuf.ByteString) selector_; + } + return com.google.protobuf.ByteString.EMPTY; + } + + public static final int BEGIN_FIELD_NUMBER = 3; + + /** + * + * + *
    +   * Begin a new transaction and execute this read or SQL query in
    +   * it. The transaction ID of the new transaction is returned in
    +   * [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction],
    +   * which is a [Transaction][google.spanner.v1.Transaction].
    +   * 
    + * + * .google.spanner.v1.TransactionOptions begin = 3; + * + * @return Whether the begin field is set. + */ + @java.lang.Override + public boolean hasBegin() { + return selectorCase_ == 3; + } + + /** + * + * + *
    +   * Begin a new transaction and execute this read or SQL query in
    +   * it. The transaction ID of the new transaction is returned in
    +   * [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction],
    +   * which is a [Transaction][google.spanner.v1.Transaction].
    +   * 
    + * + * .google.spanner.v1.TransactionOptions begin = 3; + * + * @return The begin. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions getBegin() { + if (selectorCase_ == 3) { + return (com.google.spanner.v1.TransactionOptions) selector_; + } + return com.google.spanner.v1.TransactionOptions.getDefaultInstance(); + } + + /** + * + * + *
    +   * Begin a new transaction and execute this read or SQL query in
    +   * it. The transaction ID of the new transaction is returned in
    +   * [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction],
    +   * which is a [Transaction][google.spanner.v1.Transaction].
    +   * 
    + * + * .google.spanner.v1.TransactionOptions begin = 3; + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptionsOrBuilder getBeginOrBuilder() { + if (selectorCase_ == 3) { + return (com.google.spanner.v1.TransactionOptions) selector_; + } + return com.google.spanner.v1.TransactionOptions.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (selectorCase_ == 1) { + output.writeMessage(1, (com.google.spanner.v1.TransactionOptions) selector_); + } + if (selectorCase_ == 2) { + output.writeBytes(2, (com.google.protobuf.ByteString) selector_); + } + if (selectorCase_ == 3) { + output.writeMessage(3, (com.google.spanner.v1.TransactionOptions) selector_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (selectorCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, (com.google.spanner.v1.TransactionOptions) selector_); + } + if (selectorCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeBytesSize( + 2, (com.google.protobuf.ByteString) selector_); + } + if (selectorCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, (com.google.spanner.v1.TransactionOptions) selector_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.TransactionSelector)) { + 
return super.equals(obj); + } + com.google.spanner.v1.TransactionSelector other = + (com.google.spanner.v1.TransactionSelector) obj; + + if (!getSelectorCase().equals(other.getSelectorCase())) return false; + switch (selectorCase_) { + case 1: + if (!getSingleUse().equals(other.getSingleUse())) return false; + break; + case 2: + if (!getId().equals(other.getId())) return false; + break; + case 3: + if (!getBegin().equals(other.getBegin())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (selectorCase_) { + case 1: + hash = (37 * hash) + SINGLE_USE_FIELD_NUMBER; + hash = (53 * hash) + getSingleUse().hashCode(); + break; + case 2: + hash = (37 * hash) + ID_FIELD_NUMBER; + hash = (53 * hash) + getId().hashCode(); + break; + case 3: + hash = (37 * hash) + BEGIN_FIELD_NUMBER; + hash = (53 * hash) + getBegin().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.TransactionSelector parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.TransactionSelector parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionSelector parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.TransactionSelector parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionSelector parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.TransactionSelector parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionSelector parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.TransactionSelector parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionSelector parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.TransactionSelector parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.TransactionSelector parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static 
com.google.spanner.v1.TransactionSelector parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.TransactionSelector prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * This message is used to select the transaction in which a
    +   * [Read][google.spanner.v1.Spanner.Read] or
    +   * [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] call runs.
    +   *
    +   * See [TransactionOptions][google.spanner.v1.TransactionOptions] for more
    +   * information about transactions.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.TransactionSelector} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.TransactionSelector) + com.google.spanner.v1.TransactionSelectorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionSelector_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionSelector_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.TransactionSelector.class, + com.google.spanner.v1.TransactionSelector.Builder.class); + } + + // Construct using com.google.spanner.v1.TransactionSelector.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (singleUseBuilder_ != null) { + singleUseBuilder_.clear(); + } + if (beginBuilder_ != null) { + beginBuilder_.clear(); + } + selectorCase_ = 0; + selector_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_TransactionSelector_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.TransactionSelector getDefaultInstanceForType() { + return com.google.spanner.v1.TransactionSelector.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.TransactionSelector build() { + com.google.spanner.v1.TransactionSelector result = buildPartial(); + if (!result.isInitialized()) { + 
throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.TransactionSelector buildPartial() { + com.google.spanner.v1.TransactionSelector result = + new com.google.spanner.v1.TransactionSelector(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.TransactionSelector result) { + int from_bitField0_ = bitField0_; + } + + private void buildPartialOneofs(com.google.spanner.v1.TransactionSelector result) { + result.selectorCase_ = selectorCase_; + result.selector_ = this.selector_; + if (selectorCase_ == 1 && singleUseBuilder_ != null) { + result.selector_ = singleUseBuilder_.build(); + } + if (selectorCase_ == 3 && beginBuilder_ != null) { + result.selector_ = beginBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.TransactionSelector) { + return mergeFrom((com.google.spanner.v1.TransactionSelector) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.TransactionSelector other) { + if (other == com.google.spanner.v1.TransactionSelector.getDefaultInstance()) return this; + switch (other.getSelectorCase()) { + case SINGLE_USE: + { + mergeSingleUse(other.getSingleUse()); + break; + } + case ID: + { + setId(other.getId()); + break; + } + case BEGIN: + { + mergeBegin(other.getBegin()); + break; + } + case SELECTOR_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if 
(extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetSingleUseFieldBuilder().getBuilder(), extensionRegistry); + selectorCase_ = 1; + break; + } // case 10 + case 18: + { + selector_ = input.readBytes(); + selectorCase_ = 2; + break; + } // case 18 + case 26: + { + input.readMessage(internalGetBeginFieldBuilder().getBuilder(), extensionRegistry); + selectorCase_ = 3; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int selectorCase_ = 0; + private java.lang.Object selector_; + + public SelectorCase getSelectorCase() { + return SelectorCase.forNumber(selectorCase_); + } + + public Builder clearSelector() { + selectorCase_ = 0; + selector_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionOptions, + com.google.spanner.v1.TransactionOptions.Builder, + com.google.spanner.v1.TransactionOptionsOrBuilder> + singleUseBuilder_; + + /** + * + * + *
    +     * Execute the read or SQL query in a temporary transaction.
    +     * This is the most efficient way to execute a transaction that
    +     * consists of a single SQL query.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions single_use = 1; + * + * @return Whether the singleUse field is set. + */ + @java.lang.Override + public boolean hasSingleUse() { + return selectorCase_ == 1; + } + + /** + * + * + *
    +     * Execute the read or SQL query in a temporary transaction.
    +     * This is the most efficient way to execute a transaction that
    +     * consists of a single SQL query.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions single_use = 1; + * + * @return The singleUse. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions getSingleUse() { + if (singleUseBuilder_ == null) { + if (selectorCase_ == 1) { + return (com.google.spanner.v1.TransactionOptions) selector_; + } + return com.google.spanner.v1.TransactionOptions.getDefaultInstance(); + } else { + if (selectorCase_ == 1) { + return singleUseBuilder_.getMessage(); + } + return com.google.spanner.v1.TransactionOptions.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Execute the read or SQL query in a temporary transaction.
    +     * This is the most efficient way to execute a transaction that
    +     * consists of a single SQL query.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions single_use = 1; + */ + public Builder setSingleUse(com.google.spanner.v1.TransactionOptions value) { + if (singleUseBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + selector_ = value; + onChanged(); + } else { + singleUseBuilder_.setMessage(value); + } + selectorCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Execute the read or SQL query in a temporary transaction.
    +     * This is the most efficient way to execute a transaction that
    +     * consists of a single SQL query.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions single_use = 1; + */ + public Builder setSingleUse(com.google.spanner.v1.TransactionOptions.Builder builderForValue) { + if (singleUseBuilder_ == null) { + selector_ = builderForValue.build(); + onChanged(); + } else { + singleUseBuilder_.setMessage(builderForValue.build()); + } + selectorCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Execute the read or SQL query in a temporary transaction.
    +     * This is the most efficient way to execute a transaction that
    +     * consists of a single SQL query.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions single_use = 1; + */ + public Builder mergeSingleUse(com.google.spanner.v1.TransactionOptions value) { + if (singleUseBuilder_ == null) { + if (selectorCase_ == 1 + && selector_ != com.google.spanner.v1.TransactionOptions.getDefaultInstance()) { + selector_ = + com.google.spanner.v1.TransactionOptions.newBuilder( + (com.google.spanner.v1.TransactionOptions) selector_) + .mergeFrom(value) + .buildPartial(); + } else { + selector_ = value; + } + onChanged(); + } else { + if (selectorCase_ == 1) { + singleUseBuilder_.mergeFrom(value); + } else { + singleUseBuilder_.setMessage(value); + } + } + selectorCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Execute the read or SQL query in a temporary transaction.
    +     * This is the most efficient way to execute a transaction that
    +     * consists of a single SQL query.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions single_use = 1; + */ + public Builder clearSingleUse() { + if (singleUseBuilder_ == null) { + if (selectorCase_ == 1) { + selectorCase_ = 0; + selector_ = null; + onChanged(); + } + } else { + if (selectorCase_ == 1) { + selectorCase_ = 0; + selector_ = null; + } + singleUseBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Execute the read or SQL query in a temporary transaction.
    +     * This is the most efficient way to execute a transaction that
    +     * consists of a single SQL query.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions single_use = 1; + */ + public com.google.spanner.v1.TransactionOptions.Builder getSingleUseBuilder() { + return internalGetSingleUseFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Execute the read or SQL query in a temporary transaction.
    +     * This is the most efficient way to execute a transaction that
    +     * consists of a single SQL query.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions single_use = 1; + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptionsOrBuilder getSingleUseOrBuilder() { + if ((selectorCase_ == 1) && (singleUseBuilder_ != null)) { + return singleUseBuilder_.getMessageOrBuilder(); + } else { + if (selectorCase_ == 1) { + return (com.google.spanner.v1.TransactionOptions) selector_; + } + return com.google.spanner.v1.TransactionOptions.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Execute the read or SQL query in a temporary transaction.
    +     * This is the most efficient way to execute a transaction that
    +     * consists of a single SQL query.
    +     * 
    + * + * .google.spanner.v1.TransactionOptions single_use = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionOptions, + com.google.spanner.v1.TransactionOptions.Builder, + com.google.spanner.v1.TransactionOptionsOrBuilder> + internalGetSingleUseFieldBuilder() { + if (singleUseBuilder_ == null) { + if (!(selectorCase_ == 1)) { + selector_ = com.google.spanner.v1.TransactionOptions.getDefaultInstance(); + } + singleUseBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionOptions, + com.google.spanner.v1.TransactionOptions.Builder, + com.google.spanner.v1.TransactionOptionsOrBuilder>( + (com.google.spanner.v1.TransactionOptions) selector_, + getParentForChildren(), + isClean()); + selector_ = null; + } + selectorCase_ = 1; + onChanged(); + return singleUseBuilder_; + } + + /** + * + * + *
    +     * Execute the read or SQL query in a previously-started transaction.
    +     * 
    + * + * bytes id = 2; + * + * @return Whether the id field is set. + */ + public boolean hasId() { + return selectorCase_ == 2; + } + + /** + * + * + *
    +     * Execute the read or SQL query in a previously-started transaction.
    +     * 
    + * + * bytes id = 2; + * + * @return The id. + */ + public com.google.protobuf.ByteString getId() { + if (selectorCase_ == 2) { + return (com.google.protobuf.ByteString) selector_; + } + return com.google.protobuf.ByteString.EMPTY; + } + + /** + * + * + *
    +     * Execute the read or SQL query in a previously-started transaction.
    +     * 
    + * + * bytes id = 2; + * + * @param value The id to set. + * @return This builder for chaining. + */ + public Builder setId(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + selectorCase_ = 2; + selector_ = value; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Execute the read or SQL query in a previously-started transaction.
    +     * 
    + * + * bytes id = 2; + * + * @return This builder for chaining. + */ + public Builder clearId() { + if (selectorCase_ == 2) { + selectorCase_ = 0; + selector_ = null; + onChanged(); + } + return this; + } + + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionOptions, + com.google.spanner.v1.TransactionOptions.Builder, + com.google.spanner.v1.TransactionOptionsOrBuilder> + beginBuilder_; + + /** + * + * + *
    +     * Begin a new transaction and execute this read or SQL query in
    +     * it. The transaction ID of the new transaction is returned in
    +     * [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction],
    +     * which is a [Transaction][google.spanner.v1.Transaction].
    +     * 
    + * + * .google.spanner.v1.TransactionOptions begin = 3; + * + * @return Whether the begin field is set. + */ + @java.lang.Override + public boolean hasBegin() { + return selectorCase_ == 3; + } + + /** + * + * + *
    +     * Begin a new transaction and execute this read or SQL query in
    +     * it. The transaction ID of the new transaction is returned in
    +     * [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction],
    +     * which is a [Transaction][google.spanner.v1.Transaction].
    +     * 
    + * + * .google.spanner.v1.TransactionOptions begin = 3; + * + * @return The begin. + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptions getBegin() { + if (beginBuilder_ == null) { + if (selectorCase_ == 3) { + return (com.google.spanner.v1.TransactionOptions) selector_; + } + return com.google.spanner.v1.TransactionOptions.getDefaultInstance(); + } else { + if (selectorCase_ == 3) { + return beginBuilder_.getMessage(); + } + return com.google.spanner.v1.TransactionOptions.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Begin a new transaction and execute this read or SQL query in
    +     * it. The transaction ID of the new transaction is returned in
    +     * [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction],
    +     * which is a [Transaction][google.spanner.v1.Transaction].
    +     * 
    + * + * .google.spanner.v1.TransactionOptions begin = 3; + */ + public Builder setBegin(com.google.spanner.v1.TransactionOptions value) { + if (beginBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + selector_ = value; + onChanged(); + } else { + beginBuilder_.setMessage(value); + } + selectorCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Begin a new transaction and execute this read or SQL query in
    +     * it. The transaction ID of the new transaction is returned in
    +     * [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction],
    +     * which is a [Transaction][google.spanner.v1.Transaction].
    +     * 
    + * + * .google.spanner.v1.TransactionOptions begin = 3; + */ + public Builder setBegin(com.google.spanner.v1.TransactionOptions.Builder builderForValue) { + if (beginBuilder_ == null) { + selector_ = builderForValue.build(); + onChanged(); + } else { + beginBuilder_.setMessage(builderForValue.build()); + } + selectorCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Begin a new transaction and execute this read or SQL query in
    +     * it. The transaction ID of the new transaction is returned in
    +     * [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction],
    +     * which is a [Transaction][google.spanner.v1.Transaction].
    +     * 
    + * + * .google.spanner.v1.TransactionOptions begin = 3; + */ + public Builder mergeBegin(com.google.spanner.v1.TransactionOptions value) { + if (beginBuilder_ == null) { + if (selectorCase_ == 3 + && selector_ != com.google.spanner.v1.TransactionOptions.getDefaultInstance()) { + selector_ = + com.google.spanner.v1.TransactionOptions.newBuilder( + (com.google.spanner.v1.TransactionOptions) selector_) + .mergeFrom(value) + .buildPartial(); + } else { + selector_ = value; + } + onChanged(); + } else { + if (selectorCase_ == 3) { + beginBuilder_.mergeFrom(value); + } else { + beginBuilder_.setMessage(value); + } + } + selectorCase_ = 3; + return this; + } + + /** + * + * + *
    +     * Begin a new transaction and execute this read or SQL query in
    +     * it. The transaction ID of the new transaction is returned in
    +     * [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction],
    +     * which is a [Transaction][google.spanner.v1.Transaction].
    +     * 
    + * + * .google.spanner.v1.TransactionOptions begin = 3; + */ + public Builder clearBegin() { + if (beginBuilder_ == null) { + if (selectorCase_ == 3) { + selectorCase_ = 0; + selector_ = null; + onChanged(); + } + } else { + if (selectorCase_ == 3) { + selectorCase_ = 0; + selector_ = null; + } + beginBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Begin a new transaction and execute this read or SQL query in
    +     * it. The transaction ID of the new transaction is returned in
    +     * [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction],
    +     * which is a [Transaction][google.spanner.v1.Transaction].
    +     * 
    + * + * .google.spanner.v1.TransactionOptions begin = 3; + */ + public com.google.spanner.v1.TransactionOptions.Builder getBeginBuilder() { + return internalGetBeginFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Begin a new transaction and execute this read or SQL query in
    +     * it. The transaction ID of the new transaction is returned in
    +     * [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction],
    +     * which is a [Transaction][google.spanner.v1.Transaction].
    +     * 
    + * + * .google.spanner.v1.TransactionOptions begin = 3; + */ + @java.lang.Override + public com.google.spanner.v1.TransactionOptionsOrBuilder getBeginOrBuilder() { + if ((selectorCase_ == 3) && (beginBuilder_ != null)) { + return beginBuilder_.getMessageOrBuilder(); + } else { + if (selectorCase_ == 3) { + return (com.google.spanner.v1.TransactionOptions) selector_; + } + return com.google.spanner.v1.TransactionOptions.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Begin a new transaction and execute this read or SQL query in
    +     * it. The transaction ID of the new transaction is returned in
    +     * [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction],
    +     * which is a [Transaction][google.spanner.v1.Transaction].
    +     * 
    + * + * .google.spanner.v1.TransactionOptions begin = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionOptions, + com.google.spanner.v1.TransactionOptions.Builder, + com.google.spanner.v1.TransactionOptionsOrBuilder> + internalGetBeginFieldBuilder() { + if (beginBuilder_ == null) { + if (!(selectorCase_ == 3)) { + selector_ = com.google.spanner.v1.TransactionOptions.getDefaultInstance(); + } + beginBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.TransactionOptions, + com.google.spanner.v1.TransactionOptions.Builder, + com.google.spanner.v1.TransactionOptionsOrBuilder>( + (com.google.spanner.v1.TransactionOptions) selector_, + getParentForChildren(), + isClean()); + selector_ = null; + } + selectorCase_ = 3; + onChanged(); + return beginBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.TransactionSelector) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.TransactionSelector) + private static final com.google.spanner.v1.TransactionSelector DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.TransactionSelector(); + } + + public static com.google.spanner.v1.TransactionSelector getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TransactionSelector parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.TransactionSelector getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionSelectorOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionSelectorOrBuilder.java new file mode 100644 index 000000000000..cad31c1c6539 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionSelectorOrBuilder.java @@ -0,0 +1,145 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/transaction.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface TransactionSelectorOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.TransactionSelector) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Execute the read or SQL query in a temporary transaction.
    +   * This is the most efficient way to execute a transaction that
    +   * consists of a single SQL query.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions single_use = 1; + * + * @return Whether the singleUse field is set. + */ + boolean hasSingleUse(); + + /** + * + * + *
    +   * Execute the read or SQL query in a temporary transaction.
    +   * This is the most efficient way to execute a transaction that
    +   * consists of a single SQL query.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions single_use = 1; + * + * @return The singleUse. + */ + com.google.spanner.v1.TransactionOptions getSingleUse(); + + /** + * + * + *
    +   * Execute the read or SQL query in a temporary transaction.
    +   * This is the most efficient way to execute a transaction that
    +   * consists of a single SQL query.
    +   * 
    + * + * .google.spanner.v1.TransactionOptions single_use = 1; + */ + com.google.spanner.v1.TransactionOptionsOrBuilder getSingleUseOrBuilder(); + + /** + * + * + *
    +   * Execute the read or SQL query in a previously-started transaction.
    +   * 
    + * + * bytes id = 2; + * + * @return Whether the id field is set. + */ + boolean hasId(); + + /** + * + * + *
    +   * Execute the read or SQL query in a previously-started transaction.
    +   * 
    + * + * bytes id = 2; + * + * @return The id. + */ + com.google.protobuf.ByteString getId(); + + /** + * + * + *
    +   * Begin a new transaction and execute this read or SQL query in
    +   * it. The transaction ID of the new transaction is returned in
    +   * [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction],
    +   * which is a [Transaction][google.spanner.v1.Transaction].
    +   * 
    + * + * .google.spanner.v1.TransactionOptions begin = 3; + * + * @return Whether the begin field is set. + */ + boolean hasBegin(); + + /** + * + * + *
    +   * Begin a new transaction and execute this read or SQL query in
    +   * it. The transaction ID of the new transaction is returned in
    +   * [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction],
    +   * which is a [Transaction][google.spanner.v1.Transaction].
    +   * 
    + * + * .google.spanner.v1.TransactionOptions begin = 3; + * + * @return The begin. + */ + com.google.spanner.v1.TransactionOptions getBegin(); + + /** + * + * + *
    +   * Begin a new transaction and execute this read or SQL query in
    +   * it. The transaction ID of the new transaction is returned in
    +   * [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction],
    +   * which is a [Transaction][google.spanner.v1.Transaction].
    +   * 
    + * + * .google.spanner.v1.TransactionOptions begin = 3; + */ + com.google.spanner.v1.TransactionOptionsOrBuilder getBeginOrBuilder(); + + com.google.spanner.v1.TransactionSelector.SelectorCase getSelectorCase(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Type.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Type.java new file mode 100644 index 000000000000..d5eec528e773 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Type.java @@ -0,0 +1,1597 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/type.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * `Type` indicates the type of a Cloud Spanner value, as might be stored in a
    + * table cell or returned from an SQL query.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.Type} + */ +@com.google.protobuf.Generated +public final class Type extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.Type) + TypeOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "Type"); + } + + // Use Type.newBuilder() to construct. + private Type(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private Type() { + code_ = 0; + typeAnnotation_ = 0; + protoTypeFqn_ = ""; + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TypeProto.internal_static_google_spanner_v1_Type_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TypeProto.internal_static_google_spanner_v1_Type_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Type.class, com.google.spanner.v1.Type.Builder.class); + } + + private int bitField0_; + public static final int CODE_FIELD_NUMBER = 1; + private int code_ = 0; + + /** + * + * + *
    +   * Required. The [TypeCode][google.spanner.v1.TypeCode] for this type.
    +   * 
    + * + * .google.spanner.v1.TypeCode code = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The enum numeric value on the wire for code. + */ + @java.lang.Override + public int getCodeValue() { + return code_; + } + + /** + * + * + *
    +   * Required. The [TypeCode][google.spanner.v1.TypeCode] for this type.
    +   * 
    + * + * .google.spanner.v1.TypeCode code = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The code. + */ + @java.lang.Override + public com.google.spanner.v1.TypeCode getCode() { + com.google.spanner.v1.TypeCode result = com.google.spanner.v1.TypeCode.forNumber(code_); + return result == null ? com.google.spanner.v1.TypeCode.UNRECOGNIZED : result; + } + + public static final int ARRAY_ELEMENT_TYPE_FIELD_NUMBER = 2; + private com.google.spanner.v1.Type arrayElementType_; + + /** + * + * + *
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +   * type of the array elements.
    +   * 
    + * + * .google.spanner.v1.Type array_element_type = 2; + * + * @return Whether the arrayElementType field is set. + */ + @java.lang.Override + public boolean hasArrayElementType() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +   * type of the array elements.
    +   * 
    + * + * .google.spanner.v1.Type array_element_type = 2; + * + * @return The arrayElementType. + */ + @java.lang.Override + public com.google.spanner.v1.Type getArrayElementType() { + return arrayElementType_ == null + ? com.google.spanner.v1.Type.getDefaultInstance() + : arrayElementType_; + } + + /** + * + * + *
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +   * type of the array elements.
    +   * 
    + * + * .google.spanner.v1.Type array_element_type = 2; + */ + @java.lang.Override + public com.google.spanner.v1.TypeOrBuilder getArrayElementTypeOrBuilder() { + return arrayElementType_ == null + ? com.google.spanner.v1.Type.getDefaultInstance() + : arrayElementType_; + } + + public static final int STRUCT_TYPE_FIELD_NUMBER = 3; + private com.google.spanner.v1.StructType structType_; + + /** + * + * + *
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +   * type information for the struct's fields.
    +   * 
    + * + * .google.spanner.v1.StructType struct_type = 3; + * + * @return Whether the structType field is set. + */ + @java.lang.Override + public boolean hasStructType() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +   * type information for the struct's fields.
    +   * 
    + * + * .google.spanner.v1.StructType struct_type = 3; + * + * @return The structType. + */ + @java.lang.Override + public com.google.spanner.v1.StructType getStructType() { + return structType_ == null + ? com.google.spanner.v1.StructType.getDefaultInstance() + : structType_; + } + + /** + * + * + *
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +   * type information for the struct's fields.
    +   * 
    + * + * .google.spanner.v1.StructType struct_type = 3; + */ + @java.lang.Override + public com.google.spanner.v1.StructTypeOrBuilder getStructTypeOrBuilder() { + return structType_ == null + ? com.google.spanner.v1.StructType.getDefaultInstance() + : structType_; + } + + public static final int TYPE_ANNOTATION_FIELD_NUMBER = 4; + private int typeAnnotation_ = 0; + + /** + * + * + *
    +   * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that
    +   * disambiguates SQL type that Spanner will use to represent values of this
    +   * type during query processing. This is necessary for some type codes because
    +   * a single [TypeCode][google.spanner.v1.TypeCode] can be mapped to different
    +   * SQL types depending on the SQL dialect.
    +   * [type_annotation][google.spanner.v1.Type.type_annotation] typically is not
    +   * needed to process the content of a value (it doesn't affect serialization)
    +   * and clients can ignore it on the read path.
    +   * 
    + * + * .google.spanner.v1.TypeAnnotationCode type_annotation = 4; + * + * @return The enum numeric value on the wire for typeAnnotation. + */ + @java.lang.Override + public int getTypeAnnotationValue() { + return typeAnnotation_; + } + + /** + * + * + *
    +   * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that
    +   * disambiguates SQL type that Spanner will use to represent values of this
    +   * type during query processing. This is necessary for some type codes because
    +   * a single [TypeCode][google.spanner.v1.TypeCode] can be mapped to different
    +   * SQL types depending on the SQL dialect.
    +   * [type_annotation][google.spanner.v1.Type.type_annotation] typically is not
    +   * needed to process the content of a value (it doesn't affect serialization)
    +   * and clients can ignore it on the read path.
    +   * 
    + * + * .google.spanner.v1.TypeAnnotationCode type_annotation = 4; + * + * @return The typeAnnotation. + */ + @java.lang.Override + public com.google.spanner.v1.TypeAnnotationCode getTypeAnnotation() { + com.google.spanner.v1.TypeAnnotationCode result = + com.google.spanner.v1.TypeAnnotationCode.forNumber(typeAnnotation_); + return result == null ? com.google.spanner.v1.TypeAnnotationCode.UNRECOGNIZED : result; + } + + public static final int PROTO_TYPE_FQN_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object protoTypeFqn_ = ""; + + /** + * + * + *
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [PROTO][google.spanner.v1.TypeCode.PROTO] or
    +   * [code][google.spanner.v1.Type.code] ==
    +   * [ENUM][google.spanner.v1.TypeCode.ENUM], then `proto_type_fqn` is the fully
    +   * qualified name of the proto type representing the proto/enum definition.
    +   * 
    + * + * string proto_type_fqn = 5; + * + * @return The protoTypeFqn. + */ + @java.lang.Override + public java.lang.String getProtoTypeFqn() { + java.lang.Object ref = protoTypeFqn_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + protoTypeFqn_ = s; + return s; + } + } + + /** + * + * + *
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [PROTO][google.spanner.v1.TypeCode.PROTO] or
    +   * [code][google.spanner.v1.Type.code] ==
    +   * [ENUM][google.spanner.v1.TypeCode.ENUM], then `proto_type_fqn` is the fully
    +   * qualified name of the proto type representing the proto/enum definition.
    +   * 
    + * + * string proto_type_fqn = 5; + * + * @return The bytes for protoTypeFqn. + */ + @java.lang.Override + public com.google.protobuf.ByteString getProtoTypeFqnBytes() { + java.lang.Object ref = protoTypeFqn_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + protoTypeFqn_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (code_ != com.google.spanner.v1.TypeCode.TYPE_CODE_UNSPECIFIED.getNumber()) { + output.writeEnum(1, code_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getArrayElementType()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getStructType()); + } + if (typeAnnotation_ + != com.google.spanner.v1.TypeAnnotationCode.TYPE_ANNOTATION_CODE_UNSPECIFIED.getNumber()) { + output.writeEnum(4, typeAnnotation_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(protoTypeFqn_)) { + com.google.protobuf.GeneratedMessage.writeString(output, 5, protoTypeFqn_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (code_ != com.google.spanner.v1.TypeCode.TYPE_CODE_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, code_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getArrayElementType()); + } + if (((bitField0_ & 0x00000002) != 
0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getStructType()); + } + if (typeAnnotation_ + != com.google.spanner.v1.TypeAnnotationCode.TYPE_ANNOTATION_CODE_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(4, typeAnnotation_); + } + if (!com.google.protobuf.GeneratedMessage.isStringEmpty(protoTypeFqn_)) { + size += com.google.protobuf.GeneratedMessage.computeStringSize(5, protoTypeFqn_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.Type)) { + return super.equals(obj); + } + com.google.spanner.v1.Type other = (com.google.spanner.v1.Type) obj; + + if (code_ != other.code_) return false; + if (hasArrayElementType() != other.hasArrayElementType()) return false; + if (hasArrayElementType()) { + if (!getArrayElementType().equals(other.getArrayElementType())) return false; + } + if (hasStructType() != other.hasStructType()) return false; + if (hasStructType()) { + if (!getStructType().equals(other.getStructType())) return false; + } + if (typeAnnotation_ != other.typeAnnotation_) return false; + if (!getProtoTypeFqn().equals(other.getProtoTypeFqn())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + CODE_FIELD_NUMBER; + hash = (53 * hash) + code_; + if (hasArrayElementType()) { + hash = (37 * hash) + ARRAY_ELEMENT_TYPE_FIELD_NUMBER; + hash = (53 * hash) + getArrayElementType().hashCode(); + } + if (hasStructType()) { + hash = (37 * hash) + STRUCT_TYPE_FIELD_NUMBER; + hash = (53 * hash) + getStructType().hashCode(); + } + hash = (37 * 
hash) + TYPE_ANNOTATION_FIELD_NUMBER; + hash = (53 * hash) + typeAnnotation_; + hash = (37 * hash) + PROTO_TYPE_FQN_FIELD_NUMBER; + hash = (53 * hash) + getProtoTypeFqn().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.Type parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Type parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Type parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Type parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Type parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.Type parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.Type parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Type parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Type parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Type parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.Type parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.Type parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.v1.Type prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * `Type` indicates the type of a Cloud Spanner value, as might be stored in a
    +   * table cell or returned from an SQL query.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.Type} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.Type) + com.google.spanner.v1.TypeOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TypeProto.internal_static_google_spanner_v1_Type_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TypeProto + .internal_static_google_spanner_v1_Type_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.Type.class, com.google.spanner.v1.Type.Builder.class); + } + + // Construct using com.google.spanner.v1.Type.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetArrayElementTypeFieldBuilder(); + internalGetStructTypeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + code_ = 0; + arrayElementType_ = null; + if (arrayElementTypeBuilder_ != null) { + arrayElementTypeBuilder_.dispose(); + arrayElementTypeBuilder_ = null; + } + structType_ = null; + if (structTypeBuilder_ != null) { + structTypeBuilder_.dispose(); + structTypeBuilder_ = null; + } + typeAnnotation_ = 0; + protoTypeFqn_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.TypeProto.internal_static_google_spanner_v1_Type_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.Type 
getDefaultInstanceForType() { + return com.google.spanner.v1.Type.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.Type build() { + com.google.spanner.v1.Type result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.Type buildPartial() { + com.google.spanner.v1.Type result = new com.google.spanner.v1.Type(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.Type result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.code_ = code_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.arrayElementType_ = + arrayElementTypeBuilder_ == null ? arrayElementType_ : arrayElementTypeBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.structType_ = structTypeBuilder_ == null ? 
structType_ : structTypeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.typeAnnotation_ = typeAnnotation_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.protoTypeFqn_ = protoTypeFqn_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.Type) { + return mergeFrom((com.google.spanner.v1.Type) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.Type other) { + if (other == com.google.spanner.v1.Type.getDefaultInstance()) return this; + if (other.code_ != 0) { + setCodeValue(other.getCodeValue()); + } + if (other.hasArrayElementType()) { + mergeArrayElementType(other.getArrayElementType()); + } + if (other.hasStructType()) { + mergeStructType(other.getStructType()); + } + if (other.typeAnnotation_ != 0) { + setTypeAnnotationValue(other.getTypeAnnotationValue()); + } + if (!other.getProtoTypeFqn().isEmpty()) { + protoTypeFqn_ = other.protoTypeFqn_; + bitField0_ |= 0x00000010; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + code_ = input.readEnum(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + input.readMessage( + internalGetArrayElementTypeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } 
// case 18 + case 26: + { + input.readMessage( + internalGetStructTypeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 32: + { + typeAnnotation_ = input.readEnum(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 42: + { + protoTypeFqn_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000010; + break; + } // case 42 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int code_ = 0; + + /** + * + * + *
    +     * Required. The [TypeCode][google.spanner.v1.TypeCode] for this type.
    +     * 
    + * + * .google.spanner.v1.TypeCode code = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The enum numeric value on the wire for code. + */ + @java.lang.Override + public int getCodeValue() { + return code_; + } + + /** + * + * + *
    +     * Required. The [TypeCode][google.spanner.v1.TypeCode] for this type.
    +     * 
    + * + * .google.spanner.v1.TypeCode code = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The enum numeric value on the wire for code to set. + * @return This builder for chaining. + */ + public Builder setCodeValue(int value) { + code_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The [TypeCode][google.spanner.v1.TypeCode] for this type.
    +     * 
    + * + * .google.spanner.v1.TypeCode code = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The code. + */ + @java.lang.Override + public com.google.spanner.v1.TypeCode getCode() { + com.google.spanner.v1.TypeCode result = com.google.spanner.v1.TypeCode.forNumber(code_); + return result == null ? com.google.spanner.v1.TypeCode.UNRECOGNIZED : result; + } + + /** + * + * + *
    +     * Required. The [TypeCode][google.spanner.v1.TypeCode] for this type.
    +     * 
    + * + * .google.spanner.v1.TypeCode code = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The code to set. + * @return This builder for chaining. + */ + public Builder setCode(com.google.spanner.v1.TypeCode value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + code_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * Required. The [TypeCode][google.spanner.v1.TypeCode] for this type.
    +     * 
    + * + * .google.spanner.v1.TypeCode code = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearCode() { + bitField0_ = (bitField0_ & ~0x00000001); + code_ = 0; + onChanged(); + return this; + } + + private com.google.spanner.v1.Type arrayElementType_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder> + arrayElementTypeBuilder_; + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +     * type of the array elements.
    +     * 
    + * + * .google.spanner.v1.Type array_element_type = 2; + * + * @return Whether the arrayElementType field is set. + */ + public boolean hasArrayElementType() { + return ((bitField0_ & 0x00000002) != 0); + } + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +     * type of the array elements.
    +     * 
    + * + * .google.spanner.v1.Type array_element_type = 2; + * + * @return The arrayElementType. + */ + public com.google.spanner.v1.Type getArrayElementType() { + if (arrayElementTypeBuilder_ == null) { + return arrayElementType_ == null + ? com.google.spanner.v1.Type.getDefaultInstance() + : arrayElementType_; + } else { + return arrayElementTypeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +     * type of the array elements.
    +     * 
    + * + * .google.spanner.v1.Type array_element_type = 2; + */ + public Builder setArrayElementType(com.google.spanner.v1.Type value) { + if (arrayElementTypeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + arrayElementType_ = value; + } else { + arrayElementTypeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +     * type of the array elements.
    +     * 
    + * + * .google.spanner.v1.Type array_element_type = 2; + */ + public Builder setArrayElementType(com.google.spanner.v1.Type.Builder builderForValue) { + if (arrayElementTypeBuilder_ == null) { + arrayElementType_ = builderForValue.build(); + } else { + arrayElementTypeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +     * type of the array elements.
    +     * 
    + * + * .google.spanner.v1.Type array_element_type = 2; + */ + public Builder mergeArrayElementType(com.google.spanner.v1.Type value) { + if (arrayElementTypeBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && arrayElementType_ != null + && arrayElementType_ != com.google.spanner.v1.Type.getDefaultInstance()) { + getArrayElementTypeBuilder().mergeFrom(value); + } else { + arrayElementType_ = value; + } + } else { + arrayElementTypeBuilder_.mergeFrom(value); + } + if (arrayElementType_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +     * type of the array elements.
    +     * 
    + * + * .google.spanner.v1.Type array_element_type = 2; + */ + public Builder clearArrayElementType() { + bitField0_ = (bitField0_ & ~0x00000002); + arrayElementType_ = null; + if (arrayElementTypeBuilder_ != null) { + arrayElementTypeBuilder_.dispose(); + arrayElementTypeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +     * type of the array elements.
    +     * 
    + * + * .google.spanner.v1.Type array_element_type = 2; + */ + public com.google.spanner.v1.Type.Builder getArrayElementTypeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return internalGetArrayElementTypeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +     * type of the array elements.
    +     * 
    + * + * .google.spanner.v1.Type array_element_type = 2; + */ + public com.google.spanner.v1.TypeOrBuilder getArrayElementTypeOrBuilder() { + if (arrayElementTypeBuilder_ != null) { + return arrayElementTypeBuilder_.getMessageOrBuilder(); + } else { + return arrayElementType_ == null + ? com.google.spanner.v1.Type.getDefaultInstance() + : arrayElementType_; + } + } + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +     * type of the array elements.
    +     * 
    + * + * .google.spanner.v1.Type array_element_type = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder> + internalGetArrayElementTypeFieldBuilder() { + if (arrayElementTypeBuilder_ == null) { + arrayElementTypeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.Type, + com.google.spanner.v1.Type.Builder, + com.google.spanner.v1.TypeOrBuilder>( + getArrayElementType(), getParentForChildren(), isClean()); + arrayElementType_ = null; + } + return arrayElementTypeBuilder_; + } + + private com.google.spanner.v1.StructType structType_; + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.StructType, + com.google.spanner.v1.StructType.Builder, + com.google.spanner.v1.StructTypeOrBuilder> + structTypeBuilder_; + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +     * type information for the struct's fields.
    +     * 
    + * + * .google.spanner.v1.StructType struct_type = 3; + * + * @return Whether the structType field is set. + */ + public boolean hasStructType() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +     * type information for the struct's fields.
    +     * 
    + * + * .google.spanner.v1.StructType struct_type = 3; + * + * @return The structType. + */ + public com.google.spanner.v1.StructType getStructType() { + if (structTypeBuilder_ == null) { + return structType_ == null + ? com.google.spanner.v1.StructType.getDefaultInstance() + : structType_; + } else { + return structTypeBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +     * type information for the struct's fields.
    +     * 
    + * + * .google.spanner.v1.StructType struct_type = 3; + */ + public Builder setStructType(com.google.spanner.v1.StructType value) { + if (structTypeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + structType_ = value; + } else { + structTypeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +     * type information for the struct's fields.
    +     * 
    + * + * .google.spanner.v1.StructType struct_type = 3; + */ + public Builder setStructType(com.google.spanner.v1.StructType.Builder builderForValue) { + if (structTypeBuilder_ == null) { + structType_ = builderForValue.build(); + } else { + structTypeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +     * type information for the struct's fields.
    +     * 
    + * + * .google.spanner.v1.StructType struct_type = 3; + */ + public Builder mergeStructType(com.google.spanner.v1.StructType value) { + if (structTypeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && structType_ != null + && structType_ != com.google.spanner.v1.StructType.getDefaultInstance()) { + getStructTypeBuilder().mergeFrom(value); + } else { + structType_ = value; + } + } else { + structTypeBuilder_.mergeFrom(value); + } + if (structType_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +     * type information for the struct's fields.
    +     * 
    + * + * .google.spanner.v1.StructType struct_type = 3; + */ + public Builder clearStructType() { + bitField0_ = (bitField0_ & ~0x00000004); + structType_ = null; + if (structTypeBuilder_ != null) { + structTypeBuilder_.dispose(); + structTypeBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +     * type information for the struct's fields.
    +     * 
    + * + * .google.spanner.v1.StructType struct_type = 3; + */ + public com.google.spanner.v1.StructType.Builder getStructTypeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return internalGetStructTypeFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +     * type information for the struct's fields.
    +     * 
    + * + * .google.spanner.v1.StructType struct_type = 3; + */ + public com.google.spanner.v1.StructTypeOrBuilder getStructTypeOrBuilder() { + if (structTypeBuilder_ != null) { + return structTypeBuilder_.getMessageOrBuilder(); + } else { + return structType_ == null + ? com.google.spanner.v1.StructType.getDefaultInstance() + : structType_; + } + } + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +     * type information for the struct's fields.
    +     * 
    + * + * .google.spanner.v1.StructType struct_type = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.StructType, + com.google.spanner.v1.StructType.Builder, + com.google.spanner.v1.StructTypeOrBuilder> + internalGetStructTypeFieldBuilder() { + if (structTypeBuilder_ == null) { + structTypeBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.spanner.v1.StructType, + com.google.spanner.v1.StructType.Builder, + com.google.spanner.v1.StructTypeOrBuilder>( + getStructType(), getParentForChildren(), isClean()); + structType_ = null; + } + return structTypeBuilder_; + } + + private int typeAnnotation_ = 0; + + /** + * + * + *
    +     * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that
    +     * disambiguates SQL type that Spanner will use to represent values of this
    +     * type during query processing. This is necessary for some type codes because
    +     * a single [TypeCode][google.spanner.v1.TypeCode] can be mapped to different
    +     * SQL types depending on the SQL dialect.
    +     * [type_annotation][google.spanner.v1.Type.type_annotation] typically is not
    +     * needed to process the content of a value (it doesn't affect serialization)
    +     * and clients can ignore it on the read path.
    +     * 
    + * + * .google.spanner.v1.TypeAnnotationCode type_annotation = 4; + * + * @return The enum numeric value on the wire for typeAnnotation. + */ + @java.lang.Override + public int getTypeAnnotationValue() { + return typeAnnotation_; + } + + /** + * + * + *
    +     * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that
    +     * disambiguates SQL type that Spanner will use to represent values of this
    +     * type during query processing. This is necessary for some type codes because
    +     * a single [TypeCode][google.spanner.v1.TypeCode] can be mapped to different
    +     * SQL types depending on the SQL dialect.
    +     * [type_annotation][google.spanner.v1.Type.type_annotation] typically is not
    +     * needed to process the content of a value (it doesn't affect serialization)
    +     * and clients can ignore it on the read path.
    +     * 
    + * + * .google.spanner.v1.TypeAnnotationCode type_annotation = 4; + * + * @param value The enum numeric value on the wire for typeAnnotation to set. + * @return This builder for chaining. + */ + public Builder setTypeAnnotationValue(int value) { + typeAnnotation_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
    +     * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that
    +     * disambiguates SQL type that Spanner will use to represent values of this
    +     * type during query processing. This is necessary for some type codes because
    +     * a single [TypeCode][google.spanner.v1.TypeCode] can be mapped to different
    +     * SQL types depending on the SQL dialect.
    +     * [type_annotation][google.spanner.v1.Type.type_annotation] typically is not
    +     * needed to process the content of a value (it doesn't affect serialization)
    +     * and clients can ignore it on the read path.
    +     * 
    + * + * .google.spanner.v1.TypeAnnotationCode type_annotation = 4; + * + * @return The typeAnnotation. + */ + @java.lang.Override + public com.google.spanner.v1.TypeAnnotationCode getTypeAnnotation() { + com.google.spanner.v1.TypeAnnotationCode result = + com.google.spanner.v1.TypeAnnotationCode.forNumber(typeAnnotation_); + return result == null ? com.google.spanner.v1.TypeAnnotationCode.UNRECOGNIZED : result; + } + + /** + * + * + *
    +     * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that
    +     * disambiguates SQL type that Spanner will use to represent values of this
    +     * type during query processing. This is necessary for some type codes because
    +     * a single [TypeCode][google.spanner.v1.TypeCode] can be mapped to different
    +     * SQL types depending on the SQL dialect.
    +     * [type_annotation][google.spanner.v1.Type.type_annotation] typically is not
    +     * needed to process the content of a value (it doesn't affect serialization)
    +     * and clients can ignore it on the read path.
    +     * 
    + * + * .google.spanner.v1.TypeAnnotationCode type_annotation = 4; + * + * @param value The typeAnnotation to set. + * @return This builder for chaining. + */ + public Builder setTypeAnnotation(com.google.spanner.v1.TypeAnnotationCode value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + typeAnnotation_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * + * + *
    +     * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that
    +     * disambiguates SQL type that Spanner will use to represent values of this
    +     * type during query processing. This is necessary for some type codes because
    +     * a single [TypeCode][google.spanner.v1.TypeCode] can be mapped to different
    +     * SQL types depending on the SQL dialect.
    +     * [type_annotation][google.spanner.v1.Type.type_annotation] typically is not
    +     * needed to process the content of a value (it doesn't affect serialization)
    +     * and clients can ignore it on the read path.
    +     * 
    + * + * .google.spanner.v1.TypeAnnotationCode type_annotation = 4; + * + * @return This builder for chaining. + */ + public Builder clearTypeAnnotation() { + bitField0_ = (bitField0_ & ~0x00000008); + typeAnnotation_ = 0; + onChanged(); + return this; + } + + private java.lang.Object protoTypeFqn_ = ""; + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [PROTO][google.spanner.v1.TypeCode.PROTO] or
    +     * [code][google.spanner.v1.Type.code] ==
    +     * [ENUM][google.spanner.v1.TypeCode.ENUM], then `proto_type_fqn` is the fully
    +     * qualified name of the proto type representing the proto/enum definition.
    +     * 
    + * + * string proto_type_fqn = 5; + * + * @return The protoTypeFqn. + */ + public java.lang.String getProtoTypeFqn() { + java.lang.Object ref = protoTypeFqn_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + protoTypeFqn_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [PROTO][google.spanner.v1.TypeCode.PROTO] or
    +     * [code][google.spanner.v1.Type.code] ==
    +     * [ENUM][google.spanner.v1.TypeCode.ENUM], then `proto_type_fqn` is the fully
    +     * qualified name of the proto type representing the proto/enum definition.
    +     * 
    + * + * string proto_type_fqn = 5; + * + * @return The bytes for protoTypeFqn. + */ + public com.google.protobuf.ByteString getProtoTypeFqnBytes() { + java.lang.Object ref = protoTypeFqn_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + protoTypeFqn_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [PROTO][google.spanner.v1.TypeCode.PROTO] or
    +     * [code][google.spanner.v1.Type.code] ==
    +     * [ENUM][google.spanner.v1.TypeCode.ENUM], then `proto_type_fqn` is the fully
    +     * qualified name of the proto type representing the proto/enum definition.
    +     * 
    + * + * string proto_type_fqn = 5; + * + * @param value The protoTypeFqn to set. + * @return This builder for chaining. + */ + public Builder setProtoTypeFqn(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + protoTypeFqn_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [PROTO][google.spanner.v1.TypeCode.PROTO] or
    +     * [code][google.spanner.v1.Type.code] ==
    +     * [ENUM][google.spanner.v1.TypeCode.ENUM], then `proto_type_fqn` is the fully
    +     * qualified name of the proto type representing the proto/enum definition.
    +     * 
    + * + * string proto_type_fqn = 5; + * + * @return This builder for chaining. + */ + public Builder clearProtoTypeFqn() { + protoTypeFqn_ = getDefaultInstance().getProtoTypeFqn(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + return this; + } + + /** + * + * + *
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [PROTO][google.spanner.v1.TypeCode.PROTO] or
    +     * [code][google.spanner.v1.Type.code] ==
    +     * [ENUM][google.spanner.v1.TypeCode.ENUM], then `proto_type_fqn` is the fully
    +     * qualified name of the proto type representing the proto/enum definition.
    +     * 
    + * + * string proto_type_fqn = 5; + * + * @param value The bytes for protoTypeFqn to set. + * @return This builder for chaining. + */ + public Builder setProtoTypeFqnBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + protoTypeFqn_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.Type) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.Type) + private static final com.google.spanner.v1.Type DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.Type(); + } + + public static com.google.spanner.v1.Type getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Type parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.Type getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeAnnotationCode.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeAnnotationCode.java new file mode 100644 index 000000000000..4d60e66a4d3b --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeAnnotationCode.java @@ -0,0 +1,246 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/type.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * `TypeAnnotationCode` is used as a part of [Type][google.spanner.v1.Type] to
    + * disambiguate SQL types that should be used for a given Cloud Spanner value.
    + * Disambiguation is needed because the same Cloud Spanner type can be mapped to
    + * different SQL types depending on SQL dialect. TypeAnnotationCode doesn't
    + * affect the way value is serialized.
    + * 
    + * + * Protobuf enum {@code google.spanner.v1.TypeAnnotationCode} + */ +@com.google.protobuf.Generated +public enum TypeAnnotationCode implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +   * Not specified.
    +   * 
    + * + * TYPE_ANNOTATION_CODE_UNSPECIFIED = 0; + */ + TYPE_ANNOTATION_CODE_UNSPECIFIED(0), + /** + * + * + *
    +   * PostgreSQL compatible NUMERIC type. This annotation needs to be applied to
    +   * [Type][google.spanner.v1.Type] instances having
    +   * [NUMERIC][google.spanner.v1.TypeCode.NUMERIC] type code to specify that
    +   * values of this type should be treated as PostgreSQL NUMERIC values.
    +   * Currently this annotation is always needed for
    +   * [NUMERIC][google.spanner.v1.TypeCode.NUMERIC] when a client interacts with
    +   * PostgreSQL-enabled Spanner databases.
    +   * 
    + * + * PG_NUMERIC = 2; + */ + PG_NUMERIC(2), + /** + * + * + *
    +   * PostgreSQL compatible JSONB type. This annotation needs to be applied to
    +   * [Type][google.spanner.v1.Type] instances having
    +   * [JSON][google.spanner.v1.TypeCode.JSON] type code to specify that values of
    +   * this type should be treated as PostgreSQL JSONB values. Currently this
    +   * annotation is always needed for [JSON][google.spanner.v1.TypeCode.JSON]
    +   * when a client interacts with PostgreSQL-enabled Spanner databases.
    +   * 
    + * + * PG_JSONB = 3; + */ + PG_JSONB(3), + /** + * + * + *
    +   * PostgreSQL compatible OID type. This annotation can be used by a client
    +   * interacting with PostgreSQL-enabled Spanner database to specify that a
    +   * value should be treated using the semantics of the OID type.
    +   * 
    + * + * PG_OID = 4; + */ + PG_OID(4), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "TypeAnnotationCode"); + } + + /** + * + * + *
    +   * Not specified.
    +   * 
    + * + * TYPE_ANNOTATION_CODE_UNSPECIFIED = 0; + */ + public static final int TYPE_ANNOTATION_CODE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +   * PostgreSQL compatible NUMERIC type. This annotation needs to be applied to
    +   * [Type][google.spanner.v1.Type] instances having
    +   * [NUMERIC][google.spanner.v1.TypeCode.NUMERIC] type code to specify that
    +   * values of this type should be treated as PostgreSQL NUMERIC values.
    +   * Currently this annotation is always needed for
    +   * [NUMERIC][google.spanner.v1.TypeCode.NUMERIC] when a client interacts with
    +   * PostgreSQL-enabled Spanner databases.
    +   * 
    + * + * PG_NUMERIC = 2; + */ + public static final int PG_NUMERIC_VALUE = 2; + + /** + * + * + *
    +   * PostgreSQL compatible JSONB type. This annotation needs to be applied to
    +   * [Type][google.spanner.v1.Type] instances having
    +   * [JSON][google.spanner.v1.TypeCode.JSON] type code to specify that values of
    +   * this type should be treated as PostgreSQL JSONB values. Currently this
    +   * annotation is always needed for [JSON][google.spanner.v1.TypeCode.JSON]
    +   * when a client interacts with PostgreSQL-enabled Spanner databases.
    +   * 
    + * + * PG_JSONB = 3; + */ + public static final int PG_JSONB_VALUE = 3; + + /** + * + * + *
    +   * PostgreSQL compatible OID type. This annotation can be used by a client
    +   * interacting with PostgreSQL-enabled Spanner database to specify that a
    +   * value should be treated using the semantics of the OID type.
    +   * 
    + * + * PG_OID = 4; + */ + public static final int PG_OID_VALUE = 4; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static TypeAnnotationCode valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static TypeAnnotationCode forNumber(int value) { + switch (value) { + case 0: + return TYPE_ANNOTATION_CODE_UNSPECIFIED; + case 2: + return PG_NUMERIC; + case 3: + return PG_JSONB; + case 4: + return PG_OID; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public TypeAnnotationCode findValueByNumber(int number) { + return TypeAnnotationCode.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.v1.TypeProto.getDescriptor().getEnumTypes().get(1); + } + + private static final TypeAnnotationCode[] VALUES = values(); + + 
public static TypeAnnotationCode valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private TypeAnnotationCode(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.v1.TypeAnnotationCode) +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeCode.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeCode.java new file mode 100644 index 000000000000..cbe38593cf4c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeCode.java @@ -0,0 +1,587 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/type.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +/** + * + * + *
    + * `TypeCode` is used as part of [Type][google.spanner.v1.Type] to
    + * indicate the type of a Cloud Spanner value.
    + *
    + * Each legal value of a type can be encoded to or decoded from a JSON
    + * value, using the encodings described below. All Cloud Spanner values can
    + * be `null`, regardless of type; `null`s are always encoded as a JSON
    + * `null`.
    + * 
    + * + * Protobuf enum {@code google.spanner.v1.TypeCode} + */ +@com.google.protobuf.Generated +public enum TypeCode implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +   * Not specified.
    +   * 
    + * + * TYPE_CODE_UNSPECIFIED = 0; + */ + TYPE_CODE_UNSPECIFIED(0), + /** + * + * + *
    +   * Encoded as JSON `true` or `false`.
    +   * 
    + * + * BOOL = 1; + */ + BOOL(1), + /** + * + * + *
    +   * Encoded as `string`, in decimal format.
    +   * 
    + * + * INT64 = 2; + */ + INT64(2), + /** + * + * + *
    +   * Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or
    +   * `"-Infinity"`.
    +   * 
    + * + * FLOAT64 = 3; + */ + FLOAT64(3), + /** + * + * + *
    +   * Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or
    +   * `"-Infinity"`.
    +   * 
    + * + * FLOAT32 = 15; + */ + FLOAT32(15), + /** + * + * + *
    +   * Encoded as `string` in RFC 3339 timestamp format. The time zone
    +   * must be present, and must be `"Z"`.
    +   *
    +   * If the schema has the column option
    +   * `allow_commit_timestamp=true`, the placeholder string
    +   * `"spanner.commit_timestamp()"` can be used to instruct the system
    +   * to insert the commit timestamp associated with the transaction
    +   * commit.
    +   * 
    + * + * TIMESTAMP = 4; + */ + TIMESTAMP(4), + /** + * + * + *
    +   * Encoded as `string` in RFC 3339 date format.
    +   * 
    + * + * DATE = 5; + */ + DATE(5), + /** + * + * + *
    +   * Encoded as `string`.
    +   * 
    + * + * STRING = 6; + */ + STRING(6), + /** + * + * + *
    +   * Encoded as a base64-encoded `string`, as described in RFC 4648,
    +   * section 4.
    +   * 
    + * + * BYTES = 7; + */ + BYTES(7), + /** + * + * + *
    +   * Encoded as `list`, where the list elements are represented
    +   * according to
    +   * [array_element_type][google.spanner.v1.Type.array_element_type].
    +   * 
    + * + * ARRAY = 8; + */ + ARRAY(8), + /** + * + * + *
    +   * Encoded as `list`, where list element `i` is represented according
    +   * to [struct_type.fields[i]][google.spanner.v1.StructType.fields].
    +   * 
    + * + * STRUCT = 9; + */ + STRUCT(9), + /** + * + * + *
    +   * Encoded as `string`, in decimal format or scientific notation format.
    +   * Decimal format:
    +   * `[+-]Digits[.[Digits]]` or
    +   * `[+-][Digits].Digits`
    +   *
    +   * Scientific notation:
    +   * `[+-]Digits[.[Digits]][ExponentIndicator[+-]Digits]` or
    +   * `[+-][Digits].Digits[ExponentIndicator[+-]Digits]`
    +   * (ExponentIndicator is `"e"` or `"E"`)
    +   * 
    + * + * NUMERIC = 10; + */ + NUMERIC(10), + /** + * + * + *
    +   * Encoded as a JSON-formatted `string` as described in RFC 7159. The
    +   * following rules are applied when parsing JSON input:
    +   *
    +   * - Whitespace characters are not preserved.
    +   * - If a JSON object has duplicate keys, only the first key is preserved.
    +   * - Members of a JSON object are not guaranteed to have their order
    +   * preserved.
    +   * - JSON array elements will have their order preserved.
    +   * 
    + * + * JSON = 11; + */ + JSON(11), + /** + * + * + *
    +   * Encoded as a base64-encoded `string`, as described in RFC 4648,
    +   * section 4.
    +   * 
    + * + * PROTO = 13; + */ + PROTO(13), + /** + * + * + *
    +   * Encoded as `string`, in decimal format.
    +   * 
    + * + * ENUM = 14; + */ + ENUM(14), + /** + * + * + *
    +   * Encoded as `string`, in `ISO8601` duration format -
    +   * `P[n]Y[n]M[n]DT[n]H[n]M[n[.fraction]]S`
    +   * where `n` is an integer.
    +   * For example, `P1Y2M3DT4H5M6.5S` represents time duration of 1 year, 2
    +   * months, 3 days, 4 hours, 5 minutes, and 6.5 seconds.
    +   * 
    + * + * INTERVAL = 16; + */ + INTERVAL(16), + /** + * + * + *
    +   * Encoded as `string`, in lower-case hexa-decimal format, as described
    +   * in RFC 9562, section 4.
    +   * 
    + * + * UUID = 17; + */ + UUID(17), + UNRECOGNIZED(-1), + ; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "TypeCode"); + } + + /** + * + * + *
    +   * Not specified.
    +   * 
    + * + * TYPE_CODE_UNSPECIFIED = 0; + */ + public static final int TYPE_CODE_UNSPECIFIED_VALUE = 0; + + /** + * + * + *
    +   * Encoded as JSON `true` or `false`.
    +   * 
    + * + * BOOL = 1; + */ + public static final int BOOL_VALUE = 1; + + /** + * + * + *
    +   * Encoded as `string`, in decimal format.
    +   * 
    + * + * INT64 = 2; + */ + public static final int INT64_VALUE = 2; + + /** + * + * + *
    +   * Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or
    +   * `"-Infinity"`.
    +   * 
    + * + * FLOAT64 = 3; + */ + public static final int FLOAT64_VALUE = 3; + + /** + * + * + *
    +   * Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or
    +   * `"-Infinity"`.
    +   * 
    + * + * FLOAT32 = 15; + */ + public static final int FLOAT32_VALUE = 15; + + /** + * + * + *
    +   * Encoded as `string` in RFC 3339 timestamp format. The time zone
    +   * must be present, and must be `"Z"`.
    +   *
    +   * If the schema has the column option
    +   * `allow_commit_timestamp=true`, the placeholder string
    +   * `"spanner.commit_timestamp()"` can be used to instruct the system
    +   * to insert the commit timestamp associated with the transaction
    +   * commit.
    +   * 
    + * + * TIMESTAMP = 4; + */ + public static final int TIMESTAMP_VALUE = 4; + + /** + * + * + *
    +   * Encoded as `string` in RFC 3339 date format.
    +   * 
    + * + * DATE = 5; + */ + public static final int DATE_VALUE = 5; + + /** + * + * + *
    +   * Encoded as `string`.
    +   * 
    + * + * STRING = 6; + */ + public static final int STRING_VALUE = 6; + + /** + * + * + *
    +   * Encoded as a base64-encoded `string`, as described in RFC 4648,
    +   * section 4.
    +   * 
    + * + * BYTES = 7; + */ + public static final int BYTES_VALUE = 7; + + /** + * + * + *
    +   * Encoded as `list`, where the list elements are represented
    +   * according to
    +   * [array_element_type][google.spanner.v1.Type.array_element_type].
    +   * 
    + * + * ARRAY = 8; + */ + public static final int ARRAY_VALUE = 8; + + /** + * + * + *
    +   * Encoded as `list`, where list element `i` is represented according
    +   * to [struct_type.fields[i]][google.spanner.v1.StructType.fields].
    +   * 
    + * + * STRUCT = 9; + */ + public static final int STRUCT_VALUE = 9; + + /** + * + * + *
    +   * Encoded as `string`, in decimal format or scientific notation format.
    +   * Decimal format:
    +   * `[+-]Digits[.[Digits]]` or
    +   * `[+-][Digits].Digits`
    +   *
    +   * Scientific notation:
    +   * `[+-]Digits[.[Digits]][ExponentIndicator[+-]Digits]` or
    +   * `[+-][Digits].Digits[ExponentIndicator[+-]Digits]`
    +   * (ExponentIndicator is `"e"` or `"E"`)
    +   * 
    + * + * NUMERIC = 10; + */ + public static final int NUMERIC_VALUE = 10; + + /** + * + * + *
    +   * Encoded as a JSON-formatted `string` as described in RFC 7159. The
    +   * following rules are applied when parsing JSON input:
    +   *
    +   * - Whitespace characters are not preserved.
    +   * - If a JSON object has duplicate keys, only the first key is preserved.
    +   * - Members of a JSON object are not guaranteed to have their order
    +   * preserved.
    +   * - JSON array elements will have their order preserved.
    +   * 
    + * + * JSON = 11; + */ + public static final int JSON_VALUE = 11; + + /** + * + * + *
    +   * Encoded as a base64-encoded `string`, as described in RFC 4648,
    +   * section 4.
    +   * 
    + * + * PROTO = 13; + */ + public static final int PROTO_VALUE = 13; + + /** + * + * + *
    +   * Encoded as `string`, in decimal format.
    +   * 
    + * + * ENUM = 14; + */ + public static final int ENUM_VALUE = 14; + + /** + * + * + *
    +   * Encoded as `string`, in `ISO8601` duration format -
    +   * `P[n]Y[n]M[n]DT[n]H[n]M[n[.fraction]]S`
    +   * where `n` is an integer.
    +   * For example, `P1Y2M3DT4H5M6.5S` represents time duration of 1 year, 2
    +   * months, 3 days, 4 hours, 5 minutes, and 6.5 seconds.
    +   * 
    + * + * INTERVAL = 16; + */ + public static final int INTERVAL_VALUE = 16; + + /** + * + * + *
    +   * Encoded as `string`, in lower-case hexa-decimal format, as described
    +   * in RFC 9562, section 4.
    +   * 
    + * + * UUID = 17; + */ + public static final int UUID_VALUE = 17; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static TypeCode valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static TypeCode forNumber(int value) { + switch (value) { + case 0: + return TYPE_CODE_UNSPECIFIED; + case 1: + return BOOL; + case 2: + return INT64; + case 3: + return FLOAT64; + case 15: + return FLOAT32; + case 4: + return TIMESTAMP; + case 5: + return DATE; + case 6: + return STRING; + case 7: + return BYTES; + case 8: + return ARRAY; + case 9: + return STRUCT; + case 10: + return NUMERIC; + case 11: + return JSON; + case 13: + return PROTO; + case 14: + return ENUM; + case 16: + return INTERVAL; + case 17: + return UUID; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public TypeCode findValueByNumber(int number) { + return TypeCode.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor 
getDescriptorForType() { + return getDescriptor(); + } + + public static com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.v1.TypeProto.getDescriptor().getEnumTypes().get(0); + } + + private static final TypeCode[] VALUES = values(); + + public static TypeCode valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private TypeCode(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.v1.TypeCode) +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeOrBuilder.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeOrBuilder.java new file mode 100644 index 000000000000..5e035a1c696e --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeOrBuilder.java @@ -0,0 +1,214 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/type.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public interface TypeOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.Type) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The [TypeCode][google.spanner.v1.TypeCode] for this type.
    +   * 
    + * + * .google.spanner.v1.TypeCode code = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The enum numeric value on the wire for code. + */ + int getCodeValue(); + + /** + * + * + *
    +   * Required. The [TypeCode][google.spanner.v1.TypeCode] for this type.
    +   * 
    + * + * .google.spanner.v1.TypeCode code = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The code. + */ + com.google.spanner.v1.TypeCode getCode(); + + /** + * + * + *
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +   * type of the array elements.
    +   * 
    + * + * .google.spanner.v1.Type array_element_type = 2; + * + * @return Whether the arrayElementType field is set. + */ + boolean hasArrayElementType(); + + /** + * + * + *
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +   * type of the array elements.
    +   * 
    + * + * .google.spanner.v1.Type array_element_type = 2; + * + * @return The arrayElementType. + */ + com.google.spanner.v1.Type getArrayElementType(); + + /** + * + * + *
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +   * type of the array elements.
    +   * 
    + * + * .google.spanner.v1.Type array_element_type = 2; + */ + com.google.spanner.v1.TypeOrBuilder getArrayElementTypeOrBuilder(); + + /** + * + * + *
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +   * type information for the struct's fields.
    +   * 
    + * + * .google.spanner.v1.StructType struct_type = 3; + * + * @return Whether the structType field is set. + */ + boolean hasStructType(); + + /** + * + * + *
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +   * type information for the struct's fields.
    +   * 
    + * + * .google.spanner.v1.StructType struct_type = 3; + * + * @return The structType. + */ + com.google.spanner.v1.StructType getStructType(); + + /** + * + * + *
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +   * type information for the struct's fields.
    +   * 
    + * + * .google.spanner.v1.StructType struct_type = 3; + */ + com.google.spanner.v1.StructTypeOrBuilder getStructTypeOrBuilder(); + + /** + * + * + *
    +   * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that
    +   * disambiguates SQL type that Spanner will use to represent values of this
    +   * type during query processing. This is necessary for some type codes because
    +   * a single [TypeCode][google.spanner.v1.TypeCode] can be mapped to different
    +   * SQL types depending on the SQL dialect.
    +   * [type_annotation][google.spanner.v1.Type.type_annotation] typically is not
    +   * needed to process the content of a value (it doesn't affect serialization)
    +   * and clients can ignore it on the read path.
    +   * 
    + * + * .google.spanner.v1.TypeAnnotationCode type_annotation = 4; + * + * @return The enum numeric value on the wire for typeAnnotation. + */ + int getTypeAnnotationValue(); + + /** + * + * + *
    +   * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that
    +   * disambiguates SQL type that Spanner will use to represent values of this
    +   * type during query processing. This is necessary for some type codes because
    +   * a single [TypeCode][google.spanner.v1.TypeCode] can be mapped to different
    +   * SQL types depending on the SQL dialect.
    +   * [type_annotation][google.spanner.v1.Type.type_annotation] typically is not
    +   * needed to process the content of a value (it doesn't affect serialization)
    +   * and clients can ignore it on the read path.
    +   * 
    + * + * .google.spanner.v1.TypeAnnotationCode type_annotation = 4; + * + * @return The typeAnnotation. + */ + com.google.spanner.v1.TypeAnnotationCode getTypeAnnotation(); + + /** + * + * + *
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [PROTO][google.spanner.v1.TypeCode.PROTO] or
    +   * [code][google.spanner.v1.Type.code] ==
    +   * [ENUM][google.spanner.v1.TypeCode.ENUM], then `proto_type_fqn` is the fully
    +   * qualified name of the proto type representing the proto/enum definition.
    +   * 
    + * + * string proto_type_fqn = 5; + * + * @return The protoTypeFqn. + */ + java.lang.String getProtoTypeFqn(); + + /** + * + * + *
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [PROTO][google.spanner.v1.TypeCode.PROTO] or
    +   * [code][google.spanner.v1.Type.code] ==
    +   * [ENUM][google.spanner.v1.TypeCode.ENUM], then `proto_type_fqn` is the fully
    +   * qualified name of the proto type representing the proto/enum definition.
    +   * 
    + * + * string proto_type_fqn = 5; + * + * @return The bytes for protoTypeFqn. + */ + com.google.protobuf.ByteString getProtoTypeFqnBytes(); +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeProto.java b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeProto.java new file mode 100644 index 000000000000..42c680575345 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeProto.java @@ -0,0 +1,146 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/spanner/v1/type.proto +// Protobuf Java Version: 4.33.2 + +package com.google.spanner.v1; + +@com.google.protobuf.Generated +public final class TypeProto extends com.google.protobuf.GeneratedFile { + private TypeProto() {} + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "TypeProto"); + } + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_Type_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_Type_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_StructType_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_StructType_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_StructType_Field_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_spanner_v1_StructType_Field_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n" + + "\034google/spanner/v1/type.proto\022\021google.s" + + "panner.v1\032\037google/api/field_behavior.proto\"\367\001\n" + + "\004Type\022.\n" + + "\004code\030\001 
\001(\0162\033.google.spanner.v1.TypeCodeB\003\340A\002\0223\n" + + "\022array_element_type\030\002 \001(\0132\027.google.spanner.v1.Type\0222\n" + + "\013struct_type\030\003 \001(\0132\035.google.spanner.v1.StructType\022>\n" + + "\017type_annotation\030\004 \001(\0162%.google.spanner.v1.TypeAnnotationCode\022\026\n" + + "\016proto_type_fqn\030\005 \001(\t\"\177\n\n" + + "StructType\0223\n" + + "\006fields\030\001 \003(\0132#.google.spanner.v1.StructType.Field\032<\n" + + "\005Field\022\014\n" + + "\004name\030\001 \001(\t\022%\n" + + "\004type\030\002 \001(\0132\027.google.spanner.v1.Type*\337\001\n" + + "\010TypeCode\022\031\n" + + "\025TYPE_CODE_UNSPECIFIED\020\000\022\010\n" + + "\004BOOL\020\001\022\t\n" + + "\005INT64\020\002\022\013\n" + + "\007FLOAT64\020\003\022\013\n" + + "\007FLOAT32\020\017\022\r\n" + + "\tTIMESTAMP\020\004\022\010\n" + + "\004DATE\020\005\022\n\n" + + "\006STRING\020\006\022\t\n" + + "\005BYTES\020\007\022\t\n" + + "\005ARRAY\020\010\022\n\n" + + "\006STRUCT\020\t\022\013\n" + + "\007NUMERIC\020\n" + + "\022\010\n" + + "\004JSON\020\013\022\t\n" + + "\005PROTO\020\r" + + "\022\010\n" + + "\004ENUM\020\016\022\014\n" + + "\010INTERVAL\020\020\022\010\n" + + "\004UUID\020\021*d\n" + + "\022TypeAnnotationCode\022$\n" + + " TYPE_ANNOTATION_CODE_UNSPECIFIED\020\000\022\016\n\n" + + "PG_NUMERIC\020\002\022\014\n" + + "\010PG_JSONB\020\003\022\n\n" + + "\006PG_OID\020\004B\254\001\n" + + "\025com.google.spanner.v1B\tTypeProtoP\001Z5cloud.goog" + + "le.com/go/spanner/apiv1/spannerpb;spanne" + + "rpb\252\002\027Google.Cloud.Spanner.V1\312\002\027Google\\C" + + "loud\\Spanner\\V1\352\002\032Google::Cloud::Spanner::V1b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), + }); + internal_static_google_spanner_v1_Type_descriptor = 
getDescriptor().getMessageType(0); + internal_static_google_spanner_v1_Type_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_Type_descriptor, + new java.lang.String[] { + "Code", "ArrayElementType", "StructType", "TypeAnnotation", "ProtoTypeFqn", + }); + internal_static_google_spanner_v1_StructType_descriptor = getDescriptor().getMessageType(1); + internal_static_google_spanner_v1_StructType_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_StructType_descriptor, + new java.lang.String[] { + "Fields", + }); + internal_static_google_spanner_v1_StructType_Field_descriptor = + internal_static_google_spanner_v1_StructType_descriptor.getNestedType(0); + internal_static_google_spanner_v1_StructType_Field_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_spanner_v1_StructType_Field_descriptor, + new java.lang.String[] { + "Name", "Type", + }); + descriptor.resolveAllFeaturesImmutable(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/change_stream.proto b/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/change_stream.proto new file mode 100644 index 000000000000..e7d12e6084c6 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/change_stream.proto @@ -0,0 +1,451 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use 
this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.spanner.v1; + +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; +import "google/spanner/v1/type.proto"; + +option csharp_namespace = "Google.Cloud.Spanner.V1"; +option go_package = "cloud.google.com/go/spanner/apiv1/spannerpb;spannerpb"; +option java_multiple_files = true; +option java_outer_classname = "ChangeStreamProto"; +option java_package = "com.google.spanner.v1"; +option php_namespace = "Google\\Cloud\\Spanner\\V1"; +option ruby_package = "Google::Cloud::Spanner::V1"; + +// Spanner Change Streams enable customers to capture and stream out changes to +// their Spanner databases in real-time. A change stream +// can be created with option partition_mode='IMMUTABLE_KEY_RANGE' or +// partition_mode='MUTABLE_KEY_RANGE'. +// +// This message is only used in Change Streams created with the option +// partition_mode='MUTABLE_KEY_RANGE'. Spanner automatically creates a special +// Table-Valued Function (TVF) along with each Change Streams. The function +// provides access to the change stream's records. The function is named +// READ_ (where is the +// name of the change stream), and it returns a table with only one column +// called ChangeRecord. +message ChangeStreamRecord { + // A data change record contains a set of changes to a table with the same + // modification type (insert, update, or delete) committed at the same commit + // timestamp in one change stream partition for the same transaction. 
Multiple + // data change records can be returned for the same transaction across + // multiple change stream partitions. + message DataChangeRecord { + // Metadata for a column. + message ColumnMetadata { + // Name of the column. + string name = 1; + + // Type of the column. + Type type = 2; + + // Indicates whether the column is a primary key column. + bool is_primary_key = 3; + + // Ordinal position of the column based on the original table definition + // in the schema starting with a value of 1. + int64 ordinal_position = 4; + } + + // Returns the value and associated metadata for a particular field of the + // [Mod][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.Mod]. + message ModValue { + // Index within the repeated + // [column_metadata][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.column_metadata] + // field, to obtain the column metadata for the column that was modified. + int32 column_metadata_index = 1; + + // The value of the column. + google.protobuf.Value value = 2; + } + + // A mod describes all data changes in a watched table row. + message Mod { + // Returns the value of the primary key of the modified row. + repeated ModValue keys = 1; + + // Returns the old values before the change for the modified columns. + // Always empty for + // [INSERT][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.INSERT], + // or if old values are not being captured specified by + // [value_capture_type][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ValueCaptureType]. + repeated ModValue old_values = 2; + + // Returns the new values after the change for the modified columns. + // Always empty for + // [DELETE][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.ModType.DELETE]. + repeated ModValue new_values = 3; + } + + // Mod type describes the type of change Spanner applied to the data. 
For + // example, if the client submits an INSERT_OR_UPDATE request, Spanner will + // perform an insert if there is no existing row and return ModType INSERT. + // Alternatively, if there is an existing row, Spanner will perform an + // update and return ModType UPDATE. + enum ModType { + // Not specified. + MOD_TYPE_UNSPECIFIED = 0; + + // Indicates data was inserted. + INSERT = 10; + + // Indicates existing data was updated. + UPDATE = 20; + + // Indicates existing data was deleted. + DELETE = 30; + } + + // Value capture type describes which values are recorded in the data + // change record. + enum ValueCaptureType { + // Not specified. + VALUE_CAPTURE_TYPE_UNSPECIFIED = 0; + + // Records both old and new values of the modified watched columns. + OLD_AND_NEW_VALUES = 10; + + // Records only new values of the modified watched columns. + NEW_VALUES = 20; + + // Records new values of all watched columns, including modified and + // unmodified columns. + NEW_ROW = 30; + + // Records the new values of all watched columns, including modified and + // unmodified columns. Also records the old values of the modified + // columns. + NEW_ROW_AND_OLD_VALUES = 40; + } + + // Indicates the timestamp in which the change was committed. + // DataChangeRecord.commit_timestamps, + // PartitionStartRecord.start_timestamps, + // PartitionEventRecord.commit_timestamps, and + // PartitionEndRecord.end_timestamps can have the same value in the same + // partition. + google.protobuf.Timestamp commit_timestamp = 1; + + // Record sequence numbers are unique and monotonically increasing (but not + // necessarily contiguous) for a specific timestamp across record + // types in the same partition. To guarantee ordered processing, the reader + // should process records (of potentially different types) in + // record_sequence order for a specific timestamp in the same partition. 
+ // + // The record sequence number ordering across partitions is only meaningful + // in the context of a specific transaction. Record sequence numbers are + // unique across partitions for a specific transaction. Sort the + // DataChangeRecords for the same + // [server_transaction_id][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.server_transaction_id] + // by + // [record_sequence][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.record_sequence] + // to reconstruct the ordering of the changes within the transaction. + string record_sequence = 2; + + // Provides a globally unique string that represents the transaction in + // which the change was committed. Multiple transactions can have the same + // commit timestamp, but each transaction has a unique + // server_transaction_id. + string server_transaction_id = 3; + + // Indicates whether this is the last record for a transaction in the + // current partition. Clients can use this field to determine when all + // records for a transaction in the current partition have been received. + bool is_last_record_in_transaction_in_partition = 4; + + // Name of the table affected by the change. + string table = 5; + + // Provides metadata describing the columns associated with the + // [mods][google.spanner.v1.ChangeStreamRecord.DataChangeRecord.mods] listed + // below. + repeated ColumnMetadata column_metadata = 6; + + // Describes the changes that were made. + repeated Mod mods = 7; + + // Describes the type of change. + ModType mod_type = 8; + + // Describes the value capture type that was specified in the change stream + // configuration when this change was captured. + ValueCaptureType value_capture_type = 9; + + // Indicates the number of data change records that are part of this + // transaction across all change stream partitions. This value can be used + // to assemble all the records associated with a particular transaction. 
+ int32 number_of_records_in_transaction = 10; + + // Indicates the number of partitions that return data change records for + // this transaction. This value can be helpful in assembling all records + // associated with a particular transaction. + int32 number_of_partitions_in_transaction = 11; + + // Indicates the transaction tag associated with this transaction. + string transaction_tag = 12; + + // Indicates whether the transaction is a system transaction. System + // transactions include those issued by time-to-live (TTL), column backfill, + // etc. + bool is_system_transaction = 13; + } + + // A heartbeat record is returned as a progress indicator, when there are no + // data changes or any other partition record types in the change stream + // partition. + message HeartbeatRecord { + // Indicates the timestamp at which the query has returned all the records + // in the change stream partition with timestamp <= heartbeat timestamp. + // The heartbeat timestamp will not be the same as the timestamps of other + // record types in the same partition. + google.protobuf.Timestamp timestamp = 1; + } + + // A partition start record serves as a notification that the client should + // schedule the partitions to be queried. PartitionStartRecord returns + // information about one or more partitions. + message PartitionStartRecord { + // Start timestamp at which the partitions should be queried to return + // change stream records with timestamps >= start_timestamp. + // DataChangeRecord.commit_timestamps, + // PartitionStartRecord.start_timestamps, + // PartitionEventRecord.commit_timestamps, and + // PartitionEndRecord.end_timestamps can have the same value in the same + // partition. + google.protobuf.Timestamp start_timestamp = 1; + + // Record sequence numbers are unique and monotonically increasing (but not + // necessarily contiguous) for a specific timestamp across record + // types in the same partition. 
To guarantee ordered processing, the reader + // should process records (of potentially different types) in + // record_sequence order for a specific timestamp in the same partition. + string record_sequence = 2; + + // Unique partition identifiers to be used in queries. + repeated string partition_tokens = 3; + } + + // A partition end record serves as a notification that the client should stop + // reading the partition. No further records are expected to be retrieved on + // it. + message PartitionEndRecord { + // End timestamp at which the change stream partition is terminated. All + // changes generated by this partition will have timestamps <= + // end_timestamp. DataChangeRecord.commit_timestamps, + // PartitionStartRecord.start_timestamps, + // PartitionEventRecord.commit_timestamps, and + // PartitionEndRecord.end_timestamps can have the same value in the same + // partition. PartitionEndRecord is the last record returned for a + // partition. + google.protobuf.Timestamp end_timestamp = 1; + + // Record sequence numbers are unique and monotonically increasing (but not + // necessarily contiguous) for a specific timestamp across record + // types in the same partition. To guarantee ordered processing, the reader + // should process records (of potentially different types) in + // record_sequence order for a specific timestamp in the same partition. + string record_sequence = 2; + + // Unique partition identifier describing the terminated change stream + // partition. + // [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEndRecord.partition_token] + // is equal to the partition token of the change stream partition currently + // queried to return this PartitionEndRecord. + string partition_token = 3; + } + + // A partition event record describes key range changes for a change stream + // partition. 
The changes to a row defined by its primary key can be captured + // in one change stream partition for a specific time range, and then be + // captured in a different change stream partition for a different time range. + // This movement of key ranges across change stream partitions is a reflection + // of activities, such as Spanner's dynamic splitting and load balancing, etc. + // Processing this event is needed if users want to guarantee processing of + // the changes for any key in timestamp order. If time ordered processing of + // changes for a primary key is not needed, this event can be ignored. + // To guarantee time ordered processing for each primary key, if the event + // describes move-ins, the reader of this partition needs to wait until the + // readers of the source partitions have processed all records with timestamps + // <= this PartitionEventRecord.commit_timestamp, before advancing beyond this + // PartitionEventRecord. If the event describes move-outs, the reader can + // notify the readers of the destination partitions that they can continue + // processing. + message PartitionEventRecord { + // Describes move-in of the key ranges into the change stream partition + // identified by + // [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token]. + // + // To maintain processing the changes for a particular key in timestamp + // order, the query processing the change stream partition identified by + // [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token] + // should not advance beyond the partition event record commit timestamp + // until the queries processing the source change stream partitions have + // processed all change stream records with timestamps <= the partition + // event record commit timestamp. 
+ message MoveInEvent { + // An unique partition identifier describing the source change stream + // partition that recorded changes for the key range that is moving + // into this partition. + string source_partition_token = 1; + } + + // Describes move-out of the key ranges out of the change stream partition + // identified by + // [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token]. + // + // To maintain processing the changes for a particular key in timestamp + // order, the query processing the + // [MoveOutEvent][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.MoveOutEvent] + // in the partition identified by + // [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token] + // should inform the queries processing the destination partitions that + // they can unblock and proceed processing records past the + // [commit_timestamp][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.commit_timestamp]. + message MoveOutEvent { + // An unique partition identifier describing the destination change + // stream partition that will record changes for the key range that is + // moving out of this partition. + string destination_partition_token = 1; + } + + // Indicates the commit timestamp at which the key range change occurred. + // DataChangeRecord.commit_timestamps, + // PartitionStartRecord.start_timestamps, + // PartitionEventRecord.commit_timestamps, and + // PartitionEndRecord.end_timestamps can have the same value in the same + // partition. + google.protobuf.Timestamp commit_timestamp = 1; + + // Record sequence numbers are unique and monotonically increasing (but not + // necessarily contiguous) for a specific timestamp across record + // types in the same partition. To guarantee ordered processing, the reader + // should process records (of potentially different types) in + // record_sequence order for a specific timestamp in the same partition. 
+ string record_sequence = 2; + + // Unique partition identifier describing the partition this event + // occurred on. + // [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token] + // is equal to the partition token of the change stream partition currently + // queried to return this PartitionEventRecord. + string partition_token = 3; + + // Set when one or more key ranges are moved into the change stream + // partition identified by + // [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token]. + // + // Example: Two key ranges are moved into partition (P1) from partition (P2) + // and partition (P3) in a single transaction at timestamp T. + // + // The PartitionEventRecord returned in P1 will reflect the move as: + // + // PartitionEventRecord { + // commit_timestamp: T + // partition_token: "P1" + // move_in_events { + // source_partition_token: "P2" + // } + // move_in_events { + // source_partition_token: "P3" + // } + // } + // + // The PartitionEventRecord returned in P2 will reflect the move as: + // + // PartitionEventRecord { + // commit_timestamp: T + // partition_token: "P2" + // move_out_events { + // destination_partition_token: "P1" + // } + // } + // + // The PartitionEventRecord returned in P3 will reflect the move as: + // + // PartitionEventRecord { + // commit_timestamp: T + // partition_token: "P3" + // move_out_events { + // destination_partition_token: "P1" + // } + // } + repeated MoveInEvent move_in_events = 4; + + // Set when one or more key ranges are moved out of the change stream + // partition identified by + // [partition_token][google.spanner.v1.ChangeStreamRecord.PartitionEventRecord.partition_token]. + // + // Example: Two key ranges are moved out of partition (P1) to partition (P2) + // and partition (P3) in a single transaction at timestamp T. 
+ // + // The PartitionEventRecord returned in P1 will reflect the move as: + // + // PartitionEventRecord { + // commit_timestamp: T + // partition_token: "P1" + // move_out_events { + // destination_partition_token: "P2" + // } + // move_out_events { + // destination_partition_token: "P3" + // } + // } + // + // The PartitionEventRecord returned in P2 will reflect the move as: + // + // PartitionEventRecord { + // commit_timestamp: T + // partition_token: "P2" + // move_in_events { + // source_partition_token: "P1" + // } + // } + // + // The PartitionEventRecord returned in P3 will reflect the move as: + // + // PartitionEventRecord { + // commit_timestamp: T + // partition_token: "P3" + // move_in_events { + // source_partition_token: "P1" + // } + // } + repeated MoveOutEvent move_out_events = 5; + } + + // One of the change stream subrecords. + oneof record { + // Data change record describing a data change for a change stream + // partition. + DataChangeRecord data_change_record = 1; + + // Heartbeat record describing a heartbeat for a change stream partition. + HeartbeatRecord heartbeat_record = 2; + + // Partition start record describing a new change stream partition. + PartitionStartRecord partition_start_record = 3; + + // Partition end record describing a terminated change stream partition. + PartitionEndRecord partition_end_record = 4; + + // Partition event record describing key range changes for a change stream + // partition. 
+ PartitionEventRecord partition_event_record = 5; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/commit_response.proto b/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/commit_response.proto new file mode 100644 index 000000000000..20d2850bb648 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/commit_response.proto @@ -0,0 +1,74 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.spanner.v1; + +import "google/api/field_behavior.proto"; +import "google/protobuf/timestamp.proto"; +import "google/spanner/v1/location.proto"; +import "google/spanner/v1/transaction.proto"; + +option csharp_namespace = "Google.Cloud.Spanner.V1"; +option go_package = "cloud.google.com/go/spanner/apiv1/spannerpb;spannerpb"; +option java_multiple_files = true; +option java_outer_classname = "CommitResponseProto"; +option java_package = "com.google.spanner.v1"; +option php_namespace = "Google\\Cloud\\Spanner\\V1"; +option ruby_package = "Google::Cloud::Spanner::V1"; + +// The response for [Commit][google.spanner.v1.Spanner.Commit]. +message CommitResponse { + // Additional statistics about a commit. + message CommitStats { + // The total number of mutations for the transaction. 
Knowing the + // `mutation_count` value can help you maximize the number of mutations + // in a transaction and minimize the number of API round trips. You can + // also monitor this value to prevent transactions from exceeding the system + // [limit](https://cloud.google.com/spanner/quotas#limits_for_creating_reading_updating_and_deleting_data). + // If the number of mutations exceeds the limit, the server returns + // [INVALID_ARGUMENT](https://cloud.google.com/spanner/docs/reference/rest/v1/Code#ENUM_VALUES.INVALID_ARGUMENT). + int64 mutation_count = 1; + } + + // The Cloud Spanner timestamp at which the transaction committed. + google.protobuf.Timestamp commit_timestamp = 1; + + // The statistics about this `Commit`. Not returned by default. + // For more information, see + // [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats]. + CommitStats commit_stats = 2; + + // You must examine and retry the commit if the following is populated. + oneof MultiplexedSessionRetry { + // If specified, transaction has not committed yet. + // You must retry the commit with the new precommit token. + MultiplexedSessionPrecommitToken precommit_token = 4; + } + + // If `TransactionOptions.isolation_level` is set to + // `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the + // timestamp at which all reads in the transaction ran. This timestamp is + // never returned. + google.protobuf.Timestamp snapshot_timestamp = 5; + + // Optional. A cache update expresses a set of changes the client should + // incorporate into its location cache. The client should discard the changes + // if they are older than the data it already has. This data can be obtained + // in response to requests that included a `RoutingHint` field, but may also + // be obtained by explicit location-fetching RPCs which may be added in the + // future. 
+ CacheUpdate cache_update = 6 [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/keys.proto b/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/keys.proto new file mode 100644 index 000000000000..5e30e831e64d --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/keys.proto @@ -0,0 +1,163 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.spanner.v1; + +import "google/protobuf/struct.proto"; + +option csharp_namespace = "Google.Cloud.Spanner.V1"; +option go_package = "cloud.google.com/go/spanner/apiv1/spannerpb;spannerpb"; +option java_multiple_files = true; +option java_outer_classname = "KeysProto"; +option java_package = "com.google.spanner.v1"; +option php_namespace = "Google\\Cloud\\Spanner\\V1"; +option ruby_package = "Google::Cloud::Spanner::V1"; + +// KeyRange represents a range of rows in a table or index. +// +// A range has a start key and an end key. These keys can be open or +// closed, indicating if the range includes rows with that key. +// +// Keys are represented by lists, where the ith value in the list +// corresponds to the ith component of the table or index primary key. +// Individual values are encoded as described +// [here][google.spanner.v1.TypeCode]. 
+// +// For example, consider the following table definition: +// +// CREATE TABLE UserEvents ( +// UserName STRING(MAX), +// EventDate STRING(10) +// ) PRIMARY KEY(UserName, EventDate); +// +// The following keys name rows in this table: +// +// ["Bob", "2014-09-23"] +// ["Alfred", "2015-06-12"] +// +// Since the `UserEvents` table's `PRIMARY KEY` clause names two +// columns, each `UserEvents` key has two elements; the first is the +// `UserName`, and the second is the `EventDate`. +// +// Key ranges with multiple components are interpreted +// lexicographically by component using the table or index key's declared +// sort order. For example, the following range returns all events for +// user `"Bob"` that occurred in the year 2015: +// +// "start_closed": ["Bob", "2015-01-01"] +// "end_closed": ["Bob", "2015-12-31"] +// +// Start and end keys can omit trailing key components. This affects the +// inclusion and exclusion of rows that exactly match the provided key +// components: if the key is closed, then rows that exactly match the +// provided components are included; if the key is open, then rows +// that exactly match are not included. 
+// +// For example, the following range includes all events for `"Bob"` that +// occurred during and after the year 2000: +// +// "start_closed": ["Bob", "2000-01-01"] +// "end_closed": ["Bob"] +// +// The next example retrieves all events for `"Bob"`: +// +// "start_closed": ["Bob"] +// "end_closed": ["Bob"] +// +// To retrieve events before the year 2000: +// +// "start_closed": ["Bob"] +// "end_open": ["Bob", "2000-01-01"] +// +// The following range includes all rows in the table: +// +// "start_closed": [] +// "end_closed": [] +// +// This range returns all users whose `UserName` begins with any +// character from A to C: +// +// "start_closed": ["A"] +// "end_open": ["D"] +// +// This range returns all users whose `UserName` begins with B: +// +// "start_closed": ["B"] +// "end_open": ["C"] +// +// Key ranges honor column sort order. For example, suppose a table is +// defined as follows: +// +// CREATE TABLE DescendingSortedTable { +// Key INT64, +// ... +// ) PRIMARY KEY(Key DESC); +// +// The following range retrieves all rows with key values between 1 +// and 100 inclusive: +// +// "start_closed": ["100"] +// "end_closed": ["1"] +// +// Note that 100 is passed as the start, and 1 is passed as the end, +// because `Key` is a descending column in the schema. +message KeyRange { + // The start key must be provided. It can be either closed or open. + oneof start_key_type { + // If the start is closed, then the range includes all rows whose + // first `len(start_closed)` key columns exactly match `start_closed`. + google.protobuf.ListValue start_closed = 1; + + // If the start is open, then the range excludes rows whose first + // `len(start_open)` key columns exactly match `start_open`. + google.protobuf.ListValue start_open = 2; + } + + // The end key must be provided. It can be either closed or open. + oneof end_key_type { + // If the end is closed, then the range includes all rows whose + // first `len(end_closed)` key columns exactly match `end_closed`. 
+ google.protobuf.ListValue end_closed = 3; + + // If the end is open, then the range excludes rows whose first + // `len(end_open)` key columns exactly match `end_open`. + google.protobuf.ListValue end_open = 4; + } +} + +// `KeySet` defines a collection of Cloud Spanner keys and/or key ranges. All +// the keys are expected to be in the same table or index. The keys need +// not be sorted in any particular way. +// +// If the same key is specified multiple times in the set (for example +// if two ranges, two keys, or a key and a range overlap), Cloud Spanner +// behaves as if the key were only specified once. +message KeySet { + // A list of specific keys. Entries in `keys` should have exactly as + // many elements as there are columns in the primary or index key + // with which this `KeySet` is used. Individual key values are + // encoded as described [here][google.spanner.v1.TypeCode]. + repeated google.protobuf.ListValue keys = 1; + + // A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more + // information about key range specifications. + repeated KeyRange ranges = 2; + + // For convenience `all` can be set to `true` to indicate that this + // `KeySet` matches all keys in the table or index. Note that any keys + // specified in `keys` or `ranges` are only yielded once. + bool all = 3; +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/location.proto b/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/location.proto new file mode 100644 index 000000000000..870dc0ec0a9c --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/location.proto @@ -0,0 +1,388 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.spanner.v1; + +import "google/protobuf/struct.proto"; +import "google/spanner/v1/type.proto"; + +option csharp_namespace = "Google.Cloud.Spanner.V1"; +option go_package = "cloud.google.com/go/spanner/apiv1/spannerpb;spannerpb"; +option java_multiple_files = true; +option java_outer_classname = "LocationProto"; +option java_package = "com.google.spanner.v1"; +option php_namespace = "Google\\Cloud\\Spanner\\V1"; +option ruby_package = "Google::Cloud::Spanner::V1"; + +// A `Range` represents a range of keys in a database. The keys themselves +// are encoded in "sortable string format", also known as ssformat. Consult +// Spanner's open source client libraries for details on the encoding. +// +// Each range represents a contiguous range of rows, possibly from multiple +// tables/indexes. Each range is associated with a single paxos group (known as +// a "group" throughout this API), a split (which names the exact range within +// the group), and a generation that can be used to determine whether a given +// `Range` represents a newer or older location for the key range. +message Range { + // The start key of the range, inclusive. Encoded in "sortable string format" + // (ssformat). + bytes start_key = 1; + + // The limit key of the range, exclusive. Encoded in "sortable string format" + // (ssformat). + bytes limit_key = 2; + + // The UID of the paxos group where this range is stored. UIDs are unique + // within the database. References `Group.group_uid`. 
+ uint64 group_uid = 3; + + // A group can store multiple ranges of keys. Each key range is named by an + // ID (the split ID). Within a group, split IDs are unique. The `split_id` + // names the exact split in `group_uid` where this range is stored. + uint64 split_id = 4; + + // `generation` indicates the freshness of the range information contained + // in this proto. Generations can be compared lexicographically; if generation + // A is greater than generation B, then the `Range` corresponding to A is + // newer than the `Range` corresponding to B, and should be used + // preferentially. + bytes generation = 5; +} + +// A `Tablet` represents a single replica of a `Group`. A tablet is served by a +// single server at a time, and can move between servers due to server death or +// simply load balancing. +message Tablet { + // Indicates the role of the tablet. + enum Role { + // Not specified. + ROLE_UNSPECIFIED = 0; + + // The tablet can perform reads and (if elected leader) writes. + READ_WRITE = 1; + + // The tablet can only perform reads. + READ_ONLY = 2; + } + + // The UID of the tablet, unique within the database. Matches the + // `tablet_uids` and `leader_tablet_uid` fields in `Group`. + uint64 tablet_uid = 1; + + // The address of the server that is serving this tablet -- either an IP + // address or DNS hostname and a port number. + string server_address = 2; + + // Where this tablet is located. This is the name of a Google Cloud region, + // such as "us-central1". + string location = 3; + + // The role of the tablet. + Role role = 4; + + // `incarnation` indicates the freshness of the tablet information contained + // in this proto. Incarnations can be compared lexicographically; if + // incarnation A is greater than incarnation B, then the `Tablet` + // corresponding to A is newer than the `Tablet` corresponding to B, and + // should be used preferentially. 
+ bytes incarnation = 5; + + // Distances help the client pick the closest tablet out of the list of + // tablets for a given request. Tablets with lower distances should generally + // be preferred. Tablets with the same distance are approximately equally + // close; the client can choose arbitrarily. + // + // Distances do not correspond precisely to expected latency, geographical + // distance, or anything else. Distances should be compared only between + // tablets of the same group; they are not meaningful between different + // groups. + // + // A value of zero indicates that the tablet may be in the same zone as + // the client, and have minimum network latency. A value less than or equal to + // five indicates that the tablet is thought to be in the same region as the + // client, and may have a few milliseconds of network latency. Values greater + // than five are most likely in a different region, with non-trivial network + // latency. + // + // Clients should use the following algorithm: + // * If the request is using a directed read, eliminate any tablets that + // do not match the directed read's target zone and/or replica type. + // * (Read-write transactions only) Choose leader tablet if it has an + // distance <=5. + // * Group and sort tablets by distance. Choose a random + // tablet with the lowest distance. If the request + // is not a directed read, only consider replicas with distances <=5. + // * Send the request to the fallback endpoint. + // + // The tablet picked by this algorithm may be skipped, either because it is + // marked as `skip` by the server or because the corresponding server is + // unreachable, flow controlled, etc. Skipped tablets should be added to the + // `skipped_tablet_uid` field in `RoutingHint`; the algorithm above should + // then be re-run without including the skipped tablet(s) to pick the next + // best tablet. + uint32 distance = 6; + + // If true, the tablet should not be chosen by the client. 
Typically, this
+  // signals that the tablet is unhealthy in some way. Tablets with `skip`
+  // set to true should be reported back to the server in
+  // `RoutingHint.skipped_tablet_uid`; this cues the server to send updated
+  // information for this tablet should it become usable again.
+  bool skip = 7;
+}
+
+// A `Group` represents a paxos group in a database. A group is a set of
+// tablets that are replicated across multiple servers. Groups may have a leader
+// tablet. Groups store one (or sometimes more) ranges of keys.
+message Group {
+  // The UID of the paxos group, unique within the database. Matches the
+  // `group_uid` field in `Range`.
+  uint64 group_uid = 1;
+
+  // A list of tablets that are part of the group. Note that this list may not
+  // be exhaustive; it will only include tablets the server considers useful
+  // to the client. The returned list is ordered ascending by distance.
+  //
+  // Tablet UIDs reference `Tablet.tablet_uid`.
+  repeated Tablet tablets = 2;
+
+  // The last known leader tablet of the group as an index into `tablets`. May
+  // be negative if the group has no known leader.
+  int32 leader_index = 3;
+
+  // `generation` indicates the freshness of the group information (including
+  // leader information) contained in this proto. Generations can be compared
+  // lexicographically; if generation A is greater than generation B, then the
+  // `Group` corresponding to A is newer than the `Group` corresponding to B,
+  // and should be used preferentially.
+  bytes generation = 4;
+}
+
+// A `KeyRecipe` provides the metadata required to translate reads, mutations,
+// and queries into a byte array in "sortable string format" (ssformat) that can
+// be used with `Range`s to route requests. Note that the client *must* tolerate
+// `KeyRecipe`s that appear to be invalid, since the `KeyRecipe` format may
+// change over time. Requests with invalid `KeyRecipe`s should be routed to a
+// default server.
+message KeyRecipe { + // An ssformat key is composed of a sequence of tag numbers and key column + // values. `Part` represents a single tag or key column value. + message Part { + // The remaining fields encode column values. + enum Order { + // Default value, equivalent to `ASCENDING`. + ORDER_UNSPECIFIED = 0; + + // The key is ascending - corresponds to `ASC` in the schema definition. + ASCENDING = 1; + + // The key is descending - corresponds to `DESC` in the schema definition. + DESCENDING = 2; + } + + // The null order of the key column. This dictates where NULL values sort + // in the sorted order. Note that columns which are `NOT NULL` can have a + // special encoding. + enum NullOrder { + // Default value. This value is unused. + NULL_ORDER_UNSPECIFIED = 0; + + // NULL values sort before any non-NULL values. + NULLS_FIRST = 1; + + // NULL values sort after any non-NULL values. + NULLS_LAST = 2; + + // The column does not support NULL values. + NOT_NULL = 3; + } + + // If non-zero, `tag` is the only field present in this `Part`. The part + // is encoded by appending `tag` to the ssformat key. + uint32 tag = 1; + + // Whether the key column is sorted ascending or descending. Only present + // if `tag` is zero. + Order order = 2; + + // How NULLs are represented in the encoded key part. Only present if `tag` + // is zero. + NullOrder null_order = 3; + + // The type of the key part. Only present if `tag` is zero. + Type type = 4; + + // Only present if `tag` is zero. + oneof value_type { + // `identifier` is the name of the column or query parameter. + string identifier = 5; + + // The constant value of the key part. + // It is present when query uses a constant as a part of the key. + google.protobuf.Value value = 6; + + // If true, the client is responsible to fill in the value randomly. + // It's relevant only for the INT64 type. 
+ bool random = 8; + } + + // It is a repeated field to support fetching key columns from nested + // structs, such as `STRUCT` query parameters. + repeated int32 struct_identifiers = 7; + } + + // A recipe can be associated with a table, index, or query. Tables recipes + // are used to encode read and write keys; index recipes are used for index + // reads, and query recipes are used only for SQL queries. + oneof target { + // A table name, matching the name from the database schema. + string table_name = 1; + + // An index name, matching the name from the database schema. + string index_name = 2; + + // The UID of a query, matching the UID from `RoutingHint`. + uint64 operation_uid = 3; + } + + // Parts are in the order they should appear in the encoded key. + repeated Part part = 4; +} + +// A `RecipeList` contains a list of `KeyRecipe`s, which share the same +// schema generation. +message RecipeList { + // The schema generation of the recipes. To be sent to the server in + // `RoutingHint.schema_generation` whenever one of the recipes is used. + // `schema_generation` values are comparable with each other; if generation A + // compares greater than generation B, then A is a more recent schema than B. + // Clients should in general aim to cache only the latest schema generation, + // and discard more stale recipes. + bytes schema_generation = 1; + + // A list of recipes to be cached. + repeated KeyRecipe recipe = 3; +} + +// A `CacheUpdate` expresses a set of changes the client should incorporate into +// its location cache. These changes may or may not be newer than what the +// client has in its cache, and should be discarded if necessary. `CacheUpdate`s +// can be obtained in response to requests that included a `RoutingHint` +// field, but may also be obtained by explicit location-fetching RPCs which may +// be added in the future. +message CacheUpdate { + // An internal ID for the database. 
Database names can be reused if a database + // is deleted and re-created. Each time the database is re-created, it will + // get a new database ID, which will never be re-used for any other database. + uint64 database_id = 1; + + // A list of ranges to be cached. + repeated Range range = 2; + + // A list of groups to be cached. + repeated Group group = 3; + + // A list of recipes to be cached. + RecipeList key_recipes = 5; +} + +// `RoutingHint` can be optionally added to location-aware Spanner +// requests. It gives the server hints that can be used to route the request to +// an appropriate server, potentially significantly decreasing latency and +// improving throughput. To achieve improved performance, most fields must be +// filled in with accurate values. +// +// The presence of a valid `RoutingHint` tells the server that the client +// is location-aware. +// +// `RoutingHint` does not change the semantics of the request; it is +// purely a performance hint; the request will perform the same actions on the +// database's data as if `RoutingHint` were not present. However, if +// the `RoutingHint` is incomplete or incorrect, the response may include +// a `CacheUpdate` the client can use to correct its location cache. +message RoutingHint { + // A tablet that was skipped by the client. See `Tablet.tablet_uid` and + // `Tablet.incarnation`. + message SkippedTablet { + // The tablet UID of the tablet that was skipped. See `Tablet.tablet_uid`. + uint64 tablet_uid = 1; + + // The incarnation of the tablet that was skipped. See `Tablet.incarnation`. + bytes incarnation = 2; + } + + // A session-scoped unique ID for the operation, computed client-side. + // Requests with the same `operation_uid` should have a shared 'shape', + // meaning that some fields are expected to be the same, such as the SQL + // query, the target table/columns (for reads) etc. 
Requests with the same + // `operation_uid` are meant to differ only in fields like keys/key + // ranges/query parameters, transaction IDs, etc. + // + // `operation_uid` must be non-zero for `RoutingHint` to be valid. + uint64 operation_uid = 1; + + // The database ID of the database being accessed, see + // `CacheUpdate.database_id`. Should match the cache entries that were used + // to generate the rest of the fields in this `RoutingHint`. + uint64 database_id = 2; + + // The schema generation of the recipe that was used to generate `key` and + // `limit_key`. See also `RecipeList.schema_generation`. + bytes schema_generation = 3; + + // The key / key range that this request accesses. For operations that + // access a single key, `key` should be set and `limit_key` should be empty. + // For operations that access a key range, `key` and `limit_key` should both + // be set, to the inclusive start and exclusive end of the range respectively. + // + // The keys are encoded in "sortable string format" (ssformat), using a + // `KeyRecipe` that is appropriate for the request. See `KeyRecipe` for more + // details. + bytes key = 4; + + // If this request targets a key range, this is the exclusive end of the + // range. See `key` for more details. + bytes limit_key = 5; + + // The group UID of the group that the client believes serves the range + // defined by `key` and `limit_key`. See `Range.group_uid` for more details. + uint64 group_uid = 6; + + // The split ID of the split that the client believes contains the range + // defined by `key` and `limit_key`. See `Range.split_id` for more details. + uint64 split_id = 7; + + // The tablet UID of the tablet from group `group_uid` that the client + // believes is best to serve this request. See `Group.local_tablet_uids` and + // `Group.leader_tablet_uid`. 
+ uint64 tablet_uid = 8; + + // If the client had multiple options for tablet selection, and some of its + // first choices were unhealthy (e.g., the server is unreachable, or + // `Tablet.skip` is true), this field will contain the tablet UIDs of those + // tablets, with their incarnations. The server may include a `CacheUpdate` + // with new locations for those tablets. + repeated SkippedTablet skipped_tablet_uid = 9; + + // If present, the client's current location. This should be the name of a + // Google Cloud zone or region, such as "us-central1". + // + // If absent, the client's location will be assumed to be the same as the + // location of the server the client ends up connected to. + // + // Locations are primarily valuable for clients that connect from regions + // other than the ones that contain the Spanner database. + string client_location = 10; +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/mutation.proto b/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/mutation.proto new file mode 100644 index 000000000000..7e3306a20382 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/mutation.proto @@ -0,0 +1,156 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.spanner.v1; + +import "google/api/field_behavior.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; +import "google/spanner/v1/keys.proto"; + +option csharp_namespace = "Google.Cloud.Spanner.V1"; +option go_package = "cloud.google.com/go/spanner/apiv1/spannerpb;spannerpb"; +option java_multiple_files = true; +option java_outer_classname = "MutationProto"; +option java_package = "com.google.spanner.v1"; +option php_namespace = "Google\\Cloud\\Spanner\\V1"; +option ruby_package = "Google::Cloud::Spanner::V1"; + +// A modification to one or more Cloud Spanner rows. Mutations can be +// applied to a Cloud Spanner database by sending them in a +// [Commit][google.spanner.v1.Spanner.Commit] call. +message Mutation { + // Arguments to [insert][google.spanner.v1.Mutation.insert], + // [update][google.spanner.v1.Mutation.update], + // [insert_or_update][google.spanner.v1.Mutation.insert_or_update], and + // [replace][google.spanner.v1.Mutation.replace] operations. + message Write { + // Required. The table whose rows will be written. + string table = 1 [(google.api.field_behavior) = REQUIRED]; + + // The names of the columns in + // [table][google.spanner.v1.Mutation.Write.table] to be written. + // + // The list of columns must contain enough columns to allow + // Cloud Spanner to derive values for all primary key columns in the + // row(s) to be modified. + repeated string columns = 2; + + // The values to be written. `values` can contain more than one + // list of values. If it does, then multiple rows are written, one + // for each entry in `values`. Each list in `values` must have + // exactly as many entries as there are entries in + // [columns][google.spanner.v1.Mutation.Write.columns] above. 
Sending + // multiple lists is equivalent to sending multiple `Mutation`s, each + // containing one `values` entry and repeating + // [table][google.spanner.v1.Mutation.Write.table] and + // [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in + // each list are encoded as described [here][google.spanner.v1.TypeCode]. + repeated google.protobuf.ListValue values = 3; + } + + // Arguments to [delete][google.spanner.v1.Mutation.delete] operations. + message Delete { + // Required. The table whose rows will be deleted. + string table = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The primary keys of the rows within + // [table][google.spanner.v1.Mutation.Delete.table] to delete. The primary + // keys must be specified in the order in which they appear in the `PRIMARY + // KEY()` clause of the table's equivalent DDL statement (the DDL statement + // used to create the table). Delete is idempotent. The transaction will + // succeed even if some or all rows do not exist. + KeySet key_set = 2 [(google.api.field_behavior) = REQUIRED]; + } + + // Arguments to [send][google.spanner.v1.Mutation.send] operations. + message Send { + // Required. The queue to which the message will be sent. + string queue = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The primary key of the message to be sent. + google.protobuf.ListValue key = 2 [(google.api.field_behavior) = REQUIRED]; + + // The time at which Spanner will begin attempting to deliver the message. + // If `deliver_time` is not set, Spanner will deliver the message + // immediately. If `deliver_time` is in the past, Spanner will replace it + // with a value closer to the current time. + google.protobuf.Timestamp deliver_time = 3; + + // The payload of the message. + google.protobuf.Value payload = 4; + } + + // Arguments to [ack][google.spanner.v1.Mutation.ack] operations. + message Ack { + // Required. The queue where the message to be acked is stored. 
+ string queue = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The primary key of the message to be acked. + google.protobuf.ListValue key = 2 [(google.api.field_behavior) = REQUIRED]; + + // By default, an attempt to ack a message that does not exist will fail + // with a `NOT_FOUND` error. With `ignore_not_found` set to true, the ack + // will succeed even if the message does not exist. This is useful for + // unconditionally acking a message, even if it is missing or has already + // been acked. + bool ignore_not_found = 3; + } + + // Required. The operation to perform. + oneof operation { + // Insert new rows in a table. If any of the rows already exist, + // the write or transaction fails with error `ALREADY_EXISTS`. + Write insert = 1; + + // Update existing rows in a table. If any of the rows does not + // already exist, the transaction fails with error `NOT_FOUND`. + Write update = 2; + + // Like [insert][google.spanner.v1.Mutation.insert], except that if the row + // already exists, then its column values are overwritten with the ones + // provided. Any column values not explicitly written are preserved. + // + // When using + // [insert_or_update][google.spanner.v1.Mutation.insert_or_update], just as + // when using [insert][google.spanner.v1.Mutation.insert], all `NOT NULL` + // columns in the table must be given a value. This holds true even when the + // row already exists and will therefore actually be updated. + Write insert_or_update = 3; + + // Like [insert][google.spanner.v1.Mutation.insert], except that if the row + // already exists, it is deleted, and the column values provided are + // inserted instead. Unlike + // [insert_or_update][google.spanner.v1.Mutation.insert_or_update], this + // means any values not explicitly written become `NULL`. + // + // In an interleaved table, if you create the child table with the + // `ON DELETE CASCADE` annotation, then replacing a parent row + // also deletes the child rows. 
Otherwise, you must delete the + // child rows before you replace the parent row. + Write replace = 4; + + // Delete rows from a table. Succeeds whether or not the named + // rows were present. + Delete delete = 5; + + // Send a message to a queue. + Send send = 6; + + // Ack a message from a queue. + Ack ack = 7; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/query_plan.proto b/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/query_plan.proto new file mode 100644 index 000000000000..5850ff97fb21 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/query_plan.proto @@ -0,0 +1,156 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.spanner.v1; + +import "google/api/field_behavior.proto"; +import "google/protobuf/struct.proto"; + +option csharp_namespace = "Google.Cloud.Spanner.V1"; +option go_package = "cloud.google.com/go/spanner/apiv1/spannerpb;spannerpb"; +option java_multiple_files = true; +option java_outer_classname = "QueryPlanProto"; +option java_package = "com.google.spanner.v1"; +option php_namespace = "Google\\Cloud\\Spanner\\V1"; +option ruby_package = "Google::Cloud::Spanner::V1"; + +// Node information for nodes appearing in a +// [QueryPlan.plan_nodes][google.spanner.v1.QueryPlan.plan_nodes]. 
+message PlanNode { + // The kind of [PlanNode][google.spanner.v1.PlanNode]. Distinguishes between + // the two different kinds of nodes that can appear in a query plan. + enum Kind { + // Not specified. + KIND_UNSPECIFIED = 0; + + // Denotes a Relational operator node in the expression tree. Relational + // operators represent iterative processing of rows during query execution. + // For example, a `TableScan` operation that reads rows from a table. + RELATIONAL = 1; + + // Denotes a Scalar node in the expression tree. Scalar nodes represent + // non-iterable entities in the query plan. For example, constants or + // arithmetic operators appearing inside predicate expressions or references + // to column names. + SCALAR = 2; + } + + // Metadata associated with a parent-child relationship appearing in a + // [PlanNode][google.spanner.v1.PlanNode]. + message ChildLink { + // The node to which the link points. + int32 child_index = 1; + + // The type of the link. For example, in Hash Joins this could be used to + // distinguish between the build child and the probe child, or in the case + // of the child being an output variable, to represent the tag associated + // with the output variable. + string type = 2; + + // Only present if the child node is + // [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] and corresponds to an + // output variable of the parent node. The field carries the name of the + // output variable. For example, a `TableScan` operator that reads rows from + // a table will have child links to the `SCALAR` nodes representing the + // output variables created for each column that is read by the operator. + // The corresponding `variable` fields will be set to the variable names + // assigned to the columns. + string variable = 3; + } + + // Condensed representation of a node and its subtree. Only present for + // `SCALAR` [PlanNode(s)][google.spanner.v1.PlanNode]. 
+ message ShortRepresentation { + // A string representation of the expression subtree rooted at this node. + string description = 1; + + // A mapping of (subquery variable name) -> (subquery node id) for cases + // where the `description` string of this node references a `SCALAR` + // subquery contained in the expression subtree rooted at this node. The + // referenced `SCALAR` subquery may not necessarily be a direct child of + // this node. + map subqueries = 2; + } + + // The `PlanNode`'s index in [node + // list][google.spanner.v1.QueryPlan.plan_nodes]. + int32 index = 1; + + // Used to determine the type of node. May be needed for visualizing + // different kinds of nodes differently. For example, If the node is a + // [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] node, it will have a + // condensed representation which can be used to directly embed a description + // of the node in its parent. + Kind kind = 2; + + // The display name for the node. + string display_name = 3; + + // List of child node `index`es and their relationship to this parent. + repeated ChildLink child_links = 4; + + // Condensed representation for + // [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes. + ShortRepresentation short_representation = 5; + + // Attributes relevant to the node contained in a group of key-value pairs. + // For example, a Parameter Reference node could have the following + // information in its metadata: + // + // { + // "parameter_reference": "param1", + // "parameter_type": "array" + // } + google.protobuf.Struct metadata = 6; + + // The execution statistics associated with the node, contained in a group of + // key-value pairs. Only present if the plan was returned as a result of a + // profile query. For example, number of executions, number of rows/time per + // execution etc. + google.protobuf.Struct execution_stats = 7; +} + +// Output of query advisor analysis. 
+message QueryAdvisorResult { + // Recommendation to add new indexes to run queries more efficiently. + message IndexAdvice { + // Optional. DDL statements to add new indexes that will improve the query. + repeated string ddl = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Estimated latency improvement factor. For example if the query + // currently takes 500 ms to run and the estimated latency with new indexes + // is 100 ms this field will be 5. + double improvement_factor = 2 [(google.api.field_behavior) = OPTIONAL]; + } + + // Optional. Index Recommendation for a query. This is an optional field and + // the recommendation will only be available when the recommendation + // guarantees significant improvement in query performance. + repeated IndexAdvice index_advice = 1 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Contains an ordered list of nodes appearing in the query plan. +message QueryPlan { + // The nodes in the query plan. Plan nodes are returned in pre-order starting + // with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id` + // corresponds to its index in `plan_nodes`. + repeated PlanNode plan_nodes = 1; + + // Optional. The advise/recommendations for a query. Currently this field will + // be serving index recommendations for a query. + QueryAdvisorResult query_advice = 2 [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/result_set.proto b/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/result_set.proto new file mode 100644 index 000000000000..3851d688ce27 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/result_set.proto @@ -0,0 +1,260 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.spanner.v1; + +import "google/api/field_behavior.proto"; +import "google/protobuf/struct.proto"; +import "google/spanner/v1/location.proto"; +import "google/spanner/v1/query_plan.proto"; +import "google/spanner/v1/transaction.proto"; +import "google/spanner/v1/type.proto"; + +option csharp_namespace = "Google.Cloud.Spanner.V1"; +option go_package = "cloud.google.com/go/spanner/apiv1/spannerpb;spannerpb"; +option java_multiple_files = true; +option java_outer_classname = "ResultSetProto"; +option java_package = "com.google.spanner.v1"; +option php_namespace = "Google\\Cloud\\Spanner\\V1"; +option ruby_package = "Google::Cloud::Spanner::V1"; + +// Results from [Read][google.spanner.v1.Spanner.Read] or +// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. +message ResultSet { + // Metadata about the result set, such as row type information. + ResultSetMetadata metadata = 1; + + // Each element in `rows` is a row whose format is defined by + // [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith + // element in each row matches the ith field in + // [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements + // are encoded based on type as described [here][google.spanner.v1.TypeCode]. + repeated google.protobuf.ListValue rows = 2; + + // Query plan and execution statistics for the SQL statement that + // produced this result set. These can be requested by setting + // [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]. 
+ // DML statements always produce stats containing the number of rows + // modified, unless executed using the + // [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN] + // [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]. + // Other fields might or might not be populated, based on the + // [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]. + ResultSetStats stats = 3; + + // Optional. A precommit token is included if the read-write transaction is on + // a multiplexed session. Pass the precommit token with the highest sequence + // number from this transaction attempt to the + // [Commit][google.spanner.v1.Spanner.Commit] request for this transaction. + MultiplexedSessionPrecommitToken precommit_token = 5 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A cache update expresses a set of changes the client should + // incorporate into its location cache. The client should discard the changes + // if they are older than the data it already has. This data can be obtained + // in response to requests that included a `RoutingHint` field, but may also + // be obtained by explicit location-fetching RPCs which may be added in the + // future. + CacheUpdate cache_update = 6 [(google.api.field_behavior) = OPTIONAL]; +} + +// Partial results from a streaming read or SQL query. Streaming reads and +// SQL queries better tolerate large result sets, large rows, and large +// values, but are a little trickier to consume. +message PartialResultSet { + // Metadata about the result set, such as row type information. + // Only present in the first response. + ResultSetMetadata metadata = 1; + + // A streamed result set consists of a stream of values, which might + // be split into many `PartialResultSet` messages to accommodate + // large rows and/or large values. 
Every N complete values defines a + // row, where N is equal to the number of entries in + // [metadata.row_type.fields][google.spanner.v1.StructType.fields]. + // + // Most values are encoded based on type as described + // [here][google.spanner.v1.TypeCode]. + // + // It's possible that the last value in values is "chunked", + // meaning that the rest of the value is sent in subsequent + // `PartialResultSet`(s). This is denoted by the + // [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] field. + // Two or more chunked values can be merged to form a complete value as + // follows: + // + // * `bool/number/null`: can't be chunked + // * `string`: concatenate the strings + // * `list`: concatenate the lists. If the last element in a list is a + // `string`, `list`, or `object`, merge it with the first element in + // the next list by applying these rules recursively. + // * `object`: concatenate the (field name, field value) pairs. If a + // field name is duplicated, then apply these rules recursively + // to merge the field values. + // + // Some examples of merging: + // + // Strings are concatenated. + // "foo", "bar" => "foobar" + // + // Lists of non-strings are concatenated. + // [2, 3], [4] => [2, 3, 4] + // + // Lists are concatenated, but the last and first elements are merged + // because they are strings. + // ["a", "b"], ["c", "d"] => ["a", "bc", "d"] + // + // Lists are concatenated, but the last and first elements are merged + // because they are lists. Recursively, the last and first elements + // of the inner lists are merged because they are strings. + // ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"] + // + // Non-overlapping object fields are combined. + // {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"} + // + // Overlapping object fields are merged. + // {"a": "1"}, {"a": "2"} => {"a": "12"} + // + // Examples of merging objects containing lists of strings. 
+ // {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]} + // + // For a more complete example, suppose a streaming SQL query is + // yielding a result set whose rows contain a single string + // field. The following `PartialResultSet`s might be yielded: + // + // { + // "metadata": { ... } + // "values": ["Hello", "W"] + // "chunked_value": true + // "resume_token": "Af65..." + // } + // { + // "values": ["orl"] + // "chunked_value": true + // } + // { + // "values": ["d"] + // "resume_token": "Zx1B..." + // } + // + // This sequence of `PartialResultSet`s encodes two rows, one + // containing the field value `"Hello"`, and a second containing the + // field value `"World" = "W" + "orl" + "d"`. + // + // Not all `PartialResultSet`s contain a `resume_token`. Execution can only be + // resumed from a previously yielded `resume_token`. For the above sequence of + // `PartialResultSet`s, resuming the query with `"resume_token": "Af65..."` + // yields results from the `PartialResultSet` with value "orl". + repeated google.protobuf.Value values = 2; + + // If true, then the final value in + // [values][google.spanner.v1.PartialResultSet.values] is chunked, and must be + // combined with more values from subsequent `PartialResultSet`s to obtain a + // complete field value. + bool chunked_value = 3; + + // Streaming calls might be interrupted for a variety of reasons, such + // as TCP connection loss. If this occurs, the stream of results can + // be resumed by re-sending the original request and including + // `resume_token`. Note that executing any other transaction in the + // same session invalidates the token. + bytes resume_token = 4; + + // Query plan and execution statistics for the statement that produced this + // streaming result set. These can be requested by setting + // [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] + // and are sent only once with the last response in the stream. 
This field is + // also present in the last response for DML statements. + ResultSetStats stats = 5; + + // Optional. A precommit token is included if the read-write transaction + // has multiplexed sessions enabled. Pass the precommit token with the highest + // sequence number from this transaction attempt to the + // [Commit][google.spanner.v1.Spanner.Commit] request for this transaction. + MultiplexedSessionPrecommitToken precommit_token = 8 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Indicates whether this is the last `PartialResultSet` in the + // stream. The server might optionally set this field. Clients shouldn't rely + // on this field being set in all cases. + bool last = 9 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A cache update expresses a set of changes the client should + // incorporate into its location cache. The client should discard the changes + // if they are older than the data it already has. This data can be obtained + // in response to requests that included a `RoutingHint` field, but may also + // be obtained by explicit location-fetching RPCs which may be added in the + // future. + CacheUpdate cache_update = 10 [(google.api.field_behavior) = OPTIONAL]; +} + +// Metadata about a [ResultSet][google.spanner.v1.ResultSet] or +// [PartialResultSet][google.spanner.v1.PartialResultSet]. +message ResultSetMetadata { + // Indicates the field names and types for the rows in the result + // set. For example, a SQL query like `"SELECT UserId, UserName FROM + // Users"` could return a `row_type` value like: + // + // "fields": [ + // { "name": "UserId", "type": { "code": "INT64" } }, + // { "name": "UserName", "type": { "code": "STRING" } }, + // ] + StructType row_type = 1; + + // If the read or SQL query began a transaction as a side-effect, the + // information about the new transaction is yielded here. + Transaction transaction = 2; + + // A SQL query can be parameterized. 
In PLAN mode, these parameters can be + // undeclared. This indicates the field names and types for those undeclared + // parameters in the SQL query. For example, a SQL query like `"SELECT * FROM + // Users where UserId = @userId and UserName = @userName "` could return a + // `undeclared_parameters` value like: + // + // "fields": [ + // { "name": "UserId", "type": { "code": "INT64" } }, + // { "name": "UserName", "type": { "code": "STRING" } }, + // ] + StructType undeclared_parameters = 3; +} + +// Additional statistics about a [ResultSet][google.spanner.v1.ResultSet] or +// [PartialResultSet][google.spanner.v1.PartialResultSet]. +message ResultSetStats { + // [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this + // result. + QueryPlan query_plan = 1; + + // Aggregated statistics from the execution of the query. Only present when + // the query is profiled. For example, a query could return the statistics as + // follows: + // + // { + // "rows_returned": "3", + // "elapsed_time": "1.22 secs", + // "cpu_time": "1.19 secs" + // } + google.protobuf.Struct query_stats = 2; + + // The number of rows modified by the DML statement. + oneof row_count { + // Standard DML returns an exact count of rows that were modified. + int64 row_count_exact = 3; + + // Partitioned DML doesn't offer exactly-once semantics, so it + // returns a lower bound of the rows modified. + int64 row_count_lower_bound = 4; + } +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/spanner.proto b/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/spanner.proto new file mode 100644 index 000000000000..a6796c9f1878 --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/spanner.proto @@ -0,0 +1,1435 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.spanner.v1; + +import public "google/spanner/v1/commit_response.proto"; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; +import "google/spanner/v1/keys.proto"; +import "google/spanner/v1/location.proto"; +import "google/spanner/v1/mutation.proto"; +import "google/spanner/v1/result_set.proto"; +import "google/spanner/v1/transaction.proto"; +import "google/spanner/v1/type.proto"; + +option csharp_namespace = "Google.Cloud.Spanner.V1"; +option go_package = "cloud.google.com/go/spanner/apiv1/spannerpb;spannerpb"; +option java_multiple_files = true; +option java_outer_classname = "SpannerProto"; +option java_package = "com.google.spanner.v1"; +option php_namespace = "Google\\Cloud\\Spanner\\V1"; +option ruby_package = "Google::Cloud::Spanner::V1"; +option (google.api.resource_definition) = { + type: "spanner.googleapis.com/Database" + pattern: "projects/{project}/instances/{instance}/databases/{database}" +}; + +// Cloud Spanner API +// +// The Cloud Spanner API can be used to manage sessions and execute +// transactions on data stored in Cloud Spanner databases. 
+service Spanner { + option (google.api.default_host) = "spanner.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/spanner.data"; + + // Creates a new session. A session can be used to perform + // transactions that read and/or modify data in a Cloud Spanner database. + // Sessions are meant to be reused for many consecutive + // transactions. + // + // Sessions can only execute one transaction at a time. To execute + // multiple concurrent read-write/write-only transactions, create + // multiple sessions. Note that standalone reads and queries use a + // transaction internally, and count toward the one transaction + // limit. + // + // Active sessions use additional server resources, so it's a good idea to + // delete idle and unneeded sessions. + // Aside from explicit deletes, Cloud Spanner can delete sessions when no + // operations are sent for more than an hour. If a session is deleted, + // requests to it return `NOT_FOUND`. + // + // Idle sessions can be kept alive by sending a trivial SQL query + // periodically, for example, `"SELECT 1"`. + rpc CreateSession(CreateSessionRequest) returns (Session) { + option (google.api.http) = { + post: "/v1/{database=projects/*/instances/*/databases/*}/sessions" + body: "*" + }; + option (google.api.method_signature) = "database"; + } + + // Creates multiple new sessions. + // + // This API can be used to initialize a session cache on the clients. + // See https://goo.gl/TgSFN2 for best practices on session cache management. + rpc BatchCreateSessions(BatchCreateSessionsRequest) + returns (BatchCreateSessionsResponse) { + option (google.api.http) = { + post: "/v1/{database=projects/*/instances/*/databases/*}/sessions:batchCreate" + body: "*" + }; + option (google.api.method_signature) = "database,session_count"; + } + + // Gets a session. Returns `NOT_FOUND` if the session doesn't exist. 
+ // This is mainly useful for determining whether a session is still + // alive. + rpc GetSession(GetSessionRequest) returns (Session) { + option (google.api.http) = { + get: "/v1/{name=projects/*/instances/*/databases/*/sessions/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Lists all sessions in a given database. + rpc ListSessions(ListSessionsRequest) returns (ListSessionsResponse) { + option (google.api.http) = { + get: "/v1/{database=projects/*/instances/*/databases/*}/sessions" + }; + option (google.api.method_signature) = "database"; + } + + // Ends a session, releasing server resources associated with it. This + // asynchronously triggers the cancellation of any operations that are running + // with this session. + rpc DeleteSession(DeleteSessionRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/{name=projects/*/instances/*/databases/*/sessions/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Executes an SQL statement, returning all results in a single reply. This + // method can't be used to return a result set larger than 10 MiB; + // if the query yields more data than that, the query fails with + // a `FAILED_PRECONDITION` error. + // + // Operations inside read-write transactions might return `ABORTED`. If + // this occurs, the application should restart the transaction from + // the beginning. See [Transaction][google.spanner.v1.Transaction] for more + // details. + // + // Larger result sets can be fetched in streaming fashion by calling + // [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] + // instead. + // + // The query string can be SQL or [Graph Query Language + // (GQL)](https://cloud.google.com/spanner/docs/reference/standard-sql/graph-intro). 
+ rpc ExecuteSql(ExecuteSqlRequest) returns (ResultSet) { + option (google.api.http) = { + post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeSql" + body: "*" + }; + } + + // Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the + // result set as a stream. Unlike + // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on + // the size of the returned result set. However, no individual row in the + // result set can exceed 100 MiB, and no column value can exceed 10 MiB. + // + // The query string can be SQL or [Graph Query Language + // (GQL)](https://cloud.google.com/spanner/docs/reference/standard-sql/graph-intro). + rpc ExecuteStreamingSql(ExecuteSqlRequest) returns (stream PartialResultSet) { + option (google.api.http) = { + post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeStreamingSql" + body: "*" + }; + } + + // Executes a batch of SQL DML statements. This method allows many statements + // to be run with lower latency than submitting them sequentially with + // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. + // + // Statements are executed in sequential order. A request can succeed even if + // a statement fails. The + // [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] + // field in the response provides information about the statement that failed. + // Clients must inspect this field to determine whether an error occurred. + // + // Execution stops after the first failed statement; the remaining statements + // are not executed. + rpc ExecuteBatchDml(ExecuteBatchDmlRequest) + returns (ExecuteBatchDmlResponse) { + option (google.api.http) = { + post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeBatchDml" + body: "*" + }; + } + + // Reads rows from the database using key lookups and scans, as a + // simple key/value style alternative to + // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. 
This method can't be + // used to return a result set larger than 10 MiB; if the read matches more + // data than that, the read fails with a `FAILED_PRECONDITION` + // error. + // + // Reads inside read-write transactions might return `ABORTED`. If + // this occurs, the application should restart the transaction from + // the beginning. See [Transaction][google.spanner.v1.Transaction] for more + // details. + // + // Larger result sets can be yielded in streaming fashion by calling + // [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead. + rpc Read(ReadRequest) returns (ResultSet) { + option (google.api.http) = { + post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:read" + body: "*" + }; + } + + // Like [Read][google.spanner.v1.Spanner.Read], except returns the result set + // as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no + // limit on the size of the returned result set. However, no individual row in + // the result set can exceed 100 MiB, and no column value can exceed + // 10 MiB. + rpc StreamingRead(ReadRequest) returns (stream PartialResultSet) { + option (google.api.http) = { + post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:streamingRead" + body: "*" + }; + } + + // Begins a new transaction. This step can often be skipped: + // [Read][google.spanner.v1.Spanner.Read], + // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + // [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a + // side-effect. + rpc BeginTransaction(BeginTransactionRequest) returns (Transaction) { + option (google.api.http) = { + post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:beginTransaction" + body: "*" + }; + option (google.api.method_signature) = "session,options"; + } + + // Commits a transaction. The request includes the mutations to be + // applied to rows in the database. + // + // `Commit` might return an `ABORTED` error. 
This can occur at any time; + // commonly, the cause is conflicts with concurrent + // transactions. However, it can also happen for a variety of other + // reasons. If `Commit` returns `ABORTED`, the caller should retry + // the transaction from the beginning, reusing the same session. + // + // On very rare occasions, `Commit` might return `UNKNOWN`. This can happen, + // for example, if the client job experiences a 1+ hour networking failure. + // At that point, Cloud Spanner has lost track of the transaction outcome and + // we recommend that you perform another read from the database to see the + // state of things as they are now. + rpc Commit(CommitRequest) returns (CommitResponse) { + option (google.api.http) = { + post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:commit" + body: "*" + }; + option (google.api.method_signature) = "session,transaction_id,mutations"; + option (google.api.method_signature) = + "session,single_use_transaction,mutations"; + } + + // Rolls back a transaction, releasing any locks it holds. It's a good + // idea to call this for any transaction that includes one or more + // [Read][google.spanner.v1.Spanner.Read] or + // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately + // decides not to commit. + // + // `Rollback` returns `OK` if it successfully aborts the transaction, the + // transaction was already aborted, or the transaction isn't + // found. `Rollback` never returns `ABORTED`. + rpc Rollback(RollbackRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:rollback" + body: "*" + }; + option (google.api.method_signature) = "session,transaction_id"; + } + + // Creates a set of partition tokens that can be used to execute a query + // operation in parallel. 
Each of the returned partition tokens can be used + // by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to + // specify a subset of the query result to read. The same session and + // read-only transaction must be used by the `PartitionQueryRequest` used to + // create the partition tokens and the `ExecuteSqlRequests` that use the + // partition tokens. + // + // Partition tokens become invalid when the session used to create them + // is deleted, is idle for too long, begins a new transaction, or becomes too + // old. When any of these happen, it isn't possible to resume the query, and + // the whole operation must be restarted from the beginning. + rpc PartitionQuery(PartitionQueryRequest) returns (PartitionResponse) { + option (google.api.http) = { + post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionQuery" + body: "*" + }; + } + + // Creates a set of partition tokens that can be used to execute a read + // operation in parallel. Each of the returned partition tokens can be used + // by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a + // subset of the read result to read. The same session and read-only + // transaction must be used by the `PartitionReadRequest` used to create the + // partition tokens and the `ReadRequests` that use the partition tokens. + // There are no ordering guarantees on rows returned among the returned + // partition tokens, or even within each individual `StreamingRead` call + // issued with a `partition_token`. + // + // Partition tokens become invalid when the session used to create them + // is deleted, is idle for too long, begins a new transaction, or becomes too + // old. When any of these happen, it isn't possible to resume the read, and + // the whole operation must be restarted from the beginning. 
+ rpc PartitionRead(PartitionReadRequest) returns (PartitionResponse) { + option (google.api.http) = { + post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionRead" + body: "*" + }; + } + + // Batches the supplied mutation groups in a collection of efficient + // transactions. All mutations in a group are committed atomically. However, + // mutations across groups can be committed non-atomically in an unspecified + // order and thus, they must be independent of each other. Partial failure is + // possible, that is, some groups might have been committed successfully, + // while some might have failed. The results of individual batches are + // streamed into the response as the batches are applied. + // + // `BatchWrite` requests are not replay protected, meaning that each mutation + // group can be applied more than once. Replays of non-idempotent mutations + // can have undesirable effects. For example, replays of an insert mutation + // can produce an already exists error or if you use generated or commit + // timestamp-based keys, it can result in additional rows being added to the + // mutation's table. We recommend structuring your mutation groups to be + // idempotent to avoid this issue. + rpc BatchWrite(BatchWriteRequest) returns (stream BatchWriteResponse) { + option (google.api.http) = { + post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:batchWrite" + body: "*" + }; + option (google.api.method_signature) = "session,mutation_groups"; + } +} + +// The request for [CreateSession][google.spanner.v1.Spanner.CreateSession]. +message CreateSessionRequest { + // Required. The database in which the new session is created. + string database = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + } + ]; + + // Required. The session to create. 
+ Session session = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// The request for +// [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. +message BatchCreateSessionsRequest { + // Required. The database in which the new sessions are created. + string database = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + } + ]; + + // Parameters to apply to each created session. + Session session_template = 2; + + // Required. The number of sessions to be created in this batch call. At least + // one session is created. The API can return fewer than the requested number + // of sessions. If a specific number of sessions are desired, the client can + // make additional calls to `BatchCreateSessions` (adjusting + // [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count] + // as necessary). + int32 session_count = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// The response for +// [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. +message BatchCreateSessionsResponse { + // The freshly created sessions. + repeated Session session = 1; +} + +// A session in the Cloud Spanner API. +message Session { + option (google.api.resource) = { + type: "spanner.googleapis.com/Session" + pattern: "projects/{project}/instances/{instance}/databases/{database}/sessions/{session}" + plural: "sessions" + singular: "session" + }; + + // Output only. The name of the session. This is always system-assigned. + string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // The labels for the session. + // + // * Label keys must be between 1 and 63 characters long and must conform to + // the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. + // * Label values must be between 0 and 63 characters long and must conform + // to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. 
+ // * No more than 64 labels can be associated with a given session. + // + // See https://goo.gl/xmQnxf for more information on and examples of labels. + map<string, string> labels = 2; + + // Output only. The timestamp when the session is created. + google.protobuf.Timestamp create_time = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The approximate timestamp when the session is last used. It's + // typically earlier than the actual last use time. + google.protobuf.Timestamp approximate_last_use_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // The database role which created this session. + string creator_role = 5; + + // Optional. If `true`, specifies a multiplexed session. Use a multiplexed + // session for multiple, concurrent operations including any combination of + // read-only and read-write transactions. Use + // [`sessions.create`][google.spanner.v1.Spanner.CreateSession] to create + // multiplexed sessions. Don't use + // [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions] to + // create a multiplexed session. You can't delete or list multiplexed + // sessions. + bool multiplexed = 6 [(google.api.field_behavior) = OPTIONAL]; +} + +// The request for [GetSession][google.spanner.v1.Spanner.GetSession]. +message GetSessionRequest { + // Required. The name of the session to retrieve. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } + ]; +} + +// The request for [ListSessions][google.spanner.v1.Spanner.ListSessions]. +message ListSessionsRequest { + // Required. The database in which to list sessions. + string database = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + } + ]; + + // Number of sessions to be returned in the response. If 0 or less, defaults + // to the server's maximum allowed page size. 
+ int32 page_size = 2; + + // If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token] + // from a previous + // [ListSessionsResponse][google.spanner.v1.ListSessionsResponse]. + string page_token = 3; + + // An expression for filtering the results of the request. Filter rules are + // case insensitive. The fields eligible for filtering are: + // + // * `labels.key` where key is the name of a label + // + // Some examples of using filters are: + // + // * `labels.env:*` --> The session has the label "env". + // * `labels.env:dev` --> The session has the label "env" and the value of + // the label contains the string "dev". + string filter = 4; +} + +// The response for [ListSessions][google.spanner.v1.Spanner.ListSessions]. +message ListSessionsResponse { + // The list of requested sessions. + repeated Session sessions = 1; + + // `next_page_token` can be sent in a subsequent + // [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more + // of the matching sessions. + string next_page_token = 2; +} + +// The request for [DeleteSession][google.spanner.v1.Spanner.DeleteSession]. +message DeleteSessionRequest { + // Required. The name of the session to delete. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } + ]; +} + +// Common request options for various APIs. +message RequestOptions { + // The relative priority for requests. Note that priority isn't applicable + // for [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. + // + // The priority acts as a hint to the Cloud Spanner scheduler and doesn't + // guarantee priority or order of execution. For example: + // + // * Some parts of a write operation always execute at `PRIORITY_HIGH`, + // regardless of the specified priority. 
This can cause you to see an + // increase in high priority workload even when executing a low priority + // request. This can also potentially cause a priority inversion where a + // lower priority request is fulfilled ahead of a higher priority + // request. + // * If a transaction contains multiple operations with different priorities, + // Cloud Spanner doesn't guarantee to process the higher priority + // operations first. There might be other constraints to satisfy, such as + // the order of operations. + enum Priority { + // `PRIORITY_UNSPECIFIED` is equivalent to `PRIORITY_HIGH`. + PRIORITY_UNSPECIFIED = 0; + + // This specifies that the request is low priority. + PRIORITY_LOW = 1; + + // This specifies that the request is medium priority. + PRIORITY_MEDIUM = 2; + + // This specifies that the request is high priority. + PRIORITY_HIGH = 3; + } + + // Container for various pieces of client-owned context attached to a request. + message ClientContext { + // Optional. Map of parameter name to value for this request. These values + // will be returned by any SECURE_CONTEXT() calls invoked by this request + // (e.g., by queries against Parameterized Secure Views). + map<string, string> secure_context = 1 + [(google.api.field_behavior) = OPTIONAL]; + } + + // Priority for the request. + Priority priority = 1; + + // A per-request tag which can be applied to queries or reads, used for + // statistics collection. + // Both `request_tag` and `transaction_tag` can be specified for a read or + // query that belongs to a transaction. + // This field is ignored for requests where it's not applicable (for example, + // `CommitRequest`). + // Legal characters for `request_tag` values are all printable characters + // (ASCII 32 - 126) and the length of a request_tag is limited to 50 + // characters. Values that exceed this limit are truncated. + // Any leading underscore (_) characters are removed from the string. 
+ string request_tag = 2; + + // A tag used for statistics collection about this transaction. + // Both `request_tag` and `transaction_tag` can be specified for a read or + // query that belongs to a transaction. + // To enable tagging on a transaction, `transaction_tag` must be set to the + // same value for all requests belonging to the same transaction, including + // [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. + // If this request doesn't belong to any transaction, `transaction_tag` is + // ignored. + // Legal characters for `transaction_tag` values are all printable characters + // (ASCII 32 - 126) and the length of a `transaction_tag` is limited to 50 + // characters. Values that exceed this limit are truncated. + // Any leading underscore (_) characters are removed from the string. + string transaction_tag = 3; + + // Optional. Optional context that may be needed for some requests. + ClientContext client_context = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// The `DirectedReadOptions` can be used to indicate which replicas or regions +// should be used for non-transactional reads or queries. +// +// `DirectedReadOptions` can only be specified for a read-only transaction, +// otherwise the API returns an `INVALID_ARGUMENT` error. +message DirectedReadOptions { + // The directed read replica selector. + // Callers must provide one or more of the following fields for replica + // selection: + // + // * `location` - The location must be one of the regions within the + // multi-region configuration of your database. + // * `type` - The type of the replica. + // + // Some examples of using replica_selectors are: + // + // * `location:us-east1` --> The "us-east1" replica(s) of any available type + // is used to process the request. + // * `type:READ_ONLY` --> The "READ_ONLY" type replica(s) in the nearest + // available location are used to process the + // request. 
+ // * `location:us-east1 type:READ_ONLY` --> The "READ_ONLY" type replica(s) + // in location "us-east1" is used to process + // the request. + message ReplicaSelection { + // Indicates the type of replica. + enum Type { + // Not specified. + TYPE_UNSPECIFIED = 0; + + // Read-write replicas support both reads and writes. + READ_WRITE = 1; + + // Read-only replicas only support reads (not writes). + READ_ONLY = 2; + } + + // The location or region of the serving requests, for example, "us-east1". + string location = 1; + + // The type of replica. + Type type = 2; + } + + // An `IncludeReplicas` contains a repeated set of `ReplicaSelection` which + // indicates the order in which replicas should be considered. + message IncludeReplicas { + // The directed read replica selector. + repeated ReplicaSelection replica_selections = 1; + + // If `true`, Spanner doesn't route requests to a replica outside the + // <`include_replicas` list when all of the specified replicas are + // unavailable or unhealthy. Default value is `false`. + bool auto_failover_disabled = 2; + } + + // An ExcludeReplicas contains a repeated set of ReplicaSelection that should + // be excluded from serving requests. + message ExcludeReplicas { + // The directed read replica selector. + repeated ReplicaSelection replica_selections = 1; + } + + // Required. At most one of either `include_replicas` or `exclude_replicas` + // should be present in the message. + oneof replicas { + // `Include_replicas` indicates the order of replicas (as they appear in + // this list) to process the request. If `auto_failover_disabled` is set to + // `true` and all replicas are exhausted without finding a healthy replica, + // Spanner waits for a replica in the list to become available, requests + // might fail due to `DEADLINE_EXCEEDED` errors. + IncludeReplicas include_replicas = 1; + + // `Exclude_replicas` indicates that specified replicas should be excluded + // from serving requests. 
Spanner doesn't route requests to the replicas + // in this list. + ExcludeReplicas exclude_replicas = 2; + } +} + +// The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and +// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]. +message ExecuteSqlRequest { + // Mode in which the statement must be processed. + enum QueryMode { + // The default mode. Only the statement results are returned. + NORMAL = 0; + + // This mode returns only the query plan, without any results or + // execution statistics information. + PLAN = 1; + + // This mode returns the query plan, overall execution statistics, + // operator level execution statistics along with the results. This has a + // performance overhead compared to the other modes. It isn't recommended + // to use this mode for production traffic. + PROFILE = 2; + + // This mode returns the overall (but not operator-level) execution + // statistics along with the results. + WITH_STATS = 3; + + // This mode returns the query plan, overall (but not operator-level) + // execution statistics along with the results. + WITH_PLAN_AND_STATS = 4; + } + + // Query optimizer configuration. + message QueryOptions { + // An option to control the selection of optimizer version. + // + // This parameter allows individual queries to pick different query + // optimizer versions. + // + // Specifying `latest` as a value instructs Cloud Spanner to use the + // latest supported query optimizer version. If not specified, Cloud Spanner + // uses the optimizer version set at the database level options. Any other + // positive integer (from the list of supported optimizer versions) + // overrides the default optimizer version for query execution. + // + // The list of supported optimizer versions can be queried from + // `SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS`. + // + // Executing a SQL statement with an invalid optimizer version fails with + // an `INVALID_ARGUMENT` error. 
+ // + // See + // https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer + // for more information on managing the query optimizer. + // + // The `optimizer_version` statement hint has precedence over this setting. + string optimizer_version = 1; + + // An option to control the selection of optimizer statistics package. + // + // This parameter allows individual queries to use a different query + // optimizer statistics package. + // + // Specifying `latest` as a value instructs Cloud Spanner to use the latest + // generated statistics package. If not specified, Cloud Spanner uses + // the statistics package set at the database level options, or the latest + // package if the database option isn't set. + // + // The statistics package requested by the query has to be exempt from + // garbage collection. This can be achieved with the following DDL + // statement: + // + // ```sql + // ALTER STATISTICS <package_name> SET OPTIONS (allow_gc=false) + // ``` + // + // The list of available statistics packages can be queried from + // `INFORMATION_SCHEMA.SPANNER_STATISTICS`. + // + // Executing a SQL statement with an invalid optimizer statistics package + // or with a statistics package that allows garbage collection fails with + // an `INVALID_ARGUMENT` error. + string optimizer_statistics_package = 2; + } + + // Required. The session in which the SQL query should be performed. + string session = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } + ]; + + // The transaction to use. + // + // For queries, if none is provided, the default is a temporary read-only + // transaction with strong concurrency. + // + // Standard DML statements require a read-write transaction. To protect + // against replays, single-use transactions are not supported. The caller + // must either supply an existing transaction ID or begin a new transaction. 
+ // + // Partitioned DML requires an existing Partitioned DML transaction ID. + TransactionSelector transaction = 2; + + // Required. The SQL string. + string sql = 3 [(google.api.field_behavior) = REQUIRED]; + + // Parameter names and values that bind to placeholders in the SQL string. + // + // A parameter placeholder consists of the `@` character followed by the + // parameter name (for example, `@firstName`). Parameter names must conform + // to the naming requirements of identifiers as specified at + // https://cloud.google.com/spanner/docs/lexical#identifiers. + // + // Parameters can appear anywhere that a literal value is expected. The same + // parameter name can be used more than once, for example: + // + // `"WHERE id > @msg_id AND id < @msg_id + 100"` + // + // It's an error to execute a SQL statement with unbound parameters. + google.protobuf.Struct params = 4; + + // It isn't always possible for Cloud Spanner to infer the right SQL type + // from a JSON value. For example, values of type `BYTES` and values + // of type `STRING` both appear in + // [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings. + // + // In these cases, you can use `param_types` to specify the exact + // SQL type for some or all of the SQL statement parameters. See the + // definition of [Type][google.spanner.v1.Type] for more information + // about SQL types. + map<string, Type> param_types = 5; + + // If this request is resuming a previously interrupted SQL statement + // execution, `resume_token` should be copied from the last + // [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the + // interruption. Doing this enables the new SQL statement execution to resume + // where the last one left off. The rest of the request parameters must + // exactly match the request that yielded this token. + bytes resume_token = 6; + + // Used to control the amount of debugging information returned in + // [ResultSetStats][google.spanner.v1.ResultSetStats]. 
If + // [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is + // set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only + // be set to + // [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL]. + QueryMode query_mode = 7; + + // If present, results are restricted to the specified partition + // previously created using `PartitionQuery`. There must be an exact + // match for the values of fields common to this message and the + // `PartitionQueryRequest` message used to create this `partition_token`. + bytes partition_token = 8; + + // A per-transaction sequence number used to identify this request. This field + // makes each request idempotent such that if the request is received multiple + // times, at most one succeeds. + // + // The sequence number must be monotonically increasing within the + // transaction. If a request arrives for the first time with an out-of-order + // sequence number, the transaction can be aborted. Replays of previously + // handled requests yield the same response as the first execution. + // + // Required for DML statements. Ignored for queries. + int64 seqno = 9; + + // Query optimizer configuration to use for the given query. + QueryOptions query_options = 10; + + // Common options for this request. + RequestOptions request_options = 11; + + // Directed read options for this request. + DirectedReadOptions directed_read_options = 15; + + // If this is for a partitioned query and this field is set to `true`, the + // request is executed with Spanner Data Boost independent compute resources. + // + // If the field is set to `true` but the request doesn't set + // `partition_token`, the API returns an `INVALID_ARGUMENT` error. + bool data_boost_enabled = 16; + + // Optional. If set to `true`, this statement marks the end of the + // transaction. After this statement executes, you must commit or abort the + // transaction. 
Attempts to execute any other requests against this + // transaction (including reads and queries) are rejected. + // + // For DML statements, setting this option might cause some error reporting to + // be deferred until commit time (for example, validation of unique + // constraints). Given this, successful execution of a DML statement shouldn't + // be assumed until a subsequent `Commit` call completes successfully. + bool last_statement = 17 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Makes the Spanner requests location-aware if present. + // + // It gives the server hints that can be used to route the request + // to an appropriate server, potentially significantly decreasing latency and + // improving throughput. To achieve improved performance, most fields must be + // filled in with accurate values. + RoutingHint routing_hint = 18 [(google.api.field_behavior) = OPTIONAL]; +} + +// The request for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. +message ExecuteBatchDmlRequest { + // A single DML statement. + message Statement { + // Required. The DML string. + string sql = 1 [(google.api.field_behavior) = REQUIRED]; + + // Parameter names and values that bind to placeholders in the DML string. + // + // A parameter placeholder consists of the `@` character followed by the + // parameter name (for example, `@firstName`). Parameter names can contain + // letters, numbers, and underscores. + // + // Parameters can appear anywhere that a literal value is expected. The + // same parameter name can be used more than once, for example: + // + // `"WHERE id > @msg_id AND id < @msg_id + 100"` + // + // It's an error to execute a SQL statement with unbound parameters. + google.protobuf.Struct params = 2; + + // It isn't always possible for Cloud Spanner to infer the right SQL type + // from a JSON value. 
For example, values of type `BYTES` and values + // of type `STRING` both appear in + // [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as + // JSON strings. + // + // In these cases, `param_types` can be used to specify the exact + // SQL type for some or all of the SQL statement parameters. See the + // definition of [Type][google.spanner.v1.Type] for more information + // about SQL types. + map<string, Type> param_types = 3; + } + + // Required. The session in which the DML statements should be performed. + string session = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } + ]; + + // Required. The transaction to use. Must be a read-write transaction. + // + // To protect against replays, single-use transactions are not supported. The + // caller must either supply an existing transaction ID or begin a new + // transaction. + TransactionSelector transaction = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The list of statements to execute in this batch. Statements are + // executed serially, such that the effects of statement `i` are visible to + // statement `i+1`. Each statement must be a DML statement. Execution stops at + // the first failed statement; the remaining statements are not executed. + // + // Callers must provide at least one statement. + repeated Statement statements = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. A per-transaction sequence number used to identify this request. + // This field makes each request idempotent such that if the request is + // received multiple times, at most one succeeds. + // + // The sequence number must be monotonically increasing within the + // transaction. If a request arrives for the first time with an out-of-order + // sequence number, the transaction might be aborted. Replays of previously + // handled requests yield the same response as the first execution. 
+ int64 seqno = 4 [(google.api.field_behavior) = REQUIRED]; + + // Common options for this request. + RequestOptions request_options = 5; + + // Optional. If set to `true`, this request marks the end of the transaction. + // After these statements execute, you must commit or abort the transaction. + // Attempts to execute any other requests against this transaction + // (including reads and queries) are rejected. + // + // Setting this option might cause some error reporting to be deferred until + // commit time (for example, validation of unique constraints). Given this, + // successful execution of statements shouldn't be assumed until a subsequent + // `Commit` call completes successfully. + bool last_statements = 6 [(google.api.field_behavior) = OPTIONAL]; +} + +// The response for +// [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. Contains a list +// of [ResultSet][google.spanner.v1.ResultSet] messages, one for each DML +// statement that has successfully executed, in the same order as the statements +// in the request. If a statement fails, the status in the response body +// identifies the cause of the failure. +// +// To check for DML statements that failed, use the following approach: +// +// 1. Check the status in the response message. The +// [google.rpc.Code][google.rpc.Code] enum +// value `OK` indicates that all statements were executed successfully. +// 2. If the status was not `OK`, check the number of result sets in the +// response. If the response contains `N` +// [ResultSet][google.spanner.v1.ResultSet] messages, then statement `N+1` in +// the request failed. +// +// Example 1: +// +// * Request: 5 DML statements, all executed successfully. +// * Response: 5 [ResultSet][google.spanner.v1.ResultSet] messages, with the +// status `OK`. +// +// Example 2: +// +// * Request: 5 DML statements. The third statement has a syntax error. 
+// * Response: 2 [ResultSet][google.spanner.v1.ResultSet] messages, and a syntax +// error (`INVALID_ARGUMENT`) +// status. The number of [ResultSet][google.spanner.v1.ResultSet] messages +// indicates that the third statement failed, and the fourth and fifth +// statements were not executed. +message ExecuteBatchDmlResponse { + // One [ResultSet][google.spanner.v1.ResultSet] for each statement in the + // request that ran successfully, in the same order as the statements in the + // request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any + // rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each + // [ResultSet][google.spanner.v1.ResultSet] contain the number of rows + // modified by the statement. + // + // Only the first [ResultSet][google.spanner.v1.ResultSet] in the response + // contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata]. + repeated ResultSet result_sets = 1; + + // If all DML statements are executed successfully, the status is `OK`. + // Otherwise, the error status of the first failed statement. + google.rpc.Status status = 2; + + // Optional. A precommit token is included if the read-write transaction + // is on a multiplexed session. Pass the precommit token with the highest + // sequence number from this transaction attempt should be passed to the + // [Commit][google.spanner.v1.Spanner.Commit] request for this transaction. + MultiplexedSessionPrecommitToken precommit_token = 3 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Options for a `PartitionQueryRequest` and `PartitionReadRequest`. +message PartitionOptions { + // **Note:** This hint is currently ignored by `PartitionQuery` and + // `PartitionRead` requests. + // + // The desired data size for each partition generated. The default for this + // option is currently 1 GiB. This is only a hint. The actual size of each + // partition can be smaller or larger than this size request. 
+ int64 partition_size_bytes = 1; + + // **Note:** This hint is currently ignored by `PartitionQuery` and + // `PartitionRead` requests. + // + // The desired maximum number of partitions to return. For example, this + // might be set to the number of workers available. The default for this + // option is currently 10,000. The maximum value is currently 200,000. This + // is only a hint. The actual number of partitions returned can be smaller or + // larger than this maximum count request. + int64 max_partitions = 2; +} + +// The request for [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] +message PartitionQueryRequest { + // Required. The session used to create the partitions. + string session = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } + ]; + + // Read-only snapshot transactions are supported, read and write and + // single-use transactions are not. + TransactionSelector transaction = 2; + + // Required. The query request to generate partitions for. The request fails + // if the query isn't root partitionable. For a query to be root + // partitionable, it needs to satisfy a few conditions. For example, if the + // query execution plan contains a distributed union operator, then it must be + // the first operator in the plan. For more information about other + // conditions, see [Read data in + // parallel](https://cloud.google.com/spanner/docs/reads#read_data_in_parallel). + // + // The query request must not contain DML commands, such as `INSERT`, + // `UPDATE`, or `DELETE`. Use + // [`ExecuteStreamingSql`][google.spanner.v1.Spanner.ExecuteStreamingSql] with + // a `PartitionedDml` transaction for large, partition-friendly DML + // operations. + string sql = 3 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Parameter names and values that bind to placeholders in the SQL + // string. 
+  //
+  // A parameter placeholder consists of the `@` character followed by the
+  // parameter name (for example, `@firstName`). Parameter names can contain
+  // letters, numbers, and underscores.
+  //
+  // Parameters can appear anywhere that a literal value is expected. The same
+  // parameter name can be used more than once, for example:
+  //
+  // `"WHERE id > @msg_id AND id < @msg_id + 100"`
+  //
+  // It's an error to execute a SQL statement with unbound parameters.
+  google.protobuf.Struct params = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. It isn't always possible for Cloud Spanner to infer the right SQL
+  // type from a JSON value. For example, values of type `BYTES` and values of
+  // type `STRING` both appear in
+  // [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
+  //
+  // In these cases, `param_types` can be used to specify the exact
+  // SQL type for some or all of the SQL query parameters. See the
+  // definition of [Type][google.spanner.v1.Type] for more information
+  // about SQL types.
+  map<string, Type> param_types = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Additional options that affect how many partitions are created.
+  PartitionOptions partition_options = 6;
+}
+
+// The request for [PartitionRead][google.spanner.v1.Spanner.PartitionRead]
+message PartitionReadRequest {
+  // Required. The session used to create the partitions.
+  string session = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" }
+  ];
+
+  // Read only snapshot transactions are supported, read/write and single use
+  // transactions are not.
+  TransactionSelector transaction = 2;
+
+  // Required. The name of the table in the database to be read.
+  string table = 3 [(google.api.field_behavior) = REQUIRED];
+
+  // If non-empty, the name of an index on
+  // [table][google.spanner.v1.PartitionReadRequest.table].
This index is used + // instead of the table primary key when interpreting + // [key_set][google.spanner.v1.PartitionReadRequest.key_set] and sorting + // result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set] + // for further information. + string index = 4; + + // The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be + // returned for each row matching this request. + repeated string columns = 5; + + // Required. `key_set` identifies the rows to be yielded. `key_set` names the + // primary keys of the rows in + // [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless + // [index][google.spanner.v1.PartitionReadRequest.index] is present. If + // [index][google.spanner.v1.PartitionReadRequest.index] is present, then + // [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names + // index keys in [index][google.spanner.v1.PartitionReadRequest.index]. + // + // It isn't an error for the `key_set` to name rows that don't + // exist in the database. Read yields nothing for nonexistent rows. + KeySet key_set = 6 [(google.api.field_behavior) = REQUIRED]; + + // Additional options that affect how many partitions are created. + PartitionOptions partition_options = 9; +} + +// Information returned for each partition returned in a +// PartitionResponse. +message Partition { + // This token can be passed to `Read`, `StreamingRead`, `ExecuteSql`, or + // `ExecuteStreamingSql` requests to restrict the results to those identified + // by this partition token. + bytes partition_token = 1; +} + +// The response for [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] +// or [PartitionRead][google.spanner.v1.Spanner.PartitionRead] +message PartitionResponse { + // Partitions created by this request. + repeated Partition partitions = 1; + + // Transaction created by this request. 
+ Transaction transaction = 2; +} + +// The request for [Read][google.spanner.v1.Spanner.Read] and +// [StreamingRead][google.spanner.v1.Spanner.StreamingRead]. +message ReadRequest { + // An option to control the order in which rows are returned from a read. + enum OrderBy { + // Default value. + // + // `ORDER_BY_UNSPECIFIED` is equivalent to `ORDER_BY_PRIMARY_KEY`. + ORDER_BY_UNSPECIFIED = 0; + + // Read rows are returned in primary key order. + // + // In the event that this option is used in conjunction with the + // `partition_token` field, the API returns an `INVALID_ARGUMENT` error. + ORDER_BY_PRIMARY_KEY = 1; + + // Read rows are returned in any order. + ORDER_BY_NO_ORDER = 2; + } + + // A lock hint mechanism for reads done within a transaction. + enum LockHint { + // Default value. + // + // `LOCK_HINT_UNSPECIFIED` is equivalent to `LOCK_HINT_SHARED`. + LOCK_HINT_UNSPECIFIED = 0; + + // Acquire shared locks. + // + // By default when you perform a read as part of a read-write transaction, + // Spanner acquires shared read locks, which allows other reads to still + // access the data until your transaction is ready to commit. When your + // transaction is committing and writes are being applied, the transaction + // attempts to upgrade to an exclusive lock for any data you are writing. + // For more information about locks, see [Lock + // modes](https://cloud.google.com/spanner/docs/introspection/lock-statistics#explain-lock-modes). + LOCK_HINT_SHARED = 1; + + // Acquire exclusive locks. + // + // Requesting exclusive locks is beneficial if you observe high write + // contention, which means you notice that multiple transactions are + // concurrently trying to read and write to the same data, resulting in a + // large number of aborts. This problem occurs when two transactions + // initially acquire shared locks and then both try to upgrade to exclusive + // locks at the same time. 
In this situation both transactions are waiting + // for the other to give up their lock, resulting in a deadlocked situation. + // Spanner is able to detect this occurring and force one of the + // transactions to abort. However, this is a slow and expensive operation + // and results in lower performance. In this case it makes sense to acquire + // exclusive locks at the start of the transaction because then when + // multiple transactions try to act on the same data, they automatically get + // serialized. Each transaction waits its turn to acquire the lock and + // avoids getting into deadlock situations. + // + // Because the exclusive lock hint is just a hint, it shouldn't be + // considered equivalent to a mutex. In other words, you shouldn't use + // Spanner exclusive locks as a mutual exclusion mechanism for the execution + // of code outside of Spanner. + // + // **Note:** Request exclusive locks judiciously because they block others + // from reading that data for the entire transaction, rather than just when + // the writes are being performed. Unless you observe high write contention, + // you should use the default of shared read locks so you don't prematurely + // block other clients from reading the data that you're writing to. + LOCK_HINT_EXCLUSIVE = 2; + } + + // Required. The session in which the read should be performed. + string session = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } + ]; + + // The transaction to use. If none is provided, the default is a + // temporary read-only transaction with strong concurrency. + TransactionSelector transaction = 2; + + // Required. The name of the table in the database to be read. + string table = 3 [(google.api.field_behavior) = REQUIRED]; + + // If non-empty, the name of an index on + // [table][google.spanner.v1.ReadRequest.table]. 
This index is used instead of + // the table primary key when interpreting + // [key_set][google.spanner.v1.ReadRequest.key_set] and sorting result rows. + // See [key_set][google.spanner.v1.ReadRequest.key_set] for further + // information. + string index = 4; + + // Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be + // returned for each row matching this request. + repeated string columns = 5 [(google.api.field_behavior) = REQUIRED]; + + // Required. `key_set` identifies the rows to be yielded. `key_set` names the + // primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to + // be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present. + // If [index][google.spanner.v1.ReadRequest.index] is present, then + // [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys + // in [index][google.spanner.v1.ReadRequest.index]. + // + // If the [partition_token][google.spanner.v1.ReadRequest.partition_token] + // field is empty, rows are yielded in table primary key order (if + // [index][google.spanner.v1.ReadRequest.index] is empty) or index key order + // (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the + // [partition_token][google.spanner.v1.ReadRequest.partition_token] field + // isn't empty, rows are yielded in an unspecified order. + // + // It isn't an error for the `key_set` to name rows that don't + // exist in the database. Read yields nothing for nonexistent rows. + KeySet key_set = 6 [(google.api.field_behavior) = REQUIRED]; + + // If greater than zero, only the first `limit` rows are yielded. If `limit` + // is zero, the default is no limit. A limit can't be specified if + // `partition_token` is set. + int64 limit = 8; + + // If this request is resuming a previously interrupted read, + // `resume_token` should be copied from the last + // [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the + // interruption. 
Doing this enables the new read to resume where the last read + // left off. The rest of the request parameters must exactly match the request + // that yielded this token. + bytes resume_token = 9; + + // If present, results are restricted to the specified partition + // previously created using `PartitionRead`. There must be an exact + // match for the values of fields common to this message and the + // PartitionReadRequest message used to create this partition_token. + bytes partition_token = 10; + + // Common options for this request. + RequestOptions request_options = 11; + + // Directed read options for this request. + DirectedReadOptions directed_read_options = 14; + + // If this is for a partitioned read and this field is set to `true`, the + // request is executed with Spanner Data Boost independent compute resources. + // + // If the field is set to `true` but the request doesn't set + // `partition_token`, the API returns an `INVALID_ARGUMENT` error. + bool data_boost_enabled = 15; + + // Optional. Order for the returned rows. + // + // By default, Spanner returns result rows in primary key order except for + // PartitionRead requests. For applications that don't require rows to be + // returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting + // `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval, + // resulting in lower latencies in certain cases (for example, bulk point + // lookups). + OrderBy order_by = 16 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Lock Hint for the request, it can only be used with read-write + // transactions. + LockHint lock_hint = 17 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Makes the Spanner requests location-aware if present. + // + // It gives the server hints that can be used to route the request + // to an appropriate server, potentially significantly decreasing latency and + // improving throughput. 
To achieve improved performance, most fields must be + // filled in with accurate values. + RoutingHint routing_hint = 18 [(google.api.field_behavior) = OPTIONAL]; +} + +// The request for +// [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. +message BeginTransactionRequest { + // Required. The session in which the transaction runs. + string session = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } + ]; + + // Required. Options for the new transaction. + TransactionOptions options = 2 [(google.api.field_behavior) = REQUIRED]; + + // Common options for this request. + // Priority is ignored for this request. Setting the priority in this + // `request_options` struct doesn't do anything. To set the priority for a + // transaction, set it on the reads and writes that are part of this + // transaction instead. + RequestOptions request_options = 3; + + // Optional. Required for read-write transactions on a multiplexed session + // that commit mutations but don't perform any reads or queries. You must + // randomly select one of the mutations from the mutation set and send it as a + // part of this request. + Mutation mutation_key = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Makes the Spanner requests location-aware if present. + // + // It gives the server hints that can be used to route the request + // to an appropriate server, potentially significantly decreasing latency and + // improving throughput. To achieve improved performance, most fields must be + // filled in with accurate values. + RoutingHint routing_hint = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// The request for [Commit][google.spanner.v1.Spanner.Commit]. +message CommitRequest { + // Required. The session in which the transaction to be committed is running. 
+ string session = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } + ]; + + // Required. The transaction in which to commit. + oneof transaction { + // Commit a previously-started transaction. + bytes transaction_id = 2; + + // Execute mutations in a temporary transaction. Note that unlike + // commit of a previously-started transaction, commit with a + // temporary transaction is non-idempotent. That is, if the + // `CommitRequest` is sent to Cloud Spanner more than once (for + // instance, due to retries in the application, or in the + // transport library), it's possible that the mutations are + // executed more than once. If this is undesirable, use + // [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and + // [Commit][google.spanner.v1.Spanner.Commit] instead. + TransactionOptions single_use_transaction = 3; + } + + // The mutations to be executed when this transaction commits. All + // mutations are applied atomically, in the order they appear in + // this list. + repeated Mutation mutations = 4; + + // If `true`, then statistics related to the transaction is included in + // the [CommitResponse][google.spanner.v1.CommitResponse.commit_stats]. + // Default value is `false`. + bool return_commit_stats = 5; + + // Optional. The amount of latency this request is configured to incur in + // order to improve throughput. If this field isn't set, Spanner assumes + // requests are relatively latency sensitive and automatically determines an + // appropriate delay time. You can specify a commit delay value between 0 and + // 500 ms. + google.protobuf.Duration max_commit_delay = 8 + [(google.api.field_behavior) = OPTIONAL]; + + // Common options for this request. + RequestOptions request_options = 6; + + // Optional. 
If the read-write transaction was executed on a multiplexed + // session, then you must include the precommit token with the highest + // sequence number received in this transaction attempt. Failing to do so + // results in a `FailedPrecondition` error. + MultiplexedSessionPrecommitToken precommit_token = 9 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Makes the Spanner requests location-aware if present. + // + // It gives the server hints that can be used to route the request + // to an appropriate server, potentially significantly decreasing latency and + // improving throughput. To achieve improved performance, most fields must be + // filled in with accurate values. + RoutingHint routing_hint = 10 [(google.api.field_behavior) = OPTIONAL]; +} + +// The request for [Rollback][google.spanner.v1.Spanner.Rollback]. +message RollbackRequest { + // Required. The session in which the transaction to roll back is running. + string session = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } + ]; + + // Required. The transaction to roll back. + bytes transaction_id = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// The request for [BatchWrite][google.spanner.v1.Spanner.BatchWrite]. +message BatchWriteRequest { + // A group of mutations to be committed together. Related mutations should be + // placed in a group. For example, two mutations inserting rows with the same + // primary key prefix in both parent and child tables are related. + message MutationGroup { + // Required. The mutations in this group. + repeated Mutation mutations = 1 [(google.api.field_behavior) = REQUIRED]; + } + + // Required. The session in which the batch request is to be run. + string session = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } + ]; + + // Common options for this request. 
+ RequestOptions request_options = 3; + + // Required. The groups of mutations to be applied. + repeated MutationGroup mutation_groups = 4 + [(google.api.field_behavior) = REQUIRED]; + + // Optional. If you don't set the `exclude_txn_from_change_streams` option or + // if it's set to `false`, then any change streams monitoring columns modified + // by transactions will capture the updates made within that transaction. + bool exclude_txn_from_change_streams = 5 + [(google.api.field_behavior) = OPTIONAL]; +} + +// The result of applying a batch of mutations. +message BatchWriteResponse { + // The mutation groups applied in this batch. The values index into the + // `mutation_groups` field in the corresponding `BatchWriteRequest`. + repeated int32 indexes = 1; + + // An `OK` status indicates success. Any other status indicates a failure. + google.rpc.Status status = 2; + + // The commit timestamp of the transaction that applied this batch. + // Present if status is OK and the mutation groups were applied, absent + // otherwise. + // + // For mutation groups with conditions, a status=OK and missing + // commit_timestamp means that the mutation groups were not applied due to the + // condition not being satisfied after evaluation. + google.protobuf.Timestamp commit_timestamp = 3; +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/transaction.proto b/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/transaction.proto new file mode 100644 index 000000000000..f7cbccae8b7f --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/transaction.proto @@ -0,0 +1,329 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.spanner.v1; + +import "google/api/field_behavior.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "google/spanner/v1/location.proto"; + +option csharp_namespace = "Google.Cloud.Spanner.V1"; +option go_package = "cloud.google.com/go/spanner/apiv1/spannerpb;spannerpb"; +option java_multiple_files = true; +option java_outer_classname = "TransactionProto"; +option java_package = "com.google.spanner.v1"; +option php_namespace = "Google\\Cloud\\Spanner\\V1"; +option ruby_package = "Google::Cloud::Spanner::V1"; + +// Options to use for transactions. +message TransactionOptions { + // Message type to initiate a read-write transaction. Currently this + // transaction type has no options. + message ReadWrite { + // `ReadLockMode` is used to set the read lock mode for read-write + // transactions. + enum ReadLockMode { + // Default value. + // + // * If isolation level is + // [SERIALIZABLE][google.spanner.v1.TransactionOptions.IsolationLevel.SERIALIZABLE], + // locking semantics default to `PESSIMISTIC`. + // * If isolation level is + // [REPEATABLE_READ][google.spanner.v1.TransactionOptions.IsolationLevel.REPEATABLE_READ], + // locking semantics default to `OPTIMISTIC`. + // * See + // [Concurrency + // control](https://cloud.google.com/spanner/docs/concurrency-control) + // for more details. + READ_LOCK_MODE_UNSPECIFIED = 0; + + // Pessimistic lock mode. + // + // Lock acquisition behavior depends on the isolation level in use. 
In + // [SERIALIZABLE][google.spanner.v1.TransactionOptions.IsolationLevel.SERIALIZABLE] + // isolation, reads and writes acquire necessary locks during transaction + // statement execution. In + // [REPEATABLE_READ][google.spanner.v1.TransactionOptions.IsolationLevel.REPEATABLE_READ] + // isolation, reads that explicitly request to be locked and writes + // acquire locks. + // See + // [Concurrency + // control](https://cloud.google.com/spanner/docs/concurrency-control) for + // details on the types of locks acquired at each transaction step. + PESSIMISTIC = 1; + + // Optimistic lock mode. + // + // Lock acquisition behavior depends on the isolation level in use. In + // both + // [SERIALIZABLE][google.spanner.v1.TransactionOptions.IsolationLevel.SERIALIZABLE] + // and + // [REPEATABLE_READ][google.spanner.v1.TransactionOptions.IsolationLevel.REPEATABLE_READ] + // isolation, reads and writes do not acquire locks during transaction + // statement execution. + // See + // [Concurrency + // control](https://cloud.google.com/spanner/docs/concurrency-control) for + // details on how the guarantees of each isolation level are provided at + // commit time. + OPTIMISTIC = 2; + } + + // Read lock mode for the transaction. + ReadLockMode read_lock_mode = 1; + + // Optional. Clients should pass the transaction ID of the previous + // transaction attempt that was aborted if this transaction is being + // executed on a multiplexed session. + bytes multiplexed_session_previous_transaction_id = 2 + [(google.api.field_behavior) = OPTIONAL]; + } + + // Message type to initiate a Partitioned DML transaction. + message PartitionedDml {} + + // Message type to initiate a read-only transaction. + message ReadOnly { + // How to choose the timestamp for the read-only transaction. + oneof timestamp_bound { + // Read at a timestamp where all previously committed transactions + // are visible. + bool strong = 1; + + // Executes all reads at a timestamp >= `min_read_timestamp`. 
+ // + // This is useful for requesting fresher data than some previous + // read, or data that is fresh enough to observe the effects of some + // previously committed transaction whose timestamp is known. + // + // Note that this option can only be used in single-use transactions. + // + // A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. + // Example: `"2014-10-02T15:01:23.045123456Z"`. + google.protobuf.Timestamp min_read_timestamp = 2; + + // Read data at a timestamp >= `NOW - max_staleness` + // seconds. Guarantees that all writes that have committed more + // than the specified number of seconds ago are visible. Because + // Cloud Spanner chooses the exact timestamp, this mode works even if + // the client's local clock is substantially skewed from Cloud Spanner + // commit timestamps. + // + // Useful for reading the freshest data available at a nearby + // replica, while bounding the possible staleness if the local + // replica has fallen behind. + // + // Note that this option can only be used in single-use + // transactions. + google.protobuf.Duration max_staleness = 3; + + // Executes all reads at the given timestamp. Unlike other modes, + // reads at a specific timestamp are repeatable; the same read at + // the same timestamp always returns the same data. If the + // timestamp is in the future, the read is blocked until the + // specified timestamp, modulo the read's deadline. + // + // Useful for large scale consistent reads such as mapreduces, or + // for coordinating many reads against a consistent snapshot of the + // data. + // + // A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. + // Example: `"2014-10-02T15:01:23.045123456Z"`. + google.protobuf.Timestamp read_timestamp = 4; + + // Executes all reads at a timestamp that is `exact_staleness` + // old. The timestamp is chosen soon after the read is started. 
+ // + // Guarantees that all writes that have committed more than the + // specified number of seconds ago are visible. Because Cloud Spanner + // chooses the exact timestamp, this mode works even if the client's + // local clock is substantially skewed from Cloud Spanner commit + // timestamps. + // + // Useful for reading at nearby replicas without the distributed + // timestamp negotiation overhead of `max_staleness`. + google.protobuf.Duration exact_staleness = 5; + } + + // If true, the Cloud Spanner-selected read timestamp is included in + // the [Transaction][google.spanner.v1.Transaction] message that describes + // the transaction. + bool return_read_timestamp = 6; + } + + // `IsolationLevel` is used when setting the [isolation + // level](https://cloud.google.com/spanner/docs/isolation-levels) for a + // transaction. + enum IsolationLevel { + // Default value. + // + // If the value is not specified, the `SERIALIZABLE` isolation level is + // used. + ISOLATION_LEVEL_UNSPECIFIED = 0; + + // All transactions appear as if they executed in a serial order, even if + // some of the reads, writes, and other operations of distinct transactions + // actually occurred in parallel. Spanner assigns commit timestamps that + // reflect the order of committed transactions to implement this property. + // Spanner offers a stronger guarantee than serializability called external + // consistency. For more information, see + // [TrueTime and external + // consistency](https://cloud.google.com/spanner/docs/true-time-external-consistency#serializability). + SERIALIZABLE = 1; + + // All reads performed during the transaction observe a consistent snapshot + // of the database, and the transaction is only successfully committed in + // the absence of conflicts between its updates and any concurrent updates + // that have occurred since that snapshot. 
Consequently, in contrast to + // `SERIALIZABLE` transactions, only write-write conflicts are detected in + // snapshot transactions. + // + // This isolation level does not support read-only and partitioned DML + // transactions. + // + // When `REPEATABLE_READ` is specified on a read-write transaction, the + // locking semantics default to `OPTIMISTIC`. + REPEATABLE_READ = 2; + } + + // Required. The type of transaction. + oneof mode { + // Transaction may write. + // + // Authorization to begin a read-write transaction requires + // `spanner.databases.beginOrRollbackReadWriteTransaction` permission + // on the `session` resource. + ReadWrite read_write = 1; + + // Partitioned DML transaction. + // + // Authorization to begin a Partitioned DML transaction requires + // `spanner.databases.beginPartitionedDmlTransaction` permission + // on the `session` resource. + PartitionedDml partitioned_dml = 3; + + // Transaction does not write. + // + // Authorization to begin a read-only transaction requires + // `spanner.databases.beginReadOnlyTransaction` permission + // on the `session` resource. + ReadOnly read_only = 2; + } + + // When `exclude_txn_from_change_streams` is set to `true`, it prevents read + // or write transactions from being tracked in change streams. + // + // * If the DDL option `allow_txn_exclusion` is set to `true`, then the + // updates + // made within this transaction aren't recorded in the change stream. + // + // * If you don't set the DDL option `allow_txn_exclusion` or if it's + // set to `false`, then the updates made within this transaction are + // recorded in the change stream. + // + // When `exclude_txn_from_change_streams` is set to `false` or not set, + // modifications from this transaction are recorded in all change streams + // that are tracking columns modified by these transactions. 
+ // + // The `exclude_txn_from_change_streams` option can only be specified + // for read-write or partitioned DML transactions, otherwise the API returns + // an `INVALID_ARGUMENT` error. + bool exclude_txn_from_change_streams = 5; + + // Isolation level for the transaction. + IsolationLevel isolation_level = 6; +} + +// A transaction. +message Transaction { + // `id` may be used to identify the transaction in subsequent + // [Read][google.spanner.v1.Spanner.Read], + // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], + // [Commit][google.spanner.v1.Spanner.Commit], or + // [Rollback][google.spanner.v1.Spanner.Rollback] calls. + // + // Single-use read-only transactions do not have IDs, because + // single-use transactions do not support multiple requests. + bytes id = 1; + + // For snapshot read-only transactions, the read timestamp chosen + // for the transaction. Not returned by default: see + // [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp]. + // + // A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. + // Example: `"2014-10-02T15:01:23.045123456Z"`. + google.protobuf.Timestamp read_timestamp = 2; + + // A precommit token is included in the response of a BeginTransaction + // request if the read-write transaction is on a multiplexed session and + // a mutation_key was specified in the + // [BeginTransaction][google.spanner.v1.BeginTransactionRequest]. + // The precommit token with the highest sequence number from this transaction + // attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit] + // request for this transaction. + MultiplexedSessionPrecommitToken precommit_token = 3; + + // Optional. A cache update expresses a set of changes the client should + // incorporate into its location cache. The client should discard the changes + // if they are older than the data it already has. 
This data can be obtained + // in response to requests that included a `RoutingHint` field, but may also + // be obtained by explicit location-fetching RPCs which may be added in the + // future. + CacheUpdate cache_update = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// This message is used to select the transaction in which a +// [Read][google.spanner.v1.Spanner.Read] or +// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] call runs. +// +// See [TransactionOptions][google.spanner.v1.TransactionOptions] for more +// information about transactions. +message TransactionSelector { + // If no fields are set, the default is a single use transaction + // with strong concurrency. + oneof selector { + // Execute the read or SQL query in a temporary transaction. + // This is the most efficient way to execute a transaction that + // consists of a single SQL query. + TransactionOptions single_use = 1; + + // Execute the read or SQL query in a previously-started transaction. + bytes id = 2; + + // Begin a new transaction and execute this read or SQL query in + // it. The transaction ID of the new transaction is returned in + // [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction], + // which is a [Transaction][google.spanner.v1.Transaction]. + TransactionOptions begin = 3; + } +} + +// When a read-write transaction is executed on a multiplexed session, +// this precommit token is sent back to the client +// as a part of the [Transaction][google.spanner.v1.Transaction] message in the +// [BeginTransaction][google.spanner.v1.BeginTransactionRequest] response and +// also as a part of the [ResultSet][google.spanner.v1.ResultSet] and +// [PartialResultSet][google.spanner.v1.PartialResultSet] responses. +message MultiplexedSessionPrecommitToken { + // Opaque precommit token. + bytes precommit_token = 1; + + // An incrementing seq number is generated on every precommit token + // that is returned. 
Clients should remember the precommit token with the + // highest sequence number from the current transaction attempt. + int32 seq_num = 2; +} diff --git a/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/type.proto b/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/type.proto new file mode 100644 index 000000000000..e3e85a770afc --- /dev/null +++ b/java-spanner/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/type.proto @@ -0,0 +1,214 @@ +// Copyright 2026 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.spanner.v1; + +import "google/api/field_behavior.proto"; + +option csharp_namespace = "Google.Cloud.Spanner.V1"; +option go_package = "cloud.google.com/go/spanner/apiv1/spannerpb;spannerpb"; +option java_multiple_files = true; +option java_outer_classname = "TypeProto"; +option java_package = "com.google.spanner.v1"; +option php_namespace = "Google\\Cloud\\Spanner\\V1"; +option ruby_package = "Google::Cloud::Spanner::V1"; + +// `Type` indicates the type of a Cloud Spanner value, as might be stored in a +// table cell or returned from an SQL query. +message Type { + // Required. The [TypeCode][google.spanner.v1.TypeCode] for this type. 
+ TypeCode code = 1 [(google.api.field_behavior) = REQUIRED]; + + // If [code][google.spanner.v1.Type.code] == + // [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the + // type of the array elements. + Type array_element_type = 2; + + // If [code][google.spanner.v1.Type.code] == + // [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides + // type information for the struct's fields. + StructType struct_type = 3; + + // The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that + // disambiguates SQL type that Spanner will use to represent values of this + // type during query processing. This is necessary for some type codes because + // a single [TypeCode][google.spanner.v1.TypeCode] can be mapped to different + // SQL types depending on the SQL dialect. + // [type_annotation][google.spanner.v1.Type.type_annotation] typically is not + // needed to process the content of a value (it doesn't affect serialization) + // and clients can ignore it on the read path. + TypeAnnotationCode type_annotation = 4; + + // If [code][google.spanner.v1.Type.code] == + // [PROTO][google.spanner.v1.TypeCode.PROTO] or + // [code][google.spanner.v1.Type.code] == + // [ENUM][google.spanner.v1.TypeCode.ENUM], then `proto_type_fqn` is the fully + // qualified name of the proto type representing the proto/enum definition. + string proto_type_fqn = 5; +} + +// `StructType` defines the fields of a +// [STRUCT][google.spanner.v1.TypeCode.STRUCT] type. +message StructType { + // Message representing a single field of a struct. + message Field { + // The name of the field. For reads, this is the column name. For + // SQL queries, it is the column alias (e.g., `"Word"` in the + // query `"SELECT 'hello' AS Word"`), or the column name (e.g., + // `"ColName"` in the query `"SELECT ColName FROM Table"`). Some + // columns might have an empty name (e.g., `"SELECT + // UPPER(ColName)"`). 
Note that a query result can contain + // multiple fields with the same name. + string name = 1; + + // The type of the field. + Type type = 2; + } + + // The list of fields that make up this struct. Order is + // significant, because values of this struct type are represented as + // lists, where the order of field values matches the order of + // fields in the [StructType][google.spanner.v1.StructType]. In turn, the + // order of fields matches the order of columns in a read request, or the + // order of fields in the `SELECT` clause of a query. + repeated Field fields = 1; +} + +// `TypeCode` is used as part of [Type][google.spanner.v1.Type] to +// indicate the type of a Cloud Spanner value. +// +// Each legal value of a type can be encoded to or decoded from a JSON +// value, using the encodings described below. All Cloud Spanner values can +// be `null`, regardless of type; `null`s are always encoded as a JSON +// `null`. +enum TypeCode { + // Not specified. + TYPE_CODE_UNSPECIFIED = 0; + + // Encoded as JSON `true` or `false`. + BOOL = 1; + + // Encoded as `string`, in decimal format. + INT64 = 2; + + // Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or + // `"-Infinity"`. + FLOAT64 = 3; + + // Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or + // `"-Infinity"`. + FLOAT32 = 15; + + // Encoded as `string` in RFC 3339 timestamp format. The time zone + // must be present, and must be `"Z"`. + // + // If the schema has the column option + // `allow_commit_timestamp=true`, the placeholder string + // `"spanner.commit_timestamp()"` can be used to instruct the system + // to insert the commit timestamp associated with the transaction + // commit. + TIMESTAMP = 4; + + // Encoded as `string` in RFC 3339 date format. + DATE = 5; + + // Encoded as `string`. + STRING = 6; + + // Encoded as a base64-encoded `string`, as described in RFC 4648, + // section 4. 
+ BYTES = 7; + + // Encoded as `list`, where the list elements are represented + // according to + // [array_element_type][google.spanner.v1.Type.array_element_type]. + ARRAY = 8; + + // Encoded as `list`, where list element `i` is represented according + // to [struct_type.fields[i]][google.spanner.v1.StructType.fields]. + STRUCT = 9; + + // Encoded as `string`, in decimal format or scientific notation format. + // Decimal format: + // `[+-]Digits[.[Digits]]` or + // `[+-][Digits].Digits` + // + // Scientific notation: + // `[+-]Digits[.[Digits]][ExponentIndicator[+-]Digits]` or + // `[+-][Digits].Digits[ExponentIndicator[+-]Digits]` + // (ExponentIndicator is `"e"` or `"E"`) + NUMERIC = 10; + + // Encoded as a JSON-formatted `string` as described in RFC 7159. The + // following rules are applied when parsing JSON input: + // + // - Whitespace characters are not preserved. + // - If a JSON object has duplicate keys, only the first key is preserved. + // - Members of a JSON object are not guaranteed to have their order + // preserved. + // - JSON array elements will have their order preserved. + JSON = 11; + + // Encoded as a base64-encoded `string`, as described in RFC 4648, + // section 4. + PROTO = 13; + + // Encoded as `string`, in decimal format. + ENUM = 14; + + // Encoded as `string`, in `ISO8601` duration format - + // `P[n]Y[n]M[n]DT[n]H[n]M[n[.fraction]]S` + // where `n` is an integer. + // For example, `P1Y2M3DT4H5M6.5S` represents time duration of 1 year, 2 + // months, 3 days, 4 hours, 5 minutes, and 6.5 seconds. + INTERVAL = 16; + + // Encoded as `string`, in lower-case hexa-decimal format, as described + // in RFC 9562, section 4. + UUID = 17; +} + +// `TypeAnnotationCode` is used as a part of [Type][google.spanner.v1.Type] to +// disambiguate SQL types that should be used for a given Cloud Spanner value. +// Disambiguation is needed because the same Cloud Spanner type can be mapped to +// different SQL types depending on SQL dialect. 
TypeAnnotationCode doesn't +// affect the way value is serialized. +enum TypeAnnotationCode { + // Not specified. + TYPE_ANNOTATION_CODE_UNSPECIFIED = 0; + + // PostgreSQL compatible NUMERIC type. This annotation needs to be applied to + // [Type][google.spanner.v1.Type] instances having + // [NUMERIC][google.spanner.v1.TypeCode.NUMERIC] type code to specify that + // values of this type should be treated as PostgreSQL NUMERIC values. + // Currently this annotation is always needed for + // [NUMERIC][google.spanner.v1.TypeCode.NUMERIC] when a client interacts with + // PostgreSQL-enabled Spanner databases. + PG_NUMERIC = 2; + + // PostgreSQL compatible JSONB type. This annotation needs to be applied to + // [Type][google.spanner.v1.Type] instances having + // [JSON][google.spanner.v1.TypeCode.JSON] type code to specify that values of + // this type should be treated as PostgreSQL JSONB values. Currently this + // annotation is always needed for [JSON][google.spanner.v1.TypeCode.JSON] + // when a client interacts with PostgreSQL-enabled Spanner databases. + PG_JSONB = 3; + + // PostgreSQL compatible OID type. This annotation can be used by a client + // interacting with PostgreSQL-enabled Spanner database to specify that a + // value should be treated using the semantics of the OID type. 
+ PG_OID = 4; +} diff --git a/java-spanner/releases.txt b/java-spanner/releases.txt new file mode 100644 index 000000000000..d2bd82bc1fb0 --- /dev/null +++ b/java-spanner/releases.txt @@ -0,0 +1,71 @@ +6739e12:1.47.0 +100cc59:1.46.0 +c88eb1d:1.45.0 +6afd140:1.44.0 +755fd13:1.43.0 +dfe5da3:1.42.0 +44fa57b:1.41.0 +27dc689:1.38.0 +16e4851:1.37.0 +476c4be:1.36.0 +403cd0e:1.35.0 +7e6d7ab:1.34.0 +6d87b65:1.33.0 +2c35b7b:1.32.0 +b9a7f53:1.31.0 +3978864:1.30.0 +0055e0b:1.29.0 +a0ae465:1.28.0 +bbaec8b:1.27.0 +562d0bf:1.26.0 +d1122c9:1.25.0 +04ac5fe:1.24.0 +0fdbb4a:1.23.0 +a892d71:1.22.0 +098e182:1.21.0 +cb48702:1.20.0 +88af93f:1.19.0 +393ab17:1.18.0 +7301f03:1.17.0 +1e748c6:1.16.0 +d2215ef:1.15.0 +2ede22f:1.14.0 +2545e09:1.13.0 +bc3d800:1.12.0 +f892ee9:1.11.0 +e0fc9a4:1.10.0 +ea0d715:1.9.0 +9bca164:1.8.0 +92d4c0c:1.7.0 +e440776:1.6.0 +6ba35ba:1.5.0 +e6c52b5:1.4.0 +2cc884d:1.3.0 +d51f49f:1.2.0 +4ab4705:1.1.0 +4fe36e6:1.0.0 +fddc8b0:0.72.0-beta +50c96ae:0.71.0-beta +567aa6f:0.70.0-beta +2eca299:0.69.0-beta +cf33def:0.68.0-beta +ea5608d:0.67.0-beta +e006e5b:0.66.0-beta +9e8fce1:0.65.0-beta +3000a36:0.64.0-beta +07ef452:0.63.0-beta +8e7a255:0.62.0-beta +eeaff12:0.61.0-beta +3d387a3:0.60.0-beta +0dc9bc5:0.59.0-beta +2f112a3:0.58.0-beta +85bc9e1:0.56.0-beta +f3c446e:0.55.1-beta +844bc14:0.55.0-beta +0d466be:0.54.0-beta +9d30034:0.53.0-beta +d768f1a:0.52.0-beta +3e6489d:0.51.0-beta +12e3e81:0.50.0-beta +6398093:0.49.0-beta +aa7cacb:0.48.0-beta diff --git a/java-spanner/samples/README.md b/java-spanner/samples/README.md new file mode 100644 index 000000000000..d4ad2ac70351 --- /dev/null +++ b/java-spanner/samples/README.md @@ -0,0 +1,40 @@ +# Getting Started with Cloud Spanner and the Google Cloud Client libraries + + +Open in Cloud Shell + +[Cloud Spanner][Spanner] is a horizontally-scalable database-as-a-service +with transactions and SQL support. 
+These sample Java applications demonstrate how to access the Spanner API using +the [Google Cloud Client Library for Java][java-spanner]. + +[Spanner]: https://cloud.google.com/spanner/ +[java-spanner]: https://github.com/googleapis/java-spanner + +## Quickstart + +Install [Maven](http://maven.apache.org/). + +Build your project from the root directory (`java-spanner`): + + mvn clean package -DskipTests + cd samples/snippets + mvn package + +Every subsequent command here should be run from a subdirectory `samples/snippets`. + +### Running samples + +Usage: + + java -jar target/spanner-snippets/spanner-google-cloud-samples.jar operation my-instance my-database + +#### Examples + +Create Database: + + java -jar target/spanner-google-cloud-samples-jar-with-dependencies.jar my-instance my-database + +Listing database operations: + + java -jar target/spanner-snippets/spanner-google-cloud-samples.jar listdatabaseoperations my-instance my-database \ No newline at end of file diff --git a/java-spanner/samples/install-without-bom/pom.xml b/java-spanner/samples/install-without-bom/pom.xml new file mode 100644 index 000000000000..a504dc626be7 --- /dev/null +++ b/java-spanner/samples/install-without-bom/pom.xml @@ -0,0 +1,169 @@ + + + 4.0.0 + com.google.cloud + spanner-install-without-bom + jar + Google Cloud Spanner Install Without Bom + https://github.com/googleapis/java-spanner + + + + com.google.cloud.samples + shared-configuration + 1.2.2 + + + + + 1.8 + 1.8 + UTF-8 + 0.31.1 + 2.84.0 + 3.85.0 + + + + + + + com.google.cloud + google-cloud-spanner + 6.110.0 + + + + + io.opencensus + opencensus-api + ${opencensus.version} + + + io.opencensus + opencensus-impl + ${opencensus.version} + runtime + + + io.opencensus + opencensus-contrib-zpages + ${opencensus.version} + + + io.opencensus + opencensus-exporter-trace-stackdriver + ${opencensus.version} + + + com.google.cloud + google-cloud-trace + + + + + io.opencensus + opencensus-exporter-stats-stackdriver + 
${opencensus.version} + + + com.google.cloud + google-cloud-monitoring + + + + + io.opencensus + opencensus-contrib-grpc-metrics + ${opencensus.version} + + + com.google.cloud + google-cloud-trace + ${trace.version} + + + com.google.cloud + google-cloud-monitoring + ${cloudmonitoring.version} + + + + junit + junit + 4.13.2 + test + + + com.google.truth + truth + 1.4.5 + test + + + + + + + + ../snippets/src/main/resources + + + + + org.codehaus.mojo + build-helper-maven-plugin + 3.6.1 + + + add-snippets-source + + add-source + + + + ../snippets/src/main/java + + + + + add-snippets-tests + + add-test-source + + + + ../snippets/src/test/java + + + + + + + org.apache.maven.plugins + maven-failsafe-plugin + 3.5.5 + + 10 + false + + java-sample-integration-tests + java-client-mr-integration-tests + nam11 + us-east1 + cmek-test-key-ring + cmek-test-key + mysample + quick-db + + + **/SpannerSampleIT.java + + + + + + diff --git a/java-spanner/samples/pom.xml b/java-spanner/samples/pom.xml new file mode 100644 index 000000000000..d72a58cea5d7 --- /dev/null +++ b/java-spanner/samples/pom.xml @@ -0,0 +1,57 @@ + + + 4.0.0 + com.google.cloud + google-cloud-spanner-samples + 0.0.1-SNAPSHOT + pom + Google Cloud Spanner Samples Parent + https://github.com/googleapis/java-spanner + + Java idiomatic client for Google Cloud Platform services. 
+ + + + + com.google.cloud.samples + shared-configuration + 1.2.2 + + + + + 1.8 + 1.8 + UTF-8 + + + + install-without-bom + snapshot + snippets + + + + + + org.apache.maven.plugins + maven-deploy-plugin + 3.1.4 + + true + + + + org.sonatype.plugins + nexus-staging-maven-plugin + 1.7.0 + + true + + + + + diff --git a/java-spanner/samples/snapshot/pom.xml b/java-spanner/samples/snapshot/pom.xml new file mode 100644 index 000000000000..9059d7937fa5 --- /dev/null +++ b/java-spanner/samples/snapshot/pom.xml @@ -0,0 +1,169 @@ + + + 4.0.0 + com.google.cloud + spanner-snapshot + jar + Google Cloud Spanner Snapshot Samples + https://github.com/googleapis/java-spanner + + + + com.google.cloud.samples + shared-configuration + 1.2.2 + + + + + 1.8 + 1.8 + UTF-8 + 0.31.1 + 2.84.0 + 3.85.0 + + + + + + com.google.cloud + google-cloud-spanner + 6.112.1-SNAPSHOT + + + + + io.opencensus + opencensus-api + ${opencensus.version} + + + io.opencensus + opencensus-impl + ${opencensus.version} + runtime + + + io.opencensus + opencensus-contrib-zpages + ${opencensus.version} + + + io.opencensus + opencensus-exporter-trace-stackdriver + ${opencensus.version} + + + com.google.cloud + google-cloud-trace + + + + + io.opencensus + opencensus-exporter-stats-stackdriver + ${opencensus.version} + + + com.google.cloud + google-cloud-monitoring + + + + + io.opencensus + opencensus-contrib-grpc-metrics + ${opencensus.version} + + + com.google.cloud + google-cloud-trace + ${trace.version} + + + com.google.cloud + google-cloud-monitoring + ${cloudmonitoring.version} + + + + junit + junit + 4.13.2 + test + + + com.google.truth + truth + 1.4.5 + test + + + + + + + + ../snippets/src/main/resources + + + + + org.codehaus.mojo + build-helper-maven-plugin + 3.6.1 + + + add-snippets-source + + add-source + + + + ../snippets/src/main/java + + + + + add-snippets-tests + + add-test-source + + + + ../snippets/src/test/java + + + + + + + org.apache.maven.plugins + maven-failsafe-plugin + 3.5.5 + + 10 + false + + 
java-sample-integration-tests + java-client-mr-integration-tests + nam11 + us-east1 + cmek-test-key-ring + cmek-test-key + mysample + mysample-instance + quick-db + + + **/SpannerSampleIT.java + + + + + + diff --git a/java-spanner/samples/snippets/pom.xml b/java-spanner/samples/snippets/pom.xml new file mode 100644 index 000000000000..23c24291e28d --- /dev/null +++ b/java-spanner/samples/snippets/pom.xml @@ -0,0 +1,249 @@ + + + 4.0.0 + com.google.cloud + spanner-snippets + jar + Google Cloud Spanner Snippets + https://github.com/googleapis/java-spanner + + + + com.google.cloud.samples + shared-configuration + 1.2.2 + + + + + 1.8 + 1.8 + UTF-8 + 0.31.1 + + + + + + + + com.google.cloud + libraries-bom + 26.76.0 + pom + import + + + + + + + com.google.cloud + google-cloud-spanner + + + + + io.opencensus + opencensus-api + ${opencensus.version} + + + io.opencensus + opencensus-impl + ${opencensus.version} + runtime + + + io.opencensus + opencensus-contrib-zpages + ${opencensus.version} + + + io.opencensus + opencensus-exporter-trace-stackdriver + ${opencensus.version} + + + com.google.cloud + google-cloud-trace + + + + + io.opencensus + opencensus-exporter-stats-stackdriver + ${opencensus.version} + + + com.google.cloud + google-cloud-monitoring + + + + + io.opencensus + opencensus-contrib-grpc-metrics + ${opencensus.version} + + + + com.google.cloud + google-cloud-trace + + + + com.google.cloud + google-cloud-monitoring + + + + junit + junit + 4.13.2 + test + + + com.google.truth + truth + 1.4.5 + test + + + + + integration-tests + + true + + + + + org.apache.maven.plugins + maven-failsafe-plugin + 3.5.5 + + 10 + false + + java-sample-integration-tests + java-client-mr-integration-tests + nam11 + us-east1 + cmek-test-key-ring + cmek-test-key + mysample + quick-db + + + **/SpannerSampleIT.java + + + + + + + + slow-tests + + + + org.apache.maven.plugins + maven-failsafe-plugin + 3.5.5 + + 10 + false + + java-sample-integration-tests + java-client-mr-integration-tests + 
nam11 + us-east1 + cmek-test-key-ring + cmek-test-key + mysample + quick-db + + + **/SpannerSampleIT.java + + + + + + + + + + + maven-resources-plugin + + + copy-resources + validate + + copy-resources + + + ${project.build.directory}/spanner-snippets + + + resources + true + + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + + copy-dependencies + prepare-package + + copy-dependencies + + + ${project.build.directory}/spanner-snippets/lib + false + false + true + + + + + + org.apache.maven.plugins + maven-jar-plugin + + spanner-snippets/spanner-google-cloud-samples + + false + + com.example.spanner.admin.archived.SpannerSample + true + lib/ + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + 3.6.0 + + + **/SingerProto.java + + + + + diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/AddAndDropDatabaseRole.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AddAndDropDatabaseRole.java new file mode 100644 index 000000000000..d3de612098d9 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AddAndDropDatabaseRole.java @@ -0,0 +1,84 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_add_and_drop_database_role] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.DatabaseName; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class AddAndDropDatabaseRole { + + static void addAndDropDatabaseRole() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String parentRole = "parent_role"; + String childRole = "child_role"; + addAndDropDatabaseRole(projectId, instanceId, databaseId, parentRole, childRole, "Albums"); + } + + static void addAndDropDatabaseRole( + String projectId, String instanceId, String databaseId, + String parentRole, String childRole, String... 
tables) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + System.out.println("Waiting for role create operation to complete..."); + List roleStatements = new ArrayList<>(ImmutableList.of( + String.format("CREATE ROLE %s", parentRole), + String.format("CREATE ROLE %s", childRole), + String.format("GRANT ROLE %s TO ROLE %s", parentRole, childRole))); + for (String table : tables) { + roleStatements.add(String.format("GRANT SELECT ON TABLE %s TO ROLE %s", table, parentRole)); + } + databaseAdminClient.updateDatabaseDdlAsync( + DatabaseName.of(projectId, instanceId, databaseId), roleStatements) + .get(5, TimeUnit.MINUTES); + System.out.printf( + "Created roles %s and %s and granted privileges%n", parentRole, childRole); + // Delete role and membership. + System.out.println("Waiting for role revoke & drop operation to complete..."); + databaseAdminClient.updateDatabaseDdlAsync( + DatabaseName.of(projectId, instanceId, databaseId), + ImmutableList.of( + String.format("REVOKE ROLE %s FROM ROLE %s", parentRole, childRole), + String.format("DROP ROLE %s", childRole))).get(5, TimeUnit.MINUTES); + System.out.printf("Revoked privileges and dropped role %s%n", childRole); + } catch (ExecutionException | TimeoutException e) { + System.out.printf( + "Error: AddAndDropDatabaseRole failed with error message %s\n", e.getMessage()); + e.printStackTrace(); + } catch (InterruptedException e) { + System.out.println( + "Error: Waiting for AddAndDropDatabaseRole operation to finish was interrupted"); + } + } +} +// [END spanner_add_and_drop_database_role] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/AddJsonColumnSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AddJsonColumnSample.java new file mode 100644 index 000000000000..c87b25ff476e --- /dev/null +++ 
b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AddJsonColumnSample.java @@ -0,0 +1,56 @@ +/* + * Copyright 2021 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_add_json_column] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.DatabaseName; +import java.util.concurrent.ExecutionException; + +class AddJsonColumnSample { + + static void addJsonColumn() throws InterruptedException, ExecutionException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + addJsonColumn(projectId, instanceId, databaseId); + } + + static void addJsonColumn(String projectId, String instanceId, String databaseId) + throws InterruptedException, ExecutionException { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + // Wait for the operation to finish. + // This will throw an ExecutionException if the operation fails. 
+ databaseAdminClient.updateDatabaseDdlAsync( + DatabaseName.of(projectId, instanceId, databaseId), + ImmutableList.of("ALTER TABLE Venues ADD COLUMN VenueDetails JSON")).get(); + System.out.printf("Successfully added column `VenueDetails`%n"); + } + } +} +// [END spanner_add_json_column] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/AddJsonbColumnSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AddJsonbColumnSample.java new file mode 100644 index 000000000000..ab2607c49867 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AddJsonbColumnSample.java @@ -0,0 +1,57 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_postgresql_jsonb_add_column] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.DatabaseName; +import java.util.concurrent.ExecutionException; + +class AddJsonbColumnSample { + + static void addJsonbColumn() throws InterruptedException, ExecutionException { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + addJsonbColumn(projectId, instanceId, databaseId); + } + + static void addJsonbColumn(String projectId, String instanceId, String databaseId) + throws InterruptedException, ExecutionException { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + // JSONB datatype is only supported with PostgreSQL-dialect databases. + // Wait for the operation to finish. + // This will throw an ExecutionException if the operation fails. + databaseAdminClient.updateDatabaseDdlAsync( + DatabaseName.of(projectId, instanceId, databaseId), + ImmutableList.of("ALTER TABLE Venues ADD COLUMN VenueDetails JSONB")).get(); + System.out.printf("Successfully added column `VenueDetails`%n"); + } + } +} +// [END spanner_postgresql_jsonb_add_column] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/AddNumericColumnSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AddNumericColumnSample.java new file mode 100644 index 000000000000..00cfb848e706 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AddNumericColumnSample.java @@ -0,0 +1,56 @@ +/* + * Copyright 2020 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_add_numeric_column] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.DatabaseName; +import java.util.concurrent.ExecutionException; + +class AddNumericColumnSample { + + static void addNumericColumn() throws InterruptedException, ExecutionException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + addNumericColumn(projectId, instanceId, databaseId); + } + + static void addNumericColumn(String projectId, String instanceId, String databaseId) + throws InterruptedException, ExecutionException { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + // Wait for the operation to finish. + // This will throw an ExecutionException if the operation fails. + databaseAdminClient.updateDatabaseDdlAsync( + DatabaseName.of(projectId, instanceId, databaseId), + ImmutableList.of("ALTER TABLE Venues ADD COLUMN Revenue NUMERIC")).get(); + System.out.printf("Successfully added column `Revenue`%n"); + } + } +} +// [END spanner_add_numeric_column] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/AddProtoColumnSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AddProtoColumnSample.java new file mode 100644 index 000000000000..36be70034f79 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AddProtoColumnSample.java @@ -0,0 +1,76 @@ +/* + * Copyright 2024 Google Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_add_proto_type_columns] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.ByteString; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; +import java.io.IOException; +import java.io.InputStream; +import java.util.concurrent.ExecutionException; + +class AddProtoColumnSample { + + static void addProtoColumn() throws InterruptedException, ExecutionException, IOException { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + addProtoColumn(projectId, instanceId, databaseId); + } + + static void addProtoColumn(String projectId, String instanceId, String databaseId) + throws InterruptedException, ExecutionException, IOException { + InputStream in = + AddProtoColumnSample.class + .getClassLoader() + .getResourceAsStream("com/example/spanner/descriptors.pb"); + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + UpdateDatabaseDdlRequest request = + UpdateDatabaseDdlRequest.newBuilder() + .setDatabase(DatabaseName.of(projectId, instanceId, databaseId).toString()) + .addAllStatements( + ImmutableList.of( + "CREATE PROTO BUNDLE (" + + "examples.spanner.music.SingerInfo," + + "examples.spanner.music.Genre," + + ")", + "ALTER TABLE Singers ADD COLUMN SingerInfo examples.spanner.music.SingerInfo", + "ALTER TABLE Singers ADD COLUMN " + + "SingerInfoArray ARRAY", + "ALTER TABLE Singers ADD COLUMN SingerGenre examples.spanner.music.Genre", + "ALTER TABLE Singers ADD COLUMN " + + "SingerGenreArray ARRAY")) + .setProtoDescriptors(ByteString.readFrom(in)) + .build(); + // Wait for the operation to finish. + // This will throw an ExecutionException if the operation fails. 
+ databaseAdminClient.updateDatabaseDdlAsync(request).get(); + System.out.printf("Added Proto columns %n"); + } + } +} +// [END spanner_add_proto_type_columns] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/AlterSequenceSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AlterSequenceSample.java new file mode 100644 index 000000000000..641449ace9f6 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AlterSequenceSample.java @@ -0,0 +1,96 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_alter_sequence] + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.DatabaseName; +import java.util.Objects; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class AlterSequenceSample { + + static void alterSequence() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + alterSequence(projectId, instanceId, databaseId); + } + + static void alterSequence(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + + databaseAdminClient + .updateDatabaseDdlAsync(DatabaseName.of(projectId, instanceId, databaseId), + ImmutableList.of( + "ALTER SEQUENCE Seq SET OPTIONS " + + "(skip_range_min = 1000, skip_range_max = 5000000)")) + .get(5, TimeUnit.MINUTES); + + System.out.println( + "Altered Seq sequence to skip an inclusive range between 1000 and 5000000"); + + final DatabaseClient dbClient = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + + Long insertCount = + dbClient + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet rs = + transaction.executeQuery( + Statement.of( + "INSERT INTO Customers 
(CustomerName) VALUES " + + "('Lea'), ('Catalina'), ('Smith') " + + "THEN RETURN CustomerId"))) { + while (rs.next()) { + System.out.printf( + "Inserted customer record with CustomerId: %d\n", rs.getLong(0)); + } + return Objects.requireNonNull(rs.getStats()).getRowCountExact(); + } + }); + System.out.printf("Number of customer records inserted is: %d\n", insertCount); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (TimeoutException e) { + // If the operation timed out propagate the timeout + throw SpannerExceptionFactory.propagateTimeout(e); + } + } +} +// [END spanner_alter_sequence] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/AlterTableWithForeignKeyDeleteCascadeSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AlterTableWithForeignKeyDeleteCascadeSample.java new file mode 100644 index 000000000000..6950e6a4fac6 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AlterTableWithForeignKeyDeleteCascadeSample.java @@ -0,0 +1,59 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_alter_table_with_foreign_key_delete_cascade] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.DatabaseName; + +class AlterTableWithForeignKeyDeleteCascadeSample { + + static void alterForeignKeyDeleteCascadeConstraint() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + alterForeignKeyDeleteCascadeConstraint(projectId, instanceId, databaseId); + } + + static void alterForeignKeyDeleteCascadeConstraint( + String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + databaseAdminClient.updateDatabaseDdlAsync(DatabaseName.of(projectId, instanceId, + databaseId), + ImmutableList.of( + "ALTER TABLE ShoppingCarts\n" + + " ADD CONSTRAINT FKShoppingCartsCustomerName\n" + + " FOREIGN KEY (CustomerName)\n" + + " REFERENCES Customers(CustomerName)\n" + + " ON DELETE CASCADE\n")); + System.out.printf( + String.format( + "Altered ShoppingCarts table with FKShoppingCartsCustomerName\n" + + "foreign key constraint on database %s on instance %s", + databaseId, instanceId)); + } + } +} +// [END spanner_alter_table_with_foreign_key_delete_cascade] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncDmlExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncDmlExample.java new file mode 100644 index 000000000000..83e28589ed3a --- /dev/null +++ 
b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncDmlExample.java @@ -0,0 +1,71 @@ +/* + * Copyright 2020 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_async_dml_standard_insert] +import com.google.api.core.ApiFuture; +import com.google.cloud.spanner.AsyncRunner; +import com.google.cloud.spanner.AsyncRunner.AsyncWork; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TransactionContext; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeoutException; + +class AsyncDmlExample { + + static void asyncDml() throws InterruptedException, ExecutionException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + asyncDml(client); + } + } + + // Execute a DML statement asynchronously. 
+ static void asyncDml(DatabaseClient client) + throws InterruptedException, ExecutionException, TimeoutException { + ExecutorService executor = Executors.newSingleThreadExecutor(); + AsyncRunner runner = client.runAsync(); + ApiFuture rowCount = + runner.runAsync( + txn -> { + String sql = + "INSERT INTO Singers (SingerId, FirstName, LastName) VALUES " + + "(12, 'Melissa', 'Garcia'), " + + "(13, 'Russell', 'Morales'), " + + "(14, 'Jacqueline', 'Long'), " + + "(15, 'Dylan', 'Shaw')"; + return txn.executeUpdateAsync(Statement.of(sql)); + }, + executor); + System.out.printf("%d records inserted.%n", rowCount.get()); + executor.shutdown(); + } +} +//[END spanner_async_dml_standard_insert] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncQueryExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncQueryExample.java new file mode 100644 index 000000000000..257ea6e4ad73 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncQueryExample.java @@ -0,0 +1,107 @@ +/* + * Copyright 2020 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_async_query_data] +import com.google.api.core.ApiFuture; +import com.google.cloud.spanner.AsyncResultSet; +import com.google.cloud.spanner.AsyncResultSet.CallbackResponse; +import com.google.cloud.spanner.AsyncResultSet.ReadyCallback; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +class AsyncQueryExample { + + static void asyncQuery() throws InterruptedException, ExecutionException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + asyncQuery(client); + } + } + + // Execute a query asynchronously and process the results in a callback. + static void asyncQuery(DatabaseClient client) + throws InterruptedException, ExecutionException, TimeoutException { + ApiFuture finished; + ExecutorService executor = Executors.newSingleThreadExecutor(); + try (AsyncResultSet resultSet = + client + .singleUse() + .executeQueryAsync(Statement.of("SELECT SingerId, AlbumId, AlbumTitle FROM Albums"))) { + // Setting a callback will automatically start the iteration over the results of the query + // using the specified executor. The callback will be called at least once. 
The returned + // ApiFuture is done when the callback has returned DONE and all resources used by the + // AsyncResultSet have been released. + finished = + resultSet.setCallback( + executor, + new ReadyCallback() { + @Override + public CallbackResponse cursorReady(AsyncResultSet resultSet) { + try { + while (true) { + switch (resultSet.tryNext()) { + // OK: There is a row ready. + case OK: + System.out.printf( + "%d %d %s%n", + resultSet.getLong(0), resultSet.getLong(1), resultSet.getString(2)); + break; + + // DONE: There are no more rows in the result set. + case DONE: + return CallbackResponse.DONE; + + // NOT_READY: There are currently no more rows in the buffer. + case NOT_READY: + return CallbackResponse.CONTINUE; + + default: + throw new IllegalStateException(); + } + } + } catch (SpannerException e) { + System.out.printf("Error in callback: %s%n", e.getMessage()); + return CallbackResponse.DONE; + } + } + }); + } + + // This ApiFuture is done when the callback has returned DONE and all resources of the + // asynchronous result set have been released. + finished.get(30L, TimeUnit.SECONDS); + executor.shutdown(); + } +} +//[END spanner_async_query_data] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncQueryToListAsyncExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncQueryToListAsyncExample.java new file mode 100644 index 000000000000..11da6a13fddb --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncQueryToListAsyncExample.java @@ -0,0 +1,91 @@ +/* + * Copyright 2020 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_async_query_to_list] +import com.google.api.core.ApiFuture; +import com.google.cloud.spanner.AsyncResultSet; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + * Example code for using Async query on Cloud Spanner and convert it to list. + */ +class AsyncQueryToListAsyncExample { + static class Album { + final long singerId; + final long albumId; + final String albumTitle; + + Album(long singerId, long albumId, String albumTitle) { + this.singerId = singerId; + this.albumId = albumId; + this.albumTitle = albumTitle; + } + } + + static void asyncQueryToList() throws InterruptedException, ExecutionException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + asyncQueryToList(client); + } + } + + // Execute a query asynchronously and transform the result to a list. + static void asyncQueryToList(DatabaseClient client) + throws InterruptedException, ExecutionException, TimeoutException { + ExecutorService executor = Executors.newSingleThreadExecutor(); + ApiFuture> albums; + try (AsyncResultSet resultSet = + client + .singleUse() + .executeQueryAsync(Statement.of("SELECT SingerId, AlbumId, AlbumTitle FROM Albums"))) { + // Convert the result set to a list of Albums asynchronously. + albums = + resultSet.toListAsync( + reader -> { + return new Album( + reader.getLong("SingerId"), + reader.getLong("AlbumId"), + reader.getString("AlbumTitle")); + }, + executor); + } + + for (Album album : albums.get(30L, TimeUnit.SECONDS)) { + System.out.printf("%d %d %s%n", album.singerId, album.albumId, album.albumTitle); + } + executor.shutdown(); + } +} +//[END spanner_async_query_to_list] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncReadExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncReadExample.java new file mode 100644 index 000000000000..26fefb0df996 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncReadExample.java @@ -0,0 +1,111 @@ +/* + * Copyright 2020 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_async_read_data] +import com.google.api.core.ApiFuture; +import com.google.cloud.spanner.AsyncResultSet; +import com.google.cloud.spanner.AsyncResultSet.CallbackResponse; +import com.google.cloud.spanner.AsyncResultSet.ReadyCallback; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerOptions; +import java.util.Arrays; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +class AsyncReadExample { + + static void asyncRead() throws InterruptedException, ExecutionException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + asyncRead(client); + } + } + + // Execute a query asynchronously and process the results in a callback. 
+ static void asyncRead(DatabaseClient client) + throws InterruptedException, ExecutionException, TimeoutException { + ApiFuture finished; + ExecutorService executor = Executors.newSingleThreadExecutor(); + try (AsyncResultSet resultSet = + client + .singleUse() + .readAsync( + "Albums", + KeySet.all(), // Read all rows in a table. + Arrays.asList("SingerId", "AlbumId", "AlbumTitle"))) { + // Setting a callback will automatically start the iteration over the results of the query + // using the specified executor. The callback will be called at least once. The returned + // ApiFuture is done when the callback has returned DONE and all resources used by the + // AsyncResultSet have been released. + finished = + resultSet.setCallback( + executor, + new ReadyCallback() { + @Override + public CallbackResponse cursorReady(AsyncResultSet resultSet) { + try { + while (true) { + switch (resultSet.tryNext()) { + // OK: There is a row ready. + case OK: + System.out.printf( + "%d %d %s%n", + resultSet.getLong(0), resultSet.getLong(1), resultSet.getString(2)); + break; + + // DONE: There are no more rows in the result set. + case DONE: + return CallbackResponse.DONE; + + // NOT_READY: There are currently no more rows in the buffer. + case NOT_READY: + return CallbackResponse.CONTINUE; + + default: + throw new IllegalStateException(); + } + } + } catch (SpannerException e) { + System.out.printf("Error in callback: %s%n", e.getMessage()); + return CallbackResponse.DONE; + } + } + }); + } + + // This ApiFuture is done when the callback has returned DONE and all resources of the + // asynchronous result set have been released. 
+ finished.get(30L, TimeUnit.SECONDS); + executor.shutdown(); + } +} +//[END spanner_async_read_data] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncReadOnlyTransactionExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncReadOnlyTransactionExample.java new file mode 100644 index 000000000000..dc2560ab7842 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncReadOnlyTransactionExample.java @@ -0,0 +1,135 @@ +/* + * Copyright 2020 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_async_read_only_transaction] +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.cloud.spanner.AsyncResultSet; +import com.google.cloud.spanner.AsyncResultSet.CallbackResponse; +import com.google.cloud.spanner.AsyncResultSet.ReadyCallback; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ReadOnlyTransaction; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.common.collect.ImmutableList; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +class AsyncReadOnlyTransactionExample { + + static void asyncReadOnlyTransaction() + throws InterruptedException, ExecutionException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + asyncReadOnlyTransaction(client); + } + } + + static void asyncReadOnlyTransaction(DatabaseClient client) + throws InterruptedException, ExecutionException, TimeoutException { + ApiFuture finished1; + ApiFuture finished2; + ExecutorService executor = Executors.newFixedThreadPool(2); + + try (ReadOnlyTransaction transaction = client.readOnlyTransaction()) { + try (AsyncResultSet resultSet = + transaction.executeQueryAsync( + Statement.of("SELECT SingerId, AlbumId, AlbumTitle FROM Albums"))) { + finished1 = + resultSet.setCallback( + executor, + new ReadyCallback() { + @Override + public CallbackResponse cursorReady(AsyncResultSet resultSet) { + try { + while (true) { + switch (resultSet.tryNext()) { + case OK: + System.out.printf( + "%d %d %s%n", + resultSet.getLong(0), resultSet.getLong(1), resultSet.getString(2)); + break; + case DONE: + return CallbackResponse.DONE; + case NOT_READY: + return CallbackResponse.CONTINUE; + default: + throw new IllegalStateException(); + } + } + } catch (SpannerException e) { + System.out.printf("Error in callback: %s%n", e.getMessage()); + return CallbackResponse.DONE; + } + } + }); + } + try (AsyncResultSet resultSet = + transaction.executeQueryAsync( + Statement.of("SELECT SingerId, FirstName, LastName FROM Singers"))) { + finished2 = + resultSet.setCallback( + executor, + new ReadyCallback() { + @Override + public CallbackResponse cursorReady(AsyncResultSet resultSet) { + try { + while (true) { + switch (resultSet.tryNext()) { + case OK: + System.out.printf( + "%d %s %s%n", + resultSet.getLong(0), + resultSet.getString(1), + resultSet.getString(2)); + break; + case DONE: + return CallbackResponse.DONE; + case 
NOT_READY: + return CallbackResponse.CONTINUE; + default: + throw new IllegalStateException(); + } + } + } catch (SpannerException e) { + System.out.printf("Error in callback: %s%n", e.getMessage()); + return CallbackResponse.DONE; + } + } + }); + } + } + + ApiFutures.allAsList(ImmutableList.of(finished1, finished2)).get(60L, TimeUnit.SECONDS); + executor.shutdown(); + } +} +//[END spanner_async_read_only_transaction] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncReadRowExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncReadRowExample.java new file mode 100644 index 000000000000..f05f5c12cff0 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncReadRowExample.java @@ -0,0 +1,84 @@ +/* + * Copyright 2020 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +//[START spanner_async_read_row] +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Struct; +import com.google.common.util.concurrent.MoreExecutors; +import java.util.Arrays; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +class AsyncReadRowExample { + + static void asyncReadRow() throws InterruptedException, ExecutionException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + asyncReadRow(client); + } + } + + // Read a single row asynchronously and print out the result when available. + static void asyncReadRow(DatabaseClient client) + throws InterruptedException, ExecutionException, TimeoutException { + ApiFuture row = + client + .singleUse() + .readRowAsync( + "Albums", Key.of(1L, 1L), Arrays.asList("SingerId", "AlbumId", "AlbumTitle")); + // Add a callback that will print out the contents of the row when the result has been returned. 
+ SettableApiFuture printed = SettableApiFuture.create(); + ApiFutures.addCallback( + row, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable t) { + System.out.printf("Error reading row: %s%n", t.getMessage()); + printed.set(null); + } + + @Override + public void onSuccess(Struct result) { + System.out.printf( + "%d %d %s%n", result.getLong(0), result.getLong(1), result.getString(2)); + printed.set(null); + } + }, + MoreExecutors.directExecutor()); + + // Wait until the row has been printed. + printed.get(30L, TimeUnit.SECONDS); + } +} +//[END spanner_async_read_row] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncReadUsingIndexExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncReadUsingIndexExample.java new file mode 100644 index 000000000000..ffb3b9cb21c5 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncReadUsingIndexExample.java @@ -0,0 +1,112 @@ +/* + * Copyright 2020 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +//[START spanner_async_read_data_with_index] +import com.google.api.core.ApiFuture; +import com.google.cloud.spanner.AsyncResultSet; +import com.google.cloud.spanner.AsyncResultSet.CallbackResponse; +import com.google.cloud.spanner.AsyncResultSet.ReadyCallback; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerOptions; +import java.util.Arrays; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +class AsyncReadUsingIndexExample { + + static void asyncReadUsingIndex() + throws InterruptedException, ExecutionException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + asyncReadUsingIndex(client); + } + } + + // Execute a query asynchronously and process the results in a callback. + static void asyncReadUsingIndex(DatabaseClient client) + throws InterruptedException, ExecutionException, TimeoutException { + ApiFuture finished; + ExecutorService executor = Executors.newSingleThreadExecutor(); + try (AsyncResultSet resultSet = + client + .singleUse() + .readUsingIndexAsync( + "Albums", + "AlbumsByAlbumTitle", + KeySet.all(), // Read all rows in a table. 
+ Arrays.asList("AlbumId", "AlbumTitle"))) { + // Setting a callback will automatically start the iteration over the results of the query + // using the specified executor. The callback will be called at least once. The returned + // ApiFuture is done when the callback has returned DONE and all resources used by the + // AsyncResultSet have been released. + finished = + resultSet.setCallback( + executor, + new ReadyCallback() { + @Override + public CallbackResponse cursorReady(AsyncResultSet resultSet) { + try { + while (true) { + switch (resultSet.tryNext()) { + // OK: There is a row ready. + case OK: + System.out.printf( + "%d %s%n", resultSet.getLong(0), resultSet.getString(1)); + break; + + // DONE: There are no more rows in the result set. + case DONE: + return CallbackResponse.DONE; + + // NOT_READY: There are currently no more rows in the buffer. + case NOT_READY: + return CallbackResponse.CONTINUE; + + default: + throw new IllegalStateException(); + } + } + } catch (SpannerException e) { + System.out.printf("Error in callback: %s%n", e.getMessage()); + return CallbackResponse.DONE; + } + } + }); + } + + // This ApiFuture is done when the callback has returned DONE and all resources of the + // asynchronous result set have been released. + finished.get(30L, TimeUnit.SECONDS); + executor.shutdown(); + } +} +//[END spanner_async_read_data_with_index] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncRunnerExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncRunnerExample.java new file mode 100644 index 000000000000..afdf8f657275 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncRunnerExample.java @@ -0,0 +1,134 @@ +/* + * Copyright 2020 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +//[START spanner_async_read_write_transaction] +import com.google.api.core.ApiFunction; +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.cloud.spanner.AsyncRunner; +import com.google.cloud.spanner.AsyncRunner.AsyncWork; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TransactionContext; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.MoreExecutors; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.util.Arrays; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +class AsyncRunnerExample { + + static void asyncRunner() throws InterruptedException, ExecutionException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + asyncRunner(client); + } + } + + // Execute a read/write transaction asynchronously. + static void asyncRunner(DatabaseClient client) + throws InterruptedException, ExecutionException, TimeoutException { + ExecutorService executor = Executors.newSingleThreadExecutor(); + + // Create an async transaction runner. + AsyncRunner runner = client.runAsync(); + // The transaction returns the total number of rows that were updated as a future array of + // longs. + ApiFuture rowCounts = + runner.runAsync( + txn -> { + // Transfer marketing budget from one album to another. We do it in a + // transaction to ensure that the transfer is atomic. + ApiFuture album1BudgetFut = + txn.readRowAsync("Albums", Key.of(1, 1), ImmutableList.of("MarketingBudget")); + ApiFuture album2BudgetFut = + txn.readRowAsync("Albums", Key.of(2, 2), ImmutableList.of("MarketingBudget")); + + try { + // Transaction will only be committed if this condition still holds at the + // time of commit. Otherwise it will be aborted and the AsyncWork will be + // rerun by the client library. 
+ long transfer = 200_000; + if (album2BudgetFut.get().getLong(0) >= transfer) { + long album1Budget = album1BudgetFut.get().getLong(0); + long album2Budget = album2BudgetFut.get().getLong(0); + + album1Budget += transfer; + album2Budget -= transfer; + Statement updateStatement1 = + Statement.newBuilder( + "UPDATE Albums " + + "SET MarketingBudget = @AlbumBudget " + + "WHERE SingerId = 1 and AlbumId = 1") + .bind("AlbumBudget") + .to(album1Budget) + .build(); + Statement updateStatement2 = + Statement.newBuilder( + "UPDATE Albums " + + "SET MarketingBudget = @AlbumBudget " + + "WHERE SingerId = 2 and AlbumId = 2") + .bind("AlbumBudget") + .to(album2Budget) + .build(); + return txn.batchUpdateAsync( + ImmutableList.of(updateStatement1, updateStatement2)); + } else { + return ApiFutures.immediateFuture(new long[] {0L, 0L}); + } + } catch (ExecutionException e) { + throw SpannerExceptionFactory.newSpannerException(e.getCause()); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + }, + executor); + + ApiFuture totalUpdateCount = + ApiFutures.transform( + rowCounts, + new ApiFunction() { + @SuppressFBWarnings("UVA_USE_VAR_ARGS") + @Override + public Long apply(long[] input) { + return Arrays.stream(input).sum(); + } + }, + MoreExecutors.directExecutor()); + System.out.printf("%d records updated.%n", totalUpdateCount.get(30L, TimeUnit.SECONDS)); + executor.shutdown(); + } +} +//[END spanner_async_read_write_transaction] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncTransactionManagerExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncTransactionManagerExample.java new file mode 100644 index 000000000000..5d4087a2fab8 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/AsyncTransactionManagerExample.java @@ -0,0 +1,147 @@ +/* + * Copyright 2020 Google Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +//[START spanner_async_transaction_manager] +import com.google.api.core.ApiFunction; +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.AsyncTransactionManager; +import com.google.cloud.spanner.AsyncTransactionManager.AsyncTransactionStep; +import com.google.cloud.spanner.AsyncTransactionManager.CommitTimestampFuture; +import com.google.cloud.spanner.AsyncTransactionManager.TransactionContextFuture; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.MoreExecutors; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +class AsyncTransactionManagerExample { + + static void asyncTransactionManager() + throws InterruptedException, 
ExecutionException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + asyncTransactionManager(client); + } + } + + static void asyncTransactionManager(DatabaseClient client) + throws InterruptedException, ExecutionException, TimeoutException { + ExecutorService executor = Executors.newSingleThreadExecutor(); + + AsyncTransactionStep, long[]> updateCounts; + try (AsyncTransactionManager mgr = client.transactionManagerAsync()) { + TransactionContextFuture txn = mgr.beginAsync(); + // Loop to retry aborted errors. + while (true) { + try { + updateCounts = + txn.then( + (transaction, v) -> { + // Execute two reads in parallel and return the result of these as the input + // for the next step of the transaction. + ApiFuture album1BudgetFut = + transaction.readRowAsync( + "Albums", Key.of(1, 1), ImmutableList.of("MarketingBudget")); + ApiFuture album2BudgetFut = + transaction.readRowAsync( + "Albums", Key.of(2, 2), ImmutableList.of("MarketingBudget")); + return ApiFutures.allAsList(Arrays.asList(album1BudgetFut, album2BudgetFut)); + }, + executor) + // The input of the next step of the transaction is the return value of the + // previous step, i.e. a list containing the marketing budget of two Albums. 
+ .then( + (transaction, budgets) -> { + long album1Budget = budgets.get(0).getLong(0); + long album2Budget = budgets.get(1).getLong(0); + long transfer = 200_000; + if (album2Budget >= transfer) { + album1Budget += transfer; + album2Budget -= transfer; + Statement updateStatement1 = + Statement.newBuilder( + "UPDATE Albums " + + "SET MarketingBudget = @AlbumBudget " + + "WHERE SingerId = 1 and AlbumId = 1") + .bind("AlbumBudget") + .to(album1Budget) + .build(); + Statement updateStatement2 = + Statement.newBuilder( + "UPDATE Albums " + + "SET MarketingBudget = @AlbumBudget " + + "WHERE SingerId = 2 and AlbumId = 2") + .bind("AlbumBudget") + .to(album2Budget) + .build(); + return transaction.batchUpdateAsync( + ImmutableList.of(updateStatement1, updateStatement2)); + } else { + return ApiFutures.immediateFuture(new long[] {0L, 0L}); + } + }, + executor); + // Commit after the updates. + CommitTimestampFuture commitTsFut = updateCounts.commitAsync(); + // Wait for the transaction to finish and execute a retry if necessary. + commitTsFut.get(); + break; + } catch (AbortedException e) { + txn = mgr.resetForRetryAsync(); + } + } + } + + // Calculate the total update count. 
ApiFuture<Long> totalUpdateCount = + ApiFutures.transform( + updateCounts, + new ApiFunction<long[], Long>() {
+ */ + +package com.example.spanner; + +import com.google.cloud.spanner.BatchClient; +import com.google.cloud.spanner.BatchReadOnlyTransaction; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Partition; +import com.google.cloud.spanner.PartitionOptions; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TimestampBound; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +/** Sample showing how to run a query using the Batch API. */ +public class BatchSample { + + /** + * This example showcases how to create a batch client, partition a query, and concurrently read + * from multiple partitions. + */ + public static void main(String[] args) throws InterruptedException { + if (args.length != 2) { + System.err.println("Usage: BatchSample "); + return; + } + + /* + * CREATE TABLE Singers ( + * SingerId INT64 NOT NULL, + * FirstName STRING(1024), + * LastName STRING(1024), + * SingerInfo BYTES(MAX), + * ) PRIMARY KEY (SingerId); + */ + + String instanceId = args[0]; + String databaseId = args[1]; + + SpannerOptions options = SpannerOptions.newBuilder().build(); + Spanner spanner = options.getService(); + + // [START spanner_batch_client] + int numThreads = Runtime.getRuntime().availableProcessors(); + ExecutorService executor = Executors.newFixedThreadPool(numThreads); + + // Statistics + int totalPartitions; + AtomicInteger totalRecords = new AtomicInteger(0); + + try { + BatchClient batchClient = + spanner.getBatchClient(DatabaseId.of(options.getProjectId(), instanceId, databaseId)); + + final BatchReadOnlyTransaction txn = + batchClient.batchReadOnlyTransaction(TimestampBound.strong()); + + // 
A Partition object is serializable and can be used from a different process. + // DataBoost option is an optional parameter which can be used for partition read + // and query to execute the request via spanner independent compute resources. + + List partitions = + txn.partitionQuery( + PartitionOptions.getDefaultInstance(), + Statement.of("SELECT SingerId, FirstName, LastName FROM Singers"), + // Option to enable data boost for a given request + Options.dataBoostEnabled(true)); + + totalPartitions = partitions.size(); + + for (final Partition p : partitions) { + executor.execute( + () -> { + try (ResultSet results = txn.execute(p)) { + while (results.next()) { + long singerId = results.getLong(0); + String firstName = results.getString(1); + String lastName = results.getString(2); + System.out.println("[" + singerId + "] " + firstName + " " + lastName); + totalRecords.getAndIncrement(); + } + } + }); + } + } finally { + executor.shutdown(); + executor.awaitTermination(1, TimeUnit.HOURS); + spanner.close(); + } + + double avgRecordsPerPartition = 0.0; + if (totalPartitions != 0) { + avgRecordsPerPartition = (double) totalRecords.get() / totalPartitions; + } + System.out.println("totalPartitions=" + totalPartitions); + System.out.println("totalRecords=" + totalRecords); + System.out.println("avgRecordsPerPartition=" + avgRecordsPerPartition); + // [END spanner_batch_client] + } +} diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/BatchWriteAtLeastOnceSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/BatchWriteAtLeastOnceSample.java new file mode 100644 index 000000000000..bd59562eb285 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/BatchWriteAtLeastOnceSample.java @@ -0,0 +1,135 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_batch_write_at_least_once] + +import com.google.api.gax.rpc.ServerStream; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.MutationGroup; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.ImmutableList; +import com.google.rpc.Code; +import com.google.spanner.v1.BatchWriteResponse; + +public class BatchWriteAtLeastOnceSample { + + /*** + * Assume DDL for the underlying database: + *
    {@code
    +   *   CREATE TABLE Singers (
    +   *     SingerId   INT64 NOT NULL,
    +   *     FirstName  STRING(1024),
    +   *     LastName   STRING(1024),
    +   *   ) PRIMARY KEY (SingerId)
    +   *
    +   *   CREATE TABLE Albums (
    +   *     SingerId     INT64 NOT NULL,
    +   *     AlbumId      INT64 NOT NULL,
    +   *     AlbumTitle   STRING(1024),
    +   *   ) PRIMARY KEY (SingerId, AlbumId),
    +   *   INTERLEAVE IN PARENT Singers ON DELETE CASCADE
    +   * }
    + */ + + private static final MutationGroup MUTATION_GROUP1 = + MutationGroup.of( + Mutation.newInsertOrUpdateBuilder("Singers") + .set("SingerId") + .to(16) + .set("FirstName") + .to("Scarlet") + .set("LastName") + .to("Terry") + .build()); + private static final MutationGroup MUTATION_GROUP2 = + MutationGroup.of( + Mutation.newInsertOrUpdateBuilder("Singers") + .set("SingerId") + .to(17) + .set("FirstName") + .to("Marc") + .build(), + Mutation.newInsertOrUpdateBuilder("Singers") + .set("SingerId") + .to(18) + .set("FirstName") + .to("Catalina") + .set("LastName") + .to("Smith") + .build(), + Mutation.newInsertOrUpdateBuilder("Albums") + .set("SingerId") + .to(17) + .set("AlbumId") + .to(1) + .set("AlbumTitle") + .to("Total Junk") + .build(), + Mutation.newInsertOrUpdateBuilder("Albums") + .set("SingerId") + .to(18) + .set("AlbumId") + .to(2) + .set("AlbumTitle") + .to("Go, Go, Go") + .build()); + + static void batchWriteAtLeastOnce() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + batchWriteAtLeastOnce(projectId, instanceId, databaseId); + } + + static void batchWriteAtLeastOnce(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseId dbId = DatabaseId.of(projectId, instanceId, databaseId); + final DatabaseClient dbClient = spanner.getDatabaseClient(dbId); + + // Creates and issues a BatchWrite RPC request that will apply the mutation groups + // non-atomically and respond back with a stream of BatchWriteResponse. + ServerStream responses = + dbClient.batchWriteAtLeastOnce( + ImmutableList.of(MUTATION_GROUP1, MUTATION_GROUP2), + Options.tag("batch-write-tag")); + + // Iterates through the results in the stream response and prints the MutationGroup indexes, + // commit timestamp and status. 
+ for (BatchWriteResponse response : responses) { + if (response.getStatus().getCode() == Code.OK_VALUE) { + System.out.printf( + "Mutation group indexes %s have been applied with commit timestamp %s", + response.getIndexesList(), response.getCommitTimestamp()); + } else { + System.out.printf( + "Mutation group indexes %s could not be applied with error code %s and " + + "error message %s", response.getIndexesList(), + Code.forNumber(response.getStatus().getCode()), response.getStatus().getMessage()); + } + } + } + } +} + +// [END spanner_batch_write_at_least_once] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/ChangeStreamsTxnExclusionSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/ChangeStreamsTxnExclusionSample.java new file mode 100644 index 000000000000..10a7c4b26d4a --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/ChangeStreamsTxnExclusionSample.java @@ -0,0 +1,68 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; + +/** + * Sample showing how to set exclude transaction from change streams in different write requests. 
+ */ +public class ChangeStreamsTxnExclusionSample { + + static void setExcludeTxnFromChangeStreams() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-instance"; + final String instanceId = "my-project"; + final String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + final DatabaseClient databaseClient = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + readWriteTxnExcludedFromChangeStreams(databaseClient); + } + } + + // [START spanner_set_exclude_txn_from_change_streams] + static void readWriteTxnExcludedFromChangeStreams(DatabaseClient client) { + // Exclude the transaction from allowed tracking change streams with alloww_txn_exclusion=true. + // This exclusion will be applied to all the individual operations inside this transaction. + client + .readWriteTransaction(Options.excludeTxnFromChangeStreams()) + .run( + transaction -> { + transaction.executeUpdate( + Statement.of( + "INSERT Singers (SingerId, FirstName, LastName)\n" + + "VALUES (1341, 'Virginia', 'Watson')")); + System.out.println("New singer inserted."); + + transaction.executeUpdate( + Statement.of("UPDATE Singers SET FirstName = 'Hi' WHERE SingerId = 111")); + System.out.println("Singer first name updated."); + + return null; + }); + } + // [END spanner_set_exclude_txn_from_change_streams] + +} diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/CopyBackupSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CopyBackupSample.java new file mode 100644 index 000000000000..c6ee70668755 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CopyBackupSample.java @@ -0,0 +1,97 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_copy_backup] + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupName; +import com.google.spanner.admin.database.v1.InstanceName; +import java.time.Instant; +import java.time.OffsetDateTime; +import java.time.ZoneId; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +public class CopyBackupSample { + + static void copyBackup() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String sourceBackupId = "my-backup"; + String destinationBackupId = "my-destination-backup"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + copyBackup(databaseAdminClient, projectId, instanceId, sourceBackupId, destinationBackupId); + } + } + + static void copyBackup( + DatabaseAdminClient databaseAdminClient, + String projectId, + String instanceId, + String sourceBackupId, + String destinationBackupId) { + + Timestamp expireTime = + Timestamp.ofTimeMicroseconds( + TimeUnit.MICROSECONDS.convert( + System.currentTimeMillis() + TimeUnit.DAYS.toMillis(14), + TimeUnit.MILLISECONDS)); + + // Initiate the request which returns an OperationFuture. + System.out.println("Copying backup [" + destinationBackupId + "]..."); + Backup destinationBackup; + try { + // Creates a copy of an existing backup. + // Wait for the backup operation to complete. + destinationBackup = databaseAdminClient.copyBackupAsync( + InstanceName.of(projectId, instanceId), destinationBackupId, + BackupName.of(projectId, instanceId, sourceBackupId), expireTime.toProto()).get(); + System.out.println("Copied backup [" + destinationBackup.getName() + "]"); + } catch (ExecutionException e) { + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + // Load the metadata of the new backup from the server. 
+ destinationBackup = databaseAdminClient.getBackup(destinationBackup.getName()); + System.out.println( + String.format( + "Backup %s of size %d bytes was copied at %s for version of database at %s", + destinationBackup.getName(), + destinationBackup.getSizeBytes(), + OffsetDateTime.ofInstant( + Instant.ofEpochSecond(destinationBackup.getCreateTime().getSeconds(), + destinationBackup.getCreateTime().getNanos()), + ZoneId.systemDefault()), + OffsetDateTime.ofInstant( + Instant.ofEpochSecond(destinationBackup.getVersionTime().getSeconds(), + destinationBackup.getVersionTime().getNanos()), + ZoneId.systemDefault()))); + } +} +// [END spanner_copy_backup] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/CopyBackupWithMultiRegionEncryptionKey.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CopyBackupWithMultiRegionEncryptionKey.java new file mode 100644 index 000000000000..4fe60a775e59 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CopyBackupWithMultiRegionEncryptionKey.java @@ -0,0 +1,125 @@ +/* + * Copyright 2024 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_copy_backup_with_MR_CMEK] + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupName; +import com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig; +import com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType; +import com.google.spanner.admin.database.v1.CopyBackupRequest; +import com.google.spanner.admin.database.v1.InstanceName; +import java.time.Instant; +import java.time.OffsetDateTime; +import java.time.ZoneId; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +public class CopyBackupWithMultiRegionEncryptionKey { + + static void copyBackupWithMultiRegionEncryptionKey() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String sourceBackupId = "my-backup"; + String destinationBackupId = "my-destination-backup"; + String[] kmsKeyNames = + new String[] { + "projects/" + projectId + "/locations//keyRings//cryptoKeys/", + "projects/" + projectId + "/locations//keyRings//cryptoKeys/", + "projects/" + projectId + "/locations//keyRings//cryptoKeys/" + }; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + copyBackupWithMultiRegionEncryptionKey( + databaseAdminClient, + projectId, + instanceId, + sourceBackupId, + destinationBackupId, + kmsKeyNames); + } + } + + static void copyBackupWithMultiRegionEncryptionKey( + DatabaseAdminClient databaseAdminClient, + String projectId, + String instanceId, + String sourceBackupId, + String destinationBackupId, + String[] kmsKeyNames) { + + Timestamp expireTime = + Timestamp.ofTimeMicroseconds( + TimeUnit.MICROSECONDS.convert( + System.currentTimeMillis() + TimeUnit.DAYS.toMillis(14), TimeUnit.MILLISECONDS)); + + // Initiate the request which returns an OperationFuture. + System.out.println("Copying backup [" + destinationBackupId + "]..."); + CopyBackupRequest request = + CopyBackupRequest.newBuilder() + .setParent(InstanceName.of(projectId, instanceId).toString()) + .setBackupId(destinationBackupId) + .setSourceBackup(BackupName.of(projectId, instanceId, sourceBackupId).toString()) + .setExpireTime(expireTime.toProto()) + .setEncryptionConfig( + CopyBackupEncryptionConfig.newBuilder() + .setEncryptionType(EncryptionType.CUSTOMER_MANAGED_ENCRYPTION) + .addAllKmsKeyNames(ImmutableList.copyOf(kmsKeyNames)) + .build()) + .build(); + Backup destinationBackup; + try { + // Creates a copy of an existing backup. + // Wait for the backup operation to complete. 
+ destinationBackup = databaseAdminClient.copyBackupAsync(request).get(); + System.out.println("Copied backup [" + destinationBackup.getName() + "]"); + } catch (ExecutionException e) { + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + // Load the metadata of the new backup from the server. + destinationBackup = databaseAdminClient.getBackup(destinationBackup.getName()); + System.out.println( + String.format( + "Backup %s of size %d bytes was copied at %s for version of database at %s", + destinationBackup.getName(), + destinationBackup.getSizeBytes(), + OffsetDateTime.ofInstant( + Instant.ofEpochSecond( + destinationBackup.getCreateTime().getSeconds(), + destinationBackup.getCreateTime().getNanos()), + ZoneId.systemDefault()), + OffsetDateTime.ofInstant( + Instant.ofEpochSecond( + destinationBackup.getVersionTime().getSeconds(), + destinationBackup.getVersionTime().getNanos()), + ZoneId.systemDefault()))); + } +} +// [END spanner_copy_backup_with_MR_CMEK] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateBackupWithEncryptionKey.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateBackupWithEncryptionKey.java new file mode 100644 index 000000000000..e2c7b1706173 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateBackupWithEncryptionKey.java @@ -0,0 +1,112 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_create_backup_with_encryption_key] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.protobuf.Timestamp; +import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupName; +import com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig; +import com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType; +import com.google.spanner.admin.database.v1.CreateBackupRequest; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.InstanceName; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.threeten.bp.LocalDateTime; +import org.threeten.bp.OffsetDateTime; + +public class CreateBackupWithEncryptionKey { + + static void createBackupWithEncryptionKey() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String backupId = "my-backup"; + String kmsKeyName = + "projects/" + projectId + "/locations//keyRings//cryptoKeys/"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient adminClient = spanner.createDatabaseAdminClient()) { + createBackupWithEncryptionKey( + adminClient, + projectId, + instanceId, + databaseId, + backupId, + kmsKeyName); + } + } + + static Void createBackupWithEncryptionKey(DatabaseAdminClient adminClient, + String projectId, String instanceId, String databaseId, String backupId, String kmsKeyName) { + // Set expire time to 14 days from now. + final Timestamp expireTime = + Timestamp.newBuilder().setSeconds(TimeUnit.MILLISECONDS.toSeconds(( + System.currentTimeMillis() + TimeUnit.DAYS.toMillis(14)))).build(); + final BackupName backupName = BackupName.of(projectId, instanceId, backupId); + Backup backup = Backup.newBuilder() + .setName(backupName.toString()) + .setDatabase(DatabaseName.of(projectId, instanceId, databaseId).toString()) + .setExpireTime(expireTime).build(); + + final CreateBackupRequest request = + CreateBackupRequest.newBuilder() + .setParent(InstanceName.of(projectId, instanceId).toString()) + .setBackupId(backupId) + .setBackup(backup) + .setEncryptionConfig( + CreateBackupEncryptionConfig.newBuilder() + .setEncryptionType(EncryptionType.CUSTOMER_MANAGED_ENCRYPTION) + .setKmsKeyName(kmsKeyName).build()).build(); + try { + System.out.println("Waiting for operation to complete..."); + backup = adminClient.createBackupAsync(request).get(1200, TimeUnit.SECONDS); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. 
+ throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (TimeoutException e) { + // If the operation timed out propagates the timeout + throw SpannerExceptionFactory.propagateTimeout(e); + } + System.out.printf( + "Backup %s of size %d bytes was created at %s using encryption key %s%n", + backup.getName(), + backup.getSizeBytes(), + LocalDateTime.ofEpochSecond( + backup.getCreateTime().getSeconds(), + backup.getCreateTime().getNanos(), + OffsetDateTime.now().getOffset()), + kmsKeyName + ); + + return null; + } +} +// [END spanner_create_backup_with_encryption_key] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateBackupWithMultiRegionEncryptionKey.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateBackupWithMultiRegionEncryptionKey.java new file mode 100644 index 000000000000..eb8f669dedae --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateBackupWithMultiRegionEncryptionKey.java @@ -0,0 +1,123 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_create_backup_with_MR_CMEK] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.Timestamp; +import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupName; +import com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig; +import com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType; +import com.google.spanner.admin.database.v1.CreateBackupRequest; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.InstanceName; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.threeten.bp.LocalDateTime; +import org.threeten.bp.OffsetDateTime; + +public class CreateBackupWithMultiRegionEncryptionKey { + + static void createBackupWithMultiRegionEncryptionKey() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String backupId = "my-backup"; + String[] kmsKeyNames = + new String[] { + "projects/" + projectId + "/locations//keyRings//cryptoKeys/", + "projects/" + projectId + "/locations//keyRings//cryptoKeys/", + "projects/" + projectId + "/locations//keyRings//cryptoKeys/" + }; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient adminClient = spanner.createDatabaseAdminClient()) { + createBackupWithMultiRegionEncryptionKey( + adminClient, projectId, instanceId, databaseId, backupId, kmsKeyNames); + } + } + + static Void createBackupWithMultiRegionEncryptionKey( + DatabaseAdminClient adminClient, + String projectId, + String instanceId, + String databaseId, + String backupId, + String[] kmsKeyNames) { + // Set expire time to 14 days from now. + final Timestamp expireTime = + Timestamp.newBuilder() + .setSeconds( + TimeUnit.MILLISECONDS.toSeconds( + (System.currentTimeMillis() + TimeUnit.DAYS.toMillis(14)))) + .build(); + final BackupName backupName = BackupName.of(projectId, instanceId, backupId); + Backup backup = + Backup.newBuilder() + .setName(backupName.toString()) + .setDatabase(DatabaseName.of(projectId, instanceId, databaseId).toString()) + .setExpireTime(expireTime) + .build(); + + final CreateBackupRequest request = + CreateBackupRequest.newBuilder() + .setParent(InstanceName.of(projectId, instanceId).toString()) + .setBackupId(backupId) + .setBackup(backup) + .setEncryptionConfig( + CreateBackupEncryptionConfig.newBuilder() + .setEncryptionType(EncryptionType.CUSTOMER_MANAGED_ENCRYPTION) + .addAllKmsKeyNames(ImmutableList.copyOf(kmsKeyNames)) + .build()) + .build(); + try { + System.out.println("Waiting for operation to complete..."); + backup = adminClient.createBackupAsync(request).get(1200, TimeUnit.SECONDS); + } catch (ExecutionException e) { + // If the operation failed during 
execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (TimeoutException e) { + // If the operation timed out propagates the timeout + throw SpannerExceptionFactory.propagateTimeout(e); + } + System.out.printf( + "Backup %s of size %d bytes was created at %s using encryption keys %s%n", + backup.getName(), + backup.getSizeBytes(), + LocalDateTime.ofEpochSecond( + backup.getCreateTime().getSeconds(), + backup.getCreateTime().getNanos(), + OffsetDateTime.now().getOffset()), + ImmutableList.copyOf(kmsKeyNames)); + + return null; + } +} +// [END spanner_create_backup_with_MR_CMEK] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithDefaultLeaderSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithDefaultLeaderSample.java new file mode 100644 index 000000000000..33917685cd98 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithDefaultLeaderSample.java @@ -0,0 +1,76 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +//[START spanner_create_database_with_default_leader] + +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.Database; +import java.io.IOException; +import java.util.concurrent.ExecutionException; + +public class CreateDatabaseWithDefaultLeaderSample { + + static void createDatabaseWithDefaultLeader() throws IOException { + // TODO(developer): Replace these variables before running the sample. + final String instanceName = "projects/my-project/instances/my-instance-id"; + final String databaseId = "my-database-name"; + final String defaultLeader = "my-default-leader"; + createDatabaseWithDefaultLeader(instanceName, databaseId, defaultLeader); + } + + static void createDatabaseWithDefaultLeader(String instanceName, String databaseId, + String defaultLeader) throws IOException { + try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) { + Database createdDatabase = + databaseAdminClient.createDatabaseAsync( + CreateDatabaseRequest.newBuilder() + .setParent(instanceName) + .setCreateStatement("CREATE DATABASE `" + databaseId + "`") + .addAllExtraStatements( + ImmutableList.of("CREATE TABLE Singers (" + + " SingerId INT64 NOT NULL," + + " FirstName STRING(1024)," + + " LastName STRING(1024)," + + " SingerInfo BYTES(MAX)" + + ") PRIMARY KEY (SingerId)", + "CREATE TABLE Albums (" + + " SingerId INT64 NOT NULL," + + " AlbumId INT64 NOT NULL," + + " AlbumTitle STRING(MAX)" + + ") PRIMARY KEY (SingerId, AlbumId)," + + " INTERLEAVE IN PARENT Singers ON DELETE CASCADE", + "ALTER DATABASE " + "`" + databaseId + "`" + + " SET OPTIONS ( default_leader = '" + defaultLeader + "' )")) + .build()).get(); + System.out.println("Created 
database [" + createdDatabase.getName() + "]"); + System.out.println("\tDefault leader: " + createdDatabase.getDefaultLeader()); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } +} +//[END spanner_create_database_with_default_leader] \ No newline at end of file diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithEncryptionKey.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithEncryptionKey.java new file mode 100644 index 000000000000..c06e9c3eba3a --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithEncryptionKey.java @@ -0,0 +1,102 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_create_database_with_encryption_key] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.EncryptionConfig; +import com.google.spanner.admin.database.v1.InstanceName; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class CreateDatabaseWithEncryptionKey { + + static void createDatabaseWithEncryptionKey() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String kmsKeyName = + "projects/" + projectId + "/locations//keyRings//cryptoKeys/"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient adminClient = spanner.createDatabaseAdminClient()) { + createDatabaseWithEncryptionKey( + adminClient, + projectId, + instanceId, + databaseId, + kmsKeyName); + } + } + + static void createDatabaseWithEncryptionKey(DatabaseAdminClient adminClient, + String projectId, String instanceId, String databaseId, String kmsKeyName) { + InstanceName instanceName = InstanceName.of(projectId, instanceId); + CreateDatabaseRequest request = CreateDatabaseRequest.newBuilder() + .setParent(instanceName.toString()) + .setCreateStatement("CREATE DATABASE `" + databaseId + "`") + .setEncryptionConfig(EncryptionConfig.newBuilder().setKmsKeyName(kmsKeyName).build()) + .addAllExtraStatements( + ImmutableList.of( + "CREATE TABLE Singers (" + + " SingerId INT64 NOT 
NULL," + + " FirstName STRING(1024)," + + " LastName STRING(1024)," + + " SingerInfo BYTES(MAX)" + + ") PRIMARY KEY (SingerId)", + "CREATE TABLE Albums (" + + " SingerId INT64 NOT NULL," + + " AlbumId INT64 NOT NULL," + + " AlbumTitle STRING(MAX)" + + ") PRIMARY KEY (SingerId, AlbumId)," + + " INTERLEAVE IN PARENT Singers ON DELETE CASCADE" + )) + .build(); + try { + System.out.println("Waiting for operation to complete..."); + Database createdDatabase = + adminClient.createDatabaseAsync(request).get(120, TimeUnit.SECONDS); + + System.out.printf( + "Database %s created with encryption key %s%n", + createdDatabase.getName(), + createdDatabase.getEncryptionConfig().getKmsKeyName() + ); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (TimeoutException e) { + // If the operation timed out propagates the timeout + throw SpannerExceptionFactory.propagateTimeout(e); + } + } +} +// [END spanner_create_database_with_encryption_key] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithMultiRegionEncryptionKey.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithMultiRegionEncryptionKey.java new file mode 100644 index 000000000000..962b5b1d5d00 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithMultiRegionEncryptionKey.java @@ -0,0 +1,106 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_create_database_with_MR_CMEK] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.EncryptionConfig; +import com.google.spanner.admin.database.v1.InstanceName; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class CreateDatabaseWithMultiRegionEncryptionKey { + + static void createDatabaseWithEncryptionKey() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String[] kmsKeyNames = + new String[] { + "projects/" + projectId + "/locations//keyRings//cryptoKeys/", + "projects/" + projectId + "/locations//keyRings//cryptoKeys/", + "projects/" + projectId + "/locations//keyRings//cryptoKeys/" + }; + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient adminClient = spanner.createDatabaseAdminClient()) { + createDatabaseWithMultiRegionEncryptionKey( + adminClient, projectId, instanceId, databaseId, kmsKeyNames); + } + } + + static void createDatabaseWithMultiRegionEncryptionKey( + DatabaseAdminClient adminClient, + String projectId, + String instanceId, + String databaseId, + String[] kmsKeyNames) { + InstanceName instanceName = InstanceName.of(projectId, instanceId); + CreateDatabaseRequest request = + CreateDatabaseRequest.newBuilder() + .setParent(instanceName.toString()) + .setCreateStatement("CREATE DATABASE `" + databaseId + "`") + .setEncryptionConfig( + EncryptionConfig.newBuilder() + .addAllKmsKeyNames(ImmutableList.copyOf(kmsKeyNames)) + .build()) + .addAllExtraStatements( + ImmutableList.of( + "CREATE TABLE Singers (" + + " SingerId INT64 NOT NULL," + + " FirstName STRING(1024)," + + " LastName STRING(1024)," + + " SingerInfo BYTES(MAX)" + + ") PRIMARY KEY (SingerId)", + "CREATE TABLE Albums (" + + " SingerId INT64 NOT NULL," + + " AlbumId INT64 NOT NULL," + + " AlbumTitle STRING(MAX)" + + ") PRIMARY KEY (SingerId, AlbumId)," + + " INTERLEAVE IN PARENT Singers ON DELETE CASCADE")) + .build(); + try { + System.out.println("Waiting for operation to complete..."); + Database createdDatabase = + adminClient.createDatabaseAsync(request).get(120, TimeUnit.SECONDS); + + System.out.printf( + "Database %s created with encryption keys %s%n", + createdDatabase.getName(), createdDatabase.getEncryptionConfig().getKmsKeyNamesList()); + } catch 
(ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (TimeoutException e) { + // If the operation timed out propagates the timeout + throw SpannerExceptionFactory.propagateTimeout(e); + } + } +} +// [END spanner_create_database_with_MR_CMEK] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithVersionRetentionPeriodSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithVersionRetentionPeriodSample.java new file mode 100644 index 000000000000..888fe6258893 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithVersionRetentionPeriodSample.java @@ -0,0 +1,72 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_create_database_with_version_retention_period] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.Lists; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.InstanceName; +import java.util.concurrent.ExecutionException; + +public class CreateDatabaseWithVersionRetentionPeriodSample { + + static void createDatabaseWithVersionRetentionPeriod() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String versionRetentionPeriod = "7d"; + + createDatabaseWithVersionRetentionPeriod(projectId, instanceId, databaseId, + versionRetentionPeriod); + } + + static void createDatabaseWithVersionRetentionPeriod(String projectId, + String instanceId, String databaseId, String versionRetentionPeriod) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + CreateDatabaseRequest request = + CreateDatabaseRequest.newBuilder() + .setParent(InstanceName.of(projectId, instanceId).toString()) + .setCreateStatement("CREATE DATABASE `" + databaseId + "`") + .addAllExtraStatements(Lists.newArrayList("ALTER DATABASE " + "`" + databaseId + "`" + + " SET OPTIONS ( version_retention_period = '" + versionRetentionPeriod + "' )")) + .build(); + Database database = + databaseAdminClient.createDatabaseAsync(request).get(); + System.out.println("Created database [" + database.getName() + "]"); + 
System.out.println("\tVersion retention period: " + database.getVersionRetentionPeriod()); + System.out.println("\tEarliest version time: " + database.getEarliestVersionTime()); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } +} +// [END spanner_create_database_with_version_retention_period] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateFullBackupScheduleSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateFullBackupScheduleSample.java new file mode 100644 index 000000000000..b3836092692e --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateFullBackupScheduleSample.java @@ -0,0 +1,77 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_create_full_backup_schedule] + +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.protobuf.Duration; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.BackupScheduleSpec; +import com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig; +import com.google.spanner.admin.database.v1.CreateBackupScheduleRequest; +import com.google.spanner.admin.database.v1.CrontabSpec; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.FullBackupSpec; +import java.io.IOException; + +class CreateFullBackupScheduleSample { + + static void createFullBackupSchedule() throws IOException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String backupScheduleId = "my-backup-schedule"; + createFullBackupSchedule(projectId, instanceId, databaseId, backupScheduleId); + } + + static void createFullBackupSchedule( + String projectId, String instanceId, String databaseId, String backupScheduleId) + throws IOException { + final CreateBackupEncryptionConfig encryptionConfig = + CreateBackupEncryptionConfig.newBuilder() + .setEncryptionType(CreateBackupEncryptionConfig.EncryptionType.USE_DATABASE_ENCRYPTION) + .build(); + final BackupSchedule backupSchedule = + BackupSchedule.newBuilder() + .setFullBackupSpec(FullBackupSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().setSeconds(3600 * 24).build()) + .setSpec( + BackupScheduleSpec.newBuilder() + .setCronSpec(CrontabSpec.newBuilder().setText("30 12 * * *").build()) + .build()) + .setEncryptionConfig(encryptionConfig) + .build(); + + try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) { + DatabaseName databaseName = DatabaseName.of(projectId, instanceId, 
databaseId); + final BackupSchedule createdBackupSchedule = + databaseAdminClient.createBackupSchedule( + CreateBackupScheduleRequest.newBuilder() + .setParent(databaseName.toString()) + .setBackupScheduleId(backupScheduleId) + .setBackupSchedule(backupSchedule) + .build()); + System.out.println( + String.format( + "Created backup schedule: %s\n%s", + createdBackupSchedule.getName(), createdBackupSchedule.toString())); + } + } +} +// [END spanner_create_full_backup_schedule] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateIncrementalBackupScheduleSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateIncrementalBackupScheduleSample.java new file mode 100644 index 000000000000..f73ebd30f237 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateIncrementalBackupScheduleSample.java @@ -0,0 +1,78 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_create_incremental_backup_schedule] + +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.protobuf.Duration; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.BackupScheduleSpec; +import com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig; +import com.google.spanner.admin.database.v1.CreateBackupScheduleRequest; +import com.google.spanner.admin.database.v1.CrontabSpec; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.IncrementalBackupSpec; +import java.io.IOException; + +class CreateIncrementalBackupScheduleSample { + + static void createIncrementalBackupSchedule() throws IOException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String backupScheduleId = "my-backup-schedule"; + createIncrementalBackupSchedule(projectId, instanceId, databaseId, backupScheduleId); + } + + static void createIncrementalBackupSchedule( + String projectId, String instanceId, String databaseId, String backupScheduleId) + throws IOException { + final CreateBackupEncryptionConfig encryptionConfig = + CreateBackupEncryptionConfig.newBuilder() + .setEncryptionType( + CreateBackupEncryptionConfig.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION) + .build(); + final BackupSchedule backupSchedule = + BackupSchedule.newBuilder() + .setIncrementalBackupSpec(IncrementalBackupSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().setSeconds(3600 * 24).build()) + .setSpec( + BackupScheduleSpec.newBuilder() + .setCronSpec(CrontabSpec.newBuilder().setText("30 12 * * *").build()) + .build()) + .setEncryptionConfig(encryptionConfig) + .build(); + + try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) { + 
DatabaseName databaseName = DatabaseName.of(projectId, instanceId, databaseId); + final BackupSchedule createdBackupSchedule = + databaseAdminClient.createBackupSchedule( + CreateBackupScheduleRequest.newBuilder() + .setParent(databaseName.toString()) + .setBackupScheduleId(backupScheduleId) + .setBackupSchedule(backupSchedule) + .build()); + System.out.println( + String.format( + "Created incremental backup schedule: %s\n%s", + createdBackupSchedule.getName(), createdBackupSchedule.toString())); + } + } +} +// [END spanner_create_incremental_backup_schedule] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateInstanceConfigSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateInstanceConfigSample.java new file mode 100644 index 000000000000..426d7c0484f7 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateInstanceConfigSample.java @@ -0,0 +1,94 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_create_instance_config] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.InstanceConfigName; +import com.google.spanner.admin.instance.v1.ProjectName; +import com.google.spanner.admin.instance.v1.ReplicaInfo; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +class CreateInstanceConfigSample { + + static void createInstanceConfig() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String baseInstanceConfigId = "nam11"; + String instanceConfigId = "custom-instance-config4"; + + createInstanceConfig(projectId, baseInstanceConfigId, instanceConfigId); + } + + static void createInstanceConfig( + String projectId, String baseInstanceConfigId, String instanceConfigId) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService(); + InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) { + final InstanceConfigName baseInstanceConfigName = InstanceConfigName.of(projectId, + baseInstanceConfigId); + final InstanceConfig baseConfig = + instanceAdminClient.getInstanceConfig(baseInstanceConfigName.toString()); + final InstanceConfigName instanceConfigName = InstanceConfigName.of(projectId, + instanceConfigId); + /** + * The replicas for the custom instance configuration must include all the replicas of the + * base configuration, in addition to at least one from the list of optional replicas of the + * base 
configuration. + */ + final List replicas = + Stream.concat(baseConfig.getReplicasList().stream(), + baseConfig.getOptionalReplicasList().stream().limit(1)).collect(Collectors.toList()); + final InstanceConfig instanceConfig = + InstanceConfig.newBuilder().setName(instanceConfigName.toString()) + .setBaseConfig(baseInstanceConfigName.toString()) + .setDisplayName("Instance Configuration").addAllReplicas(replicas).build(); + final CreateInstanceConfigRequest createInstanceConfigRequest = + CreateInstanceConfigRequest.newBuilder().setParent(ProjectName.of(projectId).toString()) + .setInstanceConfigId(instanceConfigId).setInstanceConfig(instanceConfig).build(); + try { + System.out.printf("Waiting for create operation for %s to complete...\n", + instanceConfigName); + InstanceConfig instanceConfigResult = + instanceAdminClient.createInstanceConfigAsync( + createInstanceConfigRequest).get(5, TimeUnit.MINUTES); + System.out.printf("Created instance configuration %s\n", instanceConfigResult.getName()); + } catch (ExecutionException | TimeoutException e) { + System.out.printf( + "Error: Creating instance configuration %s failed with error message %s\n", + instanceConfig.getName(), e.getMessage()); + } catch (InterruptedException e) { + System.out.println( + "Error: Waiting for createInstanceConfig operation to finish was interrupted"); + } + } + } +} +// [END spanner_create_instance_config] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateInstanceExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateInstanceExample.java new file mode 100644 index 000000000000..c5efd3956fa5 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateInstanceExample.java @@ -0,0 +1,77 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_create_instance] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient; +import com.google.spanner.admin.instance.v1.CreateInstanceRequest; +import com.google.spanner.admin.instance.v1.Instance; +import com.google.spanner.admin.instance.v1.InstanceConfigName; +import com.google.spanner.admin.instance.v1.ProjectName; +import java.util.concurrent.ExecutionException; + +class CreateInstanceExample { + + static void createInstance() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + createInstance(projectId, instanceId); + } + + static void createInstance(String projectId, String instanceId) { + // Set Instance configuration. + int nodeCount = 2; + String displayName = "Descriptive name"; + + // Create an Instance object that will be used to create the instance. + Instance instance = + Instance.newBuilder() + .setDisplayName(displayName) + .setEdition(Instance.Edition.STANDARD) + .setNodeCount(nodeCount) + .setConfig(InstanceConfigName.of(projectId, "regional-us-east4").toString()) + .build(); + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) { + + // Wait for the createInstance operation to finish. 
+ Instance createdInstance = + instanceAdminClient + .createInstanceAsync( + CreateInstanceRequest.newBuilder() + .setParent(ProjectName.of(projectId).toString()) + .setInstanceId(instanceId) + .setInstance(instance) + .build()) + .get(); + System.out.printf("Instance %s was successfully created%n", createdInstance.getName()); + } catch (ExecutionException e) { + System.out.printf( + "Error: Creating instance %s failed with error message %s%n", + instance.getName(), e.getMessage()); + } catch (InterruptedException e) { + System.out.println("Error: Waiting for createInstance operation to finish was interrupted"); + } + } +} +// [END spanner_create_instance] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateInstancePartitionSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateInstancePartitionSample.java new file mode 100644 index 000000000000..0e547bdaf7ef --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateInstancePartitionSample.java @@ -0,0 +1,80 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_create_instance_partition] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient; +import com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.InstanceConfigName; +import com.google.spanner.admin.instance.v1.InstanceName; +import com.google.spanner.admin.instance.v1.InstancePartition; +import java.util.concurrent.ExecutionException; + +class CreateInstancePartitionSample { + + static void createInstancePartition() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String instancePartitionId = "my-instance-partition"; + createInstancePartition(projectId, instanceId, instancePartitionId); + } + + static void createInstancePartition( + String projectId, String instanceId, String instancePartitionId) { + // Set instance partition configuration. + int nodeCount = 1; + String displayName = "Descriptive name"; + + // Create an InstancePartition object that will be used to create the instance partition. + InstancePartition instancePartition = + InstancePartition.newBuilder() + .setDisplayName(displayName) + .setNodeCount(nodeCount) + .setConfig(InstanceConfigName.of(projectId, "nam3").toString()) + .build(); + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) { + + // Wait for the createInstancePartition operation to finish. 
+ InstancePartition createdInstancePartition = + instanceAdminClient + .createInstancePartitionAsync( + CreateInstancePartitionRequest.newBuilder() + .setParent(InstanceName.of(projectId, instanceId).toString()) + .setInstancePartitionId(instancePartitionId) + .setInstancePartition(instancePartition) + .build()) + .get(); + System.out.printf( + "Instance partition %s was successfully created%n", createdInstancePartition.getName()); + } catch (ExecutionException e) { + System.out.printf( + "Error: Creating instance partition %s failed with error message %s%n", + instancePartition.getName(), e.getMessage()); + } catch (InterruptedException e) { + System.out.println( + "Error: Waiting for createInstancePartition operation to finish was interrupted"); + } + } +} +// [END spanner_create_instance_partition] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithAsymmetricAutoscalingConfigExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithAsymmetricAutoscalingConfigExample.java new file mode 100644 index 000000000000..b4c4f8736e28 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithAsymmetricAutoscalingConfigExample.java @@ -0,0 +1,105 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_create_instance_with_asymmetric_autoscaling_config] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient; +import com.google.spanner.admin.instance.v1.AutoscalingConfig; +import com.google.spanner.admin.instance.v1.CreateInstanceRequest; +import com.google.spanner.admin.instance.v1.Instance; +import com.google.spanner.admin.instance.v1.InstanceConfigName; +import com.google.spanner.admin.instance.v1.ProjectName; +import com.google.spanner.admin.instance.v1.ReplicaSelection; +import java.util.concurrent.ExecutionException; + +class CreateInstanceWithAsymmetricAutoscalingConfigExample { + + static void createInstance() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + createInstance(projectId, instanceId); + } + + static void createInstance(String projectId, String instanceId) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService(); + InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) { + // Set Instance configuration. + String configId = "nam-eur-asia3"; + String displayName = "Descriptive name"; + + // Create an autoscaling config. + // When autoscaling_config is enabled, node_count and processing_units fields + // need not be specified. + // The read-only replicas listed in the asymmetric autoscaling options scale independently + // from other replicas. 
+ AutoscalingConfig autoscalingConfig = + AutoscalingConfig.newBuilder() + .setAutoscalingLimits( + AutoscalingConfig.AutoscalingLimits.newBuilder().setMinNodes(1).setMaxNodes(2)) + .setAutoscalingTargets( + AutoscalingConfig.AutoscalingTargets.newBuilder() + .setHighPriorityCpuUtilizationPercent(65) + .setStorageUtilizationPercent(95)) + .addAsymmetricAutoscalingOptions( + AutoscalingConfig.AsymmetricAutoscalingOption.newBuilder() + .setReplicaSelection(ReplicaSelection.newBuilder().setLocation("europe-west1"))) + .addAsymmetricAutoscalingOptions( + AutoscalingConfig.AsymmetricAutoscalingOption.newBuilder() + .setReplicaSelection(ReplicaSelection.newBuilder().setLocation("europe-west4"))) + .addAsymmetricAutoscalingOptions( + AutoscalingConfig.AsymmetricAutoscalingOption.newBuilder() + .setReplicaSelection(ReplicaSelection.newBuilder().setLocation("asia-east1"))) + .build(); + Instance instance = + Instance.newBuilder() + .setAutoscalingConfig(autoscalingConfig) + .setDisplayName(displayName) + .setConfig( + InstanceConfigName.of(projectId, configId).toString()) + .build(); + + // Creates a new instance + System.out.printf("Creating instance %s.%n", instanceId); + try { + // Wait for the createInstance operation to finish. 
+ Instance instanceResult = instanceAdminClient.createInstanceAsync( + CreateInstanceRequest.newBuilder() + .setParent(ProjectName.of(projectId).toString()) + .setInstanceId(instanceId) + .setInstance(instance) + .build()).get(); + System.out.printf("Asymmetric Autoscaling instance %s was successfully created%n", + instanceResult.getName()); + } catch (ExecutionException e) { + System.out.printf( + "Error: Creating instance %s failed with error message %s%n", + instance.getName(), e.getMessage()); + } catch (InterruptedException e) { + System.out.println("Error: Waiting for createInstance operation to finish was interrupted"); + } + } + } +} +// [END spanner_create_instance_with_asymmetric_autoscaling_config] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithAutoscalingConfigExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithAutoscalingConfigExample.java new file mode 100644 index 000000000000..4d0793820afd --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithAutoscalingConfigExample.java @@ -0,0 +1,95 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_create_instance_with_autoscaling_config] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient; +import com.google.spanner.admin.instance.v1.AutoscalingConfig; +import com.google.spanner.admin.instance.v1.CreateInstanceRequest; +import com.google.spanner.admin.instance.v1.Instance; +import com.google.spanner.admin.instance.v1.Instance.Edition; +import com.google.spanner.admin.instance.v1.InstanceConfigName; +import com.google.spanner.admin.instance.v1.ProjectName; +import java.util.concurrent.ExecutionException; + +class CreateInstanceWithAutoscalingConfigExample { + + static void createInstance() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + createInstance(projectId, instanceId); + } + + static void createInstance(String projectId, String instanceId) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService(); + InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) { + // Set Instance configuration. + String configId = "regional-us-east4"; + String displayName = "Descriptive name"; + + // Create an autoscaling config. + // When autoscaling_config is enabled, node_count and processing_units fields + // need not be specified. 
+ AutoscalingConfig autoscalingConfig = + AutoscalingConfig.newBuilder() + .setAutoscalingLimits( + AutoscalingConfig.AutoscalingLimits.newBuilder().setMinNodes(1).setMaxNodes(2)) + .setAutoscalingTargets( + AutoscalingConfig.AutoscalingTargets.newBuilder() + .setHighPriorityCpuUtilizationPercent(65) + .setStorageUtilizationPercent(95)) + .build(); + Instance instance = + Instance.newBuilder() + .setAutoscalingConfig(autoscalingConfig) + .setDisplayName(displayName) + .setConfig( + InstanceConfigName.of(projectId, configId).toString()) + .setEdition(Edition.ENTERPRISE) + .build(); + + // Creates a new instance + System.out.printf("Creating instance %s.%n", instanceId); + try { + // Wait for the createInstance operation to finish. + Instance instanceResult = instanceAdminClient.createInstanceAsync( + CreateInstanceRequest.newBuilder() + .setParent(ProjectName.of(projectId).toString()) + .setInstanceId(instanceId) + .setInstance(instance) + .build()).get(); + System.out.printf("Autoscaler instance %s was successfully created%n", + instanceResult.getName()); + } catch (ExecutionException e) { + System.out.printf( + "Error: Creating instance %s failed with error message %s%n", + instance.getName(), e.getMessage()); + } catch (InterruptedException e) { + System.out.println("Error: Waiting for createInstance operation to finish was interrupted"); + } + } + } +} +// [END spanner_create_instance_with_autoscaling_config] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithProcessingUnitsExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithProcessingUnitsExample.java new file mode 100644 index 000000000000..51133194744a --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithProcessingUnitsExample.java @@ -0,0 +1,80 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use 
this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +//[START spanner_create_instance_with_processing_units] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient; +import com.google.spanner.admin.instance.v1.CreateInstanceRequest; +import com.google.spanner.admin.instance.v1.Instance; +import com.google.spanner.admin.instance.v1.InstanceConfigName; +import com.google.spanner.admin.instance.v1.ProjectName; + +class CreateInstanceWithProcessingUnitsExample { + + static void createInstance() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + createInstance(projectId, instanceId); + } + + static void createInstance(String projectId, String instanceId) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService(); + InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) { + + // Set Instance configuration. + String configId = "regional-us-east4"; + // This will create an instance with the processing power of 0.2 nodes. 
+ int processingUnits = 500; + String displayName = "Descriptive name"; + + try { + // Creates a new instance + System.out.printf("Creating instance %s.%n", instanceId); + Instance instance = + Instance.newBuilder() + .setDisplayName(displayName) + .setProcessingUnits(processingUnits) + .setConfig( + InstanceConfigName.of(projectId, configId).toString()) + .build(); + // Wait for the createInstance operation to finish. + System.out.printf("Waiting for operation on %s to complete...%n", instanceId); + Instance createdInstance = instanceAdminClient.createInstanceAsync( + CreateInstanceRequest.newBuilder() + .setParent(ProjectName.of(projectId).toString()) + .setInstanceId(instanceId) + .setInstance(instance) + .build()).get(); + + System.out.printf("Created instance %s.%n", createdInstance.getName()); + System.out.printf("Instance %s has %d processing units.%n", createdInstance.getName(), + createdInstance.getProcessingUnits()); + } catch (Exception e) { + System.out.printf("Error: %s.%n", e.getMessage()); + } + } + } +} +//[END spanner_create_instance_with_processing_units] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithoutDefaultBackupSchedulesExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithoutDefaultBackupSchedulesExample.java new file mode 100644 index 000000000000..ed370949699b --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithoutDefaultBackupSchedulesExample.java @@ -0,0 +1,77 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_create_instance_without_default_backup_schedule] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient; +import com.google.spanner.admin.instance.v1.CreateInstanceRequest; +import com.google.spanner.admin.instance.v1.Instance; +import com.google.spanner.admin.instance.v1.InstanceConfigName; +import com.google.spanner.admin.instance.v1.ProjectName; +import java.util.concurrent.ExecutionException; + +class CreateInstanceWithoutDefaultBackupSchedulesExample { + + static void createInstanceWithoutDefaultBackupSchedules() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + createInstanceWithoutDefaultBackupSchedules(projectId, instanceId); + } + + static void createInstanceWithoutDefaultBackupSchedules(String projectId, String instanceId) { + // Set Instance configuration. + int nodeCount = 2; + String displayName = "Descriptive name"; + + // Create an Instance object that will be used to create the instance. 
+ Instance instance = + Instance.newBuilder() + .setDisplayName(displayName) + .setDefaultBackupScheduleType(Instance.DefaultBackupScheduleType.NONE) + .setNodeCount(nodeCount) + .setConfig(InstanceConfigName.of(projectId, "regional-us-east4").toString()) + .build(); + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) { + + // Wait for the createInstance operation to finish. + Instance createdInstance = + instanceAdminClient + .createInstanceAsync( + CreateInstanceRequest.newBuilder() + .setParent(ProjectName.of(projectId).toString()) + .setInstanceId(instanceId) + .setInstance(instance) + .build()) + .get(); + System.out.printf("Instance %s was successfully created%n", createdInstance.getName()); + } catch (ExecutionException e) { + System.out.printf( + "Error: Creating instance %s failed with error message %s%n", + instance.getName(), e.getMessage()); + } catch (InterruptedException e) { + System.out.println("Error: Waiting for createInstance operation to finish was interrupted"); + } + } +} +// [END spanner_create_instance_without_default_backup_schedule] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateSequenceSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateSequenceSample.java new file mode 100644 index 000000000000..757921080d29 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateSequenceSample.java @@ -0,0 +1,99 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_create_sequence] + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.DatabaseName; +import java.util.Objects; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class CreateSequenceSample { + + static void createSequence() { + // TODO(developer): Replace these variables before running the sample. 
+ final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + createSequence(projectId, instanceId, databaseId); + } + + static void createSequence(String projectId, String instanceId, String databaseId) { + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + databaseAdminClient + .updateDatabaseDdlAsync( + DatabaseName.of(projectId, instanceId, databaseId), + ImmutableList.of( + "CREATE SEQUENCE Seq OPTIONS (sequence_kind = 'bit_reversed_positive')", + "CREATE TABLE Customers (CustomerId INT64 DEFAULT " + + "(GET_NEXT_SEQUENCE_VALUE(SEQUENCE Seq)), CustomerName STRING(1024)) " + + "PRIMARY KEY (CustomerId)")) + .get(5, TimeUnit.MINUTES); + + System.out.println( + "Created Seq sequence and Customers table, where the key column CustomerId " + + "uses the sequence as a default value"); + + final DatabaseClient dbClient = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + + Long insertCount = + dbClient + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet rs = + transaction.executeQuery( + Statement.of( + "INSERT INTO Customers (CustomerName) VALUES " + + "('Alice'), ('David'), ('Marc') THEN RETURN CustomerId"))) { + while (rs.next()) { + System.out.printf( + "Inserted customer record with CustomerId: %d\n", rs.getLong(0)); + } + return Objects.requireNonNull(rs.getStats()).getRowCountExact(); + } + }); + System.out.printf("Number of customer records inserted is: %d\n", insertCount); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (TimeoutException e) { + // If the operation timed out propagate the timeout + throw SpannerExceptionFactory.propagateTimeout(e); + } + } +} +// [END spanner_create_sequence] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateTableWithForeignKeyDeleteCascadeSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateTableWithForeignKeyDeleteCascadeSample.java new file mode 100644 index 000000000000..c9484916a4ed --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CreateTableWithForeignKeyDeleteCascadeSample.java @@ -0,0 +1,66 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_create_table_with_foreign_key_delete_cascade] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.DatabaseName; + +class CreateTableWithForeignKeyDeleteCascadeSample { + + static void createForeignKeyDeleteCascadeConstraint() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + createForeignKeyDeleteCascadeConstraint(projectId, instanceId, databaseId); + } + + static void createForeignKeyDeleteCascadeConstraint( + String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + databaseAdminClient.updateDatabaseDdlAsync( + DatabaseName.of(projectId, instanceId, databaseId), + ImmutableList.of( + "CREATE TABLE Customers (\n" + + " CustomerId INT64 NOT NULL,\n" + + " CustomerName STRING(62) NOT NULL,\n" + + " ) PRIMARY KEY (CustomerId)", + "CREATE TABLE ShoppingCarts (\n" + + " CartId INT64 NOT NULL,\n" + + " CustomerId INT64 NOT NULL,\n" + + " CustomerName STRING(62) NOT NULL,\n" + + " CONSTRAINT FKShoppingCartsCustomerId FOREIGN KEY (CustomerId)\n" + + " REFERENCES Customers (CustomerId) ON DELETE CASCADE\n" + + " ) PRIMARY KEY (CartId)\n")); + + System.out.printf( + String.format( + "Created Customers and ShoppingCarts table with FKShoppingCartsCustomerId\n" + + "foreign key constraint on database %s on instance %s\n", + databaseId, instanceId)); + } + } +} +// [END spanner_create_table_with_foreign_key_delete_cascade] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/CustomTimeoutAndRetrySettingsExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CustomTimeoutAndRetrySettingsExample.java new file mode 100644 index 000000000000..27051e299c98 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/CustomTimeoutAndRetrySettingsExample.java @@ -0,0 +1,92 @@ +/* + * Copyright 2020 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +//[START spanner_set_custom_timeout_and_retry] + +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.StatusCode.Code; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import org.threeten.bp.Duration; + +class CustomTimeoutAndRetrySettingsExample { + + static void executeSqlWithCustomTimeoutAndRetrySettings() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + executeSqlWithCustomTimeoutAndRetrySettings(projectId, instanceId, databaseId); + } + + // Create a Spanner client with custom ExecuteSql timeout and retry settings. + static void executeSqlWithCustomTimeoutAndRetrySettings( + String projectId, String instanceId, String databaseId) { + SpannerOptions.Builder builder = SpannerOptions.newBuilder().setProjectId(projectId); + // Set custom timeout and retry settings for the ExecuteSql RPC. + // This must be done in a separate chain as the setRetryableCodes and setRetrySettings methods + // return a UnaryCallSettings.Builder instead of a SpannerOptions.Builder. + builder + .getSpannerStubSettingsBuilder() + .executeSqlSettings() + // Configure which errors should be retried. 
+ .setRetryableCodes(Code.UNAVAILABLE) + .setRetrySettings( + RetrySettings.newBuilder() + // Configure retry delay settings. + // The initial amount of time to wait before retrying the request. + .setInitialRetryDelay(Duration.ofMillis(500)) + // The maximum amount of time to wait before retrying. I.e. after this value is + // reached, the wait time will not increase further by the multiplier. + .setMaxRetryDelay(Duration.ofSeconds(16)) + // The previous wait time is multiplied by this multiplier to come up with the next + // wait time, until the max is reached. + .setRetryDelayMultiplier(1.5) + + // Configure RPC and total timeout settings. + // Timeout for the first RPC call. Subsequent retries will be based off this value. + .setInitialRpcTimeout(Duration.ofSeconds(60)) + // The max for the per RPC timeout. + .setMaxRpcTimeout(Duration.ofSeconds(60)) + // Controls the change of timeout for each retry. + .setRpcTimeoutMultiplier(1.0) + // The timeout for all calls (first call + all retries). + .setTotalTimeout(Duration.ofSeconds(60)) + .build()); + // Create a Spanner client using the custom retry and timeout settings. 
+ try (Spanner spanner = builder.build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + client + .readWriteTransaction() + .run(transaction -> { + String sql = + "INSERT INTO Singers (SingerId, FirstName, LastName)\n" + + "VALUES (20, 'George', 'Washington')"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record inserted.%n", rowCount); + return null; + }); + } + } +} +// [END spanner_set_custom_timeout_and_retry] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/DatabaseAddSplitPointsSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/DatabaseAddSplitPointsSample.java new file mode 100644 index 000000000000..390ac6c3b215 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/DatabaseAddSplitPointsSample.java @@ -0,0 +1,121 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_database_add_split_points] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.protobuf.ListValue; +import com.google.protobuf.Value; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.SplitPoints; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class DatabaseAddSplitPointsSample { + + /*** + * Assume DDL for the underlying database: + *
<pre>{@code
+   * CREATE TABLE Singers (
+   * SingerId INT64 NOT NULL,
+   * FirstName STRING(1024),
+   * LastName STRING(1024),
+   *  SingerInfo BYTES(MAX),
+   * ) PRIMARY KEY(SingerId);
+   *
+   *
+   * CREATE INDEX SingersByFirstLastName ON Singers(FirstName, LastName);
+   * }</pre>
    + */ + + static void addSplitPoints() throws IOException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + addSplitPoints(projectId, instanceId, databaseId); + } + + static void addSplitPoints(String projectId, String instanceId, String databaseId) + throws IOException { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + List splitPoints = new ArrayList<>(); + + // table key + com.google.spanner.admin.database.v1.SplitPoints splitPointForTable = + SplitPoints.newBuilder() + .setTable("Singers") + .addKeys( + com.google.spanner.admin.database.v1.SplitPoints.Key.newBuilder() + .setKeyParts( + ListValue.newBuilder() + .addValues(Value.newBuilder().setStringValue("42").build()) + .build())) + .build(); + + // index key without table key part + com.google.spanner.admin.database.v1.SplitPoints splitPointForIndex = + SplitPoints.newBuilder() + .setIndex("SingersByFirstLastName") + .addKeys( + com.google.spanner.admin.database.v1.SplitPoints.Key.newBuilder() + .setKeyParts( + ListValue.newBuilder() + .addValues(Value.newBuilder().setStringValue("John").build()) + .addValues(Value.newBuilder().setStringValue("Doe").build()) + .build())) + .build(); + + // index key with table key part, first key is the index key and second is the table key + com.google.spanner.admin.database.v1.SplitPoints splitPointForIndexWitTableKey = + SplitPoints.newBuilder() + .setIndex("SingersByFirstLastName") + .addKeys( + com.google.spanner.admin.database.v1.SplitPoints.Key.newBuilder() + .setKeyParts( + ListValue.newBuilder() + .addValues(Value.newBuilder().setStringValue("Jane").build()) + .addValues(Value.newBuilder().setStringValue("Doe").build()) + .build())) + .addKeys( + 
com.google.spanner.admin.database.v1.SplitPoints.Key.newBuilder() + .setKeyParts( + ListValue.newBuilder() + .addValues(Value.newBuilder().setStringValue("38").build()) + .build())) + .build(); + + splitPoints.add(splitPointForTable); + splitPoints.add(splitPointForIndex); + splitPoints.add(splitPointForIndexWitTableKey); + databaseAdminClient.addSplitPoints( + DatabaseName.of(projectId, instanceId, databaseId), splitPoints); + + } catch (Exception e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } + } +} +// [END spanner_database_add_split_points] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/DeleteBackupScheduleSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/DeleteBackupScheduleSample.java new file mode 100644 index 000000000000..e87a1fcb66e5 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/DeleteBackupScheduleSample.java @@ -0,0 +1,50 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_delete_backup_schedule] + +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.spanner.admin.database.v1.BackupScheduleName; +import com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest; +import java.io.IOException; + +class DeleteBackupScheduleSample { + + static void deleteBackupSchedule() throws IOException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String backupScheduleId = "my-backup-schedule"; + deleteBackupSchedule(projectId, instanceId, databaseId, backupScheduleId); + } + + static void deleteBackupSchedule( + String projectId, String instanceId, String databaseId, String backupScheduleId) + throws IOException { + try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) { + BackupScheduleName backupScheduleName = + BackupScheduleName.of(projectId, instanceId, databaseId, backupScheduleId); + databaseAdminClient.deleteBackupSchedule( + DeleteBackupScheduleRequest.newBuilder().setName(backupScheduleName.toString()).build()); + System.out.println( + String.format("Deleted backup schedule: %s", backupScheduleName.toString())); + } + } +} +// [END spanner_delete_backup_schedule] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/DeleteInstanceConfigSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/DeleteInstanceConfigSample.java new file mode 100644 index 000000000000..c2da7b300008 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/DeleteInstanceConfigSample.java @@ -0,0 +1,61 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_delete_instance_config] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient; +import com.google.spanner.admin.instance.v1.DeleteInstanceConfigRequest; +import com.google.spanner.admin.instance.v1.InstanceConfigName; + +class DeleteInstanceConfigSample { + + static void deleteInstanceConfig() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceConfigId = "custom-user-config"; + deleteInstanceConfig(projectId, instanceConfigId); + } + + static void deleteInstanceConfig(String projectId, String instanceConfigId) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService(); + InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) { + final InstanceConfigName instanceConfigName = InstanceConfigName.of(projectId, + instanceConfigId); + final DeleteInstanceConfigRequest request = + DeleteInstanceConfigRequest.newBuilder().setName(instanceConfigName.toString()).build(); + + try { + System.out.printf("Deleting %s...\n", instanceConfigName); + instanceAdminClient.deleteInstanceConfig(request); + System.out.printf("Deleted instance configuration %s\n", instanceConfigName); + } catch (SpannerException e) { + System.out.printf( + "Error: Deleting instance configuration %s failed with error message: %s\n", + instanceConfigName, e.getMessage()); + } + } + } +} +// [END spanner_delete_instance_config] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/DeleteUsingDmlReturningSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/DeleteUsingDmlReturningSample.java new file mode 100644 index 000000000000..c7319677980b --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/DeleteUsingDmlReturningSample.java @@ -0,0 +1,74 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_delete_dml_returning] + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; + +public class DeleteUsingDmlReturningSample { + + static void deleteUsingDmlReturningSample() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + deleteUsingDmlReturningSample(projectId, instanceId, databaseId); + } + + static void deleteUsingDmlReturningSample( + String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService()) { + final DatabaseClient dbClient = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + // Delete records from SINGERS table satisfying a + // particular condition and returns the SingerId + // and FullName column of the deleted records using + // ‘THEN RETURN SingerId, FullName’. + // It is also possible to return all columns of all the + // deleted records by using ‘THEN RETURN *’. + dbClient + .readWriteTransaction() + .run( + transaction -> { + String sql = + "DELETE FROM Singers WHERE FirstName = 'Alice' THEN RETURN SingerId, FullName"; + + // readWriteTransaction.executeQuery(..) API should be used for executing + // DML statements with RETURNING clause. 
+ try (ResultSet resultSet = transaction.executeQuery(Statement.of(sql))) { + while (resultSet.next()) { + System.out.printf("%d %s\n", resultSet.getLong(0), resultSet.getString(1)); + } + System.out.printf( + "Deleted row(s) count: %d\n", resultSet.getStats().getRowCountExact()); + } + return null; + }); + } + } +} +// [END spanner_delete_dml_returning] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/DirectedReadSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/DirectedReadSample.java new file mode 100644 index 000000000000..141d9e282445 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/DirectedReadSample.java @@ -0,0 +1,114 @@ +/* + * Copyright 2023 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_directed_read] +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas; +import com.google.spanner.v1.DirectedReadOptions.IncludeReplicas; +import com.google.spanner.v1.DirectedReadOptions.ReplicaSelection; + +public class DirectedReadSample { + static void directedRead() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + directedRead(projectId, instanceId, databaseId); + } + + static void directedRead(String projectId, String instanceId, String databaseId) { + // Only one of excludeReplicas or includeReplicas can be set + // Each accepts a list of replicaSelections which contains location and type + // * `location` - The location must be one of the regions within the + // multi-region configuration of your database. + // * `type` - The type of the replica + // Some examples of using replicaSelectors are: + // * `location:us-east1` --> The "us-east1" replica(s) of any available type + // will be used to process the request. + // * `type:READ_ONLY` --> The "READ_ONLY" type replica(s) in nearest + // . available location will be used to process the + // request. + // * `location:us-east1 type:READ_ONLY` --> The "READ_ONLY" type replica(s) + // in location "us-east1" will be used to process + // the request. 
+ // includeReplicas also contains an option called autoFailoverDisabled, which when set to true + // will instruct Spanner to not route requests to a replica outside the + // includeReplicas list when all the specified replicas are unavailable + // or unhealthy. Default value is `false`. + final DirectedReadOptions directedReadOptionsForClient = + DirectedReadOptions.newBuilder() + .setExcludeReplicas( + ExcludeReplicas.newBuilder() + .addReplicaSelections( + ReplicaSelection.newBuilder().setLocation("us-east4").build()) + .build()) + .build(); + + // You can set default `DirectedReadOptions` for a Spanner client. These options will be applied + // to all read-only transactions that are executed by this client, unless specific + // DirectedReadOptions are set for a query. + // Directed read can only be used for read-only transactions. The default options will be + // ignored for any read/write transaction that the client executes. + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .setDirectedReadOptions(directedReadOptionsForClient) + .build() + .getService()) { + final DatabaseClient dbClient = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + + // DirectedReadOptions at request level will override the options set at + // client level (through SpannerOptions). + final DirectedReadOptions directedReadOptionsForRequest = + DirectedReadOptions.newBuilder() + .setIncludeReplicas( + IncludeReplicas.newBuilder() + .addReplicaSelections( + ReplicaSelection.newBuilder() + .setType(ReplicaSelection.Type.READ_WRITE) + .build()) + .setAutoFailoverDisabled(true) + .build()) + .build(); + + // Read rows while passing DirectedReadOptions directly to the query. 
+ try (ResultSet rs = + dbClient + .singleUse() + .executeQuery( + Statement.of("SELECT SingerId, AlbumId, AlbumTitle FROM Albums"), + Options.directedRead(directedReadOptionsForRequest))) { + while (rs.next()) { + System.out.printf( + "SingerId: %d, AlbumId: %d, AlbumTitle: %s\n", + rs.getLong(0), rs.getLong(1), rs.getString(2)); + } + System.out.println("Successfully executed read-only transaction with directedReadOptions"); + } + } + } +} +// [END spanner_directed_read] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/DropForeignKeyConstraintDeleteCascadeSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/DropForeignKeyConstraintDeleteCascadeSample.java new file mode 100644 index 000000000000..7c35b9f62186 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/DropForeignKeyConstraintDeleteCascadeSample.java @@ -0,0 +1,57 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_drop_foreign_key_constraint_delete_cascade] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.DatabaseName; + +class DropForeignKeyConstraintDeleteCascadeSample { + + static void deleteForeignKeyDeleteCascadeConstraint() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + deleteForeignKeyDeleteCascadeConstraint(projectId, instanceId, databaseId); + } + + static void deleteForeignKeyDeleteCascadeConstraint( + String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + databaseAdminClient.updateDatabaseDdlAsync( + DatabaseName.of(projectId, instanceId, databaseId), + ImmutableList.of( + "ALTER TABLE ShoppingCarts\n" + + " DROP CONSTRAINT FKShoppingCartsCustomerName\n")); + + System.out.printf( + String.format( + "Altered ShoppingCarts table to drop FKShoppingCartsCustomerName\n" + + "foreign key constraint on database %s on instance %s\n", + databaseId, instanceId)); + } + } +} +// [END spanner_drop_foreign_key_constraint_delete_cascade] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/DropSequenceSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/DropSequenceSample.java new file mode 100644 index 000000000000..9f1b32caed91 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/DropSequenceSample.java @@ -0,0 +1,67 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 
(the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_drop_sequence] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.DatabaseName; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class DropSequenceSample { + + static void dropSequence() { + // TODO(developer): Replace these variables before running the sample. 
+ final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + dropSequence(projectId, instanceId, databaseId); + } + + static void dropSequence(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + databaseAdminClient + .updateDatabaseDdlAsync(DatabaseName.of(projectId, instanceId, databaseId), + ImmutableList.of( + "ALTER TABLE Customers ALTER COLUMN CustomerId DROP DEFAULT", + "DROP SEQUENCE Seq")) + .get(5, TimeUnit.MINUTES); + System.out.println( + "Altered Customers table to drop DEFAULT from CustomerId column " + + "and dropped the Seq sequence"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (TimeoutException e) { + // If the operation timed out propagate the timeout + throw SpannerExceptionFactory.propagateTimeout(e); + } + } +} +// [END spanner_drop_sequence] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/EnableFineGrainedAccess.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/EnableFineGrainedAccess.java new file mode 100644 index 000000000000..e4e35bd95aad --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/EnableFineGrainedAccess.java @@ -0,0 +1,110 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_enable_fine_grained_access] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.iam.v1.Binding; +import com.google.iam.v1.GetIamPolicyRequest; +import com.google.iam.v1.GetPolicyOptions; +import com.google.iam.v1.Policy; +import com.google.iam.v1.SetIamPolicyRequest; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.type.Expr; + +public class EnableFineGrainedAccess { + + static void enableFineGrainedAccess() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String iamMember = "user:alice@example.com"; + String role = "my-role"; + String title = "my-condition-title"; + enableFineGrainedAccess(projectId, instanceId, databaseId, iamMember, title, role); + } + + static void enableFineGrainedAccess( + String projectId, + String instanceId, + String databaseId, + String iamMember, + String title, + String role) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + final GetPolicyOptions options = + GetPolicyOptions.newBuilder().setRequestedPolicyVersion(3).build(); + final GetIamPolicyRequest getRequest = + GetIamPolicyRequest.newBuilder() + .setResource(DatabaseName.of(projectId, instanceId, databaseId).toString()) + .setOptions(options).build(); + final Policy policy = databaseAdminClient.getIamPolicy(getRequest); + int policyVersion = policy.getVersion(); + // The policy in the response from getDatabaseIAMPolicy might use the policy version + // that you specified, or it might use a lower policy version. For example, if you + // specify version 3, but the policy has no conditional role bindings, the response + // uses version 1. Valid values are 0, 1, and 3. 
+ if (policy.getVersion() < 3) { + // conditional role bindings work with policy version 3 + policyVersion = 3; + } + + Binding binding1 = + Binding.newBuilder() + .setRole("roles/spanner.fineGrainedAccessUser") + .addAllMembers(ImmutableList.of(iamMember)) + .build(); + + Binding binding2 = + Binding.newBuilder() + .setRole("roles/spanner.databaseRoleUser") + .setCondition( + Expr.newBuilder().setDescription(title).setExpression( + String.format("resource.name.endsWith(\"/databaseRoles/%s\")", role) + ).setTitle(title).build()) + .addAllMembers(ImmutableList.of(iamMember)) + .build(); + ImmutableList bindings = + ImmutableList.builder() + .addAll(policy.getBindingsList()) + .add(binding1) + .add(binding2) + .build(); + Policy policyWithConditions = + Policy.newBuilder() + .setVersion(policyVersion) + .setEtag(policy.getEtag()) + .addAllBindings(bindings) + .build(); + final SetIamPolicyRequest setRequest = + SetIamPolicyRequest.newBuilder() + .setResource(DatabaseName.of(projectId, instanceId, databaseId).toString()) + .setPolicy(policyWithConditions).build(); + final Policy response = databaseAdminClient.setIamPolicy(setRequest); + System.out.printf( + "Enabled fine-grained access in IAM with version %d%n", response.getVersion()); + } + } +} +// [END spanner_enable_fine_grained_access] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/GetBackupScheduleSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/GetBackupScheduleSample.java new file mode 100644 index 000000000000..3cd7e21f9b18 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/GetBackupScheduleSample.java @@ -0,0 +1,53 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_get_backup_schedule] + +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.BackupScheduleName; +import com.google.spanner.admin.database.v1.GetBackupScheduleRequest; +import java.io.IOException; + +class GetBackupScheduleSample { + + static void getBackupSchedule() throws IOException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String backupScheduleId = "my-backup-schedule"; + getBackupSchedule(projectId, instanceId, databaseId, backupScheduleId); + } + + static void getBackupSchedule( + String projectId, String instanceId, String databaseId, String backupScheduleId) + throws IOException { + try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) { + BackupScheduleName backupScheduleName = + BackupScheduleName.of(projectId, instanceId, databaseId, backupScheduleId); + final BackupSchedule backupSchedule = + databaseAdminClient.getBackupSchedule( + GetBackupScheduleRequest.newBuilder().setName(backupScheduleName.toString()).build()); + System.out.println( + String.format( + "Backup schedule: %s\n%s", backupSchedule.getName(), backupSchedule.toString())); + } + } +} +// [END spanner_get_backup_schedule] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/GetCommitStatsSample.java 
b/java-spanner/samples/snippets/src/main/java/com/example/spanner/GetCommitStatsSample.java new file mode 100644 index 000000000000..9adb8449020d --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/GetCommitStatsSample.java @@ -0,0 +1,70 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_get_commit_stats] + +import com.google.cloud.spanner.CommitResponse; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import java.util.Arrays; + +public class GetCommitStatsSample { + + static void getCommitStats() { + // TODO(developer): Replace these variables before running the sample. 
+ final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + final DatabaseClient databaseClient = spanner + .getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + getCommitStats(databaseClient); + } + } + + static void getCommitStats(DatabaseClient databaseClient) { + final CommitResponse commitResponse = databaseClient.writeWithOptions(Arrays.asList( + Mutation.newInsertOrUpdateBuilder("Albums") + .set("SingerId") + .to("1") + .set("AlbumId") + .to("1") + .set("MarketingBudget") + .to("200000") + .build(), + Mutation.newInsertOrUpdateBuilder("Albums") + .set("SingerId") + .to("2") + .set("AlbumId") + .to("2") + .set("MarketingBudget") + .to("400000") + .build() + ), Options.commitStats()); + + System.out.println( + "Updated data with " + commitResponse.getCommitStats().getMutationCount() + " mutations."); + } +} +// [END spanner_get_commit_stats] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/GetDatabaseDdlSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/GetDatabaseDdlSample.java new file mode 100644 index 000000000000..b84f1c0ccc69 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/GetDatabaseDdlSample.java @@ -0,0 +1,51 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +//[START spanner_get_database_ddl] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; + +public class GetDatabaseDdlSample { + + static void getDatabaseDdl() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + getDatabaseDdl(projectId, instanceId, databaseId); + } + + static void getDatabaseDdl( + String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + final GetDatabaseDdlResponse response = + databaseAdminClient.getDatabaseDdl(DatabaseName.of(projectId, instanceId, databaseId)); + System.out.println("Retrieved database DDL for " + databaseId); + for (String ddl : response.getStatementsList()) { + System.out.println(ddl); + } + } + } +} +//[END spanner_get_database_ddl] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/GetInstanceConfigSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/GetInstanceConfigSample.java new file mode 100644 index 000000000000..9dd8690f7523 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/GetInstanceConfigSample.java @@ -0,0 +1,57 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +//[START spanner_get_instance_config] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient; +import com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.InstanceConfigName; + +public class GetInstanceConfigSample { + + static void getInstanceConfig() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceConfigId = "nam6"; + getInstanceConfig(projectId, instanceConfigId); + } + + static void getInstanceConfig(String projectId, String instanceConfigId) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService(); + InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) { + final InstanceConfigName instanceConfigName = InstanceConfigName.of(projectId, + instanceConfigId); + + final InstanceConfig instanceConfig = + instanceAdminClient.getInstanceConfig(instanceConfigName.toString()); + + System.out.printf( + "Available leader options for instance config %s: %s%n", + instanceConfig.getName(), + instanceConfig.getLeaderOptionsList() + ); + } + } +} +//[END spanner_get_instance_config] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/InsertUsingDmlReturningSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/InsertUsingDmlReturningSample.java new file 
mode 100644 index 000000000000..7a3ddd5e1afd --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/InsertUsingDmlReturningSample.java @@ -0,0 +1,76 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_insert_dml_returning] + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; + +public class InsertUsingDmlReturningSample { + + static void insertUsingDmlReturning() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + insertUsingDmlReturning(projectId, instanceId, databaseId); + } + + static void insertUsingDmlReturning(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService()) { + final DatabaseClient dbClient = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + // Insert records into the SINGERS table and returns the + // generated column FullName of the inserted records using + // ‘THEN RETURN FullName’. 
+ // It is also possible to return all columns of all the + // inserted records by using ‘THEN RETURN *’. + dbClient + .readWriteTransaction() + .run( + transaction -> { + String sql = + "INSERT INTO Singers (SingerId, FirstName, LastName) VALUES " + + "(12, 'Melissa', 'Garcia'), " + + "(13, 'Russell', 'Morales'), " + + "(14, 'Jacqueline', 'Long'), " + + "(15, 'Dylan', 'Shaw') THEN RETURN FullName"; + + // readWriteTransaction.executeQuery(..) API should be used for executing + // DML statements with RETURNING clause. + try (ResultSet resultSet = transaction.executeQuery(Statement.of(sql))) { + while (resultSet.next()) { + System.out.println(resultSet.getString(0)); + } + System.out.printf( + "Inserted row(s) count: %d\n", resultSet.getStats().getRowCountExact()); + } + return null; + }); + } + } +} +// [END spanner_insert_dml_returning] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/IsolationLevelAndReadLockModeSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/IsolationLevelAndReadLockModeSample.java new file mode 100644 index 000000000000..ca2e1a9d7511 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/IsolationLevelAndReadLockModeSample.java @@ -0,0 +1,115 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.SpannerOptions.Builder.DefaultReadWriteTransactionOptions; +import com.google.cloud.spanner.Statement; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode; + +public class IsolationLevelAndReadLockModeSample { + + // [START spanner_isolation_level] + static void isolationLevelSetting(DatabaseId db) { + // The isolation level specified at the client-level will be applied to all + // RW transactions. + DefaultReadWriteTransactionOptions transactionOptions = + DefaultReadWriteTransactionOptions.newBuilder() + .setIsolationLevel(IsolationLevel.SERIALIZABLE) + .build(); + SpannerOptions options = + SpannerOptions.newBuilder() + .setDefaultTransactionOptions(transactionOptions) + .build(); + Spanner spanner = options.getService(); + DatabaseClient dbClient = spanner.getDatabaseClient(db); + dbClient + // The isolation level specified at the transaction-level takes precedence + // over the isolation level configured at the client-level. + .readWriteTransaction(Options.isolationLevel(IsolationLevel.REPEATABLE_READ)) + .run(transaction -> { + // Read an AlbumTitle. + String selectSql = + "SELECT AlbumTitle from Albums WHERE SingerId = 1 and AlbumId = 1"; + String title = null; + try (ResultSet resultSet = transaction.executeQuery(Statement.of(selectSql))) { + if (resultSet.next()) { + title = resultSet.getString("AlbumTitle"); + } + } + System.out.printf("Current album title: %s\n", title); + + // Update the title. 
+ String updateSql = + "UPDATE Albums " + + "SET AlbumTitle = 'New Album Title' " + + "WHERE SingerId = 1 and AlbumId = 1"; + long rowCount = transaction.executeUpdate(Statement.of(updateSql)); + System.out.printf("%d record updated.\n", rowCount); + return null; + }); + } + // [END spanner_isolation_level] + + // [START spanner_read_lock_mode] + static void readLockModeSetting(DatabaseId db) { + // The read lock mode specified at the client-level will be applied to all + // RW transactions. + DefaultReadWriteTransactionOptions transactionOptions = + DefaultReadWriteTransactionOptions.newBuilder() + .setReadLockMode(ReadLockMode.OPTIMISTIC) + .build(); + SpannerOptions options = + SpannerOptions.newBuilder() + .setDefaultTransactionOptions(transactionOptions) + .build(); + Spanner spanner = options.getService(); + DatabaseClient dbClient = spanner.getDatabaseClient(db); + dbClient + // The read lock mode specified at the transaction-level takes precedence + // over the read lock mode configured at the client-level. + .readWriteTransaction(Options.readLockMode(ReadLockMode.PESSIMISTIC)) + .run(transaction -> { + // Read an AlbumTitle. + String selectSql = + "SELECT AlbumTitle from Albums WHERE SingerId = 1 and AlbumId = 1"; + String title = null; + try (ResultSet resultSet = transaction.executeQuery(Statement.of(selectSql))) { + if (resultSet.next()) { + title = resultSet.getString("AlbumTitle"); + } + } + System.out.printf("Current album title: %s\n", title); + + // Update the title. 
+ String updateSql = + "UPDATE Albums " + + "SET AlbumTitle = 'New Album Title' " + + "WHERE SingerId = 1 and AlbumId = 1"; + long rowCount = transaction.executeUpdate(Statement.of(updateSql)); + System.out.printf("%d record updated.\n", rowCount); + return null; + }); + } + // [END spanner_read_lock_mode] +} \ No newline at end of file diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/LastStatementSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/LastStatementSample.java new file mode 100644 index 000000000000..ef03ed7d88ac --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/LastStatementSample.java @@ -0,0 +1,70 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; + +/** + * Sample showing how to set the last statement option when a DML statement is the last statement in + * a transaction. + */ +public class LastStatementSample { + + static void insertAndUpdateUsingLastStatement() { + // TODO(developer): Replace these variables before running the sample. 
+ final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + final DatabaseClient databaseClient = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + insertAndUpdateUsingLastStatement(databaseClient); + } + } + + // [START spanner_dml_last_statement] + static void insertAndUpdateUsingLastStatement(DatabaseClient client) { + client + .readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate( + Statement.of( + "INSERT Singers (SingerId, FirstName, LastName)\n" + + "VALUES (54213, 'John', 'Do')")); + System.out.println("New singer inserted."); + + // Pass in the `lastStatement` option to the last DML statement of the transaction. + transaction.executeUpdate( + Statement.of( + "UPDATE Singers SET Singers.LastName = 'Doe' WHERE SingerId = 54213\n"), + Options.lastStatement()); + System.out.println("Singer last name updated."); + + return null; + }); + } + // [END spanner_dml_last_statement] + +} diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/ListBackupSchedulesSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/ListBackupSchedulesSample.java new file mode 100644 index 000000000000..fba708937c06 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/ListBackupSchedulesSample.java @@ -0,0 +1,52 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_list_backup_schedules] + +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.DatabaseName; +import java.io.IOException; + +class ListBackupSchedulesSample { + + static void listBackupSchedules() throws IOException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + listBackupSchedules(projectId, instanceId, databaseId); + } + + static void listBackupSchedules(String projectId, String instanceId, String databaseId) + throws IOException { + try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) { + DatabaseName databaseName = DatabaseName.of(projectId, instanceId, databaseId); + + System.out.println( + String.format("Backup schedules for database '%s'", databaseName.toString())); + for (BackupSchedule backupSchedule : + databaseAdminClient.listBackupSchedules(databaseName).iterateAll()) { + System.out.println( + String.format( + "Backup schedule: %s\n%s", backupSchedule.getName(), backupSchedule.toString())); + } + } + } +} +// [END spanner_list_backup_schedules] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/ListDatabaseRoles.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/ListDatabaseRoles.java new file mode 100644 index 000000000000..e16a55cb7b7f --- /dev/null 
+++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/ListDatabaseRoles.java @@ -0,0 +1,55 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_list_database_roles] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseRolesPage; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseRolesPagedResponse; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.DatabaseRole; + +public class ListDatabaseRoles { + + static void listDatabaseRoles() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + listDatabaseRoles(projectId, instanceId, databaseId); + } + + static void listDatabaseRoles(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + DatabaseName databaseName = DatabaseName.of(projectId, instanceId, databaseId); + ListDatabaseRolesPagedResponse response + = databaseAdminClient.listDatabaseRoles(databaseName); + System.out.println("List of Database roles"); + for (ListDatabaseRolesPage page : response.iteratePages()) { + for (DatabaseRole role : page.iterateAll()) { + System.out.printf("Obtained role %s%n", role.getName()); + } + } + } + } +} +// [END spanner_list_database_roles] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/ListDatabasesSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/ListDatabasesSample.java new file mode 100644 index 000000000000..631b72dc1196 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/ListDatabasesSample.java @@ -0,0 +1,57 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +//[START spanner_list_databases] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabasesPage; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabasesPagedResponse; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.InstanceName; + +public class ListDatabasesSample { + + static void listDatabases() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + listDatabases(projectId, instanceId); + } + + static void listDatabases(String projectId, String instanceId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + ListDatabasesPagedResponse response = + databaseAdminClient.listDatabases(InstanceName.of(projectId, instanceId)); + + System.out.println("Databases for projects/" + projectId + "/instances/" + instanceId); + + for (ListDatabasesPage page : response.iteratePages()) { + for (Database database : page.iterateAll()) { + final String defaultLeader = database.getDefaultLeader().equals("") + ? 
"" : "(default leader = " + database.getDefaultLeader() + ")"; + System.out.println("\t" + database.getName() + " " + defaultLeader); + } + } + } + } +} +//[END spanner_list_databases] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/ListInstanceConfigOperationsSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/ListInstanceConfigOperationsSample.java new file mode 100644 index 000000000000..b42c52126b5d --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/ListInstanceConfigOperationsSample.java @@ -0,0 +1,73 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_list_instance_config_operations] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient; +import com.google.longrunning.Operation; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata; +import com.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest; +import com.google.spanner.admin.instance.v1.ProjectName; + +public class ListInstanceConfigOperationsSample { + + static void listInstanceConfigOperations() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + listInstanceConfigOperations(projectId); + } + + static void listInstanceConfigOperations(String projectId) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService(); + InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) { + final ProjectName projectName = ProjectName.of(projectId); + System.out.printf( + "Getting list of instance config operations for project %s...\n", + projectId); + final ListInstanceConfigOperationsRequest request = + ListInstanceConfigOperationsRequest.newBuilder() + .setParent(projectName.toString()) + .setFilter("(metadata.@type=type.googleapis.com/" + + "google.spanner.admin.instance.v1.CreateInstanceConfigMetadata)").build(); + final Iterable instanceConfigOperations = + instanceAdminClient.listInstanceConfigOperations(request).iterateAll(); + for (Operation operation : instanceConfigOperations) { + CreateInstanceConfigMetadata metadata = + operation.getMetadata().unpack(CreateInstanceConfigMetadata.class); + System.out.printf( + "Create instance config operation for %s is %d%% completed.\n", + metadata.getInstanceConfig().getName(), metadata.getProgress().getProgressPercent()); + } + System.out.printf( + "Obtained list of instance config operations for project %s...\n", + projectName); + } catch (InvalidProtocolBufferException e) { + System.out.printf( + "Error: Listing instance config operations failed with error message %s\n", + e.getMessage()); + } + } +} +// [END spanner_list_instance_config_operations] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/ListInstanceConfigsSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/ListInstanceConfigsSample.java new file mode 100644 index 000000000000..7c5391638b00 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/ListInstanceConfigsSample.java @@ -0,0 +1,54 @@ +/* + * Copyright 2024 Google LLC 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +//[START spanner_list_instance_configs] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient; +import com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.ProjectName; + +public class ListInstanceConfigsSample { + + static void listInstanceConfigs() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + listInstanceConfigs(projectId); + } + + static void listInstanceConfigs(String projectId) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService(); + InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) { + final ProjectName projectName = ProjectName.of(projectId); + for (InstanceConfig instanceConfig : + instanceAdminClient.listInstanceConfigs(projectName).iterateAll()) { + System.out.printf( + "Available leader options for instance config %s: %s%n", + instanceConfig.getName(), + instanceConfig.getLeaderOptionsList() + ); + } + } + } +} +//[END spanner_list_instance_configs] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgAlterSequenceSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgAlterSequenceSample.java new file mode 100644 index 000000000000..a3e4a9a677c9 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgAlterSequenceSample.java @@ -0,0 +1,91 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_postgresql_alter_sequence] + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.DatabaseName; +import java.util.Objects; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class PgAlterSequenceSample { + + static void pgAlterSequence() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + pgAlterSequence(projectId, instanceId, databaseId); + } + + static void pgAlterSequence(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + + databaseAdminClient + .updateDatabaseDdlAsync( + DatabaseName.of(projectId, instanceId, databaseId), + ImmutableList.of("ALTER SEQUENCE Seq SKIP RANGE 1000 5000000")) + .get(5, TimeUnit.MINUTES); + System.out.println( + "Altered Seq sequence to skip an inclusive range between 1000 and 5000000"); + final DatabaseClient dbClient = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + Long insertCount = + dbClient + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet rs = + transaction.executeQuery( + Statement.of( + "INSERT INTO Customers (CustomerName) VALUES " + + "('Lea'), 
('Catalina'), ('Smith') RETURNING CustomerId"))) { + while (rs.next()) { + System.out.printf( + "Inserted customer record with CustomerId: %d\n", rs.getLong(0)); + } + return Objects.requireNonNull(rs.getStats()).getRowCountExact(); + } + }); + System.out.printf("Number of customer records inserted is: %d\n", insertCount); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (TimeoutException e) { + // If the operation timed out propagate the timeout + throw SpannerExceptionFactory.propagateTimeout(e); + } + } +} +// [END spanner_postgresql_alter_sequence] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgAsyncQueryToListAsyncExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgAsyncQueryToListAsyncExample.java new file mode 100644 index 000000000000..63e7205563b6 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgAsyncQueryToListAsyncExample.java @@ -0,0 +1,90 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_postgresql_async_query_to_list] +import com.google.api.core.ApiFuture; +import com.google.cloud.spanner.AsyncResultSet; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class PgAsyncQueryToListAsyncExample { + static class Album { + final long singerId; + final long albumId; + final String albumTitle; + + Album(long singerId, long albumId, String albumTitle) { + this.singerId = singerId; + this.albumId = albumId; + this.albumTitle = albumTitle; + } + } + + static void asyncQueryToList() throws InterruptedException, ExecutionException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + asyncQueryToList(client); + } + } + + // Execute a query asynchronously and transform the result to a list. 
+ static void asyncQueryToList(DatabaseClient client) + throws InterruptedException, ExecutionException, TimeoutException { + ExecutorService executor = Executors.newSingleThreadExecutor(); + ApiFuture> albums; + try (AsyncResultSet resultSet = + client + .singleUse() + .executeQueryAsync(Statement.of("SELECT singerid as \"SingerId\", " + + "albumid as \"AlbumId\", albumtitle as \"AlbumTitle\" " + + "FROM Albums"))) { + // Convert the result set to a list of Albums asynchronously. + albums = + resultSet.toListAsync( + reader -> { + return new Album( + reader.getLong("SingerId"), + reader.getLong("AlbumId"), + reader.getString("AlbumTitle")); + }, + executor); + } + + for (Album album : albums.get(30L, TimeUnit.SECONDS)) { + System.out.printf("%d %d %s%n", album.singerId, album.albumId, album.albumTitle); + } + executor.shutdown(); + } +} +//[END spanner_postgresql_async_query_to_list] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgAsyncRunnerExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgAsyncRunnerExample.java new file mode 100644 index 000000000000..f05b509d4c82 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgAsyncRunnerExample.java @@ -0,0 +1,124 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +//[START spanner_postgresql_async_read_write_transaction] +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.cloud.spanner.AsyncRunner; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.MoreExecutors; +import java.util.Arrays; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +class PgAsyncRunnerExample { + + static void asyncRunner() throws InterruptedException, ExecutionException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + asyncRunner(client); + } + } + + // Execute a read/write transaction asynchronously. + static void asyncRunner(DatabaseClient client) + throws InterruptedException, ExecutionException, TimeoutException { + ExecutorService executor = Executors.newSingleThreadExecutor(); + + // Create an async transaction runner. + AsyncRunner runner = client.runAsync(); + // The transaction returns the total number of rows that were updated as a future array of + // longs. 
+ ApiFuture rowCounts = + runner.runAsync( + txn -> { + // Transfer marketing budget from one album to another. We do it in a + // transaction to ensure that the transfer is atomic. + ApiFuture album1BudgetFut = + txn.readRowAsync("Albums", Key.of(1, 1), ImmutableList.of("MarketingBudget")); + ApiFuture album2BudgetFut = + txn.readRowAsync("Albums", Key.of(2, 2), ImmutableList.of("MarketingBudget")); + + try { + // Transaction will only be committed if this condition still holds at the + // time of commit. Otherwise it will be aborted and the AsyncWork will be + // rerun by the client library. + long transfer = 200_000; + if (album2BudgetFut.get().getLong(0) >= transfer) { + long album1Budget = album1BudgetFut.get().getLong(0); + long album2Budget = album2BudgetFut.get().getLong(0); + + album1Budget += transfer; + album2Budget -= transfer; + Statement updateStatement1 = + Statement.newBuilder( + "UPDATE Albums " + + "SET MarketingBudget = $1 " + + "WHERE SingerId = 1 and AlbumId = 1") + .bind("p1") + .to(album1Budget) + .build(); + Statement updateStatement2 = + Statement.newBuilder( + "UPDATE Albums " + + "SET MarketingBudget = $1 " + + "WHERE SingerId = 2 and AlbumId = 2") + .bind("p1") + .to(album2Budget) + .build(); + return txn.batchUpdateAsync( + ImmutableList.of(updateStatement1, updateStatement2)); + } else { + return ApiFutures.immediateFuture(new long[] {0L, 0L}); + } + } catch (ExecutionException e) { + throw SpannerExceptionFactory.newSpannerException(e.getCause()); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + }, + executor); + + ApiFuture totalUpdateCount = + ApiFutures.transform( + rowCounts, + input -> Arrays.stream(input).sum(), + MoreExecutors.directExecutor()); + System.out.printf("%d records updated.%n", totalUpdateCount.get(30L, TimeUnit.SECONDS)); + executor.shutdown(); + } +} +//[END spanner_postgresql_async_read_write_transaction] diff --git 
a/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgAsyncTransactionManagerExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgAsyncTransactionManagerExample.java new file mode 100644 index 000000000000..d1b20decde6d --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgAsyncTransactionManagerExample.java @@ -0,0 +1,140 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +//[START spanner_postgresql_async_transaction_manager] +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.AsyncTransactionManager; +import com.google.cloud.spanner.AsyncTransactionManager.AsyncTransactionStep; +import com.google.cloud.spanner.AsyncTransactionManager.CommitTimestampFuture; +import com.google.cloud.spanner.AsyncTransactionManager.TransactionContextFuture; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.MoreExecutors; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +class PgAsyncTransactionManagerExample { + + static void asyncTransactionManager() + throws InterruptedException, ExecutionException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + asyncTransactionManager(client); + } + } + + static void asyncTransactionManager(DatabaseClient client) + throws InterruptedException, ExecutionException, TimeoutException { + ExecutorService executor = Executors.newSingleThreadExecutor(); + + AsyncTransactionStep, long[]> updateCounts; + try (AsyncTransactionManager mgr = client.transactionManagerAsync()) { + TransactionContextFuture txn = mgr.beginAsync(); + // Loop to retry aborted errors. + while (true) { + try { + updateCounts = + txn.then( + (transaction, v) -> { + // Execute two reads in parallel and return the result of these as the input + // for the next step of the transaction. + ApiFuture album1BudgetFut = + transaction.readRowAsync( + "Albums", Key.of(1, 1), ImmutableList.of("MarketingBudget")); + ApiFuture album2BudgetFut = + transaction.readRowAsync( + "Albums", Key.of(2, 2), ImmutableList.of("MarketingBudget")); + return ApiFutures.allAsList( + Arrays.asList(album1BudgetFut, album2BudgetFut)); + }, + executor) + // The input of the next step of the transaction is the return value of the + // previous step, i.e. a list containing the marketing budget of two Albums. 
+ .then( + (transaction, budgets) -> { + long album1Budget = budgets.get(0).getLong(0); + long album2Budget = budgets.get(1).getLong(0); + long transfer = 200_000; + if (album2Budget >= transfer) { + album1Budget += transfer; + album2Budget -= transfer; + Statement updateStatement1 = + Statement.newBuilder( + "UPDATE Albums " + + "SET MarketingBudget = $1 " + + "WHERE SingerId = 1 and AlbumId = 1") + .bind("p1") + .to(album1Budget) + .build(); + Statement updateStatement2 = + Statement.newBuilder( + "UPDATE Albums " + + "SET MarketingBudget = $1 " + + "WHERE SingerId = 2 and AlbumId = 2") + .bind("p1") + .to(album2Budget) + .build(); + return transaction.batchUpdateAsync( + ImmutableList.of(updateStatement1, updateStatement2)); + } else { + return ApiFutures.immediateFuture(new long[] {0L, 0L}); + } + }, + executor); + // Commit after the updates. + CommitTimestampFuture commitTsFut = updateCounts.commitAsync(); + // Wait for the transaction to finish and execute a retry if necessary. + commitTsFut.get(); + break; + } catch (AbortedException e) { + txn = mgr.resetForRetryAsync(); + } + } + } + + // Calculate the total update count. + ApiFuture totalUpdateCount = + ApiFutures.transform( + updateCounts, + input -> Arrays.stream(input).sum(), + MoreExecutors.directExecutor()); + System.out.printf("%d records updated.%n", totalUpdateCount.get(30L, TimeUnit.SECONDS)); + executor.shutdown(); + } +} +//[END spanner_postgresql_async_transaction_manager] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgBatchDmlSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgBatchDmlSample.java new file mode 100644 index 000000000000..6851dfd366e5 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgBatchDmlSample.java @@ -0,0 +1,85 @@ +/* + * Copyright 2022 Google Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_postgresql_batch_dml] + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import java.util.Arrays; + +class PgBatchDmlSample { + + static void batchDml() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + batchDml(projectId, instanceId, databaseId); + } + + static void batchDml(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + + // Spanner PostgreSQL supports BatchDML statements. This will batch multiple DML statements + // into one request, which reduces the number of round trips that is needed for multiple DML + // statements. + long[] updateCounts = + client + .readWriteTransaction() + .run( + transaction -> + transaction.batchUpdate( + Arrays.asList( + Statement.newBuilder( + "INSERT INTO Singers (SingerId, FirstName, LastName) " + + "VALUES ($1, $2, $3)") + // Use 'p1' to bind to the parameter with index 1 etc. 
+ .bind("p1") + .to(1L) + .bind("p2") + .to("Alice") + .bind("p3") + .to("Henderson") + .build(), + Statement.newBuilder( + "INSERT INTO Singers (SingerId, FirstName, LastName) " + + "VALUES ($1, $2, $3)") + // Use 'p1' to bind to the parameter with index 1 etc. + .bind("p1") + .to(2L) + .bind("p2") + .to("Bruce") + .bind("p3") + .to("Allison") + .build()))); + long totalUpdateCount = Arrays.stream(updateCounts).sum(); + System.out.printf("Inserted %d singers\n", totalUpdateCount); + } + } +} +// [END spanner_postgresql_batch_dml] \ No newline at end of file diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgCaseSensitivitySample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgCaseSensitivitySample.java new file mode 100644 index 000000000000..abebdef39aad --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgCaseSensitivitySample.java @@ -0,0 +1,148 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_postgresql_identifier_case_sensitivity] + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.Lists; +import com.google.spanner.admin.database.v1.DatabaseName; +import java.util.Collections; +import java.util.concurrent.ExecutionException; + +public class PgCaseSensitivitySample { + + static void pgCaseSensitivity() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + pgCaseSensitivity(projectId, instanceId, databaseId); + } + + static void pgCaseSensitivity(String projectId, String instanceId, String databaseId) { + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + + // Spanner PostgreSQL follows the case sensitivity rules of PostgreSQL. This means that: + // 1. Identifiers that are not double-quoted are folded to lower case. + // 2. Identifiers that are double-quoted retain their case and are case-sensitive. + // See https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS + // for more information. + databaseAdminClient.updateDatabaseDdlAsync( + DatabaseName.of(projectId, instanceId, databaseId), + Lists.newArrayList( + "CREATE TABLE Singers (" + // SingerId will be folded to `singerid`. 
+ + " SingerId bigint NOT NULL PRIMARY KEY," + // FirstName and LastName are double-quoted and will therefore retain their + // mixed case and are case-sensitive. This means that any statement that + // references any of these columns must use double quotes. + + " \"FirstName\" varchar(1024) NOT NULL," + + " \"LastName\" varchar(1024) NOT NULL" + + ")")).get(); + + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + + client.write( + Collections.singleton( + Mutation.newInsertBuilder("Singers") + .set("singerid") + .to(1L) + // Column names in mutations are always case-insensitive, regardless whether the + // columns were double-quoted or not during creation. + .set("firstname") + .to("Bruce") + .set("lastname") + .to("Allison") + .build())); + + try (ResultSet singers = + client + .singleUse() + .executeQuery( + Statement.of("SELECT SingerId, \"FirstName\", \"LastName\" FROM Singers"))) { + while (singers.next()) { + System.out.printf( + "SingerId: %d, FirstName: %s, LastName: %s\n", + // SingerId is automatically folded to lower case. Accessing the column by its name in + // a result set must therefore use all lower-case letters. + singers.getLong("singerid"), + // FirstName and LastName were double-quoted during creation, and retain their mixed + // case when returned in a result set. + singers.getString("FirstName"), + singers.getString("LastName")); + } + } + + // Aliases are also identifiers, and specifying an alias in double quotes will make the alias + // retain its case. + try (ResultSet singers = + client + .singleUse() + .executeQuery( + Statement.of( + "SELECT " + + "singerid AS \"SingerId\", " + + "concat(\"FirstName\", ' '::varchar, \"LastName\") AS \"FullName\" " + + "FROM Singers"))) { + while (singers.next()) { + System.out.printf( + "SingerId: %d, FullName: %s\n", + // The aliases are double-quoted and therefore retains their mixed case. 
+ singers.getLong("SingerId"), singers.getString("FullName")); + } + } + + // DML statements must also follow the PostgreSQL case rules. + client + .readWriteTransaction() + .run( + transaction -> + transaction.executeUpdate( + Statement.newBuilder( + "INSERT INTO Singers (SingerId, \"FirstName\", \"LastName\") " + + "VALUES ($1, $2, $3)") + .bind("p1") + .to(2L) + .bind("p2") + .to("Alice") + .bind("p3") + .to("Bruxelles") + .build())); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } +} +// [END spanner_postgresql_identifier_case_sensitivity] \ No newline at end of file diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgCreateSequenceSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgCreateSequenceSample.java new file mode 100644 index 000000000000..79445aa27275 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgCreateSequenceSample.java @@ -0,0 +1,96 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_postgresql_create_sequence] + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.DatabaseName; +import java.util.Objects; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class PgCreateSequenceSample { + + static void pgCreateSequence() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + pgCreateSequence(projectId, instanceId, databaseId); + } + + static void pgCreateSequence(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + databaseAdminClient + .updateDatabaseDdlAsync(DatabaseName.of(projectId, instanceId, databaseId).toString(), + ImmutableList.of( + "CREATE SEQUENCE Seq BIT_REVERSED_POSITIVE;", + "CREATE TABLE Customers (CustomerId BIGINT DEFAULT nextval('Seq'), " + + "CustomerName character varying(1024), PRIMARY KEY (CustomerId))")) + .get(5, TimeUnit.MINUTES); + + System.out.println( + "Created Seq sequence and Customers table, where the key column " + + "CustomerId uses the sequence as a default value"); + + final DatabaseClient dbClient = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + + Long 
insertCount = + dbClient + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet rs = + transaction.executeQuery( + Statement.of( + "INSERT INTO Customers (CustomerName) VALUES " + + "('Alice'), ('David'), ('Marc') RETURNING CustomerId"))) { + while (rs.next()) { + System.out.printf( + "Inserted customer record with CustomerId: %d\n", rs.getLong(0)); + } + return Objects.requireNonNull(rs.getStats()).getRowCountExact(); + } + }); + System.out.printf("Number of customer records inserted is: %d\n", insertCount); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (TimeoutException e) { + // If the operation timed out propagate the timeout + throw SpannerExceptionFactory.propagateTimeout(e); + } + } +} +// [END spanner_postgresql_create_sequence] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgDeleteUsingDmlReturningSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgDeleteUsingDmlReturningSample.java new file mode 100644 index 000000000000..712ee07975c7 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgDeleteUsingDmlReturningSample.java @@ -0,0 +1,74 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_postgresql_delete_dml_returning] + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; + +public class PgDeleteUsingDmlReturningSample { + + static void deleteUsingDmlReturningSample() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + deleteUsingDmlReturningSample(projectId, instanceId, databaseId); + } + + static void deleteUsingDmlReturningSample( + String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService()) { + final DatabaseClient dbClient = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + // Delete records from SINGERS table satisfying a + // particular condition and returns the SingerId + // and FullName column of the deleted records using + // ‘RETURNING SingerId, FullName’. + // It is also possible to return all columns of all the + // deleted records by using ‘RETURNING *’. + dbClient + .readWriteTransaction() + .run( + transaction -> { + String sql = + "DELETE FROM Singers WHERE FirstName = 'Alice' RETURNING SingerId, FullName"; + + // readWriteTransaction.executeQuery(..) 
API should be used for executing + // DML statements with RETURNING clause. + try (ResultSet resultSet = transaction.executeQuery(Statement.of(sql))) { + while (resultSet.next()) { + System.out.printf("%d %s\n", resultSet.getLong(0), resultSet.getString(1)); + } + System.out.printf( + "Deleted row(s) count: %d\n", resultSet.getStats().getRowCountExact()); + } + return null; + }); + } + } +} +// [END spanner_postgresql_delete_dml_returning] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgDropSequenceSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgDropSequenceSample.java new file mode 100644 index 000000000000..129009e9b2b0 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgDropSequenceSample.java @@ -0,0 +1,69 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_postgresql_drop_sequence] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.DatabaseName; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class PgDropSequenceSample { + + static void pgDropSequence() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + pgDropSequence(projectId, instanceId, databaseId); + } + + static void pgDropSequence(String projectId, String instanceId, String databaseId) { + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + databaseAdminClient + .updateDatabaseDdlAsync( + DatabaseName.of(projectId, instanceId, databaseId), + ImmutableList.of( + "ALTER TABLE Customers ALTER COLUMN CustomerId DROP DEFAULT", + "DROP SEQUENCE Seq")) + .get(5, TimeUnit.MINUTES); + System.out.println( + "Altered Customers table to drop DEFAULT from " + + "CustomerId column and dropped the Seq sequence"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (TimeoutException e) { + // If the operation timed out propagate the timeout + throw SpannerExceptionFactory.propagateTimeout(e); + } + } +} +// [END spanner_postgresql_drop_sequence] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgInsertUsingDmlReturningSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgInsertUsingDmlReturningSample.java new file mode 100644 index 000000000000..38a68c4f8de7 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgInsertUsingDmlReturningSample.java @@ -0,0 +1,76 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_postgresql_insert_dml_returning] + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; + +public class PgInsertUsingDmlReturningSample { + + static void insertUsingDmlReturning() { + // TODO(developer): Replace these variables before running the sample. 
+ final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + insertUsingDmlReturning(projectId, instanceId, databaseId); + } + + static void insertUsingDmlReturning(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService()) { + final DatabaseClient dbClient = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + // Insert records into SINGERS table and returns the + // generated column FullName of the inserted records + // using ‘RETURNING FullName’. + // It is also possible to return all columns of all the + // inserted records by using ‘RETURNING *’. + dbClient + .readWriteTransaction() + .run( + transaction -> { + String sql = + "INSERT INTO Singers (SingerId, FirstName, LastName) VALUES " + + "(12, 'Melissa', 'Garcia'), " + + "(13, 'Russell', 'Morales'), " + + "(14, 'Jacqueline', 'Long'), " + + "(15, 'Dylan', 'Shaw') RETURNING FullName"; + + // readWriteTransaction.executeQuery(..) API should be used for executing + // DML statements with RETURNING clause. 
+ try (ResultSet resultSet = transaction.executeQuery(Statement.of(sql))) { + while (resultSet.next()) { + System.out.println(resultSet.getString(0)); + } + System.out.printf( + "Inserted row(s) count: %d\n", resultSet.getStats().getRowCountExact()); + } + return null; + }); + } + } +} +// [END spanner_postgresql_insert_dml_returning] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgInterleavedTableSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgInterleavedTableSample.java new file mode 100644 index 000000000000..30ee48ed6dd0 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgInterleavedTableSample.java @@ -0,0 +1,74 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_postgresql_interleaved_table] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.spanner.admin.database.v1.DatabaseName; +import java.util.Arrays; +import java.util.concurrent.ExecutionException; + +public class PgInterleavedTableSample { + + static void pgInterleavedTable() { + // TODO(developer): Replace these variables before running the sample. 
+ final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + pgInterleavedTable(projectId, instanceId, databaseId); + } + + static void pgInterleavedTable(String projectId, String instanceId, String databaseId) { + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + // The Spanner PostgreSQL dialect extends the PostgreSQL dialect with certain Spanner + // specific features, such as interleaved tables. + // See https://cloud.google.com/spanner/docs/postgresql/data-definition-language#create_table + // for the full CREATE TABLE syntax. + databaseAdminClient.updateDatabaseDdlAsync(DatabaseName.of(projectId, + instanceId, + databaseId), + Arrays.asList( + "CREATE TABLE Singers (" + + " SingerId bigint NOT NULL PRIMARY KEY," + + " FirstName varchar(1024) NOT NULL," + + " LastName varchar(1024) NOT NULL" + + ")", + "CREATE TABLE Albums (" + + " SingerId bigint NOT NULL," + + " AlbumId bigint NOT NULL," + + " Title varchar(1024) NOT NULL," + + " PRIMARY KEY (SingerId, AlbumId)" + + ") INTERLEAVE IN PARENT Singers ON DELETE CASCADE")).get(); + System.out.println("Created interleaved table hierarchy using PostgreSQL dialect"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } + } +} +// [END spanner_postgresql_interleaved_table] \ No newline at end of file diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgLastStatementSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgLastStatementSample.java new file mode 100644 index 000000000000..1c583a71b397 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgLastStatementSample.java @@ -0,0 +1,69 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; + +/** + * Sample showing how to set the last statement option when a DML statement is the last statement in + * a transaction. + */ +public class PgLastStatementSample { + + static void insertAndUpdateUsingLastStatement() { + // TODO(developer): Replace these variables before running the sample. 
+ final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + final DatabaseClient databaseClient = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + insertAndUpdateUsingLastStatement(databaseClient); + } + } + + // [START spanner_postgresql_dml_last_statement] + static void insertAndUpdateUsingLastStatement(DatabaseClient client) { + client + .readWriteTransaction() + .run( + transaction -> { + transaction.executeUpdate( + Statement.of( + "INSERT INTO Singers (SingerId, FirstName, LastName) " + + "VALUES (54214, 'John', 'Do')")); + System.out.println("New singer inserted."); + + // Pass in the `lastStatement` option to the last DML statement of the transaction. + transaction.executeUpdate( + Statement.of("UPDATE Singers SET LastName = 'Doe' WHERE SingerId = 54214\n"), + Options.lastStatement()); + System.out.println("Singer last name updated."); + + return null; + }); + } + // [END spanner_postgresql_dml_last_statement] + +} diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgPartitionedDmlSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgPartitionedDmlSample.java new file mode 100644 index 000000000000..ff442e7b2ef2 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgPartitionedDmlSample.java @@ -0,0 +1,58 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_postgresql_partitioned_dml] + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; + +class PgPartitionedDmlSample { + + static void partitionedDml() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + partitionedDml(projectId, instanceId, databaseId); + } + + static void partitionedDml(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + + // Spanner PostgreSQL has the same transaction limits as normal Spanner. This includes a + // maximum of 20,000 mutations in a single read/write transaction. Large update operations can + // be executed using Partitioned DML. This is also supported on Spanner PostgreSQL. + // See https://cloud.google.com/spanner/docs/dml-partitioned for more information. + long deletedCount = + client.executePartitionedUpdate(Statement.of("DELETE FROM users WHERE active=false")); + // The returned update count is the lower bound of the number of records that was deleted. 
+ System.out.printf("Deleted at least %d inactive users\n", deletedCount); + } + } +} +// [END spanner_postgresql_partitioned_dml] \ No newline at end of file diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgQueryWithNumericParameterSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgQueryWithNumericParameterSample.java new file mode 100644 index 000000000000..144a26def385 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgQueryWithNumericParameterSample.java @@ -0,0 +1,60 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_postgresql_query_with_numeric_parameter] +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Value; + +class PgQueryWithNumericParameterSample { + + static void queryWithNumericParameter() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + queryWithNumericParameter(client); + } + } + + static void queryWithNumericParameter(DatabaseClient client) { + Statement statement = + Statement.newBuilder( + "SELECT venueid as \"VenueId\", revenue as \"Revenue\" FROM Venues WHERE Revenue " + + "< $1") + .bind("p1") + .to(Value.pgNumeric("100000")) + .build(); + try (ResultSet resultSet = client.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s%n", resultSet.getLong("VenueId"), resultSet.getValue("Revenue")); + } + } + } +} +// [END spanner_postgresql_query_with_numeric_parameter] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgSpannerSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgSpannerSample.java new file mode 100644 index 000000000000..b3ad5cd08c44 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgSpannerSample.java @@ -0,0 +1,1625 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import com.google.api.gax.paging.Page; +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeyRange; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ReadOnlyTransaction; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerBatchUpdateException; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupOperationsPagedResponse; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseOperationsPagedResponse; +import com.google.common.io.BaseEncoding; +import com.google.longrunning.Operation; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.spanner.admin.database.v1.BackupName; +import com.google.spanner.admin.database.v1.CopyBackupMetadata; +import com.google.spanner.admin.database.v1.CreateBackupMetadata; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.DatabaseDialect; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest; +import 
com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata; +import com.google.spanner.admin.instance.v1.InstanceName; +import com.google.spanner.v1.ExecuteSqlRequest; +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +/** + * Example code for using the Cloud Spanner PostgreSQL interface. + */ +public class PgSpannerSample { + + // [START spanner_postgresql_insert_data] + static final List SINGERS = + Arrays.asList( + new Singer(1, "Marc", "Richards"), + new Singer(2, "Catalina", "Smith"), + new Singer(3, "Alice", "Trentor"), + new Singer(4, "Lea", "Martin"), + new Singer(5, "David", "Lomond")); + static final List ALBUMS = + Arrays.asList( + new Album(1, 1, "Total Junk"), + new Album(1, 2, "Go, Go, Go"), + new Album(2, 1, "Green"), + new Album(2, 2, "Forever Hold Your Peace"), + new Album(2, 3, "Terrified")); + // [END spanner_postgresql_insert_data] + + /** + * Class to contain performance sample data. 
+ */ + static class Performance { + + final long singerId; + final long venueId; + final String eventDate; + final long revenue; + + Performance(long singerId, long venueId, String eventDate, long revenue) { + this.singerId = singerId; + this.venueId = venueId; + this.eventDate = eventDate; + this.revenue = revenue; + } + } + + // [START spanner_postgresql_insert_data_with_timestamp_column] + static final List PERFORMANCES = + Arrays.asList( + new Performance(1, 4, "2017-10-05", 11000), + new Performance(1, 19, "2017-11-02", 15000), + new Performance(2, 42, "2017-12-23", 7000)); + // [START spanner_postgresql_insert_datatypes_data] + + static Value availableDates1 = + Value.dateArray( + Arrays.asList( + Date.parseDate("2020-12-01"), + Date.parseDate("2020-12-02"), + Date.parseDate("2020-12-03"))); + static Value availableDates2 = + Value.dateArray( + Arrays.asList( + Date.parseDate("2020-11-01"), + Date.parseDate("2020-11-05"), + Date.parseDate("2020-11-15"))); + static Value availableDates3 = + Value.dateArray(Arrays.asList(Date.parseDate("2020-10-01"), Date.parseDate("2020-10-07"))); + // [END spanner_postgresql_insert_data_with_timestamp_column] + static String exampleBytes1 = BaseEncoding.base64().encode("Hello World 1".getBytes()); + static String exampleBytes2 = BaseEncoding.base64().encode("Hello World 2".getBytes()); + static String exampleBytes3 = BaseEncoding.base64().encode("Hello World 3".getBytes()); + static final List VENUES = + Arrays.asList( + new Venue( + 4, + "Venue 4", + exampleBytes1, + 1800, + availableDates1, + "2018-09-02", + false, + 0.85543f, + new BigDecimal("215100.10")), + new Venue( + 19, + "Venue 19", + exampleBytes2, + 6300, + availableDates2, + "2019-01-15", + true, + 0.98716f, + new BigDecimal("1200100.00")), + new Venue( + 42, + "Venue 42", + exampleBytes3, + 3000, + availableDates3, + "2018-10-01", + false, + 0.72598f, + new BigDecimal("390650.99"))); + // [END spanner_postgresql_insert_datatypes_data] + + /** + * Class to 
contain venue sample data. + */ + static class Venue { + + final long venueId; + final String venueName; + final String venueInfo; + final long capacity; + final Value availableDates; + final String lastContactDate; + final boolean outdoorVenue; + final float popularityScore; + final BigDecimal revenue; + + Venue( + long venueId, + String venueName, + String venueInfo, + long capacity, + Value availableDates, + String lastContactDate, + boolean outdoorVenue, + float popularityScore, + BigDecimal revenue) { + this.venueId = venueId; + this.venueName = venueName; + this.venueInfo = venueInfo; + this.capacity = capacity; + this.availableDates = availableDates; + this.lastContactDate = lastContactDate; + this.outdoorVenue = outdoorVenue; + this.popularityScore = popularityScore; + this.revenue = revenue; + } + } + + // [START spanner_postgresql_create_database] + static void createPostgreSqlDatabase( + DatabaseAdminClient dbAdminClient, String projectId, String instanceId, String databaseId) { + final CreateDatabaseRequest request = + CreateDatabaseRequest.newBuilder() + .setCreateStatement("CREATE DATABASE \"" + databaseId + "\"") + .setParent(InstanceName.of(projectId, instanceId).toString()) + .setDatabaseDialect(DatabaseDialect.POSTGRESQL).build(); + + try { + // Initiate the request which returns an OperationFuture. + Database db = dbAdminClient.createDatabaseAsync(request).get(); + System.out.println("Created database [" + db.getName() + "]"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_postgresql_create_database] + + // [START spanner_postgresql_insert_data] + static void writeExampleData(DatabaseClient dbClient) { + List mutations = new ArrayList<>(); + for (Singer singer : SINGERS) { + mutations.add( + Mutation.newInsertBuilder("Singers") + .set("SingerId") + .to(singer.singerId) + .set("FirstName") + .to(singer.firstName) + .set("LastName") + .to(singer.lastName) + .build()); + } + for (Album album : ALBUMS) { + mutations.add( + Mutation.newInsertBuilder("Albums") + .set("SingerId") + .to(album.singerId) + .set("AlbumId") + .to(album.albumId) + .set("AlbumTitle") + .to(album.albumTitle) + .build()); + } + dbClient.write(mutations); + } + // [END spanner_postgresql_insert_data] + + // [START spanner_postgresql_delete_data] + static void deleteExampleData(DatabaseClient dbClient) { + List mutations = new ArrayList<>(); + + // KeySet.Builder can be used to delete a specific set of rows. + // Delete the Albums with the key values (2,1) and (2,3). + mutations.add( + Mutation.delete( + "Albums", KeySet.newBuilder().addKey(Key.of(2, 1)).addKey(Key.of(2, 3)).build())); + + // KeyRange can be used to delete rows with a key in a specific range. + // Delete a range of rows where the column key is >=3 and <5 + mutations.add( + Mutation.delete("Singers", KeySet.range(KeyRange.closedOpen(Key.of(3), Key.of(5))))); + + // KeySet.all() can be used to delete all the rows in a table. + // Delete remaining Singers rows, which will also delete the remaining Albums rows since it was + // defined with ON DELETE CASCADE. 
+ mutations.add(Mutation.delete("Singers", KeySet.all())); + + dbClient.write(mutations); + System.out.printf("Records deleted.\n"); + } + // [END spanner_postgresql_delete_data] + + // [START spanner_postgresql_query_data] + static void query(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() // Execute a single read or query against Cloud Spanner. + .executeQuery(Statement.of("SELECT SingerId, AlbumId, AlbumTitle FROM Albums"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", resultSet.getLong(0), resultSet.getLong(1), + resultSet.getString(2)); + } + } + } + // [END spanner_postgresql_query_data] + + // [START spanner_postgresql_read_data] + static void read(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() + .read( + "Albums", + KeySet.all(), // Read all rows in a table. + Arrays.asList("SingerId", "AlbumId", "AlbumTitle"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", resultSet.getLong(0), resultSet.getLong(1), + resultSet.getString(2)); + } + } + } + // [END spanner_postgresql_read_data] + + // [START spanner_postgresql_add_column] + static void addMarketingBudget(DatabaseAdminClient adminClient, DatabaseName databaseName) { + try { + // Initiate the request which returns an OperationFuture. + adminClient.updateDatabaseDdlAsync( + databaseName, + Arrays.asList("ALTER TABLE Albums ADD COLUMN MarketingBudget bigint")).get(); + System.out.println("Added MarketingBudget column"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_postgresql_add_column] + + // Before executing this method, a new column MarketingBudget has to be added to the Albums + // table by applying the DDL statement "ALTER TABLE Albums ADD COLUMN MarketingBudget INT64". + // [START spanner_postgresql_update_data] + static void update(DatabaseClient dbClient) { + // Mutation can be used to update/insert/delete a single row in a table. Here we use + // newUpdateBuilder to create update mutations. + List mutations = + Arrays.asList( + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(1) + .set("AlbumId") + .to(1) + .set("MarketingBudget") + .to(100000) + .build(), + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(2) + .set("AlbumId") + .to(2) + .set("MarketingBudget") + .to(500000) + .build()); + // This writes all the mutations to Cloud Spanner atomically. + dbClient.write(mutations); + } + // [END spanner_postgresql_update_data] + + // [START spanner_postgresql_read_write_transaction] + static void writeWithTransaction(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + // Transfer marketing budget from one album to another. We do it in a transaction to + // ensure that the transfer is atomic. + Struct row = + transaction.readRow("Albums", Key.of(2, 2), Arrays.asList("MarketingBudget")); + long album2Budget = row.getLong(0); + // Transaction will only be committed if this condition still holds at the time of + // commit. Otherwise it will be aborted and the callable will be rerun by the + // client library. 
+ long transfer = 200000; + if (album2Budget >= transfer) { + long album1Budget = + transaction + .readRow("Albums", Key.of(1, 1), Arrays.asList("MarketingBudget")) + .getLong(0); + album1Budget += transfer; + album2Budget -= transfer; + transaction.buffer( + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(1) + .set("AlbumId") + .to(1) + .set("MarketingBudget") + .to(album1Budget) + .build()); + transaction.buffer( + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(2) + .set("AlbumId") + .to(2) + .set("MarketingBudget") + .to(album2Budget) + .build()); + } + return null; + }); + } + // [END spanner_postgresql_read_write_transaction] + + // [START spanner_postgresql_query_data_with_new_column] + static void queryMarketingBudget(DatabaseClient dbClient) { + // Rows without an explicit value for MarketingBudget will have a MarketingBudget equal to + // null. A try-with-resource block is used to automatically release resources held by + // ResultSet. + try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery(Statement.of("SELECT singerid as \"SingerId\", " + + "albumid as \"AlbumId\", marketingbudget as \"MarketingBudget\" " + + "FROM Albums"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", + resultSet.getLong("SingerId"), + resultSet.getLong("AlbumId"), + // We check that the value is non null. ResultSet getters can only be used to retrieve + // non null values. + resultSet.isNull("MarketingBudget") ? "NULL" : + resultSet.getLong("MarketingBudget")); + } + } + } + // [END spanner_postgresql_query_data_with_new_column] + + // [START spanner_postgresql_create_index] + static void addIndex(DatabaseAdminClient adminClient, DatabaseName databaseName) { + try { + // Initiate the request which returns an OperationFuture. 
+ adminClient.updateDatabaseDdlAsync( + databaseName, + Arrays.asList("CREATE INDEX AlbumsByAlbumTitle ON Albums(AlbumTitle)")).get(); + System.out.println("Added AlbumsByAlbumTitle index"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_postgresql_create_index] + + // [START spanner_postgresql_read_data_with_index] + static void readUsingIndex(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() + .readUsingIndex( + "Albums", + "AlbumsByAlbumTitle", + KeySet.all(), + Arrays.asList("AlbumId", "AlbumTitle"))) { + while (resultSet.next()) { + System.out.printf("%d %s\n", resultSet.getLong(0), resultSet.getString(1)); + } + } + } + // [END spanner_postgresql_read_data_with_index] + + // [START spanner_postgresql_create_storing_index] + static void addStoringIndex(DatabaseAdminClient adminClient, DatabaseName databaseName) { + try { + // Initiate the request which returns an OperationFuture. + adminClient.updateDatabaseDdlAsync( + databaseName, + Arrays.asList( + "CREATE INDEX AlbumsByAlbumTitle2 ON Albums(AlbumTitle) " + + "INCLUDE (MarketingBudget)")).get(); + System.out.println("Added AlbumsByAlbumTitle2 index"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_postgresql_create_storing_index] + + // Before running this example, create a storing index AlbumsByAlbumTitle2 by applying the DDL + // statement "CREATE INDEX AlbumsByAlbumTitle2 ON Albums(AlbumTitle) INCLUDE (MarketingBudget)". + // [START spanner_postgresql_read_data_with_storing_index] + static void readStoringIndex(DatabaseClient dbClient) { + // We can read MarketingBudget also from the index since it stores a copy of MarketingBudget. + try (ResultSet resultSet = + dbClient + .singleUse() + .readUsingIndex( + "Albums", + "AlbumsByAlbumTitle2", + KeySet.all(), + Arrays.asList("AlbumId", "AlbumTitle", "MarketingBudget"))) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong(0), + resultSet.getString(1), + resultSet.isNull("marketingbudget") ? "NULL" : resultSet.getLong(2)); + } + } + } + // [END spanner_postgresql_read_data_with_storing_index] + + // [START spanner_postgresql_read_only_transaction] + static void readOnlyTransaction(DatabaseClient dbClient) { + // ReadOnlyTransaction must be closed by calling close() on it to release resources held by it. + // We use a try-with-resource block to automatically do so. 
+ try (ReadOnlyTransaction transaction = dbClient.readOnlyTransaction()) { + ResultSet queryResultSet = + transaction.executeQuery( + Statement.of("SELECT SingerId, AlbumId, AlbumTitle FROM Albums")); + while (queryResultSet.next()) { + System.out.printf( + "%d %d %s\n", + queryResultSet.getLong(0), queryResultSet.getLong(1), + queryResultSet.getString(2)); + } + try (ResultSet readResultSet = + transaction.read( + "Albums", KeySet.all(), Arrays.asList("SingerId", "AlbumId", "AlbumTitle"))) { + while (readResultSet.next()) { + System.out.printf( + "%d %d %s\n", + readResultSet.getLong(0), readResultSet.getLong(1), + readResultSet.getString(2)); + } + } + } + } + // [END spanner_postgresql_read_only_transaction] + + // [START spanner_postgresql_query_singers_table] + static void querySingersTable(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery(Statement.of("SELECT singerid as \"SingerId\", " + + "firstname as \"FirstName\", lastname as \"LastName\" FROM Singers"))) { + while (resultSet.next()) { + System.out.printf( + "%s %s %s\n", + resultSet.getLong("SingerId"), + resultSet.getString("FirstName"), + resultSet.getString("LastName")); + } + } + } + // [END spanner_postgresql_query_singers_table] + + + // [START spanner_postgresql_dml_getting_started_insert] + static void writeUsingDml(DatabaseClient dbClient) { + // Insert 4 singer records + dbClient + .readWriteTransaction() + .run(transaction -> { + String sql = + "INSERT INTO Singers (SingerId, FirstName, LastName) VALUES " + + "(12, 'Melissa', 'Garcia'), " + + "(13, 'Russell', 'Morales'), " + + "(14, 'Jacqueline', 'Long'), " + + "(15, 'Dylan', 'Shaw')"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d records inserted.\n", rowCount); + return null; + }); + } + // [END spanner_postgresql_dml_getting_started_insert] + + // [START spanner_postgresql_query_with_parameter] + static void queryWithParameter(DatabaseClient dbClient) 
{ + Statement statement = + Statement.newBuilder( + "SELECT singerid AS \"SingerId\", " + + "firstname as \"FirstName\", lastname as \"LastName\" " + + "FROM Singers " + + "WHERE LastName = $1") + .bind("p1") + .to("Garcia") + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong("SingerId"), + resultSet.getString("FirstName"), + resultSet.getString("LastName")); + } + } + } + // [END spanner_postgresql_query_with_parameter] + + // [START spanner_postgresql_dml_getting_started_update] + static void writeWithTransactionUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + // Transfer marketing budget from one album to another. We do it in a transaction to + // ensure that the transfer is atomic. + String sql1 = + "SELECT marketingbudget as \"MarketingBudget\" from Albums WHERE " + + "SingerId = 2 and AlbumId = 2"; + ResultSet resultSet = transaction.executeQuery(Statement.of(sql1)); + long album2Budget = 0; + while (resultSet.next()) { + album2Budget = resultSet.getLong("MarketingBudget"); + } + // Transaction will only be committed if this condition still holds at the time of + // commit. Otherwise it will be aborted and the callable will be rerun by the + // client library. 
+ long transfer = 200000; + if (album2Budget >= transfer) { + String sql2 = + "SELECT marketingbudget as \"MarketingBudget\" from Albums WHERE " + + "SingerId = 1 and AlbumId = 1"; + ResultSet resultSet2 = transaction.executeQuery(Statement.of(sql2)); + long album1Budget = 0; + while (resultSet2.next()) { + album1Budget = resultSet2.getLong("MarketingBudget"); + } + album1Budget += transfer; + album2Budget -= transfer; + Statement updateStatement = + Statement.newBuilder( + "UPDATE Albums " + + "SET MarketingBudget = $1 " + + "WHERE SingerId = 1 and AlbumId = 1") + .bind("p1") + .to(album1Budget) + .build(); + transaction.executeUpdate(updateStatement); + Statement updateStatement2 = + Statement.newBuilder( + "UPDATE Albums " + + "SET MarketingBudget = $1 " + + "WHERE SingerId = 2 and AlbumId = 2") + .bind("p1") + .to(album2Budget) + .build(); + transaction.executeUpdate(updateStatement2); + } + return null; + }); + } + // [END spanner_postgresql_dml_getting_started_update] + + // [START spanner_postgresql_create_table_using_ddl] + // [START spanner_postgresql_create_database] + static void createTableUsingDdl(DatabaseAdminClient dbAdminClient, DatabaseName databaseName) { + try { + // Initiate the request which returns an OperationFuture. 
+ dbAdminClient.updateDatabaseDdlAsync( + databaseName, + Arrays.asList( + "CREATE TABLE Singers (" + + " SingerId bigint NOT NULL," + + " FirstName character varying(1024)," + + " LastName character varying(1024)," + + " SingerInfo bytea," + + " FullName character varying(2048) GENERATED " + + " ALWAYS AS (FirstName || ' ' || LastName) STORED," + + " PRIMARY KEY (SingerId)" + + ")", + "CREATE TABLE Albums (" + + " SingerId bigint NOT NULL," + + " AlbumId bigint NOT NULL," + + " AlbumTitle character varying(1024)," + + " PRIMARY KEY (SingerId, AlbumId)" + + ") INTERLEAVE IN PARENT Singers ON DELETE CASCADE")).get(); + System.out.println("Created Singers & Albums tables in database: [" + databaseName + "]"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_postgresql_create_database] + // [END spanner_postgresql_create_table_using_ddl] + + // [START spanner_postgresql_read_stale_data] + static void readStaleData(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse(TimestampBound.ofExactStaleness(15, TimeUnit.SECONDS)) + .read( + "Albums", KeySet.all(), + Arrays.asList("SingerId", "AlbumId", "MarketingBudget"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", + resultSet.getLong(0), + resultSet.getLong(1), + resultSet.isNull(2) ? "NULL" : resultSet.getLong(2)); + } + } + } + // [END spanner_postgresql_read_stale_data] + + // Before executing this method, a new column MarketingBudget has to be added to the Albums + // table by applying the DDL statement "ALTER TABLE Albums ADD COLUMN MarketingBudget BIGINT". 
+ // In addition this update expects the LastUpdateTime column added by applying the DDL statement + // "ALTER TABLE Albums ADD COLUMN LastUpdateTime TIMESTAMPTZ" + // [START spanner_postgresql_update_data_with_timestamp_column] + static void updateWithTimestamp(DatabaseClient dbClient) { + // Mutation can be used to update/insert/delete a single row in a table. Here we use + // newUpdateBuilder to create update mutations. + List mutations = + Arrays.asList( + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(1) + .set("AlbumId") + .to(1) + .set("MarketingBudget") + .to(1000000) + .set("LastUpdateTime") + .to(Value.COMMIT_TIMESTAMP) + .build(), + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(2) + .set("AlbumId") + .to(2) + .set("MarketingBudget") + .to(750000) + .set("LastUpdateTime") + .to(Value.COMMIT_TIMESTAMP) + .build()); + // This writes all the mutations to Cloud Spanner atomically. + dbClient.write(mutations); + } + // [END spanner_postgresql_update_data_with_timestamp_column] + + // [START spanner_postgresql_add_timestamp_column] + static void addLastUpdateTimestampColumn( + DatabaseAdminClient adminClient, DatabaseName databaseName) { + try { + // Initiate the request which returns an OperationFuture. + adminClient.updateDatabaseDdlAsync( + databaseName, + Arrays.asList( + "ALTER TABLE Albums ADD COLUMN LastUpdateTime spanner.commit_timestamp")).get(); + System.out.println("Added LastUpdateTime as a timestamp column in Albums table."); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_postgresql_add_timestamp_column] + + // [START spanner_postgresql_query_data_with_timestamp_column] + static void queryMarketingBudgetWithTimestamp(DatabaseClient dbClient) { + // Rows without an explicit value for MarketingBudget will have a MarketingBudget equal to + // null. A try-with-resource block is used to automatically release resources held by + // ResultSet. + try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery( + Statement.of( + "SELECT singerid as \"SingerId\", albumid as \"AlbumId\", " + + "marketingbudget as \"MarketingBudget\"," + + "lastupdatetime as \"LastUpdateTime\" FROM Albums" + + " ORDER BY LastUpdateTime DESC"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s %s\n", + resultSet.getLong("SingerId"), + resultSet.getLong("AlbumId"), + // We check that the value is non null. ResultSet getters can only be used to retrieve + // non null values. + resultSet.isNull("MarketingBudget") ? "NULL" : resultSet.getLong("MarketingBudget"), + resultSet.isNull("LastUpdateTime") ? "NULL" : resultSet.getTimestamp("LastUpdateTime")); + } + } + } + // [END spanner_postgresql_query_data_with_timestamp_column] + + // [START spanner_postgresql_create_table_with_timestamp_column] + static void createTableWithTimestamp(DatabaseAdminClient dbAdminClient, + DatabaseName databaseName) { + try { + // Initiate the request which returns an OperationFuture. 
+ dbAdminClient.updateDatabaseDdlAsync(databaseName, + Arrays.asList( + "CREATE TABLE Performances (" + + " SingerId BIGINT NOT NULL," + + " VenueId BIGINT NOT NULL," + + " Revenue BIGINT," + + " LastUpdateTime SPANNER.COMMIT_TIMESTAMP NOT NULL," + + " PRIMARY KEY (SingerId, VenueId))" + + " INTERLEAVE IN PARENT Singers ON DELETE CASCADE")).get(); + System.out.println("Created Performances table in database: [" + databaseName + "]"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_postgresql_create_table_with_timestamp_column] + + // [START spanner_postgresql_insert_data_with_timestamp_column] + static void writeExampleDataWithTimestamp(DatabaseClient dbClient) { + List mutations = new ArrayList<>(); + for (Performance performance : PERFORMANCES) { + mutations.add( + Mutation.newInsertBuilder("Performances") + .set("SingerId") + .to(performance.singerId) + .set("VenueId") + .to(performance.venueId) + .set("Revenue") + .to(performance.revenue) + .set("LastUpdateTime") + .to(Value.COMMIT_TIMESTAMP) + .build()); + } + dbClient.write(mutations); + } + // [END spanner_postgresql_insert_data_with_timestamp_column] + + static void queryPerformancesTable(DatabaseClient dbClient) { + // Rows without an explicit value for Revenue will have a Revenue equal to + // null. A try-with-resource block is used to automatically release resources held by + // ResultSet. 
+ try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery( + Statement.of( + "SELECT singerid as \"SingerId\", venueid as \"VenueId\", " + + "revenue as \"Revenue\", lastupdatetime as \"LastUpdateTime\" " + + "FROM Performances ORDER BY LastUpdateTime DESC"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s %s\n", + resultSet.getLong("SingerId"), + resultSet.getLong("VenueId"), + // We check that the value is non null. ResultSet getters can only be used to retrieve + // non null values. + resultSet.isNull("Revenue") ? "NULL" : resultSet.getLong("Revenue"), + resultSet.getTimestamp("LastUpdateTime")); + } + } + } + + // [START spanner_postgresql_dml_standard_insert] + static void insertUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + String sql = + "INSERT INTO Singers (SingerId, FirstName, LastName) " + + " VALUES (10, 'Virginia', 'Watson')"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record inserted.\n", rowCount); + return null; + }); + } + // [END spanner_postgresql_dml_standard_insert] + + // [START spanner_postgresql_dml_standard_update] + static void updateUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + String sql = + "UPDATE Albums " + + "SET MarketingBudget = MarketingBudget * 2 " + + "WHERE SingerId = 1 and AlbumId = 1"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record updated.\n", rowCount); + return null; + }); + } + // [END spanner_postgresql_dml_standard_update] + + // [START spanner_postgresql_dml_standard_delete] + static void deleteUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + String sql = "DELETE FROM Singers WHERE FirstName = 'Alice'"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record deleted.\n", rowCount); + return null; + }); + } + 
// [END spanner_postgresql_dml_standard_delete] + + // [START spanner_postgresql_dml_write_then_read] + static void writeAndReadUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + // Insert record. + String sql = + "INSERT INTO Singers (SingerId, FirstName, LastName) " + + " VALUES (11, 'Timothy', 'Campbell')"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record inserted.\n", rowCount); + // Read newly inserted record. + sql = "SELECT firstname as \"FirstName\", lastname as \"LastName\" FROM Singers WHERE " + + "SingerId = 11"; + // We use a try-with-resource block to automatically release resources held by + // ResultSet. + try (ResultSet resultSet = transaction.executeQuery(Statement.of(sql))) { + while (resultSet.next()) { + System.out.printf( + "%s %s\n", + resultSet.getString("FirstName"), resultSet.getString("LastName")); + } + } + return null; + }); + } + // [END spanner_postgresql_dml_write_then_read] + + // [START spanner_postgresql_dml_partitioned_update] + static void updateUsingPartitionedDml(DatabaseClient dbClient) { + String sql = "UPDATE Albums SET MarketingBudget = 100000 WHERE SingerId > 1"; + long rowCount = dbClient.executePartitionedUpdate(Statement.of(sql)); + System.out.printf("%d records updated.\n", rowCount); + } + // [END spanner_postgresql_dml_partitioned_update] + + // [START spanner_postgresql_dml_partitioned_delete] + static void deleteUsingPartitionedDml(DatabaseClient dbClient) { + String sql = "DELETE FROM Singers WHERE SingerId > 10"; + long rowCount = dbClient.executePartitionedUpdate(Statement.of(sql)); + System.out.printf("%d records deleted.\n", rowCount); + } + // [END spanner_postgresql_dml_partitioned_delete] + + // [START spanner_postgresql_dml_batch_update] + static void updateUsingBatchDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + List<Statement> stmts = new ArrayList<Statement>(); + String sql = + "INSERT 
INTO Albums " + + "(SingerId, AlbumId, AlbumTitle, MarketingBudget) " + + "VALUES (1, 3, 'Test Album Title', 10000) "; + stmts.add(Statement.of(sql)); + sql = + "UPDATE Albums " + + "SET MarketingBudget = MarketingBudget * 2 " + + "WHERE SingerId = 1 and AlbumId = 3"; + stmts.add(Statement.of(sql)); + long[] rowCounts; + try { + rowCounts = transaction.batchUpdate(stmts); + } catch (SpannerBatchUpdateException e) { + rowCounts = e.getUpdateCounts(); + } + for (int i = 0; i < rowCounts.length; i++) { + System.out.printf("%d record updated by stmt %d.\n", rowCounts[i], i); + } + return null; + }); + } + // [END spanner_postgresql_dml_batch_update] + + // [START spanner_postgresql_create_table_with_datatypes] + static void createTableWithDatatypes(DatabaseAdminClient dbAdminClient, + DatabaseName databaseName) { + try { + // Initiate the request which returns an OperationFuture. + dbAdminClient.updateDatabaseDdlAsync( + databaseName, + Arrays.asList( + "CREATE TABLE Venues (" + + " VenueId BIGINT NOT NULL," + + " VenueName character varying(100)," + + " VenueInfo bytea," + + " Capacity BIGINT," + + " OutdoorVenue BOOL, " + + " PopularityScore FLOAT8, " + + " Revenue NUMERIC, " + + " LastUpdateTime SPANNER.COMMIT_TIMESTAMP NOT NULL," + + " PRIMARY KEY (VenueId))")).get(); + System.out.println("Created Venues table in database: [" + databaseName + "]"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_postgresql_create_table_with_datatypes] + + // [START spanner_postgresql_insert_datatypes_data] + static void writeDatatypesData(DatabaseClient dbClient) { + List<Mutation> mutations = new ArrayList<>(); + for (Venue venue : VENUES) { + mutations.add( + Mutation.newInsertBuilder("Venues") + .set("VenueId") + .to(venue.venueId) + .set("VenueName") + .to(venue.venueName) + .set("VenueInfo") + .to(venue.venueInfo) + .set("Capacity") + .to(venue.capacity) + .set("OutdoorVenue") + .to(venue.outdoorVenue) + .set("PopularityScore") + .to(venue.popularityScore) + .set("Revenue") + .to(venue.revenue) + .set("LastUpdateTime") + .to(Value.COMMIT_TIMESTAMP) + .build()); + } + dbClient.write(mutations); + } + // [END spanner_postgresql_insert_datatypes_data] + + // [START spanner_postgresql_query_with_bool_parameter] + static void queryWithBool(DatabaseClient dbClient) { + boolean exampleBool = true; + Statement statement = + Statement.newBuilder( + "SELECT venueid as \"VenueId\", venuename as \"VenueName\"," + + " outdoorvenue as \"OutdoorVenue\" FROM Venues " + + "WHERE OutdoorVenue = $1") + .bind("p1") + .to(exampleBool) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %b\n", + resultSet.getLong("VenueId"), + resultSet.getString("VenueName"), + resultSet.getBoolean("OutdoorVenue")); + } + } + } + // [END spanner_postgresql_query_with_bool_parameter] + + // [START spanner_postgresql_query_with_bytes_parameter] + static void queryWithBytes(DatabaseClient dbClient) { + ByteArray exampleBytes = + ByteArray.fromBase64(BaseEncoding.base64().encode("Hello World 1".getBytes())); + Statement statement = + Statement.newBuilder( + "SELECT venueid as \"VenueId\", venuename as \"VenueName\" FROM Venues " + + "WHERE VenueInfo = $1") + .bind("p1") + .to(exampleBytes) + .build(); + try (ResultSet resultSet = 
dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s\n", resultSet.getLong("VenueId"), resultSet.getString("VenueName")); + } + } + } + // [END spanner_postgresql_query_with_bytes_parameter] + + // [START spanner_postgresql_query_with_float_parameter] + static void queryWithFloat(DatabaseClient dbClient) { + float exampleFloat = 0.8f; + Statement statement = + Statement.newBuilder( + "SELECT venueid as \"VenueId\", venuename as \"VenueName\", " + + "popularityscore as \"PopularityScore\" FROM Venues " + + "WHERE PopularityScore > $1") + .bind("p1") + .to(exampleFloat) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %f\n", + resultSet.getLong("VenueId"), + resultSet.getString("VenueName"), + resultSet.getDouble("PopularityScore")); + } + } + } + // [END spanner_postgresql_query_with_float_parameter] + + // [START spanner_postgresql_query_with_int_parameter] + static void queryWithInt(DatabaseClient dbClient) { + long exampleInt = 3000; + Statement statement = + Statement.newBuilder( + "SELECT venueid as \"VenueId\", venuename as \"VenueName\", " + + "capacity as \"Capacity\" " + + "FROM Venues " + "WHERE Capacity >= $1") + .bind("p1") + .to(exampleInt) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %d\n", + resultSet.getLong("VenueId"), + resultSet.getString("VenueName"), + resultSet.getLong("Capacity")); + } + } + } + // [END spanner_postgresql_query_with_int_parameter] + + // [START spanner_postgresql_query_with_string_parameter] + static void queryWithString(DatabaseClient dbClient) { + String exampleString = "Venue 42"; + Statement statement = + Statement.newBuilder( + "SELECT venueid as \"VenueId\", venuename as \"VenueName\" FROM Venues WHERE" + + " VenueName = $1") + .bind("p1") + .to(exampleString) + 
.build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s\n", resultSet.getLong("VenueId"), resultSet.getString("VenueName")); + } + } + } + // [END spanner_postgresql_query_with_string_parameter] + + // [START spanner_postgresql_query_with_timestamp_parameter] + static void queryWithTimestampParameter(DatabaseClient dbClient) { + Statement statement = + Statement.newBuilder( + "SELECT venueid as \"VenueId\", venuename as \"VenueName\", " + + "lastupdatetime as \"LastUpdateTime\" FROM Venues " + + "WHERE LastUpdateTime < $1") + .bind("p1") + .to(Timestamp.now()) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong("VenueId"), + resultSet.getString("VenueName"), + resultSet.getTimestamp("LastUpdateTime")); + } + } + } + // [END spanner_postgresql_query_with_timestamp_parameter] + + // [START spanner_postgresql_query_with_numeric_parameter] + static void queryWithNumeric(DatabaseClient dbClient) { + Statement statement = + Statement.newBuilder( + "SELECT venueid as \"VenueId\", venuename as \"VenueName\", " + + "revenue as \"Revenue\" FROM Venues\n" + + "WHERE Revenue >= $1") + .bind("p1") + .to(Value.pgNumeric("300000")) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s%n", + resultSet.getLong("VenueId"), + resultSet.getString("VenueName"), + resultSet.getValue("Revenue")); + } + } + } + // [END spanner_postgresql_query_with_numeric_parameter] + + // [START spanner_postgresql_create_client_with_query_options] + static void clientWithQueryOptions(DatabaseId db) { + SpannerOptions options = + SpannerOptions.newBuilder() + .setDefaultQueryOptions( + db, ExecuteSqlRequest.QueryOptions + .newBuilder() + .setOptimizerVersion("1") + // The list of available statistics 
packages can be found by querying the + // "INFORMATION_SCHEMA.spanner_postgresql_STATISTICS" table. + .setOptimizerStatisticsPackage("latest") + .build()) + .build(); + Spanner spanner = options.getService(); + DatabaseClient dbClient = spanner.getDatabaseClient(db); + try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery(Statement.of("SELECT SingerId, AlbumId, AlbumTitle FROM Albums"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", resultSet.getLong(0), resultSet.getLong(1), resultSet.getString(2)); + } + } + } + // [END spanner_postgresql_create_client_with_query_options] + + // [START spanner_postgresql_query_with_query_options] + static void queryWithQueryOptions(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery( + Statement + .newBuilder("SELECT SingerId, AlbumId, AlbumTitle FROM Albums") + .withQueryOptions(ExecuteSqlRequest.QueryOptions + .newBuilder() + .setOptimizerVersion("1") + // The list of available statistics packages can be found by querying + // the "INFORMATION_SCHEMA.spanner_postgresql_STATISTICS" table. + .setOptimizerStatisticsPackage("latest") + .build()) + .build())) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", resultSet.getLong(0), resultSet.getLong(1), resultSet.getString(2)); + } + } + } + // [END spanner_postgresql_query_with_query_options] + + // [START spanner_postgresql_list_backup_operations] + static void listBackupOperations( + DatabaseAdminClient databaseAdminClient, + String projectId, String instanceId, + String databaseId, String backupId) { + com.google.spanner.admin.database.v1.InstanceName instanceName = + com.google.spanner.admin.database.v1.InstanceName.of(projectId, instanceId); + // Get 'CreateBackup' operations for the sample database. 
+ String filter = + String.format( + "(metadata.@type:type.googleapis.com/" + + "google.spanner.admin.database.v1.CreateBackupMetadata) " + + "AND (metadata.database:%s)", + DatabaseName.of(projectId, instanceId, databaseId).toString()); + ListBackupOperationsRequest listBackupOperationsRequest = + ListBackupOperationsRequest.newBuilder() + .setParent(instanceName.toString()).setFilter(filter).build(); + ListBackupOperationsPagedResponse createBackupOperations + = databaseAdminClient.listBackupOperations(listBackupOperationsRequest); + System.out.println("Create Backup Operations:"); + for (Operation op : createBackupOperations.iterateAll()) { + try { + CreateBackupMetadata metadata = op.getMetadata().unpack(CreateBackupMetadata.class); + System.out.println( + String.format( + "Backup %s on database %s pending: %d%% complete", + metadata.getName(), + metadata.getDatabase(), + metadata.getProgress().getProgressPercent())); + } catch (InvalidProtocolBufferException e) { + // The returned operation does not contain CreateBackupMetadata. + System.err.println(e.getMessage()); + } + } + // Get copy backup operations for the sample database. 
+ filter = String.format( + "(metadata.@type:type.googleapis.com/" + + "google.spanner.admin.database.v1.CopyBackupMetadata) " + + "AND (metadata.source_backup:%s)", + BackupName.of(projectId, instanceId, backupId).toString()); + listBackupOperationsRequest = + ListBackupOperationsRequest.newBuilder() + .setParent(instanceName.toString()).setFilter(filter).build(); + ListBackupOperationsPagedResponse copyBackupOperations = + databaseAdminClient.listBackupOperations(listBackupOperationsRequest); + System.out.println("Copy Backup Operations:"); + for (Operation op : copyBackupOperations.iterateAll()) { + try { + CopyBackupMetadata copyBackupMetadata = + op.getMetadata().unpack(CopyBackupMetadata.class); + System.out.println( + String.format( + "Copy Backup %s on backup %s pending: %d%% complete", + copyBackupMetadata.getName(), + copyBackupMetadata.getSourceBackup(), + copyBackupMetadata.getProgress().getProgressPercent())); + } catch (InvalidProtocolBufferException e) { + // The returned operation does not contain CopyBackupMetadata. + System.err.println(e.getMessage()); + } + } + } + // [END spanner_postgresql_list_backup_operations] + + // [START spanner_postgresql_list_database_operations] + static void listDatabaseOperations( + DatabaseAdminClient dbAdminClient, String projectId, String instanceId) { + // Get optimize restored database operations. 
+ com.google.cloud.Timestamp last24Hours = com.google.cloud.Timestamp.ofTimeSecondsAndNanos( + TimeUnit.SECONDS.convert( + TimeUnit.HOURS.convert(com.google.cloud.Timestamp.now().getSeconds(), TimeUnit.SECONDS) + - 24, + TimeUnit.HOURS), 0); + String filter = String.format("(metadata.@type:type.googleapis.com/" + + "google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata) AND " + + "(metadata.progress.start_time > \"%s\")", last24Hours); + ListDatabaseOperationsRequest listDatabaseOperationsRequest = + ListDatabaseOperationsRequest.newBuilder() + .setParent(com.google.spanner.admin.instance.v1.InstanceName.of( + projectId, instanceId).toString()).setFilter(filter).build(); + ListDatabaseOperationsPagedResponse pagedResponse + = dbAdminClient.listDatabaseOperations(listDatabaseOperationsRequest); + for (Operation op : pagedResponse.iterateAll()) { + try { + OptimizeRestoredDatabaseMetadata metadata = + op.getMetadata().unpack(OptimizeRestoredDatabaseMetadata.class); + System.out.println(String.format( + "Database %s restored from backup is %d%% optimized", + metadata.getName(), + metadata.getProgress().getProgressPercent())); + } catch (InvalidProtocolBufferException e) { + // The returned operation does not contain OptimizeRestoredDatabaseMetadata. 
+ System.err.println(e.getMessage()); + } + } + } + // [END spanner_postgresql_list_database_operations] + + static void run( + DatabaseClient dbClient, + DatabaseAdminClient dbAdminClient, + String command, + DatabaseId database, + String backupId) { + DatabaseName databaseName = DatabaseName.of(database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), database.getDatabase()); + switch (command) { + case "createpgdatabase": + createPostgreSqlDatabase(dbAdminClient, database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), database.getDatabase()); + break; + case "write": + writeExampleData(dbClient); + break; + case "delete": + deleteExampleData(dbClient); + break; + case "query": + query(dbClient); + break; + case "read": + read(dbClient); + break; + case "addmarketingbudget": + addMarketingBudget(dbAdminClient, databaseName); + break; + case "update": + update(dbClient); + break; + case "writetransaction": + writeWithTransaction(dbClient); + break; + case "querymarketingbudget": + queryMarketingBudget(dbClient); + break; + case "addindex": + addIndex(dbAdminClient, databaseName); + break; + case "readindex": + readUsingIndex(dbClient); + break; + case "addstoringindex": + addStoringIndex(dbAdminClient, databaseName); + break; + case "readstoringindex": + readStoringIndex(dbClient); + break; + case "readonlytransaction": + readOnlyTransaction(dbClient); + break; + case "querysingerstable": + querySingersTable(dbClient); + break; + case "writeusingdml": + writeUsingDml(dbClient); + break; + case "querywithparameter": + queryWithParameter(dbClient); + break; + case "writewithtransactionusingdml": + writeWithTransactionUsingDml(dbClient); + break; + case "createtableusingddl": + createTableUsingDdl(dbAdminClient, databaseName); + break; + case "readstaledata": + readStaleData(dbClient); + break; + case "addlastupdatetimestampcolumn": + addLastUpdateTimestampColumn(dbAdminClient, databaseName); + break; + case 
"updatewithtimestamp": + updateWithTimestamp(dbClient); + break; + case "querywithtimestamp": + queryMarketingBudgetWithTimestamp(dbClient); + break; + case "createtablewithtimestamp": + createTableWithTimestamp(dbAdminClient, databaseName); + break; + case "writewithtimestamp": + writeExampleDataWithTimestamp(dbClient); + break; + case "queryperformancestable": + queryPerformancesTable(dbClient); + break; + case "insertusingdml": + insertUsingDml(dbClient); + break; + case "updateusingdml": + updateUsingDml(dbClient); + break; + case "deleteusingdml": + deleteUsingDml(dbClient); + break; + case "writeandreadusingdml": + writeAndReadUsingDml(dbClient); + break; + case "updateusingpartitioneddml": + updateUsingPartitionedDml(dbClient); + break; + case "deleteusingpartitioneddml": + deleteUsingPartitionedDml(dbClient); + break; + case "updateusingbatchdml": + updateUsingBatchDml(dbClient); + break; + case "createtablewithdatatypes": + createTableWithDatatypes(dbAdminClient, databaseName); + break; + case "writedatatypesdata": + writeDatatypesData(dbClient); + break; + case "querywithbool": + queryWithBool(dbClient); + break; + case "querywithbytes": + queryWithBytes(dbClient); + break; + case "querywithfloat": + queryWithFloat(dbClient); + break; + case "querywithint": + queryWithInt(dbClient); + break; + case "querywithstring": + queryWithString(dbClient); + break; + case "querywithtimestampparameter": + queryWithTimestampParameter(dbClient); + break; + case "querywithnumeric": + queryWithNumeric(dbClient); + break; + case "clientwithqueryoptions": + clientWithQueryOptions(database); + break; + case "querywithqueryoptions": + queryWithQueryOptions(dbClient); + break; + case "listbackupoperations": + listBackupOperations(dbAdminClient, database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), database.getDatabase(), backupId); + break; + case "listdatabaseoperations": + listDatabaseOperations(dbAdminClient, 
database.getInstanceId().getProject(), + database.getInstanceId().getInstance()); + break; + default: + printUsageAndExit(); + } + } + + static void printUsageAndExit() { + System.err.println("Usage:"); + System.err.println(" PgSpannerExample "); + System.err.println(); + System.err.println("Examples:"); + System.err.println(" PgSpannerExample createdatabase my-instance example-db"); + System.err.println(" PgSpannerExample write my-instance example-db"); + System.err.println(" PgSpannerExample delete my-instance example-db"); + System.err.println(" PgSpannerExample query my-instance example-db"); + System.err.println(" PgSpannerExample read my-instance example-db"); + System.err.println(" PgSpannerExample addmarketingbudget my-instance example-db"); + System.err.println(" PgSpannerExample update my-instance example-db"); + System.err.println(" PgSpannerExample writetransaction my-instance example-db"); + System.err.println(" PgSpannerExample querymarketingbudget my-instance example-db"); + System.err.println(" PgSpannerExample addindex my-instance example-db"); + System.err.println(" PgSpannerExample readindex my-instance example-db"); + System.err.println(" PgSpannerExample addstoringindex my-instance example-db"); + System.err.println(" PgSpannerExample readstoringindex my-instance example-db"); + System.err.println(" PgSpannerExample readonlytransaction my-instance example-db"); + System.err.println(" PgSpannerExample querysingerstable my-instance example-db"); + System.err.println(" PgSpannerExample writeusingdml my-instance example-db"); + System.err.println(" PgSpannerExample querywithparameter my-instance example-db"); + System.err.println(" PgSpannerExample writewithtransactionusingdml my-instance example-db"); + System.err.println(" PgSpannerExample createtableforsamples my-instance example-db"); + System.err.println(" PgSpannerExample writewithtimestamp my-instance example-db"); + System.err.println(" PgSpannerExample queryperformancestable my-instance 
example-db"); + System.err.println(" PgSpannerExample writestructdata my-instance example-db"); + System.err.println(" PgSpannerExample insertusingdml my-instance example-db"); + System.err.println(" PgSpannerExample updateusingdml my-instance example-db"); + System.err.println(" PgSpannerExample deleteusingdml my-instance example-db"); + System.err.println(" PgSpannerExample writeandreadusingdml my-instance example-db"); + System.err.println(" PgSpannerExample writeusingdml my-instance example-db"); + System.err.println(" PgSpannerExample deleteusingpartitioneddml my-instance example-db"); + System.err.println(" PgSpannerExample updateusingbatchdml my-instance example-db"); + System.err.println(" PgSpannerExample createtablewithdatatypes my-instance example-db"); + System.err.println(" PgSpannerExample writedatatypesdata my-instance example-db"); + System.err.println(" PgSpannerExample querywithbool my-instance example-db"); + System.err.println(" PgSpannerExample querywithbytes my-instance example-db"); + System.err.println(" PgSpannerExample querywithfloat my-instance example-db"); + System.err.println(" PgSpannerExample querywithint my-instance example-db"); + System.err.println(" PgSpannerExample querywithstring my-instance example-db"); + System.err.println(" PgSpannerExample querywithtimestampparameter my-instance example-db"); + System.err.println(" PgSpannerExample clientwithqueryoptions my-instance example-db"); + System.err.println(" PgSpannerExample querywithqueryoptions my-instance example-db"); + System.err.println(" PgSpannerExample listbackupoperations my-instance example-db"); + System.err.println(" PgSpannerExample listdatabaseoperations my-instance example-db"); + System.exit(1); + } + + public static void main(String[] args) { + if (args.length != 3) { + printUsageAndExit(); + } + // [START spanner_init_client] + SpannerOptions options = SpannerOptions.newBuilder().build(); + Spanner spanner = options.getService(); + DatabaseAdminClient 
dbAdminClient = null; + try { + // [END spanner_init_client] + final String command = args[0]; + DatabaseId db = DatabaseId.of(options.getProjectId(), args[1], args[2]); + + // This will return the default project id based on the environment. + String clientProject = spanner.getOptions().getProjectId(); + if (!db.getInstanceId().getProject().equals(clientProject)) { + System.err.println( + "Invalid project specified. Project in the database id should match the " + + "project name set in the environment variable GOOGLE_CLOUD_PROJECT. Expected: " + + clientProject); + printUsageAndExit(); + } + // Generate a backup id for the sample database. + String backupId = null; + if (args.length == 4) { + backupId = args[3]; + } + + // [START spanner_init_client] + DatabaseClient dbClient = spanner.getDatabaseClient(db); + dbAdminClient = spanner.createDatabaseAdminClient(); + // [END spanner_init_client] + + // Use client here... + run(dbClient, dbAdminClient, command, db, backupId); + // [START spanner_init_client] + } finally { + if (dbAdminClient != null) { + if (!dbAdminClient.isShutdown() || !dbAdminClient.isTerminated()) { + dbAdminClient.close(); + } + } + spanner.close(); + } + // [END spanner_init_client] + System.out.println("Closed client"); + } + + /** + * Class to contain singer sample data. + */ + static class Singer { + + final long singerId; + final String firstName; + final String lastName; + + Singer(long singerId, String firstName, String lastName) { + this.singerId = singerId; + this.firstName = firstName; + this.lastName = lastName; + } + } + + /** + * Class to contain album sample data. 
+ */ + static class Album { + + final long singerId; + final long albumId; + final String albumTitle; + + Album(long singerId, long albumId, String albumTitle) { + this.singerId = singerId; + this.albumId = albumId; + this.albumTitle = albumTitle; + } + } +} diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgUpdateUsingDmlReturningSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgUpdateUsingDmlReturningSample.java new file mode 100644 index 000000000000..d0b5a5795e6d --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/PgUpdateUsingDmlReturningSample.java @@ -0,0 +1,76 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_postgresql_update_dml_returning] + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; + +public class PgUpdateUsingDmlReturningSample { + + static void updateUsingDmlReturning() { + // TODO(developer): Replace these variables before running the sample. 
+ final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + updateUsingDmlReturning(projectId, instanceId, databaseId); + } + + static void updateUsingDmlReturning(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService()) { + final DatabaseClient dbClient = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + // Update MarketingBudget column for records satisfying + // a particular condition and returns the modified + // MarketingBudget column of the updated records using + // ‘RETURNING MarketingBudget’. + // It is also possible to return all columns of all the + // updated records by using ‘RETURNING *’. + dbClient + .readWriteTransaction() + .run( + transaction -> { + String sql = + "UPDATE Albums " + + "SET MarketingBudget = MarketingBudget * 2 " + + "WHERE SingerId = 1 and AlbumId = 1 " + + "RETURNING MarketingBudget"; + + // readWriteTransaction.executeQuery(..) API should be used for executing + // DML statements with RETURNING clause. 
+ try (ResultSet resultSet = transaction.executeQuery(Statement.of(sql))) { + while (resultSet.next()) { + System.out.printf("%d\n", resultSet.getLong(0)); + } + System.out.printf( + "Updated row(s) count: %d\n", resultSet.getStats().getRowCountExact()); + } + return null; + }); + } + } +} +// [END spanner_postgresql_update_dml_returning] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/QueryInformationSchemaDatabaseOptionsSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/QueryInformationSchemaDatabaseOptionsSample.java new file mode 100644 index 000000000000..814c74542cba --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/QueryInformationSchemaDatabaseOptionsSample.java @@ -0,0 +1,69 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +//[START spanner_query_information_schema_database_options] + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; + +public class QueryInformationSchemaDatabaseOptionsSample { + + static void queryInformationSchemaDatabaseOptions() { + // TODO(developer): Replace these variables before running the sample. 
+ final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + queryInformationSchemaDatabaseOptions(projectId, instanceId, databaseId); + } + + static void queryInformationSchemaDatabaseOptions( + String projectId, String instanceId, String databaseId) { + try (Spanner spanner = SpannerOptions + .newBuilder() + .setProjectId(projectId) + .build() + .getService()) { + final DatabaseId id = DatabaseId.of(projectId, instanceId, databaseId); + final DatabaseClient databaseClient = spanner.getDatabaseClient(id); + + try (ResultSet resultSet = databaseClient + .singleUse() + .executeQuery(Statement.of( + "SELECT OPTION_NAME, OPTION_VALUE" + + " FROM INFORMATION_SCHEMA.DATABASE_OPTIONS" + + " WHERE OPTION_NAME = 'default_leader'") + )) { + if (resultSet.next()) { + final String optionName = resultSet.getString("OPTION_NAME"); + final String optionValue = resultSet.getString("OPTION_VALUE"); + + System.out.println("The " + optionName + " for " + id + " is " + optionValue); + } else { + System.out.println( + "Database " + id + " does not have a value for option 'default_leader'" + ); + } + } + } + } +} +//[END spanner_query_information_schema_database_options] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/QueryWithJsonParameterSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/QueryWithJsonParameterSample.java new file mode 100644 index 000000000000..022818cec660 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/QueryWithJsonParameterSample.java @@ -0,0 +1,64 @@ +/* + * Copyright 2021 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_query_with_json_parameter] +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Value; + +class QueryWithJsonParameterSample { + + static void queryWithJsonParameter() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + queryWithJsonParameter(client); + } + } + + static void queryWithJsonParameter(DatabaseClient client) { + String exampleJson = "{\"rating\": 9}"; + Statement statement = + Statement.newBuilder( + "SELECT VenueId, VenueDetails\n" + + "FROM Venues\n" + + "WHERE JSON_VALUE(VenueDetails, '$.rating') = " + + "JSON_VALUE(@details, '$.rating')") + .bind("details") + .to(Value.json(exampleJson)) + .build(); + try (ResultSet resultSet = client.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "VenueId: %s, VenueDetails: %s%n", + resultSet.getLong("VenueId"), resultSet.getJson("VenueDetails")); + } + } + } +} +// [END spanner_query_with_json_parameter] diff 
--git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/QueryWithJsonbParameterSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/QueryWithJsonbParameterSample.java new file mode 100644 index 000000000000..a0af5be1a915 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/QueryWithJsonbParameterSample.java @@ -0,0 +1,64 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_postgresql_jsonb_query_parameter] +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Value; + +class QueryWithJsonbParameterSample { + + static void queryWithJsonbParameter() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + queryWithJsonbParameter(client); + } + } + + static void queryWithJsonbParameter(DatabaseClient client) { + int rating = 2; + Statement statement = + Statement.newBuilder( + "SELECT VenueId, VenueDetails\n" + + "FROM Venues\n" + + "WHERE CAST(venuedetails ->> 'rating' " + + "AS INTEGER) > $1") + .bind("p1") + .to(Value.int64(rating)) + .build(); + try (ResultSet resultSet = client.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "VenueId: %s, VenueDetails: %s%n", + resultSet.getLong("venueid"), resultSet.getPgJsonb("venuedetails")); + } + } + } +} +// [END spanner_postgresql_jsonb_query_parameter] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/QueryWithNumericParameterSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/QueryWithNumericParameterSample.java new file mode 100644 index 000000000000..146ea780fdd3 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/QueryWithNumericParameterSample.java @@ -0,0 +1,59 @@ +/* + * Copyright 2020 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_query_with_numeric_parameter] +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import java.math.BigDecimal; + +class QueryWithNumericParameterSample { + + static void queryWithNumericParameter() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + queryWithNumericParameter(client); + } + } + + static void queryWithNumericParameter(DatabaseClient client) { + Statement statement = + Statement.newBuilder( + "SELECT VenueId, Revenue FROM Venues WHERE Revenue < @numeric") + .bind("numeric") + .to(new BigDecimal("100000")) + .build(); + try (ResultSet resultSet = client.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s%n", resultSet.getLong("VenueId"), resultSet.getBigDecimal("Revenue")); + } + } + } +} +// [END spanner_query_with_numeric_parameter] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/QueryWithProtoParameterSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/QueryWithProtoParameterSample.java new file mode 100644 index 000000000000..b700fa341fad --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/QueryWithProtoParameterSample.java @@ -0,0 +1,70 @@ +/* + * Copyright 2024 Google Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_query_with_proto_types_parameter] +import com.example.spanner.SingerProto.Genre; +import com.example.spanner.SingerProto.SingerInfo; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; + +class QueryWithProtoParameterSample { + + static void queryWithProtoParameter() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + queryWithProtoParameter(client); + } + } + + static void queryWithProtoParameter(DatabaseClient client) { + Statement statement = + Statement.newBuilder( + "SELECT SingerId, SingerInfo, SingerInfo.nationality, SingerInfoArray, " + + "SingerGenre, SingerGenreArray FROM Singers " + + "WHERE SingerInfo.nationality=@country and SingerGenre=@singerGenre") + .bind("country") + .to("Country2") + .bind("singerGenre") + .to(Genre.FOLK) + .build(); + try (ResultSet resultSet = client.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s %s %s %s%n", + resultSet.getLong("SingerId"), + resultSet.getProtoMessage("SingerInfo", SingerInfo.getDefaultInstance()), + resultSet.getString("nationality"), + resultSet.getProtoMessageList("SingerInfoArray", SingerInfo.getDefaultInstance()), + resultSet.getProtoEnum("SingerGenre", Genre::forNumber), + resultSet.getProtoEnumList("SingerGenreArray", Genre::forNumber)); + } + } + } +} +// [END spanner_query_with_proto_types_parameter] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/QuickstartSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/QuickstartSample.java new file mode 100644 index 000000000000..14aad267dd90 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/QuickstartSample.java @@ -0,0 +1,64 @@ +/* + * Copyright 2017 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_quickstart] +// Imports the Google Cloud client library +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; + +/** + * A quick start code for Cloud Spanner. It demonstrates how to setup the Cloud Spanner client and + * execute a simple query using it against an existing database. + */ +public class QuickstartSample { + public static void main(String... args) throws Exception { + + if (args.length != 2) { + System.err.println("Usage: QuickStartSample "); + return; + } + // Instantiates a client + SpannerOptions options = SpannerOptions.newBuilder().build(); + Spanner spanner = options.getService(); + + // Name of your instance & database. 
+ String instanceId = args[0]; + String databaseId = args[1]; + try { + // Creates a database client + DatabaseClient dbClient = + spanner.getDatabaseClient(DatabaseId.of(options.getProjectId(), instanceId, databaseId)); + // Queries the database + ResultSet resultSet = dbClient.singleUse().executeQuery(Statement.of("SELECT 1")); + + System.out.println("\n\nResults:"); + // Prints the results + while (resultSet.next()) { + System.out.printf("%d\n\n", resultSet.getLong(0)); + } + } finally { + // Closes the client which will free up the resources used + spanner.close(); + } + } +} +// [END spanner_quickstart] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/ReadDataWithDatabaseRole.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/ReadDataWithDatabaseRole.java new file mode 100644 index 000000000000..a746d54e0e41 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/ReadDataWithDatabaseRole.java @@ -0,0 +1,64 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_read_data_with_database_role] +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import java.util.Arrays; + +public class ReadDataWithDatabaseRole { + + static void readDataWithDatabaseRole() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String role = "my-role"; + readDataWithDatabaseRole(projectId, instanceId, databaseId, role); + } + + static void readDataWithDatabaseRole( + String projectId, String instanceId, String databaseId, String role) { + try (Spanner spannerWithRole = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .setDatabaseRole(role) + .build() + .getService()) { + DatabaseClient dbClient = + spannerWithRole.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + ResultSet resultSet = + dbClient + .singleUse() + .read( + "Singers", + KeySet.all(), + Arrays.asList("SingerId", "FirstName", "LastName")); + while (resultSet.next()) { + System.out.printf("SingerId: %d\n", resultSet.getLong(0)); + System.out.printf("FirstName: %s\n", resultSet.getString(1)); + System.out.printf("LastName: %s\n", resultSet.getString(2)); + } + } + } +} +// [END spanner_read_data_with_database_role] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/RestoreBackupWithEncryptionKey.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/RestoreBackupWithEncryptionKey.java new file mode 100644 index 000000000000..af101f96cca0 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/RestoreBackupWithEncryptionKey.java @@ -0,0 +1,91 @@ +/* + * Copyright 2021 Google LLC + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_restore_backup_with_encryption_key] + +import static com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION; + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.spanner.admin.database.v1.BackupName; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.InstanceName; +import com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig; +import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; +import java.util.concurrent.ExecutionException; + +public class RestoreBackupWithEncryptionKey { + + static void restoreBackupWithEncryptionKey() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String backupId = "my-backup"; + String kmsKeyName = + "projects/" + projectId + "/locations//keyRings//cryptoKeys/"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient adminClient = spanner.createDatabaseAdminClient()) { + restoreBackupWithEncryptionKey( + adminClient, + projectId, + instanceId, + backupId, + databaseId, + kmsKeyName); + } + } + + static Void restoreBackupWithEncryptionKey(DatabaseAdminClient adminClient, + String projectId, String instanceId, String backupId, String restoreId, String kmsKeyName) { + RestoreDatabaseRequest request = + RestoreDatabaseRequest.newBuilder() + .setParent(InstanceName.of(projectId, instanceId).toString()) + .setDatabaseId(restoreId) + .setBackup(BackupName.of(projectId, instanceId, backupId).toString()) + .setEncryptionConfig(RestoreDatabaseEncryptionConfig.newBuilder() + .setEncryptionType(CUSTOMER_MANAGED_ENCRYPTION).setKmsKeyName(kmsKeyName)).build(); + Database database; + try { + System.out.println("Waiting for operation to complete..."); + database = adminClient.restoreDatabaseAsync(request).get(); + ; + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } + + System.out.printf( + "Database %s restored to %s from backup %s using encryption key %s%n", + database.getRestoreInfo().getBackupInfo().getSourceDatabase(), + database.getName(), + database.getRestoreInfo().getBackupInfo().getBackup(), + database.getEncryptionConfig().getKmsKeyName() + ); + return null; + } +} +// [END spanner_restore_backup_with_encryption_key] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/RestoreBackupWithMultiRegionEncryptionKey.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/RestoreBackupWithMultiRegionEncryptionKey.java new file mode 100644 index 000000000000..a13ff4b8cd0b --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/RestoreBackupWithMultiRegionEncryptionKey.java @@ -0,0 +1,98 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_restore_backup_with_MR_CMEK] + +import static com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION; + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.BackupName; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.InstanceName; +import com.google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig; +import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; +import java.util.concurrent.ExecutionException; + +public class RestoreBackupWithMultiRegionEncryptionKey { + + static void restoreBackupWithMultiRegionEncryptionKey() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String backupId = "my-backup"; + String[] kmsKeyNames = + new String[] { + "projects/" + projectId + "/locations//keyRings//cryptoKeys/", + "projects/" + projectId + "/locations//keyRings//cryptoKeys/", + "projects/" + projectId + "/locations//keyRings//cryptoKeys/" + }; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient adminClient = spanner.createDatabaseAdminClient()) { + restoreBackupWithMultiRegionEncryptionKey( + adminClient, projectId, instanceId, backupId, databaseId, kmsKeyNames); + } + } + + static Void restoreBackupWithMultiRegionEncryptionKey( + DatabaseAdminClient adminClient, + String projectId, + String instanceId, + String backupId, + String restoreId, + String[] kmsKeyNames) { + RestoreDatabaseRequest request = + RestoreDatabaseRequest.newBuilder() + .setParent(InstanceName.of(projectId, instanceId).toString()) + .setDatabaseId(restoreId) + .setBackup(BackupName.of(projectId, instanceId, backupId).toString()) + .setEncryptionConfig( + RestoreDatabaseEncryptionConfig.newBuilder() + .setEncryptionType(CUSTOMER_MANAGED_ENCRYPTION) + .addAllKmsKeyNames(ImmutableList.copyOf(kmsKeyNames))) + .build(); + Database database; + try { + System.out.println("Waiting for operation to complete..."); + database = adminClient.restoreDatabaseAsync(request).get(); + ; + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } + + System.out.printf( + "Database %s restored to %s from backup %s using encryption keys %s%n", + database.getRestoreInfo().getBackupInfo().getSourceDatabase(), + database.getName(), + database.getRestoreInfo().getBackupInfo().getBackup(), + database.getEncryptionConfig().getKmsKeyNamesList()); + return null; + } +} +// [END spanner_restore_backup_with_MR_CMEK] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/SetMaxCommitDelaySample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/SetMaxCommitDelaySample.java new file mode 100644 index 000000000000..901664829368 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/SetMaxCommitDelaySample.java @@ -0,0 +1,71 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_set_max_commit_delay] + +import com.google.cloud.spanner.CommitResponse; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import java.time.Duration; +import java.util.Arrays; + +public class SetMaxCommitDelaySample { + + static void setMaxCommitDelay() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + final DatabaseClient databaseClient = spanner + .getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + setMaxCommitDelay(databaseClient); + } + } + + static void setMaxCommitDelay(DatabaseClient databaseClient) { + final CommitResponse commitResponse = databaseClient.writeWithOptions(Arrays.asList( + Mutation.newInsertOrUpdateBuilder("Albums") + .set("SingerId") + .to("1") + .set("AlbumId") + .to("1") + .set("MarketingBudget") + .to("200000") + .build(), + Mutation.newInsertOrUpdateBuilder("Albums") + .set("SingerId") + .to("2") + .set("AlbumId") + .to("2") + .set("MarketingBudget") + .to("400000") + .build() + ), Options.maxCommitDelay(Duration.ofMillis(100))); + + System.out.println( + "Updated data with timestamp + " + commitResponse.getCommitTimestamp() + "."); + } +} +// [END spanner_set_max_commit_delay] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/SingerProto.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/SingerProto.java new file mode 100644 index 000000000000..b962e4bc6b7a --- /dev/null +++ 
b/java-spanner/samples/snippets/src/main/java/com/example/spanner/SingerProto.java @@ -0,0 +1,1191 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: samples/snippets/src/main/resources/com/example/spanner/singer.proto + +// Protobuf Java Version: 3.25.1 +package com.example.spanner; + +public final class SingerProto { + private SingerProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + /** Protobuf enum {@code examples.spanner.music.Genre} */ + public enum Genre implements com.google.protobuf.ProtocolMessageEnum { + /** POP = 0; */ + POP(0), + /** JAZZ = 1; */ + JAZZ(1), + /** FOLK = 2; */ + FOLK(2), + /** ROCK = 3; */ + ROCK(3), + UNRECOGNIZED(-1), + ; + + /** POP = 0; */ + public static final int POP_VALUE = 0; + /** JAZZ = 1; */ + public static final int JAZZ_VALUE = 1; + /** FOLK = 2; */ + public static final int FOLK_VALUE = 2; + /** ROCK = 3; */ + public static final int ROCK_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new IllegalArgumentException("Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire 
value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @Deprecated + public static Genre valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Genre forNumber(int value) { + switch (value) { + case 0: + return POP; + case 1: + return JAZZ; + case 2: + return FOLK; + case 3: + return ROCK; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Genre findValueByNumber(int number) { + return Genre.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new IllegalStateException("Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return SingerProto.getDescriptor().getEnumTypes().get(0); + } + + private static final Genre[] VALUES = values(); + + public static Genre valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Genre(int value) { + this.value = value; + } + + // 
@@protoc_insertion_point(enum_scope:examples.spanner.music.Genre) + } + + public interface SingerInfoOrBuilder + extends + // @@protoc_insertion_point(interface_extends:examples.spanner.music.SingerInfo) + com.google.protobuf.MessageOrBuilder { + + /** + * optional int64 singer_id = 1; + * + * @return Whether the singerId field is set. + */ + boolean hasSingerId(); + /** + * optional int64 singer_id = 1; + * + * @return The singerId. + */ + long getSingerId(); + + /** + * optional string birth_date = 2; + * + * @return Whether the birthDate field is set. + */ + boolean hasBirthDate(); + /** + * optional string birth_date = 2; + * + * @return The birthDate. + */ + String getBirthDate(); + /** + * optional string birth_date = 2; + * + * @return The bytes for birthDate. + */ + com.google.protobuf.ByteString getBirthDateBytes(); + + /** + * optional string nationality = 3; + * + * @return Whether the nationality field is set. + */ + boolean hasNationality(); + /** + * optional string nationality = 3; + * + * @return The nationality. + */ + String getNationality(); + /** + * optional string nationality = 3; + * + * @return The bytes for nationality. + */ + com.google.protobuf.ByteString getNationalityBytes(); + + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return Whether the genre field is set. + */ + boolean hasGenre(); + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return The enum numeric value on the wire for genre. + */ + int getGenreValue(); + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return The genre. + */ + Genre getGenre(); + } + /** Protobuf type {@code examples.spanner.music.SingerInfo} */ + public static final class SingerInfo extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:examples.spanner.music.SingerInfo) + SingerInfoOrBuilder { + private static final long serialVersionUID = 0L; + // Use SingerInfo.newBuilder() to construct. 
+ private SingerInfo(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private SingerInfo() { + birthDate_ = ""; + nationality_ = ""; + genre_ = 0; + } + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new SingerInfo(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return SingerProto.internal_static_examples_spanner_music_SingerInfo_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return SingerProto.internal_static_examples_spanner_music_SingerInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized(SingerInfo.class, Builder.class); + } + + private int bitField0_; + public static final int SINGER_ID_FIELD_NUMBER = 1; + private long singerId_ = 0L; + /** + * optional int64 singer_id = 1; + * + * @return Whether the singerId field is set. + */ + @Override + public boolean hasSingerId() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * optional int64 singer_id = 1; + * + * @return The singerId. + */ + @Override + public long getSingerId() { + return singerId_; + } + + public static final int BIRTH_DATE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile Object birthDate_ = ""; + /** + * optional string birth_date = 2; + * + * @return Whether the birthDate field is set. + */ + @Override + public boolean hasBirthDate() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * optional string birth_date = 2; + * + * @return The birthDate. + */ + @Override + public String getBirthDate() { + Object ref = birthDate_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + birthDate_ = s; + return s; + } + } + /** + * optional string birth_date = 2; + * + * @return The bytes for birthDate. 
+ */ + @Override + public com.google.protobuf.ByteString getBirthDateBytes() { + Object ref = birthDate_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + birthDate_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int NATIONALITY_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile Object nationality_ = ""; + /** + * optional string nationality = 3; + * + * @return Whether the nationality field is set. + */ + @Override + public boolean hasNationality() { + return ((bitField0_ & 0x00000004) != 0); + } + /** + * optional string nationality = 3; + * + * @return The nationality. + */ + @Override + public String getNationality() { + Object ref = nationality_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + nationality_ = s; + return s; + } + } + /** + * optional string nationality = 3; + * + * @return The bytes for nationality. + */ + @Override + public com.google.protobuf.ByteString getNationalityBytes() { + Object ref = nationality_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + nationality_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int GENRE_FIELD_NUMBER = 4; + private int genre_ = 0; + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return Whether the genre field is set. + */ + @Override + public boolean hasGenre() { + return ((bitField0_ & 0x00000008) != 0); + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return The enum numeric value on the wire for genre. 
+ */ + @Override + public int getGenreValue() { + return genre_; + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return The genre. + */ + @Override + public Genre getGenre() { + Genre result = Genre.forNumber(genre_); + return result == null ? Genre.UNRECOGNIZED : result; + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeInt64(1, singerId_); + } + if (((bitField0_ & 0x00000002) != 0)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, birthDate_); + } + if (((bitField0_ & 0x00000004) != 0)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, nationality_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeEnum(4, genre_); + } + getUnknownFields().writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, singerId_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, birthDate_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, nationality_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(4, genre_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof SingerInfo)) { + 
return super.equals(obj); + } + SingerInfo other = (SingerInfo) obj; + + if (hasSingerId() != other.hasSingerId()) return false; + if (hasSingerId()) { + if (getSingerId() != other.getSingerId()) return false; + } + if (hasBirthDate() != other.hasBirthDate()) return false; + if (hasBirthDate()) { + if (!getBirthDate().equals(other.getBirthDate())) return false; + } + if (hasNationality() != other.hasNationality()) return false; + if (hasNationality()) { + if (!getNationality().equals(other.getNationality())) return false; + } + if (hasGenre() != other.hasGenre()) return false; + if (hasGenre()) { + if (genre_ != other.genre_) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasSingerId()) { + hash = (37 * hash) + SINGER_ID_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getSingerId()); + } + if (hasBirthDate()) { + hash = (37 * hash) + BIRTH_DATE_FIELD_NUMBER; + hash = (53 * hash) + getBirthDate().hashCode(); + } + if (hasNationality()) { + hash = (37 * hash) + NATIONALITY_FIELD_NUMBER; + hash = (53 * hash) + getNationality().hashCode(); + } + if (hasGenre()) { + hash = (37 * hash) + GENRE_FIELD_NUMBER; + hash = (53 * hash) + genre_; + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static SingerInfo parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static SingerInfo parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static SingerInfo 
parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static SingerInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static SingerInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static SingerInfo parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static SingerInfo parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static SingerInfo parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static SingerInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static SingerInfo parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static SingerInfo parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static SingerInfo parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(SingerInfo prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** Protobuf type {@code examples.spanner.music.SingerInfo} */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:examples.spanner.music.SingerInfo) + SingerInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return SingerProto.internal_static_examples_spanner_music_SingerInfo_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return SingerProto.internal_static_examples_spanner_music_SingerInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized(SingerInfo.class, Builder.class); + } + + // Construct using com.example.spanner.SingerProto.SingerInfo.newBuilder() + private Builder() {} + + private Builder(BuilderParent parent) { + super(parent); + } + + @Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + singerId_ = 0L; + birthDate_ = ""; + nationality_ = ""; + genre_ = 0; + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
SingerProto.internal_static_examples_spanner_music_SingerInfo_descriptor; + } + + @Override + public SingerInfo getDefaultInstanceForType() { + return SingerInfo.getDefaultInstance(); + } + + @Override + public SingerInfo build() { + SingerInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public SingerInfo buildPartial() { + SingerInfo result = new SingerInfo(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(SingerInfo result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.singerId_ = singerId_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.birthDate_ = birthDate_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.nationality_ = nationality_; + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.genre_ = genre_; + to_bitField0_ |= 0x00000008; + } + result.bitField0_ |= to_bitField0_; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return 
super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof SingerInfo) { + return mergeFrom((SingerInfo) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(SingerInfo other) { + if (other == SingerInfo.getDefaultInstance()) return this; + if (other.hasSingerId()) { + setSingerId(other.getSingerId()); + } + if (other.hasBirthDate()) { + birthDate_ = other.birthDate_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasNationality()) { + nationality_ = other.nationality_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasGenre()) { + setGenre(other.getGenre()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + singerId_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + birthDate_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + nationality_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 32: + { + genre_ = input.readEnum(); + bitField0_ |= 0x00000008; + break; + } // case 32 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + 
onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long singerId_; + /** + * optional int64 singer_id = 1; + * + * @return Whether the singerId field is set. + */ + @Override + public boolean hasSingerId() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * optional int64 singer_id = 1; + * + * @return The singerId. + */ + @Override + public long getSingerId() { + return singerId_; + } + /** + * optional int64 singer_id = 1; + * + * @param value The singerId to set. + * @return This builder for chaining. + */ + public Builder setSingerId(long value) { + + singerId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * optional int64 singer_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearSingerId() { + bitField0_ = (bitField0_ & ~0x00000001); + singerId_ = 0L; + onChanged(); + return this; + } + + private Object birthDate_ = ""; + /** + * optional string birth_date = 2; + * + * @return Whether the birthDate field is set. + */ + public boolean hasBirthDate() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * optional string birth_date = 2; + * + * @return The birthDate. + */ + public String getBirthDate() { + Object ref = birthDate_; + if (!(ref instanceof String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + birthDate_ = s; + return s; + } else { + return (String) ref; + } + } + /** + * optional string birth_date = 2; + * + * @return The bytes for birthDate. + */ + public com.google.protobuf.ByteString getBirthDateBytes() { + Object ref = birthDate_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + birthDate_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string birth_date = 2; + * + * @param value The birthDate to set. + * @return This builder for chaining. 
+ */ + public Builder setBirthDate(String value) { + if (value == null) { + throw new NullPointerException(); + } + birthDate_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * optional string birth_date = 2; + * + * @return This builder for chaining. + */ + public Builder clearBirthDate() { + birthDate_ = getDefaultInstance().getBirthDate(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * optional string birth_date = 2; + * + * @param value The bytes for birthDate to set. + * @return This builder for chaining. + */ + public Builder setBirthDateBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + birthDate_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private Object nationality_ = ""; + /** + * optional string nationality = 3; + * + * @return Whether the nationality field is set. + */ + public boolean hasNationality() { + return ((bitField0_ & 0x00000004) != 0); + } + /** + * optional string nationality = 3; + * + * @return The nationality. + */ + public String getNationality() { + Object ref = nationality_; + if (!(ref instanceof String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + nationality_ = s; + return s; + } else { + return (String) ref; + } + } + /** + * optional string nationality = 3; + * + * @return The bytes for nationality. + */ + public com.google.protobuf.ByteString getNationalityBytes() { + Object ref = nationality_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + nationality_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string nationality = 3; + * + * @param value The nationality to set. + * @return This builder for chaining. 
+ */ + public Builder setNationality(String value) { + if (value == null) { + throw new NullPointerException(); + } + nationality_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * optional string nationality = 3; + * + * @return This builder for chaining. + */ + public Builder clearNationality() { + nationality_ = getDefaultInstance().getNationality(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + /** + * optional string nationality = 3; + * + * @param value The bytes for nationality to set. + * @return This builder for chaining. + */ + public Builder setNationalityBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nationality_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private int genre_ = 0; + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return Whether the genre field is set. + */ + @Override + public boolean hasGenre() { + return ((bitField0_ & 0x00000008) != 0); + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return The enum numeric value on the wire for genre. + */ + @Override + public int getGenreValue() { + return genre_; + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @param value The enum numeric value on the wire for genre to set. + * @return This builder for chaining. + */ + public Builder setGenreValue(int value) { + genre_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return The genre. + */ + @Override + public Genre getGenre() { + Genre result = Genre.forNumber(genre_); + return result == null ? Genre.UNRECOGNIZED : result; + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @param value The genre to set. + * @return This builder for chaining. 
+ */ + public Builder setGenre(Genre value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + genre_ = value.getNumber(); + onChanged(); + return this; + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return This builder for chaining. + */ + public Builder clearGenre() { + bitField0_ = (bitField0_ & ~0x00000008); + genre_ = 0; + onChanged(); + return this; + } + + @Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:examples.spanner.music.SingerInfo) + } + + // @@protoc_insertion_point(class_scope:examples.spanner.music.SingerInfo) + private static final SingerInfo DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new SingerInfo(); + } + + public static SingerInfo getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public SingerInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return 
builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public SingerInfo getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_examples_spanner_music_SingerInfo_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_examples_spanner_music_SingerInfo_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + String[] descriptorData = { + "\n\014singer.proto\022\026examples.spanner.music\"\301" + + "\001\n\nSingerInfo\022\026\n\tsinger_id\030\001 \001(\003H\000\210\001\001\022\027\n" + + "\nbirth_date\030\002 \001(\tH\001\210\001\001\022\030\n\013nationality\030\003 " + + "\001(\tH\002\210\001\001\0221\n\005genre\030\004 \001(\0162\035.examples.spann" + + "er.music.GenreH\003\210\001\001B\014\n\n_singer_idB\r\n\013_bi" + + "rth_dateB\016\n\014_nationalityB\010\n\006_genre*.\n\005Ge" + + "nre\022\007\n\003POP\020\000\022\010\n\004JAZZ\020\001\022\010\n\004FOLK\020\002\022\010\n\004ROCK" + + "\020\003B$\n\023com.example.spannerB\013SingerProtoP\000" + + "b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] {}); + internal_static_examples_spanner_music_SingerInfo_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_examples_spanner_music_SingerInfo_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_examples_spanner_music_SingerInfo_descriptor, + new String[] { + "SingerId", "BirthDate", 
"Nationality", "Genre", + }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/SpannerGraphSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/SpannerGraphSample.java new file mode 100644 index 000000000000..ccb053133e56 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/SpannerGraphSample.java @@ -0,0 +1,587 @@ +/* + * Copyright 2024 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeyRange; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.InstanceName; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.ExecutionException; + +/** + * Example code for using the Cloud Spanner API. This example demonstrates all the common property + * graph operations that can be done on Cloud Spanner. These are: + * + *

+ *
+ * <ul>
+ *   <li>Creating a Cloud Spanner database with a property graph.
+ *   <li>Inserting, updating and deleting data.
+ *   <li>Executing graph queries.
+ * </ul>
    + */ +public class SpannerGraphSample { + + // [START spanner_insert_graph_data] + /** Class to contain sample Person data. */ + static class Person { + + final long id; + final String name; + final Timestamp birthday; + final String country; + final String city; + + Person(long id, String name, Timestamp birthday, String country, String city) { + this.id = id; + this.name = name; + this.birthday = birthday; + this.country = country; + this.city = city; + } + } + + /** Class to contain sample Account data. */ + static class Account { + + final long id; + final Timestamp createTime; + final boolean isBlocked; + final String nickName; + + Account(long id, Timestamp createTime, boolean isBlocked, String nickName) { + this.id = id; + this.createTime = createTime; + this.isBlocked = isBlocked; + this.nickName = nickName; + } + } + + /** Class to contain sample Transfer data. */ + static class Transfer { + + final long id; + final long toId; + final double amount; + final Timestamp createTime; + final String orderNumber; + + Transfer(long id, long toId, double amount, Timestamp createTime, String orderNumber) { + this.id = id; + this.toId = toId; + this.amount = amount; + this.createTime = createTime; + this.orderNumber = orderNumber; + } + } + + /** Class to contain sample Ownership data. 
*/ + static class Own { + + final long id; + final long accountId; + final Timestamp createTime; + + Own(long id, long accountId, Timestamp createTime) { + this.id = id; + this.accountId = accountId; + this.createTime = createTime; + } + } + + // [END spanner_insert_graph_data] + + // [START spanner_create_database_with_property_graph] + static void createDatabaseWithPropertyGraph( + DatabaseAdminClient dbAdminClient, InstanceName instanceName, String databaseId) { + CreateDatabaseRequest createDatabaseRequest = + CreateDatabaseRequest.newBuilder() + .setCreateStatement("CREATE DATABASE `" + databaseId + "`") + .setParent(instanceName.toString()) + .addAllExtraStatements( + Arrays.asList( + "CREATE TABLE Person (" + + " id INT64 NOT NULL," + + " name STRING(MAX)," + + " birthday TIMESTAMP," + + " country STRING(MAX)," + + " city STRING(MAX)," + + ") PRIMARY KEY (id)", + "CREATE TABLE Account (" + + " id INT64 NOT NULL," + + " create_time TIMESTAMP," + + " is_blocked BOOL," + + " nick_name STRING(MAX)," + + ") PRIMARY KEY (id)", + "CREATE TABLE PersonOwnAccount (" + + " id INT64 NOT NULL," + + " account_id INT64 NOT NULL," + + " create_time TIMESTAMP," + + " FOREIGN KEY (account_id)" + + " REFERENCES Account (id)" + + ") PRIMARY KEY (id, account_id)," + + "INTERLEAVE IN PARENT Person ON DELETE CASCADE", + "CREATE TABLE AccountTransferAccount (" + + " id INT64 NOT NULL," + + " to_id INT64 NOT NULL," + + " amount FLOAT64," + + " create_time TIMESTAMP NOT NULL," + + " order_number STRING(MAX)," + + " FOREIGN KEY (to_id) REFERENCES Account (id)" + + ") PRIMARY KEY (id, to_id, create_time)," + + "INTERLEAVE IN PARENT Account ON DELETE CASCADE", + "CREATE OR REPLACE PROPERTY GRAPH FinGraph " + + "NODE TABLES (Account, Person)" + + "EDGE TABLES (" + + " PersonOwnAccount" + + " SOURCE KEY(id) REFERENCES Person(id)" + + " DESTINATION KEY(account_id) REFERENCES Account(id)" + + " LABEL Owns," + + " AccountTransferAccount" + + " SOURCE KEY(id) REFERENCES Account(id)" + + " 
DESTINATION KEY(to_id) REFERENCES Account(id)" + + " LABEL Transfers)")) + .build(); + try { + // Initiate the request which returns an OperationFuture. + com.google.spanner.admin.database.v1.Database db = + dbAdminClient.createDatabaseAsync(createDatabaseRequest).get(); + System.out.println("Created database [" + db.getName() + "]"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + System.out.println("Encountered exception" + e.getCause()); + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + + // [END spanner_create_database_with_property_graph] + + // [START spanner_insert_graph_data] + static final List ACCOUNTS = + Arrays.asList( + new Account( + 7, Timestamp.parseTimestamp("2020-01-10T06:22:20.12Z"), false, "Vacation Fund"), + new Account( + 16, Timestamp.parseTimestamp("2020-01-27T17:55:09.12Z"), true, "Vacation Fund"), + new Account( + 20, Timestamp.parseTimestamp("2020-02-18T05:44:20.12Z"), false, "Rainy Day Fund")); + + static final List PERSONS = + Arrays.asList( + new Person( + 1, + "Alex", + Timestamp.parseTimestamp("1991-12-21T00:00:00.12Z"), + "Australia", + " Adelaide"), + new Person( + 2, + "Dana", + Timestamp.parseTimestamp("1980-10-31T00:00:00.12Z"), + "Czech_Republic", + "Moravia"), + new Person( + 3, "Lee", Timestamp.parseTimestamp("1986-12-07T00:00:00.12Z"), "India", "Kollam")); + + static final List TRANSFERS = + Arrays.asList( + new Transfer( + 7, 16, 300.0, Timestamp.parseTimestamp("2020-08-29T15:28:58.12Z"), "304330008004315"), + new Transfer( + 7, 16, 100.0, Timestamp.parseTimestamp("2020-10-04T16:55:05.12Z"), "304120005529714"), + new Transfer( + 16, + 20, + 300.0, + Timestamp.parseTimestamp("2020-09-25T02:36:14.12Z"), + "103650009791820"), + new Transfer( 
+ 20, 7, 500.0, Timestamp.parseTimestamp("2020-10-04T16:55:05.12Z"), "304120005529714"), + new Transfer( + 20, + 16, + 200.0, + Timestamp.parseTimestamp("2020-10-17T03:59:40.12Z"), + "302290001255747")); + + static final List OWNERSHIPS = + Arrays.asList( + new Own(1, 7, Timestamp.parseTimestamp("2020-01-10T06:22:20.12Z")), + new Own(2, 20, Timestamp.parseTimestamp("2020-01-27T17:55:09.12Z")), + new Own(3, 16, Timestamp.parseTimestamp("2020-02-18T05:44:20.12Z"))); + + static void insertData(DatabaseClient dbClient) { + List mutations = new ArrayList<>(); + for (Account account : ACCOUNTS) { + mutations.add( + Mutation.newInsertBuilder("Account") + .set("id") + .to(account.id) + .set("create_time") + .to(account.createTime) + .set("is_blocked") + .to(account.isBlocked) + .set("nick_name") + .to(account.nickName) + .build()); + } + for (Person person : PERSONS) { + mutations.add( + Mutation.newInsertBuilder("Person") + .set("id") + .to(person.id) + .set("name") + .to(person.name) + .set("birthday") + .to(person.birthday) + .set("country") + .to(person.country) + .set("city") + .to(person.city) + .build()); + } + for (Transfer transfer : TRANSFERS) { + mutations.add( + Mutation.newInsertBuilder("AccountTransferAccount") + .set("id") + .to(transfer.id) + .set("to_id") + .to(transfer.toId) + .set("amount") + .to(transfer.amount) + .set("create_time") + .to(transfer.createTime) + .set("order_number") + .to(transfer.orderNumber) + .build()); + } + for (Own own : OWNERSHIPS) { + mutations.add( + Mutation.newInsertBuilder("PersonOwnAccount") + .set("id") + .to(own.id) + .set("account_id") + .to(own.accountId) + .set("create_time") + .to(own.createTime) + .build()); + } + + dbClient.write(mutations); + } + + // [END spanner_insert_graph_data] + + // [START spanner_insert_graph_data_with_dml] + static void insertUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run( + transaction -> { + String sql = + "INSERT INTO Account (id, create_time, 
is_blocked) " + + " VALUES" + + " (1, CAST('2000-08-10 08:18:48.463959-07:52' AS TIMESTAMP), false)," + + " (2, CAST('2000-08-12 07:13:16.463959-03:41' AS TIMESTAMP), true)"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record(s) inserted into Account.\n", rowCount); + return null; + }); + + dbClient + .readWriteTransaction() + .run( + transaction -> { + String sql = + "INSERT INTO AccountTransferAccount (id, to_id, create_time, amount) " + + " VALUES" + + " (1, 2, CAST('2000-09-11 03:11:18.463959-06:36' AS TIMESTAMP), 100)," + + " (1, 1, CAST('2000-09-12 04:09:34.463959-05:12' AS TIMESTAMP), 200) "; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record(s) inserted into AccountTransferAccount.\n", rowCount); + return null; + }); + } + + // [END spanner_insert_graph_data_with_dml] + + // [START spanner_update_graph_data_with_dml] + static void updateUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run( + transaction -> { + String sql = "UPDATE Account SET is_blocked = false WHERE id = 2"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d Account record(s) updated.\n", rowCount); + return null; + }); + + dbClient + .readWriteTransaction() + .run( + transaction -> { + String sql = + "UPDATE AccountTransferAccount SET amount = 300 WHERE id = 1 AND to_id = 2"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d AccountTransferAccount record(s) updated.\n", rowCount); + return null; + }); + } + + // [END spanner_update_graph_data_with_dml] + + // [START spanner_update_graph_data_with_graph_query_in_dml] + static void updateUsingGraphQueryInDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run( + transaction -> { + String sql = + "UPDATE Account SET is_blocked = true " + + "WHERE id IN {" + + " GRAPH FinGraph" + + " MATCH (a:Account WHERE a.id = 
1)-[:TRANSFERS]->{1,2}(b:Account)" + + " RETURN b.id}"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d Account record(s) updated.\n", rowCount); + return null; + }); + } + + // [END spanner_update_graph_data_with_graph_query_in_dml] + + // [START spanner_query_graph_data] + static void query(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() // Execute a single query against Cloud Spanner. + .executeQuery( + Statement.of( + "Graph FinGraph MATCH" + + " (a:Person)-[o:Owns]->()-[t:Transfers]->()<-[p:Owns]-(b:Person)RETURN" + + " a.name AS sender, b.name AS receiver, t.amount, t.create_time AS" + + " transfer_at"))) { + while (resultSet.next()) { + System.out.printf( + "%s %s %f %s\n", + resultSet.getString(0), + resultSet.getString(1), + resultSet.getDouble(2), + resultSet.getTimestamp(3)); + } + } + } + + // [END spanner_query_graph_data] + + // [START spanner_query_graph_data_with_parameter] + static void queryWithParameter(DatabaseClient dbClient) { + Statement statement = + Statement.newBuilder( + "Graph FinGraph MATCH" + + " (a:Person)-[o:Owns]->()-[t:Transfers]->()<-[p:Owns]-(b:Person) WHERE" + + " t.amount >= @min RETURN a.name AS sender, b.name AS receiver, t.amount," + + " t.create_time AS transfer_at") + .bind("min") + .to(500) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%s %s %f %s\n", + resultSet.getString("sender"), + resultSet.getString("receiver"), + resultSet.getDouble("amount"), + resultSet.getTimestamp("transfer_at")); + } + } + } + + // [END spanner_query_graph_data_with_parameter] + + // [START spanner_delete_graph_data_with_dml] + static void deleteUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run( + transaction -> { + String sql = "DELETE FROM AccountTransferAccount WHERE id = 1 AND to_id = 2"; + long rowCount = 
transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d AccountTransferAccount record(s) deleted.\n", rowCount); + return null; + }); + + dbClient + .readWriteTransaction() + .run( + transaction -> { + String sql = "DELETE FROM Account WHERE id = 2"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d Account record(s) deleted.\n", rowCount); + return null; + }); + } + + // [END spanner_delete_graph_data_with_dml] + + // [START spanner_delete_graph_data] + static void deleteData(DatabaseClient dbClient) { + List mutations = new ArrayList<>(); + + // KeySet.Builder can be used to delete a specific set of rows. + // Delete the PersonOwnAccount rows with the key values (1,7) and (2,20). + mutations.add( + Mutation.delete( + "PersonOwnAccount", + KeySet.newBuilder().addKey(Key.of(1, 7)).addKey(Key.of(2, 20)).build())); + + // KeyRange can be used to delete rows with a key in a specific range. + // Delete a range of rows where the key prefix is >=1 and <8 + mutations.add( + Mutation.delete( + "AccountTransferAccount", KeySet.range(KeyRange.closedOpen(Key.of(1), Key.of(8))))); + + // KeySet.all() can be used to delete all the rows in a table. + // Delete all Account rows, which will also delete the remaining + // AccountTransferAccount rows since it was defined with ON DELETE CASCADE. + mutations.add(Mutation.delete("Account", KeySet.all())); + + // KeySet.all() can be used to delete all the rows in a table. + // Delete all Person rows, which will also delete the remaining + // PersonOwnAccount rows since it was defined with ON DELETE CASCADE. 
+ mutations.add(Mutation.delete("Person", KeySet.all())); + + dbClient.write(mutations); + System.out.printf("Records deleted.\n"); + } + + // [END spanner_delete_graph_data] + + static void run( + DatabaseClient dbClient, + DatabaseAdminClient dbAdminClient, + String command, + DatabaseId database) { + switch (command) { + case "createdatabase": + createDatabaseWithPropertyGraph( + dbAdminClient, + InstanceName.of( + database.getInstanceId().getProject(), database.getInstanceId().getInstance()), + database.getDatabase()); + break; + case "insert": + insertData(dbClient); + break; + case "insertusingdml": + insertUsingDml(dbClient); + break; + case "updateusingdml": + updateUsingDml(dbClient); + break; + case "updateusinggraphqueryindml": + updateUsingGraphQueryInDml(dbClient); + break; + case "query": + query(dbClient); + break; + case "querywithparameter": + queryWithParameter(dbClient); + break; + case "deleteusingdml": + deleteUsingDml(dbClient); + break; + case "delete": + deleteData(dbClient); + break; + default: + printUsageAndExit(); + } + } + + static void printUsageAndExit() { + System.err.println("Usage:"); + System.err.println(" SpannerGraphExample "); + System.err.println(""); + System.err.println("Examples:"); + System.err.println(" SpannerGraphExample createdatabase my-instance example-db"); + System.err.println(" SpannerGraphExample insert my-instance example-db"); + System.err.println(" SpannerGraphExample insertusingdml my-instance example-db"); + System.err.println(" SpannerGraphExample updateusingdml my-instance example-db"); + System.err.println(" SpannerGraphExample updateusinggraphqueryindml my-instance example-db"); + System.err.println(" SpannerGraphExample query my-instance example-db"); + System.err.println(" SpannerGraphExample querywithparameter my-instance example-db"); + System.err.println(" SpannerGraphExample deleteusingdml my-instance example-db"); + System.err.println(" SpannerGraphExample delete my-instance example-db"); + 
System.exit(1); + } + + public static void main(String[] args) { + if (args.length != 3 && args.length != 4) { + printUsageAndExit(); + } + SpannerOptions options = SpannerOptions.newBuilder().build(); + Spanner spanner = options.getService(); + DatabaseAdminClient dbAdminClient = null; + try { + final String command = args[0]; + DatabaseId db = DatabaseId.of(options.getProjectId(), args[1], args[2]); + // This will return the default project id based on the environment. + String clientProject = spanner.getOptions().getProjectId(); + if (!db.getInstanceId().getProject().equals(clientProject)) { + System.err.println( + "Invalid project specified. Project in the database id should match the" + + "project name set in the environment variable GOOGLE_CLOUD_PROJECT. Expected: " + + clientProject); + printUsageAndExit(); + } + + DatabaseClient dbClient = spanner.getDatabaseClient(db); + dbAdminClient = spanner.createDatabaseAdminClient(); + + run(dbClient, dbAdminClient, command, db); + } finally { + if (dbAdminClient != null) { + if (!dbAdminClient.isShutdown() || !dbAdminClient.isTerminated()) { + dbAdminClient.close(); + } + } + spanner.close(); + } + System.out.println("Closed client"); + } +} diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/SpannerSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/SpannerSample.java new file mode 100644 index 000000000000..a01b00c0f6c3 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/SpannerSample.java @@ -0,0 +1,2276 @@ +/* + * Copyright 2017 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.google.cloud.spanner.Type.StructField; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.longrunning.OperationSnapshot; +import com.google.api.gax.retrying.RetryingFuture; +import com.google.api.gax.rpc.NotFoundException; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StatusCode.Code; +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeyRange; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ReadOnlyTransaction; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerBatchUpdateException; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupOperationsPagedResponse; +import 
com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupsPagedResponse; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseOperationsPagedResponse; +import com.google.common.base.Strings; +import com.google.common.collect.Lists; +import com.google.common.io.BaseEncoding; +import com.google.longrunning.Operation; +import com.google.protobuf.FieldMask; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.Timestamp; +import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupInfo; +import com.google.spanner.admin.database.v1.BackupName; +import com.google.spanner.admin.database.v1.CopyBackupMetadata; +import com.google.spanner.admin.database.v1.CreateBackupMetadata; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.InstanceName; +import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; +import com.google.spanner.admin.database.v1.ListBackupsRequest; +import com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest; +import com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata; +import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; +import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; +import com.google.spanner.admin.database.v1.RestoreInfo; +import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; +import java.math.BigDecimal; +import java.time.Instant; +import java.time.ZoneId; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +/** + * Example code for using the Cloud Spanner API. This example demonstrates all the common operations + * that can be done on Cloud Spanner. These are: + * + *

+ * <ul>
+ *   <li>Creating a Cloud Spanner database.
+ *   <li>Writing, reading and executing SQL queries.
+ *   <li>Writing data using a read-write transaction.
+ *   <li>Using an index to read and execute SQL queries over data.
+ *   <li>Using commit timestamp for tracking when a record was last updated.
+ *   <li>Using Google API Extensions for Java to make thread-safe requests via long-running
+ *       operations. http://googleapis.github.io/gax-java/
+ * </ul>
    + */ +public class SpannerSample { + + /** + * Class to contain singer sample data. + */ + static class Singer { + + final long singerId; + final String firstName; + final String lastName; + + Singer(long singerId, String firstName, String lastName) { + this.singerId = singerId; + this.firstName = firstName; + this.lastName = lastName; + } + } + + /** + * Class to contain album sample data. + */ + static class Album { + + final long singerId; + final long albumId; + final String albumTitle; + + Album(long singerId, long albumId, String albumTitle) { + this.singerId = singerId; + this.albumId = albumId; + this.albumTitle = albumTitle; + } + } + + /** + * Class to contain performance sample data. + */ + static class Performance { + + final long singerId; + final long venueId; + final String eventDate; + final long revenue; + + Performance(long singerId, long venueId, String eventDate, long revenue) { + this.singerId = singerId; + this.venueId = venueId; + this.eventDate = eventDate; + this.revenue = revenue; + } + } + + /** + * Class to contain venue sample data. 
+ */ + static class Venue { + + final long venueId; + final String venueName; + final String venueInfo; + final long capacity; + final Value availableDates; + final String lastContactDate; + final boolean outdoorVenue; + final float popularityScore; + final BigDecimal revenue; + final Value venueDetails; + + Venue( + long venueId, + String venueName, + String venueInfo, + long capacity, + Value availableDates, + String lastContactDate, + boolean outdoorVenue, + float popularityScore, + BigDecimal revenue, + Value venueDetails) { + this.venueId = venueId; + this.venueName = venueName; + this.venueInfo = venueInfo; + this.capacity = capacity; + this.availableDates = availableDates; + this.lastContactDate = lastContactDate; + this.outdoorVenue = outdoorVenue; + this.popularityScore = popularityScore; + this.revenue = revenue; + this.venueDetails = venueDetails; + } + } + + // [START spanner_insert_data] + static final List SINGERS = + Arrays.asList( + new Singer(1, "Marc", "Richards"), + new Singer(2, "Catalina", "Smith"), + new Singer(3, "Alice", "Trentor"), + new Singer(4, "Lea", "Martin"), + new Singer(5, "David", "Lomond")); + + static final List ALBUMS = + Arrays.asList( + new Album(1, 1, "Total Junk"), + new Album(1, 2, "Go, Go, Go"), + new Album(2, 1, "Green"), + new Album(2, 2, "Forever Hold Your Peace"), + new Album(2, 3, "Terrified")); + // [END spanner_insert_data] + + // [START spanner_insert_data_with_timestamp_column] + static final List PERFORMANCES = + Arrays.asList( + new Performance(1, 4, "2017-10-05", 11000), + new Performance(1, 19, "2017-11-02", 15000), + new Performance(2, 42, "2017-12-23", 7000)); + // [END spanner_insert_data_with_timestamp_column] + + // [START spanner_insert_datatypes_data] + static Value availableDates1 = + Value.dateArray( + Arrays.asList( + Date.parseDate("2020-12-01"), + Date.parseDate("2020-12-02"), + Date.parseDate("2020-12-03"))); + static Value availableDates2 = + Value.dateArray( + Arrays.asList( + 
Date.parseDate("2020-11-01"), + Date.parseDate("2020-11-05"), + Date.parseDate("2020-11-15"))); + static Value availableDates3 = + Value.dateArray(Arrays.asList(Date.parseDate("2020-10-01"), Date.parseDate("2020-10-07"))); + static String exampleBytes1 = BaseEncoding.base64().encode("Hello World 1".getBytes()); + static String exampleBytes2 = BaseEncoding.base64().encode("Hello World 2".getBytes()); + static String exampleBytes3 = BaseEncoding.base64().encode("Hello World 3".getBytes()); + static final List VENUES = + Arrays.asList( + new Venue( + 4, + "Venue 4", + exampleBytes1, + 1800, + availableDates1, + "2018-09-02", + false, + 0.85543f, + new BigDecimal("215100.10"), + Value.json( + "[{\"name\":\"room 1\",\"open\":true},{\"name\":\"room 2\",\"open\":false}]")), + new Venue( + 19, + "Venue 19", + exampleBytes2, + 6300, + availableDates2, + "2019-01-15", + true, + 0.98716f, + new BigDecimal("1200100.00"), + Value.json("{\"rating\":9,\"open\":true}")), + new Venue( + 42, + "Venue 42", + exampleBytes3, + 3000, + availableDates3, + "2018-10-01", + false, + 0.72598f, + new BigDecimal("390650.99"), + Value.json( + "{\"name\":null," + + "\"open\":{\"Monday\":true,\"Tuesday\":false}," + + "\"tags\":[\"large\",\"airy\"]}"))); + // [END spanner_insert_datatypes_data] + + // [START spanner_create_database] + static void createDatabase(DatabaseAdminClient dbAdminClient, + InstanceName instanceName, String databaseId) { + CreateDatabaseRequest createDatabaseRequest = + CreateDatabaseRequest.newBuilder() + .setCreateStatement("CREATE DATABASE `" + databaseId + "`") + .setParent(instanceName.toString()) + .addAllExtraStatements(Arrays.asList( + "CREATE TABLE Singers (" + + " SingerId INT64 NOT NULL," + + " FirstName STRING(1024)," + + " LastName STRING(1024)," + + " SingerInfo BYTES(MAX)," + + " FullName STRING(2048) AS " + + " (ARRAY_TO_STRING([FirstName, LastName], \" \")) STORED" + + ") PRIMARY KEY (SingerId)", + "CREATE TABLE Albums (" + + " SingerId INT64 NOT NULL," + + 
" AlbumId INT64 NOT NULL," + + " AlbumTitle STRING(MAX)" + + ") PRIMARY KEY (SingerId, AlbumId)," + + " INTERLEAVE IN PARENT Singers ON DELETE CASCADE")).build(); + try { + // Initiate the request which returns an OperationFuture. + com.google.spanner.admin.database.v1.Database db = + dbAdminClient.createDatabaseAsync(createDatabaseRequest).get(); + System.out.println("Created database [" + db.getName() + "]"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_create_database] + + // [START spanner_create_table_with_timestamp_column] + static void createTableWithTimestamp(DatabaseAdminClient dbAdminClient, + DatabaseName databaseName) { + try { + // Initiate the request which returns an OperationFuture. + dbAdminClient.updateDatabaseDdlAsync( + databaseName, + Arrays.asList( + "CREATE TABLE Performances (" + + " SingerId INT64 NOT NULL," + + " VenueId INT64 NOT NULL," + + " EventDate Date," + + " Revenue INT64, " + + " LastUpdateTime TIMESTAMP NOT NULL OPTIONS (allow_commit_timestamp=true)" + + ") PRIMARY KEY (SingerId, VenueId, EventDate)," + + " INTERLEAVE IN PARENT Singers ON DELETE CASCADE")).get(); + System.out.println( + "Created Performances table in database: [" + databaseName.toString() + "]"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_create_table_with_timestamp_column] + + // [START spanner_insert_data_with_timestamp_column] + static void writeExampleDataWithTimestamp(DatabaseClient dbClient) { + List mutations = new ArrayList<>(); + for (Performance performance : PERFORMANCES) { + mutations.add( + Mutation.newInsertBuilder("Performances") + .set("SingerId") + .to(performance.singerId) + .set("VenueId") + .to(performance.venueId) + .set("EventDate") + .to(performance.eventDate) + .set("Revenue") + .to(performance.revenue) + .set("LastUpdateTime") + .to(Value.COMMIT_TIMESTAMP) + .build()); + } + dbClient.write(mutations); + } + // [END spanner_insert_data_with_timestamp_column] + + // [START spanner_insert_data] + static void writeExampleData(DatabaseClient dbClient) { + List mutations = new ArrayList<>(); + for (Singer singer : SINGERS) { + mutations.add( + Mutation.newInsertBuilder("Singers") + .set("SingerId") + .to(singer.singerId) + .set("FirstName") + .to(singer.firstName) + .set("LastName") + .to(singer.lastName) + .build()); + } + for (Album album : ALBUMS) { + mutations.add( + Mutation.newInsertBuilder("Albums") + .set("SingerId") + .to(album.singerId) + .set("AlbumId") + .to(album.albumId) + .set("AlbumTitle") + .to(album.albumTitle) + .build()); + } + dbClient.write(mutations); + } + // [END spanner_insert_data] + + // [START spanner_delete_data] + static void deleteExampleData(DatabaseClient dbClient) { + List mutations = new ArrayList<>(); + + // KeySet.Builder can be used to delete a specific set of rows. + // Delete the Albums with the key values (2,1) and (2,3). + mutations.add( + Mutation.delete( + "Albums", KeySet.newBuilder().addKey(Key.of(2, 1)).addKey(Key.of(2, 3)).build())); + + // KeyRange can be used to delete rows with a key in a specific range. 
+ // Delete a range of rows where the column key is >=3 and <5 + mutations.add( + Mutation.delete("Singers", KeySet.range(KeyRange.closedOpen(Key.of(3), Key.of(5))))); + + // KeySet.all() can be used to delete all the rows in a table. + // Delete remaining Singers rows, which will also delete the remaining Albums rows since it was + // defined with ON DELETE CASCADE. + mutations.add(Mutation.delete("Singers", KeySet.all())); + + dbClient.write(mutations); + System.out.printf("Records deleted.\n"); + } + // [END spanner_delete_data] + + // [START spanner_query_data] + static void query(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() // Execute a single read or query against Cloud Spanner. + .executeQuery(Statement.of("SELECT SingerId, AlbumId, AlbumTitle FROM Albums"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", resultSet.getLong(0), resultSet.getLong(1), resultSet.getString(2)); + } + } + } + // [END spanner_query_data] + + // [START spanner_read_data] + static void read(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() + .read( + "Albums", + KeySet.all(), // Read all rows in a table. + Arrays.asList("SingerId", "AlbumId", "AlbumTitle"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", resultSet.getLong(0), resultSet.getLong(1), resultSet.getString(2)); + } + } + } + // [END spanner_read_data] + + // [START spanner_add_column] + static void addMarketingBudget(DatabaseAdminClient adminClient, DatabaseName databaseName) { + try { + // Initiate the request which returns an OperationFuture. + adminClient.updateDatabaseDdlAsync( + databaseName, + Arrays.asList("ALTER TABLE Albums ADD COLUMN MarketingBudget INT64")).get(); + System.out.println("Added MarketingBudget column"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. 
+ throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_add_column] + + // Before executing this method, a new column MarketingBudget has to be added to the Albums + // table by applying the DDL statement "ALTER TABLE Albums ADD COLUMN MarketingBudget INT64". + // [START spanner_update_data] + static void update(DatabaseClient dbClient) { + // Mutation can be used to update/insert/delete a single row in a table. Here we use + // newUpdateBuilder to create update mutations. + List mutations = + Arrays.asList( + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(1) + .set("AlbumId") + .to(1) + .set("MarketingBudget") + .to(100000) + .build(), + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(2) + .set("AlbumId") + .to(2) + .set("MarketingBudget") + .to(500000) + .build()); + // This writes all the mutations to Cloud Spanner atomically. + dbClient.write(mutations); + } + // [END spanner_update_data] + + // [START spanner_read_write_transaction] + static void writeWithTransaction(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + // Transfer marketing budget from one album to another. We do it in a transaction to + // ensure that the transfer is atomic. + Struct row = + transaction.readRow("Albums", Key.of(2, 2), Arrays.asList("MarketingBudget")); + long album2Budget = row.getLong(0); + // Transaction will only be committed if this condition still holds at the time of + // commit. Otherwise it will be aborted and the callable will be rerun by the + // client library. 
+ long transfer = 200000; + if (album2Budget >= transfer) { + long album1Budget = + transaction + .readRow("Albums", Key.of(1, 1), Arrays.asList("MarketingBudget")) + .getLong(0); + album1Budget += transfer; + album2Budget -= transfer; + transaction.buffer( + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(1) + .set("AlbumId") + .to(1) + .set("MarketingBudget") + .to(album1Budget) + .build()); + transaction.buffer( + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(2) + .set("AlbumId") + .to(2) + .set("MarketingBudget") + .to(album2Budget) + .build()); + } + return null; + }); + } + // [END spanner_read_write_transaction] + + // [START spanner_query_data_with_new_column] + static void queryMarketingBudget(DatabaseClient dbClient) { + // Rows without an explicit value for MarketingBudget will have a MarketingBudget equal to + // null. A try-with-resource block is used to automatically release resources held by + // ResultSet. + try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery(Statement.of("SELECT SingerId, AlbumId, MarketingBudget FROM Albums"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", + resultSet.getLong("SingerId"), + resultSet.getLong("AlbumId"), + // We check that the value is non null. ResultSet getters can only be used to retrieve + // non null values. + resultSet.isNull("MarketingBudget") ? "NULL" : resultSet.getLong("MarketingBudget")); + } + } + } + // [END spanner_query_data_with_new_column] + + // [START spanner_create_index] + static void addIndex(DatabaseAdminClient adminClient, DatabaseName databaseName) { + try { + // Initiate the request which returns an OperationFuture. + adminClient.updateDatabaseDdlAsync( + databaseName, + Arrays.asList("CREATE INDEX AlbumsByAlbumTitle ON Albums(AlbumTitle)")).get(); + System.out.println("Added AlbumsByAlbumTitle index"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. 
+ throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_create_index] + + // Before running this example, add the index AlbumsByAlbumTitle by applying the DDL statement + // "CREATE INDEX AlbumsByAlbumTitle ON Albums(AlbumTitle)". + // [START spanner_query_data_with_index] + static void queryUsingIndex(DatabaseClient dbClient) { + Statement statement = + Statement + // We use FORCE_INDEX hint to specify which index to use. For more details see + // https://cloud.google.com/spanner/docs/query-syntax#from-clause + .newBuilder( + "SELECT AlbumId, AlbumTitle, MarketingBudget " + + "FROM Albums@{FORCE_INDEX=AlbumsByAlbumTitle} " + + "WHERE AlbumTitle >= @StartTitle AND AlbumTitle < @EndTitle") + // We use @BoundParameters to help speed up frequently executed queries. + // For more details see https://cloud.google.com/spanner/docs/sql-best-practices + .bind("StartTitle") + .to("Aardvark") + .bind("EndTitle") + .to("Goo") + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong("AlbumId"), + resultSet.getString("AlbumTitle"), + resultSet.isNull("MarketingBudget") ? 
"NULL" : resultSet.getLong("MarketingBudget")); + } + } + } + // [END spanner_query_data_with_index] + + // [START spanner_read_data_with_index] + static void readUsingIndex(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() + .readUsingIndex( + "Albums", + "AlbumsByAlbumTitle", + KeySet.all(), + Arrays.asList("AlbumId", "AlbumTitle"))) { + while (resultSet.next()) { + System.out.printf("%d %s\n", resultSet.getLong(0), resultSet.getString(1)); + } + } + } + // [END spanner_read_data_with_index] + + // [START spanner_create_storing_index] + static void addStoringIndex(DatabaseAdminClient adminClient, DatabaseName databaseName) { + try { + // Initiate the request which returns an OperationFuture. + adminClient.updateDatabaseDdlAsync( + databaseName, + Arrays.asList( + "CREATE INDEX AlbumsByAlbumTitle2 ON Albums(AlbumTitle) " + + "STORING (MarketingBudget)")).get(); + System.out.println("Added AlbumsByAlbumTitle2 index"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_create_storing_index] + + // Before running this example, create a storing index AlbumsByAlbumTitle2 by applying the DDL + // statement "CREATE INDEX AlbumsByAlbumTitle2 ON Albums(AlbumTitle) STORING (MarketingBudget)". + // [START spanner_read_data_with_storing_index] + static void readStoringIndex(DatabaseClient dbClient) { + // We can read MarketingBudget also from the index since it stores a copy of MarketingBudget. 
+ try (ResultSet resultSet = + dbClient + .singleUse() + .readUsingIndex( + "Albums", + "AlbumsByAlbumTitle2", + KeySet.all(), + Arrays.asList("AlbumId", "AlbumTitle", "MarketingBudget"))) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong(0), + resultSet.getString(1), + resultSet.isNull("MarketingBudget") ? "NULL" : resultSet.getLong("MarketingBudget")); + } + } + } + // [END spanner_read_data_with_storing_index] + + // [START spanner_read_only_transaction] + static void readOnlyTransaction(DatabaseClient dbClient) { + // ReadOnlyTransaction must be closed by calling close() on it to release resources held by it. + // We use a try-with-resource block to automatically do so. + try (ReadOnlyTransaction transaction = dbClient.readOnlyTransaction()) { + try (ResultSet queryResultSet = + transaction.executeQuery( + Statement.of("SELECT SingerId, AlbumId, AlbumTitle FROM Albums"))) { + while (queryResultSet.next()) { + System.out.printf( + "%d %d %s\n", + queryResultSet.getLong(0), queryResultSet.getLong(1), queryResultSet.getString(2)); + } + } // queryResultSet.close() is automatically called here + try (ResultSet readResultSet = + transaction.read( + "Albums", KeySet.all(), Arrays.asList("SingerId", "AlbumId", "AlbumTitle"))) { + while (readResultSet.next()) { + System.out.printf( + "%d %d %s\n", + readResultSet.getLong(0), readResultSet.getLong(1), readResultSet.getString(2)); + } + } // readResultSet.close() is automatically called here + } // transaction.close() is automatically called here + } + // [END spanner_read_only_transaction] + + // [START spanner_read_stale_data] + static void readStaleData(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse(TimestampBound.ofExactStaleness(15, TimeUnit.SECONDS)) + .read( + "Albums", KeySet.all(), Arrays.asList("SingerId", "AlbumId", "MarketingBudget"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", + resultSet.getLong(0), + 
resultSet.getLong(1), + resultSet.isNull(2) ? "NULL" : resultSet.getLong("MarketingBudget")); + } + } + } + // [END spanner_read_stale_data] + + // [START spanner_add_timestamp_column] + static void addCommitTimestamp(DatabaseAdminClient adminClient, DatabaseName databaseName) { + try { + // Initiate the request which returns an OperationFuture. + adminClient.updateDatabaseDdlAsync( + databaseName, + Arrays.asList( + "ALTER TABLE Albums ADD COLUMN LastUpdateTime TIMESTAMP " + + "OPTIONS (allow_commit_timestamp=true)")).get(); + System.out.println("Added LastUpdateTime as a commit timestamp column in Albums table."); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_add_timestamp_column] + + // Before executing this method, a new column MarketingBudget has to be added to the Albums + // table by applying the DDL statement "ALTER TABLE Albums ADD COLUMN MarketingBudget INT64". + // In addition this update expects the LastUpdateTime column added by applying the DDL statement + // "ALTER TABLE Albums ADD COLUMN LastUpdateTime TIMESTAMP OPTIONS (allow_commit_timestamp=true)" + // [START spanner_update_data_with_timestamp_column] + static void updateWithTimestamp(DatabaseClient dbClient) { + // Mutation can be used to update/insert/delete a single row in a table. Here we use + // newUpdateBuilder to create update mutations. 
+ List mutations = + Arrays.asList( + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(1) + .set("AlbumId") + .to(1) + .set("MarketingBudget") + .to(1000000) + .set("LastUpdateTime") + .to(Value.COMMIT_TIMESTAMP) + .build(), + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(2) + .set("AlbumId") + .to(2) + .set("MarketingBudget") + .to(750000) + .set("LastUpdateTime") + .to(Value.COMMIT_TIMESTAMP) + .build()); + // This writes all the mutations to Cloud Spanner atomically. + dbClient.write(mutations); + } + // [END spanner_update_data_with_timestamp_column] + + // [START spanner_query_data_with_timestamp_column] + static void queryMarketingBudgetWithTimestamp(DatabaseClient dbClient) { + // Rows without an explicit value for MarketingBudget will have a MarketingBudget equal to + // null. A try-with-resource block is used to automatically release resources held by + // ResultSet. + try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery( + Statement.of( + "SELECT SingerId, AlbumId, MarketingBudget, LastUpdateTime FROM Albums" + + " ORDER BY LastUpdateTime DESC"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s %s\n", + resultSet.getLong("SingerId"), + resultSet.getLong("AlbumId"), + // We check that the value is non null. ResultSet getters can only be used to retrieve + // non null values. + resultSet.isNull("MarketingBudget") ? "NULL" : resultSet.getLong("MarketingBudget"), + resultSet.isNull("LastUpdateTime") ? 
"NULL" : resultSet.getTimestamp("LastUpdateTime")); + } + } + } + // [END spanner_query_data_with_timestamp_column] + + static void querySingersTable(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery(Statement.of("SELECT SingerId, FirstName, LastName FROM Singers"))) { + while (resultSet.next()) { + System.out.printf( + "%s %s %s\n", + resultSet.getLong("SingerId"), + resultSet.getString("FirstName"), + resultSet.getString("LastName")); + } + } + } + + static void queryPerformancesTable(DatabaseClient dbClient) { + // Rows without an explicit value for Revenue will have a Revenue equal to + // null. A try-with-resource block is used to automatically release resources held by + // ResultSet. + try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery( + Statement.of( + "SELECT SingerId, VenueId, EventDate, Revenue, LastUpdateTime " + + "FROM Performances ORDER BY LastUpdateTime DESC"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s %s %s\n", + resultSet.getLong("SingerId"), + resultSet.getLong("VenueId"), + resultSet.getDate("EventDate"), + // We check that the value is non null. ResultSet getters can only be used to retrieve + // non null values. + resultSet.isNull("Revenue") ? 
"NULL" : resultSet.getLong("Revenue"), + resultSet.getTimestamp("LastUpdateTime")); + } + } + } + + // [START spanner_write_data_for_struct_queries] + static void writeStructExampleData(DatabaseClient dbClient) { + final List singers = + Arrays.asList( + new Singer(6, "Elena", "Campbell"), + new Singer(7, "Gabriel", "Wright"), + new Singer(8, "Benjamin", "Martinez"), + new Singer(9, "Hannah", "Harris")); + + List mutations = new ArrayList<>(); + for (Singer singer : singers) { + mutations.add( + Mutation.newInsertBuilder("Singers") + .set("SingerId") + .to(singer.singerId) + .set("FirstName") + .to(singer.firstName) + .set("LastName") + .to(singer.lastName) + .build()); + } + dbClient.write(mutations); + System.out.println("Inserted example data for struct parameter queries."); + } + // [END spanner_write_data_for_struct_queries] + + static void queryWithStruct(DatabaseClient dbClient) { + // [START spanner_create_struct_with_data] + Struct name = + Struct.newBuilder().set("FirstName").to("Elena").set("LastName").to("Campbell").build(); + // [END spanner_create_struct_with_data] + + // [START spanner_query_data_with_struct] + Statement s = + Statement.newBuilder( + "SELECT SingerId FROM Singers " + + "WHERE STRUCT(FirstName, LastName) " + + "= @name") + .bind("name") + .to(name) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(s)) { + while (resultSet.next()) { + System.out.printf("%d\n", resultSet.getLong("SingerId")); + } + } + // [END spanner_query_data_with_struct] + } + + static void queryWithArrayOfStruct(DatabaseClient dbClient) { + // [START spanner_create_user_defined_struct] + Type nameType = + Type.struct( + Arrays.asList( + StructField.of("FirstName", Type.string()), + StructField.of("LastName", Type.string()))); + // [END spanner_create_user_defined_struct] + + // [START spanner_create_array_of_struct_with_data] + List bandMembers = new ArrayList<>(); + bandMembers.add( + 
Struct.newBuilder().set("FirstName").to("Elena").set("LastName").to("Campbell").build()); + bandMembers.add( + Struct.newBuilder().set("FirstName").to("Gabriel").set("LastName").to("Wright").build()); + bandMembers.add( + Struct.newBuilder().set("FirstName").to("Benjamin").set("LastName").to("Martinez").build()); + // [END spanner_create_array_of_struct_with_data] + + // [START spanner_query_data_with_array_of_struct] + Statement s = + Statement.newBuilder( + "SELECT SingerId FROM Singers WHERE " + + "STRUCT(FirstName, LastName) " + + "IN UNNEST(@names) " + + "ORDER BY SingerId DESC") + .bind("names") + .toStructArray(nameType, bandMembers) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(s)) { + while (resultSet.next()) { + System.out.printf("%d\n", resultSet.getLong("SingerId")); + } + } + // [END spanner_query_data_with_array_of_struct] + } + + // [START spanner_field_access_on_struct_parameters] + static void queryStructField(DatabaseClient dbClient) { + Statement s = + Statement.newBuilder("SELECT SingerId FROM Singers WHERE FirstName = @name.FirstName") + .bind("name") + .to( + Struct.newBuilder() + .set("FirstName") + .to("Elena") + .set("LastName") + .to("Campbell") + .build()) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(s)) { + while (resultSet.next()) { + System.out.printf("%d\n", resultSet.getLong("SingerId")); + } + } + } + // [END spanner_field_access_on_struct_parameters] + + // [START spanner_field_access_on_nested_struct_parameters] + static void queryNestedStructField(DatabaseClient dbClient) { + Type nameType = + Type.struct( + Arrays.asList( + StructField.of("FirstName", Type.string()), + StructField.of("LastName", Type.string()))); + + Struct songInfo = + Struct.newBuilder() + .set("song_name") + .to("Imagination") + .set("artistNames") + .toStructArray( + nameType, + Arrays.asList( + Struct.newBuilder() + .set("FirstName") + .to("Elena") + .set("LastName") + .to("Campbell") + .build(), + 
Struct.newBuilder() + .set("FirstName") + .to("Hannah") + .set("LastName") + .to("Harris") + .build())) + .build(); + Statement s = + Statement.newBuilder( + "SELECT SingerId, @song_info.song_name " + + "FROM Singers WHERE " + + "STRUCT(FirstName, LastName) " + + "IN UNNEST(@song_info.artistNames)") + .bind("song_info") + .to(songInfo) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(s)) { + while (resultSet.next()) { + System.out.printf("%d %s\n", resultSet.getLong("SingerId"), resultSet.getString(1)); + } + } + } + // [END spanner_field_access_on_nested_struct_parameters] + + // [START spanner_dml_standard_insert] + static void insertUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + String sql = + "INSERT INTO Singers (SingerId, FirstName, LastName) " + + " VALUES (10, 'Virginia', 'Watson')"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record inserted.\n", rowCount); + return null; + }); + } + // [END spanner_dml_standard_insert] + + // [START spanner_dml_standard_update] + static void updateUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + String sql = + "UPDATE Albums " + + "SET MarketingBudget = MarketingBudget * 2 " + + "WHERE SingerId = 1 and AlbumId = 1"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record updated.\n", rowCount); + return null; + }); + } + // [END spanner_dml_standard_update] + + // [START spanner_dml_standard_delete] + static void deleteUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + String sql = "DELETE FROM Singers WHERE FirstName = 'Alice'"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record deleted.\n", rowCount); + return null; + }); + } + // [END spanner_dml_standard_delete] + + // [START spanner_dml_standard_update_with_timestamp] + 
static void updateUsingDmlWithTimestamp(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + String sql = + "UPDATE Albums " + + "SET LastUpdateTime = PENDING_COMMIT_TIMESTAMP() WHERE SingerId = 1"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d records updated.\n", rowCount); + return null; + }); + } + // [END spanner_dml_standard_update_with_timestamp] + + // [START spanner_dml_write_then_read] + static void writeAndReadUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + // Insert record. + String sql = + "INSERT INTO Singers (SingerId, FirstName, LastName) " + + " VALUES (11, 'Timothy', 'Campbell')"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record inserted.\n", rowCount); + // Read newly inserted record. + sql = "SELECT FirstName, LastName FROM Singers WHERE SingerId = 11"; + // We use a try-with-resource block to automatically release resources held by + // ResultSet. 
+ try (ResultSet resultSet = transaction.executeQuery(Statement.of(sql))) { + while (resultSet.next()) { + System.out.printf( + "%s %s\n", + resultSet.getString("FirstName"), resultSet.getString("LastName")); + } + } + return null; + }); + } + // [END spanner_dml_write_then_read] + + // [START spanner_dml_structs] + static void updateUsingDmlWithStruct(DatabaseClient dbClient) { + Struct name = + Struct.newBuilder().set("FirstName").to("Timothy").set("LastName").to("Campbell").build(); + Statement s = + Statement.newBuilder( + "UPDATE Singers SET LastName = 'Grant' " + + "WHERE STRUCT(FirstName, LastName) " + + "= @name") + .bind("name") + .to(name) + .build(); + dbClient + .readWriteTransaction() + .run(transaction -> { + long rowCount = transaction.executeUpdate(s); + System.out.printf("%d record updated.\n", rowCount); + return null; + }); + } + // [END spanner_dml_structs] + + // [START spanner_dml_getting_started_insert] + static void writeUsingDml(DatabaseClient dbClient) { + // Insert 4 singer records + dbClient + .readWriteTransaction() + .run(transaction -> { + String sql = + "INSERT INTO Singers (SingerId, FirstName, LastName) VALUES " + + "(12, 'Melissa', 'Garcia'), " + + "(13, 'Russell', 'Morales'), " + + "(14, 'Jacqueline', 'Long'), " + + "(15, 'Dylan', 'Shaw')"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d records inserted.\n", rowCount); + return null; + }); + } + // [END spanner_dml_getting_started_insert] + + // [START spanner_query_with_parameter] + static void queryWithParameter(DatabaseClient dbClient) { + Statement statement = + Statement.newBuilder( + "SELECT SingerId, FirstName, LastName " + + "FROM Singers " + + "WHERE LastName = @lastName") + .bind("lastName") + .to("Garcia") + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong("SingerId"), + resultSet.getString("FirstName"), + 
resultSet.getString("LastName")); + } + } + } + // [END spanner_query_with_parameter] + + // [START spanner_dml_getting_started_update] + static void writeWithTransactionUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + // Transfer marketing budget from one album to another. We do it in a transaction to + // ensure that the transfer is atomic. + String sql1 = + "SELECT MarketingBudget from Albums WHERE SingerId = 2 and AlbumId = 2"; + ResultSet resultSet = transaction.executeQuery(Statement.of(sql1)); + long album2Budget = 0; + while (resultSet.next()) { + album2Budget = resultSet.getLong("MarketingBudget"); + } + // Transaction will only be committed if this condition still holds at the time of + // commit. Otherwise it will be aborted and the callable will be rerun by the + // client library. + long transfer = 200000; + if (album2Budget >= transfer) { + String sql2 = + "SELECT MarketingBudget from Albums WHERE SingerId = 1 and AlbumId = 1"; + ResultSet resultSet2 = transaction.executeQuery(Statement.of(sql2)); + long album1Budget = 0; + while (resultSet2.next()) { + album1Budget = resultSet2.getLong("MarketingBudget"); + } + album1Budget += transfer; + album2Budget -= transfer; + Statement updateStatement = + Statement.newBuilder( + "UPDATE Albums " + + "SET MarketingBudget = @AlbumBudget " + + "WHERE SingerId = 1 and AlbumId = 1") + .bind("AlbumBudget") + .to(album1Budget) + .build(); + transaction.executeUpdate(updateStatement); + Statement updateStatement2 = + Statement.newBuilder( + "UPDATE Albums " + + "SET MarketingBudget = @AlbumBudget " + + "WHERE SingerId = 2 and AlbumId = 2") + .bind("AlbumBudget") + .to(album2Budget) + .build(); + transaction.executeUpdate(updateStatement2); + } + return null; + }); + } + // [END spanner_dml_getting_started_update] + + // [START spanner_dml_partitioned_update] + static void updateUsingPartitionedDml(DatabaseClient dbClient) { + String sql = "UPDATE Albums SET 
MarketingBudget = 100000 WHERE SingerId > 1"; + long rowCount = dbClient.executePartitionedUpdate(Statement.of(sql)); + System.out.printf("%d records updated.\n", rowCount); + } + // [END spanner_dml_partitioned_update] + + // [START spanner_dml_partitioned_delete] + static void deleteUsingPartitionedDml(DatabaseClient dbClient) { + String sql = "DELETE FROM Singers WHERE SingerId > 10"; + long rowCount = dbClient.executePartitionedUpdate(Statement.of(sql)); + System.out.printf("%d records deleted.\n", rowCount); + } + // [END spanner_dml_partitioned_delete] + + // [START spanner_dml_batch_update] + static void updateUsingBatchDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + List stmts = new ArrayList(); + String sql = + "INSERT INTO Albums " + + "(SingerId, AlbumId, AlbumTitle, MarketingBudget) " + + "VALUES (1, 3, 'Test Album Title', 10000) "; + stmts.add(Statement.of(sql)); + sql = + "UPDATE Albums " + + "SET MarketingBudget = MarketingBudget * 2 " + + "WHERE SingerId = 1 and AlbumId = 3"; + stmts.add(Statement.of(sql)); + long[] rowCounts; + try { + rowCounts = transaction.batchUpdate(stmts); + } catch (SpannerBatchUpdateException e) { + rowCounts = e.getUpdateCounts(); + } + for (int i = 0; i < rowCounts.length; i++) { + System.out.printf("%d record updated by stmt %d.\n", rowCounts[i], i); + } + return null; + }); + } + // [END spanner_dml_batch_update] + + // [START spanner_create_table_with_datatypes] + static void createTableWithDatatypes(DatabaseAdminClient dbAdminClient, + DatabaseName databaseName) { + try { + // Initiate the request which returns an OperationFuture. 
+ dbAdminClient.updateDatabaseDdlAsync(databaseName, + Arrays.asList( + "CREATE TABLE Venues (" + + " VenueId INT64 NOT NULL," + + " VenueName STRING(100)," + + " VenueInfo BYTES(MAX)," + + " Capacity INT64," + + " AvailableDates ARRAY," + + " LastContactDate DATE," + + " OutdoorVenue BOOL, " + + " PopularityScore FLOAT64, " + + " Revenue NUMERIC, " + + " VenueDetails JSON, " + + " LastUpdateTime TIMESTAMP NOT NULL OPTIONS (allow_commit_timestamp=true)" + + ") PRIMARY KEY (VenueId)")).get(); + System.out.println("Created Venues table in database: [" + databaseName.toString() + "]"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_create_table_with_datatypes] + + // [START spanner_insert_datatypes_data] + static void writeDatatypesData(DatabaseClient dbClient) { + List mutations = new ArrayList<>(); + for (Venue venue : VENUES) { + mutations.add( + Mutation.newInsertBuilder("Venues") + .set("VenueId") + .to(venue.venueId) + .set("VenueName") + .to(venue.venueName) + .set("VenueInfo") + .to(venue.venueInfo) + .set("Capacity") + .to(venue.capacity) + .set("AvailableDates") + .to(venue.availableDates) + .set("LastContactDate") + .to(venue.lastContactDate) + .set("OutdoorVenue") + .to(venue.outdoorVenue) + .set("PopularityScore") + .to(venue.popularityScore) + .set("Revenue") + .to(venue.revenue) + .set("VenueDetails") + .to(venue.venueDetails) + .set("LastUpdateTime") + .to(Value.COMMIT_TIMESTAMP) + .build()); + } + dbClient.write(mutations); + } + // [END spanner_insert_datatypes_data] + + // [START spanner_query_with_array_parameter] + static void queryWithArray(DatabaseClient dbClient) { + Value exampleArray = + 
Value.dateArray(Arrays.asList(Date.parseDate("2020-10-01"), Date.parseDate("2020-11-01"))); + + Statement statement = + Statement.newBuilder( + "SELECT VenueId, VenueName, AvailableDate FROM Venues v, " + + "UNNEST(v.AvailableDates) as AvailableDate " + + "WHERE AvailableDate in UNNEST(@availableDates)") + .bind("availableDates") + .to(exampleArray) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong("VenueId"), + resultSet.getString("VenueName"), + resultSet.getDate("AvailableDate")); + } + } + } + // [END spanner_query_with_array_parameter] + + // [START spanner_query_with_bool_parameter] + static void queryWithBool(DatabaseClient dbClient) { + boolean exampleBool = true; + Statement statement = + Statement.newBuilder( + "SELECT VenueId, VenueName, OutdoorVenue FROM Venues " + + "WHERE OutdoorVenue = @outdoorVenue") + .bind("outdoorVenue") + .to(exampleBool) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %b\n", + resultSet.getLong("VenueId"), + resultSet.getString("VenueName"), + resultSet.getBoolean("OutdoorVenue")); + } + } + } + // [END spanner_query_with_bool_parameter] + + // [START spanner_query_with_bytes_parameter] + static void queryWithBytes(DatabaseClient dbClient) { + ByteArray exampleBytes = + ByteArray.fromBase64(BaseEncoding.base64().encode("Hello World 1".getBytes())); + Statement statement = + Statement.newBuilder( + "SELECT VenueId, VenueName FROM Venues " + "WHERE VenueInfo = @venueInfo") + .bind("venueInfo") + .to(exampleBytes) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s\n", resultSet.getLong("VenueId"), resultSet.getString("VenueName")); + } + } + } + // [END spanner_query_with_bytes_parameter] + + // [START 
spanner_query_with_date_parameter]
+  static void queryWithDate(DatabaseClient dbClient) {
+    String exampleDate = "2019-01-01";
+    Statement statement =
+        Statement.newBuilder(
+                "SELECT VenueId, VenueName, LastContactDate FROM Venues "
+                    + "WHERE LastContactDate < @lastContactDate")
+            .bind("lastContactDate")
+            // Bind a DATE value. Binding the raw String would send a STRING parameter,
+            // and comparing DATE < STRING fails with a type mismatch.
+            .to(Value.date(Date.parseDate(exampleDate)))
+            .build();
+    try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) {
+      while (resultSet.next()) {
+        System.out.printf(
+            "%d %s %s\n",
+            resultSet.getLong("VenueId"),
+            resultSet.getString("VenueName"),
+            resultSet.getDate("LastContactDate"));
+      }
+    }
+  }
+  // [END spanner_query_with_date_parameter]
+
+  // [START spanner_query_with_float_parameter]
+  static void queryWithFloat(DatabaseClient dbClient) {
+    float exampleFloat = 0.8f;
+    Statement statement =
+        Statement.newBuilder(
+                "SELECT VenueId, VenueName, PopularityScore FROM Venues "
+                    + "WHERE PopularityScore > @popularityScore")
+            .bind("popularityScore")
+            .to(exampleFloat)
+            .build();
+    try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) {
+      while (resultSet.next()) {
+        System.out.printf(
+            "%d %s %f\n",
+            resultSet.getLong("VenueId"),
+            resultSet.getString("VenueName"),
+            resultSet.getDouble("PopularityScore"));
+      }
+    }
+  }
+  // [END spanner_query_with_float_parameter]
+
+  // [START spanner_query_with_int_parameter]
+  static void queryWithInt(DatabaseClient dbClient) {
+    long exampleInt = 3000;
+    Statement statement =
+        Statement.newBuilder(
+                "SELECT VenueId, VenueName, Capacity FROM Venues " + "WHERE Capacity >= @capacity")
+            .bind("capacity")
+            .to(exampleInt)
+            .build();
+    try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) {
+      while (resultSet.next()) {
+        System.out.printf(
+            "%d %s %d\n",
+            resultSet.getLong("VenueId"),
+            resultSet.getString("VenueName"),
+            resultSet.getLong("Capacity"));
+      }
+    }
+  }
+  // [END spanner_query_with_int_parameter]
+
+  // [START spanner_query_with_string_parameter]
+  static void 
queryWithString(DatabaseClient dbClient) { + String exampleString = "Venue 42"; + Statement statement = + Statement.newBuilder( + "SELECT VenueId, VenueName FROM Venues " + "WHERE VenueName = @venueName") + .bind("venueName") + .to(exampleString) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s\n", resultSet.getLong("VenueId"), resultSet.getString("VenueName")); + } + } + } + // [END spanner_query_with_string_parameter] + + // [START spanner_query_with_timestamp_parameter] + static void queryWithTimestampParameter(DatabaseClient dbClient) { + Instant exampleTimestamp = Instant.now(); + Statement statement = + Statement.newBuilder( + "SELECT VenueId, VenueName, LastUpdateTime FROM Venues " + + "WHERE LastUpdateTime < @lastUpdateTime") + .bind("lastUpdateTime") + .to(exampleTimestamp.toString()) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong("VenueId"), + resultSet.getString("VenueName"), + resultSet.getTimestamp("LastUpdateTime")); + } + } + } + // [END spanner_query_with_timestamp_parameter] + + // [START spanner_query_with_numeric_parameter] + static void queryWithNumeric(DatabaseClient dbClient) { + BigDecimal exampleNumeric = new BigDecimal("300000"); + Statement statement = + Statement.newBuilder( + "SELECT VenueId, VenueName, Revenue\n" + + "FROM Venues\n" + + "WHERE Revenue >= @revenue") + .bind("revenue") + .to(exampleNumeric) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s%n", + resultSet.getLong("VenueId"), + resultSet.getString("VenueName"), + resultSet.getBigDecimal("Revenue")); + } + } + } + // [END spanner_query_with_numeric_parameter] + + // [START spanner_create_client_with_query_options] + static void 
clientWithQueryOptions(DatabaseId db) { + SpannerOptions options = + SpannerOptions.newBuilder() + .setDefaultQueryOptions( + db, QueryOptions + .newBuilder() + .setOptimizerVersion("1") + // The list of available statistics packages can be found by querying the + // "INFORMATION_SCHEMA.SPANNER_STATISTICS" table. + .setOptimizerStatisticsPackage("latest") + .build()) + .build(); + Spanner spanner = options.getService(); + DatabaseClient dbClient = spanner.getDatabaseClient(db); + try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery(Statement.of("SELECT SingerId, AlbumId, AlbumTitle FROM Albums"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", resultSet.getLong(0), resultSet.getLong(1), resultSet.getString(2)); + } + } + } + // [END spanner_create_client_with_query_options] + + // [START spanner_query_with_query_options] + static void queryWithQueryOptions(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery( + Statement + .newBuilder("SELECT SingerId, AlbumId, AlbumTitle FROM Albums") + .withQueryOptions(QueryOptions + .newBuilder() + .setOptimizerVersion("1") + // The list of available statistics packages can be found by querying the + // "INFORMATION_SCHEMA.SPANNER_STATISTICS" table. + .setOptimizerStatisticsPackage("latest") + .build()) + .build())) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", resultSet.getLong(0), resultSet.getLong(1), resultSet.getString(2)); + } + } + } + // [END spanner_query_with_query_options] + + // [START spanner_create_backup] + static void createBackup(DatabaseAdminClient dbAdminClient, String projectId, String instanceId, + String databaseId, String backupId, Timestamp versionTime) { + // Set expire time to 14 days from now. 
+ Timestamp expireTime = + Timestamp.newBuilder().setSeconds(TimeUnit.MILLISECONDS.toSeconds(( + System.currentTimeMillis() + TimeUnit.DAYS.toMillis(14)))).build(); + BackupName backupName = BackupName.of(projectId, instanceId, backupId); + Backup backup = Backup.newBuilder() + .setName(backupName.toString()) + .setDatabase(DatabaseName.of(projectId, instanceId, databaseId).toString()) + .setExpireTime(expireTime).setVersionTime(versionTime).build(); + + // Initiate the request which returns an OperationFuture. + System.out.println("Creating backup [" + backupId + "]..."); + try { + // Wait for the backup operation to complete. + backup = dbAdminClient.createBackupAsync( + InstanceName.of(projectId, instanceId), backup, backupId).get(); + System.out.println("Created backup [" + backup.getName() + "]"); + } catch (ExecutionException e) { + throw SpannerExceptionFactory.asSpannerException(e); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + + // Reload the metadata of the backup from the server. + backup = dbAdminClient.getBackup(backup.getName()); + System.out.println( + String.format( + "Backup %s of size %d bytes was created at %s for version of database at %s", + backup.getName(), + backup.getSizeBytes(), + java.time.OffsetDateTime.ofInstant( + Instant.ofEpochSecond(backup.getCreateTime().getSeconds(), + backup.getCreateTime().getNanos()), ZoneId.systemDefault()), + java.time.OffsetDateTime.ofInstant( + Instant.ofEpochSecond(backup.getVersionTime().getSeconds(), + backup.getVersionTime().getNanos()), ZoneId.systemDefault())) + ); + } + // [END spanner_create_backup] + + // [START spanner_cancel_backup_create] + static void cancelCreateBackup( + DatabaseAdminClient dbAdminClient, String projectId, String instanceId, + String databaseId, String backupId) { + // Set expire time to 14 days from now. 
+ Timestamp expireTime = + Timestamp.newBuilder().setSeconds(TimeUnit.MILLISECONDS.toSeconds(( + System.currentTimeMillis() + TimeUnit.DAYS.toMillis(14)))).build(); + BackupName backupName = BackupName.of(projectId, instanceId, backupId); + Backup backup = Backup.newBuilder() + .setName(backupName.toString()) + .setDatabase(DatabaseName.of(projectId, instanceId, databaseId).toString()) + .setExpireTime(expireTime).build(); + + try { + // Start the creation of a backup. + System.out.println("Creating backup [" + backupId + "]..."); + OperationFuture op = dbAdminClient.createBackupAsync( + InstanceName.of(projectId, instanceId), backup, backupId); + + // Try to cancel the backup operation. + System.out.println("Cancelling create backup operation for [" + backupId + "]..."); + dbAdminClient.getOperationsClient().cancelOperation(op.getName()); + + // Get a polling future for the running operation. This future will regularly poll the server + // for the current status of the backup operation. + RetryingFuture pollingFuture = op.getPollingFuture(); + + // Wait for the operation to finish. + // isDone will return true when the operation is complete, regardless of whether it was + // successful or not. + while (!pollingFuture.get().isDone()) { + System.out.println("Waiting for the cancelled backup operation to finish..."); + Thread.sleep(TimeUnit.MILLISECONDS.convert(5, TimeUnit.SECONDS)); + } + if (pollingFuture.get().getErrorCode() == null) { + // Backup was created before it could be cancelled. Delete the backup. 
+ dbAdminClient.deleteBackup(backupName); + System.out.println("Backup operation for [" + backupId + + "] successfully finished before it could be cancelled"); + } else if (pollingFuture.get().getErrorCode().getCode() == StatusCode.Code.CANCELLED) { + System.out.println("Backup operation for [" + backupId + "] successfully cancelled"); + } + } catch (ExecutionException e) { + throw SpannerExceptionFactory.newSpannerException(e.getCause()); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_cancel_backup_create] + + // [START spanner_list_backup_operations] + static void listBackupOperations( + DatabaseAdminClient databaseAdminClient, + String projectId, String instanceId, + String databaseId, String backupId) { + InstanceName instanceName = InstanceName.of(projectId, instanceId); + // Get 'CreateBackup' operations for the sample database. + String filter = + String.format( + "(metadata.@type:type.googleapis.com/" + + "google.spanner.admin.database.v1.CreateBackupMetadata) " + + "AND (metadata.database:%s)", + DatabaseName.of(projectId, instanceId, databaseId).toString()); + ListBackupOperationsRequest listBackupOperationsRequest = + ListBackupOperationsRequest.newBuilder() + .setParent(instanceName.toString()).setFilter(filter).build(); + ListBackupOperationsPagedResponse createBackupOperations + = databaseAdminClient.listBackupOperations(listBackupOperationsRequest); + System.out.println("Create Backup Operations:"); + for (Operation op : createBackupOperations.iterateAll()) { + try { + CreateBackupMetadata metadata = op.getMetadata().unpack(CreateBackupMetadata.class); + System.out.println( + String.format( + "Backup %s on database %s pending: %d%% complete", + metadata.getName(), + metadata.getDatabase(), + metadata.getProgress().getProgressPercent())); + } catch (InvalidProtocolBufferException e) { + // The returned operation does not contain CreateBackupMetadata. 
+ System.err.println(e.getMessage()); + } + } + // Get copy backup operations for the sample database. + filter = String.format( + "(metadata.@type:type.googleapis.com/" + + "google.spanner.admin.database.v1.CopyBackupMetadata) " + + "AND (metadata.source_backup:%s)", + BackupName.of(projectId, instanceId, backupId).toString()); + listBackupOperationsRequest = + ListBackupOperationsRequest.newBuilder() + .setParent(instanceName.toString()).setFilter(filter).build(); + ListBackupOperationsPagedResponse copyBackupOperations = + databaseAdminClient.listBackupOperations(listBackupOperationsRequest); + System.out.println("Copy Backup Operations:"); + for (Operation op : copyBackupOperations.iterateAll()) { + try { + CopyBackupMetadata copyBackupMetadata = + op.getMetadata().unpack(CopyBackupMetadata.class); + System.out.println( + String.format( + "Copy Backup %s on backup %s pending: %d%% complete", + copyBackupMetadata.getName(), + copyBackupMetadata.getSourceBackup(), + copyBackupMetadata.getProgress().getProgressPercent())); + } catch (InvalidProtocolBufferException e) { + // The returned operation does not contain CopyBackupMetadata. + System.err.println(e.getMessage()); + } + } + } + // [END spanner_list_backup_operations] + + // [START spanner_list_database_operations] + static void listDatabaseOperations( + DatabaseAdminClient dbAdminClient, String projectId, String instanceId) { + // Get optimize restored database operations. 
+ com.google.cloud.Timestamp last24Hours = com.google.cloud.Timestamp.ofTimeSecondsAndNanos( + TimeUnit.SECONDS.convert( + TimeUnit.HOURS.convert(com.google.cloud.Timestamp.now().getSeconds(), TimeUnit.SECONDS) + - 24, + TimeUnit.HOURS), 0); + String filter = String.format("(metadata.@type:type.googleapis.com/" + + "google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata) AND " + + "(metadata.progress.start_time > \"%s\")", last24Hours); + ListDatabaseOperationsRequest listDatabaseOperationsRequest = + ListDatabaseOperationsRequest.newBuilder() + .setParent(com.google.spanner.admin.instance.v1.InstanceName.of( + projectId, instanceId).toString()).setFilter(filter).build(); + ListDatabaseOperationsPagedResponse pagedResponse + = dbAdminClient.listDatabaseOperations(listDatabaseOperationsRequest); + for (Operation op : pagedResponse.iterateAll()) { + try { + OptimizeRestoredDatabaseMetadata metadata = + op.getMetadata().unpack(OptimizeRestoredDatabaseMetadata.class); + System.out.println(String.format( + "Database %s restored from backup is %d%% optimized", + metadata.getName(), + metadata.getProgress().getProgressPercent())); + } catch (InvalidProtocolBufferException e) { + // The returned operation does not contain OptimizeRestoredDatabaseMetadata. + System.err.println(e.getMessage()); + } + } + } + // [END spanner_list_database_operations] + + // [START spanner_list_backups] + static void listBackups( + DatabaseAdminClient dbAdminClient, String projectId, + String instanceId, String databaseId, String backupId) { + InstanceName instanceName = InstanceName.of(projectId, instanceId); + // List all backups. + System.out.println("All backups:"); + for (Backup backup : dbAdminClient.listBackups( + instanceName.toString()).iterateAll()) { + System.out.println(backup); + } + + // List all backups with a specific name. 
+ System.out.println( + String.format("All backups with backup name containing \"%s\":", backupId)); + ListBackupsRequest listBackupsRequest = + ListBackupsRequest.newBuilder().setParent(instanceName.toString()) + .setFilter(String.format("name:%s", backupId)).build(); + for (Backup backup : dbAdminClient.listBackups(listBackupsRequest).iterateAll()) { + System.out.println(backup); + } + + // List all backups for databases whose name contains a certain text. + System.out.println( + String.format( + "All backups for databases with a name containing \"%s\":", databaseId)); + listBackupsRequest = + ListBackupsRequest.newBuilder().setParent(instanceName.toString()) + .setFilter(String.format("database:%s", databaseId)).build(); + for (Backup backup : dbAdminClient.listBackups(listBackupsRequest).iterateAll()) { + System.out.println(backup); + } + + // List all backups that expire before a certain time. + com.google.cloud.Timestamp expireTime = com.google.cloud.Timestamp.ofTimeMicroseconds( + TimeUnit.MICROSECONDS.convert( + System.currentTimeMillis() + TimeUnit.DAYS.toMillis(30), TimeUnit.MILLISECONDS)); + + System.out.println(String.format("All backups that expire before %s:", expireTime)); + listBackupsRequest = + ListBackupsRequest.newBuilder().setParent(instanceName.toString()) + .setFilter(String.format("expire_time < \"%s\"", expireTime)).build(); + + for (Backup backup : dbAdminClient.listBackups(listBackupsRequest).iterateAll()) { + System.out.println(backup); + } + + // List all backups with size greater than a certain number of bytes. + listBackupsRequest = + ListBackupsRequest.newBuilder().setParent(instanceName.toString()) + .setFilter("size_bytes > 100").build(); + + System.out.println("All backups with size greater than 100 bytes:"); + for (Backup backup : dbAdminClient.listBackups(listBackupsRequest).iterateAll()) { + System.out.println(backup); + } + + // List all backups with a create time after a certain timestamp and that are also ready. 
+    com.google.cloud.Timestamp createTime = com.google.cloud.Timestamp.ofTimeMicroseconds(
+        TimeUnit.MICROSECONDS.convert(
+            System.currentTimeMillis() - TimeUnit.DAYS.toMillis(1), TimeUnit.MILLISECONDS));
+
+    System.out.println(
+        String.format(
+            // This lists backups (filtered on backup create_time/state), not databases.
+            "All backups created after %s and that are ready:", createTime.toString()));
+    listBackupsRequest =
+        ListBackupsRequest.newBuilder().setParent(instanceName.toString())
+            .setFilter(String.format(
+                "create_time >= \"%s\" AND state:READY", createTime.toString())).build();
+    for (Backup backup : dbAdminClient.listBackups(listBackupsRequest).iterateAll()) {
+      System.out.println(backup);
+    }
+
+    // List backups using pagination.
+    System.out.println("All backups, listed using pagination:");
+    listBackupsRequest =
+        ListBackupsRequest.newBuilder().setParent(instanceName.toString()).setPageSize(10).build();
+    while (true) {
+      ListBackupsPagedResponse response = dbAdminClient.listBackups(listBackupsRequest);
+      for (Backup backup : response.getPage().iterateAll()) {
+        System.out.println(backup);
+      }
+      String nextPageToken = response.getNextPageToken();
+      if (!Strings.isNullOrEmpty(nextPageToken)) {
+        listBackupsRequest = listBackupsRequest.toBuilder().setPageToken(nextPageToken).build();
+      } else {
+        break;
+      }
+    }
+  }
+  // [END spanner_list_backups]
+
+  // [START spanner_restore_backup]
+  static void restoreBackup(
+      DatabaseAdminClient dbAdminClient,
+      String projectId,
+      String instanceId,
+      String backupId,
+      String restoreToDatabaseId) {
+    BackupName backupName = BackupName.of(projectId, instanceId, backupId);
+    Backup backup = dbAdminClient.getBackup(backupName);
+    // Initiate the request which returns an OperationFuture.
+ System.out.println(String.format( + "Restoring backup [%s] to database [%s]...", backup.getName(), restoreToDatabaseId)); + try { + RestoreDatabaseRequest request = + RestoreDatabaseRequest.newBuilder() + .setParent(InstanceName.of(projectId, instanceId).toString()) + .setDatabaseId(restoreToDatabaseId) + .setBackup(backupName.toString()).build(); + OperationFuture op = + dbAdminClient.restoreDatabaseAsync(request); + // Wait until the database has been restored. + com.google.spanner.admin.database.v1.Database db = op.get(); + // Get the restore info. + RestoreInfo restoreInfo = db.getRestoreInfo(); + BackupInfo backupInfo = restoreInfo.getBackupInfo(); + + System.out.println( + "Restored database [" + + db.getName() + + "] from [" + + restoreInfo.getBackupInfo().getBackup() + + "] with version time [" + backupInfo.getVersionTime() + "]"); + } catch (ExecutionException e) { + throw SpannerExceptionFactory.newSpannerException(e.getCause()); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_restore_backup] + + // [START spanner_update_backup] + static void updateBackup(DatabaseAdminClient dbAdminClient, String projectId, + String instanceId, String backupId) { + BackupName backupName = BackupName.of(projectId, instanceId, backupId); + + // Get current backup metadata. + Backup backup = dbAdminClient.getBackup(backupName); + // Add 30 days to the expire time. + // Expire time must be within 366 days of the create time of the backup. 
+ Timestamp currentExpireTime = backup.getExpireTime(); + com.google.cloud.Timestamp newExpireTime = + com.google.cloud.Timestamp.ofTimeMicroseconds( + TimeUnit.SECONDS.toMicros(currentExpireTime.getSeconds()) + + TimeUnit.NANOSECONDS.toMicros(currentExpireTime.getNanos()) + + TimeUnit.DAYS.toMicros(30L)); + + // New Expire Time must be less than Max Expire Time + newExpireTime = + newExpireTime.compareTo(com.google.cloud.Timestamp.fromProto(backup.getMaxExpireTime())) + < 0 ? newExpireTime : com.google.cloud.Timestamp.fromProto(backup.getMaxExpireTime()); + + System.out.println(String.format( + "Updating expire time of backup [%s] to %s...", + backupId.toString(), + java.time.OffsetDateTime.ofInstant( + Instant.ofEpochSecond(newExpireTime.getSeconds(), + newExpireTime.getNanos()), ZoneId.systemDefault()))); + + // Update expire time. + backup = backup.toBuilder().setExpireTime(newExpireTime.toProto()).build(); + dbAdminClient.updateBackup(backup, + FieldMask.newBuilder().addAllPaths(Lists.newArrayList("expire_time")).build()); + System.out.println("Updated backup [" + backupId + "]"); + } + // [END spanner_update_backup] + + // [START spanner_delete_backup] + static void deleteBackup(DatabaseAdminClient dbAdminClient, + String project, String instance, String backupId) { + BackupName backupName = BackupName.of(project, instance, backupId); + + // Delete the backup. + System.out.println("Deleting backup [" + backupId + "]..."); + dbAdminClient.deleteBackup(backupName); + // Verify that the backup is deleted. 
+ try { + dbAdminClient.getBackup(backupName); + } catch (NotFoundException e) { + if (e.getStatusCode().getCode() == Code.NOT_FOUND) { + System.out.println("Deleted backup [" + backupId + "]"); + } else { + System.out.println("Delete backup [" + backupId + "] failed"); + throw new RuntimeException("Delete backup [" + backupId + "] failed", e); + } + } + } + // [END spanner_delete_backup] + + static void run( + DatabaseClient dbClient, + DatabaseAdminClient dbAdminClient, + String command, + DatabaseId database, + String backupId) { + switch (command) { + case "createdatabase": + createDatabase(dbAdminClient, InstanceName.of(database.getInstanceId().getProject(), + database.getInstanceId().getInstance()), database.getDatabase()); + break; + case "write": + writeExampleData(dbClient); + break; + case "delete": + deleteExampleData(dbClient); + break; + case "query": + query(dbClient); + break; + case "read": + read(dbClient); + break; + case "addmarketingbudget": + addMarketingBudget(dbAdminClient, DatabaseName.of(database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), database.getDatabase())); + break; + case "update": + update(dbClient); + break; + case "writetransaction": + writeWithTransaction(dbClient); + break; + case "querymarketingbudget": + queryMarketingBudget(dbClient); + break; + case "addindex": + addIndex(dbAdminClient, DatabaseName.of(database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), database.getDatabase())); + break; + case "readindex": + readUsingIndex(dbClient); + break; + case "queryindex": + queryUsingIndex(dbClient); + break; + case "addstoringindex": + addStoringIndex(dbAdminClient, DatabaseName.of(database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), database.getDatabase())); + break; + case "readstoringindex": + readStoringIndex(dbClient); + break; + case "readonlytransaction": + readOnlyTransaction(dbClient); + break; + case "readstaledata": + 
readStaleData(dbClient); + break; + case "addcommittimestamp": + addCommitTimestamp(dbAdminClient, DatabaseName.of(database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), database.getDatabase())); + break; + case "updatewithtimestamp": + updateWithTimestamp(dbClient); + break; + case "querywithtimestamp": + queryMarketingBudgetWithTimestamp(dbClient); + break; + case "createtablewithtimestamp": + createTableWithTimestamp(dbAdminClient, + DatabaseName.of(database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), database.getDatabase())); + break; + case "writewithtimestamp": + writeExampleDataWithTimestamp(dbClient); + break; + case "querysingerstable": + querySingersTable(dbClient); + break; + case "queryperformancestable": + queryPerformancesTable(dbClient); + break; + case "writestructdata": + writeStructExampleData(dbClient); + break; + case "querywithstruct": + queryWithStruct(dbClient); + break; + case "querywitharrayofstruct": + queryWithArrayOfStruct(dbClient); + break; + case "querystructfield": + queryStructField(dbClient); + break; + case "querynestedstructfield": + queryNestedStructField(dbClient); + break; + case "insertusingdml": + insertUsingDml(dbClient); + break; + case "updateusingdml": + updateUsingDml(dbClient); + break; + case "deleteusingdml": + deleteUsingDml(dbClient); + break; + case "updateusingdmlwithtimestamp": + updateUsingDmlWithTimestamp(dbClient); + break; + case "writeandreadusingdml": + writeAndReadUsingDml(dbClient); + break; + case "updateusingdmlwithstruct": + updateUsingDmlWithStruct(dbClient); + break; + case "writeusingdml": + writeUsingDml(dbClient); + break; + case "querywithparameter": + queryWithParameter(dbClient); + break; + case "writewithtransactionusingdml": + writeWithTransactionUsingDml(dbClient); + break; + case "updateusingpartitioneddml": + updateUsingPartitionedDml(dbClient); + break; + case "deleteusingpartitioneddml": + deleteUsingPartitionedDml(dbClient); + 
break; + case "updateusingbatchdml": + updateUsingBatchDml(dbClient); + break; + case "createtablewithdatatypes": + createTableWithDatatypes(dbAdminClient, + DatabaseName.of(database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), database.getDatabase())); + break; + case "writedatatypesdata": + writeDatatypesData(dbClient); + break; + case "querywitharray": + queryWithArray(dbClient); + break; + case "querywithbool": + queryWithBool(dbClient); + break; + case "querywithbytes": + queryWithBytes(dbClient); + break; + case "querywithdate": + queryWithDate(dbClient); + break; + case "querywithfloat": + queryWithFloat(dbClient); + break; + case "querywithint": + queryWithInt(dbClient); + break; + case "querywithstring": + queryWithString(dbClient); + break; + case "querywithtimestampparameter": + queryWithTimestampParameter(dbClient); + break; + case "querywithnumeric": + queryWithNumeric(dbClient); + break; + case "clientwithqueryoptions": + clientWithQueryOptions(database); + break; + case "querywithqueryoptions": + queryWithQueryOptions(dbClient); + break; + case "createbackup": + createBackup(dbAdminClient, database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), database.getDatabase(), + backupId, getVersionTime(dbClient)); + break; + case "cancelcreatebackup": + cancelCreateBackup( + dbAdminClient, + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), database.getDatabase(), + backupId + "_cancel"); + break; + case "listbackupoperations": + listBackupOperations(dbAdminClient, database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), database.getDatabase(), backupId); + break; + case "listdatabaseoperations": + listDatabaseOperations(dbAdminClient, database.getInstanceId().getProject(), + database.getInstanceId().getInstance()); + break; + case "listbackups": + listBackups(dbAdminClient, database.getInstanceId().getProject(), + 
database.getInstanceId().getInstance(), database.getDatabase(), backupId); + break; + case "restorebackup": + restoreBackup( + dbAdminClient, database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), backupId, database.getDatabase()); + break; + case "updatebackup": + updateBackup(dbAdminClient, database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), backupId); + break; + case "deletebackup": + deleteBackup(dbAdminClient, database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), backupId); + break; + default: + printUsageAndExit(); + } + } + + static Timestamp getVersionTime(DatabaseClient dbClient) { + // Generates a version time for the backup + com.google.cloud.Timestamp versionTime; + try (ResultSet resultSet = dbClient.singleUse() + .executeQuery(Statement.of("SELECT CURRENT_TIMESTAMP()"))) { + resultSet.next(); + versionTime = resultSet.getTimestamp(0); + } + return versionTime.toProto(); + } + + static void printUsageAndExit() { + System.err.println("Usage:"); + System.err.println(" SpannerExample "); + System.err.println(""); + System.err.println("Examples:"); + System.err.println(" SpannerExample createdatabase my-instance example-db"); + System.err.println(" SpannerExample write my-instance example-db"); + System.err.println(" SpannerExample delete my-instance example-db"); + System.err.println(" SpannerExample query my-instance example-db"); + System.err.println(" SpannerExample read my-instance example-db"); + System.err.println(" SpannerExample addmarketingbudget my-instance example-db"); + System.err.println(" SpannerExample update my-instance example-db"); + System.err.println(" SpannerExample writetransaction my-instance example-db"); + System.err.println(" SpannerExample querymarketingbudget my-instance example-db"); + System.err.println(" SpannerExample addindex my-instance example-db"); + System.err.println(" SpannerExample readindex my-instance example-db"); + 
System.err.println(" SpannerExample queryindex my-instance example-db"); + System.err.println(" SpannerExample addstoringindex my-instance example-db"); + System.err.println(" SpannerExample readstoringindex my-instance example-db"); + System.err.println(" SpannerExample readonlytransaction my-instance example-db"); + System.err.println(" SpannerExample readstaledata my-instance example-db"); + System.err.println(" SpannerExample addcommittimestamp my-instance example-db"); + System.err.println(" SpannerExample updatewithtimestamp my-instance example-db"); + System.err.println(" SpannerExample querywithtimestamp my-instance example-db"); + System.err.println(" SpannerExample createtablewithtimestamp my-instance example-db"); + System.err.println(" SpannerExample writewithtimestamp my-instance example-db"); + System.err.println(" SpannerExample querysingerstable my-instance example-db"); + System.err.println(" SpannerExample queryperformancestable my-instance example-db"); + System.err.println(" SpannerExample writestructdata my-instance example-db"); + System.err.println(" SpannerExample querywithstruct my-instance example-db"); + System.err.println(" SpannerExample querywitharrayofstruct my-instance example-db"); + System.err.println(" SpannerExample querystructfield my-instance example-db"); + System.err.println(" SpannerExample querynestedstructfield my-instance example-db"); + System.err.println(" SpannerExample insertusingdml my-instance example-db"); + System.err.println(" SpannerExample updateusingdml my-instance example-db"); + System.err.println(" SpannerExample deleteusingdml my-instance example-db"); + System.err.println(" SpannerExample updateusingdmlwithtimestamp my-instance example-db"); + System.err.println(" SpannerExample writeandreadusingdml my-instance example-db"); + System.err.println(" SpannerExample updateusingdmlwithstruct my-instance example-db"); + System.err.println(" SpannerExample writeusingdml my-instance example-db"); + 
System.err.println(" SpannerExample querywithparameter my-instance example-db"); + System.err.println(" SpannerExample writewithtransactionusingdml my-instance example-db"); + System.err.println(" SpannerExample updateusingpartitioneddml my-instance example-db"); + System.err.println(" SpannerExample deleteusingpartitioneddml my-instance example-db"); + System.err.println(" SpannerExample updateusingbatchdml my-instance example-db"); + System.err.println(" SpannerExample createtablewithdatatypes my-instance example-db"); + System.err.println(" SpannerExample writedatatypesdata my-instance example-db"); + System.err.println(" SpannerExample querywitharray my-instance example-db"); + System.err.println(" SpannerExample querywithbool my-instance example-db"); + System.err.println(" SpannerExample querywithbytes my-instance example-db"); + System.err.println(" SpannerExample querywithdate my-instance example-db"); + System.err.println(" SpannerExample querywithfloat my-instance example-db"); + System.err.println(" SpannerExample querywithint my-instance example-db"); + System.err.println(" SpannerExample querywithstring my-instance example-db"); + System.err.println(" SpannerExample querywithtimestampparameter my-instance example-db"); + System.err.println(" SpannerExample clientwithqueryoptions my-instance example-db"); + System.err.println(" SpannerExample querywithqueryoptions my-instance example-db"); + System.err.println(" SpannerExample createbackup my-instance example-db"); + System.err.println(" SpannerExample listbackups my-instance example-db"); + System.err.println(" SpannerExample listbackupoperations my-instance example-db backup-id"); + System.err.println(" SpannerExample listdatabaseoperations my-instance example-db"); + System.err.println(" SpannerExample restorebackup my-instance example-db"); + System.exit(1); + } + + public static void main(String[] args) { + if (args.length != 3 && args.length != 4) { + printUsageAndExit(); + } + // [START 
init_client] + SpannerOptions options = SpannerOptions.newBuilder().build(); + Spanner spanner = options.getService(); + DatabaseAdminClient dbAdminClient = null; + try { + final String command = args[0]; + DatabaseId db = DatabaseId.of(options.getProjectId(), args[1], args[2]); + // [END init_client] + // This will return the default project id based on the environment. + String clientProject = spanner.getOptions().getProjectId(); + if (!db.getInstanceId().getProject().equals(clientProject)) { + System.err.println( + "Invalid project specified. Project in the database id should match the" + + "project name set in the environment variable GOOGLE_CLOUD_PROJECT. Expected: " + + clientProject); + printUsageAndExit(); + } + // Generate a backup id for the sample database. + String backupId = null; + if (args.length == 4) { + backupId = args[3]; + } + + // [START init_client] + DatabaseClient dbClient = spanner.getDatabaseClient(db); + dbAdminClient = spanner.createDatabaseAdminClient(); + + // Use client here... + // [END init_client] + + run(dbClient, dbAdminClient, command, db, backupId); + // [START init_client] + } finally { + if (dbAdminClient != null) { + if (!dbAdminClient.isShutdown() || !dbAdminClient.isTerminated()) { + dbAdminClient.close(); + } + } + spanner.close(); + } + // [END init_client] + System.out.println("Closed client"); + } +} diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/StatementTimeoutExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/StatementTimeoutExample.java new file mode 100644 index 000000000000..3e75b2fed6a0 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/StatementTimeoutExample.java @@ -0,0 +1,83 @@ +/* + * Copyright 2020 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.SpannerOptions.CallContextConfigurator; +import com.google.cloud.spanner.Statement; +import com.google.spanner.v1.SpannerGrpc; +import io.grpc.CallOptions; +import io.grpc.Context; +import io.grpc.MethodDescriptor; +import java.util.concurrent.TimeUnit; + +class StatementTimeoutExample { + + // [START spanner_set_statement_timeout] + + static void executeSqlWithTimeout() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + executeSqlWithTimeout(client); + } + } + + static void executeSqlWithTimeout(DatabaseClient client) { + CallContextConfigurator configurator = new CallContextConfigurator() { + public ApiCallContext configure(ApiCallContext context, ReqT request, + MethodDescriptor method) { + // DML uses the ExecuteSql RPC. 
+ if (method == SpannerGrpc.getExecuteSqlMethod()) { + // NOTE: You can use a GrpcCallContext to set a custom timeout for a single RPC + // invocation. This timeout can however ONLY BE SHORTER than the default timeout + // for the RPC. If you set a timeout that is longer than the default timeout, then + // the default timeout will be used. + return GrpcCallContext.createDefault() + .withCallOptions(CallOptions.DEFAULT.withDeadlineAfter(60L, TimeUnit.SECONDS)); + } + // Return null to indicate that the default should be used for other methods. + return null; + } + }; + // Create a context that uses the custom call configuration. + Context context = + Context.current().withValue(SpannerOptions.CALL_CONTEXT_CONFIGURATOR_KEY, configurator); + // Run the transaction in the custom context. + context.run(() -> + client.readWriteTransaction().run(transaction -> { + String sql = "INSERT INTO Singers (SingerId, FirstName, LastName)\n" + + "VALUES (20, 'George', 'Washington')"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record inserted.%n", rowCount); + return null; + }) + ); + } + // [END spanner_set_statement_timeout] +} diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/TagSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/TagSample.java new file mode 100644 index 000000000000..d8124d3ae267 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/TagSample.java @@ -0,0 +1,93 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Statement; + +/** + * Sample showing how to add transaction and query tags to Cloud Spanner operations. + */ +public class TagSample { + + // [START spanner_set_transaction_tag] + static void setTransactionTag(DatabaseClient databaseClient) { + // Sets the transaction tag to "app=concert,env=dev". + // This transaction tag will be applied to all the individual operations inside this + // transaction. + databaseClient + .readWriteTransaction(Options.tag("app=concert,env=dev")) + .run(transaction -> { + // Sets the request tag to "app=concert,env=dev,action=update". + // This request tag will only be set on this request. + transaction.executeUpdate( + Statement.of("UPDATE Venues" + + " SET Capacity = CAST(Capacity/4 AS INT64)" + + " WHERE OutdoorVenue = false"), + Options.tag("app=concert,env=dev,action=update")); + System.out.println("Venue capacities updated."); + + Statement insertStatement = Statement.newBuilder( + "INSERT INTO Venues" + + " (VenueId, VenueName, Capacity, OutdoorVenue, LastUpdateTime)" + + " VALUES (" + + " @venueId, @venueName, @capacity, @outdoorVenue, PENDING_COMMIT_TIMESTAMP()" + + " )") + .bind("venueId") + .to(81) + .bind("venueName") + .to("Venue 81") + .bind("capacity") + .to(1440) + .bind("outdoorVenue") + .to(true) + .build(); + + // Sets the request tag to "app=concert,env=dev,action=insert". 
+ // This request tag will only be set on this request. + transaction.executeUpdate( + insertStatement, + Options.tag("app=concert,env=dev,action=insert")); + System.out.println("New venue inserted."); + + return null; + }); + } + // [END spanner_set_transaction_tag] + + // [START spanner_set_request_tag] + static void setRequestTag(DatabaseClient databaseClient) { + // Sets the request tag to "app=concert,env=dev,action=select". + // This request tag will only be set on this request. + try (ResultSet resultSet = databaseClient + .singleUse() + .executeQuery( + Statement.of("SELECT SingerId, AlbumId, AlbumTitle FROM Albums"), + Options.tag("app=concert,env=dev,action=select"))) { + while (resultSet.next()) { + System.out.printf( + "SingerId: %d, AlbumId: %d, AlbumTitle: %s\n", + resultSet.getLong(0), + resultSet.getLong(1), + resultSet.getString(2)); + } + } + } + // [END spanner_set_request_tag] +} diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/TracingSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/TracingSample.java new file mode 100644 index 000000000000..9678ebfec747 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/TracingSample.java @@ -0,0 +1,116 @@ +/* + * Copyright 2018 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import com.google.api.MonitoredResource; +import com.google.cloud.MetadataConfig; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.spi.v1.SpannerRpcViews; +import io.opencensus.common.Duration; +import io.opencensus.common.Scope; +import io.opencensus.contrib.grpc.metrics.RpcViews; +import io.opencensus.contrib.zpages.ZPageHandlers; +import io.opencensus.exporter.stats.stackdriver.StackdriverStatsExporter; +import io.opencensus.exporter.trace.stackdriver.StackdriverExporter; +import io.opencensus.trace.Tracing; +import io.opencensus.trace.samplers.Samplers; +import java.util.Arrays; + +/** + * This sample demonstrates how to enable opencensus tracing and stats in cloud spanner client. + * + * @deprecated The OpenCensus project is deprecated. Use OpenTelemetry to enable metrics and stats + * with cloud spanner client. + *

    Note: This sample uses System.exit(0) to ensure clean termination because the + * ZPageHandlers HTTP server (localhost:8080/tracez) uses non-daemon threads and does not + * provide a public stop() method. + */ +public class TracingSample { + + private static final String SAMPLE_SPAN = "CloudSpannerSample"; + + public static void main(String[] args) throws Exception { + if (args.length != 2) { + System.err.println("Usage: TracingSample "); + return; + } + SpannerOptions options = SpannerOptions.newBuilder().build(); + Spanner spanner = options.getService(); + + // Installs a handler for /tracez page. + ZPageHandlers.startHttpServerAndRegisterAll(8080); + // Installs an exporter for stack driver traces. + StackdriverExporter.createAndRegister(); + Tracing.getExportComponent() + .getSampledSpanStore() + .registerSpanNamesForCollection(Arrays.asList(SAMPLE_SPAN)); + + // Installs an exporter for stack driver stats. + MonitoredResource.Builder builder = MonitoredResource.newBuilder(); + if (MetadataConfig.getProjectId() != null) { + builder.putLabels("project_id", options.getProjectId()); + } + builder.setType("global"); + StackdriverStatsExporter.createAndRegisterWithProjectIdAndMonitoredResource( + options.getProjectId(), Duration.create(60L, 0), builder.build()); + RpcViews.registerAllGrpcViews(); + // Capture GFE Latency and GFE Header missing count. + SpannerRpcViews.registerGfeLatencyAndHeaderMissingCountViews(); + + // Name of your instance & database. 
+ String instanceId = args[0]; + String databaseId = args[1]; + try { + // Creates a database client + DatabaseClient dbClient = + spanner.getDatabaseClient(DatabaseId.of(options.getProjectId(), instanceId, databaseId)); + // Queries the database + try (Scope ss = + Tracing.getTracer() + .spanBuilderWithExplicitParent(SAMPLE_SPAN, null) + .setSampler(Samplers.alwaysSample()) + .startScopedSpan()) { + ResultSet resultSet = dbClient.singleUse().executeQuery(Statement.of("SELECT 1")); + + System.out.println("\n\nResults:"); + // Prints the results + while (resultSet.next()) { + System.out.printf("%d\n\n", resultSet.getLong(0)); + } + } + } finally { + // First, shutdown the stats/metrics exporters + StackdriverStatsExporter.unregister(); + + // Shutdown tracing components + StackdriverExporter.unregister(); + Tracing.getExportComponent().shutdown(); + + // Close the spanner client + spanner.close(); + + // Force immediate exit since ZPageHandlers.startHttpServerAndRegisterAll(8080) + // starts a non-daemon HTTP server thread that cannot be stopped gracefully + System.exit(0); + } + } +} diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/TransactionTimeoutExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/TransactionTimeoutExample.java new file mode 100644 index 000000000000..c9b92c74fbb5 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/TransactionTimeoutExample.java @@ -0,0 +1,98 @@ +/* + * Copyright 2023 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_transaction_timeout] + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import io.grpc.Context; +import io.grpc.Context.CancellableContext; +import io.grpc.Deadline; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +/** + * Sample showing how to set a timeout for an entire transaction for the Cloud Spanner Java client. + */ +class TransactionTimeoutExample { + + static void executeTransactionWithTimeout() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + executeTransactionWithTimeout(projectId, instanceId, databaseId, 60L, TimeUnit.SECONDS); + } + + // Execute a read/write transaction with a timeout for the entire transaction. + static void executeTransactionWithTimeout( + String projectId, + String instanceId, + String databaseId, + long timeoutValue, + TimeUnit timeoutUnit) { + try (Spanner spanner = SpannerOptions.newBuilder().setProjectId(projectId).build() + .getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + // Create a gRPC context with a deadline and with cancellation. + // gRPC context deadlines require the use of a scheduled executor. 
+ ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); + try (CancellableContext context = + Context.current() + .withDeadline(Deadline.after(timeoutValue, timeoutUnit), executor) + .withCancellation()) { + context.run( + () -> { + client + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet resultSet = + transaction.executeQuery( + Statement.of( + "SELECT SingerId, FirstName, LastName\n" + + "FROM Singers\n" + + "ORDER BY LastName, FirstName"))) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong("SingerId"), + resultSet.getString("FirstName"), + resultSet.getString("LastName")); + } + } + String sql = + "INSERT INTO Singers (SingerId, FirstName, LastName)\n" + + "VALUES (20, 'George', 'Washington')"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record inserted.%n", rowCount); + return null; + }); + }); + } + } + } +} +// [END spanner_transaction_timeout] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/UnnamedParametersExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UnnamedParametersExample.java new file mode 100644 index 000000000000..3c73a7591d19 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UnnamedParametersExample.java @@ -0,0 +1,84 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Statement.StatementFactory; +import java.time.LocalDate; + +public class UnnamedParametersExample { + + static void executeQueryWithUnnamedParameters() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + executeQueryWithUnnamedParameters(projectId, instanceId, databaseId); + } + + static void executeQueryWithUnnamedParameters( + String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + StatementFactory statementFactory = client.getStatementFactory(); + + // Insert a row with unnamed parameters + client + .readWriteTransaction() + .run( + transaction -> { + Statement statement = statementFactory + .withUnnamedParameters("INSERT INTO Students(StudentId, Name, IsNRI, AvgMarks, " + + "JoinedAt, PinCode, CreatedAt) VALUES(?, ?, ?, ?, ?, ?, ?)", + 1000001, + "Google", + false, + (float) 34.5, + LocalDate.of(2024, 3, 31), + "123456", + Timestamp.now()); + transaction.executeUpdate(statement); + + return null; + }); + System.out.println("Row is inserted."); + + // Query the table with unnamed parameters + try (ResultSet resultSet = + client + .singleUse() + .executeQuery( + statementFactory.withUnnamedParameters( + "SELECT * FROM Students WHERE StudentId = ?", 1000001))) { + while (resultSet.next()) { + System.out.println(resultSet.getString("Name")); + } + 
} + System.out.println("Row is fetched."); + } + } +} diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateBackupScheduleSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateBackupScheduleSample.java new file mode 100644 index 000000000000..b49ec4901f6e --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateBackupScheduleSample.java @@ -0,0 +1,83 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_update_backup_schedule] + +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.protobuf.Duration; +import com.google.protobuf.FieldMask; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.BackupScheduleName; +import com.google.spanner.admin.database.v1.BackupScheduleSpec; +import com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig; +import com.google.spanner.admin.database.v1.CrontabSpec; +import com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest; +import java.io.IOException; + +class UpdateBackupScheduleSample { + + static void updateBackupSchedule() throws IOException { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String backupScheduleId = "my-backup-schedule"; + updateBackupSchedule(projectId, instanceId, databaseId, backupScheduleId); + } + + static void updateBackupSchedule( + String projectId, String instanceId, String databaseId, String backupScheduleId) + throws IOException { + BackupScheduleName backupScheduleName = + BackupScheduleName.of(projectId, instanceId, databaseId, backupScheduleId); + final CreateBackupEncryptionConfig encryptionConfig = + CreateBackupEncryptionConfig.newBuilder() + .setEncryptionType(CreateBackupEncryptionConfig.EncryptionType.USE_DATABASE_ENCRYPTION) + .build(); + final BackupSchedule backupSchedule = + BackupSchedule.newBuilder() + .setName(backupScheduleName.toString()) + .setRetentionDuration(Duration.newBuilder().setSeconds(3600 * 48)) + .setSpec( + BackupScheduleSpec.newBuilder() + .setCronSpec(CrontabSpec.newBuilder().setText("45 15 * * *").build()) + .build()) + .setEncryptionConfig(encryptionConfig) + .build(); + + try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) { + final FieldMask fieldMask = + FieldMask.newBuilder() + .addPaths("retention_duration") + .addPaths("spec.cron_spec.text") + .addPaths("encryption_config") + .build(); + final BackupSchedule updatedBackupSchedule = + databaseAdminClient.updateBackupSchedule( + UpdateBackupScheduleRequest.newBuilder() + .setBackupSchedule(backupSchedule) + .setUpdateMask(fieldMask) + .build()); + System.out.println( + String.format( + "Updated backup schedule: %s\n%s", + updatedBackupSchedule.getName(), updatedBackupSchedule.toString())); + } + } +} +// [END spanner_update_backup_schedule] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateDatabaseSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateDatabaseSample.java new file mode 100644 index 000000000000..57bb8e9159bd --- /dev/null +++ 
b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateDatabaseSample.java @@ -0,0 +1,78 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_update_database] + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.Lists; +import com.google.protobuf.FieldMask; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.UpdateDatabaseMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseRequest; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class UpdateDatabaseSample { + + static void updateDatabase() { + // TODO(developer): Replace these variables before running the sample. 
+ final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + + updateDatabase(projectId, instanceId, databaseId); + } + + static void updateDatabase( + String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + final Database database = + Database.newBuilder() + .setName(DatabaseName.of(projectId, instanceId, databaseId).toString()) + .setEnableDropProtection(true).build(); + final UpdateDatabaseRequest updateDatabaseRequest = + UpdateDatabaseRequest.newBuilder() + .setDatabase(database) + .setUpdateMask( + FieldMask.newBuilder().addAllPaths( + Lists.newArrayList("enable_drop_protection")).build()) + .build(); + OperationFuture operation = + databaseAdminClient.updateDatabaseAsync(updateDatabaseRequest); + System.out.printf("Waiting for update operation for %s to complete...\n", databaseId); + Database updatedDb = operation.get(5, TimeUnit.MINUTES); + System.out.printf("Updated database %s.\n", updatedDb.getName()); + } catch (ExecutionException | TimeoutException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } + } +} +// [END spanner_update_database] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateDatabaseWithDefaultLeaderSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateDatabaseWithDefaultLeaderSample.java new file mode 100644 index 000000000000..701240cb1290 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateDatabaseWithDefaultLeaderSample.java @@ -0,0 +1,68 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +//[START spanner_update_database_with_default_leader] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.spanner.admin.database.v1.DatabaseName; +import java.util.Collections; +import java.util.concurrent.ExecutionException; + +public class UpdateDatabaseWithDefaultLeaderSample { + + static void updateDatabaseWithDefaultLeader() { + // TODO(developer): Replace these variables before running the sample. 
+ final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + final String defaultLeader = "my-default-leader"; + updateDatabaseWithDefaultLeader(projectId, instanceId, databaseId, defaultLeader); + } + + static void updateDatabaseWithDefaultLeader( + String projectId, String instanceId, String databaseId, String defaultLeader) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + databaseAdminClient + .updateDatabaseDdlAsync( + DatabaseName.of(projectId, instanceId, databaseId), + Collections.singletonList( + String.format( + "ALTER DATABASE `%s` SET OPTIONS (default_leader = '%s')", + databaseId, + defaultLeader + ) + ) + ).get(); + System.out.println("Updated default leader to " + defaultLeader); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } +} +//[END spanner_update_database_with_default_leader] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateInstanceConfigSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateInstanceConfigSample.java new file mode 100644 index 000000000000..c10175abe1bf --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateInstanceConfigSample.java @@ -0,0 +1,88 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_update_instance_config] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.FieldMask; +import com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.InstanceConfigName; +import com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +class UpdateInstanceConfigSample { + + static void updateInstanceConfig() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceConfigId = "custom-instance-config"; + updateInstanceConfig(projectId, instanceConfigId); + } + + static void updateInstanceConfig(String projectId, String instanceConfigId) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService(); + InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) { + final InstanceConfigName instanceConfigName = + InstanceConfigName.of(projectId, instanceConfigId); + final InstanceConfig instanceConfig = + InstanceConfig.newBuilder() + .setName(instanceConfigName.toString()) + .setDisplayName("updated custom instance config") + .putLabels("updated", "true").build(); + /** + * The field mask must always be specified; this prevents any future + * fields in [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] + * from being erased accidentally by clients that do not know about them. + */ + final UpdateInstanceConfigRequest updateInstanceConfigRequest = + UpdateInstanceConfigRequest.newBuilder() + .setInstanceConfig(instanceConfig) + .setUpdateMask( + FieldMask.newBuilder().addAllPaths(ImmutableList.of("display_name", "labels")) + .build()).build(); + try { + System.out.printf("Waiting for update operation on %s to complete...\n", + instanceConfigName); + InstanceConfig instanceConfigResult = + instanceAdminClient.updateInstanceConfigAsync( + updateInstanceConfigRequest).get(5, TimeUnit.MINUTES); + System.out.printf( + "Updated instance configuration %s with new display name %s\n", + instanceConfigResult.getName(), instanceConfig.getDisplayName()); + } catch (ExecutionException | TimeoutException e) { + System.out.printf( + "Error: Updating instance config %s failed with error message %s\n", + instanceConfig.getName(), e.getMessage()); + e.printStackTrace(); + } catch (InterruptedException e) { + System.out.println( + "Error: Waiting for updateInstanceConfig operation to finish was interrupted"); + } + } 
+ } +} +// [END spanner_update_instance_config] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateInstanceDefaultBackupScheduleTypeExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateInstanceDefaultBackupScheduleTypeExample.java new file mode 100644 index 000000000000..39a456fe5991 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateInstanceDefaultBackupScheduleTypeExample.java @@ -0,0 +1,82 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_update_instance_default_backup_schedule_type] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient; +import com.google.common.collect.Lists; +import com.google.protobuf.FieldMask; +import com.google.spanner.admin.instance.v1.Instance; +import com.google.spanner.admin.instance.v1.InstanceConfigName; +import com.google.spanner.admin.instance.v1.InstanceName; +import com.google.spanner.admin.instance.v1.UpdateInstanceRequest; +import java.util.concurrent.ExecutionException; + +public class UpdateInstanceDefaultBackupScheduleTypeExample { + + static void updateInstanceDefaultBackupScheduleType() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + updateInstanceDefaultBackupScheduleType(projectId, instanceId); + } + + static void updateInstanceDefaultBackupScheduleType(String projectId, String instanceId) { + // Set Instance configuration. + int nodeCount = 2; + String displayName = "Updated name"; + + // Update an Instance object that will be used to update the instance. + Instance instance = + Instance.newBuilder() + .setName(InstanceName.of(projectId, instanceId).toString()) + .setDisplayName(displayName) + .setNodeCount(nodeCount) + .setDefaultBackupScheduleType(Instance.DefaultBackupScheduleType.AUTOMATIC) + .setConfig(InstanceConfigName.of(projectId, "regional-us-east4").toString()) + .build(); + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) { + + // Wait for the updatedInstance operation to finish. + Instance updatedInstance = + instanceAdminClient + .updateInstanceAsync( + UpdateInstanceRequest.newBuilder() + .setFieldMask( + FieldMask.newBuilder() + .addAllPaths(Lists.newArrayList("default_backup_schedule_type"))) + .setInstance(instance) + .build()) + .get(); + System.out.printf("Instance %s was successfully updated%n", updatedInstance.getName()); + } catch (ExecutionException e) { + System.out.printf( + "Error: Updating instance %s failed with error message %s%n", + instance.getName(), e.getMessage()); + } catch (InterruptedException e) { + System.out.println("Error: Waiting for updateInstance operation to finish was interrupted"); + } + } +} + +// [END spanner_update_instance_default_backup_schedule_type] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateInstanceExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateInstanceExample.java new file mode 100644 index 000000000000..cb4eadd097af --- /dev/null +++ 
b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateInstanceExample.java @@ -0,0 +1,81 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_update_instance] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient; +import com.google.common.collect.Lists; +import com.google.protobuf.FieldMask; +import com.google.spanner.admin.instance.v1.Instance; +import com.google.spanner.admin.instance.v1.InstanceConfigName; +import com.google.spanner.admin.instance.v1.InstanceName; +import com.google.spanner.admin.instance.v1.UpdateInstanceRequest; +import java.util.concurrent.ExecutionException; + +public class UpdateInstanceExample { + + static void updateInstance() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + updateInstance(projectId, instanceId); + } + + static void updateInstance(String projectId, String instanceId) { + // Set Instance configuration. + int nodeCount = 2; + String displayName = "Updated name"; + + // Update an Instance object that will be used to update the instance. 
+ Instance instance = + Instance.newBuilder() + .setName(InstanceName.of(projectId, instanceId).toString()) + .setDisplayName(displayName) + .setNodeCount(nodeCount) + .setEdition(Instance.Edition.ENTERPRISE) + .setConfig(InstanceConfigName.of(projectId, "regional-us-east4").toString()) + .build(); + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) { + + // Wait for the updatedInstance operation to finish. + Instance updatedInstance = + instanceAdminClient + .updateInstanceAsync( + UpdateInstanceRequest.newBuilder() + .setFieldMask( + FieldMask.newBuilder().addAllPaths(Lists.newArrayList("edition"))) + .setInstance(instance) + .build()) + .get(); + System.out.printf("Instance %s was successfully updated%n", updatedInstance.getName()); + } catch (ExecutionException e) { + System.out.printf( + "Error: Updating instance %s failed with error message %s%n", + instance.getName(), e.getMessage()); + } catch (InterruptedException e) { + System.out.println("Error: Waiting for updateInstance operation to finish was interrupted"); + } + } +} + +// [END spanner_update_instance] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateJsonDataSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateJsonDataSample.java new file mode 100644 index 000000000000..2870d20ea3c3 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateJsonDataSample.java @@ -0,0 +1,76 @@ +/* + * Copyright 2021 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_update_data_with_json_column] + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Value; +import com.google.common.collect.ImmutableList; + +class UpdateJsonDataSample { + + static void updateJsonData() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + updateJsonData(client); + } + } + + static void updateJsonData(DatabaseClient client) { + client.write( + ImmutableList.of( + Mutation.newInsertOrUpdateBuilder("Venues") + .set("VenueId") + .to(4L) + .set("VenueDetails") + .to( + Value.json( + "[{\"name\":\"room 1\",\"open\":true}," + + "{\"name\":\"room 2\",\"open\":false}]")) + .build(), + Mutation.newInsertOrUpdateBuilder("Venues") + .set("VenueId") + .to(19L) + .set("VenueDetails") + .to(Value.json("{\"rating\":9,\"open\":true}")) + .build(), + Mutation.newInsertOrUpdateBuilder("Venues") + .set("VenueId") + .to(42L) + .set("VenueDetails") + .to( + Value.json( + "{\"name\":null," + + "\"open\":{\"Monday\":true,\"Tuesday\":false}," + + 
"\"tags\":[\"large\",\"airy\"]}")) + .build())); + System.out.println("Venues successfully updated"); + } +} +// [END spanner_update_data_with_json_column] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateJsonbDataSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateJsonbDataSample.java new file mode 100644 index 000000000000..48733474c6fc --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateJsonbDataSample.java @@ -0,0 +1,78 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_postgresql_jsonb_update_data] +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Value; +import com.google.common.collect.ImmutableList; + +class UpdateJsonbDataSample { + + static void updateJsonbData() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + updateJsonbData(client); + } + } + + static void updateJsonbData(DatabaseClient client) { + // PG JSONB takes the last value in the case of duplicate keys. + // PG JSONB sorts first by key length and then lexicographically with + // equivalent key length. + client.write( + ImmutableList.of( + Mutation.newInsertOrUpdateBuilder("Venues") + .set("VenueId") + .to(4L) + .set("VenueDetails") + .to( + Value.pgJsonb( + "[{\"name\":\"room 1\",\"open\":true,\"name\":\"room 3\"}," + + "{\"name\":\"room 2\",\"open\":false}]")) + .build(), + Mutation.newInsertOrUpdateBuilder("Venues") + .set("VenueId") + .to(19L) + .set("VenueDetails") + .to(Value.pgJsonb("{\"rating\":9,\"open\":true}")) + .build(), + Mutation.newInsertOrUpdateBuilder("Venues") + .set("VenueId") + .to(42L) + .set("VenueDetails") + .to( + Value.pgJsonb( + "{\"name\":null," + + "\"open\":{\"Monday\":true,\"Tuesday\":false}," + + "\"tags\":[\"large\",\"airy\"]}")) + .build())); + System.out.println("Venues successfully updated"); + } +} +// [END spanner_postgresql_jsonb_update_data] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateNumericDataSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateNumericDataSample.java new file mode 100644 index 000000000000..a2bdb52d78a3 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateNumericDataSample.java @@ -0,0 +1,68 @@ +/* + * Copyright 2020 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_update_data_with_numeric_column] +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.ImmutableList; +import java.math.BigDecimal; + +class UpdateNumericDataSample { + + static void updateNumericData() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + updateNumericData(client); + } + } + + static void updateNumericData(DatabaseClient client) { + client.write( + ImmutableList.of( + Mutation.newInsertOrUpdateBuilder("Venues") + .set("VenueId") + .to(4L) + .set("Revenue") + .to(new BigDecimal("35000")) + .build(), + Mutation.newInsertOrUpdateBuilder("Venues") + .set("VenueId") + .to(19L) + .set("Revenue") + .to(new BigDecimal("104500")) + .build(), + Mutation.newInsertOrUpdateBuilder("Venues") + .set("VenueId") + .to(42L) + .set("Revenue") + .to(new BigDecimal("99999999999999999999999999999.99")) + .build())); + System.out.println("Venues successfully updated"); + } +} +// [END spanner_update_data_with_numeric_column] diff --git 
a/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateProtoDataSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateProtoDataSample.java new file mode 100644 index 000000000000..37712a27dc3f --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateProtoDataSample.java @@ -0,0 +1,91 @@ +/* + * Copyright 2024 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_update_data_with_proto_types] + +import com.example.spanner.SingerProto.Genre; +import com.example.spanner.SingerProto.SingerInfo; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ProtocolMessageEnum; +import java.util.Collections; +import java.util.List; + +class UpdateProtoDataSample { + + static void updateProtoData() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + updateProtoData(client); + } + } + + static void updateProtoData(DatabaseClient client) { + SingerInfo singerInfo = + SingerInfo.newBuilder() + .setSingerId(2) + .setBirthDate("February") + .setNationality("Country2") + .setGenre(Genre.FOLK) + .build(); + Genre singerGenre = Genre.FOLK; + List singerInfoList = Collections.singletonList(singerInfo); + List singerGenreList = Collections.singletonList(singerGenre); + + client.write( + ImmutableList.of( + Mutation.newInsertOrUpdateBuilder("Singers") + .set("SingerId") + .to(2L) + .set("SingerInfo") + .to(singerInfo) + .set("SingerInfoArray") + .toProtoMessageArray(singerInfoList, SingerInfo.getDescriptor()) + .set("SingerGenre") + .to(singerGenre) + .set("SingerGenreArray") + .toProtoEnumArray(singerGenreList, Genre.getDescriptor()) + .build(), + Mutation.newInsertOrUpdateBuilder("Singers") + .set("SingerId") + .to(3L) + .set("SingerInfo") + .to(null, SingerInfo.getDescriptor()) + .set("SingerInfoArray") + .toProtoMessageArray(null, SingerInfo.getDescriptor()) + .set("SingerGenre") + .to(null, Genre.getDescriptor()) + .set("SingerGenreArray") + .toProtoEnumArray(null, Genre.getDescriptor()) + .build())); + System.out.println("Data updated"); + } +} +// [END spanner_update_data_with_proto_types] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateProtoDataSampleUsingDml.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateProtoDataSampleUsingDml.java new file mode 100644 index 000000000000..9b85f774eb7a --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateProtoDataSampleUsingDml.java @@ -0,0 +1,97 @@ +/* + * 
Copyright 2024 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_update_data_with_proto_types_with_dml] + +import com.example.spanner.SingerProto.Genre; +import com.example.spanner.SingerProto.SingerInfo; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ProtocolMessageEnum; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +class UpdateProtoDataSampleUsingDml { + + static void updateProtoDataUsingDml() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + updateProtoDataUsingDml(client); + } + } + + static void updateProtoDataUsingDml(DatabaseClient client) { + SingerInfo singerInfo = + SingerInfo.newBuilder() + .setSingerId(1) + .setBirthDate("January") + .setNationality("Country1") + .setGenre(Genre.ROCK) + .build(); + Genre singerGenre = Genre.ROCK; + List singerInfoList = Collections.singletonList(singerInfo); + List singerGenreList = Collections.singletonList(singerGenre); + + client + .readWriteTransaction() + .run( + transaction -> { + Statement statement1 = + Statement.newBuilder( + "UPDATE Singers SET SingerInfo = @singerInfo, " + + "SingerInfoArray=@singerInfoArray, " + + "SingerGenre=@singerGenre, SingerGenreArray=@singerGenreArray " + + "WHERE SingerId = 1") + .bind("singerInfo") + .to(singerInfo) + .bind("singerInfoArray") + .toProtoMessageArray(singerInfoList, SingerInfo.getDescriptor()) + .bind("singerGenre") + .to(singerGenre) + .bind("singerGenreArray") + .toProtoEnumArray(singerGenreList, Genre.getDescriptor()) + .build(); + + Statement statement2 = + Statement.newBuilder( + "UPDATE Singers SET SingerInfo.nationality = @singerNationality " + + "WHERE SingerId = 1") + .bind("singerNationality") + .to("Country2") + .build(); + + transaction.batchUpdate(Arrays.asList(statement1, statement2)); + return null; + }); + + System.out.println("record(s) updated"); + } +} +// [END spanner_update_data_with_proto_types_with_dml] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateUsingDmlReturningSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateUsingDmlReturningSample.java new file mode 100644 index 000000000000..7632148dea05 --- 
/dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/UpdateUsingDmlReturningSample.java @@ -0,0 +1,76 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_update_dml_returning] + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; + +public class UpdateUsingDmlReturningSample { + + static void updateUsingDmlReturning() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + updateUsingDmlReturning(projectId, instanceId, databaseId); + } + + static void updateUsingDmlReturning(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService()) { + final DatabaseClient dbClient = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + // Update MarketingBudget column for records satisfying + // a particular condition and returns the modified + // MarketingBudget column of the updated records using + // ‘THEN RETURN MarketingBudget’. 
+ // It is also possible to return all columns of all the + // updated records by using ‘THEN RETURN *’. + dbClient + .readWriteTransaction() + .run( + transaction -> { + String sql = + "UPDATE Albums " + + "SET MarketingBudget = MarketingBudget * 2 " + + "WHERE SingerId = 1 and AlbumId = 1 " + + "THEN RETURN MarketingBudget"; + + // readWriteTransaction.executeQuery(..) API should be used for executing + // DML statements with RETURNING clause. + try (ResultSet resultSet = transaction.executeQuery(Statement.of(sql))) { + while (resultSet.next()) { + System.out.printf("%d\n", resultSet.getLong(0)); + } + System.out.printf( + "Updated row(s) count: %d\n", resultSet.getStats().getRowCountExact()); + } + return null; + }); + } + } +} +// [END spanner_update_dml_returning] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/AddAndDropDatabaseRole.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/AddAndDropDatabaseRole.java new file mode 100644 index 000000000000..8df22402c21d --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/AddAndDropDatabaseRole.java @@ -0,0 +1,89 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +// [START spanner_add_and_drop_database_role] +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class AddAndDropDatabaseRole { + + static void addAndDropDatabaseRole() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String parentRole = "my-new-parent-role"; + String childRole = "my-new-child-role"; + addAndDropDatabaseRole(projectId, instanceId, databaseId, parentRole, childRole); + } + + static void addAndDropDatabaseRole( + String projectId, String instanceId, String databaseId, String parentRole, String childRole) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService()) { + final DatabaseAdminClient adminClient = spanner.getDatabaseAdminClient(); + OperationFuture operation = + adminClient.updateDatabaseDdl( + instanceId, + databaseId, + ImmutableList.of( + "CREATE ROLE " + parentRole, + "GRANT SELECT ON TABLE Singers TO ROLE " + parentRole, + "GRANT SELECT ON TABLE Albums TO ROLE " + parentRole, + "CREATE ROLE " + childRole, + "GRANT ROLE " + parentRole + " TO ROLE " + childRole), + null); + try { + System.out.println("Waiting for role create operation to complete..."); + operation.get(5, TimeUnit.MINUTES); + System.out.printf( + "Created roles %s and %s and granted privileges%n", parentRole, childRole); + // Delete role and membership. 
+ operation = + adminClient.updateDatabaseDdl( + instanceId, + databaseId, + ImmutableList.of( + "REVOKE ROLE " + parentRole + " FROM ROLE " + childRole, + "DROP ROLE " + childRole), + null); + System.out.println("Waiting for role revoke & drop operation to complete..."); + operation.get(5, TimeUnit.MINUTES); + System.out.printf("Revoked privileges and dropped role %s%n", childRole); + } catch (ExecutionException | TimeoutException e) { + System.out.printf( + "Error: AddAndDropDatabaseRole failed with error message %s\n", e.getMessage()); + e.printStackTrace(); + } catch (InterruptedException e) { + System.out.println( + "Error: Waiting for AddAndDropDatabaseRole operation to finish was interrupted"); + } + } + } +} +// [END spanner_add_and_drop_database_role] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/AddJsonColumnSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/AddJsonColumnSample.java new file mode 100644 index 000000000000..8be7d6bd58b9 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/AddJsonColumnSample.java @@ -0,0 +1,57 @@ +/* + * Copyright 2021 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +// [START spanner_add_json_column] +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import java.util.concurrent.ExecutionException; + +class AddJsonColumnSample { + + static void addJsonColumn() throws InterruptedException, ExecutionException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseAdminClient adminClient = spanner.getDatabaseAdminClient(); + addJsonColumn(adminClient, instanceId, databaseId); + } + } + + static void addJsonColumn(DatabaseAdminClient adminClient, String instanceId, String databaseId) + throws InterruptedException, ExecutionException { + OperationFuture operation = + adminClient.updateDatabaseDdl( + instanceId, + databaseId, + ImmutableList.of("ALTER TABLE Venues ADD COLUMN VenueDetails JSON"), + null); + // Wait for the operation to finish. + // This will throw an ExecutionException if the operation fails. 
+ operation.get(); + System.out.printf("Successfully added column `VenueDetails`%n"); + } +} +// [END spanner_add_json_column] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/AddJsonbColumnSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/AddJsonbColumnSample.java new file mode 100644 index 000000000000..102ea36b0ee0 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/AddJsonbColumnSample.java @@ -0,0 +1,57 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +// [START spanner_postgresql_jsonb_add_column] +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import java.util.concurrent.ExecutionException; + +class AddJsonbColumnSample { + + static void addJsonbColumn() throws InterruptedException, ExecutionException { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseAdminClient adminClient = spanner.getDatabaseAdminClient(); + addJsonbColumn(adminClient, instanceId, databaseId); + } + } + + static void addJsonbColumn(DatabaseAdminClient adminClient, String instanceId, String databaseId) + throws InterruptedException, ExecutionException { + OperationFuture operation = + adminClient.updateDatabaseDdl( + instanceId, + databaseId, + ImmutableList.of("ALTER TABLE Venues ADD COLUMN VenueDetails JSONB"), + null); + // Wait for the operation to finish. + // This will throw an ExecutionException if the operation fails. + operation.get(); + System.out.printf("Successfully added column `VenueDetails`%n"); + } +} +// [END spanner_postgresql_jsonb_add_column] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/AddNumericColumnSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/AddNumericColumnSample.java new file mode 100644 index 000000000000..347aaf5a9ed7 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/AddNumericColumnSample.java @@ -0,0 +1,58 @@ +/* + * Copyright 2020 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +// [START spanner_add_numeric_column] +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import java.util.concurrent.ExecutionException; + +class AddNumericColumnSample { + + static void addNumericColumn() throws InterruptedException, ExecutionException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseAdminClient adminClient = spanner.getDatabaseAdminClient(); + addNumericColumn(adminClient, instanceId, databaseId); + } + } + + static void addNumericColumn( + DatabaseAdminClient adminClient, String instanceId, String databaseId) + throws InterruptedException, ExecutionException { + OperationFuture operation = + adminClient.updateDatabaseDdl( + instanceId, + databaseId, + ImmutableList.of("ALTER TABLE Venues ADD COLUMN Revenue NUMERIC"), + null); + // Wait for the operation to finish. + // This will throw an ExecutionException if the operation fails. 
+ operation.get(); + System.out.printf("Successfully added column `Revenue`%n"); + } +} +// [END spanner_add_numeric_column] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/AlterSequenceSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/AlterSequenceSample.java new file mode 100644 index 000000000000..294bfdf6e22b --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/AlterSequenceSample.java @@ -0,0 +1,96 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +// [START spanner_alter_sequence] +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.common.collect.ImmutableList; +import java.util.Objects; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class AlterSequenceSample { + static void alterSequence() { + // TODO(developer): Replace these variables before running the sample. 
+ final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + alterSequence(projectId, instanceId, databaseId); + } + + static void alterSequence(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + final DatabaseAdminClient dbAdminClient = spanner.getDatabaseAdminClient(); + + dbAdminClient + .updateDatabaseDdl( + instanceId, + databaseId, + ImmutableList.of( + "ALTER SEQUENCE Seq SET OPTIONS " + + "(skip_range_min = 1000, skip_range_max = 5000000)"), + null) + .get(5, TimeUnit.MINUTES); + + System.out.println( + "Altered Seq sequence to skip an inclusive range between 1000 and 5000000"); + + final DatabaseClient dbClient = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + + Long insertCount = + dbClient + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet rs = + transaction.executeQuery( + Statement.of( + "INSERT INTO Customers (CustomerName) VALUES " + + "('Lea'), ('Catalina'), ('Smith') " + + "THEN RETURN CustomerId"))) { + while (rs.next()) { + System.out.printf( + "Inserted customer record with CustomerId: %d\n", rs.getLong(0)); + } + return Objects.requireNonNull(rs.getStats()).getRowCountExact(); + } + }); + System.out.printf("Number of customer records inserted is: %d\n", insertCount); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (TimeoutException e) { + // If the operation timed out propagate the timeout + throw SpannerExceptionFactory.propagateTimeout(e); + } + } +} +// [END spanner_alter_sequence] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/AlterTableWithForeignKeyDeleteCascadeSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/AlterTableWithForeignKeyDeleteCascadeSample.java new file mode 100644 index 000000000000..ebf8a3f053b3 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/AlterTableWithForeignKeyDeleteCascadeSample.java @@ -0,0 +1,59 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +// [START spanner_alter_table_with_foreign_key_delete_cascade] +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.ImmutableList; + +class AlterTableWithForeignKeyDeleteCascadeSample { + + static void alterForeignKeyDeleteCascadeConstraint() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseAdminClient adminClient = spanner.getDatabaseAdminClient(); + alterForeignKeyDeleteCascadeConstraint(adminClient, instanceId, databaseId); + } + } + + static void alterForeignKeyDeleteCascadeConstraint( + DatabaseAdminClient adminClient, String instanceId, String databaseId) { + adminClient.updateDatabaseDdl( + instanceId, + databaseId, + ImmutableList.of( + "ALTER TABLE ShoppingCarts\n" + + " ADD CONSTRAINT FKShoppingCartsCustomerName\n" + + " FOREIGN KEY (CustomerName)\n" + + " REFERENCES Customers(CustomerName)\n" + + " ON DELETE CASCADE\n"), + null); + System.out.printf( + String.format( + "Altered ShoppingCarts table with FKShoppingCartsCustomerName\n" + + "foreign key constraint on database %s on instance %s", + databaseId, instanceId)); + } +} +// [END spanner_alter_table_with_foreign_key_delete_cascade] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CopyBackupSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CopyBackupSample.java new file mode 100644 index 000000000000..3b3c192a5a52 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CopyBackupSample.java @@ -0,0 +1,101 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +// [START spanner_copy_backup] + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Backup; +import com.google.cloud.spanner.BackupId; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.spanner.admin.database.v1.CopyBackupMetadata; +import java.time.LocalDateTime; +import java.time.OffsetDateTime; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +public class CopyBackupSample { + static void copyBackup() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String sourceBackupId = "my-backup"; + String destinationBackupId = "my-destination-backup"; + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseAdminClient databaseAdminClient = spanner.getDatabaseAdminClient(); + copyBackup(databaseAdminClient, projectId, instanceId, sourceBackupId, destinationBackupId); + } + } + + static void copyBackup( + DatabaseAdminClient databaseAdminClient, + String projectId, + String instanceId, + String sourceBackupId, + String destinationBackupId) { + + Timestamp expireTime = + Timestamp.ofTimeMicroseconds( + TimeUnit.MICROSECONDS.convert( + System.currentTimeMillis() + TimeUnit.DAYS.toMillis(14), + TimeUnit.MILLISECONDS)); + // Creates a copy of an existing backup. 
+ Backup destinationBackup = + databaseAdminClient + .newBackupBuilder(BackupId.of(projectId, instanceId, destinationBackupId)) + .setExpireTime(expireTime) + .build(); + + // Initiate the request which returns an OperationFuture. + System.out.println("Copying backup [" + destinationBackup.getId() + "]..."); + OperationFuture operation = + databaseAdminClient.copyBackup( + BackupId.of(projectId, instanceId, sourceBackupId), destinationBackup); + try { + // Wait for the backup operation to complete. + destinationBackup = operation.get(); + System.out.println("Copied backup [" + destinationBackup.getId() + "]"); + } catch (ExecutionException e) { + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + // Load the metadata of the new backup from the server. + destinationBackup = destinationBackup.reload(); + System.out.println( + String.format( + "Backup %s of size %d bytes was copied at %s for version of database at %s", + destinationBackup.getId().getName(), + destinationBackup.getSize(), + LocalDateTime.ofEpochSecond( + destinationBackup.getProto().getCreateTime().getSeconds(), + destinationBackup.getProto().getCreateTime().getNanos(), + OffsetDateTime.now().getOffset()), + LocalDateTime.ofEpochSecond( + destinationBackup.getProto().getVersionTime().getSeconds(), + destinationBackup.getProto().getVersionTime().getNanos(), + OffsetDateTime.now().getOffset()))); + return; + } +} +// [END spanner_copy_backup] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateBackupWithEncryptionKey.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateBackupWithEncryptionKey.java new file mode 100644 index 000000000000..23ddd6991750 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateBackupWithEncryptionKey.java @@ -0,0 +1,107 @@ +/* + * Copyright 2021 Google LLC + * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +// [START spanner_create_backup_with_encryption_key] + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Backup; +import com.google.cloud.spanner.BackupId; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.encryption.EncryptionConfigs; +import com.google.spanner.admin.database.v1.CreateBackupMetadata; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.threeten.bp.LocalDateTime; +import org.threeten.bp.OffsetDateTime; + +public class CreateBackupWithEncryptionKey { + + static void createBackupWithEncryptionKey() throws InterruptedException { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String backupId = "my-backup"; + String kmsKeyName = + "projects/" + projectId + "/locations//keyRings//cryptoKeys/"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseAdminClient adminClient = spanner.getDatabaseAdminClient(); + createBackupWithEncryptionKey( + adminClient, + projectId, + instanceId, + databaseId, + backupId, + kmsKeyName); + } + } + + static Void createBackupWithEncryptionKey(DatabaseAdminClient adminClient, + String projectId, String instanceId, String databaseId, String backupId, String kmsKeyName) + throws InterruptedException { + // Set expire time to 14 days from now. + final Timestamp expireTime = Timestamp.ofTimeMicroseconds(TimeUnit.MICROSECONDS.convert( + System.currentTimeMillis() + TimeUnit.DAYS.toMillis(14), TimeUnit.MILLISECONDS)); + final Backup backupToCreate = adminClient + .newBackupBuilder(BackupId.of(projectId, instanceId, backupId)) + .setDatabase(DatabaseId.of(projectId, instanceId, databaseId)) + .setExpireTime(expireTime) + .setEncryptionConfig(EncryptionConfigs.customerManagedEncryption(kmsKeyName)) + .build(); + final OperationFuture operation = adminClient + .createBackup(backupToCreate); + + Backup backup; + try { + System.out.println("Waiting for operation to complete..."); + backup = operation.get(1200, TimeUnit.SECONDS); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (TimeoutException e) { + // If the operation timed out propagates the timeout + throw SpannerExceptionFactory.propagateTimeout(e); + } + + System.out.printf( + "Backup %s of size %d bytes was created at %s using encryption key %s%n", + backup.getId().getName(), + backup.getSize(), + LocalDateTime.ofEpochSecond( + backup.getProto().getCreateTime().getSeconds(), + backup.getProto().getCreateTime().getNanos(), + OffsetDateTime.now().getOffset()), + kmsKeyName + ); + + return null; + } +} +// [END spanner_create_backup_with_encryption_key] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateDatabaseWithDefaultLeaderSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateDatabaseWithDefaultLeaderSample.java new file mode 100644 index 000000000000..8bfc6422cff6 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateDatabaseWithDefaultLeaderSample.java @@ -0,0 +1,85 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +//[START spanner_create_database_with_default_leader] + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import java.util.Arrays; +import java.util.concurrent.ExecutionException; + +public class CreateDatabaseWithDefaultLeaderSample { + + static void createDatabaseWithDefaultLeader() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + final String defaultLeader = "my-default-leader"; + createDatabaseWithDefaultLeader(projectId, instanceId, databaseId, defaultLeader); + } + + static void createDatabaseWithDefaultLeader( + String projectId, String instanceId, String databaseId, String defaultLeader) { + try (Spanner spanner = SpannerOptions + .newBuilder() + .setProjectId(projectId) + .build() + .getService()) { + final DatabaseAdminClient databaseAdminClient = spanner.getDatabaseAdminClient(); + final OperationFuture operation = databaseAdminClient + .createDatabase( + instanceId, + databaseId, + Arrays.asList( + "CREATE TABLE Singers (" + + " SingerId INT64 NOT NULL," + + " FirstName STRING(1024)," + + " LastName STRING(1024)," + + " SingerInfo BYTES(MAX)" + + ") PRIMARY KEY (SingerId)", + "CREATE TABLE Albums (" + + " SingerId INT64 NOT NULL," + + " AlbumId INT64 NOT NULL," + + " AlbumTitle STRING(MAX)" + + ") PRIMARY KEY (SingerId, AlbumId)," + + " INTERLEAVE IN PARENT Singers ON DELETE CASCADE", + "ALTER DATABASE " + "`" + databaseId + "`" + + " SET OPTIONS ( default_leader = '" + 
defaultLeader + "' )" + ) + ); + final Database database = operation.get(); + System.out.println("Created database [" + database.getId() + "]"); + System.out.println("\tDefault leader: " + database.getDefaultLeader()); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } +} +//[END spanner_create_database_with_default_leader] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateDatabaseWithEncryptionKey.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateDatabaseWithEncryptionKey.java new file mode 100644 index 000000000000..2064423547e4 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateDatabaseWithEncryptionKey.java @@ -0,0 +1,100 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +// [START spanner_create_database_with_encryption_key] + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.encryption.EncryptionConfigs; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import java.util.Arrays; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class CreateDatabaseWithEncryptionKey { + + static void createDatabaseWithEncryptionKey() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String kmsKeyName = + "projects/" + projectId + "/locations//keyRings//cryptoKeys/"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseAdminClient adminClient = spanner.getDatabaseAdminClient(); + createDatabaseWithEncryptionKey( + adminClient, + projectId, + instanceId, + databaseId, + kmsKeyName); + } + } + + static void createDatabaseWithEncryptionKey(DatabaseAdminClient adminClient, + String projectId, String instanceId, String databaseId, String kmsKeyName) { + final Database databaseToCreate = adminClient + .newDatabaseBuilder(DatabaseId.of(projectId, instanceId, databaseId)) + .setEncryptionConfig(EncryptionConfigs.customerManagedEncryption(kmsKeyName)) + .build(); + final OperationFuture operation = adminClient + .createDatabase(databaseToCreate, Arrays.asList( + "CREATE TABLE Singers (" + + " SingerId INT64 NOT NULL," + + " FirstName STRING(1024)," + + " LastName 
STRING(1024)," + + " SingerInfo BYTES(MAX)" + + ") PRIMARY KEY (SingerId)", + "CREATE TABLE Albums (" + + " SingerId INT64 NOT NULL," + + " AlbumId INT64 NOT NULL," + + " AlbumTitle STRING(MAX)" + + ") PRIMARY KEY (SingerId, AlbumId)," + + " INTERLEAVE IN PARENT Singers ON DELETE CASCADE" + )); + try { + System.out.println("Waiting for operation to complete..."); + Database createdDatabase = operation.get(120, TimeUnit.SECONDS); + + System.out.printf( + "Database %s created with encryption key %s%n", + createdDatabase.getId(), + createdDatabase.getEncryptionConfig().getKmsKeyName() + ); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (TimeoutException e) { + // If the operation timed out propagates the timeout + throw SpannerExceptionFactory.propagateTimeout(e); + } + } +} +// [END spanner_create_database_with_encryption_key] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateDatabaseWithVersionRetentionPeriodSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateDatabaseWithVersionRetentionPeriodSample.java new file mode 100644 index 000000000000..1dea5af2045c --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateDatabaseWithVersionRetentionPeriodSample.java @@ -0,0 +1,86 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +// [START spanner_create_database_with_version_retention_period] + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import java.util.Arrays; +import java.util.concurrent.ExecutionException; + +public class CreateDatabaseWithVersionRetentionPeriodSample { + + static void createDatabaseWithVersionRetentionPeriod() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String versionRetentionPeriod = "7d"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseAdminClient adminClient = spanner.getDatabaseAdminClient(); + createDatabaseWithVersionRetentionPeriod(adminClient, instanceId, databaseId, + versionRetentionPeriod); + } + } + + static void createDatabaseWithVersionRetentionPeriod(DatabaseAdminClient adminClient, + String instanceId, String databaseId, String versionRetentionPeriod) { + OperationFuture op = + adminClient.createDatabase( + instanceId, + databaseId, + Arrays.asList( + "CREATE TABLE Singers (" + + " SingerId INT64 NOT NULL," + + " FirstName STRING(1024)," + + " LastName STRING(1024)," + + " SingerInfo BYTES(MAX)" + + ") PRIMARY KEY (SingerId)", + "CREATE TABLE Albums (" + + " SingerId INT64 NOT NULL," + + " AlbumId INT64 NOT NULL," + + " AlbumTitle STRING(MAX)" + + ") PRIMARY KEY (SingerId, AlbumId)," + + " INTERLEAVE IN PARENT Singers ON DELETE CASCADE", + "ALTER DATABASE " + "`" + databaseId + "`" + + " SET OPTIONS ( version_retention_period = '" + versionRetentionPeriod + "' )" + )); + try { + Database database = op.get(); + System.out.println("Created database [" + database.getId() + "]"); + System.out.println("\tVersion retention period: " + database.getVersionRetentionPeriod()); + System.out.println("\tEarliest version time: " + database.getEarliestVersionTime()); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } + } +} +// [END spanner_create_database_with_version_retention_period] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceConfigSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceConfigSample.java new file mode 100644 index 000000000000..3b9e49bf0e0b --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceConfigSample.java @@ -0,0 +1,74 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +// [START spanner_create_instance_config] +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.InstanceAdminClient; +import com.google.cloud.spanner.InstanceConfig; +import com.google.cloud.spanner.InstanceConfigId; +import com.google.cloud.spanner.InstanceConfigInfo; +import com.google.cloud.spanner.ReplicaInfo; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +class CreateInstanceConfigSample { + static void createInstanceConfig() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String baseInstanceConfig = "my-base-instance-config"; + String instanceConfigId = "custom-instance-config4"; + createInstanceConfig(projectId, baseInstanceConfig, instanceConfigId); + } + + static void createInstanceConfig( + String projectId, String baseInstanceConfig, String instanceConfigId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + final InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient(); + final InstanceConfig baseConfig = instanceAdminClient.getInstanceConfig(baseInstanceConfig); + List readOnlyReplicas = + ImmutableList.of(baseConfig.getOptionalReplicas().get(0)); + InstanceConfigInfo instanceConfigInfo = + InstanceConfig.newBuilder(InstanceConfigId.of(projectId, instanceConfigId), baseConfig) + .setDisplayName(instanceConfigId) + .addReadOnlyReplicas(readOnlyReplicas) + .build(); + final OperationFuture operation = + instanceAdminClient.createInstanceConfig(instanceConfigInfo); + try { + System.out.printf("Waiting for 
create operation for %s to complete...\n", instanceConfigId); + InstanceConfig instanceConfig = operation.get(5, TimeUnit.MINUTES); + System.out.printf("Created instance configuration %s\n", instanceConfig.getId()); + } catch (ExecutionException | TimeoutException e) { + System.out.printf( + "Error: Creating instance configuration %s failed with error message %s\n", + instanceConfigInfo.getId(), e.getMessage()); + } catch (InterruptedException e) { + System.out.println( + "Error: Waiting for createInstanceConfig operation to finish was interrupted"); + } + } + } +} +// [END spanner_create_instance_config] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceExample.java new file mode 100644 index 000000000000..a17784d874b4 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceExample.java @@ -0,0 +1,73 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +//[START spanner_create_instance] +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.Instance; +import com.google.cloud.spanner.InstanceAdminClient; +import com.google.cloud.spanner.InstanceConfigId; +import com.google.cloud.spanner.InstanceId; +import com.google.cloud.spanner.InstanceInfo; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.spanner.admin.instance.v1.CreateInstanceMetadata; +import java.util.concurrent.ExecutionException; + +class CreateInstanceExample { + + static void createInstance() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + createInstance(projectId, instanceId); + } + + static void createInstance(String projectId, String instanceId) { + Spanner spanner = SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient(); + + // Set Instance configuration. + String configId = "regional-us-east4"; + int nodeCount = 2; + String displayName = "Descriptive name"; + + // Create an InstanceInfo object that will be used to create the instance. + InstanceInfo instanceInfo = + InstanceInfo.newBuilder(InstanceId.of(projectId, instanceId)) + .setInstanceConfigId(InstanceConfigId.of(projectId, configId)) + .setNodeCount(nodeCount) + .setDisplayName(displayName) + .build(); + OperationFuture operation = + instanceAdminClient.createInstance(instanceInfo); + try { + // Wait for the createInstance operation to finish. 
+ Instance instance = operation.get(); + System.out.printf("Instance %s was successfully created%n", instance.getId()); + } catch (ExecutionException e) { + System.out.printf( + "Error: Creating instance %s failed with error message %s%n", + instanceInfo.getId(), e.getMessage()); + } catch (InterruptedException e) { + System.out.println("Error: Waiting for createInstance operation to finish was interrupted"); + } finally { + spanner.close(); + } + } +} +//[END spanner_create_instance] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithAutoscalingConfigExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithAutoscalingConfigExample.java new file mode 100644 index 000000000000..0502fba5eda7 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithAutoscalingConfigExample.java @@ -0,0 +1,86 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +// [START spanner_create_instance_with_autoscaling_config] + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.Instance; +import com.google.cloud.spanner.InstanceAdminClient; +import com.google.cloud.spanner.InstanceConfigId; +import com.google.cloud.spanner.InstanceId; +import com.google.cloud.spanner.InstanceInfo; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.spanner.admin.instance.v1.AutoscalingConfig; +import com.google.spanner.admin.instance.v1.CreateInstanceMetadata; +import com.google.spanner.admin.instance.v1.Instance.Edition; +import java.util.concurrent.ExecutionException; + +class CreateInstanceWithAutoscalingConfigExample { + + static void createInstance() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + createInstance(projectId, instanceId); + } + + static void createInstance(String projectId, String instanceId) { + Spanner spanner = SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient(); + + // Set Instance configuration. + String configId = "regional-us-east4"; + // Create an autoscaling config. + AutoscalingConfig autoscalingConfig = + AutoscalingConfig.newBuilder() + .setAutoscalingLimits( + AutoscalingConfig.AutoscalingLimits.newBuilder().setMinNodes(1).setMaxNodes(2)) + .setAutoscalingTargets( + AutoscalingConfig.AutoscalingTargets.newBuilder() + .setHighPriorityCpuUtilizationPercent(65) + .setStorageUtilizationPercent(95)) + .build(); + + // Create an InstanceInfo object that will be used to create the instance. 
+ InstanceInfo instanceInfo = + InstanceInfo.newBuilder(InstanceId.of(projectId, instanceId)) + .setInstanceConfigId(InstanceConfigId.of(projectId, configId)) + .setAutoscalingConfig(autoscalingConfig) + .setDisplayName("Descriptive name") + .setEdition(Edition.ENTERPRISE) + .build(); + OperationFuture operation = + instanceAdminClient.createInstance(instanceInfo); + + try { + // Wait for the createInstance operation to finish. + Instance instance = operation.get(); + System.out.printf("Autoscaler instance %s was successfully created%n", instance.getId()); + } catch (ExecutionException e) { + System.out.printf( + "Error: Creating instance %s failed with error message %s%n", + instanceInfo.getId(), e.getMessage()); + } catch (InterruptedException e) { + System.out.println("Error: Waiting for createInstance operation to finish was interrupted"); + } finally { + spanner.close(); + } + } +} +// [END spanner_create_instance_with_autoscaling_config] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithProcessingUnitsExample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithProcessingUnitsExample.java new file mode 100644 index 000000000000..95d4f1b67375 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithProcessingUnitsExample.java @@ -0,0 +1,76 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +//[START spanner_create_instance_with_processing_units] + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.Instance; +import com.google.cloud.spanner.InstanceAdminClient; +import com.google.cloud.spanner.InstanceConfigId; +import com.google.cloud.spanner.InstanceId; +import com.google.cloud.spanner.InstanceInfo; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.spanner.admin.instance.v1.CreateInstanceMetadata; + +class CreateInstanceWithProcessingUnitsExample { + + static void createInstance() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + createInstance(projectId, instanceId); + } + + static void createInstance(String projectId, String instanceId) { + Spanner spanner = SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient(); + + // Set Instance configuration. + String configId = "regional-us-east4"; + // This will create an instance with the processing power of 0.2 nodes. + int processingUnits = 500; + String displayName = "Descriptive name"; + + try { + // Creates a new instance + System.out.printf("Creating instance %s.%n", instanceId); + OperationFuture operation = + instanceAdminClient.createInstance(InstanceInfo + .newBuilder(InstanceId.of(projectId, instanceId)) + .setInstanceConfigId(InstanceConfigId.of(projectId, configId)) + .setProcessingUnits(processingUnits) + .setDisplayName(displayName) + .build()); + + // Wait for the createInstance operation to finish. 
+ System.out.printf("Waiting for operation on %s to complete...%n", instanceId); + Instance createdInstance = operation.get(); + + System.out.printf("Created instance %s.%n", createdInstance.getId().getInstance()); + + Instance instance = instanceAdminClient.getInstance(instanceId); + System.out.printf("Instance %s has %d processing units.%n", instance.getId().getInstance(), + instance.getProcessingUnits()); + } catch (Exception e) { + System.out.printf("Error: %s.%n", e.getMessage()); + } + spanner.close(); + } +} +//[END spanner_create_instance_with_processing_units] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateSequenceSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateSequenceSample.java new file mode 100644 index 000000000000..d0172eb2d531 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateSequenceSample.java @@ -0,0 +1,98 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +// [START spanner_create_sequence] +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.common.collect.ImmutableList; +import java.util.Objects; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class CreateSequenceSample { + static void createSequence() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + createSequence(projectId, instanceId, databaseId); + } + + static void createSequence(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + final DatabaseAdminClient dbAdminClient = spanner.getDatabaseAdminClient(); + + dbAdminClient + .updateDatabaseDdl( + instanceId, + databaseId, + ImmutableList.of( + "CREATE SEQUENCE Seq OPTIONS (sequence_kind = 'bit_reversed_positive')", + "CREATE TABLE Customers (CustomerId INT64 DEFAULT " + + "(GET_NEXT_SEQUENCE_VALUE(SEQUENCE Seq)), CustomerName STRING(1024)) " + + "PRIMARY KEY (CustomerId)"), + null) + .get(5, TimeUnit.MINUTES); + + System.out.println( + "Created Seq sequence and Customers table, where its key column CustomerId " + + "uses the sequence as a default value"); + + final DatabaseClient dbClient = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + + Long insertCount = + dbClient + .readWriteTransaction() + .run( + transaction -> { + 
try (ResultSet rs = + transaction.executeQuery( + Statement.of( + "INSERT INTO Customers (CustomerName) VALUES " + + "('Alice'), ('David'), ('Marc') THEN RETURN CustomerId"))) { + while (rs.next()) { + System.out.printf( + "Inserted customer record with CustomerId: %d\n", rs.getLong(0)); + } + return Objects.requireNonNull(rs.getStats()).getRowCountExact(); + } + }); + System.out.printf("Number of customer records inserted is: %d\n", insertCount); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (TimeoutException e) { + // If the operation timed out propagate the timeout + throw SpannerExceptionFactory.propagateTimeout(e); + } + } +} +// [END spanner_create_sequence] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateTableWithForeignKeyDeleteCascadeSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateTableWithForeignKeyDeleteCascadeSample.java new file mode 100644 index 000000000000..a5cc4668d58b --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateTableWithForeignKeyDeleteCascadeSample.java @@ -0,0 +1,66 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +// [START spanner_create_table_with_foreign_key_delete_cascade] +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.ImmutableList; + +class CreateTableWithForeignKeyDeleteCascadeSample { + + static void createForeignKeyDeleteCascadeConstraint() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseAdminClient adminClient = spanner.getDatabaseAdminClient(); + createForeignKeyDeleteCascadeConstraint(adminClient, instanceId, databaseId); + } + } + + static void createForeignKeyDeleteCascadeConstraint( + DatabaseAdminClient adminClient, String instanceId, String databaseId) { + adminClient.updateDatabaseDdl( + instanceId, + databaseId, + ImmutableList.of( + "CREATE TABLE Customers (\n" + + " CustomerId INT64 NOT NULL,\n" + + " CustomerName STRING(62) NOT NULL,\n" + + " ) PRIMARY KEY (CustomerId)", + "CREATE TABLE ShoppingCarts (\n" + + " CartId INT64 NOT NULL,\n" + + " CustomerId INT64 NOT NULL,\n" + + " CustomerName STRING(62) NOT NULL,\n" + + " CONSTRAINT FKShoppingCartsCustomerId FOREIGN KEY (CustomerId)\n" + + " REFERENCES Customers (CustomerId) ON DELETE CASCADE\n" + + " ) PRIMARY KEY (CartId)\n"), + null); + + 
System.out.printf( + String.format( + "Created Customers and ShoppingCarts table with FKShoppingCartsCustomerId\n" + + "foreign key constraint on database %s on instance %s\n", + databaseId, instanceId)); + } +} +// [END spanner_create_table_with_foreign_key_delete_cascade] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/DeleteInstanceConfigSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/DeleteInstanceConfigSample.java new file mode 100644 index 000000000000..b1013cd4b0f1 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/DeleteInstanceConfigSample.java @@ -0,0 +1,49 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +// [START spanner_delete_instance_config] +import com.google.cloud.spanner.InstanceAdminClient; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerOptions; + +class DeleteInstanceConfigSample { + static void deleteInstanceConfig() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceConfigId = "custom-user-config"; + deleteInstanceConfig(projectId, instanceConfigId); + } + + static void deleteInstanceConfig(String projectId, String instanceConfigId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + final InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient(); + try { + System.out.printf("Deleting %s...\n", instanceConfigId); + instanceAdminClient.deleteInstanceConfig(instanceConfigId); + System.out.printf("Deleted instance configuration %s\n", instanceConfigId); + } catch (SpannerException e) { + System.out.printf( + "Error: Deleting instance configuration %s failed with error message: %s\n", + instanceConfigId, e.getMessage()); + } + } + } +} +// [END spanner_delete_instance_config] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/DropForeignKeyConstraintDeleteCascadeSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/DropForeignKeyConstraintDeleteCascadeSample.java new file mode 100644 index 000000000000..bf03542f912f --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/DropForeignKeyConstraintDeleteCascadeSample.java @@ -0,0 +1,57 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +// [START spanner_drop_foreign_key_constraint_delete_cascade] +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.ImmutableList; + +class DropForeignKeyConstraintDeleteCascadeSample { + + static void deleteForeignKeyDeleteCascadeConstraint() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseAdminClient adminClient = spanner.getDatabaseAdminClient(); + deleteForeignKeyDeleteCascadeConstraint(adminClient, instanceId, databaseId); + } + } + + static void deleteForeignKeyDeleteCascadeConstraint( + DatabaseAdminClient adminClient, String instanceId, String databaseId) { + adminClient.updateDatabaseDdl( + instanceId, + databaseId, + ImmutableList.of( + "ALTER TABLE ShoppingCarts\n" + + " DROP CONSTRAINT FKShoppingCartsCustomerName\n"), + null); + + System.out.printf( + String.format( + "Altered ShoppingCarts table to drop FKShoppingCartsCustomerName\n" + + "foreign key constraint on database %s on instance %s\n", + databaseId, instanceId)); + } +} +// [END spanner_drop_foreign_key_constraint_delete_cascade] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/DropSequenceSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/DropSequenceSample.java new file mode 100644 index 000000000000..3e8f2bb41462 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/DropSequenceSample.java @@ -0,0 +1,69 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file 
except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +// [START spanner_drop_sequence] +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.ImmutableList; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class DropSequenceSample { + static void dropSequence() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + dropSequence(projectId, instanceId, databaseId); + } + + static void dropSequence(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + final DatabaseAdminClient dbAdminClient = spanner.getDatabaseAdminClient(); + + dbAdminClient + .updateDatabaseDdl( + instanceId, + databaseId, + ImmutableList.of( + "ALTER TABLE Customers ALTER COLUMN CustomerId DROP DEFAULT", + "DROP SEQUENCE Seq"), + null) + .get(5, TimeUnit.MINUTES); + + System.out.println( + "Altered Customers table to drop DEFAULT from CustomerId column " + + "and dropped the Seq sequence"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. 
+ throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (TimeoutException e) { + // If the operation timed out propagate the timeout + throw SpannerExceptionFactory.propagateTimeout(e); + } + } +} +// [END spanner_drop_sequence] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/EnableFineGrainedAccess.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/EnableFineGrainedAccess.java new file mode 100644 index 000000000000..cc373f50f60b --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/EnableFineGrainedAccess.java @@ -0,0 +1,102 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +// [START spanner_enable_fine_grained_access] +import com.google.cloud.Binding; +import com.google.cloud.Condition; +import com.google.cloud.Policy; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.ImmutableList; + +public class EnableFineGrainedAccess { + + static void enableFineGrainedAccess() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String iamMember = "user:alice@example.com"; + String role = "my-role"; + String title = "my-condition-title"; + enableFineGrainedAccess(projectId, instanceId, databaseId, iamMember, title, role); + } + + static void enableFineGrainedAccess( + String projectId, + String instanceId, + String databaseId, + String iamMember, + String title, + String role) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService()) { + final DatabaseAdminClient adminClient = spanner.getDatabaseAdminClient(); + Policy policy = adminClient.getDatabaseIAMPolicy(instanceId, databaseId, 3); + int policyVersion = policy.getVersion(); + // The policy in the response from getDatabaseIAMPolicy might use the policy version + // that you specified, or it might use a lower policy version. For example, if you + // specify version 3, but the policy has no conditional role bindings, the response + // uses version 1. Valid values are 0, 1, and 3. 
+ if (policy.getVersion() < 3) { + // conditional role bindings work with policy version 3 + policyVersion = 3; + } + + Binding binding1 = + Binding.newBuilder() + .setRole("roles/spanner.fineGrainedAccessUser") + .setMembers(ImmutableList.of(iamMember)) + .build(); + + Binding binding2 = + Binding.newBuilder() + .setRole("roles/spanner.databaseRoleUser") + .setCondition( + Condition.newBuilder() + .setDescription(title) + .setExpression( + String.format("resource.name.endsWith(\"/databaseRoles/%s\")", role)) + .setTitle(title) + .build()) + .setMembers(ImmutableList.of(iamMember)) + .build(); + ImmutableList bindings = + ImmutableList.builder() + .addAll(policy.getBindingsList()) + .add(binding1) + .add(binding2) + .build(); + Policy policyWithConditions = + Policy.newBuilder() + .setVersion(policyVersion) + .setEtag(policy.getEtag()) + .setBindings(bindings) + .build(); + Policy response = + adminClient.setDatabaseIAMPolicy(instanceId, databaseId, policyWithConditions); + System.out.printf( + "Enabled fine-grained access in IAM with version %d%n", response.getVersion()); + } + } +} +// [END spanner_enable_fine_grained_access] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/GetDatabaseDdlSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/GetDatabaseDdlSample.java new file mode 100644 index 000000000000..c7f56f496950 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/GetDatabaseDdlSample.java @@ -0,0 +1,52 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +//[START spanner_get_database_ddl] + +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import java.util.List; + +public class GetDatabaseDdlSample { + + static void getDatabaseDdl() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + getDatabaseDdl(projectId, instanceId, databaseId); + } + + static void getDatabaseDdl( + String projectId, String instanceId, String databaseId) { + try (Spanner spanner = SpannerOptions + .newBuilder() + .setProjectId(projectId) + .build() + .getService()) { + final DatabaseAdminClient databaseAdminClient = spanner.getDatabaseAdminClient(); + final List ddls = databaseAdminClient.getDatabaseDdl(instanceId, databaseId); + System.out.println("Retrieved database DDL for " + databaseId); + for (String ddl : ddls) { + System.out.println(ddl); + } + } + } +} +//[END spanner_get_database_ddl] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/GetInstanceConfigSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/GetInstanceConfigSample.java new file mode 100644 index 000000000000..38b6230b97bf --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/GetInstanceConfigSample.java @@ -0,0 +1,54 @@ +/* + * 
Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +//[START spanner_get_instance_config] + +import com.google.cloud.spanner.InstanceAdminClient; +import com.google.cloud.spanner.InstanceConfig; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; + +public class GetInstanceConfigSample { + + static void getInstanceConfig() { + // TODO(developer): Replace these variables before running the sample. 
+ final String projectId = "my-project"; + final String instanceConfigName = "nam6"; + getInstanceConfig(projectId, instanceConfigName); + } + + static void getInstanceConfig(String projectId, String instanceConfigName) { + try (Spanner spanner = SpannerOptions + .newBuilder() + .setProjectId(projectId) + .build() + .getService()) { + final InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient(); + + final InstanceConfig instanceConfig = instanceAdminClient + .getInstanceConfig(instanceConfigName); + + System.out.printf( + "Available leader options for instance config %s: %s%n", + instanceConfig.getId(), + instanceConfig.getLeaderOptions() + ); + } + } +} +//[END spanner_get_instance_config] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/ListDatabaseRoles.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/ListDatabaseRoles.java new file mode 100644 index 000000000000..ee31d874ec1a --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/ListDatabaseRoles.java @@ -0,0 +1,52 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +// [START spanner_list_database_roles] +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.DatabaseRole; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import java.util.concurrent.ExecutionException; + +public class ListDatabaseRoles { + + static void listDatabaseRoles() throws InterruptedException, ExecutionException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + listDatabaseRoles(projectId, instanceId, databaseId); + } + + static void listDatabaseRoles(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService()) { + final DatabaseAdminClient adminClient = spanner.getDatabaseAdminClient(); + String databasePath = DatabaseId.of(projectId, instanceId, databaseId).getName(); + System.out.println("List of Database roles"); + for (DatabaseRole role : adminClient.listDatabaseRoles(instanceId, databaseId).iterateAll()) { + System.out.printf("%s%n", role.getName()); + } + } + } +} +// [END spanner_list_database_roles] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/ListDatabasesSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/ListDatabasesSample.java new file mode 100644 index 000000000000..4abd2c6f2523 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/ListDatabasesSample.java @@ -0,0 +1,56 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +//[START spanner_list_databases] + +import com.google.api.gax.paging.Page; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; + +public class ListDatabasesSample { + + static void listDatabases() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + listDatabases(projectId, instanceId); + } + + static void listDatabases(String projectId, String instanceId) { + try (Spanner spanner = SpannerOptions + .newBuilder() + .setProjectId(projectId) + .build() + .getService()) { + final DatabaseAdminClient databaseAdminClient = spanner.getDatabaseAdminClient(); + Page page = databaseAdminClient.listDatabases(instanceId); + System.out.println("Databases for projects/" + projectId + "/instances/" + instanceId); + while (page != null) { + for (Database database : page.iterateAll()) { + final String defaultLeader = database.getDefaultLeader().equals("") + ? 
"" : "(default leader = " + database.getDefaultLeader() + ")"; + System.out.println("\t" + database.getId() + " " + defaultLeader); + } + page = page.getNextPage(); + } + } + } +} +//[END spanner_list_databases] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/ListInstanceConfigOperationsSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/ListInstanceConfigOperationsSample.java new file mode 100644 index 000000000000..2cf683b4c94f --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/ListInstanceConfigOperationsSample.java @@ -0,0 +1,66 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +// [START spanner_list_instance_config_operations] +import com.google.cloud.spanner.InstanceAdminClient; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.longrunning.Operation; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.spanner.admin.instance.v1.CreateInstanceConfigMetadata; + +public class ListInstanceConfigOperationsSample { + static void listInstanceConfigOperations() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + listInstanceConfigOperations(projectId); + } + + static void listInstanceConfigOperations(String projectId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + final InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient(); + + try { + System.out.printf( + "Getting list of instance config operations for project %s...\n", + projectId); + final Iterable instanceConfigOperations = + instanceAdminClient + .listInstanceConfigOperations( + Options.filter( + "(metadata.@type=type.googleapis.com/" + + "google.spanner.admin.instance.v1.CreateInstanceConfigMetadata)")) + .iterateAll(); + for (Operation operation : instanceConfigOperations) { + CreateInstanceConfigMetadata metadata = + operation.getMetadata().unpack(CreateInstanceConfigMetadata.class); + System.out.printf( + "Create instance config operation for %s is %d%% completed.\n", + metadata.getInstanceConfig().getName(), metadata.getProgress().getProgressPercent()); + } + } catch (InvalidProtocolBufferException e) { + System.out.printf( + "Error: Listing instance config operations failed with error message %s\n", + e.getMessage()); + } + } + } +} +// [END spanner_list_instance_config_operations] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/ListInstanceConfigsSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/ListInstanceConfigsSample.java new file mode 100644 index 000000000000..b7753502bace --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/ListInstanceConfigsSample.java @@ -0,0 +1,52 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +//[START spanner_list_instance_configs] + +import com.google.cloud.spanner.InstanceAdminClient; +import com.google.cloud.spanner.InstanceConfig; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; + +public class ListInstanceConfigsSample { + + static void listInstanceConfigs() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + listInstanceConfigs(projectId); + } + + static void listInstanceConfigs(String projectId) { + try (Spanner spanner = SpannerOptions + .newBuilder() + .setProjectId(projectId) + .build() + .getService()) { + final InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient(); + + for (InstanceConfig instanceConfig : instanceAdminClient.listInstanceConfigs().iterateAll()) { + System.out.printf( + "Available leader options for instance config %s: %s%n", + instanceConfig.getId(), + instanceConfig.getLeaderOptions() + ); + } + } + } +} +//[END spanner_list_instance_configs] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgAlterSequenceSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgAlterSequenceSample.java new file mode 100644 index 000000000000..10bccc2cb49d --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgAlterSequenceSample.java @@ -0,0 +1,89 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache 
License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +// [START spanner_postgresql_alter_sequence] +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.common.collect.ImmutableList; +import java.util.Objects; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class PgAlterSequenceSample { + static void pgAlterSequence() { + // TODO(developer): Replace these variables before running the sample. 
+ final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + pgAlterSequence(projectId, instanceId, databaseId); + } + + static void pgAlterSequence(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + final DatabaseAdminClient dbAdminClient = spanner.getDatabaseAdminClient(); + dbAdminClient + .updateDatabaseDdl( + instanceId, + databaseId, + ImmutableList.of("ALTER SEQUENCE Seq SKIP RANGE 1000 5000000"), + null) + .get(5, TimeUnit.MINUTES); + System.out.println( + "Altered Seq sequence to skip an inclusive range between 1000 and 5000000"); + final DatabaseClient dbClient = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + Long insertCount = + dbClient + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet rs = + transaction.executeQuery( + Statement.of( + "INSERT INTO Customers (CustomerName) VALUES " + + "('Lea'), ('Catalina'), ('Smith') RETURNING CustomerId"))) { + while (rs.next()) { + System.out.printf( + "Inserted customer record with CustomerId: %d\n", rs.getLong(0)); + } + return Objects.requireNonNull(rs.getStats()).getRowCountExact(); + } + }); + System.out.printf("Number of customer records inserted is: %d\n", insertCount); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (TimeoutException e) { + // If the operation timed out propagate the timeout + throw SpannerExceptionFactory.propagateTimeout(e); + } + } +} +// [END spanner_postgresql_alter_sequence] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgCaseSensitivitySample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgCaseSensitivitySample.java new file mode 100644 index 000000000000..a9096c0c2e5c --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgCaseSensitivitySample.java @@ -0,0 +1,154 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +// [START spanner_postgresql_identifier_case_sensitivity] + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import java.util.Collections; +import java.util.concurrent.ExecutionException; + +public class PgCaseSensitivitySample { + + static void pgCaseSensitivity() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + pgCaseSensitivity(projectId, instanceId, databaseId); + } + + static void pgCaseSensitivity(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService()) { + final DatabaseAdminClient databaseAdminClient = spanner.getDatabaseAdminClient(); + + // Spanner PostgreSQL follows the case sensitivity rules of PostgreSQL. This means that: + // 1. Identifiers that are not double-quoted are folded to lower case. + // 2. Identifiers that are double-quoted retain their case and are case-sensitive. + // See https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS + // for more information. + final OperationFuture updateOperation = + databaseAdminClient.updateDatabaseDdl( + instanceId, + databaseId, + Collections.singleton( + "CREATE TABLE Singers (" + // SingerId will be folded to `singerid`. 
+ + " SingerId bigint NOT NULL PRIMARY KEY," + // FirstName and LastName are double-quoted and will therefore retain their + // mixed case and are case-sensitive. This means that any statement that + // references any of these columns must use double quotes. + + " \"FirstName\" varchar(1024) NOT NULL," + + " \"LastName\" varchar(1024) NOT NULL" + + ")"), + null); + updateOperation.get(); + + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + + client.write( + Collections.singleton( + Mutation.newInsertBuilder("Singers") + .set("singerid") + .to(1L) + // Column names in mutations are always case-insensitive, regardless whether the + // columns were double-quoted or not during creation. + .set("firstname") + .to("Bruce") + .set("lastname") + .to("Allison") + .build())); + + try (ResultSet singers = + client + .singleUse() + .executeQuery( + Statement.of("SELECT SingerId, \"FirstName\", \"LastName\" FROM Singers"))) { + while (singers.next()) { + System.out.printf( + "SingerId: %d, FirstName: %s, LastName: %s\n", + // SingerId is automatically folded to lower case. Accessing the column by its name in + // a result set must therefore use all lower-case letters. + singers.getLong("singerid"), + // FirstName and LastName were double-quoted during creation, and retain their mixed + // case when returned in a result set. + singers.getString("FirstName"), + singers.getString("LastName")); + } + } + + // Aliases are also identifiers, and specifying an alias in double quotes will make the alias + // retain its case. + try (ResultSet singers = + client + .singleUse() + .executeQuery( + Statement.of( + "SELECT " + + "singerid AS \"SingerId\", " + + "concat(\"FirstName\", ' '::varchar, \"LastName\") AS \"FullName\" " + + "FROM Singers"))) { + while (singers.next()) { + System.out.printf( + "SingerId: %d, FullName: %s\n", + // The aliases are double-quoted and therefore retains their mixed case. 
+ singers.getLong("SingerId"), singers.getString("FullName")); + } + } + + // DML statements must also follow the PostgreSQL case rules. + client + .readWriteTransaction() + .run( + transaction -> + transaction.executeUpdate( + Statement.newBuilder( + "INSERT INTO Singers (SingerId, \"FirstName\", \"LastName\") " + + "VALUES ($1, $2, $3)") + .bind("p1") + .to(2L) + .bind("p2") + .to("Alice") + .bind("p3") + .to("Bruxelles") + .build())); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } +} +// [END spanner_postgresql_identifier_case_sensitivity] \ No newline at end of file diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgCreateSequenceSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgCreateSequenceSample.java new file mode 100644 index 000000000000..070f62f70f3e --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgCreateSequenceSample.java @@ -0,0 +1,97 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +// [START spanner_postgresql_create_sequence] +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.common.collect.ImmutableList; +import java.util.Objects; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class PgCreateSequenceSample { + static void pgCreateSequence() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + pgCreateSequence(projectId, instanceId, databaseId); + } + + static void pgCreateSequence(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + final DatabaseAdminClient dbAdminClient = spanner.getDatabaseAdminClient(); + + dbAdminClient + .updateDatabaseDdl( + instanceId, + databaseId, + ImmutableList.of( + "CREATE SEQUENCE Seq BIT_REVERSED_POSITIVE;", + "CREATE TABLE Customers (CustomerId BIGINT DEFAULT nextval('Seq'), " + + "CustomerName character varying(1024), PRIMARY KEY (CustomerId))"), + null) + .get(5, TimeUnit.MINUTES); + + System.out.println( + "Created Seq sequence and Customers table, where its key column " + + "CustomerId uses the sequence as a default value"); + + final DatabaseClient dbClient = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + + Long insertCount = + dbClient + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet rs = + 
transaction.executeQuery( + Statement.of( + "INSERT INTO Customers (CustomerName) VALUES " + + "('Alice'), ('David'), ('Marc') RETURNING CustomerId"))) { + while (rs.next()) { + System.out.printf( + "Inserted customer record with CustomerId: %d\n", rs.getLong(0)); + } + return Objects.requireNonNull(rs.getStats()).getRowCountExact(); + } + }); + System.out.printf("Number of customer records inserted is: %d\n", insertCount); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (TimeoutException e) { + // If the operation timed out propagate the timeout + throw SpannerExceptionFactory.propagateTimeout(e); + } + } +} +// [END spanner_postgresql_create_sequence] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgDropSequenceSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgDropSequenceSample.java new file mode 100644 index 000000000000..26e8eb74bb75 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgDropSequenceSample.java @@ -0,0 +1,67 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +// [START spanner_postgresql_drop_sequence] +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.ImmutableList; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class PgDropSequenceSample { + static void pgDropSequence() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + pgDropSequence(projectId, instanceId, databaseId); + } + + static void pgDropSequence(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + final DatabaseAdminClient dbAdminClient = spanner.getDatabaseAdminClient(); + dbAdminClient + .updateDatabaseDdl( + instanceId, + databaseId, + ImmutableList.of( + "ALTER TABLE Customers ALTER COLUMN CustomerId DROP DEFAULT", + "DROP SEQUENCE Seq"), + null) + .get(5, TimeUnit.MINUTES); + System.out.println( + "Altered Customers table to drop DEFAULT from " + + "CustomerId column and dropped the Seq sequence"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } catch (TimeoutException e) { + // If the operation timed out propagate the timeout + throw SpannerExceptionFactory.propagateTimeout(e); + } + } +} +// [END spanner_postgresql_drop_sequence] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgInterleavedTableSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgInterleavedTableSample.java new file mode 100644 index 000000000000..007843a4a2c5 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgInterleavedTableSample.java @@ -0,0 +1,81 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +// [START spanner_postgresql_interleaved_table] + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import java.util.Arrays; +import java.util.concurrent.ExecutionException; + +public class PgInterleavedTableSample { + + static void pgInterleavedTable() { + // TODO(developer): Replace these variables before running the sample. 
+ final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + pgInterleavedTable(projectId, instanceId, databaseId); + } + + static void pgInterleavedTable(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId(projectId) + .build() + .getService()) { + final DatabaseAdminClient databaseAdminClient = spanner.getDatabaseAdminClient(); + + // The Spanner PostgreSQL dialect extends the PostgreSQL dialect with certain Spanner + // specific features, such as interleaved tables. + // See https://cloud.google.com/spanner/docs/postgresql/data-definition-language#create_table + // for the full CREATE TABLE syntax. + final OperationFuture updateOperation = + databaseAdminClient.updateDatabaseDdl( + instanceId, + databaseId, + Arrays.asList( + "CREATE TABLE Singers (" + + " SingerId bigint NOT NULL PRIMARY KEY," + + " FirstName varchar(1024) NOT NULL," + + " LastName varchar(1024) NOT NULL" + + ")", + "CREATE TABLE Albums (" + + " SingerId bigint NOT NULL," + + " AlbumId bigint NOT NULL," + + " Title varchar(1024) NOT NULL," + + " PRIMARY KEY (SingerId, AlbumId)" + + ") INTERLEAVE IN PARENT Singers ON DELETE CASCADE"), + null); + updateOperation.get(); + System.out.println("Created interleaved table hierarchy using PostgreSQL dialect"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } + } +} +// [END spanner_postgresql_interleaved_table] \ No newline at end of file diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgSpannerSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgSpannerSample.java new file mode 100644 index 000000000000..0ab3a75c9670 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/PgSpannerSample.java @@ -0,0 +1,1584 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.paging.Page; +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.Instance; +import com.google.cloud.spanner.InstanceAdminClient; +import com.google.cloud.spanner.InstanceId; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeyRange; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.ReadOnlyTransaction; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerBatchUpdateException; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.Value; +import com.google.common.io.BaseEncoding; +import com.google.longrunning.Operation; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.spanner.admin.database.v1.CreateBackupMetadata; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.v1.ExecuteSqlRequest; +import java.math.BigDecimal; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; 
+import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +/** + * Example code for using the Cloud Spanner PostgreSQL interface. + */ +public class PgSpannerSample { + // [START spanner_postgresql_insert_data] + static final List SINGERS = + Arrays.asList( + new Singer(1, "Marc", "Richards"), + new Singer(2, "Catalina", "Smith"), + new Singer(3, "Alice", "Trentor"), + new Singer(4, "Lea", "Martin"), + new Singer(5, "David", "Lomond")); + static final List ALBUMS = + Arrays.asList( + new Album(1, 1, "Total Junk"), + new Album(1, 2, "Go, Go, Go"), + new Album(2, 1, "Green"), + new Album(2, 2, "Forever Hold Your Peace"), + new Album(2, 3, "Terrified")); + // [END spanner_postgresql_insert_data] + + /** Class to contain performance sample data. */ + static class Performance { + + final long singerId; + final long venueId; + final String eventDate; + final long revenue; + + Performance(long singerId, long venueId, String eventDate, long revenue) { + this.singerId = singerId; + this.venueId = venueId; + this.eventDate = eventDate; + this.revenue = revenue; + } + } + + // [START spanner_postgresql_insert_data_with_timestamp_column] + static final List PERFORMANCES = + Arrays.asList( + new Performance(1, 4, "2017-10-05", 11000), + new Performance(1, 19, "2017-11-02", 15000), + new Performance(2, 42, "2017-12-23", 7000)); + // [START spanner_postgresql_insert_datatypes_data] + + static Value availableDates1 = + Value.dateArray( + Arrays.asList( + Date.parseDate("2020-12-01"), + Date.parseDate("2020-12-02"), + Date.parseDate("2020-12-03"))); + static Value availableDates2 = + Value.dateArray( + Arrays.asList( + Date.parseDate("2020-11-01"), + Date.parseDate("2020-11-05"), + Date.parseDate("2020-11-15"))); + static Value availableDates3 = + Value.dateArray(Arrays.asList(Date.parseDate("2020-10-01"), Date.parseDate("2020-10-07"))); + // [END spanner_postgresql_insert_data_with_timestamp_column] + static String 
exampleBytes1 = BaseEncoding.base64().encode("Hello World 1".getBytes()); + static String exampleBytes2 = BaseEncoding.base64().encode("Hello World 2".getBytes()); + static String exampleBytes3 = BaseEncoding.base64().encode("Hello World 3".getBytes()); + static final List VENUES = + Arrays.asList( + new Venue( + 4, + "Venue 4", + exampleBytes1, + 1800, + availableDates1, + "2018-09-02", + false, + 0.85543f, + new BigDecimal("215100.10")), + new Venue( + 19, + "Venue 19", + exampleBytes2, + 6300, + availableDates2, + "2019-01-15", + true, + 0.98716f, + new BigDecimal("1200100.00")), + new Venue( + 42, + "Venue 42", + exampleBytes3, + 3000, + availableDates3, + "2018-10-01", + false, + 0.72598f, + new BigDecimal("390650.99"))); + // [END spanner_postgresql_insert_datatypes_data] + + /** Class to contain venue sample data. */ + static class Venue { + + final long venueId; + final String venueName; + final String venueInfo; + final long capacity; + final Value availableDates; + final String lastContactDate; + final boolean outdoorVenue; + final float popularityScore; + final BigDecimal revenue; + + Venue( + long venueId, + String venueName, + String venueInfo, + long capacity, + Value availableDates, + String lastContactDate, + boolean outdoorVenue, + float popularityScore, + BigDecimal revenue) { + this.venueId = venueId; + this.venueName = venueName; + this.venueInfo = venueInfo; + this.capacity = capacity; + this.availableDates = availableDates; + this.lastContactDate = lastContactDate; + this.outdoorVenue = outdoorVenue; + this.popularityScore = popularityScore; + this.revenue = revenue; + } + } + + // [START spanner_postgresql_create_database] + static void createPostgreSqlDatabase(DatabaseAdminClient dbAdminClient, DatabaseId id) { + OperationFuture op = dbAdminClient.createDatabase( + dbAdminClient.newDatabaseBuilder(id).setDialect(Dialect.POSTGRESQL).build(), + Collections.emptyList()); + try { + // Initiate the request which returns an OperationFuture. 
+ Database db = op.get(); + System.out.println("Created database [" + db.getId() + "]"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_postgresql_create_database] + + // [START spanner_postgresql_insert_data] + static void writeExampleData(DatabaseClient dbClient) { + List mutations = new ArrayList<>(); + for (Singer singer : SINGERS) { + mutations.add( + Mutation.newInsertBuilder("Singers") + .set("SingerId") + .to(singer.singerId) + .set("FirstName") + .to(singer.firstName) + .set("LastName") + .to(singer.lastName) + .build()); + } + for (Album album : ALBUMS) { + mutations.add( + Mutation.newInsertBuilder("Albums") + .set("SingerId") + .to(album.singerId) + .set("AlbumId") + .to(album.albumId) + .set("AlbumTitle") + .to(album.albumTitle) + .build()); + } + dbClient.write(mutations); + } + // [END spanner_postgresql_insert_data] + + // [START spanner_postgresql_delete_data] + static void deleteExampleData(DatabaseClient dbClient) { + List mutations = new ArrayList<>(); + + // KeySet.Builder can be used to delete a specific set of rows. + // Delete the Albums with the key values (2,1) and (2,3). + mutations.add( + Mutation.delete( + "Albums", KeySet.newBuilder().addKey(Key.of(2, 1)).addKey(Key.of(2, 3)).build())); + + // KeyRange can be used to delete rows with a key in a specific range. + // Delete a range of rows where the column key is >=3 and <5 + mutations.add( + Mutation.delete("Singers", KeySet.range(KeyRange.closedOpen(Key.of(3), Key.of(5))))); + + // KeySet.all() can be used to delete all the rows in a table. 
+ // Delete remaining Singers rows, which will also delete the remaining Albums rows since it was + // defined with ON DELETE CASCADE. + mutations.add(Mutation.delete("Singers", KeySet.all())); + + dbClient.write(mutations); + System.out.printf("Records deleted.\n"); + } + // [END spanner_postgresql_delete_data] + + // [START spanner_postgresql_query_data] + static void query(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() // Execute a single read or query against Cloud Spanner. + .executeQuery(Statement.of("SELECT SingerId, AlbumId, AlbumTitle FROM Albums"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", resultSet.getLong(0), resultSet.getLong(1), + resultSet.getString(2)); + } + } + } + // [END spanner_postgresql_query_data] + + // [START spanner_postgresql_read_data] + static void read(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() + .read( + "Albums", + KeySet.all(), // Read all rows in a table. + Arrays.asList("SingerId", "AlbumId", "AlbumTitle"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", resultSet.getLong(0), resultSet.getLong(1), + resultSet.getString(2)); + } + } + } + // [END spanner_postgresql_read_data] + + // [START spanner_postgresql_add_column] + static void addMarketingBudget(DatabaseAdminClient adminClient, DatabaseId dbId) { + OperationFuture op = adminClient.updateDatabaseDdl( + dbId.getInstanceId().getInstance(), + dbId.getDatabase(), + Arrays.asList("ALTER TABLE Albums ADD COLUMN MarketingBudget bigint"), + null); + try { + // Initiate the request which returns an OperationFuture. + op.get(); + System.out.println("Added MarketingBudget column"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. 
+ throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_postgresql_add_column] + + // Before executing this method, a new column MarketingBudget has to be added to the Albums + // table by applying the DDL statement "ALTER TABLE Albums ADD COLUMN MarketingBudget INT64". + // [START spanner_postgresql_update_data] + static void update(DatabaseClient dbClient) { + // Mutation can be used to update/insert/delete a single row in a table. Here we use + // newUpdateBuilder to create update mutations. + List mutations = + Arrays.asList( + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(1) + .set("AlbumId") + .to(1) + .set("MarketingBudget") + .to(100000) + .build(), + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(2) + .set("AlbumId") + .to(2) + .set("MarketingBudget") + .to(500000) + .build()); + // This writes all the mutations to Cloud Spanner atomically. + dbClient.write(mutations); + } + // [END spanner_postgresql_update_data] + + // [START spanner_postgresql_read_write_transaction] + static void writeWithTransaction(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + // Transfer marketing budget from one album to another. We do it in a transaction to + // ensure that the transfer is atomic. + Struct row = + transaction.readRow("Albums", Key.of(2, 2), Arrays.asList("MarketingBudget")); + long album2Budget = row.getLong(0); + // Transaction will only be committed if this condition still holds at the time of + // commit. Otherwise it will be aborted and the callable will be rerun by the + // client library. 
+ long transfer = 200000; + if (album2Budget >= transfer) { + long album1Budget = + transaction + .readRow("Albums", Key.of(1, 1), Arrays.asList("MarketingBudget")) + .getLong(0); + album1Budget += transfer; + album2Budget -= transfer; + transaction.buffer( + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(1) + .set("AlbumId") + .to(1) + .set("MarketingBudget") + .to(album1Budget) + .build()); + transaction.buffer( + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(2) + .set("AlbumId") + .to(2) + .set("MarketingBudget") + .to(album2Budget) + .build()); + } + return null; + }); + } + // [END spanner_postgresql_read_write_transaction] + + // [START spanner_postgresql_query_data_with_new_column] + static void queryMarketingBudget(DatabaseClient dbClient) { + // Rows without an explicit value for MarketingBudget will have a MarketingBudget equal to + // null. A try-with-resource block is used to automatically release resources held by + // ResultSet. + try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery(Statement.of("SELECT singerid as \"SingerId\", " + + "albumid as \"AlbumId\", marketingbudget as \"MarketingBudget\" " + + "FROM Albums"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", + resultSet.getLong("SingerId"), + resultSet.getLong("AlbumId"), + // We check that the value is non null. ResultSet getters can only be used to retrieve + // non null values. + resultSet.isNull("MarketingBudget") ? "NULL" : + resultSet.getLong("MarketingBudget")); + } + } + } + // [END spanner_postgresql_query_data_with_new_column] + + // [START spanner_postgresql_create_index] + static void addIndex(DatabaseAdminClient adminClient, DatabaseId dbId) { + OperationFuture op = + adminClient.updateDatabaseDdl( + dbId.getInstanceId().getInstance(), + dbId.getDatabase(), + Arrays.asList("CREATE INDEX AlbumsByAlbumTitle ON Albums(AlbumTitle)"), + null); + try { + // Initiate the request which returns an OperationFuture. 
+ op.get(); + System.out.println("Added AlbumsByAlbumTitle index"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_postgresql_create_index] + + // [START spanner_postgresql_read_data_with_index] + static void readUsingIndex(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() + .readUsingIndex( + "Albums", + "AlbumsByAlbumTitle", + KeySet.all(), + Arrays.asList("AlbumId", "AlbumTitle"))) { + while (resultSet.next()) { + System.out.printf("%d %s\n", resultSet.getLong(0), resultSet.getString(1)); + } + } + } + // [END spanner_postgresql_read_data_with_index] + + // [START spanner_postgresql_create_storing_index] + static void addStoringIndex(DatabaseAdminClient adminClient, DatabaseId dbId) { + OperationFuture op = adminClient.updateDatabaseDdl( + dbId.getInstanceId().getInstance(), + dbId.getDatabase(), + Arrays.asList( + "CREATE INDEX AlbumsByAlbumTitle2 ON Albums(AlbumTitle) " + + "INCLUDE (MarketingBudget)"), + null); + try { + // Initiate the request which returns an OperationFuture. + op.get(); + System.out.println("Added AlbumsByAlbumTitle2 index"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_postgresql_create_storing_index] + + // Before running this example, create a storing index AlbumsByAlbumTitle2 by applying the DDL + // statement "CREATE INDEX AlbumsByAlbumTitle2 ON Albums(AlbumTitle) INCLUDE (MarketingBudget)". + // [START spanner_postgresql_read_data_with_storing_index] + static void readStoringIndex(DatabaseClient dbClient) { + // We can read MarketingBudget also from the index since it stores a copy of MarketingBudget. + try (ResultSet resultSet = + dbClient + .singleUse() + .readUsingIndex( + "Albums", + "AlbumsByAlbumTitle2", + KeySet.all(), + Arrays.asList("AlbumId", "AlbumTitle", "MarketingBudget"))) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong(0), + resultSet.getString(1), + resultSet.isNull("marketingbudget") ? "NULL" : resultSet.getLong(2)); + } + } + } + // [END spanner_postgresql_read_data_with_storing_index] + + // [START spanner_postgresql_read_only_transaction] + static void readOnlyTransaction(DatabaseClient dbClient) { + // ReadOnlyTransaction must be closed by calling close() on it to release resources held by it. + // We use a try-with-resource block to automatically do so. 
+ try (ReadOnlyTransaction transaction = dbClient.readOnlyTransaction()) { + ResultSet queryResultSet = + transaction.executeQuery( + Statement.of("SELECT SingerId, AlbumId, AlbumTitle FROM Albums")); + while (queryResultSet.next()) { + System.out.printf( + "%d %d %s\n", + queryResultSet.getLong(0), queryResultSet.getLong(1), + queryResultSet.getString(2)); + } + try (ResultSet readResultSet = + transaction.read( + "Albums", KeySet.all(), Arrays.asList("SingerId", "AlbumId", "AlbumTitle"))) { + while (readResultSet.next()) { + System.out.printf( + "%d %d %s\n", + readResultSet.getLong(0), readResultSet.getLong(1), + readResultSet.getString(2)); + } + } + } + } + // [END spanner_postgresql_read_only_transaction] + + // [START spanner_postgresql_query_singers_table] + static void querySingersTable(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery(Statement.of("SELECT singerid as \"SingerId\", " + + "firstname as \"FirstName\", lastname as \"LastName\" FROM Singers"))) { + while (resultSet.next()) { + System.out.printf( + "%s %s %s\n", + resultSet.getLong("SingerId"), + resultSet.getString("FirstName"), + resultSet.getString("LastName")); + } + } + } + // [END spanner_postgresql_query_singers_table] + + + // [START spanner_postgresql_dml_getting_started_insert] + static void writeUsingDml(DatabaseClient dbClient) { + // Insert 4 singer records + dbClient + .readWriteTransaction() + .run(transaction -> { + String sql = + "INSERT INTO Singers (SingerId, FirstName, LastName) VALUES " + + "(12, 'Melissa', 'Garcia'), " + + "(13, 'Russell', 'Morales'), " + + "(14, 'Jacqueline', 'Long'), " + + "(15, 'Dylan', 'Shaw')"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d records inserted.\n", rowCount); + return null; + }); + } + // [END spanner_postgresql_dml_getting_started_insert] + + // [START spanner_postgresql_query_with_parameter] + static void queryWithParameter(DatabaseClient dbClient) 
{ + Statement statement = + Statement.newBuilder( + "SELECT singerid AS \"SingerId\", " + + "firstname as \"FirstName\", lastname as \"LastName\" " + + "FROM Singers " + + "WHERE LastName = $1") + .bind("p1") + .to("Garcia") + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong("SingerId"), + resultSet.getString("FirstName"), + resultSet.getString("LastName")); + } + } + } + // [END spanner_postgresql_query_with_parameter] + + // [START spanner_postgresql_dml_getting_started_update] + static void writeWithTransactionUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + // Transfer marketing budget from one album to another. We do it in a transaction to + // ensure that the transfer is atomic. + String sql1 = + "SELECT marketingbudget as \"MarketingBudget\" from Albums WHERE " + + "SingerId = 2 and AlbumId = 2"; + ResultSet resultSet = transaction.executeQuery(Statement.of(sql1)); + long album2Budget = 0; + while (resultSet.next()) { + album2Budget = resultSet.getLong("MarketingBudget"); + } + // Transaction will only be committed if this condition still holds at the time of + // commit. Otherwise it will be aborted and the callable will be rerun by the + // client library. 
+ long transfer = 200000; + if (album2Budget >= transfer) { + String sql2 = + "SELECT marketingbudget as \"MarketingBudget\" from Albums WHERE " + + "SingerId = 1 and AlbumId = 1"; + ResultSet resultSet2 = transaction.executeQuery(Statement.of(sql2)); + long album1Budget = 0; + while (resultSet2.next()) { + album1Budget = resultSet2.getLong("MarketingBudget"); + } + album1Budget += transfer; + album2Budget -= transfer; + Statement updateStatement = + Statement.newBuilder( + "UPDATE Albums " + + "SET MarketingBudget = $1 " + + "WHERE SingerId = 1 and AlbumId = 1") + .bind("p1") + .to(album1Budget) + .build(); + transaction.executeUpdate(updateStatement); + Statement updateStatement2 = + Statement.newBuilder( + "UPDATE Albums " + + "SET MarketingBudget = $1 " + + "WHERE SingerId = 2 and AlbumId = 2") + .bind("p1") + .to(album2Budget) + .build(); + transaction.executeUpdate(updateStatement2); + } + return null; + }); + } + // [END spanner_postgresql_dml_getting_started_update] + + // [START spanner_postgresql_create_table_using_ddl] + // [START spanner_postgresql_create_database] + static void createTableUsingDdl(DatabaseAdminClient dbAdminClient, DatabaseId id) { + OperationFuture op = + dbAdminClient.updateDatabaseDdl( + id.getInstanceId().getInstance(), + id.getDatabase(), + Arrays.asList( + "CREATE TABLE Singers (" + + " SingerId bigint NOT NULL," + + " FirstName character varying(1024)," + + " LastName character varying(1024)," + + " SingerInfo bytea," + + " FullName character varying(2048) GENERATED " + + " ALWAYS AS (FirstName || ' ' || LastName) STORED," + + " PRIMARY KEY (SingerId)" + + ")", + "CREATE TABLE Albums (" + + " SingerId bigint NOT NULL," + + " AlbumId bigint NOT NULL," + + " AlbumTitle character varying(1024)," + + " PRIMARY KEY (SingerId, AlbumId)" + + ") INTERLEAVE IN PARENT Singers ON DELETE CASCADE"), + null); + try { + // Initiate the request which returns an OperationFuture. 
+ op.get(); + System.out.println("Created Singers & Albums tables in database: [" + id + "]"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_postgresql_create_database] + // [END spanner_postgresql_create_table_using_ddl] + + // [START spanner_postgresql_read_stale_data] + static void readStaleData(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse(TimestampBound.ofExactStaleness(15, TimeUnit.SECONDS)) + .read( + "Albums", KeySet.all(), + Arrays.asList("SingerId", "AlbumId", "MarketingBudget"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", + resultSet.getLong(0), + resultSet.getLong(1), + resultSet.isNull(2) ? "NULL" : resultSet.getLong(2)); + } + } + } + // [END spanner_postgresql_read_stale_data] + + // Before executing this method, a new column MarketingBudget has to be added to the Albums + // table by applying the DDL statement "ALTER TABLE Albums ADD COLUMN MarketingBudget BIGINT". + // In addition this update expects the LastUpdateTime column added by applying the DDL statement + // "ALTER TABLE Albums ADD COLUMN LastUpdateTime TIMESTAMPTZ" + // [START spanner_postgresql_update_data_with_timestamp_column] + static void updateWithTimestamp(DatabaseClient dbClient) { + // Mutation can be used to update/insert/delete a single row in a table. Here we use + // newUpdateBuilder to create update mutations. 
+ List mutations = + Arrays.asList( + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(1) + .set("AlbumId") + .to(1) + .set("MarketingBudget") + .to(1000000) + .set("LastUpdateTime") + .to(Value.COMMIT_TIMESTAMP) + .build(), + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(2) + .set("AlbumId") + .to(2) + .set("MarketingBudget") + .to(750000) + .set("LastUpdateTime") + .to(Value.COMMIT_TIMESTAMP) + .build()); + // This writes all the mutations to Cloud Spanner atomically. + dbClient.write(mutations); + } + // [END spanner_postgresql_update_data_with_timestamp_column] + + // [START spanner_postgresql_add_timestamp_column] + static void addLastUpdateTimestampColumn(DatabaseAdminClient adminClient, DatabaseId dbId) { + OperationFuture op = + adminClient.updateDatabaseDdl( + dbId.getInstanceId().getInstance(), + dbId.getDatabase(), + Arrays.asList( + "ALTER TABLE Albums ADD COLUMN LastUpdateTime spanner.commit_timestamp"), + null); + try { + // Initiate the request which returns an OperationFuture. + op.get(); + System.out.println("Added LastUpdateTime as a timestamp column in Albums table."); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_postgresql_add_timestamp_column] + + // [START spanner_postgresql_query_data_with_timestamp_column] + static void queryMarketingBudgetWithTimestamp(DatabaseClient dbClient) { + // Rows without an explicit value for MarketingBudget will have a MarketingBudget equal to + // null. A try-with-resource block is used to automatically release resources held by + // ResultSet. 
+ try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery( + Statement.of( + "SELECT singerid as \"SingerId\", albumid as \"AlbumId\", " + + "marketingbudget as \"MarketingBudget\"," + + "lastupdatetime as \"LastUpdateTime\" FROM Albums" + + " ORDER BY LastUpdateTime DESC"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s %s\n", + resultSet.getLong("SingerId"), + resultSet.getLong("AlbumId"), + // We check that the value is non null. ResultSet getters can only be used to retrieve + // non null values. + resultSet.isNull("MarketingBudget") ? "NULL" : resultSet.getLong("MarketingBudget"), + resultSet.isNull("LastUpdateTime") ? "NULL" : resultSet.getTimestamp("LastUpdateTime")); + } + } + } + // [END spanner_postgresql_query_data_with_timestamp_column] + + // [START spanner_postgresql_create_table_with_timestamp_column] + static void createTableWithTimestamp(DatabaseAdminClient dbAdminClient, DatabaseId id) { + OperationFuture op = + dbAdminClient.updateDatabaseDdl( + id.getInstanceId().getInstance(), + id.getDatabase(), + Arrays.asList( + "CREATE TABLE Performances (" + + " SingerId BIGINT NOT NULL," + + " VenueId BIGINT NOT NULL," + + " Revenue BIGINT," + + " LastUpdateTime SPANNER.COMMIT_TIMESTAMP NOT NULL," + + " PRIMARY KEY (SingerId, VenueId))" + + " INTERLEAVE IN PARENT Singers ON DELETE CASCADE"), + null); + try { + // Initiate the request which returns an OperationFuture. + op.get(); + System.out.println("Created Performances table in database: [" + id + "]"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_postgresql_create_table_with_timestamp_column] + + // [START spanner_postgresql_insert_data_with_timestamp_column] + static void writeExampleDataWithTimestamp(DatabaseClient dbClient) { + List mutations = new ArrayList<>(); + for (Performance performance : PERFORMANCES) { + mutations.add( + Mutation.newInsertBuilder("Performances") + .set("SingerId") + .to(performance.singerId) + .set("VenueId") + .to(performance.venueId) + .set("Revenue") + .to(performance.revenue) + .set("LastUpdateTime") + .to(Value.COMMIT_TIMESTAMP) + .build()); + } + dbClient.write(mutations); + } + // [END spanner_postgresql_insert_data_with_timestamp_column] + + static void queryPerformancesTable(DatabaseClient dbClient) { + // Rows without an explicit value for Revenue will have a Revenue equal to + // null. A try-with-resource block is used to automatically release resources held by + // ResultSet. + try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery( + Statement.of( + "SELECT singerid as \"SingerId\", venueid as \"VenueId\", " + + "revenue as \"Revenue\", lastupdatetime as \"LastUpdateTime\" " + + "FROM Performances ORDER BY LastUpdateTime DESC"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s %s\n", + resultSet.getLong("SingerId"), + resultSet.getLong("VenueId"), + // We check that the value is non null. ResultSet getters can only be used to retrieve + // non null values. + resultSet.isNull("Revenue") ? 
"NULL" : resultSet.getLong("Revenue"), + resultSet.getTimestamp("LastUpdateTime")); + } + } + } + + // [START spanner_postgresql_dml_standard_insert] + static void insertUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + String sql = + "INSERT INTO Singers (SingerId, FirstName, LastName) " + + " VALUES (10, 'Virginia', 'Watson')"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record inserted.\n", rowCount); + return null; + }); + } + // [END spanner_postgresql_dml_standard_insert] + + // [START spanner_postgresql_dml_standard_update] + static void updateUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + String sql = + "UPDATE Albums " + + "SET MarketingBudget = MarketingBudget * 2 " + + "WHERE SingerId = 1 and AlbumId = 1"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record updated.\n", rowCount); + return null; + }); + } + // [END spanner_postgresql_dml_standard_update] + + // [START spanner_postgresql_dml_standard_delete] + static void deleteUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + String sql = "DELETE FROM Singers WHERE FirstName = 'Alice'"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record deleted.\n", rowCount); + return null; + }); + } + // [END spanner_postgresql_dml_standard_delete] + + // [START spanner_postgresql_dml_write_then_read] + static void writeAndReadUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + // Insert record. + String sql = + "INSERT INTO Singers (SingerId, FirstName, LastName) " + + " VALUES (11, 'Timothy', 'Campbell')"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record inserted.\n", rowCount); + // Read newly inserted record. 
+ sql = "SELECT firstname as \"FirstName\", lastname as \"LastName\" FROM Singers WHERE " + + "SingerId = 11"; + // We use a try-with-resource block to automatically release resources held by + // ResultSet. + try (ResultSet resultSet = transaction.executeQuery(Statement.of(sql))) { + while (resultSet.next()) { + System.out.printf( + "%s %s\n", + resultSet.getString("FirstName"), resultSet.getString("LastName")); + } + } + return null; + }); + } + // [END spanner_postgresql_dml_write_then_read] + + // [START spanner_postgresql_dml_partitioned_update] + static void updateUsingPartitionedDml(DatabaseClient dbClient) { + String sql = "UPDATE Albums SET MarketingBudget = 100000 WHERE SingerId > 1"; + long rowCount = dbClient.executePartitionedUpdate(Statement.of(sql)); + System.out.printf("%d records updated.\n", rowCount); + } + // [END spanner_postgresql_dml_partitioned_update] + + // [START spanner_postgresql_dml_partitioned_delete] + static void deleteUsingPartitionedDml(DatabaseClient dbClient) { + String sql = "DELETE FROM Singers WHERE SingerId > 10"; + long rowCount = dbClient.executePartitionedUpdate(Statement.of(sql)); + System.out.printf("%d records deleted.\n", rowCount); + } + // [END spanner_postgresql_dml_partitioned_delete] + + // [START spanner_postgresql_dml_batch_update] + static void updateUsingBatchDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + List stmts = new ArrayList(); + String sql = + "INSERT INTO Albums " + + "(SingerId, AlbumId, AlbumTitle, MarketingBudget) " + + "VALUES (1, 3, 'Test Album Title', 10000) "; + stmts.add(Statement.of(sql)); + sql = + "UPDATE Albums " + + "SET MarketingBudget = MarketingBudget * 2 " + + "WHERE SingerId = 1 and AlbumId = 3"; + stmts.add(Statement.of(sql)); + long[] rowCounts; + try { + rowCounts = transaction.batchUpdate(stmts); + } catch (SpannerBatchUpdateException e) { + rowCounts = e.getUpdateCounts(); + } + for (int i = 0; i < rowCounts.length; i++) { + 
System.out.printf("%d record updated by stmt %d.\n", rowCounts[i], i); + } + return null; + }); + } + // [END spanner_postgresql_dml_batch_update] + + // [START spanner_postgresql_create_table_with_datatypes] + static void createTableWithDatatypes(DatabaseAdminClient dbAdminClient, DatabaseId id) { + OperationFuture op = + dbAdminClient.updateDatabaseDdl( + id.getInstanceId().getInstance(), + id.getDatabase(), + Arrays.asList( + "CREATE TABLE Venues (" + + " VenueId BIGINT NOT NULL," + + " VenueName character varying(100)," + + " VenueInfo bytea," + + " Capacity BIGINT," + + " OutdoorVenue BOOL, " + + " PopularityScore FLOAT8, " + + " Revenue NUMERIC, " + + " LastUpdateTime SPANNER.COMMIT_TIMESTAMP NOT NULL," + + " PRIMARY KEY (VenueId))"), + null); + try { + // Initiate the request which returns an OperationFuture. + op.get(); + System.out.println("Created Venues table in database: [" + id + "]"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_postgresql_create_table_with_datatypes] + + // [START spanner_postgresql_insert_datatypes_data] + static void writeDatatypesData(DatabaseClient dbClient) { + List mutations = new ArrayList<>(); + for (Venue venue : VENUES) { + mutations.add( + Mutation.newInsertBuilder("Venues") + .set("VenueId") + .to(venue.venueId) + .set("VenueName") + .to(venue.venueName) + .set("VenueInfo") + .to(venue.venueInfo) + .set("Capacity") + .to(venue.capacity) + .set("OutdoorVenue") + .to(venue.outdoorVenue) + .set("PopularityScore") + .to(venue.popularityScore) + .set("Revenue") + .to(venue.revenue) + .set("LastUpdateTime") + .to(Value.COMMIT_TIMESTAMP) + .build()); + } + dbClient.write(mutations); + } + // [END spanner_postgresql_insert_datatypes_data] + + // [START spanner_postgresql_query_with_bool_parameter] + static void queryWithBool(DatabaseClient dbClient) { + boolean exampleBool = true; + Statement statement = + Statement.newBuilder( + "SELECT venueid as \"VenueId\", venuename as \"VenueName\"," + + " outdoorvenue as \"OutdoorVenue\" FROM Venues " + + "WHERE OutdoorVenue = $1") + .bind("p1") + .to(exampleBool) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %b\n", + resultSet.getLong("VenueId"), + resultSet.getString("VenueName"), + resultSet.getBoolean("OutdoorVenue")); + } + } + } + // [END spanner_postgresql_query_with_bool_parameter] + + // [START spanner_postgresql_query_with_bytes_parameter] + static void queryWithBytes(DatabaseClient dbClient) { + ByteArray exampleBytes = + ByteArray.fromBase64(BaseEncoding.base64().encode("Hello World 1".getBytes())); + Statement statement = + Statement.newBuilder( + "SELECT venueid as \"VenueId\", venuename as \"VenueName\" FROM Venues " + + "WHERE VenueInfo = $1") + .bind("p1") + .to(exampleBytes) + .build(); + try (ResultSet resultSet = 
dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s\n", resultSet.getLong("VenueId"), resultSet.getString("VenueName")); + } + } + } + // [END spanner_postgresql_query_with_bytes_parameter] + + // [START spanner_postgresql_query_with_float_parameter] + static void queryWithFloat(DatabaseClient dbClient) { + float exampleFloat = 0.8f; + Statement statement = + Statement.newBuilder( + "SELECT venueid as \"VenueId\", venuename as \"VenueName\", " + + "popularityscore as \"PopularityScore\" FROM Venues " + + "WHERE PopularityScore > $1") + .bind("p1") + .to(exampleFloat) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %f\n", + resultSet.getLong("VenueId"), + resultSet.getString("VenueName"), + resultSet.getDouble("PopularityScore")); + } + } + } + // [END spanner_postgresql_query_with_float_parameter] + + // [START spanner_postgresql_query_with_int_parameter] + static void queryWithInt(DatabaseClient dbClient) { + long exampleInt = 3000; + Statement statement = + Statement.newBuilder( + "SELECT venueid as \"VenueId\", venuename as \"VenueName\", " + + "capacity as \"Capacity\" " + + "FROM Venues " + "WHERE Capacity >= $1") + .bind("p1") + .to(exampleInt) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %d\n", + resultSet.getLong("VenueId"), + resultSet.getString("VenueName"), + resultSet.getLong("Capacity")); + } + } + } + // [END spanner_postgresql_query_with_int_parameter] + + // [START spanner_postgresql_query_with_string_parameter] + static void queryWithString(DatabaseClient dbClient) { + String exampleString = "Venue 42"; + Statement statement = + Statement.newBuilder( + "SELECT venueid as \"VenueId\", venuename as \"VenueName\" FROM Venues WHERE" + + " VenueName = $1") + .bind("p1") + .to(exampleString) + 
.build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s\n", resultSet.getLong("VenueId"), resultSet.getString("VenueName")); + } + } + } + // [END spanner_postgresql_query_with_string_parameter] + + // [START spanner_postgresql_query_with_timestamp_parameter] + static void queryWithTimestampParameter(DatabaseClient dbClient) { + Statement statement = + Statement.newBuilder( + "SELECT venueid as \"VenueId\", venuename as \"VenueName\", " + + "lastupdatetime as \"LastUpdateTime\" FROM Venues " + + "WHERE LastUpdateTime < $1") + .bind("p1") + .to(Timestamp.now()) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong("VenueId"), + resultSet.getString("VenueName"), + resultSet.getTimestamp("LastUpdateTime")); + } + } + } + // [END spanner_postgresql_query_with_timestamp_parameter] + + // [START spanner_postgresql_query_with_numeric_parameter] + static void queryWithNumeric(DatabaseClient dbClient) { + Statement statement = + Statement.newBuilder( + "SELECT venueid as \"VenueId\", venuename as \"VenueName\", " + + "revenue as \"Revenue\" FROM Venues\n" + + "WHERE Revenue >= $1") + .bind("p1") + .to(Value.pgNumeric("300000")) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s%n", + resultSet.getLong("VenueId"), + resultSet.getString("VenueName"), + resultSet.getValue("Revenue")); + } + } + } + // [END spanner_postgresql_query_with_numeric_parameter] + + // [START spanner_postgresql_create_client_with_query_options] + static void clientWithQueryOptions(DatabaseId db) { + SpannerOptions options = + SpannerOptions.newBuilder() + .setDefaultQueryOptions( + db, ExecuteSqlRequest.QueryOptions + .newBuilder() + .setOptimizerVersion("1") + // The list of available statistics 
packages can be found by querying the + // "INFORMATION_SCHEMA.spanner_postgresql_STATISTICS" table. + .setOptimizerStatisticsPackage("latest") + .build()) + .build(); + Spanner spanner = options.getService(); + DatabaseClient dbClient = spanner.getDatabaseClient(db); + try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery(Statement.of("SELECT SingerId, AlbumId, AlbumTitle FROM Albums"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", resultSet.getLong(0), resultSet.getLong(1), resultSet.getString(2)); + } + } + } + // [END spanner_postgresql_create_client_with_query_options] + + // [START spanner_postgresql_query_with_query_options] + static void queryWithQueryOptions(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery( + Statement + .newBuilder("SELECT SingerId, AlbumId, AlbumTitle FROM Albums") + .withQueryOptions(ExecuteSqlRequest.QueryOptions + .newBuilder() + .setOptimizerVersion("1") + // The list of available statistics packages can be found by querying + // the "INFORMATION_SCHEMA.spanner_postgresql_STATISTICS" table. + .setOptimizerStatisticsPackage("latest") + .build()) + .build())) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", resultSet.getLong(0), resultSet.getLong(1), resultSet.getString(2)); + } + } + } + // [END spanner_postgresql_query_with_query_options] + + // [START spanner_postgresql_list_backup_operations] + static void listBackupOperations(InstanceAdminClient instanceAdminClient, DatabaseId databaseId) { + Instance instance = instanceAdminClient.getInstance(databaseId.getInstanceId().getInstance()); + // Get create backup operations for the sample database. 
+ Timestamp last24Hours = Timestamp.ofTimeSecondsAndNanos(TimeUnit.SECONDS.convert( + TimeUnit.HOURS.convert(Timestamp.now().getSeconds(), TimeUnit.SECONDS) - 24, + TimeUnit.HOURS), 0); + String filter = + String.format( + "(metadata.database:%s) AND " + + "(metadata.@type:type.googleapis.com/" + + "google.spanner.admin.database.v1.CreateBackupMetadata) AND " + + "(metadata.progress.start_time > \"%s\")", + databaseId.getName(), last24Hours); + Page operations = instance + .listBackupOperations(Options.filter(filter)); + for (com.google.longrunning.Operation op : operations.iterateAll()) { + try { + CreateBackupMetadata metadata = op.getMetadata().unpack(CreateBackupMetadata.class); + System.out.println( + String.format( + "Backup %s on database %s pending: %d%% complete", + metadata.getName(), + metadata.getDatabase(), + metadata.getProgress().getProgressPercent())); + } catch (InvalidProtocolBufferException e) { + // The returned operation does not contain CreateBackupMetadata. + System.err.println(e.getMessage()); + } + } + } + // [END spanner_postgresql_list_backup_operations] + + // [START spanner_postgresql_list_database_operations] + static void listDatabaseOperations( + InstanceAdminClient instanceAdminClient, + DatabaseAdminClient dbAdminClient, + InstanceId instanceId) { + Instance instance = instanceAdminClient.getInstance(instanceId.getInstance()); + // Get optimize restored database operations. 
+ Timestamp last24Hours = Timestamp.ofTimeSecondsAndNanos(TimeUnit.SECONDS.convert( + TimeUnit.HOURS.convert(Timestamp.now().getSeconds(), TimeUnit.SECONDS) - 24, + TimeUnit.HOURS), 0); + String filter = String.format("(metadata.@type:type.googleapis.com/" + + "google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata) AND " + + "(metadata.progress.start_time > \"%s\")", last24Hours); + for (Operation op : instance.listDatabaseOperations(Options.filter(filter)).iterateAll()) { + try { + OptimizeRestoredDatabaseMetadata metadata = + op.getMetadata().unpack(OptimizeRestoredDatabaseMetadata.class); + System.out.println(String.format( + "Database %s restored from backup is %d%% optimized", + metadata.getName(), + metadata.getProgress().getProgressPercent())); + } catch (InvalidProtocolBufferException e) { + // The returned operation does not contain OptimizeRestoredDatabaseMetadata. + System.err.println(e.getMessage()); + } + } + } + // [END spanner_postgresql_list_database_operations] + + static void run( + DatabaseClient dbClient, + DatabaseAdminClient dbAdminClient, + InstanceAdminClient instanceAdminClient, + String command, + DatabaseId database) { + switch (command) { + case "createdatabase": + createPostgreSqlDatabase(dbAdminClient, database); + break; + case "write": + writeExampleData(dbClient); + break; + case "delete": + deleteExampleData(dbClient); + break; + case "query": + query(dbClient); + break; + case "read": + read(dbClient); + break; + case "addmarketingbudget": + addMarketingBudget(dbAdminClient, database); + break; + case "update": + update(dbClient); + break; + case "writetransaction": + writeWithTransaction(dbClient); + break; + case "querymarketingbudget": + queryMarketingBudget(dbClient); + break; + case "addindex": + addIndex(dbAdminClient, database); + break; + case "readindex": + readUsingIndex(dbClient); + break; + case "addstoringindex": + addStoringIndex(dbAdminClient, database); + break; + case "readstoringindex": + 
readStoringIndex(dbClient); + break; + case "readonlytransaction": + readOnlyTransaction(dbClient); + break; + case "querysingerstable": + querySingersTable(dbClient); + break; + case "writeusingdml": + writeUsingDml(dbClient); + break; + case "querywithparameter": + queryWithParameter(dbClient); + break; + case "writewithtransactionusingdml": + writeWithTransactionUsingDml(dbClient); + break; + case "createtableusingddl": + createTableUsingDdl(dbAdminClient, database); + break; + case "readstaledata": + readStaleData(dbClient); + break; + case "addlastupdatetimestampcolumn": + addLastUpdateTimestampColumn(dbAdminClient, database); + break; + case "updatewithtimestamp": + updateWithTimestamp(dbClient); + break; + case "querywithtimestamp": + queryMarketingBudgetWithTimestamp(dbClient); + break; + case "createtablewithtimestamp": + createTableWithTimestamp(dbAdminClient, database); + break; + case "writewithtimestamp": + writeExampleDataWithTimestamp(dbClient); + break; + case "queryperformancestable": + queryPerformancesTable(dbClient); + break; + case "insertusingdml": + insertUsingDml(dbClient); + break; + case "updateusingdml": + updateUsingDml(dbClient); + break; + case "deleteusingdml": + deleteUsingDml(dbClient); + break; + case "writeandreadusingdml": + writeAndReadUsingDml(dbClient); + break; + case "updateusingpartitioneddml": + updateUsingPartitionedDml(dbClient); + break; + case "deleteusingpartitioneddml": + deleteUsingPartitionedDml(dbClient); + break; + case "updateusingbatchdml": + updateUsingBatchDml(dbClient); + break; + case "createtablewithdatatypes": + createTableWithDatatypes(dbAdminClient, database); + break; + case "writedatatypesdata": + writeDatatypesData(dbClient); + break; + case "querywithbool": + queryWithBool(dbClient); + break; + case "querywithbytes": + queryWithBytes(dbClient); + break; + case "querywithfloat": + queryWithFloat(dbClient); + break; + case "querywithint": + queryWithInt(dbClient); + break; + case "querywithstring": + 
queryWithString(dbClient); + break; + case "querywithtimestampparameter": + queryWithTimestampParameter(dbClient); + break; + case "querywithnumeric": + queryWithNumeric(dbClient); + break; + case "clientwithqueryoptions": + clientWithQueryOptions(database); + break; + case "querywithqueryoptions": + queryWithQueryOptions(dbClient); + break; + case "listbackupoperations": + listBackupOperations(instanceAdminClient, database); + break; + case "listdatabaseoperations": + listDatabaseOperations(instanceAdminClient, dbAdminClient, database.getInstanceId()); + break; + default: + printUsageAndExit(); + } + } + + static void printUsageAndExit() { + System.err.println("Usage:"); + System.err.println(" PgSpannerExample "); + System.err.println(); + System.err.println("Examples:"); + System.err.println(" PgSpannerExample createdatabase my-instance example-db"); + System.err.println(" PgSpannerExample write my-instance example-db"); + System.err.println(" PgSpannerExample delete my-instance example-db"); + System.err.println(" PgSpannerExample query my-instance example-db"); + System.err.println(" PgSpannerExample read my-instance example-db"); + System.err.println(" PgSpannerExample addmarketingbudget my-instance example-db"); + System.err.println(" PgSpannerExample update my-instance example-db"); + System.err.println(" PgSpannerExample writetransaction my-instance example-db"); + System.err.println(" PgSpannerExample querymarketingbudget my-instance example-db"); + System.err.println(" PgSpannerExample addindex my-instance example-db"); + System.err.println(" PgSpannerExample readindex my-instance example-db"); + System.err.println(" PgSpannerExample addstoringindex my-instance example-db"); + System.err.println(" PgSpannerExample readstoringindex my-instance example-db"); + System.err.println(" PgSpannerExample readonlytransaction my-instance example-db"); + System.err.println(" PgSpannerExample querysingerstable my-instance example-db"); + System.err.println(" 
PgSpannerExample writeusingdml my-instance example-db"); + System.err.println(" PgSpannerExample querywithparameter my-instance example-db"); + System.err.println(" PgSpannerExample writewithtransactionusingdml my-instance example-db"); + System.err.println(" PgSpannerExample createtableforsamples my-instance example-db"); + System.err.println(" PgSpannerExample writewithtimestamp my-instance example-db"); + System.err.println(" PgSpannerExample queryperformancestable my-instance example-db"); + System.err.println(" PgSpannerExample writestructdata my-instance example-db"); + System.err.println(" PgSpannerExample insertusingdml my-instance example-db"); + System.err.println(" PgSpannerExample updateusingdml my-instance example-db"); + System.err.println(" PgSpannerExample deleteusingdml my-instance example-db"); + System.err.println(" PgSpannerExample writeandreadusingdml my-instance example-db"); + System.err.println(" PgSpannerExample writeusingdml my-instance example-db"); + System.err.println(" PgSpannerExample deleteusingpartitioneddml my-instance example-db"); + System.err.println(" PgSpannerExample updateusingbatchdml my-instance example-db"); + System.err.println(" PgSpannerExample createtablewithdatatypes my-instance example-db"); + System.err.println(" PgSpannerExample writedatatypesdata my-instance example-db"); + System.err.println(" PgSpannerExample querywithbool my-instance example-db"); + System.err.println(" PgSpannerExample querywithbytes my-instance example-db"); + System.err.println(" PgSpannerExample querywithfloat my-instance example-db"); + System.err.println(" PgSpannerExample querywithint my-instance example-db"); + System.err.println(" PgSpannerExample querywithstring my-instance example-db"); + System.err.println(" PgSpannerExample querywithtimestampparameter my-instance example-db"); + System.err.println(" PgSpannerExample clientwithqueryoptions my-instance example-db"); + System.err.println(" PgSpannerExample querywithqueryoptions 
my-instance example-db"); + System.err.println(" PgSpannerExample listbackupoperations my-instance example-db"); + System.err.println(" PgSpannerExample listdatabaseoperations my-instance example-db"); + System.exit(1); + } + + public static void main(String[] args) { + if (args.length != 3) { + printUsageAndExit(); + } + // [START spanner_init_client] + SpannerOptions options = SpannerOptions.newBuilder().build(); + Spanner spanner = options.getService(); + try { + // [END spanner_init_client] + String command = args[0]; + DatabaseId db = DatabaseId.of(options.getProjectId(), args[1], args[2]); + + // This will return the default project id based on the environment. + String clientProject = spanner.getOptions().getProjectId(); + if (!db.getInstanceId().getProject().equals(clientProject)) { + System.err.println( + "Invalid project specified. Project in the database id should match the" + + "project name set in the environment variable GOOGLE_CLOUD_PROJECT. Expected: " + + clientProject); + printUsageAndExit(); + } + // [START spanner_init_client] + DatabaseClient dbClient = spanner.getDatabaseClient(db); + DatabaseAdminClient dbAdminClient = spanner.getDatabaseAdminClient(); + InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient(); + // [END spanner_init_client] + + // Use client here... + run(dbClient, dbAdminClient, instanceAdminClient, command, db); + // [START spanner_init_client] + } finally { + spanner.close(); + } + // [END spanner_init_client] + System.out.println("Closed client"); + } + + /** Class to contain singer sample data. */ + static class Singer { + + final long singerId; + final String firstName; + final String lastName; + + Singer(long singerId, String firstName, String lastName) { + this.singerId = singerId; + this.firstName = firstName; + this.lastName = lastName; + } + } + + /** Class to contain album sample data. 
*/ + static class Album { + + final long singerId; + final long albumId; + final String albumTitle; + + Album(long singerId, long albumId, String albumTitle) { + this.singerId = singerId; + this.albumId = albumId; + this.albumTitle = albumTitle; + } + } +} diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/RestoreBackupWithEncryptionKey.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/RestoreBackupWithEncryptionKey.java new file mode 100644 index 000000000000..087413dbe59f --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/RestoreBackupWithEncryptionKey.java @@ -0,0 +1,92 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +// [START spanner_restore_backup_with_encryption_key] + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.BackupId; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Restore; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.encryption.EncryptionConfigs; +import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; +import java.util.concurrent.ExecutionException; + +public class RestoreBackupWithEncryptionKey { + + static void restoreBackupWithEncryptionKey() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String backupId = "my-backup"; + String kmsKeyName = + "projects/" + projectId + "/locations//keyRings//cryptoKeys/"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseAdminClient adminClient = spanner.getDatabaseAdminClient(); + restoreBackupWithEncryptionKey( + adminClient, + projectId, + instanceId, + backupId, + databaseId, + kmsKeyName); + } + } + + static Void restoreBackupWithEncryptionKey(DatabaseAdminClient adminClient, + String projectId, String instanceId, String backupId, String restoreId, String kmsKeyName) { + final Restore restore = adminClient + .newRestoreBuilder( + BackupId.of(projectId, instanceId, backupId), + DatabaseId.of(projectId, instanceId, restoreId)) + .setEncryptionConfig(EncryptionConfigs.customerManagedEncryption(kmsKeyName)) + .build(); + final OperationFuture operation = adminClient + .restoreDatabase(restore); + + Database database; + try { + System.out.println("Waiting for 
operation to complete..."); + database = operation.get(); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + + System.out.printf( + "Database %s restored to %s from backup %s using encryption key %s%n", + database.getRestoreInfo().getSourceDatabase(), + database.getId(), + database.getRestoreInfo().getBackup(), + database.getEncryptionConfig().getKmsKeyName() + ); + return null; + } +} +// [END spanner_restore_backup_with_encryption_key] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/SpannerSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/SpannerSample.java new file mode 100644 index 000000000000..98eba6d9442e --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/SpannerSample.java @@ -0,0 +1,2233 @@ +/* + * Copyright 2017 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +import static com.google.cloud.spanner.Type.StructField; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.longrunning.OperationSnapshot; +import com.google.api.gax.paging.Page; +import com.google.api.gax.retrying.RetryingFuture; +import com.google.api.gax.rpc.StatusCode; +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Backup; +import com.google.cloud.spanner.BackupId; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Instance; +import com.google.cloud.spanner.InstanceAdminClient; +import com.google.cloud.spanner.InstanceId; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeyRange; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.ReadOnlyTransaction; +import com.google.cloud.spanner.RestoreInfo; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerBatchUpdateException; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Value; +import com.google.common.io.BaseEncoding; +import com.google.longrunning.Operation; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.spanner.admin.database.v1.CopyBackupMetadata; +import com.google.spanner.admin.database.v1.CreateBackupMetadata; +import 
com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import com.google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata; +import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; +import java.math.BigDecimal; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import org.threeten.bp.LocalDate; +import org.threeten.bp.LocalDateTime; +import org.threeten.bp.OffsetDateTime; +import org.threeten.bp.temporal.ChronoField; + +/** + * Example code for using the Cloud Spanner API. This example demonstrates all the common operations + * that can be done on Cloud Spanner. These are: + * + *

    + * + *

      + *
    • Creating a Cloud Spanner database. + *
    • Writing, reading and executing SQL queries. + *
    • Writing data using a read-write transaction. + *
    • Using an index to read and execute SQL queries over data. + *
    • Using commit timestamp for tracking when a record was last updated. + *
    • Using Google API Extensions for Java to make thread-safe requests via long-running + * operations. http://googleapis.github.io/gax-java/ + *
    + */ +public class SpannerSample { + + /** Class to contain singer sample data. */ + static class Singer { + + final long singerId; + final String firstName; + final String lastName; + + Singer(long singerId, String firstName, String lastName) { + this.singerId = singerId; + this.firstName = firstName; + this.lastName = lastName; + } + } + + /** Class to contain album sample data. */ + static class Album { + + final long singerId; + final long albumId; + final String albumTitle; + + Album(long singerId, long albumId, String albumTitle) { + this.singerId = singerId; + this.albumId = albumId; + this.albumTitle = albumTitle; + } + } + + /** Class to contain performance sample data. */ + static class Performance { + + final long singerId; + final long venueId; + final String eventDate; + final long revenue; + + Performance(long singerId, long venueId, String eventDate, long revenue) { + this.singerId = singerId; + this.venueId = venueId; + this.eventDate = eventDate; + this.revenue = revenue; + } + } + + /** Class to contain venue sample data. 
*/ + static class Venue { + + final long venueId; + final String venueName; + final String venueInfo; + final long capacity; + final Value availableDates; + final String lastContactDate; + final boolean outdoorVenue; + final float popularityScore; + final BigDecimal revenue; + final Value venueDetails; + + Venue( + long venueId, + String venueName, + String venueInfo, + long capacity, + Value availableDates, + String lastContactDate, + boolean outdoorVenue, + float popularityScore, + BigDecimal revenue, + Value venueDetails) { + this.venueId = venueId; + this.venueName = venueName; + this.venueInfo = venueInfo; + this.capacity = capacity; + this.availableDates = availableDates; + this.lastContactDate = lastContactDate; + this.outdoorVenue = outdoorVenue; + this.popularityScore = popularityScore; + this.revenue = revenue; + this.venueDetails = venueDetails; + } + } + + /** Get a database id to restore a backup to from the sample database id. */ + static String createRestoredSampleDbId(DatabaseId database) { + int index = database.getDatabase().indexOf('-'); + String prefix = database.getDatabase().substring(0, index); + String restoredDbId = database.getDatabase().replace(prefix, "restored"); + if (restoredDbId.length() > 30) { + restoredDbId = restoredDbId.substring(0, 30); + } + return restoredDbId; + } + + // [START spanner_insert_data] + static final List SINGERS = + Arrays.asList( + new Singer(1, "Marc", "Richards"), + new Singer(2, "Catalina", "Smith"), + new Singer(3, "Alice", "Trentor"), + new Singer(4, "Lea", "Martin"), + new Singer(5, "David", "Lomond")); + + static final List ALBUMS = + Arrays.asList( + new Album(1, 1, "Total Junk"), + new Album(1, 2, "Go, Go, Go"), + new Album(2, 1, "Green"), + new Album(2, 2, "Forever Hold Your Peace"), + new Album(2, 3, "Terrified")); + // [END spanner_insert_data] + + // [START spanner_insert_data_with_timestamp_column] + static final List PERFORMANCES = + Arrays.asList( + new Performance(1, 4, "2017-10-05", 11000), + 
new Performance(1, 19, "2017-11-02", 15000), + new Performance(2, 42, "2017-12-23", 7000)); + // [END spanner_insert_data_with_timestamp_column] + + // [START spanner_insert_datatypes_data] + static Value availableDates1 = + Value.dateArray( + Arrays.asList( + Date.parseDate("2020-12-01"), + Date.parseDate("2020-12-02"), + Date.parseDate("2020-12-03"))); + static Value availableDates2 = + Value.dateArray( + Arrays.asList( + Date.parseDate("2020-11-01"), + Date.parseDate("2020-11-05"), + Date.parseDate("2020-11-15"))); + static Value availableDates3 = + Value.dateArray(Arrays.asList(Date.parseDate("2020-10-01"), Date.parseDate("2020-10-07"))); + static String exampleBytes1 = BaseEncoding.base64().encode("Hello World 1".getBytes()); + static String exampleBytes2 = BaseEncoding.base64().encode("Hello World 2".getBytes()); + static String exampleBytes3 = BaseEncoding.base64().encode("Hello World 3".getBytes()); + static final List VENUES = + Arrays.asList( + new Venue( + 4, + "Venue 4", + exampleBytes1, + 1800, + availableDates1, + "2018-09-02", + false, + 0.85543f, + new BigDecimal("215100.10"), + Value.json( + "[{\"name\":\"room 1\",\"open\":true},{\"name\":\"room 2\",\"open\":false}]")), + new Venue( + 19, + "Venue 19", + exampleBytes2, + 6300, + availableDates2, + "2019-01-15", + true, + 0.98716f, + new BigDecimal("1200100.00"), + Value.json("{\"rating\":9,\"open\":true}")), + new Venue( + 42, + "Venue 42", + exampleBytes3, + 3000, + availableDates3, + "2018-10-01", + false, + 0.72598f, + new BigDecimal("390650.99"), + Value.json( + "{\"name\":null," + + "\"open\":{\"Monday\":true,\"Tuesday\":false}," + + "\"tags\":[\"large\",\"airy\"]}"))); + // [END spanner_insert_datatypes_data] + + // [START spanner_create_database] + static void createDatabase(DatabaseAdminClient dbAdminClient, DatabaseId id) { + OperationFuture op = + dbAdminClient.createDatabase( + id.getInstanceId().getInstance(), + id.getDatabase(), + Arrays.asList( + "CREATE TABLE Singers (" + + " 
SingerId INT64 NOT NULL," + + " FirstName STRING(1024)," + + " LastName STRING(1024)," + + " SingerInfo BYTES(MAX)," + + " FullName STRING(2048) AS " + + " (ARRAY_TO_STRING([FirstName, LastName], \" \")) STORED" + + ") PRIMARY KEY (SingerId)", + "CREATE TABLE Albums (" + + " SingerId INT64 NOT NULL," + + " AlbumId INT64 NOT NULL," + + " AlbumTitle STRING(MAX)" + + ") PRIMARY KEY (SingerId, AlbumId)," + + " INTERLEAVE IN PARENT Singers ON DELETE CASCADE")); + try { + // Initiate the request which returns an OperationFuture. + Database db = op.get(); + System.out.println("Created database [" + db.getId() + "]"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_create_database] + + // [START spanner_create_table_with_timestamp_column] + static void createTableWithTimestamp(DatabaseAdminClient dbAdminClient, DatabaseId id) { + OperationFuture op = + dbAdminClient.updateDatabaseDdl( + id.getInstanceId().getInstance(), + id.getDatabase(), + Arrays.asList( + "CREATE TABLE Performances (" + + " SingerId INT64 NOT NULL," + + " VenueId INT64 NOT NULL," + + " EventDate Date," + + " Revenue INT64, " + + " LastUpdateTime TIMESTAMP NOT NULL OPTIONS (allow_commit_timestamp=true)" + + ") PRIMARY KEY (SingerId, VenueId, EventDate)," + + " INTERLEAVE IN PARENT Singers ON DELETE CASCADE"), + null); + try { + // Initiate the request which returns an OperationFuture. + op.get(); + System.out.println("Created Performances table in database: [" + id + "]"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. 
+ throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_create_table_with_timestamp_column] + + // [START spanner_insert_data_with_timestamp_column] + static void writeExampleDataWithTimestamp(DatabaseClient dbClient) { + List mutations = new ArrayList<>(); + for (Performance performance : PERFORMANCES) { + mutations.add( + Mutation.newInsertBuilder("Performances") + .set("SingerId") + .to(performance.singerId) + .set("VenueId") + .to(performance.venueId) + .set("EventDate") + .to(performance.eventDate) + .set("Revenue") + .to(performance.revenue) + .set("LastUpdateTime") + .to(Value.COMMIT_TIMESTAMP) + .build()); + } + dbClient.write(mutations); + } + // [END spanner_insert_data_with_timestamp_column] + + // [START spanner_insert_data] + static void writeExampleData(DatabaseClient dbClient) { + List mutations = new ArrayList<>(); + for (Singer singer : SINGERS) { + mutations.add( + Mutation.newInsertBuilder("Singers") + .set("SingerId") + .to(singer.singerId) + .set("FirstName") + .to(singer.firstName) + .set("LastName") + .to(singer.lastName) + .build()); + } + for (Album album : ALBUMS) { + mutations.add( + Mutation.newInsertBuilder("Albums") + .set("SingerId") + .to(album.singerId) + .set("AlbumId") + .to(album.albumId) + .set("AlbumTitle") + .to(album.albumTitle) + .build()); + } + dbClient.write(mutations); + } + // [END spanner_insert_data] + + // [START spanner_delete_data] + static void deleteExampleData(DatabaseClient dbClient) { + List mutations = new ArrayList<>(); + + // KeySet.Builder can be used to delete a specific set of rows. + // Delete the Albums with the key values (2,1) and (2,3). 
+ mutations.add( + Mutation.delete( + "Albums", KeySet.newBuilder().addKey(Key.of(2, 1)).addKey(Key.of(2, 3)).build())); + + // KeyRange can be used to delete rows with a key in a specific range. + // Delete a range of rows where the column key is >=3 and <5 + mutations.add( + Mutation.delete("Singers", KeySet.range(KeyRange.closedOpen(Key.of(3), Key.of(5))))); + + // KeySet.all() can be used to delete all the rows in a table. + // Delete remaining Singers rows, which will also delete the remaining Albums rows since it was + // defined with ON DELETE CASCADE. + mutations.add(Mutation.delete("Singers", KeySet.all())); + + dbClient.write(mutations); + System.out.printf("Records deleted.\n"); + } + // [END spanner_delete_data] + + // [START spanner_query_data] + static void query(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() // Execute a single read or query against Cloud Spanner. + .executeQuery(Statement.of("SELECT SingerId, AlbumId, AlbumTitle FROM Albums"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", resultSet.getLong(0), resultSet.getLong(1), resultSet.getString(2)); + } + } + } + // [END spanner_query_data] + + // [START spanner_read_data] + static void read(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() + .read( + "Albums", + KeySet.all(), // Read all rows in a table. 
+ Arrays.asList("SingerId", "AlbumId", "AlbumTitle"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", resultSet.getLong(0), resultSet.getLong(1), resultSet.getString(2)); + } + } + } + // [END spanner_read_data] + + // [START spanner_add_column] + static void addMarketingBudget(DatabaseAdminClient adminClient, DatabaseId dbId) { + OperationFuture op = + adminClient.updateDatabaseDdl( + dbId.getInstanceId().getInstance(), + dbId.getDatabase(), + Arrays.asList("ALTER TABLE Albums ADD COLUMN MarketingBudget INT64"), + null); + try { + // Initiate the request which returns an OperationFuture. + op.get(); + System.out.println("Added MarketingBudget column"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_add_column] + + // Before executing this method, a new column MarketingBudget has to be added to the Albums + // table by applying the DDL statement "ALTER TABLE Albums ADD COLUMN MarketingBudget INT64". + // [START spanner_update_data] + static void update(DatabaseClient dbClient) { + // Mutation can be used to update/insert/delete a single row in a table. Here we use + // newUpdateBuilder to create update mutations. + List mutations = + Arrays.asList( + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(1) + .set("AlbumId") + .to(1) + .set("MarketingBudget") + .to(100000) + .build(), + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(2) + .set("AlbumId") + .to(2) + .set("MarketingBudget") + .to(500000) + .build()); + // This writes all the mutations to Cloud Spanner atomically. 
+ dbClient.write(mutations); + } + // [END spanner_update_data] + + // [START spanner_read_write_transaction] + static void writeWithTransaction(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + // Transfer marketing budget from one album to another. We do it in a transaction to + // ensure that the transfer is atomic. + Struct row = + transaction.readRow("Albums", Key.of(2, 2), Arrays.asList("MarketingBudget")); + long album2Budget = row.getLong(0); + // Transaction will only be committed if this condition still holds at the time of + // commit. Otherwise it will be aborted and the callable will be rerun by the + // client library. + long transfer = 200000; + if (album2Budget >= transfer) { + long album1Budget = + transaction + .readRow("Albums", Key.of(1, 1), Arrays.asList("MarketingBudget")) + .getLong(0); + album1Budget += transfer; + album2Budget -= transfer; + transaction.buffer( + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(1) + .set("AlbumId") + .to(1) + .set("MarketingBudget") + .to(album1Budget) + .build()); + transaction.buffer( + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(2) + .set("AlbumId") + .to(2) + .set("MarketingBudget") + .to(album2Budget) + .build()); + } + return null; + }); + } + // [END spanner_read_write_transaction] + + // [START spanner_query_data_with_new_column] + static void queryMarketingBudget(DatabaseClient dbClient) { + // Rows without an explicit value for MarketingBudget will have a MarketingBudget equal to + // null. A try-with-resource block is used to automatically release resources held by + // ResultSet. + try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery(Statement.of("SELECT SingerId, AlbumId, MarketingBudget FROM Albums"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", + resultSet.getLong("SingerId"), + resultSet.getLong("AlbumId"), + // We check that the value is non null. 
ResultSet getters can only be used to retrieve + // non null values. + resultSet.isNull("MarketingBudget") ? "NULL" : resultSet.getLong("MarketingBudget")); + } + } + } + // [END spanner_query_data_with_new_column] + + // [START spanner_create_index] + static void addIndex(DatabaseAdminClient adminClient, DatabaseId dbId) { + OperationFuture op = + adminClient.updateDatabaseDdl( + dbId.getInstanceId().getInstance(), + dbId.getDatabase(), + Arrays.asList("CREATE INDEX AlbumsByAlbumTitle ON Albums(AlbumTitle)"), + null); + try { + // Initiate the request which returns an OperationFuture. + op.get(); + System.out.println("Added AlbumsByAlbumTitle index"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_create_index] + + // Before running this example, add the index AlbumsByAlbumTitle by applying the DDL statement + // "CREATE INDEX AlbumsByAlbumTitle ON Albums(AlbumTitle)". + // [START spanner_query_data_with_index] + static void queryUsingIndex(DatabaseClient dbClient) { + Statement statement = + Statement + // We use FORCE_INDEX hint to specify which index to use. For more details see + // https://cloud.google.com/spanner/docs/query-syntax#from-clause + .newBuilder( + "SELECT AlbumId, AlbumTitle, MarketingBudget " + + "FROM Albums@{FORCE_INDEX=AlbumsByAlbumTitle} " + + "WHERE AlbumTitle >= @StartTitle AND AlbumTitle < @EndTitle") + // We use @BoundParameters to help speed up frequently executed queries. 
+ // For more details see https://cloud.google.com/spanner/docs/sql-best-practices + .bind("StartTitle") + .to("Aardvark") + .bind("EndTitle") + .to("Goo") + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong("AlbumId"), + resultSet.getString("AlbumTitle"), + resultSet.isNull("MarketingBudget") ? "NULL" : resultSet.getLong("MarketingBudget")); + } + } + } + // [END spanner_query_data_with_index] + + // [START spanner_read_data_with_index] + static void readUsingIndex(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() + .readUsingIndex( + "Albums", + "AlbumsByAlbumTitle", + KeySet.all(), + Arrays.asList("AlbumId", "AlbumTitle"))) { + while (resultSet.next()) { + System.out.printf("%d %s\n", resultSet.getLong(0), resultSet.getString(1)); + } + } + } + // [END spanner_read_data_with_index] + + // [START spanner_create_storing_index] + static void addStoringIndex(DatabaseAdminClient adminClient, DatabaseId dbId) { + OperationFuture op = + adminClient.updateDatabaseDdl( + dbId.getInstanceId().getInstance(), + dbId.getDatabase(), + Arrays.asList( + "CREATE INDEX AlbumsByAlbumTitle2 ON Albums(AlbumTitle) " + + "STORING (MarketingBudget)"), + null); + try { + // Initiate the request which returns an OperationFuture. + op.get(); + System.out.println("Added AlbumsByAlbumTitle2 index"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. 
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_create_storing_index] + + // Before running this example, create a storing index AlbumsByAlbumTitle2 by applying the DDL + // statement "CREATE INDEX AlbumsByAlbumTitle2 ON Albums(AlbumTitle) STORING (MarketingBudget)". + // [START spanner_read_data_with_storing_index] + static void readStoringIndex(DatabaseClient dbClient) { + // We can read MarketingBudget also from the index since it stores a copy of MarketingBudget. + try (ResultSet resultSet = + dbClient + .singleUse() + .readUsingIndex( + "Albums", + "AlbumsByAlbumTitle2", + KeySet.all(), + Arrays.asList("AlbumId", "AlbumTitle", "MarketingBudget"))) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong(0), + resultSet.getString(1), + resultSet.isNull("MarketingBudget") ? "NULL" : resultSet.getLong("MarketingBudget")); + } + } + } + // [END spanner_read_data_with_storing_index] + + // [START spanner_read_only_transaction] + static void readOnlyTransaction(DatabaseClient dbClient) { + // ReadOnlyTransaction must be closed by calling close() on it to release resources held by it. + // We use a try-with-resource block to automatically do so. 
+ try (ReadOnlyTransaction transaction = dbClient.readOnlyTransaction()) { + ResultSet queryResultSet = + transaction.executeQuery( + Statement.of("SELECT SingerId, AlbumId, AlbumTitle FROM Albums")); + while (queryResultSet.next()) { + System.out.printf( + "%d %d %s\n", + queryResultSet.getLong(0), queryResultSet.getLong(1), queryResultSet.getString(2)); + } + try (ResultSet readResultSet = + transaction.read( + "Albums", KeySet.all(), Arrays.asList("SingerId", "AlbumId", "AlbumTitle"))) { + while (readResultSet.next()) { + System.out.printf( + "%d %d %s\n", + readResultSet.getLong(0), readResultSet.getLong(1), readResultSet.getString(2)); + } + } + } + } + // [END spanner_read_only_transaction] + + // [START spanner_read_stale_data] + static void readStaleData(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse(TimestampBound.ofExactStaleness(15, TimeUnit.SECONDS)) + .read( + "Albums", KeySet.all(), Arrays.asList("SingerId", "AlbumId", "MarketingBudget"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", + resultSet.getLong(0), + resultSet.getLong(1), + resultSet.isNull(2) ? "NULL" : resultSet.getLong("MarketingBudget")); + } + } + } + // [END spanner_read_stale_data] + + // [START spanner_add_timestamp_column] + static void addCommitTimestamp(DatabaseAdminClient adminClient, DatabaseId dbId) { + OperationFuture op = + adminClient.updateDatabaseDdl( + dbId.getInstanceId().getInstance(), + dbId.getDatabase(), + Arrays.asList( + "ALTER TABLE Albums ADD COLUMN LastUpdateTime TIMESTAMP " + + "OPTIONS (allow_commit_timestamp=true)"), + null); + try { + // Initiate the request which returns an OperationFuture. + op.get(); + System.out.println("Added LastUpdateTime as a commit timestamp column in Albums table."); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. 
+ throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_add_timestamp_column] + + // Before executing this method, a new column MarketingBudget has to be added to the Albums + // table by applying the DDL statement "ALTER TABLE Albums ADD COLUMN MarketingBudget INT64". + // In addition this update expects the LastUpdateTime column added by applying the DDL statement + // "ALTER TABLE Albums ADD COLUMN LastUpdateTime TIMESTAMP OPTIONS (allow_commit_timestamp=true)" + // [START spanner_update_data_with_timestamp_column] + static void updateWithTimestamp(DatabaseClient dbClient) { + // Mutation can be used to update/insert/delete a single row in a table. Here we use + // newUpdateBuilder to create update mutations. + List mutations = + Arrays.asList( + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(1) + .set("AlbumId") + .to(1) + .set("MarketingBudget") + .to(1000000) + .set("LastUpdateTime") + .to(Value.COMMIT_TIMESTAMP) + .build(), + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(2) + .set("AlbumId") + .to(2) + .set("MarketingBudget") + .to(750000) + .set("LastUpdateTime") + .to(Value.COMMIT_TIMESTAMP) + .build()); + // This writes all the mutations to Cloud Spanner atomically. + dbClient.write(mutations); + } + // [END spanner_update_data_with_timestamp_column] + + // [START spanner_query_data_with_timestamp_column] + static void queryMarketingBudgetWithTimestamp(DatabaseClient dbClient) { + // Rows without an explicit value for MarketingBudget will have a MarketingBudget equal to + // null. A try-with-resource block is used to automatically release resources held by + // ResultSet. 
+ try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery( + Statement.of( + "SELECT SingerId, AlbumId, MarketingBudget, LastUpdateTime FROM Albums" + + " ORDER BY LastUpdateTime DESC"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s %s\n", + resultSet.getLong("SingerId"), + resultSet.getLong("AlbumId"), + // We check that the value is non null. ResultSet getters can only be used to retrieve + // non null values. + resultSet.isNull("MarketingBudget") ? "NULL" : resultSet.getLong("MarketingBudget"), + resultSet.isNull("LastUpdateTime") ? "NULL" : resultSet.getTimestamp("LastUpdateTime")); + } + } + } + // [END spanner_query_data_with_timestamp_column] + + static void querySingersTable(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery(Statement.of("SELECT SingerId, FirstName, LastName FROM Singers"))) { + while (resultSet.next()) { + System.out.printf( + "%s %s %s\n", + resultSet.getLong("SingerId"), + resultSet.getString("FirstName"), + resultSet.getString("LastName")); + } + } + } + + static void queryPerformancesTable(DatabaseClient dbClient) { + // Rows without an explicit value for Revenue will have a Revenue equal to + // null. A try-with-resource block is used to automatically release resources held by + // ResultSet. + try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery( + Statement.of( + "SELECT SingerId, VenueId, EventDate, Revenue, LastUpdateTime " + + "FROM Performances ORDER BY LastUpdateTime DESC"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s %s %s\n", + resultSet.getLong("SingerId"), + resultSet.getLong("VenueId"), + resultSet.getDate("EventDate"), + // We check that the value is non null. ResultSet getters can only be used to retrieve + // non null values. + resultSet.isNull("Revenue") ? 
"NULL" : resultSet.getLong("Revenue"), + resultSet.getTimestamp("LastUpdateTime")); + } + } + } + + // [START spanner_write_data_for_struct_queries] + static void writeStructExampleData(DatabaseClient dbClient) { + final List singers = + Arrays.asList( + new Singer(6, "Elena", "Campbell"), + new Singer(7, "Gabriel", "Wright"), + new Singer(8, "Benjamin", "Martinez"), + new Singer(9, "Hannah", "Harris")); + + List mutations = new ArrayList<>(); + for (Singer singer : singers) { + mutations.add( + Mutation.newInsertBuilder("Singers") + .set("SingerId") + .to(singer.singerId) + .set("FirstName") + .to(singer.firstName) + .set("LastName") + .to(singer.lastName) + .build()); + } + dbClient.write(mutations); + System.out.println("Inserted example data for struct parameter queries."); + } + // [END spanner_write_data_for_struct_queries] + + static void queryWithStruct(DatabaseClient dbClient) { + // [START spanner_create_struct_with_data] + Struct name = + Struct.newBuilder().set("FirstName").to("Elena").set("LastName").to("Campbell").build(); + // [END spanner_create_struct_with_data] + + // [START spanner_query_data_with_struct] + Statement s = + Statement.newBuilder( + "SELECT SingerId FROM Singers " + + "WHERE STRUCT(FirstName, LastName) " + + "= @name") + .bind("name") + .to(name) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(s)) { + while (resultSet.next()) { + System.out.printf("%d\n", resultSet.getLong("SingerId")); + } + } + // [END spanner_query_data_with_struct] + } + + static void queryWithArrayOfStruct(DatabaseClient dbClient) { + // [START spanner_create_user_defined_struct] + Type nameType = + Type.struct( + Arrays.asList( + StructField.of("FirstName", Type.string()), + StructField.of("LastName", Type.string()))); + // [END spanner_create_user_defined_struct] + + // [START spanner_create_array_of_struct_with_data] + List bandMembers = new ArrayList<>(); + bandMembers.add( + 
Struct.newBuilder().set("FirstName").to("Elena").set("LastName").to("Campbell").build()); + bandMembers.add( + Struct.newBuilder().set("FirstName").to("Gabriel").set("LastName").to("Wright").build()); + bandMembers.add( + Struct.newBuilder().set("FirstName").to("Benjamin").set("LastName").to("Martinez").build()); + // [END spanner_create_array_of_struct_with_data] + + // [START spanner_query_data_with_array_of_struct] + Statement s = + Statement.newBuilder( + "SELECT SingerId FROM Singers WHERE " + + "STRUCT(FirstName, LastName) " + + "IN UNNEST(@names) " + + "ORDER BY SingerId DESC") + .bind("names") + .toStructArray(nameType, bandMembers) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(s)) { + while (resultSet.next()) { + System.out.printf("%d\n", resultSet.getLong("SingerId")); + } + } + // [END spanner_query_data_with_array_of_struct] + } + + // [START spanner_field_access_on_struct_parameters] + static void queryStructField(DatabaseClient dbClient) { + Statement s = + Statement.newBuilder("SELECT SingerId FROM Singers WHERE FirstName = @name.FirstName") + .bind("name") + .to( + Struct.newBuilder() + .set("FirstName") + .to("Elena") + .set("LastName") + .to("Campbell") + .build()) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(s)) { + while (resultSet.next()) { + System.out.printf("%d\n", resultSet.getLong("SingerId")); + } + } + } + // [END spanner_field_access_on_struct_parameters] + + // [START spanner_field_access_on_nested_struct_parameters] + static void queryNestedStructField(DatabaseClient dbClient) { + Type nameType = + Type.struct( + Arrays.asList( + StructField.of("FirstName", Type.string()), + StructField.of("LastName", Type.string()))); + + Struct songInfo = + Struct.newBuilder() + .set("song_name") + .to("Imagination") + .set("artistNames") + .toStructArray( + nameType, + Arrays.asList( + Struct.newBuilder() + .set("FirstName") + .to("Elena") + .set("LastName") + .to("Campbell") + .build(), + 
Struct.newBuilder() + .set("FirstName") + .to("Hannah") + .set("LastName") + .to("Harris") + .build())) + .build(); + Statement s = + Statement.newBuilder( + "SELECT SingerId, @song_info.song_name " + + "FROM Singers WHERE " + + "STRUCT(FirstName, LastName) " + + "IN UNNEST(@song_info.artistNames)") + .bind("song_info") + .to(songInfo) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(s)) { + while (resultSet.next()) { + System.out.printf("%d %s\n", resultSet.getLong("SingerId"), resultSet.getString(1)); + } + } + } + // [END spanner_field_access_on_nested_struct_parameters] + + // [START spanner_dml_standard_insert] + static void insertUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + String sql = + "INSERT INTO Singers (SingerId, FirstName, LastName) " + + " VALUES (10, 'Virginia', 'Watson')"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record inserted.\n", rowCount); + return null; + }); + } + // [END spanner_dml_standard_insert] + + // [START spanner_dml_standard_update] + static void updateUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + String sql = + "UPDATE Albums " + + "SET MarketingBudget = MarketingBudget * 2 " + + "WHERE SingerId = 1 and AlbumId = 1"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record updated.\n", rowCount); + return null; + }); + } + // [END spanner_dml_standard_update] + + // [START spanner_dml_standard_delete] + static void deleteUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + String sql = "DELETE FROM Singers WHERE FirstName = 'Alice'"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record deleted.\n", rowCount); + return null; + }); + } + // [END spanner_dml_standard_delete] + + // [START spanner_dml_standard_update_with_timestamp] + 
static void updateUsingDmlWithTimestamp(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + String sql = + "UPDATE Albums " + + "SET LastUpdateTime = PENDING_COMMIT_TIMESTAMP() WHERE SingerId = 1"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d records updated.\n", rowCount); + return null; + }); + } + // [END spanner_dml_standard_update_with_timestamp] + + // [START spanner_dml_write_then_read] + static void writeAndReadUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + // Insert record. + String sql = + "INSERT INTO Singers (SingerId, FirstName, LastName) " + + " VALUES (11, 'Timothy', 'Campbell')"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record inserted.\n", rowCount); + // Read newly inserted record. + sql = "SELECT FirstName, LastName FROM Singers WHERE SingerId = 11"; + // We use a try-with-resource block to automatically release resources held by + // ResultSet. 
+ try (ResultSet resultSet = transaction.executeQuery(Statement.of(sql))) { + while (resultSet.next()) { + System.out.printf( + "%s %s\n", + resultSet.getString("FirstName"), resultSet.getString("LastName")); + } + } + return null; + }); + } + // [END spanner_dml_write_then_read] + + // [START spanner_dml_structs] + static void updateUsingDmlWithStruct(DatabaseClient dbClient) { + Struct name = + Struct.newBuilder().set("FirstName").to("Timothy").set("LastName").to("Campbell").build(); + Statement s = + Statement.newBuilder( + "UPDATE Singers SET LastName = 'Grant' " + + "WHERE STRUCT(FirstName, LastName) " + + "= @name") + .bind("name") + .to(name) + .build(); + dbClient + .readWriteTransaction() + .run(transaction -> { + long rowCount = transaction.executeUpdate(s); + System.out.printf("%d record updated.\n", rowCount); + return null; + }); + } + // [END spanner_dml_structs] + + // [START spanner_dml_getting_started_insert] + static void writeUsingDml(DatabaseClient dbClient) { + // Insert 4 singer records + dbClient + .readWriteTransaction() + .run(transaction -> { + String sql = + "INSERT INTO Singers (SingerId, FirstName, LastName) VALUES " + + "(12, 'Melissa', 'Garcia'), " + + "(13, 'Russell', 'Morales'), " + + "(14, 'Jacqueline', 'Long'), " + + "(15, 'Dylan', 'Shaw')"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d records inserted.\n", rowCount); + return null; + }); + } + // [END spanner_dml_getting_started_insert] + + // [START spanner_query_with_parameter] + static void queryWithParameter(DatabaseClient dbClient) { + Statement statement = + Statement.newBuilder( + "SELECT SingerId, FirstName, LastName " + + "FROM Singers " + + "WHERE LastName = @lastName") + .bind("lastName") + .to("Garcia") + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong("SingerId"), + resultSet.getString("FirstName"), + 
resultSet.getString("LastName")); + } + } + } + // [END spanner_query_with_parameter] + + // [START spanner_dml_getting_started_update] + static void writeWithTransactionUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + // Transfer marketing budget from one album to another. We do it in a transaction to + // ensure that the transfer is atomic. + String sql1 = + "SELECT MarketingBudget from Albums WHERE SingerId = 2 and AlbumId = 2"; + ResultSet resultSet = transaction.executeQuery(Statement.of(sql1)); + long album2Budget = 0; + while (resultSet.next()) { + album2Budget = resultSet.getLong("MarketingBudget"); + } + // Transaction will only be committed if this condition still holds at the time of + // commit. Otherwise it will be aborted and the callable will be rerun by the + // client library. + long transfer = 200000; + if (album2Budget >= transfer) { + String sql2 = + "SELECT MarketingBudget from Albums WHERE SingerId = 1 and AlbumId = 1"; + ResultSet resultSet2 = transaction.executeQuery(Statement.of(sql2)); + long album1Budget = 0; + while (resultSet2.next()) { + album1Budget = resultSet2.getLong("MarketingBudget"); + } + album1Budget += transfer; + album2Budget -= transfer; + Statement updateStatement = + Statement.newBuilder( + "UPDATE Albums " + + "SET MarketingBudget = @AlbumBudget " + + "WHERE SingerId = 1 and AlbumId = 1") + .bind("AlbumBudget") + .to(album1Budget) + .build(); + transaction.executeUpdate(updateStatement); + Statement updateStatement2 = + Statement.newBuilder( + "UPDATE Albums " + + "SET MarketingBudget = @AlbumBudget " + + "WHERE SingerId = 2 and AlbumId = 2") + .bind("AlbumBudget") + .to(album2Budget) + .build(); + transaction.executeUpdate(updateStatement2); + } + return null; + }); + } + // [END spanner_dml_getting_started_update] + + // [START spanner_dml_partitioned_update] + static void updateUsingPartitionedDml(DatabaseClient dbClient) { + String sql = "UPDATE Albums SET 
MarketingBudget = 100000 WHERE SingerId > 1"; + long rowCount = dbClient.executePartitionedUpdate(Statement.of(sql)); + System.out.printf("%d records updated.\n", rowCount); + } + // [END spanner_dml_partitioned_update] + + // [START spanner_dml_partitioned_delete] + static void deleteUsingPartitionedDml(DatabaseClient dbClient) { + String sql = "DELETE FROM Singers WHERE SingerId > 10"; + long rowCount = dbClient.executePartitionedUpdate(Statement.of(sql)); + System.out.printf("%d records deleted.\n", rowCount); + } + // [END spanner_dml_partitioned_delete] + + // [START spanner_dml_batch_update] + static void updateUsingBatchDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run(transaction -> { + List stmts = new ArrayList(); + String sql = + "INSERT INTO Albums " + + "(SingerId, AlbumId, AlbumTitle, MarketingBudget) " + + "VALUES (1, 3, 'Test Album Title', 10000) "; + stmts.add(Statement.of(sql)); + sql = + "UPDATE Albums " + + "SET MarketingBudget = MarketingBudget * 2 " + + "WHERE SingerId = 1 and AlbumId = 3"; + stmts.add(Statement.of(sql)); + long[] rowCounts; + try { + rowCounts = transaction.batchUpdate(stmts); + } catch (SpannerBatchUpdateException e) { + rowCounts = e.getUpdateCounts(); + } + for (int i = 0; i < rowCounts.length; i++) { + System.out.printf("%d record updated by stmt %d.\n", rowCounts[i], i); + } + return null; + }); + } + // [END spanner_dml_batch_update] + + // [START spanner_create_table_with_datatypes] + static void createTableWithDatatypes(DatabaseAdminClient dbAdminClient, DatabaseId id) { + OperationFuture op = + dbAdminClient.updateDatabaseDdl( + id.getInstanceId().getInstance(), + id.getDatabase(), + Arrays.asList( + "CREATE TABLE Venues (" + + " VenueId INT64 NOT NULL," + + " VenueName STRING(100)," + + " VenueInfo BYTES(MAX)," + + " Capacity INT64," + + " AvailableDates ARRAY," + + " LastContactDate DATE," + + " OutdoorVenue BOOL, " + + " PopularityScore FLOAT64, " + + " Revenue NUMERIC, " + + " 
VenueDetails JSON, " + + " LastUpdateTime TIMESTAMP NOT NULL OPTIONS (allow_commit_timestamp=true)" + + ") PRIMARY KEY (VenueId)"), + null); + try { + // Initiate the request which returns an OperationFuture. + op.get(); + System.out.println("Created Venues table in database: [" + id + "]"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_create_table_with_datatypes] + + // [START spanner_insert_datatypes_data] + static void writeDatatypesData(DatabaseClient dbClient) { + List mutations = new ArrayList<>(); + for (Venue venue : VENUES) { + mutations.add( + Mutation.newInsertBuilder("Venues") + .set("VenueId") + .to(venue.venueId) + .set("VenueName") + .to(venue.venueName) + .set("VenueInfo") + .to(venue.venueInfo) + .set("Capacity") + .to(venue.capacity) + .set("AvailableDates") + .to(venue.availableDates) + .set("LastContactDate") + .to(venue.lastContactDate) + .set("OutdoorVenue") + .to(venue.outdoorVenue) + .set("PopularityScore") + .to(venue.popularityScore) + .set("Revenue") + .to(venue.revenue) + .set("VenueDetails") + .to(venue.venueDetails) + .set("LastUpdateTime") + .to(Value.COMMIT_TIMESTAMP) + .build()); + } + dbClient.write(mutations); + } + // [END spanner_insert_datatypes_data] + + // [START spanner_query_with_array_parameter] + static void queryWithArray(DatabaseClient dbClient) { + Value exampleArray = + Value.dateArray(Arrays.asList(Date.parseDate("2020-10-01"), Date.parseDate("2020-11-01"))); + + Statement statement = + Statement.newBuilder( + "SELECT VenueId, VenueName, AvailableDate FROM Venues v, " + + "UNNEST(v.AvailableDates) as AvailableDate " + + "WHERE AvailableDate in 
UNNEST(@availableDates)") + .bind("availableDates") + .to(exampleArray) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong("VenueId"), + resultSet.getString("VenueName"), + resultSet.getDate("AvailableDate")); + } + } + } + // [END spanner_query_with_array_parameter] + + // [START spanner_query_with_bool_parameter] + static void queryWithBool(DatabaseClient dbClient) { + boolean exampleBool = true; + Statement statement = + Statement.newBuilder( + "SELECT VenueId, VenueName, OutdoorVenue FROM Venues " + + "WHERE OutdoorVenue = @outdoorVenue") + .bind("outdoorVenue") + .to(exampleBool) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %b\n", + resultSet.getLong("VenueId"), + resultSet.getString("VenueName"), + resultSet.getBoolean("OutdoorVenue")); + } + } + } + // [END spanner_query_with_bool_parameter] + + // [START spanner_query_with_bytes_parameter] + static void queryWithBytes(DatabaseClient dbClient) { + ByteArray exampleBytes = + ByteArray.fromBase64(BaseEncoding.base64().encode("Hello World 1".getBytes())); + Statement statement = + Statement.newBuilder( + "SELECT VenueId, VenueName FROM Venues " + "WHERE VenueInfo = @venueInfo") + .bind("venueInfo") + .to(exampleBytes) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s\n", resultSet.getLong("VenueId"), resultSet.getString("VenueName")); + } + } + } + // [END spanner_query_with_bytes_parameter] + + // [START spanner_query_with_date_parameter] + static void queryWithDate(DatabaseClient dbClient) { + String exampleDate = "2019-01-01"; + Statement statement = + Statement.newBuilder( + "SELECT VenueId, VenueName, LastContactDate FROM Venues " + + "WHERE LastContactDate < @lastContactDate") + 
.bind("lastContactDate") + .to(exampleDate) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong("VenueId"), + resultSet.getString("VenueName"), + resultSet.getDate("LastContactDate")); + } + } + } + // [END spanner_query_with_date_parameter] + + // [START spanner_query_with_float_parameter] + static void queryWithFloat(DatabaseClient dbClient) { + float exampleFloat = 0.8f; + Statement statement = + Statement.newBuilder( + "SELECT VenueId, VenueName, PopularityScore FROM Venues " + + "WHERE PopularityScore > @popularityScore") + .bind("popularityScore") + .to(exampleFloat) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %f\n", + resultSet.getLong("VenueId"), + resultSet.getString("VenueName"), + resultSet.getDouble("PopularityScore")); + } + } + } + // [END spanner_query_with_float_parameter] + + // [START spanner_query_with_int_parameter] + static void queryWithInt(DatabaseClient dbClient) { + long exampleInt = 3000; + Statement statement = + Statement.newBuilder( + "SELECT VenueId, VenueName, Capacity FROM Venues " + "WHERE Capacity >= @capacity") + .bind("capacity") + .to(exampleInt) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %d\n", + resultSet.getLong("VenueId"), + resultSet.getString("VenueName"), + resultSet.getLong("Capacity")); + } + } + } + // [END spanner_query_with_int_parameter] + + // [START spanner_query_with_string_parameter] + static void queryWithString(DatabaseClient dbClient) { + String exampleString = "Venue 42"; + Statement statement = + Statement.newBuilder( + "SELECT VenueId, VenueName FROM Venues " + "WHERE VenueName = @venueName") + .bind("venueName") + .to(exampleString) + .build(); + try (ResultSet resultSet = 
dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s\n", resultSet.getLong("VenueId"), resultSet.getString("VenueName")); + } + } + } + // [END spanner_query_with_string_parameter] + + // [START spanner_query_with_timestamp_parameter] + static void queryWithTimestampParameter(DatabaseClient dbClient) { + Instant exampleTimestamp = Instant.now(); + Statement statement = + Statement.newBuilder( + "SELECT VenueId, VenueName, LastUpdateTime FROM Venues " + + "WHERE LastUpdateTime < @lastUpdateTime") + .bind("lastUpdateTime") + .to(exampleTimestamp.toString()) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong("VenueId"), + resultSet.getString("VenueName"), + resultSet.getTimestamp("LastUpdateTime")); + } + } + } + // [END spanner_query_with_timestamp_parameter] + + // [START spanner_query_with_numeric_parameter] + static void queryWithNumeric(DatabaseClient dbClient) { + BigDecimal exampleNumeric = new BigDecimal("300000"); + Statement statement = + Statement.newBuilder( + "SELECT VenueId, VenueName, Revenue\n" + + "FROM Venues\n" + + "WHERE Revenue >= @revenue") + .bind("revenue") + .to(exampleNumeric) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s%n", + resultSet.getLong("VenueId"), + resultSet.getString("VenueName"), + resultSet.getBigDecimal("Revenue")); + } + } + } + // [END spanner_query_with_numeric_parameter] + + // [START spanner_create_client_with_query_options] + static void clientWithQueryOptions(DatabaseId db) { + SpannerOptions options = + SpannerOptions.newBuilder() + .setDefaultQueryOptions( + db, QueryOptions + .newBuilder() + .setOptimizerVersion("1") + // The list of available statistics packages can be found by querying the + // "INFORMATION_SCHEMA.SPANNER_STATISTICS" 
table. + .setOptimizerStatisticsPackage("latest") + .build()) + .build(); + Spanner spanner = options.getService(); + DatabaseClient dbClient = spanner.getDatabaseClient(db); + try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery(Statement.of("SELECT SingerId, AlbumId, AlbumTitle FROM Albums"))) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", resultSet.getLong(0), resultSet.getLong(1), resultSet.getString(2)); + } + } + } + // [END spanner_create_client_with_query_options] + + // [START spanner_query_with_query_options] + static void queryWithQueryOptions(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() + .executeQuery( + Statement + .newBuilder("SELECT SingerId, AlbumId, AlbumTitle FROM Albums") + .withQueryOptions(QueryOptions + .newBuilder() + .setOptimizerVersion("1") + // The list of available statistics packages can be found by querying the + // "INFORMATION_SCHEMA.SPANNER_STATISTICS" table. + .setOptimizerStatisticsPackage("latest") + .build()) + .build())) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", resultSet.getLong(0), resultSet.getLong(1), resultSet.getString(2)); + } + } + } + // [END spanner_query_with_query_options] + + // [START spanner_create_backup] + static void createBackup(DatabaseAdminClient dbAdminClient, DatabaseId databaseId, + BackupId backupId, Timestamp versionTime) { + // Set expire time to 14 days from now. + Timestamp expireTime = Timestamp.ofTimeMicroseconds(TimeUnit.MICROSECONDS.convert( + System.currentTimeMillis() + TimeUnit.DAYS.toMillis(14), TimeUnit.MILLISECONDS)); + Backup backup = + dbAdminClient + .newBackupBuilder(backupId) + .setDatabase(databaseId) + .setExpireTime(expireTime) + .setVersionTime(versionTime) + .build(); + // Initiate the request which returns an OperationFuture. 
+ System.out.println("Creating backup [" + backup.getId() + "]..."); + OperationFuture op = backup.create(); + try { + // Wait for the backup operation to complete. + backup = op.get(); + System.out.println("Created backup [" + backup.getId() + "]"); + } catch (ExecutionException e) { + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + + // Reload the metadata of the backup from the server. + backup = backup.reload(); + System.out.println( + String.format( + "Backup %s of size %d bytes was created at %s for version of database at %s", + backup.getId().getName(), + backup.getSize(), + LocalDateTime.ofEpochSecond( + backup.getProto().getCreateTime().getSeconds(), + backup.getProto().getCreateTime().getNanos(), + OffsetDateTime.now().getOffset()), + LocalDateTime.ofEpochSecond( + backup.getProto().getVersionTime().getSeconds(), + backup.getProto().getVersionTime().getNanos(), + OffsetDateTime.now().getOffset()) + )); + } + // [END spanner_create_backup] + + // [START spanner_cancel_backup_create] + static void cancelCreateBackup( + DatabaseAdminClient dbAdminClient, DatabaseId databaseId, BackupId backupId) { + // Set expire time to 14 days from now. + Timestamp expireTime = Timestamp.ofTimeMicroseconds(TimeUnit.MICROSECONDS.convert( + System.currentTimeMillis() + TimeUnit.DAYS.toMillis(14), TimeUnit.MILLISECONDS)); + + // Create a backup instance. + Backup backup = + dbAdminClient + .newBackupBuilder(backupId) + .setDatabase(databaseId) + .setExpireTime(expireTime) + .build(); + // Start the creation of a backup. + System.out.println("Creating backup [" + backup.getId() + "]..."); + OperationFuture op = backup.create(); + try { + // Try to cancel the backup operation. + System.out.println("Cancelling create backup operation for [" + backup.getId() + "]..."); + dbAdminClient.cancelOperation(op.getName()); + // Get a polling future for the running operation. 
This future will regularly poll the server + // for the current status of the backup operation. + RetryingFuture pollingFuture = op.getPollingFuture(); + // Wait for the operation to finish. + // isDone will return true when the operation is complete, regardless of whether it was + // successful or not. + while (!pollingFuture.get().isDone()) { + System.out.println("Waiting for the cancelled backup operation to finish..."); + Thread.sleep(TimeUnit.MILLISECONDS.convert(5, TimeUnit.SECONDS)); + } + if (pollingFuture.get().getErrorCode() == null) { + // Backup was created before it could be cancelled. Delete the backup. + backup.delete(); + System.out.println("Backup operation for [" + backup.getId() + + "] successfully finished before it could be cancelled"); + } else if (pollingFuture.get().getErrorCode().getCode() == StatusCode.Code.CANCELLED) { + System.out.println("Backup operation for [" + backup.getId() + "] successfully cancelled"); + } + } catch (ExecutionException e) { + throw SpannerExceptionFactory.newSpannerException(e.getCause()); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_cancel_backup_create] + + // [START spanner_list_backup_operations] + static void listBackupOperations( + InstanceAdminClient instanceAdminClient, DatabaseId databaseId, BackupId backupId) { + Instance instance = instanceAdminClient.getInstance(databaseId.getInstanceId().getInstance()); + // Get create backup operations for the sample database. 
+ Timestamp last24Hours = Timestamp.ofTimeSecondsAndNanos(TimeUnit.SECONDS.convert( + TimeUnit.HOURS.convert(Timestamp.now().getSeconds(), TimeUnit.SECONDS) - 24, + TimeUnit.HOURS), 0); + String filter = + String.format( + "(metadata.@type:type.googleapis.com/" + + "google.spanner.admin.database.v1.CreateBackupMetadata) " + + "AND (metadata.database:%s)", + databaseId.getName()); + Page createBackupOperations = instance.listBackupOperations( + Options.filter(filter)); + System.out.println("Create Backup Operations:"); + for (Operation op : createBackupOperations.iterateAll()) { + try { + CreateBackupMetadata metadata = op.getMetadata().unpack(CreateBackupMetadata.class); + System.out.println( + String.format( + "Backup %s on database %s pending: %d%% complete", + metadata.getName(), + metadata.getDatabase(), + metadata.getProgress().getProgressPercent())); + } catch (InvalidProtocolBufferException e) { + // The returned operation does not contain CreateBackupMetadata. + System.err.println(e.getMessage()); + } + } + // Get copy backup operations for the sample database. + filter = + String.format( + "(metadata.@type:type.googleapis.com/" + + "google.spanner.admin.database.v1.CopyBackupMetadata) " + + "AND (metadata.source_backup:%s)", + backupId.getName()); + Page copyBackupOperations = instance.listBackupOperations(Options.filter(filter)); + System.out.println("Copy Backup Operations:"); + for (Operation op : copyBackupOperations.iterateAll()) { + try { + CopyBackupMetadata copyBackupMetadata = + op.getMetadata().unpack(CopyBackupMetadata.class); + System.out.println( + String.format( + "Copy Backup %s on backup %s pending: %d%% complete", + copyBackupMetadata.getName(), + copyBackupMetadata.getSourceBackup(), + copyBackupMetadata.getProgress().getProgressPercent())); + } catch (InvalidProtocolBufferException e) { + // The returned operation does not contain CopyBackupMetadata. 
+ System.err.println(e.getMessage()); + } + } + } + // [END spanner_list_backup_operations] + + // [START spanner_list_database_operations] + static void listDatabaseOperations( + InstanceAdminClient instanceAdminClient, + DatabaseAdminClient dbAdminClient, + InstanceId instanceId) { + Instance instance = instanceAdminClient.getInstance(instanceId.getInstance()); + // Get optimize restored database operations. + Timestamp last24Hours = Timestamp.ofTimeSecondsAndNanos(TimeUnit.SECONDS.convert( + TimeUnit.HOURS.convert(Timestamp.now().getSeconds(), TimeUnit.SECONDS) - 24, + TimeUnit.HOURS), 0); + String filter = String.format("(metadata.@type:type.googleapis.com/" + + "google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata) AND " + + "(metadata.progress.start_time > \"%s\")", last24Hours); + for (Operation op : instance.listDatabaseOperations(Options.filter(filter)).iterateAll()) { + try { + OptimizeRestoredDatabaseMetadata metadata = + op.getMetadata().unpack(OptimizeRestoredDatabaseMetadata.class); + System.out.println(String.format( + "Database %s restored from backup is %d%% optimized", + metadata.getName(), + metadata.getProgress().getProgressPercent())); + } catch (InvalidProtocolBufferException e) { + // The returned operation does not contain OptimizeRestoredDatabaseMetadata. + System.err.println(e.getMessage()); + } + } + } + // [END spanner_list_database_operations] + + // [START spanner_list_backups] + static void listBackups( + InstanceAdminClient instanceAdminClient, DatabaseId databaseId, BackupId backupId) { + Instance instance = instanceAdminClient.getInstance(databaseId.getInstanceId().getInstance()); + // List all backups. + System.out.println("All backups:"); + for (Backup backup : instance.listBackups().iterateAll()) { + System.out.println(backup); + } + + // List all backups with a specific name. 
+ System.out.println( + String.format("All backups with backup name containing \"%s\":", backupId.getBackup())); + for (Backup backup : instance.listBackups( + Options.filter(String.format("name:%s", backupId.getBackup()))).iterateAll()) { + System.out.println(backup); + } + + // List all backups for databases whose name contains a certain text. + System.out.println( + String.format( + "All backups for databases with a name containing \"%s\":", + databaseId.getDatabase())); + for (Backup backup : instance.listBackups( + Options.filter(String.format("database:%s", databaseId.getDatabase()))).iterateAll()) { + System.out.println(backup); + } + + // List all backups that expire before a certain time. + Timestamp expireTime = Timestamp.ofTimeMicroseconds(TimeUnit.MICROSECONDS.convert( + System.currentTimeMillis() + TimeUnit.DAYS.toMillis(30), TimeUnit.MILLISECONDS)); + System.out.println(String.format("All backups that expire before %s:", expireTime.toString())); + for (Backup backup : + instance.listBackups( + Options.filter(String.format("expire_time < \"%s\"", expireTime.toString()))) + .iterateAll()) { + System.out.println(backup); + } + + // List all backups with size greater than a certain number of bytes. + System.out.println("All backups with size greater than 100 bytes:"); + for (Backup backup : instance.listBackups(Options.filter("size_bytes > 100")).iterateAll()) { + System.out.println(backup); + } + + // List all backups with a create time after a certain timestamp and that are also ready. 
+ Timestamp createTime = Timestamp.ofTimeMicroseconds(TimeUnit.MICROSECONDS.convert( + System.currentTimeMillis() - TimeUnit.DAYS.toMillis(1), TimeUnit.MILLISECONDS)); + System.out.println( + String.format( + "All databases created after %s and that are ready:", createTime.toString())); + for (Backup backup : + instance + .listBackups(Options.filter( + String.format("create_time >= \"%s\" AND state:READY", createTime.toString()))) + .iterateAll()) { + System.out.println(backup); + } + + // List backups using pagination. + System.out.println("All backups, listed using pagination:"); + Page page = instance.listBackups(Options.pageSize(10)); + while (true) { + for (Backup backup : page.getValues()) { + System.out.println(backup); + } + if (!page.hasNextPage()) { + break; + } + page = page.getNextPage(); + } + } + // [END spanner_list_backups] + + // [START spanner_restore_backup] + static void restoreBackup( + DatabaseAdminClient dbAdminClient, + BackupId backupId, + DatabaseId sourceDatabaseId, + DatabaseId restoreToDatabase) { + Backup backup = dbAdminClient.newBackupBuilder(backupId).build(); + // Initiate the request which returns an OperationFuture. + System.out.println(String.format( + "Restoring backup [%s] to database [%s]...", + backup.getId().toString(), + restoreToDatabase.toString())); + try { + OperationFuture op = backup.restore(restoreToDatabase); + // Wait until the database has been restored. + Database db = op.get(); + // Refresh database metadata and get the restore info. 
+ RestoreInfo restore = db.reload().getRestoreInfo(); + Timestamp versionTime = Timestamp.fromProto(restore + .getProto() + .getBackupInfo() + .getVersionTime()); + System.out.println( + "Restored database [" + + restore.getSourceDatabase().getName() + + "] from [" + + restore.getBackup().getName() + + "] with version time [" + versionTime + "]"); + } catch (ExecutionException e) { + throw SpannerExceptionFactory.newSpannerException(e.getCause()); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + // [END spanner_restore_backup] + + // [START spanner_update_backup] + static void updateBackup(DatabaseAdminClient dbAdminClient, BackupId backupId) { + // Get current backup metadata. + Backup backup = dbAdminClient.newBackupBuilder(backupId).build().reload(); + // Add 30 days to the expire time. + // Expire time must be within 366 days of the create time of the backup. + Timestamp expireTime = + Timestamp.ofTimeMicroseconds( + TimeUnit.SECONDS.toMicros(backup.getExpireTime().getSeconds()) + + TimeUnit.NANOSECONDS.toMicros(backup.getExpireTime().getNanos()) + + TimeUnit.DAYS.toMicros(30L)); + // New Expire Time must be less than Max Expire Time + expireTime = expireTime.compareTo(backup.getMaxExpireTime()) + < 0 ? expireTime : backup.getMaxExpireTime(); + int timeDiff = expireTime.compareTo(backup.getExpireTime()); + Timestamp newExpireTime = (timeDiff < 0) ? expireTime : backup.getExpireTime(); + + System.out.println(String.format( + "Updating expire time of backup [%s] to %s...", + backupId.toString(), + LocalDateTime.ofEpochSecond( + expireTime.getSeconds(), + expireTime.getNanos(), + OffsetDateTime.now().getOffset()).toString())); + + // Update expire time. 
+ backup = backup.toBuilder().setExpireTime(expireTime).build(); + backup.updateExpireTime(); + System.out.println("Updated backup [" + backupId + "]"); + } + // [END spanner_update_backup] + + // [START spanner_delete_backup] + static void deleteBackup(DatabaseAdminClient dbAdminClient, BackupId backupId) { + Backup backup = dbAdminClient.newBackupBuilder(backupId).build(); + // Delete the backup. + System.out.println("Deleting backup [" + backupId + "]..."); + backup.delete(); + // Verify that the backup is deleted. + if (backup.exists()) { + System.out.println("Delete backup [" + backupId + "] failed"); + throw new RuntimeException("Delete backup [" + backupId + "] failed"); + } else { + System.out.println("Deleted backup [" + backupId + "]"); + } + } + // [END spanner_delete_backup] + + static void run( + DatabaseClient dbClient, + DatabaseAdminClient dbAdminClient, + InstanceAdminClient instanceAdminClient, + String command, + DatabaseId database, + BackupId backup) { + switch (command) { + case "createdatabase": + createDatabase(dbAdminClient, database); + break; + case "write": + writeExampleData(dbClient); + break; + case "delete": + deleteExampleData(dbClient); + break; + case "query": + query(dbClient); + break; + case "read": + read(dbClient); + break; + case "addmarketingbudget": + addMarketingBudget(dbAdminClient, database); + break; + case "update": + update(dbClient); + break; + case "writetransaction": + writeWithTransaction(dbClient); + break; + case "querymarketingbudget": + queryMarketingBudget(dbClient); + break; + case "addindex": + addIndex(dbAdminClient, database); + break; + case "readindex": + readUsingIndex(dbClient); + break; + case "queryindex": + queryUsingIndex(dbClient); + break; + case "addstoringindex": + addStoringIndex(dbAdminClient, database); + break; + case "readstoringindex": + readStoringIndex(dbClient); + break; + case "readonlytransaction": + readOnlyTransaction(dbClient); + break; + case "readstaledata": + 
readStaleData(dbClient); + break; + case "addcommittimestamp": + addCommitTimestamp(dbAdminClient, database); + break; + case "updatewithtimestamp": + updateWithTimestamp(dbClient); + break; + case "querywithtimestamp": + queryMarketingBudgetWithTimestamp(dbClient); + break; + case "createtablewithtimestamp": + createTableWithTimestamp(dbAdminClient, database); + break; + case "writewithtimestamp": + writeExampleDataWithTimestamp(dbClient); + break; + case "querysingerstable": + querySingersTable(dbClient); + break; + case "queryperformancestable": + queryPerformancesTable(dbClient); + break; + case "writestructdata": + writeStructExampleData(dbClient); + break; + case "querywithstruct": + queryWithStruct(dbClient); + break; + case "querywitharrayofstruct": + queryWithArrayOfStruct(dbClient); + break; + case "querystructfield": + queryStructField(dbClient); + break; + case "querynestedstructfield": + queryNestedStructField(dbClient); + break; + case "insertusingdml": + insertUsingDml(dbClient); + break; + case "updateusingdml": + updateUsingDml(dbClient); + break; + case "deleteusingdml": + deleteUsingDml(dbClient); + break; + case "updateusingdmlwithtimestamp": + updateUsingDmlWithTimestamp(dbClient); + break; + case "writeandreadusingdml": + writeAndReadUsingDml(dbClient); + break; + case "updateusingdmlwithstruct": + updateUsingDmlWithStruct(dbClient); + break; + case "writeusingdml": + writeUsingDml(dbClient); + break; + case "querywithparameter": + queryWithParameter(dbClient); + break; + case "writewithtransactionusingdml": + writeWithTransactionUsingDml(dbClient); + break; + case "updateusingpartitioneddml": + updateUsingPartitionedDml(dbClient); + break; + case "deleteusingpartitioneddml": + deleteUsingPartitionedDml(dbClient); + break; + case "updateusingbatchdml": + updateUsingBatchDml(dbClient); + break; + case "createtablewithdatatypes": + createTableWithDatatypes(dbAdminClient, database); + break; + case "writedatatypesdata": + 
writeDatatypesData(dbClient); + break; + case "querywitharray": + queryWithArray(dbClient); + break; + case "querywithbool": + queryWithBool(dbClient); + break; + case "querywithbytes": + queryWithBytes(dbClient); + break; + case "querywithdate": + queryWithDate(dbClient); + break; + case "querywithfloat": + queryWithFloat(dbClient); + break; + case "querywithint": + queryWithInt(dbClient); + break; + case "querywithstring": + queryWithString(dbClient); + break; + case "querywithtimestampparameter": + queryWithTimestampParameter(dbClient); + break; + case "querywithnumeric": + queryWithNumeric(dbClient); + break; + case "clientwithqueryoptions": + clientWithQueryOptions(database); + break; + case "querywithqueryoptions": + queryWithQueryOptions(dbClient); + break; + case "createbackup": + createBackup(dbAdminClient, database, backup, getVersionTime(dbClient)); + break; + case "cancelcreatebackup": + cancelCreateBackup( + dbAdminClient, + database, + BackupId.of(backup.getInstanceId(), backup.getBackup() + "_cancel")); + break; + case "listbackupoperations": + listBackupOperations(instanceAdminClient, database, backup); + break; + case "listdatabaseoperations": + listDatabaseOperations(instanceAdminClient, dbAdminClient, database.getInstanceId()); + break; + case "listbackups": + listBackups(instanceAdminClient, database, backup); + break; + case "restorebackup": + restoreBackup( + dbAdminClient, + backup, + database, + DatabaseId.of(database.getInstanceId(), createRestoredSampleDbId(database))); + break; + case "updatebackup": + updateBackup(dbAdminClient, backup); + break; + case "deletebackup": + deleteBackup(dbAdminClient, backup); + break; + default: + printUsageAndExit(); + } + } + + static Timestamp getVersionTime(DatabaseClient dbClient) { + // Generates a version time for the backup + Timestamp versionTime; + try (ResultSet resultSet = dbClient.singleUse() + .executeQuery(Statement.of("SELECT CURRENT_TIMESTAMP()"))) { + resultSet.next(); + versionTime = 
resultSet.getTimestamp(0); + } + return versionTime; + } + + static void printUsageAndExit() { + System.err.println("Usage:"); + System.err.println(" SpannerExample "); + System.err.println(""); + System.err.println("Examples:"); + System.err.println(" SpannerExample createdatabase my-instance example-db"); + System.err.println(" SpannerExample write my-instance example-db"); + System.err.println(" SpannerExample delete my-instance example-db"); + System.err.println(" SpannerExample query my-instance example-db"); + System.err.println(" SpannerExample read my-instance example-db"); + System.err.println(" SpannerExample addmarketingbudget my-instance example-db"); + System.err.println(" SpannerExample update my-instance example-db"); + System.err.println(" SpannerExample writetransaction my-instance example-db"); + System.err.println(" SpannerExample querymarketingbudget my-instance example-db"); + System.err.println(" SpannerExample addindex my-instance example-db"); + System.err.println(" SpannerExample readindex my-instance example-db"); + System.err.println(" SpannerExample queryindex my-instance example-db"); + System.err.println(" SpannerExample addstoringindex my-instance example-db"); + System.err.println(" SpannerExample readstoringindex my-instance example-db"); + System.err.println(" SpannerExample readonlytransaction my-instance example-db"); + System.err.println(" SpannerExample readstaledata my-instance example-db"); + System.err.println(" SpannerExample addcommittimestamp my-instance example-db"); + System.err.println(" SpannerExample updatewithtimestamp my-instance example-db"); + System.err.println(" SpannerExample querywithtimestamp my-instance example-db"); + System.err.println(" SpannerExample createtablewithtimestamp my-instance example-db"); + System.err.println(" SpannerExample writewithtimestamp my-instance example-db"); + System.err.println(" SpannerExample querysingerstable my-instance example-db"); + System.err.println(" SpannerExample 
queryperformancestable my-instance example-db"); + System.err.println(" SpannerExample writestructdata my-instance example-db"); + System.err.println(" SpannerExample querywithstruct my-instance example-db"); + System.err.println(" SpannerExample querywitharrayofstruct my-instance example-db"); + System.err.println(" SpannerExample querystructfield my-instance example-db"); + System.err.println(" SpannerExample querynestedstructfield my-instance example-db"); + System.err.println(" SpannerExample insertusingdml my-instance example-db"); + System.err.println(" SpannerExample updateusingdml my-instance example-db"); + System.err.println(" SpannerExample deleteusingdml my-instance example-db"); + System.err.println(" SpannerExample updateusingdmlwithtimestamp my-instance example-db"); + System.err.println(" SpannerExample writeandreadusingdml my-instance example-db"); + System.err.println(" SpannerExample updateusingdmlwithstruct my-instance example-db"); + System.err.println(" SpannerExample writeusingdml my-instance example-db"); + System.err.println(" SpannerExample querywithparameter my-instance example-db"); + System.err.println(" SpannerExample writewithtransactionusingdml my-instance example-db"); + System.err.println(" SpannerExample updateusingpartitioneddml my-instance example-db"); + System.err.println(" SpannerExample deleteusingpartitioneddml my-instance example-db"); + System.err.println(" SpannerExample updateusingbatchdml my-instance example-db"); + System.err.println(" SpannerExample createtablewithdatatypes my-instance example-db"); + System.err.println(" SpannerExample writedatatypesdata my-instance example-db"); + System.err.println(" SpannerExample querywitharray my-instance example-db"); + System.err.println(" SpannerExample querywithbool my-instance example-db"); + System.err.println(" SpannerExample querywithbytes my-instance example-db"); + System.err.println(" SpannerExample querywithdate my-instance example-db"); + System.err.println(" 
SpannerExample querywithfloat my-instance example-db"); + System.err.println(" SpannerExample querywithint my-instance example-db"); + System.err.println(" SpannerExample querywithstring my-instance example-db"); + System.err.println(" SpannerExample querywithtimestampparameter my-instance example-db"); + System.err.println(" SpannerExample clientwithqueryoptions my-instance example-db"); + System.err.println(" SpannerExample querywithqueryoptions my-instance example-db"); + System.err.println(" SpannerExample createbackup my-instance example-db"); + System.err.println(" SpannerExample listbackups my-instance example-db"); + System.err.println(" SpannerExample listbackupoperations my-instance example-db backup-id"); + System.err.println(" SpannerExample listdatabaseoperations my-instance example-db"); + System.err.println(" SpannerExample restorebackup my-instance example-db"); + System.exit(1); + } + + public static void main(String[] args) throws Exception { + if (args.length != 3 && args.length != 4) { + printUsageAndExit(); + } + // [START init_client] + SpannerOptions options = SpannerOptions.newBuilder().build(); + Spanner spanner = options.getService(); + try { + String command = args[0]; + DatabaseId db = DatabaseId.of(options.getProjectId(), args[1], args[2]); + // [END init_client] + // This will return the default project id based on the environment. + String clientProject = spanner.getOptions().getProjectId(); + if (!db.getInstanceId().getProject().equals(clientProject)) { + System.err.println( + "Invalid project specified. Project in the database id should match the" + + "project name set in the environment variable GOOGLE_CLOUD_PROJECT. Expected: " + + clientProject); + printUsageAndExit(); + } + // Generate a backup id for the sample database. 
+ String backupName = + String.format( + "%s_%02d", + db.getDatabase(), LocalDate.now().get(ChronoField.ALIGNED_WEEK_OF_YEAR)); + BackupId backup = BackupId.of(db.getInstanceId(), backupName); + if (args.length == 4) { + backupName = args[3]; + } + + // [START init_client] + DatabaseClient dbClient = spanner.getDatabaseClient(db); + DatabaseAdminClient dbAdminClient = spanner.getDatabaseAdminClient(); + InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient(); + // Use client here... + // [END init_client] + + run(dbClient, dbAdminClient, instanceAdminClient, command, db, backup); + // [START init_client] + } finally { + spanner.close(); + } + // [END init_client] + System.out.println("Closed client"); + } +} diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/UpdateDatabaseSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/UpdateDatabaseSample.java new file mode 100644 index 000000000000..5fcd9ab5edbe --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/UpdateDatabaseSample.java @@ -0,0 +1,66 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +// [START spanner_update_database] +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.DatabaseInfo.DatabaseField; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.spanner.admin.database.v1.UpdateDatabaseMetadata; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class UpdateDatabaseSample { + + static void updateDatabase() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + updateDatabase(projectId, instanceId, databaseId); + } + + static void updateDatabase(String projectId, String instanceId, String databaseId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + final DatabaseAdminClient databaseAdminClient = spanner.getDatabaseAdminClient(); + + DatabaseId dbId = DatabaseId.of(projectId, instanceId, databaseId); + Database databaseToUpdate = + databaseAdminClient.newDatabaseBuilder(dbId).enableDropProtection().build(); + OperationFuture operation = + databaseAdminClient.updateDatabase(databaseToUpdate, DatabaseField.DROP_PROTECTION); + System.out.printf("Waiting for update operation for %s to complete...\n", dbId); + Database updatedDb = operation.get(5, TimeUnit.MINUTES); + System.out.printf("Updated database %s.\n", updatedDb.getId().getName()); + } catch (ExecutionException | TimeoutException e) { + // If the operation failed during execution, expose the cause. 
+ throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } +} +// [END spanner_update_database] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/UpdateDatabaseWithDefaultLeaderSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/UpdateDatabaseWithDefaultLeaderSample.java new file mode 100644 index 000000000000..897139909338 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/UpdateDatabaseWithDefaultLeaderSample.java @@ -0,0 +1,75 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +//[START spanner_update_database_with_default_leader] + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import java.util.Collections; +import java.util.concurrent.ExecutionException; + +public class UpdateDatabaseWithDefaultLeaderSample { + + static void updateDatabaseWithDefaultLeader() { + // TODO(developer): Replace these variables before running the sample. + final String projectId = "my-project"; + final String instanceId = "my-instance"; + final String databaseId = "my-database"; + final String defaultLeader = "my-default-leader"; + updateDatabaseWithDefaultLeader(projectId, instanceId, databaseId, defaultLeader); + } + + static void updateDatabaseWithDefaultLeader( + String projectId, String instanceId, String databaseId, String defaultLeader) { + try (Spanner spanner = SpannerOptions + .newBuilder() + .setProjectId(projectId) + .build() + .getService()) { + final DatabaseAdminClient databaseAdminClient = spanner.getDatabaseAdminClient(); + final OperationFuture operation = databaseAdminClient + .updateDatabaseDdl( + instanceId, + databaseId, + Collections.singletonList( + String.format( + "ALTER DATABASE `%s` SET OPTIONS (default_leader = '%s')", + databaseId, + defaultLeader + ) + ), + null + ); + operation.get(); + System.out.println("Updated default leader to " + defaultLeader); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. 
+ throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } +} +//[END spanner_update_database_with_default_leader] diff --git a/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/UpdateInstanceConfigSample.java b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/UpdateInstanceConfigSample.java new file mode 100644 index 000000000000..8110e3782c71 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/java/com/example/spanner/admin/archived/UpdateInstanceConfigSample.java @@ -0,0 +1,73 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +// [START spanner_update_instance_config] +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.InstanceAdminClient; +import com.google.cloud.spanner.InstanceConfig; +import com.google.cloud.spanner.InstanceConfigId; +import com.google.cloud.spanner.InstanceConfigInfo; +import com.google.cloud.spanner.InstanceConfigInfo.InstanceConfigField; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +class UpdateInstanceConfigSample { + static void updateInstanceConfig() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceConfigId = "custom-instance-config"; + updateInstanceConfig(projectId, instanceConfigId); + } + + static void updateInstanceConfig(String projectId, String instanceConfigId) { + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + final InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient(); + InstanceConfigInfo instanceConfigInfo = + InstanceConfig.newBuilder(InstanceConfigId.of(projectId, instanceConfigId)) + .setDisplayName("updated custom instance config") + .addLabel("updated", "true") + .build(); + final OperationFuture operation = + instanceAdminClient.updateInstanceConfig( + instanceConfigInfo, + ImmutableList.of(InstanceConfigField.DISPLAY_NAME, InstanceConfigField.LABELS)); + try { + System.out.printf("Waiting for update operation on %s to complete...\n", instanceConfigId); + InstanceConfig instanceConfig = operation.get(5, TimeUnit.MINUTES); + System.out.printf( + "Updated instance configuration %s with new display name 
%s\n", + instanceConfig.getId(), instanceConfig.getDisplayName()); + } catch (ExecutionException | TimeoutException e) { + System.out.printf( + "Error: Updating instance config %s failed with error message %s\n", + instanceConfigInfo.getId(), e.getMessage()); + e.printStackTrace(); + } catch (InterruptedException e) { + System.out.println( + "Error: Waiting for updateInstanceConfig operation to finish was interrupted"); + } + } + } +} +// [END spanner_update_instance_config] diff --git a/java-spanner/samples/snippets/src/main/resources/META-INF/services/io.grpc.LoadBalancerProvider b/java-spanner/samples/snippets/src/main/resources/META-INF/services/io.grpc.LoadBalancerProvider new file mode 100644 index 000000000000..bbc367f8fc5e --- /dev/null +++ b/java-spanner/samples/snippets/src/main/resources/META-INF/services/io.grpc.LoadBalancerProvider @@ -0,0 +1 @@ +io.grpc.internal.PickFirstLoadBalancerProvider diff --git a/java-spanner/samples/snippets/src/main/resources/com/example/spanner/README.md b/java-spanner/samples/snippets/src/main/resources/com/example/spanner/README.md new file mode 100644 index 000000000000..6dc4f7aa59f4 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/resources/com/example/spanner/README.md @@ -0,0 +1,6 @@ +#### To generate SingerProto.java and descriptors.pb file from singer.proto using `protoc` +```shell +cd samples/snippets/src/main/resources/ +protoc --proto_path=com/example/spanner/ --include_imports --descriptor_set_out=com/example/spanner/descriptors.pb + --java_out=. 
com/example/spanner/singer.proto +``` diff --git a/java-spanner/samples/snippets/src/main/resources/com/example/spanner/descriptors.pb b/java-spanner/samples/snippets/src/main/resources/com/example/spanner/descriptors.pb new file mode 100644 index 000000000000..dd9cf8d43440 Binary files /dev/null and b/java-spanner/samples/snippets/src/main/resources/com/example/spanner/descriptors.pb differ diff --git a/java-spanner/samples/snippets/src/main/resources/com/example/spanner/singer.proto b/java-spanner/samples/snippets/src/main/resources/com/example/spanner/singer.proto new file mode 100644 index 000000000000..12b213f3fae2 --- /dev/null +++ b/java-spanner/samples/snippets/src/main/resources/com/example/spanner/singer.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package examples.spanner.music; + +option java_package = "com.example.spanner"; +option java_outer_classname = "SingerProto"; +option java_multiple_files = false; + +message SingerInfo { + optional int64 singer_id = 1; + optional string birth_date = 2; + optional string nationality = 3; + optional Genre genre = 4; +} + +enum Genre { + POP = 0; + JAZZ = 1; + FOLK = 2; + ROCK = 3; +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/AlterTableWithForeignKeyDeleteCascadeSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/AlterTableWithForeignKeyDeleteCascadeSampleIT.java new file mode 100644 index 000000000000..a22b5ab37509 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/AlterTableWithForeignKeyDeleteCascadeSampleIT.java @@ -0,0 +1,67 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static org.junit.Assert.assertTrue; + +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.InstanceName; +import java.util.Arrays; +import java.util.concurrent.TimeUnit; +import org.junit.Test; + +public class AlterTableWithForeignKeyDeleteCascadeSampleIT extends SampleTestBaseV2 { + + @Test + public void testAlterTableWithForeignKeyDeleteCascade() throws Exception { + + // Creates database + final String databaseId = idGenerator.generateDatabaseId(); + final CreateDatabaseRequest request = + CreateDatabaseRequest.newBuilder() + .setCreateStatement("CREATE DATABASE `" + databaseId + "`") + .setParent(InstanceName.of(projectId, instanceId).toString()) + .addAllExtraStatements(Arrays.asList( + "CREATE TABLE Customers (\n" + + " CustomerId INT64 NOT NULL,\n" + + " CustomerName STRING(62) NOT NULL,\n" + + " ) PRIMARY KEY (CustomerId)", + "CREATE TABLE ShoppingCarts (\n" + + " CartId INT64 NOT NULL,\n" + + " CustomerId INT64 NOT NULL,\n" + + " CustomerName STRING(62) NOT NULL,\n" + + " CONSTRAINT FKShoppingCartsCustomerId" + + " FOREIGN KEY (CustomerId)\n" + + " REFERENCES Customers (CustomerId)\n" + + " ) PRIMARY KEY (CartId)\n")).build(); + databaseAdminClient.createDatabaseAsync(request).get(5, TimeUnit.MINUTES); + + // Runs sample + final String out = + SampleRunner.runSample( + () -> + AlterTableWithForeignKeyDeleteCascadeSample.alterForeignKeyDeleteCascadeConstraint( + projectId, instanceId, databaseId)); + + assertTrue( + "Expected to have 
created database " + + databaseId + + " with tables containing " + + "foreign key constraints.", + out.contains("Altered ShoppingCarts table " + "with FKShoppingCartsCustomerName")); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/AsyncExamplesIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/AsyncExamplesIT.java new file mode 100644 index 000000000000..9277d7d0d9cd --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/AsyncExamplesIT.java @@ -0,0 +1,241 @@ +/* + * Copyright 2020 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.example.spanner.SampleRunner.runSample; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.common.collect.ImmutableList; +import java.util.Arrays; +import java.util.List; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Integration tests for Cloud Spanner Async API examples. 
+ */ +@RunWith(JUnit4.class) +@SuppressWarnings("checkstyle:AbbreviationAsWordInName") +public class AsyncExamplesIT extends SampleTestBase { + + private static DatabaseId databaseId; + + @BeforeClass + public static void createTestDatabase() throws Exception { + final String database = idGenerator.generateDatabaseId(); + databaseId = DatabaseId.of(projectId, instanceId, database); + databaseAdminClient + .createDatabase( + instanceId, + database, + ImmutableList.of( + "CREATE TABLE Singers (" + + " SingerId INT64 NOT NULL," + + " FirstName STRING(1024)," + + " LastName STRING(1024)," + + " SingerInfo BYTES(MAX)" + + ") PRIMARY KEY (SingerId)", + "CREATE TABLE Albums (" + + " SingerId INT64 NOT NULL," + + " AlbumId INT64 NOT NULL," + + " AlbumTitle STRING(MAX)," + + " MarketingBudget INT64" + + ") PRIMARY KEY (SingerId, AlbumId)," + + " INTERLEAVE IN PARENT Singers ON DELETE CASCADE", + "CREATE INDEX AlbumsByAlbumTitle ON Albums(AlbumTitle)")) + .get(); + } + + static class Singer { + + final long singerId; + final String firstName; + final String lastName; + + Singer(long singerId, String firstName, String lastName) { + this.singerId = singerId; + this.firstName = firstName; + this.lastName = lastName; + } + } + + static class Album { + + final long singerId; + final long albumId; + final String albumTitle; + final Long marketingBudget; + + Album(long singerId, long albumId, String albumTitle, Long marketingBudget) { + this.singerId = singerId; + this.albumId = albumId; + this.albumTitle = albumTitle; + this.marketingBudget = marketingBudget; + } + } + + private static final List TEST_SINGERS = + Arrays.asList( + new Singer(1, "Marc", "Richards"), + new Singer(2, "Catalina", "Smith"), + new Singer(3, "Alice", "Trentor"), + new Singer(4, "Lea", "Martin"), + new Singer(5, "David", "Lomond")); + private static final List ALBUMS = + Arrays.asList( + new Album(1, 1, "Total Junk", 300_000L), + new Album(1, 2, "Go, Go, Go", 400_000L), + new Album(2, 1, "Green", 
150_000L), + new Album(2, 2, "Forever Hold Your Peace", 350_000L), + new Album(2, 3, "Terrified", null)); + + @Before + public void insertTestData() { + DatabaseClient client = spanner.getDatabaseClient(databaseId); + ImmutableList.Builder mutations = + ImmutableList.builderWithExpectedSize(TEST_SINGERS.size()); + for (Singer singer : TEST_SINGERS) { + mutations.add( + Mutation.newInsertBuilder("Singers") + .set("SingerId") + .to(singer.singerId) + .set("FirstName") + .to(singer.firstName) + .set("LastName") + .to(singer.lastName) + .build()); + } + for (Album album : ALBUMS) { + mutations.add( + Mutation.newInsertBuilder("Albums") + .set("SingerId") + .to(album.singerId) + .set("AlbumId") + .to(album.albumId) + .set("AlbumTitle") + .to(album.albumTitle) + .set("MarketingBudget") + .to(album.marketingBudget) + .build()); + } + client.write(mutations.build()); + } + + private void assertSingersOutput(String out) { + assertThat(out).contains("1 Marc Richard"); + assertThat(out).contains("2 Catalina Smith"); + assertThat(out).contains("3 Alice Trentor"); + assertThat(out).contains("4 Lea Martin"); + assertThat(out).contains("5 David Lomond"); + } + + private void assertAlbumsOutput(String out) { + assertThat(out).contains("1 1 Total Junk"); + assertThat(out).contains("1 2 Go, Go, Go"); + assertThat(out).contains("2 1 Green"); + assertThat(out).contains("2 2 Forever Hold Your Peace"); + assertThat(out).contains("2 3 Terrified"); + } + + @After + public void removeTestData() { + DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write(Arrays.asList(Mutation.delete("Singers", KeySet.all()))); + } + + @Test + public void asyncQuery_shouldReturnData() throws Exception { + String out = runSample( + () -> AsyncQueryExample.asyncQuery(spanner.getDatabaseClient(databaseId))); + assertAlbumsOutput(out); + } + + @Test + public void asyncQueryToListAsync_shouldReturnData() + throws Exception { + String out = runSample( + () -> 
AsyncQueryToListAsyncExample.asyncQueryToList(spanner.getDatabaseClient(databaseId))); + assertAlbumsOutput(out); + } + + @Test + public void asyncRead_shouldReturnData() + throws Exception { + String out = runSample(() -> AsyncReadExample.asyncRead(spanner.getDatabaseClient(databaseId))); + assertAlbumsOutput(out); + } + + @Test + public void asyncReadUsingIndex_shouldReturnDataInCorrectOrder() throws Exception { + String out = runSample(() -> AsyncReadUsingIndexExample + .asyncReadUsingIndex(spanner.getDatabaseClient(databaseId))); + assertThat(out) + .contains( + "2 Forever Hold Your Peace\n" + + "2 Go, Go, Go\n" + + "1 Green\n" + + "3 Terrified\n" + + "1 Total Junk"); + } + + @Test + public void asyncReadOnlyTransaction_shouldReturnData() throws Exception { + String out = runSample(() -> AsyncReadOnlyTransactionExample + .asyncReadOnlyTransaction(spanner.getDatabaseClient(databaseId))); + assertAlbumsOutput(out); + assertSingersOutput(out); + } + + @Test + public void asyncDml_shouldInsertRows() throws Exception { + String out = runSample(() -> AsyncDmlExample.asyncDml(spanner.getDatabaseClient(databaseId))); + assertThat(out).contains("4 records inserted."); + } + + @Test + public void asyncRunner_shouldUpdateRows() throws Exception { + String out = runSample( + () -> AsyncRunnerExample.asyncRunner(spanner.getDatabaseClient(databaseId))); + assertThat(out).contains("2 records updated."); + } + + @Test + public void asyncTransactionManager_shouldUpdateRows() throws Exception { + String out = runSample(() -> AsyncTransactionManagerExample + .asyncTransactionManager(spanner.getDatabaseClient(databaseId))); + assertThat(out).contains("2 records updated."); + } + + @Test + public void asyncReadRow_shouldPrintRow() throws Exception { + String out = runSample( + () -> AsyncReadRowExample.asyncReadRow(spanner.getDatabaseClient(databaseId))); + assertThat(out).contains("1 1 Total Junk"); + assertThat(out).doesNotContain("1 2 Go, Go, Go"); + 
assertThat(out).doesNotContain("2 1 Green"); + assertThat(out).doesNotContain("2 2 Forever Hold Your Peace"); + assertThat(out).doesNotContain("2 3 Terrified"); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/BatchWriteAtLeastOnceSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/BatchWriteAtLeastOnceSampleIT.java new file mode 100644 index 000000000000..55f28cda41ab --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/BatchWriteAtLeastOnceSampleIT.java @@ -0,0 +1,60 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.DatabaseId; +import com.google.common.collect.ImmutableList; +import java.util.concurrent.ExecutionException; +import org.junit.Before; +import org.junit.Test; + +public class BatchWriteAtLeastOnceSampleIT extends SampleTestBase { + private static String databaseId; + + @Before + public void setup() throws ExecutionException, InterruptedException { + databaseId = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + databaseAdminClient + .newDatabaseBuilder(DatabaseId.of(projectId, instanceId, databaseId)) + .build(), + ImmutableList.of( + "CREATE TABLE Singers (" + + " SingerId INT64 NOT NULL," + + " FirstName STRING(1024)," + + " LastName STRING(1024)" + + ") PRIMARY KEY (SingerId)", + "CREATE TABLE Albums (" + + " SingerId INT64 NOT NULL," + + " AlbumId INT64 NOT NULL," + + " AlbumTitle STRING(1024)" + + ") PRIMARY KEY (SingerId, AlbumId)," + + " INTERLEAVE IN PARENT Singers ON DELETE CASCADE")) + .get(); + } + + @Test + public void testBatchWriteAtLeastOnce() throws Exception { + final String out = + SampleRunner.runSample(() -> BatchWriteAtLeastOnceSample.batchWriteAtLeastOnce( + projectId, instanceId, databaseId)); + assertTrue(out.contains("have been applied with commit timestamp")); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/ChangeStreamsTxnExclusionSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/ChangeStreamsTxnExclusionSampleIT.java new file mode 100644 index 000000000000..fecf8189f462 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/ChangeStreamsTxnExclusionSampleIT.java @@ -0,0 +1,98 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.example.spanner.SampleRunner.runSample; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.common.collect.ImmutableList; +import java.util.Arrays; +import java.util.Collections; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Integration tests for {@link ChangeStreamsTxnExclusionSample} */ +@RunWith(JUnit4.class) +public class ChangeStreamsTxnExclusionSampleIT extends SampleTestBase { + + private static DatabaseId databaseId; + + @BeforeClass + public static void createTestDatabase() throws Exception { + final String database = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + instanceId, + database, + ImmutableList.of( + "CREATE TABLE Singers (" + + " SingerId INT64 NOT NULL," + + " FirstName STRING(1024)," + + " LastName STRING(1024)," + + " SingerInfo BYTES(MAX)" + + ") PRIMARY KEY (SingerId)")) + .get(); + databaseId = DatabaseId.of(projectId, instanceId, database); + } + + @Before + public void insertTestData() { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write( + Arrays.asList( + Mutation.newInsertBuilder("Singers") + .set("SingerId") + .to(1L) + .set("FirstName") + .to("first name 1") + 
.set("LastName") + .to("last name 1") + .build(), + Mutation.newInsertBuilder("Singers") + .set("SingerId") + .to(2L) + .set("FirstName") + .to("first name 2") + .set("LastName") + .to("last name 2") + .build())); + } + + @After + public void removeTestData() { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write(Collections.singletonList(Mutation.delete("Singers", KeySet.all()))); + } + + @Test + public void testSetExcludeTxnFromChangeStreamsSampleSample() throws Exception { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + String out = + runSample( + () -> ChangeStreamsTxnExclusionSample.readWriteTxnExcludedFromChangeStreams(client)); + assertThat(out).contains("New singer inserted."); + assertThat(out).contains("Singer first name updated."); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/CopyBackupIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CopyBackupIT.java new file mode 100644 index 000000000000..29854011e1fe --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CopyBackupIT.java @@ -0,0 +1,111 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.Uninterruptibles; +import com.google.spanner.admin.database.v1.BackupName; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.InstanceName; +import java.util.concurrent.TimeUnit; +import java.util.function.Predicate; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +@Ignore +public class CopyBackupIT extends SampleTestBaseV2 { + + private static String key; + + @BeforeClass + public static void setUp() { + String keyLocation = Preconditions + .checkNotNull(System.getProperty("spanner.test.key.location")); + String keyRing = Preconditions.checkNotNull(System.getProperty("spanner.test.key.ring")); + String keyName = Preconditions.checkNotNull(System.getProperty("spanner.test.key.name")); + key = "projects/" + projectId + "/locations/" + keyLocation + "/keyRings/" + keyRing + + "/cryptoKeys/" + keyName; + } + + @Test + public void testEncryptedDatabaseAndBackupAndRestore() throws Exception { + final String databaseId = idGenerator.generateDatabaseId(); + final String sourceBackupId = idGenerator.generateBackupId(); + final String destinationBackupId = idGenerator.generateBackupId(); + + String out = SampleRunner.runSample(() -> + SpannerSample.createDatabase( + databaseAdminClient, InstanceName.of(projectId, instanceId), databaseId)); + assertThat(out).contains(String.format( + "Created database [%s]", DatabaseName.of(projectId, instanceId, databaseId))); + + out = SampleRunner.runSampleWithRetry(() -> + 
CreateBackupWithEncryptionKey.createBackupWithEncryptionKey( + databaseAdminClient, projectId, instanceId, databaseId, sourceBackupId, key + ), new ShouldRetryBackupOperation()); + assertThat(out).containsMatch( + "Backup projects/" + projectId + "/instances/" + instanceId + "/backups/" + + sourceBackupId + " of size \\d+ bytes was created at (.*) using encryption key " + + key); + + out = SampleRunner.runSampleWithRetry(() -> + CopyBackupSample.copyBackup( + databaseAdminClient, projectId, instanceId, sourceBackupId, destinationBackupId + ), new ShouldRetryBackupOperation()); + + assertThat(out).contains("Copied backup [" + BackupName.of( + projectId, instanceId, destinationBackupId).toString() + "]"); + assertThat(out).containsMatch(String.format( + "Backup projects/%s/instances/%s/backups/%s of size \\d+ bytes was copied at (.*)", + projectId, instanceId, destinationBackupId, key)); + } + + static class ShouldRetryBackupOperation implements Predicate { + + private static final int MAX_ATTEMPTS = 20; + private int attempts = 0; + + @Override + public boolean test(SpannerException e) { + if (e.getErrorCode() == ErrorCode.FAILED_PRECONDITION + && e.getMessage().contains("Please retry the operation once the pending")) { + attempts++; + if (attempts == MAX_ATTEMPTS) { + // Throw custom exception so it is easier to locate in the log why it went wrong. + throw SpannerExceptionFactory.newSpannerException(ErrorCode.DEADLINE_EXCEEDED, + String.format("Operation failed %d times because of other pending operations. " + + "Giving up operation.\n", attempts), + e); + } + // Wait one minute before retrying. 
+ Uninterruptibles.sleepUninterruptibly(60L, TimeUnit.SECONDS); + return true; + } + return false; + } + } +} + diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateDatabaseWithDefaultLeaderSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateDatabaseWithDefaultLeaderSampleIT.java new file mode 100644 index 000000000000..fff67b38b8a7 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateDatabaseWithDefaultLeaderSampleIT.java @@ -0,0 +1,60 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static org.junit.Assert.assertTrue; + +import com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.InstanceName; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class CreateDatabaseWithDefaultLeaderSampleIT extends SampleTestBaseV2 { + + @Test + public void testCreateDatabaseWithDefaultLeader() throws Exception { + final String databaseId = idGenerator.generateDatabaseId(); + + // Finds possible default leader + + final String instanceConfigId = instanceAdminClient.getInstance( + InstanceName.of(projectId, multiRegionalInstanceId)).getConfig(); + final InstanceConfig config = instanceAdminClient.getInstanceConfig(instanceConfigId); + assertTrue( + "Expected instance config " + instanceConfigId + " to have at least one leader option", + config.getLeaderOptionsCount() > 0 + ); + final String defaultLeader = config.getLeaderOptions(0); + + // Runs sample + final String out = SampleRunner.runSample(() -> + CreateDatabaseWithDefaultLeaderSample.createDatabaseWithDefaultLeader( + getInstanceName(projectId, multiRegionalInstanceId), + databaseId, + defaultLeader + ) + ); + + assertTrue( + "Expected created database to have default leader " + defaultLeader + "." 
+ + " Output received was " + out, + out.contains("Default leader: " + defaultLeader) + ); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateDatabaseWithVersionRetentionPeriodSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateDatabaseWithVersionRetentionPeriodSampleIT.java new file mode 100644 index 000000000000..98832f1f5122 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateDatabaseWithVersionRetentionPeriodSampleIT.java @@ -0,0 +1,47 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Integration tests for + * {@link com.example.spanner.admin.archived.CreateDatabaseWithVersionRetentionPeriodSample} + */ +@RunWith(JUnit4.class) +public class CreateDatabaseWithVersionRetentionPeriodSampleIT extends SampleTestBaseV2 { + + @Test + public void createsDatabaseWithVersionRetentionPeriod() throws Exception { + final String databaseId = idGenerator.generateDatabaseId(); + final String versionRetentionPeriod = "7d"; + + final String out = SampleRunner.runSample(() -> CreateDatabaseWithVersionRetentionPeriodSample + .createDatabaseWithVersionRetentionPeriod( + projectId, instanceId, databaseId, versionRetentionPeriod + )); + + assertThat(out).contains( + "Created database [projects/" + projectId + "/instances/" + instanceId + "/databases/" + + databaseId + "]"); + assertThat(out).contains("Version retention period: " + versionRetentionPeriod); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateFullBackupScheduleSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateFullBackupScheduleSampleIT.java new file mode 100644 index 000000000000..15ec04fe306e --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateFullBackupScheduleSampleIT.java @@ -0,0 +1,50 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.spanner.admin.database.v1.BackupScheduleName; +import java.util.UUID; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class CreateFullBackupScheduleSampleIT extends SampleTestBaseV2 { + // Default instance and given db should exist for tests to pass. + private static String databaseId = System.getProperty("spanner.sample.database", "mysample"); + + @Test + public void testCreateFullBackupScheduleSample() throws Exception { + String backupScheduleId = String.format("schedule-%s", UUID.randomUUID()); + BackupScheduleName backupScheduleName = + BackupScheduleName.of(projectId, instanceId, databaseId, backupScheduleId); + String out = + SampleRunner.runSample( + () -> { + try { + CreateFullBackupScheduleSample.createFullBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId); + } finally { + DeleteBackupScheduleSample.deleteBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId); + } + }); + assertThat(out).contains(String.format("Created backup schedule: %s", backupScheduleName)); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateIncrementalBackupScheduleSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateIncrementalBackupScheduleSampleIT.java new file mode 100644 index 000000000000..5d590a5b3825 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateIncrementalBackupScheduleSampleIT.java @@ -0,0 +1,51 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.spanner.admin.database.v1.BackupScheduleName; +import java.util.UUID; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class CreateIncrementalBackupScheduleSampleIT extends SampleTestBaseV2 { + // Default instance and given db should exist for tests to pass. + private static String databaseId = System.getProperty("spanner.sample.database", "mysample"); + + @Test + public void testCreateIncrementalBackupScheduleSample() throws Exception { + String backupScheduleId = String.format("schedule-%s", UUID.randomUUID()); + BackupScheduleName backupScheduleName = + BackupScheduleName.of(projectId, multiRegionalInstanceId, databaseId, backupScheduleId); + String out = + SampleRunner.runSample( + () -> { + try { + CreateIncrementalBackupScheduleSample.createIncrementalBackupSchedule( + projectId, multiRegionalInstanceId, databaseId, backupScheduleId); + } finally { + DeleteBackupScheduleSample.deleteBackupSchedule( + projectId, multiRegionalInstanceId, databaseId, backupScheduleId); + } + }); + assertThat(out) + .contains(String.format("Created incremental backup schedule: %s", backupScheduleName)); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateInstancePartitionSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateInstancePartitionSampleIT.java new file mode 100644 index 
000000000000..b243a9229d2d --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateInstancePartitionSampleIT.java @@ -0,0 +1,57 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.spanner.InstanceAdminClient; +import com.google.cloud.spanner.InstanceConfigId; +import com.google.cloud.spanner.InstanceId; +import com.google.cloud.spanner.InstanceInfo; +import com.google.spanner.admin.instance.v1.Instance.Edition; +import com.google.spanner.admin.instance.v1.InstancePartitionName; +import org.junit.Test; + +public class CreateInstancePartitionSampleIT extends SampleTestBaseV2 { + + @Test + public void testCreateInstancePartition() throws Exception { + String instanceId = idGenerator.generateInstanceId(); + InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient(); + instanceAdminClient + .createInstance( + InstanceInfo.newBuilder(InstanceId.of(projectId, instanceId)) + .setEdition(Edition.ENTERPRISE_PLUS) + .setDisplayName("Geo-partitioning test instance") + .setInstanceConfigId(InstanceConfigId.of(projectId, "regional-us-central1")) + .setNodeCount(1) + .build()) + .get(); + + String instancePartitionId = "my-instance-partition"; + String out = + SampleRunner.runSample( + () -> + CreateInstancePartitionSample.createInstancePartition( + projectId, 
instanceId, instancePartitionId)); + assertThat(out) + .contains( + String.format( + "Instance partition %s", + InstancePartitionName.of(projectId, instanceId, instancePartitionId).toString())); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateInstanceWithAsymmetricAutoscalingConfigSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateInstanceWithAsymmetricAutoscalingConfigSampleIT.java new file mode 100644 index 000000000000..b29115ddd0f5 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateInstanceWithAsymmetricAutoscalingConfigSampleIT.java @@ -0,0 +1,37 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.spanner.admin.database.v1.InstanceName; +import org.junit.Test; + +public class CreateInstanceWithAsymmetricAutoscalingConfigSampleIT extends SampleTestBaseV2 { + + @Test + public void testCreateInstanceWithAsymmetricAutoscalingConfig() throws Exception { + String instanceId = idGenerator.generateInstanceId(); + String out = + SampleRunner.runSample( + () -> CreateInstanceWithAsymmetricAutoscalingConfigExample + .createInstance(projectId, instanceId)); + assertThat(out) + .contains(String.format("Asymmetric Autoscaling instance %s", + InstanceName.of(projectId, instanceId).toString())); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateInstanceWithAutoscalingConfigSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateInstanceWithAutoscalingConfigSampleIT.java new file mode 100644 index 000000000000..041d6b97ac02 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateInstanceWithAutoscalingConfigSampleIT.java @@ -0,0 +1,36 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.spanner.admin.database.v1.InstanceName; +import org.junit.Test; + +public class CreateInstanceWithAutoscalingConfigSampleIT extends SampleTestBaseV2 { + + @Test + public void testCreateInstanceWithAutoscalingConfig() throws Exception { + String instanceId = idGenerator.generateInstanceId(); + String out = + SampleRunner.runSample( + () -> CreateInstanceWithAutoscalingConfigExample.createInstance(projectId, instanceId)); + assertThat(out) + .contains(String.format("Autoscaler instance %s", + InstanceName.of(projectId, instanceId).toString())); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateInstanceWithProcessingUnitsSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateInstanceWithProcessingUnitsSampleIT.java new file mode 100644 index 000000000000..dcddea4929b1 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateInstanceWithProcessingUnitsSampleIT.java @@ -0,0 +1,36 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.spanner.admin.database.v1.InstanceName; +import org.junit.Test; + +public class CreateInstanceWithProcessingUnitsSampleIT extends SampleTestBaseV2 { + + @Test + public void testCreateInstanceWithProcessingUnits() throws Exception { + String instanceId = idGenerator.generateInstanceId(); + String out = + SampleRunner.runSample( + () -> CreateInstanceWithProcessingUnitsExample.createInstance(projectId, instanceId)); + assertThat(out) + .contains(String.format("Instance %s has %d processing units", + InstanceName.of(projectId, instanceId).toString(), 500)); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateTableWithForeignKeyDeleteCascadeSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateTableWithForeignKeyDeleteCascadeSampleIT.java new file mode 100644 index 000000000000..481bdc16bdad --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CreateTableWithForeignKeyDeleteCascadeSampleIT.java @@ -0,0 +1,54 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static org.junit.Assert.assertTrue; + +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.InstanceName; +import java.util.concurrent.TimeUnit; +import org.junit.Test; + +public class CreateTableWithForeignKeyDeleteCascadeSampleIT extends SampleTestBaseV2 { + + @Test + public void testCreateTableWithForeignKeyDeleteCascade() throws Exception { + + // Creates database + final String databaseId = idGenerator.generateDatabaseId(); + final CreateDatabaseRequest request = + CreateDatabaseRequest.newBuilder() + .setCreateStatement("CREATE DATABASE `" + databaseId + "`") + .setParent(InstanceName.of(projectId, instanceId).toString()).build(); + databaseAdminClient.createDatabaseAsync(request).get(5, TimeUnit.MINUTES); + + // Runs sample + final String out = + SampleRunner.runSample( + () -> + CreateTableWithForeignKeyDeleteCascadeSample + .createForeignKeyDeleteCascadeConstraint(projectId, instanceId, databaseId)); + + assertTrue( + "Expected to have created database " + + databaseId + + " with tables containing " + + "foreign key constraints.", + out.contains( + "Created Customers and ShoppingCarts table " + "with FKShoppingCartsCustomerId")); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/CustomInstanceConfigSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CustomInstanceConfigSampleIT.java new file mode 100644 index 000000000000..354e11a5045f --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/CustomInstanceConfigSampleIT.java @@ -0,0 +1,73 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static org.junit.Assert.assertTrue; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class CustomInstanceConfigSampleIT extends SampleTestBaseV2 { + + @Test + public void testCustomInstanceConfigOperations() throws Exception { + String customInstanceConfigId = idGenerator.generateInstanceConfigId(); + + // Create a random instance config. Display name is set to the instance config id in sample. + final String out1 = + SampleRunner.runSample( + () -> + CreateInstanceConfigSample.createInstanceConfig( + projectId, instanceConfigName, customInstanceConfigId)); + assertTrue(out1.contains("Created instance configuration")); + + // Fetch the instance config that was created above. + final String out2 = + SampleRunner.runSample( + () -> GetInstanceConfigSample.getInstanceConfig(projectId, instanceConfigName)); + assertTrue(out2.contains("Available leader options for instance config")); + + // Fetch the instance config that was created above. + final String out3 = + SampleRunner.runSample( + () -> ListInstanceConfigsSample.listInstanceConfigs(projectId)); + assertTrue(out3.contains("Available leader options for instance config")); + + // List the instance config operations. 
+ final String out4 = + SampleRunner.runSample( + () -> + ListInstanceConfigOperationsSample.listInstanceConfigOperations(projectId)); + assertTrue(out4.contains("Obtained list of instance config operations")); + + // Update display name to a randomly generated instance config id. + final String out5 = + SampleRunner.runSample( + () -> + UpdateInstanceConfigSample.updateInstanceConfig(projectId, customInstanceConfigId)); + assertTrue(out5.contains("Updated instance configuration")); + + // Delete the created instance config. + final String out6 = + SampleRunner.runSample( + () -> + DeleteInstanceConfigSample.deleteInstanceConfig(projectId, customInstanceConfigId)); + assertTrue(out6.contains("Deleted instance configuration")); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/DatabaseAddSplitPointsIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/DatabaseAddSplitPointsIT.java new file mode 100644 index 000000000000..c9215b78cdc4 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/DatabaseAddSplitPointsIT.java @@ -0,0 +1,57 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.DatabaseId; +import com.google.common.collect.ImmutableList; +import java.util.concurrent.ExecutionException; +import org.junit.Before; +import org.junit.Test; + +public class DatabaseAddSplitPointsIT extends SampleTestBase { + private static String databaseId; + + @Before + public void setup() throws ExecutionException, InterruptedException { + databaseId = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + databaseAdminClient + .newDatabaseBuilder(DatabaseId.of(projectId, instanceId, databaseId)) + .build(), + ImmutableList.of( + "CREATE TABLE Singers (" + + " SingerId INT64 NOT NULL," + + " FirstName STRING(1024)," + + " LastName STRING(1024)" + + ") PRIMARY KEY (SingerId)", + " CREATE INDEX IF NOT EXISTS SingersByFirstLastName ON Singers(FirstName," + + " LastName)")) + .get(); + } + + // TODO: Enable the test once the issue with split points is resolved + // @Test + public void testAddSplits() throws Exception { + final String out = + SampleRunner.runSample( + () -> DatabaseAddSplitPointsSample.addSplitPoints(projectId, instanceId, databaseId)); + assertTrue(out.contains("")); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/DatabaseRolesIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/DatabaseRolesIT.java new file mode 100644 index 000000000000..4b9013e62f48 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/DatabaseRolesIT.java @@ -0,0 +1,138 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.common.collect.Lists; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import java.util.Arrays; +import java.util.Collections; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Integration tests for FGAC samples for GoogleStandardSql dialect. 
+ */ +@RunWith(JUnit4.class) +public class DatabaseRolesIT extends SampleTestBaseV2 { + + private static DatabaseId databaseId; + + @BeforeClass + public static void createTestDatabase() throws Exception { + final String database = idGenerator.generateDatabaseId(); + final CreateDatabaseRequest request = + CreateDatabaseRequest.newBuilder() + .setParent( + com.google.spanner.admin.database.v1.InstanceName.of(projectId, instanceId) + .toString()) + .setCreateStatement("CREATE DATABASE `" + database + "`") + .addAllExtraStatements(Lists.newArrayList( + "CREATE TABLE Singers (" + + " SingerId INT64 NOT NULL," + + " FirstName STRING(1024)," + + " LastName STRING(1024)," + + " SingerInfo BYTES(MAX)," + + " FullName STRING(2048) AS " + + " (ARRAY_TO_STRING([FirstName, LastName], \" \")) STORED" + + ") PRIMARY KEY (SingerId)", + "CREATE TABLE Albums (" + + " SingerId INT64 NOT NULL," + + " AlbumId INT64 NOT NULL," + + " AlbumTitle STRING(MAX)," + + " MarketingBudget INT64" + + ") PRIMARY KEY (SingerId, AlbumId)," + + " INTERLEAVE IN PARENT Singers ON DELETE CASCADE")).build(); + databaseAdminClient.createDatabaseAsync(request).get(5, TimeUnit.MINUTES); + databaseId = DatabaseId.of(projectId, instanceId, database); + } + + @Before + public void insertTestData() { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write( + Arrays.asList( + Mutation.newInsertOrUpdateBuilder("Singers") + .set("SingerId") + .to(1L) + .set("FirstName") + .to("Melissa") + .set("LastName") + .to("Garcia") + .build(), + Mutation.newInsertOrUpdateBuilder("Albums") + .set("SingerId") + .to(1L) + .set("AlbumId") + .to(1L) + .set("AlbumTitle") + .to("title 1") + .set("MarketingBudget") + .to(20000L) + .build())); + } + + @After + public void removeTestData() { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write(Collections.singletonList(Mutation.delete("Singers", KeySet.all()))); + } + + @Test + public void testAddAndDropDatabaseRole() 
throws Exception { + final String out = + SampleRunner.runSample( + () -> + AddAndDropDatabaseRole.addAndDropDatabaseRole( + projectId, instanceId, databaseId.getDatabase(), "new_parent", "new_child", + "Singers", "Albums")); + assertTrue(out.contains("Created roles new_parent and new_child and granted privileges")); + assertTrue(out.contains("Revoked privileges and dropped role new_child")); + } + + @Test + public void testListDatabaseRoles() throws Exception { + final String out = + SampleRunner.runSample( + () -> + ListDatabaseRoles.listDatabaseRoles( + projectId, instanceId, databaseId.getDatabase())); + assertTrue(out.contains("Obtained role ")); + } + + @Test + public void testReadDataWithDatabaseRole() throws Exception { + final String out = + SampleRunner.runSample( + () -> + ReadDataWithDatabaseRole.readDataWithDatabaseRole( + projectId, instanceId, databaseId.getDatabase(), "new_parent")); + assertTrue(out.contains("SingerId: 1")); + assertTrue(out.contains("FirstName: Melissa")); + assertTrue(out.contains("LastName: Garcia")); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/DeleteBackupScheduleSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/DeleteBackupScheduleSampleIT.java new file mode 100644 index 000000000000..3d11bd8dce1d --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/DeleteBackupScheduleSampleIT.java @@ -0,0 +1,50 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.spanner.admin.database.v1.BackupScheduleName; +import java.util.UUID; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class DeleteBackupScheduleSampleIT extends SampleTestBaseV2 { + // Default instance and given db should exist for tests to pass. + private static String databaseId = System.getProperty("spanner.sample.database", "mysample"); + + @Test + public void testDeleteBackupScheduleSample() throws Exception { + String backupScheduleId = String.format("schedule-%s", UUID.randomUUID()); + BackupScheduleName backupScheduleName = + BackupScheduleName.of(projectId, instanceId, databaseId, backupScheduleId); + String out = + SampleRunner.runSample( + () -> { + try { + CreateFullBackupScheduleSample.createFullBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId); + } finally { + DeleteBackupScheduleSample.deleteBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId); + } + }); + assertThat(out).contains(String.format("Deleted backup schedule: %s", backupScheduleName)); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/DirectedReadSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/DirectedReadSampleIT.java new file mode 100644 index 000000000000..771e157a3df5 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/DirectedReadSampleIT.java @@ -0,0 +1,105 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.example.spanner.SampleRunner.runSample; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.ImmutableList; +import java.util.Arrays; +import java.util.Collections; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Integration tests for {@link DirectedReadSample} */ +@RunWith(JUnit4.class) +public class DirectedReadSampleIT extends SampleTestBase { + + private static DatabaseId databaseId; + private static Spanner spanner; + + @BeforeClass + public static void createTestDatabase() throws Exception { + spanner = SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.getDatabaseAdminClient(); + final String database = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + instanceId, + database, + ImmutableList.of( + "CREATE TABLE Albums (" + + " SingerId INT64 NOT NULL," + + " AlbumId INT64," + + " AlbumTitle STRING(1024)" + + ") PRIMARY KEY (SingerId, AlbumId)")) + .get(10, 
TimeUnit.MINUTES); + databaseId = DatabaseId.of(projectId, instanceId, database); + } + + @Before + public void insertTestData() { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write( + Arrays.asList( + Mutation.newInsertOrUpdateBuilder("Albums") + .set("SingerId") + .to(1L) + .set("AlbumId") + .to(1L) + .set("AlbumTitle") + .to("title 1") + .build(), + Mutation.newInsertOrUpdateBuilder("Albums") + .set("SingerId") + .to(2L) + .set("AlbumId") + .to(2L) + .set("AlbumTitle") + .to("title 2") + .build())); + } + + @After + public void removeTestData() { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write(Collections.singletonList(Mutation.delete("Albums", KeySet.all()))); + } + + @Test + public void testDirectedRead() throws Exception { + final String out = + runSample( + () -> DirectedReadSample.directedRead(projectId, instanceId, databaseId.getDatabase())); + assertTrue(out.contains("SingerId: 1, AlbumId: 1, AlbumTitle: title 1")); + assertTrue(out.contains("SingerId: 2, AlbumId: 2, AlbumTitle: title 2")); + assertTrue( + out.contains("Successfully executed read-only transaction with directedReadOptions")); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/DmlReturningSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/DmlReturningSampleIT.java new file mode 100644 index 000000000000..bb40c19aa153 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/DmlReturningSampleIT.java @@ -0,0 +1,136 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.example.spanner.SampleRunner.runSample; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.common.collect.ImmutableList; +import java.util.Arrays; +import java.util.Collections; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Integration tests for DML Returning samples for GoogleStandardSql dialect. 
*/ +@RunWith(JUnit4.class) +public class DmlReturningSampleIT extends SampleTestBase { + + private static DatabaseId databaseId; + + @BeforeClass + public static void createTestDatabase() throws Exception { + final String database = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + instanceId, + database, + ImmutableList.of( + "CREATE TABLE Singers (" + + " SingerId INT64 NOT NULL," + + " FirstName STRING(1024)," + + " LastName STRING(1024)," + + " SingerInfo BYTES(MAX)," + + " FullName STRING(2048) AS " + + " (ARRAY_TO_STRING([FirstName, LastName], \" \")) STORED" + + ") PRIMARY KEY (SingerId)", + "CREATE TABLE Albums (" + + " SingerId INT64 NOT NULL," + + " AlbumId INT64 NOT NULL," + + " AlbumTitle STRING(MAX)," + + " MarketingBudget INT64" + + ") PRIMARY KEY (SingerId, AlbumId)," + + " INTERLEAVE IN PARENT Singers ON DELETE CASCADE")) + .get(10, TimeUnit.MINUTES); + databaseId = DatabaseId.of(projectId, instanceId, database); + } + + @Before + public void insertTestData() { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write( + Arrays.asList( + Mutation.newInsertOrUpdateBuilder("Singers") + .set("SingerId") + .to(1L) + .set("FirstName") + .to("Alice") + .set("LastName") + .to("Trentor") + .build(), + Mutation.newInsertOrUpdateBuilder("Albums") + .set("SingerId") + .to(1L) + .set("AlbumId") + .to(1L) + .set("AlbumTitle") + .to("title 1") + .set("MarketingBudget") + .to(20000L) + .build())); + } + + @After + public void removeTestData() { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write(Collections.singletonList(Mutation.delete("Singers", KeySet.all()))); + } + + @Test + public void testInsertUsingReturningSample() throws Exception { + final String out = + runSample( + () -> + InsertUsingDmlReturningSample.insertUsingDmlReturning( + projectId, instanceId, databaseId.getDatabase())); + assertTrue(out.contains("Inserted row(s) count: 4")); + 
assertTrue(out.contains("Melissa Garcia")); + assertTrue(out.contains("Russell Morales")); + assertTrue(out.contains("Jacqueline Long")); + assertTrue(out.contains("Dylan Shaw")); + } + + @Test + public void testUpdateUsingReturningSample() throws Exception { + final String out = + runSample( + () -> + UpdateUsingDmlReturningSample.updateUsingDmlReturning( + projectId, instanceId, databaseId.getDatabase())); + assertTrue(out.contains("Updated row(s) count: 1")); + assertTrue(out.contains("40000")); + } + + @Test + public void testDeleteUsingReturningSample() throws Exception { + final String out = + runSample( + () -> + DeleteUsingDmlReturningSample.deleteUsingDmlReturningSample( + projectId, instanceId, databaseId.getDatabase())); + assertTrue(out.contains("Deleted row(s) count: 1")); + assertTrue(out.contains("Alice Trentor")); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/DropForeignKeyConstraintDeleteCascadeSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/DropForeignKeyConstraintDeleteCascadeSampleIT.java new file mode 100644 index 000000000000..2e763949f48f --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/DropForeignKeyConstraintDeleteCascadeSampleIT.java @@ -0,0 +1,65 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static org.junit.Assert.assertTrue; + +import com.google.common.collect.Lists; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.InstanceName; +import java.util.concurrent.TimeUnit; +import org.junit.Test; + +public class DropForeignKeyConstraintDeleteCascadeSampleIT extends SampleTestBaseV2 { + + @Test + public void testDropForeignKeyConstraintDeleteCascade() throws Exception { + + // Creates database + final String databaseId = idGenerator.generateDatabaseId(); + final CreateDatabaseRequest request = + CreateDatabaseRequest.newBuilder() + .setCreateStatement("CREATE DATABASE `" + databaseId + "`") + .setParent(InstanceName.of(projectId, instanceId).toString()) + .addAllExtraStatements(Lists.newArrayList( + "CREATE TABLE Customers (\n" + + " CustomerId INT64 NOT NULL,\n" + + " CustomerName STRING(62) NOT NULL,\n" + + " ) PRIMARY KEY (CustomerId)", + "CREATE TABLE ShoppingCarts (\n" + + " CartId INT64 NOT NULL,\n" + + " CustomerId INT64 NOT NULL,\n" + + " CustomerName STRING(62) NOT NULL,\n" + + " CONSTRAINT FKShoppingCartsCustomerName" + + " FOREIGN KEY (CustomerName)\n" + + " REFERENCES Customers (CustomerName) ON DELETE CASCADE\n" + + " ) PRIMARY KEY (CartId)\n")).build(); + databaseAdminClient.createDatabaseAsync(request).get(5, TimeUnit.MINUTES); + + // Runs sample + final String out = + SampleRunner.runSample( + () -> + DropForeignKeyConstraintDeleteCascadeSample.deleteForeignKeyDeleteCascadeConstraint( + projectId, instanceId, databaseId)); + + assertTrue( + "Expected to have dropped foreign-key constraints from tables in created database " + + databaseId, + out.contains("Altered ShoppingCarts table to drop FKShoppingCartsCustomerName")); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/EncryptionKeyIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/EncryptionKeyIT.java new file mode 100644 
index 000000000000..247054091436 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/EncryptionKeyIT.java @@ -0,0 +1,123 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.example.spanner.admin.archived.CreateDatabaseWithEncryptionKey; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.Uninterruptibles; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.InstanceName; +import java.util.concurrent.TimeUnit; +import java.util.function.Predicate; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Integration tests for: {@link CreateDatabaseWithEncryptionKey}, + * {@link com.example.spanner.admin.archived.CreateBackupWithEncryptionKey} and + * {@link com.example.spanner.admin.archived.RestoreBackupWithEncryptionKey} + */ +@RunWith(JUnit4.class) +@Ignore +public class EncryptionKeyIT extends SampleTestBaseV2 { + + private static String key; + + @BeforeClass + public static void setUp() { + String keyLocation = Preconditions + 
.checkNotNull(System.getProperty("spanner.test.key.location")); + String keyRing = Preconditions.checkNotNull(System.getProperty("spanner.test.key.ring")); + String keyName = Preconditions.checkNotNull(System.getProperty("spanner.test.key.name")); + key = "projects/" + projectId + "/locations/" + keyLocation + "/keyRings/" + keyRing + + "/cryptoKeys/" + keyName; + } + + @Test + public void testEncryptedDatabaseAndBackupAndRestore() throws Exception { + final String databaseId = idGenerator.generateDatabaseId(); + final String backupId = idGenerator.generateBackupId(); + final String restoreId = idGenerator.generateDatabaseId(); + + String out = SampleRunner.runSample(() -> + SpannerSample.createDatabase( + databaseAdminClient, InstanceName.of(projectId, instanceId), databaseId)); + assertThat(out).contains(String.format( + "Created database [%s]", DatabaseName.of(projectId, instanceId, databaseId))); + + out = SampleRunner.runSampleWithRetry(() -> + CreateBackupWithEncryptionKey.createBackupWithEncryptionKey( + databaseAdminClient, + projectId, + instanceId, + databaseId, + backupId, + key + ), new ShouldRetryBackupOperation()); + assertThat(out).containsMatch( + "Backup projects/" + projectId + "/instances/" + instanceId + "/backups/" + backupId + + " of size \\d+ bytes was created at (.*) using encryption key " + key); + + out = SampleRunner.runSampleWithRetry(() -> + RestoreBackupWithEncryptionKey.restoreBackupWithEncryptionKey( + databaseAdminClient, + projectId, + instanceId, + backupId, + restoreId, + key + ), new ShouldRetryBackupOperation()); + assertThat(out).contains( + "Database projects/" + projectId + "/instances/" + instanceId + "/databases/" + databaseId + + " restored to projects/" + projectId + "/instances/" + instanceId + "/databases/" + + restoreId + " from backup projects/" + projectId + "/instances/" + instanceId + + "/backups/" + backupId + " using encryption key " + key); + } + + static class ShouldRetryBackupOperation implements Predicate { 
+ + private static final int MAX_ATTEMPTS = 20; + private int attempts = 0; + + @Override + public boolean test(SpannerException e) { + if (e.getErrorCode() == ErrorCode.FAILED_PRECONDITION + && e.getMessage().contains("Please retry the operation once the pending")) { + attempts++; + if (attempts == MAX_ATTEMPTS) { + // Throw custom exception so it is easier to locate in the log why it went wrong. + throw SpannerExceptionFactory.newSpannerException(ErrorCode.DEADLINE_EXCEEDED, + String.format("Operation failed %d times because of other pending operations. " + + "Giving up operation.\n", attempts), + e); + } + // Wait one minute before retrying. + Uninterruptibles.sleepUninterruptibly(60L, TimeUnit.SECONDS); + return true; + } + return false; + } + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/GetBackupScheduleSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/GetBackupScheduleSampleIT.java new file mode 100644 index 000000000000..fa006355a237 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/GetBackupScheduleSampleIT.java @@ -0,0 +1,53 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.spanner.admin.database.v1.BackupScheduleName; +import java.util.UUID; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class GetBackupScheduleSampleIT extends SampleTestBaseV2 { + // Default instance and given db should exist for tests to pass. + private static String databaseId = System.getProperty("spanner.sample.database", "mysample"); + + @Test + public void testGetBackupScheduleSample() throws Exception { + String backupScheduleId = String.format("schedule-%s", UUID.randomUUID()); + BackupScheduleName backupScheduleName = + BackupScheduleName.of(projectId, instanceId, databaseId, backupScheduleId); + String out = + SampleRunner.runSample( + () -> { + try { + CreateFullBackupScheduleSample.createFullBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId); + GetBackupScheduleSample.getBackupSchedule( + projectId, instanceId, + databaseId, backupScheduleId); + } finally { + DeleteBackupScheduleSample.deleteBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId); + } + }); + assertThat(out).contains(String.format("Backup schedule: %s", backupScheduleName)); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/GetCommitStatsSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/GetCommitStatsSampleIT.java new file mode 100644 index 000000000000..6bcf307c32de --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/GetCommitStatsSampleIT.java @@ -0,0 +1,105 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.example.spanner.SampleRunner.runSample; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.common.collect.ImmutableList; +import java.util.Arrays; +import java.util.Collections; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Integration tests for {@link GetCommitStatsSample} + */ +@RunWith(JUnit4.class) +public class GetCommitStatsSampleIT extends SampleTestBase { + + private static DatabaseId databaseId; + + @BeforeClass + public static void createTestDatabase() throws Exception { + final String database = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + instanceId, + database, + ImmutableList.of( + "CREATE TABLE Singers (" + + " SingerId INT64 NOT NULL," + + " FirstName STRING(1024)," + + " LastName STRING(1024)," + + " SingerInfo BYTES(MAX)" + + ") PRIMARY KEY (SingerId)", + "CREATE TABLE Albums (" + + " SingerId INT64 NOT NULL," + + " AlbumId INT64 NOT NULL," + + " AlbumTitle STRING(MAX)," + + " MarketingBudget INT64" + + ") PRIMARY KEY (SingerId, AlbumId)," + + " INTERLEAVE IN PARENT Singers ON DELETE CASCADE")) + .get(); + databaseId = DatabaseId.of(projectId, instanceId, database); + } + + @Before + public void 
insertTestData() { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write(Arrays.asList( + Mutation.newInsertBuilder("Singers") + .set("SingerId") + .to(1L) + .set("FirstName") + .to("first name 1") + .set("LastName") + .to("last name 1") + .build(), + Mutation.newInsertBuilder("Singers") + .set("SingerId") + .to(2L) + .set("FirstName") + .to("first name 2") + .set("LastName") + .to("last name 2") + .build() + )); + } + + @After + public void removeTestData() { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write(Collections.singletonList(Mutation.delete("Singers", KeySet.all()))); + } + + @Test + public void testGetCommitStatsSample() throws Exception { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + final String out = runSample(() -> GetCommitStatsSample.getCommitStats(client)); + + assertThat(out).contains("Updated data with 6 mutations."); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/GetDatabaseDdlSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/GetDatabaseDdlSampleIT.java new file mode 100644 index 000000000000..a50f55e35578 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/GetDatabaseDdlSampleIT.java @@ -0,0 +1,81 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static org.junit.Assert.assertTrue; + +import com.google.common.collect.Lists; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.InstanceName; +import java.util.concurrent.TimeUnit; +import org.junit.Test; + +public class GetDatabaseDdlSampleIT extends SampleTestBaseV2 { + + @Test + public void testGetDatabaseDdl() throws Exception { + // Finds a possible new leader option + final String instanceConfigId = instanceAdminClient.getInstance( + InstanceName.of(projectId, multiRegionalInstanceId)).getConfig(); + final InstanceConfig config = instanceAdminClient.getInstanceConfig(instanceConfigId); + assertTrue( + "Expected instance config " + instanceConfigId + " to have at least one leader option", + config.getLeaderOptionsList().size() > 0 + ); + final String defaultLeader = config.getLeaderOptions(0); + + // Creates database + final String databaseId = idGenerator.generateDatabaseId(); + final CreateDatabaseRequest request = + CreateDatabaseRequest.newBuilder() + .setParent( + com.google.spanner.admin.database.v1.InstanceName.of(projectId, + multiRegionalInstanceId).toString()) + .setCreateStatement("CREATE DATABASE `" + databaseId + "`") + .addAllExtraStatements(Lists.newArrayList( + "CREATE TABLE Singers (Id INT64 NOT NULL) PRIMARY KEY (Id)", + "ALTER DATABASE `" + + databaseId + + "` SET OPTIONS ( default_leader = '" + + defaultLeader + + "')" + )).build(); + databaseAdminClient.createDatabaseAsync(request).get(5, TimeUnit.MINUTES); + + // Runs sample + final String out = SampleRunner.runSample(() -> GetDatabaseDdlSample + .getDatabaseDdl(projectId, multiRegionalInstanceId, databaseId) + ); + + assertTrue( + "Expected to have retrieved database DDL for " + databaseId + "." 
+ + " Output received was " + out, + out.contains("Retrieved database DDL for " + databaseId) + ); + assertTrue( + "Expected leader to be set to " + defaultLeader + "." + + " Output received was " + out, + out.contains("default_leader = '" + defaultLeader + "'") + ); + assertTrue( + "Expected table to have been created in " + databaseId + "." + + " Output received was " + out, + out.contains("CREATE TABLE Singers") + ); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/LastStatementSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/LastStatementSampleIT.java new file mode 100644 index 000000000000..89026b5f92b2 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/LastStatementSampleIT.java @@ -0,0 +1,61 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static com.example.spanner.SampleRunner.runSample; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.common.collect.ImmutableList; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Integration tests for {@link LastStatementSample} */ +@RunWith(JUnit4.class) +public class LastStatementSampleIT extends SampleTestBase { + + private static DatabaseId databaseId; + + @BeforeClass + public static void createTestDatabase() throws Exception { + final String database = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + instanceId, + database, + ImmutableList.of( + "CREATE TABLE Singers (" + + " SingerId INT64 NOT NULL," + + " FirstName STRING(1024)," + + " LastName STRING(1024)," + + " SingerInfo BYTES(MAX)" + + ") PRIMARY KEY (SingerId)")) + .get(); + databaseId = DatabaseId.of(projectId, instanceId, database); + } + + @Test + public void testSetLastStatementOptionSample() throws Exception { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + String out = runSample(() -> LastStatementSample.insertAndUpdateUsingLastStatement(client)); + assertThat(out).contains("New singer inserted."); + assertThat(out).contains("Singer last name updated."); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/ListBackupSchedulesSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/ListBackupSchedulesSampleIT.java new file mode 100644 index 000000000000..386b9442c014 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/ListBackupSchedulesSampleIT.java @@ -0,0 +1,61 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in 
compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.spanner.admin.database.v1.BackupScheduleName; +import java.util.UUID; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ListBackupSchedulesSampleIT extends SampleTestBaseV2 { + // Default instance and given db should exist for tests to pass. + private static String databaseId = System.getProperty("spanner.sample.database", "mysample"); + + @Test + public void testListBackupSchedulesSample() throws Exception { + String backupScheduleId1 = String.format("schedule-%s", UUID.randomUUID()); + BackupScheduleName backupScheduleName1 = + BackupScheduleName.of(projectId, instanceId, databaseId, backupScheduleId1); + + String backupScheduleId2 = String.format("schedule-%s", UUID.randomUUID()); + BackupScheduleName backupScheduleName2 = + BackupScheduleName.of(projectId, instanceId, databaseId, backupScheduleId2); + + String out = + SampleRunner.runSample( + () -> { + try { + CreateFullBackupScheduleSample.createFullBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId1); + CreateFullBackupScheduleSample.createFullBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId2); + ListBackupSchedulesSample.listBackupSchedules(projectId, instanceId, databaseId); + } finally { + DeleteBackupScheduleSample.deleteBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId1); + 
DeleteBackupScheduleSample.deleteBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId2); + } + }); + assertThat(out).contains(String.format("Backup schedule: %s", backupScheduleName1)); + assertThat(out).contains(String.format("Backup schedule: %s", backupScheduleName2)); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/ListDatabasesIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/ListDatabasesIT.java new file mode 100644 index 000000000000..ce3e56bc64b9 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/ListDatabasesIT.java @@ -0,0 +1,36 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static org.junit.Assert.assertTrue; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ListDatabasesIT extends SampleTestBaseV2 { + + @Test + public void testListDatabaseRoles() throws Exception { + final String out = + SampleRunner.runSample( + () -> + ListDatabasesSample.listDatabases(projectId, instanceId)); + assertTrue(out.contains("Databases for projects")); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgAsyncExamplesIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgAsyncExamplesIT.java new file mode 100644 index 000000000000..db5c7362cf81 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgAsyncExamplesIT.java @@ -0,0 +1,251 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static com.example.spanner.SampleRunner.runSample; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.common.collect.ImmutableList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Integration tests for Cloud Spanner Async API examples for Postgresql. + */ +@RunWith(JUnit4.class) +@SuppressWarnings("checkstyle:AbbreviationAsWordInName") +public class PgAsyncExamplesIT extends SampleTestBase { + + private static DatabaseId databaseId; + + @BeforeClass + public static void createTestDatabase() throws Exception { + final String database = idGenerator.generateDatabaseId(); + databaseId = DatabaseId.of(projectId, instanceId, database); + databaseAdminClient + .createDatabase( + databaseAdminClient + .newDatabaseBuilder(databaseId) + .setDialect(Dialect.POSTGRESQL).build(), + Collections.emptyList()) + .get(); + databaseAdminClient.updateDatabaseDdl( + instanceId, + database, + Arrays.asList( + "CREATE TABLE Singers (" + + " SingerId bigint NOT NULL," + + " FirstName character varying(1024)," + + " LastName character varying(1024)," + + " SingerInfo bytea," + + " PRIMARY KEY (SingerId)" + + ")", + "CREATE TABLE Albums (" + + " SingerId bigint NOT NULL," + + " AlbumId bigint NOT NULL," + + " AlbumTitle character varying(1024)," + + " MarketingBudget bigint," + + " PRIMARY KEY (SingerId, AlbumId)" + + ") INTERLEAVE IN PARENT Singers ON DELETE CASCADE", + "CREATE INDEX AlbumsByAlbumTitle ON Albums(AlbumTitle)"), + null).get(); + } + + static class Singer { + + final long 
singerId; + final String firstName; + final String lastName; + + Singer(long singerId, String firstName, String lastName) { + this.singerId = singerId; + this.firstName = firstName; + this.lastName = lastName; + } + } + + static class Album { + + final long singerId; + final long albumId; + final String albumTitle; + final Long marketingBudget; + + Album(long singerId, long albumId, String albumTitle, Long marketingBudget) { + this.singerId = singerId; + this.albumId = albumId; + this.albumTitle = albumTitle; + this.marketingBudget = marketingBudget; + } + } + + private static final List TEST_SINGERS = + Arrays.asList( + new Singer(1, "Marc", "Richards"), + new Singer(2, "Catalina", "Smith"), + new Singer(3, "Alice", "Trentor"), + new Singer(4, "Lea", "Martin"), + new Singer(5, "David", "Lomond")); + private static final List ALBUMS = + Arrays.asList( + new Album(1, 1, "Total Junk", 300_000L), + new Album(1, 2, "Go, Go, Go", 400_000L), + new Album(2, 1, "Green", 150_000L), + new Album(2, 2, "Forever Hold Your Peace", 350_000L), + new Album(2, 3, "Terrified", null)); + + @Before + public void insertTestData() { + DatabaseClient client = spanner.getDatabaseClient(databaseId); + ImmutableList.Builder mutations = + ImmutableList.builderWithExpectedSize(TEST_SINGERS.size()); + for (Singer singer : TEST_SINGERS) { + mutations.add( + Mutation.newInsertBuilder("Singers") + .set("SingerId") + .to(singer.singerId) + .set("FirstName") + .to(singer.firstName) + .set("LastName") + .to(singer.lastName) + .build()); + } + for (Album album : ALBUMS) { + mutations.add( + Mutation.newInsertBuilder("Albums") + .set("SingerId") + .to(album.singerId) + .set("AlbumId") + .to(album.albumId) + .set("AlbumTitle") + .to(album.albumTitle) + .set("MarketingBudget") + .to(album.marketingBudget) + .build()); + } + client.write(mutations.build()); + } + + private void assertSingersOutput(String out) { + assertThat(out).contains("1 Marc Richard"); + assertThat(out).contains("2 Catalina Smith"); + 
assertThat(out).contains("3 Alice Trentor"); + assertThat(out).contains("4 Lea Martin"); + assertThat(out).contains("5 David Lomond"); + } + + private void assertAlbumsOutput(String out) { + assertThat(out).contains("1 1 Total Junk"); + assertThat(out).contains("1 2 Go, Go, Go"); + assertThat(out).contains("2 1 Green"); + assertThat(out).contains("2 2 Forever Hold Your Peace"); + assertThat(out).contains("2 3 Terrified"); + } + + @After + public void removeTestData() { + DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write(Arrays.asList(Mutation.delete("Singers", KeySet.all()))); + } + + @Test + public void asyncQuery_shouldReturnData() throws Exception { + String out = runSample( + () -> AsyncQueryExample.asyncQuery(spanner.getDatabaseClient(databaseId))); + assertAlbumsOutput(out); + } + + @Test + public void asyncQueryToListAsync_shouldReturnData() + throws Exception { + String out = runSample( + () -> PgAsyncQueryToListAsyncExample + .asyncQueryToList(spanner.getDatabaseClient(databaseId))); + assertAlbumsOutput(out); + } + + @Test + public void asyncRead_shouldReturnData() + throws Exception { + String out = runSample(() -> AsyncReadExample.asyncRead(spanner.getDatabaseClient(databaseId))); + assertAlbumsOutput(out); + } + + @Test + public void asyncReadUsingIndex_shouldReturnDataInCorrectOrder() throws Exception { + String out = runSample(() -> AsyncReadUsingIndexExample + .asyncReadUsingIndex(spanner.getDatabaseClient(databaseId))); + assertThat(out) + .contains( + "2 Forever Hold Your Peace\n" + + "2 Go, Go, Go\n" + + "1 Green\n" + + "3 Terrified\n" + + "1 Total Junk"); + } + + @Test + public void asyncReadOnlyTransaction_shouldReturnData() throws Exception { + String out = runSample(() -> AsyncReadOnlyTransactionExample + .asyncReadOnlyTransaction(spanner.getDatabaseClient(databaseId))); + assertAlbumsOutput(out); + assertSingersOutput(out); + } + + @Test + public void asyncDml_shouldInsertRows() throws Exception { + String out = 
runSample(() -> AsyncDmlExample.asyncDml(spanner.getDatabaseClient(databaseId))); + assertThat(out).contains("4 records inserted."); + } + + @Test + public void asyncRunner_shouldUpdateRows() throws Exception { + String out = runSample( + () -> PgAsyncRunnerExample.asyncRunner(spanner.getDatabaseClient(databaseId))); + assertThat(out).contains("2 records updated."); + } + + @Test + public void asyncTransactionManager_shouldUpdateRows() throws Exception { + String out = runSample(() -> PgAsyncTransactionManagerExample + .asyncTransactionManager(spanner.getDatabaseClient(databaseId))); + assertThat(out).contains("2 records updated."); + } + + @Test + public void asyncReadRow_shouldPrintRow() throws Exception { + String out = runSample( + () -> AsyncReadRowExample.asyncReadRow(spanner.getDatabaseClient(databaseId))); + assertThat(out).contains("1 1 Total Junk"); + assertThat(out).doesNotContain("1 2 Go, Go, Go"); + assertThat(out).doesNotContain("2 1 Green"); + assertThat(out).doesNotContain("2 2 Forever Hold Your Peace"); + assertThat(out).doesNotContain("2 3 Terrified"); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgBatchDmlSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgBatchDmlSampleIT.java new file mode 100644 index 000000000000..0b105d41b7da --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgBatchDmlSampleIT.java @@ -0,0 +1,59 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static org.junit.Assert.assertTrue; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import java.util.Collections; +import org.junit.Test; + +public class PgBatchDmlSampleIT extends SampleTestBase { + + @Test + public void testPgBatchDml() throws Exception { + final String databaseId = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + databaseAdminClient + .newDatabaseBuilder(DatabaseId.of(projectId, instanceId, databaseId)) + .setDialect(Dialect.POSTGRESQL) + .build(), + Collections.emptyList()) + .get(); + final OperationFuture updateOperation = + databaseAdminClient.updateDatabaseDdl( + instanceId, + databaseId, + Collections.singleton( + "CREATE TABLE Singers (" + + " SingerId bigint NOT NULL PRIMARY KEY," + + " FirstName varchar(1024)," + + " LastName varchar(1024)" + + ")"), + null); + updateOperation.get(); + + final String out = + SampleRunner.runSample(() -> PgBatchDmlSample.batchDml(projectId, instanceId, databaseId)); + + assertTrue(out.contains("Inserted 2 singers")); + } +} \ No newline at end of file diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgCaseSensitivitySampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgCaseSensitivitySampleIT.java new file mode 100644 index 000000000000..fca4ac55c066 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgCaseSensitivitySampleIT.java @@ -0,0 +1,44 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static org.junit.Assert.assertTrue; + +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.DatabaseDialect; +import com.google.spanner.admin.database.v1.InstanceName; +import org.junit.Test; + +public class PgCaseSensitivitySampleIT extends SampleTestBaseV2 { + + @Test + public void testPgCaseSensitivitySample() throws Exception { + final String databaseId = idGenerator.generateDatabaseId(); + final CreateDatabaseRequest request = + CreateDatabaseRequest.newBuilder() + .setCreateStatement(getCreateDatabaseStatement(databaseId, DatabaseDialect.POSTGRESQL)) + .setParent(InstanceName.of(projectId, instanceId).toString()) + .setDatabaseDialect(DatabaseDialect.POSTGRESQL).build(); + databaseAdminClient.createDatabaseAsync(request).get(); + + final String out = + SampleRunner.runSample( + () -> PgCaseSensitivitySample.pgCaseSensitivity(projectId, instanceId, databaseId)); + assertTrue(out, out.contains("SingerId: 1, FirstName: Bruce, LastName: Allison")); + assertTrue(out, out.contains("SingerId: 1, FullName: Bruce Allison")); + } +} \ No newline at end of file diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgDmlReturningSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgDmlReturningSampleIT.java new file mode 100644 index 000000000000..38d758220c32 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgDmlReturningSampleIT.java @@ -0,0 +1,149 @@ +/* + * 
Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.example.spanner.SampleRunner.runSample; +import static org.junit.Assert.assertTrue; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import java.util.Arrays; +import java.util.Collections; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Integration tests for DML Returning samples for PostgreSql dialect. 
*/ +@RunWith(JUnit4.class) +public class PgDmlReturningSampleIT extends SampleTestBase { + + private static DatabaseId databaseId; + + @BeforeClass + public static void createTestDatabase() throws Exception { + final String database = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + databaseAdminClient + .newDatabaseBuilder(DatabaseId.of(projectId, instanceId, database)) + .setDialect(Dialect.POSTGRESQL) + .build(), + Collections.emptyList()) + .get(10, TimeUnit.MINUTES); + final OperationFuture updateOperation = + databaseAdminClient.updateDatabaseDdl( + instanceId, + database, + ImmutableList.of( + "CREATE TABLE Singers (" + + " SingerId bigint NOT NULL," + + " FirstName character varying(1024)," + + " LastName character varying(1024)," + + " SingerInfo bytea," + + " FullName character varying(2048) GENERATED " + + " ALWAYS AS (FirstName || ' ' || LastName) STORED," + + " PRIMARY KEY (SingerId)" + + ")", + "CREATE TABLE Albums (" + + " SingerId bigint NOT NULL," + + " AlbumId bigint NOT NULL," + + " AlbumTitle character varying(1024)," + + " MarketingBudget bigint," + + " PRIMARY KEY (SingerId, AlbumId)" + + ") INTERLEAVE IN PARENT Singers ON DELETE CASCADE"), + null); + updateOperation.get(10, TimeUnit.MINUTES); + databaseId = DatabaseId.of(projectId, instanceId, database); + } + + @Before + public void insertTestData() { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write( + Arrays.asList( + Mutation.newInsertOrUpdateBuilder("Singers") + .set("SingerId") + .to(1L) + .set("FirstName") + .to("Alice") + .set("LastName") + .to("Trentor") + .build(), + Mutation.newInsertOrUpdateBuilder("Albums") + .set("SingerId") + .to(1L) + .set("AlbumId") + .to(1L) + .set("AlbumTitle") + .to("title 1") + .set("MarketingBudget") + .to(20000L) + .build())); + } + + @After + public void removeTestData() { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + 
client.write(Collections.singletonList(Mutation.delete("Singers", KeySet.all()))); + } + + @Test + public void testInsertUsingReturningSample() throws Exception { + final String out = + runSample( + () -> + PgInsertUsingDmlReturningSample.insertUsingDmlReturning( + projectId, instanceId, databaseId.getDatabase())); + assertTrue(out.contains("Inserted row(s) count: 4")); + assertTrue(out.contains("Melissa Garcia")); + assertTrue(out.contains("Russell Morales")); + assertTrue(out.contains("Jacqueline Long")); + assertTrue(out.contains("Dylan Shaw")); + } + + @Test + public void testUpdateUsingReturningSample() throws Exception { + final String out = + runSample( + () -> + PgUpdateUsingDmlReturningSample.updateUsingDmlReturning( + projectId, instanceId, databaseId.getDatabase())); + assertTrue(out.contains("Updated row(s) count: 1")); + assertTrue(out.contains("40000")); + } + + @Test + public void testDeleteUsingReturningSample() throws Exception { + final String out = + runSample( + () -> + PgDeleteUsingDmlReturningSample.deleteUsingDmlReturningSample( + projectId, instanceId, databaseId.getDatabase())); + assertTrue(out.contains("Deleted row(s) count: 1")); + assertTrue(out.contains("Alice Trentor")); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgInterleavedTableSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgInterleavedTableSampleIT.java new file mode 100644 index 000000000000..412147da0b71 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgInterleavedTableSampleIT.java @@ -0,0 +1,43 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static org.junit.Assert.assertTrue; + +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.DatabaseDialect; +import com.google.spanner.admin.database.v1.InstanceName; +import org.junit.Test; + +public class PgInterleavedTableSampleIT extends SampleTestBaseV2 { + + @Test + public void testPgInterleavedTableSample() throws Exception { + final String databaseId = idGenerator.generateDatabaseId(); + final CreateDatabaseRequest request = + CreateDatabaseRequest.newBuilder() + .setCreateStatement(getCreateDatabaseStatement(databaseId, DatabaseDialect.POSTGRESQL)) + .setParent(InstanceName.of(projectId, instanceId).toString()) + .setDatabaseDialect(DatabaseDialect.POSTGRESQL).build(); + databaseAdminClient.createDatabaseAsync(request).get(); + + final String out = + SampleRunner.runSample( + () -> PgInterleavedTableSample.pgInterleavedTable(projectId, instanceId, databaseId)); + assertTrue(out.contains("Created interleaved table hierarchy using PostgreSQL dialect")); + } +} \ No newline at end of file diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgLastStatementSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgLastStatementSampleIT.java new file mode 100644 index 000000000000..d6d8d43f6a09 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgLastStatementSampleIT.java @@ -0,0 +1,75 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache 
License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.example.spanner.SampleRunner.runSample; +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import java.util.Collections; +import java.util.concurrent.TimeUnit; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Integration tests for {@link PgLastStatementSample} */ +@RunWith(JUnit4.class) +public class PgLastStatementSampleIT extends SampleTestBase { + + private static DatabaseId databaseId; + + @BeforeClass + public static void createTestDatabase() throws Exception { + final String database = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + databaseAdminClient + .newDatabaseBuilder(DatabaseId.of(projectId, instanceId, database)) + .setDialect(Dialect.POSTGRESQL) + .build(), + Collections.emptyList()) + .get(10, TimeUnit.MINUTES); + final OperationFuture updateOperation = + databaseAdminClient.updateDatabaseDdl( + instanceId, + database, + ImmutableList.of( + "CREATE TABLE Singers (" + + " SingerId bigint NOT NULL," + + " FirstName character varying(1024)," + + " 
LastName character varying(1024)," + + " PRIMARY KEY (SingerId)" + + ")"), + null); + updateOperation.get(10, TimeUnit.MINUTES); + databaseId = DatabaseId.of(projectId, instanceId, database); + } + + @Test + public void testSetLastStatementOptionSample() throws Exception { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + String out = runSample(() -> PgLastStatementSample.insertAndUpdateUsingLastStatement(client)); + assertThat(out).contains("New singer inserted."); + assertThat(out).contains("Singer last name updated."); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgPartitionedDmlSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgPartitionedDmlSampleIT.java new file mode 100644 index 000000000000..26d7a37b6430 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgPartitionedDmlSampleIT.java @@ -0,0 +1,91 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static org.junit.Assert.assertTrue; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.Mutation; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import java.util.Arrays; +import java.util.Collections; +import org.junit.Test; + +public class PgPartitionedDmlSampleIT extends SampleTestBase { + + @Test + public void testPgPartitionedDml() throws Exception { + final String databaseId = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + databaseAdminClient + .newDatabaseBuilder(DatabaseId.of(projectId, instanceId, databaseId)) + .setDialect(Dialect.POSTGRESQL) + .build(), + Collections.emptyList()) + .get(); + final OperationFuture updateOperation = + databaseAdminClient.updateDatabaseDdl( + instanceId, + databaseId, + Collections.singleton( + "CREATE TABLE users (" + + " user_id bigint NOT NULL PRIMARY KEY," + + " user_name varchar(1024)," + + " active boolean" + + ")"), + null); + updateOperation.get(); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + client.write( + Arrays.asList( + Mutation.newInsertBuilder("users") + .set("user_id") + .to(1L) + .set("user_name") + .to("user1") + .set("active") + .to(false) + .build(), + Mutation.newInsertBuilder("users") + .set("user_id") + .to(2L) + .set("user_name") + .to("user2") + .set("active") + .to(false) + .build(), + Mutation.newInsertBuilder("users") + .set("user_id") + .to(3L) + .set("user_name") + .to("user3") + .set("active") + .to(true) + .build())); + + final String out = + SampleRunner.runSample( + () -> PgPartitionedDmlSample.partitionedDml(projectId, instanceId, databaseId)); + + assertTrue(out.contains("Deleted at least 2 inactive users")); + } +} \ No newline at end of file diff --git 
a/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgSpannerSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgSpannerSampleIT.java new file mode 100644 index 000000000000..d55af6203d5f --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgSpannerSampleIT.java @@ -0,0 +1,289 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.InstanceName; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.PrintStream; +import java.util.concurrent.TimeUnit; +import java.util.regex.Pattern; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Unit tests for {@code PgSpannerSample} + */ +@RunWith(JUnit4.class) +@SuppressWarnings("checkstyle:abbreviationaswordinname") +public class PgSpannerSampleIT extends SampleTestBaseV2 { + + private static final int 
DBID_LENGTH = 20; + // The instance needs to exist for tests to pass. + private static final String instanceId = System.getProperty("spanner.test.instance"); + private static final String baseDbId = System.getProperty("spanner.sample.database"); + static Spanner spanner; + static DatabaseId dbId; + static DatabaseAdminClient dbClient; + + @BeforeClass + public static void setUp() throws IOException { + SpannerOptions options = + SpannerOptions.newBuilder().setAutoThrottleAdministrativeRequests().build(); + spanner = options.getService(); + dbClient = DatabaseAdminClient.create(); + dbId = DatabaseId.of(options.getProjectId(), instanceId, idGenerator.generateDatabaseId()); + // Delete stale test databases that have been created earlier by this test, but not deleted. + deleteStaleTestDatabases(); + } + + static void deleteStaleTestDatabases() { + Timestamp now = Timestamp.now(); + Pattern samplePattern = getTestDbIdPattern(PgSpannerSampleIT.baseDbId); + Pattern restoredPattern = getTestDbIdPattern("restored"); + for (Database db : dbClient.listDatabases( + InstanceName.of(projectId, instanceId)).iterateAll()) { + DatabaseName databaseName = DatabaseName.parse(db.getName()); + if (TimeUnit.HOURS.convert(now.getSeconds() - db.getCreateTime().getSeconds(), + TimeUnit.SECONDS) > 24) { + if (databaseName.getDatabase().length() >= DBID_LENGTH) { + if (samplePattern.matcher( + toComparableId(PgSpannerSampleIT.baseDbId, databaseName.getDatabase())).matches()) { + dbClient.dropDatabase(db.getName()); + } + if (restoredPattern.matcher(toComparableId("restored", databaseName.getDatabase())) + .matches()) { + dbClient.dropDatabase(db.getName()); + } + } + } + } + } + + private static String toComparableId(String baseId, String existingId) { + String zeroUuid = "00000000-0000-0000-0000-0000-00000000"; + int shouldBeLength = (baseId + "-" + zeroUuid).length(); + int missingLength = shouldBeLength - existingId.length(); + return existingId + zeroUuid.substring(zeroUuid.length() - 
missingLength); + } + + private static Pattern getTestDbIdPattern(String baseDbId) { + return Pattern.compile( + baseDbId + "-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{8}", + Pattern.CASE_INSENSITIVE); + } + + private String runSample(String command) throws Exception { + final PrintStream stdOut = System.out; + final ByteArrayOutputStream bout = new ByteArrayOutputStream(); + final PrintStream out = new PrintStream(bout); + System.setOut(out); + System.out.println(instanceId + ":" + dbId.getDatabase()); + PgSpannerSample.main(new String[]{command, instanceId, dbId.getDatabase()}); + System.setOut(stdOut); + return bout.toString(); + } + + @Test + public void testSample() throws Exception { + assertThat(instanceId).isNotNull(); + assertThat(dbId.getDatabase()).isNotNull(); + + System.out.println("Create Database ..."); + String out = runSample("createpgdatabase"); + assertThat(out).contains("Created database"); + assertThat(out).contains(dbId.getName()); + + System.out.println("Create sample tables Singers and Albums ..."); + runSample("createtableusingddl"); + + System.out.println("Write data to sample tables ..."); + runSample("write"); + + System.out.println("Read data from sample tables ..."); + out = runSample("read"); + assertThat(out).contains("1 1 Total Junk"); + + System.out.println("Write data using DML to sample table ..."); + runSample("writeusingdml"); + System.out.println("Query Singers table ..."); + out = runSample("querysingerstable"); + assertThat(out).contains("Melissa Garcia"); + out = runSample("query"); + assertThat(out).contains("1 1 Total Junk"); + out = runSample("querywithparameter"); + assertThat(out).contains("12 Melissa Garcia"); + + System.out.println("Add column marketing budget ..."); + runSample("addmarketingbudget"); + + // wait for 15 seconds to elapse and then run an update, and query for stale data + long lastUpdateDataTimeInMillis = System.currentTimeMillis(); + while (System.currentTimeMillis() < 
lastUpdateDataTimeInMillis + 16000) { + Thread.sleep(1000); + } + System.out.println("Write data to marketing budget ..."); + runSample("update"); + + System.out.println("Query marketing budget ..."); + out = runSample("querymarketingbudget"); + assertThat(out).contains("1 1 100000"); + assertThat(out).contains("2 2 500000"); + + System.out.println("Write with transaction using dml..."); + runSample("writewithtransactionusingdml"); + out = runSample("querymarketingbudget"); + assertThat(out).contains("1 1 300000"); + assertThat(out).contains("1 1 300000"); + + System.out.println("Add index ..."); + runSample("addindex"); + + System.out.println("Read index ..."); + out = runSample("readindex"); + assertThat(out).contains("Go, Go, Go"); + assertThat(out).contains("Forever Hold Your Peace"); + assertThat(out).contains("Green"); + + System.out.println("Add Storing index ..."); + runSample("addstoringindex"); + + System.out.println("Read storing index ..."); + out = runSample("readstoringindex"); + assertThat(out).contains("300000"); + + System.out.println("Read only transaction ..."); + out = runSample("readonlytransaction"); + assertThat(out.replaceAll("[\r\n]+", " ")) + .containsMatch("(Total Junk.*){2}"); + + System.out.println("Add Timestamp column ..."); + out = runSample("addlastupdatetimestampcolumn"); + assertThat(out).contains("Added LastUpdateTime as a timestamp column"); + + System.out.println("Update values in Timestamp column ..."); + runSample("updatewithtimestamp"); + out = runSample("querywithtimestamp"); + assertThat(out).contains("1 1 1000000"); + assertThat(out).contains("2 2 750000"); + + System.out.println("Create table with Timestamp column ..."); + out = runSample("createtablewithtimestamp"); + assertThat(out).contains("Created Performances table in database"); + + System.out.println("Write with Timestamp ..."); + runSample("writewithtimestamp"); + out = runSample("queryperformancestable"); + assertThat(out).contains("1 4 11000"); + 
assertThat(out).contains("1 19 15000"); + assertThat(out).contains("2 42 7000"); + + System.out.println("Write using DML ..."); + runSample("insertusingdml"); + out = runSample("querysingerstable"); + assertThat(out).contains("Virginia Watson"); + + System.out.println("Update using DML ..."); + runSample("updateusingdml"); + out = runSample("querymarketingbudget"); + assertThat(out).contains("1 1 2000000"); + + System.out.println("Delete using DML ..."); + runSample("deleteusingdml"); + out = runSample("querysingerstable"); + assertThat(out).doesNotContain("Alice Trentor"); + + System.out.println("Write and Read using DML ..."); + out = runSample("writeandreadusingdml"); + assertThat(out).contains("Timothy Campbell"); + + System.out.println("Update using partitioned DML ..."); + runSample("updateusingpartitioneddml"); + out = runSample("querymarketingbudget"); + assertThat(out).contains("2 2 100000"); + assertThat(out).contains("1 1 2000000"); + + System.out.println("Delete using Partitioned DML ..."); + runSample("deleteusingpartitioneddml"); + out = runSample("querysingerstable"); + assertThat(out).doesNotContain("Timothy Grant"); + assertThat(out).doesNotContain("Melissa Garcia"); + assertThat(out).doesNotContain("Russell Morales"); + assertThat(out).doesNotContain("Jacqueline Long"); + assertThat(out).doesNotContain("Dylan Shaw"); + + System.out.println("Update in Batch using DML ..."); + out = runSample("updateusingbatchdml"); + assertThat(out).contains("1 record updated by stmt 0"); + assertThat(out).contains("1 record updated by stmt 1"); + + System.out.println("Create table with data types ..."); + out = runSample("createtablewithdatatypes"); + assertThat(out).contains("Created Venues table in database"); + + System.out.println("Write into table and Query Boolean Type ..."); + runSample("writedatatypesdata"); + out = runSample("querywithbool"); + assertThat(out).contains("19 Venue 19 true"); + + System.out.println("Query with Bytes ..."); + out = 
runSample("querywithbytes"); + assertThat(out).contains("4 Venue 4"); + + System.out.println("Query with Float ..."); + out = runSample("querywithfloat"); + assertThat(out).contains("4 Venue 4 0.8"); + assertThat(out).contains("19 Venue 19 0.9"); + + System.out.println("Query with Int ..."); + out = runSample("querywithint"); + assertThat(out).contains("19 Venue 19 6300"); + assertThat(out).contains("42 Venue 42 3000"); + + System.out.println("Query with String ..."); + out = runSample("querywithstring"); + assertThat(out).contains("42 Venue 42"); + + System.out.println("Query with Timestamp parameter ..."); + out = runSample("querywithtimestampparameter"); + assertThat(out).contains("4 Venue 4"); + assertThat(out).contains("19 Venue 19"); + assertThat(out).contains("42 Venue 42"); + + System.out.println("Query with Numeric Type ..."); + out = runSample("querywithnumeric"); + assertThat(out).contains("19 Venue 19 1200100"); + assertThat(out).contains("42 Venue 42 390650.99"); + + System.out.println("Query options ..."); + out = runSample("clientwithqueryoptions"); + assertThat(out).contains("1 1 Total Junk"); + out = runSample("querywithqueryoptions"); + assertThat(out).contains("1 1 Total Junk"); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgSpannerStandaloneExamplesIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgSpannerStandaloneExamplesIT.java new file mode 100644 index 000000000000..c906006ef5a8 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/PgSpannerStandaloneExamplesIT.java @@ -0,0 +1,287 @@ +/* + * Copyright 2022 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.example.spanner.SpannerSampleIT.formatForTest; +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.Instance; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Value; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.math.BigDecimal; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.concurrent.ExecutionException; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Integration tests for Cloud Spanner cloud client examples. */ +@RunWith(JUnit4.class) +@SuppressWarnings("checkstyle:AbbreviationAsWordInName") +public class PgSpannerStandaloneExamplesIT { + // The instance needs to exist for tests to pass. 
+ private static String instanceId = System.getProperty("spanner.test.instance"); + private static String baseDatabaseId = System.getProperty("spanner.sample.database", "mysample"); + private static String databaseId = formatForTest(baseDatabaseId); + private static DatabaseId dbId; + private static DatabaseAdminClient dbClient; + private static Spanner spanner; + + private String runExample(Runnable example) { + PrintStream stdOut = System.out; + ByteArrayOutputStream bout = new ByteArrayOutputStream(); + PrintStream out = new PrintStream(bout); + System.setOut(out); + example.run(); + System.setOut(stdOut); + return bout.toString(); + } + + @BeforeClass + public static void createTestDatabase() throws Exception { + SpannerOptions options = + SpannerOptions.newBuilder().setAutoThrottleAdministrativeRequests().build(); + spanner = options.getService(); + dbClient = spanner.getDatabaseAdminClient(); + if (instanceId == null) { + Iterator iterator = + spanner.getInstanceAdminClient().listInstances().iterateAll().iterator(); + if (iterator.hasNext()) { + instanceId = iterator.next().getId().getInstance(); + } + } + dbId = DatabaseId.of(options.getProjectId(), instanceId, databaseId); + dbClient + .createDatabase( + dbClient.newDatabaseBuilder(dbId).setDialect(Dialect.POSTGRESQL).build(), + Collections.emptyList()) + .get(); + dbClient.updateDatabaseDdl( + instanceId, + databaseId, + Arrays.asList( + "CREATE TABLE Singers (" + + " SingerId bigint NOT NULL," + + " FirstName character varying(1024)," + + " LastName character varying(1024)," + + " SingerInfo bytea," + + " PRIMARY KEY (SingerId)" + + ")", + "CREATE TABLE Venues (" + + "VenueId bigint NOT NULL," + + "Revenue NUMERIC," + + "VenueDetails JSONB," + + "PRIMARY KEY (VenueId))"), + null).get(); + } + + @AfterClass + public static void dropTestDatabase() throws Exception { + dbClient.dropDatabase(dbId.getInstanceId().getInstance(), dbId.getDatabase()); + spanner.close(); + } + + @Before + public void 
deleteTestData() { + String projectId = spanner.getOptions().getProjectId(); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + client.write(Collections.singleton(Mutation.delete("Singers", KeySet.all()))); + client.write(Collections.singleton(Mutation.delete("Venues", KeySet.all()))); + } + + @Test + public void executeSqlWithCustomTimeoutAndRetrySettings_shouldWriteData() { + String projectId = spanner.getOptions().getProjectId(); + String out = + runExample( + () -> + CustomTimeoutAndRetrySettingsExample.executeSqlWithCustomTimeoutAndRetrySettings( + projectId, instanceId, databaseId)); + assertThat(out).contains("1 record inserted."); + } + + @Test + public void executeSqlWithTimeout_shouldWriteData() { + String projectId = spanner.getOptions().getProjectId(); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + String out = runExample(() -> StatementTimeoutExample.executeSqlWithTimeout(client)); + assertThat(out).contains("1 record inserted."); + } + + @Test + public void addNumericColumn_shouldSuccessfullyAddColumn() + throws InterruptedException, ExecutionException { + OperationFuture operation = + spanner + .getDatabaseAdminClient() + .updateDatabaseDdl( + instanceId, + databaseId, + ImmutableList.of("ALTER TABLE Venues DROP COLUMN Revenue"), + null); + operation.get(); + String out = + runExample( + () -> { + try { + AddNumericColumnSample.addNumericColumn( + spanner.getOptions().getProjectId(), instanceId, databaseId); + } catch (ExecutionException e) { + System.out.printf( + "Adding column `Revenue` failed: %s%n", e.getCause().getMessage()); + } catch (InterruptedException e) { + System.out.printf("Adding column `Revenue` was interrupted%n"); + } + }); + assertThat(out).contains("Successfully added column `Revenue`"); + } + + @Test + public void updateNumericData_shouldWriteData() { + String projectId = spanner.getOptions().getProjectId(); + 
String out = + runExample( + () -> + UpdateNumericDataSample.updateNumericData( + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)))); + assertThat(out).contains("Venues successfully updated"); + } + + @Test + public void queryWithNumericParameter_shouldReturnResults() { + String projectId = spanner.getOptions().getProjectId(); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + client.write( + ImmutableList.of( + Mutation.newInsertOrUpdateBuilder("Venues") + .set("VenueId") + .to(4L) + .set("Revenue") + .to(new BigDecimal("35000")) + .build(), + Mutation.newInsertOrUpdateBuilder("Venues") + .set("VenueId") + .to(19L) + .set("Revenue") + .to(new BigDecimal("104500")) + .build(), + Mutation.newInsertOrUpdateBuilder("Venues") + .set("VenueId") + .to(42L) + .set("Revenue") + .to(new BigDecimal("99999999999999999999999999999.99")) + .build())); + String out = + runExample(() -> PgQueryWithNumericParameterSample.queryWithNumericParameter(client)); + assertThat(out).contains("4 35000"); + } + + @Test + public void addJsonbColumn_shouldSuccessfullyAddColumn() + throws InterruptedException, ExecutionException { + OperationFuture operation = + spanner + .getDatabaseAdminClient() + .updateDatabaseDdl( + instanceId, + databaseId, + ImmutableList.of("ALTER TABLE Venues DROP COLUMN VenueDetails"), + null); + operation.get(); + String out = + runExample( + () -> { + try { + AddJsonbColumnSample.addJsonbColumn( + spanner.getOptions().getProjectId(), instanceId, databaseId); + } catch (ExecutionException e) { + System.out.printf( + "Adding column `VenueDetails` failed: %s%n", e.getCause().getMessage()); + } catch (InterruptedException e) { + System.out.printf("Adding column `VenueDetails` was interrupted%n"); + } + }); + assertThat(out).contains("Successfully added column `VenueDetails`"); + } + + @Test + public void updateJsonbData_shouldWriteData() { + String projectId = 
spanner.getOptions().getProjectId(); + String out = + runExample( + () -> + UpdateJsonbDataSample.updateJsonbData( + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)))); + assertThat(out).contains("Venues successfully updated"); + } + + @Test + public void queryWithJsonbParameter_shouldReturnResults() { + String projectId = spanner.getOptions().getProjectId(); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + client.write( + ImmutableList.of( + Mutation.newInsertOrUpdateBuilder("Venues") + .set("VenueId") + .to(4L) + .set("VenueDetails") + .to( + Value.pgJsonb( + "[{\"name\":\"room 1\",\"open\":true}," + + "{\"name\":\"room 2\",\"open\":false}]")) + .build(), + Mutation.newInsertOrUpdateBuilder("Venues") + .set("VenueId") + .to(19L) + .set("VenueDetails") + .to(Value.pgJsonb("{\"rating\":9,\"open\":true}")) + .build(), + Mutation.newInsertOrUpdateBuilder("Venues") + .set("VenueId") + .to(42L) + .set("VenueDetails") + .to( + Value.pgJsonb( + "{\"name\":null," + + "\"open\":{\"Monday\":true,\"Tuesday\":false}," + + "\"tags\":[\"large\",\"airy\"]}")) + .build())); + String out = runExample(() -> QueryWithJsonbParameterSample.queryWithJsonbParameter(client)); + assertThat(out).contains("VenueId: 19, VenueDetails: {\"open\": true, \"rating\": 9}"); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/ProtoColumnSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/ProtoColumnSampleIT.java new file mode 100644 index 000000000000..ba59f7b23844 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/ProtoColumnSampleIT.java @@ -0,0 +1,129 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.example.spanner.SampleRunner.runSample; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.ImmutableList; +import java.io.InputStream; +import java.util.Arrays; +import java.util.Collections; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Integration tests for Proto Column type */ +@RunWith(JUnit4.class) +public class ProtoColumnSampleIT extends SampleTestBase { + + private static DatabaseId databaseId; + private static Spanner spanner; + + @BeforeClass + public static void createTestDatabase() throws Exception { + spanner = SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.getDatabaseAdminClient(); + final String database = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + instanceId, + database, + ImmutableList.of( + "CREATE TABLE Singers (\n" + + " SingerId INT64 NOT NULL,\n" + + " FirstName STRING(1024),\n" + + " LastName STRING(1024),\n" + + " ) PRIMARY KEY (SingerId)")) + .get(10, 
TimeUnit.MINUTES); + databaseId = DatabaseId.of(projectId, instanceId, database); + + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write( + Arrays.asList( + Mutation.newInsertOrUpdateBuilder("Singers") + .set("SingerId") + .to(1L) + .set("FirstName") + .to("Marc") + .set("LastName") + .to("Richards") + .build(), + Mutation.newInsertOrUpdateBuilder("Singers") + .set("SingerId") + .to(2L) + .set("FirstName") + .to("Catalina") + .set("LastName") + .to("Smith") + .build(), + Mutation.newInsertOrUpdateBuilder("Singers") + .set("SingerId") + .to(3L) + .set("FirstName") + .to("Alice") + .set("LastName") + .to("Trentor") + .build())); + } + + @After + public void removeTestData() { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write(Collections.singletonList(Mutation.delete("Singers", KeySet.all()))); + } + + @Test + public void testProtoColumns() throws Exception { + InputStream in = + ProtoColumnSampleIT.class + .getClassLoader() + .getResourceAsStream("com/example/spanner/descriptors.pb"); + System.out.println(in); + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + System.out.println(databaseId.toString()); + System.out.println("Adding Proto columns schema to table ..."); + String out = + runSample( + () -> + AddProtoColumnSample.addProtoColumn( + projectId, instanceId, databaseId.getDatabase())); + assertTrue(out.contains("Added Proto columns")); + + System.out.println("Update data with Proto Columns ..."); + out = runSample(() -> UpdateProtoDataSample.updateProtoData(client)); + assertTrue(out.contains("Data updated")); + + System.out.println("Update data with Proto Columns using DML ..."); + out = runSample(() -> UpdateProtoDataSampleUsingDml.updateProtoDataUsingDml(client)); + assertTrue(out.contains("record(s) updated")); + + System.out.println("Query data with Proto Columns ..."); + out = runSample(() -> QueryWithProtoParameterSample.queryWithProtoParameter(client)); + 
assertTrue(out.contains("2 singer_id: 2")); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/QueryInformationSchemaDatabaseOptionsSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/QueryInformationSchemaDatabaseOptionsSampleIT.java new file mode 100644 index 000000000000..e0f95b9ceefe --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/QueryInformationSchemaDatabaseOptionsSampleIT.java @@ -0,0 +1,73 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.InstanceConfig; +import com.google.cloud.spanner.InstanceConfigId; +import java.util.Arrays; +import java.util.concurrent.TimeUnit; +import org.junit.Test; + +public class QueryInformationSchemaDatabaseOptionsSampleIT extends SampleTestBase { + + @Test + public void testQueryInformationSchemaDatabaseOptions() throws Exception { + // Finds a possible new leader option + final InstanceConfigId instanceConfigId = instanceAdminClient + .getInstance(multiRegionalInstanceId) + .getInstanceConfigId(); + final InstanceConfig config = instanceAdminClient + .getInstanceConfig(instanceConfigId.getInstanceConfig()); + assertTrue( + "Expected instance config " + instanceConfigId + " to have at least one leader option", + config.getLeaderOptions().size() > 0 + ); + final String defaultLeader = config.getLeaderOptions().get(0); + + // Creates database + final String databaseId = idGenerator.generateDatabaseId(); + databaseAdminClient.createDatabase( + multiRegionalInstanceId, + databaseId, + Arrays.asList( + "CREATE TABLE Singers (Id INT64 NOT NULL) PRIMARY KEY (Id)", + "ALTER DATABASE `" + + databaseId + + "` SET OPTIONS ( default_leader = '" + + defaultLeader + + "')" + ) + ).get(5, TimeUnit.MINUTES); + + // Runs sample + final String out = SampleRunner.runSample(() -> QueryInformationSchemaDatabaseOptionsSample + .queryInformationSchemaDatabaseOptions(projectId, multiRegionalInstanceId, databaseId) + ); + + assertTrue( + "Expected to have retrieved default_leader for " + databaseId + " as " + defaultLeader + "." 
+ + " Output received was " + out, + out.contains( + "The default_leader for projects/" + + projectId + "/instances/" + multiRegionalInstanceId + "/databases/" + databaseId + + " is " + defaultLeader + ) + ); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/QuickstartSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/QuickstartSampleIT.java new file mode 100644 index 000000000000..4084c2599327 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/QuickstartSampleIT.java @@ -0,0 +1,91 @@ +/* + * Copyright 2017 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.Collections; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Tests for quickstart sample. 
+ */ +@RunWith(JUnit4.class) +@SuppressWarnings("checkstyle:abbreviationaswordinname") +public class QuickstartSampleIT { + private static String instanceId = System.getProperty("spanner.test.instance"); + private static String dbId = formatForTest(System.getProperty("spanner.quickstart.database")); + private static Spanner spanner; + private static DatabaseAdminClient dbClient; + + private ByteArrayOutputStream bout; + private PrintStream stdOut = System.out; + private PrintStream out; + + @BeforeClass + public static void createDatabase() throws InterruptedException, ExecutionException { + final SpannerOptions options = + SpannerOptions.newBuilder().setAutoThrottleAdministrativeRequests().build(); + spanner = options.getService(); + dbClient = spanner.getDatabaseAdminClient(); + dbClient.createDatabase(instanceId, dbId, Collections.emptyList()).get(); + } + + @AfterClass + public static void dropDatabase() { + dbClient.dropDatabase(instanceId, dbId); + spanner.close(); + } + + @Before + public void setUp() { + bout = new ByteArrayOutputStream(); + out = new PrintStream(bout); + System.setOut(out); + } + + @After + public void tearDown() { + System.setOut(stdOut); + } + + @Test + public void testQuickstart() throws Exception { + assertThat(instanceId).isNotNull(); + assertThat(dbId).isNotNull(); + QuickstartSample.main(instanceId, dbId); + String got = bout.toString(); + assertThat(got).contains("1"); + } + + private static String formatForTest(String name) { + return name + "-" + UUID.randomUUID().toString().substring(0, 20); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/SampleIdGenerator.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/SampleIdGenerator.java new file mode 100644 index 000000000000..3b375b071e17 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/SampleIdGenerator.java @@ -0,0 +1,105 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache 
License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +/** + * Generates database ids and backup ids. Keeps track of the generated ids so they can be deleted + * afterwards. + */ +public class SampleIdGenerator { + + private static final int INSTANCE_NAME_MAX_LENGTH = 30; + private static final int DATABASE_NAME_MAX_LENGTH = 30; + private static final int BACKUP_NAME_MAX_LENGTH = 30; + private static final int INSTANCE_CONFIG_ID_MAX_LENGTH = 30; + private final List instanceIds; + private final List databaseIds; + private final List backupIds; + private final List instanceConfigIds; + private final String baseDatabaseId; + private final String baseBackupId; + private final String baseInstanceConfigId; + private final String baseInstanceId; + + public SampleIdGenerator(String baseDatabaseId, String baseBackupId, + String baseInstanceConfigId, String baseInstanceId) { + this.baseDatabaseId = baseDatabaseId; + this.baseBackupId = baseBackupId; + this.baseInstanceConfigId = baseInstanceConfigId; + this.baseInstanceId = baseInstanceId; + this.databaseIds = new ArrayList<>(); + this.backupIds = new ArrayList<>(); + this.instanceConfigIds = new ArrayList<>(); + this.instanceIds = new ArrayList<>(); + } + + public String generateInstanceId() { + final String instanceId = + (baseInstanceId + "-" + UUID.randomUUID().toString().replaceAll("-", "")) + .substring(0, INSTANCE_NAME_MAX_LENGTH); 
+ + instanceIds.add(instanceId); + return instanceId; + } + + public String generateDatabaseId() { + final String databaseId = + (baseDatabaseId + "-" + UUID.randomUUID().toString().replaceAll("-", "")) + .substring(0, DATABASE_NAME_MAX_LENGTH); + + databaseIds.add(databaseId); + return databaseId; + } + + public String generateBackupId() { + final String databaseId = + (baseBackupId + "-" + UUID.randomUUID().toString().replaceAll("-", "")) + .substring(0, BACKUP_NAME_MAX_LENGTH); + + backupIds.add(databaseId); + return databaseId; + } + + public String generateInstanceConfigId() { + final String instanceConfigId = + ("custom-" + baseInstanceConfigId + "-" + UUID.randomUUID().toString().replaceAll("-", "")) + .substring(0, INSTANCE_CONFIG_ID_MAX_LENGTH); + + instanceConfigIds.add(instanceConfigId); + return instanceConfigId; + } + + public List getDatabaseIds() { + return databaseIds; + } + + public List getBackupIds() { + return backupIds; + } + + public List getInstanceIds() { + return instanceIds; + } + + public List getInstanceConfigIds() { + return instanceConfigIds; + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/SampleRunner.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/SampleRunner.java new file mode 100644 index 000000000000..cbd18faa1531 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/SampleRunner.java @@ -0,0 +1,63 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import com.google.cloud.spanner.SpannerException; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.function.Predicate; + +/** + * Runs a sample and captures the output as a String. + */ +public class SampleRunner { + + @FunctionalInterface + public interface SampleRunnerCallable { + + void call() throws Exception; + } + + public static String runSample(SampleRunnerCallable sample) throws Exception { + return runSampleWithRetry(sample, e -> false); + } + + /** + * Runs a sample and retries it if the given predicate returns true for a given {@link + * SpannerException}. The predicate can return different answers for the same error, for example + * by only allowing the retry of a certain error a specific number of times. + */ + public static String runSampleWithRetry(SampleRunnerCallable sample, + Predicate shouldRetry) throws Exception { + final PrintStream stdOut = System.out; + final ByteArrayOutputStream bout = new ByteArrayOutputStream(); + final PrintStream out = new PrintStream(bout); + System.setOut(out); + while (true) { + try { + sample.call(); + break; + } catch (SpannerException e) { + if (!shouldRetry.test(e)) { + throw e; + } + } + } + System.setOut(stdOut); + return bout.toString(); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/SampleTestBase.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/SampleTestBase.java new file mode 100644 index 000000000000..ccfe3e6d4d1d --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/SampleTestBase.java @@ -0,0 +1,121 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.InstanceAdminClient; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +/** Base class for sample integration tests. */ +public class SampleTestBase { + private static final String BASE_INSTANCE_ID = + System.getProperty("spanner.sample.instance", "mysample-instance"); + private static final String BASE_DATABASE_ID = + System.getProperty("spanner.sample.database", "sampledb"); + private static final String BASE_BACKUP_ID = "samplebk"; + private static final String BASE_INSTANCE_CONFIG_ID = "sampleconfig"; + + protected static Spanner spanner; + protected static DatabaseAdminClient databaseAdminClient; + protected static InstanceAdminClient instanceAdminClient; + protected static String projectId; + protected static final String instanceId = System.getProperty("spanner.test.instance"); + protected static final String multiRegionalInstanceId = + System.getProperty("spanner.test.instance.mr"); + protected static final String instanceConfigName = System + .getProperty("spanner.test.instance.config"); + protected static SampleIdGenerator idGenerator; + + @BeforeClass + public static void beforeClass() { + final String serverUrl = ""; + final SpannerOptions.Builder optionsBuilder = SpannerOptions + .newBuilder() + .setAutoThrottleAdministrativeRequests(); + if (!serverUrl.isEmpty()) { + optionsBuilder.setHost(serverUrl); + } + 
final SpannerOptions options = optionsBuilder.build(); + projectId = options.getProjectId(); + spanner = options.getService(); + databaseAdminClient = spanner.getDatabaseAdminClient(); + instanceAdminClient = spanner.getInstanceAdminClient(); + idGenerator = new SampleIdGenerator( + BASE_DATABASE_ID, BASE_BACKUP_ID, BASE_INSTANCE_CONFIG_ID, BASE_INSTANCE_ID); + } + + @AfterClass + public static void afterClass() { + for (String instanceId : idGenerator.getInstanceIds()) { + System.out.println("Trying to drop " + instanceId); + try { + // If the database is not found, it is ignored (no exception is thrown) + instanceAdminClient.deleteInstance(instanceId); + } catch (Exception e) { + System.out.println( + "Failed to drop instance " + + instanceId + + " due to " + + e.getMessage() + + ", skipping..."); + } + } + + for (String databaseId : idGenerator.getDatabaseIds()) { + System.out.println("Trying to drop " + databaseId); + try { + // If the database is not found, it is ignored (no exception is thrown) + databaseAdminClient.dropDatabase(instanceId, databaseId); + databaseAdminClient.dropDatabase(multiRegionalInstanceId, databaseId); + } catch (Exception e) { + System.out.println( + "Failed to drop database " + + databaseId + + " due to " + + e.getMessage() + + ", skipping..."); + } + } + for (String backupId : idGenerator.getBackupIds()) { + try { + // If the backup is not found, it is ignored (no exception is thrown) + databaseAdminClient.deleteBackup(instanceId, backupId); + databaseAdminClient.deleteBackup(multiRegionalInstanceId, backupId); + } catch (Exception e) { + System.out.println( + "Failed to delete backup " + backupId + " due to " + e.getMessage() + ", skipping..."); + } + } + for (String configId : idGenerator.getInstanceConfigIds()) { + try { + // If the config is not found, it is ignored (no exception is thrown) + instanceAdminClient.deleteInstanceConfig(configId); + } catch (Exception e) { + System.out.println( + "Failed to delete instance config " + 
+ configId + + " due to " + + e.getMessage() + + ", skipping..."); + } + } + spanner.close(); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/SampleTestBaseV2.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/SampleTestBaseV2.java new file mode 100644 index 000000000000..5a43261269d9 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/SampleTestBaseV2.java @@ -0,0 +1,183 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import com.example.spanner.SampleIdGenerator; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminSettings; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminSettings; +import com.google.spanner.admin.database.v1.DatabaseDialect; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +/** + * Base class for sample integration intests using auto-generated admin clients. 
+ */ +public class SampleTestBaseV2 { + + private static final String BASE_INSTANCE_ID = + System.getProperty("spanner.sample.instance", "mysample-instance"); + + private static final String BASE_DATABASE_ID = + System.getProperty("spanner.sample.database", "sampledb"); + private static final String BASE_BACKUP_ID = "samplebk"; + private static final String BASE_INSTANCE_CONFIG_ID = "sampleconfig"; + private static final int AWAIT_TERMINATION_SECONDS = 10; + + protected static String projectId; + protected static final String instanceId = System.getProperty("spanner.test.instance"); + protected static DatabaseAdminClient databaseAdminClient; + protected static InstanceAdminClient instanceAdminClient; + protected static Spanner spanner; + protected static final String multiRegionalInstanceId = + System.getProperty("spanner.test.instance.mr"); + protected static final String instanceConfigName = System + .getProperty("spanner.test.instance.config"); + protected static SampleIdGenerator idGenerator; + + @BeforeClass + public static void beforeClass() throws IOException { + final String serverUrl = ""; + final SpannerOptions.Builder optionsBuilder = + SpannerOptions.newBuilder().setAutoThrottleAdministrativeRequests(); + final SpannerOptions options = optionsBuilder.build(); + final DatabaseAdminSettings.Builder databaseAdminSettingsBuilder = + DatabaseAdminSettings.newBuilder(); + final InstanceAdminSettings.Builder instanceAdminSettingBuilder = + InstanceAdminSettings.newBuilder(); + + if (!serverUrl.isEmpty()) { + databaseAdminSettingsBuilder.setEndpoint(serverUrl); + instanceAdminSettingBuilder.setEndpoint(serverUrl); + } + + projectId = options.getProjectId(); + spanner = options.getService(); + databaseAdminClient = DatabaseAdminClient.create(databaseAdminSettingsBuilder.build()); + instanceAdminClient = InstanceAdminClient.create(instanceAdminSettingBuilder.build()); + idGenerator = new SampleIdGenerator( + BASE_DATABASE_ID, BASE_BACKUP_ID, 
BASE_INSTANCE_CONFIG_ID, BASE_INSTANCE_ID); + } + + @AfterClass + public static void afterClass() throws InterruptedException { + for (String instanceId : idGenerator.getInstanceIds()) { + System.out.println("Trying to drop " + instanceId); + try { + // If the database is not found, it is ignored (no exception is thrown) + instanceAdminClient.deleteInstance(getInstanceName(projectId, instanceId)); + } catch (Exception e) { + System.out.println( + "Failed to drop instance " + + instanceId + + " due to " + + e.getMessage() + + ", skipping..."); + } + } + + for (String databaseId : idGenerator.getDatabaseIds()) { + System.out.println("Trying to drop " + databaseId); + try { + // If the database is not found, it is ignored (no exception is thrown) + databaseAdminClient.dropDatabase( + getDatabaseName(projectId, instanceId, databaseId)); + databaseAdminClient.dropDatabase( + getDatabaseName(projectId, multiRegionalInstanceId, databaseId)); + } catch (Exception e) { + System.out.println( + "Failed to drop database " + + databaseId + + " due to " + + e.getMessage() + + ", skipping..."); + } + } + for (String backupId : idGenerator.getBackupIds()) { + System.out.println("Trying to delete " + backupId); + try { + // If the backup is not found, it is ignored (no exception is thrown) + databaseAdminClient.deleteBackup( + getBackupName(projectId, instanceId, backupId)); + databaseAdminClient.deleteBackup( + getBackupName(projectId, multiRegionalInstanceId, backupId)); + } catch (Exception e) { + System.out.println( + "Failed to delete backup " + backupId + " due to " + e.getMessage() + ", skipping..."); + } + } + for (String configId : idGenerator.getInstanceConfigIds()) { + System.out.println("Trying to delete " + configId); + try { + // If the config is not found, it is ignored (no exception is thrown) + instanceAdminClient.deleteInstanceConfig(getInstanceConfigName(projectId, configId)); + } catch (Exception e) { + System.out.println( + "Failed to delete instance config " + 
+ configId + + " due to " + + e.getMessage() + + ", skipping..."); + } + } + + spanner.close(); + databaseAdminClient.close(); + instanceAdminClient.close(); + + databaseAdminClient.awaitTermination(AWAIT_TERMINATION_SECONDS, TimeUnit.SECONDS); + instanceAdminClient.awaitTermination(AWAIT_TERMINATION_SECONDS, TimeUnit.SECONDS); + } + + static String getDatabaseName(final String projectId, + final String instanceId, final String databaseId) { + return String.format( + "projects/%s/instances/%s/databases/%s", projectId, instanceId, databaseId); + } + + static String getBackupName(final String projectId, + final String instanceId, final String backupId) { + return String.format( + "projects/%s/instances/%s/backups/%s", projectId, instanceId, backupId); + } + + static String getInstanceName(final String projectId, final String instanceId) { + return String.format("projects/%s/instances/%s", projectId, instanceId); + } + + static String getInstanceConfigName(final String projectId, final String instanceConfigId) { + return String.format("projects/%s/instanceConfigs/%s", projectId, instanceConfigId); + } + + static String getProjectName(final String projectId) { + return String.format("projects/%s", projectId); + } + + static String getCreateDatabaseStatement( + final String databaseName, final DatabaseDialect dialect) { + if (dialect == DatabaseDialect.GOOGLE_STANDARD_SQL) { + return "CREATE DATABASE `" + databaseName + "`"; + } else { + return "CREATE DATABASE \"" + databaseName + "\""; + } + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/SequenceSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/SequenceSampleIT.java new file mode 100644 index 000000000000..de2c3961c22b --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/SequenceSampleIT.java @@ -0,0 +1,142 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you 
may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.example.spanner.SampleRunner.runSample; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.DatabaseDialect; +import java.util.HashSet; +import java.util.concurrent.TimeUnit; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +/** + * Integration tests for Bit reversed sequence samples for GoogleStandardSql and PostgreSql + * dialects. + */ +@RunWith(Parameterized.class) +public class SequenceSampleIT extends SampleTestBaseV2 { + + private static String databaseId; + + /** + * Set of dialects for which database has already been created in this test suite. This helps in + * limiting the number of databases created per dialect to one. + */ + private static final HashSet dbInitializedDialects = new HashSet<>(); + + @Parameters(name = "dialect = {0}") + public static Iterable data() { + return ImmutableList.of(DatabaseDialect.GOOGLE_STANDARD_SQL, DatabaseDialect.POSTGRESQL); + } + + @Parameter(0) + public static DatabaseDialect dialect; + + @Before + public void createTestDatabase() throws Exception { + // Limits number of created databases to one per dialect. 
+ if (dbInitializedDialects.contains(dialect)) { + return; + } + dbInitializedDialects.add(dialect); + databaseId = idGenerator.generateDatabaseId(); + CreateDatabaseRequest createDatabaseRequest = + CreateDatabaseRequest.newBuilder() + .setParent(getInstanceName(projectId, instanceId)) + .setCreateStatement(getCreateDatabaseStatement(databaseId, dialect)) + .setDatabaseDialect(dialect).build(); + databaseAdminClient + .createDatabaseAsync(createDatabaseRequest) + .get(10, TimeUnit.MINUTES); + } + + @Test + public void createSequence() throws Exception { + String out; + if (dialect == DatabaseDialect.GOOGLE_STANDARD_SQL) { + out = + runSample( + () -> + CreateSequenceSample.createSequence( + projectId, instanceId, databaseId)); + } else { + out = + runSample( + () -> + PgCreateSequenceSample.pgCreateSequence( + projectId, instanceId, databaseId)); + } + assertTrue( + out.contains( + "Created Seq sequence and Customers table, where the key column " + + "CustomerId uses the sequence as a default value")); + assertEquals(out.split("Inserted customer record with CustomerId", -1).length - 1, 3); + assertTrue(out.contains("Number of customer records inserted is: 3")); + } + + @Test + public void alterSequence() throws Exception { + String out; + if (dialect == DatabaseDialect.GOOGLE_STANDARD_SQL) { + out = + runSample( + () -> + AlterSequenceSample.alterSequence( + projectId, instanceId, databaseId)); + } else { + out = + runSample( + () -> + PgAlterSequenceSample.pgAlterSequence( + projectId, instanceId, databaseId)); + } + assertTrue( + out.contains("Altered Seq sequence to skip an inclusive range between 1000 and 5000000")); + assertEquals(out.split("Inserted customer record with CustomerId", -1).length - 1, 3); + assertTrue(out.contains("Number of customer records inserted is: 3")); + } + + @Test + public void dropSequence() throws Exception { + String out; + if (dialect == DatabaseDialect.GOOGLE_STANDARD_SQL) { + out = + runSample( + () -> + 
DropSequenceSample.dropSequence(projectId, instanceId, databaseId)); + } else { + out = + runSample( + () -> + PgDropSequenceSample.pgDropSequence( + projectId, instanceId, databaseId)); + } + assertTrue( + out.contains( + "Altered Customers table to drop DEFAULT from " + + "CustomerId column and dropped the Seq sequence")); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/SetMaxCommitDelaySampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/SetMaxCommitDelaySampleIT.java new file mode 100644 index 000000000000..de9e47951f87 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/SetMaxCommitDelaySampleIT.java @@ -0,0 +1,105 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static com.example.spanner.SampleRunner.runSample; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.common.collect.ImmutableList; +import java.util.Arrays; +import java.util.Collections; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Integration tests for {@link SetMaxCommitDelaySample} + */ +@RunWith(JUnit4.class) +public class SetMaxCommitDelaySampleIT extends SampleTestBase { + + private static DatabaseId databaseId; + + @BeforeClass + public static void createTestDatabase() throws Exception { + final String database = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + instanceId, + database, + ImmutableList.of( + "CREATE TABLE Singers (" + + " SingerId INT64 NOT NULL," + + " FirstName STRING(1024)," + + " LastName STRING(1024)," + + " SingerInfo BYTES(MAX)" + + ") PRIMARY KEY (SingerId)", + "CREATE TABLE Albums (" + + " SingerId INT64 NOT NULL," + + " AlbumId INT64 NOT NULL," + + " AlbumTitle STRING(MAX)," + + " MarketingBudget INT64" + + ") PRIMARY KEY (SingerId, AlbumId)," + + " INTERLEAVE IN PARENT Singers ON DELETE CASCADE")) + .get(); + databaseId = DatabaseId.of(projectId, instanceId, database); + } + + @Before + public void insertTestData() { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write(Arrays.asList( + Mutation.newInsertBuilder("Singers") + .set("SingerId") + .to(1L) + .set("FirstName") + .to("first name 1") + .set("LastName") + .to("last name 1") + .build(), + Mutation.newInsertBuilder("Singers") + .set("SingerId") + .to(2L) + .set("FirstName") + .to("first name 2") + .set("LastName") + .to("last name 
2") + .build() + )); + } + + @After + public void removeTestData() { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write(Collections.singletonList(Mutation.delete("Singers", KeySet.all()))); + } + + @Test + public void testSetMaxCommitDelaySample() throws Exception { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + final String out = runSample(() -> SetMaxCommitDelaySample.setMaxCommitDelay(client)); + + assertThat(out).contains("Updated data"); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/SpannerGraphSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/SpannerGraphSampleIT.java new file mode 100644 index 000000000000..be6e67282ab9 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/SpannerGraphSampleIT.java @@ -0,0 +1,109 @@ +/* + * Copyright 2024 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@code SpannerGraphSample} */ +@RunWith(JUnit4.class) +@SuppressWarnings("checkstyle:abbreviationaswordinname") +public class SpannerGraphSampleIT extends SampleTestBaseV2 { + + private static final int DBID_LENGTH = 20; + // The instance needs to exist for tests to pass. + private static final String instanceId = System.getProperty("spanner.test.instance.mr"); + private static final String baseDbId = System.getProperty("spanner.sample.database"); + static Spanner spanner; + static DatabaseAdminClient databaseAdminClient; + + private String runSample(String command, String databaseId) throws Exception { + System.out.println("Running " + command + " on " + instanceId + ":" + databaseId); + PrintStream stdOut = System.out; + ByteArrayOutputStream bout = new ByteArrayOutputStream(); + PrintStream out = new PrintStream(bout); + System.setOut(out); + SpannerGraphSample.main(new String[] {command, instanceId, databaseId}); + System.setOut(stdOut); + return bout.toString(); + } + + @Test + public void testSample() throws Exception { + String databaseId = idGenerator.generateDatabaseId(); + assertThat(instanceId).isNotNull(); + assertThat(databaseId).isNotNull(); + + System.out.println("Create database with property graph ..."); + String out = runSample("createdatabase", databaseId); + + DatabaseId dbId = DatabaseId.of(projectId, instanceId, databaseId); + assertThat(out).contains("Created database"); + assertThat(out).contains(dbId.getName()); + + System.out.println("Insert some data ..."); + out = runSample("insert", databaseId); + + 
System.out.println("Insert more data using DML ..."); + out = runSample("insertusingdml", databaseId); + assertThat(out).contains("2 record(s) inserted into Account."); + assertThat(out).contains("2 record(s) inserted into AccountTransferAccount."); + + System.out.println("Update some data using DML ..."); + out = runSample("updateusingdml", databaseId); + assertThat(out).contains("1 Account record(s) updated."); + assertThat(out).contains("1 AccountTransferAccount record(s) updated."); + + System.out.println("Update some data using a graph query in DML ..."); + out = runSample("updateusinggraphqueryindml", databaseId); + assertThat(out).contains("2 Account record(s) updated."); + + System.out.println("Query the property graph ..."); + out = runSample("query", databaseId); + assertThat(out).contains("Dana Alex 500.0"); + assertThat(out).contains("Lee Dana 300.0"); + assertThat(out).contains("Alex Lee 300.0"); + assertThat(out).contains("Alex Lee 100.0"); + assertThat(out).contains("Dana Lee 200.0"); + + System.out.println("Query the property graph with a parameter ..."); + out = runSample("querywithparameter", databaseId); + assertThat(out).contains("Dana Alex 500.0"); + + System.out.println("Delete some data using DML ..."); + out = runSample("deleteusingdml", databaseId); + assertThat(out).contains("1 Account record(s) deleted."); + + System.out.println("Delete the remaining data in the database ..."); + out = runSample("delete", databaseId); + assertThat(out).contains("Records deleted."); + + System.out.println("Query the property graph ..."); + out = runSample("query", databaseId); + assertThat(out).doesNotContain("Dana"); + assertThat(out).doesNotContain("Alex"); + assertThat(out).doesNotContain("Lee"); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/SpannerSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/SpannerSampleIT.java new file mode 100644 index 000000000000..a3b12caa392d --- /dev/null 
+++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/SpannerSampleIT.java @@ -0,0 +1,786 @@ +/* + * Copyright 2017 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertTrue; + +import com.google.api.gax.rpc.FailedPreconditionException; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Instance; +import com.google.cloud.spanner.InstanceAdminClient; +import com.google.cloud.spanner.InstanceConfigId; +import com.google.cloud.spanner.InstanceId; +import com.google.cloud.spanner.InstanceInfo; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.Uninterruptibles; +import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupName; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.InstanceName; +import 
java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.PrintStream; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.function.Predicate; +import java.util.regex.Pattern; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@code SpannerSample} */ +@RunWith(JUnit4.class) +@SuppressWarnings("checkstyle:abbreviationaswordinname") +public class SpannerSampleIT extends SampleTestBaseV2 { + + private static final int DBID_LENGTH = 20; + // The instance needs to exist for tests to pass. + private static final String instanceId = System.getProperty("spanner.test.instance"); + private static final String baseDbId = System.getProperty("spanner.sample.database"); + private static final String keyLocation = + Preconditions.checkNotNull(System.getProperty("spanner.test.key.location")); + private static final String keyRing = + Preconditions.checkNotNull(System.getProperty("spanner.test.key.ring")); + private static final String keyName = + Preconditions.checkNotNull(System.getProperty("spanner.test.key.name")); + private static final String encryptedBackupId = formatForTest(baseDbId); + private static final long STALE_INSTANCE_THRESHOLD_SECS = + TimeUnit.SECONDS.convert(24L, TimeUnit.HOURS); + static Spanner spanner; + static DatabaseAdminClient databaseAdminClient; + private static String key; + private long lastUpdateDataTimeInMillis; + + private String runSample(String command, String databaseId) throws Exception { + PrintStream stdOut = System.out; + ByteArrayOutputStream bout = new ByteArrayOutputStream(); + PrintStream out = new PrintStream(bout); + System.setOut(out); + SpannerSample.main(new String[] {command, instanceId, databaseId, null}); + System.setOut(stdOut); + return bout.toString(); + } + + private String runSample(String command, String databaseId, String 
backupId) throws Exception { + PrintStream stdOut = System.out; + ByteArrayOutputStream bout = new ByteArrayOutputStream(); + PrintStream out = new PrintStream(bout); + System.setOut(out); + SpannerSample.main(new String[] {command, instanceId, databaseId, backupId}); + System.setOut(stdOut); + return bout.toString(); + } + + @BeforeClass + public static void setUp() throws Exception { + SpannerOptions options = + SpannerOptions.newBuilder().setAutoThrottleAdministrativeRequests().build(); + spanner = options.getService(); + databaseAdminClient = DatabaseAdminClient.create(); + // Delete stale test databases that have been created earlier by this test, but not deleted. + deleteStaleTestDatabases(); + key = + String.format( + "projects/%s/locations/%s/keyRings/%s/cryptoKeys/%s", + options.getProjectId(), keyLocation, keyRing, keyName); + + /* + * Delete stale instances that have been created earlier by this test but not deleted. + * Backups needed to be deleted from the instance first, as the instance can only be + * deleted once all backups have been deleted. + * */ + deleteStaleEncryptedTestInstances(); + } + + /** + * Deleting all the test instances with name starting with 'encrypted-test-' and were created + * before 24 hours. 
+ * + * @throws InterruptedException If Thread.sleep() interrupted + */ + private static void deleteStaleEncryptedTestInstances() throws InterruptedException { + Timestamp now = Timestamp.now(); + + for (Instance instance : + spanner + .getInstanceAdminClient() + .listInstances(Options.filter("name:encrypted-test-")) + .iterateAll()) { + if ((now.getSeconds() - instance.getCreateTime().getSeconds()) + > STALE_INSTANCE_THRESHOLD_SECS) { + deleteAllBackups(instance.getId().getInstance()); + instance.delete(); + } + } + } + + static void deleteStaleTestDatabases() throws IOException { + Timestamp now = Timestamp.now(); + Pattern samplePattern = getTestDbIdPattern(SpannerSampleIT.baseDbId); + Pattern restoredPattern = getTestDbIdPattern("restored"); + try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) { + for (Database db : + databaseAdminClient.listDatabases(InstanceName.of(projectId, instanceId)).iterateAll()) { + DatabaseName databaseName = DatabaseName.parse(db.getName()); + if (TimeUnit.HOURS.convert( + now.getSeconds() - db.getCreateTime().getSeconds(), TimeUnit.SECONDS) + > 24) { + if (databaseName.getDatabase().length() >= DBID_LENGTH) { + if (samplePattern + .matcher(toComparableId(SpannerSampleIT.baseDbId, databaseName.getDatabase())) + .matches()) { + databaseAdminClient.dropDatabase(db.getName()); + } + if (restoredPattern + .matcher(toComparableId("restored", databaseName.getDatabase())) + .matches()) { + databaseAdminClient.dropDatabase(db.getName()); + } + } + } + } + } + } + + @AfterClass + public static void tearDown() { + databaseAdminClient.deleteBackup(BackupName.of(projectId, instanceId, encryptedBackupId)); + spanner.close(); + } + + @Test + public void testSample() throws Exception { + String databaseId = idGenerator.generateDatabaseId(); + DatabaseId dbId = DatabaseId.of(projectId, instanceId, databaseId); + assertThat(instanceId).isNotNull(); + assertThat(databaseId).isNotNull(); + String out = 
runSample("createdatabase", databaseId); + assertThat(out).contains("Created database"); + assertThat(out).contains(dbId.getName()); + + System.out.println("Write data to sample tables ..."); + runSample("write", databaseId); + + System.out.println("Delete data to sample tables ..."); + out = runSample("delete", databaseId); + assertThat(out).contains("Records deleted."); + + runSample("write", databaseId); + + System.out.println("Read data from sample tables ..."); + out = runSample("read", databaseId); + assertThat(out).contains("1 1 Total Junk"); + + out = runSample("query", databaseId); + assertThat(out).contains("1 1 Total Junk"); + runSample("addmarketingbudget", databaseId); + + // wait for 15 seconds to elapse and then run an update, and query for stale data + lastUpdateDataTimeInMillis = System.currentTimeMillis(); + while (System.currentTimeMillis() < lastUpdateDataTimeInMillis + 16000) { + Thread.sleep(1000); + } + runSample("update", databaseId); + + System.out.println("Read stale data from sample tables ..."); + out = runSample("readstaledata", databaseId); + assertThat(out).contains("1 1 NULL"); + runSample("writetransaction", databaseId); + + System.out.println("Query marketing budget ..."); + out = runSample("querymarketingbudget", databaseId); + assertThat(out).contains("1 1 300000"); + assertThat(out).contains("2 2 300000"); + + System.out.println("Add index ..."); + runSample("addindex", databaseId); + + System.out.println("Query index ..."); + out = runSample("queryindex", databaseId); + assertThat(out).contains("Go, Go, Go"); + assertThat(out).contains("Forever Hold Your Peace"); + assertThat(out).doesNotContain("Green"); + + System.out.println("Read index ..."); + out = runSample("readindex", databaseId); + assertThat(out).contains("Go, Go, Go"); + assertThat(out).contains("Forever Hold Your Peace"); + assertThat(out).contains("Green"); + + System.out.println("Add Storing index ..."); + runSample("addstoringindex", databaseId); + out = 
runSample("readstoringindex", databaseId); + assertThat(out).contains("300000"); + + System.out.println("Read storing index ..."); + out = runSample("readonlytransaction", databaseId); + assertThat(out.replaceAll("[\r\n]+", " ")).containsMatch("(Total Junk.*){2}"); + + out = runSample("addcommittimestamp", databaseId); + assertThat(out).contains("Added LastUpdateTime as a commit timestamp column"); + + runSample("updatewithtimestamp", databaseId); + out = runSample("querywithtimestamp", databaseId); + assertThat(out).contains("1 1 1000000"); + assertThat(out).contains("2 2 750000"); + + out = runSample("createtablewithtimestamp", databaseId); + assertThat(out).contains("Created Performances table in database"); + + runSample("writewithtimestamp", databaseId); + out = runSample("queryperformancestable", databaseId); + assertThat(out).contains("1 4 2017-10-05 11000"); + assertThat(out).contains("1 19 2017-11-02 15000"); + assertThat(out).contains("2 42 2017-12-23 7000"); + + runSample("writestructdata", databaseId); + out = runSample("querywithstruct", databaseId); + assertThat(out).startsWith("6\n"); + + out = runSample("querywitharrayofstruct", databaseId); + assertThat(out).startsWith("8\n7\n6"); + + out = runSample("querystructfield", databaseId); + assertThat(out).startsWith("6\n"); + + out = runSample("querynestedstructfield", databaseId); + assertThat(out).contains("6 Imagination\n"); + assertThat(out).contains("9 Imagination\n"); + + runSample("insertusingdml", databaseId); + out = runSample("querysingerstable", databaseId); + assertThat(out).contains("Virginia Watson"); + + runSample("updateusingdml", databaseId); + out = runSample("querymarketingbudget", databaseId); + assertThat(out).contains("1 1 2000000"); + + runSample("deleteusingdml", databaseId); + out = runSample("querysingerstable", databaseId); + assertThat(out).doesNotContain("Alice Trentor"); + + out = runSample("updateusingdmlwithtimestamp", databaseId); + assertThat(out).contains("2 records 
updated"); + + out = runSample("writeandreadusingdml", databaseId); + assertThat(out).contains("Timothy Campbell"); + + runSample("updateusingdmlwithstruct", databaseId); + out = runSample("querysingerstable", databaseId); + assertThat(out).contains("Timothy Grant"); + + runSample("writeusingdml", databaseId); + out = runSample("querysingerstable", databaseId); + assertThat(out).contains("Melissa Garcia"); + assertThat(out).contains("Russell Morales"); + assertThat(out).contains("Jacqueline Long"); + assertThat(out).contains("Dylan Shaw"); + out = runSample("querywithparameter", databaseId); + assertThat(out).contains("12 Melissa Garcia"); + + runSample("writewithtransactionusingdml", databaseId); + out = runSample("querymarketingbudget", databaseId); + assertThat(out).contains("1 1 2200000"); + assertThat(out).contains("2 2 550000"); + + runSample("updateusingpartitioneddml", databaseId); + out = runSample("querymarketingbudget", databaseId); + assertThat(out).contains("1 1 2200000"); + assertThat(out).contains("2 2 100000"); + + runSample("deleteusingpartitioneddml", databaseId); + out = runSample("querysingerstable", databaseId); + assertThat(out).doesNotContain("Timothy Grant"); + assertThat(out).doesNotContain("Melissa Garcia"); + assertThat(out).doesNotContain("Russell Morales"); + assertThat(out).doesNotContain("Jacqueline Long"); + assertThat(out).doesNotContain("Dylan Shaw"); + + out = runSample("updateusingbatchdml", databaseId); + assertThat(out).contains("1 record updated by stmt 0"); + assertThat(out).contains("1 record updated by stmt 1"); + + out = runSample("createtablewithdatatypes", databaseId); + assertThat(out).contains("Created Venues table in database"); + + runSample("writedatatypesdata", databaseId); + out = runSample("querywitharray", databaseId); + assertThat(out).contains("19 Venue 19 2020-11-01"); + assertThat(out).contains("42 Venue 42 2020-10-01"); + + out = runSample("querywithbool", databaseId); + assertThat(out).contains("19 Venue 
19 true"); + + out = runSample("querywithbytes", databaseId); + assertThat(out).contains("4 Venue 4"); + + out = runSample("querywithdate", databaseId); + assertThat(out).contains("4 Venue 4 2018-09-02"); + assertThat(out).contains("42 Venue 42 2018-10-01"); + + out = runSample("querywithfloat", databaseId); + assertThat(out).contains("4 Venue 4 0.8"); + assertThat(out).contains("19 Venue 19 0.9"); + + out = runSample("querywithint", databaseId); + assertThat(out).contains("19 Venue 19 6300"); + assertThat(out).contains("42 Venue 42 3000"); + + out = runSample("querywithstring", databaseId); + assertThat(out).contains("42 Venue 42"); + + out = runSample("querywithtimestampparameter", databaseId); + assertThat(out).contains("4 Venue 4"); + assertThat(out).contains("19 Venue 19"); + assertThat(out).contains("42 Venue 42"); + + out = runSample("querywithnumeric", databaseId); + assertThat(out).contains("19 Venue 19 1200100"); + assertThat(out).contains("42 Venue 42 390650.99"); + + out = runSample("clientwithqueryoptions", databaseId); + assertThat(out).contains("1 1 Total Junk"); + out = runSample("querywithqueryoptions", databaseId); + assertThat(out).contains("1 1 Total Junk"); + } + + @Test + public void testBackupSamples_withoutEncryption() { + String databaseId = idGenerator.generateDatabaseId(); + DatabaseId dbId = DatabaseId.of(projectId, instanceId, databaseId); + String restoreDatabaseId = idGenerator.generateDatabaseId(); + String backupId = idGenerator.generateBackupId(); + + try { + assertThat(instanceId).isNotNull(); + assertThat(databaseId).isNotNull(); + + System.out.println("Creating Database ..."); + String out = runSample("createdatabase", databaseId); + assertThat(out).contains("Created database"); + assertThat(out).contains(dbId.getName()); + + BackupName backupName = BackupName.of(projectId, instanceId, backupId); + + System.out.println("Creating Backup ..."); + out = runSample("createbackup", databaseId, backupId); + 
assertThat(out).contains("Created backup [" + backupName.toString() + "]"); + + // TODO: remove try-catch when filtering on metadata fields works. + try { + System.out.println("List Backup Operations ..."); + out = runSample("listbackupoperations", databaseId, backupId); + assertThat(out) + .contains( + String.format("Backup %s on database %s pending:", backupName, dbId.getName())); + assertTrue( + "Out does not contain copy backup operations", out.contains("Copy Backup Operations")); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()).contains("Cannot evaluate filter expression"); + } + + System.out.println("List Backup ..."); + out = runSample("listbackups", databaseId, backupId); + assertThat(out).contains("All backups:"); + assertThat(out) + .contains(String.format("All backups with backup name containing \"%s\":", backupId)); + assertThat(out) + .contains( + String.format( + "All backups for databases with a name containing \"%s\":", dbId.getDatabase())); + assertThat(out).contains(String.format("All backups that expire before")); + assertThat(out).contains("All backups with size greater than 100 bytes:"); + assertThat(out) + .containsMatch(Pattern.compile("All databases created after (.+) and that are ready:")); + assertThat(out).contains("All backups, listed using pagination:"); + // All the above tests should include the created backup exactly once, i.e. exactly 6 times. + assertThat(countOccurrences(out, backupName.toString())).isEqualTo(6); + + // Try the restore operation in a retry loop, as there is a limit on the number of restore + // operations that is allowed to execute simultaneously, and we should retry if we hit this + // limit. 
+ boolean restored = false; + int restoreAttempts = 0; + while (true) { + try { + System.out.println("Restore Backup ..."); + out = runSample("restorebackup", restoreDatabaseId, backupId); + assertThat(out) + .contains( + "Restored database [" + + DatabaseName.of(projectId, instanceId, restoreDatabaseId).toString() + + "] from [" + + backupName + + "]"); + restored = true; + break; + } catch (SpannerException e) { + if (e.getErrorCode() == ErrorCode.FAILED_PRECONDITION + && e.getMessage() + .contains("Please retry the operation once the pending restores complete")) { + restoreAttempts++; + if (restoreAttempts == 10) { + System.out.println( + "Restore operation failed 10 times because of other pending restores. " + + "Giving up restore."); + break; + } + Uninterruptibles.sleepUninterruptibly(60L, TimeUnit.SECONDS); + } else { + throw e; + } + } + } + + if (restored) { + System.out.println("List Database Operations ..."); + out = runSample("listdatabaseoperations", restoreDatabaseId); + assertThat(out) + .contains( + String.format( + "Database %s restored from backup", + DatabaseId.of(dbId.getInstanceId(), restoreDatabaseId).getName())); + } + + System.out.println("Updating backup ..."); + out = runSample("updatebackup", databaseId, backupId); + assertThat(out).contains(String.format("Updated backup [" + backupId + "]")); + + // Drop the restored database before we try to delete the backup. + // Otherwise the delete backup operation might fail as the backup is still in use by + // the OptimizeRestoredDatabase operation. 
+ databaseAdminClient.dropDatabase( + DatabaseName.of(projectId, dbId.getInstanceId().getInstance(), restoreDatabaseId)); + + System.out.println("Deleting Backup ..."); + out = runSample("deletebackup", databaseId, backupId); + assertThat(out).contains("Deleted backup [" + backupId + "]"); + + } catch (Exception ex) { + Assert.fail("Exception raised => " + ex.getCause()); + } + } + + @Test + public void testCancelBackupSamples() { + String databaseId = idGenerator.generateDatabaseId(); + DatabaseId dbId = DatabaseId.of(projectId, instanceId, databaseId); + + try { + assertThat(instanceId).isNotNull(); + assertThat(databaseId).isNotNull(); + + String out = runSample("createdatabase", databaseId); + assertThat(out).contains("Created database"); + assertThat(out).contains(dbId.getName()); + + String backupId = idGenerator.generateBackupId(); + + out = runSample("cancelcreatebackup", databaseId, backupId); + assertThat(out).contains("Backup operation for [" + backupId + "_cancel] successfully"); + } catch (Exception ex) { + Assert.fail("Exception raised => " + ex.getCause()); + } + } + + @Test + public void testEncryptedDatabaseAndBackupSamples() throws Exception { + String projectId = spanner.getOptions().getProjectId(); + String databaseId = idGenerator.generateDatabaseId(); + String restoreId = idGenerator.generateDatabaseId(); + // Create a separate instance for this test to prevent multiple parallel backup operations on + // the same instance that need to wait for each other. 
+ String instanceId = idGenerator.generateInstanceId(); + InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient(); + instanceAdminClient + .createInstance( + InstanceInfo.newBuilder(InstanceId.of(projectId, instanceId)) + .setDisplayName("Encrypted test instance") + .setInstanceConfigId(InstanceConfigId.of(projectId, "regional-" + keyLocation)) + .setNodeCount(1) + .build()) + .get(); + System.out.println("Creating database ..."); + try { + String out = + SampleRunner.runSample( + () -> + SpannerSample.createDatabase( + databaseAdminClient, InstanceName.of(projectId, instanceId), databaseId)); + assertThat(out) + .contains( + String.format( + "Created database [%s]", DatabaseName.of(projectId, instanceId, databaseId))); + + System.out.println("Creating backup with encryption key ..."); + out = + SampleRunner.runSampleWithRetry( + () -> + CreateBackupWithEncryptionKey.createBackupWithEncryptionKey( + databaseAdminClient, + projectId, + instanceId, + databaseId, + encryptedBackupId, + key), + new ShouldRetryBackupOperation()); + assertThat(out) + .containsMatch( + String.format( + "Backup projects/%s/instances/%s/backups/%s of size \\d+ bytes " + + "was created at (.*) using encryption key %s", + projectId, instanceId, encryptedBackupId, key)); + + System.out.println("Restoring backup with encryption key ..."); + out = + SampleRunner.runSampleWithRetry( + () -> + RestoreBackupWithEncryptionKey.restoreBackupWithEncryptionKey( + databaseAdminClient, + projectId, + instanceId, + encryptedBackupId, + restoreId, + key), + new ShouldRetryBackupOperation()); + assertThat(out) + .contains( + String.format( + "Database projects/%s/instances/%s/databases/%s" + + " restored to projects/%s/instances/%s/databases/%s" + + " from backup projects/%s/instances/%s/backups/%s" + + " using encryption key %s", + projectId, + instanceId, + databaseId, + projectId, + instanceId, + restoreId, + projectId, + instanceId, + encryptedBackupId, + key)); + } finally { + // 
Delete the backups from the test instance first, as the instance can only be deleted once + // all backups have been deleted. + System.out.println("Deleting backups ..."); + deleteAllBackups(instanceId); + instanceAdminClient.deleteInstance(instanceId); + } + } + + @Test + public void testDeleteBackups() { + try { + String projectId = spanner.getOptions().getProjectId(); + String databaseId = idGenerator.generateDatabaseId(); + String backupId = idGenerator.generateBackupId(); + + String out = + SampleRunner.runSample( + () -> + SpannerSample.createDatabase( + databaseAdminClient, InstanceName.of(projectId, instanceId), databaseId)); + assertThat(out) + .contains( + String.format( + "Created database [%s]", DatabaseName.of(projectId, instanceId, databaseId))); + + out = + SampleRunner.runSampleWithRetry( + () -> + CreateBackupWithEncryptionKey.createBackupWithEncryptionKey( + databaseAdminClient, projectId, instanceId, databaseId, backupId, key), + new ShouldRetryBackupOperation()); + assertThat(out) + .containsMatch( + String.format( + "Backup projects/%s/instances/%s/backups/%s of size \\d+ bytes " + + "was created at (.*) using encryption key %s", + projectId, instanceId, backupId, key)); + + out = runSample("deletebackup", databaseId, backupId); + assertThat(out).contains("Deleted backup [" + backupId + "]"); + } catch (Exception ex) { + Assert.fail("Exception raised => " + ex.getCause()); + } + } + + private static void deleteAllBackups(String instanceId) throws InterruptedException { + InstanceName instanceName = InstanceName.of(projectId, instanceId); + for (Backup backup : databaseAdminClient.listBackups(instanceName.toString()).iterateAll()) { + int attempts = 0; + System.out.printf("Deleting backup ... 
%s%n", backup.getName()); + while (attempts < 30) { + try { + attempts++; + databaseAdminClient.deleteBackup(backup.getName()); + break; + } catch (SpannerException | FailedPreconditionException e) { + ErrorCode errorCode = ErrorCode.FAILED_PRECONDITION; + + if (e instanceof SpannerException) { + errorCode = ((SpannerException) e).getErrorCode(); + } + if (errorCode == ErrorCode.FAILED_PRECONDITION + && e.getMessage() + .contains( + "Please try deleting the backup once the restore or post-restore optimize " + + "operations have completed on these databases.")) { + // Wait 30 seconds and then retry. + Thread.sleep(30_000L); + } else { + throw e; + } + } + } + } + } + + private String runSampleRunnable(Runnable sample) { + PrintStream stdOut = System.out; + ByteArrayOutputStream bout = new ByteArrayOutputStream(); + PrintStream out = new PrintStream(bout); + System.setOut(out); + sample.run(); + System.setOut(stdOut); + return bout.toString(); + } + + @Test + public void testCreateAndUpdateInstanceSample() { + String databaseId = idGenerator.generateDatabaseId(); + DatabaseId dbId = DatabaseId.of(projectId, instanceId, databaseId); + + String instanceId = formatForTest("sample-inst"); + String out = + runSampleRunnable( + () -> { + try { + CreateInstanceExample.createInstance(dbId.getInstanceId().getProject(), instanceId); + UpdateInstanceExample.updateInstance(dbId.getInstanceId().getProject(), instanceId); + } finally { + spanner.getInstanceAdminClient().deleteInstance(instanceId); + } + }); + assertThat(out) + .contains( + String.format( + "Instance %s was successfully created", + InstanceId.of(dbId.getInstanceId().getProject(), instanceId))); + assertThat(out) + .contains( + String.format( + "Instance %s was successfully updated", + InstanceId.of(dbId.getInstanceId().getProject(), instanceId))); + } + + @Test + public void testCreateAndUpdateInstanceDefaultBackupScheduleTypeSample() { + String databaseId = idGenerator.generateDatabaseId(); + DatabaseId dbId = 
DatabaseId.of(projectId, instanceId, databaseId); + + String instanceId = formatForTest("sample-inst"); + String out = + runSampleRunnable( + () -> { + try { + CreateInstanceWithoutDefaultBackupSchedulesExample + .createInstanceWithoutDefaultBackupSchedules( + dbId.getInstanceId().getProject(), instanceId); + UpdateInstanceDefaultBackupScheduleTypeExample + .updateInstanceDefaultBackupScheduleType( + dbId.getInstanceId().getProject(), instanceId); + } finally { + spanner.getInstanceAdminClient().deleteInstance(instanceId); + } + }); + assertThat(out) + .contains( + String.format( + "Instance %s was successfully created", + InstanceId.of(dbId.getInstanceId().getProject(), instanceId))); + assertThat(out) + .contains( + String.format( + "Instance %s was successfully updated", + InstanceId.of(dbId.getInstanceId().getProject(), instanceId))); + } + + private static int countOccurrences(String input, String search) { + return input.split(search).length - 1; + } + + private static String toComparableId(String baseId, String existingId) { + String zeroUuid = "00000000-0000-0000-0000-0000-00000000"; + int shouldBeLength = (baseId + "-" + zeroUuid).length(); + int missingLength = shouldBeLength - existingId.length(); + return existingId + zeroUuid.substring(zeroUuid.length() - missingLength); + } + + private static Pattern getTestDbIdPattern(String baseDbId) { + return Pattern.compile( + baseDbId + "-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{8}", + Pattern.CASE_INSENSITIVE); + } + + static String formatForTest(String name) { + return name + "-" + UUID.randomUUID().toString().substring(0, DBID_LENGTH); + } + + static class ShouldRetryBackupOperation implements Predicate { + + private static final int MAX_ATTEMPTS = 20; + private int attempts = 0; + + @Override + public boolean test(SpannerException e) { + if (e.getErrorCode() == ErrorCode.FAILED_PRECONDITION + && e.getMessage().contains("Please retry the operation once the pending")) { + 
attempts++; + if (attempts == MAX_ATTEMPTS) { + // Throw custom exception so it is easier to locate in the log why it went wrong. + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.DEADLINE_EXCEEDED, + String.format( + "Operation failed %d times because of other pending operations. " + + "Giving up operation.\n", + attempts), + e); + } + // Wait one minute before retrying. + Uninterruptibles.sleepUninterruptibly(60L, TimeUnit.SECONDS); + return true; + } + return false; + } + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/SpannerStandaloneExamplesIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/SpannerStandaloneExamplesIT.java new file mode 100644 index 000000000000..0acbf198938d --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/SpannerStandaloneExamplesIT.java @@ -0,0 +1,321 @@ +/* + * Copyright 2020 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static com.example.spanner.SpannerSampleIT.formatForTest; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.example.spanner.CustomTimeoutAndRetrySettingsExample; +import com.example.spanner.QueryWithJsonParameterSample; +import com.example.spanner.QueryWithNumericParameterSample; +import com.example.spanner.StatementTimeoutExample; +import com.example.spanner.TransactionTimeoutExample; +import com.example.spanner.UpdateJsonDataSample; +import com.example.spanner.UpdateNumericDataSample; +import com.example.spanner.admin.archived.SpannerSampleIT; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Instance; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Value; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.math.BigDecimal; +import java.util.Collections; +import java.util.Iterator; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Integration tests for Cloud Spanner cloud client examples. 
*/ +@RunWith(JUnit4.class) +@SuppressWarnings("checkstyle:AbbreviationAsWordInName") +public class SpannerStandaloneExamplesIT { + // The instance needs to exist for tests to pass. + private static String instanceId = System.getProperty("spanner.test.instance"); + private static String baseDatabaseId = System.getProperty("spanner.sample.database", "mysample"); + private static String databaseId = formatForTest(baseDatabaseId); + private static DatabaseId dbId; + private static DatabaseAdminClient dbClient; + private static Spanner spanner; + + private String runExample(Runnable example) { + PrintStream stdOut = System.out; + ByteArrayOutputStream bout = new ByteArrayOutputStream(); + PrintStream out = new PrintStream(bout); + System.setOut(out); + example.run(); + System.setOut(stdOut); + return bout.toString(); + } + + @BeforeClass + public static void createTestDatabase() throws Exception { + SpannerOptions options = + SpannerOptions.newBuilder().setAutoThrottleAdministrativeRequests().build(); + spanner = options.getService(); + dbClient = spanner.getDatabaseAdminClient(); + if (instanceId == null) { + Iterator iterator = + spanner.getInstanceAdminClient().listInstances().iterateAll().iterator(); + if (iterator.hasNext()) { + instanceId = iterator.next().getId().getInstance(); + } + } + dbId = DatabaseId.of(options.getProjectId(), instanceId, databaseId); + dbClient + .createDatabase( + instanceId, + databaseId, + ImmutableList.of( + "CREATE TABLE Singers (" + + " SingerId INT64 NOT NULL," + + " FirstName STRING(1024)," + + " LastName STRING(1024)," + + " SingerInfo BYTES(MAX)" + + ") PRIMARY KEY (SingerId)", + "CREATE TABLE Venues (" + + "VenueId INT64 NOT NULL," + + "Revenue NUMERIC," + + "VenueDetails JSON" + + ") PRIMARY KEY (VenueId)")) + .get(); + } + + @AfterClass + public static void dropTestDatabase() throws Exception { + dbClient.dropDatabase(dbId.getInstanceId().getInstance(), dbId.getDatabase()); + spanner.close(); + } + + @Before + public void 
deleteTestData() { + String projectId = spanner.getOptions().getProjectId(); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + client.write(Collections.singleton(Mutation.delete("Singers", KeySet.all()))); + client.write(Collections.singleton(Mutation.delete("Venues", KeySet.all()))); + } + + @Test + public void executeSqlWithCustomTimeoutAndRetrySettings_shouldWriteData() { + String projectId = spanner.getOptions().getProjectId(); + String out = + runExample( + () -> + CustomTimeoutAndRetrySettingsExample.executeSqlWithCustomTimeoutAndRetrySettings( + projectId, instanceId, databaseId)); + assertThat(out).contains("1 record inserted."); + } + + @Test + public void executeSqlWithTimeout_shouldWriteData() { + String projectId = spanner.getOptions().getProjectId(); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + String out = runExample(() -> StatementTimeoutExample.executeSqlWithTimeout(client)); + assertThat(out).contains("1 record inserted."); + } + + @Test + public void testTransactionWithTimeout_shouldWriteData() { + String projectId = spanner.getOptions().getProjectId(); + String out = + runExample( + () -> + TransactionTimeoutExample.executeTransactionWithTimeout( + projectId, instanceId, databaseId, 60L, TimeUnit.SECONDS)); + assertTrue(out, out.contains("1 record inserted")); + } + + @Test + public void testTransactionWithTimeout_shouldFailWithDeadlineExceeded() { + String projectId = spanner.getOptions().getProjectId(); + // Execute a transaction with a 5 millisecond timeout. The transaction executes both a read, a + // write, and a commit operation. Each of these would normally take at least 5 milliseconds. 
+ SpannerException exception = + assertThrows( + SpannerException.class, + () -> + runExample( + () -> + TransactionTimeoutExample.executeTransactionWithTimeout( + projectId, instanceId, databaseId, 5L, TimeUnit.MILLISECONDS))); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, exception.getErrorCode()); + } + + @Test + public void addNumericColumn_shouldSuccessfullyAddColumn() + throws InterruptedException, ExecutionException { + OperationFuture operation = + spanner + .getDatabaseAdminClient() + .updateDatabaseDdl( + instanceId, + databaseId, + ImmutableList.of("ALTER TABLE Venues DROP COLUMN Revenue"), + null); + operation.get(); + String out = + runExample( + () -> { + try { + AddNumericColumnSample.addNumericColumn( + spanner.getOptions().getProjectId(), instanceId, databaseId); + } catch (ExecutionException e) { + System.out.printf( + "Adding column `Revenue` failed: %s%n", e.getCause().getMessage()); + } catch (InterruptedException e) { + System.out.printf("Adding column `Revenue` was interrupted%n"); + } + }); + assertThat(out).contains("Successfully added column `Revenue`"); + } + + @Test + public void updateNumericData_shouldWriteData() { + String projectId = spanner.getOptions().getProjectId(); + String out = + runExample( + () -> + UpdateNumericDataSample.updateNumericData( + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)))); + assertThat(out).contains("Venues successfully updated"); + } + + @Test + public void queryWithNumericParameter_shouldReturnResults() { + String projectId = spanner.getOptions().getProjectId(); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + client.write( + ImmutableList.of( + Mutation.newInsertOrUpdateBuilder("Venues") + .set("VenueId") + .to(4L) + .set("Revenue") + .to(new BigDecimal("35000")) + .build(), + Mutation.newInsertOrUpdateBuilder("Venues") + .set("VenueId") + .to(19L) + .set("Revenue") + .to(new BigDecimal("104500")) + .build(), + 
Mutation.newInsertOrUpdateBuilder("Venues") + .set("VenueId") + .to(42L) + .set("Revenue") + .to(new BigDecimal("99999999999999999999999999999.99")) + .build())); + String out = + runExample(() -> QueryWithNumericParameterSample.queryWithNumericParameter(client)); + assertThat(out).contains("4 35000"); + } + + @Test + public void addJsonColumn_shouldSuccessfullyAddColumn() + throws InterruptedException, ExecutionException { + OperationFuture operation = + spanner + .getDatabaseAdminClient() + .updateDatabaseDdl( + instanceId, + databaseId, + ImmutableList.of("ALTER TABLE Venues DROP COLUMN VenueDetails"), + null); + operation.get(); + String out = + runExample( + () -> { + try { + AddJsonColumnSample.addJsonColumn( + spanner.getOptions().getProjectId(), instanceId, databaseId); + } catch (ExecutionException e) { + System.out.printf( + "Adding column `VenueDetails` failed: %s%n", e.getCause().getMessage()); + } catch (InterruptedException e) { + System.out.printf("Adding column `VenueDetails` was interrupted%n"); + } + }); + assertThat(out).contains("Successfully added column `VenueDetails`"); + } + + @Test + public void updateJsonData_shouldWriteData() { + String projectId = spanner.getOptions().getProjectId(); + String out = + runExample( + () -> + UpdateJsonDataSample.updateJsonData( + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)))); + assertThat(out).contains("Venues successfully updated"); + } + + @Test + public void queryWithJsonParameter_shouldReturnResults() { + String projectId = spanner.getOptions().getProjectId(); + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + client.write( + ImmutableList.of( + Mutation.newInsertOrUpdateBuilder("Venues") + .set("VenueId") + .to(4L) + .set("VenueDetails") + .to( + Value.json( + "[{\"name\":\"room 1\",\"open\":true}," + + "{\"name\":\"room 2\",\"open\":false}]")) + .build(), + Mutation.newInsertOrUpdateBuilder("Venues") + 
.set("VenueId") + .to(19L) + .set("VenueDetails") + .to(Value.json("{\"rating\":9,\"open\":true}")) + .build(), + Mutation.newInsertOrUpdateBuilder("Venues") + .set("VenueId") + .to(42L) + .set("VenueDetails") + .to( + Value.json( + "{\"name\":null," + + "\"open\":{\"Monday\":true,\"Tuesday\":false}," + + "\"tags\":[\"large\",\"airy\"]}")) + .build())); + String out = runExample(() -> QueryWithJsonParameterSample.queryWithJsonParameter(client)); + assertThat(out).contains("VenueId: 19, VenueDetails: {\"open\":true,\"rating\":9}"); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/TagSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/TagSampleIT.java new file mode 100644 index 000000000000..61733c1f0a49 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/TagSampleIT.java @@ -0,0 +1,118 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static com.example.spanner.SampleRunner.runSample; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.common.collect.ImmutableList; +import java.util.Arrays; +import java.util.Collections; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Integration tests for {@link TagSample} + */ +@RunWith(JUnit4.class) +public class TagSampleIT extends SampleTestBase { + + private static DatabaseId databaseId; + + @BeforeClass + public static void createTestDatabase() throws Exception { + final String database = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + instanceId, + database, + ImmutableList.of( + "CREATE TABLE Albums (" + + " SingerId INT64 NOT NULL," + + " AlbumId INT64," + + " AlbumTitle STRING(1024)" + + ") PRIMARY KEY (SingerId, AlbumId)", + "CREATE TABLE Venues (" + + " VenueId INT64 NOT NULL," + + " VenueName STRING(MAX)," + + " Capacity INT64," + + " OutdoorVenue BOOL," + + " LastUpdateTime TIMESTAMP OPTIONS (allow_commit_timestamp=true)" + + ") PRIMARY KEY (VenueId)")) + .get(10, TimeUnit.MINUTES); + databaseId = DatabaseId.of(projectId, instanceId, database); + } + + @Before + public void insertTestData() { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write( + Arrays.asList( + Mutation.newInsertOrUpdateBuilder("Albums") + .set("SingerId") + .to(1L) + .set("AlbumId") + .to(1L) + .set("AlbumTitle") + .to("title 1") + .build(), + Mutation.newInsertOrUpdateBuilder("Venues") + .set("VenueId") + .to(4L) + .set("VenueName") + .to("name") + 
.set("Capacity") + .to(4000000) + .set("OutdoorVenue") + .to(false) + .build())); + } + + @After + public void removeTestData() { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write(Collections.singletonList(Mutation.delete("Albums", KeySet.all()))); + client.write(Collections.singleton(Mutation.delete("Venues", KeySet.all()))); + } + + @Test + public void testSetRequestTag() throws Exception { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + + final String out = runSample(() -> TagSample.setRequestTag(client)); + assertTrue(out.contains("SingerId: 1, AlbumId: 1, AlbumTitle: title 1")); + } + + @Test + public void testSetTransactionTag() throws Exception { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + + final String out = runSample(() -> TagSample.setTransactionTag(client)); + assertTrue(out.contains("Venue capacities updated.")); + assertTrue(out.contains("New venue inserted.")); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/UnnamedParametersIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/UnnamedParametersIT.java new file mode 100644 index 000000000000..d6c900dd60e0 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/UnnamedParametersIT.java @@ -0,0 +1,61 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.DatabaseId; +import com.google.common.collect.ImmutableList; +import java.util.concurrent.ExecutionException; +import org.junit.Before; +import org.junit.Test; + +public class UnnamedParametersIT extends SampleTestBase { + private static String databaseId; + + @Before + public void setup() throws ExecutionException, InterruptedException { + databaseId = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + databaseAdminClient + .newDatabaseBuilder(DatabaseId.of(projectId, instanceId, databaseId)) + .build(), + ImmutableList.of( + "CREATE TABLE Students (" + + " StudentId INT64 NOT NULL PRIMARY KEY," + + " Name STRING(1024) NOT NULL," + + " IsNRI BOOL NOT NULL," + + " AvgMarks FLOAT32 NOT NULL," + + " JoinedAt DATE NOT NULL," + + " PinCode INT64 NOT NULL," + + " CreatedAt TIMESTAMP NOT NULL" + + ")")) + .get(); + } + + @Test + public void testUnnamedParameters() throws Exception { + final String out = + SampleRunner.runSample( + () -> UnnamedParametersExample.executeQueryWithUnnamedParameters(projectId, instanceId, + databaseId)); + assertTrue(out.contains("Row is inserted.")); + assertTrue(out.contains("Google")); + assertTrue(out.contains("Row is fetched.")); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/UpdateBackupScheduleSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/UpdateBackupScheduleSampleIT.java new file mode 100644 index 000000000000..ea299571f5d9 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/UpdateBackupScheduleSampleIT.java @@ -0,0 +1,52 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.spanner.admin.database.v1.BackupScheduleName; +import java.util.UUID; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class UpdateBackupScheduleSampleIT extends SampleTestBaseV2 { + // Default instance and given db should exist for tests to pass. + private static String databaseId = System.getProperty("spanner.sample.database", "mysample"); + + @Test + public void testUpdateBackupScheduleSample() throws Exception { + String backupScheduleId = String.format("schedule-%s", UUID.randomUUID()); + BackupScheduleName backupScheduleName = + BackupScheduleName.of(projectId, instanceId, databaseId, backupScheduleId); + String out = + SampleRunner.runSample( + () -> { + try { + CreateFullBackupScheduleSample.createFullBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId); + UpdateBackupScheduleSample.updateBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId); + } finally { + DeleteBackupScheduleSample.deleteBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId); + } + }); + assertThat(out).contains(String.format("Updated backup schedule: %s", backupScheduleName)); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/UpdateDatabaseSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/UpdateDatabaseSampleIT.java new file mode 100644 index 
000000000000..b53414a2f5b6 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/UpdateDatabaseSampleIT.java @@ -0,0 +1,70 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.common.collect.Lists; +import com.google.protobuf.FieldMask; +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.UpdateDatabaseMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseRequest; +import java.util.concurrent.TimeUnit; +import org.junit.Test; + +public class UpdateDatabaseSampleIT extends SampleTestBaseV2 { + + @Test + public void testUpdateDatabase() throws Exception { + // Create database + final String databaseId = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabaseAsync(getInstanceName(projectId, instanceId), + "CREATE DATABASE `" + databaseId + "`") + .get(5, TimeUnit.MINUTES); + + // Runs sample + final String out = + SampleRunner.runSample( + () -> UpdateDatabaseSample.updateDatabase(projectId, instanceId, databaseId)); + assertTrue( + "Expected that database would have been updated. 
Output received was " + out, + out.contains(String.format( + "Updated database %s", DatabaseName.of(projectId, instanceId, databaseId)))); + + // Cleanup + final com.google.spanner.admin.database.v1.Database database = + com.google.spanner.admin.database.v1.Database.newBuilder() + .setName(DatabaseName.of(projectId, instanceId, databaseId).toString()) + .setEnableDropProtection(false).build(); + final UpdateDatabaseRequest updateDatabaseRequest = + UpdateDatabaseRequest.newBuilder() + .setDatabase(database) + .setUpdateMask( + FieldMask.newBuilder().addAllPaths( + Lists.newArrayList("enable_drop_protection")).build()) + .build(); + + OperationFuture operation = + databaseAdminClient.updateDatabaseAsync(updateDatabaseRequest); + Database updatedDb = operation.get(5, TimeUnit.MINUTES); + assertFalse(updatedDb.getEnableDropProtection()); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/UpdateDatabaseWithDefaultLeaderSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/UpdateDatabaseWithDefaultLeaderSampleIT.java new file mode 100644 index 000000000000..2953e02935d7 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/UpdateDatabaseWithDefaultLeaderSampleIT.java @@ -0,0 +1,62 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static org.junit.Assert.assertTrue; + +import com.google.spanner.admin.database.v1.Database; +import com.google.spanner.admin.instance.v1.InstanceConfig; +import java.util.concurrent.TimeUnit; +import org.junit.Test; + +public class UpdateDatabaseWithDefaultLeaderSampleIT extends SampleTestBaseV2 { + + @Test + public void testUpdateDatabaseWithDefaultLeader() throws Exception { + // Create database + final String databaseId = idGenerator.generateDatabaseId(); + final Database createdDatabase = databaseAdminClient + .createDatabaseAsync(getInstanceName(projectId, multiRegionalInstanceId), + "CREATE DATABASE `" + databaseId + "`") + .get(5, TimeUnit.MINUTES); + final String defaultLeader = createdDatabase.getDefaultLeader(); + + // Finds a possible new leader option + final String instanceConfigId = + instanceAdminClient.getInstance(getInstanceName(projectId, multiRegionalInstanceId)) + .getConfig(); + final InstanceConfig config = instanceAdminClient.getInstanceConfig(instanceConfigId); + final String newLeader = + config.getLeaderOptionsList().stream() + .filter(leader -> !leader.equals(defaultLeader)) + .findFirst().orElseThrow(() -> + new RuntimeException("Expected to find a leader option different than " + + defaultLeader) + ); + + // Runs sample + final String out = SampleRunner.runSample(() -> UpdateDatabaseWithDefaultLeaderSample + .updateDatabaseWithDefaultLeader(projectId, multiRegionalInstanceId, databaseId, newLeader) + ); + + assertTrue( + "Expected that database new leader would had been updated to " + newLeader + "." 
+ + " Output received was " + out, + out.contains("Updated default leader to " + newLeader) + ); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/AlterTableWithForeignKeyDeleteCascadeSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/AlterTableWithForeignKeyDeleteCascadeSampleIT.java new file mode 100644 index 000000000000..89c10cc460e5 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/AlterTableWithForeignKeyDeleteCascadeSampleIT.java @@ -0,0 +1,67 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +import static org.junit.Assert.assertTrue; + +import com.example.spanner.SampleRunner; +import com.example.spanner.SampleTestBase; +import java.util.Arrays; +import java.util.concurrent.TimeUnit; +import org.junit.Test; + +public class AlterTableWithForeignKeyDeleteCascadeSampleIT extends SampleTestBase { + + @Test + public void testAlterTableWithForeignKeyDeleteCascade() throws Exception { + + // Creates database + final String databaseId = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + instanceId, + databaseId, + Arrays.asList( + "CREATE TABLE Customers (\n" + + " CustomerId INT64 NOT NULL,\n" + + " CustomerName STRING(62) NOT NULL,\n" + + " ) PRIMARY KEY (CustomerId)", + "CREATE TABLE ShoppingCarts (\n" + + " CartId INT64 NOT NULL,\n" + + " CustomerId INT64 NOT NULL,\n" + + " CustomerName STRING(62) NOT NULL,\n" + + " CONSTRAINT FKShoppingCartsCustomerId" + + " FOREIGN KEY (CustomerId)\n" + + " REFERENCES Customers (CustomerId)\n" + + " ) PRIMARY KEY (CartId)\n")) + .get(5, TimeUnit.MINUTES); + + // Runs sample + final String out = + SampleRunner.runSample( + () -> + AlterTableWithForeignKeyDeleteCascadeSample.alterForeignKeyDeleteCascadeConstraint( + databaseAdminClient, instanceId, databaseId)); + + assertTrue( + "Expected to have created database " + + databaseId + + " with tables containing " + + "foreign key constraints.", + out.contains("Altered ShoppingCarts table " + "with FKShoppingCartsCustomerName")); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/CreateDatabaseWithDefaultLeaderSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/CreateDatabaseWithDefaultLeaderSampleIT.java new file mode 100644 index 000000000000..12b7c5044d4b --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/CreateDatabaseWithDefaultLeaderSampleIT.java @@ -0,0 
+1,61 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +import static org.junit.Assert.assertTrue; + +import com.example.spanner.SampleRunner; +import com.example.spanner.SampleTestBase; +import com.google.cloud.spanner.InstanceConfig; +import com.google.cloud.spanner.InstanceConfigId; +import org.junit.Test; + +public class CreateDatabaseWithDefaultLeaderSampleIT extends SampleTestBase { + + @Test + public void testCreateDatabaseWithDefaultLeader() throws Exception { + final String databaseId = idGenerator.generateDatabaseId(); + + // Finds possible default leader + final InstanceConfigId instanceConfigId = instanceAdminClient + .getInstance(multiRegionalInstanceId) + .getInstanceConfigId(); + final InstanceConfig config = instanceAdminClient + .getInstanceConfig(instanceConfigId.getInstanceConfig()); + assertTrue( + "Expected instance config " + instanceConfigId + " to have at least one leader option", + config.getLeaderOptions().size() > 0 + ); + final String defaultLeader = config.getLeaderOptions().get(0); + + // Runs sample + final String out = SampleRunner.runSample(() -> + CreateDatabaseWithDefaultLeaderSample.createDatabaseWithDefaultLeader( + projectId, + multiRegionalInstanceId, + databaseId, + defaultLeader + ) + ); + + assertTrue( + "Expected created database to have default leader " + defaultLeader + "." 
+ + " Output received was " + out, + out.contains("Default leader: " + defaultLeader) + ); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/CreateDatabaseWithVersionRetentionPeriodSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/CreateDatabaseWithVersionRetentionPeriodSampleIT.java new file mode 100644 index 000000000000..448b1c10c445 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/CreateDatabaseWithVersionRetentionPeriodSampleIT.java @@ -0,0 +1,48 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +import static com.google.common.truth.Truth.assertThat; + +import com.example.spanner.SampleRunner; +import com.example.spanner.SampleTestBase; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Integration tests for {@link CreateDatabaseWithVersionRetentionPeriodSample} + */ +@RunWith(JUnit4.class) +public class CreateDatabaseWithVersionRetentionPeriodSampleIT extends SampleTestBase { + + @Test + public void createsDatabaseWithVersionRetentionPeriod() throws Exception { + final String databaseId = idGenerator.generateDatabaseId(); + final String versionRetentionPeriod = "7d"; + + final String out = SampleRunner.runSample(() -> CreateDatabaseWithVersionRetentionPeriodSample + .createDatabaseWithVersionRetentionPeriod( + databaseAdminClient, instanceId, databaseId, versionRetentionPeriod + )); + + assertThat(out).contains( + "Created database [projects/" + projectId + "/instances/" + instanceId + "/databases/" + + databaseId + "]"); + assertThat(out).contains("Version retention period: " + versionRetentionPeriod); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/CreateInstanceWithAutoscalingConfigSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/CreateInstanceWithAutoscalingConfigSampleIT.java new file mode 100644 index 000000000000..041d0c52012a --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/CreateInstanceWithAutoscalingConfigSampleIT.java @@ -0,0 +1,44 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +import static com.google.common.truth.Truth.assertThat; + +import com.example.spanner.SampleRunner; +import com.example.spanner.SampleTestBase; +import com.google.cloud.spanner.InstanceId; +import java.util.UUID; +import org.junit.Test; + +public class CreateInstanceWithAutoscalingConfigSampleIT extends SampleTestBase { + + @Test + public void testCreateInstanceWithAutoscalingConfig() throws Exception { + String instanceId = String.format("autoscaler-%s", UUID.randomUUID()); + String out = + SampleRunner.runSample( + () -> { + try { + CreateInstanceWithAutoscalingConfigExample.createInstance(projectId, instanceId); + } finally { + spanner.getInstanceAdminClient().deleteInstance(instanceId); + } + }); + assertThat(out) + .contains(String.format("Autoscaler instance %s", InstanceId.of(projectId, instanceId))); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/CreateTableWithForeignKeyDeleteCascadeSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/CreateTableWithForeignKeyDeleteCascadeSampleIT.java new file mode 100644 index 000000000000..a6cdee194739 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/CreateTableWithForeignKeyDeleteCascadeSampleIT.java @@ -0,0 +1,54 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +import static org.junit.Assert.assertTrue; + +import com.example.spanner.SampleRunner; +import com.example.spanner.SampleTestBase; +import java.util.Arrays; +import java.util.concurrent.TimeUnit; +import org.junit.Test; + +public class CreateTableWithForeignKeyDeleteCascadeSampleIT extends SampleTestBase { + + @Test + public void testCreateTableWithForeignKeyDeleteCascade() throws Exception { + + // Creates database + final String databaseId = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase(instanceId, databaseId, Arrays.asList()) + .get(5, TimeUnit.MINUTES); + + // Runs sample + final String out = + SampleRunner.runSample( + () -> + CreateTableWithForeignKeyDeleteCascadeSample + .createForeignKeyDeleteCascadeConstraint( + databaseAdminClient, instanceId, databaseId)); + + assertTrue( + "Expected to have created database " + + databaseId + + " with tables containing " + + "foreign key constraints.", + out.contains( + "Created Customers and ShoppingCarts table " + "with FKShoppingCartsCustomerId")); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/CustomInstanceConfigSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/CustomInstanceConfigSampleIT.java new file mode 100644 index 000000000000..c8271225ccba --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/CustomInstanceConfigSampleIT.java @@ -0,0 +1,61 @@ +/* + * Copyright 2022 
Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +import static org.junit.Assert.assertTrue; + +import com.example.spanner.SampleRunner; +import com.example.spanner.SampleTestBase; +import org.junit.Test; + +public class CustomInstanceConfigSampleIT extends SampleTestBase { + + @Test + public void testCustomInstanceConfigOperations() throws Exception { + String customInstanceConfigId = idGenerator.generateInstanceConfigId(); + + // Create a random instance config. Display name is set to the instance config id in sample. + final String out1 = + SampleRunner.runSample( + () -> + CreateInstanceConfigSample.createInstanceConfig( + projectId, instanceConfigName, customInstanceConfigId)); + assertTrue(out1.contains("Created instance configuration")); + + // List the instance config operations. + final String out2 = + SampleRunner.runSample( + () -> + ListInstanceConfigOperationsSample.listInstanceConfigOperations(projectId)); + assertTrue(out2.contains("Create instance config operation")); + + // Update display name to a randomly generated instance config id. + final String out3 = + SampleRunner.runSample( + () -> + UpdateInstanceConfigSample.updateInstanceConfig( + projectId, customInstanceConfigId)); + assertTrue(out3.contains("Updated instance configuration")); + + // Delete the created instance config. 
+ final String out4 = + SampleRunner.runSample( + () -> + DeleteInstanceConfigSample.deleteInstanceConfig(projectId, customInstanceConfigId)); + assertTrue(out4.contains("Deleted instance configuration")); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/DatabaseRolesIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/DatabaseRolesIT.java new file mode 100644 index 000000000000..f1bad568978d --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/DatabaseRolesIT.java @@ -0,0 +1,125 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +import static org.junit.Assert.assertTrue; + +import com.example.spanner.SampleRunner; +import com.example.spanner.SampleTestBase; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.common.collect.ImmutableList; +import java.util.Arrays; +import java.util.Collections; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.FixMethodOrder; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.junit.runners.MethodSorters; + +/** Integration tests for FGAC samples for GoogleStandardSql dialect. */ +@RunWith(JUnit4.class) +@FixMethodOrder(MethodSorters.NAME_ASCENDING) +public class DatabaseRolesIT extends SampleTestBase { + + private static DatabaseId databaseId; + + @BeforeClass + public static void createTestDatabase() throws Exception { + final String database = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + instanceId, + database, + ImmutableList.of( + "CREATE TABLE Singers (" + + " SingerId INT64 NOT NULL," + + " FirstName STRING(1024)," + + " LastName STRING(1024)," + + " SingerInfo BYTES(MAX)," + + " FullName STRING(2048) AS " + + " (ARRAY_TO_STRING([FirstName, LastName], \" \")) STORED" + + ") PRIMARY KEY (SingerId)", + "CREATE TABLE Albums (" + + " SingerId INT64 NOT NULL," + + " AlbumId INT64 NOT NULL," + + " AlbumTitle STRING(MAX)," + + " MarketingBudget INT64" + + ") PRIMARY KEY (SingerId, AlbumId)," + + " INTERLEAVE IN PARENT Singers ON DELETE CASCADE")) + .get(10, TimeUnit.MINUTES); + databaseId = DatabaseId.of(projectId, instanceId, database); + } + + @Before + public void insertTestData() { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write( + Arrays.asList( + 
Mutation.newInsertOrUpdateBuilder("Singers") + .set("SingerId") + .to(1L) + .set("FirstName") + .to("Melissa") + .set("LastName") + .to("Garcia") + .build(), + Mutation.newInsertOrUpdateBuilder("Albums") + .set("SingerId") + .to(1L) + .set("AlbumId") + .to(1L) + .set("AlbumTitle") + .to("title 1") + .set("MarketingBudget") + .to(20000L) + .build())); + } + + @After + public void removeTestData() { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write(Collections.singletonList(Mutation.delete("Singers", KeySet.all()))); + } + + @Test + public void testAddAndDropDatabaseRole() throws Exception { + final String out = + SampleRunner.runSample( + () -> + AddAndDropDatabaseRole.addAndDropDatabaseRole( + projectId, instanceId, databaseId.getDatabase(), "new_parent", "new_child")); + assertTrue(out.contains("Created roles new_parent and new_child and granted privileges")); + assertTrue(out.contains("Revoked privileges and dropped role new_child")); + } + + @Test + public void testListDatabaseRoles() throws Exception { + final String out = + SampleRunner.runSample( + () -> + ListDatabaseRoles.listDatabaseRoles( + projectId, instanceId, databaseId.getDatabase())); + assertTrue(out.contains("new_parent")); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/DropForeignKeyConstraintDeleteCascadeSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/DropForeignKeyConstraintDeleteCascadeSampleIT.java new file mode 100644 index 000000000000..a4ce31c9d856 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/DropForeignKeyConstraintDeleteCascadeSampleIT.java @@ -0,0 +1,65 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +import static org.junit.Assert.assertTrue; + +import com.example.spanner.SampleRunner; +import com.example.spanner.SampleTestBase; +import java.util.Arrays; +import java.util.concurrent.TimeUnit; +import org.junit.Test; + +public class DropForeignKeyConstraintDeleteCascadeSampleIT extends SampleTestBase { + + @Test + public void testDropForeignKeyConstraintDeleteCascade() throws Exception { + + // Creates database + final String databaseId = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + instanceId, + databaseId, + Arrays.asList( + "CREATE TABLE Customers (\n" + + " CustomerId INT64 NOT NULL,\n" + + " CustomerName STRING(62) NOT NULL,\n" + + " ) PRIMARY KEY (CustomerId)", + "CREATE TABLE ShoppingCarts (\n" + + " CartId INT64 NOT NULL,\n" + + " CustomerId INT64 NOT NULL,\n" + + " CustomerName STRING(62) NOT NULL,\n" + + " CONSTRAINT FKShoppingCartsCustomerName" + + " FOREIGN KEY (CustomerName)\n" + + " REFERENCES Customers (CustomerName) ON DELETE CASCADE\n" + + " ) PRIMARY KEY (CartId)\n")) + .get(5, TimeUnit.MINUTES); + + // Runs sample + final String out = + SampleRunner.runSample( + () -> + DropForeignKeyConstraintDeleteCascadeSample.deleteForeignKeyDeleteCascadeConstraint( + databaseAdminClient, instanceId, databaseId)); + + assertTrue( + "Expected to have dropped foreign-key constraints from tables in created database " + + databaseId, + out.contains("Altered ShoppingCarts table to drop FKShoppingCartsCustomerName")); + } +} diff --git 
a/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/EncryptionKeyIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/EncryptionKeyIT.java new file mode 100644 index 000000000000..ffd0771fa0b6 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/EncryptionKeyIT.java @@ -0,0 +1,127 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +import static com.google.common.truth.Truth.assertThat; + +import com.example.spanner.SampleRunner; +import com.example.spanner.SampleTestBase; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.Uninterruptibles; +import java.util.concurrent.TimeUnit; +import java.util.function.Predicate; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Integration tests for: {@link CreateDatabaseWithEncryptionKey}, {@link + * CreateBackupWithEncryptionKey} and {@link RestoreBackupWithEncryptionKey} + */ +@RunWith(JUnit4.class) +@Ignore +public class EncryptionKeyIT extends SampleTestBase { + + private static String key; + + @BeforeClass + public static 
void setUp() { + String keyLocation = Preconditions + .checkNotNull(System.getProperty("spanner.test.key.location")); + String keyRing = Preconditions.checkNotNull(System.getProperty("spanner.test.key.ring")); + String keyName = Preconditions.checkNotNull(System.getProperty("spanner.test.key.name")); + key = "projects/" + projectId + "/locations/" + keyLocation + "/keyRings/" + keyRing + + "/cryptoKeys/" + keyName; + } + + @Test + public void testEncryptedDatabaseAndBackupAndRestore() throws Exception { + final String databaseId = idGenerator.generateDatabaseId(); + final String backupId = idGenerator.generateBackupId(); + final String restoreId = idGenerator.generateDatabaseId(); + + String out = SampleRunner.runSample(() -> + CreateDatabaseWithEncryptionKey.createDatabaseWithEncryptionKey( + databaseAdminClient, + projectId, + instanceId, + databaseId, + key + )); + assertThat(out).contains( + "Database projects/" + projectId + "/instances/" + instanceId + "/databases/" + databaseId + + " created with encryption key " + key); + + out = SampleRunner.runSampleWithRetry(() -> + CreateBackupWithEncryptionKey.createBackupWithEncryptionKey( + databaseAdminClient, + projectId, + instanceId, + databaseId, + backupId, + key + ), new ShouldRetryBackupOperation()); + assertThat(out).containsMatch( + "Backup projects/" + projectId + "/instances/" + instanceId + "/backups/" + backupId + + " of size \\d+ bytes was created at (.*) using encryption key " + key); + + out = SampleRunner.runSampleWithRetry(() -> + RestoreBackupWithEncryptionKey.restoreBackupWithEncryptionKey( + databaseAdminClient, + projectId, + instanceId, + backupId, + restoreId, + key + ), new ShouldRetryBackupOperation()); + assertThat(out).contains( + "Database projects/" + projectId + "/instances/" + instanceId + "/databases/" + databaseId + + " restored to projects/" + projectId + "/instances/" + instanceId + "/databases/" + + restoreId + " from backup projects/" + projectId + "/instances/" + instanceId + + 
"/backups/" + backupId + " using encryption key " + key); + } + + static class ShouldRetryBackupOperation implements Predicate { + + private static final int MAX_ATTEMPTS = 20; + private int attempts = 0; + + @Override + public boolean test(SpannerException e) { + if (e.getErrorCode() == ErrorCode.FAILED_PRECONDITION + && e.getMessage().contains("Please retry the operation once the pending")) { + attempts++; + if (attempts == MAX_ATTEMPTS) { + // Throw custom exception so it is easier to locate in the log why it went wrong. + throw SpannerExceptionFactory.newSpannerException(ErrorCode.DEADLINE_EXCEEDED, + String.format("Operation failed %d times because of other pending operations. " + + "Giving up operation.\n", attempts), + e); + } + // Wait one minute before retrying. + Uninterruptibles.sleepUninterruptibly(60L, TimeUnit.SECONDS); + return true; + } + return false; + } + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/GetDatabaseDdlSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/GetDatabaseDdlSampleIT.java new file mode 100644 index 000000000000..404aceca7256 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/GetDatabaseDdlSampleIT.java @@ -0,0 +1,81 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +import static org.junit.Assert.assertTrue; + +import com.example.spanner.SampleRunner; +import com.example.spanner.SampleTestBase; +import com.google.cloud.spanner.InstanceConfig; +import com.google.cloud.spanner.InstanceConfigId; +import java.util.Arrays; +import java.util.concurrent.TimeUnit; +import org.junit.Test; + +public class GetDatabaseDdlSampleIT extends SampleTestBase { + + @Test + public void testGetDatabaseDdl() throws Exception { + // Finds a possible new leader option + final InstanceConfigId instanceConfigId = instanceAdminClient + .getInstance(multiRegionalInstanceId) + .getInstanceConfigId(); + final InstanceConfig config = instanceAdminClient + .getInstanceConfig(instanceConfigId.getInstanceConfig()); + assertTrue( + "Expected instance config " + instanceConfigId + " to have at least one leader option", + config.getLeaderOptions().size() > 0 + ); + final String defaultLeader = config.getLeaderOptions().get(0); + + // Creates database + final String databaseId = idGenerator.generateDatabaseId(); + databaseAdminClient.createDatabase( + multiRegionalInstanceId, + databaseId, + Arrays.asList( + "CREATE TABLE Singers (Id INT64 NOT NULL) PRIMARY KEY (Id)", + "ALTER DATABASE `" + + databaseId + + "` SET OPTIONS ( default_leader = '" + + defaultLeader + + "')" + ) + ).get(5, TimeUnit.MINUTES); + + // Runs sample + final String out = SampleRunner.runSample(() -> GetDatabaseDdlSample + .getDatabaseDdl(projectId, multiRegionalInstanceId, databaseId) + ); + + assertTrue( + "Expected to have retrieved database DDL for " + databaseId + "." + + " Output received was " + out, + out.contains("Retrieved database DDL for " + databaseId) + ); + assertTrue( + "Expected leader to be set to " + defaultLeader + "." + + " Output received was " + out, + out.contains("default_leader = '" + defaultLeader + "'") + ); + assertTrue( + "Expected table to have been created in " + databaseId + "." 
+ + " Output received was " + out, + out.contains("CREATE TABLE Singers") + ); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/GetInstanceConfigSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/GetInstanceConfigSampleIT.java new file mode 100644 index 000000000000..ac3598f549e8 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/GetInstanceConfigSampleIT.java @@ -0,0 +1,39 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +import static org.junit.Assert.assertTrue; + +import com.example.spanner.SampleRunner; +import com.example.spanner.SampleTestBase; +import org.junit.Test; + +public class GetInstanceConfigSampleIT extends SampleTestBase { + + @Test + public void testGetInstanceConfig() throws Exception { + final String out = SampleRunner.runSample(() -> + GetInstanceConfigSample.getInstanceConfig(projectId, instanceConfigName) + ); + + assertTrue( + "Expected instance config " + instanceConfigName + " to contain at least one leader option." 
+ + " Output received was " + out, + out.matches("(?s:.*\\[.+\\].*)") + ); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/ListInstanceConfigsSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/ListInstanceConfigsSampleIT.java new file mode 100644 index 000000000000..8dcee20313ad --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/ListInstanceConfigsSampleIT.java @@ -0,0 +1,39 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +import static org.junit.Assert.assertTrue; + +import com.example.spanner.SampleRunner; +import com.example.spanner.SampleTestBase; +import org.junit.Test; + +public class ListInstanceConfigsSampleIT extends SampleTestBase { + + @Test + public void testListInstanceConfigs() throws Exception { + final String out = SampleRunner.runSample(() -> + ListInstanceConfigsSample.listInstanceConfigs(projectId) + ); + + assertTrue( + "Expected instance config " + instanceConfigName + " to contain at least one leader option." 
+ + " Output received was " + out, + out.matches("(?s:.*nam6: \\[.+\\].*)") + ); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/PgCaseSensitivitySampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/PgCaseSensitivitySampleIT.java new file mode 100644 index 000000000000..100014643d10 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/PgCaseSensitivitySampleIT.java @@ -0,0 +1,48 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +import static org.junit.Assert.assertTrue; + +import com.example.spanner.SampleRunner; +import com.example.spanner.SampleTestBase; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import java.util.Collections; +import org.junit.Test; + +public class PgCaseSensitivitySampleIT extends SampleTestBase { + + @Test + public void testPgCaseSensitivitySample() throws Exception { + final String databaseId = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + databaseAdminClient + .newDatabaseBuilder(DatabaseId.of(projectId, instanceId, databaseId)) + .setDialect(Dialect.POSTGRESQL) + .build(), + Collections.emptyList()) + .get(); + + final String out = + SampleRunner.runSample( + () -> PgCaseSensitivitySample.pgCaseSensitivity(projectId, instanceId, databaseId)); + assertTrue(out, out.contains("SingerId: 1, FirstName: Bruce, LastName: Allison")); + assertTrue(out, out.contains("SingerId: 1, FullName: Bruce Allison")); + } +} \ No newline at end of file diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/PgInterleavedTableSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/PgInterleavedTableSampleIT.java new file mode 100644 index 000000000000..b7d229a1b8fe --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/PgInterleavedTableSampleIT.java @@ -0,0 +1,47 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +import static org.junit.Assert.assertTrue; + +import com.example.spanner.SampleRunner; +import com.example.spanner.SampleTestBase; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import java.util.Collections; +import org.junit.Test; + +public class PgInterleavedTableSampleIT extends SampleTestBase { + + @Test + public void testPgInterleavedTableSample() throws Exception { + final String databaseId = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + databaseAdminClient + .newDatabaseBuilder(DatabaseId.of(projectId, instanceId, databaseId)) + .setDialect(Dialect.POSTGRESQL) + .build(), + Collections.emptyList()) + .get(); + + final String out = + SampleRunner.runSample( + () -> PgInterleavedTableSample.pgInterleavedTable(projectId, instanceId, databaseId)); + assertTrue(out.contains("Created interleaved table hierarchy using PostgreSQL dialect")); + } +} \ No newline at end of file diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/PgSpannerSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/PgSpannerSampleIT.java new file mode 100644 index 000000000000..c4ba80f3f804 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/PgSpannerSampleIT.java @@ -0,0 +1,304 @@ +/* + * Copyright 2022 Google Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.regex.Pattern; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Unit tests for {@code PgSpannerSample} + */ +@RunWith(JUnit4.class) +@SuppressWarnings("checkstyle:abbreviationaswordinname") +public class PgSpannerSampleIT { + private static final int DBID_LENGTH = 20; + // The instance needs to exist for tests to pass. 
+ private static final String instanceId = System.getProperty("spanner.test.instance"); + private static final String baseDbId = System.getProperty("spanner.sample.database"); + private static final String databaseId = formatForTest(baseDbId); + private static final String encryptedDatabaseId = formatForTest(baseDbId); + private static final String encryptedBackupId = formatForTest(baseDbId); + private static final String encryptedRestoreId = formatForTest(baseDbId); + static Spanner spanner; + static DatabaseId dbId; + static DatabaseAdminClient dbClient; + + @BeforeClass + public static void setUp() { + SpannerOptions options = + SpannerOptions.newBuilder().setAutoThrottleAdministrativeRequests().build(); + spanner = options.getService(); + dbClient = spanner.getDatabaseAdminClient(); + dbId = DatabaseId.of(options.getProjectId(), instanceId, databaseId); + // Delete stale test databases that have been created earlier by this test, but not deleted. + deleteStaleTestDatabases(); + } + + static void deleteStaleTestDatabases() { + Timestamp now = Timestamp.now(); + Pattern samplePattern = getTestDbIdPattern(PgSpannerSampleIT.baseDbId); + Pattern restoredPattern = getTestDbIdPattern("restored"); + for (Database db : dbClient.listDatabases(PgSpannerSampleIT.instanceId).iterateAll()) { + if (TimeUnit.HOURS.convert(now.getSeconds() - db.getCreateTime().getSeconds(), + TimeUnit.SECONDS) > 24) { + if (db.getId().getDatabase().length() >= DBID_LENGTH) { + if (samplePattern.matcher(toComparableId(PgSpannerSampleIT.baseDbId, + db.getId().getDatabase())).matches()) { + db.drop(); + } + if (restoredPattern.matcher(toComparableId("restored", db.getId().getDatabase())) + .matches()) { + db.drop(); + } + } + } + } + } + + @AfterClass + public static void tearDown() { + dbClient.dropDatabase(dbId.getInstanceId().getInstance(), dbId.getDatabase()); + dbClient.dropDatabase( + dbId.getInstanceId().getInstance(), SpannerSample.createRestoredSampleDbId(dbId)); + 
dbClient.dropDatabase(instanceId, encryptedDatabaseId); + dbClient.dropDatabase(instanceId, encryptedRestoreId); + dbClient.deleteBackup(instanceId, encryptedBackupId); + spanner.close(); + } + + private static String toComparableId(String baseId, String existingId) { + String zeroUuid = "00000000-0000-0000-0000-0000-00000000"; + int shouldBeLength = (baseId + "-" + zeroUuid).length(); + int missingLength = shouldBeLength - existingId.length(); + return existingId + zeroUuid.substring(zeroUuid.length() - missingLength); + } + + private static Pattern getTestDbIdPattern(String baseDbId) { + return Pattern.compile( + baseDbId + "-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{8}", + Pattern.CASE_INSENSITIVE); + } + + static String formatForTest(String name) { + return name + "-" + UUID.randomUUID().toString().substring(0, DBID_LENGTH); + } + + private String runSample(String command) { + final PrintStream stdOut = System.out; + final ByteArrayOutputStream bout = new ByteArrayOutputStream(); + final PrintStream out = new PrintStream(bout); + System.setOut(out); + System.out.println(instanceId + ":" + databaseId); + PgSpannerSample.main(new String[]{command, instanceId, databaseId}); + System.setOut(stdOut); + return bout.toString(); + } + + @Test + public void testSample() throws Exception { + assertThat(instanceId).isNotNull(); + assertThat(databaseId).isNotNull(); + + System.out.println("Create Database ..."); + String out = runSample("createdatabase"); + assertThat(out).contains("Created database"); + assertThat(out).contains(dbId.getName()); + + System.out.println("Create sample tables Singers and Albums ..."); + runSample("createtableusingddl"); + + System.out.println("Write data to sample tables ..."); + runSample("write"); + + System.out.println("Read data from sample tables ..."); + out = runSample("read"); + assertThat(out).contains("1 1 Total Junk"); + + System.out.println("Write data using DML to sample table ..."); + 
runSample("writeusingdml"); + System.out.println("Query Singers table ..."); + out = runSample("querysingerstable"); + assertThat(out).contains("Melissa Garcia"); + out = runSample("query"); + assertThat(out).contains("1 1 Total Junk"); + out = runSample("querywithparameter"); + assertThat(out).contains("12 Melissa Garcia"); + + System.out.println("Add column marketing budget ..."); + runSample("addmarketingbudget"); + + // wait for 15 seconds to elapse and then run an update, and query for stale data + long lastUpdateDataTimeInMillis = System.currentTimeMillis(); + while (System.currentTimeMillis() < lastUpdateDataTimeInMillis + 16000) { + Thread.sleep(1000); + } + System.out.println("Write data to marketing budget ..."); + runSample("update"); + + System.out.println("Query marketing budget ..."); + out = runSample("querymarketingbudget"); + assertThat(out).contains("1 1 100000"); + assertThat(out).contains("2 2 500000"); + + System.out.println("Write with transaction using dml..."); + runSample("writewithtransactionusingdml"); + out = runSample("querymarketingbudget"); + assertThat(out).contains("1 1 300000"); + assertThat(out).contains("1 1 300000"); + + System.out.println("Add index ..."); + runSample("addindex"); + + System.out.println("Read index ..."); + out = runSample("readindex"); + assertThat(out).contains("Go, Go, Go"); + assertThat(out).contains("Forever Hold Your Peace"); + assertThat(out).contains("Green"); + + System.out.println("Add Storing index ..."); + runSample("addstoringindex"); + + System.out.println("Read storing index ..."); + out = runSample("readstoringindex"); + assertThat(out).contains("300000"); + + System.out.println("Read only transaction ..."); + out = runSample("readonlytransaction"); + assertThat(out.replaceAll("[\r\n]+", " ")) + .containsMatch("(Total Junk.*){2}"); + + System.out.println("Add Timestamp column ..."); + out = runSample("addlastupdatetimestampcolumn"); + assertThat(out).contains("Added LastUpdateTime as a timestamp 
column"); + + System.out.println("Update values in Timestamp column ..."); + runSample("updatewithtimestamp"); + out = runSample("querywithtimestamp"); + assertThat(out).contains("1 1 1000000"); + assertThat(out).contains("2 2 750000"); + + System.out.println("Create table with Timestamp column ..."); + out = runSample("createtablewithtimestamp"); + assertThat(out).contains("Created Performances table in database"); + + System.out.println("Write with Timestamp ..."); + runSample("writewithtimestamp"); + out = runSample("queryperformancestable"); + assertThat(out).contains("1 4 11000"); + assertThat(out).contains("1 19 15000"); + assertThat(out).contains("2 42 7000"); + + System.out.println("Write using DML ..."); + runSample("insertusingdml"); + out = runSample("querysingerstable"); + assertThat(out).contains("Virginia Watson"); + + System.out.println("Update using DML ..."); + runSample("updateusingdml"); + out = runSample("querymarketingbudget"); + assertThat(out).contains("1 1 2000000"); + + System.out.println("Delete using DML ..."); + runSample("deleteusingdml"); + out = runSample("querysingerstable"); + assertThat(out).doesNotContain("Alice Trentor"); + + System.out.println("Write and Read using DML ..."); + out = runSample("writeandreadusingdml"); + assertThat(out).contains("Timothy Campbell"); + + System.out.println("Update using partitioned DML ..."); + runSample("updateusingpartitioneddml"); + out = runSample("querymarketingbudget"); + assertThat(out).contains("2 2 100000"); + assertThat(out).contains("1 1 2000000"); + + System.out.println("Delete using Partitioned DML ..."); + runSample("deleteusingpartitioneddml"); + out = runSample("querysingerstable"); + assertThat(out).doesNotContain("Timothy Grant"); + assertThat(out).doesNotContain("Melissa Garcia"); + assertThat(out).doesNotContain("Russell Morales"); + assertThat(out).doesNotContain("Jacqueline Long"); + assertThat(out).doesNotContain("Dylan Shaw"); + + System.out.println("Update in Batch using 
DML ..."); + out = runSample("updateusingbatchdml"); + assertThat(out).contains("1 record updated by stmt 0"); + assertThat(out).contains("1 record updated by stmt 1"); + + System.out.println("Create table with data types ..."); + out = runSample("createtablewithdatatypes"); + assertThat(out).contains("Created Venues table in database"); + + System.out.println("Write into table and Query Boolean Type ..."); + runSample("writedatatypesdata"); + out = runSample("querywithbool"); + assertThat(out).contains("19 Venue 19 true"); + + System.out.println("Query with Bytes ..."); + out = runSample("querywithbytes"); + assertThat(out).contains("4 Venue 4"); + + System.out.println("Query with Float ..."); + out = runSample("querywithfloat"); + assertThat(out).contains("4 Venue 4 0.8"); + assertThat(out).contains("19 Venue 19 0.9"); + + System.out.println("Query with Int ..."); + out = runSample("querywithint"); + assertThat(out).contains("19 Venue 19 6300"); + assertThat(out).contains("42 Venue 42 3000"); + + System.out.println("Query with String ..."); + out = runSample("querywithstring"); + assertThat(out).contains("42 Venue 42"); + + System.out.println("Query with Timestamp parameter ..."); + out = runSample("querywithtimestampparameter"); + assertThat(out).contains("4 Venue 4"); + assertThat(out).contains("19 Venue 19"); + assertThat(out).contains("42 Venue 42"); + + System.out.println("Query with Numeric Type ..."); + out = runSample("querywithnumeric"); + assertThat(out).contains("19 Venue 19 1200100"); + assertThat(out).contains("42 Venue 42 390650.99"); + + System.out.println("Query options ..."); + out = runSample("clientwithqueryoptions"); + assertThat(out).contains("1 1 Total Junk"); + out = runSample("querywithqueryoptions"); + assertThat(out).contains("1 1 Total Junk"); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/SequenceSampleIT.java 
b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/SequenceSampleIT.java new file mode 100644 index 000000000000..e1e527042a93 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/SequenceSampleIT.java @@ -0,0 +1,145 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.admin.archived; + +import static com.example.spanner.SampleRunner.runSample; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import com.example.spanner.SampleTestBase; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.common.collect.ImmutableList; +import java.util.Collections; +import java.util.HashSet; +import java.util.concurrent.TimeUnit; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +/** + * Integration tests for Bit reversed sequence samples for GoogleStandardSql and PostgreSql + * dialects. + */ +@RunWith(Parameterized.class) +public class SequenceSampleIT extends SampleTestBase { + + private static DatabaseId databaseId; + + /** + * Set of dialects for which database has already been created in this test suite. 
This helps in + * limiting the number of databases created per dialect to one. + */ + private static final HashSet dbInitializedDialects = new HashSet<>(); + + @Parameters(name = "dialect = {0}") + public static Iterable data() { + return ImmutableList.of(Dialect.GOOGLE_STANDARD_SQL, Dialect.POSTGRESQL); + } + + @Parameter(0) + public static Dialect dialect; + + @Before + public void createTestDatabase() throws Exception { + // Limits number of created databases to one per dialect. + if (dbInitializedDialects.contains(dialect)) { + return; + } + dbInitializedDialects.add(dialect); + final String database = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + databaseAdminClient + .newDatabaseBuilder(DatabaseId.of(projectId, instanceId, database)) + .setDialect(dialect) + .build(), + Collections.emptyList()) + .get(10, TimeUnit.MINUTES); + databaseId = DatabaseId.of(projectId, instanceId, database); + } + + @Test + public void createSequence() throws Exception { + String out; + if (dialect == Dialect.GOOGLE_STANDARD_SQL) { + out = + runSample( + () -> + CreateSequenceSample.createSequence( + projectId, instanceId, databaseId.getDatabase())); + } else { + out = + runSample( + () -> + PgCreateSequenceSample.pgCreateSequence( + projectId, instanceId, databaseId.getDatabase())); + } + assertTrue( + out.contains( + "Created Seq sequence and Customers table, where its key column " + + "CustomerId uses the sequence as a default value")); + assertEquals(out.split("Inserted customer record with CustomerId", -1).length - 1, 3); + assertTrue(out.contains("Number of customer records inserted is: 3")); + } + + @Test + public void alterSequence() throws Exception { + String out; + if (dialect == Dialect.GOOGLE_STANDARD_SQL) { + out = + runSample( + () -> + AlterSequenceSample.alterSequence( + projectId, instanceId, databaseId.getDatabase())); + } else { + out = + runSample( + () -> + PgAlterSequenceSample.pgAlterSequence( + projectId, instanceId, 
databaseId.getDatabase())); + } + assertTrue( + out.contains("Altered Seq sequence to skip an inclusive range between 1000 and 5000000")); + assertEquals(out.split("Inserted customer record with CustomerId", -1).length - 1, 3); + assertTrue(out.contains("Number of customer records inserted is: 3")); + } + + @Test + public void dropSequence() throws Exception { + String out; + if (dialect == Dialect.GOOGLE_STANDARD_SQL) { + out = + runSample( + () -> + DropSequenceSample.dropSequence(projectId, instanceId, databaseId.getDatabase())); + } else { + out = + runSample( + () -> + PgDropSequenceSample.pgDropSequence( + projectId, instanceId, databaseId.getDatabase())); + } + assertTrue( + out.contains( + "Altered Customers table to drop DEFAULT from " + + "CustomerId column and dropped the Seq sequence")); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/SpannerSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/SpannerSampleIT.java new file mode 100644 index 000000000000..66410cce4d3b --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/SpannerSampleIT.java @@ -0,0 +1,593 @@ +/* + * Copyright 2017 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertTrue; + +import com.example.spanner.SampleRunner; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Backup; +import com.google.cloud.spanner.BackupId; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Instance; +import com.google.cloud.spanner.InstanceAdminClient; +import com.google.cloud.spanner.InstanceConfigId; +import com.google.cloud.spanner.InstanceId; +import com.google.cloud.spanner.InstanceInfo; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.Uninterruptibles; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.function.Predicate; +import java.util.regex.Pattern; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.threeten.bp.LocalDate; +import org.threeten.bp.temporal.ChronoField; + +/** Unit tests for {@code SpannerSample} */ +@RunWith(JUnit4.class) +@SuppressWarnings("checkstyle:abbreviationaswordinname") +public class SpannerSampleIT { + private static final int DBID_LENGTH = 20; + // The instance needs to exist for tests to pass. 
+ private static final String instanceId = System.getProperty("spanner.test.instance"); + private static final String baseDbId = System.getProperty("spanner.sample.database"); + private static final String keyLocation = + Preconditions.checkNotNull(System.getProperty("spanner.test.key.location")); + private static final String keyRing = + Preconditions.checkNotNull(System.getProperty("spanner.test.key.ring")); + private static final String keyName = + Preconditions.checkNotNull(System.getProperty("spanner.test.key.name")); + private static final String databaseId = formatForTest(baseDbId); + private static final String encryptedDatabaseId = formatForTest(baseDbId); + private static final String encryptedBackupId = formatForTest(baseDbId); + private static final String encryptedRestoreId = formatForTest(baseDbId); + private static final long STALE_INSTANCE_THRESHOLD_SECS = + TimeUnit.SECONDS.convert(24L, TimeUnit.HOURS); + static Spanner spanner; + static DatabaseId dbId; + static DatabaseAdminClient dbClient; + private static String key; + private long lastUpdateDataTimeInMillis; + + private String runSample(String command) throws Exception { + PrintStream stdOut = System.out; + ByteArrayOutputStream bout = new ByteArrayOutputStream(); + PrintStream out = new PrintStream(bout); + System.setOut(out); + SpannerSample.main(new String[] {command, instanceId, databaseId}); + System.setOut(stdOut); + return bout.toString(); + } + + @BeforeClass + public static void setUp() throws Exception { + SpannerOptions options = + SpannerOptions.newBuilder().setAutoThrottleAdministrativeRequests().build(); + spanner = options.getService(); + dbClient = spanner.getDatabaseAdminClient(); + dbId = DatabaseId.of(options.getProjectId(), instanceId, databaseId); + // Delete stale test databases that have been created earlier by this test, but not deleted. 
+ deleteStaleTestDatabases(instanceId, baseDbId); + key = + String.format( + "projects/%s/locations/%s/keyRings/%s/cryptoKeys/%s", + options.getProjectId(), keyLocation, keyRing, keyName); + + /* + * Delete stale instances that have been created earlier by this test but not deleted. + * Backups needed to be deleted from the instance first, as the instance can only be + * deleted once all backups have been deleted. + * */ + deleteStaleEncryptedTestInstances(); + } + + /** + * Deleting all the test instances with name starting with 'encrypted-test-' and were created + * before 24 hours. + * + * @throws InterruptedException If Thread.sleep() interrupted + */ + private static void deleteStaleEncryptedTestInstances() throws InterruptedException { + Timestamp now = Timestamp.now(); + + for (Instance instance : + spanner + .getInstanceAdminClient() + .listInstances(Options.filter("name:encrypted-test-")) + .iterateAll()) { + if ((now.getSeconds() - instance.getCreateTime().getSeconds()) + > STALE_INSTANCE_THRESHOLD_SECS) { + deleteAllBackups(instance.getId().getInstance()); + instance.delete(); + } + } + } + + static void deleteStaleTestDatabases(String instanceId, String baseDbId) { + Timestamp now = Timestamp.now(); + Pattern samplePattern = getTestDbIdPattern(baseDbId); + Pattern restoredPattern = getTestDbIdPattern("restored"); + for (Database db : dbClient.listDatabases(instanceId).iterateAll()) { + if (TimeUnit.HOURS.convert(now.getSeconds() - db.getCreateTime().getSeconds(), + TimeUnit.SECONDS) > 24) { + if (db.getId().getDatabase().length() >= DBID_LENGTH) { + if (samplePattern.matcher(toComparableId(baseDbId, db.getId().getDatabase())).matches()) { + db.drop(); + } + if (restoredPattern.matcher(toComparableId("restored", db.getId().getDatabase())) + .matches()) { + db.drop(); + } + } + } + } + } + + @AfterClass + public static void tearDown() throws Exception { + dbClient.dropDatabase(dbId.getInstanceId().getInstance(), dbId.getDatabase()); + 
dbClient.dropDatabase( + dbId.getInstanceId().getInstance(), SpannerSample.createRestoredSampleDbId(dbId)); + dbClient.dropDatabase(instanceId, encryptedDatabaseId); + dbClient.dropDatabase(instanceId, encryptedRestoreId); + dbClient.deleteBackup(instanceId, encryptedBackupId); + spanner.close(); + } + + @Test + public void testSample() throws Exception { + assertThat(instanceId).isNotNull(); + assertThat(databaseId).isNotNull(); + String out = runSample("createdatabase"); + assertThat(out).contains("Created database"); + assertThat(out).contains(dbId.getName()); + + runSample("write"); + + out = runSample("delete"); + assertThat(out).contains("Records deleted."); + + runSample("write"); + + out = runSample("read"); + assertThat(out).contains("1 1 Total Junk"); + + out = runSample("query"); + assertThat(out).contains("1 1 Total Junk"); + runSample("addmarketingbudget"); + + // wait for 15 seconds to elapse and then run an update, and query for stale data + lastUpdateDataTimeInMillis = System.currentTimeMillis(); + while (System.currentTimeMillis() < lastUpdateDataTimeInMillis + 16000) { + Thread.sleep(1000); + } + runSample("update"); + out = runSample("readstaledata"); + assertThat(out).contains("1 1 NULL"); + runSample("writetransaction"); + out = runSample("querymarketingbudget"); + assertThat(out).contains("1 1 300000"); + assertThat(out).contains("2 2 300000"); + + runSample("addindex"); + out = runSample("queryindex"); + assertThat(out).contains("Go, Go, Go"); + assertThat(out).contains("Forever Hold Your Peace"); + assertThat(out).doesNotContain("Green"); + + out = runSample("readindex"); + assertThat(out).contains("Go, Go, Go"); + assertThat(out).contains("Forever Hold Your Peace"); + assertThat(out).contains("Green"); + + runSample("addstoringindex"); + out = runSample("readstoringindex"); + assertThat(out).contains("300000"); + + out = runSample("readonlytransaction"); + assertThat(out.replaceAll("[\r\n]+", " ")).containsMatch("(Total Junk.*){2}"); + + 
out = runSample("addcommittimestamp"); + assertThat(out).contains("Added LastUpdateTime as a commit timestamp column"); + + runSample("updatewithtimestamp"); + out = runSample("querywithtimestamp"); + assertThat(out).contains("1 1 1000000"); + assertThat(out).contains("2 2 750000"); + + out = runSample("createtablewithtimestamp"); + assertThat(out).contains("Created Performances table in database"); + + runSample("writewithtimestamp"); + out = runSample("queryperformancestable"); + assertThat(out).contains("1 4 2017-10-05 11000"); + assertThat(out).contains("1 19 2017-11-02 15000"); + assertThat(out).contains("2 42 2017-12-23 7000"); + + runSample("writestructdata"); + out = runSample("querywithstruct"); + assertThat(out).startsWith("6\n"); + + out = runSample("querywitharrayofstruct"); + assertThat(out).startsWith("8\n7\n6"); + + out = runSample("querystructfield"); + assertThat(out).startsWith("6\n"); + + out = runSample("querynestedstructfield"); + assertThat(out).contains("6 Imagination\n"); + assertThat(out).contains("9 Imagination\n"); + + runSample("insertusingdml"); + out = runSample("querysingerstable"); + assertThat(out).contains("Virginia Watson"); + + runSample("updateusingdml"); + out = runSample("querymarketingbudget"); + assertThat(out).contains("1 1 2000000"); + + runSample("deleteusingdml"); + out = runSample("querysingerstable"); + assertThat(out).doesNotContain("Alice Trentor"); + + out = runSample("updateusingdmlwithtimestamp"); + assertThat(out).contains("2 records updated"); + + out = runSample("writeandreadusingdml"); + assertThat(out).contains("Timothy Campbell"); + + runSample("updateusingdmlwithstruct"); + out = runSample("querysingerstable"); + assertThat(out).contains("Timothy Grant"); + + runSample("writeusingdml"); + out = runSample("querysingerstable"); + assertThat(out).contains("Melissa Garcia"); + assertThat(out).contains("Russell Morales"); + assertThat(out).contains("Jacqueline Long"); + assertThat(out).contains("Dylan Shaw"); + 
out = runSample("querywithparameter"); + assertThat(out).contains("12 Melissa Garcia"); + + runSample("writewithtransactionusingdml"); + out = runSample("querymarketingbudget"); + assertThat(out).contains("1 1 2200000"); + assertThat(out).contains("2 2 550000"); + + runSample("updateusingpartitioneddml"); + out = runSample("querymarketingbudget"); + assertThat(out).contains("1 1 2200000"); + assertThat(out).contains("2 2 100000"); + + runSample("deleteusingpartitioneddml"); + out = runSample("querysingerstable"); + assertThat(out).doesNotContain("Timothy Grant"); + assertThat(out).doesNotContain("Melissa Garcia"); + assertThat(out).doesNotContain("Russell Morales"); + assertThat(out).doesNotContain("Jacqueline Long"); + assertThat(out).doesNotContain("Dylan Shaw"); + + out = runSample("updateusingbatchdml"); + assertThat(out).contains("1 record updated by stmt 0"); + assertThat(out).contains("1 record updated by stmt 1"); + + out = runSample("createtablewithdatatypes"); + assertThat(out).contains("Created Venues table in database"); + + runSample("writedatatypesdata"); + out = runSample("querywitharray"); + assertThat(out).contains("19 Venue 19 2020-11-01"); + assertThat(out).contains("42 Venue 42 2020-10-01"); + + out = runSample("querywithbool"); + assertThat(out).contains("19 Venue 19 true"); + + out = runSample("querywithbytes"); + assertThat(out).contains("4 Venue 4"); + + out = runSample("querywithdate"); + assertThat(out).contains("4 Venue 4 2018-09-02"); + assertThat(out).contains("42 Venue 42 2018-10-01"); + + out = runSample("querywithfloat"); + assertThat(out).contains("4 Venue 4 0.8"); + assertThat(out).contains("19 Venue 19 0.9"); + + out = runSample("querywithint"); + assertThat(out).contains("19 Venue 19 6300"); + assertThat(out).contains("42 Venue 42 3000"); + + out = runSample("querywithstring"); + assertThat(out).contains("42 Venue 42"); + + out = runSample("querywithtimestampparameter"); + assertThat(out).contains("4 Venue 4"); + 
assertThat(out).contains("19 Venue 19"); + assertThat(out).contains("42 Venue 42"); + + out = runSample("querywithnumeric"); + assertThat(out).contains("19 Venue 19 1200100"); + assertThat(out).contains("42 Venue 42 390650.99"); + + out = runSample("clientwithqueryoptions"); + assertThat(out).contains("1 1 Total Junk"); + out = runSample("querywithqueryoptions"); + assertThat(out).contains("1 1 Total Junk"); + + String backupName = + String.format( + "%s_%02d", + dbId.getDatabase(), LocalDate.now().get(ChronoField.ALIGNED_WEEK_OF_YEAR)); + BackupId backupId = BackupId.of(dbId.getInstanceId(), backupName); + + out = runSample("createbackup"); + assertThat(out).contains("Created backup [" + backupId + "]"); + + out = runSample("cancelcreatebackup"); + assertThat(out).contains( + "Backup operation for [" + backupId + "_cancel] successfully"); + + // TODO: remove try-catch when filtering on metadata fields works. + try { + out = runSample("listbackupoperations"); + assertThat(out).contains( + String.format( + "Backup %s on database %s pending:", + backupId.getName(), + dbId.getName())); + assertTrue("Out does not contain copy backup operations", out.contains( + "Copy Backup Operations")); + } catch (SpannerException e) { + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getMessage()).contains("Cannot evaluate filter expression"); + } + + out = runSample("listbackups"); + assertThat(out).contains("All backups:"); + assertThat(out).contains( + String.format("All backups with backup name containing \"%s\":", backupId.getBackup())); + assertThat(out).contains(String.format( + "All backups for databases with a name containing \"%s\":", + dbId.getDatabase())); + assertThat(out).contains( + String.format("All backups that expire before")); + assertThat(out).contains("All backups with size greater than 100 bytes:"); + assertThat(out).containsMatch( + Pattern.compile("All databases created after (.+) and that are ready:")); + 
assertThat(out).contains("All backups, listed using pagination:"); + // All the above tests should include the created backup exactly once, i.e. exactly 7 times. + assertThat(countOccurrences(out, backupId.getName())).isEqualTo(7); + + // Try the restore operation in a retry loop, as there is a limit on the number of restore + // operations that is allowed to execute simultaneously, and we should retry if we hit this + // limit. + boolean restored = false; + int restoreAttempts = 0; + while (true) { + try { + out = runSample("restorebackup"); + assertThat(out).contains( + "Restored database [" + + dbId.getName() + + "] from [" + + backupId.getName() + + "]"); + restored = true; + break; + } catch (SpannerException e) { + if (e.getErrorCode() == ErrorCode.FAILED_PRECONDITION + && e.getMessage() + .contains("Please retry the operation once the pending restores complete")) { + restoreAttempts++; + if (restoreAttempts == 10) { + System.out.println( + "Restore operation failed 10 times because of other pending restores. " + + "Giving up restore."); + break; + } + Uninterruptibles.sleepUninterruptibly(60L, TimeUnit.SECONDS); + } else { + throw e; + } + } + } + + if (restored) { + out = runSample("listdatabaseoperations"); + assertThat(out).contains( + String.format( + "Database %s restored from backup", + DatabaseId.of( + dbId.getInstanceId(), + SpannerSample.createRestoredSampleDbId(dbId)) + .getName())); + } + + out = runSample("updatebackup"); + assertThat(out).contains( + String.format("Updated backup [" + backupId + "]")); + + // Drop the restored database before we try to delete the backup. + // Otherwise the delete backup operation might fail as the backup is still in use by + // the OptimizeRestoredDatabase operation. 
+ dbClient.dropDatabase( + dbId.getInstanceId().getInstance(), SpannerSample.createRestoredSampleDbId(dbId)); + + out = runSample("deletebackup"); + assertThat(out).contains("Deleted backup [" + backupId + "]"); + } + + @Test + public void testEncryptedDatabaseAndBackupSamples() throws Exception { + String projectId = spanner.getOptions().getProjectId(); + // Create a separate instance for this test to prevent multiple parallel backup operations on + // the same instance that need to wait for each other. + String instanceId = String.format("encrypted-test-%s", UUID.randomUUID()); + InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient(); + instanceAdminClient + .createInstance(InstanceInfo.newBuilder(InstanceId.of(projectId, instanceId)) + .setDisplayName("Encrypted test instance") + .setInstanceConfigId(InstanceConfigId.of(projectId, "regional-" + keyLocation)) + .setNodeCount(1).build()) + .get(); + try { + String out = SampleRunner + .runSample(() -> CreateDatabaseWithEncryptionKey.createDatabaseWithEncryptionKey(dbClient, + projectId, instanceId, encryptedDatabaseId, key)); + assertThat(out).contains(String.format( + "Database projects/%s/instances/%s/databases/%s created with encryption key %s", + projectId, instanceId, encryptedDatabaseId, key)); + + out = SampleRunner.runSampleWithRetry( + () -> CreateBackupWithEncryptionKey.createBackupWithEncryptionKey(dbClient, projectId, + instanceId, encryptedDatabaseId, encryptedBackupId, key), + new ShouldRetryBackupOperation()); + assertThat(out).containsMatch(String.format( + "Backup projects/%s/instances/%s/backups/%s of size \\d+ bytes " + + "was created at (.*) using encryption key %s", + projectId, instanceId, encryptedBackupId, key)); + + out = SampleRunner.runSampleWithRetry( + () -> RestoreBackupWithEncryptionKey.restoreBackupWithEncryptionKey(dbClient, projectId, + instanceId, encryptedBackupId, encryptedRestoreId, key), + new ShouldRetryBackupOperation()); + 
assertThat(out).contains(String.format( + "Database projects/%s/instances/%s/databases/%s" + + " restored to projects/%s/instances/%s/databases/%s" + + " from backup projects/%s/instances/%s/backups/%s" + " using encryption key %s", + projectId, instanceId, encryptedDatabaseId, projectId, instanceId, encryptedRestoreId, + projectId, instanceId, encryptedBackupId, key)); + } finally { + // Delete the backups from the test instance first, as the instance can only be deleted once + // all backups have been deleted. + deleteAllBackups(instanceId); + instanceAdminClient.deleteInstance(instanceId); + } + } + + private static void deleteAllBackups(String instanceId) throws InterruptedException { + for (Backup backup : dbClient.listBackups(instanceId).iterateAll()) { + int attempts = 0; + while (attempts < 30) { + try { + attempts++; + backup.delete(); + break; + } catch (SpannerException e) { + if (e.getErrorCode() == ErrorCode.FAILED_PRECONDITION && e.getMessage() + .contains("Please try deleting the backup once the restore or post-restore optimize " + + "operations have completed on these databases.")) { + // Wait 30 seconds and then retry. 
+ Thread.sleep(30_000L); + } else { + throw e; + } + } + } + } + } + + private String runSampleRunnable(Runnable sample) { + PrintStream stdOut = System.out; + ByteArrayOutputStream bout = new ByteArrayOutputStream(); + PrintStream out = new PrintStream(bout); + System.setOut(out); + sample.run(); + System.setOut(stdOut); + return bout.toString(); + } + + @Test + public void testCreateInstanceSample() { + String instanceId = formatForTest("sample-inst"); + String out = + runSampleRunnable(() -> { + try { + CreateInstanceExample.createInstance( + dbId.getInstanceId().getProject(), instanceId); + } finally { + spanner.getInstanceAdminClient().deleteInstance(instanceId); + } + }); + assertThat(out) + .contains( + String.format( + "Instance %s was successfully created", + InstanceId.of(dbId.getInstanceId().getProject(), instanceId))); + } + + private static int countOccurrences(String input, String search) { + return input.split(search).length - 1; + } + + private static String toComparableId(String baseId, String existingId) { + String zeroUuid = "00000000-0000-0000-0000-0000-00000000"; + int shouldBeLength = (baseId + "-" + zeroUuid).length(); + int missingLength = shouldBeLength - existingId.length(); + return existingId + zeroUuid.substring(zeroUuid.length() - missingLength); + } + + private static Pattern getTestDbIdPattern(String baseDbId) { + return Pattern.compile( + baseDbId + "-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{8}", + Pattern.CASE_INSENSITIVE); + } + + static String formatForTest(String name) { + return name + "-" + UUID.randomUUID().toString().substring(0, DBID_LENGTH); + } + + static class ShouldRetryBackupOperation implements Predicate { + private static final int MAX_ATTEMPTS = 20; + private int attempts = 0; + + @Override + public boolean test(SpannerException e) { + if (e.getErrorCode() == ErrorCode.FAILED_PRECONDITION + && e.getMessage().contains("Please retry the operation once the pending")) { + attempts++; + if 
(attempts == MAX_ATTEMPTS) { + // Throw custom exception so it is easier to locate in the log why it went wrong. + throw SpannerExceptionFactory.newSpannerException(ErrorCode.DEADLINE_EXCEEDED, + String.format("Operation failed %d times because of other pending operations. " + + "Giving up operation.\n", attempts), + e); + } + // Wait one minute before retrying. + Uninterruptibles.sleepUninterruptibly(60L, TimeUnit.SECONDS); + return true; + } + return false; + } + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/UpdateDatabaseSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/UpdateDatabaseSampleIT.java new file mode 100644 index 000000000000..29a99ed3b890 --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/UpdateDatabaseSampleIT.java @@ -0,0 +1,62 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import com.example.spanner.SampleRunner; +import com.example.spanner.SampleTestBase; +import com.example.spanner.admin.archived.UpdateDatabaseSample; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.DatabaseInfo.DatabaseField; +import com.google.spanner.admin.database.v1.UpdateDatabaseMetadata; +import java.util.Collections; +import java.util.concurrent.TimeUnit; +import org.junit.Test; + +public class UpdateDatabaseSampleIT extends SampleTestBase { + + @Test + public void testUpdateDatabase() throws Exception { + // Create database + final String databaseId = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase(instanceId, databaseId, Collections.emptyList()) + .get(5, TimeUnit.MINUTES); + + // Runs sample + final String out = + SampleRunner.runSample( + () -> UpdateDatabaseSample.updateDatabase(projectId, instanceId, databaseId)); + + DatabaseId dbId = DatabaseId.of(projectId, instanceId, databaseId); + assertTrue( + "Expected that database would have been updated. 
Output received was " + out, + out.contains(String.format("Updated database %s", dbId))); + + // Cleanup + Database databaseToUpdate = + databaseAdminClient.newDatabaseBuilder(dbId).disableDropProtection().build(); + OperationFuture operation = + databaseAdminClient.updateDatabase(databaseToUpdate, DatabaseField.DROP_PROTECTION); + Database updatedDb = operation.get(5, TimeUnit.MINUTES); + assertFalse(updatedDb.isDropProtectionEnabled()); + } +} diff --git a/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/UpdateDatabaseWithDefaultLeaderSampleIT.java b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/UpdateDatabaseWithDefaultLeaderSampleIT.java new file mode 100644 index 000000000000..ce657575f07a --- /dev/null +++ b/java-spanner/samples/snippets/src/test/java/com/example/spanner/admin/archived/UpdateDatabaseWithDefaultLeaderSampleIT.java @@ -0,0 +1,68 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.admin.archived; + +import static org.junit.Assert.assertTrue; + +import com.example.spanner.SampleRunner; +import com.example.spanner.SampleTestBase; +import com.example.spanner.admin.archived.UpdateDatabaseWithDefaultLeaderSample; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.InstanceConfig; +import com.google.cloud.spanner.InstanceConfigId; +import java.util.Collections; +import java.util.concurrent.TimeUnit; +import org.junit.Test; + +public class UpdateDatabaseWithDefaultLeaderSampleIT extends SampleTestBase { + + @Test + public void testUpdateDatabaseWithDefaultLeader() throws Exception { + // Create database + final String databaseId = idGenerator.generateDatabaseId(); + final Database createdDatabase = databaseAdminClient + .createDatabase(multiRegionalInstanceId, databaseId, Collections.emptyList()) + .get(5, TimeUnit.MINUTES); + final String defaultLeader = createdDatabase.getDefaultLeader(); + + // Finds a possible new leader option + final InstanceConfigId instanceConfigId = instanceAdminClient + .getInstance(multiRegionalInstanceId) + .getInstanceConfigId(); + final InstanceConfig config = instanceAdminClient + .getInstanceConfig(instanceConfigId.getInstanceConfig()); + final String newLeader = config + .getLeaderOptions() + .stream() + .filter(leader -> !leader.equals(defaultLeader)) + .findFirst() + .orElseThrow(() -> + new RuntimeException("Expected to find a leader option different than " + defaultLeader) + ); + + // Runs sample + final String out = SampleRunner.runSample(() -> UpdateDatabaseWithDefaultLeaderSample + .updateDatabaseWithDefaultLeader(projectId, multiRegionalInstanceId, databaseId, newLeader) + ); + + assertTrue( + "Expected that database new leader would had been updated to " + newLeader + "." 
+ + " Output received was " + out, + out.contains("Updated default leader to " + newLeader) + ); + } +} diff --git a/pom.xml b/pom.xml index 9345b46209e2..af030c8618e3 100644 --- a/pom.xml +++ b/pom.xml @@ -221,6 +221,7 @@ java-shopping-merchant-quota java-shopping-merchant-reports java-shopping-merchant-reviews + java-spanner java-spanneradapter java-speech java-storage-transfer diff --git a/versions.txt b/versions.txt index 9ccbc9229700..50d6874c0191 100644 --- a/versions.txt +++ b/versions.txt @@ -587,7 +587,7 @@ grpc-google-cloud-tpu-v2:2.89.0:2.90.0-SNAPSHOT google-cloud-datalineage:0.80.0:0.81.0-SNAPSHOT proto-google-cloud-datalineage-v1:0.80.0:0.81.0-SNAPSHOT grpc-google-cloud-datalineage-v1:0.80.0:0.81.0-SNAPSHOT -google-iam-policy:1.86.0:1.87.0-SNAPSHOT +google-iam-policy:1.62.0:1.62.1-SNAPSHOT proto-google-cloud-build-v2:3.90.0:3.91.0-SNAPSHOT grpc-google-cloud-build-v2:3.90.0:3.91.0-SNAPSHOT google-cloud-advisorynotifications:0.77.0:0.78.0-SNAPSHOT @@ -993,7 +993,6 @@ proto-google-iam-v1:1.62.0:1.62.1-SNAPSHOT grpc-google-iam-v1:1.62.0:1.62.1-SNAPSHOT proto-google-iam-v2beta:1.62.0:1.62.1-SNAPSHOT grpc-google-iam-v2beta:1.62.0:1.62.1-SNAPSHOT -google-iam-policy:1.62.0:1.62.1-SNAPSHOT proto-google-iam-v2:1.62.0:1.62.1-SNAPSHOT grpc-google-iam-v2:1.62.0:1.62.1-SNAPSHOT google-cloud-core:2.66.0:2.66.1-SNAPSHOT @@ -1005,3 +1004,13 @@ proto-google-iam-v3:1.62.0:1.62.1-SNAPSHOT grpc-google-iam-v3:1.62.0:1.62.1-SNAPSHOT proto-google-iam-v3beta:1.62.0:1.62.1-SNAPSHOT grpc-google-iam-v3beta:1.62.0:1.62.1-SNAPSHOT +proto-google-cloud-spanner-admin-instance-v1:6.112.0:6.112.1-SNAPSHOT +proto-google-cloud-spanner-v1:6.112.0:6.112.1-SNAPSHOT +proto-google-cloud-spanner-admin-database-v1:6.112.0:6.112.1-SNAPSHOT +grpc-google-cloud-spanner-v1:6.112.0:6.112.1-SNAPSHOT +grpc-google-cloud-spanner-admin-instance-v1:6.112.0:6.112.1-SNAPSHOT +grpc-google-cloud-spanner-admin-database-v1:6.112.0:6.112.1-SNAPSHOT +google-cloud-spanner:6.112.0:6.112.1-SNAPSHOT 
+google-cloud-spanner-executor:6.112.0:6.112.1-SNAPSHOT +proto-google-cloud-spanner-executor-v1:6.112.0:6.112.1-SNAPSHOT +grpc-google-cloud-spanner-executor-v1:6.112.0:6.112.1-SNAPSHOT